path: root/js/src/wasm/WasmIntrinsic.yaml
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit    36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree      105e8c98ddea1c1e4784a60a5a6410fa416be2de /js/src/wasm/WasmIntrinsic.yaml
parent    Initial commit. (diff)
Adding upstream version 115.7.0esr. (tag: upstream/115.7.0esr)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'js/src/wasm/WasmIntrinsic.yaml')
-rw-r--r--  js/src/wasm/WasmIntrinsic.yaml  201
1 file changed, 201 insertions(+), 0 deletions(-)
diff --git a/js/src/wasm/WasmIntrinsic.yaml b/js/src/wasm/WasmIntrinsic.yaml
new file mode 100644
index 0000000000..7867604915
--- /dev/null
+++ b/js/src/wasm/WasmIntrinsic.yaml
@@ -0,0 +1,201 @@
+# This file contains the information needed to define wasm intrinsic operations.
+
+# i8vecmul(dest: i32, src1: i32, src2: i32, len: i32)
+# Performs pairwise multiplication of two i8 vectors of length 'len' located
+# at 'src1' and 'src2'. The output is written to 'dest'. This is used as a
+# basic self-test for intrinsics (see the sketch after this entry).
+- op: I8VecMul
+ symbolic_address:
+ name: IntrI8VecMul
+ type: Args_Int32_GeneralInt32Int32Int32Int32General
+ entry: Instance::intrI8VecMul
+ export: i8vecmul
+ params:
+ - I32
+ - I32
+ - I32
+ - I32
+
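+# An illustrative sketch of the semantics above (a sketch only; assumption for
+# clarity: the i32 params are byte offsets into wasm memory, shown here as C
+# pointers):
+#
+#   void i8vecmul(int8_t* dest, const int8_t* src1, const int8_t* src2,
+#                 uint32_t len) {
+#     for (uint32_t i = 0; i < len; i++) {
+#       dest[i] = src1[i] * src2[i];  // pairwise i8 multiplication
+#     }
+#   }
+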
+#if defined(ENABLE_WASM_MOZ_INTGEMM)
+
+# Intrinsics for integer matrix multiplication followed by addition of bias.
+# Please refer to @TOPSRCDIR/js/src/intgemm/IntegerGemmIntrinsic.h for more details on these intrinsics.
+
+
+# Prepares B for the Matrix Multiply intrinsic from the input matrix B.
+#
+# Quantization is performed on the input.
+# The final prepared B is in a CPU-dependent format and can be used as an input to the matrix
+# multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_b(const float* inputMatrixB, float scale, float zeroPoint, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
+# int8_prepare_b(inputMatrixB: i32, scale: f32, zeroPoint: f32, rowsB: i32, colsB: i32, outputMatrixB: i32)
+- op: I8PrepareB
+ symbolic_address:
+ name: IntrI8PrepareB
+ type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareB
+ export: int8_prepare_b
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+
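+# Example call (a sketch only; buffer names and values are hypothetical, and any
+# dimension/alignment requirements are those documented in IntegerGemmIntrinsic.h):
+#
+#   int8_prepare_b(B, /*scale=*/64.0f, /*zeroPoint=*/0.0f,
+#                  /*rowsB=*/width, /*colsB=*/colsB, preparedB);
+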
+
+# Prepares B for the Matrix Multiply intrinsic from the transposed version of the input matrix B.
+#
+# Quantization is performed on the floating-point values of the input.
+# The final prepared B is in a CPU-dependent format and can be used as an input to the matrix
+# multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_b_from_transposed(const float* inputMatrixBTransposed, float scale, float zeroPoint, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
+# int8_prepare_b_from_transposed(inputMatrixBTransposed: i32, scale: f32, zeroPoint: f32, rowsB: i32, colsB: i32, outputMatrixB: i32)
+- op: I8PrepareBFromTransposed
+ symbolic_address:
+ name: IntrI8PrepareBFromTransposed
+ type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareBFromTransposed
+ export: int8_prepare_b_from_transposed
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+
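+# Conceptually (a reading of the comments above, not a normative statement): if
+# BT is the transpose of B, i.e. BT[j][i] == B[i][j], then
+#
+#   int8_prepare_b_from_transposed(BT, scale, zeroPoint, rowsB, colsB, preparedB)
+#
+# should produce the same prepared B as
+#
+#   int8_prepare_b(B, scale, zeroPoint, rowsB, colsB, preparedB)
+#
+# where rowsB/colsB are the dimensions of the original (untransposed) B.
+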
+
+# Prepares B for the Matrix Multiply intrinsic from a quantized and transposed version of the
+# input matrix B, which is in a CPU-independent format.
+#
+# The final prepared B is in a CPU-dependent format and can be used as an input to the matrix
+# multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_b_from_quantized_transposed(const int8_t* inputMatrixBQuantizedTransposed, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
+# int8_prepare_b_from_quantized_transposed(inputMatrixBQuantizedTransposed: i32, rowsB: i32, colsB: i32, outputMatrixB: i32)
+- op: I8PrepareBFromQuantizedTransposed
+ symbolic_address:
+ name: IntrI8PrepareBFromQuantizedTransposed
+ type: Args_Int32_GeneralInt32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareBFromQuantizedTransposed
+ export: int8_prepare_b_from_quantized_transposed
+ params:
+ - I32
+ - I32
+ - I32
+ - I32
+
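+# Since the input here is already int8-quantized (hence no scale/zeroPoint
+# parameters) and transposed, this intrinsic only has to rearrange the
+# CPU-independent layout into the CPU-dependent one (a reading of the comment
+# above; buffer names below are hypothetical):
+#
+#   int8_prepare_b_from_quantized_transposed(BQT, rowsB, colsB, preparedB);
+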
+
+# Prepares A for the Matrix Multiply intrinsic from the input matrix A.
+#
+# Quantization is performed on the floating-point values of the input.
+# The final prepared A may be architecture-dependent: e.g. on some architectures like x86 it
+# may be unsigned (achieved by adding 127 to the quantized values), while on others like ARM
+# it may be signed (see the sketch after this entry).
+# The final prepared A can be used as an input to the matrix multiply intrinsic
+# (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_a(const float* inputMatrixA, float scale, float zeroPoint, uint32_t rowsA, uint32_t colsA, int8_t* outputMatrixA)
+# int8_prepare_a(inputMatrixA: i32, scale: f32, zeroPoint: f32, rowsA: i32, colsA: i32, outputMatrixA: i32)
+- op: I8PrepareA
+ symbolic_address:
+ name: IntrI8PrepareA
+ type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareA
+ export: int8_prepare_a
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+
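+# An illustrative sketch of the x86 case described above (simplified;
+# 'quantize' is a hypothetical helper standing in for the actual quantization
+# routine):
+#
+#   preparedA[i] = (uint8_t)(quantize(inputMatrixA[i], scale) + 127);
+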
+
+# Prepares bias for the Matrix Multiply intrinsic.
+#
+# It uses the prepared B (which must be obtained using one of the `int8_prepare_b*`
+# intrinsics) and a bias input to prepare the final bias.
+#
+# The final bias can be used as an input to the matrix multiply intrinsic (`int8_multiply_and_add_bias`).
+#
+# int8_prepare_bias(const int8_t* inputMatrixBPrepared, float scaleA, float zeroPointA, float scaleB, float zeroPointB, uint32_t rowsB, uint32_t colsB, const float* inputBias, float* output)
+# int8_prepare_bias(inputMatrixBPrepared: i32, scaleA: f32, zeroPointA: f32, scaleB: f32, zeroPointB: f32, rowsB: i32, colsB: i32, inputBias: i32, output: i32)
+- op: I8PrepareBias
+ symbolic_address:
+ name: IntrI8PrepareBias
+ type: Args_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General
+ entry: intgemm::IntrI8PrepareBias
+ export: int8_prepare_bias
+ params:
+ - I32
+ - F32
+ - F32
+ - F32
+ - F32
+ - I32
+ - I32
+ - I32
+ - I32
+
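+# Example call (a sketch; buffer names are hypothetical, and rowsB here equals
+# the 'width' that is later passed to int8_multiply_and_add_bias):
+#
+#   int8_prepare_bias(preparedB, scaleA, zeroPointA, scaleB, zeroPointB,
+#                     rowsB, colsB, bias, preparedBias);
+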
+
+# Performs multiplication of two matrices followed by addition of a bias,
+#
+# i.e. Output = inputMatrixAPrepared * inputMatrixBPrepared + inputBiasPrepared
+#
+# The inputs of this intrinsic must be obtained via `int8_prepare_a`, one of the
+# `int8_prepare_b*` intrinsics, and `int8_prepare_bias`, respectively (see the
+# pipeline sketch after this entry).
+#
+# int8_multiply_and_add_bias(const int8_t* inputMatrixAPrepared, float scaleA, float zeroPointA,
+# const int8_t* inputMatrixBPrepared, float scaleB, float zeroPointB,
+# const float* inputBiasPrepared, float unquantMultiplier,
+# uint32_t rowsA, uint32_t width, uint32_t colsB, float* output)
+# int8_multiply_and_add_bias(inputMatrixAPrepared: i32, scaleA: f32, zeroPointA: f32,
+# inputMatrixBPrepared: i32, scaleB: f32, zeroPointB: f32,
+# inputBiasPrepared: i32, unquantMultiplier: f32,
+# rowsA: i32, width: i32, colsB: i32, output: i32)
+- op: I8MultiplyAndAddBias
+ symbolic_address:
+ name: IntrI8MultiplyAndAddBias
+ type: Args_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General
+ entry: intgemm::IntrI8MultiplyAndAddBias
+ export: int8_multiply_and_add_bias
+ params:
+ - I32
+ - F32
+ - F32
+ - I32
+ - F32
+ - F32
+ - I32
+ - F32
+ - I32
+ - I32
+ - I32
+ - I32
+
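+# End-to-end sketch of the intended pipeline (buffer names are hypothetical;
+# A is rowsA x width, B is width x colsB, output is rowsA x colsB):
+#
+#   int8_prepare_a(A, scaleA, zeroPointA, rowsA, width, preparedA);
+#   int8_prepare_b(B, scaleB, zeroPointB, width, colsB, preparedB);
+#   int8_prepare_bias(preparedB, scaleA, zeroPointA, scaleB, zeroPointB,
+#                     width, colsB, bias, preparedBias);
+#   int8_multiply_and_add_bias(preparedA, scaleA, zeroPointA,
+#                              preparedB, scaleB, zeroPointB,
+#                              preparedBias, unquantMultiplier,
+#                              rowsA, width, colsB, output);
+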
+
+# Selects a subset of columns of a prepared B.
+#
+# The indices of the columns to select are specified by an array (see the example after
+# this entry).
+#
+# int8_select_columns_of_b(const int8_t* inputMatrixBPrepared, uint32_t rowsB, uint32_t colsB, const uint32_t* colIndexList, const uint32_t sizeColIndexList, int8_t* output)
+# int8_select_columns_of_b(inputMatrixBPrepared: i32, rowsB: i32, colsB: i32, colIndexList: i32, sizeColIndexList: i32, output: i32)
+- op: I8SelectColumnsOfB
+ symbolic_address:
+ name: IntrI8SelectColumnsOfB
+ type: Args_Int32_GeneralInt32Int32Int32Int32Int32Int32General
+ entry: intgemm::IntrI8SelectColumnsOfB
+ export: int8_select_columns_of_b
+ params:
+ - I32
+ - I32
+ - I32
+ - I32
+ - I32
+ - I32
+
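+# Example (hypothetical values): selecting columns 0, 5 and 7 of a prepared
+# 64x8 B yields a prepared 64x3 B:
+#
+#   uint32_t cols[] = {0, 5, 7};
+#   int8_select_columns_of_b(preparedB, /*rowsB=*/64, /*colsB=*/8,
+#                            cols, /*sizeColIndexList=*/3, output);
+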
+#endif // ENABLE_WASM_MOZ_INTGEMM