# This file contains the information needed to define wasm intrinsic operations.
# i8vecmul(dest: i32, src1: i32, src2: i32, len: i32)
# Performs pairwise multiplication of two i8 vectors of length 'len' located
# at 'src1' and 'src2'. The output is written to 'dest'. This is used as a
# basic self-test for intrinsics.
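#
# A minimal C sketch of the intended semantics, treating the i32 parameters
# as pointers into linear memory (illustrative only; the function name is
# hypothetical and the engine's implementation may differ):
#
#   void i8vecmul_ref(int8_t* dest, const int8_t* src1, const int8_t* src2,
#                     uint32_t len) {
#     for (uint32_t i = 0; i < len; i++) {
#       // Pairwise product, truncated to the low 8 bits.
#       dest[i] = (int8_t)(src1[i] * src2[i]);
#     }
#   }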
- op: I8VecMul
symbolic_address:
name: IntrI8VecMul
type: Args_Int32_GeneralInt32Int32Int32Int32General
entry: Instance::intrI8VecMul
export: i8vecmul
params:
- I32
- I32
- I32
- I32
#if defined(ENABLE_WASM_MOZ_INTGEMM)
# Intrinsics for integer matrix multiplication followed by addition of a bias.
# Please refer to @TOPSRCDIR/js/src/intgemm/IntegerGemmIntrinsic.h for more details on these intrinsics.
# Prepare B for the Matrix Multiply intrinsic from the input matrix B.
#
# Quantization is performed on the input.
# The final prepared B is in CPU-dependent format and can be used as an input to matrix multiply
# intrinsic (`int8_multiply_and_add_bias`).
#
# int8_prepare_b(const float* inputMatrixB, float scale, float zeroPoint, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
# int8_prepare_b(inputMatrixB: i32, scale: f32, zeroPoint: f32, rowsB: i32, colsB: i32, outputMatrixB: i32)
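#
# Conceptually, each element is quantized roughly as in the C sketch below
# before being reordered into the CPU-dependent layout. This is a hedged
# sketch: the exact rounding mode and the role of 'zeroPoint' are details of
# the intgemm backend.
#
#   #include <math.h>
#   int8_t quantize(float v, float scale) {
#     float q = nearbyintf(v * scale);  // scale, then round
#     if (q > 127.0f) q = 127.0f;       // clamp to the int8 range
#     if (q < -128.0f) q = -128.0f;
#     return (int8_t)q;
#   }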
- op: I8PrepareB
symbolic_address:
name: IntrI8PrepareB
type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
entry: intgemm::IntrI8PrepareB
export: int8_prepare_b
params:
- I32
- F32
- F32
- I32
- I32
- I32
# Prepare B for the Matrix Multiply intrinsic from a transposed version of the input matrix B.
#
# Quantization is performed on the floating-point values of the input.
# The final prepared B is in CPU-dependent format and can be used as an input to matrix multiply
# intrinsic (`int8_multiply_and_add_bias`).
#
# int8_prepare_b_from_transposed(const float* inputMatrixBTransposed, float scale, float zeroPoint, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
# int8_prepare_b_from_transposed(inputMatrixBTransposed: i32, scale: f32, zeroPoint: f32, rowsB: i32, colsB: i32, outputMatrixB: i32)
- op: I8PrepareBFromTransposed
symbolic_address:
name: IntrI8PrepareBFromTransposed
type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
entry: intgemm::IntrI8PrepareBFromTransposed
export: int8_prepare_b_from_transposed
params:
- I32
- F32
- F32
- I32
- I32
- I32
# Prepare B for the Matrix Multiply intrinsic from a quantized and transposed
# version of the input matrix B that is already in a CPU-independent format.
#
# The final prepared B is in CPU-dependent format and can be used as an input to matrix multiply
# intrinsic (`int8_multiply_and_add_bias`).
#
# int8_prepare_b_from_quantized_transposed(const int8_t* inputMatrixBQuantizedTransposed, uint32_t rowsB, uint32_t colsB, int8_t* outputMatrixB)
# int8_prepare_b_from_quantized_transposed(inputMatrixBQuantizedTransposed: i32, rowsB: i32, colsB: i32, outputMatrixB: i32)
- op: I8PrepareBFromQuantizedTransposed
symbolic_address:
name: IntrI8PrepareBFromQuantizedTransposed
type: Args_Int32_GeneralInt32Int32Int32Int32General
entry: intgemm::IntrI8PrepareBFromQuantizedTransposed
export: int8_prepare_b_from_quantized_transposed
params:
- I32
- I32
- I32
- I32
# Prepare A for the Matrix Multiply intrinsic from Input matrix A.
#
# Quantization is performed on the floating-point values of the input.
# The final prepared A may be architecture dependent: on some architectures
# (e.g. x86) it may be unsigned (achieved by adding 127 to the quantized
# values), while on others (e.g. ARM) it may be signed.
# The final prepared A can be used as an input to matrix multiply intrinsic
# (`int8_multiply_and_add_bias`).
#
# int8_prepare_a(const float* inputMatrixA, float scale, float zeroPoint, uint32_t rowsA, uint32_t colsA, int8_t* outputMatrixA)
# int8_prepare_a(inputMatrixA: i32, scale: f32, zeroPoint: f32, rowsA: i32, colsA: i32, outputMatrixA: i32)
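#
# For example (a sketch based on the note above, not the actual code), an
# x86-style backend could prepare each element as
#   (uint8_t)(clamp(round(v * scale), -128, 127) + 127)
# while an ARM-style backend could keep the signed value
#   (int8_t)clamp(round(v * scale), -128, 127)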
- op: I8PrepareA
symbolic_address:
name: IntrI8PrepareA
type: Args_Int32_GeneralInt32Float32Float32Int32Int32Int32General
entry: intgemm::IntrI8PrepareA
export: int8_prepare_a
params:
- I32
- F32
- F32
- I32
- I32
- I32
# Prepare bias for the Matrix Multiply intrinsic.
#
# It uses the prepared B (which must be obtained by using any of the `int8_prepare_b*` intrinsics) and
# a bias input to prepare the final bias.
#
# The final bias can be used as an input to matrix multiply intrinsic (`int8_multiply_and_add_bias`).
#
# int8_prepare_bias(const int8_t* inputMatrixBPrepared, float scaleA, float zeroPointA, float scaleB, float zeroPointB, uint32_t rowsB, uint32_t colsB, const float* inputBias, float* output)
# int8_prepare_bias(inputMatrixBPrepared: i32, scaleA: f32, zeroPointA: f32, scaleB: f32, zeroPointB: f32, rowsB: i32, colsB: i32, inputBias: i32, output: i32)
- op: I8PrepareBias
symbolic_address:
name: IntrI8PrepareBias
type: Args_Int32_GeneralInt32Float32Float32Float32Float32Int32Int32Int32Int32General
entry: intgemm::IntrI8PrepareBias
export: int8_prepare_bias
params:
- I32
- F32
- F32
- F32
- F32
- I32
- I32
- I32
- I32
# Perform multiplication of two matrices followed by addition of a bias.
#
# i.e. Output = inputMatrixAPrepared * inputMatrixBPrepared + inputBiasPrepared
#
# The inputs of this intrinsic must be obtained by using `int8_prepare_a`,
# one of the `int8_prepare_b*` intrinsics, and `int8_prepare_bias`,
# respectively. A sketch of a typical call sequence follows the signatures
# below.
#
# int8_multiply_and_add_bias(const int8_t* inputMatrixAPrepared, float scaleA, float zeroPointA,
# const int8_t* inputMatrixBPrepared, float scaleB, float zeroPointB,
# const float* inputBiasPrepared, float unquantMultiplier,
# uint32_t rowsA, uint32_t width, uint32_t colsB, float* output)
# int8_multiply_and_add_bias(inputMatrixAPrepared: i32, scaleA: f32, zeroPointA: f32,
# inputMatrixBPrepared: i32, scaleB: f32, zeroPointB: f32,
# inputBiasPrepared: i32, unquantMultiplier: f32,
# rowsA: i32, width: i32, colsB: i32, output: i32)
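#
# A sketch of a typical call sequence, written as C-style pseudocode. The
# buffer names are illustrative; each pointer argument stands for an offset
# into wasm linear memory, and all buffers must be allocated by the caller:
#
#   // A is rowsA x width (f32), B is width x colsB (f32),
#   // bias is 1 x colsB (f32); output is rowsA x colsB (f32).
#   int8_prepare_a(A, scaleA, zeroPointA, rowsA, width, preparedA);
#   int8_prepare_b(B, scaleB, zeroPointB, width, colsB, preparedB);
#   int8_prepare_bias(preparedB, scaleA, zeroPointA, scaleB, zeroPointB,
#                     width, colsB, bias, preparedBias);
#   int8_multiply_and_add_bias(preparedA, scaleA, zeroPointA,
#                              preparedB, scaleB, zeroPointB,
#                              preparedBias, unquantMultiplier,
#                              rowsA, width, colsB, output);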
- op: I8MultiplyAndAddBias
symbolic_address:
name: IntrI8MultiplyAndAddBias
type: Args_Int32_GeneralInt32Float32Float32Int32Float32Float32Int32Float32Int32Int32Int32Int32General
entry: intgemm::IntrI8MultiplyAndAddBias
export: int8_multiply_and_add_bias
params:
- I32
- F32
- F32
- I32
- F32
- F32
- I32
- F32
- I32
- I32
- I32
- I32
# Select a subset of columns of prepared B.
#
# The indices of the columns to be selected are specified in an array.
#
# int8_select_columns_of_b(const int8_t* inputMatrixBPrepared, uint32_t rowsB, uint32_t colsB, const uint32_t* colIndexList, const uint32_t sizeColIndexList, int8_t* output)
# int8_select_columns_of_b(inputMatrixBPrepared: i32, rowsB: i32, colsB: i32, colIndexList: i32, sizeColIndexList: i32, output: i32)
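#
# Ignoring the CPU-dependent layout of prepared B, this is conceptually
# equivalent to the following row-major C sketch (illustrative only):
#
#   for (uint32_t r = 0; r < rowsB; r++)
#     for (uint32_t i = 0; i < sizeColIndexList; i++)
#       output[r * sizeColIndexList + i] =
#           inputMatrixBPrepared[r * colsB + colIndexList[i]];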
- op: I8SelectColumnsOfB
symbolic_address:
name: IntrI8SelectColumnsOfB
type: Args_Int32_GeneralInt32Int32Int32Int32Int32Int32General
entry: intgemm::IntrI8SelectColumnsOfB
export: int8_select_columns_of_b
params:
- I32
- I32
- I32
- I32
- I32
- I32
#endif // ENABLE_WASM_MOZ_INTGEMM