;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
; Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
; are met:
; * Redistributions of source code must retain the above copyright
; notice, this list of conditions and the following disclaimer.
; * Redistributions in binary form must reproduce the above copyright
; notice, this list of conditions and the following disclaimer in
; the documentation and/or other materials provided with the
; distribution.
; * Neither the name of Intel Corporation nor the names of its
; contributors may be used to endorse or promote products derived
; from this software without specific prior written permission.
;
; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
; "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
; LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
; A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
; OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
; SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
; LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
; DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
; THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;;;
;;; gf_vect_dot_prod_avx2(len, vec, *g_tbls, **buffs, *dest);
;;;
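;;;
;;; For reference, a minimal C sketch of the GF(2^8) dot product this
;;; routine computes (names follow the prototype above; the 32-byte
;;; per-coefficient layout of g_tbls -- 16 low-nibble products followed
;;; by 16 high-nibble products -- is assumed from the loads below):
;;;
;;;   for (int i = 0; i < len; i++) {
;;;           unsigned char s = 0;
;;;           for (int j = 0; j < vec; j++) {
;;;                   unsigned char b = buffs[j][i];
;;;                   s ^= g_tbls[32*j + (b & 0x0f)]      /* low-nibble partial  */
;;;                      ^ g_tbls[32*j + 16 + (b >> 4)];  /* high-nibble partial */
;;;           }
;;;           dest[i] = s;
;;;   }
;;;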
%include "reg_sizes.asm"
%ifidn __OUTPUT_FORMAT__, elf64
%define arg0 rdi
%define arg1 rsi
%define arg2 rdx
%define arg3 rcx
%define arg4 r8
%define arg5 r9
%define tmp r11
%define tmp.w r11d
%define tmp.b r11b
%define tmp2 r10
%define tmp3 r9
%define return rax
%macro SLDR 2			;stack load/store are no-ops here: all args fit in registers
%endmacro
%define SSTR SLDR
%define PS 8
%define func(x) x:
%define FUNC_SAVE
%define FUNC_RESTORE
%endif
%ifidn __OUTPUT_FORMAT__, win64
%define arg0 rcx
%define arg1 rdx
%define arg2 r8
%define arg3 r9
%define arg4 r12 ; must be saved and loaded
%define tmp r11
%define tmp.w r11d
%define tmp.b r11b
%define tmp2 r10
%define tmp3 rdi ; must be saved and loaded
%define return rax
%macro SLDR 2			;no-ops: FUNC_SAVE moves the lone stack arg into r12
%endmacro
%define SSTR SLDR
%define PS 8
%define frame_size 2*8
%define arg(x) [rsp + frame_size + PS + PS*x]
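;; arg(x) reaches past the two saved registers (frame_size) and the
;; return address (PS); slots 0-3 are the caller's register home space,
;; so arg(4) is the first argument actually passed on the stack.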
%define func(x) proc_frame x
%macro FUNC_SAVE 0
	rex_push_reg r12
	push_reg rdi
	end_prolog
	mov arg4, arg(4)	;load the stack-resident fifth argument once
%endmacro

%macro FUNC_RESTORE 0
	pop rdi
	pop r12
%endmacro
%endif
%ifidn __OUTPUT_FORMAT__, elf32
;;;================== High Address;
;;; arg4
;;; arg3
;;; arg2
;;; arg1
;;; arg0
;;; return
;;;<================= esp of caller
;;; ebp
;;;<================= ebp = esp
;;; esi
;;; edi
;;; ebx
;;;<================= esp of callee
;;;
;;;================== Low Address;
%define PS 4
%define LOG_PS 2
%define func(x) x:
%define arg(x) [ebp + PS*2 + PS*x]	;PS*2 skips saved ebp and the return address
%define trans ecx			;trans shuttles stack-resident args in and out
%define arg0 trans
%define arg0_m arg(0)
%define arg1 trans
%define arg1_m arg(1)
%define arg2 arg2_m
%define arg2_m arg(2)
%define arg3 ebx
%define arg4 trans
%define arg4_m arg(4)
%define tmp edx
%define tmp.w edx
%define tmp.b dl
%define tmp2 edi
%define tmp3 esi
%define return eax
%macro SLDR 2				;stack load: copy a stack-resident arg into a register
	mov %1, %2
%endmacro
%define SSTR SLDR			;stack store: same mov, operands reversed at the call site

%macro FUNC_SAVE 0
	push ebp
	mov ebp, esp
	push esi
	push edi
	push ebx
	mov arg3, arg(3)		;arg3 lives in ebx for the whole function
%endmacro

%macro FUNC_RESTORE 0
	pop ebx
	pop edi
	pop esi
	mov esp, ebp
	pop ebp
%endmacro
%endif ; output formats
%define len arg0
%define vec arg1
%define mul_array arg2
%define src arg3
%define dest arg4
%define vec_i tmp2
%define ptr tmp3
%define pos return
%ifidn PS,4 ;32-bit code
%define vec_m arg1_m
%define len_m arg0_m
%define dest_m arg4_m
%endif
%ifndef EC_ALIGNED_ADDR
;;; Use unaligned load/store
 %define XLDR vmovdqu
 %define XSTR vmovdqu
%else
;;; Addresses are 32-byte aligned: use non-temporal load/store
;;; unless NO_NT_LDST requests plain aligned moves
 %ifdef NO_NT_LDST
  %define XLDR vmovdqa
  %define XSTR vmovdqa
 %else
  %define XLDR vmovntdqa
  %define XSTR vmovntdq
 %endif
%endif
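;; Note: vmovntdqa/vmovntdq fault on addresses that are not 32-byte
;; aligned, so EC_ALIGNED_ADDR should only be defined when callers
;; guarantee that alignment.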
%ifidn PS,8 ;64-bit code
default rel
[bits 64]
%endif
section .text
%define xmask0f ymm3
%define xmask0fx xmm3
%define xgft_lo ymm4
%define xgft_hi ymm5
%define x0 ymm0
%define xtmpa ymm1
%define xp ymm2
align 16
global gf_vect_dot_prod_avx2:ISAL_SYM_TYPE_FUNCTION
func(gf_vect_dot_prod_avx2)
	FUNC_SAVE
	SLDR len, len_m
	sub len, 32			;fail if len < one 32-byte block
	SSTR len_m, len
	jl .return_fail
	xor pos, pos
	mov tmp.b, 0x0f
	vpinsrb xmask0fx, xmask0fx, tmp.w, 0
	vpbroadcastb xmask0f, xmask0fx	;Construct mask 0x0f0f0f...
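	;; Each source byte is split into two nibbles below; vpshufb uses
	;; only the low 4 bits of each index byte, so a pair of 16-entry
	;; tables (one per nibble) implements multiplication by a constant
	;; over GF(2^8), and the two partial products XOR together.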
.loop32:
	vpxor xp, xp
	mov tmp, mul_array
	xor vec_i, vec_i

.next_vect:
	mov ptr, [src+vec_i*PS]
	vmovdqu xgft_lo, [tmp]		;Load array Cx{00}, Cx{01}, ..., Cx{0f}
					;      and Cx{00}, Cx{10}, ..., Cx{f0}
	vperm2i128 xgft_hi, xgft_lo, xgft_lo, 0x11 ;broadcast high table to both lanes
	vperm2i128 xgft_lo, xgft_lo, xgft_lo, 0x00 ;broadcast low table to both lanes

	XLDR x0, [ptr+pos]		;Get next source vector
	add tmp, 32
	add vec_i, 1

	vpand xtmpa, x0, xmask0f	;Mask low src nibble in bits 3-0
	vpsraw x0, x0, 4		;Shift to put high nibble into bits 3-0
	vpand x0, x0, xmask0f		;Mask high src nibble (drops bits shifted across bytes)

	vpshufb xgft_hi, xgft_hi, x0	;Lookup mul table of high nibble
	vpshufb xgft_lo, xgft_lo, xtmpa	;Lookup mul table of low nibble
	vpxor xgft_hi, xgft_hi, xgft_lo	;GF add high and low partials
	vpxor xp, xp, xgft_hi		;xp += partial

	SLDR vec, vec_m
	cmp vec_i, vec
	jl .next_vect

	SLDR dest, dest_m
	XSTR [dest+pos], xp

	add pos, 32			;Loop on 32 bytes at a time
	SLDR len, len_m
	cmp pos, len
	jle .loop32

	lea tmp, [len + 32]
	cmp pos, tmp
	je .return_pass			;len was a multiple of 32: done

	;; Tail: len is not a multiple of 32. Redo the last 32 bytes at
	;; offset len-32 (len was already reduced by 32 above); the block
	;; overlaps bytes already written, which is safe because each
	;; output byte depends only on the same position in the sources.
	mov pos, len			;Overlapped offset length-32
	jmp .loop32			;Do one more overlap pass
.return_pass:
	mov return, 0
	FUNC_RESTORE
	ret

.return_fail:
	mov return, 1
	FUNC_RESTORE
	ret

endproc_frame
section .data
;;; func core, ver, snum
slversion gf_vect_dot_prod_avx2, 04, 05, 0190