src/isa-l/igzip/adler32_avx2_4.asm
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2017 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

; uint32_t adler32_avx2(uint32_t init, const unsigned char *buf, uint64_t len)
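;
; Reference semantics (a plain-C sketch of the scalar Adler-32 this routine
; computes; the names below are illustrative, not part of isa-l's API):
;
;   uint32_t adler32_ref(uint32_t init, const unsigned char *buf, uint64_t len)
;   {
;           uint32_t a = init & 0xFFFF;     /* low half: running byte sum  */
;           uint32_t b = init >> 16;        /* high half: running sum of a */
;           for (uint64_t i = 0; i < len; i++) {
;                   a = (a + buf[i]) % 65521;
;                   b = (b + a) % 65521;
;           }
;           return (b << 16) | a;           /* first call passes init = 1 */
;   }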

%define LIMIT 5552
%define BASE  0xFFF1 ; 65521
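; LIMIT is the largest n for which 255*n*(n+1)/2 + (n+1)*(BASE-1) still fits
; in 32 bits (zlib's NMAX), so the sums can run for LIMIT bytes between
; modulo-BASE reductions without overflowing a dword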

%define CHUNKSIZE 16
%define CHUNKSIZE_M1 (CHUNKSIZE-1)
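
; Strategy, as implemented below: each iteration loads 16 bytes and spreads
; them across 8 dword lanes; ya holds per-lane byte sums and yb accumulates
; ya once per 8 bytes, so the final a and b fall out of horizontal sums plus
; a lane-position correction (see A_SCALE)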

%include "reg_sizes.asm"

default rel
[bits 64]

; need to keep free: eax, ecx, edx

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg1   rdi
 %define arg2   rsi
 %define arg3   rdx

 %define init_d edi
 %define data   r9
 %define size   r10
 %define s      r11
 %define a_d    r12d
 %define b_d    r8d
 %define end    r13

 %define func(x) x: endbranch
 %macro FUNC_SAVE 0
	push	r12
	push	r13
 %endmacro
 %macro FUNC_RESTORE 0
	pop	r13
	pop	r12
 %endmacro
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg1   rcx
 %define arg2   rdx
 %define arg3   r8

 %define init_d r12d
 %define data   r9
 %define size	r10
 %define s	r11
 %define a_d	esi
 %define b_d	edi
 %define end	r13

 %define stack_size  2*16 + 5*8		; must be an odd multiple of 8
 %define arg(x)      [rsp + stack_size + PS + PS*x]
 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
	alloc_stack	stack_size
	vmovdqa	[rsp + 0*16], xmm6
	vmovdqa	[rsp + 1*16], xmm7
	save_reg	rdi,  2*16 + 0*8
	save_reg	rsi,  2*16 + 1*8
	save_reg	r12,  2*16 + 2*8
	save_reg	r13,  2*16 + 3*8
	end_prolog
	mov	init_d, ecx	; initialize init_d from arg1 to keep ecx free
 %endmacro

 %macro FUNC_RESTORE 0
	vmovdqa	xmm6, [rsp + 0*16]
	vmovdqa	xmm7, [rsp + 1*16]
	mov	rdi,  [rsp + 2*16 + 0*8]
	mov	rsi,  [rsp + 2*16 + 1*8]
	mov	r12,  [rsp + 2*16 + 2*8]
	mov	r13,  [rsp + 2*16 + 3*8]
	add	rsp, stack_size
 %endmacro
%endif

%define ya	ymm0
%define yb	ymm1
%define ydata0	ymm2
%define ydata1	ymm3
%define ysa	ymm4
%define ydata   ysa
%define ytmp0   ydata0
%define ytmp1   ydata1
%define ytmp2   ymm5
%define xa	xmm0
%define xb      xmm1
%define xtmp0   xmm2
%define xtmp1   xmm3
%define xsa     xmm4
%define xtmp2   xmm5
%define yshuf0	ymm6
%define yshuf1	ymm7

[bits 64]
default rel
section .text

mk_global adler32_avx2_4, function
func(adler32_avx2_4)
	FUNC_SAVE

	vmovdqa	yshuf0, [SHUF0]
	vmovdqa	yshuf1, [SHUF1]

	mov	data, arg2
	mov	size, arg3

	mov	b_d, init_d
	shr	b_d, 16
	and	init_d, 0xFFFF
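	; buffers under 32 bytes are not worth the vector setup: scalar path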
	cmp	size, 32
	jb	.lt64
	vmovd	xa, init_d
	vpxor	yb, yb, yb
.sloop1:
	mov	s, LIMIT
	cmp	s, size
	cmova	s, size		; s = min(size, LIMIT)
	lea	end, [data + s - CHUNKSIZE_M1]
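	; end marks the last offset at which a full 16-byte chunk remains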
	cmp	data, end
	jae	.skip_loop_1a
align 32
.sloop1a:
	; do CHUNKSIZE adds
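	; the 16-byte chunk is broadcast to both 128-bit lanes; SHUF0/SHUF1
	; zero-extend bytes 0..7 and 8..15 into eight dwords each, so ya gains
	; all 16 byte values while yb accumulates ya once per 8 bytes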
	vbroadcastf128	ydata, [data]
	add	data, CHUNKSIZE
	vpshufb	ydata0, ydata, yshuf0
	vpaddd	ya, ya, ydata0
	vpaddd	yb, yb, ya
	vpshufb	ydata1, ydata, yshuf1
	vpaddd	ya, ya, ydata1
	vpaddd	yb, yb, ya
	cmp	data, end
	jb	.sloop1a

.skip_loop_1a:
	add	end, CHUNKSIZE_M1

	test	s, CHUNKSIZE_M1
	jnz	.do_final

	; either we're done, or we just did LIMIT
	sub	size, s

	; reduce
	vpslld	yb, 3   ; b is scaled by 8
	vpmulld	ysa, ya, [A_SCALE] ; scaled a
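	; yb only advanced once per 8 bytes, so the true b is 8*sum(yb) minus
	; a per-lane correction of a_lane * lane_index (ysa), applied after
	; the horizontal sums below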

	; compute horizontal sums of ya, yb, ysa
	vextracti128 xtmp0, ya, 1
	vextracti128 xtmp1, yb, 1
	vextracti128 xtmp2, ysa, 1
	vpaddd	xa, xa, xtmp0
	vpaddd	xb, xb, xtmp1
	vpaddd	xsa, xsa, xtmp2
	vphaddd	xa, xa, xa
	vphaddd	xb, xb, xb
	vphaddd	xsa, xsa, xsa
	vphaddd	xa, xa, xa
	vphaddd	xb, xb, xb
	vphaddd	xsa, xsa, xsa

	vmovd	eax, xa
	xor	edx, edx
	mov	ecx, BASE
	div	ecx		; divide edx:eax by ecx, quot->eax, rem->edx
	mov	a_d, edx

	vpsubd	xb, xb, xsa
	vmovd	eax, xb
	add	eax, b_d
	xor	edx, edx
	mov	ecx, BASE
	div	ecx		; divide edx:eax by ecx, quot->eax, rem->edx
	mov	b_d, edx

	test	size, size
	jz	.finish

	; continue loop
	vmovd	xa, a_d
	vpxor	yb, yb, yb
	jmp	.sloop1

.finish:
	mov	eax, b_d
	shl	eax, 16
	or	eax, a_d
	jmp	.end

.lt64:	; short-buffer entry (reached when size < 32, despite the label name)
	mov	a_d, init_d
	lea	end, [data + size]
	test	size, size
	jnz	.final_loop
	jmp	.zero_size

	; handle remaining 1...15 bytes
.do_final:
	; reduce
	vpslld	yb, 3   ; b is scaled by 8
	vpmulld	ysa, ya, [A_SCALE] ; scaled a

	vextracti128 xtmp0, ya, 1
	vextracti128 xtmp1, yb, 1
	vextracti128 xtmp2, ysa, 1
	vpaddd	xa, xa, xtmp0
	vpaddd	xb, xb, xtmp1
	vpaddd	xsa, xsa, xtmp2
	vphaddd	xa, xa, xa
	vphaddd	xb, xb, xb
	vphaddd	xsa, xsa, xsa
	vphaddd	xa, xa, xa
	vphaddd	xb, xb, xb
	vphaddd	xsa, xsa, xsa
	vpsubd	xb, xb, xsa

	vmovd	a_d, xa
	vmovd	eax, xb
	add	b_d, eax
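
	; a_d/b_d now hold unreduced vector sums; the scalar loop finishes the
	; 1..15 leftover bytes and .zero_size applies the final modulo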

align 32
.final_loop:
	movzx	eax, byte[data]
	add	a_d, eax
	inc	data
	add	b_d, a_d
	cmp	data, end
	jb	.final_loop

.zero_size:
	mov	eax, a_d
	xor	edx, edx
	mov	ecx, BASE
	div	ecx		; divide edx:eax by ecx, quot->eax, rem->edx
	mov	a_d, edx

	mov	eax, b_d
	xor	edx, edx
	mov	ecx, BASE
	div	ecx		; divide edx:eax by ecx, quot->eax, rem->edx
	shl	edx, 16
	or	edx, a_d
	mov	eax, edx

.end:
	FUNC_RESTORE
	ret

endproc_frame

section .data
align 32
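; dword lane indices 0..7, used to weight the per-lane a sums when
; correcting b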
A_SCALE:
	dq	0x0000000100000000, 0x0000000300000002
	dq	0x0000000500000004, 0x0000000700000006
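; vpshufb masks: zero-extend bytes 0..7 (SHUF0) and bytes 8..15 (SHUF1) of
; the broadcast chunk into dwords (0xFF index bytes produce zero)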
SHUF0:
	dq	0xFFFFFF01FFFFFF00, 0xFFFFFF03FFFFFF02
	dq	0xFFFFFF05FFFFFF04, 0xFFFFFF07FFFFFF06
SHUF1:
	dq	0xFFFFFF09FFFFFF08, 0xFFFFFF0BFFFFFF0A
	dq	0xFFFFFF0DFFFFFF0C, 0xFFFFFF0FFFFFFF0E