;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;
;  Copyright(c) 2011-2015 Intel Corporation All rights reserved.
;
;  Redistribution and use in source and binary forms, with or without
;  modification, are permitted provided that the following conditions
;  are met:
;    * Redistributions of source code must retain the above copyright
;      notice, this list of conditions and the following disclaimer.
;    * Redistributions in binary form must reproduce the above copyright
;      notice, this list of conditions and the following disclaimer in
;      the documentation and/or other materials provided with the
;      distribution.
;    * Neither the name of Intel Corporation nor the names of its
;      contributors may be used to endorse or promote products derived
;      from this software without specific prior written permission.
;
;  THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
;  "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
;  LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
;  A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
;  OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
;  SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
;  LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
;  DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
;  THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
;  (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
;  OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;

;;; Optimized pq of N source vectors using AVX
;;; int pq_gen_avx(int vects, int len, void **array)

;;; Generates P+Q parity vector from N (vects-2) sources in array of pointers
;;; (**array).  Last two pointers are the P and Q destinations respectively.
;;; Vectors must be 16-byte aligned and the length must be a multiple of 16 bytes.
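;;;
;;; Illustrative call sketch (not part of the build; N, data0..dataN_1, p_buf
;;; and q_buf are hypothetical names, each buffer len bytes and 16-byte aligned):
;;;
;;;	void *array[N + 2] = { data0, data1, /* ... */ dataN_1, p_buf, q_buf };
;;;	int ret = pq_gen_avx(N + 2, len, array);	/* 0 = success, 1 = bad input */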

%include "reg_sizes.asm"

%ifidn __OUTPUT_FORMAT__, elf64
 %define arg0  rdi
 %define arg1  rsi
 %define arg2  rdx
 %define arg3  rcx
 %define arg4  r8
 %define arg5  r9
 %define tmp   r11
 %define tmp3  arg4
 %define return rax
 %define func(x) x:
 %define FUNC_SAVE
 %define FUNC_RESTORE
%endif

%ifidn __OUTPUT_FORMAT__, win64
 %define arg0  rcx
 %define arg1  rdx
 %define arg2  r8
 %define arg3  r9
 %define tmp   r11
 %define tmp3  r10
 %define return rax
 %define stack_size  8*16 + 8 	; must be an odd multiple of 8
 %define func(x) proc_frame x
 %macro FUNC_SAVE 0
	alloc_stack	stack_size
	save_xmm128	xmm6, 0*16
	save_xmm128	xmm7, 1*16
	save_xmm128	xmm8, 2*16
	save_xmm128	xmm9, 3*16
	save_xmm128	xmm10, 4*16
	save_xmm128	xmm11, 5*16
	save_xmm128	xmm14, 6*16
	save_xmm128	xmm15, 7*16
	end_prolog
 %endmacro

 %macro FUNC_RESTORE 0
	movdqa	xmm6, [rsp + 0*16]
	movdqa	xmm7, [rsp + 1*16]
	movdqa	xmm8, [rsp + 2*16]
	movdqa	xmm9, [rsp + 3*16]
	movdqa	xmm10, [rsp + 4*16]
	movdqa	xmm11, [rsp + 5*16]
	movdqa	xmm14, [rsp + 6*16]
	movdqa	xmm15, [rsp + 7*16]
	add	rsp, stack_size
 %endmacro
%endif
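
;; Calling-convention note: the Windows x64 ABI treats xmm6-xmm15 as
;; callee-saved, so FUNC_SAVE above spills the xmm registers this routine
;; clobbers (xmm6-xmm11, xmm14, xmm15) and FUNC_RESTORE reloads them.  The
;; elf64 (System V) ABI has no non-volatile xmm registers, hence the empty
;; macros in that case.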

%define vec arg0
%define	len arg1
%define ptr arg3
%define pos rax

%define xp1   xmm0
%define xq1   xmm1
%define xtmp1 xmm2
%define xs1   xmm3

%define xp2   xmm4
%define xq2   xmm5
%define xtmp2 xmm6
%define xs2   xmm7

%define xp3   xmm8
%define xq3   xmm9
%define xtmp3 xmm10
%define xs3   xmm11

%define xzero xmm14
%define xpoly xmm15
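
;; Register naming: loop48 runs three independent 16-byte lanes (suffix 1/2/3),
;; covering 48 bytes of every source per pass.  Within a lane, xp accumulates
;; P (plain XOR), xq accumulates Q (the GF(2^8) syndrome), xs holds the
;; current source block and xtmp the conditionally selected poly bytes.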

;;; Use non-temporal load/store
%ifdef NO_NT_LDST
 %define XLDR vmovdqa
 %define XSTR vmovdqa
%else
 %define XLDR vmovntdqa
 %define XSTR vmovntdq
%endif
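
;; vmovntdqa/vmovntdq are streaming (non-temporal) load/store forms that
;; minimize cache pollution, which suits long parity runs whose data is
;; typically not reused soon; both require 16-byte-aligned addresses.
;; Building with NO_NT_LDST falls back to ordinary aligned moves (vmovdqa).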

default rel

[bits 64]
section .text

align 16
global pq_gen_avx:ISAL_SYM_TYPE_FUNCTION
func(pq_gen_avx)
	FUNC_SAVE
	sub	vec, 3			;Keep as offset to last source
	jng	return_fail		;Must have at least 2 sources
	cmp	len, 0
	je	return_pass
	test	len, (16-1)		;Check alignment of length
	jnz	return_fail
	mov	pos, 0
	vmovdqa	xpoly, [poly]
	vpxor	xzero, xzero, xzero
	cmp	len, 48
	jl	loop16

len_aligned_48bytes:
	sub	len, 48			;Len points to last block

loop48:
	mov 	ptr, [arg2+vec*8] 	;Fetch last source pointer
	mov	tmp, vec		;Set tmp to point back to last vector
	XLDR	xs1, [ptr+pos]		;Preload last vector (source)
	XLDR	xs2, [ptr+pos+16]	;Preload last vector (source)
	XLDR	xs3, [ptr+pos+32]	;Preload last vector (source)
	vpxor	xp1, xp1, xp1		;p1 = 0
	vpxor	xp2, xp2, xp2		;p2 = 0
	vpxor	xp3, xp3, xp3		;p3 = 0
	vpxor	xq1, xq1, xq1		;q1 = 0
	vpxor	xq2, xq2, xq2		;q2 = 0
	vpxor	xq3, xq3, xq3		;q3 = 0

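	;; Q is accumulated Horner-style: for each remaining source s,
	;;     q = (q * 2) ^ s      (per byte, over GF(2^8))
	;; The multiply-by-2 is done with flag-free SIMD: vpblendvb selects the
	;; reduction byte 0x1d into xtmp wherever the corresponding byte of q has
	;; its top bit set (vpblendvb keys off each mask byte's MSB), vpaddb
	;; doubles every byte (the shifted-out bit is dropped), and the final
	;; vpxor folds the reduction back in.  Per byte this amounts to:
	;;     q = ((q << 1) & 0xff) ^ ((q & 0x80) ? 0x1d : 0)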
next_vect:
	sub	tmp, 1		  	;Inner loop for each source vector
	mov 	ptr, [arg2+tmp*8] 	; get pointer to next vect
	vpxor	xq1, xq1, xs1		; q1 ^= s1
	vpxor	xq2, xq2, xs2		; q2 ^= s2
	vpxor	xq3, xq3, xs3		; q3 ^= s3
	vpxor	xp1, xp1, xs1		; p1 ^= s1
	vpxor	xp2, xp2, xs2		; p2 ^= s2
	vpxor	xp3, xp3, xs3		; p3 ^= s3
	vpblendvb xtmp1, xzero, xpoly, xq1 ; xtmp1 = poly or 0x00
	vpblendvb xtmp2, xzero, xpoly, xq2 ; xtmp2 = poly or 0x00
	vpblendvb xtmp3, xzero, xpoly, xq3 ; xtmp3 = poly or 0x00
	XLDR	xs1, [ptr+pos]		; Get next vector (source data1)
	XLDR	xs2, [ptr+pos+16]	; Get next vector (source data2)
	XLDR	xs3, [ptr+pos+32]	; Get next vector (source data3)
	vpaddb	xq1, xq1, xq1		; q1 = q1<<1
	vpaddb	xq2, xq2, xq2		; q2 = q2<<1
	vpaddb	xq3, xq3, xq3		; q3 = q3<<1
	vpxor	xq1, xq1, xtmp1		; q1 = q1<<1 ^ poly_masked
	vpxor	xq2, xq2, xtmp2		; q2 = q2<<1 ^ poly_masked
	vpxor	xq3, xq3, xtmp3		; q3 = q3<<1 ^ poly_masked
	jg	next_vect		; Loop for each vect except 0
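	;; The jg above consumes the flags produced by "sub tmp, 1" at the top of
	;; the loop; none of the intervening SIMD or mov instructions touch
	;; RFLAGS.  The loop therefore repeats while the decremented count is
	;; still positive, and the last pass leaves xs1-xs3 loaded from source 0,
	;; which is folded in below.  next_vect16 uses the same pattern.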

	mov	ptr, [arg2+8+vec*8]	;Get address of P parity vector
	mov	tmp, [arg2+(2*8)+vec*8]	;Get address of Q parity vector
	vpxor	xp1, xp1, xs1		;p1 ^= s1[0] - last source is already loaded
	vpxor	xq1, xq1, xs1		;q1 ^= 1 * s1[0]
	vpxor	xp2, xp2, xs2		;p2 ^= s2[0]
	vpxor	xq2, xq2, xs2		;q2 ^= 1 * s2[0]
	vpxor	xp3, xp3, xs3		;p3 ^= s3[0]
	vpxor	xq3, xq3, xs3		;q3 ^= 1 * s3[0]
	XSTR	[ptr+pos], xp1		;Write parity P1 vector
	XSTR	[ptr+pos+16], xp2	;Write parity P2 vector
	XSTR	[ptr+pos+32], xp3	;Write parity P3 vector
	XSTR	[tmp+pos], xq1		;Write parity Q1 vector
	XSTR	[tmp+pos+16], xq2	;Write parity Q2 vector
	XSTR	[tmp+pos+32], xq3	;Write parity Q3 vector
	add	pos, 48
	cmp	pos, len
	jle	loop48

	;; ------------------------------
	;; Process the remaining 16 or 32 bytes
	add	len, 48
	cmp	pos, len
	je	return_pass

loop16:
	mov 	ptr, [arg2+vec*8] 	;Fetch last source pointer
	mov	tmp, vec		;Set tmp to point back to last vector
	XLDR	xs1, [ptr+pos]		;Preload last vector (source)
	vpxor	xp1, xp1, xp1		;p = 0
	vpxor	xq1, xq1, xq1		;q = 0

next_vect16:
	sub	tmp, 1		  	;Inner loop for each source vector
	mov 	ptr, [arg2+tmp*8] 	; get pointer to next vect
	vpxor	xq1, xq1, xs1		; q1 ^= s1
	vpblendvb xtmp1, xzero, xpoly, xq1 ; xtmp1 = poly or 0x00
	vpxor	xp1, xp1, xs1		; p ^= s
	vpaddb	xq1, xq1, xq1		; q = q<<1
	vpxor	xq1, xq1, xtmp1		; q = q<<1 ^ poly_masked
	XLDR	xs1, [ptr+pos]		; Get next vector (source data)
	jg	next_vect16		; Loop for each vect except 0

	mov	ptr, [arg2+8+vec*8]	;Get address of P parity vector
	mov	tmp, [arg2+(2*8)+vec*8]	;Get address of Q parity vector
	vpxor	xp1, xp1, xs1		;p ^= s[0] - last source is already loaded
	vpxor	xq1, xq1, xs1		;q ^= 1 * s[0]
	XSTR	[ptr+pos], xp1		;Write parity P vector
	XSTR	[tmp+pos], xq1		;Write parity Q vector
	add	pos, 16
	cmp	pos, len
	jl	loop16


return_pass:
	mov	return, 0
	FUNC_RESTORE
	ret

return_fail:
	mov	return, 1
	FUNC_RESTORE
	ret

endproc_frame

section .data

align 16
poly:
dq 0x1d1d1d1d1d1d1d1d, 0x1d1d1d1d1d1d1d1d
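;; 0x1d is the low byte of the standard RAID-6 generator polynomial
;; x^8 + x^4 + x^3 + x^2 + 1 (0x11d), replicated across a 16-byte lane so one
;; vpxor applies the GF(2^8) reduction to all bytes at once.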

;;;       func        core, ver, snum
slversion pq_gen_avx, 02,   0a,  0039