/*
* Copyright © 2019, VideoLAN and dav1d authors
* Copyright © 2020, Martin Storsjo
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* 1. Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
*
* 2. Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
* WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
* DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
* ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
* (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
* SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "src/arm/asm.S"
#include "util.S"
#define BUF_POS 0
#define BUF_END 4
#define DIF 8
#define RNG 12
#define CNT 16
#define ALLOW_UPDATE_CDF 20
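
// Hedged C sketch of the context layout these offsets assume (field
// names follow dav1d's msac.h; the offsets hold for this 32-bit build,
// where pointers and size_t are 4 bytes):
//
//     typedef struct MsacContext {
//         const uint8_t *buf_pos;   // BUF_POS, offset 0
//         const uint8_t *buf_end;   // BUF_END, offset 4
//         size_t dif;               // DIF, offset 8 (EC_WIN_SIZE == 32)
//         unsigned rng;             // RNG, offset 12
//         int cnt;                  // CNT, offset 16
//         int allow_update_cdf;     // ALLOW_UPDATE_CDF, offset 20
//     } MsacContext;
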
const coeffs
.short 60, 56, 52, 48, 44, 40, 36, 32, 28, 24, 20, 16, 12, 8, 4, 0
.short 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
endconst
const bits, align=4
.short 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80
.short 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000
endconst
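
// Notes on the tables above, inferred from their use below:
// - coeffs is addressed at coeffs + 30 - 2*n_symbols, so that n_symbols
//   consecutive halfwords yield EC_MIN_PROB * (n_symbols - ret) for
//   ret = 0..n_symbols-1; the trailing zeros pad over-reads by the wider
//   vector loads.
// - bits holds one-hot halfword masks: ANDed with the "c >= v" compare
//   result and horizontally added, so that rbit+clz turns the lowest set
//   bit into the decoded symbol index.
// The vld1/vst1/arithmetic _n helper macros below dispatch on \n
// (4, 8 or 16 halfwords) to a d register, a single q register, or a
// q register pair.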
.macro vld1_align_n d0, q0, q1, src, n
.if \n == 4
vld1.16 {\d0}, [\src, :64]
.elseif \n == 8
vld1.16 {\q0}, [\src, :128]
.else
vld1.16 {\q0, \q1}, [\src, :128]
.endif
.endm
.macro vld1_n d0, q0, q1, src, n
.if \n == 4
vld1.16 {\d0}, [\src]
.elseif \n == 8
vld1.16 {\q0}, [\src]
.else
vld1.16 {\q0, \q1}, [\src]
.endif
.endm
.macro vst1_align_n d0, q0, q1, src, n
.if \n == 4
vst1.16 {\d0}, [\src, :64]
.elseif \n == 8
vst1.16 {\q0}, [\src, :128]
.else
vst1.16 {\q0, \q1}, [\src, :128]
.endif
.endm
.macro vst1_n d0, q0, q1, src, n
.if \n == 4
vst1.16 {\d0}, [\src]
.elseif \n == 8
vst1.16 {\q0}, [\src]
.else
vst1.16 {\q0, \q1}, [\src]
.endif
.endm
.macro vshr_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vshr.u16 \d0, \s0, \s3
.else
vshr.u16 \d1, \s1, \s4
.if \n == 16
vshr.u16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vadd_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vadd.i16 \d0, \s0, \s3
.else
vadd.i16 \d1, \s1, \s4
.if \n == 16
vadd.i16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vsub_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vsub.i16 \d0, \s0, \s3
.else
vsub.i16 \d1, \s1, \s4
.if \n == 16
vsub.i16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vand_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vand \d0, \s0, \s3
.else
vand \d1, \s1, \s4
.if \n == 16
vand \d2, \s2, \s5
.endif
.endif
.endm
.macro vcge_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vcge.u16 \d0, \s0, \s3
.else
vcge.u16 \d1, \s1, \s4
.if \n == 16
vcge.u16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vrhadd_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vrhadd.u16 \d0, \s0, \s3
.else
vrhadd.u16 \d1, \s1, \s4
.if \n == 16
vrhadd.u16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vshl_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vshl.s16 \d0, \s0, \s3
.else
vshl.s16 \d1, \s1, \s4
.if \n == 16
vshl.s16 \d2, \s2, \s5
.endif
.endif
.endm
.macro vqdmulh_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
.if \n == 4
vqdmulh.s16 \d0, \s0, \s3
.else
vqdmulh.s16 \d1, \s1, \s4
.if \n == 16
vqdmulh.s16 \d2, \s2, \s5
.endif
.endif
.endm
// unsigned dav1d_msac_decode_symbol_adapt4_neon(MsacContext *s, uint16_t *cdf,
// size_t n_symbols);
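//
// Hedged scalar model of the decode step vectorized below, mirroring the
// inline comments (EC_PROB_SHIFT == 6 and EC_MIN_PROB == 4 as in dav1d's
// generic msac code):
//
//     c = dif >> (EC_WIN_SIZE - 16);
//     for (i = 0; i < n_symbols; i++)
//         v[i] = ((rng >> 8) * (cdf[i] >> EC_PROB_SHIFT)
//                 >> (7 - EC_PROB_SHIFT)) + EC_MIN_PROB * (n_symbols - i);
//     ret = smallest i with c >= v[i];
//
// The NEON code computes every v[i] at once, compares them all against a
// broadcast c, and recovers ret from the compare mask via rbit+clz.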
function msac_decode_symbol_adapt4_neon, export=1
.macro decode_update n
push {r4-r10,lr}
sub sp, sp, #48
add r8, r0, #RNG
vld1_align_n d0, q0, q1, r1, \n // cdf
vld1.16 {d16[]}, [r8, :16] // rng
movrel_local r9, coeffs, 30
vmov.i16 d30, #0x7f00 // 0x7f00
sub r9, r9, r2, lsl #1
vmvn.i16 q14, #0x3f // 0xffc0
add r8, sp, #14
vand d22, d16, d30 // rng & 0x7f00
vst1.16 {d16[0]}, [r8, :16] // store original u = s->rng
vand_n d4, q2, q3, d0, q0, q1, d28, q14, q14, \n // cdf & 0xffc0
.if \n > 4
vmov d23, d22
.endif
vld1_n d16, q8, q9, r9, \n // EC_MIN_PROB * (n_symbols - ret)
vqdmulh_n d20, q10, q11, d4, q2, q3, d22, q11, q11, \n // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
add r8, r0, #DIF + 2
vadd_n d16, q8, q9, d4, q2, q3, d16, q8, q9, \n // v = cdf + EC_MIN_PROB * (n_symbols - ret)
.if \n == 4
vmov.i16 d17, #0
.endif
vadd_n d16, q8, q9, d20, q10, q11, d16, q8, q9, \n // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
add r9, sp, #16
vld1.16 {d20[]}, [r8, :16] // dif >> (EC_WIN_SIZE - 16)
movrel_local r8, bits
vst1_n q8, q8, q9, r9, \n // store v values to allow indexed access
vmov d21, d20
vld1_align_n q12, q12, q13, r8, \n
.if \n == 16
vmov q11, q10
.endif
vcge_n q2, q2, q3, q10, q10, q11, q8, q8, q9, \n // c >= v
vand_n q10, q10, q11, q2, q2, q3, q12, q12, q13, \n // One bit per halfword set in the mask
.if \n == 16
vadd.i16 q10, q10, q11
.endif
vadd.i16 d20, d20, d21 // Aggregate mask bits
ldr r4, [r0, #ALLOW_UPDATE_CDF]
vpadd.i16 d20, d20, d20
lsl r10, r2, #1
vpadd.i16 d20, d20, d20
vmov.u16 r3, d20[0]
cmp r4, #0
rbit r3, r3
clz lr, r3 // ret
beq L(renorm)
// update_cdf
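// Hedged scalar model of this adaptation (the vectorized form below is
// equivalent via arithmetic-shift identities):
//     rate = 4 + (count >> 4) + (n_symbols > 2);
//     for (i = 0; i < n_symbols; i++)
//         if (i < ret) cdf[i] += (32768 - cdf[i]) >> rate;
//         else         cdf[i] -= cdf[i] >> rate;
//     cdf[n_symbols] = count + (count < 32);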
ldrh r3, [r1, r10] // count = cdf[n_symbols]
vmov.i8 q10, #0xff
.if \n == 16
mov r4, #-5
.else
mvn r12, r2
mov r4, #-4
cmn r12, #3 // set C if n_symbols <= 2
.endif
vrhadd_n d16, q8, q9, d20, q10, q10, d4, q2, q3, \n // i >= val ? -1 : 32768
.if \n == 16
sub r4, r4, r3, lsr #4 // -((count >> 4) + 5)
.else
lsr r12, r3, #4 // count >> 4
sbc r4, r4, r12 // -((count >> 4) + (n_symbols > 2) + 4)
.endif
vsub_n d16, q8, q9, d16, q8, q9, d0, q0, q1, \n // (32768 - cdf[i]) or (-1 - cdf[i])
.if \n == 4
vdup.16 d20, r4 // -rate
.else
vdup.16 q10, r4 // -rate
.endif
sub r3, r3, r3, lsr #5 // count - (count == 32)
vsub_n d0, q0, q1, d0, q0, q1, d4, q2, q3, \n // cdf + (i >= val ? 1 : 0)
vshl_n d16, q8, q9, d16, q8, q9, d20, q10, q10, \n // ({32768,-1} - cdf[i]) >> rate
add r3, r3, #1 // count + (count < 32)
vadd_n d0, q0, q1, d0, q0, q1, d16, q8, q9, \n // cdf + (32768 - cdf[i]) >> rate
vst1_align_n d0, q0, q1, r1, \n
strh r3, [r1, r10]
.endm
decode_update 4
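
// Renormalization, shared by all the decode paths: with u and v spilled
// to the stack by decode_update, rng = u - v and d = clz(rng) ^ 16, then
// rng and dif are shifted left by d and cnt is reduced by d. dif is kept
// in inverted form across the update: ~dif + (v << 16) == ~(dif - (v << 16)),
// and left-shifting the inverted value brings in the one-bits that the
// generic C version shifts in at the bottom (((dif + 1) << d) - 1).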
L(renorm):
add r8, sp, #16
add r8, r8, lr, lsl #1
ldrh r3, [r8] // v
ldrh r4, [r8, #-2] // u
ldr r6, [r0, #CNT]
ldr r7, [r0, #DIF]
sub r4, r4, r3 // rng = u - v
clz r5, r4 // clz(rng)
eor r5, r5, #16 // d = clz(rng) ^ 16
mvn r7, r7 // ~dif
add r7, r7, r3, lsl #16 // ~dif + (v << 16)
L(renorm2):
lsl r4, r4, r5 // rng << d
subs r6, r6, r5 // cnt -= d
lsl r7, r7, r5 // (~dif + (v << 16)) << d
str r4, [r0, #RNG]
mvn r7, r7 // ~dif
bhs 9f
// refill
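// Fast path: load 32 bits at once, byte-swap them to stream order, and
// shift them so that only as many whole bytes as the window has room for
// are XORed into the inverted dif; buf_pos advances by that many bytes
// (4 - (shift_bits >> 3)). The byte-wise loop at 2:/3:/4: handles the
// last few bytes before BUF_END.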
ldr r3, [r0, #BUF_POS] // BUF_POS
ldr r4, [r0, #BUF_END] // BUF_END
add r5, r3, #4
cmp r5, r4
bgt 2f
ldr r3, [r3] // next_bits
add r8, r6, #23 // shift_bits = cnt + 23
add r6, r6, #16 // cnt += 16
rev r3, r3 // next_bits = bswap(next_bits)
sub r5, r5, r8, lsr #3 // buf_pos -= shift_bits >> 3
and r8, r8, #24 // shift_bits &= 24
lsr r3, r3, r8 // next_bits >>= shift_bits
sub r8, r8, r6 // shift_bits -= 16 + cnt
str r5, [r0, #BUF_POS]
lsl r3, r3, r8 // next_bits <<= shift_bits
rsb r6, r8, #16 // cnt = cnt + 32 - shift_bits
eor r7, r7, r3 // dif ^= next_bits
b 9f

2: // refill_eob
rsb r5, r6, #8 // c = 8 - cnt
3:
cmp r3, r4
bge 4f
ldrb r8, [r3], #1
lsl r8, r8, r5
eor r7, r7, r8
subs r5, r5, #8
bge 3b
4: // refill_eob_end
str r3, [r0, #BUF_POS]
rsb r6, r5, #8 // cnt = 8 - c
9:
str r6, [r0, #CNT]
str r7, [r0, #DIF]
mov r0, lr
add sp, sp, #48
pop {r4-r10,pc}
endfunc
function msac_decode_symbol_adapt8_neon, export=1
decode_update 8
b L(renorm)
endfunc
function msac_decode_symbol_adapt16_neon, export=1
decode_update 16
b L(renorm)
endfunc
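
// hi_tok: the coefficient "high token", decoded by running the 3-symbol
// adaptive decode above up to four times. r2 starts at -24 and gains
// 2*ret net per iteration; the loop exits when a decode returns < 3 (or
// after the fourth pass), and (r2 + 30) >> 1 yields the token. Hedged
// C model:
//
//     unsigned tok = 3, tok_br, i = 0;
//     do {
//         tok_br = decode_symbol_adapt(s, cdf, 3);
//         tok += tok_br;
//     } while (tok_br == 3 && ++i < 3);
//     return tok;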
function msac_decode_hi_tok_neon, export=1
push {r4-r10,lr}
vld1.16 {d0}, [r1, :64] // cdf
add r4, r0, #RNG
vmov.i16 d31, #0x7f00 // 0x7f00
movrel_local r5, coeffs, 30-2*3
vmvn.i16 d30, #0x3f // 0xffc0
ldrh r9, [r1, #6] // count = cdf[n_symbols]
vld1.16 {d1[]}, [r4, :16] // rng
movrel_local r4, bits
vld1.16 {d29}, [r5] // EC_MIN_PROB * (n_symbols - ret)
add r5, r0, #DIF + 2
vld1.16 {q8}, [r4, :128]
mov r2, #-24
vand d20, d0, d30 // cdf & 0xffc0
ldr r10, [r0, #ALLOW_UPDATE_CDF]
vld1.16 {d2[]}, [r5, :16] // dif >> (EC_WIN_SIZE - 16)
sub sp, sp, #48
ldr r6, [r0, #CNT]
ldr r7, [r0, #DIF]
vmov d3, d2
1:
vand d23, d1, d31 // rng & 0x7f00
vqdmulh.s16 d18, d20, d23 // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
add r12, sp, #14
vadd.i16 d6, d20, d29 // v = cdf + EC_MIN_PROB * (n_symbols - ret)
vadd.i16 d6, d18, d6 // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
vmov.i16 d7, #0
vst1.16 {d1[0]}, [r12, :16] // store original u = s->rng
add r12, sp, #16
vcge.u16 q2, q1, q3 // c >= v
vst1.16 {q3}, [r12] // store v values to allow indexed access
vand q9, q2, q8 // One bit per halfword set in the mask
vadd.i16 d18, d18, d19 // Aggregate mask bits
vpadd.i16 d18, d18, d18
vpadd.i16 d18, d18, d18
vmov.u16 r3, d18[0]
cmp r10, #0
add r2, r2, #5
rbit r3, r3
add r8, sp, #16
clz lr, r3 // ret
beq 2f
// update_cdf
vmov.i8 d22, #0xff
mov r4, #-5
vrhadd.u16 d6, d22, d4 // i >= val ? -1 : 32768
sub r4, r4, r9, lsr #4 // -((count >> 4) + 5)
vsub.i16 d6, d6, d0 // (32768 - cdf[i]) or (-1 - cdf[i])
vdup.16 d18, r4 // -rate
sub r9, r9, r9, lsr #5 // count - (count == 32)
vsub.i16 d0, d0, d4 // cdf + (i >= val ? 1 : 0)
vshl.s16 d6, d6, d18 // ({32768,-1} - cdf[i]) >> rate
add r9, r9, #1 // count + (count < 32)
vadd.i16 d0, d0, d6 // cdf + (32768 - cdf[i]) >> rate
vst1.16 {d0}, [r1, :64]
vand d20, d0, d30 // cdf & 0xffc0
strh r9, [r1, #6]
2:
add r8, r8, lr, lsl #1
ldrh r3, [r8] // v
ldrh r4, [r8, #-2] // u
sub r4, r4, r3 // rng = u - v
clz r5, r4 // clz(rng)
eor r5, r5, #16 // d = clz(rng) ^ 16
mvn r7, r7 // ~dif
add r7, r7, r3, lsl #16 // ~dif + (v << 16)
lsl r4, r4, r5 // rng << d
subs r6, r6, r5 // cnt -= d
lsl r7, r7, r5 // (~dif + (v << 16)) << d
str r4, [r0, #RNG]
vdup.16 d1, r4
mvn r7, r7 // ~dif
bhs 9f
// refill
ldr r3, [r0, #BUF_POS] // BUF_POS
ldr r4, [r0, #BUF_END] // BUF_END
add r5, r3, #4
cmp r5, r4
bgt 2f
ldr r3, [r3] // next_bits
add r8, r6, #23 // shift_bits = cnt + 23
add r6, r6, #16 // cnt += 16
rev r3, r3 // next_bits = bswap(next_bits)
sub r5, r5, r8, lsr #3 // buf_pos -= shift_bits >> 3
and r8, r8, #24 // shift_bits &= 24
lsr r3, r3, r8 // next_bits >>= shift_bits
sub r8, r8, r6 // shift_bits -= 16 + cnt
str r5, [r0, #BUF_POS]
lsl r3, r3, r8 // next_bits <<= shift_bits
rsb r6, r8, #16 // cnt = cnt + 32 - shift_bits
eor r7, r7, r3 // dif ^= next_bits
b 9f
2: // refill_eob
rsb r5, r6, #8 // c = 8 - cnt
3:
cmp r3, r4
bge 4f
ldrb r8, [r3], #1
lsl r8, r8, r5
eor r7, r7, r8
subs r5, r5, #8
bge 3b
4: // refill_eob_end
str r3, [r0, #BUF_POS]
rsb r6, r5, #8 // cnt = 8 - c
9:
lsl lr, lr, #1
sub lr, lr, #5
lsr r12, r7, #16
adds r2, r2, lr // carry = tok_br < 3 || tok == 15
vdup.16 q1, r12
bcc 1b // loop if !carry
add r2, r2, #30
str r6, [r0, #CNT]
add sp, sp, #48
str r7, [r0, #DIF]
lsr r0, r2, #1
pop {r4-r10,pc}
endfunc
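
// Equiprobable bool: v = ((rng >> 8) << 7) + 4 and bit = dif < (v << 16).
// On a one, rng becomes v and dif is untouched; on a zero, rng -= v and
// dif -= v << 16. Renormalization is shared via L(renorm2).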
function msac_decode_bool_equi_neon, export=1
push {r4-r10,lr}
ldr r5, [r0, #RNG]
ldr r6, [r0, #CNT]
sub sp, sp, #48
ldr r7, [r0, #DIF]
bic r4, r5, #0xff // r &= 0xff00
add r4, r4, #8
mov r2, #0
subs r8, r7, r4, lsl #15 // dif - vw
lsr r4, r4, #1 // v
sub r5, r5, r4 // r - v
itee lo
movlo r2, #1
movhs r4, r5 // if (ret) v = r - v;
movhs r7, r8 // if (ret) dif = dif - vw;
clz r5, r4 // clz(rng)
mvn r7, r7 // ~dif
eor r5, r5, #16 // d = clz(rng) ^ 16
mov lr, r2
b L(renorm2)
endfunc
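
// Bool with an explicit probability f: v = (((rng >> 8) * (f & ~63)) >> 7)
// + 4; the decision and the rng/dif update then match the equiprobable
// case above.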
function msac_decode_bool_neon, export=1
push {r4-r10,lr}
ldr r5, [r0, #RNG]
ldr r6, [r0, #CNT]
sub sp, sp, #48
ldr r7, [r0, #DIF]
lsr r4, r5, #8 // r >> 8
bic r1, r1, #0x3f // f &= ~63
mul r4, r4, r1
mov r2, #0
lsr r4, r4, #7
add r4, r4, #4 // v
subs r8, r7, r4, lsl #16 // dif - vw
sub r5, r5, r4 // r - v
itee lo
movlo r2, #1
movhs r4, r5 // if (ret) v = r - v;
movhs r7, r8 // if (ret) dif = dif - vw;
clz r5, r4 // clz(rng)
mvn r7, r7 // ~dif
eor r5, r5, #16 // d = clz(rng) ^ 16
mov lr, r2
b L(renorm2)
endfunc
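
// Adaptive bool: the decision of msac_decode_bool with f = cdf[0], plus a
// CDF update when allow_update_cdf is set. Hedged C model of the update
// (the {cdf[0], cdf[0] - 32769} trick below is its arithmetic-shift
// equivalent):
//
//     rate = 4 + (count >> 4);             // count = cdf[1]
//     if (bit) cdf[0] += (32768 - cdf[0]) >> rate;
//     else     cdf[0] -= cdf[0] >> rate;
//     cdf[1] = count + (count < 32);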
function msac_decode_bool_adapt_neon, export=1
push {r4-r10,lr}
ldr r9, [r1] // cdf[0-1]
ldr r5, [r0, #RNG]
movw lr, #0xffc0
ldr r6, [r0, #CNT]
sub sp, sp, #48
ldr r7, [r0, #DIF]
lsr r4, r5, #8 // r >> 8
and r2, r9, lr // f &= ~63
mul r4, r4, r2
mov r2, #0
lsr r4, r4, #7
add r4, r4, #4 // v
subs r8, r7, r4, lsl #16 // dif - vw
sub r5, r5, r4 // r - v
ldr r10, [r0, #ALLOW_UPDATE_CDF]
itee lo
movlo r2, #1
movhs r4, r5 // if (ret) v = r - v;
movhs r7, r8 // if (ret) dif = dif - vw;
cmp r10, #0
clz r5, r4 // clz(rng)
mvn r7, r7 // ~dif
eor r5, r5, #16 // d = clz(rng) ^ 16
mov lr, r2
beq L(renorm2)
lsr r2, r9, #16 // count = cdf[1]
uxth r9, r9 // cdf[0]
sub r3, r2, r2, lsr #5 // count - (count >= 32)
lsr r2, r2, #4 // count >> 4
add r10, r3, #1 // count + (count < 32)
add r2, r2, #4 // rate = (count >> 4) | 4
sub r9, r9, lr // cdf[0] -= bit
sub r3, r9, lr, lsl #15 // {cdf[0], cdf[0] - 32769}
asr r3, r3, r2 // {cdf[0], cdf[0] - 32769} >> rate
sub r9, r9, r3 // cdf[0]
strh r9, [r1]
strh r10, [r1, #2]
b L(renorm2)
endfunc