Diffstat (limited to 'third_party/dav1d/src/arm/32')
-rw-r--r--  third_party/dav1d/src/arm/32/cdef.S                    540
-rw-r--r--  third_party/dav1d/src/arm/32/cdef16.S                  233
-rw-r--r--  third_party/dav1d/src/arm/32/cdef_tmpl.S               515
-rw-r--r--  third_party/dav1d/src/arm/32/filmgrain.S              2039
-rw-r--r--  third_party/dav1d/src/arm/32/filmgrain16.S            2137
-rw-r--r--  third_party/dav1d/src/arm/32/ipred.S                  2937
-rw-r--r--  third_party/dav1d/src/arm/32/ipred16.S                3254
-rw-r--r--  third_party/dav1d/src/arm/32/itx.S                    3343
-rw-r--r--  third_party/dav1d/src/arm/32/itx16.S                  3625
-rw-r--r--  third_party/dav1d/src/arm/32/loopfilter.S              868
-rw-r--r--  third_party/dav1d/src/arm/32/loopfilter16.S            859
-rw-r--r--  third_party/dav1d/src/arm/32/looprestoration.S         791
-rw-r--r--  third_party/dav1d/src/arm/32/looprestoration16.S       801
-rw-r--r--  third_party/dav1d/src/arm/32/looprestoration_common.S  453
-rw-r--r--  third_party/dav1d/src/arm/32/looprestoration_tmpl.S    600
-rw-r--r--  third_party/dav1d/src/arm/32/mc.S                     3340
-rw-r--r--  third_party/dav1d/src/arm/32/mc16.S                   3658
-rw-r--r--  third_party/dav1d/src/arm/32/msac.S                    575
-rw-r--r--  third_party/dav1d/src/arm/32/refmvs.S                   97
-rw-r--r--  third_party/dav1d/src/arm/32/util.S                    184
20 files changed, 30849 insertions, 0 deletions
diff --git a/third_party/dav1d/src/arm/32/cdef.S b/third_party/dav1d/src/arm/32/cdef.S
new file mode 100644
index 0000000000..4a0df6eac8
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/cdef.S
@@ -0,0 +1,540 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2019, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+#include "cdef_tmpl.S"
+
+// n1 = s0/d0
+// w1 = d0/q0
+// n2 = s4/d2
+// w2 = d2/q1
+.macro pad_top_bottom s1, s2, w, stride, n1, w1, n2, w2, align, ret
+ tst r7, #1 // CDEF_HAVE_LEFT
+ beq 2f
+ // CDEF_HAVE_LEFT
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+ ldrh r12, [\s1, #-2]
+ vldr \n1, [\s1]
+ vdup.16 d4, r12
+ ldrh r12, [\s1, #\w]
+ vmov.16 d4[1], r12
+ ldrh r12, [\s2, #-2]
+ vldr \n2, [\s2]
+ vmov.16 d4[2], r12
+ ldrh r12, [\s2, #\w]
+ vmovl.u8 q0, d0
+ vmov.16 d4[3], r12
+ vmovl.u8 q1, d2
+ vmovl.u8 q2, d4
+ vstr s8, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s9, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s10, [r0, #-4]
+ vst1.16 {\w2}, [r0, :\align]
+ vstr s11, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+ b 3f
+.endif
+
+1:
+ // CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ ldrh r12, [\s1, #-2]
+ vldr \n1, [\s1]
+ vdup.16 d4, r12
+ ldrh r12, [\s2, #-2]
+ vldr \n2, [\s2]
+ vmovl.u8 q0, d0
+ vmov.16 d4[1], r12
+ vmovl.u8 q1, d2
+ vmovl.u8 q2, d4
+ vstr s8, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s9, [r0, #-4]
+ vst1.16 {\w2}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+ b 3f
+.endif
+
+2:
+ // !CDEF_HAVE_LEFT
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+ vldr \n1, [\s1]
+ ldrh r12, [\s1, #\w]
+ vldr \n2, [\s2]
+ vdup.16 d4, r12
+ ldrh r12, [\s2, #\w]
+ vmovl.u8 q0, d0
+ vmov.16 d4[1], r12
+ vmovl.u8 q1, d2
+ vmovl.u8 q2, d4
+ vstr s12, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s8, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s12, [r0, #-4]
+ vst1.16 {\w2}, [r0, :\align]
+ vstr s9, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+ b 3f
+.endif
+
+1:
+ // !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ vldr \n1, [\s1]
+ vldr \n2, [\s2]
+ vmovl.u8 q0, d0
+ vmovl.u8 q1, d2
+ vstr s12, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s12, [r0, #-4]
+ vst1.16 {\w2}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+.endif
+3:
+.endm
+
+.macro load_n_incr dst, src, incr, w
+.if \w == 4
+ vld1.32 {\dst\()[0]}, [\src, :32], \incr
+.else
+ vld1.8 {\dst\()}, [\src, :64], \incr
+.endif
+.endm
+
+// void dav1d_cdef_paddingX_8bpc_neon(uint16_t *tmp, const pixel *src,
+// ptrdiff_t src_stride, const pixel (*left)[2],
+// const pixel *const top,
+// const pixel *const bottom, int h,
+// enum CdefEdgeFlags edges);
+
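The edges argument is a bitmask; the macros below test it one bit at a time, and
edges == 0xf branches to the dedicated "edged" path. A minimal C sketch of the
flag layout those tests assume (names follow the comments); rows or columns that
fall outside an available edge are filled with the constant 0x8000 instead of
pixels, as the vmov.i16 q3, #0x8000 stores show:

    enum CdefEdgeFlags {
        CDEF_HAVE_LEFT   = 1 << 0,  // tst r7, #1
        CDEF_HAVE_RIGHT  = 1 << 1,  // tst r7, #2
        CDEF_HAVE_TOP    = 1 << 2,  // tst r7, #4
        CDEF_HAVE_BOTTOM = 1 << 3,  // tst r7, #8
    };
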
+// n1 = s0/d0
+// w1 = d0/q0
+// n2 = s4/d2
+// w2 = d2/q1
+.macro padding_func w, stride, n1, w1, n2, w2, align
+function cdef_padding\w\()_8bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldrd r6, r7, [sp, #32]
+ cmp r7, #0xf // fully edged
+ beq cdef_padding\w\()_edged_8bpc_neon
+ vmov.i16 q3, #0x8000
+ tst r7, #4 // CDEF_HAVE_TOP
+ bne 1f
+ // !CDEF_HAVE_TOP
+ sub r12, r0, #2*(2*\stride+2)
+ vmov.i16 q2, #0x8000
+ vst1.16 {q2,q3}, [r12]!
+.if \w == 8
+ vst1.16 {q2,q3}, [r12]!
+.endif
+ b 3f
+1:
+ // CDEF_HAVE_TOP
+ add r8, r4, r2
+ sub r0, r0, #2*(2*\stride)
+ pad_top_bottom r4, r8, \w, \stride, \n1, \w1, \n2, \w2, \align, 0
+
+ // Middle section
+3:
+ tst r7, #1 // CDEF_HAVE_LEFT
+ beq 2f
+ // CDEF_HAVE_LEFT
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+0:
+ vld1.16 {d2[]}, [r3, :16]!
+ ldrh r12, [r1, #\w]
+ load_n_incr d0, r1, r2, \w
+ subs r6, r6, #1
+ vmov.16 d2[1], r12
+ vmovl.u8 q0, d0
+ vmovl.u8 q1, d2
+ vstr s4, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s5, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 0b
+ b 3f
+1:
+ // CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ vld1.16 {d2[]}, [r3, :16]!
+ load_n_incr d0, r1, r2, \w
+ subs r6, r6, #1
+ vmovl.u8 q0, d0
+ vmovl.u8 q1, d2
+ vstr s4, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 1b
+ b 3f
+2:
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+0:
+ ldrh r12, [r1, #\w]
+ load_n_incr d0, r1, r2, \w
+ vdup.16 d2, r12
+ subs r6, r6, #1
+ vmovl.u8 q0, d0
+ vmovl.u8 q1, d2
+ vstr s12, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s4, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 0b
+ b 3f
+1:
+ // !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ load_n_incr d0, r1, r2, \w
+ subs r6, r6, #1
+ vmovl.u8 q0, d0
+ vstr s12, [r0, #-4]
+ vst1.16 {\w1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 1b
+
+3:
+ tst r7, #8 // CDEF_HAVE_BOTTOM
+ bne 1f
+ // !CDEF_HAVE_BOTTOM
+ sub r12, r0, #4
+ vmov.i16 q2, #0x8000
+ vst1.16 {q2,q3}, [r12]!
+.if \w == 8
+ vst1.16 {q2,q3}, [r12]!
+.endif
+ pop {r4-r8,pc}
+1:
+ // CDEF_HAVE_BOTTOM
+ add r8, r5, r2
+ pad_top_bottom r5, r8, \w, \stride, \n1, \w1, \n2, \w2, \align, 1
+endfunc
+.endm
+
+padding_func 8, 16, d0, q0, d2, q1, 128
+padding_func 4, 8, s0, d0, s4, d2, 64
+
+// void cdef_paddingX_edged_8bpc_neon(uint16_t *tmp, const pixel *src,
+// ptrdiff_t src_stride, const pixel (*left)[2],
+// const pixel *const top,
+// const pixel *const bottom, int h,
+// enum CdefEdgeFlags edges);
+
+.macro padding_func_edged w, stride, reg, align
+function cdef_padding\w\()_edged_8bpc_neon
+ sub r0, r0, #(2*\stride)
+
+ ldrh r12, [r4, #-2]
+ vldr \reg, [r4]
+ add r8, r4, r2
+ strh r12, [r0, #-2]
+ ldrh r12, [r4, #\w]
+ vstr \reg, [r0]
+ strh r12, [r0, #\w]
+
+ ldrh r12, [r8, #-2]
+ vldr \reg, [r8]
+ strh r12, [r0, #\stride-2]
+ ldrh r12, [r8, #\w]
+ vstr \reg, [r0, #\stride]
+ strh r12, [r0, #\stride+\w]
+ add r0, r0, #2*\stride
+
+0:
+ ldrh r12, [r3], #2
+ vldr \reg, [r1]
+ str r12, [r0, #-2]
+ ldrh r12, [r1, #\w]
+ add r1, r1, r2
+ subs r6, r6, #1
+ vstr \reg, [r0]
+ str r12, [r0, #\w]
+ add r0, r0, #\stride
+ bgt 0b
+
+ ldrh r12, [r5, #-2]
+ vldr \reg, [r5]
+ add r8, r5, r2
+ strh r12, [r0, #-2]
+ ldrh r12, [r5, #\w]
+ vstr \reg, [r0]
+ strh r12, [r0, #\w]
+
+ ldrh r12, [r8, #-2]
+ vldr \reg, [r8]
+ strh r12, [r0, #\stride-2]
+ ldrh r12, [r8, #\w]
+ vstr \reg, [r0, #\stride]
+ strh r12, [r0, #\stride+\w]
+
+ pop {r4-r8,pc}
+endfunc
+.endm
+
+padding_func_edged 8, 16, d0, 64
+padding_func_edged 4, 8, s0, 32
+
+tables
+
+filter 8, 8
+filter 4, 8
+
+find_dir 8
+
+.macro load_px_8 d11, d12, d21, d22, w
+.if \w == 8
+ add r6, r2, r9 // x + off
+ sub r9, r2, r9 // x - off
+ vld1.8 {\d11}, [r6] // p0
+ add r6, r6, #16 // += stride
+ vld1.8 {\d21}, [r9] // p1
+ add r9, r9, #16 // += stride
+ vld1.8 {\d12}, [r6] // p0
+ vld1.8 {\d22}, [r9] // p1
+.else
+ add r6, r2, r9 // x + off
+ sub r9, r2, r9 // x - off
+ vld1.32 {\d11[0]}, [r6] // p0
+ add r6, r6, #8 // += stride
+ vld1.32 {\d21[0]}, [r9] // p1
+ add r9, r9, #8 // += stride
+ vld1.32 {\d11[1]}, [r6] // p0
+ add r6, r6, #8 // += stride
+ vld1.32 {\d21[1]}, [r9] // p1
+ add r9, r9, #8 // += stride
+ vld1.32 {\d12[0]}, [r6] // p0
+ add r6, r6, #8 // += stride
+ vld1.32 {\d22[0]}, [r9] // p1
+ add r9, r9, #8 // += stride
+ vld1.32 {\d12[1]}, [r6] // p0
+ vld1.32 {\d22[1]}, [r9] // p1
+.endif
+.endm
+.macro handle_pixel_8 s1, s2, thresh_vec, shift, tap, min
+.if \min
+ vmin.u8 q3, q3, \s1
+ vmax.u8 q4, q4, \s1
+ vmin.u8 q3, q3, \s2
+ vmax.u8 q4, q4, \s2
+.endif
+ vabd.u8 q8, q0, \s1 // abs(diff)
+ vabd.u8 q11, q0, \s2 // abs(diff)
+ vshl.u8 q9, q8, \shift // abs(diff) >> shift
+ vshl.u8 q12, q11, \shift // abs(diff) >> shift
+ vqsub.u8 q9, \thresh_vec, q9 // clip = imax(0, threshold - (abs(diff) >> shift))
+ vqsub.u8 q12, \thresh_vec, q12// clip = imax(0, threshold - (abs(diff) >> shift))
+ vcgt.u8 q10, q0, \s1 // px > p0
+ vcgt.u8 q13, q0, \s2 // px > p1
+ vmin.u8 q9, q9, q8 // imin(abs(diff), clip)
+ vmin.u8 q12, q12, q11 // imin(abs(diff), clip)
+ vneg.s8 q8, q9 // -imin()
+ vneg.s8 q11, q12 // -imin()
+ vbsl q10, q8, q9 // constrain() = imax(imin(diff, clip), -clip)
+ vdup.8 d18, \tap // taps[k]
+ vbsl q13, q11, q12 // constrain() = imax(imin(diff, clip), -clip)
+ vmlal.s8 q1, d20, d18 // sum += taps[k] * constrain()
+ vmlal.s8 q1, d26, d18 // sum += taps[k] * constrain()
+ vmlal.s8 q2, d21, d18 // sum += taps[k] * constrain()
+ vmlal.s8 q2, d27, d18 // sum += taps[k] * constrain()
+.endm
+
+// void cdef_filterX_edged_neon(pixel *dst, ptrdiff_t dst_stride,
+// const uint16_t *tmp, int pri_strength,
+// int sec_strength, int dir, int damping,
+// int h, size_t edges);
+.macro filter_func_8 w, pri, sec, min, suffix
+function cdef_filter\w\suffix\()_edged_neon
+.if \pri
+ movrel_local r8, pri_taps
+ and r9, r3, #1
+ add r8, r8, r9, lsl #1
+.endif
+ movrel_local r9, directions\w
+ add r5, r9, r5, lsl #1
+ vmov.u8 d17, #7
+ vdup.8 d16, r6 // damping
+
+ vmov.8 d8[0], r3
+ vmov.8 d8[1], r4
+ vclz.i8 d8, d8 // clz(threshold)
+ vsub.i8 d8, d17, d8 // ulog2(threshold)
+ vqsub.u8 d8, d16, d8 // shift = imax(0, damping - ulog2(threshold))
+ vneg.s8 d8, d8 // -shift
+.if \sec
+ vdup.8 q6, d8[1]
+.endif
+.if \pri
+ vdup.8 q5, d8[0]
+.endif
+
+1:
+.if \w == 8
+ add r12, r2, #16
+ vld1.8 {d0}, [r2, :64] // px
+ vld1.8 {d1}, [r12, :64] // px
+.else
+ add r12, r2, #8
+ vld1.32 {d0[0]}, [r2, :32] // px
+ add r9, r2, #2*8
+ vld1.32 {d0[1]}, [r12, :32] // px
+ add r12, r12, #2*8
+ vld1.32 {d1[0]}, [r9, :32] // px
+ vld1.32 {d1[1]}, [r12, :32] // px
+.endif
+
+ vmov.u8 q1, #0 // sum
+ vmov.u8 q2, #0 // sum
+.if \min
+ vmov.u16 q3, q0 // min
+ vmov.u16 q4, q0 // max
+.endif
+
+ // Instead of loading sec_taps 2, 1 from memory, just set it
+ // to 2 initially and decrease for the second round.
+ // This is also used as loop counter.
+ mov lr, #2 // sec_taps[0]
+
+2:
+.if \pri
+ ldrsb r9, [r5] // off1
+
+ load_px_8 d28, d29, d30, d31, \w
+.endif
+
+.if \sec
+ add r5, r5, #4 // +2*2
+ ldrsb r9, [r5] // off2
+.endif
+
+.if \pri
+ ldrb r12, [r8] // *pri_taps
+ vdup.8 q7, r3 // threshold
+
+ handle_pixel_8 q14, q15, q7, q5, r12, \min
+.endif
+
+.if \sec
+ load_px_8 d28, d29, d30, d31, \w
+
+ add r5, r5, #8 // +2*4
+ ldrsb r9, [r5] // off3
+
+ vdup.8 q7, r4 // threshold
+
+ handle_pixel_8 q14, q15, q7, q6, lr, \min
+
+ load_px_8 d28, d29, d30, d31, \w
+
+ handle_pixel_8 q14, q15, q7, q6, lr, \min
+
+ sub r5, r5, #11 // r5 -= 2*(2+4); r5 += 1;
+.else
+ add r5, r5, #1 // r5 += 1
+.endif
+ subs lr, lr, #1 // sec_tap-- (value)
+.if \pri
+ add r8, r8, #1 // pri_taps++ (pointer)
+.endif
+ bne 2b
+
+ vshr.s16 q14, q1, #15 // -(sum < 0)
+ vshr.s16 q15, q2, #15 // -(sum < 0)
+ vadd.i16 q1, q1, q14 // sum - (sum < 0)
+ vadd.i16 q2, q2, q15 // sum - (sum < 0)
+ vrshr.s16 q1, q1, #4 // (8 + sum - (sum < 0)) >> 4
+ vrshr.s16 q2, q2, #4 // (8 + sum - (sum < 0)) >> 4
+ vaddw.u8 q1, q1, d0 // px + (8 + sum ...) >> 4
+ vaddw.u8 q2, q2, d1 // px + (8 + sum ...) >> 4
+ vqmovun.s16 d0, q1
+ vqmovun.s16 d1, q2
+.if \min
+ vmin.u8 q0, q0, q4
+ vmax.u8 q0, q0, q3 // iclip(px + .., min, max)
+.endif
+.if \w == 8
+ vst1.8 {d0}, [r0, :64], r1
+ add r2, r2, #2*16 // tmp += 2*tmp_stride
+ subs r7, r7, #2 // h -= 2
+ vst1.8 {d1}, [r0, :64], r1
+.else
+ vst1.32 {d0[0]}, [r0, :32], r1
+ add r2, r2, #4*8 // tmp += 4*tmp_stride
+ vst1.32 {d0[1]}, [r0, :32], r1
+ subs r7, r7, #4 // h -= 4
+ vst1.32 {d1[0]}, [r0, :32], r1
+ vst1.32 {d1[1]}, [r0, :32], r1
+.endif
+
+ // Reset pri_taps and directions back to the original point
+ sub r5, r5, #2
+.if \pri
+ sub r8, r8, #2
+.endif
+
+ bgt 1b
+ vpop {q4-q7}
+ pop {r4-r9,pc}
+endfunc
+.endm
+
+.macro filter_8 w
+filter_func_8 \w, pri=1, sec=0, min=0, suffix=_pri
+filter_func_8 \w, pri=0, sec=1, min=0, suffix=_sec
+filter_func_8 \w, pri=1, sec=1, min=1, suffix=_pri_sec
+.endm
+
+filter_8 8
+filter_8 4
diff --git a/third_party/dav1d/src/arm/32/cdef16.S b/third_party/dav1d/src/arm/32/cdef16.S
new file mode 100644
index 0000000000..d14525d720
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/cdef16.S
@@ -0,0 +1,233 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+#include "cdef_tmpl.S"
+
+// r1 = d0/q0
+// r2 = d2/q1
+.macro pad_top_bot_16 s1, s2, w, stride, r1, r2, align, ret
+ tst r7, #1 // CDEF_HAVE_LEFT
+ beq 2f
+ // CDEF_HAVE_LEFT
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+ vldr s8, [\s1, #-4]
+ vld1.16 {\r1}, [\s1, :\align]
+ vldr s9, [\s1, #2*\w]
+ vldr s10, [\s2, #-4]
+ vld1.16 {\r2}, [\s2, :\align]
+ vldr s11, [\s2, #2*\w]
+ vstr s8, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s9, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s10, [r0, #-4]
+ vst1.16 {\r2}, [r0, :\align]
+ vstr s11, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+ b 3f
+.endif
+
+1:
+ // CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ vldr s8, [\s1, #-4]
+ vld1.16 {\r1}, [\s1, :\align]
+ vldr s9, [\s2, #-4]
+ vld1.16 {\r2}, [\s2, :\align]
+ vstr s8, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s9, [r0, #-4]
+ vst1.16 {\r2}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+ b 3f
+.endif
+
+2:
+ // !CDEF_HAVE_LEFT
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+ vld1.16 {\r1}, [\s1, :\align]
+ vldr s8, [\s1, #2*\w]
+ vld1.16 {\r2}, [\s2, :\align]
+ vldr s9, [\s2, #2*\w]
+ vstr s12, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s8, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s12, [r0, #-4]
+ vst1.16 {\r2}, [r0, :\align]
+ vstr s9, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+ b 3f
+.endif
+
+1:
+ // !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ vld1.16 {\r1}, [\s1, :\align]
+ vld1.16 {\r2}, [\s2, :\align]
+ vstr s12, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ vstr s12, [r0, #-4]
+ vst1.16 {\r2}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+.if \ret
+ pop {r4-r8,pc}
+.else
+ add r0, r0, #2*\stride
+.endif
+3:
+.endm
+
+// void dav1d_cdef_paddingX_16bpc_neon(uint16_t *tmp, const pixel *src,
+// ptrdiff_t src_stride, const pixel (*left)[2],
+// const pixel *const top,
+// const pixel *const bottom, int h,
+// enum CdefEdgeFlags edges);
+
+// r1 = d0/q0
+// r2 = d2/q1
+.macro padding_func_16 w, stride, r1, r2, align
+function cdef_padding\w\()_16bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldrd r6, r7, [sp, #32]
+ vmov.i16 q3, #0x8000
+ tst r7, #4 // CDEF_HAVE_TOP
+ bne 1f
+ // !CDEF_HAVE_TOP
+ sub r12, r0, #2*(2*\stride+2)
+ vmov.i16 q2, #0x8000
+ vst1.16 {q2,q3}, [r12]!
+.if \w == 8
+ vst1.16 {q2,q3}, [r12]!
+.endif
+ b 3f
+1:
+ // CDEF_HAVE_TOP
+ add r8, r4, r2
+ sub r0, r0, #2*(2*\stride)
+ pad_top_bot_16 r4, r8, \w, \stride, \r1, \r2, \align, 0
+
+ // Middle section
+3:
+ tst r7, #1 // CDEF_HAVE_LEFT
+ beq 2f
+ // CDEF_HAVE_LEFT
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+0:
+ vld1.32 {d2[]}, [r3, :32]!
+ vldr s5, [r1, #2*\w]
+ vld1.16 {\r1}, [r1, :\align], r2
+ subs r6, r6, #1
+ vstr s4, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s5, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 0b
+ b 3f
+1:
+ // CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ vld1.32 {d2[]}, [r3, :32]!
+ vld1.16 {\r1}, [r1, :\align], r2
+ subs r6, r6, #1
+ vstr s4, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 1b
+ b 3f
+2:
+ tst r7, #2 // CDEF_HAVE_RIGHT
+ beq 1f
+ // !CDEF_HAVE_LEFT+CDEF_HAVE_RIGHT
+0:
+ vldr s4, [r1, #2*\w]
+ vld1.16 {\r1}, [r1, :\align], r2
+ subs r6, r6, #1
+ vstr s12, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s4, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 0b
+ b 3f
+1:
+ // !CDEF_HAVE_LEFT+!CDEF_HAVE_RIGHT
+ vld1.16 {\r1}, [r1, :\align], r2
+ subs r6, r6, #1
+ vstr s12, [r0, #-4]
+ vst1.16 {\r1}, [r0, :\align]
+ vstr s12, [r0, #2*\w]
+ add r0, r0, #2*\stride
+ bgt 1b
+
+3:
+ tst r7, #8 // CDEF_HAVE_BOTTOM
+ bne 1f
+ // !CDEF_HAVE_BOTTOM
+ sub r12, r0, #4
+ vmov.i16 q2, #0x8000
+ vst1.16 {q2,q3}, [r12]!
+.if \w == 8
+ vst1.16 {q2,q3}, [r12]!
+.endif
+ pop {r4-r8,pc}
+1:
+ // CDEF_HAVE_BOTTOM
+ add r8, r5, r2
+ pad_top_bot_16 r5, r8, \w, \stride, \r1, \r2, \align, 1
+endfunc
+.endm
+
+padding_func_16 8, 16, q0, q1, 128
+padding_func_16 4, 8, d0, d2, 64
+
+tables
+
+filter 8, 16
+filter 4, 16
+
+find_dir 16
diff --git a/third_party/dav1d/src/arm/32/cdef_tmpl.S b/third_party/dav1d/src/arm/32/cdef_tmpl.S
new file mode 100644
index 0000000000..33ff9e5816
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/cdef_tmpl.S
@@ -0,0 +1,515 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+.macro dir_table w, stride
+const directions\w
+ .byte -1 * \stride + 1, -2 * \stride + 2
+ .byte 0 * \stride + 1, -1 * \stride + 2
+ .byte 0 * \stride + 1, 0 * \stride + 2
+ .byte 0 * \stride + 1, 1 * \stride + 2
+ .byte 1 * \stride + 1, 2 * \stride + 2
+ .byte 1 * \stride + 0, 2 * \stride + 1
+ .byte 1 * \stride + 0, 2 * \stride + 0
+ .byte 1 * \stride + 0, 2 * \stride - 1
+// Repeated, to avoid & 7
+ .byte -1 * \stride + 1, -2 * \stride + 2
+ .byte 0 * \stride + 1, -1 * \stride + 2
+ .byte 0 * \stride + 1, 0 * \stride + 2
+ .byte 0 * \stride + 1, 1 * \stride + 2
+ .byte 1 * \stride + 1, 2 * \stride + 2
+ .byte 1 * \stride + 0, 2 * \stride + 1
+endconst
+.endm
+
+.macro tables
+dir_table 8, 16
+dir_table 4, 8
+
+const pri_taps
+ .byte 4, 2, 3, 3
+endconst
+.endm
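The pri_taps constant above packs the two primary-tap pairs back to back; a
C-style sketch of the equivalent table and of how filter_func indexes it (the
declaration is shown for illustration, not as the project's own):

    static const int8_t pri_taps[2][2] = { { 4, 2 }, { 3, 3 } };
    // filter_func selects a row with (pri_strength >> bitdepth_min_8) & 1,
    // which is what "and r9, r3, #1" / "add r8, r8, r9, lsl #1" compute.
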
+
+.macro load_px d11, d12, d21, d22, w
+.if \w == 8
+ add r6, r2, r9, lsl #1 // x + off
+ sub r9, r2, r9, lsl #1 // x - off
+ vld1.16 {\d11,\d12}, [r6] // p0
+ vld1.16 {\d21,\d22}, [r9] // p1
+.else
+ add r6, r2, r9, lsl #1 // x + off
+ sub r9, r2, r9, lsl #1 // x - off
+ vld1.16 {\d11}, [r6] // p0
+ add r6, r6, #2*8 // += stride
+ vld1.16 {\d21}, [r9] // p1
+ add r9, r9, #2*8 // += stride
+ vld1.16 {\d12}, [r6] // p0
+ vld1.16 {\d22}, [r9] // p1
+.endif
+.endm
+.macro handle_pixel s1, s2, thresh_vec, shift, tap, min
+.if \min
+ vmin.u16 q2, q2, \s1
+ vmax.s16 q3, q3, \s1
+ vmin.u16 q2, q2, \s2
+ vmax.s16 q3, q3, \s2
+.endif
+ vabd.u16 q8, q0, \s1 // abs(diff)
+ vabd.u16 q11, q0, \s2 // abs(diff)
+ vshl.u16 q9, q8, \shift // abs(diff) >> shift
+ vshl.u16 q12, q11, \shift // abs(diff) >> shift
+ vqsub.u16 q9, \thresh_vec, q9 // clip = imax(0, threshold - (abs(diff) >> shift))
+ vqsub.u16 q12, \thresh_vec, q12// clip = imax(0, threshold - (abs(diff) >> shift))
+ vsub.i16 q10, \s1, q0 // diff = p0 - px
+ vsub.i16 q13, \s2, q0 // diff = p1 - px
+ vneg.s16 q8, q9 // -clip
+ vneg.s16 q11, q12 // -clip
+ vmin.s16 q10, q10, q9 // imin(diff, clip)
+ vmin.s16 q13, q13, q12 // imin(diff, clip)
+ vdup.16 q9, \tap // taps[k]
+ vmax.s16 q10, q10, q8 // constrain() = imax(imin(diff, clip), -clip)
+ vmax.s16 q13, q13, q11 // constrain() = imax(imin(diff, clip), -clip)
+ vmla.i16 q1, q10, q9 // sum += taps[k] * constrain()
+ vmla.i16 q1, q13, q9 // sum += taps[k] * constrain()
+.endm
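The comments in handle_pixel spell out the per-tap arithmetic; a scalar C sketch
of the same constrain() step (the imax/imin helpers are written out here so the
sketch is self-contained):

    #include <stdlib.h>  // abs()

    static inline int imax(int a, int b) { return a > b ? a : b; }
    static inline int imin(int a, int b) { return a < b ? a : b; }

    // One tap, mirroring the comments above:
    //   clip      = imax(0, threshold - (abs(diff) >> shift))
    //   constrain = imax(imin(diff, clip), -clip)
    //   sum      += taps[k] * constrain
    static inline int constrain(int diff, int threshold, int shift) {
        const int clip = imax(0, threshold - (abs(diff) >> shift));
        return imax(imin(diff, clip), -clip);
    }
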
+
+// void dav1d_cdef_filterX_Ybpc_neon(pixel *dst, ptrdiff_t dst_stride,
+// const uint16_t *tmp, int pri_strength,
+// int sec_strength, int dir, int damping,
+// int h, size_t edges);
+.macro filter_func w, bpc, pri, sec, min, suffix
+function cdef_filter\w\suffix\()_\bpc\()bpc_neon
+.if \bpc == 8
+ cmp r8, #0xf
+ beq cdef_filter\w\suffix\()_edged_neon
+.endif
+.if \pri
+.if \bpc == 16
+ clz r9, r9
+ sub r9, r9, #24 // -bitdepth_min_8
+ neg r9, r9 // bitdepth_min_8
+.endif
+ movrel_local r8, pri_taps
+.if \bpc == 16
+ lsr r9, r3, r9 // pri_strength >> bitdepth_min_8
+ and r9, r9, #1 // (pri_strength >> bitdepth_min_8) & 1
+.else
+ and r9, r3, #1
+.endif
+ add r8, r8, r9, lsl #1
+.endif
+ movrel_local r9, directions\w
+ add r5, r9, r5, lsl #1
+ vmov.u16 d17, #15
+ vdup.16 d16, r6 // damping
+
+.if \pri
+ vdup.16 q5, r3 // threshold
+.endif
+.if \sec
+ vdup.16 q7, r4 // threshold
+.endif
+ vmov.16 d8[0], r3
+ vmov.16 d8[1], r4
+ vclz.i16 d8, d8 // clz(threshold)
+ vsub.i16 d8, d17, d8 // ulog2(threshold)
+ vqsub.u16 d8, d16, d8 // shift = imax(0, damping - ulog2(threshold))
+ vneg.s16 d8, d8 // -shift
+.if \sec
+ vdup.16 q6, d8[1]
+.endif
+.if \pri
+ vdup.16 q4, d8[0]
+.endif
+
+1:
+.if \w == 8
+ vld1.16 {q0}, [r2, :128] // px
+.else
+ add r12, r2, #2*8
+ vld1.16 {d0}, [r2, :64] // px
+ vld1.16 {d1}, [r12, :64] // px
+.endif
+
+ vmov.u16 q1, #0 // sum
+.if \min
+ vmov.u16 q2, q0 // min
+ vmov.u16 q3, q0 // max
+.endif
+
+ // Instead of loading sec_taps 2, 1 from memory, just set it
+ // to 2 initially and decrease for the second round.
+ // This is also used as loop counter.
+ mov lr, #2 // sec_taps[0]
+
+2:
+.if \pri
+ ldrsb r9, [r5] // off1
+
+ load_px d28, d29, d30, d31, \w
+.endif
+
+.if \sec
+ add r5, r5, #4 // +2*2
+ ldrsb r9, [r5] // off2
+.endif
+
+.if \pri
+ ldrb r12, [r8] // *pri_taps
+
+ handle_pixel q14, q15, q5, q4, r12, \min
+.endif
+
+.if \sec
+ load_px d28, d29, d30, d31, \w
+
+ add r5, r5, #8 // +2*4
+ ldrsb r9, [r5] // off3
+
+ handle_pixel q14, q15, q7, q6, lr, \min
+
+ load_px d28, d29, d30, d31, \w
+
+ handle_pixel q14, q15, q7, q6, lr, \min
+
+ sub r5, r5, #11 // r5 -= 2*(2+4); r5 += 1;
+.else
+ add r5, r5, #1 // r5 += 1
+.endif
+ subs lr, lr, #1 // sec_tap-- (value)
+.if \pri
+ add r8, r8, #1 // pri_taps++ (pointer)
+.endif
+ bne 2b
+
+ vshr.s16 q14, q1, #15 // -(sum < 0)
+ vadd.i16 q1, q1, q14 // sum - (sum < 0)
+ vrshr.s16 q1, q1, #4 // (8 + sum - (sum < 0)) >> 4
+ vadd.i16 q0, q0, q1 // px + (8 + sum ...) >> 4
+.if \min
+ vmin.s16 q0, q0, q3
+ vmax.s16 q0, q0, q2 // iclip(px + .., min, max)
+.endif
+.if \bpc == 8
+ vmovn.u16 d0, q0
+.endif
+.if \w == 8
+ add r2, r2, #2*16 // tmp += tmp_stride
+ subs r7, r7, #1 // h--
+.if \bpc == 8
+ vst1.8 {d0}, [r0, :64], r1
+.else
+ vst1.16 {q0}, [r0, :128], r1
+.endif
+.else
+.if \bpc == 8
+ vst1.32 {d0[0]}, [r0, :32], r1
+.else
+ vst1.16 {d0}, [r0, :64], r1
+.endif
+ add r2, r2, #2*16 // tmp += 2*tmp_stride
+ subs r7, r7, #2 // h -= 2
+.if \bpc == 8
+ vst1.32 {d0[1]}, [r0, :32], r1
+.else
+ vst1.16 {d1}, [r0, :64], r1
+.endif
+.endif
+
+ // Reset pri_taps and directions back to the original point
+ sub r5, r5, #2
+.if \pri
+ sub r8, r8, #2
+.endif
+
+ bgt 1b
+ vpop {q4-q7}
+ pop {r4-r9,pc}
+endfunc
+.endm
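The tail of filter_func rounds the accumulated sum and adds it back onto the
centre pixel, exactly as its comments describe; in scalar form (a sketch, with
an illustrative helper name; the min/max clamp only applies in the pri+sec
variant where \min == 1):

    static inline int round_and_add(int px, int sum, int min, int max) {
        const int v = px + ((8 + sum - (sum < 0)) >> 4);
        return v < min ? min : v > max ? max : v;  // iclip(v, min, max)
    }
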
+
+.macro filter w, bpc
+filter_func \w, \bpc, pri=1, sec=0, min=0, suffix=_pri
+filter_func \w, \bpc, pri=0, sec=1, min=0, suffix=_sec
+filter_func \w, \bpc, pri=1, sec=1, min=1, suffix=_pri_sec
+
+function cdef_filter\w\()_\bpc\()bpc_neon, export=1
+ push {r4-r9,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #92]
+ ldrd r6, r7, [sp, #100]
+.if \bpc == 16
+ ldrd r8, r9, [sp, #108]
+.else
+ ldr r8, [sp, #108]
+.endif
+ cmp r3, #0 // pri_strength
+ bne 1f
+ b cdef_filter\w\()_sec_\bpc\()bpc_neon // only sec
+1:
+ cmp r4, #0 // sec_strength
+ bne 1f
+ b cdef_filter\w\()_pri_\bpc\()bpc_neon // only pri
+1:
+ b cdef_filter\w\()_pri_sec_\bpc\()bpc_neon // both pri and sec
+endfunc
+.endm
+
+const div_table, align=4
+ .short 840, 420, 280, 210, 168, 140, 120, 105
+endconst
+
+const alt_fact, align=4
+ .short 420, 210, 140, 105, 105, 105, 105, 105, 140, 210, 420, 0
+endconst
+
+.macro cost_alt dest, s1, s2, s3, s4, s5, s6
+ vmull.s16 q1, \s1, \s1 // sum_alt[n]*sum_alt[n]
+ vmull.s16 q2, \s2, \s2
+ vmull.s16 q3, \s3, \s3
+ vmull.s16 q5, \s4, \s4 // sum_alt[n]*sum_alt[n]
+ vmull.s16 q12, \s5, \s5
+ vmull.s16 q6, \s6, \s6 // q6 overlaps the first \s1-\s2 here
+ vmul.i32 q1, q1, q13 // sum_alt[n]^2*fact
+ vmla.i32 q1, q2, q14
+ vmla.i32 q1, q3, q15
+ vmul.i32 q5, q5, q13 // sum_alt[n]^2*fact
+ vmla.i32 q5, q12, q14
+ vmla.i32 q5, q6, q15
+ vadd.i32 d2, d2, d3
+ vadd.i32 d3, d10, d11
+ vpadd.i32 \dest, d2, d3 // *cost_ptr
+.endm
+
+.macro find_best s1, s2, s3
+.ifnb \s2
+ vmov.32 lr, \s2
+.endif
+ cmp r12, r1 // cost[n] > best_cost
+ itt gt
+ movgt r0, r3 // best_dir = n
+ movgt r1, r12 // best_cost = cost[n]
+.ifnb \s2
+ add r3, r3, #1 // n++
+ cmp lr, r1 // cost[n] > best_cost
+ vmov.32 r12, \s3
+ itt gt
+ movgt r0, r3 // best_dir = n
+ movgt r1, lr // best_cost = cost[n]
+ add r3, r3, #1 // n++
+.endif
+.endm
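find_best walks the eight costs one or two entries at a time using conditional
moves; the equivalent plain C loop, following its comments (a sketch; the
function name is invented for illustration):

    static int find_best_dir(const unsigned cost[8], unsigned *const var) {
        int best_dir = 0;
        unsigned best_cost = cost[0];
        for (int n = 1; n < 8; n++) {
            if (cost[n] > best_cost) {
                best_dir  = n;        // best_dir = n
                best_cost = cost[n];  // best_cost = cost[n]
            }
        }
        // Same final step as the end of find_dir below:
        *var = (best_cost - cost[best_dir ^ 4]) >> 10;
        return best_dir;
    }
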
+
+// int dav1d_cdef_find_dir_Xbpc_neon(const pixel *img, const ptrdiff_t stride,
+// unsigned *const var)
+.macro find_dir bpc
+function cdef_find_dir_\bpc\()bpc_neon, export=1
+ push {lr}
+ vpush {q4-q7}
+.if \bpc == 16
+ clz r3, r3 // clz(bitdepth_max)
+ sub lr, r3, #24 // -bitdepth_min_8
+.endif
+ sub sp, sp, #32 // cost
+ mov r3, #8
+ vmov.u16 q1, #0 // q0-q1 sum_diag[0]
+ vmov.u16 q3, #0 // q2-q3 sum_diag[1]
+ vmov.u16 q5, #0 // q4-q5 sum_hv[0-1]
+ vmov.u16 q8, #0 // q6,d16 sum_alt[0]
+ // q7,d17 sum_alt[1]
+ vmov.u16 q9, #0 // q9,d22 sum_alt[2]
+ vmov.u16 q11, #0
+ vmov.u16 q10, #0 // q10,d23 sum_alt[3]
+
+
+.irpc i, 01234567
+.if \bpc == 8
+ vld1.8 {d30}, [r0, :64], r1
+ vmov.u8 d31, #128
+ vsubl.u8 q15, d30, d31 // img[x] - 128
+.else
+ vld1.16 {q15}, [r0, :128], r1
+ vdup.16 q14, lr // -bitdepth_min_8
+ vshl.u16 q15, q15, q14
+ vmov.u16 q14, #128
+ vsub.i16 q15, q15, q14 // img[x] - 128
+.endif
+ vmov.u16 q14, #0
+
+.if \i == 0
+ vmov q0, q15 // sum_diag[0]
+.else
+ vext.8 q12, q14, q15, #(16-2*\i)
+ vext.8 q13, q15, q14, #(16-2*\i)
+ vadd.i16 q0, q0, q12 // sum_diag[0]
+ vadd.i16 q1, q1, q13 // sum_diag[0]
+.endif
+ vrev64.16 q13, q15
+ vswp d26, d27 // [-x]
+.if \i == 0
+ vmov q2, q13 // sum_diag[1]
+.else
+ vext.8 q12, q14, q13, #(16-2*\i)
+ vext.8 q13, q13, q14, #(16-2*\i)
+ vadd.i16 q2, q2, q12 // sum_diag[1]
+ vadd.i16 q3, q3, q13 // sum_diag[1]
+.endif
+
+ vpadd.u16 d26, d30, d31 // [(x >> 1)]
+ vmov.u16 d27, #0
+ vpadd.u16 d24, d26, d28
+ vpadd.u16 d24, d24, d28 // [y]
+ vmov.u16 r12, d24[0]
+ vadd.i16 q5, q5, q15 // sum_hv[1]
+.if \i < 4
+ vmov.16 d8[\i], r12 // sum_hv[0]
+.else
+ vmov.16 d9[\i-4], r12 // sum_hv[0]
+.endif
+
+.if \i == 0
+ vmov.u16 q6, q13 // sum_alt[0]
+.else
+ vext.8 q12, q14, q13, #(16-2*\i)
+ vext.8 q14, q13, q14, #(16-2*\i)
+ vadd.i16 q6, q6, q12 // sum_alt[0]
+ vadd.i16 d16, d16, d28 // sum_alt[0]
+.endif
+ vrev64.16 d26, d26 // [-(x >> 1)]
+ vmov.u16 q14, #0
+.if \i == 0
+ vmov q7, q13 // sum_alt[1]
+.else
+ vext.8 q12, q14, q13, #(16-2*\i)
+ vext.8 q13, q13, q14, #(16-2*\i)
+ vadd.i16 q7, q7, q12 // sum_alt[1]
+ vadd.i16 d17, d17, d26 // sum_alt[1]
+.endif
+
+.if \i < 6
+ vext.8 q12, q14, q15, #(16-2*(3-(\i/2)))
+ vext.8 q13, q15, q14, #(16-2*(3-(\i/2)))
+ vadd.i16 q9, q9, q12 // sum_alt[2]
+ vadd.i16 d22, d22, d26 // sum_alt[2]
+.else
+ vadd.i16 q9, q9, q15 // sum_alt[2]
+.endif
+.if \i == 0
+ vmov q10, q15 // sum_alt[3]
+.elseif \i == 1
+ vadd.i16 q10, q10, q15 // sum_alt[3]
+.else
+ vext.8 q12, q14, q15, #(16-2*(\i/2))
+ vext.8 q13, q15, q14, #(16-2*(\i/2))
+ vadd.i16 q10, q10, q12 // sum_alt[3]
+ vadd.i16 d23, d23, d26 // sum_alt[3]
+.endif
+.endr
+
+ vmov.u32 q15, #105
+
+ vmull.s16 q12, d8, d8 // sum_hv[0]*sum_hv[0]
+ vmlal.s16 q12, d9, d9
+ vmull.s16 q13, d10, d10 // sum_hv[1]*sum_hv[1]
+ vmlal.s16 q13, d11, d11
+ vadd.s32 d8, d24, d25
+ vadd.s32 d9, d26, d27
+ vpadd.s32 d8, d8, d9 // cost[2,6] (s16, s17)
+ vmul.i32 d8, d8, d30 // cost[2,6] *= 105
+
+ vrev64.16 q1, q1
+ vrev64.16 q3, q3
+ vext.8 q1, q1, q1, #10 // sum_diag[0][14-n]
+ vext.8 q3, q3, q3, #10 // sum_diag[1][14-n]
+
+ vstr s16, [sp, #2*4] // cost[2]
+ vstr s17, [sp, #6*4] // cost[6]
+
+ movrel_local r12, div_table
+ vld1.16 {q14}, [r12, :128]
+
+ vmull.s16 q5, d0, d0 // sum_diag[0]*sum_diag[0]
+ vmull.s16 q12, d1, d1
+ vmlal.s16 q5, d2, d2
+ vmlal.s16 q12, d3, d3
+ vmull.s16 q0, d4, d4 // sum_diag[1]*sum_diag[1]
+ vmull.s16 q1, d5, d5
+ vmlal.s16 q0, d6, d6
+ vmlal.s16 q1, d7, d7
+ vmovl.u16 q13, d28 // div_table
+ vmovl.u16 q14, d29
+ vmul.i32 q5, q5, q13 // cost[0]
+ vmla.i32 q5, q12, q14
+ vmul.i32 q0, q0, q13 // cost[4]
+ vmla.i32 q0, q1, q14
+ vadd.i32 d10, d10, d11
+ vadd.i32 d0, d0, d1
+ vpadd.i32 d0, d10, d0 // cost[0,4] = s0,s1
+
+ movrel_local r12, alt_fact
+ vld1.16 {d29, d30, d31}, [r12, :64] // div_table[2*m+1] + 105
+
+ vstr s0, [sp, #0*4] // cost[0]
+ vstr s1, [sp, #4*4] // cost[4]
+
+ vmovl.u16 q13, d29 // div_table[2*m+1] + 105
+ vmovl.u16 q14, d30
+ vmovl.u16 q15, d31
+
+ cost_alt d14, d12, d13, d16, d14, d15, d17 // cost[1], cost[3]
+ cost_alt d15, d18, d19, d22, d20, d21, d23 // cost[5], cost[7]
+ vstr s28, [sp, #1*4] // cost[1]
+ vstr s29, [sp, #3*4] // cost[3]
+
+ mov r0, #0 // best_dir
+ vmov.32 r1, d0[0] // best_cost
+ mov r3, #1 // n
+
+ vstr s30, [sp, #5*4] // cost[5]
+ vstr s31, [sp, #7*4] // cost[7]
+
+ vmov.32 r12, d14[0]
+
+ find_best d14[0], d8[0], d14[1]
+ find_best d14[1], d0[1], d15[0]
+ find_best d15[0], d8[1], d15[1]
+ find_best d15[1]
+
+ eor r3, r0, #4 // best_dir ^4
+ ldr r12, [sp, r3, lsl #2]
+ sub r1, r1, r12 // best_cost - cost[best_dir ^ 4]
+ lsr r1, r1, #10
+ str r1, [r2] // *var
+
+ add sp, sp, #32
+ vpop {q4-q7}
+ pop {pc}
+endfunc
+.endm
diff --git a/third_party/dav1d/src/arm/32/filmgrain.S b/third_party/dav1d/src/arm/32/filmgrain.S
new file mode 100644
index 0000000000..d1f83efb98
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/filmgrain.S
@@ -0,0 +1,2039 @@
+/*
+ * Copyright © 2021, VideoLAN and dav1d authors
+ * Copyright © 2021, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+#include "src/arm/asm-offsets.h"
+
+#define GRAIN_WIDTH 82
+#define GRAIN_HEIGHT 73
+
+#define SUB_GRAIN_WIDTH 44
+#define SUB_GRAIN_HEIGHT 38
+
+.macro increment_seed steps, shift=1
+ lsr r11, r2, #3
+ lsr r12, r2, #12
+ lsr lr, r2, #1
+ eor r11, r2, r11 // (r >> 0) ^ (r >> 3)
+ eor r12, r12, lr // (r >> 12) ^ (r >> 1)
+ eor r11, r11, r12 // (r >> 0) ^ (r >> 3) ^ (r >> 12) ^ (r >> 1)
+.if \shift
+ lsr r2, r2, #\steps
+.endif
+ and r11, r11, #((1 << \steps) - 1) // bit
+.if \shift
+ orr r2, r2, r11, lsl #(16 - \steps) // *state
+.else
+ orr r2, r2, r11, lsl #16 // *state
+.endif
+.endm
+
+.macro read_rand dest, bits, age
+ ubfx \dest, r2, #16 - \bits - \age, #\bits
+.endm
+
+.macro read_shift_rand dest, bits
+ ubfx \dest, r2, #17 - \bits, #\bits
+ lsr r2, r2, #1
+.endm
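increment_seed advances the film-grain LFSR by several bits at once, and
read_rand/read_shift_rand pull indices out of the updated state. A scalar,
one-bit-at-a-time C sketch of the same recurrence, following the comments above
(the function name is illustrative):

    static inline unsigned get_random_number(const int bits, unsigned *const state) {
        const unsigned r   = *state;
        const unsigned bit = ((r >> 0) ^ (r >> 1) ^ (r >> 3) ^ (r >> 12)) & 1;
        *state = (r >> 1) | (bit << 15);                    // shift in the new bit
        return (*state >> (16 - bits)) & ((1 << bits) - 1); // top `bits` bits
    }

Each grain sample is then looked up as gaussian_sequence[get_random_number(11, &seed)]
and rounding-shifted right by 4 + grain_scale_shift (the vrshl by q15 set up in
the generator functions below).
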
+
+// special calling convention:
+// r2 holds seed
+// r3 holds dav1d_gaussian_sequence
+// clobbers r11-r12
+// returns in d0-d1
+function get_gaussian_neon
+ push {r5-r6,lr}
+ increment_seed 4
+ read_rand r5, 11, 3
+ read_rand r6, 11, 2
+ add r5, r3, r5, lsl #1
+ add r6, r3, r6, lsl #1
+ vld1.16 {d0[0]}, [r5]
+ read_rand r5, 11, 1
+ vld1.16 {d0[1]}, [r6]
+ add r5, r3, r5, lsl #1
+ read_rand r6, 11, 0
+ increment_seed 4
+ add r6, r3, r6, lsl #1
+ vld1.16 {d0[2]}, [r5]
+ read_rand r5, 11, 3
+ vld1.16 {d0[3]}, [r6]
+ add r5, r3, r5, lsl #1
+ read_rand r6, 11, 2
+ vld1.16 {d1[0]}, [r5]
+ add r6, r3, r6, lsl #1
+ read_rand r5, 11, 1
+ vld1.16 {d1[1]}, [r6]
+ read_rand r6, 11, 0
+ add r5, r3, r5, lsl #1
+ add r6, r3, r6, lsl #1
+ vld1.16 {d1[2]}, [r5]
+ vld1.16 {d1[3]}, [r6]
+ pop {r5-r6,pc}
+endfunc
+
+.macro get_grain_row r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r0, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r1, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r2, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r3, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r4, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r5, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r6, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r7, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r8, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r9, q0
+ increment_seed 2
+ read_rand r11, 11, 1
+ read_rand r12, 11, 0
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d0[0]}, [r11]
+ vld1.16 {d0[1]}, [r12]
+ vrshl.s16 d0, d0, d30
+ vmovn.i16 \r10, q0
+.endm
+
+.macro store_grain_row r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10
+ vst1.16 {\r0, \r1, \r2, \r3}, [r0]!
+ vst1.16 {\r4, \r5, \r6, \r7}, [r0]!
+ vst1.16 {\r8, \r9}, [r0]!
+ vst1.16 {\r10[0]}, [r0]!
+.endm
+
+.macro get_grain_row_44 r0, r1, r2, r3, r4, r5
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r0, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r1, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r2, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r3, q0
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ vmovn.i16 \r4, q0
+ increment_seed 4
+ read_rand r11, 11, 3
+ read_rand r12, 11, 2
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d0[]}, [r11]
+ read_rand r11, 11, 1
+ vld1.16 {d0[1]}, [r12]
+ add r11, r3, r11, lsl #1
+ read_rand r12, 11, 0
+ vld1.16 {d0[2]}, [r11]
+ add r12, r3, r12, lsl #1
+ vld1.16 {d0[3]}, [r12]
+ vrshl.s16 d0, d0, d30
+ vmovn.i16 \r5, q0
+.endm
+
+.macro store_grain_row_44 r0, r1, r2, r3, r4, r5
+ vst1.16 {\r0, \r1, \r2, \r3}, [r0]!
+ vst1.16 {\r4, \r5}, [r0]
+ add r0, r0, #GRAIN_WIDTH-32
+.endm
+
+function get_grain_2_neon
+ push {r11,lr}
+ increment_seed 2
+ read_rand r11, 11, 1
+ read_rand r12, 11, 0
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d0[0]}, [r11]
+ vld1.16 {d0[1]}, [r12]
+ vrshl.s16 d0, d0, d30
+ vmovn.i16 d0, q0
+ pop {r11,pc}
+endfunc
+
+.macro get_grain_2 dst
+ bl get_grain_2_neon
+.ifnc \dst, d0
+ vmov \dst, d0
+.endif
+.endm
+
+// r1 holds the number of entries to produce
+// r6, r8 and r10 hold the previous output entries
+// q0 holds the vector of produced entries
+// q1 holds the input vector of sums from above
+.macro output_lag n
+function output_lag\n\()_neon
+ push {r0, lr}
+.if \n == 1
+ mov lr, #-128
+.else
+ mov r0, #1
+ mov lr, #1
+ sub r7, r7, #1
+ sub r9, r9, #1
+ lsl r0, r0, r7
+ lsl lr, lr, r9
+ add r7, r7, #1
+ add r9, r9, #1
+.endif
+1:
+ read_shift_rand r12, 11
+ vmov.32 r11, d2[0]
+ lsl r12, r12, #1
+ vext.8 q0, q0, q0, #1
+ ldrsh r12, [r3, r12]
+.if \n == 1
+ mla r11, r6, r4, r11 // sum (above) + *coeff * prev output
+ add r6, r11, r8 // 1 << (ar_coeff_shift - 1)
+ add r12, r12, r10
+ asr r6, r6, r7 // >> ar_coeff_shift
+ asr r12, r12, r9 // >> (4 + grain_scale_shift)
+ add r6, r6, r12
+ cmp r6, r5
+.elseif \n == 2
+ mla r11, r8, r4, r11 // sum (above) + *coeff * prev output 1
+ mla r11, r6, r10, r11 // += *coeff * prev output 2
+ mov r8, r6
+ add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
+ add r12, r12, lr // 1 << (4 + grain_scale_shift - 1)
+ asr r6, r6, r7 // >> ar_coeff_shift
+ asr r12, r12, r9 // >> (4 + grain_scale_shift)
+ add r6, r6, r12
+ push {lr}
+ cmp r6, r5
+ mov lr, #-128
+.else
+ push {r1-r3}
+ sbfx r1, r4, #0, #8
+ sbfx r2, r4, #8, #8
+ sbfx r3, r4, #16, #8
+ mla r11, r10, r1, r11 // sum (above) + *coeff * prev output 1
+ mla r11, r8, r2, r11 // sum (above) + *coeff * prev output 2
+ mla r11, r6, r3, r11 // += *coeff * prev output 3
+ pop {r1-r3}
+ mov r10, r8
+ mov r8, r6
+
+ add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
+ add r12, r12, lr // 1 << (4 + grain_scale_shift - 1)
+ asr r6, r6, r7 // >> ar_coeff_shift
+ asr r12, r12, r9 // >> (4 + grain_scale_shift)
+ add r6, r6, r12
+ push {lr}
+ cmp r6, r5
+ mov lr, #-128
+.endif
+ it gt
+ movgt r6, r5
+ cmp r6, lr
+ it lt
+ movlt r6, lr
+.if \n >= 2
+ pop {lr}
+.endif
+ subs r1, r1, #1
+ vext.8 q1, q1, q1, #4
+ vmov.8 d1[7], r6
+ bgt 1b
+ pop {r0, pc}
+endfunc
+.endm
+
+output_lag 1
+output_lag 2
+output_lag 3
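The output_lag macros fold each vector of neighbour sums from above into the
autoregression one sample at a time, since every output feeds the next. A
scalar sketch of the lag-1 step, following the inline comments (names are
illustrative; 127/-128 is the 8bpc grain range set up by the callers, and the
C right shifts stand in for the asr instructions):

    static inline int output_lag1_scalar(int sum_above, int coeff, int prev,
                                         int gauss, int ar_coeff_shift,
                                         int grain_scale_shift) {
        const int sum = sum_above + coeff * prev;  // sum (above) + *coeff * prev output
        int g = ((sum   + (1 << (ar_coeff_shift - 1)))        >> ar_coeff_shift)
              + ((gauss + (1 << (4 + grain_scale_shift - 1))) >> (4 + grain_scale_shift));
        return g > 127 ? 127 : g < -128 ? -128 : g;  // clamp to [-128, 127]
    }
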
+
+
+function sum_lag1_above_neon
+ vmull.s8 q2, d6, d28
+ vmull.s8 q3, d7, d28
+ vmull.s8 q4, d0, d27
+ vmull.s8 q5, d1, d27
+
+ vaddl.s16 q0, d4, d8
+ vaddl.s16 q2, d5, d9
+ vaddl.s16 q4, d6, d10
+ vaddl.s16 q5, d7, d11
+
+ vmull.s8 q3, d3, d29
+ vmull.s8 q1, d2, d29
+
+ vaddw.s16 q4, q4, d6
+ vaddw.s16 q5, q5, d7
+ vaddw.s16 q3, q2, d3
+ vaddw.s16 q2, q0, d2
+ bx lr
+endfunc
+
+.macro sum_lag_n_body lag, type, uv_layout, edge, elems, store, uv_coeff
+.ifc \lag\()_\edge, lag3_left
+ bl sum_lag3_left_above_neon
+.else
+ bl sum_\lag\()_above_neon
+.endif
+.ifc \type, uv_420
+ vpush {q6-q7}
+ add r12, r11, #GRAIN_WIDTH
+ vld1.16 {q0, q1}, [r11]!
+ vld1.16 {q6, q7}, [r12]!
+ vpaddl.s8 q0, q0
+ vpaddl.s8 q1, q1
+ vpaddl.s8 q6, q6
+ vpaddl.s8 q7, q7
+ vadd.i16 q0, q0, q6
+ vadd.i16 q1, q1, q7
+ vpop {q6-q7}
+ vrshrn.s16 d0, q0, #2
+ vrshrn.s16 d1, q1, #2
+.endif
+.ifc \type, uv_422
+ vld1.8 {q0, q1}, [r11]!
+ vpaddl.s8 q0, q0
+ vpaddl.s8 q1, q1
+ vrshrn.s16 d0, q0, #1
+ vrshrn.s16 d1, q1, #1
+.endif
+.ifc \type, uv_444
+ vld1.8 {q0}, [r11]!
+.endif
+.if \uv_layout
+.ifnb \uv_coeff
+ vdup.8 d13, \uv_coeff
+.endif
+ vmull.s8 q1, d0, d13
+ vmull.s8 q0, d1, d13
+ vaddw.s16 q2, q2, d2
+ vaddw.s16 q3, q3, d3
+ vaddw.s16 q4, q4, d0
+ vaddw.s16 q5, q5, d1
+.endif
+.if \uv_layout && \elems == 16
+ b sum_\lag\()_y_\edge\()_start
+.elseif \uv_layout == 444 && \elems == 15
+ b sum_\lag\()_y_\edge\()_start
+.elseif \uv_layout == 422 && \elems == 9
+ b sum_\lag\()_uv_420_\edge\()_start
+.else
+sum_\lag\()_\type\()_\edge\()_start:
+ push {r11}
+.ifc \edge, left
+ increment_seed 4
+ read_rand r11, 11, 3
+ read_rand r12, 11, 2
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d1[1]}, [r11]
+ read_rand r11, 11, 1
+ vld1.16 {d1[2]}, [r12]
+ add r11, r3, r11, lsl #1
+ vld1.16 {d1[3]}, [r11]
+ lsl r2, r2, #1 // shift back the state as if we'd done increment_seed with shift=0
+ vrshl.s16 d1, d1, d30
+ vmovn.i16 d1, q0
+ vext.8 q2, q2, q2, #12
+.ifc \lag, lag3
+ vmov.s8 r10, d1[5]
+.endif
+.ifnc \lag, lag1
+ vmov.s8 r8, d1[6]
+.endif
+ vmov.s8 r6, d1[7]
+
+ vmov q1, q2
+ mov r1, #1
+ bl output_\lag\()_neon
+.else
+ increment_seed 4, shift=0
+ vmov q1, q2
+ mov r1, #4
+ bl output_\lag\()_neon
+.endif
+
+ increment_seed 4, shift=0
+ vmov q1, q3
+ mov r1, #4
+ bl output_\lag\()_neon
+
+ increment_seed 4, shift=0
+ vmov q1, q4
+.if \elems == 9
+ mov r1, #1
+ bl output_\lag\()_neon
+ lsr r2, r2, #3
+
+ read_rand r11, 11, 2
+ read_rand r12, 11, 1
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d2[0]}, [r11]
+ read_rand r11, 11, 0
+ vld1.16 {d2[1]}, [r12]
+ add r11, r3, r11, lsl #1
+ vld1.16 {d2[2]}, [r11]
+ vrshl.s16 d2, d2, d30
+ vmovn.i16 d2, q1
+ vext.8 q0, q0, q1, #7
+.else
+ mov r1, #4
+ bl output_\lag\()_neon
+
+ increment_seed 4, shift=0
+ vmov q1, q5
+
+.ifc \edge, right
+ mov r1, #3
+ bl output_\lag\()_neon
+ read_shift_rand r11, 11
+ add r11, r3, r11, lsl #1
+ vld1.16 {d2[0]}, [r11]
+ vrshl.s16 d2, d2, d30
+ vext.8 q0, q0, q1, #1
+.else
+ mov r1, #4
+ bl output_\lag\()_neon
+.endif
+.endif
+.if \store
+ vst1.8 {q0}, [r0]!
+.endif
+ pop {r11}
+ pop {r1, pc}
+.endif
+.endm
+
+.macro sum_lag1_func type, uv_layout, edge, elems=16
+function sum_\type\()_lag1_\edge\()_neon
+ push {r1, lr}
+ sum_lag_n_body lag1, \type, \uv_layout, \edge, \elems, store=0
+endfunc
+.endm
+
+sum_lag1_func y, 0, left
+sum_lag1_func y, 0, mid
+sum_lag1_func y, 0, right, 15
+sum_lag1_func uv_444, 444, left
+sum_lag1_func uv_444, 444, mid
+sum_lag1_func uv_444, 444, right, 15
+sum_lag1_func uv_422, 422, left
+sum_lag1_func uv_422, 422, mid
+sum_lag1_func uv_422, 422, right, 9
+sum_lag1_func uv_420, 420, left
+sum_lag1_func uv_420, 420, mid
+sum_lag1_func uv_420, 420, right, 9
+
+.macro sum_lag1 type, dst, left, mid, right, edge=mid
+ vmov q3, \mid
+ vext.8 q0, \left, \mid, #15
+ vext.8 q1, \mid, \right, #1
+ bl sum_\type\()_lag1_\edge\()_neon
+ vmov \dst, q0
+.endm
+
+.macro sum_y_lag1 dst, left, mid, right, edge=mid
+ sum_lag1 y, \dst, \left, \mid, \right, \edge
+.endm
+
+.macro sum_uv_444_lag1 dst, left, mid, right, edge=mid
+ sum_lag1 uv_444, \dst, \left, \mid, \right, \edge
+.endm
+
+.macro sum_uv_422_lag1 dst, left, mid, right, edge=mid
+ sum_lag1 uv_422, \dst, \left, \mid, \right, \edge
+.endm
+
+.macro sum_uv_420_lag1 dst, left, mid, right, edge=mid
+ sum_lag1 uv_420, \dst, \left, \mid, \right, \edge
+.endm
+
+
+function sum_lag2_above_neon
+ push {lr}
+ sub r12, r0, #2*GRAIN_WIDTH - 16
+ sub lr, r0, #1*GRAIN_WIDTH - 16
+ vld1.8 {q10}, [r12] // load top right
+ vld1.8 {q13}, [lr]
+
+ vext.8 q6, q8, q9, #14 // top left, top mid
+ vdup.8 d14, d28[0]
+ vext.8 q8, q8, q9, #15
+ vdup.8 d15, d28[1]
+
+ vmull.s8 q0, d12, d14
+ vmull.s8 q1, d13, d14
+ vmull.s8 q6, d16, d15
+ vmull.s8 q8, d17, d15
+
+ vaddl.s16 q2, d0, d12
+ vaddl.s16 q3, d1, d13
+ vaddl.s16 q4, d2, d16
+ vaddl.s16 q5, d3, d17
+
+ vext.8 q6, q9, q10, #1 // top mid, top right
+ vdup.8 d14, d28[3]
+ vext.8 q8, q9, q10, #2
+ vdup.8 d15, d28[4]
+
+ vmull.s8 q0, d12, d14
+ vmull.s8 q1, d13, d14
+ vmull.s8 q6, d16, d15
+ vmull.s8 q8, d17, d15
+
+ vaddl.s16 q7, d0, d12
+ vaddl.s16 q0, d1, d13
+ vaddl.s16 q6, d2, d16
+ vaddl.s16 q1, d3, d17
+
+ vadd.i32 q2, q2, q7
+ vadd.i32 q3, q3, q0
+ vadd.i32 q4, q4, q6
+ vadd.i32 q5, q5, q1
+
+ vext.8 q6, q11, q12, #14 // top left, top mid
+ vdup.8 d14, d28[5]
+ vext.8 q8, q11, q12, #15
+ vdup.8 d15, d28[6]
+
+ vmull.s8 q0, d12, d14
+ vmull.s8 q1, d13, d14
+ vmull.s8 q6, d16, d15
+ vmull.s8 q8, d17, d15
+
+ vaddl.s16 q7, d0, d12
+ vaddl.s16 q0, d1, d13
+ vaddl.s16 q6, d2, d16
+ vaddl.s16 q1, d3, d17
+
+ vadd.i32 q2, q2, q7
+ vadd.i32 q3, q3, q0
+ vadd.i32 q4, q4, q6
+ vadd.i32 q5, q5, q1
+
+ vext.8 q6, q12, q13, #1 // top mid, top right
+ vdup.8 d14, d29[0]
+ vext.8 q8, q12, q13, #2
+ vdup.8 d15, d29[1]
+
+ vmull.s8 q0, d12, d14
+ vmull.s8 q1, d13, d14
+ vmull.s8 q6, d16, d15
+ vmull.s8 q8, d17, d15
+
+ vaddl.s16 q7, d0, d12
+ vaddl.s16 q0, d1, d13
+ vaddl.s16 q6, d2, d16
+ vaddl.s16 q1, d3, d17
+
+ vadd.i32 q2, q2, q7
+ vadd.i32 q3, q3, q0
+ vadd.i32 q4, q4, q6
+ vadd.i32 q5, q5, q1
+
+ vdup.8 d14, d28[2]
+ vdup.8 d15, d28[7]
+
+ vmull.s8 q0, d18, d14
+ vmull.s8 q1, d19, d14
+ vmull.s8 q6, d24, d15
+ vmull.s8 q8, d25, d15
+
+ vaddl.s16 q7, d0, d12
+ vaddl.s16 q0, d1, d13
+ vaddl.s16 q6, d2, d16
+ vaddl.s16 q1, d3, d17
+
+ vmov q8, q9
+ vmov q9, q10
+
+ vadd.i32 q2, q2, q7
+ vadd.i32 q3, q3, q0
+ vadd.i32 q4, q4, q6
+ vadd.i32 q5, q5, q1
+
+ vmov q11, q12
+ vmov q12, q13
+
+ pop {pc}
+endfunc
+
+.macro sum_lag2_func type, uv_layout, edge, elems=16
+function sum_\type\()_lag2_\edge\()_neon
+ push {r1, lr}
+.ifc \edge, left
+ sub r12, r0, #2*GRAIN_WIDTH
+ sub lr, r0, #1*GRAIN_WIDTH
+ vld1.8 {q9}, [r12] // load the previous block right above
+ vld1.8 {q12}, [lr]
+.endif
+ sum_lag_n_body lag2, \type, \uv_layout, \edge, \elems, store=1, uv_coeff=d29[4]
+endfunc
+.endm
+
+sum_lag2_func y, 0, left
+sum_lag2_func y, 0, mid
+sum_lag2_func y, 0, right, 15
+sum_lag2_func uv_444, 444, left
+sum_lag2_func uv_444, 444, mid
+sum_lag2_func uv_444, 444, right, 15
+sum_lag2_func uv_422, 422, left
+sum_lag2_func uv_422, 422, mid
+sum_lag2_func uv_422, 422, right, 9
+sum_lag2_func uv_420, 420, left
+sum_lag2_func uv_420, 420, mid
+sum_lag2_func uv_420, 420, right, 9
+
+
+function sum_lag3_left_above_neon
+ // A separate codepath for the left edge, to avoid reading outside
+ // of the edge of the buffer.
+ sub r12, r0, #3*GRAIN_WIDTH
+ vld1.8 {q11, q12}, [r12]
+ vext.8 q12, q11, q12, #13
+ vext.8 q11, q11, q11, #13
+ b sum_lag3_above_start
+endfunc
+
+function sum_lag3_above_neon
+ sub r12, r0, #3*GRAIN_WIDTH + 3
+ vld1.8 {q11, q12}, [r12]
+
+sum_lag3_above_start:
+ vdup.8 d20, d26[0]
+ vext.8 q9, q11, q12, #1
+ vdup.8 d21, d26[1]
+
+ vmull.s8 q0, d22, d20
+ vmull.s8 q1, d23, d20
+ vmull.s8 q6, d18, d21
+ vmull.s8 q7, d19, d21
+
+ vext.8 q8, q11, q12, #2
+ vdup.8 d20, d26[2]
+ vext.8 q9, q11, q12, #3
+ vdup.8 d21, d26[3]
+
+ vaddl.s16 q2, d0, d12
+ vaddl.s16 q3, d1, d13
+ vaddl.s16 q4, d2, d14
+ vaddl.s16 q5, d3, d15
+
+ vmull.s8 q0, d16, d20
+ vmull.s8 q1, d17, d20
+ vmull.s8 q6, d18, d21
+ vmull.s8 q7, d19, d21
+
+ vaddl.s16 q8, d0, d12
+ vaddl.s16 q9, d1, d13
+ vaddl.s16 q0, d2, d14
+ vaddl.s16 q1, d3, d15
+
+ vext.8 q6, q11, q12, #4
+ vdup.8 d20, d26[4]
+ vext.8 q7, q11, q12, #5
+ vdup.8 d21, d26[5]
+
+ vadd.i32 q2, q2, q8
+ vadd.i32 q3, q3, q9
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d12, d20
+ vmull.s8 q1, d13, d20
+ vmull.s8 q8, d14, d21
+ vmull.s8 q9, d15, d21
+
+ sub r12, r0, #2*GRAIN_WIDTH + 3
+
+ vaddl.s16 q6, d0, d16
+ vaddl.s16 q7, d1, d17
+ vaddl.s16 q0, d2, d18
+ vaddl.s16 q1, d3, d19
+
+ vext.8 q8, q11, q12, #6
+ vld1.8 {q11, q12}, [r12]
+ vdup.8 d20, d26[6]
+ vdup.8 d21, d26[7]
+
+ vadd.i32 q2, q2, q6
+ vadd.i32 q3, q3, q7
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d16, d20
+ vmull.s8 q1, d17, d20
+ vmull.s8 q6, d22, d21
+ vmull.s8 q7, d23, d21
+
+ vaddl.s16 q8, d0, d12
+ vaddl.s16 q9, d1, d13
+ vaddl.s16 q0, d2, d14
+ vaddl.s16 q1, d3, d15
+
+ vext.8 q6, q11, q12, #1
+ vdup.8 d20, d27[0]
+ vext.8 q7, q11, q12, #2
+ vdup.8 d21, d27[1]
+
+ vadd.i32 q2, q2, q8
+ vadd.i32 q3, q3, q9
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d12, d20
+ vmull.s8 q1, d13, d20
+ vmull.s8 q8, d14, d21
+ vmull.s8 q9, d15, d21
+
+ vaddl.s16 q6, d0, d16
+ vaddl.s16 q7, d1, d17
+ vaddl.s16 q0, d2, d18
+ vaddl.s16 q1, d3, d19
+
+ vext.8 q8, q11, q12, #3
+ vdup.8 d20, d27[2]
+ vext.8 q9, q11, q12, #4
+ vdup.8 d21, d27[3]
+
+ vadd.i32 q2, q2, q6
+ vadd.i32 q3, q3, q7
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d16, d20
+ vmull.s8 q1, d17, d20
+ vmull.s8 q6, d18, d21
+ vmull.s8 q7, d19, d21
+
+ sub r12, r0, #1*GRAIN_WIDTH + 3
+
+ vaddl.s16 q8, d0, d12
+ vaddl.s16 q9, d1, d13
+ vaddl.s16 q0, d2, d14
+ vaddl.s16 q1, d3, d15
+
+ vext.8 q6, q11, q12, #5
+ vdup.8 d20, d27[4]
+ vext.8 q7, q11, q12, #6
+ vdup.8 d21, d27[5]
+
+ vld1.8 {q11, q12}, [r12]
+
+ vadd.i32 q2, q2, q8
+ vadd.i32 q3, q3, q9
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d12, d20
+ vmull.s8 q1, d13, d20
+ vmull.s8 q8, d14, d21
+ vmull.s8 q9, d15, d21
+
+ vaddl.s16 q6, d0, d16
+ vaddl.s16 q7, d1, d17
+ vaddl.s16 q0, d2, d18
+ vaddl.s16 q1, d3, d19
+
+ vdup.8 d20, d27[6]
+ vext.8 q9, q11, q12, #1
+ vdup.8 d21, d27[7]
+
+ vadd.i32 q2, q2, q6
+ vadd.i32 q3, q3, q7
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d22, d20
+ vmull.s8 q1, d23, d20
+ vmull.s8 q6, d18, d21
+ vmull.s8 q7, d19, d21
+
+ vaddl.s16 q8, d0, d12
+ vaddl.s16 q9, d1, d13
+ vaddl.s16 q0, d2, d14
+ vaddl.s16 q1, d3, d15
+
+ vext.8 q6, q11, q12, #2
+ vdup.8 d20, d28[0]
+ vext.8 q7, q11, q12, #3
+ vdup.8 d21, d28[1]
+
+ vadd.i32 q2, q2, q8
+ vadd.i32 q3, q3, q9
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d12, d20
+ vmull.s8 q1, d13, d20
+ vmull.s8 q8, d14, d21
+ vmull.s8 q9, d15, d21
+
+ vaddl.s16 q6, d0, d16
+ vaddl.s16 q7, d1, d17
+ vaddl.s16 q0, d2, d18
+ vaddl.s16 q1, d3, d19
+
+ vext.8 q8, q11, q12, #4
+ vdup.8 d20, d28[2]
+ vext.8 q9, q11, q12, #5
+ vdup.8 d21, d28[3]
+
+ vadd.i32 q2, q2, q6
+ vadd.i32 q3, q3, q7
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d16, d20
+ vmull.s8 q1, d17, d20
+ vmull.s8 q6, d18, d21
+ vmull.s8 q7, d19, d21
+
+ vaddl.s16 q8, d0, d12
+ vaddl.s16 q9, d1, d13
+ vaddl.s16 q0, d2, d14
+ vaddl.s16 q1, d3, d15
+
+ vext.8 q6, q11, q12, #6
+ vdup.8 d20, d28[4]
+
+ vadd.i32 q2, q2, q8
+ vadd.i32 q3, q3, q9
+ vadd.i32 q4, q4, q0
+ vadd.i32 q5, q5, q1
+
+ vmull.s8 q0, d12, d20
+ vmull.s8 q1, d13, d20
+
+ vaddw.s16 q2, q2, d0
+ vaddw.s16 q3, q3, d1
+ vaddw.s16 q4, q4, d2
+ vaddw.s16 q5, q5, d3
+
+ bx lr
+endfunc
+
+.macro sum_lag3_func type, uv_layout, edge, elems=16
+function sum_\type\()_lag3_\edge\()_neon
+ push {r1, lr}
+ sum_lag_n_body lag3, \type, \uv_layout, \edge, \elems, store=1, uv_coeff=d29[0]
+endfunc
+.endm
+
+sum_lag3_func y, 0, left
+sum_lag3_func y, 0, mid
+sum_lag3_func y, 0, right, 15
+sum_lag3_func uv_444, 444, left
+sum_lag3_func uv_444, 444, mid
+sum_lag3_func uv_444, 444, right, 15
+sum_lag3_func uv_422, 422, left
+sum_lag3_func uv_422, 422, mid
+sum_lag3_func uv_422, 422, right, 9
+sum_lag3_func uv_420, 420, left
+sum_lag3_func uv_420, 420, mid
+sum_lag3_func uv_420, 420, right, 9
+
+function generate_grain_rows_neon
+ push {r11,lr}
+1:
+ get_grain_row d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26
+ subs r1, r1, #1
+ store_grain_row d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26
+ bgt 1b
+ pop {r11,pc}
+endfunc
+
+function generate_grain_rows_44_neon
+ push {r11,lr}
+1:
+ get_grain_row_44 d16, d17, d18, d19, d20, d21
+ subs r1, r1, #1
+ store_grain_row_44 d16, d17, d18, d19, d20, d21
+ bgt 1b
+ pop {r11,pc}
+endfunc
+
+function gen_grain_uv_444_lag0_neon
+ vld1.8 {q3}, [r11]!
+ push {r11,lr}
+ bl get_gaussian_neon
+ vrshl.s16 q8, q0, q15
+ bl get_gaussian_neon
+ vrshl.s16 q9, q0, q15
+ vqmovn.s16 d0, q8
+ vqmovn.s16 d1, q9
+
+ vand q3, q3, q1
+ vmull.s8 q2, d6, d22
+ vmull.s8 q3, d7, d22
+ vrshl.s16 q2, q2, q12
+ vrshl.s16 q3, q3, q12
+ vaddw.s8 q2, q2, d0
+ vaddw.s8 q3, q3, d1
+ vqmovn.s16 d4, q2
+ vqmovn.s16 d5, q3
+ vst1.8 {q2}, [r0]!
+ pop {r11,pc}
+endfunc
+
+function get_grain_row_44_neon
+ push {r11,lr}
+ get_grain_row_44 d16, d17, d18, d19, d20, d21
+ pop {r11,pc}
+endfunc
+
+function add_uv_420_coeff_lag0_neon
+ vld1.16 {q2, q3}, [r11]!
+ vld1.16 {q4, q5}, [r12]!
+ vpaddl.s8 q2, q2
+ vpaddl.s8 q3, q3
+ vpaddl.s8 q4, q4
+ vpaddl.s8 q5, q5
+ vadd.i16 q2, q2, q4
+ vadd.i16 q3, q3, q5
+ vrshrn.s16 d4, q2, #2
+ vrshrn.s16 d5, q3, #2
+ b add_coeff_lag0_start
+endfunc
+
+function add_uv_422_coeff_lag0_neon
+ vld1.16 {q2, q3}, [r11]!
+ vpaddl.s8 q2, q2
+ vpaddl.s8 q3, q3
+ vrshrn.s16 d4, q2, #1
+ vrshrn.s16 d5, q3, #1
+
+add_coeff_lag0_start:
+ vand q3, q2, q1
+ vmull.s8 q2, d6, d22
+ vmull.s8 q3, d7, d22
+ vrshl.s16 q2, q2, q12
+ vrshl.s16 q3, q3, q12
+ vaddw.s8 q2, q2, d0
+ vaddw.s8 q3, q3, d1
+ vqmovn.s16 d4, q2
+ vqmovn.s16 d5, q3
+ bx lr
+endfunc
+
+.macro gen_grain_82 type
+function generate_grain_\type\()_8bpc_neon, export=1
+ push {r4-r11,lr}
+
+.ifc \type, uv_444
+ mov r12, r3
+ mov lr, #28
+ add r11, r1, #3*GRAIN_WIDTH
+ mov r1, r2
+ mul r12, r12, lr
+.endif
+ movrel r3, X(gaussian_sequence)
+ ldr r2, [r1, #FGD_SEED]
+ ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
+.ifc \type, y
+ add r4, r1, #FGD_AR_COEFFS_Y
+.else
+ add r4, r1, #FGD_AR_COEFFS_UV
+.endif
+ adr r5, L(gen_grain_\type\()_tbl)
+ ldr r6, [r1, #FGD_AR_COEFF_LAG]
+ add r9, r9, #4
+ ldr r6, [r5, r6, lsl #2]
+ vdup.16 q15, r9 // 4 + data->grain_scale_shift
+ add r5, r5, r6
+ vneg.s16 q15, q15
+
+.ifc \type, uv_444
+ cmp r12, #0
+ movw r10, #0x49d8
+ movw lr, #0xb524
+ // Intentionally using a separate register instead of moveq with an
+ // immediate constant, to avoid the it-instruction forms that are
+ // deprecated on armv8.
+ it eq
+ moveq r10, lr
+ add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
+ eor r2, r2, r10
+.endif
+
+ ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
+ mov r8, #1
+ mov r10, #1
+ lsl r8, r8, r7 // 1 << ar_coeff_shift
+ lsl r10, r10, r9 // 1 << (4 + data->grain_scale_shift)
+ lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
+ lsr r10, r10, #1 // 1 << (4 + data->grain_scale_shift - 1)
+
+ bx r5
+
+ .align 2
+L(gen_grain_\type\()_tbl):
+ .word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+
+L(generate_grain_\type\()_lag0):
+.ifc \type, y
+ mov r1, #GRAIN_HEIGHT
+ bl generate_grain_rows_neon
+.else
+
+ mov r1, #3
+ bl generate_grain_rows_neon
+ mov r1, #GRAIN_HEIGHT-3
+
+ vdup.16 q12, r7
+ vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
+ vmov.i8 q0, #0
+ vmov.i8 q1, #255
+ vext.8 q13, q0, q1, #13
+ vext.8 q14, q1, q0, #1
+ vneg.s16 q12, q12
+
+1:
+ vmov q1, q13
+ bl gen_grain_uv_444_lag0_neon // 16
+ vmov.i8 q1, #255
+ bl gen_grain_uv_444_lag0_neon // 32
+ bl gen_grain_uv_444_lag0_neon // 48
+ bl gen_grain_uv_444_lag0_neon // 64
+ vmov q1, q14
+ bl gen_grain_uv_444_lag0_neon // 80
+ get_grain_2 d16
+ subs r1, r1, #1
+ add r11, r11, #2
+ vst1.16 {d16[0]}, [r0]!
+ bgt 1b
+.endif
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag1):
+ vpush {q4-q7}
+ mov r5, #127
+ vld1.8 {d27[]}, [r4]! // ar_coeffs_y[0]
+ vld1.8 {d28[]}, [r4]! // ar_coeffs_y[1]
+ vld1.8 {d29[]}, [r4] // ar_coeffs_y[2]
+.ifc \type, y
+ ldrsb r4, [r4, #1] // ar_coeffs_y[3]
+.else
+ add r4, r4, #2
+.endif
+
+ mov r1, #3
+.ifc \type, uv_444
+ vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
+ ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
+.endif
+ bl generate_grain_rows_neon
+
+ mov r1, #GRAIN_HEIGHT - 3
+1:
+ sum_\type\()_lag1 q7, q8, q8, q9, left
+ sum_\type\()_lag1 q8, q8, q9, q10
+ sum_\type\()_lag1 q9, q9, q10, q11
+ sum_\type\()_lag1 q10, q10, q11, q12
+ sum_\type\()_lag1 q12, q11, q12, q13, right
+ get_grain_2 d26
+ subs r1, r1, #1
+.ifc \type, uv_444
+ add r11, r11, #2
+.endif
+ store_grain_row d14, d15, d16, d17, d18, d19, d20, d21, d24, d25, d26
+ vmov q11, q10
+ vmov q10, q9
+ vmov q9, q8
+ vmov q8, q7
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag2):
+ vpush {q4-q7}
+ mov r5, #127
+ vld1.8 {d28,d29}, [r4] // ar_coeffs_y[0-11], ar_coeffs_uv[0-12]
+
+ vmov.s8 r4, d29[2]
+ vmov.s8 r10, d29[3]
+
+ mov r1, #3
+ bl generate_grain_rows_neon
+
+ mov r1, #GRAIN_HEIGHT - 3
+1:
+ bl sum_\type\()_lag2_left_neon
+ bl sum_\type\()_lag2_mid_neon
+ bl sum_\type\()_lag2_mid_neon
+ bl sum_\type\()_lag2_mid_neon
+ bl sum_\type\()_lag2_right_neon
+ get_grain_2 d16
+ subs r1, r1, #1
+.ifc \type, uv_444
+ add r11, r11, #2
+.endif
+ vst1.16 {d16[0]}, [r0]!
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag3):
+ vpush {q4-q7}
+ mov r5, #127
+ vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
+
+ vmov.u8 r4, d28[5]
+ vmov.u8 r10, d28[6]
+ vmov.u8 r12, d28[7]
+
+ orr r4, r4, r10, lsl #8
+ orr r4, r4, r12, lsl #16
+
+ mov r1, #3
+ vpush {d26}
+ bl generate_grain_rows_neon
+ vpop {d26}
+
+ mov r1, #GRAIN_HEIGHT - 3
+1:
+ bl sum_\type\()_lag3_left_neon
+ bl sum_\type\()_lag3_mid_neon
+ bl sum_\type\()_lag3_mid_neon
+ bl sum_\type\()_lag3_mid_neon
+ bl sum_\type\()_lag3_right_neon
+ get_grain_2 d16
+ subs r1, r1, #1
+.ifc \type, uv_444
+ add r11, r11, #2
+.endif
+ vst1.16 {d16[0]}, [r0]!
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+gen_grain_82 y
+gen_grain_82 uv_444
+
+.macro set_height dst, type
+.ifc \type, uv_420
+ mov \dst, #SUB_GRAIN_HEIGHT-3
+.else
+ mov \dst, #GRAIN_HEIGHT-3
+.endif
+.endm
+
+.macro increment_y_ptr reg, type
+.ifc \type, uv_420
+ add \reg, \reg, #2*GRAIN_WIDTH-(3*32)
+.else
+ sub \reg, \reg, #3*32-GRAIN_WIDTH
+.endif
+.endm
+
+.macro gen_grain_44 type
+function generate_grain_\type\()_8bpc_neon, export=1
+ push {r4-r11,lr}
+
+ mov r12, r3
+ mov lr, #28
+ add r11, r1, #3*GRAIN_WIDTH-3
+ mov r1, r2
+ mul r12, r12, lr
+
+ movrel r3, X(gaussian_sequence)
+ ldr r2, [r1, #FGD_SEED]
+ ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
+ add r4, r1, #FGD_AR_COEFFS_UV
+ adr r5, L(gen_grain_\type\()_tbl)
+ ldr r6, [r1, #FGD_AR_COEFF_LAG]
+ add r9, r9, #4
+ ldr r6, [r5, r6, lsl #2]
+ vdup.16 q15, r9 // 4 + data->grain_scale_shift
+ add r5, r5, r6
+ vneg.s16 q15, q15
+
+ cmp r12, #0
+ movw r10, #0x49d8
+ movw lr, #0xb524
+ // Intentionally using a separate register instead of moveq with an
+ // immediate constant, to avoid ARMv8-deprecated IT instruction forms.
+ it eq
+ moveq r10, lr
+ add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
+ eor r2, r2, r10
+
+ ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
+ mov r8, #1
+ mov r10, #1
+ lsl r8, r8, r7 // 1 << ar_coeff_shift
+ lsl r10, r10, r9 // 1 << (4 + data->grain_scale_shift)
+ lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
+ lsr r10, r10, #1 // 1 << (4 + data->grain_scale_shift - 1)
+ bx r5
+
+ .align 2
+L(gen_grain_\type\()_tbl):
+ .word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+
+L(generate_grain_\type\()_lag0):
+.ifc \type, uv_420
+ vpush {q4-q5}
+.endif
+ mov r1, #3
+ bl generate_grain_rows_44_neon
+ set_height r1, \type
+
+ vdup.16 q12, r7
+ vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
+ vmov.i8 q0, #0
+ vmov.i8 q1, #255
+ vext.8 q13, q0, q1, #13
+ vext.8 q14, q1, q0, #7
+ vneg.s16 q12, q12
+
+1:
+ bl get_grain_row_44_neon
+.ifc \type, uv_420
+ add r12, r11, #GRAIN_WIDTH
+.endif
+ vmov q1, q13
+ vmov q0, q8
+ bl add_\type\()_coeff_lag0_neon
+ vmov.i8 q1, #255
+ vmov q0, q9
+ vmov q8, q2
+ bl add_\type\()_coeff_lag0_neon
+ vmov.i8 q1, q14
+ vmov q0, q10
+ vmov q9, q2
+ bl add_\type\()_coeff_lag0_neon
+ vmov q10, q2
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ store_grain_row_44 d16, d17, d18, d19, d20, d21
+ bgt 1b
+
+.ifc \type, uv_420
+ vpop {q4-q5}
+.endif
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag1):
+ vpush {q4-q7}
+ mov r5, #127
+ vld1.8 {d27[]}, [r4]! // ar_coeffs_uv[0]
+ vld1.8 {d28[]}, [r4]! // ar_coeffs_uv[1]
+ vld1.8 {d29[]}, [r4] // ar_coeffs_uv[2]
+ add r4, r4, #2
+
+ mov r1, #3
+ vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
+ ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
+ bl generate_grain_rows_44_neon
+
+ set_height r1, \type
+1:
+ sum_\type\()_lag1 q7, q8, q8, q9, left
+ sum_\type\()_lag1 q8, q8, q9, q10
+ sum_\type\()_lag1 q10, q9, q10, q11, right
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ store_grain_row_44 d14, d15, d16, d17, d20, d21
+ vmov q9, q8
+ vmov q8, q7
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag2):
+ vpush {q4-q7}
+ mov r5, #127
+ vld1.8 {d28,d29}, [r4] // ar_coeffs_uv[0-12]
+
+ vmov.s8 r4, d29[2]
+ vmov.s8 r10, d29[3]
+
+ mov r1, #3
+ bl generate_grain_rows_44_neon
+
+ set_height r1, \type
+1:
+ bl sum_\type\()_lag2_left_neon
+ bl sum_\type\()_lag2_mid_neon
+ bl sum_\type\()_lag2_right_neon
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ add r0, r0, #GRAIN_WIDTH-48
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag3):
+ vpush {q4-q7}
+ mov r5, #127
+ vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
+
+ vmov.u8 r4, d28[5]
+ vmov.u8 r10, d28[6]
+ vmov.u8 r12, d28[7]
+
+ orr r4, r4, r10, lsl #8
+ orr r4, r4, r12, lsl #16
+
+ mov r1, #3
+ bl generate_grain_rows_44_neon
+
+ set_height r1, \type
+1:
+ bl sum_\type\()_lag3_left_neon
+ bl sum_\type\()_lag3_mid_neon
+ bl sum_\type\()_lag3_right_neon
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ add r0, r0, #GRAIN_WIDTH-48
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+gen_grain_44 uv_420
+gen_grain_44 uv_422
+
+.macro gather_interleaved dst1, dst2, src1, src2, off
+ vmov.u8 r11, \src1[0+\off]
+ vmov.u8 r12, \src2[0+\off]
+ add r11, r11, r3
+ vmov.u8 lr, \src1[2+\off]
+ add r12, r12, r3
+ vld1.8 {\dst1[0+\off]}, [r11]
+ vmov.u8 r11, \src2[2+\off]
+ add lr, lr, r3
+ vld1.8 {\dst2[0+\off]}, [r12]
+ vmov.u8 r12, \src1[4+\off]
+ add r11, r11, r3
+ vld1.8 {\dst1[2+\off]}, [lr]
+ vmov.u8 lr, \src2[4+\off]
+ add r12, r12, r3
+ vld1.8 {\dst2[2+\off]}, [r11]
+ vmov.u8 r11, \src1[6+\off]
+ add lr, lr, r3
+ vld1.8 {\dst1[4+\off]}, [r12]
+ vmov.u8 r12, \src2[6+\off]
+ add r11, r11, r3
+ vld1.8 {\dst2[4+\off]}, [lr]
+ add r12, r12, r3
+ vld1.8 {\dst1[6+\off]}, [r11]
+ vld1.8 {\dst2[6+\off]}, [r12]
+.endm
+
+.macro gather dst1, dst2, dst3, dst4, src1, src2, src3, src4
+ gather_interleaved \dst1, \dst3, \src1, \src3, 0
+ gather_interleaved \dst1, \dst3, \src1, \src3, 1
+ gather_interleaved \dst2, \dst4, \src2, \src4, 0
+ gather_interleaved \dst2, \dst4, \src2, \src4, 1
+.endm
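+
+// NEON has no per-lane byte gather, so the macros above emulate the lookup
+//   dst[i] = scaling[src[i]]
+// with scalar loads, using r3 as the base of the scaling LUT.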
+
+function gather32_neon
+ push {r11-r12,lr}
+ gather d8, d9, d10, d11, d0, d1, d2, d3
+ pop {r11-r12,pc}
+endfunc
+
+function gather16_neon
+ push {r11-r12,lr}
+ gather_interleaved d8, d9, d0, d1, 0
+ gather_interleaved d8, d9, d0, d1, 1
+ pop {r11-r12,pc}
+endfunc
+
+const overlap_coeffs_0, align=4
+ .byte 27, 17, 0, 0, 0, 0, 0, 0
+ .byte 17, 27, 32, 32, 32, 32, 32, 32
+endconst
+
+const overlap_coeffs_1, align=4
+ .byte 23, 0, 0, 0, 0, 0, 0, 0
+ .byte 22, 32, 32, 32, 32, 32, 32, 32
+endconst
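+
+// Overlap blending between adjacent 32x32 blocks combines the previous
+// block's grain (old) with the current one's, roughly:
+//   g = round2(w_old * old + w_cur * cur, 5)
+// saturated back to the grain range, with weight pairs (27,17)/(17,27) for
+// the two overlapping rows/columns, or (23,22) for the single overlapping
+// one when subsampled; the remaining lanes get (0,32) and pass through
+// unchanged.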
+
+.macro calc_offset offx, offy, src, sx, sy
+ and \offy, \src, #0xF // randval & 0xF
+ lsr \offx, \src, #4 // randval >> 4
+.if \sy == 0
+ add \offy, \offy, \offy // 2 * (randval & 0xF)
+.endif
+.if \sx == 0
+ add \offx, \offx, \offx // 2 * (randval >> 4)
+.endif
+.endm
+
+.macro add_offset dst, offx, offy, src, stride
+ mla \dst, \stride, \offy, \src // grain_lut += grain_stride * offy
+ add \dst, \dst, \offx // grain_lut += offx
+.endm
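+
+// In scalar terms (illustrative):
+//   offy = randval & 0xF;  offx = randval >> 4;
+//   if (!sy) offy *= 2;    if (!sx) offx *= 2;
+//   grain = grain_lut + offy * grain_stride + offx;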
+
+// void dav1d_fgy_32x32_8bpc_neon(pixel *const dst, const pixel *const src,
+// const ptrdiff_t stride,
+// const uint8_t scaling[SCALING_SIZE],
+// const int scaling_shift,
+// const entry grain_lut[][GRAIN_WIDTH],
+// const int offsets[][2],
+// const int h, const ptrdiff_t clip,
+// const ptrdiff_t type);
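+//
+// Rough per-pixel operation of the loops below (scalar sketch, ignoring the
+// overlap blending done by the ox/oy variants):
+//   noise  = round2(scaling[src[x]] * grain[x], scaling_shift);
+//   dst[x] = iclip(src[x] + noise, clip ? 16 : 0, clip ? 235 : 255);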
+function fgy_32x32_8bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100] // scaling_shift, grain_lut
+ ldrd r6, r7, [sp, #108] // offsets, h
+ ldr r8, [sp, #116] // clip
+ mov r9, #GRAIN_WIDTH // grain_lut stride
+
+ neg r4, r4
+ vdup.16 q13, r4 // -scaling_shift
+ cmp r8, #0
+
+ movrel_local r12, overlap_coeffs_0
+
+ beq 1f
+ // clip
+ vmov.i8 q14, #16
+ vmov.i8 q15, #235
+ b 2f
+1:
+ // no clip
+ vmov.i8 q14, #0
+ vmov.i8 q15, #255
+2:
+
+ vld1.8 {d24, d25}, [r12, :128] // overlap_coeffs
+
+ add r5, r5, #9 // grain_lut += 9
+ add r5, r5, r9, lsl #3 // grain_lut += 8 * grain_stride
+ add r5, r5, r9 // grain_lut += grain_stride
+
+ ldr r10, [r6, #8] // offsets[1][0]
+ calc_offset r10, r4, r10, 0, 0
+ add_offset r4, r10, r4, r5, r9
+ ldr r10, [r6, #4] // offsets[0][1]
+ calc_offset r10, r11, r10, 0, 0
+ add_offset r11, r10, r11, r5, r9
+ ldr r10, [r6, #12] // offsets[1][1]
+ calc_offset r10, r8, r10, 0, 0
+ add_offset r8, r10, r8, r5, r9
+ ldr r6, [r6] // offsets[0][0]
+ calc_offset r6, lr, r6, 0, 0
+ add_offset r5, r6, lr, r5, r9
+
+ add r4, r4, #32 // grain_lut += BLOCK_SIZE * bx
+ add r6, r11, r9, lsl #5 // grain_lut += grain_stride * BLOCK_SIZE * by
+
+ ldr r10, [sp, #120] // type
+ adr r11, L(fgy_loop_tbl)
+
+ tst r10, #1
+ ldr r10, [r11, r10, lsl #2]
+
+ add r8, r8, r9, lsl #5 // grain_lut += grain_stride * BLOCK_SIZE * by
+ add r8, r8, #32 // grain_lut += BLOCK_SIZE * bx
+
+ add r11, r11, r10
+
+ beq 1f
+ // y overlap
+ vdup.8 d14, d24[0]
+ vdup.8 d15, d24[1]
+ mov r10, r7 // backup actual h
+ mov r7, #2
+1:
+ bx r11
+endfunc
+
+function fgy_loop_neon
+L(fgy_loop_tbl):
+ .word L(loop_00) - L(fgy_loop_tbl) + CONFIG_THUMB
+ .word L(loop_01) - L(fgy_loop_tbl) + CONFIG_THUMB
+ .word L(loop_10) - L(fgy_loop_tbl) + CONFIG_THUMB
+ .word L(loop_11) - L(fgy_loop_tbl) + CONFIG_THUMB
+
+.macro fgy ox, oy
+L(loop_\ox\oy):
+1:
+.if \ox
+ vld1.8 {d8}, [r4], r9 // grain_lut old
+.endif
+.if \oy
+ vld1.8 {q2, q3}, [r6], r9 // grain_lut top
+.endif
+.if \ox && \oy
+ vld1.8 {d10}, [r8], r9 // grain_lut top old
+.endif
+ vld1.8 {q0, q1}, [r1, :128], r2 // src
+ vld1.8 {q10, q11}, [r5], r9 // grain_lut
+
+.if \ox
+ vmull.s8 q4, d8, d24
+ vmlal.s8 q4, d20, d25
+.endif
+
+.if \oy
+.if \ox
+ vmull.s8 q5, d10, d24
+ vmlal.s8 q5, d4, d25
+ vqrshrn.s16 d20, q4, #5
+ vqrshrn.s16 d4, q5, #5
+.endif
+
+ vmull.s8 q4, d20, d15
+ vmull.s8 q5, d21, d15
+ vmull.s8 q8, d22, d15
+ vmull.s8 q9, d23, d15
+ vmlal.s8 q4, d4, d14
+ vmlal.s8 q5, d5, d14
+ vmlal.s8 q8, d6, d14
+ vmlal.s8 q9, d7, d14
+ vqrshrn.s16 d20, q4, #5
+ vqrshrn.s16 d21, q5, #5
+ vqrshrn.s16 d22, q8, #5
+ vqrshrn.s16 d23, q9, #5
+.elseif \ox
+ vqrshrn.s16 d20, q4, #5
+.endif
+
+ bl gather32_neon
+
+ vmovl.s8 q8, d20 // grain
+ vmovl.s8 q9, d21
+ vmovl.s8 q10, d22
+ vmovl.s8 q11, d23
+
+ vmovl.u8 q2, d8 // scaling
+ vmovl.u8 q3, d9
+ vmovl.u8 q4, d10
+ vmovl.u8 q5, d11
+
+ vmul.i16 q8, q8, q2 // scaling * grain
+ vmul.i16 q9, q9, q3
+ vmul.i16 q10, q10, q4
+ vmul.i16 q11, q11, q5
+
+ vrshl.s16 q8, q8, q13 // round2(scaling * grain, scaling_shift)
+ vrshl.s16 q9, q9, q13
+ vrshl.s16 q10, q10, q13
+ vrshl.s16 q11, q11, q13
+
+ vaddw.u8 q8, q8, d0 // *src + noise
+ vaddw.u8 q9, q9, d1
+ vaddw.u8 q10, q10, d2
+ vaddw.u8 q11, q11, d3
+
+ vqmovun.s16 d0, q8
+ vqmovun.s16 d1, q9
+ vqmovun.s16 d2, q10
+ vqmovun.s16 d3, q11
+
+ vmax.u8 q0, q0, q14
+ vmax.u8 q1, q1, q14
+ vmin.u8 q0, q0, q15
+ vmin.u8 q1, q1, q15
+
+ subs r7, r7, #1
+.if \oy
+ vdup.8 d14, d25[0]
+ vdup.8 d15, d25[1]
+.endif
+ vst1.8 {q0, q1}, [r0, :128], r2 // dst
+ bgt 1b
+
+.if \oy
+ cmp r10, #2
+ sub r7, r10, #2 // restore actual remaining h
+ bgt L(loop_\ox\()0)
+.endif
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+.endm
+
+ fgy 0, 0
+ fgy 0, 1
+ fgy 1, 0
+ fgy 1, 1
+endfunc
+
+// void dav1d_fguv_32x32_420_8bpc_neon(pixel *const dst,
+// const pixel *const src,
+// const ptrdiff_t stride,
+// const uint8_t scaling[SCALING_SIZE],
+// const Dav1dFilmGrainData *const data,
+// const entry grain_lut[][GRAIN_WIDTH],
+// const pixel *const luma_row,
+// const ptrdiff_t luma_stride,
+// const int offsets[][2],
+// const ptrdiff_t h, const ptrdiff_t uv,
+// const ptrdiff_t is_id,
+// const ptrdiff_t type);
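+//
+// Rough per-pixel operation (scalar sketch): the co-located luma pixels are
+// averaged to one value per chroma pixel; unless csfl is set, that value is
+// combined with the chroma source as
+//   val = iclip_pixel(((uv_luma_mult * lum + uv_mult * src[x]) >> 6) + uv_offset);
+// val (or lum, with csfl) indexes the scaling LUT, and the noise is added as
+// in fgy above, clipped to 16..240 (16..235 if is_id) when clip is set.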
+.macro fguv layout, sx, sy
+function fguv_32x32_\layout\()_8bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100] // data, grain_lut
+ ldrd r6, r7, [sp, #108] // luma_row, luma_stride
+ ldrd r8, r9, [sp, #116] // offsets, h
+ ldrd r10, r11, [sp, #124] // uv, is_id
+
+ // !csfl
+ add r10, r4, r10, lsl #2 // + 4*uv
+ add r12, r10, #FGD_UV_LUMA_MULT
+ add lr, r10, #FGD_UV_MULT
+ add r10, r10, #FGD_UV_OFFSET
+ vld1.16 {d4[]}, [r12] // uv_luma_mult
+ vld1.16 {d4[2]}, [r10] // uv_offset
+ vld1.16 {d4[1]}, [lr] // uv_mult
+
+ ldr lr, [r4, #FGD_SCALING_SHIFT]
+ ldr r12, [r4, #FGD_CLIP_TO_RESTRICTED_RANGE]
+ neg lr, lr // -scaling_shift
+
+ cmp r12, #0
+ vdup.16 q13, lr // -scaling_shift
+
+ beq 1f
+ // clip
+ cmp r11, #0
+ vmov.i8 q14, #16
+ vmov.i8 q15, #240
+ beq 2f
+ // is_id
+ vmov.i8 q15, #235
+ b 2f
+1:
+ // no clip
+ vmov.i8 q14, #0
+ vmov.i8 q15, #255
+2:
+
+ mov r10, #GRAIN_WIDTH // grain_lut stride
+
+ add r5, r5, #(3 + (2 >> \sx)*3) // grain_lut += 9 or 6
+.if \sy
+ add r5, r5, r10, lsl #2 // grain_lut += 4 * grain_stride
+ add r5, r5, r10, lsl #1 // grain_lut += 2 * grain_stride
+.else
+ add r5, r5, r10, lsl #3 // grain_lut += 8 * grain_stride
+ add r5, r5, r10 // grain_lut += grain_stride
+.endif
+
+ ldr r12, [r8, #8] // offsets[1][0]
+ calc_offset r12, r4, r12, \sx, \sy
+ add_offset r4, r12, r4, r5, r10
+
+ ldr r12, [r8, #4] // offsets[0][1]
+ calc_offset r12, lr, r12, \sx, \sy
+ add_offset lr, r12, lr, r5, r10
+
+ ldr r12, [r8, #12] // offsets[1][1]
+ calc_offset r12, r11, r12, \sx, \sy
+ add_offset r11, r12, r11, r5, r10
+
+ ldr r8, [r8] // offsets[0][0]
+ calc_offset r8, r12, r8, \sx, \sy
+ add_offset r5, r8, r12, r5, r10
+
+ add r4, r4, #(32 >> \sx) // grain_lut += BLOCK_SIZE * bx
+ add r8, lr, r10, lsl #(5 - \sy) // grain_lut += grain_stride * BLOCK_SIZE * by
+ add r11, r11, r10, lsl #(5 - \sy) // grain_lut += grain_stride * BLOCK_SIZE * by
+ add r11, r11, #(32 >> \sx) // grain_lut += BLOCK_SIZE * bx
+
+ movrel_local r12, overlap_coeffs_\sx
+ ldr lr, [sp, #132] // type
+
+ vld1.8 {d24, d25}, [r12, :128] // overlap_coeffs
+
+ movrel_local r12, L(fguv_loop_sx\sx\()_tbl)
+#if CONFIG_THUMB
+ // This uses movrel_local instead of adr above, because the target
+ // can be out of range for adr. But movrel_local leaves the thumb bit
+ // set on COFF (but probably wouldn't if building for thumb on ELF),
+ // thus try to clear the bit for robustness.
+ bic r12, r12, #1
+#endif
+
+ tst lr, #1
+ ldr lr, [r12, lr, lsl #2]
+
+ add r12, r12, lr
+
+ beq 1f
+ // y overlap
+ sub lr, r9, #(2 >> \sy) // backup remaining h
+ mov r9, #(2 >> \sy)
+
+1:
+
+.if \sy
+ vmov.i8 d6, #23
+ vmov.i8 d7, #22
+.else
+ vmov.i8 d6, #27
+ vmov.i8 d7, #17
+.endif
+
+.if \sy
+ add r7, r7, r7 // luma_stride *= 2
+.endif
+
+ bx r12
+endfunc
+.endm
+
+fguv 420, 1, 1
+fguv 422, 1, 0
+fguv 444, 0, 0
+
+function fguv_loop_sx0_neon
+L(fguv_loop_sx0_tbl):
+ .word L(fguv_loop_sx0_csfl0_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl0_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl0_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl0_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+
+.macro fguv_loop_sx0 csfl, ox, oy
+L(fguv_loop_sx0_csfl\csfl\()_\ox\oy):
+.if \oy
+ mov r12, lr
+.endif
+1:
+.if \ox
+ vld1.8 {d8}, [r4], r10 // grain_lut old
+.endif
+.if \oy
+ vld1.8 {q8, q9}, [r8], r10 // grain_lut top
+.endif
+.if \ox && \oy
+ vld1.8 {d10}, [r11], r10 // grain_lut top old
+.endif
+ vld1.8 {q0, q1}, [r6, :128], r7 // luma
+ vld1.8 {q10, q11}, [r5], r10 // grain_lut
+
+.if \ox
+ vmull.s8 q4, d8, d24
+ vmlal.s8 q4, d20, d25
+.endif
+
+.if \oy
+.if \ox
+ vmull.s8 q5, d10, d24
+ vmlal.s8 q5, d16, d25
+ vqrshrn.s16 d20, q4, #5
+ vqrshrn.s16 d16, q5, #5
+.endif
+
+ vmull.s8 q4, d20, d7
+ vmull.s8 q5, d21, d7
+ vmull.s8 q6, d22, d7
+ vmull.s8 q7, d23, d7
+ vmlal.s8 q4, d16, d6
+ vmlal.s8 q5, d17, d6
+ vmlal.s8 q6, d18, d6
+ vmlal.s8 q7, d19, d6
+ vqrshrn.s16 d20, q4, #5
+ vqrshrn.s16 d21, q5, #5
+ vqrshrn.s16 d22, q6, #5
+ vqrshrn.s16 d23, q7, #5
+.elseif \ox
+ vqrshrn.s16 d20, q4, #5
+.endif
+.if !\csfl
+ vld1.8 {q8, q9}, [r1, :128] // src
+ vmovl.u8 q4, d0
+ vmovl.u8 q5, d1
+ vmovl.u8 q6, d2
+ vmovl.u8 q7, d3
+ vmovl.u8 q0, d16
+ vmovl.u8 q1, d17
+ vmovl.u8 q8, d18
+ vmovl.u8 q9, d19
+ vmul.i16 q4, q4, d4[0]
+ vmul.i16 q5, q5, d4[0]
+ vmul.i16 q6, q6, d4[0]
+ vmul.i16 q7, q7, d4[0]
+ vmul.i16 q0, q0, d4[1]
+ vmul.i16 q1, q1, d4[1]
+ vmul.i16 q8, q8, d4[1]
+ vmul.i16 q9, q9, d4[1]
+ vqadd.s16 q4, q4, q0
+ vqadd.s16 q5, q5, q1
+ vqadd.s16 q6, q6, q8
+ vqadd.s16 q7, q7, q9
+ vdup.16 q0, d4[2]
+ vshr.s16 q4, q4, #6
+ vshr.s16 q5, q5, #6
+ vshr.s16 q6, q6, #6
+ vshr.s16 q7, q7, #6
+ vadd.i16 q4, q4, q0
+ vadd.i16 q5, q5, q0
+ vadd.i16 q6, q6, q0
+ vadd.i16 q7, q7, q0
+ vqmovun.s16 d0, q4
+ vqmovun.s16 d1, q5
+ vqmovun.s16 d2, q6
+ vqmovun.s16 d3, q7
+.endif
+
+ bl gather32_neon
+
+ vld1.8 {q0, q1}, [r1, :128], r2 // src
+
+ vmovl.s8 q8, d20 // grain
+ vmovl.s8 q9, d21
+ vmovl.s8 q10, d22
+ vmovl.s8 q11, d23
+
+ vmovl.u8 q6, d8 // scaling
+ vmovl.u8 q7, d9
+ vmovl.u8 q4, d10
+ vmovl.u8 q5, d11
+
+ vmul.i16 q8, q8, q6 // scaling * grain
+ vmul.i16 q9, q9, q7
+ vmul.i16 q10, q10, q4
+ vmul.i16 q11, q11, q5
+
+ vrshl.s16 q8, q8, q13 // round2(scaling * grain, scaling_shift)
+ vrshl.s16 q9, q9, q13
+ vrshl.s16 q10, q10, q13
+ vrshl.s16 q11, q11, q13
+
+ vaddw.u8 q8, q8, d0 // *src + noise
+ vaddw.u8 q9, q9, d1
+ vaddw.u8 q10, q10, d2
+ vaddw.u8 q11, q11, d3
+
+ vqmovun.s16 d0, q8
+ vqmovun.s16 d1, q9
+ vqmovun.s16 d2, q10
+ vqmovun.s16 d3, q11
+
+ vmax.u8 q0, q0, q14
+ vmax.u8 q1, q1, q14
+ vmin.u8 q0, q0, q15
+ vmin.u8 q1, q1, q15
+
+ subs r9, r9, #1
+.if \oy
+ vdup.8 d6, d25[0]
+ vdup.8 d7, d25[1]
+.endif
+
+ vst1.8 {q0, q1}, [r0, :128], r2 // dst
+ bgt 1b
+
+.if \oy
+ cmp r12, #0
+ mov r9, r12 // restore actual remaining h
+ bgt L(fguv_loop_sx0_csfl\csfl\()_\ox\()0)
+.endif
+ b 9f
+.endm
+ fguv_loop_sx0 0, 0, 0
+ fguv_loop_sx0 0, 0, 1
+ fguv_loop_sx0 0, 1, 0
+ fguv_loop_sx0 0, 1, 1
+ fguv_loop_sx0 1, 0, 0
+ fguv_loop_sx0 1, 0, 1
+ fguv_loop_sx0 1, 1, 0
+ fguv_loop_sx0 1, 1, 1
+
+9:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function fguv_loop_sx1_neon
+L(fguv_loop_sx1_tbl):
+ .word L(fguv_loop_sx1_csfl0_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl0_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl0_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl0_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+
+.macro fguv_loop_sx1 csfl, ox, oy
+L(fguv_loop_sx1_csfl\csfl\()_\ox\oy):
+.if \oy
+ mov r12, lr
+.endif
+1:
+.if \ox
+ vld1.8 {d8}, [r4], r10 // grain_lut old
+.endif
+.if \oy
+ vld1.8 {q8}, [r8], r10 // grain_lut top
+.endif
+.if \ox && \oy
+ vld1.8 {d10}, [r11], r10 // grain_lut top old
+.endif
+ vld1.8 {q0, q1}, [r6, :128], r7 // luma
+ vld1.8 {q10}, [r5], r10 // grain_lut
+ vld1.8 {q11}, [r1, :128], r2 // src
+
+.if \ox
+ vmull.s8 q4, d8, d24
+ vmlal.s8 q4, d20, d25
+.endif
+
+ vpaddl.u8 q0, q0
+ vpaddl.u8 q1, q1
+.if \oy
+.if \ox
+ vmull.s8 q5, d10, d24
+ vmlal.s8 q5, d16, d25
+ vqrshrn.s16 d20, q4, #5
+ vqrshrn.s16 d16, q5, #5
+.endif
+
+ vmull.s8 q4, d20, d7
+ vmull.s8 q5, d21, d7
+ vmlal.s8 q4, d16, d6
+ vmlal.s8 q5, d17, d6
+ vqrshrn.s16 d20, q4, #5
+ vqrshrn.s16 d21, q5, #5
+.elseif \ox
+ vqrshrn.s16 d20, q4, #5
+.endif
+.if \csfl
+ vrshrn.u16 d0, q0, #1
+ vrshrn.u16 d1, q1, #1
+.else
+ vrshr.u16 q4, q0, #1
+ vrshr.u16 q5, q1, #1
+ vmovl.u8 q0, d22
+ vmovl.u8 q1, d23
+ vmul.i16 q4, q4, d4[0]
+ vmul.i16 q5, q5, d4[0]
+ vmul.i16 q0, q0, d4[1]
+ vmul.i16 q1, q1, d4[1]
+ vqadd.s16 q4, q4, q0
+ vqadd.s16 q5, q5, q1
+ vdup.16 q0, d4[2]
+ vshr.s16 q4, q4, #6
+ vshr.s16 q5, q5, #6
+ vadd.i16 q4, q4, q0
+ vadd.i16 q5, q5, q0
+ vqmovun.s16 d0, q4
+ vqmovun.s16 d1, q5
+.endif
+
+ bl gather16_neon
+
+ vmovl.s8 q8, d20 // grain
+ vmovl.s8 q9, d21
+
+ vmovl.u8 q6, d8 // scaling
+ vmovl.u8 q7, d9
+
+ vmul.i16 q8, q8, q6 // scaling * grain
+ vmul.i16 q9, q9, q7
+
+ vrshl.s16 q8, q8, q13 // round2(scaling * grain, scaling_shift)
+ vrshl.s16 q9, q9, q13
+
+ vaddw.u8 q8, q8, d22 // *src + noise
+ vaddw.u8 q9, q9, d23
+
+ vqmovun.s16 d0, q8
+ vqmovun.s16 d1, q9
+
+ vmax.u8 q0, q0, q14
+ vmin.u8 q0, q0, q15
+
+ subs r9, r9, #1
+.if \oy
+ vswp d6, d7
+.endif
+ vst1.8 {q0}, [r0, :128], r2 // dst
+ bgt 1b
+
+.if \oy
+ cmp r12, #0
+ mov r9, r12 // restore actual remaining h
+ bgt L(fguv_loop_sx1_csfl\csfl\()_\ox\()0)
+.endif
+
+ b 9f
+.endm
+ fguv_loop_sx1 0, 0, 0
+ fguv_loop_sx1 0, 0, 1
+ fguv_loop_sx1 0, 1, 0
+ fguv_loop_sx1 0, 1, 1
+ fguv_loop_sx1 1, 0, 0
+ fguv_loop_sx1 1, 0, 1
+ fguv_loop_sx1 1, 1, 0
+ fguv_loop_sx1 1, 1, 1
+
+9:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/filmgrain16.S b/third_party/dav1d/src/arm/32/filmgrain16.S
new file mode 100644
index 0000000000..6c36caceae
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/filmgrain16.S
@@ -0,0 +1,2137 @@
+/*
+ * Copyright © 2021, VideoLAN and dav1d authors
+ * Copyright © 2021, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+#include "src/arm/asm-offsets.h"
+
+#define GRAIN_WIDTH 82
+#define GRAIN_HEIGHT 73
+
+#define SUB_GRAIN_WIDTH 44
+#define SUB_GRAIN_HEIGHT 38
+
+.macro increment_seed steps, shift=1
+ lsr r11, r2, #3
+ lsr r12, r2, #12
+ lsr lr, r2, #1
+ eor r11, r2, r11 // (r >> 0) ^ (r >> 3)
+ eor r12, r12, lr // (r >> 12) ^ (r >> 1)
+ eor r11, r11, r12 // (r >> 0) ^ (r >> 3) ^ (r >> 12) ^ (r >> 1)
+.if \shift
+ lsr r2, r2, #\steps
+.endif
+ and r11, r11, #((1 << \steps) - 1) // bit
+.if \shift
+ orr r2, r2, r11, lsl #(16 - \steps) // *state
+.else
+ orr r2, r2, r11, lsl #16 // *state
+.endif
+.endm
+
+.macro read_rand dest, bits, age
+ ubfx \dest, r2, #16 - \bits - \age, #\bits
+.endm
+
+.macro read_shift_rand dest, bits
+ ubfx \dest, r2, #17 - \bits, #\bits
+ lsr r2, r2, #1
+.endm
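+
+// One step of the grain LFSR in scalar C, for reference (increment_seed
+// performs \steps such updates at once):
+//   bit = ((r >> 0) ^ (r >> 1) ^ (r >> 3) ^ (r >> 12)) & 1;
+//   r   = (r >> 1) | (bit << 15);
+// read_rand/read_shift_rand then extract an n-bit value from the top bits
+// of the updated state.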
+
+// special calling convention:
+// r2 holds seed
+// r3 holds dav1d_gaussian_sequence
+// clobbers r11-r12
+// returns in d0-d1
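+// Roughly equivalent to (scalar sketch; rand11 is an illustrative name for
+// an 11-bit read from the LFSR above):
+//   for (int i = 0; i < 8; i++)
+//       out[i] = dav1d_gaussian_sequence[rand11(&seed)];
+// The grain_scale_shift rounding (vrshl by q15/d30) is applied by the callers.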
+function get_gaussian_neon
+ push {r5-r6,lr}
+ increment_seed 4
+ read_rand r5, 11, 3
+ read_rand r6, 11, 2
+ add r5, r3, r5, lsl #1
+ add r6, r3, r6, lsl #1
+ vld1.16 {d0[0]}, [r5]
+ read_rand r5, 11, 1
+ vld1.16 {d0[1]}, [r6]
+ add r5, r3, r5, lsl #1
+ read_rand r6, 11, 0
+ increment_seed 4
+ add r6, r3, r6, lsl #1
+ vld1.16 {d0[2]}, [r5]
+ read_rand r5, 11, 3
+ vld1.16 {d0[3]}, [r6]
+ add r5, r3, r5, lsl #1
+ read_rand r6, 11, 2
+ vld1.16 {d1[0]}, [r5]
+ add r6, r3, r6, lsl #1
+ read_rand r5, 11, 1
+ vld1.16 {d1[1]}, [r6]
+ read_rand r6, 11, 0
+ add r5, r3, r5, lsl #1
+ add r6, r3, r6, lsl #1
+ vld1.16 {d1[2]}, [r5]
+ vld1.16 {d1[3]}, [r6]
+ pop {r5-r6,pc}
+endfunc
+
+function get_grain_2_neon
+ push {r11,lr}
+ increment_seed 2
+ read_rand r11, 11, 1
+ read_rand r12, 11, 0
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d0[0]}, [r11]
+ vld1.16 {d0[1]}, [r12]
+ vrshl.s16 d0, d0, d30
+ pop {r11,pc}
+endfunc
+
+.macro get_grain_2 dst
+ bl get_grain_2_neon
+.ifnc \dst, d0
+ vmov \dst, d0
+.endif
+.endm
+
+function get_grain_4_neon
+ push {r11,lr}
+ increment_seed 4
+ read_rand r11, 11, 3
+ read_rand r12, 11, 2
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d0[0]}, [r11]
+ read_rand r11, 11, 1
+ vld1.16 {d0[1]}, [r12]
+ read_rand r12, 11, 0
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d0[2]}, [r11]
+ vld1.16 {d0[3]}, [r12]
+ vrshl.s16 d0, d0, d30
+ pop {r11,pc}
+endfunc
+
+.macro get_grain_4 dst
+ bl get_grain_4_neon
+.ifnc \dst, d0
+ vmov \dst, d0
+.endif
+.endm
+
+// r1 holds the number of entries to produce
+// r6, r8 and r10 hold the previous output entries
+// q0 holds the vector of produced entries
+// q1 holds the input vector of sums from above
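+// Per produced sample this amounts to (scalar sketch, illustrative names):
+//   grain = round2(dav1d_gaussian_sequence[rand11(&seed)],
+//                  4 - bitdepth_min_8 + grain_scale_shift);
+//   sum   = sum_above + coeff[0]*prev1 (+ coeff[1]*prev2 + coeff[2]*prev3);
+//   out   = iclip(round2(sum, ar_coeff_shift) + grain, grain_min, grain_max);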
+.macro output_lag n
+function output_lag\n\()_neon
+ push {r0, lr}
+.if \n == 1
+ mvn lr, r5 // grain_min = ~grain_max
+.else
+ mov r0, #1
+ mov lr, #1
+ sub r7, r7, #1
+ sub r9, r9, #1
+ lsl r0, r0, r7
+ lsl lr, lr, r9
+ add r7, r7, #1
+ add r9, r9, #1
+.endif
+1:
+ read_shift_rand r12, 11
+ vmov.32 r11, d2[0]
+ lsl r12, r12, #1
+ vext.8 q0, q0, q0, #2
+ ldrsh r12, [r3, r12]
+.if \n == 1
+ mla r11, r6, r4, r11 // sum (above) + *coeff * prev output
+ add r6, r11, r8 // 1 << (ar_coeff_shift - 1)
+ add r12, r12, r10
+ asr r6, r6, r7 // >> ar_coeff_shift
+ asr r12, r12, r9 // >> (4 - bitdepth_min_8 + grain_scale_shift)
+ add r6, r6, r12
+ cmp r6, r5
+.elseif \n == 2
+ mla r11, r8, r4, r11 // sum (above) + *coeff * prev output 1
+ mla r11, r6, r10, r11 // += *coeff * prev output 2
+ mov r8, r6
+ add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
+ add r12, r12, lr // 1 << (4 - bitdepth_min_8 + grain_scale_shift - 1)
+ asr r6, r6, r7 // >> ar_coeff_shift
+ asr r12, r12, r9 // >> (4 - bitdepth_min_8 + grain_scale_shift)
+ add r6, r6, r12
+ push {lr}
+ cmp r6, r5
+ mvn lr, r5 // grain_min = ~grain_max
+.else
+ push {r1-r3}
+ sbfx r1, r4, #0, #8
+ sbfx r2, r4, #8, #8
+ sbfx r3, r4, #16, #8
+ mla r11, r10, r1, r11 // sum (above) + *coeff * prev output 1
+ mla r11, r8, r2, r11 // sum (above) + *coeff * prev output 2
+ mla r11, r6, r3, r11 // += *coeff * prev output 3
+ pop {r1-r3}
+ mov r10, r8
+ mov r8, r6
+
+ add r6, r11, r0 // 1 << (ar_coeff_shift - 1)
+ add r12, r12, lr // 1 << (4 - bitdepth_min_8 + grain_scale_shift - 1)
+ asr r6, r6, r7 // >> ar_coeff_shift
+ asr r12, r12, r9 // >> (4 - bitdepth_min_8 + grain_scale_shift)
+ add r6, r6, r12
+ push {lr}
+ cmp r6, r5
+ mvn lr, r5 // grain_min = ~grain_max
+.endif
+ it gt
+ movgt r6, r5
+ cmp r6, lr
+ it lt
+ movlt r6, lr
+.if \n >= 2
+ pop {lr}
+.endif
+ subs r1, r1, #1
+ vext.8 q1, q1, q1, #4
+ vmov.16 d1[3], r6
+ bgt 1b
+ pop {r0, pc}
+endfunc
+.endm
+
+output_lag 1
+output_lag 2
+output_lag 3
+
+
+function sum_lag1_above_neon
+ sub r12, r0, #1*GRAIN_WIDTH*2 - 16
+ vld1.16 {q10}, [r12] // load top right
+
+ vext.8 q0, q8, q9, #14 // top left, top mid
+ vext.8 q1, q9, q10, #2 // top left, top mid
+
+ vmull.s16 q2, d18, d28
+ vmlal.s16 q2, d0, d27
+ vmlal.s16 q2, d2, d29
+ vmull.s16 q3, d19, d28
+ vmlal.s16 q3, d1, d27
+ vmlal.s16 q3, d3, d29
+
+ vmov q8, q9
+ vmov q9, q10
+
+ bx lr
+endfunc
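+
+// For 8 columns, this accumulates the coefficient-weighted samples from the
+// row above:
+//   sum[x] = c0*above[x-1] + c1*above[x] + c2*above[x+1]
+// and slides the q8/q9 row window one 8-sample block to the right.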
+
+.macro sum_lag_n_body lag, type, uv_layout, edge, elems, uv_coeff
+.ifc \lag\()_\edge, lag3_left
+ bl sum_lag3_left_above_neon
+.else
+ bl sum_\lag\()_above_neon
+.endif
+.ifc \type, uv_420
+ vpush {q6-q7}
+ add r12, r11, #GRAIN_WIDTH*2
+ vld1.16 {q0, q1}, [r11]!
+ vld1.16 {q6, q7}, [r12]!
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d12, d12, d13
+ vpadd.i16 d13, d14, d15
+ vadd.i16 q0, q0, q6
+ vpop {q6-q7}
+ vrshr.s16 q0, q0, #2
+.endif
+.ifc \type, uv_422
+ vld1.16 {q0, q1}, [r11]!
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vrshr.s16 q0, q0, #1
+.endif
+.ifc \type, uv_444
+ vld1.16 {q0}, [r11]!
+.endif
+.if \uv_layout
+.ifnb \uv_coeff
+ vdup.8 d13, \uv_coeff
+ vmovl.s8 q6, d13
+.endif
+ vmlal.s16 q2, d0, d13
+ vmlal.s16 q3, d1, d13
+.endif
+.if \uv_layout && \elems == 8
+ b sum_\lag\()_y_\edge\()_start
+.elseif \uv_layout == 444 && \elems == 7
+ b sum_\lag\()_y_\edge\()_start
+.elseif \uv_layout == 422 && \elems == 1
+ b sum_\lag\()_uv_420_\edge\()_start
+.else
+sum_\lag\()_\type\()_\edge\()_start:
+ push {r11}
+.if \elems > 4
+.ifc \edge, left
+ increment_seed 4
+ read_rand r11, 11, 3
+ read_rand r12, 11, 2
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d1[1]}, [r11]
+ read_rand r11, 11, 1
+ vld1.16 {d1[2]}, [r12]
+ add r11, r3, r11, lsl #1
+ vld1.16 {d1[3]}, [r11]
+ lsl r2, r2, #1 // shift back the state as if we'd done increment_seed with shift=0
+ vrshl.s16 d1, d1, d30
+ vext.8 q2, q2, q2, #12
+.ifc \lag, lag3
+ vmov.s16 r10, d1[1]
+.endif
+.ifnc \lag, lag1
+ vmov.s16 r8, d1[2]
+.endif
+ vmov.s16 r6, d1[3]
+
+ vmov q1, q2
+ mov r1, #1
+ bl output_\lag\()_neon
+.else
+ increment_seed 4, shift=0
+ vmov q1, q2
+ mov r1, #4
+ bl output_\lag\()_neon
+.endif
+
+ increment_seed 4, shift=0
+ vmov q1, q3
+.ifc \edge, right
+ mov r1, #3
+ bl output_\lag\()_neon
+ read_shift_rand r12, 11
+ add r12, r3, r12, lsl #1
+ vld1.16 {d2[0]}, [r12]
+ vrshl.s16 d2, d2, d30
+ vext.8 q0, q0, q1, #2
+.else
+ mov r1, #4
+ bl output_\lag\()_neon
+.endif
+.else
+ // elems == 1
+ increment_seed 4, shift=0
+ vmov q1, q2
+ mov r1, #1
+ bl output_\lag\()_neon
+ lsr r2, r2, #3
+
+ read_rand r11, 11, 2
+ read_rand r12, 11, 1
+ add r11, r3, r11, lsl #1
+ add r12, r3, r12, lsl #1
+ vld1.16 {d2[0]}, [r11]
+ read_rand r11, 11, 0
+ vld1.16 {d2[1]}, [r12]
+ add r11, r3, r11, lsl #1
+ vld1.16 {d2[2]}, [r11]
+ vrshl.s16 d2, d2, d30
+ vext.8 q0, q0, q1, #14
+.endif
+ vst1.16 {q0}, [r0]!
+ pop {r11}
+ pop {r1, pc}
+.endif
+.endm
+
+.macro sum_lag1_func type, uv_layout, edge, elems=8
+function sum_\type\()_lag1_\edge\()_neon
+ push {r1, lr}
+.ifc \edge, left
+ sub r12, r0, #1*GRAIN_WIDTH*2
+ vld1.8 {q9}, [r12] // load the previous block right above
+.endif
+ sum_lag_n_body lag1, \type, \uv_layout, \edge, \elems
+endfunc
+.endm
+
+sum_lag1_func y, 0, left
+sum_lag1_func y, 0, mid
+sum_lag1_func y, 0, right, 7
+sum_lag1_func uv_444, 444, left
+sum_lag1_func uv_444, 444, mid
+sum_lag1_func uv_444, 444, right, 7
+sum_lag1_func uv_422, 422, left
+sum_lag1_func uv_422, 422, mid
+sum_lag1_func uv_422, 422, right, 1
+sum_lag1_func uv_420, 420, left
+sum_lag1_func uv_420, 420, mid
+sum_lag1_func uv_420, 420, right, 1
+
+
+function sum_lag2_above_neon
+ push {lr}
+ sub r12, r0, #2*GRAIN_WIDTH*2 - 16
+ sub lr, r0, #1*GRAIN_WIDTH*2 - 16
+ vld1.16 {q10}, [r12] // load top right
+ vld1.16 {q13}, [lr]
+
+ vdup.8 d10, d28[0]
+ vext.8 q0, q8, q9, #12 // top left, top mid
+ vdup.8 d12, d28[1]
+ vext.8 q1, q8, q9, #14
+ vdup.8 d14, d28[3]
+ vext.8 q4, q9, q10, #2 // top mid, top right
+ vmovl.s8 q5, d10
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+
+ vmull.s16 q2, d0, d10
+ vmlal.s16 q2, d2, d12
+ vmlal.s16 q2, d8, d14
+ vmull.s16 q3, d1, d10
+ vmlal.s16 q3, d3, d12
+ vmlal.s16 q3, d9, d14
+
+ vdup.8 d10, d28[4]
+ vext.8 q0, q9, q10, #4 // top mid, top right
+ vdup.8 d12, d28[5]
+ vext.8 q1, q11, q12, #12 // top left, top mid
+ vdup.8 d14, d28[6]
+ vext.8 q4, q11, q12, #14
+ vmovl.s8 q5, d10
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+
+ vmlal.s16 q2, d0, d10
+ vmlal.s16 q2, d2, d12
+ vmlal.s16 q2, d8, d14
+ vmlal.s16 q3, d1, d10
+ vmlal.s16 q3, d3, d12
+ vmlal.s16 q3, d9, d14
+
+ vdup.8 d10, d29[0]
+ vext.8 q0, q12, q13, #2 // top mid, top right
+ vdup.8 d12, d29[1]
+ vext.8 q1, q12, q13, #4
+
+ vdup.8 d14, d28[2]
+ vdup.8 d8, d28[7]
+
+ vmovl.s8 q5, d10
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+ vmovl.s8 q4, d8
+
+ vmlal.s16 q2, d0, d10
+ vmlal.s16 q2, d2, d12
+ vmlal.s16 q2, d18, d14
+ vmlal.s16 q2, d24, d8
+ vmlal.s16 q3, d1, d10
+ vmlal.s16 q3, d3, d12
+ vmlal.s16 q3, d19, d14
+ vmlal.s16 q3, d25, d8
+
+ vmov q8, q9
+ vmov q9, q10
+
+ vmov q11, q12
+ vmov q12, q13
+
+ pop {pc}
+endfunc
+
+.macro sum_lag2_func type, uv_layout, edge, elems=8
+function sum_\type\()_lag2_\edge\()_neon
+ push {r1, lr}
+.ifc \edge, left
+ sub r12, r0, #2*GRAIN_WIDTH*2
+ sub lr, r0, #1*GRAIN_WIDTH*2
+ vld1.16 {q9}, [r12] // load the previous block right above
+ vld1.16 {q12}, [lr]
+.endif
+ sum_lag_n_body lag2, \type, \uv_layout, \edge, \elems, uv_coeff=d29[4]
+endfunc
+.endm
+
+sum_lag2_func y, 0, left
+sum_lag2_func y, 0, mid
+sum_lag2_func y, 0, right, 7
+sum_lag2_func uv_444, 444, left
+sum_lag2_func uv_444, 444, mid
+sum_lag2_func uv_444, 444, right, 7
+sum_lag2_func uv_422, 422, left
+sum_lag2_func uv_422, 422, mid
+sum_lag2_func uv_422, 422, right, 1
+sum_lag2_func uv_420, 420, left
+sum_lag2_func uv_420, 420, mid
+sum_lag2_func uv_420, 420, right, 1
+
+
+function sum_lag3_left_above_neon
+ // A separate codepath for the left edge, to avoid reading outside
+ // of the edge of the buffer.
+ sub r12, r0, #3*GRAIN_WIDTH*2
+ vld1.8 {q11, q12}, [r12]
+ vext.8 q12, q11, q12, #10
+ vext.8 q11, q11, q11, #10
+ b sum_lag3_above_start
+endfunc
+
+function sum_lag3_above_neon
+ movw r12, #(3*GRAIN_WIDTH + 3)*2
+ sub r12, r0, r12
+ vld1.8 {q11, q12}, [r12]
+
+sum_lag3_above_start:
+ vdup.8 d12, d26[0]
+ vext.8 q1, q11, q12, #2
+ vdup.8 d14, d26[1]
+ vext.8 q4, q11, q12, #4
+ vdup.8 d16, d26[2]
+ vext.8 q5, q11, q12, #6
+ vdup.8 d18, d26[3]
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+ vmovl.s8 q8, d16
+ vmovl.s8 q9, d18
+
+ movw r12, #(2*GRAIN_WIDTH + 3)*2
+ sub r12, r0, r12
+
+ vmull.s16 q2, d22, d12
+ vmlal.s16 q2, d2, d14
+ vmlal.s16 q2, d8, d16
+ vmlal.s16 q2, d10, d18
+ vmull.s16 q3, d23, d12
+ vmlal.s16 q3, d3, d14
+ vmlal.s16 q3, d9, d16
+ vmlal.s16 q3, d11, d18
+
+ vdup.8 d12, d26[4]
+ vext.8 q0, q11, q12, #8
+ vdup.8 d14, d26[5]
+ vext.8 q1, q11, q12, #10
+ vdup.8 d16, d26[6]
+ vext.8 q4, q11, q12, #12
+ vld1.8 {q11, q12}, [r12]
+ vdup.8 d18, d26[7]
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+ vmovl.s8 q8, d16
+ vmovl.s8 q9, d18
+
+ vmlal.s16 q2, d0, d12
+ vmlal.s16 q2, d2, d14
+ vmlal.s16 q2, d8, d16
+ vmlal.s16 q2, d22, d18
+ vmlal.s16 q3, d1, d12
+ vmlal.s16 q3, d3, d14
+ vmlal.s16 q3, d9, d16
+ vmlal.s16 q3, d23, d18
+
+ vdup.8 d12, d27[0]
+ vext.8 q0, q11, q12, #2
+ vdup.8 d14, d27[1]
+ vext.8 q1, q11, q12, #4
+ vdup.8 d16, d27[2]
+ vext.8 q4, q11, q12, #6
+ vdup.8 d18, d27[3]
+ vext.8 q5, q11, q12, #8
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+ vmovl.s8 q8, d16
+ vmovl.s8 q9, d18
+
+ sub r12, r0, #(1*GRAIN_WIDTH + 3)*2
+
+ vmlal.s16 q2, d0, d12
+ vmlal.s16 q2, d2, d14
+ vmlal.s16 q2, d8, d16
+ vmlal.s16 q2, d10, d18
+ vmlal.s16 q3, d1, d12
+ vmlal.s16 q3, d3, d14
+ vmlal.s16 q3, d9, d16
+ vmlal.s16 q3, d11, d18
+
+ vdup.8 d12, d27[4]
+ vext.8 q0, q11, q12, #10
+ vdup.8 d14, d27[5]
+ vext.8 q1, q11, q12, #12
+ vld1.8 {q11, q12}, [r12]
+ vdup.8 d16, d27[6]
+ vdup.8 d18, d27[7]
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+ vext.8 q5, q11, q12, #2
+ vmovl.s8 q8, d16
+ vmovl.s8 q9, d18
+
+ vmlal.s16 q2, d0, d12
+ vmlal.s16 q2, d2, d14
+ vmlal.s16 q2, d22, d16
+ vmlal.s16 q2, d10, d18
+ vmlal.s16 q3, d1, d12
+ vmlal.s16 q3, d3, d14
+ vmlal.s16 q3, d23, d16
+ vmlal.s16 q3, d11, d18
+
+ vdup.8 d12, d28[0]
+ vext.8 q0, q11, q12, #4
+ vdup.8 d14, d28[1]
+ vext.8 q1, q11, q12, #6
+ vdup.8 d16, d28[2]
+ vext.8 q4, q11, q12, #8
+ vdup.8 d18, d28[3]
+ vext.8 q5, q11, q12, #10
+ vmovl.s8 q6, d12
+ vmovl.s8 q7, d14
+ vmovl.s8 q8, d16
+ vmovl.s8 q9, d18
+
+ vmlal.s16 q2, d0, d12
+ vmlal.s16 q2, d2, d14
+ vmlal.s16 q2, d8, d16
+ vmlal.s16 q2, d10, d18
+ vmlal.s16 q3, d1, d12
+ vmlal.s16 q3, d3, d14
+ vmlal.s16 q3, d9, d16
+ vmlal.s16 q3, d11, d18
+
+ vdup.8 d12, d28[4]
+ vext.8 q0, q11, q12, #12
+ vmovl.s8 q6, d12
+
+ vmlal.s16 q2, d0, d12
+ vmlal.s16 q3, d1, d12
+
+ bx lr
+endfunc
+
+.macro sum_lag3_func type, uv_layout, edge, elems=8
+function sum_\type\()_lag3_\edge\()_neon
+ push {r1, lr}
+ sum_lag_n_body lag3, \type, \uv_layout, \edge, \elems, uv_coeff=d29[0]
+endfunc
+.endm
+
+sum_lag3_func y, 0, left
+sum_lag3_func y, 0, mid
+sum_lag3_func y, 0, right, 7
+sum_lag3_func uv_444, 444, left
+sum_lag3_func uv_444, 444, mid
+sum_lag3_func uv_444, 444, right, 7
+sum_lag3_func uv_422, 422, left
+sum_lag3_func uv_422, 422, mid
+sum_lag3_func uv_422, 422, right, 1
+sum_lag3_func uv_420, 420, left
+sum_lag3_func uv_420, 420, mid
+sum_lag3_func uv_420, 420, right, 1
+
+function generate_grain_rows_neon
+ push {r10-r11,lr}
+1:
+ mov r10, #80
+2:
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ subs r10, r10, #8
+ vst1.16 {q0}, [r0]!
+ bgt 2b
+ get_grain_2 d0
+ subs r1, r1, #1
+ vst1.32 {d0[0]}, [r0]!
+ bgt 1b
+ pop {r10-r11,pc}
+endfunc
+
+function generate_grain_rows_44_neon
+ push {r10-r11,lr}
+1:
+ mov r10, #40
+2:
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+ subs r10, r10, #8
+ vst1.16 {q0}, [r0]!
+ bgt 2b
+ get_grain_4 d0
+ subs r1, r1, #1
+ vst1.16 {d0}, [r0]
+ add r0, r0, #GRAIN_WIDTH*2-80
+ bgt 1b
+ pop {r10-r11,pc}
+endfunc
+
+function gen_grain_uv_444_lag0_neon
+ vld1.16 {q3}, [r11]!
+gen_grain_uv_lag0_8_start:
+ push {r11,lr}
+ bl get_gaussian_neon
+ vrshl.s16 q0, q0, q15
+gen_grain_uv_lag0_8_add:
+ vand q3, q3, q1
+ vmull.s16 q2, d6, d22
+ vmull.s16 q3, d7, d22
+ vrshl.s32 q2, q2, q12
+ vrshl.s32 q3, q3, q12
+ vqmovn.s32 d4, q2
+ vqmovn.s32 d5, q3
+ vqadd.s16 q2, q2, q0
+ vmin.s16 q2, q2, q9
+ vmax.s16 q2, q2, q10
+ vst1.16 {q2}, [r0]!
+ pop {r11,pc}
+endfunc
+
+function gen_grain_uv_420_lag0_8_neon
+ add r12, r11, #GRAIN_WIDTH*2
+ vld1.16 {q2,q3}, [r11]!
+ vld1.16 {q4,q5}, [r12]
+ vpadd.i16 d4, d4, d5
+ vpadd.i16 d5, d6, d7
+ vpadd.i16 d8, d8, d9
+ vpadd.i16 d9, d10, d11
+ vadd.i16 q2, q2, q4
+ vrshr.s16 q3, q2, #2
+ b gen_grain_uv_lag0_8_start
+endfunc
+
+function gen_grain_uv_422_lag0_8_neon
+ vld1.16 {q2,q3}, [r11]!
+ vpadd.i16 d4, d4, d5
+ vpadd.i16 d5, d6, d7
+ vrshr.s16 q3, q2, #1
+ b gen_grain_uv_lag0_8_start
+endfunc
+
+function gen_grain_uv_420_lag0_4_neon
+ add r12, r11, #GRAIN_WIDTH*2
+ vld1.16 {q2}, [r11]
+ vld1.16 {q0}, [r12]
+ add r11, r11, #32
+ vpadd.i16 d4, d4, d5
+ vpadd.i16 d0, d0, d1
+ vadd.i16 d4, d4, d0
+ vrshr.s16 d6, d4, #2
+ push {r11,lr}
+ get_grain_4 d0
+ b gen_grain_uv_lag0_8_add
+endfunc
+
+function gen_grain_uv_422_lag0_4_neon
+ vld1.16 {q2}, [r11]
+ add r11, r11, #32
+ vpadd.i16 d4, d4, d5
+ vrshr.s16 d6, d4, #1
+ push {r11,lr}
+ get_grain_4 d0
+ b gen_grain_uv_lag0_8_add
+endfunc
+
+.macro gen_grain_82 type
+function generate_grain_\type\()_16bpc_neon, export=1
+ push {r4-r11,lr}
+
+.ifc \type, uv_444
+ ldr r4, [sp, #36]
+ mov r12, r3
+ mov lr, #28
+ add r11, r1, #3*GRAIN_WIDTH*2
+ mov r1, r2
+ mul r12, r12, lr
+ clz lr, r4
+.else
+ clz lr, r2
+.endif
+ movrel r3, X(gaussian_sequence)
+ sub lr, lr, #24 // -bitdepth_min_8
+ ldr r2, [r1, #FGD_SEED]
+ ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
+.ifc \type, y
+ add r4, r1, #FGD_AR_COEFFS_Y
+.else
+ add r4, r1, #FGD_AR_COEFFS_UV
+.endif
+ add r9, r9, lr // grain_scale_shift - bitdepth_min_8
+ adr r5, L(gen_grain_\type\()_tbl)
+ ldr r6, [r1, #FGD_AR_COEFF_LAG]
+ add r9, r9, #4
+ ldr r6, [r5, r6, lsl #2]
+ vdup.16 q15, r9 // 4 - bitdepth_min_8 + data->grain_scale_shift
+ add r5, r5, r6
+ vneg.s16 q15, q15
+
+.ifc \type, uv_444
+ push {lr}
+ cmp r12, #0
+ movw r10, #0x49d8
+ movw lr, #0xb524
+ // Intentionally using a separate register instead of moveq with an
+ // immediate constant, to avoid ARMv8-deprecated IT instruction forms.
+ it eq
+ moveq r10, lr
+ add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
+ eor r2, r2, r10
+ pop {lr}
+.endif
+
+ ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
+ neg lr, lr // bitdepth_min_8
+ mov r8, #1
+ mov r10, #1
+ lsl r8, r8, r7 // 1 << ar_coeff_shift
+ lsl r10, r10, r9 // 1 << (4 - bitdepth_min_8 + data->grain_scale_shift)
+ lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
+ lsr r10, r10, #1 // 1 << (4 - bitdepth_min_8 + data->grain_scale_shift - 1)
+
+ bx r5
+
+ .align 2
+L(gen_grain_\type\()_tbl):
+ .word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+
+L(generate_grain_\type\()_lag0):
+.ifc \type, y
+ mov r1, #GRAIN_HEIGHT
+ bl generate_grain_rows_neon
+.else
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ mvn r6, r5 // grain_min = ~grain_max
+
+ mov r1, #3
+ bl generate_grain_rows_neon
+ mov r1, #GRAIN_HEIGHT-3
+
+ vdup.32 q12, r7
+ vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
+ vmov.i8 q0, #0
+ vmov.i8 q1, #255
+ vdup.16 q9, r5
+ vdup.16 q10, r6
+ vext.8 q13, q0, q1, #10
+ vext.8 q14, q1, q0, #2
+ vneg.s32 q12, q12
+ vmovl.s8 q11, d22
+
+1:
+ vmov q1, q13
+ bl gen_grain_uv_444_lag0_neon // 8
+ vmov.i8 q1, #255
+ bl gen_grain_uv_444_lag0_neon // 16
+ bl gen_grain_uv_444_lag0_neon // 24
+ bl gen_grain_uv_444_lag0_neon // 32
+ bl gen_grain_uv_444_lag0_neon // 40
+ bl gen_grain_uv_444_lag0_neon // 48
+ bl gen_grain_uv_444_lag0_neon // 56
+ bl gen_grain_uv_444_lag0_neon // 64
+ bl gen_grain_uv_444_lag0_neon // 72
+ vmov q1, q14
+ bl gen_grain_uv_444_lag0_neon // 80
+ get_grain_2 d16
+ subs r1, r1, #1
+ add r11, r11, #4
+ vst1.32 {d16[0]}, [r0]!
+ bgt 1b
+.endif
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag1):
+ vpush {q4-q7}
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ vld1.8 {d27[]}, [r4]! // ar_coeffs_y[0]
+ vld1.8 {d28[]}, [r4]! // ar_coeffs_y[1]
+ vld1.8 {d29[]}, [r4] // ar_coeffs_y[2]
+.ifc \type, y
+ ldrsb r4, [r4, #1] // ar_coeffs_y[3]
+.else
+ add r4, r4, #2
+.endif
+
+ mov r1, #3
+.ifc \type, uv_444
+ vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
+ ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
+.endif
+ bl generate_grain_rows_neon
+ vmovl.s8 q13, d27
+ vmovl.s8 q12, d29
+ vmovl.s8 q14, d28
+ vmov d29, d24
+.ifc \type, uv_444
+ vmovl.s8 q6, d13
+.endif
+
+ mov r1, #GRAIN_HEIGHT - 3
+1:
+ bl sum_\type\()_lag1_left_neon // 8
+ bl sum_\type\()_lag1_mid_neon // 16
+ bl sum_\type\()_lag1_mid_neon // 24
+ bl sum_\type\()_lag1_mid_neon // 32
+ bl sum_\type\()_lag1_mid_neon // 40
+ bl sum_\type\()_lag1_mid_neon // 48
+ bl sum_\type\()_lag1_mid_neon // 56
+ bl sum_\type\()_lag1_mid_neon // 64
+ bl sum_\type\()_lag1_mid_neon // 72
+ bl sum_\type\()_lag1_right_neon // 80
+ get_grain_2 d16
+ subs r1, r1, #1
+.ifc \type, uv_444
+ add r11, r11, #4
+.endif
+ vst1.32 {d16[0]}, [r0]!
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag2):
+ vpush {q4-q7}
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ vld1.8 {d28,d29}, [r4] // ar_coeffs_y[0-11], ar_coeffs_uv[0-12]
+
+ vmov.s8 r4, d29[2]
+ vmov.s8 r10, d29[3]
+
+ mov r1, #3
+ bl generate_grain_rows_neon
+
+ mov r1, #GRAIN_HEIGHT - 3
+1:
+ bl sum_\type\()_lag2_left_neon // 8
+ bl sum_\type\()_lag2_mid_neon // 16
+ bl sum_\type\()_lag2_mid_neon // 24
+ bl sum_\type\()_lag2_mid_neon // 32
+ bl sum_\type\()_lag2_mid_neon // 40
+ bl sum_\type\()_lag2_mid_neon // 48
+ bl sum_\type\()_lag2_mid_neon // 56
+ bl sum_\type\()_lag2_mid_neon // 64
+ bl sum_\type\()_lag2_mid_neon // 72
+ bl sum_\type\()_lag2_right_neon // 80
+ get_grain_2 d16
+ subs r1, r1, #1
+.ifc \type, uv_444
+ add r11, r11, #4
+.endif
+ vst1.32 {d16[0]}, [r0]!
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag3):
+ vpush {q4-q7}
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
+
+ vmov.u8 r4, d28[5]
+ vmov.u8 r10, d28[6]
+ vmov.u8 r12, d28[7]
+
+ orr r4, r4, r10, lsl #8
+ orr r4, r4, r12, lsl #16
+
+ mov r1, #3
+ vpush {d26}
+ bl generate_grain_rows_neon
+ vpop {d26}
+
+ mov r1, #GRAIN_HEIGHT - 3
+1:
+ bl sum_\type\()_lag3_left_neon // 8
+ bl sum_\type\()_lag3_mid_neon // 16
+ bl sum_\type\()_lag3_mid_neon // 24
+ bl sum_\type\()_lag3_mid_neon // 32
+ bl sum_\type\()_lag3_mid_neon // 40
+ bl sum_\type\()_lag3_mid_neon // 48
+ bl sum_\type\()_lag3_mid_neon // 56
+ bl sum_\type\()_lag3_mid_neon // 64
+ bl sum_\type\()_lag3_mid_neon // 72
+ bl sum_\type\()_lag3_right_neon // 80
+ get_grain_2 d16
+ subs r1, r1, #1
+.ifc \type, uv_444
+ add r11, r11, #4
+.endif
+ vst1.32 {d16[0]}, [r0]!
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+gen_grain_82 y
+gen_grain_82 uv_444
+
+.macro set_height dst, type
+.ifc \type, uv_420
+ mov \dst, #SUB_GRAIN_HEIGHT-3
+.else
+ mov \dst, #GRAIN_HEIGHT-3
+.endif
+.endm
+
+.macro increment_y_ptr reg, type
+.ifc \type, uv_420
+ add \reg, \reg, #2*GRAIN_WIDTH*2-(6*32)
+.else
+ sub \reg, \reg, #6*32-GRAIN_WIDTH*2
+.endif
+.endm
+
+.macro gen_grain_44 type
+function generate_grain_\type\()_16bpc_neon, export=1
+ push {r4-r11,lr}
+
+ ldr r4, [sp, #36]
+ mov r12, r3
+ movw r11, #(3*GRAIN_WIDTH-3)*2
+ mov lr, #28
+ add r11, r1, r11
+ mov r1, r2
+ mul r12, r12, lr
+ clz lr, r4
+
+ movrel r3, X(gaussian_sequence)
+ sub lr, lr, #24 // -bitdepth_min_8
+ ldr r2, [r1, #FGD_SEED]
+ ldr r9, [r1, #FGD_GRAIN_SCALE_SHIFT]
+ add r4, r1, #FGD_AR_COEFFS_UV
+ add r9, r9, lr // grain_scale_shift - bitdepth_min_8
+ adr r5, L(gen_grain_\type\()_tbl)
+ ldr r6, [r1, #FGD_AR_COEFF_LAG]
+ add r9, r9, #4
+ ldr r6, [r5, r6, lsl #2]
+ vdup.16 q15, r9 // 4 - bitdepth_min_8 + data->grain_scale_shift
+ add r5, r5, r6
+ vneg.s16 q15, q15
+
+ push {lr}
+ cmp r12, #0
+ movw r10, #0x49d8
+ movw lr, #0xb524
+ // Intentionally using a separate register instead of moveq with an
+ // immediate constant, to avoid ARMv8-deprecated IT instruction forms.
+ it eq
+ moveq r10, lr
+ add r4, r4, r12 // Add offset to ar_coeffs_uv[1]
+ eor r2, r2, r10
+ pop {lr}
+
+ ldr r7, [r1, #FGD_AR_COEFF_SHIFT]
+ neg lr, lr
+ mov r8, #1
+ mov r10, #1
+ lsl r8, r8, r7 // 1 << ar_coeff_shift
+ lsl r10, r10, r9 // 1 << (4 - bitdepth_min_8 + data->grain_scale_shift)
+ lsr r8, r8, #1 // 1 << (ar_coeff_shift - 1)
+ lsr r10, r10, #1 // 1 << (4 - bitdepth_min_8 + data->grain_scale_shift - 1)
+ bx r5
+
+ .align 2
+L(gen_grain_\type\()_tbl):
+ .word L(generate_grain_\type\()_lag0) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag1) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag2) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+ .word L(generate_grain_\type\()_lag3) - L(gen_grain_\type\()_tbl) + CONFIG_THUMB
+
+L(generate_grain_\type\()_lag0):
+.ifc \type, uv_420
+ vpush {q4-q5}
+.endif
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ mvn r6, r5 // grain_min = ~grain_max
+
+ mov r1, #3
+ bl generate_grain_rows_44_neon
+ set_height r1, \type
+
+ vdup.32 q12, r7
+ vld1.8 {d22[]}, [r4] // ar_coeffs_uv[0]
+ vmov.i8 q0, #0
+ vmov.i8 q1, #255
+ vdup.16 q9, r5
+ vdup.16 q10, r6
+ vext.8 q13, q0, q1, #10
+ vext.8 q14, q1, q0, #14
+ vneg.s32 q12, q12
+ vmovl.s8 q11, d22
+
+1:
+ vmov q1, q13
+ bl gen_grain_\type\()_lag0_8_neon // 8
+ vmov.i8 q1, #255
+ bl gen_grain_\type\()_lag0_8_neon // 16
+ bl gen_grain_\type\()_lag0_8_neon // 24
+ bl gen_grain_\type\()_lag0_8_neon // 32
+ bl gen_grain_\type\()_lag0_8_neon // 40
+ vmov q1, q14
+ bl gen_grain_\type\()_lag0_4_neon // 44
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ add r0, r0, #GRAIN_WIDTH*2-6*16
+ bgt 1b
+
+.ifc \type, uv_420
+ vpop {q4-q5}
+.endif
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag1):
+ vpush {q4-q7}
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ vld1.8 {d27[]}, [r4]! // ar_coeffs_uv[0]
+ vld1.8 {d28[]}, [r4]! // ar_coeffs_uv[1]
+ vld1.8 {d29[]}, [r4] // ar_coeffs_uv[2]
+ add r4, r4, #2
+
+ mov r1, #3
+ vld1.8 {d13[]}, [r4] // ar_coeffs_uv[4]
+ ldrsb r4, [r4, #-1] // ar_coeffs_uv[3]
+ bl generate_grain_rows_44_neon
+ vmovl.s8 q13, d27
+ vmovl.s8 q12, d29
+ vmovl.s8 q14, d28
+ vmov d29, d24
+ vmovl.s8 q6, d13
+
+ set_height r1, \type
+1:
+ bl sum_\type\()_lag1_left_neon // 8
+ bl sum_\type\()_lag1_mid_neon // 16
+ bl sum_\type\()_lag1_mid_neon // 24
+ bl sum_\type\()_lag1_mid_neon // 32
+ bl sum_\type\()_lag1_mid_neon // 40
+ bl sum_\type\()_lag1_right_neon // 44
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ add r0, r0, #GRAIN_WIDTH*2-6*16
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag2):
+ vpush {q4-q7}
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ vld1.8 {d28,d29}, [r4] // ar_coeffs_uv[0-12]
+
+ vmov.s8 r4, d29[2]
+ vmov.s8 r10, d29[3]
+
+ mov r1, #3
+ bl generate_grain_rows_44_neon
+
+ set_height r1, \type
+1:
+ bl sum_\type\()_lag2_left_neon // 8
+ bl sum_\type\()_lag2_mid_neon // 16
+ bl sum_\type\()_lag2_mid_neon // 24
+ bl sum_\type\()_lag2_mid_neon // 32
+ bl sum_\type\()_lag2_mid_neon // 40
+ bl sum_\type\()_lag2_right_neon // 44
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ add r0, r0, #GRAIN_WIDTH*2-6*16
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(generate_grain_\type\()_lag3):
+ vpush {q4-q7}
+ mov r5, #128
+ lsl r5, r5, lr // 128 << bitdepth_min_8
+ sub r5, r5, #1 // (128 << bitdepth_min_8) - 1
+ vld1.8 {q13, q14}, [r4] // ar_coeffs_y[0-23], ar_coeffs_uv[0-24]
+
+ vmov.u8 r4, d28[5]
+ vmov.u8 r10, d28[6]
+ vmov.u8 r12, d28[7]
+
+ orr r4, r4, r10, lsl #8
+ orr r4, r4, r12, lsl #16
+
+ mov r1, #3
+ bl generate_grain_rows_44_neon
+
+ set_height r1, \type
+1:
+ bl sum_\type\()_lag3_left_neon // 8
+ bl sum_\type\()_lag3_mid_neon // 16
+ bl sum_\type\()_lag3_mid_neon // 24
+ bl sum_\type\()_lag3_mid_neon // 32
+ bl sum_\type\()_lag3_mid_neon // 40
+ bl sum_\type\()_lag3_right_neon // 44
+ subs r1, r1, #1
+ increment_y_ptr r11, \type
+ add r0, r0, #GRAIN_WIDTH*2-6*16
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+gen_grain_44 uv_420
+gen_grain_44 uv_422
+
+.macro gather_interleaved dst1, dst2, src1, src2, src3, src4, off
+ vmov.u16 r11, \src1[0+\off]
+ vmov.u16 r12, \src3[0+\off]
+ add r11, r11, r3
+ vmov.u16 lr, \src1[2+\off]
+ add r12, r12, r3
+ vld1.8 {\dst1[0+\off]}, [r11]
+ vmov.u16 r11, \src3[2+\off]
+ add lr, lr, r3
+ vld1.8 {\dst2[0+\off]}, [r12]
+ vmov.u16 r12, \src2[0+\off]
+ add r11, r11, r3
+ vld1.8 {\dst1[2+\off]}, [lr]
+ vmov.u16 lr, \src4[0+\off]
+ add r12, r12, r3
+ vld1.8 {\dst2[2+\off]}, [r11]
+ vmov.u16 r11, \src2[2+\off]
+ add lr, lr, r3
+ vld1.8 {\dst1[4+\off]}, [r12]
+ vmov.u16 r12, \src4[2+\off]
+ add r11, r11, r3
+ vld1.8 {\dst2[4+\off]}, [lr]
+ add r12, r12, r3
+ vld1.8 {\dst1[6+\off]}, [r11]
+ vld1.8 {\dst2[6+\off]}, [r12]
+.endm
+
+.macro gather dst1, dst2, dst3, dst4, src1, src2, src3, src4, src5, src6, src7, src8
+ gather_interleaved \dst1, \dst3, \src1, \src2, \src5, \src6, 0
+ gather_interleaved \dst1, \dst3, \src1, \src2, \src5, \src6, 1
+ gather_interleaved \dst2, \dst4, \src3, \src4, \src7, \src8, 0
+ gather_interleaved \dst2, \dst4, \src3, \src4, \src7, \src8, 1
+.endm
+
+function gather32_neon
+ push {r11-r12,lr}
+ gather d8, d9, d10, d11, d0, d1, d2, d3, d4, d5, d6, d7
+ pop {r11-r12,pc}
+endfunc
+
+function gather16_neon
+ push {r11-r12,lr}
+ gather_interleaved d8, d9, d0, d1, d2, d3, 0
+ gather_interleaved d8, d9, d0, d1, d2, d3, 1
+ pop {r11-r12,pc}
+endfunc
+
+const overlap_coeffs_0, align=4
+ .short 27, 17, 0, 0
+ .short 17, 27, 32, 32
+endconst
+
+const overlap_coeffs_1, align=4
+ .short 23, 0, 0, 0
+ .short 22, 32, 32, 32
+endconst
+
+.macro calc_offset offx, offy, src, sx, sy
+ and \offy, \src, #0xF // randval & 0xF
+ lsr \offx, \src, #4 // randval >> 4
+.if \sy == 0
+ add \offy, \offy, \offy // 2 * (randval & 0xF)
+.endif
+.if \sx == 0
+ add \offx, \offx, \offx // 2 * (randval >> 4)
+.endif
+.endm
+
+.macro add_offset dst, offx, offy, src, stride
+ mla \dst, \stride, \offy, \src // grain_lut += grain_stride * offy
+ add \dst, \dst, \offx, lsl #1 // grain_lut += offx
+.endm
+
+// void dav1d_fgy_32x32_16bpc_neon(pixel *const dst, const pixel *const src,
+// const ptrdiff_t stride,
+// const uint8_t scaling[SCALING_SIZE],
+// const int scaling_shift,
+// const entry grain_lut[][GRAIN_WIDTH],
+// const int offsets[][2],
+// const int h, const ptrdiff_t clip,
+// const ptrdiff_t type,
+// const int bitdepth_max);
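+//
+// Same per-pixel operation as the 8 bpc version; here round2(scaling * grain,
+// scaling_shift) is done with vqrdmulh on (scaling << (15 - scaling_shift)),
+// and the overlap-blended grain is clamped to +/-grain_max (bitdepth_max >> 1)
+// before use.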
+function fgy_32x32_16bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100] // scaling_shift, grain_lut
+ ldrd r6, r7, [sp, #108] // offsets, h
+ ldr r8, [sp, #116] // clip
+ mov r9, #GRAIN_WIDTH*2 // grain_lut stride
+ ldr r10, [sp, #124] // bitdepth_max
+
+ eor r4, r4, #15 // 15 - scaling_shift
+ vdup.16 q6, r10 // bitdepth_max
+ clz r10, r10
+ vdup.16 q13, r4 // 15 - scaling_shift
+ rsb r10, r10, #24 // bitdepth_min_8
+ cmp r8, #0
+ vdup.16 q12, r10 // bitdepth_min_8
+
+ movrel_local r12, overlap_coeffs_0
+
+ beq 1f
+ // clip
+ vmov.i16 q14, #16
+ vmov.i16 q15, #235
+ vshl.s16 q14, q14, q12
+ vshl.s16 q15, q15, q12
+ b 2f
+1:
+ // no clip
+ vmov.i16 q14, #0
+ vmov q15, q6
+2:
+ vshr.u16 q6, q6, #1 // grain_max
+
+ vld1.16 {d24, d25}, [r12, :128] // overlap_coeffs
+
+ add r5, r5, #18 // grain_lut += 9
+ add r5, r5, r9, lsl #3 // grain_lut += 8 * grain_stride
+ add r5, r5, r9 // grain_lut += grain_stride
+
+ ldr r10, [r6, #8] // offsets[1][0]
+ calc_offset r10, r4, r10, 0, 0
+ add_offset r4, r10, r4, r5, r9
+ ldr r10, [r6, #4] // offsets[0][1]
+ calc_offset r10, r11, r10, 0, 0
+ add_offset r11, r10, r11, r5, r9
+ ldr r10, [r6, #12] // offsets[1][1]
+ calc_offset r10, r8, r10, 0, 0
+ add_offset r8, r10, r8, r5, r9
+ ldr r6, [r6] // offsets[0][0]
+ calc_offset r6, lr, r6, 0, 0
+ add_offset r5, r6, lr, r5, r9
+
+ add r4, r4, #32*2 // grain_lut += BLOCK_SIZE * bx
+ add r6, r11, r9, lsl #5 // grain_lut += grain_stride * BLOCK_SIZE * by
+
+ ldr r10, [sp, #120] // type
+ adr r11, L(fgy_loop_tbl)
+
+ tst r10, #1
+ ldr r10, [r11, r10, lsl #2]
+
+ add r8, r8, r9, lsl #5 // grain_lut += grain_stride * BLOCK_SIZE * by
+ add r8, r8, #32*2 // grain_lut += BLOCK_SIZE * bx
+
+ add r11, r11, r10
+
+ beq 1f
+ // y overlap
+ vdup.16 d14, d24[0]
+ vdup.16 d15, d24[1]
+ mov r10, r7 // backup actual h
+ mov r7, #2
+1:
+ sub r2, r2, #32 // src_stride -= 32
+ sub r9, r9, #32 // grain_stride -= 32
+ bx r11
+endfunc
+
+function fgy_loop_neon
+L(fgy_loop_tbl):
+ .word L(loop_00) - L(fgy_loop_tbl) + CONFIG_THUMB
+ .word L(loop_01) - L(fgy_loop_tbl) + CONFIG_THUMB
+ .word L(loop_10) - L(fgy_loop_tbl) + CONFIG_THUMB
+ .word L(loop_11) - L(fgy_loop_tbl) + CONFIG_THUMB
+
+.macro fgy ox, oy
+L(loop_\ox\oy):
+1:
+.if \ox
+ vld1.16 {d0}, [r4], r9 // grain_lut old
+.endif
+.if \oy
+ vld1.16 {q2, q3}, [r6]! // grain_lut top
+.endif
+.if \ox && \oy
+ vld1.16 {d2}, [r8], r9 // grain_lut top old
+.endif
+.if \oy
+ vld1.16 {q4, q5}, [r6], r9 // grain_lut top
+.endif
+.if !\ox && !\oy
+ vld1.16 {q0, q1}, [r1, :128]! // src
+.endif
+ vld1.16 {q8, q9}, [r5]! // grain_lut
+.if !\ox && !\oy
+ vld1.16 {q2, q3}, [r1, :128], r2 // src
+.endif
+.if !\oy
+ vmvn.i16 q5, #0xf000 // 0x0fff
+.endif
+ vld1.16 {q10, q11}, [r5], r9 // grain_lut
+
+.if \ox
+ add r4, r4, #32
+ vmull.s16 q0, d0, d24
+ vmlal.s16 q0, d16, d25
+.endif
+
+.if \oy
+.if \ox
+ add r8, r8, #32
+ vmull.s16 q1, d2, d24
+ vmlal.s16 q1, d4, d25
+ vqrshrn.s32 d16, q0, #5
+ vmvn d0, d12 // grain_min
+ vqrshrn.s32 d4, q1, #5
+ vmin.s16 d16, d16, d12
+ vmin.s16 d4, d4, d12
+ vmax.s16 d16, d16, d0
+ vmax.s16 d4, d4, d0
+.endif
+
+ vmull.s16 q0, d4, d14
+ vmull.s16 q1, d5, d14
+ vmull.s16 q2, d6, d14
+ vmull.s16 q3, d7, d14
+ vmlal.s16 q0, d16, d15
+ vmlal.s16 q1, d17, d15
+ vmlal.s16 q2, d18, d15
+ vmlal.s16 q3, d19, d15
+ vmull.s16 q8, d20, d15
+ vmull.s16 q9, d21, d15
+ vmull.s16 q10, d22, d15
+ vmull.s16 q11, d23, d15
+ vmlal.s16 q8, d8, d14
+ vmlal.s16 q9, d9, d14
+ vmlal.s16 q10, d10, d14
+ vmlal.s16 q11, d11, d14
+ vmvn q4, q6 // grain_min
+ vqrshrn.s32 d0, q0, #5
+ vqrshrn.s32 d1, q1, #5
+ vqrshrn.s32 d2, q2, #5
+ vqrshrn.s32 d3, q3, #5
+ vqrshrn.s32 d4, q8, #5
+ vqrshrn.s32 d5, q9, #5
+ vqrshrn.s32 d6, q10, #5
+ vqrshrn.s32 d7, q11, #5
+ vmin.s16 q8, q0, q6
+ vmin.s16 q9, q1, q6
+ vld1.16 {q0, q1}, [r1, :128]! // src
+ vmin.s16 q10, q2, q6
+ vmin.s16 q11, q3, q6
+ vmax.s16 q8, q8, q4
+ vmax.s16 q9, q9, q4
+ vld1.16 {q2, q3}, [r1, :128], r2 // src
+ vmvn.i16 q5, #0xf000 // 0x0fff
+ vmax.s16 q10, q10, q4
+ vmax.s16 q11, q11, q4
+.elseif \ox
+ vmvn d4, d12 // grain_min
+ vqrshrn.s32 d16, q0, #5
+ vld1.16 {q0, q1}, [r1, :128]! // src
+ vmin.s16 d16, d16, d12
+ vmax.s16 d16, d16, d4
+ vld1.16 {q2, q3}, [r1, :128], r2 // src
+.endif
+
+ // Make sure that uninitialized pixels out of range past the right
+ // edge are in range; their actual values shouldn't matter.
+ vand q0, q0, q5
+ vand q1, q1, q5
+ vand q2, q2, q5
+ vand q3, q3, q5
+
+ bl gather32_neon
+
+.if \ox || \oy
+ vpush {q6-q7}
+.endif
+
+ vmovl.u8 q6, d8 // scaling
+ vmovl.u8 q7, d9
+ vmovl.u8 q4, d10
+ vmovl.u8 q5, d11
+
+ vshl.u16 q6, q6, q13 // scaling << (15 - scaling_shift)
+ vshl.u16 q7, q7, q13
+ vshl.u16 q4, q4, q13
+ vshl.u16 q5, q5, q13
+
+ vqrdmulh.s16 q8, q8, q6 // round2((scaling << (15 - scaling_shift)) * grain, 15)
+ vqrdmulh.s16 q9, q9, q7
+ vqrdmulh.s16 q10, q10, q4
+ vqrdmulh.s16 q11, q11, q5
+
+.if \ox || \oy
+ vpop {q6-q7}
+.endif
+
+ vqadd.s16 q0, q0, q8 // *src + noise
+ vqadd.s16 q1, q1, q9
+ vqadd.s16 q2, q2, q10
+ vqadd.s16 q3, q3, q11
+
+ vmax.s16 q0, q0, q14
+ vmax.s16 q1, q1, q14
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmin.s16 q0, q0, q15
+ vmin.s16 q1, q1, q15
+ vmin.s16 q2, q2, q15
+ vmin.s16 q3, q3, q15
+
+ vst1.16 {q0, q1}, [r0, :128]! // dst
+ subs r7, r7, #1
+.if \oy
+ vdup.16 d14, d25[0]
+ vdup.16 d15, d25[1]
+.endif
+ vst1.16 {q2, q3}, [r0, :128], r2 // dst
+ bgt 1b
+
+.if \oy
+ cmp r10, #2
+ sub r7, r10, #2 // restore actual remaining h
+ bgt L(loop_\ox\()0)
+.endif
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+.endm
+
+ fgy 0, 0
+ fgy 0, 1
+ fgy 1, 0
+ fgy 1, 1
+endfunc
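+// Per pixel, the loops above compute, in effect,
+//   noise = round2(scaling[src] * grain, scaling_shift);
+//   dst   = clip(src + noise, min_value, max_value);
+// with the clip range set up in the prologue (16..235 scaled to the bitdepth,
+// or the full range when clipping is disabled). The scaling_shift is folded
+// into the multiply: scaling is widened, shifted left by 15 - scaling_shift,
+// and vqrdmulh then gives (2*a*b + (1 << 15)) >> 16, which for
+// a = scaling << (15 - scaling_shift) and b = grain equals
+// round2(scaling * grain, scaling_shift).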
+
+// void dav1d_fguv_32x32_420_16bpc_neon(pixel *const dst,
+// const pixel *const src,
+// const ptrdiff_t stride,
+// const uint8_t scaling[SCALING_SIZE],
+// const Dav1dFilmGrainData *const data,
+// const entry grain_lut[][GRAIN_WIDTH],
+// const pixel *const luma_row,
+// const ptrdiff_t luma_stride,
+// const int offsets[][2],
+// const ptrdiff_t h, const ptrdiff_t uv,
+// const ptrdiff_t is_id,
+// const ptrdiff_t type,
+// const int bitdepth_max);
+.macro fguv layout, sx, sy
+function fguv_32x32_\layout\()_16bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100] // data, grain_lut
+ ldrd r10, r11, [sp, #124] // uv, is_id
+ ldr r6, [sp, #136] // bitdepth_max
+
+ clz r7, r6
+ rsb r7, r7, #24 // bitdepth_min_8
+
+ // !csfl
+ add r10, r4, r10, lsl #2 // + 4*uv
+ add r12, r10, #FGD_UV_LUMA_MULT
+ add lr, r10, #FGD_UV_MULT
+ ldrh r10, [r10, #FGD_UV_OFFSET] // uv_offset
+ vld1.16 {d30[]}, [r12] // uv_luma_mult
+ lsl r10, r10, r7 // uv_offset << bitdepth_min_8
+ vld1.16 {d30[1]}, [lr] // uv_mult
+
+ ldr lr, [r4, #FGD_SCALING_SHIFT]
+ ldr r12, [r4, #FGD_CLIP_TO_RESTRICTED_RANGE]
+ eor lr, lr, #15 // 15 - scaling_shift
+
+ vmov.16 d30[2], r10 // uv_offset << bitdepth_min_8
+
+ cmp r12, #0
+ vdup.16 q13, lr // 15 - scaling_shift
+
+ beq 1f
+ // clip
+ cmp r11, #0
+ mov r8, #16
+ mov r9, #240
+ lsl r8, r8, r7
+ lsl r9, r9, r7
+ beq 2f
+ // is_id
+ mov r9, #235
+ lsl r9, r9, r7
+ b 2f
+1:
+ // no clip
+ mov r8, #0
+ mov r9, r6 // bitdepth_max
+2:
+ vmov.16 d30[3], r6 // bitdepth_max
+ vdup.16 d31, r8 // clip_min
+
+ mov r10, #GRAIN_WIDTH*2 // grain_lut stride
+
+.if \sy
+ mov r6, #23
+ mov r7, #22
+.else
+ mov r6, #27
+ mov r7, #17
+.endif
+ vmov.16 d31[1], r9 // clip_max
+
+ ldrd r8, r9, [sp, #116] // offsets, h
+
+ add r5, r5, #(2*(3 + (2 >> \sx)*3)) // grain_lut += 9 or 6
+.if \sy
+ add r5, r5, r10, lsl #2 // grain_lut += 4 * grain_stride
+ add r5, r5, r10, lsl #1 // grain_lut += 2 * grain_stride
+.else
+ add r5, r5, r10, lsl #3 // grain_lut += 8 * grain_stride
+ add r5, r5, r10 // grain_lut += grain_stride
+.endif
+ vmov.16 d31[2], r6 // overlap y [0]
+
+ ldr r12, [r8, #8] // offsets[1][0]
+ calc_offset r12, r4, r12, \sx, \sy
+ add_offset r4, r12, r4, r5, r10
+
+ ldr r12, [r8, #4] // offsets[0][1]
+ calc_offset r12, lr, r12, \sx, \sy
+ add_offset lr, r12, lr, r5, r10
+
+ ldr r12, [r8, #12] // offsets[1][1]
+ calc_offset r12, r11, r12, \sx, \sy
+ add_offset r11, r12, r11, r5, r10
+
+ ldr r8, [r8] // offsets[0][0]
+ calc_offset r8, r12, r8, \sx, \sy
+ add_offset r5, r8, r12, r5, r10
+
+ vmov.16 d31[3], r7 // overlap y [1]
+
+ add r4, r4, #2*(32 >> \sx) // grain_lut += BLOCK_SIZE * bx
+ add r8, lr, r10, lsl #(5 - \sy) // grain_lut += grain_stride * BLOCK_SIZE * by
+ add r11, r11, r10, lsl #(5 - \sy) // grain_lut += grain_stride * BLOCK_SIZE * by
+ add r11, r11, #2*(32 >> \sx) // grain_lut += BLOCK_SIZE * bx
+
+ movrel_local r12, overlap_coeffs_\sx
+ ldr lr, [sp, #132] // type
+ ldrd r6, r7, [sp, #108] // luma_row, luma_stride
+
+ vld1.16 {d24, d25}, [r12, :128] // overlap_coeffs
+
+ movrel_local r12, L(fguv_loop_sx\sx\()_tbl)
+#if CONFIG_THUMB
+ // This uses movrel_local instead of adr above, because the target
+ // can be out of range for adr. But movrel_local leaves the thumb bit
+ // set on COFF (though probably not when building for thumb on ELF),
+ // so clear the bit here for robustness.
+ bic r12, r12, #1
+#endif
+
+ tst lr, #1
+ ldr lr, [r12, lr, lsl #2]
+
+ add r12, r12, lr
+
+ beq 1f
+ // y overlap
+ sub lr, r9, #(2 >> \sy) // backup remaining h
+ mov r9, #(2 >> \sy)
+
+1:
+.if \sy
+ add r7, r7, r7 // luma_stride *= 2
+.endif
+ sub r7, r7, #32 // luma_stride -= 32
+
+ bx r12
+endfunc
+.endm
+
+fguv 420, 1, 1
+fguv 422, 1, 0
+fguv 444, 0, 0
+
+function fguv_loop_sx0_neon
+L(fguv_loop_sx0_tbl):
+ .word L(fguv_loop_sx0_csfl0_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl0_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl0_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl0_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_00) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_01) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_10) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx0_csfl1_11) - L(fguv_loop_sx0_tbl) + CONFIG_THUMB
+
+.macro fguv_loop_sx0 csfl, ox, oy
+L(fguv_loop_sx0_csfl\csfl\()_\ox\oy):
+ sub r2, r2, #32 // src_stride -= 32
+ sub r10, r10, #32 // grain_stride -= 32
+.if \oy
+ mov r12, lr
+.endif
+L(fguv_loop_sx0_csfl\csfl\()_\ox\oy\()_loopstart):
+1:
+.if \ox
+ vld1.16 {d0}, [r4], r10 // grain_lut old
+.endif
+.if \oy
+ vld1.16 {q2, q3}, [r8]! // grain_lut top
+.endif
+.if \ox && \oy
+ vld1.16 {d2}, [r11], r10 // grain_lut top old
+.endif
+.if !\ox && !\oy
+ vld1.16 {q0, q1}, [r6, :128]! // luma
+.endif
+ vld1.16 {q8, q9}, [r5]! // grain_lut
+.if \oy
+ vld1.16 {q4, q5}, [r8], r10 // grain_lut top
+.endif
+.if !\ox && !\oy
+ vld1.16 {q2, q3}, [r6, :128], r7 // luma
+.endif
+.if \oy
+ vdup.16 d28, d31[2] // overlap y coeff
+ vdup.16 d29, d31[3] // overlap y coeff
+.endif
+ vld1.16 {q10, q11}, [r5], r10 // grain_lut
+
+.if \ox
+ vdup.16 q7, d30[3] // bitdepth_max
+ add r4, r4, #32
+ vmull.s16 q0, d0, d24
+ vshr.u16 q7, q7, #1 // grain_max
+ vmlal.s16 q0, d16, d25
+ vmvn q6, q7 // grain_min
+.endif
+
+.if \oy
+.if \ox
+ add r11, r11, #32
+ vmull.s16 q1, d2, d24
+ vmlal.s16 q1, d4, d25
+ vqrshrn.s32 d16, q0, #5
+ vqrshrn.s32 d4, q1, #5
+ vmin.s16 d4, d4, d14
+ vmin.s16 d16, d16, d14
+ vmax.s16 d4, d4, d12
+ vmax.s16 d16, d16, d12
+.endif
+
+ vmull.s16 q0, d4, d28
+ vmull.s16 q1, d5, d28
+ vmull.s16 q2, d6, d28
+ vmull.s16 q3, d7, d28
+.if !\ox
+ vdup.16 q7, d30[3] // bitdepth_max
+.endif
+ vmlal.s16 q0, d16, d29
+ vmlal.s16 q1, d17, d29
+ vmlal.s16 q2, d18, d29
+ vmlal.s16 q3, d19, d29
+.if !\ox
+ vshr.u16 q7, q7, #1 // grain_max
+.endif
+ vmull.s16 q8, d20, d29
+ vmull.s16 q9, d21, d29
+ vmull.s16 q10, d22, d29
+ vmull.s16 q11, d23, d29
+.if !\ox
+ vmvn q6, q7 // grain_min
+.endif
+ vmlal.s16 q8, d8, d28
+ vmlal.s16 q9, d9, d28
+ vmlal.s16 q10, d10, d28
+ vmlal.s16 q11, d11, d28
+ vqrshrn.s32 d0, q0, #5
+ vqrshrn.s32 d1, q1, #5
+ vqrshrn.s32 d2, q2, #5
+ vqrshrn.s32 d3, q3, #5
+ vqrshrn.s32 d4, q8, #5
+ vqrshrn.s32 d5, q9, #5
+ vqrshrn.s32 d6, q10, #5
+ vqrshrn.s32 d7, q11, #5
+ vmin.s16 q8, q0, q7
+ vmin.s16 q9, q1, q7
+ vld1.16 {q0, q1}, [r6, :128]! // luma
+ vmin.s16 q10, q2, q7
+ vmin.s16 q11, q3, q7
+ vmax.s16 q8, q8, q6
+ vmax.s16 q9, q9, q6
+ vld1.16 {q2, q3}, [r6, :128], r7 // luma
+ vmax.s16 q10, q10, q6
+ vmax.s16 q11, q11, q6
+.elseif \ox
+ vqrshrn.s32 d16, q0, #5
+ vld1.16 {q0, q1}, [r6, :128]! // luma
+ vmin.s16 d16, d16, d14
+ vld1.16 {q2, q3}, [r6, :128], r7 // luma
+ vmax.s16 d16, d16, d12
+.endif
+
+.if !\csfl
+ vdup.16 d28, d30[0] // uv_luma_mult
+ vld1.16 {q4, q5}, [r1, :128]! // src
+ vdup.16 d29, d30[1] // uv_mult
+ vmull.s16 q6, d0, d28
+ vmull.s16 q7, d1, d28
+ vmull.s16 q0, d2, d28
+ vmull.s16 q1, d3, d28
+ vmlal.s16 q6, d8, d29
+ vmlal.s16 q7, d9, d29
+ vmlal.s16 q0, d10, d29
+ vmlal.s16 q1, d11, d29
+ vld1.16 {q4, q5}, [r1, :128] // src
+ sub r1, r1, #32
+ vshrn.s32 d12, q6, #6
+ vshrn.s32 d13, q7, #6
+ vshrn.s32 d14, q0, #6
+ vshrn.s32 d15, q1, #6
+ vmull.s16 q0, d4, d28
+ vmull.s16 q1, d5, d28
+ vmull.s16 q2, d6, d28
+ vmull.s16 q3, d7, d28
+ vmlal.s16 q0, d8, d29
+ vmlal.s16 q1, d9, d29
+ vmlal.s16 q2, d10, d29
+ vmlal.s16 q3, d11, d29
+ vdup.16 q14, d30[2] // uv_offset
+ vshrn.s32 d0, q0, #6
+ vshrn.s32 d1, q1, #6
+ vshrn.s32 d2, q2, #6
+ vshrn.s32 d3, q3, #6
+ vdup.16 q4, d30[3] // bitdepth_max
+ vmov.i16 q5, #0
+ vadd.i16 q6, q6, q14
+ vadd.i16 q7, q7, q14
+ vadd.i16 q2, q0, q14
+ vadd.i16 q3, q1, q14
+ vmin.s16 q0, q6, q4
+ vmin.s16 q1, q7, q4
+ vmin.s16 q2, q2, q4
+ vmin.s16 q3, q3, q4
+ vmax.s16 q0, q0, q5
+ vmax.s16 q1, q1, q5
+ vmax.s16 q2, q2, q5
+ vmax.s16 q3, q3, q5
+.else
+ vdup.16 q14, d30[3] // bitdepth_max
+ // Make sure that uninitialized pixels out of range past the right
+ // edge are in range; their actual values shouldn't matter.
+ vand q0, q0, q14
+ vand q1, q1, q14
+ vand q2, q2, q14
+ vand q3, q3, q14
+.endif
+
+ bl gather32_neon
+
+ vld1.16 {q0, q1}, [r1, :128]! // src
+
+ vmovl.u8 q6, d8 // scaling
+ vmovl.u8 q7, d9
+ vmovl.u8 q4, d10
+ vmovl.u8 q5, d11
+
+ vld1.16 {q2, q3}, [r1, :128], r2 // src
+
+ vshl.u16 q6, q6, q13 // scaling << (15 - scaling_shift)
+ vshl.u16 q7, q7, q13
+ vshl.u16 q4, q4, q13
+ vshl.u16 q5, q5, q13
+
+ vqrdmulh.s16 q8, q8, q6 // round2((scaling << (15 - scaling_shift)) * grain, 15)
+ vqrdmulh.s16 q9, q9, q7
+ vqrdmulh.s16 q10, q10, q4
+ vqrdmulh.s16 q11, q11, q5
+
+
+ vdup.16 q4, d31[0] // clip_min
+ vdup.16 q5, d31[1] // clip_max
+
+ vqadd.s16 q0, q0, q8 // *src + noise
+ vqadd.s16 q1, q1, q9
+ vqadd.s16 q2, q2, q10
+ vqadd.s16 q3, q3, q11
+
+.if \oy
+ vmov.32 lr, d25[0] // first two 16 bit coeffs from overlap x
+.endif
+
+ vmax.s16 q0, q0, q4
+ vmax.s16 q1, q1, q4
+ vmax.s16 q2, q2, q4
+ vmax.s16 q3, q3, q4
+ vmin.s16 q0, q0, q5
+ vmin.s16 q1, q1, q5
+ vmin.s16 q2, q2, q5
+ vmin.s16 q3, q3, q5
+
+ vst1.16 {q0, q1}, [r0, :128]! // dst
+
+ subs r9, r9, #1
+.if \oy
+ vmov.32 d31[1], lr // new coeffs for overlap y
+.endif
+
+ vst1.16 {q2, q3}, [r0, :128], r2 // dst
+ bgt 1b
+
+.if \oy
+ cmp r12, #0
+ mov r9, r12 // restore actual remaining h
+ bgt L(fguv_loop_sx0_csfl\csfl\()_\ox\()0_loopstart)
+.endif
+ b 9f
+.endm
+ fguv_loop_sx0 0, 0, 0
+ fguv_loop_sx0 0, 0, 1
+ fguv_loop_sx0 0, 1, 0
+ fguv_loop_sx0 0, 1, 1
+ fguv_loop_sx0 1, 0, 0
+ fguv_loop_sx0 1, 0, 1
+ fguv_loop_sx0 1, 1, 0
+ fguv_loop_sx0 1, 1, 1
+
+9:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
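+// In the csfl=0 variants above, the value used to index scaling[] is not the
+// chroma pixel itself but, roughly,
+//   val = clip(((luma * uv_luma_mult + src * uv_mult) >> 6)
+//              + (uv_offset << bitdepth_min_8), 0, bitdepth_max);
+// with csfl (chroma scaling from luma) the masked luma pixel is used
+// directly. The sx1 variants below additionally average each horizontal pair
+// of luma pixels (vpadd + vrshr #1) before this step.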
+
+function fguv_loop_sx1_neon
+L(fguv_loop_sx1_tbl):
+ .word L(fguv_loop_sx1_csfl0_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl0_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl0_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl0_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_00) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_01) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_10) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+ .word L(fguv_loop_sx1_csfl1_11) - L(fguv_loop_sx1_tbl) + CONFIG_THUMB
+
+.macro fguv_loop_sx1 csfl, ox, oy
+L(fguv_loop_sx1_csfl\csfl\()_\ox\oy):
+.if \oy
+ mov r12, lr
+.endif
+1:
+.if \ox
+ vld1.16 {d0}, [r4], r10 // grain_lut old
+.endif
+.if \ox && \oy
+ vld1.16 {d2}, [r11], r10 // grain_lut top old
+.endif
+.if \oy
+ vld1.16 {q2, q3}, [r8], r10 // grain_lut top
+.endif
+.if !\ox && !\oy
+ vld1.16 {q0, q1}, [r6, :128]! // luma
+.endif
+ vld1.16 {q8, q9}, [r5], r10 // grain_lut
+.if \oy
+ vdup.16 d28, d31[2] // overlap y coeff
+ vdup.16 d29, d31[3] // overlap y coeff
+.endif
+.if !\ox && !\oy
+ vld1.16 {q2, q3}, [r6, :128], r7 // luma
+.endif
+
+.if \ox
+ vdup.16 q7, d30[3] // bitdepth_max
+ vmull.s16 q0, d0, d24
+ vshr.u16 q7, q7, #1 // grain_max
+ vmlal.s16 q0, d16, d25
+ vmvn q6, q7 // grain_min
+.endif
+
+.if \oy
+.if \ox
+ vmull.s16 q1, d2, d24
+ vmlal.s16 q1, d4, d25
+ vqrshrn.s32 d16, q0, #5
+ vqrshrn.s32 d4, q1, #5
+ vmin.s16 d4, d4, d14
+ vmin.s16 d16, d16, d14
+ vmax.s16 d4, d4, d12
+ vmax.s16 d16, d16, d12
+.endif
+
+ vmull.s16 q0, d4, d28
+ vmull.s16 q1, d5, d28
+ vmull.s16 q2, d6, d28
+ vmull.s16 q3, d7, d28
+.if !\ox
+ vdup.16 q7, d30[3] // bitdepth_max
+.endif
+ vmlal.s16 q0, d16, d29
+ vmlal.s16 q1, d17, d29
+ vmlal.s16 q2, d18, d29
+ vmlal.s16 q3, d19, d29
+.if !\ox
+ vshr.u16 q7, q7, #1 // grain_max
+.endif
+ vqrshrn.s32 d16, q0, #5
+ vqrshrn.s32 d17, q1, #5
+ vqrshrn.s32 d18, q2, #5
+ vqrshrn.s32 d19, q3, #5
+.if !\ox
+ vmvn q6, q7 // grain_min
+.endif
+ vld1.16 {q0, q1}, [r6, :128]! // luma
+ vmin.s16 q8, q8, q7
+ vmin.s16 q9, q9, q7
+ vmax.s16 q8, q8, q6
+ vmax.s16 q9, q9, q6
+ vld1.16 {q2, q3}, [r6, :128], r7 // luma
+.elseif \ox
+ vqrshrn.s32 d16, q0, #5
+ vld1.16 {q0, q1}, [r6, :128]! // luma
+ vmin.s16 d16, d16, d14
+ vld1.16 {q2, q3}, [r6, :128], r7 // luma
+ vmax.s16 d16, d16, d12
+.endif
+
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d2, d4, d5
+ vpadd.i16 d3, d6, d7
+ vrshr.u16 q0, q0, #1
+ vrshr.u16 q1, q1, #1
+.if !\csfl
+ vdup.16 d28, d30[0] // uv_luma_mult
+ vld1.16 {q2, q3}, [r1, :128], r2 // src
+ vdup.16 d29, d30[1] // uv_mult
+ vmull.s16 q6, d0, d28
+ vmull.s16 q7, d1, d28
+ vmull.s16 q0, d2, d28
+ vmull.s16 q1, d3, d28
+ vmlal.s16 q6, d4, d29
+ vmlal.s16 q7, d5, d29
+ vmlal.s16 q0, d6, d29
+ vmlal.s16 q1, d7, d29
+ vshrn.s32 d12, q6, #6
+ vshrn.s32 d13, q7, #6
+ vshrn.s32 d14, q0, #6
+ vshrn.s32 d15, q1, #6
+ vdup.16 q14, d30[2] // uv_offset
+ vdup.16 q4, d30[3] // bitdepth_max
+ vmov.i16 q5, #0
+ vadd.i16 q6, q6, q14
+ vadd.i16 q7, q7, q14
+ vmin.s16 q0, q6, q4
+ vmin.s16 q1, q7, q4
+ vmax.s16 q0, q0, q5
+ vmax.s16 q1, q1, q5
+.else
+ vdup.16 q14, d30[3] // bitdepth_max
+ vld1.16 {q2, q3}, [r1, :128], r2 // src
+
+ // Make sure that uninitialized pixels out of range past the right
+ // edge are in range; their actual values shouldn't matter.
+ vand q0, q0, q14
+ vand q1, q1, q14
+.endif
+
+ bl gather16_neon
+
+ vmovl.u8 q6, d8 // scaling
+ vmovl.u8 q7, d9
+
+ vshl.u16 q6, q6, q13 // scaling << (15 - scaling_shift)
+ vshl.u16 q7, q7, q13
+
+ vqrdmulh.s16 q8, q8, q6 // round2((scaling << (15 - scaling_shift)) * grain, 15)
+ vqrdmulh.s16 q9, q9, q7
+
+
+ vdup.16 q4, d31[0] // clip_min
+ vdup.16 q5, d31[1] // clip_max
+
+ vqadd.s16 q0, q2, q8 // *src + noise
+ vqadd.s16 q1, q3, q9
+
+.if \oy
+ // Swap the last two coefficients of d31, placing them first in d28
+ vrev64.16 d28, d31
+.endif
+
+ vmax.s16 q0, q0, q4
+ vmax.s16 q1, q1, q4
+ vmin.s16 q0, q0, q5
+ vmin.s16 q1, q1, q5
+
+ subs r9, r9, #1
+.if \oy
+ // Take the first two 16 bit coefficients of d28 and place them at the
+ // end of d31
+ vtrn.32 d31, d28
+.endif
+
+ vst1.16 {q0, q1}, [r0, :128], r2 // dst
+ bgt 1b
+
+.if \oy
+ cmp r12, #0
+ mov r9, r12 // restore actual remaining h
+ bgt L(fguv_loop_sx1_csfl\csfl\()_\ox\()0)
+.endif
+
+ b 9f
+.endm
+ fguv_loop_sx1 0, 0, 0
+ fguv_loop_sx1 0, 0, 1
+ fguv_loop_sx1 0, 1, 0
+ fguv_loop_sx1 0, 1, 1
+ fguv_loop_sx1 1, 0, 0
+ fguv_loop_sx1 1, 0, 1
+ fguv_loop_sx1 1, 1, 0
+ fguv_loop_sx1 1, 1, 1
+
+9:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/ipred.S b/third_party/dav1d/src/arm/32/ipred.S
new file mode 100644
index 0000000000..ff55d95d4a
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/ipred.S
@@ -0,0 +1,2937 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * Copyright © 2019, B Krishnan Iyer
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+// void ipred_dc_128_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_dc_128_8bpc_neon, export=1
+ push {r4, lr}
+ ldr r4, [sp, #8]
+ clz r3, r3
+ adr r2, L(ipred_dc_128_tbl)
+ sub r3, r3, #25
+ ldr r3, [r2, r3, lsl #2]
+ vmov.i8 q0, #128
+ add r2, r2, r3
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r2
+
+ .align 2
+L(ipred_dc_128_tbl):
+ .word 640f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 16f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 8f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 4f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+4:
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ bgt 4b
+ pop {r4, pc}
+8:
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ bgt 8b
+ pop {r4, pc}
+16:
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ bgt 16b
+ pop {r4, pc}
+320:
+ vmov.i8 q1, #128
+32:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 32b
+ pop {r4, pc}
+640:
+ vmov.i8 q1, #128
+ sub r1, r1, #32
+64:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 64b
+ pop {r4, pc}
+endfunc
+
+// void ipred_v_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_v_8bpc_neon, export=1
+ push {r4, lr}
+ ldr lr, [sp, #8]
+ clz r3, r3
+ adr r4, L(ipred_v_tbl)
+ sub r3, r3, #25
+ ldr r3, [r4, r3, lsl #2]
+ add r2, r2, #1
+ add r4, r4, r3
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r4
+
+ .align 2
+L(ipred_v_tbl):
+ .word 640f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_v_tbl) + CONFIG_THUMB
+40:
+ vld1.32 {d0[]}, [r2]
+4:
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ subs lr, lr, #4
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ bgt 4b
+ pop {r4, pc}
+80:
+ vld1.8 {d0}, [r2]
+8:
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ subs lr, lr, #4
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ bgt 8b
+ pop {r4, pc}
+160:
+ vld1.8 {q0}, [r2]
+16:
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ subs lr, lr, #4
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ bgt 16b
+ pop {r4, pc}
+320:
+ vld1.8 {q0, q1}, [r2]
+32:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs lr, lr, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 32b
+ pop {r4, pc}
+640:
+ vld1.8 {q0, q1}, [r2]!
+ sub r1, r1, #32
+ vld1.8 {q2, q3}, [r2]
+64:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d4, d5, d6, d7}, [r0, :128], r1
+ vst1.8 {d4, d5, d6, d7}, [r12, :128], r1
+ subs lr, lr, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d4, d5, d6, d7}, [r0, :128], r1
+ vst1.8 {d4, d5, d6, d7}, [r12, :128], r1
+ bgt 64b
+ pop {r4, pc}
+endfunc
+
+// void ipred_h_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_h_8bpc_neon, export=1
+ push {r4-r5, lr}
+ ldr r4, [sp, #12]
+ clz r3, r3
+ adr r5, L(ipred_h_tbl)
+ sub r3, r3, #25
+ ldr r3, [r5, r3, lsl #2]
+ sub r2, r2, #4
+ mov lr, #-4
+ add r5, r5, r3
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_h_tbl):
+ .word 640f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 8f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 4f - L(ipred_h_tbl) + CONFIG_THUMB
+4:
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], lr
+ vst1.32 {d3[0]}, [r0, :32], r1
+ vst1.32 {d2[0]}, [r12, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d1[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ bgt 4b
+ pop {r4-r5, pc}
+8:
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], lr
+ vst1.8 {d3}, [r0, :64], r1
+ vst1.8 {d2}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d1}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ bgt 8b
+ pop {r4-r5, pc}
+160:
+ add r2, r2, #3
+ mov lr, #-1
+16:
+ vld1.8 {d0[], d1[]}, [r2], lr
+ subs r4, r4, #4
+ vld1.8 {d2[], d3[]}, [r2], lr
+ vst1.8 {q0}, [r0, :128], r1
+ vld1.8 {d4[], d5[]}, [r2], lr
+ vst1.8 {q1}, [r12, :128], r1
+ vld1.8 {d6[], d7[]}, [r2], lr
+ vst1.8 {q2}, [r0, :128], r1
+ vst1.8 {q3}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5, pc}
+320:
+ add r2, r2, #3
+ mov lr, #-1
+ sub r1, r1, #16
+32:
+ vld1.8 {d0[], d1[]}, [r2], lr
+ subs r4, r4, #4
+ vld1.8 {d2[], d3[]}, [r2], lr
+ vst1.8 {q0}, [r0, :128]!
+ vld1.8 {d4[], d5[]}, [r2], lr
+ vst1.8 {q1}, [r12, :128]!
+ vld1.8 {d6[], d7[]}, [r2], lr
+ vst1.8 {q0}, [r0, :128], r1
+ vst1.8 {q1}, [r12, :128], r1
+ vst1.8 {q2}, [r0, :128]!
+ vst1.8 {q3}, [r12, :128]!
+ vst1.8 {q2}, [r0, :128], r1
+ vst1.8 {q3}, [r12, :128], r1
+ bgt 32b
+ pop {r4-r5, pc}
+640:
+ add r2, r2, #3
+ mov lr, #-1
+ sub r1, r1, #48
+64:
+ vld1.8 {d0[], d1[]}, [r2], lr
+ subs r4, r4, #4
+ vld1.8 {d2[], d3[]}, [r2], lr
+ vst1.8 {q0}, [r0, :128]!
+ vld1.8 {d4[], d5[]}, [r2], lr
+ vst1.8 {q1}, [r12, :128]!
+ vld1.8 {d6[], d7[]}, [r2], lr
+ vst1.8 {q0}, [r0, :128]!
+ vst1.8 {q1}, [r12, :128]!
+ vst1.8 {q0}, [r0, :128]!
+ vst1.8 {q1}, [r12, :128]!
+ vst1.8 {q0}, [r0, :128], r1
+ vst1.8 {q1}, [r12, :128], r1
+ vst1.8 {q2}, [r0, :128]!
+ vst1.8 {q3}, [r12, :128]!
+ vst1.8 {q2}, [r0, :128]!
+ vst1.8 {q3}, [r12, :128]!
+ vst1.8 {q2}, [r0, :128]!
+ vst1.8 {q3}, [r12, :128]!
+ vst1.8 {q2}, [r0, :128], r1
+ vst1.8 {q3}, [r12, :128], r1
+ bgt 64b
+ pop {r4-r5, pc}
+endfunc
+
+// void ipred_dc_top_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_dc_top_8bpc_neon, export=1
+ push {r4-r5, lr}
+ ldr r4, [sp, #12]
+ clz r3, r3
+ adr r5, L(ipred_dc_top_tbl)
+ sub r3, r3, #25
+ ldr r3, [r5, r3, lsl #2]
+ add r2, r2, #1
+ add r5, r5, r3
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_dc_top_tbl):
+ .word 640f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+40:
+ vld1.32 {d0[]}, [r2]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #2
+ vdup.8 d0, d0[0]
+4:
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ bgt 4b
+ pop {r4-r5, pc}
+80:
+ vld1.8 {d0}, [r2]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #3
+ vdup.8 d0, d0[0]
+8:
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ bgt 8b
+ pop {r4-r5, pc}
+160:
+ vld1.8 {d0, d1}, [r2]
+ vaddl.u8 q0, d0, d1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #4
+ vdup.8 q0, d0[0]
+16:
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5, pc}
+320:
+ vld1.8 {d0, d1, d2, d3}, [r2]
+ vaddl.u8 q0, d0, d1
+ vaddl.u8 q1, d2, d3
+ vadd.u16 q0, q0, q1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d4, q0, #5
+ vdup.8 q0, d4[0]
+ vdup.8 q1, d4[0]
+32:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 32b
+ pop {r4-r5, pc}
+640:
+ vld1.8 {d0, d1, d2, d3}, [r2]!
+ vaddl.u8 q0, d0, d1
+ vld1.8 {d4, d5, d6, d7}, [r2]
+ vaddl.u8 q1, d2, d3
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q3, d6, d7
+ vadd.u16 q0, q0, q1
+ vadd.u16 q1, q2, q3
+ vadd.u16 q0, q0, q1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d18, q0, #6
+ vdup.8 q0, d18[0]
+ vdup.8 q1, d18[0]
+ sub r1, r1, #32
+64:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 64b
+ pop {r4-r5, pc}
+endfunc
+
+// void ipred_dc_left_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_dc_left_8bpc_neon, export=1
+ push {r4-r5, lr}
+ ldr r4, [sp, #12]
+ sub r2, r2, r4
+ clz r3, r3
+ clz lr, r4
+ sub lr, lr, #25
+ adr r5, L(ipred_dc_left_tbl)
+ sub r3, r3, #20
+ ldr r3, [r5, r3, lsl #2]
+ ldr lr, [r5, lr, lsl #2]
+ add r3, r5, r3
+ add r5, r5, lr
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_dc_left_tbl):
+ .word L(ipred_dc_left_h64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+
+L(ipred_dc_left_h4):
+ vld1.32 {d0[]}, [r2, :32]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #2
+ vdup.8 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w4):
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ bgt L(ipred_dc_left_w4)
+ pop {r4-r5, pc}
+L(ipred_dc_left_h8):
+ vld1.8 {d0}, [r2, :64]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #3
+ vdup.8 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w8):
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ bgt L(ipred_dc_left_w8)
+ pop {r4-r5, pc}
+L(ipred_dc_left_h16):
+ vld1.8 {d0, d1}, [r2, :128]
+ vaddl.u8 q0, d0, d1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #4
+ vdup.8 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w16):
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ bgt L(ipred_dc_left_w16)
+ pop {r4-r5, pc}
+L(ipred_dc_left_h32):
+ vld1.8 {d0, d1, d2, d3}, [r2, :128]
+ vaddl.u8 q0, d0, d1
+ vaddl.u8 q1, d2, d3
+ vadd.u16 q0, q0, q1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #5
+ vdup.8 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w32):
+ vmov.8 q1, q0
+1:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 1b
+ pop {r4-r5, pc}
+L(ipred_dc_left_h64):
+ vld1.8 {d0, d1, d2, d3}, [r2, :128]!
+ vld1.8 {d4, d5, d6, d7}, [r2, :128]
+ vaddl.u8 q0, d0, d1
+ vaddl.u8 q1, d2, d3
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q3, d6, d7
+ vadd.u16 q0, q0, q1
+ vadd.u16 q1, q2, q3
+ vadd.u16 q0, q0, q1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshrn.u16 d0, q0, #6
+ vdup.8 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w64):
+ vmov.8 q1, q0
+ sub r1, r1, #32
+1:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 1b
+ pop {r4-r5, pc}
+endfunc
+
+// void ipred_dc_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_dc_8bpc_neon, export=1
+ push {r4-r6, lr}
+ ldr r4, [sp, #16]
+ sub r2, r2, r4
+ add lr, r3, r4 // width + height
+ clz r3, r3
+ clz r12, r4
+ vdup.16 q15, lr // width + height
+ adr r5, L(ipred_dc_tbl)
+ rbit lr, lr // rbit(width + height)
+ sub r3, r3, #20 // 25 leading bits, minus table offset 5
+ sub r12, r12, #25
+ clz lr, lr // ctz(width + height)
+ ldr r3, [r5, r3, lsl #2]
+ ldr r12, [r5, r12, lsl #2]
+ neg lr, lr // -ctz(width + height)
+ add r3, r5, r3
+ add r5, r5, r12
+ vshr.u16 q15, q15, #1 // (width + height) >> 1
+ vdup.16 q14, lr // -ctz(width + height)
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_dc_tbl):
+ .word L(ipred_dc_h64) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h32) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h16) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h8) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h4) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w64) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w32) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w16) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w8) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w4) - L(ipred_dc_tbl) + CONFIG_THUMB
+
+L(ipred_dc_h4):
+ vld1.32 {d0[]}, [r2, :32]!
+ vpaddl.u8 d0, d0
+ add r2, r2, #1
+ vpadd.u16 d0, d0
+ bx r3
+L(ipred_dc_w4):
+ vld1.32 {d1[]}, [r2]
+ vadd.s16 d0, d0, d30
+ vpaddl.u8 d1, d1
+ vpadd.u16 d1, d1
+ cmp r4, #4
+ vadd.s16 d0, d0, d1
+ vshl.u16 d0, d0, d28
+ beq 1f
+ // h = 8/16
+ movw lr, #(0x3334/2)
+ movw r5, #(0x5556/2)
+ cmp r4, #16
+ it ne
+ movne lr, r5
+ vdup.16 d30, lr
+ vqdmulh.s16 d0, d0, d30
+1:
+ vdup.8 d0, d0[0]
+2:
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[0]}, [r12, :32], r1
+ bgt 2b
+ pop {r4-r6, pc}
+
+L(ipred_dc_h8):
+ vld1.8 {d0}, [r2, :64]!
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ add r2, r2, #1
+ vpadd.u16 d0, d0
+ bx r3
+L(ipred_dc_w8):
+ vld1.8 {d2}, [r2]
+ vadd.s16 d0, d0, d30
+ vpaddl.u8 d2, d2
+ vpadd.u16 d2, d2
+ vpadd.u16 d2, d2
+ cmp r4, #8
+ vadd.s16 d0, d0, d2
+ vshl.u16 d0, d0, d28
+ beq 1f
+ // h = 4/16/32
+ cmp r4, #32
+ movw lr, #(0x3334/2)
+ movw r5, #(0x5556/2)
+ it ne
+ movne lr, r5
+ vdup.16 d24, lr
+ vqdmulh.s16 d0, d0, d24
+1:
+ vdup.8 d0, d0[0]
+2:
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d0}, [r12, :64], r1
+ bgt 2b
+ pop {r4-r6, pc}
+
+L(ipred_dc_h16):
+ vld1.8 {d0, d1}, [r2, :128]!
+ vaddl.u8 q0, d0, d1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ add r2, r2, #1
+ vpadd.u16 d0, d0
+ bx r3
+L(ipred_dc_w16):
+ vld1.8 {d2, d3}, [r2]
+ vadd.s16 d0, d0, d30
+ vaddl.u8 q1, d2, d3
+ vadd.u16 d2, d2, d3
+ vpadd.u16 d2, d2
+ vpadd.u16 d2, d2
+ cmp r4, #16
+ vadd.s16 d0, d0, d2
+ vshl.u16 d0, d0, d28
+ beq 1f
+ // h = 4/8/32/64
+ tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
+ movw lr, #(0x3334/2)
+ movw r5, #(0x5556/2)
+ it ne
+ movne lr, r5
+ vdup.16 d24, lr
+ vqdmulh.s16 d0, d0, d24
+1:
+ vdup.8 q0, d0[0]
+2:
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1}, [r0, :128], r1
+ vst1.8 {d0, d1}, [r12, :128], r1
+ bgt 2b
+ pop {r4-r6, pc}
+
+L(ipred_dc_h32):
+ vld1.8 {d0, d1, d2, d3}, [r2, :128]!
+ vaddl.u8 q0, d0, d1
+ vaddl.u8 q1, d2, d3
+ vadd.u16 q0, q0, q1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ add r2, r2, #1
+ vpadd.u16 d0, d0
+ bx r3
+L(ipred_dc_w32):
+ vld1.8 {d2, d3, d4, d5}, [r2]
+ vadd.s16 d0, d0, d30
+ vaddl.u8 q1, d2, d3
+ vaddl.u8 q2, d4, d5
+ vadd.u16 q1, q1, q2
+ vadd.u16 d2, d2, d3
+ vpadd.u16 d2, d2
+ vpadd.u16 d2, d2
+ cmp r4, #32
+ vadd.s16 d0, d0, d2
+ vshl.u16 d4, d0, d28
+ beq 1f
+ // h = 8/16/64
+ cmp r4, #8
+ movw lr, #(0x3334/2)
+ movw r5, #(0x5556/2)
+ it ne
+ movne lr, r5
+ vdup.16 d24, lr
+ vqdmulh.s16 d4, d4, d24
+1:
+ vdup.8 q0, d4[0]
+ vdup.8 q1, d4[0]
+2:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 2b
+ pop {r4-r6, pc}
+
+L(ipred_dc_h64):
+ vld1.8 {d0, d1, d2, d3}, [r2, :128]!
+ vaddl.u8 q0, d0, d1
+ vld1.8 {d4, d5, d6, d7}, [r2, :128]!
+ vaddl.u8 q1, d2, d3
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q3, d6, d7
+ vadd.u16 q0, q0, q1
+ vadd.u16 q1, q2, q3
+ vadd.u16 q0, q0, q1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ add r2, r2, #1
+ vpadd.u16 d0, d0
+ bx r3
+L(ipred_dc_w64):
+ vld1.8 {d2, d3, d4, d5}, [r2]!
+ vadd.s16 d0, d0, d30
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q1, d2, d3
+ vadd.u16 d4, d4, d5
+ vadd.u16 d2, d2, d3
+ vld1.8 {d16, d17, d18, d19}, [r2]
+ vpadd.u16 d4, d4
+ vpadd.u16 d2, d2
+ vpadd.u16 d4, d4
+ vpadd.u16 d2, d2
+ vaddl.u8 q8, d16, d17
+ vaddl.u8 q9, d18, d19
+ vadd.u16 d16, d16, d17
+ vadd.u16 d18, d18, d19
+ vpadd.u16 d16, d16
+ vpadd.u16 d18, d18
+ vpadd.u16 d16, d16
+ vpadd.u16 d18, d18
+ vadd.u16 d2, d2, d4
+ vadd.u16 d3, d16, d18
+ cmp r4, #64
+ vadd.s16 d0, d0, d2
+ vadd.s16 d0, d0, d3
+ vshl.u16 d18, d0, d28
+ beq 1f
+ // h = 16/32
+ movw lr, #(0x5556/2)
+ movt lr, #(0x3334/2)
+ and r5, r4, #31
+ lsr lr, lr, r5
+ vdup.16 d30, lr
+ vqdmulh.s16 d18, d18, d30
+1:
+ sub r1, r1, #32
+ vdup.8 q0, d18[0]
+ vdup.8 q1, d18[0]
+2:
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.8 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.8 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.8 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 2b
+ pop {r4-r6, pc}
+endfunc
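+// For rectangular blocks the DC sum covers width + height pixels, which is
+// not a power of two, so ipred_dc first shifts by ctz(width + height) and
+// then finishes the division with a Q15 multiply: vqdmulh computes
+// (2*a*b) >> 16, so 0x5556/2 = 0x2AAB acts as ~1/3 and 0x3334/2 = 0x199A
+// as ~1/5. E.g. for a 4x8 block: 12 pixels, (sum + 6) >> 2 followed by the
+// ~1/3 multiply.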
+
+// void ipred_paeth_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_paeth_8bpc_neon, export=1
+ push {r4-r8, lr}
+ ldr r4, [sp, #24]
+ clz lr, r3
+ adr r5, L(ipred_paeth_tbl)
+ sub lr, lr, #25
+ ldr lr, [r5, lr, lsl #2]
+ vld1.8 {d4[], d5[]}, [r2]
+ add r8, r2, #1
+ sub r2, r2, #4
+ add r5, r5, lr
+ mov r7, #-4
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_paeth_tbl):
+ .word 640f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_paeth_tbl) + CONFIG_THUMB
+
+40:
+ vld1.32 {d6[], d7[]}, [r8]
+ vsubl.u8 q8, d6, d4 // top - topleft
+4:
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7
+ vzip.32 d0, d1
+ vzip.32 d2, d3
+ vaddw.u8 q9, q8, d0
+ vaddw.u8 q10, q8, d2
+ vqmovun.s16 d18, q9 // base
+ vqmovun.s16 d19, q10
+ vmov d1, d2
+ vabd.u8 q10, q3, q9 // tdiff
+ vabd.u8 q11, q2, q9 // tldiff
+ vabd.u8 q9, q0, q9 // ldiff
+ vmin.u8 q12, q10, q11 // min(tdiff, tldiff)
+ vcge.u8 q10, q11, q10 // tldiff >= tdiff
+ vcge.u8 q9, q12, q9 // min(tdiff, tldiff) >= ldiff
+ vbsl q10, q3, q2 // tdiff <= tldiff ? top : topleft
+ vbit q10, q0, q9 // ldiff <= min ? left : ...
+ vst1.32 {d21[1]}, [r0, :32], r1
+ vst1.32 {d21[0]}, [r6, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d20[1]}, [r0, :32], r1
+ vst1.32 {d20[0]}, [r6, :32], r1
+ bgt 4b
+ pop {r4-r8, pc}
+80:
+ vld1.8 {d6}, [r8]
+ vsubl.u8 q8, d6, d4 // top - topleft
+ vmov d7, d6
+8:
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7
+ vaddw.u8 q9, q8, d0
+ vaddw.u8 q10, q8, d1
+ vaddw.u8 q11, q8, d2
+ vaddw.u8 q12, q8, d3
+ vqmovun.s16 d18, q9 // base
+ vqmovun.s16 d19, q10
+ vqmovun.s16 d20, q11
+ vqmovun.s16 d21, q12
+ vabd.u8 q11, q3, q9 // tdiff
+ vabd.u8 q12, q3, q10
+ vabd.u8 q13, q2, q9 // tldiff
+ vabd.u8 q14, q2, q10
+ vabd.u8 q10, q1, q10 // ldiff
+ vabd.u8 q9, q0, q9
+ vmin.u8 q15, q12, q14 // min(tdiff, tldiff)
+ vcge.u8 q12, q14, q12 // tldiff >= tdiff
+ vmin.u8 q14, q11, q13 // min(tdiff, tldiff)
+ vcge.u8 q11, q13, q11 // tldiff >= tdiff
+ vcge.u8 q10, q15, q10 // min(tdiff, tldiff) >= ldiff
+ vcge.u8 q9, q14, q9
+ vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
+ vbsl q11, q3, q2
+ vbit q12, q1, q10 // ldiff <= min ? left : ...
+ vbit q11, q0, q9
+ vst1.8 {d25}, [r0, :64], r1
+ vst1.8 {d24}, [r6, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d23}, [r0, :64], r1
+ vst1.8 {d22}, [r6, :64], r1
+ bgt 8b
+ pop {r4-r8, pc}
+160:
+320:
+640:
+ vld1.8 {d6}, [r8]!
+ mov r12, r3
+ // Set up pointers for four rows in parallel; r0, r6, r5, lr
+ add r5, r0, r1
+ add lr, r6, r1
+ lsl r1, r1, #1
+ sub r1, r1, r3
+1:
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7
+2:
+ vsubl.u8 q8, d6, d4 // top - topleft
+ vmov d7, d6
+ vaddw.u8 q9, q8, d0
+ vaddw.u8 q10, q8, d1
+ vaddw.u8 q11, q8, d2
+ vaddw.u8 q12, q8, d3
+ vqmovun.s16 d18, q9 // base
+ vqmovun.s16 d19, q10
+ vqmovun.s16 d20, q11
+ vqmovun.s16 d21, q12
+ vabd.u8 q11, q3, q9 // tdiff
+ vabd.u8 q12, q3, q10
+ vabd.u8 q13, q2, q9 // tldiff
+ vabd.u8 q14, q2, q10
+ vabd.u8 q10, q1, q10 // ldiff
+ vabd.u8 q9, q0, q9
+ vmin.u8 q15, q12, q14 // min(tdiff, tldiff)
+ vcge.u8 q12, q14, q12 // tldiff >= tdiff
+ vmin.u8 q14, q11, q13 // min(tdiff, tldiff)
+ vcge.u8 q11, q13, q11 // tldiff >= tdiff
+ vcge.u8 q10, q15, q10 // min(tdiff, tldiff) >= ldiff
+ vcge.u8 q9, q14, q9
+ vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
+ vbsl q11, q3, q2
+ vbit q12, q1, q10 // ldiff <= min ? left : ...
+ vbit q11, q0, q9
+ subs r3, r3, #8
+ vst1.8 {d25}, [r0, :64]!
+ vst1.8 {d24}, [r6, :64]!
+ vst1.8 {d23}, [r5, :64]!
+ vst1.8 {d22}, [lr, :64]!
+ ble 8f
+ vld1.8 {d6}, [r8]!
+ b 2b
+8:
+ subs r4, r4, #4
+ ble 9f
+ // End of horizontal loop, move pointers to next four rows
+ sub r8, r8, r12
+ add r0, r0, r1
+ add r6, r6, r1
+ vld1.8 {d6}, [r8]!
+ add r5, r5, r1
+ add lr, lr, r1
+ mov r3, r12
+ b 1b
+9:
+ pop {r4-r8, pc}
+endfunc
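+// Paeth selection, per pixel; a rough scalar sketch of what the
+// vabd/vcge/vbsl/vbit sequence above does:
+//   int base   = left + top - topleft;
+//   int ldiff  = abs(base - left);
+//   int tdiff  = abs(base - top);
+//   int tldiff = abs(base - topleft);
+//   pred = (ldiff <= tdiff && ldiff <= tldiff) ? left
+//        : (tdiff <= tldiff) ? top : topleft;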
+
+// void ipred_smooth_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_smooth_8bpc_neon, export=1
+ push {r4-r10, lr}
+ ldr r4, [sp, #32]
+ movrel r10, X(sm_weights)
+ add r12, r10, r4
+ add r10, r10, r3
+ clz r9, r3
+ adr r5, L(ipred_smooth_tbl)
+ sub lr, r2, r4
+ sub r9, r9, #25
+ ldr r9, [r5, r9, lsl #2]
+ vld1.8 {d4[]}, [lr] // bottom
+ add r8, r2, #1
+ add r5, r5, r9
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_smooth_tbl):
+ .word 640f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_smooth_tbl) + CONFIG_THUMB
+
+40:
+ vld1.32 {d16[]}, [r8] // top
+ vld1.32 {d18[]}, [r10, :32] // weights_hor
+ sub r2, r2, #4
+ mov r7, #-4
+ vdup.8 q3, d16[3] // right
+ vsubl.u8 q8, d16, d4 // top-bottom
+ vmovl.u8 q9, d18 // weights_hor
+4:
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7 // left
+ vld4.8 {d20[], d21[], d22[], d23[]}, [r12, :32]! // weights_ver
+ vshll.i8 q12, d6, #8 // right*256
+ vshll.i8 q13, d6, #8
+ vzip.32 d1, d0 // left, flipped
+ vzip.32 d3, d2
+ vzip.32 d20, d21 // weights_ver
+ vzip.32 d22, d23
+ vshll.i8 q14, d4, #8 // bottom*256
+ vshll.i8 q15, d4, #8
+ vsubl.u8 q0, d1, d6 // left-right
+ vsubl.u8 q1, d3, d6
+ vmovl.u8 q10, d20 // weights_ver
+ vmovl.u8 q11, d22
+ vmla.i16 q12, q1, q9 // right*256 + (left-right)*weights_hor
+ vmla.i16 q13, q0, q9 // (left flipped)
+ vmla.i16 q14, q8, q10 // bottom*256 + (top-bottom)*weights_ver
+ vmla.i16 q15, q8, q11
+ vhadd.u16 q12, q12, q14
+ vhadd.u16 q13, q13, q15
+ vrshrn.i16 d24, q12, #8
+ vrshrn.i16 d25, q13, #8
+ vst1.32 {d24[0]}, [r0, :32], r1
+ vst1.32 {d24[1]}, [r6, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d25[0]}, [r0, :32], r1
+ vst1.32 {d25[1]}, [r6, :32], r1
+ bgt 4b
+ pop {r4-r10, pc}
+80:
+ vld1.8 {d16}, [r8] // top
+ vld1.8 {d18}, [r10, :64] // weights_hor
+ sub r2, r2, #2
+ mov r7, #-2
+ vdup.8 q3, d16[7] // right
+ vsubl.u8 q8, d16, d4 // top-bottom
+ vmovl.u8 q9, d18 // weights_hor
+8:
+ vld2.8 {d0[], d1[]}, [r2, :16], r7 // left
+ vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
+ vshll.i8 q12, d6, #8 // right*256
+ vshll.i8 q13, d6, #8
+ vshll.i8 q14, d4, #8 // bottom*256
+ vshll.i8 q15, d4, #8
+ vsubl.u8 q1, d0, d6 // left-right (left flipped)
+ vsubl.u8 q0, d1, d6
+ vmovl.u8 q10, d20 // weights_ver
+ vmovl.u8 q11, d22
+ vmla.i16 q12, q0, q9 // right*256 + (left-right)*weights_hor
+ vmla.i16 q13, q1, q9
+ vmla.i16 q14, q8, q10 // bottom*256 + (top-bottom)*weights_ver
+ vmla.i16 q15, q8, q11
+ vhadd.u16 q12, q12, q14
+ vhadd.u16 q13, q13, q15
+ vrshrn.i16 d24, q12, #8
+ vrshrn.i16 d25, q13, #8
+ subs r4, r4, #2
+ vst1.8 {d24}, [r0, :64], r1
+ vst1.8 {d25}, [r6, :64], r1
+ bgt 8b
+ pop {r4-r10, pc}
+160:
+320:
+640:
+ add lr, r2, r3
+ sub r2, r2, #2
+ mov r7, #-2
+ vld1.8 {d6[], d7[]}, [lr] // right
+ sub r1, r1, r3
+ mov r9, r3
+
+1:
+ vld2.8 {d0[], d1[]}, [r2, :16], r7 // left
+ vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
+ vsubl.u8 q1, d0, d6 // left-right (left flipped)
+ vsubl.u8 q0, d1, d6
+ vmovl.u8 q10, d20 // weights_ver
+ vmovl.u8 q11, d22
+2:
+ vld1.8 {d16}, [r8]! // top
+ vld1.8 {d18}, [r10, :64]! // weights_hor
+ vshll.i8 q12, d6, #8 // right*256
+ vshll.i8 q13, d6, #8
+ vmovl.u8 q9, d18 // weights_hor
+ vshll.i8 q14, d4, #8 // bottom*256
+ vshll.i8 q15, d4, #8
+ vsubl.u8 q8, d16, d4 // top-bottom
+ vmla.i16 q12, q0, q9 // right*256 + (left-right)*weights_hor
+ vmla.i16 q13, q1, q9
+ vmla.i16 q14, q8, q10 // bottom*256 + (top-bottom)*weights_ver
+ vmla.i16 q15, q8, q11
+ vhadd.u16 q12, q12, q14
+ vhadd.u16 q13, q13, q15
+ vrshrn.i16 d24, q12, #8
+ vrshrn.i16 d25, q13, #8
+ subs r3, r3, #8
+ vst1.8 {d24}, [r0, :64]!
+ vst1.8 {d25}, [r6, :64]!
+ bgt 2b
+ subs r4, r4, #2
+ ble 9f
+ sub r8, r8, r9
+ sub r10, r10, r9
+ add r0, r0, r1
+ add r6, r6, r1
+ mov r3, r9
+ b 1b
+9:
+ pop {r4-r10, pc}
+endfunc
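+// The smooth predictor blends a vertical and a horizontal interpolation;
+// per pixel, with the 8 bit sm_weights, roughly:
+//   v = bottom*256 + (top  - bottom) * weights_ver[y];
+//   h = right*256  + (left - right)  * weights_hor[x];
+//   pred = round2((v + h) >> 1, 8);  // the vhadd + vrshrn #8 above
+// The smooth_v/smooth_h variants below keep only the vertical (or
+// horizontal) term, with pred = round2(v, 8).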
+
+// void ipred_smooth_v_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_smooth_v_8bpc_neon, export=1
+ push {r4-r7, lr}
+ ldr r4, [sp, #20]
+ movrel r7, X(sm_weights)
+ add r7, r7, r4
+ clz lr, r3
+ adr r5, L(ipred_smooth_v_tbl)
+ sub r12, r2, r4
+ sub lr, lr, #25
+ ldr lr, [r5, lr, lsl #2]
+ vld1.8 {d4[]}, [r12] // bottom
+ add r2, r2, #1
+ add r5, r5, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_smooth_v_tbl):
+ .word 640f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+
+40:
+ vld1.32 {d6[]}, [r2] // top
+ vsubl.u8 q3, d6, d4 // top-bottom
+4:
+ vld4.8 {d16[], d17[], d18[], d19[]}, [r7, :32]! // weights_ver
+ vshll.i8 q10, d4, #8 // bottom*256
+ vshll.i8 q11, d4, #8
+ vzip.32 d16, d17 // weights_ver
+ vzip.32 d18, d19
+ vmovl.u8 q8, d16 // weights_ver
+ vmovl.u8 q9, d18
+ subs r4, r4, #4
+ vmla.i16 q10, q3, q8 // bottom*256 + (top-bottom)*weights_ver
+ vmla.i16 q11, q3, q9
+ vrshrn.i16 d20, q10, #8
+ vrshrn.i16 d21, q11, #8
+ vst1.32 {d20[0]}, [r0, :32], r1
+ vst1.32 {d20[1]}, [r6, :32], r1
+ vst1.32 {d21[0]}, [r0, :32], r1
+ vst1.32 {d21[1]}, [r6, :32], r1
+ bgt 4b
+ pop {r4-r7, pc}
+80:
+ vld1.8 {d6}, [r2] // top
+ vsubl.u8 q3, d6, d4 // top-bottom
+8:
+ vld4.8 {d16[], d18[], d20[], d22[]}, [r7, :32]! // weights_ver
+ vshll.i8 q12, d4, #8 // bottom*256
+ vshll.i8 q13, d4, #8
+ vshll.i8 q14, d4, #8
+ vshll.i8 q15, d4, #8
+ vmovl.u8 q8, d16 // weights_ver
+ vmovl.u8 q9, d18
+ vmovl.u8 q10, d20
+ vmovl.u8 q11, d22
+ vmla.i16 q12, q3, q8 // bottom*256 + (top-bottom)*weights_ver
+ vmla.i16 q13, q3, q9
+ vmla.i16 q14, q3, q10
+ vmla.i16 q15, q3, q11
+ vrshrn.i16 d24, q12, #8
+ vrshrn.i16 d25, q13, #8
+ vrshrn.i16 d26, q14, #8
+ vrshrn.i16 d27, q15, #8
+ vst1.8 {d24}, [r0, :64], r1
+ vst1.8 {d25}, [r6, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d26}, [r0, :64], r1
+ vst1.8 {d27}, [r6, :64], r1
+ bgt 8b
+ pop {r4-r7, pc}
+160:
+320:
+640:
+ vpush {q4-q7}
+ // Set up pointers for four rows in parallel; r0, r6, r5, lr
+ add r5, r0, r1
+ add lr, r6, r1
+ lsl r1, r1, #1
+ sub r1, r1, r3
+ mov r12, r3
+
+1:
+ vld4.8 {d8[], d10[], d12[], d14[]}, [r7, :32]! // weights_ver
+ vmovl.u8 q4, d8 // weights_ver
+ vmovl.u8 q5, d10
+ vmovl.u8 q6, d12
+ vmovl.u8 q7, d14
+2:
+ vld1.8 {q3}, [r2]! // top
+ vshll.i8 q8, d4, #8 // bottom*256
+ vshll.i8 q9, d4, #8
+ vshll.i8 q10, d4, #8
+ vshll.i8 q11, d4, #8
+ vsubl.u8 q0, d6, d4 // top-bottom
+ vsubl.u8 q1, d7, d4
+ vshll.i8 q12, d4, #8
+ vshll.i8 q13, d4, #8
+ vshll.i8 q14, d4, #8
+ vshll.i8 q15, d4, #8
+ vmla.i16 q8, q0, q4 // bottom*256 + (top-bottom)*weights_ver
+ vmla.i16 q9, q1, q4
+ vmla.i16 q10, q0, q5
+ vmla.i16 q11, q1, q5
+ vmla.i16 q12, q0, q6 // bottom*256 + (top-bottom)*weights_ver
+ vmla.i16 q13, q1, q6
+ vmla.i16 q14, q0, q7
+ vmla.i16 q15, q1, q7
+ vrshrn.i16 d16, q8, #8
+ vrshrn.i16 d17, q9, #8
+ vrshrn.i16 d18, q10, #8
+ vrshrn.i16 d19, q11, #8
+ vrshrn.i16 d20, q12, #8
+ vrshrn.i16 d21, q13, #8
+ vrshrn.i16 d22, q14, #8
+ vrshrn.i16 d23, q15, #8
+ subs r3, r3, #16
+ vst1.8 {q8}, [r0, :128]!
+ vst1.8 {q9}, [r6, :128]!
+ vst1.8 {q10}, [r5, :128]!
+ vst1.8 {q11}, [lr, :128]!
+ bgt 2b
+ subs r4, r4, #4
+ ble 9f
+ sub r2, r2, r12
+ add r0, r0, r1
+ add r6, r6, r1
+ add r5, r5, r1
+ add lr, lr, r1
+ mov r3, r12
+ b 1b
+9:
+ vpop {q4-q7}
+ pop {r4-r7, pc}
+endfunc
+
+// void ipred_smooth_h_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_smooth_h_8bpc_neon, export=1
+ push {r4-r8, lr}
+ ldr r4, [sp, #24]
+ movrel r8, X(sm_weights)
+ add r8, r8, r3
+ clz lr, r3
+ adr r5, L(ipred_smooth_h_tbl)
+ add r12, r2, r3
+ sub lr, lr, #25
+ ldr lr, [r5, lr, lsl #2]
+ vld1.8 {d4[]}, [r12] // right
+ add r5, r5, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_smooth_h_tbl):
+ .word 640f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+
+40:
+ vld1.32 {d6[]}, [r8, :32] // weights_hor
+ sub r2, r2, #4
+ mov r7, #-4
+ vmovl.u8 q3, d6 // weights_hor
+4:
+ vld4.8 {d0[], d1[], d2[], d3[]}, [r2, :32], r7 // left
+ vshll.i8 q8, d4, #8 // right*256
+ vshll.i8 q9, d4, #8
+ vzip.32 d3, d2 // left, flipped
+ vzip.32 d1, d0
+ vsubl.u8 q1, d3, d4 // left-right
+ vsubl.u8 q0, d1, d4
+ subs r4, r4, #4
+ vmla.i16 q8, q1, q3 // right*256 + (left-right)*weights_hor
+ vmla.i16 q9, q0, q3
+ vrshrn.i16 d16, q8, #8
+ vrshrn.i16 d17, q9, #8
+ vst1.32 {d16[0]}, [r0, :32], r1
+ vst1.32 {d16[1]}, [r6, :32], r1
+ vst1.32 {d17[0]}, [r0, :32], r1
+ vst1.32 {d17[1]}, [r6, :32], r1
+ bgt 4b
+ pop {r4-r8, pc}
+80:
+ vld1.8 {d6}, [r8, :64] // weights_hor
+ sub r2, r2, #4
+ mov r7, #-4
+ vmovl.u8 q3, d6 // weights_hor
+8:
+ vld4.8 {d16[], d18[], d20[], d22[]}, [r2, :32], r7 // left
+ vshll.i8 q12, d4, #8 // right*256
+ vshll.i8 q13, d4, #8
+ vshll.i8 q14, d4, #8
+ vshll.i8 q15, d4, #8
+ vsubl.u8 q11, d22, d4 // left-right
+ vsubl.u8 q10, d20, d4
+ vsubl.u8 q9, d18, d4
+ vsubl.u8 q8, d16, d4
+ vmla.i16 q12, q11, q3 // right*256 + (left-right)*weights_hor
+ vmla.i16 q13, q10, q3 // (left flipped)
+ vmla.i16 q14, q9, q3
+ vmla.i16 q15, q8, q3
+ vrshrn.i16 d24, q12, #8
+ vrshrn.i16 d25, q13, #8
+ vrshrn.i16 d26, q14, #8
+ vrshrn.i16 d27, q15, #8
+ vst1.8 {d24}, [r0, :64], r1
+ vst1.8 {d25}, [r6, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d26}, [r0, :64], r1
+ vst1.8 {d27}, [r6, :64], r1
+ bgt 8b
+ pop {r4-r8, pc}
+160:
+320:
+640:
+ vpush {q4-q7}
+ sub r2, r2, #4
+ mov r7, #-4
+ // Set up pointers for four rows in parallel; r0, r6, r5, lr
+ add r5, r0, r1
+ add lr, r6, r1
+ lsl r1, r1, #1
+ sub r1, r1, r3
+ mov r12, r3
+
+1:
+ vld4.8 {d8[], d10[], d12[], d14[]}, [r2, :32], r7 // left
+ vsubl.u8 q4, d8, d4 // left-right
+ vsubl.u8 q5, d10, d4
+ vsubl.u8 q6, d12, d4
+ vsubl.u8 q7, d14, d4
+2:
+ vld1.8 {q1}, [r8, :128]! // weights_hor
+ vshll.i8 q8, d4, #8 // right*256
+ vshll.i8 q9, d4, #8
+ vshll.i8 q10, d4, #8
+ vshll.i8 q11, d4, #8
+ vmovl.u8 q0, d2 // weights_hor
+ vmovl.u8 q1, d3
+ vshll.i8 q12, d4, #8
+ vshll.i8 q13, d4, #8
+ vshll.i8 q14, d4, #8
+ vshll.i8 q15, d4, #8
+ vmla.i16 q8, q7, q0 // right*256 + (left-right)*weights_hor
+ vmla.i16 q9, q7, q1 // (left flipped)
+ vmla.i16 q10, q6, q0
+ vmla.i16 q11, q6, q1
+ vmla.i16 q12, q5, q0
+ vmla.i16 q13, q5, q1
+ vmla.i16 q14, q4, q0
+ vmla.i16 q15, q4, q1
+ vrshrn.i16 d16, q8, #8
+ vrshrn.i16 d17, q9, #8
+ vrshrn.i16 d18, q10, #8
+ vrshrn.i16 d19, q11, #8
+ vrshrn.i16 d20, q12, #8
+ vrshrn.i16 d21, q13, #8
+ vrshrn.i16 d22, q14, #8
+ vrshrn.i16 d23, q15, #8
+ subs r3, r3, #16
+ vst1.8 {q8}, [r0, :128]!
+ vst1.8 {q9}, [r6, :128]!
+ vst1.8 {q10}, [r5, :128]!
+ vst1.8 {q11}, [lr, :128]!
+ bgt 2b
+ subs r4, r4, #4
+ ble 9f
+ sub r8, r8, r12
+ add r0, r0, r1
+ add r6, r6, r1
+ add r5, r5, r1
+ add lr, lr, r1
+ mov r3, r12
+ b 1b
+9:
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+endfunc
+
+// void ipred_filter_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int filt_idx,
+// const int max_width, const int max_height);
+function ipred_filter_8bpc_neon, export=1
+ push {r4-r8, lr}
+ movw r12, #511
+ ldrd r4, r5, [sp, #24]
+ and r5, r5, r12 // 511
+ movrel r6, X(filter_intra_taps)
+ lsl r5, r5, #6
+ add r6, r6, r5
+ vld1.8 {d20, d21, d22, d23}, [r6, :128]!
+ clz lr, r3
+ adr r5, L(ipred_filter_tbl)
+ vld1.8 {d27, d28, d29}, [r6, :64]
+ sub lr, lr, #26
+ ldr lr, [r5, lr, lsl #2]
+ vmovl.s8 q8, d20
+ vmovl.s8 q9, d21
+ add r5, r5, lr
+ vmovl.s8 q10, d22
+ vmovl.s8 q11, d23
+ add r6, r0, r1
+ lsl r1, r1, #1
+ vmovl.s8 q12, d27
+ vmovl.s8 q13, d28
+ vmovl.s8 q14, d29
+ add r8, r2, #1
+ sub r2, r2, #2
+ mov r7, #-2
+ bx r5
+
+ .align 2
+L(ipred_filter_tbl):
+ .word 320f - L(ipred_filter_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_filter_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_filter_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_filter_tbl) + CONFIG_THUMB
+
+40:
+ vld1.32 {d0[]}, [r8] // top (0-3)
+ vmovl.u8 q0, d0 // top (0-3)
+4:
+ vld1.32 {d2[]}, [r2], r7 // left (0-1) + topleft (2)
+ vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
+ vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
+ vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
+ vmovl.u8 q1, d2 // left (0-1) + topleft (2)
+ vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
+ vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
+ vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
+ vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
+ vqrshrun.s16 d4, q2, #4
+ subs r4, r4, #2
+ vst1.32 {d4[0]}, [r0, :32], r1
+ vmovl.u8 q0, d4
+ vst1.32 {d4[1]}, [r6, :32], r1
+ vmov d0, d1 // move top from [4-7] to [0-3]
+ bgt 4b
+ pop {r4-r8, pc}
+80:
+ vld1.8 {d0}, [r8] // top (0-7)
+ vmovl.u8 q0, d0 // top (0-7)
+8:
+ vld1.32 {d2[]}, [r2], r7 // left (0-1) + topleft (2)
+ vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
+ vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
+ vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
+ vmovl.u8 q1, d2 // left (0-1) + topleft (2)
+ vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
+ vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
+ vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
+ vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
+ vmul.i16 q3, q9, d1[0] // p1(top[0]) * filter(1)
+ vmla.i16 q3, q10, d1[1] // p2(top[1]) * filter(2)
+ vmla.i16 q3, q11, d1[2] // p3(top[2]) * filter(3)
+ vqrshrun.s16 d4, q2, #4
+ vmovl.u8 q1, d4 // first block, in 16 bit
+ vmla.i16 q3, q12, d1[3] // p4(top[3]) * filter(4)
+ vmla.i16 q3, q8, d0[3] // p0(topleft) * filter(0)
+ vmla.i16 q3, q13, d2[3] // p5(left[0]) * filter(5)
+ vmla.i16 q3, q14, d3[3] // p6(left[1]) * filter(6)
+ vqrshrun.s16 d5, q3, #4
+ vzip.32 d4, d5
+ subs r4, r4, #2
+ vst1.8 {d4}, [r0, :64], r1
+ vmovl.u8 q0, d5
+ vst1.8 {d5}, [r6, :64], r1
+ bgt 8b
+ pop {r4-r8, pc}
+160:
+320:
+ vpush {q4-q5}
+ sub r1, r1, r3
+ mov lr, r3
+
+1:
+ vld1.32 {d0[]}, [r2], r7 // left (0-1) + topleft (2)
+ vmovl.u8 q0, d0 // left (0-1) + topleft (2)
+2:
+ vld1.8 {q2}, [r8]! // top(0-15)
+ vmul.i16 q3, q8, d0[2] // p0(topleft) * filter(0)
+ vmla.i16 q3, q13, d0[1] // p5(left[0]) * filter(5)
+ vmovl.u8 q1, d4 // top(0-7)
+ vmovl.u8 q2, d5 // top(8-15)
+ vmla.i16 q3, q14, d0[0] // p6(left[1]) * filter(6)
+ vmla.i16 q3, q9, d2[0] // p1(top[0]) * filter(1)
+ vmla.i16 q3, q10, d2[1] // p2(top[1]) * filter(2)
+ vmla.i16 q3, q11, d2[2] // p3(top[2]) * filter(3)
+ vmla.i16 q3, q12, d2[3] // p4(top[3]) * filter(4)
+
+ vmul.i16 q4, q9, d3[0] // p1(top[0]) * filter(1)
+ vmla.i16 q4, q10, d3[1] // p2(top[1]) * filter(2)
+ vmla.i16 q4, q11, d3[2] // p3(top[2]) * filter(3)
+ vqrshrun.s16 d6, q3, #4
+ vmovl.u8 q0, d6 // first block, in 16 bit
+ vmla.i16 q4, q12, d3[3] // p4(top[3]) * filter(4)
+ vmla.i16 q4, q8, d2[3] // p0(topleft) * filter(0)
+ vmla.i16 q4, q13, d0[3] // p5(left[0]) * filter(5)
+ vmla.i16 q4, q14, d1[3] // p6(left[1]) * filter(6)
+
+ vmul.i16 q5, q9, d4[0] // p1(top[0]) * filter(1)
+ vmla.i16 q5, q10, d4[1] // p2(top[1]) * filter(2)
+ vmla.i16 q5, q11, d4[2] // p3(top[2]) * filter(3)
+ vqrshrun.s16 d7, q4, #4
+ vmovl.u8 q0, d7 // second block, in 16 bit
+ vmla.i16 q5, q12, d4[3] // p4(top[3]) * filter(4)
+ vmla.i16 q5, q8, d3[3] // p0(topleft) * filter(0)
+ vmla.i16 q5, q13, d0[3] // p5(left[0]) * filter(5)
+ vmla.i16 q5, q14, d1[3] // p6(left[1]) * filter(6)
+
+ vmul.i16 q15, q9, d5[0] // p1(top[0]) * filter(1)
+ vmla.i16 q15, q10, d5[1] // p2(top[1]) * filter(2)
+ vmla.i16 q15, q11, d5[2] // p3(top[2]) * filter(3)
+ vqrshrun.s16 d8, q5, #4
+ vmovl.u8 q0, d8 // third block, in 16 bit
+ vmov.u8 r12, d5[6]
+ vmla.i16 q15, q12, d5[3] // p4(top[3]) * filter(4)
+ vmla.i16 q15, q8, d4[3] // p0(topleft) * filter(0)
+ vmla.i16 q15, q13, d0[3] // p5(left[0]) * filter(5)
+ vmla.i16 q15, q14, d1[3] // p6(left[1]) * filter(6)
+ vmov.8 d0[4], r12
+
+ subs r3, r3, #16
+ vqrshrun.s16 d9, q15, #4
+
+ vst4.32 {d6[0], d7[0], d8[0], d9[0]}, [r0, :128]!
+ vst4.32 {d6[1], d7[1], d8[1], d9[1]}, [r6, :128]!
+ ble 8f
+ vmov.u8 r12, d9[7]
+ vmov.8 d0[0], r12
+ vmov.u8 r12, d9[3]
+ vmov.8 d0[2], r12
+ b 2b
+8:
+ subs r4, r4, #2
+
+ ble 9f
+ sub r8, r6, lr
+ add r0, r0, r1
+ add r6, r6, r1
+ mov r3, lr
+ b 1b
+9:
+ vpop {q4-q5}
+ pop {r4-r8, pc}
+endfunc
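+
+// Reference sketch (illustrative only): a scalar model of the filter intra
+// recurrence vectorized above, assuming the taps are laid out as one signed
+// 8-bit coefficient per (input, output-pixel) pair for each 4x2 block; the
+// names taps/p/out are purely illustrative.
+//
+//   for each 4x2 output block (left to right, top to bottom):
+//       int p[7] = { topleft, top[0], top[1], top[2], top[3],
+//                    left[0], left[1] };
+//       for (int px = 0; px < 8; px++) {        // 8 pixels per 4x2 block
+//           int acc = 0;
+//           for (int k = 0; k < 7; k++)
+//               acc += taps[k][px] * p[k];
+//           out[px] = iclip_pixel((acc + 8) >> 4);   // vqrshrun.s16 #4
+//       }
+//       // the block's right column / bottom row become the left / top
+//       // inputs of the following blocks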
+
+// void pal_pred_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const uint16_t *const pal, const uint8_t *idx,
+// const int w, const int h);
+function pal_pred_8bpc_neon, export=1
+ push {r4-r5, lr}
+ ldrd r4, r5, [sp, #12]
+ vld1.16 {q0}, [r2, :128]
+ clz lr, r4
+ adr r12, L(pal_pred_tbl)
+ sub lr, lr, #25
+ ldr lr, [r12, lr, lsl #2]
+ vmovn.i16 d0, q0
+ add r12, r12, lr
+ add r2, r0, r1
+ bx r12
+
+ .align 2
+L(pal_pred_tbl):
+ .word 640f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 320f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 160f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 80f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 40f - L(pal_pred_tbl) + CONFIG_THUMB
+
+40:
+ lsl r1, r1, #1
+4:
+ vld1.8 {q1}, [r3, :128]!
+ subs r5, r5, #4
+ vtbl.8 d2, {d0}, d2
+ vtbl.8 d3, {d0}, d3
+ vst1.32 {d2[0]}, [r0, :32], r1
+ vst1.32 {d2[1]}, [r2, :32], r1
+ vst1.32 {d3[0]}, [r0, :32], r1
+ vst1.32 {d3[1]}, [r2, :32], r1
+ bgt 4b
+ pop {r4-r5, pc}
+80:
+ lsl r1, r1, #1
+8:
+ vld1.8 {q1, q2}, [r3, :128]!
+ subs r5, r5, #4
+ vtbl.8 d2, {d0}, d2
+ vtbl.8 d3, {d0}, d3
+ vst1.8 {d2}, [r0, :64], r1
+ vtbl.8 d4, {d0}, d4
+ vst1.8 {d3}, [r2, :64], r1
+ vtbl.8 d5, {d0}, d5
+ vst1.8 {d4}, [r0, :64], r1
+ vst1.8 {d5}, [r2, :64], r1
+ bgt 8b
+ pop {r4-r5, pc}
+160:
+ lsl r1, r1, #1
+16:
+ vld1.8 {q8, q9}, [r3, :128]!
+ subs r5, r5, #4
+ vld1.8 {q10, q11}, [r3, :128]!
+ vtbl.8 d16, {d0}, d16
+ vtbl.8 d17, {d0}, d17
+ vtbl.8 d18, {d0}, d18
+ vtbl.8 d19, {d0}, d19
+ vtbl.8 d20, {d0}, d20
+ vtbl.8 d21, {d0}, d21
+ vst1.8 {q8}, [r0, :128], r1
+ vtbl.8 d22, {d0}, d22
+ vst1.8 {q9}, [r2, :128], r1
+ vtbl.8 d23, {d0}, d23
+ vst1.8 {q10}, [r0, :128], r1
+ vst1.8 {q11}, [r2, :128], r1
+ bgt 16b
+ pop {r4-r5, pc}
+320:
+ lsl r1, r1, #1
+32:
+ vld1.8 {q8, q9}, [r3, :128]!
+ subs r5, r5, #2
+ vld1.8 {q10, q11}, [r3, :128]!
+ vtbl.8 d16, {d0}, d16
+ vtbl.8 d17, {d0}, d17
+ vtbl.8 d18, {d0}, d18
+ vtbl.8 d19, {d0}, d19
+ vtbl.8 d20, {d0}, d20
+ vtbl.8 d21, {d0}, d21
+ vst1.8 {q8, q9}, [r0, :128], r1
+ vtbl.8 d22, {d0}, d22
+ vtbl.8 d23, {d0}, d23
+ vst1.8 {q10, q11}, [r2, :128], r1
+ bgt 32b
+ pop {r4-r5, pc}
+640:
+ sub r1, r1, #32
+64:
+ vld1.8 {q8, q9}, [r3, :128]!
+ subs r5, r5, #1
+ vld1.8 {q10, q11}, [r3, :128]!
+ vtbl.8 d16, {d0}, d16
+ vtbl.8 d17, {d0}, d17
+ vtbl.8 d18, {d0}, d18
+ vtbl.8 d19, {d0}, d19
+ vtbl.8 d20, {d0}, d20
+ vtbl.8 d21, {d0}, d21
+ vst1.8 {q8, q9}, [r0, :128]!
+ vtbl.8 d22, {d0}, d22
+ vtbl.8 d23, {d0}, d23
+ vst1.8 {q10, q11}, [r0, :128], r1
+ bgt 64b
+ pop {r4-r5, pc}
+endfunc
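+
+// Reference sketch (illustrative only): pal_pred is a plain table lookup.
+// The 8 palette entries are narrowed to bytes once, then vtbl.8 does the
+// per-pixel indexing; a scalar model, assuming one index byte per pixel as
+// consumed here, would be
+//
+//   for (int y = 0; y < h; y++)
+//       for (int x = 0; x < w; x++)
+//           dst[y * stride + x] = (pixel)pal[idx[y * w + x]];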
+
+// void ipred_cfl_128_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha);
+function ipred_cfl_128_8bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz lr, r3
+ adr r12, L(ipred_cfl_128_tbl)
+ sub lr, lr, #26
+ ldr lr, [r12, lr, lsl #2]
+ vmov.i16 q0, #128 // dc
+ vdup.i16 q1, r6 // alpha
+ add r12, r12, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r12
+
+ .align 2
+L(ipred_cfl_128_tbl):
+L(ipred_cfl_splat_tbl):
+ .word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_splat_w8) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_splat_w4) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_splat_w4):
+ vld1.16 {q2, q3}, [r5, :128]!
+ vmul.i16 q2, q2, q1 // diff = ac * alpha
+ vmul.i16 q3, q3, q1
+ vshr.s16 q8, q2, #15 // sign = diff >> 15
+ vshr.s16 q9, q3, #15
+ vadd.i16 q2, q2, q8 // diff + sign
+ vadd.i16 q3, q3, q9
+ vrshr.s16 q2, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
+ vrshr.s16 q3, q3, #6
+ vadd.i16 q2, q2, q0 // dc + apply_sign()
+ vadd.i16 q3, q3, q0
+ vqmovun.s16 d4, q2 // iclip_pixel(dc + apply_sign())
+ vqmovun.s16 d5, q3
+ vst1.32 {d4[0]}, [r0, :32], r1
+ vst1.32 {d4[1]}, [r6, :32], r1
+ subs r4, r4, #4
+ vst1.32 {d5[0]}, [r0, :32], r1
+ vst1.32 {d5[1]}, [r6, :32], r1
+ bgt L(ipred_cfl_splat_w4)
+ pop {r4-r8, pc}
+L(ipred_cfl_splat_w8):
+ vld1.16 {q8, q9}, [r5, :128]!
+ vld1.16 {q10, q11}, [r5, :128]!
+ vmul.i16 q8, q8, q1 // diff = ac * alpha
+ vmul.i16 q9, q9, q1
+ vmul.i16 q10, q10, q1
+ vmul.i16 q11, q11, q1
+ vshr.s16 q12, q8, #15 // sign = diff >> 15
+ vshr.s16 q13, q9, #15
+ vshr.s16 q14, q10, #15
+ vshr.s16 q15, q11, #15
+ vadd.i16 q8, q8, q12 // diff + sign
+ vadd.i16 q9, q9, q13
+ vadd.i16 q10, q10, q14
+ vadd.i16 q11, q11, q15
+ vrshr.s16 q8, q8, #6 // (diff + sign + 32) >> 6 = apply_sign()
+ vrshr.s16 q9, q9, #6
+ vrshr.s16 q10, q10, #6
+ vrshr.s16 q11, q11, #6
+ vadd.i16 q8, q8, q0 // dc + apply_sign()
+ vadd.i16 q9, q9, q0
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q0
+ vqmovun.s16 d16, q8 // iclip_pixel(dc + apply_sign())
+ vqmovun.s16 d17, q9
+ vqmovun.s16 d18, q10
+ vqmovun.s16 d19, q11
+ vst1.8 {d16}, [r0, :64], r1
+ vst1.8 {d17}, [r6, :64], r1
+ subs r4, r4, #4
+ vst1.8 {d18}, [r0, :64], r1
+ vst1.8 {d19}, [r6, :64], r1
+ bgt L(ipred_cfl_splat_w8)
+ pop {r4-r8, pc}
+L(ipred_cfl_splat_w16):
+ add r12, r5, r3, lsl #1
+ sub r1, r1, r3
+ mov lr, r3
+1:
+ vld1.16 {q8, q9}, [r5, :128]!
+ vmul.i16 q8, q8, q1 // diff = ac * alpha
+ vld1.16 {q10, q11}, [r12, :128]!
+ vmul.i16 q9, q9, q1
+ vmul.i16 q10, q10, q1
+ vmul.i16 q11, q11, q1
+ vshr.s16 q12, q8, #15 // sign = diff >> 15
+ vshr.s16 q13, q9, #15
+ vshr.s16 q14, q10, #15
+ vshr.s16 q15, q11, #15
+ vadd.i16 q8, q8, q12 // diff + sign
+ vadd.i16 q9, q9, q13
+ vadd.i16 q10, q10, q14
+ vadd.i16 q11, q11, q15
+ vrshr.s16 q8, q8, #6 // (diff + sign + 32) >> 6 = apply_sign()
+ vrshr.s16 q9, q9, #6
+ vrshr.s16 q10, q10, #6
+ vrshr.s16 q11, q11, #6
+ vadd.i16 q8, q8, q0 // dc + apply_sign()
+ vadd.i16 q9, q9, q0
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q0
+ vqmovun.s16 d16, q8 // iclip_pixel(dc + apply_sign())
+ vqmovun.s16 d17, q9
+ vqmovun.s16 d18, q10
+ vqmovun.s16 d19, q11
+ subs r3, r3, #16
+ vst1.16 {q8}, [r0, :128]!
+ vst1.16 {q9}, [r6, :128]!
+ bgt 1b
+ subs r4, r4, #2
+ add r5, r5, lr, lsl #1
+ add r12, r12, lr, lsl #1
+ add r0, r0, r1
+ add r6, r6, r1
+ mov r3, lr
+ bgt 1b
+ pop {r4-r8, pc}
+endfunc
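+
+// Reference sketch (illustrative only): the L(ipred_cfl_splat_*) loops above
+// apply the CfL scaling documented in the inline comments. Mirroring the
+// NEON sequence in scalar form:
+//
+//   int diff  = ac[x] * alpha;               // vmul.i16 (fits in 16 bits)
+//   int sign  = diff < 0 ? -1 : 0;           // vshr.s16 #15
+//   int delta = (diff + sign + 32) >> 6;     // vrshr.s16 #6
+//   dst[x]    = iclip_pixel(dc + delta);     // vadd.i16 + vqmovun.s16
+//
+// which is equivalent to dc + apply_sign((abs(diff) + 32) >> 6, diff);
+// ipred_cfl_128 simply uses dc = 128, half of the 8 bpc pixel range.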
+
+// void ipred_cfl_top_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha);
+function ipred_cfl_top_8bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz lr, r3
+ adr r12, L(ipred_cfl_top_tbl)
+ sub lr, lr, #26
+ ldr lr, [r12, lr, lsl #2]
+ vdup.16 q1, r6 // alpha
+ add r2, r2, #1
+ add r12, r12, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r12
+
+ .align 2
+L(ipred_cfl_top_tbl):
+ .word 32f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+ .word 16f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+ .word 8f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+ .word 4f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+
+4:
+ vld1.32 {d0[]}, [r2]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #2
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w4)
+8:
+ vld1.8 {d0}, [r2]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #3
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w8)
+16:
+ vld1.8 {q0}, [r2]
+ vaddl.u8 q0, d0, d1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #4
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+32:
+ vld1.8 {q2, q3}, [r2]
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q3, d6, d7
+ vadd.u16 q0, q2, q3
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #5
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+endfunc
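+
+// Reference sketch (illustrative only): the DC term computed above is just
+// the rounded average of the top neighbours,
+//
+//   dc = (sum(top[0 .. width-1]) + (width >> 1)) >> log2(width);
+//
+// done with vpaddl/vpadd reductions and one rounding shift; ipred_cfl_left
+// below does the same over the left column.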
+
+// void ipred_cfl_left_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha);
+function ipred_cfl_left_8bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ sub r2, r2, r4
+ clz lr, r3
+ clz r8, r4
+ adr r12, L(ipred_cfl_splat_tbl)
+ adr r7, L(ipred_cfl_left_tbl)
+ sub lr, lr, #26
+ sub r8, r8, #26
+ ldr lr, [r12, lr, lsl #2]
+ ldr r8, [r7, r8, lsl #2]
+ vdup.16 q1, r6 // alpha
+ add r12, r12, lr
+ add r7, r7, r8
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r7
+
+ .align 2
+L(ipred_cfl_left_tbl):
+ .word L(ipred_cfl_left_h32) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_left_h16) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_left_h8) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_left_h4) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_left_h4):
+ vld1.32 {d0[]}, [r2, :32]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #2
+ vdup.16 q0, d0[0]
+ bx r12
+
+L(ipred_cfl_left_h8):
+ vld1.8 {d0}, [r2, :64]
+ vpaddl.u8 d0, d0
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #3
+ vdup.16 q0, d0[0]
+ bx r12
+
+L(ipred_cfl_left_h16):
+ vld1.8 {q0}, [r2, :128]
+ vaddl.u8 q0, d0, d1
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #4
+ vdup.16 q0, d0[0]
+ bx r12
+
+L(ipred_cfl_left_h32):
+ vld1.8 {q2, q3}, [r2, :128]
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q3, d6, d7
+ vadd.u16 q0, q2, q3
+ vadd.u16 d0, d0, d1
+ vpadd.u16 d0, d0
+ vpadd.u16 d0, d0
+ vrshr.u16 d0, d0, #5
+ vdup.16 q0, d0[0]
+ bx r12
+endfunc
+
+// void ipred_cfl_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha);
+function ipred_cfl_8bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ sub r2, r2, r4
+ add r8, r3, r4 // width + height
+ vdup.16 q1, r6 // alpha
+ clz lr, r3
+ clz r6, r4
+ vdup.16 d16, r8 // width + height
+ adr r7, L(ipred_cfl_tbl)
+ rbit r8, r8 // rbit(width + height)
+ sub lr, lr, #22 // 26 leading bits, minus table offset 4
+ sub r6, r6, #26
+ clz r8, r8 // ctz(width + height)
+ ldr lr, [r7, lr, lsl #2]
+ ldr r6, [r7, r6, lsl #2]
+ neg r8, r8 // -ctz(width + height)
+ add r12, r7, lr
+ add r7, r7, r6
+ vshr.u16 d16, d16, #1 // (width + height) >> 1
+ vdup.16 d17, r8 // -ctz(width + height)
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r7
+
+ .align 2
+L(ipred_cfl_tbl):
+ .word L(ipred_cfl_h32) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_h16) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_h8) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_h4) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w32) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w16) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w8) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w4) - L(ipred_cfl_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_h4):
+ vld1.32 {d0[]}, [r2, :32]!
+ vpaddl.u8 d0, d0
+ add r2, r2, #1
+ vpadd.i16 d0, d0
+ bx r12
+L(ipred_cfl_w4):
+ vld1.32 {d1[]}, [r2]
+ vadd.i16 d0, d0, d16
+ vpaddl.u8 d1, d1
+ vpadd.u16 d1, d1
+ cmp r4, #4
+ vadd.i16 d0, d0, d1
+ vshl.u16 d0, d0, d17
+ beq 1f
+ // h = 8/16
+ movw lr, #(0x3334/2)
+ movw r8, #(0x5556/2)
+ cmp r4, #16
+ it ne
+ movne lr, r8
+ vdup.16 d18, lr
+ vqdmulh.s16 d0, d0, d18
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w4)
+
+L(ipred_cfl_h8):
+ vld1.8 {d0}, [r2, :64]!
+ vpaddl.u8 d0, d0
+ vpadd.i16 d0, d0
+ add r2, r2, #1
+ vpadd.i16 d0, d0
+ bx r12
+L(ipred_cfl_w8):
+ vld1.8 {d1}, [r2]
+ vadd.i16 d0, d0, d16
+ vpaddl.u8 d1, d1
+ vpadd.i16 d1, d1
+ vpadd.i16 d1, d1
+ cmp r4, #8
+ vadd.i16 d0, d0, d1
+ vshl.u16 d0, d0, d17
+ beq 1f
+ // h = 4/16/32
+ cmp r4, #32
+ movw lr, #(0x3334/2)
+ movw r8, #(0x5556/2)
+ it ne
+ movne lr, r8
+ vdup.16 d18, lr
+ vqdmulh.s16 d0, d0, d18
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w8)
+
+L(ipred_cfl_h16):
+ vld1.8 {q0}, [r2, :128]!
+ vaddl.u8 q0, d0, d1
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0
+ add r2, r2, #1
+ vpadd.i16 d0, d0
+ bx r12
+L(ipred_cfl_w16):
+ vld1.8 {q2}, [r2]
+ vadd.i16 d0, d0, d16
+ vaddl.u8 q2, d4, d5
+ vadd.i16 d4, d4, d5
+ vpadd.i16 d4, d4
+ vpadd.i16 d4, d4
+ cmp r4, #16
+ vadd.i16 d0, d0, d4
+ vshl.u16 d0, d0, d17
+ beq 1f
+ // h = 4/8/32/64
+ tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
+ movw lr, #(0x3334/2)
+ movw r8, #(0x5556/2)
+ it ne
+ movne lr, r8
+ vdup.16 d18, lr
+ vqdmulh.s16 d0, d0, d18
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+
+L(ipred_cfl_h32):
+ vld1.8 {q2, q3}, [r2, :128]!
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q3, d6, d7
+ vadd.i16 q0, q2, q3
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0
+ add r2, r2, #1
+ vpadd.i16 d0, d0
+ bx r12
+L(ipred_cfl_w32):
+ vld1.8 {q2, q3}, [r2]
+ vadd.i16 d0, d0, d16
+ vaddl.u8 q2, d4, d5
+ vaddl.u8 q3, d6, d7
+ vadd.i16 q2, q2, q3
+ vadd.i16 d4, d4, d5
+ vpadd.i16 d4, d4
+ vpadd.i16 d4, d4
+ cmp r4, #32
+ vadd.i16 d0, d0, d4
+ vshl.u16 d0, d0, d17
+ beq 1f
+ // h = 8/16/64
+ cmp r4, #8
+ movw lr, #(0x3334/2)
+ movw r8, #(0x5556/2)
+ it ne
+ movne lr, r8
+ vdup.16 d18, lr
+ vqdmulh.s16 d0, d0, d18
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+endfunc
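+
+// Reference sketch (illustrative only): the combined DC above is
+//
+//   sum = sum(top[0 .. w-1]) + sum(left[0 .. h-1]);
+//   dc  = (sum + ((w + h) >> 1)) >> ctz(w + h);
+//
+// which is exact for square blocks. For rectangular blocks ctz(w + h) only
+// divides by the power-of-two factor, so the code fixes it up with a
+// fixed-point multiply: vqdmulh by 0x5556/2 (~1/3) for 2:1 blocks and by
+// 0x3334/2 (~1/5) for 4:1 blocks, selected by the height checks in each
+// width branch.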
+
+// void cfl_ac_420_8bpc_neon(int16_t *const ac, const pixel *const ypx,
+// const ptrdiff_t stride, const int w_pad,
+// const int h_pad, const int cw, const int ch);
+function ipred_cfl_ac_420_8bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz r8, r5
+ lsl r4, r4, #2
+ adr r7, L(ipred_cfl_ac_420_tbl)
+ sub r8, r8, #27
+ ldr r8, [r7, r8, lsl #2]
+ vmov.i16 q8, #0
+ vmov.i16 q9, #0
+ vmov.i16 q10, #0
+ vmov.i16 q11, #0
+ add r7, r7, r8
+ sub r8, r6, r4 // height - h_pad
+ rbit lr, r5 // rbit(width)
+ rbit r12, r6 // rbit(height)
+ clz lr, lr // ctz(width)
+ clz r12, r12 // ctz(height)
+ add lr, lr, r12 // log2sz
+ add r12, r1, r2
+ vdup.32 d31, lr
+ lsl r2, r2, #1
+ vneg.s32 d31, d31 // -log2sz
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_420_tbl):
+ .word L(ipred_cfl_ac_420_w16) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w8) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w4) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_420_w4):
+1: // Copy and subsample input
+ vld1.8 {d0}, [r1, :64], r2
+ vld1.8 {d2}, [r12, :64], r2
+ vld1.8 {d1}, [r1, :64], r2
+ vld1.8 {d3}, [r12, :64], r2
+ vpaddl.u8 q0, q0
+ vpaddl.u8 q1, q1
+ vadd.i16 q0, q0, q1
+ vshl.i16 q0, q0, #1
+ subs r8, r8, #2
+ vst1.16 {q0}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ bgt 1b
+ cmp r4, #0
+ vmov d0, d1
+ vmov d2, d1
+ vmov d3, d1
+L(ipred_cfl_ac_420_w4_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q8, q8, q1
+ bgt 2b
+3:
+L(ipred_cfl_ac_420_w4_calc_subtract_dc):
+ // Aggregate the sums
+ vadd.i16 q0, q8, q9
+ vadd.i16 q1, q10, q11
+ vpaddl.u16 q0, q0
+ vpaddl.u16 q1, q1
+ vadd.i32 q0, q1
+ vadd.i32 d0, d0, d1
+ vpadd.i32 d0, d0, d0 // sum
+ sub r0, r0, r6, lsl #3
+ vrshl.u32 d16, d0, d31 // (sum + (1 << (log2sz - 1))) >>= log2sz
+ vdup.16 q8, d16[0]
+L(ipred_cfl_ac_420_w4_subtract_dc):
+6: // Subtract dc from ac
+ vld1.16 {q0, q1}, [r0, :128]
+ subs r6, r6, #4
+ vsub.i16 q0, q0, q8
+ vsub.i16 q1, q1, q8
+ vst1.16 {q0, q1}, [r0, :128]!
+ bgt 6b
+ pop {r4-r8, pc}
+
+L(ipred_cfl_ac_420_w8):
+ cmp r3, #0
+ bne L(ipred_cfl_ac_420_w8_wpad)
+1: // Copy and subsample input, without padding
+ vld1.8 {q0}, [r1, :128], r2
+ vld1.8 {q1}, [r12, :128], r2
+ vld1.8 {q2}, [r1, :128], r2
+ vpaddl.u8 q0, q0
+ vld1.8 {q3}, [r12, :128], r2
+ vpaddl.u8 q1, q1
+ vpaddl.u8 q2, q2
+ vpaddl.u8 q3, q3
+ vadd.i16 q0, q0, q1
+ vadd.i16 q2, q2, q3
+ vshl.i16 q0, q0, #1
+ vshl.i16 q1, q2, #1
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q1
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_420_w8_wpad):
+1: // Copy and subsample input, padding 4
+ vld1.16 {d0}, [r1, :64], r2
+ vld1.16 {d2}, [r12, :64], r2
+ vld1.16 {d1}, [r1, :64], r2
+ vld1.16 {d3}, [r12, :64], r2
+ vpaddl.u8 q0, q0
+ vpaddl.u8 q1, q1
+ vadd.i16 q0, q0, q1
+ vshl.i16 q0, q0, #1
+ vdup.16 d3, d1[3]
+ vmov d2, d1
+ vdup.16 d1, d0[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q1
+
+L(ipred_cfl_ac_420_w8_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q1
+ bgt 2b
+3:
+
+ // Double the height and reuse the w4 summing/subtracting
+ lsl r6, r6, #1
+ b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
+
+L(ipred_cfl_ac_420_w16):
+ adr r7, L(ipred_cfl_ac_420_w16_tbl)
+ ldr r3, [r7, r3, lsl #2]
+ add r7, r7, r3
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_420_w16_tbl):
+ .word L(ipred_cfl_ac_420_w16_wpad0) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w16_wpad1) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w16_wpad2) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w16_wpad3) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_420_w16_wpad0):
+1: // Copy and subsample input, without padding
+ vld1.8 {q0, q1}, [r1, :128], r2
+ vld1.8 {q2, q3}, [r12, :128], r2
+ vpaddl.u8 q0, q0
+ vld1.8 {q12, q13}, [r1, :128], r2
+ vpaddl.u8 q1, q1
+ vpaddl.u8 q2, q2
+ vpaddl.u8 q3, q3
+ vadd.i16 q0, q0, q2
+ vadd.i16 q1, q1, q3
+ vld1.8 {q2, q3}, [r12, :128], r2
+ vpaddl.u8 q12, q12
+ vpaddl.u8 q13, q13
+ vpaddl.u8 q2, q2
+ vpaddl.u8 q3, q3
+ vadd.i16 q12, q12, q2
+ vadd.i16 q13, q13, q3
+ vshl.i16 q0, q0, #1
+ vshl.i16 q1, q1, #1
+ vshl.i16 q2, q12, #1
+ vshl.i16 q3, q13, #1
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_wpad1):
+1: // Copy and subsample input, padding 4
+ vldr d2, [r1, #16]
+ vld1.8 {q0}, [r1, :128], r2
+ vldr d6, [r12, #16]
+ vld1.8 {q2}, [r12, :128], r2
+ vpaddl.u8 d2, d2
+ vldr d26, [r1, #16]
+ vpaddl.u8 q0, q0
+ vld1.8 {q12}, [r1, :128], r2
+ vpaddl.u8 d6, d6
+ vldr d30, [r12, #16]
+ vpaddl.u8 q2, q2
+ vld1.8 {q14}, [r12, :128], r2
+ vpaddl.u8 d26, d26
+ vpaddl.u8 q12, q12
+ vpaddl.u8 d30, d30
+ vpaddl.u8 q14, q14
+ vadd.i16 d2, d2, d6
+ vadd.i16 q0, q0, q2
+ vadd.i16 d26, d26, d30
+ vadd.i16 q12, q12, q14
+ vshl.i16 d2, d2, #1
+ vshl.i16 q0, q0, #1
+ vshl.i16 d6, d26, #1
+ vshl.i16 q2, q12, #1
+ vdup.16 d3, d2[3]
+ vdup.16 d7, d6[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_wpad2):
+1: // Copy and subsample input, padding 8
+ vld1.8 {q0}, [r1, :128], r2
+ vld1.8 {q1}, [r12, :128], r2
+ vld1.8 {q2}, [r1, :128], r2
+ vpaddl.u8 q0, q0
+ vld1.8 {q3}, [r12, :128], r2
+ vpaddl.u8 q1, q1
+ vpaddl.u8 q2, q2
+ vpaddl.u8 q3, q3
+ vadd.i16 q0, q0, q1
+ vadd.i16 q2, q2, q3
+ vshl.i16 q0, q0, #1
+ vshl.i16 q2, q2, #1
+ vdup.16 q1, d1[3]
+ vdup.16 q3, d5[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_wpad3):
+1: // Copy and subsample input, padding 12
+ vld1.8 {d0}, [r1, :64], r2
+ vld1.8 {d1}, [r12, :64], r2
+ vld1.8 {d4}, [r1, :64], r2
+ vpaddl.u8 q0, q0
+ vld1.8 {d5}, [r12, :64], r2
+ vpaddl.u8 q2, q2
+ vadd.i16 d0, d0, d1
+ vadd.i16 d4, d4, d5
+ vshl.i16 d0, d0, #1
+ vshl.i16 d4, d4, #1
+ vdup.16 q1, d0[3]
+ vdup.16 q3, d4[3]
+ vdup.16 d1, d0[3]
+ vdup.16 d5, d4[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 2b
+3:
+
+ // Quadruple the height and reuse the w4 summing/subtracting
+ lsl r6, r6, #2
+ b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
+endfunc
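+
+// Reference sketch (illustrative only): conceptually, the 4:2:0 AC
+// generation above averages each 2x2 luma block, scales everything into a
+// common fixed-point domain, and then removes the block mean:
+//
+//   ac[x] = (y[2*x] + y[2*x + 1] + y[2*x + stride] + y[2*x + 1 + stride]) << 1;
+//   // (the 4:2:2 and 4:4:4 variants below use << 2 on 2x1 pairs and << 3
+//   // on single pixels instead, so all three end up scaled by 8)
+//
+//   // after the whole (padded) block has been written:
+//   sum   = sum of all stored ac[] values;
+//   dc    = (sum + (1 << (log2sz - 1))) >> log2sz;   // vrshl by -log2sz
+//   ac[i] -= dc;                                     // for every entry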
+
+// void cfl_ac_422_8bpc_neon(int16_t *const ac, const pixel *const ypx,
+// const ptrdiff_t stride, const int w_pad,
+// const int h_pad, const int cw, const int ch);
+function ipred_cfl_ac_422_8bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz r8, r5
+ lsl r4, r4, #2
+ adr r7, L(ipred_cfl_ac_422_tbl)
+ sub r8, r8, #27
+ ldr r8, [r7, r8, lsl #2]
+ vmov.i16 q8, #0
+ vmov.i16 q9, #0
+ vmov.i16 q10, #0
+ vmov.i16 q11, #0
+ add r7, r7, r8
+ sub r8, r6, r4 // height - h_pad
+ rbit lr, r5 // rbit(width)
+ rbit r12, r6 // rbit(height)
+ clz lr, lr // ctz(width)
+ clz r12, r12 // ctz(height)
+ add lr, lr, r12 // log2sz
+ add r12, r1, r2
+ vdup.32 d31, lr
+ lsl r2, r2, #1
+ vneg.s32 d31, d31 // -log2sz
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_422_tbl):
+ .word L(ipred_cfl_ac_422_w16) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w8) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w4) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_422_w4):
+1: // Copy and subsample input
+ vld1.8 {d0}, [r1, :64], r2
+ vld1.8 {d1}, [r12, :64], r2
+ vld1.8 {d2}, [r1, :64], r2
+ vld1.8 {d3}, [r12, :64], r2
+ vpaddl.u8 q0, q0
+ vpaddl.u8 q1, q1
+ vshl.i16 q0, q0, #2
+ vshl.i16 q1, q1, #2
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ bgt 1b
+ cmp r4, #0
+ vmov d0, d3
+ vmov d1, d3
+ vmov d2, d3
+ b L(ipred_cfl_ac_420_w4_hpad)
+
+L(ipred_cfl_ac_422_w8):
+ cmp r3, #0
+ bne L(ipred_cfl_ac_422_w8_wpad)
+1: // Copy and subsample input, without padding
+ vld1.8 {q0}, [r1, :128], r2
+ vld1.8 {q1}, [r12, :128], r2
+ vld1.8 {q2}, [r1, :128], r2
+ vpaddl.u8 q0, q0
+ vld1.8 {q3}, [r12, :128], r2
+ vpaddl.u8 q1, q1
+ vpaddl.u8 q2, q2
+ vpaddl.u8 q3, q3
+ vshl.i16 q0, q0, #2
+ vshl.i16 q1, q1, #2
+ vshl.i16 q2, q2, #2
+ vshl.i16 q3, q3, #2
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q3
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_422_w8_wpad):
+1: // Copy and subsample input, padding 4
+ vld1.8 {d0}, [r1, :64], r2
+ vld1.8 {d1}, [r12, :64], r2
+ vld1.8 {d2}, [r1, :64], r2
+ vld1.8 {d3}, [r12, :64], r2
+ vpaddl.u8 q0, q0
+ vpaddl.u8 q1, q1
+ vshl.i16 q0, q0, #2
+ vshl.i16 q1, q1, #2
+ vdup.16 d7, d3[3]
+ vmov d6, d3
+ vdup.16 d5, d2[3]
+ vmov d4, d2
+ vdup.16 d3, d1[3]
+ vmov d2, d1
+ vdup.16 d1, d0[3]
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q3
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_422_w16):
+ adr r7, L(ipred_cfl_ac_422_w16_tbl)
+ ldr r3, [r7, r3, lsl #2]
+ add r7, r7, r3
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_422_w16_tbl):
+ .word L(ipred_cfl_ac_422_w16_wpad0) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w16_wpad1) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w16_wpad2) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w16_wpad3) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_422_w16_wpad0):
+1: // Copy and subsample input, without padding
+ vld1.8 {q0, q1}, [r1, :128], r2
+ vld1.8 {q2, q3}, [r12, :128], r2
+ vpaddl.u8 q0, q0
+ vpaddl.u8 q1, q1
+ vpaddl.u8 q2, q2
+ vpaddl.u8 q3, q3
+ vshl.i16 q0, q0, #2
+ vshl.i16 q1, q1, #2
+ vshl.i16 q2, q2, #2
+ vshl.i16 q3, q3, #2
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_422_w16_wpad1):
+1: // Copy and subsample input, padding 4
+ vldr d2, [r1, #16]
+ vld1.8 {q0}, [r1, :128], r2
+ vldr d6, [r12, #16]
+ vld1.8 {q2}, [r12, :128], r2
+ vpaddl.u8 d2, d2
+ vpaddl.u8 q0, q0
+ vpaddl.u8 d6, d6
+ vpaddl.u8 q2, q2
+ vshl.i16 d2, d2, #2
+ vshl.i16 q0, q0, #2
+ vshl.i16 d6, d6, #2
+ vshl.i16 q2, q2, #2
+ vdup.16 d3, d2[3]
+ vdup.16 d7, d6[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_422_w16_wpad2):
+1: // Copy and subsample input, padding 8
+ vld1.8 {q0}, [r1, :128], r2
+ vld1.8 {q2}, [r12, :128], r2
+ vpaddl.u8 q0, q0
+ vpaddl.u8 q2, q2
+ vshl.i16 q0, q0, #2
+ vshl.i16 q2, q2, #2
+ vdup.16 q1, d1[3]
+ vdup.16 q3, d5[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_422_w16_wpad3):
+1: // Copy and subsample input, padding 12
+ vld1.8 {d0}, [r1, :64], r2
+ vld1.8 {d1}, [r12, :64], r2
+ vpaddl.u8 q0, q0
+ vshl.i16 q0, q0, #2
+ vdup.16 q3, d1[3]
+ vdup.16 q1, d0[3]
+ vdup.16 d5, d1[3]
+ vmov d4, d1
+ vdup.16 d1, d0[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+endfunc
+
+// void cfl_ac_444_8bpc_neon(int16_t *const ac, const pixel *const ypx,
+// const ptrdiff_t stride, const int w_pad,
+// const int h_pad, const int cw, const int ch);
+function ipred_cfl_ac_444_8bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz r8, r5
+ lsl r4, r4, #2
+ adr r7, L(ipred_cfl_ac_444_tbl)
+ sub r8, r8, #26
+ ldr r8, [r7, r8, lsl #2]
+ vmov.i16 q8, #0
+ vmov.i16 q9, #0
+ vmov.i16 q10, #0
+ vmov.i16 q11, #0
+ add r7, r7, r8
+ sub r8, r6, r4 // height - h_pad
+ rbit lr, r5 // rbit(width)
+ rbit r12, r6 // rbit(height)
+ clz lr, lr // ctz(width)
+ clz r12, r12 // ctz(height)
+ add lr, lr, r12 // log2sz
+ add r12, r1, r2
+ vdup.32 d31, lr
+ lsl r2, r2, #1
+ vneg.s32 d31, d31 // -log2sz
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_444_tbl):
+ .word L(ipred_cfl_ac_444_w32) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w16) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w8) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w4) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_444_w4):
+1: // Copy and expand input
+ vld1.32 {d0[]}, [r1, :32], r2
+ vld1.32 {d0[1]}, [r12, :32], r2
+ vld1.32 {d2[]}, [r1, :32], r2
+ vld1.32 {d2[1]}, [r12, :32], r2
+ vshll.u8 q0, d0, #3
+ vshll.u8 q1, d2, #3
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ bgt 1b
+ cmp r4, #0
+ vmov d0, d3
+ vmov d1, d3
+ vmov d2, d3
+ b L(ipred_cfl_ac_420_w4_hpad)
+
+L(ipred_cfl_ac_444_w8):
+1: // Copy and expand input
+ vld1.16 {d0}, [r1, :64], r2
+ vld1.16 {d2}, [r12, :64], r2
+ vld1.16 {d4}, [r1, :64], r2
+ vshll.u8 q0, d0, #3
+ vld1.16 {d6}, [r12, :64], r2
+ vshll.u8 q1, d2, #3
+ vshll.u8 q2, d4, #3
+ vshll.u8 q3, d6, #3
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q3
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_444_w16):
+ cmp r3, #0
+ bne L(ipred_cfl_ac_444_w16_wpad)
+1: // Copy and expand input, without padding
+ vld1.8 {q1}, [r1, :128], r2
+ vld1.8 {q3}, [r12, :128], r2
+ vshll.u8 q0, d2, #3
+ vshll.u8 q1, d3, #3
+ vshll.u8 q2, d6, #3
+ vshll.u8 q3, d7, #3
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_444_w16_wpad):
+1: // Copy and expand input, padding 8
+ vld1.8 {d0}, [r1, :64], r2
+ vld1.8 {d4}, [r12, :64], r2
+ vshll.u8 q0, d0, #3
+ vshll.u8 q2, d4, #3
+ vdup.16 q1, d1[3]
+ vdup.16 q3, d5[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_444_w32):
+ adr r7, L(ipred_cfl_ac_444_w32_tbl)
+ ldr r3, [r7, r3, lsl #1] // (wpad >> 1) << 2
+ add r7, r7, r3
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_444_w32_tbl):
+ .word L(ipred_cfl_ac_444_w32_wpad0) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w32_wpad2) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w32_wpad4) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w32_wpad6) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_444_w32_wpad0):
+1: // Copy and expand input, without padding
+ vld1.8 {q2, q3}, [r1, :128], r2
+ vld1.8 {q13, q14}, [r12, :128], r2
+ vshll.u8 q0, d4, #3
+ vshll.u8 q1, d5, #3
+ vshll.u8 q2, d6, #3
+ vshll.u8 q3, d7, #3
+ vshll.u8 q12, d26, #3
+ vshll.u8 q13, d27, #3
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vshll.u8 q0, d28, #3
+ vshll.u8 q1, d29, #3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ vst1.16 {q12, q13}, [r0, :128]!
+ vadd.i16 q8, q8, q12
+ vadd.i16 q9, q9, q13
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q1
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_444_w32_hpad)
+
+L(ipred_cfl_ac_444_w32_wpad2):
+1: // Copy and expand input, padding 8
+ vldr d4, [r1, #16]
+ vld1.8 {q1}, [r1, :128], r2
+ vldr d28, [r12, #16]
+ vld1.8 {q13}, [r12, :128], r2
+ vshll.u8 q2, d4, #3
+ vshll.u8 q0, d2, #3
+ vshll.u8 q1, d3, #3
+ vshll.u8 q12, d26, #3
+ vshll.u8 q13, d27, #3
+ vdup.16 q3, d5[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vshll.u8 q0, d28, #3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ vdup.16 q1, d1[3]
+ vst1.16 {q12, q13}, [r0, :128]!
+ vadd.i16 q8, q8, q12
+ vadd.i16 q9, q9, q13
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q1
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_444_w32_hpad)
+
+L(ipred_cfl_ac_444_w32_wpad4):
+1: // Copy and expand input, padding 16
+ vld1.8 {q1}, [r1, :128], r2
+ vld1.8 {q13}, [r12, :128], r2
+ vshll.u8 q0, d2, #3
+ vshll.u8 q1, d3, #3
+ vshll.u8 q12, d26, #3
+ vshll.u8 q13, d27, #3
+ vdup.16 q2, d3[3]
+ vdup.16 q3, d3[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vdup.16 q0, d27[3]
+ vdup.16 q1, d27[3]
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ vst1.16 {q12, q13}, [r0, :128]!
+ vadd.i16 q8, q8, q12
+ vadd.i16 q9, q9, q13
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q1
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_444_w32_hpad)
+
+L(ipred_cfl_ac_444_w32_wpad6):
+1: // Copy and expand input, padding 24
+ vld1.8 {d0}, [r1, :64], r2
+ vld1.8 {d24}, [r12, :64], r2
+ vshll.u8 q0, d0, #3
+ vshll.u8 q12, d24, #3
+ subs r8, r8, #2
+ vdup.16 q1, d1[3]
+ vdup.16 q2, d1[3]
+ vdup.16 q3, d1[3]
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q8, q8, q0
+ vadd.i16 q9, q9, q1
+ vdup.16 q13, d25[3]
+ vdup.16 q0, d25[3]
+ vdup.16 q1, d25[3]
+ vst1.16 {q2, q3}, [r0, :128]!
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q3
+ vst1.16 {q12, q13}, [r0, :128]!
+ vadd.i16 q8, q8, q12
+ vadd.i16 q9, q9, q13
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q1
+ bgt 1b
+ cmp r4, #0
+
+L(ipred_cfl_ac_444_w32_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #1
+ vst1.16 {q12, q13}, [r0, :128]!
+ vadd.i16 q8, q8, q12
+ vadd.i16 q9, q9, q13
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q10, q10, q0
+ vadd.i16 q11, q11, q1
+ bgt 2b
+3:
+
+ // Multiply the height by eight and reuse the w4 subtracting
+ lsl r6, r6, #3
+ // Aggregate the sums, with wider intermediates earlier than in
+ // ipred_cfl_ac_420_w4_calc_subtract_dc.
+ vpaddl.u16 q0, q8
+ vpaddl.u16 q1, q9
+ vpaddl.u16 q2, q10
+ vpaddl.u16 q3, q11
+ vadd.i32 q0, q0, q1
+ vadd.i32 q2, q2, q3
+ vadd.i32 q0, q0, q2
+ vadd.i32 d0, d0, d1
+ vpadd.i32 d0, d0, d0 // sum
+ sub r0, r0, r6, lsl #3
+ vrshl.u32 d16, d0, d31 // (sum + (1 << (log2sz - 1))) >>= log2sz
+ vdup.16 q8, d16[0]
+ b L(ipred_cfl_ac_420_w4_subtract_dc)
+endfunc
diff --git a/third_party/dav1d/src/arm/32/ipred16.S b/third_party/dav1d/src/arm/32/ipred16.S
new file mode 100644
index 0000000000..993d9500aa
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/ipred16.S
@@ -0,0 +1,3254 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2019, B Krishnan Iyer
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+// void ipred_dc_128_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height,
+// const int bitdepth_max);
+function ipred_dc_128_16bpc_neon, export=1
+ push {r4, lr}
+ ldr r4, [sp, #8]
+ ldr r12, [sp, #24]
+ clz r3, r3
+ adr r2, L(ipred_dc_128_tbl)
+ sub r3, r3, #25
+ vdup.16 q0, r12
+ ldr r3, [r2, r3, lsl #2]
+ add r12, r0, r1
+ vrshr.u16 q0, q0, #1
+ add r2, r2, r3
+ lsl r1, r1, #1
+ bx r2
+
+ .align 2
+L(ipred_dc_128_tbl):
+ .word 640f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 8f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+ .word 4f - L(ipred_dc_128_tbl) + CONFIG_THUMB
+4:
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ bgt 4b
+ pop {r4, pc}
+8:
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ bgt 8b
+ pop {r4, pc}
+160:
+ vmov q1, q0
+16:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 16b
+ pop {r4, pc}
+320:
+ vmov q1, q0
+ sub r1, r1, #32
+32:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 32b
+ pop {r4, pc}
+640:
+ vmov q1, q0
+ sub r1, r1, #96
+64:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ subs r4, r4, #2
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 64b
+ pop {r4, pc}
+endfunc
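+
+// Note (illustrative): for high bit depth the constant DC is derived from
+// the bitdepth_max argument instead of being hardcoded:
+//
+//   dc = (bitdepth_max + 1) >> 1;   // vrshr.u16 #1: 512 for 10 bpc,
+//                                   // 2048 for 12 bpc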
+
+// void ipred_v_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_v_16bpc_neon, export=1
+ push {r4, lr}
+ ldr lr, [sp, #8]
+ clz r3, r3
+ adr r4, L(ipred_v_tbl)
+ sub r3, r3, #25
+ ldr r3, [r4, r3, lsl #2]
+ add r2, r2, #2
+ add r4, r4, r3
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r4
+
+ .align 2
+L(ipred_v_tbl):
+ .word 640f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_v_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_v_tbl) + CONFIG_THUMB
+
+40:
+ vld1.16 {d0}, [r2]
+4:
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ subs lr, lr, #4
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ bgt 4b
+ pop {r4, pc}
+80:
+ vld1.16 {q0}, [r2]
+8:
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ subs lr, lr, #4
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ bgt 8b
+ pop {r4, pc}
+160:
+ vld1.16 {q0, q1}, [r2]
+16:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs lr, lr, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 16b
+ pop {r4, pc}
+320:
+ vld1.16 {q0, q1}, [r2]!
+ sub r1, r1, #32
+ vld1.16 {q2, q3}, [r2]
+32:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d4, d5, d6, d7}, [r0, :128], r1
+ vst1.16 {d4, d5, d6, d7}, [r12, :128], r1
+ subs lr, lr, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d4, d5, d6, d7}, [r0, :128], r1
+ vst1.16 {d4, d5, d6, d7}, [r12, :128], r1
+ bgt 32b
+ pop {r4, pc}
+640:
+ vld1.16 {q0, q1}, [r2]!
+ sub r1, r1, #96
+ vld1.16 {q2, q3}, [r2]!
+ vld1.16 {q8, q9}, [r2]!
+ vld1.16 {q10, q11}, [r2]!
+64:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d4, d5, d6, d7}, [r0, :128]!
+ vst1.16 {d4, d5, d6, d7}, [r12, :128]!
+ subs lr, lr, #2
+ vst1.16 {d16, d17, d18, d19}, [r0, :128]!
+ vst1.16 {d16, d17, d18, d19}, [r12, :128]!
+ vst1.16 {d20, d21, d22, d23}, [r0, :128], r1
+ vst1.16 {d20, d21, d22, d23}, [r12, :128], r1
+ bgt 64b
+ pop {r4, pc}
+endfunc
+
+// void ipred_h_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_h_16bpc_neon, export=1
+ push {r4-r5, lr}
+ ldr r4, [sp, #12]
+ clz r3, r3
+ adr r5, L(ipred_h_tbl)
+ sub r3, r3, #25
+ ldr r3, [r5, r3, lsl #2]
+ sub r2, r2, #2
+ mov lr, #-2
+ add r5, r5, r3
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_h_tbl):
+ .word 640f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 8f - L(ipred_h_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_h_tbl) + CONFIG_THUMB
+40:
+ sub r2, r2, #6
+ mov lr, #-8
+4:
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r2], lr
+ vst1.16 {d3}, [r0, :64], r1
+ vst1.16 {d2}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d1}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ bgt 4b
+ pop {r4-r5, pc}
+8:
+ vld1.16 {d0[], d1[]}, [r2], lr
+ subs r4, r4, #4
+ vld1.16 {d2[], d3[]}, [r2], lr
+ vst1.16 {q0}, [r0, :128], r1
+ vld1.16 {d4[], d5[]}, [r2], lr
+ vst1.16 {q1}, [r12, :128], r1
+ vld1.16 {d6[], d7[]}, [r2], lr
+ vst1.16 {q2}, [r0, :128], r1
+ vst1.16 {q3}, [r12, :128], r1
+ bgt 8b
+ pop {r4-r5, pc}
+160:
+ sub r1, r1, #16
+16:
+ vld1.16 {d0[], d1[]}, [r2], lr
+ subs r4, r4, #4
+ vld1.16 {d2[], d3[]}, [r2], lr
+ vst1.16 {q0}, [r0, :128]!
+ vld1.16 {d4[], d5[]}, [r2], lr
+ vst1.16 {q1}, [r12, :128]!
+ vld1.16 {d6[], d7[]}, [r2], lr
+ vst1.16 {q0}, [r0, :128], r1
+ vst1.16 {q1}, [r12, :128], r1
+ vst1.16 {q2}, [r0, :128]!
+ vst1.16 {q3}, [r12, :128]!
+ vst1.16 {q2}, [r0, :128], r1
+ vst1.16 {q3}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5, pc}
+320:
+ sub r1, r1, #48
+32:
+ vld1.16 {d0[], d1[]}, [r2], lr
+ subs r4, r4, #4
+ vld1.16 {d2[], d3[]}, [r2], lr
+ vst1.16 {q0}, [r0, :128]!
+ vld1.16 {d4[], d5[]}, [r2], lr
+ vst1.16 {q1}, [r12, :128]!
+ vld1.16 {d6[], d7[]}, [r2], lr
+ vst1.16 {q0}, [r0, :128]!
+ vst1.16 {q1}, [r12, :128]!
+ vst1.16 {q0}, [r0, :128]!
+ vst1.16 {q1}, [r12, :128]!
+ vst1.16 {q0}, [r0, :128], r1
+ vst1.16 {q1}, [r12, :128], r1
+ vst1.16 {q2}, [r0, :128]!
+ vst1.16 {q3}, [r12, :128]!
+ vst1.16 {q2}, [r0, :128]!
+ vst1.16 {q3}, [r12, :128]!
+ vst1.16 {q2}, [r0, :128]!
+ vst1.16 {q3}, [r12, :128]!
+ vst1.16 {q2}, [r0, :128], r1
+ vst1.16 {q3}, [r12, :128], r1
+ bgt 32b
+ pop {r4-r5, pc}
+640:
+ sub r1, r1, #96
+64:
+ vld1.16 {d0[], d1[]}, [r2], lr
+ subs r4, r4, #2
+ vld1.16 {d4[], d5[]}, [r2], lr
+ vmov q1, q0
+ vmov q3, q2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vst1.16 {q2, q3}, [r12, :128]!
+ vst1.16 {q0, q1}, [r0, :128]!
+ vst1.16 {q2, q3}, [r12, :128]!
+ vst1.16 {q0, q1}, [r0, :128]!
+ vst1.16 {q2, q3}, [r12, :128]!
+ vst1.16 {q0, q1}, [r0, :128], r1
+ vst1.16 {q2, q3}, [r12, :128], r1
+ bgt 64b
+ pop {r4-r5, pc}
+endfunc
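+
+// Reference sketch (illustrative only): the V and H predictors above are
+// plain copies of the neighbouring row/column, with H walking the left
+// edge backwards:
+//
+//   ipred_v: dst[y * stride + x] = top[x];
+//   ipred_h: dst[y * stride + x] = left[y];   // left[y] == topleft[-(1 + y)]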
+
+// void ipred_dc_top_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_dc_top_16bpc_neon, export=1
+ push {r4-r5, lr}
+ ldr r4, [sp, #12]
+ clz r3, r3
+ adr r5, L(ipred_dc_top_tbl)
+ sub r3, r3, #25
+ ldr r3, [r5, r3, lsl #2]
+ add r2, r2, #2
+ add r5, r5, r3
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_dc_top_tbl):
+ .word 640f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_dc_top_tbl) + CONFIG_THUMB
+
+40:
+ vld1.16 {d0}, [r2]
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #2
+ vdup.16 d0, d0[0]
+4:
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ bgt 4b
+ pop {r4-r5, pc}
+80:
+ vld1.16 {d0, d1}, [r2]
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #3
+ vdup.16 q0, d0[0]
+8:
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ bgt 8b
+ pop {r4-r5, pc}
+160:
+ vld1.16 {d0, d1, d2, d3}, [r2]
+ vadd.i16 q0, q0, q1
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d4, d0, #4
+ vdup.16 q0, d4[0]
+ vdup.16 q1, d4[0]
+16:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5, pc}
+320:
+ vld1.16 {d0, d1, d2, d3}, [r2]!
+ vld1.16 {d4, d5, d6, d7}, [r2]
+ vadd.i16 q0, q0, q1
+ vadd.i16 q2, q2, q3
+ vadd.i16 q0, q0, q2
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpaddl.u16 d0, d0
+ vrshrn.i32 d18, q0, #5
+ vdup.16 q0, d18[0]
+ vdup.16 q1, d18[0]
+ sub r1, r1, #32
+32:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 32b
+ pop {r4-r5, pc}
+640:
+ vld1.16 {d0, d1, d2, d3}, [r2]!
+ vld1.16 {d4, d5, d6, d7}, [r2]!
+ vadd.i16 q0, q0, q1
+ vld1.16 {d16, d17, d18, d19}, [r2]!
+ vadd.i16 q2, q2, q3
+ vld1.16 {d20, d21, d22, d23}, [r2]
+ vadd.i16 q8, q8, q9
+ vadd.i16 q10, q10, q11
+ vadd.i16 q0, q0, q2
+ vadd.i16 q8, q8, q10
+ vadd.i16 q0, q0, q8
+ vadd.i16 d0, d0, d1
+ vpaddl.u16 d0, d0
+ vpadd.i32 d0, d0, d0
+ vrshrn.i32 d18, q0, #6
+ vdup.16 q0, d18[0]
+ vdup.16 q1, d18[0]
+ sub r1, r1, #96
+64:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ subs r4, r4, #2
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 64b
+ pop {r4-r5, pc}
+endfunc
+
+// void ipred_dc_left_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_dc_left_16bpc_neon, export=1
+ push {r4-r5, lr}
+ ldr r4, [sp, #12]
+ sub r2, r2, r4, lsl #1
+ clz r3, r3
+ clz lr, r4
+ sub lr, lr, #25
+ adr r5, L(ipred_dc_left_tbl)
+ sub r3, r3, #20
+ ldr r3, [r5, r3, lsl #2]
+ ldr lr, [r5, lr, lsl #2]
+ add r3, r5, r3
+ add r5, r5, lr
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_dc_left_tbl):
+ .word L(ipred_dc_left_h64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_h4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w64) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w32) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w16) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w8) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_left_w4) - L(ipred_dc_left_tbl) + CONFIG_THUMB
+
+L(ipred_dc_left_h4):
+ vld1.16 {d0}, [r2, :64]
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #2
+ vdup.16 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w4):
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ bgt L(ipred_dc_left_w4)
+ pop {r4-r5, pc}
+L(ipred_dc_left_h8):
+ vld1.16 {d0, d1}, [r2, :128]
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #3
+ vdup.16 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w8):
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ bgt L(ipred_dc_left_w8)
+ pop {r4-r5, pc}
+L(ipred_dc_left_h16):
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]
+ vadd.i16 q0, q0, q1
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #4
+ vdup.16 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w16):
+ vmov q1, q0
+1:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 1b
+ pop {r4-r5, pc}
+L(ipred_dc_left_h32):
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]!
+ vld1.16 {d4, d5, d6, d7}, [r2, :128]
+ vadd.i16 q0, q0, q1
+ vadd.i16 q2, q2, q3
+ vadd.i16 q0, q0, q2
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpaddl.u16 d0, d0
+ vrshrn.i32 d0, q0, #5
+ vdup.16 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w32):
+ sub r1, r1, #32
+ vmov q1, q0
+1:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 1b
+ pop {r4-r5, pc}
+L(ipred_dc_left_h64):
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]!
+ vld1.16 {d4, d5, d6, d7}, [r2, :128]!
+ vadd.i16 q0, q0, q1
+ vld1.16 {d16, d17, d18, d19}, [r2, :128]!
+ vadd.i16 q2, q2, q3
+ vld1.16 {d20, d21, d22, d23}, [r2, :128]
+ vadd.i16 q8, q8, q9
+ vadd.i16 q10, q10, q11
+ vadd.i16 q0, q0, q2
+ vadd.i16 q8, q8, q10
+ vadd.i16 q0, q0, q8
+ vadd.i16 d0, d0, d1
+ vpaddl.u16 d0, d0
+ vpadd.i32 d0, d0, d0
+ vrshrn.i32 d0, q0, #6
+ vdup.16 q0, d0[0]
+ bx r3
+L(ipred_dc_left_w64):
+ sub r1, r1, #96
+ vmov q1, q0
+1:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ subs r4, r4, #2
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 1b
+ pop {r4-r5, pc}
+endfunc
+
+// void ipred_dc_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_dc_16bpc_neon, export=1
+ push {r4-r6, lr}
+ ldr r4, [sp, #16]
+ sub r2, r2, r4, lsl #1
+ add lr, r3, r4 // width + height
+ clz r3, r3
+ clz r12, r4
+ vdup.32 q15, lr // width + height
+ adr r5, L(ipred_dc_tbl)
+ rbit lr, lr // rbit(width + height)
+ sub r3, r3, #20 // 25 leading bits, minus table offset 5
+ sub r12, r12, #25
+ clz lr, lr // ctz(width + height)
+ ldr r3, [r5, r3, lsl #2]
+ ldr r12, [r5, r12, lsl #2]
+ neg lr, lr // -ctz(width + height)
+ add r3, r5, r3
+ add r5, r5, r12
+ vshr.u32 q15, q15, #1 // (width + height) >> 1
+ vdup.32 q14, lr // -ctz(width + height)
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_dc_tbl):
+ .word L(ipred_dc_h64) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h32) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h16) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h8) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_h4) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w64) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w32) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w16) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w8) - L(ipred_dc_tbl) + CONFIG_THUMB
+ .word L(ipred_dc_w4) - L(ipred_dc_tbl) + CONFIG_THUMB
+
+L(ipred_dc_h4):
+ vld1.16 {d0}, [r2, :64]!
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r3
+L(ipred_dc_w4):
+ vld1.16 {d2}, [r2]
+ vadd.i32 d0, d0, d30
+ vpadd.i16 d2, d2, d2
+ vpaddl.u16 d2, d2
+ cmp r4, #4
+ vadd.i32 d0, d0, d2
+ vshl.u32 d0, d0, d28
+ beq 1f
+ // h = 8/16
+ cmp r4, #16
+ movw lr, #0x6667
+ movw r5, #0xAAAB
+ it ne
+ movne lr, r5
+ vdup.32 d24, lr
+ vmul.i32 d0, d0, d24
+ vshr.u32 d0, d0, #17
+1:
+ vdup.16 d0, d0[0]
+2:
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d0}, [r12, :64], r1
+ bgt 2b
+ pop {r4-r6, pc}
+
+L(ipred_dc_h8):
+ vld1.16 {d0, d1}, [r2, :128]!
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r3
+L(ipred_dc_w8):
+ vld1.16 {d2, d3}, [r2]
+ vadd.i32 d0, d0, d30
+ vadd.i16 d2, d2, d3
+ vpadd.i16 d2, d2, d2
+ vpaddl.u16 d2, d2
+ cmp r4, #8
+ vadd.i32 d0, d0, d2
+ vshl.u32 d0, d0, d28
+ beq 1f
+ // h = 4/16/32
+ cmp r4, #32
+ movw lr, #0x6667
+ movw r5, #0xAAAB
+ it ne
+ movne lr, r5
+ vdup.32 d24, lr
+ vmul.i32 d0, d0, d24
+ vshr.u32 d0, d0, #17
+1:
+ vdup.16 q0, d0[0]
+2:
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1}, [r0, :128], r1
+ vst1.16 {d0, d1}, [r12, :128], r1
+ bgt 2b
+ pop {r4-r6, pc}
+
+L(ipred_dc_h16):
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]!
+ vadd.i16 q0, q0, q1
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r3
+L(ipred_dc_w16):
+ vld1.16 {d2, d3, d4, d5}, [r2]
+ vadd.i32 d0, d0, d30
+ vadd.i16 q1, q1, q2
+ vadd.i16 d2, d2, d3
+ vpadd.i16 d2, d2, d1
+ vpaddl.u16 d2, d2
+ cmp r4, #16
+ vadd.i32 d0, d0, d2
+ vshl.u32 d4, d0, d28
+ beq 1f
+ // h = 4/8/32/64
+ tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
+ movw lr, #0x6667
+ movw r5, #0xAAAB
+ it ne
+ movne lr, r5
+ vdup.32 d24, lr
+ vmul.i32 d4, d4, d24
+ vshr.u32 d4, d4, #17
+1:
+ vdup.16 q0, d4[0]
+ vdup.16 q1, d4[0]
+2:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 2b
+ pop {r4-r6, pc}
+
+L(ipred_dc_h32):
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]!
+ vld1.16 {d4, d5, d6, d7}, [r2, :128]!
+ vadd.i16 q0, q0, q1
+ vadd.i16 q2, q2, q3
+ vadd.i16 q0, q0, q2
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r3
+L(ipred_dc_w32):
+ vld1.16 {d2, d3, d4, d5}, [r2]!
+ vadd.i32 d0, d0, d30
+ vld1.16 {d16, d17, d18, d19}, [r2]
+ vadd.i16 q1, q1, q2
+ vadd.i16 q8, q8, q9
+ vadd.i16 q1, q1, q8
+ vadd.i16 d2, d2, d3
+ vpadd.i16 d2, d2, d2
+ vpaddl.u16 d2, d2
+ cmp r4, #32
+ vadd.i32 d0, d0, d2
+ vshl.u32 d4, d0, d28
+ beq 1f
+ // h = 8/16/64
+ cmp r4, #8
+ movw lr, #0x6667
+ movw r5, #0xAAAB
+ it ne
+ movne lr, r5
+ vdup.32 d24, lr
+ vmul.i32 d4, d4, d24
+ vshr.u32 d4, d4, #17
+1:
+ sub r1, r1, #32
+ vdup.16 q0, d4[0]
+ vdup.16 q1, d4[0]
+2:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ subs r4, r4, #4
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 2b
+ pop {r4-r6, pc}
+L(ipred_dc_h64):
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]!
+ vld1.16 {d4, d5, d6, d7}, [r2, :128]!
+ vadd.i16 q0, q0, q1
+ vld1.16 {d16, d17, d18, d19}, [r2, :128]!
+ vadd.i16 q2, q2, q3
+ vld1.16 {d20, d21, d22, d23}, [r2, :128]!
+ vadd.i16 q8, q8, q9
+ vadd.i16 q10, q10, q11
+ vadd.i16 q0, q0, q2
+ vadd.i16 q8, q8, q10
+ vadd.i16 q0, q0, q8
+ vadd.i16 d0, d0, d1
+ vpaddl.u16 d0, d0
+ add r2, r2, #2
+ vpadd.i32 d0, d0, d0
+ bx r3
+L(ipred_dc_w64):
+ vld1.16 {d2, d3, d4, d5}, [r2]!
+ vadd.i32 d0, d0, d30
+ vld1.16 {d16, d17, d18, d19}, [r2]!
+ vadd.i16 q1, q1, q2
+ vld1.16 {d20, d21, d22, d23}, [r2]!
+ vadd.i16 q8, q8, q9
+ vld1.16 {d24, d25, d26, d27}, [r2]!
+ vadd.i16 q10, q10, q11
+ vadd.i16 q12, q12, q13
+ vadd.i16 q1, q1, q8
+ vadd.i16 q10, q10, q12
+ vadd.i16 q1, q1, q10
+ vadd.i16 d2, d2, d3
+ vpaddl.u16 d2, d2
+ vpadd.i32 d2, d2, d2
+ cmp r4, #64
+ vadd.i32 d0, d0, d2
+ vshl.u32 d4, d0, d28
+ beq 1f
+ // h = 16/32
+ cmp r4, #16
+ movw lr, #0x6667
+ movw r5, #0xAAAB
+ it ne
+ movne lr, r5
+ vdup.32 d24, lr
+ vmul.i32 d4, d4, d24
+ vshr.u32 d4, d4, #17
+1:
+ sub r1, r1, #96
+ vdup.16 q0, d4[0]
+ vdup.16 q1, d4[0]
+2:
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ subs r4, r4, #2
+ vst1.16 {d0, d1, d2, d3}, [r0, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r12, :128]!
+ vst1.16 {d0, d1, d2, d3}, [r0, :128], r1
+ vst1.16 {d0, d1, d2, d3}, [r12, :128], r1
+ bgt 2b
+ pop {r4-r6, pc}
+endfunc
+
+// void ipred_paeth_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
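+// Paeth prediction: base = left + top - topleft per pixel; the output is
+// whichever of left, top and topleft is closest to base.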
+function ipred_paeth_16bpc_neon, export=1
+ push {r4-r6, lr}
+ vpush {q4}
+ ldr r4, [sp, #32]
+ clz lr, r3
+ adr r12, L(ipred_paeth_tbl)
+ sub lr, lr, #25
+ ldr lr, [r12, lr, lsl #2]
+ vld1.16 {d4[], d5[]}, [r2]
+ add r6, r2, #2
+ sub r2, r2, #4
+ add r12, r12, lr
+ mov r5, #-4
+ add lr, r0, r1
+ lsl r1, r1, #1
+ bx r12
+
+ .align 2
+L(ipred_paeth_tbl):
+ .word 640f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_paeth_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_paeth_tbl) + CONFIG_THUMB
+
+40:
+ sub r2, r2, #4
+ mov r5, #-8
+ vld1.16 {d6}, [r6]
+ vsub.i16 d16, d6, d4 // top - topleft
+ vmov d7, d6
+ vmov d17, d16
+4:
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r2, :64], r5
+ vadd.i16 q9, q8, q0 // base
+ vadd.i16 q10, q8, q1
+ vabd.s16 q11, q3, q9 // tdiff
+ vabd.s16 q12, q3, q10
+ vabd.s16 q13, q2, q9 // tldiff
+ vabd.s16 q14, q2, q10
+ vabd.s16 q9, q0, q9 // ldiff
+ vabd.s16 q10, q1, q10
+ vmin.u16 q15, q11, q13 // min(tdiff, tldiff)
+ vmin.u16 q4, q12, q14
+ vcge.u16 q11, q13, q11 // tldiff >= tdiff
+ vcge.u16 q12, q14, q12
+ vcge.u16 q9, q15, q9 // min(tdiff, tldiff) >= ldiff
+ vcge.u16 q10, q4, q10
+ vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
+ vbsl q11, q3, q2
+ vbit q12, q1, q10 // ldiff <= min ? left : ...
+ vbit q11, q0, q9
+ vst1.16 {d25}, [r0, :64], r1
+ vst1.16 {d24}, [lr, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d23}, [r0, :64], r1
+ vst1.16 {d22}, [lr, :64], r1
+ bgt 4b
+ vpop {q4}
+ pop {r4-r6, pc}
+80:
+160:
+320:
+640:
+ vld1.16 {q3}, [r6]!
+ mov r12, r3
+ sub r1, r1, r3, lsl #1
+1:
+ vld2.16 {d0[], d2[]}, [r2, :32], r5
+ vmov d1, d0
+ vmov d3, d2
+2:
+ vsub.i16 q8, q3, q2 // top - topleft
+ vadd.i16 q9, q8, q0 // base
+ vadd.i16 q10, q8, q1
+ vabd.s16 q11, q3, q9 // tdiff
+ vabd.s16 q12, q3, q10
+ vabd.s16 q13, q2, q9 // tldiff
+ vabd.s16 q14, q2, q10
+ vabd.s16 q9, q0, q9 // ldiff
+ vabd.s16 q10, q1, q10
+ vmin.u16 q15, q11, q13 // min(tdiff, tldiff)
+ vmin.u16 q4, q12, q14
+ vcge.u16 q11, q13, q11 // tldiff >= tdiff
+ vcge.u16 q12, q14, q12
+ vcge.u16 q9, q15, q9 // min(tdiff, tldiff) >= ldiff
+ vcge.u16 q10, q4, q10
+ vbsl q12, q3, q2 // tdiff <= tldiff ? top : topleft
+ vbsl q11, q3, q2
+ vbit q12, q1, q10 // ldiff <= min ? left : ...
+ vbit q11, q0, q9
+ subs r3, r3, #8
+ vst1.16 {q12}, [r0, :128]!
+ vst1.16 {q11}, [lr, :128]!
+ ble 8f
+ vld1.16 {q3}, [r6]!
+ b 2b
+8:
+ subs r4, r4, #2
+ ble 9f
+ // End of horizontal loop, move pointers to next two rows
+ sub r6, r6, r12, lsl #1
+ add r0, r0, r1
+ add lr, lr, r1
+ vld1.16 {q3}, [r6]!
+ mov r3, r12
+ b 1b
+9:
+ vpop {q4}
+ pop {r4-r6, pc}
+endfunc
+
+// void ipred_smooth_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
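+// Smooth prediction: every pixel is a weighted blend of the top row with
+// the bottom anchor (left[height-1]) and of the left column with the right
+// anchor (top[width-1]), using the sm_weights table. The code computes it
+// as (bottom+right)*256 + (left-right)*weights_hor + (top-bottom)*weights_ver,
+// rounded and shifted right by 9.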
+function ipred_smooth_16bpc_neon, export=1
+ push {r4-r10, lr}
+ ldr r4, [sp, #32]
+ movrel r10, X(sm_weights)
+ add r12, r10, r4
+ add r10, r10, r3
+ clz r9, r3
+ adr r5, L(ipred_smooth_tbl)
+ sub lr, r2, r4, lsl #1
+ sub r9, r9, #25
+ ldr r9, [r5, r9, lsl #2]
+ vld1.16 {d4[], d5[]}, [lr] // bottom
+ add r8, r2, #2
+ add r5, r5, r9
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_smooth_tbl):
+ .word 640f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_smooth_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_smooth_tbl) + CONFIG_THUMB
+
+40:
+ vld1.16 {d16}, [r8] // top
+ vld1.32 {d18[]}, [r10, :32] // weights_hor
+ sub r2, r2, #8
+ mov r7, #-8
+ vdup.16 q3, d16[3] // right
+ vsub.i16 q8, q8, q2 // top-bottom
+ vmovl.u8 q9, d18 // weights_hor
+ vadd.i16 d19, d4, d6 // bottom+right
+4:
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r2, :64], r7 // left
+ vld4.8 {d20[], d21[], d22[], d23[]}, [r12, :32]! // weights_ver
+ vshll.u16 q12, d19, #8 // (bottom+right)*256
+ vshll.u16 q13, d19, #8
+ vshll.u16 q14, d19, #8
+ vshll.u16 q15, d19, #8
+ vzip.32 d20, d21 // weights_ver
+ vzip.32 d22, d23
+ vsub.i16 q1, q1, q3 // left-right
+ vsub.i16 q0, q0, q3
+ vmovl.u8 q10, d20 // weights_ver
+ vmovl.u8 q11, d22
+ vmlal.s16 q12, d3, d18 // += (left-right)*weights_hor
+ vmlal.s16 q13, d2, d18 // (left flipped)
+ vmlal.s16 q14, d1, d18
+ vmlal.s16 q15, d0, d18
+ vmlal.s16 q12, d16, d20 // += (top-bottom)*weights_ver
+ vmlal.s16 q13, d16, d21
+ vmlal.s16 q14, d16, d22
+ vmlal.s16 q15, d16, d23
+ vrshrn.i32 d24, q12, #9
+ vrshrn.i32 d25, q13, #9
+ vrshrn.i32 d26, q14, #9
+ vrshrn.i32 d27, q15, #9
+ vst1.16 {d24}, [r0, :64], r1
+ vst1.16 {d25}, [r6, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d26}, [r0, :64], r1
+ vst1.16 {d27}, [r6, :64], r1
+ bgt 4b
+ pop {r4-r10, pc}
+80:
+ vld1.16 {q8}, [r8] // top
+ vld1.8 {d18}, [r10, :64] // weights_hor
+ sub r2, r2, #4
+ mov r7, #-4
+ vdup.16 q3, d17[3] // right
+ vsub.i16 q8, q8, q2 // top-bottom
+ vmovl.u8 q9, d18 // weights_hor
+ vadd.i16 d3, d4, d6 // bottom+right
+8:
+ vld2.16 {d0[], d1[]}, [r2, :32], r7 // left
+ vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
+ vshll.u16 q12, d3, #8 // (bottom+right)*256
+ vshll.u16 q13, d3, #8
+ vshll.u16 q14, d3, #8
+ vshll.u16 q15, d3, #8
+ vsub.i16 q0, q0, q3 // left-right
+ vmovl.u8 q10, d20 // weights_ver
+ vmovl.u8 q11, d22
+ vmlal.s16 q12, d1, d18 // += (left-right)*weights_hor
+ vmlal.s16 q13, d1, d19 // (left flipped)
+ vmlal.s16 q14, d0, d18
+ vmlal.s16 q15, d0, d19
+ vmlal.s16 q12, d16, d20 // += (top-bottom)*weights_ver
+ vmlal.s16 q13, d17, d20
+ vmlal.s16 q14, d16, d22
+ vmlal.s16 q15, d17, d22
+ vrshrn.i32 d24, q12, #9
+ vrshrn.i32 d25, q13, #9
+ vrshrn.i32 d26, q14, #9
+ vrshrn.i32 d27, q15, #9
+ subs r4, r4, #2
+ vst1.16 {q12}, [r0, :128], r1
+ vst1.16 {q13}, [r6, :128], r1
+ bgt 8b
+ pop {r4-r10, pc}
+160:
+320:
+640:
+ add lr, r2, r3, lsl #1
+ sub r2, r2, #4
+ mov r7, #-4
+ vld1.16 {d6[], d7[]}, [lr] // right
+ sub r1, r1, r3, lsl #1
+ mov r9, r3
+ vadd.i16 d3, d4, d6 // bottom+right
+
+1:
+ vld2.16 {d0[], d1[]}, [r2, :32], r7 // left
+ vld2.8 {d20[], d22[]}, [r12, :16]! // weights_ver
+ vsub.i16 q0, q0, q3 // left-right
+ vmovl.u8 q10, d20 // weights_ver
+ vmovl.u8 q11, d22
+2:
+ vld1.8 {d18}, [r10, :64]! // weights_hor
+ vld1.16 {q8}, [r8]! // top
+ vshll.u16 q12, d3, #8 // (bottom+right)*256
+ vshll.u16 q13, d3, #8
+ vmovl.u8 q9, d18 // weights_hor
+ vshll.u16 q14, d3, #8
+ vshll.u16 q15, d3, #8
+ vsub.i16 q8, q8, q2 // top-bottom
+ vmlal.s16 q12, d1, d18 // += (left-right)*weights_hor
+ vmlal.s16 q13, d1, d19 // (left flipped)
+ vmlal.s16 q14, d0, d18
+ vmlal.s16 q15, d0, d19
+ vmlal.s16 q12, d16, d20 // += (top-bottom)*weights_ver
+ vmlal.s16 q13, d17, d20
+ vmlal.s16 q14, d16, d22
+ vmlal.s16 q15, d17, d22
+ vrshrn.i32 d24, q12, #9
+ vrshrn.i32 d25, q13, #9
+ vrshrn.i32 d26, q14, #9
+ vrshrn.i32 d27, q15, #9
+ subs r3, r3, #8
+ vst1.16 {q12}, [r0, :128]!
+ vst1.16 {q13}, [r6, :128]!
+ bgt 2b
+ subs r4, r4, #2
+ ble 9f
+ sub r8, r8, r9, lsl #1
+ sub r10, r10, r9
+ add r0, r0, r1
+ add r6, r6, r1
+ mov r3, r9
+ b 1b
+9:
+ pop {r4-r10, pc}
+endfunc
+
+// void ipred_smooth_v_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
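+// The vertical-only blend is done with vqrdmulh against weights shifted
+// left by 7: vqrdmulh.s16 computes (2*a*b + 0x8000) >> 16, so with
+// b = weight << 7 this gives ((top-bottom)*weight + 128) >> 8 without a
+// widening multiply; the bottom anchor is then added back.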
+function ipred_smooth_v_16bpc_neon, export=1
+ push {r4-r7, lr}
+ ldr r4, [sp, #20]
+ movrel r7, X(sm_weights)
+ add r7, r7, r4
+ clz lr, r3
+ adr r5, L(ipred_smooth_v_tbl)
+ sub r12, r2, r4, lsl #1
+ sub lr, lr, #25
+ ldr lr, [r5, lr, lsl #2]
+ vld1.16 {d4[], d5[]}, [r12] // bottom
+ add r2, r2, #2
+ add r5, r5, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_smooth_v_tbl):
+ .word 640f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_smooth_v_tbl) + CONFIG_THUMB
+
+40:
+ vld1.16 {d6}, [r2] // top
+ vsub.i16 d6, d6, d4 // top-bottom
+ vmov d7, d6
+4:
+ vld4.8 {d16[], d17[], d18[], d19[]}, [r7, :32]! // weights_ver
+ vzip.32 d16, d17 // weights_ver
+ vzip.32 d18, d19
+ vshll.u8 q8, d16, #7 // weights_ver << 7
+ vshll.u8 q9, d18, #7
+ vqrdmulh.s16 q10, q3, q8 // ((top-bottom)*weights_ver + 128) >> 8
+ vqrdmulh.s16 q11, q3, q9
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q2
+ vst1.16 {d20}, [r0, :64], r1
+ vst1.16 {d21}, [r6, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d22}, [r0, :64], r1
+ vst1.16 {d23}, [r6, :64], r1
+ bgt 4b
+ pop {r4-r7, pc}
+80:
+ vld1.16 {q3}, [r2] // top
+ vsub.i16 q3, q3, q2 // top-bottom
+8:
+ vld4.8 {d16[], d18[], d20[], d22[]}, [r7, :32]! // weights_ver
+ vshll.u8 q8, d16, #7 // weights_ver << 7
+ vshll.u8 q9, d18, #7
+ vshll.u8 q10, d20, #7
+ vshll.u8 q11, d22, #7
+ vqrdmulh.s16 q8, q3, q8 // ((top-bottom)*weights_ver + 128) >> 8
+ vqrdmulh.s16 q9, q3, q9
+ vqrdmulh.s16 q10, q3, q10
+ vqrdmulh.s16 q11, q3, q11
+ vadd.i16 q8, q8, q2
+ vadd.i16 q9, q9, q2
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q2
+ vst1.16 {q8}, [r0, :128], r1
+ vst1.16 {q9}, [r6, :128], r1
+ subs r4, r4, #4
+ vst1.16 {q10}, [r0, :128], r1
+ vst1.16 {q11}, [r6, :128], r1
+ bgt 8b
+ pop {r4-r7, pc}
+160:
+320:
+640:
+ vpush {q4-q7}
+ // Set up pointers for four rows in parallel; r0, r6, r5, lr
+ add r5, r0, r1
+ add lr, r6, r1
+ lsl r1, r1, #1
+ sub r1, r1, r3, lsl #1
+ mov r12, r3
+
+1:
+ vld4.8 {d8[], d10[], d12[], d14[]}, [r7, :32]! // weights_ver
+ vshll.u8 q4, d8, #7 // weights_ver << 7
+ vshll.u8 q5, d10, #7
+ vshll.u8 q6, d12, #7
+ vshll.u8 q7, d14, #7
+2:
+ vld1.16 {q0, q1}, [r2]! // top
+ vsub.i16 q0, q0, q2 // top-bottom
+ vsub.i16 q1, q1, q2
+ vqrdmulh.s16 q8, q0, q4 // ((top-bottom)*weights_ver + 128) >> 8
+ vqrdmulh.s16 q9, q1, q4
+ vqrdmulh.s16 q10, q0, q5
+ vqrdmulh.s16 q11, q1, q5
+ vqrdmulh.s16 q12, q0, q6
+ vqrdmulh.s16 q13, q1, q6
+ vqrdmulh.s16 q14, q0, q7
+ vqrdmulh.s16 q15, q1, q7
+ vadd.i16 q8, q8, q2
+ vadd.i16 q9, q9, q2
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q2
+ vadd.i16 q12, q12, q2
+ vadd.i16 q13, q13, q2
+ vadd.i16 q14, q14, q2
+ vadd.i16 q15, q15, q2
+ subs r3, r3, #16
+ vst1.16 {q8, q9}, [r0, :128]!
+ vst1.16 {q10, q11}, [r6, :128]!
+ vst1.16 {q12, q13}, [r5, :128]!
+ vst1.16 {q14, q15}, [lr, :128]!
+ bgt 2b
+ subs r4, r4, #4
+ ble 9f
+ sub r2, r2, r12, lsl #1
+ add r0, r0, r1
+ add r6, r6, r1
+ add r5, r5, r1
+ add lr, lr, r1
+ mov r3, r12
+ b 1b
+9:
+ vpop {q4-q7}
+ pop {r4-r7, pc}
+endfunc
+
+// void ipred_smooth_h_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int a,
+// const int max_width, const int max_height);
+function ipred_smooth_h_16bpc_neon, export=1
+ push {r4-r8, lr}
+ ldr r4, [sp, #24]
+ movrel r8, X(sm_weights)
+ add r8, r8, r3
+ clz lr, r3
+ adr r5, L(ipred_smooth_h_tbl)
+ add r12, r2, r3, lsl #1
+ sub lr, lr, #25
+ ldr lr, [r5, lr, lsl #2]
+ vld1.16 {d4[], d5[]}, [r12] // right
+ add r5, r5, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ bx r5
+
+ .align 2
+L(ipred_smooth_h_tbl):
+ .word 640f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 320f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_smooth_h_tbl) + CONFIG_THUMB
+
+40:
+ vld1.32 {d6[]}, [r8, :32] // weights_hor
+ sub r2, r2, #8
+ mov r7, #-8
+ vshll.u8 q3, d6, #7 // weights_hor << 7
+4:
+ vld4.16 {d0[], d1[], d2[], d3[]}, [r2, :64], r7 // left
+ vsub.i16 q0, q0, q2 // left-right
+ vsub.i16 q1, q1, q2
+ subs r4, r4, #4
+ vqrdmulh.s16 q8, q1, q3 // ((left-right)*weights_hor + 128) >> 8
+ vqrdmulh.s16 q9, q0, q3 // (left flipped)
+ vadd.i16 q8, q8, q2
+ vadd.i16 q9, q9, q2
+ vst1.16 {d17}, [r0, :64], r1
+ vst1.16 {d16}, [r6, :64], r1
+ vst1.16 {d19}, [r0, :64], r1
+ vst1.16 {d18}, [r6, :64], r1
+ bgt 4b
+ pop {r4-r8, pc}
+80:
+ vld1.8 {d6}, [r8, :64] // weights_hor
+ sub r2, r2, #8
+ mov r7, #-8
+ vshll.u8 q3, d6, #7 // weights_hor << 7
+8:
+ vld1.16 {d23}, [r2, :64], r7 // left
+ subs r4, r4, #4
+ vsub.i16 d23, d23, d4 // left-right
+ vdup.16 q8, d23[3] // flip left
+ vdup.16 q9, d23[2]
+ vdup.16 q10, d23[1]
+ vdup.16 q11, d23[0]
+ vqrdmulh.s16 q8, q8, q3 // ((left-right)*weights_hor + 128) >> 8
+ vqrdmulh.s16 q9, q9, q3
+ vqrdmulh.s16 q10, q10, q3
+ vqrdmulh.s16 q11, q11, q3
+ vadd.i16 q8, q8, q2
+ vadd.i16 q9, q9, q2
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q2
+ vst1.16 {q8}, [r0, :128], r1
+ vst1.16 {q9}, [r6, :128], r1
+ vst1.16 {q10}, [r0, :128], r1
+ vst1.16 {q11}, [r6, :128], r1
+ bgt 8b
+ pop {r4-r8, pc}
+160:
+320:
+640:
+ vpush {q4-q7}
+ sub r2, r2, #8
+ mov r7, #-8
+ // Set up pointers for four rows in parallel; r0, r6, r5, lr
+ add r5, r0, r1
+ add lr, r6, r1
+ lsl r1, r1, #1
+ sub r1, r1, r3, lsl #1
+ mov r12, r3
+
+1:
+ vld1.16 {d15}, [r2, :64], r7 // left
+ vsub.i16 d15, d15, d4 // left-right
+ vdup.16 q4, d15[3] // flip left
+ vdup.16 q5, d15[2]
+ vdup.16 q6, d15[1]
+ vdup.16 q7, d15[0]
+2:
+ vld1.8 {q1}, [r8, :128]! // weights_hor
+ subs r3, r3, #16
+ vshll.u8 q0, d2, #7 // weights_hor << 7
+ vshll.u8 q1, d3, #7
+ vqrdmulh.s16 q8, q0, q4 // ((left-right)*weights_hor + 128) >> 8
+ vqrdmulh.s16 q9, q1, q4
+ vqrdmulh.s16 q10, q0, q5
+ vqrdmulh.s16 q11, q1, q5
+ vqrdmulh.s16 q12, q0, q6
+ vqrdmulh.s16 q13, q1, q6
+ vqrdmulh.s16 q14, q0, q7
+ vqrdmulh.s16 q15, q1, q7
+ vadd.i16 q8, q8, q2
+ vadd.i16 q9, q9, q2
+ vadd.i16 q10, q10, q2
+ vadd.i16 q11, q11, q2
+ vadd.i16 q12, q12, q2
+ vadd.i16 q13, q13, q2
+ vadd.i16 q14, q14, q2
+ vadd.i16 q15, q15, q2
+ vst1.16 {q8, q9}, [r0, :128]!
+ vst1.16 {q10, q11}, [r6, :128]!
+ vst1.16 {q12, q13}, [r5, :128]!
+ vst1.16 {q14, q15}, [lr, :128]!
+ bgt 2b
+ subs r4, r4, #4
+ ble 9f
+ sub r8, r8, r12
+ add r0, r0, r1
+ add r6, r6, r1
+ add r5, r5, r1
+ add lr, lr, r1
+ mov r3, r12
+ b 1b
+9:
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+endfunc
+
+// void ipred_filter_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height, const int filt_idx,
+// const int max_width, const int max_height,
+// const int bitdepth_max);
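+// The filter mode predicts the block in 4x2 sub-blocks from seven
+// neighbours p0-p6 with coefficients from filter_intra_taps. The 10 bpc
+// variant accumulates with 16-bit vmul/vmla and clamps with vmax/vmin; the
+// 12 bpc variant widens to 32 bits with vmull/vmlal and narrows with
+// vqrshrun before the final clamp.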
+.macro filter_fn bpc
+function ipred_filter_\bpc\()bpc_neon, export=1
+ movw r12, #511
+ ldrd r4, r5, [sp, #88]
+ and r5, r5, r12 // 511
+ movrel r6, X(filter_intra_taps)
+ lsl r5, r5, #6
+ add r6, r6, r5
+ vld1.8 {d20, d21, d22, d23}, [r6, :128]!
+ clz lr, r3
+ adr r5, L(ipred_filter\bpc\()_tbl)
+ vld1.8 {d27, d28, d29}, [r6, :64]
+ sub lr, lr, #26
+ ldr lr, [r5, lr, lsl #2]
+ vmovl.s8 q8, d20
+ vmovl.s8 q9, d21
+ add r5, r5, lr
+ vmovl.s8 q10, d22
+ vmovl.s8 q11, d23
+ add r6, r0, r1
+ lsl r1, r1, #1
+ vmovl.s8 q12, d27
+ vmovl.s8 q13, d28
+ vmovl.s8 q14, d29
+ mov r7, #-4
+ vdup.16 q15, r8
+ add r8, r2, #2
+ sub r2, r2, #4
+.if \bpc == 10
+ vmov.i16 q7, #0
+.endif
+ bx r5
+
+ .align 2
+L(ipred_filter\bpc\()_tbl):
+ .word 320f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
+ .word 160f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
+ .word 80f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
+ .word 40f - L(ipred_filter\bpc\()_tbl) + CONFIG_THUMB
+
+40:
+ vld1.16 {d0}, [r8] // top (0-3)
+4:
+ vld1.16 {d2}, [r2], r7 // left (0-1) + topleft (2)
+.if \bpc == 10
+ vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
+ vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
+ vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
+ vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
+ vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
+ vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
+ vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
+ vrshr.s16 q2, q2, #4
+ vmax.s16 q2, q2, q7
+.else
+ vmull.s16 q2, d18, d0[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q2, d20, d0[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q2, d22, d0[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q2, d24, d0[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q2, d16, d2[2] // p0(topleft) * filter(0)
+ vmlal.s16 q2, d26, d2[1] // p5(left[0]) * filter(5)
+ vmlal.s16 q2, d28, d2[0] // p6(left[1]) * filter(6)
+ vmull.s16 q3, d19, d0[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q3, d21, d0[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q3, d23, d0[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q3, d25, d0[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q3, d17, d2[2] // p0(topleft) * filter(0)
+ vmlal.s16 q3, d27, d2[1] // p5(left[0]) * filter(5)
+ vmlal.s16 q3, d29, d2[0] // p6(left[1]) * filter(6)
+ vqrshrun.s32 d4, q2, #4
+ vqrshrun.s32 d5, q3, #4
+.endif
+ vmin.s16 q2, q2, q15
+ subs r4, r4, #2
+ vst1.16 {d4}, [r0, :64], r1
+ vst1.16 {d5}, [r6, :64], r1
+ vmov d0, d5 // move top from [4-7] to [0-3]
+ bgt 4b
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+80:
+ vld1.16 {q0}, [r8] // top (0-7)
+8:
+ vld1.16 {d2}, [r2], r7 // left (0-1) + topleft (2)
+.if \bpc == 10
+ vmul.i16 q2, q9, d0[0] // p1(top[0]) * filter(1)
+ vmla.i16 q2, q10, d0[1] // p2(top[1]) * filter(2)
+ vmla.i16 q2, q11, d0[2] // p3(top[2]) * filter(3)
+ vmla.i16 q2, q12, d0[3] // p4(top[3]) * filter(4)
+ vmla.i16 q2, q8, d2[2] // p0(topleft) * filter(0)
+ vmla.i16 q2, q13, d2[1] // p5(left[0]) * filter(5)
+ vmla.i16 q2, q14, d2[0] // p6(left[1]) * filter(6)
+ vmul.i16 q3, q9, d1[0] // p1(top[0]) * filter(1)
+ vmla.i16 q3, q10, d1[1] // p2(top[1]) * filter(2)
+ vmla.i16 q3, q11, d1[2] // p3(top[2]) * filter(3)
+ vrshr.s16 q2, q2, #4
+ vmax.s16 q2, q2, q7
+ vmin.s16 q2, q2, q15
+ vmla.i16 q3, q12, d1[3] // p4(top[3]) * filter(4)
+ vmla.i16 q3, q8, d0[3] // p0(topleft) * filter(0)
+ vmla.i16 q3, q13, d4[3] // p5(left[0]) * filter(5)
+ vmla.i16 q3, q14, d5[3] // p6(left[1]) * filter(6)
+ vrshr.s16 q3, q3, #4
+ vmax.s16 q3, q3, q7
+.else
+ vmull.s16 q2, d18, d0[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q2, d20, d0[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q2, d22, d0[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q2, d24, d0[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q2, d16, d2[2] // p0(topleft) * filter(0)
+ vmlal.s16 q2, d26, d2[1] // p5(left[0]) * filter(5)
+ vmlal.s16 q2, d28, d2[0] // p6(left[1]) * filter(6)
+ vmull.s16 q3, d19, d0[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q3, d21, d0[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q3, d23, d0[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q3, d25, d0[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q3, d17, d2[2] // p0(topleft) * filter(0)
+ vmlal.s16 q3, d27, d2[1] // p5(left[0]) * filter(5)
+ vmlal.s16 q3, d29, d2[0] // p6(left[1]) * filter(6)
+ vqrshrun.s32 d4, q2, #4
+ vmull.s16 q4, d18, d1[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q4, d20, d1[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q4, d22, d1[2] // p3(top[2]) * filter(3)
+ vqrshrun.s32 d5, q3, #4
+ vmin.s16 q2, q2, q15
+ vmlal.s16 q4, d24, d1[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q4, d16, d0[3] // p0(topleft) * filter(0)
+ vmlal.s16 q4, d26, d4[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q4, d28, d5[3] // p6(left[1]) * filter(6)
+ vmull.s16 q5, d19, d1[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q5, d21, d1[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q5, d23, d1[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q5, d25, d1[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q5, d17, d0[3] // p0(topleft) * filter(0)
+ vmlal.s16 q5, d27, d4[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q5, d29, d5[3] // p6(left[1]) * filter(6)
+ vqrshrun.s32 d6, q4, #4
+ vqrshrun.s32 d7, q5, #4
+.endif
+ vmin.s16 q3, q3, q15
+ vswp d5, d6
+ subs r4, r4, #2
+ vst1.16 {q2}, [r0, :128], r1
+ vmov q0, q3
+ vst1.16 {q3}, [r6, :128], r1
+ bgt 8b
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+160:
+320:
+ sub r1, r1, r3, lsl #1
+ mov lr, r3
+
+1:
+ vld1.16 {d0}, [r2], r7 // left (0-1) + topleft (2)
+2:
+ vld1.16 {q1, q2}, [r8]! // top(0-15)
+.if \bpc == 10
+ vmul.i16 q3, q8, d0[2] // p0(topleft) * filter(0)
+ vmla.i16 q3, q13, d0[1] // p5(left[0]) * filter(5)
+ vmla.i16 q3, q14, d0[0] // p6(left[1]) * filter(6)
+ vmla.i16 q3, q9, d2[0] // p1(top[0]) * filter(1)
+ vmla.i16 q3, q10, d2[1] // p2(top[1]) * filter(2)
+ vmla.i16 q3, q11, d2[2] // p3(top[2]) * filter(3)
+ vmla.i16 q3, q12, d2[3] // p4(top[3]) * filter(4)
+
+ vmul.i16 q4, q9, d3[0] // p1(top[0]) * filter(1)
+ vmla.i16 q4, q10, d3[1] // p2(top[1]) * filter(2)
+ vmla.i16 q4, q11, d3[2] // p3(top[2]) * filter(3)
+ vrshr.s16 q3, q3, #4
+ vmax.s16 q3, q3, q7
+ vmin.s16 q3, q3, q15
+ vmla.i16 q4, q12, d3[3] // p4(top[3]) * filter(4)
+ vmla.i16 q4, q8, d2[3] // p0(topleft) * filter(0)
+ vmla.i16 q4, q13, d6[3] // p5(left[0]) * filter(5)
+ vmla.i16 q4, q14, d7[3] // p6(left[1]) * filter(6)
+
+ vmul.i16 q5, q9, d4[0] // p1(top[0]) * filter(1)
+ vmla.i16 q5, q10, d4[1] // p2(top[1]) * filter(2)
+ vmla.i16 q5, q11, d4[2] // p3(top[2]) * filter(3)
+ vrshr.s16 q4, q4, #4
+ vmax.s16 q4, q4, q7
+ vmin.s16 q4, q4, q15
+ vmov q0, q4
+ vmla.i16 q5, q12, d4[3] // p4(top[3]) * filter(4)
+ vmla.i16 q5, q8, d3[3] // p0(topleft) * filter(0)
+ vmla.i16 q5, q13, d0[3] // p5(left[0]) * filter(5)
+ vmla.i16 q5, q14, d1[3] // p6(left[1]) * filter(6)
+
+ vmul.i16 q6, q9, d5[0] // p1(top[0]) * filter(1)
+ vmla.i16 q6, q10, d5[1] // p2(top[1]) * filter(2)
+ vmla.i16 q6, q11, d5[2] // p3(top[2]) * filter(3)
+ vrshr.s16 q5, q5, #4
+ vmax.s16 q5, q5, q7
+ vmin.s16 q5, q5, q15
+ vmov q0, q5
+ vmov.u16 r12, d5[3]
+ vmla.i16 q6, q12, d5[3] // p4(top[3]) * filter(4)
+ vmla.i16 q6, q8, d4[3] // p0(topleft) * filter(0)
+ vmla.i16 q6, q13, d0[3] // p5(left[0]) * filter(5)
+ vmla.i16 q6, q14, d1[3] // p6(left[1]) * filter(6)
+ vmov.16 d0[2], r12
+ subs r3, r3, #16
+ vrshr.s16 q6, q6, #4
+.else
+ vmull.s16 q3, d16, d0[2] // p0(topleft) * filter(0)
+ vmlal.s16 q3, d26, d0[1] // p5(left[0]) * filter(5)
+ vmlal.s16 q3, d28, d0[0] // p6(left[1]) * filter(6)
+ vmlal.s16 q3, d18, d2[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q3, d20, d2[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q3, d22, d2[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q3, d24, d2[3] // p4(top[3]) * filter(4)
+ vmull.s16 q4, d17, d0[2] // p0(topleft) * filter(0)
+ vmlal.s16 q4, d27, d0[1] // p5(left[0]) * filter(5)
+ vmlal.s16 q4, d29, d0[0] // p6(left[1]) * filter(6)
+ vmlal.s16 q4, d19, d2[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q4, d21, d2[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q4, d23, d2[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q4, d25, d2[3] // p4(top[3]) * filter(4)
+ vqrshrun.s32 d6, q3, #4
+ vmull.s16 q5, d18, d3[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q5, d20, d3[1] // p2(top[1]) * filter(2)
+ vqrshrun.s32 d7, q4, #4
+ vmin.s16 q3, q3, q15
+ vmlal.s16 q5, d22, d3[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q5, d24, d3[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q5, d16, d2[3] // p0(topleft) * filter(0)
+ vmlal.s16 q5, d26, d6[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q5, d28, d7[3] // p6(left[1]) * filter(6)
+ vmull.s16 q6, d19, d3[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q6, d21, d3[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q6, d23, d3[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q6, d25, d3[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q6, d17, d2[3] // p0(topleft) * filter(0)
+ vmlal.s16 q6, d27, d6[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q6, d29, d7[3] // p6(left[1]) * filter(6)
+ vqrshrun.s32 d8, q5, #4
+ vmull.s16 q7, d18, d4[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q7, d20, d4[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q7, d22, d4[2] // p3(top[2]) * filter(3)
+ vqrshrun.s32 d9, q6, #4
+ vmin.s16 q0, q4, q15
+ vmlal.s16 q7, d24, d4[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q7, d16, d3[3] // p0(topleft) * filter(0)
+ vmlal.s16 q7, d26, d0[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q7, d28, d1[3] // p6(left[1]) * filter(6)
+ vmin.s16 q4, q4, q15
+ vmull.s16 q6, d19, d4[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q6, d21, d4[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q6, d23, d4[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q6, d25, d4[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q6, d17, d3[3] // p0(topleft) * filter(0)
+ vmlal.s16 q6, d27, d0[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q6, d29, d1[3] // p6(left[1]) * filter(6)
+ vqrshrun.s32 d10, q7, #4
+ vmull.s16 q1, d18, d5[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q1, d20, d5[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q1, d22, d5[2] // p3(top[2]) * filter(3)
+ vqrshrun.s32 d11, q6, #4
+ vmin.s16 q0, q5, q15
+ vmlal.s16 q1, d24, d5[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q1, d16, d4[3] // p0(topleft) * filter(0)
+ vmlal.s16 q1, d26, d0[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q1, d28, d1[3] // p6(left[1]) * filter(6)
+ vmin.s16 q5, q5, q15
+ vmov.u16 r12, d5[3]
+ vmull.s16 q7, d19, d5[0] // p1(top[0]) * filter(1)
+ vmlal.s16 q7, d21, d5[1] // p2(top[1]) * filter(2)
+ vmlal.s16 q7, d23, d5[2] // p3(top[2]) * filter(3)
+ vmlal.s16 q7, d25, d5[3] // p4(top[3]) * filter(4)
+ vmlal.s16 q7, d17, d4[3] // p0(topleft) * filter(0)
+ vmlal.s16 q7, d27, d0[3] // p5(left[0]) * filter(5)
+ vmlal.s16 q7, d29, d1[3] // p6(left[1]) * filter(6)
+ vmov.16 d0[2], r12
+ vqrshrun.s32 d12, q1, #4
+ subs r3, r3, #16
+ vqrshrun.s32 d13, q7, #4
+.endif
+ vswp q4, q5
+.if \bpc == 10
+ vmax.s16 q6, q6, q7
+.endif
+ vswp d7, d10
+ vmin.s16 q6, q6, q15
+
+ vswp d9, d12
+
+ vst1.16 {q3, q4}, [r0, :128]!
+ vst1.16 {q5, q6}, [r6, :128]!
+ ble 8f
+ vmov.u16 r12, d13[3]
+ vmov.16 d0[0], r12
+ vmov.u16 r12, d9[3]
+ vmov.16 d0[1], r12
+ b 2b
+8:
+ subs r4, r4, #2
+
+ ble 9f
+ sub r8, r6, lr, lsl #1
+ add r0, r0, r1
+ add r6, r6, r1
+ mov r3, lr
+ b 1b
+9:
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+endfunc
+.endm
+
+filter_fn 10
+filter_fn 12
+
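+// Entry point that dispatches on bitdepth_max: 10 bpc content has
+// bitdepth_max == 0x3ff, anything larger takes the 12 bpc path.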
+function ipred_filter_16bpc_neon, export=1
+ push {r4-r8, lr}
+ vpush {q4-q7}
+ movw r12, 0x3ff
+ ldr r8, [sp, #104]
+ cmp r8, r12
+ ble ipred_filter_10bpc_neon
+ b ipred_filter_12bpc_neon
+endfunc
+
+// void pal_pred_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const uint16_t *const pal, const uint8_t *idx,
+// const int w, const int h);
+function pal_pred_16bpc_neon, export=1
+ push {r4-r5, lr}
+ ldr r4, [sp, #12]
+ ldr r5, [sp, #16]
+ vld1.16 {q14}, [r2, :128]
+ clz lr, r4
+ adr r12, L(pal_pred_tbl)
+ sub lr, lr, #25
+ ldr lr, [r12, lr, lsl #2]
+ vmov.i16 q15, #0x100
+ add r12, r12, lr
+ add r2, r0, r1
+ bx r12
+
+ .align 2
+L(pal_pred_tbl):
+ .word 640f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 320f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 160f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 80f - L(pal_pred_tbl) + CONFIG_THUMB
+ .word 40f - L(pal_pred_tbl) + CONFIG_THUMB
+
+40:
+ lsl r1, r1, #1
+4:
+ vld1.8 {q1}, [r3, :128]!
+ subs r5, r5, #4
+ // Restructure q1 from a, b, c, ... into 2*a, 2*a+1, 2*b, 2*b+1, 2*c, 2*c+1, ...
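+ // vtbl.8 indexes bytes, so each 16-bit palette entry needs a pair of
+ // byte indices; adding 0x100 per 16-bit lane afterwards turns the high
+ // byte into 2*x+1.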
+ vadd.i8 q0, q1, q1
+ vadd.i8 q1, q1, q1
+ vzip.8 q0, q1
+ vadd.i16 q0, q0, q15
+ vadd.i16 q1, q1, q15
+ vtbl.8 d0, {q14}, d0
+ vtbl.8 d1, {q14}, d1
+ vst1.16 {d0}, [r0, :64], r1
+ vtbl.8 d2, {q14}, d2
+ vst1.16 {d1}, [r2, :64], r1
+ vtbl.8 d3, {q14}, d3
+ vst1.16 {d2}, [r0, :64], r1
+ vst1.16 {d3}, [r2, :64], r1
+ bgt 4b
+ pop {r4-r5, pc}
+80:
+ lsl r1, r1, #1
+8:
+ vld1.8 {q1, q2}, [r3, :128]!
+ subs r5, r5, #4
+ // Prefer doing the adds twice, instead of chaining a vmov after
+ // the add.
+ vadd.i8 q0, q1, q1
+ vadd.i8 q1, q1, q1
+ vadd.i8 q3, q2, q2
+ vadd.i8 q2, q2, q2
+ vzip.8 q0, q1
+ vzip.8 q2, q3
+ vadd.i16 q0, q0, q15
+ vadd.i16 q1, q1, q15
+ vtbl.8 d0, {q14}, d0
+ vadd.i16 q2, q2, q15
+ vtbl.8 d1, {q14}, d1
+ vadd.i16 q3, q3, q15
+ vtbl.8 d2, {q14}, d2
+ vtbl.8 d3, {q14}, d3
+ vtbl.8 d4, {q14}, d4
+ vtbl.8 d5, {q14}, d5
+ vst1.16 {q0}, [r0, :128], r1
+ vtbl.8 d6, {q14}, d6
+ vst1.16 {q1}, [r2, :128], r1
+ vtbl.8 d7, {q14}, d7
+ vst1.16 {q2}, [r0, :128], r1
+ vst1.16 {q3}, [r2, :128], r1
+ bgt 8b
+ pop {r4-r5, pc}
+160:
+ lsl r1, r1, #1
+16:
+ vld1.8 {q2, q3}, [r3, :128]!
+ subs r5, r5, #4
+ vld1.8 {q10, q11}, [r3, :128]!
+ vadd.i8 q0, q2, q2
+ vadd.i8 q1, q2, q2
+ vadd.i8 q2, q3, q3
+ vadd.i8 q3, q3, q3
+ vadd.i8 q8, q10, q10
+ vadd.i8 q9, q10, q10
+ vadd.i8 q10, q11, q11
+ vzip.8 q0, q1
+ vadd.i8 q11, q11, q11
+ vzip.8 q2, q3
+ vzip.8 q8, q9
+ vadd.i16 q0, q0, q15
+ vzip.8 q10, q11
+ vadd.i16 q1, q1, q15
+ vadd.i16 q2, q2, q15
+ vadd.i16 q3, q3, q15
+ vadd.i16 q8, q8, q15
+ vadd.i16 q9, q9, q15
+ vadd.i16 q10, q10, q15
+ vtbl.8 d0, {q14}, d0
+ vadd.i16 q11, q11, q15
+ vtbl.8 d1, {q14}, d1
+ vtbl.8 d2, {q14}, d2
+ vtbl.8 d3, {q14}, d3
+ vtbl.8 d4, {q14}, d4
+ vtbl.8 d5, {q14}, d5
+ vtbl.8 d6, {q14}, d6
+ vtbl.8 d7, {q14}, d7
+ vtbl.8 d16, {q14}, d16
+ vtbl.8 d17, {q14}, d17
+ vtbl.8 d18, {q14}, d18
+ vst1.16 {q0, q1}, [r0, :128], r1
+ vtbl.8 d19, {q14}, d19
+ vtbl.8 d20, {q14}, d20
+ vst1.16 {q2, q3}, [r2, :128], r1
+ vtbl.8 d21, {q14}, d21
+ vtbl.8 d22, {q14}, d22
+ vst1.16 {q8, q9}, [r0, :128], r1
+ vtbl.8 d23, {q14}, d23
+ vst1.16 {q10, q11}, [r2, :128], r1
+ bgt 16b
+ pop {r4-r5, pc}
+320:
+ lsl r1, r1, #1
+ sub r1, r1, #32
+32:
+ vld1.8 {q2, q3}, [r3, :128]!
+ subs r5, r5, #2
+ vld1.8 {q10, q11}, [r3, :128]!
+ vadd.i8 q0, q2, q2
+ vadd.i8 q1, q2, q2
+ vadd.i8 q2, q3, q3
+ vadd.i8 q3, q3, q3
+ vadd.i8 q8, q10, q10
+ vadd.i8 q9, q10, q10
+ vadd.i8 q10, q11, q11
+ vzip.8 q0, q1
+ vadd.i8 q11, q11, q11
+ vzip.8 q2, q3
+ vzip.8 q8, q9
+ vadd.i16 q0, q0, q15
+ vzip.8 q10, q11
+ vadd.i16 q1, q1, q15
+ vadd.i16 q2, q2, q15
+ vadd.i16 q3, q3, q15
+ vadd.i16 q8, q8, q15
+ vadd.i16 q9, q9, q15
+ vadd.i16 q10, q10, q15
+ vtbl.8 d0, {q14}, d0
+ vadd.i16 q11, q11, q15
+ vtbl.8 d1, {q14}, d1
+ vtbl.8 d2, {q14}, d2
+ vtbl.8 d3, {q14}, d3
+ vtbl.8 d4, {q14}, d4
+ vtbl.8 d5, {q14}, d5
+ vtbl.8 d6, {q14}, d6
+ vtbl.8 d7, {q14}, d7
+ vtbl.8 d16, {q14}, d16
+ vtbl.8 d17, {q14}, d17
+ vtbl.8 d18, {q14}, d18
+ vst1.16 {q0, q1}, [r0, :128]!
+ vtbl.8 d19, {q14}, d19
+ vtbl.8 d20, {q14}, d20
+ vst1.16 {q2, q3}, [r0, :128], r1
+ vtbl.8 d21, {q14}, d21
+ vtbl.8 d22, {q14}, d22
+ vst1.16 {q8, q9}, [r2, :128]!
+ vtbl.8 d23, {q14}, d23
+ vst1.16 {q10, q11}, [r2, :128], r1
+ bgt 32b
+ pop {r4-r5, pc}
+640:
+ sub r1, r1, #96
+64:
+ vld1.8 {q2, q3}, [r3, :128]!
+ subs r5, r5, #1
+ vld1.8 {q10, q11}, [r3, :128]!
+ vadd.i8 q0, q2, q2
+ vadd.i8 q1, q2, q2
+ vadd.i8 q2, q3, q3
+ vadd.i8 q3, q3, q3
+ vadd.i8 q8, q10, q10
+ vadd.i8 q9, q10, q10
+ vadd.i8 q10, q11, q11
+ vzip.8 q0, q1
+ vadd.i8 q11, q11, q11
+ vzip.8 q2, q3
+ vzip.8 q8, q9
+ vadd.i16 q0, q0, q15
+ vzip.8 q10, q11
+ vadd.i16 q1, q1, q15
+ vadd.i16 q2, q2, q15
+ vadd.i16 q3, q3, q15
+ vadd.i16 q8, q8, q15
+ vadd.i16 q9, q9, q15
+ vadd.i16 q10, q10, q15
+ vtbl.8 d0, {q14}, d0
+ vadd.i16 q11, q11, q15
+ vtbl.8 d1, {q14}, d1
+ vtbl.8 d2, {q14}, d2
+ vtbl.8 d3, {q14}, d3
+ vtbl.8 d4, {q14}, d4
+ vtbl.8 d5, {q14}, d5
+ vtbl.8 d6, {q14}, d6
+ vtbl.8 d7, {q14}, d7
+ vtbl.8 d16, {q14}, d16
+ vtbl.8 d17, {q14}, d17
+ vtbl.8 d18, {q14}, d18
+ vst1.16 {q0, q1}, [r0, :128]!
+ vtbl.8 d19, {q14}, d19
+ vtbl.8 d20, {q14}, d20
+ vst1.16 {q2, q3}, [r0, :128]!
+ vtbl.8 d21, {q14}, d21
+ vtbl.8 d22, {q14}, d22
+ vst1.16 {q8, q9}, [r0, :128]!
+ vtbl.8 d23, {q14}, d23
+ vst1.16 {q10, q11}, [r0, :128], r1
+ bgt 64b
+ pop {r4-r5, pc}
+endfunc
+
+// void ipred_cfl_128_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha,
+// const int bitdepth_max);
+function ipred_cfl_128_16bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldrd r6, r7, [sp, #32]
+ clz lr, r3
+ vdup.16 q15, r7 // bitdepth_max
+ adr r12, L(ipred_cfl_128_tbl)
+ sub lr, lr, #26
+ ldr lr, [r12, lr, lsl #2]
+ vrshr.u16 q0, q15, #1
+ vdup.16 q1, r6 // alpha
+ add r12, r12, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ vmov.i16 q14, #0
+ bx r12
+
+ .align 2
+L(ipred_cfl_128_tbl):
+L(ipred_cfl_splat_tbl):
+ .word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_splat_w16) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_splat_w8) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_splat_w4) - L(ipred_cfl_128_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_splat_w4):
+ vld1.16 {q8, q9}, [r5, :128]!
+ vmull.s16 q2, d16, d2 // diff = ac * alpha
+ vmull.s16 q3, d17, d3
+ vmull.s16 q8, d18, d2
+ vmull.s16 q9, d19, d3
+ vshr.s32 q10, q2, #31 // sign = diff >> 31
+ vshr.s32 q11, q3, #31
+ vshr.s32 q12, q8, #31
+ vshr.s32 q13, q9, #31
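+ // Adding the sign before the rounding shift makes the rounding symmetric
+ // around zero, matching apply_sign() in the C code.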
+ vadd.i32 q2, q2, q10 // diff + sign
+ vadd.i32 q3, q3, q11
+ vadd.i32 q8, q8, q12
+ vadd.i32 q9, q9, q13
+ vrshrn.i32 d4, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
+ vrshrn.i32 d5, q3, #6
+ vrshrn.i32 d6, q8, #6
+ vrshrn.i32 d7, q9, #6
+ vadd.i16 q2, q2, q0 // dc + apply_sign()
+ vadd.i16 q3, q3, q0
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmin.s16 q2, q2, q15
+ vmin.s16 q3, q3, q15
+ vst1.16 {d4}, [r0, :64], r1
+ vst1.16 {d5}, [r6, :64], r1
+ subs r4, r4, #4
+ vst1.16 {d6}, [r0, :64], r1
+ vst1.16 {d7}, [r6, :64], r1
+ bgt L(ipred_cfl_splat_w4)
+ pop {r4-r8, pc}
+L(ipred_cfl_splat_w8):
+ vld1.16 {q8, q9}, [r5, :128]!
+ subs r4, r4, #2
+ vmull.s16 q2, d16, d2 // diff = ac * alpha
+ vmull.s16 q3, d17, d3
+ vmull.s16 q8, d18, d2
+ vmull.s16 q9, d19, d3
+ vshr.s32 q10, q2, #31 // sign = diff >> 31
+ vshr.s32 q11, q3, #31
+ vshr.s32 q12, q8, #31
+ vshr.s32 q13, q9, #31
+ vadd.i32 q2, q2, q10 // diff + sign
+ vadd.i32 q3, q3, q11
+ vadd.i32 q8, q8, q12
+ vadd.i32 q9, q9, q13
+ vrshrn.i32 d4, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
+ vrshrn.i32 d5, q3, #6
+ vrshrn.i32 d6, q8, #6
+ vrshrn.i32 d7, q9, #6
+ vadd.i16 q2, q2, q0 // dc + apply_sign()
+ vadd.i16 q3, q3, q0
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmin.s16 q2, q2, q15
+ vmin.s16 q3, q3, q15
+ vst1.16 {q2}, [r0, :128], r1
+ vst1.16 {q3}, [r6, :128], r1
+ bgt L(ipred_cfl_splat_w8)
+ pop {r4-r8, pc}
+L(ipred_cfl_splat_w16):
+ vpush {q4-q7}
+ add r12, r5, r3, lsl #1
+ sub r1, r1, r3, lsl #1
+ mov lr, r3
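+ // r5 and r12 step through the ac rows for the two output rows handled
+ // per iteration; r1 was reduced by the row width so the stride add below
+ // lands on the next row pair.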
+1:
+ vld1.16 {q6, q7}, [r5, :128]!
+ vmull.s16 q2, d12, d2 // diff = ac * alpha
+ vld1.16 {q8, q9}, [r12, :128]!
+ vmull.s16 q3, d13, d3
+ vmull.s16 q4, d14, d2
+ vmull.s16 q5, d15, d3
+ vmull.s16 q6, d16, d2
+ vmull.s16 q7, d17, d3
+ vmull.s16 q8, d18, d2
+ vmull.s16 q9, d19, d3
+ vshr.s32 q10, q2, #31 // sign = diff >> 31
+ vshr.s32 q11, q3, #31
+ vshr.s32 q12, q4, #31
+ vshr.s32 q13, q5, #31
+ vadd.i32 q2, q2, q10 // diff + sign
+ vshr.s32 q10, q6, #31
+ vadd.i32 q3, q3, q11
+ vshr.s32 q11, q7, #31
+ vadd.i32 q4, q4, q12
+ vshr.s32 q12, q8, #31
+ vadd.i32 q5, q5, q13
+ vshr.s32 q13, q9, #31
+ vadd.i32 q6, q6, q10
+ vadd.i32 q7, q7, q11
+ vadd.i32 q8, q8, q12
+ vadd.i32 q9, q9, q13
+ vrshrn.i32 d4, q2, #6 // (diff + sign + 32) >> 6 = apply_sign()
+ vrshrn.i32 d5, q3, #6
+ vrshrn.i32 d6, q4, #6
+ vrshrn.i32 d7, q5, #6
+ vadd.i16 q2, q2, q0 // dc + apply_sign()
+ vrshrn.i32 d8, q6, #6
+ vrshrn.i32 d9, q7, #6
+ vadd.i16 q3, q3, q0
+ vrshrn.i32 d10, q8, #6
+ vrshrn.i32 d11, q9, #6
+ vadd.i16 q4, q4, q0
+ vadd.i16 q5, q5, q0
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmax.s16 q4, q4, q14
+ vmax.s16 q5, q5, q14
+ vmin.s16 q2, q2, q15
+ vmin.s16 q3, q3, q15
+ vmin.s16 q4, q4, q15
+ vmin.s16 q5, q5, q15
+ subs r3, r3, #16
+ vst1.16 {q2, q3}, [r0, :128]!
+ vst1.16 {q4, q5}, [r6, :128]!
+ bgt 1b
+ subs r4, r4, #2
+ add r5, r5, lr, lsl #1
+ add r12, r12, lr, lsl #1
+ add r0, r0, r1
+ add r6, r6, r1
+ mov r3, lr
+ bgt 1b
+ vpop {q4-q7}
+ pop {r4-r8, pc}
+endfunc
+
+// void ipred_cfl_top_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha,
+// const int bitdepth_max);
+function ipred_cfl_top_16bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldrd r6, r7, [sp, #32]
+ clz lr, r3
+ vdup.16 q15, r7 // bitdepth_max
+ adr r12, L(ipred_cfl_top_tbl)
+ sub lr, lr, #26
+ ldr lr, [r12, lr, lsl #2]
+ vdup.16 q1, r6 // alpha
+ add r2, r2, #2
+ add r12, r12, lr
+ add r6, r0, r1
+ lsl r1, r1, #1
+ vmov.i16 q14, #0
+ bx r12
+
+ .align 2
+L(ipred_cfl_top_tbl):
+ .word 32f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+ .word 16f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+ .word 8f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+ .word 4f - L(ipred_cfl_top_tbl) + CONFIG_THUMB
+
+4:
+ vld1.16 {d0}, [r2]
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #2
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w4)
+8:
+ vld1.16 {q0}, [r2]
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #3
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w8)
+16:
+ vld1.16 {q2, q3}, [r2]
+ vadd.i16 q0, q2, q3
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #4
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+32:
+ vld1.16 {q8, q9}, [r2]!
+ vld1.16 {q10, q11}, [r2]
+ vadd.i16 q8, q8, q9
+ vadd.i16 q10, q10, q11
+ vadd.i16 q0, q8, q10
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpaddl.u16 d0, d0
+ vrshrn.i32 d0, q0, #5
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+endfunc
+
+// void ipred_cfl_left_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha,
+// const int bitdepth_max);
+function ipred_cfl_left_16bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldrd r6, r7, [sp, #32]
+ sub r2, r2, r4, lsl #1
+ clz lr, r3
+ clz r8, r4
+ vdup.16 q15, r7 // bitdepth_max
+ adr r12, L(ipred_cfl_splat_tbl)
+ adr r7, L(ipred_cfl_left_tbl)
+ sub lr, lr, #26
+ sub r8, r8, #26
+ ldr lr, [r12, lr, lsl #2]
+ ldr r8, [r7, r8, lsl #2]
+ vdup.16 q1, r6 // alpha
+ add r12, r12, lr
+ add r7, r7, r8
+ add r6, r0, r1
+ lsl r1, r1, #1
+ vmov.i16 q14, #0
+ bx r7
+
+ .align 2
+L(ipred_cfl_left_tbl):
+ .word L(ipred_cfl_left_h32) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_left_h16) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_left_h8) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_left_h4) - L(ipred_cfl_left_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_left_h4):
+ vld1.16 {d0}, [r2, :64]
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #2
+ vdup.16 q0, d0[0]
+ bx r12
+
+L(ipred_cfl_left_h8):
+ vld1.16 {q0}, [r2, :128]
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #3
+ vdup.16 q0, d0[0]
+ bx r12
+
+L(ipred_cfl_left_h16):
+ vld1.16 {q2, q3}, [r2, :128]
+ vadd.i16 q0, q2, q3
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpadd.i16 d0, d0, d0
+ vrshr.u16 d0, d0, #4
+ vdup.16 q0, d0[0]
+ bx r12
+
+L(ipred_cfl_left_h32):
+ vld1.16 {q8, q9}, [r2, :128]!
+ vld1.16 {q10, q11}, [r2, :128]
+ vadd.i16 q8, q8, q9
+ vadd.i16 q10, q10, q11
+ vadd.i16 q0, q8, q10
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ vpaddl.u16 d0, d0
+ vrshrn.i32 d0, q0, #5
+ vdup.16 q0, d0[0]
+ bx r12
+endfunc
+
+// void ipred_cfl_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *const topleft,
+// const int width, const int height,
+// const int16_t *ac, const int alpha,
+// const int bitdepth_max);
+function ipred_cfl_16bpc_neon, export=1
+ push {r4-r8, lr}
+ ldrd r4, r5, [sp, #24]
+ ldrd r6, r7, [sp, #32]
+ sub r2, r2, r4, lsl #1
+ add r8, r3, r4 // width + height
+ vdup.16 q1, r6 // alpha
+ clz lr, r3
+ clz r6, r4
+ vdup.32 d16, r8 // width + height
+ vdup.16 q15, r7 // bitdepth_max
+ adr r7, L(ipred_cfl_tbl)
+ rbit r8, r8 // rbit(width + height)
+ sub lr, lr, #22 // 26 leading bits, minus table offset 4
+ sub r6, r6, #26
+ clz r8, r8 // ctz(width + height)
+ ldr lr, [r7, lr, lsl #2]
+ ldr r6, [r7, r6, lsl #2]
+ neg r8, r8 // -ctz(width + height)
+ add r12, r7, lr
+ add r7, r7, r6
+ vshr.u32 d16, d16, #1 // (width + height) >> 1
+ vdup.32 d17, r8 // -ctz(width + height)
+ add r6, r0, r1
+ lsl r1, r1, #1
+ vmov.i16 q14, #0
+ bx r7
+
+ .align 2
+L(ipred_cfl_tbl):
+ .word L(ipred_cfl_h32) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_h16) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_h8) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_h4) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w32) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w16) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w8) - L(ipred_cfl_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_w4) - L(ipred_cfl_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_h4):
+ vld1.16 {d0}, [r2, :64]!
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r12
+L(ipred_cfl_w4):
+ vld1.16 {d1}, [r2]
+ vadd.i32 d0, d0, d16
+ vpadd.i16 d1, d1, d1
+ vpaddl.u16 d1, d1
+ cmp r4, #4
+ vadd.i32 d0, d0, d1
+ vshl.u32 d0, d0, d17
+ beq 1f
+ // h = 8/16
+ cmp r4, #16
+ movw lr, #0x6667
+ movw r8, #0xAAAB
+ it ne
+ movne lr, r8
+ vdup.32 d18, lr
+ vmul.i32 d0, d0, d18
+ vshr.u32 d0, d0, #17
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w4)
+
+L(ipred_cfl_h8):
+ vld1.16 {q0}, [r2, :128]!
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r12
+L(ipred_cfl_w8):
+ vld1.16 {q2}, [r2]
+ vadd.i32 d0, d0, d16
+ vadd.i16 d1, d4, d5
+ vpadd.i16 d1, d1, d1
+ vpaddl.u16 d1, d1
+ cmp r4, #8
+ vadd.i32 d0, d0, d1
+ vshl.u32 d0, d0, d17
+ beq 1f
+ // h = 4/16/32
+ cmp r4, #32
+ movw lr, #0x6667
+ movw r8, #0xAAAB
+ it ne
+ movne lr, r8
+ vdup.32 d18, lr
+ vmul.i32 d0, d0, d18
+ vshr.u32 d0, d0, #17
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w8)
+
+L(ipred_cfl_h16):
+ vld1.16 {q2, q3}, [r2, :128]!
+ vadd.i16 q0, q2, q3
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r12
+L(ipred_cfl_w16):
+ vld1.16 {q2, q3}, [r2]
+ vadd.i32 d0, d0, d16
+ vadd.i16 q2, q2, q3
+ vadd.i16 d1, d4, d5
+ vpadd.i16 d1, d1, d1
+ vpaddl.u16 d1, d1
+ cmp r4, #16
+ vadd.i32 d0, d0, d1
+ vshl.u32 d0, d0, d17
+ beq 1f
+ // h = 4/8/32/64
+ tst r4, #(32+16+8) // 16 added to make a consecutive bitmask
+ movw lr, #0x6667
+ movw r8, #0xAAAB
+ it ne
+ movne lr, r8
+ vdup.32 d18, lr
+ vmul.i32 d0, d0, d18
+ vshr.u32 d0, d0, #17
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+
+L(ipred_cfl_h32):
+ vld1.16 {q2, q3}, [r2, :128]!
+ vld1.16 {q10, q11}, [r2, :128]!
+ vadd.i16 q2, q2, q3
+ vadd.i16 q10, q10, q11
+ vadd.i16 q0, q2, q10
+ vadd.i16 d0, d0, d1
+ vpadd.i16 d0, d0, d0
+ add r2, r2, #2
+ vpaddl.u16 d0, d0
+ bx r12
+L(ipred_cfl_w32):
+ vld1.16 {q2, q3}, [r2]!
+ vadd.i32 d0, d0, d16
+ vld1.16 {q10, q11}, [r2]!
+ vadd.i16 q2, q2, q3
+ vadd.i16 q10, q10, q11
+ vadd.i16 q2, q2, q10
+ vadd.i16 d1, d4, d5
+ vpadd.i16 d1, d1, d1
+ vpaddl.u16 d1, d1
+ cmp r4, #32
+ vadd.i32 d0, d0, d1
+ vshl.u32 d0, d0, d17
+ beq 1f
+ // h = 8/16/64
+ cmp r4, #8
+ movw lr, #0x6667
+ movw r8, #0xAAAB
+ it ne
+ movne lr, r8
+ vdup.32 d18, lr
+ vmul.i32 d0, d0, d18
+ vshr.u32 d0, d0, #17
+1:
+ vdup.16 q0, d0[0]
+ b L(ipred_cfl_splat_w16)
+endfunc
+
+// void cfl_ac_420_16bpc_neon(int16_t *const ac, const pixel *const ypx,
+// const ptrdiff_t stride, const int w_pad,
+// const int h_pad, const int cw, const int ch);
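+// The stored ac values are scaled by 8 relative to a single luma sample:
+// the 2x2 sums below are shifted left by 1, the horizontal pairs in the
+// 422 variant by 2, and the full-resolution samples in the 444 variant by
+// 3, before the rounded average is subtracted from every entry.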
+function ipred_cfl_ac_420_16bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz r8, r5
+ lsl r4, r4, #2
+ adr r7, L(ipred_cfl_ac_420_tbl)
+ sub r8, r8, #27
+ ldr r8, [r7, r8, lsl #2]
+ vmov.i32 q8, #0
+ vmov.i32 q9, #0
+ vmov.i32 q10, #0
+ vmov.i32 q11, #0
+ add r7, r7, r8
+ sub r8, r6, r4 // height - h_pad
+ rbit lr, r5 // rbit(width)
+ rbit r12, r6 // rbit(height)
+ clz lr, lr // ctz(width)
+ clz r12, r12 // ctz(height)
+ add lr, lr, r12 // log2sz
+ add r12, r1, r2
+ vdup.32 d31, lr
+ lsl r2, r2, #1
+ vneg.s32 d31, d31 // -log2sz
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_420_tbl):
+ .word L(ipred_cfl_ac_420_w16) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w8) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w4) - L(ipred_cfl_ac_420_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_420_w4):
+1: // Copy and subsample input
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q1}, [r12, :128], r2
+ vld1.16 {q2}, [r1, :128], r2
+ vld1.16 {q3}, [r12, :128], r2
+ vadd.i16 q0, q0, q1
+ vadd.i16 q2, q2, q3
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d4, d5
+ vshl.i16 q0, q0, #1
+ subs r8, r8, #2
+ vst1.16 {q0}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ bgt 1b
+ cmp r4, #0
+ vmov d0, d1
+ vmov d2, d1
+ vmov d3, d1
+L(ipred_cfl_ac_420_w4_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 2b
+3:
+L(ipred_cfl_ac_420_w4_calc_subtract_dc):
+ // Aggregate the sums
+ vadd.i32 q8, q8, q9
+ vadd.i32 q10, q10, q11
+ vadd.i32 q0, q8, q10
+ vadd.i32 d0, d0, d1
+ vpadd.i32 d0, d0, d0 // sum
+ sub r0, r0, r6, lsl #3
+ vrshl.u32 d16, d0, d31 // (sum + (1 << (log2sz - 1))) >>= log2sz
+ vdup.16 q8, d16[0]
+6: // Subtract dc from ac
+ vld1.16 {q0, q1}, [r0, :128]
+ subs r6, r6, #4
+ vsub.i16 q0, q0, q8
+ vsub.i16 q1, q1, q8
+ vst1.16 {q0, q1}, [r0, :128]!
+ bgt 6b
+ pop {r4-r8, pc}
+
+L(ipred_cfl_ac_420_w8):
+ cmp r3, #0
+ bne L(ipred_cfl_ac_420_w8_wpad)
+1: // Copy and subsample input, without padding
+ vld1.16 {q0, q1}, [r1, :128], r2
+ vld1.16 {q2, q3}, [r12, :128], r2
+ vld1.16 {q12, q13}, [r1, :128], r2
+ vadd.i16 q0, q0, q2
+ vadd.i16 q1, q1, q3
+ vld1.16 {q2, q3}, [r12, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vadd.i16 q12, q12, q2
+ vadd.i16 q13, q13, q3
+ vpadd.i16 d2, d24, d25
+ vpadd.i16 d3, d26, d27
+ vshl.i16 q0, q0, #1
+ vshl.i16 q1, q1, #1
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q1
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_420_w8_wpad):
+1: // Copy and subsample input, padding 4
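+ // The missing right half of each output row is padded by replicating
+ // that row's last real sample.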
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q1}, [r12, :128], r2
+ vld1.16 {q2}, [r1, :128], r2
+ vld1.16 {q3}, [r12, :128], r2
+ vadd.i16 q0, q0, q1
+ vadd.i16 q2, q2, q3
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d4, d5
+ vshl.i16 q0, q0, #1
+ vdup.16 d3, d1[3]
+ vmov d2, d1
+ vdup.16 d1, d0[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q1
+
+L(ipred_cfl_ac_420_w8_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 2b
+3:
+
+ // Double the height and reuse the w4 summing/subtracting
+ lsl r6, r6, #1
+ b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
+
+L(ipred_cfl_ac_420_w16):
+ adr r7, L(ipred_cfl_ac_420_w16_tbl)
+ ldr r3, [r7, r3, lsl #2]
+ add r7, r7, r3
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_420_w16_tbl):
+ .word L(ipred_cfl_ac_420_w16_wpad0) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w16_wpad1) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w16_wpad2) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_420_w16_wpad3) - L(ipred_cfl_ac_420_w16_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_420_w16_wpad0):
+ sub r2, r2, #32
+1: // Copy and subsample input, without padding
+ vld1.16 {q0, q1}, [r1, :128]!
+ vld1.16 {q12, q13}, [r12, :128]!
+ vld1.16 {q2, q3}, [r1, :128], r2
+ vadd.i16 q0, q0, q12
+ vadd.i16 q1, q1, q13
+ vld1.16 {q12, q13}, [r12, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vadd.i16 q2, q2, q12
+ vadd.i16 q3, q3, q13
+ vpadd.i16 d2, d4, d5
+ vpadd.i16 d3, d6, d7
+ vshl.i16 q0, q0, #1
+ vshl.i16 q1, q1, #1
+ subs r8, r8, #1
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_wpad1):
+ sub r2, r2, #32
+1: // Copy and subsample input, padding 4
+ vld1.16 {q0, q1}, [r1, :128]!
+ vld1.16 {q12, q13}, [r12, :128]!
+ vld1.16 {q2}, [r1, :128], r2
+ vadd.i16 q0, q0, q12
+ vadd.i16 q1, q1, q13
+ vld1.16 {q12}, [r12, :128], r2
+ vpadd.i16 d0, d0, d1
+ vadd.i16 q2, q2, q12
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d2, d4, d5
+ vshl.i16 q0, q0, #1
+ vshl.i16 d2, d2, #1
+ subs r8, r8, #1
+ vdup.16 d3, d2[3]
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_wpad2):
+1: // Copy and subsample input, padding 8
+ vld1.16 {q0, q1}, [r1, :128], r2
+ vld1.16 {q12, q13}, [r12, :128], r2
+ vadd.i16 q0, q0, q12
+ vadd.i16 q1, q1, q13
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vshl.i16 q0, q0, #1
+ subs r8, r8, #1
+ vdup.16 q1, d1[3]
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_wpad3):
+1: // Copy and subsample input, padding 12
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q12}, [r12, :128], r2
+ vadd.i16 q0, q0, q12
+ vpadd.i16 d0, d0, d1
+ vshl.i16 d0, d0, #1
+ subs r8, r8, #1
+ vdup.16 q1, d0[3]
+ vdup.16 d1, d0[3]
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_420_w16_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 2b
+3:
+
+ // Quadruple the height and reuse the w4 summing/subtracting
+ lsl r6, r6, #2
+ b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
+endfunc
+
+// void cfl_ac_422_16bpc_neon(int16_t *const ac, const pixel *const ypx,
+// const ptrdiff_t stride, const int w_pad,
+// const int h_pad, const int cw, const int ch);
+function ipred_cfl_ac_422_16bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz r8, r5
+ lsl r4, r4, #2
+ adr r7, L(ipred_cfl_ac_422_tbl)
+ sub r8, r8, #27
+ ldr r8, [r7, r8, lsl #2]
+ vmov.i16 q8, #0
+ vmov.i16 q9, #0
+ vmov.i16 q10, #0
+ vmov.i16 q11, #0
+ add r7, r7, r8
+ sub r8, r6, r4 // height - h_pad
+ rbit lr, r5 // rbit(width)
+ rbit r12, r6 // rbit(height)
+ clz lr, lr // ctz(width)
+ clz r12, r12 // ctz(height)
+ add lr, lr, r12 // log2sz
+ add r12, r1, r2
+ vdup.32 d31, lr
+ lsl r2, r2, #1
+ vneg.s32 d31, d31 // -log2sz
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_422_tbl):
+ .word L(ipred_cfl_ac_422_w16) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w8) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w4) - L(ipred_cfl_ac_422_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_422_w4):
+1: // Copy and subsample input
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q1}, [r12, :128], r2
+ vld1.16 {q2}, [r1, :128], r2
+ vld1.16 {q3}, [r12, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d2, d4, d5
+ vpadd.i16 d3, d6, d7
+ vshl.i16 q0, q0, #2
+ vshl.i16 q1, q1, #2
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ vmov d0, d3
+ vmov d1, d3
+ vmov d2, d3
+ b L(ipred_cfl_ac_420_w4_hpad)
+
+L(ipred_cfl_ac_422_w8):
+ cmp r3, #0
+ bne L(ipred_cfl_ac_422_w8_wpad)
+1: // Copy and subsample input, without padding
+ vld1.16 {q0, q1}, [r1, :128], r2
+ vld1.16 {q2, q3}, [r12, :128], r2
+ vld1.16 {q12, q13}, [r1, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d2, d4, d5
+ vpadd.i16 d3, d6, d7
+ vld1.16 {q2, q3}, [r12, :128], r2
+ vpadd.i16 d24, d24, d25
+ vpadd.i16 d25, d26, d27
+ vpadd.i16 d26, d4, d5
+ vpadd.i16 d27, d6, d7
+ vshl.i16 q0, q0, #2
+ vshl.i16 q1, q1, #2
+ vshl.i16 q2, q12, #2
+ vshl.i16 q3, q13, #2
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q3
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_422_w8_wpad):
+1: // Copy and subsample input, padding 4
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q2}, [r12, :128], r2
+ vld1.16 {q12}, [r1, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d4, d5
+ vld1.16 {q2, q3}, [r12, :128], r2
+ vpadd.i16 d24, d24, d25
+ vpadd.i16 d25, d4, d5
+ vshl.i16 q0, q0, #2
+ vshl.i16 q12, q12, #2
+ vdup.16 d7, d25[3]
+ vmov d6, d25
+ vdup.16 d5, d24[3]
+ vmov d4, d24
+ vdup.16 d3, d1[3]
+ vmov d2, d1
+ vdup.16 d1, d0[3]
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q3
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_422_w16):
+ adr r7, L(ipred_cfl_ac_422_w16_tbl)
+ ldr r3, [r7, r3, lsl #2]
+ add r7, r7, r3
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_422_w16_tbl):
+ .word L(ipred_cfl_ac_422_w16_wpad0) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w16_wpad1) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w16_wpad2) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_422_w16_wpad3) - L(ipred_cfl_ac_422_w16_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_422_w16_wpad0):
+ sub r2, r2, #32
+1: // Copy and subsample input, without padding
+ vld1.16 {q0, q1}, [r1, :128]!
+ vld1.16 {q2, q3}, [r12, :128]!
+ vld1.16 {q12, q13}, [r1, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d2, d24, d25
+ vpadd.i16 d3, d26, d27
+ vld1.16 {q12, q13}, [r12, :128], r2
+ vpadd.i16 d4, d4, d5
+ vpadd.i16 d5, d6, d7
+ vpadd.i16 d6, d24, d25
+ vpadd.i16 d7, d26, d27
+ vshl.i16 q0, q0, #2
+ vshl.i16 q1, q1, #2
+ vshl.i16 q2, q2, #2
+ vshl.i16 q3, q3, #2
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_422_w16_wpad1):
+ sub r2, r2, #32
+1: // Copy and subsample input, padding 4
+ vld1.16 {q0, q1}, [r1, :128]!
+ vld1.16 {q2, q3}, [r12, :128]!
+ vld1.16 {q12}, [r1, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d2, d24, d25
+ vld1.16 {q12}, [r12, :128], r2
+ vpadd.i16 d4, d4, d5
+ vpadd.i16 d5, d6, d7
+ vpadd.i16 d6, d24, d25
+ vshl.i16 q0, q0, #2
+ vshl.i16 d2, d2, #2
+ vshl.i16 q2, q2, #2
+ vshl.i16 d6, d6, #2
+ vdup.16 d3, d2[3]
+ vdup.16 d7, d6[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_422_w16_wpad2):
+1: // Copy and subsample input, padding 8
+ vld1.16 {q0, q1}, [r1, :128], r2
+ vld1.16 {q2, q3}, [r12, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d2, d3
+ vpadd.i16 d4, d4, d5
+ vpadd.i16 d5, d6, d7
+ vshl.i16 q0, q0, #2
+ vshl.i16 q2, q2, #2
+ vdup.16 q1, d1[3]
+ vdup.16 q3, d5[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_422_w16_wpad3):
+1: // Copy and subsample input, padding 12
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q2}, [r12, :128], r2
+ vpadd.i16 d0, d0, d1
+ vpadd.i16 d1, d4, d5
+ vshl.i16 q0, q0, #2
+ vdup.16 q3, d1[3]
+ vdup.16 q1, d0[3]
+ vdup.16 d5, d1[3]
+ vmov d4, d1
+ vdup.16 d1, d0[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+endfunc
+
+// void cfl_ac_444_16bpc_neon(int16_t *const ac, const pixel *const ypx,
+// const ptrdiff_t stride, const int w_pad,
+// const int h_pad, const int cw, const int ch);
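+// q8-q11 accumulate the running sum of all written AC values; the shared
+// L(ipred_cfl_ac_420_w4_calc_subtract_dc) tail computes the average (using the
+// negated log2 block size kept in d31) and subtracts it from the AC buffer.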
+function ipred_cfl_ac_444_16bpc_neon, export=1
+ push {r4-r8,lr}
+ ldrd r4, r5, [sp, #24]
+ ldr r6, [sp, #32]
+ clz r8, r5
+ lsl r4, r4, #2
+ adr r7, L(ipred_cfl_ac_444_tbl)
+ sub r8, r8, #26
+ ldr r8, [r7, r8, lsl #2]
+ vmov.i16 q8, #0
+ vmov.i16 q9, #0
+ vmov.i16 q10, #0
+ vmov.i16 q11, #0
+ add r7, r7, r8
+ sub r8, r6, r4 // height - h_pad
+ rbit lr, r5 // rbit(width)
+ rbit r12, r6 // rbit(height)
+ clz lr, lr // ctz(width)
+ clz r12, r12 // ctz(height)
+ add lr, lr, r12 // log2sz
+ add r12, r1, r2
+ vdup.32 d31, lr
+ lsl r2, r2, #1
+ vneg.s32 d31, d31 // -log2sz
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_444_tbl):
+ .word L(ipred_cfl_ac_444_w32) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w16) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w8) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w4) - L(ipred_cfl_ac_444_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_444_w4):
+1: // Copy and expand input
+ vld1.16 {d0}, [r1, :64], r2
+ vld1.16 {d1}, [r12, :64], r2
+ vld1.16 {d2}, [r1, :64], r2
+ vld1.16 {d3}, [r12, :64], r2
+ vshl.i16 q0, q0, #3
+ vshl.i16 q1, q1, #3
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ bgt 1b
+ cmp r4, #0
+ vmov d0, d3
+ vmov d1, d3
+ vmov d2, d3
+ b L(ipred_cfl_ac_420_w4_hpad)
+
+L(ipred_cfl_ac_444_w8):
+1: // Copy and expand input
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q1}, [r12, :128], r2
+ vld1.16 {q2}, [r1, :128], r2
+ vld1.16 {q3}, [r12, :128], r2
+ vshl.i16 q0, q0, #3
+ vshl.i16 q1, q1, #3
+ vshl.i16 q2, q2, #3
+ vshl.i16 q3, q3, #3
+ subs r8, r8, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q3
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w8_hpad)
+
+L(ipred_cfl_ac_444_w16):
+ cmp r3, #0
+ bne L(ipred_cfl_ac_444_w16_wpad)
+1: // Copy and expand input, without padding
+ vld1.16 {q0, q1}, [r1, :128], r2
+ vld1.16 {q2, q3}, [r12, :128], r2
+ vshl.i16 q0, q0, #3
+ vshl.i16 q1, q1, #3
+ vshl.i16 q2, q2, #3
+ vshl.i16 q3, q3, #3
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_444_w16_wpad):
+1: // Copy and expand input, padding 8
+ vld1.16 {q0}, [r1, :128], r2
+ vld1.16 {q2}, [r12, :128], r2
+ vshl.i16 q0, q0, #3
+ vshl.i16 q2, q2, #3
+ vdup.16 q1, d1[3]
+ vdup.16 q3, d5[3]
+ subs r8, r8, #2
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ vmov q0, q2
+ vmov q1, q3
+ b L(ipred_cfl_ac_420_w16_hpad)
+
+L(ipred_cfl_ac_444_w32):
+ adr r7, L(ipred_cfl_ac_444_w32_tbl)
+ ldr r3, [r7, r3, lsl #1] // (w3>>1) << 2
+ asr r2, r2, #1
+ add r7, r7, r3
+ bx r7
+
+ .align 2
+L(ipred_cfl_ac_444_w32_tbl):
+ .word L(ipred_cfl_ac_444_w32_wpad0) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w32_wpad2) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w32_wpad4) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+ .word L(ipred_cfl_ac_444_w32_wpad6) - L(ipred_cfl_ac_444_w32_tbl) + CONFIG_THUMB
+
+L(ipred_cfl_ac_444_w32_wpad0):
+ sub r2, r2, #32
+1: // Copy and expand input, without padding
+ vld1.16 {q0, q1}, [r1, :128]!
+ vld1.16 {q2, q3}, [r1, :128], r2
+ vshl.i16 q0, q0, #3
+ vshl.i16 q1, q1, #3
+ vshl.i16 q2, q2, #3
+ vshl.i16 q3, q3, #3
+ subs r8, r8, #1
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_444_w32_hpad)
+
+L(ipred_cfl_ac_444_w32_wpad2):
+ sub r2, r2, #32
+1: // Copy and expand input, padding 8
+ vld1.16 {q0, q1}, [r1, :128]!
+ vld1.16 {q2}, [r1, :128], r2
+ vshl.i16 q0, q0, #3
+ vshl.i16 q1, q1, #3
+ vshl.i16 q2, q2, #3
+ subs r8, r8, #1
+ vst1.16 {q0, q1}, [r0, :128]!
+ vdup.16 q3, d5[3]
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_444_w32_hpad)
+
+L(ipred_cfl_ac_444_w32_wpad4):
+1: // Copy and expand input, padding 16
+ vld1.16 {q0, q1}, [r1, :128], r2
+ vshl.i16 q0, q0, #3
+ vshl.i16 q1, q1, #3
+ subs r8, r8, #1
+ vst1.16 {q0, q1}, [r0, :128]!
+ vdup.16 q2, d3[3]
+ vdup.16 q3, d3[3]
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+ b L(ipred_cfl_ac_444_w32_hpad)
+
+L(ipred_cfl_ac_444_w32_wpad6):
+1: // Copy and expand input, padding 24
+ vld1.16 {q0}, [r1, :128], r2
+ vshl.i16 q0, q0, #3
+ subs r8, r8, #1
+ vdup.16 q1, d1[3]
+ vst1.16 {q0, q1}, [r0, :128]!
+ vdup.16 q2, d1[3]
+ vdup.16 q3, d1[3]
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 1b
+ cmp r4, #0
+
+L(ipred_cfl_ac_444_w32_hpad):
+ beq 3f // This assumes that all callers already did "cmp r4, #0"
+2: // Vertical padding (h_pad > 0)
+ subs r4, r4, #1
+ vst1.16 {q0, q1}, [r0, :128]!
+ vaddw.u16 q8, q8, d0
+ vaddw.u16 q9, q9, d1
+ vaddw.u16 q10, q10, d2
+ vaddw.u16 q11, q11, d3
+ vst1.16 {q2, q3}, [r0, :128]!
+ vaddw.u16 q8, q8, d4
+ vaddw.u16 q9, q9, d5
+ vaddw.u16 q10, q10, d6
+ vaddw.u16 q11, q11, d7
+ bgt 2b
+3:
+
+ // Multiply the height by eight and reuse the w4 subtract_dc code
+ lsl r6, r6, #3
+ b L(ipred_cfl_ac_420_w4_calc_subtract_dc)
+endfunc
diff --git a/third_party/dav1d/src/arm/32/itx.S b/third_party/dav1d/src/arm/32/itx.S
new file mode 100644
index 0000000000..ceea025e45
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/itx.S
@@ -0,0 +1,3343 @@
+/******************************************************************************
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+// The exported functions in this file have got the following signature:
+// void itxfm_add(pixel *dst, ptrdiff_t dst_stride, coef *coeff, int eob);
+
+// Most of the functions use the following register layout:
+// r0-r3 external parameters
+// r4 function pointer to first transform
+// r5 function pointer to second transform
+// r6 output parameter for helper function
+// r7 input parameter for helper function
+// r8 input stride for helper function
+// r9 scratch variable for helper functions
+// r10-r11 pointer to list of eob thresholds, eob threshold value,
+// scratch variables within helper functions (backed up)
+
+// The SIMD registers most often use the following layout:
+// d0-d3 multiplication coefficients
+// d4-d7 scratch registers
+// d8-d15 unused in some transforms, used for scratch registers in others
+// d16-d31 inputs/outputs of transforms
+
+// Potential further optimizations, that are left unimplemented for now:
+// - Trying to keep multiplication coefficients in registers across multiple
+// transform functions. (The register layout is designed to potentially
+// allow this.)
+// - Use a simplified version of the transforms themselves for cases where
+// we know a significant number of inputs are zero. E.g. if the eob value
+// indicates only a quarter of input values are set, for idct16 and up,
+// a significant amount of calculation can be skipped, at the cost of more
+// code duplication and special casing.
+
+const idct_coeffs, align=4
+ // idct4
+ .short 2896, 2896*8, 1567, 3784
+ // idct8
+ .short 799, 4017, 3406, 2276
+ // idct16
+ .short 401, 4076, 3166, 2598
+ .short 1931, 3612, 3920, 1189
+ // idct32
+ .short 201, 4091, 3035, 2751
+ .short 1751, 3703, 3857, 1380
+ .short 995, 3973, 3513, 2106
+ .short 2440, 3290, 4052, 601
+endconst
+
+const idct64_coeffs, align=4
+ .short 101*8, 4095*8, 2967*8, -2824*8
+ .short 1660*8, 3745*8, 3822*8, -1474*8
+ .short 4076, 401, 4017, 799
+
+ .short 4036*8, -700*8, 2359*8, 3349*8
+ .short 3461*8, -2191*8, 897*8, 3996*8
+ .short -3166, -2598, -799, -4017
+
+ .short 501*8, 4065*8, 3229*8, -2520*8
+ .short 2019*8, 3564*8, 3948*8, -1092*8
+ .short 3612, 1931, 2276, 3406
+
+ .short 4085*8, -301*8, 2675*8, 3102*8
+ .short 3659*8, -1842*8, 1285*8, 3889*8
+ .short -3920, -1189, -3406, -2276
+endconst
+
+const iadst4_coeffs, align=4
+ // .h[4-5] can be interpreted as .s[2]
+ .short 1321, 3803, 2482, 3344, 3344, 0
+endconst
+
+const iadst8_coeffs, align=4
+ .short 4076, 401, 3612, 1931
+ .short 2598, 3166, 1189, 3920
+ // idct_coeffs
+ .short 2896, 0, 1567, 3784, 0, 0, 0, 0
+endconst
+
+const iadst16_coeffs, align=4
+ .short 4091, 201, 3973, 995
+ .short 3703, 1751, 3290, 2440
+ .short 2751, 3035, 2106, 3513
+ .short 1380, 3857, 601, 4052
+endconst
+
+.macro vmull_vmlal d0, s0, s1, c0, c1
+ vmull.s16 \d0, \s0, \c0
+ vmlal.s16 \d0, \s1, \c1
+.endm
+
+.macro vmull_vmlal_8h d0, d1, s0, s1, s2, s3, c0, c1
+ vmull.s16 \d0, \s0, \c0
+ vmlal.s16 \d0, \s2, \c1
+ vmull.s16 \d1, \s1, \c0
+ vmlal.s16 \d1, \s3, \c1
+.endm
+
+.macro vmull_vmlsl d0, s0, s1, c0, c1
+ vmull.s16 \d0, \s0, \c0
+ vmlsl.s16 \d0, \s1, \c1
+.endm
+
+.macro vmull_vmlsl_8h d0, d1, s0, s1, s2, s3, c0, c1
+ vmull.s16 \d0, \s0, \c0
+ vmlsl.s16 \d0, \s2, \c1
+ vmull.s16 \d1, \s1, \c0
+ vmlsl.s16 \d1, \s3, \c1
+.endm
+
+.macro vqrshrn_8h d0, d1, s0, s1, shift
+ vqrshrn.s32 \d0, \s0, \shift
+ vqrshrn.s32 \d1, \s1, \shift
+.endm
+
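+// Scale 2, 4 or 8 q registers by the same constant with a saturating rounding
+// doubling multiply (vqrdmulh); here this applies the 2896/4096 (1/sqrt(2))
+// scaling used by the rectangular transform sizes.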
+.macro scale_input c, r0, r1, r2, r3, r4, r5, r6, r7
+ vqrdmulh.s16 \r0, \r0, \c
+ vqrdmulh.s16 \r1, \r1, \c
+.ifnb \r2
+ vqrdmulh.s16 \r2, \r2, \c
+ vqrdmulh.s16 \r3, \r3, \c
+.endif
+.ifnb \r4
+ vqrdmulh.s16 \r4, \r4, \c
+ vqrdmulh.s16 \r5, \r5, \c
+ vqrdmulh.s16 \r6, \r6, \c
+ vqrdmulh.s16 \r7, \r7, \c
+.endif
+.endm
+
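+// One stage of the software-pipelined add/store epilogue: load destination
+// pixels, round/shift the residual, widen-add, narrow with unsigned saturation
+// and store; each step is optional so callers can interleave iterations.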
+.macro load_add_store load, shift, addsrc, adddst, narrowsrc, narrowdst, store, dst, src, shiftbits=4
+.ifnb \load
+ vld1.8 {\load}, [\src, :64], r1
+.endif
+.ifnb \shift
+ vrshr.s16 \shift, \shift, #\shiftbits
+.endif
+.ifnb \addsrc
+ vaddw.u8 \adddst, \adddst, \addsrc
+.endif
+.ifnb \narrowsrc
+ vqmovun.s16 \narrowdst, \narrowsrc
+.endif
+.ifnb \store
+ vst1.8 {\store}, [\dst, :64], r1
+.endif
+.endm
+.macro load_add_store_8x8 dst, src, shiftbits=4
+ mov \src, \dst
+ load_add_store d2, q8, , , , , , \dst, \src, \shiftbits
+ load_add_store d3, q9, , , , , , \dst, \src, \shiftbits
+ load_add_store d4, q10, d2, q8, , , , \dst, \src, \shiftbits
+ load_add_store d5, q11, d3, q9, q8, d2, , \dst, \src, \shiftbits
+ load_add_store d6, q12, d4, q10, q9, d3, d2, \dst, \src, \shiftbits
+ load_add_store d7, q13, d5, q11, q10, d4, d3, \dst, \src, \shiftbits
+ load_add_store d2, q14, d6, q12, q11, d5, d4, \dst, \src, \shiftbits
+ load_add_store d3, q15, d7, q13, q12, d6, d5, \dst, \src, \shiftbits
+ load_add_store , , d2, q14, q13, d7, d6, \dst, \src, \shiftbits
+ load_add_store , , d3, q15, q14, d2, d7, \dst, \src, \shiftbits
+ load_add_store , , , , q15, d3, d2, \dst, \src, \shiftbits
+ load_add_store , , , , , , d3, \dst, \src, \shiftbits
+.endm
+.macro load_add_store_8x4 dst, src
+ mov \src, \dst
+ load_add_store d2, q8, , , , , , \dst, \src
+ load_add_store d3, q9, , , , , , \dst, \src
+ load_add_store d4, q10, d2, q8, , , , \dst, \src
+ load_add_store d5, q11, d3, q9, q8, d2, , \dst, \src
+ load_add_store , , d4, q10, q9, d3, d2, \dst, \src
+ load_add_store , , d5, q11, q10, d4, d3, \dst, \src
+ load_add_store , , , , q11, d5, d4, \dst, \src
+ load_add_store , , , , , , d5, \dst, \src
+.endm
+.macro load_add_store4 load, shift, addsrc, adddst, narrowsrc, narrowdst, store, dst, src
+.ifnb \load
+ vld1.32 {\load[0]}, [\src, :32], r1
+.endif
+.ifnb \shift
+ vrshr.s16 \shift, \shift, #4
+.endif
+.ifnb \load
+ vld1.32 {\load[1]}, [\src, :32], r1
+.endif
+.ifnb \addsrc
+ vaddw.u8 \adddst, \adddst, \addsrc
+.endif
+.ifnb \store
+ vst1.32 {\store[0]}, [\dst, :32], r1
+.endif
+.ifnb \narrowsrc
+ vqmovun.s16 \narrowdst, \narrowsrc
+.endif
+.ifnb \store
+ vst1.32 {\store[1]}, [\dst, :32], r1
+.endif
+.endm
+.macro load_add_store_4x16 dst, src
+ mov \src, \dst
+ load_add_store4 d0, , , , , , , \dst, \src
+ load_add_store4 d1, q8, , , , , , \dst, \src
+ load_add_store4 d2, q9, d0, q8, , , , \dst, \src
+ load_add_store4 d3, q10, d1, q9, q8, d0, , \dst, \src
+ load_add_store4 d4, q11, d2, q10, q9, d1, d0, \dst, \src
+ load_add_store4 d5, q12, d3, q11, q10, d2, d1, \dst, \src
+ load_add_store4 d6, q13, d4, q12, q11, d3, d2, \dst, \src
+ load_add_store4 d7, q14, d5, q13, q12, d4, d3, \dst, \src
+ load_add_store4 , q15, d6, q14, q13, d5, d4, \dst, \src
+ load_add_store4 , , d7, q15, q14, d6, d5, \dst, \src
+ load_add_store4 , , , , q15, d7, d6, \dst, \src
+ load_add_store4 , , , , , , d7, \dst, \src
+.endm
+.macro load_add_store_4x8 dst, src
+ mov \src, \dst
+ load_add_store4 d0, , , , , , , \dst, \src
+ load_add_store4 d1, q8, , , , , , \dst, \src
+ load_add_store4 d2, q9, d0, q8, , , , \dst, \src
+ load_add_store4 d3, q10, d1, q9, q8, d0, , \dst, \src
+ load_add_store4 , q11, d2, q10, q9, d1, d0, \dst, \src
+ load_add_store4 , , d3, q11, q10, d2, d1, \dst, \src
+ load_add_store4 , , , , q11, d3, d2, \dst, \src
+ load_add_store4 , , , , , , d3, \dst, \src
+.endm
+
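+// DC-only fast path: when eob is 0 only the DC coefficient can be nonzero, so
+// compute the constant pixel offset directly (scaling by 2896/4096 per pass,
+// with an extra scale for rectangular blocks, plus the intermediate and final
+// shifts) and add it to the whole block via idct_dc_w*_neon.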
+.macro idct_dc w, h, shift
+ cmp r3, #0
+ bne 1f
+ vmov.i16 d30, #0
+ movw r12, #2896*8
+ vld1.16 {d16[]}, [r2, :16]
+ vdup.16 d0, r12
+ vqrdmulh.s16 d16, d16, d0[0]
+ vst1.16 {d30[0]}, [r2, :16]
+.if (\w == 2*\h) || (2*\w == \h)
+ vqrdmulh.s16 d16, d16, d0[0]
+.endif
+.if \shift > 0
+ vrshr.s16 d16, d16, #\shift
+.endif
+ vqrdmulh.s16 d20, d16, d0[0]
+ mov r3, #\h
+ vrshr.s16 d16, d20, #4
+ vrshr.s16 d17, d20, #4
+ b idct_dc_w\w\()_neon
+1:
+.endm
+
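+// The idct_dc_w*_neon helpers add the DC value broadcast in q8 to r3 rows of a
+// w-pixel wide block at dst (r0, stride r1), clipping to 8 bit.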
+function idct_dc_w4_neon
+1:
+ vld1.32 {d0[0]}, [r0, :32], r1
+ vld1.32 {d0[1]}, [r0, :32], r1
+ vld1.32 {d1[0]}, [r0, :32], r1
+ vld1.32 {d1[1]}, [r0, :32], r1
+ subs r3, r3, #4
+ sub r0, r0, r1, lsl #2
+ vaddw.u8 q10, q8, d0
+ vqmovun.s16 d0, q10
+ vaddw.u8 q11, q8, d1
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vqmovun.s16 d1, q11
+ vst1.32 {d0[1]}, [r0, :32], r1
+ vst1.32 {d1[0]}, [r0, :32], r1
+ vst1.32 {d1[1]}, [r0, :32], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w8_neon
+1:
+ vld1.8 {d0}, [r0, :64], r1
+ vld1.8 {d1}, [r0, :64], r1
+ vld1.8 {d2}, [r0, :64], r1
+ vaddw.u8 q10, q8, d0
+ vld1.8 {d3}, [r0, :64], r1
+ sub r0, r0, r1, lsl #2
+ subs r3, r3, #4
+ vaddw.u8 q11, q8, d1
+ vqmovun.s16 d0, q10
+ vaddw.u8 q12, q8, d2
+ vqmovun.s16 d1, q11
+ vaddw.u8 q13, q8, d3
+ vst1.8 {d0}, [r0, :64], r1
+ vqmovun.s16 d2, q12
+ vst1.8 {d1}, [r0, :64], r1
+ vqmovun.s16 d3, q13
+ vst1.8 {d2}, [r0, :64], r1
+ vst1.8 {d3}, [r0, :64], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w16_neon
+1:
+ vld1.8 {q0}, [r0, :128], r1
+ vld1.8 {q1}, [r0, :128], r1
+ vld1.8 {q2}, [r0, :128], r1
+ subs r3, r3, #4
+ vaddw.u8 q10, q8, d0
+ vaddw.u8 q11, q8, d1
+ vld1.8 {q3}, [r0, :128], r1
+ vaddw.u8 q12, q8, d2
+ vaddw.u8 q13, q8, d3
+ sub r0, r0, r1, lsl #2
+ vaddw.u8 q14, q8, d4
+ vaddw.u8 q15, q8, d5
+ vqmovun.s16 d0, q10
+ vqmovun.s16 d1, q11
+ vaddw.u8 q10, q8, d6
+ vaddw.u8 q11, q8, d7
+ vqmovun.s16 d2, q12
+ vqmovun.s16 d3, q13
+ vqmovun.s16 d4, q14
+ vqmovun.s16 d5, q15
+ vst1.8 {q0}, [r0, :128], r1
+ vqmovun.s16 d6, q10
+ vqmovun.s16 d7, q11
+ vst1.8 {q1}, [r0, :128], r1
+ vst1.8 {q2}, [r0, :128], r1
+ vst1.8 {q3}, [r0, :128], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w32_neon
+1:
+ vld1.8 {q0, q1}, [r0, :128], r1
+ subs r3, r3, #2
+ vld1.8 {q2, q3}, [r0, :128], r1
+ vaddw.u8 q10, q8, d0
+ vaddw.u8 q11, q8, d1
+ vaddw.u8 q12, q8, d2
+ vaddw.u8 q13, q8, d3
+ sub r0, r0, r1, lsl #1
+ vaddw.u8 q14, q8, d4
+ vaddw.u8 q15, q8, d5
+ vqmovun.s16 d0, q10
+ vqmovun.s16 d1, q11
+ vaddw.u8 q10, q8, d6
+ vaddw.u8 q11, q8, d7
+ vqmovun.s16 d2, q12
+ vqmovun.s16 d3, q13
+ vqmovun.s16 d4, q14
+ vqmovun.s16 d5, q15
+ vst1.8 {q0, q1}, [r0, :128], r1
+ vqmovun.s16 d6, q10
+ vqmovun.s16 d7, q11
+ vst1.8 {q2, q3}, [r0, :128], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w64_neon
+ sub r1, r1, #32
+1:
+ vld1.8 {q0, q1}, [r0, :128]!
+ subs r3, r3, #1
+ vld1.8 {q2, q3}, [r0, :128]
+ vaddw.u8 q10, q8, d0
+ vaddw.u8 q11, q8, d1
+ vaddw.u8 q12, q8, d2
+ vaddw.u8 q13, q8, d3
+ sub r0, r0, #32
+ vaddw.u8 q14, q8, d4
+ vaddw.u8 q15, q8, d5
+ vqmovun.s16 d0, q10
+ vqmovun.s16 d1, q11
+ vaddw.u8 q10, q8, d6
+ vaddw.u8 q11, q8, d7
+ vqmovun.s16 d2, q12
+ vqmovun.s16 d3, q13
+ vqmovun.s16 d4, q14
+ vqmovun.s16 d5, q15
+ vst1.8 {q0, q1}, [r0, :128]!
+ vqmovun.s16 d6, q10
+ vqmovun.s16 d7, q11
+ vst1.8 {q2, q3}, [r0, :128], r1
+ bgt 1b
+ bx lr
+endfunc
+
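+// In-place 4-point inverse Walsh-Hadamard transform on d16-d19 (d20-d21 are
+// scratch), used for the lossless wht_wht 4x4 case.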
+.macro iwht4
+ vadd.i16 d16, d16, d17
+ vsub.i16 d21, d18, d19
+ vsub.i16 d20, d16, d21
+ vshr.s16 d20, d20, #1
+ vsub.i16 d18, d20, d17
+ vsub.i16 d17, d20, d19
+ vadd.i16 d19, d21, d18
+ vsub.i16 d16, d16, d17
+.endm
+
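+// 4-point inverse DCT on the four 4x16-bit vectors \r0-\r3, with the idct4
+// coefficients preloaded in d0.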
+.macro idct_4h_x4 r0, r1, r2, r3
+ vmull_vmlal q3, \r1, \r3, d0[3], d0[2]
+ vmull_vmlsl q2, \r1, \r3, d0[2], d0[3]
+ vmull_vmlal q1, \r0, \r2, d0[0], d0[0]
+ vqrshrn.s32 d6, q3, #12
+ vqrshrn.s32 d7, q2, #12
+ vmull_vmlsl q2, \r0, \r2, d0[0], d0[0]
+ vqrshrn.s32 d2, q1, #12
+ vqrshrn.s32 d3, q2, #12
+ vqadd.s16 \r0, d2, d6
+ vqsub.s16 \r3, d2, d6
+ vqadd.s16 \r1, d3, d7
+ vqsub.s16 \r2, d3, d7
+.endm
+
+.macro idct_8h_x4 q0, q1, q2, q3, r0, r1, r2, r3, r4, r5, r6, r7
+ vmull_vmlal_8h q6, q7, \r2, \r3, \r6, \r7, d0[3], d0[2]
+ vmull_vmlsl_8h q4, q5, \r2, \r3, \r6, \r7, d0[2], d0[3]
+ vmull_vmlal_8h q2, q3, \r0, \r1, \r4, \r5, d0[0], d0[0]
+ vqrshrn_8h d12, d13, q6, q7, #12
+ vqrshrn_8h d14, d15, q4, q5, #12
+ vmull_vmlsl_8h q4, q5, \r0, \r1, \r4, \r5, d0[0], d0[0]
+ vqrshrn_8h d4, d5, q2, q3, #12
+ vqrshrn_8h d6, d7, q4, q5, #12
+ vqadd.s16 \q0, q2, q6
+ vqsub.s16 \q3, q2, q6
+ vqadd.s16 \q1, q3, q7
+ vqsub.s16 \q2, q3, q7
+.endm
+
+function inv_dct_4h_x4_neon, export=1
+ movrel_local r12, idct_coeffs
+ vld1.16 {d0}, [r12, :64]
+ idct_4h_x4 d16, d17, d18, d19
+ bx lr
+endfunc
+
+function inv_dct_8h_x4_neon, export=1
+ movrel_local r12, idct_coeffs
+ vld1.16 {d0}, [r12, :64]
+ idct_8h_x4 q8, q9, q10, q11, d16, d17, d18, d19, d20, d21, d22, d23
+ bx lr
+endfunc
+
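+// 4-point inverse ADST reading d16-d19 and writing \o0-\o3 (reversed argument
+// order gives flipadst).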
+.macro iadst_4x4 o0, o1, o2, o3
+ movrel_local r12, iadst4_coeffs
+ vld1.16 {d0, d1}, [r12, :128]
+
+ vsubl.s16 q1, d16, d18
+ vmull.s16 q2, d16, d0[0]
+ vmlal.s16 q2, d18, d0[1]
+ vmlal.s16 q2, d19, d0[2]
+ vmull.s16 q10, d17, d0[3]
+ vaddw.s16 q1, q1, d19
+ vmull.s16 q3, d16, d0[2]
+ vmlsl.s16 q3, d18, d0[0]
+ vmlsl.s16 q3, d19, d0[1]
+
+ vadd.s32 q11, q2, q3
+ vmul.s32 q1, q1, d1[0]
+ vadd.s32 q2, q2, q10
+ vadd.s32 q3, q3, q10
+ vsub.s32 q11, q11, q10
+
+ vqrshrn.s32 \o0, q2, #12
+ vqrshrn.s32 \o2, q1, #12
+ vqrshrn.s32 \o1, q3, #12
+ vqrshrn.s32 \o3, q11, #12
+.endm
+
+function inv_adst_4h_x4_neon, export=1
+ iadst_4x4 d16, d17, d18, d19
+ bx lr
+endfunc
+
+function inv_flipadst_4h_x4_neon, export=1
+ iadst_4x4 d19, d18, d17, d16
+ bx lr
+endfunc
+
+.macro iadst_8x4 o0, o1, o2, o3, o4, o5, o6, o7
+ movrel_local r12, iadst4_coeffs
+ vld1.16 {d0, d1}, [r12, :128]
+
+ vsubl.s16 q2, d16, d20
+ vsubl.s16 q3, d17, d21
+ vmull.s16 q4, d16, d0[0]
+ vmlal.s16 q4, d20, d0[1]
+ vmlal.s16 q4, d22, d0[2]
+ vmull.s16 q5, d17, d0[0]
+ vmlal.s16 q5, d21, d0[1]
+ vmlal.s16 q5, d23, d0[2]
+ vaddw.s16 q2, q2, d22
+ vaddw.s16 q3, q3, d23
+ vmull.s16 q6, d16, d0[2]
+ vmlsl.s16 q6, d20, d0[0]
+ vmlsl.s16 q6, d22, d0[1]
+ vmull.s16 q7, d17, d0[2]
+ vmlsl.s16 q7, d21, d0[0]
+ vmlsl.s16 q7, d23, d0[1]
+
+ vmul.s32 q10, q2, d1[0]
+ vmul.s32 q11, q3, d1[0]
+
+ vmull.s16 q2, d18, d0[3]
+ vmull.s16 q3, d19, d0[3]
+
+ vadd.s32 q8, q4, q2 // out0
+ vadd.s32 q9, q5, q3
+
+ vadd.s32 q4, q4, q6 // out3
+ vadd.s32 q5, q5, q7
+
+ vadd.s32 q6, q6, q2 // out1
+ vadd.s32 q7, q7, q3
+
+ vsub.s32 q4, q4, q2 // out3
+ vsub.s32 q5, q5, q3
+
+ vqrshrn.s32 d20, q10, #12
+ vqrshrn.s32 d21, q11, #12
+
+ vqrshrn.s32 \o0, q8, #12
+ vqrshrn.s32 \o1, q9, #12
+
+.ifc \o4, d18
+ vmov q9, q10
+.endif
+
+ vqrshrn.s32 \o2, q6, #12
+ vqrshrn.s32 \o3, q7, #12
+
+ vqrshrn.s32 \o6, q4, #12
+ vqrshrn.s32 \o7, q5, #12
+.endm
+
+function inv_adst_8h_x4_neon, export=1
+ iadst_8x4 d16, d17, d18, d19, d20, d21, d22, d23
+ bx lr
+endfunc
+
+function inv_flipadst_8h_x4_neon, export=1
+ iadst_8x4 d22, d23, d20, d21, d18, d19, d16, d17
+ bx lr
+endfunc
+
+function inv_identity_4h_x4_neon, export=1
+ movw r12, #(5793-4096)*8
+ vdup.16 d0, r12
+ vqrdmulh.s16 q2, q8, d0[0]
+ vqrdmulh.s16 q3, q9, d0[0]
+ vqadd.s16 q8, q8, q2
+ vqadd.s16 q9, q9, q3
+ bx lr
+endfunc
+
+function inv_identity_8h_x4_neon, export=1
+ movw r12, #(5793-4096)*8
+ vdup.16 d0, r12
+ vqrdmulh.s16 q1, q8, d0[0]
+ vqrdmulh.s16 q2, q9, d0[0]
+ vqrdmulh.s16 q3, q10, d0[0]
+ vqadd.s16 q8, q8, q1
+ vqrdmulh.s16 q1, q11, d0[0]
+ vqadd.s16 q9, q9, q2
+ vqadd.s16 q10, q10, q3
+ vqadd.s16 q11, q11, q1
+ bx lr
+endfunc
+
+.macro identity_8x4_shift1 r0, r1, r2, r3, c
+.irp i, \r0, \r1, \r2, \r3
+ vqrdmulh.s16 q1, \i, \c
+ vrhadd.s16 \i, \i, q1
+.endr
+.endm
+
+function inv_txfm_add_wht_wht_4x4_8bpc_neon, export=1
+ push {r4-r5,lr}
+ vmov.i16 q15, #0
+ vld1.16 {d16, d17, d18, d19}, [r2, :128]
+ vst1.16 {q15}, [r2, :128]!
+
+ vshr.s16 q8, q8, #2
+ vshr.s16 q9, q9, #2
+
+ iwht4
+
+ vst1.16 {q15}, [r2, :128]!
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+
+ iwht4
+
+ vld1.32 {d0[]}, [r0, :32], r1
+ vld1.32 {d0[1]}, [r0, :32], r1
+ vld1.32 {d1[]}, [r0, :32], r1
+ vld1.32 {d1[1]}, [r0, :32], r1
+
+ b L(itx_4x4_end)
+endfunc
+
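+// Shared 4x4 helper: r4 and r5 hold the first and second transform function
+// pointers; the result is downshifted by 4 and added to the destination.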
+function inv_txfm_add_4x4_neon
+ vmov.i16 q15, #0
+ vld1.16 {d16, d17, d18, d19}, [r2, :128]
+ vst1.16 {q15}, [r2, :128]!
+
+ blx r4
+
+ vst1.16 {q15}, [r2, :128]!
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+
+ blx r5
+
+ vld1.32 {d0[]}, [r0, :32], r1
+ vld1.32 {d0[1]}, [r0, :32], r1
+ vld1.32 {d1[]}, [r0, :32], r1
+ vld1.32 {d1[1]}, [r0, :32], r1
+ vrshr.s16 q8, q8, #4
+ vrshr.s16 q9, q9, #4
+
+L(itx_4x4_end):
+ sub r0, r0, r1, lsl #2
+ vaddw.u8 q8, q8, d0
+ vqmovun.s16 d0, q8
+ vaddw.u8 q9, q9, d1
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vqmovun.s16 d1, q9
+ vst1.32 {d0[1]}, [r0, :32], r1
+ vst1.32 {d1[0]}, [r0, :32], r1
+ vst1.32 {d1[1]}, [r0, :32], r1
+
+ pop {r4-r5,pc}
+endfunc
+
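+// Generates the exported inv_txfm_add_<txfm1>_<txfm2>_4x4 entry points, with a
+// DC-only shortcut for the dct_dct combination.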
+.macro def_fn_4x4 txfm1, txfm2
+function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_8bpc_neon, export=1
+ push {r4-r5,lr}
+
+.ifc \txfm1\()_\txfm2, dct_dct
+ cmp r3, #0
+ bne 1f
+ vmov.i16 d30, #0
+ movw r12, #2896*8
+ vld1.16 {d16[]}, [r2, :16]
+ vdup.16 d4, r12
+ vst1.16 {d30[0]}, [r2, :16]
+ vqrdmulh.s16 d16, d16, d4[0]
+ vld1.32 {d0[0]}, [r0, :32], r1
+ vqrdmulh.s16 d20, d16, d4[0]
+ vld1.32 {d0[1]}, [r0, :32], r1
+ vrshr.s16 d16, d20, #4
+ vrshr.s16 d17, d20, #4
+ vld1.32 {d1[0]}, [r0, :32], r1
+ vmov q9, q8
+ vld1.32 {d1[1]}, [r0, :32], r1
+ b L(itx_4x4_end)
+1:
+.endif
+ movrel_local r4, inv_\txfm1\()_4h_x4_neon
+ movrel_local r5, inv_\txfm2\()_4h_x4_neon
+ b inv_txfm_add_4x4_neon
+endfunc
+.endm
+
+def_fn_4x4 dct, dct
+def_fn_4x4 identity, identity
+def_fn_4x4 dct, adst
+def_fn_4x4 dct, flipadst
+def_fn_4x4 dct, identity
+def_fn_4x4 adst, dct
+def_fn_4x4 adst, adst
+def_fn_4x4 adst, flipadst
+def_fn_4x4 flipadst, dct
+def_fn_4x4 flipadst, adst
+def_fn_4x4 flipadst, flipadst
+def_fn_4x4 identity, dct
+
+def_fn_4x4 adst, identity
+def_fn_4x4 flipadst, identity
+def_fn_4x4 identity, adst
+def_fn_4x4 identity, flipadst
+
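+// 8-point inverse DCT: the even inputs go through idct_8h_x4, the odd inputs
+// are rotated with the idct8 coefficients in d1 and butterflied with the even
+// half.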
+.macro idct_8h_x8 q0, q1, q2, q3, q4, q5, q6, q7, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15
+ idct_8h_x4 \q0, \q2, \q4, \q6, \r0, \r1, \r4, \r5, \r8, \r9, \r12, \r13
+
+ vmull_vmlsl_8h q2, q3, \r2, \r3, \r14, \r15, d1[0], d1[1] // -> t4a
+ vmull_vmlal_8h q4, q5, \r2, \r3, \r14, \r15, d1[1], d1[0] // -> t7a
+ vmull_vmlsl_8h q6, q7, \r10, \r11, \r6, \r7, d1[2], d1[3] // -> t5a
+ vqrshrn_8h \r2, \r3, q2, q3, #12 // t4a
+ vqrshrn_8h \r14, \r15, q4, q5, #12 // t7a
+ vmull_vmlal_8h q2, q3, \r10, \r11, \r6, \r7, d1[3], d1[2] // -> t6a
+ vqrshrn_8h \r6, \r7, q6, q7, #12 // t5a
+ vqrshrn_8h \r10, \r11, q2, q3, #12 // t6a
+
+ vqadd.s16 q2, \q1, \q3 // t4
+ vqsub.s16 \q1, \q1, \q3 // t5a
+ vqadd.s16 q3, \q7, \q5 // t7
+ vqsub.s16 \q3, \q7, \q5 // t6a
+
+ vmull_vmlsl_8h q4, q5, \r6, \r7, \r2, \r3, d0[0], d0[0] // -> t5
+ vmull_vmlal_8h q6, q7, \r6, \r7, \r2, \r3, d0[0], d0[0] // -> t6
+ vqrshrn_8h d8, d9, q4, q5, #12 // t5
+ vqrshrn_8h d10, d11, q6, q7, #12 // t6
+
+ vqsub.s16 \q7, \q0, q3 // out7
+ vqadd.s16 \q0, \q0, q3 // out0
+ vqadd.s16 \q1, \q2, q5 // out1
+ vqsub.s16 q6, \q2, q5 // out6
+ vqadd.s16 \q2, \q4, q4 // out2
+ vqsub.s16 \q5, \q4, q4 // out5
+ vqadd.s16 \q3, \q6, q2 // out3
+ vqsub.s16 \q4, \q6, q2 // out4
+ vmov \q6, q6 // out6
+.endm
+
+.macro idct_4h_x8 r0, r1, r2, r3, r4, r5, r6, r7
+ idct_4h_x4 \r0, \r2, \r4, \r6
+
+ vmull_vmlsl q1, \r1, \r7, d1[0], d1[1] // -> t4a
+ vmull_vmlal q2, \r1, \r7, d1[1], d1[0] // -> t7a
+ vmull_vmlsl q3, \r5, \r3, d1[2], d1[3] // -> t5a
+ vqrshrn.s32 \r1, q1, #12 // t4a
+ vmull_vmlal q1, \r5, \r3, d1[3], d1[2] // -> t6a
+ vqrshrn.s32 \r7, q2, #12 // t7a
+ vqrshrn.s32 \r3, q3, #12 // t5a
+ vqrshrn.s32 \r5, q1, #12 // t6a
+
+ vqadd.s16 d2, \r1, \r3 // t4
+ vqsub.s16 \r1, \r1, \r3 // t5a
+ vqadd.s16 d3, \r7, \r5 // t7
+ vqsub.s16 \r3, \r7, \r5 // t6a
+
+ vmull_vmlsl q2, \r3, \r1, d0[0], d0[0] // -> t5
+ vmull_vmlal q3, \r3, \r1, d0[0], d0[0] // -> t6
+ vqrshrn.s32 d4, q2, #12 // t5
+ vqrshrn.s32 d5, q3, #12 // t6
+
+ vqsub.s16 \r7, \r0, d3 // out7
+ vqadd.s16 \r0, \r0, d3 // out0
+ vqadd.s16 \r1, \r2, d5 // out1
+ vqsub.s16 d6, \r2, d5 // out6
+ vqadd.s16 \r2, \r4, d4 // out2
+ vqsub.s16 \r5, \r4, d4 // out5
+ vqadd.s16 \r3, \r6, d2 // out3
+ vqsub.s16 \r4, \r6, d2 // out4
+ vmov \r6, d6 // out6
+.endm
+
+function inv_dct_8h_x8_neon, export=1
+ movrel_local r12, idct_coeffs
+ vld1.16 {q0}, [r12, :128]
+ idct_8h_x8 q8, q9, q10, q11, q12, q13, q14, q15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ bx lr
+endfunc
+
+function inv_dct_4h_x8_neon, export=1
+ movrel_local r12, idct_coeffs
+ vld1.16 {q0}, [r12, :128]
+ idct_4h_x8 d16, d17, d18, d19, d20, d21, d22, d23
+ bx lr
+endfunc
+
+.macro iadst_8h_x8 q0, q1, q2, q3, q4, q5, q6, q7, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10, r11, r12, r13, r14, r15
+ movrel_local r12, iadst8_coeffs
+ vld1.16 {d0, d1, d2}, [r12, :64]
+
+ vmull_vmlal_8h q2, q3, d30, d31, d16, d17, d0[0], d0[1]
+ vmull_vmlsl_8h q4, q5, d30, d31, d16, d17, d0[1], d0[0]
+ vmull_vmlal_8h q6, q7, d26, d27, d20, d21, d0[2], d0[3]
+ vqrshrn_8h d16, d17, q2, q3, #12 // t0a
+ vqrshrn_8h d30, d31, q4, q5, #12 // t1a
+ vmull_vmlsl_8h q2, q3, d26, d27, d20, d21, d0[3], d0[2]
+ vmull_vmlal_8h q4, q5, d22, d23, d24, d25, d1[0], d1[1]
+ vqrshrn_8h d20, d21, q6, q7, #12 // t2a
+ vqrshrn_8h d26, d27, q2, q3, #12 // t3a
+ vmull_vmlsl_8h q6, q7, d22, d23, d24, d25, d1[1], d1[0]
+ vmull_vmlal_8h q2, q3, d18, d19, d28, d29, d1[2], d1[3]
+ vqrshrn_8h d24, d25, q4, q5, #12 // t4a
+ vqrshrn_8h d22, d23, q6, q7, #12 // t5a
+ vmull_vmlsl_8h q4, q5, d18, d19, d28, d29, d1[3], d1[2]
+ vqrshrn_8h d28, d29, q2, q3, #12 // t6a
+ vqrshrn_8h d18, d19, q4, q5, #12 // t7a
+
+ vqadd.s16 q2, q8, q12 // t0
+ vqsub.s16 q3, q8, q12 // t4
+ vqadd.s16 q4, q15, q11 // t1
+ vqsub.s16 q5, q15, q11 // t5
+ vqadd.s16 q6, q10, q14 // t2
+ vqsub.s16 q7, q10, q14 // t6
+ vqadd.s16 q10, q13, q9 // t3
+ vqsub.s16 q11, q13, q9 // t7
+
+ vmull_vmlal_8h q8, q9, d6, d7, d10, d11, d2[3], d2[2]
+ vmull_vmlsl_8h q12, q13, d6, d7, d10, d11, d2[2], d2[3]
+ vmull_vmlsl_8h q14, q15, d22, d23, d14, d15, d2[3], d2[2]
+
+ vqrshrn_8h d6, d7, q8, q9, #12 // t4a
+ vqrshrn_8h d10, d11, q12, q13, #12 // t5a
+
+ vmull_vmlal_8h q8, q9, d22, d23, d14, d15, d2[2], d2[3]
+
+ vqrshrn_8h d14, d15, q14, q15, #12 // t6a
+ vqrshrn_8h d22, d23, q8, q9, #12 // t7a
+
+ vqadd.s16 \q0, q2, q6 // out0
+ vqsub.s16 q2, q2, q6 // t2
+ vqadd.s16 \q7, q4, q10 // out7
+ vqsub.s16 q4, q4, q10 // t3
+ vqneg.s16 \q7, \q7 // out7
+
+ vqadd.s16 \q1, q3, q7 // out1
+ vqsub.s16 q3, q3, q7 // t6
+ vqadd.s16 \q6, q5, q11 // out6
+ vqsub.s16 q5, q5, q11 // t7
+ vqneg.s16 \q1, \q1 // out1
+
+ vmull_vmlal_8h q10, q11, d4, d5, d8, d9, d2[0], d2[0] // -> out3 (q11 or q12)
+ vmull_vmlsl_8h q6, q7, d4, d5, d8, d9, d2[0], d2[0] // -> out4 (q12 or q11)
+ vmull_vmlsl_8h q12, q13, d6, d7, d10, d11, d2[0], d2[0] // -> out5 (q13 or q10)
+ vqrshrn_8h d4, d5, q10, q11, #12 // out3
+ vmull_vmlal_8h q10, q11, d6, d7, d10, d11, d2[0], d2[0] // -> out2 (q10 or q13)
+ vqrshrn_8h d6, d7, q12, q13, #12 // out5
+ vqrshrn_8h \r4, \r5, q10, q11, #12 // out2 (q10 or q13)
+ vqrshrn_8h \r8, \r9, q6, q7, #12 // out4 (q12 or q11)
+
+ vqneg.s16 \q3, q2 // out3
+ vqneg.s16 \q5, q3 // out5
+.endm
+
+.macro iadst_4h_x8 r0, r1, r2, r3, r4, r5, r6, r7
+ movrel_local r12, iadst8_coeffs
+ vld1.16 {d0, d1, d2}, [r12, :64]
+
+ vmull_vmlal q2, d23, d16, d0[0], d0[1]
+ vmull_vmlsl q3, d23, d16, d0[1], d0[0]
+ vmull_vmlal q4, d21, d18, d0[2], d0[3]
+ vqrshrn.s32 d16, q2, #12 // t0a
+ vqrshrn.s32 d23, q3, #12 // t1a
+ vmull_vmlsl q5, d21, d18, d0[3], d0[2]
+ vmull_vmlal q6, d19, d20, d1[0], d1[1]
+ vqrshrn.s32 d18, q4, #12 // t2a
+ vqrshrn.s32 d21, q5, #12 // t3a
+ vmull_vmlsl q7, d19, d20, d1[1], d1[0]
+ vmull_vmlal q2, d17, d22, d1[2], d1[3]
+ vqrshrn.s32 d20, q6, #12 // t4a
+ vqrshrn.s32 d19, q7, #12 // t5a
+ vmull_vmlsl q3, d17, d22, d1[3], d1[2]
+ vqrshrn.s32 d22, q2, #12 // t6a
+ vqrshrn.s32 d17, q3, #12 // t7a
+
+ vqadd.s16 d4, d16, d20 // t0
+ vqsub.s16 d5, d16, d20 // t4
+ vqadd.s16 d6, d23, d19 // t1
+ vqsub.s16 d7, d23, d19 // t5
+ vqadd.s16 d8, d18, d22 // t2
+ vqsub.s16 d9, d18, d22 // t6
+ vqadd.s16 d18, d21, d17 // t3
+ vqsub.s16 d19, d21, d17 // t7
+
+ vmull_vmlal q8, d5, d7, d2[3], d2[2]
+ vmull_vmlsl q10, d5, d7, d2[2], d2[3]
+ vmull_vmlsl q11, d19, d9, d2[3], d2[2]
+
+ vqrshrn.s32 d5, q8, #12 // t4a
+ vqrshrn.s32 d7, q10, #12 // t5a
+
+ vmull_vmlal q8, d19, d9, d2[2], d2[3]
+
+ vqrshrn.s32 d9, q11, #12 // t6a
+ vqrshrn.s32 d19, q8, #12 // t7a
+
+ vqadd.s16 \r0, d4, d8 // out0
+ vqsub.s16 d4, d4, d8 // t2
+ vqadd.s16 \r7, d6, d18 // out7
+ vqsub.s16 d6, d6, d18 // t3
+ vqneg.s16 \r7, \r7 // out7
+
+ vqadd.s16 \r1, d5, d9 // out1
+ vqsub.s16 d5, d5, d9 // t6
+ vqadd.s16 \r6, d7, d19 // out6
+ vqsub.s16 d7, d7, d19 // t7
+ vqneg.s16 \r1, \r1 // out1
+
+ vmull_vmlal q9, d4, d6, d2[0], d2[0] // -> out3 (d19 or d20)
+ vmull_vmlsl q4, d4, d6, d2[0], d2[0] // -> out4 (d20 or d19)
+ vmull_vmlsl q10, d5, d7, d2[0], d2[0] // -> out5 (d21 or d18)
+ vqrshrn.s32 d4, q9, #12 // out3
+ vmull_vmlal q9, d5, d7, d2[0], d2[0] // -> out2 (d18 or d21)
+ vqrshrn.s32 d5, q10, #12 // out5
+ vqrshrn.s32 \r2, q9, #12 // out2 (d18 or d21)
+ vqrshrn.s32 \r4, q4, #12 // out4 (d20 or d19)
+
+ vqneg.s16 \r3, d4 // out3
+ vqneg.s16 \r5, d5 // out5
+.endm
+
+function inv_adst_8h_x8_neon, export=1
+ iadst_8h_x8 q8, q9, q10, q11, q12, q13, q14, q15, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ bx lr
+endfunc
+
+function inv_flipadst_8h_x8_neon, export=1
+ iadst_8h_x8 q15, q14, q13, q12, q11, q10, q9, q8, d30, d31, d28, d29, d26, d27, d24, d25, d22, d23, d20, d21, d18, d19, d16, d17
+ bx lr
+endfunc
+
+function inv_adst_4h_x8_neon, export=1
+ iadst_4h_x8 d16, d17, d18, d19, d20, d21, d22, d23
+ bx lr
+endfunc
+
+function inv_flipadst_4h_x8_neon, export=1
+ iadst_4h_x8 d23, d22, d21, d20, d19, d18, d17, d16
+ bx lr
+endfunc
+
+function inv_identity_8h_x8_neon, export=1
+ vqshl.s16 q8, q8, #1
+ vqshl.s16 q9, q9, #1
+ vqshl.s16 q10, q10, #1
+ vqshl.s16 q11, q11, #1
+ vqshl.s16 q12, q12, #1
+ vqshl.s16 q13, q13, #1
+ vqshl.s16 q14, q14, #1
+ vqshl.s16 q15, q15, #1
+ bx lr
+endfunc
+
+function inv_identity_4h_x8_neon, export=1
+ vqshl.s16 q8, q8, #1
+ vqshl.s16 q9, q9, #1
+ vqshl.s16 q10, q10, #1
+ vqshl.s16 q11, q11, #1
+ bx lr
+endfunc
+
+.macro def_fn_8x8_base variant
+function inv_txfm_\variant\()add_8x8_neon
+ vmov.i16 q0, #0
+ vmov.i16 q1, #0
+ vld1.16 {q8, q9}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]!
+ vld1.16 {q10, q11}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]!
+ vld1.16 {q12, q13}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]!
+ vld1.16 {q14, q15}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]
+
+.ifc \variant, identity_
+ // The identity shl #1 and downshift vrshr #1 cancel out
+.else
+ blx r4
+
+ vrshr.s16 q8, q8, #1
+ vrshr.s16 q9, q9, #1
+ vrshr.s16 q10, q10, #1
+ vrshr.s16 q11, q11, #1
+ vrshr.s16 q12, q12, #1
+ vrshr.s16 q13, q13, #1
+ vrshr.s16 q14, q14, #1
+ vrshr.s16 q15, q15, #1
+.endif
+
+ transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
+
+ blx r5
+
+ load_add_store_8x8 r0, r7
+ vpop {q4-q7}
+ pop {r4-r5,r7,pc}
+endfunc
+.endm
+
+def_fn_8x8_base
+def_fn_8x8_base identity_
+
+.macro def_fn_8x8 txfm1, txfm2
+function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_8bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc 8, 8, 1
+.endif
+ push {r4-r5,r7,lr}
+ vpush {q4-q7}
+ movrel_local r5, inv_\txfm2\()_8h_x8_neon
+.ifc \txfm1, identity
+ b inv_txfm_identity_add_8x8_neon
+.else
+ movrel_local r4, inv_\txfm1\()_8h_x8_neon
+ b inv_txfm_add_8x8_neon
+.endif
+endfunc
+.endm
+
+def_fn_8x8 dct, dct
+def_fn_8x8 identity, identity
+def_fn_8x8 dct, adst
+def_fn_8x8 dct, flipadst
+def_fn_8x8 dct, identity
+def_fn_8x8 adst, dct
+def_fn_8x8 adst, adst
+def_fn_8x8 adst, flipadst
+def_fn_8x8 flipadst, dct
+def_fn_8x8 flipadst, adst
+def_fn_8x8 flipadst, flipadst
+def_fn_8x8 identity, dct
+def_fn_8x8 adst, identity
+def_fn_8x8 flipadst, identity
+def_fn_8x8 identity, adst
+def_fn_8x8 identity, flipadst
+
+function inv_txfm_add_8x4_neon
+ vmov.i16 q14, #0
+ vmov.i16 q15, #0
+ movw r12, #2896*8
+ vdup.16 d0, r12
+ vld1.16 {d16, d17, d18, d19}, [r2, :128]
+ vst1.16 {q14, q15}, [r2, :128]!
+ vld1.16 {d20, d21, d22, d23}, [r2, :128]
+ vst1.16 {q14, q15}, [r2, :128]
+
+ scale_input d0[0], q8, q9, q10, q11
+
+ blx r4
+
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ vswp d17, d20
+ vswp d19, d21
+ vswp d18, d20
+ vswp d21, d22
+
+ blx r5
+
+ load_add_store_8x4 r0, r7
+ vpop {q4-q7}
+ pop {r4-r5,r7,pc}
+endfunc
+
+function inv_txfm_add_4x8_neon
+ vmov.i16 q14, #0
+ vmov.i16 q15, #0
+ movw r12, #2896*8
+ vdup.16 d0, r12
+ vld1.16 {q8, q9}, [r2, :128]
+ vst1.16 {q14, q15}, [r2, :128]!
+ vld1.16 {q10, q11}, [r2, :128]
+ vst1.16 {q14, q15}, [r2, :128]
+
+ scale_input d0[0], q8, q9, q10, q11
+
+ blx r4
+
+ transpose_4x8h q8, q9, q10, q11
+ vswp d17, d20
+ vswp d19, d21
+ vswp d17, d18
+ vswp d19, d22
+
+ blx r5
+
+ load_add_store_4x8 r0, r7
+ vpop {q4-q7}
+ pop {r4-r5,r7,pc}
+endfunc
+
+.macro def_fn_48 w, h, txfm1, txfm2
+function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc \w, \h, 0
+.endif
+ push {r4-r5,r7,lr}
+ vpush {q4-q7}
+ movrel_local r4, inv_\txfm1\()_\h\()h_x\w\()_neon
+ movrel_local r5, inv_\txfm2\()_\w\()h_x\h\()_neon
+ b inv_txfm_add_\w\()x\h\()_neon
+endfunc
+.endm
+
+.macro def_fns_48 w, h
+def_fn_48 \w, \h, dct, dct
+def_fn_48 \w, \h, identity, identity
+def_fn_48 \w, \h, dct, adst
+def_fn_48 \w, \h, dct, flipadst
+def_fn_48 \w, \h, dct, identity
+def_fn_48 \w, \h, adst, dct
+def_fn_48 \w, \h, adst, adst
+def_fn_48 \w, \h, adst, flipadst
+def_fn_48 \w, \h, flipadst, dct
+def_fn_48 \w, \h, flipadst, adst
+def_fn_48 \w, \h, flipadst, flipadst
+def_fn_48 \w, \h, identity, dct
+def_fn_48 \w, \h, adst, identity
+def_fn_48 \w, \h, flipadst, identity
+def_fn_48 \w, \h, identity, adst
+def_fn_48 \w, \h, identity, flipadst
+.endm
+
+def_fns_48 4, 8
+def_fns_48 8, 4
+
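+// 16-point inverse DCT on d16-d31: the even half is handled by idct_4h_x8, the
+// odd half uses the idct16 coefficients in q1.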
+function inv_dct_4h_x16_neon, export=1
+ movrel_local r12, idct_coeffs
+ vld1.16 {q0, q1}, [r12, :128]
+
+ vmull_vmlsl q2, d17, d31, d2[0], d2[1] // -> t8a
+ vmull_vmlal q3, d17, d31, d2[1], d2[0] // -> t15a
+ vmull_vmlsl q4, d25, d23, d2[2], d2[3] // -> t9a
+ vqrshrn.s32 d17, q2, #12 // t8a
+ vqrshrn.s32 d31, q3, #12 // t15a
+ vmull_vmlal q2, d25, d23, d2[3], d2[2] // -> t14a
+ vmull_vmlsl q3, d21, d27, d3[0], d3[1] // -> t10a
+ vqrshrn.s32 d23, q4, #12 // t9a
+ vqrshrn.s32 d25, q2, #12 // t14a
+ vmull_vmlal q4, d21, d27, d3[1], d3[0] // -> t13a
+ vmull_vmlsl q2, d29, d19, d3[2], d3[3] // -> t11a
+ vqrshrn.s32 d21, q3, #12 // t10a
+ vqrshrn.s32 d27, q4, #12 // t13a
+ vmull_vmlal q3, d29, d19, d3[3], d3[2] // -> t12a
+ vqrshrn.s32 d19, q2, #12 // t11a
+ vqrshrn.s32 d29, q3, #12 // t12a
+
+ idct_4h_x8 d16, d18, d20, d22, d24, d26, d28, d30
+
+ vqsub.s16 d4, d17, d23 // t9
+ vqadd.s16 d17, d17, d23 // t8
+ vqsub.s16 d5, d31, d25 // t14
+ vqadd.s16 d31, d31, d25 // t15
+ vqsub.s16 d23, d19, d21 // t10
+ vqadd.s16 d19, d19, d21 // t11
+ vqadd.s16 d25, d29, d27 // t12
+ vqsub.s16 d29, d29, d27 // t13
+
+ vmull_vmlsl q3, d5, d4, d0[2], d0[3] // -> t9a
+ vmull_vmlal q4, d5, d4, d0[3], d0[2] // -> t14a
+ vqrshrn.s32 d21, q3, #12 // t9a
+ vqrshrn.s32 d27, q4, #12 // t14a
+
+ vmull_vmlsl q3, d29, d23, d0[2], d0[3] // -> t13a
+ vmull_vmlal q4, d29, d23, d0[3], d0[2] // -> t10a
+ vqrshrn.s32 d29, q3, #12 // t13a
+ vneg.s32 q4, q4
+ vqrshrn.s32 d23, q4, #12 // t10a
+
+ vqsub.s16 d4, d17, d19 // t11a
+ vqadd.s16 d17, d17, d19 // t8a
+ vqsub.s16 d5, d31, d25 // t12a
+ vqadd.s16 d31, d31, d25 // t15a
+ vqadd.s16 d19, d21, d23 // t9
+ vqsub.s16 d21, d21, d23 // t10
+ vqsub.s16 d25, d27, d29 // t13
+ vqadd.s16 d27, d27, d29 // t14
+
+ vmull_vmlsl q3, d5, d4, d0[0], d0[0] // -> t11
+ vmull_vmlal q4, d5, d4, d0[0], d0[0] // -> t12
+ vmull_vmlsl q2, d25, d21, d0[0], d0[0] // -> t10a
+
+ vqrshrn.s32 d6, q3, #12 // t11
+ vqrshrn.s32 d7, q4, #12 // t12
+ vmull_vmlal q4, d25, d21, d0[0], d0[0] // -> t13a
+ vqrshrn.s32 d4, q2, #12 // t10a
+ vqrshrn.s32 d5, q4, #12 // t13a
+
+ vqadd.s16 d8, d16, d31 // out0
+ vqsub.s16 d31, d16, d31 // out15
+ vmov d16, d8
+ vqadd.s16 d23, d30, d17 // out7
+ vqsub.s16 d9, d30, d17 // out8
+ vqadd.s16 d17, d18, d27 // out1
+ vqsub.s16 d30, d18, d27 // out14
+ vqadd.s16 d18, d20, d5 // out2
+ vqsub.s16 d29, d20, d5 // out13
+ vqadd.s16 d5, d28, d19 // out6
+ vqsub.s16 d25, d28, d19 // out9
+ vqadd.s16 d19, d22, d7 // out3
+ vqsub.s16 d28, d22, d7 // out12
+ vqadd.s16 d20, d24, d6 // out4
+ vqsub.s16 d27, d24, d6 // out11
+ vqadd.s16 d21, d26, d4 // out5
+ vqsub.s16 d26, d26, d4 // out10
+ vmov d24, d9
+ vmov d22, d5
+
+ bx lr
+endfunc
+
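+// 16-point inverse ADST on d16-d31; the \o0-\o15 arguments select normal or
+// flipped output ordering.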
+.macro iadst_16 o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15
+ movrel_local r12, iadst16_coeffs
+ vld1.16 {q0, q1}, [r12, :128]
+ movrel_local r12, idct_coeffs
+
+ vmull_vmlal q2, d31, d16, d0[0], d0[1] // -> t0
+ vmull_vmlsl q3, d31, d16, d0[1], d0[0] // -> t1
+ vmull_vmlal q4, d29, d18, d0[2], d0[3] // -> t2
+ vqrshrn.s32 d16, q2, #12 // t0
+ vqrshrn.s32 d31, q3, #12 // t1
+ vmull_vmlsl q2, d29, d18, d0[3], d0[2] // -> t3
+ vmull_vmlal q3, d27, d20, d1[0], d1[1] // -> t4
+ vqrshrn.s32 d18, q4, #12 // t2
+ vqrshrn.s32 d29, q2, #12 // t3
+ vmull_vmlsl q4, d27, d20, d1[1], d1[0] // -> t5
+ vmull_vmlal q2, d25, d22, d1[2], d1[3] // -> t6
+ vqrshrn.s32 d20, q3, #12 // t4
+ vqrshrn.s32 d27, q4, #12 // t5
+ vmull_vmlsl q3, d25, d22, d1[3], d1[2] // -> t7
+ vmull_vmlal q4, d23, d24, d2[0], d2[1] // -> t8
+ vqrshrn.s32 d22, q2, #12 // t6
+ vqrshrn.s32 d25, q3, #12 // t7
+ vmull_vmlsl q2, d23, d24, d2[1], d2[0] // -> t9
+ vmull_vmlal q3, d21, d26, d2[2], d2[3] // -> t10
+ vqrshrn.s32 d23, q4, #12 // t8
+ vqrshrn.s32 d24, q2, #12 // t9
+ vmull_vmlsl q4, d21, d26, d2[3], d2[2] // -> t11
+ vmull_vmlal q2, d19, d28, d3[0], d3[1] // -> t12
+ vqrshrn.s32 d21, q3, #12 // t10
+ vqrshrn.s32 d26, q4, #12 // t11
+ vmull_vmlsl q3, d19, d28, d3[1], d3[0] // -> t13
+ vmull_vmlal q4, d17, d30, d3[2], d3[3] // -> t14
+ vqrshrn.s32 d19, q2, #12 // t12
+ vqrshrn.s32 d28, q3, #12 // t13
+ vmull_vmlsl q2, d17, d30, d3[3], d3[2] // -> t15
+ vqrshrn.s32 d17, q4, #12 // t14
+ vqrshrn.s32 d30, q2, #12 // t15
+
+ vld1.16 {q0}, [r12, :128]
+
+ vqsub.s16 d2, d16, d23 // t8a
+ vqadd.s16 d16, d16, d23 // t0a
+ vqsub.s16 d3, d31, d24 // t9a
+ vqadd.s16 d31, d31, d24 // t1a
+ vqadd.s16 d23, d18, d21 // t2a
+ vqsub.s16 d18, d18, d21 // t10a
+ vqadd.s16 d24, d29, d26 // t3a
+ vqsub.s16 d29, d29, d26 // t11a
+ vqadd.s16 d21, d20, d19 // t4a
+ vqsub.s16 d20, d20, d19 // t12a
+ vqadd.s16 d26, d27, d28 // t5a
+ vqsub.s16 d27, d27, d28 // t13a
+ vqadd.s16 d19, d22, d17 // t6a
+ vqsub.s16 d22, d22, d17 // t14a
+ vqadd.s16 d28, d25, d30 // t7a
+ vqsub.s16 d25, d25, d30 // t15a
+
+ vmull_vmlal q2, d2, d3, d1[1], d1[0] // -> t8
+ vmull_vmlsl q3, d2, d3, d1[0], d1[1] // -> t9
+ vmull_vmlal q4, d18, d29, d1[3], d1[2] // -> t10
+ vqrshrn.s32 d17, q2, #12 // t8
+ vqrshrn.s32 d30, q3, #12 // t9
+ vmull_vmlsl q2, d18, d29, d1[2], d1[3] // -> t11
+ vmull_vmlsl q3, d27, d20, d1[1], d1[0] // -> t12
+ vqrshrn.s32 d18, q4, #12 // t10
+ vqrshrn.s32 d29, q2, #12 // t11
+ vmull_vmlal q4, d27, d20, d1[0], d1[1] // -> t13
+ vmull_vmlsl q2, d25, d22, d1[3], d1[2] // -> t14
+ vqrshrn.s32 d27, q3, #12 // t12
+ vqrshrn.s32 d20, q4, #12 // t13
+ vmull_vmlal q3, d25, d22, d1[2], d1[3] // -> t15
+ vqrshrn.s32 d25, q2, #12 // t14
+ vqrshrn.s32 d22, q3, #12 // t15
+
+ vqsub.s16 d2, d16, d21 // t4
+ vqadd.s16 d16, d16, d21 // t0
+ vqsub.s16 d3, d31, d26 // t5
+ vqadd.s16 d31, d31, d26 // t1
+ vqadd.s16 d21, d23, d19 // t2
+ vqsub.s16 d23, d23, d19 // t6
+ vqadd.s16 d26, d24, d28 // t3
+ vqsub.s16 d24, d24, d28 // t7
+ vqadd.s16 d19, d17, d27 // t8a
+ vqsub.s16 d17, d17, d27 // t12a
+ vqadd.s16 d28, d30, d20 // t9a
+ vqsub.s16 d30, d30, d20 // t13a
+ vqadd.s16 d27, d18, d25 // t10a
+ vqsub.s16 d18, d18, d25 // t14a
+ vqadd.s16 d20, d29, d22 // t11a
+ vqsub.s16 d29, d29, d22 // t15a
+
+ vmull_vmlal q2, d2, d3, d0[3], d0[2] // -> t4a
+ vmull_vmlsl q3, d2, d3, d0[2], d0[3] // -> t5a
+ vmull_vmlsl q4, d24, d23, d0[3], d0[2] // -> t6a
+ vqrshrn.s32 d22, q2, #12 // t4a
+ vqrshrn.s32 d25, q3, #12 // t5a
+ vmull_vmlal q2, d24, d23, d0[2], d0[3] // -> t7a
+ vmull_vmlal q3, d17, d30, d0[3], d0[2] // -> t12
+ vqrshrn.s32 d24, q4, #12 // t6a
+ vqrshrn.s32 d23, q2, #12 // t7a
+ vmull_vmlsl q4, d17, d30, d0[2], d0[3] // -> t13
+ vmull_vmlsl q2, d29, d18, d0[3], d0[2] // -> t14
+ vqrshrn.s32 d17, q3, #12 // t12
+ vmull_vmlal q3, d29, d18, d0[2], d0[3] // -> t15
+ vqrshrn.s32 d29, q4, #12 // t13
+ vqrshrn.s32 d30, q2, #12 // t14
+ vqrshrn.s32 d18, q3, #12 // t15
+
+ vqsub.s16 d2, d16, d21 // t2a
+.ifc \o0, d16
+ vqadd.s16 \o0, d16, d21 // out0
+ vqsub.s16 d21, d31, d26 // t3a
+ vqadd.s16 \o15,d31, d26 // out15
+.else
+ vqadd.s16 d4, d16, d21 // out0
+ vqsub.s16 d21, d31, d26 // t3a
+ vqadd.s16 \o15,d31, d26 // out15
+ vmov \o0, d4
+.endif
+ vqneg.s16 \o15, \o15 // out15
+
+ vqsub.s16 d3, d29, d18 // t15a
+ vqadd.s16 \o13,d29, d18 // out13
+ vqadd.s16 \o2, d17, d30 // out2
+ vqsub.s16 d26, d17, d30 // t14a
+ vqneg.s16 \o13,\o13 // out13
+
+ vqadd.s16 \o1, d19, d27 // out1
+ vqsub.s16 d27, d19, d27 // t10
+ vqadd.s16 \o14,d28, d20 // out14
+ vqsub.s16 d20, d28, d20 // t11
+ vqneg.s16 \o1, \o1 // out1
+
+ vqadd.s16 \o3, d22, d24 // out3
+ vqsub.s16 d22, d22, d24 // t6
+ vqadd.s16 \o12,d25, d23 // out12
+ vqsub.s16 d23, d25, d23 // t7
+ vqneg.s16 \o3, \o3 // out3
+
+ vmull_vmlsl q12, d2, d21, d0[0], d0[0] // -> out8 (d24 or d23)
+ vmull_vmlal q2, d2, d21, d0[0], d0[0] // -> out7 (d23 or d24)
+ vmull_vmlal q3, d26, d3, d0[0], d0[0] // -> out5 (d21 or d26)
+
+ vqrshrn.s32 d24, q12, #12 // out8
+ vqrshrn.s32 d4, q2, #12 // out7
+ vqrshrn.s32 d5, q3, #12 // out5
+ vmull_vmlsl q4, d26, d3, d0[0], d0[0] // -> out10 (d26 or d21)
+ vmull_vmlal q1, d22, d23, d0[0], d0[0] // -> out4 (d20 or d27)
+ vqrshrn.s32 d26, q4, #12 // out10
+
+ vmull_vmlsl q4, d22, d23, d0[0], d0[0] // -> out11 (d27 or d20)
+ vmull_vmlal q11, d27, d20, d0[0], d0[0] // -> out6 (d22 or d25)
+ vmull_vmlsl q3, d27, d20, d0[0], d0[0] // -> out9 (d25 or d22)
+
+ vqrshrn.s32 \o4, q1, #12 // out4
+ vqrshrn.s32 d7, q3, #12 // out9
+ vqrshrn.s32 d6, q4, #12 // out11
+ vqrshrn.s32 \o6, q11, #12 // out6
+
+.ifc \o8, d23
+ vmov \o8, d24
+ vmov \o10,d26
+.endif
+
+ vqneg.s16 \o7, d4 // out7
+ vqneg.s16 \o5, d5 // out5
+ vqneg.s16 \o11,d6 // out11
+ vqneg.s16 \o9, d7 // out9
+.endm
+
+function inv_adst_4h_x16_neon, export=1
+ iadst_16 d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ bx lr
+endfunc
+
+function inv_flipadst_4h_x16_neon, export=1
+ iadst_16 d31, d30, d29, d28, d27, d26, d25, d24, d23, d22, d21, d20, d19, d18, d17, d16
+ bx lr
+endfunc
+
+function inv_identity_4h_x16_neon, export=1
+ movw r12, #2*(5793-4096)*8
+ vdup.16 d0, r12
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vqrdmulh.s16 q1, \i, d0[0]
+ vqadd.s16 \i, \i, \i
+ vqadd.s16 \i, \i, q1
+.endr
+ bx lr
+endfunc
+
+.macro identity_4x16_shift2 c
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vqrdmulh.s16 q2, \i, \c
+ vshr.s16 q2, q2, #1
+ vrhadd.s16 \i, \i, q2
+.endr
+.endm
+
+.macro identity_4x16_shift1 c
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vqrdmulh.s16 q2, \i, \c
+ vrshr.s16 q2, q2, #1
+ vqadd.s16 \i, \i, q2
+.endr
+.endm
+
+.macro identity_8x8_shift1 c
+ identity_4x16_shift1 \c
+.endm
+
+.macro identity_8x8 c
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vqrdmulh.s16 q2, \i, \c
+ vqadd.s16 \i, \i, \i
+ vqadd.s16 \i, \i, q2
+.endr
+.endm
+
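+// Horizontal pass over a 16x4 strip: load (and clear) 16 4x16-bit vectors from
+// r7 with stride r8, optionally scale and/or apply the identity transform or
+// call the transform in r4, shift, transpose 4x4 blocks and store the result
+// contiguously at r6.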
+.macro def_horz_16 scale=0, identity=0, shift=2, suffix
+function inv_txfm_horz\suffix\()_16x4_neon
+ push {lr}
+ vmov.i16 d7, #0
+.if \identity
+ movw r12, #2*(5793-4096)*8
+ vdup.16 d0, r12
+.endif
+.if \scale
+ movw r12, #2896*8
+ vdup.16 d1, r12
+.endif
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64]
+ vst1.16 {d7}, [r7, :64], r8
+.endr
+.if \scale
+ scale_input d1[0], q8, q9, q10, q11, q12, q13, q14, q15
+.endif
+.if \identity
+.if \shift == -2
+ identity_4x16_shift2 d0[0]
+.else
+ identity_4x16_shift1 d0[0]
+.endif
+.else
+ blx r4
+.endif
+.if \shift > 0
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vrshr.s16 \i, \i, #\shift
+.endr
+.endif
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+ transpose_4x4h q14, q15, d28, d29, d30, d31
+
+.irp i, d16, d20, d24, d28, d17, d21, d25, d29, d18, d22, d26, d30, d19, d23, d27, d31
+ vst1.16 {\i}, [r6, :64]!
+.endr
+
+ pop {pc}
+endfunc
+.endm
+
+def_horz_16 scale=0, identity=0, shift=2
+def_horz_16 scale=1, identity=0, shift=1, suffix=_scale
+def_horz_16 scale=0, identity=1, shift=-2, suffix=_identity
+def_horz_16 scale=1, identity=1, shift=-1, suffix=_scale_identity
+
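+// Vertical pass over a 4-column strip: load 16 rows from the temporary buffer
+// at r7 (stride r8), run the second transform via r5 and add the result to the
+// destination at r6.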
+function inv_txfm_add_vert_4x16_neon
+ push {lr}
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64], r8
+.endr
+ blx r5
+ load_add_store_4x16 r6, r7
+ pop {pc}
+endfunc
+
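+// Run the horizontal transform (r9) on up to four 16x4 strips into a 512 byte
+// stack buffer, zero-filling strips that the eob thresholds in r10 show to be
+// empty, then do the vertical pass per 4-column strip.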
+function inv_txfm_add_16x16_neon
+ sub_sp_align 512
+ ldrh r11, [r10], #2
+.irp i, 0, 4, 8, 12
+ add r6, sp, #(\i*16*2)
+.if \i > 0
+ mov r8, #(16 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 12
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #16*2
+ blx r9
+.endr
+ b 3f
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #4
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+3:
+.irp i, 0, 4, 8, 12
+ add r6, r0, #(\i)
+ add r7, sp, #(\i*2)
+ mov r8, #32
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 512
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
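+// eob thresholds for the first pass of the 16x16 transforms: each entry is the
+// minimum eob for which the next 16x4 strip still contains nonzero
+// coefficients; below it the remaining strips are only zero-filled.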
+const eob_16x16
+ .short 10, 36, 78, 256
+endconst
+
+const eob_16x16_identity
+ .short 4, 8, 12, 256
+endconst
+
+.macro def_fn_16x16 txfm1, txfm2
+function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_8bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc 16, 16, 2
+.endif
+ push {r4-r11,lr}
+ vpush {q4}
+.ifc \txfm1, identity
+ movrel_local r9, inv_txfm_horz_identity_16x4_neon
+.else
+ movrel_local r9, inv_txfm_horz_16x4_neon
+ movrel_local r4, inv_\txfm1\()_4h_x16_neon
+.endif
+ movrel_local r5, inv_\txfm2\()_4h_x16_neon
+.ifc \txfm1, identity
+.ifc \txfm2, identity
+ movrel_local r10, eob_16x16
+.else
+ movrel_local r10, eob_16x16_identity
+.endif
+.else
+.ifc \txfm2, identity
+ movrel_local r10, eob_16x16_identity
+.else
+ movrel_local r10, eob_16x16
+.endif
+.endif
+ b inv_txfm_add_16x16_neon
+endfunc
+.endm
+
+def_fn_16x16 dct, dct
+def_fn_16x16 identity, identity
+def_fn_16x16 dct, adst
+def_fn_16x16 dct, flipadst
+def_fn_16x16 dct, identity
+def_fn_16x16 adst, dct
+def_fn_16x16 adst, adst
+def_fn_16x16 adst, flipadst
+def_fn_16x16 flipadst, dct
+def_fn_16x16 flipadst, adst
+def_fn_16x16 flipadst, flipadst
+def_fn_16x16 identity, dct
+
+.macro def_fn_416_base variant
+function inv_txfm_\variant\()add_16x4_neon
+
+.ifc \variant, identity_
+ vmov.i16 d4, #0
+.irp i, d16, d18, d20, d22
+ vld1.16 {\i}, [r2, :64]
+ vst1.16 {d4}, [r2, :64]!
+.endr
+.irp i, d17, d19, d21, d23
+ vld1.16 {\i}, [r2, :64]
+ vst1.16 {d4}, [r2, :64]!
+.endr
+ movw r12, #2*(5793-4096)*8
+ vdup.16 d0, r12
+.irp i, d24, d26, d28, d30
+ vld1.16 {\i}, [r2, :64]
+ vst1.16 {d4}, [r2, :64]!
+.endr
+.irp i, d25, d27, d29, d31
+ vld1.16 {\i}, [r2, :64]
+ vst1.16 {d4}, [r2, :64]!
+.endr
+
+ identity_4x16_shift1 d0[0]
+.else
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+ vld1.16 {d16, d17, d18, d19}, [r2, :128]
+ vst1.16 {q2, q3}, [r2, :128]!
+ vld1.16 {d20, d21, d22, d23}, [r2, :128]
+ vst1.16 {q2, q3}, [r2, :128]!
+ vld1.16 {d24, d25, d26, d27}, [r2, :128]
+ vst1.16 {q2, q3}, [r2, :128]!
+ vld1.16 {d28, d29, d30, d31}, [r2, :128]
+ vst1.16 {q2, q3}, [r2, :128]!
+
+ blx r4
+
+ vswp d17, d20
+ vswp d19, d22
+ vswp d18, d20
+ vswp d19, d21
+.irp i, q8, q9, q10, q11
+ vrshr.s16 \i, \i, #1
+.endr
+.endif
+ transpose_4x8h q8, q9, q10, q11
+ blx r5
+ mov r6, r0
+ load_add_store_8x4 r6, r7
+
+.ifc \variant, identity_
+ vmov q8, q12
+ vmov q9, q13
+ vmov q10, q14
+ vmov q11, q15
+.else
+ vswp d25, d28
+ vswp d27, d30
+ vswp d26, d28
+ vswp d27, d29
+ vrshr.s16 q8, q12, #1
+ vrshr.s16 q9, q13, #1
+ vrshr.s16 q10, q14, #1
+ vrshr.s16 q11, q15, #1
+.endif
+ transpose_4x8h q8, q9, q10, q11
+ blx r5
+ add r6, r0, #8
+ load_add_store_8x4 r6, r7
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_\variant\()add_4x16_neon
+ vmov.i16 q2, #0
+
+ mov r11, #32
+ cmp r3, r10
+ blt 1f
+
+ add r6, r2, #16
+.ifc \variant, identity_
+.irp i, q12, q13, q14, q15
+ vld1.16 {\i}, [r6, :128]
+ vst1.16 {q2}, [r6, :128], r11
+.endr
+ movw r12, #(5793-4096)*8
+ vdup.16 d0, r12
+ identity_8x4_shift1 q12, q13, q14, q15, d0[0]
+.else
+.irp i, q8, q9, q10, q11
+ vld1.16 {\i}, [r6, :128]
+ vst1.16 {q2}, [r6, :128], r11
+.endr
+ blx r4
+ vrshr.s16 q12, q8, #1
+ vrshr.s16 q13, q9, #1
+ vrshr.s16 q14, q10, #1
+ vrshr.s16 q15, q11, #1
+.endif
+ transpose_4x8h q12, q13, q14, q15
+ vswp d27, d29
+ vswp d26, d28
+ vswp d27, d30
+ vswp d25, d28
+
+ b 2f
+1:
+.irp i, q12, q13, q14, q15
+ vmov.i16 \i, #0
+.endr
+2:
+ vmov.i16 q2, #0
+.irp i, q8, q9, q10, q11
+ vld1.16 {\i}, [r2, :128]
+ vst1.16 {q2}, [r2, :128], r11
+.endr
+.ifc \variant, identity_
+ movw r12, #(5793-4096)*8
+ vdup.16 d0, r12
+ identity_8x4_shift1 q8, q9, q10, q11, d0[0]
+.else
+ blx r4
+.irp i, q8, q9, q10, q11
+ vrshr.s16 \i, \i, #1
+.endr
+.endif
+ transpose_4x8h q8, q9, q10, q11
+ vswp d19, d21
+ vswp d18, d20
+ vswp d19, d22
+ vswp d17, d20
+
+ blx r5
+
+ load_add_store_4x16 r0, r6
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+def_fn_416_base
+def_fn_416_base identity_
+
+.macro def_fn_416 w, h, txfm1, txfm2, eob_half
+function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc \w, \h, 1
+.endif
+ push {r4-r11,lr}
+ vpush {q4-q7}
+.if \w == 4
+ movrel_local r4, inv_\txfm1\()_8h_x\w\()_neon
+ movrel_local r5, inv_\txfm2\()_4h_x\h\()_neon
+ mov r10, #\eob_half
+.else
+ movrel_local r4, inv_\txfm1\()_4h_x\w\()_neon
+ movrel_local r5, inv_\txfm2\()_8h_x\h\()_neon
+.endif
+.ifc \txfm1, identity
+ b inv_txfm_identity_add_\w\()x\h\()_neon
+.else
+ b inv_txfm_add_\w\()x\h\()_neon
+.endif
+endfunc
+.endm
+
+.macro def_fns_416 w, h
+def_fn_416 \w, \h, dct, dct, 29
+def_fn_416 \w, \h, identity, identity, 29
+def_fn_416 \w, \h, dct, adst, 29
+def_fn_416 \w, \h, dct, flipadst, 29
+def_fn_416 \w, \h, dct, identity, 8
+def_fn_416 \w, \h, adst, dct, 29
+def_fn_416 \w, \h, adst, adst, 29
+def_fn_416 \w, \h, adst, flipadst, 29
+def_fn_416 \w, \h, flipadst, dct, 29
+def_fn_416 \w, \h, flipadst, adst, 29
+def_fn_416 \w, \h, flipadst, flipadst, 29
+def_fn_416 \w, \h, identity, dct, 32
+def_fn_416 \w, \h, adst, identity, 8
+def_fn_416 \w, \h, flipadst, identity, 8
+def_fn_416 \w, \h, identity, adst, 32
+def_fn_416 \w, \h, identity, flipadst, 32
+.endm
+
+def_fns_416 4, 16
+def_fns_416 16, 4
+
+.macro def_fn_816_base variant
+function inv_txfm_\variant\()add_16x8_neon
+ sub_sp_align 256
+
+.irp i, 0, 4
+ add r6, sp, #(\i*16*2)
+.if \i > 0
+ cmp r3, r10
+ blt 1f
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #8*2
+ blx r9
+.endr
+ b 2f
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+2:
+
+.irp i, 0, 8
+ add r7, sp, #(\i*2)
+ mov r8, #32
+.irp j, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\j}, [r7, :128], r8
+.endr
+ blx r5
+
+ add r6, r0, #(\i)
+ load_add_store_8x8 r6, r7
+.endr
+
+ add_sp_align 256
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_\variant\()add_8x16_neon
+ sub_sp_align 256
+
+.irp i, 0, 8
+ add r6, sp, #(\i*8*2)
+.if \i > 0
+ cmp r3, r10
+ blt 1f
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #16*2
+
+ vmov.i16 q2, #0
+ movw r12, #2896*8
+ vdup.16 d0, r12
+
+.irp j, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\j}, [r7, :128]
+ vst1.16 {q2}, [r7, :128], r8
+.endr
+ scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
+.ifc \variant, identity_
+ // The identity shl #1 and downshift vrshr #1 cancel out
+.else
+ blx r4
+.irp j, q8, q9, q10, q11, q12, q13, q14, q15
+ vrshr.s16 \j, \j, #1
+.endr
+.endif
+ transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
+ vst1.16 {q8, q9}, [r6, :128]!
+ vst1.16 {q10, q11}, [r6, :128]!
+ vst1.16 {q12, q13}, [r6, :128]!
+ vst1.16 {q14, q15}, [r6, :128]!
+.endr
+ b 2f
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+2:
+
+.irp i, 0, 4
+ add r6, r0, #(\i)
+ add r7, sp, #(\i*2)
+ mov r8, #16
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 256
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+def_fn_816_base
+def_fn_816_base identity_
+
+.macro def_fn_816 w, h, txfm1, txfm2, eob_8x8, eob_4x4
+function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_8bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc \w, \h, 1
+.endif
+ push {r4-r11,lr}
+ vpush {q4-q7}
+.if \w == 8
+ movrel_local r4, inv_\txfm1\()_8h_x8_neon
+ movrel_local r5, inv_\txfm2\()_4h_x16_neon
+.else
+.ifc \txfm1, identity
+ movrel_local r9, inv_txfm_horz_scale_identity_16x4_neon
+.else
+ movrel_local r4, inv_\txfm1\()_4h_x16_neon
+ movrel_local r9, inv_txfm_horz_scale_16x4_neon
+.endif
+ movrel_local r5, inv_\txfm2\()_8h_x8_neon
+.endif
+.if \w == 8
+ mov r10, #\eob_8x8
+.else
+ mov r10, #\eob_4x4
+.endif
+.ifc \txfm1, identity
+ b inv_txfm_identity_add_\w\()x\h\()_neon
+.else
+ b inv_txfm_add_\w\()x\h\()_neon
+.endif
+endfunc
+.endm
+
+.macro def_fns_816 w, h
+def_fn_816 \w, \h, dct, dct, 43, 10
+def_fn_816 \w, \h, identity, identity, 43, 10
+def_fn_816 \w, \h, dct, adst, 43, 10
+def_fn_816 \w, \h, dct, flipadst, 43, 10
+def_fn_816 \w, \h, dct, identity, 8, 4
+def_fn_816 \w, \h, adst, dct, 43, 10
+def_fn_816 \w, \h, adst, adst, 43, 10
+def_fn_816 \w, \h, adst, flipadst, 43, 10
+def_fn_816 \w, \h, flipadst, dct, 43, 10
+def_fn_816 \w, \h, flipadst, adst, 43, 10
+def_fn_816 \w, \h, flipadst, flipadst, 43, 10
+def_fn_816 \w, \h, identity, dct, 64, 4
+def_fn_816 \w, \h, adst, identity, 8, 4
+def_fn_816 \w, \h, flipadst, identity, 8, 4
+def_fn_816 \w, \h, identity, adst, 64, 4
+def_fn_816 \w, \h, identity, flipadst, 64, 4
+.endm
+
+def_fns_816 8, 16
+def_fns_816 16, 8
+
+function inv_dct32_odd_4h_x16_neon, export=1
+ movrel_local r12, idct_coeffs, 2*16
+ vld1.16 {q0, q1}, [r12, :128]
+ sub r12, r12, #2*16
+
+ vmull_vmlsl q2, d16, d31, d0[0], d0[1] // -> t16a
+ vmull_vmlal q3, d16, d31, d0[1], d0[0] // -> t31a
+ vmull_vmlsl q4, d24, d23, d0[2], d0[3] // -> t17a
+ vqrshrn.s32 d16, q2, #12 // t16a
+ vqrshrn.s32 d31, q3, #12 // t31a
+ vmull_vmlal q2, d24, d23, d0[3], d0[2] // -> t30a
+ vmull_vmlsl q3, d20, d27, d1[0], d1[1] // -> t18a
+ vqrshrn.s32 d24, q4, #12 // t17a
+ vqrshrn.s32 d23, q2, #12 // t30a
+ vmull_vmlal q4, d20, d27, d1[1], d1[0] // -> t29a
+ vmull_vmlsl q2, d28, d19, d1[2], d1[3] // -> t19a
+ vqrshrn.s32 d20, q3, #12 // t18a
+ vqrshrn.s32 d27, q4, #12 // t29a
+ vmull_vmlal q3, d28, d19, d1[3], d1[2] // -> t28a
+ vmull_vmlsl q4, d18, d29, d2[0], d2[1] // -> t20a
+ vqrshrn.s32 d28, q2, #12 // t19a
+ vqrshrn.s32 d19, q3, #12 // t28a
+ vmull_vmlal q2, d18, d29, d2[1], d2[0] // -> t27a
+ vmull_vmlsl q3, d26, d21, d2[2], d2[3] // -> t21a
+ vqrshrn.s32 d18, q4, #12 // t20a
+ vqrshrn.s32 d29, q2, #12 // t27a
+ vmull_vmlal q4, d26, d21, d2[3], d2[2] // -> t26a
+ vmull_vmlsl q2, d22, d25, d3[0], d3[1] // -> t22a
+ vqrshrn.s32 d26, q3, #12 // t21a
+ vqrshrn.s32 d21, q4, #12 // t26a
+ vmull_vmlal q3, d22, d25, d3[1], d3[0] // -> t25a
+ vmull_vmlsl q4, d30, d17, d3[2], d3[3] // -> t23a
+ vqrshrn.s32 d22, q2, #12 // t22a
+ vqrshrn.s32 d25, q3, #12 // t25a
+ vmull_vmlal q2, d30, d17, d3[3], d3[2] // -> t24a
+ vqrshrn.s32 d30, q4, #12 // t23a
+ vqrshrn.s32 d17, q2, #12 // t24a
+
+ vld1.16 {q0}, [r12, :128]
+
+ vqsub.s16 d2, d16, d24 // t17
+ vqadd.s16 d16, d16, d24 // t16
+ vqsub.s16 d3, d31, d23 // t30
+ vqadd.s16 d31, d31, d23 // t31
+ vqsub.s16 d24, d28, d20 // t18
+ vqadd.s16 d28, d28, d20 // t19
+ vqadd.s16 d23, d18, d26 // t20
+ vqsub.s16 d18, d18, d26 // t21
+ vqsub.s16 d20, d30, d22 // t22
+ vqadd.s16 d30, d30, d22 // t23
+ vqadd.s16 d26, d17, d25 // t24
+ vqsub.s16 d17, d17, d25 // t25
+ vqsub.s16 d22, d29, d21 // t26
+ vqadd.s16 d29, d29, d21 // t27
+ vqadd.s16 d25, d19, d27 // t28
+ vqsub.s16 d19, d19, d27 // t29
+
+ vmull_vmlsl q2, d3, d2, d1[0], d1[1] // -> t17a
+ vmull_vmlal q3, d3, d2, d1[1], d1[0] // -> t30a
+ vmull_vmlal q4, d19, d24, d1[1], d1[0] // -> t18a
+ vqrshrn.s32 d21, q2, #12 // t17a
+ vqrshrn.s32 d27, q3, #12 // t30a
+ vneg.s32 q4, q4 // -> t18a
+ vmull_vmlsl q1, d19, d24, d1[0], d1[1] // -> t29a
+ vmull_vmlsl q2, d22, d18, d1[2], d1[3] // -> t21a
+ vqrshrn.s32 d19, q4, #12 // t18a
+ vqrshrn.s32 d24, q1, #12 // t29a
+ vmull_vmlal q3, d22, d18, d1[3], d1[2] // -> t26a
+ vmull_vmlal q4, d17, d20, d1[3], d1[2] // -> t22a
+ vqrshrn.s32 d22, q2, #12 // t21a
+ vqrshrn.s32 d18, q3, #12 // t26a
+ vneg.s32 q4, q4 // -> t22a
+ vmull_vmlsl q1, d17, d20, d1[2], d1[3] // -> t25a
+ vqrshrn.s32 d17, q4, #12 // t22a
+ vqrshrn.s32 d20, q1, #12 // t25a
+
+ vqsub.s16 d2, d27, d24 // t29
+ vqadd.s16 d27, d27, d24 // t30
+ vqsub.s16 d3, d21, d19 // t18
+ vqadd.s16 d21, d21, d19 // t17
+ vqsub.s16 d24, d16, d28 // t19a
+ vqadd.s16 d16, d16, d28 // t16a
+ vqsub.s16 d19, d30, d23 // t20a
+ vqadd.s16 d30, d30, d23 // t23a
+ vqsub.s16 d28, d17, d22 // t21
+ vqadd.s16 d17, d17, d22 // t22
+ vqadd.s16 d23, d26, d29 // t24a
+ vqsub.s16 d26, d26, d29 // t27a
+ vqadd.s16 d22, d20, d18 // t25
+ vqsub.s16 d20, d20, d18 // t26
+ vqsub.s16 d29, d31, d25 // t28a
+ vqadd.s16 d31, d31, d25 // t31a
+
+ vmull_vmlsl q2, d2, d3, d0[2], d0[3] // -> t18a
+ vmull_vmlal q3, d2, d3, d0[3], d0[2] // -> t29a
+ vmull_vmlsl q4, d29, d24, d0[2], d0[3] // -> t19
+ vqrshrn.s32 d18, q2, #12 // t18a
+ vqrshrn.s32 d25, q3, #12 // t29a
+ vmull_vmlal q1, d29, d24, d0[3], d0[2] // -> t28
+ vmull_vmlal q2, d26, d19, d0[3], d0[2] // -> t20
+ vqrshrn.s32 d29, q4, #12 // t19
+ vqrshrn.s32 d24, q1, #12 // t28
+ vneg.s32 q2, q2 // -> t20
+ vmull_vmlsl q3, d26, d19, d0[2], d0[3] // -> t27
+ vmull_vmlal q4, d20, d28, d0[3], d0[2] // -> t21a
+ vqrshrn.s32 d26, q2, #12 // t20
+ vqrshrn.s32 d19, q3, #12 // t27
+ vneg.s32 q4, q4 // -> t21a
+ vmull_vmlsl q1, d20, d28, d0[2], d0[3] // -> t26a
+ vqrshrn.s32 d20, q4, #12 // t21a
+ vqrshrn.s32 d28, q1, #12 // t26a
+
+ vqsub.s16 d2, d16, d30 // t23
+ vqadd.s16 d16, d16, d30 // t16 = out16
+ vqsub.s16 d3, d31, d23 // t24
+ vqadd.s16 d31, d31, d23 // t31 = out31
+ vqsub.s16 d23, d21, d17 // t22a
+ vqadd.s16 d17, d21, d17 // t17a = out17
+ vqadd.s16 d30, d27, d22 // t30a = out30
+ vqsub.s16 d21, d27, d22 // t25a
+ vqsub.s16 d27, d18, d20 // t21
+ vqadd.s16 d18, d18, d20 // t18 = out18
+ vqadd.s16 d4, d29, d26 // t19a = out19
+ vqsub.s16 d26, d29, d26 // t20a
+ vqadd.s16 d29, d25, d28 // t29 = out29
+ vqsub.s16 d25, d25, d28 // t26
+ vqadd.s16 d28, d24, d19 // t28a = out28
+ vqsub.s16 d24, d24, d19 // t27a
+ vmov d19, d4 // out19
+
+ vmull_vmlsl q2, d24, d26, d0[0], d0[0] // -> t20
+ vmull_vmlal q3, d24, d26, d0[0], d0[0] // -> t27
+ vqrshrn.s32 d20, q2, #12 // t20
+ vqrshrn.s32 d22, q3, #12 // t27
+
+ vmull_vmlal q2, d25, d27, d0[0], d0[0] // -> t26a
+ vmull_vmlsl q3, d25, d27, d0[0], d0[0] // -> t21a
+ vmov d27, d22 // t27
+ vqrshrn.s32 d26, q2, #12 // t26a
+
+ vmull_vmlsl q12, d21, d23, d0[0], d0[0] // -> t22
+ vmull_vmlal q2, d21, d23, d0[0], d0[0] // -> t25
+ vqrshrn.s32 d21, q3, #12 // t21a
+ vqrshrn.s32 d22, q12, #12 // t22
+ vqrshrn.s32 d25, q2, #12 // t25
+
+ vmull_vmlsl q2, d3, d2, d0[0], d0[0] // -> t23a
+ vmull_vmlal q3, d3, d2, d0[0], d0[0] // -> t24a
+ vqrshrn.s32 d23, q2, #12 // t23a
+ vqrshrn.s32 d24, q3, #12 // t24a
+
+ bx lr
+endfunc
+
+.macro def_horz_32 scale=0, shift=2, suffix
+function inv_txfm_horz\suffix\()_dct_32x4_neon
+ push {lr}
+ vmov.i16 d7, #0
+ lsl r8, r8, #1
+.if \scale
+ movw r12, #2896*8
+ vdup.16 d0, r12
+.endif
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64]
+ vst1.16 {d7}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+ add r7, r7, r8, lsr #1
+.if \scale
+ scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
+.endif
+ bl inv_dct_4h_x16_neon
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+ transpose_4x4h q14, q15, d28, d29, d30, d31
+
+.macro store1 r0, r1, r2, r3
+ vst1.16 {\r0}, [r6, :64]!
+ vst1.16 {\r1}, [r6, :64]!
+ vst1.16 {\r2}, [r6, :64]!
+ vst1.16 {\r3}, [r6, :64]!
+ add r6, r6, #32
+.endm
+ store1 d16, d20, d24, d28
+ store1 d17, d21, d25, d29
+ store1 d18, d22, d26, d30
+ store1 d19, d23, d27, d31
+.purgem store1
+ sub r6, r6, #64*4
+
+ vmov.i16 d7, #0
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64]
+ vst1.16 {d7}, [r7, :64], r8
+.endr
+.if \scale
+ // This relies on the fact that the idct also leaves the right coeff in d0[1]
+ scale_input d0[1], q8, q9, q10, q11, q12, q13, q14, q15
+.endif
+ bl inv_dct32_odd_4h_x16_neon
+ transpose_4x4h q15, q14, d31, d30, d29, d28
+ transpose_4x4h q13, q12, d27, d26, d25, d24
+ transpose_4x4h q11, q10, d23, d22, d21, d20
+ transpose_4x4h q9, q8, d19, d18, d17, d16
+.macro store2 r0, r1, r2, r3, shift
+ vld1.16 {q0, q1}, [r6, :128]
+ vqsub.s16 d7, d0, \r0
+ vqadd.s16 d0, d0, \r0
+ vqsub.s16 d6, d1, \r1
+ vqadd.s16 d1, d1, \r1
+ vqsub.s16 d5, d2, \r2
+ vqadd.s16 d2, d2, \r2
+ vqsub.s16 d4, d3, \r3
+ vqadd.s16 d3, d3, \r3
+ vrev64.16 q2, q2
+ vrev64.16 q3, q3
+ vrshr.s16 q0, q0, #\shift
+ vrshr.s16 q1, q1, #\shift
+ vrshr.s16 q2, q2, #\shift
+ vrshr.s16 q3, q3, #\shift
+ vst1.16 {q0, q1}, [r6, :128]!
+ vst1.16 {q2, q3}, [r6, :128]!
+.endm
+
+ store2 d31, d27, d23, d19, \shift
+ store2 d30, d26, d22, d18, \shift
+ store2 d29, d25, d21, d17, \shift
+ store2 d28, d24, d20, d16, \shift
+.purgem store2
+ pop {pc}
+endfunc
+.endm
+
+def_horz_32 scale=0, shift=2
+def_horz_32 scale=1, shift=1, suffix=_scale
+
+function inv_txfm_add_vert_dct_4x32_neon
+ push {r10-r11,lr}
+ lsl r8, r8, #1
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+
+ bl inv_dct_4h_x16_neon
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vst1.16 {\i}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+ add r7, r7, r8, lsr #1
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+ sub r7, r7, r8, lsr #1
+ bl inv_dct32_odd_4h_x16_neon
+
+ neg r9, r8
+ mov r10, r6
+.macro combine r0, r1, r2, r3, op, stride
+ vld1.16 {d4}, [r7, :64], \stride
+ vld1.32 {d2[0]}, [r10, :32], r1
+ vld1.16 {d5}, [r7, :64], \stride
+ vld1.32 {d2[1]}, [r10, :32], r1
+ \op\().s16 d4, d4, \r0
+ vld1.16 {d6}, [r7, :64], \stride
+ vld1.32 {d3[0]}, [r10, :32], r1
+ \op\().s16 d5, d5, \r1
+ vld1.32 {d3[1]}, [r10, :32], r1
+ vrshr.s16 q2, q2, #4
+ \op\().s16 d6, d6, \r2
+ vld1.16 {d7}, [r7, :64], \stride
+ vaddw.u8 q2, q2, d2
+ \op\().s16 d7, d7, \r3
+ vqmovun.s16 d2, q2
+ vrshr.s16 q3, q3, #4
+ vst1.32 {d2[0]}, [r6, :32], r1
+ vaddw.u8 q3, q3, d3
+ vst1.32 {d2[1]}, [r6, :32], r1
+ vqmovun.s16 d3, q3
+ vst1.32 {d3[0]}, [r6, :32], r1
+ vst1.32 {d3[1]}, [r6, :32], r1
+.endm
+ combine d31, d30, d29, d28, vqadd, r8
+ combine d27, d26, d25, d24, vqadd, r8
+ combine d23, d22, d21, d20, vqadd, r8
+ combine d19, d18, d17, d16, vqadd, r8
+ sub r7, r7, r8
+ combine d16, d17, d18, d19, vqsub, r9
+ combine d20, d21, d22, d23, vqsub, r9
+ combine d24, d25, d26, d27, vqsub, r9
+ combine d28, d29, d30, d31, vqsub, r9
+.purgem combine
+
+ pop {r10-r11,pc}
+endfunc
+
+const eob_32x32
+ .short 10, 36, 78, 136, 210, 300, 406, 1024
+endconst
+
+const eob_16x32
+ .short 10, 36, 78, 151, 215, 279, 343, 512
+endconst
+
+const eob_16x32_shortside
+ .short 10, 36, 78, 512
+endconst
+
+const eob_8x32
+ // Contrary to the others, this one is only ever used in increments of 8x8
+ .short 43, 107, 171, 256
+endconst
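+// The eob_* tables above hold eob thresholds: before processing each further
+// slice of 4 (or, for eob_8x32, 8) input rows, the callers compare the eob
+// against the next entry and, if it is smaller, zero-fill the remaining part
+// of the intermediate buffer instead, since those coefficients are all zero.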
+
+function inv_txfm_add_identity_identity_32x32_8bpc_neon, export=1
+ push {r4-r7,lr}
+ vmov.i16 q0, #0
+ movrel_local r5, eob_32x32, 2
+
+ mov r6, #2*32
+1:
+ mov r12, #0
+ movrel_local r4, eob_32x32, 2
+2:
+ add r12, r12, #8
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\i}, [r2, :128]
+ vst1.16 {q0}, [r2, :128], r6
+.endr
+ transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
+
+ load_add_store_8x8 r0, r7, shiftbits=2
+ ldrh lr, [r4], #4
+ sub r0, r0, r1, lsl #3
+ cmp r3, lr
+ add r0, r0, #8
+ bge 2b
+
+ ldrh lr, [r5], #4
+ cmp r3, lr
+ blt 9f
+
+ sub r0, r0, r12
+ add r0, r0, r1, lsl #3
+ mls r2, r6, r12, r2
+ add r2, r2, #2*8
+ b 1b
+9:
+ pop {r4-r7,pc}
+endfunc
+
+.macro shift_8_regs op, shift
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ \op \i, \i, #\shift
+.endr
+.endm
+
+.macro def_identity_1632 w, h, wshort, hshort
+function inv_txfm_add_identity_identity_\w\()x\h\()_8bpc_neon, export=1
+ push {r4-r7,lr}
+ movw r6, #2896*8
+ movw r7, #2*(5793-4096)*8
+ vdup.i16 d0, r6
+ movrel_local r5, eob_16x32\hshort, 2
+ vmov.16 d0[1], r7
+
+ mov r6, #2*\h
+1:
+ mov r12, #0
+ movrel_local r4, eob_16x32\wshort, 2
+2:
+ vmov.i16 q1, #0
+ add r12, r12, #8
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\i}, [r2, :128]
+ vst1.16 {q1}, [r2, :128], r6
+.endr
+ scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
+
+.if \w == 16
+ // 16x32
+ identity_8x8_shift1 d0[1]
+.else
+ // 32x16
+ shift_8_regs vqshl.s16, 1
+ identity_8x8 d0[1]
+.endif
+
+ transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
+
+.if \w == 16
+ load_add_store_8x8 r0, r7, shiftbits=2
+.else
+ load_add_store_8x8 r0, r7, shiftbits=4
+.endif
+ ldrh lr, [r4], #4
+ sub r0, r0, r1, lsl #3
+ cmp r3, lr
+ add r0, r0, #8
+ bge 2b
+
+ ldrh lr, [r5], #4
+ cmp r3, lr
+ blt 9f
+
+ sub r0, r0, r12
+ add r0, r0, r1, lsl #3
+ mls r2, r6, r12, r2
+ add r2, r2, #2*8
+ b 1b
+9:
+ pop {r4-r7,pc}
+endfunc
+.endm
+
+def_identity_1632 16, 32, _shortside,
+def_identity_1632 32, 16, , _shortside
+
+.macro def_identity_832 w, h
+function inv_txfm_add_identity_identity_\w\()x\h\()_8bpc_neon, export=1
+ push {r4-r5,lr}
+ vmov.i16 q0, #0
+ movrel_local r4, eob_8x32
+
+ mov r12, #2*\h
+1:
+ ldrh lr, [r4], #2
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\i}, [r2, :128]
+ vst1.16 {q0}, [r2, :128], r12
+.endr
+
+.if \w == 8
+ // 8x32
+ shift_8_regs vrshr.s16, 1
+.endif
+
+ transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
+
+ cmp r3, lr
+.if \w == 8
+ load_add_store_8x8 r0, r5, shiftbits=2
+.else
+ load_add_store_8x8 r0, r5, shiftbits=3
+.endif
+
+ blt 9f
+.if \w == 8
+ sub r2, r2, r12, lsl #3
+ add r2, r2, #2*8
+.else
+ sub r0, r0, r1, lsl #3
+ add r0, r0, #8
+.endif
+ b 1b
+
+9:
+ pop {r4-r5,pc}
+endfunc
+.endm
+
+def_identity_832 8, 32
+def_identity_832 32, 8
+
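+// The large dct_dct functions below work in two passes over a scratch buffer
+// on the stack: a horizontal pass transforms 4 rows at a time into the
+// buffer (zeroing the coefficients it consumed, and skipping slices that the
+// eob thresholds show to be all zero), then a vertical pass transforms
+// 4 (or 8) columns at a time and adds the rounded result to the destination.
+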
+function inv_txfm_add_dct_dct_32x32_8bpc_neon, export=1
+ idct_dc 32, 32, 2
+
+ push {r4-r11,lr}
+ vpush {q4}
+ sub_sp_align 2048
+ movrel_local r10, eob_32x32
+ ldrh r11, [r10], #2
+
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, sp, #(\i*32*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 28
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_horz_dct_32x4_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r0, #(\i)
+ add r7, sp, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 2048
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_16x32_8bpc_neon, export=1
+ idct_dc 16, 32, 1
+
+ push {r4-r11,lr}
+ vpush {q4}
+ sub_sp_align 1024
+ movrel_local r10, eob_16x32
+ ldrh r11, [r10], #2
+ movrel_local r4, inv_dct_4h_x16_neon
+
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, sp, #(\i*16*2)
+ add r7, r2, #(\i*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 28
+ ldrh r11, [r10], #2
+.endif
+.endif
+ mov r8, #2*32
+ bl inv_txfm_horz_scale_16x4_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #4
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12
+ add r6, r0, #(\i)
+ add r7, sp, #(\i*2)
+ mov r8, #16*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 1024
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_32x16_8bpc_neon, export=1
+ idct_dc 32, 16, 1
+
+ push {r4-r11,lr}
+ vpush {q4}
+ sub_sp_align 1024
+ movrel_local r10, eob_16x32
+ ldrh r11, [r10], #2
+ movrel_local r5, inv_dct_4h_x16_neon
+
+.irp i, 0, 4, 8, 12
+ add r6, sp, #(\i*32*2)
+ add r7, r2, #(\i*2)
+.if \i > 0
+ mov r8, #(16 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 12
+ ldrh r11, [r10], #2
+.endif
+.endif
+ mov r8, #2*16
+ bl inv_txfm_horz_scale_dct_32x4_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r0, #(\i)
+ add r7, sp, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 1024
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_8x32_8bpc_neon, export=1
+ idct_dc 8, 32, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ sub_sp_align 512
+
+ movrel_local r10, eob_8x32
+
+ mov r8, #2*32
+ mov r9, #32
+ mov r6, sp
+1:
+ vmov.i16 q0, #0
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\i}, [r2, :128]
+ vst1.16 {q0}, [r2, :128], r8
+.endr
+ ldrh r11, [r10], #2
+ sub r2, r2, r8, lsl #3
+ sub r9, r9, #8
+ add r2, r2, #2*8
+
+ bl inv_dct_8h_x8_neon
+
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vrshr.s16 \i, \i, #2
+.endr
+
+ transpose_8x8h q8, q9, q10, q11, q12, q13, q14, q15, d17, d19, d21, d23, d24, d26, d28, d30
+
+ vst1.16 {q8, q9}, [r6, :128]!
+ cmp r3, r11
+ vst1.16 {q10, q11}, [r6, :128]!
+ vst1.16 {q12, q13}, [r6, :128]!
+ vst1.16 {q14, q15}, [r6, :128]!
+
+ bge 1b
+ cmp r9, #0
+ beq 3f
+
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r9, r9, #8
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4
+ add r6, r0, #(\i)
+ add r7, sp, #(\i*2)
+ mov r8, #8*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 512
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_32x8_8bpc_neon, export=1
+ idct_dc 32, 8, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ sub_sp_align 512
+
+.irp i, 0, 4
+ add r6, sp, #(\i*32*2)
+ add r7, r2, #(\i*2)
+.if \i > 0
+ cmp r3, #10
+ blt 1f
+.endif
+ mov r8, #8*2
+ bl inv_txfm_horz_dct_32x4_neon
+.endr
+ b 2f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+.rept 8
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+
+2:
+ mov r8, #2*32
+ mov r9, #0
+1:
+ add r6, r0, r9
+ add r7, sp, r9, lsl #1 // #(\i*2)
+
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\i}, [r7, :128], r8
+.endr
+ add r9, r9, #8
+
+ bl inv_dct_8h_x8_neon
+
+ cmp r9, #32
+
+ load_add_store_8x8 r6, r7
+
+ blt 1b
+
+ add_sp_align 512
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_dct64_step1_neon
+ // in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
+ // in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
+ // in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
+ // in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
+
+ vld1.16 {d0, d1, d2}, [r12, :64]!
+
+ vqrdmulh.s16 d23, d16, d0[1] // t63a
+ vqrdmulh.s16 d16, d16, d0[0] // t32a
+ vqrdmulh.s16 d22, d17, d0[2] // t62a
+ vqrdmulh.s16 d17, d17, d0[3] // t33a
+ vqrdmulh.s16 d21, d18, d1[1] // t61a
+ vqrdmulh.s16 d18, d18, d1[0] // t34a
+ vqrdmulh.s16 d20, d19, d1[2] // t60a
+ vqrdmulh.s16 d19, d19, d1[3] // t35a
+
+ vqadd.s16 d24, d16, d17 // t32
+ vqsub.s16 d25, d16, d17 // t33
+ vqsub.s16 d26, d19, d18 // t34
+ vqadd.s16 d27, d19, d18 // t35
+ vqadd.s16 d28, d20, d21 // t60
+ vqsub.s16 d29, d20, d21 // t61
+ vqsub.s16 d30, d23, d22 // t62
+ vqadd.s16 d31, d23, d22 // t63
+
+ vmull_vmlal q2, d29, d26, d2[0], d2[1] // -> t34a
+ vmull_vmlsl q3, d29, d26, d2[1], d2[0] // -> t61a
+ vneg.s32 q2, q2 // t34a
+ vmull_vmlsl q4, d30, d25, d2[1], d2[0] // -> t33a
+ vqrshrn.s32 d26, q2, #12 // t34a
+ vmull_vmlal q2, d30, d25, d2[0], d2[1] // -> t62a
+ vqrshrn.s32 d29, q3, #12 // t61a
+ vqrshrn.s32 d25, q4, #12 // t33a
+ vqrshrn.s32 d30, q2, #12 // t62a
+
+ vqadd.s16 d16, d24, d27 // t32a
+ vqsub.s16 d19, d24, d27 // t35a
+ vqadd.s16 d17, d25, d26 // t33
+ vqsub.s16 d18, d25, d26 // t34
+ vqsub.s16 d20, d31, d28 // t60a
+ vqadd.s16 d23, d31, d28 // t63a
+ vqsub.s16 d21, d30, d29 // t61
+ vqadd.s16 d22, d30, d29 // t62
+
+ vmull_vmlal q2, d21, d18, d2[2], d2[3] // -> t61a
+ vmull_vmlsl q3, d21, d18, d2[3], d2[2] // -> t34a
+ vmull_vmlal q4, d20, d19, d2[2], d2[3] // -> t60
+ vqrshrn.s32 d21, q2, #12 // t61a
+ vqrshrn.s32 d18, q3, #12 // t34a
+ vmull_vmlsl q2, d20, d19, d2[3], d2[2] // -> t35
+ vqrshrn.s32 d20, q4, #12 // t60
+ vqrshrn.s32 d19, q2, #12 // t35
+
+ vst1.16 {d16, d17, d18, d19}, [r6, :128]!
+ vst1.16 {d20, d21, d22, d23}, [r6, :128]!
+
+ bx lr
+endfunc
+
+function inv_dct64_step2_neon
+ movrel_local r12, idct_coeffs
+ vld1.16 {d0}, [r12, :64]
+1:
+ // t32a/33/34a/35/60/61a/62/63a
+ // t56a/57/58a/59/36/37a/38/39a
+ // t40a/41/42a/43/52/53a/54/55a
+ // t48a/49/50a/51/44/45a/46/47a
+ vldr d16, [r6, #2*4*0] // t32a
+ vldr d17, [r9, #2*4*8] // t39a
+ vldr d18, [r9, #2*4*0] // t63a
+ vldr d19, [r6, #2*4*8] // t56a
+ vldr d20, [r6, #2*4*16] // t40a
+ vldr d21, [r9, #2*4*24] // t47a
+ vldr d22, [r9, #2*4*16] // t55a
+ vldr d23, [r6, #2*4*24] // t48a
+
+ vqadd.s16 d24, d16, d17 // t32
+ vqsub.s16 d25, d16, d17 // t39
+ vqadd.s16 d26, d18, d19 // t63
+ vqsub.s16 d27, d18, d19 // t56
+ vqsub.s16 d28, d21, d20 // t40
+ vqadd.s16 d29, d21, d20 // t47
+ vqadd.s16 d30, d23, d22 // t48
+ vqsub.s16 d31, d23, d22 // t55
+
+ vmull_vmlal q2, d27, d25, d0[3], d0[2] // -> t56a
+ vmull_vmlsl q3, d27, d25, d0[2], d0[3] // -> t39a
+ vmull_vmlal q4, d31, d28, d0[3], d0[2] // -> t40a
+ vqrshrn.s32 d25, q2, #12 // t56a
+ vqrshrn.s32 d27, q3, #12 // t39a
+ vneg.s32 q4, q4 // t40a
+ vmull_vmlsl q2, d31, d28, d0[2], d0[3] // -> t55a
+ vqrshrn.s32 d31, q4, #12 // t40a
+ vqrshrn.s32 d28, q2, #12 // t55a
+
+ vqadd.s16 d16, d24, d29 // t32a
+ vqsub.s16 d19, d24, d29 // t47a
+ vqadd.s16 d17, d27, d31 // t39
+ vqsub.s16 d18, d27, d31 // t40
+ vqsub.s16 d20, d26, d30 // t48a
+ vqadd.s16 d23, d26, d30 // t63a
+ vqsub.s16 d21, d25, d28 // t55
+ vqadd.s16 d22, d25, d28 // t56
+
+ vmull_vmlsl q2, d21, d18, d0[0], d0[0] // -> t40a
+ vmull_vmlal q3, d21, d18, d0[0], d0[0] // -> t55a
+ vmull_vmlsl q4, d20, d19, d0[0], d0[0] // -> t47
+ vqrshrn.s32 d18, q2, #12 // t40a
+ vqrshrn.s32 d21, q3, #12 // t55a
+ vmull_vmlal q2, d20, d19, d0[0], d0[0] // -> t48
+ vqrshrn.s32 d19, q4, #12 // t47
+ vqrshrn.s32 d20, q2, #12 // t48
+
+ vstr d16, [r6, #2*4*0] // t32a
+ vstr d17, [r9, #2*4*0] // t39
+ vstr d18, [r6, #2*4*8] // t40a
+ vstr d19, [r9, #2*4*8] // t47
+ vstr d20, [r6, #2*4*16] // t48
+ vstr d21, [r9, #2*4*16] // t55a
+ vstr d22, [r6, #2*4*24] // t56
+ vstr d23, [r9, #2*4*24] // t63a
+
+ add r6, r6, #2*4
+ sub r9, r9, #2*4
+ cmp r6, r9
+ blt 1b
+ bx lr
+endfunc
+
+.macro load8 src, strd, zero, clear
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23
+.if \clear
+ vld1.16 {\i}, [\src, :64]
+ vst1.16 {\zero}, [\src, :64], \strd
+.else
+ vld1.16 {\i}, [\src, :64], \strd
+.endif
+.endr
+.endm
+
+.macro store16 dst
+ vst1.16 {q8, q9}, [\dst, :128]!
+ vst1.16 {q10, q11}, [\dst, :128]!
+ vst1.16 {q12, q13}, [\dst, :128]!
+ vst1.16 {q14, q15}, [\dst, :128]!
+.endm
+
+.macro clear_upper8
+.irp i, q12, q13, q14, q15
+ vmov.i16 \i, #0
+.endr
+.endm
+
+.macro vmov_if reg, val, cond
+.if \cond
+ vmov.i16 \reg, \val
+.endif
+.endm
+
+.macro movdup_if reg, gpr, val, cond
+.if \cond
+ movw \gpr, \val
+ vdup.16 \reg, \gpr
+.endif
+.endm
+
+.macro vst1_if regs, dst, dstalign, cond
+.if \cond
+ vst1.16 \regs, \dst, \dstalign
+.endif
+.endm
+
+.macro scale_if cond, c, r0, r1, r2, r3, r4, r5, r6, r7
+.if \cond
+ scale_input \c, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
+.endif
+.endm
+
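+// The *_if helpers above only expand when their \cond argument is set, which
+// lets def_dct64_func emit the plain, coefficient-clearing and scaling
+// variants of the same dct64 body below without any runtime branches.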
+.macro def_dct64_func suffix, clear=0, scale=0
+function inv_txfm_dct\suffix\()_4h_x64_neon, export=1
+ mov r6, sp
+
+ push {r10-r11,lr}
+
+ lsl r8, r8, #2
+
+ movdup_if d0, r12, #2896*8, \scale
+ vmov_if d7, #0, \clear
+ load8 r7, r8, d7, \clear
+ clear_upper8
+ sub r7, r7, r8, lsl #3
+ add r7, r7, r8, lsr #1
+ scale_if \scale, d0[0], q8, q9, q10, q11
+
+ bl inv_dct_4h_x16_neon
+
+ store16 r6
+
+ movdup_if d0, r12, #2896*8, \scale
+ vmov_if d7, #0, \clear
+ load8 r7, r8, d7, \clear
+ clear_upper8
+ sub r7, r7, r8, lsl #3
+ lsr r8, r8, #1
+ sub r7, r7, r8, lsr #1
+ scale_if \scale, d0[0], q8, q9, q10, q11
+
+ bl inv_dct32_odd_4h_x16_neon
+
+ add r10, r6, #8*15
+ sub r6, r6, #8*16
+
+ mov r9, #-8
+
+.macro store_addsub r0, r1, r2, r3
+ vld1.16 {d2}, [r6, :64]!
+ vld1.16 {d3}, [r6, :64]!
+ vqadd.s16 d6, d2, \r0
+ vqsub.s16 \r0, d2, \r0
+ vld1.16 {d4}, [r6, :64]!
+ vqadd.s16 d7, d3, \r1
+ vqsub.s16 \r1, d3, \r1
+ vld1.16 {d5}, [r6, :64]!
+ vqadd.s16 d2, d4, \r2
+ sub r6, r6, #8*4
+ vqsub.s16 \r2, d4, \r2
+ vst1.16 {d6}, [r6, :64]!
+ vst1.16 {\r0}, [r10, :64], r9
+ vqadd.s16 d3, d5, \r3
+ vqsub.s16 \r3, d5, \r3
+ vst1.16 {d7}, [r6, :64]!
+ vst1.16 {\r1}, [r10, :64], r9
+ vst1.16 {d2}, [r6, :64]!
+ vst1.16 {\r2}, [r10, :64], r9
+ vst1.16 {d3}, [r6, :64]!
+ vst1.16 {\r3}, [r10, :64], r9
+.endm
+ store_addsub d31, d30, d29, d28
+ store_addsub d27, d26, d25, d24
+ store_addsub d23, d22, d21, d20
+ store_addsub d19, d18, d17, d16
+.purgem store_addsub
+
+ add r6, r6, #2*4*16
+
+ movrel_local r12, idct64_coeffs
+ movdup_if d0, lr, #2896*8, \scale
+ vmov_if d7, #0, \clear
+ add r9, r7, r8, lsl #4 // offset 16
+ add r10, r7, r8, lsl #3 // offset 8
+ sub r9, r9, r8 // offset 15
+ sub r11, r10, r8 // offset 7
+ vld1.16 {d16}, [r7, :64] // in1 (offset 0)
+ vld1.16 {d17}, [r9, :64] // in31 (offset 15)
+ vld1.16 {d18}, [r10, :64] // in17 (offset 8)
+ vld1.16 {d19}, [r11, :64] // in15 (offset 7)
+ vst1_if {d7}, [r7, :64], \clear
+ vst1_if {d7}, [r9, :64], \clear
+ vst1_if {d7}, [r10, :64], \clear
+ vst1_if {d7}, [r11, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+ movdup_if d0, lr, #2896*8, \scale
+ vmov_if d7, #0, \clear
+ add r7, r7, r8, lsl #2 // offset 4
+ sub r9, r9, r8, lsl #2 // offset 11
+ sub r10, r7, r8 // offset 3
+ add r11, r9, r8 // offset 12
+ vld1.16 {d16}, [r10, :64] // in7 (offset 3)
+ vld1.16 {d17}, [r11, :64] // in25 (offset 12)
+ vld1.16 {d18}, [r9, :64] // in23 (offset 11)
+ vld1.16 {d19}, [r7, :64] // in9 (offset 4)
+ vst1_if {d7}, [r7, :64], \clear
+ vst1_if {d7}, [r9, :64], \clear
+ vst1_if {d7}, [r10, :64], \clear
+ vst1_if {d7}, [r11, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+ movdup_if d0, lr, #2896*8, \scale
+ vmov_if d7, #0, \clear
+ sub r10, r10, r8, lsl #1 // offset 1
+ sub r9, r9, r8, lsl #1 // offset 9
+ add r10, r10, r8 // offset 2
+ add r9, r9, r8 // offset 10
+ add r7, r7, r8 // offset 5
+ add r11, r11, r8 // offset 13
+ vld1.16 d16, [r10, :64] // in5 (offset 2)
+ vld1.16 d17, [r11, :64] // in27 (offset 13)
+ vld1.16 d18, [r9, :64] // in21 (offset 10)
+ vld1.16 d19, [r7, :64] // in11 (offset 5)
+ vst1_if d7, [r10, :64], \clear
+ vst1_if d7, [r11, :64], \clear
+ vst1_if d7, [r9, :64], \clear
+ vst1_if d7, [r7, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+ movdup_if d0, lr, #2896*8, \scale
+ vmov_if d7, #0, \clear
+ sub r10, r10, r8 // offset 1
+ sub r9, r9, r8 // offset 9
+ add r11, r11, r8 // offset 14
+ add r7, r7, r8 // offset 6
+ vld1.16 d16, [r10, :64] // in3 (offset 1)
+ vld1.16 d17, [r11, :64] // in29 (offset 14)
+ vld1.16 d18, [r9, :64] // in19 (offset 9)
+ vld1.16 d19, [r7, :64] // in13 (offset 6)
+ vst1_if d7, [r10, :64], \clear
+ vst1_if d7, [r11, :64], \clear
+ vst1_if d7, [r9, :64], \clear
+ vst1_if d7, [r7, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+
+ sub r6, r6, #2*4*32
+ add r9, r6, #2*4*7
+
+ bl inv_dct64_step2_neon
+
+ pop {r10-r11,pc}
+endfunc
+.endm
+
+def_dct64_func
+def_dct64_func _clear, clear=1
+def_dct64_func _clear_scale, clear=1, scale=1
+
+function inv_txfm_horz_dct_64x4_neon
+ vdup.16 q3, r9
+
+ mov r7, sp
+ add r8, sp, #2*4*(64 - 4)
+ add r9, r6, #2*56
+
+ push {r10-r11,lr}
+
+ mov r10, #2*64
+ mov r11, #-2*4*4
+
+1:
+ vld1.16 {d16, d17, d18, d19}, [r7, :128]!
+ vld1.16 {d28, d29, d30, d31}, [r8, :128], r11
+ vld1.16 {d20, d21, d22, d23}, [r7, :128]!
+ vld1.16 {d24, d25, d26, d27}, [r8, :128], r11
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q15, q14, d31, d30, d29, d28
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q13, q12, d27, d26, d25, d24
+
+.macro store_addsub src0, src1, src2, src3
+ vqsub.s16 d3, \src0, \src1
+ vqsub.s16 d2, \src2, \src3
+ vqadd.s16 d0, \src0, \src1
+ vqadd.s16 d1, \src2, \src3
+ vrshl.s16 q1, q1, q3
+ vrshl.s16 q0, q0, q3
+ vrev64.16 q1, q1
+ vst1.16 {q0}, [r6, :128], r10
+ vst1.16 {q1}, [r9, :128], r10
+.endm
+ store_addsub d16, d31, d20, d27
+ store_addsub d17, d30, d21, d26
+ store_addsub d18, d29, d22, d25
+ store_addsub d19, d28, d23, d24
+.purgem store_addsub
+ sub r6, r6, r10, lsl #2
+ sub r9, r9, r10, lsl #2
+ add r6, r6, #16
+ sub r9, r9, #16
+
+ cmp r7, r8
+ blt 1b
+ pop {r10-r11,pc}
+endfunc
+
+function inv_txfm_add_vert_dct_4x64_neon
+ lsl r8, r8, #1
+
+ mov r7, sp
+ add r8, sp, #2*4*(64 - 4)
+ add r9, r6, r1, lsl #6
+ sub r9, r9, r1
+
+ push {r10-r11,lr}
+
+ neg r10, r1
+ mov r11, #-2*4*4
+
+1:
+ vld1.16 {d16, d17, d18, d19}, [r7, :128]!
+ vld1.16 {d28, d29, d30, d31}, [r8, :128], r11
+ vld1.16 {d20, d21, d22, d23}, [r7, :128]!
+ vld1.16 {d24, d25, d26, d27}, [r8, :128], r11
+
+.macro add_dest_addsub src0, src1, src2, src3
+ vld1.32 {d0[0]}, [r6, :32], r1
+ vld1.32 {d1[0]}, [r9, :32], r10
+ vqadd.s16 d4, \src0, \src1
+ vld1.32 {d0[1]}, [r6, :32]
+ vqadd.s16 d5, \src2, \src3
+ vld1.32 {d1[1]}, [r9, :32]
+ vqsub.s16 d6, \src0, \src1
+ vqsub.s16 d7, \src2, \src3
+ sub r6, r6, r1
+ sub r9, r9, r10
+ vrshr.s16 q2, q2, #4
+ vrshr.s16 q3, q3, #4
+ vaddw.u8 q2, q2, d0
+ vaddw.u8 q3, q3, d1
+ vqmovun.s16 d0, q2
+ vqmovun.s16 d1, q3
+ vst1.32 {d0[0]}, [r6, :32], r1
+ vst1.32 {d1[0]}, [r9, :32], r10
+ vst1.32 {d0[1]}, [r6, :32], r1
+ vst1.32 {d1[1]}, [r9, :32], r10
+.endm
+ add_dest_addsub d16, d31, d17, d30
+ add_dest_addsub d18, d29, d19, d28
+ add_dest_addsub d20, d27, d21, d26
+ add_dest_addsub d22, d25, d23, d24
+.purgem add_dest_addsub
+ cmp r7, r8
+ blt 1b
+
+ pop {r10-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_64x64_8bpc_neon, export=1
+ idct_dc 64, 64, 2
+
+ push {r4-r11,lr}
+ vpush {q4}
+
+ sub_sp_align 64*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_32x32
+
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r5, #(\i*64*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_dct_clear_4h_x64_neon
+ add r6, r5, #(\i*64*2)
+ mov r9, #-2 // shift
+ bl inv_txfm_horz_dct_64x4_neon
+.if \i < 28
+ ldrh r11, [r10], #2
+.endif
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 8
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
+ add r7, r5, #(\i*2)
+ mov r8, #64*2
+ bl inv_txfm_dct_4h_x64_neon
+ add r6, r0, #(\i)
+ bl inv_txfm_add_vert_dct_4x64_neon
+.endr
+
+ add_sp_align 64*32*2+64*4*2
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_64x32_8bpc_neon, export=1
+ idct_dc 64, 32, 1
+
+ push {r4-r11,lr}
+ vpush {q4}
+
+ sub_sp_align 64*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_32x32
+
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r5, #(\i*64*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_dct_clear_scale_4h_x64_neon
+ add r6, r5, #(\i*64*2)
+ mov r9, #-1 // shift
+ bl inv_txfm_horz_dct_64x4_neon
+.if \i < 28
+ ldrh r11, [r10], #2
+.endif
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 8
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
+ add r6, r0, #(\i)
+ add r7, r5, #(\i*2)
+ mov r8, #64*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 64*32*2+64*4*2
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_32x64_8bpc_neon, export=1
+ idct_dc 32, 64, 1
+
+ push {r4-r11,lr}
+ vpush {q4}
+
+ sub_sp_align 32*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_32x32
+ ldrh r11, [r10], #2
+
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r5, #(\i*32*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 28
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_horz_scale_dct_32x4_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r7, r5, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_dct_4h_x64_neon
+ add r6, r0, #(\i)
+ bl inv_txfm_add_vert_dct_4x64_neon
+.endr
+
+ add_sp_align 32*32*2+64*4*2
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_64x16_8bpc_neon, export=1
+ idct_dc 64, 16, 2
+
+ push {r4-r11,lr}
+ vpush {q4}
+
+ sub_sp_align 64*16*2+64*4*2
+ add r4, sp, #64*4*2
+
+ movrel_local r10, eob_16x32
+
+.irp i, 0, 4, 8, 12
+ add r6, r4, #(\i*64*2)
+.if \i > 0
+ mov r8, #(16 - \i)
+ cmp r3, r11
+ blt 1f
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #16*2
+ bl inv_txfm_dct_clear_4h_x64_neon
+ add r6, r4, #(\i*64*2)
+ mov r9, #-2 // shift
+ bl inv_txfm_horz_dct_64x4_neon
+.if \i < 12
+ ldrh r11, [r10], #2
+.endif
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 8
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+ movrel_local r5, inv_dct_4h_x16_neon
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
+ add r6, r0, #(\i)
+ add r7, r4, #(\i*2)
+ mov r8, #64*2
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 64*16*2+64*4*2
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_16x64_8bpc_neon, export=1
+ idct_dc 16, 64, 2
+
+ push {r4-r11,lr}
+ vpush {q4}
+
+ sub_sp_align 16*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_16x32
+ ldrh r11, [r10], #2
+
+ movrel_local r4, inv_dct_4h_x16_neon
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r5, #(\i*16*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 28
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_horz_16x4_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #4
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12
+ add r7, r5, #(\i*2)
+ mov r8, #16*2
+ bl inv_txfm_dct_4h_x64_neon
+ add r6, r0, #(\i)
+ bl inv_txfm_add_vert_dct_4x64_neon
+.endr
+
+ add_sp_align 16*32*2+64*4*2
+ vpop {q4}
+ pop {r4-r11,pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/itx16.S b/third_party/dav1d/src/arm/32/itx16.S
new file mode 100644
index 0000000000..aa6c272e71
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/itx16.S
@@ -0,0 +1,3625 @@
+/******************************************************************************
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+// The exported functions in this file have the following signature:
+// void itxfm_add(pixel *dst, ptrdiff_t dst_stride, coef *coeff, int eob);
+
+// Most of the functions use the following register layout:
+// r0-r3 external parameters
+// r4 function pointer to first transform
+// r5 function pointer to second transform
+// r6 output parameter for helper function
+// r7 input parameter for helper function
+// r8 input stride for helper function
+// r9 scratch variable for helper functions
+// r10-r11 pointer to list of eob thresholds, eob threshold value,
+// scratch variables within helper functions (backed up)
+
+// The SIMD registers most often use the following layout:
+// d0-d3 multiplication coefficients
+// d4-d7 scratch registers
+// d8-d15 unused in some transforms, used for scratch registers in others
+// d16-d31 inputs/outputs of transforms
+
+// Potential further optimizations that are left unimplemented for now:
+// - Trying to keep multiplication coefficients in registers across multiple
+// transform functions. (The register layout is designed to potentially
+// allow this.)
+// - Use a simplified version of the transforms themselves for cases where
+// we know a significant number of inputs are zero. E.g. if the eob value
+// indicates only a quarter of input values are set, for idct16 and up,
+// a significant amount of calculation can be skipped, at the cost of more
+// code duplication and special casing.
+
+// A macro for cases where a thumb mov can express the constant in one
+// instruction, while arm mode requires a separate movw+movt pair.
+.macro mov_const reg, val
+#if CONFIG_THUMB
+ mov.w \reg, #\val
+#else
+ movw \reg, #((\val) & 0xffff)
+ movt \reg, #(((\val) >> 16) & 0xffff)
+#endif
+.endm
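+// For example, mov_const r12, 2896*8*(1<<16) assembles to a single mov.w in
+// Thumb mode, and to a movw of the low half followed by a movt of the high
+// half in Arm mode.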
+
+const idct_coeffs, align=4
+ // idct4
+ .int 2896, 2896*8*(1<<16), 1567, 3784
+ // idct8
+ .int 799, 4017, 3406, 2276
+ // idct16
+ .int 401, 4076, 3166, 2598
+ .int 1931, 3612, 3920, 1189
+ // idct32
+ .int 201, 4091, 3035, 2751
+ .int 1751, 3703, 3857, 1380
+ .int 995, 3973, 3513, 2106
+ .int 2440, 3290, 4052, 601
+endconst
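+// These are the usual AV1 transform constants, cos(k*pi/128) in 12 bit fixed
+// point (2896 ~ 4096/sqrt(2), 3784 ~ 4096*cos(pi/8), 1567 ~ 4096*cos(3*pi/8),
+// and so on); the entries scaled by 8*(1<<16) are prescaled variants meant
+// to be used with vqrdmulh.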
+
+const idct64_coeffs, align=4
+ .int 101*8*(1<<16), 4095*8*(1<<16), 2967*8*(1<<16), -2824*8*(1<<16)
+ .int 1660*8*(1<<16), 3745*8*(1<<16), 3822*8*(1<<16), -1474*8*(1<<16)
+ .int 4076, 401, 4017, 799
+
+ .int 4036*8*(1<<16), -700*8*(1<<16), 2359*8*(1<<16), 3349*8*(1<<16)
+ .int 3461*8*(1<<16), -2191*8*(1<<16), 897*8*(1<<16), 3996*8*(1<<16)
+ .int -3166, -2598, -799, -4017
+
+ .int 501*8*(1<<16), 4065*8*(1<<16), 3229*8*(1<<16), -2520*8*(1<<16)
+ .int 2019*8*(1<<16), 3564*8*(1<<16), 3948*8*(1<<16), -1092*8*(1<<16)
+ .int 3612, 1931, 2276, 3406
+
+ .int 4085*8*(1<<16), -301*8*(1<<16), 2675*8*(1<<16), 3102*8*(1<<16)
+ .int 3659*8*(1<<16), -1842*8*(1<<16), 1285*8*(1<<16), 3889*8*(1<<16)
+ .int -3920, -1189, -3406, -2276
+endconst
+
+const iadst4_coeffs, align=4
+ .int 1321, 3803, 2482, 3344
+endconst
+
+const iadst8_coeffs, align=4
+ .int 4076, 401, 3612, 1931
+ .int 2598, 3166, 1189, 3920
+ // idct_coeffs
+ .int 2896, 0, 1567, 3784
+endconst
+
+const iadst16_coeffs, align=4
+ .int 4091, 201, 3973, 995
+ .int 3703, 1751, 3290, 2440
+ .int 2751, 3035, 2106, 3513
+ .int 1380, 3857, 601, 4052
+endconst
+
+.macro vmul_vmla d0, s0, s1, c0, c1
+ vmul.i32 \d0, \s0, \c0
+ vmla.i32 \d0, \s1, \c1
+.endm
+
+.macro vmul_vmls d0, s0, s1, c0, c1
+ vmul.i32 \d0, \s0, \c0
+ vmls.i32 \d0, \s1, \c1
+.endm
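+// i.e. vmul_vmla computes \d0 = \s0*\c0 + \s1*\c1 and vmul_vmls computes
+// \d0 = \s0*\c0 - \s1*\c1, elementwise on 32 bit lanes.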
+
+.macro scale_input c, r0, r1, r2, r3, r4, r5, r6, r7
+ vqrdmulh.s32 \r0, \r0, \c
+ vqrdmulh.s32 \r1, \r1, \c
+.ifnb \r2
+ vqrdmulh.s32 \r2, \r2, \c
+ vqrdmulh.s32 \r3, \r3, \c
+.endif
+.ifnb \r4
+ vqrdmulh.s32 \r4, \r4, \c
+ vqrdmulh.s32 \r5, \r5, \c
+ vqrdmulh.s32 \r6, \r6, \c
+ vqrdmulh.s32 \r7, \r7, \c
+.endif
+.endm
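+// scale_input multiplies 2-8 registers by a constant using vqrdmulh
+// (doubling multiply, returning the rounded high half); with the commonly
+// used 2896*8*(1<<16) coefficient this is roughly a multiplication by
+// 1/sqrt(2).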
+
+.macro load_add_store load, shift, addsrc, adddst, max, min, store, dst, src, shiftbits=4
+.ifnb \load
+ vld1.16 {\load}, [\src, :128], r1
+.endif
+.ifnb \shift
+ vrshr.s16 \shift, \shift, #\shiftbits
+.endif
+.ifnb \addsrc
+ vqadd.s16 \adddst, \adddst, \addsrc
+.endif
+.ifnb \max
+ vmax.s16 \max, \max, q6
+.endif
+.ifnb \min
+ vmin.s16 \min, \min, q7
+.endif
+.ifnb \store
+ vst1.16 {\store}, [\dst, :128], r1
+.endif
+.endm
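+// The load_add_store_* wrappers below invoke load_add_store with staggered,
+// partially empty argument lists, so that the loads, shifts, adds, clamps
+// and stores of consecutive rows overlap like a small software pipeline.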
+.macro load_add_store_8x8 dst, src, shiftbits=4
+ mov \src, \dst
+ vmov.i16 q6, #0
+ vmvn.i16 q7, #0xfc00 // 0x3ff
+ load_add_store q0, q8, , , , , , \dst, \src, \shiftbits
+ load_add_store q1, q9, , , , , , \dst, \src, \shiftbits
+ load_add_store q2, q10, q0, q8, , , , \dst, \src, \shiftbits
+ load_add_store q3, q11, q1, q9, q8, , , \dst, \src, \shiftbits
+ load_add_store q4, q12, q2, q10, q9, q8, , \dst, \src, \shiftbits
+ load_add_store q5, q13, q3, q11, q10, q9, q8, \dst, \src, \shiftbits
+ load_add_store q0, q14, q4, q12, q11, q10, q9, \dst, \src, \shiftbits
+ load_add_store q1, q15, q5, q13, q12, q11, q10, \dst, \src, \shiftbits
+ load_add_store , , q0, q14, q13, q12, q11, \dst, \src, \shiftbits
+ load_add_store , , q1, q15, q14, q13, q12, \dst, \src, \shiftbits
+ load_add_store , , , , q15, q14, q13, \dst, \src, \shiftbits
+ load_add_store , , , , , q15, q14, \dst, \src, \shiftbits
+ load_add_store , , , , , , q15, \dst, \src, \shiftbits
+.endm
+.macro load_add_store_8x4 dst, src, shiftbits=4
+ mov \src, \dst
+ vmov.i16 q6, #0
+ vmvn.i16 q7, #0xfc00 // 0x3ff
+ load_add_store q0, q8, , , , , , \dst, \src, \shiftbits
+ load_add_store q1, q9, , , , , , \dst, \src, \shiftbits
+ load_add_store q2, q10, q0, q8, , , , \dst, \src, \shiftbits
+ load_add_store q3, q11, q1, q9, q8, , , \dst, \src, \shiftbits
+ load_add_store , , q2, q10, q9, q8, , \dst, \src, \shiftbits
+ load_add_store , , q3, q11, q10, q9, q8, \dst, \src, \shiftbits
+ load_add_store , , , , q11, q10, q9, \dst, \src, \shiftbits
+ load_add_store , , , , , q11, q10, \dst, \src, \shiftbits
+ load_add_store , , , , , , q11, \dst, \src, \shiftbits
+.endm
+.macro load_add_store4 load1, load2, shift, addsrc, adddst, max, min, store1, store2, dst, src, shiftbits=4
+.ifnb \load1
+ vld1.16 {\load1}, [\src, :64], r1
+.endif
+.ifnb \shift
+ vrshr.s16 \shift, \shift, #\shiftbits
+.endif
+.ifnb \load2
+ vld1.16 {\load2}, [\src, :64], r1
+.endif
+.ifnb \addsrc
+ vqadd.s16 \adddst, \adddst, \addsrc
+.endif
+.ifnb \max
+ vmax.s16 \max, \max, q6
+.endif
+.ifnb \store1
+ vst1.16 {\store1}, [\dst, :64], r1
+.endif
+.ifnb \min
+ vmin.s16 \min, \min, q7
+.endif
+.ifnb \store2
+ vst1.16 {\store2}, [\dst, :64], r1
+.endif
+.endm
+.macro load_add_store_4x16 dst, src
+ mov \src, \dst
+ vmov.i16 q6, #0
+ vmvn.i16 q7, #0xfc00 // 0x3ff
+ mov \src, \dst
+ load_add_store4 d0, d1, q8, , , , , , , \dst, \src
+ load_add_store4 d2, d3, q9, , , , , , , \dst, \src
+ load_add_store4 d4, d5, q10, q0, q8, , , , , \dst, \src
+ load_add_store4 d6, d7, q11, q1, q9, q8, , , , \dst, \src
+ load_add_store4 d8, d9, q12, q2, q10, q9, q8, , , \dst, \src
+ load_add_store4 d10, d11, q13, q3, q11, q10, q9, d16, d17, \dst, \src
+ load_add_store4 d0, d1, q14, q4, q12, q11, q10, d18, d19, \dst, \src
+ load_add_store4 d2, d3, q15, q5, q13, q12, q11, d20, d21, \dst, \src
+ load_add_store4 , , , q0, q14, q13, q12, d22, d23, \dst, \src
+ load_add_store4 , , , q1, q15, q14, q13, d24, d25, \dst, \src
+ load_add_store4 , , , , , q15, q14, d26, d27, \dst, \src
+ load_add_store4 , , , , , , q15, d28, d29, \dst, \src
+ load_add_store4 , , , , , , , d30, d31, \dst, \src
+.endm
+.macro load_add_store_4x8 dst, src, shiftbits=4
+ mov \src, \dst
+ vmov.i16 q6, #0
+ vmvn.i16 q7, #0xfc00 // 0x3ff
+ mov \src, \dst
+ load_add_store4 d0, d1, q8, , , , , , , \dst, \src, \shiftbits
+ load_add_store4 d2, d3, q9, , , , , , , \dst, \src, \shiftbits
+ load_add_store4 d4, d5, q10, q0, q8, , , , , \dst, \src, \shiftbits
+ load_add_store4 d6, d7, q11, q1, q9, q8, , , , \dst, \src, \shiftbits
+ load_add_store4 , , , q2, q10, q9, q8, , , \dst, \src, \shiftbits
+ load_add_store4 , , , q3, q11, q10, q9, d16, d17, \dst, \src, \shiftbits
+ load_add_store4 , , , , , q11, q10, d18, d19, \dst, \src, \shiftbits
+ load_add_store4 , , , , , , q11, d20, d21, \dst, \src, \shiftbits
+ load_add_store4 , , , , , , , d22, d23, \dst, \src, \shiftbits
+.endm
+.macro load_add_store_4x4 dst, src, shiftbits=4
+ mov \src, \dst
+ vmov.i16 q6, #0
+ vmvn.i16 q7, #0xfc00 // 0x3ff
+ mov \src, \dst
+ load_add_store4 d0, d1, q8, , , , , , , \dst, \src, \shiftbits
+ load_add_store4 d2, d3, q9, q0, q8, , , , , \dst, \src, \shiftbits
+ load_add_store4 , , , q1, q9, q8, , , , \dst, \src, \shiftbits
+ load_add_store4 , , , , , q9, q8, , , \dst, \src, \shiftbits
+ load_add_store4 , , , , , , q9, d16, d17, \dst, \src, \shiftbits
+ load_add_store4 , , , , , , , d18, d19, \dst, \src, \shiftbits
+.endm
+
+.macro idct_dc w, h, shift
+ cmp r3, #0
+ bne 1f
+ vmov.i16 q14, #0
+ mov_const r12, 2896*8*(1<<16)
+ vld1.32 {d24[], d25[]}, [r2, :32]
+ vdup.32 d0, r12
+ vqrdmulh.s32 q13, q12, d0[0]
+ vst1.32 {d28[0]}, [r2, :32]
+.if (\w == 2*\h) || (2*\w == \h)
+ vqrdmulh.s32 q13, q13, d0[0]
+.endif
+.if \shift > 0
+ vqrshrn.s32 d24, q13, #\shift
+ vqrshrn.s32 d25, q13, #\shift
+.else
+ vqmovn.s32 d24, q13
+ vqmovn.s32 d25, q13
+.endif
+ vqrdmulh.s16 q12, q12, d0[1]
+ mov r3, #\h
+ vrshr.s16 q12, q12, #4
+ b idct_dc_w\w\()_neon
+1:
+.endm
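+// Rough scalar equivalent of the dc-only path above, with dc = coeff[0] and
+// bitdepth_max the clamping maximum (names are illustrative):
+//   dc ~= round(dc * 2896 / 4096)       (applied twice for 2:1 rectangles)
+//   dc  = round(dc >> shift), saturated to 16 bit
+//   dc ~= round(dc * 2896 / 4096)
+//   dc  = round(dc / 16)
+//   each output pixel = clip(pixel + dc, 0, bitdepth_max)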
+
+function idct_dc_w4_neon
+ vmvn.i16 q15, #0xfc00 // 0x3ff
+1:
+ vld1.16 {d0}, [r0, :64], r1
+ vld1.16 {d1}, [r0, :64], r1
+ vld1.16 {d2}, [r0, :64], r1
+ vld1.16 {d3}, [r0, :64], r1
+ subs r3, r3, #4
+ vqadd.s16 q0, q0, q12
+ sub r0, r0, r1, lsl #2
+ vqadd.s16 q1, q1, q12
+ vmax.s16 q0, q0, q14
+ vmax.s16 q1, q1, q14
+ vmin.s16 q0, q0, q15
+ vst1.16 {d0}, [r0, :64], r1
+ vmin.s16 q1, q1, q15
+ vst1.16 {d1}, [r0, :64], r1
+ vst1.16 {d2}, [r0, :64], r1
+ vst1.16 {d3}, [r0, :64], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w8_neon
+ vmvn.i16 q15, #0xfc00 // 0x3ff
+1:
+ vld1.16 {q0}, [r0, :128], r1
+ subs r3, r3, #4
+ vld1.16 {q1}, [r0, :128], r1
+ vqadd.s16 q0, q0, q12
+ vld1.16 {q2}, [r0, :128], r1
+ vqadd.s16 q1, q1, q12
+ vld1.16 {q3}, [r0, :128], r1
+ vqadd.s16 q2, q2, q12
+ vqadd.s16 q3, q3, q12
+ sub r0, r0, r1, lsl #2
+ vmax.s16 q0, q0, q14
+ vmax.s16 q1, q1, q14
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmin.s16 q0, q0, q15
+ vmin.s16 q1, q1, q15
+ vst1.16 {q0}, [r0, :128], r1
+ vmin.s16 q2, q2, q15
+ vst1.16 {q1}, [r0, :128], r1
+ vmin.s16 q3, q3, q15
+ vst1.16 {q2}, [r0, :128], r1
+ vst1.16 {q3}, [r0, :128], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w16_neon
+ vmvn.i16 q15, #0xfc00 // 0x3ff
+1:
+ vld1.16 {q0, q1}, [r0, :128], r1
+ subs r3, r3, #2
+ vld1.16 {q2, q3}, [r0, :128], r1
+ vqadd.s16 q0, q0, q12
+ vqadd.s16 q1, q1, q12
+ vqadd.s16 q2, q2, q12
+ vqadd.s16 q3, q3, q12
+ sub r0, r0, r1, lsl #1
+ vmax.s16 q0, q0, q14
+ vmax.s16 q1, q1, q14
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmin.s16 q0, q0, q15
+ vmin.s16 q1, q1, q15
+ vmin.s16 q2, q2, q15
+ vst1.16 {q0, q1}, [r0, :128], r1
+ vmin.s16 q3, q3, q15
+ vst1.16 {q2, q3}, [r0, :128], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w32_neon
+ sub r1, r1, #32
+ vmvn.i16 q15, #0xfc00 // 0x3ff
+1:
+ vld1.16 {q0, q1}, [r0, :128]!
+ subs r3, r3, #1
+ vld1.16 {q2, q3}, [r0, :128]
+ vqadd.s16 q0, q0, q12
+ vqadd.s16 q1, q1, q12
+ vqadd.s16 q2, q2, q12
+ vqadd.s16 q3, q3, q12
+ sub r0, r0, #32
+ vmax.s16 q0, q0, q14
+ vmax.s16 q1, q1, q14
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmin.s16 q0, q0, q15
+ vmin.s16 q1, q1, q15
+ vmin.s16 q2, q2, q15
+ vst1.16 {q0, q1}, [r0, :128]!
+ vmin.s16 q3, q3, q15
+ vst1.16 {q2, q3}, [r0, :128], r1
+ bgt 1b
+ bx lr
+endfunc
+
+function idct_dc_w64_neon
+ sub r1, r1, #96
+ vmvn.i16 q15, #0xfc00 // 0x3ff
+1:
+ vld1.16 {q0, q1}, [r0, :128]!
+ subs r3, r3, #1
+ vld1.16 {q2, q3}, [r0, :128]!
+ vqadd.s16 q0, q0, q12
+ vld1.16 {q8, q9}, [r0, :128]!
+ vqadd.s16 q1, q1, q12
+ vld1.16 {q10, q11}, [r0, :128]
+ vqadd.s16 q2, q2, q12
+ vqadd.s16 q3, q3, q12
+ vqadd.s16 q8, q8, q12
+ vqadd.s16 q9, q9, q12
+ vqadd.s16 q10, q10, q12
+ vqadd.s16 q11, q11, q12
+ sub r0, r0, #96
+ vmax.s16 q0, q0, q14
+ vmax.s16 q1, q1, q14
+ vmax.s16 q2, q2, q14
+ vmax.s16 q3, q3, q14
+ vmax.s16 q8, q8, q14
+ vmax.s16 q9, q9, q14
+ vmax.s16 q10, q10, q14
+ vmax.s16 q11, q11, q14
+ vmin.s16 q0, q0, q15
+ vmin.s16 q1, q1, q15
+ vmin.s16 q2, q2, q15
+ vmin.s16 q3, q3, q15
+ vmin.s16 q8, q8, q15
+ vst1.16 {q0, q1}, [r0, :128]!
+ vmin.s16 q9, q9, q15
+ vst1.16 {q2, q3}, [r0, :128]!
+ vmin.s16 q10, q10, q15
+ vst1.16 {q8, q9}, [r0, :128]!
+ vmin.s16 q11, q11, q15
+ vst1.16 {q10, q11}, [r0, :128], r1
+ bgt 1b
+ bx lr
+endfunc
+
+.macro iwht4
+ vadd.i32 q8, q8, q9
+ vsub.i32 q13, q10, q11
+ vsub.i32 q12, q8, q13
+ vshr.s32 q12, q12, #1
+ vsub.i32 q10, q12, q9
+ vsub.i32 q9, q12, q11
+ vadd.i32 q11, q13, q10
+ vsub.i32 q8, q8, q9
+.endm
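+// Scalar sketch of the lifting steps above (in0..in3 are the four input
+// rows, out0..out3 the outputs; names are illustrative):
+//   in0 += in1;  t = in2 - in3;  e = (in0 - t) >> 1;
+//   out1 = e - in3;  out2 = e - in1;  out3 = t + out2;  out0 = in0 - out1;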
+
+.macro idct_4s_x4 r0, r1, r2, r3
+ vmul_vmla q4, \r1, \r3, d1[1], d1[0]
+ vmul_vmla q2, \r0, \r2, d0[0], d0[0]
+ vmul_vmls q3, \r1, \r3, d1[0], d1[1]
+ vmul_vmls q5, \r0, \r2, d0[0], d0[0]
+ vrshr.s32 q4, q4, #12
+ vrshr.s32 q2, q2, #12
+ vrshr.s32 q3, q3, #12
+ vrshr.s32 q5, q5, #12
+ vqadd.s32 \r0, q2, q4
+ vqsub.s32 \r3, q2, q4
+ vqadd.s32 \r1, q5, q3
+ vqsub.s32 \r2, q5, q3
+.endm
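+// Scalar sketch of the 4-point idct above (idct_2s_x4 below is the same math
+// on two lanes); in0..in3/out0..out3 are illustrative names:
+//   t0 = ((in0 + in2)*2896 + 2048) >> 12
+//   t1 = ((in0 - in2)*2896 + 2048) >> 12
+//   t3 = (in1*3784 + in3*1567 + 2048) >> 12
+//   t2 = (in1*1567 - in3*3784 + 2048) >> 12
+//   out0 = t0 + t3;  out3 = t0 - t3;  out1 = t1 + t2;  out2 = t1 - t2  (saturating)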
+
+.macro idct_2s_x4 r0, r1, r2, r3
+ vmul_vmla d6, \r1, \r3, d1[1], d1[0]
+ vmul_vmla d4, \r0, \r2, d0[0], d0[0]
+ vmul_vmls d5, \r1, \r3, d1[0], d1[1]
+ vmul_vmls d7, \r0, \r2, d0[0], d0[0]
+ vrshr.s32 d6, d6, #12
+ vrshr.s32 d4, d4, #12
+ vrshr.s32 d5, d5, #12
+ vrshr.s32 d7, d7, #12
+ vqadd.s32 \r0, d4, d6
+ vqsub.s32 \r3, d4, d6
+ vqadd.s32 \r1, d7, d5
+ vqsub.s32 \r2, d7, d5
+.endm
+
+function inv_dct_4s_x4_neon
+ movrel_local r12, idct_coeffs
+ vld1.32 {d0, d1}, [r12, :128]
+ idct_4s_x4 q8, q9, q10, q11
+ bx lr
+endfunc
+
+.macro iadst_4x4 o0, o1, o2, o3
+ movrel_local r12, iadst4_coeffs
+ vld1.32 {d0, d1}, [r12, :128]
+
+ vsub.i32 q1, q8, q10
+ vmul.i32 q2, q8, d0[0]
+ vmla.i32 q2, q10, d0[1]
+ vmla.i32 q2, q11, d1[0]
+ vmul.i32 q4, q9, d1[1]
+ vadd.i32 q1, q1, q11
+ vmul.i32 q3, q8, d1[0]
+ vmls.i32 q3, q10, d0[0]
+ vmls.i32 q3, q11, d0[1]
+
+ vadd.i32 \o3, q2, q3
+ vmul.i32 \o2, q1, d1[1]
+ vadd.i32 \o0, q2, q4
+ vadd.i32 \o1, q3, q4
+ vsub.i32 \o3, \o3, q4
+
+ vrshr.s32 \o0, \o0, #12
+ vrshr.s32 \o2, \o2, #12
+ vrshr.s32 \o1, \o1, #12
+ vrshr.s32 \o3, \o3, #12
+.endm
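+// Scalar sketch of the iadst4 above, using the iadst4_coeffs values
+// (illustrative names):
+//   s0 = 1321*in0 + 3803*in2 + 2482*in3
+//   s1 = 2482*in0 - 1321*in2 - 3803*in3
+//   s2 = 3344*in1
+//   out0 = (s0 + s2 + 2048) >> 12;  out1 = (s1 + s2 + 2048) >> 12
+//   out2 = (3344*(in0 - in2 + in3) + 2048) >> 12
+//   out3 = (s0 + s1 - s2 + 2048) >> 12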
+
+function inv_adst_4s_x4_neon
+ iadst_4x4 q8, q9, q10, q11
+ bx lr
+endfunc
+
+function inv_flipadst_4s_x4_neon
+ iadst_4x4 q11, q10, q9, q8
+ bx lr
+endfunc
+
+function inv_identity_4s_x4_neon
+ mov r12, #0
+ movt r12, #(5793-4096)*8
+ vdup.32 d0, r12
+ vqrdmulh.s32 q1, q8, d0[0]
+ vqrdmulh.s32 q2, q9, d0[0]
+ vqrdmulh.s32 q3, q10, d0[0]
+ vqrdmulh.s32 q4, q11, d0[0]
+ vqadd.s32 q8, q8, q1
+ vqadd.s32 q9, q9, q2
+ vqadd.s32 q10, q10, q3
+ vqadd.s32 q11, q11, q4
+ bx lr
+endfunc
+
+function inv_txfm_add_wht_wht_4x4_16bpc_neon, export=1
+ push {r4-r5,lr}
+ vpush {q4-q5}
+ vmov.i16 q14, #0
+ vmov.i16 q15, #0
+ vld1.32 {q8, q9}, [r2, :128]
+ vst1.32 {q14, q15}, [r2, :128]!
+ vshr.s16 q8, q8, #2
+ vld1.32 {q10, q11}, [r2, :128]
+ vshr.s16 q9, q9, #2
+ vshr.s16 q10, q10, #2
+ vshr.s16 q11, q11, #2
+
+ iwht4
+
+ vst1.32 {q14, q15}, [r2, :128]
+ transpose_4x4s q8, q9, q10, q11, d16, d17, d18, d19, d20, d21, d22, d23
+
+ iwht4
+
+ vld1.16 {d0}, [r0, :64], r1
+ vqmovn.s32 d16, q8
+ vld1.16 {d1}, [r0, :64], r1
+ vqmovn.s32 d17, q9
+ vld1.16 {d2}, [r0, :64], r1
+ vqmovn.s32 d18, q10
+ vld1.16 {d3}, [r0, :64], r1
+ vqmovn.s32 d19, q11
+
+ b L(itx_4x4_end)
+endfunc
+
+function inv_txfm_add_4x4_neon
+ vmov.i16 q14, #0
+ vmov.i16 q15, #0
+ vld1.32 {q8, q9}, [r2, :128]
+ vst1.16 {q14, q15}, [r2, :128]!
+ vld1.32 {q10, q11}, [r2, :128]
+ vst1.16 {q14, q15}, [r2, :128]
+
+ blx r4
+
+ vqmovn.s32 d16, q8
+ vqmovn.s32 d17, q9
+ vqmovn.s32 d18, q10
+ vqmovn.s32 d19, q11
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+
+ blx r5
+
+ vld1.16 {d0}, [r0, :64], r1
+ vld1.16 {d1}, [r0, :64], r1
+ vrshr.s16 q8, q8, #4
+ vld1.16 {d2}, [r0, :64], r1
+ vrshr.s16 q9, q9, #4
+ vld1.16 {d3}, [r0, :64], r1
+
+L(itx_4x4_end):
+ vmvn.i16 q15, #0xfc00 // 0x3ff
+ sub r0, r0, r1, lsl #2
+ vqadd.s16 q8, q8, q0
+ vqadd.s16 q9, q9, q1
+ vmax.s16 q8, q8, q14
+ vmax.s16 q9, q9, q14
+ vmin.s16 q8, q8, q15
+ vmin.s16 q9, q9, q15
+ vst1.16 {d16}, [r0, :64], r1
+ vst1.16 {d17}, [r0, :64], r1
+ vst1.16 {d18}, [r0, :64], r1
+ vst1.16 {d19}, [r0, :64], r1
+
+ vpop {q4-q5}
+ pop {r4-r5,pc}
+endfunc
+
+.macro def_fn_4x4 txfm1, txfm2
+function inv_txfm_add_\txfm1\()_\txfm2\()_4x4_16bpc_neon, export=1
+ push {r4-r5,lr}
+ vpush {q4-q5}
+
+.ifc \txfm1\()_\txfm2, dct_dct
+ cmp r3, #0
+ bne 1f
+ vmov.i16 q14, #0
+ mov_const r12, 2896*8*(1<<16)
+ vld1.32 {d16[], d17[]}, [r2, :32]
+ vdup.32 d4, r12
+ vst1.32 {d28[0]}, [r2, :32]
+ vqrdmulh.s32 q8, q8, d4[0]
+ vld1.16 {d0}, [r0, :64], r1
+ vqmovn.s32 d20, q8
+ vqmovn.s32 d21, q8
+ vld1.16 {d1}, [r0, :64], r1
+ vqrdmulh.s16 q10, q10, d4[1]
+ vld1.16 {d2}, [r0, :64], r1
+ vrshr.s16 q8, q10, #4
+ vld1.16 {d3}, [r0, :64], r1
+ vrshr.s16 q9, q10, #4
+ b L(itx_4x4_end)
+1:
+.endif
+ movrel_local r4, inv_\txfm1\()_4s_x4_neon
+ movrel r5, X(inv_\txfm2\()_4h_x4_neon)
+ b inv_txfm_add_4x4_neon
+endfunc
+.endm
+
+def_fn_4x4 dct, dct
+def_fn_4x4 identity, identity
+def_fn_4x4 dct, adst
+def_fn_4x4 dct, flipadst
+def_fn_4x4 dct, identity
+def_fn_4x4 adst, dct
+def_fn_4x4 adst, adst
+def_fn_4x4 adst, flipadst
+def_fn_4x4 flipadst, dct
+def_fn_4x4 flipadst, adst
+def_fn_4x4 flipadst, flipadst
+def_fn_4x4 identity, dct
+
+def_fn_4x4 adst, identity
+def_fn_4x4 flipadst, identity
+def_fn_4x4 identity, adst
+def_fn_4x4 identity, flipadst
+
+.macro idct_4s_x8 r0, r1, r2, r3, r4, r5, r6, r7
+ idct_4s_x4 \r0, \r2, \r4, \r6
+
+ vmov.i32 q5, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 q4, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+.irp r, \r0, \r2, \r4, \r6
+ vmin.s32 \r, \r, q5
+.endr
+.irp r, \r0, \r2, \r4, \r6
+ vmax.s32 \r, \r, q4
+.endr
+
+ vmul_vmls q2, \r1, \r7, d2[0], d2[1] // -> t4a
+ vmul_vmla q3, \r1, \r7, d2[1], d2[0] // -> t7a
+ vmul_vmls q6, \r5, \r3, d3[0], d3[1] // -> t5a
+ vmul_vmla q7, \r5, \r3, d3[1], d3[0] // -> t6a
+ vrshr.s32 \r1, q2, #12 // t4a
+ vrshr.s32 \r7, q3, #12 // t7a
+ vrshr.s32 \r3, q6, #12 // t5a
+ vrshr.s32 \r5, q7, #12 // t6a
+
+ vqadd.s32 q2, \r1, \r3 // t4
+ vqsub.s32 \r1, \r1, \r3 // t5a
+ vqadd.s32 q3, \r7, \r5 // t7
+ vqsub.s32 \r3, \r7, \r5 // t6a
+
+.irp r, q2, \r1, q3, \r3
+ vmin.s32 \r, \r, q5
+.endr
+.irp r, q2, \r1, q3, \r3
+ vmax.s32 \r, \r, q4
+.endr
+
+ vmul_vmls q7, \r3, \r1, d0[0], d0[0] // -> t5
+ vmul_vmla q6, \r3, \r1, d0[0], d0[0] // -> t6
+ vrshr.s32 q7, q7, #12 // t5
+ vrshr.s32 q5, q6, #12 // t6
+
+ vqsub.s32 \r7, \r0, q3 // out7
+ vqadd.s32 \r0, \r0, q3 // out0
+ vqadd.s32 \r1, \r2, q5 // out1
+ vqsub.s32 q6, \r2, q5 // out6
+ vqadd.s32 \r2, \r4, q7 // out2
+ vqsub.s32 \r5, \r4, q7 // out5
+ vqadd.s32 \r3, \r6, q2 // out3
+ vqsub.s32 \r4, \r6, q2 // out4
+ vmov \r6, q6 // out6
+.endm
+
+.macro idct_2s_x8 r0, r1, r2, r3, r4, r5, r6, r7
+ idct_2s_x4 \r0, \r2, \r4, \r6
+
+ vmov.i32 d9, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 d8, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+.irp r, \r0, \r2, \r4, \r6
+ vmin.s32 \r, \r, d9
+.endr
+.irp r, \r0, \r2, \r4, \r6
+ vmax.s32 \r, \r, d8
+.endr
+
+ vmul_vmls d4, \r1, \r7, d2[0], d2[1] // -> t4a
+ vmul_vmla d5, \r1, \r7, d2[1], d2[0] // -> t7a
+ vmul_vmls d6, \r5, \r3, d3[0], d3[1] // -> t5a
+ vmul_vmla d7, \r5, \r3, d3[1], d3[0] // -> t6a
+ vrshr.s32 \r1, d4, #12 // t4a
+ vrshr.s32 \r7, d5, #12 // t7a
+ vrshr.s32 \r3, d6, #12 // t5a
+ vrshr.s32 \r5, d7, #12 // t6a
+
+ vqadd.s32 d4, \r1, \r3 // t4
+ vqsub.s32 \r1, \r1, \r3 // t5a
+ vqadd.s32 d5, \r7, \r5 // t7
+ vqsub.s32 \r3, \r7, \r5 // t6a
+
+.irp r, d4, \r1, d5, \r3
+ vmin.s32 \r, \r, d9
+.endr
+.irp r, d4, \r1, d5, \r3
+ vmax.s32 \r, \r, d8
+.endr
+
+ vmul_vmls d6, \r3, \r1, d0[0], d0[0] // -> t5
+ vmul_vmla d7, \r3, \r1, d0[0], d0[0] // -> t6
+ vrshr.s32 d6, d6, #12 // t5
+ vrshr.s32 d7, d7, #12 // t6
+
+ vqsub.s32 \r7, \r0, d5 // out7
+ vqadd.s32 \r0, \r0, d5 // out0
+ vqadd.s32 \r1, \r2, d7 // out1
+ vqsub.s32 d7, \r2, d7 // out6
+ vqadd.s32 \r2, \r4, d6 // out2
+ vqsub.s32 \r5, \r4, d6 // out5
+ vqadd.s32 \r3, \r6, d4 // out3
+ vqsub.s32 \r4, \r6, d4 // out4
+ vmov \r6, d7 // out6
+.endm
+
+function inv_dct_4s_x8_neon
+ movrel_local r12, idct_coeffs
+ vld1.32 {q0, q1}, [r12, :128]
+ idct_4s_x8 q8, q9, q10, q11, q12, q13, q14, q15
+ bx lr
+endfunc
+
+.macro iadst_4s_x8 r0, r1, r2, r3, r4, r5, r6, r7
+ movrel_local r12, iadst8_coeffs
+ vld1.32 {q0, q1}, [r12, :128]!
+
+ vmul_vmla q2, q15, q8, d0[0], d0[1]
+ vmul_vmls q3, q15, q8, d0[1], d0[0]
+ vmul_vmla q4, q13, q10, d1[0], d1[1]
+ vrshr.s32 q8, q2, #12 // t0a
+ vrshr.s32 q15, q3, #12 // t1a
+ vmul_vmls q5, q13, q10, d1[1], d1[0]
+ vmul_vmla q6, q11, q12, d2[0], d2[1]
+ vrshr.s32 q10, q4, #12 // t2a
+ vrshr.s32 q13, q5, #12 // t3a
+ vmul_vmls q7, q11, q12, d2[1], d2[0]
+ vmul_vmla q2, q9, q14, d3[0], d3[1]
+ vrshr.s32 q12, q6, #12 // t4a
+ vrshr.s32 q11, q7, #12 // t5a
+ vmul_vmls q3, q9, q14, d3[1], d3[0]
+ vrshr.s32 q14, q2, #12 // t6a
+ vrshr.s32 q9, q3, #12 // t7a
+
+ vld1.32 {q0}, [r12]
+
+ vqadd.s32 q2, q8, q12 // t0
+ vqsub.s32 q3, q8, q12 // t4
+ vmov.i32 q12, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vqadd.s32 q4, q15, q11 // t1
+ vqsub.s32 q5, q15, q11 // t5
+ vqadd.s32 q6, q10, q14 // t2
+ vqsub.s32 q7, q10, q14 // t6
+ vmvn.i32 q14, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+ vqadd.s32 q10, q13, q9 // t3
+ vqsub.s32 q11, q13, q9 // t7
+
+.irp r, q2, q3, q4, q5, q6, q7, q10, q11
+ vmin.s32 \r, \r, q12
+.endr
+.irp r, q2, q3, q4, q5, q6, q7, q10, q11
+ vmax.s32 \r, \r, q14
+.endr
+
+ vmul_vmla q8, q3, q5, d1[1], d1[0]
+ vmul_vmls q13, q3, q5, d1[0], d1[1]
+ vmul_vmls q14, q11, q7, d1[1], d1[0]
+
+ vrshr.s32 q3, q8, #12 // t4a
+ vrshr.s32 q5, q13, #12 // t5a
+
+ vmul_vmla q8, q11, q7, d1[0], d1[1]
+
+ vrshr.s32 q7, q14, #12 // t6a
+ vrshr.s32 q11, q8, #12 // t7a
+
+ vqadd.s32 \r0, q2, q6 // out0
+ vqsub.s32 q2, q2, q6 // t2
+ vqadd.s32 \r7, q4, q10 // out7
+ vqsub.s32 q4, q4, q10 // t3
+
+ vmvn.i32 q10, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+
+ vqadd.s32 \r1, q3, q7 // out1
+ vqsub.s32 q3, q3, q7 // t6
+ vqadd.s32 \r6, q5, q11 // out6
+ vqsub.s32 q5, q5, q11 // t7
+
+ // Not clipping the output registers (out0/out1/out6/out7), as they will
+ // be downshifted and narrowed afterwards anyway; only the t2/t3/t6/t7
+ // intermediates in q2/q4/q3/q5 below are clipped.
+.irp r, q2, q4, q3, q5
+ vmin.s32 \r, \r, q12
+.endr
+.irp r, q2, q4, q3, q5
+ vmax.s32 \r, \r, q10
+.endr
+
+ vqneg.s32 \r7, \r7 // out7
+ vqneg.s32 \r1, \r1 // out1
+
+ vmul_vmla q10, q2, q4, d0[0], d0[0] // -> out3 (q11 or q12)
+ vmul_vmls q6, q2, q4, d0[0], d0[0] // -> out4 (q12 or q11)
+ vmul_vmls q12, q3, q5, d0[0], d0[0] // -> out5 (q13 or q10)
+ vrshr.s32 q2, q10, #12 // out3
+ vmul_vmla q10, q3, q5, d0[0], d0[0] // -> out2 (q10 or q13)
+ vrshr.s32 q3, q12, #12 // out5
+ vrshr.s32 \r2, q10, #12 // out2 (q10 or q13)
+ vrshr.s32 \r4, q6, #12 // out4 (q12 or q11)
+
+ vqneg.s32 \r3, q2 // out3
+ vqneg.s32 \r5, q3 // out5
+.endm
+
+function inv_adst_4s_x8_neon
+ iadst_4s_x8 q8, q9, q10, q11, q12, q13, q14, q15
+ bx lr
+endfunc
+
+function inv_flipadst_4s_x8_neon
+ iadst_4s_x8 q15, q14, q13, q12, q11, q10, q9, q8
+ bx lr
+endfunc
+
+function inv_identity_4s_x8_neon
+ vqshl.s32 q8, q8, #1
+ vqshl.s32 q9, q9, #1
+ vqshl.s32 q10, q10, #1
+ vqshl.s32 q11, q11, #1
+ vqshl.s32 q12, q12, #1
+ vqshl.s32 q13, q13, #1
+ vqshl.s32 q14, q14, #1
+ vqshl.s32 q15, q15, #1
+ bx lr
+endfunc
+
+function inv_txfm_add_8x8_neon
+ vmov.i32 q0, #0
+ mov r7, #8*4
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q0}, [r2, :128], r7
+.endr
+
+ blx r4
+
+ vqrshrn.s32 d16, q8, #1
+ vqrshrn.s32 d17, q12, #1
+ vqrshrn.s32 d18, q9, #1
+ vqrshrn.s32 d19, q13, #1
+ vqrshrn.s32 d20, q10, #1
+ vqrshrn.s32 d21, q14, #1
+ vqrshrn.s32 d22, q11, #1
+ vqrshrn.s32 d23, q15, #1
+
+ cmp r3, r10
+ transpose_4x8h q8, q9, q10, q11
+
+ blt 1f
+
+ sub r2, r2, r7, lsl #3
+ vpush {q8-q11}
+
+ add r2, r2, #16
+ vmov.i32 q0, #0
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q0}, [r2, :128], r7
+.endr
+
+ blx r4
+
+ vqrshrn.s32 d31, q15, #1
+ vqrshrn.s32 d30, q11, #1
+ vqrshrn.s32 d29, q14, #1
+ vqrshrn.s32 d28, q10, #1
+ vqrshrn.s32 d27, q13, #1
+ vqrshrn.s32 d26, q9, #1
+ vqrshrn.s32 d25, q12, #1
+ vqrshrn.s32 d24, q8, #1
+ vpop {q8-q11}
+
+ transpose_4x8h q12, q13, q14, q15
+
+ b 2f
+
+1:
+ vmov.i16 q12, #0
+ vmov.i16 q13, #0
+ vmov.i16 q14, #0
+ vmov.i16 q15, #0
+
+2:
+ blx r5
+
+ load_add_store_8x8 r0, r7
+ vpop {q4-q7}
+ pop {r4-r5,r7,r10,pc}
+endfunc
+
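+// \eob_half is the eob threshold used by inv_txfm_add_8x8_neon above: when
+// the eob is below it, the second half of the coefficient block is known to
+// be all zero, so its row-transform pass is skipped.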
+.macro def_fn_8x8 txfm1, txfm2, eob_half
+function inv_txfm_add_\txfm1\()_\txfm2\()_8x8_16bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc 8, 8, 1
+.endif
+ push {r4-r5,r7,r10,lr}
+ vpush {q4-q7}
+ mov r10, #\eob_half
+ movrel_local r4, inv_\txfm1\()_4s_x8_neon
+ movrel r5, X(inv_\txfm2\()_8h_x8_neon)
+ b inv_txfm_add_8x8_neon
+endfunc
+.endm
+
+def_fn_8x8 dct, dct, 10
+def_fn_8x8 identity, identity, 10
+def_fn_8x8 dct, adst, 10
+def_fn_8x8 dct, flipadst, 10
+def_fn_8x8 dct, identity, 4
+def_fn_8x8 adst, dct, 10
+def_fn_8x8 adst, adst, 10
+def_fn_8x8 adst, flipadst, 10
+def_fn_8x8 flipadst, dct, 10
+def_fn_8x8 flipadst, adst, 10
+def_fn_8x8 flipadst, flipadst, 10
+def_fn_8x8 identity, dct, 4
+def_fn_8x8 adst, identity, 4
+def_fn_8x8 flipadst, identity, 4
+def_fn_8x8 identity, adst, 4
+def_fn_8x8 identity, flipadst, 4
+
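+// 8x4 and 4x8 use rect2 input scaling: 2896/4096 ~= 1/sqrt(2), applied via
+// vqrdmulh with the constant 2896*8*(1<<16) before the row transform.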
+function inv_txfm_add_8x4_neon
+ mov_const r12, 2896*8*(1<<16)
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vld1.16 {q8, q9}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]!
+ vdup.32 d4, r12
+ vld1.16 {q10, q11}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]!
+ vld1.16 {q12, q13}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]!
+ vld1.16 {q14, q15}, [r2, :128]
+ vst1.16 {q0, q1}, [r2, :128]!
+
+ scale_input d4[0], q8, q9, q10, q11, q12, q13, q14, q15
+
+ blx r4
+
+ vqmovn.s32 d16, q8
+ vqmovn.s32 d17, q9
+ vqmovn.s32 d18, q10
+ vqmovn.s32 d19, q11
+ vqmovn.s32 d20, q12
+ vqmovn.s32 d21, q13
+ vqmovn.s32 d22, q14
+ vqmovn.s32 d23, q15
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ vswp d17, d20
+ vswp d19, d21
+ vswp d18, d20
+ vswp d21, d22
+
+ blx r5
+
+ load_add_store_8x4 r0, r7
+ vpop {q4-q7}
+ pop {r4-r5,r7,r10,pc}
+endfunc
+
+function inv_txfm_add_4x8_neon
+ mov_const r12, 2896*8*(1<<16)
+ vmov.i32 q0, #0
+ cmp r3, r10
+ mov r7, #32
+ blt 1f
+
+ add r2, r2, #16
+ vdup.32 d2, r12
+.irp i, q8, q9, q10, q11
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q0}, [r2, :128], r7
+.endr
+
+ scale_input d2[0], q8, q9, q10, q11
+ sub r2, r2, r7, lsl #2
+
+ blx r4
+
+ sub r2, r2, #16
+
+ vqmovn.s32 d24, q8
+ vqmovn.s32 d25, q9
+ vqmovn.s32 d26, q10
+ vqmovn.s32 d27, q11
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+
+ b 2f
+
+1:
+ vmov.i16 q12, #0
+ vmov.i16 q13, #0
+
+2:
+ mov_const r12, 2896*8*(1<<16)
+ vmov.i32 q0, #0
+ vdup.32 d2, r12
+.irp i, q8, q9, q10, q11
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q0}, [r2, :128], r7
+.endr
+ scale_input d2[0], q8, q9, q10, q11
+ blx r4
+
+ vqmovn.s32 d16, q8
+ vqmovn.s32 d17, q9
+ vqmovn.s32 d18, q10
+ vqmovn.s32 d19, q11
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+
+ vmov q10, q12
+ vmov q11, q13
+
+ blx r5
+
+ load_add_store_4x8 r0, r7
+ vpop {q4-q7}
+ pop {r4-r5,r7,r10,pc}
+endfunc
+
+.macro def_fn_48 w, h, txfm1, txfm2, eob_half
+function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc \w, \h, 0
+.endif
+ push {r4-r5,r7,r10,lr}
+ vpush {q4-q7}
+ movrel_local r4, inv_\txfm1\()_4s_x\w\()_neon
+.if \w == 4
+ mov r10, #\eob_half
+.endif
+ movrel r5, X(inv_\txfm2\()_\w\()h_x\h\()_neon)
+ b inv_txfm_add_\w\()x\h\()_neon
+endfunc
+.endm
+
+.macro def_fns_48 w, h
+def_fn_48 \w, \h, dct, dct, 13
+def_fn_48 \w, \h, identity, identity, 13
+def_fn_48 \w, \h, dct, adst, 13
+def_fn_48 \w, \h, dct, flipadst, 13
+def_fn_48 \w, \h, dct, identity, 4
+def_fn_48 \w, \h, adst, dct, 13
+def_fn_48 \w, \h, adst, adst, 13
+def_fn_48 \w, \h, adst, flipadst, 13
+def_fn_48 \w, \h, flipadst, dct, 13
+def_fn_48 \w, \h, flipadst, adst, 13
+def_fn_48 \w, \h, flipadst, flipadst, 13
+def_fn_48 \w, \h, identity, dct, 16
+def_fn_48 \w, \h, adst, identity, 4
+def_fn_48 \w, \h, flipadst, identity, 4
+def_fn_48 \w, \h, identity, adst, 16
+def_fn_48 \w, \h, identity, flipadst, 16
+.endm
+
+def_fns_48 4, 8
+def_fns_48 8, 4
+
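+// 16-point inverse DCT over two 32-bit lanes per register: idct_2s_x8
+// handles the even half, then the odd-indexed registers are folded in below.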
+function inv_dct_2s_x16_neon
+ movrel_local r12, idct_coeffs
+ vld1.32 {q0, q1}, [r12, :128]!
+
+ idct_2s_x8 d16, d18, d20, d22, d24, d26, d28, d30
+
+ // idct_8 leaves the row_clip_max/min constants in d9 and d8
+.irp r, d16, d18, d20, d22, d24, d26, d28, d30
+ vmin.s32 \r, \r, d9
+.endr
+.irp r, d16, d18, d20, d22, d24, d26, d28, d30
+ vmax.s32 \r, \r, d8
+.endr
+
+ vld1.32 {q0, q1}, [r12, :128]
+ sub r12, r12, #32
+
+ vmul_vmls d4, d17, d31, d0[0], d0[1] // -> t8a
+ vmul_vmla d5, d17, d31, d0[1], d0[0] // -> t15a
+ vmul_vmls d6, d25, d23, d1[0], d1[1] // -> t9a
+ vrshr.s32 d17, d4, #12 // t8a
+ vrshr.s32 d31, d5, #12 // t15a
+ vmul_vmla d4, d25, d23, d1[1], d1[0] // -> t14a
+ vmul_vmls d5, d21, d27, d2[0], d2[1] // -> t10a
+ vrshr.s32 d23, d6, #12 // t9a
+ vrshr.s32 d25, d4, #12 // t14a
+ vmul_vmla d6, d21, d27, d2[1], d2[0] // -> t13a
+ vmul_vmls d4, d29, d19, d3[0], d3[1] // -> t11a
+ vrshr.s32 d21, d5, #12 // t10a
+ vrshr.s32 d27, d6, #12 // t13a
+ vmul_vmla d5, d29, d19, d3[1], d3[0] // -> t12a
+ vrshr.s32 d19, d4, #12 // t11a
+ vrshr.s32 d29, d5, #12 // t12a
+
+ vld1.32 {q0}, [r12, :128]
+
+ vqsub.s32 d4, d17, d23 // t9
+ vqadd.s32 d17, d17, d23 // t8
+ vqsub.s32 d5, d31, d25 // t14
+ vqadd.s32 d31, d31, d25 // t15
+ vqsub.s32 d23, d19, d21 // t10
+ vqadd.s32 d19, d19, d21 // t11
+ vqadd.s32 d25, d29, d27 // t12
+ vqsub.s32 d29, d29, d27 // t13
+
+.irp r, d4, d17, d5, d31, d23, d19, d25, d29
+ vmin.s32 \r, \r, d9
+.endr
+.irp r, d4, d17, d5, d31, d23, d19, d25, d29
+ vmax.s32 \r, \r, d8
+.endr
+
+ vmul_vmls d6, d5, d4, d1[0], d1[1] // -> t9a
+ vmul_vmla d7, d5, d4, d1[1], d1[0] // -> t14a
+ vrshr.s32 d21, d6, #12 // t9a
+ vrshr.s32 d27, d7, #12 // t14a
+
+ vmul_vmls d6, d29, d23, d1[0], d1[1] // -> t13a
+ vmul_vmla d7, d29, d23, d1[1], d1[0] // -> t10a
+ vrshr.s32 d29, d6, #12 // t13a
+ vneg.s32 d7, d7
+ vrshr.s32 d23, d7, #12 // t10a
+
+ vqsub.s32 d4, d17, d19 // t11a
+ vqadd.s32 d17, d17, d19 // t8a
+ vqsub.s32 d5, d31, d25 // t12a
+ vqadd.s32 d31, d31, d25 // t15a
+ vqadd.s32 d19, d21, d23 // t9
+ vqsub.s32 d21, d21, d23 // t10
+ vqsub.s32 d25, d27, d29 // t13
+ vqadd.s32 d27, d27, d29 // t14
+
+.irp r, d4, d17, d5, d31, d19, d21, d25, d27
+ vmin.s32 \r, \r, d9
+.endr
+.irp r, d4, d17, d5, d31, d19, d21, d25, d27
+ vmax.s32 \r, \r, d8
+.endr
+
+ vmul_vmls d6, d5, d4, d0[0], d0[0] // -> t11
+ vmul_vmla d7, d5, d4, d0[0], d0[0] // -> t12
+ vmul_vmls d4, d25, d21, d0[0], d0[0] // -> t10a
+
+ vrshr.s32 d6, d6, #12 // t11
+ vrshr.s32 d7, d7, #12 // t12
+ vmul_vmla d5, d25, d21, d0[0], d0[0] // -> t13a
+ vrshr.s32 d4, d4, #12 // t10a
+ vrshr.s32 d5, d5, #12 // t13a
+
+ vqadd.s32 d8, d16, d31 // out0
+ vqsub.s32 d31, d16, d31 // out15
+ vmov d16, d8
+ vqadd.s32 d23, d30, d17 // out7
+ vqsub.s32 d9, d30, d17 // out8
+ vqadd.s32 d17, d18, d27 // out1
+ vqsub.s32 d30, d18, d27 // out14
+ vqadd.s32 d18, d20, d5 // out2
+ vqsub.s32 d29, d20, d5 // out13
+ vqadd.s32 d5, d28, d19 // out6
+ vqsub.s32 d25, d28, d19 // out9
+ vqadd.s32 d19, d22, d7 // out3
+ vqsub.s32 d28, d22, d7 // out12
+ vqadd.s32 d20, d24, d6 // out4
+ vqsub.s32 d27, d24, d6 // out11
+ vqadd.s32 d21, d26, d4 // out5
+ vqsub.s32 d26, d26, d4 // out10
+ vmov d24, d9
+ vmov d22, d5
+
+ bx lr
+endfunc
+
+.macro iadst_16 o0, o1, o2, o3, o4, o5, o6, o7, o8, o9, o10, o11, o12, o13, o14, o15
+ movrel_local r12, iadst16_coeffs
+ vld1.32 {q0, q1}, [r12, :128]!
+
+ vmul_vmla d4, d31, d16, d0[0], d0[1] // -> t0
+ vmul_vmls d6, d31, d16, d0[1], d0[0] // -> t1
+ vmul_vmla d8, d29, d18, d1[0], d1[1] // -> t2
+ vrshr.s32 d16, d4, #12 // t0
+ vrshr.s32 d31, d6, #12 // t1
+ vmul_vmls d4, d29, d18, d1[1], d1[0] // -> t3
+ vmul_vmla d6, d27, d20, d2[0], d2[1] // -> t4
+ vrshr.s32 d18, d8, #12 // t2
+ vrshr.s32 d29, d4, #12 // t3
+ vmul_vmls d8, d27, d20, d2[1], d2[0] // -> t5
+ vmul_vmla d4, d25, d22, d3[0], d3[1] // -> t6
+ vrshr.s32 d20, d6, #12 // t4
+ vrshr.s32 d27, d8, #12 // t5
+ vmul_vmls d6, d25, d22, d3[1], d3[0] // -> t7
+ vld1.32 {q0, q1}, [r12, :128]
+ movrel_local r12, idct_coeffs
+ vmul_vmla d8, d23, d24, d0[0], d0[1] // -> t8
+ vrshr.s32 d22, d4, #12 // t6
+ vrshr.s32 d25, d6, #12 // t7
+ vmul_vmls d4, d23, d24, d0[1], d0[0] // -> t9
+ vmul_vmla d6, d21, d26, d1[0], d1[1] // -> t10
+ vrshr.s32 d23, d8, #12 // t8
+ vrshr.s32 d24, d4, #12 // t9
+ vmul_vmls d8, d21, d26, d1[1], d1[0] // -> t11
+ vmul_vmla d4, d19, d28, d2[0], d2[1] // -> t12
+ vrshr.s32 d21, d6, #12 // t10
+ vrshr.s32 d26, d8, #12 // t11
+ vmul_vmls d6, d19, d28, d2[1], d2[0] // -> t13
+ vmul_vmla d8, d17, d30, d3[0], d3[1] // -> t14
+ vrshr.s32 d19, d4, #12 // t12
+ vrshr.s32 d28, d6, #12 // t13
+ vmul_vmls d4, d17, d30, d3[1], d3[0] // -> t15
+ vrshr.s32 d17, d8, #12 // t14
+ vrshr.s32 d30, d4, #12 // t15
+
+ vld1.32 {q0, q1}, [r12, :128]
+
+ vmov.i32 d11, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 d10, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+
+ vqsub.s32 d5, d16, d23 // t8a
+ vqadd.s32 d16, d16, d23 // t0a
+ vqsub.s32 d7, d31, d24 // t9a
+ vqadd.s32 d31, d31, d24 // t1a
+ vqadd.s32 d23, d18, d21 // t2a
+ vqsub.s32 d18, d18, d21 // t10a
+ vqadd.s32 d24, d29, d26 // t3a
+ vqsub.s32 d29, d29, d26 // t11a
+ vqadd.s32 d21, d20, d19 // t4a
+ vqsub.s32 d20, d20, d19 // t12a
+ vqadd.s32 d26, d27, d28 // t5a
+ vqsub.s32 d27, d27, d28 // t13a
+ vqadd.s32 d19, d22, d17 // t6a
+ vqsub.s32 d22, d22, d17 // t14a
+ vqadd.s32 d28, d25, d30 // t7a
+ vqsub.s32 d25, d25, d30 // t15a
+
+.irp r, d5, d16, d7, d31, d23, d18, d24, d29, d21, d20, d26, d27, d19, d22, d28, d25
+ vmin.s32 \r, \r, d11
+.endr
+.irp r, d5, d16, d7, d31, d23, d18, d24, d29, d21, d20, d26, d27, d19, d22, d28, d25
+ vmax.s32 \r, \r, d10
+.endr
+
+ vmul_vmla d4, d5, d7, d2[1], d2[0] // -> t8
+ vmul_vmls d6, d5, d7, d2[0], d2[1] // -> t9
+ vmul_vmla d8, d18, d29, d3[1], d3[0] // -> t10
+ vrshr.s32 d17, d4, #12 // t8
+ vrshr.s32 d30, d6, #12 // t9
+ vmul_vmls d4, d18, d29, d3[0], d3[1] // -> t11
+ vmul_vmls d6, d27, d20, d2[1], d2[0] // -> t12
+ vrshr.s32 d18, d8, #12 // t10
+ vrshr.s32 d29, d4, #12 // t11
+ vmul_vmla d8, d27, d20, d2[0], d2[1] // -> t13
+ vmul_vmls d4, d25, d22, d3[1], d3[0] // -> t14
+ vrshr.s32 d27, d6, #12 // t12
+ vrshr.s32 d20, d8, #12 // t13
+ vmul_vmla d6, d25, d22, d3[0], d3[1] // -> t15
+ vrshr.s32 d25, d4, #12 // t14
+ vrshr.s32 d22, d6, #12 // t15
+
+ vqsub.s32 d2, d16, d21 // t4
+ vqadd.s32 d16, d16, d21 // t0
+ vqsub.s32 d3, d31, d26 // t5
+ vqadd.s32 d31, d31, d26 // t1
+ vqadd.s32 d21, d23, d19 // t2
+ vqsub.s32 d23, d23, d19 // t6
+ vqadd.s32 d26, d24, d28 // t3
+ vqsub.s32 d24, d24, d28 // t7
+ vqadd.s32 d19, d17, d27 // t8a
+ vqsub.s32 d17, d17, d27 // t12a
+ vqadd.s32 d28, d30, d20 // t9a
+ vqsub.s32 d30, d30, d20 // t13a
+ vqadd.s32 d27, d18, d25 // t10a
+ vqsub.s32 d18, d18, d25 // t14a
+ vqadd.s32 d20, d29, d22 // t11a
+ vqsub.s32 d29, d29, d22 // t15a
+
+.irp r, d2, d16, d3, d31, d21, d23, d26, d24, d19, d17, d28, d30, d27, d18, d20, d29
+ vmin.s32 \r, \r, d11
+.endr
+.irp r, d2, d16, d3, d31, d21, d23, d26, d24, d19, d17, d28, d30, d27, d18, d20, d29
+ vmax.s32 \r, \r, d10
+.endr
+
+ vmul_vmla d4, d2, d3, d1[1], d1[0] // -> t4a
+ vmul_vmls d6, d2, d3, d1[0], d1[1] // -> t5a
+ vmul_vmls d8, d24, d23, d1[1], d1[0] // -> t6a
+ vrshr.s32 d22, d4, #12 // t4a
+ vrshr.s32 d25, d6, #12 // t5a
+ vmul_vmla d4, d24, d23, d1[0], d1[1] // -> t7a
+ vmul_vmla d6, d17, d30, d1[1], d1[0] // -> t12
+ vrshr.s32 d24, d8, #12 // t6a
+ vrshr.s32 d23, d4, #12 // t7a
+ vmul_vmls d8, d17, d30, d1[0], d1[1] // -> t13
+ vmul_vmls d4, d29, d18, d1[1], d1[0] // -> t14
+ vrshr.s32 d17, d6, #12 // t12
+ vmul_vmla d6, d29, d18, d1[0], d1[1] // -> t15
+ vrshr.s32 d29, d8, #12 // t13
+ vrshr.s32 d30, d4, #12 // t14
+ vrshr.s32 d18, d6, #12 // t15
+
+ vqsub.s32 d2, d16, d21 // t2a
+.ifc \o0, d16
+ vqadd.s32 \o0, d16, d21 // out0
+ vqsub.s32 d21, d31, d26 // t3a
+ vqadd.s32 \o15,d31, d26 // out15
+.else
+ vqadd.s32 d4, d16, d21 // out0
+ vqsub.s32 d21, d31, d26 // t3a
+ vqadd.s32 \o15,d31, d26 // out15
+ vmov \o0, d4
+.endif
+
+ vqsub.s32 d3, d29, d18 // t15a
+ vqadd.s32 \o13,d29, d18 // out13
+ vqadd.s32 \o2, d17, d30 // out2
+ vqsub.s32 d26, d17, d30 // t14a
+
+ vqadd.s32 \o1, d19, d27 // out1
+ vqsub.s32 d27, d19, d27 // t10
+ vqadd.s32 \o14,d28, d20 // out14
+ vqsub.s32 d20, d28, d20 // t11
+
+ vqadd.s32 \o3, d22, d24 // out3
+ vqsub.s32 d22, d22, d24 // t6
+ vqadd.s32 \o12,d25, d23 // out12
+ vqsub.s32 d23, d25, d23 // t7
+
+ // Not clipping the output registers, as they will be downshifted and
+ // narrowed afterwards anyway; only the remaining intermediates below are
+ // clipped.
+.irp r, d2, d21, d3, d26, d27, d20, d22, d23
+ vmin.s32 \r, \r, d11
+.endr
+.irp r, d2, d21, d3, d26, d27, d20, d22, d23
+ vmax.s32 \r, \r, d10
+.endr
+
+ vqneg.s32 \o15, \o15 // out15
+ vqneg.s32 \o13,\o13 // out13
+ vqneg.s32 \o1, \o1 // out1
+ vqneg.s32 \o3, \o3 // out3
+
+ vmul_vmls d24, d2, d21, d0[0], d0[0] // -> out8 (d24 or d23)
+ vmul_vmla d4, d2, d21, d0[0], d0[0] // -> out7 (d23 or d24)
+ vmul_vmla d6, d26, d3, d0[0], d0[0] // -> out5 (d21 or d26)
+
+ vrshr.s32 d24, d24, #12 // out8
+ vrshr.s32 d4, d4, #12 // out7
+ vrshr.s32 d5, d6, #12 // out5
+ vmul_vmls d8, d26, d3, d0[0], d0[0] // -> out10 (d26 or d21)
+ vmul_vmla d2, d22, d23, d0[0], d0[0] // -> out4 (d20 or d27)
+ vrshr.s32 d26, d8, #12 // out10
+
+ vmul_vmls d8, d22, d23, d0[0], d0[0] // -> out11 (d27 or d20)
+ vmul_vmla d22, d27, d20, d0[0], d0[0] // -> out6 (d22 or d25)
+ vmul_vmls d6, d27, d20, d0[0], d0[0] // -> out9 (d25 or d22)
+
+ vrshr.s32 \o4, d2, #12 // out4
+ vrshr.s32 d7, d6, #12 // out9
+ vrshr.s32 d6, d8, #12 // out11
+ vrshr.s32 \o6, d22, #12 // out6
+
+.ifc \o8, d23
+ vmov \o8, d24
+ vmov \o10,d26
+.endif
+
+ vqneg.s32 \o7, d4 // out7
+ vqneg.s32 \o5, d5 // out5
+ vqneg.s32 \o11,d6 // out11
+ vqneg.s32 \o9, d7 // out9
+.endm
+
+function inv_adst_2s_x16_neon
+ iadst_16 d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ bx lr
+endfunc
+
+function inv_flipadst_2s_x16_neon
+ iadst_16 d31, d30, d29, d28, d27, d26, d25, d24, d23, d22, d21, d20, d19, d18, d17, d16
+ bx lr
+endfunc
+
+function inv_identity_2s_x16_neon
+ mov r12, #0
+ movt r12, #2*(5793-4096)*8
+ vdup.32 d0, r12
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vqrdmulh.s32 q1, \i, d0[0]
+ vqadd.s32 \i, \i, \i
+ vqadd.s32 \i, \i, q1
+.endr
+ bx lr
+endfunc
+
+.macro identity_8x4_shift1 c
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vqrdmulh.s32 q2, \i, \c
+ vrshr.s32 q2, q2, #1
+ vqadd.s32 \i, \i, q2
+.endr
+.endm
+
+.macro identity_8x4 c
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vqrdmulh.s32 q2, \i, \c
+ vqadd.s32 \i, \i, \i
+ vqadd.s32 \i, \i, q2
+.endr
+.endm
+
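+// Horizontal (row) pass helper: transforms two rows of a 16-wide block.
+// Loads and clears 16x2 coefficients from r7 (stride r8), optionally applies
+// the rect2 scale, runs the transform in r4, rounds and narrows to 16 bit
+// with a right shift of \shift, and stores the interleaved result to r6.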
+.macro def_horz_16 scale=0, shift=2, suffix
+function inv_txfm_horz\suffix\()_16x2_neon
+ push {lr}
+ vmov.i32 d7, #0
+.if \scale
+ mov_const r12, 2896*8*(1<<16)
+ vdup.32 d1, r12
+.endif
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.32 {\i}, [r7, :64]
+ vst1.32 {d7}, [r7, :64], r8
+.endr
+.if \scale
+ scale_input d1[0], q8, q9, q10, q11, q12, q13, q14, q15
+.endif
+ blx r4
+ vqrshrn.s32 d16, q8, #\shift
+ vqrshrn.s32 d17, q9, #\shift
+ vqrshrn.s32 d18, q10, #\shift
+ vqrshrn.s32 d19, q11, #\shift
+ vqrshrn.s32 d20, q12, #\shift
+ vqrshrn.s32 d21, q13, #\shift
+ vqrshrn.s32 d22, q14, #\shift
+ vqrshrn.s32 d23, q15, #\shift
+ vuzp.16 q8, q9
+ vuzp.16 q10, q11
+
+.irp i, q8, q10, q9, q11
+ vst1.16 {\i}, [r6, :128]!
+.endr
+
+ pop {pc}
+endfunc
+.endm
+
+def_horz_16 scale=0, shift=2
+def_horz_16 scale=1, shift=1, suffix=_scale
+
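+// Vertical (column) pass helper: loads 16 rows of 4 halfwords from the
+// intermediate buffer at r7 (stride r8), runs the 4h column transform in r5
+// and adds the result to the destination at r6.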
+function inv_txfm_add_vert_4x16_neon
+ push {lr}
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64], r8
+.endr
+ blx r5
+ load_add_store_4x16 r6, r7
+ pop {pc}
+endfunc
+
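+// 16x16: row transforms are done two rows at a time into a 512-byte stack
+// buffer (rows past the current eob threshold are only cleared), followed by
+// column transforms four columns at a time.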
+function inv_txfm_add_16x16_neon
+ sub_sp_align 512
+ ldrh r11, [r10], #2
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14
+ add r6, sp, #(\i*16*2)
+.if \i > 0
+ mov r8, #(16 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 14
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #16*4
+ bl inv_txfm_horz_16x2_neon
+.endr
+ b 3f
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 2
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+3:
+.irp i, 0, 4, 8, 12
+ add r6, r0, #(\i*2)
+ add r7, sp, #(\i*2)
+ mov r8, #32
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 512
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
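+// Per-iteration eob thresholds for the 16x16 functions: once the block's eob
+// (r3) drops below the next entry, the remaining rows of the row pass are
+// known to be all zero and are cleared instead of transformed.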
+const eob_16x16
+ .short 3, 10, 21, 36, 55, 78, 105, 256
+endconst
+
+const eob_16x16_identity
+ .short 2, 4, 6, 8, 10, 12, 14, 256
+endconst
+
+.macro def_fn_16x16 txfm1, txfm2
+function inv_txfm_add_\txfm1\()_\txfm2\()_16x16_16bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc 16, 16, 2
+.endif
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ movrel_local r4, inv_\txfm1\()_2s_x16_neon
+ movrel r5, X(inv_\txfm2\()_4h_x16_neon)
+.ifc \txfm1, identity
+.ifc \txfm2, identity
+ movrel_local r10, eob_16x16
+.else
+ movrel_local r10, eob_16x16_identity
+.endif
+.else
+.ifc \txfm2, identity
+ movrel_local r10, eob_16x16_identity
+.else
+ movrel_local r10, eob_16x16
+.endif
+.endif
+ b inv_txfm_add_16x16_neon
+endfunc
+.endm
+
+def_fn_16x16 dct, dct
+def_fn_16x16 identity, identity
+def_fn_16x16 dct, adst
+def_fn_16x16 dct, flipadst
+def_fn_16x16 dct, identity
+def_fn_16x16 adst, dct
+def_fn_16x16 adst, adst
+def_fn_16x16 adst, flipadst
+def_fn_16x16 flipadst, dct
+def_fn_16x16 flipadst, adst
+def_fn_16x16 flipadst, flipadst
+def_fn_16x16 identity, dct
+
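+// 16x4: two passes of the 2-lane 16-point row transform (the pass at r2+8 is
+// skipped and zeroed when the eob is below the threshold in r10), then the
+// 8h column transform is applied to each 8-column half of the destination.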
+function inv_txfm_add_16x4_neon
+ cmp r3, r10
+ mov r11, #16
+ blt 1f
+
+ add r6, r2, #8
+ vmov.i32 d4, #0
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.32 {\i}, [r6, :64]
+ vst1.32 {d4}, [r6, :64], r11
+.endr
+ blx r4
+
+ vqrshrn.s32 d16, q8, #1
+ vqrshrn.s32 d17, q9, #1
+ vqrshrn.s32 d18, q10, #1
+ vqrshrn.s32 d19, q11, #1
+ vqrshrn.s32 d20, q12, #1
+ vqrshrn.s32 d21, q13, #1
+ vqrshrn.s32 d22, q14, #1
+ vqrshrn.s32 d23, q15, #1
+ vuzp.16 q8, q9
+ mov r6, sp
+ vuzp.16 q10, q11
+ vpush {q8-q11}
+
+ b 2f
+
+1:
+ vmov.i16 q8, #0
+ vmov.i16 q9, #0
+ mov r6, sp
+ vpush {q8-q9}
+ vpush {q8-q9}
+
+2:
+ vmov.i32 d4, #0
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.32 {\i}, [r2, :64]
+ vst1.32 {d4}, [r2, :64], r11
+.endr
+
+ blx r4
+
+ vqrshrn.s32 d16, q8, #1
+ vqrshrn.s32 d17, q9, #1
+ vqrshrn.s32 d18, q10, #1
+ vqrshrn.s32 d19, q11, #1
+ vqrshrn.s32 d20, q12, #1
+ vqrshrn.s32 d21, q13, #1
+ vqrshrn.s32 d22, q14, #1
+ vqrshrn.s32 d23, q15, #1
+ vuzp.16 q8, q9
+ mov r6, sp
+ vuzp.16 q10, q11
+
+ vmov q12, q10
+ vmov q13, q11
+
+ vpop {q10-q11}
+ blx r5
+ mov r6, r0
+ load_add_store_8x4 r6, r7
+
+ vpop {q10-q11}
+ vmov q8, q12
+ vmov q9, q13
+ blx r5
+ add r6, r0, #16
+ load_add_store_8x4 r6, r7
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
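+// 4x16: the row transform is run on four 4-row groups; three of the groups
+// are skipped (treated as zero) when the eob falls below the matching
+// threshold at r10. A single 16-row column pass then adds the result to the
+// destination.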
+function inv_txfm_add_4x16_neon
+ ldrh r9, [r10, #4]
+
+ mov r11, #64
+ cmp r3, r9
+ ldrh r9, [r10, #2]
+ blt 1f
+
+ add r6, r2, #48
+ vmov.i32 q2, #0
+.irp i, q8, q9, q10, q11
+ vld1.32 {\i}, [r6, :128]
+ vst1.32 {q2}, [r6, :128], r11
+.endr
+ blx r4
+ vqrshrn.s32 d28, q8, #1
+ vqrshrn.s32 d29, q9, #1
+ vqrshrn.s32 d30, q10, #1
+ vqrshrn.s32 d31, q11, #1
+ transpose_4x4h q14, q15, d28, d29, d30, d31
+
+ b 2f
+1:
+ vmov.i16 q14, #0
+ vmov.i16 q15, #0
+2:
+ cmp r3, r9
+ ldrh r9, [r10]
+ blt 1f
+
+ add r6, r2, #32
+ vmov.i32 q2, #0
+.irp i, q8, q9, q10, q11
+ vld1.32 {\i}, [r6, :128]
+ vst1.32 {q2}, [r6, :128], r11
+.endr
+ blx r4
+ vqrshrn.s32 d24, q8, #1
+ vqrshrn.s32 d25, q9, #1
+ vqrshrn.s32 d26, q10, #1
+ vqrshrn.s32 d27, q11, #1
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+
+ b 2f
+1:
+ vmov.i16 q12, #0
+ vmov.i16 q13, #0
+2:
+ cmp r3, r9
+ blt 1f
+
+ add r6, r2, #16
+ vmov.i32 q2, #0
+.irp i, q8, q9, q10, q11
+ vld1.32 {\i}, [r6, :128]
+ vst1.32 {q2}, [r6, :128], r11
+.endr
+ blx r4
+ vqrshrn.s32 d16, q8, #1
+ vqrshrn.s32 d17, q9, #1
+ vqrshrn.s32 d18, q10, #1
+ vqrshrn.s32 d19, q11, #1
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+
+ b 2f
+1:
+ vmov.i16 q8, #0
+ vmov.i16 q9, #0
+2:
+ vmov.i16 q2, #0
+ vpush {q8-q9}
+.irp i, q8, q9, q10, q11
+ vld1.16 {\i}, [r2, :128]
+ vst1.16 {q2}, [r2, :128], r11
+.endr
+ blx r4
+ vqrshrn.s32 d16, q8, #1
+ vqrshrn.s32 d17, q9, #1
+ vqrshrn.s32 d18, q10, #1
+ vqrshrn.s32 d19, q11, #1
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ vpop {q10-q11}
+
+ blx r5
+
+ load_add_store_4x16 r0, r6
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+const eob_4x16
+ .short 13, 29, 45, 64
+endconst
+
+const eob_4x16_identity1
+ .short 16, 32, 48, 64
+endconst
+
+const eob_4x16_identity2
+ .short 4, 8, 12, 64
+endconst
+
+.macro def_fn_416 w, h, txfm1, txfm2, eob_16x4
+function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc \w, \h, 1
+.endif
+ push {r4-r11,lr}
+ vpush {q4-q7}
+.if \w == 4
+ movrel_local r4, inv_\txfm1\()_4s_x\w\()_neon
+ movrel r5, X(inv_\txfm2\()_4h_x\h\()_neon)
+.ifc \txfm1, identity
+.ifc \txfm2, identity
+ movrel_local r10, eob_4x16
+.else
+ movrel_local r10, eob_4x16_identity1
+.endif
+.else
+.ifc \txfm2, identity
+ movrel_local r10, eob_4x16_identity2
+.else
+ movrel_local r10, eob_4x16
+.endif
+.endif
+.else
+ mov r10, #\eob_16x4
+ movrel_local r4, inv_\txfm1\()_2s_x\w\()_neon
+ movrel r5, X(inv_\txfm2\()_8h_x\h\()_neon)
+.endif
+ b inv_txfm_add_\w\()x\h\()_neon
+endfunc
+.endm
+
+.macro def_fns_416 w, h
+def_fn_416 \w, \h, dct, dct, 3
+def_fn_416 \w, \h, identity, identity, 3
+def_fn_416 \w, \h, dct, adst, 3
+def_fn_416 \w, \h, dct, flipadst, 3
+def_fn_416 \w, \h, dct, identity, 2
+def_fn_416 \w, \h, adst, dct, 3
+def_fn_416 \w, \h, adst, adst, 3
+def_fn_416 \w, \h, adst, flipadst, 3
+def_fn_416 \w, \h, flipadst, dct, 3
+def_fn_416 \w, \h, flipadst, adst, 3
+def_fn_416 \w, \h, flipadst, flipadst, 3
+def_fn_416 \w, \h, identity, dct, 2
+def_fn_416 \w, \h, adst, identity, 2
+def_fn_416 \w, \h, flipadst, identity, 2
+def_fn_416 \w, \h, identity, adst, 2
+def_fn_416 \w, \h, identity, flipadst, 2
+.endm
+
+def_fns_416 4, 16
+def_fns_416 16, 4
+
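+// 16x8: scaled row passes two rows at a time into a 256-byte stack buffer
+// (rows past the eob threshold are only cleared), then two 8-column column
+// passes add the result to the destination.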
+function inv_txfm_add_16x8_neon
+ sub_sp_align 256
+ ldrh r11, [r10], #2
+
+.irp i, 0, 2, 4, 6
+ add r6, sp, #(\i*16*2)
+.if \i > 0
+ mov r8, #(8 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 6
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #8*4
+ bl inv_txfm_horz_scale_16x2_neon
+.endr
+ b 3f
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 2
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+3:
+
+.irp i, 0, 8
+ add r7, sp, #(\i*2)
+ mov r8, #32
+.irp j, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\j}, [r7, :128], r8
+.endr
+ blx r5
+
+ add r6, r0, #(\i*2)
+ load_add_store_8x8 r6, r7
+.endr
+
+ add_sp_align 256
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
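+// 8x16: the 4s 8-point row transform is applied four rows at a time with
+// rect2 input scaling, the results transposed into a stack buffer, then two
+// 4-column passes of the 16-row column transform add them to the destination.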
+function inv_txfm_add_8x16_neon
+ add r10, r10, #2
+ sub_sp_align 256
+ ldrh r11, [r10], #4
+
+.irp i, 0, 4, 8, 12
+ add r6, sp, #(\i*8*2)
+.if \i > 0
+ mov r8, #(16 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 12
+ ldrh r11, [r10], #4
+.endif
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #16*4
+
+ mov_const r12, 2896*8*(1<<16)
+ vmov.i32 q2, #0
+ vdup.32 d0, r12
+
+.irp j, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.32 {\j}, [r7, :128]
+ vst1.32 {q2}, [r7, :128], r8
+.endr
+ scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
+ blx r4
+ vqrshrn.s32 d16, q8, #1
+ vqrshrn.s32 d17, q9, #1
+ vqrshrn.s32 d18, q10, #1
+ vqrshrn.s32 d19, q11, #1
+ vqrshrn.s32 d20, q12, #1
+ vqrshrn.s32 d21, q13, #1
+ vqrshrn.s32 d22, q14, #1
+ vqrshrn.s32 d23, q15, #1
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+.irp j, d16, d20, d17, d21, d18, d22, d19, d23
+ vst1.16 {\j}, [r6, :64]!
+.endr
+.endr
+ b 3f
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #4
+.rept 2
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+3:
+
+.irp i, 0, 4
+ add r6, r0, #(\i*2)
+ add r7, sp, #(\i*2)
+ mov r8, #16
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 256
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+const eob_8x16
+ .short 3, 10, 21, 43, 59, 75, 91, 128
+endconst
+
+const eob_8x16_identity1
+ .short 2, 4, 6, 64, 80, 96, 112, 128
+endconst
+
+const eob_8x16_identity2
+ .short 2, 4, 6, 8, 10, 12, 14, 128
+endconst
+
+.macro def_fn_816 w, h, txfm1, txfm2
+function inv_txfm_add_\txfm1\()_\txfm2\()_\w\()x\h\()_16bpc_neon, export=1
+.ifc \txfm1\()_\txfm2, dct_dct
+ idct_dc \w, \h, 1
+.endif
+ push {r4-r11,lr}
+ vpush {q4-q7}
+.if \w == 8
+ movrel_local r4, inv_\txfm1\()_4s_x8_neon
+ movrel r5, X(inv_\txfm2\()_4h_x16_neon)
+.else
+ movrel_local r4, inv_\txfm1\()_2s_x16_neon
+ movrel r5, X(inv_\txfm2\()_8h_x8_neon)
+.endif
+.ifc \txfm1, identity
+.ifc \txfm2, identity
+ movrel_local r10, eob_8x16
+.else
+ movrel_local r10, eob_8x16_identity1
+.endif
+.else
+.ifc \txfm2, identity
+ movrel_local r10, eob_8x16_identity2
+.else
+ movrel_local r10, eob_8x16
+.endif
+.endif
+ b inv_txfm_add_\w\()x\h\()_neon
+endfunc
+.endm
+
+.macro def_fns_816 w, h
+def_fn_816 \w, \h, dct, dct
+def_fn_816 \w, \h, identity, identity
+def_fn_816 \w, \h, dct, adst
+def_fn_816 \w, \h, dct, flipadst
+def_fn_816 \w, \h, dct, identity
+def_fn_816 \w, \h, adst, dct
+def_fn_816 \w, \h, adst, adst
+def_fn_816 \w, \h, adst, flipadst
+def_fn_816 \w, \h, flipadst, dct
+def_fn_816 \w, \h, flipadst, adst
+def_fn_816 \w, \h, flipadst, flipadst
+def_fn_816 \w, \h, identity, dct
+def_fn_816 \w, \h, adst, identity
+def_fn_816 \w, \h, flipadst, identity
+def_fn_816 \w, \h, identity, adst
+def_fn_816 \w, \h, identity, flipadst
+.endm
+
+def_fns_816 8, 16
+def_fns_816 16, 8
+
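+// Odd half of the 32-point DCT: takes the 16 odd-index inputs in d16-d31 and
+// produces out16-out31, clamping intermediates to the row range between
+// butterfly stages.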
+function inv_dct32_odd_2s_x16_neon
+ movrel_local r12, idct_coeffs, 4*16
+ vld1.32 {q0, q1}, [r12, :128]!
+
+ vmul_vmls d4, d16, d31, d0[0], d0[1] // -> t16a
+ vmul_vmla d6, d16, d31, d0[1], d0[0] // -> t31a
+ vmul_vmls d8, d24, d23, d1[0], d1[1] // -> t17a
+ vrshr.s32 d16, d4, #12 // t16a
+ vrshr.s32 d31, d6, #12 // t31a
+ vmul_vmla d4, d24, d23, d1[1], d1[0] // -> t30a
+ vmul_vmls d6, d20, d27, d2[0], d2[1] // -> t18a
+ vrshr.s32 d24, d8, #12 // t17a
+ vrshr.s32 d23, d4, #12 // t30a
+ vmul_vmla d8, d20, d27, d2[1], d2[0] // -> t29a
+ vmul_vmls d4, d28, d19, d3[0], d3[1] // -> t19a
+ vrshr.s32 d20, d6, #12 // t18a
+ vrshr.s32 d27, d8, #12 // t29a
+ vmul_vmla d6, d28, d19, d3[1], d3[0] // -> t28a
+ vld1.32 {q0, q1}, [r12, :128]
+ sub r12, r12, #4*24
+ vmul_vmls d8, d18, d29, d0[0], d0[1] // -> t20a
+ vrshr.s32 d28, d4, #12 // t19a
+ vrshr.s32 d19, d6, #12 // t28a
+ vmul_vmla d4, d18, d29, d0[1], d0[0] // -> t27a
+ vmul_vmls d6, d26, d21, d1[0], d1[1] // -> t21a
+ vrshr.s32 d18, d8, #12 // t20a
+ vrshr.s32 d29, d4, #12 // t27a
+ vmul_vmla d8, d26, d21, d1[1], d1[0] // -> t26a
+ vmul_vmls d4, d22, d25, d2[0], d2[1] // -> t22a
+ vrshr.s32 d26, d6, #12 // t21a
+ vrshr.s32 d21, d8, #12 // t26a
+ vmul_vmla d6, d22, d25, d2[1], d2[0] // -> t25a
+ vmul_vmls d8, d30, d17, d3[0], d3[1] // -> t23a
+ vrshr.s32 d22, d4, #12 // t22a
+ vrshr.s32 d25, d6, #12 // t25a
+ vmul_vmla d4, d30, d17, d3[1], d3[0] // -> t24a
+ vrshr.s32 d30, d8, #12 // t23a
+ vrshr.s32 d17, d4, #12 // t24a
+
+ vld1.32 {q0, q1}, [r12, :128]
+
+ vmov.i32 d11, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 d10, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+
+ vqsub.s32 d5, d16, d24 // t17
+ vqadd.s32 d16, d16, d24 // t16
+ vqsub.s32 d7, d31, d23 // t30
+ vqadd.s32 d31, d31, d23 // t31
+ vqsub.s32 d24, d28, d20 // t18
+ vqadd.s32 d28, d28, d20 // t19
+ vqadd.s32 d23, d18, d26 // t20
+ vqsub.s32 d18, d18, d26 // t21
+ vqsub.s32 d20, d30, d22 // t22
+ vqadd.s32 d30, d30, d22 // t23
+ vqadd.s32 d26, d17, d25 // t24
+ vqsub.s32 d17, d17, d25 // t25
+ vqsub.s32 d22, d29, d21 // t26
+ vqadd.s32 d29, d29, d21 // t27
+ vqadd.s32 d25, d19, d27 // t28
+ vqsub.s32 d19, d19, d27 // t29
+
+.irp r, d5, d16, d7, d31, d24, d28, d23, d18, d20, d30, d26, d17, d22, d29, d25, d19
+ vmin.s32 \r, \r, d11
+.endr
+.irp r, d5, d16, d7, d31, d24, d28, d23, d18, d20, d30, d26, d17, d22, d29, d25, d19
+ vmax.s32 \r, \r, d10
+.endr
+
+ vmul_vmls d4, d7, d5, d2[0], d2[1] // -> t17a
+ vmul_vmla d6, d7, d5, d2[1], d2[0] // -> t30a
+ vmul_vmla d8, d19, d24, d2[1], d2[0] // -> t18a
+ vrshr.s32 d21, d4, #12 // t17a
+ vrshr.s32 d27, d6, #12 // t30a
+ vneg.s32 d8, d8 // -> t18a
+ vmul_vmls d5, d19, d24, d2[0], d2[1] // -> t29a
+ vmul_vmls d4, d22, d18, d3[0], d3[1] // -> t21a
+ vrshr.s32 d19, d8, #12 // t18a
+ vrshr.s32 d24, d5, #12 // t29a
+ vmul_vmla d6, d22, d18, d3[1], d3[0] // -> t26a
+ vmul_vmla d8, d17, d20, d3[1], d3[0] // -> t22a
+ vrshr.s32 d22, d4, #12 // t21a
+ vrshr.s32 d18, d6, #12 // t26a
+ vneg.s32 d8, d8 // -> t22a
+ vmul_vmls d5, d17, d20, d3[0], d3[1] // -> t25a
+ vrshr.s32 d17, d8, #12 // t22a
+ vrshr.s32 d20, d5, #12 // t25a
+
+ vqsub.s32 d2, d27, d24 // t29
+ vqadd.s32 d27, d27, d24 // t30
+ vqsub.s32 d3, d21, d19 // t18
+ vqadd.s32 d21, d21, d19 // t17
+ vqsub.s32 d24, d16, d28 // t19a
+ vqadd.s32 d16, d16, d28 // t16a
+ vqsub.s32 d19, d30, d23 // t20a
+ vqadd.s32 d30, d30, d23 // t23a
+ vqsub.s32 d28, d17, d22 // t21
+ vqadd.s32 d17, d17, d22 // t22
+ vqadd.s32 d23, d26, d29 // t24a
+ vqsub.s32 d26, d26, d29 // t27a
+ vqadd.s32 d22, d20, d18 // t25
+ vqsub.s32 d20, d20, d18 // t26
+ vqsub.s32 d29, d31, d25 // t28a
+ vqadd.s32 d31, d31, d25 // t31a
+
+.irp r, d2, d27, d3, d21, d24, d16, d19, d30, d28, d17, d23, d26, d22, d20, d29, d31
+ vmin.s32 \r, \r, d11
+.endr
+.irp r, d2, d27, d3, d21, d24, d16, d19, d30, d28, d17, d23, d26, d22, d20, d29, d31
+ vmax.s32 \r, \r, d10
+.endr
+
+ vmul_vmls d4, d2, d3, d1[0], d1[1] // -> t18a
+ vmul_vmla d6, d2, d3, d1[1], d1[0] // -> t29a
+ vmul_vmls d8, d29, d24, d1[0], d1[1] // -> t19
+ vrshr.s32 d18, d4, #12 // t18a
+ vrshr.s32 d25, d6, #12 // t29a
+ vmul_vmla d5, d29, d24, d1[1], d1[0] // -> t28
+ vmul_vmla d4, d26, d19, d1[1], d1[0] // -> t20
+ vrshr.s32 d29, d8, #12 // t19
+ vrshr.s32 d24, d5, #12 // t28
+ vneg.s32 d4, d4 // -> t20
+ vmul_vmls d6, d26, d19, d1[0], d1[1] // -> t27
+ vmul_vmla d8, d20, d28, d1[1], d1[0] // -> t21a
+ vrshr.s32 d26, d4, #12 // t20
+ vrshr.s32 d19, d6, #12 // t27
+ vneg.s32 d8, d8 // -> t21a
+ vmul_vmls d5, d20, d28, d1[0], d1[1] // -> t26a
+ vrshr.s32 d20, d8, #12 // t21a
+ vrshr.s32 d28, d5, #12 // t26a
+
+ vqsub.s32 d2, d16, d30 // t23
+ vqadd.s32 d16, d16, d30 // t16 = out16
+ vqsub.s32 d3, d31, d23 // t24
+ vqadd.s32 d31, d31, d23 // t31 = out31
+ vqsub.s32 d23, d21, d17 // t22a
+ vqadd.s32 d17, d21, d17 // t17a = out17
+ vqadd.s32 d30, d27, d22 // t30a = out30
+ vqsub.s32 d21, d27, d22 // t25a
+ vqsub.s32 d27, d18, d20 // t21
+ vqadd.s32 d18, d18, d20 // t18 = out18
+ vqadd.s32 d4, d29, d26 // t19a = out19
+ vqsub.s32 d26, d29, d26 // t20a
+ vqadd.s32 d29, d25, d28 // t29 = out29
+ vqsub.s32 d25, d25, d28 // t26
+ vqadd.s32 d28, d24, d19 // t28a = out28
+ vqsub.s32 d24, d24, d19 // t27a
+ vmov d19, d4 // out19
+
+.irp r, d2, d16, d3, d31, d23, d17, d30, d21, d27, d18, d19, d26, d29, d25, d28, d24
+ vmin.s32 \r, \r, d11
+.endr
+.irp r, d2, d16, d3, d31, d23, d17, d30, d21, d27, d18, d19, d26, d29, d25, d28, d24
+ vmax.s32 \r, \r, d10
+.endr
+
+ vmul_vmls d4, d24, d26, d0[0], d0[0] // -> t20
+ vmul_vmla d6, d24, d26, d0[0], d0[0] // -> t27
+ vrshr.s32 d20, d4, #12 // t20
+ vrshr.s32 d22, d6, #12 // t27
+
+ vmul_vmla d4, d25, d27, d0[0], d0[0] // -> t26a
+ vmul_vmls d6, d25, d27, d0[0], d0[0] // -> t21a
+ vmov d27, d22 // t27
+ vrshr.s32 d26, d4, #12 // t26a
+
+ vmul_vmls d24, d21, d23, d0[0], d0[0] // -> t22
+ vmul_vmla d4, d21, d23, d0[0], d0[0] // -> t25
+ vrshr.s32 d21, d6, #12 // t21a
+ vrshr.s32 d22, d24, #12 // t22
+ vrshr.s32 d25, d4, #12 // t25
+
+ vmul_vmls d4, d3, d2, d0[0], d0[0] // -> t23a
+ vmul_vmla d6, d3, d2, d0[0], d0[0] // -> t24a
+ vrshr.s32 d23, d4, #12 // t23a
+ vrshr.s32 d24, d6, #12 // t24a
+
+ bx lr
+endfunc
+
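+// Row pass for 32-wide blocks, two rows at a time: the even 16 outputs come
+// from inv_dct_2s_x16, the odd 16 from inv_dct32_odd_2s_x16, and the two
+// halves are combined with add/sub butterflies while rounding by \shift and
+// narrowing into the r6 buffer.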
+.macro def_horz_32 scale=0, shift=2, suffix
+function inv_txfm_horz\suffix\()_dct_32x2_neon
+ push {lr}
+ vmov.i32 d7, #0
+ lsl r8, r8, #1
+.if \scale
+ mov_const r12, 2896*8*(1<<16)
+ vdup.32 d0, r12
+.endif
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.32 {\i}, [r7, :64]
+ vst1.32 {d7}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+ add r7, r7, r8, lsr #1
+.if \scale
+ scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
+.endif
+ bl inv_dct_2s_x16_neon
+
+ // idct_16 leaves the row_clip_max/min constants in d9 and d8,
+ // but here we want to use full q registers for clipping.
+ vmov.i32 q3, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 q2, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+.irp r, q8, q9, q10, q11, q12, q13, q14, q15
+ vmin.s32 \r, \r, q3
+.endr
+.irp r, q8, q9, q10, q11, q12, q13, q14, q15
+ vmax.s32 \r, \r, q2
+.endr
+
+ vtrn.32 d16, d17
+ vtrn.32 d18, d19
+ vtrn.32 d20, d21
+ vtrn.32 d22, d23
+ vtrn.32 d24, d25
+ vtrn.32 d26, d27
+ vtrn.32 d28, d29
+ vtrn.32 d30, d31
+
+.macro store1 r0, r1, r2, r3
+ vst1.16 {\r0}, [r6, :64]!
+ vst1.16 {\r1}, [r6, :64]!
+ vst1.16 {\r2}, [r6, :64]!
+ vst1.16 {\r3}, [r6, :64]!
+.endm
+ store1 d16, d18, d20, d22
+ store1 d24, d26, d28, d30
+ store1 d17, d19, d21, d23
+ store1 d25, d27, d29, d31
+.purgem store1
+ sub r6, r6, #64*2
+
+ vmov.i32 d7, #0
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.32 {\i}, [r7, :64]
+ vst1.32 {d7}, [r7, :64], r8
+.endr
+.if \scale
+ // This relies on the fact that the idct also leaves the right coeff in d0[1]
+ scale_input d0[1], q8, q9, q10, q11, q12, q13, q14, q15
+.endif
+ bl inv_dct32_odd_2s_x16_neon
+ vtrn.32 d31, d30
+ vtrn.32 d29, d28
+ vtrn.32 d27, d26
+ vtrn.32 d25, d24
+ vtrn.32 d23, d22
+ vtrn.32 d21, d20
+ vtrn.32 d19, d18
+ vtrn.32 d17, d16
+.macro store2 r0, r1, r2, r3, r4, r5, r6, r7, shift
+ vld1.32 {q0, q1}, [r6, :128]!
+ vld1.32 {q2, q3}, [r6, :128]
+ sub r6, r6, #32
+ vqsub.s32 d15, d0, \r0
+ vqadd.s32 d0, d0, \r0
+ vqsub.s32 d14, d1, \r1
+ vqadd.s32 d1, d1, \r1
+ vqsub.s32 d13, d2, \r2
+ vqadd.s32 d2, d2, \r2
+ vqsub.s32 d12, d3, \r3
+ vqadd.s32 d3, d3, \r3
+ vqsub.s32 d11, d4, \r4
+ vqadd.s32 d4, d4, \r4
+ vqsub.s32 d10, d5, \r5
+ vqadd.s32 d5, d5, \r5
+ vqsub.s32 d9, d6, \r6
+ vqadd.s32 d6, d6, \r6
+ vqsub.s32 d8, d7, \r7
+ vqadd.s32 d7, d7, \r7
+ vqrshrn.s32 d0, q0, #\shift
+ vqrshrn.s32 d1, q1, #\shift
+ vqrshrn.s32 d2, q2, #\shift
+ vqrshrn.s32 d3, q3, #\shift
+ vqrshrn.s32 d4, q4, #\shift
+ vqrshrn.s32 d5, q5, #\shift
+ vqrshrn.s32 d6, q6, #\shift
+ vqrshrn.s32 d7, q7, #\shift
+ vrev32.16 q2, q2
+ vrev32.16 q3, q3
+ vst1.16 {q0, q1}, [r6, :128]!
+ vst1.16 {q2, q3}, [r6, :128]!
+.endm
+
+ store2 d31, d29, d27, d25, d23, d21, d19, d17, \shift
+ store2 d30, d28, d26, d24, d22, d20, d18, d16, \shift
+.purgem store2
+ pop {pc}
+endfunc
+.endm
+
+def_horz_32 scale=0, shift=2
+def_horz_32 scale=1, shift=1, suffix=_scale
+
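+// Column pass for 32-row blocks, four columns at a time: even half via
+// X(inv_dct_4h_x16_neon), odd half via X(inv_dct32_odd_4h_x16_neon), then
+// the combine macro below butterflies the two halves together, rounds, and
+// adds the result to the destination with clipping.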
+function inv_txfm_add_vert_dct_4x32_neon
+ push {r10-r11,lr}
+ lsl r8, r8, #1
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+
+ bl X(inv_dct_4h_x16_neon)
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vst1.16 {\i}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+ add r7, r7, r8, lsr #1
+
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23, d24, d25, d26, d27, d28, d29, d30, d31
+ vld1.16 {\i}, [r7, :64], r8
+.endr
+ sub r7, r7, r8, lsl #4
+ sub r7, r7, r8, lsr #1
+ bl X(inv_dct32_odd_4h_x16_neon)
+
+ neg r9, r8
+ mov r10, r6
+ vmov.i16 q6, #0
+ vmvn.i16 q7, #0xfc00 // 0x3ff
+.macro combine r0, r1, r2, r3, op, stride
+ vld1.16 {d4}, [r7, :64], \stride
+ vld1.16 {d0}, [r10, :64], r1
+ vld1.16 {d5}, [r7, :64], \stride
+ vld1.16 {d1}, [r10, :64], r1
+ \op\().s16 d4, d4, \r0
+ vld1.16 {d6}, [r7, :64], \stride
+ vld1.16 {d2}, [r10, :64], r1
+ \op\().s16 d5, d5, \r1
+ vld1.16 {d3}, [r10, :64], r1
+ vrshr.s16 q2, q2, #4
+ \op\().s16 d6, d6, \r2
+ vld1.16 {d7}, [r7, :64], \stride
+ vqadd.s16 q0, q0, q2
+ \op\().s16 d7, d7, \r3
+ vmax.s16 q0, q0, q6
+ vrshr.s16 q3, q3, #4
+ vmin.s16 q0, q0, q7
+ vqadd.s16 q1, q1, q3
+ vst1.16 {d0}, [r6, :64], r1
+ vmax.s16 q1, q1, q6
+ vst1.16 {d1}, [r6, :64], r1
+ vmin.s16 q1, q1, q7
+ vst1.16 {d2}, [r6, :64], r1
+ vst1.16 {d3}, [r6, :64], r1
+.endm
+ combine d31, d30, d29, d28, vqadd, r8
+ combine d27, d26, d25, d24, vqadd, r8
+ combine d23, d22, d21, d20, vqadd, r8
+ combine d19, d18, d17, d16, vqadd, r8
+ sub r7, r7, r8
+ combine d16, d17, d18, d19, vqsub, r9
+ combine d20, d21, d22, d23, vqsub, r9
+ combine d24, d25, d26, d27, vqsub, r9
+ combine d28, d29, d30, d31, vqsub, r9
+.purgem combine
+
+ pop {r10-r11,pc}
+endfunc
+
+const eob_32x32
+ .short 3, 10, 21, 36, 55, 78, 105, 136, 171, 210, 253, 300, 351, 406, 465, 1024
+endconst
+
+const eob_16x32
+ .short 3, 10, 21, 36, 55, 78, 105, 151, 183, 215, 247, 279, 311, 343, 375, 512
+endconst
+
+const eob_16x32_shortside
+ .short 3, 10, 21, 36, 55, 78, 105, 512
+endconst
+
+const eob_8x32
+ .short 3, 10, 21, 43, 59, 75, 91, 107, 123, 139, 155, 171, 187, 203, 219, 256
+endconst
+
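+// 32x32 identity: processed directly in 8x4 tiles (narrow, transpose, add
+// with the final shift); the eob_32x32 thresholds bound the loop in both
+// directions.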
+function inv_txfm_add_identity_identity_32x32_16bpc_neon, export=1
+ push {r4-r7,lr}
+ vpush {q6-q7}
+ movrel_local r5, eob_32x32, 2
+
+ mov r6, #4*32
+1:
+ mov r12, #0
+ movrel_local r4, eob_32x32, 6
+2:
+ vmov.i32 q0, #0
+ add r12, r12, #8
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q0}, [r2, :128], r6
+.endr
+ vqmovn.s32 d16, q8
+ vqmovn.s32 d17, q12
+ vqmovn.s32 d18, q9
+ vqmovn.s32 d19, q13
+ vqmovn.s32 d20, q10
+ vqmovn.s32 d21, q14
+ vqmovn.s32 d22, q11
+ vqmovn.s32 d23, q15
+ transpose_4x8h q8, q9, q10, q11
+
+ load_add_store_8x4 r0, r7, shiftbits=2
+ ldrh lr, [r4], #8
+ sub r0, r0, r1, lsl #2
+ cmp r3, lr
+ add r0, r0, #2*8
+ bge 2b
+
+ ldrh lr, [r5], #4
+ cmp r3, lr
+ blt 9f
+
+ sub r0, r0, r12, lsl #1
+ add r0, r0, r1, lsl #2
+ mls r2, r6, r12, r2
+ add r2, r2, #4*4
+ b 1b
+9:
+ vpop {q6-q7}
+ pop {r4-r7,pc}
+endfunc
+
+.macro shift_8_regs op, shift
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ \op \i, \i, #\shift
+.endr
+.endm
+
+.macro def_identity_1632 w, h, wshort, hshort
+function inv_txfm_add_identity_identity_\w\()x\h\()_16bpc_neon, export=1
+ push {r4-r9,lr}
+ vpush {q6-q7}
+ mov r9, #0
+ mov_const r8, 2896*8*(1<<16)
+ movt r9, #2*(5793-4096)*8
+ movrel_local r5, eob_16x32\hshort, 2
+
+ mov r6, #4*\h
+1:
+ mov r12, #0
+ movrel_local r4, eob_16x32\wshort, 6
+2:
+ vdup.i32 d0, r8
+ vmov.i32 q1, #0
+ vmov.32 d0[1], r9
+ add r12, r12, #8
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q1}, [r2, :128], r6
+.endr
+ scale_input d0[0], q8, q9, q10, q11, q12, q13, q14, q15
+
+.if \w == 16
+ // 16x32
+ identity_8x4_shift1 d0[1]
+.else
+ // 32x16
+ shift_8_regs vqshl.s32, 1
+ identity_8x4 d0[1]
+.endif
+ vqmovn.s32 d16, q8
+ vqmovn.s32 d17, q12
+ vqmovn.s32 d18, q9
+ vqmovn.s32 d19, q13
+ vqmovn.s32 d20, q10
+ vqmovn.s32 d21, q14
+ vqmovn.s32 d22, q11
+ vqmovn.s32 d23, q15
+ transpose_4x8h q8, q9, q10, q11
+
+.if \w == 16
+ load_add_store_8x4 r0, r7, shiftbits=2
+.else
+ load_add_store_8x4 r0, r7, shiftbits=4
+.endif
+ ldrh lr, [r4], #8
+ sub r0, r0, r1, lsl #2
+ cmp r3, lr
+ add r0, r0, #2*8
+ bge 2b
+
+ ldrh lr, [r5], #4
+ cmp r3, lr
+ blt 9f
+
+ sub r0, r0, r12, lsl #1
+ add r0, r0, r1, lsl #2
+ mls r2, r6, r12, r2
+ add r2, r2, #4*4
+ b 1b
+9:
+ vpop {q6-q7}
+ pop {r4-r9,pc}
+endfunc
+.endm
+
+def_identity_1632 16, 32, _shortside,
+def_identity_1632 32, 16, , _shortside
+
+.macro def_identity_832 w, h
+function inv_txfm_add_identity_identity_\w\()x\h\()_16bpc_neon, export=1
+ push {r4-r5,lr}
+ vpush {q6-q7}
+ movrel_local r4, eob_8x32, 2
+
+ mov r12, #4*\h
+1:
+ ldrh lr, [r4], #4
+.if \w == 8
+ vmov.i32 q0, #0
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q0}, [r2, :128], r12
+.endr
+
+ vqrshrn.s32 d16, q8, #1
+ vqrshrn.s32 d17, q12, #1
+ vqrshrn.s32 d18, q9, #1
+ vqrshrn.s32 d19, q13, #1
+ vqrshrn.s32 d20, q10, #1
+ vqrshrn.s32 d21, q14, #1
+ vqrshrn.s32 d22, q11, #1
+ vqrshrn.s32 d23, q15, #1
+
+ transpose_4x8h q8, q9, q10, q11
+
+ cmp r3, lr
+ load_add_store_8x4 r0, r5, shiftbits=2
+ blt 9f
+ sub r2, r2, r12, lsl #3
+ add r2, r2, #4*4
+.else
+ vmov.i32 q0, #0
+ vmov.i32 q1, #0
+ vld1.32 {q8, q9}, [r2, :128]
+ vst1.32 {q0, q1}, [r2, :128], r12
+ vld1.32 {q10, q11}, [r2, :128]
+ vst1.32 {q0, q1}, [r2, :128], r12
+ vld1.32 {q12, q13}, [r2, :128]
+ vst1.32 {q0, q1}, [r2, :128], r12
+ vld1.32 {q14, q15}, [r2, :128]
+ vst1.32 {q0, q1}, [r2, :128], r12
+ vqmovn.s32 d16, q8
+ vqmovn.s32 d17, q10
+ vqmovn.s32 d20, q9
+ vqmovn.s32 d21, q11
+ vqmovn.s32 d18, q12
+ vqmovn.s32 d19, q14
+ vqmovn.s32 d22, q13
+ vqmovn.s32 d23, q15
+
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+
+ cmp r3, lr
+ load_add_store_4x8 r0, r5, shiftbits=3
+ blt 9f
+ sub r0, r0, r1, lsl #3
+ add r0, r0, #2*4
+.endif
+ b 1b
+
+9:
+ vpop {q6-q7}
+ pop {r4-r5,pc}
+endfunc
+.endm
+
+def_identity_832 8, 32
+def_identity_832 32, 8
+
+function inv_txfm_add_dct_dct_32x32_16bpc_neon, export=1
+ idct_dc 32, 32, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ sub_sp_align 2048
+ movrel_local r10, eob_32x32
+ ldrh r11, [r10], #2
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+ add r6, sp, #(\i*32*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 30
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #32*4
+ bl inv_txfm_horz_dct_32x2_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r0, #(\i*2)
+ add r7, sp, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 2048
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_16x32_16bpc_neon, export=1
+ idct_dc 16, 32, 1
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ sub_sp_align 1024
+ movrel_local r10, eob_16x32
+ ldrh r11, [r10], #2
+ movrel_local r4, inv_dct_2s_x16_neon
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+ add r6, sp, #(\i*16*2)
+ add r7, r2, #(\i*4)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 30
+ ldrh r11, [r10], #2
+.endif
+.endif
+ mov r8, #4*32
+ bl inv_txfm_horz_scale_16x2_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 2
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12
+ add r6, r0, #(\i*2)
+ add r7, sp, #(\i*2)
+ mov r8, #16*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 1024
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_32x16_16bpc_neon, export=1
+ idct_dc 32, 16, 1
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ sub_sp_align 1024
+ movrel_local r10, eob_16x32
+ ldrh r11, [r10], #2
+ movrel r5, X(inv_dct_4h_x16_neon)
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14
+ add r6, sp, #(\i*32*2)
+ add r7, r2, #(\i*4)
+.if \i > 0
+ mov r8, #(16 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 14
+ ldrh r11, [r10], #2
+.endif
+.endif
+ mov r8, #4*16
+ bl inv_txfm_horz_scale_dct_32x2_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r6, r0, #(\i*2)
+ add r7, sp, #(\i*2)
+ mov r8, #32*2
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 1024
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_8x32_16bpc_neon, export=1
+ idct_dc 8, 32, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ sub_sp_align 512
+
+ movrel_local r10, eob_8x32, 2
+
+ mov r8, #4*32
+ mov r9, #32
+ mov r6, sp
+1:
+ vmov.i32 q0, #0
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.32 {\i}, [r2, :128]
+ vst1.32 {q0}, [r2, :128], r8
+.endr
+ ldrh r11, [r10], #4
+ sub r2, r2, r8, lsl #3
+ sub r9, r9, #4
+ add r2, r2, #4*4
+
+ bl inv_dct_4s_x8_neon
+
+ vqrshrn.s32 d16, q8, #2
+ vqrshrn.s32 d18, q9, #2
+ vqrshrn.s32 d20, q10, #2
+ vqrshrn.s32 d22, q11, #2
+ vqrshrn.s32 d17, q12, #2
+ vqrshrn.s32 d19, q13, #2
+ vqrshrn.s32 d21, q14, #2
+ vqrshrn.s32 d23, q15, #2
+
+ transpose_4x8h q8, q9, q10, q11
+
+ vst1.16 {q8, q9}, [r6, :128]!
+ cmp r3, r11
+ vst1.16 {q10, q11}, [r6, :128]!
+
+ bge 1b
+ cmp r9, #0
+ beq 3f
+
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r9, r9, #4
+.rept 2
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4
+ add r6, r0, #(\i*2)
+ add r7, sp, #(\i*2)
+ mov r8, #8*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 512
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_32x8_16bpc_neon, export=1
+ idct_dc 32, 8, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ movrel_local r10, eob_8x32
+ sub_sp_align 512
+ ldrh r11, [r10], #2
+
+.irp i, 0, 2, 4, 6
+ add r6, sp, #(\i*32*2)
+ add r7, r2, #(\i*4)
+.if \i > 0
+ cmp r3, r11
+ mov r8, #(8 - \i)
+ blt 1f
+.if \i < 6
+ ldrh r11, [r10], #2
+.endif
+.endif
+ mov r8, #8*4
+ bl inv_txfm_horz_dct_32x2_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+ mov r8, #2*32
+ mov r9, #0
+1:
+ add r6, r0, r9, lsl #1
+ add r7, sp, r9, lsl #1 // #(\i*2)
+
+.irp i, q8, q9, q10, q11, q12, q13, q14, q15
+ vld1.16 {\i}, [r7, :128], r8
+.endr
+ add r9, r9, #8
+
+ bl X(inv_dct_8h_x8_neon)
+
+ cmp r9, #32
+
+ load_add_store_8x8 r6, r7
+
+ blt 1b
+
+ add_sp_align 512
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
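+// First pass of the odd half of the 64-point DCT: each call processes one
+// group of four inputs (loaded into d16-d19 by the caller) into eight
+// outputs, written to the buffer at r6.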
+function inv_dct64_step1_neon
+ // in1/31/17/15 -> t32a/33/34a/35/60/61a/62/63a
+ // in7/25/23/ 9 -> t56a/57/58a/59/36/37a/38/39a
+ // in5/27/21/11 -> t40a/41/42a/43/52/53a/54/55a
+ // in3/29/19/13 -> t48a/49/50a/51/44/45a/46/47a
+
+ vld1.32 {q0, q1}, [r12, :128]!
+
+ vqrdmulh.s32 d23, d16, d0[1] // t63a
+ vqrdmulh.s32 d16, d16, d0[0] // t32a
+ vqrdmulh.s32 d22, d17, d1[0] // t62a
+ vqrdmulh.s32 d17, d17, d1[1] // t33a
+ vqrdmulh.s32 d21, d18, d2[1] // t61a
+ vqrdmulh.s32 d18, d18, d2[0] // t34a
+ vqrdmulh.s32 d20, d19, d3[0] // t60a
+ vqrdmulh.s32 d19, d19, d3[1] // t35a
+
+ vld1.32 {q0}, [r12, :128]!
+
+ vqadd.s32 d24, d16, d17 // t32
+ vqsub.s32 d25, d16, d17 // t33
+ vqsub.s32 d26, d19, d18 // t34
+ vqadd.s32 d27, d19, d18 // t35
+ vqadd.s32 d28, d20, d21 // t60
+ vqsub.s32 d29, d20, d21 // t61
+ vqsub.s32 d30, d23, d22 // t62
+ vqadd.s32 d31, d23, d22 // t63
+
+.irp r, q12, q13, q14, q15
+ vmin.s32 \r, \r, q5
+.endr
+.irp r, q12, q13, q14, q15
+ vmax.s32 \r, \r, q4
+.endr
+
+ vmul_vmla d4, d29, d26, d0[0], d0[1] // -> t34a
+ vmul_vmls d6, d29, d26, d0[1], d0[0] // -> t61a
+ vneg.s32 d4, d4 // t34a
+ vmul_vmls d7, d30, d25, d0[1], d0[0] // -> t33a
+ vrshr.s32 d26, d4, #12 // t34a
+ vmul_vmla d4, d30, d25, d0[0], d0[1] // -> t62a
+ vrshr.s32 d29, d6, #12 // t61a
+ vrshr.s32 d25, d7, #12 // t33a
+ vrshr.s32 d30, d4, #12 // t62a
+
+ vqadd.s32 d16, d24, d27 // t32a
+ vqsub.s32 d19, d24, d27 // t35a
+ vqadd.s32 d17, d25, d26 // t33
+ vqsub.s32 d18, d25, d26 // t34
+ vqsub.s32 d20, d31, d28 // t60a
+ vqadd.s32 d23, d31, d28 // t63a
+ vqsub.s32 d21, d30, d29 // t61
+ vqadd.s32 d22, d30, d29 // t62
+
+.irp r, q8, q9, q10, q11
+ vmin.s32 \r, \r, q5
+.endr
+.irp r, q8, q9, q10, q11
+ vmax.s32 \r, \r, q4
+.endr
+
+ vmul_vmla d4, d21, d18, d1[0], d1[1] // -> t61a
+ vmul_vmls d6, d21, d18, d1[1], d1[0] // -> t34a
+ vmul_vmla d7, d20, d19, d1[0], d1[1] // -> t60
+ vrshr.s32 d21, d4, #12 // t61a
+ vrshr.s32 d18, d6, #12 // t34a
+ vmul_vmls d4, d20, d19, d1[1], d1[0] // -> t35
+ vrshr.s32 d20, d7, #12 // t60
+ vrshr.s32 d19, d4, #12 // t35
+
+ vst1.32 {d16, d17, d18, d19}, [r6, :128]!
+ vst1.32 {d20, d21, d22, d23}, [r6, :128]!
+
+ bx lr
+endfunc
+
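+// Second pass over the t32..t63 buffer: combines the four step1 groups with
+// butterflies, walking r6 forwards and r9 backwards until the two pointers
+// meet.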
+function inv_dct64_step2_neon
+ movrel_local r12, idct_coeffs
+ vld1.32 {q0}, [r12, :128]
+1:
+ // t32a/33/34a/35/60/61a/62/63a
+ // t56a/57/58a/59/36/37a/38/39a
+ // t40a/41/42a/43/52/53a/54/55a
+ // t48a/49/50a/51/44/45a/46/47a
+ vldr d16, [r6, #4*2*0] // t32a
+ vldr d17, [r9, #4*2*8] // t39a
+ vldr d18, [r9, #4*2*0] // t63a
+ vldr d19, [r6, #4*2*8] // t56a
+ vldr d20, [r6, #4*2*16] // t40a
+ vldr d21, [r9, #4*2*24] // t47a
+ vldr d22, [r9, #4*2*16] // t55a
+ vldr d23, [r6, #4*2*24] // t48a
+
+ vqadd.s32 d24, d16, d17 // t32
+ vqsub.s32 d25, d16, d17 // t39
+ vqadd.s32 d26, d18, d19 // t63
+ vqsub.s32 d27, d18, d19 // t56
+ vqsub.s32 d28, d21, d20 // t40
+ vqadd.s32 d29, d21, d20 // t47
+ vqadd.s32 d30, d23, d22 // t48
+ vqsub.s32 d31, d23, d22 // t55
+
+.irp r, q12, q13, q14, q15
+ vmin.s32 \r, \r, q5
+.endr
+.irp r, q12, q13, q14, q15
+ vmax.s32 \r, \r, q4
+.endr
+
+ vmul_vmla d4, d27, d25, d1[1], d1[0] // -> t56a
+ vmul_vmls d6, d27, d25, d1[0], d1[1] // -> t39a
+ vmul_vmla d7, d31, d28, d1[1], d1[0] // -> t40a
+ vrshr.s32 d25, d4, #12 // t56a
+ vrshr.s32 d27, d6, #12 // t39a
+ vneg.s32 d7, d7 // t40a
+ vmul_vmls d4, d31, d28, d1[0], d1[1] // -> t55a
+ vrshr.s32 d31, d7, #12 // t40a
+ vrshr.s32 d28, d4, #12 // t55a
+
+ vqadd.s32 d16, d24, d29 // t32a
+ vqsub.s32 d19, d24, d29 // t47a
+ vqadd.s32 d17, d27, d31 // t39
+ vqsub.s32 d18, d27, d31 // t40
+ vqsub.s32 d20, d26, d30 // t48a
+ vqadd.s32 d23, d26, d30 // t63a
+ vqsub.s32 d21, d25, d28 // t55
+ vqadd.s32 d22, d25, d28 // t56
+
+.irp r, q8, q9, q10, q11
+ vmin.s32 \r, \r, q5
+.endr
+.irp r, q8, q9, q10, q11
+ vmax.s32 \r, \r, q4
+.endr
+
+ vmul_vmls d4, d21, d18, d0[0], d0[0] // -> t40a
+ vmul_vmla d6, d21, d18, d0[0], d0[0] // -> t55a
+ vmul_vmls d7, d20, d19, d0[0], d0[0] // -> t47
+ vrshr.s32 d18, d4, #12 // t40a
+ vrshr.s32 d21, d6, #12 // t55a
+ vmul_vmla d4, d20, d19, d0[0], d0[0] // -> t48
+ vrshr.s32 d19, d7, #12 // t47
+ vrshr.s32 d20, d4, #12 // t48
+
+ vstr d16, [r6, #4*2*0] // t32a
+ vstr d17, [r9, #4*2*0] // t39
+ vstr d18, [r6, #4*2*8] // t40a
+ vstr d19, [r9, #4*2*8] // t47
+ vstr d20, [r6, #4*2*16] // t48
+ vstr d21, [r9, #4*2*16] // t55a
+ vstr d22, [r6, #4*2*24] // t56
+ vstr d23, [r9, #4*2*24] // t63a
+
+ add r6, r6, #4*2
+ sub r9, r9, #4*2
+ cmp r6, r9
+ blt 1b
+ bx lr
+endfunc
+
+.macro load8 src, strd, zero, clear
+.irp i, d16, d17, d18, d19, d20, d21, d22, d23
+.if \clear
+ vld1.32 {\i}, [\src, :64]
+ vst1.32 {\zero}, [\src, :64], \strd
+.else
+ vld1.32 {\i}, [\src, :64], \strd
+.endif
+.endr
+.endm
+
+.macro store16 dst
+ vst1.32 {q8, q9}, [\dst, :128]!
+ vst1.32 {q10, q11}, [\dst, :128]!
+ vst1.32 {q12, q13}, [\dst, :128]!
+ vst1.32 {q14, q15}, [\dst, :128]!
+.endm
+
+.macro clear_upper8
+.irp i, q12, q13, q14, q15
+ vmov.i32 \i, #0
+.endr
+.endm
+
+.macro vmov_if reg, val, cond
+.if \cond
+ vmov.i32 \reg, \val
+.endif
+.endm
+
+.macro movdup_if reg, gpr, val, cond
+.if \cond
+ mov_const \gpr, \val
+ vdup.32 \reg, \gpr
+.endif
+.endm
+
+.macro vst1_if regs, dst, dstalign, cond
+.if \cond
+ vst1.32 \regs, \dst, \dstalign
+.endif
+.endm
+
+.macro scale_if cond, c, r0, r1, r2, r3, r4, r5, r6, r7
+.if \cond
+ scale_input \c, \r0, \r1, \r2, \r3, \r4, \r5, \r6, \r7
+.endif
+.endm
+
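+// 64-point row DCT: the even half (a 32-point DCT, built from inv_dct_2s_x16
+// and inv_dct32_odd_2s_x16) is written to the scratch buffer at sp, and the
+// odd half (t32..t63) is produced by four inv_dct64_step1 groups followed by
+// inv_dct64_step2. \clear zeroes coefficients as they are read; \scale
+// applies the rect2 input scaling.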
+.macro def_dct64_func suffix, clear=0, scale=0
+function inv_txfm_dct\suffix\()_2s_x64_neon
+ mov r6, sp
+
+ push {r10-r11,lr}
+
+ lsl r8, r8, #2
+
+ movdup_if d0, r12, 2896*8*(1<<16), \scale
+ vmov_if d7, #0, \clear
+ load8 r7, r8, d7, \clear
+ clear_upper8
+ sub r7, r7, r8, lsl #3
+ add r7, r7, r8, lsr #1
+ scale_if \scale, d0[0], q8, q9, q10, q11
+
+ bl inv_dct_2s_x16_neon
+
+ // idct_16 leaves the row_clip_max/min constants in d9 and d8,
+ // but here we want to use full q registers for clipping.
+ vmov.i32 q3, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 q2, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+.irp r, q8, q9, q10, q11, q12, q13, q14, q15
+ vmin.s32 \r, \r, q3
+.endr
+.irp r, q8, q9, q10, q11, q12, q13, q14, q15
+ vmax.s32 \r, \r, q2
+.endr
+
+ store16 r6
+
+ movdup_if d0, r12, 2896*8*(1<<16), \scale
+ vmov_if d7, #0, \clear
+ load8 r7, r8, d7, \clear
+ clear_upper8
+ sub r7, r7, r8, lsl #3
+ lsr r8, r8, #1
+ sub r7, r7, r8, lsr #1
+ scale_if \scale, d0[0], q8, q9, q10, q11
+
+ bl inv_dct32_odd_2s_x16_neon
+
+ add r10, r6, #8*15
+ sub r6, r6, #8*16
+
+ mov r9, #-8
+
+ vmov.i32 d1, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 d0, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+.macro store_addsub r0, r1, r2, r3
+ vld1.32 {d2}, [r6, :64]!
+ vld1.32 {d3}, [r6, :64]!
+ vqadd.s32 d6, d2, \r0
+ vqsub.s32 \r0, d2, \r0
+ vld1.32 {d4}, [r6, :64]!
+ vqadd.s32 d7, d3, \r1
+ vqsub.s32 \r1, d3, \r1
+ vmin.s32 d6, d6, d1
+ vmin.s32 \r0, \r0, d1
+ vld1.32 {d5}, [r6, :64]!
+ vqadd.s32 d2, d4, \r2
+ sub r6, r6, #8*4
+ vmax.s32 d6, d6, d0
+ vmax.s32 \r0, \r0, d0
+ vqsub.s32 \r2, d4, \r2
+ vmin.s32 d7, d7, d1
+ vmin.s32 \r1, \r1, d1
+ vst1.32 {d6}, [r6, :64]!
+ vst1.32 {\r0}, [r10, :64], r9
+ vmin.s32 d2, d2, d1
+ vmin.s32 \r2, \r2, d1
+ vmax.s32 d7, d7, d0
+ vmax.s32 \r1, \r1, d0
+ vqadd.s32 d3, d5, \r3
+ vqsub.s32 \r3, d5, \r3
+ vmax.s32 d2, d2, d0
+ vmax.s32 \r2, \r2, d0
+ vmin.s32 d3, d3, d1
+ vmin.s32 \r3, \r3, d1
+ vst1.32 {d7}, [r6, :64]!
+ vst1.32 {\r1}, [r10, :64], r9
+ vmax.s32 d3, d3, d0
+ vmax.s32 \r3, \r3, d0
+ vst1.32 {d2}, [r6, :64]!
+ vst1.32 {\r2}, [r10, :64], r9
+ vst1.32 {d3}, [r6, :64]!
+ vst1.32 {\r3}, [r10, :64], r9
+.endm
+ store_addsub d31, d30, d29, d28
+ store_addsub d27, d26, d25, d24
+ store_addsub d23, d22, d21, d20
+ store_addsub d19, d18, d17, d16
+.purgem store_addsub
+
+ add r6, r6, #2*4*16
+
+ movrel_local r12, idct64_coeffs
+ vmov.i32 q5, #0x1ffff // row_clip_max = ~(~bdmax << 7), 0x1ffff
+ vmvn.i32 q4, #0x1ffff // row_clip_min = (~bdmax << 7), 0xfffe0000
+ movdup_if d0, lr, 2896*8*(1<<16), \scale
+ vmov_if d7, #0, \clear
+ add r9, r7, r8, lsl #4 // offset 16
+ add r10, r7, r8, lsl #3 // offset 8
+ sub r9, r9, r8 // offset 15
+ sub r11, r10, r8 // offset 7
+ vld1.32 {d16}, [r7, :64] // in1 (offset 0)
+ vld1.32 {d17}, [r9, :64] // in31 (offset 15)
+ vld1.32 {d18}, [r10, :64] // in17 (offset 8)
+ vld1.32 {d19}, [r11, :64] // in15 (offset 7)
+ vst1_if {d7}, [r7, :64], \clear
+ vst1_if {d7}, [r9, :64], \clear
+ vst1_if {d7}, [r10, :64], \clear
+ vst1_if {d7}, [r11, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+ movdup_if d0, lr, 2896*8*(1<<16), \scale
+ vmov_if d7, #0, \clear
+ add r7, r7, r8, lsl #2 // offset 4
+ sub r9, r9, r8, lsl #2 // offset 11
+ sub r10, r7, r8 // offset 3
+ add r11, r9, r8 // offset 12
+ vld1.32 {d16}, [r10, :64] // in7 (offset 3)
+ vld1.32 {d17}, [r11, :64] // in25 (offset 12)
+ vld1.32 {d18}, [r9, :64] // in23 (offset 11)
+ vld1.32 {d19}, [r7, :64] // in9 (offset 4)
+ vst1_if {d7}, [r7, :64], \clear
+ vst1_if {d7}, [r9, :64], \clear
+ vst1_if {d7}, [r10, :64], \clear
+ vst1_if {d7}, [r11, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+ movdup_if d0, lr, 2896*8*(1<<16), \scale
+ vmov_if d7, #0, \clear
+ sub r10, r10, r8, lsl #1 // offset 1
+ sub r9, r9, r8, lsl #1 // offset 9
+ add r10, r10, r8 // offset 2
+ add r9, r9, r8 // offset 10
+ add r7, r7, r8 // offset 5
+ add r11, r11, r8 // offset 13
+ vld1.32 {d16}, [r10, :64] // in5 (offset 2)
+ vld1.32 {d17}, [r11, :64] // in27 (offset 13)
+ vld1.32 {d18}, [r9, :64] // in21 (offset 10)
+ vld1.32 {d19}, [r7, :64] // in11 (offset 5)
+ vst1_if {d7}, [r10, :64], \clear
+ vst1_if {d7}, [r11, :64], \clear
+ vst1_if {d7}, [r9, :64], \clear
+ vst1_if {d7}, [r7, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+ movdup_if d0, lr, 2896*8*(1<<16), \scale
+ vmov_if d7, #0, \clear
+ sub r10, r10, r8 // offset 1
+ sub r9, r9, r8 // offset 9
+ add r11, r11, r8 // offset 14
+ add r7, r7, r8 // offset 6
+ vld1.32 {d16}, [r10, :64] // in3 (offset 1)
+ vld1.32 {d17}, [r11, :64] // in29 (offset 14)
+ vld1.32 {d18}, [r9, :64] // in19 (offset 9)
+ vld1.32 {d19}, [r7, :64] // in13 (offset 6)
+ vst1_if {d7}, [r10, :64], \clear
+ vst1_if {d7}, [r11, :64], \clear
+ vst1_if {d7}, [r9, :64], \clear
+ vst1_if {d7}, [r7, :64], \clear
+ scale_if \scale, d0[0], q8, q9
+ bl inv_dct64_step1_neon
+
+ sub r6, r6, #2*4*32
+ add r9, r6, #2*4*7
+
+ bl inv_dct64_step2_neon
+
+ pop {r10-r11,pc}
+endfunc
+.endm
+
+def_dct64_func _clear, clear=1
+def_dct64_func _clear_scale, clear=1, scale=1
+
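+// Horizontal pass helper: combine the two halves of a 2-row x 64-column
+// intermediate held on the stack (final add/sub butterflies), apply the
+// rounding shift passed in r9 (negative, for vrshl), narrow to 16 bits and
+// store to the row buffer at r6, with the second half written mirrored.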
+function inv_txfm_horz_dct_64x2_neon
+ vdup.32 q4, r9
+
+ mov r7, sp
+ add r8, sp, #2*4*(64 - 4)
+ add r9, r6, #2*56
+
+ push {r10-r11,lr}
+
+ mov r10, #2*64
+ mov r11, #-2*4*4
+
+1:
+ vld1.32 {d16, d17, d18, d19}, [r7, :128]!
+ vld1.32 {d28, d29, d30, d31}, [r8, :128], r11
+ vld1.32 {d20, d21, d22, d23}, [r7, :128]!
+ vld1.32 {d24, d25, d26, d27}, [r8, :128], r11
+ vtrn.32 d16, d17
+ vtrn.32 d18, d19
+ vtrn.32 d20, d21
+ vtrn.32 d22, d23
+ vtrn.32 d31, d30
+ vtrn.32 d29, d28
+ vtrn.32 d27, d26
+ vtrn.32 d25, d24
+
+.macro store_addsub src0, src1, src2, src3, src4, src5, src6, src7
+ vqsub.s32 d7, \src0, \src1
+ vqsub.s32 d6, \src2, \src3
+ vqsub.s32 d5, \src4, \src5
+ vqsub.s32 d4, \src6, \src7
+ vqadd.s32 d0, \src0, \src1
+ vqadd.s32 d1, \src2, \src3
+ vqadd.s32 d2, \src4, \src5
+ vqadd.s32 d3, \src6, \src7
+ vrshl.s32 q3, q3, q4
+ vrshl.s32 q2, q2, q4
+ vrshl.s32 q0, q0, q4
+ vrshl.s32 q1, q1, q4
+ vqmovn.s32 d7, q3
+ vqmovn.s32 d6, q2
+ vqmovn.s32 d0, q0
+ vqmovn.s32 d1, q1
+ vrev32.16 q3, q3
+ vst1.16 {q0}, [r6, :128], r10
+ vst1.16 {q3}, [r9, :128], r10
+.endm
+ store_addsub d16, d31, d18, d29, d20, d27, d22, d25
+ store_addsub d17, d30, d19, d28, d21, d26, d23, d24
+.purgem store_addsub
+ sub r6, r6, r10, lsl #1
+ sub r9, r9, r10, lsl #1
+ add r6, r6, #16
+ sub r9, r9, #16
+
+ cmp r7, r8
+ blt 1b
+ pop {r10-r11,pc}
+endfunc
+
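+// Vertical pass helper: combine the two halves of a 4-column x 64-row
+// intermediate from the stack, round-shift by 4, add to the destination
+// rows at r6 (stride r1, second half written backwards from the last row)
+// and clamp to the valid pixel range.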
+function inv_txfm_add_vert_dct_4x64_neon
+ lsl r8, r8, #1
+
+ mov r7, sp
+ add r8, sp, #2*4*(64 - 4)
+ add r9, r6, r1, lsl #6
+ sub r9, r9, r1
+
+ push {r10-r11,lr}
+
+ neg r10, r1
+ mov r11, #-2*4*4
+
+1:
+ vld1.16 {d16, d17, d18, d19}, [r7, :128]!
+ vld1.16 {d28, d29, d30, d31}, [r8, :128], r11
+ vld1.16 {d20, d21, d22, d23}, [r7, :128]!
+ vld1.16 {d24, d25, d26, d27}, [r8, :128], r11
+
+ vmov.i16 q6, #0
+ vmvn.i16 q7, #0xfc00 // 0x3ff
+.macro add_dest_addsub src0, src1, src2, src3
+ vld1.16 {d0}, [r6, :64], r1
+ vld1.16 {d1}, [r9, :64], r10
+ vqadd.s16 d4, \src0, \src1
+ vld1.16 {d2}, [r6, :64]
+ vqsub.s16 d5, \src0, \src1
+ vld1.16 {d3}, [r9, :64]
+ vqadd.s16 d6, \src2, \src3
+ vqsub.s16 d7, \src2, \src3
+ sub r6, r6, r1
+ sub r9, r9, r10
+ vrshr.s16 q2, q2, #4
+ vrshr.s16 q3, q3, #4
+ vqadd.s16 q2, q2, q0
+ vqadd.s16 q3, q3, q1
+ vmax.s16 q2, q2, q6
+ vmax.s16 q3, q3, q6
+ vmin.s16 q2, q2, q7
+ vmin.s16 q3, q3, q7
+ vst1.16 {d4}, [r6, :64], r1
+ vst1.16 {d5}, [r9, :64], r10
+ vst1.16 {d6}, [r6, :64], r1
+ vst1.16 {d7}, [r9, :64], r10
+.endm
+ add_dest_addsub d16, d31, d17, d30
+ add_dest_addsub d18, d29, d19, d28
+ add_dest_addsub d20, d27, d21, d26
+ add_dest_addsub d22, d25, d23, d24
+.purgem add_dest_addsub
+ cmp r7, r8
+ blt 1b
+
+ pop {r10-r11,pc}
+endfunc
+
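+// 64x64: the horizontal pass transforms two rows at a time into a temporary
+// buffer on the stack (rows past the eob threshold are simply zero-filled),
+// then the vertical pass transforms four columns at a time and adds the
+// result to the destination.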
+function inv_txfm_add_dct_dct_64x64_16bpc_neon, export=1
+ idct_dc 64, 64, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+
+ sub_sp_align 64*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_32x32
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+ add r6, r5, #(\i*64*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #32*4
+ bl inv_txfm_dct_clear_2s_x64_neon
+ add r6, r5, #(\i*64*2)
+ mov r9, #-2 // shift
+ bl inv_txfm_horz_dct_64x2_neon
+.if \i < 30
+ ldrh r11, [r10], #2
+.endif
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 8
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
+ add r7, r5, #(\i*2)
+ mov r8, #64*2
+ bl X(inv_txfm_dct_4h_x64_neon)
+ add r6, r0, #(\i*2)
+ bl inv_txfm_add_vert_dct_4x64_neon
+.endr
+
+ add_sp_align 64*32*2+64*4*2
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_64x32_16bpc_neon, export=1
+ idct_dc 64, 32, 1
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+
+ sub_sp_align 64*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_32x32
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+ add r6, r5, #(\i*64*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #32*4
+ bl inv_txfm_dct_clear_scale_2s_x64_neon
+ add r6, r5, #(\i*64*2)
+ mov r9, #-1 // shift
+ bl inv_txfm_horz_dct_64x2_neon
+.if \i < 30
+ ldrh r11, [r10], #2
+.endif
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 8
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
+ add r6, r0, #(\i*2)
+ add r7, r5, #(\i*2)
+ mov r8, #64*2
+ bl inv_txfm_add_vert_dct_4x32_neon
+.endr
+
+ add_sp_align 64*32*2+64*4*2
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_32x64_16bpc_neon, export=1
+ idct_dc 32, 64, 1
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+
+ sub_sp_align 32*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_32x32
+ ldrh r11, [r10], #2
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+ add r6, r5, #(\i*32*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 30
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #32*4
+ bl inv_txfm_horz_scale_dct_32x2_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 4
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28
+ add r7, r5, #(\i*2)
+ mov r8, #32*2
+ bl X(inv_txfm_dct_4h_x64_neon)
+ add r6, r0, #(\i*2)
+ bl inv_txfm_add_vert_dct_4x64_neon
+.endr
+
+ add_sp_align 32*32*2+64*4*2
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_64x16_16bpc_neon, export=1
+ idct_dc 64, 16, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+
+ sub_sp_align 64*16*2+64*4*2
+ add r4, sp, #64*4*2
+
+ movrel_local r10, eob_16x32
+
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14
+ add r6, r4, #(\i*64*2)
+.if \i > 0
+ mov r8, #(16 - \i)
+ cmp r3, r11
+ blt 1f
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #16*4
+ bl inv_txfm_dct_clear_2s_x64_neon
+ add r6, r4, #(\i*64*2)
+ mov r9, #-2 // shift
+ bl inv_txfm_horz_dct_64x2_neon
+.if \i < 8
+ ldrh r11, [r10], #2
+.endif
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 8
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+ movrel r5, X(inv_dct_4h_x16_neon)
+.irp i, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60
+ add r6, r0, #(\i*2)
+ add r7, r4, #(\i*2)
+ mov r8, #64*2
+ bl inv_txfm_add_vert_4x16_neon
+.endr
+
+ add_sp_align 64*16*2+64*4*2
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+function inv_txfm_add_dct_dct_16x64_16bpc_neon, export=1
+ idct_dc 16, 64, 2
+
+ push {r4-r11,lr}
+ vpush {q4-q7}
+
+ sub_sp_align 16*32*2+64*4*2
+ add r5, sp, #64*4*2
+
+ movrel_local r10, eob_16x32
+ ldrh r11, [r10], #2
+
+ movrel_local r4, inv_dct_2s_x16_neon
+.irp i, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26, 28, 30
+ add r6, r5, #(\i*16*2)
+.if \i > 0
+ mov r8, #(32 - \i)
+ cmp r3, r11
+ blt 1f
+.if \i < 30
+ ldrh r11, [r10], #2
+.endif
+.endif
+ add r7, r2, #(\i*4)
+ mov r8, #32*4
+ bl inv_txfm_horz_16x2_neon
+.endr
+ b 3f
+
+1:
+ vmov.i16 q2, #0
+ vmov.i16 q3, #0
+2:
+ subs r8, r8, #2
+.rept 2
+ vst1.16 {q2, q3}, [r6, :128]!
+.endr
+ bgt 2b
+
+3:
+.irp i, 0, 4, 8, 12
+ add r7, r5, #(\i*2)
+ mov r8, #16*2
+ bl X(inv_txfm_dct_4h_x64_neon)
+ add r6, r0, #(\i*2)
+ bl inv_txfm_add_vert_dct_4x64_neon
+.endr
+
+ add_sp_align 16*32*2+64*4*2
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/loopfilter.S b/third_party/dav1d/src/arm/32/loopfilter.S
new file mode 100644
index 0000000000..97b960534f
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/loopfilter.S
@@ -0,0 +1,868 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2019, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
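+// Filter an edge of width \wd across 8 pixels. Pixels are expected as
+// p6..p0 in d17-d23 and q0..q6 in d24-d30 (only as many as \wd needs),
+// the E/I/H thresholds in d10/d11/d12 and the wd >= 4 / wd > 4 / wd == 16
+// masks in d13/d14/d15.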
+.macro loop_filter wd
+function lpf_8_wd\wd\()_neon
+ vabd.u8 d0, d22, d23 // abs(p1 - p0)
+ vabd.u8 d1, d25, d24 // abs(q1 - q0)
+ vabd.u8 d2, d23, d24 // abs(p0 - q0)
+ vabd.u8 d3, d22, d25 // abs(p1 - q1)
+.if \wd >= 6
+ vabd.u8 d4, d21, d22 // abs(p2 - p1)
+ vabd.u8 d5, d26, d25 // abs(q2 - q1)
+.endif
+.if \wd >= 8
+ vabd.u8 d6, d20, d21 // abs(p3 - p2)
+ vabd.u8 d7, d27, d26 // abs(q3 - q2)
+.endif
+.if \wd >= 6
+ vmax.u8 d4, d4, d5
+.endif
+ vqadd.u8 d2, d2, d2 // abs(p0 - q0) * 2
+.if \wd >= 8
+ vmax.u8 d6, d6, d7
+.endif
+ vshr.u8 d3, d3, #1
+.if \wd >= 8
+ vmax.u8 d4, d4, d6
+.endif
+.if \wd >= 6
+ vand d4, d4, d14
+.endif
+ vmax.u8 d0, d0, d1 // max(abs(p1 - p0), abs(q1 - q0))
+ vqadd.u8 d2, d2, d3 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
+.if \wd >= 6
+ vmax.u8 d4, d0, d4
+ vcge.u8 d1, d11, d4 // max(abs(p1 - p0), abs(q1 - q0), abs(p2 - p1), abs(q2 - q1), ...) <= I
+.else
+ vcge.u8 d1, d11, d0 // max(abs(p1 - p0), abs(q1 - q0)) <= I
+.endif
+ vcge.u8 d2, d10, d2 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1 <= E
+ vand d1, d1, d2 // fm
+ vand d1, d1, d13 // fm && wd >= 4
+.if \wd >= 6
+ vand d14, d14, d1 // fm && wd > 4
+.endif
+.if \wd >= 16
+ vand d15, d15, d1 // fm && wd == 16
+.endif
+
+ vmov r10, r11, d1
+ orrs r10, r10, r11
+ beq 9f // if (!fm || wd < 4) return;
+
+.if \wd >= 6
+ vmov.i8 d10, #1
+ vabd.u8 d2, d21, d23 // abs(p2 - p0)
+ vabd.u8 d3, d22, d23 // abs(p1 - p0)
+ vabd.u8 d4, d25, d24 // abs(q1 - q0)
+ vabd.u8 d5, d26, d24 // abs(q2 - q0)
+.if \wd >= 8
+ vabd.u8 d6, d20, d23 // abs(p3 - p0)
+ vabd.u8 d7, d27, d24 // abs(q3 - q0)
+.endif
+ vmax.u8 d2, d2, d3
+ vmax.u8 d4, d4, d5
+.if \wd >= 8
+ vmax.u8 d6, d6, d7
+.endif
+ vmax.u8 d2, d2, d4
+.if \wd >= 8
+ vmax.u8 d2, d2, d6
+.endif
+
+.if \wd == 16
+ vabd.u8 d3, d17, d23 // abs(p6 - p0)
+ vabd.u8 d4, d18, d23 // abs(p5 - p0)
+ vabd.u8 d5, d19, d23 // abs(p4 - p0)
+.endif
+ vcge.u8 d2, d10, d2 // flat8in
+.if \wd == 16
+ vabd.u8 d6, d28, d24 // abs(q4 - q0)
+ vabd.u8 d7, d29, d24 // abs(q5 - q0)
+ vabd.u8 d8, d30, d24 // abs(q6 - q0)
+.endif
+ vand d14, d2, d14 // flat8in && fm && wd > 4
+ vbic d1, d1, d14 // fm && wd >= 4 && !flat8in
+.if \wd == 16
+ vmax.u8 d3, d3, d4
+ vmax.u8 d5, d5, d6
+.endif
+ vmov r10, r11, d1
+.if \wd == 16
+ vmax.u8 d7, d7, d8
+ vmax.u8 d3, d3, d5
+ vmax.u8 d3, d3, d7
+ vcge.u8 d3, d10, d3 // flat8out
+.endif
+ orrs r10, r10, r11
+.if \wd == 16
+ vand d15, d15, d3 // flat8out && fm && wd == 16
+ vand d15, d15, d14 // flat8out && flat8in && fm && wd == 16
+ vbic d14, d14, d15 // flat8in && fm && wd >= 4 && !flat8out
+.endif
+ beq 1f // skip wd == 4 case
+.endif
+
+ vsubl.u8 q1, d22, d25 // p1 - q1
+ vcgt.u8 d0, d0, d12 // hev
+ vqmovn.s16 d2, q1
+ vand d4, d2, d0 // if (hev) iclip_diff(p1 - q1)
+ vbic d0, d1, d0 // (fm && wd >= 4 && !hev)
+ vsubl.u8 q1, d24, d23
+ vmov.i16 q3, #3
+ vmul.i16 q1, q1, q3
+ vmov.i8 d6, #4
+ vaddw.s8 q1, q1, d4
+ vmov.i8 d7, #3
+ vqmovn.s16 d2, q1 // f
+ vqadd.s8 d4, d6, d2 // imin(f + 4, 127)
+ vqadd.s8 d5, d7, d2 // imin(f + 3, 127)
+ vshr.s8 d4, d4, #3 // f1
+ vshr.s8 d5, d5, #3 // f2
+ vmovl.u8 q1, d23 // p0
+ vmovl.u8 q3, d24 // q0
+ vaddw.s8 q1, q1, d5
+ vsubw.s8 q3, q3, d4
+ vrshr.s8 d4, d4, #1 // (f1 + 1) >> 1
+ vqmovun.s16 d2, q1 // out p0
+ vqmovun.s16 d6, q3 // out q0
+ vbit d23, d2, d1 // if (fm && wd >= 4)
+ vmovl.u8 q1, d22 // p1
+ vbit d24, d6, d1 // if (fm && wd >= 4)
+ vmovl.u8 q3, d25 // q1
+ vaddw.s8 q1, q1, d4
+ vsubw.s8 q3, q3, d4
+ vqmovun.s16 d2, q1 // out p1
+ vqmovun.s16 d6, q3 // out q1
+ vbit d22, d2, d0 // if (fm && wd >= 4 && !hev)
+ vbit d25, d6, d0 // if (fm && wd >= 4 && !hev)
+1:
+
+.if \wd == 6
+ vmov r10, r11, d14
+ orrs r10, r10, r11
+ beq 2f // skip if there's no flat8in
+
+ vaddl.u8 q0, d21, d21 // p2 * 2
+ vaddl.u8 q1, d21, d22 // p2 + p1
+ vaddl.u8 q2, d22, d23 // p1 + p0
+ vaddl.u8 q3, d23, d24 // p0 + q0
+ vadd.i16 q4, q0, q1
+ vadd.i16 q5, q2, q3
+ vaddl.u8 q6, d24, d25 // q0 + q1
+ vadd.i16 q4, q4, q5
+ vsub.i16 q6, q6, q0
+ vaddl.u8 q5, d25, d26 // q1 + q2
+ vrshrn.i16 d0, q4, #3 // out p1
+
+ vadd.i16 q4, q4, q6
+ vsub.i16 q5, q5, q1
+ vaddl.u8 q6, d26, d26 // q2 + q2
+ vrshrn.i16 d1, q4, #3 // out p0
+
+ vadd.i16 q4, q4, q5
+ vsub.i16 q6, q6, q2
+ vrshrn.i16 d2, q4, #3 // out q0
+
+ vbit d22, d0, d14 // p1 if (flat8in)
+ vadd.i16 q4, q4, q6
+ vbit d23, d1, d14 // p0 if (flat8in)
+ vrshrn.i16 d3, q4, #3 // out q1
+ vbit d24, d2, d14 // q0 if (flat8in)
+ vbit d25, d3, d14 // q1 if (flat8in)
+.elseif \wd >= 8
+ vmov r10, r11, d14
+ orrs r10, r10, r11
+.if \wd == 8
+ beq 8f // skip if there's no flat8in
+.else
+ beq 2f // skip if there's no flat8in
+.endif
+
+ vaddl.u8 q0, d20, d21 // p3 + p2
+ vaddl.u8 q1, d22, d25 // p1 + q1
+ vaddl.u8 q2, d20, d22 // p3 + p1
+ vaddl.u8 q3, d23, d26 // p0 + q2
+ vadd.i16 q4, q0, q0 // 2 * (p3 + p2)
+ vaddw.u8 q4, q4, d23 // + p0
+ vaddw.u8 q4, q4, d24 // + q0
+ vadd.i16 q4, q4, q2 // + p3 + p1
+ vsub.i16 q1, q1, q0 // p1 + q1 - p3 - p2
+ vsub.i16 q3, q3, q2 // p0 + q2 - p3 - p1
+ vrshrn.i16 d10, q4, #3 // out p2
+
+ vadd.i16 q4, q4, q1
+ vaddl.u8 q0, d20, d23 // p3 + p0
+ vaddl.u8 q1, d24, d27 // q0 + q3
+ vrshrn.i16 d11, q4, #3 // out p1
+
+ vadd.i16 q4, q4, q3
+ vsub.i16 q1, q1, q0 // q0 + q3 - p3 - p0
+ vaddl.u8 q2, d21, d24 // p2 + q0
+ vaddl.u8 q3, d25, d27 // q1 + q3
+ vrshrn.i16 d12, q4, #3 // out p0
+
+ vadd.i16 q4, q4, q1
+ vsub.i16 q3, q3, q2 // q1 + q3 - p2 - q0
+ vaddl.u8 q0, d22, d25 // p1 + q1
+ vaddl.u8 q1, d26, d27 // q2 + q3
+ vrshrn.i16 d13, q4, #3 // out q0
+
+ vadd.i16 q4, q4, q3
+ vsub.i16 q1, q1, q0 // q2 + q3 - p1 - q1
+ vrshrn.i16 d0, q4, #3 // out q1
+
+ vadd.i16 q4, q4, q1
+
+ vbit d21, d10, d14
+ vbit d22, d11, d14
+ vbit d23, d12, d14
+ vrshrn.i16 d1, q4, #3 // out q2
+ vbit d24, d13, d14
+ vbit d25, d0, d14
+ vbit d26, d1, d14
+.endif
+2:
+.if \wd == 16
+ vmov r10, r11, d15
+ orrs r10, r10, r11
+ bne 1f // check if flat8out is needed
+ vmov r10, r11, d14
+ orrs r10, r10, r11
+ beq 8f // if there was no flat8in, just write the inner 4 pixels
+ b 7f // if flat8in was used, write the inner 6 pixels
+1:
+
+ vaddl.u8 q1, d17, d17 // p6 + p6
+ vaddl.u8 q2, d17, d18 // p6 + p5
+ vaddl.u8 q3, d17, d19 // p6 + p4
+ vaddl.u8 q4, d17, d20 // p6 + p3
+ vadd.i16 q6, q1, q2
+ vadd.i16 q5, q3, q4
+ vaddl.u8 q3, d17, d21 // p6 + p2
+ vadd.i16 q6, q6, q5
+ vaddl.u8 q4, d17, d22 // p6 + p1
+ vaddl.u8 q5, d18, d23 // p5 + p0
+ vadd.i16 q3, q3, q4
+ vaddl.u8 q4, d19, d24 // p4 + q0
+ vadd.i16 q6, q6, q3
+ vadd.i16 q5, q5, q4
+ vaddl.u8 q3, d20, d25 // p3 + q1
+ vadd.i16 q6, q6, q5
+ vsub.i16 q3, q3, q1
+ vaddl.u8 q1, d21, d26 // p2 + q2
+ vrshrn.i16 d0, q6, #4 // out p5
+ vadd.i16 q6, q6, q3 // - (p6 + p6) + (p3 + q1)
+ vsub.i16 q1, q1, q2
+ vaddl.u8 q2, d22, d27 // p1 + q3
+ vaddl.u8 q3, d17, d19 // p6 + p4
+ vrshrn.i16 d1, q6, #4 // out p4
+ vadd.i16 q6, q6, q1 // - (p6 + p5) + (p2 + q2)
+ vsub.i16 q2, q2, q3
+ vaddl.u8 q3, d23, d28 // p0 + q4
+ vaddl.u8 q4, d17, d20 // p6 + p3
+ vrshrn.i16 d2, q6, #4 // out p3
+ vadd.i16 q6, q6, q2 // - (p6 + p4) + (p1 + q3)
+ vsub.i16 q3, q3, q4
+ vaddl.u8 q4, d24, d29 // q0 + q5
+ vaddl.u8 q2, d17, d21 // p6 + p2
+ vrshrn.i16 d3, q6, #4 // out p2
+ vadd.i16 q6, q6, q3 // - (p6 + p3) + (p0 + q4)
+ vsub.i16 q4, q4, q2
+ vaddl.u8 q3, d25, d30 // q1 + q6
+ vaddl.u8 q5, d17, d22 // p6 + p1
+ vrshrn.i16 d4, q6, #4 // out p1
+ vadd.i16 q6, q6, q4 // - (p6 + p2) + (q0 + q5)
+ vsub.i16 q3, q3, q5
+ vaddl.u8 q4, d26, d30 // q2 + q6
+ vbif d0, d18, d15 // out p5
+ vaddl.u8 q5, d18, d23 // p5 + p0
+ vrshrn.i16 d5, q6, #4 // out p0
+ vadd.i16 q6, q6, q3 // - (p6 + p1) + (q1 + q6)
+ vsub.i16 q4, q4, q5
+ vaddl.u8 q5, d27, d30 // q3 + q6
+ vbif d1, d19, d15 // out p4
+ vaddl.u8 q9, d19, d24 // p4 + q0
+ vrshrn.i16 d6, q6, #4 // out q0
+ vadd.i16 q6, q6, q4 // - (p5 + p0) + (q2 + q6)
+ vsub.i16 q5, q5, q9
+ vaddl.u8 q4, d28, d30 // q4 + q6
+ vbif d2, d20, d15 // out p3
+ vaddl.u8 q9, d20, d25 // p3 + q1
+ vrshrn.i16 d7, q6, #4 // out q1
+ vadd.i16 q6, q6, q5 // - (p4 + q0) + (q3 + q6)
+ vsub.i16 q9, q4, q9
+ vaddl.u8 q5, d29, d30 // q5 + q6
+ vbif d3, d21, d15 // out p2
+ vaddl.u8 q10, d21, d26 // p2 + q2
+ vrshrn.i16 d8, q6, #4 // out q2
+ vadd.i16 q6, q6, q9 // - (p3 + q1) + (q4 + q6)
+ vsub.i16 q5, q5, q10
+ vaddl.u8 q9, d30, d30 // q6 + q6
+ vbif d4, d22, d15 // out p1
+ vaddl.u8 q10, d22, d27 // p1 + q3
+ vrshrn.i16 d9, q6, #4 // out q3
+ vadd.i16 q6, q6, q5 // - (p2 + q2) + (q5 + q6)
+ vsub.i16 q9, q9, q10
+ vbif d5, d23, d15 // out p0
+ vrshrn.i16 d10, q6, #4 // out q4
+ vadd.i16 q6, q6, q9 // - (p1 + q3) + (q6 + q6)
+ vrshrn.i16 d11, q6, #4 // out q5
+ vbif d6, d24, d15 // out q0
+ vbif d7, d25, d15 // out q1
+ vbif d8, d26, d15 // out q2
+ vbif d9, d27, d15 // out q3
+ vbif d10, d28, d15 // out q4
+ vbif d11, d29, d15 // out q5
+.endif
+
+ bx lr
+.if \wd == 16
+7:
+ // Return to a shorter epilogue, writing only the inner 6 pixels
+ bx r8
+.endif
+.if \wd >= 8
+8:
+ // Return to a shorter epilogue, writing only the inner 4 pixels
+ bx r9
+.endif
+9:
+ // Return directly without writing back any pixels
+ bx r12
+endfunc
+.endm
+
+loop_filter 16
+loop_filter 8
+loop_filter 6
+loop_filter 4
+
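+// These wrappers stash the addresses of the shorter write-back epilogues
+// (labels 7f/8f at the call site) in r8/r9 before calling the filter core,
+// which returns through them when only the inner pixels were filtered.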
+.macro lpf_8_wd16
+ adr r8, 7f + CONFIG_THUMB
+ adr r9, 8f + CONFIG_THUMB
+ bl lpf_8_wd16_neon
+.endm
+
+.macro lpf_8_wd8
+ adr r9, 8f + CONFIG_THUMB
+ bl lpf_8_wd8_neon
+.endm
+
+.macro lpf_8_wd6
+ bl lpf_8_wd6_neon
+.endm
+
+.macro lpf_8_wd4
+ bl lpf_8_wd4_neon
+.endm
+
+function lpf_v_4_8_neon
+ mov r12, lr
+ sub r10, r0, r1, lsl #1
+ vld1.8 {d22}, [r10, :64], r1 // p1
+ vld1.8 {d24}, [r0, :64], r1 // q0
+ vld1.8 {d23}, [r10, :64], r1 // p0
+ vld1.8 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+
+ lpf_8_wd4
+
+ sub r10, r0, r1, lsl #1
+ vst1.8 {d22}, [r10, :64], r1 // p1
+ vst1.8 {d24}, [r0, :64], r1 // q0
+ vst1.8 {d23}, [r10, :64], r1 // p0
+ vst1.8 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_4_8_neon
+ mov r12, lr
+ sub r10, r0, #2
+ add r0, r10, r1, lsl #2
+ vld1.32 {d22[0]}, [r10], r1
+ vld1.32 {d22[1]}, [r0], r1
+ vld1.32 {d23[0]}, [r10], r1
+ vld1.32 {d23[1]}, [r0], r1
+ vld1.32 {d24[0]}, [r10], r1
+ vld1.32 {d24[1]}, [r0], r1
+ vld1.32 {d25[0]}, [r10], r1
+ vld1.32 {d25[1]}, [r0], r1
+ add r0, r0, #2
+
+ transpose_4x8b q11, q12, d22, d23, d24, d25
+
+ lpf_8_wd4
+
+ sub r10, r0, r1, lsl #3
+ sub r10, r10, #2
+ transpose_4x8b q11, q12, d22, d23, d24, d25
+ add r0, r10, r1, lsl #2
+
+ vst1.32 {d22[0]}, [r10], r1
+ vst1.32 {d22[1]}, [r0], r1
+ vst1.32 {d23[0]}, [r10], r1
+ vst1.32 {d23[1]}, [r0], r1
+ vst1.32 {d24[0]}, [r10], r1
+ vst1.32 {d24[1]}, [r0], r1
+ vst1.32 {d25[0]}, [r10], r1
+ vst1.32 {d25[1]}, [r0], r1
+ add r0, r0, #2
+ bx r12
+endfunc
+
+function lpf_v_6_8_neon
+ mov r12, lr
+ sub r10, r0, r1, lsl #1
+ sub r10, r10, r1
+ vld1.8 {d21}, [r10, :64], r1 // p2
+ vld1.8 {d24}, [r0, :64], r1 // q0
+ vld1.8 {d22}, [r10, :64], r1 // p1
+ vld1.8 {d25}, [r0, :64], r1 // q1
+ vld1.8 {d23}, [r10, :64], r1 // p0
+ vld1.8 {d26}, [r0, :64], r1 // q2
+ sub r0, r0, r1, lsl #1
+ sub r0, r0, r1
+
+ lpf_8_wd6
+
+ sub r10, r0, r1, lsl #1
+ vst1.8 {d22}, [r10, :64], r1 // p1
+ vst1.8 {d24}, [r0, :64], r1 // q0
+ vst1.8 {d23}, [r10, :64], r1 // p0
+ vst1.8 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_6_8_neon
+ mov r12, lr
+ sub r10, r0, #4
+ add r0, r10, r1, lsl #2
+ vld1.8 {d20}, [r10], r1
+ vld1.8 {d24}, [r0], r1
+ vld1.8 {d21}, [r10], r1
+ vld1.8 {d25}, [r0], r1
+ vld1.8 {d22}, [r10], r1
+ vld1.8 {d26}, [r0], r1
+ vld1.8 {d23}, [r10], r1
+ vld1.8 {d27}, [r0], r1
+ add r0, r0, #4
+
+ transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
+
+ lpf_8_wd6
+
+ sub r10, r0, r1, lsl #3
+ sub r10, r10, #2
+ transpose_4x8b q11, q12, d22, d23, d24, d25
+ add r0, r10, r1, lsl #2
+
+ vst1.32 {d22[0]}, [r10], r1
+ vst1.32 {d22[1]}, [r0], r1
+ vst1.32 {d23[0]}, [r10], r1
+ vst1.32 {d23[1]}, [r0], r1
+ vst1.32 {d24[0]}, [r10], r1
+ vst1.32 {d24[1]}, [r0], r1
+ vst1.32 {d25[0]}, [r10], r1
+ vst1.32 {d25[1]}, [r0], r1
+ add r0, r0, #2
+ bx r12
+endfunc
+
+function lpf_v_8_8_neon
+ mov r12, lr
+ sub r10, r0, r1, lsl #2
+ vld1.8 {d20}, [r10, :64], r1 // p3
+ vld1.8 {d24}, [r0, :64], r1 // q0
+ vld1.8 {d21}, [r10, :64], r1 // p2
+ vld1.8 {d25}, [r0, :64], r1 // q1
+ vld1.8 {d22}, [r10, :64], r1 // p1
+ vld1.8 {d26}, [r0, :64], r1 // q2
+ vld1.8 {d23}, [r10, :64], r1 // p0
+ vld1.8 {d27}, [r0, :64], r1 // q3
+ sub r0, r0, r1, lsl #2
+
+ lpf_8_wd8
+
+ sub r10, r0, r1, lsl #1
+ sub r10, r10, r1
+ vst1.8 {d21}, [r10, :64], r1 // p2
+ vst1.8 {d24}, [r0, :64], r1 // q0
+ vst1.8 {d22}, [r10, :64], r1 // p1
+ vst1.8 {d25}, [r0, :64], r1 // q1
+ vst1.8 {d23}, [r10, :64], r1 // p0
+ vst1.8 {d26}, [r0, :64], r1 // q2
+ sub r0, r0, r1, lsl #1
+ sub r0, r0, r1
+ bx r12
+
+8:
+ sub r10, r0, r1, lsl #1
+ vst1.8 {d22}, [r10, :64], r1 // p1
+ vst1.8 {d24}, [r0, :64], r1 // q0
+ vst1.8 {d23}, [r10, :64], r1 // p0
+ vst1.8 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_8_8_neon
+ mov r12, lr
+ sub r10, r0, #4
+ add r0, r10, r1, lsl #2
+ vld1.8 {d20}, [r10], r1
+ vld1.8 {d24}, [r0], r1
+ vld1.8 {d21}, [r10], r1
+ vld1.8 {d25}, [r0], r1
+ vld1.8 {d22}, [r10], r1
+ vld1.8 {d26}, [r0], r1
+ vld1.8 {d23}, [r10], r1
+ vld1.8 {d27}, [r0], r1
+ add r0, r0, #4
+
+ transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
+
+ lpf_8_wd8
+
+ sub r10, r0, r1, lsl #3
+ sub r10, r10, #4
+ transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
+ add r0, r10, r1, lsl #2
+
+ vst1.8 {d20}, [r10], r1
+ vst1.8 {d24}, [r0], r1
+ vst1.8 {d21}, [r10], r1
+ vst1.8 {d25}, [r0], r1
+ vst1.8 {d22}, [r10], r1
+ vst1.8 {d26}, [r0], r1
+ vst1.8 {d23}, [r10], r1
+ vst1.8 {d27}, [r0], r1
+ add r0, r0, #4
+ bx r12
+8:
+ sub r10, r0, r1, lsl #3
+ sub r10, r10, #2
+ transpose_4x8b q11, q12, d22, d23, d24, d25
+ add r0, r10, r1, lsl #2
+
+ vst1.32 {d22[0]}, [r10], r1
+ vst1.32 {d22[1]}, [r0], r1
+ vst1.32 {d23[0]}, [r10], r1
+ vst1.32 {d23[1]}, [r0], r1
+ vst1.32 {d24[0]}, [r10], r1
+ vst1.32 {d24[1]}, [r0], r1
+ vst1.32 {d25[0]}, [r10], r1
+ vst1.32 {d25[1]}, [r0], r1
+ add r0, r0, #2
+ bx r12
+endfunc
+
+function lpf_v_16_8_neon
+ mov r12, lr
+
+ sub r10, r0, r1, lsl #3
+ add r10, r10, r1
+ vld1.8 {d17}, [r10, :64], r1 // p6
+ vld1.8 {d24}, [r0, :64], r1 // q0
+ vld1.8 {d18}, [r10, :64], r1 // p5
+ vld1.8 {d25}, [r0, :64], r1 // q1
+ vld1.8 {d19}, [r10, :64], r1 // p4
+ vld1.8 {d26}, [r0, :64], r1 // q2
+ vld1.8 {d20}, [r10, :64], r1 // p3
+ vld1.8 {d27}, [r0, :64], r1 // q3
+ vld1.8 {d21}, [r10, :64], r1 // p2
+ vld1.8 {d28}, [r0, :64], r1 // q4
+ vld1.8 {d22}, [r10, :64], r1 // p1
+ vld1.8 {d29}, [r0, :64], r1 // q5
+ vld1.8 {d23}, [r10, :64], r1 // p0
+ vld1.8 {d30}, [r0, :64], r1 // q6
+ sub r0, r0, r1, lsl #3
+ add r0, r0, r1
+
+ lpf_8_wd16
+
+ sub r10, r0, r1, lsl #2
+ sub r10, r10, r1, lsl #1
+ vst1.8 {d0}, [r10, :64], r1 // p5
+ vst1.8 {d6}, [r0, :64], r1 // q0
+ vst1.8 {d1}, [r10, :64], r1 // p4
+ vst1.8 {d7}, [r0, :64], r1 // q1
+ vst1.8 {d2}, [r10, :64], r1 // p3
+ vst1.8 {d8}, [r0, :64], r1 // q2
+ vst1.8 {d3}, [r10, :64], r1 // p2
+ vst1.8 {d9}, [r0, :64], r1 // q3
+ vst1.8 {d4}, [r10, :64], r1 // p1
+ vst1.8 {d10}, [r0, :64], r1 // q4
+ vst1.8 {d5}, [r10, :64], r1 // p0
+ vst1.8 {d11}, [r0, :64], r1 // q5
+ sub r0, r0, r1, lsl #2
+ sub r0, r0, r1, lsl #1
+ bx r12
+7:
+ sub r10, r0, r1
+ sub r10, r10, r1, lsl #1
+ vst1.8 {d21}, [r10, :64], r1 // p2
+ vst1.8 {d24}, [r0, :64], r1 // q0
+ vst1.8 {d22}, [r10, :64], r1 // p1
+ vst1.8 {d25}, [r0, :64], r1 // q1
+ vst1.8 {d23}, [r10, :64], r1 // p0
+ vst1.8 {d26}, [r0, :64], r1 // q2
+ sub r0, r0, r1, lsl #1
+ sub r0, r0, r1
+ bx r12
+
+8:
+ sub r10, r0, r1, lsl #1
+ vst1.8 {d22}, [r10, :64], r1 // p1
+ vst1.8 {d24}, [r0, :64], r1 // q0
+ vst1.8 {d23}, [r10, :64], r1 // p0
+ vst1.8 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_16_8_neon
+ mov r12, lr
+ sub r10, r0, #8
+ vld1.8 {d16}, [r10, :64], r1
+ vld1.8 {d24}, [r0, :64], r1
+ vld1.8 {d17}, [r10, :64], r1
+ vld1.8 {d25}, [r0, :64], r1
+ vld1.8 {d18}, [r10, :64], r1
+ vld1.8 {d26}, [r0, :64], r1
+ vld1.8 {d19}, [r10, :64], r1
+ vld1.8 {d27}, [r0, :64], r1
+ vld1.8 {d20}, [r10, :64], r1
+ vld1.8 {d28}, [r0, :64], r1
+ vld1.8 {d21}, [r10, :64], r1
+ vld1.8 {d29}, [r0, :64], r1
+ vld1.8 {d22}, [r10, :64], r1
+ vld1.8 {d30}, [r0, :64], r1
+ vld1.8 {d23}, [r10, :64], r1
+ vld1.8 {d31}, [r0, :64], r1
+
+ transpose_8x8b q8, q9, q10, q11, d16, d17, d18, d19, d20, d21, d22, d23
+ transpose_8x8b q12, q13, q14, q15, d24, d25, d26, d27, d28, d29, d30, d31
+
+ lpf_8_wd16
+
+ sub r0, r0, r1, lsl #3
+ sub r10, r0, #8
+
+ transpose_8x8b q8, q0, q1, q2, d16, d17, d0, d1, d2, d3, d4, d5
+ transpose_8x8b q3, q4, q5, q15, d6, d7, d8, d9, d10, d11, d30, d31
+
+ vst1.8 {d16}, [r10, :64], r1
+ vst1.8 {d6}, [r0, :64], r1
+ vst1.8 {d17}, [r10, :64], r1
+ vst1.8 {d7}, [r0, :64], r1
+ vst1.8 {d0}, [r10, :64], r1
+ vst1.8 {d8}, [r0, :64], r1
+ vst1.8 {d1}, [r10, :64], r1
+ vst1.8 {d9}, [r0, :64], r1
+ vst1.8 {d2}, [r10, :64], r1
+ vst1.8 {d10}, [r0, :64], r1
+ vst1.8 {d3}, [r10, :64], r1
+ vst1.8 {d11}, [r0, :64], r1
+ vst1.8 {d4}, [r10, :64], r1
+ vst1.8 {d30}, [r0, :64], r1
+ vst1.8 {d5}, [r10, :64], r1
+ vst1.8 {d31}, [r0, :64], r1
+ bx r12
+
+7:
+ sub r10, r0, r1, lsl #3
+ sub r10, r10, #4
+ transpose_8x8b q10, q11, q12, q13, d20, d21, d22, d23, d24, d25, d26, d27
+ add r0, r10, r1, lsl #2
+
+ vst1.8 {d20}, [r10], r1
+ vst1.8 {d24}, [r0], r1
+ vst1.8 {d21}, [r10], r1
+ vst1.8 {d25}, [r0], r1
+ vst1.8 {d22}, [r10], r1
+ vst1.8 {d26}, [r0], r1
+ vst1.8 {d23}, [r10], r1
+ vst1.8 {d27}, [r0], r1
+ add r0, r0, #4
+ bx r12
+8:
+ sub r10, r0, r1, lsl #3
+ sub r10, r10, #2
+ transpose_4x8b q11, q12, d22, d23, d24, d25
+ add r0, r10, r1, lsl #2
+
+ vst1.32 {d22[0]}, [r10], r1
+ vst1.32 {d22[1]}, [r0], r1
+ vst1.32 {d23[0]}, [r10], r1
+ vst1.32 {d23[1]}, [r0], r1
+ vst1.32 {d24[0]}, [r10], r1
+ vst1.32 {d24[1]}, [r0], r1
+ vst1.32 {d25[0]}, [r10], r1
+ vst1.32 {d25[1]}, [r0], r1
+ add r0, r0, #2
+ bx r12
+endfunc
+
+// void dav1d_lpf_v_sb_y_8bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const uint32_t *const vmask,
+// const uint8_t (*l)[4], ptrdiff_t b4_stride,
+// const Av1FilterLUT *lut, const int w)
+
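+// Each iteration of the loop below handles two 4-pixel filter units
+// (8 pixels along the edge), consuming two vmask bits at a time.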
+.macro lpf_func dir, type
+function lpf_\dir\()_sb_\type\()_8bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [r2] // vmask[0], vmask[1]
+.ifc \type, y
+ ldr r2, [r2, #8] // vmask[2]
+.endif
+ add r5, r5, #128 // Move to sharp part of lut
+.ifc \type, y
+ orr r7, r7, r2 // vmask[1] |= vmask[2]
+.endif
+.ifc \dir, v
+ sub r4, r3, r4, lsl #2
+.else
+ sub r3, r3, #4
+ lsl r4, r4, #2
+.endif
+ orr r6, r6, r7 // vmask[0] |= vmask[1]
+
+1:
+ tst r6, #0x03
+.ifc \dir, v
+ vld1.8 {d0}, [r4]!
+ vld1.8 {d1}, [r3]!
+.else
+ vld2.32 {d0[0], d1[0]}, [r3], r4
+ vld2.32 {d0[1], d1[1]}, [r3], r4
+.endif
+ beq 7f // if (!(vm & bits)) continue;
+
+ vld1.8 {d5[]}, [r5] // sharp[0]
+ add r5, r5, #8
+ vmov.i32 d2, #0xff
+ vdup.32 d13, r6 // vmask[0]
+
+ vand d0, d0, d2 // Keep only lowest byte in each 32 bit word
+ vand d1, d1, d2
+ vtst.8 d3, d1, d2 // Check for nonzero values in l[0][0]
+ vmov.i8 d4, #1
+ vld1.8 {d6[]}, [r5] // sharp[1]
+ sub r5, r5, #8
+ vbif d1, d0, d3 // if (!l[0][0]) L = l[offset][0]
+ vtst.32 d2, d1, d2 // L != 0
+ vmul.i32 d1, d1, d4 // L
+.ifc \type, y
+ vdup.32 d15, r2 // vmask[2]
+.endif
+ vdup.32 d14, r7 // vmask[1]
+ vmov r10, r11, d2
+ orrs r10, r10, r11
+ beq 7f // if (!L) continue;
+ vneg.s8 d5, d5 // -sharp[0]
+ movrel_local r10, word_12
+ vshr.u8 d12, d1, #4 // H
+ vld1.32 {d16}, [r10, :64]
+ vshl.s8 d3, d1, d5 // L >> sharp[0]
+.ifc \type, y
+ vtst.32 d15, d15, d16 // if (vmask[2] & bits)
+.endif
+ vmov.i8 d7, #2
+ vmin.u8 d3, d3, d6 // imin(L >> sharp[0], sharp[1])
+ vadd.i8 d0, d1, d7 // L + 2
+ vmax.u8 d11, d3, d4 // imax(imin(), 1) = limit = I
+ vadd.u8 d0, d0, d0 // 2*(L + 2)
+ vtst.32 d14, d14, d16 // if (vmask[1] & bits)
+ vadd.i8 d10, d0, d11 // 2*(L + 2) + limit = E
+ vtst.32 d13, d13, d16 // if (vmask[0] & bits)
+ vand d13, d13, d2 // vmask[0] &= L != 0
+
+.ifc \type, y
+ tst r2, #0x03
+ beq 2f
+ // wd16
+ bl lpf_\dir\()_16_8_neon
+ b 8f
+2:
+.endif
+ tst r7, #0x03
+ beq 3f
+.ifc \type, y
+ // wd8
+ bl lpf_\dir\()_8_8_neon
+.else
+ // wd6
+ bl lpf_\dir\()_6_8_neon
+.endif
+ b 8f
+3:
+ // wd4
+ bl lpf_\dir\()_4_8_neon
+.ifc \dir, h
+ b 8f
+7:
+ // For dir h, the functions above increment r0.
+ // If the whole function is skipped, increment it here instead.
+ add r0, r0, r1, lsl #3
+.else
+7:
+.endif
+8:
+ lsrs r6, r6, #2 // vmask[0] >>= 2
+ lsr r7, r7, #2 // vmask[1] >>= 2
+.ifc \type, y
+ lsr r2, r2, #2 // vmask[2] >>= 2
+.endif
+.ifc \dir, v
+ add r0, r0, #8
+.else
+ // For dir h, r0 is returned incremented
+.endif
+ bne 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+lpf_func v, y
+lpf_func h, y
+lpf_func v, uv
+lpf_func h, uv
+
+const word_12, align=4
+ .word 1, 2
+endconst
diff --git a/third_party/dav1d/src/arm/32/loopfilter16.S b/third_party/dav1d/src/arm/32/loopfilter16.S
new file mode 100644
index 0000000000..d7daf21f1a
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/loopfilter16.S
@@ -0,0 +1,859 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
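+// 16 bpc variant, filtering 4 pixels at a time. Pixels are expected as
+// p6..p0 in d17-d23 and q0..q6 in d24-d30 (only as many as \wd needs),
+// the E/I/H thresholds (pre-scaled by bitdepth_min_8) in d10/d11/d12;
+// r8 holds bitdepth_max and r9 bitdepth_min_8.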
+.macro loop_filter wd
+function lpf_4_wd\wd\()_neon
+ vabd.u16 d0, d22, d23 // abs(p1 - p0)
+ vabd.u16 d1, d25, d24 // abs(q1 - q0)
+ vabd.u16 d2, d23, d24 // abs(p0 - q0)
+ vabd.u16 d3, d22, d25 // abs(p1 - q1)
+.if \wd >= 6
+ vabd.u16 d4, d21, d22 // abs(p2 - p1)
+ vabd.u16 d5, d26, d25 // abs(q2 - q1)
+.endif
+.if \wd >= 8
+ vabd.u16 d6, d20, d21 // abs(p3 - p2)
+ vabd.u16 d7, d27, d26 // abs(q3 - q2)
+.endif
+.if \wd >= 6
+ vmax.u16 d4, d4, d5
+.endif
+ vqadd.u16 d2, d2, d2 // abs(p0 - q0) * 2
+.if \wd >= 8
+ vmax.u16 d6, d6, d7
+.endif
+ vshr.u16 d3, d3, #1
+.if \wd >= 8
+ vmax.u16 d4, d4, d6
+.endif
+ vmax.u16 d0, d0, d1 // max(abs(p1 - p0), abs(q1 - q0))
+ vqadd.u16 d2, d2, d3 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1
+.if \wd >= 6
+ vmax.u16 d4, d0, d4
+ vcge.u16 d1, d11, d4 // max(abs(p1 - p0), abs(q1 - q0), abs(p2 - p1), abs(q2 - q1), ...) <= I
+.else
+ vcge.u16 d1, d11, d0 // max(abs(p1 - p0), abs(q1 - q0)) <= I
+.endif
+ vcge.u16 d2, d10, d2 // abs(p0 - q0) * 2 + abs(p1 - q1) >> 1 <= E
+ vand d1, d1, d2 // fm && wd >= 4 (implicit)
+.if \wd >= 6
+ vmov d14, d1 // fm && wd > 4 (implicit)
+.endif
+.if \wd >= 16
+ vmov d15, d1 // fm && wd == 16 (implicit)
+.endif
+
+ vmov r10, r11, d1
+ orrs r10, r10, r11
+ beq 9f // if (!fm || wd < 4) return;
+
+.if \wd >= 6
+ vmov.i16 d10, #1
+ vabd.u16 d2, d21, d23 // abs(p2 - p0)
+ vabd.u16 d3, d22, d23 // abs(p1 - p0)
+ vabd.u16 d4, d25, d24 // abs(q1 - q0)
+ vabd.u16 d5, d26, d24 // abs(q2 - q0)
+ vdup.16 d9, r9 // bitdepth_min_8
+.if \wd >= 8
+ vabd.u16 d6, d20, d23 // abs(p3 - p0)
+ vabd.u16 d7, d27, d24 // abs(q3 - q0)
+.endif
+ vmax.u16 d2, d2, d3
+ vmax.u16 d4, d4, d5
+.if \wd >= 8
+ vmax.u16 d6, d6, d7
+.endif
+ vmax.u16 d2, d2, d4
+ vshl.u16 d10, d10, d9 // F = 1 << bitdepth_min_8
+.if \wd >= 8
+ vmax.u16 d2, d2, d6
+.endif
+
+.if \wd == 16
+ vabd.u16 d3, d17, d23 // abs(p6 - p0)
+ vabd.u16 d4, d18, d23 // abs(p5 - p0)
+ vabd.u16 d5, d19, d23 // abs(p4 - p0)
+.endif
+ vcge.u16 d2, d10, d2 // flat8in
+.if \wd == 16
+ vabd.u16 d6, d28, d24 // abs(q4 - q0)
+ vabd.u16 d7, d29, d24 // abs(q5 - q0)
+ vabd.u16 d8, d30, d24 // abs(q6 - q0)
+.endif
+ vand d14, d2, d14 // flat8in && fm && wd > 4
+ vbic d1, d1, d14 // fm && wd >= 4 && !flat8in
+.if \wd == 16
+ vmax.u16 d3, d3, d4
+ vmax.u16 d5, d5, d6
+.endif
+ vmov r10, r11, d1
+.if \wd == 16
+ vmax.u16 d7, d7, d8
+ vmax.u16 d3, d3, d5
+ vmax.u16 d3, d3, d7
+ vcge.u16 d3, d10, d3 // flat8out
+.endif
+ orrs r10, r10, r11
+.if \wd == 16
+ vand d15, d15, d3 // flat8out && fm && wd == 16
+ vand d15, d15, d14 // flat8out && flat8in && fm && wd == 16
+ vbic d14, d14, d15 // flat8in && fm && wd >= 4 && !flat8out
+.endif
+ beq 1f // skip wd == 4 case
+.endif
+
+ vdup.16 d3, r8 // bitdepth_max
+ vsub.u16 d2, d22, d25 // p1 - q1
+ vshr.u16 d3, d3, #1 // 128 << bitdepth_min_8 - 1
+ vcgt.u16 d0, d0, d12 // hev
+ vmvn d9, d3 // - 128 * (1 << bitdepth_min_8)
+ vmin.s16 d2, d2, d3 // iclip_diff(p1 - q1)
+ vmax.s16 d2, d2, d9 // iclip_diff(p1 - q1)
+ vand d4, d2, d0 // if (hev) iclip_diff(p1 - q1)
+ vsub.u16 d2, d24, d23
+ vmov.i16 d6, #3
+ vbic d0, d1, d0 // (fm && wd >= 4 && !hev)
+ vmul.i16 d2, d2, d6
+ vmov.i16 d7, #4
+ vadd.i16 d2, d2, d4
+ vmin.s16 d2, d2, d3 // f = iclip_diff()
+ vmax.s16 d2, d2, d9 // f = iclip_diff()
+ vqadd.s16 d4, d7, d2 // f + 4
+ vqadd.s16 d5, d6, d2 // f + 3
+ vmin.s16 d4, d4, d3 // imin(f + 4, 128 << bitdepth_min_8 - 1)
+ vmin.s16 d5, d5, d3 // imin(f + 3, 128 << bitdepth_min_8 - 1)
+ vshr.s16 d4, d4, #3 // f1
+ vshr.s16 d5, d5, #3 // f2
+ vmov.i16 d9, #0
+ vdup.16 d3, r8 // bitdepth_max
+ vqadd.s16 d2, d23, d5 // p0 + f2
+ vqsub.s16 d6, d24, d4 // q0 - f1
+ vrshr.s16 d4, d4, #1 // (f1 + 1) >> 1
+ vmin.s16 d2, d2, d3 // out p0 = iclip_pixel()
+ vmin.s16 d6, d6, d3 // out q0 = iclip_pixel()
+ vmax.s16 d2, d2, d9 // out p0 = iclip_pixel()
+ vmax.s16 d6, d6, d9 // out q0 = iclip_pixel()
+ vbit d23, d2, d1 // if (fm && wd >= 4)
+ vbit d24, d6, d1 // if (fm && wd >= 4)
+ vqadd.s16 d2, d22, d4 // p1 + f
+ vqsub.s16 d6, d25, d4 // q1 - f
+ vmin.s16 d2, d2, d3 // out p1 = iclip_pixel()
+ vmin.s16 d6, d6, d3 // out q1 = iclip_pixel()
+ vmax.s16 d2, d2, d9 // out p1 = iclip_pixel()
+ vmax.s16 d6, d6, d9 // out q1 = iclip_pixel()
+ vbit d22, d2, d0 // if (fm && wd >= 4 && !hev)
+ vbit d25, d6, d0 // if (fm && wd >= 4 && !hev)
+1:
+
+.if \wd == 6
+ vmov r10, r11, d14
+ orrs r10, r10, r11
+ beq 2f // skip if there's no flat8in
+
+ vadd.i16 d0, d21, d21 // p2 * 2
+ vadd.i16 d2, d21, d22 // p2 + p1
+ vadd.i16 d4, d22, d23 // p1 + p0
+ vadd.i16 d6, d23, d24 // p0 + q0
+ vadd.i16 d8, d0, d2
+ vadd.i16 d10, d4, d6
+ vadd.i16 d12, d24, d25 // q0 + q1
+ vadd.i16 d8, d8, d10
+ vsub.i16 d12, d12, d0
+ vadd.i16 d10, d25, d26 // q1 + q2
+ vrshr.u16 d0, d8, #3 // out p1
+
+ vadd.i16 d8, d8, d12
+ vsub.i16 d10, d10, d2
+ vadd.i16 d12, d26, d26 // q2 + q2
+ vrshr.u16 d1, d8, #3 // out p0
+
+ vadd.i16 d8, d8, d10
+ vsub.i16 d12, d12, d4
+ vrshr.u16 d2, d8, #3 // out q0
+
+ vbit d22, d0, d14 // p1 if (flat8in)
+ vadd.i16 d8, d8, d12
+ vbit d23, d1, d14 // p0 if (flat8in)
+ vrshr.u16 d3, d8, #3 // out q1
+ vbit d24, d2, d14 // q0 if (flat8in)
+ vbit d25, d3, d14 // q1 if (flat8in)
+.elseif \wd >= 8
+ vmov r10, r11, d14
+ orrs r10, r10, r11
+.if \wd == 8
+ beq 8f // skip if there's no flat8in
+.else
+ beq 2f // skip if there's no flat8in
+.endif
+
+ vadd.i16 d0, d20, d21 // p3 + p2
+ vadd.i16 d2, d22, d25 // p1 + q1
+ vadd.i16 d4, d20, d22 // p3 + p1
+ vadd.i16 d6, d23, d26 // p0 + q2
+ vadd.i16 d8, d0, d0 // 2 * (p3 + p2)
+ vadd.i16 d9, d23, d24 // p0 + q0
+ vadd.i16 d8, d8, d4 // + p3 + p1
+ vsub.i16 d2, d2, d0 // p1 + q1 - p3 - p2
+ vadd.i16 d8, d8, d9 // + p0 + q0
+ vsub.i16 d6, d6, d4 // p0 + q2 - p3 - p1
+ vrshr.u16 d10, d8, #3 // out p2
+
+ vadd.i16 d8, d8, d2
+ vadd.i16 d0, d20, d23 // p3 + p0
+ vadd.i16 d2, d24, d27 // q0 + q3
+ vrshr.u16 d11, d8, #3 // out p1
+
+ vadd.i16 d8, d8, d6
+ vsub.i16 d2, d2, d0 // q0 + q3 - p3 - p0
+ vadd.i16 d4, d21, d24 // p2 + q0
+ vadd.i16 d6, d25, d27 // q1 + q3
+ vrshr.u16 d12, d8, #3 // out p0
+
+ vadd.i16 d8, d8, d2
+ vsub.i16 d6, d6, d4 // q1 + q3 - p2 - q0
+ vadd.i16 d0, d22, d25 // p1 + q1
+ vadd.i16 d2, d26, d27 // q2 + q3
+ vrshr.u16 d13, d8, #3 // out q0
+
+ vadd.i16 d8, d8, d6
+ vsub.i16 d2, d2, d0 // q2 + q3 - p1 - q1
+ vrshr.u16 d0, d8, #3 // out q1
+
+ vadd.i16 d8, d8, d2
+
+ vbit d21, d10, d14
+ vbit d22, d11, d14
+ vbit d23, d12, d14
+ vrshr.u16 d1, d8, #3 // out q2
+ vbit d24, d13, d14
+ vbit d25, d0, d14
+ vbit d26, d1, d14
+.endif
+2:
+.if \wd == 16
+ vmov r10, r11, d15
+ orrs r10, r10, r11
+ bne 1f // check if flat8out is needed
+ vmov r10, r11, d14
+ orrs r10, r10, r11
+ beq 8f // if there was no flat8in, just write the inner 4 pixels
+ b 7f // if flat8in was used, write the inner 6 pixels
+1:
+
+ vadd.i16 d2, d17, d17 // p6 + p6
+ vadd.i16 d4, d17, d18 // p6 + p5
+ vadd.i16 d6, d17, d19 // p6 + p4
+ vadd.i16 d8, d17, d20 // p6 + p3
+ vadd.i16 d12, d2, d4
+ vadd.i16 d10, d6, d8
+ vadd.i16 d6, d17, d21 // p6 + p2
+ vadd.i16 d12, d12, d10
+ vadd.i16 d8, d17, d22 // p6 + p1
+ vadd.i16 d10, d18, d23 // p5 + p0
+ vadd.i16 d6, d6, d8
+ vadd.i16 d8, d19, d24 // p4 + q0
+ vadd.i16 d12, d12, d6
+ vadd.i16 d10, d10, d8
+ vadd.i16 d6, d20, d25 // p3 + q1
+ vadd.i16 d12, d12, d10
+ vsub.i16 d6, d6, d2
+ vadd.i16 d2, d21, d26 // p2 + q2
+ vrshr.u16 d0, d12, #4 // out p5
+ vadd.i16 d12, d12, d6 // - (p6 + p6) + (p3 + q1)
+ vsub.i16 d2, d2, d4
+ vadd.i16 d4, d22, d27 // p1 + q3
+ vadd.i16 d6, d17, d19 // p6 + p4
+ vrshr.u16 d1, d12, #4 // out p4
+ vadd.i16 d12, d12, d2 // - (p6 + p5) + (p2 + q2)
+ vsub.i16 d4, d4, d6
+ vadd.i16 d6, d23, d28 // p0 + q4
+ vadd.i16 d8, d17, d20 // p6 + p3
+ vrshr.u16 d2, d12, #4 // out p3
+ vadd.i16 d12, d12, d4 // - (p6 + p4) + (p1 + q3)
+ vsub.i16 d6, d6, d8
+ vadd.i16 d8, d24, d29 // q0 + q5
+ vadd.i16 d4, d17, d21 // p6 + p2
+ vrshr.u16 d3, d12, #4 // out p2
+ vadd.i16 d12, d12, d6 // - (p6 + p3) + (p0 + q4)
+ vsub.i16 d8, d8, d4
+ vadd.i16 d6, d25, d30 // q1 + q6
+ vadd.i16 d10, d17, d22 // p6 + p1
+ vrshr.u16 d4, d12, #4 // out p1
+ vadd.i16 d12, d12, d8 // - (p6 + p2) + (q0 + q5)
+ vsub.i16 d6, d6, d10
+ vadd.i16 d8, d26, d30 // q2 + q6
+ vbif d0, d18, d15 // out p5
+ vadd.i16 d10, d18, d23 // p5 + p0
+ vrshr.u16 d5, d12, #4 // out p0
+ vadd.i16 d12, d12, d6 // - (p6 + p1) + (q1 + q6)
+ vsub.i16 d8, d8, d10
+ vadd.i16 d10, d27, d30 // q3 + q6
+ vbif d1, d19, d15 // out p4
+ vadd.i16 d18, d19, d24 // p4 + q0
+ vrshr.u16 d6, d12, #4 // out q0
+ vadd.i16 d12, d12, d8 // - (p5 + p0) + (q2 + q6)
+ vsub.i16 d10, d10, d18
+ vadd.i16 d8, d28, d30 // q4 + q6
+ vbif d2, d20, d15 // out p3
+ vadd.i16 d18, d20, d25 // p3 + q1
+ vrshr.u16 d7, d12, #4 // out q1
+ vadd.i16 d12, d12, d10 // - (p4 + q0) + (q3 + q6)
+ vsub.i16 d18, d8, d18
+ vadd.i16 d10, d29, d30 // q5 + q6
+ vbif d3, d21, d15 // out p2
+ vadd.i16 d20, d21, d26 // p2 + q2
+ vrshr.u16 d8, d12, #4 // out q2
+ vadd.i16 d12, d12, d18 // - (p3 + q1) + (q4 + q6)
+ vsub.i16 d10, d10, d20
+ vadd.i16 d18, d30, d30 // q6 + q6
+ vbif d4, d22, d15 // out p1
+ vadd.i16 d20, d22, d27 // p1 + q3
+ vrshr.u16 d9, d12, #4 // out q3
+ vadd.i16 d12, d12, d10 // - (p2 + q2) + (q5 + q6)
+ vsub.i16 d18, d18, d20
+ vbif d5, d23, d15 // out p0
+ vrshr.u16 d10, d12, #4 // out q4
+ vadd.i16 d12, d12, d18 // - (p1 + q3) + (q6 + q6)
+ vrshr.u16 d11, d12, #4 // out q5
+ vbif d6, d24, d15 // out q0
+ vbif d7, d25, d15 // out q1
+ vbif d8, d26, d15 // out q2
+ vbif d9, d27, d15 // out q3
+ vbif d10, d28, d15 // out q4
+ vbif d11, d29, d15 // out q5
+.endif
+
+ bx lr
+.if \wd == 16
+7:
+ // Return to a shorter epilogue, writing only the inner 6 pixels
+ bx r6
+.endif
+.if \wd >= 8
+8:
+ // Return to a shorter epilogue, writing only the inner 4 pixels
+ bx r7
+.endif
+9:
+ // Return directly without writing back any pixels
+ bx r12
+endfunc
+.endm
+
+loop_filter 16
+loop_filter 8
+loop_filter 6
+loop_filter 4
+
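+// As in the 8 bpc version, these wrappers stash the call site's 7f/8f
+// epilogue addresses (here in r6/r7) so the filter core can return to a
+// shorter write-back path when only the inner pixels were filtered.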
+.macro lpf_4_wd16
+ adr r6, 7f + CONFIG_THUMB
+ adr r7, 8f + CONFIG_THUMB
+ bl lpf_4_wd16_neon
+.endm
+
+.macro lpf_4_wd8
+ adr r7, 8f + CONFIG_THUMB
+ bl lpf_4_wd8_neon
+.endm
+
+.macro lpf_4_wd6
+ bl lpf_4_wd6_neon
+.endm
+
+.macro lpf_4_wd4
+ bl lpf_4_wd4_neon
+.endm
+
+function lpf_v_4_4_neon
+ mov r12, lr
+ sub r10, r0, r1, lsl #1
+ vld1.16 {d22}, [r10, :64], r1 // p1
+ vld1.16 {d24}, [r0, :64], r1 // q0
+ vld1.16 {d23}, [r10, :64], r1 // p0
+ vld1.16 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+
+ lpf_4_wd4
+
+ sub r10, r0, r1, lsl #1
+ vst1.16 {d22}, [r10, :64], r1 // p1
+ vst1.16 {d24}, [r0, :64], r1 // q0
+ vst1.16 {d23}, [r10, :64], r1 // p0
+ vst1.16 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_4_4_neon
+ mov r12, lr
+ sub r10, r0, #4
+ add r0, r10, r1, lsl #1
+ vld1.16 {d22}, [r10], r1
+ vld1.16 {d24}, [r0], r1
+ vld1.16 {d23}, [r10], r1
+ vld1.16 {d25}, [r0], r1
+ add r0, r0, #4
+
+ transpose_4x4h q11, q12, d22, d23, d24, d25
+
+ lpf_4_wd4
+
+ sub r10, r0, r1, lsl #2
+ sub r10, r10, #4
+ transpose_4x4h q11, q12, d22, d23, d24, d25
+ add r0, r10, r1, lsl #1
+
+ vst1.16 {d22}, [r10], r1
+ vst1.16 {d24}, [r0], r1
+ vst1.16 {d23}, [r10], r1
+ vst1.16 {d25}, [r0], r1
+ add r0, r0, #4
+ bx r12
+endfunc
+
+function lpf_v_6_4_neon
+ mov r12, lr
+ sub r10, r0, r1, lsl #1
+ sub r10, r10, r1
+ vld1.16 {d21}, [r10, :64], r1 // p2
+ vld1.16 {d24}, [r0, :64], r1 // q0
+ vld1.16 {d22}, [r10, :64], r1 // p1
+ vld1.16 {d25}, [r0, :64], r1 // q1
+ vld1.16 {d23}, [r10, :64], r1 // p0
+ vld1.16 {d26}, [r0, :64], r1 // q2
+ sub r0, r0, r1, lsl #1
+ sub r0, r0, r1
+
+ lpf_4_wd6
+
+ sub r10, r0, r1, lsl #1
+ vst1.16 {d22}, [r10, :64], r1 // p1
+ vst1.16 {d24}, [r0, :64], r1 // q0
+ vst1.16 {d23}, [r10, :64], r1 // p0
+ vst1.16 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_6_4_neon
+ mov r12, lr
+ sub r10, r0, #8
+ vld1.16 {d20}, [r10, :64], r1
+ vld1.16 {d24}, [r0, :64], r1
+ vld1.16 {d21}, [r10, :64], r1
+ vld1.16 {d25}, [r0, :64], r1
+ vld1.16 {d22}, [r10, :64], r1
+ vld1.16 {d26}, [r0, :64], r1
+ vld1.16 {d23}, [r10, :64], r1
+ vld1.16 {d27}, [r0, :64], r1
+
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+
+ lpf_4_wd6
+
+ sub r0, r0, #4
+ transpose_4x4h q11, q12, d22, d23, d24, d25
+ sub r10, r0, r1, lsl #2
+ sub r0, r0, r1, lsl #1
+
+ vst1.16 {d22}, [r10], r1
+ vst1.16 {d24}, [r0], r1
+ vst1.16 {d23}, [r10], r1
+ vst1.16 {d25}, [r0], r1
+ add r0, r0, #4
+ bx r12
+endfunc
+
+function lpf_v_8_4_neon
+ mov r12, lr
+ sub r10, r0, r1, lsl #2
+ vld1.16 {d20}, [r10, :64], r1 // p3
+ vld1.16 {d24}, [r0, :64], r1 // q0
+ vld1.16 {d21}, [r10, :64], r1 // p2
+ vld1.16 {d25}, [r0, :64], r1 // q1
+ vld1.16 {d22}, [r10, :64], r1 // p1
+ vld1.16 {d26}, [r0, :64], r1 // q2
+ vld1.16 {d23}, [r10, :64], r1 // p0
+ vld1.16 {d27}, [r0, :64], r1 // q3
+ sub r0, r0, r1, lsl #2
+
+ lpf_4_wd8
+
+ sub r10, r0, r1, lsl #1
+ sub r10, r10, r1
+ vst1.16 {d21}, [r10, :64], r1 // p2
+ vst1.16 {d24}, [r0, :64], r1 // q0
+ vst1.16 {d22}, [r10, :64], r1 // p1
+ vst1.16 {d25}, [r0, :64], r1 // q1
+ vst1.16 {d23}, [r10, :64], r1 // p0
+ vst1.16 {d26}, [r0, :64], r1 // q2
+ sub r0, r0, r1, lsl #1
+ sub r0, r0, r1
+ bx r12
+
+8:
+ sub r10, r0, r1, lsl #1
+ vst1.16 {d22}, [r10, :64], r1 // p1
+ vst1.16 {d24}, [r0, :64], r1 // q0
+ vst1.16 {d23}, [r10, :64], r1 // p0
+ vst1.16 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_8_4_neon
+ mov r12, lr
+ sub r10, r0, #8
+ vld1.16 {d20}, [r10, :64], r1
+ vld1.16 {d24}, [r0, :64], r1
+ vld1.16 {d21}, [r10, :64], r1
+ vld1.16 {d25}, [r0, :64], r1
+ vld1.16 {d22}, [r10, :64], r1
+ vld1.16 {d26}, [r0, :64], r1
+ vld1.16 {d23}, [r10, :64], r1
+ vld1.16 {d27}, [r0, :64], r1
+
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+
+ lpf_4_wd8
+
+ sub r0, r0, r1, lsl #2
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+ sub r10, r0, #8
+
+ vst1.16 {d20}, [r10, :64], r1
+ vst1.16 {d24}, [r0, :64], r1
+ vst1.16 {d21}, [r10, :64], r1
+ vst1.16 {d25}, [r0, :64], r1
+ vst1.16 {d22}, [r10, :64], r1
+ vst1.16 {d26}, [r0, :64], r1
+ vst1.16 {d23}, [r10, :64], r1
+ vst1.16 {d27}, [r0, :64], r1
+ bx r12
+8:
+ sub r0, r0, #4
+ transpose_4x4h q11, q12, d22, d23, d24, d25
+ sub r10, r0, r1, lsl #2
+ sub r0, r0, r1, lsl #1
+
+ vst1.16 {d22}, [r10], r1
+ vst1.16 {d24}, [r0], r1
+ vst1.16 {d23}, [r10], r1
+ vst1.16 {d25}, [r0], r1
+ add r0, r0, #4
+ bx r12
+endfunc
+
+function lpf_v_16_4_neon
+ mov r12, lr
+
+ sub r10, r0, r1, lsl #3
+ add r10, r10, r1
+ vld1.16 {d17}, [r10, :64], r1 // p6
+ vld1.16 {d24}, [r0, :64], r1 // q0
+ vld1.16 {d18}, [r10, :64], r1 // p5
+ vld1.16 {d25}, [r0, :64], r1 // q1
+ vld1.16 {d19}, [r10, :64], r1 // p4
+ vld1.16 {d26}, [r0, :64], r1 // q2
+ vld1.16 {d20}, [r10, :64], r1 // p3
+ vld1.16 {d27}, [r0, :64], r1 // q3
+ vld1.16 {d21}, [r10, :64], r1 // p2
+ vld1.16 {d28}, [r0, :64], r1 // q4
+ vld1.16 {d22}, [r10, :64], r1 // p1
+ vld1.16 {d29}, [r0, :64], r1 // q5
+ vld1.16 {d23}, [r10, :64], r1 // p0
+ vld1.16 {d30}, [r0, :64], r1 // q6
+ sub r0, r0, r1, lsl #3
+ add r0, r0, r1
+
+ lpf_4_wd16
+
+ sub r10, r0, r1, lsl #2
+ sub r10, r10, r1, lsl #1
+ vst1.16 {d0}, [r10, :64], r1 // p5
+ vst1.16 {d6}, [r0, :64], r1 // q0
+ vst1.16 {d1}, [r10, :64], r1 // p4
+ vst1.16 {d7}, [r0, :64], r1 // q1
+ vst1.16 {d2}, [r10, :64], r1 // p3
+ vst1.16 {d8}, [r0, :64], r1 // q2
+ vst1.16 {d3}, [r10, :64], r1 // p2
+ vst1.16 {d9}, [r0, :64], r1 // q3
+ vst1.16 {d4}, [r10, :64], r1 // p1
+ vst1.16 {d10}, [r0, :64], r1 // q4
+ vst1.16 {d5}, [r10, :64], r1 // p0
+ vst1.16 {d11}, [r0, :64], r1 // q5
+ sub r0, r0, r1, lsl #2
+ sub r0, r0, r1, lsl #1
+ bx r12
+7:
+ sub r10, r0, r1
+ sub r10, r10, r1, lsl #1
+ vst1.16 {d21}, [r10, :64], r1 // p2
+ vst1.16 {d24}, [r0, :64], r1 // q0
+ vst1.16 {d22}, [r10, :64], r1 // p1
+ vst1.16 {d25}, [r0, :64], r1 // q1
+ vst1.16 {d23}, [r10, :64], r1 // p0
+ vst1.16 {d26}, [r0, :64], r1 // q2
+ sub r0, r0, r1, lsl #1
+ sub r0, r0, r1
+ bx r12
+
+8:
+ sub r10, r0, r1, lsl #1
+ vst1.16 {d22}, [r10, :64], r1 // p1
+ vst1.16 {d24}, [r0, :64], r1 // q0
+ vst1.16 {d23}, [r10, :64], r1 // p0
+ vst1.16 {d25}, [r0, :64], r1 // q1
+ sub r0, r0, r1, lsl #1
+ bx r12
+endfunc
+
+function lpf_h_16_4_neon
+ mov r12, lr
+ sub r10, r0, #16
+ sub r0, r0, #8
+ vld1.16 {d16}, [r10, :64], r1
+ vld1.16 {d20}, [r0, :64], r1
+ vld1.16 {d17}, [r10, :64], r1
+ vld1.16 {d21}, [r0, :64], r1
+ vld1.16 {d18}, [r10, :64], r1
+ vld1.16 {d22}, [r0, :64], r1
+ vld1.16 {d19}, [r10, :64], r1
+ vld1.16 {d23}, [r0, :64], r1
+ sub r10, r10, r1, lsl #2
+ sub r0, r0, r1, lsl #2
+ add r10, r10, #16
+ add r0, r0, #16
+ vld1.16 {d24}, [r10, :64], r1
+ vld1.16 {d28}, [r0, :64], r1
+ vld1.16 {d25}, [r10, :64], r1
+ vld1.16 {d29}, [r0, :64], r1
+ vld1.16 {d26}, [r10, :64], r1
+ vld1.16 {d30}, [r0, :64], r1
+ vld1.16 {d27}, [r10, :64], r1
+ vld1.16 {d31}, [r0, :64], r1
+ sub r0, r0, #8
+
+ transpose_4x4h q8, q9, d16, d17, d18, d19
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+ transpose_4x4h q14, q15, d28, d29, d30, d31
+
+ lpf_4_wd16
+
+ sub r0, r0, r1, lsl #2
+ transpose_4x4h q8, q0, d16, d17, d0, d1
+ transpose_4x4h q1, q2, d2, d3, d4, d5
+ transpose_4x4h q3, q4, d6, d7, d8, d9
+ transpose_4x4h q5, q15, d10, d11, d30, d31
+ sub r10, r0, #16
+ sub r0, r0, #8
+
+ vst1.16 {d16}, [r10, :64], r1
+ vst1.16 {d2}, [r0, :64], r1
+ vst1.16 {d17}, [r10, :64], r1
+ vst1.16 {d3}, [r0, :64], r1
+ vst1.16 {d0}, [r10, :64], r1
+ vst1.16 {d4}, [r0, :64], r1
+ vst1.16 {d1}, [r10, :64], r1
+ vst1.16 {d5}, [r0, :64], r1
+ sub r10, r10, r1, lsl #2
+ sub r0, r0, r1, lsl #2
+ add r10, r10, #16
+ add r0, r0, #16
+ vst1.16 {d6}, [r10, :64], r1
+ vst1.16 {d10}, [r0, :64], r1
+ vst1.16 {d7}, [r10, :64], r1
+ vst1.16 {d11}, [r0, :64], r1
+ vst1.16 {d8}, [r10, :64], r1
+ vst1.16 {d30}, [r0, :64], r1
+ vst1.16 {d9}, [r10, :64], r1
+ vst1.16 {d31}, [r0, :64], r1
+ sub r0, r0, #8
+
+ bx r12
+
+7:
+ sub r0, r0, r1, lsl #2
+ transpose_4x4h q10, q11, d20, d21, d22, d23
+ transpose_4x4h q12, q13, d24, d25, d26, d27
+ sub r10, r0, #8
+
+ vst1.16 {d20}, [r10, :64], r1
+ vst1.16 {d24}, [r0, :64], r1
+ vst1.16 {d21}, [r10, :64], r1
+ vst1.16 {d25}, [r0, :64], r1
+ vst1.16 {d22}, [r10, :64], r1
+ vst1.16 {d26}, [r0, :64], r1
+ vst1.16 {d23}, [r10, :64], r1
+ vst1.16 {d27}, [r0, :64], r1
+ bx r12
+8:
+ sub r0, r0, #4
+ transpose_4x4h q11, q12, d22, d23, d24, d25
+ sub r10, r0, r1, lsl #2
+ sub r0, r0, r1, lsl #1
+
+ vst1.16 {d22}, [r10], r1
+ vst1.16 {d24}, [r0], r1
+ vst1.16 {d23}, [r10], r1
+ vst1.16 {d25}, [r0], r1
+ add r0, r0, #4
+ bx r12
+endfunc
+
+// void dav1d_lpf_v_sb_y_16bpc_neon(pixel *dst, const ptrdiff_t stride,
+// const uint32_t *const vmask,
+// const uint8_t (*l)[4], ptrdiff_t b4_stride,
+// const Av1FilterLUT *lut, const int w,
+// const int bitdepth_max)
+
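+// The 16 bpc version handles one 4-pixel filter unit per iteration,
+// consuming one vmask bit at a time.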
+.macro lpf_func dir, type
+function lpf_\dir\()_sb_\type\()_16bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldr r8, [sp, #112] // bitdepth_max; the 'w' parameter isn't loaded
+ sub sp, sp, #8
+ clz r9, r8
+ rsb r9, r9, #24 // bitdepth_min_8
+ ldrd r6, r7, [r2] // vmask[0], vmask[1]
+.ifc \type, y
+ ldr r2, [r2, #8] // vmask[2]
+.endif
+ add r5, r5, #128 // Move to sharp part of lut
+.ifc \type, y
+ orr r7, r7, r2 // vmask[1] |= vmask[2]
+.endif
+.ifc \dir, v
+ sub r4, r3, r4, lsl #2
+.else
+ sub r3, r3, #4
+ lsl r4, r4, #2
+.endif
+ orr r6, r6, r7 // vmask[0] |= vmask[1]
+
+1:
+ tst r6, #0x01
+ strd r6, r7, [sp]
+.ifc \dir, v
+ ldrb r10, [r4], #4
+ ldrb r11, [r3], #4
+.else
+ ldrb r10, [r3]
+ ldrb r11, [r3, #4]
+ add r3, r3, r4
+.endif
+ beq 7f // if (!(vm & bits)) continue;
+
+ orrs r12, r10, r11
+ vdup.16 d31, r9 // bitdepth_min_8
+ beq 7f // if (!(l[0][0] | l[offset][0])) continue;
+ cmp r11, #0 // Check for nonzero values in l[0][0]
+ ldrb r6, [r5], #8 // sharp[0]
+ it eq
+ moveq r11, r10 // if (!l[0][0]) L = l[offset][0]
+ ldrb r12, [r5] // sharp[1]
+ lsr r6, r11, r6 // L >> sharp[0]
+ sub r5, r5, #8
+ cmp r12, r6
+ lsr r10, r11, #4 // H
+ add r11, r11, #2 // L + 2
+ it lt
+ movlt r6, r12 // imin(L >> sharp[0], sharp[1])
+ add r11, r11, r11 // 2*(L + 2)
+ cmp r6, #1
+ lsl r10, r10, r9 // H << bitdepth_min_8
+ it lt
+ movlt r6, #1 // imax(imin(), 1) = limit = I
+ vdup.16 d12, r10 // H << bitdepth_min_8
+ add r11, r11, r6 // 2*(L + 2) + limit = E
+ lsl r6, r6, r9 // I << bitdepth_min_8
+ lsl r11, r11, r9 // E << bitdepth_min_8
+ vdup.16 d11, r6 // I << bitdepth_min_8
+ vdup.16 d10, r11 // E << bitdepth_min_8
+
+.ifc \type, y
+ tst r2, #0x01
+ beq 2f
+ // wd16
+ bl lpf_\dir\()_16_4_neon
+ b 8f
+2:
+.endif
+ tst r7, #0x01
+ beq 3f
+.ifc \type, y
+ // wd8
+ bl lpf_\dir\()_8_4_neon
+.else
+ // wd6
+ bl lpf_\dir\()_6_4_neon
+.endif
+ b 8f
+3:
+ // wd4
+ bl lpf_\dir\()_4_4_neon
+.ifc \dir, h
+ b 8f
+7:
+ // For dir h, the functions above increment r0.
+ // If the whole function is skipped, increment it here instead.
+ add r0, r0, r1, lsl #2
+.else
+7:
+.endif
+8:
+ ldrd r6, r7, [sp]
+.ifc \type, y
+ lsr r2, r2, #1 // vmask[2] >>= 1
+.endif
+.ifc \dir, v
+ add r0, r0, #8
+.else
+ // For dir h, r0 is returned incremented
+.endif
+ lsrs r6, r6, #1 // vmask[0] >>= 1
+ lsr r7, r7, #1 // vmask[1] >>= 1
+ bne 1b
+
+ add sp, sp, #8
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+lpf_func v, y
+lpf_func h, y
+lpf_func v, uv
+lpf_func h, uv
diff --git a/third_party/dav1d/src/arm/32/looprestoration.S b/third_party/dav1d/src/arm/32/looprestoration.S
new file mode 100644
index 0000000000..be5c658d6d
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/looprestoration.S
@@ -0,0 +1,791 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2019, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+const right_ext_mask_buf
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+right_ext_mask:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+endconst
+
+// void dav1d_wiener_filter_h_8bpc_neon(int16_t *dst, const pixel (*left)[4],
+// const pixel *src, ptrdiff_t stride,
+// const int16_t fh[7], const intptr_t w,
+// int h, enum LrEdgeFlags edges);
+function wiener_filter_h_8bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [sp, #108]
+ mov r8, r5
+ vld1.16 {q0}, [r4, :128]
+ movw r9, #(1 << 14) - (1 << 2)
+ vdup.16 q14, r9
+ vmov.s16 q15, #2048
+ // Calculate mid_stride
+ add r10, r5, #7
+ bic r10, r10, #7
+ lsl r10, r10, #1
+
+ // Set up pointers for reading/writing alternate rows
+ add r12, r0, r10
+ lsl r10, r10, #1
+ add lr, r2, r3
+ lsl r3, r3, #1
+
+ // Subtract the aligned width from mid_stride
+ add r11, r5, #7
+ bic r11, r11, #7
+ sub r10, r10, r11, lsl #1
+
+ // Subtract the number of pixels read from the source stride
+ add r11, r11, #8
+ sub r3, r3, r11
+
+ // Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 2f
+ // LR_HAVE_LEFT
+ cmp r1, #0
+ bne 0f
+ // left == NULL
+ sub r2, r2, #3
+ sub lr, lr, #3
+ b 1f
+0: // LR_HAVE_LEFT, left != NULL
+2: // !LR_HAVE_LEFT, increase the stride.
+ // For this case we don't read the left 3 pixels from the src pointer,
+ // but shift it as if we had done that.
+ add r3, r3, #3
+
+
+1: // Loop vertically
+ vld1.8 {q2}, [r2]!
+ vld1.8 {q9}, [lr]!
+
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 0f
+ cmp r1, #0
+ beq 2f
+ // LR_HAVE_LEFT, left != NULL
+ vld1.32 {d3[1]}, [r1]!
+ // Move r2/lr back to account for the last 3 bytes we loaded earlier,
+ // which we'll shift out.
+ sub r2, r2, #3
+ sub lr, lr, #3
+ vld1.32 {d17[1]}, [r1]!
+ vext.8 q2, q1, q2, #13
+ vext.8 q9, q8, q9, #13
+ b 2f
+0:
+ // !LR_HAVE_LEFT, fill q1 with the leftmost byte
+ // and shift q2 to have 3x the first byte at the front.
+ vdup.8 q1, d4[0]
+ vdup.8 q8, d18[0]
+ // Move r2 back to account for the last 3 bytes we loaded before,
+ // which we shifted out.
+ sub r2, r2, #3
+ sub lr, lr, #3
+ vext.8 q2, q1, q2, #13
+ vext.8 q9, q8, q9, #13
+
+2:
+ vmovl.u8 q1, d4
+ vmovl.u8 q2, d5
+ vmovl.u8 q8, d18
+ vmovl.u8 q9, d19
+
+ tst r7, #2 // LR_HAVE_RIGHT
+ bne 4f
+ // If we'll need to pad the right edge, load that byte to pad with
+ // here since we can find it pretty easily from here.
+ sub r9, r5, #14
+ ldrb r11, [r2, r9]
+ ldrb r9, [lr, r9]
+ // Fill q12/q13 with the right padding pixel
+ vdup.16 q12, r11
+ vdup.16 q13, r9
+3: // !LR_HAVE_RIGHT
+
+ // Check whether we need to pad the right edge
+ cmp r5, #11
+ bge 4f // If w >= 11, all used input pixels are valid
+
+ // 1 <= w < 11, w+3 pixels valid in q1-q2. For w=9 or w=10,
+ // this ends up called again; it's not strictly needed in those
+ // cases (we pad enough here), but keeping the code as simple as possible.
+
+ // Insert padding in q1/2.h[w+3] onwards; fuse the +3 (*2) into the
+ // buffer pointer.
+ movrel_local r4, right_ext_mask, -6
+ sub r4, r4, r5, lsl #1
+ vld1.8 {q10, q11}, [r4]
+
+ vbit q1, q12, q10
+ vbit q2, q12, q11
+ vbit q8, q13, q10
+ vbit q9, q13, q11
+
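+ // The 7-tap filter is symmetric: mirrored taps are summed and
+ // multiplied by d1[0..2], the centre tap by d0[3]. The centre pixel
+ // also contributes (px << 7) - q14 via a saturating add; the result
+ // is shifted right by 3 and offset by 2048 for the vertical pass.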
+4: // Loop horizontally
+ vext.8 q11, q1, q2, #4
+ vext.8 q5, q1, q2, #8
+ vext.8 q10, q1, q2, #2
+ vext.8 q6, q1, q2, #10
+ vext.8 q7, q1, q2, #12
+ vext.8 q4, q1, q2, #6
+ vadd.i16 q5, q5, q11
+ vadd.i16 q6, q6, q10
+ vadd.i16 q7, q7, q1
+ vmul.s16 q3, q4, d0[3]
+ vmla.s16 q3, q5, d1[0]
+ vmla.s16 q3, q6, d1[1]
+ vmla.s16 q3, q7, d1[2]
+
+ vext.8 q4, q8, q9, #4
+ vext.8 q6, q8, q9, #8
+ vext.8 q11, q8, q9, #2
+ vext.8 q7, q8, q9, #10
+ vadd.i16 q6, q6, q4
+ vext.8 q4, q8, q9, #12
+ vext.8 q5, q8, q9, #6
+ vadd.i16 q7, q7, q11
+ vadd.i16 q4, q4, q8
+ vmul.s16 q10, q5, d0[3]
+ vmla.s16 q10, q6, d1[0]
+ vmla.s16 q10, q7, d1[1]
+ vmla.s16 q10, q4, d1[2]
+
+ vext.8 q1, q1, q2, #6
+ vext.8 q8, q8, q9, #6
+ vshl.s16 q1, q1, #7
+ vshl.s16 q8, q8, #7
+ vsub.s16 q1, q1, q14
+ vsub.s16 q8, q8, q14
+ vqadd.s16 q3, q3, q1
+ vqadd.s16 q10, q10, q8
+ vshr.s16 q3, q3, #3
+ vshr.s16 q10, q10, #3
+ vadd.s16 q3, q3, q15
+ vadd.s16 q10, q10, q15
+ subs r5, r5, #8
+ vst1.16 {q3}, [r0, :128]!
+ vst1.16 {q10}, [r12, :128]!
+
+ ble 9f
+ tst r7, #2 // LR_HAVE_RIGHT
+ vmov q1, q2
+ vmov q8, q9
+ vld1.8 {d4}, [r2]!
+ vld1.8 {d18}, [lr]!
+ vmovl.u8 q2, d4
+ vmovl.u8 q9, d18
+ bne 4b // If we don't need to pad, just keep filtering.
+ b 3b // If we need to pad, check how many pixels we have left.
+
+9:
+ subs r6, r6, #2
+ ble 0f
+ // Jump to the next row and loop horizontally
+ add r0, r0, r10
+ add r12, r12, r10
+ add r2, r2, r3
+ add lr, lr, r3
+ mov r5, r8
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+// void dav1d_wiener_filter_v_8bpc_neon(pixel *dst, ptrdiff_t stride,
+// const int16_t *mid, int w, int h,
+// const int16_t fv[7], enum LrEdgeFlags edges,
+// ptrdiff_t mid_stride);
+function wiener_filter_v_8bpc_neon, export=1
+ push {r4-r7,lr}
+ vpush {q4-q6}
+ ldrd r4, r5, [sp, #68]
+ ldrd r6, r7, [sp, #76]
+ mov lr, r4
+ vld1.16 {q0}, [r5, :128]
+
+ // Calculate the number of rows to move back when looping vertically
+ mov r12, r4
+ tst r6, #4 // LR_HAVE_TOP
+ beq 0f
+ sub r2, r2, r7, lsl #1
+ add r12, r12, #2
+0:
+ tst r6, #8 // LR_HAVE_BOTTOM
+ beq 1f
+ add r12, r12, #2
+
+1: // Start of horizontal loop; start one vertical filter slice.
+ // Load rows into q8-q11 and pad properly.
+ tst r6, #4 // LR_HAVE_TOP
+ vld1.16 {q8}, [r2, :128], r7
+ beq 2f
+ // LR_HAVE_TOP
+ vld1.16 {q10}, [r2, :128], r7
+ vmov q9, q8
+ vld1.16 {q11}, [r2, :128], r7
+ b 3f
+2: // !LR_HAVE_TOP
+ vmov q9, q8
+ vmov q10, q8
+ vmov q11, q8
+
+3:
+ cmp r4, #4
+ blt 5f
+ // Start filtering normally; fill in q12-q14 with unique rows.
+ vld1.16 {q12}, [r2, :128], r7
+ vld1.16 {q13}, [r2, :128], r7
+ vld1.16 {q14}, [r2, :128], r7
+
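+ // 7-tap vertical filter: rows mirrored around q11 are summed into
+ // q4-q6 before multiplying, then the result is rounded with >> 11
+ // and narrowed to 8-bit pixels.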
+4:
+.macro filter compare
+ subs r4, r4, #1
+ // Interleaving the mul/mla chains actually hurts performance
+ // significantly on Cortex A53, thus keeping mul/mla tightly
+ // chained like this.
+ vadd.i16 q4, q10, q12
+ vadd.i16 q5, q9, q13
+ vadd.i16 q6, q8, q14
+ vmull.s16 q2, d22, d0[3]
+ vmlal.s16 q2, d8, d1[0]
+ vmlal.s16 q2, d10, d1[1]
+ vmlal.s16 q2, d12, d1[2]
+ vmull.s16 q3, d23, d0[3]
+ vmlal.s16 q3, d9, d1[0]
+ vmlal.s16 q3, d11, d1[1]
+ vmlal.s16 q3, d13, d1[2]
+ vqrshrun.s32 d4, q2, #11
+ vqrshrun.s32 d5, q3, #11
+ vqmovun.s16 d4, q2
+ vst1.8 {d4}, [r0, :64], r1
+.if \compare
+ cmp r4, #4
+.else
+ ble 9f
+.endif
+ vmov q8, q9
+ vmov q9, q10
+ vmov q10, q11
+ vmov q11, q12
+ vmov q12, q13
+ vmov q13, q14
+.endm
+ filter 1
+ blt 7f
+ vld1.16 {q14}, [r2, :128], r7
+ b 4b
+
+5: // Less than 4 rows in total; not all of q12-q13 are filled yet.
+ tst r6, #8 // LR_HAVE_BOTTOM
+ beq 6f
+ // LR_HAVE_BOTTOM
+ cmp r4, #2
+ // We load at least 2 rows in all cases.
+ vld1.16 {q12}, [r2, :128], r7
+ vld1.16 {q13}, [r2, :128], r7
+ bgt 53f // 3 rows in total
+ beq 52f // 2 rows in total
+51: // 1 row in total, q11 already loaded, load edge into q12-q14.
+ vmov q14, q13
+ b 8f
+52: // 2 rows in total, q11 already loaded, load q12 with content data
+ // and 2 rows of edge.
+ vld1.16 {q14}, [r2, :128], r7
+ vmov q15, q14
+ b 8f
+53:
+ // 3 rows in total, q11 already loaded, load q12 and q13 with content
+ // and 2 rows of edge.
+ vld1.16 {q14}, [r2, :128], r7
+ vld1.16 {q15}, [r2, :128], r7
+ vmov q1, q15
+ b 8f
+
+6:
+ // !LR_HAVE_BOTTOM
+ cmp r4, #2
+ bgt 63f // 3 rows in total
+ beq 62f // 2 rows in total
+61: // 1 row in total, q11 already loaded, pad that into q12-q14.
+ vmov q12, q11
+ vmov q13, q11
+ vmov q14, q11
+ b 8f
+62: // 2 rows in total, q11 already loaded, load q12 and pad that into q12-q15.
+ vld1.16 {q12}, [r2, :128], r7
+ vmov q13, q12
+ vmov q14, q12
+ vmov q15, q12
+ b 8f
+63:
+ // 3 rows in total, q11 already loaded, load q12 and q13 and pad q13 into q14-q15,q1.
+ vld1.16 {q12}, [r2, :128], r7
+ vld1.16 {q13}, [r2, :128], r7
+ vmov q14, q13
+ vmov q15, q13
+ vmov q1, q13
+ b 8f
+
+7:
+ // All registers up to q13 are filled already, 3 valid rows left.
+ // < 4 valid rows left; fill in padding and filter the last
+ // few rows.
+ tst r6, #8 // LR_HAVE_BOTTOM
+ beq 71f
+ // LR_HAVE_BOTTOM; load 2 rows of edge.
+ vld1.16 {q14}, [r2, :128], r7
+ vld1.16 {q15}, [r2, :128], r7
+ vmov q1, q15
+ b 8f
+71:
+ // !LR_HAVE_BOTTOM, pad 3 rows
+ vmov q14, q13
+ vmov q15, q13
+ vmov q1, q13
+
+8: // At this point, all registers up to q14-15,q1 are loaded with
+ // edge/padding (depending on how many rows are left).
+ filter 0 // This branches to 9f when done
+ vmov q14, q15
+ vmov q15, q1
+ b 8b
+
+9: // End of one vertical slice.
+ subs r3, r3, #8
+ ble 0f
+ // Move pointers back up to the top and loop horizontally.
+ mls r0, r1, lr, r0
+ mls r2, r7, r12, r2
+ add r0, r0, #8
+ add r2, r2, #16
+ mov r4, lr
+ b 1b
+
+0:
+ vpop {q4-q6}
+ pop {r4-r7,pc}
+.purgem filter
+endfunc
+
+#define SUM_STRIDE (384+16)
+
+#include "looprestoration_tmpl.S"
+
+// void dav1d_sgr_box3_h_8bpc_neon(int32_t *sumsq, int16_t *sum,
+// const pixel (*left)[4],
+// const pixel *src, const ptrdiff_t stride,
+// const int w, const int h,
+// const enum LrEdgeFlags edges);
+function sgr_box3_h_8bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [sp, #108]
+ add r5, r5, #2 // w += 2
+
+ // Set up pointers for reading/writing alternate rows
+ add r10, r0, #(4*SUM_STRIDE) // sumsq
+ add r11, r1, #(2*SUM_STRIDE) // sum
+ add r12, r3, r4 // src
+ lsl r4, r4, #1
+ mov r9, #(2*2*SUM_STRIDE) // double sum stride
+
+ // Subtract the aligned width from the output stride.
+ add lr, r5, #7
+ bic lr, lr, #7
+ sub r9, r9, lr, lsl #1
+
+ // Store the width for the vertical loop
+ mov r8, r5
+
+ // Subtract the number of pixels read from the input from the stride
+ add lr, lr, #8
+ sub r4, r4, lr
+
+ // Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 2f
+ // LR_HAVE_LEFT
+ cmp r2, #0
+ bne 0f
+ // left == NULL
+ sub r3, r3, #2
+ sub r12, r12, #2
+ b 1f
+0: // LR_HAVE_LEFT, left != NULL
+2: // !LR_HAVE_LEFT, increase the stride.
+ // For this case we don't read the left 2 pixels from the src pointer,
+ // but shift it as if we had done that.
+ add r4, r4, #2
+
+
+1: // Loop vertically
+ vld1.8 {q0}, [r3]!
+ vld1.8 {q4}, [r12]!
+
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 0f
+ cmp r2, #0
+ beq 2f
+ // LR_HAVE_LEFT, left != NULL
+ vld1.32 {d3[]}, [r2]!
+ // Move r3/r12 back to account for the last 2 bytes we loaded earlier,
+ // which we'll shift out.
+ sub r3, r3, #2
+ sub r12, r12, #2
+ vld1.32 {d11[]}, [r2]!
+ vext.8 q0, q1, q0, #14
+ vext.8 q4, q5, q4, #14
+ b 2f
+0:
+ // !LR_HAVE_LEFT, fill q1 with the leftmost byte
+ // and shift q0 to have 2x the first byte at the front.
+ vdup.8 q1, d0[0]
+ vdup.8 q5, d8[0]
+ // Move r3 back to account for the last 2 bytes we loaded before,
+ // which we shifted out.
+ sub r3, r3, #2
+ sub r12, r12, #2
+ vext.8 q0, q1, q0, #14
+ vext.8 q4, q5, q4, #14
+
+2:
+ vmull.u8 q1, d0, d0
+ vmull.u8 q2, d1, d1
+ vmull.u8 q5, d8, d8
+ vmull.u8 q6, d9, d9
+
+ tst r7, #2 // LR_HAVE_RIGHT
+ bne 4f
+ // If we'll need to pad the right edge, load that byte to pad with
+ // here since we can find it pretty easily from here.
+ sub lr, r5, #(2 + 16 - 2 + 1)
+ ldrb r11, [r3, lr]
+ ldrb lr, [r12, lr]
+ // Fill q14/q15 with the right padding pixel
+ vdup.8 q14, r11
+ vdup.8 q15, lr
+ // Restore r11 after using it for a temporary value
+ add r11, r1, #(2*SUM_STRIDE)
+3: // !LR_HAVE_RIGHT
+
+ // Check whether we need to pad the right edge
+ cmp r5, #10
+ bge 4f // If w >= 10, all used input pixels are valid
+
+ // 1 <= w < 10, w pixels valid in q0. For w=9, this ends up called
+ // again; it's not strictly needed in that case (we pad enough here),
+ // but keeping the code as simple as possible.
+
+ // Insert padding in q0/4.b[w] onwards
+ movrel_local lr, right_ext_mask
+ sub lr, lr, r5
+ vld1.8 {q13}, [lr]
+
+ vbit q0, q14, q13
+ vbit q4, q15, q13
+
+ // Update the precalculated squares
+ vmull.u8 q1, d0, d0
+ vmull.u8 q2, d1, d1
+ vmull.u8 q5, d8, d8
+ vmull.u8 q6, d9, d9
+
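+ // Produce 3-pixel horizontal sums of the pixels (q3/q7, stored to
+ // sum) and of their squares (q12-q13/q8-q9, built from the
+ // precalculated squares in q1-q2/q5-q6, stored to sumsq).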
+4: // Loop horizontally
+ vext.8 d16, d0, d1, #1
+ vext.8 d17, d0, d1, #2
+ vext.8 d18, d8, d9, #1
+ vext.8 d19, d8, d9, #2
+ vaddl.u8 q3, d0, d16
+ vaddw.u8 q3, q3, d17
+ vaddl.u8 q7, d8, d18
+ vaddw.u8 q7, q7, d19
+
+ vext.8 q8, q1, q2, #2
+ vext.8 q9, q1, q2, #4
+ vext.8 q10, q5, q6, #2
+ vext.8 q11, q5, q6, #4
+
+ vaddl.u16 q12, d2, d16
+ vaddl.u16 q13, d3, d17
+ vaddw.u16 q12, q12, d18
+ vaddw.u16 q13, q13, d19
+
+ vaddl.u16 q8, d10, d20
+ vaddl.u16 q9, d11, d21
+ vaddw.u16 q8, q8, d22
+ vaddw.u16 q9, q9, d23
+
+ subs r5, r5, #8
+ vst1.16 {q3}, [r1, :128]!
+ vst1.16 {q7}, [r11, :128]!
+ vst1.32 {q12, q13}, [r0, :128]!
+ vst1.32 {q8, q9}, [r10, :128]!
+
+ ble 9f
+ tst r7, #2 // LR_HAVE_RIGHT
+ vld1.8 {d6}, [r3]!
+ vld1.8 {d14}, [r12]!
+ vmov q1, q2
+ vmov q5, q6
+ vext.8 q0, q0, q3, #8
+ vext.8 q4, q4, q7, #8
+ vmull.u8 q2, d6, d6
+ vmull.u8 q6, d14, d14
+
+ bne 4b // If we don't need to pad, just keep summing.
+ b 3b // If we need to pad, check how many pixels we have left.
+
+9:
+ subs r6, r6, #2
+ ble 0f
+ // Jump to the next row and loop horizontally
+ add r0, r0, r9, lsl #1
+ add r10, r10, r9, lsl #1
+ add r1, r1, r9
+ add r11, r11, r9
+ add r3, r3, r4
+ add r12, r12, r4
+ mov r5, r8
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+// void dav1d_sgr_box5_h_8bpc_neon(int32_t *sumsq, int16_t *sum,
+// const pixel (*left)[4],
+// const pixel *src, const ptrdiff_t stride,
+// const int w, const int h,
+// const enum LrEdgeFlags edges);
+function sgr_box5_h_8bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [sp, #108]
+ add r5, r5, #2 // w += 2
+
+ // Set up pointers for reading/writing alternate rows
+ add r10, r0, #(4*SUM_STRIDE) // sumsq
+ add r11, r1, #(2*SUM_STRIDE) // sum
+ add r12, r3, r4 // src
+ lsl r4, r4, #1
+ mov r9, #(2*2*SUM_STRIDE) // double sum stride
+
+ // Subtract the aligned width from the output stride.
+ add lr, r5, #7
+ bic lr, lr, #7
+ sub r9, r9, lr, lsl #1
+ add lr, lr, #8
+ sub r4, r4, lr
+
+ // Store the width for the vertical loop
+ mov r8, r5
+
+ // Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 2f
+ // LR_HAVE_LEFT
+ cmp r2, #0
+ bne 0f
+ // left == NULL
+ sub r3, r3, #3
+ sub r12, r12, #3
+ b 1f
+0: // LR_HAVE_LEFT, left != NULL
+2: // !LR_HAVE_LEFT, increase the stride.
+ // For this case we don't read the left 3 pixels from the src pointer,
+ // but shift it as if we had done that.
+ add r4, r4, #3
+
+1: // Loop vertically
+ vld1.8 {q0}, [r3]!
+ vld1.8 {q4}, [r12]!
+
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 0f
+ cmp r2, #0
+ beq 2f
+ // LR_HAVE_LEFT, left != NULL
+ vld1.32 {d3[]}, [r2]!
+ // Move r3/r12 back to account for the last 3 bytes we loaded earlier,
+ // which we'll shift out.
+ sub r3, r3, #3
+ sub r12, r12, #3
+ vld1.32 {d11[]}, [r2]!
+ vext.8 q0, q1, q0, #13
+ vext.8 q4, q5, q4, #13
+ b 2f
+0:
+ // !LR_HAVE_LEFT, fill q1 with the leftmost byte
+ // and shift q0 to have 3x the first byte at the front.
+ vdup.8 q1, d0[0]
+ vdup.8 q5, d8[0]
+ // Move r3 back to account for the last 3 bytes we loaded before,
+ // which we shifted out.
+ sub r3, r3, #3
+ sub r12, r12, #3
+ vext.8 q0, q1, q0, #13
+ vext.8 q4, q5, q4, #13
+
+2:
+ vmull.u8 q1, d0, d0
+ vmull.u8 q2, d1, d1
+ vmull.u8 q5, d8, d8
+ vmull.u8 q6, d9, d9
+
+ tst r7, #2 // LR_HAVE_RIGHT
+ bne 4f
+ // If we'll need to pad the right edge, load that byte to pad with
+ // here since we can find it pretty easily from here.
+ sub lr, r5, #(2 + 16 - 3 + 1)
+ ldrb r11, [r3, lr]
+ ldrb lr, [r12, lr]
+ // Fill q14/q15 with the right padding pixel
+ vdup.8 q14, r11
+ vdup.8 q15, lr
+ // Restore r11 after using it for a temporary value
+ add r11, r1, #(2*SUM_STRIDE)
+3: // !LR_HAVE_RIGHT
+
+ // Check whether we need to pad the right edge
+ cmp r5, #11
+ bge 4f // If w >= 11, all used input pixels are valid
+
+ // 1 <= w < 11, w+1 pixels valid in q0. For w=9 or w=10,
+ // this ends up called again; it's not strictly needed in those
+ // cases (we pad enough here), but keeping the code as simple as possible.
+
+ // Insert padding in q0/4.b[w+1] onwards; fuse the +1 into the
+ // buffer pointer.
+ movrel_local lr, right_ext_mask, -1
+ sub lr, lr, r5
+ vld1.8 {q13}, [lr]
+
+ vbit q0, q14, q13
+ vbit q4, q15, q13
+
+ // Update the precalculated squares
+ vmull.u8 q1, d0, d0
+ vmull.u8 q2, d1, d1
+ vmull.u8 q5, d8, d8
+ vmull.u8 q6, d9, d9
+
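+ // Produce 5-pixel horizontal sums of the pixels (q3/q7) and of their
+ // precalculated squares (q12-q13/q10-q11), writing one row of sum
+ // and sumsq output per input row.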
+4: // Loop horizontally
+ vext.8 d16, d0, d1, #1
+ vext.8 d17, d0, d1, #2
+ vext.8 d18, d0, d1, #3
+ vext.8 d19, d0, d1, #4
+ vext.8 d20, d8, d9, #1
+ vext.8 d21, d8, d9, #2
+ vext.8 d22, d8, d9, #3
+ vext.8 d23, d8, d9, #4
+ vaddl.u8 q3, d0, d16
+ vaddl.u8 q12, d17, d18
+ vaddl.u8 q7, d8, d20
+ vaddl.u8 q13, d21, d22
+ vaddw.u8 q3, q3, d19
+ vaddw.u8 q7, q7, d23
+ vadd.u16 q3, q3, q12
+ vadd.u16 q7, q7, q13
+
+ vext.8 q8, q1, q2, #2
+ vext.8 q9, q1, q2, #4
+ vext.8 q10, q1, q2, #6
+ vext.8 q11, q1, q2, #8
+ vaddl.u16 q12, d2, d16
+ vaddl.u16 q13, d3, d17
+ vaddl.u16 q8, d18, d20
+ vaddl.u16 q9, d19, d21
+ vaddw.u16 q12, q12, d22
+ vaddw.u16 q13, q13, d23
+ vadd.i32 q12, q12, q8
+ vadd.i32 q13, q13, q9
+ vext.8 q8, q5, q6, #2
+ vext.8 q9, q5, q6, #4
+ vext.8 q10, q5, q6, #6
+ vext.8 q11, q5, q6, #8
+ vaddl.u16 q1, d10, d16
+ vaddl.u16 q5, d11, d17
+ vaddl.u16 q8, d18, d20
+ vaddl.u16 q9, d19, d21
+ vaddw.u16 q1, q1, d22
+ vaddw.u16 q5, q5, d23
+ vadd.i32 q10, q1, q8
+ vadd.i32 q11, q5, q9
+
+ subs r5, r5, #8
+ vst1.16 {q3}, [r1, :128]!
+ vst1.16 {q7}, [r11, :128]!
+ vst1.32 {q12, q13}, [r0, :128]!
+ vst1.32 {q10, q11}, [r10, :128]!
+
+ ble 9f
+ tst r7, #2 // LR_HAVE_RIGHT
+ vld1.8 {d6}, [r3]!
+ vld1.8 {d14}, [r12]!
+ vmov q1, q2
+ vmov q5, q6
+ vext.8 q0, q0, q3, #8
+ vext.8 q4, q4, q7, #8
+ vmull.u8 q2, d6, d6
+ vmull.u8 q6, d14, d14
+ bne 4b // If we don't need to pad, just keep summing.
+ b 3b // If we need to pad, check how many pixels we have left.
+
+9:
+ subs r6, r6, #2
+ ble 0f
+ // Jump to the next row and loop horizontally
+ add r0, r0, r9, lsl #1
+ add r10, r10, r9, lsl #1
+ add r1, r1, r9
+ add r11, r11, r9
+ add r3, r3, r4
+ add r12, r12, r4
+ mov r5, r8
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+sgr_funcs 8
diff --git a/third_party/dav1d/src/arm/32/looprestoration16.S b/third_party/dav1d/src/arm/32/looprestoration16.S
new file mode 100644
index 0000000000..d699617a87
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/looprestoration16.S
@@ -0,0 +1,801 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+const right_ext_mask_buf
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+ .byte 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00
+right_ext_mask:
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+ .byte 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff
+endconst
+
+// void dav1d_wiener_filter_h_16bpc_neon(int16_t *dst, const pixel (*left)[4],
+// const pixel *src, ptrdiff_t stride,
+// const int16_t fh[7], const intptr_t w,
+// int h, enum LrEdgeFlags edges,
+// const int bitdepth_max);
+function wiener_filter_h_16bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [sp, #108]
+ ldr r8, [sp, #116] // bitdepth_max
+ vld1.16 {q0}, [r4, :128]
+ clz r8, r8
+ vmov.i32 q14, #1
+ sub r9, r8, #38 // -(bitdepth + 6)
+ sub r8, r8, #25 // -round_bits_h
+ neg r9, r9 // bitdepth + 6
+ vdup.32 q1, r9
+ vdup.32 q13, r8 // -round_bits_h
+ vmov.i16 q15, #8192
+ vshl.u32 q14, q14, q1 // 1 << (bitdepth + 6)
+ mov r8, r5
+ // Calculate mid_stride
+ add r10, r5, #7
+ bic r10, r10, #7
+ lsl r10, r10, #1
+
+ // Set up pointers for reading/writing alternate rows
+ add r12, r0, r10
+ lsl r10, r10, #1
+ add lr, r2, r3
+ lsl r3, r3, #1
+
+ // Subtract the aligned width from mid_stride
+ add r11, r5, #7
+ bic r11, r11, #7
+ sub r10, r10, r11, lsl #1
+
+ // Subtract the number of pixels read from the source stride
+ add r11, r11, #8
+ sub r3, r3, r11, lsl #1
+
+ // Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 2f
+ // LR_HAVE_LEFT
+ cmp r1, #0
+ bne 0f
+ // left == NULL
+ sub r2, r2, #6
+ sub lr, lr, #6
+ b 1f
+0: // LR_HAVE_LEFT, left != NULL
+2: // !LR_HAVE_LEFT, increase the stride.
+ // For this case we don't read the left 3 pixels from the src pointer,
+ // but shift it as if we had done that.
+ add r3, r3, #6
+
+
+1: // Loop vertically
+ vld1.16 {q2, q3}, [r2]!
+ vld1.16 {q4, q5}, [lr]!
+
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 0f
+ cmp r1, #0
+ beq 2f
+ // LR_HAVE_LEFT, left != NULL
+ vld1.16 {d3}, [r1]!
+ // Move r2/lr back to account for the last 3 pixels we loaded earlier,
+ // which we'll shift out.
+ sub r2, r2, #6
+ sub lr, lr, #6
+ vld1.16 {d13}, [r1]!
+ vext.8 q3, q2, q3, #10
+ vext.8 q2, q1, q2, #10
+ vext.8 q5, q4, q5, #10
+ vext.8 q4, q6, q4, #10
+ b 2f
+0:
+ // !LR_HAVE_LEFT, fill q1 with the leftmost pixel
+ // and shift q2/q3 to have 3x the first pixel at the front.
+ vdup.16 q1, d4[0]
+ vdup.16 q6, d8[0]
+ // Move r2 back to account for the last 3 pixels we loaded before,
+ // which we shifted out.
+ sub r2, r2, #6
+ sub lr, lr, #6
+ vext.8 q3, q2, q3, #10
+ vext.8 q2, q1, q2, #10
+ vext.8 q5, q4, q5, #10
+ vext.8 q4, q6, q4, #10
+
+2:
+
+ tst r7, #2 // LR_HAVE_RIGHT
+ bne 4f
+ // If we'll need to pad the right edge, load that pixel to pad with
+ // here since we can find it pretty easily from here.
+ sub r9, r5, #14
+ lsl r9, r9, #1
+ ldrh r11, [r2, r9]
+ ldrh r9, [lr, r9]
+ // Fill q11/q12 with the right padding pixel
+ vdup.16 q11, r11
+ vdup.16 q12, r9
+3: // !LR_HAVE_RIGHT
+
+ // Check whether we need to pad the right edge
+ cmp r5, #11
+ bge 4f // If w >= 11, all used input pixels are valid
+
+ // 1 <= w < 11, w+3 pixels valid in q2-q3. For w=9 or w=10,
+ // this ends up called again; it's not strictly needed in those
+ // cases (we pad enough here), but keeping the code as simple as possible.
+
+ // Insert padding in q2/3.h[w+3] onwards; fuse the +3 (*2) into the
+ // buffer pointer.
+ movrel_local r4, right_ext_mask, -6
+ sub r4, r4, r5, lsl #1
+ vld1.8 {q9, q10}, [r4]
+
+ vbit q2, q11, q9
+ vbit q3, q11, q10
+ vbit q4, q12, q9
+ vbit q5, q12, q10
+
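+ // Same symmetric 7-tap scheme as the 8 bpc version, but with 32-bit
+ // accumulators: the sums are offset by 1 << (bitdepth + 6) (q14),
+ // rounded right by round_bits_h, clamped to [0, 0x7fff] and re-biased
+ // by -8192 before being stored as the intermediate.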
+4: // Loop horizontally
+ vext.8 q7, q2, q3, #4
+ vext.8 q8, q2, q3, #8
+ vext.8 q6, q2, q3, #2
+ vext.8 q9, q2, q3, #10
+ vadd.i16 q8, q8, q7
+ vadd.i16 q9, q9, q6
+ vext.8 q6, q2, q3, #12
+ vext.8 q7, q2, q3, #6
+ vadd.i16 q2, q2, q6
+ vmull.s16 q6, d14, d0[3]
+ vmlal.s16 q6, d16, d1[0]
+ vmlal.s16 q6, d18, d1[1]
+ vmlal.s16 q6, d4, d1[2]
+ vmull.s16 q7, d15, d0[3]
+ vmlal.s16 q7, d17, d1[0]
+ vmlal.s16 q7, d19, d1[1]
+ vmlal.s16 q7, d5, d1[2]
+
+ vext.8 q8, q4, q5, #4
+ vext.8 q10, q4, q5, #8
+ vext.8 q9, q4, q5, #2
+ vext.8 q2, q4, q5, #10
+ vadd.i16 q10, q10, q8
+ vadd.i16 q2, q2, q9
+ vext.8 q8, q4, q5, #12
+ vext.8 q9, q4, q5, #6
+ vadd.i16 q4, q4, q8
+ vmull.s16 q8, d18, d0[3]
+ vmlal.s16 q8, d20, d1[0]
+ vmlal.s16 q8, d4, d1[1]
+ vmlal.s16 q8, d8, d1[2]
+ vmull.s16 q9, d19, d0[3]
+ vmlal.s16 q9, d21, d1[0]
+ vmlal.s16 q9, d5, d1[1]
+ vmlal.s16 q9, d9, d1[2]
+
+ vmvn.i16 q10, #0x8000 // 0x7fff = (1 << 15) - 1
+ vadd.i32 q6, q6, q14
+ vadd.i32 q7, q7, q14
+ vadd.i32 q8, q8, q14
+ vadd.i32 q9, q9, q14
+ vrshl.s32 q6, q6, q13
+ vrshl.s32 q7, q7, q13
+ vrshl.s32 q8, q8, q13
+ vrshl.s32 q9, q9, q13
+ vqmovun.s32 d12, q6
+ vqmovun.s32 d13, q7
+ vqmovun.s32 d14, q8
+ vqmovun.s32 d15, q9
+ vmin.u16 q6, q6, q10
+ vmin.u16 q7, q7, q10
+ vsub.i16 q6, q6, q15
+ vsub.i16 q7, q7, q15
+ subs r5, r5, #8
+ vst1.16 {q6}, [r0, :128]!
+ vst1.16 {q7}, [r12, :128]!
+
+ ble 9f
+ tst r7, #2 // LR_HAVE_RIGHT
+ vmov q2, q3
+ vmov q4, q5
+ vld1.16 {q3}, [r2]!
+ vld1.16 {q5}, [lr]!
+ bne 4b // If we don't need to pad, just keep filtering.
+ b 3b // If we need to pad, check how many pixels we have left.
+
+9:
+ subs r6, r6, #2
+ ble 0f
+ // Jump to the next row and loop horizontally
+ add r0, r0, r10
+ add r12, r12, r10
+ add r2, r2, r3
+ add lr, lr, r3
+ mov r5, r8
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+// void dav1d_wiener_filter_v_16bpc_neon(pixel *dst, ptrdiff_t stride,
+// const int16_t *mid, int w, int h,
+// const int16_t fv[7], enum LrEdgeFlags edges,
+// ptrdiff_t mid_stride, const int bitdepth_max);
+function wiener_filter_v_16bpc_neon, export=1
+ push {r4-r7,lr}
+ vpush {q4-q5}
+ ldrd r4, r5, [sp, #52]
+ ldrd r6, r7, [sp, #60]
+ ldr lr, [sp, #68] // bitdepth_max
+ vld1.16 {q0}, [r5, :128]
+ vdup.16 q5, lr
+ clz lr, lr
+ sub lr, lr, #11 // round_bits_v
+ vdup.32 q4, lr
+ mov lr, r4
+ vneg.s32 q4, q4 // -round_bits_v
+
+ // Calculate the number of rows to move back when looping vertically
+ mov r12, r4
+ tst r6, #4 // LR_HAVE_TOP
+ beq 0f
+ sub r2, r2, r7, lsl #1
+ add r12, r12, #2
+0:
+ tst r6, #8 // LR_HAVE_BOTTOM
+ beq 1f
+ add r12, r12, #2
+
+1: // Start of horizontal loop; start one vertical filter slice.
+ // Load rows into q8-q11 and pad properly.
+ tst r6, #4 // LR_HAVE_TOP
+ vld1.16 {q8}, [r2, :128], r7
+ beq 2f
+ // LR_HAVE_TOP
+ vld1.16 {q10}, [r2, :128], r7
+ vmov q9, q8
+ vld1.16 {q11}, [r2, :128], r7
+ b 3f
+2: // !LR_HAVE_TOP
+ vmov q9, q8
+ vmov q10, q8
+ vmov q11, q8
+
+3:
+ cmp r4, #4
+ blt 5f
+ // Start filtering normally; fill in q12-q14 with unique rows.
+ vld1.16 {q12}, [r2, :128], r7
+ vld1.16 {q13}, [r2, :128], r7
+ vld1.16 {q14}, [r2, :128], r7
+
+4:
+.macro filter compare
+ subs r4, r4, #1
+ // Interleaving the mul/mla chains actually hurts performance
+ // significantly on Cortex A53, thus keeping mul/mla tightly
+ // chained like this.
+ vmull.s16 q2, d16, d0[0]
+ vmlal.s16 q2, d18, d0[1]
+ vmlal.s16 q2, d20, d0[2]
+ vmlal.s16 q2, d22, d0[3]
+ vmlal.s16 q2, d24, d1[0]
+ vmlal.s16 q2, d26, d1[1]
+ vmlal.s16 q2, d28, d1[2]
+ vmull.s16 q3, d17, d0[0]
+ vmlal.s16 q3, d19, d0[1]
+ vmlal.s16 q3, d21, d0[2]
+ vmlal.s16 q3, d23, d0[3]
+ vmlal.s16 q3, d25, d1[0]
+ vmlal.s16 q3, d27, d1[1]
+ vmlal.s16 q3, d29, d1[2]
+ vrshl.s32 q2, q2, q4 // round_bits_v
+ vrshl.s32 q3, q3, q4
+ vqmovun.s32 d4, q2
+ vqmovun.s32 d5, q3
+ vmin.u16 q2, q2, q5 // bitdepth_max
+ vst1.16 {q2}, [r0, :128], r1
+.if \compare
+ cmp r4, #4
+.else
+ ble 9f
+.endif
+ vmov q8, q9
+ vmov q9, q10
+ vmov q10, q11
+ vmov q11, q12
+ vmov q12, q13
+ vmov q13, q14
+.endm
+ filter 1
+ blt 7f
+ vld1.16 {q14}, [r2, :128], r7
+ b 4b
+
+5: // Less than 4 rows in total; not all of q12-q13 are filled yet.
+ tst r6, #8 // LR_HAVE_BOTTOM
+ beq 6f
+ // LR_HAVE_BOTTOM
+ cmp r4, #2
+ // We load at least 2 rows in all cases.
+ vld1.16 {q12}, [r2, :128], r7
+ vld1.16 {q13}, [r2, :128], r7
+ bgt 53f // 3 rows in total
+ beq 52f // 2 rows in total
+51: // 1 row in total, q11 already loaded, load edge into q12-q14.
+ vmov q14, q13
+ b 8f
+52: // 2 rows in total, q11 already loaded, load q12 with content data
+ // and 2 rows of edge.
+ vld1.16 {q14}, [r2, :128], r7
+ vmov q15, q14
+ b 8f
+53:
+ // 3 rows in total, q11 already loaded, load q12 and q13 with content
+ // and 2 rows of edge.
+ vld1.16 {q14}, [r2, :128], r7
+ vld1.16 {q15}, [r2, :128], r7
+ vmov q1, q15
+ b 8f
+
+6:
+ // !LR_HAVE_BOTTOM
+ cmp r4, #2
+ bgt 63f // 3 rows in total
+ beq 62f // 2 rows in total
+61: // 1 row in total, q11 already loaded, pad that into q12-q14.
+ vmov q12, q11
+ vmov q13, q11
+ vmov q14, q11
+ b 8f
+62: // 2 rows in total, q11 already loaded, load q12 and pad that into q12-q15.
+ vld1.16 {q12}, [r2, :128], r7
+ vmov q13, q12
+ vmov q14, q12
+ vmov q15, q12
+ b 8f
+63:
+ // 3 rows in total, q11 already loaded, load q12 and q13 and pad q13 into q14-q15,q1.
+ vld1.16 {q12}, [r2, :128], r7
+ vld1.16 {q13}, [r2, :128], r7
+ vmov q14, q13
+ vmov q15, q13
+ vmov q1, q13
+ b 8f
+
+7:
+ // All registers up to q13 are filled already, 3 valid rows left.
+ // < 4 valid rows left; fill in padding and filter the last
+ // few rows.
+ tst r6, #8 // LR_HAVE_BOTTOM
+ beq 71f
+ // LR_HAVE_BOTTOM; load 2 rows of edge.
+ vld1.16 {q14}, [r2, :128], r7
+ vld1.16 {q15}, [r2, :128], r7
+ vmov q1, q15
+ b 8f
+71:
+ // !LR_HAVE_BOTTOM, pad 3 rows
+ vmov q14, q13
+ vmov q15, q13
+ vmov q1, q13
+
+8: // At this point, all registers up to q14-q15,q1 are loaded with
+ // edge/padding (depending on how many rows are left).
+ filter 0 // This branches to 9f when done
+ vmov q14, q15
+ vmov q15, q1
+ b 8b
+
+9: // End of one vertical slice.
+ subs r3, r3, #8
+ ble 0f
+ // Move pointers back up to the top and loop horizontally.
+ mls r0, r1, lr, r0
+ mls r2, r7, r12, r2
+ add r0, r0, #16
+ add r2, r2, #16
+ mov r4, lr
+ b 1b
+
+0:
+ vpop {q4-q5}
+ pop {r4-r7,pc}
+.purgem filter
+endfunc
+
+#define SUM_STRIDE (384+16)
+
+#include "looprestoration_tmpl.S"
+
+// void dav1d_sgr_box3_h_16bpc_neon(int32_t *sumsq, int16_t *sum,
+// const pixel (*left)[4],
+// const pixel *src, const ptrdiff_t stride,
+// const int w, const int h,
+// const enum LrEdgeFlags edges);
+function sgr_box3_h_16bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [sp, #108]
+ add r5, r5, #2 // w += 2
+
+ // Set up pointers for reading/writing alternate rows
+ add r10, r0, #(4*SUM_STRIDE) // sumsq
+ add r11, r1, #(2*SUM_STRIDE) // sum
+ add r12, r3, r4 // src
+ lsl r4, r4, #1
+ mov r9, #(2*2*SUM_STRIDE) // double sum stride
+
+ // Subtract the aligned width from the output stride.
+ add lr, r5, #7
+ bic lr, lr, #7
+ sub r9, r9, lr, lsl #1
+
+ // Store the width for the vertical loop
+ mov r8, r5
+
+ // Subtract the number of pixels read from the input from the stride
+ add lr, lr, #8
+ sub r4, r4, lr, lsl #1
+
+ // Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 2f
+ // LR_HAVE_LEFT
+ cmp r2, #0
+ bne 0f
+ // left == NULL
+ sub r3, r3, #4
+ sub r12, r12, #4
+ b 1f
+0: // LR_HAVE_LEFT, left != NULL
+2: // !LR_HAVE_LEFT, increase the stride.
+ // For this case we don't read the left 2 pixels from the src pointer,
+ // but shift it as if we had done that.
+ add r4, r4, #4
+
+
+1: // Loop vertically
+ vld1.16 {q0, q1}, [r3]!
+ vld1.16 {q4, q5}, [r12]!
+
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 0f
+ cmp r2, #0
+ beq 2f
+ // LR_HAVE_LEFT, left != NULL
+ vld1.16 {d5}, [r2]!
+ // Move r3/r12 back to account for the last 2 pixels we loaded earlier,
+ // which we'll shift out.
+ sub r3, r3, #4
+ sub r12, r12, #4
+ vld1.16 {d13}, [r2]!
+ vext.8 q1, q0, q1, #12
+ vext.8 q0, q2, q0, #12
+ vext.8 q5, q4, q5, #12
+ vext.8 q4, q6, q4, #12
+ b 2f
+0:
+ // !LR_HAVE_LEFT, fill q2 with the leftmost pixel
+ // and shift q0 to have 2x the first pixel at the front.
+ vdup.16 q2, d0[0]
+ vdup.16 q6, d8[0]
+ // Move r3 back to account for the last 2 pixels we loaded before,
+ // which we shifted out.
+ sub r3, r3, #4
+ sub r12, r12, #4
+ vext.8 q1, q0, q1, #12
+ vext.8 q0, q2, q0, #12
+ vext.8 q5, q4, q5, #12
+ vext.8 q4, q6, q4, #12
+
+2:
+ tst r7, #2 // LR_HAVE_RIGHT
+ bne 4f
+ // If we'll need to pad the right edge, load that pixel to pad with
+ // here since we can find it pretty easily from here.
+ sub lr, r5, #(2 + 16 - 2 + 1)
+ lsl lr, lr, #1
+ ldrh r11, [r3, lr]
+ ldrh lr, [r12, lr]
+ // Fill q14/q15 with the right padding pixel
+ vdup.16 q14, r11
+ vdup.16 q15, lr
+ // Restore r11 after using it for a temporary value
+ add r11, r1, #(2*SUM_STRIDE)
+3: // !LR_HAVE_RIGHT
+
+ // Check whether we need to pad the right edge
+ cmp r5, #10
+ bge 4f // If w >= 10, all used input pixels are valid
+
+ // 1 <= w < 10, w pixels valid in q0-q1. For w=9, this ends up called
+ // again; it's not strictly needed in that case (we pad enough here),
+ // but keeping the code as simple as possible.
+
+ // Insert padding in q0/1.h[w] onwards
+ movrel_local lr, right_ext_mask
+ sub lr, lr, r5, lsl #1
+ vld1.8 {q12, q13}, [lr]
+
+ vbit q0, q14, q12
+ vbit q1, q14, q13
+ vbit q4, q15, q12
+ vbit q5, q15, q13
+
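+ // 3-pixel horizontal sums; at 16 bpc the squares are accumulated
+ // directly with vmull/vmlal into 32-bit lanes instead of being
+ // precalculated as in the 8 bpc version.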
+4: // Loop horizontally
+ vext.8 q8, q0, q1, #2
+ vext.8 q10, q4, q5, #2
+ vext.8 q9, q0, q1, #4
+ vext.8 q11, q4, q5, #4
+ vadd.i16 q2, q0, q8
+ vadd.i16 q3, q4, q10
+ vadd.i16 q2, q2, q9
+ vadd.i16 q3, q3, q11
+
+ vmull.u16 q6, d0, d0
+ vmlal.u16 q6, d16, d16
+ vmlal.u16 q6, d18, d18
+ vmull.u16 q12, d8, d8
+ vmlal.u16 q12, d20, d20
+ vmlal.u16 q12, d22, d22
+ vmull.u16 q7, d1, d1
+ vmlal.u16 q7, d17, d17
+ vmlal.u16 q7, d19, d19
+ vmull.u16 q13, d9, d9
+ vmlal.u16 q13, d21, d21
+ vmlal.u16 q13, d23, d23
+ subs r5, r5, #8
+ vst1.16 {q2}, [r1, :128]!
+ vst1.16 {q3}, [r11, :128]!
+ vst1.32 {q6, q7}, [r0, :128]!
+ vst1.32 {q12, q13}, [r10, :128]!
+
+ ble 9f
+ tst r7, #2 // LR_HAVE_RIGHT
+ vmov q0, q1
+ vmov q4, q5
+ vld1.16 {q1}, [r3]!
+ vld1.16 {q5}, [r12]!
+
+ bne 4b // If we don't need to pad, just keep summing.
+ b 3b // If we need to pad, check how many pixels we have left.
+
+9:
+ subs r6, r6, #2
+ ble 0f
+ // Jump to the next row and loop horizontally
+ add r0, r0, r9, lsl #1
+ add r10, r10, r9, lsl #1
+ add r1, r1, r9
+ add r11, r11, r9
+ add r3, r3, r4
+ add r12, r12, r4
+ mov r5, r8
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+// void dav1d_sgr_box5_h_16bpc_neon(int32_t *sumsq, int16_t *sum,
+// const pixel (*left)[4],
+// const pixel *src, const ptrdiff_t stride,
+// const int w, const int h,
+// const enum LrEdgeFlags edges);
+function sgr_box5_h_16bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [sp, #108]
+ add r5, r5, #2 // w += 2
+
+ // Set up pointers for reading/writing alternate rows
+ add r10, r0, #(4*SUM_STRIDE) // sumsq
+ add r11, r1, #(2*SUM_STRIDE) // sum
+ add r12, r3, r4 // src
+ lsl r4, r4, #1
+ mov r9, #(2*2*SUM_STRIDE) // double sum stride
+
+ // Subtract the aligned width from the output stride.
+ add lr, r5, #7
+ bic lr, lr, #7
+ sub r9, r9, lr, lsl #1
+ add lr, lr, #8
+ sub r4, r4, lr, lsl #1
+
+ // Store the width for the vertical loop
+ mov r8, r5
+
+ // Set up the src pointers to include the left edge, for LR_HAVE_LEFT, left == NULL
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 2f
+ // LR_HAVE_LEFT
+ cmp r2, #0
+ bne 0f
+ // left == NULL
+ sub r3, r3, #6
+ sub r12, r12, #6
+ b 1f
+0: // LR_HAVE_LEFT, left != NULL
+2: // !LR_HAVE_LEFT, increase the stride.
+ // For this case we don't read the left 3 pixels from the src pointer,
+ // but shift it as if we had done that.
+ add r4, r4, #6
+
+1: // Loop vertically
+ vld1.16 {q0, q1}, [r3]!
+ vld1.16 {q4, q5}, [r12]!
+
+ tst r7, #1 // LR_HAVE_LEFT
+ beq 0f
+ cmp r2, #0
+ beq 2f
+ // LR_HAVE_LEFT, left != NULL
+ vld1.16 {d5}, [r2]!
+ // Move r3/r12 back to account for the last 3 pixels we loaded earlier,
+ // which we'll shift out.
+ sub r3, r3, #6
+ sub r12, r12, #6
+ vld1.16 {d13}, [r2]!
+ vext.8 q1, q0, q1, #10
+ vext.8 q0, q2, q0, #10
+ vext.8 q5, q4, q5, #10
+ vext.8 q4, q6, q4, #10
+ b 2f
+0:
+ // !LR_HAVE_LEFT, fill q2 with the leftmost pixel
+ // and shift q0 to have 3x the first pixel at the front.
+ vdup.16 q2, d0[0]
+ vdup.16 q6, d8[0]
+ // Move r3 back to account for the last 3 pixels we loaded before,
+ // which we shifted out.
+ sub r3, r3, #6
+ sub r12, r12, #6
+ vext.8 q1, q0, q1, #10
+ vext.8 q0, q2, q0, #10
+ vext.8 q5, q4, q5, #10
+ vext.8 q4, q6, q4, #10
+
+2:
+ tst r7, #2 // LR_HAVE_RIGHT
+ bne 4f
+ // If we'll need to pad the right edge, load that pixel to pad with
+ // here since we can find it pretty easily from here.
+ sub lr, r5, #(2 + 16 - 3 + 1)
+ lsl lr, lr, #1
+ ldrh r11, [r3, lr]
+ ldrh lr, [r12, lr]
+ // Fill q14/q15 with the right padding pixel
+ vdup.16 q14, r11
+ vdup.16 q15, lr
+ // Restore r11 after using it for a temporary value
+ add r11, r1, #(2*SUM_STRIDE)
+3: // !LR_HAVE_RIGHT
+
+ // Check whether we need to pad the right edge
+ cmp r5, #11
+ bge 4f // If w >= 11, all used input pixels are valid
+
+ // 1 <= w < 11, w+1 pixels valid in q0-q1. For w=9 or w=10,
+ // this ends up called again; it's not strictly needed in those
+ // cases (we pad enough here), but keeping the code as simple as possible.
+
+ // Insert padding in q0/1.h[w+1] onwards; fuse the +1 into the
+ // buffer pointer.
+ movrel_local lr, right_ext_mask, -2
+ sub lr, lr, r5, lsl #1
+ vld1.8 {q12, q13}, [lr]
+
+ vbit q0, q14, q12
+ vbit q1, q14, q13
+ vbit q4, q15, q12
+ vbit q5, q15, q13
+
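+ // 5-pixel horizontal sums, built up as 3 + 2 neighbours: the first
+ // block handles offsets 0-2, the second adds offsets 3-4 to both the
+ // sums and the squared sums.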
+4: // Loop horizontally
+ vext.8 q8, q0, q1, #2
+ vext.8 q10, q4, q5, #2
+ vext.8 q9, q0, q1, #4
+ vext.8 q11, q4, q5, #4
+ vadd.i16 q2, q0, q8
+ vadd.i16 q3, q4, q10
+ vadd.i16 q2, q2, q9
+ vadd.i16 q3, q3, q11
+
+ vmull.u16 q6, d0, d0
+ vmlal.u16 q6, d16, d16
+ vmlal.u16 q6, d18, d18
+ vmull.u16 q12, d8, d8
+ vmlal.u16 q12, d20, d20
+ vmlal.u16 q12, d22, d22
+ vmull.u16 q7, d1, d1
+ vmlal.u16 q7, d17, d17
+ vmlal.u16 q7, d19, d19
+ vmull.u16 q13, d9, d9
+ vmlal.u16 q13, d21, d21
+ vmlal.u16 q13, d23, d23
+
+ vext.8 q8, q0, q1, #6
+ vext.8 q10, q4, q5, #6
+ vext.8 q9, q0, q1, #8
+ vext.8 q11, q4, q5, #8
+ vadd.i16 q2, q2, q8
+ vadd.i16 q3, q3, q10
+ vadd.i16 q2, q2, q9
+ vadd.i16 q3, q3, q11
+
+ vmlal.u16 q6, d16, d16
+ vmlal.u16 q6, d1, d1
+ vmlal.u16 q12, d20, d20
+ vmlal.u16 q12, d9, d9
+ vmlal.u16 q7, d17, d17
+ vmlal.u16 q7, d19, d19
+ vmlal.u16 q13, d21, d21
+ vmlal.u16 q13, d23, d23
+
+ subs r5, r5, #8
+ vst1.16 {q2}, [r1, :128]!
+ vst1.16 {q3}, [r11, :128]!
+ vst1.32 {q6, q7}, [r0, :128]!
+ vst1.32 {q12, q13}, [r10, :128]!
+
+ ble 9f
+ tst r7, #2 // LR_HAVE_RIGHT
+ vmov q0, q1
+ vmov q4, q5
+ vld1.16 {q1}, [r3]!
+ vld1.16 {q5}, [r12]!
+ bne 4b // If we don't need to pad, just keep summing.
+ b 3b // If we need to pad, check how many pixels we have left.
+
+9:
+ subs r6, r6, #2
+ ble 0f
+ // Jump to the next row and loop horizontally
+ add r0, r0, r9, lsl #1
+ add r10, r10, r9, lsl #1
+ add r1, r1, r9
+ add r11, r11, r9
+ add r3, r3, r4
+ add r12, r12, r4
+ mov r5, r8
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+sgr_funcs 16
diff --git a/third_party/dav1d/src/arm/32/looprestoration_common.S b/third_party/dav1d/src/arm/32/looprestoration_common.S
new file mode 100644
index 0000000000..b080bb5115
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/looprestoration_common.S
@@ -0,0 +1,453 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2019, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+#define SUM_STRIDE (384+16)
+
+// void dav1d_sgr_box3_v_neon(int32_t *sumsq, int16_t *sum,
+// const int w, const int h,
+// const enum LrEdgeFlags edges);
+function sgr_box3_v_neon, export=1
+ push {r4-r9,lr}
+ ldr r4, [sp, #28]
+ add r12, r3, #2 // Number of output rows to move back
+ mov lr, r3 // Number of input rows to move back
+ add r2, r2, #2 // Actual summed width
+ mov r7, #(4*SUM_STRIDE) // sumsq stride
+ mov r8, #(2*SUM_STRIDE) // sum stride
+ sub r0, r0, #(4*SUM_STRIDE) // sumsq -= stride
+ sub r1, r1, #(2*SUM_STRIDE) // sum -= stride
+
+ tst r4, #4 // LR_HAVE_TOP
+ beq 0f
+ // If have top, read from row -2.
+ sub r5, r0, #(4*SUM_STRIDE)
+ sub r6, r1, #(2*SUM_STRIDE)
+ add lr, lr, #2
+ b 1f
+0:
+ // !LR_HAVE_TOP
+ // If we don't have top, read from row 0 even if
+ // we start writing to row -1.
+ add r5, r0, #(4*SUM_STRIDE)
+ add r6, r1, #(2*SUM_STRIDE)
+1:
+
+ tst r4, #8 // LR_HAVE_BOTTOM
+ beq 1f
+ // LR_HAVE_BOTTOM
+ add r3, r3, #2 // Sum all h+2 lines with the main loop
+ add lr, lr, #2
+1:
+ mov r9, r3 // Backup of h for next loops
+
+1:
+ // Start of horizontal loop; start one vertical filter slice.
+ // Start loading rows into q8-q13 and q0-q2 taking top
+ // padding into consideration.
+ tst r4, #4 // LR_HAVE_TOP
+ vld1.32 {q8, q9}, [r5, :128], r7
+ vld1.16 {q0}, [r6, :128], r8
+ beq 2f
+ // LR_HAVE_TOP
+ vld1.32 {q10, q11}, [r5, :128], r7
+ vld1.16 {q1}, [r6, :128], r8
+ vld1.32 {q12, q13}, [r5, :128], r7
+ vld1.16 {q2}, [r6, :128], r8
+ b 3f
+2: // !LR_HAVE_TOP
+ vmov q10, q8
+ vmov q11, q9
+ vmov q1, q0
+ vmov q12, q8
+ vmov q13, q9
+ vmov q2, q0
+
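+ // Each output row is the sum of three consecutive input rows:
+ // q8-q13 hold the 32-bit sumsq rows and q0-q2 the 16-bit sum rows.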
+3:
+ subs r3, r3, #1
+.macro add3
+ vadd.i32 q8, q8, q10
+ vadd.i32 q9, q9, q11
+ vadd.i16 q0, q0, q1
+ vadd.i32 q8, q8, q12
+ vadd.i32 q9, q9, q13
+ vadd.i16 q0, q0, q2
+ vst1.32 {q8, q9}, [r0, :128], r7
+ vst1.16 {q0}, [r1, :128], r8
+.endm
+ add3
+ vmov q8, q10
+ vmov q9, q11
+ vmov q0, q1
+ vmov q10, q12
+ vmov q11, q13
+ vmov q1, q2
+ ble 4f
+ vld1.32 {q12, q13}, [r5, :128], r7
+ vld1.16 {q2}, [r6, :128], r8
+ b 3b
+
+4:
+ tst r4, #8 // LR_HAVE_BOTTOM
+ bne 5f
+ // !LR_HAVE_BOTTOM
+ // Produce two more rows, extending the already loaded rows.
+ add3
+ vmov q8, q10
+ vmov q9, q11
+ vmov q0, q1
+ add3
+
+5: // End of one vertical slice.
+ subs r2, r2, #8
+ ble 0f
+ // Move pointers back up to the top and loop horizontally.
+ // Input pointers
+ mls r5, r7, lr, r5
+ mls r6, r8, lr, r6
+ // Output pointers
+ mls r0, r7, r12, r0
+ mls r1, r8, r12, r1
+ add r0, r0, #32
+ add r1, r1, #16
+ add r5, r5, #32
+ add r6, r6, #16
+ mov r3, r9
+ b 1b
+
+0:
+ pop {r4-r9,pc}
+.purgem add3
+endfunc
+
+// void dav1d_sgr_box5_v_neon(int32_t *sumsq, int16_t *sum,
+// const int w, const int h,
+// const enum LrEdgeFlags edges);
+function sgr_box5_v_neon, export=1
+ push {r4-r9,lr}
+ vpush {q5-q7}
+ ldr r4, [sp, #76]
+ add r12, r3, #2 // Number of output rows to move back
+ mov lr, r3 // Number of input rows to move back
+ add r2, r2, #8 // Actual summed width
+ mov r7, #(4*SUM_STRIDE) // sumsq stride
+ mov r8, #(2*SUM_STRIDE) // sum stride
+ sub r0, r0, #(4*SUM_STRIDE) // sumsq -= stride
+ sub r1, r1, #(2*SUM_STRIDE) // sum -= stride
+
+ tst r4, #4 // LR_HAVE_TOP
+ beq 0f
+ // If have top, read from row -2.
+ sub r5, r0, #(4*SUM_STRIDE)
+ sub r6, r1, #(2*SUM_STRIDE)
+ add lr, lr, #2
+ b 1f
+0:
+ // !LR_HAVE_TOP
+ // If we don't have top, read from row 0 even if
+ // we start writing to row -1.
+ add r5, r0, #(4*SUM_STRIDE)
+ add r6, r1, #(2*SUM_STRIDE)
+1:
+
+ tst r4, #8 // LR_HAVE_BOTTOM
+ beq 0f
+ // LR_HAVE_BOTTOM
+ add r3, r3, #2 // Handle h+2 lines with the main loop
+ add lr, lr, #2
+ b 1f
+0:
+ // !LR_HAVE_BOTTOM
+ sub r3, r3, #1 // Handle h-1 lines with the main loop
+1:
+ mov r9, r3 // Backup of h for next loops
+
+1:
+ // Start of horizontal loop; start one vertical filter slice.
+ // Start loading rows into q6-q15 and q0-q3,q5 taking top
+ // padding into consideration.
+ tst r4, #4 // LR_HAVE_TOP
+ vld1.32 {q6, q7}, [r5, :128], r7
+ vld1.16 {q0}, [r6, :128], r8
+ beq 2f
+ // LR_HAVE_TOP
+ vld1.32 {q10, q11}, [r5, :128], r7
+ vld1.16 {q2}, [r6, :128], r8
+ vmov q8, q6
+ vmov q9, q7
+ vmov q1, q0
+ vld1.32 {q12, q13}, [r5, :128], r7
+ vld1.16 {q3}, [r6, :128], r8
+ b 3f
+2: // !LR_HAVE_TOP
+ vmov q8, q6
+ vmov q9, q7
+ vmov q1, q0
+ vmov q10, q6
+ vmov q11, q7
+ vmov q2, q0
+ vmov q12, q6
+ vmov q13, q7
+ vmov q3, q0
+
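+ // Each stored row is the sum of five consecutive input rows
+ // (q6-q15 for sumsq, q0-q3,q5 for sum); the pointers then advance
+ // two rows per iteration, so box5 output is produced for every
+ // second row.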
+3:
+ cmp r3, #0
+ beq 4f
+ vld1.32 {q14, q15}, [r5, :128], r7
+ vld1.16 {q5}, [r6, :128], r8
+
+3:
+ // Start of vertical loop
+ subs r3, r3, #2
+.macro add5
+ vadd.i32 q6, q6, q8
+ vadd.i32 q7, q7, q9
+ vadd.i16 q0, q0, q1
+ vadd.i32 q6, q6, q10
+ vadd.i32 q7, q7, q11
+ vadd.i16 q0, q0, q2
+ vadd.i32 q6, q6, q12
+ vadd.i32 q7, q7, q13
+ vadd.i16 q0, q0, q3
+ vadd.i32 q6, q6, q14
+ vadd.i32 q7, q7, q15
+ vadd.i16 q0, q0, q5
+ vst1.32 {q6, q7}, [r0, :128], r7
+ vst1.16 {q0}, [r1, :128], r8
+.endm
+ add5
+.macro shift2
+ vmov q6, q10
+ vmov q7, q11
+ vmov q0, q2
+ vmov q8, q12
+ vmov q9, q13
+ vmov q1, q3
+ vmov q10, q14
+ vmov q11, q15
+ vmov q2, q5
+.endm
+ shift2
+ add r0, r0, r7
+ add r1, r1, r8
+ ble 5f
+ vld1.32 {q12, q13}, [r5, :128], r7
+ vld1.16 {q3}, [r6, :128], r8
+ vld1.32 {q14, q15}, [r5, :128], r7
+ vld1.16 {q5}, [r6, :128], r8
+ b 3b
+
+4:
+ // h == 1, !LR_HAVE_BOTTOM.
+ // Pad the last row with the only content row, and add.
+ vmov q14, q12
+ vmov q15, q13
+ vmov q5, q3
+ add5
+ shift2
+ add r0, r0, r7
+ add r1, r1, r8
+ add5
+ b 6f
+
+5:
+ tst r4, #8 // LR_HAVE_BOTTOM
+ bne 6f
+ // !LR_HAVE_BOTTOM
+ cmp r3, #0
+ bne 5f
+ // The intended three edge rows left; output the one at h-2 and
+ // the past edge one at h.
+ vld1.32 {q12, q13}, [r5, :128], r7
+ vld1.16 {q3}, [r6, :128], r8
+ // Pad the past-edge row from the last content row.
+ vmov q14, q12
+ vmov q15, q13
+ vmov q5, q3
+ add5
+ shift2
+ add r0, r0, r7
+ add r1, r1, r8
+ // The last two rows are already padded properly here.
+ add5
+ b 6f
+
+5:
+ // r3 == -1, two rows left, output one.
+ // Pad the last two rows from the mid one.
+ vmov q12, q10
+ vmov q13, q11
+ vmov q3, q2
+ vmov q14, q10
+ vmov q15, q11
+ vmov q5, q2
+ add5
+ add r0, r0, r7
+ add r1, r1, r8
+ b 6f
+
+6: // End of one vertical slice.
+ subs r2, r2, #8
+ ble 0f
+ // Move pointers back up to the top and loop horizontally.
+ // Input pointers
+ mls r5, r7, lr, r5
+ mls r6, r8, lr, r6
+ // Output pointers
+ mls r0, r7, r12, r0
+ mls r1, r8, r12, r1
+ add r0, r0, #32
+ add r1, r1, #16
+ add r5, r5, #32
+ add r6, r6, #16
+ mov r3, r9
+ b 1b
+
+0:
+ vpop {q5-q7}
+ pop {r4-r9,pc}
+.purgem add5
+endfunc
+
+// void dav1d_sgr_calc_ab1_neon(int32_t *a, int16_t *b,
+// const int w, const int h, const int strength,
+// const int bitdepth_max);
+// void dav1d_sgr_calc_ab2_neon(int32_t *a, int16_t *b,
+// const int w, const int h, const int strength,
+// const int bitdepth_max);
+function sgr_calc_ab1_neon, export=1
+ push {r4-r7,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #84]
+ add r3, r3, #2 // h += 2
+ clz r6, r5
+ vmov.i32 q15, #9 // n
+ movw r5, #455
+ mov lr, #SUM_STRIDE
+ b sgr_calc_ab_neon
+endfunc
+
+function sgr_calc_ab2_neon, export=1
+ push {r4-r7,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #84]
+ add r3, r3, #3 // h += 3
+ clz r6, r5
+ asr r3, r3, #1 // h /= 2
+ vmov.i32 q15, #25 // n
+ mov r5, #164
+ mov lr, #(2*SUM_STRIDE)
+endfunc
+
+function sgr_calc_ab_neon
+ movrel r12, X(sgr_x_by_x)
+ sub r6, r6, #24 // -bitdepth_min_8
+ vld1.8 {q8, q9}, [r12, :128]!
+ add r7, r6, r6 // -2*bitdepth_min_8
+ vmov.i8 q11, #5
+ vmov.i8 d10, #55 // idx of last 5
+ vld1.8 {q10}, [r12, :128]
+ vmov.i8 d11, #72 // idx of last 4
+ vmov.i8 d12, #101 // idx of last 3
+ vmov.i8 d13, #169 // idx of last 2
+ vmov.i8 d14, #254 // idx of last 1
+ vmov.i8 d15, #32 // elements consumed in first vtbl
+ add r2, r2, #2 // w += 2
+ add r12, r2, #7
+ bic r12, r12, #7 // aligned w
+ sub r12, lr, r12 // increment between rows
+ vdup.32 q12, r4
+ sub r0, r0, #(4*(SUM_STRIDE))
+ sub r1, r1, #(2*(SUM_STRIDE))
+ mov r4, r2 // backup of w
+ vsub.i8 q8, q8, q11
+ vsub.i8 q9, q9, q11
+ vsub.i8 q10, q10, q11
+1:
+ vld1.32 {q0, q1}, [r0, :128] // a
+ vld1.16 {q2}, [r1, :128] // b
+ vdup.32 q13, r7 // -2*bitdepth_min_8
+ vdup.16 q14, r6 // -bitdepth_min_8
+ subs r2, r2, #8
+ vrshl.s32 q0, q0, q13
+ vrshl.s32 q1, q1, q13
+ vrshl.s16 q4, q2, q14
+ vmul.i32 q0, q0, q15 // a * n
+ vmul.i32 q1, q1, q15 // a * n
+ vmull.u16 q3, d8, d8 // b * b
+ vmull.u16 q4, d9, d9 // b * b
+ vqsub.u32 q0, q0, q3 // imax(a * n - b * b, 0)
+ vqsub.u32 q1, q1, q4 // imax(a * n - b * b, 0)
+ vmul.i32 q0, q0, q12 // p * s
+ vmul.i32 q1, q1, q12 // p * s
+ vqshrn.u32 d0, q0, #16
+ vqshrn.u32 d1, q1, #16
+ vqrshrn.u16 d0, q0, #4 // imin(z, 255)
+
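+ // Look up x = dav1d_sgr_x_by_x[z]: the first 48 entries (biased by -5
+ // above) come from the vtbl/vtbx lookups, while for larger indices the
+ // value (at most 5) is reconstructed by counting how many of the
+ // thresholds 55/72/101/169/254 the index exceeds.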
+ vcgt.u8 d2, d0, d10 // = -1 if sgr_x_by_x[d0] < 5
+ vcgt.u8 d3, d0, d11 // = -1 if sgr_x_by_x[d0] < 4
+ vtbl.8 d1, {q8, q9}, d0
+ vcgt.u8 d6, d0, d12 // = -1 if sgr_x_by_x[d0] < 3
+ vsub.i8 d9, d0, d15 // indices for vtbx
+ vcgt.u8 d7, d0, d13 // = -1 if sgr_x_by_x[d0] < 2
+ vadd.i8 d2, d2, d3
+ vtbx.8 d1, {q10}, d9
+ vcgt.u8 d8, d0, d14 // = -1 if sgr_x_by_x[d0] < 1
+ vadd.i8 d6, d6, d7
+ vadd.i8 d8, d8, d22
+ vadd.i8 d2, d2, d6
+ vadd.i8 d1, d1, d8
+ vadd.i8 d1, d1, d2
+ vmovl.u8 q0, d1 // x
+
+ vmov.i16 q13, #256
+ vdup.32 q14, r5 // one_by_x
+
+ vmull.u16 q1, d0, d4 // x * BB[i]
+ vmull.u16 q2, d1, d5 // x * BB[i]
+ vmul.i32 q1, q1, q14 // x * BB[i] * sgr_one_by_x
+ vmul.i32 q2, q2, q14 // x * BB[i] * sgr_one_by_x
+ vrshr.s32 q1, q1, #12 // AA[i]
+ vrshr.s32 q2, q2, #12 // AA[i]
+ vsub.i16 q0, q13, q0 // 256 - x
+
+ vst1.32 {q1, q2}, [r0, :128]!
+ vst1.16 {q0}, [r1, :128]!
+ bgt 1b
+
+ subs r3, r3, #1
+ ble 0f
+ add r0, r0, r12, lsl #2
+ add r1, r1, r12, lsl #1
+ mov r2, r4
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r7,pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/looprestoration_tmpl.S b/third_party/dav1d/src/arm/32/looprestoration_tmpl.S
new file mode 100644
index 0000000000..8a9940bb3a
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/looprestoration_tmpl.S
@@ -0,0 +1,600 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2019, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+
+#define FILTER_OUT_STRIDE 384
+
+.macro sgr_funcs bpc
+// void dav1d_sgr_finish_filter1_Xbpc_neon(int16_t *tmp,
+// const pixel *src, const ptrdiff_t stride,
+// const int32_t *a, const int16_t *b,
+// const int w, const int h);
+function sgr_finish_filter1_\bpc\()bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldr r6, [sp, #108]
+ sub r7, r3, #(4*SUM_STRIDE)
+ add r8, r3, #(4*SUM_STRIDE)
+ sub r9, r4, #(2*SUM_STRIDE)
+ add r10, r4, #(2*SUM_STRIDE)
+ mov r11, #SUM_STRIDE
+ mov r12, #FILTER_OUT_STRIDE
+ add lr, r5, #3
+ bic lr, lr, #3 // Aligned width
+.if \bpc == 8
+ sub r2, r2, lr
+.else
+ sub r2, r2, lr, lsl #1
+.endif
+ sub r12, r12, lr
+ sub r11, r11, lr
+ sub r11, r11, #4 // We read 4 extra elements from both a and b
+ mov lr, r5
+ vmov.i16 q14, #3
+ vmov.i32 q15, #3
+1:
+ vld1.16 {q0}, [r9, :128]!
+ vld1.16 {q1}, [r4, :128]!
+ vld1.16 {q2}, [r10, :128]!
+ vld1.32 {q8, q9}, [r7, :128]!
+ vld1.32 {q10, q11}, [r3, :128]!
+ vld1.32 {q12, q13}, [r8, :128]!
+
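+ // Weighted 3x3 box: the centre and edge-adjacent values get weight 4,
+ // the diagonals weight 3, for both the 16-bit sums (-> a) and the
+ // 32-bit sums (-> b); the output is then (a * src + b + (1 << 8)) >> 9.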
+2:
+ subs r5, r5, #4
+ vext.8 d6, d0, d1, #2 // -stride
+ vext.8 d7, d2, d3, #2 // 0
+ vext.8 d8, d4, d5, #2 // +stride
+ vext.8 d9, d0, d1, #4 // +1-stride
+ vext.8 d10, d2, d3, #4 // +1
+ vext.8 d11, d4, d5, #4 // +1+stride
+ vadd.i16 d2, d2, d6 // -1, -stride
+ vadd.i16 d7, d7, d8 // 0, +stride
+ vadd.i16 d0, d0, d9 // -1-stride, +1-stride
+ vadd.i16 d2, d2, d7
+ vadd.i16 d4, d4, d11 // -1+stride, +1+stride
+ vadd.i16 d2, d2, d10 // +1
+ vadd.i16 d0, d0, d4
+
+ vext.8 q3, q8, q9, #4 // -stride
+ vshl.i16 d2, d2, #2
+ vext.8 q4, q8, q9, #8 // +1-stride
+ vext.8 q5, q10, q11, #4 // 0
+ vext.8 q6, q10, q11, #8 // +1
+ vmla.i16 d2, d0, d28 // * 3 -> a
+ vadd.i32 q3, q3, q10 // -stride, -1
+ vadd.i32 q8, q8, q4 // -1-stride, +1-stride
+ vadd.i32 q5, q5, q6 // 0, +1
+ vadd.i32 q8, q8, q12 // -1+stride
+ vadd.i32 q3, q3, q5
+ vext.8 q7, q12, q13, #4 // +stride
+ vext.8 q10, q12, q13, #8 // +1+stride
+.if \bpc == 8
+ vld1.32 {d24[0]}, [r1, :32]! // src
+.else
+ vld1.16 {d24}, [r1, :64]! // src
+.endif
+ vadd.i32 q3, q3, q7 // +stride
+ vadd.i32 q8, q8, q10 // +1+stride
+ vshl.i32 q3, q3, #2
+ vmla.i32 q3, q8, q15 // * 3 -> b
+.if \bpc == 8
+ vmovl.u8 q12, d24 // src
+.endif
+ vmov d0, d1
+ vmlal.u16 q3, d2, d24 // b + a * src
+ vmov d2, d3
+ vrshrn.i32 d6, q3, #9
+ vmov d4, d5
+ vst1.16 {d6}, [r0]!
+
+ ble 3f
+ vmov q8, q9
+ vmov q10, q11
+ vmov q12, q13
+ vld1.16 {d1}, [r9, :64]!
+ vld1.16 {d3}, [r4, :64]!
+ vld1.16 {d5}, [r10, :64]!
+ vld1.32 {q9}, [r7, :128]!
+ vld1.32 {q11}, [r3, :128]!
+ vld1.32 {q13}, [r8, :128]!
+ b 2b
+
+3:
+ subs r6, r6, #1
+ ble 0f
+ mov r5, lr
+ add r0, r0, r12, lsl #1
+ add r1, r1, r2
+ add r3, r3, r11, lsl #2
+ add r7, r7, r11, lsl #2
+ add r8, r8, r11, lsl #2
+ add r4, r4, r11, lsl #1
+ add r9, r9, r11, lsl #1
+ add r10, r10, r11, lsl #1
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+// void dav1d_sgr_finish_filter2_Xbpc_neon(int16_t *tmp,
+// const pixel *src, const ptrdiff_t stride,
+// const int32_t *a, const int16_t *b,
+// const int w, const int h);
+function sgr_finish_filter2_\bpc\()bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldr r6, [sp, #108]
+ add r7, r3, #(4*(SUM_STRIDE))
+ sub r3, r3, #(4*(SUM_STRIDE))
+ add r8, r4, #(2*(SUM_STRIDE))
+ sub r4, r4, #(2*(SUM_STRIDE))
+ mov r9, #(2*SUM_STRIDE)
+ mov r10, #FILTER_OUT_STRIDE
+ add r11, r5, #7
+ bic r11, r11, #7 // Aligned width
+.if \bpc == 8
+ sub r2, r2, r11
+.else
+ sub r2, r2, r11, lsl #1
+.endif
+ sub r10, r10, r11
+ sub r9, r9, r11
+ sub r9, r9, #4 // We read 4 extra elements from a
+ sub r12, r9, #4 // We read 8 extra elements from b
+ mov lr, r5
+
+1:
+ vld1.16 {q0, q1}, [r4, :128]!
+ vld1.16 {q2, q3}, [r8, :128]!
+ vld1.32 {q8, q9}, [r3, :128]!
+ vld1.32 {q11, q12}, [r7, :128]!
+ vld1.32 {q10}, [r3, :128]!
+ vld1.32 {q13}, [r7, :128]!
+
+2:
+ vmov.i16 q14, #5
+ vmov.i16 q15, #6
+ subs r5, r5, #8
+ vext.8 q4, q0, q1, #4 // +1-stride
+ vext.8 q5, q2, q3, #4 // +1+stride
+ vext.8 q6, q0, q1, #2 // -stride
+ vext.8 q7, q2, q3, #2 // +stride
+ vadd.i16 q0, q0, q4 // -1-stride, +1-stride
+ vadd.i16 q5, q2, q5 // -1+stride, +1+stride
+ vadd.i16 q2, q6, q7 // -stride, +stride
+ vadd.i16 q0, q0, q5
+
+ vext.8 q4, q8, q9, #8 // +1-stride
+ vext.8 q5, q9, q10, #8
+ vext.8 q6, q11, q12, #8 // +1+stride
+ vext.8 q7, q12, q13, #8
+ vmul.i16 q0, q0, q14 // * 5
+ vmla.i16 q0, q2, q15 // * 6
+ vadd.i32 q4, q4, q8 // -1-stride, +1-stride
+ vadd.i32 q5, q5, q9
+ vadd.i32 q6, q6, q11 // -1+stride, +1+stride
+ vadd.i32 q7, q7, q12
+ vadd.i32 q4, q4, q6
+ vadd.i32 q5, q5, q7
+ vext.8 q6, q8, q9, #4 // -stride
+ vext.8 q7, q9, q10, #4
+ vext.8 q8, q11, q12, #4 // +stride
+ vext.8 q11, q12, q13, #4
+
+.if \bpc == 8
+ vld1.8 {d4}, [r1, :64]!
+.else
+ vld1.8 {q2}, [r1, :128]!
+.endif
+
+ vmov.i32 q14, #5
+ vmov.i32 q15, #6
+
+ vadd.i32 q6, q6, q8 // -stride, +stride
+ vadd.i32 q7, q7, q11
+ vmul.i32 q4, q4, q14 // * 5
+ vmla.i32 q4, q6, q15 // * 6
+ vmul.i32 q5, q5, q14 // * 5
+ vmla.i32 q5, q7, q15 // * 6
+
+.if \bpc == 8
+ vmovl.u8 q2, d4
+.endif
+ vmlal.u16 q4, d0, d4 // b + a * src
+ vmlal.u16 q5, d1, d5 // b + a * src
+ vmov q0, q1
+ vrshrn.i32 d8, q4, #9
+ vrshrn.i32 d9, q5, #9
+ vmov q2, q3
+ vst1.16 {q4}, [r0, :128]!
+
+ ble 3f
+ vmov q8, q10
+ vmov q11, q13
+ vld1.16 {q1}, [r4, :128]!
+ vld1.16 {q3}, [r8, :128]!
+ vld1.32 {q9, q10}, [r3, :128]!
+ vld1.32 {q12, q13}, [r7, :128]!
+ b 2b
+
+3:
+ subs r6, r6, #1
+ ble 0f
+ mov r5, lr
+ add r0, r0, r10, lsl #1
+ add r1, r1, r2
+ add r3, r3, r9, lsl #2
+ add r7, r7, r9, lsl #2
+ add r4, r4, r12, lsl #1
+ add r8, r8, r12, lsl #1
+
+ vld1.32 {q8, q9}, [r3, :128]!
+ vld1.16 {q0, q1}, [r4, :128]!
+ vld1.32 {q10}, [r3, :128]!
+
+ vmov.i16 q12, #5
+ vmov.i16 q13, #6
+
+4:
+ subs r5, r5, #8
+ vext.8 q3, q0, q1, #4 // +1
+ vext.8 q2, q0, q1, #2 // 0
+ vadd.i16 q0, q0, q3 // -1, +1
+
+ vext.8 q4, q8, q9, #4 // 0
+ vext.8 q5, q9, q10, #4
+ vext.8 q6, q8, q9, #8 // +1
+ vext.8 q7, q9, q10, #8
+ vmul.i16 q2, q2, q13 // * 6
+ vmla.i16 q2, q0, q12 // * 5 -> a
+.if \bpc == 8
+ vld1.8 {d22}, [r1, :64]!
+.else
+ vld1.16 {q11}, [r1, :128]!
+.endif
+ vadd.i32 q8, q8, q6 // -1, +1
+ vadd.i32 q9, q9, q7
+.if \bpc == 8
+ vmovl.u8 q11, d22
+.endif
+ vmul.i32 q4, q4, q15 // * 6
+ vmla.i32 q4, q8, q14 // * 5 -> b
+ vmul.i32 q5, q5, q15 // * 6
+ vmla.i32 q5, q9, q14 // * 5 -> b
+
+ vmlal.u16 q4, d4, d22 // b + a * src
+ vmlal.u16 q5, d5, d23
+ vmov q0, q1
+ vrshrn.i32 d8, q4, #8
+ vrshrn.i32 d9, q5, #8
+ vmov q8, q10
+ vst1.16 {q4}, [r0, :128]!
+
+ ble 5f
+ vld1.16 {q1}, [r4, :128]!
+ vld1.32 {q9, q10}, [r3, :128]!
+ b 4b
+
+5:
+ subs r6, r6, #1
+ ble 0f
+ mov r5, lr
+ sub r3, r3, r11, lsl #2 // Rewind r3/r4 to where they started
+ sub r4, r4, r11, lsl #1
+ add r0, r0, r10, lsl #1
+ add r1, r1, r2
+ sub r3, r3, #16
+ sub r4, r4, #16
+ b 1b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+
+// void dav1d_sgr_weighted1_Xbpc_neon(pixel *dst, const ptrdiff_t dst_stride,
+// const pixel *src, const ptrdiff_t src_stride,
+// const int16_t *t1, const int w, const int h,
+// const int wt, const int bitdepth_max);
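+//
+// Illustrative per-pixel sketch of the blend below (derived from the assembly,
+// not the C reference; clip_pixel() stands in for the clamp to pixel range):
+//   u      = src[x] << 4;
+//   v      = (u << 7) + wt * (t1[x] - u);
+//   dst[x] = clip_pixel((v + (1 << 10)) >> 11);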
+function sgr_weighted1_\bpc\()bpc_neon, export=1
+ push {r4-r9,lr}
+ ldrd r4, r5, [sp, #28]
+ ldrd r6, r7, [sp, #36]
+.if \bpc == 16
+ ldr r8, [sp, #44]
+.endif
+ vdup.16 d31, r7
+ cmp r6, #2
+.if \bpc == 16
+ vdup.16 q14, r8
+.endif
+ add r9, r0, r1
+ add r12, r2, r3
+ add lr, r4, #2*FILTER_OUT_STRIDE
+ mov r7, #(4*FILTER_OUT_STRIDE)
+ lsl r1, r1, #1
+ lsl r3, r3, #1
+ add r8, r5, #7
+ bic r8, r8, #7 // Aligned width
+.if \bpc == 8
+ sub r1, r1, r8
+ sub r3, r3, r8
+.else
+ sub r1, r1, r8, lsl #1
+ sub r3, r3, r8, lsl #1
+.endif
+ sub r7, r7, r8, lsl #1
+ mov r8, r5
+ blt 2f
+1:
+.if \bpc == 8
+ vld1.8 {d0}, [r2, :64]!
+ vld1.8 {d16}, [r12, :64]!
+.else
+ vld1.16 {q0}, [r2, :128]!
+ vld1.16 {q8}, [r12, :128]!
+.endif
+ vld1.16 {q1}, [r4, :128]!
+ vld1.16 {q9}, [lr, :128]!
+ subs r5, r5, #8
+.if \bpc == 8
+ vshll.u8 q0, d0, #4 // u
+ vshll.u8 q8, d16, #4 // u
+.else
+ vshl.i16 q0, q0, #4 // u
+ vshl.i16 q8, q8, #4 // u
+.endif
+ vsub.i16 q1, q1, q0 // t1 - u
+ vsub.i16 q9, q9, q8 // t1 - u
+ vshll.u16 q2, d0, #7 // u << 7
+ vshll.u16 q3, d1, #7 // u << 7
+ vshll.u16 q10, d16, #7 // u << 7
+ vshll.u16 q11, d17, #7 // u << 7
+ vmlal.s16 q2, d2, d31 // v
+ vmlal.s16 q3, d3, d31 // v
+ vmlal.s16 q10, d18, d31 // v
+ vmlal.s16 q11, d19, d31 // v
+.if \bpc == 8
+ vrshrn.i32 d4, q2, #11
+ vrshrn.i32 d5, q3, #11
+ vrshrn.i32 d20, q10, #11
+ vrshrn.i32 d21, q11, #11
+ vqmovun.s16 d4, q2
+ vqmovun.s16 d20, q10
+ vst1.8 {d4}, [r0, :64]!
+ vst1.8 {d20}, [r9, :64]!
+.else
+ vqrshrun.s32 d4, q2, #11
+ vqrshrun.s32 d5, q3, #11
+ vqrshrun.s32 d20, q10, #11
+ vqrshrun.s32 d21, q11, #11
+ vmin.u16 q2, q2, q14
+ vmin.u16 q10, q10, q14
+ vst1.16 {q2}, [r0, :128]!
+ vst1.16 {q10}, [r9, :128]!
+.endif
+ bgt 1b
+
+ sub r6, r6, #2
+ cmp r6, #1
+ blt 0f
+ mov r5, r8
+ add r0, r0, r1
+ add r9, r9, r1
+ add r2, r2, r3
+ add r12, r12, r3
+ add r4, r4, r7
+ add lr, lr, r7
+ beq 2f
+ b 1b
+
+2:
+.if \bpc == 8
+ vld1.8 {d0}, [r2, :64]!
+.else
+ vld1.16 {q0}, [r2, :128]!
+.endif
+ vld1.16 {q1}, [r4, :128]!
+ subs r5, r5, #8
+.if \bpc == 8
+ vshll.u8 q0, d0, #4 // u
+.else
+ vshl.i16 q0, q0, #4 // u
+.endif
+ vsub.i16 q1, q1, q0 // t1 - u
+ vshll.u16 q2, d0, #7 // u << 7
+ vshll.u16 q3, d1, #7 // u << 7
+ vmlal.s16 q2, d2, d31 // v
+ vmlal.s16 q3, d3, d31 // v
+.if \bpc == 8
+ vrshrn.i32 d4, q2, #11
+ vrshrn.i32 d5, q3, #11
+ vqmovun.s16 d2, q2
+ vst1.8 {d2}, [r0, :64]!
+.else
+ vqrshrun.s32 d4, q2, #11
+ vqrshrun.s32 d5, q3, #11
+ vmin.u16 q2, q2, q14
+ vst1.16 {q2}, [r0, :128]!
+.endif
+ bgt 2b
+0:
+ pop {r4-r9,pc}
+endfunc
+
+// void dav1d_sgr_weighted2_Xbpc_neon(pixel *dst, const ptrdiff_t stride,
+// const pixel *src, const ptrdiff_t src_stride,
+// const int16_t *t1, const int16_t *t2,
+// const int w, const int h,
+// const int16_t wt[2], const int bitdepth_max);
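+//
+// Illustrative per-pixel sketch (derived from the assembly, not the C
+// reference; clip_pixel() stands in for the clamp to pixel range):
+//   u      = src[x] << 4;
+//   v      = (u << 7) + wt[0] * (t1[x] - u) + wt[1] * (t2[x] - u);
+//   dst[x] = clip_pixel((v + (1 << 10)) >> 11);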
+function sgr_weighted2_\bpc\()bpc_neon, export=1
+ push {r4-r11,lr}
+ ldrd r4, r5, [sp, #36]
+ ldrd r6, r7, [sp, #44]
+.if \bpc == 8
+ ldr r8, [sp, #52]
+.else
+ ldrd r8, r9, [sp, #52]
+.endif
+ cmp r7, #2
+ add r10, r0, r1
+ add r11, r2, r3
+ add r12, r4, #2*FILTER_OUT_STRIDE
+ add lr, r5, #2*FILTER_OUT_STRIDE
+ vld2.16 {d30[], d31[]}, [r8] // wt[0], wt[1]
+.if \bpc == 16
+ vdup.16 q14, r9
+.endif
+ mov r8, #4*FILTER_OUT_STRIDE
+ lsl r1, r1, #1
+ lsl r3, r3, #1
+ add r9, r6, #7
+ bic r9, r9, #7 // Aligned width
+.if \bpc == 8
+ sub r1, r1, r9
+ sub r3, r3, r9
+.else
+ sub r1, r1, r9, lsl #1
+ sub r3, r3, r9, lsl #1
+.endif
+ sub r8, r8, r9, lsl #1
+ mov r9, r6
+ blt 2f
+1:
+.if \bpc == 8
+ vld1.8 {d0}, [r2, :64]!
+ vld1.8 {d16}, [r11, :64]!
+.else
+ vld1.16 {q0}, [r2, :128]!
+ vld1.16 {q8}, [r11, :128]!
+.endif
+ vld1.16 {q1}, [r4, :128]!
+ vld1.16 {q9}, [r12, :128]!
+ vld1.16 {q2}, [r5, :128]!
+ vld1.16 {q10}, [lr, :128]!
+ subs r6, r6, #8
+.if \bpc == 8
+ vshll.u8 q0, d0, #4 // u
+ vshll.u8 q8, d16, #4 // u
+.else
+ vshl.i16 q0, q0, #4 // u
+ vshl.i16 q8, q8, #4 // u
+.endif
+ vsub.i16 q1, q1, q0 // t1 - u
+ vsub.i16 q2, q2, q0 // t2 - u
+ vsub.i16 q9, q9, q8 // t1 - u
+ vsub.i16 q10, q10, q8 // t2 - u
+ vshll.u16 q3, d0, #7 // u << 7
+ vshll.u16 q0, d1, #7 // u << 7
+ vshll.u16 q11, d16, #7 // u << 7
+ vshll.u16 q8, d17, #7 // u << 7
+ vmlal.s16 q3, d2, d30 // wt[0] * (t1 - u)
+ vmlal.s16 q3, d4, d31 // wt[1] * (t2 - u)
+ vmlal.s16 q0, d3, d30 // wt[0] * (t1 - u)
+ vmlal.s16 q0, d5, d31 // wt[1] * (t2 - u)
+ vmlal.s16 q11, d18, d30 // wt[0] * (t1 - u)
+ vmlal.s16 q11, d20, d31 // wt[1] * (t2 - u)
+ vmlal.s16 q8, d19, d30 // wt[0] * (t1 - u)
+ vmlal.s16 q8, d21, d31 // wt[1] * (t2 - u)
+.if \bpc == 8
+ vrshrn.i32 d6, q3, #11
+ vrshrn.i32 d7, q0, #11
+ vrshrn.i32 d22, q11, #11
+ vrshrn.i32 d23, q8, #11
+ vqmovun.s16 d6, q3
+ vqmovun.s16 d22, q11
+ vst1.8 {d6}, [r0, :64]!
+ vst1.8 {d22}, [r10, :64]!
+.else
+ vqrshrun.s32 d6, q3, #11
+ vqrshrun.s32 d7, q0, #11
+ vqrshrun.s32 d22, q11, #11
+ vqrshrun.s32 d23, q8, #11
+ vmin.u16 q3, q3, q14
+ vmin.u16 q11, q11, q14
+ vst1.16 {q3}, [r0, :128]!
+ vst1.16 {q11}, [r10, :128]!
+.endif
+ bgt 1b
+
+ subs r7, r7, #2
+ cmp r7, #1
+ blt 0f
+ mov r6, r9
+ add r0, r0, r1
+ add r10, r10, r1
+ add r2, r2, r3
+ add r11, r11, r3
+ add r4, r4, r8
+ add r12, r12, r8
+ add r5, r5, r8
+ add lr, lr, r8
+ beq 2f
+ b 1b
+
+2:
+.if \bpc == 8
+ vld1.8 {d0}, [r2, :64]!
+.else
+ vld1.16 {q0}, [r2, :128]!
+.endif
+ vld1.16 {q1}, [r4, :128]!
+ vld1.16 {q2}, [r5, :128]!
+ subs r6, r6, #8
+.if \bpc == 8
+ vshll.u8 q0, d0, #4 // u
+.else
+ vshl.i16 q0, q0, #4 // u
+.endif
+ vsub.i16 q1, q1, q0 // t1 - u
+ vsub.i16 q2, q2, q0 // t2 - u
+ vshll.u16 q3, d0, #7 // u << 7
+ vshll.u16 q0, d1, #7 // u << 7
+ vmlal.s16 q3, d2, d30 // wt[0] * (t1 - u)
+ vmlal.s16 q3, d4, d31 // wt[1] * (t2 - u)
+ vmlal.s16 q0, d3, d30 // wt[0] * (t1 - u)
+ vmlal.s16 q0, d5, d31 // wt[1] * (t2 - u)
+.if \bpc == 8
+ vrshrn.i32 d6, q3, #11
+ vrshrn.i32 d7, q0, #11
+ vqmovun.s16 d6, q3
+ vst1.8 {d6}, [r0, :64]!
+.else
+ vqrshrun.s32 d6, q3, #11
+ vqrshrun.s32 d7, q0, #11
+ vmin.u16 q3, q3, q14
+ vst1.16 {q3}, [r0, :128]!
+.endif
+ bgt 2b
+0:
+ pop {r4-r11,pc}
+endfunc
+.endm
diff --git a/third_party/dav1d/src/arm/32/mc.S b/third_party/dav1d/src/arm/32/mc.S
new file mode 100644
index 0000000000..1b60a7bdb3
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/mc.S
@@ -0,0 +1,3340 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Janne Grunau
+ * Copyright © 2018, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+.macro avg dst0, dst1, t0, t1, t2, t3
+ vld1.16 {\t0,\t1}, [r2, :128]!
+ vld1.16 {\t2,\t3}, [r3, :128]!
+ vadd.i16 \t0, \t0, \t2
+ vadd.i16 \t1, \t1, \t3
+ vqrshrun.s16 \dst0, \t0, #5
+ vqrshrun.s16 \dst1, \t1, #5
+.endm
+
+.macro w_avg dst0, dst1, t0, t1, t2, t3
+ vld1.16 {\t0,\t1}, [r2, :128]!
+ vld1.16 {\t2,\t3}, [r3, :128]!
+ vsub.i16 \t0, \t2, \t0
+ vsub.i16 \t1, \t3, \t1
+ vqdmulh.s16 \t0, \t0, q15
+ vqdmulh.s16 \t1, \t1, q15
+ vadd.i16 \t0, \t2, \t0
+ vadd.i16 \t1, \t3, \t1
+ vqrshrun.s16 \dst0, \t0, #4
+ vqrshrun.s16 \dst1, \t1, #4
+.endm
+
+.macro mask dst0, dst1, t0, t1, t2, t3
+ vld1.8 {q14}, [lr, :128]!
+ vld1.16 {\t0,\t1}, [r2, :128]!
+ vmul.i8 q14, q14, q15
+ vld1.16 {\t2,\t3}, [r3, :128]!
+ vshll.i8 q13, d28, #8
+ vshll.i8 q14, d29, #8
+ vsub.i16 \t0, \t2, \t0
+ vsub.i16 \t1, \t3, \t1
+ vqdmulh.s16 \t0, \t0, q13
+ vqdmulh.s16 \t1, \t1, q14
+ vadd.i16 \t0, \t2, \t0
+ vadd.i16 \t1, \t3, \t1
+ vqrshrun.s16 \dst0, \t0, #4
+ vqrshrun.s16 \dst1, \t1, #4
+.endm
+
+.macro bidir_fn type
+function \type\()_8bpc_neon, export=1
+ push {r4-r6,lr}
+ ldrd r4, r5, [sp, #16]
+ clz r4, r4
+.ifnc \type, avg
+ ldr lr, [sp, #24]
+.endif
+.ifc \type, w_avg
+ vdup.s16 q15, lr
+ vneg.s16 q15, q15
+ vshl.i16 q15, q15, #11
+.endif
+.ifc \type, mask
+ vmov.i8 q15, #256-2
+.endif
+ adr r12, L(\type\()_tbl)
+ sub r4, r4, #24
+ ldr r4, [r12, r4, lsl #2]
+ \type d16, d17, q0, q1, q2, q3
+ add r12, r12, r4
+ bx r12
+
+ .align 2
+L(\type\()_tbl):
+ .word 1280f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 4f - L(\type\()_tbl) + CONFIG_THUMB
+
+4:
+ add r6, r0, r1
+ lsl r1, r1, #1
+ cmp r5, #4
+ vst1.32 {d16[0]}, [r0, :32], r1
+ vst1.32 {d16[1]}, [r6, :32], r1
+ vst1.32 {d17[0]}, [r0, :32], r1
+ vst1.32 {d17[1]}, [r6, :32], r1
+ beq 0f
+ \type d18, d19, q0, q1, q2, q3
+ cmp r5, #8
+ vst1.32 {d18[0]}, [r0, :32], r1
+ vst1.32 {d18[1]}, [r6, :32], r1
+ vst1.32 {d19[0]}, [r0, :32], r1
+ vst1.32 {d19[1]}, [r6, :32], r1
+ beq 0f
+ \type d16, d17, q0, q1, q2, q3
+ vst1.32 {d16[0]}, [r0, :32], r1
+ vst1.32 {d16[1]}, [r6, :32], r1
+ \type d18, d19, q0, q1, q2, q3
+ vst1.32 {d17[0]}, [r0, :32], r1
+ vst1.32 {d17[1]}, [r6, :32], r1
+ vst1.32 {d18[0]}, [r0, :32], r1
+ vst1.32 {d18[1]}, [r6, :32], r1
+ vst1.32 {d19[0]}, [r0, :32], r1
+ vst1.32 {d19[1]}, [r6, :32], r1
+ pop {r4-r6,pc}
+80:
+ add r6, r0, r1
+ lsl r1, r1, #1
+8:
+ vst1.8 {d16}, [r0, :64], r1
+ \type d18, d19, q0, q1, q2, q3
+ vst1.8 {d17}, [r6, :64], r1
+ vst1.8 {d18}, [r0, :64], r1
+ subs r5, r5, #4
+ vst1.8 {d19}, [r6, :64], r1
+ ble 0f
+ \type d16, d17, q0, q1, q2, q3
+ b 8b
+160:
+ add r6, r0, r1
+ lsl r1, r1, #1
+16:
+ \type d18, d19, q0, q1, q2, q3
+ vst1.8 {q8}, [r0, :128], r1
+ \type d20, d21, q0, q1, q2, q3
+ vst1.8 {q9}, [r6, :128], r1
+ \type d22, d23, q0, q1, q2, q3
+ vst1.8 {q10}, [r0, :128], r1
+ subs r5, r5, #4
+ vst1.8 {q11}, [r6, :128], r1
+ ble 0f
+ \type d16, d17, q0, q1, q2, q3
+ b 16b
+320:
+ add r6, r0, r1
+ lsl r1, r1, #1
+32:
+ \type d18, d19, q0, q1, q2, q3
+ \type d20, d21, q0, q1, q2, q3
+ vst1.8 {q8, q9}, [r0, :128], r1
+ \type d22, d23, q0, q1, q2, q3
+ subs r5, r5, #2
+ vst1.8 {q10, q11}, [r6, :128], r1
+ ble 0f
+ \type d16, d17, q0, q1, q2, q3
+ b 32b
+640:
+ add r6, r0, #32
+64:
+ \type d18, d19, q0, q1, q2, q3
+ \type d20, d21, q0, q1, q2, q3
+ \type d22, d23, q0, q1, q2, q3
+ vst1.8 {q8, q9}, [r0, :128], r1
+ \type d16, d17, q0, q1, q2, q3
+ vst1.8 {q10, q11}, [r6, :128], r1
+ \type d18, d19, q0, q1, q2, q3
+ \type d20, d21, q0, q1, q2, q3
+ vst1.8 {q8, q9}, [r0, :128], r1
+ \type d22, d23, q0, q1, q2, q3
+ subs r5, r5, #2
+ vst1.8 {q10, q11}, [r6, :128], r1
+ ble 0f
+ \type d16, d17, q0, q1, q2, q3
+ b 64b
+1280:
+ sub r1, r1, #32
+ add r6, r0, #64
+128:
+ \type d18, d19, q0, q1, q2, q3
+ \type d20, d21, q0, q1, q2, q3
+ \type d22, d23, q0, q1, q2, q3
+ vst1.8 {q8, q9}, [r0, :128]!
+ \type d16, d17, q0, q1, q2, q3
+ vst1.8 {q10, q11}, [r0, :128], r1
+ \type d18, d19, q0, q1, q2, q3
+ \type d20, d21, q0, q1, q2, q3
+ vst1.8 {q8, q9}, [r6, :128]!
+ \type d22, d23, q0, q1, q2, q3
+ subs r5, r5, #1
+ vst1.8 {q10, q11}, [r6, :128], r1
+ ble 0f
+ \type d16, d17, q0, q1, q2, q3
+ b 128b
+
+0:
+ pop {r4-r6,pc}
+endfunc
+.endm
+
+bidir_fn avg
+bidir_fn w_avg
+bidir_fn mask
+
+
+.macro w_mask_fn type
+function w_mask_\type\()_8bpc_neon, export=1
+ push {r4-r9,lr}
+ ldrd r4, r5, [sp, #28]
+ ldrd r6, r7, [sp, #36]
+ clz r8, r4
+ adr r9, L(w_mask_\type\()_tbl)
+ sub r8, r8, #24
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ movw r12, #6903
+ vdup.16 q14, r12
+.if \type == 444
+ vmov.i8 q15, #64
+.elseif \type == 422
+ vdup.8 d0, r7 // d0[] <- sign
+ vmov.i8 d30, #129
+ vsub.i8 d30, d30, d0 // 129 - sign
+.elseif \type == 420
+ vdup.16 q0, r7 // d0[] <- sign
+ vmov.i16 q15, #256
+ vsub.i16 q15, q15, q0 // 256 - sign
+.endif
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r9
+
+ .align 2
+L(w_mask_\type\()_tbl):
+ .word 1280f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 640f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 320f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 160f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 8f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 4f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+
+4:
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]! // tmp1 (four rows at once)
+ vld1.16 {d4, d5, d6, d7}, [r3, :128]! // tmp2 (four rows at once)
+ subs r5, r5, #4
+ vsub.i16 q8, q2, q0 // tmp2-tmp1
+ vsub.i16 q9, q3, q1
+ vabd.s16 q10, q0, q2 // (abs(tmp1[x] - tmp2[x]))
+ vabd.s16 q11, q1, q3
+ vqsub.u16 q10, q14, q10 // 6903 - abs ()
+ vqsub.u16 q11, q14, q11
+ vshr.s16 q10, q10, #8 // 64-m = (6903 - abs()) >> 8
+ vshr.s16 q11, q11, #8
+ vshl.s16 q12, q10, #9 // (64-m)<<9
+ vshl.s16 q13, q11, #9
+ vqdmulh.s16 q12, q12, q8 // ((tmp2-tmp1)*(64-m)<<9)>>15
+ vqdmulh.s16 q13, q13, q9
+ vadd.i16 q12, q12, q0 // (((tmp2-tmp1)*(64-m)<<9)>>15) + tmp1
+ vadd.i16 q13, q13, q1
+ vqrshrun.s16 d24, q12, #4 // (((((tmp2-tmp1)*(64-m)<<9)>>15) + tmp1) + 8) >> 4
+ vqrshrun.s16 d25, q13, #4
+.if \type == 444
+ vmovn.u16 d20, q10 // 64 - m
+ vmovn.u16 d21, q11
+ vsub.i8 q10, q15, q10 // m
+ vst1.8 {d20, d21}, [r6, :128]!
+.elseif \type == 422
+ vpadd.s16 d20, d20, d21 // (64 - m) + (64 - n) (column wise addition)
+ vpadd.s16 d21, d22, d23
+ vmovn.s16 d6, q10
+ vhsub.u8 d6, d30, d6 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
+ vst1.8 {d6}, [r6, :64]!
+.elseif \type == 420
+ vadd.s16 d20, d20, d21 // (64 - my1) + (64 - my2) (row wise addition)
+ vadd.s16 d21, d22, d23
+ vpadd.s16 d20, d20, d21 // (128 - m) + (128 - n) (column wise addition)
+ vsub.s16 d20, d30, d20 // (256 - sign) - ((128 - m) + (128 - n))
+ vrshrn.u16 d20, q10, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
+ vst1.32 {d20[0]}, [r6, :32]!
+.endif
+ vst1.32 {d24[0]}, [r0, :32], r1
+ vst1.32 {d24[1]}, [r12, :32], r1
+ vst1.32 {d25[0]}, [r0, :32], r1
+ vst1.32 {d25[1]}, [r12, :32], r1
+ bgt 4b
+ pop {r4-r9,pc}
+8:
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]! // tmp1y1, tmp1y2
+ vld1.16 {d4, d5, d6, d7}, [r3, :128]! // tmp2y1, tmp2y2
+ subs r5, r5, #2
+ vsub.i16 q8, q2, q0 // tmp2y1 - tmp1y1
+ vsub.i16 q9, q3, q1 // tmp2y2 - tmp1y2
+ vabd.s16 q10, q0, q2 // abs(tmp1y1 - tmp2y1)
+ vabd.s16 q11, q1, q3 // abs(tmp1y2 - tmp2y2)
+ vqsub.u16 q10, q14, q10 // 6903 - abs(tmp1y1 - tmp2y1)
+ vqsub.u16 q11, q14, q11 // 6903 - abs(tmp1y2 - tmp2y2)
+ vshr.s16 q10, q10, #8 // 64 - my1 = 6903 - abs(tmp1y1 - tmp2y1) >> 8
+ vshr.s16 q11, q11, #8 // 64 - my2 = 6903 - abs(tmp1y2 - tmp2y2) >> 8
+ vshl.s16 q12, q10, #9 // (64 - my1) << 9
+ vshl.s16 q13, q11, #9 // (64 - my2) << 9
+ vqdmulh.s16 q12, q12, q8 // ((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15
+ vqdmulh.s16 q13, q13, q9 // ((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15
+ vadd.s16 q12, q12, q0 // (((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15) + tmp1y1
+ vadd.s16 q13, q13, q1 // (((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15) + tmp1y2
+ vqrshrun.s16 d24, q12, #4 // (((((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15) + tmp1y1) + 8) >> 4
+ vqrshrun.s16 d25, q13, #4 // (((((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15) + tmp1y2) + 8) >> 4
+.if \type == 444
+ vmovn.u16 d20, q10 // 64 - m
+ vmovn.u16 d21, q11
+ vsub.i8 q10, q15, q10 // m
+ vst1.8 {d20, d21}, [r6, :128]!
+.elseif \type == 422
+ vpadd.s16 d20, d20, d21 // (64 - my1) + (64 - ny1) (column wise addition)
+ vpadd.s16 d21, d22, d23 // (64 - my2) + (64 - ny2)
+ vmovn.s16 d20, q10
+ vhsub.u8 d20, d30, d20 // ((129 - sign) - ((64 - my1/y2) + (64 - ny1/y2))) >> 1
+ vst1.8 {d20}, [r6, :64]!
+.elseif \type == 420
+ vadd.s16 q10, q10, q11 // (64 - my1) + (64 - my2) (row wise addition)
+ vpadd.s16 d20, d20, d21 // (128 - m) + (128 - n) (column wise addition)
+ vsub.s16 d20, d30, d20 // (256 - sign) - ((128 - m) + (128 - n))
+ vrshrn.u16 d20, q10, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
+ vst1.32 {d20[0]}, [r6, :32]!
+.endif
+ vst1.16 {d24}, [r0, :64], r1
+ vst1.16 {d25}, [r12, :64], r1
+ bgt 8b
+ pop {r4-r9,pc}
+1280:
+640:
+320:
+160:
+ sub r1, r1, r4
+.if \type == 444
+ add lr, r6, r4
+.elseif \type == 422
+ add lr, r6, r4, lsr #1
+.endif
+ add r9, r3, r4, lsl #1
+ add r7, r2, r4, lsl #1
+161:
+ mov r8, r4
+16:
+ vld1.16 {d0, d1, d2, d3}, [r2, :128]! // tmp1y1
+ vld1.16 {d4, d5, d6, d7}, [r3, :128]! // tmp2y1
+ vld1.16 {d16, d17, d18, d19}, [r7, :128]! // tmp1y2
+ subs r8, r8, #16
+ vsub.i16 q2, q2, q0 // tmp2y1 - tmp1y1
+ vsub.i16 q3, q3, q1
+ vabs.s16 q10, q2 // abs(tmp2y1 - tmp1y1)
+ vabs.s16 q11, q3
+ vqsub.u16 q10, q14, q10 // 6903 - abs(tmp1y1 - tmp2y1)
+ vqsub.u16 q11, q14, q11
+ vshr.s16 q10, q10, #8 // 64 - my1 = 6903 - abs(tmp1y1 - tmp2y1) >> 8
+ vshr.s16 q11, q11, #8
+ vshl.s16 q12, q10, #9 // (64 - my1) << 9
+ vshl.s16 q13, q11, #9
+ vqdmulh.s16 q12, q12, q2 // ((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15
+ vqdmulh.s16 q13, q13, q3
+ vadd.i16 q12, q12, q0 // (((tmp2y1 - tmp1y1) * (64 - my1) << 9) >> 15) + tmp1y1
+ vadd.i16 q13, q13, q1
+ vld1.16 {d0, d1, d2, d3}, [r9, :128]! // tmp2y2
+.if \type == 444
+ vmovn.u16 d20, q10 // 64 - my1
+ vmovn.u16 d21, q11
+ vsub.i8 q10, q15, q10 // my1
+ vst1.8 {d20, d21}, [r6, :128]!
+.elseif \type == 422
+ vpadd.s16 d20, d20, d21 // (64 - my1) + (64 - ny1) (column wise addition)
+ vpadd.s16 d21, d22, d23
+ vmovn.s16 d20, q10
+ vhsub.u8 d20, d30, d20 // ((129 - sign) - ((64 - my1) + (64 - ny1))) >> 1
+ vst1.8 {d20}, [r6, :64]!
+.endif
+ vqrshrun.s16 d24, q12, #4 // (((((tmp2y1 - tmp1y1)*(64 - my1) << 9) >> 15) + tmp1y1) + 8) >> 4
+ vqrshrun.s16 d25, q13, #4
+ vsub.i16 q0, q0, q8 // tmp2y2 - tmp1y2
+ vsub.i16 q1, q1, q9
+ vst1.16 {d24, d25}, [r0, :128]! // store dsty1
+ vabs.s16 q2, q0 // abs(tmp2y2 - tmp1y2)
+ vabs.s16 q3, q1
+ vqsub.u16 q2, q14, q2 // 6903 - abs(tmp2y2 - tmp1y2)
+ vqsub.u16 q3, q14, q3
+ vshr.s16 q2, q2, #8 // (6903 - abs(tmp2y2 - tmp1y2)) >> 8
+ vshr.s16 q3, q3, #8
+ vshl.s16 q12, q2, #9 // (64 - my2) << 9
+ vshl.s16 q13, q3, #9
+.if \type == 444
+ vmovn.u16 d4, q2 // 64 - my2
+ vmovn.u16 d5, q3
+ vsub.i8 q2, q15, q2 // my2
+ vst1.8 {d4, d5}, [lr, :128]!
+.elseif \type == 422
+ vpadd.s16 d4, d4, d5 // (64 - my2) + (64 - ny2) (column wise addition)
+ vpadd.s16 d5, d6, d7
+ vmovn.s16 d4, q2
+ vhsub.u8 d4, d30, d4 // ((129 - sign) - ((64 - my2) + (64 - ny2))) >> 1
+ vst1.8 {d4}, [lr, :64]!
+.elseif \type == 420
+ vadd.s16 q10, q10, q2 // (64 - my1) + (64 - my2) (row wise addition)
+ vadd.s16 q11, q11, q3
+ vpadd.s16 d20, d20, d21 // (128 - m) + (128 - n) (column wise addition)
+ vpadd.s16 d21, d22, d23
+ vsub.s16 q10, q15, q10 // (256 - sign) - ((128 - m) + (128 - n))
+ vrshrn.u16 d20, q10, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
+ vst1.8 {d20}, [r6, :64]!
+.endif
+ vqdmulh.s16 q12, q12, q0 // ((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15
+ vqdmulh.s16 q13, q13, q1
+ vadd.i16 q12, q12, q8 // (((tmp2y2 - tmp1y2) * (64 - my2) << 9) >> 15) + tmp1y2
+ vadd.i16 q13, q13, q9
+ vqrshrun.s16 d24, q12, #4 // (((((tmp2y2 - tmp1y2)*(64 - my2) << 9) >> 15) + tmp1y2) + 8) >> 4
+ vqrshrun.s16 d25, q13, #4
+ vst1.16 {d24, d25}, [r12, :128]! // store dsty2
+ bgt 16b
+ subs r5, r5, #2
+ add r2, r2, r4, lsl #1
+ add r3, r3, r4, lsl #1
+ add r7, r7, r4, lsl #1
+ add r9, r9, r4, lsl #1
+.if \type == 444
+ add r6, r6, r4
+ add lr, lr, r4
+.elseif \type == 422
+ add r6, r6, r4, lsr #1
+ add lr, lr, r4, lsr #1
+.endif
+ add r0, r0, r1
+ add r12, r12, r1
+ bgt 161b
+ pop {r4-r9,pc}
+endfunc
+.endm
+
+w_mask_fn 444
+w_mask_fn 422
+w_mask_fn 420
+
+
+function blend_8bpc_neon, export=1
+ push {r4-r5,lr}
+ ldrd r4, r5, [sp, #12]
+ clz lr, r3
+ adr r3, L(blend_tbl)
+ sub lr, lr, #26
+ ldr lr, [r3, lr, lsl #2]
+ add r3, r3, lr
+ bx r3
+
+ .align 2
+L(blend_tbl):
+ .word 320f - L(blend_tbl) + CONFIG_THUMB
+ .word 160f - L(blend_tbl) + CONFIG_THUMB
+ .word 80f - L(blend_tbl) + CONFIG_THUMB
+ .word 40f - L(blend_tbl) + CONFIG_THUMB
+
+40:
+ vmov.i8 d22, #64
+ add r12, r0, r1
+ lsl r1, r1, #1
+4:
+ vld1.u8 {d2}, [r5, :64]!
+ vld1.u8 {d1}, [r2, :64]!
+ vld1.32 {d0[]}, [r0, :32]
+ subs r4, r4, #2
+ vld1.32 {d0[1]}, [r12, :32]
+ vsub.i8 d3, d22, d2
+ vmull.u8 q8, d1, d2
+ vmlal.u8 q8, d0, d3
+ vrshrn.i16 d20, q8, #6
+ vst1.32 {d20[0]}, [r0, :32], r1
+ vst1.32 {d20[1]}, [r12, :32], r1
+ bgt 4b
+ pop {r4-r5,pc}
+80:
+ vmov.i8 d16, #64
+ add r12, r0, r1
+ lsl r1, r1, #1
+8:
+ vld1.u8 {q1}, [r5, :128]!
+ vld1.u8 {q2}, [r2, :128]!
+ vld1.u8 {d0}, [r0, :64]
+ vsub.i8 d17, d16, d2
+ vld1.u8 {d1}, [r12, :64]
+ subs r4, r4, #2
+ vsub.i8 d18, d16, d3
+ vmull.u8 q3, d2, d4
+ vmlal.u8 q3, d0, d17
+ vmull.u8 q10, d3, d5
+ vmlal.u8 q10, d1, d18
+ vrshrn.i16 d22, q3, #6
+ vrshrn.i16 d23, q10, #6
+ vst1.u8 {d22}, [r0, :64], r1
+ vst1.u8 {d23}, [r12, :64], r1
+ bgt 8b
+ pop {r4-r5,pc}
+160:
+ vmov.i8 q12, #64
+ add r12, r0, r1
+ lsl r1, r1, #1
+16:
+ vld1.u8 {q1, q2}, [r5, :128]!
+ vld1.u8 {q8, q9}, [r2, :128]!
+ vld1.u8 {q0}, [r0, :128]
+ subs r4, r4, #2
+ vsub.i8 q15, q12, q1
+ vld1.u8 {q13}, [r12, :128]
+ vmull.u8 q3, d16, d2
+ vmlal.u8 q3, d0, d30
+ vmull.u8 q14, d17, d3
+ vmlal.u8 q14, d1, d31
+ vsub.i8 q15, q12, q2
+ vrshrn.i16 d20, q3, #6
+ vrshrn.i16 d21, q14, #6
+ vmull.u8 q3, d18, d4
+ vmlal.u8 q3, d26, d30
+ vmull.u8 q14, d19, d5
+ vmlal.u8 q14, d27, d31
+ vrshrn.i16 d22, q3, #6
+ vrshrn.i16 d23, q14, #6
+ vst1.u8 {q10}, [r0, :128], r1
+ vst1.u8 {q11}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5,pc}
+320:
+ vmov.i8 q10, #64
+32:
+ vld1.u8 {q2, q3}, [r5, :128]!
+ vld1.u8 {q8, q9}, [r2, :128]!
+ vld1.u8 {q0, q1}, [r0, :128]
+ subs r4, r4, #1
+ vsub.i8 q11, q10, q2
+ vmull.u8 q15, d16, d4
+ vmlal.u8 q15, d0, d22
+ vmull.u8 q14, d17, d5
+ vmlal.u8 q14, d1, d23
+ vsub.i8 q11, q10, q3
+ vrshrn.i16 d24, q15, #6
+ vrshrn.i16 d25, q14, #6
+ vmull.u8 q15, d18, d6
+ vmlal.u8 q15, d2, d22
+ vmull.u8 q14, d19, d7
+ vmlal.u8 q14, d3, d23
+ vrshrn.i16 d26, q15, #6
+ vrshrn.i16 d27, q14, #6
+ vst1.u8 {q12, q13}, [r0, :128], r1
+ bgt 32b
+ pop {r4-r5,pc}
+endfunc
+
+function blend_h_8bpc_neon, export=1
+ push {r4-r5,lr}
+ ldr r4, [sp, #12]
+ movrel r5, X(obmc_masks)
+ add r5, r5, r4
+ sub r4, r4, r4, lsr #2
+ clz lr, r3
+ adr r12, L(blend_h_tbl)
+ sub lr, lr, #24
+ ldr lr, [r12, lr, lsl #2]
+ add r12, r12, lr
+ bx r12
+
+ .align 2
+L(blend_h_tbl):
+ .word 1280f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 640f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 320f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 160f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 80f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 40f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 20f - L(blend_h_tbl) + CONFIG_THUMB
+
+20:
+ vmov.i8 d22, #64
+ add r12, r0, r1
+ lsl r1, r1, #1
+2:
+ vld1.16 {d2[], d3[]}, [r5, :16]!
+ vld1.32 {d1[]}, [r2, :32]!
+ subs r4, r4, #2
+ vld1.16 {d0[]}, [r0, :16]
+ vzip.8 d2, d3
+ vsub.i8 d4, d22, d2
+ vld1.16 {d0[1]}, [r12, :16]
+ vmull.u8 q8, d1, d2
+ vmlal.u8 q8, d0, d4
+ vrshrn.i16 d20, q8, #6
+ vst1.16 {d20[0]}, [r0, :16], r1
+ vst1.16 {d20[1]}, [r12, :16], r1
+ bgt 2b
+ pop {r4-r5,pc}
+40:
+ vmov.i8 d22, #64
+ add r12, r0, r1
+ lsl r1, r1, #1
+4:
+ vld2.u8 {d2[], d3[]}, [r5, :16]!
+ vld1.u8 {d1}, [r2, :64]!
+ subs r4, r4, #2
+ vext.u8 d2, d2, d3, #4
+ vld1.32 {d0[]}, [r0, :32]
+ vsub.i8 d6, d22, d2
+ vld1.32 {d0[1]}, [r12, :32]
+ vmull.u8 q8, d1, d2
+ vmlal.u8 q8, d0, d6
+ vrshrn.i16 d20, q8, #6
+ vst1.32 {d20[0]}, [r0, :32], r1
+ vst1.32 {d20[1]}, [r12, :32], r1
+ bgt 4b
+ pop {r4-r5,pc}
+80:
+ vmov.i8 q8, #64
+ add r12, r0, r1
+ lsl r1, r1, #1
+8:
+ vld2.u8 {d2[], d3[]}, [r5, :16]!
+ vld1.u8 {d4, d5}, [r2, :128]!
+ vld1.u8 {d0}, [r0, :64]
+ vsub.i8 q9, q8, q1
+ vld1.u8 {d1}, [r12, :64]
+ subs r4, r4, #2
+ vmull.u8 q3, d2, d4
+ vmlal.u8 q3, d0, d18
+ vmull.u8 q10, d3, d5
+ vmlal.u8 q10, d1, d19
+ vrshrn.i16 d22, q3, #6
+ vrshrn.i16 d23, q10, #6
+ vst1.u8 {d22}, [r0, :64], r1
+ vst1.u8 {d23}, [r12, :64], r1
+ bgt 8b
+ pop {r4-r5,pc}
+160:
+ vmov.i8 q12, #64
+ add r12, r0, r1
+ lsl r1, r1, #1
+16:
+ vld2.u8 {d28[], d29[]}, [r5, :16]!
+ vld1.u8 {d2, d3, d4, d5}, [r2, :128]!
+ vsub.i8 q15, q12, q14
+ vld1.u8 {q0}, [r0, :128]
+ subs r4, r4, #2
+ vld1.u8 {q13}, [r12, :128]
+ vmull.u8 q3, d2, d28
+ vmlal.u8 q3, d0, d30
+ vmull.u8 q8, d3, d28
+ vmlal.u8 q8, d1, d30
+ vrshrn.i16 d18, q3, #6
+ vrshrn.i16 d19, q8, #6
+ vmull.u8 q3, d4, d29
+ vmlal.u8 q3, d26, d31
+ vmull.u8 q8, d5, d29
+ vmlal.u8 q8, d27, d31
+ vrshrn.i16 d20, q3, #6
+ vrshrn.i16 d21, q8, #6
+ vst1.u8 {q9}, [r0, :128], r1
+ vst1.u8 {q10}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5,pc}
+320:
+640:
+1280:
+ vmov.i8 d20, #64
+ sub r1, r1, r3
+321:
+ vld1.u8 {d6[]}, [r5]!
+ vsub.i8 d7, d20, d6
+ mov r12, r3
+32:
+ vld1.u8 {q8, q9}, [r2, :128]!
+ vld1.u8 {q0, q1}, [r0, :128]
+ vmull.u8 q15, d16, d6
+ vmlal.u8 q15, d0, d7
+ vmull.u8 q14, d17, d6
+ vmlal.u8 q14, d1, d7
+ vrshrn.i16 d0, q15, #6
+ vrshrn.i16 d1, q14, #6
+ vmull.u8 q15, d18, d6
+ vmlal.u8 q15, d2, d7
+ vmull.u8 q14, d19, d6
+ vmlal.u8 q14, d3, d7
+ vrshrn.i16 d2, q15, #6
+ vrshrn.i16 d3, q14, #6
+ subs r12, r12, #32
+ vst1.u8 {q0, q1}, [r0, :128]!
+ bgt 32b
+ add r0, r0, r1
+ subs r4, r4, #1
+ bgt 321b
+ pop {r4-r5,pc}
+endfunc
+
+function blend_v_8bpc_neon, export=1
+ push {r4,lr}
+ ldr r4, [sp, #8]
+ movrel lr, X(obmc_masks)
+ add lr, lr, r3
+ clz r12, r3
+ adr r3, L(blend_v_tbl)
+ sub r12, r12, #26
+ ldr r12, [r3, r12, lsl #2]
+ add r3, r3, r12
+ bx r3
+
+ .align 2
+L(blend_v_tbl):
+ .word 320f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 160f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 80f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 40f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 20f - L(blend_v_tbl) + CONFIG_THUMB
+
+20:
+ vmov.i8 d22, #64
+ vld1.8 {d2[]}, [lr]
+ add r12, r0, r1
+ lsl r1, r1, #1
+ vsub.i8 d3, d22, d2
+2:
+ vld1.16 {d1[0]}, [r2, :16]!
+ vld1.8 {d0[]}, [r0]
+ subs r4, r4, #2
+ vld1.8 {d1[1]}, [r2]
+ vld1.8 {d0[1]}, [r12]
+ vmull.u8 q2, d1, d2
+ vmlal.u8 q2, d0, d3
+ vrshrn.i16 d6, q2, #6
+ add r2, r2, #2
+ vst1.8 {d6[0]}, [r0], r1
+ vst1.8 {d6[1]}, [r12], r1
+ bgt 2b
+ pop {r4,pc}
+40:
+ vmov.i8 d22, #64
+ vld1.32 {d4[]}, [lr, :32]
+ add r12, r0, r1
+ lsl r1, r1, #1
+ vsub.i8 d5, d22, d4
+ sub r1, r1, #2
+4:
+ vld1.u8 {d2}, [r2, :64]!
+ vld1.32 {d0[]}, [r0, :32]
+ vld1.32 {d0[1]}, [r12, :32]
+ subs r4, r4, #2
+ vmull.u8 q3, d2, d4
+ vmlal.u8 q3, d0, d5
+ vrshrn.i16 d20, q3, #6
+ vst1.16 {d20[0]}, [r0, :16]!
+ vst1.16 {d20[2]}, [r12, :16]!
+ vst1.8 {d20[2]}, [r0], r1
+ vst1.8 {d20[6]}, [r12], r1
+ bgt 4b
+ pop {r4,pc}
+80:
+ vmov.i8 d16, #64
+ vld1.u8 {d2}, [lr, :64]
+ add r12, r0, r1
+ lsl r1, r1, #1
+ vsub.i8 d17, d16, d2
+ sub r1, r1, #4
+8:
+ vld1.u8 {d4, d5}, [r2, :128]!
+ vld1.u8 {d0}, [r0, :64]
+ vld1.u8 {d1}, [r12, :64]
+ subs r4, r4, #2
+ vmull.u8 q3, d2, d4
+ vmlal.u8 q3, d0, d17
+ vmull.u8 q10, d2, d5
+ vmlal.u8 q10, d1, d17
+ vrshrn.i16 d22, q3, #6
+ vrshrn.i16 d23, q10, #6
+ vst1.32 {d22[0]}, [r0, :32]!
+ vst1.32 {d23[0]}, [r12, :32]!
+ vst1.16 {d22[2]}, [r0, :16], r1
+ vst1.16 {d23[2]}, [r12, :16], r1
+ bgt 8b
+ pop {r4,pc}
+160:
+ vmov.i8 q12, #64
+ vld1.u8 {q14}, [lr, :128]
+ add r12, r0, r1
+ lsl r1, r1, #1
+ vsub.i8 q11, q12, q14
+ sub r1, r1, #8
+16:
+ vld1.u8 {q1, q2}, [r2, :128]!
+ vld1.u8 {q0}, [r0, :128]
+ subs r4, r4, #2
+ vld1.u8 {q13}, [r12, :128]
+ vmull.u8 q3, d2, d28
+ vmlal.u8 q3, d0, d22
+ vmull.u8 q8, d3, d29
+ vmlal.u8 q8, d1, d23
+ vrshrn.i16 d18, q3, #6
+ vrshrn.i16 d19, q8, #6
+ vmull.u8 q3, d4, d28
+ vmlal.u8 q3, d26, d22
+ vmull.u8 q8, d5, d29
+ vmlal.u8 q8, d27, d23
+ vrshrn.i16 d20, q3, #6
+ vrshrn.i16 d21, q8, #6
+ vst1.u8 {d18}, [r0, :64]!
+ vst1.u8 {d20}, [r12, :64]!
+ vst1.32 {d19[0]}, [r0, :32], r1
+ vst1.32 {d21[0]}, [r12, :32], r1
+ bgt 16b
+ pop {r4,pc}
+320:
+ vmov.i8 q10, #64
+ vld1.u8 {q2, q3}, [lr, :128]
+ vsub.i8 q11, q10, q2
+ vsub.i8 d24, d20, d6
+32:
+ vld1.u8 {q8, q9}, [r2, :128]!
+ vld1.u8 {d0, d1, d2}, [r0, :64]
+ subs r4, r4, #1
+ vmull.u8 q15, d16, d4
+ vmlal.u8 q15, d0, d22
+ vmull.u8 q14, d17, d5
+ vmlal.u8 q14, d1, d23
+ vrshrn.i16 d0, q15, #6
+ vrshrn.i16 d1, q14, #6
+ vmull.u8 q15, d18, d6
+ vmlal.u8 q15, d2, d24
+ vrshrn.i16 d2, q15, #6
+ vst1.u8 {d0, d1, d2}, [r0, :64], r1
+ bgt 32b
+ pop {r4,pc}
+endfunc
+
+
+// This has got the same signature as the put_8tap functions,
+// assumes that the caller has loaded the h argument into r5,
+// and assumes that r8 is set to (clz(w)-24).
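+// As an illustration of the dispatch (read off the table below): with
+// r8 = clz(w) - 24, widths 128, 64, 32, 16, 8, 4 and 2 select entries 0..6,
+// i.e. 1280f, 640f, 32f, 160f, 8f, 4f and 2f respectively.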
+function put_neon
+ adr r9, L(put_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
+ .align 2
+L(put_tbl):
+ .word 1280f - L(put_tbl) + CONFIG_THUMB
+ .word 640f - L(put_tbl) + CONFIG_THUMB
+ .word 32f - L(put_tbl) + CONFIG_THUMB
+ .word 160f - L(put_tbl) + CONFIG_THUMB
+ .word 8f - L(put_tbl) + CONFIG_THUMB
+ .word 4f - L(put_tbl) + CONFIG_THUMB
+ .word 2f - L(put_tbl) + CONFIG_THUMB
+
+2:
+ vld1.16 {d0[]}, [r2], r3
+ vld1.16 {d1[]}, [r2], r3
+ subs r5, r5, #2
+ vst1.16 {d0[0]}, [r0, :16], r1
+ vst1.16 {d1[0]}, [r0, :16], r1
+ bgt 2b
+ pop {r4-r11,pc}
+4:
+ vld1.32 {d0[]}, [r2], r3
+ vld1.32 {d1[]}, [r2], r3
+ subs r5, r5, #2
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d1[0]}, [r0, :32], r1
+ bgt 4b
+ pop {r4-r11,pc}
+8:
+ vld1.8 {d0}, [r2], r3
+ vld1.8 {d1}, [r2], r3
+ subs r5, r5, #2
+ vst1.8 {d0}, [r0, :64], r1
+ vst1.8 {d1}, [r0, :64], r1
+ bgt 8b
+ pop {r4-r11,pc}
+160:
+ add r8, r0, r1
+ lsl r1, r1, #1
+ add r9, r2, r3
+ lsl r3, r3, #1
+16:
+ vld1.8 {q0}, [r2], r3
+ vld1.8 {q1}, [r9], r3
+ subs r5, r5, #2
+ vst1.8 {q0}, [r0, :128], r1
+ vst1.8 {q1}, [r8, :128], r1
+ bgt 16b
+ pop {r4-r11,pc}
+32:
+ vld1.8 {q0, q1}, [r2], r3
+ subs r5, r5, #1
+ vst1.8 {q0, q1}, [r0, :128], r1
+ bgt 32b
+ pop {r4-r11,pc}
+640:
+ sub r1, r1, #32
+ sub r3, r3, #32
+64:
+ vld1.8 {q0, q1}, [r2]!
+ vst1.8 {q0, q1}, [r0, :128]!
+ vld1.8 {q2, q3}, [r2], r3
+ subs r5, r5, #1
+ vst1.8 {q2, q3}, [r0, :128], r1
+ bgt 64b
+ pop {r4-r11,pc}
+1280:
+ sub r1, r1, #96
+ sub r3, r3, #96
+128:
+ vld1.8 {q8, q9}, [r2]!
+ vst1.8 {q8, q9}, [r0, :128]!
+ vld1.8 {q10, q11}, [r2]!
+ vst1.8 {q10, q11}, [r0, :128]!
+ vld1.8 {q12, q13}, [r2]!
+ vst1.8 {q12, q13}, [r0, :128]!
+ vld1.8 {q14, q15}, [r2], r3
+ subs r5, r5, #1
+ vst1.8 {q14, q15}, [r0, :128], r1
+ bgt 128b
+ pop {r4-r11,pc}
+endfunc
+
+
+// This has got the same signature as the put_8tap functions,
+// assumes that the caller has loaded the h argument into r4,
+// and assumes that r8 is set to (clz(w)-24), and r7 to w*2.
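+// As an illustration (read off the code below): prep widens pixels to 16-bit
+// intermediates (pixel << 4 for 8 bpc), which is why the caller passes the
+// output stride as w*2 in r7; clz(w)-24 maps w = 128..4 onto table entries 0..5.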
+function prep_neon
+ adr r9, L(prep_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
+ .align 2
+L(prep_tbl):
+ .word 1280f - L(prep_tbl) + CONFIG_THUMB
+ .word 640f - L(prep_tbl) + CONFIG_THUMB
+ .word 320f - L(prep_tbl) + CONFIG_THUMB
+ .word 160f - L(prep_tbl) + CONFIG_THUMB
+ .word 8f - L(prep_tbl) + CONFIG_THUMB
+ .word 4f - L(prep_tbl) + CONFIG_THUMB
+
+4:
+ vld1.32 {d0[]}, [r1], r2
+ vld1.32 {d2[]}, [r1], r2
+ subs r4, r4, #2
+ vshll.u8 q0, d0, #4
+ vshll.u8 q1, d2, #4
+ vst1.16 {d1, d2}, [r0, :64]!
+ bgt 4b
+ pop {r4-r11,pc}
+8:
+ vld1.8 {d0}, [r1], r2
+ vld1.8 {d2}, [r1], r2
+ subs r4, r4, #2
+ vshll.u8 q0, d0, #4
+ vshll.u8 q1, d2, #4
+ vst1.16 {q0, q1}, [r0, :128]!
+ bgt 8b
+ pop {r4-r11,pc}
+160:
+ add r9, r1, r2
+ lsl r2, r2, #1
+ add r8, r0, r7
+ lsl r7, r7, #1
+16:
+ vld1.8 {q2}, [r1], r2
+ vld1.8 {q3}, [r9], r2
+ subs r4, r4, #2
+ vshll.u8 q0, d4, #4
+ vshll.u8 q1, d5, #4
+ vshll.u8 q2, d6, #4
+ vshll.u8 q3, d7, #4
+ vst1.16 {q0, q1}, [r0, :128], r7
+ vst1.16 {q2, q3}, [r8, :128], r7
+ bgt 16b
+ pop {r4-r11,pc}
+320:
+ add r8, r0, r3
+32:
+ vld1.8 {q0, q1}, [r1], r2
+ subs r4, r4, #2
+ vshll.u8 q8, d0, #4
+ vshll.u8 q9, d1, #4
+ vld1.8 {q2, q3}, [r1], r2
+ vshll.u8 q10, d2, #4
+ vshll.u8 q11, d3, #4
+ vshll.u8 q12, d4, #4
+ vst1.16 {q8, q9}, [r0, :128], r7
+ vshll.u8 q13, d5, #4
+ vst1.16 {q10, q11}, [r8, :128], r7
+ vshll.u8 q14, d6, #4
+ vst1.16 {q12, q13}, [r0, :128], r7
+ vshll.u8 q15, d7, #4
+ vst1.16 {q14, q15}, [r8, :128], r7
+ bgt 32b
+ pop {r4-r11,pc}
+640:
+ sub r2, r2, #32
+ add r8, r0, #32
+ mov r6, #64
+64:
+ vld1.8 {q0, q1}, [r1]!
+ subs r4, r4, #1
+ vshll.u8 q8, d0, #4
+ vshll.u8 q9, d1, #4
+ vld1.8 {q2, q3}, [r1], r2
+ vshll.u8 q10, d2, #4
+ vshll.u8 q11, d3, #4
+ vshll.u8 q12, d4, #4
+ vst1.16 {q8, q9}, [r0, :128], r6
+ vshll.u8 q13, d5, #4
+ vshll.u8 q14, d6, #4
+ vst1.16 {q10, q11}, [r8, :128], r6
+ vshll.u8 q15, d7, #4
+ vst1.16 {q12, q13}, [r0, :128], r6
+ vst1.16 {q14, q15}, [r8, :128], r6
+ bgt 64b
+ pop {r4-r11,pc}
+1280:
+ sub r2, r2, #96
+ add r8, r0, #32
+ mov r6, #64
+128:
+ vld1.8 {q0, q1}, [r1]!
+ vld1.8 {q2, q3}, [r1]!
+ vshll.u8 q10, d0, #4
+ vshll.u8 q11, d1, #4
+ vshll.u8 q12, d2, #4
+ vshll.u8 q13, d3, #4
+ vshll.u8 q14, d4, #4
+ vshll.u8 q15, d5, #4
+ vld1.8 {q8, q9}, [r1]!
+ vst1.16 {q10, q11}, [r0, :128], r6
+ vst1.16 {q12, q13}, [r8, :128], r6
+ vshll.u8 q0, d6, #4
+ vshll.u8 q1, d7, #4
+ vshll.u8 q2, d16, #4
+ vshll.u8 q3, d17, #4
+ vshll.u8 q8, d18, #4
+ vshll.u8 q9, d19, #4
+ vld1.8 {q10, q11}, [r1], r2
+ vst1.16 {q14, q15}, [r0, :128], r6
+ vst1.16 {q0, q1}, [r8, :128], r6
+ vshll.u8 q12, d20, #4
+ vshll.u8 q13, d21, #4
+ vshll.u8 q14, d22, #4
+ vshll.u8 q15, d23, #4
+ subs r4, r4, #1
+ vst1.16 {q2, q3}, [r0, :128], r6
+ vst1.16 {q8, q9}, [r8, :128], r6
+ vst1.16 {q12, q13}, [r0, :128], r6
+ vst1.16 {q14, q15}, [r8, :128], r6
+ bgt 128b
+ pop {r4-r11,pc}
+endfunc
+
+
+.macro load_slice s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
+ vld1.\wd {\d0[]}, [\s0], \strd
+ vld1.\wd {\d1[]}, [\s1], \strd
+.ifnb \d2
+ vld1.\wd {\d2[]}, [\s0], \strd
+ vld1.\wd {\d3[]}, [\s1], \strd
+.endif
+.ifnb \d4
+ vld1.\wd {\d4[]}, [\s0], \strd
+.endif
+.ifnb \d5
+ vld1.\wd {\d5[]}, [\s1], \strd
+.endif
+.ifnb \d6
+ vld1.\wd {\d6[]}, [\s0], \strd
+.endif
+.endm
+.macro load_reg s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
+ vld1.8 {\d0}, [\s0], \strd
+ vld1.8 {\d1}, [\s1], \strd
+.ifnb \d2
+ vld1.8 {\d2}, [\s0], \strd
+ vld1.8 {\d3}, [\s1], \strd
+.endif
+.ifnb \d4
+ vld1.8 {\d4}, [\s0], \strd
+.endif
+.ifnb \d5
+ vld1.8 {\d5}, [\s1], \strd
+.endif
+.ifnb \d6
+ vld1.8 {\d6}, [\s0], \strd
+.endif
+.endm
+.macro load_16 s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
+ load_slice \s0, \s1, \strd, 16, \d0, \d1, \d2, \d3, \d4, \d5, \d6
+.endm
+.macro load_32 s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
+ load_slice \s0, \s1, \strd, 32, \d0, \d1, \d2, \d3, \d4, \d5, \d6
+.endm
+.macro interleave_1_16 r0, r1, r2, r3, r4
+ vext.8 \r0, \r0, \r1, #6
+ vext.8 \r1, \r1, \r2, #6
+.ifnb \r3
+ vext.8 \r2, \r2, \r3, #6
+ vext.8 \r3, \r3, \r4, #6
+.endif
+.endm
+.macro interleave_1_32 r0, r1, r2, r3, r4
+ vext.8 \r0, \r0, \r1, #4
+ vext.8 \r1, \r1, \r2, #4
+.ifnb \r3
+ vext.8 \r2, \r2, \r3, #4
+ vext.8 \r3, \r3, \r4, #4
+.endif
+.endm
+.macro vmovl_u8 q0, d0, q1, d1, q2, d2, q3, d3, q4, d4, q5, d5, q6, d6
+ vmovl.u8 \q0, \d0
+ vmovl.u8 \q1, \d1
+.ifnb \q2
+ vmovl.u8 \q2, \d2
+ vmovl.u8 \q3, \d3
+.endif
+.ifnb \q4
+ vmovl.u8 \q4, \d4
+.endif
+.ifnb \q5
+ vmovl.u8 \q5, \d5
+.endif
+.ifnb \q6
+ vmovl.u8 \q6, \d6
+.endif
+.endm
+.macro mul_mla_4 d, s0, s1, s2, s3
+ vmul.s16 \d, \s0, d0[0]
+ vmla.s16 \d, \s1, d0[1]
+ vmla.s16 \d, \s2, d0[2]
+ vmla.s16 \d, \s3, d0[3]
+.endm
+.macro mul_mla_8_0 d0, s0, s1, s2, s3, s4, s5, s6, s7
+ vmul.s16 \d0, \s0, d0[0]
+ vmla.s16 \d0, \s1, d0[1]
+ vmla.s16 \d0, \s2, d0[2]
+ vmla.s16 \d0, \s3, d0[3]
+ vmla.s16 \d0, \s4, d1[0]
+ vmla.s16 \d0, \s5, d1[1]
+ vmla.s16 \d0, \s6, d1[2]
+ vmla.s16 \d0, \s7, d1[3]
+.endm
+.macro mul_mla_8_1 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8
+ vmul.s16 \d0, \s0, d0[0]
+ vmla.s16 \d0, \s1, d0[1]
+ vmla.s16 \d0, \s2, d0[2]
+ vmla.s16 \d0, \s3, d0[3]
+ vmla.s16 \d0, \s4, d1[0]
+ vmla.s16 \d0, \s5, d1[1]
+ vmla.s16 \d0, \s6, d1[2]
+ vmla.s16 \d0, \s7, d1[3]
+ vmul.s16 \d1, \s1, d0[0]
+ vmla.s16 \d1, \s2, d0[1]
+ vmla.s16 \d1, \s3, d0[2]
+ vmla.s16 \d1, \s4, d0[3]
+ vmla.s16 \d1, \s5, d1[0]
+ vmla.s16 \d1, \s6, d1[1]
+ vmla.s16 \d1, \s7, d1[2]
+ vmla.s16 \d1, \s8, d1[3]
+.endm
+.macro mul_mla_8_2 d0, d1, s0, s1, s2, s3, s4, s5, s6, s7, s8, s9
+ vmul.s16 \d0, \s0, d0[0]
+ vmla.s16 \d0, \s1, d0[1]
+ vmla.s16 \d0, \s2, d0[2]
+ vmla.s16 \d0, \s3, d0[3]
+ vmla.s16 \d0, \s4, d1[0]
+ vmla.s16 \d0, \s5, d1[1]
+ vmla.s16 \d0, \s6, d1[2]
+ vmla.s16 \d0, \s7, d1[3]
+ vmul.s16 \d1, \s2, d0[0]
+ vmla.s16 \d1, \s3, d0[1]
+ vmla.s16 \d1, \s4, d0[2]
+ vmla.s16 \d1, \s5, d0[3]
+ vmla.s16 \d1, \s6, d1[0]
+ vmla.s16 \d1, \s7, d1[1]
+ vmla.s16 \d1, \s8, d1[2]
+ vmla.s16 \d1, \s9, d1[3]
+.endm
+.macro vqrshrun_s16 shift, q0, d0, q1, d1, q2, d2, q3, d3
+ vqrshrun.s16 \d0, \q0, #\shift
+.ifnb \q1
+ vqrshrun.s16 \d1, \q1, #\shift
+.endif
+.ifnb \q2
+ vqrshrun.s16 \d2, \q2, #\shift
+ vqrshrun.s16 \d3, \q3, #\shift
+.endif
+.endm
+.macro vrshr_s16 shift, r0, r1, r2, r3
+ vrshr.s16 \r0, \r0, #\shift
+.ifnb \r1
+ vrshr.s16 \r1, \r1, #\shift
+.endif
+.ifnb \r2
+ vrshr.s16 \r2, \r2, #\shift
+ vrshr.s16 \r3, \r3, #\shift
+.endif
+.endm
+.macro st_16 strd, reg, lanes
+ vst1.16 {\reg[0]}, [r0, :16], \strd
+ vst1.16 {\reg[1]}, [r8, :16], \strd
+.if \lanes > 2
+ vst1.16 {\reg[2]}, [r0, :16], \strd
+ vst1.16 {\reg[3]}, [r8, :16], \strd
+.endif
+.endm
+.macro st_32 strd, r0, r1
+ vst1.32 {\r0[0]}, [r0, :32], \strd
+ vst1.32 {\r0[1]}, [r8, :32], \strd
+.ifnb \r1
+ vst1.32 {\r1[0]}, [r0, :32], \strd
+ vst1.32 {\r1[1]}, [r8, :32], \strd
+.endif
+.endm
+.macro st_reg strd, align, r0, r1, r2, r3, r4, r5, r6, r7
+ vst1.8 {\r0}, [r0, \align], \strd
+ vst1.8 {\r1}, [r8, \align], \strd
+.ifnb \r2
+ vst1.8 {\r2}, [r0, \align], \strd
+ vst1.8 {\r3}, [r8, \align], \strd
+.endif
+.ifnb \r4
+ vst1.8 {\r4}, [r0, \align], \strd
+ vst1.8 {\r5}, [r8, \align], \strd
+ vst1.8 {\r6}, [r0, \align], \strd
+ vst1.8 {\r7}, [r8, \align], \strd
+.endif
+.endm
+.macro shift_store_4 type, strd, q0, d0, d1, q1, d2, d3
+.ifc \type, put
+ vqrshrun_s16 6, \q0, \d0, \q1, \d2
+ st_32 \strd, \d0, \d2
+.else
+ vrshr_s16 2, \q0, \q1
+ st_reg \strd, :64, \d0, \d1, \d2, \d3
+.endif
+.endm
+.macro shift_store_8 type, strd, q0, d0, q1, d1, q2, d2, q3, d3
+.ifc \type, put
+ vqrshrun_s16 6, \q0, \d0, \q1, \d1, \q2, \d2, \q3, \d3
+ st_reg \strd, :64, \d0, \d1, \d2, \d3
+.else
+ vrshr_s16 2, \q0, \q1, \q2, \q3
+ st_reg \strd, :128,\q0, \q1, \q2, \q3
+.endif
+.endm
+.macro shift_store_16 type, strd, q0, d0, d1, q1, q2, d4, d5, q3
+.ifc \type, put
+ vqrshrun.s16 \d0, \q0, #6
+ vqrshrun.s16 \d1, \q1, #6
+ vqrshrun.s16 \d4, \q2, #6
+ vqrshrun.s16 \d5, \q3, #6
+ st_reg \strd, :128, \q0, \q2
+.else
+ vrshr_s16 2, \q0, \q1, \q2, \q3
+ vst1.16 {\q0, \q1}, [r0, :128], \strd
+ vst1.16 {\q2, \q3}, [r8, :128], \strd
+.endif
+.endm
+
+.macro make_8tap_fn op, type, type_h, type_v
+function \op\()_8tap_\type\()_8bpc_neon, export=1
+ push {r4-r11,lr}
+ movw r8, \type_h
+ movw r9, \type_v
+ b \op\()_8tap_neon
+endfunc
+.endm
+
+// No spaces in these expressions, due to gas-preprocessor.
+#define REGULAR ((0*15<<7)|3*15)
+#define SMOOTH ((1*15<<7)|4*15)
+#define SHARP ((2*15<<7)|3*15)
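+
+// Illustrative note (an interpretation of the code below, not authoritative):
+// each constant packs two offsets into X(mc_subpel_filters): bits 7..13 select
+// the 8-tap variant (type * 15) used for larger blocks, bits 0..6 the 4-tap
+// variant used when w <= 4 (or h <= 4 for the vertical filter). Multiplying
+// mx/my by 0x4081 replicates the subpel position into the same fields, so one
+// add yields both candidate indices, plus the raw value in bits 14+ that the
+// tst instructions use to check whether filtering is needed at all.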
+
+.macro filter_fn type, dst, d_strd, src, s_strd, w, h, mx, my, ds2, sr2, shift_hv
+make_8tap_fn \type, regular, REGULAR, REGULAR
+make_8tap_fn \type, regular_smooth, REGULAR, SMOOTH
+make_8tap_fn \type, regular_sharp, REGULAR, SHARP
+make_8tap_fn \type, smooth, SMOOTH, SMOOTH
+make_8tap_fn \type, smooth_regular, SMOOTH, REGULAR
+make_8tap_fn \type, smooth_sharp, SMOOTH, SHARP
+make_8tap_fn \type, sharp, SHARP, SHARP
+make_8tap_fn \type, sharp_regular, SHARP, REGULAR
+make_8tap_fn \type, sharp_smooth, SHARP, SMOOTH
+
+function \type\()_8tap_neon
+ ldrd r4, r5, [sp, #36]
+ ldrd r6, r7, [sp, #44]
+ movw r10, #0x4081 // (1 << 14) | (1 << 7) | (1 << 0)
+ mul \mx, \mx, r10
+ mul \my, \my, r10
+ add \mx, \mx, r8 // mx, 8tap_h, 4tap_h
+ add \my, \my, r9 // my, 8tap_v, 4tap_v
+.ifc \type, prep
+ lsl \d_strd, \w, #1
+.endif
+
+ clz r8, \w
+ tst \mx, #(0x7f << 14)
+ sub r8, r8, #24
+ movrel r10, X(mc_subpel_filters), -8
+ bne L(\type\()_8tap_h)
+ tst \my, #(0x7f << 14)
+ bne L(\type\()_8tap_v)
+ b \type\()_neon
+
+L(\type\()_8tap_h):
+ cmp \w, #4
+ ubfx r9, \mx, #7, #7
+ and \mx, \mx, #0x7f
+ it gt
+ movgt \mx, r9
+ tst \my, #(0x7f << 14)
+ add \mx, r10, \mx, lsl #3
+ bne L(\type\()_8tap_hv)
+
+ adr r9, L(\type\()_8tap_h_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
+ .align 2
+L(\type\()_8tap_h_tbl):
+ .word 1280f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+
+20: // 2xN h
+.ifc \type, put
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ sub \src, \src, #1
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+2:
+ vld1.8 {d4}, [\src], \s_strd
+ vld1.8 {d6}, [\sr2], \s_strd
+ vmovl.u8 q2, d4
+ vmovl.u8 q3, d6
+ vext.8 d5, d4, d5, #2
+ vext.8 d7, d6, d7, #2
+ subs \h, \h, #2
+ vtrn.32 d4, d6
+ vtrn.32 d5, d7
+ vmul.s16 d2, d4, d0[0]
+ vmla.s16 d2, d5, d0[1]
+ vmla.s16 d2, d6, d0[2]
+ vmla.s16 d2, d7, d0[3]
+ vrshr.s16 d2, d2, #2
+ vqrshrun.s16 d2, q1, #4
+ vst1.16 {d2[0]}, [\dst, :16], \d_strd
+ vst1.16 {d2[1]}, [\ds2, :16], \d_strd
+ bgt 2b
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN h
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ sub \src, \src, #1
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+4:
+ vld1.8 {d16}, [\src], \s_strd
+ vld1.8 {d24}, [\sr2], \s_strd
+ vmovl.u8 q8, d16
+ vmovl.u8 q12, d24
+ vext.8 d18, d16, d17, #2
+ vext.8 d20, d16, d17, #4
+ vext.8 d22, d16, d17, #6
+ vext.8 d26, d24, d25, #2
+ vext.8 d28, d24, d25, #4
+ vext.8 d30, d24, d25, #6
+ subs \h, \h, #2
+ vmul.s16 d4, d16, d0[0]
+ vmla.s16 d4, d18, d0[1]
+ vmla.s16 d4, d20, d0[2]
+ vmla.s16 d4, d22, d0[3]
+ vmul.s16 d5, d24, d0[0]
+ vmla.s16 d5, d26, d0[1]
+ vmla.s16 d5, d28, d0[2]
+ vmla.s16 d5, d30, d0[3]
+ vrshr.s16 q2, q2, #2
+.ifc \type, put
+ vqrshrun.s16 d4, q2, #4
+ vst1.32 {d4[0]}, [\dst, :32], \d_strd
+ vst1.32 {d4[1]}, [\ds2, :32], \d_strd
+.else
+ vst1.16 {d4}, [\dst, :64], \d_strd
+ vst1.16 {d5}, [\ds2, :64], \d_strd
+.endif
+ bgt 4b
+ pop {r4-r11,pc}
+
+80: // 8xN h
+ vld1.8 {d0}, [\mx, :64]
+ sub \src, \src, #3
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+8:
+ vld1.8 {q8}, [\src], \s_strd
+ vld1.8 {q12}, [\sr2], \s_strd
+ vmovl.u8 q9, d17
+ vmovl.u8 q8, d16
+ vmovl.u8 q13, d25
+ vmovl.u8 q12, d24
+
+ vmul.s16 q10, q8, d0[0]
+ vmul.s16 q14, q12, d0[0]
+.irpc i, 1234567
+ vext.8 q11, q8, q9, #(2*\i)
+ vext.8 q15, q12, q13, #(2*\i)
+.if \i < 4
+ vmla.s16 q10, q11, d0[\i]
+ vmla.s16 q14, q15, d0[\i]
+.else
+ vmla.s16 q10, q11, d1[\i-4]
+ vmla.s16 q14, q15, d1[\i-4]
+.endif
+.endr
+ subs \h, \h, #2
+ vrshr.s16 q10, q10, #2
+ vrshr.s16 q14, q14, #2
+.ifc \type, put
+ vqrshrun.s16 d20, q10, #4
+ vqrshrun.s16 d28, q14, #4
+ vst1.8 {d20}, [\dst, :64], \d_strd
+ vst1.8 {d28}, [\ds2, :64], \d_strd
+.else
+ vst1.16 {q10}, [\dst, :128], \d_strd
+ vst1.16 {q14}, [\ds2, :128], \d_strd
+.endif
+ bgt 8b
+ pop {r4-r11,pc}
+
+160:
+320:
+640:
+1280: // 16xN, 32xN, ... h
+ // This could be done without touching q4-q6, by using only
+ // one temporary for vext in the loop. That's slower on A7 and A53
+ // (but surprisingly, marginally faster on A8 and A73).
+ vpush {q4-q6}
+ vld1.8 {d0}, [\mx, :64]
+ sub \src, \src, #3
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+
+ sub \s_strd, \s_strd, \w
+ sub \s_strd, \s_strd, #8
+.ifc \type, put
+ lsl \d_strd, \d_strd, #1
+ sub \d_strd, \d_strd, \w
+.endif
+161:
+ vld1.8 {d16, d17, d18}, [\src]!
+ vld1.8 {d24, d25, d26}, [\sr2]!
+ mov \mx, \w
+ vmovl.u8 q10, d18
+ vmovl.u8 q9, d17
+ vmovl.u8 q8, d16
+ vmovl.u8 q14, d26
+ vmovl.u8 q13, d25
+ vmovl.u8 q12, d24
+
+16:
+ vmul.s16 q1, q8, d0[0]
+ vmul.s16 q2, q9, d0[0]
+ vmul.s16 q3, q12, d0[0]
+ vmul.s16 q4, q13, d0[0]
+.irpc i, 1234567
+ vext.8 q5, q8, q9, #(2*\i)
+ vext.8 q6, q9, q10, #(2*\i)
+ vext.8 q11, q12, q13, #(2*\i)
+ vext.8 q15, q13, q14, #(2*\i)
+.if \i < 4
+ vmla.s16 q1, q5, d0[\i]
+ vmla.s16 q2, q6, d0[\i]
+ vmla.s16 q3, q11, d0[\i]
+ vmla.s16 q4, q15, d0[\i]
+.else
+ vmla.s16 q1, q5, d1[\i-4]
+ vmla.s16 q2, q6, d1[\i-4]
+ vmla.s16 q3, q11, d1[\i-4]
+ vmla.s16 q4, q15, d1[\i-4]
+.endif
+.endr
+ vrshr.s16 q1, q1, #2
+ vrshr.s16 q2, q2, #2
+ vrshr.s16 q3, q3, #2
+ vrshr.s16 q4, q4, #2
+ subs \mx, \mx, #16
+.ifc \type, put
+ vqrshrun.s16 d2, q1, #4
+ vqrshrun.s16 d3, q2, #4
+ vqrshrun.s16 d4, q3, #4
+ vqrshrun.s16 d5, q4, #4
+ vst1.8 {q1}, [\dst, :128]!
+ vst1.8 {q2}, [\ds2, :128]!
+.else
+ vst1.16 {q1, q2}, [\dst, :128]!
+ vst1.16 {q3, q4}, [\ds2, :128]!
+.endif
+ ble 9f
+
+ vmov q8, q10
+ vmov q12, q14
+ vld1.8 {d18, d19}, [\src]!
+ vld1.8 {d26, d27}, [\sr2]!
+ vmovl.u8 q10, d19
+ vmovl.u8 q9, d18
+ vmovl.u8 q14, d27
+ vmovl.u8 q13, d26
+ b 16b
+
+9:
+ add \dst, \dst, \d_strd
+ add \ds2, \ds2, \d_strd
+ add \src, \src, \s_strd
+ add \sr2, \sr2, \s_strd
+
+ subs \h, \h, #2
+ bgt 161b
+ vpop {q4-q6}
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_v):
+ cmp \h, #4
+ ubfx r9, \my, #7, #7
+ and \my, \my, #0x7f
+ it gt
+ movgt \my, r9
+ add \my, r10, \my, lsl #3
+
+ adr r9, L(\type\()_8tap_v_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
+ .align 2
+L(\type\()_8tap_v_tbl):
+ .word 1280f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+
+20: // 2xN v
+.ifc \type, put
+ bgt 28f
+
+ cmp \h, #2
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ // 2x2 v
+ load_16 \src, \sr2, \s_strd, d1, d2, d3, d4, d5
+ interleave_1_16 d1, d2, d3, d4, d5
+ bgt 24f
+ vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4
+ mul_mla_4 d6, d16, d18, d20, d22
+ vqrshrun_s16 6, q3, d6
+ st_16 \d_strd, d6, 2
+ pop {r4-r11,pc}
+
+24: // 2x4 v
+ load_16 \sr2, \src, \s_strd, d6, d7
+ interleave_1_16 d5, d6, d7
+ vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4, q12, d5, q13, d6
+ vmov d17, d20
+ vmov d19, d22
+ vmov d21, d24
+ vmov d23, d26
+ mul_mla_4 q3, q8, q9, q10, q11
+ vqrshrun_s16 6, q3, d6
+ st_16 \d_strd, d6, 4
+ pop {r4-r11,pc}
+
+28: // 2x6, 2x8, 2x12, 2x16 v
+ vpush {q4-q7}
+ vld1.8 {d0}, [\my, :64]
+ sub \sr2, \src, \s_strd, lsl #1
+ add \ds2, \dst, \d_strd
+ sub \src, \sr2, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+
+ load_16 \src, \sr2, \s_strd, d2, d4, d6, d8, d10, d12, d14
+ interleave_1_16 d2, d4, d6, d8, d10
+ interleave_1_16 d10, d12, d14
+ vmovl_u8 q1, d2, q2, d4, q3, d6, q4, d8, q5, d10, q6, d12
+ vmov d3, d6
+ vmov d5, d8
+ vmov d7, d10
+ vmov d9, d12
+216:
+ subs \h, \h, #4
+ load_16 \sr2, \src, \s_strd, d16, d18, d20, d22
+ interleave_1_16 d14, d16, d18, d20, d22
+ vmovl_u8 q7, d14, q8, d16, q9, d18, q10, d20
+ vmov d11, d14
+ vmov d13, d16
+ vmov d15, d18
+ vmov d17, d20
+ mul_mla_8_0 q1, q1, q2, q3, q4, q5, q6, q7, q8
+ vqrshrun_s16 6, q1, d2
+ st_16 \d_strd, d2, 4
+ ble 0f
+ cmp \h, #2
+ vmov q1, q5
+ vmov q2, q6
+ vmov q3, q7
+ vmov q4, q8
+ vmov q5, q9
+ vmov q6, q10
+ vmov d14, d22
+ beq 26f
+ b 216b
+26:
+ load_16 \sr2, \src, \s_strd, d16, d18
+ interleave_1_16 d14, d16, d18
+ vmovl_u8 q7, d14, q8, d16
+ vmov d11, d14
+ vmov d13, d16
+ mul_mla_8_0 d2, d2, d4, d6, d8, d10, d12, d14, d16
+ vqrshrun_s16 6, q1, d2
+ st_16 \d_strd, d2, 2
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+.endif
+
+40:
+ bgt 480f
+
+ // 4x2, 4x4 v
+ cmp \h, #2
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ load_32 \src, \sr2, \s_strd, d1, d2, d3, d4, d5
+ interleave_1_32 d1, d2, d3, d4, d5
+ vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4
+ mul_mla_4 q3, q8, q9, q10, q11
+ shift_store_4 \type, \d_strd, q3, d6, d7
+ ble 0f
+ load_32 \sr2, \src, \s_strd, d6, d7
+ interleave_1_32 d5, d6, d7
+ vmovl_u8 q12, d5, q13, d6
+ mul_mla_4 q3, q10, q11, q12, q13
+ shift_store_4 \type, \d_strd, q3, d6, d7
+0:
+ pop {r4-r11,pc}
+
+480: // 4x6, 4x8, 4x12, 4x16 v
+ vpush {q4}
+ vld1.8 {d0}, [\my, :64]
+ sub \sr2, \src, \s_strd, lsl #1
+ add \ds2, \dst, \d_strd
+ sub \src, \sr2, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ load_32 \src, \sr2, \s_strd, d2, d4, d6, d8, d16, d18, d20
+ interleave_1_32 d2, d4, d6
+ interleave_1_32 d6, d8, d16, d18, d20
+ vmovl_u8 q1, d2, q2, d4, q3, d6, q4, d8, q8, d16, q9, d18
+
+48:
+ subs \h, \h, #4
+ load_32 \sr2, \src, \s_strd, d22, d24, d26, d28
+ interleave_1_32 d20, d22, d24, d26, d28
+ vmovl_u8 q10, d20, q11, d22, q12, d24, q13, d26
+ mul_mla_8_2 q1, q2, q1, q2, q3, q4, q8, q9, q10, q11, q12, q13
+ shift_store_4 \type, \d_strd, q1, d2, d3, q2, d4, d5
+ ble 0f
+ load_32 \sr2, \src, \s_strd, d30, d2
+ subs \h, \h, #2
+ interleave_1_32 d28, d30, d2
+ vmovl_u8 q14, d28, q15, d30
+ mul_mla_8_0 q8, q8, q9, q10, q11, q12, q13, q14, q15
+ shift_store_4 \type, \d_strd, q8, d16, d17
+ ble 0f
+ load_32 \sr2, \src, \s_strd, d4, d6
+ subs \h, \h, #2
+ interleave_1_32 d2, d4, d6
+ vmovl_u8 q1, d2, q2, d4
+ mul_mla_8_0 q9, q10, q11, q12, q13, q14, q15, q1, q2
+ shift_store_4 \type, \d_strd, q9, d18, d19
+ ble 0f
+ subs \h, \h, #4
+ load_32 \sr2, \src, \s_strd, d8, d16, d18, d20
+ interleave_1_32 d6, d8, d16, d18, d20
+ vmovl_u8 q3, d6, q4, d8, q8, d16, q9, d18
+ mul_mla_8_2 q12, q13, q12, q13, q14, q15, q1, q2, q3, q4, q8, q9
+ shift_store_4 \type, \d_strd, q12, d24, d25, q13, d26, d27
+ bgt 48b
+0:
+ vpop {q4}
+ pop {r4-r11,pc}
+
+80:
+ bgt 880f
+
+ // 8x2, 8x4 v
+ cmp \h, #2
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ load_reg \src, \sr2, \s_strd, d1, d2, d3, d4, d5
+ vmovl_u8 q8, d1, q9, d2, q10, d3, q11, d4, q12, d5
+ mul_mla_4 q1, q8, q9, q10, q11
+ mul_mla_4 q2, q9, q10, q11, q12
+ shift_store_8 \type, \d_strd, q1, d2, q2, d4
+ ble 0f
+ load_reg \sr2, \src, \s_strd, d6, d7
+ vmovl_u8 q13, d6, q14, d7
+ mul_mla_4 q1, q10, q11, q12, q13
+ mul_mla_4 q2, q11, q12, q13, q14
+ shift_store_8 \type, \d_strd, q1, d2, q2, d4
+0:
+ pop {r4-r11,pc}
+
+880: // 8x6, 8x8, 8x16, 8x32 v
+1680: // 16x8, 16x16, ...
+320: // 32x8, 32x16, ...
+640:
+1280:
+ vpush {q4}
+ vld1.8 {d0}, [\my, :64]
+ sub \src, \src, \s_strd
+ sub \src, \src, \s_strd, lsl #1
+ vmovl.s8 q0, d0
+ mov \my, \h
+168:
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ load_reg \src, \sr2, \s_strd, d2, d4, d6, d8, d16, d18, d20
+ vmovl_u8 q1, d2, q2, d4, q3, d6, q4, d8, q8, d16, q9, d18, q10, d20
+
+88:
+ subs \h, \h, #2
+ load_reg \sr2, \src, \s_strd, d22, d24
+ vmovl_u8 q11, d22, q12, d24
+ mul_mla_8_1 q1, q2, q1, q2, q3, q4, q8, q9, q10, q11, q12
+ shift_store_8 \type, \d_strd, q1, d2, q2, d4
+ ble 9f
+ subs \h, \h, #2
+ load_reg \sr2, \src, \s_strd, d26, d28
+ vmovl_u8 q13, d26, q14, d28
+ mul_mla_8_1 q3, q4, q3, q4, q8, q9, q10, q11, q12, q13, q14
+ shift_store_8 \type, \d_strd, q3, d6, q4, d8
+ ble 9f
+ subs \h, \h, #2
+ load_reg \sr2, \src, \s_strd, d30, d2
+ vmovl_u8 q15, d30, q1, d2
+ mul_mla_8_1 q8, q9, q8, q9, q10, q11, q12, q13, q14, q15, q1
+ shift_store_8 \type, \d_strd, q8, d16, q9, d18
+ ble 9f
+ subs \h, \h, #2
+ load_reg \sr2, \src, \s_strd, d4, d6
+ vmovl_u8 q2, d4, q3, d6
+ mul_mla_8_1 q10, q11, q10, q11, q12, q13, q14, q15, q1, q2, q3
+ shift_store_8 \type, \d_strd, q10, d20, q11, d22
+ ble 9f
+ subs \h, \h, #4
+ load_reg \sr2, \src, \s_strd, d8, d16, d18, d20
+ vmovl_u8 q4, d8, q8, d16, q9, d18, q10, d20
+ mul_mla_8_1 q12, q13, q12, q13, q14, q15, q1, q2, q3, q4, q8
+ mul_mla_8_1 q14, q15, q14, q15, q1, q2, q3, q4, q8, q9, q10
+ shift_store_8 \type, \d_strd, q12, d24, q13, d26, q14, d28, q15, d30
+ bgt 88b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #3
+ mov \h, \my
+ add \src, \src, #8
+.ifc \type, put
+ add \dst, \dst, #8
+.else
+ add \dst, \dst, #16
+.endif
+ b 168b
+0:
+ vpop {q4}
+ pop {r4-r11,pc}
+
+160:
+ bgt 1680b
+
+ // 16x2, 16x4 v
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ cmp \h, #2
+ load_reg \src, \sr2, \s_strd, q11, q12, q13, q14, q15
+ vmovl.u8 q1, d22
+ vmovl.u8 q2, d24
+ vmovl.u8 q3, d26
+ vmovl.u8 q8, d28
+ vmovl.u8 q9, d30
+ vmovl.u8 q11, d23
+ vmovl.u8 q12, d25
+ vmovl.u8 q13, d27
+ vmovl.u8 q14, d29
+ vmovl.u8 q15, d31
+ mul_mla_4 q1, q1, q2, q3, q8
+ mul_mla_4 q10, q2, q3, q8, q9
+ mul_mla_4 q2, q11, q12, q13, q14
+ mul_mla_4 q11, q12, q13, q14, q15
+ shift_store_16 \type, \d_strd, q1, d2, d3, q2, q10, d20, d21, q11
+ ble 0f
+ load_reg \sr2, \src, \s_strd, q10, q11
+ vmovl.u8 q1, d20
+ vmovl.u8 q10, d21
+ vmovl.u8 q12, d22
+ vmovl.u8 q11, d23
+ mul_mla_4 q2, q3, q8, q9, q1
+ mul_mla_4 q3, q13, q14, q15, q10
+ mul_mla_4 q13, q8, q9, q1, q12
+ mul_mla_4 q14, q14, q15, q10, q11
+ shift_store_16 \type, \d_strd, q2, d4, d5, q3, q13, d26, d27, q14
+0:
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_hv):
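+        // Select the vertical filter: \my packs two 7-bit filter indices;
+        // bits 7-13 are used when h > 4, bits 0-6 otherwise, and \my is then
+        // turned into a pointer to the matching 8-byte entry of the filter
+        // table in r10.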
+ cmp \h, #4
+ ubfx r9, \my, #7, #7
+ and \my, \my, #0x7f
+ it gt
+ movgt \my, r9
+ add \my, r10, \my, lsl #3
+
+ adr r9, L(\type\()_8tap_hv_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
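+        // Table of handler offsets, relative to the table itself, one entry
+        // per block width (128 down to 2), indexed above via r8.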
+ .align 2
+L(\type\()_8tap_hv_tbl):
+ .word 1280f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+
+20:
+.ifc \type, put
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ bgt 280f
+ add \my, \my, #2
+ vld1.32 {d2[]}, [\my]
+
+ // 2x2, 2x4 hv
+ sub \sr2, \src, #1
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+
+ vld1.8 {d26}, [\src], \s_strd
+ vmovl.u8 q13, d26
+ vext.8 q14, q13, q13, #2
+ vmul.s16 d26, d26, d0
+ vmul.s16 d28, d28, d0
+ vpadd.s16 d26, d26, d28
+ vpadd.s16 d26, d26, d26
+ vrshr.s16 d16, d26, #2
+ bl L(\type\()_8tap_filter_2)
+
+ vext.8 d16, d16, d16, #4
+ vmov d17, d26
+ vext.8 d16, d16, d26, #4
+
+2:
+ bl L(\type\()_8tap_filter_2)
+
+ vext.8 d18, d17, d26, #4
+ vmull.s16 q2, d16, d2[0]
+ vmlal.s16 q2, d17, d2[1]
+ vmlal.s16 q2, d18, d2[2]
+ vmlal.s16 q2, d26, d2[3]
+
+ vqrshrn.s32 d4, q2, #\shift_hv
+ vqmovun.s16 d4, q2
+ subs \h, \h, #2
+ vst1.16 {d4[0]}, [\dst, :16], \d_strd
+ vst1.16 {d4[1]}, [\ds2, :16], \d_strd
+ ble 0f
+ vmov d16, d18
+ vmov d17, d26
+ b 2b
+
+280: // 2x8, 2x16, 2x32 hv
+ vld1.8 {d2}, [\my, :64]
+ sub \src, \src, #1
+ sub \sr2, \src, \s_strd, lsl #1
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+ vld1.8 {d26}, [\src], \s_strd
+ vmovl.u8 q13, d26
+ vext.8 q14, q13, q13, #2
+ vmul.s16 d26, d26, d0
+ vmul.s16 d28, d28, d0
+ vpadd.s16 d26, d26, d28
+ vpadd.s16 d26, d26, d26
+ vrshr.s16 d16, d26, #2
+
+ bl L(\type\()_8tap_filter_2)
+ vext.8 d16, d16, d16, #4
+ vmov d17, d26
+ vext.8 d16, d16, d26, #4
+ bl L(\type\()_8tap_filter_2)
+ vext.8 d18, d17, d26, #4
+ vmov d19, d26
+ bl L(\type\()_8tap_filter_2)
+ vext.8 d20, d19, d26, #4
+ vmov d21, d26
+
+28:
+ bl L(\type\()_8tap_filter_2)
+ vext.8 d22, d21, d26, #4
+ vmull.s16 q2, d16, d2[0]
+ vmlal.s16 q2, d17, d2[1]
+ vmlal.s16 q2, d18, d2[2]
+ vmlal.s16 q2, d19, d2[3]
+ vmlal.s16 q2, d20, d3[0]
+ vmlal.s16 q2, d21, d3[1]
+ vmlal.s16 q2, d22, d3[2]
+ vmlal.s16 q2, d26, d3[3]
+
+ vqrshrn.s32 d4, q2, #\shift_hv
+ vqmovun.s16 d4, q2
+ subs \h, \h, #2
+ vst1.16 {d4[0]}, [\dst, :16], \d_strd
+ vst1.16 {d4[1]}, [\ds2, :16], \d_strd
+ ble 0f
+ vmov d16, d18
+ vmov d17, d19
+ vmov d18, d20
+ vmov d19, d21
+ vmov d20, d22
+ vmov d21, d26
+ b 28b
+
+0:
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_filter_2):
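+        // Filter two new rows horizontally (2 output pixels each) with the
+        // 4-tap filter in d0; the \sr2 and \src rows come back interleaved
+        // in d26 (rotated copy in d27), rounded to intermediate precision.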
+ vld1.8 {d28}, [\sr2], \s_strd
+ vld1.8 {d30}, [\src], \s_strd
+ vext.8 d29, d28, d28, #1
+ vext.8 d31, d30, d30, #1
+ vmovl.u8 q13, d28
+ vmovl.u8 q14, d29
+ vmov d27, d28
+ vmovl.u8 q14, d30
+ vmovl.u8 q15, d31
+ vtrn.32 d26, d28
+ vtrn.32 d27, d30
+ vmul.s16 d26, d26, d0[0]
+ vmla.s16 d26, d27, d0[1]
+ vmla.s16 d26, d28, d0[2]
+ vmla.s16 d26, d30, d0[3]
+ vrshr.s16 d26, d26, #2
+ vext.8 d27, d26, d26, #4
+ bx lr
+.endif
+
+40:
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ bgt 480f
+ add \my, \my, #2
+ vld1.32 {d2[]}, [\my]
+ sub \sr2, \src, #1
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+ // 4x2, 4x4 hv
+ vld1.8 {d30}, [\src], \s_strd
+ vmovl.u8 q14, d30
+ vext.8 d27, d28, d29, #2
+ vext.8 d30, d28, d29, #4
+ vext.8 d31, d28, d29, #6
+ vmul.s16 d26, d28, d0[0]
+ vmla.s16 d26, d27, d0[1]
+ vmla.s16 d26, d30, d0[2]
+ vmla.s16 d26, d31, d0[3]
+ vrshr.s16 d16, d26, #2
+
+ bl L(\type\()_8tap_filter_4)
+ vmov d17, d26
+ vmov d18, d27
+
+4:
+ bl L(\type\()_8tap_filter_4)
+ vmull.s16 q2, d16, d2[0]
+ vmlal.s16 q2, d17, d2[1]
+ vmlal.s16 q2, d18, d2[2]
+ vmlal.s16 q2, d26, d2[3]
+ vmull.s16 q3, d17, d2[0]
+ vmlal.s16 q3, d18, d2[1]
+ vmlal.s16 q3, d26, d2[2]
+ vmlal.s16 q3, d27, d2[3]
+ vqrshrn.s32 d4, q2, #\shift_hv
+ vqrshrn.s32 d6, q3, #\shift_hv
+ subs \h, \h, #2
+.ifc \type, put
+ vqmovun.s16 d4, q2
+ vqmovun.s16 d6, q3
+ vst1.32 {d4[0]}, [\dst, :32], \d_strd
+ vst1.32 {d6[0]}, [\ds2, :32], \d_strd
+.else
+ vst1.16 {d4}, [\dst, :64], \d_strd
+ vst1.16 {d6}, [\ds2, :64], \d_strd
+.endif
+ ble 0f
+ vmov d16, d18
+ vmov d17, d26
+ vmov d18, d27
+ b 4b
+
+480: // 4x8, 4x16, 4x32 hv
+ vld1.8 {d2}, [\my, :64]
+ sub \src, \src, #1
+ sub \sr2, \src, \s_strd, lsl #1
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+ vld1.8 {d30}, [\src], \s_strd
+ vmovl.u8 q14, d30
+ vext.8 d27, d28, d29, #2
+ vext.8 d30, d28, d29, #4
+ vext.8 d31, d28, d29, #6
+ vmul.s16 d26, d28, d0[0]
+ vmla.s16 d26, d27, d0[1]
+ vmla.s16 d26, d30, d0[2]
+ vmla.s16 d26, d31, d0[3]
+ vrshr.s16 d16, d26, #2
+
+ bl L(\type\()_8tap_filter_4)
+ vmov d17, d26
+ vmov d18, d27
+ bl L(\type\()_8tap_filter_4)
+ vmov d19, d26
+ vmov d20, d27
+ bl L(\type\()_8tap_filter_4)
+ vmov d21, d26
+ vmov d22, d27
+
+48:
+ bl L(\type\()_8tap_filter_4)
+ vmull.s16 q2, d16, d2[0]
+ vmlal.s16 q2, d17, d2[1]
+ vmlal.s16 q2, d18, d2[2]
+ vmlal.s16 q2, d19, d2[3]
+ vmlal.s16 q2, d20, d3[0]
+ vmlal.s16 q2, d21, d3[1]
+ vmlal.s16 q2, d22, d3[2]
+ vmlal.s16 q2, d26, d3[3]
+ vmull.s16 q3, d17, d2[0]
+ vmlal.s16 q3, d18, d2[1]
+ vmlal.s16 q3, d19, d2[2]
+ vmlal.s16 q3, d20, d2[3]
+ vmlal.s16 q3, d21, d3[0]
+ vmlal.s16 q3, d22, d3[1]
+ vmlal.s16 q3, d26, d3[2]
+ vmlal.s16 q3, d27, d3[3]
+ vqrshrn.s32 d4, q2, #\shift_hv
+ vqrshrn.s32 d6, q3, #\shift_hv
+ subs \h, \h, #2
+.ifc \type, put
+ vqmovun.s16 d4, q2
+ vqmovun.s16 d6, q3
+ vst1.32 {d4[0]}, [\dst, :32], \d_strd
+ vst1.32 {d6[0]}, [\ds2, :32], \d_strd
+.else
+ vst1.16 {d4}, [\dst, :64], \d_strd
+ vst1.16 {d6}, [\ds2, :64], \d_strd
+.endif
+ ble 0f
+ vmov d16, d18
+ vmov d17, d19
+ vmov d18, d20
+ vmov d19, d21
+ vmov d20, d22
+ vmov d21, d26
+ vmov d22, d27
+ b 48b
+0:
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_filter_4):
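+        // Filter two new rows horizontally (4 output pixels each) with the
+        // 4-tap filter in d0; the \sr2 row is returned in d26 and the \src
+        // row in d27, both rounded to intermediate precision.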
+ vld1.8 {d30}, [\sr2], \s_strd
+ vld1.8 {d31}, [\src], \s_strd
+ vmovl.u8 q14, d30
+ vext.8 d27, d28, d29, #2
+ vext.8 d30, d28, d29, #4
+ vext.8 d1, d28, d29, #6
+ vmul.s16 d26, d28, d0[0]
+ vmla.s16 d26, d27, d0[1]
+ vmla.s16 d26, d30, d0[2]
+ vmla.s16 d26, d1, d0[3]
+
+ vmovl.u8 q14, d31
+ vext.8 d30, d28, d29, #2
+ vext.8 d31, d28, d29, #4
+ vext.8 d1, d28, d29, #6
+ vmul.s16 d27, d28, d0[0]
+ vmla.s16 d27, d30, d0[1]
+ vmla.s16 d27, d31, d0[2]
+ vmla.s16 d27, d1, d0[3]
+ vrshr.s16 d26, d26, #2
+ vrshr.s16 d27, d27, #2
+ bx lr
+
+80:
+160:
+320:
+ bgt 880f
+ vpush {q4-q7}
+ add \my, \my, #2
+ vld1.8 {d0}, [\mx, :64]
+ vld1.32 {d2[]}, [\my]
+ sub \src, \src, #3
+ sub \src, \src, \s_strd
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+ mov \my, \h
+
+164: // 8x2, 8x4, 16x2, 16x4, 32x2, 32x4 hv
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+
+ vld1.8 {q14}, [\src], \s_strd
+ vmovl.u8 q12, d28
+ vmovl.u8 q13, d29
+ vmul.s16 q10, q12, d0[0]
+.irpc i, 123
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q10, q14, d0[\i]
+.endr
+.irpc i, 4567
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q10, q14, d1[\i-4]
+.endr
+ vrshr.s16 q3, q10, #2
+
+ bl L(\type\()_8tap_filter_8)
+ vmov q4, q10
+ vmov q5, q11
+
+8:
+ bl L(\type\()_8tap_filter_8)
+ vmull.s16 q12, d6, d2[0]
+ vmull.s16 q13, d7, d2[0]
+ vmull.s16 q14, d8, d2[0]
+ vmull.s16 q15, d9, d2[0]
+ vmlal.s16 q12, d8, d2[1]
+ vmlal.s16 q13, d9, d2[1]
+ vmlal.s16 q14, d10, d2[1]
+ vmlal.s16 q15, d11, d2[1]
+ vmlal.s16 q12, d10, d2[2]
+ vmlal.s16 q13, d11, d2[2]
+ vmlal.s16 q14, d20, d2[2]
+ vmlal.s16 q15, d21, d2[2]
+ vmlal.s16 q12, d20, d2[3]
+ vmlal.s16 q13, d21, d2[3]
+ vmlal.s16 q14, d22, d2[3]
+ vmlal.s16 q15, d23, d2[3]
+ vqrshrn.s32 d24, q12, #\shift_hv
+ vqrshrn.s32 d25, q13, #\shift_hv
+ vqrshrn.s32 d28, q14, #\shift_hv
+ vqrshrn.s32 d29, q15, #\shift_hv
+ subs \h, \h, #2
+.ifc \type, put
+ vqmovun.s16 d24, q12
+ vqmovun.s16 d28, q14
+ vst1.8 {d24}, [\dst, :64], \d_strd
+ vst1.8 {d28}, [\ds2, :64], \d_strd
+.else
+ vst1.16 {q12}, [\dst, :128], \d_strd
+ vst1.16 {q14}, [\ds2, :128], \d_strd
+.endif
+ ble 9f
+ vmov q3, q5
+ vmov q4, q10
+ vmov q5, q11
+ b 8b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #2
+ mov \h, \my
+ add \src, \src, #8
+.ifc \type, put
+ add \dst, \dst, #8
+.else
+ add \dst, \dst, #16
+.endif
+ b 164b
+
+880: // 8x8, 8x16, ..., 16x8, ..., 32x8, ... hv
+640:
+1280:
+ vpush {q4-q7}
+ vld1.8 {d0}, [\mx, :64]
+ vld1.8 {d2}, [\my, :64]
+ sub \src, \src, #3
+ sub \src, \src, \s_strd
+ sub \src, \src, \s_strd, lsl #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+ mov \my, \h
+
+168:
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+
+ vld1.8 {q14}, [\src], \s_strd
+ vmovl.u8 q12, d28
+ vmovl.u8 q13, d29
+ vmul.s16 q10, q12, d0[0]
+.irpc i, 123
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q10, q14, d0[\i]
+.endr
+.irpc i, 4567
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q10, q14, d1[\i-4]
+.endr
+ vrshr.s16 q3, q10, #2
+
+ bl L(\type\()_8tap_filter_8)
+ vmov q4, q10
+ vmov q5, q11
+ bl L(\type\()_8tap_filter_8)
+ vmov q6, q10
+ vmov q7, q11
+ bl L(\type\()_8tap_filter_8)
+ vmov q8, q10
+ vmov q9, q11
+
+88:
+ bl L(\type\()_8tap_filter_8)
+ vmull.s16 q12, d6, d2[0]
+ vmull.s16 q13, d7, d2[0]
+ vmull.s16 q14, d8, d2[0]
+ vmull.s16 q15, d9, d2[0]
+ vmlal.s16 q12, d8, d2[1]
+ vmlal.s16 q13, d9, d2[1]
+ vmlal.s16 q14, d10, d2[1]
+ vmlal.s16 q15, d11, d2[1]
+ vmlal.s16 q12, d10, d2[2]
+ vmlal.s16 q13, d11, d2[2]
+ vmlal.s16 q14, d12, d2[2]
+ vmlal.s16 q15, d13, d2[2]
+ vmlal.s16 q12, d12, d2[3]
+ vmlal.s16 q13, d13, d2[3]
+ vmlal.s16 q14, d14, d2[3]
+ vmlal.s16 q15, d15, d2[3]
+ vmlal.s16 q12, d14, d3[0]
+ vmlal.s16 q13, d15, d3[0]
+ vmlal.s16 q14, d16, d3[0]
+ vmlal.s16 q15, d17, d3[0]
+ vmlal.s16 q12, d16, d3[1]
+ vmlal.s16 q13, d17, d3[1]
+ vmlal.s16 q14, d18, d3[1]
+ vmlal.s16 q15, d19, d3[1]
+ vmlal.s16 q12, d18, d3[2]
+ vmlal.s16 q13, d19, d3[2]
+ vmlal.s16 q14, d20, d3[2]
+ vmlal.s16 q15, d21, d3[2]
+ vmlal.s16 q12, d20, d3[3]
+ vmlal.s16 q13, d21, d3[3]
+ vmlal.s16 q14, d22, d3[3]
+ vmlal.s16 q15, d23, d3[3]
+ vqrshrn.s32 d24, q12, #\shift_hv
+ vqrshrn.s32 d25, q13, #\shift_hv
+ vqrshrn.s32 d28, q14, #\shift_hv
+ vqrshrn.s32 d29, q15, #\shift_hv
+ subs \h, \h, #2
+.ifc \type, put
+ vqmovun.s16 d24, q12
+ vqmovun.s16 d28, q14
+ vst1.8 {d24}, [\dst, :64], \d_strd
+ vst1.8 {d28}, [\ds2, :64], \d_strd
+.else
+ vst1.16 {q12}, [\dst, :128], \d_strd
+ vst1.16 {q14}, [\ds2, :128], \d_strd
+.endif
+ ble 9f
+ vmov q3, q5
+ vmov q4, q6
+ vmov q5, q7
+ vmov q6, q8
+ vmov q7, q9
+ vmov q8, q10
+ vmov q9, q11
+ b 88b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #3
+ mov \h, \my
+ add \src, \src, #8
+.ifc \type, put
+ add \dst, \dst, #8
+.else
+ add \dst, \dst, #16
+.endif
+ b 168b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_filter_8):
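+        // Filter two new rows horizontally (8 output pixels each) with the
+        // 8-tap filter in d0/d1; the \sr2 row is returned in q10 and the
+        // \src row in q11, both rounded to intermediate precision.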
+ vld1.8 {q14}, [\sr2], \s_strd
+ vld1.8 {q15}, [\src], \s_strd
+ vmovl.u8 q12, d28
+ vmovl.u8 q13, d29
+ vmul.s16 q10, q12, d0[0]
+.irpc i, 123
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q10, q14, d0[\i]
+.endr
+.irpc i, 4567
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q10, q14, d1[\i-4]
+.endr
+ vmovl.u8 q12, d30
+ vmovl.u8 q13, d31
+ vmul.s16 q11, q12, d0[0]
+.irpc i, 123
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q11, q14, d0[\i]
+.endr
+.irpc i, 4567
+ vext.8 q14, q12, q13, #(2*\i)
+ vmla.s16 q11, q14, d1[\i-4]
+.endr
+ vrshr.s16 q10, q10, #2
+ vrshr.s16 q11, q11, #2
+ bx lr
+endfunc
+
+
+function \type\()_bilin_8bpc_neon, export=1
+ push {r4-r11,lr}
+ ldrd r4, r5, [sp, #36]
+ ldrd r6, r7, [sp, #44]
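+        // Bilinear weights: d0 = 16-mx, d1 = mx, d2 = 16-my, d3 = my,
+        // each replicated across the vector.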
+ vdup.8 d1, \mx
+ vdup.8 d3, \my
+ rsb r8, \mx, #16
+ rsb r9, \my, #16
+ vdup.8 d0, r8
+ vdup.8 d2, r9
+.ifc \type, prep
+ lsl \d_strd, \w, #1
+.endif
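+        // Dispatch on the subpel fractions: mx != 0 takes the horizontal
+        // (or hv) path, my != 0 the vertical one, otherwise branch to the
+        // plain \type\()_neon copy. r8 = clz(w)-24 indexes the jump tables.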
+ clz r8, \w
+ cmp \mx, #0
+ sub r8, r8, #24
+ bne L(\type\()_bilin_h)
+ cmp \my, #0
+ bne L(\type\()_bilin_v)
+ b \type\()_neon
+
+L(\type\()_bilin_h):
+ cmp \my, #0
+ bne L(\type\()_bilin_hv)
+
+ adr r9, L(\type\()_bilin_h_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
+ .align 2
+L(\type\()_bilin_h_tbl):
+ .word 1280f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+
+20: // 2xN h
+.ifc \type, put
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+2:
+ vld1.32 {d4[]}, [\src], \s_strd
+ vld1.32 {d6[]}, [\sr2], \s_strd
+ vext.8 d5, d4, d4, #1
+ vext.8 d7, d6, d6, #1
+ vtrn.16 q2, q3
+ subs \h, \h, #2
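+        // px = (src[x]*(16-mx) + src[x+1]*mx + 8) >> 4, two rows at a time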
+ vmull.u8 q3, d4, d0
+ vmlal.u8 q3, d5, d1
+ vqrshrn.u16 d4, q3, #4
+ vst1.16 {d4[0]}, [\dst, :16], \d_strd
+ vst1.16 {d4[1]}, [\ds2, :16], \d_strd
+ bgt 2b
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN h
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+4:
+ vld1.8 {d4}, [\src], \s_strd
+ vld1.8 {d6}, [\sr2], \s_strd
+ vext.8 d5, d4, d4, #1
+ vext.8 d7, d6, d6, #1
+ vtrn.32 q2, q3
+ subs \h, \h, #2
+ vmull.u8 q3, d4, d0
+ vmlal.u8 q3, d5, d1
+.ifc \type, put
+ vqrshrn.u16 d4, q3, #4
+ vst1.32 {d4[0]}, [\dst, :32], \d_strd
+ vst1.32 {d4[1]}, [\ds2, :32], \d_strd
+.else
+ vst1.16 {d6}, [\dst, :64], \d_strd
+ vst1.16 {d7}, [\ds2, :64], \d_strd
+.endif
+ bgt 4b
+ pop {r4-r11,pc}
+
+80: // 8xN h
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+8:
+ vld1.8 {q8}, [\src], \s_strd
+ vld1.8 {q10}, [\sr2], \s_strd
+ vext.8 q9, q8, q8, #1
+ vext.8 q11, q10, q10, #1
+ subs \h, \h, #2
+ vmull.u8 q8, d16, d0
+ vmull.u8 q10, d20, d0
+ vmlal.u8 q8, d18, d1
+ vmlal.u8 q10, d22, d1
+.ifc \type, put
+ vqrshrn.u16 d16, q8, #4
+ vqrshrn.u16 d18, q10, #4
+ vst1.8 {d16}, [\dst, :64], \d_strd
+ vst1.8 {d18}, [\ds2, :64], \d_strd
+.else
+ vst1.16 {q8}, [\dst, :128], \d_strd
+ vst1.16 {q10}, [\ds2, :128], \d_strd
+.endif
+ bgt 8b
+ pop {r4-r11,pc}
+160:
+320:
+640:
+1280: // 16xN, 32xN, ... h
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+
+ sub \s_strd, \s_strd, \w
+ sub \s_strd, \s_strd, #8
+.ifc \type, put
+ lsl \d_strd, \d_strd, #1
+ sub \d_strd, \d_strd, \w
+.endif
+161:
+ vld1.8 {d16}, [\src]!
+ vld1.8 {d22}, [\sr2]!
+ mov \mx, \w
+
+16:
+ vld1.8 {d17,d18}, [\src]!
+ vld1.8 {d23,d24}, [\sr2]!
+ vext.8 q10, q8, q9, #1
+ vext.8 q13, q11, q12, #1
+ vmull.u8 q2, d16, d0
+ vmull.u8 q3, d17, d0
+ vmull.u8 q14, d22, d0
+ vmull.u8 q15, d23, d0
+ vmlal.u8 q2, d20, d1
+ vmlal.u8 q3, d21, d1
+ vmlal.u8 q14, d26, d1
+ vmlal.u8 q15, d27, d1
+ subs \mx, \mx, #16
+.ifc \type, put
+ vqrshrn.u16 d4, q2, #4
+ vqrshrn.u16 d5, q3, #4
+ vqrshrn.u16 d28, q14, #4
+ vqrshrn.u16 d29, q15, #4
+ vst1.8 {q2}, [\dst, :128]!
+ vst1.8 {q14}, [\ds2, :128]!
+.else
+ vst1.16 {q2, q3}, [\dst, :128]!
+ vst1.16 {q14, q15}, [\ds2, :128]!
+.endif
+ ble 9f
+
+ vmov d16, d18
+ vmov d22, d24
+ b 16b
+
+9:
+ add \dst, \dst, \d_strd
+ add \ds2, \ds2, \d_strd
+ add \src, \src, \s_strd
+ add \sr2, \sr2, \s_strd
+
+ subs \h, \h, #2
+ bgt 161b
+ pop {r4-r11,pc}
+
+L(\type\()_bilin_v):
+ cmp \h, #4
+ adr r9, L(\type\()_bilin_v_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
+ .align 2
+L(\type\()_bilin_v_tbl):
+ .word 1280f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+
+20: // 2xN v
+.ifc \type, put
+ cmp \h, #2
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ // 2x2 v
+ vld1.16 {d16[]}, [\src], \s_strd
+ bgt 24f
+22:
+ vld1.16 {d17[]}, [\sr2], \s_strd
+ vld1.16 {d18[]}, [\src], \s_strd
+ vext.8 d16, d16, d17, #6
+ vext.8 d17, d17, d18, #6
+ vmull.u8 q2, d16, d2
+ vmlal.u8 q2, d17, d3
+ vqrshrn.u16 d4, q2, #4
+ vst1.16 {d4[0]}, [\dst, :16]
+ vst1.16 {d4[1]}, [\ds2, :16]
+ pop {r4-r11,pc}
+24: // 2x4, 2x6, 2x8, ... v
+ vld1.16 {d17[]}, [\sr2], \s_strd
+ vld1.16 {d18[]}, [\src], \s_strd
+ vld1.16 {d19[]}, [\sr2], \s_strd
+ vld1.16 {d20[]}, [\src], \s_strd
+ sub \h, \h, #4
+ vext.8 d16, d16, d17, #6
+ vext.8 d17, d17, d18, #6
+ vext.8 d18, d18, d19, #6
+ vext.8 d19, d19, d20, #6
+ vtrn.32 d16, d18
+ vtrn.32 d17, d19
+ vmull.u8 q2, d16, d2
+ vmlal.u8 q2, d17, d3
+ cmp \h, #2
+ vqrshrn.u16 d4, q2, #4
+ vst1.16 {d4[0]}, [\dst, :16], \d_strd
+ vst1.16 {d4[1]}, [\ds2, :16], \d_strd
+ vst1.16 {d4[2]}, [\dst, :16], \d_strd
+ vst1.16 {d4[3]}, [\ds2, :16], \d_strd
+ blt 0f
+ vmov d16, d20
+ beq 22b
+ b 24b
+0:
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN v
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vld1.32 {d16[]}, [\src], \s_strd
+4:
+ vld1.32 {d17[]}, [\sr2], \s_strd
+ vld1.32 {d18[]}, [\src], \s_strd
+ vext.8 d16, d16, d17, #4
+ vext.8 d17, d17, d18, #4
+ vmull.u8 q2, d16, d2
+ vmlal.u8 q2, d17, d3
+ subs \h, \h, #2
+.ifc \type, put
+ vqrshrn.u16 d4, q2, #4
+ vst1.32 {d4[0]}, [\dst, :32], \d_strd
+ vst1.32 {d4[1]}, [\ds2, :32], \d_strd
+.else
+ vst1.16 {d4}, [\dst, :64], \d_strd
+ vst1.16 {d5}, [\ds2, :64], \d_strd
+.endif
+ ble 0f
+ vmov d16, d18
+ b 4b
+0:
+ pop {r4-r11,pc}
+
+80: // 8xN v
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vld1.8 {d16}, [\src], \s_strd
+8:
+ vld1.8 {d17}, [\sr2], \s_strd
+ vld1.8 {d18}, [\src], \s_strd
+ vmull.u8 q2, d16, d2
+ vmull.u8 q3, d17, d2
+ vmlal.u8 q2, d17, d3
+ vmlal.u8 q3, d18, d3
+ subs \h, \h, #2
+.ifc \type, put
+ vqrshrn.u16 d4, q2, #4
+ vqrshrn.u16 d6, q3, #4
+ vst1.8 {d4}, [\dst, :64], \d_strd
+ vst1.8 {d6}, [\ds2, :64], \d_strd
+.else
+ vst1.16 {q2}, [\dst, :128], \d_strd
+ vst1.16 {q3}, [\ds2, :128], \d_strd
+.endif
+ ble 0f
+ vmov d16, d18
+ b 8b
+0:
+ pop {r4-r11,pc}
+
+160: // 16xN, 32xN, ...
+320:
+640:
+1280:
+ mov \my, \h
+1:
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.8 {q8}, [\src], \s_strd
+2:
+ vld1.8 {q9}, [\sr2], \s_strd
+ vld1.8 {q10}, [\src], \s_strd
+ vmull.u8 q12, d16, d2
+ vmull.u8 q13, d17, d2
+ vmull.u8 q14, d18, d2
+ vmull.u8 q15, d19, d2
+ vmlal.u8 q12, d18, d3
+ vmlal.u8 q13, d19, d3
+ vmlal.u8 q14, d20, d3
+ vmlal.u8 q15, d21, d3
+ subs \h, \h, #2
+.ifc \type, put
+ vqrshrn.u16 d24, q12, #4
+ vqrshrn.u16 d25, q13, #4
+ vqrshrn.u16 d28, q14, #4
+ vqrshrn.u16 d29, q15, #4
+ vst1.8 {q12}, [\dst, :128], \d_strd
+ vst1.8 {q14}, [\ds2, :128], \d_strd
+.else
+ vst1.16 {q12, q13}, [\dst, :128], \d_strd
+ vst1.16 {q14, q15}, [\ds2, :128], \d_strd
+.endif
+ ble 9f
+ vmov q8, q10
+ b 2b
+9:
+ subs \w, \w, #16
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #1
+ mov \h, \my
+ add \src, \src, #16
+.ifc \type, put
+ add \dst, \dst, #16
+.else
+ add \dst, \dst, #32
+.endif
+ b 1b
+0:
+ pop {r4-r11,pc}
+
+L(\type\()_bilin_hv):
+ vmovl.u8 q2, d2
+ vmovl.u8 q3, d3
+ adr r9, L(\type\()_bilin_hv_tbl)
+ ldr r8, [r9, r8, lsl #2]
+ add r9, r9, r8
+ bx r9
+
+ .align 2
+L(\type\()_bilin_hv_tbl):
+ .word 1280f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+
+20: // 2xN hv
+.ifc \type, put
+ add \sr2, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.32 {d28[]}, [\src], \s_strd
+ vext.8 d29, d28, d28, #1
+ vmull.u8 q8, d28, d0
+ vmlal.u8 q8, d29, d1
+
+2:
+ vld1.32 {d28[]}, [\sr2], \s_strd
+ vld1.32 {d30[]}, [\src], \s_strd
+ vext.8 d29, d28, d28, #1
+ vext.8 d31, d30, d30, #1
+ vtrn.16 d28, d30
+ vtrn.16 d29, d31
+ vmull.u8 q9, d28, d0
+ vmlal.u8 q9, d29, d1
+
+ vtrn.32 d16, d18
+
+ vmul.u16 d20, d16, d4
+ vmla.u16 d20, d19, d6
+ vqrshrn.u16 d20, q10, #8
+ subs \h, \h, #2
+ vst1.16 {d20[0]}, [\dst, :16], \d_strd
+ vst1.16 {d20[1]}, [\ds2, :16], \d_strd
+ ble 0f
+ vtrn.32 d19, d16
+ b 2b
+0:
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN hv
+ add \sr2, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.8 {d28}, [\src], \s_strd
+ vext.8 d29, d28, d28, #1
+ vmull.u8 q8, d28, d0
+ vmlal.u8 q8, d29, d1
+
+4:
+ vld1.8 {d28}, [\sr2], \s_strd
+ vld1.8 {d30}, [\src], \s_strd
+ vext.8 d29, d28, d28, #1
+ vext.8 d31, d30, d30, #1
+ vtrn.32 d28, d30
+ vtrn.32 d29, d31
+ vmull.u8 q9, d28, d0
+ vmlal.u8 q9, d29, d1
+
+ vmov d17, d18
+
+ vmul.u16 q10, q8, q2
+ vmla.u16 q10, q9, q3
+ subs \h, \h, #2
+.ifc \type, put
+ vqrshrn.u16 d20, q10, #8
+ vst1.32 {d20[0]}, [\dst, :32], \d_strd
+ vst1.32 {d20[1]}, [\ds2, :32], \d_strd
+.else
+ vrshr.u16 q10, q10, #4
+ vst1.16 {d20}, [\dst, :64], \d_strd
+ vst1.16 {d21}, [\ds2, :64], \d_strd
+.endif
+ ble 0f
+ vmov d16, d19
+ b 4b
+0:
+ pop {r4-r11,pc}
+
+80: // 8xN, 16xN, ... hv
+160:
+320:
+640:
+1280:
+ mov \my, \h
+
+1:
+ add \sr2, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.8 {q12}, [\src], \s_strd
+ vext.8 q13, q12, q12, #1
+ vmull.u8 q8, d24, d0
+ vmlal.u8 q8, d26, d1
+
+2:
+ vld1.8 {q12}, [\sr2], \s_strd
+ vld1.8 {q14}, [\src], \s_strd
+ vext.8 q13, q12, q12, #1
+ vext.8 q15, q14, q14, #1
+ vmull.u8 q9, d24, d0
+ vmlal.u8 q9, d26, d1
+ vmull.u8 q10, d28, d0
+ vmlal.u8 q10, d30, d1
+
+ vmul.u16 q8, q8, q2
+ vmla.u16 q8, q9, q3
+ vmul.u16 q9, q9, q2
+ vmla.u16 q9, q10, q3
+ subs \h, \h, #2
+.ifc \type, put
+ vqrshrn.u16 d16, q8, #8
+ vqrshrn.u16 d18, q9, #8
+ vst1.8 {d16}, [\dst, :64], \d_strd
+ vst1.8 {d18}, [\ds2, :64], \d_strd
+.else
+ vrshr.u16 q8, q8, #4
+ vrshr.u16 q9, q9, #4
+ vst1.16 {q8}, [\dst, :128], \d_strd
+ vst1.16 {q9}, [\ds2, :128], \d_strd
+.endif
+ ble 9f
+ vmov q8, q10
+ b 2b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #1
+ mov \h, \my
+ add \src, \src, #8
+.ifc \type, put
+ add \dst, \dst, #8
+.else
+ add \dst, \dst, #16
+.endif
+ b 1b
+0:
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+filter_fn put, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, 10
+filter_fn prep, r0, r7, r1, r2, r3, r4, r5, r6, r8, r9, 6
+
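+// Warp filter helpers: load_filter_ptr sets r12 = r11 + (\src >> 10)*8
+// (8 bytes per filter), and load_filter_coef steps \src by \inc and loads
+// the 8 coefficients from r12.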
+.macro load_filter_ptr src
+ asr r12, \src, #10
+ add r12, r11, r12, lsl #3
+.endm
+
+.macro load_filter_coef dst, src, inc
+ add \src, \src, \inc
+ vld1.8 {\dst}, [r12, :64]
+.endm
+
+.macro load_filter_row dst, src, inc
+ load_filter_ptr \src
+ load_filter_coef \dst, \src, \inc
+.endm
+
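+// Horizontally filter one row for warp: each of the 8 output pixels gets its
+// own filter, selected from the position accumulator in r5 (stepped by r7 per
+// pixel); the eight 16-bit sums are returned in q2.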
+function warp_filter_horz_neon
+ load_filter_ptr r5 // filter 0
+ vld1.16 {q7}, [r2], r3
+ vmov.i8 q6, #128
+
+ load_filter_coef d0, r5, r7 // filter 0
+ load_filter_row d1, r5, r7 // filter 1
+ load_filter_row d2, r5, r7 // filter 2
+ load_filter_ptr r5 // filter 3
+ veor q7, q7, q6 // subtract 128 to allow using vmull.s8
+ load_filter_coef d3, r5, r7 // filter 3
+ vext.8 d12, d14, d15, #1 // filter 1 pixels
+ vext.8 d13, d14, d15, #2 // filter 2 pixels
+ load_filter_ptr r5 // filter 4
+ vmull.s8 q2, d14, d0 // filter 0 output
+ vmull.s8 q3, d12, d1 // filter 1 output
+ load_filter_coef d0, r5, r7 // filter 4
+ load_filter_ptr r5 // filter 5
+ vext.8 d12, d14, d15, #3 // filter 3 pixels
+ vmull.s8 q4, d13, d2 // filter 2 output
+ vext.8 d13, d14, d15, #4 // filter 4 pixels
+ vpadd.i16 d4, d4, d5 // pixel 0 (4x16)
+ vpadd.i16 d5, d6, d7 // pixel 1 (4x16)
+ load_filter_coef d1, r5, r7 // filter 5
+ load_filter_ptr r5 // filter 6
+ vmull.s8 q5, d12, d3 // filter 3 output
+ vext.8 d12, d14, d15, #5 // filter 5 pixels
+ vmull.s8 q3, d13, d0 // filter 4 output
+ load_filter_coef d0, r5, r7 // filter 6
+ vext.8 d13, d14, d15, #6 // filter 6 pixels
+ load_filter_ptr r5 // filter 7
+ vpadd.i16 d8, d8, d9 // pixel 2 (4x16)
+ vpadd.i16 d9, d10, d11 // pixel 3 (4x16)
+ vmull.s8 q5, d12, d1 // filter 5 output
+ load_filter_coef d1, r5, r7 // filter 7
+ vext.8 d14, d14, d15, #7 // filter 7 pixels
+ vpadd.i16 d6, d6, d7 // pixel 4 (4x16)
+ vpadd.i16 d10, d10, d11 // pixel 5 (4x16)
+ vmull.s8 q6, d13, d0 // filter 6 output
+ vmull.s8 q7, d14, d1 // filter 7 output
+
+ sub r5, r5, r7, lsl #3
+
+ vpadd.i16 d4, d4, d5 // pixel 0,1 (2x16)
+ vpadd.i16 d5, d8, d9 // pixel 2,3 (2x16)
+ vpadd.i16 d12, d12, d13 // pixel 6 (4x16)
+ vpadd.i16 d14, d14, d15 // pixel 7 (4x16)
+ vpadd.i16 d6, d6, d10 // pixel 4,5 (2x16)
+ vpadd.i16 d10, d12, d14 // pixel 6,7 (2x16)
+ vpadd.i16 d4, d4, d5 // pixel 0-3
+ vpadd.i16 d5, d6, d10 // pixel 4-7
+
+ add r5, r5, r8
+
+ bx lr
+endfunc
+
+// void dav1d_warp_affine_8x8_8bpc_neon(
+// pixel *dst, const ptrdiff_t dst_stride,
+// const pixel *src, const ptrdiff_t src_stride,
+// const int16_t *const abcd, int mx, int my)
+.macro warp t, shift
+function warp_affine_8x8\t\()_8bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldr r6, [sp, #108]
+ ldrd r8, r9, [r4]
+ sxth r7, r8
+ asr r8, r8, #16
+ asr r4, r9, #16
+ sxth r9, r9
+ mov r10, #8
+ sub r2, r2, r3, lsl #1
+ sub r2, r2, r3
+ sub r2, r2, #3
+ movrel r11, X(mc_warp_filter), 64*8
+.ifnb \t
+ lsl r1, r1, #1
+.endif
+ add r5, r5, #512
+ add r6, r6, #512
+
+ bl warp_filter_horz_neon
+ vrshr.s16 q8, q2, #3
+ bl warp_filter_horz_neon
+ vrshr.s16 q9, q2, #3
+ bl warp_filter_horz_neon
+ vrshr.s16 q10, q2, #3
+ bl warp_filter_horz_neon
+ vrshr.s16 q11, q2, #3
+ bl warp_filter_horz_neon
+ vrshr.s16 q12, q2, #3
+ bl warp_filter_horz_neon
+ vrshr.s16 q13, q2, #3
+ bl warp_filter_horz_neon
+ vrshr.s16 q14, q2, #3
+
+1:
+ bl warp_filter_horz_neon
+ vrshr.s16 q15, q2, #3
+
+ load_filter_row d8, r6, r9
+ load_filter_row d9, r6, r9
+ load_filter_row d10, r6, r9
+ load_filter_row d11, r6, r9
+ load_filter_row d12, r6, r9
+ load_filter_row d13, r6, r9
+ load_filter_row d14, r6, r9
+ load_filter_row d15, r6, r9
+ transpose_8x8b q4, q5, q6, q7, d8, d9, d10, d11, d12, d13, d14, d15
+ vmovl.s8 q1, d8
+ vmovl.s8 q2, d9
+ vmovl.s8 q3, d10
+ vmovl.s8 q4, d11
+ vmovl.s8 q5, d12
+ vmovl.s8 q6, d13
+
+ sub r6, r6, r9, lsl #3
+
+ // This ordering of vmull/vmlal is highly beneficial for
+ // Cortex A8/A9/A53 here, but harmful for Cortex A7.
+ vmull.s16 q0, d16, d2
+ vmlal.s16 q0, d18, d4
+ vmlal.s16 q0, d20, d6
+ vmlal.s16 q0, d22, d8
+ vmlal.s16 q0, d24, d10
+ vmlal.s16 q0, d26, d12
+ vmull.s16 q1, d17, d3
+ vmlal.s16 q1, d19, d5
+ vmlal.s16 q1, d21, d7
+ vmlal.s16 q1, d23, d9
+ vmlal.s16 q1, d25, d11
+ vmlal.s16 q1, d27, d13
+
+ vmovl.s8 q2, d14
+ vmovl.s8 q3, d15
+
+ vmlal.s16 q0, d28, d4
+ vmlal.s16 q0, d30, d6
+ vmlal.s16 q1, d29, d5
+ vmlal.s16 q1, d31, d7
+
+.ifb \t
+ vmov.i16 q7, #128
+.else
+ vmov.i16 q7, #0x800
+.endif
+
+ vmov q8, q9
+ vmov q9, q10
+ vqrshrn.s32 d0, q0, #\shift
+ vmov q10, q11
+ vqrshrn.s32 d1, q1, #\shift
+ vmov q11, q12
+ vadd.i16 q0, q0, q7
+ vmov q12, q13
+.ifb \t
+ vqmovun.s16 d0, q0
+.endif
+ vmov q13, q14
+ vmov q14, q15
+ subs r10, r10, #1
+.ifnb \t
+ vst1.16 {q0}, [r0, :128], r1
+.else
+ vst1.8 {d0}, [r0, :64], r1
+.endif
+
+ add r6, r6, r4
+ bgt 1b
+
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
+warp , 11
+warp t, 7
+
+// void dav1d_emu_edge_8bpc_neon(
+// const intptr_t bw, const intptr_t bh,
+// const intptr_t iw, const intptr_t ih,
+// const intptr_t x, const intptr_t y,
+// pixel *dst, const ptrdiff_t dst_stride,
+// const pixel *ref, const ptrdiff_t ref_stride)
+function emu_edge_8bpc_neon, export=1
+ push {r4-r11,lr}
+ ldrd r4, r5, [sp, #36]
+ ldrd r6, r7, [sp, #44]
+ ldrd r8, r9, [sp, #52]
+
+ // ref += iclip(y, 0, ih - 1) * PXSTRIDE(ref_stride)
+ // ref += iclip(x, 0, iw - 1)
+ sub r12, r3, #1 // ih - 1
+ cmp r5, r3
+ sub lr, r2, #1 // iw - 1
+ it lt
+ movlt r12, r5 // min(y, ih - 1)
+ cmp r4, r2
+ bic r12, r12, r12, asr #31 // max(min(y, ih - 1), 0)
+ it lt
+ movlt lr, r4 // min(x, iw - 1)
+ bic lr, lr, lr, asr #31 // max(min(x, iw - 1), 0)
+ mla r8, r12, r9, r8 // ref += iclip() * stride
+ add r8, r8, lr // ref += iclip()
+
+ // bottom_ext = iclip(y + bh - ih, 0, bh - 1)
+ // top_ext = iclip(-y, 0, bh - 1)
+ add r10, r5, r1 // y + bh
+ neg r5, r5 // -y
+ sub r10, r10, r3 // y + bh - ih
+ sub r12, r1, #1 // bh - 1
+ cmp r10, r1
+ bic r5, r5, r5, asr #31 // max(-y, 0)
+ it ge
+ movge r10, r12 // min(y + bh - ih, bh-1)
+ cmp r5, r1
+ bic r10, r10, r10, asr #31 // max(min(y + bh - ih, bh-1), 0)
+ it ge
+ movge r5, r12 // min(max(-y, 0), bh-1)
+
+ // right_ext = iclip(x + bw - iw, 0, bw - 1)
+ // left_ext = iclip(-x, 0, bw - 1)
+ add r11, r4, r0 // x + bw
+ neg r4, r4 // -x
+ sub r11, r11, r2 // x + bw - iw
+ sub lr, r0, #1 // bw - 1
+ cmp r11, r0
+ bic r4, r4, r4, asr #31 // max(-x, 0)
+ it ge
+ movge r11, lr // min(x + bw - iw, bw-1)
+ cmp r4, r0
+ bic r11, r11, r11, asr #31 // max(min(x + bw - iw, bw-1), 0)
+ it ge
+ movge r4, lr // min(max(-x, 0), bw - 1)
+
+ // center_h = bh - top_ext - bottom_ext
+ // dst += top_ext * PXSTRIDE(dst_stride)
+ // center_w = bw - left_ext - right_ext
+ sub r1, r1, r5 // bh - top_ext
+ mla r6, r5, r7, r6
+ sub r2, r0, r4 // bw - left_ext
+ sub r1, r1, r10 // center_h = bh - top_ext - bottom_ext
+ sub r2, r2, r11 // center_w = bw - left_ext - right_ext
+
+ mov r0, r6 // backup of dst
+
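+// Copy center_h rows: optionally replicate the leftmost source pixel across
+// the left extension, copy center_w pixels, then optionally replicate the
+// rightmost pixel across the right extension.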
+.macro v_loop need_left, need_right
+0:
+.if \need_left
+ vld1.8 {d0[], d1[]}, [r8]
+ mov r12, r6 // out = dst
+ mov r3, r4
+1:
+ subs r3, r3, #16
+ vst1.8 {q0}, [r12, :128]!
+ bgt 1b
+.endif
+ mov lr, r8
+ add r12, r6, r4 // out = dst + left_ext
+ mov r3, r2
+1:
+ vld1.8 {q0, q1}, [lr]!
+ subs r3, r3, #32
+.if \need_left
+ vst1.8 {q0, q1}, [r12]!
+.else
+ vst1.8 {q0, q1}, [r12, :128]!
+.endif
+ bgt 1b
+.if \need_right
+ add r3, r8, r2 // in + center_w
+ sub r3, r3, #1 // in + center_w - 1
+ add r12, r6, r4 // dst + left_ext
+ vld1.8 {d0[], d1[]}, [r3]
+ add r12, r12, r2 // out = dst + left_ext + center_w
+ mov r3, r11
+1:
+ subs r3, r3, #16
+ vst1.8 {q0}, [r12]!
+ bgt 1b
+.endif
+
+ subs r1, r1, #1 // center_h--
+ add r6, r6, r7
+ add r8, r8, r9
+ bgt 0b
+.endm
+
+ cmp r4, #0
+ beq 2f
+ // need_left
+ cmp r11, #0
+ beq 3f
+ // need_left + need_right
+ v_loop 1, 1
+ b 5f
+
+2:
+ // !need_left
+ cmp r11, #0
+ beq 4f
+ // !need_left + need_right
+ v_loop 0, 1
+ b 5f
+
+3:
+ // need_left + !need_right
+ v_loop 1, 0
+ b 5f
+
+4:
+ // !need_left + !need_right
+ v_loop 0, 0
+
+5:
+ cmp r10, #0
+ // Storing the original dst in r0 overwrote bw, recalculate it here
+ add r2, r2, r4 // center_w + left_ext
+ add r2, r2, r11 // bw = center_w + left_ext + right_ext
+
+ beq 3f
+ // need_bottom
+ sub r8, r6, r7 // ref = dst - stride
+ mov r4, r2
+1:
+ vld1.8 {q0, q1}, [r8, :128]!
+ mov r3, r10
+2:
+ subs r3, r3, #1
+ vst1.8 {q0, q1}, [r6, :128], r7
+ bgt 2b
+ mls r6, r7, r10, r6 // dst -= bottom_ext * stride
+ subs r4, r4, #32 // bw -= 32
+ add r6, r6, #32 // dst += 32
+ bgt 1b
+
+3:
+ cmp r5, #0
+ beq 3f
+ // need_top
+ mls r6, r7, r5, r0 // dst = stored_dst - top_ext * stride
+1:
+ vld1.8 {q0, q1}, [r0, :128]!
+ mov r3, r5
+2:
+ subs r3, r3, #1
+ vst1.8 {q0, q1}, [r6, :128], r7
+ bgt 2b
+ mls r6, r7, r5, r6 // dst -= top_ext * stride
+ subs r2, r2, #32 // bw -= 32
+ add r6, r6, #32 // dst += 32
+ bgt 1b
+
+3:
+ pop {r4-r11,pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/mc16.S b/third_party/dav1d/src/arm/32/mc16.S
new file mode 100644
index 0000000000..b7d845e219
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/mc16.S
@@ -0,0 +1,3658 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Janne Grunau
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+#define PREP_BIAS 8192
+
+.macro avg d0, d00, d01, d1, d10, d11
+ vld1.16 {q0, q1}, [r2, :128]!
+ vld1.16 {q2, q3}, [r3, :128]!
+ vqadd.s16 q0, q0, q2
+ vqadd.s16 q1, q1, q3
+ vmax.s16 q0, q0, q12 // -2*PREP_BIAS - 1 << intermediate_bits
+ vmax.s16 q1, q1, q12 // -2*PREP_BIAS - 1 << intermediate_bits
+ vqsub.s16 q0, q0, q12 // -2*PREP_BIAS - 1 << intermediate_bits
+ vqsub.s16 q1, q1, q12 // -2*PREP_BIAS - 1 << intermediate_bits
+ vshl.s16 \d0, q0, q13 // -(intermediate_bits+1)
+ vshl.s16 \d1, q1, q13 // -(intermediate_bits+1)
+.endm
+
+.macro w_avg d0, d00, d01, d1, d10, d11
+ vld1.16 {q0, q1}, [r2, :128]!
+ vld1.16 {q2, q3}, [r3, :128]!
+ // This difference requires a 17 bit range, and all bits are
+ // significant for the following multiplication.
+ vsubl.s16 \d0, d4, d0
+ vsubl.s16 q0, d5, d1
+ vsubl.s16 \d1, d6, d2
+ vsubl.s16 q1, d7, d3
+ vmul.s32 \d0, \d0, q4
+ vmul.s32 q0, q0, q4
+ vmul.s32 \d1, \d1, q4
+ vmul.s32 q1, q1, q4
+ vshr.s32 \d0, \d0, #4
+ vshr.s32 q0, q0, #4
+ vshr.s32 \d1, \d1, #4
+ vshr.s32 q1, q1, #4
+ vaddw.s16 \d0, \d0, d4
+ vaddw.s16 q0, q0, d5
+ vaddw.s16 \d1, \d1, d6
+ vaddw.s16 q1, q1, d7
+ vmovn.i32 \d00, \d0
+ vmovn.i32 \d01, q0
+ vmovn.i32 \d10, \d1
+ vmovn.i32 \d11, q1
+ vrshl.s16 \d0, \d0, q13 // -intermediate_bits
+ vrshl.s16 \d1, \d1, q13 // -intermediate_bits
+ vadd.s16 \d0, \d0, q12 // PREP_BIAS >> intermediate_bits
+ vadd.s16 \d1, \d1, q12 // PREP_BIAS >> intermediate_bits
+ vmin.s16 \d0, \d0, q15 // bitdepth_max
+ vmin.s16 \d1, \d1, q15 // bitdepth_max
+ vmax.s16 \d0, \d0, q14 // 0
+ vmax.s16 \d1, \d1, q14 // 0
+.endm
+
+.macro mask d0, d00, d01, d1, d10, d11
+ vld1.8 {q7}, [r6, :128]!
+ vld1.16 {q0, q1}, [r2, :128]!
+ vneg.s8 q7, q7
+ vld1.16 {q2, q3}, [r3, :128]!
+ vmovl.s8 q6, d14
+ vmovl.s8 q7, d15
+ vmovl.s16 q4, d12
+ vmovl.s16 q5, d13
+ vmovl.s16 q6, d14
+ vmovl.s16 q7, d15
+ vsubl.s16 \d0, d4, d0
+ vsubl.s16 q0, d5, d1
+ vsubl.s16 \d1, d6, d2
+ vsubl.s16 q1, d7, d3
+ vmul.s32 \d0, \d0, q4
+ vmul.s32 q0, q0, q5
+ vmul.s32 \d1, \d1, q6
+ vmul.s32 q1, q1, q7
+ vshr.s32 \d0, \d0, #6
+ vshr.s32 q0, q0, #6
+ vshr.s32 \d1, \d1, #6
+ vshr.s32 q1, q1, #6
+ vaddw.s16 \d0, \d0, d4
+ vaddw.s16 q0, q0, d5
+ vaddw.s16 \d1, \d1, d6
+ vaddw.s16 q1, q1, d7
+ vmovn.i32 \d00, \d0
+ vmovn.i32 \d01, q0
+ vmovn.i32 \d10, \d1
+ vmovn.i32 \d11, q1
+ vrshl.s16 \d0, \d0, q13 // -intermediate_bits
+ vrshl.s16 \d1, \d1, q13 // -intermediate_bits
+ vadd.s16 \d0, \d0, q12 // PREP_BIAS >> intermediate_bits
+ vadd.s16 \d1, \d1, q12 // PREP_BIAS >> intermediate_bits
+ vmin.s16 \d0, \d0, q15 // bitdepth_max
+ vmin.s16 \d1, \d1, q15 // bitdepth_max
+ vmax.s16 \d0, \d0, q14 // 0
+ vmax.s16 \d1, \d1, q14 // 0
+.endm
+
+.macro bidir_fn type, bdmax
+function \type\()_16bpc_neon, export=1
+ push {r4-r7,lr}
+ ldrd r4, r5, [sp, #20]
+ ldr r6, [sp, #28]
+ clz r4, r4
+.ifnc \type, avg
+ ldr r7, [sp, #32]
+ vmov.i16 q14, #0
+ vdup.16 q15, r7 // bitdepth_max
+.endif
+.ifc \type, w_avg
+ vpush {q4}
+.endif
+.ifc \type, mask
+ vpush {q4-q7}
+.endif
+ clz r7, \bdmax
+ sub r7, r7, #18 // intermediate_bits = clz(bitdepth_max) - 18
+.ifc \type, avg
+ mov lr, #1
+ movw r12, #2*PREP_BIAS
+ lsl lr, lr, r7 // 1 << intermediate_bits
+ neg r12, r12 // -2*PREP_BIAS
+ add r7, r7, #1
+ sub r12, r12, lr // -2*PREP_BIAS - 1 << intermediate_bits
+ neg r7, r7 // -(intermediate_bits+1)
+ vdup.16 q12, r12 // -2*PREP_BIAS - 1 << intermediate_bits
+ vdup.16 q13, r7 // -(intermediate_bits+1)
+.else
+ mov r12, #PREP_BIAS
+ lsr r12, r12, r7 // PREP_BIAS >> intermediate_bits
+ neg r7, r7 // -intermediate_bits
+ vdup.16 q12, r12 // PREP_BIAS >> intermediate_bits
+ vdup.16 q13, r7 // -intermediate_bits
+.endif
+.ifc \type, w_avg
+ vdup.32 q4, r6
+ vneg.s32 q4, q4
+.endif
+ adr r7, L(\type\()_tbl)
+ sub r4, r4, #24
+ \type q8, d16, d17, q9, d18, d19
+ ldr r4, [r7, r4, lsl #2]
+ add r7, r7, r4
+ bx r7
+
+ .align 2
+L(\type\()_tbl):
+ .word 1280f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_tbl) + CONFIG_THUMB
+
+40:
+ add r7, r0, r1
+ lsl r1, r1, #1
+4:
+ subs r5, r5, #4
+ vst1.16 {d16}, [r0, :64], r1
+ vst1.16 {d17}, [r7, :64], r1
+ vst1.16 {d18}, [r0, :64], r1
+ vst1.16 {d19}, [r7, :64], r1
+ ble 0f
+ \type q8, d16, d17, q9, d18, d19
+ b 4b
+80:
+ add r7, r0, r1
+ lsl r1, r1, #1
+8:
+ vst1.16 {q8}, [r0, :128], r1
+ subs r5, r5, #2
+ vst1.16 {q9}, [r7, :128], r1
+ ble 0f
+ \type q8, d16, d17, q9, d18, d19
+ b 8b
+160:
+16:
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r1
+ subs r5, r5, #2
+ vst1.16 {q10, q11}, [r0, :128], r1
+ ble 0f
+ \type q8, d16, d17, q9, d18, d19
+ b 16b
+320:
+ add r7, r0, #32
+32:
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r1
+ subs r5, r5, #1
+ vst1.16 {q10, q11}, [r7, :128], r1
+ ble 0f
+ \type q8, d16, d17, q9, d18, d19
+ b 32b
+640:
+ add r7, r0, #32
+ mov r12, #64
+ sub r1, r1, #64
+64:
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r12
+ \type q8, d16, d17, q9, d18, d19
+ vst1.16 {q10, q11}, [r7, :128], r12
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r1
+ subs r5, r5, #1
+ vst1.16 {q10, q11}, [r7, :128], r1
+ ble 0f
+ \type q8, d16, d17, q9, d18, d19
+ b 64b
+1280:
+ add r7, r0, #32
+ mov r12, #64
+ sub r1, r1, #192
+128:
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r12
+ \type q8, d16, d17, q9, d18, d19
+ vst1.16 {q10, q11}, [r7, :128], r12
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r12
+ \type q8, d16, d17, q9, d18, d19
+ vst1.16 {q10, q11}, [r7, :128], r12
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r12
+ \type q8, d16, d17, q9, d18, d19
+ vst1.16 {q10, q11}, [r7, :128], r12
+ \type q10, d20, d21, q11, d22, d23
+ vst1.16 {q8, q9}, [r0, :128], r1
+ subs r5, r5, #1
+ vst1.16 {q10, q11}, [r7, :128], r1
+ ble 0f
+ \type q8, d16, d17, q9, d18, d19
+ b 128b
+0:
+.ifc \type, mask
+ vpop {q4-q7}
+.endif
+.ifc \type, w_avg
+ vpop {q4}
+.endif
+ pop {r4-r7,pc}
+endfunc
+.endm
+
+bidir_fn avg, r6
+bidir_fn w_avg, r7
+bidir_fn mask, r7
+
+
+.macro w_mask_fn type
+function w_mask_\type\()_16bpc_neon, export=1
+ push {r4-r10,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #96]
+ ldrd r6, r7, [sp, #104]
+ ldr r8, [sp, #112]
+ clz r9, r4
+ adr lr, L(w_mask_\type\()_tbl)
+ vdup.16 q15, r8 // bitdepth_max
+ sub r9, r9, #24
+ clz r8, r8 // clz(bitdepth_max)
+ ldr r9, [lr, r9, lsl #2]
+ add r9, lr, r9
+ sub r8, r8, #12 // sh = intermediate_bits + 6 = clz(bitdepth_max) - 12
+ mov r10, #PREP_BIAS*64
+ neg r8, r8 // -sh
+ movw r12, #27615 // (64 + 1 - 38)<<mask_sh - 1 - mask_rnd
+ vdup.32 q14, r8 // -sh
+ vdup.16 q0, r12
+.if \type == 444
+ vmov.i8 q1, #64
+.elseif \type == 422
+ vdup.8 d4, r7
+ vmov.i8 d2, #129
+ vsub.i16 d2, d2, d4
+.elseif \type == 420
+ vdup.16 q2, r7
+ vmov.i16 q1, #0x100
+ vsub.i16 q1, q1, q2
+.endif
+ add r12, r0, r1
+ lsl r1, r1, #1
+ bx r9
+
+ .align 2
+L(w_mask_\type\()_tbl):
+ .word 1280f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 640f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 320f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 160f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 8f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+ .word 4f - L(w_mask_\type\()_tbl) + CONFIG_THUMB
+
+4:
+ vld1.16 {q2, q3}, [r2, :128]! // tmp1 (four rows at once)
+ vld1.16 {q4, q5}, [r3, :128]! // tmp2 (four rows at once)
+ subs r5, r5, #4
+ vdup.32 q13, r10 // PREP_BIAS*64
+ vabd.s16 q6, q2, q4 // abs(tmp1 - tmp2)
+ vabd.s16 q7, q3, q5
+ vsubl.s16 q8, d8, d4 // tmp2 - tmp1 (requires 17 bit)
+ vsubl.s16 q9, d9, d5
+ vsubl.s16 q10, d10, d6
+ vsubl.s16 q11, d11, d7
+ vqsub.u16 q6, q0, q6 // 27615 - abs()
+ vqsub.u16 q7, q0, q7
+ vshll.s16 q5, d7, #6 // tmp1 << 6
+ vshll.s16 q4, d6, #6
+ vshll.s16 q3, d5, #6
+ vshll.s16 q2, d4, #6
+ vshr.u16 q6, q6, #10 // 64-m = (27615 - abs()) >> mask_sh
+ vshr.u16 q7, q7, #10
+ vadd.i32 q2, q2, q13 // += PREP_BIAS*64
+ vadd.i32 q3, q3, q13
+ vadd.i32 q4, q4, q13
+ vadd.i32 q5, q5, q13
+ vmovl.u16 q12, d12
+ vmovl.u16 q13, d13
+ vmla.i32 q2, q8, q12 // (tmp2-tmp1)*(64-m)
+ vmovl.u16 q12, d14
+ vmla.i32 q3, q9, q13
+ vmovl.u16 q13, d15
+ vmla.i32 q4, q10, q12
+ vmla.i32 q5, q11, q13
+ vrshl.s32 q2, q2, q14 // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
+ vrshl.s32 q3, q3, q14
+ vrshl.s32 q4, q4, q14
+ vrshl.s32 q5, q5, q14
+ vqmovun.s32 d4, q2 // iclip_pixel
+ vqmovun.s32 d5, q3
+ vqmovun.s32 d6, q4
+ vqmovun.s32 d7, q5
+ vmin.u16 q2, q2, q15 // iclip_pixel
+ vmin.u16 q3, q3, q15 // iclip_pixel
+.if \type == 444
+ vmovn.i16 d12, q6 // 64 - m
+ vmovn.i16 d13, q7
+ vsub.i16 q6, q1, q6 // m
+ vst1.8 {q6}, [r6, :128]!
+.elseif \type == 422
+ vpadd.i16 d12, d12, d13 // (64 - m) + (64 - n) (column wise addition)
+ vpadd.i16 d13, d14, d15
+ vmovn.i16 d12, q6
+ vhsub.u8 d12, d2, d12 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
+ vst1.8 {d12}, [r6, :64]!
+.elseif \type == 420
+ vadd.i16 d12, d12, d13 // (64 - my1) + (64 - my2) (row wise addition)
+ vadd.i16 d13, d14, d15
+ vpadd.i16 d12, d12, d13 // (128 - m) + (128 - n) (column wise addition)
+ vsub.i16 d12, d2, d12 // (256 - sign) - ((128 - m) + (128 - n))
+ vrshrn.i16 d12, q6, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
+ vst1.32 {d12[0]}, [r6, :32]!
+.endif
+ vst1.16 {d4}, [r0, :64], r1
+ vst1.16 {d5}, [r12, :64], r1
+ vst1.16 {d6}, [r0, :64], r1
+ vst1.16 {d7}, [r12, :64], r1
+ bgt 4b
+ vpop {q4-q7}
+ pop {r4-r10,pc}
+8:
+ vld1.16 {q2, q3}, [r2, :128]! // tmp1
+ vld1.16 {q4, q5}, [r3, :128]! // tmp2
+ subs r5, r5, #2
+ vdup.32 q13, r10 // PREP_BIAS*64
+ vabd.s16 q6, q2, q4 // abs(tmp1 - tmp2)
+ vabd.s16 q7, q3, q5
+ vsubl.s16 q8, d8, d4 // tmp2 - tmp1 (requires 17 bit)
+ vsubl.s16 q9, d9, d5
+ vsubl.s16 q10, d10, d6
+ vsubl.s16 q11, d11, d7
+ vqsub.u16 q6, q0, q6 // 27615 - abs()
+ vqsub.u16 q7, q0, q7
+ vshll.s16 q5, d7, #6 // tmp1 << 6
+ vshll.s16 q4, d6, #6
+ vshll.s16 q3, d5, #6
+ vshll.s16 q2, d4, #6
+ vshr.u16 q6, q6, #10 // 64-m = (27615 - abs()) >> mask_sh
+ vshr.u16 q7, q7, #10
+ vadd.i32 q2, q2, q13 // += PREP_BIAS*64
+ vadd.i32 q3, q3, q13
+ vadd.i32 q4, q4, q13
+ vadd.i32 q5, q5, q13
+ vmovl.u16 q12, d12
+ vmovl.u16 q13, d13
+ vmla.i32 q2, q8, q12 // (tmp2-tmp1)*(64-m)
+ vmovl.u16 q12, d14
+ vmla.i32 q3, q9, q13
+ vmovl.u16 q13, d15
+ vmla.i32 q4, q10, q12
+ vmla.i32 q5, q11, q13
+ vrshl.s32 q2, q2, q14 // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
+ vrshl.s32 q3, q3, q14
+ vrshl.s32 q4, q4, q14
+ vrshl.s32 q5, q5, q14
+ vqmovun.s32 d4, q2 // iclip_pixel
+ vqmovun.s32 d5, q3
+ vqmovun.s32 d6, q4
+ vqmovun.s32 d7, q5
+ vmin.u16 q2, q2, q15 // iclip_pixel
+ vmin.u16 q3, q3, q15 // iclip_pixel
+.if \type == 444
+ vmovn.i16 d12, q6 // 64 - m
+ vmovn.i16 d13, q7
+ vsub.i16 q6, q1, q6 // m
+ vst1.8 {q6}, [r6, :128]!
+.elseif \type == 422
+ vpadd.i16 d12, d12, d13 // (64 - m) + (64 - n) (column wise addition)
+ vpadd.i16 d13, d14, d15
+ vmovn.i16 d12, q6
+ vhsub.u8 d12, d2, d12 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
+ vst1.8 {d12}, [r6, :64]!
+.elseif \type == 420
+ vadd.i16 q6, q6, q7 // (64 - my1) + (64 - my2) (row wise addition)
+ vpadd.i16 d12, d12, d13 // (128 - m) + (128 - n) (column wise addition)
+ vsub.i16 d12, d2, d12 // (256 - sign) - ((128 - m) + (128 - n))
+ vrshrn.i16 d12, q6, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
+ vst1.32 {d12[0]}, [r6, :32]!
+.endif
+ vst1.16 {q2}, [r0, :128], r1
+ vst1.16 {q3}, [r12, :128], r1
+ bgt 8b
+ vpop {q4-q7}
+ pop {r4-r10,pc}
+1280:
+640:
+320:
+160:
+ sub r1, r1, r4, lsl #1
+.if \type == 444
+ add lr, r6, r4
+.elseif \type == 422
+ add lr, r6, r4, lsr #1
+.endif
+ add r7, r2, r4, lsl #1
+ add r9, r3, r4, lsl #1
+161:
+ mov r8, r4
+16:
+ vld1.16 {q2}, [r2, :128]! // tmp1
+ vld1.16 {q4}, [r3, :128]! // tmp2
+ vld1.16 {q3}, [r7, :128]!
+ vld1.16 {q5}, [r9, :128]!
+ subs r8, r8, #8
+ vdup.32 q13, r10 // PREP_BIAS*64
+ vabd.s16 q6, q2, q4 // abs(tmp1 - tmp2)
+ vabd.s16 q7, q3, q5
+ vsubl.s16 q8, d8, d4 // tmp2 - tmp1 (requires 17 bit)
+ vsubl.s16 q9, d9, d5
+ vsubl.s16 q10, d10, d6
+ vsubl.s16 q11, d11, d7
+ vqsub.u16 q6, q0, q6 // 27615 - abs()
+ vqsub.u16 q7, q0, q7
+ vshll.s16 q5, d7, #6 // tmp1 << 6
+ vshll.s16 q4, d6, #6
+ vshll.s16 q3, d5, #6
+ vshll.s16 q2, d4, #6
+ vshr.u16 q6, q6, #10 // 64-m = (27615 - abs()) >> mask_sh
+ vshr.u16 q7, q7, #10
+ vadd.i32 q2, q2, q13 // += PREP_BIAS*64
+ vadd.i32 q3, q3, q13
+ vadd.i32 q4, q4, q13
+ vadd.i32 q5, q5, q13
+ vmovl.u16 q12, d12
+ vmovl.u16 q13, d13
+ vmla.i32 q2, q8, q12 // (tmp2-tmp1)*(64-m)
+ vmovl.u16 q12, d14
+ vmla.i32 q3, q9, q13
+ vmovl.u16 q13, d15
+ vmla.i32 q4, q10, q12
+ vmla.i32 q5, q11, q13
+ vrshl.s32 q2, q2, q14 // (tmp1<<6 + (tmp2-tmp1)*(64-m) + (1 << (sh-1)) + PREP_BIAS*64) >> sh
+ vrshl.s32 q3, q3, q14
+ vrshl.s32 q4, q4, q14
+ vrshl.s32 q5, q5, q14
+ vqmovun.s32 d4, q2 // iclip_pixel
+ vqmovun.s32 d5, q3
+ vqmovun.s32 d6, q4
+ vqmovun.s32 d7, q5
+ vmin.u16 q2, q2, q15 // iclip_pixel
+ vmin.u16 q3, q3, q15 // iclip_pixel
+.if \type == 444
+ vmovn.i16 d12, q6 // 64 - m
+ vmovn.i16 d13, q7
+ vsub.i16 q6, q1, q6 // m
+ vst1.8 {d12}, [r6, :64]!
+ vst1.8 {d13}, [lr, :64]!
+.elseif \type == 422
+ vpadd.i16 d12, d12, d13 // (64 - m) + (64 - n) (column wise addition)
+ vpadd.i16 d13, d14, d15
+ vmovn.i16 d12, q6
+ vhsub.u8 d12, d2, d12 // ((129 - sign) - ((64 - m) + (64 - n))) >> 1
+ vst1.32 {d12[0]}, [r6, :32]!
+ vst1.32 {d12[1]}, [lr, :32]!
+.elseif \type == 420
+ vadd.i16 q6, q6, q7 // (64 - my1) + (64 - my2) (row wise addition)
+ vpadd.i16 d12, d12, d13 // (128 - m) + (128 - n) (column wise addition)
+ vsub.i16 d12, d2, d12 // (256 - sign) - ((128 - m) + (128 - n))
+ vrshrn.i16 d12, q6, #2 // ((256 - sign) - ((128 - m) + (128 - n)) + 2) >> 2
+ vst1.32 {d12[0]}, [r6, :32]!
+.endif
+ vst1.16 {q2}, [r0, :128]!
+ vst1.16 {q3}, [r12, :128]!
+ bgt 16b
+ subs r5, r5, #2
+ add r2, r2, r4, lsl #1
+ add r3, r3, r4, lsl #1
+ add r7, r7, r4, lsl #1
+ add r9, r9, r4, lsl #1
+.if \type == 444
+ add r6, r6, r4
+ add lr, lr, r4
+.elseif \type == 422
+ add r6, r6, r4, lsr #1
+ add lr, lr, r4, lsr #1
+.endif
+ add r0, r0, r1
+ add r12, r12, r1
+ bgt 161b
+ vpop {q4-q7}
+ pop {r4-r10,pc}
+endfunc
+.endm
+
+w_mask_fn 444
+w_mask_fn 422
+w_mask_fn 420
+
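+// blend: dst[x] = (dst[x]*(64 - m) + tmp[x]*m + 32) >> 6,
+// computed as dst + vqrdmulh(dst - tmp, -m << 9).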
+function blend_16bpc_neon, export=1
+ push {r4-r5,lr}
+ ldrd r4, r5, [sp, #12]
+ clz lr, r3
+ adr r3, L(blend_tbl)
+ sub lr, lr, #26
+ ldr lr, [r3, lr, lsl #2]
+ add r3, r3, lr
+ bx r3
+
+ .align 2
+L(blend_tbl):
+ .word 320f - L(blend_tbl) + CONFIG_THUMB
+ .word 160f - L(blend_tbl) + CONFIG_THUMB
+ .word 80f - L(blend_tbl) + CONFIG_THUMB
+ .word 40f - L(blend_tbl) + CONFIG_THUMB
+
+40:
+ add r12, r0, r1
+ lsl r1, r1, #1
+4:
+ vld1.8 {d4}, [r5, :64]!
+ vld1.16 {q1}, [r2, :128]!
+ vld1.16 {d0}, [r0, :64]
+ vneg.s8 d4, d4 // -m
+ subs r4, r4, #2
+ vld1.16 {d1}, [r12, :64]
+ vmovl.s8 q2, d4
+ vshl.i16 q2, q2, #9 // -m << 9
+ vsub.i16 q1, q0, q1 // a - b
+ vqrdmulh.s16 q1, q1, q2 // ((a-b)*-m + 32) >> 6
+ vadd.i16 q0, q0, q1
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d1}, [r12, :64], r1
+ bgt 4b
+ pop {r4-r5,pc}
+80:
+ add r12, r0, r1
+ lsl r1, r1, #1
+8:
+ vld1.8 {q8}, [r5, :128]!
+ vld1.16 {q2, q3}, [r2, :128]!
+ vneg.s8 q9, q8 // -m
+ vld1.16 {q0}, [r0, :128]
+ vld1.16 {q1}, [r12, :128]
+ vmovl.s8 q8, d18
+ vmovl.s8 q9, d19
+ vshl.i16 q8, q8, #9 // -m << 9
+ vshl.i16 q9, q9, #9
+ vsub.i16 q2, q0, q2 // a - b
+ vsub.i16 q3, q1, q3
+ subs r4, r4, #2
+ vqrdmulh.s16 q2, q2, q8 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q3, q3, q9
+ vadd.i16 q0, q0, q2
+ vadd.i16 q1, q1, q3
+ vst1.16 {q0}, [r0, :128], r1
+ vst1.16 {q1}, [r12, :128], r1
+ bgt 8b
+ pop {r4-r5,pc}
+160:
+ add r12, r0, r1
+ lsl r1, r1, #1
+16:
+ vld1.8 {q12, q13}, [r5, :128]!
+ vld1.16 {q8, q9}, [r2, :128]!
+ subs r4, r4, #2
+ vneg.s8 q14, q12 // -m
+ vld1.16 {q0, q1}, [r0, :128]
+ vneg.s8 q15, q13
+ vld1.16 {q10, q11}, [r2, :128]!
+ vmovl.s8 q12, d28
+ vmovl.s8 q13, d29
+ vmovl.s8 q14, d30
+ vmovl.s8 q15, d31
+ vld1.16 {q2, q3}, [r12, :128]
+ vshl.i16 q12, q12, #9 // -m << 9
+ vshl.i16 q13, q13, #9
+ vshl.i16 q14, q14, #9
+ vshl.i16 q15, q15, #9
+ vsub.i16 q8, q0, q8 // a - b
+ vsub.i16 q9, q1, q9
+ vsub.i16 q10, q2, q10
+ vsub.i16 q11, q3, q11
+ vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q9, q9, q13
+ vqrdmulh.s16 q10, q10, q14
+ vqrdmulh.s16 q11, q11, q15
+ vadd.i16 q0, q0, q8
+ vadd.i16 q1, q1, q9
+ vadd.i16 q2, q2, q10
+ vst1.16 {q0, q1}, [r0, :128], r1
+ vadd.i16 q3, q3, q11
+ vst1.16 {q2, q3}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5,pc}
+320:
+ add r12, r0, #32
+32:
+ vld1.8 {q12, q13}, [r5, :128]!
+ vld1.16 {q8, q9}, [r2, :128]!
+ subs r4, r4, #1
+ vneg.s8 q14, q12 // -m
+ vld1.16 {q0, q1}, [r0, :128]
+ vneg.s8 q15, q13
+ vld1.16 {q10, q11}, [r2, :128]!
+ vmovl.s8 q12, d28
+ vmovl.s8 q13, d29
+ vmovl.s8 q14, d30
+ vmovl.s8 q15, d31
+ vld1.16 {q2, q3}, [r12, :128]
+ vshl.i16 q12, q12, #9 // -m << 9
+ vshl.i16 q13, q13, #9
+ vshl.i16 q14, q14, #9
+ vshl.i16 q15, q15, #9
+ vsub.i16 q8, q0, q8 // a - b
+ vsub.i16 q9, q1, q9
+ vsub.i16 q10, q2, q10
+ vsub.i16 q11, q3, q11
+ vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q9, q9, q13
+ vqrdmulh.s16 q10, q10, q14
+ vqrdmulh.s16 q11, q11, q15
+ vadd.i16 q0, q0, q8
+ vadd.i16 q1, q1, q9
+ vadd.i16 q2, q2, q10
+ vst1.16 {q0, q1}, [r0, :128], r1
+ vadd.i16 q3, q3, q11
+ vst1.16 {q2, q3}, [r12, :128], r1
+ bgt 32b
+ pop {r4-r5,pc}
+endfunc
+
+function blend_h_16bpc_neon, export=1
+ push {r4-r5,lr}
+ ldr r4, [sp, #12]
+ movrel r5, X(obmc_masks)
+ add r5, r5, r4
+ sub r4, r4, r4, lsr #2
+ clz lr, r3
+ adr r12, L(blend_h_tbl)
+ sub lr, lr, #24
+ ldr lr, [r12, lr, lsl #2]
+ add r12, r12, lr
+ bx r12
+
+ .align 2
+L(blend_h_tbl):
+ .word 1280f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 640f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 320f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 160f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 80f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 40f - L(blend_h_tbl) + CONFIG_THUMB
+ .word 20f - L(blend_h_tbl) + CONFIG_THUMB
+
+20:
+ add r12, r0, r1
+ lsl r1, r1, #1
+2:
+ vld2.8 {d4[], d5[]}, [r5, :16]!
+ vld1.16 {d2}, [r2, :64]!
+ vext.8 d4, d4, d5, #6
+ subs r4, r4, #2
+ vneg.s8 d4, d4 // -m
+ vld1.32 {d0[]}, [r0, :32]
+ vld1.32 {d0[1]}, [r12, :32]
+ vmovl.s8 q2, d4
+ vshl.i16 d4, d4, #9 // -m << 9
+ vsub.i16 d2, d0, d2 // a - b
+ vqrdmulh.s16 d2, d2, d4 // ((a-b)*-m + 32) >> 6
+ vadd.i16 d0, d0, d2
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d0[1]}, [r12, :32], r1
+ bgt 2b
+ pop {r4-r5,pc}
+40:
+ add r12, r0, r1
+ lsl r1, r1, #1
+4:
+ vld2.8 {d4[], d5[]}, [r5, :16]!
+ vld1.16 {q1}, [r2, :128]!
+ vext.8 d4, d4, d5, #4
+ subs r4, r4, #2
+ vneg.s8 d4, d4 // -m
+ vld1.16 {d0}, [r0, :64]
+ vld1.16 {d1}, [r12, :64]
+ vmovl.s8 q2, d4
+ vshl.i16 q2, q2, #9 // -m << 9
+ vsub.i16 q1, q0, q1 // a - b
+ vqrdmulh.s16 q1, q1, q2 // ((a-b)*-m + 32) >> 6
+ vadd.i16 q0, q0, q1
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d1}, [r12, :64], r1
+ bgt 4b
+ pop {r4-r5,pc}
+80:
+ add r12, r0, r1
+ lsl r1, r1, #1
+8:
+ vld2.8 {d16[], d17[]}, [r5, :16]!
+ vld1.16 {q2, q3}, [r2, :128]!
+ vneg.s8 q9, q8 // -m
+ vld1.16 {q0}, [r0, :128]
+ subs r4, r4, #2
+ vmovl.s8 q8, d18
+ vmovl.s8 q9, d19
+ vld1.16 {q1}, [r12, :128]
+ vshl.i16 q8, q8, #9 // -m << 9
+ vshl.i16 q9, q9, #9
+ vsub.i16 q2, q0, q2 // a - b
+ vsub.i16 q3, q1, q3
+ vqrdmulh.s16 q2, q2, q8 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q3, q3, q9
+ vadd.i16 q0, q0, q2
+ vadd.i16 q1, q1, q3
+ vst1.16 {q0}, [r0, :128], r1
+ vst1.16 {q1}, [r12, :128], r1
+ bgt 8b
+ pop {r4-r5,pc}
+160:
+ add r12, r0, r1
+ lsl r1, r1, #1
+16:
+ vld2.8 {d24[], d25[]}, [r5, :16]!
+ vld1.16 {q8, q9}, [r2, :128]!
+ subs r4, r4, #2
+ vneg.s8 q13, q12 // -m
+ vld1.16 {q0, q1}, [r0, :128]
+ vmovl.s8 q12, d26
+ vld1.16 {q10, q11}, [r2, :128]!
+ vmovl.s8 q13, d27
+ vld1.16 {q2, q3}, [r12, :128]
+ vshl.i16 q12, q12, #9 // -m << 9
+ vshl.i16 q13, q13, #9
+ vsub.i16 q8, q0, q8 // a - b
+ vsub.i16 q9, q1, q9
+ vsub.i16 q10, q2, q10
+ vsub.i16 q11, q3, q11
+ vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q9, q9, q12
+ vqrdmulh.s16 q10, q10, q13
+ vqrdmulh.s16 q11, q11, q13
+ vadd.i16 q0, q0, q8
+ vadd.i16 q1, q1, q9
+ vadd.i16 q2, q2, q10
+ vadd.i16 q3, q3, q11
+ vst1.16 {q0, q1}, [r0, :128], r1
+ vst1.16 {q2, q3}, [r12, :128], r1
+ bgt 16b
+ pop {r4-r5,pc}
+1280:
+640:
+320:
+ sub r1, r1, r3, lsl #1
+321:
+ vld1.8 {d24[]}, [r5]!
+ mov r12, r3
+ vneg.s8 d24, d24 // -m
+ vmovl.s8 q12, d24
+ vshl.i16 q12, q12, #9 // -m << 9
+32:
+ vld1.16 {q8, q9}, [r2, :128]!
+ vld1.16 {q0, q1}, [r0, :128]!
+ subs r12, r12, #32
+ vld1.16 {q10, q11}, [r2, :128]!
+ vld1.16 {q2, q3}, [r0, :128]
+ vsub.i16 q8, q0, q8 // a - b
+ vsub.i16 q9, q1, q9
+ vsub.i16 q10, q2, q10
+ vsub.i16 q11, q3, q11
+ sub r0, r0, #32
+ vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q9, q9, q12
+ vqrdmulh.s16 q10, q10, q12
+ vqrdmulh.s16 q11, q11, q12
+ vadd.i16 q0, q0, q8
+ vadd.i16 q1, q1, q9
+ vadd.i16 q2, q2, q10
+ vst1.16 {q0, q1}, [r0, :128]!
+ vadd.i16 q3, q3, q11
+ vst1.16 {q2, q3}, [r0, :128]!
+ bgt 32b
+ subs r4, r4, #1
+ add r0, r0, r1
+ bgt 321b
+ pop {r4-r5,pc}
+endfunc
+
+function blend_v_16bpc_neon, export=1
+ push {r4,lr}
+ ldr r4, [sp, #8]
+ movrel lr, X(obmc_masks)
+ add lr, lr, r3
+ clz r12, r3
+ adr r3, L(blend_v_tbl)
+ sub r12, r12, #26
+ ldr r12, [r3, r12, lsl #2]
+ add r3, r3, r12
+ bx r3
+
+ .align 2
+L(blend_v_tbl):
+ .word 320f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 160f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 80f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 40f - L(blend_v_tbl) + CONFIG_THUMB
+ .word 20f - L(blend_v_tbl) + CONFIG_THUMB
+
+20:
+ add r12, r0, r1
+ lsl r1, r1, #1
+ vld1.8 {d4[]}, [lr]
+ vneg.s8 d4, d4 // -m
+ vmovl.s8 q2, d4
+ vshl.i16 d4, d4, #9 // -m << 9
+2:
+ vld1.32 {d2[]}, [r2, :32]!
+ vld1.16 {d0[]}, [r0, :16]
+ subs r4, r4, #2
+ vld1.16 {d2[1]}, [r2, :16]
+ vld1.16 {d0[1]}, [r12, :16]
+ add r2, r2, #4
+ vsub.i16 d2, d0, d2 // a - b
+ vqrdmulh.s16 d2, d2, d4 // ((a-b)*-m + 32) >> 6
+ vadd.i16 d0, d0, d2
+ vst1.16 {d0[0]}, [r0, :16], r1
+ vst1.16 {d0[1]}, [r12, :16], r1
+ bgt 2b
+ pop {r4,pc}
+40:
+ vld1.32 {d4[]}, [lr, :32]
+ add r12, r0, r1
+ vneg.s8 d4, d4 // -m
+ lsl r1, r1, #1
+ vmovl.s8 q2, d4
+ sub r1, r1, #4
+ vshl.i16 q2, q2, #9 // -m << 9
+4:
+ vld1.16 {q1}, [r2, :128]!
+ vld1.16 {d0}, [r0, :64]
+ vld1.16 {d1}, [r12, :64]
+ subs r4, r4, #2
+ vsub.i16 q1, q0, q1 // a - b
+ vqrdmulh.s16 q1, q1, q2 // ((a-b)*-m + 32) >> 6
+ vadd.i16 q0, q0, q1
+ vst1.32 {d0[0]}, [r0, :32]!
+ vst1.32 {d1[0]}, [r12, :32]!
+ vst1.16 {d0[2]}, [r0, :16], r1
+ vst1.16 {d1[2]}, [r12, :16], r1
+ bgt 4b
+ pop {r4,pc}
+80:
+ vld1.8 {d16}, [lr, :64]
+ add r12, r0, r1
+ vneg.s8 d16, d16 // -m
+ lsl r1, r1, #1
+ vmovl.s8 q8, d16
+ sub r1, r1, #8
+ vshl.i16 q8, q8, #9 // -m << 9
+8:
+ vld1.16 {q2, q3}, [r2, :128]!
+ vld1.16 {q0}, [r0, :128]
+ vld1.16 {q1}, [r12, :128]
+ subs r4, r4, #2
+ vsub.i16 q2, q0, q2 // a - b
+ vsub.i16 q3, q1, q3
+ vqrdmulh.s16 q2, q2, q8 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q3, q3, q8
+ vadd.i16 q0, q0, q2
+ vadd.i16 q1, q1, q3
+ vst1.16 {d0}, [r0, :64]!
+ vst1.16 {d2}, [r12, :64]!
+ vst1.32 {d1[0]}, [r0, :32], r1
+ vst1.32 {d3[0]}, [r12, :32], r1
+ bgt 8b
+ pop {r4,pc}
+160:
+ vld1.8 {q12}, [lr, :128]
+ add r12, r0, r1
+ vneg.s8 q13, q12 // -m
+ lsl r1, r1, #1
+ vmovl.s8 q12, d26
+ vmovl.s8 q13, d27
+ vshl.i16 q12, q12, #9 // -m << 9
+ vshl.i16 d26, d26, #9
+16:
+ vld1.16 {q8, q9}, [r2, :128]!
+ vld1.16 {d0, d1, d2}, [r0, :64]
+ subs r4, r4, #2
+ vld1.16 {q10, q11}, [r2, :128]!
+ vsub.i16 q8, q0, q8 // a - b
+ vld1.16 {d4, d5, d6}, [r12, :64]
+ vsub.i16 d18, d2, d18
+ vsub.i16 q10, q2, q10
+ vsub.i16 d22, d6, d22
+ vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 d18, d18, d26
+ vqrdmulh.s16 q10, q10, q12
+ vqrdmulh.s16 d22, d22, d26
+ vadd.i16 q0, q0, q8
+ vadd.i16 d2, d2, d18
+ vadd.i16 q2, q2, q10
+ vst1.16 {d0, d1, d2}, [r0, :64], r1
+ vadd.i16 d6, d6, d22
+ vst1.16 {d4, d5, d6}, [r12, :64], r1
+ bgt 16b
+ pop {r4,pc}
+320:
+ vld1.8 {d24, d25, d26}, [lr, :64]
+ vneg.s8 q14, q12 // -m
+ vneg.s8 d30, d26
+ vmovl.s8 q12, d28
+ vmovl.s8 q13, d29
+ vmovl.s8 q14, d30
+ sub r1, r1, #32
+ vshl.i16 q12, q12, #9 // -m << 9
+ vshl.i16 q13, q13, #9
+ vshl.i16 q14, q14, #9
+32:
+ vld1.16 {q8, q9}, [r2, :128]!
+ vld1.16 {q0, q1}, [r0, :128]!
+ subs r4, r4, #1
+ vld1.16 {q10}, [r2, :128]
+ vsub.i16 q8, q0, q8 // a - b
+ vld1.16 {q2}, [r0, :128]
+ sub r0, r0, #32
+ vsub.i16 q9, q1, q9
+ vsub.i16 q10, q2, q10
+ vqrdmulh.s16 q8, q8, q12 // ((a-b)*-m + 32) >> 6
+ vqrdmulh.s16 q9, q9, q13
+ vqrdmulh.s16 q10, q10, q14
+ vadd.i16 q0, q0, q8
+ vadd.i16 q1, q1, q9
+ vadd.i16 q2, q2, q10
+ vst1.16 {q0, q1}, [r0, :128]!
+ add r2, r2, #32
+ vst1.16 {q2}, [r0, :128], r1
+ bgt 32b
+ pop {r4,pc}
+endfunc
+
+// This has got the same signature as the put_8tap functions,
+// and assumes that r9 is set to (clz(w)-24).
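+// With no subpel filtering this is a plain block copy: 16-bit pixels are
+// copied from src (stride r3) to dst (stride r1), two rows per iteration
+// for the small widths, with r5 counting the remaining rows.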
+function put_neon
+ adr r10, L(put_tbl)
+ ldr r9, [r10, r9, lsl #2]
+ add r10, r10, r9
+ bx r10
+
+ .align 2
+L(put_tbl):
+ .word 1280f - L(put_tbl) + CONFIG_THUMB
+ .word 640f - L(put_tbl) + CONFIG_THUMB
+ .word 320f - L(put_tbl) + CONFIG_THUMB
+ .word 16f - L(put_tbl) + CONFIG_THUMB
+ .word 80f - L(put_tbl) + CONFIG_THUMB
+ .word 4f - L(put_tbl) + CONFIG_THUMB
+ .word 2f - L(put_tbl) + CONFIG_THUMB
+
+2:
+ vld1.32 {d0[]}, [r2], r3
+ vld1.32 {d1[]}, [r2], r3
+ subs r5, r5, #2
+ vst1.32 {d0[0]}, [r0, :32], r1
+ vst1.32 {d1[1]}, [r0, :32], r1
+ bgt 2b
+ pop {r4-r11,pc}
+4:
+ vld1.16 {d0}, [r2], r3
+ vld1.16 {d1}, [r2], r3
+ subs r5, r5, #2
+ vst1.16 {d0}, [r0, :64], r1
+ vst1.16 {d1}, [r0, :64], r1
+ bgt 4b
+ pop {r4-r11,pc}
+80:
+ add r8, r0, r1
+ lsl r1, r1, #1
+ add r9, r2, r3
+ lsl r3, r3, #1
+8:
+ vld1.16 {q0}, [r2], r3
+ vld1.16 {q1}, [r9], r3
+ subs r5, r5, #2
+ vst1.16 {q0}, [r0, :128], r1
+ vst1.16 {q1}, [r8, :128], r1
+ bgt 8b
+ pop {r4-r11,pc}
+16:
+ vld1.16 {q0, q1}, [r2], r3
+ subs r5, r5, #1
+ vst1.16 {q0, q1}, [r0, :128], r1
+ bgt 16b
+ pop {r4-r11,pc}
+320:
+ sub r1, r1, #32
+ sub r3, r3, #32
+32:
+ vld1.16 {q0, q1}, [r2]!
+ vst1.16 {q0, q1}, [r0, :128]!
+ vld1.16 {q2, q3}, [r2], r3
+ subs r5, r5, #1
+ vst1.16 {q2, q3}, [r0, :128], r1
+ bgt 32b
+ pop {r4-r11,pc}
+640:
+ sub r1, r1, #96
+ sub r3, r3, #96
+64:
+ vld1.16 {q8, q9}, [r2]!
+ vst1.16 {q8, q9}, [r0, :128]!
+ vld1.16 {q10, q11}, [r2]!
+ vst1.16 {q10, q11}, [r0, :128]!
+ vld1.16 {q12, q13}, [r2]!
+ vst1.16 {q12, q13}, [r0, :128]!
+ vld1.16 {q14, q15}, [r2], r3
+ subs r5, r5, #1
+ vst1.16 {q14, q15}, [r0, :128], r1
+ bgt 64b
+ pop {r4-r11,pc}
+1280:
+ sub r1, r1, #224
+ sub r3, r3, #224
+128:
+ vld1.16 {q8, q9}, [r2]!
+ vst1.16 {q8, q9}, [r0, :128]!
+ vld1.16 {q10, q11}, [r2]!
+ vst1.16 {q10, q11}, [r0, :128]!
+ vld1.16 {q12, q13}, [r2]!
+ vst1.16 {q12, q13}, [r0, :128]!
+ vld1.16 {q14, q15}, [r2]!
+ vst1.16 {q14, q15}, [r0, :128]!
+ vld1.16 {q8, q9}, [r2]!
+ vst1.16 {q8, q9}, [r0, :128]!
+ vld1.16 {q10, q11}, [r2]!
+ vst1.16 {q10, q11}, [r0, :128]!
+ vld1.16 {q12, q13}, [r2]!
+ vst1.16 {q12, q13}, [r0, :128]!
+ vld1.16 {q14, q15}, [r2], r3
+ subs r5, r5, #1
+ vst1.16 {q14, q15}, [r0, :128], r1
+ bgt 128b
+ pop {r4-r11,pc}
+endfunc
+
+// This has got the same signature as the prep_8tap functions,
+// and assumes that r9 is set to (clz(w)-24), r7 to intermediate_bits and
+// r8 to w*2.
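+// Unlike put_neon this stores intermediates rather than pixels:
+// out[x] = (src[x] << intermediate_bits) - PREP_BIAS, written to a packed
+// buffer with a row stride of 2*w bytes.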
+function prep_neon
+ adr r10, L(prep_tbl)
+ ldr r9, [r10, r9, lsl #2]
+ vdup.16 q15, r7 // intermediate_bits
+ vmov.i16 q14, #PREP_BIAS
+ add r10, r10, r9
+ bx r10
+
+ .align 2
+L(prep_tbl):
+ .word 1280f - L(prep_tbl) + CONFIG_THUMB
+ .word 640f - L(prep_tbl) + CONFIG_THUMB
+ .word 320f - L(prep_tbl) + CONFIG_THUMB
+ .word 16f - L(prep_tbl) + CONFIG_THUMB
+ .word 80f - L(prep_tbl) + CONFIG_THUMB
+ .word 40f - L(prep_tbl) + CONFIG_THUMB
+
+40:
+ add r9, r1, r2
+ lsl r2, r2, #1
+4:
+ vld1.16 {d0}, [r1], r2
+ vld1.16 {d1}, [r9], r2
+ subs r4, r4, #2
+ vshl.s16 q0, q0, q15
+ vsub.i16 q0, q0, q14
+ vst1.16 {q0}, [r0, :128]!
+ bgt 4b
+ pop {r4-r11,pc}
+80:
+ add r9, r1, r2
+ lsl r2, r2, #1
+8:
+ vld1.16 {q0}, [r1], r2
+ vld1.16 {q1}, [r9], r2
+ subs r4, r4, #2
+ vshl.s16 q0, q0, q15
+ vshl.s16 q1, q1, q15
+ vsub.i16 q0, q0, q14
+ vsub.i16 q1, q1, q14
+ vst1.16 {q0, q1}, [r0, :128]!
+ bgt 8b
+ pop {r4-r11,pc}
+16:
+ vld1.16 {q0, q1}, [r1], r2
+ vshl.s16 q0, q0, q15
+ vld1.16 {q2, q3}, [r1], r2
+ subs r4, r4, #2
+ vshl.s16 q1, q1, q15
+ vshl.s16 q2, q2, q15
+ vshl.s16 q3, q3, q15
+ vsub.i16 q0, q0, q14
+ vsub.i16 q1, q1, q14
+ vsub.i16 q2, q2, q14
+ vst1.16 {q0, q1}, [r0, :128]!
+ vsub.i16 q3, q3, q14
+ vst1.16 {q2, q3}, [r0, :128]!
+ bgt 16b
+ pop {r4-r11,pc}
+320:
+ sub r2, r2, #32
+32:
+ vld1.16 {q0, q1}, [r1]!
+ subs r4, r4, #1
+ vshl.s16 q0, q0, q15
+ vld1.16 {q2, q3}, [r1], r2
+ vshl.s16 q1, q1, q15
+ vshl.s16 q2, q2, q15
+ vshl.s16 q3, q3, q15
+ vsub.i16 q0, q0, q14
+ vsub.i16 q1, q1, q14
+ vsub.i16 q2, q2, q14
+ vst1.16 {q0, q1}, [r0, :128]!
+ vsub.i16 q3, q3, q14
+ vst1.16 {q2, q3}, [r0, :128]!
+ bgt 32b
+ pop {r4-r11,pc}
+640:
+ sub r2, r2, #96
+64:
+ vld1.16 {q0, q1}, [r1]!
+ subs r4, r4, #1
+ vshl.s16 q0, q0, q15
+ vld1.16 {q2, q3}, [r1]!
+ vshl.s16 q1, q1, q15
+ vld1.16 {q8, q9}, [r1]!
+ vshl.s16 q2, q2, q15
+ vld1.16 {q10, q11}, [r1], r2
+ vshl.s16 q3, q3, q15
+ vshl.s16 q8, q8, q15
+ vshl.s16 q9, q9, q15
+ vshl.s16 q10, q10, q15
+ vshl.s16 q11, q11, q15
+ vsub.i16 q0, q0, q14
+ vsub.i16 q1, q1, q14
+ vsub.i16 q2, q2, q14
+ vsub.i16 q3, q3, q14
+ vsub.i16 q8, q8, q14
+ vst1.16 {q0, q1}, [r0, :128]!
+ vsub.i16 q9, q9, q14
+ vst1.16 {q2, q3}, [r0, :128]!
+ vsub.i16 q10, q10, q14
+ vst1.16 {q8, q9}, [r0, :128]!
+ vsub.i16 q11, q11, q14
+ vst1.16 {q10, q11}, [r0, :128]!
+ bgt 64b
+ pop {r4-r11,pc}
+1280:
+ sub r2, r2, #224
+128:
+ vld1.16 {q0, q1}, [r1]!
+ subs r4, r4, #1
+ vshl.s16 q0, q0, q15
+ vld1.16 {q2, q3}, [r1]!
+ vshl.s16 q1, q1, q15
+ vld1.16 {q8, q9}, [r1]!
+ vshl.s16 q2, q2, q15
+ vld1.16 {q10, q11}, [r1]!
+ vshl.s16 q3, q3, q15
+ vshl.s16 q8, q8, q15
+ vshl.s16 q9, q9, q15
+ vshl.s16 q10, q10, q15
+ vshl.s16 q11, q11, q15
+ vsub.i16 q0, q0, q14
+ vsub.i16 q1, q1, q14
+ vsub.i16 q2, q2, q14
+ vsub.i16 q3, q3, q14
+ vsub.i16 q8, q8, q14
+ vst1.16 {q0, q1}, [r0, :128]!
+ vld1.16 {q0, q1}, [r1]!
+ vsub.i16 q9, q9, q14
+ vsub.i16 q10, q10, q14
+ vst1.16 {q2, q3}, [r0, :128]!
+ vld1.16 {q2, q3}, [r1]!
+ vsub.i16 q11, q11, q14
+ vshl.s16 q0, q0, q15
+ vst1.16 {q8, q9}, [r0, :128]!
+ vld1.16 {q8, q9}, [r1]!
+ vshl.s16 q1, q1, q15
+ vshl.s16 q2, q2, q15
+ vst1.16 {q10, q11}, [r0, :128]!
+ vld1.16 {q10, q11}, [r1], r2
+ vshl.s16 q3, q3, q15
+ vshl.s16 q8, q8, q15
+ vshl.s16 q9, q9, q15
+ vshl.s16 q10, q10, q15
+ vshl.s16 q11, q11, q15
+ vsub.i16 q0, q0, q14
+ vsub.i16 q1, q1, q14
+ vsub.i16 q2, q2, q14
+ vsub.i16 q3, q3, q14
+ vsub.i16 q8, q8, q14
+ vst1.16 {q0, q1}, [r0, :128]!
+ vsub.i16 q9, q9, q14
+ vst1.16 {q2, q3}, [r0, :128]!
+ vsub.i16 q10, q10, q14
+ vst1.16 {q8, q9}, [r0, :128]!
+ vsub.i16 q11, q11, q14
+ vst1.16 {q10, q11}, [r0, :128]!
+ bgt 128b
+ pop {r4-r11,pc}
+endfunc
+
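+// Helper macros for the 8-tap paths below: bulk loads of rows into d/q
+// registers, and thin wrappers that apply an operation to however many
+// registers were passed (the .ifnb checks skip the absent arguments).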
+.macro load_slice s0, s1, strd, wd, d0, d1, d2, d3, d4, d5, d6
+ vld1.\wd {\d0[]}, [\s0], \strd
+ vld1.\wd {\d1[]}, [\s1], \strd
+.ifnb \d2
+ vld1.\wd {\d2[]}, [\s0], \strd
+ vld1.\wd {\d3[]}, [\s1], \strd
+.endif
+.ifnb \d4
+ vld1.\wd {\d4[]}, [\s0], \strd
+.endif
+.ifnb \d5
+ vld1.\wd {\d5[]}, [\s1], \strd
+.endif
+.ifnb \d6
+ vld1.\wd {\d6[]}, [\s0], \strd
+.endif
+.endm
+.macro load_reg s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
+ vld1.16 {\d0}, [\s0], \strd
+ vld1.16 {\d1}, [\s1], \strd
+.ifnb \d2
+ vld1.16 {\d2}, [\s0], \strd
+ vld1.16 {\d3}, [\s1], \strd
+.endif
+.ifnb \d4
+ vld1.16 {\d4}, [\s0], \strd
+.endif
+.ifnb \d5
+ vld1.16 {\d5}, [\s1], \strd
+.endif
+.ifnb \d6
+ vld1.16 {\d6}, [\s0], \strd
+.endif
+.endm
+.macro load_regpair s0, s1, strd, d0, d1, d2, d3, d4, d5
+ vld1.16 {\d0, \d1}, [\s0], \strd
+.ifnb \d2
+ vld1.16 {\d2, \d3}, [\s1], \strd
+.endif
+.ifnb \d4
+ vld1.16 {\d4, \d5}, [\s0], \strd
+.endif
+.endm
+.macro load_32 s0, s1, strd, d0, d1, d2, d3, d4, d5, d6
+ load_slice \s0, \s1, \strd, 32, \d0, \d1, \d2, \d3, \d4, \d5, \d6
+.endm
+.macro load_16s16 s0, s1, strd, d0, d1, d2, d3, d4, d5
+ load_regpair \s0, \s1, \strd, \d0, \d1, \d2, \d3, \d4, \d5
+.endm
+.macro interleave_1_32 r0, r1, r2, r3, r4
+ vext.8 \r0, \r0, \r1, #4
+ vext.8 \r1, \r1, \r2, #4
+.ifnb \r3
+ vext.8 \r2, \r2, \r3, #4
+ vext.8 \r3, \r3, \r4, #4
+.endif
+.endm
+.macro vmin_u16 c, r0, r1, r2, r3
+ vmin.u16 \r0, \r0, \c
+.ifnb \r1
+ vmin.u16 \r1, \r1, \c
+.endif
+.ifnb \r2
+ vmin.u16 \r2, \r2, \c
+ vmin.u16 \r3, \r3, \c
+.endif
+.endm
+.macro vsub_i16 c, r0, r1, r2, r3
+ vsub.i16 \r0, \r0, \c
+.ifnb \r1
+ vsub.i16 \r1, \r1, \c
+.endif
+.ifnb \r2
+ vsub.i16 \r2, \r2, \c
+ vsub.i16 \r3, \r3, \c
+.endif
+.endm
+.macro vmull_vmlal_4 d, s0, s1, s2, s3
+ vmull.s16 \d, \s0, d0[0]
+ vmlal.s16 \d, \s1, d0[1]
+ vmlal.s16 \d, \s2, d0[2]
+ vmlal.s16 \d, \s3, d0[3]
+.endm
+.macro vmull_vmlal_8 d, s0, s1, s2, s3, s4, s5, s6, s7
+ vmull.s16 \d, \s0, d0[0]
+ vmlal.s16 \d, \s1, d0[1]
+ vmlal.s16 \d, \s2, d0[2]
+ vmlal.s16 \d, \s3, d0[3]
+ vmlal.s16 \d, \s4, d1[0]
+ vmlal.s16 \d, \s5, d1[1]
+ vmlal.s16 \d, \s6, d1[2]
+ vmlal.s16 \d, \s7, d1[3]
+.endm
+.macro vqrshrun_s32 shift, q0, d0, q1, d1, q2, d2, q3, d3
+ vqrshrun.s32 \d0, \q0, #\shift
+.ifnb \q1
+ vqrshrun.s32 \d1, \q1, #\shift
+.endif
+.ifnb \q2
+ vqrshrun.s32 \d2, \q2, #\shift
+ vqrshrun.s32 \d3, \q3, #\shift
+.endif
+.endm
+.macro vmovn_i32 q0, d0, q1, d1, q2, d2, q3, d3
+ vmovn.i32 \d0, \q0
+.ifnb \q1
+ vmovn.i32 \d1, \q1
+.endif
+.ifnb \q2
+ vmovn.i32 \d2, \q2
+ vmovn.i32 \d3, \q3
+.endif
+.endm
+.macro vrshl_s32 shift, r0, r1, r2, r3
+ vrshl.s32 \r0, \r0, \shift
+ vrshl.s32 \r1, \r1, \shift
+.ifnb \r2
+ vrshl.s32 \r2, \r2, \shift
+ vrshl.s32 \r3, \r3, \shift
+.endif
+.endm
+.macro vst1_32 strd, r0, r1
+ vst1.32 {\r0[0]}, [r0, :32], \strd
+ vst1.32 {\r0[1]}, [r9, :32], \strd
+.ifnb \r1
+ vst1.32 {\r1[0]}, [r0, :32], \strd
+ vst1.32 {\r1[1]}, [r9, :32], \strd
+.endif
+.endm
+.macro vst1_reg strd, align, r0, r1, r2, r3, r4, r5, r6, r7
+ vst1.16 {\r0}, [r0, \align], \strd
+ vst1.16 {\r1}, [r9, \align], \strd
+.ifnb \r2
+ vst1.16 {\r2}, [r0, \align], \strd
+ vst1.16 {\r3}, [r9, \align], \strd
+.endif
+.ifnb \r4
+ vst1.16 {\r4}, [r0, \align], \strd
+ vst1.16 {\r5}, [r9, \align], \strd
+ vst1.16 {\r6}, [r0, \align], \strd
+ vst1.16 {\r7}, [r9, \align], \strd
+.endif
+.endm
+.macro finalize type, q0, q1, d0, d1, q2, q3, d2, d3
+.ifc \type, put
+ vqrshrun_s32 6, \q0, \d0, \q1, \d1, \q2, \d2, \q3, \d3
+ vmin_u16 q15, \q0, \q1
+.else
+ vrshl_s32 q14, \q0, \q1, \q2, \q3 // -(6-intermediate_bits)
+ vmovn_i32 \q0, \d0, \q1, \d1, \q2, \d2, \q3, \d3
+ vsub_i16 q15, \q0, \q1 // PREP_BIAS
+.endif
+.endm
+.macro shift_store_4 type, strd, q0, q1, d0, d1, q2, q3, d2, d3
+ finalize \type, \q0, \q1, \d0, \d1, \q2, \q3, \d2, \d3
+ vst1_reg \strd, :64, \d0, \d1, \d2, \d3
+.endm
+.macro shift_store_8 type, strd, q0, q1, d0, d1, q2, q3, d2, d3
+ finalize \type, \q0, \q1, \d0, \d1, \q2, \q3, \d2, \d3
+ vst1_reg \strd, :128, \q0, \q1
+.endm
+.macro shift_store_16 type, strd, q0, q1, d0, d1, q2, q3, d2, d3
+ finalize \type, \q0, \q1, \d0, \d1, \q2, \q3, \d2, \d3
+ vst1.16 {\q0, \q1}, [r0, :128], \strd
+.endm
+
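+// Expands to one exported entry point per horizontal/vertical filter-type
+// combination; each one only loads the packed type codes into r9/r10 and
+// branches to the shared \op\()_8tap_neon implementation.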
+.macro make_8tap_fn op, type, type_h, type_v
+function \op\()_8tap_\type\()_16bpc_neon, export=1
+ push {r4-r11,lr}
+ movw r9, \type_h
+ movw r10, \type_v
+ b \op\()_8tap_neon
+endfunc
+.endm
+
+// No spaces in these expressions, due to gas-preprocessor.
+#define REGULAR ((0*15<<7)|3*15)
+#define SMOOTH ((1*15<<7)|4*15)
+#define SHARP ((2*15<<7)|3*15)
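+// Each constant packs two offsets into mc_subpel_filters (filter_set * 15):
+// bits [6:0] select the set used when the filtered dimension is <= 4 (the
+// 4-tap filters) and bits [13:7] the 8-tap set used otherwise. The subpel
+// position mx/my is folded into both fields below by multiplying it with
+// 0x4081 before adding these constants.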
+
+.macro filter_fn type, dst, d_strd, src, s_strd, w, h, mx, my, bdmax, ds2, sr2
+make_8tap_fn \type, regular, REGULAR, REGULAR
+make_8tap_fn \type, regular_smooth, REGULAR, SMOOTH
+make_8tap_fn \type, regular_sharp, REGULAR, SHARP
+make_8tap_fn \type, smooth, SMOOTH, SMOOTH
+make_8tap_fn \type, smooth_regular, SMOOTH, REGULAR
+make_8tap_fn \type, smooth_sharp, SMOOTH, SHARP
+make_8tap_fn \type, sharp, SHARP, SHARP
+make_8tap_fn \type, sharp_regular, SHARP, REGULAR
+make_8tap_fn \type, sharp_smooth, SHARP, SMOOTH
+
+function \type\()_8tap_neon
+ ldrd r4, r5, [sp, #36]
+ ldrd r6, r7, [sp, #44]
+.ifc \bdmax, r8
+ ldr r8, [sp, #52]
+.endif
+ movw r11, #0x4081 // (1 << 14) | (1 << 7) | (1 << 0)
+ mul \mx, \mx, r11
+ mul \my, \my, r11
+ add \mx, \mx, r9 // mx, 8tap_h, 4tap_h
+ add \my, \my, r10 // my, 8tap_v, 4tap_v
+
+.ifc \type, prep
+ lsl \d_strd, \w, #1
+.endif
+
+ vdup.16 q15, \bdmax // bitdepth_max
+ clz \bdmax, \bdmax
+ clz r9, \w
+ sub \bdmax, \bdmax, #18 // intermediate_bits = clz(bitdepth_max) - 18
+ tst \mx, #(0x7f << 14)
+ sub r9, r9, #24
+ add lr, \bdmax, #6 // 6 + intermediate_bits
+ rsb r12, \bdmax, #6 // 6 - intermediate_bits
+ movrel r11, X(mc_subpel_filters), -8
+ bne L(\type\()_8tap_h)
+ tst \my, #(0x7f << 14)
+ bne L(\type\()_8tap_v)
+ b \type\()_neon
+
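+// Horizontal-only path: an 8-tap convolution across each row (4-tap when
+// w <= 4), first rounded by (6 - intermediate_bits); put then rounds by
+// intermediate_bits and clamps to bitdepth_max, prep subtracts PREP_BIAS.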
+L(\type\()_8tap_h):
+ cmp \w, #4
+ ubfx r10, \mx, #7, #7
+ and \mx, \mx, #0x7f
+ it gt
+ movgt \mx, r10
+ tst \my, #(0x7f << 14)
+ add \mx, r11, \mx, lsl #3
+ bne L(\type\()_8tap_hv)
+
+ adr r10, L(\type\()_8tap_h_tbl)
+ vdup.32 q14, r12 // 6 - intermediate_bits
+ ldr r9, [r10, r9, lsl #2]
+ vneg.s32 q14, q14 // -(6-intermediate_bits)
+.ifc \type, put
+ vdup.16 q13, \bdmax // intermediate_bits
+.else
+ vmov.i16 q13, #PREP_BIAS
+.endif
+ add r10, r10, r9
+.ifc \type, put
+ vneg.s16 q13, q13 // -intermediate_bits
+.endif
+ bx r10
+
+ .align 2
+L(\type\()_8tap_h_tbl):
+ .word 1280f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_8tap_h_tbl) + CONFIG_THUMB
+
+20: // 2xN h
+.ifc \type, put
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ sub \src, \src, #2
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+2:
+ vld1.16 {q2}, [\src], \s_strd
+ vld1.16 {q3}, [\sr2], \s_strd
+ vext.8 d5, d4, d5, #2
+ vext.8 d7, d6, d7, #2
+ subs \h, \h, #2
+ vtrn.32 d4, d6
+ vtrn.32 d5, d7
+ vmull.s16 q1, d4, d0[0]
+ vmlal.s16 q1, d5, d0[1]
+ vmlal.s16 q1, d6, d0[2]
+ vmlal.s16 q1, d7, d0[3]
+ vrshl.s32 q1, q1, q14 // -(6-intermediate_bits)
+ vqmovun.s32 d2, q1
+ vrshl.s16 d2, d2, d26 // -intermediate_bits
+ vmin.u16 d2, d2, d30
+ vst1.32 {d2[0]}, [\dst, :32], \d_strd
+ vst1.32 {d2[1]}, [\ds2, :32], \d_strd
+ bgt 2b
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN h
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ sub \src, \src, #2
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+4:
+ vld1.16 {q8}, [\src], \s_strd
+ vld1.16 {q11}, [\sr2], \s_strd
+ vext.8 d18, d16, d17, #2
+ vext.8 d19, d16, d17, #4
+ vext.8 d20, d16, d17, #6
+ vext.8 d24, d22, d23, #2
+ vext.8 d25, d22, d23, #4
+ vext.8 d21, d22, d23, #6
+ subs \h, \h, #2
+ vmull.s16 q2, d16, d0[0]
+ vmlal.s16 q2, d18, d0[1]
+ vmlal.s16 q2, d19, d0[2]
+ vmlal.s16 q2, d20, d0[3]
+ vmull.s16 q3, d22, d0[0]
+ vmlal.s16 q3, d24, d0[1]
+ vmlal.s16 q3, d25, d0[2]
+ vmlal.s16 q3, d21, d0[3]
+ vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
+ vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
+.ifc \type, put
+ vqmovun.s32 d4, q2
+ vqmovun.s32 d5, q3
+ vrshl.s16 q2, q2, q13 // -intermediate_bits
+ vmin.u16 q2, q2, q15
+.else
+ vmovn.s32 d4, q2
+ vmovn.s32 d5, q3
+ vsub.i16 q2, q2, q13 // PREP_BIAS
+.endif
+ vst1.16 {d4}, [\dst, :64], \d_strd
+ vst1.16 {d5}, [\ds2, :64], \d_strd
+ bgt 4b
+ pop {r4-r11,pc}
+
+80:
+160:
+320:
+640:
+1280: // 8xN, 16xN, 32xN, ... h
+ vpush {q4-q5}
+ vld1.8 {d0}, [\mx, :64]
+ sub \src, \src, #6
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+
+ sub \s_strd, \s_strd, \w, lsl #1
+ sub \s_strd, \s_strd, #16
+.ifc \type, put
+ lsl \d_strd, \d_strd, #1
+ sub \d_strd, \d_strd, \w, lsl #1
+.endif
+81:
+ vld1.16 {q8, q9}, [\src]!
+ vld1.16 {q10, q11}, [\sr2]!
+ mov \mx, \w
+
+8:
+ vmull.s16 q1, d16, d0[0]
+ vmull.s16 q2, d17, d0[0]
+ vmull.s16 q3, d20, d0[0]
+ vmull.s16 q4, d21, d0[0]
+.irpc i, 1234567
+ vext.8 q12, q8, q9, #(2*\i)
+ vext.8 q5, q10, q11, #(2*\i)
+.if \i < 4
+ vmlal.s16 q1, d24, d0[\i]
+ vmlal.s16 q2, d25, d0[\i]
+ vmlal.s16 q3, d10, d0[\i]
+ vmlal.s16 q4, d11, d0[\i]
+.else
+ vmlal.s16 q1, d24, d1[\i-4]
+ vmlal.s16 q2, d25, d1[\i-4]
+ vmlal.s16 q3, d10, d1[\i-4]
+ vmlal.s16 q4, d11, d1[\i-4]
+.endif
+.endr
+ subs \mx, \mx, #8
+ vrshl.s32 q1, q1, q14 // -(6-intermediate_bits)
+ vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
+ vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
+ vrshl.s32 q4, q4, q14 // -(6-intermediate_bits)
+.ifc \type, put
+ vqmovun.s32 d2, q1
+ vqmovun.s32 d3, q2
+ vqmovun.s32 d4, q3
+ vqmovun.s32 d5, q4
+ vrshl.s16 q1, q1, q13 // -intermediate_bits
+ vrshl.s16 q2, q2, q13 // -intermediate_bits
+ vmin.u16 q1, q1, q15
+ vmin.u16 q2, q2, q15
+.else
+ vmovn.s32 d2, q1
+ vmovn.s32 d3, q2
+ vmovn.s32 d4, q3
+ vmovn.s32 d5, q4
+ vsub.i16 q1, q1, q13 // PREP_BIAS
+ vsub.i16 q2, q2, q13 // PREP_BIAS
+.endif
+ vst1.16 {q1}, [\dst, :128]!
+ vst1.16 {q2}, [\ds2, :128]!
+ ble 9f
+
+ vmov q8, q9
+ vmov q10, q11
+ vld1.16 {q9}, [\src]!
+ vld1.16 {q11}, [\sr2]!
+ b 8b
+
+9:
+ add \dst, \dst, \d_strd
+ add \ds2, \ds2, \d_strd
+ add \src, \src, \s_strd
+ add \sr2, \sr2, \s_strd
+
+ subs \h, \h, #2
+ bgt 81b
+ vpop {q4-q5}
+ pop {r4-r11,pc}
+
+
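+// Vertical-only path: an 8-tap convolution down each column (4-tap when
+// h <= 4); put rounds the 32-bit sums by 6 and clamps to bitdepth_max,
+// prep rounds by (6 - intermediate_bits) and subtracts PREP_BIAS.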
+L(\type\()_8tap_v):
+ cmp \h, #4
+ ubfx r10, \my, #7, #7
+ and \my, \my, #0x7f
+ it gt
+ movgt \my, r10
+ add \my, r11, \my, lsl #3
+
+.ifc \type, prep
+ vdup.32 q14, r12 // 6 - intermediate_bits
+ vmov.i16 q15, #PREP_BIAS
+.endif
+ adr r10, L(\type\()_8tap_v_tbl)
+ ldr r9, [r10, r9, lsl #2]
+.ifc \type, prep
+ vneg.s32 q14, q14 // -(6-intermediate_bits)
+.endif
+ add r10, r10, r9
+ bx r10
+
+ .align 2
+L(\type\()_8tap_v_tbl):
+ .word 1280f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_8tap_v_tbl) + CONFIG_THUMB
+
+20: // 2xN v
+.ifc \type, put
+ bgt 28f
+
+ cmp \h, #2
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ // 2x2 v
+ load_32 \src, \sr2, \s_strd, d1, d2, d3, d4, d5
+ interleave_1_32 d1, d2, d3, d4, d5
+ bgt 24f
+ vmull_vmlal_4 q8, d1, d2, d3, d4
+ vqrshrun_s32 6, q8, d16
+ vmin_u16 d30, d16
+ vst1_32 \d_strd, d16
+ pop {r4-r11,pc}
+
+24: // 2x4 v
+ load_32 \sr2, \src, \s_strd, d6, d7
+ interleave_1_32 d5, d6, d7
+ vmull_vmlal_4 q8, d1, d2, d3, d4
+ vmull_vmlal_4 q9, d3, d4, d5, d6
+ vqrshrun_s32 6, q8, d16, q9, d17
+ vmin_u16 q15, q8
+ vst1_32 \d_strd, d16, d17
+ pop {r4-r11,pc}
+
+28: // 2x6, 2x8, 2x12, 2x16 v
+ vld1.8 {d0}, [\my, :64]
+ sub \sr2, \src, \s_strd, lsl #1
+ add \ds2, \dst, \d_strd
+ sub \src, \sr2, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+ vmovl.s8 q0, d0
+
+ load_32 \src, \sr2, \s_strd, d2, d3, d4, d5, d6, d7, d16
+ interleave_1_32 d2, d3, d4, d5, d6
+ interleave_1_32 d6, d7, d16
+216:
+ subs \h, \h, #4
+ load_32 \sr2, \src, \s_strd, d17, d18, d19, d20
+ interleave_1_32 d16, d17, d18, d19, d20
+ vmull_vmlal_8 q13, d2, d3, d4, d5, d6, d7, d16, d17
+ vmull_vmlal_8 q1, d4, d5, d6, d7, d16, d17, d18, d19
+ vqrshrun_s32 6, q13, d26, q1, d27
+ vmin_u16 q15, q13
+ vst1_32 \d_strd, d26, d27
+ ble 0f
+ cmp \h, #2
+ vmov q1, q3
+ vmov q2, q8
+ vmov q3, q9
+ vmov d16, d20
+ beq 26f
+ b 216b
+26:
+ load_32 \sr2, \src, \s_strd, d17, d18
+ interleave_1_32 d16, d17, d18
+ vmull_vmlal_8 q13, d2, d3, d4, d5, d6, d7, d16, d17
+ vqrshrun_s32 6, q13, d26
+ vmin_u16 d30, d26
+ vst1_32 \d_strd, d26
+0:
+ pop {r4-r11,pc}
+.endif
+
+40:
+ bgt 480f
+
+ // 4x2, 4x4 v
+ cmp \h, #2
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ load_reg \src, \sr2, \s_strd, d1, d2, d3, d4, d5
+ vmull_vmlal_4 q8, d1, d2, d3, d4
+ vmull_vmlal_4 q9, d2, d3, d4, d5
+ shift_store_4 \type, \d_strd, q8, q9, d16, d17
+ ble 0f
+ load_reg \sr2, \src, \s_strd, d6, d7
+ vmull_vmlal_4 q8, d3, d4, d5, d6
+ vmull_vmlal_4 q9, d4, d5, d6, d7
+ shift_store_4 \type, \d_strd, q8, q9, d16, d17
+0:
+ pop {r4-r11,pc}
+
+480: // 4x6, 4x8, 4x12, 4x16 v
+ vld1.8 {d0}, [\my, :64]
+ sub \sr2, \src, \s_strd, lsl #1
+ add \ds2, \dst, \d_strd
+ sub \src, \sr2, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ load_reg \src, \sr2, \s_strd, d16, d17, d18, d19, d20, d21, d22
+
+48:
+ subs \h, \h, #4
+ load_reg \sr2, \src, \s_strd, d23, d24, d25, d26
+ vmull_vmlal_8 q1, d16, d17, d18, d19, d20, d21, d22, d23
+ vmull_vmlal_8 q2, d17, d18, d19, d20, d21, d22, d23, d24
+ vmull_vmlal_8 q3, d18, d19, d20, d21, d22, d23, d24, d25
+ vmull_vmlal_8 q8, d19, d20, d21, d22, d23, d24, d25, d26
+ shift_store_4 \type, \d_strd, q1, q2, d2, d3, q3, q8, d4, d5
+ ble 0f
+ cmp \h, #2
+ vmov q8, q10
+ vmov q9, q11
+ vmov q10, q12
+ vmov d22, d26
+ beq 46f
+ b 48b
+46:
+ load_reg \sr2, \src, \s_strd, d23, d24
+ vmull_vmlal_8 q1, d16, d17, d18, d19, d20, d21, d22, d23
+ vmull_vmlal_8 q2, d17, d18, d19, d20, d21, d22, d23, d24
+ shift_store_4 \type, \d_strd, q1, q2, d2, d3
+0:
+ pop {r4-r11,pc}
+
+80:
+ bgt 880f
+
+ // 8x2, 8x4 v
+ cmp \h, #2
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+
+ load_reg \src, \sr2, \s_strd, q1, q2, q3, q8, q9
+ vmull_vmlal_4 q10, d2, d4, d6, d16
+ vmull_vmlal_4 q11, d3, d5, d7, d17
+ vmull_vmlal_4 q12, d4, d6, d16, d18
+ vmull_vmlal_4 q13, d5, d7, d17, d19
+ shift_store_8 \type, \d_strd, q10, q11, d20, d21, q12, q13, d22, d23
+ ble 0f
+ load_reg \sr2, \src, \s_strd, q10, q11
+ vmull_vmlal_4 q1, d6, d16, d18, d20
+ vmull_vmlal_4 q2, d7, d17, d19, d21
+ vmull_vmlal_4 q12, d16, d18, d20, d22
+ vmull_vmlal_4 q13, d17, d19, d21, d23
+ shift_store_8 \type, \d_strd, q1, q2, d2, d3, q12, q13, d4, d5
+0:
+ pop {r4-r11,pc}
+
+880: // 8x6, 8x8, 8x16, 8x32 v
+1680: // 16x8, 16x16, ...
+320: // 32x8, 32x16, ...
+640:
+1280:
+ vpush {q4-q7}
+ vld1.8 {d0}, [\my, :64]
+ sub \src, \src, \s_strd
+ sub \src, \src, \s_strd, lsl #1
+ vmovl.s8 q0, d0
+ mov \my, \h
+168:
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ load_reg \src, \sr2, \s_strd, q5, q6, q7, q8, q9, q10, q11
+
+88:
+ subs \h, \h, #2
+ load_reg \sr2, \src, \s_strd, q12, q13
+ vmull_vmlal_8 q1, d10, d12, d14, d16, d18, d20, d22, d24
+ vmull_vmlal_8 q2, d11, d13, d15, d17, d19, d21, d23, d25
+ vmull_vmlal_8 q3, d12, d14, d16, d18, d20, d22, d24, d26
+ vmull_vmlal_8 q4, d13, d15, d17, d19, d21, d23, d25, d27
+ shift_store_8 \type, \d_strd, q1, q2, d2, d3, q3, q4, d4, d5
+ ble 9f
+ subs \h, \h, #2
+ load_reg \sr2, \src, \s_strd, q1, q2
+ vmull_vmlal_8 q3, d14, d16, d18, d20, d22, d24, d26, d2
+ vmull_vmlal_8 q4, d15, d17, d19, d21, d23, d25, d27, d3
+ vmull_vmlal_8 q5, d16, d18, d20, d22, d24, d26, d2, d4
+ vmull_vmlal_8 q6, d17, d19, d21, d23, d25, d27, d3, d5
+ shift_store_8 \type, \d_strd, q3, q4, d6, d7, q5, q6, d8, d9
+ ble 9f
+ vmov q5, q9
+ vmov q6, q10
+ vmov q7, q11
+ vmov q8, q12
+ vmov q9, q13
+ vmov q10, q1
+ vmov q11, q2
+ b 88b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #3
+ mov \h, \my
+ add \src, \src, #16
+ add \dst, \dst, #16
+ b 168b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+160:
+ bgt 1680b
+
+ // 16x2, 16x4 v
+ vpush {q6-q7}
+ add \my, \my, #2
+ vld1.32 {d0[]}, [\my]
+ sub \src, \src, \s_strd
+ vmovl.s8 q0, d0
+
+ load_16s16 \src, \src, \s_strd, q6, q7, q8, q9, q10, q11
+16:
+ load_16s16 \src, \src, \s_strd, q12, q13
+ subs \h, \h, #1
+ vmull_vmlal_4 q1, d12, d16, d20, d24
+ vmull_vmlal_4 q2, d13, d17, d21, d25
+ vmull_vmlal_4 q3, d14, d18, d22, d26
+ vmull_vmlal_4 q6, d15, d19, d23, d27
+ shift_store_16 \type, \d_strd, q1, q2, d2, d3, q3, q6, d4, d5
+ ble 0f
+ vmov q6, q8
+ vmov q7, q9
+ vmov q8, q10
+ vmov q9, q11
+ vmov q10, q12
+ vmov q11, q13
+ b 16b
+0:
+ vpop {q6-q7}
+ pop {r4-r11,pc}
+
+
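+// Combined horizontal+vertical path: rows are first filtered horizontally
+// into 16-bit intermediates (rounded by 6 - intermediate_bits) and then
+// filtered vertically; put rounds the final sums by (6 + intermediate_bits)
+// and clamps to bitdepth_max, prep rounds by 6 and subtracts PREP_BIAS.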
+L(\type\()_8tap_hv):
+ cmp \h, #4
+ ubfx r10, \my, #7, #7
+ and \my, \my, #0x7f
+ it gt
+ movgt \my, r10
+4:
+ add \my, r11, \my, lsl #3
+
+ adr r10, L(\type\()_8tap_hv_tbl)
+ neg r12, r12 // -(6-intermediate_bits)
+ ldr r9, [r10, r9, lsl #2]
+ vdup.32 q14, r12 // -(6-intermediate_bits)
+.ifc \type, put
+ neg r8, lr // -(6+intermediate_bits)
+.else
+ vmov.i16 q13, #PREP_BIAS
+.endif
+ add r10, r10, r9
+.ifc \type, put
+ vdup.32 q13, r8 // -(6+intermediate_bits)
+.endif
+ bx r10
+
+ .align 2
+L(\type\()_8tap_hv_tbl):
+ .word 1280f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_8tap_hv_tbl) + CONFIG_THUMB
+
+20:
+.ifc \type, put
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ bgt 280f
+ add \my, \my, #2
+ vld1.32 {d2[]}, [\my]
+
+ // 2x2, 2x4 hv
+ sub \sr2, \src, #2
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+ vld1.16 {q11}, [\src], \s_strd
+ vext.8 d24, d22, d23, #2
+ vmull.s16 q11, d22, d0
+ vmull.s16 q12, d24, d0
+ vpadd.s32 d22, d22, d23
+ vpadd.s32 d23, d24, d25
+ vpadd.s32 d22, d22, d23
+ vrshl.s32 d16, d22, d28 // -(6-intermediate_bits)
+ vmovn.i32 d16, q8
+ bl L(\type\()_8tap_filter_2)
+
+ vext.8 d16, d16, d16, #4
+ vext.8 d16, d16, d24, #4
+ vmov d17, d24
+
+2:
+ bl L(\type\()_8tap_filter_2)
+
+ vext.8 d18, d17, d24, #4
+ vmull.s16 q2, d16, d2[0]
+ vmlal.s16 q2, d17, d2[1]
+ vmlal.s16 q2, d18, d2[2]
+ vmlal.s16 q2, d24, d2[3]
+
+ vrshl.s32 q2, q2, q13 // -(6+intermediate_bits)
+ vqmovun.s32 d4, q2
+ vmin.u16 d4, d4, d30
+ subs \h, \h, #2
+ vst1.32 {d4[0]}, [\dst, :32], \d_strd
+ vst1.32 {d4[1]}, [\ds2, :32], \d_strd
+ ble 0f
+ vmov d16, d18
+ vmov d17, d24
+ b 2b
+
+280: // 2x8, 2x16, 2x32 hv
+ vld1.8 {d2}, [\my, :64]
+ sub \src, \src, #2
+ sub \sr2, \src, \s_strd, lsl #1
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+ vld1.16 {q11}, [\src], \s_strd
+ vext.8 d24, d22, d23, #2
+ vmull.s16 q11, d22, d0
+ vmull.s16 q12, d24, d0
+ vpadd.s32 d22, d22, d23
+ vpadd.s32 d23, d24, d25
+ vpadd.s32 d22, d22, d23
+ vrshl.s32 d16, d22, d28 // -(6-intermediate_bits)
+ vmovn.i32 d16, q8
+
+ bl L(\type\()_8tap_filter_2)
+
+ vext.8 d16, d16, d16, #4
+ vext.8 d16, d16, d24, #4
+ vmov d17, d24
+ bl L(\type\()_8tap_filter_2)
+ vext.8 d18, d17, d24, #4
+ vmov d19, d24
+ bl L(\type\()_8tap_filter_2)
+ vext.8 d20, d19, d24, #4
+ vmov d21, d24
+
+28:
+ bl L(\type\()_8tap_filter_2)
+ vext.8 d22, d21, d24, #4
+ vmull.s16 q3, d16, d2[0]
+ vmlal.s16 q3, d17, d2[1]
+ vmlal.s16 q3, d18, d2[2]
+ vmlal.s16 q3, d19, d2[3]
+ vmlal.s16 q3, d20, d3[0]
+ vmlal.s16 q3, d21, d3[1]
+ vmlal.s16 q3, d22, d3[2]
+ vmlal.s16 q3, d24, d3[3]
+
+ vrshl.s32 q3, q3, q13 // -(6+intermediate_bits)
+ vqmovun.s32 d6, q3
+ vmin.u16 d6, d6, d30
+ subs \h, \h, #2
+ vst1.32 {d6[0]}, [\dst, :32], \d_strd
+ vst1.32 {d6[1]}, [\ds2, :32], \d_strd
+ ble 0f
+ vmov q8, q9
+ vmov q9, q10
+ vmov d20, d22
+ vmov d21, d24
+ b 28b
+0:
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_filter_2):
+ vld1.16 {q11}, [\sr2], \s_strd
+ vld1.16 {q12}, [\src], \s_strd
+ vext.8 d23, d22, d23, #2
+ vext.8 d25, d24, d25, #2
+ vtrn.32 q11, q12
+ vmull.s16 q3, d22, d0[0]
+ vmlal.s16 q3, d23, d0[1]
+ vmlal.s16 q3, d24, d0[2]
+ vmlal.s16 q3, d25, d0[3]
+ vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
+ vmovn.i32 d24, q3
+ bx lr
+.endif
+
+40:
+ add \mx, \mx, #2
+ vld1.32 {d0[]}, [\mx]
+ bgt 480f
+ add \my, \my, #2
+ vld1.32 {d2[]}, [\my]
+ sub \sr2, \src, #2
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+ // 4x2, 4x4 hv
+ vld1.16 {q11}, [\src], \s_strd
+ vext.8 d24, d22, d23, #2
+ vext.8 d25, d22, d23, #4
+ vext.8 d23, d22, d23, #6
+ vmull.s16 q10, d22, d0[0]
+ vmlal.s16 q10, d24, d0[1]
+ vmlal.s16 q10, d25, d0[2]
+ vmlal.s16 q10, d23, d0[3]
+ vrshl.s32 q10, q10, q14 // -(6-intermediate_bits)
+ vmovn.i32 d17, q10
+
+ bl L(\type\()_8tap_filter_4)
+ vmov q9, q12
+
+4:
+ bl L(\type\()_8tap_filter_4)
+ vmull.s16 q2, d17, d2[0]
+ vmlal.s16 q2, d18, d2[1]
+ vmlal.s16 q2, d19, d2[2]
+ vmlal.s16 q2, d24, d2[3]
+ vmull.s16 q3, d18, d2[0]
+ vmlal.s16 q3, d19, d2[1]
+ vmlal.s16 q3, d24, d2[2]
+ vmlal.s16 q3, d25, d2[3]
+.ifc \type, put
+ vrshl.s32 q2, q2, q13 // -(6+intermediate_bits)
+ vrshl.s32 q3, q3, q13 // -(6+intermediate_bits)
+ vqmovun.s32 d4, q2
+ vqmovun.s32 d5, q3
+ vmin.u16 q2, q2, q15
+.else
+ vrshrn.i32 d4, q2, #6
+ vrshrn.i32 d5, q3, #6
+ vsub.i16 q2, q2, q13 // PREP_BIAS
+.endif
+ subs \h, \h, #2
+
+ vst1.16 {d4}, [\dst, :64], \d_strd
+ vst1.16 {d5}, [\ds2, :64], \d_strd
+ ble 0f
+ vmov d17, d19
+ vmov q9, q12
+ b 4b
+0:
+ pop {r4-r11,pc}
+
+480: // 4x8, 4x16, 4x32 hv
+ vpush {d13-d15}
+ vld1.8 {d2}, [\my, :64]
+ sub \src, \src, #2
+ sub \sr2, \src, \s_strd, lsl #1
+ sub \src, \sr2, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+
+ vld1.16 {q11}, [\src], \s_strd
+ vext.8 d24, d22, d23, #2
+ vext.8 d25, d22, d23, #4
+ vext.8 d23, d22, d23, #6
+ vmull.s16 q10, d22, d0[0]
+ vmlal.s16 q10, d24, d0[1]
+ vmlal.s16 q10, d25, d0[2]
+ vmlal.s16 q10, d23, d0[3]
+ vrshl.s32 q10, q10, q14 // -(6-intermediate_bits)
+ vmovn.i32 d13, q10
+
+ bl L(\type\()_8tap_filter_4)
+ vmov q7, q12
+ bl L(\type\()_8tap_filter_4)
+ vmov q8, q12
+ bl L(\type\()_8tap_filter_4)
+ vmov q9, q12
+
+48:
+ bl L(\type\()_8tap_filter_4)
+ vmull.s16 q2, d13, d2[0]
+ vmlal.s16 q2, d14, d2[1]
+ vmlal.s16 q2, d15, d2[2]
+ vmlal.s16 q2, d16, d2[3]
+ vmlal.s16 q2, d17, d3[0]
+ vmlal.s16 q2, d18, d3[1]
+ vmlal.s16 q2, d19, d3[2]
+ vmlal.s16 q2, d24, d3[3]
+ vmull.s16 q3, d14, d2[0]
+ vmlal.s16 q3, d15, d2[1]
+ vmlal.s16 q3, d16, d2[2]
+ vmlal.s16 q3, d17, d2[3]
+ vmlal.s16 q3, d18, d3[0]
+ vmlal.s16 q3, d19, d3[1]
+ vmlal.s16 q3, d24, d3[2]
+ vmlal.s16 q3, d25, d3[3]
+.ifc \type, put
+ vrshl.s32 q2, q2, q13 // -(6+intermediate_bits)
+ vrshl.s32 q3, q3, q13 // -(6+intermediate_bits)
+ vqmovun.s32 d4, q2
+ vqmovun.s32 d5, q3
+ vmin.u16 q2, q2, q15
+.else
+ vrshrn.i32 d4, q2, #6
+ vrshrn.i32 d5, q3, #6
+ vsub.i16 q2, q2, q13 // PREP_BIAS
+.endif
+ subs \h, \h, #2
+ vst1.16 {d4}, [\dst, :64], \d_strd
+ vst1.16 {d5}, [\ds2, :64], \d_strd
+ ble 0f
+ vmov d13, d15
+ vmov q7, q8
+ vmov q8, q9
+ vmov q9, q12
+ b 48b
+0:
+ vpop {d13-d15}
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_filter_4):
+ vld1.16 {q10}, [\sr2], \s_strd
+ vld1.16 {q11}, [\src], \s_strd
+ vext.8 d24, d20, d21, #2
+ vext.8 d25, d20, d21, #4
+ vext.8 d21, d20, d21, #6
+ vmull.s16 q3, d20, d0[0]
+ vmlal.s16 q3, d24, d0[1]
+ vmlal.s16 q3, d25, d0[2]
+ vmlal.s16 q3, d21, d0[3]
+ vext.8 d24, d22, d23, #2
+ vext.8 d25, d22, d23, #4
+ vext.8 d23, d22, d23, #6
+ vmull.s16 q10, d22, d0[0]
+ vmlal.s16 q10, d24, d0[1]
+ vmlal.s16 q10, d25, d0[2]
+ vmlal.s16 q10, d23, d0[3]
+ vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
+ vrshl.s32 q10, q10, q14 // -(6-intermediate_bits)
+ vmovn.i32 d24, q3
+ vmovn.i32 d25, q10
+ bx lr
+
+80:
+160:
+320:
+ bgt 880f
+ add \my, \my, #2
+ vld1.8 {d0}, [\mx, :64]
+ vld1.32 {d2[]}, [\my]
+ sub \src, \src, #6
+ sub \src, \src, \s_strd
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+ mov \my, \h
+
+164: // 8x2, 8x4, 16x2, 16x4, 32x2, 32x4 hv
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+
+ vld1.16 {q11, q12}, [\src], \s_strd
+ vmull.s16 q2, d22, d0[0]
+ vmull.s16 q3, d23, d0[0]
+ vdup.32 q14, r12 // -(6-intermediate_bits)
+.irpc i, 1234567
+ vext.8 q10, q11, q12, #(2*\i)
+.if \i < 4
+ vmlal.s16 q2, d20, d0[\i]
+ vmlal.s16 q3, d21, d0[\i]
+.else
+ vmlal.s16 q2, d20, d1[\i - 4]
+ vmlal.s16 q3, d21, d1[\i - 4]
+.endif
+.endr
+ vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
+ vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
+ vmovn.i32 d16, q2
+ vmovn.i32 d17, q3
+
+ bl L(\type\()_8tap_filter_8)
+ vmov q9, q11
+ vmov q10, q12
+
+8:
+ bl L(\type\()_8tap_filter_8)
+ vmull.s16 q2, d16, d2[0]
+ vmull.s16 q3, d17, d2[0]
+ vmull.s16 q13, d18, d2[0]
+ vmull.s16 q14, d19, d2[0]
+.ifc \type, put
+ vdup.32 q8, r8 // -(6+intermediate_bits)
+.endif
+ vmlal.s16 q2, d18, d2[1]
+ vmlal.s16 q3, d19, d2[1]
+ vmlal.s16 q13, d20, d2[1]
+ vmlal.s16 q14, d21, d2[1]
+ vmlal.s16 q2, d20, d2[2]
+ vmlal.s16 q3, d21, d2[2]
+ vmlal.s16 q13, d22, d2[2]
+ vmlal.s16 q14, d23, d2[2]
+ vmlal.s16 q2, d22, d2[3]
+ vmlal.s16 q3, d23, d2[3]
+ vmlal.s16 q13, d24, d2[3]
+ vmlal.s16 q14, d25, d2[3]
+.ifc \type, put
+ vdup.16 q9, \bdmax // bitdepth_max
+ vrshl.s32 q2, q2, q8 // -(6+intermediate_bits)
+ vrshl.s32 q3, q3, q8 // -(6+intermediate_bits)
+ vrshl.s32 q13, q13, q8 // -(6+intermediate_bits)
+ vrshl.s32 q14, q14, q8 // -(6+intermediate_bits)
+ vqmovun.s32 d4, q2
+ vqmovun.s32 d5, q3
+ vqmovun.s32 d6, q13
+ vqmovun.s32 d7, q14
+ vmin.u16 q2, q2, q15
+ vmin.u16 q3, q3, q15
+.else
+ vmov.i16 q9, #PREP_BIAS
+ vrshrn.i32 d4, q2, #6
+ vrshrn.i32 d5, q3, #6
+ vrshrn.i32 d6, q13, #6
+ vrshrn.i32 d7, q14, #6
+ vsub.i16 q2, q2, q9 // PREP_BIAS
+ vsub.i16 q3, q3, q9 // PREP_BIAS
+.endif
+ subs \h, \h, #2
+ vst1.16 {q2}, [\dst, :128], \d_strd
+ vst1.16 {q3}, [\ds2, :128], \d_strd
+ ble 9f
+ vmov q8, q10
+ vmov q9, q11
+ vmov q10, q12
+ b 8b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #2
+ mov \h, \my
+ add \src, \src, #16
+ add \dst, \dst, #16
+ b 164b
+0:
+ pop {r4-r11,pc}
+
+880: // 8x8, 8x16, ..., 16x8, ..., 32x8, ... hv
+640:
+1280:
+ vpush {q4-q7}
+ vld1.8 {d0}, [\mx, :64]
+ vld1.8 {d2}, [\my, :64]
+ sub \src, \src, #6
+ sub \src, \src, \s_strd
+ sub \src, \src, \s_strd, lsl #1
+ vmovl.s8 q0, d0
+ vmovl.s8 q1, d2
+ mov \my, \h
+
+168:
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+
+ vld1.16 {q11, q12}, [\src], \s_strd
+ vmull.s16 q2, d22, d0[0]
+ vmull.s16 q3, d23, d0[0]
+ vdup.32 q14, r12 // -(6-intermediate_bits)
+.irpc i, 1234567
+ vext.8 q10, q11, q12, #(2*\i)
+.if \i < 4
+ vmlal.s16 q2, d20, d0[\i]
+ vmlal.s16 q3, d21, d0[\i]
+.else
+ vmlal.s16 q2, d20, d1[\i - 4]
+ vmlal.s16 q3, d21, d1[\i - 4]
+.endif
+.endr
+ vrshl.s32 q2, q2, q14 // -(6-intermediate_bits)
+ vrshl.s32 q3, q3, q14 // -(6-intermediate_bits)
+ vmovn.i32 d8, q2
+ vmovn.i32 d9, q3
+
+ bl L(\type\()_8tap_filter_8)
+ vmov q5, q11
+ vmov q6, q12
+ bl L(\type\()_8tap_filter_8)
+ vmov q7, q11
+ vmov q8, q12
+ bl L(\type\()_8tap_filter_8)
+ vmov q9, q11
+ vmov q10, q12
+
+88:
+ bl L(\type\()_8tap_filter_8)
+ vmull.s16 q2, d8, d2[0]
+ vmull.s16 q3, d9, d2[0]
+ vmull.s16 q13, d10, d2[0]
+ vmull.s16 q14, d11, d2[0]
+.ifc \type, put
+ vdup.32 q4, r8 // -(6+intermediate_bits)
+.endif
+ vmlal.s16 q2, d10, d2[1]
+ vmlal.s16 q3, d11, d2[1]
+ vmlal.s16 q13, d12, d2[1]
+ vmlal.s16 q14, d13, d2[1]
+ vmlal.s16 q2, d12, d2[2]
+ vmlal.s16 q3, d13, d2[2]
+ vmlal.s16 q13, d14, d2[2]
+ vmlal.s16 q14, d15, d2[2]
+ vmlal.s16 q2, d14, d2[3]
+ vmlal.s16 q3, d15, d2[3]
+ vmlal.s16 q13, d16, d2[3]
+ vmlal.s16 q14, d17, d2[3]
+ vmlal.s16 q2, d16, d3[0]
+ vmlal.s16 q3, d17, d3[0]
+ vmlal.s16 q13, d18, d3[0]
+ vmlal.s16 q14, d19, d3[0]
+ vmlal.s16 q2, d18, d3[1]
+ vmlal.s16 q3, d19, d3[1]
+ vmlal.s16 q13, d20, d3[1]
+ vmlal.s16 q14, d21, d3[1]
+ vmlal.s16 q2, d20, d3[2]
+ vmlal.s16 q3, d21, d3[2]
+ vmlal.s16 q13, d22, d3[2]
+ vmlal.s16 q14, d23, d3[2]
+ vmlal.s16 q2, d22, d3[3]
+ vmlal.s16 q3, d23, d3[3]
+ vmlal.s16 q13, d24, d3[3]
+ vmlal.s16 q14, d25, d3[3]
+.ifc \type, put
+ vrshl.s32 q2, q2, q4 // -(6+intermediate_bits)
+ vrshl.s32 q3, q3, q4 // -(6+intermediate_bits)
+ vrshl.s32 q13, q13, q4 // -(6+intermediate_bits)
+ vrshl.s32 q14, q14, q4 // -(6+intermediate_bits)
+ vqmovun.s32 d4, q2
+ vqmovun.s32 d5, q3
+ vqmovun.s32 d6, q13
+ vqmovun.s32 d7, q14
+ vmin.u16 q2, q2, q15
+ vmin.u16 q3, q3, q15
+.else
+ vmov.i16 q5, #PREP_BIAS
+ vrshrn.i32 d4, q2, #6
+ vrshrn.i32 d5, q3, #6
+ vrshrn.i32 d6, q13, #6
+ vrshrn.i32 d7, q14, #6
+ vsub.i16 q2, q2, q5 // PREP_BIAS
+ vsub.i16 q3, q3, q5 // PREP_BIAS
+.endif
+ subs \h, \h, #2
+ vst1.16 {q2}, [\dst, :128], \d_strd
+ vst1.16 {q3}, [\ds2, :128], \d_strd
+ ble 9f
+ vmov q4, q6
+ vmov q5, q7
+ vmov q6, q8
+ vmov q7, q9
+ vmov q8, q10
+ vmov q9, q11
+ vmov q10, q12
+ b 88b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #3
+ mov \h, \my
+ add \src, \src, #16
+ add \dst, \dst, #16
+ b 168b
+0:
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+L(\type\()_8tap_filter_8):
+ vld1.16 {q13, q14}, [\sr2], \s_strd
+ vmull.s16 q2, d26, d0[0]
+ vmull.s16 q3, d27, d0[0]
+.irpc i, 1234567
+ vext.8 q12, q13, q14, #(2*\i)
+.if \i < 4
+ vmlal.s16 q2, d24, d0[\i]
+ vmlal.s16 q3, d25, d0[\i]
+.else
+ vmlal.s16 q2, d24, d1[\i - 4]
+ vmlal.s16 q3, d25, d1[\i - 4]
+.endif
+.endr
+ vdup.32 q12, r12 // -(6-intermediate_bits)
+ vld1.16 {q13, q14}, [\src], \s_strd
+ vrshl.s32 q2, q2, q12 // -(6-intermediate_bits)
+ vrshl.s32 q3, q3, q12 // -(6-intermediate_bits)
+ vmovn.i32 d4, q2
+ vmovn.i32 d5, q3
+
+ vmull.s16 q3, d26, d0[0]
+ vmull.s16 q11, d27, d0[0]
+.irpc i, 1234567
+ vext.8 q12, q13, q14, #(2*\i)
+.if \i < 4
+ vmlal.s16 q3, d24, d0[\i]
+ vmlal.s16 q11, d25, d0[\i]
+.else
+ vmlal.s16 q3, d24, d1[\i - 4]
+ vmlal.s16 q11, d25, d1[\i - 4]
+.endif
+.endr
+ vdup.32 q13, r12 // -(6-intermediate_bits)
+ vrshl.s32 q3, q3, q13 // -(6-intermediate_bits)
+ vrshl.s32 q11, q11, q13 // -(6-intermediate_bits)
+
+ vmovn.i32 d24, q3
+ vmovn.i32 d25, q11
+ vmov q11, q2
+ bx lr
+endfunc
+
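+// Bilinear MC: the horizontal pass computes
+// (16-mx)*src[x] + mx*src[x+1] (and the vertical pass the same with my
+// down columns), with the rounding shifts split across the passes via
+// intermediate_bits, mirroring the 8-tap code; prep output again has
+// PREP_BIAS subtracted.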
+function \type\()_bilin_16bpc_neon, export=1
+ push {r4-r11,lr}
+ ldrd r4, r5, [sp, #36]
+ ldrd r6, r7, [sp, #44]
+.ifc \bdmax, r8
+ ldr r8, [sp, #52]
+.endif
+ vdup.16 q1, \mx
+ vdup.16 q3, \my
+ rsb r9, \mx, #16
+ rsb r10, \my, #16
+ vdup.16 q0, r9
+ vdup.16 q2, r10
+.ifc \type, prep
+ lsl \d_strd, \w, #1
+.endif
+ clz \bdmax, \bdmax // bitdepth_max
+ clz r9, \w
+ sub \bdmax, \bdmax, #18 // intermediate_bits = clz(bitdepth_max) - 18
+ cmp \mx, #0
+ sub r9, r9, #24
+ rsb r11, \bdmax, #4 // 4 - intermediate_bits
+ add r12, \bdmax, #4 // 4 + intermediate_bits
+ bne L(\type\()_bilin_h)
+ cmp \my, #0
+ bne L(\type\()_bilin_v)
+ b \type\()_neon
+
+L(\type\()_bilin_h):
+ cmp \my, #0
+ bne L(\type\()_bilin_hv)
+
+ adr r10, L(\type\()_bilin_h_tbl)
+ vdup.16 q15, r11 // 4 - intermediate_bits
+ ldr r9, [r10, r9, lsl #2]
+ vneg.s16 q15, q15 // -(4-intermediate_bits)
+.ifc \type, put
+ vdup.16 q14, \bdmax // intermediate_bits
+.else
+ vmov.i16 q14, #PREP_BIAS
+.endif
+ add r10, r10, r9
+.ifc \type, put
+ vneg.s16 q14, q14 // -intermediate_bits
+.endif
+ bx r10
+
+ .align 2
+L(\type\()_bilin_h_tbl):
+ .word 1280f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_bilin_h_tbl) + CONFIG_THUMB
+
+20: // 2xN h
+.ifc \type, put
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+2:
+ vld1.16 {d16}, [\src], \s_strd
+ vld1.16 {d18}, [\sr2], \s_strd
+ vext.8 d17, d16, d16, #2
+ vext.8 d19, d18, d18, #2
+ vtrn.32 d16, d18
+ vtrn.32 d17, d19
+ subs \h, \h, #2
+ vmul.i16 d16, d16, d0
+ vmla.i16 d16, d17, d2
+ vrshl.u16 d16, d16, d30
+ vrshl.u16 d16, d16, d28
+ vst1.32 {d16[0]}, [\dst, :32], \d_strd
+ vst1.32 {d16[1]}, [\ds2, :32], \d_strd
+ bgt 2b
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN h
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+4:
+ vld1.16 {q8}, [\src], \s_strd
+ vld1.16 {q10}, [\sr2], \s_strd
+ vext.8 q9, q8, q8, #2
+ vext.8 q11, q10, q10, #2
+ vmov d17, d20
+ vmov d19, d22
+ subs \h, \h, #2
+ vmul.i16 q8, q8, q0
+ vmla.i16 q8, q9, q1
+ vrshl.u16 q8, q8, q15
+.ifc \type, put
+ vrshl.u16 q8, q8, q14
+.else
+ vsub.i16 q8, q8, q14
+.endif
+ vst1.16 {d16}, [\dst, :64], \d_strd
+ vst1.16 {d17}, [\ds2, :64], \d_strd
+ bgt 4b
+ pop {r4-r11,pc}
+
+80: // 8xN h
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \d_strd, \d_strd, #1
+ lsl \s_strd, \s_strd, #1
+8:
+ vld1.16 {d16, d17, d18}, [\src], \s_strd
+ vld1.16 {d20, d21, d22}, [\sr2], \s_strd
+ vext.8 q9, q8, q9, #2
+ vext.8 q11, q10, q11, #2
+ subs \h, \h, #2
+ vmul.i16 q8, q8, q0
+ vmla.i16 q8, q9, q1
+ vmul.i16 q10, q10, q0
+ vmla.i16 q10, q11, q1
+ vrshl.u16 q8, q8, q15
+ vrshl.u16 q10, q10, q15
+.ifc \type, put
+ vrshl.u16 q8, q8, q14
+ vrshl.u16 q10, q10, q14
+.else
+ vsub.i16 q8, q8, q14
+ vsub.i16 q10, q10, q14
+.endif
+ vst1.16 {q8}, [\dst, :128], \d_strd
+ vst1.16 {q10}, [\ds2, :128], \d_strd
+ bgt 8b
+ pop {r4-r11,pc}
+160:
+320:
+640:
+1280: // 16xN, 32xN, ... h
+ vpush {q4-q7}
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+
+ sub \s_strd, \s_strd, \w, lsl #1
+ sub \s_strd, \s_strd, #16
+.ifc \type, put
+ lsl \d_strd, \d_strd, #1
+ sub \d_strd, \d_strd, \w, lsl #1
+.endif
+161:
+ vld1.16 {q4}, [\src]!
+ vld1.16 {q9}, [\sr2]!
+ mov \mx, \w
+
+16:
+ vld1.16 {q5, q6}, [\src]!
+ vld1.16 {q10, q11}, [\sr2]!
+ vext.8 q7, q4, q5, #2
+ vext.8 q8, q5, q6, #2
+ vext.8 q12, q9, q10, #2
+ vext.8 q13, q10, q11, #2
+ vmul.i16 q4, q4, q0
+ vmla.i16 q4, q7, q1
+ vmul.i16 q5, q5, q0
+ vmla.i16 q5, q8, q1
+ vmul.i16 q9, q9, q0
+ vmla.i16 q9, q12, q1
+ vmul.i16 q10, q10, q0
+ vmla.i16 q10, q13, q1
+ vrshl.u16 q4, q4, q15
+ vrshl.u16 q5, q5, q15
+ vrshl.u16 q9, q9, q15
+ vrshl.u16 q10, q10, q15
+ subs \mx, \mx, #16
+.ifc \type, put
+ vrshl.u16 q4, q4, q14
+ vrshl.u16 q5, q5, q14
+ vrshl.u16 q9, q9, q14
+ vrshl.u16 q10, q10, q14
+.else
+ vsub.i16 q4, q4, q14
+ vsub.i16 q5, q5, q14
+ vsub.i16 q9, q9, q14
+ vsub.i16 q10, q10, q14
+.endif
+ vst1.16 {q4, q5}, [\dst, :128]!
+ vst1.16 {q9, q10}, [\ds2, :128]!
+ ble 9f
+
+ vmov q4, q6
+ vmov q9, q11
+ b 16b
+
+9:
+ add \dst, \dst, \d_strd
+ add \ds2, \ds2, \d_strd
+ add \src, \src, \s_strd
+ add \sr2, \sr2, \s_strd
+
+ subs \h, \h, #2
+ bgt 161b
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+
+
+L(\type\()_bilin_v):
+ cmp \h, #4
+ adr r10, L(\type\()_bilin_v_tbl)
+.ifc \type, prep
+ vdup.16 q15, r11 // 4 - intermediate_bits
+.endif
+ ldr r9, [r10, r9, lsl #2]
+.ifc \type, prep
+ vmov.i16 q14, #PREP_BIAS
+ vneg.s16 q15, q15 // -(4-intermediate_bits)
+.endif
+ add r10, r10, r9
+ bx r10
+
+ .align 2
+L(\type\()_bilin_v_tbl):
+ .word 1280f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_bilin_v_tbl) + CONFIG_THUMB
+
+20: // 2xN v
+.ifc \type, put
+ cmp \h, #2
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ // 2x2 v
+ vld1.32 {d16[]}, [\src], \s_strd
+ bgt 24f
+22:
+ vld1.32 {d17[]}, [\sr2], \s_strd
+ vld1.32 {d18[]}, [\src], \s_strd
+ vext.8 d16, d16, d17, #4
+ vext.8 d17, d17, d18, #4
+ vmul.i16 d16, d16, d4
+ vmla.i16 d16, d17, d6
+ vrshr.u16 d16, d16, #4
+ vst1.32 {d16[0]}, [\dst, :32]
+ vst1.32 {d16[1]}, [\ds2, :32]
+ pop {r4-r11,pc}
+24: // 2x4, 2x6, 2x8, ... v
+ vld1.32 {d17[]}, [\sr2], \s_strd
+ vld1.32 {d18[]}, [\src], \s_strd
+ vld1.32 {d19[]}, [\sr2], \s_strd
+ vld1.32 {d20[]}, [\src], \s_strd
+ subs \h, \h, #4
+ vext.8 d16, d16, d17, #4
+ vext.8 d17, d17, d18, #4
+ vext.8 d18, d18, d19, #4
+ vext.8 d19, d19, d20, #4
+ vswp d17, d18
+ vmul.i16 q8, q8, q2
+ vmla.i16 q8, q9, q3
+ cmp \h, #2
+ vrshr.u16 q8, q8, #4
+ vst1.32 {d16[0]}, [\dst, :32], \d_strd
+ vst1.32 {d16[1]}, [\ds2, :32], \d_strd
+ vst1.32 {d17[0]}, [\dst, :32], \d_strd
+ vst1.32 {d17[1]}, [\ds2, :32], \d_strd
+ blt 0f
+ vmov d16, d20
+ beq 22b
+ b 24b
+0:
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN v
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vld1.16 {d16}, [\src], \s_strd
+4:
+ vld1.16 {d17}, [\sr2], \s_strd
+ vld1.16 {d19}, [\src], \s_strd
+ vmov d18, d17
+ vmul.i16 q8, q8, q2
+ vmla.i16 q8, q9, q3
+ subs \h, \h, #2
+.ifc \type, put
+ vrshr.u16 q8, q8, #4
+.else
+ vrshl.u16 q8, q8, q15
+ vsub.i16 q8, q8, q14
+.endif
+ vst1.16 {d16}, [\dst, :64], \d_strd
+ vst1.16 {d17}, [\ds2, :64], \d_strd
+ ble 0f
+ vmov d16, d19
+ b 4b
+0:
+ pop {r4-r11,pc}
+
+80: // 8xN v
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+ vld1.16 {q8}, [\src], \s_strd
+8:
+ vld1.16 {q9}, [\sr2], \s_strd
+ vld1.16 {q10}, [\src], \s_strd
+ vmul.i16 q8, q8, q2
+ vmla.i16 q8, q9, q3
+ vmul.i16 q9, q9, q2
+ vmla.i16 q9, q10, q3
+ subs \h, \h, #2
+.ifc \type, put
+ vrshr.u16 q8, q8, #4
+ vrshr.u16 q9, q9, #4
+.else
+ vrshl.u16 q8, q8, q15
+ vrshl.u16 q9, q9, q15
+ vsub.i16 q8, q8, q14
+ vsub.i16 q9, q9, q14
+.endif
+ vst1.16 {q8}, [\dst, :128], \d_strd
+ vst1.16 {q9}, [\ds2, :128], \d_strd
+ ble 0f
+ vmov q8, q10
+ b 8b
+0:
+ pop {r4-r11,pc}
+
+160: // 16xN, 32xN, ...
+320:
+640:
+1280:
+ mov \my, \h
+1:
+ add \ds2, \dst, \d_strd
+ add \sr2, \src, \s_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.16 {q8, q9}, [\src], \s_strd
+2:
+ vld1.16 {q10, q11}, [\sr2], \s_strd
+ vld1.16 {q12, q13}, [\src], \s_strd
+ vmul.i16 q8, q8, q2
+ vmla.i16 q8, q10, q3
+ vmul.i16 q9, q9, q2
+ vmla.i16 q9, q11, q3
+ vmul.i16 q10, q10, q2
+ vmla.i16 q10, q12, q3
+ vmul.i16 q11, q11, q2
+ vmla.i16 q11, q13, q3
+ subs \h, \h, #2
+.ifc \type, put
+ vrshr.u16 q8, q8, #4
+ vrshr.u16 q9, q9, #4
+ vrshr.u16 q10, q10, #4
+ vrshr.u16 q11, q11, #4
+.else
+ vrshl.u16 q8, q8, q15
+ vrshl.u16 q9, q9, q15
+ vrshl.u16 q10, q10, q15
+ vrshl.u16 q11, q11, q15
+ vsub.i16 q8, q8, q14
+ vsub.i16 q9, q9, q14
+ vsub.i16 q10, q10, q14
+ vsub.i16 q11, q11, q14
+.endif
+ vst1.16 {q8, q9}, [\dst, :128], \d_strd
+ vst1.16 {q10, q11}, [\ds2, :128], \d_strd
+ ble 9f
+ vmov q8, q12
+ vmov q9, q13
+ b 2b
+9:
+ subs \w, \w, #16
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #1
+ mov \h, \my
+ add \src, \src, #32
+ add \dst, \dst, #32
+ b 1b
+0:
+ pop {r4-r11,pc}
+
+L(\type\()_bilin_hv):
+ adr r10, L(\type\()_bilin_hv_tbl)
+ vdup.16 q15, r11 // 4 - intermediate_bits
+ ldr r9, [r10, r9, lsl #2]
+ vneg.s16 q15, q15 // -(4-intermediate_bits)
+.ifc \type, put
+ vdup.32 q14, r12 // 4 + intermediate_bits
+.else
+ vmov.i16 q14, #PREP_BIAS
+.endif
+ add r10, r10, r9
+.ifc \type, put
+ vneg.s32 q14, q14 // -(4+intermediate_bits)
+.endif
+ bx r10
+
+ .align 2
+L(\type\()_bilin_hv_tbl):
+ .word 1280f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 640f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 320f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 160f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 80f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 40f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+ .word 20f - L(\type\()_bilin_hv_tbl) + CONFIG_THUMB
+
+20: // 2xN hv
+.ifc \type, put
+ add \sr2, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.16 {d20}, [\src], \s_strd
+ vext.8 d21, d20, d20, #2
+ vmul.i16 d16, d20, d0
+ vmla.i16 d16, d21, d2
+ vrshl.u16 d16, d16, d30
+ vext.8 d16, d16, d16, #4
+
+2:
+ vld1.16 {d20}, [\sr2], \s_strd
+ vld1.16 {d22}, [\src], \s_strd
+ vext.8 d21, d20, d20, #2
+ vext.8 d23, d22, d22, #2
+ vtrn.32 d20, d22
+ vtrn.32 d21, d23
+ vmul.i16 d18, d20, d0
+ vmla.i16 d18, d21, d2
+ vrshl.u16 d18, d18, d30
+
+ vext.8 d16, d16, d18, #4
+
+ vmull.u16 q8, d16, d4
+ vmlal.u16 q8, d18, d6
+ vrshl.u32 q8, q8, q14
+ vmovn.i32 d16, q8
+ subs \h, \h, #2
+ vst1.32 {d16[0]}, [\dst, :32], \d_strd
+ vst1.32 {d16[1]}, [\ds2, :32], \d_strd
+ ble 0f
+ vmov d16, d18
+ b 2b
+0:
+ pop {r4-r11,pc}
+.endif
+
+40: // 4xN hv
+ add \sr2, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.16 {q10}, [\src], \s_strd
+ vext.8 d21, d20, d21, #2
+ vmul.i16 d16, d20, d0
+ vmla.i16 d16, d21, d2
+ vrshl.u16 d16, d16, d30
+
+4:
+ vld1.16 {q10}, [\sr2], \s_strd
+ vld1.16 {q11}, [\src], \s_strd
+ vext.8 d21, d20, d21, #2
+ vext.8 d23, d22, d23, #2
+ vswp d21, d22
+ vmul.i16 q9, q10, q0
+ vmla.i16 q9, q11, q1
+ vrshl.u16 q9, q9, q15
+
+ vmull.u16 q10, d16, d4
+ vmlal.u16 q10, d18, d6
+ vmull.u16 q11, d18, d4
+ vmlal.u16 q11, d19, d6
+.ifc \type, put
+ vrshl.u32 q10, q10, q14
+ vrshl.u32 q11, q11, q14
+ vmovn.i32 d20, q10
+ vmovn.i32 d21, q11
+.else
+ vrshrn.i32 d20, q10, #4
+ vrshrn.i32 d21, q11, #4
+ vsub.i16 q10, q10, q14
+.endif
+ subs \h, \h, #2
+ vst1.16 {d20}, [\dst, :64], \d_strd
+ vst1.16 {d21}, [\ds2, :64], \d_strd
+ ble 0f
+ vmov d16, d19
+ b 4b
+0:
+ pop {r4-r11,pc}
+
+80: // 8xN, 16xN, ... hv
+160:
+320:
+640:
+1280:
+ mov \my, \h
+
+1:
+ add \sr2, \src, \s_strd
+ add \ds2, \dst, \d_strd
+ lsl \s_strd, \s_strd, #1
+ lsl \d_strd, \d_strd, #1
+
+ vld1.16 {d20, d21, d22}, [\src], \s_strd
+ vext.8 q11, q10, q11, #2
+ vmul.i16 q8, q10, q0
+ vmla.i16 q8, q11, q1
+ vrshl.u16 q8, q8, q15
+
+2:
+ vld1.16 {d20, d21, d22}, [\sr2], \s_strd
+ vld1.16 {d24, d25, d26}, [\src], \s_strd
+ vext.8 q11, q10, q11, #2
+ vext.8 q13, q12, q13, #2
+ vmul.i16 q9, q10, q0
+ vmla.i16 q9, q11, q1
+ vmul.i16 q10, q12, q0
+ vmla.i16 q10, q13, q1
+ vrshl.u16 q9, q9, q15
+ vrshl.u16 q10, q10, q15
+
+ vmull.u16 q11, d16, d4
+ vmlal.u16 q11, d18, d6
+ vmull.u16 q12, d17, d4
+ vmlal.u16 q12, d19, d6
+ vmull.u16 q8, d18, d4
+ vmlal.u16 q8, d20, d6
+ vmull.u16 q9, d19, d4
+ vmlal.u16 q9, d21, d6
+.ifc \type, put
+ vrshl.u32 q11, q11, q14
+ vrshl.u32 q12, q12, q14
+ vrshl.u32 q8, q8, q14
+ vrshl.u32 q9, q9, q14
+ vmovn.i32 d22, q11
+ vmovn.i32 d23, q12
+ vmovn.i32 d16, q8
+ vmovn.i32 d17, q9
+.else
+ vrshrn.i32 d22, q11, #4
+ vrshrn.i32 d23, q12, #4
+ vrshrn.i32 d16, q8, #4
+ vrshrn.i32 d17, q9, #4
+ vsub.i16 q11, q11, q14
+ vsub.i16 q8, q8, q14
+.endif
+ subs \h, \h, #2
+ vst1.16 {q11}, [\dst, :128], \d_strd
+ vst1.16 {q8}, [\ds2, :128], \d_strd
+ ble 9f
+ vmov q8, q10
+ b 2b
+9:
+ subs \w, \w, #8
+ ble 0f
+ asr \s_strd, \s_strd, #1
+ asr \d_strd, \d_strd, #1
+ mls \src, \s_strd, \my, \src
+ mls \dst, \d_strd, \my, \dst
+ sub \src, \src, \s_strd, lsl #1
+ mov \h, \my
+ add \src, \src, #16
+ add \dst, \dst, #16
+ b 1b
+0:
+ pop {r4-r11,pc}
+endfunc
+.endm
+
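+// Register mapping for the two instantiations (matching the C signatures):
+// put: (dst, dst_stride, src, src_stride, w, h, mx, my, bitdepth_max)
+// prep: (tmp, src, src_stride, w, h, mx, my, bitdepth_max) - prep has no
+// destination stride argument, so r8 holds the computed tmp stride (2*w).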
+filter_fn put, r0, r1, r2, r3, r4, r5, r6, r7, r8, r9, r10
+filter_fn prep, r0, r8, r1, r2, r3, r4, r5, r6, r7, r9, r10
+
+.macro load_filter_ptr src
+ asr r12, \src, #10
+ add r12, r11, r12, lsl #3
+.endm
+
+.macro load_filter_coef dst, src, inc
+ add \src, \src, \inc
+ vld1.8 {\dst}, [r12, :64]
+.endm
+
+.macro load_filter_row dst, src, inc
+ load_filter_ptr \src
+ load_filter_coef \dst, \src, \inc
+.endm
+
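+// Filters one row of 8 outputs for warp_affine: each pixel uses its own
+// 8-tap filter from mc_warp_filter, selected by the accumulated x position
+// >> 10 (the +64 entry bias is folded into the r11 base set up by the
+// caller), and the eight 32-bit sums are returned in q4/q5, rounded by
+// (7 - intermediate_bits).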
+function warp_filter_horz_neon
+ load_filter_ptr r5 // filter 0
+ vld1.16 {q6,q7}, [r2], r3
+
+ load_filter_coef d0, r5, r7 // filter 0
+ load_filter_row d2, r5, r7 // filter 1
+ vmovl.s8 q0, d0 // filter 0
+ vext.8 q3, q6, q7, #2*1 // filter 1 pixels
+ vmovl.s8 q1, d2 // filter 1
+
+ vmull.s16 q4, d12, d0 // filter 0 output (0-3)
+ vmull.s16 q5, d13, d1 // filter 0 output (4-7)
+
+ load_filter_ptr r5 // filter 2
+
+ vmull.s16 q2, d6, d2 // filter 1 output (0-3)
+ vmull.s16 q3, d7, d3 // filter 1 output (4-7)
+
+ load_filter_coef d0, r5, r7 // filter 2
+
+ vpadd.i32 d8, d8, d9 // half pixel 0 (2x32)
+ vpadd.i32 d9, d10, d11 // half pixel 0 (2x32)
+
+ load_filter_ptr r5 // filter 3
+
+ vpadd.i32 d4, d4, d5 // half pixel 1 (2x32)
+ vpadd.i32 d5, d6, d7 // half pixel 1 (2x32)
+
+ vmovl.s8 q0, d0 // filter 2
+ vext.8 q3, q6, q7, #2*2 // filter 2 pixels
+
+ vpadd.i32 d8, d8, d9 // pixel 0 (2x32)
+ vpadd.i32 d9, d4, d5 // pixel 1 (2x32)
+
+ load_filter_coef d2, r5, r7 // filter 3
+
+ vmull.s16 q2, d6, d0 // filter 2 output (0-3)
+ vmull.s16 q3, d7, d1 // filter 2 output (4-7)
+
+ load_filter_ptr r5 // filter 4
+
+ vpadd.i32 d8, d8, d9 // pixel 0,1
+
+ vpadd.i32 d9, d4, d5 // half pixel 2 (2x32)
+ vpadd.i32 d10, d6, d7 // half pixel 2 (2x32)
+
+ vmovl.s8 q1, d2 // filter 3
+ vext.8 q3, q6, q7, #2*3 // filter 3 pixels
+
+ load_filter_coef d0, r5, r7 // filter 4
+
+ vpadd.i32 d9, d9, d10 // pixel 2 (2x32)
+
+ vmull.s16 q2, d6, d2 // filter 3 output (0-3)
+ vmull.s16 q3, d7, d3 // filter 3 output (4-7)
+
+ vmovl.s8 q0, d0 // filter 4
+ load_filter_ptr r5 // filter 5
+
+ vpadd.i32 d10, d4, d5 // half pixel 3 (2x32)
+ vpadd.i32 d11, d6, d7 // half pixel 3 (2x32)
+
+ vext.8 q3, q6, q7, #2*4 // filter 4 pixels
+ load_filter_coef d2, r5, r7 // filter 5
+
+ vpadd.i32 d10, d10, d11 // pixel 3 (2x32)
+
+ vpadd.i32 d9, d9, d10 // pixel 2,3
+
+ vmull.s16 q2, d6, d0 // filter 4 output (0-3)
+ vmull.s16 q3, d7, d1 // filter 4 output (4-7)
+
+ vmovl.s8 q1, d2 // filter 5
+ load_filter_ptr r5 // filter 6
+
+ vpadd.i32 d10, d4, d5 // half pixel 4 (2x32)
+ vpadd.i32 d11, d6, d7 // half pixel 4 (2x32)
+
+ vext.8 q3, q6, q7, #2*5 // filter 5 pixels
+ load_filter_coef d0, r5, r7 // filter 6
+
+ vpadd.i32 d10, d10, d11 // pixel 4 (2x32)
+
+ vmull.s16 q2, d6, d2 // filter 5 output (0-3)
+ vmull.s16 q3, d7, d3 // filter 5 output (4-7)
+
+ vmovl.s8 q0, d0 // filter 6
+ load_filter_ptr r5 // filter 7
+
+ vpadd.i32 d4, d4, d5 // half pixel 5 (2x32)
+ vpadd.i32 d5, d6, d7 // half pixel 5 (2x32)
+
+ vext.8 q3, q6, q7, #2*6 // filter 6 pixels
+ load_filter_coef d2, r5, r7 // filter 7
+
+ vpadd.i32 d11, d4, d5 // pixel 5 (2x32)
+
+ vmull.s16 q2, d6, d0 // filter 6 output (0-3)
+ vmull.s16 q3, d7, d1 // filter 6 output (4-7)
+
+ vmovl.s8 q1, d2 // filter 7
+
+ vpadd.i32 d10, d10, d11 // pixel 4,5
+
+ vpadd.i32 d4, d4, d5 // half pixel 6 (2x32)
+ vpadd.i32 d5, d6, d7 // half pixel 6 (2x32)
+
+ vext.8 q3, q6, q7, #2*7 // filter 7 pixels
+
+ vpadd.i32 d11, d4, d5 // pixel 6 (2x32)
+
+ vmull.s16 q2, d6, d2 // filter 7 output (0-3)
+ vmull.s16 q3, d7, d3 // filter 7 output (4-7)
+
+ vld1.32 {d14[],d15[]}, [sp] // -(7 - intermediate_bits)
+
+ vpadd.i32 d4, d4, d5 // half pixel 7 (2x32)
+ vpadd.i32 d5, d6, d7 // half pixel 7 (2x32)
+
+ sub r5, r5, r7, lsl #3
+
+ vpadd.i32 d4, d4, d5 // pixel 7 (2x32)
+
+ add r5, r5, r8
+
+ vpadd.i32 d11, d11, d4 // pixel 6,7
+
+ vrshl.s32 q4, q4, q7 // -(7 - intermediate_bits)
+ vrshl.s32 q5, q5, q7 // -(7 - intermediate_bits)
+
+ bx lr
+endfunc
+
+// void dav1d_warp_affine_8x8_16bpc_neon(
+// pixel *dst, const ptrdiff_t dst_stride,
+// const pixel *src, const ptrdiff_t src_stride,
+// const int16_t *const abcd, int mx, int my,
+// const int bitdepth_max)
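+//
+// Rough C model of what the code below computes (a sketch only; the C
+// reference in src/mc_tmpl.c is authoritative):
+//     for (y = 0; y < 15; y++, mx += abcd[1])  // 8 output rows + 7 rows of context
+//         for (x = 0, tmx = mx; x < 8; x++, tmx += abcd[0])
+//             mid[y][x] = hfilter8(src, dav1d_mc_warp_filter[64 + ((tmx + 512) >> 10)]);
+//     for (y = 0; y < 8; y++, my += abcd[3])
+//         for (x = 0, tmy = my; x < 8; x++, tmy += abcd[2])
+//             dst[y][x] = vfilter8(mid, dav1d_mc_warp_filter[64 + ((tmy + 512) >> 10)]);
+// where hfilter8/vfilter8 stand for an 8-tap filter plus the rounding shifts
+// documented in the comments below.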
+.macro warp t
+function warp_affine_8x8\t\()_16bpc_neon, export=1
+ push {r4-r11,lr}
+ vpush {q4-q7}
+ ldrd r4, r5, [sp, #100]
+ ldrd r6, r7, [sp, #108]
+ sub sp, sp, #8
+
+ clz r7, r7
+ // intermediate_bits = clz(bitdepth_max) - 18
+.ifb \t
+ sub r8, r7, #11 // 7 + intermediate_bits = clz(bitdepth_max) - 18 + 7
+.endif
+ sub r7, r7, #25 // -(7 - intermediate_bits)
+.ifb \t
+ neg r8, r8 // -(7 + intermediate_bits)
+.endif
+ str r7, [sp] // spill -(7 - intermediate_bits) on stack
+.ifb \t
+ str r8, [sp, #4] // spill -(7 + intermediate_bits) on stack
+.endif
+
+ ldrd r8, r9, [r4]
+ sxth r7, r8
+ asr r8, r8, #16
+ asr r4, r9, #16
+ sxth r9, r9
+ mov r10, #8
+ sub r2, r2, r3, lsl #1
+ sub r2, r2, r3
+ sub r2, r2, #6
+ movrel r11, X(mc_warp_filter), 64*8
+.ifnb \t
+ lsl r1, r1, #1
+.endif
+ add r5, r5, #512
+ add r6, r6, #512
+
+ bl warp_filter_horz_neon
+ vmovn.i32 d16, q4
+ vmovn.i32 d17, q5
+ bl warp_filter_horz_neon
+ vmovn.i32 d18, q4
+ vmovn.i32 d19, q5
+ bl warp_filter_horz_neon
+ vmovn.i32 d20, q4
+ vmovn.i32 d21, q5
+ bl warp_filter_horz_neon
+ vmovn.i32 d22, q4
+ vmovn.i32 d23, q5
+ bl warp_filter_horz_neon
+ vmovn.i32 d24, q4
+ vmovn.i32 d25, q5
+ bl warp_filter_horz_neon
+ vmovn.i32 d26, q4
+ vmovn.i32 d27, q5
+ bl warp_filter_horz_neon
+ vmovn.i32 d28, q4
+ vmovn.i32 d29, q5
+
+1:
+ bl warp_filter_horz_neon
+ vmovn.i32 d30, q4
+ vmovn.i32 d31, q5
+
+ load_filter_row d8, r6, r9
+ load_filter_row d9, r6, r9
+ load_filter_row d10, r6, r9
+ load_filter_row d11, r6, r9
+ load_filter_row d12, r6, r9
+ load_filter_row d13, r6, r9
+ load_filter_row d14, r6, r9
+ load_filter_row d15, r6, r9
+ transpose_8x8b q4, q5, q6, q7, d8, d9, d10, d11, d12, d13, d14, d15
+ vmovl.s8 q1, d8
+ vmovl.s8 q2, d9
+ vmovl.s8 q3, d10
+ vmovl.s8 q4, d11
+ vmovl.s8 q5, d12
+ vmovl.s8 q6, d13
+
+ sub r6, r6, r9, lsl #3
+
+ // This ordering of vmull/vmlal is highly beneficial for
+ // Cortex A8/A9/A53 here, but harmful for Cortex A7.
+ vmull.s16 q0, d16, d2
+ vmlal.s16 q0, d18, d4
+ vmlal.s16 q0, d20, d6
+ vmlal.s16 q0, d22, d8
+ vmlal.s16 q0, d24, d10
+ vmlal.s16 q0, d26, d12
+ vmull.s16 q1, d17, d3
+ vmlal.s16 q1, d19, d5
+ vmlal.s16 q1, d21, d7
+ vmlal.s16 q1, d23, d9
+ vmlal.s16 q1, d25, d11
+ vmlal.s16 q1, d27, d13
+
+ vmovl.s8 q2, d14
+ vmovl.s8 q3, d15
+
+ vmlal.s16 q0, d28, d4
+ vmlal.s16 q0, d30, d6
+ vmlal.s16 q1, d29, d5
+ vmlal.s16 q1, d31, d7
+
+.ifb \t
+ ldr lr, [sp, #4] // -(7 + intermediate_bits)
+ ldr r12, [sp, #120] // bitdepth_max
+ vdup.32 q2, lr // -(7 + intermediate_bits)
+ vdup.16 q3, r12 // bitdepth_max
+.endif
+
+ vmov q8, q9
+ vmov q9, q10
+.ifb \t
+ vrshl.s32 q0, q0, q2 // -(7 + intermediate_bits)
+ vrshl.s32 q1, q1, q2 // -(7 + intermediate_bits)
+.else
+ vrshrn.s32 d0, q0, #7
+ vrshrn.s32 d1, q1, #7
+ vmov.i16 q3, #PREP_BIAS
+.endif
+ vmov q10, q11
+.ifb \t
+ vqmovun.s32 d0, q0
+ vqmovun.s32 d1, q1
+.else
+ vsub.i16 q0, q0, q3 // PREP_BIAS
+.endif
+ vmov q11, q12
+ vmov q12, q13
+.ifb \t
+ vmin.u16 q0, q0, q3 // bitdepth_max
+.endif
+ vmov q13, q14
+ vmov q14, q15
+ subs r10, r10, #1
+ vst1.16 {q0}, [r0, :128], r1
+
+ add r6, r6, r4
+ bgt 1b
+
+ add sp, sp, #8
+ vpop {q4-q7}
+ pop {r4-r11,pc}
+endfunc
+.endm
+
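+// Instantiate both variants: the plain one writes final pixels (rounded by
+// 7 + intermediate_bits and clamped to bitdepth_max), while the "t" variant
+// writes the intermediate int16 form, rounding by 7 and subtracting
+// PREP_BIAS instead.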
+warp
+warp t
+
+// void dav1d_emu_edge_16bpc_neon(
+// const intptr_t bw, const intptr_t bh,
+// const intptr_t iw, const intptr_t ih,
+// const intptr_t x, const intptr_t y,
+// pixel *dst, const ptrdiff_t dst_stride,
+// const pixel *ref, const ptrdiff_t ref_stride)
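+//
+// Sketch of the operation (the C reference is in src/mc_tmpl.c): clamp (x, y)
+// into the valid iw x ih source area, copy the overlapping centre of the
+// block, then replicate the nearest edge pixels into the left/right/top/
+// bottom extensions so that a full bw x bh block can be read from dst.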
+function emu_edge_16bpc_neon, export=1
+ push {r4-r11,lr}
+ ldrd r4, r5, [sp, #36]
+ ldrd r6, r7, [sp, #44]
+ ldrd r8, r9, [sp, #52]
+
+ // ref += iclip(y, 0, ih - 1) * PXSTRIDE(ref_stride)
+ // ref += iclip(x, 0, iw - 1)
+ sub r12, r3, #1 // ih - 1
+ cmp r5, r3
+ sub lr, r2, #1 // iw - 1
+ it lt
+ movlt r12, r5 // min(y, ih - 1)
+ cmp r4, r2
+ bic r12, r12, r12, asr #31 // max(min(y, ih - 1), 0)
+ it lt
+ movlt lr, r4 // min(x, iw - 1)
+ bic lr, lr, lr, asr #31 // max(min(x, iw - 1), 0)
+ mla r8, r12, r9, r8 // ref += iclip() * stride
+ add r8, r8, lr, lsl #1 // ref += iclip()
+
+ // bottom_ext = iclip(y + bh - ih, 0, bh - 1)
+ // top_ext = iclip(-y, 0, bh - 1)
+ add r10, r5, r1 // y + bh
+ neg r5, r5 // -y
+ sub r10, r10, r3 // y + bh - ih
+ sub r12, r1, #1 // bh - 1
+ cmp r10, r1
+ bic r5, r5, r5, asr #31 // max(-y, 0)
+ it ge
+ movge r10, r12 // min(y + bh - ih, bh-1)
+ cmp r5, r1
+ bic r10, r10, r10, asr #31 // max(min(y + bh - ih, bh-1), 0)
+ it ge
+ movge r5, r12 // min(max(-y, 0), bh-1)
+
+ // right_ext = iclip(x + bw - iw, 0, bw - 1)
+ // left_ext = iclip(-x, 0, bw - 1)
+ add r11, r4, r0 // x + bw
+ neg r4, r4 // -x
+ sub r11, r11, r2 // x + bw - iw
+ sub lr, r0, #1 // bw - 1
+ cmp r11, r0
+ bic r4, r4, r4, asr #31 // max(-x, 0)
+ it ge
+ movge r11, lr // min(x + bw - iw, bw-1)
+ cmp r4, r0
+ bic r11, r11, r11, asr #31 // max(min(x + bw - iw, bw-1), 0)
+ it ge
+ movge r4, lr // min(max(-x, 0), bw - 1)
+
+ // center_h = bh - top_ext - bottom_ext
+ // dst += top_ext * PXSTRIDE(dst_stride)
+ // center_w = bw - left_ext - right_ext
+ sub r1, r1, r5 // bh - top_ext
+ mla r6, r5, r7, r6
+ sub r2, r0, r4 // bw - left_ext
+ sub r1, r1, r10 // center_h = bh - top_ext - bottom_ext
+ sub r2, r2, r11 // center_w = bw - left_ext - right_ext
+
+ mov r0, r6 // backup of dst
+
+.macro v_loop need_left, need_right
+0:
+.if \need_left
+ vld1.16 {d0[], d1[]}, [r8]
+ mov r12, r6 // out = dst
+ mov r3, r4
+ vmov q1, q0
+1:
+ subs r3, r3, #16
+ vst1.16 {q0, q1}, [r12, :128]!
+ bgt 1b
+.endif
+ mov lr, r8
+ add r12, r6, r4, lsl #1 // out = dst + left_ext
+ mov r3, r2
+1:
+ vld1.16 {q0, q1}, [lr]!
+ subs r3, r3, #32
+ vld1.16 {q2, q3}, [lr]!
+.if \need_left
+ vst1.16 {q0, q1}, [r12]!
+ vst1.16 {q2, q3}, [r12]!
+.else
+ vst1.16 {q0, q1}, [r12, :128]!
+ vst1.16 {q2, q3}, [r12, :128]!
+.endif
+ bgt 1b
+.if \need_right
+ add r3, r8, r2, lsl #1 // in + center_w
+ sub r3, r3, #2 // in + center_w - 1
+ add r12, r6, r4, lsl #1 // dst + left_ext
+ vld1.16 {d0[], d1[]}, [r3]
+ add r12, r12, r2, lsl #1 // out = dst + left_ext + center_w
+ mov r3, r11
+ vmov q1, q0
+1:
+ subs r3, r3, #16
+ vst1.16 {q0, q1}, [r12]!
+ bgt 1b
+.endif
+
+ subs r1, r1, #1 // center_h--
+ add r6, r6, r7
+ add r8, r8, r9
+ bgt 0b
+.endm
+
+ cmp r4, #0
+ beq 2f
+ // need_left
+ cmp r11, #0
+ beq 3f
+ // need_left + need_right
+ v_loop 1, 1
+ b 5f
+
+2:
+ // !need_left
+ cmp r11, #0
+ beq 4f
+ // !need_left + need_right
+ v_loop 0, 1
+ b 5f
+
+3:
+ // need_left + !need_right
+ v_loop 1, 0
+ b 5f
+
+4:
+ // !need_left + !need_right
+ v_loop 0, 0
+
+5:
+ cmp r10, #0
+ // Storing the original dst in r0 overwrote bw; recalculate it here
+ add r2, r2, r4 // center_w + left_ext
+ add r2, r2, r11 // bw = center_w + left_ext + right_ext
+
+ beq 3f
+ // need_bottom
+ sub r8, r6, r7 // ref = dst - stride
+ mov r4, r2
+ sub r12, r7, #32
+1:
+ vld1.16 {q0, q1}, [r8, :128]!
+ mov r3, r10
+ vld1.16 {q2, q3}, [r8, :128]!
+2:
+ vst1.16 {q0, q1}, [r6, :128]!
+ subs r3, r3, #1
+ vst1.16 {q2, q3}, [r6, :128], r12
+ bgt 2b
+ mls r6, r7, r10, r6 // dst -= bottom_ext * stride
+ subs r4, r4, #32 // bw -= 32
+ add r6, r6, #64 // dst += 32
+ bgt 1b
+
+3:
+ cmp r5, #0
+ beq 3f
+ // need_top
+ mls r6, r7, r5, r0 // dst = stored_dst - top_ext * stride
+ sub r12, r7, #32
+1:
+ vld1.16 {q0, q1}, [r0, :128]!
+ mov r3, r5
+ vld1.16 {q2, q3}, [r0, :128]!
+2:
+ vst1.16 {q0, q1}, [r6, :128]!
+ subs r3, r3, #1
+ vst1.16 {q2, q3}, [r6, :128], r12
+ bgt 2b
+ mls r6, r7, r5, r6 // dst -= top_ext * stride
+ subs r2, r2, #32 // bw -= 32
+ add r6, r6, #64 // dst += 32
+ bgt 1b
+
+3:
+ pop {r4-r11,pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/msac.S b/third_party/dav1d/src/arm/32/msac.S
new file mode 100644
index 0000000000..b06e109dda
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/msac.S
@@ -0,0 +1,575 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2020, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+#define BUF_POS 0
+#define BUF_END 4
+#define DIF 8
+#define RNG 12
+#define CNT 16
+#define ALLOW_UPDATE_CDF 20
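+
+// Byte offsets of the MsacContext fields used below (see src/msac.h); they
+// assume a 32-bit build, where the buffer pointers and the ec_win dif field
+// are 4 bytes each.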
+
+const coeffs
+ .short 60, 56, 52, 48, 44, 40, 36, 32, 28, 24, 20, 16, 12, 8, 4, 0
+ .short 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0
+endconst
+
+const bits, align=4
+ .short 0x1, 0x2, 0x4, 0x8, 0x10, 0x20, 0x40, 0x80
+ .short 0x100, 0x200, 0x400, 0x800, 0x1000, 0x2000, 0x4000, 0x8000
+endconst
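+
+// coeffs supplies the EC_MIN_PROB * (n_symbols - ret) terms (EC_MIN_PROB is
+// 4); the loads below start at coeffs + 30 - 2*n_symbols so that lane i holds
+// 4 * (n_symbols - i). bits holds one-hot halfword masks that turn the
+// per-lane "c >= v" compare results into a single bit position, which
+// rbit + clz then convert into the decoded symbol index.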
+
+.macro vld1_align_n d0, q0, q1, src, n
+.if \n == 4
+ vld1.16 {\d0}, [\src, :64]
+.elseif \n == 8
+ vld1.16 {\q0}, [\src, :128]
+.else
+ vld1.16 {\q0, \q1}, [\src, :128]
+.endif
+.endm
+
+.macro vld1_n d0, q0, q1, src, n
+.if \n == 4
+ vld1.16 {\d0}, [\src]
+.elseif \n == 8
+ vld1.16 {\q0}, [\src]
+.else
+ vld1.16 {\q0, \q1}, [\src]
+.endif
+.endm
+
+.macro vst1_align_n d0, q0, q1, src, n
+.if \n == 4
+ vst1.16 {\d0}, [\src, :64]
+.elseif \n == 8
+ vst1.16 {\q0}, [\src, :128]
+.else
+ vst1.16 {\q0, \q1}, [\src, :128]
+.endif
+.endm
+
+.macro vst1_n d0, q0, q1, src, n
+.if \n == 4
+ vst1.16 {\d0}, [\src]
+.elseif \n == 8
+ vst1.16 {\q0}, [\src]
+.else
+ vst1.16 {\q0, \q1}, [\src]
+.endif
+.endm
+
+.macro vshr_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vshr.u16 \d0, \s0, \s3
+.else
+ vshr.u16 \d1, \s1, \s4
+.if \n == 16
+ vshr.u16 \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+.macro vadd_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vadd.i16 \d0, \s0, \s3
+.else
+ vadd.i16 \d1, \s1, \s4
+.if \n == 16
+ vadd.i16 \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+.macro vsub_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vsub.i16 \d0, \s0, \s3
+.else
+ vsub.i16 \d1, \s1, \s4
+.if \n == 16
+ vsub.i16 \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+.macro vand_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vand \d0, \s0, \s3
+.else
+ vand \d1, \s1, \s4
+.if \n == 16
+ vand \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+.macro vcge_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vcge.u16 \d0, \s0, \s3
+.else
+ vcge.u16 \d1, \s1, \s4
+.if \n == 16
+ vcge.u16 \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+.macro vrhadd_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vrhadd.u16 \d0, \s0, \s3
+.else
+ vrhadd.u16 \d1, \s1, \s4
+.if \n == 16
+ vrhadd.u16 \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+.macro vshl_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vshl.s16 \d0, \s0, \s3
+.else
+ vshl.s16 \d1, \s1, \s4
+.if \n == 16
+ vshl.s16 \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+.macro vqdmulh_n d0, d1, d2, s0, s1, s2, s3, s4, s5, n
+.if \n == 4
+ vqdmulh.s16 \d0, \s0, \s3
+.else
+ vqdmulh.s16 \d1, \s1, \s4
+.if \n == 16
+ vqdmulh.s16 \d2, \s2, \s5
+.endif
+.endif
+.endm
+
+// unsigned dav1d_msac_decode_symbol_adapt4_neon(MsacContext *s, uint16_t *cdf,
+// size_t n_symbols);
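+//
+// Rough C model of the symbol decode (a sketch; src/msac.c has the
+// authoritative reference):
+//     c = dif >> (EC_WIN_SIZE - 16);
+//     for (ret = 0;; ret++) {
+//         v = (((rng >> 8) * (cdf[ret] >> EC_PROB_SHIFT)) >> (7 - EC_PROB_SHIFT))
+//           + EC_MIN_PROB * (n_symbols - ret);
+//         if (c >= v)
+//             break;
+//     }
+// The NEON code below evaluates all candidate v values in parallel, picks the
+// first one with c >= v via the bit masks, optionally updates the CDF, and
+// then renormalises rng/dif in L(renorm).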
+
+function msac_decode_symbol_adapt4_neon, export=1
+.macro decode_update n
+ push {r4-r10,lr}
+ sub sp, sp, #48
+ add r8, r0, #RNG
+
+ vld1_align_n d0, q0, q1, r1, \n // cdf
+ vld1.16 {d16[]}, [r8, :16] // rng
+ movrel_local r9, coeffs, 30
+ vmov.i16 d30, #0x7f00 // 0x7f00
+ sub r9, r9, r2, lsl #1
+ vmvn.i16 q14, #0x3f // 0xffc0
+ add r8, sp, #14
+ vand d22, d16, d30 // rng & 0x7f00
+ vst1.16 {d16[0]}, [r8, :16] // store original u = s->rng
+ vand_n d4, q2, q3, d0, q0, q1, d28, q14, q14, \n // cdf & 0xffc0
+.if \n > 4
+ vmov d23, d22
+.endif
+
+ vld1_n d16, q8, q9, r9, \n // EC_MIN_PROB * (n_symbols - ret)
+ vqdmulh_n d20, q10, q11, d4, q2, q3, d22, q11, q11, \n // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
+ add r8, r0, #DIF + 2
+
+ vadd_n d16, q8, q9, d4, q2, q3, d16, q8, q9, \n // v = cdf + EC_MIN_PROB * (n_symbols - ret)
+.if \n == 4
+ vmov.i16 d17, #0
+.endif
+ vadd_n d16, q8, q9, d20, q10, q11, d16, q8, q9, \n // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
+
+ add r9, sp, #16
+ vld1.16 {d20[]}, [r8, :16] // dif >> (EC_WIN_SIZE - 16)
+ movrel_local r8, bits
+ vst1_n q8, q8, q9, r9, \n // store v values to allow indexed access
+
+ vmov d21, d20
+ vld1_align_n q12, q12, q13, r8, \n
+.if \n == 16
+ vmov q11, q10
+.endif
+
+ vcge_n q2, q2, q3, q10, q10, q11, q8, q8, q9, \n // c >= v
+
+ vand_n q10, q10, q11, q2, q2, q3, q12, q12, q13, \n // One bit per halfword set in the mask
+.if \n == 16
+ vadd.i16 q10, q10, q11
+.endif
+ vadd.i16 d20, d20, d21 // Aggregate mask bits
+ ldr r4, [r0, #ALLOW_UPDATE_CDF]
+ vpadd.i16 d20, d20, d20
+ lsl r10, r2, #1
+ vpadd.i16 d20, d20, d20
+ vmov.u16 r3, d20[0]
+ cmp r4, #0
+ rbit r3, r3
+ clz lr, r3 // ret
+
+ beq L(renorm)
+ // update_cdf
+ ldrh r3, [r1, r10] // count = cdf[n_symbols]
+ vmov.i8 q10, #0xff
+.if \n == 16
+ mov r4, #-5
+.else
+ mvn r12, r2
+ mov r4, #-4
+ cmn r12, #3 // set C if n_symbols <= 2
+.endif
+ vrhadd_n d16, q8, q9, d20, q10, q10, d4, q2, q3, \n // i >= val ? -1 : 32768
+.if \n == 16
+ sub r4, r4, r3, lsr #4 // -((count >> 4) + 5)
+.else
+ lsr r12, r3, #4 // count >> 4
+ sbc r4, r4, r12 // -((count >> 4) + (n_symbols > 2) + 4)
+.endif
+ vsub_n d16, q8, q9, d16, q8, q9, d0, q0, q1, \n // (32768 - cdf[i]) or (-1 - cdf[i])
+.if \n == 4
+ vdup.16 d20, r4 // -rate
+.else
+ vdup.16 q10, r4 // -rate
+.endif
+
+ sub r3, r3, r3, lsr #5 // count - (count == 32)
+ vsub_n d0, q0, q1, d0, q0, q1, d4, q2, q3, \n // cdf + (i >= val ? 1 : 0)
+ vshl_n d16, q8, q9, d16, q8, q9, d20, q10, q10, \n // ({32768,-1} - cdf[i]) >> rate
+ add r3, r3, #1 // count + (count < 32)
+ vadd_n d0, q0, q1, d0, q0, q1, d16, q8, q9, \n // cdf + (32768 - cdf[i]) >> rate
+ vst1_align_n d0, q0, q1, r1, \n
+ strh r3, [r1, r10]
+.endm
+
+ decode_update 4
+
+L(renorm):
+ add r8, sp, #16
+ add r8, r8, lr, lsl #1
+ ldrh r3, [r8] // v
+ ldrh r4, [r8, #-2] // u
+ ldr r6, [r0, #CNT]
+ ldr r7, [r0, #DIF]
+ sub r4, r4, r3 // rng = u - v
+ clz r5, r4 // clz(rng)
+ eor r5, r5, #16 // d = clz(rng) ^ 16
+ mvn r7, r7 // ~dif
+ add r7, r7, r3, lsl #16 // ~dif + (v << 16)
+L(renorm2):
+ lsl r4, r4, r5 // rng << d
+ subs r6, r6, r5 // cnt -= d
+ lsl r7, r7, r5 // (~dif + (v << 16)) << d
+ str r4, [r0, #RNG]
+ mvn r7, r7 // ~dif
+ bhs 9f
+
+ // refill
+ ldr r3, [r0, #BUF_POS] // BUF_POS
+ ldr r4, [r0, #BUF_END] // BUF_END
+ add r5, r3, #4
+ cmp r5, r4
+ bgt 2f
+
+ ldr r3, [r3] // next_bits
+ add r8, r6, #23 // shift_bits = cnt + 23
+ add r6, r6, #16 // cnt += 16
+ rev r3, r3 // next_bits = bswap(next_bits)
+ sub r5, r5, r8, lsr #3 // buf_pos -= shift_bits >> 3
+ and r8, r8, #24 // shift_bits &= 24
+ lsr r3, r3, r8 // next_bits >>= shift_bits
+ sub r8, r8, r6 // shift_bits -= 16 + cnt
+ str r5, [r0, #BUF_POS]
+ lsl r3, r3, r8 // next_bits <<= shift_bits
+ rsb r6, r8, #16 // cnt = cnt + 32 - shift_bits
+ eor r7, r7, r3 // dif ^= next_bits
+ b 9f
+
+2: // refill_eob
+ rsb r5, r6, #8 // c = 8 - cnt
+3:
+ cmp r3, r4
+ bge 4f
+ ldrb r8, [r3], #1
+ lsl r8, r8, r5
+ eor r7, r7, r8
+ subs r5, r5, #8
+ bge 3b
+
+4: // refill_eob_end
+ str r3, [r0, #BUF_POS]
+ rsb r6, r5, #8 // cnt = 8 - c
+
+9:
+ str r6, [r0, #CNT]
+ str r7, [r0, #DIF]
+
+ mov r0, lr
+ add sp, sp, #48
+
+ pop {r4-r10,pc}
+endfunc
+
+function msac_decode_symbol_adapt8_neon, export=1
+ decode_update 8
+ b L(renorm)
+endfunc
+
+function msac_decode_symbol_adapt16_neon, export=1
+ decode_update 16
+ b L(renorm)
+endfunc
+
+function msac_decode_hi_tok_neon, export=1
+ push {r4-r10,lr}
+ vld1.16 {d0}, [r1, :64] // cdf
+ add r4, r0, #RNG
+ vmov.i16 d31, #0x7f00 // 0x7f00
+ movrel_local r5, coeffs, 30-2*3
+ vmvn.i16 d30, #0x3f // 0xffc0
+ ldrh r9, [r1, #6] // count = cdf[n_symbols]
+ vld1.16 {d1[]}, [r4, :16] // rng
+ movrel_local r4, bits
+ vld1.16 {d29}, [r5] // EC_MIN_PROB * (n_symbols - ret)
+ add r5, r0, #DIF + 2
+ vld1.16 {q8}, [r4, :128]
+ mov r2, #-24
+ vand d20, d0, d30 // cdf & 0xffc0
+ ldr r10, [r0, #ALLOW_UPDATE_CDF]
+ vld1.16 {d2[]}, [r5, :16] // dif >> (EC_WIN_SIZE - 16)
+ sub sp, sp, #48
+ ldr r6, [r0, #CNT]
+ ldr r7, [r0, #DIF]
+ vmov d3, d2
+1:
+ vand d23, d1, d31 // rng & 0x7f00
+ vqdmulh.s16 d18, d20, d23 // ((cdf >> EC_PROB_SHIFT) * (r - 128)) >> 1
+ add r12, sp, #14
+ vadd.i16 d6, d20, d29 // v = cdf + EC_MIN_PROB * (n_symbols - ret)
+ vadd.i16 d6, d18, d6 // v = ((cdf >> EC_PROB_SHIFT) * r) >> 1 + EC_MIN_PROB * (n_symbols - ret)
+ vmov.i16 d7, #0
+ vst1.16 {d1[0]}, [r12, :16] // store original u = s->rng
+ add r12, sp, #16
+ vcge.u16 q2, q1, q3 // c >= v
+ vst1.16 {q3}, [r12] // store v values to allow indexed access
+ vand q9, q2, q8 // One bit per halfword set in the mask
+
+ vadd.i16 d18, d18, d19 // Aggregate mask bits
+ vpadd.i16 d18, d18, d18
+ vpadd.i16 d18, d18, d18
+ vmov.u16 r3, d18[0]
+ cmp r10, #0
+ add r2, r2, #5
+ rbit r3, r3
+ add r8, sp, #16
+ clz lr, r3 // ret
+
+ beq 2f
+ // update_cdf
+ vmov.i8 d22, #0xff
+ mov r4, #-5
+ vrhadd.u16 d6, d22, d4 // i >= val ? -1 : 32768
+ sub r4, r4, r9, lsr #4 // -((count >> 4) + 5)
+ vsub.i16 d6, d6, d0 // (32768 - cdf[i]) or (-1 - cdf[i])
+ vdup.16 d18, r4 // -rate
+
+ sub r9, r9, r9, lsr #5 // count - (count == 32)
+ vsub.i16 d0, d0, d4 // cdf + (i >= val ? 1 : 0)
+ vshl.s16 d6, d6, d18 // ({32768,-1} - cdf[i]) >> rate
+ add r9, r9, #1 // count + (count < 32)
+ vadd.i16 d0, d0, d6 // cdf + (32768 - cdf[i]) >> rate
+ vst1.16 {d0}, [r1, :64]
+ vand d20, d0, d30 // cdf & 0xffc0
+ strh r9, [r1, #6]
+
+2:
+ add r8, r8, lr, lsl #1
+ ldrh r3, [r8] // v
+ ldrh r4, [r8, #-2] // u
+ sub r4, r4, r3 // rng = u - v
+ clz r5, r4 // clz(rng)
+ eor r5, r5, #16 // d = clz(rng) ^ 16
+ mvn r7, r7 // ~dif
+ add r7, r7, r3, lsl #16 // ~dif + (v << 16)
+ lsl r4, r4, r5 // rng << d
+ subs r6, r6, r5 // cnt -= d
+ lsl r7, r7, r5 // (~dif + (v << 16)) << d
+ str r4, [r0, #RNG]
+ vdup.16 d1, r4
+ mvn r7, r7 // ~dif
+ bhs 9f
+
+ // refill
+ ldr r3, [r0, #BUF_POS] // BUF_POS
+ ldr r4, [r0, #BUF_END] // BUF_END
+ add r5, r3, #4
+ cmp r5, r4
+ bgt 2f
+
+ ldr r3, [r3] // next_bits
+ add r8, r6, #23 // shift_bits = cnt + 23
+ add r6, r6, #16 // cnt += 16
+ rev r3, r3 // next_bits = bswap(next_bits)
+ sub r5, r5, r8, lsr #3 // buf_pos -= shift_bits >> 3
+ and r8, r8, #24 // shift_bits &= 24
+ lsr r3, r3, r8 // next_bits >>= shift_bits
+ sub r8, r8, r6 // shift_bits -= 16 + cnt
+ str r5, [r0, #BUF_POS]
+ lsl r3, r3, r8 // next_bits <<= shift_bits
+ rsb r6, r8, #16 // cnt = cnt + 32 - shift_bits
+ eor r7, r7, r3 // dif ^= next_bits
+ b 9f
+
+2: // refill_eob
+ rsb r5, r6, #8 // c = 8 - cnt
+3:
+ cmp r3, r4
+ bge 4f
+ ldrb r8, [r3], #1
+ lsl r8, r8, r5
+ eor r7, r7, r8
+ subs r5, r5, #8
+ bge 3b
+
+4: // refill_eob_end
+ str r3, [r0, #BUF_POS]
+ rsb r6, r5, #8 // cnt = 8 - c
+
+9:
+ lsl lr, lr, #1
+ sub lr, lr, #5
+ lsr r12, r7, #16
+ adds r2, r2, lr // carry = tok_br < 3 || tok == 15
+ vdup.16 q1, r12
+ bcc 1b // loop if !carry
+ add r2, r2, #30
+ str r6, [r0, #CNT]
+ add sp, sp, #48
+ str r7, [r0, #DIF]
+ lsr r0, r2, #1
+ pop {r4-r10,pc}
+endfunc
+
+function msac_decode_bool_equi_neon, export=1
+ push {r4-r10,lr}
+ ldr r5, [r0, #RNG]
+ ldr r6, [r0, #CNT]
+ sub sp, sp, #48
+ ldr r7, [r0, #DIF]
+ bic r4, r5, #0xff // r &= 0xff00
+ add r4, r4, #8
+ mov r2, #0
+ subs r8, r7, r4, lsl #15 // dif - vw
+ lsr r4, r4, #1 // v
+ sub r5, r5, r4 // r - v
+ itee lo
+ movlo r2, #1
+ movhs r4, r5 // if (ret) v = r - v;
+ movhs r7, r8 // if (ret) dif = dif - vw;
+
+ clz r5, r4 // clz(rng)
+ mvn r7, r7 // ~dif
+ eor r5, r5, #16 // d = clz(rng) ^ 16
+ mov lr, r2
+ b L(renorm2)
+endfunc
+
+function msac_decode_bool_neon, export=1
+ push {r4-r10,lr}
+ ldr r5, [r0, #RNG]
+ ldr r6, [r0, #CNT]
+ sub sp, sp, #48
+ ldr r7, [r0, #DIF]
+ lsr r4, r5, #8 // r >> 8
+ bic r1, r1, #0x3f // f &= ~63
+ mul r4, r4, r1
+ mov r2, #0
+ lsr r4, r4, #7
+ add r4, r4, #4 // v
+ subs r8, r7, r4, lsl #16 // dif - vw
+ sub r5, r5, r4 // r - v
+ itee lo
+ movlo r2, #1
+ movhs r4, r5 // if (ret) v = r - v;
+ movhs r7, r8 // if (ret) dif = dif - vw;
+
+ clz r5, r4 // clz(rng)
+ mvn r7, r7 // ~dif
+ eor r5, r5, #16 // d = clz(rng) ^ 16
+ mov lr, r2
+ b L(renorm2)
+endfunc
+
+function msac_decode_bool_adapt_neon, export=1
+ push {r4-r10,lr}
+ ldr r9, [r1] // cdf[0-1]
+ ldr r5, [r0, #RNG]
+ movw lr, #0xffc0
+ ldr r6, [r0, #CNT]
+ sub sp, sp, #48
+ ldr r7, [r0, #DIF]
+ lsr r4, r5, #8 // r >> 8
+ and r2, r9, lr // f &= ~63
+ mul r4, r4, r2
+ mov r2, #0
+ lsr r4, r4, #7
+ add r4, r4, #4 // v
+ subs r8, r7, r4, lsl #16 // dif - vw
+ sub r5, r5, r4 // r - v
+ ldr r10, [r0, #ALLOW_UPDATE_CDF]
+ itee lo
+ movlo r2, #1
+ movhs r4, r5 // if (ret) v = r - v;
+ movhs r7, r8 // if (ret) dif = dif - vw;
+
+ cmp r10, #0
+ clz r5, r4 // clz(rng)
+ mvn r7, r7 // ~dif
+ eor r5, r5, #16 // d = clz(rng) ^ 16
+ mov lr, r2
+
+ beq L(renorm2)
+
+ lsr r2, r9, #16 // count = cdf[1]
+ uxth r9, r9 // cdf[0]
+
+ sub r3, r2, r2, lsr #5 // count - (count >= 32)
+ lsr r2, r2, #4 // count >> 4
+ add r10, r3, #1 // count + (count < 32)
+ add r2, r2, #4 // rate = (count >> 4) | 4
+
+ sub r9, r9, lr // cdf[0] -= bit
+ sub r3, r9, lr, lsl #15 // {cdf[0], cdf[0] - 32769}
+ asr r3, r3, r2 // {cdf[0], cdf[0] - 32769} >> rate
+ sub r9, r9, r3 // cdf[0]
+
+ strh r9, [r1]
+ strh r10, [r1, #2]
+
+ b L(renorm2)
+endfunc
diff --git a/third_party/dav1d/src/arm/32/refmvs.S b/third_party/dav1d/src/arm/32/refmvs.S
new file mode 100644
index 0000000000..e16c5448d0
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/refmvs.S
@@ -0,0 +1,97 @@
+/*
+ * Copyright © 2021, VideoLAN and dav1d authors
+ * Copyright © 2021, Martin Storsjo
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/arm/asm.S"
+#include "util.S"
+
+// void dav1d_splat_mv_neon(refmvs_block **rr, const refmvs_block *rmv,
+// int bx4, int bw4, int bh4)
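+//
+// Rough C model (a sketch; refmvs_block is 12 bytes, which is where the
+// bx4 * 12 offset computed below comes from):
+//     for (int y = 0; y < bh4; y++) {
+//         refmvs_block *r = rr[y] + bx4;
+//         for (int x = 0; x < bw4; x++)
+//             r[x] = *rmv;
+//     }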
+
+function splat_mv_neon, export=1
+ push {r4, lr}
+ vld1.8 {q3}, [r1]
+ ldr r4, [sp, #8]
+ clz r3, r3
+ adr lr, L(splat_tbl)
+ sub r3, r3, #26
+ vext.8 q2, q3, q3, #12
+ ldr r3, [lr, r3, lsl #2]
+ add r2, r2, r2, lsl #1
+ vext.8 q0, q2, q3, #4
+ add r3, lr, r3
+ vext.8 q1, q2, q3, #8
+ lsl r2, r2, #2
+ vext.8 q2, q2, q3, #12
+ vmov q3, q0
+1:
+ ldr r1, [r0], #4
+ subs r4, r4, #1
+ add r1, r1, r2
+ bx r3
+
+ .align 2
+L(splat_tbl):
+ .word 320f - L(splat_tbl) + CONFIG_THUMB
+ .word 160f - L(splat_tbl) + CONFIG_THUMB
+ .word 80f - L(splat_tbl) + CONFIG_THUMB
+ .word 40f - L(splat_tbl) + CONFIG_THUMB
+ .word 20f - L(splat_tbl) + CONFIG_THUMB
+ .word 10f - L(splat_tbl) + CONFIG_THUMB
+
+10:
+ vst1.8 {d0}, [r1]
+ vstr s2, [r1, #8]
+ bgt 1b
+ pop {r4, pc}
+20:
+ vst1.8 {q0}, [r1]
+ vstr d2, [r1, #16]
+ bgt 1b
+ pop {r4, pc}
+40:
+ vst1.8 {q0, q1}, [r1]!
+ vst1.8 {q2}, [r1]
+ bgt 1b
+ pop {r4, pc}
+320:
+ vst1.8 {q0, q1}, [r1]!
+ vst1.8 {q2, q3}, [r1]!
+ vst1.8 {q1, q2}, [r1]!
+ vst1.8 {q0, q1}, [r1]!
+ vst1.8 {q2, q3}, [r1]!
+ vst1.8 {q1, q2}, [r1]!
+160:
+ vst1.8 {q0, q1}, [r1]!
+ vst1.8 {q2, q3}, [r1]!
+ vst1.8 {q1, q2}, [r1]!
+80:
+ vst1.8 {q0, q1}, [r1]!
+ vst1.8 {q2, q3}, [r1]!
+ vst1.8 {q1, q2}, [r1]
+ bgt 1b
+ pop {r4, pc}
+endfunc
diff --git a/third_party/dav1d/src/arm/32/util.S b/third_party/dav1d/src/arm/32/util.S
new file mode 100644
index 0000000000..c3710d3767
--- /dev/null
+++ b/third_party/dav1d/src/arm/32/util.S
@@ -0,0 +1,184 @@
+/******************************************************************************
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2015 Martin Storsjo
+ * Copyright © 2015 Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#ifndef DAV1D_SRC_ARM_32_UTIL_S
+#define DAV1D_SRC_ARM_32_UTIL_S
+
+#include "config.h"
+#include "src/arm/asm.S"
+
+.macro movrel_local rd, val, offset=0
+#if defined(PIC)
+ ldr \rd, 90001f
+ b 90002f
+90001:
+ .word \val + \offset - (90002f + 8 - 4 * CONFIG_THUMB)
+90002:
+ add \rd, \rd, pc
+#else
+ movw \rd, #:lower16:\val+\offset
+ movt \rd, #:upper16:\val+\offset
+#endif
+.endm
+
+.macro movrel rd, val, offset=0
+#if defined(PIC) && defined(__APPLE__)
+ ldr \rd, 1f
+ b 2f
+1:
+ .word 3f - (2f + 8 - 4 * CONFIG_THUMB)
+2:
+ ldr \rd, [pc, \rd]
+.if \offset < 0
+ sub \rd, \rd, #-(\offset)
+.elseif \offset > 0
+ add \rd, \rd, #\offset
+.endif
+ .non_lazy_symbol_pointer
+3:
+ .indirect_symbol \val
+ .word 0
+ .text
+#else
+ movrel_local \rd, \val, \offset
+#endif
+.endm
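+
+// Example (used by the warp code in this directory):
+//     movrel r11, X(mc_warp_filter), 64*8
+// loads the address of the table plus a constant byte offset. movrel_local
+// resolves the address PC-relatively and is meant for symbols defined in the
+// same file; movrel additionally goes through a non-lazy symbol pointer on
+// Apple PIC builds so it also works for external symbols.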
+
+// This macro clobbers r7 (and r12 on windows) and stores data at the
+// bottom of the stack; on return, sp points to the start of the allocated
+// space that the caller can use.
+.macro sub_sp_align space
+#if CONFIG_THUMB
+ mov r7, sp
+ and r7, r7, #15
+#else
+ and r7, sp, #15
+#endif
+ sub sp, sp, r7
+ // Now that the stack is aligned, store the amount of adjustment back
+ // on the stack, so we don't have to waste a register on a frame
+ // pointer.
+ str r7, [sp, #-16]!
+#ifdef _WIN32
+.if \space > 8192
+ // Here, we'd need to touch two (or more) pages while decrementing
+ // the stack pointer.
+ .error "sub_sp_align doesn't support values over 8K at the moment"
+.elseif \space > 4096
+ sub r7, sp, #4096
+ ldr r12, [r7]
+ sub r7, r7, #(\space - 4096)
+ mov sp, r7
+.else
+ sub sp, sp, #\space
+.endif
+#else
+.if \space >= 4096
+ sub sp, sp, #(\space)/4096*4096
+.endif
+.if (\space % 4096) != 0
+ sub sp, sp, #(\space)%4096
+.endif
+#endif
+.endm
+
+.macro add_sp_align space
+.if \space >= 4096
+ add sp, sp, #(\space)/4096*4096
+.endif
+.if (\space % 4096) != 0
+ add sp, sp, #(\space)%4096
+.endif
+ ldr r7, [sp], #16
+ // Add back the original stack adjustment
+ add sp, sp, r7
+.endm
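+
+// Typical usage (a sketch); the two macros must be paired with the same size:
+//     sub_sp_align 256
+//     ...              // use [sp, #0] .. [sp, #255] as aligned scratch
+//     add_sp_align 256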
+
+.macro transpose_8x8b q0, q1, q2, q3, r0, r1, r2, r3, r4, r5, r6, r7
+ vtrn.32 \q0, \q2
+ vtrn.32 \q1, \q3
+
+ vtrn.16 \r0, \r2
+ vtrn.16 \r1, \r3
+ vtrn.16 \r4, \r6
+ vtrn.16 \r5, \r7
+
+ vtrn.8 \r0, \r1
+ vtrn.8 \r2, \r3
+ vtrn.8 \r4, \r5
+ vtrn.8 \r6, \r7
+.endm
+
+.macro transpose_8x8h r0, r1, r2, r3, r4, r5, r6, r7, d0, d1, d2, d3, d4, d5, d6, d7
+ vswp \d0, \d4
+ vswp \d1, \d5
+ vswp \d2, \d6
+ vswp \d3, \d7
+
+ vtrn.32 \r0, \r2
+ vtrn.32 \r1, \r3
+ vtrn.32 \r4, \r6
+ vtrn.32 \r5, \r7
+
+ vtrn.16 \r0, \r1
+ vtrn.16 \r2, \r3
+ vtrn.16 \r4, \r5
+ vtrn.16 \r6, \r7
+.endm
+
+.macro transpose_4x8b q0, q1, r0, r1, r2, r3
+ vtrn.16 \q0, \q1
+
+ vtrn.8 \r0, \r1
+ vtrn.8 \r2, \r3
+.endm
+
+.macro transpose_4x4s q0, q1, q2, q3, r0, r1, r2, r3, r4, r5, r6, r7
+ vswp \r1, \r4 // vtrn.64 \q0, \q2
+ vswp \r3, \r6 // vtrn.64 \q1, \q3
+
+ vtrn.32 \q0, \q1
+ vtrn.32 \q2, \q3
+.endm
+
+.macro transpose_4x4h q0, q1, r0, r1, r2, r3
+ vtrn.32 \q0, \q1
+
+ vtrn.16 \r0, \r1
+ vtrn.16 \r2, \r3
+.endm
+
+.macro transpose_4x8h r0, r1, r2, r3
+ vtrn.32 \r0, \r2
+ vtrn.32 \r1, \r3
+
+ vtrn.16 \r0, \r1
+ vtrn.16 \r2, \r3
+.endm
+
+#endif /* DAV1D_SRC_ARM_32_UTIL_S */