summary | refs | log | tree | commit | diff | stats
path: root/third_party/dav1d/tests
diff options
context:
space:
mode:
authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-28 14:29:10 +0000
commit2aa4a82499d4becd2284cdb482213d541b8804dd (patch)
treeb80bf8bf13c3766139fbacc530efd0dd9d54394c /third_party/dav1d/tests
parentInitial commit. (diff)
downloadfirefox-2aa4a82499d4becd2284cdb482213d541b8804dd.tar.xz
firefox-2aa4a82499d4becd2284cdb482213d541b8804dd.zip
Adding upstream version 86.0.1. (tag: upstream/86.0.1, branch: upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/dav1d/tests')
-rw-r--r--third_party/dav1d/tests/checkasm/arm/checkasm_32.S201
-rw-r--r--third_party/dav1d/tests/checkasm/arm/checkasm_64.S211
-rw-r--r--third_party/dav1d/tests/checkasm/cdef.c150
-rw-r--r--third_party/dav1d/tests/checkasm/checkasm.c815
-rw-r--r--third_party/dav1d/tests/checkasm/checkasm.h332
-rw-r--r--third_party/dav1d/tests/checkasm/filmgrain.c331
-rw-r--r--third_party/dav1d/tests/checkasm/ipred.c286
-rw-r--r--third_party/dav1d/tests/checkasm/itx.c291
-rw-r--r--third_party/dav1d/tests/checkasm/loopfilter.c204
-rw-r--r--third_party/dav1d/tests/checkasm/looprestoration.c185
-rw-r--r--third_party/dav1d/tests/checkasm/mc.c756
-rw-r--r--third_party/dav1d/tests/checkasm/msac.c291
-rw-r--r--third_party/dav1d/tests/checkasm/x86/checkasm.asm287
-rw-r--r--third_party/dav1d/tests/header_test.c33
-rw-r--r--third_party/dav1d/tests/libfuzzer/alloc_fail.c102
-rw-r--r--third_party/dav1d/tests/libfuzzer/alloc_fail.h35
-rw-r--r--third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.c193
-rw-r--r--third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.h37
-rw-r--r--third_party/dav1d/tests/libfuzzer/main.c100
-rw-r--r--third_party/dav1d/tests/libfuzzer/meson.build109
-rw-r--r--third_party/dav1d/tests/meson.build132
21 files changed, 5081 insertions, 0 deletions
diff --git a/third_party/dav1d/tests/checkasm/arm/checkasm_32.S b/third_party/dav1d/tests/checkasm/arm/checkasm_32.S
new file mode 100644
index 0000000000..a186ef8fc2
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/arm/checkasm_32.S
@@ -0,0 +1,201 @@
+/******************************************************************************
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2015 Martin Storsjo
+ * Copyright © 2015 Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#define PRIVATE_PREFIX checkasm_
+
+#include "src/arm/asm.S"
+#include "src/arm/32/util.S"
+
+const register_init, align=3
+ .quad 0x21f86d66c8ca00ce
+ .quad 0x75b6ba21077c48ad
+ .quad 0xed56bb2dcb3c7736
+ .quad 0x8bda43d3fd1a7e06
+ .quad 0xb64a9c9e5d318408
+ .quad 0xdf9a54b303f1d3a3
+ .quad 0x4a75479abd64e097
+ .quad 0x249214109d5d1c88
+endconst
+
+const error_message_fpscr
+ .asciz "failed to preserve register FPSCR, changed bits: %x"
+error_message_gpr:
+ .asciz "failed to preserve register r%d"
+error_message_vfp:
+ .asciz "failed to preserve register d%d"
+error_message_stack:
+ .asciz "failed to preserve stack"
+endconst
+
+@ max number of args used by any asm function.
+#define MAX_ARGS 15
+
+#define ARG_STACK 4*(MAX_ARGS - 4)
+
+@ Align the used stack space to 8 to preserve the stack alignment.
+@ +8 for stack canary reference.
+#define ARG_STACK_A (((ARG_STACK + pushed + 7) & ~7) - pushed + 8)
+
+.macro clobbercheck variant
+.equ pushed, 4*9
+function checked_call_\variant, export=1
+ push {r4-r11, lr}
+.ifc \variant, vfp
+ vpush {d8-d15}
+ fmrx r4, FPSCR
+ push {r4}
+.equ pushed, pushed + 16*4 + 4
+.endif
+
+ movrel r12, register_init
+.ifc \variant, vfp
+ vldm r12, {d8-d15}
+.endif
+ ldm r12, {r4-r11}
+
+ sub sp, sp, #ARG_STACK_A
+.equ pos, 0
+.rept MAX_ARGS-4
+ ldr r12, [sp, #ARG_STACK_A + pushed + 8 + pos]
+ str r12, [sp, #pos]
+.equ pos, pos + 4
+.endr
+
+ @ For stack overflows, the callee is free to overwrite the parameters
+ @ that were passed on the stack (if any), so we can only check after
+ @ that point. First figure out how many parameters the function
+ @ really took on the stack:
+ ldr r12, [sp, #ARG_STACK_A + pushed + 8 + 4*(MAX_ARGS-4)]
+ @ Load the first non-parameter value from the stack, that should be
+ @ left untouched by the function. Store a copy of it inverted, so that
+ @ e.g. overwriting everything with zero would be noticed.
+ ldr r12, [sp, r12, lsl #2]
+ mvn r12, r12
+ str r12, [sp, #ARG_STACK_A - 4]
+
+ mov r12, r0
+ mov r0, r2
+ mov r1, r3
+ ldrd r2, r3, [sp, #ARG_STACK_A + pushed]
+ @ Call the target function
+ blx r12
+
+ @ Load the number of stack parameters, stack canary and its reference
+ ldr r12, [sp, #ARG_STACK_A + pushed + 8 + 4*(MAX_ARGS-4)]
+ ldr r2, [sp, r12, lsl #2]
+ ldr r3, [sp, #ARG_STACK_A - 4]
+
+ add sp, sp, #ARG_STACK_A
+ push {r0, r1}
+
+ mvn r3, r3
+ cmp r2, r3
+ bne 5f
+
+ movrel r12, register_init
+.ifc \variant, vfp
+.macro check_reg_vfp, dreg, offset
+ ldrd r2, r3, [r12, #8 * (\offset)]
+ vmov r0, lr, \dreg
+ eor r2, r2, r0
+ eor r3, r3, lr
+ orrs r2, r2, r3
+ bne 4f
+.endm
+
+.irp n, 8, 9, 10, 11, 12, 13, 14, 15
+ @ keep track of the checked double/SIMD register
+ mov r1, #\n
+ check_reg_vfp d\n, \n-8
+.endr
+.purgem check_reg_vfp
+
+ fmrx r1, FPSCR
+ ldr r3, [sp, #8]
+ eor r1, r1, r3
+ @ Ignore changes in bits 0-4 and 7
+ bic r1, r1, #0x9f
+ @ Ignore changes in the topmost 5 bits
+ bics r1, r1, #0xf8000000
+ bne 3f
+.endif
+
+ @ keep track of the checked GPR
+ mov r1, #4
+.macro check_reg reg1, reg2=
+ ldrd r2, r3, [r12], #8
+ eors r2, r2, \reg1
+ bne 2f
+ add r1, r1, #1
+.ifnb \reg2
+ eors r3, r3, \reg2
+ bne 2f
+.endif
+ add r1, r1, #1
+.endm
+ check_reg r4, r5
+ check_reg r6, r7
+@ r9 is a volatile register in the ios ABI
+#ifdef __APPLE__
+ check_reg r8
+#else
+ check_reg r8, r9
+#endif
+ check_reg r10, r11
+.purgem check_reg
+
+ b 0f
+5:
+ movrel r0, error_message_stack
+ b 1f
+4:
+ movrel r0, error_message_vfp
+ b 1f
+3:
+ movrel r0, error_message_fpscr
+ b 1f
+2:
+ movrel r0, error_message_gpr
+1:
+#ifdef PREFIX
+ bl _checkasm_fail_func
+#else
+ bl checkasm_fail_func
+#endif
+0:
+ pop {r0, r1}
+.ifc \variant, vfp
+ pop {r2}
+ fmxr FPSCR, r2
+ vpop {d8-d15}
+.endif
+ pop {r4-r11, pc}
+endfunc
+.endm
+
+clobbercheck vfp
diff --git a/third_party/dav1d/tests/checkasm/arm/checkasm_64.S b/third_party/dav1d/tests/checkasm/arm/checkasm_64.S
new file mode 100644
index 0000000000..25749145a5
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/arm/checkasm_64.S
@@ -0,0 +1,211 @@
+/******************************************************************************
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2015 Martin Storsjo
+ * Copyright © 2015 Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *****************************************************************************/
+
+#define PRIVATE_PREFIX checkasm_
+
+#include "src/arm/asm.S"
+#include "src/arm/64/util.S"
+
+const register_init, align=4
+ .quad 0x21f86d66c8ca00ce
+ .quad 0x75b6ba21077c48ad
+ .quad 0xed56bb2dcb3c7736
+ .quad 0x8bda43d3fd1a7e06
+ .quad 0xb64a9c9e5d318408
+ .quad 0xdf9a54b303f1d3a3
+ .quad 0x4a75479abd64e097
+ .quad 0x249214109d5d1c88
+ .quad 0x1a1b2550a612b48c
+ .quad 0x79445c159ce79064
+ .quad 0x2eed899d5a28ddcd
+ .quad 0x86b2536fcd8cf636
+ .quad 0xb0856806085e7943
+ .quad 0x3f2bf84fc0fcca4e
+ .quad 0xacbd382dcf5b8de2
+ .quad 0xd229e1f5b281303f
+ .quad 0x71aeaff20b095fd9
+ .quad 0xab63e2e11fa38ed9
+endconst
+
+
+const error_message_register
+ .asciz "failed to preserve register"
+error_message_stack:
+ .asciz "stack clobbered"
+endconst
+
+
+// max number of args used by any asm function.
+#define MAX_ARGS 15
+
+#define CLOBBER_STACK ((8*MAX_ARGS + 15) & ~15)
+
+function stack_clobber, export=1
+ mov x3, sp
+ mov x2, #CLOBBER_STACK
+1:
+ stp x0, x1, [sp, #-16]!
+ subs x2, x2, #16
+ b.gt 1b
+ mov sp, x3
+ ret
+endfunc
+
+// + 16 for stack canary reference
+#define ARG_STACK ((8*(MAX_ARGS - 8) + 15) & ~15 + 16)
+
+function checked_call, export=1
+ stp x29, x30, [sp, #-16]!
+ mov x29, sp
+ stp x19, x20, [sp, #-16]!
+ stp x21, x22, [sp, #-16]!
+ stp x23, x24, [sp, #-16]!
+ stp x25, x26, [sp, #-16]!
+ stp x27, x28, [sp, #-16]!
+ stp d8, d9, [sp, #-16]!
+ stp d10, d11, [sp, #-16]!
+ stp d12, d13, [sp, #-16]!
+ stp d14, d15, [sp, #-16]!
+
+ movrel x9, register_init
+ ldp d8, d9, [x9], #16
+ ldp d10, d11, [x9], #16
+ ldp d12, d13, [x9], #16
+ ldp d14, d15, [x9], #16
+ ldp x19, x20, [x9], #16
+ ldp x21, x22, [x9], #16
+ ldp x23, x24, [x9], #16
+ ldp x25, x26, [x9], #16
+ ldp x27, x28, [x9], #16
+
+ sub sp, sp, #ARG_STACK
+.equ pos, 0
+.rept MAX_ARGS-8
+ // Skip the first 8 args, that are loaded into registers
+ ldr x9, [x29, #16 + 8*8 + pos]
+ str x9, [sp, #pos]
+.equ pos, pos + 8
+.endr
+
+ // Fill x8-x17 with garbage. This doesn't have to be preserved,
+ // but avoids relying on them having any particular value.
+ movrel x9, register_init
+ ldp x10, x11, [x9], #32
+ ldp x12, x13, [x9], #32
+ ldp x14, x15, [x9], #32
+ ldp x16, x17, [x9], #32
+ ldp x8, x9, [x9]
+
+ // For stack overflows, the callee is free to overwrite the parameters
+ // that were passed on the stack (if any), so we can only check after
+ // that point. First figure out how many parameters the function
+ // really took on the stack:
+ ldr w2, [x29, #16 + 8*8 + (MAX_ARGS-8)*8]
+ // Load the first non-parameter value from the stack, that should be
+ // left untouched by the function. Store a copy of it inverted, so that
+ // e.g. overwriting everything with zero would be noticed.
+ ldr x2, [sp, x2, lsl #3]
+ mvn x2, x2
+ str x2, [sp, #ARG_STACK-8]
+
+ // Load the in-register arguments
+ mov x12, x0
+ ldp x0, x1, [x29, #16]
+ ldp x2, x3, [x29, #32]
+ ldp x4, x5, [x29, #48]
+ ldp x6, x7, [x29, #64]
+ // Call the target function
+ blr x12
+
+ // Load the number of stack parameters, stack canary and its reference
+ ldr w2, [x29, #16 + 8*8 + (MAX_ARGS-8)*8]
+ ldr x2, [sp, x2, lsl #3]
+ ldr x3, [sp, #ARG_STACK-8]
+
+ add sp, sp, #ARG_STACK
+ stp x0, x1, [sp, #-16]!
+
+ mvn x3, x3
+ cmp x2, x3
+ b.ne 2f
+
+ movrel x9, register_init
+ movi v3.8h, #0
+
+.macro check_reg_neon reg1, reg2
+ ldr q1, [x9], #16
+ uzp1 v2.2d, v\reg1\().2d, v\reg2\().2d
+ eor v1.16b, v1.16b, v2.16b
+ orr v3.16b, v3.16b, v1.16b
+.endm
+ check_reg_neon 8, 9
+ check_reg_neon 10, 11
+ check_reg_neon 12, 13
+ check_reg_neon 14, 15
+ uqxtn v3.8b, v3.8h
+ umov x3, v3.d[0]
+
+.macro check_reg reg1, reg2
+ ldp x0, x1, [x9], #16
+ eor x0, x0, \reg1
+ eor x1, x1, \reg2
+ orr x3, x3, x0
+ orr x3, x3, x1
+.endm
+ check_reg x19, x20
+ check_reg x21, x22
+ check_reg x23, x24
+ check_reg x25, x26
+ check_reg x27, x28
+
+ cbz x3, 0f
+
+ movrel x0, error_message_register
+ b 1f
+2:
+ movrel x0, error_message_stack
+1:
+#ifdef PREFIX
+ bl _checkasm_fail_func
+#else
+ bl checkasm_fail_func
+#endif
+0:
+ ldp x0, x1, [sp], #16
+ ldp d14, d15, [sp], #16
+ ldp d12, d13, [sp], #16
+ ldp d10, d11, [sp], #16
+ ldp d8, d9, [sp], #16
+ ldp x27, x28, [sp], #16
+ ldp x25, x26, [sp], #16
+ ldp x23, x24, [sp], #16
+ ldp x21, x22, [sp], #16
+ ldp x19, x20, [sp], #16
+ ldp x29, x30, [sp], #16
+ ret
+endfunc
diff --git a/third_party/dav1d/tests/checkasm/cdef.c b/third_party/dav1d/tests/checkasm/cdef.c
new file mode 100644
index 0000000000..7259e1a906
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/cdef.c
@@ -0,0 +1,150 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+
+#include <string.h>
+#include <stdio.h>
+
+#include "common/dump.h"
+
+#include "src/levels.h"
+#include "src/cdef.h"
+
+static int to_binary(int x) { /* 0-15 -> 0000-1111 */
+ return (x & 1) + 5 * (x & 2) + 25 * (x & 4) + 125 * (x & 8);
+}
+
+static void init_tmp(pixel *buf, int n, const int bitdepth_max) {
+ const int fill_type = rnd() & 7;
+ if (fill_type == 0)
+ while (n--) /* check for cdef_filter underflows */
+ *buf++ = rnd() & 1;
+ else if (fill_type == 1)
+ while (n--) /* check for cdef_filter overflows */
+ *buf++ = bitdepth_max - (rnd() & 1);
+ else
+ while (n--)
+ *buf++ = rnd() & bitdepth_max;
+}
+
+static void check_cdef_filter(const cdef_fn fn, const int w, const int h) {
+ ALIGN_STK_64(pixel, c_src, 16 * 10 + 16, ), *const c_dst = c_src + 8;
+ ALIGN_STK_64(pixel, a_src, 16 * 10 + 16, ), *const a_dst = a_src + 8;
+ ALIGN_STK_64(pixel, top_buf, 16 * 2 + 16, ), *const top = top_buf + 8;
+ ALIGN_STK_16(pixel, left, 8,[2]);
+ const ptrdiff_t stride = 16 * sizeof(pixel);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const pixel (*left)[2],
+ const pixel *top, int pri_strength, int sec_strength,
+ int dir, int damping, enum CdefEdgeFlags edges HIGHBD_DECL_SUFFIX);
+
+ if (check_func(fn, "cdef_filter_%dx%d_%dbpc", w, h, BITDEPTH)) {
+ for (int dir = 0; dir < 8; dir++) {
+ for (enum CdefEdgeFlags edges = 0x0; edges <= 0xf; edges++) {
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ const int bitdepth_min_8 = bitdepth_from_max(bitdepth_max) - 8;
+
+ init_tmp(c_src, 16 * 10 + 16, bitdepth_max);
+ init_tmp(top_buf, 16 * 2 + 16, bitdepth_max);
+ init_tmp((pixel *) left, 8 * 2, bitdepth_max);
+ memcpy(a_src, c_src, (16 * 10 + 16) * sizeof(pixel));
+
+ const int lvl = 1 + (rnd() % 62);
+ const int damping = 3 + (rnd() & 3) + bitdepth_min_8 - (w == 4 || (rnd() & 1));
+ int pri_strength = (lvl >> 2) << bitdepth_min_8;
+ int sec_strength = lvl & 3;
+ sec_strength += sec_strength == 3;
+ sec_strength <<= bitdepth_min_8;
+ call_ref(c_dst, stride, left, top, pri_strength, sec_strength,
+ dir, damping, edges HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, stride, left, top, pri_strength, sec_strength,
+ dir, damping, edges HIGHBD_TAIL_SUFFIX);
+ if (checkasm_check_pixel(c_dst, stride, a_dst, stride, w, h, "dst")) {
+ fprintf(stderr, "strength = %d:%d, dir = %d, damping = %d, edges = %04d\n",
+ pri_strength, sec_strength, dir, damping, to_binary(edges));
+ return;
+ }
+ if (dir == 7 && (edges == 0x5 || edges == 0xa || edges == 0xf)) {
+ /* Benchmark a fixed set of cases to get consistent results:
+ * 1) top/left edges and pri_strength only
+ * 2) bottom/right edges and sec_strength only
+ * 3) all edges and both pri_strength and sec_strength
+ */
+ pri_strength = (edges & 1) << bitdepth_min_8;
+ sec_strength = (edges & 2) << bitdepth_min_8;
+ bench_new(a_dst, stride, left, top, pri_strength, sec_strength,
+ dir, damping, edges HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ }
+ }
+}
+
+static void check_cdef_direction(const cdef_dir_fn fn) {
+ ALIGN_STK_64(pixel, src, 8 * 8,);
+
+ declare_func(int, pixel *src, ptrdiff_t dst_stride, unsigned *var
+ HIGHBD_DECL_SUFFIX);
+
+ if (check_func(fn, "cdef_dir_%dbpc", BITDEPTH)) {
+ unsigned c_var, a_var;
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ init_tmp(src, 64, bitdepth_max);
+
+ const int c_dir = call_ref(src, 8 * sizeof(pixel), &c_var HIGHBD_TAIL_SUFFIX);
+ const int a_dir = call_new(src, 8 * sizeof(pixel), &a_var HIGHBD_TAIL_SUFFIX);
+ if (c_var != a_var || c_dir != a_dir) {
+ if (fail()) {
+ hex_fdump(stderr, src, 8 * sizeof(pixel), 8, 8, "src");
+ fprintf(stderr, "c_dir %d a_dir %d\n", c_dir, a_dir);
+ }
+ }
+ bench_new(src, 8 * sizeof(pixel), &a_var HIGHBD_TAIL_SUFFIX);
+ }
+ report("cdef_dir");
+}
+
+void bitfn(checkasm_check_cdef)(void) {
+ Dav1dCdefDSPContext c;
+ bitfn(dav1d_cdef_dsp_init)(&c);
+
+ check_cdef_direction(c.dir);
+
+ check_cdef_filter(c.fb[0], 8, 8);
+ check_cdef_filter(c.fb[1], 4, 8);
+ check_cdef_filter(c.fb[2], 4, 4);
+ report("cdef_filter");
+}
diff --git a/third_party/dav1d/tests/checkasm/checkasm.c b/third_party/dav1d/tests/checkasm/checkasm.c
new file mode 100644
index 0000000000..b6a21b7669
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/checkasm.c
@@ -0,0 +1,815 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "tests/checkasm/checkasm.h"
+
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <string.h>
+
+#include "src/cpu.h"
+
+#ifdef _WIN32
+#include <windows.h>
+#define COLOR_RED FOREGROUND_RED
+#define COLOR_GREEN FOREGROUND_GREEN
+#define COLOR_YELLOW (FOREGROUND_RED|FOREGROUND_GREEN)
+
+static unsigned get_seed(void) {
+ return GetTickCount();
+}
+#else
+#include <unistd.h>
+#include <signal.h>
+#include <time.h>
+#ifdef __APPLE__
+#include <mach/mach_time.h>
+#endif
+#define COLOR_RED 1
+#define COLOR_GREEN 2
+#define COLOR_YELLOW 3
+
+static unsigned get_seed(void) {
+#ifdef __APPLE__
+ return (unsigned) mach_absolute_time();
+#elif defined(HAVE_CLOCK_GETTIME)
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return (unsigned) (1000000000ULL * ts.tv_sec + ts.tv_nsec);
+#endif
+}
+#endif
+
+/* List of tests to invoke */
+static const struct {
+ const char *name;
+ void (*func)(void);
+} tests[] = {
+ { "msac", checkasm_check_msac },
+#if CONFIG_8BPC
+ { "cdef_8bpc", checkasm_check_cdef_8bpc },
+ { "filmgrain_8bpc", checkasm_check_filmgrain_8bpc },
+ { "ipred_8bpc", checkasm_check_ipred_8bpc },
+ { "itx_8bpc", checkasm_check_itx_8bpc },
+ { "loopfilter_8bpc", checkasm_check_loopfilter_8bpc },
+ { "looprestoration_8bpc", checkasm_check_looprestoration_8bpc },
+ { "mc_8bpc", checkasm_check_mc_8bpc },
+#endif
+#if CONFIG_16BPC
+ { "cdef_16bpc", checkasm_check_cdef_16bpc },
+ { "filmgrain_16bpc", checkasm_check_filmgrain_16bpc },
+ { "ipred_16bpc", checkasm_check_ipred_16bpc },
+ { "itx_16bpc", checkasm_check_itx_16bpc },
+ { "loopfilter_16bpc", checkasm_check_loopfilter_16bpc },
+ { "looprestoration_16bpc", checkasm_check_looprestoration_16bpc },
+ { "mc_16bpc", checkasm_check_mc_16bpc },
+#endif
+ { 0 }
+};
+
+/* List of cpu flags to check */
+static const struct {
+ const char *name;
+ const char *suffix;
+ unsigned flag;
+} cpus[] = {
+#if ARCH_X86
+ { "SSE2", "sse2", DAV1D_X86_CPU_FLAG_SSE2 },
+ { "SSSE3", "ssse3", DAV1D_X86_CPU_FLAG_SSSE3 },
+ { "SSE4.1", "sse4", DAV1D_X86_CPU_FLAG_SSE41 },
+ { "AVX2", "avx2", DAV1D_X86_CPU_FLAG_AVX2 },
+ { "AVX-512 (Ice Lake)", "avx512icl", DAV1D_X86_CPU_FLAG_AVX512ICL },
+#elif ARCH_AARCH64 || ARCH_ARM
+ { "NEON", "neon", DAV1D_ARM_CPU_FLAG_NEON },
+#elif ARCH_PPC64LE
+ { "VSX", "vsx", DAV1D_PPC_CPU_FLAG_VSX },
+#endif
+ { 0 }
+};
+
+typedef struct CheckasmFuncVersion {
+ struct CheckasmFuncVersion *next;
+ void *func;
+ int ok;
+ unsigned cpu;
+ int iterations;
+ uint64_t cycles;
+} CheckasmFuncVersion;
+
+/* Binary search tree node */
+typedef struct CheckasmFunc {
+ struct CheckasmFunc *child[2];
+ CheckasmFuncVersion versions;
+ uint8_t color; /* 0 = red, 1 = black */
+ char name[];
+} CheckasmFunc;
+
+/* Internal state */
+static struct {
+ CheckasmFunc *funcs;
+ CheckasmFunc *current_func;
+ CheckasmFuncVersion *current_func_ver;
+ const char *current_test_name;
+ const char *bench_pattern;
+ size_t bench_pattern_len;
+ int num_checked;
+ int num_failed;
+ int nop_time;
+ unsigned cpu_flag;
+ const char *cpu_flag_name;
+ const char *test_name;
+ unsigned seed;
+ int bench_c;
+ int verbose;
+ int function_listing;
+#if ARCH_X86_64
+ void (*simd_warmup)(void);
+#endif
+} state;
+
+/* float compare support code */
+typedef union {
+ float f;
+ uint32_t i;
+} intfloat;
+
+static uint32_t xs_state[4];
+
+static void xor128_srand(unsigned seed) {
+ xs_state[0] = seed;
+ xs_state[1] = ( seed & 0xffff0000) | (~seed & 0x0000ffff);
+ xs_state[2] = (~seed & 0xffff0000) | ( seed & 0x0000ffff);
+ xs_state[3] = ~seed;
+}
+
+// xor128 from Marsaglia, George (July 2003). "Xorshift RNGs".
+// Journal of Statistical Software. 8 (14).
+// doi:10.18637/jss.v008.i14.
+int xor128_rand(void) {
+ const uint32_t x = xs_state[0];
+ const uint32_t t = x ^ (x << 11);
+
+ xs_state[0] = xs_state[1];
+ xs_state[1] = xs_state[2];
+ xs_state[2] = xs_state[3];
+ uint32_t w = xs_state[3];
+
+ w = (w ^ (w >> 19)) ^ (t ^ (t >> 8));
+ xs_state[3] = w;
+
+ return w >> 1;
+}
+
+static int is_negative(const intfloat u) {
+ return u.i >> 31;
+}
+
+int float_near_ulp(const float a, const float b, const unsigned max_ulp) {
+ intfloat x, y;
+
+ x.f = a;
+ y.f = b;
+
+ if (is_negative(x) != is_negative(y)) {
+ // handle -0.0 == +0.0
+ return a == b;
+ }
+
+ if (llabs((int64_t)x.i - y.i) <= max_ulp)
+ return 1;
+
+ return 0;
+}
+
+int float_near_ulp_array(const float *const a, const float *const b,
+ const unsigned max_ulp, const int len)
+{
+ for (int i = 0; i < len; i++)
+ if (!float_near_ulp(a[i], b[i], max_ulp))
+ return 0;
+
+ return 1;
+}
+
+int float_near_abs_eps(const float a, const float b, const float eps) {
+ return fabsf(a - b) < eps;
+}
+
+int float_near_abs_eps_array(const float *const a, const float *const b,
+ const float eps, const int len)
+{
+ for (int i = 0; i < len; i++)
+ if (!float_near_abs_eps(a[i], b[i], eps))
+ return 0;
+
+ return 1;
+}
+
+int float_near_abs_eps_ulp(const float a, const float b, const float eps,
+ const unsigned max_ulp)
+{
+ return float_near_ulp(a, b, max_ulp) || float_near_abs_eps(a, b, eps);
+}
+
+int float_near_abs_eps_array_ulp(const float *const a, const float *const b,
+ const float eps, const unsigned max_ulp,
+ const int len)
+{
+ for (int i = 0; i < len; i++)
+ if (!float_near_abs_eps_ulp(a[i], b[i], eps, max_ulp))
+ return 0;
+
+ return 1;
+}
+
+/* Print colored text to stderr if the terminal supports it */
+static void color_printf(const int color, const char *const fmt, ...) {
+ static int8_t use_color = -1;
+ va_list arg;
+
+#ifdef _WIN32
+ static HANDLE con;
+ static WORD org_attributes;
+
+ if (use_color < 0) {
+ CONSOLE_SCREEN_BUFFER_INFO con_info;
+ con = GetStdHandle(STD_ERROR_HANDLE);
+ if (con && con != INVALID_HANDLE_VALUE &&
+ GetConsoleScreenBufferInfo(con, &con_info))
+ {
+ org_attributes = con_info.wAttributes;
+ use_color = 1;
+ } else
+ use_color = 0;
+ }
+ if (use_color)
+ SetConsoleTextAttribute(con, (org_attributes & 0xfff0) |
+ (color & 0x0f));
+#else
+ if (use_color < 0) {
+ const char *const term = getenv("TERM");
+ use_color = term && strcmp(term, "dumb") && isatty(2);
+ }
+ if (use_color)
+ fprintf(stderr, "\x1b[%d;3%dm", (color & 0x08) >> 3, color & 0x07);
+#endif
+
+ va_start(arg, fmt);
+ vfprintf(stderr, fmt, arg);
+ va_end(arg);
+
+ if (use_color) {
+#ifdef _WIN32
+ SetConsoleTextAttribute(con, org_attributes);
+#else
+ fprintf(stderr, "\x1b[0m");
+#endif
+ }
+}
+
+/* Deallocate a tree */
+static void destroy_func_tree(CheckasmFunc *const f) {
+ if (f) {
+ CheckasmFuncVersion *v = f->versions.next;
+ while (v) {
+ CheckasmFuncVersion *next = v->next;
+ free(v);
+ v = next;
+ }
+
+ destroy_func_tree(f->child[0]);
+ destroy_func_tree(f->child[1]);
+ free(f);
+ }
+}
+
+/* Allocate a zero-initialized block, clean up and exit on failure */
+static void *checkasm_malloc(const size_t size) {
+ void *const ptr = calloc(1, size);
+ if (!ptr) {
+ fprintf(stderr, "checkasm: malloc failed\n");
+ destroy_func_tree(state.funcs);
+ exit(1);
+ }
+ return ptr;
+}
+
+/* Get the suffix of the specified cpu flag */
+static const char *cpu_suffix(const unsigned cpu) {
+ for (int i = (int)(sizeof(cpus) / sizeof(*cpus)) - 2; i >= 0; i--)
+ if (cpu & cpus[i].flag)
+ return cpus[i].suffix;
+
+ return "c";
+}
+
+#ifdef readtime
+static int cmp_nop(const void *a, const void *b) {
+ return *(const uint16_t*)a - *(const uint16_t*)b;
+}
+
+/* Measure the overhead of the timing code (in decicycles) */
+static int measure_nop_time(void) {
+ uint16_t nops[10000];
+ int nop_sum = 0;
+
+ for (int i = 0; i < 10000; i++) {
+ uint64_t t = readtime();
+ nops[i] = (uint16_t) (readtime() - t);
+ }
+
+ qsort(nops, 10000, sizeof(uint16_t), cmp_nop);
+ for (int i = 2500; i < 7500; i++)
+ nop_sum += nops[i];
+
+ return nop_sum / 500;
+}
+
+/* Print benchmark results */
+static void print_benchs(const CheckasmFunc *const f) {
+ if (f) {
+ print_benchs(f->child[0]);
+
+ /* Only print functions with at least one assembly version */
+ if (state.bench_c || f->versions.cpu || f->versions.next) {
+ const CheckasmFuncVersion *v = &f->versions;
+ do {
+ if (v->iterations) {
+ const int decicycles = (int) (10*v->cycles/v->iterations -
+ state.nop_time) / 4;
+ printf("%s_%s: %d.%d\n", f->name, cpu_suffix(v->cpu),
+ decicycles/10, decicycles%10);
+ }
+ } while ((v = v->next));
+ }
+
+ print_benchs(f->child[1]);
+ }
+}
+#endif
+
+static void print_functions(const CheckasmFunc *const f) {
+ if (f) {
+ print_functions(f->child[0]);
+ printf("%s\n", f->name);
+ print_functions(f->child[1]);
+ }
+}
+
+#define is_digit(x) ((x) >= '0' && (x) <= '9')
+
+/* ASCIIbetical sort except preserving natural order for numbers */
+static int cmp_func_names(const char *a, const char *b) {
+ const char *const start = a;
+ int ascii_diff, digit_diff;
+
+ for (; !(ascii_diff = *(const unsigned char*)a -
+ *(const unsigned char*)b) && *a; a++, b++);
+ for (; is_digit(*a) && is_digit(*b); a++, b++);
+
+ if (a > start && is_digit(a[-1]) &&
+ (digit_diff = is_digit(*a) - is_digit(*b)))
+ {
+ return digit_diff;
+ }
+
+ return ascii_diff;
+}
+
+/* Perform a tree rotation in the specified direction and return the new root */
+static CheckasmFunc *rotate_tree(CheckasmFunc *const f, const int dir) {
+ CheckasmFunc *const r = f->child[dir^1];
+ f->child[dir^1] = r->child[dir];
+ r->child[dir] = f;
+ r->color = f->color;
+ f->color = 0;
+ return r;
+}
+
+#define is_red(f) ((f) && !(f)->color)
+
+/* Balance a left-leaning red-black tree at the specified node */
+static void balance_tree(CheckasmFunc **const root) {
+ CheckasmFunc *const f = *root;
+
+ if (is_red(f->child[0]) && is_red(f->child[1])) {
+ f->color ^= 1;
+ f->child[0]->color = f->child[1]->color = 1;
+ }
+ else if (!is_red(f->child[0]) && is_red(f->child[1]))
+ *root = rotate_tree(f, 0); /* Rotate left */
+ else if (is_red(f->child[0]) && is_red(f->child[0]->child[0]))
+ *root = rotate_tree(f, 1); /* Rotate right */
+}
+
+/* Get a node with the specified name, creating it if it doesn't exist */
+static CheckasmFunc *get_func(CheckasmFunc **const root, const char *const name) {
+ CheckasmFunc *f = *root;
+
+ if (f) {
+ /* Search the tree for a matching node */
+ const int cmp = cmp_func_names(name, f->name);
+ if (cmp) {
+ f = get_func(&f->child[cmp > 0], name);
+
+ /* Rebalance the tree on the way up if a new node was inserted */
+ if (!f->versions.func)
+ balance_tree(root);
+ }
+ } else {
+ /* Allocate and insert a new node into the tree */
+ const size_t name_length = strlen(name) + 1;
+ f = *root = checkasm_malloc(offsetof(CheckasmFunc, name) + name_length);
+ memcpy(f->name, name, name_length);
+ }
+
+ return f;
+}
+
+checkasm_context checkasm_context_buf;
+
+/* Crash handling: attempt to catch crashes and handle them
+ * gracefully instead of just aborting abruptly. */
+#ifdef _WIN32
+/* Vectored exception handler: translate hardware faults raised by a broken
+ * assembly function into a test failure, then resume execution at the
+ * context saved by checkasm_save_context(). */
+static LONG NTAPI signal_handler(EXCEPTION_POINTERS *const e) {
+    switch (e->ExceptionRecord->ExceptionCode) {
+    case EXCEPTION_FLT_DIVIDE_BY_ZERO:
+    case EXCEPTION_INT_DIVIDE_BY_ZERO:
+        checkasm_fail_func("fatal arithmetic error");
+        break;
+    case EXCEPTION_ILLEGAL_INSTRUCTION:
+    case EXCEPTION_PRIV_INSTRUCTION:
+        checkasm_fail_func("illegal instruction");
+        break;
+    case EXCEPTION_ACCESS_VIOLATION:
+    case EXCEPTION_ARRAY_BOUNDS_EXCEEDED:
+    case EXCEPTION_DATATYPE_MISALIGNMENT:
+    case EXCEPTION_IN_PAGE_ERROR:
+    case EXCEPTION_STACK_OVERFLOW:
+        checkasm_fail_func("segmentation fault");
+        break;
+    default:
+        /* Not an exception we recognize; let other handlers see it */
+        return EXCEPTION_CONTINUE_SEARCH;
+    }
+    checkasm_load_context(); /* does not return */
+    return EXCEPTION_CONTINUE_EXECUTION; /* never reached, but shuts up gcc */
+}
+#else
+/* POSIX counterpart: map the fatal signal to a failure message and jump
+ * back to the context saved by checkasm_save_context(). */
+static void signal_handler(const int s) {
+    /* Disarm the handlers first so a fault in the failure path can't recurse */
+    checkasm_set_signal_handler_state(0);
+    checkasm_fail_func(s == SIGFPE ? "fatal arithmetic error" :
+                       s == SIGILL ? "illegal instruction" :
+                                     "segmentation fault");
+    checkasm_load_context();
+}
+#endif
+
+/* Perform tests and benchmarks for the specified
+ * cpu flag if supported by the host */
+static void check_cpu_flag(const char *const name, unsigned flag) {
+    const unsigned old_cpu_flag = state.cpu_flag;
+
+    flag |= old_cpu_flag; /* flags are cumulative across successive calls */
+    dav1d_set_cpu_flags_mask(flag);
+    state.cpu_flag = dav1d_get_cpu_flags();
+
+    /* Run either the baseline C pass (flag == 0) or any pass where the host
+     * actually gained at least one newly requested flag. */
+    if (!flag || state.cpu_flag != old_cpu_flag) {
+        state.cpu_flag_name = name;
+        for (int i = 0; tests[i].func; i++) {
+            if (state.test_name && strcmp(tests[i].name, state.test_name))
+                continue; /* --test=<name> filter */
+            /* Re-seed per test so results don't depend on test ordering */
+            xor128_srand(state.seed);
+            state.current_test_name = tests[i].name;
+            tests[i].func();
+        }
+    }
+}
+
+/* Print the name of the current CPU flag, but only do it once */
+static void print_cpu_name(void) {
+    if (state.cpu_flag_name) {
+        color_printf(COLOR_YELLOW, "%s:\n", state.cpu_flag_name);
+        state.cpu_flag_name = NULL; /* clear so the header prints only once */
+    }
+}
+
+int main(int argc, char *argv[]) {
+    state.seed = get_seed();
+
+    /* Parse options; a trailing numeric argument overrides the random seed */
+    while (argc > 1) {
+        if (!strncmp(argv[1], "--help", 6)) {
+            fprintf(stdout,
+                    "checkasm [options] <random seed>\n"
+                    "    <random seed>              Numeric value to seed the rng\n"
+                    "Options:\n"
+                    "    --test=<test_name>         Test only <test_name>\n"
+                    "    --bench=<pattern>          Test and benchmark the functions matching <pattern>\n"
+                    "    --list-functions           List available functions\n"
+                    "    --list-tests               List available tests\n"
+                    "    --bench-c                  Benchmark the C-only functions\n"
+                    "    --verbose -v               Print failures verbosely\n");
+            return 0;
+        } else if (!strncmp(argv[1], "--bench-c", 9)) {
+            state.bench_c = 1;
+        } else if (!strncmp(argv[1], "--bench", 7)) {
+#ifndef readtime
+            /* No cycle-counter implementation exists for this arch/OS combo */
+            fprintf(stderr,
+                    "checkasm: --bench is not supported on your system\n");
+            return 1;
+#endif
+            if (argv[1][7] == '=') {
+                state.bench_pattern = argv[1] + 8;
+                state.bench_pattern_len = strlen(state.bench_pattern);
+            } else
+                state.bench_pattern = ""; /* empty pattern matches everything */
+        } else if (!strncmp(argv[1], "--test=", 7)) {
+            state.test_name = argv[1] + 7;
+        } else if (!strcmp(argv[1], "--list-functions")) {
+            state.function_listing = 1;
+        } else if (!strcmp(argv[1], "--list-tests")) {
+            for (int i = 0; tests[i].name; i++)
+                printf("%s\n", tests[i].name);
+            return 0;
+        } else if (!strcmp(argv[1], "--verbose") || !strcmp(argv[1], "-v")) {
+            state.verbose = 1;
+        } else {
+            state.seed = (unsigned) strtoul(argv[1], NULL, 10);
+        }
+
+        argc--;
+        argv++;
+    }
+
+    dav1d_init_cpu();
+
+#ifdef readtime
+    /* Probe the cycle counter once. If readtime() faults, the signal
+     * handler restores the saved context here with `testing` already set,
+     * so we detect the failed probe and bail out. */
+    if (state.bench_pattern) {
+        static int testing = 0;
+        checkasm_save_context();
+        if (!testing) {
+            checkasm_set_signal_handler_state(1);
+            testing = 1;
+            readtime();
+            checkasm_set_signal_handler_state(0);
+        } else {
+            fprintf(stderr, "checkasm: unable to access cycle counter\n");
+            return 1;
+        }
+    }
+#endif
+
+    int ret = 0;
+
+    if (!state.function_listing) {
+        fprintf(stderr, "checkasm: using random seed %u\n", state.seed);
+#if ARCH_X86_64
+        /* Pick a SIMD warmup routine matching the widest available vectors */
+        void checkasm_warmup_avx2(void);
+        void checkasm_warmup_avx512(void);
+        const unsigned cpu_flags = dav1d_get_cpu_flags();
+        if (cpu_flags & DAV1D_X86_CPU_FLAG_AVX512ICL)
+            state.simd_warmup = checkasm_warmup_avx512;
+        else if (cpu_flags & DAV1D_X86_CPU_FLAG_AVX2)
+            state.simd_warmup = checkasm_warmup_avx2;
+        checkasm_simd_warmup();
+#endif
+    }
+
+    /* Baseline pass: run all tests with the plain C implementations */
+    check_cpu_flag(NULL, 0);
+
+    if (state.function_listing) {
+        print_functions(state.funcs);
+    } else {
+        /* One additional pass per supported SIMD instruction set */
+        for (int i = 0; cpus[i].flag; i++)
+            check_cpu_flag(cpus[i].name, cpus[i].flag);
+        if (!state.num_checked) {
+            fprintf(stderr, "checkasm: no tests to perform\n");
+        } else if (state.num_failed) {
+            fprintf(stderr, "checkasm: %d of %d tests have failed\n",
+                    state.num_failed, state.num_checked);
+            ret = 1;
+        } else {
+            fprintf(stderr, "checkasm: all %d tests passed\n", state.num_checked);
+#ifdef readtime
+            if (state.bench_pattern) {
+                state.nop_time = measure_nop_time();
+                printf("nop: %d.%d\n", state.nop_time/10, state.nop_time%10);
+                print_benchs(state.funcs);
+            }
+#endif
+        }
+    }
+
+    destroy_func_tree(state.funcs);
+    return ret;
+}
+
+/* Decide whether or not the specified function needs to be tested and
+ * allocate/initialize data structures if needed. Returns a pointer to a
+ * reference function if the function should be tested, otherwise NULL */
+void *checkasm_check_func(void *const func, const char *const name, ...) {
+    char name_buf[256];
+    va_list arg;
+
+    va_start(arg, name);
+    const int name_length = vsnprintf(name_buf, sizeof(name_buf), name, arg);
+    va_end(arg);
+
+    /* Reject missing functions and truncated/failed name formatting */
+    if (!func || name_length <= 0 || (size_t)name_length >= sizeof(name_buf))
+        return NULL;
+
+    state.current_func = get_func(&state.funcs, name_buf);
+
+    if (state.function_listing) /* Save function names without running tests */
+        return NULL;
+
+    state.funcs->color = 1; /* the root of the red-black tree is always black */
+    CheckasmFuncVersion *v = &state.current_func->versions;
+    void *ref = func;
+
+    if (v->func) {
+        CheckasmFuncVersion *prev;
+        do {
+            /* Only test functions that haven't already been tested */
+            if (v->func == func)
+                return NULL;
+
+            /* Use the most recent passing version as the reference */
+            if (v->ok)
+                ref = v->func;
+
+            prev = v;
+        } while ((v = v->next));
+
+        v = prev->next = checkasm_malloc(sizeof(CheckasmFuncVersion));
+    }
+
+    v->func = func;
+    v->ok = 1;
+    v->cpu = state.cpu_flag;
+    state.current_func_ver = v;
+    /* Re-seed so every version of a function sees identical random input */
+    xor128_srand(state.seed);
+
+    if (state.cpu_flag || state.bench_c)
+        state.num_checked++;
+
+    return ref;
+}
+
+/* Decide whether or not the current function needs to be benchmarked */
+int checkasm_bench_func(void) {
+    /* Benchmark only when nothing has failed, --bench was given, and the
+     * function name starts with the requested pattern prefix */
+    return !state.num_failed && state.bench_pattern &&
+           !strncmp(state.current_func->name, state.bench_pattern,
+                    state.bench_pattern_len);
+}
+
+/* Indicate that the current test has failed, return whether verbose printing
+ * is requested. */
+int checkasm_fail_func(const char *const msg, ...) {
+    /* Only count a failure for a non-C version that hasn't failed already,
+     * so each broken function is reported exactly once */
+    if (state.current_func_ver && state.current_func_ver->cpu &&
+        state.current_func_ver->ok)
+    {
+        va_list arg;
+
+        print_cpu_name();
+        fprintf(stderr, "   %s_%s (", state.current_func->name,
+                cpu_suffix(state.current_func_ver->cpu));
+        va_start(arg, msg);
+        vfprintf(stderr, msg, arg);
+        va_end(arg);
+        fprintf(stderr, ")\n");
+
+        state.current_func_ver->ok = 0;
+        state.num_failed++;
+    }
+    return state.verbose;
+}
+
+/* Update benchmark results of the current function */
+void checkasm_update_bench(const int iterations, const uint64_t cycles) {
+    /* Accumulate; average cycles/call is computed when results are printed */
+    state.current_func_ver->iterations += iterations;
+    state.current_func_ver->cycles += cycles;
+}
+
+/* Print the outcome of all tests performed since
+ * the last time this function was called */
+void checkasm_report(const char *const name, ...) {
+    static int prev_checked, prev_failed;
+    static size_t max_length;
+
+    if (state.num_checked > prev_checked) {
+        /* New functions were checked since the last report: print a line */
+        int pad_length = (int) max_length + 4;
+        va_list arg;
+
+        print_cpu_name();
+        pad_length -= fprintf(stderr, " - %s.", state.current_test_name);
+        va_start(arg, name);
+        pad_length -= vfprintf(stderr, name, arg);
+        va_end(arg);
+        /* Pad so the [OK]/[FAILED] column lines up across reports */
+        fprintf(stderr, "%*c", imax(pad_length, 0) + 2, '[');
+
+        if (state.num_failed == prev_failed)
+            color_printf(COLOR_GREEN, "OK");
+        else
+            color_printf(COLOR_RED, "FAILED");
+        fprintf(stderr, "]\n");
+
+        prev_checked = state.num_checked;
+        prev_failed  = state.num_failed;
+    } else if (!state.cpu_flag) {
+        /* Calculate the amount of padding required
+         * to make the output vertically aligned
+         * (done during the C-only pass, which visits every report name) */
+        size_t length = strlen(state.current_test_name);
+        va_list arg;
+
+        va_start(arg, name);
+        length += vsnprintf(NULL, 0, name, arg);
+        va_end(arg);
+
+        if (length > max_length)
+            max_length = length;
+    }
+}
+
+/* Arm or disarm the crash handlers around calls into assembly under test.
+ * Handlers are enabled only while tested code runs so that crashes in the
+ * harness itself still abort normally. */
+void checkasm_set_signal_handler_state(const int enabled) {
+#ifdef _WIN32
+    if (enabled)
+        AddVectoredExceptionHandler(0, signal_handler);
+    else
+        RemoveVectoredExceptionHandler(signal_handler);
+#else
+    void (*const handler)(int) = enabled ? signal_handler : SIG_DFL;
+    signal(SIGBUS,  handler);
+    signal(SIGFPE,  handler);
+    signal(SIGILL,  handler);
+    signal(SIGSEGV, handler);
+#endif
+}
+
+/* Define one buffer-comparison helper per element type: compare two 2-D
+ * buffers row by row; on mismatch fail the current test and, in verbose
+ * mode, dump both buffers side by side plus an x/. difference map. */
+#define DEF_CHECKASM_CHECK_FUNC(type, fmt) \
+int checkasm_check_##type(const char *const file, const int line, \
+                          const type *buf1, ptrdiff_t stride1, \
+                          const type *buf2, ptrdiff_t stride2, \
+                          const int w, int h, const char *const name) \
+{ \
+    stride1 /= sizeof(*buf1); /* byte strides -> element strides */ \
+    stride2 /= sizeof(*buf2); \
+    int y = 0; \
+    for (y = 0; y < h; y++) \
+        if (memcmp(&buf1[y*stride1], &buf2[y*stride2], w*sizeof(*buf1))) \
+            break; \
+    if (y == h) \
+        return 0; /* buffers match */ \
+    if (!checkasm_fail_func("%s:%d", file, line)) \
+        return 1; /* mismatch, but verbose printing not requested */ \
+    fprintf(stderr, "%s:\n", name); \
+    while (h--) { \
+        for (int x = 0; x < w; x++) \
+            fprintf(stderr, " " fmt, buf1[x]); \
+        fprintf(stderr, "    "); \
+        for (int x = 0; x < w; x++) \
+            fprintf(stderr, " " fmt, buf2[x]); \
+        fprintf(stderr, "    "); \
+        for (int x = 0; x < w; x++) \
+            fprintf(stderr, "%c", buf1[x] != buf2[x] ? 'x' : '.'); \
+        buf1 += stride1; \
+        buf2 += stride2; \
+        fprintf(stderr, "\n"); \
+    } \
+    return 1; \
+}
+
+DEF_CHECKASM_CHECK_FUNC(uint8_t,  "%02x")
+DEF_CHECKASM_CHECK_FUNC(uint16_t, "%04x")
+DEF_CHECKASM_CHECK_FUNC(int16_t,  "%6d")
+DEF_CHECKASM_CHECK_FUNC(int32_t,  "%9d")
+
+#if ARCH_X86_64
+/* Execute the selected AVX2/AVX-512 warmup routine (if any) so wide vector
+ * units are powered up before timing-sensitive code runs */
+void checkasm_simd_warmup(void)
+{
+    if (state.simd_warmup)
+        state.simd_warmup();
+}
+#endif
diff --git a/third_party/dav1d/tests/checkasm/checkasm.h b/third_party/dav1d/tests/checkasm/checkasm.h
new file mode 100644
index 0000000000..6c81481c89
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/checkasm.h
@@ -0,0 +1,332 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DAV1D_TESTS_CHECKASM_CHECKASM_H
+#define DAV1D_TESTS_CHECKASM_CHECKASM_H
+
+#include "config.h"
+
+#include <stdint.h>
+#include <stdlib.h>
+
+#if ARCH_X86_64 && defined(_WIN32)
+/* setjmp/longjmp on 64-bit Windows will try to use SEH to unwind the stack,
+ * which doesn't work for assembly functions without unwind information. */
+#include <windows.h>
+#define checkasm_context CONTEXT
+#define checkasm_save_context() RtlCaptureContext(&checkasm_context_buf)
+#define checkasm_load_context() RtlRestoreContext(&checkasm_context_buf, NULL)
+#else
+#include <setjmp.h>
+#define checkasm_context jmp_buf
+#define checkasm_save_context() setjmp(checkasm_context_buf)
+#define checkasm_load_context() longjmp(checkasm_context_buf, 1)
+#endif
+
+#include "include/common/attributes.h"
+#include "include/common/bitdepth.h"
+#include "include/common/intops.h"
+
+int xor128_rand(void);
+#define rnd xor128_rand
+
+#define decl_check_bitfns(name) \
+name##_8bpc(void); \
+name##_16bpc(void)
+
+void checkasm_check_msac(void);
+decl_check_bitfns(void checkasm_check_cdef);
+decl_check_bitfns(void checkasm_check_filmgrain);
+decl_check_bitfns(void checkasm_check_ipred);
+decl_check_bitfns(void checkasm_check_itx);
+decl_check_bitfns(void checkasm_check_loopfilter);
+decl_check_bitfns(void checkasm_check_looprestoration);
+decl_check_bitfns(void checkasm_check_mc);
+
+void *checkasm_check_func(void *func, const char *name, ...);
+int checkasm_bench_func(void);
+int checkasm_fail_func(const char *msg, ...);
+void checkasm_update_bench(int iterations, uint64_t cycles);
+void checkasm_report(const char *name, ...);
+void checkasm_set_signal_handler_state(int enabled);
+extern checkasm_context checkasm_context_buf;
+
+/* float compare utilities */
+int float_near_ulp(float a, float b, unsigned max_ulp);
+int float_near_abs_eps(float a, float b, float eps);
+int float_near_abs_eps_ulp(float a, float b, float eps, unsigned max_ulp);
+int float_near_ulp_array(const float *a, const float *b, unsigned max_ulp,
+ int len);
+int float_near_abs_eps_array(const float *a, const float *b, float eps,
+ int len);
+int float_near_abs_eps_array_ulp(const float *a, const float *b, float eps,
+ unsigned max_ulp, int len);
+
+#define BENCH_RUNS (1 << 12) /* Trade-off between accuracy and speed */
+
+/* Decide whether or not the specified function needs to be tested */
+#define check_func(func, ...)\
+ (func_ref = checkasm_check_func((func_new = func), __VA_ARGS__))
+
+/* Declare the function prototype. The first argument is the return value,
+ * the remaining arguments are the function parameters. Naming parameters
+ * is optional. */
+#define declare_func(ret, ...)\
+ declare_new(ret, __VA_ARGS__)\
+ void *func_ref, *func_new;\
+ typedef ret func_type(__VA_ARGS__);\
+ checkasm_save_context()
+
+/* Indicate that the current test has failed */
+#define fail() checkasm_fail_func("%s:%d", __FILE__, __LINE__)
+
+/* Print the test outcome */
+#define report checkasm_report
+
+/* Call the reference function */
+#define call_ref(...)\
+ (checkasm_set_signal_handler_state(1),\
+ ((func_type *)func_ref)(__VA_ARGS__));\
+ checkasm_set_signal_handler_state(0)
+
+#if HAVE_ASM
+#if ARCH_X86
+#ifdef _MSC_VER
+#include <intrin.h>
+#define readtime() (_mm_lfence(), __rdtsc())
+#else
+static inline uint64_t readtime(void) {
+    uint32_t eax, edx;
+    /* lfence keeps rdtsc from being reordered with earlier loads */
+    __asm__ __volatile__("lfence\nrdtsc" : "=a"(eax), "=d"(edx));
+    return (((uint64_t)edx) << 32) | eax; /* rdtsc splits result into edx:eax */
+}
+#define readtime readtime
+#endif
+#elif (ARCH_AARCH64 || ARCH_ARM) && defined(__APPLE__)
+#include <mach/mach_time.h>
+#define readtime() mach_absolute_time()
+#elif ARCH_AARCH64
+#ifdef _MSC_VER
+#include <windows.h>
+#define readtime() (_InstructionSynchronizationBarrier(), ReadTimeStampCounter())
+#else
+static inline uint64_t readtime(void) {
+    uint64_t cycle_counter;
+    /* This requires enabling user mode access to the cycle counter (which
+     * can only be done from kernel space).
+     * This could also read cntvct_el0 instead of pmccntr_el0; that register
+     * might also be readable (depending on kernel version), but it has much
+     * worse precision (it's a fixed 50 MHz timer). */
+    /* isb is an instruction synchronization barrier: it prevents reading
+     * the counter before preceding instructions have completed */
+    __asm__ __volatile__("isb\nmrs %0, pmccntr_el0"
+                         : "=r"(cycle_counter)
+                         :: "memory");
+    return cycle_counter;
+}
+#define readtime readtime
+#endif
+#elif ARCH_ARM && !defined(_MSC_VER) && __ARM_ARCH >= 7
+static inline uint64_t readtime(void) {
+    /* Note: only 32 bits wide; the counter wraps comparatively quickly */
+    uint32_t cycle_counter;
+    /* This requires enabling user mode access to the cycle counter (which
+     * can only be done from kernel space). */
+    __asm__ __volatile__("isb\nmrc p15, 0, %0, c9, c13, 0"
+                         : "=r"(cycle_counter)
+                         :: "memory");
+    return cycle_counter;
+}
+#define readtime readtime
+#elif ARCH_PPC64LE
+static inline uint64_t readtime(void) {
+    uint32_t tbu, tbl, temp;
+
+    /* Read the 64-bit timebase in two 32-bit halves (SPR 269 = upper,
+     * SPR 268 = lower); retry if the upper half changed between reads,
+     * i.e. the lower half carried over mid-sequence */
+    __asm__ __volatile__(
+        "1:\n"
+        "mfspr %2,269\n"
+        "mfspr %0,268\n"
+        "mfspr %1,269\n"
+        "cmpw %2,%1\n"
+        "bne 1b\n"
+    : "=r"(tbl), "=r"(tbu), "=r"(temp)
+    :
+    : "cc");
+
+    return (((uint64_t)tbu) << 32) | (uint64_t)tbl;
+}
+#define readtime readtime
+#endif
+
+/* Verifies that clobbered callee-saved registers
+ * are properly saved and restored */
+void checkasm_checked_call(void *func, ...);
+
+#if ARCH_X86_64
+/* Evil hack: detect incorrect assumptions that 32-bit ints are zero-extended
+ * to 64-bit. This is done by clobbering the stack with junk around the stack
+ * pointer and calling the assembly function through checked_call() with added
+ * dummy arguments which forces all real arguments to be passed on the stack
+ * and not in registers. For 32-bit arguments the upper half of the 64-bit
+ * register locations on the stack will now contain junk which will cause
+ * misbehaving functions to either produce incorrect output or segfault. Note
+ * that even though this works extremely well in practice, it's technically
+ * not guaranteed and false negatives is theoretically possible, but there
+ * can never be any false positives. */
+void checkasm_stack_clobber(uint64_t clobber, ...);
+/* YMM and ZMM registers on x86 are turned off to save power when they haven't
+ * been used for some period of time. When they are used there will be a
+ * "warmup" period during which performance will be reduced and inconsistent
+ * which is problematic when trying to benchmark individual functions. We can
+ * work around this by periodically issuing "dummy" instructions that uses
+ * those registers to keep them powered on. */
+void checkasm_simd_warmup(void);
+#define declare_new(ret, ...)\
+ ret (*checked_call)(void *, int, int, int, int, int, __VA_ARGS__,\
+ int, int, int, int, int, int, int, int,\
+ int, int, int, int, int, int, int) =\
+ (void *)checkasm_checked_call;
+#define CLOB (UINT64_C(0xdeadbeefdeadbeef))
+#ifdef _WIN32
+#define STACKARGS 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0
+#else
+#define STACKARGS 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0
+#endif
+#define call_new(...)\
+ (checkasm_set_signal_handler_state(1),\
+ checkasm_simd_warmup(),\
+ checkasm_stack_clobber(CLOB, CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
+ CLOB, CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
+ CLOB, CLOB, CLOB, CLOB, CLOB, CLOB, CLOB),\
+ checked_call(func_new, 0, 0, 0, 0, 0, __VA_ARGS__, STACKARGS));\
+ checkasm_set_signal_handler_state(0)
+#elif ARCH_X86_32
+#define declare_new(ret, ...)\
+ ret (*checked_call)(void *, __VA_ARGS__, int, int, int, int, int, int,\
+ int, int, int, int, int, int, int, int, int) =\
+ (void *)checkasm_checked_call;
+#define call_new(...)\
+ (checkasm_set_signal_handler_state(1),\
+ checked_call(func_new, __VA_ARGS__, 15, 14, 13, 12,\
+ 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1));\
+ checkasm_set_signal_handler_state(0)
+#elif ARCH_ARM
+/* Use a dummy argument, to offset the real parameters by 2, not only 1.
+ * This makes sure that potential 8-byte-alignment of parameters is kept
+ * the same even when the extra parameters have been removed. */
+void checkasm_checked_call_vfp(void *func, int dummy, ...);
+#define declare_new(ret, ...)\
+ ret (*checked_call)(void *, int dummy, __VA_ARGS__,\
+ int, int, int, int, int, int, int, int,\
+ int, int, int, int, int, int, int) =\
+ (void *)checkasm_checked_call_vfp;
+#define call_new(...)\
+ (checkasm_set_signal_handler_state(1),\
+ checked_call(func_new, 0, __VA_ARGS__, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0));\
+ checkasm_set_signal_handler_state(0)
+#elif ARCH_AARCH64 && !defined(__APPLE__)
+void checkasm_stack_clobber(uint64_t clobber, ...);
+#define declare_new(ret, ...)\
+ ret (*checked_call)(void *, int, int, int, int, int, int, int,\
+ __VA_ARGS__, int, int, int, int, int, int, int, int,\
+ int, int, int, int, int, int, int) =\
+ (void *)checkasm_checked_call;
+#define CLOB (UINT64_C(0xdeadbeefdeadbeef))
+#define call_new(...)\
+ (checkasm_set_signal_handler_state(1),\
+ checkasm_stack_clobber(CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
+ CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
+ CLOB, CLOB, CLOB, CLOB, CLOB, CLOB,\
+ CLOB, CLOB, CLOB, CLOB, CLOB),\
+ checked_call(func_new, 0, 0, 0, 0, 0, 0, 0, __VA_ARGS__,\
+ 7, 6, 5, 4, 3, 2, 1, 0, 0, 0, 0, 0, 0, 0, 0));\
+ checkasm_set_signal_handler_state(0)
+#else
+#define declare_new(ret, ...)
+#define call_new(...)\
+ (checkasm_set_signal_handler_state(1),\
+ ((func_type *)func_new)(__VA_ARGS__));\
+ checkasm_set_signal_handler_state(0)
+#endif
+#else /* HAVE_ASM */
+#define declare_new(ret, ...)
+/* Call the function */
+#define call_new(...)\
+ (checkasm_set_signal_handler_state(1),\
+ ((func_type *)func_new)(__VA_ARGS__));\
+ checkasm_set_signal_handler_state(0)
+#endif /* HAVE_ASM */
+
+/* Benchmark the function */
+#ifdef readtime
+#define bench_new(...)\
+ do {\
+ if (checkasm_bench_func()) {\
+ checkasm_set_signal_handler_state(1);\
+ func_type *tfunc = func_new;\
+ uint64_t tsum = 0;\
+ int tcount = 0;\
+ for (int ti = 0; ti < BENCH_RUNS; ti++) {\
+ uint64_t t = readtime();\
+ tfunc(__VA_ARGS__);\
+ tfunc(__VA_ARGS__);\
+ tfunc(__VA_ARGS__);\
+ tfunc(__VA_ARGS__);\
+ t = readtime() - t;\
+ if (t*tcount <= tsum*4 && ti > 0) {\
+ tsum += t;\
+ tcount++;\
+ }\
+ }\
+ checkasm_set_signal_handler_state(0);\
+ checkasm_update_bench(tcount, tsum);\
+ }\
+ } while (0)
+#else
+#define bench_new(...) do {} while (0)
+#endif
+
+#define DECL_CHECKASM_CHECK_FUNC(type) \
+int checkasm_check_##type(const char *const file, const int line, \
+ const type *const buf1, const ptrdiff_t stride1, \
+ const type *const buf2, const ptrdiff_t stride2, \
+ const int w, const int h, const char *const name)
+
+DECL_CHECKASM_CHECK_FUNC(uint8_t);
+DECL_CHECKASM_CHECK_FUNC(uint16_t);
+DECL_CHECKASM_CHECK_FUNC(int16_t);
+DECL_CHECKASM_CHECK_FUNC(int32_t);
+
+
+#define PASTE(a,b) a ## b
+#define CONCAT(a,b) PASTE(a,b)
+
+#define checkasm_check(prefix, ...) CONCAT(checkasm_check_, prefix)(__FILE__, __LINE__, __VA_ARGS__)
+
+#ifdef BITDEPTH
+#define checkasm_check_pixel(...) checkasm_check(PIXEL_TYPE, __VA_ARGS__)
+#define checkasm_check_coef(...) checkasm_check(COEF_TYPE, __VA_ARGS__)
+#endif
+
+#endif /* DAV1D_TESTS_CHECKASM_CHECKASM_H */
diff --git a/third_party/dav1d/tests/checkasm/filmgrain.c b/third_party/dav1d/tests/checkasm/filmgrain.c
new file mode 100644
index 0000000000..1219ee7c85
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/filmgrain.c
@@ -0,0 +1,331 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2019, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+
+#include <string.h>
+
+#include "src/levels.h"
+#include "src/film_grain.h"
+#define UNIT_TEST 1
+#include "src/fg_apply_tmpl.c"
+
+/* Chroma subsampling suffixes for test names, indexed by layout - 1 */
+static const char ss_name[][4] = {
+    [DAV1D_PIXEL_LAYOUT_I420 - 1] = "420",
+    [DAV1D_PIXEL_LAYOUT_I422 - 1] = "422",
+    [DAV1D_PIXEL_LAYOUT_I444 - 1] = "444",
+};
+
+/* Test generate_grain_y for every AR lag (0-3) against the C reference */
+static void check_gen_grny(const Dav1dFilmGrainDSPContext *const dsp) {
+    entry grain_lut_c[GRAIN_HEIGHT][GRAIN_WIDTH];
+    /* one extra row for the asm output -- presumably scratch/overwrite
+     * space used by the asm versions; only GRAIN_HEIGHT rows are compared */
+    entry grain_lut_a[GRAIN_HEIGHT + 1][GRAIN_WIDTH];
+
+    declare_func(void, entry grain_lut[][GRAIN_WIDTH],
+                 const Dav1dFilmGrainData *data HIGHBD_DECL_SUFFIX);
+
+    for (int i = 0; i < 4; i++) {
+        if (check_func(dsp->generate_grain_y, "gen_grain_y_ar%d_%dbpc", i, BITDEPTH)) {
+            ALIGN_STK_16(Dav1dFilmGrainData, fg_data, 1,);
+            fg_data[0].seed = rnd() & 0xFFFF;
+
+#if BITDEPTH == 16
+            const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#endif
+
+            fg_data[0].grain_scale_shift = rnd() & 3;
+            fg_data[0].ar_coeff_shift = (rnd() & 3) + 6;
+            fg_data[0].ar_coeff_lag = i;
+            /* Number of AR coefficients grows quadratically with the lag */
+            const int num_y_pos = 2 * fg_data[0].ar_coeff_lag * (fg_data[0].ar_coeff_lag + 1);
+            for (int n = 0; n < num_y_pos; n++)
+                fg_data[0].ar_coeffs_y[n] = (rnd() & 0xff) - 128;
+
+            call_ref(grain_lut_c, fg_data HIGHBD_TAIL_SUFFIX);
+            call_new(grain_lut_a, fg_data HIGHBD_TAIL_SUFFIX);
+            if (memcmp(grain_lut_c, grain_lut_a,
+                       GRAIN_WIDTH * GRAIN_HEIGHT * sizeof(entry)))
+            {
+                fail();
+            }
+
+            bench_new(grain_lut_a, fg_data HIGHBD_TAIL_SUFFIX);
+        }
+    }
+
+    report("gen_grain_y");
+}
+
+/* Test generate_grain_uv for each chroma layout and AR lag against C */
+static void check_gen_grnuv(const Dav1dFilmGrainDSPContext *const dsp) {
+    entry grain_lut_y[GRAIN_HEIGHT + 1][GRAIN_WIDTH];
+    entry grain_lut_c[GRAIN_HEIGHT][GRAIN_WIDTH];
+    entry grain_lut_a[GRAIN_HEIGHT + 1][GRAIN_WIDTH];
+
+    declare_func(void, entry grain_lut[][GRAIN_WIDTH],
+                 const entry grain_lut_y[][GRAIN_WIDTH],
+                 const Dav1dFilmGrainData *data, intptr_t uv HIGHBD_DECL_SUFFIX);
+
+    for (int layout_idx = 0; layout_idx < 3; layout_idx++) {
+        const enum Dav1dPixelLayout layout = layout_idx + 1;
+        const int ss_x = layout != DAV1D_PIXEL_LAYOUT_I444; /* horiz. subsampling */
+        const int ss_y = layout == DAV1D_PIXEL_LAYOUT_I420; /* vert. subsampling */
+
+        for (int i = 0; i < 4; i++) {
+            if (check_func(dsp->generate_grain_uv[layout_idx],
+                           "gen_grain_uv_ar%d_%dbpc_%s",
+                           i, BITDEPTH, ss_name[layout_idx]))
+            {
+                ALIGN_STK_16(Dav1dFilmGrainData, fg_data, 1,);
+                fg_data[0].seed = rnd() & 0xFFFF;
+
+#if BITDEPTH == 16
+                const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#endif
+
+                fg_data[0].num_y_points = rnd() & 1;
+                fg_data[0].grain_scale_shift = rnd() & 3;
+                fg_data[0].ar_coeff_shift = (rnd() & 3) + 6;
+                fg_data[0].ar_coeff_lag = i;
+                const int num_y_pos = 2 * fg_data[0].ar_coeff_lag * (fg_data[0].ar_coeff_lag + 1);
+                for (int n = 0; n < num_y_pos; n++)
+                    fg_data[0].ar_coeffs_y[n] = (rnd() & 0xff) - 128;
+                /* Chroma grain is derived from luma grain, so generate it first */
+                dsp->generate_grain_y(grain_lut_y, fg_data HIGHBD_TAIL_SUFFIX);
+
+                const int uv = rnd() & 1;
+                const int num_uv_pos = num_y_pos + !!fg_data[0].num_y_points;
+                for (int n = 0; n < num_uv_pos; n++)
+                    fg_data[0].ar_coeffs_uv[uv][n] = (rnd() & 0xff) - 128;
+                if (!fg_data[0].num_y_points)
+                    fg_data[0].ar_coeffs_uv[uv][num_uv_pos] = 0;
+                /* Poison both outputs so untouched regions can't accidentally match */
+                memset(grain_lut_c, 0xff, sizeof(grain_lut_c));
+                memset(grain_lut_a, 0xff, sizeof(grain_lut_a));
+                call_ref(grain_lut_c, grain_lut_y, fg_data, uv HIGHBD_TAIL_SUFFIX);
+                call_new(grain_lut_a, grain_lut_y, fg_data, uv HIGHBD_TAIL_SUFFIX);
+                /* Only the subsampled region (44x38 for 4:2:0) is compared */
+                int diff = 0, w = ss_x ? 44 : GRAIN_WIDTH;
+                for (int y = 0; y < (ss_y ? 38 : GRAIN_HEIGHT); y++)
+                    diff |= memcmp(grain_lut_a[y], grain_lut_c[y], w * sizeof(entry));
+                if (diff) fail();
+
+                bench_new(grain_lut_a, grain_lut_y, fg_data, uv HIGHBD_TAIL_SUFFIX);
+            }
+        }
+    }
+
+    report("gen_grain_uv");
+}
+
+/* Test luma grain application (fgy_32x32xn) with randomized grain data,
+ * scaling curve, dimensions and both overlap modes */
+static void check_fgy_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
+    ALIGN_STK_64(pixel, c_dst, 128 * 32,);
+    ALIGN_STK_64(pixel, a_dst, 128 * 32,);
+    ALIGN_STK_64(pixel, src, 128 * 32,);
+    const ptrdiff_t stride = 128 * sizeof(pixel);
+
+    declare_func(void, pixel *dst_row, const pixel *src_row, ptrdiff_t stride,
+                 const Dav1dFilmGrainData *data, size_t pw,
+                 const uint8_t scaling[SCALING_SIZE],
+                 const entry grain_lut[][GRAIN_WIDTH],
+                 int bh, int row_num HIGHBD_DECL_SUFFIX);
+
+    if (check_func(dsp->fgy_32x32xn, "fgy_32x32xn_%dbpc", BITDEPTH)) {
+        ALIGN_STK_16(Dav1dFilmGrainData, fg_data, 16,);
+        fg_data[0].seed = rnd() & 0xFFFF;
+
+#if BITDEPTH == 16
+        const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+        const int bitdepth_max = 0xff;
+#endif
+
+        uint8_t scaling[SCALING_SIZE];
+        entry grain_lut[GRAIN_HEIGHT + 1][GRAIN_WIDTH];
+        fg_data[0].grain_scale_shift = rnd() & 3;
+        fg_data[0].ar_coeff_shift = (rnd() & 3) + 6;
+        fg_data[0].ar_coeff_lag = rnd() & 3;
+        const int num_y_pos = 2 * fg_data[0].ar_coeff_lag * (fg_data[0].ar_coeff_lag + 1);
+        for (int n = 0; n < num_y_pos; n++)
+            fg_data[0].ar_coeffs_y[n] = (rnd() & 0xff) - 128;
+        dsp->generate_grain_y(grain_lut, fg_data HIGHBD_TAIL_SUFFIX);
+
+        /* Build a random, strictly increasing piecewise-linear scaling curve */
+        fg_data[0].num_y_points = 2 + (rnd() % 13);
+        const int pad = 0xff / fg_data[0].num_y_points;
+        for (int n = 0; n < fg_data[0].num_y_points; n++) {
+            fg_data[0].y_points[n][0] = 0xff * n / fg_data[0].num_y_points;
+            fg_data[0].y_points[n][0] += rnd() % pad;
+            fg_data[0].y_points[n][1] = rnd() & 0xff;
+        }
+        generate_scaling(bitdepth_from_max(bitdepth_max), fg_data[0].y_points,
+                         fg_data[0].num_y_points, scaling);
+
+        const int w = 1 + (rnd() & 127);
+        const int h = 1 + (rnd() & 31);
+
+        for (int y = 0; y < 32; y++)
+            for (int x = 0; x < 128; x++)
+                src[y * PXSTRIDE(stride) + x] = rnd() & bitdepth_max;
+        const int row_num = rnd() & 1 ? rnd() & 0x7ff : 0;
+
+        fg_data[0].clip_to_restricted_range = rnd() & 1;
+        fg_data[0].scaling_shift = (rnd() & 3) + 8;
+        /* Exercise both the non-overlapped and overlapped code paths */
+        for (fg_data[0].overlap_flag = 0; fg_data[0].overlap_flag <= 1;
+             fg_data[0].overlap_flag++)
+        {
+            call_ref(c_dst, src, stride, fg_data, w, scaling, grain_lut, h,
+                     row_num HIGHBD_TAIL_SUFFIX);
+            call_new(a_dst, src, stride, fg_data, w, scaling, grain_lut, h,
+                     row_num HIGHBD_TAIL_SUFFIX);
+
+            checkasm_check_pixel(c_dst, stride, a_dst, stride, w, h, "dst");
+        }
+        fg_data[0].overlap_flag = 1;
+        bench_new(a_dst, src, stride, fg_data, 64, scaling, grain_lut, 32,
+                  row_num HIGHBD_TAIL_SUFFIX);
+    }
+
+    report("fgy_32x32xn");
+}
+
+/* Test chroma grain application (fguv_32x32xn) for every layout, with and
+ * without chroma-scaling-from-luma (csfl), against the C reference */
+static void check_fguv_sbrow(const Dav1dFilmGrainDSPContext *const dsp) {
+    ALIGN_STK_64(pixel, c_dst, 128 * 32,);
+    ALIGN_STK_64(pixel, a_dst, 128 * 32,);
+    ALIGN_STK_64(pixel, src, 128 * 32,);
+    ALIGN_STK_64(pixel, luma_src, 128 * 32,);
+    const ptrdiff_t lstride = 128 * sizeof(pixel);
+
+    declare_func(void, pixel *dst_row, const pixel *src_row, ptrdiff_t stride,
+                 const Dav1dFilmGrainData *data, size_t pw,
+                 const uint8_t scaling[SCALING_SIZE],
+                 const entry grain_lut[][GRAIN_WIDTH], int bh, int row_num,
+                 const pixel *luma_row, ptrdiff_t luma_stride, int uv_pl,
+                 int is_identity HIGHBD_DECL_SUFFIX);
+
+    for (int layout_idx = 0; layout_idx < 3; layout_idx++) {
+        const enum Dav1dPixelLayout layout = layout_idx + 1;
+        const int ss_x = layout != DAV1D_PIXEL_LAYOUT_I444;
+        const int ss_y = layout == DAV1D_PIXEL_LAYOUT_I420;
+        const ptrdiff_t stride = (ss_x ? 96 : 128) * sizeof(pixel);
+
+        for (int csfl = 0; csfl <= 1; csfl++) {
+            if (check_func(dsp->fguv_32x32xn[layout_idx],
+                           "fguv_32x32xn_%dbpc_%s_csfl%d",
+                           BITDEPTH, ss_name[layout_idx], csfl))
+            {
+                ALIGN_STK_16(Dav1dFilmGrainData, fg_data, 1,);
+
+                fg_data[0].seed = rnd() & 0xFFFF;
+
+#if BITDEPTH == 16
+                const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+                const int bitdepth_max = 0xff;
+#endif
+                const int uv_pl = rnd() & 1;
+                const int is_identity = rnd() & 1;
+
+                uint8_t scaling[SCALING_SIZE];
+                /* [0] holds the luma grain, [1] the derived chroma grain */
+                entry grain_lut[2][GRAIN_HEIGHT + 1][GRAIN_WIDTH];
+                fg_data[0].grain_scale_shift = rnd() & 3;
+                fg_data[0].ar_coeff_shift = (rnd() & 3) + 6;
+                fg_data[0].ar_coeff_lag = rnd() & 3;
+                const int num_y_pos = 2 * fg_data[0].ar_coeff_lag * (fg_data[0].ar_coeff_lag + 1);
+                for (int n = 0; n < num_y_pos; n++)
+                    fg_data[0].ar_coeffs_y[n] = (rnd() & 0xff) - 128;
+                const int num_uv_pos = num_y_pos + 1;
+                for (int n = 0; n < num_uv_pos; n++)
+                    fg_data[0].ar_coeffs_uv[uv_pl][n] = (rnd() & 0xff) - 128;
+                dsp->generate_grain_y(grain_lut[0], fg_data HIGHBD_TAIL_SUFFIX);
+                dsp->generate_grain_uv[layout_idx](grain_lut[1], grain_lut[0],
+                                                   fg_data, uv_pl HIGHBD_TAIL_SUFFIX);
+
+                /* Chroma plane dimensions shrink with the subsampling factors */
+                const int w = 1 + (rnd() & (127 >> ss_x));
+                const int h = 1 + (rnd() & (31 >> ss_y));
+
+                for (int y = 0; y < 32; y++)
+                    for (int x = 0; x < 128; x++)
+                        src[y * PXSTRIDE(stride) + x] = rnd() & bitdepth_max;
+                for (int y = 0; y < 32; y++)
+                    for (int x = 0; x < 128; x++)
+                        luma_src[y * PXSTRIDE(lstride) + x] = rnd() & bitdepth_max;
+                const int row_num = rnd() & 1 ? rnd() & 0x7ff : 0;
+
+                if (csfl) {
+                    /* Scaling is driven by the luma curve */
+                    fg_data[0].num_y_points = 2 + (rnd() % 13);
+                    const int pad = 0xff / fg_data[0].num_y_points;
+                    for (int n = 0; n < fg_data[0].num_y_points; n++) {
+                        fg_data[0].y_points[n][0] = 0xff * n / fg_data[0].num_y_points;
+                        fg_data[0].y_points[n][0] += rnd() % pad;
+                        fg_data[0].y_points[n][1] = rnd() & 0xff;
+                    }
+                    generate_scaling(bitdepth_from_max(bitdepth_max), fg_data[0].y_points,
+                                     fg_data[0].num_y_points, scaling);
+                } else {
+                    /* Dedicated chroma curve plus chroma mixing parameters */
+                    fg_data[0].num_uv_points[uv_pl] = 2 + (rnd() % 9);
+                    const int pad = 0xff / fg_data[0].num_uv_points[uv_pl];
+                    for (int n = 0; n < fg_data[0].num_uv_points[uv_pl]; n++) {
+                        fg_data[0].uv_points[uv_pl][n][0] = 0xff * n / fg_data[0].num_uv_points[uv_pl];
+                        fg_data[0].uv_points[uv_pl][n][0] += rnd() % pad;
+                        fg_data[0].uv_points[uv_pl][n][1] = rnd() & 0xff;
+                    }
+                    generate_scaling(bitdepth_from_max(bitdepth_max), fg_data[0].uv_points[uv_pl],
+                                     fg_data[0].num_uv_points[uv_pl], scaling);
+
+                    fg_data[0].uv_mult[uv_pl] = (rnd() & 0xff) - 128;
+                    fg_data[0].uv_luma_mult[uv_pl] = (rnd() & 0xff) - 128;
+                    fg_data[0].uv_offset[uv_pl] = (rnd() & 0x1ff) - 256;
+                }
+
+                fg_data[0].clip_to_restricted_range = rnd() & 1;
+                fg_data[0].scaling_shift = (rnd() & 3) + 8;
+                fg_data[0].chroma_scaling_from_luma = csfl;
+                for (fg_data[0].overlap_flag = 0; fg_data[0].overlap_flag <= 1;
+                     fg_data[0].overlap_flag++)
+                {
+                    call_ref(c_dst, src, stride, fg_data, w, scaling, grain_lut[1], h,
+                             row_num, luma_src, lstride, uv_pl, is_identity HIGHBD_TAIL_SUFFIX);
+                    call_new(a_dst, src, stride, fg_data, w, scaling, grain_lut[1], h,
+                             row_num, luma_src, lstride, uv_pl, is_identity HIGHBD_TAIL_SUFFIX);
+
+                    checkasm_check_pixel(c_dst, stride, a_dst, stride, w, h, "dst");
+                }
+
+                fg_data[0].overlap_flag = 1;
+                bench_new(a_dst, src, stride, fg_data, 32, scaling, grain_lut[1], 16,
+                          row_num, luma_src, lstride, uv_pl, is_identity HIGHBD_TAIL_SUFFIX);
+            }
+        }
+    }
+
+    report("fguv_32x32xn");
+}
+
+/* Entry point for the film grain tests at the current BITDEPTH:
+ * initialize the DSP context and run each sub-test */
+void bitfn(checkasm_check_filmgrain)(void) {
+    Dav1dFilmGrainDSPContext c;
+
+    bitfn(dav1d_film_grain_dsp_init)(&c);
+
+    check_gen_grny(&c);
+    check_gen_grnuv(&c);
+    check_fgy_sbrow(&c);
+    check_fguv_sbrow(&c);
+}
diff --git a/third_party/dav1d/tests/checkasm/ipred.c b/third_party/dav1d/tests/checkasm/ipred.c
new file mode 100644
index 0000000000..6b054a7005
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/ipred.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+#include "src/ipred.h"
+#include "src/levels.h"
+
+#include <stdio.h>
+
+/* Printable names for each implemented intra prediction mode; used to
+ * build the per-function test names passed to check_func(). */
+static const char *const intra_pred_mode_names[N_IMPL_INTRA_PRED_MODES] = {
+    [DC_PRED] = "dc",
+    [DC_128_PRED] = "dc_128",
+    [TOP_DC_PRED] = "dc_top",
+    [LEFT_DC_PRED] = "dc_left",
+    [HOR_PRED] = "h",
+    [VERT_PRED] = "v",
+    [PAETH_PRED] = "paeth",
+    [SMOOTH_PRED] = "smooth",
+    [SMOOTH_V_PRED] = "smooth_v",
+    [SMOOTH_H_PRED] = "smooth_h",
+    [Z1_PRED] = "z1",
+    [Z2_PRED] = "z2",
+    [Z3_PRED] = "z3",
+    [FILTER_PRED] = "filter"
+};
+
+/* Chroma subsampling layout names for the cfl_ac tests. */
+static const char *const cfl_ac_names[3] = { "420", "422", "444" };
+
+/* CfL prediction variants; only the DC-based modes exist for CfL. */
+static const char *const cfl_pred_mode_names[DC_128_PRED + 1] = {
+    [DC_PRED] = "cfl",
+    [DC_128_PRED] = "cfl_128",
+    [TOP_DC_PRED] = "cfl_top",
+    [LEFT_DC_PRED] = "cfl_left",
+};
+
+/* Angle offsets added to the 90-degree base of each directional (Z)
+ * mode in check_intra_pred(); one is picked at random per test. */
+static const uint8_t z_angles[27] = {
+     3, 6, 9,
+    14, 17, 20, 23, 26, 29, 32,
+    36, 39, 42, 45, 48, 51, 54,
+    58, 61, 64, 67, 70, 73, 76,
+    81, 84, 87
+};
+
+/* Exercise every intra prediction mode for each width/height/bitdepth
+ * combination: fill the neighbor buffer with random pixels, run the C
+ * reference and the asm candidate, and compare the predicted blocks. */
+static void check_intra_pred(Dav1dIntraPredDSPContext *const c) {
+    ALIGN_STK_64(pixel, c_dst, 64 * 64,);
+    ALIGN_STK_64(pixel, a_dst, 64 * 64,);
+    /* topleft points to the middle of the buffer: negative indices are the
+     * left column, positive indices the top row, index 0 the corner. */
+    ALIGN_STK_64(pixel, topleft_buf, 257,);
+    pixel *const topleft = topleft_buf + 128;
+
+    declare_func(void, pixel *dst, ptrdiff_t stride, const pixel *topleft,
+                 int width, int height, int angle, int max_width, int max_height
+                 HIGHBD_DECL_SUFFIX);
+
+    for (int mode = 0; mode < N_IMPL_INTRA_PRED_MODES; mode++) {
+        int bpc_min = BITDEPTH, bpc_max = BITDEPTH;
+        /* FILTER_PRED behaves differently at 10 vs 12 bpc, so test both. */
+        if (mode == FILTER_PRED && BITDEPTH == 16) {
+            bpc_min = 10;
+            bpc_max = 12;
+        }
+        for (int bpc = bpc_min; bpc <= bpc_max; bpc += 2)
+        for (int w = 4; w <= (mode == FILTER_PRED ? 32 : 64); w <<= 1)
+            if (check_func(c->intra_pred[mode], "intra_pred_%s_w%d_%dbpc",
+                           intra_pred_mode_names[mode], w, bpc))
+            {
+                for (int h = imax(w / 4, 4); h <= imin(w * 4,
+                    (mode == FILTER_PRED ? 32 : 64)); h <<= 1)
+                {
+                    const ptrdiff_t stride = w * sizeof(pixel);
+
+                    int a = 0, maxw = 0, maxh = 0;
+                    if (mode >= Z1_PRED && mode <= Z3_PRED) { /* angle */
+                        /* bits 0-8 = angle in degrees; bits 9-10 (0x600)
+                         * are randomized too — presumably edge-filter
+                         * flags, TODO confirm against src/ipred. */
+                        a = (90 * (mode - Z1_PRED) + z_angles[rnd() % 27]) |
+                            (rnd() & 0x600);
+                        if (mode == Z2_PRED) {
+                            /* random clip rectangle; half the time allow
+                             * values larger than the block itself */
+                            maxw = rnd(), maxh = rnd();
+                            maxw = 1 + (maxw & (maxw & 4096 ? 4095 : w - 1));
+                            maxh = 1 + (maxh & (maxh & 4096 ? 4095 : h - 1));
+                        }
+                    } else if (mode == FILTER_PRED) /* filter_idx */
+                        a = (rnd() % 5) | (rnd() & ~511);
+
+                    int bitdepth_max;
+                    if (bpc == 16)
+                        bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+                    else
+                        bitdepth_max = (1 << bpc) - 1;
+
+                    /* fill left (2h) and top (2w) neighbors plus corner */
+                    for (int i = -h * 2; i <= w * 2; i++)
+                        topleft[i] = rnd() & bitdepth_max;
+
+                    call_ref(c_dst, stride, topleft, w, h, a, maxw, maxh
+                             HIGHBD_TAIL_SUFFIX);
+                    call_new(a_dst, stride, topleft, w, h, a, maxw, maxh
+                             HIGHBD_TAIL_SUFFIX);
+                    /* on mismatch, print the randomized parameters so the
+                     * failing case can be reproduced */
+                    if (checkasm_check_pixel(c_dst, stride, a_dst, stride,
+                                             w, h, "dst"))
+                    {
+                        if (mode == Z1_PRED || mode == Z3_PRED)
+                            fprintf(stderr, "angle = %d (0x%03x)\n",
+                                    a & 0x1ff, a & 0x600);
+                        else if (mode == Z2_PRED)
+                            fprintf(stderr, "angle = %d (0x%03x), "
+                                            "max_width = %d, max_height = %d\n",
+                                    a & 0x1ff, a & 0x600, maxw, maxh);
+                        else if (mode == FILTER_PRED)
+                            fprintf(stderr, "filter_idx = %d\n", a & 0x1ff);
+                    }
+
+                    bench_new(a_dst, stride, topleft, w, h, a, 128, 128
+                              HIGHBD_TAIL_SUFFIX);
+                }
+            }
+    }
+    report("intra_pred");
+}
+
+/* Test the CfL AC extraction functions for all three chroma layouts,
+ * covering every legal w_pad/h_pad padding combination. The luma plane
+ * uses a fixed stride of 32 pixels. */
+static void check_cfl_ac(Dav1dIntraPredDSPContext *const c) {
+    ALIGN_STK_64(int16_t, c_dst, 32 * 32,);
+    ALIGN_STK_64(int16_t, a_dst, 32 * 32,);
+    ALIGN_STK_64(pixel, luma, 32 * 32,);
+
+    declare_func(void, int16_t *ac, const pixel *y, ptrdiff_t stride,
+                 int w_pad, int h_pad, int cw, int ch);
+
+    for (int layout = 1; layout <= DAV1D_PIXEL_LAYOUT_I444; layout++) {
+        const int ss_ver = layout == DAV1D_PIXEL_LAYOUT_I420;
+        const int ss_hor = layout != DAV1D_PIXEL_LAYOUT_I444;
+        /* padding advances in steps of 1 or 2 depending on subsampling */
+        const int h_step = 2 >> ss_hor, v_step = 2 >> ss_ver;
+        for (int w = 4; w <= (32 >> ss_hor); w <<= 1)
+            if (check_func(c->cfl_ac[layout - 1], "cfl_ac_%s_w%d_%dbpc",
+                           cfl_ac_names[layout - 1], w, BITDEPTH))
+            {
+                for (int h = imax(w / 4, 4);
+                     h <= imin(w * 4, (32 >> ss_ver)); h <<= 1)
+                {
+                    const ptrdiff_t stride = 32 * sizeof(pixel);
+                    for (int w_pad = imax((w >> 2) - h_step, 0);
+                         w_pad >= 0; w_pad -= h_step)
+                    {
+                        for (int h_pad = imax((h >> 2) - v_step, 0);
+                             h_pad >= 0; h_pad -= v_step)
+                        {
+#if BITDEPTH == 16
+                            const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+                            const int bitdepth_max = 0xff;
+#endif
+                            /* fill the (possibly subsampled-larger) luma
+                             * source area with random pixels */
+                            for (int y = 0; y < (h << ss_ver); y++)
+                                for (int x = 0; x < (w << ss_hor); x++)
+                                    luma[y * 32 + x] = rnd() & bitdepth_max;
+
+                            call_ref(c_dst, luma, stride, w_pad, h_pad, w, h);
+                            call_new(a_dst, luma, stride, w_pad, h_pad, w, h);
+                            checkasm_check(int16_t, c_dst, w * sizeof(*c_dst),
+                                           a_dst, w * sizeof(*a_dst),
+                                           w, h, "dst");
+                        }
+                    }
+
+                    bench_new(a_dst, luma, stride, 0, 0, w, h);
+                }
+            }
+    }
+    report("cfl_ac");
+}
+
+/* Test the CfL prediction functions (DC-based modes only) with random
+ * neighbors, a random signed alpha, and a zero-mean AC buffer. */
+static void check_cfl_pred(Dav1dIntraPredDSPContext *const c) {
+    ALIGN_STK_64(pixel, c_dst, 32 * 32,);
+    ALIGN_STK_64(pixel, a_dst, 32 * 32,);
+    ALIGN_STK_64(int16_t, ac, 32 * 32,);
+    /* same left/top layout as in check_intra_pred() */
+    ALIGN_STK_64(pixel, topleft_buf, 257,);
+    pixel *const topleft = topleft_buf + 128;
+
+    declare_func(void, pixel *dst, ptrdiff_t stride, const pixel *topleft,
+                 int width, int height, const int16_t *ac, int alpha
+                 HIGHBD_DECL_SUFFIX);
+
+    /* mode sequence: DC_PRED (0), then TOP/LEFT/128 (skip 1 after 0) */
+    for (int mode = 0; mode <= DC_128_PRED; mode += 1 + 2 * !mode)
+    for (int w = 4; w <= 32; w <<= 1)
+        if (check_func(c->cfl_pred[mode], "cfl_pred_%s_w%d_%dbpc",
+                       cfl_pred_mode_names[mode], w, BITDEPTH))
+        {
+            for (int h = imax(w / 4, 4); h <= imin(w * 4, 32); h <<= 1)
+            {
+#if BITDEPTH == 16
+                const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+                const int bitdepth_max = 0xff;
+#endif
+
+                const ptrdiff_t stride = w * sizeof(pixel);
+
+                /* alpha in [-16,-1] or [1,16], never zero */
+                int alpha = ((rnd() & 15) + 1) * (1 - (rnd() & 2));
+
+                for (int i = -h * 2; i <= w * 2; i++)
+                    topleft[i] = rnd() & bitdepth_max;
+
+                /* make the AC buffer zero-mean; the initial w*h/2 biases
+                 * the integer division below towards rounding */
+                int luma_avg = w * h >> 1;
+                for (int i = 0; i < w * h; i++)
+                    luma_avg += ac[i] = rnd() & (bitdepth_max << 3);
+                luma_avg /= w * h;
+                for (int i = 0; i < w * h; i++)
+                    ac[i] -= luma_avg;
+
+                call_ref(c_dst, stride, topleft, w, h, ac, alpha
+                         HIGHBD_TAIL_SUFFIX);
+                call_new(a_dst, stride, topleft, w, h, ac, alpha
+                         HIGHBD_TAIL_SUFFIX);
+                checkasm_check_pixel(c_dst, stride, a_dst, stride,
+                                     w, h, "dst");
+
+                bench_new(a_dst, stride, topleft, w, h, ac, alpha
+                          HIGHBD_TAIL_SUFFIX);
+            }
+        }
+    report("cfl_pred");
+}
+
+/* Test palette prediction: an 8-entry random palette and a buffer of
+ * random 3-bit indices, compared between the C and asm versions. */
+static void check_pal_pred(Dav1dIntraPredDSPContext *const c) {
+    ALIGN_STK_64(pixel, c_dst, 64 * 64,);
+    ALIGN_STK_64(pixel, a_dst, 64 * 64,);
+    ALIGN_STK_64(uint8_t, idx, 64 * 64,);
+    ALIGN_STK_16(uint16_t, pal, 8,);
+
+    declare_func(void, pixel *dst, ptrdiff_t stride, const uint16_t *pal,
+                 const uint8_t *idx, int w, int h);
+
+    for (int w = 4; w <= 64; w <<= 1)
+        if (check_func(c->pal_pred, "pal_pred_w%d_%dbpc", w, BITDEPTH))
+            for (int h = imax(w / 4, 4); h <= imin(w * 4, 64); h <<= 1)
+            {
+#if BITDEPTH == 16
+                const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+                const int bitdepth_max = 0xff;
+#endif
+                const ptrdiff_t stride = w * sizeof(pixel);
+
+                for (int i = 0; i < 8; i++)
+                    pal[i] = rnd() & bitdepth_max;
+
+                for (int i = 0; i < w * h; i++)
+                    idx[i] = rnd() & 7;
+
+                call_ref(c_dst, stride, pal, idx, w, h);
+                call_new(a_dst, stride, pal, idx, w, h);
+                checkasm_check_pixel(c_dst, stride, a_dst, stride, w, h, "dst");
+
+                bench_new(a_dst, stride, pal, idx, w, h);
+            }
+    report("pal_pred");
+}
+
+/* Top-level intra-prediction checkasm entry point for this bitdepth.
+ * The sub-test order defines the shared rnd() stream; keep it stable. */
+void bitfn(checkasm_check_ipred)(void) {
+    Dav1dIntraPredDSPContext c;
+    bitfn(dav1d_intra_pred_dsp_init)(&c);
+
+    check_intra_pred(&c);
+    check_cfl_ac(&c);
+    check_cfl_pred(&c);
+    check_pal_pred(&c);
+}
diff --git a/third_party/dav1d/tests/checkasm/itx.c b/third_party/dav1d/tests/checkasm/itx.c
new file mode 100644
index 0000000000..01f5e05338
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/itx.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+
+#include <math.h>
+
+#include "src/itx.h"
+#include "src/levels.h"
+#include "src/scan.h"
+#include "src/tables.h"
+
+#ifndef M_PI
+#define M_PI 3.14159265358979323846
+#endif
+#ifndef M_SQRT1_2
+#define M_SQRT1_2 0.707106781186547524401
+#endif
+
+/* The five 1-d transform kinds that compose each 2-d transform. */
+enum Tx1D { DCT, ADST, FLIPADST, IDENTITY, WHT };
+
+/* Per 2-d transform type: { horizontal (row) 1-d type, vertical (column)
+ * 1-d type }; e.g. V_DCT = { IDENTITY, DCT } is identity along rows and
+ * DCT down the columns. */
+static const uint8_t itx_1d_types[N_TX_TYPES_PLUS_LL][2] = {
+    [DCT_DCT] = { DCT, DCT },
+    [ADST_DCT] = { DCT, ADST },
+    [DCT_ADST] = { ADST, DCT },
+    [ADST_ADST] = { ADST, ADST },
+    [FLIPADST_DCT] = { DCT, FLIPADST },
+    [DCT_FLIPADST] = { FLIPADST, DCT },
+    [FLIPADST_FLIPADST] = { FLIPADST, FLIPADST },
+    [ADST_FLIPADST] = { FLIPADST, ADST },
+    [FLIPADST_ADST] = { ADST, FLIPADST },
+    [IDTX] = { IDENTITY, IDENTITY },
+    [V_DCT] = { IDENTITY, DCT },
+    [H_DCT] = { DCT, IDENTITY },
+    [V_ADST] = { IDENTITY, ADST },
+    [H_ADST] = { ADST, IDENTITY },
+    [V_FLIPADST] = { IDENTITY, FLIPADST },
+    [H_FLIPADST] = { FLIPADST, IDENTITY },
+    [WHT_WHT] = { WHT, WHT },
+};
+
+/* Printable names for the 1-d transform kinds above. */
+static const char *const itx_1d_names[5] = {
+    [DCT] = "dct",
+    [ADST] = "adst",
+    [FLIPADST] = "flipadst",
+    [IDENTITY] = "identity",
+    [WHT] = "wht"
+};
+
+/* Forward-transform output scale indexed by log2(w*h) - 4, chosen so the
+ * generated coefficients land in a realistic range for the inverse. */
+static const double scaling_factors[9] = {
+    4.0000,             /* 4x4                          */
+    4.0000 * M_SQRT1_2, /* 4x8   8x4                    */
+    2.0000,             /* 4x16  8x8   16x4             */
+    2.0000 * M_SQRT1_2, /* 8x16  16x8                   */
+    1.0000,             /* 8x32  16x16 32x8             */
+    0.5000 * M_SQRT1_2, /* 16x32 32x16                  */
+    0.2500,             /* 16x64 32x32 64x16            */
+    0.1250 * M_SQRT1_2, /* 32x64 64x32                  */
+    0.0625,             /* 64x64                        */
+};
+
+/* FIXME: Ensure that those forward transforms are similar to the real AV1
+ * transforms. The FLIPADST currently uses the ADST forward transform for
+ * example which is obviously "incorrect", but we're just using it for now
+ * since it does produce coefficients in the correct range at least. */
+
+/* DCT-II */
+static void fdct_1d(double *const out, const double *const in, const int sz) {
+ for (int i = 0; i < sz; i++) {
+ out[i] = 0.0;
+ for (int j = 0; j < sz; j++)
+ out[i] += in[j] * cos(M_PI * (2 * j + 1) * i / (sz * 2.0));
+ }
+ out[0] *= M_SQRT1_2;
+}
+
+/* See "Towards jointly optimal spatial prediction and adaptive transform in
+ * video/image coding", by J. Han, A. Saxena, and K. Rose
+ * IEEE Proc. ICASSP, pp. 726-729, Mar. 2010.
+ * and "A Butterfly Structured Design of The Hybrid Transform Coding Scheme",
+ * by Jingning Han, Yaowu Xu, and Debargha Mukherjee
+ * http://research.google.com/pubs/archive/41418.pdf
+ */
+static void fadst_1d(double *const out, const double *const in, const int sz) {
+ for (int i = 0; i < sz; i++) {
+ out[i] = 0.0;
+ for (int j = 0; j < sz; j++)
+ out[i] += in[j] * sin(M_PI *
+ (sz == 4 ? ( j + 1) * (2 * i + 1) / (8.0 + 1.0) :
+ (2 * j + 1) * (2 * i + 1) / (sz * 4.0)));
+ }
+}
+
/* 4-point Walsh-Hadamard forward transform (for the WHT_WHT type). */
static void fwht4_1d(double *const out, const double *const in)
{
    const double s  = in[0] + in[1];  /* sum of the outer pair */
    const double d  = in[3] - in[2];  /* difference of the inner pair */
    const double m  = (s - d) * 0.5;  /* shared midpoint term */
    const double u1 = m - in[1];
    const double u2 = m - in[2];
    out[0] = s - u2;
    out[1] = u2;
    out[2] = d + u1;
    out[3] = u1;
}
+
+/* Zero every coefficient past a randomly chosen eob lying between the
+ * sub_low and sub_high scan-order boundaries, and return that eob. */
+static int copy_subcoefs(coef *coeff,
+                         const enum RectTxfmSize tx, const enum TxfmType txtp,
+                         const int sw, const int sh, const int subsh)
+{
+    /* copy the topleft coefficients such that the return value (being the
+     * coefficient scantable index for the eob token) guarantees that only
+     * the topleft $sub out of $sz (where $sz >= $sub) coefficients in both
+     * dimensions are non-zero. This leads to branching to specific optimized
+     * simd versions (e.g. dc-only) so that we get full asm coverage in this
+     * test */
+    const uint16_t *const scan = dav1d_scans[tx][dav1d_tx_type_class[txtp]];
+    const int sub_high = subsh > 0 ? subsh * 8 - 1 : 0;
+    const int sub_low = subsh > 1 ? sub_high - 8 : 0;
+    int n, eob;
+
+    for (n = 0, eob = 0; n < sw * sh; n++) {
+        const int rc = scan[n];
+        const int rcx = rc % sh, rcy = rc / sh;
+
+        /* Pick a random eob within this sub-itx */
+        if (rcx > sub_high || rcy > sub_high) {
+            break; /* upper boundary */
+        } else if (!eob && (rcx > sub_low || rcy > sub_low))
+            eob = n; /* lower boundary */
+    }
+
+    if (eob)
+        eob += rnd() % (n - eob - 1);
+    for (n = eob + 1; n < sw * sh; n++)
+        coeff[scan[n]] = 0;
+    /* fill the area beyond the coded sw*sh block with random values;
+     * presumably to catch implementations reading past the coded area —
+     * NOTE(review): confirm intent */
+    for (; n < 32 * 32; n++)
+        coeff[n] = rnd();
+    return eob;
+}
+
+static int ftx(coef *const buf, const enum RectTxfmSize tx,
+ const enum TxfmType txtp, const int w, const int h,
+ const int subsh, const int bitdepth_max)
+{
+ double out[64 * 64], temp[64 * 64];
+ const double scale = scaling_factors[ctz(w * h) - 4];
+ const int sw = imin(w, 32), sh = imin(h, 32);
+
+ for (int i = 0; i < h; i++) {
+ double in[64], temp_out[64];
+
+ for (int i = 0; i < w; i++)
+ in[i] = (rnd() & (2 * bitdepth_max + 1)) - bitdepth_max;
+
+ switch (itx_1d_types[txtp][0]) {
+ case DCT:
+ fdct_1d(temp_out, in, w);
+ break;
+ case ADST:
+ case FLIPADST:
+ fadst_1d(temp_out, in, w);
+ break;
+ case WHT:
+ fwht4_1d(temp_out, in);
+ break;
+ case IDENTITY:
+ memcpy(temp_out, in, w * sizeof(*temp_out));
+ break;
+ }
+
+ for (int j = 0; j < w; j++)
+ temp[j * h + i] = temp_out[j] * scale;
+ }
+
+ for (int i = 0; i < w; i++) {
+ switch (itx_1d_types[txtp][0]) {
+ case DCT:
+ fdct_1d(&out[i * h], &temp[i * h], h);
+ break;
+ case ADST:
+ case FLIPADST:
+ fadst_1d(&out[i * h], &temp[i * h], h);
+ break;
+ case WHT:
+ fwht4_1d(&out[i * h], &temp[i * h]);
+ break;
+ case IDENTITY:
+ memcpy(&out[i * h], &temp[i * h], h * sizeof(*out));
+ break;
+ }
+ }
+
+ for (int y = 0; y < sh; y++)
+ for (int x = 0; x < sw; x++)
+ buf[y * sw + x] = (coef) (out[y * w + x] + 0.5);
+
+ return copy_subcoefs(buf, tx, txtp, sw, sh, subsh);
+}
+
+/* Test every inverse transform add function: generate coefficients with
+ * ftx(), run the C reference and the asm candidate on identical copies,
+ * and compare both the output pixels and the post-call coefficient
+ * buffers (the two versions must leave them in the same state). */
+void bitfn(checkasm_check_itx)(void) {
+#if BITDEPTH == 16
+    const int bpc_min = 10, bpc_max = 12;
+#else
+    const int bpc_min = 8, bpc_max = 8;
+#endif
+
+    /* two copies of the coefficients: [0] for ref, [1] for asm */
+    ALIGN_STK_64(coef, coeff, 2, [32 * 32]);
+    ALIGN_STK_64(pixel, c_dst, 64 * 64,);
+    ALIGN_STK_64(pixel, a_dst, 64 * 64,);
+    Dav1dInvTxfmDSPContext c = { { { 0 } } }; /* Zero unused function pointer elements. */
+
+    static const uint8_t txfm_size_order[N_RECT_TX_SIZES] = {
+        TX_4X4, RTX_4X8, RTX_4X16,
+        RTX_8X4, TX_8X8, RTX_8X16, RTX_8X32,
+        RTX_16X4, RTX_16X8, TX_16X16, RTX_16X32, RTX_16X64,
+        RTX_32X8, RTX_32X16, TX_32X32, RTX_32X64,
+        RTX_64X16, RTX_64X32, TX_64X64
+    };
+
+    /* number of sub-eob iterations per max(log2(w),log2(h)) */
+    static const uint8_t subsh_iters[5] = { 2, 2, 3, 5, 5 };
+
+    declare_func(void, pixel *dst, ptrdiff_t dst_stride, coef *coeff, int eob
+                 HIGHBD_DECL_SUFFIX);
+
+    for (int i = 0; i < N_RECT_TX_SIZES; i++) {
+        const enum RectTxfmSize tx = txfm_size_order[i];
+        const int w = dav1d_txfm_dimensions[tx].w * 4;
+        const int h = dav1d_txfm_dimensions[tx].h * 4;
+        const int subsh_max = subsh_iters[imax(dav1d_txfm_dimensions[tx].lw,
+                                               dav1d_txfm_dimensions[tx].lh)];
+
+        for (int bpc = bpc_min; bpc <= bpc_max; bpc += 2) {
+            bitfn(dav1d_itx_dsp_init)(&c, bpc);
+            for (enum TxfmType txtp = 0; txtp < N_TX_TYPES_PLUS_LL; txtp++)
+                for (int subsh = 0; subsh < subsh_max; subsh++)
+                    if (check_func(c.itxfm_add[tx][txtp],
+                                   "inv_txfm_add_%dx%d_%s_%s_%d_%dbpc",
+                                   w, h, itx_1d_names[itx_1d_types[txtp][0]],
+                                   itx_1d_names[itx_1d_types[txtp][1]], subsh,
+                                   bpc))
+                    {
+                        const int bitdepth_max = (1 << bpc) - 1;
+                        const int eob = ftx(coeff[0], tx, txtp, w, h, subsh, bitdepth_max);
+                        memcpy(coeff[1], coeff[0], sizeof(*coeff));
+
+                        /* identical random base image for both versions */
+                        for (int j = 0; j < w * h; j++)
+                            c_dst[j] = a_dst[j] = rnd() & bitdepth_max;
+
+                        call_ref(c_dst, w * sizeof(*c_dst), coeff[0], eob
+                                 HIGHBD_TAIL_SUFFIX);
+                        call_new(a_dst, w * sizeof(*c_dst), coeff[1], eob
+                                 HIGHBD_TAIL_SUFFIX);
+
+                        checkasm_check_pixel(c_dst, w * sizeof(*c_dst),
+                                             a_dst, w * sizeof(*a_dst),
+                                             w, h, "dst");
+                        /* both implementations must clobber (or preserve)
+                         * the coefficient buffer identically */
+                        if (memcmp(coeff[0], coeff[1], sizeof(*coeff)))
+                            fail();
+
+                        bench_new(a_dst, w * sizeof(*c_dst), coeff[0], eob
+                                  HIGHBD_TAIL_SUFFIX);
+                    }
+        }
+        report("add_%dx%d", w, h);
+    }
+}
diff --git a/third_party/dav1d/tests/checkasm/loopfilter.c b/third_party/dav1d/tests/checkasm/loopfilter.c
new file mode 100644
index 0000000000..aabf54ff90
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/loopfilter.c
@@ -0,0 +1,204 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+
+#include <string.h>
+
+#include "src/levels.h"
+#include "src/loopfilter.h"
+
+/* Fill the 16 pixels perpendicular to a filtered edge (dst[-8*stride]
+ * through dst[7*stride]) with one of four randomly chosen patterns —
+ * unfiltered noise, long flat, short flat, or normal/hev — so that every
+ * loop-filter strength decision path gets exercised. E/I/H are the edge,
+ * inner and hev thresholds in 8-bit units, scaled to the bitdepth here. */
+static void init_lpf_border(pixel *const dst, const ptrdiff_t stride,
+                            int E, int I, int H, const int bitdepth_max)
+{
+    const int bitdepth_min_8 = bitdepth_from_max(bitdepth_max) - 8;
+    const int F = 1 << bitdepth_min_8; /* one 8-bit step at this bitdepth */
+    E <<= bitdepth_min_8;
+    I <<= bitdepth_min_8;
+    H <<= bitdepth_min_8;
+
+    const int filter_type = rnd() % 4;
+    /* random step across the edge, in [-2*(E+2), 2*(E+2)) */
+    const int edge_diff = rnd() % ((E + 2) * 4) - 2 * (E + 2);
+    switch (filter_type) {
+    case 0: // random, unfiltered
+        for (int i = -8; i < 8; i++)
+            dst[i * stride] = rnd() & bitdepth_max;
+        break;
+    case 1: // long flat
+        dst[-8 * stride] = rnd() & bitdepth_max;
+        dst[+7 * stride] = rnd() & bitdepth_max;
+        dst[+0 * stride] = rnd() & bitdepth_max;
+        dst[-1 * stride] = iclip_pixel(dst[+0 * stride] + edge_diff);
+        for (int i = 1; i < 7; i++) {
+            dst[-(1 + i) * stride] = iclip_pixel(dst[-1 * stride] +
+                                                 rnd() % (2 * (F + 1)) - (F + 1));
+            dst[+(0 + i) * stride] = iclip_pixel(dst[+0 * stride] +
+                                                 rnd() % (2 * (F + 1)) - (F + 1));
+        }
+        break;
+    case 2: // short flat
+        for (int i = 4; i < 8; i++) {
+            dst[-(1 + i) * stride] = rnd() & bitdepth_max;
+            dst[+(0 + i) * stride] = rnd() & bitdepth_max;
+        }
+        dst[+0 * stride] = rnd() & bitdepth_max;
+        dst[-1 * stride] = iclip_pixel(dst[+0 * stride] + edge_diff);
+        for (int i = 1; i < 4; i++) {
+            dst[-(1 + i) * stride] = iclip_pixel(dst[-1 * stride] +
+                                                 rnd() % (2 * (F + 1)) - (F + 1));
+            dst[+(0 + i) * stride] = iclip_pixel(dst[+0 * stride] +
+                                                 rnd() % (2 * (F + 1)) - (F + 1));
+        }
+        break;
+    case 3: // normal or hev
+        for (int i = 4; i < 8; i++) {
+            dst[-(1 + i) * stride] = rnd() & bitdepth_max;
+            dst[+(0 + i) * stride] = rnd() & bitdepth_max;
+        }
+        dst[+0 * stride] = rnd() & bitdepth_max;
+        dst[-1 * stride] = iclip_pixel(dst[+0 * stride] + edge_diff);
+        /* steps bounded by the inner limit I, so the filter may or may
+         * not trigger depending on the random values */
+        for (int i = 1; i < 4; i++) {
+            dst[-(1 + i) * stride] = iclip_pixel(dst[-(0 + i) * stride] +
+                                                 rnd() % (2 * (I + 1)) - (I + 1));
+            dst[+(0 + i) * stride] = iclip_pixel(dst[+(i - 1) * stride] +
+                                                 rnd() % (2 * (I + 1)) - (I + 1));
+        }
+        break;
+    }
+}
+
+/* Test one superblock loop-filter function. dir == 1 tests the "v"
+ * (horizontal-edge) variants over a 128x16 strip; dir == 0 tests the
+ * "h" (vertical-edge) variants over a 16x128 strip. lf_idx selects the
+ * filter-level entry within each 4-byte l[] record. */
+static void check_lpf_sb(loopfilter_sb_fn fn, const char *const name,
+                         const int n_blks, const int lf_idx,
+                         const int is_chroma, const int dir)
+{
+    ALIGN_STK_64(pixel, c_dst_mem, 128 * 16,);
+    ALIGN_STK_64(pixel, a_dst_mem, 128 * 16,);
+
+    declare_func(void, pixel *dst, ptrdiff_t dst_stride, const uint32_t *mask,
+                 const uint8_t (*l)[4], ptrdiff_t b4_stride,
+                 const Av1FilterLUT *lut, int w HIGHBD_DECL_SUFFIX);
+
+    /* dst is offset 8 pixels past the filtered edge in both layouts */
+    pixel *a_dst, *c_dst;
+    ptrdiff_t stride, b4_stride;
+    int w, h;
+    if (dir) {
+        a_dst = a_dst_mem + 128 * 8;
+        c_dst = c_dst_mem + 128 * 8;
+        w = 128;
+        h = 16;
+        b4_stride = 32;
+    } else {
+        a_dst = a_dst_mem + 8;
+        c_dst = c_dst_mem + 8;
+        w = 16;
+        h = 128;
+        b4_stride = 2;
+    }
+    stride = w * sizeof(pixel);
+
+    /* Build the edge (e) and inner (i) limit LUT from a random sharpness,
+     * following the AV1 loop-filter limit derivation. */
+    Av1FilterLUT lut;
+    const int sharp = rnd() & 7;
+    for (int level = 0; level < 64; level++) {
+        int limit = level;
+
+        if (sharp > 0) {
+            limit >>= (sharp + 3) >> 2;
+            limit = imin(limit, 9 - sharp);
+        }
+        limit = imax(limit, 1);
+
+        lut.i[level] = limit;
+        lut.e[level] = 2 * (level + 2) + limit;
+    }
+    lut.sharp[0] = (sharp + 3) >> 2;
+    lut.sharp[1] = sharp ? 9 - sharp : 0xff;
+
+    const int n_strengths = is_chroma ? 2 : 3;
+    for (int i = 0; i < n_strengths; i++) {
+        if (check_func(fn, "%s_w%d_%dbpc", name,
+                       is_chroma ? 4 + 2 * i : 4 << i, BITDEPTH))
+        {
+            /* vmask bit j enables filtering of block j at strength idx */
+            uint32_t vmask[4] = { 0 };
+            uint8_t l[32 * 2][4];
+
+            for (int j = 0; j < n_blks; j++) {
+                const int idx = rnd() % (i + 2);
+                if (idx) vmask[idx - 1] |= 1U << j;
+                if (dir) {
+                    l[j][lf_idx] = rnd() & 63;
+                    l[j + 32][lf_idx] = rnd() & 63;
+                } else {
+                    l[j * 2][lf_idx] = rnd() & 63;
+                    l[j * 2 + 1][lf_idx] = rnd() & 63;
+                }
+            }
+#if BITDEPTH == 16
+            const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+            const int bitdepth_max = 0xff;
+#endif
+
+            /* Seed each 4-pixel line with a border pattern built for the
+             * level the filter will pick: the second l[] entry if nonzero,
+             * else the first — TODO confirm against src/loopfilter. */
+            for (int i = 0; i < 4 * n_blks; i++) {
+                const int x = i >> 2;
+                int L;
+                if (dir) {
+                    L = l[32 + x][lf_idx] ? l[32 + x][lf_idx] : l[x][lf_idx];
+                } else {
+                    L = l[2 * x + 1][lf_idx] ? l[2 * x + 1][lf_idx] : l[2 * x][lf_idx];
+                }
+                init_lpf_border(c_dst + i * (dir ? 1 : 16), dir ? 128 : 1,
+                                lut.e[L], lut.i[L], L >> 4, bitdepth_max);
+            }
+            memcpy(a_dst_mem, c_dst_mem, 128 * sizeof(pixel) * 16);
+
+            call_ref(c_dst, stride,
+                     vmask, (const uint8_t(*)[4]) &l[dir ? 32 : 1][lf_idx], b4_stride,
+                     &lut, n_blks HIGHBD_TAIL_SUFFIX);
+            call_new(a_dst, stride,
+                     vmask, (const uint8_t(*)[4]) &l[dir ? 32 : 1][lf_idx], b4_stride,
+                     &lut, n_blks HIGHBD_TAIL_SUFFIX);
+
+            /* compare the full buffers, including unfiltered borders */
+            checkasm_check_pixel(c_dst_mem, stride, a_dst_mem, stride,
+                                 w, h, "dst");
+            bench_new(a_dst, stride,
+                      vmask, (const uint8_t(*)[4]) &l[dir ? 32 : 1][lf_idx], b4_stride,
+                      &lut, n_blks HIGHBD_TAIL_SUFFIX);
+        }
+    }
+    report(name);
+}
+
+/* Top-level loop-filter checkasm entry point: test the four superblock
+ * filter variants (luma/chroma x horizontal/vertical edges). */
+void bitfn(checkasm_check_loopfilter)(void) {
+    Dav1dLoopFilterDSPContext c;
+
+    bitfn(dav1d_loop_filter_dsp_init)(&c);
+
+    check_lpf_sb(c.loop_filter_sb[0][0], "lpf_h_sb_y", 32, 0, 0, 0);
+    check_lpf_sb(c.loop_filter_sb[0][1], "lpf_v_sb_y", 32, 1, 0, 1);
+    check_lpf_sb(c.loop_filter_sb[1][0], "lpf_h_sb_uv", 16, 2, 1, 0);
+    check_lpf_sb(c.loop_filter_sb[1][1], "lpf_v_sb_uv", 16, 2, 1, 1);
+}
diff --git a/third_party/dav1d/tests/checkasm/looprestoration.c b/third_party/dav1d/tests/checkasm/looprestoration.c
new file mode 100644
index 0000000000..c76b020e6e
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/looprestoration.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+
+#include <string.h>
+
+#include "src/levels.h"
+#include "src/looprestoration.h"
+#include "src/tables.h"
+
+static void init_tmp(pixel *buf, const ptrdiff_t stride,
+ const int w, const int h, const int bitdepth_max)
+{
+ for (int y = 0; y < h; y++) {
+ for (int x = 0; x < w; x++)
+ buf[x] = rnd() & bitdepth_max;
+ buf += PXSTRIDE(stride);
+ }
+}
+
+/* Test the Wiener restoration filter for luma and chroma with random
+ * 7-tap symmetric filters, over all 16 LR edge-flag combinations.
+ * Buffers are 448x64 with a 32-pixel border on each side of the row. */
+static void check_wiener(Dav1dLoopRestorationDSPContext *const c, const int bpc) {
+    ALIGN_STK_64(pixel, c_dst, 448 * 64,);
+    ALIGN_STK_64(pixel, a_dst, 448 * 64,);
+    ALIGN_STK_64(pixel, h_edge, 448 * 8,);
+    pixel left[64][4];
+
+    declare_func(void, pixel *dst, ptrdiff_t dst_stride,
+                 const pixel (*const left)[4],
+                 const pixel *lpf, ptrdiff_t lpf_stride,
+                 int w, int h, const int16_t filterh[7],
+                 const int16_t filterv[7], enum LrEdgeFlags edges
+                 HIGHBD_DECL_SUFFIX);
+
+    for (int pl = 0; pl < 2; pl++) {
+        if (check_func(c->wiener, "wiener_%s_%dbpc",
+                       pl ? "chroma" : "luma", bpc))
+        {
+            int16_t filter[2][3], filter_v[7], filter_h[7];
+
+            /* chroma uses a 5-tap filter: the outermost tap is zero */
+            filter[0][0] = pl ? 0 : (rnd() & 15) - 5;
+            filter[0][1] = (rnd() & 31) - 23;
+            filter[0][2] = (rnd() & 63) - 17;
+            filter[1][0] = pl ? 0 : (rnd() & 15) - 5;
+            filter[1][1] = (rnd() & 31) - 23;
+            filter[1][2] = (rnd() & 63) - 17;
+
+            /* mirror into symmetric 7-tap form; the center tap cancels
+             * the side taps (the +128 centre weight is implicit in the
+             * implementation — NOTE(review): confirm) */
+            filter_h[0] = filter_h[6] = filter[0][0];
+            filter_h[1] = filter_h[5] = filter[0][1];
+            filter_h[2] = filter_h[4] = filter[0][2];
+            filter_h[3] = -((filter_h[0] + filter_h[1] + filter_h[2]) * 2);
+
+            filter_v[0] = filter_v[6] = filter[1][0];
+            filter_v[1] = filter_v[5] = filter[1][1];
+            filter_v[2] = filter_v[4] = filter[1][2];
+            filter_v[3] = -((filter_v[0] + filter_v[1] + filter_v[2]) * 2);
+
+            const int base_w = 1 + (rnd() % 384);
+            const int base_h = 1 + (rnd() & 63);
+            const int bitdepth_max = (1 << bpc) - 1;
+
+            init_tmp(c_dst, 448 * sizeof(pixel), 448, 64, bitdepth_max);
+            init_tmp(h_edge, 448 * sizeof(pixel), 448, 8, bitdepth_max);
+            init_tmp((pixel *) left, 4 * sizeof(pixel), 4, 64, bitdepth_max);
+
+            for (enum LrEdgeFlags edges = 0; edges <= 0xf; edges++) {
+                /* full width/height when the corresponding edge exists */
+                const int w = edges & LR_HAVE_RIGHT ? 256 : base_w;
+                const int h = edges & LR_HAVE_BOTTOM ? 64 : base_h;
+
+                memcpy(a_dst, c_dst, 448 * 64 * sizeof(pixel));
+
+                call_ref(c_dst + 32, 448 * sizeof(pixel), left,
+                         h_edge + 32, 448 * sizeof(pixel),
+                         w, h, filter_h, filter_v, edges HIGHBD_TAIL_SUFFIX);
+                call_new(a_dst + 32, 448 * sizeof(pixel), left,
+                         h_edge + 32, 448 * sizeof(pixel),
+                         w, h, filter_h, filter_v, edges HIGHBD_TAIL_SUFFIX);
+                checkasm_check_pixel(c_dst + 32, 448 * sizeof(pixel),
+                                     a_dst + 32, 448 * sizeof(pixel),
+                                     w, h, "dst");
+            }
+            bench_new(a_dst + 32, 448 * sizeof(pixel), left,
+                      h_edge + 32, 448 * sizeof(pixel),
+                      256, 64, filter_h, filter_v, 0xf HIGHBD_TAIL_SUFFIX);
+        }
+    }
+}
+
+/* Test self-guided restoration: sgr_idx 14 = 5x5, 10 = 3x3, 6 = mix,
+ * each over all 16 LR edge-flag combinations. Same buffer layout as
+ * check_wiener(). */
+static void check_sgr(Dav1dLoopRestorationDSPContext *const c, const int bpc) {
+    ALIGN_STK_64(pixel, c_dst, 448 * 64,);
+    ALIGN_STK_64(pixel, a_dst, 448 * 64,);
+    ALIGN_STK_64(pixel, h_edge, 448 * 8,);
+    pixel left[64][4];
+
+    declare_func(void, pixel *dst, ptrdiff_t dst_stride,
+                 const pixel (*const left)[4],
+                 const pixel *lpf, ptrdiff_t lpf_stride,
+                 int w, int h, int sgr_idx,
+                 const int16_t sgr_wt[7], enum LrEdgeFlags edges
+                 HIGHBD_DECL_SUFFIX);
+
+    for (int sgr_idx = 14; sgr_idx >= 6; sgr_idx -= 4) {
+        if (check_func(c->selfguided, "selfguided_%s_%dbpc",
+                       sgr_idx == 6 ? "mix" : sgr_idx == 10 ? "3x3" : "5x5", bpc))
+        {
+            /* random weights; a pass whose radius is 0 in the parameter
+             * table gets a weight derived from the other pass instead */
+            int16_t sgr_wt[2];
+
+            sgr_wt[0] = dav1d_sgr_params[sgr_idx][0] ? (rnd() & 127) - 96 : 0;
+            sgr_wt[1] = dav1d_sgr_params[sgr_idx][1] ? (rnd() & 127) - 32 :
+                        iclip(128 - sgr_wt[0], -32, 95);
+
+            const int base_w = 1 + (rnd() % 384);
+            const int base_h = 1 + (rnd() & 63);
+            const int bitdepth_max = (1 << bpc) - 1;
+
+            init_tmp(c_dst, 448 * sizeof(pixel), 448, 64, bitdepth_max);
+            init_tmp(h_edge, 448 * sizeof(pixel), 448, 8, bitdepth_max);
+            init_tmp((pixel *) left, 4 * sizeof(pixel), 4, 64, bitdepth_max);
+
+            for (enum LrEdgeFlags edges = 0; edges <= 0xf; edges++) {
+                /* full width/height when the corresponding edge exists */
+                const int w = edges & LR_HAVE_RIGHT ? 256 : base_w;
+                const int h = edges & LR_HAVE_BOTTOM ? 64 : base_h;
+
+                memcpy(a_dst, c_dst, 448 * 64 * sizeof(pixel));
+
+                call_ref(c_dst + 32, 448 * sizeof(pixel), left,
+                         h_edge + 32, 448 * sizeof(pixel),
+                         w, h, sgr_idx, sgr_wt, edges HIGHBD_TAIL_SUFFIX);
+                call_new(a_dst + 32, 448 * sizeof(pixel), left,
+                         h_edge + 32, 448 * sizeof(pixel),
+                         w, h, sgr_idx, sgr_wt, edges HIGHBD_TAIL_SUFFIX);
+                checkasm_check_pixel(c_dst + 32, 448 * sizeof(pixel),
+                                     a_dst + 32, 448 * sizeof(pixel),
+                                     w, h, "dst");
+            }
+            bench_new(a_dst + 32, 448 * sizeof(pixel), left,
+                      h_edge + 32, 448 * sizeof(pixel),
+                      256, 64, sgr_idx, sgr_wt, 0xf HIGHBD_TAIL_SUFFIX);
+        }
+    }
+}
+
+/* Top-level loop-restoration checkasm entry point: the DSP context is
+ * re-initialized per bpc since function selection depends on bitdepth. */
+void bitfn(checkasm_check_looprestoration)(void) {
+#if BITDEPTH == 16
+    const int bpc_min = 10, bpc_max = 12;
+#else
+    const int bpc_min = 8, bpc_max = 8;
+#endif
+    for (int bpc = bpc_min; bpc <= bpc_max; bpc += 2) {
+        Dav1dLoopRestorationDSPContext c;
+        bitfn(dav1d_loop_restoration_dsp_init)(&c, bpc);
+        check_wiener(&c, bpc);
+    }
+    report("wiener");
+    for (int bpc = bpc_min; bpc <= bpc_max; bpc += 2) {
+        Dav1dLoopRestorationDSPContext c;
+        bitfn(dav1d_loop_restoration_dsp_init)(&c, bpc);
+        check_sgr(&c, bpc);
+    }
+    report("sgr");
+}
diff --git a/third_party/dav1d/tests/checkasm/mc.c b/third_party/dav1d/tests/checkasm/mc.c
new file mode 100644
index 0000000000..ff8680d102
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/mc.c
@@ -0,0 +1,756 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+
+#include "src/levels.h"
+#include "src/mc.h"
+
+/* Human-readable names used to build checkasm test identifiers.
+ * filter_names[] is indexed by the Filter2d enum (see check_func()
+ * format strings below); mxy_names[] is indexed by the 2-bit
+ * horizontal/vertical subpel mask; scaled_paths[] selects the
+ * dy-specific code-path suffix for the scaled MC tests. */
+static const char *const filter_names[] = {
+ "8tap_regular", "8tap_regular_smooth", "8tap_regular_sharp",
+ "8tap_sharp_regular", "8tap_sharp_smooth", "8tap_sharp",
+ "8tap_smooth_regular", "8tap_smooth", "8tap_smooth_sharp",
+ "bilinear"
+};
+
+static const char *const mxy_names[] = { "0", "h", "v", "hv" };
+static const char *const scaled_paths[] = { "", "_dy1", "_dy2" };
+
+/* Step to the next block height to test. Produces the sequence
+ * 2, 4, 6, 8, 12, 16, 24, 32, 64, 128: powers of two are interleaved
+ * with 1.5x values up to 24 so non-power-of-two heights get coverage. */
+static int mc_h_next(const int h) {
+ switch (h) {
+ case 4:
+ case 8:
+ case 16:
+ return (h * 3) >> 1; /* 4->6, 8->12, 16->24 */
+ case 6:
+ case 12:
+ case 24:
+ return (h & (h - 1)) * 2; /* 6->8, 12->16, 24->32 */
+ default:
+ return h * 2;
+ }
+}
+
+/* Verify the put (single-prediction) motion-compensation functions:
+ * for every filter combination, width, and h/v subpel case, compare
+ * the asm output against the C reference on random pixel data, and
+ * benchmark the regular-8tap and bilinear variants. */
+static void check_mc(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, src_buf, 135 * 135,);
+ ALIGN_STK_64(pixel, c_dst, 128 * 128,);
+ ALIGN_STK_64(pixel, a_dst, 128 * 128,);
+ const pixel *src = src_buf + 135 * 3 + 3; /* leave room for filter taps above/left */
+ const ptrdiff_t src_stride = 135 * sizeof(pixel);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const pixel *src,
+ ptrdiff_t src_stride, int w, int h, int mx, int my
+ HIGHBD_DECL_SUFFIX);
+
+ for (int filter = 0; filter < N_2D_FILTERS; filter++)
+ for (int w = 2; w <= 128; w <<= 1) {
+ const ptrdiff_t dst_stride = w * sizeof(pixel);
+ for (int mxy = 0; mxy < 4; mxy++)
+ if (check_func(c->mc[filter], "mc_%s_w%d_%s_%dbpc",
+ filter_names[filter], w, mxy_names[mxy], BITDEPTH))
+ {
+ const int h_min = w <= 32 ? 2 : w / 4;
+ const int h_max = imax(imin(w * 4, 128), 32);
+ for (int h = h_min; h <= h_max; h = mc_h_next(h)) {
+ /* mx/my == 0 means no filtering in that direction */
+ const int mx = (mxy & 1) ? rnd() % 15 + 1 : 0;
+ const int my = (mxy & 2) ? rnd() % 15 + 1 : 0;
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ for (int i = 0; i < 135 * 135; i++)
+ src_buf[i] = rnd() & bitdepth_max;
+
+ call_ref(c_dst, dst_stride, src, src_stride, w, h,
+ mx, my HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, src, src_stride, w, h,
+ mx, my HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride,
+ a_dst, dst_stride,
+ w, h, "dst");
+
+ if (filter == FILTER_2D_8TAP_REGULAR ||
+ filter == FILTER_2D_BILINEAR)
+ {
+ bench_new(a_dst, dst_stride, src, src_stride, w, h,
+ mx, my HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ }
+ }
+ report("mc");
+}
+
+/* Generate worst case input in the topleft corner, randomize the rest.
+ * The 8x8 topleft region alternates between the extreme values 0 and
+ * bitdepth_max (pattern XOR'd row/column, optionally inverted by a
+ * random sign) to maximize intermediate values in the 8-tap filters. */
+static void generate_mct_input(pixel *const buf, const int bitdepth_max) {
+ static const int8_t pattern[8] = { -1, 0, -1, 0, 0, -1, 0, -1 };
+ const int sign = -(rnd() & 1);
+
+ for (int y = 0; y < 135; y++)
+ for (int x = 0; x < 135; x++)
+ buf[135*y+x] = ((x | y) < 8 ? (pattern[x] ^ pattern[y] ^ sign)
+ : rnd()) & bitdepth_max;
+}
+
+/* Verify the prep (compound-prediction intermediate) MC functions,
+ * which write int16_t intermediates instead of pixels. Uses the
+ * worst-case input generator so internal overflow bugs are caught. */
+static void check_mct(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, src_buf, 135 * 135,);
+ ALIGN_STK_64(int16_t, c_tmp, 128 * 128,);
+ ALIGN_STK_64(int16_t, a_tmp, 128 * 128,);
+ const pixel *src = src_buf + 135 * 3 + 3;
+ const ptrdiff_t src_stride = 135 * sizeof(pixel);
+
+ declare_func(void, int16_t *tmp, const pixel *src, ptrdiff_t src_stride,
+ int w, int h, int mx, int my HIGHBD_DECL_SUFFIX);
+
+ for (int filter = 0; filter < N_2D_FILTERS; filter++)
+ for (int w = 4; w <= 128; w <<= 1) /* mct has no w == 2 case */
+ for (int mxy = 0; mxy < 4; mxy++)
+ if (check_func(c->mct[filter], "mct_%s_w%d_%s_%dbpc",
+ filter_names[filter], w, mxy_names[mxy], BITDEPTH))
+ for (int h = imax(w / 4, 4); h <= imin(w * 4, 128); h <<= 1)
+ {
+ const int mx = (mxy & 1) ? rnd() % 15 + 1 : 0;
+ const int my = (mxy & 2) ? rnd() % 15 + 1 : 0;
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ generate_mct_input(src_buf, bitdepth_max);
+
+ call_ref(c_tmp, src, src_stride, w, h,
+ mx, my HIGHBD_TAIL_SUFFIX);
+ call_new(a_tmp, src, src_stride, w, h,
+ mx, my HIGHBD_TAIL_SUFFIX);
+ checkasm_check(int16_t, c_tmp, w * sizeof(*c_tmp),
+ a_tmp, w * sizeof(*a_tmp),
+ w, h, "tmp");
+
+ if (filter == FILTER_2D_8TAP_REGULAR ||
+ filter == FILTER_2D_BILINEAR)
+ {
+ bench_new(a_tmp, src, src_stride, w, h,
+ mx, my HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ report("mct");
+}
+
+/* Verify the scaled put MC functions. Tests three code paths per
+ * width: fully random dx/dy, plus the special-cased ystep=1.0 and
+ * ystep=2.0 paths (dy forced to 1024/2048 via p << 10). */
+static void check_mc_scaled(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, src_buf, 263 * 263,);
+ ALIGN_STK_64(pixel, c_dst, 128 * 128,);
+ ALIGN_STK_64(pixel, a_dst, 128 * 128,);
+ const pixel *src = src_buf + 263 * 3 + 3;
+ const ptrdiff_t src_stride = 263 * sizeof(pixel);
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const pixel *src,
+ ptrdiff_t src_stride, int w, int h,
+ int mx, int my, int dx, int dy HIGHBD_DECL_SUFFIX);
+
+ for (int filter = 0; filter < N_2D_FILTERS; filter++)
+ for (int w = 2; w <= 128; w <<= 1) {
+ const ptrdiff_t dst_stride = w * sizeof(pixel);
+ for (int p = 0; p < 3; ++p) {
+ if (check_func(c->mc_scaled[filter], "mc_scaled_%s_w%d%s_%dbpc",
+ filter_names[filter], w, scaled_paths[p], BITDEPTH))
+ {
+ const int h_min = w <= 32 ? 2 : w / 4;
+ const int h_max = imax(imin(w * 4, 128), 32);
+ for (int h = h_min; h <= h_max; h = mc_h_next(h)) {
+ const int mx = rnd() % 1024;
+ const int my = rnd() % 1024;
+ const int dx = rnd() % 2048 + 1;
+ const int dy = !p
+ ? rnd() % 2048 + 1
+ : p << 10; // ystep=1.0 and ystep=2.0 paths
+
+ for (int k = 0; k < 263 * 263; k++)
+ src_buf[k] = rnd() & bitdepth_max;
+
+ call_ref(c_dst, dst_stride, src, src_stride,
+ w, h, mx, my, dx, dy HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, src, src_stride,
+ w, h, mx, my, dx, dy HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride,
+ a_dst, dst_stride, w, h, "dst");
+
+ if (filter == FILTER_2D_8TAP_REGULAR ||
+ filter == FILTER_2D_BILINEAR)
+ bench_new(a_dst, dst_stride, src, src_stride,
+ w, h, mx, my, dx, dy HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ }
+ }
+ report("mc_scaled");
+}
+
+/* Verify the scaled prep MC functions (int16_t output); same
+ * dx/dy path structure as check_mc_scaled() above. */
+static void check_mct_scaled(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, src_buf, 263 * 263,);
+ ALIGN_STK_64(int16_t, c_tmp, 128 * 128,);
+ ALIGN_STK_64(int16_t, a_tmp, 128 * 128,);
+ const pixel *src = src_buf + 263 * 3 + 3;
+ const ptrdiff_t src_stride = 263 * sizeof(pixel);
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ declare_func(void, int16_t *tmp, const pixel *src, ptrdiff_t src_stride,
+ int w, int h, int mx, int my, int dx, int dy HIGHBD_DECL_SUFFIX);
+
+ for (int filter = 0; filter < N_2D_FILTERS; filter++)
+ for (int w = 4; w <= 128; w <<= 1)
+ for (int p = 0; p < 3; ++p) {
+ if (check_func(c->mct_scaled[filter], "mct_scaled_%s_w%d%s_%dbpc",
+ filter_names[filter], w, scaled_paths[p], BITDEPTH))
+ {
+ const int h_min = imax(w / 4, 4);
+ const int h_max = imin(w * 4, 128);
+ for (int h = h_min; h <= h_max; h = mc_h_next(h)) {
+ const int mx = rnd() % 1024;
+ const int my = rnd() % 1024;
+ const int dx = rnd() % 2048 + 1;
+ const int dy = !p
+ ? rnd() % 2048 + 1
+ : p << 10; // ystep=1.0 and ystep=2.0 paths
+
+ for (int k = 0; k < 263 * 263; k++)
+ src_buf[k] = rnd() & bitdepth_max;
+
+ call_ref(c_tmp, src, src_stride,
+ w, h, mx, my, dx, dy HIGHBD_TAIL_SUFFIX);
+ call_new(a_tmp, src, src_stride,
+ w, h, mx, my, dx, dy HIGHBD_TAIL_SUFFIX);
+ checkasm_check(int16_t, c_tmp, w * sizeof(*c_tmp),
+ a_tmp, w * sizeof(*a_tmp),
+ w, h, "tmp");
+
+ if (filter == FILTER_2D_8TAP_REGULAR ||
+ filter == FILTER_2D_BILINEAR)
+ bench_new(a_tmp, src, src_stride,
+ w, h, mx, my, dx, dy HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ }
+ report("mct_scaled");
+}
+
+/* Fill both compound intermediate buffers with realistic worst-case
+ * data by running the sharp 8-tap prep filter over generated input;
+ * used by the avg/w_avg/mask/w_mask checks below. */
+static void init_tmp(Dav1dMCDSPContext *const c, pixel *const buf,
+ int16_t (*const tmp)[128 * 128], const int bitdepth_max)
+{
+ for (int i = 0; i < 2; i++) {
+ generate_mct_input(buf, bitdepth_max);
+ c->mct[FILTER_2D_8TAP_SHARP](tmp[i], buf + 135 * 3 + 3,
+ 135 * sizeof(pixel), 128, 128,
+ 8, 8 HIGHBD_TAIL_SUFFIX);
+ }
+}
+
+/* Verify the compound average function (simple mean of two
+ * int16_t intermediate buffers, rounded back to pixels). */
+static void check_avg(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(int16_t, tmp, 2, [128 * 128]);
+ ALIGN_STK_64(pixel, c_dst, 135 * 135,);
+ ALIGN_STK_64(pixel, a_dst, 128 * 128,);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const int16_t *tmp1,
+ const int16_t *tmp2, int w, int h HIGHBD_DECL_SUFFIX);
+
+ for (int w = 4; w <= 128; w <<= 1)
+ if (check_func(c->avg, "avg_w%d_%dbpc", w, BITDEPTH)) {
+ ptrdiff_t dst_stride = w * sizeof(pixel);
+ for (int h = imax(w / 4, 4); h <= imin(w * 4, 128); h <<= 1)
+ {
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ init_tmp(c, c_dst, tmp, bitdepth_max);
+ call_ref(c_dst, dst_stride, tmp[0], tmp[1], w, h HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, tmp[0], tmp[1], w, h HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ w, h, "dst");
+
+ bench_new(a_dst, dst_stride, tmp[0], tmp[1], w, h HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ report("avg");
+}
+
+/* Verify the weighted compound average (per-block weight 1..15). */
+static void check_w_avg(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(int16_t, tmp, 2, [128 * 128]);
+ ALIGN_STK_64(pixel, c_dst, 135 * 135,);
+ ALIGN_STK_64(pixel, a_dst, 128 * 128,);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const int16_t *tmp1,
+ const int16_t *tmp2, int w, int h, int weight HIGHBD_DECL_SUFFIX);
+
+ for (int w = 4; w <= 128; w <<= 1)
+ if (check_func(c->w_avg, "w_avg_w%d_%dbpc", w, BITDEPTH)) {
+ ptrdiff_t dst_stride = w * sizeof(pixel);
+ for (int h = imax(w / 4, 4); h <= imin(w * 4, 128); h <<= 1)
+ {
+ int weight = rnd() % 15 + 1;
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ init_tmp(c, c_dst, tmp, bitdepth_max);
+
+ call_ref(c_dst, dst_stride, tmp[0], tmp[1], w, h, weight HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, tmp[0], tmp[1], w, h, weight HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ w, h, "dst");
+
+ bench_new(a_dst, dst_stride, tmp[0], tmp[1], w, h, weight HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ report("w_avg");
+}
+
+/* Verify masked compound blending with an externally supplied
+ * per-pixel mask (values 0..64). */
+static void check_mask(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(int16_t, tmp, 2, [128 * 128]);
+ ALIGN_STK_64(pixel, c_dst, 135 * 135,);
+ ALIGN_STK_64(pixel, a_dst, 128 * 128,);
+ ALIGN_STK_64(uint8_t, mask, 128 * 128,);
+
+ for (int i = 0; i < 128 * 128; i++)
+ mask[i] = rnd() % 65; /* mask weights are 0..64 inclusive */
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const int16_t *tmp1,
+ const int16_t *tmp2, int w, int h, const uint8_t *mask
+ HIGHBD_DECL_SUFFIX);
+
+ for (int w = 4; w <= 128; w <<= 1)
+ if (check_func(c->mask, "mask_w%d_%dbpc", w, BITDEPTH)) {
+ ptrdiff_t dst_stride = w * sizeof(pixel);
+ for (int h = imax(w / 4, 4); h <= imin(w * 4, 128); h <<= 1)
+ {
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ init_tmp(c, c_dst, tmp, bitdepth_max);
+ call_ref(c_dst, dst_stride, tmp[0], tmp[1], w, h, mask HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, tmp[0], tmp[1], w, h, mask HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ w, h, "dst");
+
+ bench_new(a_dst, dst_stride, tmp[0], tmp[1], w, h, mask HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ report("mask");
+}
+
+/* Verify the w_mask functions, which both blend the two compound
+ * buffers and emit the derived mask. The three variants correspond
+ * to 444/422/420 chroma subsampling, so both the blended pixels and
+ * the (subsampled) output mask are compared against the reference. */
+static void check_w_mask(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(int16_t, tmp, 2, [128 * 128]);
+ ALIGN_STK_64(pixel, c_dst, 135 * 135,);
+ ALIGN_STK_64(pixel, a_dst, 128 * 128,);
+ ALIGN_STK_64(uint8_t, c_mask, 128 * 128,);
+ ALIGN_STK_64(uint8_t, a_mask, 128 * 128,);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const int16_t *tmp1,
+ const int16_t *tmp2, int w, int h, uint8_t *mask, int sign
+ HIGHBD_DECL_SUFFIX);
+
+ static const uint16_t ss[] = { 444, 422, 420 };
+ static const uint8_t ss_hor[] = { 0, 1, 1 };
+ static const uint8_t ss_ver[] = { 0, 0, 1 };
+
+ for (int i = 0; i < 3; i++)
+ for (int w = 4; w <= 128; w <<= 1)
+ if (check_func(c->w_mask[i], "w_mask_%d_w%d_%dbpc", ss[i], w,
+ BITDEPTH))
+ {
+ ptrdiff_t dst_stride = w * sizeof(pixel);
+ for (int h = imax(w / 4, 4); h <= imin(w * 4, 128); h <<= 1)
+ {
+ int sign = rnd() & 1;
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ init_tmp(c, c_dst, tmp, bitdepth_max);
+
+ call_ref(c_dst, dst_stride, tmp[0], tmp[1], w, h,
+ c_mask, sign HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, tmp[0], tmp[1], w, h,
+ a_mask, sign HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride,
+ a_dst, dst_stride,
+ w, h, "dst");
+ /* mask dimensions shrink with the subsampling factors */
+ checkasm_check(uint8_t, c_mask, w >> ss_hor[i],
+ a_mask, w >> ss_hor[i],
+ w >> ss_hor[i], h >> ss_ver[i],
+ "mask");
+
+ bench_new(a_dst, dst_stride, tmp[0], tmp[1], w, h,
+ a_mask, sign HIGHBD_TAIL_SUFFIX);
+ }
+ }
+ report("w_mask");
+}
+
+/* Verify the generic blend function (pixel source blended into dst
+ * with a 0..64 per-pixel mask); dst is pre-filled since blend reads
+ * and writes it. */
+static void check_blend(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, tmp, 32 * 32,);
+ ALIGN_STK_64(pixel, c_dst, 32 * 32,);
+ ALIGN_STK_64(pixel, a_dst, 32 * 32,);
+ ALIGN_STK_64(uint8_t, mask, 32 * 32,);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const pixel *tmp,
+ int w, int h, const uint8_t *mask);
+
+ for (int w = 4; w <= 32; w <<= 1) {
+ const ptrdiff_t dst_stride = w * sizeof(pixel);
+ if (check_func(c->blend, "blend_w%d_%dbpc", w, BITDEPTH))
+ for (int h = imax(w / 2, 4); h <= imin(w * 2, 32); h <<= 1) {
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ for (int i = 0; i < 32 * 32; i++) {
+ tmp[i] = rnd() & bitdepth_max;
+ mask[i] = rnd() % 65;
+ }
+ /* both dsts start identical so only blend output can differ */
+ for (int i = 0; i < w * h; i++)
+ c_dst[i] = a_dst[i] = rnd() & bitdepth_max;
+
+ call_ref(c_dst, dst_stride, tmp, w, h, mask);
+ call_new(a_dst, dst_stride, tmp, w, h, mask);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ w, h, "dst");
+
+ bench_new(a_dst, dst_stride, tmp, w, h, mask);
+ }
+ }
+ report("blend");
+}
+
+/* Verify the vertical-edge (obmc left) blend with its implicit mask. */
+static void check_blend_v(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, tmp, 32 * 128,);
+ ALIGN_STK_64(pixel, c_dst, 32 * 128,);
+ ALIGN_STK_64(pixel, a_dst, 32 * 128,);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const pixel *tmp,
+ int w, int h);
+
+ for (int w = 2; w <= 32; w <<= 1) {
+ const ptrdiff_t dst_stride = w * sizeof(pixel);
+ if (check_func(c->blend_v, "blend_v_w%d_%dbpc", w, BITDEPTH))
+ for (int h = 2; h <= (w == 2 ? 64 : 128); h <<= 1) {
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ for (int i = 0; i < w * h; i++)
+ c_dst[i] = a_dst[i] = rnd() & bitdepth_max;
+ for (int i = 0; i < 32 * 128; i++)
+ tmp[i] = rnd() & bitdepth_max;
+
+ call_ref(c_dst, dst_stride, tmp, w, h);
+ call_new(a_dst, dst_stride, tmp, w, h);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ w, h, "dst");
+
+ bench_new(a_dst, dst_stride, tmp, w, h);
+ }
+ }
+ report("blend_v");
+}
+
+/* Verify the horizontal-edge (obmc top) blend with its implicit mask. */
+static void check_blend_h(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, tmp, 128 * 32,);
+ ALIGN_STK_64(pixel, c_dst, 128 * 32,);
+ ALIGN_STK_64(pixel, a_dst, 128 * 32,);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const pixel *tmp,
+ int w, int h);
+
+ for (int w = 2; w <= 128; w <<= 1) {
+ const ptrdiff_t dst_stride = w * sizeof(pixel);
+ if (check_func(c->blend_h, "blend_h_w%d_%dbpc", w, BITDEPTH))
+ for (int h = (w == 128 ? 4 : 2); h <= 32; h <<= 1) {
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+ for (int i = 0; i < w * h; i++)
+ c_dst[i] = a_dst[i] = rnd() & bitdepth_max;
+ for (int i = 0; i < 128 * 32; i++)
+ tmp[i] = rnd() & bitdepth_max;
+
+ call_ref(c_dst, dst_stride, tmp, w, h);
+ call_new(a_dst, dst_stride, tmp, w, h);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ w, h, "dst");
+
+ bench_new(a_dst, dst_stride, tmp, w, h);
+ }
+ }
+ report("blend_h");
+}
+
+/* Verify the 8x8 warp (affine motion) put function on random
+ * source pixels and random affine coefficients. */
+static void check_warp8x8(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, src_buf, 15 * 15,);
+ ALIGN_STK_64(pixel, c_dst, 8 * 8,);
+ ALIGN_STK_64(pixel, a_dst, 8 * 8,);
+ int16_t abcd[4]; /* affine transform coefficients */
+ const pixel *src = src_buf + 15 * 3 + 3;
+ const ptrdiff_t dst_stride = 8 * sizeof(pixel);
+ const ptrdiff_t src_stride = 15 * sizeof(pixel);
+
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride, const pixel *src,
+ ptrdiff_t src_stride, const int16_t *abcd, int mx, int my
+ HIGHBD_DECL_SUFFIX);
+
+ if (check_func(c->warp8x8, "warp_8x8_%dbpc", BITDEPTH)) {
+ const int mx = (rnd() & 0x1fff) - 0xa00;
+ const int my = (rnd() & 0x1fff) - 0xa00;
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ for (int i = 0; i < 4; i++)
+ abcd[i] = (rnd() & 0x1fff) - 0xa00;
+
+ for (int i = 0; i < 15 * 15; i++)
+ src_buf[i] = rnd() & bitdepth_max;
+
+ call_ref(c_dst, dst_stride, src, src_stride, abcd, mx, my HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, src, src_stride, abcd, mx, my HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ 8, 8, "dst");
+
+ bench_new(a_dst, dst_stride, src, src_stride, abcd, mx, my HIGHBD_TAIL_SUFFIX);
+ }
+ report("warp8x8");
+}
+
+/* Verify the 8x8 warp prep function (int16_t compound output). */
+static void check_warp8x8t(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, src_buf, 15 * 15,);
+ ALIGN_STK_64(int16_t, c_tmp, 8 * 8,);
+ ALIGN_STK_64(int16_t, a_tmp, 8 * 8,);
+ int16_t abcd[4]; /* affine transform coefficients */
+ const pixel *src = src_buf + 15 * 3 + 3;
+ const ptrdiff_t src_stride = 15 * sizeof(pixel);
+
+ declare_func(void, int16_t *tmp, ptrdiff_t tmp_stride, const pixel *src,
+ ptrdiff_t src_stride, const int16_t *abcd, int mx, int my
+ HIGHBD_DECL_SUFFIX);
+
+ if (check_func(c->warp8x8t, "warp_8x8t_%dbpc", BITDEPTH)) {
+ const int mx = (rnd() & 0x1fff) - 0xa00;
+ const int my = (rnd() & 0x1fff) - 0xa00;
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ for (int i = 0; i < 4; i++)
+ abcd[i] = (rnd() & 0x1fff) - 0xa00;
+
+ for (int i = 0; i < 15 * 15; i++)
+ src_buf[i] = rnd() & bitdepth_max;
+
+ call_ref(c_tmp, 8, src, src_stride, abcd, mx, my HIGHBD_TAIL_SUFFIX);
+ call_new(a_tmp, 8, src, src_stride, abcd, mx, my HIGHBD_TAIL_SUFFIX);
+ checkasm_check(int16_t, c_tmp, 8 * sizeof(*c_tmp),
+ a_tmp, 8 * sizeof(*a_tmp),
+ 8, 8, "tmp");
+
+ bench_new(a_tmp, 8, src, src_stride, abcd, mx, my HIGHBD_TAIL_SUFFIX);
+ }
+ report("warp8x8t");
+}
+
+/* Bitmask describing which block edges lie inside the source frame;
+ * used to drive the emu_edge padding tests below. */
+enum EdgeFlags {
+ HAVE_TOP = 1,
+ HAVE_BOTTOM = 2,
+ HAVE_LEFT = 4,
+ HAVE_RIGHT = 8,
+};
+
+/* Pick a random source position (x, y) and source size (iw, ih) such
+ * that a bw x bh block placed there has exactly the edge availability
+ * described by `edge` (e.g. no LEFT means the block starts before
+ * x == 0). The set_off macro handles one axis at a time. */
+static void random_offset_for_edge(int *const x, int *const y,
+ const int bw, const int bh,
+ int *const iw, int *const ih,
+ const enum EdgeFlags edge)
+{
+#define set_off(edge1, edge2, pos, dim) \
+ *i##dim = edge & (HAVE_##edge1 | HAVE_##edge2) ? 160 : 1 + (rnd() % (b##dim - 2)); \
+ switch (edge & (HAVE_##edge1 | HAVE_##edge2)) { \
+ case HAVE_##edge1 | HAVE_##edge2: \
+ assert(b##dim <= *i##dim); \
+ *pos = rnd() % (*i##dim - b##dim + 1); \
+ break; \
+ case HAVE_##edge1: \
+ *pos = (*i##dim - b##dim) + 1 + (rnd() % (b##dim - 1)); \
+ break; \
+ case HAVE_##edge2: \
+ *pos = -(1 + (rnd() % (b##dim - 1))); \
+ break; \
+ case 0: \
+ assert(b##dim - 1 > *i##dim); \
+ *pos = -(1 + (rnd() % (b##dim - *i##dim - 1))); \
+ break; \
+ }
+ set_off(LEFT, RIGHT, x, w);
+ set_off(TOP, BOTTOM, y, h);
+}
+
+/* Verify emu_edge (edge-pixel replication for blocks extending past
+ * the frame) for all 15 partial edge-availability combinations. */
+static void check_emuedge(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, c_dst, 135 * 192,);
+ ALIGN_STK_64(pixel, a_dst, 135 * 192,);
+ ALIGN_STK_64(pixel, src, 160 * 160,);
+
+ for (int i = 0; i < 160 * 160; i++)
+ src[i] = rnd() & ((1U << BITDEPTH) - 1);
+
+ declare_func(void, intptr_t bw, intptr_t bh, intptr_t iw, intptr_t ih,
+ intptr_t x, intptr_t y,
+ pixel *dst, ptrdiff_t dst_stride,
+ const pixel *src, ptrdiff_t src_stride);
+
+ int x, y, iw, ih;
+ for (int w = 4; w <= 128; w <<= 1)
+ if (check_func(c->emu_edge, "emu_edge_w%d_%dbpc", w, BITDEPTH)) {
+ for (int h = imax(w / 4, 4); h <= imin(w * 4, 128); h <<= 1) {
+ // we skip 0xf, since it implies that we don't need emu_edge
+ for (enum EdgeFlags edge = 0; edge < 0xf; edge++) {
+ const int bw = w + (rnd() & 7);
+ const int bh = h + (rnd() & 7);
+ random_offset_for_edge(&x, &y, bw, bh, &iw, &ih, edge);
+ call_ref(bw, bh, iw, ih, x, y,
+ c_dst, 192 * sizeof(pixel), src, 160 * sizeof(pixel));
+ call_new(bw, bh, iw, ih, x, y,
+ a_dst, 192 * sizeof(pixel), src, 160 * sizeof(pixel));
+ checkasm_check_pixel(c_dst, 192 * sizeof(pixel),
+ a_dst, 192 * sizeof(pixel),
+ bw, bh, "dst");
+ }
+ }
+ /* bench each single-edge case separately */
+ for (enum EdgeFlags edge = 1; edge < 0xf; edge <<= 1) {
+ random_offset_for_edge(&x, &y, w + 7, w + 7, &iw, &ih, edge);
+ bench_new(w + 7, w + 7, iw, ih, x, y,
+ a_dst, 192 * sizeof(pixel), src, 160 * sizeof(pixel));
+ }
+ }
+ report("emu_edge");
+}
+
+/* Compute the initial 14-bit fractional x position (mx0) for the
+ * super-resolution upscaler, given input/output widths and the
+ * 14-bit step; mirrors the spec's x0 derivation. */
+static int get_upscale_x0(const int in_w, const int out_w, const int step) {
+ const int err = out_w * step - (in_w << 14);
+ const int x0 = (-((out_w - in_w) << 13) + (out_w >> 1)) / out_w + 128 - (err >> 1);
+ return x0 & 0x3fff; /* keep the 14-bit fraction */
+}
+
+/* Verify the horizontal super-resolution resize function on a random
+ * upscale ratio (w_den/8 in 9/8..16/8), then benchmark a full-width
+ * upscale to 512 output pixels. */
+static void check_resize(Dav1dMCDSPContext *const c) {
+ ALIGN_STK_64(pixel, c_dst, 1024 * 64,);
+ ALIGN_STK_64(pixel, a_dst, 1024 * 64,);
+ ALIGN_STK_64(pixel, src, 512 * 64,);
+
+ const int height = 64;
+ const int max_src_width = 512;
+ const ptrdiff_t dst_stride = 1024 * sizeof(pixel);
+ const ptrdiff_t src_stride = 512 * sizeof(pixel);
+
+ /* Parameter order matches the call sites below: destination width,
+ * then height, then source width (previously the declaration named
+ * these src_w/h in the wrong order, which was misleading). */
+ declare_func(void, pixel *dst, ptrdiff_t dst_stride,
+ const pixel *src, ptrdiff_t src_stride,
+ int dst_w, int h, int src_w, int dx, int mx0
+ HIGHBD_DECL_SUFFIX);
+
+ if (check_func(c->resize, "resize_%dbpc", BITDEPTH)) {
+#if BITDEPTH == 16
+ const int bitdepth_max = rnd() & 1 ? 0x3ff : 0xfff;
+#else
+ const int bitdepth_max = 0xff;
+#endif
+
+ for (int i = 0; i < max_src_width * height; i++)
+ src[i] = rnd() & bitdepth_max;
+
+ const int w_den = 9 + (rnd() & 7);
+ const int src_w = 16 + (rnd() % (max_src_width - 16 + 1));
+ const int dst_w = w_den * src_w >> 3; /* upscale: dst_w >= src_w */
+#define scale_fac(ref_sz, this_sz) \
+ ((((ref_sz) << 14) + ((this_sz) >> 1)) / (this_sz))
+ const int dx = scale_fac(src_w, dst_w);
+#undef scale_fac
+ const int mx0 = get_upscale_x0(src_w, dst_w, dx);
+
+ call_ref(c_dst, dst_stride, src, src_stride,
+ dst_w, height, src_w, dx, mx0 HIGHBD_TAIL_SUFFIX);
+ call_new(a_dst, dst_stride, src, src_stride,
+ dst_w, height, src_w, dx, mx0 HIGHBD_TAIL_SUFFIX);
+ checkasm_check_pixel(c_dst, dst_stride, a_dst, dst_stride,
+ dst_w, height, "dst");
+
+ bench_new(a_dst, dst_stride, src, src_stride,
+ 512, height, 512 * 8 / w_den, dx, mx0 HIGHBD_TAIL_SUFFIX);
+ }
+
+ report("resize");
+}
+
+/* Entry point for the motion-compensation checkasm tests: initializes
+ * the MC DSP context for this bitdepth and runs every MC sub-check. */
+void bitfn(checkasm_check_mc)(void) {
+ Dav1dMCDSPContext c;
+ bitfn(dav1d_mc_dsp_init)(&c);
+
+ check_mc(&c);
+ check_mct(&c);
+ check_mc_scaled(&c);
+ check_mct_scaled(&c);
+ check_avg(&c);
+ check_w_avg(&c);
+ check_mask(&c);
+ check_w_mask(&c);
+ check_blend(&c);
+ check_blend_v(&c);
+ check_blend_h(&c);
+ check_warp8x8(&c);
+ check_warp8x8t(&c);
+ check_emuedge(&c);
+ check_resize(&c);
+}
diff --git a/third_party/dav1d/tests/checkasm/msac.c b/third_party/dav1d/tests/checkasm/msac.c
new file mode 100644
index 0000000000..9d2df71124
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/msac.c
@@ -0,0 +1,291 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2019, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "tests/checkasm/checkasm.h"
+
+#include "src/cpu.h"
+#include "src/msac.h"
+
+#include <stdio.h>
+#include <string.h>
+
+#define BUF_SIZE 8192
+
+/* The normal code doesn't use function pointers, so define a local
+ * function-pointer table here to select between the C reference and
+ * the per-arch asm implementations of each msac decode primitive. */
+typedef unsigned (*decode_symbol_adapt_fn)(MsacContext *s, uint16_t *cdf,
+ size_t n_symbols);
+typedef unsigned (*decode_adapt_fn)(MsacContext *s, uint16_t *cdf);
+typedef unsigned (*decode_bool_equi_fn)(MsacContext *s);
+typedef unsigned (*decode_bool_fn)(MsacContext *s, unsigned f);
+
+typedef struct {
+ decode_symbol_adapt_fn symbol_adapt4;
+ decode_symbol_adapt_fn symbol_adapt8;
+ decode_symbol_adapt_fn symbol_adapt16;
+ decode_adapt_fn bool_adapt;
+ decode_bool_equi_fn bool_equi;
+ decode_bool_fn bool;
+ decode_adapt_fn hi_tok;
+} MsacDSPContext;
+
+/* Fill a 16-entry cdf with a random valid distribution over n symbols:
+ * entries above n are random padding, cdf[n] is the adaptation count
+ * (0), and entries n-1..0 are built strictly increasing toward index
+ * 0, staying below 32768 with room for the remaining entries. */
+static void randomize_cdf(uint16_t *const cdf, const int n) {
+ int i;
+ for (i = 15; i > n; i--)
+ cdf[i] = rnd(); // padding
+ cdf[i] = 0; // count
+ do {
+ cdf[i - 1] = cdf[i] + rnd() % (32768 - cdf[i] - i) + 1;
+ } while (--i > 0);
+}
+
+/* memcmp() on structs can have weird behavior due to padding etc.,
+ * so compare each MsacContext field explicitly. Returns nonzero on
+ * any mismatch. */
+static int msac_cmp(const MsacContext *const a, const MsacContext *const b) {
+ return a->buf_pos != b->buf_pos || a->buf_end != b->buf_end ||
+ a->dif != b->dif || a->rng != b->rng || a->cnt != b->cnt ||
+ a->allow_update_cdf != b->allow_update_cdf;
+}
+
+/* Print a diagnostic diff of two msac states after a mismatch: `a` is
+ * the C reference context, `b` the asm context (see call sites, which
+ * pass &s_c, &s_a). Only fields that actually differ are printed,
+ * followed by a side-by-side cdf dump when num_cdf > 0. */
+static void msac_dump(unsigned c_res, unsigned a_res,
+ const MsacContext *const a, const MsacContext *const b,
+ const uint16_t *const cdf_a, const uint16_t *const cdf_b,
+ const int num_cdf)
+{
+ if (c_res != a_res)
+ fprintf(stderr, "c_res %u a_res %u\n", c_res, a_res);
+ if (a->buf_pos != b->buf_pos)
+ fprintf(stderr, "buf_pos %p vs %p\n", a->buf_pos, b->buf_pos);
+ if (a->buf_end != b->buf_end)
+ fprintf(stderr, "buf_end %p vs %p\n", a->buf_end, b->buf_end);
+ if (a->dif != b->dif)
+ fprintf(stderr, "dif %zx vs %zx\n", a->dif, b->dif);
+ if (a->rng != b->rng)
+ fprintf(stderr, "rng %u vs %u\n", a->rng, b->rng);
+ if (a->cnt != b->cnt)
+ fprintf(stderr, "cnt %d vs %d\n", a->cnt, b->cnt);
+ /* Fixed: previously tested `a->allow_update_cdf` alone, which printed
+ * a bogus "X vs Y" line whenever the flag was set rather than only
+ * when the two contexts disagree (msac_cmp compares with !=). */
+ if (a->allow_update_cdf != b->allow_update_cdf)
+ fprintf(stderr, "allow_update_cdf %d vs %d\n",
+ a->allow_update_cdf, b->allow_update_cdf);
+ if (num_cdf && memcmp(cdf_a, cdf_b, sizeof(*cdf_a) * (num_cdf + 1))) {
+ fprintf(stderr, "cdf:\n");
+ for (int i = 0; i <= num_cdf; i++)
+ fprintf(stderr, " %5u", cdf_a[i]);
+ fprintf(stderr, "\n");
+ for (int i = 0; i <= num_cdf; i++)
+ fprintf(stderr, " %5u", cdf_b[i]);
+ fprintf(stderr, "\n");
+ for (int i = 0; i <= num_cdf; i++)
+ fprintf(stderr, " %c", cdf_a[i] != cdf_b[i] ? 'x' : '.');
+ fprintf(stderr, "\n");
+ }
+}
+
+/* Test one decode_symbol_adaptN function: for both cdf_update modes
+ * and every symbol count in [n_min, n_max], decode 64 symbols with
+ * the C reference and the asm version from identical initial states,
+ * comparing the return value, the full msac state, and the cdf after
+ * every call. Benchmarks the widest symbol count with updates on. */
+#define CHECK_SYMBOL_ADAPT(n, n_min, n_max) do { \
+ if (check_func(c->symbol_adapt##n, "msac_decode_symbol_adapt%d", n)) { \
+ for (int cdf_update = 0; cdf_update <= 1; cdf_update++) { \
+ for (int ns = n_min; ns <= n_max; ns++) { \
+ dav1d_msac_init(&s_c, buf, BUF_SIZE, !cdf_update); \
+ s_a = s_c; \
+ randomize_cdf(cdf[0], ns); \
+ memcpy(cdf[1], cdf[0], sizeof(*cdf)); \
+ for (int i = 0; i < 64; i++) { \
+ unsigned c_res = call_ref(&s_c, cdf[0], ns); \
+ unsigned a_res = call_new(&s_a, cdf[1], ns); \
+ if (c_res != a_res || msac_cmp(&s_c, &s_a) || \
+ memcmp(cdf[0], cdf[1], sizeof(**cdf) * (ns + 1))) \
+ { \
+ if (fail()) \
+ msac_dump(c_res, a_res, &s_c, &s_a, \
+ cdf[0], cdf[1], ns); \
+ } \
+ } \
+ if (cdf_update && ns == n - 1) \
+ bench_new(&s_a, cdf[1], ns); \
+ } \
+ } \
+ } \
+} while (0)
+
+/* Run the symbol_adapt checks for all three table widths (4/8/16). */
+static void check_decode_symbol(MsacDSPContext *const c, uint8_t *const buf) {
+ ALIGN_STK_32(uint16_t, cdf, 2, [16]);
+ MsacContext s_c, s_a;
+
+ declare_func(unsigned, MsacContext *s, uint16_t *cdf, size_t n_symbols);
+ CHECK_SYMBOL_ADAPT( 4, 1, 4);
+ CHECK_SYMBOL_ADAPT( 8, 1, 7);
+ CHECK_SYMBOL_ADAPT(16, 3, 15);
+ report("decode_symbol");
+}
+
+/* Check msac_decode_bool_adapt: decode 64 booleans from identical
+ * states with a random 2-entry cdf, with and without cdf updates,
+ * comparing result, msac state, and cdf after every call. */
+static void check_decode_bool_adapt(MsacDSPContext *const c, uint8_t *const buf) {
+ MsacContext s_c, s_a;
+
+ declare_func(unsigned, MsacContext *s, uint16_t *cdf);
+ if (check_func(c->bool_adapt, "msac_decode_bool_adapt")) {
+ uint16_t cdf[2][2];
+ for (int cdf_update = 0; cdf_update <= 1; cdf_update++) {
+ dav1d_msac_init(&s_c, buf, BUF_SIZE, !cdf_update);
+ s_a = s_c;
+ cdf[0][0] = cdf[1][0] = rnd() % 32767 + 1;
+ cdf[0][1] = cdf[1][1] = 0; // adaptation count
+ for (int i = 0; i < 64; i++) {
+ unsigned c_res = call_ref(&s_c, cdf[0]);
+ unsigned a_res = call_new(&s_a, cdf[1]);
+ if (c_res != a_res || msac_cmp(&s_c, &s_a) ||
+ memcmp(cdf[0], cdf[1], sizeof(*cdf)))
+ {
+ if (fail())
+ msac_dump(c_res, a_res, &s_c, &s_a, cdf[0], cdf[1], 1);
+ }
+ }
+ if (cdf_update)
+ bench_new(&s_a, cdf[1]);
+ }
+ }
+}
+
+/* Check msac_decode_bool_equi (equiprobable boolean, no cdf):
+ * 64 decodes from identical states, comparing result and state. */
+static void check_decode_bool_equi(MsacDSPContext *const c, uint8_t *const buf) {
+ MsacContext s_c, s_a;
+
+ declare_func(unsigned, MsacContext *s);
+ if (check_func(c->bool_equi, "msac_decode_bool_equi")) {
+ dav1d_msac_init(&s_c, buf, BUF_SIZE, 1);
+ s_a = s_c;
+ for (int i = 0; i < 64; i++) {
+ unsigned c_res = call_ref(&s_c);
+ unsigned a_res = call_new(&s_a);
+ if (c_res != a_res || msac_cmp(&s_c, &s_a)) {
+ if (fail())
+ msac_dump(c_res, a_res, &s_c, &s_a, NULL, NULL, 0);
+ }
+ }
+ bench_new(&s_a);
+ }
+}
+
+/* Check msac_decode_bool with a random fixed probability f per call;
+ * benchmarks with the midpoint probability 16384. */
+static void check_decode_bool(MsacDSPContext *const c, uint8_t *const buf) {
+ MsacContext s_c, s_a;
+
+ declare_func(unsigned, MsacContext *s, unsigned f);
+ if (check_func(c->bool, "msac_decode_bool")) {
+ dav1d_msac_init(&s_c, buf, BUF_SIZE, 1);
+ s_a = s_c;
+ for (int i = 0; i < 64; i++) {
+ const unsigned f = rnd() & 0x7fff; // 15-bit probability
+ unsigned c_res = call_ref(&s_c, f);
+ unsigned a_res = call_new(&s_a, f);
+ if (c_res != a_res || msac_cmp(&s_c, &s_a)) {
+ if (fail())
+ msac_dump(c_res, a_res, &s_c, &s_a, NULL, NULL, 0);
+ }
+ }
+ bench_new(&s_a, 16384);
+ }
+
+}
+
+/* Group the three boolean-decode checks under one report entry. */
+static void check_decode_bool_funcs(MsacDSPContext *const c, uint8_t *const buf) {
+ check_decode_bool_adapt(c, buf);
+ check_decode_bool_equi(c, buf);
+ check_decode_bool(c, buf);
+ report("decode_bool");
+}
+
+/* Check msac_decode_hi_tok against the reference with a random
+ * 3-symbol cdf, with and without cdf updates; stops comparing after
+ * the first mismatch in a run since subsequent state is divergent. */
+static void check_decode_hi_tok(MsacDSPContext *const c, uint8_t *const buf) {
+ ALIGN_STK_16(uint16_t, cdf, 2, [16]);
+ MsacContext s_c, s_a;
+
+ declare_func(unsigned, MsacContext *s, uint16_t *cdf);
+ if (check_func(c->hi_tok, "msac_decode_hi_tok")) {
+ for (int cdf_update = 0; cdf_update <= 1; cdf_update++) {
+ dav1d_msac_init(&s_c, buf, BUF_SIZE, !cdf_update);
+ s_a = s_c;
+ randomize_cdf(cdf[0], 3);
+ memcpy(cdf[1], cdf[0], sizeof(*cdf));
+ for (int i = 0; i < 64; i++) {
+ unsigned c_res = call_ref(&s_c, cdf[0]);
+ unsigned a_res = call_new(&s_a, cdf[1]);
+ if (c_res != a_res || msac_cmp(&s_c, &s_a) ||
+ memcmp(cdf[0], cdf[1], sizeof(*cdf)))
+ {
+ if (fail())
+ msac_dump(c_res, a_res, &s_c, &s_a, cdf[0], cdf[1], 3);
+ break;
+ }
+ }
+ if (cdf_update)
+ bench_new(&s_a, cdf[1]);
+ }
+ }
+ report("decode_hi_tok");
+}
+
+/* Entry point for the msac checkasm tests: populate the function
+ * table with the C reference implementations, override with the
+ * best available asm versions for this CPU (NEON on ARM, SSE2/AVX2
+ * on x86), then run every decode check on a random bitstream. */
+void checkasm_check_msac(void) {
+ MsacDSPContext c;
+ c.symbol_adapt4 = dav1d_msac_decode_symbol_adapt_c;
+ c.symbol_adapt8 = dav1d_msac_decode_symbol_adapt_c;
+ c.symbol_adapt16 = dav1d_msac_decode_symbol_adapt_c;
+ c.bool_adapt = dav1d_msac_decode_bool_adapt_c;
+ c.bool_equi = dav1d_msac_decode_bool_equi_c;
+ c.bool = dav1d_msac_decode_bool_c;
+ c.hi_tok = dav1d_msac_decode_hi_tok_c;
+
+#if (ARCH_AARCH64 || ARCH_ARM) && HAVE_ASM
+ if (dav1d_get_cpu_flags() & DAV1D_ARM_CPU_FLAG_NEON) {
+ c.symbol_adapt4 = dav1d_msac_decode_symbol_adapt4_neon;
+ c.symbol_adapt8 = dav1d_msac_decode_symbol_adapt8_neon;
+ c.symbol_adapt16 = dav1d_msac_decode_symbol_adapt16_neon;
+ c.bool_adapt = dav1d_msac_decode_bool_adapt_neon;
+ c.bool_equi = dav1d_msac_decode_bool_equi_neon;
+ c.bool = dav1d_msac_decode_bool_neon;
+ c.hi_tok = dav1d_msac_decode_hi_tok_neon;
+ }
+#elif ARCH_X86 && HAVE_ASM
+ if (dav1d_get_cpu_flags() & DAV1D_X86_CPU_FLAG_SSE2) {
+ c.symbol_adapt4 = dav1d_msac_decode_symbol_adapt4_sse2;
+ c.symbol_adapt8 = dav1d_msac_decode_symbol_adapt8_sse2;
+ c.symbol_adapt16 = dav1d_msac_decode_symbol_adapt16_sse2;
+ c.bool_adapt = dav1d_msac_decode_bool_adapt_sse2;
+ c.bool_equi = dav1d_msac_decode_bool_equi_sse2;
+ c.bool = dav1d_msac_decode_bool_sse2;
+ c.hi_tok = dav1d_msac_decode_hi_tok_sse2;
+ }
+
+#if ARCH_X86_64
+ if (dav1d_get_cpu_flags() & DAV1D_X86_CPU_FLAG_AVX2) {
+ c.symbol_adapt16 = dav1d_msac_decode_symbol_adapt16_avx2;
+ }
+#endif
+#endif
+
+ uint8_t buf[BUF_SIZE];
+ for (int i = 0; i < BUF_SIZE; i++)
+ buf[i] = rnd();
+
+ check_decode_symbol(&c, buf);
+ check_decode_bool_funcs(&c, buf);
+ check_decode_hi_tok(&c, buf);
+}
diff --git a/third_party/dav1d/tests/checkasm/x86/checkasm.asm b/third_party/dav1d/tests/checkasm/x86/checkasm.asm
new file mode 100644
index 0000000000..5ec7287a88
--- /dev/null
+++ b/third_party/dav1d/tests/checkasm/x86/checkasm.asm
@@ -0,0 +1,287 @@
+; Copyright © 2018, VideoLAN and dav1d authors
+; Copyright © 2018, Two Orioles, LLC
+; All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions are met:
+;
+; 1. Redistributions of source code must retain the above copyright notice, this
+; list of conditions and the following disclaimer.
+;
+; 2. Redistributions in binary form must reproduce the above copyright notice,
+; this list of conditions and the following disclaimer in the documentation
+; and/or other materials provided with the distribution.
+;
+; THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+; ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+; WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+; DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+; ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+; (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+; ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+; SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+%include "config.asm"
+%undef private_prefix
+%define private_prefix checkasm
+%include "ext/x86/x86inc.asm"
+
+SECTION_RODATA 16
+
+%if ARCH_X86_64
+; just random numbers to reduce the chance of incidental match
+%if WIN64
+x6: dq 0x1a1b2550a612b48c,0x79445c159ce79064
+x7: dq 0x2eed899d5a28ddcd,0x86b2536fcd8cf636
+x8: dq 0xb0856806085e7943,0x3f2bf84fc0fcca4e
+x9: dq 0xacbd382dcf5b8de2,0xd229e1f5b281303f
+x10: dq 0x71aeaff20b095fd9,0xab63e2e11fa38ed9
+x11: dq 0x89b0c0765892729a,0x77d410d5c42c882d
+x12: dq 0xc45ea11a955d8dd5,0x24b3c1d2a024048b
+x13: dq 0x2e8ec680de14b47c,0xdd7b8919edd42786
+x14: dq 0x135ce6888fa02cbf,0x11e53e2b2ac655ef
+x15: dq 0x011ff554472a7a10,0x6de8f4c914c334d5
+n7: dq 0x21f86d66c8ca00ce
+n8: dq 0x75b6ba21077c48ad
+%endif
+n9: dq 0xed56bb2dcb3c7736
+n10: dq 0x8bda43d3fd1a7e06
+n11: dq 0xb64a9c9e5d318408
+n12: dq 0xdf9a54b303f1d3a3
+n13: dq 0x4a75479abd64e097
+n14: dq 0x249214109d5d1c88
+%endif
+
+errmsg_reg: db "failed to preserve register", 0
+errmsg_stack: db "stack corruption", 0
+
+SECTION .text
+
+cextern fail_func
+
+; max number of args used by any asm function.
+; (max_args % 4) must equal 3 for stack alignment
+%define max_args 15
+
+%if ARCH_X86_64
+
+;-----------------------------------------------------------------------------
+; int checkasm_stack_clobber(uint64_t clobber, ...)
+;-----------------------------------------------------------------------------
+cglobal stack_clobber, 1, 2
+ ; Clobber the stack with junk below the stack pointer
+ %define argsize (max_args+6)*8
+ SUB rsp, argsize
+ mov r1, argsize-8
+.loop:
+ mov [rsp+r1], r0
+ sub r1, 8
+ jge .loop
+ ADD rsp, argsize
+ RET
+
+%if WIN64
+ %assign free_regs 7
+ %define stack_param rsp+32 ; shadow space
+ %define num_stack_params rsp+stack_offset+22*8
+ DECLARE_REG_TMP 4
+%else
+ %assign free_regs 9
+ %define stack_param rsp
+ %define num_stack_params rsp+stack_offset+16*8
+ DECLARE_REG_TMP 7
+%endif
+
+;-----------------------------------------------------------------------------
+; void checkasm_checked_call(void *func, ...)
+;-----------------------------------------------------------------------------
+INIT_XMM
+cglobal checked_call, 2, 15, 16, max_args*8+64+8
+ mov t0, r0
+
+ ; All arguments have been pushed on the stack instead of registers in
+ ; order to test for incorrect assumptions that 32-bit ints are
+ ; zero-extended to 64-bit.
+ mov r0, r6mp
+ mov r1, r7mp
+ mov r2, r8mp
+ mov r3, r9mp
+%if UNIX64
+ mov r4, r10mp
+ mov r5, r11mp
+%else ; WIN64
+ ; Move possible floating-point arguments to the correct registers
+ movq m0, r0
+ movq m1, r1
+ movq m2, r2
+ movq m3, r3
+
+ %assign i 6
+ %rep 16-6
+ mova m %+ i, [x %+ i]
+ %assign i i+1
+ %endrep
+%endif
+
+ ; write stack canaries to the area above parameters passed on the stack
+ mov r9d, [num_stack_params]
+ mov r8, [rsp+stack_offset] ; return address
+ not r8
+%assign i 0
+%rep 8 ; 64 bytes
+ mov [stack_param+(r9+i)*8], r8
+ %assign i i+1
+%endrep
+ dec r9d
+ jl .stack_setup_done ; no stack parameters
+.copy_stack_parameter:
+ mov r8, [stack_param+stack_offset+7*8+r9*8]
+ mov [stack_param+r9*8], r8
+ dec r9d
+ jge .copy_stack_parameter
+.stack_setup_done:
+
+%assign i 14
+%rep 15-free_regs
+ mov r %+ i, [n %+ i]
+ %assign i i-1
+%endrep
+ call t0
+
+ ; check for failure to preserve registers
+ xor r14, [n14]
+ lea r0, [errmsg_reg]
+%assign i 13
+%rep 14-free_regs
+ xor r %+ i, [n %+ i]
+ or r14, r %+ i
+ %assign i i-1
+%endrep
+%if WIN64
+ pxor m6, [x6]
+ %assign i 7
+ %rep 16-7
+ pxor m %+ i, [x %+ i]
+ por m6, m %+ i
+ %assign i i+1
+ %endrep
+ packsswb m6, m6
+ movq r5, m6
+ or r14, r5
+%endif
+ jnz .fail
+
+ ; check for stack corruption
+ mov r9d, [num_stack_params]
+ mov r8, [rsp+stack_offset]
+ mov r4, [stack_param+r9*8]
+ not r8
+ xor r4, r8
+%assign i 1
+%rep 6
+ mov r5, [stack_param+(r9+i)*8]
+ xor r5, r8
+ or r4, r5
+ %assign i i+1
+%endrep
+ xor r8, [stack_param+(r9+7)*8]
+ or r4, r8
+ jz .ok
+ add r0, errmsg_stack-errmsg_reg
+.fail:
+ ; Call fail_func() with a descriptive message to mark it as a failure.
+ ; Save the return value located in rdx:rax first to prevent clobbering.
+ mov r9, rax
+ mov r10, rdx
+ xor eax, eax
+ call fail_func
+ mov rdx, r10
+ mov rax, r9
+.ok:
+ RET
+
+; trigger a warmup of vector units
+%macro WARMUP 0
+cglobal warmup, 0, 0
+ xorps m0, m0
+ mulps m0, m0
+ RET
+%endmacro
+
+INIT_YMM avx2
+WARMUP
+INIT_ZMM avx512
+WARMUP
+
+%else
+
+; just random numbers to reduce the chance of incidental match
+%assign n3 0x6549315c
+%assign n4 0xe02f3e23
+%assign n5 0xb78d0d1d
+%assign n6 0x33627ba7
+
+;-----------------------------------------------------------------------------
+; void checkasm_checked_call(void *func, ...)
+;-----------------------------------------------------------------------------
+cglobal checked_call, 1, 7
+ mov r3, [esp+stack_offset] ; return address
+ mov r1, [esp+stack_offset+17*4] ; num_stack_params
+ mov r2, 27
+ not r3
+ sub r2, r1
+.push_canary:
+ push r3
+ dec r2
+ jg .push_canary
+.push_parameter:
+ push dword [esp+32*4]
+ dec r1
+ jg .push_parameter
+ mov r3, n3
+ mov r4, n4
+ mov r5, n5
+ mov r6, n6
+ call r0
+
+ ; check for failure to preserve registers
+ xor r3, n3
+ xor r4, n4
+ xor r5, n5
+ xor r6, n6
+ or r3, r4
+ or r5, r6
+ LEA r1, errmsg_reg
+ or r3, r5
+ jnz .fail
+
+ ; check for stack corruption
+ mov r3, [esp+48*4] ; num_stack_params
+ mov r6, [esp+31*4] ; return address
+ mov r4, [esp+r3*4]
+ sub r3, 26
+ not r6
+ xor r4, r6
+.check_canary:
+ mov r5, [esp+(r3+27)*4]
+ xor r5, r6
+ or r4, r5
+ inc r3
+ jl .check_canary
+ test r4, r4
+ jz .ok
+ add r1, errmsg_stack-errmsg_reg
+.fail:
+ mov r3, eax
+ mov r4, edx
+ mov [esp], r1
+ call fail_func
+ mov edx, r4
+ mov eax, r3
+.ok:
+ add esp, 27*4
+ RET
+
+%endif ; ARCH_X86_64
diff --git a/third_party/dav1d/tests/header_test.c b/third_party/dav1d/tests/header_test.c
new file mode 100644
index 0000000000..dfe0dfb431
--- /dev/null
+++ b/third_party/dav1d/tests/header_test.c
@@ -0,0 +1,33 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Two Orioles, LLC
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include DAV1D_TEST_HEADER
+
+int main()
+{
+ return 0;
+}
diff --git a/third_party/dav1d/tests/libfuzzer/alloc_fail.c b/third_party/dav1d/tests/libfuzzer/alloc_fail.c
new file mode 100644
index 0000000000..ddd1dd71ab
--- /dev/null
+++ b/third_party/dav1d/tests/libfuzzer/alloc_fail.c
@@ -0,0 +1,102 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <pthread.h>
+
+#include "alloc_fail.h"
+
+static int fail_probability;
+
+void dav1d_setup_alloc_fail(unsigned seed, unsigned probability) {
+ srand(seed);
+
+ while (probability >= RAND_MAX)
+ probability >>= 1;
+
+ fail_probability = probability;
+}
+
+void * __wrap_malloc(size_t);
+
+void * __wrap_malloc(size_t sz) {
+ if (rand() < fail_probability)
+ return NULL;
+ return malloc(sz);
+}
+
+#if defined(HAVE_POSIX_MEMALIGN)
+int __wrap_posix_memalign(void **memptr, size_t alignment, size_t size);
+
+int __wrap_posix_memalign(void **memptr, size_t alignment, size_t size) {
+ if (rand() < fail_probability)
+ return ENOMEM;
+ return posix_memalign(memptr, alignment, size);
+}
+#else
+#error "HAVE_POSIX_MEMALIGN required"
+#endif
+
+int __wrap_pthread_create(pthread_t *, const pthread_attr_t *,
+ void *(*) (void *), void *);
+
+int __wrap_pthread_create(pthread_t *thread, const pthread_attr_t *attr,
+ void *(*start_routine) (void *), void *arg)
+{
+ if (rand() < (fail_probability + RAND_MAX/16))
+ return EAGAIN;
+
+ return pthread_create(thread, attr, start_routine, arg);
+}
+
+int __wrap_pthread_mutex_init(pthread_mutex_t *,
+ const pthread_mutexattr_t *);
+
+int __wrap_pthread_mutex_init(pthread_mutex_t *restrict mutex,
+ const pthread_mutexattr_t *restrict attr)
+{
+ if (rand() < (fail_probability + RAND_MAX/8))
+ return ENOMEM;
+
+ return pthread_mutex_init(mutex, attr);
+}
+
+int __wrap_pthread_cond_init(pthread_cond_t *,
+ const pthread_condattr_t *);
+
+int __wrap_pthread_cond_init(pthread_cond_t *restrict cond,
+ const pthread_condattr_t *restrict attr)
+{
+ if (rand() < (fail_probability + RAND_MAX/16))
+ return ENOMEM;
+
+ return pthread_cond_init(cond, attr);
+}
diff --git a/third_party/dav1d/tests/libfuzzer/alloc_fail.h b/third_party/dav1d/tests/libfuzzer/alloc_fail.h
new file mode 100644
index 0000000000..5ace870beb
--- /dev/null
+++ b/third_party/dav1d/tests/libfuzzer/alloc_fail.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DAV1D_TESTS_LIBFUZZER_ALLOC_FAIL_H
+#define DAV1D_TESTS_LIBFUZZER_ALLOC_FAIL_H
+
+#include <dav1d/common.h>
+
+DAV1D_API void dav1d_setup_alloc_fail(unsigned seed, unsigned probability);
+
+#endif /* DAV1D_TESTS_LIBFUZZER_ALLOC_FAIL_H */
diff --git a/third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.c b/third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.c
new file mode 100644
index 0000000000..bd040a861a
--- /dev/null
+++ b/third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.c
@@ -0,0 +1,193 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include <errno.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <string.h>
+#include <stdlib.h>
+
+#include <dav1d/dav1d.h>
+#include "src/cpu.h"
+#include "dav1d_fuzzer.h"
+
+#ifdef DAV1D_ALLOC_FAIL
+
+#include "alloc_fail.h"
+
+static unsigned djb_xor(const uint8_t * c, size_t len) {
+ unsigned hash = 5381;
+ for(size_t i = 0; i < len; i++)
+ hash = hash * 33 ^ c[i];
+ return hash;
+}
+#endif
+
+static unsigned r32le(const uint8_t *const p) {
+ return ((uint32_t)p[3] << 24U) | (p[2] << 16U) | (p[1] << 8U) | p[0];
+}
+
+#define DAV1D_FUZZ_MAX_SIZE 4096 * 4096
+
+// search for "--cpumask xxx" in argv and remove both parameters
+int LLVMFuzzerInitialize(int *argc, char ***argv) {
+ int i = 1;
+ for (; i < *argc; i++) {
+ if (!strcmp((*argv)[i], "--cpumask")) {
+ const char * cpumask = (*argv)[i+1];
+ if (cpumask) {
+ char *end;
+ unsigned res;
+ if (!strncmp(cpumask, "0x", 2)) {
+ cpumask += 2;
+ res = (unsigned) strtoul(cpumask, &end, 16);
+ } else {
+ res = (unsigned) strtoul(cpumask, &end, 0);
+ }
+ if (end != cpumask && !end[0]) {
+ dav1d_set_cpu_flags_mask(res);
+ }
+ }
+ break;
+ }
+ }
+
+ for (; i < *argc - 2; i++) {
+ (*argv)[i] = (*argv)[i + 2];
+ }
+
+ *argc = i;
+
+ return 0;
+}
+
+
+// expects ivf input
+
+int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size)
+{
+ Dav1dSettings settings = { 0 };
+ Dav1dContext * ctx = NULL;
+ Dav1dPicture pic;
+ const uint8_t *ptr = data;
+ int have_seq_hdr = 0;
+ int err;
+
+ dav1d_version();
+
+ if (size < 32) goto end;
+#ifdef DAV1D_ALLOC_FAIL
+ unsigned h = djb_xor(ptr, 32);
+ unsigned seed = h;
+ unsigned probability = h > (RAND_MAX >> 5) ? RAND_MAX >> 5 : h;
+ int n_frame_threads = (h & 0xf) + 1;
+ int n_tile_threads = ((h >> 4) & 0x7) + 1;
+ if (n_frame_threads > 5) n_frame_threads = 1;
+ if (n_tile_threads > 3) n_tile_threads = 1;
+#endif
+ ptr += 32; // skip ivf header
+
+ dav1d_default_settings(&settings);
+
+#ifdef DAV1D_MT_FUZZING
+ settings.n_frame_threads = settings.n_tile_threads = 2;
+#elif defined(DAV1D_ALLOC_FAIL)
+ settings.n_frame_threads = n_frame_threads;
+ settings.n_tile_threads = n_tile_threads;
+ dav1d_setup_alloc_fail(seed, probability);
+#else
+ settings.n_frame_threads = settings.n_tile_threads = 1;
+#endif
+#if defined(DAV1D_FUZZ_MAX_SIZE)
+ settings.frame_size_limit = DAV1D_FUZZ_MAX_SIZE;
+#endif
+
+ err = dav1d_open(&ctx, &settings);
+ if (err < 0) goto end;
+
+ while (ptr <= data + size - 12) {
+ Dav1dData buf;
+ uint8_t *p;
+
+ size_t frame_size = r32le(ptr);
+ ptr += 12;
+
+ if (frame_size > size || ptr > data + size - frame_size)
+ break;
+
+ if (!frame_size) continue;
+
+ if (!have_seq_hdr) {
+ Dav1dSequenceHeader seq = { 0 };
+ int err = dav1d_parse_sequence_header(&seq, ptr, frame_size);
+ // skip frames until we see a sequence header
+ if (err != 0) {
+ ptr += frame_size;
+ continue;
+ }
+ have_seq_hdr = 1;
+ }
+
+ // copy frame data to a new buffer to catch reads past the end of input
+ p = dav1d_data_create(&buf, frame_size);
+ if (!p) goto cleanup;
+ memcpy(p, ptr, frame_size);
+ ptr += frame_size;
+
+ do {
+ if ((err = dav1d_send_data(ctx, &buf)) < 0) {
+ if (err != DAV1D_ERR(EAGAIN))
+ break;
+ }
+ memset(&pic, 0, sizeof(pic));
+ err = dav1d_get_picture(ctx, &pic);
+ if (err == 0) {
+ dav1d_picture_unref(&pic);
+ } else if (err != DAV1D_ERR(EAGAIN)) {
+ break;
+ }
+ } while (buf.sz > 0);
+
+ if (buf.sz > 0)
+ dav1d_data_unref(&buf);
+ }
+
+ do {
+ memset(&pic, 0, sizeof(pic));
+ err = dav1d_get_picture(ctx, &pic);
+ if (err == 0)
+ dav1d_picture_unref(&pic);
+ } while (err != DAV1D_ERR(EAGAIN));
+
+cleanup:
+ dav1d_flush(ctx);
+ dav1d_close(&ctx);
+end:
+ return 0;
+}
diff --git a/third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.h b/third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.h
new file mode 100644
index 0000000000..0cbbad46b0
--- /dev/null
+++ b/third_party/dav1d/tests/libfuzzer/dav1d_fuzzer.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DAV1D_TESTS_LIBFUZZER_DAV1D_FUZZER_H
+#define DAV1D_TESTS_LIBFUZZER_DAV1D_FUZZER_H
+
+#include <stddef.h>
+#include <stdint.h>
+
+int LLVMFuzzerInitialize(int *argc, char ***argv);
+int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size);
+
+#endif /* DAV1D_TESTS_LIBFUZZER_DAV1D_FUZZER_H */
diff --git a/third_party/dav1d/tests/libfuzzer/main.c b/third_party/dav1d/tests/libfuzzer/main.c
new file mode 100644
index 0000000000..8647738666
--- /dev/null
+++ b/third_party/dav1d/tests/libfuzzer/main.c
@@ -0,0 +1,100 @@
+/*
+ * Copyright © 2018, VideoLAN and dav1d authors
+ * Copyright © 2018, Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "dav1d_fuzzer.h"
+
+// expects ivf input
+
+int main(int argc, char *argv[]) {
+ int ret = -1;
+ FILE *f = NULL;
+ int64_t fsize;
+ const char *filename = NULL;
+ uint8_t *data = NULL;
+ size_t size = 0;
+
+ if (LLVMFuzzerInitialize(&argc, &argv)) {
+ return 1;
+ }
+
+ if (argc != 2) {
+ fprintf(stdout, "Usage:\n%s fuzzing_testcase.ivf\n", argv[0]);
+ return -1;
+ }
+ filename = argv[1];
+
+ if (!(f = fopen(filename, "rb"))) {
+ fprintf(stderr, "failed to open %s: %s\n", filename, strerror(errno));
+ goto error;
+ }
+
+ if (fseeko(f, 0, SEEK_END) == -1) {
+ fprintf(stderr, "fseek(%s, 0, SEEK_END) failed: %s\n", filename,
+ strerror(errno));
+ goto error;
+ }
+ if ((fsize = ftello(f)) == -1) {
+ fprintf(stderr, "ftell(%s) failed: %s\n", filename, strerror(errno));
+ goto error;
+ }
+ rewind(f);
+
+ if (fsize < 0 || fsize > INT_MAX) {
+ fprintf(stderr, "%s is too large: %"PRId64"\n", filename, fsize);
+ goto error;
+ }
+ size = (size_t)fsize;
+
+ if (!(data = malloc(size))) {
+ fprintf(stderr, "failed to allocate: %zu bytes\n", size);
+ goto error;
+ }
+
+ if (fread(data, size, 1, f) == size) {
+ fprintf(stderr, "failed to read %zu bytes from %s: %s\n", size,
+ filename, strerror(errno));
+ goto error;
+ }
+
+ ret = LLVMFuzzerTestOneInput(data, size);
+
+error:
+ free(data);
+ if (f) fclose(f);
+ return ret;
+}
diff --git a/third_party/dav1d/tests/libfuzzer/meson.build b/third_party/dav1d/tests/libfuzzer/meson.build
new file mode 100644
index 0000000000..bba47dc79f
--- /dev/null
+++ b/third_party/dav1d/tests/libfuzzer/meson.build
@@ -0,0 +1,109 @@
+# Copyright © 2020, VideoLAN and dav1d authors
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# Build definition for the dav1d fuzzing binaries
+#
+
+if fuzzing_engine == 'none' and not have_fseeko
+ subdir_done()
+endif
+
+dav1d_fuzzer_sources = files('dav1d_fuzzer.c')
+fuzzer_ldflags = []
+fuzzer_link_lang = {}
+
+if get_option('fuzzer_ldflags') != ''
+ fuzzer_ldflags += [get_option('fuzzer_ldflags')]
+endif
+
+if fuzzing_engine == 'none'
+ dav1d_fuzzer_sources += files('main.c')
+elif fuzzing_engine == 'libfuzzer'
+ fuzzer_ldflags += ['-fsanitize=fuzzer']
+elif fuzzing_engine == 'oss-fuzz'
+ # libFuzzingEngine needs c++
+ add_languages('cpp')
+ fuzzer_link_lang = {'link_language': 'cpp'}
+endif
+
+dav1d_fuzzer = executable('dav1d_fuzzer',
+ dav1d_fuzzer_sources,
+ include_directories: dav1d_inc_dirs,
+ c_args: [stackalign_flag, stackrealign_flag],
+ link_args: fuzzer_ldflags,
+ link_with : libdav1d,
+ build_by_default: true,
+ dependencies : [thread_dependency],
+ kwargs: fuzzer_link_lang
+ )
+
+dav1d_fuzzer_mt = executable('dav1d_fuzzer_mt',
+ dav1d_fuzzer_sources,
+ include_directories: dav1d_inc_dirs,
+ c_args: [stackalign_flag, stackrealign_flag, '-DDAV1D_MT_FUZZING'],
+ link_args: fuzzer_ldflags,
+ link_with : libdav1d,
+ build_by_default: true,
+ dependencies : [thread_dependency],
+ kwargs: fuzzer_link_lang
+ )
+
+objcopy = find_program('objcopy',
+ required: false)
+
+if meson.version().version_compare('<0.56.99')
+ lto = get_option('b_lto') ? 'true' : 'false'
+else
+ lto = get_option('b_lto')
+endif
+
+if (objcopy.found() and
+ lto == 'false' and
+ get_option('default_library') == 'static' and
+ cc.has_function('posix_memalign', prefix : '#include <stdlib.h>', args : test_args))
+
+ libdav1d_af = custom_target('libdav1d_af',
+ input: libdav1d,
+ output: 'libdav1d_af.a',
+ depends: libdav1d,
+ command: [objcopy,
+ '--redefine-sym', 'malloc=__wrap_malloc',
+ '--redefine-sym', 'posix_memalign=__wrap_posix_memalign',
+ '--redefine-sym', 'pthread_create=__wrap_pthread_create',
+ '--redefine-sym', 'pthread_cond_init=__wrap_pthread_cond_init',
+ '--redefine-sym', 'pthread_mutex_init=__wrap_pthread_mutex_init',
+ '@INPUT@', '@OUTPUT@'])
+
+ dav1d_fuzzer_mem = executable('dav1d_fuzzer_mem',
+ dav1d_fuzzer_sources + ['alloc_fail.c'],
+ include_directories: dav1d_inc_dirs,
+ c_args: [stackalign_flag, stackrealign_flag, '-DDAV1D_ALLOC_FAIL'],
+ link_args: fuzzer_ldflags + [join_paths(libdav1d_af.full_path())],
+ link_depends: libdav1d_af,
+ build_by_default: false,
+ dependencies : [thread_dependency],
+ kwargs: fuzzer_link_lang
+ )
+endif
diff --git a/third_party/dav1d/tests/meson.build b/third_party/dav1d/tests/meson.build
new file mode 100644
index 0000000000..e26358f737
--- /dev/null
+++ b/third_party/dav1d/tests/meson.build
@@ -0,0 +1,132 @@
+# Copyright © 2018, VideoLAN and dav1d authors
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice, this
+# list of conditions and the following disclaimer.
+#
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#
+# Build definition for the dav1d tests
+#
+
+# Leave subdir if tests are disabled
+if not get_option('enable_tests')
+ subdir_done()
+endif
+
+if is_asm_enabled
+ checkasm_sources = files(
+ 'checkasm/checkasm.c',
+ 'checkasm/msac.c',
+ )
+
+ checkasm_tmpl_sources = files(
+ 'checkasm/cdef.c',
+ 'checkasm/filmgrain.c',
+ 'checkasm/ipred.c',
+ 'checkasm/itx.c',
+ 'checkasm/loopfilter.c',
+ 'checkasm/looprestoration.c',
+ 'checkasm/mc.c',
+ )
+
+ checkasm_bitdepth_objs = []
+ foreach bitdepth : dav1d_bitdepths
+ checkasm_bitdepth_lib = static_library(
+ 'checkasm_bitdepth_@0@'.format(bitdepth),
+ checkasm_tmpl_sources,
+ include_directories: dav1d_inc_dirs,
+ c_args: ['-DBITDEPTH=@0@'.format(bitdepth), stackalign_flag],
+ install: false,
+ build_by_default: false,
+ )
+ checkasm_bitdepth_objs += checkasm_bitdepth_lib.extract_all_objects()
+ endforeach
+
+ checkasm_asm_objs = []
+ checkasm_asm_sources = []
+ if host_machine.cpu_family() == 'aarch64' or host_machine.cpu() == 'arm64'
+ checkasm_asm_sources += files('checkasm/arm/checkasm_64.S')
+ elif host_machine.cpu_family().startswith('arm')
+ checkasm_asm_sources += files('checkasm/arm/checkasm_32.S')
+ elif host_machine.cpu_family().startswith('x86')
+ checkasm_asm_objs += nasm_gen.process(files('checkasm/x86/checkasm.asm'))
+ endif
+
+ if use_gaspp
+ checkasm_asm_objs += gaspp_gen.process(checkasm_asm_sources)
+ else
+ checkasm_sources += checkasm_asm_sources
+ endif
+
+ m_lib = cc.find_library('m', required: false)
+
+ checkasm = executable('checkasm',
+ checkasm_sources,
+ checkasm_asm_objs,
+
+ objects: [
+ checkasm_bitdepth_objs,
+ libdav1d.extract_all_objects(recursive: true),
+ ],
+
+ include_directories: dav1d_inc_dirs,
+ c_args: [stackalign_flag, stackrealign_flag],
+ build_by_default: false,
+ dependencies : [
+ thread_dependency,
+ rt_dependency,
+ libdl_dependency,
+ m_lib,
+ ],
+ )
+
+ test('checkasm', checkasm, is_parallel: false)
+endif
+
+c99_extension_flag = cc.first_supported_argument(
+ '-Werror=c11-extensions',
+ '-Werror=c99-c11-compat',
+ '-Wc11-extensions',
+ '-Wc99-c11-compat',
+)
+
+# dav1d_api_headers
+foreach header : dav1d_api_headers
+ target = header + '_test'
+
+ header_test_exe = executable(target,
+ 'header_test.c',
+ include_directories: dav1d_inc_dirs,
+ c_args: ['-DDAV1D_TEST_HEADER="@0@"'.format(header), c99_extension_flag],
+ build_by_default: true
+ )
+
+ test(target, header_test_exe)
+endforeach
+
+
+# fuzzing binaries
+subdir('libfuzzer')
+
+# Include dav1d test data repository with additional tests
+if get_option('testdata_tests')
+ subdir('dav1d-test-data')
+endif