Diffstat (limited to 'third_party/dav1d/src/ppc'):
 -rw-r--r--  third_party/dav1d/src/ppc/cdef.h                  |  61
 -rw-r--r--  third_party/dav1d/src/ppc/cdef_tmpl.c             | 487
 -rw-r--r--  third_party/dav1d/src/ppc/cpu.c                   |  51
 -rw-r--r--  third_party/dav1d/src/ppc/cpu.h                   |  37
 -rw-r--r--  third_party/dav1d/src/ppc/dav1d_types.h           |  54
 -rw-r--r--  third_party/dav1d/src/ppc/looprestoration.h       |  48
 -rw-r--r--  third_party/dav1d/src/ppc/looprestoration_tmpl.c  | 321
 7 files changed, 1059 insertions(+), 0 deletions(-)
diff --git a/third_party/dav1d/src/ppc/cdef.h b/third_party/dav1d/src/ppc/cdef.h
new file mode 100644
index 0000000000..b794ba53be
--- /dev/null
+++ b/third_party/dav1d/src/ppc/cdef.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright © 2019, Luca Barbato
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdlib.h>
+
+#include "common/bitdepth.h"
+#include "common/intops.h"
+
+#include "src/cdef.h"
+#include "src/cpu.h"
+
+#define cdef_vsx_fn(w, h) \
+void dav1d_cdef_filter_##w##x##h##_vsx(pixel *const dst, \
+ const ptrdiff_t dst_stride, \
+ const pixel (*left)[2], \
+ const pixel *const top, \
+ const pixel *const bottom, \
+ const int pri_strength, \
+ const int sec_strength, \
+ const int dir, \
+ const int damping, \
+ const enum CdefEdgeFlags edges)
+
+cdef_vsx_fn(4, 4);
+cdef_vsx_fn(4, 8);
+cdef_vsx_fn(8, 8);
+
+static ALWAYS_INLINE void cdef_dsp_init_ppc(Dav1dCdefDSPContext *const c) {
+ const unsigned flags = dav1d_get_cpu_flags();
+
+ if (!(flags & DAV1D_PPC_CPU_FLAG_VSX)) return;
+
+#if BITDEPTH == 8
+ c->fb[0] = dav1d_cdef_filter_8x8_vsx;
+ c->fb[1] = dav1d_cdef_filter_4x8_vsx;
+ c->fb[2] = dav1d_cdef_filter_4x4_vsx;
+#endif
+}
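
(Illustration only, not part of the patch: cdef_vsx_fn(w, h) above is a plain
prototype generator, so cdef_vsx_fn(4, 4) expands to the declaration below;
cdef_dsp_init_ppc then installs the three implementations into the fb[] table,
with fb[0] = 8x8, fb[1] = 4x8 and fb[2] = 4x4 blocks.)

    void dav1d_cdef_filter_4x4_vsx(pixel *const dst,
                                   const ptrdiff_t dst_stride,
                                   const pixel (*left)[2],
                                   const pixel *const top,
                                   const pixel *const bottom,
                                   const int pri_strength,
                                   const int sec_strength,
                                   const int dir,
                                   const int damping,
                                   const enum CdefEdgeFlags edges);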
diff --git a/third_party/dav1d/src/ppc/cdef_tmpl.c b/third_party/dav1d/src/ppc/cdef_tmpl.c
new file mode 100644
index 0000000000..e2e759810f
--- /dev/null
+++ b/third_party/dav1d/src/ppc/cdef_tmpl.c
@@ -0,0 +1,487 @@
+/*
+ * Copyright © 2019, Luca Barbato
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/ppc/dav1d_types.h"
+#include "src/ppc/cdef.h"
+
+#if BITDEPTH == 8
+static inline i16x8 vconstrain(const i16x8 diff, const int16_t threshold,
+ const int damping)
+{
+ const i16x8 zero = vec_splat_s16(0);
+ if (!threshold) return zero;
+ const uint16_t shift = imax(0, damping - ulog2(threshold));
+ const i16x8 abs_diff = vec_abs(diff);
+ const b16x8 mask = vec_cmplt(diff, zero);
+ const i16x8 thr = vec_splats(threshold);
+ const i16x8 sub = vec_sub(thr, vec_sra(abs_diff, vec_splats(shift)));
+ const i16x8 max = vec_max(zero, sub);
+ const i16x8 min = vec_min(abs_diff, max);
+ const i16x8 neg = vec_sub(zero, min);
+ return vec_sel(min, neg, mask);
+}
+
+static inline void copy4xN(uint16_t *tmp, const ptrdiff_t tmp_stride,
+ const uint8_t *src, const ptrdiff_t src_stride,
+ const uint8_t (*left)[2], const uint8_t *const top,
+ const uint8_t *const bottom, const int w, const int h,
+ const enum CdefEdgeFlags edges)
+{
+ const u16x8 fill = vec_splats((uint16_t)INT16_MAX);
+
+ u16x8 l0;
+ u16x8 l1;
+
+ int y_start = -2, y_end = h + 2;
+
+ // Copy top and bottom first
+ if (!(edges & CDEF_HAVE_TOP)) {
+ l0 = fill;
+ l1 = fill;
+ y_start = 0;
+ } else {
+ l0 = u8h_to_u16(vec_vsx_ld(0, top + 0 * src_stride - 2));
+ l1 = u8h_to_u16(vec_vsx_ld(0, top + 1 * src_stride - 2));
+ }
+
+ vec_st(l0, 0, tmp - 2 * 8);
+ vec_st(l1, 0, tmp - 1 * 8);
+
+ if (!(edges & CDEF_HAVE_BOTTOM)) {
+ l0 = fill;
+ l1 = fill;
+ y_end -= 2;
+ } else {
+ l0 = u8h_to_u16(vec_vsx_ld(0, bottom + 0 * src_stride - 2));
+ l1 = u8h_to_u16(vec_vsx_ld(0, bottom + 1 * src_stride - 2));
+ }
+
+ vec_st(l0, 0, tmp + (h + 0) * 8);
+ vec_st(l1, 0, tmp + (h + 1) * 8);
+
+ int y_with_left_edge = 0;
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ u16x8 l = u8h_to_u16(vec_vsx_ld(0, src));
+ vec_vsx_st(l, 0, tmp + 2);
+
+ y_with_left_edge = 1;
+ }
+
+ for (int y = y_with_left_edge; y < h; y++) {
+ u16x8 l = u8h_to_u16(vec_vsx_ld(0, src - 2 + y * src_stride));
+ vec_st(l, 0, tmp + y * 8);
+ }
+
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[y * 8] = INT16_MAX;
+ tmp[1 + y * 8] = INT16_MAX;
+ }
+ } else {
+ for (int y = 0; y < h; y++) {
+ tmp[y * 8] = left[y][0];
+ tmp[1 + y * 8] = left[y][1];
+ }
+ }
+ if (!(edges & CDEF_HAVE_RIGHT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[- 2 + (y + 1) * 8] = INT16_MAX;
+ tmp[- 1 + (y + 1) * 8] = INT16_MAX;
+ }
+ }
+}
+
+static inline void copy8xN(uint16_t *tmp, const ptrdiff_t tmp_stride,
+ const uint8_t *src, const ptrdiff_t src_stride,
+ const uint8_t (*left)[2], const uint8_t *const top,
+ const uint8_t *const bottom, const int w, const int h,
+ const enum CdefEdgeFlags edges)
+{
+ const u16x8 fill = vec_splats((uint16_t)INT16_MAX);
+
+ u16x8 l0h, l0l;
+ u16x8 l1h, l1l;
+
+ int y_start = -2, y_end = h + 2;
+
+ // Copy top and bottom first
+ if (!(edges & CDEF_HAVE_TOP)) {
+ l0h = fill;
+ l0l = fill;
+ l1h = fill;
+ l1l = fill;
+ y_start = 0;
+ } else {
+ u8x16 l0 = vec_vsx_ld(0, top + 0 * src_stride - 2);
+ u8x16 l1 = vec_vsx_ld(0, top + 1 * src_stride - 2);
+ l0h = u8h_to_u16(l0);
+ l0l = u8l_to_u16(l0);
+ l1h = u8h_to_u16(l1);
+ l1l = u8l_to_u16(l1);
+ }
+
+ vec_st(l0h, 0, tmp - 4 * 8);
+ vec_st(l0l, 0, tmp - 3 * 8);
+ vec_st(l1h, 0, tmp - 2 * 8);
+ vec_st(l1l, 0, tmp - 1 * 8);
+
+ if (!(edges & CDEF_HAVE_BOTTOM)) {
+ l0h = fill;
+ l0l = fill;
+ l1h = fill;
+ l1l = fill;
+ y_end -= 2;
+ } else {
+ u8x16 l0 = vec_vsx_ld(0, bottom + 0 * src_stride - 2);
+ u8x16 l1 = vec_vsx_ld(0, bottom + 1 * src_stride - 2);
+ l0h = u8h_to_u16(l0);
+ l0l = u8l_to_u16(l0);
+ l1h = u8h_to_u16(l1);
+ l1l = u8l_to_u16(l1);
+ }
+
+ vec_st(l0h, 0, tmp + (h + 0) * 16);
+ vec_st(l0l, 0, tmp + (h + 0) * 16 + 8);
+ vec_st(l1h, 0, tmp + (h + 1) * 16);
+ vec_st(l1l, 0, tmp + (h + 1) * 16 + 8);
+
+ int y_with_left_edge = 0;
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ u8x16 l = vec_vsx_ld(0, src);
+ u16x8 lh = u8h_to_u16(l);
+ u16x8 ll = u8l_to_u16(l);
+ vec_vsx_st(lh, 0, tmp + 2);
+ vec_vsx_st(ll, 0, tmp + 8 + 2);
+
+ y_with_left_edge = 1;
+ }
+
+ for (int y = y_with_left_edge; y < h; y++) {
+ u8x16 l = vec_vsx_ld(0, src - 2 + y * src_stride);
+ u16x8 lh = u8h_to_u16(l);
+ u16x8 ll = u8l_to_u16(l);
+ vec_st(lh, 0, tmp + y * 16);
+ vec_st(ll, 0, tmp + 8 + y * 16);
+ }
+
+ if (!(edges & CDEF_HAVE_LEFT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[y * 16] = INT16_MAX;
+ tmp[1 + y * 16] = INT16_MAX;
+ }
+ } else {
+ for (int y = 0; y < h; y++) {
+ tmp[y * 16] = left[y][0];
+ tmp[1 + y * 16] = left[y][1];
+ }
+ }
+ if (!(edges & CDEF_HAVE_RIGHT)) {
+ for (int y = y_start; y < y_end; y++) {
+ tmp[- 6 + (y + 1) * 16] = INT16_MAX;
+ tmp[- 5 + (y + 1) * 16] = INT16_MAX;
+ }
+ }
+}
+
+static inline i16x8 max_mask(i16x8 a, i16x8 b) {
+ const i16x8 I16X8_INT16_MAX = vec_splats((int16_t)INT16_MAX);
+
+ const b16x8 mask = vec_cmpeq(a, I16X8_INT16_MAX);
+
+ const i16x8 val = vec_sel(a, b, mask);
+
+ return vec_max(val, b);
+}
+
+#define LOAD_PIX(addr) \
+ const i16x8 px = (i16x8)vec_vsx_ld(0, addr); \
+ i16x8 max = px; \
+ i16x8 min = px; \
+ i16x8 sum = vec_splat_s16(0);
+
+#define LOAD_PIX4(addr) \
+ const i16x8 a = (i16x8)vec_vsx_ld(0, addr); \
+ const i16x8 b = (i16x8)vec_vsx_ld(0, addr + tmp_stride); \
+ const i16x8 px = vec_xxpermdi(a, b, 0); \
+ i16x8 max = px; \
+ i16x8 min = px; \
+ i16x8 sum = vec_splat_s16(0);
+
+#define LOAD_DIR(p, addr, o0, o1) \
+ const i16x8 p ## 0 = (i16x8)vec_vsx_ld(0, addr + o0); \
+ const i16x8 p ## 1 = (i16x8)vec_vsx_ld(0, addr - o0); \
+ const i16x8 p ## 2 = (i16x8)vec_vsx_ld(0, addr + o1); \
+ const i16x8 p ## 3 = (i16x8)vec_vsx_ld(0, addr - o1);
+
+#define LOAD_DIR4(p, addr, o0, o1) \
+ LOAD_DIR(p ## a, addr, o0, o1) \
+ LOAD_DIR(p ## b, addr + tmp_stride, o0, o1) \
+ const i16x8 p ## 0 = vec_xxpermdi(p ## a ## 0, p ## b ## 0, 0); \
+ const i16x8 p ## 1 = vec_xxpermdi(p ## a ## 1, p ## b ## 1, 0); \
+ const i16x8 p ## 2 = vec_xxpermdi(p ## a ## 2, p ## b ## 2, 0); \
+ const i16x8 p ## 3 = vec_xxpermdi(p ## a ## 3, p ## b ## 3, 0);
+
+#define CONSTRAIN(p, strength) \
+ const i16x8 p ## _d0 = vec_sub(p ## 0, px); \
+ const i16x8 p ## _d1 = vec_sub(p ## 1, px); \
+ const i16x8 p ## _d2 = vec_sub(p ## 2, px); \
+ const i16x8 p ## _d3 = vec_sub(p ## 3, px); \
+\
+ i16x8 p ## _c0 = vconstrain(p ## _d0, strength, damping); \
+ i16x8 p ## _c1 = vconstrain(p ## _d1, strength, damping); \
+ i16x8 p ## _c2 = vconstrain(p ## _d2, strength, damping); \
+ i16x8 p ## _c3 = vconstrain(p ## _d3, strength, damping);
+
+#define MIN_MAX(p) \
+ max = max_mask(p ## 0, max); \
+ min = vec_min(p ## 0, min); \
+ max = max_mask(p ## 1, max); \
+ min = vec_min(p ## 1, min); \
+ max = max_mask(p ## 2, max); \
+ min = vec_min(p ## 2, min); \
+ max = max_mask(p ## 3, max); \
+ min = vec_min(p ## 3, min);
+
+#define PRI_0(p) \
+ p ## _c0 = vec_add(vec_sl(p ## _c0, vec_splat_u16(1)), vec_sl(p ## _c0, vec_splats(tap_even))); \
+ p ## _c1 = vec_add(vec_sl(p ## _c1, vec_splat_u16(1)), vec_sl(p ## _c1, vec_splats(tap_even)));
+
+#define PRI_1(p) \
+ p ## _c2 = vec_sub(vec_sl(p ## _c2, vec_splat_u16(2)), vec_sl(p ## _c2, vec_splats(tap_even))); \
+ p ## _c3 = vec_sub(vec_sl(p ## _c3, vec_splat_u16(2)), vec_sl(p ## _c3, vec_splats(tap_even)));
+
+#define SEC_0(p) \
+ p ## _c0 = vec_sl(p ## _c0, vec_splat_u16(1)); \
+ p ## _c1 = vec_sl(p ## _c1, vec_splat_u16(1)); \
+ p ## _c2 = vec_sl(p ## _c2, vec_splat_u16(1)); \
+ p ## _c3 = vec_sl(p ## _c3, vec_splat_u16(1));
+
+#define UPDATE_SUM(p) \
+ const i16x8 p ## sum0 = vec_add(p ## _c0, p ## _c1); \
+ const i16x8 p ## sum1 = vec_add(p ## _c2, p ## _c3); \
+ sum = vec_add(sum, p ## sum0); \
+ sum = vec_add(sum, p ## sum1);
+
+static inline void
+filter_4xN(pixel *dst, const ptrdiff_t dst_stride,
+ const pixel (*left)[2], const pixel *const top,
+ const pixel *const bottom, const int w, const int h,
+ const int pri_strength, const int sec_strength, const int dir,
+ const int damping, const enum CdefEdgeFlags edges,
+ const ptrdiff_t tmp_stride, uint16_t *tmp)
+{
+ const int8_t cdef_directions[8 /* dir */][2 /* pass */] = {
+ { -1 * tmp_stride + 1, -2 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, -1 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 0 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 1 * tmp_stride + 2 },
+ { 1 * tmp_stride + 1, 2 * tmp_stride + 2 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 1 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 0 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride - 1 }
+ };
+ const int bitdepth_min_8 = bitdepth_from_max(bitdepth_max) - 8;
+ const uint16_t tap_even = !((pri_strength >> bitdepth_min_8) & 1);
+ const int off1 = cdef_directions[dir][0];
+ const int off1_1 = cdef_directions[dir][1];
+
+ const int off2 = cdef_directions[(dir + 2) & 7][0];
+ const int off3 = cdef_directions[(dir + 6) & 7][0];
+
+ const int off2_1 = cdef_directions[(dir + 2) & 7][1];
+ const int off3_1 = cdef_directions[(dir + 6) & 7][1];
+
+ copy4xN(tmp - 2, tmp_stride, dst, dst_stride, left, top, bottom, w, h, edges);
+
+ for (int y = 0; y < h / 2; y++) {
+ LOAD_PIX4(tmp)
+
+ // Primary pass
+ LOAD_DIR4(p, tmp, off1, off1_1)
+
+ CONSTRAIN(p, pri_strength)
+
+ MIN_MAX(p)
+
+ PRI_0(p)
+ PRI_1(p)
+
+ UPDATE_SUM(p)
+
+ // Secondary pass 1
+ LOAD_DIR4(s, tmp, off2, off3)
+
+ CONSTRAIN(s, sec_strength)
+
+ MIN_MAX(s)
+
+ SEC_0(s)
+
+ UPDATE_SUM(s)
+
+ // Secondary pass 2
+ LOAD_DIR4(s2, tmp, off2_1, off3_1)
+
+ CONSTRAIN(s2, sec_strength)
+
+ MIN_MAX(s2)
+
+ UPDATE_SUM(s2)
+
+ // Store
+ i16x8 bias = vec_and((i16x8)vec_cmplt(sum, vec_splat_s16(0)), vec_splat_s16(1));
+ bias = vec_sub(vec_splat_s16(8), bias);
+ i16x8 unclamped = vec_add(px, vec_sra(vec_add(sum, bias), vec_splat_u16(4)));
+ i16x8 vdst = vec_max(vec_min(unclamped, max), min);
+
+ dst[0] = vdst[0];
+ dst[1] = vdst[1];
+ dst[2] = vdst[2];
+ dst[3] = vdst[3];
+
+ tmp += tmp_stride;
+ dst += PXSTRIDE(dst_stride);
+ dst[0] = vdst[4];
+ dst[1] = vdst[5];
+ dst[2] = vdst[6];
+ dst[3] = vdst[7];
+
+ tmp += tmp_stride;
+ dst += PXSTRIDE(dst_stride);
+ }
+}
+
+static inline void
+filter_8xN(pixel *dst, const ptrdiff_t dst_stride,
+ const pixel (*left)[2], const pixel *const top,
+ const pixel *const bottom, const int w, const int h,
+ const int pri_strength, const int sec_strength, const int dir,
+ const int damping, const enum CdefEdgeFlags edges,
+ const ptrdiff_t tmp_stride, uint16_t *tmp)
+{
+ const int8_t cdef_directions[8 /* dir */][2 /* pass */] = {
+ { -1 * tmp_stride + 1, -2 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, -1 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 0 * tmp_stride + 2 },
+ { 0 * tmp_stride + 1, 1 * tmp_stride + 2 },
+ { 1 * tmp_stride + 1, 2 * tmp_stride + 2 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 1 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride + 0 },
+ { 1 * tmp_stride + 0, 2 * tmp_stride - 1 }
+ };
+ const int bitdepth_min_8 = bitdepth_from_max(bitdepth_max) - 8;
+
+
+ const uint16_t tap_even = !((pri_strength >> bitdepth_min_8) & 1);
+ const int off1 = cdef_directions[dir][0];
+ const int off1_1 = cdef_directions[dir][1];
+
+ const int off2 = cdef_directions[(dir + 2) & 7][0];
+ const int off3 = cdef_directions[(dir + 6) & 7][0];
+
+ const int off2_1 = cdef_directions[(dir + 2) & 7][1];
+ const int off3_1 = cdef_directions[(dir + 6) & 7][1];
+
+ copy8xN(tmp - 2, tmp_stride, dst, dst_stride, left, top, bottom, w, h, edges);
+
+ for (int y = 0; y < h; y++) {
+ LOAD_PIX(tmp)
+
+ // Primary pass
+ LOAD_DIR(p, tmp, off1, off1_1)
+
+ CONSTRAIN(p, pri_strength)
+
+ MIN_MAX(p)
+
+ PRI_0(p)
+ PRI_1(p)
+
+ UPDATE_SUM(p)
+
+ // Secondary pass 1
+ LOAD_DIR(s, tmp, off2, off3)
+
+ CONSTRAIN(s, sec_strength)
+
+ MIN_MAX(s)
+
+ SEC_0(s)
+
+ UPDATE_SUM(s)
+
+ // Secondary pass 2
+ LOAD_DIR(s2, tmp, off2_1, off3_1)
+
+ CONSTRAIN(s2, sec_strength)
+
+ MIN_MAX(s2)
+
+ UPDATE_SUM(s2)
+
+ // Store
+ i16x8 bias = vec_and((i16x8)vec_cmplt(sum, vec_splat_s16(0)), vec_splat_s16(1));
+ bias = vec_sub(vec_splat_s16(8), bias);
+ i16x8 unclamped = vec_add(px, vec_sra(vec_add(sum, bias), vec_splat_u16(4)));
+ i16x8 vdst = vec_max(vec_min(unclamped, max), min);
+
+ dst[0] = vdst[0];
+ dst[1] = vdst[1];
+ dst[2] = vdst[2];
+ dst[3] = vdst[3];
+ dst[4] = vdst[4];
+ dst[5] = vdst[5];
+ dst[6] = vdst[6];
+ dst[7] = vdst[7];
+
+ tmp += tmp_stride;
+ dst += PXSTRIDE(dst_stride);
+ }
+
+}
+
+#define cdef_fn(w, h, tmp_stride) \
+void dav1d_cdef_filter_##w##x##h##_vsx(pixel *const dst, \
+ const ptrdiff_t dst_stride, \
+ const pixel (*left)[2], \
+ const pixel *const top, \
+ const pixel *const bottom, \
+ const int pri_strength, \
+ const int sec_strength, \
+ const int dir, \
+ const int damping, \
+ const enum CdefEdgeFlags edges) \
+{ \
+ ALIGN_STK_16(uint16_t, tmp_buf, 12 * tmp_stride + 8,); \
+ uint16_t *tmp = tmp_buf + 2 * tmp_stride + 2; \
+ filter_##w##xN(dst, dst_stride, left, top, bottom, w, h, pri_strength, \
+ sec_strength, dir, damping, edges, tmp_stride, tmp); \
+}
+
+cdef_fn(4, 4, 8);
+cdef_fn(4, 8, 8);
+cdef_fn(8, 8, 16);
+#endif
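
(Editor's sketch, not part of the patch: the core of the filter above is the
constrain step in vconstrain(), which tapers the contribution of neighbouring
pixels by their distance from the centre value. A scalar equivalent of what
each 16-bit lane computes, using imax/imin/ulog2 from common/intops.h and a
hypothetical helper name, looks roughly like this:)

    static int constrain_sketch(const int diff, const int threshold,
                                const int damping)
    {
        if (!threshold) return 0;
        const int shift = imax(0, damping - ulog2(threshold));
        const int adiff = abs(diff);
        /* the limit shrinks as |diff| grows, clamping the correction magnitude */
        const int limit = imax(0, threshold - (adiff >> shift));
        const int mag   = imin(adiff, limit);
        return diff < 0 ? -mag : mag;  /* restore the sign of diff */
    }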
diff --git a/third_party/dav1d/src/ppc/cpu.c b/third_party/dav1d/src/ppc/cpu.c
new file mode 100644
index 0000000000..fe77057c57
--- /dev/null
+++ b/third_party/dav1d/src/ppc/cpu.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2019, Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#include "common/attributes.h"
+
+#include "src/ppc/cpu.h"
+
+#if (defined(HAVE_GETAUXVAL) || defined(HAVE_ELF_AUX_INFO)) && ARCH_PPC64LE
+#include <sys/auxv.h>
+#define HAVE_AUX
+#endif
+
+COLD unsigned dav1d_get_cpu_flags_ppc(void) {
+ unsigned flags = 0;
+#if defined(HAVE_GETAUXVAL) && ARCH_PPC64LE
+ unsigned long hw_cap = getauxval(AT_HWCAP);
+#elif defined(HAVE_ELF_AUX_INFO) && ARCH_PPC64LE
+ unsigned long hw_cap = 0;
+ elf_aux_info(AT_HWCAP, &hw_cap, sizeof(hw_cap));
+#endif
+#ifdef HAVE_AUX
+ flags |= (hw_cap & PPC_FEATURE_HAS_VSX) ? DAV1D_PPC_CPU_FLAG_VSX : 0;
+#endif
+ return flags;
+}
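
(For reference, not part of the patch: the AT_HWCAP probe above can be
exercised on its own on a Linux/glibc ppc64le system; the standalone program
below is only an illustration of the same check.)

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void) {
        /* Same HWCAP bit the dav1d code maps to DAV1D_PPC_CPU_FLAG_VSX. */
        const unsigned long hw_cap = getauxval(AT_HWCAP);
        printf("VSX: %s\n", (hw_cap & PPC_FEATURE_HAS_VSX) ? "yes" : "no");
        return 0;
    }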
diff --git a/third_party/dav1d/src/ppc/cpu.h b/third_party/dav1d/src/ppc/cpu.h
new file mode 100644
index 0000000000..cfd2ff4ff5
--- /dev/null
+++ b/third_party/dav1d/src/ppc/cpu.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2019, Janne Grunau
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DAV1D_SRC_PPC_CPU_H
+#define DAV1D_SRC_PPC_CPU_H
+
+enum CpuFlags {
+ DAV1D_PPC_CPU_FLAG_VSX = 1 << 0,
+};
+
+unsigned dav1d_get_cpu_flags_ppc(void);
+
+#endif /* DAV1D_SRC_PPC_CPU_H */
diff --git a/third_party/dav1d/src/ppc/dav1d_types.h b/third_party/dav1d/src/ppc/dav1d_types.h
new file mode 100644
index 0000000000..0b4bd72f0e
--- /dev/null
+++ b/third_party/dav1d/src/ppc/dav1d_types.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2019, Luca Barbato
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DAV1D_SRC_PPC_TYPES_H
+#define DAV1D_SRC_PPC_TYPES_H
+
+#include <altivec.h>
+#undef pixel
+
+#define u8x16 vector unsigned char
+#define i8x16 vector signed char
+#define b8x16 vector bool char
+#define u16x8 vector unsigned short
+#define i16x8 vector signed short
+#define b16x8 vector bool short
+#define u32x4 vector unsigned int
+#define i32x4 vector signed int
+#define b32x4 vector bool int
+#define u64x2 vector unsigned long long
+#define i64x2 vector signed long long
+#define b64x2 vector bool long long
+
+#define u8h_to_u16(v) ((u16x8) vec_mergeh((u8x16) v, vec_splat_u8(0)))
+#define u8l_to_u16(v) ((u16x8) vec_mergel((u8x16) v, vec_splat_u8(0)))
+#define u16h_to_i32(v) ((i32x4) vec_mergeh((u16x8) v, vec_splat_u16(0)))
+#define i16h_to_i32(v) ((i32x4) vec_unpackh((i16x8)v))
+#define u16l_to_i32(v) ((i32x4) vec_mergel((u16x8) v, vec_splat_u16(0)))
+#define i16l_to_i32(v) ((i32x4) vec_unpackl((i16x8)v))
+
+#endif /* DAV1D_SRC_PPC_TYPES_H */
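
(Editor's note, not part of the patch: on the little-endian ppc64le targets
these files are built for, merging a byte vector with a zero vector and
reinterpreting the result as 16-bit lanes zero-extends the bytes, so
u8h_to_u16 widens elements 0..7 and u8l_to_u16 widens elements 8..15. A
minimal sketch, assuming such a target:)

    #include "src/ppc/dav1d_types.h"

    static void widen_example(void) {
        const u8x16 v  = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
        const u16x8 lo = u8h_to_u16(v); /* { 0, 1, 2, 3, 4, 5, 6, 7 }       */
        const u16x8 hi = u8l_to_u16(v); /* { 8, 9, 10, 11, 12, 13, 14, 15 } */
        (void)lo; (void)hi;
    }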
diff --git a/third_party/dav1d/src/ppc/looprestoration.h b/third_party/dav1d/src/ppc/looprestoration.h
new file mode 100644
index 0000000000..3fe16318bd
--- /dev/null
+++ b/third_party/dav1d/src/ppc/looprestoration.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2019, Michail Alvanos
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "common/intops.h"
+
+#include "src/cpu.h"
+#include "src/looprestoration.h"
+
+void dav1d_wiener_filter_vsx(uint8_t *p, const ptrdiff_t stride,
+ const uint8_t (*const left)[4],
+ const uint8_t *lpf,
+ const int w, const int h,
+ const LooprestorationParams *const params,
+ const enum LrEdgeFlags edges HIGHBD_DECL_SUFFIX);
+
+static ALWAYS_INLINE void loop_restoration_dsp_init_ppc(Dav1dLoopRestorationDSPContext *const c, const int bpc) {
+ const unsigned flags = dav1d_get_cpu_flags();
+
+ if (!(flags & DAV1D_PPC_CPU_FLAG_VSX)) return;
+
+#if BITDEPTH == 8
+ c->wiener[0] = c->wiener[1] = dav1d_wiener_filter_vsx;
+#endif
+}
diff --git a/third_party/dav1d/src/ppc/looprestoration_tmpl.c b/third_party/dav1d/src/ppc/looprestoration_tmpl.c
new file mode 100644
index 0000000000..c0c64e1800
--- /dev/null
+++ b/third_party/dav1d/src/ppc/looprestoration_tmpl.c
@@ -0,0 +1,321 @@
+/*
+ * Copyright © 2019, VideoLAN and dav1d authors
+ * Copyright © 2019, Michail Alvanos
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice, this
+ * list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
+ * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "src/ppc/dav1d_types.h"
+#include "src/ppc/looprestoration.h"
+
+#if BITDEPTH == 8
+
+#define REST_UNIT_STRIDE (400)
+
+static inline i32x4 iclip_vec(i32x4 v, const i32x4 minv, const i32x4 maxv) {
+ v = vec_max(minv, v);
+ v = vec_min(maxv, v);
+ return v;
+}
+
+#define APPLY_FILTER_H(v, f, ssum1, ssum2) do { \
+ i16x8 ktmp_u16_high = (i16x8) u8h_to_u16(v); \
+ i16x8 ktmp_u16_low = (i16x8) u8l_to_u16(v); \
+ ssum1 = vec_madd(ktmp_u16_high, f, ssum1); \
+ ssum2 = vec_madd(ktmp_u16_low, f, ssum2); \
+} while (0)
+
+static void wiener_filter_h_vsx(int32_t *hor_ptr,
+ uint8_t *tmp_ptr,
+ const int16_t filterh[8],
+ const int w, const int h)
+{
+ const i32x4 zerov = vec_splats(0);
+ const i32x4 seven_vec = vec_splats(7);
+ const i32x4 bitdepth_added_vec = vec_splats(1 << 14);
+ const i32x4 round_bits_vec = vec_splats(3);
+ const i32x4 rounding_off_vec = vec_splats(1<<2);
+ const i32x4 clip_limit_v = vec_splats((1 << 13) - 1);
+
+ i16x8 filterhvall = vec_vsx_ld(0, filterh);
+ i16x8 filterhv0 = vec_splat( filterhvall, 0);
+ i16x8 filterhv1 = vec_splat( filterhvall, 1);
+ i16x8 filterhv2 = vec_splat( filterhvall, 2);
+ i16x8 filterhv3 = vec_splat( filterhvall, 3);
+ i16x8 filterhv4 = vec_splat( filterhvall, 4);
+ i16x8 filterhv5 = vec_splat( filterhvall, 5);
+ i16x8 filterhv6 = vec_splat( filterhvall, 6);
+
+ for (int j = 0; j < h + 6; j++) {
+ for (int i = 0; i < w; i+=16) {
+ i32x4 sum1 = bitdepth_added_vec;
+ i32x4 sum2 = bitdepth_added_vec;
+ i32x4 sum3 = bitdepth_added_vec;
+ i32x4 sum4 = bitdepth_added_vec;
+
+ u8x16 tmp_v0 = vec_ld(0, &tmp_ptr[i]);
+ u8x16 tmp_v7 = vec_ld(0, &tmp_ptr[i+16]);
+
+ u8x16 tmp_v1 = vec_sld( tmp_v7, tmp_v0, 15);
+ u8x16 tmp_v2 = vec_sld( tmp_v7, tmp_v0, 14);
+ u8x16 tmp_v3 = vec_sld( tmp_v7, tmp_v0, 13);
+ u8x16 tmp_v4 = vec_sld( tmp_v7, tmp_v0, 12);
+ u8x16 tmp_v5 = vec_sld( tmp_v7, tmp_v0, 11);
+ u8x16 tmp_v6 = vec_sld( tmp_v7, tmp_v0, 10);
+
+ u16x8 tmp_u16_high = u8h_to_u16(tmp_v3);
+ u16x8 tmp_u16_low = u8l_to_u16(tmp_v3);
+
+ i32x4 tmp_expanded1 = i16h_to_i32(tmp_u16_high);
+ i32x4 tmp_expanded2 = i16l_to_i32(tmp_u16_high);
+ i32x4 tmp_expanded3 = i16h_to_i32(tmp_u16_low);
+ i32x4 tmp_expanded4 = i16l_to_i32(tmp_u16_low);
+
+ i16x8 ssum1 = (i16x8) zerov;
+ i16x8 ssum2 = (i16x8) zerov;
+
+ APPLY_FILTER_H(tmp_v0, filterhv0, ssum1, ssum2);
+ APPLY_FILTER_H(tmp_v1, filterhv1, ssum1, ssum2);
+ APPLY_FILTER_H(tmp_v2, filterhv2, ssum1, ssum2);
+ APPLY_FILTER_H(tmp_v3, filterhv3, ssum1, ssum2);
+ APPLY_FILTER_H(tmp_v4, filterhv4, ssum1, ssum2);
+ APPLY_FILTER_H(tmp_v5, filterhv5, ssum1, ssum2);
+ APPLY_FILTER_H(tmp_v6, filterhv6, ssum1, ssum2);
+
+ sum1 += i16h_to_i32(ssum1) + (tmp_expanded1 << seven_vec);
+ sum2 += i16l_to_i32(ssum1) + (tmp_expanded2 << seven_vec);
+ sum3 += i16h_to_i32(ssum2) + (tmp_expanded3 << seven_vec);
+ sum4 += i16l_to_i32(ssum2) + (tmp_expanded4 << seven_vec);
+
+ sum1 = (sum1 + rounding_off_vec) >> round_bits_vec;
+ sum2 = (sum2 + rounding_off_vec) >> round_bits_vec;
+ sum3 = (sum3 + rounding_off_vec) >> round_bits_vec;
+ sum4 = (sum4 + rounding_off_vec) >> round_bits_vec;
+
+ sum1 = iclip_vec(sum1, zerov, clip_limit_v);
+ sum2 = iclip_vec(sum2, zerov, clip_limit_v);
+ sum3 = iclip_vec(sum3, zerov, clip_limit_v);
+ sum4 = iclip_vec(sum4, zerov, clip_limit_v);
+
+ vec_st(sum1, 0, &hor_ptr[i]);
+ vec_st(sum2, 16, &hor_ptr[i]);
+ vec_st(sum3, 32, &hor_ptr[i]);
+ vec_st(sum4, 48, &hor_ptr[i]);
+ }
+ tmp_ptr += REST_UNIT_STRIDE;
+ hor_ptr += REST_UNIT_STRIDE;
+ }
+}
+
+static inline i16x8 iclip_u8_vec(i16x8 v) {
+ const i16x8 zerov = vec_splats((int16_t)0);
+ const i16x8 maxv = vec_splats((int16_t)255);
+ v = vec_max(zerov, v);
+ v = vec_min(maxv, v);
+ return v;
+}
+
+#define APPLY_FILTER_V(index, f) do { \
+ i32x4 v1 = vec_ld( 0, &hor[(j + index) * REST_UNIT_STRIDE + i]); \
+ i32x4 v2 = vec_ld(16, &hor[(j + index) * REST_UNIT_STRIDE + i]); \
+ i32x4 v3 = vec_ld(32, &hor[(j + index) * REST_UNIT_STRIDE + i]); \
+ i32x4 v4 = vec_ld(48, &hor[(j + index) * REST_UNIT_STRIDE + i]); \
+ sum1 = sum1 + v1 * f; \
+ sum2 = sum2 + v2 * f; \
+ sum3 = sum3 + v3 * f; \
+ sum4 = sum4 + v4 * f; \
+} while (0)
+
+#define LOAD_AND_APPLY_FILTER_V(sumpixelv, hor) do { \
+ i32x4 sum1 = round_vec; \
+ i32x4 sum2 = round_vec; \
+ i32x4 sum3 = round_vec; \
+ i32x4 sum4 = round_vec; \
+ APPLY_FILTER_V(0, filterv0); \
+ APPLY_FILTER_V(1, filterv1); \
+ APPLY_FILTER_V(2, filterv2); \
+ APPLY_FILTER_V(3, filterv3); \
+ APPLY_FILTER_V(4, filterv4); \
+ APPLY_FILTER_V(5, filterv5); \
+ APPLY_FILTER_V(6, filterv6); \
+ sum1 = sum1 >> round_bits_vec; \
+ sum2 = sum2 >> round_bits_vec; \
+ sum3 = sum3 >> round_bits_vec; \
+ sum4 = sum4 >> round_bits_vec; \
+ i16x8 sum_short_packed_1 = (i16x8) vec_pack(sum1, sum2); \
+ i16x8 sum_short_packed_2 = (i16x8) vec_pack(sum3, sum4); \
+ sum_short_packed_1 = iclip_u8_vec(sum_short_packed_1); \
+ sum_short_packed_2 = iclip_u8_vec(sum_short_packed_2); \
+ sum_pixel = (u8x16) vec_pack(sum_short_packed_1, sum_short_packed_2); \
+} while (0)
+
+static inline void wiener_filter_v_vsx(uint8_t *p,
+ const ptrdiff_t stride,
+ const int32_t *hor,
+ const int16_t filterv[8],
+ const int w, const int h)
+{
+ const i32x4 round_bits_vec = vec_splats(11);
+ const i32x4 round_vec = vec_splats((1 << 10) - (1 << 18));
+
+ i32x4 filterv0 = vec_splats((int32_t) filterv[0]);
+ i32x4 filterv1 = vec_splats((int32_t) filterv[1]);
+ i32x4 filterv2 = vec_splats((int32_t) filterv[2]);
+ i32x4 filterv3 = vec_splats((int32_t) filterv[3]);
+ i32x4 filterv4 = vec_splats((int32_t) filterv[4]);
+ i32x4 filterv5 = vec_splats((int32_t) filterv[5]);
+ i32x4 filterv6 = vec_splats((int32_t) filterv[6]);
+
+ for (int j = 0; j < h; j++) {
+ for (int i = 0; i <(w-w%16); i += 16) {
+ u8x16 sum_pixel;
+ LOAD_AND_APPLY_FILTER_V(sum_pixel, hor);
+ vec_vsx_st(sum_pixel, 0, &p[j * PXSTRIDE(stride) + i]);
+ }
+ // remaining loop
+ if (w & 0xf){
+ int i=w-w%16;
+ ALIGN_STK_16(uint8_t, tmp_out, 16,);
+ u8x16 sum_pixel;
+
+ LOAD_AND_APPLY_FILTER_V(sum_pixel, hor);
+ vec_vsx_st(sum_pixel, 0, tmp_out);
+
+ for (int k=0; i<w; i++, k++) {
+ p[j * PXSTRIDE(stride) + i] = tmp_out[k];
+ }
+ }
+ }
+}
+
+static inline void padding(uint8_t *dst, const uint8_t *p,
+ const ptrdiff_t stride, const uint8_t (*left)[4],
+ const uint8_t *lpf, int unit_w, const int stripe_h,
+ const enum LrEdgeFlags edges)
+{
+ const int have_left = !!(edges & LR_HAVE_LEFT);
+ const int have_right = !!(edges & LR_HAVE_RIGHT);
+
+ // Copy more pixels if we don't have to pad them
+ unit_w += 3 * have_left + 3 * have_right;
+ uint8_t *dst_l = dst + 3 * !have_left;
+ p -= 3 * have_left;
+ lpf -= 3 * have_left;
+
+ if (edges & LR_HAVE_TOP) {
+ // Copy previous loop filtered rows
+ const uint8_t *const above_1 = lpf;
+ const uint8_t *const above_2 = above_1 + PXSTRIDE(stride);
+ pixel_copy(dst_l, above_1, unit_w);
+ pixel_copy(dst_l + REST_UNIT_STRIDE, above_1, unit_w);
+ pixel_copy(dst_l + 2 * REST_UNIT_STRIDE, above_2, unit_w);
+ } else {
+ // Pad with first row
+ pixel_copy(dst_l, p, unit_w);
+ pixel_copy(dst_l + REST_UNIT_STRIDE, p, unit_w);
+ pixel_copy(dst_l + 2 * REST_UNIT_STRIDE, p, unit_w);
+ if (have_left) {
+ pixel_copy(dst_l, &left[0][1], 3);
+ pixel_copy(dst_l + REST_UNIT_STRIDE, &left[0][1], 3);
+ pixel_copy(dst_l + 2 * REST_UNIT_STRIDE, &left[0][1], 3);
+ }
+ }
+
+ uint8_t *dst_tl = dst_l + 3 * REST_UNIT_STRIDE;
+ if (edges & LR_HAVE_BOTTOM) {
+ // Copy next loop filtered rows
+ const uint8_t *const below_1 = lpf + 6 * PXSTRIDE(stride);
+ const uint8_t *const below_2 = below_1 + PXSTRIDE(stride);
+ pixel_copy(dst_tl + stripe_h * REST_UNIT_STRIDE, below_1, unit_w);
+ pixel_copy(dst_tl + (stripe_h + 1) * REST_UNIT_STRIDE, below_2, unit_w);
+ pixel_copy(dst_tl + (stripe_h + 2) * REST_UNIT_STRIDE, below_2, unit_w);
+ } else {
+ // Pad with last row
+ const uint8_t *const src = p + (stripe_h - 1) * PXSTRIDE(stride);
+ pixel_copy(dst_tl + stripe_h * REST_UNIT_STRIDE, src, unit_w);
+ pixel_copy(dst_tl + (stripe_h + 1) * REST_UNIT_STRIDE, src, unit_w);
+ pixel_copy(dst_tl + (stripe_h + 2) * REST_UNIT_STRIDE, src, unit_w);
+ if (have_left) {
+ pixel_copy(dst_tl + stripe_h * REST_UNIT_STRIDE, &left[stripe_h - 1][1], 3);
+ pixel_copy(dst_tl + (stripe_h + 1) * REST_UNIT_STRIDE, &left[stripe_h - 1][1], 3);
+ pixel_copy(dst_tl + (stripe_h + 2) * REST_UNIT_STRIDE, &left[stripe_h - 1][1], 3);
+ }
+ }
+
+ // Inner UNIT_WxSTRIPE_H
+ for (int j = 0; j < stripe_h; j++) {
+ pixel_copy(dst_tl + 3 * have_left, p + 3 * have_left, unit_w - 3 * have_left);
+ dst_tl += REST_UNIT_STRIDE;
+ p += PXSTRIDE(stride);
+ }
+
+ if (!have_right) {
+ uint8_t *pad = dst_l + unit_w;
+ uint8_t *row_last = &dst_l[unit_w - 1];
+ // Pad 3x(STRIPE_H+6) with last column
+ for (int j = 0; j < stripe_h + 6; j++) {
+ pixel_set(pad, *row_last, 3);
+ pad += REST_UNIT_STRIDE;
+ row_last += REST_UNIT_STRIDE;
+ }
+ }
+
+ if (!have_left) {
+ // Pad 3x(STRIPE_H+6) with first column
+ for (int j = 0; j < stripe_h + 6; j++) {
+ pixel_set(dst, *dst_l, 3);
+ dst += REST_UNIT_STRIDE;
+ dst_l += REST_UNIT_STRIDE;
+ }
+ } else {
+ dst += 3 * REST_UNIT_STRIDE;
+ for (int j = 0; j < stripe_h; j++) {
+ pixel_copy(dst, &left[j][1], 3);
+ dst += REST_UNIT_STRIDE;
+ }
+ }
+}
+
+// FIXME Could split into luma and chroma specific functions,
+// (since first and last tops are always 0 for chroma)
+// FIXME Could implement a version that requires less temporary memory
+// (should be possible to implement with only 6 rows of temp storage)
+void dav1d_wiener_filter_vsx(uint8_t *p, const ptrdiff_t stride,
+ const uint8_t (*const left)[4],
+ const uint8_t *lpf,
+ const int w, const int h,
+ const LooprestorationParams *const params,
+ const enum LrEdgeFlags edges HIGHBD_DECL_SUFFIX)
+{
+ const int16_t (*const filter)[8] = params->filter;
+
+ // Wiener filtering is applied to a maximum stripe height of 64 + 3 pixels
+ // of padding above and below
+ ALIGN_STK_16(uint8_t, tmp, 70 /*(64 + 3 + 3)*/ * REST_UNIT_STRIDE,);
+ padding(tmp, p, stride, left, lpf, w, h, edges);
+ ALIGN_STK_16(int32_t, hor, 70 /*(64 + 3 + 3)*/ * REST_UNIT_STRIDE + 64,);
+
+ wiener_filter_h_vsx(hor, tmp, filter[0], w, h);
+ wiener_filter_v_vsx(p, stride, hor, filter[1], w, h);
+}
+#endif
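
(Editor's sketch, not part of the patch: per output pixel, the horizontal pass
in wiener_filter_h_vsx() amounts to a 7-tap dot product over the padded tmp
row plus the fixed offsets used above (1 << 14, and the centre pixel shifted
left by 7), rounded by 3 bits and clipped to the same limit as clip_limit_v.
A scalar equivalent, with `row` one padded input row, a hypothetical helper
name, and ignoring the 16-bit intermediates the vector code relies on:)

    static int32_t wiener_h_pixel_sketch(const uint8_t *const row,
                                         const int16_t filterh[8], const int x)
    {
        int sum = (1 << 14) + (row[x + 3] << 7); /* offset + implicit centre tap */
        for (int k = 0; k < 7; k++)
            sum += filterh[k] * row[x + k];
        sum = (sum + (1 << 2)) >> 3;             /* round_bits_vec = 3 */
        return iclip(sum, 0, (1 << 13) - 1);     /* clip_limit_v */
    }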