author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 19:33:14 +0000
commit     36d22d82aa202bb199967e9512281e9a53db42c9 (patch)
tree       105e8c98ddea1c1e4784a60a5a6410fa416be2de /third_party/aom/aom_dsp/arm
parent     Initial commit. (diff)
Adding upstream version 115.7.0esr. (upstream/115.7.0esr, upstream)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/aom/aom_dsp/arm')
-rw-r--r--  third_party/aom/aom_dsp/arm/blend_a64_mask_neon.c     451
-rw-r--r--  third_party/aom/aom_dsp/arm/fwd_txfm_neon.c           222
-rw-r--r--  third_party/aom/aom_dsp/arm/intrapred_neon.c          590
-rw-r--r--  third_party/aom/aom_dsp/arm/loopfilter_neon.c         928
-rw-r--r--  third_party/aom/aom_dsp/arm/sad4d_neon.c              226
-rw-r--r--  third_party/aom/aom_dsp/arm/sad_neon.c                224
-rw-r--r--  third_party/aom/aom_dsp/arm/subpel_variance_neon.c    131
-rw-r--r--  third_party/aom/aom_dsp/arm/subtract_neon.c            81
-rw-r--r--  third_party/aom/aom_dsp/arm/variance_neon.c           400
9 files changed, 3253 insertions, 0 deletions
diff --git a/third_party/aom/aom_dsp/arm/blend_a64_mask_neon.c b/third_party/aom/aom_dsp/arm/blend_a64_mask_neon.c
new file mode 100644
index 0000000000..e7f08a5fdb
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/blend_a64_mask_neon.c
@@ -0,0 +1,451 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+#include <assert.h>
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/blend.h"
+#include "aom_ports/mem.h"
+#include "av1/common/arm/mem_neon.h"
+#include "config/aom_dsp_rtcd.h"
+
+static INLINE void blend8x1(int16x8_t mask, int16x8_t src_0, int16x8_t src_1,
+ const int16x8_t v_maxval, int16x8_t *res) {
+ int32x4_t im_res_low, im_res_high;
+ const int16x8_t max_minus_mask = vsubq_s16(v_maxval, mask);
+
+ im_res_low = vmull_s16(vget_low_s16(mask), vget_low_s16(src_0));
+ im_res_low =
+ vmlal_s16(im_res_low, vget_low_s16(max_minus_mask), vget_low_s16(src_1));
+
+ im_res_high = vmull_s16(vget_high_s16(mask), vget_high_s16(src_0));
+ im_res_high = vmlal_s16(im_res_high, vget_high_s16(max_minus_mask),
+ vget_high_s16(src_1));
+
+ *res = vcombine_s16(vshrn_n_s32(im_res_low, AOM_BLEND_A64_ROUND_BITS),
+ vshrn_n_s32(im_res_high, AOM_BLEND_A64_ROUND_BITS));
+}
+
+static INLINE void blend_8x4(uint8_t *dst, uint32_t dst_stride,
+ const CONV_BUF_TYPE *src0, uint32_t src0_stride,
+ const CONV_BUF_TYPE *src1, uint32_t src1_stride,
+ int16x8_t mask0, int16x8_t mask1, int16x8_t mask2,
+ int16x8_t mask3, const int16x8_t v_maxval,
+ const uint16x8_t vec_round_offset,
+ const int16x8_t vec_round_bits) {
+ int16x8_t src0_0, src0_1, src0_2, src0_3;
+ int16x8_t src1_0, src1_1, src1_2, src1_3;
+ int16x8_t im_res_0, im_res_1, im_res_2, im_res_3;
+
+ load_s16_8x4((int16_t *)src0, (int32_t)src0_stride, &src0_0, &src0_1, &src0_2,
+ &src0_3);
+ load_s16_8x4((int16_t *)src1, (int32_t)src1_stride, &src1_0, &src1_1, &src1_2,
+ &src1_3);
+
+ blend8x1(mask0, src0_0, src1_0, v_maxval, &im_res_0);
+ blend8x1(mask1, src0_1, src1_1, v_maxval, &im_res_1);
+ blend8x1(mask2, src0_2, src1_2, v_maxval, &im_res_2);
+ blend8x1(mask3, src0_3, src1_3, v_maxval, &im_res_3);
+
+ uint16x8_t im_res1_0 =
+ vqsubq_u16(vreinterpretq_u16_s16(im_res_0), vec_round_offset);
+ uint16x8_t im_res1_1 =
+ vqsubq_u16(vreinterpretq_u16_s16(im_res_1), vec_round_offset);
+ uint16x8_t im_res1_2 =
+ vqsubq_u16(vreinterpretq_u16_s16(im_res_2), vec_round_offset);
+ uint16x8_t im_res1_3 =
+ vqsubq_u16(vreinterpretq_u16_s16(im_res_3), vec_round_offset);
+
+ im_res_0 = vshlq_s16(vreinterpretq_s16_u16(im_res1_0), vec_round_bits);
+ im_res_1 = vshlq_s16(vreinterpretq_s16_u16(im_res1_1), vec_round_bits);
+ im_res_2 = vshlq_s16(vreinterpretq_s16_u16(im_res1_2), vec_round_bits);
+ im_res_3 = vshlq_s16(vreinterpretq_s16_u16(im_res1_3), vec_round_bits);
+
+ vst1_u8((dst + 0 * dst_stride), vqmovun_s16(im_res_0));
+ vst1_u8((dst + 1 * dst_stride), vqmovun_s16(im_res_1));
+ vst1_u8((dst + 2 * dst_stride), vqmovun_s16(im_res_2));
+ vst1_u8((dst + 3 * dst_stride), vqmovun_s16(im_res_3));
+}
+
+static INLINE void blend_4x4(uint8_t *dst, uint32_t dst_stride,
+ const CONV_BUF_TYPE *src0, uint32_t src0_stride,
+ const CONV_BUF_TYPE *src1, uint32_t src1_stride,
+ int16x4_t mask0, int16x4_t mask1, int16x4_t mask2,
+ int16x4_t mask3, const int16x8_t v_maxval,
+ const uint16x8_t vec_round_offset,
+ const int16x8_t vec_round_bits) {
+ int16x8_t src0_0, src0_1;
+ int16x8_t src1_0, src1_1;
+ uint64x2_t tu0 = vdupq_n_u64(0), tu1 = vdupq_n_u64(0), tu2 = vdupq_n_u64(0),
+ tu3 = vdupq_n_u64(0);
+ int16x8_t mask0_1, mask2_3;
+ int16x8_t res0, res1;
+
+ load_unaligned_u16_4x4(src0, src0_stride, &tu0, &tu1);
+ load_unaligned_u16_4x4(src1, src1_stride, &tu2, &tu3);
+
+ src0_0 = vreinterpretq_s16_u64(tu0);
+ src0_1 = vreinterpretq_s16_u64(tu1);
+
+ src1_0 = vreinterpretq_s16_u64(tu2);
+ src1_1 = vreinterpretq_s16_u64(tu3);
+
+ mask0_1 = vcombine_s16(mask0, mask1);
+ mask2_3 = vcombine_s16(mask2, mask3);
+
+ blend8x1(mask0_1, src0_0, src1_0, v_maxval, &res0);
+ blend8x1(mask2_3, src0_1, src1_1, v_maxval, &res1);
+
+ uint16x8_t im_res_0 =
+ vqsubq_u16(vreinterpretq_u16_s16(res0), vec_round_offset);
+ uint16x8_t im_res_1 =
+ vqsubq_u16(vreinterpretq_u16_s16(res1), vec_round_offset);
+
+ src0_0 = vshlq_s16(vreinterpretq_s16_u16(im_res_0), vec_round_bits);
+ src0_1 = vshlq_s16(vreinterpretq_s16_u16(im_res_1), vec_round_bits);
+
+ uint8x8_t res_0 = vqmovun_s16(src0_0);
+ uint8x8_t res_1 = vqmovun_s16(src0_1);
+
+ vst1_lane_u32((uint32_t *)(dst + 0 * dst_stride), vreinterpret_u32_u8(res_0),
+ 0);
+ vst1_lane_u32((uint32_t *)(dst + 1 * dst_stride), vreinterpret_u32_u8(res_0),
+ 1);
+ vst1_lane_u32((uint32_t *)(dst + 2 * dst_stride), vreinterpret_u32_u8(res_1),
+ 0);
+ vst1_lane_u32((uint32_t *)(dst + 3 * dst_stride), vreinterpret_u32_u8(res_1),
+ 1);
+}
+
+void aom_lowbd_blend_a64_d16_mask_neon(
+ uint8_t *dst, uint32_t dst_stride, const CONV_BUF_TYPE *src0,
+ uint32_t src0_stride, const CONV_BUF_TYPE *src1, uint32_t src1_stride,
+ const uint8_t *mask, uint32_t mask_stride, int w, int h, int subw, int subh,
+ ConvolveParams *conv_params) {
+ int i = 0;
+ const int bd = 8;
+ int w_tmp = w;
+ const uint8_t *mask_tmp = mask;
+ const CONV_BUF_TYPE *src0_tmp = src0;
+ const CONV_BUF_TYPE *src1_tmp = src1;
+ uint8_t *dst_tmp = dst;
+
+ const int offset_bits = bd + 2 * FILTER_BITS - conv_params->round_0;
+ const int round_offset = (1 << (offset_bits - conv_params->round_1)) +
+ (1 << (offset_bits - conv_params->round_1 - 1));
+ const int round_bits =
+ 2 * FILTER_BITS - conv_params->round_0 - conv_params->round_1;
+
+ assert(IMPLIES((void *)src0 == dst, src0_stride == dst_stride));
+ assert(IMPLIES((void *)src1 == dst, src1_stride == dst_stride));
+
+ assert(h >= 4);
+ assert(w >= 4);
+ assert(IS_POWER_OF_TWO(h));
+ assert(IS_POWER_OF_TWO(w));
+
+ uint8x8_t s0, s1, s2, s3;
+ uint32x2_t tu0 = vdup_n_u32(0), tu1 = vdup_n_u32(0), tu2 = vdup_n_u32(0),
+ tu3 = vdup_n_u32(0);
+ uint8x16_t t0, t1, t2, t3, t4, t5, t6, t7;
+ int16x8_t mask0, mask1, mask2, mask3;
+ int16x8_t mask4, mask5, mask6, mask7;
+ int32x4_t m0_32, m1_32, m2_32, m3_32;
+ int32x4_t m4_32, m5_32, m6_32, m7_32;
+ uint8x8_t mask0_l, mask1_l, mask2_l, mask3_l;
+ uint8x8_t mask4_l, mask5_l, mask6_l, mask7_l;
+ int16x4_t mask0_low, mask1_low, mask2_low, mask3_low;
+ const uint16x4_t vec_zero = vdup_n_u16(0);
+ const uint16_t offset = round_offset - (1 << (round_bits - 1));
+ const int16x8_t v_maxval = vdupq_n_s16(AOM_BLEND_A64_MAX_ALPHA);
+ const int16x8_t vec_round_bits = vdupq_n_s16(-round_bits);
+ const uint16x8_t vec_offset = vdupq_n_u16(offset);
+
+ if (subw == 0 && subh == 0) {
+ if (w_tmp > 7) {
+ do {
+ w_tmp = w;
+ do {
+ load_u8_8x4(mask_tmp, mask_stride, &s0, &s1, &s2, &s3);
+
+ mask0 = vmovl_s8(vreinterpret_s8_u8(s0));
+ mask1 = vmovl_s8(vreinterpret_s8_u8(s1));
+ mask2 = vmovl_s8(vreinterpret_s8_u8(s2));
+ mask3 = vmovl_s8(vreinterpret_s8_u8(s3));
+
+ blend_8x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0, mask1, mask2, mask3, v_maxval,
+ vec_offset, vec_round_bits);
+
+ w_tmp -= 8;
+ mask_tmp += 8;
+ dst_tmp += 8;
+ src0_tmp += 8;
+ src1_tmp += 8;
+ } while (w_tmp > 7);
+ i += 4;
+ mask_tmp += (4 * mask_stride) - w;
+ dst_tmp += (4 * dst_stride) - w;
+ src0_tmp += (4 * src0_stride) - w;
+ src1_tmp += (4 * src1_stride) - w;
+ } while (i < h);
+ } else {
+ do {
+ load_unaligned_u8_4x4(mask_tmp, mask_stride, &tu0, &tu1);
+
+ mask0 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu0)));
+ mask1 = vreinterpretq_s16_u16(vmovl_u8(vreinterpret_u8_u32(tu1)));
+
+ mask0_low = vget_low_s16(mask0);
+ mask1_low = vget_high_s16(mask0);
+ mask2_low = vget_low_s16(mask1);
+ mask3_low = vget_high_s16(mask1);
+
+ blend_4x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0_low, mask1_low, mask2_low, mask3_low,
+ v_maxval, vec_offset, vec_round_bits);
+
+ i += 4;
+ mask_tmp += (4 * mask_stride);
+ dst_tmp += (4 * dst_stride);
+ src0_tmp += (4 * src0_stride);
+ src1_tmp += (4 * src1_stride);
+ } while (i < h);
+ }
+ } else if (subw == 1 && subh == 1) {
+ if (w_tmp > 7) {
+ do {
+ w_tmp = w;
+ do {
+ load_u8_16x8(mask_tmp, mask_stride, &t0, &t1, &t2, &t3, &t4, &t5, &t6,
+ &t7);
+
+ mask0 =
+ vreinterpretq_s16_u16(vaddl_u8(vget_low_u8(t0), vget_low_u8(t1)));
+ mask1 =
+ vreinterpretq_s16_u16(vaddl_u8(vget_low_u8(t2), vget_low_u8(t3)));
+ mask2 =
+ vreinterpretq_s16_u16(vaddl_u8(vget_low_u8(t4), vget_low_u8(t5)));
+ mask3 =
+ vreinterpretq_s16_u16(vaddl_u8(vget_low_u8(t6), vget_low_u8(t7)));
+
+ mask4 = vreinterpretq_s16_u16(
+ vaddl_u8(vget_high_u8(t0), vget_high_u8(t1)));
+ mask5 = vreinterpretq_s16_u16(
+ vaddl_u8(vget_high_u8(t2), vget_high_u8(t3)));
+ mask6 = vreinterpretq_s16_u16(
+ vaddl_u8(vget_high_u8(t4), vget_high_u8(t5)));
+ mask7 = vreinterpretq_s16_u16(
+ vaddl_u8(vget_high_u8(t6), vget_high_u8(t7)));
+
+ m0_32 = vpaddlq_s16(mask0);
+ m1_32 = vpaddlq_s16(mask1);
+ m2_32 = vpaddlq_s16(mask2);
+ m3_32 = vpaddlq_s16(mask3);
+
+ m4_32 = vpaddlq_s16(mask4);
+ m5_32 = vpaddlq_s16(mask5);
+ m6_32 = vpaddlq_s16(mask6);
+ m7_32 = vpaddlq_s16(mask7);
+
+ mask0 =
+ vcombine_s16(vqrshrn_n_s32(m0_32, 2), vqrshrn_n_s32(m4_32, 2));
+ mask1 =
+ vcombine_s16(vqrshrn_n_s32(m1_32, 2), vqrshrn_n_s32(m5_32, 2));
+ mask2 =
+ vcombine_s16(vqrshrn_n_s32(m2_32, 2), vqrshrn_n_s32(m6_32, 2));
+ mask3 =
+ vcombine_s16(vqrshrn_n_s32(m3_32, 2), vqrshrn_n_s32(m7_32, 2));
+
+ blend_8x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0, mask1, mask2, mask3, v_maxval,
+ vec_offset, vec_round_bits);
+
+ w_tmp -= 8;
+ mask_tmp += 16;
+ dst_tmp += 8;
+ src0_tmp += 8;
+ src1_tmp += 8;
+ } while (w_tmp > 7);
+ i += 4;
+ mask_tmp += (8 * mask_stride) - (2 * w);
+ dst_tmp += (4 * dst_stride) - w;
+ src0_tmp += (4 * src0_stride) - w;
+ src1_tmp += (4 * src1_stride) - w;
+ } while (i < h);
+ } else {
+ do {
+ load_u8_8x8(mask_tmp, mask_stride, &mask0_l, &mask1_l, &mask2_l,
+ &mask3_l, &mask4_l, &mask5_l, &mask6_l, &mask7_l);
+
+ mask0 = vreinterpretq_s16_u16(vaddl_u8(mask0_l, mask1_l));
+ mask1 = vreinterpretq_s16_u16(vaddl_u8(mask2_l, mask3_l));
+ mask2 = vreinterpretq_s16_u16(vaddl_u8(mask4_l, mask5_l));
+ mask3 = vreinterpretq_s16_u16(vaddl_u8(mask6_l, mask7_l));
+
+ m0_32 = vpaddlq_s16(mask0);
+ m1_32 = vpaddlq_s16(mask1);
+ m2_32 = vpaddlq_s16(mask2);
+ m3_32 = vpaddlq_s16(mask3);
+
+ mask0_low = vqrshrn_n_s32(m0_32, 2);
+ mask1_low = vqrshrn_n_s32(m1_32, 2);
+ mask2_low = vqrshrn_n_s32(m2_32, 2);
+ mask3_low = vqrshrn_n_s32(m3_32, 2);
+
+ blend_4x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0_low, mask1_low, mask2_low, mask3_low,
+ v_maxval, vec_offset, vec_round_bits);
+
+ i += 4;
+ mask_tmp += (8 * mask_stride);
+ dst_tmp += (4 * dst_stride);
+ src0_tmp += (4 * src0_stride);
+ src1_tmp += (4 * src1_stride);
+ } while (i < h);
+ }
+ } else if (subw == 1 && subh == 0) {
+ if (w_tmp > 7) {
+ do {
+ w_tmp = w;
+ do {
+ load_u8_16x4(mask_tmp, mask_stride, &t0, &t1, &t2, &t3);
+
+ mask0 = vreinterpretq_s16_u16(vcombine_u16(
+ vpaddl_u8(vget_low_u8(t0)), vpaddl_u8(vget_high_u8(t0))));
+ mask1 = vreinterpretq_s16_u16(vcombine_u16(
+ vpaddl_u8(vget_low_u8(t1)), vpaddl_u8(vget_high_u8(t1))));
+ mask2 = vreinterpretq_s16_u16(vcombine_u16(
+ vpaddl_u8(vget_low_u8(t2)), vpaddl_u8(vget_high_u8(t2))));
+ mask3 = vreinterpretq_s16_u16(vcombine_u16(
+ vpaddl_u8(vget_low_u8(t3)), vpaddl_u8(vget_high_u8(t3))));
+
+ mask0 = vmovl_s8(vqrshrn_n_s16(mask0, 1));
+ mask1 = vmovl_s8(vqrshrn_n_s16(mask1, 1));
+ mask2 = vmovl_s8(vqrshrn_n_s16(mask2, 1));
+ mask3 = vmovl_s8(vqrshrn_n_s16(mask3, 1));
+
+ blend_8x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0, mask1, mask2, mask3, v_maxval,
+ vec_offset, vec_round_bits);
+ w_tmp -= 8;
+ mask_tmp += 16;
+ dst_tmp += 8;
+ src0_tmp += 8;
+ src1_tmp += 8;
+ } while (w_tmp > 7);
+ i += 4;
+ mask_tmp += (4 * mask_stride) - (2 * w);
+ dst_tmp += (4 * dst_stride) - w;
+ src0_tmp += (4 * src0_stride) - w;
+ src1_tmp += (4 * src1_stride) - w;
+ } while (i < h);
+ } else {
+ do {
+ load_u8_8x4(mask_tmp, mask_stride, &mask0_l, &mask1_l, &mask2_l,
+ &mask3_l);
+
+ mask0 =
+ vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask0_l), vec_zero));
+ mask1 =
+ vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask1_l), vec_zero));
+ mask2 =
+ vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask2_l), vec_zero));
+ mask3 =
+ vreinterpretq_s16_u16(vcombine_u16(vpaddl_u8(mask3_l), vec_zero));
+
+ mask0_low = vget_low_s16(vmovl_s8(vqrshrn_n_s16(mask0, 1)));
+ mask1_low = vget_low_s16(vmovl_s8(vqrshrn_n_s16(mask1, 1)));
+ mask2_low = vget_low_s16(vmovl_s8(vqrshrn_n_s16(mask2, 1)));
+ mask3_low = vget_low_s16(vmovl_s8(vqrshrn_n_s16(mask3, 1)));
+
+ blend_4x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0_low, mask1_low, mask2_low, mask3_low,
+ v_maxval, vec_offset, vec_round_bits);
+
+ i += 4;
+ mask_tmp += (4 * mask_stride);
+ dst_tmp += (4 * dst_stride);
+ src0_tmp += (4 * src0_stride);
+ src1_tmp += (4 * src1_stride);
+ } while (i < h);
+ }
+ } else {
+ if (w_tmp > 7) {
+ do {
+ w_tmp = w;
+ do {
+ load_u8_8x8(mask_tmp, mask_stride, &mask0_l, &mask1_l, &mask2_l,
+ &mask3_l, &mask4_l, &mask5_l, &mask6_l, &mask7_l);
+
+ mask0 = vreinterpretq_s16_u16(vaddl_u8(mask0_l, mask1_l));
+ mask1 = vreinterpretq_s16_u16(vaddl_u8(mask2_l, mask3_l));
+ mask2 = vreinterpretq_s16_u16(vaddl_u8(mask4_l, mask5_l));
+ mask3 = vreinterpretq_s16_u16(vaddl_u8(mask6_l, mask7_l));
+
+ mask0 = vmovl_s8(vqrshrn_n_s16(mask0, 1));
+ mask1 = vmovl_s8(vqrshrn_n_s16(mask1, 1));
+ mask2 = vmovl_s8(vqrshrn_n_s16(mask2, 1));
+ mask3 = vmovl_s8(vqrshrn_n_s16(mask3, 1));
+
+ blend_8x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0, mask1, mask2, mask3, v_maxval,
+ vec_offset, vec_round_bits);
+
+ w_tmp -= 8;
+ mask_tmp += 8;
+ dst_tmp += 8;
+ src0_tmp += 8;
+ src1_tmp += 8;
+ } while (w_tmp > 7);
+ i += 4;
+ mask_tmp += (8 * mask_stride) - w;
+ dst_tmp += (4 * dst_stride) - w;
+ src0_tmp += (4 * src0_stride) - w;
+ src1_tmp += (4 * src1_stride) - w;
+ } while (i < h);
+ } else {
+ do {
+ load_unaligned_u8_4x4(mask_tmp, 2 * mask_stride, &tu0, &tu1);
+ load_unaligned_u8_4x4(mask_tmp + mask_stride, 2 * mask_stride, &tu2,
+ &tu3);
+
+ s0 = vreinterpret_u8_u32(tu0);
+ s1 = vreinterpret_u8_u32(tu1);
+ s2 = vreinterpret_u8_u32(tu2);
+ s3 = vreinterpret_u8_u32(tu3);
+
+ mask0 = vreinterpretq_s16_u16(vaddl_u8(s0, s2));
+ mask1 = vreinterpretq_s16_u16(vaddl_u8(s1, s3));
+
+ mask0 = vmovl_s8(vqrshrn_n_s16(mask0, 1));
+ mask1 = vmovl_s8(vqrshrn_n_s16(mask1, 1));
+
+ mask0_low = vget_low_s16(mask0);
+ mask1_low = vget_high_s16(mask0);
+ mask2_low = vget_low_s16(mask1);
+ mask3_low = vget_high_s16(mask1);
+
+ blend_4x4(dst_tmp, dst_stride, src0_tmp, src0_stride, src1_tmp,
+ src1_stride, mask0_low, mask1_low, mask2_low, mask3_low,
+ v_maxval, vec_offset, vec_round_bits);
+
+ i += 4;
+ mask_tmp += (8 * mask_stride);
+ dst_tmp += (4 * dst_stride);
+ src0_tmp += (4 * src0_stride);
+ src1_tmp += (4 * src1_stride);
+ } while (i < h);
+ }
+ }
+}
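
Note on the kernel above: per output pixel, blend8x1() and the offset/shift pair in blend_8x4()/blend_4x4() reduce to a simple weighted average of the two 16-bit convolve buffers followed by offset removal, rounding, and clamping. The scalar sketch below is an illustration only, not the upstream C reference; it assumes the blend.h constants AOM_BLEND_A64_MAX_ALPHA == 64 and AOM_BLEND_A64_ROUND_BITS == 6, and takes round_offset/round_bits as derived from ConvolveParams at the top of aom_lowbd_blend_a64_d16_mask_neon().

/*
 * Scalar sketch of one output pixel of the d16 mask blend (illustrative,
 * not an aom API). blend_a64_d16_pixel_sketch is a hypothetical helper.
 */
#include <stdint.h>

static uint8_t blend_a64_d16_pixel_sketch(int mask, int32_t src0, int32_t src1,
                                          int round_offset, int round_bits) {
  /* Weighted sum of the two convolve buffers, narrowed by 6 bits
   * (what blend8x1() computes per lane). */
  int32_t v = (mask * src0 + (64 - mask) * src1) >> 6;
  /* Remove the intermediate convolve offset and round off the remaining
   * bits (the vqsubq_u16 / vshlq_s16 pair), then saturate to 8 bits as
   * vqmovun_s16 does. */
  v = (v - round_offset + (1 << (round_bits - 1))) >> round_bits;
  if (v < 0) v = 0;
  if (v > 255) v = 255;
  return (uint8_t)v;
}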
diff --git a/third_party/aom/aom_dsp/arm/fwd_txfm_neon.c b/third_party/aom/aom_dsp/arm/fwd_txfm_neon.c
new file mode 100644
index 0000000000..e4300c9920
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/fwd_txfm_neon.c
@@ -0,0 +1,222 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+
+#include "aom_dsp/txfm_common.h"
+
+void aom_fdct8x8_neon(const int16_t *input, int16_t *final_output, int stride) {
+ int i;
+ // stage 1
+ int16x8_t input_0 = vshlq_n_s16(vld1q_s16(&input[0 * stride]), 2);
+ int16x8_t input_1 = vshlq_n_s16(vld1q_s16(&input[1 * stride]), 2);
+ int16x8_t input_2 = vshlq_n_s16(vld1q_s16(&input[2 * stride]), 2);
+ int16x8_t input_3 = vshlq_n_s16(vld1q_s16(&input[3 * stride]), 2);
+ int16x8_t input_4 = vshlq_n_s16(vld1q_s16(&input[4 * stride]), 2);
+ int16x8_t input_5 = vshlq_n_s16(vld1q_s16(&input[5 * stride]), 2);
+ int16x8_t input_6 = vshlq_n_s16(vld1q_s16(&input[6 * stride]), 2);
+ int16x8_t input_7 = vshlq_n_s16(vld1q_s16(&input[7 * stride]), 2);
+ for (i = 0; i < 2; ++i) {
+ int16x8_t out_0, out_1, out_2, out_3, out_4, out_5, out_6, out_7;
+ const int16x8_t v_s0 = vaddq_s16(input_0, input_7);
+ const int16x8_t v_s1 = vaddq_s16(input_1, input_6);
+ const int16x8_t v_s2 = vaddq_s16(input_2, input_5);
+ const int16x8_t v_s3 = vaddq_s16(input_3, input_4);
+ const int16x8_t v_s4 = vsubq_s16(input_3, input_4);
+ const int16x8_t v_s5 = vsubq_s16(input_2, input_5);
+ const int16x8_t v_s6 = vsubq_s16(input_1, input_6);
+ const int16x8_t v_s7 = vsubq_s16(input_0, input_7);
+ // fdct4(step, step);
+ int16x8_t v_x0 = vaddq_s16(v_s0, v_s3);
+ int16x8_t v_x1 = vaddq_s16(v_s1, v_s2);
+ int16x8_t v_x2 = vsubq_s16(v_s1, v_s2);
+ int16x8_t v_x3 = vsubq_s16(v_s0, v_s3);
+ // fdct4(step, step);
+ int32x4_t v_t0_lo = vaddl_s16(vget_low_s16(v_x0), vget_low_s16(v_x1));
+ int32x4_t v_t0_hi = vaddl_s16(vget_high_s16(v_x0), vget_high_s16(v_x1));
+ int32x4_t v_t1_lo = vsubl_s16(vget_low_s16(v_x0), vget_low_s16(v_x1));
+ int32x4_t v_t1_hi = vsubl_s16(vget_high_s16(v_x0), vget_high_s16(v_x1));
+ int32x4_t v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), (int16_t)cospi_24_64);
+ int32x4_t v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), (int16_t)cospi_24_64);
+ int32x4_t v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_24_64);
+ int32x4_t v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_24_64);
+ v_t2_lo = vmlal_n_s16(v_t2_lo, vget_low_s16(v_x3), (int16_t)cospi_8_64);
+ v_t2_hi = vmlal_n_s16(v_t2_hi, vget_high_s16(v_x3), (int16_t)cospi_8_64);
+ v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x2), (int16_t)cospi_8_64);
+ v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x2), (int16_t)cospi_8_64);
+ v_t0_lo = vmulq_n_s32(v_t0_lo, (int32_t)cospi_16_64);
+ v_t0_hi = vmulq_n_s32(v_t0_hi, (int32_t)cospi_16_64);
+ v_t1_lo = vmulq_n_s32(v_t1_lo, (int32_t)cospi_16_64);
+ v_t1_hi = vmulq_n_s32(v_t1_hi, (int32_t)cospi_16_64);
+ {
+ const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS);
+ const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS);
+ const int16x4_t c = vrshrn_n_s32(v_t1_lo, DCT_CONST_BITS);
+ const int16x4_t d = vrshrn_n_s32(v_t1_hi, DCT_CONST_BITS);
+ const int16x4_t e = vrshrn_n_s32(v_t2_lo, DCT_CONST_BITS);
+ const int16x4_t f = vrshrn_n_s32(v_t2_hi, DCT_CONST_BITS);
+ const int16x4_t g = vrshrn_n_s32(v_t3_lo, DCT_CONST_BITS);
+ const int16x4_t h = vrshrn_n_s32(v_t3_hi, DCT_CONST_BITS);
+ out_0 = vcombine_s16(a, c); // 00 01 02 03 40 41 42 43
+ out_2 = vcombine_s16(e, g); // 20 21 22 23 60 61 62 63
+ out_4 = vcombine_s16(b, d); // 04 05 06 07 44 45 46 47
+ out_6 = vcombine_s16(f, h); // 24 25 26 27 64 65 66 67
+ }
+ // Stage 2
+ v_x0 = vsubq_s16(v_s6, v_s5);
+ v_x1 = vaddq_s16(v_s6, v_s5);
+ v_t0_lo = vmull_n_s16(vget_low_s16(v_x0), (int16_t)cospi_16_64);
+ v_t0_hi = vmull_n_s16(vget_high_s16(v_x0), (int16_t)cospi_16_64);
+ v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_16_64);
+ v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_16_64);
+ {
+ const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS);
+ const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS);
+ const int16x4_t c = vrshrn_n_s32(v_t1_lo, DCT_CONST_BITS);
+ const int16x4_t d = vrshrn_n_s32(v_t1_hi, DCT_CONST_BITS);
+ const int16x8_t ab = vcombine_s16(a, b);
+ const int16x8_t cd = vcombine_s16(c, d);
+ // Stage 3
+ v_x0 = vaddq_s16(v_s4, ab);
+ v_x1 = vsubq_s16(v_s4, ab);
+ v_x2 = vsubq_s16(v_s7, cd);
+ v_x3 = vaddq_s16(v_s7, cd);
+ }
+ // Stage 4
+ v_t0_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_4_64);
+ v_t0_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_4_64);
+ v_t0_lo = vmlal_n_s16(v_t0_lo, vget_low_s16(v_x0), (int16_t)cospi_28_64);
+ v_t0_hi = vmlal_n_s16(v_t0_hi, vget_high_s16(v_x0), (int16_t)cospi_28_64);
+ v_t1_lo = vmull_n_s16(vget_low_s16(v_x1), (int16_t)cospi_12_64);
+ v_t1_hi = vmull_n_s16(vget_high_s16(v_x1), (int16_t)cospi_12_64);
+ v_t1_lo = vmlal_n_s16(v_t1_lo, vget_low_s16(v_x2), (int16_t)cospi_20_64);
+ v_t1_hi = vmlal_n_s16(v_t1_hi, vget_high_s16(v_x2), (int16_t)cospi_20_64);
+ v_t2_lo = vmull_n_s16(vget_low_s16(v_x2), (int16_t)cospi_12_64);
+ v_t2_hi = vmull_n_s16(vget_high_s16(v_x2), (int16_t)cospi_12_64);
+ v_t2_lo = vmlsl_n_s16(v_t2_lo, vget_low_s16(v_x1), (int16_t)cospi_20_64);
+ v_t2_hi = vmlsl_n_s16(v_t2_hi, vget_high_s16(v_x1), (int16_t)cospi_20_64);
+ v_t3_lo = vmull_n_s16(vget_low_s16(v_x3), (int16_t)cospi_28_64);
+ v_t3_hi = vmull_n_s16(vget_high_s16(v_x3), (int16_t)cospi_28_64);
+ v_t3_lo = vmlsl_n_s16(v_t3_lo, vget_low_s16(v_x0), (int16_t)cospi_4_64);
+ v_t3_hi = vmlsl_n_s16(v_t3_hi, vget_high_s16(v_x0), (int16_t)cospi_4_64);
+ {
+ const int16x4_t a = vrshrn_n_s32(v_t0_lo, DCT_CONST_BITS);
+ const int16x4_t b = vrshrn_n_s32(v_t0_hi, DCT_CONST_BITS);
+ const int16x4_t c = vrshrn_n_s32(v_t1_lo, DCT_CONST_BITS);
+ const int16x4_t d = vrshrn_n_s32(v_t1_hi, DCT_CONST_BITS);
+ const int16x4_t e = vrshrn_n_s32(v_t2_lo, DCT_CONST_BITS);
+ const int16x4_t f = vrshrn_n_s32(v_t2_hi, DCT_CONST_BITS);
+ const int16x4_t g = vrshrn_n_s32(v_t3_lo, DCT_CONST_BITS);
+ const int16x4_t h = vrshrn_n_s32(v_t3_hi, DCT_CONST_BITS);
+ out_1 = vcombine_s16(a, c); // 10 11 12 13 50 51 52 53
+ out_3 = vcombine_s16(e, g); // 30 31 32 33 70 71 72 73
+ out_5 = vcombine_s16(b, d); // 14 15 16 17 54 55 56 57
+ out_7 = vcombine_s16(f, h); // 34 35 36 37 74 75 76 77
+ }
+ // transpose 8x8
+ {
+ // 00 01 02 03 40 41 42 43
+ // 10 11 12 13 50 51 52 53
+ // 20 21 22 23 60 61 62 63
+ // 30 31 32 33 70 71 72 73
+ // 04 05 06 07 44 45 46 47
+ // 14 15 16 17 54 55 56 57
+ // 24 25 26 27 64 65 66 67
+ // 34 35 36 37 74 75 76 77
+ const int32x4x2_t r02_s32 =
+ vtrnq_s32(vreinterpretq_s32_s16(out_0), vreinterpretq_s32_s16(out_2));
+ const int32x4x2_t r13_s32 =
+ vtrnq_s32(vreinterpretq_s32_s16(out_1), vreinterpretq_s32_s16(out_3));
+ const int32x4x2_t r46_s32 =
+ vtrnq_s32(vreinterpretq_s32_s16(out_4), vreinterpretq_s32_s16(out_6));
+ const int32x4x2_t r57_s32 =
+ vtrnq_s32(vreinterpretq_s32_s16(out_5), vreinterpretq_s32_s16(out_7));
+ const int16x8x2_t r01_s16 =
+ vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[0]),
+ vreinterpretq_s16_s32(r13_s32.val[0]));
+ const int16x8x2_t r23_s16 =
+ vtrnq_s16(vreinterpretq_s16_s32(r02_s32.val[1]),
+ vreinterpretq_s16_s32(r13_s32.val[1]));
+ const int16x8x2_t r45_s16 =
+ vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[0]),
+ vreinterpretq_s16_s32(r57_s32.val[0]));
+ const int16x8x2_t r67_s16 =
+ vtrnq_s16(vreinterpretq_s16_s32(r46_s32.val[1]),
+ vreinterpretq_s16_s32(r57_s32.val[1]));
+ input_0 = r01_s16.val[0];
+ input_1 = r01_s16.val[1];
+ input_2 = r23_s16.val[0];
+ input_3 = r23_s16.val[1];
+ input_4 = r45_s16.val[0];
+ input_5 = r45_s16.val[1];
+ input_6 = r67_s16.val[0];
+ input_7 = r67_s16.val[1];
+ // 00 10 20 30 40 50 60 70
+ // 01 11 21 31 41 51 61 71
+ // 02 12 22 32 42 52 62 72
+ // 03 13 23 33 43 53 63 73
+ // 04 14 24 34 44 54 64 74
+ // 05 15 25 35 45 55 65 75
+ // 06 16 26 36 46 56 66 76
+ // 07 17 27 37 47 57 67 77
+ }
+ } // for
+ {
+ // from aom_dct_sse2.c
+ // Post-condition (division by two)
+ // division of two 16 bits signed numbers using shifts
+ // n / 2 = (n - (n >> 15)) >> 1
+ const int16x8_t sign_in0 = vshrq_n_s16(input_0, 15);
+ const int16x8_t sign_in1 = vshrq_n_s16(input_1, 15);
+ const int16x8_t sign_in2 = vshrq_n_s16(input_2, 15);
+ const int16x8_t sign_in3 = vshrq_n_s16(input_3, 15);
+ const int16x8_t sign_in4 = vshrq_n_s16(input_4, 15);
+ const int16x8_t sign_in5 = vshrq_n_s16(input_5, 15);
+ const int16x8_t sign_in6 = vshrq_n_s16(input_6, 15);
+ const int16x8_t sign_in7 = vshrq_n_s16(input_7, 15);
+ input_0 = vhsubq_s16(input_0, sign_in0);
+ input_1 = vhsubq_s16(input_1, sign_in1);
+ input_2 = vhsubq_s16(input_2, sign_in2);
+ input_3 = vhsubq_s16(input_3, sign_in3);
+ input_4 = vhsubq_s16(input_4, sign_in4);
+ input_5 = vhsubq_s16(input_5, sign_in5);
+ input_6 = vhsubq_s16(input_6, sign_in6);
+ input_7 = vhsubq_s16(input_7, sign_in7);
+ // store results
+ vst1q_s16(&final_output[0 * 8], input_0);
+ vst1q_s16(&final_output[1 * 8], input_1);
+ vst1q_s16(&final_output[2 * 8], input_2);
+ vst1q_s16(&final_output[3 * 8], input_3);
+ vst1q_s16(&final_output[4 * 8], input_4);
+ vst1q_s16(&final_output[5 * 8], input_5);
+ vst1q_s16(&final_output[6 * 8], input_6);
+ vst1q_s16(&final_output[7 * 8], input_7);
+ }
+}
+
+void aom_fdct8x8_1_neon(const int16_t *input, int16_t *output, int stride) {
+ int r;
+ int16x8_t sum = vld1q_s16(&input[0]);
+ for (r = 1; r < 8; ++r) {
+ const int16x8_t input_00 = vld1q_s16(&input[r * stride]);
+ sum = vaddq_s16(sum, input_00);
+ }
+ {
+ const int32x4_t a = vpaddlq_s16(sum);
+ const int64x2_t b = vpaddlq_s32(a);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ output[0] = vget_lane_s16(vreinterpret_s16_s32(c), 0);
+ output[1] = 0;
+ }
+}
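
The post-condition block near the end of aom_fdct8x8_neon relies on the identity n / 2 == (n - (n >> 15)) >> 1 for 16-bit signed n, which lets round-toward-zero halving be done with an arithmetic shift plus a halving subtract (vhsubq_s16). A small self-contained check of that identity, using nothing beyond standard C:

/* Exhaustive check of the identity used in the post-condition above:
 * for any int16_t n, n / 2 (C truncation toward zero) equals
 * (n - (n >> 15)) >> 1, since n >> 15 is -1 for negative n and 0 otherwise.
 * This is the per-lane effect of the vshrq_n_s16 / vhsubq_s16 pair. */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

int main(void) {
  for (int32_t n = INT16_MIN; n <= INT16_MAX; ++n) {
    const int16_t v = (int16_t)n;
    const int16_t sign = (int16_t)(v >> 15);           /* 0 or -1 */
    const int16_t half = (int16_t)((v - sign) >> 1);   /* vhsubq_s16(v, sign) */
    assert(half == v / 2);
  }
  printf("n/2 == (n - (n >> 15)) >> 1 holds for all int16 values\n");
  return 0;
}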
diff --git a/third_party/aom/aom_dsp/arm/intrapred_neon.c b/third_party/aom/aom_dsp/arm/intrapred_neon.c
new file mode 100644
index 0000000000..c85b1e9100
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/intrapred_neon.c
@@ -0,0 +1,590 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+
+#include "aom/aom_integer.h"
+
+//------------------------------------------------------------------------------
+// DC 4x4
+
+// 'do_above' and 'do_left' facilitate branch removal when inlined.
+static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride, const uint8_t *above,
+ const uint8_t *left, int do_above, int do_left) {
+ uint16x8_t sum_top;
+ uint16x8_t sum_left;
+ uint8x8_t dc0;
+
+ if (do_above) {
+ const uint8x8_t A = vld1_u8(above); // top row
+ const uint16x4_t p0 = vpaddl_u8(A); // cascading summation of the top
+ const uint16x4_t p1 = vpadd_u16(p0, p0);
+ sum_top = vcombine_u16(p1, p1);
+ }
+
+ if (do_left) {
+ const uint8x8_t L = vld1_u8(left); // left border
+ const uint16x4_t p0 = vpaddl_u8(L); // cascading summation of the left
+ const uint16x4_t p1 = vpadd_u16(p0, p0);
+ sum_left = vcombine_u16(p1, p1);
+ }
+
+ if (do_above && do_left) {
+ const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
+ dc0 = vrshrn_n_u16(sum, 3);
+ } else if (do_above) {
+ dc0 = vrshrn_n_u16(sum_top, 2);
+ } else if (do_left) {
+ dc0 = vrshrn_n_u16(sum_left, 2);
+ } else {
+ dc0 = vdup_n_u8(0x80);
+ }
+
+ {
+ const uint8x8_t dc = vdup_lane_u8(dc0, 0);
+ int i;
+ for (i = 0; i < 4; ++i) {
+ vst1_lane_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc), 0);
+ }
+ }
+}
+
+void aom_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ dc_4x4(dst, stride, above, left, 1, 1);
+}
+
+void aom_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)above;
+ dc_4x4(dst, stride, NULL, left, 0, 1);
+}
+
+void aom_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)left;
+ dc_4x4(dst, stride, above, NULL, 1, 0);
+}
+
+void aom_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)above;
+ (void)left;
+ dc_4x4(dst, stride, NULL, NULL, 0, 0);
+}
+
+//------------------------------------------------------------------------------
+// DC 8x8
+
+// 'do_above' and 'do_left' facilitate branch removal when inlined.
+static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, const uint8_t *above,
+ const uint8_t *left, int do_above, int do_left) {
+ uint16x8_t sum_top;
+ uint16x8_t sum_left;
+ uint8x8_t dc0;
+
+ if (do_above) {
+ const uint8x8_t A = vld1_u8(above); // top row
+ const uint16x4_t p0 = vpaddl_u8(A); // cascading summation of the top
+ const uint16x4_t p1 = vpadd_u16(p0, p0);
+ const uint16x4_t p2 = vpadd_u16(p1, p1);
+ sum_top = vcombine_u16(p2, p2);
+ }
+
+ if (do_left) {
+ const uint8x8_t L = vld1_u8(left); // left border
+ const uint16x4_t p0 = vpaddl_u8(L); // cascading summation of the left
+ const uint16x4_t p1 = vpadd_u16(p0, p0);
+ const uint16x4_t p2 = vpadd_u16(p1, p1);
+ sum_left = vcombine_u16(p2, p2);
+ }
+
+ if (do_above && do_left) {
+ const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
+ dc0 = vrshrn_n_u16(sum, 4);
+ } else if (do_above) {
+ dc0 = vrshrn_n_u16(sum_top, 3);
+ } else if (do_left) {
+ dc0 = vrshrn_n_u16(sum_left, 3);
+ } else {
+ dc0 = vdup_n_u8(0x80);
+ }
+
+ {
+ const uint8x8_t dc = vdup_lane_u8(dc0, 0);
+ int i;
+ for (i = 0; i < 8; ++i) {
+ vst1_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc));
+ }
+ }
+}
+
+void aom_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ dc_8x8(dst, stride, above, left, 1, 1);
+}
+
+void aom_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)above;
+ dc_8x8(dst, stride, NULL, left, 0, 1);
+}
+
+void aom_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)left;
+ dc_8x8(dst, stride, above, NULL, 1, 0);
+}
+
+void aom_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ (void)above;
+ (void)left;
+ dc_8x8(dst, stride, NULL, NULL, 0, 0);
+}
+
+//------------------------------------------------------------------------------
+// DC 16x16
+
+// 'do_above' and 'do_left' facilitate branch removal when inlined.
+static INLINE void dc_16x16(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left,
+ int do_above, int do_left) {
+ uint16x8_t sum_top;
+ uint16x8_t sum_left;
+ uint8x8_t dc0;
+
+ if (do_above) {
+ const uint8x16_t A = vld1q_u8(above); // top row
+ const uint16x8_t p0 = vpaddlq_u8(A); // cascading summation of the top
+ const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
+ const uint16x4_t p2 = vpadd_u16(p1, p1);
+ const uint16x4_t p3 = vpadd_u16(p2, p2);
+ sum_top = vcombine_u16(p3, p3);
+ }
+
+ if (do_left) {
+ const uint8x16_t L = vld1q_u8(left); // left row
+ const uint16x8_t p0 = vpaddlq_u8(L); // cascading summation of the left
+ const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
+ const uint16x4_t p2 = vpadd_u16(p1, p1);
+ const uint16x4_t p3 = vpadd_u16(p2, p2);
+ sum_left = vcombine_u16(p3, p3);
+ }
+
+ if (do_above && do_left) {
+ const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
+ dc0 = vrshrn_n_u16(sum, 5);
+ } else if (do_above) {
+ dc0 = vrshrn_n_u16(sum_top, 4);
+ } else if (do_left) {
+ dc0 = vrshrn_n_u16(sum_left, 4);
+ } else {
+ dc0 = vdup_n_u8(0x80);
+ }
+
+ {
+ const uint8x16_t dc = vdupq_lane_u8(dc0, 0);
+ int i;
+ for (i = 0; i < 16; ++i) {
+ vst1q_u8(dst + i * stride, dc);
+ }
+ }
+}
+
+void aom_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ dc_16x16(dst, stride, above, left, 1, 1);
+}
+
+void aom_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ (void)above;
+ dc_16x16(dst, stride, NULL, left, 0, 1);
+}
+
+void aom_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ (void)left;
+ dc_16x16(dst, stride, above, NULL, 1, 0);
+}
+
+void aom_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ (void)above;
+ (void)left;
+ dc_16x16(dst, stride, NULL, NULL, 0, 0);
+}
+
+//------------------------------------------------------------------------------
+// DC 32x32
+
+// 'do_above' and 'do_left' facilitate branch removal when inlined.
+static INLINE void dc_32x32(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left,
+ int do_above, int do_left) {
+ uint16x8_t sum_top;
+ uint16x8_t sum_left;
+ uint8x8_t dc0;
+
+ if (do_above) {
+ const uint8x16_t A0 = vld1q_u8(above); // top row
+ const uint8x16_t A1 = vld1q_u8(above + 16);
+ const uint16x8_t p0 = vpaddlq_u8(A0); // cascading summation of the top
+ const uint16x8_t p1 = vpaddlq_u8(A1);
+ const uint16x8_t p2 = vaddq_u16(p0, p1);
+ const uint16x4_t p3 = vadd_u16(vget_low_u16(p2), vget_high_u16(p2));
+ const uint16x4_t p4 = vpadd_u16(p3, p3);
+ const uint16x4_t p5 = vpadd_u16(p4, p4);
+ sum_top = vcombine_u16(p5, p5);
+ }
+
+ if (do_left) {
+ const uint8x16_t L0 = vld1q_u8(left); // left row
+ const uint8x16_t L1 = vld1q_u8(left + 16);
+ const uint16x8_t p0 = vpaddlq_u8(L0); // cascading summation of the left
+ const uint16x8_t p1 = vpaddlq_u8(L1);
+ const uint16x8_t p2 = vaddq_u16(p0, p1);
+ const uint16x4_t p3 = vadd_u16(vget_low_u16(p2), vget_high_u16(p2));
+ const uint16x4_t p4 = vpadd_u16(p3, p3);
+ const uint16x4_t p5 = vpadd_u16(p4, p4);
+ sum_left = vcombine_u16(p5, p5);
+ }
+
+ if (do_above && do_left) {
+ const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
+ dc0 = vrshrn_n_u16(sum, 6);
+ } else if (do_above) {
+ dc0 = vrshrn_n_u16(sum_top, 5);
+ } else if (do_left) {
+ dc0 = vrshrn_n_u16(sum_left, 5);
+ } else {
+ dc0 = vdup_n_u8(0x80);
+ }
+
+ {
+ const uint8x16_t dc = vdupq_lane_u8(dc0, 0);
+ int i;
+ for (i = 0; i < 32; ++i) {
+ vst1q_u8(dst + i * stride, dc);
+ vst1q_u8(dst + i * stride + 16, dc);
+ }
+ }
+}
+
+void aom_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ dc_32x32(dst, stride, above, left, 1, 1);
+}
+
+void aom_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ (void)above;
+ dc_32x32(dst, stride, NULL, left, 0, 1);
+}
+
+void aom_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ (void)left;
+ dc_32x32(dst, stride, above, NULL, 1, 0);
+}
+
+void aom_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above,
+ const uint8_t *left) {
+ (void)above;
+ (void)left;
+ dc_32x32(dst, stride, NULL, NULL, 0, 0);
+}
+
+// -----------------------------------------------------------------------------
+
+void aom_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ const uint8x8_t XABCD_u8 = vld1_u8(above - 1);
+ const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8);
+ const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32);
+ const uint32x2_t zero = vdup_n_u32(0);
+ const uint32x2_t IJKL = vld1_lane_u32((const uint32_t *)left, zero, 0);
+ const uint8x8_t IJKL_u8 = vreinterpret_u8_u32(IJKL);
+ const uint64x1_t LKJI____ = vreinterpret_u64_u8(vrev32_u8(IJKL_u8));
+ const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC);
+ const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8));
+ const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16));
+ const uint8_t D = vget_lane_u8(XABCD_u8, 4);
+ const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6);
+ const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC);
+ const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8);
+ const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);
+ const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
+ const uint32x2_t r3 = vreinterpret_u32_u8(avg2);
+ const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
+ const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
+ const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
+ vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
+ vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
+ vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
+ vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
+}
+
+void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ int i;
+ uint32x2_t d0u32 = vdup_n_u32(0);
+ (void)left;
+
+ d0u32 = vld1_lane_u32((const uint32_t *)above, d0u32, 0);
+ for (i = 0; i < 4; i++, dst += stride)
+ vst1_lane_u32((uint32_t *)dst, d0u32, 0);
+}
+
+void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ int i;
+ uint8x8_t d0u8 = vdup_n_u8(0);
+ (void)left;
+
+ d0u8 = vld1_u8(above);
+ for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8);
+}
+
+void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ int i;
+ uint8x16_t q0u8 = vdupq_n_u8(0);
+ (void)left;
+
+ q0u8 = vld1q_u8(above);
+ for (i = 0; i < 16; i++, dst += stride) vst1q_u8(dst, q0u8);
+}
+
+void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ int i;
+ uint8x16_t q0u8 = vdupq_n_u8(0);
+ uint8x16_t q1u8 = vdupq_n_u8(0);
+ (void)left;
+
+ q0u8 = vld1q_u8(above);
+ q1u8 = vld1q_u8(above + 16);
+ for (i = 0; i < 32; i++, dst += stride) {
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q1u8);
+ }
+}
+
+void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ uint8x8_t d0u8 = vdup_n_u8(0);
+ uint32x2_t d1u32 = vdup_n_u32(0);
+ (void)above;
+
+ d1u32 = vld1_lane_u32((const uint32_t *)left, d1u32, 0);
+
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 0);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 1);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 2);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 3);
+ vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
+}
+
+void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ uint8x8_t d0u8 = vdup_n_u8(0);
+ uint64x1_t d1u64 = vdup_n_u64(0);
+ (void)above;
+
+ d1u64 = vld1_u64((const uint64_t *)left);
+
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 0);
+ vst1_u8(dst, d0u8);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 1);
+ vst1_u8(dst, d0u8);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 2);
+ vst1_u8(dst, d0u8);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 3);
+ vst1_u8(dst, d0u8);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 4);
+ vst1_u8(dst, d0u8);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 5);
+ vst1_u8(dst, d0u8);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 6);
+ vst1_u8(dst, d0u8);
+ dst += stride;
+ d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 7);
+ vst1_u8(dst, d0u8);
+}
+
+void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ int j;
+ uint8x8_t d2u8 = vdup_n_u8(0);
+ uint8x16_t q0u8 = vdupq_n_u8(0);
+ uint8x16_t q1u8 = vdupq_n_u8(0);
+ (void)above;
+
+ q1u8 = vld1q_u8(left);
+ d2u8 = vget_low_u8(q1u8);
+ for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) {
+ q0u8 = vdupq_lane_u8(d2u8, 0);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 1);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 2);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 3);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 4);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 5);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 6);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 7);
+ vst1q_u8(dst, q0u8);
+ dst += stride;
+ }
+}
+
+void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
+ const uint8_t *above, const uint8_t *left) {
+ int j, k;
+ uint8x8_t d2u8 = vdup_n_u8(0);
+ uint8x16_t q0u8 = vdupq_n_u8(0);
+ uint8x16_t q1u8 = vdupq_n_u8(0);
+ (void)above;
+
+ for (k = 0; k < 2; k++, left += 16) {
+ q1u8 = vld1q_u8(left);
+ d2u8 = vget_low_u8(q1u8);
+ for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) {
+ q0u8 = vdupq_lane_u8(d2u8, 0);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 1);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 2);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 3);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 4);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 5);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 6);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ q0u8 = vdupq_lane_u8(d2u8, 7);
+ vst1q_u8(dst, q0u8);
+ vst1q_u8(dst + 16, q0u8);
+ dst += stride;
+ }
+ }
+}
+
+static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bw,
+ const uint16_t *above,
+ const uint16_t *left) {
+ assert(bw >= 4);
+ assert(IS_POWER_OF_TWO(bw));
+ int expected_dc, sum = 0;
+ const int count = bw * 2;
+ uint32x4_t sum_q = vdupq_n_u32(0);
+ uint32x2_t sum_d;
+ uint16_t *dst_1;
+ if (bw >= 8) {
+ for (int i = 0; i < bw; i += 8) {
+ sum_q = vpadalq_u16(sum_q, vld1q_u16(above));
+ sum_q = vpadalq_u16(sum_q, vld1q_u16(left));
+ above += 8;
+ left += 8;
+ }
+ sum_d = vadd_u32(vget_low_u32(sum_q), vget_high_u32(sum_q));
+ sum = vget_lane_s32(vreinterpret_s32_u64(vpaddl_u32(sum_d)), 0);
+ expected_dc = (sum + (count >> 1)) / count;
+ const uint16x8_t dc = vdupq_n_u16((uint16_t)expected_dc);
+ for (int r = 0; r < bw; r++) {
+ dst_1 = dst;
+ for (int i = 0; i < bw; i += 8) {
+ vst1q_u16(dst_1, dc);
+ dst_1 += 8;
+ }
+ dst += stride;
+ }
+ } else { // 4x4
+ sum_q = vaddl_u16(vld1_u16(above), vld1_u16(left));
+ sum_d = vadd_u32(vget_low_u32(sum_q), vget_high_u32(sum_q));
+ sum = vget_lane_s32(vreinterpret_s32_u64(vpaddl_u32(sum_d)), 0);
+ expected_dc = (sum + (count >> 1)) / count;
+ const uint16x4_t dc = vdup_n_u16((uint16_t)expected_dc);
+ for (int r = 0; r < bw; r++) {
+ vst1_u16(dst, dc);
+ dst += stride;
+ }
+ }
+}
+
+#define intra_pred_highbd_sized_neon(type, width) \
+ void aom_highbd_##type##_predictor_##width##x##width##_neon( \
+ uint16_t *dst, ptrdiff_t stride, const uint16_t *above, \
+ const uint16_t *left, int bd) { \
+ (void)bd; \
+ highbd_##type##_predictor(dst, stride, width, above, left); \
+ }
+
+#define intra_pred_square(type) \
+ intra_pred_highbd_sized_neon(type, 4); \
+ intra_pred_highbd_sized_neon(type, 8); \
+ intra_pred_highbd_sized_neon(type, 16); \
+ intra_pred_highbd_sized_neon(type, 32); \
+ intra_pred_highbd_sized_neon(type, 64);
+
+intra_pred_square(dc);
+#undef intra_pred_square
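
All of the DC helpers above (dc_4x4 through dc_32x32 and highbd_dc_predictor) implement the same scalar rule: average the available border pixels with rounding (the vrshrn_n_u16 shift counts correspond to dividing by the power-of-two pixel count), or fall back to the mid-grey value 0x80 when neither border is used. A hedged scalar sketch of that rule follows; dc_fill_sketch is a hypothetical helper for illustration, not part of aom_dsp.

/* Illustrative scalar equivalent of the NEON DC predictors above.
 * Passing NULL for a border mirrors the do_above/do_left == 0 paths. */
#include <stddef.h>
#include <stdint.h>

static void dc_fill_sketch(uint8_t *dst, ptrdiff_t stride, int bs,
                           const uint8_t *above, const uint8_t *left) {
  int sum = 0, count = 0;
  if (above) {
    for (int i = 0; i < bs; ++i) sum += above[i];
    count += bs;
  }
  if (left) {
    for (int i = 0; i < bs; ++i) sum += left[i];
    count += bs;
  }
  /* Rounded average, e.g. vrshrn_n_u16(sum, 3) in the 4x4 both-borders
   * case; 0x80 is the fixed DC_128 value. */
  const uint8_t dc = count ? (uint8_t)((sum + (count >> 1)) / count) : 0x80;
  for (int r = 0; r < bs; ++r)
    for (int c = 0; c < bs; ++c) dst[r * stride + c] = dc;
}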
diff --git a/third_party/aom/aom_dsp/arm/loopfilter_neon.c b/third_party/aom/aom_dsp/arm/loopfilter_neon.c
new file mode 100644
index 0000000000..bdc67626d6
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/loopfilter_neon.c
@@ -0,0 +1,928 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_dsp_rtcd.h"
+#include "config/aom_config.h"
+
+#include "aom/aom_integer.h"
+#include "av1/common/arm/mem_neon.h"
+#include "av1/common/arm/transpose_neon.h"
+
+static INLINE uint8x8_t lpf_mask(uint8x8_t p3q3, uint8x8_t p2q2, uint8x8_t p1q1,
+ uint8x8_t p0q0, const uint8_t blimit,
+ const uint8_t limit) {
+ // Calculate mask values for four samples
+ uint32x2x2_t p0q0_p1q1;
+ uint16x8_t temp_16x8;
+ uint16x4_t temp0_16x4, temp1_16x4;
+ uint8x8_t mask_8x8, temp_8x8;
+ const uint8x8_t limit_8x8 = vdup_n_u8(limit);
+ const uint16x4_t blimit_16x4 = vdup_n_u16((uint16_t)blimit);
+
+ mask_8x8 = vabd_u8(p3q3, p2q2);
+ mask_8x8 = vmax_u8(mask_8x8, vabd_u8(p2q2, p1q1));
+ mask_8x8 = vmax_u8(mask_8x8, vabd_u8(p1q1, p0q0));
+ mask_8x8 = vcle_u8(mask_8x8, limit_8x8);
+
+ temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8)));
+ mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+ p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1));
+ temp_8x8 = vabd_u8(vreinterpret_u8_u32(p0q0_p1q1.val[0]),
+ vreinterpret_u8_u32(p0q0_p1q1.val[1]));
+ temp_16x8 = vmovl_u8(temp_8x8);
+ temp0_16x4 = vshl_n_u16(vget_low_u16(temp_16x8), 1);
+ temp1_16x4 = vshr_n_u16(vget_high_u16(temp_16x8), 1);
+ temp0_16x4 = vadd_u16(temp0_16x4, temp1_16x4);
+ temp0_16x4 = vcle_u16(temp0_16x4, blimit_16x4);
+ temp_8x8 = vmovn_u16(vcombine_u16(temp0_16x4, temp0_16x4));
+
+ mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+ return mask_8x8;
+}
+
+static INLINE uint8x8_t lpf_mask2(uint8x8_t p1q1, uint8x8_t p0q0,
+ const uint8_t blimit, const uint8_t limit) {
+ uint32x2x2_t p0q0_p1q1;
+ uint16x8_t temp_16x8;
+ uint16x4_t temp0_16x4, temp1_16x4;
+ const uint16x4_t blimit_16x4 = vdup_n_u16(blimit);
+ const uint8x8_t limit_8x8 = vdup_n_u8(limit);
+ uint8x8_t mask_8x8, temp_8x8;
+
+ mask_8x8 = vabd_u8(p1q1, p0q0);
+ mask_8x8 = vcle_u8(mask_8x8, limit_8x8);
+
+ temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8)));
+ mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+ p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1));
+ temp_8x8 = vabd_u8(vreinterpret_u8_u32(p0q0_p1q1.val[0]),
+ vreinterpret_u8_u32(p0q0_p1q1.val[1]));
+ temp_16x8 = vmovl_u8(temp_8x8);
+ temp0_16x4 = vshl_n_u16(vget_low_u16(temp_16x8), 1);
+ temp1_16x4 = vshr_n_u16(vget_high_u16(temp_16x8), 1);
+ temp0_16x4 = vadd_u16(temp0_16x4, temp1_16x4);
+ temp0_16x4 = vcle_u16(temp0_16x4, blimit_16x4);
+ temp_8x8 = vmovn_u16(vcombine_u16(temp0_16x4, temp0_16x4));
+
+ mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+ return mask_8x8;
+}
+
+static INLINE uint8x8_t lpf_flat_mask4(uint8x8_t p3q3, uint8x8_t p2q2,
+ uint8x8_t p1q1, uint8x8_t p0q0) {
+ const uint8x8_t thresh_8x8 = vdup_n_u8(1); // for bd==8 threshold is always 1
+ uint8x8_t flat_8x8, temp_8x8;
+
+ flat_8x8 = vabd_u8(p1q1, p0q0);
+ flat_8x8 = vmax_u8(flat_8x8, vabd_u8(p2q2, p0q0));
+ flat_8x8 = vmax_u8(flat_8x8, vabd_u8(p3q3, p0q0));
+ flat_8x8 = vcle_u8(flat_8x8, thresh_8x8);
+
+ temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(flat_8x8)));
+ flat_8x8 = vand_u8(flat_8x8, temp_8x8);
+
+ return flat_8x8;
+}
+
+static INLINE uint8x8_t lpf_flat_mask3(uint8x8_t p2q2, uint8x8_t p1q1,
+ uint8x8_t p0q0) {
+ const uint8x8_t thresh_8x8 = vdup_n_u8(1); // for bd==8 threshold is always 1
+ uint8x8_t flat_8x8, temp_8x8;
+
+ flat_8x8 = vabd_u8(p1q1, p0q0);
+ flat_8x8 = vmax_u8(flat_8x8, vabd_u8(p2q2, p0q0));
+ flat_8x8 = vcle_u8(flat_8x8, thresh_8x8);
+
+ temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(flat_8x8)));
+ flat_8x8 = vand_u8(flat_8x8, temp_8x8);
+
+ return flat_8x8;
+}
+
+static INLINE uint8x8_t lpf_mask3_chroma(uint8x8_t p2q2, uint8x8_t p1q1,
+ uint8x8_t p0q0, const uint8_t blimit,
+ const uint8_t limit) {
+ // Calculate mask3 values for four samples
+ uint32x2x2_t p0q0_p1q1;
+ uint16x8_t temp_16x8;
+ uint16x4_t temp0_16x4, temp1_16x4;
+ uint8x8_t mask_8x8, temp_8x8;
+ const uint8x8_t limit_8x8 = vdup_n_u8(limit);
+ const uint16x4_t blimit_16x4 = vdup_n_u16((uint16_t)blimit);
+
+ mask_8x8 = vabd_u8(p2q2, p1q1);
+ mask_8x8 = vmax_u8(mask_8x8, vabd_u8(p1q1, p0q0));
+ mask_8x8 = vcle_u8(mask_8x8, limit_8x8);
+
+ temp_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(mask_8x8)));
+ mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+ p0q0_p1q1 = vtrn_u32(vreinterpret_u32_u8(p0q0), vreinterpret_u32_u8(p1q1));
+ temp_8x8 = vabd_u8(vreinterpret_u8_u32(p0q0_p1q1.val[0]),
+ vreinterpret_u8_u32(p0q0_p1q1.val[1]));
+ temp_16x8 = vmovl_u8(temp_8x8);
+ temp0_16x4 = vshl_n_u16(vget_low_u16(temp_16x8), 1);
+ temp1_16x4 = vshr_n_u16(vget_high_u16(temp_16x8), 1);
+ temp0_16x4 = vadd_u16(temp0_16x4, temp1_16x4);
+ temp0_16x4 = vcle_u16(temp0_16x4, blimit_16x4);
+ temp_8x8 = vmovn_u16(vcombine_u16(temp0_16x4, temp0_16x4));
+
+ mask_8x8 = vand_u8(mask_8x8, temp_8x8);
+
+ return mask_8x8;
+}
+
+static void lpf_14_neon(uint8x8_t *p6q6, uint8x8_t *p5q5, uint8x8_t *p4q4,
+ uint8x8_t *p3q3, uint8x8_t *p2q2, uint8x8_t *p1q1,
+ uint8x8_t *p0q0, const uint8_t blimit,
+ const uint8_t limit, const uint8_t thresh) {
+ uint16x8_t out;
+ uint8x8_t out_f14_pq0, out_f14_pq1, out_f14_pq2, out_f14_pq3, out_f14_pq4,
+ out_f14_pq5;
+ uint8x8_t out_f7_pq0, out_f7_pq1, out_f7_pq2;
+ uint8x8_t out_f4_pq0, out_f4_pq1;
+ uint8x8_t mask_8x8, flat_8x8, flat2_8x8;
+ uint8x8_t q0p0, q1p1, q2p2;
+
+ // Calculate filter masks
+ mask_8x8 = lpf_mask(*p3q3, *p2q2, *p1q1, *p0q0, blimit, limit);
+ flat_8x8 = lpf_flat_mask4(*p3q3, *p2q2, *p1q1, *p0q0);
+ flat2_8x8 = lpf_flat_mask4(*p6q6, *p5q5, *p4q4, *p0q0);
+ {
+ // filter 4
+ int32x2x2_t ps0_qs0, ps1_qs1;
+ int16x8_t filter_s16;
+ const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
+ uint8x8_t temp0_8x8, temp1_8x8;
+ int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
+ int8x8_t op0, oq0, op1, oq1;
+ int8x8_t pq_s0, pq_s1;
+ int8x8_t filter_s8, filter1_s8, filter2_s8;
+ int8x8_t hev_8x8;
+ const int8x8_t sign_mask = vdup_n_s8(0x80);
+ const int8x8_t val_4 = vdup_n_s8(4);
+ const int8x8_t val_3 = vdup_n_s8(3);
+
+ pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
+ pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
+
+ ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
+ ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
+ ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
+ qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
+ ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
+ qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
+
+ // hev_mask
+ temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
+ temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
+ hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
+
+ // add outer taps if we have high edge variance
+ filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
+ filter_s8 = vand_s8(filter_s8, hev_8x8);
+
+ // inner taps
+ temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
+ filter_s16 = vmovl_s8(filter_s8);
+ filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
+ filter_s8 = vqmovn_s16(filter_s16);
+ filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
+
+ filter1_s8 = vqadd_s8(filter_s8, val_4);
+ filter2_s8 = vqadd_s8(filter_s8, val_3);
+ filter1_s8 = vshr_n_s8(filter1_s8, 3);
+ filter2_s8 = vshr_n_s8(filter2_s8, 3);
+
+ oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
+ op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
+
+ hev_8x8 = vmvn_s8(hev_8x8);
+ filter_s8 = vrshr_n_s8(filter1_s8, 1);
+ filter_s8 = vand_s8(filter_s8, hev_8x8);
+
+ oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
+ op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
+
+ out_f4_pq0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
+ out_f4_pq1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
+ }
+ // reverse p and q
+ q0p0 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p0q0)));
+ q1p1 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p1q1)));
+ q2p2 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p2q2)));
+ {
+ // filter 8
+ uint16x8_t out_pq0, out_pq1, out_pq2;
+ out = vaddl_u8(*p3q3, *p2q2);
+ out = vaddw_u8(out, *p1q1);
+ out = vaddw_u8(out, *p0q0);
+
+ out = vaddw_u8(out, q0p0);
+ out_pq1 = vaddw_u8(out, *p3q3);
+ out_pq2 = vaddw_u8(out_pq1, *p3q3);
+ out_pq2 = vaddw_u8(out_pq2, *p2q2);
+ out_pq1 = vaddw_u8(out_pq1, *p1q1);
+ out_pq1 = vaddw_u8(out_pq1, q1p1);
+
+ out_pq0 = vaddw_u8(out, *p0q0);
+ out_pq0 = vaddw_u8(out_pq0, q1p1);
+ out_pq0 = vaddw_u8(out_pq0, q2p2);
+
+ out_f7_pq0 = vrshrn_n_u16(out_pq0, 3);
+ out_f7_pq1 = vrshrn_n_u16(out_pq1, 3);
+ out_f7_pq2 = vrshrn_n_u16(out_pq2, 3);
+ }
+ {
+ // filter 14
+ uint16x8_t out_pq0, out_pq1, out_pq2, out_pq3, out_pq4, out_pq5;
+ uint16x8_t p6q6_2, p6q6_temp, qp_sum;
+ uint8x8_t qp_rev;
+
+ out = vaddw_u8(out, *p4q4);
+ out = vaddw_u8(out, *p5q5);
+ out = vaddw_u8(out, *p6q6);
+
+ out_pq5 = vaddw_u8(out, *p4q4);
+ out_pq4 = vaddw_u8(out_pq5, *p3q3);
+ out_pq3 = vaddw_u8(out_pq4, *p2q2);
+
+ out_pq5 = vaddw_u8(out_pq5, *p5q5);
+ out_pq4 = vaddw_u8(out_pq4, *p5q5);
+
+ out_pq0 = vaddw_u8(out, *p1q1);
+ out_pq1 = vaddw_u8(out_pq0, *p2q2);
+ out_pq2 = vaddw_u8(out_pq1, *p3q3);
+
+ out_pq0 = vaddw_u8(out_pq0, *p0q0);
+ out_pq1 = vaddw_u8(out_pq1, *p0q0);
+
+ out_pq1 = vaddw_u8(out_pq1, *p6q6);
+ p6q6_2 = vaddl_u8(*p6q6, *p6q6);
+ out_pq2 = vaddq_u16(out_pq2, p6q6_2);
+ p6q6_temp = vaddw_u8(p6q6_2, *p6q6);
+ out_pq3 = vaddq_u16(out_pq3, p6q6_temp);
+ p6q6_temp = vaddw_u8(p6q6_temp, *p6q6);
+ out_pq4 = vaddq_u16(out_pq4, p6q6_temp);
+ p6q6_temp = vaddq_u16(p6q6_temp, p6q6_2);
+ out_pq5 = vaddq_u16(out_pq5, p6q6_temp);
+
+ out_pq4 = vaddw_u8(out_pq4, q1p1);
+
+ qp_sum = vaddl_u8(q2p2, q1p1);
+ out_pq3 = vaddq_u16(out_pq3, qp_sum);
+
+ qp_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p3q3)));
+ qp_sum = vaddw_u8(qp_sum, qp_rev);
+ out_pq2 = vaddq_u16(out_pq2, qp_sum);
+
+ qp_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p4q4)));
+ qp_sum = vaddw_u8(qp_sum, qp_rev);
+ out_pq1 = vaddq_u16(out_pq1, qp_sum);
+
+ qp_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p5q5)));
+ qp_sum = vaddw_u8(qp_sum, qp_rev);
+ out_pq0 = vaddq_u16(out_pq0, qp_sum);
+
+ out_pq0 = vaddw_u8(out_pq0, q0p0);
+
+ out_f14_pq0 = vrshrn_n_u16(out_pq0, 4);
+ out_f14_pq1 = vrshrn_n_u16(out_pq1, 4);
+ out_f14_pq2 = vrshrn_n_u16(out_pq2, 4);
+ out_f14_pq3 = vrshrn_n_u16(out_pq3, 4);
+ out_f14_pq4 = vrshrn_n_u16(out_pq4, 4);
+ out_f14_pq5 = vrshrn_n_u16(out_pq5, 4);
+ }
+ {
+ uint8x8_t filter4_cond, filter8_cond, filter14_cond;
+ filter8_cond = vand_u8(flat_8x8, mask_8x8);
+ filter4_cond = vmvn_u8(filter8_cond);
+ filter14_cond = vand_u8(filter8_cond, flat2_8x8);
+
+ // filter4 outputs
+ *p0q0 = vbsl_u8(filter4_cond, out_f4_pq0, *p0q0);
+ *p1q1 = vbsl_u8(filter4_cond, out_f4_pq1, *p1q1);
+
+ // filter8 outputs
+ *p0q0 = vbsl_u8(filter8_cond, out_f7_pq0, *p0q0);
+ *p1q1 = vbsl_u8(filter8_cond, out_f7_pq1, *p1q1);
+ *p2q2 = vbsl_u8(filter8_cond, out_f7_pq2, *p2q2);
+
+ // filter14 outputs
+ *p0q0 = vbsl_u8(filter14_cond, out_f14_pq0, *p0q0);
+ *p1q1 = vbsl_u8(filter14_cond, out_f14_pq1, *p1q1);
+ *p2q2 = vbsl_u8(filter14_cond, out_f14_pq2, *p2q2);
+ *p3q3 = vbsl_u8(filter14_cond, out_f14_pq3, *p3q3);
+ *p4q4 = vbsl_u8(filter14_cond, out_f14_pq4, *p4q4);
+ *p5q5 = vbsl_u8(filter14_cond, out_f14_pq5, *p5q5);
+ }
+}
+
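+// Apply the filter4 and filter8 variants of the loop filter to a p3..q3 edge.
+// Each uint8x8_t packs four p pixels in its low half and the matching four q
+// pixels in its high half; both results are computed and the final output is
+// selected per pixel with vbsl based on the filter mask and flatness test.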
+static void lpf_8_neon(uint8x8_t *p3q3, uint8x8_t *p2q2, uint8x8_t *p1q1,
+ uint8x8_t *p0q0, const uint8_t blimit,
+ const uint8_t limit, const uint8_t thresh) {
+ uint16x8_t out;
+ uint8x8_t out_f7_pq0, out_f7_pq1, out_f7_pq2;
+ uint8x8_t out_f4_pq0, out_f4_pq1;
+ uint8x8_t mask_8x8, flat_8x8;
+
+ // Calculate filter masks
+ mask_8x8 = lpf_mask(*p3q3, *p2q2, *p1q1, *p0q0, blimit, limit);
+ flat_8x8 = lpf_flat_mask4(*p3q3, *p2q2, *p1q1, *p0q0);
+ {
+ // filter 4
+ int32x2x2_t ps0_qs0, ps1_qs1;
+ int16x8_t filter_s16;
+ const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
+ uint8x8_t temp0_8x8, temp1_8x8;
+ int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
+ int8x8_t op0, oq0, op1, oq1;
+ int8x8_t pq_s0, pq_s1;
+ int8x8_t filter_s8, filter1_s8, filter2_s8;
+ int8x8_t hev_8x8;
+ const int8x8_t sign_mask = vdup_n_s8(0x80);
+ const int8x8_t val_4 = vdup_n_s8(4);
+ const int8x8_t val_3 = vdup_n_s8(3);
+
+ pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
+ pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
+
+ ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
+ ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
+ ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
+ qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
+ ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
+ qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
+
+ // hev_mask
+ temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
+ temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
+ hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
+
+ // add outer taps if we have high edge variance
+ filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
+ filter_s8 = vand_s8(filter_s8, hev_8x8);
+
+ // inner taps
+ temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
+ filter_s16 = vmovl_s8(filter_s8);
+ filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
+ filter_s8 = vqmovn_s16(filter_s16);
+ filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
+
+ filter1_s8 = vqadd_s8(filter_s8, val_4);
+ filter2_s8 = vqadd_s8(filter_s8, val_3);
+ filter1_s8 = vshr_n_s8(filter1_s8, 3);
+ filter2_s8 = vshr_n_s8(filter2_s8, 3);
+
+ oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
+ op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
+
+ hev_8x8 = vmvn_s8(hev_8x8);
+ filter_s8 = vrshr_n_s8(filter1_s8, 1);
+ filter_s8 = vand_s8(filter_s8, hev_8x8);
+
+ oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
+ op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
+
+ out_f4_pq0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
+ out_f4_pq1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
+ }
+ {
+ // filter 8
+ uint16x8_t out_pq0, out_pq1, out_pq2;
+ uint8x8_t q0p0, q1p1, q2p2;
+
+ out = vaddl_u8(*p3q3, *p2q2);
+ out = vaddw_u8(out, *p1q1);
+ out = vaddw_u8(out, *p0q0);
+
+ // reverse p and q
+ q0p0 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p0q0)));
+ q1p1 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p1q1)));
+ q2p2 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p2q2)));
+
+ out = vaddw_u8(out, q0p0);
+ out_pq1 = vaddw_u8(out, *p3q3);
+ out_pq2 = vaddw_u8(out_pq1, *p3q3);
+ out_pq2 = vaddw_u8(out_pq2, *p2q2);
+ out_pq1 = vaddw_u8(out_pq1, *p1q1);
+ out_pq1 = vaddw_u8(out_pq1, q1p1);
+
+ out_pq0 = vaddw_u8(out, *p0q0);
+ out_pq0 = vaddw_u8(out_pq0, q1p1);
+ out_pq0 = vaddw_u8(out_pq0, q2p2);
+
+ out_f7_pq0 = vrshrn_n_u16(out_pq0, 3);
+ out_f7_pq1 = vrshrn_n_u16(out_pq1, 3);
+ out_f7_pq2 = vrshrn_n_u16(out_pq2, 3);
+ }
+ {
+ uint8x8_t filter4_cond, filter8_cond;
+ filter8_cond = vand_u8(flat_8x8, mask_8x8);
+ filter4_cond = vmvn_u8(filter8_cond);
+
+ // filter4 outputs
+ *p0q0 = vbsl_u8(filter4_cond, out_f4_pq0, *p0q0);
+ *p1q1 = vbsl_u8(filter4_cond, out_f4_pq1, *p1q1);
+
+ // filter8 outputs
+ *p0q0 = vbsl_u8(filter8_cond, out_f7_pq0, *p0q0);
+ *p1q1 = vbsl_u8(filter8_cond, out_f7_pq1, *p1q1);
+ *p2q2 = vbsl_u8(filter8_cond, out_f7_pq2, *p2q2);
+ }
+}
+
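+// Same structure as lpf_8_neon, but for a p2..q2 edge: the filter4 and
+// filter6 outputs are blended using the 3-sample chroma mask and flatness
+// test.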
+static void lpf_6_neon(uint8x8_t *p2q2, uint8x8_t *p1q1, uint8x8_t *p0q0,
+ const uint8_t blimit, const uint8_t limit,
+ const uint8_t thresh) {
+ uint16x8_t out;
+ uint8x8_t out_f6_pq0, out_f6_pq1;
+ uint8x8_t out_f4_pq0, out_f4_pq1;
+ uint8x8_t mask_8x8, flat_8x8;
+
+ // Calculate filter masks
+ mask_8x8 = lpf_mask3_chroma(*p2q2, *p1q1, *p0q0, blimit, limit);
+ flat_8x8 = lpf_flat_mask3(*p2q2, *p1q1, *p0q0);
+ {
+ // filter 4
+ int32x2x2_t ps0_qs0, ps1_qs1;
+ int16x8_t filter_s16;
+ const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
+ uint8x8_t temp0_8x8, temp1_8x8;
+ int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
+ int8x8_t op0, oq0, op1, oq1;
+ int8x8_t pq_s0, pq_s1;
+ int8x8_t filter_s8, filter1_s8, filter2_s8;
+ int8x8_t hev_8x8;
+ const int8x8_t sign_mask = vdup_n_s8(0x80);
+ const int8x8_t val_4 = vdup_n_s8(4);
+ const int8x8_t val_3 = vdup_n_s8(3);
+
+ pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
+ pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
+
+ ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
+ ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
+ ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
+ qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
+ ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
+ qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
+
+ // hev_mask
+ temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
+ temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
+ hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
+
+ // add outer taps if we have high edge variance
+ filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
+ filter_s8 = vand_s8(filter_s8, hev_8x8);
+
+ // inner taps
+ temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
+ filter_s16 = vmovl_s8(filter_s8);
+ filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
+ filter_s8 = vqmovn_s16(filter_s16);
+ filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
+
+ filter1_s8 = vqadd_s8(filter_s8, val_4);
+ filter2_s8 = vqadd_s8(filter_s8, val_3);
+ filter1_s8 = vshr_n_s8(filter1_s8, 3);
+ filter2_s8 = vshr_n_s8(filter2_s8, 3);
+
+ oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
+ op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
+
+ filter_s8 = vrshr_n_s8(filter1_s8, 1);
+ filter_s8 = vbic_s8(filter_s8, hev_8x8);
+
+ oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
+ op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
+
+ out_f4_pq0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
+ out_f4_pq1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
+ }
+ {
+ // filter 6
+ uint16x8_t out_pq0, out_pq1;
+ uint8x8_t pq_rev;
+
+ out = vaddl_u8(*p0q0, *p1q1);
+ out = vaddq_u16(out, out);
+ out = vaddw_u8(out, *p2q2);
+
+ pq_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p0q0)));
+ out = vaddw_u8(out, pq_rev);
+
+ out_pq0 = vaddw_u8(out, pq_rev);
+ pq_rev = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(*p1q1)));
+ out_pq0 = vaddw_u8(out_pq0, pq_rev);
+
+ out_pq1 = vaddw_u8(out, *p2q2);
+ out_pq1 = vaddw_u8(out_pq1, *p2q2);
+
+ out_f6_pq0 = vrshrn_n_u16(out_pq0, 3);
+ out_f6_pq1 = vrshrn_n_u16(out_pq1, 3);
+ }
+ {
+ uint8x8_t filter4_cond, filter6_cond;
+ filter6_cond = vand_u8(flat_8x8, mask_8x8);
+ filter4_cond = vmvn_u8(filter6_cond);
+
+ // filter4 outputs
+ *p0q0 = vbsl_u8(filter4_cond, out_f4_pq0, *p0q0);
+ *p1q1 = vbsl_u8(filter4_cond, out_f4_pq1, *p1q1);
+
+ // filter6 outputs
+ *p0q0 = vbsl_u8(filter6_cond, out_f6_pq0, *p0q0);
+ *p1q1 = vbsl_u8(filter6_cond, out_f6_pq1, *p1q1);
+ }
+}
+
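+// Narrow loop filter: only the filter4 path is applied to p1q1/p0q0, gated by
+// the 2-sample filter mask and the high-edge-variance (hev) mask.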
+static void lpf_4_neon(uint8x8_t *p1q1, uint8x8_t *p0q0, const uint8_t blimit,
+ const uint8_t limit, const uint8_t thresh) {
+ int32x2x2_t ps0_qs0, ps1_qs1;
+ int16x8_t filter_s16;
+ const uint8x8_t thresh_f4 = vdup_n_u8(thresh);
+ uint8x8_t mask_8x8, temp0_8x8, temp1_8x8;
+ int8x8_t ps0_s8, ps1_s8, qs0_s8, qs1_s8, temp_s8;
+ int8x8_t op0, oq0, op1, oq1;
+ int8x8_t pq_s0, pq_s1;
+ int8x8_t filter_s8, filter1_s8, filter2_s8;
+ int8x8_t hev_8x8;
+ const int8x8_t sign_mask = vdup_n_s8(0x80);
+ const int8x8_t val_4 = vdup_n_s8(4);
+ const int8x8_t val_3 = vdup_n_s8(3);
+
+ // Calculate filter mask
+ mask_8x8 = lpf_mask2(*p1q1, *p0q0, blimit, limit);
+
+ pq_s0 = veor_s8(vreinterpret_s8_u8(*p0q0), sign_mask);
+ pq_s1 = veor_s8(vreinterpret_s8_u8(*p1q1), sign_mask);
+
+ ps0_qs0 = vtrn_s32(vreinterpret_s32_s8(pq_s0), vreinterpret_s32_s8(pq_s0));
+ ps1_qs1 = vtrn_s32(vreinterpret_s32_s8(pq_s1), vreinterpret_s32_s8(pq_s1));
+ ps0_s8 = vreinterpret_s8_s32(ps0_qs0.val[0]);
+ qs0_s8 = vreinterpret_s8_s32(ps0_qs0.val[1]);
+ ps1_s8 = vreinterpret_s8_s32(ps1_qs1.val[0]);
+ qs1_s8 = vreinterpret_s8_s32(ps1_qs1.val[1]);
+
+ // hev_mask
+ temp0_8x8 = vcgt_u8(vabd_u8(*p0q0, *p1q1), thresh_f4);
+ temp1_8x8 = vreinterpret_u8_u32(vrev64_u32(vreinterpret_u32_u8(temp0_8x8)));
+ hev_8x8 = vreinterpret_s8_u8(vorr_u8(temp0_8x8, temp1_8x8));
+
+ // add outer taps if we have high edge variance
+ filter_s8 = vqsub_s8(ps1_s8, qs1_s8);
+ filter_s8 = vand_s8(filter_s8, hev_8x8);
+
+ // inner taps
+ temp_s8 = vqsub_s8(qs0_s8, ps0_s8);
+ filter_s16 = vmovl_s8(filter_s8);
+ filter_s16 = vmlal_s8(filter_s16, temp_s8, val_3);
+ filter_s8 = vqmovn_s16(filter_s16);
+ filter_s8 = vand_s8(filter_s8, vreinterpret_s8_u8(mask_8x8));
+
+ filter1_s8 = vqadd_s8(filter_s8, val_4);
+ filter2_s8 = vqadd_s8(filter_s8, val_3);
+ filter1_s8 = vshr_n_s8(filter1_s8, 3);
+ filter2_s8 = vshr_n_s8(filter2_s8, 3);
+
+ oq0 = veor_s8(vqsub_s8(qs0_s8, filter1_s8), sign_mask);
+ op0 = veor_s8(vqadd_s8(ps0_s8, filter2_s8), sign_mask);
+
+ filter_s8 = vrshr_n_s8(filter1_s8, 1);
+ filter_s8 = vbic_s8(filter_s8, hev_8x8);
+
+ oq1 = veor_s8(vqsub_s8(qs1_s8, filter_s8), sign_mask);
+ op1 = veor_s8(vqadd_s8(ps1_s8, filter_s8), sign_mask);
+
+ *p0q0 = vreinterpret_u8_s8(vext_s8(op0, oq0, 4));
+ *p1q1 = vreinterpret_u8_s8(vext_s8(op1, oq1, 4));
+}
+
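+// The vertical variants below filter an edge between columns: 4-row tiles are
+// loaded, transposed into the packed p/q-pair layout, filtered with the shared
+// lpf_*_neon kernels, then transposed back before being stored.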
+void aom_lpf_vertical_14_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint8x16_t row0, row1, row2, row3;
+ uint8x8_t pxp3, p6p2, p5p1, p4p0;
+ uint8x8_t q0q4, q1q5, q2q6, q3qy;
+ uint32x2x2_t p6q6_p2q2, p5q5_p1q1, p4q4_p0q0, pxqx_p3q3;
+ uint32x2_t pq_rev;
+ uint8x8_t p0q0, p1q1, p2q2, p3q3, p4q4, p5q5, p6q6;
+
+ // row0: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
+ // row1: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
+ // row2: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
+ // row3: x p6 p5 p4 p3 p2 p1 p0 | q0 q1 q2 q3 q4 q5 q6 y
+ load_u8_8x16(src - 8, stride, &row0, &row1, &row2, &row3);
+
+ pxp3 = vget_low_u8(row0);
+ p6p2 = vget_low_u8(row1);
+ p5p1 = vget_low_u8(row2);
+ p4p0 = vget_low_u8(row3);
+ transpose_u8_8x4(&pxp3, &p6p2, &p5p1, &p4p0);
+
+ q0q4 = vget_high_u8(row0);
+ q1q5 = vget_high_u8(row1);
+ q2q6 = vget_high_u8(row2);
+ q3qy = vget_high_u8(row3);
+ transpose_u8_8x4(&q0q4, &q1q5, &q2q6, &q3qy);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(q3qy));
+ pxqx_p3q3 = vtrn_u32(vreinterpret_u32_u8(pxp3), pq_rev);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(q1q5));
+ p5q5_p1q1 = vtrn_u32(vreinterpret_u32_u8(p5p1), pq_rev);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(q0q4));
+ p4q4_p0q0 = vtrn_u32(vreinterpret_u32_u8(p4p0), pq_rev);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(q2q6));
+ p6q6_p2q2 = vtrn_u32(vreinterpret_u32_u8(p6p2), pq_rev);
+
+ p0q0 = vreinterpret_u8_u32(p4q4_p0q0.val[1]);
+ p1q1 = vreinterpret_u8_u32(p5q5_p1q1.val[1]);
+ p2q2 = vreinterpret_u8_u32(p6q6_p2q2.val[1]);
+ p3q3 = vreinterpret_u8_u32(pxqx_p3q3.val[1]);
+ p4q4 = vreinterpret_u8_u32(p4q4_p0q0.val[0]);
+ p5q5 = vreinterpret_u8_u32(p5q5_p1q1.val[0]);
+ p6q6 = vreinterpret_u8_u32(p6q6_p2q2.val[0]);
+
+ lpf_14_neon(&p6q6, &p5q5, &p4q4, &p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit,
+ *thresh);
+
+ pxqx_p3q3 = vtrn_u32(pxqx_p3q3.val[0], vreinterpret_u32_u8(p3q3));
+ p5q5_p1q1 = vtrn_u32(vreinterpret_u32_u8(p5q5), vreinterpret_u32_u8(p1q1));
+ p4q4_p0q0 = vtrn_u32(vreinterpret_u32_u8(p4q4), vreinterpret_u32_u8(p0q0));
+ p6q6_p2q2 = vtrn_u32(vreinterpret_u32_u8(p6q6), vreinterpret_u32_u8(p2q2));
+
+ pxqx_p3q3.val[1] = vrev64_u32(pxqx_p3q3.val[1]);
+ p5q5_p1q1.val[1] = vrev64_u32(p5q5_p1q1.val[1]);
+ p4q4_p0q0.val[1] = vrev64_u32(p4q4_p0q0.val[1]);
+ p6q6_p2q2.val[1] = vrev64_u32(p6q6_p2q2.val[1]);
+
+ q0q4 = vreinterpret_u8_u32(p4q4_p0q0.val[1]);
+ q1q5 = vreinterpret_u8_u32(p5q5_p1q1.val[1]);
+ q2q6 = vreinterpret_u8_u32(p6q6_p2q2.val[1]);
+ q3qy = vreinterpret_u8_u32(pxqx_p3q3.val[1]);
+ transpose_u8_8x4(&q0q4, &q1q5, &q2q6, &q3qy);
+
+ pxp3 = vreinterpret_u8_u32(pxqx_p3q3.val[0]);
+ p6p2 = vreinterpret_u8_u32(p6q6_p2q2.val[0]);
+ p5p1 = vreinterpret_u8_u32(p5q5_p1q1.val[0]);
+ p4p0 = vreinterpret_u8_u32(p4q4_p0q0.val[0]);
+ transpose_u8_8x4(&pxp3, &p6p2, &p5p1, &p4p0);
+
+ row0 = vcombine_u8(pxp3, q0q4);
+ row1 = vcombine_u8(p6p2, q1q5);
+ row2 = vcombine_u8(p5p1, q2q6);
+ row3 = vcombine_u8(p4p0, q3qy);
+
+ store_u8_8x16(src - 8, stride, row0, row1, row2, row3);
+}
+
+void aom_lpf_vertical_8_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint32x2x2_t p2q2_p1q1, p3q3_p0q0;
+ uint32x2_t pq_rev;
+ uint8x8_t p3q0, p2q1, p1q2, p0q3;
+ uint8x8_t p0q0, p1q1, p2q2, p3q3;
+
+ // row0: p3 p2 p1 p0 | q0 q1 q2 q3
+ // row1: p3 p2 p1 p0 | q0 q1 q2 q3
+ // row2: p3 p2 p1 p0 | q0 q1 q2 q3
+ // row3: p3 p2 p1 p0 | q0 q1 q2 q3
+ load_u8_8x4(src - 4, stride, &p3q0, &p2q1, &p1q2, &p0q3);
+
+ transpose_u8_8x4(&p3q0, &p2q1, &p1q2, &p0q3);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p0q3));
+ p3q3_p0q0 = vtrn_u32(vreinterpret_u32_u8(p3q0), pq_rev);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q2));
+ p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q1), pq_rev);
+
+ p0q0 = vreinterpret_u8_u32(vrev64_u32(p3q3_p0q0.val[1]));
+ p1q1 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
+ p2q2 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
+ p3q3 = vreinterpret_u8_u32(p3q3_p0q0.val[0]);
+
+ lpf_8_neon(&p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p0q0));
+ p3q3_p0q0 = vtrn_u32(vreinterpret_u32_u8(p3q3), pq_rev);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q1));
+ p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q2), pq_rev);
+
+ p0q3 = vreinterpret_u8_u32(vrev64_u32(p3q3_p0q0.val[1]));
+ p1q2 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
+ p2q1 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
+ p3q0 = vreinterpret_u8_u32(p3q3_p0q0.val[0]);
+ transpose_u8_8x4(&p3q0, &p2q1, &p1q2, &p0q3);
+
+ store_u8_8x4(src - 4, stride, p3q0, p2q1, p1q2, p0q3);
+}
+
+void aom_lpf_vertical_6_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint32x2x2_t p2q2_p1q1, pxqy_p0q0;
+ uint32x2_t pq_rev;
+ uint8x8_t pxq0, p2q1, p1q2, p0qy;
+ uint8x8_t p0q0, p1q1, p2q2, pxqy;
+
+ // row0: px p2 p1 p0 | q0 q1 q2 qy
+ // row1: px p2 p1 p0 | q0 q1 q2 qy
+ // row2: px p2 p1 p0 | q0 q1 q2 qy
+ // row3: px p2 p1 p0 | q0 q1 q2 qy
+ load_u8_8x4(src - 4, stride, &pxq0, &p2q1, &p1q2, &p0qy);
+
+ transpose_u8_8x4(&pxq0, &p2q1, &p1q2, &p0qy);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p0qy));
+ pxqy_p0q0 = vtrn_u32(vreinterpret_u32_u8(pxq0), pq_rev);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q2));
+ p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q1), pq_rev);
+
+ p0q0 = vreinterpret_u8_u32(vrev64_u32(pxqy_p0q0.val[1]));
+ p1q1 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
+ p2q2 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
+ pxqy = vreinterpret_u8_u32(pxqy_p0q0.val[0]);
+
+ lpf_6_neon(&p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p0q0));
+ pxqy_p0q0 = vtrn_u32(vreinterpret_u32_u8(pxqy), pq_rev);
+
+ pq_rev = vrev64_u32(vreinterpret_u32_u8(p1q1));
+ p2q2_p1q1 = vtrn_u32(vreinterpret_u32_u8(p2q2), pq_rev);
+
+ p0qy = vreinterpret_u8_u32(vrev64_u32(pxqy_p0q0.val[1]));
+ p1q2 = vreinterpret_u8_u32(vrev64_u32(p2q2_p1q1.val[1]));
+ p2q1 = vreinterpret_u8_u32(p2q2_p1q1.val[0]);
+ pxq0 = vreinterpret_u8_u32(pxqy_p0q0.val[0]);
+ transpose_u8_8x4(&pxq0, &p2q1, &p1q2, &p0qy);
+
+ store_u8_8x4(src - 4, stride, pxq0, p2q1, p1q2, p0qy);
+}
+
+void aom_lpf_vertical_4_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint32x2x2_t p1q0_p0q1, p1q1_p0q0, p1p0_q1q0;
+ uint32x2_t pq_rev;
+ uint8x8_t UNINITIALIZED_IS_SAFE(p1p0), q0q1, p0q0, p1q1;
+
+ // row0: p1 p0 | q0 q1
+ // row1: p1 p0 | q0 q1
+ // row2: p1 p0 | q0 q1
+ // row3: p1 p0 | q0 q1
+ load_u8_4x1(src - 2, &p1p0, 0);
+ load_u8_4x1((src - 2) + 1 * stride, &p1p0, 1);
+ load_u8_4x1((src - 2) + 2 * stride, &q0q1, 0);
+ load_u8_4x1((src - 2) + 3 * stride, &q0q1, 1);
+
+ transpose_u8_4x4(&p1p0, &q0q1);
+
+ p1q0_p0q1 = vtrn_u32(vreinterpret_u32_u8(p1p0), vreinterpret_u32_u8(q0q1));
+
+ pq_rev = vrev64_u32(p1q0_p0q1.val[1]);
+ p1q1_p0q0 = vtrn_u32(p1q0_p0q1.val[0], pq_rev);
+
+ p1q1 = vreinterpret_u8_u32(p1q1_p0q0.val[0]);
+ p0q0 = vreinterpret_u8_u32(p1q1_p0q0.val[1]);
+
+ lpf_4_neon(&p1q1, &p0q0, *blimit, *limit, *thresh);
+
+ p1p0_q1q0 = vtrn_u32(vreinterpret_u32_u8(p1q1), vreinterpret_u32_u8(p0q0));
+
+ p1p0 = vreinterpret_u8_u32(p1p0_q1q0.val[0]);
+ q0q1 = vreinterpret_u8_u32(vrev64_u32(p1p0_q1q0.val[1]));
+
+ transpose_u8_4x4(&p1p0, &q0q1);
+
+ store_u8_4x1(src - 2, p1p0, 0);
+ store_u8_4x1((src - 2) + 1 * stride, q0q1, 0);
+ store_u8_4x1((src - 2) + 2 * stride, p1p0, 1);
+ store_u8_4x1((src - 2) + 3 * stride, q0q1, 1);
+}
+
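+// The horizontal variants filter an edge between rows. The 4-pixel groups on
+// either side of the edge are loaded straight into the low and high halves of
+// the packed p/q vectors, so no transpose is required.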
+void aom_lpf_horizontal_14_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint8x8_t p0q0, p1q1, p2q2, p3q3, p4q4, p5q5, UNINITIALIZED_IS_SAFE(p6q6);
+
+ load_u8_4x1(src - 7 * stride, &p6q6, 0);
+ load_u8_4x1(src - 6 * stride, &p5q5, 0);
+ load_u8_4x1(src - 5 * stride, &p4q4, 0);
+ load_u8_4x1(src - 4 * stride, &p3q3, 0);
+ load_u8_4x1(src - 3 * stride, &p2q2, 0);
+ load_u8_4x1(src - 2 * stride, &p1q1, 0);
+ load_u8_4x1(src - 1 * stride, &p0q0, 0);
+ load_u8_4x1(src + 0 * stride, &p0q0, 1);
+ load_u8_4x1(src + 1 * stride, &p1q1, 1);
+ load_u8_4x1(src + 2 * stride, &p2q2, 1);
+ load_u8_4x1(src + 3 * stride, &p3q3, 1);
+ load_u8_4x1(src + 4 * stride, &p4q4, 1);
+ load_u8_4x1(src + 5 * stride, &p5q5, 1);
+ load_u8_4x1(src + 6 * stride, &p6q6, 1);
+
+ lpf_14_neon(&p6q6, &p5q5, &p4q4, &p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit,
+ *thresh);
+
+ store_u8_4x1(src - 6 * stride, p5q5, 0);
+ store_u8_4x1(src - 5 * stride, p4q4, 0);
+ store_u8_4x1(src - 4 * stride, p3q3, 0);
+ store_u8_4x1(src - 3 * stride, p2q2, 0);
+ store_u8_4x1(src - 2 * stride, p1q1, 0);
+ store_u8_4x1(src - 1 * stride, p0q0, 0);
+ store_u8_4x1(src + 0 * stride, p0q0, 1);
+ store_u8_4x1(src + 1 * stride, p1q1, 1);
+ store_u8_4x1(src + 2 * stride, p2q2, 1);
+ store_u8_4x1(src + 3 * stride, p3q3, 1);
+ store_u8_4x1(src + 4 * stride, p4q4, 1);
+ store_u8_4x1(src + 5 * stride, p5q5, 1);
+}
+
+void aom_lpf_horizontal_8_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint8x8_t p0q0, p1q1, p2q2, p3q3;
+
+ p3q3 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 4 * stride)));
+ p2q2 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 3 * stride)));
+ p1q1 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 2 * stride)));
+ p0q0 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 1 * stride)));
+ p0q0 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 0 * stride),
+ vreinterpret_u32_u8(p0q0), 1));
+ p1q1 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 1 * stride),
+ vreinterpret_u32_u8(p1q1), 1));
+ p2q2 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 2 * stride),
+ vreinterpret_u32_u8(p2q2), 1));
+ p3q3 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 3 * stride),
+ vreinterpret_u32_u8(p3q3), 1));
+
+ lpf_8_neon(&p3q3, &p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
+
+ vst1_lane_u32((uint32_t *)(src - 4 * stride), vreinterpret_u32_u8(p3q3), 0);
+ vst1_lane_u32((uint32_t *)(src - 3 * stride), vreinterpret_u32_u8(p2q2), 0);
+ vst1_lane_u32((uint32_t *)(src - 2 * stride), vreinterpret_u32_u8(p1q1), 0);
+ vst1_lane_u32((uint32_t *)(src - 1 * stride), vreinterpret_u32_u8(p0q0), 0);
+ vst1_lane_u32((uint32_t *)(src + 0 * stride), vreinterpret_u32_u8(p0q0), 1);
+ vst1_lane_u32((uint32_t *)(src + 1 * stride), vreinterpret_u32_u8(p1q1), 1);
+ vst1_lane_u32((uint32_t *)(src + 2 * stride), vreinterpret_u32_u8(p2q2), 1);
+ vst1_lane_u32((uint32_t *)(src + 3 * stride), vreinterpret_u32_u8(p3q3), 1);
+}
+
+void aom_lpf_horizontal_6_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint8x8_t p0q0, p1q1, p2q2;
+
+ p2q2 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 3 * stride)));
+ p1q1 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 2 * stride)));
+ p0q0 = vreinterpret_u8_u32(vld1_dup_u32((uint32_t *)(src - 1 * stride)));
+ p0q0 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 0 * stride),
+ vreinterpret_u32_u8(p0q0), 1));
+ p1q1 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 1 * stride),
+ vreinterpret_u32_u8(p1q1), 1));
+ p2q2 = vreinterpret_u8_u32(vld1_lane_u32((uint32_t *)(src + 2 * stride),
+ vreinterpret_u32_u8(p2q2), 1));
+
+ lpf_6_neon(&p2q2, &p1q1, &p0q0, *blimit, *limit, *thresh);
+
+ vst1_lane_u32((uint32_t *)(src - 3 * stride), vreinterpret_u32_u8(p2q2), 0);
+ vst1_lane_u32((uint32_t *)(src - 2 * stride), vreinterpret_u32_u8(p1q1), 0);
+ vst1_lane_u32((uint32_t *)(src - 1 * stride), vreinterpret_u32_u8(p0q0), 0);
+ vst1_lane_u32((uint32_t *)(src + 0 * stride), vreinterpret_u32_u8(p0q0), 1);
+ vst1_lane_u32((uint32_t *)(src + 1 * stride), vreinterpret_u32_u8(p1q1), 1);
+ vst1_lane_u32((uint32_t *)(src + 2 * stride), vreinterpret_u32_u8(p2q2), 1);
+}
+
+void aom_lpf_horizontal_4_neon(uint8_t *src, int stride, const uint8_t *blimit,
+ const uint8_t *limit, const uint8_t *thresh) {
+ uint8x8_t p0q0, UNINITIALIZED_IS_SAFE(p1q1);
+
+ load_u8_4x1(src - 2 * stride, &p1q1, 0);
+ load_u8_4x1(src - 1 * stride, &p0q0, 0);
+ load_u8_4x1(src + 0 * stride, &p0q0, 1);
+ load_u8_4x1(src + 1 * stride, &p1q1, 1);
+
+ lpf_4_neon(&p1q1, &p0q0, *blimit, *limit, *thresh);
+
+ store_u8_4x1(src - 2 * stride, p1q1, 0);
+ store_u8_4x1(src - 1 * stride, p0q0, 0);
+ store_u8_4x1(src + 0 * stride, p0q0, 1);
+ store_u8_4x1(src + 1 * stride, p1q1, 1);
+}
diff --git a/third_party/aom/aom_dsp/arm/sad4d_neon.c b/third_party/aom/aom_dsp/arm/sad4d_neon.c
new file mode 100644
index 0000000000..606950ab25
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/sad4d_neon.c
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+
+#include "aom/aom_integer.h"
+
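+// Reduce two uint16x8 SAD accumulators to a single 32-bit total by widening
+// to 32-bit lanes and pairwise-adding across the vector.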
+static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
+ const uint16x8_t vec_hi) {
+ const uint32x4_t vec_l_lo =
+ vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo));
+ const uint32x4_t vec_l_hi =
+ vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
+ const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
+ const uint64x2_t b = vpaddlq_u32(a);
+ const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+ return vget_lane_u32(c, 0);
+}
+
+// Calculate the absolute difference between the 64 source bytes held in
+// vec_src_00, vec_src_16, vec_src_32 and vec_src_48 and the 64 bytes at ref.
+// Accumulate the partial sums in vec_sum_ref_lo and vec_sum_ref_hi.
+static void sad_neon_64(const uint8x16_t vec_src_00,
+ const uint8x16_t vec_src_16,
+ const uint8x16_t vec_src_32,
+ const uint8x16_t vec_src_48, const uint8_t *ref,
+ uint16x8_t *vec_sum_ref_lo,
+ uint16x8_t *vec_sum_ref_hi) {
+ const uint8x16_t vec_ref_00 = vld1q_u8(ref);
+ const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
+ const uint8x16_t vec_ref_32 = vld1q_u8(ref + 32);
+ const uint8x16_t vec_ref_48 = vld1q_u8(ref + 48);
+
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_00),
+ vget_low_u8(vec_ref_00));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_00),
+ vget_high_u8(vec_ref_00));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_16),
+ vget_low_u8(vec_ref_16));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_16),
+ vget_high_u8(vec_ref_16));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_32),
+ vget_low_u8(vec_ref_32));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_32),
+ vget_high_u8(vec_ref_32));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_48),
+ vget_low_u8(vec_ref_48));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_48),
+ vget_high_u8(vec_ref_48));
+}
+
+// Calculate the absolute difference between the 32 source bytes held in
+// vec_src_00 and vec_src_16 and the 32 bytes at ref. Accumulate the partial
+// sums in vec_sum_ref_lo and vec_sum_ref_hi.
+static void sad_neon_32(const uint8x16_t vec_src_00,
+ const uint8x16_t vec_src_16, const uint8_t *ref,
+ uint16x8_t *vec_sum_ref_lo,
+ uint16x8_t *vec_sum_ref_hi) {
+ const uint8x16_t vec_ref_00 = vld1q_u8(ref);
+ const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
+
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_00),
+ vget_low_u8(vec_ref_00));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_00),
+ vget_high_u8(vec_ref_00));
+ *vec_sum_ref_lo = vabal_u8(*vec_sum_ref_lo, vget_low_u8(vec_src_16),
+ vget_low_u8(vec_ref_16));
+ *vec_sum_ref_hi = vabal_u8(*vec_sum_ref_hi, vget_high_u8(vec_src_16),
+ vget_high_u8(vec_ref_16));
+}
+
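+// Compute the SAD of one 64x64 source block against four reference blocks in
+// a single pass over the source rows.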
+void aom_sad64x64x4d_neon(const uint8_t *src, int src_stride,
+ const uint8_t *const ref[4], int ref_stride,
+ uint32_t *res) {
+ int i;
+ uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+ const uint8_t *ref0, *ref1, *ref2, *ref3;
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+
+ for (i = 0; i < 64; ++i) {
+ const uint8x16_t vec_src_00 = vld1q_u8(src);
+ const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
+ const uint8x16_t vec_src_32 = vld1q_u8(src + 32);
+ const uint8x16_t vec_src_48 = vld1q_u8(src + 48);
+
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref0,
+ &vec_sum_ref0_lo, &vec_sum_ref0_hi);
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref1,
+ &vec_sum_ref1_lo, &vec_sum_ref1_hi);
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref2,
+ &vec_sum_ref2_lo, &vec_sum_ref2_hi);
+ sad_neon_64(vec_src_00, vec_src_16, vec_src_32, vec_src_48, ref3,
+ &vec_sum_ref3_lo, &vec_sum_ref3_hi);
+
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+
+ res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+ res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+ res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+ res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
+
+void aom_sad32x32x4d_neon(const uint8_t *src, int src_stride,
+ const uint8_t *const ref[4], int ref_stride,
+ uint32_t *res) {
+ int i;
+ uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+ const uint8_t *ref0, *ref1, *ref2, *ref3;
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+
+ for (i = 0; i < 32; ++i) {
+ const uint8x16_t vec_src_00 = vld1q_u8(src);
+ const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
+
+ sad_neon_32(vec_src_00, vec_src_16, ref0, &vec_sum_ref0_lo,
+ &vec_sum_ref0_hi);
+ sad_neon_32(vec_src_00, vec_src_16, ref1, &vec_sum_ref1_lo,
+ &vec_sum_ref1_hi);
+ sad_neon_32(vec_src_00, vec_src_16, ref2, &vec_sum_ref2_lo,
+ &vec_sum_ref2_hi);
+ sad_neon_32(vec_src_00, vec_src_16, ref3, &vec_sum_ref3_lo,
+ &vec_sum_ref3_hi);
+
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+
+ res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+ res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+ res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+ res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
+
+void aom_sad16x16x4d_neon(const uint8_t *src, int src_stride,
+ const uint8_t *const ref[4], int ref_stride,
+ uint32_t *res) {
+ int i;
+ uint16x8_t vec_sum_ref0_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref0_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref1_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref2_hi = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_lo = vdupq_n_u16(0);
+ uint16x8_t vec_sum_ref3_hi = vdupq_n_u16(0);
+ const uint8_t *ref0, *ref1, *ref2, *ref3;
+ ref0 = ref[0];
+ ref1 = ref[1];
+ ref2 = ref[2];
+ ref3 = ref[3];
+
+ for (i = 0; i < 16; ++i) {
+ const uint8x16_t vec_src = vld1q_u8(src);
+ const uint8x16_t vec_ref0 = vld1q_u8(ref0);
+ const uint8x16_t vec_ref1 = vld1q_u8(ref1);
+ const uint8x16_t vec_ref2 = vld1q_u8(ref2);
+ const uint8x16_t vec_ref3 = vld1q_u8(ref3);
+
+ vec_sum_ref0_lo =
+ vabal_u8(vec_sum_ref0_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref0));
+ vec_sum_ref0_hi = vabal_u8(vec_sum_ref0_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref0));
+ vec_sum_ref1_lo =
+ vabal_u8(vec_sum_ref1_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref1));
+ vec_sum_ref1_hi = vabal_u8(vec_sum_ref1_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref1));
+ vec_sum_ref2_lo =
+ vabal_u8(vec_sum_ref2_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref2));
+ vec_sum_ref2_hi = vabal_u8(vec_sum_ref2_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref2));
+ vec_sum_ref3_lo =
+ vabal_u8(vec_sum_ref3_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref3));
+ vec_sum_ref3_hi = vabal_u8(vec_sum_ref3_hi, vget_high_u8(vec_src),
+ vget_high_u8(vec_ref3));
+
+ src += src_stride;
+ ref0 += ref_stride;
+ ref1 += ref_stride;
+ ref2 += ref_stride;
+ ref3 += ref_stride;
+ }
+
+ res[0] = horizontal_long_add_16x8(vec_sum_ref0_lo, vec_sum_ref0_hi);
+ res[1] = horizontal_long_add_16x8(vec_sum_ref1_lo, vec_sum_ref1_hi);
+ res[2] = horizontal_long_add_16x8(vec_sum_ref2_lo, vec_sum_ref2_hi);
+ res[3] = horizontal_long_add_16x8(vec_sum_ref3_lo, vec_sum_ref3_hi);
+}
diff --git a/third_party/aom/aom_dsp/arm/sad_neon.c b/third_party/aom/aom_dsp/arm/sad_neon.c
new file mode 100644
index 0000000000..a39de91d60
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/sad_neon.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+
+#include "aom/aom_integer.h"
+
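+// The small-block SAD kernels below seed the accumulator with vabdl_u8 on the
+// first row and accumulate the remaining rows with vabal_u8; the 16-bit
+// partial sums are then reduced to a scalar with pairwise long additions.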
+unsigned int aom_sad8x16_neon(unsigned char *src_ptr, int src_stride,
+ unsigned char *ref_ptr, int ref_stride) {
+ uint8x8_t d0, d8;
+ uint16x8_t q12;
+ uint32x4_t q1;
+ uint64x2_t q3;
+ uint32x2_t d5;
+ int i;
+
+ d0 = vld1_u8(src_ptr);
+ src_ptr += src_stride;
+ d8 = vld1_u8(ref_ptr);
+ ref_ptr += ref_stride;
+ q12 = vabdl_u8(d0, d8);
+
+ for (i = 0; i < 15; i++) {
+ d0 = vld1_u8(src_ptr);
+ src_ptr += src_stride;
+ d8 = vld1_u8(ref_ptr);
+ ref_ptr += ref_stride;
+ q12 = vabal_u8(q12, d0, d8);
+ }
+
+ q1 = vpaddlq_u16(q12);
+ q3 = vpaddlq_u32(q1);
+ d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
+ vreinterpret_u32_u64(vget_high_u64(q3)));
+
+ return vget_lane_u32(d5, 0);
+}
+
+unsigned int aom_sad4x4_neon(unsigned char *src_ptr, int src_stride,
+ unsigned char *ref_ptr, int ref_stride) {
+ uint8x8_t d0, d8;
+ uint16x8_t q12;
+ uint32x2_t d1;
+ uint64x1_t d3;
+ int i;
+
+ d0 = vld1_u8(src_ptr);
+ src_ptr += src_stride;
+ d8 = vld1_u8(ref_ptr);
+ ref_ptr += ref_stride;
+ q12 = vabdl_u8(d0, d8);
+
+ for (i = 0; i < 3; i++) {
+ d0 = vld1_u8(src_ptr);
+ src_ptr += src_stride;
+ d8 = vld1_u8(ref_ptr);
+ ref_ptr += ref_stride;
+ q12 = vabal_u8(q12, d0, d8);
+ }
+
+ d1 = vpaddl_u16(vget_low_u16(q12));
+ d3 = vpaddl_u32(d1);
+
+ return vget_lane_u32(vreinterpret_u32_u64(d3), 0);
+}
+
+unsigned int aom_sad16x8_neon(unsigned char *src_ptr, int src_stride,
+ unsigned char *ref_ptr, int ref_stride) {
+ uint8x16_t q0, q4;
+ uint16x8_t q12, q13;
+ uint32x4_t q1;
+ uint64x2_t q3;
+ uint32x2_t d5;
+ int i;
+
+ q0 = vld1q_u8(src_ptr);
+ src_ptr += src_stride;
+ q4 = vld1q_u8(ref_ptr);
+ ref_ptr += ref_stride;
+ q12 = vabdl_u8(vget_low_u8(q0), vget_low_u8(q4));
+ q13 = vabdl_u8(vget_high_u8(q0), vget_high_u8(q4));
+
+ for (i = 0; i < 7; i++) {
+ q0 = vld1q_u8(src_ptr);
+ src_ptr += src_stride;
+ q4 = vld1q_u8(ref_ptr);
+ ref_ptr += ref_stride;
+ q12 = vabal_u8(q12, vget_low_u8(q0), vget_low_u8(q4));
+ q13 = vabal_u8(q13, vget_high_u8(q0), vget_high_u8(q4));
+ }
+
+ q12 = vaddq_u16(q12, q13);
+ q1 = vpaddlq_u16(q12);
+ q3 = vpaddlq_u32(q1);
+ d5 = vadd_u32(vreinterpret_u32_u64(vget_low_u64(q3)),
+ vreinterpret_u32_u64(vget_high_u64(q3)));
+
+ return vget_lane_u32(d5, 0);
+}
+
+static INLINE unsigned int horizontal_long_add_16x8(const uint16x8_t vec_lo,
+ const uint16x8_t vec_hi) {
+ const uint32x4_t vec_l_lo =
+ vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo));
+ const uint32x4_t vec_l_hi =
+ vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
+ const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
+ const uint64x2_t b = vpaddlq_u32(a);
+ const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+ return vget_lane_u32(c, 0);
+}
+
+static INLINE unsigned int horizontal_add_16x8(const uint16x8_t vec_16x8) {
+ const uint32x4_t a = vpaddlq_u16(vec_16x8);
+ const uint64x2_t b = vpaddlq_u32(a);
+ const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
+ vreinterpret_u32_u64(vget_high_u64(b)));
+ return vget_lane_u32(c, 0);
+}
+
+unsigned int aom_sad64x64_neon(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride) {
+ int i;
+ uint16x8_t vec_accum_lo = vdupq_n_u16(0);
+ uint16x8_t vec_accum_hi = vdupq_n_u16(0);
+ for (i = 0; i < 64; ++i) {
+ const uint8x16_t vec_src_00 = vld1q_u8(src);
+ const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
+ const uint8x16_t vec_src_32 = vld1q_u8(src + 32);
+ const uint8x16_t vec_src_48 = vld1q_u8(src + 48);
+ const uint8x16_t vec_ref_00 = vld1q_u8(ref);
+ const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
+ const uint8x16_t vec_ref_32 = vld1q_u8(ref + 32);
+ const uint8x16_t vec_ref_48 = vld1q_u8(ref + 48);
+ src += src_stride;
+ ref += ref_stride;
+ vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src_00),
+ vget_low_u8(vec_ref_00));
+ vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_00),
+ vget_high_u8(vec_ref_00));
+ vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src_16),
+ vget_low_u8(vec_ref_16));
+ vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_16),
+ vget_high_u8(vec_ref_16));
+ vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src_32),
+ vget_low_u8(vec_ref_32));
+ vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_32),
+ vget_high_u8(vec_ref_32));
+ vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src_48),
+ vget_low_u8(vec_ref_48));
+ vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_48),
+ vget_high_u8(vec_ref_48));
+ }
+ return horizontal_long_add_16x8(vec_accum_lo, vec_accum_hi);
+}
+
+unsigned int aom_sad32x32_neon(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride) {
+ int i;
+ uint16x8_t vec_accum_lo = vdupq_n_u16(0);
+ uint16x8_t vec_accum_hi = vdupq_n_u16(0);
+
+ for (i = 0; i < 32; ++i) {
+ const uint8x16_t vec_src_00 = vld1q_u8(src);
+ const uint8x16_t vec_src_16 = vld1q_u8(src + 16);
+ const uint8x16_t vec_ref_00 = vld1q_u8(ref);
+ const uint8x16_t vec_ref_16 = vld1q_u8(ref + 16);
+ src += src_stride;
+ ref += ref_stride;
+ vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src_00),
+ vget_low_u8(vec_ref_00));
+ vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_00),
+ vget_high_u8(vec_ref_00));
+ vec_accum_lo = vabal_u8(vec_accum_lo, vget_low_u8(vec_src_16),
+ vget_low_u8(vec_ref_16));
+ vec_accum_hi = vabal_u8(vec_accum_hi, vget_high_u8(vec_src_16),
+ vget_high_u8(vec_ref_16));
+ }
+ return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
+}
+
+unsigned int aom_sad16x16_neon(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride) {
+ int i;
+ uint16x8_t vec_accum_lo = vdupq_n_u16(0);
+ uint16x8_t vec_accum_hi = vdupq_n_u16(0);
+
+ for (i = 0; i < 16; ++i) {
+ const uint8x16_t vec_src = vld1q_u8(src);
+ const uint8x16_t vec_ref = vld1q_u8(ref);
+ src += src_stride;
+ ref += ref_stride;
+ vec_accum_lo =
+ vabal_u8(vec_accum_lo, vget_low_u8(vec_src), vget_low_u8(vec_ref));
+ vec_accum_hi =
+ vabal_u8(vec_accum_hi, vget_high_u8(vec_src), vget_high_u8(vec_ref));
+ }
+ return horizontal_add_16x8(vaddq_u16(vec_accum_lo, vec_accum_hi));
+}
+
+unsigned int aom_sad8x8_neon(const uint8_t *src, int src_stride,
+ const uint8_t *ref, int ref_stride) {
+ int i;
+ uint16x8_t vec_accum = vdupq_n_u16(0);
+
+ for (i = 0; i < 8; ++i) {
+ const uint8x8_t vec_src = vld1_u8(src);
+ const uint8x8_t vec_ref = vld1_u8(ref);
+ src += src_stride;
+ ref += ref_stride;
+ vec_accum = vabal_u8(vec_accum, vec_src, vec_ref);
+ }
+ return horizontal_add_16x8(vec_accum);
+}
diff --git a/third_party/aom/aom_dsp/arm/subpel_variance_neon.c b/third_party/aom/aom_dsp/arm/subpel_variance_neon.c
new file mode 100644
index 0000000000..cf618eee77
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/subpel_variance_neon.c
@@ -0,0 +1,131 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_dsp_rtcd.h"
+#include "config/aom_config.h"
+
+#include "aom_ports/mem.h"
+#include "aom/aom_integer.h"
+
+#include "aom_dsp/aom_filter.h"
+#include "aom_dsp/variance.h"
+
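+// Apply a two-tap bilinear filter in one direction: each output pixel is
+// (src[0] * filter[0] + src[pixel_step] * filter[1]) with a rounding right
+// shift by FILTER_BITS. A pixel_step of 1 filters horizontally; a pixel_step
+// equal to the row width filters vertically.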
+static void var_filter_block2d_bil_w8(const uint8_t *src_ptr,
+ uint8_t *output_ptr,
+ unsigned int src_pixels_per_line,
+ int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const uint8_t *filter) {
+ const uint8x8_t f0 = vmov_n_u8(filter[0]);
+ const uint8x8_t f1 = vmov_n_u8(filter[1]);
+ unsigned int i;
+ for (i = 0; i < output_height; ++i) {
+ const uint8x8_t src_0 = vld1_u8(&src_ptr[0]);
+ const uint8x8_t src_1 = vld1_u8(&src_ptr[pixel_step]);
+ const uint16x8_t a = vmull_u8(src_0, f0);
+ const uint16x8_t b = vmlal_u8(a, src_1, f1);
+ const uint8x8_t out = vrshrn_n_u16(b, FILTER_BITS);
+ vst1_u8(&output_ptr[0], out);
+ // Next row...
+ src_ptr += src_pixels_per_line;
+ output_ptr += output_width;
+ }
+}
+
+static void var_filter_block2d_bil_w16(const uint8_t *src_ptr,
+ uint8_t *output_ptr,
+ unsigned int src_pixels_per_line,
+ int pixel_step,
+ unsigned int output_height,
+ unsigned int output_width,
+ const uint8_t *filter) {
+ const uint8x8_t f0 = vmov_n_u8(filter[0]);
+ const uint8x8_t f1 = vmov_n_u8(filter[1]);
+ unsigned int i, j;
+ for (i = 0; i < output_height; ++i) {
+ for (j = 0; j < output_width; j += 16) {
+ const uint8x16_t src_0 = vld1q_u8(&src_ptr[j]);
+ const uint8x16_t src_1 = vld1q_u8(&src_ptr[j + pixel_step]);
+ const uint16x8_t a = vmull_u8(vget_low_u8(src_0), f0);
+ const uint16x8_t b = vmlal_u8(a, vget_low_u8(src_1), f1);
+ const uint8x8_t out_lo = vrshrn_n_u16(b, FILTER_BITS);
+ const uint16x8_t c = vmull_u8(vget_high_u8(src_0), f0);
+ const uint16x8_t d = vmlal_u8(c, vget_high_u8(src_1), f1);
+ const uint8x8_t out_hi = vrshrn_n_u16(d, FILTER_BITS);
+ vst1q_u8(&output_ptr[j], vcombine_u8(out_lo, out_hi));
+ }
+ // Next row...
+ src_ptr += src_pixels_per_line;
+ output_ptr += output_width;
+ }
+}
+
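+// Sub-pixel variance: a horizontal bilinear pass writes (height + 1) rows
+// into fdata3, a vertical bilinear pass writes the final block into temp2,
+// and the full-pel variance kernel is then run on the filtered block.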
+unsigned int aom_sub_pixel_variance8x8_neon(const uint8_t *src, int src_stride,
+ int xoffset, int yoffset,
+ const uint8_t *dst, int dst_stride,
+ unsigned int *sse) {
+ DECLARE_ALIGNED(16, uint8_t, temp2[8 * 8]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[9 * 8]);
+
+ var_filter_block2d_bil_w8(src, fdata3, src_stride, 1, 9, 8,
+ bilinear_filters_2t[xoffset]);
+ var_filter_block2d_bil_w8(fdata3, temp2, 8, 8, 8, 8,
+ bilinear_filters_2t[yoffset]);
+ return aom_variance8x8_neon(temp2, 8, dst, dst_stride, sse);
+}
+
+unsigned int aom_sub_pixel_variance16x16_neon(const uint8_t *src,
+ int src_stride, int xoffset,
+ int yoffset, const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sse) {
+ DECLARE_ALIGNED(16, uint8_t, temp2[16 * 16]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[17 * 16]);
+
+ var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 17, 16,
+ bilinear_filters_2t[xoffset]);
+ var_filter_block2d_bil_w16(fdata3, temp2, 16, 16, 16, 16,
+ bilinear_filters_2t[yoffset]);
+ return aom_variance16x16_neon(temp2, 16, dst, dst_stride, sse);
+}
+
+unsigned int aom_sub_pixel_variance32x32_neon(const uint8_t *src,
+ int src_stride, int xoffset,
+ int yoffset, const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sse) {
+ DECLARE_ALIGNED(16, uint8_t, temp2[32 * 32]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[33 * 32]);
+
+ var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 33, 32,
+ bilinear_filters_2t[xoffset]);
+ var_filter_block2d_bil_w16(fdata3, temp2, 32, 32, 32, 32,
+ bilinear_filters_2t[yoffset]);
+ return aom_variance32x32_neon(temp2, 32, dst, dst_stride, sse);
+}
+
+unsigned int aom_sub_pixel_variance64x64_neon(const uint8_t *src,
+ int src_stride, int xoffset,
+ int yoffset, const uint8_t *dst,
+ int dst_stride,
+ unsigned int *sse) {
+ DECLARE_ALIGNED(16, uint8_t, temp2[64 * 64]);
+ DECLARE_ALIGNED(16, uint8_t, fdata3[65 * 64]);
+
+ var_filter_block2d_bil_w16(src, fdata3, src_stride, 1, 65, 64,
+ bilinear_filters_2t[xoffset]);
+ var_filter_block2d_bil_w16(fdata3, temp2, 64, 64, 64, 64,
+ bilinear_filters_2t[yoffset]);
+ return aom_variance64x64_neon(temp2, 64, dst, dst_stride, sse);
+}
diff --git a/third_party/aom/aom_dsp/arm/subtract_neon.c b/third_party/aom/aom_dsp/arm/subtract_neon.c
new file mode 100644
index 0000000000..28f5ace8e1
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/subtract_neon.c
@@ -0,0 +1,81 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_config.h"
+
+#include "aom/aom_integer.h"
+
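+// Compute the pixel-wise difference src - pred for a rows x cols block,
+// widening to 16 bits. Blocks wider than 16 are processed 32 bytes at a time,
+// 16-wide and 8-wide blocks are handled a full row per iteration, and
+// narrower blocks fall back to scalar code.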
+void aom_subtract_block_neon(int rows, int cols, int16_t *diff,
+ ptrdiff_t diff_stride, const uint8_t *src,
+ ptrdiff_t src_stride, const uint8_t *pred,
+ ptrdiff_t pred_stride) {
+ int r, c;
+
+ if (cols > 16) {
+ for (r = 0; r < rows; ++r) {
+ for (c = 0; c < cols; c += 32) {
+ const uint8x16_t v_src_00 = vld1q_u8(&src[c + 0]);
+ const uint8x16_t v_src_16 = vld1q_u8(&src[c + 16]);
+ const uint8x16_t v_pred_00 = vld1q_u8(&pred[c + 0]);
+ const uint8x16_t v_pred_16 = vld1q_u8(&pred[c + 16]);
+ const uint16x8_t v_diff_lo_00 =
+ vsubl_u8(vget_low_u8(v_src_00), vget_low_u8(v_pred_00));
+ const uint16x8_t v_diff_hi_00 =
+ vsubl_u8(vget_high_u8(v_src_00), vget_high_u8(v_pred_00));
+ const uint16x8_t v_diff_lo_16 =
+ vsubl_u8(vget_low_u8(v_src_16), vget_low_u8(v_pred_16));
+ const uint16x8_t v_diff_hi_16 =
+ vsubl_u8(vget_high_u8(v_src_16), vget_high_u8(v_pred_16));
+ vst1q_s16(&diff[c + 0], vreinterpretq_s16_u16(v_diff_lo_00));
+ vst1q_s16(&diff[c + 8], vreinterpretq_s16_u16(v_diff_hi_00));
+ vst1q_s16(&diff[c + 16], vreinterpretq_s16_u16(v_diff_lo_16));
+ vst1q_s16(&diff[c + 24], vreinterpretq_s16_u16(v_diff_hi_16));
+ }
+ diff += diff_stride;
+ pred += pred_stride;
+ src += src_stride;
+ }
+ } else if (cols > 8) {
+ for (r = 0; r < rows; ++r) {
+ const uint8x16_t v_src = vld1q_u8(&src[0]);
+ const uint8x16_t v_pred = vld1q_u8(&pred[0]);
+ const uint16x8_t v_diff_lo =
+ vsubl_u8(vget_low_u8(v_src), vget_low_u8(v_pred));
+ const uint16x8_t v_diff_hi =
+ vsubl_u8(vget_high_u8(v_src), vget_high_u8(v_pred));
+ vst1q_s16(&diff[0], vreinterpretq_s16_u16(v_diff_lo));
+ vst1q_s16(&diff[8], vreinterpretq_s16_u16(v_diff_hi));
+ diff += diff_stride;
+ pred += pred_stride;
+ src += src_stride;
+ }
+ } else if (cols > 4) {
+ for (r = 0; r < rows; ++r) {
+ const uint8x8_t v_src = vld1_u8(&src[0]);
+ const uint8x8_t v_pred = vld1_u8(&pred[0]);
+ const uint16x8_t v_diff = vsubl_u8(v_src, v_pred);
+ vst1q_s16(&diff[0], vreinterpretq_s16_u16(v_diff));
+ diff += diff_stride;
+ pred += pred_stride;
+ src += src_stride;
+ }
+ } else {
+ for (r = 0; r < rows; ++r) {
+ for (c = 0; c < cols; ++c) diff[c] = src[c] - pred[c];
+
+ diff += diff_stride;
+ pred += pred_stride;
+ src += src_stride;
+ }
+ }
+}
diff --git a/third_party/aom/aom_dsp/arm/variance_neon.c b/third_party/aom/aom_dsp/arm/variance_neon.c
new file mode 100644
index 0000000000..74385a6010
--- /dev/null
+++ b/third_party/aom/aom_dsp/arm/variance_neon.c
@@ -0,0 +1,400 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <arm_neon.h>
+
+#include "config/aom_dsp_rtcd.h"
+#include "config/aom_config.h"
+
+#include "aom/aom_integer.h"
+#include "aom_ports/mem.h"
+
+static INLINE int horizontal_add_s16x8(const int16x8_t v_16x8) {
+ const int32x4_t a = vpaddlq_s16(v_16x8);
+ const int64x2_t b = vpaddlq_s32(a);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
+static INLINE int horizontal_add_s32x4(const int32x4_t v_32x4) {
+ const int64x2_t b = vpaddlq_s32(v_32x4);
+ const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
+ vreinterpret_s32_s64(vget_high_s64(b)));
+ return vget_lane_s32(c, 0);
+}
+
+// w * h must be small enough that the 16-bit lanes of v_sum do not overflow:
+// each lane accumulates w * h / 8 differences of magnitude up to 255, so the
+// callers in this file keep w * h at or below 1024.
+static void variance_neon_w8(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, int w, int h, uint32_t *sse,
+ int *sum) {
+ int i, j;
+ int16x8_t v_sum = vdupq_n_s16(0);
+ int32x4_t v_sse_lo = vdupq_n_s32(0);
+ int32x4_t v_sse_hi = vdupq_n_s32(0);
+
+ for (i = 0; i < h; ++i) {
+ for (j = 0; j < w; j += 8) {
+ const uint8x8_t v_a = vld1_u8(&a[j]);
+ const uint8x8_t v_b = vld1_u8(&b[j]);
+ const uint16x8_t v_diff = vsubl_u8(v_a, v_b);
+ const int16x8_t sv_diff = vreinterpretq_s16_u16(v_diff);
+ v_sum = vaddq_s16(v_sum, sv_diff);
+ v_sse_lo =
+ vmlal_s16(v_sse_lo, vget_low_s16(sv_diff), vget_low_s16(sv_diff));
+ v_sse_hi =
+ vmlal_s16(v_sse_hi, vget_high_s16(sv_diff), vget_high_s16(sv_diff));
+ }
+ a += a_stride;
+ b += b_stride;
+ }
+
+ *sum = horizontal_add_s16x8(v_sum);
+ *sse = (unsigned int)horizontal_add_s32x4(vaddq_s32(v_sse_lo, v_sse_hi));
+}
+
+void aom_get8x8var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, unsigned int *sse, int *sum) {
+ variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, sum);
+}
+
+void aom_get16x16var_neon(const uint8_t *a, int a_stride, const uint8_t *b,
+ int b_stride, unsigned int *sse, int *sum) {
+ variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, sum);
+}
+
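+// The variance kernels below compute variance = sse - sum^2 / (w * h); the
+// division is a right shift since w * h is always a power of two here.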
+unsigned int aom_variance8x8_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_neon_w8(a, a_stride, b, b_stride, 8, 8, sse, &sum);
+ return *sse - ((sum * sum) >> 6);
+}
+
+unsigned int aom_variance16x16_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_neon_w8(a, a_stride, b, b_stride, 16, 16, sse, &sum);
+ return *sse - (((unsigned int)((int64_t)sum * sum)) >> 8);
+}
+
+unsigned int aom_variance32x32_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum;
+ variance_neon_w8(a, a_stride, b, b_stride, 32, 32, sse, &sum);
+ return *sse - (unsigned int)(((int64_t)sum * sum) >> 10);
+}
+
+unsigned int aom_variance32x64_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum1, sum2;
+ uint32_t sse1, sse2;
+ variance_neon_w8(a, a_stride, b, b_stride, 32, 32, &sse1, &sum1);
+ variance_neon_w8(a + (32 * a_stride), a_stride, b + (32 * b_stride), b_stride,
+ 32, 32, &sse2, &sum2);
+ *sse = sse1 + sse2;
+ sum1 += sum2;
+ return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
+}
+
+unsigned int aom_variance64x32_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum1, sum2;
+ uint32_t sse1, sse2;
+ variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
+ variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride,
+ 64, 16, &sse2, &sum2);
+ *sse = sse1 + sse2;
+ sum1 += sum2;
+ return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 11);
+}
+
+unsigned int aom_variance64x64_neon(const uint8_t *a, int a_stride,
+ const uint8_t *b, int b_stride,
+ unsigned int *sse) {
+ int sum1, sum2;
+ uint32_t sse1, sse2;
+
+ variance_neon_w8(a, a_stride, b, b_stride, 64, 16, &sse1, &sum1);
+ variance_neon_w8(a + (16 * a_stride), a_stride, b + (16 * b_stride), b_stride,
+ 64, 16, &sse2, &sum2);
+ sse1 += sse2;
+ sum1 += sum2;
+
+ variance_neon_w8(a + (16 * 2 * a_stride), a_stride, b + (16 * 2 * b_stride),
+ b_stride, 64, 16, &sse2, &sum2);
+ sse1 += sse2;
+ sum1 += sum2;
+
+ variance_neon_w8(a + (16 * 3 * a_stride), a_stride, b + (16 * 3 * b_stride),
+ b_stride, 64, 16, &sse2, &sum2);
+ *sse = sse1 + sse2;
+ sum1 += sum2;
+ return *sse - (unsigned int)(((int64_t)sum1 * sum1) >> 12);
+}
+
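+// The 16x8 and 8x16 kernels below accumulate the signed sum of differences in
+// q8s32 and the squared differences in q9s32/q10s32, then form
+// sse - sum^2 / 128 with a final right shift by 7.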
+unsigned int aom_variance16x8_neon(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride, unsigned int *sse) {
+ int i;
+ int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
+ uint32x2_t d0u32, d10u32;
+ int64x1_t d0s64, d1s64;
+ uint8x16_t q0u8, q1u8, q2u8, q3u8;
+ uint16x8_t q11u16, q12u16, q13u16, q14u16;
+ int32x4_t q8s32, q9s32, q10s32;
+ int64x2_t q0s64, q1s64, q5s64;
+
+ q8s32 = vdupq_n_s32(0);
+ q9s32 = vdupq_n_s32(0);
+ q10s32 = vdupq_n_s32(0);
+
+ for (i = 0; i < 4; i++) {
+ q0u8 = vld1q_u8(src_ptr);
+ src_ptr += source_stride;
+ q1u8 = vld1q_u8(src_ptr);
+ src_ptr += source_stride;
+ __builtin_prefetch(src_ptr);
+
+ q2u8 = vld1q_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ q3u8 = vld1q_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ __builtin_prefetch(ref_ptr);
+
+ q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
+ q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
+ q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
+ q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
+
+ d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+ d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+ q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
+ q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
+ q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
+
+ d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+ d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+ q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
+ q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
+ q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
+
+ d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
+ d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
+ q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q13u16));
+ q9s32 = vmlal_s16(q9s32, d26s16, d26s16);
+ q10s32 = vmlal_s16(q10s32, d27s16, d27s16);
+
+ d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
+ d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
+ q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q14u16));
+ q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
+ q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
+ }
+
+ q10s32 = vaddq_s32(q10s32, q9s32);
+ q0s64 = vpaddlq_s32(q8s32);
+ q1s64 = vpaddlq_s32(q10s32);
+
+ d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
+ d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+
+ q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64));
+ vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
+
+ d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
+ d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
+
+ return vget_lane_u32(d0u32, 0);
+}
+
+unsigned int aom_variance8x16_neon(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride, unsigned int *sse) {
+ int i;
+ uint8x8_t d0u8, d2u8, d4u8, d6u8;
+ int16x4_t d22s16, d23s16, d24s16, d25s16;
+ uint32x2_t d0u32, d10u32;
+ int64x1_t d0s64, d1s64;
+ uint16x8_t q11u16, q12u16;
+ int32x4_t q8s32, q9s32, q10s32;
+ int64x2_t q0s64, q1s64, q5s64;
+
+ q8s32 = vdupq_n_s32(0);
+ q9s32 = vdupq_n_s32(0);
+ q10s32 = vdupq_n_s32(0);
+
+ for (i = 0; i < 8; i++) {
+ d0u8 = vld1_u8(src_ptr);
+ src_ptr += source_stride;
+ d2u8 = vld1_u8(src_ptr);
+ src_ptr += source_stride;
+ __builtin_prefetch(src_ptr);
+
+ d4u8 = vld1_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ d6u8 = vld1_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ __builtin_prefetch(ref_ptr);
+
+ q11u16 = vsubl_u8(d0u8, d4u8);
+ q12u16 = vsubl_u8(d2u8, d6u8);
+
+ d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+ d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+ q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q11u16));
+ q9s32 = vmlal_s16(q9s32, d22s16, d22s16);
+ q10s32 = vmlal_s16(q10s32, d23s16, d23s16);
+
+ d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+ d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+ q8s32 = vpadalq_s16(q8s32, vreinterpretq_s16_u16(q12u16));
+ q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
+ q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
+ }
+
+ q10s32 = vaddq_s32(q10s32, q9s32);
+ q0s64 = vpaddlq_s32(q8s32);
+ q1s64 = vpaddlq_s32(q10s32);
+
+ d0s64 = vadd_s64(vget_low_s64(q0s64), vget_high_s64(q0s64));
+ d1s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+
+ q5s64 = vmull_s32(vreinterpret_s32_s64(d0s64), vreinterpret_s32_s64(d0s64));
+ vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d1s64), 0);
+
+ d10u32 = vshr_n_u32(vreinterpret_u32_s64(vget_low_s64(q5s64)), 7);
+ d0u32 = vsub_u32(vreinterpret_u32_s64(d1s64), d10u32);
+
+ return vget_lane_u32(d0u32, 0);
+}
+
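+// MSE here is the raw sum of squared differences over the 16x16 block (no
+// mean correction); the same value is written to *sse and returned.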
+unsigned int aom_mse16x16_neon(const unsigned char *src_ptr, int source_stride,
+ const unsigned char *ref_ptr, int recon_stride,
+ unsigned int *sse) {
+ int i;
+ int16x4_t d22s16, d23s16, d24s16, d25s16, d26s16, d27s16, d28s16, d29s16;
+ int64x1_t d0s64;
+ uint8x16_t q0u8, q1u8, q2u8, q3u8;
+ int32x4_t q7s32, q8s32, q9s32, q10s32;
+ uint16x8_t q11u16, q12u16, q13u16, q14u16;
+ int64x2_t q1s64;
+
+ q7s32 = vdupq_n_s32(0);
+ q8s32 = vdupq_n_s32(0);
+ q9s32 = vdupq_n_s32(0);
+ q10s32 = vdupq_n_s32(0);
+
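+ // MSE needs only the sum of squared differences, so unlike the variance
+ // kernels there is no accumulator for the sum of differences. Each iteration
+ // processes two 16-pixel rows, split into low/high 8-pixel halves.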
+ for (i = 0; i < 8; i++) { // mse16x16_neon_loop
+ q0u8 = vld1q_u8(src_ptr);
+ src_ptr += source_stride;
+ q1u8 = vld1q_u8(src_ptr);
+ src_ptr += source_stride;
+ q2u8 = vld1q_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ q3u8 = vld1q_u8(ref_ptr);
+ ref_ptr += recon_stride;
+
+ q11u16 = vsubl_u8(vget_low_u8(q0u8), vget_low_u8(q2u8));
+ q12u16 = vsubl_u8(vget_high_u8(q0u8), vget_high_u8(q2u8));
+ q13u16 = vsubl_u8(vget_low_u8(q1u8), vget_low_u8(q3u8));
+ q14u16 = vsubl_u8(vget_high_u8(q1u8), vget_high_u8(q3u8));
+
+ d22s16 = vreinterpret_s16_u16(vget_low_u16(q11u16));
+ d23s16 = vreinterpret_s16_u16(vget_high_u16(q11u16));
+ q7s32 = vmlal_s16(q7s32, d22s16, d22s16);
+ q8s32 = vmlal_s16(q8s32, d23s16, d23s16);
+
+ d24s16 = vreinterpret_s16_u16(vget_low_u16(q12u16));
+ d25s16 = vreinterpret_s16_u16(vget_high_u16(q12u16));
+ q9s32 = vmlal_s16(q9s32, d24s16, d24s16);
+ q10s32 = vmlal_s16(q10s32, d25s16, d25s16);
+
+ d26s16 = vreinterpret_s16_u16(vget_low_u16(q13u16));
+ d27s16 = vreinterpret_s16_u16(vget_high_u16(q13u16));
+ q7s32 = vmlal_s16(q7s32, d26s16, d26s16);
+ q8s32 = vmlal_s16(q8s32, d27s16, d27s16);
+
+ d28s16 = vreinterpret_s16_u16(vget_low_u16(q14u16));
+ d29s16 = vreinterpret_s16_u16(vget_high_u16(q14u16));
+ q9s32 = vmlal_s16(q9s32, d28s16, d28s16);
+ q10s32 = vmlal_s16(q10s32, d29s16, d29s16);
+ }
+
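+ // Combine the four partial accumulators and reduce to a single 32-bit SSE,
+ // which is both stored to *sse and returned.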
+ q7s32 = vaddq_s32(q7s32, q8s32);
+ q9s32 = vaddq_s32(q9s32, q10s32);
+ q10s32 = vaddq_s32(q7s32, q9s32);
+
+ q1s64 = vpaddlq_s32(q10s32);
+ d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+
+ vst1_lane_u32((uint32_t *)sse, vreinterpret_u32_s64(d0s64), 0);
+ return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
+}
+
+unsigned int aom_get4x4sse_cs_neon(const unsigned char *src_ptr,
+ int source_stride,
+ const unsigned char *ref_ptr,
+ int recon_stride) {
+ int16x4_t d22s16, d24s16, d26s16, d28s16;
+ int64x1_t d0s64;
+ uint8x8_t d0u8, d1u8, d2u8, d3u8, d4u8, d5u8, d6u8, d7u8;
+ int32x4_t q7s32, q8s32, q9s32, q10s32;
+ uint16x8_t q11u16, q12u16, q13u16, q14u16;
+ int64x2_t q1s64;
+
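+ // Load four rows of source and reference. vld1_u8 reads 8 bytes per row,
+ // but only the first four pixels of each row belong to the 4x4 block.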
+ d0u8 = vld1_u8(src_ptr);
+ src_ptr += source_stride;
+ d4u8 = vld1_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ d1u8 = vld1_u8(src_ptr);
+ src_ptr += source_stride;
+ d5u8 = vld1_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ d2u8 = vld1_u8(src_ptr);
+ src_ptr += source_stride;
+ d6u8 = vld1_u8(ref_ptr);
+ ref_ptr += recon_stride;
+ d3u8 = vld1_u8(src_ptr);
+ src_ptr += source_stride;
+ d7u8 = vld1_u8(ref_ptr);
+ ref_ptr += recon_stride;
+
+ q11u16 = vsubl_u8(d0u8, d4u8);
+ q12u16 = vsubl_u8(d1u8, d5u8);
+ q13u16 = vsubl_u8(d2u8, d6u8);
+ q14u16 = vsubl_u8(d3u8, d7u8);
+
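+ // Square and accumulate only the low four difference lanes of each row.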
+ d22s16 = vget_low_s16(vreinterpretq_s16_u16(q11u16));
+ d24s16 = vget_low_s16(vreinterpretq_s16_u16(q12u16));
+ d26s16 = vget_low_s16(vreinterpretq_s16_u16(q13u16));
+ d28s16 = vget_low_s16(vreinterpretq_s16_u16(q14u16));
+
+ q7s32 = vmull_s16(d22s16, d22s16);
+ q8s32 = vmull_s16(d24s16, d24s16);
+ q9s32 = vmull_s16(d26s16, d26s16);
+ q10s32 = vmull_s16(d28s16, d28s16);
+
+ q7s32 = vaddq_s32(q7s32, q8s32);
+ q9s32 = vaddq_s32(q9s32, q10s32);
+ q9s32 = vaddq_s32(q7s32, q9s32);
+
+ q1s64 = vpaddlq_s32(q9s32);
+ d0s64 = vadd_s64(vget_low_s64(q1s64), vget_high_s64(q1s64));
+
+ return vget_lane_u32(vreinterpret_u32_s64(d0s64), 0);
+}