summaryrefslogtreecommitdiffstats
path: root/third_party/aom/av1/encoder/x86
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--third_party/aom/av1/encoder/x86/av1_fwd_txfm1d_sse4.c1409
-rw-r--r--third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_avx2.c3010
-rw-r--r--third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c336
-rw-r--r--third_party/aom/av1/encoder/x86/av1_fwd_txfm_avx2.h96
-rw-r--r--third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c2673
-rw-r--r--third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.h253
-rw-r--r--third_party/aom/av1/encoder/x86/av1_highbd_quantize_avx2.c137
-rw-r--r--third_party/aom/av1/encoder/x86/av1_highbd_quantize_sse4.c195
-rw-r--r--third_party/aom/av1/encoder/x86/av1_k_means_avx2.c132
-rw-r--r--third_party/aom/av1/encoder/x86/av1_k_means_sse2.c124
-rw-r--r--third_party/aom/av1/encoder/x86/av1_quantize_avx2.c414
-rw-r--r--third_party/aom/av1/encoder/x86/av1_quantize_sse2.c289
-rw-r--r--third_party/aom/av1/encoder/x86/av1_quantize_ssse3_x86_64.asm204
-rw-r--r--third_party/aom/av1/encoder/x86/av1_ssim_opt_x86_64.asm222
-rw-r--r--third_party/aom/av1/encoder/x86/av1_temporal_denoiser_sse2.c328
-rw-r--r--third_party/aom/av1/encoder/x86/av1_txfm1d_sse4.h144
-rw-r--r--third_party/aom/av1/encoder/x86/cnn_avx2.c532
-rw-r--r--third_party/aom/av1/encoder/x86/dct_sse2.asm82
-rw-r--r--third_party/aom/av1/encoder/x86/encodetxb_avx2.c122
-rw-r--r--third_party/aom/av1/encoder/x86/encodetxb_sse2.c505
-rw-r--r--third_party/aom/av1/encoder/x86/encodetxb_sse4.c84
-rw-r--r--third_party/aom/av1/encoder/x86/error_intrin_avx2.c210
-rw-r--r--third_party/aom/av1/encoder/x86/error_intrin_sse2.c75
-rw-r--r--third_party/aom/av1/encoder/x86/error_sse2.asm88
-rw-r--r--third_party/aom/av1/encoder/x86/hash_sse42.c53
-rw-r--r--third_party/aom/av1/encoder/x86/highbd_block_error_intrin_avx2.c64
-rw-r--r--third_party/aom/av1/encoder/x86/highbd_block_error_intrin_sse2.c74
-rw-r--r--third_party/aom/av1/encoder/x86/highbd_fwd_txfm_avx2.c3132
-rw-r--r--third_party/aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c2629
-rw-r--r--third_party/aom/av1/encoder/x86/highbd_temporal_filter_avx2.c466
-rw-r--r--third_party/aom/av1/encoder/x86/highbd_temporal_filter_sse2.c341
-rw-r--r--third_party/aom/av1/encoder/x86/ml_avx2.c240
-rw-r--r--third_party/aom/av1/encoder/x86/ml_sse3.c336
-rw-r--r--third_party/aom/av1/encoder/x86/ml_sse3.h29
-rw-r--r--third_party/aom/av1/encoder/x86/pickrst_avx2.c2348
-rw-r--r--third_party/aom/av1/encoder/x86/pickrst_sse4.c1483
-rw-r--r--third_party/aom/av1/encoder/x86/rdopt_avx2.c254
-rw-r--r--third_party/aom/av1/encoder/x86/rdopt_sse4.c272
-rw-r--r--third_party/aom/av1/encoder/x86/reconinter_enc_sse2.c347
-rw-r--r--third_party/aom/av1/encoder/x86/reconinter_enc_ssse3.c67
-rw-r--r--third_party/aom/av1/encoder/x86/temporal_filter_avx2.c647
-rw-r--r--third_party/aom/av1/encoder/x86/temporal_filter_sse2.c320
-rw-r--r--third_party/aom/av1/encoder/x86/wedge_utils_avx2.c215
-rw-r--r--third_party/aom/av1/encoder/x86/wedge_utils_sse2.c254
44 files changed, 25235 insertions, 0 deletions
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm1d_sse4.c b/third_party/aom/av1/encoder/x86/av1_fwd_txfm1d_sse4.c
new file mode 100644
index 0000000000..494b0fdf15
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm1d_sse4.c
@@ -0,0 +1,1409 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "av1/encoder/x86/av1_txfm1d_sse4.h"
+
+// 1-D forward 32-point DCT on 32-bit lanes. Each __m128i carries 4 columns
+// processed in parallel. `stride` is the distance, in __m128i elements,
+// between consecutive transform samples in both `input` and `output`
+// (element i lives at input[i * stride]). The transform is computed as a
+// 9-stage butterfly network; `cos_bit` selects the fixed-point cosine table
+// via cospi_arr() and sets the rounding precision of the butterfly helpers
+// btf_32_sse4_1_type0/type1 (declared in av1/encoder/x86/av1_txfm1d_sse4.h).
+// NOTE(review): no intermediate rounding/clamping happens here beyond what
+// the btf_* helpers do internally — callers are assumed to have chosen
+// cos_bit so that 32-bit lanes do not overflow; confirm against callers.
+void av1_fdct32_sse4_1(__m128i *input, __m128i *output, int cos_bit,
+                       const int stride) {
+  __m128i buf0[32];
+  __m128i buf1[32];
+  const int32_t *cospi;
+
+  int startidx = 0 * stride;
+  int endidx = 31 * stride;
+  // stage 0
+  // stage 1
+  // Butterfly of mirror-image input pairs: element k and element 31-k.
+  // buf1[k] = in[k] + in[31-k], buf1[31-k] = in[k] - in[31-k].
+  buf1[0] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[31] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[1] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[30] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[2] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[29] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[3] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[28] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[4] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[27] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[5] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[26] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[6] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[25] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[7] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[24] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[8] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[23] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[9] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[22] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[10] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[21] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[11] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[20] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[12] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[19] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[13] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[18] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[14] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[17] = _mm_sub_epi32(input[startidx], input[endidx]);
+  startidx += stride;
+  endidx -= stride;
+  buf1[15] = _mm_add_epi32(input[startidx], input[endidx]);
+  buf1[16] = _mm_sub_epi32(input[startidx], input[endidx]);
+
+  // stage 2
+  // Top half (0..15): recursive butterfly toward a 16-point DCT.
+  // Bottom half (16..31): the middle quarter (20..27) is rotated by
+  // +/-cospi[32] (i.e. cos(pi/4)); outer elements pass through.
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = _mm_add_epi32(buf1[0], buf1[15]);
+  buf0[15] = _mm_sub_epi32(buf1[0], buf1[15]);
+  buf0[1] = _mm_add_epi32(buf1[1], buf1[14]);
+  buf0[14] = _mm_sub_epi32(buf1[1], buf1[14]);
+  buf0[2] = _mm_add_epi32(buf1[2], buf1[13]);
+  buf0[13] = _mm_sub_epi32(buf1[2], buf1[13]);
+  buf0[3] = _mm_add_epi32(buf1[3], buf1[12]);
+  buf0[12] = _mm_sub_epi32(buf1[3], buf1[12]);
+  buf0[4] = _mm_add_epi32(buf1[4], buf1[11]);
+  buf0[11] = _mm_sub_epi32(buf1[4], buf1[11]);
+  buf0[5] = _mm_add_epi32(buf1[5], buf1[10]);
+  buf0[10] = _mm_sub_epi32(buf1[5], buf1[10]);
+  buf0[6] = _mm_add_epi32(buf1[6], buf1[9]);
+  buf0[9] = _mm_sub_epi32(buf1[6], buf1[9]);
+  buf0[7] = _mm_add_epi32(buf1[7], buf1[8]);
+  buf0[8] = _mm_sub_epi32(buf1[7], buf1[8]);
+  buf0[16] = buf1[16];
+  buf0[17] = buf1[17];
+  buf0[18] = buf1[18];
+  buf0[19] = buf1[19];
+  btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[20], buf1[27], buf0[20],
+                      buf0[27], cos_bit);
+  btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[21], buf1[26], buf0[21],
+                      buf0[26], cos_bit);
+  btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[22], buf1[25], buf0[22],
+                      buf0[25], cos_bit);
+  btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[23], buf1[24], buf0[23],
+                      buf0[24], cos_bit);
+  buf0[28] = buf1[28];
+  buf0[29] = buf1[29];
+  buf0[30] = buf1[30];
+  buf0[31] = buf1[31];
+
+  // stage 3
+  cospi = cospi_arr(cos_bit);
+  buf1[0] = _mm_add_epi32(buf0[0], buf0[7]);
+  buf1[7] = _mm_sub_epi32(buf0[0], buf0[7]);
+  buf1[1] = _mm_add_epi32(buf0[1], buf0[6]);
+  buf1[6] = _mm_sub_epi32(buf0[1], buf0[6]);
+  buf1[2] = _mm_add_epi32(buf0[2], buf0[5]);
+  buf1[5] = _mm_sub_epi32(buf0[2], buf0[5]);
+  buf1[3] = _mm_add_epi32(buf0[3], buf0[4]);
+  buf1[4] = _mm_sub_epi32(buf0[3], buf0[4]);
+  buf1[8] = buf0[8];
+  buf1[9] = buf0[9];
+  btf_32_sse4_1_type0(-cospi[32], cospi[32], buf0[10], buf0[13], buf1[10],
+                      buf1[13], cos_bit);
+  btf_32_sse4_1_type0(-cospi[32], cospi[32], buf0[11], buf0[12], buf1[11],
+                      buf1[12], cos_bit);
+  buf1[14] = buf0[14];
+  buf1[15] = buf0[15];
+  buf1[16] = _mm_add_epi32(buf0[16], buf0[23]);
+  buf1[23] = _mm_sub_epi32(buf0[16], buf0[23]);
+  buf1[17] = _mm_add_epi32(buf0[17], buf0[22]);
+  buf1[22] = _mm_sub_epi32(buf0[17], buf0[22]);
+  buf1[18] = _mm_add_epi32(buf0[18], buf0[21]);
+  buf1[21] = _mm_sub_epi32(buf0[18], buf0[21]);
+  buf1[19] = _mm_add_epi32(buf0[19], buf0[20]);
+  buf1[20] = _mm_sub_epi32(buf0[19], buf0[20]);
+  buf1[24] = _mm_sub_epi32(buf0[31], buf0[24]);
+  buf1[31] = _mm_add_epi32(buf0[31], buf0[24]);
+  buf1[25] = _mm_sub_epi32(buf0[30], buf0[25]);
+  buf1[30] = _mm_add_epi32(buf0[30], buf0[25]);
+  buf1[26] = _mm_sub_epi32(buf0[29], buf0[26]);
+  buf1[29] = _mm_add_epi32(buf0[29], buf0[26]);
+  buf1[27] = _mm_sub_epi32(buf0[28], buf0[27]);
+  buf1[28] = _mm_add_epi32(buf0[28], buf0[27]);
+
+  // stage 4
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = _mm_add_epi32(buf1[0], buf1[3]);
+  buf0[3] = _mm_sub_epi32(buf1[0], buf1[3]);
+  buf0[1] = _mm_add_epi32(buf1[1], buf1[2]);
+  buf0[2] = _mm_sub_epi32(buf1[1], buf1[2]);
+  buf0[4] = buf1[4];
+  btf_32_sse4_1_type0(-cospi[32], cospi[32], buf1[5], buf1[6], buf0[5], buf0[6],
+                      cos_bit);
+  buf0[7] = buf1[7];
+  buf0[8] = _mm_add_epi32(buf1[8], buf1[11]);
+  buf0[11] = _mm_sub_epi32(buf1[8], buf1[11]);
+  buf0[9] = _mm_add_epi32(buf1[9], buf1[10]);
+  buf0[10] = _mm_sub_epi32(buf1[9], buf1[10]);
+  buf0[12] = _mm_sub_epi32(buf1[15], buf1[12]);
+  buf0[15] = _mm_add_epi32(buf1[15], buf1[12]);
+  buf0[13] = _mm_sub_epi32(buf1[14], buf1[13]);
+  buf0[14] = _mm_add_epi32(buf1[14], buf1[13]);
+  buf0[16] = buf1[16];
+  buf0[17] = buf1[17];
+  btf_32_sse4_1_type0(-cospi[16], cospi[48], buf1[18], buf1[29], buf0[18],
+                      buf0[29], cos_bit);
+  btf_32_sse4_1_type0(-cospi[16], cospi[48], buf1[19], buf1[28], buf0[19],
+                      buf0[28], cos_bit);
+  btf_32_sse4_1_type0(-cospi[48], -cospi[16], buf1[20], buf1[27], buf0[20],
+                      buf0[27], cos_bit);
+  btf_32_sse4_1_type0(-cospi[48], -cospi[16], buf1[21], buf1[26], buf0[21],
+                      buf0[26], cos_bit);
+  buf0[22] = buf1[22];
+  buf0[23] = buf1[23];
+  buf0[24] = buf1[24];
+  buf0[25] = buf1[25];
+  buf0[30] = buf1[30];
+  buf0[31] = buf1[31];
+
+  // stage 5
+  // Elements 0..3 reach their final (unordered) DCT values here via the
+  // cospi[32] and cospi[48]/cospi[16] rotations.
+  cospi = cospi_arr(cos_bit);
+  btf_32_sse4_1_type0(cospi[32], cospi[32], buf0[0], buf0[1], buf1[0], buf1[1],
+                      cos_bit);
+  btf_32_sse4_1_type1(cospi[48], cospi[16], buf0[2], buf0[3], buf1[2], buf1[3],
+                      cos_bit);
+  buf1[4] = _mm_add_epi32(buf0[4], buf0[5]);
+  buf1[5] = _mm_sub_epi32(buf0[4], buf0[5]);
+  buf1[6] = _mm_sub_epi32(buf0[7], buf0[6]);
+  buf1[7] = _mm_add_epi32(buf0[7], buf0[6]);
+  buf1[8] = buf0[8];
+  btf_32_sse4_1_type0(-cospi[16], cospi[48], buf0[9], buf0[14], buf1[9],
+                      buf1[14], cos_bit);
+  btf_32_sse4_1_type0(-cospi[48], -cospi[16], buf0[10], buf0[13], buf1[10],
+                      buf1[13], cos_bit);
+  buf1[11] = buf0[11];
+  buf1[12] = buf0[12];
+  buf1[15] = buf0[15];
+  buf1[16] = _mm_add_epi32(buf0[16], buf0[19]);
+  buf1[19] = _mm_sub_epi32(buf0[16], buf0[19]);
+  buf1[17] = _mm_add_epi32(buf0[17], buf0[18]);
+  buf1[18] = _mm_sub_epi32(buf0[17], buf0[18]);
+  buf1[20] = _mm_sub_epi32(buf0[23], buf0[20]);
+  buf1[23] = _mm_add_epi32(buf0[23], buf0[20]);
+  buf1[21] = _mm_sub_epi32(buf0[22], buf0[21]);
+  buf1[22] = _mm_add_epi32(buf0[22], buf0[21]);
+  buf1[24] = _mm_add_epi32(buf0[24], buf0[27]);
+  buf1[27] = _mm_sub_epi32(buf0[24], buf0[27]);
+  buf1[25] = _mm_add_epi32(buf0[25], buf0[26]);
+  buf1[26] = _mm_sub_epi32(buf0[25], buf0[26]);
+  buf1[28] = _mm_sub_epi32(buf0[31], buf0[28]);
+  buf1[31] = _mm_add_epi32(buf0[31], buf0[28]);
+  buf1[29] = _mm_sub_epi32(buf0[30], buf0[29]);
+  buf1[30] = _mm_add_epi32(buf0[30], buf0[29]);
+
+  // stage 6
+  // Elements 4..7 are finalized with the cospi[56]/cospi[8] and
+  // cospi[24]/cospi[40] rotations.
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = buf1[0];
+  buf0[1] = buf1[1];
+  buf0[2] = buf1[2];
+  buf0[3] = buf1[3];
+  btf_32_sse4_1_type1(cospi[56], cospi[8], buf1[4], buf1[7], buf0[4], buf0[7],
+                      cos_bit);
+  btf_32_sse4_1_type1(cospi[24], cospi[40], buf1[5], buf1[6], buf0[5], buf0[6],
+                      cos_bit);
+  buf0[8] = _mm_add_epi32(buf1[8], buf1[9]);
+  buf0[9] = _mm_sub_epi32(buf1[8], buf1[9]);
+  buf0[10] = _mm_sub_epi32(buf1[11], buf1[10]);
+  buf0[11] = _mm_add_epi32(buf1[11], buf1[10]);
+  buf0[12] = _mm_add_epi32(buf1[12], buf1[13]);
+  buf0[13] = _mm_sub_epi32(buf1[12], buf1[13]);
+  buf0[14] = _mm_sub_epi32(buf1[15], buf1[14]);
+  buf0[15] = _mm_add_epi32(buf1[15], buf1[14]);
+  buf0[16] = buf1[16];
+  btf_32_sse4_1_type0(-cospi[8], cospi[56], buf1[17], buf1[30], buf0[17],
+                      buf0[30], cos_bit);
+  btf_32_sse4_1_type0(-cospi[56], -cospi[8], buf1[18], buf1[29], buf0[18],
+                      buf0[29], cos_bit);
+  buf0[19] = buf1[19];
+  buf0[20] = buf1[20];
+  btf_32_sse4_1_type0(-cospi[40], cospi[24], buf1[21], buf1[26], buf0[21],
+                      buf0[26], cos_bit);
+  btf_32_sse4_1_type0(-cospi[24], -cospi[40], buf1[22], buf1[25], buf0[22],
+                      buf0[25], cos_bit);
+  buf0[23] = buf1[23];
+  buf0[24] = buf1[24];
+  buf0[27] = buf1[27];
+  buf0[28] = buf1[28];
+  buf0[31] = buf1[31];
+
+  // stage 7
+  // Elements 8..15 are finalized; 16..31 take their last add/sub butterflies.
+  cospi = cospi_arr(cos_bit);
+  buf1[0] = buf0[0];
+  buf1[1] = buf0[1];
+  buf1[2] = buf0[2];
+  buf1[3] = buf0[3];
+  buf1[4] = buf0[4];
+  buf1[5] = buf0[5];
+  buf1[6] = buf0[6];
+  buf1[7] = buf0[7];
+  btf_32_sse4_1_type1(cospi[60], cospi[4], buf0[8], buf0[15], buf1[8], buf1[15],
+                      cos_bit);
+  btf_32_sse4_1_type1(cospi[28], cospi[36], buf0[9], buf0[14], buf1[9],
+                      buf1[14], cos_bit);
+  btf_32_sse4_1_type1(cospi[44], cospi[20], buf0[10], buf0[13], buf1[10],
+                      buf1[13], cos_bit);
+  btf_32_sse4_1_type1(cospi[12], cospi[52], buf0[11], buf0[12], buf1[11],
+                      buf1[12], cos_bit);
+  buf1[16] = _mm_add_epi32(buf0[16], buf0[17]);
+  buf1[17] = _mm_sub_epi32(buf0[16], buf0[17]);
+  buf1[18] = _mm_sub_epi32(buf0[19], buf0[18]);
+  buf1[19] = _mm_add_epi32(buf0[19], buf0[18]);
+  buf1[20] = _mm_add_epi32(buf0[20], buf0[21]);
+  buf1[21] = _mm_sub_epi32(buf0[20], buf0[21]);
+  buf1[22] = _mm_sub_epi32(buf0[23], buf0[22]);
+  buf1[23] = _mm_add_epi32(buf0[23], buf0[22]);
+  buf1[24] = _mm_add_epi32(buf0[24], buf0[25]);
+  buf1[25] = _mm_sub_epi32(buf0[24], buf0[25]);
+  buf1[26] = _mm_sub_epi32(buf0[27], buf0[26]);
+  buf1[27] = _mm_add_epi32(buf0[27], buf0[26]);
+  buf1[28] = _mm_add_epi32(buf0[28], buf0[29]);
+  buf1[29] = _mm_sub_epi32(buf0[28], buf0[29]);
+  buf1[30] = _mm_sub_epi32(buf0[31], buf0[30]);
+  buf1[31] = _mm_add_epi32(buf0[31], buf0[30]);
+
+  // stage 8
+  // Final rotations produce the odd-index DCT outputs (16..31).
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = buf1[0];
+  buf0[1] = buf1[1];
+  buf0[2] = buf1[2];
+  buf0[3] = buf1[3];
+  buf0[4] = buf1[4];
+  buf0[5] = buf1[5];
+  buf0[6] = buf1[6];
+  buf0[7] = buf1[7];
+  buf0[8] = buf1[8];
+  buf0[9] = buf1[9];
+  buf0[10] = buf1[10];
+  buf0[11] = buf1[11];
+  buf0[12] = buf1[12];
+  buf0[13] = buf1[13];
+  buf0[14] = buf1[14];
+  buf0[15] = buf1[15];
+  btf_32_sse4_1_type1(cospi[62], cospi[2], buf1[16], buf1[31], buf0[16],
+                      buf0[31], cos_bit);
+  btf_32_sse4_1_type1(cospi[30], cospi[34], buf1[17], buf1[30], buf0[17],
+                      buf0[30], cos_bit);
+  btf_32_sse4_1_type1(cospi[46], cospi[18], buf1[18], buf1[29], buf0[18],
+                      buf0[29], cos_bit);
+  btf_32_sse4_1_type1(cospi[14], cospi[50], buf1[19], buf1[28], buf0[19],
+                      buf0[28], cos_bit);
+  btf_32_sse4_1_type1(cospi[54], cospi[10], buf1[20], buf1[27], buf0[20],
+                      buf0[27], cos_bit);
+  btf_32_sse4_1_type1(cospi[22], cospi[42], buf1[21], buf1[26], buf0[21],
+                      buf0[26], cos_bit);
+  btf_32_sse4_1_type1(cospi[38], cospi[26], buf1[22], buf1[25], buf0[22],
+                      buf0[25], cos_bit);
+  btf_32_sse4_1_type1(cospi[6], cospi[58], buf1[23], buf1[24], buf0[23],
+                      buf0[24], cos_bit);
+
+  startidx = 0 * stride;
+  endidx = 31 * stride;
+  // stage 9
+  // Write the coefficients back in bit-reversed index order
+  // (0,16,8,24,4,20,12,28,2,...,30 / mirrored from the end): output slot k
+  // receives buf0[bitrev5(k)].
+  output[startidx] = buf0[0];
+  output[endidx] = buf0[31];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[16];
+  output[endidx] = buf0[15];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[8];
+  output[endidx] = buf0[23];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[24];
+  output[endidx] = buf0[7];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[4];
+  output[endidx] = buf0[27];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[20];
+  output[endidx] = buf0[11];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[12];
+  output[endidx] = buf0[19];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[28];
+  output[endidx] = buf0[3];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[2];
+  output[endidx] = buf0[29];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[18];
+  output[endidx] = buf0[13];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[10];
+  output[endidx] = buf0[21];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[26];
+  output[endidx] = buf0[5];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[6];
+  output[endidx] = buf0[25];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[22];
+  output[endidx] = buf0[9];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[14];
+  output[endidx] = buf0[17];
+  startidx += stride;
+  endidx -= stride;
+  output[startidx] = buf0[30];
+  output[endidx] = buf0[1];
+}
+
+// 1-D forward 4-point ADST on 32-bit lanes; each __m128i holds 4 columns
+// processed in parallel. With txfm_size == num_per_128 == 4, col_num is 1,
+// so the outer loop executes exactly once; it is kept for structural
+// symmetry with the other 1-D kernels. `stage_range` is unused here
+// (no per-stage range clamping is performed in this path).
+void av1_fadst4_sse4_1(const __m128i *input, __m128i *output,
+                       const int8_t cos_bit, const int8_t *stage_range) {
+  const int txfm_size = 4;
+  const int num_per_128 = 4;
+  const int32_t *cospi;
+  __m128i buf0[4];
+  __m128i buf1[4];
+  int col_num = txfm_size / num_per_128;
+  int col;
+  (void)stage_range;
+  for (col = 0; col < col_num; col++) {
+    // stage 0;
+    // Load the 4 transform samples for this column group.
+    int j;
+    for (j = 0; j < 4; ++j) {
+      buf0[j] = input[j * col_num + col];
+    }
+
+    // stage 1
+    // Input permutation required by the ADST flow graph: (3,0,1,2).
+    buf1[0] = buf0[3];
+    buf1[1] = buf0[0];
+    buf1[2] = buf0[1];
+    buf1[3] = buf0[2];
+
+    // stage 2
+    // Rotations by the cospi[8]/cospi[56] and cospi[40]/cospi[24] pairs.
+    cospi = cospi_arr(cos_bit);
+    btf_32_sse4_1_type0(cospi[8], cospi[56], buf1[0], buf1[1], buf0[0], buf0[1],
+                        cos_bit);
+    btf_32_sse4_1_type0(cospi[40], cospi[24], buf1[2], buf1[3], buf0[2],
+                        buf0[3], cos_bit);
+
+    // stage 3
+    buf1[0] = _mm_add_epi32(buf0[0], buf0[2]);
+    buf1[2] = _mm_sub_epi32(buf0[0], buf0[2]);
+    buf1[1] = _mm_add_epi32(buf0[1], buf0[3]);
+    buf1[3] = _mm_sub_epi32(buf0[1], buf0[3]);
+
+    // stage 4
+    cospi = cospi_arr(cos_bit);
+    buf0[0] = buf1[0];
+    buf0[1] = buf1[1];
+    btf_32_sse4_1_type0(cospi[32], cospi[32], buf1[2], buf1[3], buf0[2],
+                        buf0[3], cos_bit);
+
+    // stage 5
+    // Output permutation with sign flips; negation is done as 0 - x.
+    buf1[0] = buf0[0];
+    buf1[1] = _mm_sub_epi32(_mm_setzero_si128(), buf0[2]);
+    buf1[2] = buf0[3];
+    buf1[3] = _mm_sub_epi32(_mm_setzero_si128(), buf0[1]);
+
+    for (j = 0; j < 4; ++j) {
+      output[j * col_num + col] = buf1[j];
+    }
+  }
+}
+
+void av1_fdct64_sse4_1(__m128i *input, __m128i *output, int8_t cos_bit,
+ const int instride, const int outstride) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+ __m128i cospi_m32 = _mm_set1_epi32(-cospi[32]);
+ __m128i cospi_p32 = _mm_set1_epi32(cospi[32]);
+ __m128i cospi_m16 = _mm_set1_epi32(-cospi[16]);
+ __m128i cospi_p48 = _mm_set1_epi32(cospi[48]);
+ __m128i cospi_m48 = _mm_set1_epi32(-cospi[48]);
+ __m128i cospi_p16 = _mm_set1_epi32(cospi[16]);
+ __m128i cospi_m08 = _mm_set1_epi32(-cospi[8]);
+ __m128i cospi_p56 = _mm_set1_epi32(cospi[56]);
+ __m128i cospi_m56 = _mm_set1_epi32(-cospi[56]);
+ __m128i cospi_m40 = _mm_set1_epi32(-cospi[40]);
+ __m128i cospi_p24 = _mm_set1_epi32(cospi[24]);
+ __m128i cospi_m24 = _mm_set1_epi32(-cospi[24]);
+ __m128i cospi_p08 = _mm_set1_epi32(cospi[8]);
+ __m128i cospi_p40 = _mm_set1_epi32(cospi[40]);
+ __m128i cospi_p60 = _mm_set1_epi32(cospi[60]);
+ __m128i cospi_p04 = _mm_set1_epi32(cospi[4]);
+ __m128i cospi_p28 = _mm_set1_epi32(cospi[28]);
+ __m128i cospi_p36 = _mm_set1_epi32(cospi[36]);
+ __m128i cospi_p44 = _mm_set1_epi32(cospi[44]);
+ __m128i cospi_p20 = _mm_set1_epi32(cospi[20]);
+ __m128i cospi_p12 = _mm_set1_epi32(cospi[12]);
+ __m128i cospi_p52 = _mm_set1_epi32(cospi[52]);
+ __m128i cospi_m04 = _mm_set1_epi32(-cospi[4]);
+ __m128i cospi_m60 = _mm_set1_epi32(-cospi[60]);
+ __m128i cospi_m36 = _mm_set1_epi32(-cospi[36]);
+ __m128i cospi_m28 = _mm_set1_epi32(-cospi[28]);
+ __m128i cospi_m20 = _mm_set1_epi32(-cospi[20]);
+ __m128i cospi_m44 = _mm_set1_epi32(-cospi[44]);
+ __m128i cospi_m52 = _mm_set1_epi32(-cospi[52]);
+ __m128i cospi_m12 = _mm_set1_epi32(-cospi[12]);
+ __m128i cospi_p62 = _mm_set1_epi32(cospi[62]);
+ __m128i cospi_p02 = _mm_set1_epi32(cospi[2]);
+ __m128i cospi_p30 = _mm_set1_epi32(cospi[30]);
+ __m128i cospi_p34 = _mm_set1_epi32(cospi[34]);
+ __m128i cospi_p46 = _mm_set1_epi32(cospi[46]);
+ __m128i cospi_p18 = _mm_set1_epi32(cospi[18]);
+ __m128i cospi_p14 = _mm_set1_epi32(cospi[14]);
+ __m128i cospi_p50 = _mm_set1_epi32(cospi[50]);
+ __m128i cospi_p54 = _mm_set1_epi32(cospi[54]);
+ __m128i cospi_p10 = _mm_set1_epi32(cospi[10]);
+ __m128i cospi_p22 = _mm_set1_epi32(cospi[22]);
+ __m128i cospi_p42 = _mm_set1_epi32(cospi[42]);
+ __m128i cospi_p38 = _mm_set1_epi32(cospi[38]);
+ __m128i cospi_p26 = _mm_set1_epi32(cospi[26]);
+ __m128i cospi_p06 = _mm_set1_epi32(cospi[6]);
+ __m128i cospi_p58 = _mm_set1_epi32(cospi[58]);
+ __m128i cospi_p63 = _mm_set1_epi32(cospi[63]);
+ __m128i cospi_p01 = _mm_set1_epi32(cospi[1]);
+ __m128i cospi_p31 = _mm_set1_epi32(cospi[31]);
+ __m128i cospi_p33 = _mm_set1_epi32(cospi[33]);
+ __m128i cospi_p47 = _mm_set1_epi32(cospi[47]);
+ __m128i cospi_p17 = _mm_set1_epi32(cospi[17]);
+ __m128i cospi_p15 = _mm_set1_epi32(cospi[15]);
+ __m128i cospi_p49 = _mm_set1_epi32(cospi[49]);
+ __m128i cospi_p55 = _mm_set1_epi32(cospi[55]);
+ __m128i cospi_p09 = _mm_set1_epi32(cospi[9]);
+ __m128i cospi_p23 = _mm_set1_epi32(cospi[23]);
+ __m128i cospi_p41 = _mm_set1_epi32(cospi[41]);
+ __m128i cospi_p39 = _mm_set1_epi32(cospi[39]);
+ __m128i cospi_p25 = _mm_set1_epi32(cospi[25]);
+ __m128i cospi_p07 = _mm_set1_epi32(cospi[7]);
+ __m128i cospi_p57 = _mm_set1_epi32(cospi[57]);
+ __m128i cospi_p59 = _mm_set1_epi32(cospi[59]);
+ __m128i cospi_p05 = _mm_set1_epi32(cospi[5]);
+ __m128i cospi_p27 = _mm_set1_epi32(cospi[27]);
+ __m128i cospi_p37 = _mm_set1_epi32(cospi[37]);
+ __m128i cospi_p43 = _mm_set1_epi32(cospi[43]);
+ __m128i cospi_p21 = _mm_set1_epi32(cospi[21]);
+ __m128i cospi_p11 = _mm_set1_epi32(cospi[11]);
+ __m128i cospi_p53 = _mm_set1_epi32(cospi[53]);
+ __m128i cospi_p51 = _mm_set1_epi32(cospi[51]);
+ __m128i cospi_p13 = _mm_set1_epi32(cospi[13]);
+ __m128i cospi_p19 = _mm_set1_epi32(cospi[19]);
+ __m128i cospi_p45 = _mm_set1_epi32(cospi[45]);
+ __m128i cospi_p35 = _mm_set1_epi32(cospi[35]);
+ __m128i cospi_p29 = _mm_set1_epi32(cospi[29]);
+ __m128i cospi_p03 = _mm_set1_epi32(cospi[3]);
+ __m128i cospi_p61 = _mm_set1_epi32(cospi[61]);
+
+ int startidx = 0 * instride;
+ int endidx = 63 * instride;
+ // stage 1
+ __m128i x1[64];
+ x1[0] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[63] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[1] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[62] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[2] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[61] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[3] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[60] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[4] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[59] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[5] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[58] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[6] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[57] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[7] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[56] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[8] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[55] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[9] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[54] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[10] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[53] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[11] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[52] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[12] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[51] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[13] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[50] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[14] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[49] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[15] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[48] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[16] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[47] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[17] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[46] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[18] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[45] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[19] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[44] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[20] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[43] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[21] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[42] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[22] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[41] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[23] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[40] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[24] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[39] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[25] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[38] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[26] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[37] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[27] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[36] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[28] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[35] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[29] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[34] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[30] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[33] = _mm_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[31] = _mm_add_epi32(input[startidx], input[endidx]);
+ x1[32] = _mm_sub_epi32(input[startidx], input[endidx]);
+
+ // stage 2
+ __m128i x2[64];
+ x2[0] = _mm_add_epi32(x1[0], x1[31]);
+ x2[31] = _mm_sub_epi32(x1[0], x1[31]);
+ x2[1] = _mm_add_epi32(x1[1], x1[30]);
+ x2[30] = _mm_sub_epi32(x1[1], x1[30]);
+ x2[2] = _mm_add_epi32(x1[2], x1[29]);
+ x2[29] = _mm_sub_epi32(x1[2], x1[29]);
+ x2[3] = _mm_add_epi32(x1[3], x1[28]);
+ x2[28] = _mm_sub_epi32(x1[3], x1[28]);
+ x2[4] = _mm_add_epi32(x1[4], x1[27]);
+ x2[27] = _mm_sub_epi32(x1[4], x1[27]);
+ x2[5] = _mm_add_epi32(x1[5], x1[26]);
+ x2[26] = _mm_sub_epi32(x1[5], x1[26]);
+ x2[6] = _mm_add_epi32(x1[6], x1[25]);
+ x2[25] = _mm_sub_epi32(x1[6], x1[25]);
+ x2[7] = _mm_add_epi32(x1[7], x1[24]);
+ x2[24] = _mm_sub_epi32(x1[7], x1[24]);
+ x2[8] = _mm_add_epi32(x1[8], x1[23]);
+ x2[23] = _mm_sub_epi32(x1[8], x1[23]);
+ x2[9] = _mm_add_epi32(x1[9], x1[22]);
+ x2[22] = _mm_sub_epi32(x1[9], x1[22]);
+ x2[10] = _mm_add_epi32(x1[10], x1[21]);
+ x2[21] = _mm_sub_epi32(x1[10], x1[21]);
+ x2[11] = _mm_add_epi32(x1[11], x1[20]);
+ x2[20] = _mm_sub_epi32(x1[11], x1[20]);
+ x2[12] = _mm_add_epi32(x1[12], x1[19]);
+ x2[19] = _mm_sub_epi32(x1[12], x1[19]);
+ x2[13] = _mm_add_epi32(x1[13], x1[18]);
+ x2[18] = _mm_sub_epi32(x1[13], x1[18]);
+ x2[14] = _mm_add_epi32(x1[14], x1[17]);
+ x2[17] = _mm_sub_epi32(x1[14], x1[17]);
+ x2[15] = _mm_add_epi32(x1[15], x1[16]);
+ x2[16] = _mm_sub_epi32(x1[15], x1[16]);
+ x2[32] = x1[32];
+ x2[33] = x1[33];
+ x2[34] = x1[34];
+ x2[35] = x1[35];
+ x2[36] = x1[36];
+ x2[37] = x1[37];
+ x2[38] = x1[38];
+ x2[39] = x1[39];
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[40], x1[55], x2[40], x2[55],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[41], x1[54], x2[41], x2[54],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[42], x1[53], x2[42], x2[53],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[43], x1[52], x2[43], x2[52],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[44], x1[51], x2[44], x2[51],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[45], x1[50], x2[45], x2[50],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[46], x1[49], x2[46], x2[49],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x1[47], x1[48], x2[47], x2[48],
+ __rounding, cos_bit);
+ x2[56] = x1[56];
+ x2[57] = x1[57];
+ x2[58] = x1[58];
+ x2[59] = x1[59];
+ x2[60] = x1[60];
+ x2[61] = x1[61];
+ x2[62] = x1[62];
+ x2[63] = x1[63];
+
+ // stage 3
+ __m128i x3[64];
+ x3[0] = _mm_add_epi32(x2[0], x2[15]);
+ x3[15] = _mm_sub_epi32(x2[0], x2[15]);
+ x3[1] = _mm_add_epi32(x2[1], x2[14]);
+ x3[14] = _mm_sub_epi32(x2[1], x2[14]);
+ x3[2] = _mm_add_epi32(x2[2], x2[13]);
+ x3[13] = _mm_sub_epi32(x2[2], x2[13]);
+ x3[3] = _mm_add_epi32(x2[3], x2[12]);
+ x3[12] = _mm_sub_epi32(x2[3], x2[12]);
+ x3[4] = _mm_add_epi32(x2[4], x2[11]);
+ x3[11] = _mm_sub_epi32(x2[4], x2[11]);
+ x3[5] = _mm_add_epi32(x2[5], x2[10]);
+ x3[10] = _mm_sub_epi32(x2[5], x2[10]);
+ x3[6] = _mm_add_epi32(x2[6], x2[9]);
+ x3[9] = _mm_sub_epi32(x2[6], x2[9]);
+ x3[7] = _mm_add_epi32(x2[7], x2[8]);
+ x3[8] = _mm_sub_epi32(x2[7], x2[8]);
+ x3[16] = x2[16];
+ x3[17] = x2[17];
+ x3[18] = x2[18];
+ x3[19] = x2[19];
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x2[20], x2[27], x3[20], x3[27],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x2[21], x2[26], x3[21], x3[26],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x2[22], x2[25], x3[22], x3[25],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x2[23], x2[24], x3[23], x3[24],
+ __rounding, cos_bit);
+ x3[28] = x2[28];
+ x3[29] = x2[29];
+ x3[30] = x2[30];
+ x3[31] = x2[31];
+ x3[32] = _mm_add_epi32(x2[32], x2[47]);
+ x3[47] = _mm_sub_epi32(x2[32], x2[47]);
+ x3[33] = _mm_add_epi32(x2[33], x2[46]);
+ x3[46] = _mm_sub_epi32(x2[33], x2[46]);
+ x3[34] = _mm_add_epi32(x2[34], x2[45]);
+ x3[45] = _mm_sub_epi32(x2[34], x2[45]);
+ x3[35] = _mm_add_epi32(x2[35], x2[44]);
+ x3[44] = _mm_sub_epi32(x2[35], x2[44]);
+ x3[36] = _mm_add_epi32(x2[36], x2[43]);
+ x3[43] = _mm_sub_epi32(x2[36], x2[43]);
+ x3[37] = _mm_add_epi32(x2[37], x2[42]);
+ x3[42] = _mm_sub_epi32(x2[37], x2[42]);
+ x3[38] = _mm_add_epi32(x2[38], x2[41]);
+ x3[41] = _mm_sub_epi32(x2[38], x2[41]);
+ x3[39] = _mm_add_epi32(x2[39], x2[40]);
+ x3[40] = _mm_sub_epi32(x2[39], x2[40]);
+ x3[48] = _mm_sub_epi32(x2[63], x2[48]);
+ x3[63] = _mm_add_epi32(x2[63], x2[48]);
+ x3[49] = _mm_sub_epi32(x2[62], x2[49]);
+ x3[62] = _mm_add_epi32(x2[62], x2[49]);
+ x3[50] = _mm_sub_epi32(x2[61], x2[50]);
+ x3[61] = _mm_add_epi32(x2[61], x2[50]);
+ x3[51] = _mm_sub_epi32(x2[60], x2[51]);
+ x3[60] = _mm_add_epi32(x2[60], x2[51]);
+ x3[52] = _mm_sub_epi32(x2[59], x2[52]);
+ x3[59] = _mm_add_epi32(x2[59], x2[52]);
+ x3[53] = _mm_sub_epi32(x2[58], x2[53]);
+ x3[58] = _mm_add_epi32(x2[58], x2[53]);
+ x3[54] = _mm_sub_epi32(x2[57], x2[54]);
+ x3[57] = _mm_add_epi32(x2[57], x2[54]);
+ x3[55] = _mm_sub_epi32(x2[56], x2[55]);
+ x3[56] = _mm_add_epi32(x2[56], x2[55]);
+
+ // stage 4
+ __m128i x4[64];
+ x4[0] = _mm_add_epi32(x3[0], x3[7]);
+ x4[7] = _mm_sub_epi32(x3[0], x3[7]);
+ x4[1] = _mm_add_epi32(x3[1], x3[6]);
+ x4[6] = _mm_sub_epi32(x3[1], x3[6]);
+ x4[2] = _mm_add_epi32(x3[2], x3[5]);
+ x4[5] = _mm_sub_epi32(x3[2], x3[5]);
+ x4[3] = _mm_add_epi32(x3[3], x3[4]);
+ x4[4] = _mm_sub_epi32(x3[3], x3[4]);
+ x4[8] = x3[8];
+ x4[9] = x3[9];
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x3[10], x3[13], x4[10], x4[13],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x3[11], x3[12], x4[11], x4[12],
+ __rounding, cos_bit);
+ x4[14] = x3[14];
+ x4[15] = x3[15];
+ x4[16] = _mm_add_epi32(x3[16], x3[23]);
+ x4[23] = _mm_sub_epi32(x3[16], x3[23]);
+ x4[17] = _mm_add_epi32(x3[17], x3[22]);
+ x4[22] = _mm_sub_epi32(x3[17], x3[22]);
+ x4[18] = _mm_add_epi32(x3[18], x3[21]);
+ x4[21] = _mm_sub_epi32(x3[18], x3[21]);
+ x4[19] = _mm_add_epi32(x3[19], x3[20]);
+ x4[20] = _mm_sub_epi32(x3[19], x3[20]);
+ x4[24] = _mm_sub_epi32(x3[31], x3[24]);
+ x4[31] = _mm_add_epi32(x3[31], x3[24]);
+ x4[25] = _mm_sub_epi32(x3[30], x3[25]);
+ x4[30] = _mm_add_epi32(x3[30], x3[25]);
+ x4[26] = _mm_sub_epi32(x3[29], x3[26]);
+ x4[29] = _mm_add_epi32(x3[29], x3[26]);
+ x4[27] = _mm_sub_epi32(x3[28], x3[27]);
+ x4[28] = _mm_add_epi32(x3[28], x3[27]);
+ x4[32] = x3[32];
+ x4[33] = x3[33];
+ x4[34] = x3[34];
+ x4[35] = x3[35];
+ btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x3[36], x3[59], x4[36], x4[59],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x3[37], x3[58], x4[37], x4[58],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x3[38], x3[57], x4[38], x4[57],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x3[39], x3[56], x4[39], x4[56],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x3[40], x3[55], x4[40], x4[55],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x3[41], x3[54], x4[41], x4[54],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x3[42], x3[53], x4[42], x4[53],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x3[43], x3[52], x4[43], x4[52],
+ __rounding, cos_bit);
+ x4[44] = x3[44];
+ x4[45] = x3[45];
+ x4[46] = x3[46];
+ x4[47] = x3[47];
+ x4[48] = x3[48];
+ x4[49] = x3[49];
+ x4[50] = x3[50];
+ x4[51] = x3[51];
+ x4[60] = x3[60];
+ x4[61] = x3[61];
+ x4[62] = x3[62];
+ x4[63] = x3[63];
+
+ // stage 5
+ __m128i x5[64];
+ x5[0] = _mm_add_epi32(x4[0], x4[3]);
+ x5[3] = _mm_sub_epi32(x4[0], x4[3]);
+ x5[1] = _mm_add_epi32(x4[1], x4[2]);
+ x5[2] = _mm_sub_epi32(x4[1], x4[2]);
+ x5[4] = x4[4];
+ btf_32_type0_sse4_1_new(cospi_m32, cospi_p32, x4[5], x4[6], x5[5], x5[6],
+ __rounding, cos_bit);
+ x5[7] = x4[7];
+ x5[8] = _mm_add_epi32(x4[8], x4[11]);
+ x5[11] = _mm_sub_epi32(x4[8], x4[11]);
+ x5[9] = _mm_add_epi32(x4[9], x4[10]);
+ x5[10] = _mm_sub_epi32(x4[9], x4[10]);
+ x5[12] = _mm_sub_epi32(x4[15], x4[12]);
+ x5[15] = _mm_add_epi32(x4[15], x4[12]);
+ x5[13] = _mm_sub_epi32(x4[14], x4[13]);
+ x5[14] = _mm_add_epi32(x4[14], x4[13]);
+ x5[16] = x4[16];
+ x5[17] = x4[17];
+ btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x4[18], x4[29], x5[18], x5[29],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x4[19], x4[28], x5[19], x5[28],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x4[20], x4[27], x5[20], x5[27],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x4[21], x4[26], x5[21], x5[26],
+ __rounding, cos_bit);
+ x5[22] = x4[22];
+ x5[23] = x4[23];
+ x5[24] = x4[24];
+ x5[25] = x4[25];
+ x5[30] = x4[30];
+ x5[31] = x4[31];
+ x5[32] = _mm_add_epi32(x4[32], x4[39]);
+ x5[39] = _mm_sub_epi32(x4[32], x4[39]);
+ x5[33] = _mm_add_epi32(x4[33], x4[38]);
+ x5[38] = _mm_sub_epi32(x4[33], x4[38]);
+ x5[34] = _mm_add_epi32(x4[34], x4[37]);
+ x5[37] = _mm_sub_epi32(x4[34], x4[37]);
+ x5[35] = _mm_add_epi32(x4[35], x4[36]);
+ x5[36] = _mm_sub_epi32(x4[35], x4[36]);
+ x5[40] = _mm_sub_epi32(x4[47], x4[40]);
+ x5[47] = _mm_add_epi32(x4[47], x4[40]);
+ x5[41] = _mm_sub_epi32(x4[46], x4[41]);
+ x5[46] = _mm_add_epi32(x4[46], x4[41]);
+ x5[42] = _mm_sub_epi32(x4[45], x4[42]);
+ x5[45] = _mm_add_epi32(x4[45], x4[42]);
+ x5[43] = _mm_sub_epi32(x4[44], x4[43]);
+ x5[44] = _mm_add_epi32(x4[44], x4[43]);
+ x5[48] = _mm_add_epi32(x4[48], x4[55]);
+ x5[55] = _mm_sub_epi32(x4[48], x4[55]);
+ x5[49] = _mm_add_epi32(x4[49], x4[54]);
+ x5[54] = _mm_sub_epi32(x4[49], x4[54]);
+ x5[50] = _mm_add_epi32(x4[50], x4[53]);
+ x5[53] = _mm_sub_epi32(x4[50], x4[53]);
+ x5[51] = _mm_add_epi32(x4[51], x4[52]);
+ x5[52] = _mm_sub_epi32(x4[51], x4[52]);
+ x5[56] = _mm_sub_epi32(x4[63], x4[56]);
+ x5[63] = _mm_add_epi32(x4[63], x4[56]);
+ x5[57] = _mm_sub_epi32(x4[62], x4[57]);
+ x5[62] = _mm_add_epi32(x4[62], x4[57]);
+ x5[58] = _mm_sub_epi32(x4[61], x4[58]);
+ x5[61] = _mm_add_epi32(x4[61], x4[58]);
+ x5[59] = _mm_sub_epi32(x4[60], x4[59]);
+ x5[60] = _mm_add_epi32(x4[60], x4[59]);
+
+ // stage 6
+ __m128i x6[64];
+ btf_32_type0_sse4_1_new(cospi_p32, cospi_p32, x5[0], x5[1], x6[0], x6[1],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p48, cospi_p16, x5[2], x5[3], x6[2], x6[3],
+ __rounding, cos_bit);
+ x6[4] = _mm_add_epi32(x5[4], x5[5]);
+ x6[5] = _mm_sub_epi32(x5[4], x5[5]);
+ x6[6] = _mm_sub_epi32(x5[7], x5[6]);
+ x6[7] = _mm_add_epi32(x5[7], x5[6]);
+ x6[8] = x5[8];
+ btf_32_type0_sse4_1_new(cospi_m16, cospi_p48, x5[9], x5[14], x6[9], x6[14],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m48, cospi_m16, x5[10], x5[13], x6[10], x6[13],
+ __rounding, cos_bit);
+ x6[11] = x5[11];
+ x6[12] = x5[12];
+ x6[15] = x5[15];
+ x6[16] = _mm_add_epi32(x5[16], x5[19]);
+ x6[19] = _mm_sub_epi32(x5[16], x5[19]);
+ x6[17] = _mm_add_epi32(x5[17], x5[18]);
+ x6[18] = _mm_sub_epi32(x5[17], x5[18]);
+ x6[20] = _mm_sub_epi32(x5[23], x5[20]);
+ x6[23] = _mm_add_epi32(x5[23], x5[20]);
+ x6[21] = _mm_sub_epi32(x5[22], x5[21]);
+ x6[22] = _mm_add_epi32(x5[22], x5[21]);
+ x6[24] = _mm_add_epi32(x5[24], x5[27]);
+ x6[27] = _mm_sub_epi32(x5[24], x5[27]);
+ x6[25] = _mm_add_epi32(x5[25], x5[26]);
+ x6[26] = _mm_sub_epi32(x5[25], x5[26]);
+ x6[28] = _mm_sub_epi32(x5[31], x5[28]);
+ x6[31] = _mm_add_epi32(x5[31], x5[28]);
+ x6[29] = _mm_sub_epi32(x5[30], x5[29]);
+ x6[30] = _mm_add_epi32(x5[30], x5[29]);
+ x6[32] = x5[32];
+ x6[33] = x5[33];
+ btf_32_type0_sse4_1_new(cospi_m08, cospi_p56, x5[34], x5[61], x6[34], x6[61],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m08, cospi_p56, x5[35], x5[60], x6[35], x6[60],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m56, cospi_m08, x5[36], x5[59], x6[36], x6[59],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m56, cospi_m08, x5[37], x5[58], x6[37], x6[58],
+ __rounding, cos_bit);
+ x6[38] = x5[38];
+ x6[39] = x5[39];
+ x6[40] = x5[40];
+ x6[41] = x5[41];
+ btf_32_type0_sse4_1_new(cospi_m40, cospi_p24, x5[42], x5[53], x6[42], x6[53],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m40, cospi_p24, x5[43], x5[52], x6[43], x6[52],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m24, cospi_m40, x5[44], x5[51], x6[44], x6[51],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m24, cospi_m40, x5[45], x5[50], x6[45], x6[50],
+ __rounding, cos_bit);
+ x6[46] = x5[46];
+ x6[47] = x5[47];
+ x6[48] = x5[48];
+ x6[49] = x5[49];
+ x6[54] = x5[54];
+ x6[55] = x5[55];
+ x6[56] = x5[56];
+ x6[57] = x5[57];
+ x6[62] = x5[62];
+ x6[63] = x5[63];
+
+ // stage 7
+ __m128i x7[64];
+ x7[0] = x6[0];
+ x7[1] = x6[1];
+ x7[2] = x6[2];
+ x7[3] = x6[3];
+ btf_32_type1_sse4_1_new(cospi_p56, cospi_p08, x6[4], x6[7], x7[4], x7[7],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p24, cospi_p40, x6[5], x6[6], x7[5], x7[6],
+ __rounding, cos_bit);
+ x7[8] = _mm_add_epi32(x6[8], x6[9]);
+ x7[9] = _mm_sub_epi32(x6[8], x6[9]);
+ x7[10] = _mm_sub_epi32(x6[11], x6[10]);
+ x7[11] = _mm_add_epi32(x6[11], x6[10]);
+ x7[12] = _mm_add_epi32(x6[12], x6[13]);
+ x7[13] = _mm_sub_epi32(x6[12], x6[13]);
+ x7[14] = _mm_sub_epi32(x6[15], x6[14]);
+ x7[15] = _mm_add_epi32(x6[15], x6[14]);
+ x7[16] = x6[16];
+ btf_32_type0_sse4_1_new(cospi_m08, cospi_p56, x6[17], x6[30], x7[17], x7[30],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m56, cospi_m08, x6[18], x6[29], x7[18], x7[29],
+ __rounding, cos_bit);
+ x7[19] = x6[19];
+ x7[20] = x6[20];
+ btf_32_type0_sse4_1_new(cospi_m40, cospi_p24, x6[21], x6[26], x7[21], x7[26],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m24, cospi_m40, x6[22], x6[25], x7[22], x7[25],
+ __rounding, cos_bit);
+ x7[23] = x6[23];
+ x7[24] = x6[24];
+ x7[27] = x6[27];
+ x7[28] = x6[28];
+ x7[31] = x6[31];
+ x7[32] = _mm_add_epi32(x6[32], x6[35]);
+ x7[35] = _mm_sub_epi32(x6[32], x6[35]);
+ x7[33] = _mm_add_epi32(x6[33], x6[34]);
+ x7[34] = _mm_sub_epi32(x6[33], x6[34]);
+ x7[36] = _mm_sub_epi32(x6[39], x6[36]);
+ x7[39] = _mm_add_epi32(x6[39], x6[36]);
+ x7[37] = _mm_sub_epi32(x6[38], x6[37]);
+ x7[38] = _mm_add_epi32(x6[38], x6[37]);
+ x7[40] = _mm_add_epi32(x6[40], x6[43]);
+ x7[43] = _mm_sub_epi32(x6[40], x6[43]);
+ x7[41] = _mm_add_epi32(x6[41], x6[42]);
+ x7[42] = _mm_sub_epi32(x6[41], x6[42]);
+ x7[44] = _mm_sub_epi32(x6[47], x6[44]);
+ x7[47] = _mm_add_epi32(x6[47], x6[44]);
+ x7[45] = _mm_sub_epi32(x6[46], x6[45]);
+ x7[46] = _mm_add_epi32(x6[46], x6[45]);
+ x7[48] = _mm_add_epi32(x6[48], x6[51]);
+ x7[51] = _mm_sub_epi32(x6[48], x6[51]);
+ x7[49] = _mm_add_epi32(x6[49], x6[50]);
+ x7[50] = _mm_sub_epi32(x6[49], x6[50]);
+ x7[52] = _mm_sub_epi32(x6[55], x6[52]);
+ x7[55] = _mm_add_epi32(x6[55], x6[52]);
+ x7[53] = _mm_sub_epi32(x6[54], x6[53]);
+ x7[54] = _mm_add_epi32(x6[54], x6[53]);
+ x7[56] = _mm_add_epi32(x6[56], x6[59]);
+ x7[59] = _mm_sub_epi32(x6[56], x6[59]);
+ x7[57] = _mm_add_epi32(x6[57], x6[58]);
+ x7[58] = _mm_sub_epi32(x6[57], x6[58]);
+ x7[60] = _mm_sub_epi32(x6[63], x6[60]);
+ x7[63] = _mm_add_epi32(x6[63], x6[60]);
+ x7[61] = _mm_sub_epi32(x6[62], x6[61]);
+ x7[62] = _mm_add_epi32(x6[62], x6[61]);
+
+ // stage 8
+ __m128i x8[64];
+ x8[0] = x7[0];
+ x8[1] = x7[1];
+ x8[2] = x7[2];
+ x8[3] = x7[3];
+ x8[4] = x7[4];
+ x8[5] = x7[5];
+ x8[6] = x7[6];
+ x8[7] = x7[7];
+ btf_32_type1_sse4_1_new(cospi_p60, cospi_p04, x7[8], x7[15], x8[8], x8[15],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p28, cospi_p36, x7[9], x7[14], x8[9], x8[14],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p44, cospi_p20, x7[10], x7[13], x8[10], x8[13],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p12, cospi_p52, x7[11], x7[12], x8[11], x8[12],
+ __rounding, cos_bit);
+ x8[16] = _mm_add_epi32(x7[16], x7[17]);
+ x8[17] = _mm_sub_epi32(x7[16], x7[17]);
+ x8[18] = _mm_sub_epi32(x7[19], x7[18]);
+ x8[19] = _mm_add_epi32(x7[19], x7[18]);
+ x8[20] = _mm_add_epi32(x7[20], x7[21]);
+ x8[21] = _mm_sub_epi32(x7[20], x7[21]);
+ x8[22] = _mm_sub_epi32(x7[23], x7[22]);
+ x8[23] = _mm_add_epi32(x7[23], x7[22]);
+ x8[24] = _mm_add_epi32(x7[24], x7[25]);
+ x8[25] = _mm_sub_epi32(x7[24], x7[25]);
+ x8[26] = _mm_sub_epi32(x7[27], x7[26]);
+ x8[27] = _mm_add_epi32(x7[27], x7[26]);
+ x8[28] = _mm_add_epi32(x7[28], x7[29]);
+ x8[29] = _mm_sub_epi32(x7[28], x7[29]);
+ x8[30] = _mm_sub_epi32(x7[31], x7[30]);
+ x8[31] = _mm_add_epi32(x7[31], x7[30]);
+ x8[32] = x7[32];
+ btf_32_type0_sse4_1_new(cospi_m04, cospi_p60, x7[33], x7[62], x8[33], x8[62],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m60, cospi_m04, x7[34], x7[61], x8[34], x8[61],
+ __rounding, cos_bit);
+ x8[35] = x7[35];
+ x8[36] = x7[36];
+ btf_32_type0_sse4_1_new(cospi_m36, cospi_p28, x7[37], x7[58], x8[37], x8[58],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m28, cospi_m36, x7[38], x7[57], x8[38], x8[57],
+ __rounding, cos_bit);
+ x8[39] = x7[39];
+ x8[40] = x7[40];
+ btf_32_type0_sse4_1_new(cospi_m20, cospi_p44, x7[41], x7[54], x8[41], x8[54],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m44, cospi_m20, x7[42], x7[53], x8[42], x8[53],
+ __rounding, cos_bit);
+ x8[43] = x7[43];
+ x8[44] = x7[44];
+ btf_32_type0_sse4_1_new(cospi_m52, cospi_p12, x7[45], x7[50], x8[45], x8[50],
+ __rounding, cos_bit);
+ btf_32_type0_sse4_1_new(cospi_m12, cospi_m52, x7[46], x7[49], x8[46], x8[49],
+ __rounding, cos_bit);
+ x8[47] = x7[47];
+ x8[48] = x7[48];
+ x8[51] = x7[51];
+ x8[52] = x7[52];
+ x8[55] = x7[55];
+ x8[56] = x7[56];
+ x8[59] = x7[59];
+ x8[60] = x7[60];
+ x8[63] = x7[63];
+
+ // stage 9
+ __m128i x9[64];
+ x9[0] = x8[0];
+ x9[1] = x8[1];
+ x9[2] = x8[2];
+ x9[3] = x8[3];
+ x9[4] = x8[4];
+ x9[5] = x8[5];
+ x9[6] = x8[6];
+ x9[7] = x8[7];
+ x9[8] = x8[8];
+ x9[9] = x8[9];
+ x9[10] = x8[10];
+ x9[11] = x8[11];
+ x9[12] = x8[12];
+ x9[13] = x8[13];
+ x9[14] = x8[14];
+ x9[15] = x8[15];
+ btf_32_type1_sse4_1_new(cospi_p62, cospi_p02, x8[16], x8[31], x9[16], x9[31],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p30, cospi_p34, x8[17], x8[30], x9[17], x9[30],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p46, cospi_p18, x8[18], x8[29], x9[18], x9[29],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p14, cospi_p50, x8[19], x8[28], x9[19], x9[28],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p54, cospi_p10, x8[20], x8[27], x9[20], x9[27],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p22, cospi_p42, x8[21], x8[26], x9[21], x9[26],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p38, cospi_p26, x8[22], x8[25], x9[22], x9[25],
+ __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p06, cospi_p58, x8[23], x8[24], x9[23], x9[24],
+ __rounding, cos_bit);
+ x9[32] = _mm_add_epi32(x8[32], x8[33]);
+ x9[33] = _mm_sub_epi32(x8[32], x8[33]);
+ x9[34] = _mm_sub_epi32(x8[35], x8[34]);
+ x9[35] = _mm_add_epi32(x8[35], x8[34]);
+ x9[36] = _mm_add_epi32(x8[36], x8[37]);
+ x9[37] = _mm_sub_epi32(x8[36], x8[37]);
+ x9[38] = _mm_sub_epi32(x8[39], x8[38]);
+ x9[39] = _mm_add_epi32(x8[39], x8[38]);
+ x9[40] = _mm_add_epi32(x8[40], x8[41]);
+ x9[41] = _mm_sub_epi32(x8[40], x8[41]);
+ x9[42] = _mm_sub_epi32(x8[43], x8[42]);
+ x9[43] = _mm_add_epi32(x8[43], x8[42]);
+ x9[44] = _mm_add_epi32(x8[44], x8[45]);
+ x9[45] = _mm_sub_epi32(x8[44], x8[45]);
+ x9[46] = _mm_sub_epi32(x8[47], x8[46]);
+ x9[47] = _mm_add_epi32(x8[47], x8[46]);
+ x9[48] = _mm_add_epi32(x8[48], x8[49]);
+ x9[49] = _mm_sub_epi32(x8[48], x8[49]);
+ x9[50] = _mm_sub_epi32(x8[51], x8[50]);
+ x9[51] = _mm_add_epi32(x8[51], x8[50]);
+ x9[52] = _mm_add_epi32(x8[52], x8[53]);
+ x9[53] = _mm_sub_epi32(x8[52], x8[53]);
+ x9[54] = _mm_sub_epi32(x8[55], x8[54]);
+ x9[55] = _mm_add_epi32(x8[55], x8[54]);
+ x9[56] = _mm_add_epi32(x8[56], x8[57]);
+ x9[57] = _mm_sub_epi32(x8[56], x8[57]);
+ x9[58] = _mm_sub_epi32(x8[59], x8[58]);
+ x9[59] = _mm_add_epi32(x8[59], x8[58]);
+ x9[60] = _mm_add_epi32(x8[60], x8[61]);
+ x9[61] = _mm_sub_epi32(x8[60], x8[61]);
+ x9[62] = _mm_sub_epi32(x8[63], x8[62]);
+ x9[63] = _mm_add_epi32(x8[63], x8[62]);
+
+ // stage 10
+ __m128i x10[64];
+ x10[0] = x9[0];
+ x10[1] = x9[1];
+ x10[2] = x9[2];
+ x10[3] = x9[3];
+ x10[4] = x9[4];
+ x10[5] = x9[5];
+ x10[6] = x9[6];
+ x10[7] = x9[7];
+ x10[8] = x9[8];
+ x10[9] = x9[9];
+ x10[10] = x9[10];
+ x10[11] = x9[11];
+ x10[12] = x9[12];
+ x10[13] = x9[13];
+ x10[14] = x9[14];
+ x10[15] = x9[15];
+ x10[16] = x9[16];
+ x10[17] = x9[17];
+ x10[18] = x9[18];
+ x10[19] = x9[19];
+ x10[20] = x9[20];
+ x10[21] = x9[21];
+ x10[22] = x9[22];
+ x10[23] = x9[23];
+ x10[24] = x9[24];
+ x10[25] = x9[25];
+ x10[26] = x9[26];
+ x10[27] = x9[27];
+ x10[28] = x9[28];
+ x10[29] = x9[29];
+ x10[30] = x9[30];
+ x10[31] = x9[31];
+ btf_32_type1_sse4_1_new(cospi_p63, cospi_p01, x9[32], x9[63], x10[32],
+ x10[63], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p31, cospi_p33, x9[33], x9[62], x10[33],
+ x10[62], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p47, cospi_p17, x9[34], x9[61], x10[34],
+ x10[61], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p15, cospi_p49, x9[35], x9[60], x10[35],
+ x10[60], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p55, cospi_p09, x9[36], x9[59], x10[36],
+ x10[59], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p23, cospi_p41, x9[37], x9[58], x10[37],
+ x10[58], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p39, cospi_p25, x9[38], x9[57], x10[38],
+ x10[57], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p07, cospi_p57, x9[39], x9[56], x10[39],
+ x10[56], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p59, cospi_p05, x9[40], x9[55], x10[40],
+ x10[55], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p27, cospi_p37, x9[41], x9[54], x10[41],
+ x10[54], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p43, cospi_p21, x9[42], x9[53], x10[42],
+ x10[53], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p11, cospi_p53, x9[43], x9[52], x10[43],
+ x10[52], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p51, cospi_p13, x9[44], x9[51], x10[44],
+ x10[51], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p19, cospi_p45, x9[45], x9[50], x10[45],
+ x10[50], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p35, cospi_p29, x9[46], x9[49], x10[46],
+ x10[49], __rounding, cos_bit);
+ btf_32_type1_sse4_1_new(cospi_p03, cospi_p61, x9[47], x9[48], x10[47],
+ x10[48], __rounding, cos_bit);
+
+ startidx = 0 * outstride;
+ endidx = 63 * outstride;
+ // stage 11
+ output[startidx] = x10[0];
+ output[endidx] = x10[63];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[32];
+ output[endidx] = x10[31];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[16];
+ output[endidx] = x10[47];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[48];
+ output[endidx] = x10[15];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[8];
+ output[endidx] = x10[55];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[40];
+ output[endidx] = x10[23];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[24];
+ output[endidx] = x10[39];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[56];
+ output[endidx] = x10[7];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[4];
+ output[endidx] = x10[59];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[36];
+ output[endidx] = x10[27];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[20];
+ output[endidx] = x10[43];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[52];
+ output[endidx] = x10[11];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[12];
+ output[endidx] = x10[51];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[44];
+ output[endidx] = x10[19];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[28];
+ output[endidx] = x10[35];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[60];
+ output[endidx] = x10[3];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[2];
+ output[endidx] = x10[61];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[34];
+ output[endidx] = x10[29];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[18];
+ output[endidx] = x10[45];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[50];
+ output[endidx] = x10[13];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[10];
+ output[endidx] = x10[53];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[42];
+ output[endidx] = x10[21];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[26];
+ output[endidx] = x10[37];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[58];
+ output[endidx] = x10[5];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[6];
+ output[endidx] = x10[57];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[38];
+ output[endidx] = x10[25];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[22];
+ output[endidx] = x10[41];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[54];
+ output[endidx] = x10[9];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[14];
+ output[endidx] = x10[49];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[46];
+ output[endidx] = x10[17];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[30];
+ output[endidx] = x10[33];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x10[62];
+ output[endidx] = x10[1];
+}
+
+// 32-point identity "transform": no rotation at all, every row is simply
+// scaled by 4 (a left shift of 2) to match the fixed-point gain the other
+// 32-point transforms produce. Rows are col_num vectors apart in memory.
+void av1_idtx32_sse4_1(__m128i *input, __m128i *output, int cos_bit,
+                       const int col_num) {
+  (void)cos_bit;  // identity path needs no cosine constants
+  const __m128i *in = input;
+  __m128i *out = output;
+  for (int r = 0; r < 32; ++r, in += col_num, out += col_num) {
+    *out = _mm_slli_epi32(*in, 2);
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_avx2.c b/third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_avx2.c
new file mode 100644
index 0000000000..b143df3523
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_avx2.c
@@ -0,0 +1,3010 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "config/av1_rtcd.h"
+
+#include "av1/common/enums.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/encoder/x86/av1_fwd_txfm_avx2.h"
+#include "av1/common/x86/av1_txfm_sse2.h"
+#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
+#include "av1/encoder/x86/av1_txfm1d_sse4.h"
+#include "av1/encoder/x86/av1_fwd_txfm_sse2.h"
+#include "aom_dsp/x86/txfm_common_avx2.h"
+
+static INLINE void fdct16x16_new_avx2(const __m256i *input, __m256i *output,  // 1-D 16-point forward DCT; each __m256i holds 16 16-bit lanes (16 columns at once)
+                                      int8_t cos_bit) {
+  const int32_t *cospi = cospi_arr(cos_bit);  // cosine table at this fixed-point precision
+  const __m256i _r = _mm256_set1_epi32(1 << (cos_bit - 1));  // rounding offset (0.5 in fixed point)
+
+  __m256i cospi_m32_p32 = pair_set_w16_epi16(-cospi[32], cospi[32]);  // packed (lo,hi) cos-pair constants for the 16-bit madd-based rotations below
+  __m256i cospi_p32_p32 = pair_set_w16_epi16(cospi[32], cospi[32]);
+  __m256i cospi_p32_m32 = pair_set_w16_epi16(cospi[32], -cospi[32]);
+  __m256i cospi_p48_p16 = pair_set_w16_epi16(cospi[48], cospi[16]);
+  __m256i cospi_m16_p48 = pair_set_w16_epi16(-cospi[16], cospi[48]);
+  __m256i cospi_m48_m16 = pair_set_w16_epi16(-cospi[48], -cospi[16]);
+  __m256i cospi_p56_p08 = pair_set_w16_epi16(cospi[56], cospi[8]);
+  __m256i cospi_m08_p56 = pair_set_w16_epi16(-cospi[8], cospi[56]);
+  __m256i cospi_p24_p40 = pair_set_w16_epi16(cospi[24], cospi[40]);
+  __m256i cospi_m40_p24 = pair_set_w16_epi16(-cospi[40], cospi[24]);
+  __m256i cospi_p60_p04 = pair_set_w16_epi16(cospi[60], cospi[4]);
+  __m256i cospi_m04_p60 = pair_set_w16_epi16(-cospi[4], cospi[60]);
+  __m256i cospi_p28_p36 = pair_set_w16_epi16(cospi[28], cospi[36]);
+  __m256i cospi_m36_p28 = pair_set_w16_epi16(-cospi[36], cospi[28]);
+  __m256i cospi_p44_p20 = pair_set_w16_epi16(cospi[44], cospi[20]);
+  __m256i cospi_m20_p44 = pair_set_w16_epi16(-cospi[20], cospi[44]);
+  __m256i cospi_p12_p52 = pair_set_w16_epi16(cospi[12], cospi[52]);
+  __m256i cospi_m52_p12 = pair_set_w16_epi16(-cospi[52], cospi[12]);
+
+  // stage 1: initial butterflies pairing input[i] with input[15-i] into sums/differences
+  __m256i x1[16];
+  btf_16_adds_subs_out_avx2(&x1[0], &x1[15], input[0], input[15]);
+  btf_16_adds_subs_out_avx2(&x1[1], &x1[14], input[1], input[14]);
+  btf_16_adds_subs_out_avx2(&x1[2], &x1[13], input[2], input[13]);
+  btf_16_adds_subs_out_avx2(&x1[3], &x1[12], input[3], input[12]);
+  btf_16_adds_subs_out_avx2(&x1[4], &x1[11], input[4], input[11]);
+  btf_16_adds_subs_out_avx2(&x1[5], &x1[10], input[5], input[10]);
+  btf_16_adds_subs_out_avx2(&x1[6], &x1[9], input[6], input[9]);
+  btf_16_adds_subs_out_avx2(&x1[7], &x1[8], input[7], input[8]);
+
+  // stage 2: 8-point butterflies on the sum half; +/-cos(pi/4) rotations on part of the difference half
+  btf_16_adds_subs_avx2(&x1[0], &x1[7]);
+  btf_16_adds_subs_avx2(&x1[1], &x1[6]);
+  btf_16_adds_subs_avx2(&x1[2], &x1[5]);
+  btf_16_adds_subs_avx2(&x1[3], &x1[4]);
+  btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[10], &x1[13], _r, cos_bit);
+  btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[11], &x1[12], _r, cos_bit);
+
+  // stage 3: 4-point butterflies on x1[0..3]; recombine the odd-half terms
+  btf_16_adds_subs_avx2(&x1[0], &x1[3]);
+  btf_16_adds_subs_avx2(&x1[1], &x1[2]);
+  btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[5], &x1[6], _r, cos_bit);
+  btf_16_adds_subs_avx2(&x1[8], &x1[11]);
+  btf_16_adds_subs_avx2(&x1[9], &x1[10]);
+  btf_16_adds_subs_avx2(&x1[15], &x1[12]);  // note reversed operand order: sum lands in x1[15]
+  btf_16_adds_subs_avx2(&x1[14], &x1[13]);
+
+  // stage 4: final rotations for the DC/Nyquist pair and the 4-point odd terms
+  btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[0], &x1[1], _r, cos_bit);
+  btf_16_w16_avx2(cospi_p48_p16, cospi_m16_p48, &x1[2], &x1[3], _r, cos_bit);
+  btf_16_adds_subs_avx2(&x1[4], &x1[5]);
+  btf_16_adds_subs_avx2(&x1[7], &x1[6]);
+  btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[9], &x1[14], _r, cos_bit);
+  btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[10], &x1[13], _r, cos_bit);
+
+  // stage 5: rotations producing the odd 8-point outputs; butterflies on the 16-point odd half
+  btf_16_w16_avx2(cospi_p56_p08, cospi_m08_p56, &x1[4], &x1[7], _r, cos_bit);
+  btf_16_w16_avx2(cospi_p24_p40, cospi_m40_p24, &x1[5], &x1[6], _r, cos_bit);
+  btf_16_adds_subs_avx2(&x1[8], &x1[9]);
+  btf_16_adds_subs_avx2(&x1[11], &x1[10]);
+  btf_16_adds_subs_avx2(&x1[12], &x1[13]);
+  btf_16_adds_subs_avx2(&x1[15], &x1[14]);
+
+  // stage 6: last rotations yielding the odd 16-point coefficients
+  btf_16_w16_avx2(cospi_p60_p04, cospi_m04_p60, &x1[8], &x1[15], _r, cos_bit);
+  btf_16_w16_avx2(cospi_p28_p36, cospi_m36_p28, &x1[9], &x1[14], _r, cos_bit);
+  btf_16_w16_avx2(cospi_p44_p20, cospi_m20_p44, &x1[10], &x1[13], _r, cos_bit);
+  btf_16_w16_avx2(cospi_p12_p52, cospi_m52_p12, &x1[11], &x1[12], _r, cos_bit);
+
+  // stage 7: write out in the fdct's standard interleaved (bit-reversal style) order
+  output[0] = x1[0];
+  output[1] = x1[8];
+  output[2] = x1[4];
+  output[3] = x1[12];
+  output[4] = x1[2];
+  output[5] = x1[10];
+  output[6] = x1[6];
+  output[7] = x1[14];
+  output[8] = x1[1];
+  output[9] = x1[9];
+  output[10] = x1[5];
+  output[11] = x1[13];
+  output[12] = x1[3];
+  output[13] = x1[11];
+  output[14] = x1[7];
+  output[15] = x1[15];
+}
+
+static INLINE void fdct16x32_avx2(const __m256i *input, __m256i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i _r = _mm256_set1_epi32(1 << (cos_bit - 1));
+
+ __m256i cospi_m32_p32 = pair_set_w16_epi16(-cospi[32], cospi[32]);
+ __m256i cospi_p32_p32 = pair_set_w16_epi16(cospi[32], cospi[32]);
+ __m256i cospi_m16_p48 = pair_set_w16_epi16(-cospi[16], cospi[48]);
+ __m256i cospi_p48_p16 = pair_set_w16_epi16(cospi[48], cospi[16]);
+ __m256i cospi_m48_m16 = pair_set_w16_epi16(-cospi[48], -cospi[16]);
+ __m256i cospi_p32_m32 = pair_set_w16_epi16(cospi[32], -cospi[32]);
+ __m256i cospi_p56_p08 = pair_set_w16_epi16(cospi[56], cospi[8]);
+ __m256i cospi_m08_p56 = pair_set_w16_epi16(-cospi[8], cospi[56]);
+ __m256i cospi_p24_p40 = pair_set_w16_epi16(cospi[24], cospi[40]);
+ __m256i cospi_m40_p24 = pair_set_w16_epi16(-cospi[40], cospi[24]);
+ __m256i cospi_m56_m08 = pair_set_w16_epi16(-cospi[56], -cospi[8]);
+ __m256i cospi_m24_m40 = pair_set_w16_epi16(-cospi[24], -cospi[40]);
+ __m256i cospi_p60_p04 = pair_set_w16_epi16(cospi[60], cospi[4]);
+ __m256i cospi_m04_p60 = pair_set_w16_epi16(-cospi[4], cospi[60]);
+ __m256i cospi_p28_p36 = pair_set_w16_epi16(cospi[28], cospi[36]);
+ __m256i cospi_m36_p28 = pair_set_w16_epi16(-cospi[36], cospi[28]);
+ __m256i cospi_p44_p20 = pair_set_w16_epi16(cospi[44], cospi[20]);
+ __m256i cospi_m20_p44 = pair_set_w16_epi16(-cospi[20], cospi[44]);
+ __m256i cospi_p12_p52 = pair_set_w16_epi16(cospi[12], cospi[52]);
+ __m256i cospi_m52_p12 = pair_set_w16_epi16(-cospi[52], cospi[12]);
+ __m256i cospi_p62_p02 = pair_set_w16_epi16(cospi[62], cospi[2]);
+ __m256i cospi_m02_p62 = pair_set_w16_epi16(-cospi[2], cospi[62]);
+ __m256i cospi_p30_p34 = pair_set_w16_epi16(cospi[30], cospi[34]);
+ __m256i cospi_m34_p30 = pair_set_w16_epi16(-cospi[34], cospi[30]);
+ __m256i cospi_p46_p18 = pair_set_w16_epi16(cospi[46], cospi[18]);
+ __m256i cospi_m18_p46 = pair_set_w16_epi16(-cospi[18], cospi[46]);
+ __m256i cospi_p14_p50 = pair_set_w16_epi16(cospi[14], cospi[50]);
+ __m256i cospi_m50_p14 = pair_set_w16_epi16(-cospi[50], cospi[14]);
+ __m256i cospi_p54_p10 = pair_set_w16_epi16(cospi[54], cospi[10]);
+ __m256i cospi_m10_p54 = pair_set_w16_epi16(-cospi[10], cospi[54]);
+ __m256i cospi_p22_p42 = pair_set_w16_epi16(cospi[22], cospi[42]);
+ __m256i cospi_m42_p22 = pair_set_w16_epi16(-cospi[42], cospi[22]);
+ __m256i cospi_p38_p26 = pair_set_w16_epi16(cospi[38], cospi[26]);
+ __m256i cospi_m26_p38 = pair_set_w16_epi16(-cospi[26], cospi[38]);
+ __m256i cospi_p06_p58 = pair_set_w16_epi16(cospi[6], cospi[58]);
+ __m256i cospi_m58_p06 = pair_set_w16_epi16(-cospi[58], cospi[6]);
+
+ // stage 1
+ __m256i x1[32];
+ btf_16_adds_subs_out_avx2(&x1[0], &x1[31], input[0], input[31]);
+ btf_16_adds_subs_out_avx2(&x1[1], &x1[30], input[1], input[30]);
+ btf_16_adds_subs_out_avx2(&x1[2], &x1[29], input[2], input[29]);
+ btf_16_adds_subs_out_avx2(&x1[3], &x1[28], input[3], input[28]);
+ btf_16_adds_subs_out_avx2(&x1[4], &x1[27], input[4], input[27]);
+ btf_16_adds_subs_out_avx2(&x1[5], &x1[26], input[5], input[26]);
+ btf_16_adds_subs_out_avx2(&x1[6], &x1[25], input[6], input[25]);
+ btf_16_adds_subs_out_avx2(&x1[7], &x1[24], input[7], input[24]);
+ btf_16_adds_subs_out_avx2(&x1[8], &x1[23], input[8], input[23]);
+ btf_16_adds_subs_out_avx2(&x1[9], &x1[22], input[9], input[22]);
+ btf_16_adds_subs_out_avx2(&x1[10], &x1[21], input[10], input[21]);
+ btf_16_adds_subs_out_avx2(&x1[11], &x1[20], input[11], input[20]);
+ btf_16_adds_subs_out_avx2(&x1[12], &x1[19], input[12], input[19]);
+ btf_16_adds_subs_out_avx2(&x1[13], &x1[18], input[13], input[18]);
+ btf_16_adds_subs_out_avx2(&x1[14], &x1[17], input[14], input[17]);
+ btf_16_adds_subs_out_avx2(&x1[15], &x1[16], input[15], input[16]);
+
+ // stage 2
+ btf_16_adds_subs_avx2(&x1[0], &x1[15]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[14]);
+ btf_16_adds_subs_avx2(&x1[2], &x1[13]);
+ btf_16_adds_subs_avx2(&x1[3], &x1[12]);
+ btf_16_adds_subs_avx2(&x1[4], &x1[11]);
+ btf_16_adds_subs_avx2(&x1[5], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[6], &x1[9]);
+ btf_16_adds_subs_avx2(&x1[7], &x1[8]);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[20], &x1[27], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[21], &x1[26], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[22], &x1[25], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[23], &x1[24], _r, cos_bit);
+
+ // stage 3
+ btf_16_adds_subs_avx2(&x1[0], &x1[7]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[6]);
+ btf_16_adds_subs_avx2(&x1[2], &x1[5]);
+ btf_16_adds_subs_avx2(&x1[3], &x1[4]);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[10], &x1[13], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[11], &x1[12], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[16], &x1[23]);
+ btf_16_adds_subs_avx2(&x1[17], &x1[22]);
+ btf_16_adds_subs_avx2(&x1[18], &x1[21]);
+ btf_16_adds_subs_avx2(&x1[19], &x1[20]);
+ btf_16_adds_subs_avx2(&x1[31], &x1[24]);
+ btf_16_adds_subs_avx2(&x1[30], &x1[25]);
+ btf_16_adds_subs_avx2(&x1[29], &x1[26]);
+ btf_16_adds_subs_avx2(&x1[28], &x1[27]);
+
+ // stage 4
+ btf_16_adds_subs_avx2(&x1[0], &x1[3]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[2]);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[5], &x1[6], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[8], &x1[11]);
+ btf_16_adds_subs_avx2(&x1[9], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[15], &x1[12]);
+ btf_16_adds_subs_avx2(&x1[14], &x1[13]);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[18], &x1[29], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[19], &x1[28], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[20], &x1[27], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[21], &x1[26], _r, cos_bit);
+
+ // stage 5
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[0], &x1[1], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p48_p16, cospi_m16_p48, &x1[2], &x1[3], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[4], &x1[5]);
+ btf_16_adds_subs_avx2(&x1[7], &x1[6]);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[9], &x1[14], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[10], &x1[13], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[16], &x1[19]);
+ btf_16_adds_subs_avx2(&x1[17], &x1[18]);
+ btf_16_adds_subs_avx2(&x1[23], &x1[20]);
+ btf_16_adds_subs_avx2(&x1[22], &x1[21]);
+ btf_16_adds_subs_avx2(&x1[24], &x1[27]);
+ btf_16_adds_subs_avx2(&x1[25], &x1[26]);
+ btf_16_adds_subs_avx2(&x1[31], &x1[28]);
+ btf_16_adds_subs_avx2(&x1[30], &x1[29]);
+
+ // stage 6
+ btf_16_w16_avx2(cospi_p56_p08, cospi_m08_p56, &x1[4], &x1[7], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p24_p40, cospi_m40_p24, &x1[5], &x1[6], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[8], &x1[9]);
+ btf_16_adds_subs_avx2(&x1[11], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[12], &x1[13]);
+ btf_16_adds_subs_avx2(&x1[15], &x1[14]);
+ btf_16_w16_avx2(cospi_m08_p56, cospi_p56_p08, &x1[17], &x1[30], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m56_m08, cospi_m08_p56, &x1[18], &x1[29], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m40_p24, cospi_p24_p40, &x1[21], &x1[26], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m24_m40, cospi_m40_p24, &x1[22], &x1[25], _r, cos_bit);
+
+ // stage 7
+ btf_16_w16_avx2(cospi_p60_p04, cospi_m04_p60, &x1[8], &x1[15], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p28_p36, cospi_m36_p28, &x1[9], &x1[14], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p44_p20, cospi_m20_p44, &x1[10], &x1[13], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p12_p52, cospi_m52_p12, &x1[11], &x1[12], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[16], &x1[17]);
+ btf_16_adds_subs_avx2(&x1[19], &x1[18]);
+ btf_16_adds_subs_avx2(&x1[20], &x1[21]);
+ btf_16_adds_subs_avx2(&x1[23], &x1[22]);
+ btf_16_adds_subs_avx2(&x1[24], &x1[25]);
+ btf_16_adds_subs_avx2(&x1[27], &x1[26]);
+ btf_16_adds_subs_avx2(&x1[28], &x1[29]);
+ btf_16_adds_subs_avx2(&x1[31], &x1[30]);
+
+ // stage 8
+ btf_16_w16_avx2(cospi_p62_p02, cospi_m02_p62, &x1[16], &x1[31], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p30_p34, cospi_m34_p30, &x1[17], &x1[30], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p46_p18, cospi_m18_p46, &x1[18], &x1[29], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p14_p50, cospi_m50_p14, &x1[19], &x1[28], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p54_p10, cospi_m10_p54, &x1[20], &x1[27], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p22_p42, cospi_m42_p22, &x1[21], &x1[26], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p38_p26, cospi_m26_p38, &x1[22], &x1[25], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p06_p58, cospi_m58_p06, &x1[23], &x1[24], _r, cos_bit);
+
+ // stage 9
+ output[0] = x1[0];
+ output[1] = x1[16];
+ output[2] = x1[8];
+ output[3] = x1[24];
+ output[4] = x1[4];
+ output[5] = x1[20];
+ output[6] = x1[12];
+ output[7] = x1[28];
+ output[8] = x1[2];
+ output[9] = x1[18];
+ output[10] = x1[10];
+ output[11] = x1[26];
+ output[12] = x1[6];
+ output[13] = x1[22];
+ output[14] = x1[14];
+ output[15] = x1[30];
+ output[16] = x1[1];
+ output[17] = x1[17];
+ output[18] = x1[9];
+ output[19] = x1[25];
+ output[20] = x1[5];
+ output[21] = x1[21];
+ output[22] = x1[13];
+ output[23] = x1[29];
+ output[24] = x1[3];
+ output[25] = x1[19];
+ output[26] = x1[11];
+ output[27] = x1[27];
+ output[28] = x1[7];
+ output[29] = x1[23];
+ output[30] = x1[15];
+ output[31] = x1[31];
+}
+
+// 64-point forward DCT, processed 16 columns at a time: `input` and `output`
+// are arrays of 64 __m256i vectors, each holding 16 int16 lanes.
+// The transform is an 11-stage butterfly network: stage 1 forms the initial
+// add/sub pairs from the 64 inputs, stages 2-10 alternate in-place add/sub
+// butterflies (btf_16_adds_subs_avx2) with cospi rotations
+// (btf_16_w16_avx2, rounded by _r = 1 << (cos_bit - 1)), and stage 11
+// scatters the intermediates x1[] into frequency order in `output`.
+// `cos_bit` selects the precision of the cospi constant table.
+// NOTE(review): presumably matches the scalar av1_fdct64 reference
+// transform in av1/common — confirm against av1_fwd_txfm1d.c.
+static INLINE void fdct16x64_new_avx2(const __m256i *input, __m256i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i _r = _mm256_set1_epi32(1 << (cos_bit - 1));
+
+ // Packed 16-bit cospi coefficient pairs (one pair per rotation angle),
+ // named cospi_{sign}{a}_{sign}{b} for the pair (±cospi[a], ±cospi[b]).
+ __m256i cospi_m32_p32 = pair_set_w16_epi16(-cospi[32], cospi[32]);
+ __m256i cospi_p32_p32 = pair_set_w16_epi16(cospi[32], cospi[32]);
+ __m256i cospi_m16_p48 = pair_set_w16_epi16(-cospi[16], cospi[48]);
+ __m256i cospi_p48_p16 = pair_set_w16_epi16(cospi[48], cospi[16]);
+ __m256i cospi_m48_m16 = pair_set_w16_epi16(-cospi[48], -cospi[16]);
+ __m256i cospi_p32_m32 = pair_set_w16_epi16(cospi[32], -cospi[32]);
+ __m256i cospi_m08_p56 = pair_set_w16_epi16(-cospi[8], cospi[56]);
+ __m256i cospi_p56_p08 = pair_set_w16_epi16(cospi[56], cospi[8]);
+ __m256i cospi_m56_m08 = pair_set_w16_epi16(-cospi[56], -cospi[8]);
+ __m256i cospi_m40_p24 = pair_set_w16_epi16(-cospi[40], cospi[24]);
+ __m256i cospi_p24_p40 = pair_set_w16_epi16(cospi[24], cospi[40]);
+ __m256i cospi_m24_m40 = pair_set_w16_epi16(-cospi[24], -cospi[40]);
+ __m256i cospi_p60_p04 = pair_set_w16_epi16(cospi[60], cospi[4]);
+ __m256i cospi_m04_p60 = pair_set_w16_epi16(-cospi[4], cospi[60]);
+ __m256i cospi_p28_p36 = pair_set_w16_epi16(cospi[28], cospi[36]);
+ __m256i cospi_m36_p28 = pair_set_w16_epi16(-cospi[36], cospi[28]);
+ __m256i cospi_p44_p20 = pair_set_w16_epi16(cospi[44], cospi[20]);
+ __m256i cospi_m20_p44 = pair_set_w16_epi16(-cospi[20], cospi[44]);
+ __m256i cospi_p12_p52 = pair_set_w16_epi16(cospi[12], cospi[52]);
+ __m256i cospi_m52_p12 = pair_set_w16_epi16(-cospi[52], cospi[12]);
+ __m256i cospi_m60_m04 = pair_set_w16_epi16(-cospi[60], -cospi[4]);
+ __m256i cospi_m28_m36 = pair_set_w16_epi16(-cospi[28], -cospi[36]);
+ __m256i cospi_m44_m20 = pair_set_w16_epi16(-cospi[44], -cospi[20]);
+ __m256i cospi_m12_m52 = pair_set_w16_epi16(-cospi[12], -cospi[52]);
+ __m256i cospi_p62_p02 = pair_set_w16_epi16(cospi[62], cospi[2]);
+ __m256i cospi_m02_p62 = pair_set_w16_epi16(-cospi[2], cospi[62]);
+ __m256i cospi_p30_p34 = pair_set_w16_epi16(cospi[30], cospi[34]);
+ __m256i cospi_m34_p30 = pair_set_w16_epi16(-cospi[34], cospi[30]);
+ __m256i cospi_p46_p18 = pair_set_w16_epi16(cospi[46], cospi[18]);
+ __m256i cospi_m18_p46 = pair_set_w16_epi16(-cospi[18], cospi[46]);
+ __m256i cospi_p14_p50 = pair_set_w16_epi16(cospi[14], cospi[50]);
+ __m256i cospi_m50_p14 = pair_set_w16_epi16(-cospi[50], cospi[14]);
+ __m256i cospi_p54_p10 = pair_set_w16_epi16(cospi[54], cospi[10]);
+ __m256i cospi_m10_p54 = pair_set_w16_epi16(-cospi[10], cospi[54]);
+ __m256i cospi_p22_p42 = pair_set_w16_epi16(cospi[22], cospi[42]);
+ __m256i cospi_m42_p22 = pair_set_w16_epi16(-cospi[42], cospi[22]);
+ __m256i cospi_p38_p26 = pair_set_w16_epi16(cospi[38], cospi[26]);
+ __m256i cospi_m26_p38 = pair_set_w16_epi16(-cospi[26], cospi[38]);
+ __m256i cospi_p06_p58 = pair_set_w16_epi16(cospi[6], cospi[58]);
+ __m256i cospi_m58_p06 = pair_set_w16_epi16(-cospi[58], cospi[6]);
+ __m256i cospi_p63_p01 = pair_set_w16_epi16(cospi[63], cospi[1]);
+ __m256i cospi_m01_p63 = pair_set_w16_epi16(-cospi[1], cospi[63]);
+ __m256i cospi_p31_p33 = pair_set_w16_epi16(cospi[31], cospi[33]);
+ __m256i cospi_m33_p31 = pair_set_w16_epi16(-cospi[33], cospi[31]);
+ __m256i cospi_p47_p17 = pair_set_w16_epi16(cospi[47], cospi[17]);
+ __m256i cospi_m17_p47 = pair_set_w16_epi16(-cospi[17], cospi[47]);
+ __m256i cospi_p15_p49 = pair_set_w16_epi16(cospi[15], cospi[49]);
+ __m256i cospi_m49_p15 = pair_set_w16_epi16(-cospi[49], cospi[15]);
+ __m256i cospi_p55_p09 = pair_set_w16_epi16(cospi[55], cospi[9]);
+ __m256i cospi_m09_p55 = pair_set_w16_epi16(-cospi[9], cospi[55]);
+ __m256i cospi_p23_p41 = pair_set_w16_epi16(cospi[23], cospi[41]);
+ __m256i cospi_m41_p23 = pair_set_w16_epi16(-cospi[41], cospi[23]);
+ __m256i cospi_p39_p25 = pair_set_w16_epi16(cospi[39], cospi[25]);
+ __m256i cospi_m25_p39 = pair_set_w16_epi16(-cospi[25], cospi[39]);
+ __m256i cospi_p07_p57 = pair_set_w16_epi16(cospi[7], cospi[57]);
+ __m256i cospi_m57_p07 = pair_set_w16_epi16(-cospi[57], cospi[7]);
+ __m256i cospi_p59_p05 = pair_set_w16_epi16(cospi[59], cospi[5]);
+ __m256i cospi_m05_p59 = pair_set_w16_epi16(-cospi[5], cospi[59]);
+ __m256i cospi_p27_p37 = pair_set_w16_epi16(cospi[27], cospi[37]);
+ __m256i cospi_m37_p27 = pair_set_w16_epi16(-cospi[37], cospi[27]);
+ __m256i cospi_p43_p21 = pair_set_w16_epi16(cospi[43], cospi[21]);
+ __m256i cospi_m21_p43 = pair_set_w16_epi16(-cospi[21], cospi[43]);
+ __m256i cospi_p11_p53 = pair_set_w16_epi16(cospi[11], cospi[53]);
+ __m256i cospi_m53_p11 = pair_set_w16_epi16(-cospi[53], cospi[11]);
+ __m256i cospi_p51_p13 = pair_set_w16_epi16(cospi[51], cospi[13]);
+ __m256i cospi_m13_p51 = pair_set_w16_epi16(-cospi[13], cospi[51]);
+ __m256i cospi_p19_p45 = pair_set_w16_epi16(cospi[19], cospi[45]);
+ __m256i cospi_m45_p19 = pair_set_w16_epi16(-cospi[45], cospi[19]);
+ __m256i cospi_p35_p29 = pair_set_w16_epi16(cospi[35], cospi[29]);
+ __m256i cospi_m29_p35 = pair_set_w16_epi16(-cospi[29], cospi[35]);
+ __m256i cospi_p03_p61 = pair_set_w16_epi16(cospi[3], cospi[61]);
+ __m256i cospi_m61_p03 = pair_set_w16_epi16(-cospi[61], cospi[3]);
+
+ // stage 1: 32 mirrored add/sub pairs (input[i] +/- input[63 - i]).
+ __m256i x1[64];
+ btf_16_adds_subs_out_avx2(&x1[0], &x1[63], input[0], input[63]);
+ btf_16_adds_subs_out_avx2(&x1[1], &x1[62], input[1], input[62]);
+ btf_16_adds_subs_out_avx2(&x1[2], &x1[61], input[2], input[61]);
+ btf_16_adds_subs_out_avx2(&x1[3], &x1[60], input[3], input[60]);
+ btf_16_adds_subs_out_avx2(&x1[4], &x1[59], input[4], input[59]);
+ btf_16_adds_subs_out_avx2(&x1[5], &x1[58], input[5], input[58]);
+ btf_16_adds_subs_out_avx2(&x1[6], &x1[57], input[6], input[57]);
+ btf_16_adds_subs_out_avx2(&x1[7], &x1[56], input[7], input[56]);
+ btf_16_adds_subs_out_avx2(&x1[8], &x1[55], input[8], input[55]);
+ btf_16_adds_subs_out_avx2(&x1[9], &x1[54], input[9], input[54]);
+ btf_16_adds_subs_out_avx2(&x1[10], &x1[53], input[10], input[53]);
+ btf_16_adds_subs_out_avx2(&x1[11], &x1[52], input[11], input[52]);
+ btf_16_adds_subs_out_avx2(&x1[12], &x1[51], input[12], input[51]);
+ btf_16_adds_subs_out_avx2(&x1[13], &x1[50], input[13], input[50]);
+ btf_16_adds_subs_out_avx2(&x1[14], &x1[49], input[14], input[49]);
+ btf_16_adds_subs_out_avx2(&x1[15], &x1[48], input[15], input[48]);
+ btf_16_adds_subs_out_avx2(&x1[16], &x1[47], input[16], input[47]);
+ btf_16_adds_subs_out_avx2(&x1[17], &x1[46], input[17], input[46]);
+ btf_16_adds_subs_out_avx2(&x1[18], &x1[45], input[18], input[45]);
+ btf_16_adds_subs_out_avx2(&x1[19], &x1[44], input[19], input[44]);
+ btf_16_adds_subs_out_avx2(&x1[20], &x1[43], input[20], input[43]);
+ btf_16_adds_subs_out_avx2(&x1[21], &x1[42], input[21], input[42]);
+ btf_16_adds_subs_out_avx2(&x1[22], &x1[41], input[22], input[41]);
+ btf_16_adds_subs_out_avx2(&x1[23], &x1[40], input[23], input[40]);
+ btf_16_adds_subs_out_avx2(&x1[24], &x1[39], input[24], input[39]);
+ btf_16_adds_subs_out_avx2(&x1[25], &x1[38], input[25], input[38]);
+ btf_16_adds_subs_out_avx2(&x1[26], &x1[37], input[26], input[37]);
+ btf_16_adds_subs_out_avx2(&x1[27], &x1[36], input[27], input[36]);
+ btf_16_adds_subs_out_avx2(&x1[28], &x1[35], input[28], input[35]);
+ btf_16_adds_subs_out_avx2(&x1[29], &x1[34], input[29], input[34]);
+ btf_16_adds_subs_out_avx2(&x1[30], &x1[33], input[30], input[33]);
+ btf_16_adds_subs_out_avx2(&x1[31], &x1[32], input[31], input[32]);
+
+ // stage 2: fold the low 32 into 16 pairs; rotate the middle of the
+ // high half by +/-cospi[32] (the "half-turn" butterflies).
+ btf_16_adds_subs_avx2(&x1[0], &x1[31]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[30]);
+ btf_16_adds_subs_avx2(&x1[2], &x1[29]);
+ btf_16_adds_subs_avx2(&x1[3], &x1[28]);
+ btf_16_adds_subs_avx2(&x1[4], &x1[27]);
+ btf_16_adds_subs_avx2(&x1[5], &x1[26]);
+ btf_16_adds_subs_avx2(&x1[6], &x1[25]);
+ btf_16_adds_subs_avx2(&x1[7], &x1[24]);
+ btf_16_adds_subs_avx2(&x1[8], &x1[23]);
+ btf_16_adds_subs_avx2(&x1[9], &x1[22]);
+ btf_16_adds_subs_avx2(&x1[10], &x1[21]);
+ btf_16_adds_subs_avx2(&x1[11], &x1[20]);
+ btf_16_adds_subs_avx2(&x1[12], &x1[19]);
+ btf_16_adds_subs_avx2(&x1[13], &x1[18]);
+ btf_16_adds_subs_avx2(&x1[14], &x1[17]);
+ btf_16_adds_subs_avx2(&x1[15], &x1[16]);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[40], &x1[55], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[41], &x1[54], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[42], &x1[53], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[43], &x1[52], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[44], &x1[51], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[45], &x1[50], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[46], &x1[49], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[47], &x1[48], _r, cos_bit);
+
+ // stage 3
+ btf_16_adds_subs_avx2(&x1[0], &x1[15]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[14]);
+ btf_16_adds_subs_avx2(&x1[2], &x1[13]);
+ btf_16_adds_subs_avx2(&x1[3], &x1[12]);
+ btf_16_adds_subs_avx2(&x1[4], &x1[11]);
+ btf_16_adds_subs_avx2(&x1[5], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[6], &x1[9]);
+ btf_16_adds_subs_avx2(&x1[7], &x1[8]);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[20], &x1[27], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[21], &x1[26], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[22], &x1[25], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[23], &x1[24], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[32], &x1[47]);
+ btf_16_adds_subs_avx2(&x1[33], &x1[46]);
+ btf_16_adds_subs_avx2(&x1[34], &x1[45]);
+ btf_16_adds_subs_avx2(&x1[35], &x1[44]);
+ btf_16_adds_subs_avx2(&x1[36], &x1[43]);
+ btf_16_adds_subs_avx2(&x1[37], &x1[42]);
+ btf_16_adds_subs_avx2(&x1[38], &x1[41]);
+ btf_16_adds_subs_avx2(&x1[39], &x1[40]);
+ btf_16_adds_subs_avx2(&x1[63], &x1[48]);
+ btf_16_adds_subs_avx2(&x1[62], &x1[49]);
+ btf_16_adds_subs_avx2(&x1[61], &x1[50]);
+ btf_16_adds_subs_avx2(&x1[60], &x1[51]);
+ btf_16_adds_subs_avx2(&x1[59], &x1[52]);
+ btf_16_adds_subs_avx2(&x1[58], &x1[53]);
+ btf_16_adds_subs_avx2(&x1[57], &x1[54]);
+ btf_16_adds_subs_avx2(&x1[56], &x1[55]);
+
+ // stage 4
+ btf_16_adds_subs_avx2(&x1[0], &x1[7]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[6]);
+ btf_16_adds_subs_avx2(&x1[2], &x1[5]);
+ btf_16_adds_subs_avx2(&x1[3], &x1[4]);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[10], &x1[13], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[11], &x1[12], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[16], &x1[23]);
+ btf_16_adds_subs_avx2(&x1[17], &x1[22]);
+ btf_16_adds_subs_avx2(&x1[18], &x1[21]);
+ btf_16_adds_subs_avx2(&x1[19], &x1[20]);
+ btf_16_adds_subs_avx2(&x1[31], &x1[24]);
+ btf_16_adds_subs_avx2(&x1[30], &x1[25]);
+ btf_16_adds_subs_avx2(&x1[29], &x1[26]);
+ btf_16_adds_subs_avx2(&x1[28], &x1[27]);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[36], &x1[59], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[37], &x1[58], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[38], &x1[57], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[39], &x1[56], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[40], &x1[55], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[41], &x1[54], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[42], &x1[53], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[43], &x1[52], _r, cos_bit);
+
+ // stage 5
+ btf_16_adds_subs_avx2(&x1[0], &x1[3]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[2]);
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[5], &x1[6], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[8], &x1[11]);
+ btf_16_adds_subs_avx2(&x1[9], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[15], &x1[12]);
+ btf_16_adds_subs_avx2(&x1[14], &x1[13]);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[18], &x1[29], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[19], &x1[28], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[20], &x1[27], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[21], &x1[26], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[32], &x1[39]);
+ btf_16_adds_subs_avx2(&x1[33], &x1[38]);
+ btf_16_adds_subs_avx2(&x1[34], &x1[37]);
+ btf_16_adds_subs_avx2(&x1[35], &x1[36]);
+ btf_16_adds_subs_avx2(&x1[47], &x1[40]);
+ btf_16_adds_subs_avx2(&x1[46], &x1[41]);
+ btf_16_adds_subs_avx2(&x1[45], &x1[42]);
+ btf_16_adds_subs_avx2(&x1[44], &x1[43]);
+ btf_16_adds_subs_avx2(&x1[48], &x1[55]);
+ btf_16_adds_subs_avx2(&x1[49], &x1[54]);
+ btf_16_adds_subs_avx2(&x1[50], &x1[53]);
+ btf_16_adds_subs_avx2(&x1[51], &x1[52]);
+ btf_16_adds_subs_avx2(&x1[63], &x1[56]);
+ btf_16_adds_subs_avx2(&x1[62], &x1[57]);
+ btf_16_adds_subs_avx2(&x1[61], &x1[58]);
+ btf_16_adds_subs_avx2(&x1[60], &x1[59]);
+
+ // stage 6
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[0], &x1[1], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p48_p16, cospi_m16_p48, &x1[2], &x1[3], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[4], &x1[5]);
+ btf_16_adds_subs_avx2(&x1[7], &x1[6]);
+ btf_16_w16_avx2(cospi_m16_p48, cospi_p48_p16, &x1[9], &x1[14], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_m16, cospi_m16_p48, &x1[10], &x1[13], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[16], &x1[19]);
+ btf_16_adds_subs_avx2(&x1[17], &x1[18]);
+ btf_16_adds_subs_avx2(&x1[23], &x1[20]);
+ btf_16_adds_subs_avx2(&x1[22], &x1[21]);
+ btf_16_adds_subs_avx2(&x1[24], &x1[27]);
+ btf_16_adds_subs_avx2(&x1[25], &x1[26]);
+ btf_16_adds_subs_avx2(&x1[31], &x1[28]);
+ btf_16_adds_subs_avx2(&x1[30], &x1[29]);
+ btf_16_w16_avx2(cospi_m08_p56, cospi_p56_p08, &x1[34], &x1[61], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m08_p56, cospi_p56_p08, &x1[35], &x1[60], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m56_m08, cospi_m08_p56, &x1[36], &x1[59], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m56_m08, cospi_m08_p56, &x1[37], &x1[58], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m40_p24, cospi_p24_p40, &x1[42], &x1[53], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m40_p24, cospi_p24_p40, &x1[43], &x1[52], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m24_m40, cospi_m40_p24, &x1[44], &x1[51], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m24_m40, cospi_m40_p24, &x1[45], &x1[50], _r, cos_bit);
+
+ // stage 7
+ btf_16_w16_avx2(cospi_p56_p08, cospi_m08_p56, &x1[4], &x1[7], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p24_p40, cospi_m40_p24, &x1[5], &x1[6], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[8], &x1[9]);
+ btf_16_adds_subs_avx2(&x1[11], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[12], &x1[13]);
+ btf_16_adds_subs_avx2(&x1[15], &x1[14]);
+ btf_16_w16_avx2(cospi_m08_p56, cospi_p56_p08, &x1[17], &x1[30], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m56_m08, cospi_m08_p56, &x1[18], &x1[29], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m40_p24, cospi_p24_p40, &x1[21], &x1[26], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m24_m40, cospi_m40_p24, &x1[22], &x1[25], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[32], &x1[35]);
+ btf_16_adds_subs_avx2(&x1[33], &x1[34]);
+ btf_16_adds_subs_avx2(&x1[39], &x1[36]);
+ btf_16_adds_subs_avx2(&x1[38], &x1[37]);
+ btf_16_adds_subs_avx2(&x1[40], &x1[43]);
+ btf_16_adds_subs_avx2(&x1[41], &x1[42]);
+ btf_16_adds_subs_avx2(&x1[47], &x1[44]);
+ btf_16_adds_subs_avx2(&x1[46], &x1[45]);
+ btf_16_adds_subs_avx2(&x1[48], &x1[51]);
+ btf_16_adds_subs_avx2(&x1[49], &x1[50]);
+ btf_16_adds_subs_avx2(&x1[55], &x1[52]);
+ btf_16_adds_subs_avx2(&x1[54], &x1[53]);
+ btf_16_adds_subs_avx2(&x1[56], &x1[59]);
+ btf_16_adds_subs_avx2(&x1[57], &x1[58]);
+ btf_16_adds_subs_avx2(&x1[63], &x1[60]);
+ btf_16_adds_subs_avx2(&x1[62], &x1[61]);
+
+ // stage 8
+ btf_16_w16_avx2(cospi_p60_p04, cospi_m04_p60, &x1[8], &x1[15], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p28_p36, cospi_m36_p28, &x1[9], &x1[14], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p44_p20, cospi_m20_p44, &x1[10], &x1[13], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p12_p52, cospi_m52_p12, &x1[11], &x1[12], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[16], &x1[17]);
+ btf_16_adds_subs_avx2(&x1[19], &x1[18]);
+ btf_16_adds_subs_avx2(&x1[20], &x1[21]);
+ btf_16_adds_subs_avx2(&x1[23], &x1[22]);
+ btf_16_adds_subs_avx2(&x1[24], &x1[25]);
+ btf_16_adds_subs_avx2(&x1[27], &x1[26]);
+ btf_16_adds_subs_avx2(&x1[28], &x1[29]);
+ btf_16_adds_subs_avx2(&x1[31], &x1[30]);
+ btf_16_w16_avx2(cospi_m04_p60, cospi_p60_p04, &x1[33], &x1[62], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m60_m04, cospi_m04_p60, &x1[34], &x1[61], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m36_p28, cospi_p28_p36, &x1[37], &x1[58], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m28_m36, cospi_m36_p28, &x1[38], &x1[57], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m20_p44, cospi_p44_p20, &x1[41], &x1[54], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m44_m20, cospi_m20_p44, &x1[42], &x1[53], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m52_p12, cospi_p12_p52, &x1[45], &x1[50], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m12_m52, cospi_m52_p12, &x1[46], &x1[49], _r, cos_bit);
+
+ // stage 9
+ btf_16_w16_avx2(cospi_p62_p02, cospi_m02_p62, &x1[16], &x1[31], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p30_p34, cospi_m34_p30, &x1[17], &x1[30], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p46_p18, cospi_m18_p46, &x1[18], &x1[29], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p14_p50, cospi_m50_p14, &x1[19], &x1[28], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p54_p10, cospi_m10_p54, &x1[20], &x1[27], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p22_p42, cospi_m42_p22, &x1[21], &x1[26], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p38_p26, cospi_m26_p38, &x1[22], &x1[25], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p06_p58, cospi_m58_p06, &x1[23], &x1[24], _r, cos_bit);
+ btf_16_adds_subs_avx2(&x1[32], &x1[33]);
+ btf_16_adds_subs_avx2(&x1[35], &x1[34]);
+ btf_16_adds_subs_avx2(&x1[36], &x1[37]);
+ btf_16_adds_subs_avx2(&x1[39], &x1[38]);
+ btf_16_adds_subs_avx2(&x1[40], &x1[41]);
+ btf_16_adds_subs_avx2(&x1[43], &x1[42]);
+ btf_16_adds_subs_avx2(&x1[44], &x1[45]);
+ btf_16_adds_subs_avx2(&x1[47], &x1[46]);
+ btf_16_adds_subs_avx2(&x1[48], &x1[49]);
+ btf_16_adds_subs_avx2(&x1[51], &x1[50]);
+ btf_16_adds_subs_avx2(&x1[52], &x1[53]);
+ btf_16_adds_subs_avx2(&x1[55], &x1[54]);
+ btf_16_adds_subs_avx2(&x1[56], &x1[57]);
+ btf_16_adds_subs_avx2(&x1[59], &x1[58]);
+ btf_16_adds_subs_avx2(&x1[60], &x1[61]);
+ btf_16_adds_subs_avx2(&x1[63], &x1[62]);
+
+ // stage 10: final odd-coefficient rotations (cospi[1]..cospi[63]).
+ btf_16_w16_avx2(cospi_p63_p01, cospi_m01_p63, &x1[32], &x1[63], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p31_p33, cospi_m33_p31, &x1[33], &x1[62], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p47_p17, cospi_m17_p47, &x1[34], &x1[61], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p15_p49, cospi_m49_p15, &x1[35], &x1[60], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p55_p09, cospi_m09_p55, &x1[36], &x1[59], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p23_p41, cospi_m41_p23, &x1[37], &x1[58], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p39_p25, cospi_m25_p39, &x1[38], &x1[57], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p07_p57, cospi_m57_p07, &x1[39], &x1[56], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p59_p05, cospi_m05_p59, &x1[40], &x1[55], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p27_p37, cospi_m37_p27, &x1[41], &x1[54], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p43_p21, cospi_m21_p43, &x1[42], &x1[53], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p11_p53, cospi_m53_p11, &x1[43], &x1[52], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p51_p13, cospi_m13_p51, &x1[44], &x1[51], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p19_p45, cospi_m45_p19, &x1[45], &x1[50], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p35_p29, cospi_m29_p35, &x1[46], &x1[49], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p03_p61, cospi_m61_p03, &x1[47], &x1[48], _r, cos_bit);
+
+ // stage 11: permute intermediates into frequency order
+ // (output[k] = x1[bit-reversal-style index of k]).
+ output[0] = x1[0];
+ output[1] = x1[32];
+ output[2] = x1[16];
+ output[3] = x1[48];
+ output[4] = x1[8];
+ output[5] = x1[40];
+ output[6] = x1[24];
+ output[7] = x1[56];
+ output[8] = x1[4];
+ output[9] = x1[36];
+ output[10] = x1[20];
+ output[11] = x1[52];
+ output[12] = x1[12];
+ output[13] = x1[44];
+ output[14] = x1[28];
+ output[15] = x1[60];
+ output[16] = x1[2];
+ output[17] = x1[34];
+ output[18] = x1[18];
+ output[19] = x1[50];
+ output[20] = x1[10];
+ output[21] = x1[42];
+ output[22] = x1[26];
+ output[23] = x1[58];
+ output[24] = x1[6];
+ output[25] = x1[38];
+ output[26] = x1[22];
+ output[27] = x1[54];
+ output[28] = x1[14];
+ output[29] = x1[46];
+ output[30] = x1[30];
+ output[31] = x1[62];
+ output[32] = x1[1];
+ output[33] = x1[33];
+ output[34] = x1[17];
+ output[35] = x1[49];
+ output[36] = x1[9];
+ output[37] = x1[41];
+ output[38] = x1[25];
+ output[39] = x1[57];
+ output[40] = x1[5];
+ output[41] = x1[37];
+ output[42] = x1[21];
+ output[43] = x1[53];
+ output[44] = x1[13];
+ output[45] = x1[45];
+ output[46] = x1[29];
+ output[47] = x1[61];
+ output[48] = x1[3];
+ output[49] = x1[35];
+ output[50] = x1[19];
+ output[51] = x1[51];
+ output[52] = x1[11];
+ output[53] = x1[43];
+ output[54] = x1[27];
+ output[55] = x1[59];
+ output[56] = x1[7];
+ output[57] = x1[39];
+ output[58] = x1[23];
+ output[59] = x1[55];
+ output[60] = x1[15];
+ output[61] = x1[47];
+ output[62] = x1[31];
+ output[63] = x1[63];
+}
+
+// 32-point forward DCT on 32-bit lanes: `input`/`output` are 32 __m256i
+// vectors, each holding 8 int32 values. Unlike the 16-bit fdct variants
+// above, the rotations take raw +/-cospi[] scalars directly via
+// btf_32_avx2_type0/type1 (type0/type1 presumably differ in the sign
+// layout of the 2x2 rotation — confirm against av1_txfm helpers).
+// Nine stages: stage 1 builds mirrored add/sub pairs, stages 2-8 mix
+// in-place butterflies (btf_32_add_sub_avx2) with cospi rotations
+// rounded by _r = 1 << (cos_bit - 1), and stage 9 writes x1[] out in
+// frequency order. `cos_bit` selects the cospi table precision.
+static INLINE void fdct32_avx2(const __m256i *input, __m256i *output,
+ int8_t cos_bit) {
+ __m256i x1[32];
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i _r = _mm256_set1_epi32(1 << (cos_bit - 1));
+ // stage 0
+ // stage 1: input[i] +/- input[31 - i].
+ btf_32_add_sub_out_avx2(&x1[0], &x1[31], input[0], input[31]);
+ btf_32_add_sub_out_avx2(&x1[1], &x1[30], input[1], input[30]);
+ btf_32_add_sub_out_avx2(&x1[2], &x1[29], input[2], input[29]);
+ btf_32_add_sub_out_avx2(&x1[3], &x1[28], input[3], input[28]);
+ btf_32_add_sub_out_avx2(&x1[4], &x1[27], input[4], input[27]);
+ btf_32_add_sub_out_avx2(&x1[5], &x1[26], input[5], input[26]);
+ btf_32_add_sub_out_avx2(&x1[6], &x1[25], input[6], input[25]);
+ btf_32_add_sub_out_avx2(&x1[7], &x1[24], input[7], input[24]);
+ btf_32_add_sub_out_avx2(&x1[8], &x1[23], input[8], input[23]);
+ btf_32_add_sub_out_avx2(&x1[9], &x1[22], input[9], input[22]);
+ btf_32_add_sub_out_avx2(&x1[10], &x1[21], input[10], input[21]);
+ btf_32_add_sub_out_avx2(&x1[11], &x1[20], input[11], input[20]);
+ btf_32_add_sub_out_avx2(&x1[12], &x1[19], input[12], input[19]);
+ btf_32_add_sub_out_avx2(&x1[13], &x1[18], input[13], input[18]);
+ btf_32_add_sub_out_avx2(&x1[14], &x1[17], input[14], input[17]);
+ btf_32_add_sub_out_avx2(&x1[15], &x1[16], input[15], input[16]);
+
+ // stage 2
+ btf_32_add_sub_avx2(&x1[0], &x1[15]);
+ btf_32_add_sub_avx2(&x1[1], &x1[14]);
+ btf_32_add_sub_avx2(&x1[2], &x1[13]);
+ btf_32_add_sub_avx2(&x1[3], &x1[12]);
+ btf_32_add_sub_avx2(&x1[4], &x1[11]);
+ btf_32_add_sub_avx2(&x1[5], &x1[10]);
+ btf_32_add_sub_avx2(&x1[6], &x1[9]);
+ btf_32_add_sub_avx2(&x1[7], &x1[8]);
+ btf_32_avx2_type0(-cospi[32], cospi[32], &x1[20], &x1[27], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[32], cospi[32], &x1[21], &x1[26], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[32], cospi[32], &x1[22], &x1[25], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[32], cospi[32], &x1[23], &x1[24], _r, cos_bit);
+
+ // stage 3
+ btf_32_add_sub_avx2(&x1[0], &x1[7]);
+ btf_32_add_sub_avx2(&x1[1], &x1[6]);
+ btf_32_add_sub_avx2(&x1[2], &x1[5]);
+ btf_32_add_sub_avx2(&x1[3], &x1[4]);
+ btf_32_avx2_type0(-cospi[32], cospi[32], &x1[10], &x1[13], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[32], cospi[32], &x1[11], &x1[12], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[16], &x1[23]);
+ btf_32_add_sub_avx2(&x1[17], &x1[22]);
+ btf_32_add_sub_avx2(&x1[18], &x1[21]);
+ btf_32_add_sub_avx2(&x1[19], &x1[20]);
+ btf_32_add_sub_avx2(&x1[31], &x1[24]);
+ btf_32_add_sub_avx2(&x1[30], &x1[25]);
+ btf_32_add_sub_avx2(&x1[29], &x1[26]);
+ btf_32_add_sub_avx2(&x1[28], &x1[27]);
+
+ // stage 4
+ btf_32_add_sub_avx2(&x1[0], &x1[3]);
+ btf_32_add_sub_avx2(&x1[1], &x1[2]);
+ btf_32_avx2_type0(-cospi[32], cospi[32], &x1[5], &x1[6], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[8], &x1[11]);
+ btf_32_add_sub_avx2(&x1[9], &x1[10]);
+ btf_32_add_sub_avx2(&x1[15], &x1[12]);
+ btf_32_add_sub_avx2(&x1[14], &x1[13]);
+ btf_32_avx2_type0(-cospi[16], cospi[48], &x1[18], &x1[29], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[16], cospi[48], &x1[19], &x1[28], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[48], -cospi[16], &x1[20], &x1[27], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[48], -cospi[16], &x1[21], &x1[26], _r, cos_bit);
+
+ // stage 5
+ btf_32_avx2_type0(cospi[32], cospi[32], &x1[0], &x1[1], _r, cos_bit);
+ btf_32_avx2_type1(cospi[48], cospi[16], &x1[2], &x1[3], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[4], &x1[5]);
+ btf_32_add_sub_avx2(&x1[7], &x1[6]);
+ btf_32_avx2_type0(-cospi[16], cospi[48], &x1[9], &x1[14], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[48], -cospi[16], &x1[10], &x1[13], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[16], &x1[19]);
+ btf_32_add_sub_avx2(&x1[17], &x1[18]);
+ btf_32_add_sub_avx2(&x1[23], &x1[20]);
+ btf_32_add_sub_avx2(&x1[22], &x1[21]);
+ btf_32_add_sub_avx2(&x1[24], &x1[27]);
+ btf_32_add_sub_avx2(&x1[25], &x1[26]);
+ btf_32_add_sub_avx2(&x1[31], &x1[28]);
+ btf_32_add_sub_avx2(&x1[30], &x1[29]);
+
+ // stage 6
+ btf_32_avx2_type1(cospi[56], cospi[8], &x1[4], &x1[7], _r, cos_bit);
+ btf_32_avx2_type1(cospi[24], cospi[40], &x1[5], &x1[6], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[8], &x1[9]);
+ btf_32_add_sub_avx2(&x1[11], &x1[10]);
+ btf_32_add_sub_avx2(&x1[12], &x1[13]);
+ btf_32_add_sub_avx2(&x1[15], &x1[14]);
+ btf_32_avx2_type0(-cospi[8], cospi[56], &x1[17], &x1[30], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[56], -cospi[8], &x1[18], &x1[29], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[40], cospi[24], &x1[21], &x1[26], _r, cos_bit);
+ btf_32_avx2_type0(-cospi[24], -cospi[40], &x1[22], &x1[25], _r, cos_bit);
+
+ // stage 7
+ btf_32_avx2_type1(cospi[60], cospi[4], &x1[8], &x1[15], _r, cos_bit);
+ btf_32_avx2_type1(cospi[28], cospi[36], &x1[9], &x1[14], _r, cos_bit);
+ btf_32_avx2_type1(cospi[44], cospi[20], &x1[10], &x1[13], _r, cos_bit);
+ btf_32_avx2_type1(cospi[12], cospi[52], &x1[11], &x1[12], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[16], &x1[17]);
+ btf_32_add_sub_avx2(&x1[19], &x1[18]);
+ btf_32_add_sub_avx2(&x1[20], &x1[21]);
+ btf_32_add_sub_avx2(&x1[23], &x1[22]);
+ btf_32_add_sub_avx2(&x1[24], &x1[25]);
+ btf_32_add_sub_avx2(&x1[27], &x1[26]);
+ btf_32_add_sub_avx2(&x1[28], &x1[29]);
+ btf_32_add_sub_avx2(&x1[31], &x1[30]);
+
+ // stage 8: final odd-coefficient rotations (cospi[2]..cospi[62]).
+ btf_32_avx2_type1(cospi[62], cospi[2], &x1[16], &x1[31], _r, cos_bit);
+ btf_32_avx2_type1(cospi[30], cospi[34], &x1[17], &x1[30], _r, cos_bit);
+ btf_32_avx2_type1(cospi[46], cospi[18], &x1[18], &x1[29], _r, cos_bit);
+ btf_32_avx2_type1(cospi[14], cospi[50], &x1[19], &x1[28], _r, cos_bit);
+ btf_32_avx2_type1(cospi[54], cospi[10], &x1[20], &x1[27], _r, cos_bit);
+ btf_32_avx2_type1(cospi[22], cospi[42], &x1[21], &x1[26], _r, cos_bit);
+ btf_32_avx2_type1(cospi[38], cospi[26], &x1[22], &x1[25], _r, cos_bit);
+ btf_32_avx2_type1(cospi[6], cospi[58], &x1[23], &x1[24], _r, cos_bit);
+
+ // stage 9: permute intermediates into frequency order.
+ output[0] = x1[0];
+ output[1] = x1[16];
+ output[2] = x1[8];
+ output[3] = x1[24];
+ output[4] = x1[4];
+ output[5] = x1[20];
+ output[6] = x1[12];
+ output[7] = x1[28];
+ output[8] = x1[2];
+ output[9] = x1[18];
+ output[10] = x1[10];
+ output[11] = x1[26];
+ output[12] = x1[6];
+ output[13] = x1[22];
+ output[14] = x1[14];
+ output[15] = x1[30];
+ output[16] = x1[1];
+ output[17] = x1[17];
+ output[18] = x1[9];
+ output[19] = x1[25];
+ output[20] = x1[5];
+ output[21] = x1[21];
+ output[22] = x1[13];
+ output[23] = x1[29];
+ output[24] = x1[3];
+ output[25] = x1[19];
+ output[26] = x1[11];
+ output[27] = x1[27];
+ output[28] = x1[7];
+ output[29] = x1[23];
+ output[30] = x1[15];
+ output[31] = x1[31];
+}
+
+static INLINE void fdct64_new_avx2(const __m256i *input, __m256i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i _r = _mm256_set1_epi32(1 << (cos_bit - 1));
+
+ __m256i cospi_m32 = _mm256_set1_epi32(-cospi[32]);
+ __m256i cospi_p32 = _mm256_set1_epi32(cospi[32]);
+ __m256i cospi_m16 = _mm256_set1_epi32(-cospi[16]);
+ __m256i cospi_p48 = _mm256_set1_epi32(cospi[48]);
+ __m256i cospi_m48 = _mm256_set1_epi32(-cospi[48]);
+ __m256i cospi_p16 = _mm256_set1_epi32(cospi[16]);
+ __m256i cospi_m08 = _mm256_set1_epi32(-cospi[8]);
+ __m256i cospi_p56 = _mm256_set1_epi32(cospi[56]);
+ __m256i cospi_m56 = _mm256_set1_epi32(-cospi[56]);
+ __m256i cospi_m40 = _mm256_set1_epi32(-cospi[40]);
+ __m256i cospi_p24 = _mm256_set1_epi32(cospi[24]);
+ __m256i cospi_m24 = _mm256_set1_epi32(-cospi[24]);
+ __m256i cospi_p08 = _mm256_set1_epi32(cospi[8]);
+ __m256i cospi_p40 = _mm256_set1_epi32(cospi[40]);
+ __m256i cospi_p60 = _mm256_set1_epi32(cospi[60]);
+ __m256i cospi_p04 = _mm256_set1_epi32(cospi[4]);
+ __m256i cospi_p28 = _mm256_set1_epi32(cospi[28]);
+ __m256i cospi_p36 = _mm256_set1_epi32(cospi[36]);
+ __m256i cospi_p44 = _mm256_set1_epi32(cospi[44]);
+ __m256i cospi_p20 = _mm256_set1_epi32(cospi[20]);
+ __m256i cospi_p12 = _mm256_set1_epi32(cospi[12]);
+ __m256i cospi_p52 = _mm256_set1_epi32(cospi[52]);
+ __m256i cospi_m04 = _mm256_set1_epi32(-cospi[4]);
+ __m256i cospi_m60 = _mm256_set1_epi32(-cospi[60]);
+ __m256i cospi_m36 = _mm256_set1_epi32(-cospi[36]);
+ __m256i cospi_m28 = _mm256_set1_epi32(-cospi[28]);
+ __m256i cospi_m20 = _mm256_set1_epi32(-cospi[20]);
+ __m256i cospi_m44 = _mm256_set1_epi32(-cospi[44]);
+ __m256i cospi_m52 = _mm256_set1_epi32(-cospi[52]);
+ __m256i cospi_m12 = _mm256_set1_epi32(-cospi[12]);
+ __m256i cospi_p62 = _mm256_set1_epi32(cospi[62]);
+ __m256i cospi_p02 = _mm256_set1_epi32(cospi[2]);
+ __m256i cospi_p30 = _mm256_set1_epi32(cospi[30]);
+ __m256i cospi_p34 = _mm256_set1_epi32(cospi[34]);
+ __m256i cospi_p46 = _mm256_set1_epi32(cospi[46]);
+ __m256i cospi_p18 = _mm256_set1_epi32(cospi[18]);
+ __m256i cospi_p14 = _mm256_set1_epi32(cospi[14]);
+ __m256i cospi_p50 = _mm256_set1_epi32(cospi[50]);
+ __m256i cospi_p54 = _mm256_set1_epi32(cospi[54]);
+ __m256i cospi_p10 = _mm256_set1_epi32(cospi[10]);
+ __m256i cospi_p22 = _mm256_set1_epi32(cospi[22]);
+ __m256i cospi_p42 = _mm256_set1_epi32(cospi[42]);
+ __m256i cospi_p38 = _mm256_set1_epi32(cospi[38]);
+ __m256i cospi_p26 = _mm256_set1_epi32(cospi[26]);
+ __m256i cospi_p06 = _mm256_set1_epi32(cospi[6]);
+ __m256i cospi_p58 = _mm256_set1_epi32(cospi[58]);
+ __m256i cospi_p63 = _mm256_set1_epi32(cospi[63]);
+ __m256i cospi_p01 = _mm256_set1_epi32(cospi[1]);
+ __m256i cospi_p31 = _mm256_set1_epi32(cospi[31]);
+ __m256i cospi_p33 = _mm256_set1_epi32(cospi[33]);
+ __m256i cospi_p47 = _mm256_set1_epi32(cospi[47]);
+ __m256i cospi_p17 = _mm256_set1_epi32(cospi[17]);
+ __m256i cospi_p15 = _mm256_set1_epi32(cospi[15]);
+ __m256i cospi_p49 = _mm256_set1_epi32(cospi[49]);
+ __m256i cospi_p55 = _mm256_set1_epi32(cospi[55]);
+ __m256i cospi_p09 = _mm256_set1_epi32(cospi[9]);
+ __m256i cospi_p23 = _mm256_set1_epi32(cospi[23]);
+ __m256i cospi_p41 = _mm256_set1_epi32(cospi[41]);
+ __m256i cospi_p39 = _mm256_set1_epi32(cospi[39]);
+ __m256i cospi_p25 = _mm256_set1_epi32(cospi[25]);
+ __m256i cospi_p07 = _mm256_set1_epi32(cospi[7]);
+ __m256i cospi_p57 = _mm256_set1_epi32(cospi[57]);
+ __m256i cospi_p59 = _mm256_set1_epi32(cospi[59]);
+ __m256i cospi_p05 = _mm256_set1_epi32(cospi[5]);
+ __m256i cospi_p27 = _mm256_set1_epi32(cospi[27]);
+ __m256i cospi_p37 = _mm256_set1_epi32(cospi[37]);
+ __m256i cospi_p43 = _mm256_set1_epi32(cospi[43]);
+ __m256i cospi_p21 = _mm256_set1_epi32(cospi[21]);
+ __m256i cospi_p11 = _mm256_set1_epi32(cospi[11]);
+ __m256i cospi_p53 = _mm256_set1_epi32(cospi[53]);
+ __m256i cospi_p51 = _mm256_set1_epi32(cospi[51]);
+ __m256i cospi_p13 = _mm256_set1_epi32(cospi[13]);
+ __m256i cospi_p19 = _mm256_set1_epi32(cospi[19]);
+ __m256i cospi_p45 = _mm256_set1_epi32(cospi[45]);
+ __m256i cospi_p35 = _mm256_set1_epi32(cospi[35]);
+ __m256i cospi_p29 = _mm256_set1_epi32(cospi[29]);
+ __m256i cospi_p03 = _mm256_set1_epi32(cospi[3]);
+ __m256i cospi_p61 = _mm256_set1_epi32(cospi[61]);
+
+ // stage 1
+ __m256i x1[64];
+ btf_32_add_sub_out_avx2(&x1[0], &x1[63], input[0], input[63]);
+ btf_32_add_sub_out_avx2(&x1[1], &x1[62], input[1], input[62]);
+ btf_32_add_sub_out_avx2(&x1[2], &x1[61], input[2], input[61]);
+ btf_32_add_sub_out_avx2(&x1[3], &x1[60], input[3], input[60]);
+ btf_32_add_sub_out_avx2(&x1[4], &x1[59], input[4], input[59]);
+ btf_32_add_sub_out_avx2(&x1[5], &x1[58], input[5], input[58]);
+ btf_32_add_sub_out_avx2(&x1[6], &x1[57], input[6], input[57]);
+ btf_32_add_sub_out_avx2(&x1[7], &x1[56], input[7], input[56]);
+ btf_32_add_sub_out_avx2(&x1[8], &x1[55], input[8], input[55]);
+ btf_32_add_sub_out_avx2(&x1[9], &x1[54], input[9], input[54]);
+ btf_32_add_sub_out_avx2(&x1[10], &x1[53], input[10], input[53]);
+ btf_32_add_sub_out_avx2(&x1[11], &x1[52], input[11], input[52]);
+ btf_32_add_sub_out_avx2(&x1[12], &x1[51], input[12], input[51]);
+ btf_32_add_sub_out_avx2(&x1[13], &x1[50], input[13], input[50]);
+ btf_32_add_sub_out_avx2(&x1[14], &x1[49], input[14], input[49]);
+ btf_32_add_sub_out_avx2(&x1[15], &x1[48], input[15], input[48]);
+ btf_32_add_sub_out_avx2(&x1[16], &x1[47], input[16], input[47]);
+ btf_32_add_sub_out_avx2(&x1[17], &x1[46], input[17], input[46]);
+ btf_32_add_sub_out_avx2(&x1[18], &x1[45], input[18], input[45]);
+ btf_32_add_sub_out_avx2(&x1[19], &x1[44], input[19], input[44]);
+ btf_32_add_sub_out_avx2(&x1[20], &x1[43], input[20], input[43]);
+ btf_32_add_sub_out_avx2(&x1[21], &x1[42], input[21], input[42]);
+ btf_32_add_sub_out_avx2(&x1[22], &x1[41], input[22], input[41]);
+ btf_32_add_sub_out_avx2(&x1[23], &x1[40], input[23], input[40]);
+ btf_32_add_sub_out_avx2(&x1[24], &x1[39], input[24], input[39]);
+ btf_32_add_sub_out_avx2(&x1[25], &x1[38], input[25], input[38]);
+ btf_32_add_sub_out_avx2(&x1[26], &x1[37], input[26], input[37]);
+ btf_32_add_sub_out_avx2(&x1[27], &x1[36], input[27], input[36]);
+ btf_32_add_sub_out_avx2(&x1[28], &x1[35], input[28], input[35]);
+ btf_32_add_sub_out_avx2(&x1[29], &x1[34], input[29], input[34]);
+ btf_32_add_sub_out_avx2(&x1[30], &x1[33], input[30], input[33]);
+ btf_32_add_sub_out_avx2(&x1[31], &x1[32], input[31], input[32]);
+
+ // stage 2
+ btf_32_add_sub_avx2(&x1[0], &x1[31]);
+ btf_32_add_sub_avx2(&x1[1], &x1[30]);
+ btf_32_add_sub_avx2(&x1[2], &x1[29]);
+ btf_32_add_sub_avx2(&x1[3], &x1[28]);
+ btf_32_add_sub_avx2(&x1[4], &x1[27]);
+ btf_32_add_sub_avx2(&x1[5], &x1[26]);
+ btf_32_add_sub_avx2(&x1[6], &x1[25]);
+ btf_32_add_sub_avx2(&x1[7], &x1[24]);
+ btf_32_add_sub_avx2(&x1[8], &x1[23]);
+ btf_32_add_sub_avx2(&x1[9], &x1[22]);
+ btf_32_add_sub_avx2(&x1[10], &x1[21]);
+ btf_32_add_sub_avx2(&x1[11], &x1[20]);
+ btf_32_add_sub_avx2(&x1[12], &x1[19]);
+ btf_32_add_sub_avx2(&x1[13], &x1[18]);
+ btf_32_add_sub_avx2(&x1[14], &x1[17]);
+ btf_32_add_sub_avx2(&x1[15], &x1[16]);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[40], &x1[55], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[41], &x1[54], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[42], &x1[53], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[43], &x1[52], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[44], &x1[51], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[45], &x1[50], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[46], &x1[49], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[47], &x1[48], _r, cos_bit);
+
+ // stage 3
+ btf_32_add_sub_avx2(&x1[0], &x1[15]);
+ btf_32_add_sub_avx2(&x1[1], &x1[14]);
+ btf_32_add_sub_avx2(&x1[2], &x1[13]);
+ btf_32_add_sub_avx2(&x1[3], &x1[12]);
+ btf_32_add_sub_avx2(&x1[4], &x1[11]);
+ btf_32_add_sub_avx2(&x1[5], &x1[10]);
+ btf_32_add_sub_avx2(&x1[6], &x1[9]);
+ btf_32_add_sub_avx2(&x1[7], &x1[8]);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[20], &x1[27], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[21], &x1[26], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[22], &x1[25], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[23], &x1[24], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[32], &x1[47]);
+ btf_32_add_sub_avx2(&x1[33], &x1[46]);
+ btf_32_add_sub_avx2(&x1[34], &x1[45]);
+ btf_32_add_sub_avx2(&x1[35], &x1[44]);
+ btf_32_add_sub_avx2(&x1[36], &x1[43]);
+ btf_32_add_sub_avx2(&x1[37], &x1[42]);
+ btf_32_add_sub_avx2(&x1[38], &x1[41]);
+ btf_32_add_sub_avx2(&x1[39], &x1[40]);
+ btf_32_add_sub_avx2(&x1[63], &x1[48]);
+ btf_32_add_sub_avx2(&x1[62], &x1[49]);
+ btf_32_add_sub_avx2(&x1[61], &x1[50]);
+ btf_32_add_sub_avx2(&x1[60], &x1[51]);
+ btf_32_add_sub_avx2(&x1[59], &x1[52]);
+ btf_32_add_sub_avx2(&x1[58], &x1[53]);
+ btf_32_add_sub_avx2(&x1[57], &x1[54]);
+ btf_32_add_sub_avx2(&x1[56], &x1[55]);
+
+ // stage 4
+ btf_32_add_sub_avx2(&x1[0], &x1[7]);
+ btf_32_add_sub_avx2(&x1[1], &x1[6]);
+ btf_32_add_sub_avx2(&x1[2], &x1[5]);
+ btf_32_add_sub_avx2(&x1[3], &x1[4]);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[10], &x1[13], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[11], &x1[12], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[16], &x1[23]);
+ btf_32_add_sub_avx2(&x1[17], &x1[22]);
+ btf_32_add_sub_avx2(&x1[18], &x1[21]);
+ btf_32_add_sub_avx2(&x1[19], &x1[20]);
+ btf_32_add_sub_avx2(&x1[31], &x1[24]);
+ btf_32_add_sub_avx2(&x1[30], &x1[25]);
+ btf_32_add_sub_avx2(&x1[29], &x1[26]);
+ btf_32_add_sub_avx2(&x1[28], &x1[27]);
+ btf_32_avx2_type0_new(cospi_m16, cospi_p48, &x1[36], &x1[59], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m16, cospi_p48, &x1[37], &x1[58], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m16, cospi_p48, &x1[38], &x1[57], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m16, cospi_p48, &x1[39], &x1[56], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m48, cospi_m16, &x1[40], &x1[55], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m48, cospi_m16, &x1[41], &x1[54], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m48, cospi_m16, &x1[42], &x1[53], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m48, cospi_m16, &x1[43], &x1[52], _r, cos_bit);
+
+ // stage 5
+ btf_32_add_sub_avx2(&x1[0], &x1[3]);
+ btf_32_add_sub_avx2(&x1[1], &x1[2]);
+ btf_32_avx2_type0_new(cospi_m32, cospi_p32, &x1[5], &x1[6], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[8], &x1[11]);
+ btf_32_add_sub_avx2(&x1[9], &x1[10]);
+ btf_32_add_sub_avx2(&x1[15], &x1[12]);
+ btf_32_add_sub_avx2(&x1[14], &x1[13]);
+ btf_32_avx2_type0_new(cospi_m16, cospi_p48, &x1[18], &x1[29], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m16, cospi_p48, &x1[19], &x1[28], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m48, cospi_m16, &x1[20], &x1[27], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m48, cospi_m16, &x1[21], &x1[26], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[32], &x1[39]);
+ btf_32_add_sub_avx2(&x1[33], &x1[38]);
+ btf_32_add_sub_avx2(&x1[34], &x1[37]);
+ btf_32_add_sub_avx2(&x1[35], &x1[36]);
+ btf_32_add_sub_avx2(&x1[47], &x1[40]);
+ btf_32_add_sub_avx2(&x1[46], &x1[41]);
+ btf_32_add_sub_avx2(&x1[45], &x1[42]);
+ btf_32_add_sub_avx2(&x1[44], &x1[43]);
+ btf_32_add_sub_avx2(&x1[48], &x1[55]);
+ btf_32_add_sub_avx2(&x1[49], &x1[54]);
+ btf_32_add_sub_avx2(&x1[50], &x1[53]);
+ btf_32_add_sub_avx2(&x1[51], &x1[52]);
+ btf_32_add_sub_avx2(&x1[63], &x1[56]);
+ btf_32_add_sub_avx2(&x1[62], &x1[57]);
+ btf_32_add_sub_avx2(&x1[61], &x1[58]);
+ btf_32_add_sub_avx2(&x1[60], &x1[59]);
+
+ // stage 6
+ btf_32_avx2_type0_new(cospi_p32, cospi_p32, &x1[0], &x1[1], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p48, cospi_p16, &x1[2], &x1[3], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[4], &x1[5]);
+ btf_32_add_sub_avx2(&x1[7], &x1[6]);
+ btf_32_avx2_type0_new(cospi_m16, cospi_p48, &x1[9], &x1[14], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m48, cospi_m16, &x1[10], &x1[13], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[16], &x1[19]);
+ btf_32_add_sub_avx2(&x1[17], &x1[18]);
+ btf_32_add_sub_avx2(&x1[23], &x1[20]);
+ btf_32_add_sub_avx2(&x1[22], &x1[21]);
+ btf_32_add_sub_avx2(&x1[24], &x1[27]);
+ btf_32_add_sub_avx2(&x1[25], &x1[26]);
+ btf_32_add_sub_avx2(&x1[31], &x1[28]);
+ btf_32_add_sub_avx2(&x1[30], &x1[29]);
+ btf_32_avx2_type0_new(cospi_m08, cospi_p56, &x1[34], &x1[61], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m08, cospi_p56, &x1[35], &x1[60], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m56, cospi_m08, &x1[36], &x1[59], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m56, cospi_m08, &x1[37], &x1[58], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m40, cospi_p24, &x1[42], &x1[53], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m40, cospi_p24, &x1[43], &x1[52], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m24, cospi_m40, &x1[44], &x1[51], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m24, cospi_m40, &x1[45], &x1[50], _r, cos_bit);
+
+ // stage 7
+ btf_32_avx2_type1_new(cospi_p56, cospi_p08, &x1[4], &x1[7], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p24, cospi_p40, &x1[5], &x1[6], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[8], &x1[9]);
+ btf_32_add_sub_avx2(&x1[11], &x1[10]);
+ btf_32_add_sub_avx2(&x1[12], &x1[13]);
+ btf_32_add_sub_avx2(&x1[15], &x1[14]);
+ btf_32_avx2_type0_new(cospi_m08, cospi_p56, &x1[17], &x1[30], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m56, cospi_m08, &x1[18], &x1[29], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m40, cospi_p24, &x1[21], &x1[26], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m24, cospi_m40, &x1[22], &x1[25], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[32], &x1[35]);
+ btf_32_add_sub_avx2(&x1[33], &x1[34]);
+ btf_32_add_sub_avx2(&x1[39], &x1[36]);
+ btf_32_add_sub_avx2(&x1[38], &x1[37]);
+ btf_32_add_sub_avx2(&x1[40], &x1[43]);
+ btf_32_add_sub_avx2(&x1[41], &x1[42]);
+ btf_32_add_sub_avx2(&x1[47], &x1[44]);
+ btf_32_add_sub_avx2(&x1[46], &x1[45]);
+ btf_32_add_sub_avx2(&x1[48], &x1[51]);
+ btf_32_add_sub_avx2(&x1[49], &x1[50]);
+ btf_32_add_sub_avx2(&x1[55], &x1[52]);
+ btf_32_add_sub_avx2(&x1[54], &x1[53]);
+ btf_32_add_sub_avx2(&x1[56], &x1[59]);
+ btf_32_add_sub_avx2(&x1[57], &x1[58]);
+ btf_32_add_sub_avx2(&x1[63], &x1[60]);
+ btf_32_add_sub_avx2(&x1[62], &x1[61]);
+
+ // stage 8
+ btf_32_avx2_type1_new(cospi_p60, cospi_p04, &x1[8], &x1[15], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p28, cospi_p36, &x1[9], &x1[14], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p44, cospi_p20, &x1[10], &x1[13], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p12, cospi_p52, &x1[11], &x1[12], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[16], &x1[17]);
+ btf_32_add_sub_avx2(&x1[19], &x1[18]);
+ btf_32_add_sub_avx2(&x1[20], &x1[21]);
+ btf_32_add_sub_avx2(&x1[23], &x1[22]);
+ btf_32_add_sub_avx2(&x1[24], &x1[25]);
+ btf_32_add_sub_avx2(&x1[27], &x1[26]);
+ btf_32_add_sub_avx2(&x1[28], &x1[29]);
+ btf_32_add_sub_avx2(&x1[31], &x1[30]);
+ btf_32_avx2_type0_new(cospi_m04, cospi_p60, &x1[33], &x1[62], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m60, cospi_m04, &x1[34], &x1[61], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m36, cospi_p28, &x1[37], &x1[58], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m28, cospi_m36, &x1[38], &x1[57], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m20, cospi_p44, &x1[41], &x1[54], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m44, cospi_m20, &x1[42], &x1[53], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m52, cospi_p12, &x1[45], &x1[50], _r, cos_bit);
+ btf_32_avx2_type0_new(cospi_m12, cospi_m52, &x1[46], &x1[49], _r, cos_bit);
+
+ // stage 9
+ btf_32_avx2_type1_new(cospi_p62, cospi_p02, &x1[16], &x1[31], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p30, cospi_p34, &x1[17], &x1[30], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p46, cospi_p18, &x1[18], &x1[29], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p14, cospi_p50, &x1[19], &x1[28], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p54, cospi_p10, &x1[20], &x1[27], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p22, cospi_p42, &x1[21], &x1[26], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p38, cospi_p26, &x1[22], &x1[25], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p06, cospi_p58, &x1[23], &x1[24], _r, cos_bit);
+ btf_32_add_sub_avx2(&x1[32], &x1[33]);
+ btf_32_add_sub_avx2(&x1[35], &x1[34]);
+ btf_32_add_sub_avx2(&x1[36], &x1[37]);
+ btf_32_add_sub_avx2(&x1[39], &x1[38]);
+ btf_32_add_sub_avx2(&x1[40], &x1[41]);
+ btf_32_add_sub_avx2(&x1[43], &x1[42]);
+ btf_32_add_sub_avx2(&x1[44], &x1[45]);
+ btf_32_add_sub_avx2(&x1[47], &x1[46]);
+ btf_32_add_sub_avx2(&x1[48], &x1[49]);
+ btf_32_add_sub_avx2(&x1[51], &x1[50]);
+ btf_32_add_sub_avx2(&x1[52], &x1[53]);
+ btf_32_add_sub_avx2(&x1[55], &x1[54]);
+ btf_32_add_sub_avx2(&x1[56], &x1[57]);
+ btf_32_add_sub_avx2(&x1[59], &x1[58]);
+ btf_32_add_sub_avx2(&x1[60], &x1[61]);
+ btf_32_add_sub_avx2(&x1[63], &x1[62]);
+
+ // stage 10
+ btf_32_avx2_type1_new(cospi_p63, cospi_p01, &x1[32], &x1[63], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p31, cospi_p33, &x1[33], &x1[62], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p47, cospi_p17, &x1[34], &x1[61], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p15, cospi_p49, &x1[35], &x1[60], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p55, cospi_p09, &x1[36], &x1[59], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p23, cospi_p41, &x1[37], &x1[58], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p39, cospi_p25, &x1[38], &x1[57], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p07, cospi_p57, &x1[39], &x1[56], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p59, cospi_p05, &x1[40], &x1[55], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p27, cospi_p37, &x1[41], &x1[54], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p43, cospi_p21, &x1[42], &x1[53], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p11, cospi_p53, &x1[43], &x1[52], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p51, cospi_p13, &x1[44], &x1[51], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p19, cospi_p45, &x1[45], &x1[50], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p35, cospi_p29, &x1[46], &x1[49], _r, cos_bit);
+ btf_32_avx2_type1_new(cospi_p03, cospi_p61, &x1[47], &x1[48], _r, cos_bit);
+
+ // stage 11
+ output[0] = x1[0];
+ output[1] = x1[32];
+ output[2] = x1[16];
+ output[3] = x1[48];
+ output[4] = x1[8];
+ output[5] = x1[40];
+ output[6] = x1[24];
+ output[7] = x1[56];
+ output[8] = x1[4];
+ output[9] = x1[36];
+ output[10] = x1[20];
+ output[11] = x1[52];
+ output[12] = x1[12];
+ output[13] = x1[44];
+ output[14] = x1[28];
+ output[15] = x1[60];
+ output[16] = x1[2];
+ output[17] = x1[34];
+ output[18] = x1[18];
+ output[19] = x1[50];
+ output[20] = x1[10];
+ output[21] = x1[42];
+ output[22] = x1[26];
+ output[23] = x1[58];
+ output[24] = x1[6];
+ output[25] = x1[38];
+ output[26] = x1[22];
+ output[27] = x1[54];
+ output[28] = x1[14];
+ output[29] = x1[46];
+ output[30] = x1[30];
+ output[31] = x1[62];
+ output[32] = x1[1];
+ output[33] = x1[33];
+ output[34] = x1[17];
+ output[35] = x1[49];
+ output[36] = x1[9];
+ output[37] = x1[41];
+ output[38] = x1[25];
+ output[39] = x1[57];
+ output[40] = x1[5];
+ output[41] = x1[37];
+ output[42] = x1[21];
+ output[43] = x1[53];
+ output[44] = x1[13];
+ output[45] = x1[45];
+ output[46] = x1[29];
+ output[47] = x1[61];
+ output[48] = x1[3];
+ output[49] = x1[35];
+ output[50] = x1[19];
+ output[51] = x1[51];
+ output[52] = x1[11];
+ output[53] = x1[43];
+ output[54] = x1[27];
+ output[55] = x1[59];
+ output[56] = x1[7];
+ output[57] = x1[39];
+ output[58] = x1[23];
+ output[59] = x1[55];
+ output[60] = x1[15];
+ output[61] = x1[47];
+ output[62] = x1[31];
+ output[63] = x1[63];
+}
+
+ // Forward 16-point 1-D ADST over 16-bit lanes (each __m256i holds one
+ // 16-wide row). Stage 1 permutes and sign-flips the inputs (via
+ // saturating subtraction from zero), stages 2-8 run the butterfly
+ // network with paired 16-bit cosine constants, and stage 9 writes the
+ // results in the permuted order the AV1 fADST-16 expects.
+ static INLINE void fadst16x16_new_avx2(const __m256i *input, __m256i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i __zero = _mm256_setzero_si256();
+ const __m256i _r = _mm256_set1_epi32(1 << (cos_bit - 1));
+
+ // Paired cosine coefficients packed two-per-32-bit for the
+ // multiply-add butterflies below.
+ __m256i cospi_p32_p32 = pair_set_w16_epi16(cospi[32], cospi[32]);
+ __m256i cospi_p32_m32 = pair_set_w16_epi16(cospi[32], -cospi[32]);
+ __m256i cospi_p16_p48 = pair_set_w16_epi16(cospi[16], cospi[48]);
+ __m256i cospi_p48_m16 = pair_set_w16_epi16(cospi[48], -cospi[16]);
+ __m256i cospi_m48_p16 = pair_set_w16_epi16(-cospi[48], cospi[16]);
+ __m256i cospi_p08_p56 = pair_set_w16_epi16(cospi[8], cospi[56]);
+ __m256i cospi_p56_m08 = pair_set_w16_epi16(cospi[56], -cospi[8]);
+ __m256i cospi_p40_p24 = pair_set_w16_epi16(cospi[40], cospi[24]);
+ __m256i cospi_p24_m40 = pair_set_w16_epi16(cospi[24], -cospi[40]);
+ __m256i cospi_m56_p08 = pair_set_w16_epi16(-cospi[56], cospi[8]);
+ __m256i cospi_m24_p40 = pair_set_w16_epi16(-cospi[24], cospi[40]);
+ __m256i cospi_p02_p62 = pair_set_w16_epi16(cospi[2], cospi[62]);
+ __m256i cospi_p62_m02 = pair_set_w16_epi16(cospi[62], -cospi[2]);
+ __m256i cospi_p10_p54 = pair_set_w16_epi16(cospi[10], cospi[54]);
+ __m256i cospi_p54_m10 = pair_set_w16_epi16(cospi[54], -cospi[10]);
+ __m256i cospi_p18_p46 = pair_set_w16_epi16(cospi[18], cospi[46]);
+ __m256i cospi_p46_m18 = pair_set_w16_epi16(cospi[46], -cospi[18]);
+ __m256i cospi_p26_p38 = pair_set_w16_epi16(cospi[26], cospi[38]);
+ __m256i cospi_p38_m26 = pair_set_w16_epi16(cospi[38], -cospi[26]);
+ __m256i cospi_p34_p30 = pair_set_w16_epi16(cospi[34], cospi[30]);
+ __m256i cospi_p30_m34 = pair_set_w16_epi16(cospi[30], -cospi[34]);
+ __m256i cospi_p42_p22 = pair_set_w16_epi16(cospi[42], cospi[22]);
+ __m256i cospi_p22_m42 = pair_set_w16_epi16(cospi[22], -cospi[42]);
+ __m256i cospi_p50_p14 = pair_set_w16_epi16(cospi[50], cospi[14]);
+ __m256i cospi_p14_m50 = pair_set_w16_epi16(cospi[14], -cospi[50]);
+ __m256i cospi_p58_p06 = pair_set_w16_epi16(cospi[58], cospi[6]);
+ __m256i cospi_p06_m58 = pair_set_w16_epi16(cospi[6], -cospi[58]);
+
+ // stage 1: input permutation with sign flips (x = -input[k] computed
+ // as saturating 0 - input[k]).
+ __m256i x1[16];
+ x1[0] = input[0];
+ x1[1] = _mm256_subs_epi16(__zero, input[15]);
+ x1[2] = _mm256_subs_epi16(__zero, input[7]);
+ x1[3] = input[8];
+ x1[4] = _mm256_subs_epi16(__zero, input[3]);
+ x1[5] = input[12];
+ x1[6] = input[4];
+ x1[7] = _mm256_subs_epi16(__zero, input[11]);
+ x1[8] = _mm256_subs_epi16(__zero, input[1]);
+ x1[9] = input[14];
+ x1[10] = input[6];
+ x1[11] = _mm256_subs_epi16(__zero, input[9]);
+ x1[12] = input[2];
+ x1[13] = _mm256_subs_epi16(__zero, input[13]);
+ x1[14] = _mm256_subs_epi16(__zero, input[5]);
+ x1[15] = input[10];
+
+ // stage 2
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[2], &x1[3], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[6], &x1[7], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[10], &x1[11], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[14], &x1[15], _r, cos_bit);
+
+ // stage 3
+ btf_16_adds_subs_avx2(&x1[0], &x1[2]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[3]);
+ btf_16_adds_subs_avx2(&x1[4], &x1[6]);
+ btf_16_adds_subs_avx2(&x1[5], &x1[7]);
+ btf_16_adds_subs_avx2(&x1[8], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[9], &x1[11]);
+ btf_16_adds_subs_avx2(&x1[12], &x1[14]);
+ btf_16_adds_subs_avx2(&x1[13], &x1[15]);
+
+ // stage 4
+ btf_16_w16_avx2(cospi_p16_p48, cospi_p48_m16, &x1[4], &x1[5], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_p16, cospi_p16_p48, &x1[6], &x1[7], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p16_p48, cospi_p48_m16, &x1[12], &x1[13], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m48_p16, cospi_p16_p48, &x1[14], &x1[15], _r, cos_bit);
+
+ // stage 5
+ btf_16_adds_subs_avx2(&x1[0], &x1[4]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[5]);
+ btf_16_adds_subs_avx2(&x1[2], &x1[6]);
+ btf_16_adds_subs_avx2(&x1[3], &x1[7]);
+ btf_16_adds_subs_avx2(&x1[8], &x1[12]);
+ btf_16_adds_subs_avx2(&x1[9], &x1[13]);
+ btf_16_adds_subs_avx2(&x1[10], &x1[14]);
+ btf_16_adds_subs_avx2(&x1[11], &x1[15]);
+
+ // stage 6
+ btf_16_w16_avx2(cospi_p08_p56, cospi_p56_m08, &x1[8], &x1[9], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p40_p24, cospi_p24_m40, &x1[10], &x1[11], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m56_p08, cospi_p08_p56, &x1[12], &x1[13], _r, cos_bit);
+ btf_16_w16_avx2(cospi_m24_p40, cospi_p40_p24, &x1[14], &x1[15], _r, cos_bit);
+
+ // stage 7
+ btf_16_adds_subs_avx2(&x1[0], &x1[8]);
+ btf_16_adds_subs_avx2(&x1[1], &x1[9]);
+ btf_16_adds_subs_avx2(&x1[2], &x1[10]);
+ btf_16_adds_subs_avx2(&x1[3], &x1[11]);
+ btf_16_adds_subs_avx2(&x1[4], &x1[12]);
+ btf_16_adds_subs_avx2(&x1[5], &x1[13]);
+ btf_16_adds_subs_avx2(&x1[6], &x1[14]);
+ btf_16_adds_subs_avx2(&x1[7], &x1[15]);
+
+ // stage 8
+ btf_16_w16_avx2(cospi_p02_p62, cospi_p62_m02, &x1[0], &x1[1], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p10_p54, cospi_p54_m10, &x1[2], &x1[3], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p18_p46, cospi_p46_m18, &x1[4], &x1[5], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p26_p38, cospi_p38_m26, &x1[6], &x1[7], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p34_p30, cospi_p30_m34, &x1[8], &x1[9], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p42_p22, cospi_p22_m42, &x1[10], &x1[11], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p50_p14, cospi_p14_m50, &x1[12], &x1[13], _r, cos_bit);
+ btf_16_w16_avx2(cospi_p58_p06, cospi_p06_m58, &x1[14], &x1[15], _r, cos_bit);
+
+ // stage 9: final output permutation.
+ output[0] = x1[1];
+ output[1] = x1[14];
+ output[2] = x1[3];
+ output[3] = x1[12];
+ output[4] = x1[5];
+ output[5] = x1[10];
+ output[6] = x1[7];
+ output[7] = x1[8];
+ output[8] = x1[9];
+ output[9] = x1[6];
+ output[10] = x1[11];
+ output[11] = x1[4];
+ output[12] = x1[13];
+ output[13] = x1[2];
+ output[14] = x1[15];
+ output[15] = x1[0];
+ }
+
+ // Identity transform for 16x16: scales each 16-bit lane by 2*NewSqrt2
+ // with rounding, widening to 32 bits for the multiply and packing back
+ // to 16 bits with saturation. The unpack-with-one trick interleaves
+ // each value with the constant 1 so scale_round_avx2 can fold the
+ // rounding offset into a single multiply-add.
+ // NOTE(review): NewSqrt2 is presumably the fixed-point sqrt(2)
+ // constant from av1_txfm.h — confirm the Q-format there.
+ static INLINE void fidentity16x16_new_avx2(const __m256i *input,
+ __m256i *output, int8_t cos_bit) {
+ (void)cos_bit;
+ const __m256i one = _mm256_set1_epi16(1);
+
+ for (int i = 0; i < 16; ++i) {
+ const __m256i a_lo = _mm256_unpacklo_epi16(input[i], one);
+ const __m256i a_hi = _mm256_unpackhi_epi16(input[i], one);
+ const __m256i b_lo = scale_round_avx2(a_lo, 2 * NewSqrt2);
+ const __m256i b_hi = scale_round_avx2(a_hi, 2 * NewSqrt2);
+ output[i] = _mm256_packs_epi32(b_lo, b_hi);
+ }
+ }
+
+ // Identity transform for 16x32: multiplies every 16-bit lane by 4
+ // (left shift by 2); `cos_bit` is unused for identity transforms.
+ static INLINE void fidentity16x32_avx2(const __m256i *input, __m256i *output,
+ int8_t cos_bit) {
+ (void)cos_bit;
+ for (int i = 0; i < 32; ++i) {
+ output[i] = _mm256_slli_epi16(input[i], 2);
+ }
+ }
+
+ // Stores out_size rows of 16 int32 coefficients: row i is written as
+ // in1[i] (lanes 0-7) followed by in2[i] (lanes 8-15) at out + i*stride.
+ // Uses aligned stores, so `out` (and each row start) must be 32-byte
+ // aligned.
+ static INLINE void store_output_32bit_w16(int32_t *const out,
+ const __m256i *const in1,
+ const __m256i *const in2,
+ const int stride,
+ const int out_size) {
+ for (int i = 0; i < out_size; ++i) {
+ _mm256_store_si256((__m256i *)(out + stride * i), in1[i]);
+ _mm256_store_si256((__m256i *)(out + stride * i + 8), in2[i]);
+ }
+ }
+
+ // Sign-extends each input vector of 16 int16 values to 16 int32 values
+ // (low and high 128-bit halves widened separately) and stores them as
+ // one output row of 16 coefficients, advancing `out` by `stride` per
+ // row. Uses aligned stores, so `out` must be 32-byte aligned.
+ static INLINE void store_buffer_16bit_to_32bit_w16_avx2(const __m256i *const in,
+ int32_t *out,
+ const int stride,
+ const int out_size) {
+ for (int i = 0; i < out_size; ++i) {
+ _mm256_store_si256((__m256i *)(out),
+ _mm256_cvtepi16_epi32(_mm256_castsi256_si128(in[i])));
+ _mm256_store_si256(
+ (__m256i *)(out + 8),
+ _mm256_cvtepi16_epi32(_mm256_extracti128_si256(in[i], 1)));
+ out += stride;
+ }
+ }
+
+ // Widens 16 int16 values to int32 while scaling by NewSqrt2 with
+ // rounding (the sqrt(2) correction used for rectangular transforms),
+ // then stores the 16 results to `b`. The permute4x64 pre-shuffle keeps
+ // lane order correct across the unpacklo/unpackhi 128-bit halves; the
+ // unpack-with-one trick lets scale_round_avx2 fold the rounding offset
+ // into one multiply-add. Aligned stores: `b` must be 32-byte aligned.
+ static INLINE void store_rect_16bit_to_32bit_avx2(const __m256i a,
+ int32_t *const b) {
+ const __m256i one = _mm256_set1_epi16(1);
+ const __m256i a_reoder = _mm256_permute4x64_epi64(a, 0xd8);
+ const __m256i a_lo = _mm256_unpacklo_epi16(a_reoder, one);
+ const __m256i a_hi = _mm256_unpackhi_epi16(a_reoder, one);
+ const __m256i b_lo = scale_round_avx2(a_lo, NewSqrt2);
+ const __m256i b_hi = scale_round_avx2(a_hi, NewSqrt2);
+ _mm256_store_si256((__m256i *)b, b_lo);
+ _mm256_store_si256((__m256i *)(b + 8), b_hi);
+ }
+
+ // Row-wise wrapper: applies the rect-scaling 16->32 bit store to
+ // out_size rows, writing row i at out + i*stride.
+ static INLINE void store_rect_buffer_16bit_to_32bit_w16_avx2(
+ const __m256i *const in, int32_t *const out, const int stride,
+ const int out_size) {
+ for (int i = 0; i < out_size; ++i) {
+ store_rect_16bit_to_32bit_avx2(in[i], out + i * stride);
+ }
+ }
+
+ // Signature shared by all AVX2 1-D transform stages above.
+ typedef void (*transform_1d_avx2)(const __m256i *input, __m256i *output,
+ int8_t cos_bit);
+
+ // Column (vertical) transforms for 16x32 tiles, indexed by TX_TYPE.
+ // NULL marks combinations this AVX2 path does not handle.
+ static const transform_1d_avx2 col_txfm16x32_arr[TX_TYPES] = {
+ fdct16x32_avx2, // DCT_DCT
+ NULL, // ADST_DCT
+ NULL, // DCT_ADST
+ NULL, // ADST_ADST
+ NULL, // FLIPADST_DCT
+ NULL, // DCT_FLIPADST
+ NULL, // FLIPADST_FLIPADST
+ NULL, // ADST_FLIPADST
+ NULL, // FLIPADST_ADST
+ fidentity16x32_avx2, // IDTX
+ fdct16x32_avx2, // V_DCT
+ fidentity16x32_avx2, // H_DCT
+ NULL, // V_ADST
+ NULL, // H_ADST
+ NULL, // V_FLIPADST
+ NULL // H_FLIPADST
+ };
+
+ // Row (horizontal) transforms for 16x32 tiles, indexed by TX_TYPE.
+ // NULL marks combinations this AVX2 path does not handle.
+ static const transform_1d_avx2 row_txfm16x32_arr[TX_TYPES] = {
+ fdct16x32_avx2, // DCT_DCT
+ NULL, // ADST_DCT
+ NULL, // DCT_ADST
+ NULL, // ADST_ADST
+ NULL, // FLIPADST_DCT
+ NULL, // DCT_FLIPADST
+ NULL, // FLIPADST_FLIPADST
+ NULL, // ADST_FLIPADST
+ NULL, // FLIPADST_ADST
+ fidentity16x32_avx2, // IDTX
+ fidentity16x32_avx2, // V_DCT
+ fdct16x32_avx2, // H_DCT
+ NULL, // V_ADST
+ NULL, // H_ADST
+ NULL, // V_FLIPADST
+ NULL // H_FLIPADST
+ };
+
+ // Column (vertical) transforms for 16x16 tiles, indexed by TX_TYPE.
+ static const transform_1d_avx2 col_txfm16x16_arr[TX_TYPES] = {
+ fdct16x16_new_avx2, // DCT_DCT
+ fadst16x16_new_avx2, // ADST_DCT
+ fdct16x16_new_avx2, // DCT_ADST
+ fadst16x16_new_avx2, // ADST_ADST
+ fadst16x16_new_avx2, // FLIPADST_DCT
+ fdct16x16_new_avx2, // DCT_FLIPADST
+ fadst16x16_new_avx2, // FLIPADST_FLIPADST
+ fadst16x16_new_avx2, // ADST_FLIPADST
+ fadst16x16_new_avx2, // FLIPADST_ADST
+ fidentity16x16_new_avx2, // IDTX
+ fdct16x16_new_avx2, // V_DCT
+ fidentity16x16_new_avx2, // H_DCT
+ fadst16x16_new_avx2, // V_ADST
+ fidentity16x16_new_avx2, // H_ADST
+ fadst16x16_new_avx2, // V_FLIPADST
+ fidentity16x16_new_avx2 // H_FLIPADST
+ };
+
+ // Row (horizontal) transforms for 16x16 tiles, indexed by TX_TYPE.
+ static const transform_1d_avx2 row_txfm16x16_arr[TX_TYPES] = {
+ fdct16x16_new_avx2, // DCT_DCT
+ fdct16x16_new_avx2, // ADST_DCT
+ fadst16x16_new_avx2, // DCT_ADST
+ fadst16x16_new_avx2, // ADST_ADST
+ fdct16x16_new_avx2, // FLIPADST_DCT
+ fadst16x16_new_avx2, // DCT_FLIPADST
+ fadst16x16_new_avx2, // FLIPADST_FLIPADST
+ fadst16x16_new_avx2, // ADST_FLIPADST
+ fadst16x16_new_avx2, // FLIPADST_ADST
+ fidentity16x16_new_avx2, // IDTX
+ fidentity16x16_new_avx2, // V_DCT
+ fdct16x16_new_avx2, // H_DCT
+ fidentity16x16_new_avx2, // V_ADST
+ fadst16x16_new_avx2, // H_ADST
+ fidentity16x16_new_avx2, // V_FLIPADST
+ fadst16x16_new_avx2 // H_FLIPADST
+ };
+
+ // Column (vertical) transforms for 8x8 tiles (SSE2 kernels), indexed
+ // by TX_TYPE.
+ static const transform_1d_sse2 col_txfm8x8_arr[TX_TYPES] = {
+ fdct8x8_new_sse2, // DCT_DCT
+ fadst8x8_new_sse2, // ADST_DCT
+ fdct8x8_new_sse2, // DCT_ADST
+ fadst8x8_new_sse2, // ADST_ADST
+ fadst8x8_new_sse2, // FLIPADST_DCT
+ fdct8x8_new_sse2, // DCT_FLIPADST
+ fadst8x8_new_sse2, // FLIPADST_FLIPADST
+ fadst8x8_new_sse2, // ADST_FLIPADST
+ fadst8x8_new_sse2, // FLIPADST_ADST
+ fidentity8x8_new_sse2, // IDTX
+ fdct8x8_new_sse2, // V_DCT
+ fidentity8x8_new_sse2, // H_DCT
+ fadst8x8_new_sse2, // V_ADST
+ fidentity8x8_new_sse2, // H_ADST
+ fadst8x8_new_sse2, // V_FLIPADST
+ fidentity8x8_new_sse2, // H_FLIPADST
+ };
+
+ // Row (horizontal) transforms for 8x8 tiles (SSE2 kernels), indexed
+ // by TX_TYPE.
+ static const transform_1d_sse2 row_txfm8x8_arr[TX_TYPES] = {
+ fdct8x8_new_sse2, // DCT_DCT
+ fdct8x8_new_sse2, // ADST_DCT
+ fadst8x8_new_sse2, // DCT_ADST
+ fadst8x8_new_sse2, // ADST_ADST
+ fdct8x8_new_sse2, // FLIPADST_DCT
+ fadst8x8_new_sse2, // DCT_FLIPADST
+ fadst8x8_new_sse2, // FLIPADST_FLIPADST
+ fadst8x8_new_sse2, // ADST_FLIPADST
+ fadst8x8_new_sse2, // FLIPADST_ADST
+ fidentity8x8_new_sse2, // IDTX
+ fidentity8x8_new_sse2, // V_DCT
+ fdct8x8_new_sse2, // H_DCT
+ fidentity8x8_new_sse2, // V_ADST
+ fadst8x8_new_sse2, // H_ADST
+ fidentity8x8_new_sse2, // V_FLIPADST
+ fadst8x8_new_sse2 // H_FLIPADST
+ };
+
+ // Loads 8 rows of 8 int16 pixels (aligned loads; each row start must
+ // be 16-byte aligned) and pre-scales every value by 2^bit via a left
+ // shift, as the forward-transform input upshift.
+ static INLINE void load_buffer_and_round_shift(const int16_t *in, int stride,
+ __m128i *out, int bit) {
+ out[0] = _mm_load_si128((const __m128i *)(in + 0 * stride));
+ out[1] = _mm_load_si128((const __m128i *)(in + 1 * stride));
+ out[2] = _mm_load_si128((const __m128i *)(in + 2 * stride));
+ out[3] = _mm_load_si128((const __m128i *)(in + 3 * stride));
+ out[4] = _mm_load_si128((const __m128i *)(in + 4 * stride));
+ out[5] = _mm_load_si128((const __m128i *)(in + 5 * stride));
+ out[6] = _mm_load_si128((const __m128i *)(in + 6 * stride));
+ out[7] = _mm_load_si128((const __m128i *)(in + 7 * stride));
+ out[0] = _mm_slli_epi16(out[0], bit);
+ out[1] = _mm_slli_epi16(out[1], bit);
+ out[2] = _mm_slli_epi16(out[2], bit);
+ out[3] = _mm_slli_epi16(out[3], bit);
+ out[4] = _mm_slli_epi16(out[4], bit);
+ out[5] = _mm_slli_epi16(out[5], bit);
+ out[6] = _mm_slli_epi16(out[6], bit);
+ out[7] = _mm_slli_epi16(out[7], bit);
+ }
+
+// Same as load_buffer_and_round_shift() but stores the rows in reverse
+// order (vertical flip), as needed for FLIPADST column transforms.  Loads
+// go through load_16bit_to_16bit(), matching the original (unaligned) path.
+static INLINE void load_buffer_and_flip_round_shift(const int16_t *in,
+                                                    int stride, __m128i *out,
+                                                    int bit) {
+ for (int row = 0; row < 8; ++row) {
+ const __m128i v = load_16bit_to_16bit(in + row * stride);
+ out[7 - row] = _mm_slli_epi16(v, bit);
+ }
+}
+
+// Transposes the 8x8 16-bit block held in b0..b3 (two source rows per
+// 256-bit register, as laid out by the callers below) into c0..c3.
+// NOTE: this macro expects __m256i b0..b3 (inputs) and c0..c3 (outputs) to
+// already be in scope at the expansion site.
+#define TRANSPOSE_8X8_AVX2() \
+ { \
+ /* aa0: 00 10 01 11 02 12 03 13 | 40 50 41 51 42 52 43 53*/ \
+ /* aa1: 04 14 05 15 06 16 07 17 | 44 54 45 55 46 56 47 57*/ \
+ /* aa2: 20 30 21 31 22 32 23 33 | 60 70 61 71 62 72 63 73*/ \
+ /* aa3: 24 34 25 35 26 36 27 37 | 64 74 65 75 66 76 67 77*/ \
+ const __m256i aa0 = _mm256_unpacklo_epi16(b0, b1); \
+ const __m256i aa1 = _mm256_unpackhi_epi16(b0, b1); \
+ const __m256i aa2 = _mm256_unpacklo_epi16(b2, b3); \
+ const __m256i aa3 = _mm256_unpackhi_epi16(b2, b3); \
+ /* Unpack 32 bit elements resulting in: */ \
+ /* bb0: 00 10 20 30 01 11 21 31 | 40 50 60 70 41 51 61 71*/ \
+ /* bb1: 02 12 22 32 03 13 23 33 | 42 52 62 72 43 53 63 73*/ \
+ /* bb2: 04 14 24 34 05 15 25 35 | 44 54 64 74 45 55 65 75*/ \
+ /* bb2: 06 16 26 36 07 17 27 37 | 46 56 66 76 47 57 67 77*/ \
+ const __m256i bb0 = _mm256_unpacklo_epi32(aa0, aa2); \
+ const __m256i bb1 = _mm256_unpackhi_epi32(aa0, aa2); \
+ const __m256i bb2 = _mm256_unpacklo_epi32(aa1, aa3); \
+ const __m256i bb3 = _mm256_unpackhi_epi32(aa1, aa3); \
+ /* bb0: 00 10 20 30 40 50 60 70| 01 11 21 31 41 51 61 71*/ \
+ /* bb1: 02 12 22 32 42 52 62 72| 03 13 23 33 43 53 63 73*/ \
+ /* bb2: 04 14 24 34 44 54 64 74| 05 15 25 35 45 55 65 75*/ \
+ /* bb2: 06 16 26 36 46 56 66 76| 07 17 27 37 47 57 67 77*/ \
+ c0 = _mm256_permute4x64_epi64(bb0, 0xd8); \
+ c1 = _mm256_permute4x64_epi64(bb1, 0xd8); \
+ c2 = _mm256_permute4x64_epi64(bb2, 0xd8); \
+ c3 = _mm256_permute4x64_epi64(bb3, 0xd8); \
+ }
+
+// Applies the inter-stage rounding right-shift to an 8x8 16-bit block, then
+// transposes it while reversing the output row order (left-right flip after
+// the transpose), as required by FLIPADST row transforms.  The caller
+// passes the (negative) shift[1]; it is negated here to a right-shift.
+static INLINE void transpose_round_shift_flip_8x8(__m128i *const in,
+                                                  __m128i *const out, int bit) {
+ __m256i c0, c1, c2, c3;
+ // bit arrives negative (see assert(shift[1] < 0) at the call site).
+ bit = -bit;
+ const __m256i rounding = _mm256_set1_epi16(1 << (bit - 1));
+ const __m256i s04 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[0]), in[4], 0x1);
+ const __m256i s15 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[1]), in[5], 0x1);
+ const __m256i s26 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[2]), in[6], 0x1);
+ const __m256i s37 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[3]), in[7], 0x1);
+
+ const __m256i a0 = _mm256_adds_epi16(s04, rounding);
+ const __m256i a1 = _mm256_adds_epi16(s15, rounding);
+ const __m256i a2 = _mm256_adds_epi16(s26, rounding);
+ const __m256i a3 = _mm256_adds_epi16(s37, rounding);
+
+ // b0: 00 01 02 03 04 05 06 07 | 40 41 42 43 44 45 46 47
+ // b1: 10 11 12 13 14 15 16 17 | 50 51 52 53 54 55 56 57
+ // b2: 20 21 22 23 24 25 26 27 | 60 61 62 63 64 65 66 67
+ // b3: 30 31 32 33 34 35 36 37 | 70 71 72 73 74 75 76 77
+ const __m256i b0 = _mm256_srai_epi16(a0, bit);
+ const __m256i b1 = _mm256_srai_epi16(a1, bit);
+ const __m256i b2 = _mm256_srai_epi16(a2, bit);
+ const __m256i b3 = _mm256_srai_epi16(a3, bit);
+
+ TRANSPOSE_8X8_AVX2()
+
+ // Unpack 64 bit elements resulting in:
+ // out[7]: 00 10 20 30 40 50 60 70
+ // out[6]: 01 11 21 31 41 51 61 71
+ // out[5]: 02 12 22 32 42 52 62 72
+ // out[4]: 03 13 23 33 43 53 63 73
+ // out[3]: 04 14 24 34 44 54 64 74
+ // out[2]: 05 15 25 35 45 55 65 75
+ // out[1]: 06 16 26 36 46 56 66 76
+ // out[0]: 07 17 27 37 47 57 67 77
+ out[7] = _mm256_castsi256_si128(c0);
+ out[6] = _mm256_extractf128_si256(c0, 1);
+ out[5] = _mm256_castsi256_si128(c1);
+ out[4] = _mm256_extractf128_si256(c1, 1);
+ out[3] = _mm256_castsi256_si128(c2);
+ out[2] = _mm256_extractf128_si256(c2, 1);
+ out[1] = _mm256_castsi256_si128(c3);
+ out[0] = _mm256_extractf128_si256(c3, 1);
+}
+
+// Applies the inter-stage rounding right-shift to an 8x8 16-bit block and
+// transposes it (no flip).  The caller passes the (negative) shift[1]; it
+// is negated here to a right-shift amount.
+static INLINE void transpose_round_shift_8x8(__m128i *const in,
+                                             __m128i *const out, int bit) {
+ __m256i c0, c1, c2, c3;
+ // bit arrives negative (see assert(shift[1] < 0) at the call site).
+ bit = -bit;
+ const __m256i rounding = _mm256_set1_epi16(1 << (bit - 1));
+ const __m256i s04 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[0]), in[4], 0x1);
+ const __m256i s15 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[1]), in[5], 0x1);
+ const __m256i s26 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[2]), in[6], 0x1);
+ const __m256i s37 =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(in[3]), in[7], 0x1);
+
+ const __m256i a0 = _mm256_adds_epi16(s04, rounding);
+ const __m256i a1 = _mm256_adds_epi16(s15, rounding);
+ const __m256i a2 = _mm256_adds_epi16(s26, rounding);
+ const __m256i a3 = _mm256_adds_epi16(s37, rounding);
+
+ // b0: 00 01 02 03 04 05 06 07 | 40 41 42 43 44 45 46 47
+ // b1: 10 11 12 13 14 15 16 17 | 50 51 52 53 54 55 56 57
+ // b2: 20 21 22 23 24 25 26 27 | 60 61 62 63 64 65 66 67
+ // b3: 30 31 32 33 34 35 36 37 | 70 71 72 73 74 75 76 77
+ const __m256i b0 = _mm256_srai_epi16(a0, bit);
+ const __m256i b1 = _mm256_srai_epi16(a1, bit);
+ const __m256i b2 = _mm256_srai_epi16(a2, bit);
+ const __m256i b3 = _mm256_srai_epi16(a3, bit);
+
+ TRANSPOSE_8X8_AVX2()
+ // Unpack 64 bit elements resulting in:
+ // out[7]: 00 10 20 30 40 50 60 70
+ // out[6]: 01 11 21 31 41 51 61 71
+ // out[5]: 02 12 22 32 42 52 62 72
+ // out[4]: 03 13 23 33 43 53 63 73
+ // out[3]: 04 14 24 34 44 54 64 74
+ // out[2]: 05 15 25 35 45 55 65 75
+ // out[1]: 06 16 26 36 46 56 66 76
+ // out[0]: 07 17 27 37 47 57 67 77
+ out[0] = _mm256_castsi256_si128(c0);
+ out[1] = _mm256_extractf128_si256(c0, 1);
+ out[2] = _mm256_castsi256_si128(c1);
+ out[3] = _mm256_extractf128_si256(c1, 1);
+ out[4] = _mm256_castsi256_si128(c2);
+ out[5] = _mm256_extractf128_si256(c2, 1);
+ out[6] = _mm256_castsi256_si128(c3);
+ out[7] = _mm256_extractf128_si256(c3, 1);
+}
+
+// Sign-extends out_size rows of 8 int16 coefficients to int32 and stores
+// them with the given row stride.  Aligned 256-bit stores are used, so the
+// destination rows must be 32-byte aligned.
+static INLINE void store_buffer_16bit_to_32bit_w8_avx2(const __m128i *const in,
+                                                       int32_t *const out,
+                                                       const int stride,
+                                                       const int out_size) {
+ int32_t *dst = out;
+ for (int r = 0; r < out_size; ++r, dst += stride) {
+ const __m256i widened = _mm256_cvtepi16_epi32(in[r]);
+ _mm256_store_si256((__m256i *)dst, widened);
+ }
+}
+
+// 2-D forward transform of an 8x8 low-bitdepth block: pre-shifted load
+// (optionally vertically flipped), column 1-D transform, rounding transpose
+// (optionally flipped for FLIPADST rows), row 1-D transform, then widening
+// store of the 16-bit results into the int32 output.  bd is unused in the
+// lowbd path.
+static void av1_lowbd_fwd_txfm2d_8x8_avx2(const int16_t *input, int32_t *output,
+                                          int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m128i buf0[8], buf1[8], *buf;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X8];
+ const int txw_idx = get_txw_idx(TX_8X8);
+ const int txh_idx = get_txh_idx(TX_8X8);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const transform_1d_sse2 col_txfm = col_txfm8x8_arr[tx_type];
+ const transform_1d_sse2 row_txfm = row_txfm8x8_arr[tx_type];
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ // Condition to check shift bit is avoided while round shifting, by assuming
+ // that shift[0] will always be positive.
+ assert(shift[0] > 0);
+ if (ud_flip)
+ load_buffer_and_flip_round_shift(input, stride, buf0, shift[0]);
+ else
+ load_buffer_and_round_shift(input, stride, buf0, shift[0]);
+
+ col_txfm(buf0, buf0, cos_bit_col);
+ // Condition to check shift bit is avoided while round shifting, by assuming
+ // that shift[1] will always be negative.
+ assert(shift[1] < 0);
+
+ if (lr_flip) {
+ transpose_round_shift_flip_8x8(buf0, buf1, shift[1]);
+ } else {
+ transpose_round_shift_8x8(buf0, buf1, shift[1]);
+ }
+
+ buf = buf1;
+ row_txfm(buf, buf, cos_bit_row);
+
+ // Round and shift operation is avoided here as the shift bit is assumed to be
+ // zero always.
+ assert(shift[2] == 0);
+ store_buffer_16bit_to_32bit_w8_avx2(buf, output, 8, 8);
+}
+
+// 2-D forward transform of a 16x16 low-bitdepth block: column transform on
+// one 16-wide strip, transpose, then row transform and widening store.
+// bd is unused in the lowbd path.
+static void lowbd_fwd_txfm2d_16x16_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ const TX_SIZE tx_size = TX_16X16;
+ __m256i buf0[16], buf1[16];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = col_txfm16x16_arr[tx_type];
+ const transform_1d_avx2 row_txfm = row_txfm16x16_arr[tx_type];
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ // A single 16-wide strip covers the whole block, so unlike the 32x32
+ // variant no strip loop (and no strip offset) is needed here.
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_flip_avx2(input, stride, buf0, height);
+ } else {
+ load_buffer_16bit_to_16bit_avx2(input, stride, buf0, height);
+ }
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ transpose_16bit_16x16_avx2(buf0, buf1);
+
+ __m256i *buf;
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_avx2(buf1, buf, width);
+ } else {
+ buf = buf1;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit_w16_avx2(buf, width, shift[2]);
+ store_buffer_16bit_to_32bit_w16_avx2(buf, output, height, width);
+}
+
+// 2-D forward transform of a 32x32 low-bitdepth block.  The column pass
+// processes two 16-wide strips, transposing each 16x16 tile into buf1; the
+// row pass then runs on two 16-row strips of the transposed data.  bd is
+// unused in the lowbd path.
+static void lowbd_fwd_txfm2d_32x32_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ const TX_SIZE tx_size = TX_32X32;
+ __m256i buf0[32], buf1[128];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = col_txfm16x32_arr[tx_type];
+ const transform_1d_avx2 row_txfm = row_txfm16x32_arr[tx_type];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ // Column pass, one 16-column strip at a time.
+ for (int i = 0; i < 2; i++) {
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_flip_avx2(input + 16 * i, stride, buf0,
+ height);
+ } else {
+ load_buffer_16bit_to_16bit_avx2(input + 16 * i, stride, buf0, height);
+ }
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ transpose_16bit_16x16_avx2(buf0 + 0 * 16, buf1 + 0 * width + 16 * i);
+ transpose_16bit_16x16_avx2(buf0 + 1 * 16, buf1 + 1 * width + 16 * i);
+ }
+
+ // Row pass on the transposed data, one 16-row strip at a time.
+ for (int i = 0; i < 2; i++) {
+ __m256i *buf;
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_avx2(buf1 + width * i, buf, width);
+ } else {
+ buf = buf1 + width * i;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit_w16_avx2(buf, width, shift[2]);
+ store_buffer_16bit_to_32bit_w16_avx2(buf, output + i * 16, height, width);
+ }
+}
+
+// 2-D forward DCT_DCT of a 64x64 low-bitdepth block.  The column pass runs
+// in 16-bit precision; the row pass widens to 32-bit for fdct64.  Only the
+// top-left 32x32 coefficients are kept (the AOMMIN(2, ...) bounds and the
+// 32-sized stores), per AV1's 64-point transform coefficient zeroing.
+static void lowbd_fwd_txfm2d_64x64_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ (void)tx_type;
+ assert(tx_type == DCT_DCT);
+ const TX_SIZE tx_size = TX_64X64;
+ __m256i buf0[64], buf1[256];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = fdct16x64_new_avx2;
+ const int width_div16 = (width >> 4);
+ const int height_div16 = (height >> 4);
+
+ for (int i = 0; i < width_div16; i++) {
+ load_buffer_16bit_to_16bit_avx2(input + 16 * i, stride, buf0, height);
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ // Only the first 32 output rows of the column pass are transposed/kept.
+ for (int j = 0; j < AOMMIN(2, height_div16); ++j) {
+ transpose_16bit_16x16_avx2(buf0 + j * 16, buf1 + j * width + 16 * i);
+ }
+ }
+
+ for (int i = 0; i < AOMMIN(2, height_div16); i++) {
+ __m256i bufA[64];
+ __m256i bufB[64];
+ __m128i *buf = (__m128i *)(buf1 + width * i);
+ // Widen 16-bit lanes to 32-bit: A = low halves, B = high halves.
+ for (int j = 0; j < width; ++j) {
+ bufA[j] = _mm256_cvtepi16_epi32(buf[j * 2]);
+ bufB[j] = _mm256_cvtepi16_epi32(buf[j * 2 + 1]);
+ }
+ fdct64_new_avx2(bufA, bufA, cos_bit_row);
+ fdct64_new_avx2(bufB, bufB, cos_bit_row);
+ round_shift_array_32_avx2(bufA, bufA, 32, -shift[2]);
+ round_shift_array_32_avx2(bufB, bufB, 32, -shift[2]);
+ store_output_32bit_w16(output + i * 16, bufA, bufB, 32, 32);
+ }
+}
+
+// 2-D forward transform of a 16x32 low-bitdepth block: 32-point column
+// transform on one 16-wide strip, two 16x16 transposes, then a 16-point row
+// transform per 16-row strip with the rectangular (sqrt2-scaled) store.
+// bd is unused in the lowbd path.
+static void lowbd_fwd_txfm2d_16x32_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ const TX_SIZE tx_size = TX_16X32;
+ __m256i buf0[32], buf1[32];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = col_txfm16x32_arr[tx_type];
+ const transform_1d_avx2 row_txfm = row_txfm16x16_arr[tx_type];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_flip_avx2(input, stride, buf0, height);
+ } else {
+ load_buffer_16bit_to_16bit_avx2(input, stride, buf0, height);
+ }
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ transpose_16bit_16x16_avx2(buf0, buf1);
+ transpose_16bit_16x16_avx2(buf0 + 16, buf1 + 16);
+
+ for (int i = 0; i < 2; i++) {
+ __m256i *buf;
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_avx2(buf1 + width * i, buf, width);
+ } else {
+ buf = buf1 + width * i;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit_w16_avx2(buf, width, shift[2]);
+ store_rect_buffer_16bit_to_32bit_w16_avx2(buf, output + i * 16, height,
+ width);
+ }
+}
+
+// 2-D forward transform of a 32x16 low-bitdepth block: 16-point column
+// transform over two 16-wide strips, then one 32-point row transform with
+// the rectangular (sqrt2-scaled) store.  bd is unused in the lowbd path.
+static void lowbd_fwd_txfm2d_32x16_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m256i buf0[32], buf1[64];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X16];
+ const int txw_idx = get_txw_idx(TX_32X16);
+ const int txh_idx = get_txh_idx(TX_32X16);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = 32;
+ const int height = 16;
+ const transform_1d_avx2 col_txfm = col_txfm16x16_arr[tx_type];
+ const transform_1d_avx2 row_txfm = row_txfm16x32_arr[tx_type];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ for (int i = 0; i < 2; i++) {
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_flip_avx2(input + 16 * i, stride, buf0,
+ height);
+ } else {
+ load_buffer_16bit_to_16bit_avx2(input + 16 * i, stride, buf0, height);
+ }
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ transpose_16bit_16x16_avx2(buf0, buf1 + 0 * width + 16 * i);
+ }
+
+ __m256i *buf;
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_avx2(buf1, buf, width);
+ } else {
+ buf = buf1;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit_w16_avx2(buf, width, shift[2]);
+ store_rect_buffer_16bit_to_32bit_w16_avx2(buf, output, height, width);
+}
+
+// 2-D forward DCT_DCT of a 64x32 low-bitdepth block.  The 32-point column
+// pass runs in 16-bit over four 16-wide strips; the 64-point row pass runs
+// in 32-bit with the NewSqrt2 rectangular scaling.  Only the left 32
+// coefficient columns are kept (32-sized stores), per AV1's 64-point
+// coefficient zeroing.
+static void lowbd_fwd_txfm2d_64x32_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ const TX_SIZE tx_size = TX_64X32;
+ __m256i buf0[64], buf1[256];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = col_txfm16x32_arr[tx_type];
+ const int width_div16 = (width >> 4);
+ const int height_div16 = (height >> 4);
+
+ for (int i = 0; i < width_div16; i++) {
+ load_buffer_16bit_to_16bit_avx2(input + 16 * i, stride, buf0, height);
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ for (int j = 0; j < AOMMIN(4, height_div16); ++j) {
+ transpose_16bit_16x16_avx2(buf0 + j * 16, buf1 + j * width + 16 * i);
+ }
+ }
+ assert(tx_type == DCT_DCT);
+ for (int i = 0; i < AOMMIN(2, height_div16); i++) {
+ __m256i bufA[64];
+ __m256i bufB[64];
+ __m128i *buf = (__m128i *)(buf1 + width * i);
+ // Widen 16-bit lanes to 32-bit: A = low halves, B = high halves.
+ for (int j = 0; j < width; ++j) {
+ bufA[j] = _mm256_cvtepi16_epi32(buf[j * 2]);
+ bufB[j] = _mm256_cvtepi16_epi32(buf[j * 2 + 1]);
+ }
+ fdct64_new_avx2(bufA, bufA, cos_bit_row);
+ fdct64_new_avx2(bufB, bufB, cos_bit_row);
+ round_shift_rect_array_32_avx2(bufA, bufA, 32, -shift[2], NewSqrt2);
+ round_shift_rect_array_32_avx2(bufB, bufB, 32, -shift[2], NewSqrt2);
+
+ store_output_32bit_w16(output + i * 16, bufA, bufB, 32, 32);
+ }
+}
+
+// 2-D forward DCT_DCT of a 32x64 low-bitdepth block.  The 64-point column
+// pass runs in 16-bit over two 16-wide strips; the 32-point row pass runs
+// in 32-bit with the NewSqrt2 rectangular scaling.  Only the top 32
+// coefficient rows are kept (AOMMIN(2, ...) bounds), per AV1's 64-point
+// coefficient zeroing.
+static void lowbd_fwd_txfm2d_32x64_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ (void)tx_type;
+ assert(tx_type == DCT_DCT);
+ const TX_SIZE tx_size = TX_32X64;
+ __m256i buf0[64], buf1[256];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = fdct16x64_new_avx2;
+ const int width_div16 = (width >> 4);
+ const int height_div16 = (height >> 4);
+
+ for (int i = 0; i < width_div16; i++) {
+ load_buffer_16bit_to_16bit_avx2(input + 16 * i, stride, buf0, height);
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ for (int j = 0; j < AOMMIN(2, height_div16); ++j) {
+ transpose_16bit_16x16_avx2(buf0 + j * 16, buf1 + j * width + 16 * i);
+ }
+ }
+
+ for (int i = 0; i < AOMMIN(2, height_div16); i++) {
+ __m256i bufA[32];
+ __m256i bufB[32];
+ __m128i *buf = (__m128i *)(buf1 + width * i);
+ // Widen 16-bit lanes to 32-bit: A = low halves, B = high halves.
+ for (int j = 0; j < width; ++j) {
+ bufA[j] = _mm256_cvtepi16_epi32(buf[j * 2]);
+ bufB[j] = _mm256_cvtepi16_epi32(buf[j * 2 + 1]);
+ }
+ fdct32_avx2(bufA, bufA, cos_bit_row);
+ fdct32_avx2(bufB, bufB, cos_bit_row);
+ round_shift_rect_array_32_avx2(bufA, bufA, 32, -shift[2], NewSqrt2);
+ round_shift_rect_array_32_avx2(bufB, bufB, 32, -shift[2], NewSqrt2);
+
+ store_output_32bit_w16(output + i * 16, bufA, bufB, 32, 32);
+ }
+}
+
+// 2-D forward DCT_DCT of a 16x64 low-bitdepth block: 64-point column pass
+// on one 16-wide strip, then a 16-point row pass over the kept strips.
+// Only the first AOMMIN(2, height_div16) row strips are processed/stored —
+// presumably AV1's 64-point coefficient zeroing; TODO(review) confirm the
+// remaining output is cleared by the caller.
+static void lowbd_fwd_txfm2d_16x64_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ (void)tx_type;
+ assert(tx_type == DCT_DCT);
+ const TX_SIZE tx_size = TX_16X64;
+ __m256i buf0[64], buf1[64];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = fdct16x64_new_avx2;
+ const transform_1d_avx2 row_txfm = fdct16x16_new_avx2;
+ const int width_div16 = (width >> 4);
+ const int height_div16 = (height >> 4);
+
+ for (int i = 0; i < width_div16; i++) {
+ load_buffer_16bit_to_16bit_avx2(input + 16 * i, stride, buf0, height);
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ for (int j = 0; j < height_div16; ++j) {
+ transpose_16bit_16x16_avx2(buf0 + j * 16, buf1 + j * width + 16 * i);
+ }
+ }
+
+ for (int i = 0; i < AOMMIN(2, height_div16); i++) {
+ __m256i *buf = buf1 + width * i;
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit_w16_avx2(buf, width, shift[2]);
+ store_buffer_16bit_to_32bit_w16_avx2(buf, output + width * i, 32, width);
+ }
+}
+
+// 2-D forward DCT_DCT of a 64x16 low-bitdepth block: 16-point column pass
+// over four 16-wide strips, then a 64-point row pass.  Only the left 32
+// coefficient columns are stored (out_size 32 in the store call) and the
+// remaining output region is explicitly zeroed, per AV1's 64-point
+// coefficient zeroing.
+static void lowbd_fwd_txfm2d_64x16_avx2(const int16_t *input, int32_t *output,
+                                        int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ (void)tx_type;
+ assert(tx_type == DCT_DCT);
+ const TX_SIZE tx_size = TX_64X16;
+ __m256i buf0[64], buf1[64];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = fdct16x16_new_avx2;
+ const transform_1d_avx2 row_txfm = fdct16x64_new_avx2;
+ const int width_div16 = (width >> 4);
+ const int height_div16 = (height >> 4);
+
+ for (int i = 0; i < width_div16; i++) {
+ load_buffer_16bit_to_16bit_avx2(input + 16 * i, stride, buf0, height);
+ round_shift_16bit_w16_avx2(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf0, height, shift[1]);
+ for (int j = 0; j < height_div16; ++j) {
+ transpose_16bit_16x16_avx2(buf0 + j * 16, buf1 + j * width + 16 * i);
+ }
+ }
+
+ for (int i = 0; i < height_div16; i++) {
+ __m256i *buf = buf1 + width * i;
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit_w16_avx2(buf, width, shift[2]);
+ store_buffer_16bit_to_32bit_w16_avx2(buf, output + 16 * i, 16, 32);
+ }
+ // Zero out the bottom 16x32 area.
+ memset(output + 16 * 32, 0, 16 * 32 * sizeof(*output));
+}
+
+// 16-bit butterfly: interleaves *in0/*in1, multiply-accumulates the pairs
+// against the weight vectors *w0 and *w1, rounds (*__rounding) and shifts
+// (*cos_bit) back to 16-bit, then returns the two packed results split into
+// their low 128-bit lanes (out0/out1) and high 128-bit lanes (out2/out3).
+static INLINE void btf_16_avx2(__m256i *w0, __m256i *w1, __m256i *in0,
+                               __m256i *in1, __m128i *out0, __m128i *out1,
+                               __m128i *out2, __m128i *out3,
+                               const __m256i *__rounding, int8_t *cos_bit) {
+ __m256i t0 = _mm256_unpacklo_epi16(*in0, *in1);
+ __m256i t1 = _mm256_unpackhi_epi16(*in0, *in1);
+ __m256i u0 = _mm256_madd_epi16(t0, *w0);
+ __m256i u1 = _mm256_madd_epi16(t1, *w0);
+ __m256i v0 = _mm256_madd_epi16(t0, *w1);
+ __m256i v1 = _mm256_madd_epi16(t1, *w1);
+
+ __m256i a0 = _mm256_add_epi32(u0, *__rounding);
+ __m256i a1 = _mm256_add_epi32(u1, *__rounding);
+ __m256i b0 = _mm256_add_epi32(v0, *__rounding);
+ __m256i b1 = _mm256_add_epi32(v1, *__rounding);
+
+ __m256i c0 = _mm256_srai_epi32(a0, *cos_bit);
+ __m256i c1 = _mm256_srai_epi32(a1, *cos_bit);
+ __m256i d0 = _mm256_srai_epi32(b0, *cos_bit);
+ __m256i d1 = _mm256_srai_epi32(b1, *cos_bit);
+
+ __m256i temp0 = _mm256_packs_epi32(c0, c1);
+ __m256i temp1 = _mm256_packs_epi32(d0, d1);
+
+ *out0 = _mm256_castsi256_si128(temp0);
+ *out1 = _mm256_castsi256_si128(temp1);
+ *out2 = _mm256_extracti128_si256(temp0, 0x01);
+ *out3 = _mm256_extracti128_si256(temp1, 0x01);
+}
+
+// 8-point forward DCT applied to the sixteen 16-bit lanes of each __m256i
+// (i.e. 16 independent columns at once).  Standard 4-stage butterfly
+// network; outputs are written in the AV1 bit-reversed coefficient order.
+static INLINE void fdct8x8_new_avx2(const __m256i *input, __m256i *output,
+                                    int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i __rounding = _mm256_set1_epi32(1 << (cos_bit - 1));
+
+ __m256i cospi_m32_p32 = pair_set_w16_epi16(-cospi[32], cospi[32]);
+ __m256i cospi_p32_p32 = pair_set_w16_epi16(cospi[32], cospi[32]);
+ __m256i cospi_p32_m32 = pair_set_w16_epi16(cospi[32], -cospi[32]);
+ __m256i cospi_p48_p16 = pair_set_w16_epi16(cospi[48], cospi[16]);
+ __m256i cospi_m16_p48 = pair_set_w16_epi16(-cospi[16], cospi[48]);
+ __m256i cospi_p56_p08 = pair_set_w16_epi16(cospi[56], cospi[8]);
+ __m256i cospi_m08_p56 = pair_set_w16_epi16(-cospi[8], cospi[56]);
+ __m256i cospi_p24_p40 = pair_set_w16_epi16(cospi[24], cospi[40]);
+ __m256i cospi_m40_p24 = pair_set_w16_epi16(-cospi[40], cospi[24]);
+
+ // stage 1
+ __m256i x1[8];
+ x1[0] = _mm256_adds_epi16(input[0], input[7]);
+ x1[7] = _mm256_subs_epi16(input[0], input[7]);
+ x1[1] = _mm256_adds_epi16(input[1], input[6]);
+ x1[6] = _mm256_subs_epi16(input[1], input[6]);
+ x1[2] = _mm256_adds_epi16(input[2], input[5]);
+ x1[5] = _mm256_subs_epi16(input[2], input[5]);
+ x1[3] = _mm256_adds_epi16(input[3], input[4]);
+ x1[4] = _mm256_subs_epi16(input[3], input[4]);
+
+ // stage 2
+ __m256i x2[8];
+ x2[0] = _mm256_adds_epi16(x1[0], x1[3]);
+ x2[3] = _mm256_subs_epi16(x1[0], x1[3]);
+ x2[1] = _mm256_adds_epi16(x1[1], x1[2]);
+ x2[2] = _mm256_subs_epi16(x1[1], x1[2]);
+ x2[4] = x1[4];
+ btf_16_w16_avx2(cospi_m32_p32, cospi_p32_p32, &x1[5], &x1[6], __rounding,
+ cos_bit);
+ x2[5] = x1[5];
+ x2[6] = x1[6];
+ x2[7] = x1[7];
+
+ // stage 3
+ __m256i x3[8];
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x2[0], &x2[1], __rounding,
+ cos_bit);
+ x3[0] = x2[0];
+ x3[1] = x2[1];
+ btf_16_w16_avx2(cospi_p48_p16, cospi_m16_p48, &x2[2], &x2[3], __rounding,
+ cos_bit);
+ x3[2] = x2[2];
+ x3[3] = x2[3];
+ x3[4] = _mm256_adds_epi16(x2[4], x2[5]);
+ x3[5] = _mm256_subs_epi16(x2[4], x2[5]);
+ x3[6] = _mm256_subs_epi16(x2[7], x2[6]);
+ x3[7] = _mm256_adds_epi16(x2[7], x2[6]);
+
+ // stage 4
+ __m256i x4[8];
+ x4[0] = x3[0];
+ x4[1] = x3[1];
+ x4[2] = x3[2];
+ x4[3] = x3[3];
+ btf_16_w16_avx2(cospi_p56_p08, cospi_m08_p56, &x3[4], &x3[7], __rounding,
+ cos_bit);
+ x4[4] = x3[4];
+ x4[7] = x3[7];
+ btf_16_w16_avx2(cospi_p24_p40, cospi_m40_p24, &x3[5], &x3[6], __rounding,
+ cos_bit);
+ x4[5] = x3[5];
+ x4[6] = x3[6];
+ // stage 5: bit-reversed output ordering.
+ output[0] = x4[0];
+ output[1] = x4[4];
+ output[2] = x4[2];
+ output[3] = x4[6];
+ output[4] = x4[1];
+ output[5] = x4[5];
+ output[6] = x4[3];
+ output[7] = x4[7];
+}
+
+// 8-point forward ADST applied to the sixteen 16-bit lanes of each __m256i
+// (16 independent columns at once).  Six butterfly stages followed by the
+// AV1 ADST output permutation.
+static INLINE void fadst8x8_new_avx2(const __m256i *input, __m256i *output,
+                                     int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i __zero = _mm256_setzero_si256();
+ const __m256i __rounding = _mm256_set1_epi32(1 << (cos_bit - 1));
+
+ __m256i cospi_p32_p32 = pair_set_w16_epi16(cospi[32], cospi[32]);
+ __m256i cospi_p32_m32 = pair_set_w16_epi16(cospi[32], -cospi[32]);
+ __m256i cospi_p16_p48 = pair_set_w16_epi16(cospi[16], cospi[48]);
+ __m256i cospi_p48_m16 = pair_set_w16_epi16(cospi[48], -cospi[16]);
+ __m256i cospi_m48_p16 = pair_set_w16_epi16(-cospi[48], cospi[16]);
+ __m256i cospi_p04_p60 = pair_set_w16_epi16(cospi[4], cospi[60]);
+ __m256i cospi_p60_m04 = pair_set_w16_epi16(cospi[60], -cospi[4]);
+ __m256i cospi_p20_p44 = pair_set_w16_epi16(cospi[20], cospi[44]);
+ __m256i cospi_p44_m20 = pair_set_w16_epi16(cospi[44], -cospi[20]);
+ __m256i cospi_p36_p28 = pair_set_w16_epi16(cospi[36], cospi[28]);
+ __m256i cospi_p28_m36 = pair_set_w16_epi16(cospi[28], -cospi[36]);
+ __m256i cospi_p52_p12 = pair_set_w16_epi16(cospi[52], cospi[12]);
+ __m256i cospi_p12_m52 = pair_set_w16_epi16(cospi[12], -cospi[52]);
+
+ // stage 1: input permutation with sign flips.
+ __m256i x1[8];
+ x1[0] = input[0];
+ x1[1] = _mm256_subs_epi16(__zero, input[7]);
+ x1[2] = _mm256_subs_epi16(__zero, input[3]);
+ x1[3] = input[4];
+ x1[4] = _mm256_subs_epi16(__zero, input[1]);
+ x1[5] = input[6];
+ x1[6] = input[2];
+ x1[7] = _mm256_subs_epi16(__zero, input[5]);
+
+ // stage 2
+ __m256i x2[8];
+ x2[0] = x1[0];
+ x2[1] = x1[1];
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[2], &x1[3], __rounding,
+ cos_bit);
+ x2[2] = x1[2];
+ x2[3] = x1[3];
+ x2[4] = x1[4];
+ x2[5] = x1[5];
+ btf_16_w16_avx2(cospi_p32_p32, cospi_p32_m32, &x1[6], &x1[7], __rounding,
+ cos_bit);
+ x2[6] = x1[6];
+ x2[7] = x1[7];
+
+ // stage 3
+ __m256i x3[8];
+ x3[0] = _mm256_adds_epi16(x2[0], x2[2]);
+ x3[2] = _mm256_subs_epi16(x2[0], x2[2]);
+ x3[1] = _mm256_adds_epi16(x2[1], x2[3]);
+ x3[3] = _mm256_subs_epi16(x2[1], x2[3]);
+ x3[4] = _mm256_adds_epi16(x2[4], x2[6]);
+ x3[6] = _mm256_subs_epi16(x2[4], x2[6]);
+ x3[5] = _mm256_adds_epi16(x2[5], x2[7]);
+ x3[7] = _mm256_subs_epi16(x2[5], x2[7]);
+
+ // stage 4
+ __m256i x4[8];
+ x4[0] = x3[0];
+ x4[1] = x3[1];
+ x4[2] = x3[2];
+ x4[3] = x3[3];
+ btf_16_w16_avx2(cospi_p16_p48, cospi_p48_m16, &x3[4], &x3[5], __rounding,
+ cos_bit);
+ x4[4] = x3[4];
+ x4[5] = x3[5];
+ btf_16_w16_avx2(cospi_m48_p16, cospi_p16_p48, &x3[6], &x3[7], __rounding,
+ cos_bit);
+ x4[6] = x3[6];
+ x4[7] = x3[7];
+
+ // stage 5
+ __m256i x5[8];
+ x5[0] = _mm256_adds_epi16(x4[0], x4[4]);
+ x5[4] = _mm256_subs_epi16(x4[0], x4[4]);
+ x5[1] = _mm256_adds_epi16(x4[1], x4[5]);
+ x5[5] = _mm256_subs_epi16(x4[1], x4[5]);
+ x5[2] = _mm256_adds_epi16(x4[2], x4[6]);
+ x5[6] = _mm256_subs_epi16(x4[2], x4[6]);
+ x5[3] = _mm256_adds_epi16(x4[3], x4[7]);
+ x5[7] = _mm256_subs_epi16(x4[3], x4[7]);
+
+ // stage 6
+ __m256i x6[8];
+ btf_16_w16_avx2(cospi_p04_p60, cospi_p60_m04, &x5[0], &x5[1], __rounding,
+ cos_bit);
+ x6[0] = x5[0];
+ x6[1] = x5[1];
+ btf_16_w16_avx2(cospi_p20_p44, cospi_p44_m20, &x5[2], &x5[3], __rounding,
+ cos_bit);
+ x6[2] = x5[2];
+ x6[3] = x5[3];
+ btf_16_w16_avx2(cospi_p36_p28, cospi_p28_m36, &x5[4], &x5[5], __rounding,
+ cos_bit);
+ x6[4] = x5[4];
+ x6[5] = x5[5];
+ btf_16_w16_avx2(cospi_p52_p12, cospi_p12_m52, &x5[6], &x5[7], __rounding,
+ cos_bit);
+ x6[6] = x5[6];
+ x6[7] = x5[7];
+
+ // stage 7: ADST output permutation.
+ output[0] = x6[1];
+ output[1] = x6[6];
+ output[2] = x6[3];
+ output[3] = x6[4];
+ output[4] = x6[5];
+ output[5] = x6[2];
+ output[6] = x6[7];
+ output[7] = x6[0];
+}
+
+// 8-point identity transform: each coefficient is doubled (saturating
+// 16-bit add), matching the IDTX scaling for 8-point transforms.
+static INLINE void fidentity8x8_new_avx2(const __m256i *input, __m256i *output,
+                                         int8_t cos_bit) {
+ (void)cos_bit;
+
+ for (int i = 0; i < 8; ++i) {
+ output[i] = _mm256_adds_epi16(input[i], input[i]);
+ }
+}
+
+// 16-point forward DCT over 16 rows of eight 16-bit coefficients.
+// input/output: arrays of 16 __m128i vectors. Row pairs are packed into
+// __m256i registers so each butterfly stage processes two rows at once;
+// cos_bit selects the fixed-point precision of the cospi_arr() table.
+static INLINE void fdct8x16_new_avx2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i __rounding_256 = _mm256_set1_epi32(1 << (cos_bit - 1));
+ // NOTE(review): __rounding looks unused here, but it is presumably consumed
+ // inside the btf_16_sse2() macro expansion — confirm against av1_txfm_sse2.h.
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+ __m128i temp0, temp1, temp2, temp3;
+ __m256i in0, in1;
+ __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);
+ __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
+ __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
+ __m128i cospi_m48_m16 = pair_set_epi16(-cospi[48], -cospi[16]);
+ __m128i cospi_p56_p08 = pair_set_epi16(cospi[56], cospi[8]);
+ __m128i cospi_m08_p56 = pair_set_epi16(-cospi[8], cospi[56]);
+ __m128i cospi_p24_p40 = pair_set_epi16(cospi[24], cospi[40]);
+ __m128i cospi_m40_p24 = pair_set_epi16(-cospi[40], cospi[24]);
+ __m128i cospi_p60_p04 = pair_set_epi16(cospi[60], cospi[4]);
+ __m128i cospi_m04_p60 = pair_set_epi16(-cospi[4], cospi[60]);
+ __m128i cospi_p28_p36 = pair_set_epi16(cospi[28], cospi[36]);
+ __m128i cospi_m36_p28 = pair_set_epi16(-cospi[36], cospi[28]);
+ __m128i cospi_p44_p20 = pair_set_epi16(cospi[44], cospi[20]);
+ __m128i cospi_m20_p44 = pair_set_epi16(-cospi[20], cospi[44]);
+ __m128i cospi_p12_p52 = pair_set_epi16(cospi[12], cospi[52]);
+ __m128i cospi_m52_p12 = pair_set_epi16(-cospi[52], cospi[12]);
+
+ // Each 256-bit entry packs two 128-bit twiddle pairs, one per lane, so a
+ // single btf_16_avx2() can apply different rotations to the two packed rows.
+ __m256i cospi_arr[12];
+
+ cospi_arr[0] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_m32_p32),
+ cospi_m32_p32, 0x1);
+ cospi_arr[1] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p32_p32),
+ cospi_p32_p32, 0x1);
+ cospi_arr[2] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p32_p32),
+ cospi_p48_p16, 0x1);
+ cospi_arr[3] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p32_m32),
+ cospi_m16_p48, 0x1);
+ cospi_arr[4] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_m16_p48),
+ cospi_m48_m16, 0x1);
+ cospi_arr[5] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p48_p16),
+ cospi_m16_p48, 0x1);
+ cospi_arr[6] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p56_p08),
+ cospi_p24_p40, 0x1);
+ cospi_arr[7] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_m08_p56),
+ cospi_m40_p24, 0x1);
+ cospi_arr[8] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p60_p04),
+ cospi_p28_p36, 0x1);
+ cospi_arr[9] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_m04_p60),
+ cospi_m36_p28, 0x1);
+ cospi_arr[10] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p44_p20),
+ cospi_p12_p52, 0x1);
+ cospi_arr[11] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_m20_p44),
+ cospi_m52_p12, 0x1);
+
+ // Pack the 16 input rows into 8 registers. The lane placement pairs row r
+ // with row 15-r (e.g. x[0]={in0,in1} vs x[1]={in15,in14}) so that stage 1
+ // computes the standard DCT mirror-sum/difference butterflies directly.
+ __m256i x[8];
+ x[0] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[0]), input[1], 0x1);
+ x[1] = _mm256_insertf128_si256(_mm256_castsi128_si256(input[15]), input[14],
+ 0x1);
+ x[2] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[2]), input[3], 0x1);
+ x[3] = _mm256_insertf128_si256(_mm256_castsi128_si256(input[13]), input[12],
+ 0x1);
+ x[4] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[5]), input[4], 0x1);
+ x[5] = _mm256_insertf128_si256(_mm256_castsi128_si256(input[10]), input[11],
+ 0x1);
+ x[6] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[7]), input[6], 0x1);
+ x[7] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[8]), input[9], 0x1);
+
+ // stage 1
+ __m256i x1[8];
+ x1[0] = _mm256_adds_epi16(x[0], x[1]);
+ x1[7] = _mm256_subs_epi16(x[0], x[1]);
+ x1[1] = _mm256_adds_epi16(x[2], x[3]);
+ x1[6] = _mm256_subs_epi16(x[2], x[3]);
+ x1[2] = _mm256_adds_epi16(x[4], x[5]);
+ x1[5] = _mm256_subs_epi16(x[4], x[5]);
+ x1[3] = _mm256_adds_epi16(x[6], x[7]);
+ x1[4] = _mm256_subs_epi16(x[6], x[7]);
+
+ // stage 2
+ __m256i x2[8];
+ x2[0] = _mm256_adds_epi16(x1[0], x1[3]);
+ x2[7] = _mm256_subs_epi16(x1[0], x1[3]);
+ x2[1] = _mm256_adds_epi16(x1[1], x1[2]);
+ x2[6] = _mm256_subs_epi16(x1[1], x1[2]);
+ x2[2] = x1[4];
+ x2[3] = x1[7];
+ btf_16_avx2(&cospi_arr[0], &cospi_arr[1], &x1[5], &x1[6], &temp0, &temp1,
+ &temp2, &temp3, &__rounding_256, &cos_bit);
+ x2[4] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp2), temp0, 0x1);
+ x2[5] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp3), temp1, 0x1);
+
+ // stage 3
+ __m256i x3[8];
+ // Swap the 128-bit lanes of x2[1] so the add/sub below combines the
+ // matching halves across the lane boundary.
+ x2[1] = _mm256_permute4x64_epi64(x2[1], 0x4e);
+ x3[0] = _mm256_adds_epi16(x2[0], x2[1]);
+ x3[1] = _mm256_subs_epi16(x2[0], x2[1]);
+ x3[2] = _mm256_blend_epi32(x2[7], x2[6], 0xf0);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, _mm256_castsi256_si128(x2[6]),
+ _mm256_extractf128_si256(x2[7], 0x01), temp0, temp1);
+ x3[7] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp1), temp0, 0x1);
+ x3[3] = _mm256_adds_epi16(x2[2], x2[4]);
+ x3[4] = _mm256_subs_epi16(x2[2], x2[4]);
+ x3[5] = _mm256_adds_epi16(x2[3], x2[5]);
+ x3[6] = _mm256_subs_epi16(x2[3], x2[5]);
+
+ // stage 4
+ __m256i x4[8];
+ x4[0] = _mm256_blend_epi32(x3[0], x3[1], 0xf0);
+ x4[1] = _mm256_permute2f128_si256(x3[0], x3[1], 0x21);
+ // Even outputs 0/4/8/12 come straight out of this butterfly.
+ btf_16_avx2(&cospi_arr[2], &cospi_arr[3], &x4[0], &x4[1], &output[0],
+ &output[8], &output[4], &output[12], &__rounding_256, &cos_bit);
+ x4[2] = _mm256_adds_epi16(x3[2], x3[7]);
+ x4[3] = _mm256_subs_epi16(x3[2], x3[7]);
+ x4[4] = _mm256_permute2f128_si256(x3[3], x3[4], 0x20);
+ x4[5] = _mm256_permute2f128_si256(x3[6], x3[5], 0x20);
+ in0 = _mm256_permute2f128_si256(x3[3], x3[4], 0x31);
+ in1 = _mm256_permute2f128_si256(x3[5], x3[6], 0x31);
+ btf_16_avx2(&cospi_arr[4], &cospi_arr[5], &in0, &in1, &temp0, &temp1, &temp2,
+ &temp3, &__rounding_256, &cos_bit);
+
+ x4[6] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp0), temp2, 0x1);
+ x4[7] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp3), temp1, 0x1);
+
+ // stage 5
+ __m256i x5[4];
+ in0 = _mm256_permute2f128_si256(x4[2], x4[3], 0x31);
+ in1 = _mm256_permute2f128_si256(x4[2], x4[3], 0x20);
+ // Outputs 2/6/10/14 are produced here.
+ btf_16_avx2(&cospi_arr[6], &cospi_arr[7], &in0, &in1, &output[2], &output[14],
+ &output[10], &output[6], &__rounding_256, &cos_bit);
+ x5[0] = _mm256_adds_epi16(x4[4], x4[6]);
+ x5[1] = _mm256_subs_epi16(x4[4], x4[6]);
+ x5[2] = _mm256_adds_epi16(x4[5], x4[7]);
+ x5[3] = _mm256_subs_epi16(x4[5], x4[7]);
+
+ // stage 6
+ // Final rotations emit the odd outputs 1/3/5/7/9/11/13/15.
+ in0 = _mm256_permute2f128_si256(x5[0], x5[1], 0x20);
+ in1 = _mm256_permute2f128_si256(x5[2], x5[3], 0x31);
+ btf_16_avx2(&cospi_arr[8], &cospi_arr[9], &in0, &in1, &output[1], &output[15],
+ &output[9], &output[7], &__rounding_256, &cos_bit);
+ in0 = _mm256_permute2f128_si256(x5[1], x5[0], 0x31);
+ in1 = _mm256_permute2f128_si256(x5[3], x5[2], 0x20);
+ btf_16_avx2(&cospi_arr[10], &cospi_arr[11], &in0, &in1, &output[5],
+ &output[11], &output[13], &output[3], &__rounding_256, &cos_bit);
+}
+
+// 16-point forward ADST over 16 rows of eight 16-bit coefficients.
+// input/output: arrays of 16 __m128i vectors. Rows are packed two-per-__m256i
+// so each of the 8 ADST stages runs on two rows at once; cos_bit selects the
+// fixed-point precision of the cospi_arr() twiddle table.
+static INLINE void fadst8x16_new_avx2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i __zero = _mm256_setzero_si256();
+ const __m256i __rounding_256 = _mm256_set1_epi32(1 << (cos_bit - 1));
+ __m256i in0, in1;
+ __m128i temp0, temp1, temp2, temp3;
+
+ __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
+ __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
+ __m128i cospi_m48_p16 = pair_set_epi16(-cospi[48], cospi[16]);
+ __m128i cospi_p08_p56 = pair_set_epi16(cospi[8], cospi[56]);
+ __m128i cospi_p56_m08 = pair_set_epi16(cospi[56], -cospi[8]);
+ __m128i cospi_p40_p24 = pair_set_epi16(cospi[40], cospi[24]);
+ __m128i cospi_p24_m40 = pair_set_epi16(cospi[24], -cospi[40]);
+ __m128i cospi_m56_p08 = pair_set_epi16(-cospi[56], cospi[8]);
+ __m128i cospi_m24_p40 = pair_set_epi16(-cospi[24], cospi[40]);
+ __m128i cospi_p02_p62 = pair_set_epi16(cospi[2], cospi[62]);
+ __m128i cospi_p62_m02 = pair_set_epi16(cospi[62], -cospi[2]);
+ __m128i cospi_p10_p54 = pair_set_epi16(cospi[10], cospi[54]);
+ __m128i cospi_p54_m10 = pair_set_epi16(cospi[54], -cospi[10]);
+ __m128i cospi_p18_p46 = pair_set_epi16(cospi[18], cospi[46]);
+ __m128i cospi_p46_m18 = pair_set_epi16(cospi[46], -cospi[18]);
+ __m128i cospi_p26_p38 = pair_set_epi16(cospi[26], cospi[38]);
+ __m128i cospi_p38_m26 = pair_set_epi16(cospi[38], -cospi[26]);
+ __m128i cospi_p34_p30 = pair_set_epi16(cospi[34], cospi[30]);
+ __m128i cospi_p30_m34 = pair_set_epi16(cospi[30], -cospi[34]);
+ __m128i cospi_p42_p22 = pair_set_epi16(cospi[42], cospi[22]);
+ __m128i cospi_p22_m42 = pair_set_epi16(cospi[22], -cospi[42]);
+ __m128i cospi_p50_p14 = pair_set_epi16(cospi[50], cospi[14]);
+ __m128i cospi_p14_m50 = pair_set_epi16(cospi[14], -cospi[50]);
+ __m128i cospi_p58_p06 = pair_set_epi16(cospi[58], cospi[6]);
+ __m128i cospi_p06_m58 = pair_set_epi16(cospi[6], -cospi[58]);
+
+ // Each 256-bit entry packs two 128-bit twiddle pairs (one per lane) so one
+ // btf_16_avx2() call applies two different rotations to the two packed rows.
+ __m256i cospi_arr[20];
+
+ cospi_arr[0] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p32_p32),
+ cospi_p32_p32, 0x1);
+ cospi_arr[1] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p32_m32),
+ cospi_p32_m32, 0x1);
+ cospi_arr[2] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p32_p32),
+ cospi_p32_p32, 0x1);
+ cospi_arr[3] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p32_m32),
+ cospi_p32_m32, 0x1);
+ cospi_arr[4] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p16_p48),
+ cospi_m48_p16, 0x1);
+ cospi_arr[5] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p48_m16),
+ cospi_p16_p48, 0x1);
+ cospi_arr[6] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p16_p48),
+ cospi_m48_p16, 0x1);
+ cospi_arr[7] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p48_m16),
+ cospi_p16_p48, 0x1);
+ cospi_arr[8] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p08_p56),
+ cospi_p40_p24, 0x1);
+ cospi_arr[9] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p56_m08),
+ cospi_p24_m40, 0x1);
+ cospi_arr[10] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_m56_p08),
+ cospi_m24_p40, 0x1);
+ cospi_arr[11] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p08_p56),
+ cospi_p40_p24, 0x1);
+ cospi_arr[12] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p02_p62),
+ cospi_p10_p54, 0x1);
+ cospi_arr[13] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p62_m02),
+ cospi_p54_m10, 0x1);
+ cospi_arr[14] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p18_p46),
+ cospi_p26_p38, 0x1);
+ cospi_arr[15] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p46_m18),
+ cospi_p38_m26, 0x1);
+ cospi_arr[16] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p34_p30),
+ cospi_p42_p22, 0x1);
+ cospi_arr[17] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p30_m34),
+ cospi_p22_m42, 0x1);
+ cospi_arr[18] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p50_p14),
+ cospi_p58_p06, 0x1);
+ cospi_arr[19] = _mm256_insertf128_si256(_mm256_castsi128_si256(cospi_p14_m50),
+ cospi_p06_m58, 0x1);
+
+ // Pack the 16 input rows into 8 registers: even rows first (x[0..3]),
+ // then odd rows (x[4..7]), matching the ADST input permutation below.
+ __m256i x[8];
+ x[0] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[0]), input[4], 0x1);
+ x[1] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[2]), input[6], 0x1);
+ x[2] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[8]), input[12], 0x1);
+ x[3] = _mm256_insertf128_si256(_mm256_castsi128_si256(input[10]), input[14],
+ 0x1);
+ x[4] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[1]), input[9], 0x1);
+ x[5] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[3]), input[11], 0x1);
+ x[6] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[5]), input[13], 0x1);
+ x[7] =
+ _mm256_insertf128_si256(_mm256_castsi128_si256(input[7]), input[15], 0x1);
+
+ // stage 1
+ // Reorder and negate selected inputs (ADST sign/permutation stage).
+ __m256i x1[8];
+ x1[0] = x[0];
+ x1[1] = _mm256_subs_epi16(__zero, x[7]);
+ x1[2] = x[2];
+ x1[3] = _mm256_subs_epi16(__zero, x[5]);
+ x1[4] = _mm256_subs_epi16(__zero, x[4]);
+ x1[5] = x[3];
+ x1[6] = _mm256_subs_epi16(__zero, x[6]);
+ x1[7] = x[1];
+
+ // stage 2
+ __m256i x2[8];
+ x2[0] = _mm256_blend_epi32(x1[0], x1[1], 0xf0);
+ x2[3] = _mm256_blend_epi32(x1[3], x1[2], 0xf0);
+ x2[4] = _mm256_blend_epi32(x1[4], x1[5], 0xf0);
+ x2[7] = _mm256_blend_epi32(x1[7], x1[6], 0xf0);
+ in0 = _mm256_blend_epi32(x1[1], x1[0], 0xf0);
+ in1 = _mm256_blend_epi32(x1[2], x1[3], 0xf0);
+ btf_16_avx2(&cospi_arr[0], &cospi_arr[1], &in0, &in1, &temp0, &temp1, &temp2,
+ &temp3, &__rounding_256, &cos_bit);
+ x2[1] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp0), temp1, 0x1);
+ x2[2] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp2), temp3, 0x1);
+ in0 = _mm256_permute2f128_si256(x1[7], x1[6], 0x21);
+ in1 = _mm256_permute2f128_si256(x1[4], x1[5], 0x21);
+ btf_16_avx2(&cospi_arr[2], &cospi_arr[3], &in0, &in1, &temp0, &temp1, &temp2,
+ &temp3, &__rounding_256, &cos_bit);
+ x2[5] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp0), temp1, 0x1);
+ x2[6] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp2), temp3, 0x1);
+
+ // stage 3
+ __m256i x3[8];
+ x3[0] = _mm256_adds_epi16(x2[0], x2[1]);
+ x3[1] = _mm256_subs_epi16(x2[0], x2[1]);
+ x3[2] = _mm256_adds_epi16(x2[3], x2[2]);
+ x3[3] = _mm256_subs_epi16(x2[3], x2[2]);
+ x3[4] = _mm256_adds_epi16(x2[4], x2[5]);
+ x3[5] = _mm256_subs_epi16(x2[4], x2[5]);
+ x3[6] = _mm256_adds_epi16(x2[7], x2[6]);
+ x3[7] = _mm256_subs_epi16(x2[7], x2[6]);
+
+ // stage 4
+ __m256i x4[8];
+ x4[0] = x3[0];
+ x4[1] = x3[1];
+ x4[4] = x3[4];
+ x4[5] = x3[5];
+ in0 = _mm256_permute2f128_si256(x3[2], x3[3], 0x20);
+ in1 = _mm256_permute2f128_si256(x3[2], x3[3], 0x31);
+ btf_16_avx2(&cospi_arr[4], &cospi_arr[5], &in0, &in1, &temp0, &temp1, &temp2,
+ &temp3, &__rounding_256, &cos_bit);
+ x4[2] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp0), temp1, 0x1);
+ x4[3] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp2), temp3, 0x1);
+ in0 = _mm256_permute2f128_si256(x3[6], x3[7], 0x20);
+ in1 = _mm256_permute2f128_si256(x3[6], x3[7], 0x31);
+ btf_16_avx2(&cospi_arr[6], &cospi_arr[7], &in0, &in1, &temp0, &temp1, &temp2,
+ &temp3, &__rounding_256, &cos_bit);
+ x4[6] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp0), temp1, 0x1);
+ x4[7] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp2), temp3, 0x1);
+
+ // stage 5
+ __m256i x5[8];
+ x5[0] = _mm256_adds_epi16(x4[0], x4[2]);
+ x5[1] = _mm256_subs_epi16(x4[0], x4[2]);
+ x5[2] = _mm256_adds_epi16(x4[1], x4[3]);
+ x5[3] = _mm256_subs_epi16(x4[1], x4[3]);
+ x5[4] = _mm256_adds_epi16(x4[4], x4[6]);
+ x5[5] = _mm256_subs_epi16(x4[4], x4[6]);
+ x5[6] = _mm256_adds_epi16(x4[5], x4[7]);
+ x5[7] = _mm256_subs_epi16(x4[5], x4[7]);
+
+ // stage 6
+ __m256i x6[8];
+ x6[0] = x5[0];
+ x6[1] = x5[2];
+ x6[2] = x5[1];
+ x6[3] = x5[3];
+ in0 = _mm256_permute2f128_si256(x5[4], x5[6], 0x20);
+ in1 = _mm256_permute2f128_si256(x5[4], x5[6], 0x31);
+ btf_16_avx2(&cospi_arr[8], &cospi_arr[9], &in0, &in1, &temp0, &temp1, &temp2,
+ &temp3, &__rounding_256, &cos_bit);
+ x6[4] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp0), temp1, 0x1);
+ x6[5] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp2), temp3, 0x1);
+ in0 = _mm256_permute2f128_si256(x5[5], x5[7], 0x20);
+ in1 = _mm256_permute2f128_si256(x5[5], x5[7], 0x31);
+ btf_16_avx2(&cospi_arr[10], &cospi_arr[11], &in0, &in1, &temp0, &temp1,
+ &temp2, &temp3, &__rounding_256, &cos_bit);
+ x6[6] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp0), temp1, 0x1);
+ x6[7] = _mm256_insertf128_si256(_mm256_castsi128_si256(temp2), temp3, 0x1);
+
+ // stage 7
+ __m256i x7[8];
+ x7[0] = _mm256_adds_epi16(x6[0], x6[4]);
+ x7[1] = _mm256_subs_epi16(x6[0], x6[4]);
+ x7[2] = _mm256_adds_epi16(x6[1], x6[5]);
+ x7[3] = _mm256_subs_epi16(x6[1], x6[5]);
+ x7[4] = _mm256_adds_epi16(x6[2], x6[6]);
+ x7[5] = _mm256_subs_epi16(x6[2], x6[6]);
+ x7[6] = _mm256_adds_epi16(x6[3], x6[7]);
+ x7[7] = _mm256_subs_epi16(x6[3], x6[7]);
+
+ // stage 8
+ // Final rotations write results directly into the interleaved ADST output
+ // order (15,0,13,2,...,1,14).
+ in0 = _mm256_permute2f128_si256(x7[0], x7[2], 0x20);
+ in1 = _mm256_permute2f128_si256(x7[0], x7[2], 0x31);
+ btf_16_avx2(&cospi_arr[12], &cospi_arr[13], &in0, &in1, &output[15],
+ &output[0], &output[13], &output[2], &__rounding_256, &cos_bit);
+ in0 = _mm256_permute2f128_si256(x7[4], x7[6], 0x20);
+ in1 = _mm256_permute2f128_si256(x7[4], x7[6], 0x31);
+ btf_16_avx2(&cospi_arr[14], &cospi_arr[15], &in0, &in1, &output[11],
+ &output[4], &output[9], &output[6], &__rounding_256, &cos_bit);
+ in0 = _mm256_permute2f128_si256(x7[1], x7[3], 0x20);
+ in1 = _mm256_permute2f128_si256(x7[1], x7[3], 0x31);
+ btf_16_avx2(&cospi_arr[16], &cospi_arr[17], &in0, &in1, &output[7],
+ &output[8], &output[5], &output[10], &__rounding_256, &cos_bit);
+ in0 = _mm256_permute2f128_si256(x7[5], x7[7], 0x20);
+ in1 = _mm256_permute2f128_si256(x7[5], x7[7], 0x31);
+ btf_16_avx2(&cospi_arr[18], &cospi_arr[19], &in0, &in1, &output[3],
+ &output[12], &output[1], &output[14], &__rounding_256, &cos_bit);
+}
+
+// 16-point identity transform: scales each of the 16 input rows by
+// 2*NewSqrt2 with rounding (NewSqrt2 is presumably the fixed-point sqrt(2)
+// constant from av1_txfm.h — confirm). cos_bit is unused.
+static INLINE void fidentity8x16_new_avx2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ (void)cos_bit;
+ const __m256i one = _mm256_set1_epi16(1);
+ __m256i temp;
+ for (int i = 0; i < 16; i += 2) {
+ // Process two rows per iteration in one 256-bit register.
+ temp = _mm256_insertf128_si256(_mm256_castsi128_si256(input[i]),
+ input[i + 1], 0x1);
+ // Interleave with 1 so scale_round_avx2 can fold the rounding constant
+ // into the same multiply-add as the scale factor.
+ const __m256i a_lo = _mm256_unpacklo_epi16(temp, one);
+ const __m256i a_hi = _mm256_unpackhi_epi16(temp, one);
+ const __m256i b_lo = scale_round_avx2(a_lo, 2 * NewSqrt2);
+ const __m256i b_hi = scale_round_avx2(a_hi, 2 * NewSqrt2);
+ // Narrow back to 16 bits with saturation and split the lanes out again.
+ temp = _mm256_packs_epi32(b_lo, b_hi);
+ output[i] = _mm256_castsi256_si128(temp);
+ output[i + 1] = _mm256_extractf128_si256(temp, 0x1);
+ }
+}
+
+// Row (horizontal) 1-D transforms for 8x16 blocks: rows are 8 wide, so the
+// 8-point AVX2 kernels are used. Indexed by TX_TYPE; the horizontal half of
+// each type name selects DCT/ADST/identity.
+static const transform_1d_avx2 row_txfm8x16_arr[TX_TYPES] = {
+ fdct8x8_new_avx2, // DCT_DCT
+ fdct8x8_new_avx2, // ADST_DCT
+ fadst8x8_new_avx2, // DCT_ADST
+ fadst8x8_new_avx2, // ADST_ADST
+ fdct8x8_new_avx2, // FLIPADST_DCT
+ fadst8x8_new_avx2, // DCT_FLIPADST
+ fadst8x8_new_avx2, // FLIPADST_FLIPADST
+ fadst8x8_new_avx2, // ADST_FLIPADST
+ fadst8x8_new_avx2, // FLIPADST_ADST
+ fidentity8x8_new_avx2, // IDTX
+ fidentity8x8_new_avx2, // V_DCT
+ fdct8x8_new_avx2, // H_DCT
+ fidentity8x8_new_avx2, // V_ADST
+ fadst8x8_new_avx2, // H_ADST
+ fidentity8x8_new_avx2, // V_FLIPADST
+ fadst8x8_new_avx2 // H_FLIPADST
+};
+
+// Column (vertical) 1-D transforms for 8x16 blocks: columns are 16 tall, so
+// the 16-point kernels are used. Indexed by TX_TYPE; the vertical half of
+// each type name selects DCT/ADST/identity.
+static const transform_1d_sse2 col_txfm8x16_arr[TX_TYPES] = {
+ fdct8x16_new_avx2, // DCT_DCT
+ fadst8x16_new_avx2, // ADST_DCT
+ fdct8x16_new_avx2, // DCT_ADST
+ fadst8x16_new_avx2, // ADST_ADST
+ fadst8x16_new_avx2, // FLIPADST_DCT
+ fdct8x16_new_avx2, // DCT_FLIPADST
+ fadst8x16_new_avx2, // FLIPADST_FLIPADST
+ fadst8x16_new_avx2, // ADST_FLIPADST
+ fadst8x16_new_avx2, // FLIPADST_ADST
+ fidentity8x16_new_avx2, // IDTX
+ fdct8x16_new_avx2, // V_DCT
+ fidentity8x16_new_avx2, // H_DCT
+ fadst8x16_new_avx2, // V_ADST
+ fidentity8x16_new_avx2, // H_ADST
+ fadst8x16_new_avx2, // V_FLIPADST
+ fidentity8x16_new_avx2 // H_FLIPADST
+};
+
+// Column (vertical) 1-D transforms for 16x8 blocks: columns are 8 tall, so
+// the 8-point AVX2 kernels are used. Indexed by TX_TYPE.
+static const transform_1d_avx2 col_txfm16x8_arr[TX_TYPES] = {
+ fdct8x8_new_avx2, // DCT_DCT
+ fadst8x8_new_avx2, // ADST_DCT
+ fdct8x8_new_avx2, // DCT_ADST
+ fadst8x8_new_avx2, // ADST_ADST
+ fadst8x8_new_avx2, // FLIPADST_DCT
+ fdct8x8_new_avx2, // DCT_FLIPADST
+ fadst8x8_new_avx2, // FLIPADST_FLIPADST
+ fadst8x8_new_avx2, // ADST_FLIPADST
+ fadst8x8_new_avx2, // FLIPADST_ADST
+ fidentity8x8_new_avx2, // IDTX
+ fdct8x8_new_avx2, // V_DCT
+ fidentity8x8_new_avx2, // H_DCT
+ fadst8x8_new_avx2, // V_ADST
+ fidentity8x8_new_avx2, // H_ADST
+ fadst8x8_new_avx2, // V_FLIPADST
+ fidentity8x8_new_avx2, // H_FLIPADST
+};
+
+// Row (horizontal) 1-D transforms for 16x8 blocks: rows are 16 wide, so the
+// 16-point kernels are used. Indexed by TX_TYPE.
+static const transform_1d_sse2 row_txfm16x8_arr[TX_TYPES] = {
+ fdct8x16_new_avx2, // DCT_DCT
+ fdct8x16_new_avx2, // ADST_DCT
+ fadst8x16_new_avx2, // DCT_ADST
+ fadst8x16_new_avx2, // ADST_ADST
+ fdct8x16_new_avx2, // FLIPADST_DCT
+ fadst8x16_new_avx2, // DCT_FLIPADST
+ fadst8x16_new_avx2, // FLIPADST_FLIPADST
+ fadst8x16_new_avx2, // ADST_FLIPADST
+ fadst8x16_new_avx2, // FLIPADST_ADST
+ fidentity8x16_new_avx2, // IDTX
+ fidentity8x16_new_avx2, // V_DCT
+ fdct8x16_new_avx2, // H_DCT
+ fidentity8x16_new_avx2, // V_ADST
+ fadst8x16_new_avx2, // H_ADST
+ fidentity8x16_new_avx2, // V_FLIPADST
+ fadst8x16_new_avx2 // H_FLIPADST
+};
+
+// 2-D forward transform for an 8x16 low-bitdepth block.
+// Pipeline: load (with optional vertical flip) -> shift -> 16-point column
+// transform -> shift -> transpose -> (optional horizontal flip) -> 8-point
+// row transform on both halves packed into __m256i -> shift -> store.
+// The "rect" store applies the sqrt(2) scaling used for non-square blocks
+// (see store_rect_buffer_16bit_to_32bit_w16_avx2). bd is unused (lowbd path).
+static void lowbd_fwd_txfm2d_8x16_avx2(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m128i buf0[16], buf1[16];
+ __m256i buf2[8];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X16];
+ const int txw_idx = get_txw_idx(TX_8X16);
+ const int txh_idx = get_txh_idx(TX_8X16);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = 8;
+ const int height = 16;
+ const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type];
+ const transform_1d_avx2 row_txfm = row_txfm8x16_arr[tx_type];
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_flip(input, stride, buf0, height);
+ } else {
+ load_buffer_16bit_to_16bit(input, stride, buf0, height);
+ }
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ // Transpose the 8x16 result as two 8x8 tiles.
+ transpose_16bit_8x8(buf0, buf1);
+ transpose_16bit_8x8(buf0 + 8, buf1 + 8)
+
+ __m128i *bufl, *bufu;
+ if (lr_flip) {
+ // Horizontal flip: reverse each 8-wide half into buf0 before packing.
+ bufl = buf0;
+ bufu = buf0 + 8;
+ flip_buf_sse2(buf1 + width * 0, bufl, width);
+ flip_buf_sse2(buf1 + width * 1, bufu, width);
+ } else {
+ bufl = buf1 + width * 0;
+ bufu = buf1 + width * 1;
+ }
+ // Pack lower/upper 8-row halves into __m256i lanes for the row transform.
+ pack_reg(bufl, bufu, buf2);
+ row_txfm(buf2, buf2, cos_bit_row);
+ round_shift_16bit_w16_avx2(buf2, width, shift[2]);
+ store_rect_buffer_16bit_to_32bit_w16_avx2(buf2, output, height, width);
+}
+
+// 2-D forward transform for a 16x8 low-bitdepth block.
+// Mirror of the 8x16 path: both 8-wide column halves are packed into
+// __m256i for the 8-point column transform, then unpacked/transposed for
+// the 16-point row transform. The "rect" store applies the sqrt(2) scaling
+// used for non-square blocks. bd is unused (lowbd path).
+static void lowbd_fwd_txfm2d_16x8_avx2(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m128i buf0[16], buf1[16];
+ __m256i buf2[8];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X8];
+ const int txw_idx = get_txw_idx(TX_16X8);
+ const int txh_idx = get_txh_idx(TX_16X8);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = 16;
+ const int height = 8;
+ const transform_1d_avx2 col_txfm = col_txfm16x8_arr[tx_type];
+ const transform_1d_sse2 row_txfm = row_txfm16x8_arr[tx_type];
+ __m128i *buf;
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ // Load the left and right 8-column halves separately (optionally
+ // vertically flipped).
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_flip(input + 8 * 0, stride, buf0, height);
+ load_buffer_16bit_to_16bit_flip(input + 8 * 1, stride, &buf0[8], height);
+ } else {
+ load_buffer_16bit_to_16bit(input + 8 * 0, stride, buf0, height);
+ load_buffer_16bit_to_16bit(input + 8 * 1, stride, &buf0[8], height);
+ }
+ pack_reg(buf0, &buf0[8], buf2);
+ round_shift_16bit_w16_avx2(buf2, height, shift[0]);
+ col_txfm(buf2, buf2, cos_bit_col);
+ round_shift_16bit_w16_avx2(buf2, height, shift[1]);
+ transpose_16bit_16x8_avx2(buf2, buf2);
+ // Unpack back to 16 __m128i rows for the SSE2-style 16-point row kernel.
+ extract_reg(buf2, buf1);
+
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_sse2(buf1, buf, width);
+ } else {
+ buf = buf1;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit(buf, width, shift[2]);
+ store_rect_buffer_16bit_to_32bit_w8(buf, output, height, width);
+}
+
+// Per-TX_SIZE dispatch table for the lowbd 2-D forward transform. Sizes
+// without an AVX2 implementation fall back to the SSE2 versions.
+static FwdTxfm2dFunc fwd_txfm2d_func_ls[TX_SIZES_ALL] = {
+ av1_lowbd_fwd_txfm2d_4x4_sse2, // 4x4 transform
+ av1_lowbd_fwd_txfm2d_8x8_avx2, // 8x8 transform
+ lowbd_fwd_txfm2d_16x16_avx2, // 16x16 transform
+ lowbd_fwd_txfm2d_32x32_avx2, // 32x32 transform
+ lowbd_fwd_txfm2d_64x64_avx2, // 64x64 transform
+ av1_lowbd_fwd_txfm2d_4x8_sse2, // 4x8 transform
+ av1_lowbd_fwd_txfm2d_8x4_sse2, // 8x4 transform
+ lowbd_fwd_txfm2d_8x16_avx2, // 8x16 transform
+ lowbd_fwd_txfm2d_16x8_avx2, // 16x8 transform
+ lowbd_fwd_txfm2d_16x32_avx2, // 16x32 transform
+ lowbd_fwd_txfm2d_32x16_avx2, // 32x16 transform
+ lowbd_fwd_txfm2d_32x64_avx2, // 32x64 transform
+ lowbd_fwd_txfm2d_64x32_avx2, // 64x32 transform
+ av1_lowbd_fwd_txfm2d_4x16_sse2, // 4x16 transform
+ av1_lowbd_fwd_txfm2d_16x4_sse2, // 16x4 transform
+ av1_lowbd_fwd_txfm2d_8x32_sse2, // 8x32 transform
+ av1_lowbd_fwd_txfm2d_32x8_sse2, // 32x8 transform
+ lowbd_fwd_txfm2d_16x64_avx2, // 16x64 transform
+ lowbd_fwd_txfm2d_64x16_avx2, // 64x16 transform
+};
+
+// Public AVX2 entry point for the lowbd forward transform: dispatches on
+// tx_size via fwd_txfm2d_func_ls. Lossless 4x4 (Walsh-Hadamard) has no SIMD
+// path here, so it falls back to the C implementation.
+void av1_lowbd_fwd_txfm_avx2(const int16_t *src_diff, tran_low_t *coeff,
+ int diff_stride, TxfmParam *txfm_param) {
+ FwdTxfm2dFunc fwd_txfm2d_func = fwd_txfm2d_func_ls[txfm_param->tx_size];
+ if (txfm_param->lossless && txfm_param->tx_size == TX_4X4) {
+ av1_lowbd_fwd_txfm_c(src_diff, coeff, diff_stride, txfm_param);
+ } else {
+ fwd_txfm2d_func(src_diff, coeff, diff_stride, txfm_param->tx_type,
+ txfm_param->bd);
+ }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c b/third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c
new file mode 100644
index 0000000000..825da8d7b4
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm2d_sse4.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "config/av1_rtcd.h"
+
+#include "av1/common/enums.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/x86/av1_txfm_sse2.h"
+#include "av1/common/x86/highbd_txfm_utility_sse4.h"
+#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
+#include "av1/encoder/x86/av1_txfm1d_sse4.h"
+#include "av1/encoder/x86/av1_fwd_txfm_sse2.h"
+
+// Widen a strided txfm1d_size x txfm1d_size block of int16 samples into a
+// densely packed (stride == txfm1d_size) int32 array.
+static INLINE void int16_array_with_stride_to_int32_array_without_stride(
+ const int16_t *input, int stride, int32_t *output, int txfm1d_size) {
+ int r, c;
+ for (r = 0; r < txfm1d_size; r++) {
+ for (c = 0; c < txfm1d_size; c++) {
+ output[r * txfm1d_size + c] = (int32_t)input[r * stride + c];
+ }
+ }
+}
+
+// Store out_size rows of 8 int32 coefficients each: in1[i] holds columns
+// 0-3 and in2[i] columns 4-7 of row i. Uses aligned stores, so `out` must
+// be 16-byte aligned at every `stride`-spaced row start.
+static INLINE void store_output_32bit_w8(int32_t *const out,
+ const __m128i *const in1,
+ const __m128i *const in2,
+ const int stride, const int out_size) {
+ for (int i = 0; i < out_size; ++i) {
+ _mm_store_si128((__m128i *)(out + stride * i), in1[i]);
+ _mm_store_si128((__m128i *)(out + stride * i + 4), in2[i]);
+ }
+}
+
+// Signature shared by the 1-D column/row transform kernels below.
+typedef void (*TxfmFuncSSE2)(__m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+
+// 32-point forward DCT on 32-bit coefficients: runs the 1-D kernel once per
+// group of 4 columns (8 passes for a 32-wide block). stage_range is unused.
+static void fdct32_sse4_1(__m128i *input, __m128i *output, const int8_t cos_bit,
+ const int8_t *stage_range) {
+ const int txfm_size = 32;
+ const int num_per_128 = 4;  // 4 int32 lanes per __m128i
+ int col_num = txfm_size / num_per_128;
+ int col;
+ (void)stage_range;
+ for (col = 0; col < col_num; col++) {
+ av1_fdct32_sse4_1((input + col), (output + col), cos_bit, col_num);
+ }
+}
+
+// 64-point forward DCT on 32-bit coefficients: one 1-D pass per group of 4
+// columns (16 passes for a 64-wide block). stage_range is unused.
+static void fdct64_new_sse4_1(__m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range) {
+ const int txfm_size = 64;
+ const int num_per_128 = 4;  // 4 int32 lanes per __m128i
+ int col_num = txfm_size / num_per_128;
+ (void)stage_range;
+ for (int col = 0; col < col_num; col++) {
+ av1_fdct64_sse4_1((input + col), (output + col), cos_bit, col_num, col_num);
+ }
+}
+// 32x32 identity transform: applies the 32-point identity kernel to each of
+// the 8 register-groups of 32 (32*32/4 = 256 __m128i total). stage_range is
+// unused.
+static void idtx32x32_sse4_1(__m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range) {
+ (void)stage_range;
+
+ for (int i = 0; i < 8; i++) {
+ av1_idtx32_sse4_1(&input[i * 32], &output[i * 32], cos_bit, 1);
+ }
+}
+
+// Map a 1-D TXFM_TYPE to its SSE4.1 kernel. Only the types used by the
+// 32x32/64x64 paths below are supported; anything else asserts and returns
+// NULL (release builds would then crash at the call site).
+static INLINE TxfmFuncSSE2 fwd_txfm_type_to_func(TXFM_TYPE txfm_type) {
+ switch (txfm_type) {
+ case TXFM_TYPE_DCT32: return fdct32_sse4_1;
+ case TXFM_TYPE_DCT64: return fdct64_new_sse4_1;
+ case TXFM_TYPE_IDENTITY32: return idtx32x32_sse4_1;
+ default: assert(0);
+ }
+ return NULL;
+}
+
+// Generic square 2-D forward transform: widen input to int32, then
+// shift -> column transform -> shift -> transpose -> row transform -> shift.
+// txfm_buf and output each hold txfm_size*txfm_size int32 and are ping-ponged
+// between stages; the final result ends up in `output`.
+static INLINE void fwd_txfm2d_sse4_1(const int16_t *input, int32_t *output,
+ const int stride,
+ const TXFM_2D_FLIP_CFG *cfg,
+ int32_t *txfm_buf) {
+ // TODO(sarahparker) This does not currently support rectangular transforms
+ // and will break without splitting txfm_size out into row and col size.
+ // Rectangular transforms use c code only, so it should be ok for now.
+ // It will be corrected when there are sse implementations for rectangular
+ // transforms.
+ assert(cfg->tx_size < TX_SIZES);
+ const int txfm_size = tx_size_wide[cfg->tx_size];
+ const int8_t *shift = cfg->shift;
+ const int8_t *stage_range_col = cfg->stage_range_col;
+ const int8_t *stage_range_row = cfg->stage_range_row;
+ const int8_t cos_bit_col = cfg->cos_bit_col;
+ const int8_t cos_bit_row = cfg->cos_bit_row;
+ const TxfmFuncSSE2 txfm_func_col = fwd_txfm_type_to_func(cfg->txfm_type_col);
+ const TxfmFuncSSE2 txfm_func_row = fwd_txfm_type_to_func(cfg->txfm_type_row);
+
+ __m128i *buf_128 = (__m128i *)txfm_buf;
+ __m128i *out_128 = (__m128i *)output;
+ int num_per_128 = 4;
+ int txfm2d_size_128 = txfm_size * txfm_size / num_per_128;
+
+ int16_array_with_stride_to_int32_array_without_stride(input, stride, txfm_buf,
+ txfm_size);
+ // shift[] entries are negative amounts; negate to get the right shift.
+ av1_round_shift_array_32_sse4_1(buf_128, out_128, txfm2d_size_128, -shift[0]);
+ txfm_func_col(out_128, buf_128, cos_bit_col, stage_range_col);
+ av1_round_shift_array_32_sse4_1(buf_128, out_128, txfm2d_size_128, -shift[1]);
+ transpose_32(txfm_size, out_128, buf_128);
+ txfm_func_row(buf_128, out_128, cos_bit_row, stage_range_row);
+ av1_round_shift_array_32_sse4_1(out_128, out_128, txfm2d_size_128, -shift[2]);
+}
+
+// 64x64 2-D forward transform. Differs from the generic path in two ways:
+// no shift[0] pre-shift is applied, and the row pass only computes the low
+// 32 of 64 output columns (col_num >> 1), since AV1 keeps only the top-left
+// 32x32 coefficients of a 64-point transform.
+static INLINE void fwd_txfm2d_64x64_sse4_1(const int16_t *input,
+ int32_t *output, const int stride,
+ const TXFM_2D_FLIP_CFG *cfg,
+ int32_t *txfm_buf) {
+ assert(cfg->tx_size < TX_SIZES);
+ const int txfm_size = tx_size_wide[cfg->tx_size];
+ const int8_t *shift = cfg->shift;
+ const int8_t *stage_range_col = cfg->stage_range_col;
+ const int8_t cos_bit_col = cfg->cos_bit_col;
+ const int8_t cos_bit_row = cfg->cos_bit_row;
+ const TxfmFuncSSE2 txfm_func_col = fwd_txfm_type_to_func(cfg->txfm_type_col);
+ __m128i *buf_128 = (__m128i *)txfm_buf;
+ __m128i *out_128 = (__m128i *)output;
+
+ const int num_per_128 = 4;
+ int txfm2d_size_128 = txfm_size * txfm_size / num_per_128;
+ int col_num = txfm_size / num_per_128;
+
+ int16_array_with_stride_to_int32_array_without_stride(input, stride, output,
+ txfm_size);
+ /*col wise transform*/
+ txfm_func_col(out_128, buf_128, cos_bit_col, stage_range_col);
+ av1_round_shift_array_32_sse4_1(buf_128, out_128, txfm2d_size_128, -shift[1]);
+ transpose_32(txfm_size, out_128, buf_128);
+
+ /*row wise transform - only the low half of the output columns is kept*/
+ for (int col = 0; col < (col_num >> 1); col++) {
+ av1_fdct64_sse4_1((buf_128 + col), (out_128 + col), cos_bit_row, col_num,
+ (col_num >> 1));
+ }
+
+ // Final shift covers only the retained 32-wide region.
+ txfm2d_size_128 = (col_num >> 1) * (txfm_size >> 1);
+ av1_round_shift_array_32_sse4_1(out_128, out_128, txfm2d_size_128, -shift[2]);
+}
+
+// Public SSE4.1 entry point for the 32x32 2-D forward transform (highbd
+// coefficient path). bd is unused because the computation is bit-depth
+// agnostic at this stage.
+void av1_fwd_txfm2d_32x32_sse4_1(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ DECLARE_ALIGNED(16, int32_t, txfm_buf[1024]);
+ TXFM_2D_FLIP_CFG cfg;
+ av1_get_fwd_txfm_cfg(tx_type, TX_32X32, &cfg);
+ (void)bd;
+ fwd_txfm2d_sse4_1(input, output, stride, &cfg, txfm_buf);
+}
+
+// Public SSE4.1 entry point for the 64x64 2-D forward transform. Uses the
+// specialized 64x64 path that only emits the top-left 32x32 coefficients.
+// bd is unused.
+void av1_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ DECLARE_ALIGNED(16, int32_t, txfm_buf[4096]);
+ TXFM_2D_FLIP_CFG cfg;
+ av1_get_fwd_txfm_cfg(tx_type, TX_64X64, &cfg);
+ (void)bd;
+ fwd_txfm2d_64x64_sse4_1(input, output, stride, &cfg, txfm_buf);
+}
+
+// Lowbd 64x64 forward transform (DCT_DCT only, asserted). Columns use the
+// 16-bit SSE2 64-point DCT; rows are widened to 32 bits and use the SSE4.1
+// 64-point DCT. Only the top-left 32x32 coefficients are stored — the
+// AOMMIN(4, height_div8) loops cap the row passes at 32 rows, matching
+// AV1's convention of discarding the upper 64-point coefficients.
+static void lowbd_fwd_txfm2d_64x64_sse4_1(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ (void)tx_type;
+ assert(tx_type == DCT_DCT);
+ const TX_SIZE tx_size = TX_64X64;
+ __m128i buf0[64], buf1[512];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_sse2 col_txfm = av1_fdct8x64_new_sse2;
+ const int width_div8 = (width >> 3);
+ const int height_div8 = (height >> 3);
+
+ // Column pass, 8 columns at a time, then transpose into buf1.
+ for (int i = 0; i < width_div8; i++) {
+ load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ for (int j = 0; j < AOMMIN(4, height_div8); ++j) {
+ transpose_16bit_8x8(buf0 + j * 8, buf1 + j * width + 8 * i);
+ }
+ }
+ // Row pass: widen each 8-wide row group into two int32 halves (bufA = low
+ // 4 lanes, bufB = high 4 lanes) and run the 32-bit 64-point DCT on each.
+ for (int i = 0; i < AOMMIN(4, height_div8); i++) {
+ __m128i bufA[64];
+ __m128i bufB[64];
+ __m128i *buf = buf1 + width * i;
+ for (int j = 0; j < width; ++j) {
+ bufA[j] = _mm_cvtepi16_epi32(buf[j]);
+ bufB[j] = _mm_cvtepi16_epi32(_mm_unpackhi_epi64(buf[j], buf[j]));
+ }
+ av1_fdct64_sse4_1(bufA, bufA, cos_bit_row, 1, 1);
+ av1_fdct64_sse4_1(bufB, bufB, cos_bit_row, 1, 1);
+ av1_round_shift_array_32_sse4_1(bufA, bufA, 32, -shift[2]);
+ av1_round_shift_array_32_sse4_1(bufB, bufB, 32, -shift[2]);
+
+ store_output_32bit_w8(output + i * 8, bufA, bufB, 32, 32);
+ }
+}
+
+// Lowbd 64x32 forward transform (DCT_DCT only, asserted below). Same shape
+// as the 64x64 path, but the column transform comes from col_txfm8x32_arr
+// and the final shift uses the rectangular variant, which folds in the
+// sqrt(2) (NewSqrt2) scaling required for non-square blocks. Only a 32x32
+// coefficient region is stored.
+static void lowbd_fwd_txfm2d_64x32_sse4_1(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ const TX_SIZE tx_size = TX_64X32;
+ __m128i buf0[64], buf1[256];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
+ const int width_div8 = (width >> 3);
+ const int height_div8 = (height >> 3);
+
+ // Column pass, 8 columns at a time, then transpose into buf1.
+ for (int i = 0; i < width_div8; i++) {
+ load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ for (int j = 0; j < AOMMIN(4, height_div8); ++j) {
+ transpose_16bit_8x8(buf0 + j * 8, buf1 + j * width + 8 * i);
+ }
+ }
+ assert(tx_type == DCT_DCT);
+ // Row pass: widen to int32 halves and run the 64-point DCT on each.
+ for (int i = 0; i < AOMMIN(4, height_div8); i++) {
+ __m128i bufA[64];
+ __m128i bufB[64];
+ __m128i *buf = buf1 + width * i;
+ for (int j = 0; j < width; ++j) {
+ bufA[j] = _mm_cvtepi16_epi32(buf[j]);
+ bufB[j] = _mm_cvtepi16_epi32(_mm_unpackhi_epi64(buf[j], buf[j]));
+ }
+ av1_fdct64_sse4_1(bufA, bufA, cos_bit_row, 1, 1);
+ av1_fdct64_sse4_1(bufB, bufB, cos_bit_row, 1, 1);
+ av1_round_shift_rect_array_32_sse4_1(bufA, bufA, 32, -shift[2], NewSqrt2);
+ av1_round_shift_rect_array_32_sse4_1(bufB, bufB, 32, -shift[2], NewSqrt2);
+
+ store_output_32bit_w8(output + i * 8, bufA, bufB, 32, 32);
+ }
+}
+
+// 2-D forward transform of a 32x64 block (DCT_DCT only): 16-bit SSE2
+// 64-point column pass followed by a 32-bit SSE4.1 32-point row pass with
+// rectangular (NewSqrt2) scaling. bd is unused (lowbd path).
+static void lowbd_fwd_txfm2d_32x64_sse4_1(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ (void)tx_type;
+ assert(tx_type == DCT_DCT);
+ const TX_SIZE tx_size = TX_32X64;
+ __m128i buf0[64], buf1[256];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_sse2 col_txfm = av1_fdct8x64_new_sse2;
+ const int width_div8 = (width >> 3);
+ const int height_div8 = (height >> 3);
+
+ // Column pass: 8 columns per iteration in 16-bit precision; transpose 8x8
+ // tiles into buf1. Only the first AOMMIN(4, height_div8) tile rows (i.e.
+ // the top 32 of the 64 column-transform outputs) are carried forward.
+ for (int i = 0; i < width_div8; i++) {
+ load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ for (int j = 0; j < AOMMIN(4, height_div8); ++j) {
+ transpose_16bit_8x8(buf0 + j * 8, buf1 + j * width + 8 * i);
+ }
+ }
+
+ // Row pass: widen 16-bit rows into low/high 32-bit halves (bufA/bufB),
+ // apply the 32-point DCT, rectangular round-shift, and store 32x32 output.
+ for (int i = 0; i < AOMMIN(4, height_div8); i++) {
+ __m128i bufA[32];
+ __m128i bufB[32];
+ __m128i *buf = buf1 + width * i;
+ for (int j = 0; j < width; ++j) {
+ bufA[j] = _mm_cvtepi16_epi32(buf[j]);
+ bufB[j] = _mm_cvtepi16_epi32(_mm_unpackhi_epi64(buf[j], buf[j]));
+ }
+ av1_fdct32_sse4_1(bufA, bufA, cos_bit_row, 1);
+ av1_fdct32_sse4_1(bufB, bufB, cos_bit_row, 1);
+ av1_round_shift_rect_array_32_sse4_1(bufA, bufA, 32, -shift[2], NewSqrt2);
+ av1_round_shift_rect_array_32_sse4_1(bufB, bufB, 32, -shift[2], NewSqrt2);
+
+ store_output_32bit_w8(output + i * 8, bufA, bufB, 32, 32);
+ }
+}
+
+// Dispatch table indexed by TX_SIZE: entry order must match the TX_SIZES_ALL
+// enumeration. Sizes with a 64-point dimension use the SSE4.1 kernels defined
+// above; all other sizes fall back to the SSE2 implementations.
+static FwdTxfm2dFunc fwd_txfm2d_func_ls[TX_SIZES_ALL] = {
+ av1_lowbd_fwd_txfm2d_4x4_sse2, // 4x4 transform
+ av1_lowbd_fwd_txfm2d_8x8_sse2, // 8x8 transform
+ av1_lowbd_fwd_txfm2d_16x16_sse2, // 16x16 transform
+ av1_lowbd_fwd_txfm2d_32x32_sse2, // 32x32 transform
+ lowbd_fwd_txfm2d_64x64_sse4_1, // 64x64 transform
+ av1_lowbd_fwd_txfm2d_4x8_sse2, // 4x8 transform
+ av1_lowbd_fwd_txfm2d_8x4_sse2, // 8x4 transform
+ av1_lowbd_fwd_txfm2d_8x16_sse2, // 8x16 transform
+ av1_lowbd_fwd_txfm2d_16x8_sse2, // 16x8 transform
+ av1_lowbd_fwd_txfm2d_16x32_sse2, // 16x32 transform
+ av1_lowbd_fwd_txfm2d_32x16_sse2, // 32x16 transform
+ lowbd_fwd_txfm2d_32x64_sse4_1, // 32x64 transform
+ lowbd_fwd_txfm2d_64x32_sse4_1, // 64x32 transform
+ av1_lowbd_fwd_txfm2d_4x16_sse2, // 4x16 transform
+ av1_lowbd_fwd_txfm2d_16x4_sse2, // 16x4 transform
+ av1_lowbd_fwd_txfm2d_8x32_sse2, // 8x32 transform
+ av1_lowbd_fwd_txfm2d_32x8_sse2, // 32x8 transform
+ av1_lowbd_fwd_txfm2d_16x64_sse2, // 16x64 transform
+ av1_lowbd_fwd_txfm2d_64x16_sse2, // 64x16 transform
+};
+
+// SSE4.1 entry point for the lowbd forward transform. Dispatches on
+// txfm_param->tx_size via the table above, except for lossless 4x4 blocks,
+// which take the C path (the lossless WHT is not covered by this table).
+void av1_lowbd_fwd_txfm_sse4_1(const int16_t *src_diff, tran_low_t *coeff,
+ int diff_stride, TxfmParam *txfm_param) {
+ FwdTxfm2dFunc fwd_txfm2d_func = fwd_txfm2d_func_ls[txfm_param->tx_size];
+ if (txfm_param->lossless && txfm_param->tx_size == TX_4X4) {
+ av1_lowbd_fwd_txfm_c(src_diff, coeff, diff_stride, txfm_param);
+ } else {
+ fwd_txfm2d_func(src_diff, coeff, diff_stride, txfm_param->tx_type,
+ txfm_param->bd);
+ }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_avx2.h b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_avx2.h
new file mode 100644
index 0000000000..aaad76e5ae
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_avx2.h
@@ -0,0 +1,96 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_ENCODER_X86_AV1_FWD_TXFM_AVX2_H_
+#define AOM_AV1_ENCODER_X86_AV1_FWD_TXFM_AVX2_H_
+#include <immintrin.h>
+
+// out0 = in0*w0 + in1*w1
+// out1 = -in1*w0 + in0*w1
+// 32-bit AVX2 butterfly, type 0, with scalar weights:
+// out0 = in0*w0 + in1*w1
+// out1 = -in1*w0 + in0*w1
+// Results are rounded (_r = 1 << (cos_bit - 1), supplied by the caller) and
+// arithmetically shifted right by cos_bit. Operates in place on *in0/*in1.
+static INLINE void btf_32_avx2_type0(const int32_t w0, const int32_t w1,
+ __m256i *in0, __m256i *in1,
+ const __m256i _r, const int32_t cos_bit) {
+ __m256i _in0 = *in0;
+ __m256i _in1 = *in1;
+ const __m256i ww0 = _mm256_set1_epi32(w0);
+ const __m256i ww1 = _mm256_set1_epi32(w1);
+ const __m256i in0_w0 = _mm256_mullo_epi32(_in0, ww0);
+ const __m256i in1_w1 = _mm256_mullo_epi32(_in1, ww1);
+ __m256i temp0 = _mm256_add_epi32(in0_w0, in1_w1);
+ temp0 = _mm256_add_epi32(temp0, _r);
+ *in0 = _mm256_srai_epi32(temp0, cos_bit);
+ const __m256i in0_w1 = _mm256_mullo_epi32(_in0, ww1);
+ const __m256i in1_w0 = _mm256_mullo_epi32(_in1, ww0);
+ __m256i temp1 = _mm256_sub_epi32(in0_w1, in1_w0);
+ temp1 = _mm256_add_epi32(temp1, _r);
+ *in1 = _mm256_srai_epi32(temp1, cos_bit);
+}
+
+// 32-bit AVX2 butterfly, type 1, with scalar weights:
+// out0 = in0*w0 + in1*w1
+// out1 = in1*w0 - in0*w1
+// Differs from type 0 only in the sign convention of the second output
+// (the subtraction operands are swapped). Rounded by _r, shifted by cos_bit.
+static INLINE void btf_32_avx2_type1(const int32_t w0, const int32_t w1,
+ __m256i *in0, __m256i *in1,
+ const __m256i _r, const int32_t cos_bit) {
+ __m256i _in0 = *in0;
+ __m256i _in1 = *in1;
+ const __m256i ww0 = _mm256_set1_epi32(w0);
+ const __m256i ww1 = _mm256_set1_epi32(w1);
+ const __m256i in0_w0 = _mm256_mullo_epi32(_in0, ww0);
+ const __m256i in1_w1 = _mm256_mullo_epi32(_in1, ww1);
+ __m256i temp0 = _mm256_add_epi32(in0_w0, in1_w1);
+ temp0 = _mm256_add_epi32(temp0, _r);
+ *in0 = _mm256_srai_epi32(temp0, cos_bit);
+ const __m256i in0_w1 = _mm256_mullo_epi32(_in0, ww1);
+ const __m256i in1_w0 = _mm256_mullo_epi32(_in1, ww0);
+ __m256i temp1 = _mm256_sub_epi32(in1_w0, in0_w1);
+ temp1 = _mm256_add_epi32(temp1, _r);
+ *in1 = _mm256_srai_epi32(temp1, cos_bit);
+}
+
+// out0 = in0*w0 + in1*w1
+// out1 = -in1*w0 + in0*w1
+// Same math as btf_32_avx2_type0, but the weights are passed pre-broadcast
+// as __m256i vectors so callers can hoist the _mm256_set1_epi32 out of loops.
+static INLINE void btf_32_avx2_type0_new(const __m256i ww0, const __m256i ww1,
+ __m256i *in0, __m256i *in1,
+ const __m256i _r,
+ const int32_t cos_bit) {
+ __m256i _in0 = *in0;
+ __m256i _in1 = *in1;
+ const __m256i in0_w0 = _mm256_mullo_epi32(_in0, ww0);
+ const __m256i in1_w1 = _mm256_mullo_epi32(_in1, ww1);
+ __m256i temp0 = _mm256_add_epi32(in0_w0, in1_w1);
+ temp0 = _mm256_add_epi32(temp0, _r);
+ *in0 = _mm256_srai_epi32(temp0, cos_bit);
+ const __m256i in0_w1 = _mm256_mullo_epi32(_in0, ww1);
+ const __m256i in1_w0 = _mm256_mullo_epi32(_in1, ww0);
+ __m256i temp1 = _mm256_sub_epi32(in0_w1, in1_w0);
+ temp1 = _mm256_add_epi32(temp1, _r);
+ *in1 = _mm256_srai_epi32(temp1, cos_bit);
+}
+
+// out0 = in0*w0 + in1*w1
+// out1 = in1*w0 - in0*w1
+// Same math as btf_32_avx2_type1, but with pre-broadcast vector weights
+// (see btf_32_avx2_type0_new).
+static INLINE void btf_32_avx2_type1_new(const __m256i ww0, const __m256i ww1,
+ __m256i *in0, __m256i *in1,
+ const __m256i _r,
+ const int32_t cos_bit) {
+ __m256i _in0 = *in0;
+ __m256i _in1 = *in1;
+ const __m256i in0_w0 = _mm256_mullo_epi32(_in0, ww0);
+ const __m256i in1_w1 = _mm256_mullo_epi32(_in1, ww1);
+ __m256i temp0 = _mm256_add_epi32(in0_w0, in1_w1);
+ temp0 = _mm256_add_epi32(temp0, _r);
+ *in0 = _mm256_srai_epi32(temp0, cos_bit);
+ const __m256i in0_w1 = _mm256_mullo_epi32(_in0, ww1);
+ const __m256i in1_w0 = _mm256_mullo_epi32(_in1, ww0);
+ __m256i temp1 = _mm256_sub_epi32(in1_w0, in0_w1);
+ temp1 = _mm256_add_epi32(temp1, _r);
+ *in1 = _mm256_srai_epi32(temp1, cos_bit);
+}
+
+#endif // AOM_AV1_ENCODER_X86_AV1_FWD_TXFM_AVX2_H_
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
new file mode 100644
index 0000000000..a4def754b0
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.c
@@ -0,0 +1,2673 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "av1/common/x86/av1_txfm_sse2.h"
+#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
+#include "av1/encoder/x86/av1_fwd_txfm_sse2.h"
+
+// TODO(linfengz): refine fdct4x8 and fadst4x8 optimization (if possible).
+
+// 4-point 1-D forward DCT over four rows of 16-bit coefficients. Pairs of
+// rows are interleaved so each _mm_madd_epi16 computes a weighted sum of two
+// inputs; results are rounded by 1 << (cos_bit - 1) and shifted by cos_bit.
+static void fdct4x4_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ const __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ const __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
+ const __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+ __m128i u[4], v[4];
+
+ // Interleave symmetric row pairs (0,1) and (3,2) for the madd butterflies.
+ u[0] = _mm_unpacklo_epi16(input[0], input[1]);
+ u[1] = _mm_unpacklo_epi16(input[3], input[2]);
+
+ v[0] = _mm_add_epi16(u[0], u[1]);
+ v[1] = _mm_sub_epi16(u[0], u[1]);
+
+ // Trailing comments give the DCT output index each product produces.
+ u[0] = _mm_madd_epi16(v[0], cospi_p32_p32); // 0
+ u[1] = _mm_madd_epi16(v[0], cospi_p32_m32); // 2
+ u[2] = _mm_madd_epi16(v[1], cospi_p16_p48); // 1
+ u[3] = _mm_madd_epi16(v[1], cospi_p48_m16); // 3
+
+ v[0] = _mm_add_epi32(u[0], __rounding);
+ v[1] = _mm_add_epi32(u[1], __rounding);
+ v[2] = _mm_add_epi32(u[2], __rounding);
+ v[3] = _mm_add_epi32(u[3], __rounding);
+ u[0] = _mm_srai_epi32(v[0], cos_bit);
+ u[1] = _mm_srai_epi32(v[1], cos_bit);
+ u[2] = _mm_srai_epi32(v[2], cos_bit);
+ u[3] = _mm_srai_epi32(v[3], cos_bit);
+
+ // Pack back to 16-bit: output[0]/[1] hold rows {0,2} and {1,3} in their
+ // low/high 64-bit halves; output[2]/[3] expose those high halves (rows 2,3)
+ // in the low half via a byte shift.
+ output[0] = _mm_packs_epi32(u[0], u[1]);
+ output[1] = _mm_packs_epi32(u[2], u[3]);
+ output[2] = _mm_srli_si128(output[0], 8);
+ output[3] = _mm_srli_si128(output[1], 8);
+}
+
+// 4-point 1-D forward DCT across 8 lanes of 16-bit coefficients.
+// NOTE: __rounding and cos_bit are referenced implicitly by the
+// btf_16_sse2 butterfly macro, which is why __rounding looks unused here.
+static void fdct8x4_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+ __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
+ __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
+
+ // stage 1: even/odd additive butterflies
+ __m128i x1[4];
+ x1[0] = _mm_adds_epi16(input[0], input[3]);
+ x1[3] = _mm_subs_epi16(input[0], input[3]);
+ x1[1] = _mm_adds_epi16(input[1], input[2]);
+ x1[2] = _mm_subs_epi16(input[1], input[2]);
+
+ // stage 2: rotation butterflies
+ __m128i x2[4];
+ btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[0], x1[1], x2[0], x2[1]);
+ btf_16_sse2(cospi_p48_p16, cospi_m16_p48, x1[2], x1[3], x2[2], x2[3]);
+
+ // stage 3: reorder into natural DCT output order
+ output[0] = x2[0];
+ output[1] = x2[2];
+ output[2] = x2[1];
+ output[3] = x2[3];
+}
+
+// 8-point 1-D forward DCT over 4 lanes of 16-bit coefficients, using the
+// width-4 butterfly macro btf_16_w4_sse2 (which consumes __rounding/cos_bit
+// explicitly). Output is in the standard bit-reversed DCT coefficient order.
+static void fdct4x8_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+ __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);
+ __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
+ __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
+ __m128i cospi_p56_p08 = pair_set_epi16(cospi[56], cospi[8]);
+ __m128i cospi_m08_p56 = pair_set_epi16(-cospi[8], cospi[56]);
+ __m128i cospi_p24_p40 = pair_set_epi16(cospi[24], cospi[40]);
+ __m128i cospi_m40_p24 = pair_set_epi16(-cospi[40], cospi[24]);
+
+ // stage 1: mirror-pair additive butterflies (i, 7-i)
+ __m128i x1[8];
+ x1[0] = _mm_adds_epi16(input[0], input[7]);
+ x1[7] = _mm_subs_epi16(input[0], input[7]);
+ x1[1] = _mm_adds_epi16(input[1], input[6]);
+ x1[6] = _mm_subs_epi16(input[1], input[6]);
+ x1[2] = _mm_adds_epi16(input[2], input[5]);
+ x1[5] = _mm_subs_epi16(input[2], input[5]);
+ x1[3] = _mm_adds_epi16(input[3], input[4]);
+ x1[4] = _mm_subs_epi16(input[3], input[4]);
+
+ // stage 2: even half recurses (4-pt pattern); odd half rotates 5/6
+ __m128i x2[8];
+ x2[0] = _mm_adds_epi16(x1[0], x1[3]);
+ x2[3] = _mm_subs_epi16(x1[0], x1[3]);
+ x2[1] = _mm_adds_epi16(x1[1], x1[2]);
+ x2[2] = _mm_subs_epi16(x1[1], x1[2]);
+ x2[4] = x1[4];
+ btf_16_w4_sse2(&cospi_m32_p32, &cospi_p32_p32, __rounding, cos_bit, &x1[5],
+ &x1[6], &x2[5], &x2[6]);
+ x2[7] = x1[7];
+
+ // stage 3
+ __m128i x3[8];
+ btf_16_w4_sse2(&cospi_p32_p32, &cospi_p32_m32, __rounding, cos_bit, &x2[0],
+ &x2[1], &x3[0], &x3[1]);
+ btf_16_w4_sse2(&cospi_p48_p16, &cospi_m16_p48, __rounding, cos_bit, &x2[2],
+ &x2[3], &x3[2], &x3[3]);
+ x3[4] = _mm_adds_epi16(x2[4], x2[5]);
+ x3[5] = _mm_subs_epi16(x2[4], x2[5]);
+ x3[6] = _mm_subs_epi16(x2[7], x2[6]);
+ x3[7] = _mm_adds_epi16(x2[7], x2[6]);
+
+ // stage 4: final odd-half rotations
+ __m128i x4[8];
+ x4[0] = x3[0];
+ x4[1] = x3[1];
+ x4[2] = x3[2];
+ x4[3] = x3[3];
+ btf_16_w4_sse2(&cospi_p56_p08, &cospi_m08_p56, __rounding, cos_bit, &x3[4],
+ &x3[7], &x4[4], &x4[7]);
+ btf_16_w4_sse2(&cospi_p24_p40, &cospi_m40_p24, __rounding, cos_bit, &x3[5],
+ &x3[6], &x4[5], &x4[6]);
+
+ // stage 5: bit-reversed output permutation
+ output[0] = x4[0];
+ output[1] = x4[4];
+ output[2] = x4[2];
+ output[3] = x4[6];
+ output[4] = x4[1];
+ output[5] = x4[5];
+ output[6] = x4[3];
+ output[7] = x4[7];
+}
+
+// 16-point 1-D forward DCT across 8 lanes of 16-bit coefficients.
+// Seven stages: mirror-pair add/sub stages interleaved with btf_16_sse2
+// cosine rotations (the macro implicitly uses __rounding and cos_bit),
+// finishing with the bit-reversed output permutation.
+static void fdct8x16_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+ __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);
+ __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
+ __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
+ __m128i cospi_m48_m16 = pair_set_epi16(-cospi[48], -cospi[16]);
+ __m128i cospi_p56_p08 = pair_set_epi16(cospi[56], cospi[8]);
+ __m128i cospi_m08_p56 = pair_set_epi16(-cospi[8], cospi[56]);
+ __m128i cospi_p24_p40 = pair_set_epi16(cospi[24], cospi[40]);
+ __m128i cospi_m40_p24 = pair_set_epi16(-cospi[40], cospi[24]);
+ __m128i cospi_p60_p04 = pair_set_epi16(cospi[60], cospi[4]);
+ __m128i cospi_m04_p60 = pair_set_epi16(-cospi[4], cospi[60]);
+ __m128i cospi_p28_p36 = pair_set_epi16(cospi[28], cospi[36]);
+ __m128i cospi_m36_p28 = pair_set_epi16(-cospi[36], cospi[28]);
+ __m128i cospi_p44_p20 = pair_set_epi16(cospi[44], cospi[20]);
+ __m128i cospi_m20_p44 = pair_set_epi16(-cospi[20], cospi[44]);
+ __m128i cospi_p12_p52 = pair_set_epi16(cospi[12], cospi[52]);
+ __m128i cospi_m52_p12 = pair_set_epi16(-cospi[52], cospi[12]);
+
+ // stage 1: mirror-pair butterflies (i, 15-i)
+ __m128i x1[16];
+ x1[0] = _mm_adds_epi16(input[0], input[15]);
+ x1[15] = _mm_subs_epi16(input[0], input[15]);
+ x1[1] = _mm_adds_epi16(input[1], input[14]);
+ x1[14] = _mm_subs_epi16(input[1], input[14]);
+ x1[2] = _mm_adds_epi16(input[2], input[13]);
+ x1[13] = _mm_subs_epi16(input[2], input[13]);
+ x1[3] = _mm_adds_epi16(input[3], input[12]);
+ x1[12] = _mm_subs_epi16(input[3], input[12]);
+ x1[4] = _mm_adds_epi16(input[4], input[11]);
+ x1[11] = _mm_subs_epi16(input[4], input[11]);
+ x1[5] = _mm_adds_epi16(input[5], input[10]);
+ x1[10] = _mm_subs_epi16(input[5], input[10]);
+ x1[6] = _mm_adds_epi16(input[6], input[9]);
+ x1[9] = _mm_subs_epi16(input[6], input[9]);
+ x1[7] = _mm_adds_epi16(input[7], input[8]);
+ x1[8] = _mm_subs_epi16(input[7], input[8]);
+
+ // stage 2: even half folds like an 8-pt DCT; odd half rotates 10..13
+ __m128i x2[16];
+ x2[0] = _mm_adds_epi16(x1[0], x1[7]);
+ x2[7] = _mm_subs_epi16(x1[0], x1[7]);
+ x2[1] = _mm_adds_epi16(x1[1], x1[6]);
+ x2[6] = _mm_subs_epi16(x1[1], x1[6]);
+ x2[2] = _mm_adds_epi16(x1[2], x1[5]);
+ x2[5] = _mm_subs_epi16(x1[2], x1[5]);
+ x2[3] = _mm_adds_epi16(x1[3], x1[4]);
+ x2[4] = _mm_subs_epi16(x1[3], x1[4]);
+ x2[8] = x1[8];
+ x2[9] = x1[9];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[10], x1[13], x2[10], x2[13]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[11], x1[12], x2[11], x2[12]);
+ x2[14] = x1[14];
+ x2[15] = x1[15];
+
+ // stage 3
+ __m128i x3[16];
+ x3[0] = _mm_adds_epi16(x2[0], x2[3]);
+ x3[3] = _mm_subs_epi16(x2[0], x2[3]);
+ x3[1] = _mm_adds_epi16(x2[1], x2[2]);
+ x3[2] = _mm_subs_epi16(x2[1], x2[2]);
+ x3[4] = x2[4];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x2[5], x2[6], x3[5], x3[6]);
+ x3[7] = x2[7];
+ x3[8] = _mm_adds_epi16(x2[8], x2[11]);
+ x3[11] = _mm_subs_epi16(x2[8], x2[11]);
+ x3[9] = _mm_adds_epi16(x2[9], x2[10]);
+ x3[10] = _mm_subs_epi16(x2[9], x2[10]);
+ x3[12] = _mm_subs_epi16(x2[15], x2[12]);
+ x3[15] = _mm_adds_epi16(x2[15], x2[12]);
+ x3[13] = _mm_subs_epi16(x2[14], x2[13]);
+ x3[14] = _mm_adds_epi16(x2[14], x2[13]);
+
+ // stage 4
+ __m128i x4[16];
+ btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x3[0], x3[1], x4[0], x4[1]);
+ btf_16_sse2(cospi_p48_p16, cospi_m16_p48, x3[2], x3[3], x4[2], x4[3]);
+ x4[4] = _mm_adds_epi16(x3[4], x3[5]);
+ x4[5] = _mm_subs_epi16(x3[4], x3[5]);
+ x4[6] = _mm_subs_epi16(x3[7], x3[6]);
+ x4[7] = _mm_adds_epi16(x3[7], x3[6]);
+ x4[8] = x3[8];
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x3[9], x3[14], x4[9], x4[14]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x3[10], x3[13], x4[10], x4[13]);
+ x4[11] = x3[11];
+ x4[12] = x3[12];
+ x4[15] = x3[15];
+
+ // stage 5
+ __m128i x5[16];
+ x5[0] = x4[0];
+ x5[1] = x4[1];
+ x5[2] = x4[2];
+ x5[3] = x4[3];
+ btf_16_sse2(cospi_p56_p08, cospi_m08_p56, x4[4], x4[7], x5[4], x5[7]);
+ btf_16_sse2(cospi_p24_p40, cospi_m40_p24, x4[5], x4[6], x5[5], x5[6]);
+ x5[8] = _mm_adds_epi16(x4[8], x4[9]);
+ x5[9] = _mm_subs_epi16(x4[8], x4[9]);
+ x5[10] = _mm_subs_epi16(x4[11], x4[10]);
+ x5[11] = _mm_adds_epi16(x4[11], x4[10]);
+ x5[12] = _mm_adds_epi16(x4[12], x4[13]);
+ x5[13] = _mm_subs_epi16(x4[12], x4[13]);
+ x5[14] = _mm_subs_epi16(x4[15], x4[14]);
+ x5[15] = _mm_adds_epi16(x4[15], x4[14]);
+
+ // stage 6: final odd-index rotations
+ __m128i x6[16];
+ x6[0] = x5[0];
+ x6[1] = x5[1];
+ x6[2] = x5[2];
+ x6[3] = x5[3];
+ x6[4] = x5[4];
+ x6[5] = x5[5];
+ x6[6] = x5[6];
+ x6[7] = x5[7];
+ btf_16_sse2(cospi_p60_p04, cospi_m04_p60, x5[8], x5[15], x6[8], x6[15]);
+ btf_16_sse2(cospi_p28_p36, cospi_m36_p28, x5[9], x5[14], x6[9], x6[14]);
+ btf_16_sse2(cospi_p44_p20, cospi_m20_p44, x5[10], x5[13], x6[10], x6[13]);
+ btf_16_sse2(cospi_p12_p52, cospi_m52_p12, x5[11], x5[12], x6[11], x6[12]);
+
+ // stage 7: bit-reversed output permutation
+ output[0] = x6[0];
+ output[1] = x6[8];
+ output[2] = x6[4];
+ output[3] = x6[12];
+ output[4] = x6[2];
+ output[5] = x6[10];
+ output[6] = x6[6];
+ output[7] = x6[14];
+ output[8] = x6[1];
+ output[9] = x6[9];
+ output[10] = x6[5];
+ output[11] = x6[13];
+ output[12] = x6[3];
+ output[13] = x6[11];
+ output[14] = x6[7];
+ output[15] = x6[15];
+}
+
+// 32-point 1-D forward DCT across 8 lanes of 16-bit coefficients.
+// Nine stages: mirror-pair add/sub butterflies interleaved with btf_16_sse2
+// cosine rotations (the macro implicitly consumes __rounding and cos_bit),
+// ending with the bit-reversed output permutation. Non-static: also used as
+// the column transform by the wider lowbd kernels.
+void av1_fdct8x32_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+ __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);
+ __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
+ __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
+ __m128i cospi_m48_m16 = pair_set_epi16(-cospi[48], -cospi[16]);
+ __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ __m128i cospi_p56_p08 = pair_set_epi16(cospi[56], cospi[8]);
+ __m128i cospi_m08_p56 = pair_set_epi16(-cospi[8], cospi[56]);
+ __m128i cospi_p24_p40 = pair_set_epi16(cospi[24], cospi[40]);
+ __m128i cospi_m40_p24 = pair_set_epi16(-cospi[40], cospi[24]);
+ __m128i cospi_m56_m08 = pair_set_epi16(-cospi[56], -cospi[8]);
+ __m128i cospi_m24_m40 = pair_set_epi16(-cospi[24], -cospi[40]);
+ __m128i cospi_p60_p04 = pair_set_epi16(cospi[60], cospi[4]);
+ __m128i cospi_m04_p60 = pair_set_epi16(-cospi[4], cospi[60]);
+ __m128i cospi_p28_p36 = pair_set_epi16(cospi[28], cospi[36]);
+ __m128i cospi_m36_p28 = pair_set_epi16(-cospi[36], cospi[28]);
+ __m128i cospi_p44_p20 = pair_set_epi16(cospi[44], cospi[20]);
+ __m128i cospi_m20_p44 = pair_set_epi16(-cospi[20], cospi[44]);
+ __m128i cospi_p12_p52 = pair_set_epi16(cospi[12], cospi[52]);
+ __m128i cospi_m52_p12 = pair_set_epi16(-cospi[52], cospi[12]);
+ __m128i cospi_p62_p02 = pair_set_epi16(cospi[62], cospi[2]);
+ __m128i cospi_m02_p62 = pair_set_epi16(-cospi[2], cospi[62]);
+ __m128i cospi_p30_p34 = pair_set_epi16(cospi[30], cospi[34]);
+ __m128i cospi_m34_p30 = pair_set_epi16(-cospi[34], cospi[30]);
+ __m128i cospi_p46_p18 = pair_set_epi16(cospi[46], cospi[18]);
+ __m128i cospi_m18_p46 = pair_set_epi16(-cospi[18], cospi[46]);
+ __m128i cospi_p14_p50 = pair_set_epi16(cospi[14], cospi[50]);
+ __m128i cospi_m50_p14 = pair_set_epi16(-cospi[50], cospi[14]);
+ __m128i cospi_p54_p10 = pair_set_epi16(cospi[54], cospi[10]);
+ __m128i cospi_m10_p54 = pair_set_epi16(-cospi[10], cospi[54]);
+ __m128i cospi_p22_p42 = pair_set_epi16(cospi[22], cospi[42]);
+ __m128i cospi_m42_p22 = pair_set_epi16(-cospi[42], cospi[22]);
+ __m128i cospi_p38_p26 = pair_set_epi16(cospi[38], cospi[26]);
+ __m128i cospi_m26_p38 = pair_set_epi16(-cospi[26], cospi[38]);
+ __m128i cospi_p06_p58 = pair_set_epi16(cospi[6], cospi[58]);
+ __m128i cospi_m58_p06 = pair_set_epi16(-cospi[58], cospi[6]);
+
+ // stage 1: mirror-pair butterflies (i, 31-i)
+ __m128i x1[32];
+ x1[0] = _mm_adds_epi16(input[0], input[31]);
+ x1[31] = _mm_subs_epi16(input[0], input[31]);
+ x1[1] = _mm_adds_epi16(input[1], input[30]);
+ x1[30] = _mm_subs_epi16(input[1], input[30]);
+ x1[2] = _mm_adds_epi16(input[2], input[29]);
+ x1[29] = _mm_subs_epi16(input[2], input[29]);
+ x1[3] = _mm_adds_epi16(input[3], input[28]);
+ x1[28] = _mm_subs_epi16(input[3], input[28]);
+ x1[4] = _mm_adds_epi16(input[4], input[27]);
+ x1[27] = _mm_subs_epi16(input[4], input[27]);
+ x1[5] = _mm_adds_epi16(input[5], input[26]);
+ x1[26] = _mm_subs_epi16(input[5], input[26]);
+ x1[6] = _mm_adds_epi16(input[6], input[25]);
+ x1[25] = _mm_subs_epi16(input[6], input[25]);
+ x1[7] = _mm_adds_epi16(input[7], input[24]);
+ x1[24] = _mm_subs_epi16(input[7], input[24]);
+ x1[8] = _mm_adds_epi16(input[8], input[23]);
+ x1[23] = _mm_subs_epi16(input[8], input[23]);
+ x1[9] = _mm_adds_epi16(input[9], input[22]);
+ x1[22] = _mm_subs_epi16(input[9], input[22]);
+ x1[10] = _mm_adds_epi16(input[10], input[21]);
+ x1[21] = _mm_subs_epi16(input[10], input[21]);
+ x1[11] = _mm_adds_epi16(input[11], input[20]);
+ x1[20] = _mm_subs_epi16(input[11], input[20]);
+ x1[12] = _mm_adds_epi16(input[12], input[19]);
+ x1[19] = _mm_subs_epi16(input[12], input[19]);
+ x1[13] = _mm_adds_epi16(input[13], input[18]);
+ x1[18] = _mm_subs_epi16(input[13], input[18]);
+ x1[14] = _mm_adds_epi16(input[14], input[17]);
+ x1[17] = _mm_subs_epi16(input[14], input[17]);
+ x1[15] = _mm_adds_epi16(input[15], input[16]);
+ x1[16] = _mm_subs_epi16(input[15], input[16]);
+
+ // stage 2: even half folds like a 16-pt DCT; odd half rotates 20..27
+ __m128i x2[32];
+ x2[0] = _mm_adds_epi16(x1[0], x1[15]);
+ x2[15] = _mm_subs_epi16(x1[0], x1[15]);
+ x2[1] = _mm_adds_epi16(x1[1], x1[14]);
+ x2[14] = _mm_subs_epi16(x1[1], x1[14]);
+ x2[2] = _mm_adds_epi16(x1[2], x1[13]);
+ x2[13] = _mm_subs_epi16(x1[2], x1[13]);
+ x2[3] = _mm_adds_epi16(x1[3], x1[12]);
+ x2[12] = _mm_subs_epi16(x1[3], x1[12]);
+ x2[4] = _mm_adds_epi16(x1[4], x1[11]);
+ x2[11] = _mm_subs_epi16(x1[4], x1[11]);
+ x2[5] = _mm_adds_epi16(x1[5], x1[10]);
+ x2[10] = _mm_subs_epi16(x1[5], x1[10]);
+ x2[6] = _mm_adds_epi16(x1[6], x1[9]);
+ x2[9] = _mm_subs_epi16(x1[6], x1[9]);
+ x2[7] = _mm_adds_epi16(x1[7], x1[8]);
+ x2[8] = _mm_subs_epi16(x1[7], x1[8]);
+ x2[16] = x1[16];
+ x2[17] = x1[17];
+ x2[18] = x1[18];
+ x2[19] = x1[19];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[20], x1[27], x2[20], x2[27]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[21], x1[26], x2[21], x2[26]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[22], x1[25], x2[22], x2[25]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[23], x1[24], x2[23], x2[24]);
+ x2[28] = x1[28];
+ x2[29] = x1[29];
+ x2[30] = x1[30];
+ x2[31] = x1[31];
+
+ // stage 3
+ __m128i x3[32];
+ x3[0] = _mm_adds_epi16(x2[0], x2[7]);
+ x3[7] = _mm_subs_epi16(x2[0], x2[7]);
+ x3[1] = _mm_adds_epi16(x2[1], x2[6]);
+ x3[6] = _mm_subs_epi16(x2[1], x2[6]);
+ x3[2] = _mm_adds_epi16(x2[2], x2[5]);
+ x3[5] = _mm_subs_epi16(x2[2], x2[5]);
+ x3[3] = _mm_adds_epi16(x2[3], x2[4]);
+ x3[4] = _mm_subs_epi16(x2[3], x2[4]);
+ x3[8] = x2[8];
+ x3[9] = x2[9];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x2[10], x2[13], x3[10], x3[13]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x2[11], x2[12], x3[11], x3[12]);
+ x3[14] = x2[14];
+ x3[15] = x2[15];
+ x3[16] = _mm_adds_epi16(x2[16], x2[23]);
+ x3[23] = _mm_subs_epi16(x2[16], x2[23]);
+ x3[17] = _mm_adds_epi16(x2[17], x2[22]);
+ x3[22] = _mm_subs_epi16(x2[17], x2[22]);
+ x3[18] = _mm_adds_epi16(x2[18], x2[21]);
+ x3[21] = _mm_subs_epi16(x2[18], x2[21]);
+ x3[19] = _mm_adds_epi16(x2[19], x2[20]);
+ x3[20] = _mm_subs_epi16(x2[19], x2[20]);
+ x3[24] = _mm_subs_epi16(x2[31], x2[24]);
+ x3[31] = _mm_adds_epi16(x2[31], x2[24]);
+ x3[25] = _mm_subs_epi16(x2[30], x2[25]);
+ x3[30] = _mm_adds_epi16(x2[30], x2[25]);
+ x3[26] = _mm_subs_epi16(x2[29], x2[26]);
+ x3[29] = _mm_adds_epi16(x2[29], x2[26]);
+ x3[27] = _mm_subs_epi16(x2[28], x2[27]);
+ x3[28] = _mm_adds_epi16(x2[28], x2[27]);
+
+ // stage 4
+ __m128i x4[32];
+ x4[0] = _mm_adds_epi16(x3[0], x3[3]);
+ x4[3] = _mm_subs_epi16(x3[0], x3[3]);
+ x4[1] = _mm_adds_epi16(x3[1], x3[2]);
+ x4[2] = _mm_subs_epi16(x3[1], x3[2]);
+ x4[4] = x3[4];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x3[5], x3[6], x4[5], x4[6]);
+ x4[7] = x3[7];
+ x4[8] = _mm_adds_epi16(x3[8], x3[11]);
+ x4[11] = _mm_subs_epi16(x3[8], x3[11]);
+ x4[9] = _mm_adds_epi16(x3[9], x3[10]);
+ x4[10] = _mm_subs_epi16(x3[9], x3[10]);
+ x4[12] = _mm_subs_epi16(x3[15], x3[12]);
+ x4[15] = _mm_adds_epi16(x3[15], x3[12]);
+ x4[13] = _mm_subs_epi16(x3[14], x3[13]);
+ x4[14] = _mm_adds_epi16(x3[14], x3[13]);
+ x4[16] = x3[16];
+ x4[17] = x3[17];
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x3[18], x3[29], x4[18], x4[29]);
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x3[19], x3[28], x4[19], x4[28]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x3[20], x3[27], x4[20], x4[27]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x3[21], x3[26], x4[21], x4[26]);
+ x4[22] = x3[22];
+ x4[23] = x3[23];
+ x4[24] = x3[24];
+ x4[25] = x3[25];
+ x4[30] = x3[30];
+ x4[31] = x3[31];
+
+ // stage 5
+ __m128i x5[32];
+ btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x4[0], x4[1], x5[0], x5[1]);
+ btf_16_sse2(cospi_p48_p16, cospi_m16_p48, x4[2], x4[3], x5[2], x5[3]);
+ x5[4] = _mm_adds_epi16(x4[4], x4[5]);
+ x5[5] = _mm_subs_epi16(x4[4], x4[5]);
+ x5[6] = _mm_subs_epi16(x4[7], x4[6]);
+ x5[7] = _mm_adds_epi16(x4[7], x4[6]);
+ x5[8] = x4[8];
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x4[9], x4[14], x5[9], x5[14]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x4[10], x4[13], x5[10], x5[13]);
+ x5[11] = x4[11];
+ x5[12] = x4[12];
+ x5[15] = x4[15];
+ x5[16] = _mm_adds_epi16(x4[16], x4[19]);
+ x5[19] = _mm_subs_epi16(x4[16], x4[19]);
+ x5[17] = _mm_adds_epi16(x4[17], x4[18]);
+ x5[18] = _mm_subs_epi16(x4[17], x4[18]);
+ x5[20] = _mm_subs_epi16(x4[23], x4[20]);
+ x5[23] = _mm_adds_epi16(x4[23], x4[20]);
+ x5[21] = _mm_subs_epi16(x4[22], x4[21]);
+ x5[22] = _mm_adds_epi16(x4[22], x4[21]);
+ x5[24] = _mm_adds_epi16(x4[24], x4[27]);
+ x5[27] = _mm_subs_epi16(x4[24], x4[27]);
+ x5[25] = _mm_adds_epi16(x4[25], x4[26]);
+ x5[26] = _mm_subs_epi16(x4[25], x4[26]);
+ x5[28] = _mm_subs_epi16(x4[31], x4[28]);
+ x5[31] = _mm_adds_epi16(x4[31], x4[28]);
+ x5[29] = _mm_subs_epi16(x4[30], x4[29]);
+ x5[30] = _mm_adds_epi16(x4[30], x4[29]);
+
+ // stage 6
+ __m128i x6[32];
+ x6[0] = x5[0];
+ x6[1] = x5[1];
+ x6[2] = x5[2];
+ x6[3] = x5[3];
+ btf_16_sse2(cospi_p56_p08, cospi_m08_p56, x5[4], x5[7], x6[4], x6[7]);
+ btf_16_sse2(cospi_p24_p40, cospi_m40_p24, x5[5], x5[6], x6[5], x6[6]);
+ x6[8] = _mm_adds_epi16(x5[8], x5[9]);
+ x6[9] = _mm_subs_epi16(x5[8], x5[9]);
+ x6[10] = _mm_subs_epi16(x5[11], x5[10]);
+ x6[11] = _mm_adds_epi16(x5[11], x5[10]);
+ x6[12] = _mm_adds_epi16(x5[12], x5[13]);
+ x6[13] = _mm_subs_epi16(x5[12], x5[13]);
+ x6[14] = _mm_subs_epi16(x5[15], x5[14]);
+ x6[15] = _mm_adds_epi16(x5[15], x5[14]);
+ x6[16] = x5[16];
+ btf_16_sse2(cospi_m08_p56, cospi_p56_p08, x5[17], x5[30], x6[17], x6[30]);
+ btf_16_sse2(cospi_m56_m08, cospi_m08_p56, x5[18], x5[29], x6[18], x6[29]);
+ x6[19] = x5[19];
+ x6[20] = x5[20];
+ btf_16_sse2(cospi_m40_p24, cospi_p24_p40, x5[21], x5[26], x6[21], x6[26]);
+ btf_16_sse2(cospi_m24_m40, cospi_m40_p24, x5[22], x5[25], x6[22], x6[25]);
+ x6[23] = x5[23];
+ x6[24] = x5[24];
+ x6[27] = x5[27];
+ x6[28] = x5[28];
+ x6[31] = x5[31];
+
+ // stage 7
+ __m128i x7[32];
+ x7[0] = x6[0];
+ x7[1] = x6[1];
+ x7[2] = x6[2];
+ x7[3] = x6[3];
+ x7[4] = x6[4];
+ x7[5] = x6[5];
+ x7[6] = x6[6];
+ x7[7] = x6[7];
+ btf_16_sse2(cospi_p60_p04, cospi_m04_p60, x6[8], x6[15], x7[8], x7[15]);
+ btf_16_sse2(cospi_p28_p36, cospi_m36_p28, x6[9], x6[14], x7[9], x7[14]);
+ btf_16_sse2(cospi_p44_p20, cospi_m20_p44, x6[10], x6[13], x7[10], x7[13]);
+ btf_16_sse2(cospi_p12_p52, cospi_m52_p12, x6[11], x6[12], x7[11], x7[12]);
+ x7[16] = _mm_adds_epi16(x6[16], x6[17]);
+ x7[17] = _mm_subs_epi16(x6[16], x6[17]);
+ x7[18] = _mm_subs_epi16(x6[19], x6[18]);
+ x7[19] = _mm_adds_epi16(x6[19], x6[18]);
+ x7[20] = _mm_adds_epi16(x6[20], x6[21]);
+ x7[21] = _mm_subs_epi16(x6[20], x6[21]);
+ x7[22] = _mm_subs_epi16(x6[23], x6[22]);
+ x7[23] = _mm_adds_epi16(x6[23], x6[22]);
+ x7[24] = _mm_adds_epi16(x6[24], x6[25]);
+ x7[25] = _mm_subs_epi16(x6[24], x6[25]);
+ x7[26] = _mm_subs_epi16(x6[27], x6[26]);
+ x7[27] = _mm_adds_epi16(x6[27], x6[26]);
+ x7[28] = _mm_adds_epi16(x6[28], x6[29]);
+ x7[29] = _mm_subs_epi16(x6[28], x6[29]);
+ x7[30] = _mm_subs_epi16(x6[31], x6[30]);
+ x7[31] = _mm_adds_epi16(x6[31], x6[30]);
+
+ // stage 8: final odd-index rotations
+ __m128i x8[32];
+ x8[0] = x7[0];
+ x8[1] = x7[1];
+ x8[2] = x7[2];
+ x8[3] = x7[3];
+ x8[4] = x7[4];
+ x8[5] = x7[5];
+ x8[6] = x7[6];
+ x8[7] = x7[7];
+ x8[8] = x7[8];
+ x8[9] = x7[9];
+ x8[10] = x7[10];
+ x8[11] = x7[11];
+ x8[12] = x7[12];
+ x8[13] = x7[13];
+ x8[14] = x7[14];
+ x8[15] = x7[15];
+ btf_16_sse2(cospi_p62_p02, cospi_m02_p62, x7[16], x7[31], x8[16], x8[31]);
+ btf_16_sse2(cospi_p30_p34, cospi_m34_p30, x7[17], x7[30], x8[17], x8[30]);
+ btf_16_sse2(cospi_p46_p18, cospi_m18_p46, x7[18], x7[29], x8[18], x8[29]);
+ btf_16_sse2(cospi_p14_p50, cospi_m50_p14, x7[19], x7[28], x8[19], x8[28]);
+ btf_16_sse2(cospi_p54_p10, cospi_m10_p54, x7[20], x7[27], x8[20], x8[27]);
+ btf_16_sse2(cospi_p22_p42, cospi_m42_p22, x7[21], x7[26], x8[21], x8[26]);
+ btf_16_sse2(cospi_p38_p26, cospi_m26_p38, x7[22], x7[25], x8[22], x8[25]);
+ btf_16_sse2(cospi_p06_p58, cospi_m58_p06, x7[23], x7[24], x8[23], x8[24]);
+
+ // stage 9: bit-reversed output permutation
+ output[0] = x8[0];
+ output[1] = x8[16];
+ output[2] = x8[8];
+ output[3] = x8[24];
+ output[4] = x8[4];
+ output[5] = x8[20];
+ output[6] = x8[12];
+ output[7] = x8[28];
+ output[8] = x8[2];
+ output[9] = x8[18];
+ output[10] = x8[10];
+ output[11] = x8[26];
+ output[12] = x8[6];
+ output[13] = x8[22];
+ output[14] = x8[14];
+ output[15] = x8[30];
+ output[16] = x8[1];
+ output[17] = x8[17];
+ output[18] = x8[9];
+ output[19] = x8[25];
+ output[20] = x8[5];
+ output[21] = x8[21];
+ output[22] = x8[13];
+ output[23] = x8[29];
+ output[24] = x8[3];
+ output[25] = x8[19];
+ output[26] = x8[11];
+ output[27] = x8[27];
+ output[28] = x8[7];
+ output[29] = x8[23];
+ output[30] = x8[15];
+ output[31] = x8[31];
+}
+
+ // Forward 64-point DCT over 64 __m128i vectors, each holding eight 16-bit
+ // lanes (i.e. eight independent columns are transformed at once).
+ // The transform is an 11-stage butterfly network: saturating adds/subs
+ // (_mm_adds_epi16/_mm_subs_epi16) form the sum/difference branches, and
+ // btf_16_sse2 applies the paired-cosine rotations built below with
+ // pair_set_epi16. NOTE(review): `__rounding` is declared but not referenced
+ // in the visible code — presumably the btf_16_sse2 macro picks up
+ // `__rounding` and `cos_bit` from the enclosing scope; confirm against its
+ // definition in av1_fwd_txfm_sse2.h.
+void av1_fdct8x64_new_sse2(const __m128i *input, __m128i *output,
+                           int8_t cos_bit) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+ __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);
+ __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+ __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
+ __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
+ __m128i cospi_m48_m16 = pair_set_epi16(-cospi[48], -cospi[16]);
+ __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+ __m128i cospi_m08_p56 = pair_set_epi16(-cospi[8], cospi[56]);
+ __m128i cospi_p56_p08 = pair_set_epi16(cospi[56], cospi[8]);
+ __m128i cospi_m56_m08 = pair_set_epi16(-cospi[56], -cospi[8]);
+ __m128i cospi_m40_p24 = pair_set_epi16(-cospi[40], cospi[24]);
+ __m128i cospi_p24_p40 = pair_set_epi16(cospi[24], cospi[40]);
+ __m128i cospi_m24_m40 = pair_set_epi16(-cospi[24], -cospi[40]);
+ __m128i cospi_p60_p04 = pair_set_epi16(cospi[60], cospi[4]);
+ __m128i cospi_m04_p60 = pair_set_epi16(-cospi[4], cospi[60]);
+ __m128i cospi_p28_p36 = pair_set_epi16(cospi[28], cospi[36]);
+ __m128i cospi_m36_p28 = pair_set_epi16(-cospi[36], cospi[28]);
+ __m128i cospi_p44_p20 = pair_set_epi16(cospi[44], cospi[20]);
+ __m128i cospi_m20_p44 = pair_set_epi16(-cospi[20], cospi[44]);
+ __m128i cospi_p12_p52 = pair_set_epi16(cospi[12], cospi[52]);
+ __m128i cospi_m52_p12 = pair_set_epi16(-cospi[52], cospi[12]);
+ __m128i cospi_m60_m04 = pair_set_epi16(-cospi[60], -cospi[4]);
+ __m128i cospi_m28_m36 = pair_set_epi16(-cospi[28], -cospi[36]);
+ __m128i cospi_m44_m20 = pair_set_epi16(-cospi[44], -cospi[20]);
+ __m128i cospi_m12_m52 = pair_set_epi16(-cospi[12], -cospi[52]);
+ __m128i cospi_p62_p02 = pair_set_epi16(cospi[62], cospi[2]);
+ __m128i cospi_m02_p62 = pair_set_epi16(-cospi[2], cospi[62]);
+ __m128i cospi_p30_p34 = pair_set_epi16(cospi[30], cospi[34]);
+ __m128i cospi_m34_p30 = pair_set_epi16(-cospi[34], cospi[30]);
+ __m128i cospi_p46_p18 = pair_set_epi16(cospi[46], cospi[18]);
+ __m128i cospi_m18_p46 = pair_set_epi16(-cospi[18], cospi[46]);
+ __m128i cospi_p14_p50 = pair_set_epi16(cospi[14], cospi[50]);
+ __m128i cospi_m50_p14 = pair_set_epi16(-cospi[50], cospi[14]);
+ __m128i cospi_p54_p10 = pair_set_epi16(cospi[54], cospi[10]);
+ __m128i cospi_m10_p54 = pair_set_epi16(-cospi[10], cospi[54]);
+ __m128i cospi_p22_p42 = pair_set_epi16(cospi[22], cospi[42]);
+ __m128i cospi_m42_p22 = pair_set_epi16(-cospi[42], cospi[22]);
+ __m128i cospi_p38_p26 = pair_set_epi16(cospi[38], cospi[26]);
+ __m128i cospi_m26_p38 = pair_set_epi16(-cospi[26], cospi[38]);
+ __m128i cospi_p06_p58 = pair_set_epi16(cospi[6], cospi[58]);
+ __m128i cospi_m58_p06 = pair_set_epi16(-cospi[58], cospi[6]);
+ __m128i cospi_p63_p01 = pair_set_epi16(cospi[63], cospi[1]);
+ __m128i cospi_m01_p63 = pair_set_epi16(-cospi[1], cospi[63]);
+ __m128i cospi_p31_p33 = pair_set_epi16(cospi[31], cospi[33]);
+ __m128i cospi_m33_p31 = pair_set_epi16(-cospi[33], cospi[31]);
+ __m128i cospi_p47_p17 = pair_set_epi16(cospi[47], cospi[17]);
+ __m128i cospi_m17_p47 = pair_set_epi16(-cospi[17], cospi[47]);
+ __m128i cospi_p15_p49 = pair_set_epi16(cospi[15], cospi[49]);
+ __m128i cospi_m49_p15 = pair_set_epi16(-cospi[49], cospi[15]);
+ __m128i cospi_p55_p09 = pair_set_epi16(cospi[55], cospi[9]);
+ __m128i cospi_m09_p55 = pair_set_epi16(-cospi[9], cospi[55]);
+ __m128i cospi_p23_p41 = pair_set_epi16(cospi[23], cospi[41]);
+ __m128i cospi_m41_p23 = pair_set_epi16(-cospi[41], cospi[23]);
+ __m128i cospi_p39_p25 = pair_set_epi16(cospi[39], cospi[25]);
+ __m128i cospi_m25_p39 = pair_set_epi16(-cospi[25], cospi[39]);
+ __m128i cospi_p07_p57 = pair_set_epi16(cospi[7], cospi[57]);
+ __m128i cospi_m57_p07 = pair_set_epi16(-cospi[57], cospi[7]);
+ __m128i cospi_p59_p05 = pair_set_epi16(cospi[59], cospi[5]);
+ __m128i cospi_m05_p59 = pair_set_epi16(-cospi[5], cospi[59]);
+ __m128i cospi_p27_p37 = pair_set_epi16(cospi[27], cospi[37]);
+ __m128i cospi_m37_p27 = pair_set_epi16(-cospi[37], cospi[27]);
+ __m128i cospi_p43_p21 = pair_set_epi16(cospi[43], cospi[21]);
+ __m128i cospi_m21_p43 = pair_set_epi16(-cospi[21], cospi[43]);
+ __m128i cospi_p11_p53 = pair_set_epi16(cospi[11], cospi[53]);
+ __m128i cospi_m53_p11 = pair_set_epi16(-cospi[53], cospi[11]);
+ __m128i cospi_p51_p13 = pair_set_epi16(cospi[51], cospi[13]);
+ __m128i cospi_m13_p51 = pair_set_epi16(-cospi[13], cospi[51]);
+ __m128i cospi_p19_p45 = pair_set_epi16(cospi[19], cospi[45]);
+ __m128i cospi_m45_p19 = pair_set_epi16(-cospi[45], cospi[19]);
+ __m128i cospi_p35_p29 = pair_set_epi16(cospi[35], cospi[29]);
+ __m128i cospi_m29_p35 = pair_set_epi16(-cospi[29], cospi[35]);
+ __m128i cospi_p03_p61 = pair_set_epi16(cospi[3], cospi[61]);
+ __m128i cospi_m61_p03 = pair_set_epi16(-cospi[61], cospi[3]);
+
+ // stage 1
+ // First butterfly: sums land in x1[0..31], differences in x1[32..63],
+ // with the difference half stored in reversed order.
+ __m128i x1[64];
+ x1[0] = _mm_adds_epi16(input[0], input[63]);
+ x1[63] = _mm_subs_epi16(input[0], input[63]);
+ x1[1] = _mm_adds_epi16(input[1], input[62]);
+ x1[62] = _mm_subs_epi16(input[1], input[62]);
+ x1[2] = _mm_adds_epi16(input[2], input[61]);
+ x1[61] = _mm_subs_epi16(input[2], input[61]);
+ x1[3] = _mm_adds_epi16(input[3], input[60]);
+ x1[60] = _mm_subs_epi16(input[3], input[60]);
+ x1[4] = _mm_adds_epi16(input[4], input[59]);
+ x1[59] = _mm_subs_epi16(input[4], input[59]);
+ x1[5] = _mm_adds_epi16(input[5], input[58]);
+ x1[58] = _mm_subs_epi16(input[5], input[58]);
+ x1[6] = _mm_adds_epi16(input[6], input[57]);
+ x1[57] = _mm_subs_epi16(input[6], input[57]);
+ x1[7] = _mm_adds_epi16(input[7], input[56]);
+ x1[56] = _mm_subs_epi16(input[7], input[56]);
+ x1[8] = _mm_adds_epi16(input[8], input[55]);
+ x1[55] = _mm_subs_epi16(input[8], input[55]);
+ x1[9] = _mm_adds_epi16(input[9], input[54]);
+ x1[54] = _mm_subs_epi16(input[9], input[54]);
+ x1[10] = _mm_adds_epi16(input[10], input[53]);
+ x1[53] = _mm_subs_epi16(input[10], input[53]);
+ x1[11] = _mm_adds_epi16(input[11], input[52]);
+ x1[52] = _mm_subs_epi16(input[11], input[52]);
+ x1[12] = _mm_adds_epi16(input[12], input[51]);
+ x1[51] = _mm_subs_epi16(input[12], input[51]);
+ x1[13] = _mm_adds_epi16(input[13], input[50]);
+ x1[50] = _mm_subs_epi16(input[13], input[50]);
+ x1[14] = _mm_adds_epi16(input[14], input[49]);
+ x1[49] = _mm_subs_epi16(input[14], input[49]);
+ x1[15] = _mm_adds_epi16(input[15], input[48]);
+ x1[48] = _mm_subs_epi16(input[15], input[48]);
+ x1[16] = _mm_adds_epi16(input[16], input[47]);
+ x1[47] = _mm_subs_epi16(input[16], input[47]);
+ x1[17] = _mm_adds_epi16(input[17], input[46]);
+ x1[46] = _mm_subs_epi16(input[17], input[46]);
+ x1[18] = _mm_adds_epi16(input[18], input[45]);
+ x1[45] = _mm_subs_epi16(input[18], input[45]);
+ x1[19] = _mm_adds_epi16(input[19], input[44]);
+ x1[44] = _mm_subs_epi16(input[19], input[44]);
+ x1[20] = _mm_adds_epi16(input[20], input[43]);
+ x1[43] = _mm_subs_epi16(input[20], input[43]);
+ x1[21] = _mm_adds_epi16(input[21], input[42]);
+ x1[42] = _mm_subs_epi16(input[21], input[42]);
+ x1[22] = _mm_adds_epi16(input[22], input[41]);
+ x1[41] = _mm_subs_epi16(input[22], input[41]);
+ x1[23] = _mm_adds_epi16(input[23], input[40]);
+ x1[40] = _mm_subs_epi16(input[23], input[40]);
+ x1[24] = _mm_adds_epi16(input[24], input[39]);
+ x1[39] = _mm_subs_epi16(input[24], input[39]);
+ x1[25] = _mm_adds_epi16(input[25], input[38]);
+ x1[38] = _mm_subs_epi16(input[25], input[38]);
+ x1[26] = _mm_adds_epi16(input[26], input[37]);
+ x1[37] = _mm_subs_epi16(input[26], input[37]);
+ x1[27] = _mm_adds_epi16(input[27], input[36]);
+ x1[36] = _mm_subs_epi16(input[27], input[36]);
+ x1[28] = _mm_adds_epi16(input[28], input[35]);
+ x1[35] = _mm_subs_epi16(input[28], input[35]);
+ x1[29] = _mm_adds_epi16(input[29], input[34]);
+ x1[34] = _mm_subs_epi16(input[29], input[34]);
+ x1[30] = _mm_adds_epi16(input[30], input[33]);
+ x1[33] = _mm_subs_epi16(input[30], input[33]);
+ x1[31] = _mm_adds_epi16(input[31], input[32]);
+ x1[32] = _mm_subs_epi16(input[31], input[32]);
+
+ // stage 2
+ // Butterfly the sum half (32-point recursion); in the difference half,
+ // rotate the middle 16 elements (40..55) by the +/-pi/4 pair and pass
+ // the outer elements through.
+ __m128i x2[64];
+ x2[0] = _mm_adds_epi16(x1[0], x1[31]);
+ x2[31] = _mm_subs_epi16(x1[0], x1[31]);
+ x2[1] = _mm_adds_epi16(x1[1], x1[30]);
+ x2[30] = _mm_subs_epi16(x1[1], x1[30]);
+ x2[2] = _mm_adds_epi16(x1[2], x1[29]);
+ x2[29] = _mm_subs_epi16(x1[2], x1[29]);
+ x2[3] = _mm_adds_epi16(x1[3], x1[28]);
+ x2[28] = _mm_subs_epi16(x1[3], x1[28]);
+ x2[4] = _mm_adds_epi16(x1[4], x1[27]);
+ x2[27] = _mm_subs_epi16(x1[4], x1[27]);
+ x2[5] = _mm_adds_epi16(x1[5], x1[26]);
+ x2[26] = _mm_subs_epi16(x1[5], x1[26]);
+ x2[6] = _mm_adds_epi16(x1[6], x1[25]);
+ x2[25] = _mm_subs_epi16(x1[6], x1[25]);
+ x2[7] = _mm_adds_epi16(x1[7], x1[24]);
+ x2[24] = _mm_subs_epi16(x1[7], x1[24]);
+ x2[8] = _mm_adds_epi16(x1[8], x1[23]);
+ x2[23] = _mm_subs_epi16(x1[8], x1[23]);
+ x2[9] = _mm_adds_epi16(x1[9], x1[22]);
+ x2[22] = _mm_subs_epi16(x1[9], x1[22]);
+ x2[10] = _mm_adds_epi16(x1[10], x1[21]);
+ x2[21] = _mm_subs_epi16(x1[10], x1[21]);
+ x2[11] = _mm_adds_epi16(x1[11], x1[20]);
+ x2[20] = _mm_subs_epi16(x1[11], x1[20]);
+ x2[12] = _mm_adds_epi16(x1[12], x1[19]);
+ x2[19] = _mm_subs_epi16(x1[12], x1[19]);
+ x2[13] = _mm_adds_epi16(x1[13], x1[18]);
+ x2[18] = _mm_subs_epi16(x1[13], x1[18]);
+ x2[14] = _mm_adds_epi16(x1[14], x1[17]);
+ x2[17] = _mm_subs_epi16(x1[14], x1[17]);
+ x2[15] = _mm_adds_epi16(x1[15], x1[16]);
+ x2[16] = _mm_subs_epi16(x1[15], x1[16]);
+ x2[32] = x1[32];
+ x2[33] = x1[33];
+ x2[34] = x1[34];
+ x2[35] = x1[35];
+ x2[36] = x1[36];
+ x2[37] = x1[37];
+ x2[38] = x1[38];
+ x2[39] = x1[39];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[40], x1[55], x2[40], x2[55]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[41], x1[54], x2[41], x2[54]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[42], x1[53], x2[42], x2[53]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[43], x1[52], x2[43], x2[52]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[44], x1[51], x2[44], x2[51]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[45], x1[50], x2[45], x2[50]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[46], x1[49], x2[46], x2[49]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[47], x1[48], x2[47], x2[48]);
+ x2[56] = x1[56];
+ x2[57] = x1[57];
+ x2[58] = x1[58];
+ x2[59] = x1[59];
+ x2[60] = x1[60];
+ x2[61] = x1[61];
+ x2[62] = x1[62];
+ x2[63] = x1[63];
+
+ // stage 3
+ __m128i x3[64];
+ x3[0] = _mm_adds_epi16(x2[0], x2[15]);
+ x3[15] = _mm_subs_epi16(x2[0], x2[15]);
+ x3[1] = _mm_adds_epi16(x2[1], x2[14]);
+ x3[14] = _mm_subs_epi16(x2[1], x2[14]);
+ x3[2] = _mm_adds_epi16(x2[2], x2[13]);
+ x3[13] = _mm_subs_epi16(x2[2], x2[13]);
+ x3[3] = _mm_adds_epi16(x2[3], x2[12]);
+ x3[12] = _mm_subs_epi16(x2[3], x2[12]);
+ x3[4] = _mm_adds_epi16(x2[4], x2[11]);
+ x3[11] = _mm_subs_epi16(x2[4], x2[11]);
+ x3[5] = _mm_adds_epi16(x2[5], x2[10]);
+ x3[10] = _mm_subs_epi16(x2[5], x2[10]);
+ x3[6] = _mm_adds_epi16(x2[6], x2[9]);
+ x3[9] = _mm_subs_epi16(x2[6], x2[9]);
+ x3[7] = _mm_adds_epi16(x2[7], x2[8]);
+ x3[8] = _mm_subs_epi16(x2[7], x2[8]);
+ x3[16] = x2[16];
+ x3[17] = x2[17];
+ x3[18] = x2[18];
+ x3[19] = x2[19];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x2[20], x2[27], x3[20], x3[27]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x2[21], x2[26], x3[21], x3[26]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x2[22], x2[25], x3[22], x3[25]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x2[23], x2[24], x3[23], x3[24]);
+ x3[28] = x2[28];
+ x3[29] = x2[29];
+ x3[30] = x2[30];
+ x3[31] = x2[31];
+ x3[32] = _mm_adds_epi16(x2[32], x2[47]);
+ x3[47] = _mm_subs_epi16(x2[32], x2[47]);
+ x3[33] = _mm_adds_epi16(x2[33], x2[46]);
+ x3[46] = _mm_subs_epi16(x2[33], x2[46]);
+ x3[34] = _mm_adds_epi16(x2[34], x2[45]);
+ x3[45] = _mm_subs_epi16(x2[34], x2[45]);
+ x3[35] = _mm_adds_epi16(x2[35], x2[44]);
+ x3[44] = _mm_subs_epi16(x2[35], x2[44]);
+ x3[36] = _mm_adds_epi16(x2[36], x2[43]);
+ x3[43] = _mm_subs_epi16(x2[36], x2[43]);
+ x3[37] = _mm_adds_epi16(x2[37], x2[42]);
+ x3[42] = _mm_subs_epi16(x2[37], x2[42]);
+ x3[38] = _mm_adds_epi16(x2[38], x2[41]);
+ x3[41] = _mm_subs_epi16(x2[38], x2[41]);
+ x3[39] = _mm_adds_epi16(x2[39], x2[40]);
+ x3[40] = _mm_subs_epi16(x2[39], x2[40]);
+ x3[48] = _mm_subs_epi16(x2[63], x2[48]);
+ x3[63] = _mm_adds_epi16(x2[63], x2[48]);
+ x3[49] = _mm_subs_epi16(x2[62], x2[49]);
+ x3[62] = _mm_adds_epi16(x2[62], x2[49]);
+ x3[50] = _mm_subs_epi16(x2[61], x2[50]);
+ x3[61] = _mm_adds_epi16(x2[61], x2[50]);
+ x3[51] = _mm_subs_epi16(x2[60], x2[51]);
+ x3[60] = _mm_adds_epi16(x2[60], x2[51]);
+ x3[52] = _mm_subs_epi16(x2[59], x2[52]);
+ x3[59] = _mm_adds_epi16(x2[59], x2[52]);
+ x3[53] = _mm_subs_epi16(x2[58], x2[53]);
+ x3[58] = _mm_adds_epi16(x2[58], x2[53]);
+ x3[54] = _mm_subs_epi16(x2[57], x2[54]);
+ x3[57] = _mm_adds_epi16(x2[57], x2[54]);
+ x3[55] = _mm_subs_epi16(x2[56], x2[55]);
+ x3[56] = _mm_adds_epi16(x2[56], x2[55]);
+
+ // stage 4
+ __m128i x4[64];
+ x4[0] = _mm_adds_epi16(x3[0], x3[7]);
+ x4[7] = _mm_subs_epi16(x3[0], x3[7]);
+ x4[1] = _mm_adds_epi16(x3[1], x3[6]);
+ x4[6] = _mm_subs_epi16(x3[1], x3[6]);
+ x4[2] = _mm_adds_epi16(x3[2], x3[5]);
+ x4[5] = _mm_subs_epi16(x3[2], x3[5]);
+ x4[3] = _mm_adds_epi16(x3[3], x3[4]);
+ x4[4] = _mm_subs_epi16(x3[3], x3[4]);
+ x4[8] = x3[8];
+ x4[9] = x3[9];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x3[10], x3[13], x4[10], x4[13]);
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x3[11], x3[12], x4[11], x4[12]);
+ x4[14] = x3[14];
+ x4[15] = x3[15];
+ x4[16] = _mm_adds_epi16(x3[16], x3[23]);
+ x4[23] = _mm_subs_epi16(x3[16], x3[23]);
+ x4[17] = _mm_adds_epi16(x3[17], x3[22]);
+ x4[22] = _mm_subs_epi16(x3[17], x3[22]);
+ x4[18] = _mm_adds_epi16(x3[18], x3[21]);
+ x4[21] = _mm_subs_epi16(x3[18], x3[21]);
+ x4[19] = _mm_adds_epi16(x3[19], x3[20]);
+ x4[20] = _mm_subs_epi16(x3[19], x3[20]);
+ x4[24] = _mm_subs_epi16(x3[31], x3[24]);
+ x4[31] = _mm_adds_epi16(x3[31], x3[24]);
+ x4[25] = _mm_subs_epi16(x3[30], x3[25]);
+ x4[30] = _mm_adds_epi16(x3[30], x3[25]);
+ x4[26] = _mm_subs_epi16(x3[29], x3[26]);
+ x4[29] = _mm_adds_epi16(x3[29], x3[26]);
+ x4[27] = _mm_subs_epi16(x3[28], x3[27]);
+ x4[28] = _mm_adds_epi16(x3[28], x3[27]);
+ x4[32] = x3[32];
+ x4[33] = x3[33];
+ x4[34] = x3[34];
+ x4[35] = x3[35];
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x3[36], x3[59], x4[36], x4[59]);
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x3[37], x3[58], x4[37], x4[58]);
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x3[38], x3[57], x4[38], x4[57]);
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x3[39], x3[56], x4[39], x4[56]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x3[40], x3[55], x4[40], x4[55]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x3[41], x3[54], x4[41], x4[54]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x3[42], x3[53], x4[42], x4[53]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x3[43], x3[52], x4[43], x4[52]);
+ x4[44] = x3[44];
+ x4[45] = x3[45];
+ x4[46] = x3[46];
+ x4[47] = x3[47];
+ x4[48] = x3[48];
+ x4[49] = x3[49];
+ x4[50] = x3[50];
+ x4[51] = x3[51];
+ x4[60] = x3[60];
+ x4[61] = x3[61];
+ x4[62] = x3[62];
+ x4[63] = x3[63];
+
+ // stage 5
+ __m128i x5[64];
+ x5[0] = _mm_adds_epi16(x4[0], x4[3]);
+ x5[3] = _mm_subs_epi16(x4[0], x4[3]);
+ x5[1] = _mm_adds_epi16(x4[1], x4[2]);
+ x5[2] = _mm_subs_epi16(x4[1], x4[2]);
+ x5[4] = x4[4];
+ btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x4[5], x4[6], x5[5], x5[6]);
+ x5[7] = x4[7];
+ x5[8] = _mm_adds_epi16(x4[8], x4[11]);
+ x5[11] = _mm_subs_epi16(x4[8], x4[11]);
+ x5[9] = _mm_adds_epi16(x4[9], x4[10]);
+ x5[10] = _mm_subs_epi16(x4[9], x4[10]);
+ x5[12] = _mm_subs_epi16(x4[15], x4[12]);
+ x5[15] = _mm_adds_epi16(x4[15], x4[12]);
+ x5[13] = _mm_subs_epi16(x4[14], x4[13]);
+ x5[14] = _mm_adds_epi16(x4[14], x4[13]);
+ x5[16] = x4[16];
+ x5[17] = x4[17];
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x4[18], x4[29], x5[18], x5[29]);
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x4[19], x4[28], x5[19], x5[28]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x4[20], x4[27], x5[20], x5[27]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x4[21], x4[26], x5[21], x5[26]);
+ x5[22] = x4[22];
+ x5[23] = x4[23];
+ x5[24] = x4[24];
+ x5[25] = x4[25];
+ x5[30] = x4[30];
+ x5[31] = x4[31];
+ x5[32] = _mm_adds_epi16(x4[32], x4[39]);
+ x5[39] = _mm_subs_epi16(x4[32], x4[39]);
+ x5[33] = _mm_adds_epi16(x4[33], x4[38]);
+ x5[38] = _mm_subs_epi16(x4[33], x4[38]);
+ x5[34] = _mm_adds_epi16(x4[34], x4[37]);
+ x5[37] = _mm_subs_epi16(x4[34], x4[37]);
+ x5[35] = _mm_adds_epi16(x4[35], x4[36]);
+ x5[36] = _mm_subs_epi16(x4[35], x4[36]);
+ x5[40] = _mm_subs_epi16(x4[47], x4[40]);
+ x5[47] = _mm_adds_epi16(x4[47], x4[40]);
+ x5[41] = _mm_subs_epi16(x4[46], x4[41]);
+ x5[46] = _mm_adds_epi16(x4[46], x4[41]);
+ x5[42] = _mm_subs_epi16(x4[45], x4[42]);
+ x5[45] = _mm_adds_epi16(x4[45], x4[42]);
+ x5[43] = _mm_subs_epi16(x4[44], x4[43]);
+ x5[44] = _mm_adds_epi16(x4[44], x4[43]);
+ x5[48] = _mm_adds_epi16(x4[48], x4[55]);
+ x5[55] = _mm_subs_epi16(x4[48], x4[55]);
+ x5[49] = _mm_adds_epi16(x4[49], x4[54]);
+ x5[54] = _mm_subs_epi16(x4[49], x4[54]);
+ x5[50] = _mm_adds_epi16(x4[50], x4[53]);
+ x5[53] = _mm_subs_epi16(x4[50], x4[53]);
+ x5[51] = _mm_adds_epi16(x4[51], x4[52]);
+ x5[52] = _mm_subs_epi16(x4[51], x4[52]);
+ x5[56] = _mm_subs_epi16(x4[63], x4[56]);
+ x5[63] = _mm_adds_epi16(x4[63], x4[56]);
+ x5[57] = _mm_subs_epi16(x4[62], x4[57]);
+ x5[62] = _mm_adds_epi16(x4[62], x4[57]);
+ x5[58] = _mm_subs_epi16(x4[61], x4[58]);
+ x5[61] = _mm_adds_epi16(x4[61], x4[58]);
+ x5[59] = _mm_subs_epi16(x4[60], x4[59]);
+ x5[60] = _mm_adds_epi16(x4[60], x4[59]);
+
+ // stage 6
+ // x6[0]/x6[1] are the DC and pi/2 terms (final 2-point DCT of the sum
+ // branch); deeper branches continue with rotations of increasing angle.
+ __m128i x6[64];
+ btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x5[0], x5[1], x6[0], x6[1]);
+ btf_16_sse2(cospi_p48_p16, cospi_m16_p48, x5[2], x5[3], x6[2], x6[3]);
+ x6[4] = _mm_adds_epi16(x5[4], x5[5]);
+ x6[5] = _mm_subs_epi16(x5[4], x5[5]);
+ x6[6] = _mm_subs_epi16(x5[7], x5[6]);
+ x6[7] = _mm_adds_epi16(x5[7], x5[6]);
+ x6[8] = x5[8];
+ btf_16_sse2(cospi_m16_p48, cospi_p48_p16, x5[9], x5[14], x6[9], x6[14]);
+ btf_16_sse2(cospi_m48_m16, cospi_m16_p48, x5[10], x5[13], x6[10], x6[13]);
+ x6[11] = x5[11];
+ x6[12] = x5[12];
+ x6[15] = x5[15];
+ x6[16] = _mm_adds_epi16(x5[16], x5[19]);
+ x6[19] = _mm_subs_epi16(x5[16], x5[19]);
+ x6[17] = _mm_adds_epi16(x5[17], x5[18]);
+ x6[18] = _mm_subs_epi16(x5[17], x5[18]);
+ x6[20] = _mm_subs_epi16(x5[23], x5[20]);
+ x6[23] = _mm_adds_epi16(x5[23], x5[20]);
+ x6[21] = _mm_subs_epi16(x5[22], x5[21]);
+ x6[22] = _mm_adds_epi16(x5[22], x5[21]);
+ x6[24] = _mm_adds_epi16(x5[24], x5[27]);
+ x6[27] = _mm_subs_epi16(x5[24], x5[27]);
+ x6[25] = _mm_adds_epi16(x5[25], x5[26]);
+ x6[26] = _mm_subs_epi16(x5[25], x5[26]);
+ x6[28] = _mm_subs_epi16(x5[31], x5[28]);
+ x6[31] = _mm_adds_epi16(x5[31], x5[28]);
+ x6[29] = _mm_subs_epi16(x5[30], x5[29]);
+ x6[30] = _mm_adds_epi16(x5[30], x5[29]);
+ x6[32] = x5[32];
+ x6[33] = x5[33];
+ btf_16_sse2(cospi_m08_p56, cospi_p56_p08, x5[34], x5[61], x6[34], x6[61]);
+ btf_16_sse2(cospi_m08_p56, cospi_p56_p08, x5[35], x5[60], x6[35], x6[60]);
+ btf_16_sse2(cospi_m56_m08, cospi_m08_p56, x5[36], x5[59], x6[36], x6[59]);
+ btf_16_sse2(cospi_m56_m08, cospi_m08_p56, x5[37], x5[58], x6[37], x6[58]);
+ x6[38] = x5[38];
+ x6[39] = x5[39];
+ x6[40] = x5[40];
+ x6[41] = x5[41];
+ btf_16_sse2(cospi_m40_p24, cospi_p24_p40, x5[42], x5[53], x6[42], x6[53]);
+ btf_16_sse2(cospi_m40_p24, cospi_p24_p40, x5[43], x5[52], x6[43], x6[52]);
+ btf_16_sse2(cospi_m24_m40, cospi_m40_p24, x5[44], x5[51], x6[44], x6[51]);
+ btf_16_sse2(cospi_m24_m40, cospi_m40_p24, x5[45], x5[50], x6[45], x6[50]);
+ x6[46] = x5[46];
+ x6[47] = x5[47];
+ x6[48] = x5[48];
+ x6[49] = x5[49];
+ x6[54] = x5[54];
+ x6[55] = x5[55];
+ x6[56] = x5[56];
+ x6[57] = x5[57];
+ x6[62] = x5[62];
+ x6[63] = x5[63];
+
+ // stage 7
+ __m128i x7[64];
+ x7[0] = x6[0];
+ x7[1] = x6[1];
+ x7[2] = x6[2];
+ x7[3] = x6[3];
+ btf_16_sse2(cospi_p56_p08, cospi_m08_p56, x6[4], x6[7], x7[4], x7[7]);
+ btf_16_sse2(cospi_p24_p40, cospi_m40_p24, x6[5], x6[6], x7[5], x7[6]);
+ x7[8] = _mm_adds_epi16(x6[8], x6[9]);
+ x7[9] = _mm_subs_epi16(x6[8], x6[9]);
+ x7[10] = _mm_subs_epi16(x6[11], x6[10]);
+ x7[11] = _mm_adds_epi16(x6[11], x6[10]);
+ x7[12] = _mm_adds_epi16(x6[12], x6[13]);
+ x7[13] = _mm_subs_epi16(x6[12], x6[13]);
+ x7[14] = _mm_subs_epi16(x6[15], x6[14]);
+ x7[15] = _mm_adds_epi16(x6[15], x6[14]);
+ x7[16] = x6[16];
+ btf_16_sse2(cospi_m08_p56, cospi_p56_p08, x6[17], x6[30], x7[17], x7[30]);
+ btf_16_sse2(cospi_m56_m08, cospi_m08_p56, x6[18], x6[29], x7[18], x7[29]);
+ x7[19] = x6[19];
+ x7[20] = x6[20];
+ btf_16_sse2(cospi_m40_p24, cospi_p24_p40, x6[21], x6[26], x7[21], x7[26]);
+ btf_16_sse2(cospi_m24_m40, cospi_m40_p24, x6[22], x6[25], x7[22], x7[25]);
+ x7[23] = x6[23];
+ x7[24] = x6[24];
+ x7[27] = x6[27];
+ x7[28] = x6[28];
+ x7[31] = x6[31];
+ x7[32] = _mm_adds_epi16(x6[32], x6[35]);
+ x7[35] = _mm_subs_epi16(x6[32], x6[35]);
+ x7[33] = _mm_adds_epi16(x6[33], x6[34]);
+ x7[34] = _mm_subs_epi16(x6[33], x6[34]);
+ x7[36] = _mm_subs_epi16(x6[39], x6[36]);
+ x7[39] = _mm_adds_epi16(x6[39], x6[36]);
+ x7[37] = _mm_subs_epi16(x6[38], x6[37]);
+ x7[38] = _mm_adds_epi16(x6[38], x6[37]);
+ x7[40] = _mm_adds_epi16(x6[40], x6[43]);
+ x7[43] = _mm_subs_epi16(x6[40], x6[43]);
+ x7[41] = _mm_adds_epi16(x6[41], x6[42]);
+ x7[42] = _mm_subs_epi16(x6[41], x6[42]);
+ x7[44] = _mm_subs_epi16(x6[47], x6[44]);
+ x7[47] = _mm_adds_epi16(x6[47], x6[44]);
+ x7[45] = _mm_subs_epi16(x6[46], x6[45]);
+ x7[46] = _mm_adds_epi16(x6[46], x6[45]);
+ x7[48] = _mm_adds_epi16(x6[48], x6[51]);
+ x7[51] = _mm_subs_epi16(x6[48], x6[51]);
+ x7[49] = _mm_adds_epi16(x6[49], x6[50]);
+ x7[50] = _mm_subs_epi16(x6[49], x6[50]);
+ x7[52] = _mm_subs_epi16(x6[55], x6[52]);
+ x7[55] = _mm_adds_epi16(x6[55], x6[52]);
+ x7[53] = _mm_subs_epi16(x6[54], x6[53]);
+ x7[54] = _mm_adds_epi16(x6[54], x6[53]);
+ x7[56] = _mm_adds_epi16(x6[56], x6[59]);
+ x7[59] = _mm_subs_epi16(x6[56], x6[59]);
+ x7[57] = _mm_adds_epi16(x6[57], x6[58]);
+ x7[58] = _mm_subs_epi16(x6[57], x6[58]);
+ x7[60] = _mm_subs_epi16(x6[63], x6[60]);
+ x7[63] = _mm_adds_epi16(x6[63], x6[60]);
+ x7[61] = _mm_subs_epi16(x6[62], x6[61]);
+ x7[62] = _mm_adds_epi16(x6[62], x6[61]);
+
+ // stage 8
+ __m128i x8[64];
+ x8[0] = x7[0];
+ x8[1] = x7[1];
+ x8[2] = x7[2];
+ x8[3] = x7[3];
+ x8[4] = x7[4];
+ x8[5] = x7[5];
+ x8[6] = x7[6];
+ x8[7] = x7[7];
+ btf_16_sse2(cospi_p60_p04, cospi_m04_p60, x7[8], x7[15], x8[8], x8[15]);
+ btf_16_sse2(cospi_p28_p36, cospi_m36_p28, x7[9], x7[14], x8[9], x8[14]);
+ btf_16_sse2(cospi_p44_p20, cospi_m20_p44, x7[10], x7[13], x8[10], x8[13]);
+ btf_16_sse2(cospi_p12_p52, cospi_m52_p12, x7[11], x7[12], x8[11], x8[12]);
+ x8[16] = _mm_adds_epi16(x7[16], x7[17]);
+ x8[17] = _mm_subs_epi16(x7[16], x7[17]);
+ x8[18] = _mm_subs_epi16(x7[19], x7[18]);
+ x8[19] = _mm_adds_epi16(x7[19], x7[18]);
+ x8[20] = _mm_adds_epi16(x7[20], x7[21]);
+ x8[21] = _mm_subs_epi16(x7[20], x7[21]);
+ x8[22] = _mm_subs_epi16(x7[23], x7[22]);
+ x8[23] = _mm_adds_epi16(x7[23], x7[22]);
+ x8[24] = _mm_adds_epi16(x7[24], x7[25]);
+ x8[25] = _mm_subs_epi16(x7[24], x7[25]);
+ x8[26] = _mm_subs_epi16(x7[27], x7[26]);
+ x8[27] = _mm_adds_epi16(x7[27], x7[26]);
+ x8[28] = _mm_adds_epi16(x7[28], x7[29]);
+ x8[29] = _mm_subs_epi16(x7[28], x7[29]);
+ x8[30] = _mm_subs_epi16(x7[31], x7[30]);
+ x8[31] = _mm_adds_epi16(x7[31], x7[30]);
+ x8[32] = x7[32];
+ btf_16_sse2(cospi_m04_p60, cospi_p60_p04, x7[33], x7[62], x8[33], x8[62]);
+ btf_16_sse2(cospi_m60_m04, cospi_m04_p60, x7[34], x7[61], x8[34], x8[61]);
+ x8[35] = x7[35];
+ x8[36] = x7[36];
+ btf_16_sse2(cospi_m36_p28, cospi_p28_p36, x7[37], x7[58], x8[37], x8[58]);
+ btf_16_sse2(cospi_m28_m36, cospi_m36_p28, x7[38], x7[57], x8[38], x8[57]);
+ x8[39] = x7[39];
+ x8[40] = x7[40];
+ btf_16_sse2(cospi_m20_p44, cospi_p44_p20, x7[41], x7[54], x8[41], x8[54]);
+ btf_16_sse2(cospi_m44_m20, cospi_m20_p44, x7[42], x7[53], x8[42], x8[53]);
+ x8[43] = x7[43];
+ x8[44] = x7[44];
+ btf_16_sse2(cospi_m52_p12, cospi_p12_p52, x7[45], x7[50], x8[45], x8[50]);
+ btf_16_sse2(cospi_m12_m52, cospi_m52_p12, x7[46], x7[49], x8[46], x8[49]);
+ x8[47] = x7[47];
+ x8[48] = x7[48];
+ x8[51] = x7[51];
+ x8[52] = x7[52];
+ x8[55] = x7[55];
+ x8[56] = x7[56];
+ x8[59] = x7[59];
+ x8[60] = x7[60];
+ x8[63] = x7[63];
+
+ // stage 9
+ __m128i x9[64];
+ x9[0] = x8[0];
+ x9[1] = x8[1];
+ x9[2] = x8[2];
+ x9[3] = x8[3];
+ x9[4] = x8[4];
+ x9[5] = x8[5];
+ x9[6] = x8[6];
+ x9[7] = x8[7];
+ x9[8] = x8[8];
+ x9[9] = x8[9];
+ x9[10] = x8[10];
+ x9[11] = x8[11];
+ x9[12] = x8[12];
+ x9[13] = x8[13];
+ x9[14] = x8[14];
+ x9[15] = x8[15];
+ btf_16_sse2(cospi_p62_p02, cospi_m02_p62, x8[16], x8[31], x9[16], x9[31]);
+ btf_16_sse2(cospi_p30_p34, cospi_m34_p30, x8[17], x8[30], x9[17], x9[30]);
+ btf_16_sse2(cospi_p46_p18, cospi_m18_p46, x8[18], x8[29], x9[18], x9[29]);
+ btf_16_sse2(cospi_p14_p50, cospi_m50_p14, x8[19], x8[28], x9[19], x9[28]);
+ btf_16_sse2(cospi_p54_p10, cospi_m10_p54, x8[20], x8[27], x9[20], x9[27]);
+ btf_16_sse2(cospi_p22_p42, cospi_m42_p22, x8[21], x8[26], x9[21], x9[26]);
+ btf_16_sse2(cospi_p38_p26, cospi_m26_p38, x8[22], x8[25], x9[22], x9[25]);
+ btf_16_sse2(cospi_p06_p58, cospi_m58_p06, x8[23], x8[24], x9[23], x9[24]);
+ x9[32] = _mm_adds_epi16(x8[32], x8[33]);
+ x9[33] = _mm_subs_epi16(x8[32], x8[33]);
+ x9[34] = _mm_subs_epi16(x8[35], x8[34]);
+ x9[35] = _mm_adds_epi16(x8[35], x8[34]);
+ x9[36] = _mm_adds_epi16(x8[36], x8[37]);
+ x9[37] = _mm_subs_epi16(x8[36], x8[37]);
+ x9[38] = _mm_subs_epi16(x8[39], x8[38]);
+ x9[39] = _mm_adds_epi16(x8[39], x8[38]);
+ x9[40] = _mm_adds_epi16(x8[40], x8[41]);
+ x9[41] = _mm_subs_epi16(x8[40], x8[41]);
+ x9[42] = _mm_subs_epi16(x8[43], x8[42]);
+ x9[43] = _mm_adds_epi16(x8[43], x8[42]);
+ x9[44] = _mm_adds_epi16(x8[44], x8[45]);
+ x9[45] = _mm_subs_epi16(x8[44], x8[45]);
+ x9[46] = _mm_subs_epi16(x8[47], x8[46]);
+ x9[47] = _mm_adds_epi16(x8[47], x8[46]);
+ x9[48] = _mm_adds_epi16(x8[48], x8[49]);
+ x9[49] = _mm_subs_epi16(x8[48], x8[49]);
+ x9[50] = _mm_subs_epi16(x8[51], x8[50]);
+ x9[51] = _mm_adds_epi16(x8[51], x8[50]);
+ x9[52] = _mm_adds_epi16(x8[52], x8[53]);
+ x9[53] = _mm_subs_epi16(x8[52], x8[53]);
+ x9[54] = _mm_subs_epi16(x8[55], x8[54]);
+ x9[55] = _mm_adds_epi16(x8[55], x8[54]);
+ x9[56] = _mm_adds_epi16(x8[56], x8[57]);
+ x9[57] = _mm_subs_epi16(x8[56], x8[57]);
+ x9[58] = _mm_subs_epi16(x8[59], x8[58]);
+ x9[59] = _mm_adds_epi16(x8[59], x8[58]);
+ x9[60] = _mm_adds_epi16(x8[60], x8[61]);
+ x9[61] = _mm_subs_epi16(x8[60], x8[61]);
+ x9[62] = _mm_subs_epi16(x8[63], x8[62]);
+ x9[63] = _mm_adds_epi16(x8[63], x8[62]);
+
+ // stage 10
+ // Final rotations producing the odd-frequency coefficients (the 32
+ // odd-index outputs come from x10[32..63]); x10[0..31] pass through.
+ __m128i x10[64];
+ x10[0] = x9[0];
+ x10[1] = x9[1];
+ x10[2] = x9[2];
+ x10[3] = x9[3];
+ x10[4] = x9[4];
+ x10[5] = x9[5];
+ x10[6] = x9[6];
+ x10[7] = x9[7];
+ x10[8] = x9[8];
+ x10[9] = x9[9];
+ x10[10] = x9[10];
+ x10[11] = x9[11];
+ x10[12] = x9[12];
+ x10[13] = x9[13];
+ x10[14] = x9[14];
+ x10[15] = x9[15];
+ x10[16] = x9[16];
+ x10[17] = x9[17];
+ x10[18] = x9[18];
+ x10[19] = x9[19];
+ x10[20] = x9[20];
+ x10[21] = x9[21];
+ x10[22] = x9[22];
+ x10[23] = x9[23];
+ x10[24] = x9[24];
+ x10[25] = x9[25];
+ x10[26] = x9[26];
+ x10[27] = x9[27];
+ x10[28] = x9[28];
+ x10[29] = x9[29];
+ x10[30] = x9[30];
+ x10[31] = x9[31];
+ btf_16_sse2(cospi_p63_p01, cospi_m01_p63, x9[32], x9[63], x10[32], x10[63]);
+ btf_16_sse2(cospi_p31_p33, cospi_m33_p31, x9[33], x9[62], x10[33], x10[62]);
+ btf_16_sse2(cospi_p47_p17, cospi_m17_p47, x9[34], x9[61], x10[34], x10[61]);
+ btf_16_sse2(cospi_p15_p49, cospi_m49_p15, x9[35], x9[60], x10[35], x10[60]);
+ btf_16_sse2(cospi_p55_p09, cospi_m09_p55, x9[36], x9[59], x10[36], x10[59]);
+ btf_16_sse2(cospi_p23_p41, cospi_m41_p23, x9[37], x9[58], x10[37], x10[58]);
+ btf_16_sse2(cospi_p39_p25, cospi_m25_p39, x9[38], x9[57], x10[38], x10[57]);
+ btf_16_sse2(cospi_p07_p57, cospi_m57_p07, x9[39], x9[56], x10[39], x10[56]);
+ btf_16_sse2(cospi_p59_p05, cospi_m05_p59, x9[40], x9[55], x10[40], x10[55]);
+ btf_16_sse2(cospi_p27_p37, cospi_m37_p27, x9[41], x9[54], x10[41], x10[54]);
+ btf_16_sse2(cospi_p43_p21, cospi_m21_p43, x9[42], x9[53], x10[42], x10[53]);
+ btf_16_sse2(cospi_p11_p53, cospi_m53_p11, x9[43], x9[52], x10[43], x10[52]);
+ btf_16_sse2(cospi_p51_p13, cospi_m13_p51, x9[44], x9[51], x10[44], x10[51]);
+ btf_16_sse2(cospi_p19_p45, cospi_m45_p19, x9[45], x9[50], x10[45], x10[50]);
+ btf_16_sse2(cospi_p35_p29, cospi_m29_p35, x9[46], x9[49], x10[46], x10[49]);
+ btf_16_sse2(cospi_p03_p61, cospi_m61_p03, x9[47], x9[48], x10[47], x10[48]);
+
+ // stage 11
+ // Output permutation: coefficients are written out in the frequency
+ // (bit-reversed-style) order the FDCT butterflies leave them in, so
+ // output[k] ends up in natural frequency order.
+ output[0] = x10[0];
+ output[1] = x10[32];
+ output[2] = x10[16];
+ output[3] = x10[48];
+ output[4] = x10[8];
+ output[5] = x10[40];
+ output[6] = x10[24];
+ output[7] = x10[56];
+ output[8] = x10[4];
+ output[9] = x10[36];
+ output[10] = x10[20];
+ output[11] = x10[52];
+ output[12] = x10[12];
+ output[13] = x10[44];
+ output[14] = x10[28];
+ output[15] = x10[60];
+ output[16] = x10[2];
+ output[17] = x10[34];
+ output[18] = x10[18];
+ output[19] = x10[50];
+ output[20] = x10[10];
+ output[21] = x10[42];
+ output[22] = x10[26];
+ output[23] = x10[58];
+ output[24] = x10[6];
+ output[25] = x10[38];
+ output[26] = x10[22];
+ output[27] = x10[54];
+ output[28] = x10[14];
+ output[29] = x10[46];
+ output[30] = x10[30];
+ output[31] = x10[62];
+ output[32] = x10[1];
+ output[33] = x10[33];
+ output[34] = x10[17];
+ output[35] = x10[49];
+ output[36] = x10[9];
+ output[37] = x10[41];
+ output[38] = x10[25];
+ output[39] = x10[57];
+ output[40] = x10[5];
+ output[41] = x10[37];
+ output[42] = x10[21];
+ output[43] = x10[53];
+ output[44] = x10[13];
+ output[45] = x10[45];
+ output[46] = x10[29];
+ output[47] = x10[61];
+ output[48] = x10[3];
+ output[49] = x10[35];
+ output[50] = x10[19];
+ output[51] = x10[51];
+ output[52] = x10[11];
+ output[53] = x10[43];
+ output[54] = x10[27];
+ output[55] = x10[59];
+ output[56] = x10[7];
+ output[57] = x10[39];
+ output[58] = x10[23];
+ output[59] = x10[55];
+ output[60] = x10[15];
+ output[61] = x10[47];
+ output[62] = x10[31];
+ output[63] = x10[63];
+}
+
+ // 4-point forward ADST over four input vectors, using the sinpi basis
+ // (sinpi_arr). Products are accumulated in 32-bit via _mm_madd_epi16 on
+ // interleaved 16-bit pairs, rounded by (1 << (cos_bit - 1)) and shifted
+ // right by cos_bit, then packed back to 16 bits. The low 64 bits of the
+ // packed vectors hold rows 0/1 and the high 64 bits rows 2/3, which
+ // output[2]/output[3] extract with _mm_srli_si128.
+ // NOTE(review): only the low 4 lanes of each input vector appear to be
+ // consumed (everything goes through _mm_unpacklo_epi16) — confirm callers
+ // place the 4x4 block in the low halves.
+static void fadst4x4_new_sse2(const __m128i *input, __m128i *output,
+                              int8_t cos_bit) {
+ const int32_t *sinpi = sinpi_arr(cos_bit);
+ const __m128i sinpi_p01_p02 = pair_set_epi16(sinpi[1], sinpi[2]);
+ const __m128i sinpi_p04_m01 = pair_set_epi16(sinpi[4], -sinpi[1]);
+ const __m128i sinpi_p03_p04 = pair_set_epi16(sinpi[3], sinpi[4]);
+ const __m128i sinpi_m03_p02 = pair_set_epi16(-sinpi[3], sinpi[2]);
+ const __m128i sinpi_p03_p03 = _mm_set1_epi16((int16_t)sinpi[3]);
+ const __m128i __zero = _mm_setzero_si128();
+ const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+ // in7 = input[0] + input[1]; feeds the sinpi[3]*(x0 + x1) term below.
+ const __m128i in7 = _mm_add_epi16(input[0], input[1]);
+ __m128i u[8], v[8];
+
+ // Interleave rows into 16-bit pairs so each madd forms a*lo + b*hi.
+ // Pairing with __zero turns madd into a plain 16->32-bit widening multiply.
+ u[0] = _mm_unpacklo_epi16(input[0], input[1]);
+ u[1] = _mm_unpacklo_epi16(input[2], input[3]);
+ u[2] = _mm_unpacklo_epi16(in7, __zero);
+ u[3] = _mm_unpacklo_epi16(input[2], __zero);
+ u[4] = _mm_unpacklo_epi16(input[3], __zero);
+
+ v[0] = _mm_madd_epi16(u[0], sinpi_p01_p02); // s0 + s2
+ v[1] = _mm_madd_epi16(u[1], sinpi_p03_p04); // s4 + s5
+ v[2] = _mm_madd_epi16(u[2], sinpi_p03_p03); // x1
+ v[3] = _mm_madd_epi16(u[0], sinpi_p04_m01); // s1 - s3
+ v[4] = _mm_madd_epi16(u[1], sinpi_m03_p02); // -s4 + s6
+ v[5] = _mm_madd_epi16(u[3], sinpi_p03_p03); // s4
+ v[6] = _mm_madd_epi16(u[4], sinpi_p03_p03);
+
+ // Combine the partial sums into the four (pre-rounding) ADST outputs.
+ u[0] = _mm_add_epi32(v[0], v[1]);
+ u[1] = _mm_sub_epi32(v[2], v[6]);
+ u[2] = _mm_add_epi32(v[3], v[4]);
+ u[3] = _mm_sub_epi32(u[2], u[0]);
+ u[4] = _mm_slli_epi32(v[5], 2); // 4 * v[5]
+ u[5] = _mm_sub_epi32(u[4], v[5]); // 3 * v[5], as (v[5] << 2) - v[5]
+ u[6] = _mm_add_epi32(u[3], u[5]);
+
+ // Round-to-nearest then arithmetic shift by cos_bit.
+ v[0] = _mm_add_epi32(u[0], __rounding);
+ v[1] = _mm_add_epi32(u[1], __rounding);
+ v[2] = _mm_add_epi32(u[2], __rounding);
+ v[3] = _mm_add_epi32(u[6], __rounding);
+
+ u[0] = _mm_srai_epi32(v[0], cos_bit);
+ u[1] = _mm_srai_epi32(v[1], cos_bit);
+ u[2] = _mm_srai_epi32(v[2], cos_bit);
+ u[3] = _mm_srai_epi32(v[3], cos_bit);
+
+ // Pack 32->16 bits with saturation; rows 2/3 are the upper halves.
+ output[0] = _mm_packs_epi32(u[0], u[2]);
+ output[1] = _mm_packs_epi32(u[1], u[3]);
+ output[2] = _mm_srli_si128(output[0], 8);
+ output[3] = _mm_srli_si128(output[1], 8);
+}
+
// Forward 8-point ADST applied to a batch of 4 columns (each __m128i holds
// 4 signed 16-bit coefficients).  Seven butterfly stages; every rotation goes
// through the 4-lane helper btf_16_w4_sse2, which rounds by __rounding and
// shifts right by cos_bit.
static void fadst4x8_new_sse2(const __m128i *input, __m128i *output,
                              int8_t cos_bit) {
  const int32_t *cospi = cospi_arr(cos_bit);
  const __m128i __zero = _mm_setzero_si128();
  // Rounding offset for the >> cos_bit at the end of each rotation.
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));

  __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
  __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
  __m128i cospi_m48_p16 = pair_set_epi16(-cospi[48], cospi[16]);
  __m128i cospi_p04_p60 = pair_set_epi16(cospi[4], cospi[60]);
  __m128i cospi_p60_m04 = pair_set_epi16(cospi[60], -cospi[4]);
  __m128i cospi_p20_p44 = pair_set_epi16(cospi[20], cospi[44]);
  __m128i cospi_p44_m20 = pair_set_epi16(cospi[44], -cospi[20]);
  __m128i cospi_p36_p28 = pair_set_epi16(cospi[36], cospi[28]);
  __m128i cospi_p28_m36 = pair_set_epi16(cospi[28], -cospi[36]);
  __m128i cospi_p52_p12 = pair_set_epi16(cospi[52], cospi[12]);
  __m128i cospi_p12_m52 = pair_set_epi16(cospi[12], -cospi[52]);

  // stage 1: input reordering plus sign flips (the ADST input permutation).
  __m128i x1[8];
  x1[0] = input[0];
  x1[1] = _mm_subs_epi16(__zero, input[7]);
  x1[2] = _mm_subs_epi16(__zero, input[3]);
  x1[3] = input[4];
  x1[4] = _mm_subs_epi16(__zero, input[1]);
  x1[5] = input[6];
  x1[6] = input[2];
  x1[7] = _mm_subs_epi16(__zero, input[5]);

  // stage 2: +/-32 rotations on the two odd pairs.
  __m128i x2[8];
  x2[0] = x1[0];
  x2[1] = x1[1];
  btf_16_w4_sse2(&cospi_p32_p32, &cospi_p32_m32, __rounding, cos_bit, &x1[2],
                 &x1[3], &x2[2], &x2[3]);
  x2[4] = x1[4];
  x2[5] = x1[5];
  btf_16_w4_sse2(&cospi_p32_p32, &cospi_p32_m32, __rounding, cos_bit, &x1[6],
                 &x1[7], &x2[6], &x2[7]);

  // stage 3: saturating butterflies (adds_epi16/subs_epi16 clamp to int16).
  __m128i x3[8];
  x3[0] = _mm_adds_epi16(x2[0], x2[2]);
  x3[2] = _mm_subs_epi16(x2[0], x2[2]);
  x3[1] = _mm_adds_epi16(x2[1], x2[3]);
  x3[3] = _mm_subs_epi16(x2[1], x2[3]);
  x3[4] = _mm_adds_epi16(x2[4], x2[6]);
  x3[6] = _mm_subs_epi16(x2[4], x2[6]);
  x3[5] = _mm_adds_epi16(x2[5], x2[7]);
  x3[7] = _mm_subs_epi16(x2[5], x2[7]);

  // stage 4: 16/48 rotations on the upper half.
  __m128i x4[8];
  x4[0] = x3[0];
  x4[1] = x3[1];
  x4[2] = x3[2];
  x4[3] = x3[3];
  btf_16_w4_sse2(&cospi_p16_p48, &cospi_p48_m16, __rounding, cos_bit, &x3[4],
                 &x3[5], &x4[4], &x4[5]);
  btf_16_w4_sse2(&cospi_m48_p16, &cospi_p16_p48, __rounding, cos_bit, &x3[6],
                 &x3[7], &x4[6], &x4[7]);

  // stage 5: butterflies combining lower and upper halves.
  __m128i x5[8];
  x5[0] = _mm_adds_epi16(x4[0], x4[4]);
  x5[4] = _mm_subs_epi16(x4[0], x4[4]);
  x5[1] = _mm_adds_epi16(x4[1], x4[5]);
  x5[5] = _mm_subs_epi16(x4[1], x4[5]);
  x5[2] = _mm_adds_epi16(x4[2], x4[6]);
  x5[6] = _mm_subs_epi16(x4[2], x4[6]);
  x5[3] = _mm_adds_epi16(x4[3], x4[7]);
  x5[7] = _mm_subs_epi16(x4[3], x4[7]);

  // stage 6: final odd-angle rotations producing the transform outputs.
  __m128i x6[8];
  btf_16_w4_sse2(&cospi_p04_p60, &cospi_p60_m04, __rounding, cos_bit, &x5[0],
                 &x5[1], &x6[0], &x6[1]);
  btf_16_w4_sse2(&cospi_p20_p44, &cospi_p44_m20, __rounding, cos_bit, &x5[2],
                 &x5[3], &x6[2], &x6[3]);
  btf_16_w4_sse2(&cospi_p36_p28, &cospi_p28_m36, __rounding, cos_bit, &x5[4],
                 &x5[5], &x6[4], &x6[5]);
  btf_16_w4_sse2(&cospi_p52_p12, &cospi_p12_m52, __rounding, cos_bit, &x5[6],
                 &x5[7], &x6[6], &x6[7]);

  // stage 7: output permutation.
  output[0] = x6[1];
  output[1] = x6[6];
  output[2] = x6[3];
  output[3] = x6[4];
  output[4] = x6[5];
  output[5] = x6[2];
  output[6] = x6[7];
  output[7] = x6[0];
}
+
// Forward 4-point ADST applied to a batch of 8 columns.  Uses the sinpi-based
// formulation of the AV1 ADST4; the low and high 4 lanes of each 8-lane input
// vector are processed in parallel (the *_lo / *_hi pairs mirror each other).
static void fadst8x4_new_sse2(const __m128i *input, __m128i *output,
                              int8_t cos_bit) {
  const int32_t *sinpi = sinpi_arr(cos_bit);
  const __m128i sinpi_p01_p02 = pair_set_epi16(sinpi[1], sinpi[2]);
  const __m128i sinpi_p04_m01 = pair_set_epi16(sinpi[4], -sinpi[1]);
  const __m128i sinpi_p03_p04 = pair_set_epi16(sinpi[3], sinpi[4]);
  const __m128i sinpi_m03_p02 = pair_set_epi16(-sinpi[3], sinpi[2]);
  const __m128i sinpi_p03_p03 = _mm_set1_epi16((int16_t)sinpi[3]);
  const __m128i __zero = _mm_setzero_si128();
  // Rounding offset for the final >> cos_bit.
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
  // in0 + in1; combined with -sinpi[3]*in3 below to form
  // sinpi[3] * (in0 + in1 - in3).
  const __m128i in7 = _mm_add_epi16(input[0], input[1]);
  __m128i u_lo[8], u_hi[8], v_lo[8], v_hi[8];

  // Interleave 16-bit pairs so _mm_madd_epi16 computes a*b + c*d per lane.
  u_lo[0] = _mm_unpacklo_epi16(input[0], input[1]);
  u_hi[0] = _mm_unpackhi_epi16(input[0], input[1]);
  u_lo[1] = _mm_unpacklo_epi16(input[2], input[3]);
  u_hi[1] = _mm_unpackhi_epi16(input[2], input[3]);
  u_lo[2] = _mm_unpacklo_epi16(in7, __zero);
  u_hi[2] = _mm_unpackhi_epi16(in7, __zero);
  u_lo[3] = _mm_unpacklo_epi16(input[2], __zero);
  u_hi[3] = _mm_unpackhi_epi16(input[2], __zero);
  u_lo[4] = _mm_unpacklo_epi16(input[3], __zero);
  u_hi[4] = _mm_unpackhi_epi16(input[3], __zero);

  v_lo[0] = _mm_madd_epi16(u_lo[0], sinpi_p01_p02);  // s0 + s2
  v_hi[0] = _mm_madd_epi16(u_hi[0], sinpi_p01_p02);  // s0 + s2
  v_lo[1] = _mm_madd_epi16(u_lo[1], sinpi_p03_p04);  // s4 + s5
  v_hi[1] = _mm_madd_epi16(u_hi[1], sinpi_p03_p04);  // s4 + s5
  v_lo[2] = _mm_madd_epi16(u_lo[2], sinpi_p03_p03);  // x1
  v_hi[2] = _mm_madd_epi16(u_hi[2], sinpi_p03_p03);  // x1
  v_lo[3] = _mm_madd_epi16(u_lo[0], sinpi_p04_m01);  // s1 - s3
  v_hi[3] = _mm_madd_epi16(u_hi[0], sinpi_p04_m01);  // s1 - s3
  v_lo[4] = _mm_madd_epi16(u_lo[1], sinpi_m03_p02);  // -s4 + s6
  v_hi[4] = _mm_madd_epi16(u_hi[1], sinpi_m03_p02);  // -s4 + s6
  v_lo[5] = _mm_madd_epi16(u_lo[3], sinpi_p03_p03);  // s4
  v_hi[5] = _mm_madd_epi16(u_hi[3], sinpi_p03_p03);  // s4
  v_lo[6] = _mm_madd_epi16(u_lo[4], sinpi_p03_p03);  // sinpi[3] * in3
  v_hi[6] = _mm_madd_epi16(u_hi[4], sinpi_p03_p03);

  u_lo[0] = _mm_add_epi32(v_lo[0], v_lo[1]);
  u_hi[0] = _mm_add_epi32(v_hi[0], v_hi[1]);
  // sinpi[3] * (in0 + in1 - in3)
  u_lo[1] = _mm_sub_epi32(v_lo[2], v_lo[6]);
  u_hi[1] = _mm_sub_epi32(v_hi[2], v_hi[6]);
  u_lo[2] = _mm_add_epi32(v_lo[3], v_lo[4]);
  u_hi[2] = _mm_add_epi32(v_hi[3], v_hi[4]);
  u_lo[3] = _mm_sub_epi32(u_lo[2], u_lo[0]);
  u_hi[3] = _mm_sub_epi32(u_hi[2], u_hi[0]);
  // (v[5] << 2) - v[5] == 3 * sinpi[3] * in2, without a multiply.
  u_lo[4] = _mm_slli_epi32(v_lo[5], 2);
  u_hi[4] = _mm_slli_epi32(v_hi[5], 2);
  u_lo[5] = _mm_sub_epi32(u_lo[4], v_lo[5]);
  u_hi[5] = _mm_sub_epi32(u_hi[4], v_hi[5]);
  u_lo[6] = _mm_add_epi32(u_lo[3], u_lo[5]);
  u_hi[6] = _mm_add_epi32(u_hi[3], u_hi[5]);

  // Round and shift the four 32-bit results back to cos_bit precision.
  v_lo[0] = _mm_add_epi32(u_lo[0], __rounding);
  v_hi[0] = _mm_add_epi32(u_hi[0], __rounding);
  v_lo[1] = _mm_add_epi32(u_lo[1], __rounding);
  v_hi[1] = _mm_add_epi32(u_hi[1], __rounding);
  v_lo[2] = _mm_add_epi32(u_lo[2], __rounding);
  v_hi[2] = _mm_add_epi32(u_hi[2], __rounding);
  v_lo[3] = _mm_add_epi32(u_lo[6], __rounding);
  v_hi[3] = _mm_add_epi32(u_hi[6], __rounding);

  u_lo[0] = _mm_srai_epi32(v_lo[0], cos_bit);
  u_hi[0] = _mm_srai_epi32(v_hi[0], cos_bit);
  u_lo[1] = _mm_srai_epi32(v_lo[1], cos_bit);
  u_hi[1] = _mm_srai_epi32(v_hi[1], cos_bit);
  u_lo[2] = _mm_srai_epi32(v_lo[2], cos_bit);
  u_hi[2] = _mm_srai_epi32(v_hi[2], cos_bit);
  u_lo[3] = _mm_srai_epi32(v_lo[3], cos_bit);
  u_hi[3] = _mm_srai_epi32(v_hi[3], cos_bit);

  // Saturating pack of the lo/hi 32-bit halves back to 8x16-bit lanes.
  output[0] = _mm_packs_epi32(u_lo[0], u_hi[0]);
  output[1] = _mm_packs_epi32(u_lo[1], u_hi[1]);
  output[2] = _mm_packs_epi32(u_lo[2], u_hi[2]);
  output[3] = _mm_packs_epi32(u_lo[3], u_hi[3]);
}
+
// Forward 16-point ADST applied to a batch of 8 columns (each __m128i holds
// 8 signed 16-bit coefficients).  Nine stages: input permutation with sign
// flips, alternating rotation (btf_16_sse2) and saturating-butterfly stages,
// and a final output permutation.
static void fadst8x16_new_sse2(const __m128i *input, __m128i *output,
                               int8_t cos_bit) {
  const int32_t *cospi = cospi_arr(cos_bit);
  const __m128i __zero = _mm_setzero_si128();
  // Rounding offset used by the btf_16_sse2 rotations (>> cos_bit).
  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));

  __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
  __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
  __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
  __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
  __m128i cospi_m48_p16 = pair_set_epi16(-cospi[48], cospi[16]);
  __m128i cospi_p08_p56 = pair_set_epi16(cospi[8], cospi[56]);
  __m128i cospi_p56_m08 = pair_set_epi16(cospi[56], -cospi[8]);
  __m128i cospi_p40_p24 = pair_set_epi16(cospi[40], cospi[24]);
  __m128i cospi_p24_m40 = pair_set_epi16(cospi[24], -cospi[40]);
  __m128i cospi_m56_p08 = pair_set_epi16(-cospi[56], cospi[8]);
  __m128i cospi_m24_p40 = pair_set_epi16(-cospi[24], cospi[40]);
  __m128i cospi_p02_p62 = pair_set_epi16(cospi[2], cospi[62]);
  __m128i cospi_p62_m02 = pair_set_epi16(cospi[62], -cospi[2]);
  __m128i cospi_p10_p54 = pair_set_epi16(cospi[10], cospi[54]);
  __m128i cospi_p54_m10 = pair_set_epi16(cospi[54], -cospi[10]);
  __m128i cospi_p18_p46 = pair_set_epi16(cospi[18], cospi[46]);
  __m128i cospi_p46_m18 = pair_set_epi16(cospi[46], -cospi[18]);
  __m128i cospi_p26_p38 = pair_set_epi16(cospi[26], cospi[38]);
  __m128i cospi_p38_m26 = pair_set_epi16(cospi[38], -cospi[26]);
  __m128i cospi_p34_p30 = pair_set_epi16(cospi[34], cospi[30]);
  __m128i cospi_p30_m34 = pair_set_epi16(cospi[30], -cospi[34]);
  __m128i cospi_p42_p22 = pair_set_epi16(cospi[42], cospi[22]);
  __m128i cospi_p22_m42 = pair_set_epi16(cospi[22], -cospi[42]);
  __m128i cospi_p50_p14 = pair_set_epi16(cospi[50], cospi[14]);
  __m128i cospi_p14_m50 = pair_set_epi16(cospi[14], -cospi[50]);
  __m128i cospi_p58_p06 = pair_set_epi16(cospi[58], cospi[6]);
  __m128i cospi_p06_m58 = pair_set_epi16(cospi[6], -cospi[58]);

  // stage 1: input reordering plus sign flips (the ADST input permutation).
  __m128i x1[16];
  x1[0] = input[0];
  x1[1] = _mm_subs_epi16(__zero, input[15]);
  x1[2] = _mm_subs_epi16(__zero, input[7]);
  x1[3] = input[8];
  x1[4] = _mm_subs_epi16(__zero, input[3]);
  x1[5] = input[12];
  x1[6] = input[4];
  x1[7] = _mm_subs_epi16(__zero, input[11]);
  x1[8] = _mm_subs_epi16(__zero, input[1]);
  x1[9] = input[14];
  x1[10] = input[6];
  x1[11] = _mm_subs_epi16(__zero, input[9]);
  x1[12] = input[2];
  x1[13] = _mm_subs_epi16(__zero, input[13]);
  x1[14] = _mm_subs_epi16(__zero, input[5]);
  x1[15] = input[10];

  // stage 2: +/-32 rotations on every other pair.
  __m128i x2[16];
  x2[0] = x1[0];
  x2[1] = x1[1];
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[2], x1[3], x2[2], x2[3]);
  x2[4] = x1[4];
  x2[5] = x1[5];
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[6], x1[7], x2[6], x2[7]);
  x2[8] = x1[8];
  x2[9] = x1[9];
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[10], x1[11], x2[10], x2[11]);
  x2[12] = x1[12];
  x2[13] = x1[13];
  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[14], x1[15], x2[14], x2[15]);

  // stage 3: saturating butterflies within each group of four.
  __m128i x3[16];
  x3[0] = _mm_adds_epi16(x2[0], x2[2]);
  x3[2] = _mm_subs_epi16(x2[0], x2[2]);
  x3[1] = _mm_adds_epi16(x2[1], x2[3]);
  x3[3] = _mm_subs_epi16(x2[1], x2[3]);
  x3[4] = _mm_adds_epi16(x2[4], x2[6]);
  x3[6] = _mm_subs_epi16(x2[4], x2[6]);
  x3[5] = _mm_adds_epi16(x2[5], x2[7]);
  x3[7] = _mm_subs_epi16(x2[5], x2[7]);
  x3[8] = _mm_adds_epi16(x2[8], x2[10]);
  x3[10] = _mm_subs_epi16(x2[8], x2[10]);
  x3[9] = _mm_adds_epi16(x2[9], x2[11]);
  x3[11] = _mm_subs_epi16(x2[9], x2[11]);
  x3[12] = _mm_adds_epi16(x2[12], x2[14]);
  x3[14] = _mm_subs_epi16(x2[12], x2[14]);
  x3[13] = _mm_adds_epi16(x2[13], x2[15]);
  x3[15] = _mm_subs_epi16(x2[13], x2[15]);

  // stage 4: 16/48 rotations.
  __m128i x4[16];
  x4[0] = x3[0];
  x4[1] = x3[1];
  x4[2] = x3[2];
  x4[3] = x3[3];
  btf_16_sse2(cospi_p16_p48, cospi_p48_m16, x3[4], x3[5], x4[4], x4[5]);
  btf_16_sse2(cospi_m48_p16, cospi_p16_p48, x3[6], x3[7], x4[6], x4[7]);
  x4[8] = x3[8];
  x4[9] = x3[9];
  x4[10] = x3[10];
  x4[11] = x3[11];
  btf_16_sse2(cospi_p16_p48, cospi_p48_m16, x3[12], x3[13], x4[12], x4[13]);
  btf_16_sse2(cospi_m48_p16, cospi_p16_p48, x3[14], x3[15], x4[14], x4[15]);

  // stage 5: butterflies across groups of eight.
  __m128i x5[16];
  x5[0] = _mm_adds_epi16(x4[0], x4[4]);
  x5[4] = _mm_subs_epi16(x4[0], x4[4]);
  x5[1] = _mm_adds_epi16(x4[1], x4[5]);
  x5[5] = _mm_subs_epi16(x4[1], x4[5]);
  x5[2] = _mm_adds_epi16(x4[2], x4[6]);
  x5[6] = _mm_subs_epi16(x4[2], x4[6]);
  x5[3] = _mm_adds_epi16(x4[3], x4[7]);
  x5[7] = _mm_subs_epi16(x4[3], x4[7]);
  x5[8] = _mm_adds_epi16(x4[8], x4[12]);
  x5[12] = _mm_subs_epi16(x4[8], x4[12]);
  x5[9] = _mm_adds_epi16(x4[9], x4[13]);
  x5[13] = _mm_subs_epi16(x4[9], x4[13]);
  x5[10] = _mm_adds_epi16(x4[10], x4[14]);
  x5[14] = _mm_subs_epi16(x4[10], x4[14]);
  x5[11] = _mm_adds_epi16(x4[11], x4[15]);
  x5[15] = _mm_subs_epi16(x4[11], x4[15]);

  // stage 6: 8/56 and 40/24 rotations on the upper half.
  __m128i x6[16];
  x6[0] = x5[0];
  x6[1] = x5[1];
  x6[2] = x5[2];
  x6[3] = x5[3];
  x6[4] = x5[4];
  x6[5] = x5[5];
  x6[6] = x5[6];
  x6[7] = x5[7];
  btf_16_sse2(cospi_p08_p56, cospi_p56_m08, x5[8], x5[9], x6[8], x6[9]);
  btf_16_sse2(cospi_p40_p24, cospi_p24_m40, x5[10], x5[11], x6[10], x6[11]);
  btf_16_sse2(cospi_m56_p08, cospi_p08_p56, x5[12], x5[13], x6[12], x6[13]);
  btf_16_sse2(cospi_m24_p40, cospi_p40_p24, x5[14], x5[15], x6[14], x6[15]);

  // stage 7: butterflies combining lower and upper halves.
  __m128i x7[16];
  x7[0] = _mm_adds_epi16(x6[0], x6[8]);
  x7[8] = _mm_subs_epi16(x6[0], x6[8]);
  x7[1] = _mm_adds_epi16(x6[1], x6[9]);
  x7[9] = _mm_subs_epi16(x6[1], x6[9]);
  x7[2] = _mm_adds_epi16(x6[2], x6[10]);
  x7[10] = _mm_subs_epi16(x6[2], x6[10]);
  x7[3] = _mm_adds_epi16(x6[3], x6[11]);
  x7[11] = _mm_subs_epi16(x6[3], x6[11]);
  x7[4] = _mm_adds_epi16(x6[4], x6[12]);
  x7[12] = _mm_subs_epi16(x6[4], x6[12]);
  x7[5] = _mm_adds_epi16(x6[5], x6[13]);
  x7[13] = _mm_subs_epi16(x6[5], x6[13]);
  x7[6] = _mm_adds_epi16(x6[6], x6[14]);
  x7[14] = _mm_subs_epi16(x6[6], x6[14]);
  x7[7] = _mm_adds_epi16(x6[7], x6[15]);
  x7[15] = _mm_subs_epi16(x6[7], x6[15]);

  // stage 8: final odd-angle rotations producing the transform outputs.
  __m128i x8[16];
  btf_16_sse2(cospi_p02_p62, cospi_p62_m02, x7[0], x7[1], x8[0], x8[1]);
  btf_16_sse2(cospi_p10_p54, cospi_p54_m10, x7[2], x7[3], x8[2], x8[3]);
  btf_16_sse2(cospi_p18_p46, cospi_p46_m18, x7[4], x7[5], x8[4], x8[5]);
  btf_16_sse2(cospi_p26_p38, cospi_p38_m26, x7[6], x7[7], x8[6], x8[7]);
  btf_16_sse2(cospi_p34_p30, cospi_p30_m34, x7[8], x7[9], x8[8], x8[9]);
  btf_16_sse2(cospi_p42_p22, cospi_p22_m42, x7[10], x7[11], x8[10], x8[11]);
  btf_16_sse2(cospi_p50_p14, cospi_p14_m50, x7[12], x7[13], x8[12], x8[13]);
  btf_16_sse2(cospi_p58_p06, cospi_p06_m58, x7[14], x7[15], x8[14], x8[15]);

  // stage 9: output permutation.
  output[0] = x8[1];
  output[1] = x8[14];
  output[2] = x8[3];
  output[3] = x8[12];
  output[4] = x8[5];
  output[5] = x8[10];
  output[6] = x8[7];
  output[7] = x8[8];
  output[8] = x8[9];
  output[9] = x8[6];
  output[10] = x8[11];
  output[11] = x8[4];
  output[12] = x8[13];
  output[13] = x8[2];
  output[14] = x8[15];
  output[15] = x8[0];
}
+
// Column (vertical) 1D kernels for 4x4 blocks, indexed by TX_TYPE.
static const transform_1d_sse2 col_txfm4x4_arr[TX_TYPES] = {
  fdct4x4_new_sse2,       // DCT_DCT
  fadst4x4_new_sse2,      // ADST_DCT
  fdct4x4_new_sse2,       // DCT_ADST
  fadst4x4_new_sse2,      // ADST_ADST
  fadst4x4_new_sse2,      // FLIPADST_DCT
  fdct4x4_new_sse2,       // DCT_FLIPADST
  fadst4x4_new_sse2,      // FLIPADST_FLIPADST
  fadst4x4_new_sse2,      // ADST_FLIPADST
  fadst4x4_new_sse2,      // FLIPADST_ADST
  fidentity4x4_new_sse2,  // IDTX
  fdct4x4_new_sse2,       // V_DCT
  fidentity4x4_new_sse2,  // H_DCT
  fadst4x4_new_sse2,      // V_ADST
  fidentity4x4_new_sse2,  // H_ADST
  fadst4x4_new_sse2,      // V_FLIPADST
  fidentity4x4_new_sse2   // H_FLIPADST
};
+
// Row (horizontal) 1D kernels for 4x4 blocks, indexed by TX_TYPE.
static const transform_1d_sse2 row_txfm4x4_arr[TX_TYPES] = {
  fdct4x4_new_sse2,       // DCT_DCT
  fdct4x4_new_sse2,       // ADST_DCT
  fadst4x4_new_sse2,      // DCT_ADST
  fadst4x4_new_sse2,      // ADST_ADST
  fdct4x4_new_sse2,       // FLIPADST_DCT
  fadst4x4_new_sse2,      // DCT_FLIPADST
  fadst4x4_new_sse2,      // FLIPADST_FLIPADST
  fadst4x4_new_sse2,      // ADST_FLIPADST
  fadst4x4_new_sse2,      // FLIPADST_ADST
  fidentity4x4_new_sse2,  // IDTX
  fidentity4x4_new_sse2,  // V_DCT
  fdct4x4_new_sse2,       // H_DCT
  fidentity4x4_new_sse2,  // V_ADST
  fadst4x4_new_sse2,      // H_ADST
  fidentity4x4_new_sse2,  // V_FLIPADST
  fadst4x4_new_sse2       // H_FLIPADST
};
+
// Column kernels for 4-wide, 8-high blocks, indexed by TX_TYPE.
static const transform_1d_sse2 col_txfm4x8_arr[TX_TYPES] = {
  fdct4x8_new_sse2,       // DCT_DCT
  fadst4x8_new_sse2,      // ADST_DCT
  fdct4x8_new_sse2,       // DCT_ADST
  fadst4x8_new_sse2,      // ADST_ADST
  fadst4x8_new_sse2,      // FLIPADST_DCT
  fdct4x8_new_sse2,       // DCT_FLIPADST
  fadst4x8_new_sse2,      // FLIPADST_FLIPADST
  fadst4x8_new_sse2,      // ADST_FLIPADST
  fadst4x8_new_sse2,      // FLIPADST_ADST
  fidentity8x8_new_sse2,  // IDTX
  fdct4x8_new_sse2,       // V_DCT
  fidentity8x8_new_sse2,  // H_DCT
  fadst4x8_new_sse2,      // V_ADST
  fidentity8x8_new_sse2,  // H_ADST
  fadst4x8_new_sse2,      // V_FLIPADST
  fidentity8x8_new_sse2   // H_FLIPADST
};
+
// Row kernels (4-point transform over 8 lanes), indexed by TX_TYPE.
static const transform_1d_sse2 row_txfm8x4_arr[TX_TYPES] = {
  fdct8x4_new_sse2,       // DCT_DCT
  fdct8x4_new_sse2,       // ADST_DCT
  fadst8x4_new_sse2,      // DCT_ADST
  fadst8x4_new_sse2,      // ADST_ADST
  fdct8x4_new_sse2,       // FLIPADST_DCT
  fadst8x4_new_sse2,      // DCT_FLIPADST
  fadst8x4_new_sse2,      // FLIPADST_FLIPADST
  fadst8x4_new_sse2,      // ADST_FLIPADST
  fadst8x4_new_sse2,      // FLIPADST_ADST
  fidentity8x4_new_sse2,  // IDTX
  fidentity8x4_new_sse2,  // V_DCT
  fdct8x4_new_sse2,       // H_DCT
  fidentity8x4_new_sse2,  // V_ADST
  fadst8x4_new_sse2,      // H_ADST
  fidentity8x4_new_sse2,  // V_FLIPADST
  fadst8x4_new_sse2       // H_FLIPADST
};
+
// Column kernels (4-point transform over 8 lanes), indexed by TX_TYPE.
static const transform_1d_sse2 col_txfm8x4_arr[TX_TYPES] = {
  fdct8x4_new_sse2,       // DCT_DCT
  fadst8x4_new_sse2,      // ADST_DCT
  fdct8x4_new_sse2,       // DCT_ADST
  fadst8x4_new_sse2,      // ADST_ADST
  fadst8x4_new_sse2,      // FLIPADST_DCT
  fdct8x4_new_sse2,       // DCT_FLIPADST
  fadst8x4_new_sse2,      // FLIPADST_FLIPADST
  fadst8x4_new_sse2,      // ADST_FLIPADST
  fadst8x4_new_sse2,      // FLIPADST_ADST
  fidentity8x4_new_sse2,  // IDTX
  fdct8x4_new_sse2,       // V_DCT
  fidentity8x4_new_sse2,  // H_DCT
  fadst8x4_new_sse2,      // V_ADST
  fidentity8x4_new_sse2,  // H_ADST
  fadst8x4_new_sse2,      // V_FLIPADST
  fidentity8x4_new_sse2   // H_FLIPADST
};
+
// Row kernels (8-point transform over 4 lanes), indexed by TX_TYPE.
static const transform_1d_sse2 row_txfm4x8_arr[TX_TYPES] = {
  fdct4x8_new_sse2,       // DCT_DCT
  fdct4x8_new_sse2,       // ADST_DCT
  fadst4x8_new_sse2,      // DCT_ADST
  fadst4x8_new_sse2,      // ADST_ADST
  fdct4x8_new_sse2,       // FLIPADST_DCT
  fadst4x8_new_sse2,      // DCT_FLIPADST
  fadst4x8_new_sse2,      // FLIPADST_FLIPADST
  fadst4x8_new_sse2,      // ADST_FLIPADST
  fadst4x8_new_sse2,      // FLIPADST_ADST
  fidentity8x8_new_sse2,  // IDTX
  fidentity8x8_new_sse2,  // V_DCT
  fdct4x8_new_sse2,       // H_DCT
  fidentity8x8_new_sse2,  // V_ADST
  fadst4x8_new_sse2,      // H_ADST
  fidentity8x8_new_sse2,  // V_FLIPADST
  fadst4x8_new_sse2       // H_FLIPADST
};
+
// Column kernels for 8x8 blocks, indexed by TX_TYPE.
static const transform_1d_sse2 col_txfm8x8_arr[TX_TYPES] = {
  fdct8x8_new_sse2,       // DCT_DCT
  fadst8x8_new_sse2,      // ADST_DCT
  fdct8x8_new_sse2,       // DCT_ADST
  fadst8x8_new_sse2,      // ADST_ADST
  fadst8x8_new_sse2,      // FLIPADST_DCT
  fdct8x8_new_sse2,       // DCT_FLIPADST
  fadst8x8_new_sse2,      // FLIPADST_FLIPADST
  fadst8x8_new_sse2,      // ADST_FLIPADST
  fadst8x8_new_sse2,      // FLIPADST_ADST
  fidentity8x8_new_sse2,  // IDTX
  fdct8x8_new_sse2,       // V_DCT
  fidentity8x8_new_sse2,  // H_DCT
  fadst8x8_new_sse2,      // V_ADST
  fidentity8x8_new_sse2,  // H_ADST
  fadst8x8_new_sse2,      // V_FLIPADST
  fidentity8x8_new_sse2,  // H_FLIPADST
};
+
// Row kernels for 8x8 blocks, indexed by TX_TYPE.
static const transform_1d_sse2 row_txfm8x8_arr[TX_TYPES] = {
  fdct8x8_new_sse2,       // DCT_DCT
  fdct8x8_new_sse2,       // ADST_DCT
  fadst8x8_new_sse2,      // DCT_ADST
  fadst8x8_new_sse2,      // ADST_ADST
  fdct8x8_new_sse2,       // FLIPADST_DCT
  fadst8x8_new_sse2,      // DCT_FLIPADST
  fadst8x8_new_sse2,      // FLIPADST_FLIPADST
  fadst8x8_new_sse2,      // ADST_FLIPADST
  fadst8x8_new_sse2,      // FLIPADST_ADST
  fidentity8x8_new_sse2,  // IDTX
  fidentity8x8_new_sse2,  // V_DCT
  fdct8x8_new_sse2,       // H_DCT
  fidentity8x8_new_sse2,  // V_ADST
  fadst8x8_new_sse2,      // H_ADST
  fidentity8x8_new_sse2,  // V_FLIPADST
  fadst8x8_new_sse2       // H_FLIPADST
};
+
// Column kernels (16-point transform over 8 lanes), indexed by TX_TYPE.
static const transform_1d_sse2 col_txfm8x16_arr[TX_TYPES] = {
  fdct8x16_new_sse2,       // DCT_DCT
  fadst8x16_new_sse2,      // ADST_DCT
  fdct8x16_new_sse2,       // DCT_ADST
  fadst8x16_new_sse2,      // ADST_ADST
  fadst8x16_new_sse2,      // FLIPADST_DCT
  fdct8x16_new_sse2,       // DCT_FLIPADST
  fadst8x16_new_sse2,      // FLIPADST_FLIPADST
  fadst8x16_new_sse2,      // ADST_FLIPADST
  fadst8x16_new_sse2,      // FLIPADST_ADST
  fidentity8x16_new_sse2,  // IDTX
  fdct8x16_new_sse2,       // V_DCT
  fidentity8x16_new_sse2,  // H_DCT
  fadst8x16_new_sse2,      // V_ADST
  fidentity8x16_new_sse2,  // H_ADST
  fadst8x16_new_sse2,      // V_FLIPADST
  fidentity8x16_new_sse2   // H_FLIPADST
};
+
// Row kernels (16-point transform over 8 lanes), indexed by TX_TYPE.
static const transform_1d_sse2 row_txfm8x16_arr[TX_TYPES] = {
  fdct8x16_new_sse2,       // DCT_DCT
  fdct8x16_new_sse2,       // ADST_DCT
  fadst8x16_new_sse2,      // DCT_ADST
  fadst8x16_new_sse2,      // ADST_ADST
  fdct8x16_new_sse2,       // FLIPADST_DCT
  fadst8x16_new_sse2,      // DCT_FLIPADST
  fadst8x16_new_sse2,      // FLIPADST_FLIPADST
  fadst8x16_new_sse2,      // ADST_FLIPADST
  fadst8x16_new_sse2,      // FLIPADST_ADST
  fidentity8x16_new_sse2,  // IDTX
  fidentity8x16_new_sse2,  // V_DCT
  fdct8x16_new_sse2,       // H_DCT
  fidentity8x16_new_sse2,  // V_ADST
  fadst8x16_new_sse2,      // H_ADST
  fidentity8x16_new_sse2,  // V_FLIPADST
  fadst8x16_new_sse2       // H_FLIPADST
};
+
// Row kernels (32-point transform over 8 lanes), indexed by TX_TYPE.
// NULL entries mark tx_type combinations that are invalid for 32-point
// dimensions (no 32-point ADST in AV1), so they must never be dispatched.
static const transform_1d_sse2 row_txfm8x32_arr[TX_TYPES] = {
  av1_fdct8x32_new_sse2,   // DCT_DCT
  NULL,                    // ADST_DCT
  NULL,                    // DCT_ADST
  NULL,                    // ADST_ADST
  NULL,                    // FLIPADST_DCT
  NULL,                    // DCT_FLIPADST
  NULL,                    // FLIPADST_FLIPADST
  NULL,                    // ADST_FLIPADST
  NULL,                    // FLIPADST_ADST
  fidentity8x32_new_sse2,  // IDTX
  fidentity8x32_new_sse2,  // V_DCT
  av1_fdct8x32_new_sse2,   // H_DCT
  NULL,                    // V_ADST
  NULL,                    // H_ADST
  NULL,                    // V_FLIPADST
  NULL                     // H_FLIPADST
};
+
+void av1_lowbd_fwd_txfm2d_4x4_sse2(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m128i buf0[4], buf1[4], *buf;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X4];
+ const int txw_idx = get_txw_idx(TX_4X4);
+ const int txh_idx = get_txh_idx(TX_4X4);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = 4;
+ const int height = 4;
+ const transform_1d_sse2 col_txfm = col_txfm4x4_arr[tx_type];
+ const transform_1d_sse2 row_txfm = row_txfm4x4_arr[tx_type];
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_w4_flip(input, stride, buf0, height);
+ } else {
+ load_buffer_16bit_to_16bit_w4(input, stride, buf0, height);
+ }
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ transpose_16bit_4x4(buf0, buf1);
+
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_sse2(buf1, buf, width);
+ } else {
+ buf = buf1;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit(buf, width, shift[2]);
+ store_buffer_16bit_to_32bit_w4(buf, output, height, width);
+}
+
+void av1_lowbd_fwd_txfm2d_4x8_sse2(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)stride;
+ (void)bd;
+ __m128i buf0[8], buf1[8], *buf;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X8];
+ const int txw_idx = get_txw_idx(TX_4X8);
+ const int txh_idx = get_txh_idx(TX_4X8);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = 4;
+ const int height = 8;
+ const transform_1d_sse2 col_txfm = col_txfm4x8_arr[tx_type];
+ const transform_1d_sse2 row_txfm = row_txfm8x4_arr[tx_type];
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_w4_flip(input, stride, buf0, height);
+ } else {
+ load_buffer_16bit_to_16bit_w4(input, stride, buf0, height);
+ }
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ transpose_16bit_4x8(buf0, buf1);
+
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_sse2(buf1, buf, width);
+ } else {
+ buf = buf1;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit(buf, width, shift[2]);
+ store_rect_buffer_16bit_to_32bit_w8(buf, output, height, width);
+}
+
+void av1_lowbd_fwd_txfm2d_4x16_sse2(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m128i buf0[16], buf1[16];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X16];
+ const int txw_idx = get_txw_idx(TX_4X16);
+ const int txh_idx = get_txh_idx(TX_4X16);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = 4;
+ const int height = 16;
+ const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type];
+ const transform_1d_sse2 row_txfm = row_txfm8x4_arr[tx_type];
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ if (ud_flip) {
+ load_buffer_16bit_to_16bit_w4_flip(input, stride, buf0, height);
+ } else {
+ load_buffer_16bit_to_16bit_w4(input, stride, buf0, height);
+ }
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ transpose_16bit_4x8(buf0, buf1);
+ transpose_16bit_4x8(buf0 + 8, buf1 + 8);
+
+ for (int i = 0; i < 2; i++) {
+ __m128i *buf;
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_sse2(buf1 + 8 * i, buf, width);
+ } else {
+ buf = buf1 + 8 * i;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit(buf, width, shift[2]);
+ store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
+ }
+}
+
// Low-bitdepth 2D forward transform for an 8x4 block.
// Pipeline: load 8-wide rows (optional up/down flip), pre-shift, 4-point
// column transform over 8 lanes, mid-shift, transpose, optional left/right
// mirror, 8-point row transform over 4 lanes, post-shift, rect store.
void av1_lowbd_fwd_txfm2d_8x4_sse2(const int16_t *input, int32_t *output,
                                   int stride, TX_TYPE tx_type, int bd) {
  (void)bd;  // unused in the lowbd path
  __m128i buf0[8], buf1[8], *buf;
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X4];
  const int txw_idx = get_txw_idx(TX_8X4);
  const int txh_idx = get_txh_idx(TX_8X4);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 8;
  const int height = 4;
  const transform_1d_sse2 col_txfm = col_txfm8x4_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm4x8_arr[tx_type];
  int ud_flip, lr_flip;

  get_flip_cfg(tx_type, &ud_flip, &lr_flip);
  if (ud_flip)
    load_buffer_16bit_to_16bit_flip(input, stride, buf0, height);
  else
    load_buffer_16bit_to_16bit(input, stride, buf0, height);
  round_shift_16bit(buf0, height, shift[0]);
  col_txfm(buf0, buf0, cos_bit_col);
  round_shift_16bit(buf0, height, shift[1]);
  transpose_16bit_8x8(buf0, buf1);

  // Row pass; mirror columns first when lr_flip is set.
  if (lr_flip) {
    buf = buf0;
    flip_buf_sse2(buf1, buf, width);
  } else {
    buf = buf1;
  }
  row_txfm(buf, buf, cos_bit_row);
  round_shift_16bit(buf, width, shift[2]);
  // Rectangular (non-square) blocks use the rect store variant.
  store_rect_buffer_16bit_to_32bit_w4(buf, output, height, width);
}
+
+void av1_lowbd_fwd_txfm2d_8x8_sse2(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ __m128i buf0[8], buf1[8], *buf;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X8];
+ const int txw_idx = get_txw_idx(TX_8X8);
+ const int txh_idx = get_txh_idx(TX_8X8);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = 8;
+ const int height = 8;
+ const transform_1d_sse2 col_txfm = col_txfm8x8_arr[tx_type];
+ const transform_1d_sse2 row_txfm = row_txfm8x8_arr[tx_type];
+ int ud_flip, lr_flip;
+
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ if (ud_flip)
+ load_buffer_16bit_to_16bit_flip(input, stride, buf0, height);
+ else
+ load_buffer_16bit_to_16bit(input, stride, buf0, height);
+ round_shift_16bit(buf0, height, shift[0]);
+ col_txfm(buf0, buf0, cos_bit_col);
+ round_shift_16bit(buf0, height, shift[1]);
+ transpose_16bit_8x8(buf0, buf1);
+
+ if (lr_flip) {
+ buf = buf0;
+ flip_buf_sse2(buf1, buf, width);
+ } else {
+ buf = buf1;
+ }
+ row_txfm(buf, buf, cos_bit_row);
+ round_shift_16bit(buf, width, shift[2]);
+ store_buffer_16bit_to_32bit_w8(buf, output, height, width);
+}
+
// Low-bitdepth 2D forward transform for an 8x16 block.
// One 16-point column pass, then the result is transposed as two 8x8 tiles
// and each tile gets an 8-point row pass before the rect-scaled store.
void av1_lowbd_fwd_txfm2d_8x16_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;  // unused in the lowbd path
  __m128i buf0[16], buf1[16];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X16];
  const int txw_idx = get_txw_idx(TX_8X16);
  const int txh_idx = get_txh_idx(TX_8X16);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 8;
  const int height = 16;
  const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x8_arr[tx_type];
  int ud_flip, lr_flip;

  get_flip_cfg(tx_type, &ud_flip, &lr_flip);
  if (ud_flip) {
    load_buffer_16bit_to_16bit_flip(input, stride, buf0, height);
  } else {
    load_buffer_16bit_to_16bit(input, stride, buf0, height);
  }
  round_shift_16bit(buf0, height, shift[0]);
  col_txfm(buf0, buf0, cos_bit_col);
  round_shift_16bit(buf0, height, shift[1]);
  transpose_16bit_8x8(buf0, buf1);
  transpose_16bit_8x8(buf0 + 8, buf1 + 8);

  // Row pass, one 8-row tile at a time.
  for (int i = 0; i < 2; i++) {
    __m128i *buf;
    if (lr_flip) {
      buf = buf0;
      flip_buf_sse2(buf1 + width * i, buf, width);
    } else {
      buf = buf1 + width * i;
    }
    row_txfm(buf, buf, cos_bit_row);
    round_shift_16bit(buf, width, shift[2]);
    store_rect_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
  }
}
+
// Low-bitdepth 2D forward transform for an 8x32 block.
// One 32-point column pass, then four 8x8 transposed tiles each get an
// 8-point row pass.  NOTE(review): col_txfm comes from col_txfm8x32_arr,
// which is declared outside this view; like row_txfm8x32_arr it presumably
// has NULL entries for tx_types invalid at 32-point size — callers must not
// dispatch those.
void av1_lowbd_fwd_txfm2d_8x32_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;  // unused in the lowbd path
  __m128i buf0[32], buf1[32];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X32];
  const int txw_idx = get_txw_idx(TX_8X32);
  const int txh_idx = get_txh_idx(TX_8X32);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 8;
  const int height = 32;
  const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x8_arr[tx_type];
  int ud_flip, lr_flip;

  get_flip_cfg(tx_type, &ud_flip, &lr_flip);
  if (ud_flip) {
    load_buffer_16bit_to_16bit_flip(input, stride, buf0, height);
  } else {
    load_buffer_16bit_to_16bit(input, stride, buf0, height);
  }
  round_shift_16bit(buf0, height, shift[0]);
  col_txfm(buf0, buf0, cos_bit_col);
  round_shift_16bit(buf0, height, shift[1]);
  transpose_16bit_8x8(buf0, buf1);
  transpose_16bit_8x8(buf0 + 8, buf1 + 8);
  transpose_16bit_8x8(buf0 + 16, buf1 + 16);
  transpose_16bit_8x8(buf0 + 24, buf1 + 24);

  // Row pass, one 8-row tile at a time.
  for (int i = 0; i < 4; i++) {
    __m128i *buf;
    if (lr_flip) {
      buf = buf0;
      flip_buf_sse2(buf1 + width * i, buf, width);
    } else {
      buf = buf1 + width * i;
    }
    row_txfm(buf, buf, cos_bit_row);
    round_shift_16bit(buf, width, shift[2]);
    store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
  }
}
+
// Low-bitdepth 2D forward transform for a 16x4 block.
// The column pass runs twice (left/right 8-column halves); the transposed
// halves are concatenated in buf1 before a single 16-point row pass.
void av1_lowbd_fwd_txfm2d_16x4_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;  // unused in the lowbd path
  __m128i buf0[16], buf1[16];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X4];
  const int txw_idx = get_txw_idx(TX_16X4);
  const int txh_idx = get_txh_idx(TX_16X4);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 16;
  const int height = 4;
  const transform_1d_sse2 col_txfm = col_txfm8x4_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
  __m128i *buf;
  int ud_flip, lr_flip;

  get_flip_cfg(tx_type, &ud_flip, &lr_flip);
  // Column pass over each 8-column half.
  for (int i = 0; i < 2; i++) {
    if (ud_flip) {
      load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
    } else {
      load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
    }
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    transpose_16bit_8x4(buf0, buf1 + 8 * i);
  }

  // Row pass over the full 16-wide rows.
  if (lr_flip) {
    buf = buf0;
    flip_buf_sse2(buf1, buf, width);
  } else {
    buf = buf1;
  }
  row_txfm(buf, buf, cos_bit_row);
  round_shift_16bit(buf, width, shift[2]);
  store_buffer_16bit_to_32bit_w4(buf, output, height, width);
}
+
// Low-bitdepth 2D forward transform for a 16x8 block.
// The column pass runs twice (left/right 8-column halves); the transposed
// halves are concatenated in buf1 before a single 16-point row pass and a
// rectangular-scaled store.
void av1_lowbd_fwd_txfm2d_16x8_sse2(const int16_t *input, int32_t *output,
                                    int stride, TX_TYPE tx_type, int bd) {
  (void)bd;  // unused in the lowbd path
  __m128i buf0[16], buf1[16];
  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X8];
  const int txw_idx = get_txw_idx(TX_16X8);
  const int txh_idx = get_txh_idx(TX_16X8);
  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
  const int width = 16;
  const int height = 8;
  const transform_1d_sse2 col_txfm = col_txfm8x8_arr[tx_type];
  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
  __m128i *buf;
  int ud_flip, lr_flip;

  get_flip_cfg(tx_type, &ud_flip, &lr_flip);
  // Column pass over each 8-column half.
  for (int i = 0; i < 2; i++) {
    if (ud_flip) {
      load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
    } else {
      load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
    }
    round_shift_16bit(buf0, height, shift[0]);
    col_txfm(buf0, buf0, cos_bit_col);
    round_shift_16bit(buf0, height, shift[1]);
    transpose_16bit_8x8(buf0, buf1 + 8 * i);
  }

  // Row pass over the full 16-wide rows.
  if (lr_flip) {
    buf = buf0;
    flip_buf_sse2(buf1, buf, width);
  } else {
    buf = buf1;
  }
  row_txfm(buf, buf, cos_bit_row);
  round_shift_16bit(buf, width, shift[2]);
  store_rect_buffer_16bit_to_32bit_w8(buf, output, height, width);
}
+
+// Low-bitdepth 2D forward transform for a 16x16 block (SSE2).
+// Column pass: two 8-column batches, each producing 16 rows that are
+// transposed as two 8x8 tiles into buf1.  Row pass: two 8-row batches,
+// each a 16-point 1D transform.
+void av1_lowbd_fwd_txfm2d_16x16_sse2(const int16_t *input, int32_t *output,
+                                     int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;  // lowbd path: bit depth parameter is unused
+  __m128i buf0[16], buf1[32];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X16];
+  const int txw_idx = get_txw_idx(TX_16X16);
+  const int txh_idx = get_txh_idx(TX_16X16);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = 16;
+  const int height = 16;
+  const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type];
+  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
+  int ud_flip, lr_flip;
+
+  get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+  for (int i = 0; i < 2; i++) {
+    if (ud_flip) {
+      load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
+    } else {
+      load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+    }
+    round_shift_16bit(buf0, height, shift[0]);
+    col_txfm(buf0, buf0, cos_bit_col);
+    round_shift_16bit(buf0, height, shift[1]);
+    // Transpose the 8x16 column result as two stacked 8x8 tiles.
+    transpose_16bit_8x8(buf0, buf1 + 0 * width + 8 * i);
+    transpose_16bit_8x8(buf0 + 8, buf1 + 1 * width + 8 * i);
+  }
+
+  for (int i = 0; i < 2; i++) {
+    __m128i *buf;
+    if (lr_flip) {
+      buf = buf0;
+      flip_buf_sse2(buf1 + width * i, buf, width);
+    } else {
+      buf = buf1 + width * i;
+    }
+    row_txfm(buf, buf, cos_bit_row);
+    round_shift_16bit(buf, width, shift[2]);
+    store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
+  }
+}
+
+// Low-bitdepth 2D forward transform for a 16x32 block (SSE2).
+// The 32-point column kernel table contains NULL for the ADST-based types
+// (see col_txfm8x32_arr), so the SSE2 path only runs when both 1D kernels
+// exist; otherwise it falls back to the generic C transform.
+void av1_lowbd_fwd_txfm2d_16x32_sse2(const int16_t *input, int32_t *output,
+                                     int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;  // lowbd path: bit depth parameter is unused
+  __m128i buf0[32], buf1[64];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X32];
+  const int txw_idx = get_txw_idx(TX_16X32);
+  const int txh_idx = get_txh_idx(TX_16X32);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = 16;
+  const int height = 32;
+  const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
+  const transform_1d_sse2 row_txfm = row_txfm8x16_arr[tx_type];
+
+  if (col_txfm != NULL && row_txfm != NULL) {
+    int ud_flip, lr_flip;
+    get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+    // Column pass: two 8-column batches, each transposed as four 8x8 tiles.
+    for (int i = 0; i < 2; i++) {
+      if (ud_flip) {
+        load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
+      } else {
+        load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+      }
+      round_shift_16bit(buf0, height, shift[0]);
+      col_txfm(buf0, buf0, cos_bit_col);
+      round_shift_16bit(buf0, height, shift[1]);
+      transpose_16bit_8x8(buf0 + 0 * 8, buf1 + 0 * width + 8 * i);
+      transpose_16bit_8x8(buf0 + 1 * 8, buf1 + 1 * width + 8 * i);
+      transpose_16bit_8x8(buf0 + 2 * 8, buf1 + 2 * width + 8 * i);
+      transpose_16bit_8x8(buf0 + 3 * 8, buf1 + 3 * width + 8 * i);
+    }
+
+    // Row pass: four 8-row batches of a 16-point transform; rectangular
+    // store (2:1 aspect scaling presumably applied inside -- confirm).
+    for (int i = 0; i < 4; i++) {
+      __m128i *buf;
+      if (lr_flip) {
+        buf = buf0;
+        flip_buf_sse2(buf1 + width * i, buf, width);
+      } else {
+        buf = buf1 + width * i;
+      }
+      row_txfm(buf, buf, cos_bit_row);
+      round_shift_16bit(buf, width, shift[2]);
+      store_rect_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
+    }
+  } else {
+    av1_fwd_txfm2d_16x32_c(input, output, stride, tx_type, bd);
+  }
+}
+
+// Low-bitdepth 2D forward transform for a 32x8 block (SSE2).
+// The 32-point row kernel table has NULL entries for ADST-based tx types,
+// so the vector path only runs when both 1D kernels exist; otherwise fall
+// back to the generic C implementation.
+void av1_lowbd_fwd_txfm2d_32x8_sse2(const int16_t *input, int32_t *output,
+                                    int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;  // lowbd path: bit depth parameter is unused
+  __m128i buf0[32], buf1[32];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X8];
+  const int txw_idx = get_txw_idx(TX_32X8);
+  const int txh_idx = get_txh_idx(TX_32X8);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = 32;
+  const int height = 8;
+  const transform_1d_sse2 col_txfm = col_txfm8x8_arr[tx_type];
+  const transform_1d_sse2 row_txfm = row_txfm8x32_arr[tx_type];
+
+  if (col_txfm != NULL && row_txfm != NULL) {
+    int ud_flip, lr_flip;
+    get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+    // Column pass: four 8-column batches of a height-8 transform.
+    for (int i = 0; i < 4; i++) {
+      if (ud_flip) {
+        load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
+      } else {
+        load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+      }
+      round_shift_16bit(buf0, height, shift[0]);
+      col_txfm(buf0, buf0, cos_bit_col);
+      round_shift_16bit(buf0, height, shift[1]);
+      transpose_16bit_8x8(buf0, buf1 + 0 * width + 8 * i);
+    }
+
+    // Row pass: height 8 means a single batch of the 32-point transform.
+    for (int i = 0; i < 1; i++) {
+      __m128i *buf;
+      if (lr_flip) {
+        buf = buf0;
+        flip_buf_sse2(buf1 + width * i, buf, width);
+      } else {
+        buf = buf1 + width * i;
+      }
+      row_txfm(buf, buf, cos_bit_row);
+      round_shift_16bit(buf, width, shift[2]);
+      store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
+    }
+  } else {
+    // BUG FIX: the fallback previously invoked av1_fwd_txfm2d_32x16_c,
+    // which computes a 32x16 transform and writes 512 coefficients into
+    // this block's 256-entry 32x8 output (out-of-bounds write + wrong
+    // coefficients).  Use the matching 32x8 C transform instead.
+    av1_fwd_txfm2d_32x8_c(input, output, stride, tx_type, bd);
+  }
+}
+
+// Low-bitdepth 2D forward transform for a 32x16 block (SSE2).
+// Falls back to the C implementation when either 1D kernel is NULL
+// (ADST-based types have no 32-point SSE2 kernel).
+void av1_lowbd_fwd_txfm2d_32x16_sse2(const int16_t *input, int32_t *output,
+                                     int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;  // lowbd path: bit depth parameter is unused
+  __m128i buf0[32], buf1[64];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X16];
+  const int txw_idx = get_txw_idx(TX_32X16);
+  const int txh_idx = get_txh_idx(TX_32X16);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = 32;
+  const int height = 16;
+  const transform_1d_sse2 col_txfm = col_txfm8x16_arr[tx_type];
+  const transform_1d_sse2 row_txfm = row_txfm8x32_arr[tx_type];
+
+  if (col_txfm != NULL && row_txfm != NULL) {
+    int ud_flip, lr_flip;
+    get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+    // Column pass: four 8-column batches; each 8x16 result is transposed
+    // as two stacked 8x8 tiles into buf1.
+    for (int i = 0; i < 4; i++) {
+      if (ud_flip) {
+        load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
+      } else {
+        load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+      }
+      round_shift_16bit(buf0, height, shift[0]);
+      col_txfm(buf0, buf0, cos_bit_col);
+      round_shift_16bit(buf0, height, shift[1]);
+      transpose_16bit_8x8(buf0, buf1 + 0 * width + 8 * i);
+      transpose_16bit_8x8(buf0 + 8, buf1 + 1 * width + 8 * i);
+    }
+
+    // Row pass: two 8-row batches of the 32-point transform; rectangular
+    // store (2:1 aspect scaling presumably applied inside -- confirm).
+    for (int i = 0; i < 2; i++) {
+      __m128i *buf;
+      if (lr_flip) {
+        buf = buf0;
+        flip_buf_sse2(buf1 + width * i, buf, width);
+      } else {
+        buf = buf1 + width * i;
+      }
+      row_txfm(buf, buf, cos_bit_row);
+      round_shift_16bit(buf, width, shift[2]);
+      store_rect_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
+    }
+  } else {
+    av1_fwd_txfm2d_32x16_c(input, output, stride, tx_type, bd);
+  }
+}
+
+// Low-bitdepth 2D forward transform for a 32x32 block (SSE2).
+// Both passes use the 32-point kernel table, which is NULL for ADST-based
+// types; those fall back to the C implementation.
+void av1_lowbd_fwd_txfm2d_32x32_sse2(const int16_t *input, int32_t *output,
+                                     int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;  // lowbd path: bit depth parameter is unused
+  __m128i buf0[32], buf1[128];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X32];
+  const int txw_idx = get_txw_idx(TX_32X32);
+  const int txh_idx = get_txh_idx(TX_32X32);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = 32;
+  const int height = 32;
+  const transform_1d_sse2 col_txfm = col_txfm8x32_arr[tx_type];
+  const transform_1d_sse2 row_txfm = row_txfm8x32_arr[tx_type];
+
+  if (col_txfm != NULL && row_txfm != NULL) {
+    int ud_flip, lr_flip;
+    get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+    // Column pass: four 8-column batches, each transposed as four 8x8
+    // tiles into buf1.
+    for (int i = 0; i < 4; i++) {
+      if (ud_flip) {
+        load_buffer_16bit_to_16bit_flip(input + 8 * i, stride, buf0, height);
+      } else {
+        load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+      }
+      round_shift_16bit(buf0, height, shift[0]);
+      col_txfm(buf0, buf0, cos_bit_col);
+      round_shift_16bit(buf0, height, shift[1]);
+      transpose_16bit_8x8(buf0 + 0 * 8, buf1 + 0 * width + 8 * i);
+      transpose_16bit_8x8(buf0 + 1 * 8, buf1 + 1 * width + 8 * i);
+      transpose_16bit_8x8(buf0 + 2 * 8, buf1 + 2 * width + 8 * i);
+      transpose_16bit_8x8(buf0 + 3 * 8, buf1 + 3 * width + 8 * i);
+    }
+
+    // Row pass: four 8-row batches of the 32-point transform.
+    for (int i = 0; i < 4; i++) {
+      __m128i *buf;
+      if (lr_flip) {
+        buf = buf0;
+        flip_buf_sse2(buf1 + width * i, buf, width);
+      } else {
+        buf = buf1 + width * i;
+      }
+      row_txfm(buf, buf, cos_bit_row);
+      round_shift_16bit(buf, width, shift[2]);
+      store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, height, width);
+    }
+  } else {
+    av1_fwd_txfm2d_32x32_c(input, output, stride, tx_type, bd);
+  }
+}
+
+// Low-bitdepth 2D forward transform for a 64x16 block (SSE2).
+// 64-point transforms are DCT_DCT only; the caller is expected to enforce
+// this (the tx_type argument is asserted, then ignored).  Only 32 of the
+// 64 row outputs are stored -- AV1 keeps just the low-frequency half of
+// 64-point transforms -- and the remaining area is zeroed explicitly.
+void av1_lowbd_fwd_txfm2d_64x16_sse2(const int16_t *input, int32_t *output,
+                                     int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;
+  (void)tx_type;
+  assert(tx_type == DCT_DCT);
+  const TX_SIZE tx_size = TX_64X16;
+  __m128i buf0[64], buf1[128];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+  const int txw_idx = get_txw_idx(tx_size);
+  const int txh_idx = get_txh_idx(tx_size);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = tx_size_wide[tx_size];
+  const int height = tx_size_high[tx_size];
+  const transform_1d_sse2 col_txfm = fdct8x16_new_sse2;
+  const transform_1d_sse2 row_txfm = av1_fdct8x64_new_sse2;
+  const int width_div8 = (width >> 3);
+  const int height_div8 = (height >> 3);
+
+  // Column pass: eight 8-column batches of the 16-point DCT.
+  for (int i = 0; i < width_div8; i++) {
+    load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+    round_shift_16bit(buf0, height, shift[0]);
+    col_txfm(buf0, buf0, cos_bit_col);
+    round_shift_16bit(buf0, height, shift[1]);
+    for (int j = 0; j < height_div8; ++j) {
+      transpose_16bit_8x8(buf0 + j * 8, buf1 + j * width + 8 * i);
+    }
+  }
+
+  // Row pass: 64-point DCT per 8-row batch; store only the first 32
+  // coefficients of each row (dimensions 16x32 in the packed output).
+  for (int i = 0; i < height_div8; i++) {
+    __m128i *buf = buf1 + width * i;
+    row_txfm(buf, buf, cos_bit_row);
+    round_shift_16bit(buf, width, shift[2]);
+    store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, 16, 32);
+  }
+  // Zero out the bottom 16x32 area.
+  memset(output + 16 * 32, 0, 16 * 32 * sizeof(*output));
+}
+
+// Low-bitdepth 2D forward transform for a 16x64 block (SSE2).
+// DCT_DCT only (asserted).  Only the first AOMMIN(4, height_div8) = 4
+// row batches (32 output rows) are computed, matching AV1's zeroing of
+// the high-frequency half of 64-point transforms; the remaining output
+// rows are presumably expected to be zero already -- NOTE(review):
+// unlike the 64x16 variant there is no explicit memset here, confirm
+// the caller's output buffer handling.
+void av1_lowbd_fwd_txfm2d_16x64_sse2(const int16_t *input, int32_t *output,
+                                     int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;
+  (void)tx_type;
+  assert(tx_type == DCT_DCT);
+  const TX_SIZE tx_size = TX_16X64;
+  __m128i buf0[64], buf1[128];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+  const int txw_idx = get_txw_idx(tx_size);
+  const int txh_idx = get_txh_idx(tx_size);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = tx_size_wide[tx_size];
+  const int height = tx_size_high[tx_size];
+  const transform_1d_sse2 col_txfm = av1_fdct8x64_new_sse2;
+  const transform_1d_sse2 row_txfm = fdct8x16_new_sse2;
+  const int width_div8 = (width >> 3);
+  const int height_div8 = (height >> 3);
+
+  // Column pass: two 8-column batches of the 64-point DCT.
+  for (int i = 0; i < width_div8; i++) {
+    load_buffer_16bit_to_16bit(input + 8 * i, stride, buf0, height);
+    round_shift_16bit(buf0, height, shift[0]);
+    col_txfm(buf0, buf0, cos_bit_col);
+    round_shift_16bit(buf0, height, shift[1]);
+    for (int j = 0; j < height_div8; ++j) {
+      transpose_16bit_8x8(buf0 + j * 8, buf1 + j * width + 8 * i);
+    }
+  }
+
+  // Row pass: only the low 32 rows of coefficients are kept.
+  for (int i = 0; i < AOMMIN(4, height_div8); i++) {
+    __m128i *buf = buf1 + width * i;
+    row_txfm(buf, buf, cos_bit_row);
+    round_shift_16bit(buf, width, shift[2]);
+    store_buffer_16bit_to_32bit_w8(buf, output + 8 * i, 32, 16);
+  }
+}
+
+// Per-TX_SIZE dispatch table for the lowbd SSE2 forward transforms.
+// NULL entries (64x64, 32x64, 64x32) have no SSE2 kernel; the dispatcher
+// av1_lowbd_fwd_txfm_sse2 falls back to the C path for them.
+static FwdTxfm2dFunc fwd_txfm2d_func_ls[TX_SIZES_ALL] = {
+  av1_lowbd_fwd_txfm2d_4x4_sse2,    // 4x4 transform
+  av1_lowbd_fwd_txfm2d_8x8_sse2,    // 8x8 transform
+  av1_lowbd_fwd_txfm2d_16x16_sse2,  // 16x16 transform
+  av1_lowbd_fwd_txfm2d_32x32_sse2,  // 32x32 transform
+  NULL,                             // 64x64 transform
+  av1_lowbd_fwd_txfm2d_4x8_sse2,    // 4x8 transform
+  av1_lowbd_fwd_txfm2d_8x4_sse2,    // 8x4 transform
+  av1_lowbd_fwd_txfm2d_8x16_sse2,   // 8x16 transform
+  av1_lowbd_fwd_txfm2d_16x8_sse2,   // 16x8 transform
+  av1_lowbd_fwd_txfm2d_16x32_sse2,  // 16x32 transform
+  av1_lowbd_fwd_txfm2d_32x16_sse2,  // 32x16 transform
+  NULL,                             // 32x64 transform
+  NULL,                             // 64x32 transform
+  av1_lowbd_fwd_txfm2d_4x16_sse2,   // 4x16 transform
+  av1_lowbd_fwd_txfm2d_16x4_sse2,   // 16x4 transform
+  av1_lowbd_fwd_txfm2d_8x32_sse2,   // 8x32 transform
+  av1_lowbd_fwd_txfm2d_32x8_sse2,   // 32x8 transform
+  av1_lowbd_fwd_txfm2d_16x64_sse2,  // 16x64 transform
+  av1_lowbd_fwd_txfm2d_64x16_sse2,  // 64x16 transform
+};
+
+// Dispatch a lowbd forward 2D transform to the matching SSE2 kernel.
+// Falls back to the C implementation when no SSE2 kernel exists for the
+// tx size, or for the lossless 4x4 case.
+void av1_lowbd_fwd_txfm_sse2(const int16_t *src_diff, tran_low_t *coeff,
+                             int diff_stride, TxfmParam *txfm_param) {
+  const FwdTxfm2dFunc txfm_func = fwd_txfm2d_func_ls[txfm_param->tx_size];
+  const int use_c_path =
+      (txfm_func == NULL) ||
+      (txfm_param->lossless && txfm_param->tx_size == TX_4X4);
+
+  if (use_c_path) {
+    av1_lowbd_fwd_txfm_c(src_diff, coeff, diff_stride, txfm_param);
+  } else {
+    txfm_func(src_diff, coeff, diff_stride, txfm_param->tx_type,
+              txfm_param->bd);
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.h b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.h
new file mode 100644
index 0000000000..3cb869a8fe
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_fwd_txfm_sse2.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#ifndef AOM_AV1_ENCODER_X86_AV1_FWD_TXFM_SSE2_H_
+#define AOM_AV1_ENCODER_X86_AV1_FWD_TXFM_SSE2_H_
+
+#include <immintrin.h>
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/x86/transpose_sse2.h"
+#include "aom_dsp/x86/txfm_common_sse2.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_fdct8x32_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit);
+void av1_fdct8x64_new_sse2(const __m128i *input, __m128i *output,
+ int8_t cos_bit);
+
+// 4x4 identity transform: scale each coefficient by sqrt(2) with rounding.
+// Interleaving with the constant 1 lets scale_round_sse2 do a 16x16->32
+// madd where the rounding term rides in the odd lanes; only the low four
+// 16-bit lanes of each vector are meaningful for a width-4 block.
+static INLINE void fidentity4x4_new_sse2(const __m128i *const input,
+                                         __m128i *const output,
+                                         const int8_t cos_bit) {
+  (void)cos_bit;  // identity transforms need no cosine table
+  const __m128i one = _mm_set1_epi16(1);
+
+  for (int i = 0; i < 4; ++i) {
+    const __m128i a = _mm_unpacklo_epi16(input[i], one);
+    const __m128i b = scale_round_sse2(a, NewSqrt2);
+    output[i] = _mm_packs_epi32(b, b);
+  }
+}
+
+// 8-wide, height-4 identity transform: scale all eight 16-bit lanes of
+// each of the 4 rows by sqrt(2) with rounding, saturating on repack.
+static INLINE void fidentity8x4_new_sse2(const __m128i *const input,
+                                         __m128i *const output,
+                                         const int8_t cos_bit) {
+  (void)cos_bit;  // identity transforms need no cosine table
+  const __m128i one = _mm_set1_epi16(1);
+
+  for (int i = 0; i < 4; ++i) {
+    // Interleave with 1 so scale_round_sse2 can fold rounding into a madd.
+    const __m128i a_lo = _mm_unpacklo_epi16(input[i], one);
+    const __m128i a_hi = _mm_unpackhi_epi16(input[i], one);
+    const __m128i b_lo = scale_round_sse2(a_lo, NewSqrt2);
+    const __m128i b_hi = scale_round_sse2(a_hi, NewSqrt2);
+    output[i] = _mm_packs_epi32(b_lo, b_hi);
+  }
+}
+
+// 8x8 identity transform: out[i] = 2 * in[i] with signed 16-bit
+// saturation (the 8x8 identity scale factor is exactly 2).
+static INLINE void fidentity8x8_new_sse2(const __m128i *input, __m128i *output,
+                                         int8_t cos_bit) {
+  (void)cos_bit;  // identity transforms need no cosine table
+  for (int i = 0; i < 8; ++i) {
+    output[i] = _mm_adds_epi16(input[i], input[i]);
+  }
+}
+
+// 8-point forward DCT-II over eight packed 16-bit columns (SSE2).
+// Classic butterfly decomposition: saturating add/sub stages plus paired
+// cosine rotations via btf_16_sse2.  Output is in the standard AV1
+// bit-reversed-ish coefficient order (DC, odd, even-of-even, ...).
+static INLINE void fdct8x8_new_sse2(const __m128i *input, __m128i *output,
+                                    int8_t cos_bit) {
+  const int32_t *cospi = cospi_arr(cos_bit);
+  // NOTE(review): __rounding looks unused but is presumably referenced
+  // inside the btf_16_sse2 macro (as is cos_bit) -- confirm against
+  // txfm_common_sse2.h before removing.
+  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+  const __m128i cospi_m32_p32 = pair_set_epi16(-cospi[32], cospi[32]);
+  const __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+  const __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+  const __m128i cospi_p48_p16 = pair_set_epi16(cospi[48], cospi[16]);
+  const __m128i cospi_m16_p48 = pair_set_epi16(-cospi[16], cospi[48]);
+  const __m128i cospi_p56_p08 = pair_set_epi16(cospi[56], cospi[8]);
+  const __m128i cospi_m08_p56 = pair_set_epi16(-cospi[8], cospi[56]);
+  const __m128i cospi_p24_p40 = pair_set_epi16(cospi[24], cospi[40]);
+  const __m128i cospi_m40_p24 = pair_set_epi16(-cospi[40], cospi[24]);
+
+  // stage 1: mirror-sum/difference of the 8 inputs.
+  __m128i x1[8];
+  x1[0] = _mm_adds_epi16(input[0], input[7]);
+  x1[7] = _mm_subs_epi16(input[0], input[7]);
+  x1[1] = _mm_adds_epi16(input[1], input[6]);
+  x1[6] = _mm_subs_epi16(input[1], input[6]);
+  x1[2] = _mm_adds_epi16(input[2], input[5]);
+  x1[5] = _mm_subs_epi16(input[2], input[5]);
+  x1[3] = _mm_adds_epi16(input[3], input[4]);
+  x1[4] = _mm_subs_epi16(input[3], input[4]);
+
+  // stage 2: fold the even half again; rotate the middle odd pair.
+  __m128i x2[8];
+  x2[0] = _mm_adds_epi16(x1[0], x1[3]);
+  x2[3] = _mm_subs_epi16(x1[0], x1[3]);
+  x2[1] = _mm_adds_epi16(x1[1], x1[2]);
+  x2[2] = _mm_subs_epi16(x1[1], x1[2]);
+  x2[4] = x1[4];
+  btf_16_sse2(cospi_m32_p32, cospi_p32_p32, x1[5], x1[6], x2[5], x2[6]);
+  x2[7] = x1[7];
+
+  // stage 3: final even-half rotations; combine the odd half.
+  __m128i x3[8];
+  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x2[0], x2[1], x3[0], x3[1]);
+  btf_16_sse2(cospi_p48_p16, cospi_m16_p48, x2[2], x2[3], x3[2], x3[3]);
+  x3[4] = _mm_adds_epi16(x2[4], x2[5]);
+  x3[5] = _mm_subs_epi16(x2[4], x2[5]);
+  x3[6] = _mm_subs_epi16(x2[7], x2[6]);
+  x3[7] = _mm_adds_epi16(x2[7], x2[6]);
+
+  // stage 4 and 5: odd-half rotations, written directly into the
+  // permuted output positions.
+  output[0] = x3[0];
+  output[4] = x3[1];
+  output[2] = x3[2];
+  output[6] = x3[3];
+  btf_16_sse2(cospi_p56_p08, cospi_m08_p56, x3[4], x3[7], output[1], output[7]);
+  btf_16_sse2(cospi_p24_p40, cospi_m40_p24, x3[5], x3[6], output[5], output[3]);
+}
+
+// 8-point forward ADST over eight packed 16-bit columns (SSE2).
+// Implements the AV1 ADST-8 as a sign-permuted lattice of btf_16_sse2
+// rotations; the final in-place rotations write directly into the
+// permuted output order.
+static INLINE void fadst8x8_new_sse2(const __m128i *input, __m128i *output,
+                                     int8_t cos_bit) {
+  const int32_t *cospi = cospi_arr(cos_bit);
+  const __m128i __zero = _mm_setzero_si128();
+  // NOTE(review): __rounding appears unused but is presumably consumed by
+  // the btf_16_sse2 macro -- confirm in txfm_common_sse2.h.
+  const __m128i __rounding = _mm_set1_epi32(1 << (cos_bit - 1));
+
+  const __m128i cospi_p32_p32 = pair_set_epi16(cospi[32], cospi[32]);
+  const __m128i cospi_p32_m32 = pair_set_epi16(cospi[32], -cospi[32]);
+  const __m128i cospi_p16_p48 = pair_set_epi16(cospi[16], cospi[48]);
+  const __m128i cospi_p48_m16 = pair_set_epi16(cospi[48], -cospi[16]);
+  const __m128i cospi_m48_p16 = pair_set_epi16(-cospi[48], cospi[16]);
+  const __m128i cospi_p04_p60 = pair_set_epi16(cospi[4], cospi[60]);
+  const __m128i cospi_p60_m04 = pair_set_epi16(cospi[60], -cospi[4]);
+  const __m128i cospi_p20_p44 = pair_set_epi16(cospi[20], cospi[44]);
+  const __m128i cospi_p44_m20 = pair_set_epi16(cospi[44], -cospi[20]);
+  const __m128i cospi_p36_p28 = pair_set_epi16(cospi[36], cospi[28]);
+  const __m128i cospi_p28_m36 = pair_set_epi16(cospi[28], -cospi[36]);
+  const __m128i cospi_p52_p12 = pair_set_epi16(cospi[52], cospi[12]);
+  const __m128i cospi_p12_m52 = pair_set_epi16(cospi[12], -cospi[52]);
+
+  // stage 1: reorder inputs and negate selected terms (0 - x gives the
+  // sign flips the ADST lattice requires).
+  __m128i x1[8];
+  x1[0] = input[0];
+  x1[1] = _mm_subs_epi16(__zero, input[7]);
+  x1[2] = _mm_subs_epi16(__zero, input[3]);
+  x1[3] = input[4];
+  x1[4] = _mm_subs_epi16(__zero, input[1]);
+  x1[5] = input[6];
+  x1[6] = input[2];
+  x1[7] = _mm_subs_epi16(__zero, input[5]);
+
+  // stage 2: 45-degree rotations on two of the four pairs.
+  __m128i x2[8];
+  x2[0] = x1[0];
+  x2[1] = x1[1];
+  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[2], x1[3], x2[2], x2[3]);
+  x2[4] = x1[4];
+  x2[5] = x1[5];
+  btf_16_sse2(cospi_p32_p32, cospi_p32_m32, x1[6], x1[7], x2[6], x2[7]);
+
+  // stage 3: butterfly add/sub within each group of four.
+  __m128i x3[8];
+  x3[0] = _mm_adds_epi16(x2[0], x2[2]);
+  x3[2] = _mm_subs_epi16(x2[0], x2[2]);
+  x3[1] = _mm_adds_epi16(x2[1], x2[3]);
+  x3[3] = _mm_subs_epi16(x2[1], x2[3]);
+  x3[4] = _mm_adds_epi16(x2[4], x2[6]);
+  x3[6] = _mm_subs_epi16(x2[4], x2[6]);
+  x3[5] = _mm_adds_epi16(x2[5], x2[7]);
+  x3[7] = _mm_subs_epi16(x2[5], x2[7]);
+
+  // stage 4: 16/48-degree rotations on the upper half.
+  __m128i x4[8];
+  x4[0] = x3[0];
+  x4[1] = x3[1];
+  x4[2] = x3[2];
+  x4[3] = x3[3];
+  btf_16_sse2(cospi_p16_p48, cospi_p48_m16, x3[4], x3[5], x4[4], x4[5]);
+  btf_16_sse2(cospi_m48_p16, cospi_p16_p48, x3[6], x3[7], x4[6], x4[7]);
+
+  // stage 5, 6 and 7: final butterflies straight into the permuted output
+  // slots, then in-place odd-cosine rotations on each output pair.
+  output[7] = _mm_adds_epi16(x4[0], x4[4]);
+  output[3] = _mm_subs_epi16(x4[0], x4[4]);
+  output[0] = _mm_adds_epi16(x4[1], x4[5]);
+  output[4] = _mm_subs_epi16(x4[1], x4[5]);
+  output[5] = _mm_adds_epi16(x4[2], x4[6]);
+  output[1] = _mm_subs_epi16(x4[2], x4[6]);
+  output[2] = _mm_adds_epi16(x4[3], x4[7]);
+  output[6] = _mm_subs_epi16(x4[3], x4[7]);
+
+  btf_16_sse2(cospi_p04_p60, cospi_p60_m04, output[7], output[0], output[7],
+              output[0]);
+  btf_16_sse2(cospi_p20_p44, cospi_p44_m20, output[5], output[2], output[5],
+              output[2]);
+  btf_16_sse2(cospi_p36_p28, cospi_p28_m36, output[3], output[4], output[3],
+              output[4]);
+  btf_16_sse2(cospi_p52_p12, cospi_p12_m52, output[1], output[6], output[1],
+              output[6]);
+}
+
+// 8-wide, height-16 identity transform: scale each coefficient by
+// 2*sqrt(2) with rounding (the 16-point identity scale), saturating on
+// the 32->16 repack.
+static INLINE void fidentity8x16_new_sse2(const __m128i *input, __m128i *output,
+                                          int8_t cos_bit) {
+  (void)cos_bit;  // identity transforms need no cosine table
+  const __m128i one = _mm_set1_epi16(1);
+
+  for (int i = 0; i < 16; ++i) {
+    // Interleave with 1 so scale_round_sse2 can fold rounding into a madd.
+    const __m128i a_lo = _mm_unpacklo_epi16(input[i], one);
+    const __m128i a_hi = _mm_unpackhi_epi16(input[i], one);
+    const __m128i b_lo = scale_round_sse2(a_lo, 2 * NewSqrt2);
+    const __m128i b_hi = scale_round_sse2(a_hi, 2 * NewSqrt2);
+    output[i] = _mm_packs_epi32(b_lo, b_hi);
+  }
+}
+
+// 8-wide, height-32 identity transform: out = 4 * in.  The scale factor
+// is an exact power of two, so a plain left shift (wrapping, not
+// saturating) suffices -- no rounding multiply needed.
+static INLINE void fidentity8x32_new_sse2(const __m128i *input, __m128i *output,
+                                          int8_t cos_bit) {
+  (void)cos_bit;  // identity transforms need no cosine table
+  for (int i = 0; i < 32; ++i) {
+    output[i] = _mm_slli_epi16(input[i], 2);
+  }
+}
+
+// 32-point column-transform kernels, indexed by TX_TYPE.  Only DCT and
+// identity have SSE2 32-point implementations; NULL entries signal the
+// caller to fall back to the generic C transform (callers check for NULL
+// before dispatch).
+static const transform_1d_sse2 col_txfm8x32_arr[TX_TYPES] = {
+  av1_fdct8x32_new_sse2,   // DCT_DCT
+  NULL,                    // ADST_DCT
+  NULL,                    // DCT_ADST
+  NULL,                    // ADST_ADST
+  NULL,                    // FLIPADST_DCT
+  NULL,                    // DCT_FLIPADST
+  NULL,                    // FLIPADST_FLIPADST
+  NULL,                    // ADST_FLIPADST
+  NULL,                    // FLIPADST_ADST
+  fidentity8x32_new_sse2,  // IDTX
+  av1_fdct8x32_new_sse2,   // V_DCT
+  fidentity8x32_new_sse2,  // H_DCT
+  NULL,                    // V_ADST
+  NULL,                    // H_ADST
+  NULL,                    // V_FLIPADST
+  NULL                     // H_FLIPADST
+};
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AOM_AV1_ENCODER_X86_AV1_FWD_TXFM_SSE2_H_
diff --git a/third_party/aom/av1/encoder/x86/av1_highbd_quantize_avx2.c b/third_party/aom/av1/encoder/x86/av1_highbd_quantize_avx2.c
new file mode 100644
index 0000000000..b58911fcb2
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_highbd_quantize_avx2.c
@@ -0,0 +1,137 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+
+#include "config/av1_rtcd.h"
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
+
+// Zero-extend eight 16-bit quantizer parameters to 32 bits and pack them
+// into one 256-bit register: low 128-bit lane holds elements 0-3 (DC is
+// element 0), high lane holds elements 4-7.
+static INLINE void init_one_qp(const __m128i *p, __m256i *qp) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i dc = _mm_unpacklo_epi16(*p, zero);
+  const __m128i ac = _mm_unpackhi_epi16(*p, zero);
+  *qp = _mm256_insertf128_si256(_mm256_castsi128_si256(dc), ac, 1);
+}
+
+// After the first 8 coefficients (which include DC), broadcast the high
+// lane of each parameter register into both lanes so all subsequent
+// iterations use the AC round/quant/dequant values.
+static INLINE void update_qp(__m256i *qp) {
+  for (int i = 0; i < 3; ++i) {
+    qp[i] = _mm256_permute2x128_si256(qp[i], qp[i], 0x11);
+  }
+}
+
+// Load round/quant/dequant tables (each laid out as {DC, AC, ...} 16-bit
+// entries) and widen them into qp[0..2] as 32-bit lanes.  For scaled
+// transforms (log_scale != 0) the rounding values are pre-scaled via a
+// mulhrs by 2^(15-log_scale), i.e. round >> log_scale with rounding.
+static INLINE void init_qp(const int16_t *round_ptr, const int16_t *quant_ptr,
+                           const int16_t *dequant_ptr, int log_scale,
+                           __m256i *qp) {
+  __m128i round = _mm_loadu_si128((const __m128i *)round_ptr);
+  if (log_scale) {
+    const __m128i round_scale = _mm_set1_epi16(1 << (15 - log_scale));
+    round = _mm_mulhrs_epi16(round, round_scale);
+  }
+  const __m128i quant = _mm_loadu_si128((const __m128i *)quant_ptr);
+  const __m128i dequant = _mm_loadu_si128((const __m128i *)dequant_ptr);
+
+  init_one_qp(&round, &qp[0]);
+  init_one_qp(&quant, &qp[1]);
+  init_one_qp(&dequant, &qp[2]);
+}
+
+// Quantize 8 highbd coefficients: q = (|c| + round) * quant >> (16 -
+// log_scale), zeroed when |c|*2^(1+log_scale) < dequant (dead zone),
+// then sign-restored; dq = q * dequant >> log_scale.  Also folds the
+// per-lane eob candidates (iscan index + 1 for nonzero dq) into *eob.
+static INLINE void quantize(const __m256i *qp, __m256i *c,
+                            const int16_t *iscan_ptr, int log_scale,
+                            tran_low_t *qcoeff, tran_low_t *dqcoeff,
+                            __m256i *eob) {
+  const __m256i abs_coeff = _mm256_abs_epi32(*c);
+  __m256i q = _mm256_add_epi32(abs_coeff, qp[0]);
+
+  // 32x32->64 multiply done on even/odd lane pairs (mul_epi32 uses only
+  // the even 32-bit lanes), each shifted then recombined.
+  __m256i q_lo = _mm256_mul_epi32(q, qp[1]);
+  __m256i q_hi = _mm256_srli_epi64(q, 32);
+  const __m256i qp_hi = _mm256_srli_epi64(qp[1], 32);
+  q_hi = _mm256_mul_epi32(q_hi, qp_hi);
+  q_lo = _mm256_srli_epi64(q_lo, 16 - log_scale);
+  q_hi = _mm256_srli_epi64(q_hi, 16 - log_scale);
+  q_hi = _mm256_slli_epi64(q_hi, 32);
+  q = _mm256_or_si256(q_lo, q_hi);
+  // Dead-zone: zero lanes where the scaled |coeff| is below dequant.
+  const __m256i abs_s = _mm256_slli_epi32(abs_coeff, 1 + log_scale);
+  const __m256i mask = _mm256_cmpgt_epi32(qp[2], abs_s);
+  q = _mm256_andnot_si256(mask, q);
+
+  __m256i dq = _mm256_mullo_epi32(q, qp[2]);
+  dq = _mm256_srai_epi32(dq, log_scale);
+  q = _mm256_sign_epi32(q, *c);
+  dq = _mm256_sign_epi32(dq, *c);
+
+  _mm256_storeu_si256((__m256i *)qcoeff, q);
+  _mm256_storeu_si256((__m256i *)dqcoeff, dq);
+
+  // Widen the 8 16-bit iscan indices to 32 bits for the eob compare.
+  const __m128i isc = _mm_loadu_si128((const __m128i *)iscan_ptr);
+  const __m128i zr = _mm_setzero_si128();
+  const __m128i lo = _mm_unpacklo_epi16(isc, zr);
+  const __m128i hi = _mm_unpackhi_epi16(isc, zr);
+  const __m256i iscan =
+      _mm256_insertf128_si256(_mm256_castsi128_si256(lo), hi, 1);
+
+  // For nonzero lanes nz == -1, so iscan - nz == iscan + 1; zero lanes
+  // are masked out before the running max.
+  const __m256i zero = _mm256_setzero_si256();
+  const __m256i zc = _mm256_cmpeq_epi32(dq, zero);
+  const __m256i nz = _mm256_cmpeq_epi32(zc, zero);
+  __m256i cur_eob = _mm256_sub_epi32(iscan, nz);
+  cur_eob = _mm256_and_si256(cur_eob, nz);
+  *eob = _mm256_max_epi32(cur_eob, *eob);
+}
+
+// High-bitdepth "fast path" quantizer (AVX2), 8 coefficients per step.
+// The first iteration is peeled so the DC round/quant/dequant values are
+// used for lane 0; update_qp then switches all lanes to the AC values.
+// NOTE(review): assumes n_coeffs >= 8 and a multiple of 8 -- presumably
+// guaranteed by the transform sizes; confirm with callers.
+void av1_highbd_quantize_fp_avx2(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan, int log_scale) {
+  (void)scan;
+  (void)zbin_ptr;         // fp quantizer has no zbin stage
+  (void)quant_shift_ptr;  // fp quantizer has no quant_shift stage
+  const unsigned int step = 8;
+  __m256i qp[3], coeff;
+
+  init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, qp);
+  coeff = _mm256_loadu_si256((const __m256i *)coeff_ptr);
+
+  __m256i eob = _mm256_setzero_si256();
+  // Peeled first step: uses the DC parameters in lane 0.
+  quantize(qp, &coeff, iscan, log_scale, qcoeff_ptr, dqcoeff_ptr, &eob);
+
+  coeff_ptr += step;
+  qcoeff_ptr += step;
+  dqcoeff_ptr += step;
+  iscan += step;
+  n_coeffs -= step;
+
+  update_qp(qp);  // switch every lane to the AC parameters
+  while (n_coeffs > 0) {
+    coeff = _mm256_loadu_si256((const __m256i *)coeff_ptr);
+    quantize(qp, &coeff, iscan, log_scale, qcoeff_ptr, dqcoeff_ptr, &eob);
+
+    coeff_ptr += step;
+    qcoeff_ptr += step;
+    dqcoeff_ptr += step;
+    iscan += step;
+    n_coeffs -= step;
+  }
+  {
+    // Horizontal max of the 8 per-lane eob candidates (values fit in
+    // 16 bits, so 16-bit max ops suffice).
+    __m256i eob_s;
+    eob_s = _mm256_shuffle_epi32(eob, 0xe);
+    eob = _mm256_max_epi16(eob, eob_s);
+    eob_s = _mm256_shufflelo_epi16(eob, 0xe);
+    eob = _mm256_max_epi16(eob, eob_s);
+    eob_s = _mm256_shufflelo_epi16(eob, 1);
+    eob = _mm256_max_epi16(eob, eob_s);
+    const __m128i final_eob = _mm_max_epi16(_mm256_castsi256_si128(eob),
+                                            _mm256_extractf128_si256(eob, 1));
+    *eob_ptr = _mm_extract_epi16(final_eob, 0);
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_highbd_quantize_sse4.c b/third_party/aom/av1/encoder/x86/av1_highbd_quantize_sse4.c
new file mode 100644
index 0000000000..40b3b460b6
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_highbd_quantize_sse4.c
@@ -0,0 +1,195 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <smmintrin.h>
+#include <stdint.h>
+
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/x86/synonyms.h"
+
+// Coefficient quantization phase 1: take |coeff|, add rounding, and
+// quantize/dequantize the even (low) 32-bit lane pair via 64-bit
+// multiplies.  param[0-2] hold rounding/quant/dequant constants;
+// param[3] holds dequant for the dead-zone test.  Outputs: qcoeff[0]
+// (even-lane results), qcoeff[1] (odd lanes staged for phase 2),
+// qcoeff[2] (dead-zone mask), dquan[0], and *sign (-1 or +1 per lane,
+// the or-with-1 makes zero-coefficient lanes map to +1).
+static INLINE void quantize_coeff_phase1(__m128i *coeff, const __m128i *param,
+                                         const int shift, const int scale,
+                                         __m128i *qcoeff, __m128i *dquan,
+                                         __m128i *sign) {
+  const __m128i zero = _mm_setzero_si128();
+  const __m128i one = _mm_set1_epi32(1);
+
+  *sign = _mm_cmplt_epi32(*coeff, zero);
+  *sign = _mm_or_si128(*sign, one);
+  *coeff = _mm_abs_epi32(*coeff);
+
+  qcoeff[0] = _mm_add_epi32(*coeff, param[0]);
+  // Split into even/odd 32-bit lanes: mul_epi32 only uses even lanes.
+  qcoeff[1] = _mm_unpackhi_epi32(qcoeff[0], zero);
+  qcoeff[0] = _mm_unpacklo_epi32(qcoeff[0], zero);
+
+  qcoeff[0] = _mm_mul_epi32(qcoeff[0], param[1]);
+  qcoeff[0] = _mm_srli_epi64(qcoeff[0], shift);
+  dquan[0] = _mm_mul_epi32(qcoeff[0], param[2]);
+  dquan[0] = _mm_srli_epi64(dquan[0], scale);
+  // Dead-zone mask: lanes with |coeff|*2^(1+scale) < dequant get zeroed.
+  const __m128i abs_s = _mm_slli_epi32(*coeff, 1 + scale);
+  qcoeff[2] = _mm_cmplt_epi32(abs_s, param[3]);
+}
+
+// Coefficient quantization phase 2: process the odd lanes staged in
+// qcoeff[1], interleave them back with the even-lane results from phase
+// 1, apply the dead-zone mask and signs, and store 4 quantized and 4
+// dequantized coefficients.
+static INLINE void quantize_coeff_phase2(__m128i *qcoeff, __m128i *dquan,
+                                         const __m128i *sign,
+                                         const __m128i *param, const int shift,
+                                         const int scale, tran_low_t *qAddr,
+                                         tran_low_t *dqAddr) {
+  __m128i mask0L = _mm_set_epi32(-1, -1, 0, 0);
+  __m128i mask0H = _mm_set_epi32(0, 0, -1, -1);
+
+  qcoeff[1] = _mm_mul_epi32(qcoeff[1], param[1]);
+  qcoeff[1] = _mm_srli_epi64(qcoeff[1], shift);
+  dquan[1] = _mm_mul_epi32(qcoeff[1], param[2]);
+  dquan[1] = _mm_srli_epi64(dquan[1], scale);
+
+  // combine L&H: shuffle each half's valid lanes into opposite ends,
+  // mask, and OR back into the original coefficient order.
+  qcoeff[0] = _mm_shuffle_epi32(qcoeff[0], 0xd8);
+  qcoeff[1] = _mm_shuffle_epi32(qcoeff[1], 0x8d);
+
+  qcoeff[0] = _mm_and_si128(qcoeff[0], mask0H);
+  qcoeff[1] = _mm_and_si128(qcoeff[1], mask0L);
+
+  dquan[0] = _mm_shuffle_epi32(dquan[0], 0xd8);
+  dquan[1] = _mm_shuffle_epi32(dquan[1], 0x8d);
+
+  dquan[0] = _mm_and_si128(dquan[0], mask0H);
+  dquan[1] = _mm_and_si128(dquan[1], mask0L);
+
+  qcoeff[0] = _mm_or_si128(qcoeff[0], qcoeff[1]);
+  dquan[0] = _mm_or_si128(dquan[0], dquan[1]);
+
+  // Restore signs, then zero the dead-zone lanes flagged by phase 1.
+  qcoeff[0] = _mm_sign_epi32(qcoeff[0], *sign);
+  dquan[0] = _mm_sign_epi32(dquan[0], *sign);
+  qcoeff[0] = _mm_andnot_si128(qcoeff[2], qcoeff[0]);
+  dquan[0] = _mm_andnot_si128(qcoeff[2], dquan[0]);
+  _mm_storeu_si128((__m128i *)qAddr, qcoeff[0]);
+  _mm_storeu_si128((__m128i *)dqAddr, dquan[0]);
+}
+
+// Fold the eob candidates for 8 freshly quantized coefficients into
+// *eob: for each nonzero coefficient the candidate is its iscan index
+// + 1 (sub of an all-ones mask), zeros contribute nothing.
+static INLINE void find_eob(tran_low_t *qcoeff_ptr, const int16_t *iscan,
+                            __m128i *eob) {
+  const __m128i zero = _mm_setzero_si128();
+  __m128i mask, iscanIdx;
+  const __m128i q0 = _mm_loadu_si128((__m128i const *)qcoeff_ptr);
+  const __m128i q1 = _mm_loadu_si128((__m128i const *)(qcoeff_ptr + 4));
+  // Double cmpeq computes "nonzero" masks (all-ones for nonzero lanes).
+  __m128i nz_flag0 = _mm_cmpeq_epi32(q0, zero);
+  __m128i nz_flag1 = _mm_cmpeq_epi32(q1, zero);
+
+  nz_flag0 = _mm_cmpeq_epi32(nz_flag0, zero);
+  nz_flag1 = _mm_cmpeq_epi32(nz_flag1, zero);
+
+  mask = _mm_packs_epi32(nz_flag0, nz_flag1);
+  iscanIdx = _mm_loadu_si128((__m128i const *)iscan);
+  iscanIdx = _mm_sub_epi16(iscanIdx, mask);  // iscan + 1 on nonzero lanes
+  iscanIdx = _mm_and_si128(iscanIdx, mask);  // zero lanes drop out
+  *eob = _mm_max_epi16(*eob, iscanIdx);
+}
+
+// Horizontal max across the eight 16-bit lanes of *eob via a shuffle/max
+// reduction; returns the final end-of-block count in lane 0.
+static INLINE uint16_t get_accumulated_eob(__m128i *eob) {
+  __m128i eob_shuffled;
+  uint16_t eobValue;
+  eob_shuffled = _mm_shuffle_epi32(*eob, 0xe);      // fold high 64 onto low
+  *eob = _mm_max_epi16(*eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(*eob, 0xe);    // fold lanes 2,3 onto 0,1
+  *eob = _mm_max_epi16(*eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(*eob, 0x1);    // fold lane 1 onto 0
+  *eob = _mm_max_epi16(*eob, eob_shuffled);
+  eobValue = _mm_extract_epi16(*eob, 0);
+  return eobValue;
+}
+
+// High-bitdepth "fast path" quantizer (SSE4.1), 8 coefficients per loop
+// iteration in two 4-wide phase1/phase2 passes.  The first 8 are peeled
+// so the DC round/quant/dequant can be used for coefficient 0 before the
+// qparam registers are rewritten with AC-only values.
+// NOTE(review): assumes count >= 8 and a multiple of 8 -- presumably
+// guaranteed by the transform sizes; confirm with callers.
+void av1_highbd_quantize_fp_sse4_1(
+    const tran_low_t *coeff_ptr, intptr_t count, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan, const int16_t *iscan, int log_scale) {
+  __m128i coeff[2], qcoeff[3], dequant[2], qparam[4], coeff_sign;
+  __m128i eob = _mm_setzero_si128();
+  const tran_low_t *src = coeff_ptr;
+  tran_low_t *quanAddr = qcoeff_ptr;
+  tran_low_t *dquanAddr = dqcoeff_ptr;
+  const int shift = 16 - log_scale;
+  const int coeff_stride = 4;
+  const int quan_stride = coeff_stride;
+  (void)zbin_ptr;         // fp quantizer has no zbin stage
+  (void)quant_shift_ptr;  // fp quantizer has no quant_shift stage
+  (void)scan;
+
+  // Outputs are cleared up front; the loop only writes nonzero results.
+  memset(quanAddr, 0, count * sizeof(quanAddr[0]));
+  memset(dquanAddr, 0, count * sizeof(dquanAddr[0]));
+
+  coeff[0] = _mm_loadu_si128((__m128i const *)src);
+  const int round1 = ROUND_POWER_OF_TWO(round_ptr[1], log_scale);
+  const int round0 = ROUND_POWER_OF_TWO(round_ptr[0], log_scale);
+
+  // Mixed DC/AC parameters for the first 4 coefficients (DC in lane 0).
+  qparam[0] = _mm_set_epi32(round1, round1, round1, round0);
+  qparam[1] = xx_set_64_from_32i(quant_ptr[1], quant_ptr[0]);
+  qparam[2] = xx_set_64_from_32i(dequant_ptr[1], dequant_ptr[0]);
+  qparam[3] = _mm_set_epi32(dequant_ptr[1], dequant_ptr[1], dequant_ptr[1],
+                            dequant_ptr[0]);
+
+  // DC and first 3 AC
+  quantize_coeff_phase1(&coeff[0], qparam, shift, log_scale, qcoeff, dequant,
+                        &coeff_sign);
+
+  // update round/quan/dquan for AC
+  qparam[0] = _mm_unpackhi_epi64(qparam[0], qparam[0]);
+  qparam[1] = xx_set1_64_from_32i(quant_ptr[1]);
+  qparam[2] = xx_set1_64_from_32i(dequant_ptr[1]);
+  qparam[3] = _mm_set1_epi32(dequant_ptr[1]);
+  quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift, log_scale,
+                        quanAddr, dquanAddr);
+
+  // next 4 AC
+  coeff[1] = _mm_loadu_si128((__m128i const *)(src + coeff_stride));
+  quantize_coeff_phase1(&coeff[1], qparam, shift, log_scale, qcoeff, dequant,
+                        &coeff_sign);
+  quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift, log_scale,
+                        quanAddr + quan_stride, dquanAddr + quan_stride);
+
+  find_eob(quanAddr, iscan, &eob);
+
+  count -= 8;
+
+  // loop for the rest of AC
+  while (count > 0) {
+    src += coeff_stride << 1;
+    quanAddr += quan_stride << 1;
+    dquanAddr += quan_stride << 1;
+    iscan += quan_stride << 1;
+
+    coeff[0] = _mm_loadu_si128((__m128i const *)src);
+    coeff[1] = _mm_loadu_si128((__m128i const *)(src + coeff_stride));
+
+    quantize_coeff_phase1(&coeff[0], qparam, shift, log_scale, qcoeff, dequant,
+                          &coeff_sign);
+    quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift,
+                          log_scale, quanAddr, dquanAddr);
+
+    quantize_coeff_phase1(&coeff[1], qparam, shift, log_scale, qcoeff, dequant,
+                          &coeff_sign);
+    quantize_coeff_phase2(qcoeff, dequant, &coeff_sign, qparam, shift,
+                          log_scale, quanAddr + quan_stride,
+                          dquanAddr + quan_stride);
+
+    find_eob(quanAddr, iscan, &eob);
+
+    count -= 8;
+  }
+  *eob_ptr = get_accumulated_eob(&eob);
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_k_means_avx2.c b/third_party/aom/av1/encoder/x86/av1_k_means_avx2.c
new file mode 100644
index 0000000000..52ddc66437
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_k_means_avx2.c
@@ -0,0 +1,132 @@
+/*
+ * Copyright (c) 2020, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#include <immintrin.h> // AVX2
+
+#include "config/av1_rtcd.h"
+#include "aom_dsp/x86/synonyms.h"
+
+// Horizontally reduce the four 64-bit lanes of |a| to a single scalar sum.
+static int64_t k_means_horizontal_sum_avx2(__m256i a) {
+  const __m128i low = _mm256_castsi256_si128(a);
+  const __m128i high = _mm256_extracti128_si256(a, 1);
+  const __m128i sum = _mm_add_epi64(low, high);
+  const __m128i sum_high = _mm_unpackhi_epi64(sum, sum);
+  int64_t res;
+  _mm_storel_epi64((__m128i *)&res, _mm_add_epi64(sum, sum_high));
+  return res;
+}
+
+// For each of the |n| 1-D samples in |data|, find the nearest of the |k|
+// centroids (L1 distance in 16 bits, squared for the accumulated total) and
+// write its index to |indices|. If |total_dist| is non-NULL, also accumulate
+// the total squared distance. Processes 16 samples per iteration; the loop
+// stride implies n is a multiple of 16 -- TODO confirm against callers.
+void av1_calc_indices_dim1_avx2(const int16_t *data, const int16_t *centroids,
+                                uint8_t *indices, int64_t *total_dist, int n,
+                                int k) {
+  const __m256i v_zero = _mm256_setzero_si256();
+  __m256i sum = _mm256_setzero_si256();
+  __m256i cents[PALETTE_MAX_SIZE];
+  for (int j = 0; j < k; ++j) {
+    cents[j] = _mm256_set1_epi16(centroids[j]);
+  }
+
+  for (int i = 0; i < n; i += 16) {
+    const __m256i in = _mm256_loadu_si256((__m256i *)data);
+    __m256i ind = _mm256_setzero_si256();
+    // Compute the distance to the first centroid.
+    __m256i d1 = _mm256_sub_epi16(in, cents[0]);
+    __m256i dist_min = _mm256_abs_epi16(d1);
+
+    for (int j = 1; j < k; ++j) {
+      // Compute the distance to the centroid.
+      d1 = _mm256_sub_epi16(in, cents[j]);
+      const __m256i dist = _mm256_abs_epi16(d1);
+      // Compare to the minimal one.
+      const __m256i cmp = _mm256_cmpgt_epi16(dist_min, dist);
+      dist_min = _mm256_min_epi16(dist_min, dist);
+      const __m256i ind1 = _mm256_set1_epi16(j);
+      // Blend: keep the old index where the old distance is still minimal.
+      ind = _mm256_or_si256(_mm256_andnot_si256(cmp, ind),
+                            _mm256_and_si256(cmp, ind1));
+    }
+
+    // Narrow the 16-bit indices to 8 bits; the permute gathers the valid
+    // bytes from both 128-bit lanes into the low half before the store.
+    const __m256i p1 = _mm256_packus_epi16(ind, v_zero);
+    const __m256i px = _mm256_permute4x64_epi64(p1, 0x58);
+    const __m128i d2 = _mm256_extracti128_si256(px, 0);
+
+    _mm_storeu_si128((__m128i *)indices, d2);
+
+    if (total_dist) {
+      // Square, convert to 32 bit and add together.
+      dist_min = _mm256_madd_epi16(dist_min, dist_min);
+      // Convert to 64 bit and add to sum.
+      const __m256i dist1 = _mm256_unpacklo_epi32(dist_min, v_zero);
+      const __m256i dist2 = _mm256_unpackhi_epi32(dist_min, v_zero);
+      sum = _mm256_add_epi64(sum, dist1);
+      sum = _mm256_add_epi64(sum, dist2);
+    }
+
+    indices += 16;
+    data += 16;
+  }
+  if (total_dist) {
+    *total_dist = k_means_horizontal_sum_avx2(sum);
+  }
+}
+
+// 2-D variant: |data| holds interleaved (x, y) pairs; the distance metric is
+// the squared Euclidean distance computed via madd on packed (dx, dy) pairs.
+// Processes 16 pairs (two 256-bit loads of 8 pairs each) per outer iteration;
+// the stride implies n is a multiple of 16 -- TODO confirm against callers.
+void av1_calc_indices_dim2_avx2(const int16_t *data, const int16_t *centroids,
+                                uint8_t *indices, int64_t *total_dist, int n,
+                                int k) {
+  const __m256i v_zero = _mm256_setzero_si256();
+  const __m256i permute = _mm256_set_epi32(0, 0, 0, 0, 5, 1, 4, 0);
+  __m256i sum = _mm256_setzero_si256();
+  __m256i ind[2];
+  __m256i cents[PALETTE_MAX_SIZE];
+  for (int j = 0; j < k; ++j) {
+    // Broadcast the (x, y) centroid pair across all eight pair slots.
+    const int16_t cx = centroids[2 * j], cy = centroids[2 * j + 1];
+    cents[j] = _mm256_set_epi16(cy, cx, cy, cx, cy, cx, cy, cx, cy, cx, cy, cx,
+                                cy, cx, cy, cx);
+  }
+
+  for (int i = 0; i < n; i += 16) {
+    for (int l = 0; l < 2; ++l) {
+      const __m256i in = _mm256_loadu_si256((__m256i *)data);
+      ind[l] = _mm256_setzero_si256();
+      // Compute the distance to the first centroid.
+      __m256i d1 = _mm256_sub_epi16(in, cents[0]);
+      // madd gives dx*dx + dy*dy per pair, i.e. the squared L2 distance.
+      __m256i dist_min = _mm256_madd_epi16(d1, d1);
+
+      for (int j = 1; j < k; ++j) {
+        // Compute the distance to the centroid.
+        d1 = _mm256_sub_epi16(in, cents[j]);
+        const __m256i dist = _mm256_madd_epi16(d1, d1);
+        // Compare to the minimal one.
+        const __m256i cmp = _mm256_cmpgt_epi32(dist_min, dist);
+        dist_min = _mm256_min_epi32(dist_min, dist);
+        const __m256i ind1 = _mm256_set1_epi32(j);
+        ind[l] = _mm256_or_si256(_mm256_andnot_si256(cmp, ind[l]),
+                                 _mm256_and_si256(cmp, ind1));
+      }
+      if (total_dist) {
+        // Convert to 64 bit and add to sum.
+        const __m256i dist1 = _mm256_unpacklo_epi32(dist_min, v_zero);
+        const __m256i dist2 = _mm256_unpackhi_epi32(dist_min, v_zero);
+        sum = _mm256_add_epi64(sum, dist1);
+        sum = _mm256_add_epi64(sum, dist2);
+      }
+      data += 16;
+    }
+    // Cast to 8 bit and store.
+    const __m256i d2 = _mm256_packus_epi32(ind[0], ind[1]);
+    const __m256i d3 = _mm256_packus_epi16(d2, v_zero);
+    const __m256i d4 = _mm256_permutevar8x32_epi32(d3, permute);
+    const __m128i d5 = _mm256_extracti128_si256(d4, 0);
+    _mm_storeu_si128((__m128i *)indices, d5);
+    indices += 16;
+  }
+  if (total_dist) {
+    *total_dist = k_means_horizontal_sum_avx2(sum);
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_k_means_sse2.c b/third_party/aom/av1/encoder/x86/av1_k_means_sse2.c
new file mode 100644
index 0000000000..6c75822350
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_k_means_sse2.c
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2021, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <emmintrin.h> // SSE2
+
+#include "config/av1_rtcd.h"
+#include "aom_dsp/x86/synonyms.h"
+
+// Horizontally reduce the two 64-bit lanes of |a| to a single scalar sum.
+static int64_t k_means_horizontal_sum_sse2(__m128i a) {
+  const __m128i sum1 = _mm_unpackhi_epi64(a, a);
+  const __m128i sum2 = _mm_add_epi64(a, sum1);
+  int64_t res;
+  _mm_storel_epi64((__m128i *)&res, sum2);
+  return res;
+}
+
+// SSE2 version of the 1-D nearest-centroid search: 8 samples per iteration
+// (the stride implies n is a multiple of 8 -- TODO confirm against callers).
+// SSE2 lacks pabsw, so |x - c| is computed as max(x - c, c - x).
+void av1_calc_indices_dim1_sse2(const int16_t *data, const int16_t *centroids,
+                                uint8_t *indices, int64_t *total_dist, int n,
+                                int k) {
+  const __m128i v_zero = _mm_setzero_si128();
+  __m128i sum = _mm_setzero_si128();
+  __m128i cents[PALETTE_MAX_SIZE];
+  for (int j = 0; j < k; ++j) {
+    cents[j] = _mm_set1_epi16(centroids[j]);
+  }
+
+  for (int i = 0; i < n; i += 8) {
+    const __m128i in = _mm_loadu_si128((__m128i *)data);
+    __m128i ind = _mm_setzero_si128();
+    // Compute the distance to the first centroid.
+    __m128i d1 = _mm_sub_epi16(in, cents[0]);
+    __m128i d2 = _mm_sub_epi16(cents[0], in);
+    __m128i dist_min = _mm_max_epi16(d1, d2);
+
+    for (int j = 1; j < k; ++j) {
+      // Compute the distance to the centroid.
+      d1 = _mm_sub_epi16(in, cents[j]);
+      d2 = _mm_sub_epi16(cents[j], in);
+      const __m128i dist = _mm_max_epi16(d1, d2);
+      // Compare to the minimal one.
+      const __m128i cmp = _mm_cmpgt_epi16(dist_min, dist);
+      dist_min = _mm_min_epi16(dist_min, dist);
+      const __m128i ind1 = _mm_set1_epi16(j);
+      // Blend: keep the old index where the old distance is still minimal.
+      ind = _mm_or_si128(_mm_andnot_si128(cmp, ind), _mm_and_si128(cmp, ind1));
+    }
+    if (total_dist) {
+      // Square, convert to 32 bit and add together.
+      dist_min = _mm_madd_epi16(dist_min, dist_min);
+      // Convert to 64 bit and add to sum.
+      const __m128i dist1 = _mm_unpacklo_epi32(dist_min, v_zero);
+      const __m128i dist2 = _mm_unpackhi_epi32(dist_min, v_zero);
+      sum = _mm_add_epi64(sum, dist1);
+      sum = _mm_add_epi64(sum, dist2);
+    }
+    // Narrow the 16-bit indices to 8 bits and store the low 8 bytes.
+    __m128i p2 = _mm_packus_epi16(ind, v_zero);
+    _mm_storel_epi64((__m128i *)indices, p2);
+    indices += 8;
+    data += 8;
+  }
+  if (total_dist) {
+    *total_dist = k_means_horizontal_sum_sse2(sum);
+  }
+}
+
+// SSE2 version of the 2-D nearest-centroid search on interleaved (x, y)
+// pairs using squared L2 distance via madd. Processes 8 pairs (two loads of
+// 4 pairs) per outer iteration; the stride implies n is a multiple of 8 --
+// TODO confirm against callers. SSE2 lacks pminsd, so the 32-bit min is
+// synthesized from the compare mask.
+void av1_calc_indices_dim2_sse2(const int16_t *data, const int16_t *centroids,
+                                uint8_t *indices, int64_t *total_dist, int n,
+                                int k) {
+  const __m128i v_zero = _mm_setzero_si128();
+  __m128i sum = _mm_setzero_si128();
+  __m128i ind[2];
+  __m128i cents[PALETTE_MAX_SIZE];
+  for (int j = 0; j < k; ++j) {
+    // Broadcast the (x, y) centroid pair across all four pair slots.
+    const int16_t cx = centroids[2 * j], cy = centroids[2 * j + 1];
+    cents[j] = _mm_set_epi16(cy, cx, cy, cx, cy, cx, cy, cx);
+  }
+
+  for (int i = 0; i < n; i += 8) {
+    for (int l = 0; l < 2; ++l) {
+      const __m128i in = _mm_loadu_si128((__m128i *)data);
+      ind[l] = _mm_setzero_si128();
+      // Compute the distance to the first centroid.
+      __m128i d1 = _mm_sub_epi16(in, cents[0]);
+      __m128i dist_min = _mm_madd_epi16(d1, d1);
+
+      for (int j = 1; j < k; ++j) {
+        // Compute the distance to the centroid.
+        d1 = _mm_sub_epi16(in, cents[j]);
+        const __m128i dist = _mm_madd_epi16(d1, d1);
+        // Compare to the minimal one.
+        const __m128i cmp = _mm_cmpgt_epi32(dist_min, dist);
+        // Emulated 32-bit min: select the smaller per-lane via the mask.
+        const __m128i dist1 = _mm_andnot_si128(cmp, dist_min);
+        const __m128i dist2 = _mm_and_si128(cmp, dist);
+        dist_min = _mm_or_si128(dist1, dist2);
+        const __m128i ind1 = _mm_set1_epi32(j);
+        ind[l] = _mm_or_si128(_mm_andnot_si128(cmp, ind[l]),
+                              _mm_and_si128(cmp, ind1));
+      }
+      if (total_dist) {
+        // Convert to 64 bit and add to sum.
+        const __m128i dist1 = _mm_unpacklo_epi32(dist_min, v_zero);
+        const __m128i dist2 = _mm_unpackhi_epi32(dist_min, v_zero);
+        sum = _mm_add_epi64(sum, dist1);
+        sum = _mm_add_epi64(sum, dist2);
+      }
+      data += 8;
+    }
+    // Cast to 8 bit and store.
+    const __m128i d2 = _mm_packus_epi16(ind[0], ind[1]);
+    const __m128i d3 = _mm_packus_epi16(d2, v_zero);
+    _mm_storel_epi64((__m128i *)indices, d3);
+    indices += 8;
+  }
+  if (total_dist) {
+    *total_dist = k_means_horizontal_sum_sse2(sum);
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_quantize_avx2.c b/third_party/aom/av1/encoder/x86/av1_quantize_avx2.c
new file mode 100644
index 0000000000..75c5172f85
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_quantize_avx2.c
@@ -0,0 +1,414 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+
+#include "config/av1_rtcd.h"
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/aom_dsp_common.h"
+
+// Zero 16 coefficients starting at |qcoeff| (two 256-bit stores; assumes
+// tran_low_t is 32-bit -- consistent with the packs/unpacks elsewhere here).
+static INLINE void write_zero(tran_low_t *qcoeff) {
+  const __m256i zero = _mm256_setzero_si256();
+  _mm256_storeu_si256((__m256i *)qcoeff, zero);
+  _mm256_storeu_si256((__m256i *)qcoeff + 1, zero);
+}
+
+// Build a 256-bit qp register: low 128 bits keep *p (DC params in the low
+// words), high 128 bits get the upper half of *p duplicated (AC params).
+static INLINE void init_one_qp(const __m128i *p, __m256i *qp) {
+  const __m128i ac = _mm_unpackhi_epi64(*p, *p);
+  *qp = _mm256_insertf128_si256(_mm256_castsi128_si256(*p), ac, 1);
+}
+
+// Load round/quant/dequant into qp[0..2] (DC in the low lane, AC in the high
+// lane, via init_one_qp) and derive the zbin-style threshold *thr from the
+// dequant values, all pre-scaled for |log_scale|.
+static INLINE void init_qp(const int16_t *round_ptr, const int16_t *quant_ptr,
+                           const int16_t *dequant_ptr, int log_scale,
+                           __m256i *thr, __m256i *qp) {
+  __m128i round = _mm_loadu_si128((const __m128i *)round_ptr);
+  const __m128i quant = _mm_loadu_si128((const __m128i *)quant_ptr);
+  const __m128i dequant = _mm_loadu_si128((const __m128i *)dequant_ptr);
+
+  if (log_scale > 0) {
+    // Round-to-nearest right shift of the rounding offsets.
+    const __m128i rnd = _mm_set1_epi16((int16_t)1 << (log_scale - 1));
+    round = _mm_add_epi16(round, rnd);
+    round = _mm_srai_epi16(round, log_scale);
+  }
+
+  init_one_qp(&round, &qp[0]);
+  init_one_qp(&quant, &qp[1]);
+
+  if (log_scale == 1) {
+    qp[1] = _mm256_slli_epi16(qp[1], log_scale);
+  }
+
+  init_one_qp(&dequant, &qp[2]);
+  *thr = _mm256_srai_epi16(qp[2], 1 + log_scale);
+  // Subtracting 1 here eliminates a _mm256_cmpeq_epi16() instruction when
+  // calculating the zbin mask.
+  *thr = _mm256_sub_epi16(*thr, _mm256_set1_epi16(1));
+}
+
+// After the first 16 coefficients, drop the DC parameters: copy the AC
+// (high) 128-bit lane of each register into both lanes.
+static INLINE void update_qp(__m256i *thr, __m256i *qp) {
+  qp[0] = _mm256_permute2x128_si256(qp[0], qp[0], 0x11);
+  qp[1] = _mm256_permute2x128_si256(qp[1], qp[1], 0x11);
+  qp[2] = _mm256_permute2x128_si256(qp[2], qp[2], 0x11);
+  *thr = _mm256_permute2x128_si256(*thr, *thr, 0x11);
+}
+
+// Load 16 32-bit coefficients and pack them (with signed saturation) into
+// one register of 16-bit values. Note packs interleaves per 128-bit lane;
+// store_coefficients_avx2() is the matching inverse.
+static INLINE __m256i load_coefficients_avx2(const tran_low_t *coeff_ptr) {
+  const __m256i coeff1 = _mm256_load_si256((__m256i *)coeff_ptr);
+  const __m256i coeff2 = _mm256_load_si256((__m256i *)(coeff_ptr + 8));
+  return _mm256_packs_epi32(coeff1, coeff2);
+}
+
+// Sign-extend 16 packed 16-bit values back to 32-bit and store them,
+// undoing the lane interleave produced by load_coefficients_avx2().
+static INLINE void store_coefficients_avx2(__m256i coeff_vals,
+                                           tran_low_t *coeff_ptr) {
+  __m256i coeff_sign = _mm256_srai_epi16(coeff_vals, 15);
+  __m256i coeff_vals_lo = _mm256_unpacklo_epi16(coeff_vals, coeff_sign);
+  __m256i coeff_vals_hi = _mm256_unpackhi_epi16(coeff_vals, coeff_sign);
+  _mm256_store_si256((__m256i *)coeff_ptr, coeff_vals_lo);
+  _mm256_store_si256((__m256i *)(coeff_ptr + 8), coeff_vals_hi);
+}
+
+// Horizontal max of 16 signed 16-bit eob candidates. SSE4.1 only has a
+// horizontal *min* of unsigned values (phminposuw), so the values are
+// mapped through INT16_MAX - x (saturating) and mapped back afterwards.
+static INLINE uint16_t quant_gather_eob(__m256i eob) {
+  const __m128i eob_lo = _mm256_castsi256_si128(eob);
+  const __m128i eob_hi = _mm256_extractf128_si256(eob, 1);
+  __m128i eob_s = _mm_max_epi16(eob_lo, eob_hi);
+  eob_s = _mm_subs_epu16(_mm_set1_epi16(INT16_MAX), eob_s);
+  eob_s = _mm_minpos_epu16(eob_s);
+  return INT16_MAX - _mm_extract_epi16(eob_s, 0);
+}
+
+// Horizontal max of 16 signed 16-bit eob candidates using only shuffles and
+// pmaxsw (no SSE4.1); log2 reduction down to a single lane.
+static INLINE int16_t accumulate_eob256(__m256i eob256) {
+  const __m128i eob_lo = _mm256_castsi256_si128(eob256);
+  const __m128i eob_hi = _mm256_extractf128_si256(eob256, 1);
+  __m128i eob = _mm_max_epi16(eob_lo, eob_hi);
+  __m128i eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
+  eob = _mm_max_epi16(eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
+  eob = _mm_max_epi16(eob, eob_shuffled);
+  eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
+  eob = _mm_max_epi16(eob, eob_shuffled);
+  // After the reduction, lane 1 holds the overall maximum.
+  return _mm_extract_epi16(eob, 1);
+}
+
+// Quantize the first 16 low-precision (int16) coefficients: DC uses the low
+// lane of the parameter registers, AC the high lane (see the permute setup
+// in av1_quantize_lp_avx2). Updates the running per-lane eob maximum.
+static AOM_FORCE_INLINE void quantize_lp_16_first(
+    const int16_t *coeff_ptr, const int16_t *iscan_ptr, int16_t *qcoeff_ptr,
+    int16_t *dqcoeff_ptr, __m256i *round256, __m256i *quant256,
+    __m256i *dequant256, __m256i *eob) {
+  const __m256i coeff = _mm256_loadu_si256((const __m256i *)coeff_ptr);
+  const __m256i abs_coeff = _mm256_abs_epi16(coeff);
+  const __m256i tmp_rnd = _mm256_adds_epi16(abs_coeff, *round256);
+  // mulhi keeps the high 16 bits, i.e. (abs + round) * quant >> 16.
+  const __m256i abs_qcoeff = _mm256_mulhi_epi16(tmp_rnd, *quant256);
+  const __m256i qcoeff = _mm256_sign_epi16(abs_qcoeff, coeff);
+  const __m256i dqcoeff = _mm256_mullo_epi16(qcoeff, *dequant256);
+  const __m256i nz_mask =
+      _mm256_cmpgt_epi16(abs_qcoeff, _mm256_setzero_si256());
+
+  _mm256_storeu_si256((__m256i *)qcoeff_ptr, qcoeff);
+  _mm256_storeu_si256((__m256i *)dqcoeff_ptr, dqcoeff);
+
+  const __m256i iscan = _mm256_loadu_si256((const __m256i *)iscan_ptr);
+  // Subtracting the all-ones mask adds 1, turning scan indices into counts.
+  const __m256i iscan_plus1 = _mm256_sub_epi16(iscan, nz_mask);
+  const __m256i nz_iscan = _mm256_and_si256(iscan_plus1, nz_mask);
+  *eob = _mm256_max_epi16(*eob, nz_iscan);
+}
+
+// Quantize 16 AC low-precision coefficients at offset |n_coeffs|; same math
+// as quantize_lp_16_first() but the parameter registers now hold AC values
+// in both lanes.
+static AOM_FORCE_INLINE void quantize_lp_16(
+    const int16_t *coeff_ptr, intptr_t n_coeffs, const int16_t *iscan_ptr,
+    int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr, __m256i *round256,
+    __m256i *quant256, __m256i *dequant256, __m256i *eob) {
+  const __m256i coeff =
+      _mm256_loadu_si256((const __m256i *)(coeff_ptr + n_coeffs));
+  const __m256i abs_coeff = _mm256_abs_epi16(coeff);
+  const __m256i tmp_rnd = _mm256_adds_epi16(abs_coeff, *round256);
+  const __m256i abs_qcoeff = _mm256_mulhi_epi16(tmp_rnd, *quant256);
+  const __m256i qcoeff = _mm256_sign_epi16(abs_qcoeff, coeff);
+  const __m256i dqcoeff = _mm256_mullo_epi16(qcoeff, *dequant256);
+  const __m256i nz_mask =
+      _mm256_cmpgt_epi16(abs_qcoeff, _mm256_setzero_si256());
+
+  _mm256_storeu_si256((__m256i *)(qcoeff_ptr + n_coeffs), qcoeff);
+  _mm256_storeu_si256((__m256i *)(dqcoeff_ptr + n_coeffs), dqcoeff);
+
+  const __m256i iscan =
+      _mm256_loadu_si256((const __m256i *)(iscan_ptr + n_coeffs));
+  // Subtracting the all-ones mask adds 1, turning scan indices into counts.
+  const __m256i iscan_plus1 = _mm256_sub_epi16(iscan, nz_mask);
+  const __m256i nz_iscan = _mm256_and_si256(iscan_plus1, nz_mask);
+  *eob = _mm256_max_epi16(*eob, nz_iscan);
+}
+
+// Low-precision (int16 coefficient) fast-path quantizer. The round/quant/
+// dequant tables hold {DC, AC} in their first two entries; the permutes
+// below arrange DC params in the low lane and AC in the high lane for the
+// first 16 coefficients, then AC-only for the rest.
+void av1_quantize_lp_avx2(const int16_t *coeff_ptr, intptr_t n_coeffs,
+                          const int16_t *round_ptr, const int16_t *quant_ptr,
+                          int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
+                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                          const int16_t *scan, const int16_t *iscan) {
+  (void)scan;
+  __m256i eob256 = _mm256_setzero_si256();
+
+  // Setup global values.
+  __m256i round256 =
+      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)round_ptr));
+  __m256i quant256 =
+      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)quant_ptr));
+  __m256i dequant256 =
+      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)dequant_ptr));
+
+  // Populate upper AC values.
+  round256 = _mm256_permute4x64_epi64(round256, 0x54);
+  quant256 = _mm256_permute4x64_epi64(quant256, 0x54);
+  dequant256 = _mm256_permute4x64_epi64(dequant256, 0x54);
+
+  // Process DC and the first 15 AC coeffs.
+  quantize_lp_16_first(coeff_ptr, iscan, qcoeff_ptr, dqcoeff_ptr, &round256,
+                       &quant256, &dequant256, &eob256);
+
+  if (n_coeffs > 16) {
+    // Overwrite the DC constants with AC constants
+    dequant256 = _mm256_permute2x128_si256(dequant256, dequant256, 0x31);
+    quant256 = _mm256_permute2x128_si256(quant256, quant256, 0x31);
+    round256 = _mm256_permute2x128_si256(round256, round256, 0x31);
+
+    // AC only loop.
+    for (int idx = 16; idx < n_coeffs; idx += 16) {
+      quantize_lp_16(coeff_ptr, idx, iscan, qcoeff_ptr, dqcoeff_ptr, &round256,
+                     &quant256, &dequant256, &eob256);
+    }
+  }
+
+  *eob_ptr = accumulate_eob256(eob256);
+}
+
+// Fold the (index+1) of the nonzero lanes selected by |v_mask| into the
+// running eob maximum. The permute realigns the iscan entries with the
+// lane-interleaved coefficient order from load_coefficients_avx2().
+static AOM_FORCE_INLINE __m256i get_max_lane_eob(const int16_t *iscan,
+                                                 __m256i v_eobmax,
+                                                 __m256i v_mask) {
+  const __m256i v_iscan = _mm256_loadu_si256((const __m256i *)iscan);
+  const __m256i v_iscan_perm = _mm256_permute4x64_epi64(v_iscan, 0xD8);
+  // Subtracting the all-ones mask adds 1, turning scan indices into counts.
+  const __m256i v_iscan_plus1 = _mm256_sub_epi16(v_iscan_perm, v_mask);
+  const __m256i v_nz_iscan = _mm256_and_si256(v_iscan_plus1, v_mask);
+  return _mm256_max_epi16(v_eobmax, v_nz_iscan);
+}
+
+// Quantize 16 coefficients (log_scale == 0). When every coefficient is at
+// or below the dequant-derived threshold the whole group is zeroed without
+// doing the multiplies.
+static AOM_FORCE_INLINE void quantize_fp_16(
+    const __m256i *thr, const __m256i *qp, const tran_low_t *coeff_ptr,
+    const int16_t *iscan_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+    __m256i *eob) {
+  const __m256i coeff = load_coefficients_avx2(coeff_ptr);
+  const __m256i abs_coeff = _mm256_abs_epi16(coeff);
+  const __m256i mask = _mm256_cmpgt_epi16(abs_coeff, *thr);
+  const int nzflag = _mm256_movemask_epi8(mask);
+
+  if (nzflag) {
+    const __m256i tmp_rnd = _mm256_adds_epi16(abs_coeff, qp[0]);
+    // (abs + round) * quant >> 16, then the sign is restored from |coeff|.
+    const __m256i abs_q = _mm256_mulhi_epi16(tmp_rnd, qp[1]);
+    const __m256i q = _mm256_sign_epi16(abs_q, coeff);
+    const __m256i dq = _mm256_mullo_epi16(q, qp[2]);
+    const __m256i nz_mask = _mm256_cmpgt_epi16(abs_q, _mm256_setzero_si256());
+
+    store_coefficients_avx2(q, qcoeff_ptr);
+    store_coefficients_avx2(dq, dqcoeff_ptr);
+
+    *eob = get_max_lane_eob(iscan_ptr, *eob, nz_mask);
+  } else {
+    write_zero(qcoeff_ptr);
+    write_zero(dqcoeff_ptr);
+  }
+}
+
+// Fast-path quantizer for regular-size transforms (log_scale == 0). The
+// first 16 coefficients use mixed DC/AC parameters; update_qp() then
+// switches to AC-only for the remainder. zbin/quant_shift are unused in the
+// fp variant.
+void av1_quantize_fp_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                          const int16_t *zbin_ptr, const int16_t *round_ptr,
+                          const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr,
+                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                          const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  (void)scan_ptr;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+
+  const int log_scale = 0;
+  const int step = 16;
+  __m256i qp[3], thr;
+  __m256i eob = _mm256_setzero_si256();
+
+  init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, &thr, qp);
+
+  quantize_fp_16(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr, &eob);
+
+  coeff_ptr += step;
+  qcoeff_ptr += step;
+  dqcoeff_ptr += step;
+  iscan_ptr += step;
+  n_coeffs -= step;
+
+  update_qp(&thr, qp);
+
+  while (n_coeffs > 0) {
+    quantize_fp_16(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
+                   &eob);
+
+    coeff_ptr += step;
+    qcoeff_ptr += step;
+    dqcoeff_ptr += step;
+    iscan_ptr += step;
+    n_coeffs -= step;
+  }
+  *eob_ptr = quant_gather_eob(eob);
+}
+
+// Quantize 16 coefficients for 32x32 transforms (log_scale == 1): quant was
+// pre-shifted left in init_qp(), and the dequantized magnitude is halved
+// (srli by 1) before the sign is reapplied.
+static AOM_FORCE_INLINE void quantize_fp_32x32(
+    const __m256i *thr, const __m256i *qp, const tran_low_t *coeff_ptr,
+    const int16_t *iscan_ptr, tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+    __m256i *eob) {
+  const __m256i coeff = load_coefficients_avx2(coeff_ptr);
+  const __m256i abs_coeff = _mm256_abs_epi16(coeff);
+  const __m256i mask = _mm256_cmpgt_epi16(abs_coeff, *thr);
+  const int nzflag = _mm256_movemask_epi8(mask);
+
+  if (nzflag) {
+    const __m256i tmp_rnd = _mm256_adds_epi16(abs_coeff, qp[0]);
+    // Unsigned high multiply: the pre-shifted quant can exceed int16 range.
+    const __m256i abs_q = _mm256_mulhi_epu16(tmp_rnd, qp[1]);
+    const __m256i q = _mm256_sign_epi16(abs_q, coeff);
+    const __m256i abs_dq =
+        _mm256_srli_epi16(_mm256_mullo_epi16(abs_q, qp[2]), 1);
+    const __m256i nz_mask = _mm256_cmpgt_epi16(abs_q, _mm256_setzero_si256());
+    const __m256i dq = _mm256_sign_epi16(abs_dq, coeff);
+
+    store_coefficients_avx2(q, qcoeff_ptr);
+    store_coefficients_avx2(dq, dqcoeff_ptr);
+
+    *eob = get_max_lane_eob(iscan_ptr, *eob, nz_mask);
+  } else {
+    write_zero(qcoeff_ptr);
+    write_zero(dqcoeff_ptr);
+  }
+}
+
+// Fast-path quantizer for 32x32 transforms (log_scale == 1); same driver
+// structure as av1_quantize_fp_avx2() with the 32x32 per-group kernel.
+void av1_quantize_fp_32x32_avx2(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  (void)scan_ptr;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+
+  const int log_scale = 1;
+  const unsigned int step = 16;
+  __m256i qp[3], thr;
+  __m256i eob = _mm256_setzero_si256();
+
+  init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, &thr, qp);
+
+  quantize_fp_32x32(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
+                    &eob);
+
+  coeff_ptr += step;
+  qcoeff_ptr += step;
+  dqcoeff_ptr += step;
+  iscan_ptr += step;
+  n_coeffs -= step;
+
+  update_qp(&thr, qp);
+
+  while (n_coeffs > 0) {
+    quantize_fp_32x32(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
+                      &eob);
+
+    coeff_ptr += step;
+    qcoeff_ptr += step;
+    dqcoeff_ptr += step;
+    iscan_ptr += step;
+    n_coeffs -= step;
+  }
+  *eob_ptr = quant_gather_eob(eob);
+}
+
+// Quantize 16 coefficients for 64x64 transforms (log_scale == 2). The
+// quantized value is ((x * quant) >> 14), assembled from the mulhi/mullo
+// halves; the dequantized value is ((q * dequant) >> 2), assembled the same
+// way with the opposite shift split.
+static INLINE void quantize_fp_64x64(const __m256i *thr, const __m256i *qp,
+                                     const tran_low_t *coeff_ptr,
+                                     const int16_t *iscan_ptr,
+                                     tran_low_t *qcoeff_ptr,
+                                     tran_low_t *dqcoeff_ptr, __m256i *eob) {
+  const __m256i coeff = load_coefficients_avx2(coeff_ptr);
+  const __m256i abs_coeff = _mm256_abs_epi16(coeff);
+  const __m256i mask = _mm256_cmpgt_epi16(abs_coeff, *thr);
+  const int nzflag = _mm256_movemask_epi8(mask);
+
+  if (nzflag) {
+    const __m256i tmp_rnd =
+        _mm256_and_si256(_mm256_adds_epi16(abs_coeff, qp[0]), mask);
+    const __m256i qh = _mm256_slli_epi16(_mm256_mulhi_epi16(tmp_rnd, qp[1]), 2);
+    const __m256i ql =
+        _mm256_srli_epi16(_mm256_mullo_epi16(tmp_rnd, qp[1]), 14);
+    const __m256i abs_q = _mm256_or_si256(qh, ql);
+    const __m256i dqh = _mm256_slli_epi16(_mm256_mulhi_epi16(abs_q, qp[2]), 14);
+    const __m256i dql = _mm256_srli_epi16(_mm256_mullo_epi16(abs_q, qp[2]), 2);
+    const __m256i abs_dq = _mm256_or_si256(dqh, dql);
+    const __m256i q = _mm256_sign_epi16(abs_q, coeff);
+    const __m256i dq = _mm256_sign_epi16(abs_dq, coeff);
+    // Check the signed q/dq value here instead of the absolute value. When
+    // dequant equals 4, the dequant threshold (*thr) becomes 0 after being
+    // scaled down by (1 + log_scale). See init_qp(). When *thr is 0 and the
+    // abs_coeff is 0, the nzflag will be set. As a result, the eob will be
+    // incorrectly calculated. The psign instruction corrects the error by
+    // zeroing out q/dq if coeff is zero.
+    const __m256i z_mask = _mm256_cmpeq_epi16(dq, _mm256_setzero_si256());
+    const __m256i nz_mask = _mm256_cmpeq_epi16(z_mask, _mm256_setzero_si256());
+
+    store_coefficients_avx2(q, qcoeff_ptr);
+    store_coefficients_avx2(dq, dqcoeff_ptr);
+
+    *eob = get_max_lane_eob(iscan_ptr, *eob, nz_mask);
+  } else {
+    write_zero(qcoeff_ptr);
+    write_zero(dqcoeff_ptr);
+  }
+}
+
+// Fast-path quantizer for 64x64 transforms (log_scale == 2); same driver
+// structure as av1_quantize_fp_avx2() with the 64x64 per-group kernel.
+void av1_quantize_fp_64x64_avx2(
+    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
+    const int16_t *round_ptr, const int16_t *quant_ptr,
+    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
+    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
+    const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  (void)scan_ptr;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+
+  const int log_scale = 2;
+  const unsigned int step = 16;
+  __m256i qp[3], thr;
+  __m256i eob = _mm256_setzero_si256();
+
+  init_qp(round_ptr, quant_ptr, dequant_ptr, log_scale, &thr, qp);
+
+  quantize_fp_64x64(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
+                    &eob);
+
+  coeff_ptr += step;
+  qcoeff_ptr += step;
+  dqcoeff_ptr += step;
+  iscan_ptr += step;
+  n_coeffs -= step;
+
+  update_qp(&thr, qp);
+
+  while (n_coeffs > 0) {
+    quantize_fp_64x64(&thr, qp, coeff_ptr, iscan_ptr, qcoeff_ptr, dqcoeff_ptr,
+                      &eob);
+
+    coeff_ptr += step;
+    qcoeff_ptr += step;
+    dqcoeff_ptr += step;
+    iscan_ptr += step;
+    n_coeffs -= step;
+  }
+  *eob_ptr = quant_gather_eob(eob);
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_quantize_sse2.c b/third_party/aom/av1/encoder/x86/av1_quantize_sse2.c
new file mode 100644
index 0000000000..b533894015
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_quantize_sse2.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <emmintrin.h>
+#include <xmmintrin.h>
+
+#include "config/av1_rtcd.h"
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/x86/quantize_x86.h"
+
+// Read 16 coefficients at |coeff + offset| into two 8x16-bit registers,
+// packing down from 32-bit storage when tran_low_t is 4 bytes (the branch
+// on sizeof is resolved at compile time).
+static INLINE void read_coeff(const tran_low_t *coeff, intptr_t offset,
+                              __m128i *c0, __m128i *c1) {
+  const tran_low_t *addr = coeff + offset;
+  if (sizeof(tran_low_t) == 4) {
+    const __m128i x0 = _mm_load_si128((const __m128i *)addr);
+    const __m128i x1 = _mm_load_si128((const __m128i *)addr + 1);
+    const __m128i x2 = _mm_load_si128((const __m128i *)addr + 2);
+    const __m128i x3 = _mm_load_si128((const __m128i *)addr + 3);
+    *c0 = _mm_packs_epi32(x0, x1);
+    *c1 = _mm_packs_epi32(x2, x3);
+  } else {
+    *c0 = _mm_load_si128((const __m128i *)addr);
+    *c1 = _mm_load_si128((const __m128i *)addr + 1);
+  }
+}
+
+// Store 16 packed 16-bit coefficients to |qcoeff + offset|, sign-extending
+// to 32-bit storage when tran_low_t is 4 bytes (inverse of read_coeff()).
+static INLINE void write_qcoeff(const __m128i *qc0, const __m128i *qc1,
+                                tran_low_t *qcoeff, intptr_t offset) {
+  tran_low_t *addr = qcoeff + offset;
+  if (sizeof(tran_low_t) == 4) {
+    const __m128i zero = _mm_setzero_si128();
+    __m128i sign_bits = _mm_cmplt_epi16(*qc0, zero);
+    __m128i y0 = _mm_unpacklo_epi16(*qc0, sign_bits);
+    __m128i y1 = _mm_unpackhi_epi16(*qc0, sign_bits);
+    _mm_store_si128((__m128i *)addr, y0);
+    _mm_store_si128((__m128i *)addr + 1, y1);
+
+    sign_bits = _mm_cmplt_epi16(*qc1, zero);
+    y0 = _mm_unpacklo_epi16(*qc1, sign_bits);
+    y1 = _mm_unpackhi_epi16(*qc1, sign_bits);
+    _mm_store_si128((__m128i *)addr + 2, y0);
+    _mm_store_si128((__m128i *)addr + 3, y1);
+  } else {
+    _mm_store_si128((__m128i *)addr, *qc0);
+    _mm_store_si128((__m128i *)addr + 1, *qc1);
+  }
+}
+
+// Zero 16 coefficients at |qcoeff + offset| (four or two 128-bit stores
+// depending on sizeof(tran_low_t)).
+static INLINE void write_zero(tran_low_t *qcoeff, intptr_t offset) {
+  const __m128i zero = _mm_setzero_si128();
+  tran_low_t *addr = qcoeff + offset;
+  if (sizeof(tran_low_t) == 4) {
+    _mm_store_si128((__m128i *)addr, zero);
+    _mm_store_si128((__m128i *)addr + 1, zero);
+    _mm_store_si128((__m128i *)addr + 2, zero);
+    _mm_store_si128((__m128i *)addr + 3, zero);
+  } else {
+    _mm_store_si128((__m128i *)addr, zero);
+    _mm_store_si128((__m128i *)addr + 1, zero);
+  }
+}
+
+// Quantize 16 coefficients at negative offset |n_coeffs| from the
+// end-adjusted pointers (see av1_quantize_fp_sse2). Each lane-pair of
+// parameters (|round0|/|round1| etc.) covers the first/second group of 8
+// coefficients so DC can differ from AC on the first call. Groups that are
+// entirely at or below the threshold are zeroed without multiplying.
+static INLINE void quantize(const int16_t *iscan_ptr,
+                            const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                            tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                            const __m128i *round0, const __m128i *round1,
+                            const __m128i *quant0, const __m128i *quant1,
+                            const __m128i *dequant0, const __m128i *dequant1,
+                            const __m128i *thr0, const __m128i *thr1,
+                            __m128i *eob) {
+  __m128i coeff0, coeff1;
+  // Do DC and first 15 AC
+  read_coeff(coeff_ptr, n_coeffs, &coeff0, &coeff1);
+
+  // Poor man's sign extract
+  const __m128i coeff0_sign = _mm_srai_epi16(coeff0, 15);
+  const __m128i coeff1_sign = _mm_srai_epi16(coeff1, 15);
+  // abs(x) via xor-with-sign then subtract-sign.
+  __m128i qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+  __m128i qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+  // SSE2 has no >=, so >= is built from (> | ==).
+  const __m128i mask0 = _mm_or_si128(_mm_cmpgt_epi16(qcoeff0, *thr0),
+                                     _mm_cmpeq_epi16(qcoeff0, *thr0));
+  const __m128i mask1 = _mm_or_si128(_mm_cmpgt_epi16(qcoeff1, *thr1),
+                                     _mm_cmpeq_epi16(qcoeff1, *thr1));
+  const int nzflag = _mm_movemask_epi8(mask0) | _mm_movemask_epi8(mask1);
+
+  if (nzflag) {
+    qcoeff0 = _mm_adds_epi16(qcoeff0, *round0);
+    qcoeff1 = _mm_adds_epi16(qcoeff1, *round1);
+    // (abs + round) * quant >> 16.
+    const __m128i qtmp0 = _mm_mulhi_epi16(qcoeff0, *quant0);
+    const __m128i qtmp1 = _mm_mulhi_epi16(qcoeff1, *quant1);
+
+    // Reinsert signs
+    qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+    qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+    qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+    qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+    write_qcoeff(&qcoeff0, &qcoeff1, qcoeff_ptr, n_coeffs);
+
+    coeff0 = _mm_mullo_epi16(qcoeff0, *dequant0);
+    coeff1 = _mm_mullo_epi16(qcoeff1, *dequant1);
+
+    write_qcoeff(&coeff0, &coeff1, dqcoeff_ptr, n_coeffs);
+
+    const __m128i zero = _mm_setzero_si128();
+    // Scan for eob
+    const __m128i zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+    const __m128i zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+    const __m128i nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+    const __m128i nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+    const __m128i iscan0 =
+        _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+    const __m128i iscan1 =
+        _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+    // Add one to convert from indices to counts
+    const __m128i iscan0_nz = _mm_sub_epi16(iscan0, nzero_coeff0);
+    const __m128i iscan1_nz = _mm_sub_epi16(iscan1, nzero_coeff1);
+    const __m128i eob0 = _mm_and_si128(iscan0_nz, nzero_coeff0);
+    const __m128i eob1 = _mm_and_si128(iscan1_nz, nzero_coeff1);
+    const __m128i eob2 = _mm_max_epi16(eob0, eob1);
+    *eob = _mm_max_epi16(*eob, eob2);
+  } else {
+    write_zero(qcoeff_ptr, n_coeffs);
+    write_zero(dqcoeff_ptr, n_coeffs);
+  }
+}
+
+// SSE2 fast-path quantizer. Pointers are advanced to the array end and
+// indexed with a negative, incrementing |n_coeffs| so the loop condition is
+// a simple sign test. The first call mixes DC (|*0|) and AC (|*1|)
+// parameters; the AC-only loop passes the AC registers for both halves.
+void av1_quantize_fp_sse2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
+                          const int16_t *zbin_ptr, const int16_t *round_ptr,
+                          const int16_t *quant_ptr,
+                          const int16_t *quant_shift_ptr,
+                          tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
+                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                          const int16_t *scan_ptr, const int16_t *iscan_ptr) {
+  (void)scan_ptr;
+  (void)zbin_ptr;
+  (void)quant_shift_ptr;
+
+  coeff_ptr += n_coeffs;
+  iscan_ptr += n_coeffs;
+  qcoeff_ptr += n_coeffs;
+  dqcoeff_ptr += n_coeffs;
+  n_coeffs = -n_coeffs;
+
+  const __m128i round0 = _mm_load_si128((const __m128i *)round_ptr);
+  const __m128i round1 = _mm_unpackhi_epi64(round0, round0);
+  const __m128i quant0 = _mm_load_si128((const __m128i *)quant_ptr);
+  const __m128i quant1 = _mm_unpackhi_epi64(quant0, quant0);
+  const __m128i dequant0 = _mm_load_si128((const __m128i *)dequant_ptr);
+  const __m128i dequant1 = _mm_unpackhi_epi64(dequant0, dequant0);
+  // Skip threshold: half the dequant step.
+  const __m128i thr0 = _mm_srai_epi16(dequant0, 1);
+  const __m128i thr1 = _mm_srai_epi16(dequant1, 1);
+  __m128i eob = _mm_setzero_si128();
+
+  quantize(iscan_ptr, coeff_ptr, n_coeffs, qcoeff_ptr, dqcoeff_ptr, &round0,
+           &round1, &quant0, &quant1, &dequant0, &dequant1, &thr0, &thr1, &eob);
+
+  n_coeffs += 8 * 2;
+
+  // AC only loop
+  while (n_coeffs < 0) {
+    quantize(iscan_ptr, coeff_ptr, n_coeffs, qcoeff_ptr, dqcoeff_ptr, &round1,
+             &round1, &quant1, &quant1, &dequant1, &dequant1, &thr1, &thr1,
+             &eob);
+    n_coeffs += 8 * 2;
+  }
+
+  // Accumulate EOB
+  {
+    // Horizontal max via shuffle/pmaxsw reduction; lane 1 ends up with the
+    // overall maximum.
+    __m128i eob_shuffled;
+    eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
+    eob = _mm_max_epi16(eob, eob_shuffled);
+    eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
+    eob = _mm_max_epi16(eob, eob_shuffled);
+    eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
+    eob = _mm_max_epi16(eob, eob_shuffled);
+    *eob_ptr = _mm_extract_epi16(eob, 1);
+  }
+}
+
+// Quantize one group of 16 low-precision (int16) coefficients:
+//   q[i]  = sign(c[i]) * (((|c[i]| + round) * quant) >> 16)
+//   dq[i] = q[i] * dequant   (low 16 bits of the product)
+// and fold the running end-of-block position into *eob.
+// n_coeffs is a negative offset from the end-advanced pointers.
+static INLINE void quantize_lp(const int16_t *iscan_ptr,
+                               const int16_t *coeff_ptr, intptr_t n_coeffs,
+                               int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
+                               const __m128i *round0, const __m128i *round1,
+                               const __m128i *quant0, const __m128i *quant1,
+                               const __m128i *dequant0, const __m128i *dequant1,
+                               __m128i *eob) {
+  const int16_t *read = coeff_ptr + n_coeffs;
+  __m128i coeff0 = _mm_load_si128((const __m128i *)read);
+  __m128i coeff1 = _mm_load_si128((const __m128i *)read + 1);
+
+  // Poor man's sign extract
+  const __m128i coeff0_sign = _mm_srai_epi16(coeff0, 15);
+  const __m128i coeff1_sign = _mm_srai_epi16(coeff1, 15);
+  // abs(c) computed as (c ^ sign) - sign.
+  __m128i qcoeff0 = _mm_xor_si128(coeff0, coeff0_sign);
+  __m128i qcoeff1 = _mm_xor_si128(coeff1, coeff1_sign);
+  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+  // Saturating add of the rounding term; the high halves of the widening
+  // multiplies then give ((|c| + round) * quant) >> 16.
+  qcoeff0 = _mm_adds_epi16(qcoeff0, *round0);
+  qcoeff1 = _mm_adds_epi16(qcoeff1, *round1);
+  const __m128i qtmp0 = _mm_mulhi_epi16(qcoeff0, *quant0);
+  const __m128i qtmp1 = _mm_mulhi_epi16(qcoeff1, *quant1);
+
+  // Reinsert signs
+  qcoeff0 = _mm_xor_si128(qtmp0, coeff0_sign);
+  qcoeff1 = _mm_xor_si128(qtmp1, coeff1_sign);
+  qcoeff0 = _mm_sub_epi16(qcoeff0, coeff0_sign);
+  qcoeff1 = _mm_sub_epi16(qcoeff1, coeff1_sign);
+
+  int16_t *addr = qcoeff_ptr + n_coeffs;
+  _mm_store_si128((__m128i *)addr, qcoeff0);
+  _mm_store_si128((__m128i *)addr + 1, qcoeff1);
+
+  // Dequantize: dq = q * dequant (16-bit product).
+  coeff0 = _mm_mullo_epi16(qcoeff0, *dequant0);
+  coeff1 = _mm_mullo_epi16(qcoeff1, *dequant1);
+
+  addr = dqcoeff_ptr + n_coeffs;
+  _mm_store_si128((__m128i *)addr, coeff0);
+  _mm_store_si128((__m128i *)addr + 1, coeff1);
+
+  const __m128i zero = _mm_setzero_si128();
+  // Scan for eob
+  const __m128i zero_coeff0 = _mm_cmpeq_epi16(coeff0, zero);
+  const __m128i zero_coeff1 = _mm_cmpeq_epi16(coeff1, zero);
+  const __m128i nzero_coeff0 = _mm_cmpeq_epi16(zero_coeff0, zero);
+  const __m128i nzero_coeff1 = _mm_cmpeq_epi16(zero_coeff1, zero);
+
+  const __m128i iscan0 =
+      _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs));
+  const __m128i iscan1 =
+      _mm_load_si128((const __m128i *)(iscan_ptr + n_coeffs) + 1);
+
+  // Add one to convert from indices to counts
+  // (nzero_coeff* is all-ones (-1) for nonzero lanes, so subtracting it
+  // adds 1; the AND zeroes lanes whose coefficient quantized to zero).
+  const __m128i iscan0_nz = _mm_sub_epi16(iscan0, nzero_coeff0);
+  const __m128i iscan1_nz = _mm_sub_epi16(iscan1, nzero_coeff1);
+  const __m128i eob0 = _mm_and_si128(iscan0_nz, nzero_coeff0);
+  const __m128i eob1 = _mm_and_si128(iscan1_nz, nzero_coeff1);
+  const __m128i eob2 = _mm_max_epi16(eob0, eob1);
+  *eob = _mm_max_epi16(*eob, eob2);
+}
+
+// Low-precision counterpart of av1_quantize_fp_sse2 operating entirely on
+// int16 coefficients. There is no zbin/quant_shift and no zero-skip
+// threshold. Assumes n_coeffs is a multiple of 16 (one quantize_lp call
+// consumes 16 coefficients per iteration).
+void av1_quantize_lp_sse2(const int16_t *coeff_ptr, intptr_t n_coeffs,
+                          const int16_t *round_ptr, const int16_t *quant_ptr,
+                          int16_t *qcoeff_ptr, int16_t *dqcoeff_ptr,
+                          const int16_t *dequant_ptr, uint16_t *eob_ptr,
+                          const int16_t *scan, const int16_t *iscan) {
+  (void)scan;
+  // Advance to the end and index with a negative offset counting up to 0.
+  coeff_ptr += n_coeffs;
+  iscan += n_coeffs;
+  qcoeff_ptr += n_coeffs;
+  dqcoeff_ptr += n_coeffs;
+  n_coeffs = -n_coeffs;
+
+  // Setup global values
+  // Lane 0 holds the DC constant, lanes 4..7 the AC constants; the *1
+  // registers broadcast the AC values to every lane.
+  const __m128i round0 = _mm_load_si128((const __m128i *)round_ptr);
+  const __m128i round1 = _mm_unpackhi_epi64(round0, round0);
+  const __m128i quant0 = _mm_load_si128((const __m128i *)quant_ptr);
+  const __m128i quant1 = _mm_unpackhi_epi64(quant0, quant0);
+  const __m128i dequant0 = _mm_load_si128((const __m128i *)dequant_ptr);
+  const __m128i dequant1 = _mm_unpackhi_epi64(dequant0, dequant0);
+  __m128i eob = _mm_setzero_si128();
+
+  // DC and first 15 AC
+  quantize_lp(iscan, coeff_ptr, n_coeffs, qcoeff_ptr, dqcoeff_ptr, &round0,
+              &round1, &quant0, &quant1, &dequant0, &dequant1, &eob);
+  n_coeffs += 8 * 2;
+
+  // AC only loop
+  while (n_coeffs < 0) {
+    quantize_lp(iscan, coeff_ptr, n_coeffs, qcoeff_ptr, dqcoeff_ptr, &round1,
+                &round1, &quant1, &quant1, &dequant1, &dequant1, &eob);
+    n_coeffs += 8 * 2;
+  }
+
+  // Accumulate EOB
+  *eob_ptr = accumulate_eob(eob);
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_quantize_ssse3_x86_64.asm b/third_party/aom/av1/encoder/x86/av1_quantize_ssse3_x86_64.asm
new file mode 100644
index 0000000000..ad4ae274e2
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_quantize_ssse3_x86_64.asm
@@ -0,0 +1,204 @@
+;
+; Copyright (c) 2016, Alliance for Open Media. All rights reserved
+;
+; This source code is subject to the terms of the BSD 2 Clause License and
+; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+; was not distributed with this source code in the LICENSE file, you can
+; obtain it at www.aomedia.org/license/software. If the Alliance for Open
+; Media Patent License 1.0 was not distributed with this source code in the
+; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+;
+
+;
+
+%define private_prefix av1
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION_RODATA
+pw_1: times 8 dw 1
+
+SECTION .text
+
+; QUANTIZE_FP <variant>, <nregs>
+; Emits av1_quantize_<variant>_ssse3. Per coefficient:
+;   q[i]  = sign(c[i]) * (((|c[i]| + round) * quant) >> 16)
+;   dq[i] = q[i] * dequant
+; For fp_32x32, round is halved, quant is doubled and the dequantized
+; value is shifted right by one (extra 32x32 transform scaling).
+; *eob receives 1 + the scan position of the last nonzero coefficient,
+; or 0 when the block is all zero / skip-flagged.
+%macro QUANTIZE_FP 2
+cglobal quantize_%1, 0, %2, 15, coeff, ncoeff, skip, zbin, round, quant, \
+                                shift, qcoeff, dqcoeff, dequant, \
+                                eob, scan, iscan
+  cmp dword skipm, 0
+  jne .blank
+
+  ; actual quantize loop - setup pointers, rounders, etc.
+  movifnidn coeffq, coeffmp
+  movifnidn ncoeffq, ncoeffmp
+  mov r2, dequantmp
+  movifnidn zbinq, zbinmp
+  movifnidn roundq, roundmp
+  movifnidn quantq, quantmp
+  mova m1, [roundq] ; m1 = round
+  mova m2, [quantq] ; m2 = quant
+%ifidn %1, fp_32x32
+  pcmpeqw m5, m5
+  psrlw m5, 15
+  paddw m1, m5
+  psrlw m1, 1 ; m1 = (m1 + 1) / 2
+%endif
+  mova m3, [r2q] ; m3 = dequant
+  mov r3, qcoeffmp
+  mov r4, dqcoeffmp
+  mov r5, iscanmp
+%ifidn %1, fp_32x32
+  psllw m2, 1
+%endif
+  pxor m5, m5 ; m5 = dedicated zero
+
+  ; advance all pointers to the end; ncoeffq becomes a negative index
+  ; counting up towards zero.
+  lea coeffq, [ coeffq+ncoeffq*2]
+  lea r5q, [ r5q+ncoeffq*2]
+  lea r3q, [ r3q+ncoeffq*2]
+  lea r4q, [r4q+ncoeffq*2]
+  neg ncoeffq
+
+  ; get DC and first 15 AC coeffs
+  mova m9, [ coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+  mova m10, [ coeffq+ncoeffq*2+16] ; m10 = c[i]
+  pabsw m6, m9 ; m6 = abs(m9)
+  pabsw m11, m10 ; m11 = abs(m10)
+  pcmpeqw m7, m7
+
+  paddsw m6, m1 ; m6 += round
+  punpckhqdq m1, m1 ; broadcast the AC rounder for all later lanes
+  paddsw m11, m1 ; m11 += round
+  pmulhw m8, m6, m2 ; m8 = m6*q>>16
+  punpckhqdq m2, m2 ; broadcast the AC quant for all later lanes
+  pmulhw m13, m11, m2 ; m13 = m11*q>>16
+  psignw m8, m9 ; m8 = reinsert sign
+  psignw m13, m10 ; m13 = reinsert sign
+  mova [r3q+ncoeffq*2+ 0], m8
+  mova [r3q+ncoeffq*2+16], m13
+%ifidn %1, fp_32x32
+  pabsw m8, m8
+  pabsw m13, m13
+%endif
+  pmullw m8, m3 ; r4[i] = r3[i] * q
+  punpckhqdq m3, m3 ; broadcast the AC dequant for all later lanes
+  pmullw m13, m3 ; r4[i] = r3[i] * q
+%ifidn %1, fp_32x32
+  psrlw m8, 1
+  psrlw m13, 1
+  psignw m8, m9
+  psignw m13, m10
+  psrlw m0, m3, 2
+%else
+  psrlw m0, m3, 1
+%endif
+  mova [r4q+ncoeffq*2+ 0], m8
+  mova [r4q+ncoeffq*2+16], m13
+  pcmpeqw m8, m5 ; m8 = c[i] == 0
+  pcmpeqw m13, m5 ; m13 = c[i] == 0
+  mova m6, [ r5q+ncoeffq*2+ 0] ; m6 = scan[i]
+  mova m11, [ r5q+ncoeffq*2+16] ; m11 = scan[i]
+  psubw m6, m7 ; m6 = scan[i] + 1
+  psubw m11, m7 ; m11 = scan[i] + 1
+  pandn m8, m6 ; m8 = max(eob)
+  pandn m13, m11 ; m13 = max(eob)
+  pmaxsw m8, m13
+  add ncoeffq, mmsize
+  jz .accumulate_eob
+
+.ac_only_loop:
+  mova m9, [ coeffq+ncoeffq*2+ 0] ; m9 = c[i]
+  mova m10, [ coeffq+ncoeffq*2+16] ; m10 = c[i]
+  pabsw m6, m9 ; m6 = abs(m9)
+  pabsw m11, m10 ; m11 = abs(m10)
+
+  ; m0 = dequant>>1 (dequant>>2 for fp_32x32): when no |c| in this group
+  ; exceeds it, all 16 coefficients quantize to zero, so skip the math.
+  pcmpgtw m7, m6, m0
+  pcmpgtw m12, m11, m0
+  pmovmskb r6d, m7
+  pmovmskb r2d, m12
+
+  or r6, r2
+  jz .skip_iter
+
+  pcmpeqw m7, m7
+
+  paddsw m6, m1 ; m6 += round
+  paddsw m11, m1 ; m11 += round
+  pmulhw m14, m6, m2 ; m14 = m6*q>>16
+  pmulhw m13, m11, m2 ; m13 = m11*q>>16
+  psignw m14, m9 ; m14 = reinsert sign
+  psignw m13, m10 ; m13 = reinsert sign
+  mova [r3q+ncoeffq*2+ 0], m14
+  mova [r3q+ncoeffq*2+16], m13
+%ifidn %1, fp_32x32
+  pabsw m14, m14
+  pabsw m13, m13
+%endif
+  pmullw m14, m3 ; r4[i] = r3[i] * q
+  pmullw m13, m3 ; r4[i] = r3[i] * q
+%ifidn %1, fp_32x32
+  psrlw m14, 1
+  psrlw m13, 1
+  psignw m14, m9
+  psignw m13, m10
+%endif
+  mova [r4q+ncoeffq*2+ 0], m14
+  mova [r4q+ncoeffq*2+16], m13
+  pcmpeqw m14, m5 ; m14 = c[i] == 0
+  pcmpeqw m13, m5 ; m13 = c[i] == 0
+  mova m6, [ r5q+ncoeffq*2+ 0] ; m6 = scan[i]
+  mova m11, [ r5q+ncoeffq*2+16] ; m11 = scan[i]
+  psubw m6, m7 ; m6 = scan[i] + 1
+  psubw m11, m7 ; m11 = scan[i] + 1
+  pandn m14, m6 ; m14 = max(eob)
+  pandn m13, m11 ; m13 = max(eob)
+  pmaxsw m8, m14
+  pmaxsw m8, m13
+  add ncoeffq, mmsize
+  jl .ac_only_loop
+
+  jmp .accumulate_eob
+.skip_iter:
+  ; below-threshold group: store 16 zero qcoeffs and dqcoeffs
+  mova [r3q+ncoeffq*2+ 0], m5
+  mova [r3q+ncoeffq*2+16], m5
+  mova [r4q+ncoeffq*2+ 0], m5
+  mova [r4q+ncoeffq*2+16], m5
+  add ncoeffq, mmsize
+  jl .ac_only_loop
+
+.accumulate_eob:
+  ; horizontally accumulate/max eobs and write into [eob] memory pointer
+  mov r2, eobmp
+  pshufd m7, m8, 0xe
+  pmaxsw m8, m7
+  pshuflw m7, m8, 0xe
+  pmaxsw m8, m7
+  pshuflw m7, m8, 0x1
+  pmaxsw m8, m7
+  pextrw r6, m8, 0
+  mov [r2], r6
+  RET
+
+  ; skip-block, i.e. just write all zeroes
+.blank:
+  mov r0, dqcoeffmp
+  movifnidn ncoeffq, ncoeffmp
+  mov r2, qcoeffmp
+  mov r3, eobmp
+
+  lea r0q, [r0q+ncoeffq*2]
+  lea r2q, [r2q+ncoeffq*2]
+  neg ncoeffq
+  pxor m7, m7
+.blank_loop:
+  mova [r0q+ncoeffq*2+ 0], m7
+  mova [r0q+ncoeffq*2+16], m7
+  mova [r2q+ncoeffq*2+ 0], m7
+  mova [r2q+ncoeffq*2+16], m7
+  add ncoeffq, mmsize
+  jl .blank_loop
+  mov word [r3q], 0
+  RET
+%endmacro
+
+INIT_XMM ssse3
+QUANTIZE_FP fp, 7
+QUANTIZE_FP fp_32x32, 7
diff --git a/third_party/aom/av1/encoder/x86/av1_ssim_opt_x86_64.asm b/third_party/aom/av1/encoder/x86/av1_ssim_opt_x86_64.asm
new file mode 100644
index 0000000000..618758105a
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_ssim_opt_x86_64.asm
@@ -0,0 +1,222 @@
+;
+; Copyright (c) 2016, Alliance for Open Media. All rights reserved
+;
+; This source code is subject to the terms of the BSD 2 Clause License and
+; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+; was not distributed with this source code in the LICENSE file, you can
+; obtain it at www.aomedia.org/license/software. If the Alliance for Open
+; Media Patent License 1.0 was not distributed with this source code in the
+; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+;
+
+;
+
+%include "aom_ports/x86_abi_support.asm"
+
+; tabulate_ssim - sums sum_s,sum_r,sum_sq_s,sum_sq_r, sum_sxr
+; Inputs: xmm3 = 8 source pixels (16-bit lanes), xmm4 = 8 reference
+; pixels (16-bit lanes).
+; Accumulators: xmm15=sum_s, xmm14=sum_r, xmm13=sum_sq_s, xmm12=sum_sq_r,
+; xmm11=sum_sxr. Clobbers xmm1, xmm2 and xmm3.
+%macro TABULATE_SSIM 0
+    paddusw xmm15, xmm3 ; sum_s
+    paddusw xmm14, xmm4 ; sum_r
+    movdqa xmm1, xmm3
+    pmaddwd xmm1, xmm1
+    paddd xmm13, xmm1 ; sum_sq_s
+    movdqa xmm2, xmm4
+    pmaddwd xmm2, xmm2
+    paddd xmm12, xmm2 ; sum_sq_r
+    pmaddwd xmm3, xmm4
+    paddd xmm11, xmm3 ; sum_sxr
+%endmacro
+
+; Sum across the register %1 starting with q words
+; (zero-extend dword lanes to qwords via unpack with xmm0 == 0, add, then
+; fold the two qword halves; the total lands in the low lane of %1).
+%macro SUM_ACROSS_Q 1
+    movdqa xmm2,%1
+    punpckldq %1,xmm0
+    punpckhdq xmm2,xmm0
+    paddq %1,xmm2
+    movdqa xmm2,%1
+    punpcklqdq %1,xmm0
+    punpckhqdq xmm2,xmm0
+    paddq %1,xmm2
+%endmacro
+
+; Sum across the register %1 starting with words
+; (widen the eight word lanes to dwords against xmm0 == 0, add, then
+; finish with SUM_ACROSS_Q). Clobbers xmm1.
+%macro SUM_ACROSS_W 1
+    movdqa xmm1, %1
+    punpcklwd %1,xmm0
+    punpckhwd xmm1,xmm0
+    paddd %1, xmm1
+    SUM_ACROSS_Q %1
+%endmacro
+
+SECTION .text
+
+;void av1_ssim_parms_16x16_sse2(
+;    unsigned char *s,
+;    int sp,
+;    unsigned char *r,
+;    int rp,
+;    unsigned long *sum_s,
+;    unsigned long *sum_r,
+;    unsigned long *sum_sq_s,
+;    unsigned long *sum_sq_r,
+;    unsigned long *sum_sxr);
+;
+; TODO: Use parm passing through structure, probably don't need the pxors
+; ( calling app will initialize to 0 ) could easily fit everything in sse2
+; without too much hassle, and can probably do better estimates with psadw
+; or pavgb At this point this is just meant to be first pass for calculating
+; all the parms needed for 16x16 ssim so we can play with dssim as distortion
+; in mode selection code.
+; Computes the five SSIM partial sums (sum_s, sum_r, sum_sq_s, sum_sq_r,
+; sum_sxr) over a 16x16 block of 8-bit pixels.
+; args: 0 = s, 1 = sp (source stride), 2 = r, 3 = rp (reference stride),
+; 4..8 = result pointers; each result is written as a 32-bit store (movd).
+globalsym(av1_ssim_parms_16x16_sse2)
+sym(av1_ssim_parms_16x16_sse2):
+    push rbp
+    mov rbp, rsp
+    SHADOW_ARGS_TO_STACK 9
+    SAVE_XMM 15
+    push rsi
+    push rdi
+    ; end prolog
+
+    mov rsi, arg(0) ;s
+    mov rcx, arg(1) ;sp
+    mov rdi, arg(2) ;r
+    mov rax, arg(3) ;rp
+
+    pxor xmm0, xmm0
+    pxor xmm15,xmm15 ;sum_s
+    pxor xmm14,xmm14 ;sum_r
+    pxor xmm13,xmm13 ;sum_sq_s
+    pxor xmm12,xmm12 ;sum_sq_r
+    pxor xmm11,xmm11 ;sum_sxr
+
+    mov rdx, 16 ;row counter
+.NextRow:
+
+    ;grab source and reference pixels
+    movdqu xmm5, [rsi]
+    movdqu xmm6, [rdi]
+    movdqa xmm3, xmm5
+    movdqa xmm4, xmm6
+    punpckhbw xmm3, xmm0 ; high_s
+    punpckhbw xmm4, xmm0 ; high_r
+
+    TABULATE_SSIM
+
+    movdqa xmm3, xmm5
+    movdqa xmm4, xmm6
+    punpcklbw xmm3, xmm0 ; low_s
+    punpcklbw xmm4, xmm0 ; low_r
+
+    TABULATE_SSIM
+
+    add rsi, rcx ; next s row
+    add rdi, rax ; next r row
+
+    dec rdx ; counter
+    jnz .NextRow
+
+    SUM_ACROSS_W xmm15
+    SUM_ACROSS_W xmm14
+    SUM_ACROSS_Q xmm13
+    SUM_ACROSS_Q xmm12
+    SUM_ACROSS_Q xmm11
+
+    ; write the 32-bit totals through the five result pointers
+    mov rdi,arg(4)
+    movd [rdi], xmm15;
+    mov rdi,arg(5)
+    movd [rdi], xmm14;
+    mov rdi,arg(6)
+    movd [rdi], xmm13;
+    mov rdi,arg(7)
+    movd [rdi], xmm12;
+    mov rdi,arg(8)
+    movd [rdi], xmm11;
+
+    ; begin epilog
+    pop rdi
+    pop rsi
+    RESTORE_XMM
+    UNSHADOW_ARGS
+    pop rbp
+    ret
+
+;void av1_ssim_parms_8x8_sse2(
+;    unsigned char *s,
+;    int sp,
+;    unsigned char *r,
+;    int rp,
+;    unsigned long *sum_s,
+;    unsigned long *sum_r,
+;    unsigned long *sum_sq_s,
+;    unsigned long *sum_sq_r,
+;    unsigned long *sum_sxr);
+;
+; TODO: Use parm passing through structure, probably don't need the pxors
+; ( calling app will initialize to 0 ) could easily fit everything in sse2
+; without too much hassle, and can probably do better estimates with psadw
+; or pavgb At this point this is just meant to be first pass for calculating
+; all the parms needed for 16x16 ssim so we can play with dssim as distortion
+; in mode selection code.
+; Computes the five SSIM partial sums (sum_s, sum_r, sum_sq_s, sum_sq_r,
+; sum_sxr) over an 8x8 block of 8-bit pixels; arguments and outputs are
+; as in av1_ssim_parms_16x16_sse2 (each result is a 32-bit movd store).
+globalsym(av1_ssim_parms_8x8_sse2)
+sym(av1_ssim_parms_8x8_sse2):
+    push rbp
+    mov rbp, rsp
+    SHADOW_ARGS_TO_STACK 9
+    SAVE_XMM 15
+    push rsi
+    push rdi
+    ; end prolog
+
+    mov rsi, arg(0) ;s
+    mov rcx, arg(1) ;sp
+    mov rdi, arg(2) ;r
+    mov rax, arg(3) ;rp
+
+    pxor xmm0, xmm0
+    pxor xmm15,xmm15 ;sum_s
+    pxor xmm14,xmm14 ;sum_r
+    pxor xmm13,xmm13 ;sum_sq_s
+    pxor xmm12,xmm12 ;sum_sq_r
+    pxor xmm11,xmm11 ;sum_sxr
+
+    mov rdx, 8 ;row counter
+.NextRow:
+
+    ;grab source and reference pixels (8 per row, widened to 16 bits)
+    movq xmm3, [rsi]
+    movq xmm4, [rdi]
+    punpcklbw xmm3, xmm0 ; low_s
+    punpcklbw xmm4, xmm0 ; low_r
+
+    TABULATE_SSIM
+
+    add rsi, rcx ; next s row
+    add rdi, rax ; next r row
+
+    dec rdx ; counter
+    jnz .NextRow
+
+    SUM_ACROSS_W xmm15
+    SUM_ACROSS_W xmm14
+    SUM_ACROSS_Q xmm13
+    SUM_ACROSS_Q xmm12
+    SUM_ACROSS_Q xmm11
+
+    ; write the 32-bit totals through the five result pointers
+    mov rdi,arg(4)
+    movd [rdi], xmm15;
+    mov rdi,arg(5)
+    movd [rdi], xmm14;
+    mov rdi,arg(6)
+    movd [rdi], xmm13;
+    mov rdi,arg(7)
+    movd [rdi], xmm12;
+    mov rdi,arg(8)
+    movd [rdi], xmm11;
+
+    ; begin epilog
+    pop rdi
+    pop rsi
+    RESTORE_XMM
+    UNSHADOW_ARGS
+    pop rbp
+    ret
diff --git a/third_party/aom/av1/encoder/x86/av1_temporal_denoiser_sse2.c b/third_party/aom/av1/encoder/x86/av1_temporal_denoiser_sse2.c
new file mode 100644
index 0000000000..830f40ecb0
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_temporal_denoiser_sse2.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/x86/mem_sse2.h"
+
+#include "av1/common/reconinter.h"
+#include "av1/encoder/context_tree.h"
+#include "av1/encoder/av1_temporal_denoiser.h"
+
+// Compute the sum of all pixel differences of this MB.
+// acc_diff holds 16 signed 8-bit per-pixel accumulators. Each lane is
+// sign-extended to 16 bits (self-unpack then arithmetic shift), the two
+// halves are added, and the result is reduced horizontally to one 32-bit
+// total.
+static INLINE int sum_diff_16x1(__m128i acc_diff) {
+  const __m128i k_1 = _mm_set1_epi16(1);
+  const __m128i acc_diff_lo =
+      _mm_srai_epi16(_mm_unpacklo_epi8(acc_diff, acc_diff), 8);
+  const __m128i acc_diff_hi =
+      _mm_srai_epi16(_mm_unpackhi_epi8(acc_diff, acc_diff), 8);
+  const __m128i acc_diff_16 = _mm_add_epi16(acc_diff_lo, acc_diff_hi);
+  // madd with 1s sums adjacent 16-bit pairs into 32-bit lanes.
+  const __m128i hg_fe_dc_ba = _mm_madd_epi16(acc_diff_16, k_1);
+  const __m128i hgfe_dcba =
+      _mm_add_epi32(hg_fe_dc_ba, _mm_srli_si128(hg_fe_dc_ba, 8));
+  const __m128i hgfedcba =
+      _mm_add_epi32(hgfe_dcba, _mm_srli_si128(hgfe_dcba, 4));
+  return _mm_cvtsi128_si32(hgfedcba);
+}
+
+// Denoise a 16x1 vector.
+// Moves each sig pixel towards the motion-compensated average by a
+// bounded, magnitude-dependent adjustment (at most the l3 level) and
+// stores the filtered pixels to running_avg_y. The signed per-pixel
+// adjustments are folded into acc_diff and returned.
+static INLINE __m128i av1_denoiser_16x1_sse2(
+    const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
+    const __m128i *k_0, const __m128i *k_4, const __m128i *k_8,
+    const __m128i *k_16, const __m128i *l3, const __m128i *l32,
+    const __m128i *l21, __m128i acc_diff) {
+  // Calculate differences
+  const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
+  const __m128i v_mc_running_avg_y =
+      _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
+  __m128i v_running_avg_y;
+  // Saturating unsigned subtraction: exactly one of pdiff/ndiff is
+  // nonzero per lane, so (pdiff | ndiff) is the absolute difference.
+  const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+  const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+  // Obtain the sign. FF if diff is negative.
+  const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, *k_0);
+  // Clamp absolute difference to 16 to be used to get mask. Doing this
+  // allows us to use _mm_cmpgt_epi8, which operates on signed byte.
+  const __m128i clamped_absdiff =
+      _mm_min_epu8(_mm_or_si128(pdiff, ndiff), *k_16);
+  // Get masks for l2 l1 and l0 adjustments.
+  const __m128i mask2 = _mm_cmpgt_epi8(*k_16, clamped_absdiff);
+  const __m128i mask1 = _mm_cmpgt_epi8(*k_8, clamped_absdiff);
+  const __m128i mask0 = _mm_cmpgt_epi8(*k_4, clamped_absdiff);
+  // Get adjustments for l2, l1, and l0.
+  __m128i adj2 = _mm_and_si128(mask2, *l32);
+  const __m128i adj1 = _mm_and_si128(mask1, *l21);
+  const __m128i adj0 = _mm_and_si128(mask0, clamped_absdiff);
+  __m128i adj, padj, nadj;
+
+  // Combine the adjustments and get absolute adjustments.
+  // Small diffs (< k_4) use the diff itself; larger diffs use a stepped
+  // level l3 - adj2, which shrinks as the diff grows.
+  adj2 = _mm_add_epi8(adj2, adj1);
+  adj = _mm_sub_epi8(*l3, adj2);
+  adj = _mm_andnot_si128(mask0, adj);
+  adj = _mm_or_si128(adj, adj0);
+
+  // Restore the sign and get positive and negative adjustments.
+  padj = _mm_andnot_si128(diff_sign, adj);
+  nadj = _mm_and_si128(diff_sign, adj);
+
+  // Calculate filtered value.
+  v_running_avg_y = _mm_adds_epu8(v_sig, padj);
+  v_running_avg_y = _mm_subs_epu8(v_running_avg_y, nadj);
+  _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);
+
+  // Adjustments <=7, and each element in acc_diff can fit in signed
+  // char.
+  acc_diff = _mm_adds_epi8(acc_diff, padj);
+  acc_diff = _mm_subs_epi8(acc_diff, nadj);
+  return acc_diff;
+}
+
+// Denoise a 16x1 vector with a weaker filter.
+// Second-pass correction: nudges the already-filtered running_avg_y back
+// towards sig by at most k_delta per pixel (the opposite direction of the
+// first pass), and updates acc_diff with the opposite sign so it keeps
+// tracking the net change.
+static INLINE __m128i av1_denoiser_adj_16x1_sse2(
+    const uint8_t *sig, const uint8_t *mc_running_avg_y, uint8_t *running_avg_y,
+    const __m128i k_0, const __m128i k_delta, __m128i acc_diff) {
+  __m128i v_running_avg_y = _mm_loadu_si128((__m128i *)(&running_avg_y[0]));
+  // Calculate differences.
+  const __m128i v_sig = _mm_loadu_si128((const __m128i *)(&sig[0]));
+  const __m128i v_mc_running_avg_y =
+      _mm_loadu_si128((const __m128i *)(&mc_running_avg_y[0]));
+  const __m128i pdiff = _mm_subs_epu8(v_mc_running_avg_y, v_sig);
+  const __m128i ndiff = _mm_subs_epu8(v_sig, v_mc_running_avg_y);
+  // Obtain the sign. FF if diff is negative.
+  const __m128i diff_sign = _mm_cmpeq_epi8(pdiff, k_0);
+  // Clamp absolute difference to delta to get the adjustment.
+  const __m128i adj = _mm_min_epu8(_mm_or_si128(pdiff, ndiff), k_delta);
+  // Restore the sign and get positive and negative adjustments.
+  __m128i padj, nadj;
+  padj = _mm_andnot_si128(diff_sign, adj);
+  nadj = _mm_and_si128(diff_sign, adj);
+  // Calculate filtered value.
+  // Note the reversed add/sub relative to the first pass.
+  v_running_avg_y = _mm_subs_epu8(v_running_avg_y, padj);
+  v_running_avg_y = _mm_adds_epu8(v_running_avg_y, nadj);
+  _mm_storeu_si128((__m128i *)running_avg_y, v_running_avg_y);
+
+  // Accumulate the adjustments.
+  acc_diff = _mm_subs_epi8(acc_diff, padj);
+  acc_diff = _mm_adds_epi8(acc_diff, nadj);
+  return acc_diff;
+}
+
+// Denoise 8x8 and 8x16 blocks.
+// Rows are processed two at a time: each pair of width-wide (8 pixel)
+// rows is packed into a 16-byte scratch buffer so the 16x1 kernel can be
+// reused. Returns FILTER_BLOCK if running_avg_y holds a valid filtered
+// result, or COPY_BLOCK if the caller should copy the source instead.
+static int av1_denoiser_NxM_sse2_small(const uint8_t *sig, int sig_stride,
+                                       const uint8_t *mc_running_avg_y,
+                                       int mc_avg_y_stride,
+                                       uint8_t *running_avg_y, int avg_y_stride,
+                                       int increase_denoising, BLOCK_SIZE bs,
+                                       int motion_magnitude, int width) {
+  int sum_diff_thresh, r, sum_diff = 0;
+  // Allow one extra level of adjustment when stronger denoising is
+  // requested and motion is low.
+  const int shift_inc =
+      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+          ? 1
+          : 0;
+  uint8_t sig_buffer[8][16], mc_running_buffer[8][16], running_buffer[8][16];
+  __m128i acc_diff = _mm_setzero_si128();
+  const __m128i k_0 = _mm_setzero_si128();
+  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
+  const __m128i k_8 = _mm_set1_epi8(8);
+  const __m128i k_16 = _mm_set1_epi8(16);
+  // Modify each level's adjustment according to motion_magnitude.
+  const __m128i l3 = _mm_set1_epi8(
+      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
+  // Difference between level 3 and level 2 is 2.
+  const __m128i l32 = _mm_set1_epi8(2);
+  // Difference between level 2 and level 1 is 1.
+  const __m128i l21 = _mm_set1_epi8(1);
+  // Half the block height: each iteration consumes two rows.
+  const int b_height = block_size_high[bs] >> 1;
+
+  for (r = 0; r < b_height; ++r) {
+    memcpy(sig_buffer[r], sig, width);
+    memcpy(sig_buffer[r] + width, sig + sig_stride, width);
+    memcpy(mc_running_buffer[r], mc_running_avg_y, width);
+    memcpy(mc_running_buffer[r] + width, mc_running_avg_y + mc_avg_y_stride,
+           width);
+    memcpy(running_buffer[r], running_avg_y, width);
+    memcpy(running_buffer[r] + width, running_avg_y + avg_y_stride, width);
+    acc_diff = av1_denoiser_16x1_sse2(sig_buffer[r], mc_running_buffer[r],
+                                      running_buffer[r], &k_0, &k_4, &k_8,
+                                      &k_16, &l3, &l32, &l21, acc_diff);
+    memcpy(running_avg_y, running_buffer[r], width);
+    memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width, width);
+    // Update pointers for next iteration.
+    sig += (sig_stride << 1);
+    mc_running_avg_y += (mc_avg_y_stride << 1);
+    running_avg_y += (avg_y_stride << 1);
+  }
+
+  {
+    sum_diff = sum_diff_16x1(acc_diff);
+    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
+    if (abs(sum_diff) > sum_diff_thresh) {
+      // Before returning to copy the block (i.e., apply no denoising),
+      // check if we can still apply some (weaker) temporal filtering to
+      // this block, that would otherwise not be denoised at all. Simplest
+      // is to apply an additional adjustment to running_avg_y to bring it
+      // closer to sig. The adjustment is capped by a maximum delta, and
+      // chosen such that in most cases the resulting sum_diff will be
+      // within the acceptable range given by sum_diff_thresh.
+
+      // The delta is set by the excess of absolute pixel diff over the
+      // threshold.
+      const int delta =
+          ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
+      // Only apply the adjustment for max delta up to 3.
+      if (delta < 4) {
+        const __m128i k_delta = _mm_set1_epi8(delta);
+        // Rewind to the top of the block; the scratch buffers still hold
+        // the first-pass data.
+        running_avg_y -= avg_y_stride * (b_height << 1);
+        for (r = 0; r < b_height; ++r) {
+          acc_diff = av1_denoiser_adj_16x1_sse2(
+              sig_buffer[r], mc_running_buffer[r], running_buffer[r], k_0,
+              k_delta, acc_diff);
+          memcpy(running_avg_y, running_buffer[r], width);
+          memcpy(running_avg_y + avg_y_stride, running_buffer[r] + width,
+                 width);
+          // Update pointers for next iteration.
+          running_avg_y += (avg_y_stride << 1);
+        }
+        sum_diff = sum_diff_16x1(acc_diff);
+        if (abs(sum_diff) > sum_diff_thresh) {
+          return COPY_BLOCK;
+        }
+      } else {
+        return COPY_BLOCK;
+      }
+    }
+  }
+  return FILTER_BLOCK;
+}
+
+// Denoise 16x16 to 128x128 blocks.
+// The block is processed 16 columns at a time. acc_diff keeps one signed
+// 8-bit accumulator vector per (16-wide column tile, 16-row band) and is
+// flushed into sum_diff every 16 rows (or after row 7 for BLOCK_16X8)
+// before the 8-bit lanes could saturate. Returns FILTER_BLOCK or
+// COPY_BLOCK as in the small-block path.
+static int av1_denoiser_NxM_sse2_big(const uint8_t *sig, int sig_stride,
+                                     const uint8_t *mc_running_avg_y,
+                                     int mc_avg_y_stride,
+                                     uint8_t *running_avg_y, int avg_y_stride,
+                                     int increase_denoising, BLOCK_SIZE bs,
+                                     int motion_magnitude) {
+  int sum_diff_thresh, r, c, sum_diff = 0;
+  // Allow one extra level of adjustment for strong, low-motion denoising.
+  const int shift_inc =
+      (increase_denoising && motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD)
+          ? 1
+          : 0;
+  // [column tile][16-row band] accumulators; 8x8 covers up to 128x128.
+  __m128i acc_diff[8][8];
+  const __m128i k_0 = _mm_setzero_si128();
+  const __m128i k_4 = _mm_set1_epi8(4 + shift_inc);
+  const __m128i k_8 = _mm_set1_epi8(8);
+  const __m128i k_16 = _mm_set1_epi8(16);
+  // Modify each level's adjustment according to motion_magnitude.
+  const __m128i l3 = _mm_set1_epi8(
+      (motion_magnitude <= MOTION_MAGNITUDE_THRESHOLD) ? 7 + shift_inc : 6);
+  // Difference between level 3 and level 2 is 2.
+  const __m128i l32 = _mm_set1_epi8(2);
+  // Difference between level 2 and level 1 is 1.
+  const __m128i l21 = _mm_set1_epi8(1);
+  const int b_width = block_size_wide[bs];
+  const int b_height = block_size_high[bs];
+  const int b_width_shift4 = b_width >> 4;  // number of 16-column tiles
+
+  for (r = 0; r < 8; ++r) {
+    for (c = 0; c < b_width_shift4; ++c) {
+      acc_diff[c][r] = _mm_setzero_si128();
+    }
+  }
+
+  for (r = 0; r < b_height; ++r) {
+    for (c = 0; c < b_width_shift4; ++c) {
+      acc_diff[c][r >> 4] = av1_denoiser_16x1_sse2(
+          sig, mc_running_avg_y, running_avg_y, &k_0, &k_4, &k_8, &k_16, &l3,
+          &l32, &l21, acc_diff[c][r >> 4]);
+      // Update pointers for next iteration.
+      sig += 16;
+      mc_running_avg_y += 16;
+      running_avg_y += 16;
+    }
+
+    // Flush this 16-row band's accumulators before they can saturate.
+    if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
+      for (c = 0; c < b_width_shift4; ++c) {
+        sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]);
+      }
+    }
+
+    // Update pointers for next iteration.
+    sig = sig - b_width + sig_stride;
+    mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
+    running_avg_y = running_avg_y - b_width + avg_y_stride;
+  }
+
+  {
+    sum_diff_thresh = total_adj_strong_thresh(bs, increase_denoising);
+    if (abs(sum_diff) > sum_diff_thresh) {
+      // Weaker second pass (see the small-block path for the rationale):
+      // pull running_avg_y back towards sig by up to `delta` per pixel.
+      const int delta =
+          ((abs(sum_diff) - sum_diff_thresh) >> num_pels_log2_lookup[bs]) + 1;
+
+      // Only apply the adjustment for max delta up to 3.
+      if (delta < 4) {
+        const __m128i k_delta = _mm_set1_epi8(delta);
+        // Rewind all pointers to the top of the block.
+        sig -= sig_stride * b_height;
+        mc_running_avg_y -= mc_avg_y_stride * b_height;
+        running_avg_y -= avg_y_stride * b_height;
+        sum_diff = 0;
+        for (r = 0; r < b_height; ++r) {
+          for (c = 0; c < b_width_shift4; ++c) {
+            acc_diff[c][r >> 4] =
+                av1_denoiser_adj_16x1_sse2(sig, mc_running_avg_y, running_avg_y,
+                                           k_0, k_delta, acc_diff[c][r >> 4]);
+            // Update pointers for next iteration.
+            sig += 16;
+            mc_running_avg_y += 16;
+            running_avg_y += 16;
+          }
+
+          if ((r & 0xf) == 0xf || (bs == BLOCK_16X8 && r == 7)) {
+            for (c = 0; c < b_width_shift4; ++c) {
+              sum_diff += sum_diff_16x1(acc_diff[c][r >> 4]);
+            }
+          }
+          sig = sig - b_width + sig_stride;
+          mc_running_avg_y = mc_running_avg_y - b_width + mc_avg_y_stride;
+          running_avg_y = running_avg_y - b_width + avg_y_stride;
+        }
+        if (abs(sum_diff) > sum_diff_thresh) {
+          return COPY_BLOCK;
+        }
+      } else {
+        return COPY_BLOCK;
+      }
+    }
+  }
+  return FILTER_BLOCK;
+}
+
+// Dispatch the temporal denoiser by block size: blocks with width >= 16
+// use the tiled NxM kernel, 8x8/8x16 use the small-block path that packs
+// two 8-pixel rows per 16-byte vector, and any other size is left for the
+// caller to copy (COPY_BLOCK).
+int av1_denoiser_filter_sse2(const uint8_t *sig, int sig_stride,
+                             const uint8_t *mc_avg, int mc_avg_stride,
+                             uint8_t *avg, int avg_stride,
+                             int increase_denoising, BLOCK_SIZE bs,
+                             int motion_magnitude) {
+  // Rank by frequency of the block type to have an early termination.
+  if (bs == BLOCK_16X16 || bs == BLOCK_32X32 || bs == BLOCK_64X64 ||
+      bs == BLOCK_128X128 || bs == BLOCK_128X64 || bs == BLOCK_64X128 ||
+      bs == BLOCK_16X32 || bs == BLOCK_16X8 || bs == BLOCK_32X16 ||
+      bs == BLOCK_32X64 || bs == BLOCK_64X32) {
+    return av1_denoiser_NxM_sse2_big(sig, sig_stride, mc_avg, mc_avg_stride,
+                                     avg, avg_stride, increase_denoising, bs,
+                                     motion_magnitude);
+  } else if (bs == BLOCK_8X8 || bs == BLOCK_8X16) {
+    return av1_denoiser_NxM_sse2_small(sig, sig_stride, mc_avg, mc_avg_stride,
+                                       avg, avg_stride, increase_denoising, bs,
+                                       motion_magnitude, 8);
+  } else {
+    return COPY_BLOCK;
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/av1_txfm1d_sse4.h b/third_party/aom/av1/encoder/x86/av1_txfm1d_sse4.h
new file mode 100644
index 0000000000..7a0f32898b
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/av1_txfm1d_sse4.h
@@ -0,0 +1,144 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_ENCODER_X86_AV1_TXFM1D_SSE4_H_
+#define AOM_AV1_ENCODER_X86_AV1_TXFM1D_SSE4_H_
+
+#include <smmintrin.h>
+#include "av1/common/av1_txfm.h"
+#include "av1/common/x86/av1_txfm_sse4.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void av1_fdct4_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_fdct8_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_fdct16_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_fdct32_sse4_1(__m128i *input, __m128i *output, int cos_bit,
+ const int stride);
+void av1_fdct64_sse4_1(__m128i *input, __m128i *output, int8_t cos_bit,
+ const int instride, const int outstride);
+void av1_fadst4_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_fadst8_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_fadst16_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+
+void av1_idct4_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_idct8_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_idct16_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_idct32_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_idct64_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+
+void av1_iadst4_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_iadst8_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+void av1_iadst16_sse4_1(const __m128i *input, __m128i *output,
+ const int8_t cos_bit, const int8_t *stage_range);
+
+void av1_idtx32_sse4_1(__m128i *input, __m128i *output, int cos_bit,
+ const int col_num);
+
+// Transpose a 4x4 tile of 32-bit lanes held in four __m128i registers
+// that are `stride` elements apart in both the input and output arrays.
+static INLINE void transpose_32_4x4(int stride, const __m128i *input,
+                                    __m128i *output) {
+  // Interleave rows pairwise, then interleave the intermediates to
+  // complete the transpose.
+  __m128i temp0 = _mm_unpacklo_epi32(input[0 * stride], input[2 * stride]);
+  __m128i temp1 = _mm_unpackhi_epi32(input[0 * stride], input[2 * stride]);
+  __m128i temp2 = _mm_unpacklo_epi32(input[1 * stride], input[3 * stride]);
+  __m128i temp3 = _mm_unpackhi_epi32(input[1 * stride], input[3 * stride]);
+
+  output[0 * stride] = _mm_unpacklo_epi32(temp0, temp2);
+  output[1 * stride] = _mm_unpackhi_epi32(temp0, temp2);
+  output[2 * stride] = _mm_unpacklo_epi32(temp1, temp3);
+  output[3 * stride] = _mm_unpackhi_epi32(temp1, temp3);
+}
+
+// The entire input block can be represented by a grid of 4x4 blocks.
+// Each 4x4 block can be represented by 4 vertical __m128i registers.
+// We first transpose each 4x4 block internally,
+// then transpose the grid.
+// Transpose a txfm_size x txfm_size matrix of 32-bit values stored as
+// rows of __m128i (4 lanes per register). Each 4x4 tile is transposed
+// internally by transpose_32_4x4, and the output index places the tile
+// at its transposed position in the grid at the same time.
+static INLINE void transpose_32(int txfm_size, const __m128i *input,
+                                __m128i *output) {
+  const int num_per_128 = 4;
+  const int row_size = txfm_size;
+  const int col_size = txfm_size / num_per_128;  // registers per row
+  int r, c;
+
+  // transpose each 4x4 block internally
+  for (r = 0; r < row_size; r += 4) {
+    for (c = 0; c < col_size; c++) {
+      transpose_32_4x4(col_size, &input[r * col_size + c],
+                       &output[c * 4 * col_size + r / 4]);
+    }
+  }
+}
+
+// out0 = in0*w0 + in1*w1
+// out1 = -in1*w0 + in0*w1
+// 32-bit butterfly; `bit` is the rounding shift applied through
+// av1_round_shift_32_sse4_1. _mm_mullo_epi32 keeps only the low 32 bits,
+// so operands must be small enough that the products do not overflow.
+#define btf_32_sse4_1_type0(w0, w1, in0, in1, out0, out1, bit) \
+  do {                                                         \
+    const __m128i ww0 = _mm_set1_epi32(w0);                    \
+    const __m128i ww1 = _mm_set1_epi32(w1);                    \
+    const __m128i in0_w0 = _mm_mullo_epi32(in0, ww0);          \
+    const __m128i in1_w1 = _mm_mullo_epi32(in1, ww1);          \
+    out0 = _mm_add_epi32(in0_w0, in1_w1);                      \
+    out0 = av1_round_shift_32_sse4_1(out0, bit);               \
+    const __m128i in0_w1 = _mm_mullo_epi32(in0, ww1);          \
+    const __m128i in1_w0 = _mm_mullo_epi32(in1, ww0);          \
+    out1 = _mm_sub_epi32(in0_w1, in1_w0);                      \
+    out1 = av1_round_shift_32_sse4_1(out1, bit);               \
+  } while (0)
+
+// out0 = in0*w0 + in1*w1
+// out1 = in1*w0 - in0*w1
+// Implemented by swapping the (w0, in0) and (w1, in1) roles in type0.
+#define btf_32_sse4_1_type1(w0, w1, in0, in1, out0, out1, bit) \
+  do {                                                         \
+    btf_32_sse4_1_type0(w1, w0, in1, in0, out0, out1, bit);    \
+  } while (0)
+
+// out0 = in0*w0 + in1*w1
+// out1 = -in1*w0 + in0*w1
+// Same butterfly as btf_32_sse4_1_type0, but takes pre-broadcast weight
+// vectors (ww0/ww1) and an explicit rounding vector `r`, doing the
+// round-and-shift inline instead of via av1_round_shift_32_sse4_1.
+#define btf_32_type0_sse4_1_new(ww0, ww1, in0, in1, out0, out1, r, bit) \
+  do {                                                                  \
+    const __m128i in0_w0 = _mm_mullo_epi32(in0, ww0);                   \
+    const __m128i in1_w1 = _mm_mullo_epi32(in1, ww1);                   \
+    out0 = _mm_add_epi32(in0_w0, in1_w1);                               \
+    out0 = _mm_add_epi32(out0, r);                                      \
+    out0 = _mm_srai_epi32(out0, bit);                                   \
+    const __m128i in0_w1 = _mm_mullo_epi32(in0, ww1);                   \
+    const __m128i in1_w0 = _mm_mullo_epi32(in1, ww0);                   \
+    out1 = _mm_sub_epi32(in0_w1, in1_w0);                               \
+    out1 = _mm_add_epi32(out1, r);                                      \
+    out1 = _mm_srai_epi32(out1, bit);                                   \
+  } while (0)
+
+// out0 = in0*w0 + in1*w1
+// out1 = in1*w0 - in0*w1
+// Implemented by swapping the operand roles in the type0 macro.
+#define btf_32_type1_sse4_1_new(ww0, ww1, in0, in1, out0, out1, r, bit) \
+  do {                                                                  \
+    btf_32_type0_sse4_1_new(ww1, ww0, in1, in0, out0, out1, r, bit);    \
+  } while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AOM_AV1_ENCODER_X86_AV1_TXFM1D_SSE4_H_
diff --git a/third_party/aom/av1/encoder/x86/cnn_avx2.c b/third_party/aom/av1/encoder/x86/cnn_avx2.c
new file mode 100644
index 0000000000..ee93b3d5a0
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/cnn_avx2.c
@@ -0,0 +1,532 @@
+/*
+ * Copyright (c) 2020, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <immintrin.h>
+#include <math.h>
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "av1/common/av1_common_int.h"
+#include "av1/encoder/cnn.h"
+
+// This mask rearranges source pixels in the order shown below.
+// shuffle_src_layer0[0][8]: applied on source pixels 0 to 7.
+// shuffle_src_layer0[1][8]: applied on source pixels 7 to 14.
+// This shuffling is needed to process 3 5x5 blocks which need
+// source pixels in the following order.
+// 1st 5x5 block: source pixels needed are 0 to 4,
+// 2nd 5x5 block: source pixels needed are 4 to 8,
+// 3rd 5x5 block: source pixels needed are 8 to 12.
+// Source pixels are loaded like mentioned below.
+// load_src0 : 0, 1, 2, 3, 4, 5, 6, 7
+// load_src1 : 7, 8, 9, 10, 11, 12, 13, 14
+// After applying masks, source bytes will be in the order:
+// load_src0 : 0, 1, 2, 3, 4, 4, 5, 6
+// consists of the 5 pixels needed for the 1st 5x5 block and
+// the first 3 pixels needed for the 2nd 5x5 block.
+// load_src1 : 7, 8, 8, 9, 10, 11, 12, x
+// consists of the last 2 pixels needed for the 2nd 5x5 block and
+// the 5 pixels needed for the 3rd 5x5 block.
// Row 0 is applied to load_src0; row 1 is applied to load_src1 (indices are
// relative to that second load). The final '0' entry of row 1 selects a
// don't-care lane (the 'x' in the comment above).
DECLARE_ALIGNED(32, static const uint32_t,
                shuffle_src_layer0[2][8]) = { { 0, 1, 2, 3, 4, 4, 5, 6 },
                                              { 0, 1, 1, 2, 3, 4, 5, 0 } };

// This mask rearranges the weights to match the shuffled source pixel order
// produced by shuffle_src_layer0.
DECLARE_ALIGNED(32, static const uint32_t,
                shuffle_weight_layer0[2][8]) = { { 0, 1, 2, 3, 4, 0, 1, 2 },
                                                 { 3, 4, 0, 1, 2, 3, 4, 0 } };

// Shuffle mask used to rearrange weights corresponding to layer 1 and layer 2.
// For layer 1 and layer 2, convolution happens at 2x2 as filter_width and
// filter_height are equal to 2. So rearranging the weights in the
// order shown below to match source pixels. Basically this mask replicates
// the weights across the width of 2.
DECLARE_ALIGNED(32, static const uint32_t,
                shuffle_weight_layer_1_and_2[2][8]) = {
  { 0, 1, 0, 1, 0, 1, 0, 1 }, { 2, 3, 2, 3, 2, 3, 2, 3 }
};

// After the stages of multiplication and accumulation, the output values
// in the register will be jumbled (hadd interleaves the 128-bit lanes).
// In order to store the register into the output buffer in a proper way,
// the following mask is applied on the output register.
DECLARE_ALIGNED(32, static const uint32_t,
                shuffle_output_layer_1_and_2[8]) = { 0, 1, 4, 5, 2, 3, 6, 7 };
+
+// Load weights needed for layer 0 (for 5x5 block processing),
+// and fill the registers appropriately to match source pixel mapping.
+static INLINE void prepare_weights_for_5x5_convolve(
+ const float *layer_config_weights, int off, float weight[5][8],
+ const int cstep, __m256 *shuffle_weight, const __m256i weight_mask_0,
+ const __m256i weight_mask_1) {
+ for (int row = 0; row < 5; ++row) {
+ for (int col = 0; col < 5; ++col) {
+ weight[row][col] = layer_config_weights[off];
+ off += cstep;
+ }
+ }
+ shuffle_weight[0] = _mm256_loadu_ps(weight[0]);
+ shuffle_weight[1] = _mm256_loadu_ps(weight[1]);
+ shuffle_weight[2] = _mm256_loadu_ps(weight[2]);
+ shuffle_weight[3] = _mm256_loadu_ps(weight[3]);
+ shuffle_weight[4] = _mm256_loadu_ps(weight[4]);
+
+ shuffle_weight[0] =
+ _mm256_permutevar8x32_ps(shuffle_weight[0], weight_mask_0);
+ shuffle_weight[1] =
+ _mm256_permutevar8x32_ps(shuffle_weight[1], weight_mask_0);
+ shuffle_weight[2] =
+ _mm256_permutevar8x32_ps(shuffle_weight[2], weight_mask_0);
+ shuffle_weight[3] =
+ _mm256_permutevar8x32_ps(shuffle_weight[3], weight_mask_0);
+ shuffle_weight[4] =
+ _mm256_permutevar8x32_ps(shuffle_weight[4], weight_mask_0);
+ shuffle_weight[5] =
+ _mm256_permutevar8x32_ps(shuffle_weight[0], weight_mask_1);
+ shuffle_weight[6] =
+ _mm256_permutevar8x32_ps(shuffle_weight[1], weight_mask_1);
+ shuffle_weight[7] =
+ _mm256_permutevar8x32_ps(shuffle_weight[2], weight_mask_1);
+ shuffle_weight[8] =
+ _mm256_permutevar8x32_ps(shuffle_weight[3], weight_mask_1);
+ shuffle_weight[9] =
+ _mm256_permutevar8x32_ps(shuffle_weight[4], weight_mask_1);
+}
+
// For each row, loads source pixels 0 to 7(load_src_0), 7 to 14(load_src_1) and
// arranges them appropriately to process 3 blocks.
// Expects the following names in the enclosing scope: input_ptr (advanced by
// in_stride per row), in_stride, block0_1/block1_2 (source shuffle masks),
// shuffle_weight[10], and the accumulators accum_src_0/accum_src_1.
#define PERFORM_CONVOLVE_FOR_3_5X5_BLOCKS()                             \
  do {                                                                  \
    for (int row = 0; row < 5; row++) {                                 \
      load_src_0 = _mm256_loadu_ps(input_ptr);                          \
      load_src_1 = _mm256_loadu_ps(input_ptr + 7);                      \
      load_src_0 = _mm256_permutevar8x32_ps(load_src_0, block0_1);      \
      load_src_1 = _mm256_permutevar8x32_ps(load_src_1, block1_2);      \
      load_src_0 = _mm256_mul_ps(load_src_0, shuffle_weight[0 + row]);  \
      load_src_1 = _mm256_mul_ps(load_src_1, shuffle_weight[5 + row]);  \
      accum_src_0 = _mm256_add_ps(load_src_0, accum_src_0);             \
      accum_src_1 = _mm256_add_ps(load_src_1, accum_src_1);             \
      input_ptr += in_stride;                                           \
    }                                                                   \
  } while (0)
+
+// Load masks needed for shuffling of output and weights.
+static INLINE void load_shuffle_masks_for_2x2_convolve(__m256i *output_mask,
+ __m256i *weight_mask) {
+ // Load shuffle buffer needed to sort the output.
+ *output_mask =
+ _mm256_load_si256((const __m256i *)shuffle_output_layer_1_and_2);
+
+ // Load shuffle buffers needed for weight.
+ weight_mask[0] =
+ _mm256_load_si256((const __m256i *)shuffle_weight_layer_1_and_2[0]);
+ weight_mask[1] =
+ _mm256_load_si256((const __m256i *)shuffle_weight_layer_1_and_2[1]);
+}
+
+// Load weights needed for layer 1 and 2 (for 2x2 block processing),
+// and fill the registers appropriately to match source pixel mapping.
+static INLINE void prepare_weights_for_2x2_convolve(
+ const float *layer_config_weights, int off, const int cstep,
+ __m256 *shuffle_weight, __m256i *weight_mask) {
+ // Weights needed for 2x2 block.
+ float weight[4] = { 0 };
+ for (int i = 0; i < 4; ++i) {
+ weight[i] = layer_config_weights[off];
+ off += cstep;
+ }
+
+ const __m256 weight_vec = _mm256_castps128_ps256(_mm_loadu_ps(weight));
+ shuffle_weight[0] = _mm256_permutevar8x32_ps(weight_vec, weight_mask[0]);
+ shuffle_weight[1] = _mm256_permutevar8x32_ps(weight_vec, weight_mask[1]);
+}
+
// Do convolution of one 5x5 block.
// The first 4 taps of each row are handled with SSE (low half of w[row]);
// the 5th column is accumulated scalar into 'last_column_sum'.
// Expects the following names in the enclosing scope: input_ptr (advanced
// by in_stride per row; left pointing at the last row on exit), weight[5][8]
// (raw taps), and last_column_sum.
#define PERFORM_CONVOLVE_FOR_1_5X5_BLOCK(w, accum0, in_stride)           \
  do {                                                                   \
    __m128 load_src[5];                                                  \
    load_src[0] = _mm_loadu_ps(input_ptr);                               \
    last_column_sum += input_ptr[4] * weight[0][4];                      \
    input_ptr += in_stride;                                              \
    load_src[1] = _mm_loadu_ps(input_ptr);                               \
    last_column_sum += input_ptr[4] * weight[1][4];                      \
    input_ptr += in_stride;                                              \
    load_src[2] = _mm_loadu_ps(input_ptr);                               \
    last_column_sum += input_ptr[4] * weight[2][4];                      \
    input_ptr += in_stride;                                              \
    load_src[3] = _mm_loadu_ps(input_ptr);                               \
    last_column_sum += input_ptr[4] * weight[3][4];                      \
    input_ptr += in_stride;                                              \
    load_src[4] = _mm_loadu_ps(input_ptr);                               \
    last_column_sum += input_ptr[4] * weight[4][4];                      \
    \
    load_src[0] = _mm_mul_ps(load_src[0], _mm256_castps256_ps128(w[0])); \
    load_src[1] = _mm_mul_ps(load_src[1], _mm256_castps256_ps128(w[1])); \
    load_src[2] = _mm_mul_ps(load_src[2], _mm256_castps256_ps128(w[2])); \
    load_src[3] = _mm_mul_ps(load_src[3], _mm256_castps256_ps128(w[3])); \
    load_src[4] = _mm_mul_ps(load_src[4], _mm256_castps256_ps128(w[4])); \
    \
    accum0 = _mm_add_ps(load_src[0], accum0);                            \
    load_src[1] = _mm_add_ps(load_src[1], load_src[2]);                  \
    load_src[3] = _mm_add_ps(load_src[3], load_src[4]);                  \
    load_src[1] = _mm_add_ps(load_src[1], load_src[3]);                  \
    accum0 = _mm_add_ps(accum0, load_src[1]);                            \
  } while (0)
+
+// Do convolution on 8 horizontal 2x2 blocks.
+static INLINE void perform_convolve_for_8h_2x2_blocks(
+ const float *input_ptr, int in_stride, __m256 *weight, __m256 *out_accum,
+ __m256i shuffle_output_mask) {
+ __m256 load_src[4];
+ // Load input into source registers.
+ load_src[0] = _mm256_loadu_ps(input_ptr);
+ load_src[1] = _mm256_loadu_ps(input_ptr + 8);
+ load_src[2] = _mm256_loadu_ps(input_ptr + in_stride);
+ load_src[3] = _mm256_loadu_ps(input_ptr + in_stride + 8);
+
+ // Multiply the loaded input with corresponding weights.
+ load_src[0] = _mm256_mul_ps(load_src[0], weight[0]);
+ load_src[1] = _mm256_mul_ps(load_src[1], weight[0]);
+ load_src[2] = _mm256_mul_ps(load_src[2], weight[1]);
+ load_src[3] = _mm256_mul_ps(load_src[3], weight[1]);
+
+ // Accumulate across 2x2 blocks.
+ load_src[0] = _mm256_add_ps(load_src[0], load_src[2]);
+ load_src[1] = _mm256_add_ps(load_src[1], load_src[3]);
+ load_src[0] = _mm256_hadd_ps(load_src[0], load_src[1]);
+
+ // Sort the output in order to store into output buffer.
+ load_src[0] = _mm256_permutevar8x32_ps(load_src[0], shuffle_output_mask);
+ *out_accum = _mm256_add_ps(*out_accum, load_src[0]);
+}
+
+// Do convolution on 8 (4 horizontal x 2 vertical) 2x2 blocks.
+static INLINE void perform_convolve_for_4hx2v_2x2_blocks(
+ const float *input_ptr, int in_stride, __m256 *weight, __m256 *out_accum,
+ __m256i shuffle_output_mask) {
+ __m256 load_src[4];
+ // Load input into source registers.
+ load_src[0] = _mm256_loadu_ps(input_ptr);
+ load_src[1] = _mm256_loadu_ps(input_ptr + in_stride);
+ load_src[2] = _mm256_loadu_ps(input_ptr + (in_stride * 2));
+ load_src[3] = _mm256_loadu_ps(input_ptr + (in_stride * 3));
+
+ // Multiply the loaded input with corresponding weights.
+ load_src[0] = _mm256_mul_ps(load_src[0], weight[0]);
+ load_src[1] = _mm256_mul_ps(load_src[1], weight[1]);
+ load_src[2] = _mm256_mul_ps(load_src[2], weight[0]);
+ load_src[3] = _mm256_mul_ps(load_src[3], weight[1]);
+
+ // Accumulate across 2x2 blocks.
+ load_src[0] = _mm256_add_ps(load_src[0], load_src[1]);
+ load_src[2] = _mm256_add_ps(load_src[2], load_src[3]);
+ load_src[0] = _mm256_hadd_ps(load_src[0], load_src[2]);
+
+ // Sort the output in order to store into output buffer.
+ load_src[0] = _mm256_permutevar8x32_ps(load_src[0], shuffle_output_mask);
+ *out_accum = _mm256_add_ps(*out_accum, load_src[0]);
+}
+
// AVX2 variant of av1_cnn_convolve_no_maxpool_padding_valid_c(), when
// filter_width and filter_height are equal to 5.
// CNN convolve parsing is based on av1_intra_mode_cnn_partition_cnn_config.
// Based on the configuration set for each layer, the current encoder
// always chooses the case of no_maxpool_padding_valid.
// And also for layer 0 convolution happens at 5x5 level as the
// filter_width and filter_height are set as 5.
static void cnn_convolve_no_maxpool_padding_valid_5x5_avx2(
    const float **input, int in_width, int in_height, int in_stride,
    const CNN_LAYER_CONFIG *const layer_config, float **output, int out_stride,
    int start_idx, const int cstep, const int channel_step) {
  const int kFilterWidth = 5;
  const int kFilterHeight = 5;
  const int kSkipWidth = 4;
  const int kSkipHeight = 4;
  assert(layer_config->filter_width == kFilterWidth &&
         layer_config->filter_height == kFilterHeight);
  assert(layer_config->skip_width == kSkipWidth &&
         layer_config->skip_height == kSkipHeight);

  // Load shuffle buffers needed for source.
  const __m256i block0_1 =
      _mm256_load_si256((const __m256i *)shuffle_src_layer0[0]);
  const __m256i block1_2 =
      _mm256_load_si256((const __m256i *)shuffle_src_layer0[1]);

  // Load shuffle buffers needed for weight.
  const __m256i weight_mask_0 =
      _mm256_load_si256((const __m256i *)shuffle_weight_layer0[0]);
  const __m256i weight_mask_1 =
      _mm256_load_si256((const __m256i *)shuffle_weight_layer0[1]);

  // Width needs to be moved to go to next iteration of processing 3 5x5 blocks.
  const int kSkipWidthForNextIter = kSkipWidth * 3;

  // Minimum width required to process 3 5x5 blocks at a time.
  // min width (for processing 3 5x5 block) = 2*skip_width + filter_width
  // Here, skip_width specifies how much width we should move while processing
  // next block convolution and filter_width specifies for how many pixels
  // filter needs to be applied.
  const int kMinWidthFor3_5x5Blocks = (kSkipWidth * 2) + kFilterWidth;
  for (int i = start_idx; i < layer_config->out_channels; i += channel_step) {
    const float out_ch_bias = layer_config->bias[i];
    for (int k = 0; k < layer_config->in_channels; ++k) {
      __m256 shuffle_weight[10];

      // Weights needed are 5x5, for SIMD purpose made this array as 5x8.
      float weight[5][8] = { { 0 } };
      int off = k * layer_config->out_channels + i;

      // In layer 0, the convolution process happens at 5x5.
      // The 5x5 weights stay fixed over the whole input plane, so they are
      // loaded once per (in-channel, out-channel) pair, outside the
      // position loops below.
      prepare_weights_for_5x5_convolve(layer_config->weights, off, weight,
                                       cstep, shuffle_weight, weight_mask_0,
                                       weight_mask_1);

      for (int h = 0, u = 0; h < in_height - kFilterHeight + 1;
           h += kSkipHeight, ++u) {
        const int out_h = u * out_stride;
        int v = 0;
        int w = 0;
        int rem_width = in_width;
        // Processing 3 5x5 blocks at a time, if sufficient width is present.
        while (rem_width >= kMinWidthFor3_5x5Blocks) {
          __m256 load_src_0, load_src_1;
          __m256 accum_src_0 = _mm256_setzero_ps();
          __m256 accum_src_1 = _mm256_setzero_ps();
          const float *input_ptr = &input[k][h * in_stride + w];
          PERFORM_CONVOLVE_FOR_3_5X5_BLOCKS();

          // Accumulate across column.
          // NOTE: _mm256_hadd_ps interleaves its 128-bit lanes, so each 5x5
          // block's partial sums are spread over accum's lanes and the
          // extracted high halves of the accumulators; the lane indices in
          // the shuffles below gather each block's remaining terms.
          __m256 accum = _mm256_hadd_ps(accum_src_0, accum_src_1);
          __m128 tmp_reg_0 = _mm256_extractf128_ps(accum_src_0, 1);
          __m128 tmp_reg_1 = _mm256_extractf128_ps(accum_src_1, 1);

          __m128 accum_l = _mm256_castps256_ps128(accum);
          __m128 accum_h = _mm256_extractf128_ps(accum, 1);

          __m128 tmp_reg_2 = _mm_add_ps(accum_l, tmp_reg_0);
          __m128 tmp_reg_3 = _mm_add_ps(tmp_reg_0, accum_h);
          __m128 tmp_reg_4 = _mm_add_ps(tmp_reg_1, accum_h);

          // 1st 5x5 block output.
          output[i][out_h + v] =
              out_ch_bias + _mm_cvtss_f32(tmp_reg_2) +
              _mm_cvtss_f32(_mm_shuffle_ps(accum_l, accum_l, 1));

          // 2nd 5x5 block output.
          output[i][out_h + v + 1] =
              out_ch_bias +
              _mm_cvtss_f32(_mm_shuffle_ps(tmp_reg_3, tmp_reg_3, 1)) +
              _mm_cvtss_f32(_mm_shuffle_ps(accum_l, accum_l, 2));

          // 3rd 5x5 block output.
          output[i][out_h + v + 2] =
              out_ch_bias +
              _mm_cvtss_f32(_mm_shuffle_ps(tmp_reg_4, tmp_reg_4, 2)) +
              _mm_cvtss_f32(_mm_shuffle_ps(accum_l, accum_l, 3));

          v += 3;
          w += kSkipWidthForNextIter;
          rem_width -= kSkipWidthForNextIter;
        }

        // Process remaining blocks as single 5x5 block at a time.
        while (rem_width >= kFilterWidth) {
          float last_column_sum = 0;
          __m128 accum = _mm_setzero_ps();
          const float *input_ptr = &input[k][h * in_stride + w];
          PERFORM_CONVOLVE_FOR_1_5X5_BLOCK(shuffle_weight, accum, in_stride);

          // Accumulate across column.
          accum = _mm_hadd_ps(accum, accum);
          output[i][out_h + v] = out_ch_bias + last_column_sum +
                                 _mm_cvtss_f32(accum) +
                                 _mm_cvtss_f32(_mm_shuffle_ps(accum, accum, 1));

          v += 1;
          w += kSkipWidth;
          rem_width -= kSkipWidth;
        }
      }
    }
  }
}
+
+// AVX2 implementation for layer 1.
+static INLINE void cnn_convolve_no_maxpool_padding_valid_layer1_avx2(
+ const float **input, int in_stride,
+ const CNN_LAYER_CONFIG *const layer_config, float **output, int out_stride,
+ int start_idx, const int cstep, const int channel_step) {
+ __m256i weight_mask[2];
+ __m256i shuffle_output_mask;
+ load_shuffle_masks_for_2x2_convolve(&shuffle_output_mask, weight_mask);
+
+ const int kInHeight = 16;
+ const int kFilterHeight = 2;
+ const int kSkipHeight = 2;
+ for (int i = start_idx; i < layer_config->out_channels; i += channel_step) {
+ __m256 bias_reg = _mm256_set1_ps(layer_config->bias[i]);
+ // out_accum registers are used to store the 2x2 convolve outputs
+ // (calculated over input block size), which are accumulated across the
+ // in_channels. As per the design, each iteration of for loop processes 8
+ // (horizontal) 2x2 blocks and stores in corresponding out_accum register
+ // (as input size is 16x16, a total of 64 2x2 blocks are present and 8
+ // out_accum registers are enough to store the outputs).
+ // Hence for loops corresponding to 'j' and 'h', below, run over the number
+ // of out_accum registers.
+ __m256 out_accum[8];
+ for (int j = 0; j < 8; ++j) out_accum[j] = bias_reg;
+ for (int k = 0; k < layer_config->in_channels; ++k) {
+ __m256 shuffle_weight[2];
+ int off = k * layer_config->out_channels + i;
+ // In layer 1, the convolution process happens at 2x2.
+ // The weights needed for 2x2 block are same across the in-channels,
+ // which is why the load of weights happens once for each in-channel.
+ prepare_weights_for_2x2_convolve(layer_config->weights, off, cstep,
+ shuffle_weight, weight_mask);
+
+ for (int h = 0, u = 0; h < kInHeight - kFilterHeight + 1;
+ h += kSkipHeight, ++u) {
+ const float *input_ptr = &input[k][h * in_stride];
+ perform_convolve_for_8h_2x2_blocks(input_ptr, in_stride, shuffle_weight,
+ &out_accum[u], shuffle_output_mask);
+ }
+ }
+ // Store output of layer 1.
+ for (int j = 0; j < 8; ++j) {
+ _mm256_storeu_ps(&output[i][j * out_stride], out_accum[j]);
+ }
+ }
+}
+
+// AVX2 implementation for layer 2.
+static INLINE void cnn_convolve_no_maxpool_padding_valid_layer2_avx2(
+ const float **input, int in_stride,
+ const CNN_LAYER_CONFIG *const layer_config, float **output, int out_stride,
+ int start_idx, const int cstep, const int channel_step) {
+ __m256i weight_mask[2];
+ __m256i shuffle_output_mask;
+ load_shuffle_masks_for_2x2_convolve(&shuffle_output_mask, weight_mask);
+
+ const int kInHeight = 8;
+ const int kFilterHeight = 2;
+ const int kSkipHeight = 2;
+ for (int i = start_idx; i < layer_config->out_channels; i += channel_step) {
+ __m256 bias_reg = _mm256_set1_ps(layer_config->bias[i]);
+ // out_accum registers are used to store the 2x2 convolve outputs
+ // (calculated over input block size), which are accumulated across the
+ // in_channels. As per the design, each iteration of for loop processes 8
+ // (4 horizontal x 2 vertical) 2x2 blocks and stores in corresponding
+ // out_accum register (as input size is 8x8, a total of 16 2x2 blocks are
+ // present and 2 out_accum registers are enough to store the outputs).
+ // Hence for loops corresponding to 'j' and 'h', below, run over the number
+ // of out_accum registers.
+ __m256 out_accum[2];
+
+ // Height needs to be moved to go to next iteration of processing
+ // while processing 2 2x2 blocks vertically.
+ const int kSkipHeightForNextIter = kSkipHeight * 2;
+ for (int j = 0; j < 2; ++j) out_accum[j] = bias_reg;
+ for (int k = 0; k < layer_config->in_channels; ++k) {
+ __m256 shuffle_weight[2];
+ int off = k * layer_config->out_channels + i;
+ // In layer 2, the convolution process happens at 2x2.
+ // The weights needed for 2x2 block are same across the in-channels,
+ // which is why the load of weights happens once for each in-channel.
+ prepare_weights_for_2x2_convolve(layer_config->weights, off, cstep,
+ shuffle_weight, weight_mask);
+
+ for (int h = 0, u = 0; h < kInHeight - kFilterHeight + 1;
+ h += kSkipHeightForNextIter, ++u) {
+ const float *input_ptr = &input[k][h * in_stride];
+ perform_convolve_for_4hx2v_2x2_blocks(input_ptr, in_stride,
+ shuffle_weight, &out_accum[u],
+ shuffle_output_mask);
+ }
+ }
+ // Store output of layer 2.
+ for (int j = 0; j < 2; ++j) {
+ _mm256_storeu_ps(&output[i][j * out_stride * 2], out_accum[j]);
+ }
+ }
+}
+
+// AVX2 variant of av1_cnn_convolve_no_maxpool_padding_valid_c(), when
+// filter_width and filter_height are equal to 2.
+// As per the layer config set by av1_intra_mode_cnn_partition_cnn_config,
+// the filter_width and filter_height are equal to 2 for layer >= 1. So
+// convolution happens at 2x2 for layer >= 1.
+void cnn_convolve_no_maxpool_padding_valid_2x2_avx2(
+ const float **input, int in_width, int in_height, int in_stride,
+ const CNN_LAYER_CONFIG *const layer_config, float **output, int out_stride,
+ int start_idx, const int cstep, const int channel_step) {
+ assert(layer_config->filter_width == 2 && layer_config->filter_height == 2);
+ assert(layer_config->skip_width == 2 && layer_config->skip_height == 2);
+
+ if (in_width == 16 && in_height == 16) {
+ // This case of in_width and in_height equal to 16 corresponds to layer 1.
+ // The output size of this layer is 8x8.
+ cnn_convolve_no_maxpool_padding_valid_layer1_avx2(
+ input, in_stride, layer_config, output, out_stride, start_idx, cstep,
+ channel_step);
+ } else if (in_width == 8 && in_height == 8) {
+ // This case of in_width and in_height equal to 8 corresponds to layer 2.
+ // The output size of this layer is 4x4.
+ cnn_convolve_no_maxpool_padding_valid_layer2_avx2(
+ input, in_stride, layer_config, output, out_stride, start_idx, cstep,
+ channel_step);
+ } else {
+ // For layer equal to 3 and 4, the input is of size 4x4 and 2x2
+ // respectively. Implementing SIMD for these cases might not be optimal,
+ // which is why we call C path for layer >= 3.
+ av1_cnn_convolve_no_maxpool_padding_valid_c(
+ input, in_width, in_height, in_stride, layer_config, output, out_stride,
+ start_idx, cstep, channel_step);
+ }
+}
+
+// AVX2 variant of av1_cnn_convolve_no_maxpool_padding_valid_c().
+// As per the current encoder, av1_cnn_convolve function gets called for
+// block size equal to 64x64. av1_cnn_convolve() uses layer config values
+// set by av1_intra_mode_cnn_partition_cnn_config. The following are a few
+// details related to each layer's config parameters.
+// Layer_Number in_size out_size filter_wd filter_ht skip_wd skip_ht
+// 0 64x64 16x16 5 5 4 4
+// 1 16x16 8x8 2 2 2 2
+// 2 8x8 4x4 2 2 2 2
+// 3 4x4 2x2 2 2 2 2
+// 4 2x2 1x1 2 2 2 2
+// Here,
+// filter_wd = filter_width and filter_ht = filter_height,
+// skip_wd = skip_width and skip_ht = skip_height.
+void av1_cnn_convolve_no_maxpool_padding_valid_avx2(
+ const float **input, int in_width, int in_height, int in_stride,
+ const CNN_LAYER_CONFIG *layer_config, float **output, int out_stride,
+ int start_idx, int cstep, int channel_step) {
+ if (layer_config->filter_width == 5 && layer_config->filter_height == 5 &&
+ layer_config->skip_width == 4 && layer_config->skip_height == 4) {
+ cnn_convolve_no_maxpool_padding_valid_5x5_avx2(
+ input, in_width, in_height, in_stride, layer_config, output, out_stride,
+ start_idx, cstep, channel_step);
+ } else if (layer_config->filter_width == 2 &&
+ layer_config->filter_height == 2 &&
+ layer_config->skip_width == 2 && layer_config->skip_height == 2) {
+ cnn_convolve_no_maxpool_padding_valid_2x2_avx2(
+ input, in_width, in_height, in_stride, layer_config, output, out_stride,
+ start_idx, cstep, channel_step);
+ } else {
+ av1_cnn_convolve_no_maxpool_padding_valid_c(
+ input, in_width, in_height, in_stride, layer_config, output, out_stride,
+ start_idx, cstep, channel_step);
+ }
+}
diff --git a/third_party/aom/av1/encoder/x86/dct_sse2.asm b/third_party/aom/av1/encoder/x86/dct_sse2.asm
new file mode 100644
index 0000000000..b185548184
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/dct_sse2.asm
@@ -0,0 +1,82 @@
+;
+; Copyright (c) 2016, Alliance for Open Media. All rights reserved
+;
+; This source code is subject to the terms of the BSD 2 Clause License and
+; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+; was not distributed with this source code in the LICENSE file, you can
+; obtain it at www.aomedia.org/license/software. If the Alliance for Open
+; Media Patent License 1.0 was not distributed with this source code in the
+; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+;
+
+%define private_prefix av1
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
; One 1-D stage of the 4x4 Walsh-Hadamard transform, in place on m0..m3
; (rows a1, b1, c1, d1); m4/m5 are scratch. The SWAPs at the end rename the
; registers so that m0..m3 hold the stage outputs in transform order
; (a0, c1, d1, b1 per the inline comments).
%macro TRANSFORM_COLS 0
  paddw m0, m1
  movq m4, m0
  psubw m3, m2
  psubw m4, m3
  ; e1 = (a1 - d1) >> 1, shared by the two middle outputs below.
  psraw m4, 1
  movq m5, m4
  psubw m5, m1 ;b1
  psubw m4, m2 ;c1
  psubw m0, m4
  paddw m3, m5
  ; m0 a0
  SWAP 1, 4 ; m1 c1
  SWAP 2, 3 ; m2 d1
  SWAP 3, 5 ; m3 b1
%endmacro
+
; Transposes a 4x4 matrix of words held in the low halves of m0..m3.
; On exit the transposed rows are packed two per register:
; m0 = columns 0 and 1, m1 = columns 2 and 3 (the caller splits them
; apart with psrldq before the next transform stage).
%macro TRANSPOSE_4X4 0
                        ; 00 01 02 03
                        ; 10 11 12 13
                        ; 20 21 22 23
                        ; 30 31 32 33
  punpcklwd m0, m1      ; 00 10 01 11  02 12 03 13
  punpcklwd m2, m3      ; 20 30 21 31  22 32 23 33
  mova      m1, m0
  punpckldq m0, m2      ; 00 10 20 30  01 11 21 31
  punpckhdq m1, m2      ; 02 12 22 32  03 13 23 33
%endmacro
+
INIT_XMM sse2
; void av1_fwht4x4_sse2(const int16_t *input, tran_low_t *output, int stride);
; 4x4 forward Walsh-Hadamard transform. Input rows are 'stride' int16
; elements apart (addressed as strideq*2 bytes below). Applies the column
; stage, transposes, applies the row stage, scales the result by 4
; (psllw 2), then sign-extends the 16-bit values to 32 bits and stores
; 16 int32 outputs (64 bytes) contiguously at 'output'.
cglobal fwht4x4, 3, 4, 8, input, output, stride
  ; r3q points at rows 2/3 (input + 2 rows, since strideq*4 = 2*stride bytes).
  lea             r3q, [inputq + strideq*4]
  movq            m0, [inputq] ;a1
  movq            m1, [inputq + strideq*2] ;b1
  movq            m2, [r3q] ;c1
  movq            m3, [r3q + strideq*2] ;d1

  TRANSFORM_COLS
  TRANSPOSE_4X4
  ; Split the two packed row pairs back into four single-row registers.
  SWAP 1, 2
  psrldq m1, m0, 8
  psrldq m3, m2, 8
  TRANSFORM_COLS
  TRANSPOSE_4X4

  psllw m0, 2
  psllw m1, 2

  ; sign extension: duplicate each word into a dword, then arithmetic
  ; shift right by 16 to replicate the sign bits.
  mova m2, m0
  mova m3, m1
  punpcklwd m0, m0
  punpcklwd m1, m1
  punpckhwd m2, m2
  punpckhwd m3, m3
  psrad m0, 16
  psrad m1, 16
  psrad m2, 16
  psrad m3, 16
  mova [outputq], m0
  mova [outputq + 16], m2
  mova [outputq + 32], m1
  mova [outputq + 48], m3

  RET
diff --git a/third_party/aom/av1/encoder/x86/encodetxb_avx2.c b/third_party/aom/av1/encoder/x86/encodetxb_avx2.c
new file mode 100644
index 0000000000..9627f75930
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/encodetxb_avx2.c
@@ -0,0 +1,122 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include <smmintrin.h> /* SSE4.1 */
+#include <immintrin.h> /* AVX2 */
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/x86/mem_sse2.h"
+#include "av1/common/av1_common_int.h"
+#include "av1/common/txb_common.h"
+#include "aom_dsp/x86/synonyms.h"
+#include "aom_dsp/x86/synonyms_avx2.h"
+
+void av1_txb_init_levels_avx2(const tran_low_t *const coeff, const int width,
+ const int height, uint8_t *const levels) {
+ const int stride = height + TX_PAD_HOR;
+ const __m256i y_zeros = _mm256_setzero_si256();
+
+ const int32_t bottom_len = sizeof(*levels) * (TX_PAD_BOTTOM * stride);
+ uint8_t *bottom_buf_end = levels + (width + TX_PAD_BOTTOM) * stride;
+ uint8_t *bottom_buf = bottom_buf_end - ((bottom_len + 31) & (~31));
+
+ do {
+ yy_storeu_256(bottom_buf, y_zeros);
+ bottom_buf += 32;
+ } while (bottom_buf < bottom_buf_end);
+
+ int i = 0;
+ uint8_t *ls = levels;
+ const tran_low_t *cf = coeff;
+ if (height == 4) {
+ do {
+ const __m256i c0 = yy_loadu_256(cf);
+ const __m256i c1 = yy_loadu_256(cf + 8);
+ const __m256i abs01 = _mm256_abs_epi16(_mm256_packs_epi32(c0, c1));
+ const __m256i abs01_8 = _mm256_packs_epi16(abs01, y_zeros);
+ const __m256i res_ = _mm256_shuffle_epi32(abs01_8, 0xd8);
+ const __m256i res = _mm256_permute4x64_epi64(res_, 0xd8);
+ yy_storeu_256(ls, res);
+ ls += 32;
+ cf += 16;
+ i += 4;
+ } while (i < width);
+ } else if (height == 8) {
+ do {
+ const __m256i coeffA = yy_loadu_256(cf);
+ const __m256i coeffB = yy_loadu_256(cf + 8);
+ const __m256i coeffC = yy_loadu_256(cf + 16);
+ const __m256i coeffD = yy_loadu_256(cf + 24);
+ const __m256i coeffAB = _mm256_packs_epi32(coeffA, coeffB);
+ const __m256i coeffCD = _mm256_packs_epi32(coeffC, coeffD);
+ const __m256i absAB = _mm256_abs_epi16(coeffAB);
+ const __m256i absCD = _mm256_abs_epi16(coeffCD);
+ const __m256i absABCD = _mm256_packs_epi16(absAB, absCD);
+ const __m256i res_ = _mm256_permute4x64_epi64(absABCD, 0xd8);
+ const __m256i res = _mm256_shuffle_epi32(res_, 0xd8);
+ const __m128i res0 = _mm256_castsi256_si128(res);
+ const __m128i res1 = _mm256_extracti128_si256(res, 1);
+ xx_storel_64(ls, res0);
+ *(int32_t *)(ls + height) = 0;
+ xx_storel_64(ls + stride, _mm_srli_si128(res0, 8));
+ *(int32_t *)(ls + height + stride) = 0;
+ xx_storel_64(ls + stride * 2, res1);
+ *(int32_t *)(ls + height + stride * 2) = 0;
+ xx_storel_64(ls + stride * 3, _mm_srli_si128(res1, 8));
+ *(int32_t *)(ls + height + stride * 3) = 0;
+ cf += 32;
+ ls += stride << 2;
+ i += 4;
+ } while (i < width);
+ } else if (height == 16) {
+ do {
+ const __m256i coeffA = yy_loadu_256(cf);
+ const __m256i coeffB = yy_loadu_256(cf + 8);
+ const __m256i coeffC = yy_loadu_256(cf + 16);
+ const __m256i coeffD = yy_loadu_256(cf + 24);
+ const __m256i coeffAB = _mm256_packs_epi32(coeffA, coeffB);
+ const __m256i coeffCD = _mm256_packs_epi32(coeffC, coeffD);
+ const __m256i absAB = _mm256_abs_epi16(coeffAB);
+ const __m256i absCD = _mm256_abs_epi16(coeffCD);
+ const __m256i absABCD = _mm256_packs_epi16(absAB, absCD);
+ const __m256i res_ = _mm256_permute4x64_epi64(absABCD, 0xd8);
+ const __m256i res = _mm256_shuffle_epi32(res_, 0xd8);
+ xx_storeu_128(ls, _mm256_castsi256_si128(res));
+ xx_storeu_128(ls + stride, _mm256_extracti128_si256(res, 1));
+ cf += 32;
+ *(int32_t *)(ls + height) = 0;
+ *(int32_t *)(ls + stride + height) = 0;
+ ls += stride << 1;
+ i += 2;
+ } while (i < width);
+ } else {
+ do {
+ const __m256i coeffA = yy_loadu_256(cf);
+ const __m256i coeffB = yy_loadu_256(cf + 8);
+ const __m256i coeffC = yy_loadu_256(cf + 16);
+ const __m256i coeffD = yy_loadu_256(cf + 24);
+ const __m256i coeffAB = _mm256_packs_epi32(coeffA, coeffB);
+ const __m256i coeffCD = _mm256_packs_epi32(coeffC, coeffD);
+ const __m256i absAB = _mm256_abs_epi16(coeffAB);
+ const __m256i absCD = _mm256_abs_epi16(coeffCD);
+ const __m256i absABCD = _mm256_packs_epi16(absAB, absCD);
+ const __m256i res_ = _mm256_permute4x64_epi64(absABCD, 0xd8);
+ const __m256i res = _mm256_shuffle_epi32(res_, 0xd8);
+ yy_storeu_256(ls, res);
+ cf += 32;
+ *(int32_t *)(ls + height) = 0;
+ ls += stride;
+ i += 1;
+ } while (i < width);
+ }
+}
diff --git a/third_party/aom/av1/encoder/x86/encodetxb_sse2.c b/third_party/aom/av1/encoder/x86/encodetxb_sse2.c
new file mode 100644
index 0000000000..d23a688747
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/encodetxb_sse2.c
@@ -0,0 +1,505 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/x86/mem_sse2.h"
+#include "av1/common/av1_common_int.h"
+#include "av1/common/txb_common.h"
+
+static INLINE void load_levels_4x4x5_sse2(const uint8_t *const src,
+ const int stride,
+ const ptrdiff_t *const offsets,
+ __m128i *const level) {
+ level[0] = load_8bit_4x4_to_1_reg_sse2(src + 1, stride);
+ level[1] = load_8bit_4x4_to_1_reg_sse2(src + stride, stride);
+ level[2] = load_8bit_4x4_to_1_reg_sse2(src + offsets[0], stride);
+ level[3] = load_8bit_4x4_to_1_reg_sse2(src + offsets[1], stride);
+ level[4] = load_8bit_4x4_to_1_reg_sse2(src + offsets[2], stride);
+}
+
+static INLINE void load_levels_8x2x5_sse2(const uint8_t *const src,
+ const int stride,
+ const ptrdiff_t *const offsets,
+ __m128i *const level) {
+ level[0] = load_8bit_8x2_to_1_reg_sse2(src + 1, stride);
+ level[1] = load_8bit_8x2_to_1_reg_sse2(src + stride, stride);
+ level[2] = load_8bit_8x2_to_1_reg_sse2(src + offsets[0], stride);
+ level[3] = load_8bit_8x2_to_1_reg_sse2(src + offsets[1], stride);
+ level[4] = load_8bit_8x2_to_1_reg_sse2(src + offsets[2], stride);
+}
+
+static INLINE void load_levels_16x1x5_sse2(const uint8_t *const src,
+ const int stride,
+ const ptrdiff_t *const offsets,
+ __m128i *const level) {
+ level[0] = _mm_loadu_si128((__m128i *)(src + 1));
+ level[1] = _mm_loadu_si128((__m128i *)(src + stride));
+ level[2] = _mm_loadu_si128((__m128i *)(src + offsets[0]));
+ level[3] = _mm_loadu_si128((__m128i *)(src + offsets[1]));
+ level[4] = _mm_loadu_si128((__m128i *)(src + offsets[2]));
+}
+
+static INLINE __m128i get_coeff_contexts_kernel_sse2(__m128i *const level) {
+ const __m128i const_3 = _mm_set1_epi8(3);
+ const __m128i const_4 = _mm_set1_epi8(4);
+ __m128i count;
+
+ count = _mm_min_epu8(level[0], const_3);
+ level[1] = _mm_min_epu8(level[1], const_3);
+ level[2] = _mm_min_epu8(level[2], const_3);
+ level[3] = _mm_min_epu8(level[3], const_3);
+ level[4] = _mm_min_epu8(level[4], const_3);
+ count = _mm_add_epi8(count, level[1]);
+ count = _mm_add_epi8(count, level[2]);
+ count = _mm_add_epi8(count, level[3]);
+ count = _mm_add_epi8(count, level[4]);
+ count = _mm_avg_epu8(count, _mm_setzero_si128());
+ count = _mm_min_epu8(count, const_4);
+ return count;
+}
+
// Compute TX_CLASS_2D significance-map contexts for blocks whose packed
// height is 4. Each iteration covers 4 columns (4 levels per column == one
// 16-byte store). |offsets| holds the three extra neighbour offsets
// precomputed by the caller.
static INLINE void get_4_nz_map_contexts_2d(const uint8_t *levels,
                                            const int width,
                                            const ptrdiff_t *const offsets,
                                            int8_t *const coeff_contexts) {
  const int stride = 4 + TX_PAD_HOR;
  // All positions past the first vector map to base offset 21.
  const __m128i pos_to_offset_large = _mm_set1_epi8(21);
  // Base offsets for the first 16 packed positions; the table depends on the
  // block shape (width == 4 vs. wider).
  __m128i pos_to_offset =
      (width == 4)
          ? _mm_setr_epi8(0, 1, 6, 6, 1, 6, 6, 21, 6, 6, 21, 21, 6, 21, 21, 21)
          : _mm_setr_epi8(0, 16, 16, 16, 16, 16, 16, 16, 6, 6, 21, 21, 6, 21,
                          21, 21);
  __m128i count;
  __m128i level[5];
  int8_t *cc = coeff_contexts;
  int col = width;

  assert(!(width % 4));

  do {
    load_levels_4x4x5_sse2(levels, stride, offsets, level);
    // count = rounded half-sum of the 5 clamped neighbour levels, capped at 4.
    count = get_coeff_contexts_kernel_sse2(level);
    count = _mm_add_epi8(count, pos_to_offset);
    _mm_store_si128((__m128i *)cc, count);
    // Only the first vector uses the position-dependent table.
    pos_to_offset = pos_to_offset_large;
    levels += 4 * stride;
    cc += 16;
    col -= 4;
  } while (col);

  // The DC coefficient always gets context 0.
  coeff_contexts[0] = 0;
}
+
+static INLINE void get_4_nz_map_contexts_ver(const uint8_t *levels,
+ const int width,
+ const ptrdiff_t *const offsets,
+ int8_t *coeff_contexts) {
+ const int stride = 4 + TX_PAD_HOR;
+ const __m128i pos_to_offset =
+ _mm_setr_epi8(SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 5,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 5,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 5,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 5,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10);
+ __m128i count;
+ __m128i level[5];
+ int col = width;
+
+ assert(!(width % 4));
+
+ do {
+ load_levels_4x4x5_sse2(levels, stride, offsets, level);
+ count = get_coeff_contexts_kernel_sse2(level);
+ count = _mm_add_epi8(count, pos_to_offset);
+ _mm_store_si128((__m128i *)coeff_contexts, count);
+ levels += 4 * stride;
+ coeff_contexts += 16;
+ col -= 4;
+ } while (col);
+}
+
// Compute TX_CLASS_HORIZ significance-map contexts for blocks whose packed
// height is 4. The base offset depends on the column index: +0 for the first
// column, +5 for the second, +10 from the third onward (each column occupies
// 4 consecutive bytes of the first vector).
static INLINE void get_4_nz_map_contexts_hor(const uint8_t *levels,
                                             const int width,
                                             const ptrdiff_t *const offsets,
                                             int8_t *coeff_contexts) {
  const int stride = 4 + TX_PAD_HOR;
  const __m128i pos_to_offset_large = _mm_set1_epi8(SIG_COEF_CONTEXTS_2D + 10);
  __m128i pos_to_offset =
      _mm_setr_epi8(SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 0,
                    SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 0,
                    SIG_COEF_CONTEXTS_2D + 5, SIG_COEF_CONTEXTS_2D + 5,
                    SIG_COEF_CONTEXTS_2D + 5, SIG_COEF_CONTEXTS_2D + 5,
                    SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
                    SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
                    SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
                    SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10);
  __m128i count;
  __m128i level[5];
  int col = width;

  assert(!(width % 4));

  do {
    load_levels_4x4x5_sse2(levels, stride, offsets, level);
    count = get_coeff_contexts_kernel_sse2(level);
    count = _mm_add_epi8(count, pos_to_offset);
    _mm_store_si128((__m128i *)coeff_contexts, count);
    // Every column after the first four uses the +10 offset.
    pos_to_offset = pos_to_offset_large;
    levels += 4 * stride;
    coeff_contexts += 16;
    col -= 4;
  } while (col);
}
+
// Compute TX_CLASS_2D significance-map contexts for blocks whose packed
// height is 8. Each iteration covers 2 columns (8 levels per column == one
// 16-byte store). The base-offset tables for the first two vectors depend on
// the block shape; from the third vector on every position uses offset 21.
static INLINE void get_8_coeff_contexts_2d(const uint8_t *levels,
                                           const int width,
                                           const ptrdiff_t *const offsets,
                                           int8_t *coeff_contexts) {
  const int stride = 8 + TX_PAD_HOR;
  int8_t *cc = coeff_contexts;
  int col = width;
  __m128i count;
  __m128i level[5];
  __m128i pos_to_offset[3];

  assert(!(width % 2));

  // Select the position tables by aspect ratio: square (width == 8),
  // taller than wide (width < 8), or wider than tall (width > 8).
  if (width == 8) {
    pos_to_offset[0] =
        _mm_setr_epi8(0, 1, 6, 6, 21, 21, 21, 21, 1, 6, 6, 21, 21, 21, 21, 21);
    pos_to_offset[1] = _mm_setr_epi8(6, 6, 21, 21, 21, 21, 21, 21, 6, 21, 21,
                                     21, 21, 21, 21, 21);
  } else if (width < 8) {
    pos_to_offset[0] = _mm_setr_epi8(0, 11, 6, 6, 21, 21, 21, 21, 11, 11, 6, 21,
                                     21, 21, 21, 21);
    pos_to_offset[1] = _mm_setr_epi8(11, 11, 21, 21, 21, 21, 21, 21, 11, 11, 21,
                                     21, 21, 21, 21, 21);
  } else {
    pos_to_offset[0] = _mm_setr_epi8(0, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16,
                                     16, 16, 16, 16, 16);
    pos_to_offset[1] = _mm_setr_epi8(6, 6, 21, 21, 21, 21, 21, 21, 6, 21, 21,
                                     21, 21, 21, 21, 21);
  }
  pos_to_offset[2] = _mm_set1_epi8(21);

  do {
    load_levels_8x2x5_sse2(levels, stride, offsets, level);
    count = get_coeff_contexts_kernel_sse2(level);
    count = _mm_add_epi8(count, pos_to_offset[0]);
    _mm_store_si128((__m128i *)cc, count);
    // Rotate the table state: first vector -> second -> constant 21.
    pos_to_offset[0] = pos_to_offset[1];
    pos_to_offset[1] = pos_to_offset[2];
    levels += 2 * stride;
    cc += 16;
    col -= 2;
  } while (col);

  // The DC coefficient always gets context 0.
  coeff_contexts[0] = 0;
}
+
+static INLINE void get_8_coeff_contexts_ver(const uint8_t *levels,
+ const int width,
+ const ptrdiff_t *const offsets,
+ int8_t *coeff_contexts) {
+ const int stride = 8 + TX_PAD_HOR;
+ const __m128i pos_to_offset =
+ _mm_setr_epi8(SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 5,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 5,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10);
+ int col = width;
+ __m128i count;
+ __m128i level[5];
+
+ assert(!(width % 2));
+
+ do {
+ load_levels_8x2x5_sse2(levels, stride, offsets, level);
+ count = get_coeff_contexts_kernel_sse2(level);
+ count = _mm_add_epi8(count, pos_to_offset);
+ _mm_store_si128((__m128i *)coeff_contexts, count);
+ levels += 2 * stride;
+ coeff_contexts += 16;
+ col -= 2;
+ } while (col);
+}
+
// Compute TX_CLASS_HORIZ significance-map contexts for blocks whose packed
// height is 8. The first vector covers columns 0 (+0 for all 8 positions)
// and 1 (+5); every later column uses +10.
static INLINE void get_8_coeff_contexts_hor(const uint8_t *levels,
                                            const int width,
                                            const ptrdiff_t *const offsets,
                                            int8_t *coeff_contexts) {
  const int stride = 8 + TX_PAD_HOR;
  const __m128i pos_to_offset_large = _mm_set1_epi8(SIG_COEF_CONTEXTS_2D + 10);
  __m128i pos_to_offset =
      _mm_setr_epi8(SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 0,
                    SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 0,
                    SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 0,
                    SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 0,
                    SIG_COEF_CONTEXTS_2D + 5, SIG_COEF_CONTEXTS_2D + 5,
                    SIG_COEF_CONTEXTS_2D + 5, SIG_COEF_CONTEXTS_2D + 5,
                    SIG_COEF_CONTEXTS_2D + 5, SIG_COEF_CONTEXTS_2D + 5,
                    SIG_COEF_CONTEXTS_2D + 5, SIG_COEF_CONTEXTS_2D + 5);
  int col = width;
  __m128i count;
  __m128i level[5];

  assert(!(width % 2));

  do {
    load_levels_8x2x5_sse2(levels, stride, offsets, level);
    count = get_coeff_contexts_kernel_sse2(level);
    count = _mm_add_epi8(count, pos_to_offset);
    _mm_store_si128((__m128i *)coeff_contexts, count);
    // Only the first pair of columns uses the +0/+5 table.
    pos_to_offset = pos_to_offset_large;
    levels += 2 * stride;
    coeff_contexts += 16;
    col -= 2;
  } while (col);
}
+
// Compute TX_CLASS_2D significance-map contexts for blocks whose packed
// height is a multiple of 16. The outer loop walks columns; the inner loop
// walks the column in 16-byte steps. pos_to_offset[0] is the table for the
// first vector of the current column; pos_to_offset_large[0] covers the rest
// of that column; both sets are shifted down by one entry after each column,
// which is how the 2-D position->offset map advances across the block.
// |real_width|/|real_height| are the true transform dimensions (the packed
// |width|/|height| are capped at 32), and select between the square,
// tall and wide table sets.
static INLINE void get_16n_coeff_contexts_2d(const uint8_t *levels,
                                             const int real_width,
                                             const int real_height,
                                             const int width, const int height,
                                             const ptrdiff_t *const offsets,
                                             int8_t *coeff_contexts) {
  const int stride = height + TX_PAD_HOR;
  int8_t *cc = coeff_contexts;
  int col = width;
  __m128i pos_to_offset[5];
  __m128i pos_to_offset_large[3];
  __m128i count;
  __m128i level[5];

  assert(!(height % 16));

  pos_to_offset_large[2] = _mm_set1_epi8(21);
  if (real_width == real_height) {
    pos_to_offset[0] = _mm_setr_epi8(0, 1, 6, 6, 21, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21);
    pos_to_offset[1] = _mm_setr_epi8(1, 6, 6, 21, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21, 21);
    pos_to_offset[2] = _mm_setr_epi8(6, 6, 21, 21, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21, 21);
    pos_to_offset[3] = _mm_setr_epi8(6, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21, 21);
    pos_to_offset[4] = pos_to_offset_large[0] = pos_to_offset_large[1] =
        pos_to_offset_large[2];
  } else if (real_width < real_height) {
    pos_to_offset[0] = _mm_setr_epi8(0, 11, 6, 6, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21, 21);
    pos_to_offset[1] = _mm_setr_epi8(11, 11, 6, 21, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21, 21);
    pos_to_offset[2] = pos_to_offset[3] = pos_to_offset[4] = _mm_setr_epi8(
        11, 11, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21);
    pos_to_offset_large[0] = pos_to_offset_large[1] = pos_to_offset_large[2];
  } else {  // real_width > real_height
    pos_to_offset[0] = pos_to_offset[1] = _mm_setr_epi8(
        16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16);
    pos_to_offset[2] = _mm_setr_epi8(6, 6, 21, 21, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21, 21);
    pos_to_offset[3] = _mm_setr_epi8(6, 21, 21, 21, 21, 21, 21, 21, 21, 21, 21,
                                     21, 21, 21, 21, 21);
    pos_to_offset[4] = pos_to_offset_large[2];
    pos_to_offset_large[0] = pos_to_offset_large[1] = _mm_set1_epi8(16);
  }

  do {
    int h = height;

    do {
      load_levels_16x1x5_sse2(levels, stride, offsets, level);
      count = get_coeff_contexts_kernel_sse2(level);
      count = _mm_add_epi8(count, pos_to_offset[0]);
      _mm_store_si128((__m128i *)cc, count);
      levels += 16;
      cc += 16;
      h -= 16;
      // After the column's first vector, use the column-wide offset.
      pos_to_offset[0] = pos_to_offset_large[0];
    } while (h);

    // Shift both table sets down by one entry for the next column.
    pos_to_offset[0] = pos_to_offset[1];
    pos_to_offset[1] = pos_to_offset[2];
    pos_to_offset[2] = pos_to_offset[3];
    pos_to_offset[3] = pos_to_offset[4];
    pos_to_offset_large[0] = pos_to_offset_large[1];
    pos_to_offset_large[1] = pos_to_offset_large[2];
    levels += TX_PAD_HOR;  // skip the per-column padding
  } while (--col);

  // The DC coefficient always gets context 0.
  coeff_contexts[0] = 0;
}
+
+static INLINE void get_16n_coeff_contexts_ver(const uint8_t *levels,
+ const int width, const int height,
+ const ptrdiff_t *const offsets,
+ int8_t *coeff_contexts) {
+ const int stride = height + TX_PAD_HOR;
+ const __m128i pos_to_offset_large =
+ _mm_setr_epi8(SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10);
+ __m128i count;
+ __m128i level[5];
+ int col = width;
+
+ assert(!(height % 16));
+
+ do {
+ __m128i pos_to_offset =
+ _mm_setr_epi8(SIG_COEF_CONTEXTS_2D + 0, SIG_COEF_CONTEXTS_2D + 5,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10,
+ SIG_COEF_CONTEXTS_2D + 10, SIG_COEF_CONTEXTS_2D + 10);
+ int h = height;
+
+ do {
+ load_levels_16x1x5_sse2(levels, stride, offsets, level);
+ count = get_coeff_contexts_kernel_sse2(level);
+ count = _mm_add_epi8(count, pos_to_offset);
+ _mm_store_si128((__m128i *)coeff_contexts, count);
+ pos_to_offset = pos_to_offset_large;
+ levels += 16;
+ coeff_contexts += 16;
+ h -= 16;
+ } while (h);
+
+ levels += TX_PAD_HOR;
+ } while (--col);
+}
+
// Compute TX_CLASS_HORIZ significance-map contexts for blocks whose packed
// height is a multiple of 16. The whole first column uses offset +0, the
// second +5, and every later column +10 (the table window shifts down by one
// entry after each column).
static INLINE void get_16n_coeff_contexts_hor(const uint8_t *levels,
                                              const int width, const int height,
                                              const ptrdiff_t *const offsets,
                                              int8_t *coeff_contexts) {
  const int stride = height + TX_PAD_HOR;
  __m128i pos_to_offset[3];
  __m128i count;
  __m128i level[5];
  int col = width;

  assert(!(height % 16));

  pos_to_offset[0] = _mm_set1_epi8(SIG_COEF_CONTEXTS_2D + 0);
  pos_to_offset[1] = _mm_set1_epi8(SIG_COEF_CONTEXTS_2D + 5);
  pos_to_offset[2] = _mm_set1_epi8(SIG_COEF_CONTEXTS_2D + 10);

  do {
    int h = height;

    do {
      load_levels_16x1x5_sse2(levels, stride, offsets, level);
      count = get_coeff_contexts_kernel_sse2(level);
      count = _mm_add_epi8(count, pos_to_offset[0]);
      _mm_store_si128((__m128i *)coeff_contexts, count);
      levels += 16;
      coeff_contexts += 16;
      h -= 16;
    } while (h);

    // Advance the offset window for the next column (stays at +10 after the
    // second column).
    pos_to_offset[0] = pos_to_offset[1];
    pos_to_offset[1] = pos_to_offset[2];
    levels += TX_PAD_HOR;  // skip the per-column padding
  } while (--col);
}
+
+// Note: levels[] must be in the range [0, 127], inclusive.
+void av1_get_nz_map_contexts_sse2(const uint8_t *const levels,
+ const int16_t *const scan, const uint16_t eob,
+ const TX_SIZE tx_size,
+ const TX_CLASS tx_class,
+ int8_t *const coeff_contexts) {
+ const int last_idx = eob - 1;
+ if (!last_idx) {
+ coeff_contexts[0] = 0;
+ return;
+ }
+
+ const int real_width = tx_size_wide[tx_size];
+ const int real_height = tx_size_high[tx_size];
+ const int width = get_txb_wide(tx_size);
+ const int height = get_txb_high(tx_size);
+ const int stride = height + TX_PAD_HOR;
+ ptrdiff_t offsets[3];
+
+ /* coeff_contexts must be 16 byte aligned. */
+ assert(!((intptr_t)coeff_contexts & 0xf));
+
+ if (tx_class == TX_CLASS_2D) {
+ offsets[0] = 0 * stride + 2;
+ offsets[1] = 1 * stride + 1;
+ offsets[2] = 2 * stride + 0;
+
+ if (height == 4) {
+ get_4_nz_map_contexts_2d(levels, width, offsets, coeff_contexts);
+ } else if (height == 8) {
+ get_8_coeff_contexts_2d(levels, width, offsets, coeff_contexts);
+ } else if (height == 16) {
+ get_16n_coeff_contexts_2d(levels, real_width, real_height, width, height,
+ offsets, coeff_contexts);
+ } else {
+ get_16n_coeff_contexts_2d(levels, real_width, real_height, width, height,
+ offsets, coeff_contexts);
+ }
+ } else if (tx_class == TX_CLASS_HORIZ) {
+ offsets[0] = 2 * stride;
+ offsets[1] = 3 * stride;
+ offsets[2] = 4 * stride;
+ if (height == 4) {
+ get_4_nz_map_contexts_hor(levels, width, offsets, coeff_contexts);
+ } else if (height == 8) {
+ get_8_coeff_contexts_hor(levels, width, offsets, coeff_contexts);
+ } else {
+ get_16n_coeff_contexts_hor(levels, width, height, offsets,
+ coeff_contexts);
+ }
+ } else { // TX_CLASS_VERT
+ offsets[0] = 2;
+ offsets[1] = 3;
+ offsets[2] = 4;
+ if (height == 4) {
+ get_4_nz_map_contexts_ver(levels, width, offsets, coeff_contexts);
+ } else if (height == 8) {
+ get_8_coeff_contexts_ver(levels, width, offsets, coeff_contexts);
+ } else {
+ get_16n_coeff_contexts_ver(levels, width, height, offsets,
+ coeff_contexts);
+ }
+ }
+
+ const int bhl = get_txb_bhl(tx_size);
+ const int pos = scan[last_idx];
+ if (last_idx <= (width << bhl) / 8)
+ coeff_contexts[pos] = 1;
+ else if (last_idx <= (width << bhl) / 4)
+ coeff_contexts[pos] = 2;
+ else
+ coeff_contexts[pos] = 3;
+}
diff --git a/third_party/aom/av1/encoder/x86/encodetxb_sse4.c b/third_party/aom/av1/encoder/x86/encodetxb_sse4.c
new file mode 100644
index 0000000000..72bd8e3411
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/encodetxb_sse4.c
@@ -0,0 +1,84 @@
+/*
+ * Copyright (c) 2017, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include <smmintrin.h> /* SSE4.1 */
+
+#include "aom/aom_integer.h"
+#include "av1/common/av1_common_int.h"
+#include "av1/common/txb_common.h"
+#include "aom_dsp/x86/synonyms.h"
+
// Populate the padded levels[] buffer from the block's quantized
// coefficients: each entry is the saturated absolute value of the
// corresponding tran_low_t, laid out column by column with TX_PAD_HOR bytes
// of zero padding after each column and TX_PAD_BOTTOM zeroed columns at the
// end.
void av1_txb_init_levels_sse4_1(const tran_low_t *const coeff, const int width,
                                const int height, uint8_t *const levels) {
  const int stride = height + TX_PAD_HOR;
  const __m128i zeros = _mm_setzero_si128();

  // Zero the bottom padding region in 16-byte chunks (it may be overwritten
  // by up to 15 extra bytes; the buffer is sized to allow that).
  const int32_t bottom_len = sizeof(*levels) * (TX_PAD_BOTTOM * stride);
  uint8_t *bottom_buf = levels + stride * width;
  uint8_t *bottom_buf_end = bottom_buf + bottom_len;
  do {
    _mm_storeu_si128((__m128i *)(bottom_buf), zeros);
    bottom_buf += 16;
  } while (bottom_buf < bottom_buf_end);

  int i = 0;
  uint8_t *ls = levels;
  const tran_low_t *cf = coeff;
  if (height == 4) {
    // Two columns per iteration: pack 8 32-bit coeffs to 16-bit, take the
    // absolute value, pack to 8-bit, then interleave with zeros so each
    // 4-level group is followed by 4 zero pad bytes.
    do {
      const __m128i coeffA = xx_loadu_128(cf);
      const __m128i coeffB = xx_loadu_128(cf + 4);
      const __m128i coeffAB = _mm_packs_epi32(coeffA, coeffB);
      const __m128i absAB = _mm_abs_epi16(coeffAB);
      const __m128i absAB8 = _mm_packs_epi16(absAB, zeros);
      const __m128i lsAB = _mm_unpacklo_epi32(absAB8, zeros);
      xx_storeu_128(ls, lsAB);
      ls += (stride << 1);
      cf += (height << 1);
      i += 2;
    } while (i < width);
  } else if (height == 8) {
    // One column per iteration; the 16-byte store writes 8 levels followed
    // by 8 zeros, so the 4 pad bytes are zeroed by the store itself (the 4
    // surplus bytes are overwritten by the next iteration or land in the
    // already-zeroed bottom padding).
    do {
      const __m128i coeffA = xx_loadu_128(cf);
      const __m128i coeffB = xx_loadu_128(cf + 4);
      const __m128i coeffAB = _mm_packs_epi32(coeffA, coeffB);
      const __m128i absAB = _mm_abs_epi16(coeffAB);
      const __m128i absAB8 = _mm_packs_epi16(absAB, zeros);
      xx_storeu_128(ls, absAB8);
      ls += stride;
      cf += height;
      i += 1;
    } while (i < width);
  } else {
    // Heights that are multiples of 16: convert the column 16 levels at a
    // time, then explicitly zero the 4 pad bytes after the column.
    do {
      int j = 0;
      do {
        const __m128i coeffA = xx_loadu_128(cf);
        const __m128i coeffB = xx_loadu_128(cf + 4);
        const __m128i coeffC = xx_loadu_128(cf + 8);
        const __m128i coeffD = xx_loadu_128(cf + 12);
        const __m128i coeffAB = _mm_packs_epi32(coeffA, coeffB);
        const __m128i coeffCD = _mm_packs_epi32(coeffC, coeffD);
        const __m128i absAB = _mm_abs_epi16(coeffAB);
        const __m128i absCD = _mm_abs_epi16(coeffCD);
        const __m128i absABCD = _mm_packs_epi16(absAB, absCD);
        xx_storeu_128(ls + j, absABCD);
        j += 16;
        cf += 16;
      } while (j < height);
      *(int32_t *)(ls + height) = 0;
      ls += stride;
      i += 1;
    } while (i < width);
  }
}
diff --git a/third_party/aom/av1/encoder/x86/error_intrin_avx2.c b/third_party/aom/av1/encoder/x86/error_intrin_avx2.c
new file mode 100644
index 0000000000..57725d1795
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/error_intrin_avx2.c
@@ -0,0 +1,210 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h> // AVX2
+
+#include "config/av1_rtcd.h"
+
+#include "aom/aom_integer.h"
+
// Load 16 coefficients starting at coeff[offset] as one vector of 16-bit
// values. When tran_low_t is 32-bit (the branch is resolved at compile
// time), two 256-bit loads are packed down with signed saturation and the
// 64-bit quarters re-ordered (0xD8) to restore ascending coefficient order
// across the lanes.
static INLINE void read_coeff(const tran_low_t *coeff, intptr_t offset,
                              __m256i *c) {
  const tran_low_t *addr = coeff + offset;

  if (sizeof(tran_low_t) == 4) {
    const __m256i x0 = _mm256_loadu_si256((const __m256i *)addr);
    const __m256i x1 = _mm256_loadu_si256((const __m256i *)addr + 1);
    const __m256i y = _mm256_packs_epi32(x0, x1);
    *c = _mm256_permute4x64_epi64(y, 0xD8);
  } else {
    *c = _mm256_loadu_si256((const __m256i *)addr);
  }
}
+
// Sum of squared differences for exactly 16 coefficients. Note that this
// OVERWRITES *sse_256 (the caller initializes it); the result is left as
// four 64-bit lanes, zero-extended so the caller's 64-bit reduction is
// overflow-free.
static INLINE void av1_block_error_num_coeff16_avx2(const int16_t *coeff,
                                                    const int16_t *dqcoeff,
                                                    __m256i *sse_256) {
  const __m256i _coeff = _mm256_loadu_si256((const __m256i *)coeff);
  const __m256i _dqcoeff = _mm256_loadu_si256((const __m256i *)dqcoeff);
  // d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
  const __m256i diff = _mm256_sub_epi16(_dqcoeff, _coeff);
  // r0 r1 r2 r3 r4 r5 r6 r7
  const __m256i error = _mm256_madd_epi16(diff, diff);
  // r0+r1 r2+r3 | r0+r1 r2+r3 | r4+r5 r6+r7 | r4+r5 r6+r7
  const __m256i error_hi = _mm256_hadd_epi32(error, error);
  // Zero-extend the low two dwords of each lane to 64 bits:
  // r0+r1 | r2+r3 | r4+r5 | r6+r7
  *sse_256 = _mm256_unpacklo_epi32(error_hi, _mm256_setzero_si256());
}
+
// Sum of squared differences for exactly 32 coefficients, accumulated into
// *sse_256 as four 64-bit lanes.
static INLINE void av1_block_error_num_coeff32_avx2(const int16_t *coeff,
                                                    const int16_t *dqcoeff,
                                                    __m256i *sse_256) {
  const __m256i zero = _mm256_setzero_si256();
  const __m256i _coeff_0 = _mm256_loadu_si256((const __m256i *)coeff);
  const __m256i _dqcoeff_0 = _mm256_loadu_si256((const __m256i *)dqcoeff);
  const __m256i _coeff_1 = _mm256_loadu_si256((const __m256i *)(coeff + 16));
  const __m256i _dqcoeff_1 =
      _mm256_loadu_si256((const __m256i *)(dqcoeff + 16));

  // d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
  const __m256i diff_0 = _mm256_sub_epi16(_dqcoeff_0, _coeff_0);
  const __m256i diff_1 = _mm256_sub_epi16(_dqcoeff_1, _coeff_1);

  // r0 r1 r2 r3 r4 r5 r6 r7
  const __m256i error_0 = _mm256_madd_epi16(diff_0, diff_0);
  const __m256i error_1 = _mm256_madd_epi16(diff_1, diff_1);
  const __m256i err_final_0 = _mm256_add_epi32(error_0, error_1);

  // For extreme input values, the accumulation needs to happen in 64 bit
  // precision to avoid any overflow.
  const __m256i exp0_error_lo = _mm256_unpacklo_epi32(err_final_0, zero);
  const __m256i exp0_error_hi = _mm256_unpackhi_epi32(err_final_0, zero);
  const __m256i sum_temp_0 = _mm256_add_epi64(exp0_error_hi, exp0_error_lo);
  *sse_256 = _mm256_add_epi64(*sse_256, sum_temp_0);
}
+
// Sum of squared differences for num_coeff coefficients, 64 per iteration
// (num_coeff is assumed to be a multiple of 64 here), accumulated into
// *sse_256 as four 64-bit lanes.
static INLINE void av1_block_error_num_coeff64_avx2(const int16_t *coeff,
                                                    const int16_t *dqcoeff,
                                                    __m256i *sse_256,
                                                    intptr_t num_coeff) {
  const __m256i zero = _mm256_setzero_si256();
  for (int i = 0; i < num_coeff; i += 64) {
    // Load 64 elements for coeff and dqcoeff.
    const __m256i _coeff_0 = _mm256_loadu_si256((const __m256i *)coeff);
    const __m256i _dqcoeff_0 = _mm256_loadu_si256((const __m256i *)dqcoeff);
    const __m256i _coeff_1 = _mm256_loadu_si256((const __m256i *)(coeff + 16));
    const __m256i _dqcoeff_1 =
        _mm256_loadu_si256((const __m256i *)(dqcoeff + 16));
    const __m256i _coeff_2 = _mm256_loadu_si256((const __m256i *)(coeff + 32));
    const __m256i _dqcoeff_2 =
        _mm256_loadu_si256((const __m256i *)(dqcoeff + 32));
    const __m256i _coeff_3 = _mm256_loadu_si256((const __m256i *)(coeff + 48));
    const __m256i _dqcoeff_3 =
        _mm256_loadu_si256((const __m256i *)(dqcoeff + 48));

    // d0 d1 d2 d3 d4 d5 d6 d7 d8 d9 d10 d11 d12 d13 d14 d15
    const __m256i diff_0 = _mm256_sub_epi16(_dqcoeff_0, _coeff_0);
    const __m256i diff_1 = _mm256_sub_epi16(_dqcoeff_1, _coeff_1);
    const __m256i diff_2 = _mm256_sub_epi16(_dqcoeff_2, _coeff_2);
    const __m256i diff_3 = _mm256_sub_epi16(_dqcoeff_3, _coeff_3);

    // r0 r1 r2 r3 r4 r5 r6 r7
    const __m256i error_0 = _mm256_madd_epi16(diff_0, diff_0);
    const __m256i error_1 = _mm256_madd_epi16(diff_1, diff_1);
    const __m256i error_2 = _mm256_madd_epi16(diff_2, diff_2);
    const __m256i error_3 = _mm256_madd_epi16(diff_3, diff_3);
    // r00 r01 r02 r03 r04 r05 r06 r07
    const __m256i err_final_0 = _mm256_add_epi32(error_0, error_1);
    // r10 r11 r12 r13 r14 r15 r16 r17
    const __m256i err_final_1 = _mm256_add_epi32(error_2, error_3);

    // For extreme input values, the accumulation needs to happen in 64 bit
    // precision to avoid any overflow. r00 r01 r04 r05
    const __m256i exp0_error_lo = _mm256_unpacklo_epi32(err_final_0, zero);
    // r02 r03 r06 r07
    const __m256i exp0_error_hi = _mm256_unpackhi_epi32(err_final_0, zero);
    // r10 r11 r14 r15
    const __m256i exp1_error_lo = _mm256_unpacklo_epi32(err_final_1, zero);
    // r12 r13 r16 r17
    const __m256i exp1_error_hi = _mm256_unpackhi_epi32(err_final_1, zero);

    const __m256i sum_temp_0 = _mm256_add_epi64(exp0_error_hi, exp0_error_lo);
    const __m256i sum_temp_1 = _mm256_add_epi64(exp1_error_hi, exp1_error_lo);
    const __m256i sse_256_temp = _mm256_add_epi64(sum_temp_1, sum_temp_0);
    *sse_256 = _mm256_add_epi64(*sse_256, sse_256_temp);
    coeff += 64;
    dqcoeff += 64;
  }
}
+
// Return the sum of squared differences between dqcoeff and coeff over
// num_coeff 16-bit values. num_coeff must be a multiple of 16; 16 and 32
// get dedicated single-shot kernels, everything larger is assumed here to
// be a multiple of 64 and handled by the 64-per-iteration loop.
int64_t av1_block_error_lp_avx2(const int16_t *coeff, const int16_t *dqcoeff,
                                intptr_t num_coeff) {
  assert(num_coeff % 16 == 0);
  __m256i sse_256 = _mm256_setzero_si256();
  int64_t sse;

  if (num_coeff == 16)
    av1_block_error_num_coeff16_avx2(coeff, dqcoeff, &sse_256);
  else if (num_coeff == 32)
    av1_block_error_num_coeff32_avx2(coeff, dqcoeff, &sse_256);
  else
    av1_block_error_num_coeff64_avx2(coeff, dqcoeff, &sse_256, num_coeff);

  // Save the higher 64 bit of each 128 bit lane.
  const __m256i sse_hi = _mm256_srli_si256(sse_256, 8);
  // Add the higher 64 bit to the low 64 bit.
  sse_256 = _mm256_add_epi64(sse_256, sse_hi);
  // Accumulate the sse_256 register to get final sse
  const __m128i sse_128 = _mm_add_epi64(_mm256_castsi256_si128(sse_256),
                                        _mm256_extractf128_si256(sse_256, 1));

  // Store the results.
  _mm_storel_epi64((__m128i *)&sse, sse_128);
  return sse;
}
+
// Return the sum of squared differences between dqcoeff and coeff (the
// "sse") and also write the sum of squared coeff values to *ssz. The loop
// steps 16 coefficients at a time, so block_size is assumed to be a
// multiple of 16 — TODO confirm against callers.
int64_t av1_block_error_avx2(const tran_low_t *coeff, const tran_low_t *dqcoeff,
                             intptr_t block_size, int64_t *ssz) {
  __m256i sse_reg, ssz_reg, coeff_reg, dqcoeff_reg;
  __m256i exp_dqcoeff_lo, exp_dqcoeff_hi, exp_coeff_lo, exp_coeff_hi;
  __m256i sse_reg_64hi, ssz_reg_64hi;
  __m128i sse_reg128, ssz_reg128;
  int64_t sse;
  int i;
  const __m256i zero_reg = _mm256_setzero_si256();

  // init sse and ssz registers to zero
  sse_reg = _mm256_setzero_si256();
  ssz_reg = _mm256_setzero_si256();

  for (i = 0; i < block_size; i += 16) {
    // load 32 bytes from coeff and dqcoeff (packed down to 16-bit values)
    read_coeff(coeff, i, &coeff_reg);
    read_coeff(dqcoeff, i, &dqcoeff_reg);
    // dqcoeff - coeff
    dqcoeff_reg = _mm256_sub_epi16(dqcoeff_reg, coeff_reg);
    // madd (dqcoeff - coeff)
    dqcoeff_reg = _mm256_madd_epi16(dqcoeff_reg, dqcoeff_reg);
    // madd coeff
    coeff_reg = _mm256_madd_epi16(coeff_reg, coeff_reg);
    // expand each double word of madd (dqcoeff - coeff) to quad word
    exp_dqcoeff_lo = _mm256_unpacklo_epi32(dqcoeff_reg, zero_reg);
    exp_dqcoeff_hi = _mm256_unpackhi_epi32(dqcoeff_reg, zero_reg);
    // expand each double word of madd (coeff) to quad word
    exp_coeff_lo = _mm256_unpacklo_epi32(coeff_reg, zero_reg);
    exp_coeff_hi = _mm256_unpackhi_epi32(coeff_reg, zero_reg);
    // add each quad word of madd (dqcoeff - coeff) and madd (coeff)
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_lo);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_lo);
    sse_reg = _mm256_add_epi64(sse_reg, exp_dqcoeff_hi);
    ssz_reg = _mm256_add_epi64(ssz_reg, exp_coeff_hi);
  }
  // save the higher 64 bit of each 128 bit lane
  sse_reg_64hi = _mm256_srli_si256(sse_reg, 8);
  ssz_reg_64hi = _mm256_srli_si256(ssz_reg, 8);
  // add the higher 64 bit to the low 64 bit
  sse_reg = _mm256_add_epi64(sse_reg, sse_reg_64hi);
  ssz_reg = _mm256_add_epi64(ssz_reg, ssz_reg_64hi);

  // add each 64 bit from each of the 128 bit lane of the 256 bit
  sse_reg128 = _mm_add_epi64(_mm256_castsi256_si128(sse_reg),
                             _mm256_extractf128_si256(sse_reg, 1));

  ssz_reg128 = _mm_add_epi64(_mm256_castsi256_si128(ssz_reg),
                             _mm256_extractf128_si256(ssz_reg, 1));

  // store the results
  _mm_storel_epi64((__m128i *)(&sse), sse_reg128);

  _mm_storel_epi64((__m128i *)(ssz), ssz_reg128);
  _mm256_zeroupper();
  return sse;
}
diff --git a/third_party/aom/av1/encoder/x86/error_intrin_sse2.c b/third_party/aom/av1/encoder/x86/error_intrin_sse2.c
new file mode 100644
index 0000000000..61f65c623f
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/error_intrin_sse2.c
@@ -0,0 +1,75 @@
+/*
+ * Copyright (c) 2021, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <emmintrin.h> // SSE2
+
+#include "config/av1_rtcd.h"
+
+#include "aom/aom_integer.h"
+
+static AOM_INLINE __m128i reduce_sum_epi64(__m128i reg) {
+ __m128i reg_hi = _mm_srli_si128(reg, 8);
+ reg = _mm_add_epi64(reg, reg_hi);
+
+ return reg;
+}
+
// Return the sum of squared differences between dqcoeff and coeff over
// block_size 16-bit values. block_size must be a non-zero multiple of 16.
// Squares are accumulated as 64-bit lanes so no overflow is possible even
// for extreme inputs.
int64_t av1_block_error_lp_sse2(const int16_t *coeff, const int16_t *dqcoeff,
                                intptr_t block_size) {
  assert(block_size % 16 == 0);
  assert(block_size >= 16);

  const __m128i zero = _mm_setzero_si128();
  __m128i acc_a = zero;
  __m128i acc_b = zero;

  for (intptr_t i = 0; i < block_size; i += 16, coeff += 16, dqcoeff += 16) {
    // 16 coefficients per pass, as two 8-lane vectors.
    const __m128i c0 = _mm_loadu_si128((const __m128i *)coeff);
    const __m128i c1 = _mm_loadu_si128((const __m128i *)(coeff + 8));
    const __m128i d0 = _mm_loadu_si128((const __m128i *)dqcoeff);
    const __m128i d1 = _mm_loadu_si128((const __m128i *)(dqcoeff + 8));

    // Difference, then pairwise squared sums (32-bit).
    const __m128i e0 = _mm_sub_epi16(d0, c0);
    const __m128i e1 = _mm_sub_epi16(d1, c1);
    const __m128i sq0 = _mm_madd_epi16(e0, e0);
    const __m128i sq1 = _mm_madd_epi16(e1, e1);

    // Zero-extend each 32-bit partial to 64 bits before accumulating.
    acc_a = _mm_add_epi64(acc_a, _mm_unpacklo_epi32(sq0, zero));
    acc_b = _mm_add_epi64(acc_b, _mm_unpacklo_epi32(sq1, zero));
    acc_a = _mm_add_epi64(acc_a, _mm_unpackhi_epi32(sq0, zero));
    acc_b = _mm_add_epi64(acc_b, _mm_unpackhi_epi32(sq1, zero));
  }

  // Combine the two accumulators and fold the upper lane into the lower.
  __m128i total = _mm_add_epi64(acc_a, acc_b);
  total = _mm_add_epi64(total, _mm_srli_si128(total, 8));

  int64_t sse;
  _mm_storel_epi64((__m128i *)&sse, total);
  return sse;
}
diff --git a/third_party/aom/av1/encoder/x86/error_sse2.asm b/third_party/aom/av1/encoder/x86/error_sse2.asm
new file mode 100644
index 0000000000..6407c106ab
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/error_sse2.asm
@@ -0,0 +1,88 @@
+;
+; Copyright (c) 2016, Alliance for Open Media. All rights reserved
+;
+; This source code is subject to the terms of the BSD 2 Clause License and
+; the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+; was not distributed with this source code in the LICENSE file, you can
+; obtain it at www.aomedia.org/license/software. If the Alliance for Open
+; Media Patent License 1.0 was not distributed with this source code in the
+; PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+;
+
+;
+
+; Increment %1 by sizeof() tran_low_t * %2.
+; tran_low_t is 32 bits here (hence the * 4 scaling below).
+%macro INCREMENT_ELEMENTS_TRAN_LOW 2
+ lea %1, [%1 + %2 * 4]
+%endmacro
+
+; Load %2 + %3 into m%1.
+; %3 is the offset in elements, not bytes.
+; If tran_low_t is 16 bits (low bit depth configuration) then load the value
+; directly. If tran_low_t is 32 bits (high bit depth configuration) then pack
+; the values down to 16 bits.
+; This variant reads two 16-byte chunks (8 dwords) and saturating-packs them
+; into one register of 8 words.
+%macro LOAD_TRAN_LOW 3
+ mova m%1, [%2 + (%3) * 4]
+ packssdw m%1, [%2 + (%3) * 4 + 16]
+%endmacro
+
+%define private_prefix av1
+
+%include "third_party/x86inc/x86inc.asm"
+
+SECTION .text
+
+; int64_t av1_block_error(int16_t *coeff, int16_t *dqcoeff, intptr_t block_size,
+; int64_t *ssz)
+
+INIT_XMM sse2
+; Computes sum of squared differences (returned) and the sum of squared
+; coefficients (stored through ssz). Processes 16 elements per iteration;
+; size is assumed to be a multiple of 16.
+cglobal block_error, 3, 3, 8, uqc, dqc, size, ssz
+ pxor m4, m4 ; sse accumulator
+ pxor m6, m6 ; ssz accumulator
+ pxor m5, m5 ; dedicated zero register
+.loop:
+ LOAD_TRAN_LOW 2, uqcq, 0
+ LOAD_TRAN_LOW 0, dqcq, 0
+ LOAD_TRAN_LOW 3, uqcq, 8
+ LOAD_TRAN_LOW 1, dqcq, 8
+ INCREMENT_ELEMENTS_TRAN_LOW uqcq, 16
+ INCREMENT_ELEMENTS_TRAN_LOW dqcq, 16
+ ; The SSE instructions below do not modify EFLAGS, so the jg at the end of
+ ; the loop still tests the result of this sub.
+ sub sizeq, 16
+ psubw m0, m2
+ psubw m1, m3
+ ; individual errors are max. 15bit+sign, so squares are 30bit, and
+ ; thus the sum of 2 should fit in a 31bit integer (+ unused sign bit)
+ pmaddwd m0, m0
+ pmaddwd m1, m1
+ pmaddwd m2, m2
+ pmaddwd m3, m3
+ ; the sum of 2 31bit integers will fit in a 32bit unsigned integer
+ paddd m0, m1
+ paddd m2, m3
+ ; accumulate in 64bit: interleave the 32-bit partial sums with the zero
+ ; register to widen, then add as quadwords (m0/m1 -> sse, m2/m3 -> ssz).
+ punpckldq m7, m0, m5
+ punpckhdq m0, m5
+ paddq m4, m7
+ punpckldq m7, m2, m5
+ paddq m4, m0
+ punpckhdq m2, m5
+ paddq m6, m7
+ paddq m6, m2
+ jg .loop
+
+ ; accumulate horizontally and store in return value
+ movhlps m5, m4
+ movhlps m7, m6
+ paddq m4, m5
+ paddq m6, m7
+%if AOM_ARCH_X86_64
+ movq rax, m4
+ movq [sszq], m6
+%else
+ ; 32-bit ABI: return the 64-bit sse in edx:eax, store ssz via its stack arg.
+ mov eax, sszm
+ pshufd m5, m4, 0x1
+ movq [eax], m6
+ movd eax, m4
+ movd edx, m5
+%endif
+ RET
diff --git a/third_party/aom/av1/encoder/x86/hash_sse42.c b/third_party/aom/av1/encoder/x86/hash_sse42.c
new file mode 100644
index 0000000000..ebe75310e9
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/hash_sse42.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <stdint.h>
+#include <smmintrin.h>
+
+#include "config/av1_rtcd.h"
+
+// Byte-boundary alignment issues
+#define ALIGN_SIZE 8
+#define ALIGN_MASK (ALIGN_SIZE - 1)
+
+// Consume the buffer in sizeof(type)-sized chunks, feeding each chunk to the
+// given hardware CRC32-C intrinsic `op`. Advances `buf` and decrements `len`
+// in place, so progressively narrower invocations can finish the tail.
+#define CALC_CRC(op, crc, type, buf, len) \
+ while ((len) >= sizeof(type)) { \
+ (crc) = op((crc), *(type *)(buf)); \
+ (len) -= sizeof(type); \
+ buf += sizeof(type); \
+ }
+
+/**
+ * Calculates 32-bit CRC for the input buffer
+ * polynomial is 0x11EDC6F41 (CRC-32C, Castagnoli)
+ * @param crc_calculator unused in this hardware path (kept for interface
+ *                       compatibility with the table-driven implementation)
+ * @param p   input buffer
+ * @param len number of bytes in the buffer
+ * @return A 32-bit unsigned integer representing the CRC
+ */
+uint32_t av1_get_crc32c_value_sse4_2(void *crc_calculator, uint8_t *p,
+ size_t len) {
+ (void)crc_calculator;
+ const uint8_t *buf = p;
+ uint32_t crc = 0xFFFFFFFF;
+
+ // Align the input to the word boundary
+ for (; (len > 0) && ((intptr_t)buf & ALIGN_MASK); len--, buf++) {
+ crc = _mm_crc32_u8(crc, *buf);
+ }
+
+ // Widest chunks first: 8 bytes where available (x86-64 only), then 4/2/1
+ // byte chunks for the remaining tail.
+#ifdef __x86_64__
+ uint64_t crc64 = crc;
+ CALC_CRC(_mm_crc32_u64, crc64, uint64_t, buf, len)
+ crc = (uint32_t)crc64;
+#endif
+ CALC_CRC(_mm_crc32_u32, crc, uint32_t, buf, len)
+ CALC_CRC(_mm_crc32_u16, crc, uint16_t, buf, len)
+ CALC_CRC(_mm_crc32_u8, crc, uint8_t, buf, len)
+ return (crc ^ 0xFFFFFFFF);
+}
diff --git a/third_party/aom/av1/encoder/x86/highbd_block_error_intrin_avx2.c b/third_party/aom/av1/encoder/x86/highbd_block_error_intrin_avx2.c
new file mode 100644
index 0000000000..340307cb3e
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/highbd_block_error_intrin_avx2.c
@@ -0,0 +1,64 @@
+/*
+ * Copyright (c) 2019, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h>
+#include <stdio.h>
+#include "aom/aom_integer.h"
+#include "av1/common/common.h"
+#include "config/av1_rtcd.h"
+
+// High-bitdepth block error: returns the sum of squared differences between
+// 32-bit `coeff` and `dqcoeff`, and stores the sum of squared coefficients
+// through `ssz`. Both sums are rounded and downscaled by 2*(bps-8) bits.
+// Processes 16 dwords per iteration; block_size is assumed to be a multiple
+// of 16. Uses unaligned loads, so no buffer alignment is required.
+int64_t av1_highbd_block_error_avx2(const tran_low_t *coeff,
+ const tran_low_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz,
+ int bps) {
+ int i;
+ int64_t temp1[8];
+ int64_t error = 0, sqcoeff = 0;
+ const int shift = 2 * (bps - 8);
+ const int rounding = shift > 0 ? 1 << (shift - 1) : 0;
+
+ for (i = 0; i < block_size; i += 16) {
+ __m256i mm256_coeff = _mm256_loadu_si256((__m256i *)(coeff + i));
+ __m256i mm256_coeff2 = _mm256_loadu_si256((__m256i *)(coeff + i + 8));
+ __m256i mm256_dqcoeff = _mm256_loadu_si256((__m256i *)(dqcoeff + i));
+ __m256i mm256_dqcoeff2 = _mm256_loadu_si256((__m256i *)(dqcoeff + i + 8));
+
+ // _mm256_mul_epi32 multiplies only the even 32-bit lanes, so each vector
+ // is squared twice: once as-is (even lanes) and once shifted right by 32
+ // (odd lanes), yielding full 64-bit products for all 8 lanes.
+ __m256i diff1 = _mm256_sub_epi32(mm256_coeff, mm256_dqcoeff);
+ __m256i diff2 = _mm256_sub_epi32(mm256_coeff2, mm256_dqcoeff2);
+ __m256i diff1h = _mm256_srli_epi64(diff1, 32);
+ __m256i diff2h = _mm256_srli_epi64(diff2, 32);
+ __m256i res = _mm256_mul_epi32(diff1, diff1);
+ __m256i res1 = _mm256_mul_epi32(diff1h, diff1h);
+ __m256i res2 = _mm256_mul_epi32(diff2, diff2);
+ __m256i res3 = _mm256_mul_epi32(diff2h, diff2h);
+ __m256i res_diff = _mm256_add_epi64(_mm256_add_epi64(res, res1),
+ _mm256_add_epi64(res2, res3));
+ __m256i mm256_coeffh = _mm256_srli_epi64(mm256_coeff, 32);
+ __m256i mm256_coeffh2 = _mm256_srli_epi64(mm256_coeff2, 32);
+ res = _mm256_mul_epi32(mm256_coeff, mm256_coeff);
+ res1 = _mm256_mul_epi32(mm256_coeffh, mm256_coeffh);
+ res2 = _mm256_mul_epi32(mm256_coeff2, mm256_coeff2);
+ res3 = _mm256_mul_epi32(mm256_coeff2h2, mm256_coeffh2);
+ __m256i res_sqcoeff = _mm256_add_epi64(_mm256_add_epi64(res, res1),
+ _mm256_add_epi64(res2, res3));
+ _mm256_storeu_si256((__m256i *)temp1, res_diff);
+ _mm256_storeu_si256((__m256i *)temp1 + 1, res_sqcoeff);
+
+ error += temp1[0] + temp1[1] + temp1[2] + temp1[3];
+ sqcoeff += temp1[4] + temp1[5] + temp1[6] + temp1[7];
+ }
+ // NOTE(review): assert() is presumably provided via av1/common/common.h --
+ // <assert.h> is not included directly here; confirm.
+ assert(error >= 0 && sqcoeff >= 0);
+ error = (error + rounding) >> shift;
+ sqcoeff = (sqcoeff + rounding) >> shift;
+
+ *ssz = sqcoeff;
+ return error;
+}
diff --git a/third_party/aom/av1/encoder/x86/highbd_block_error_intrin_sse2.c b/third_party/aom/av1/encoder/x86/highbd_block_error_intrin_sse2.c
new file mode 100644
index 0000000000..b0b2757568
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/highbd_block_error_intrin_sse2.c
@@ -0,0 +1,74 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <emmintrin.h>
+#include <stdio.h>
+
+#include "av1/common/common.h"
+#include "config/av1_rtcd.h"
+
+// High-bitdepth block error (SSE2): returns the sum of squared differences
+// between 32-bit `coeff` and `dqcoeff`, and stores the sum of squared
+// coefficients through `ssz`. Both sums are rounded and downscaled by
+// 2*(bps-8) bits. Processes 8 dwords per iteration; block_size is assumed to
+// be a multiple of 8 and the buffers 16-byte aligned (_mm_load_si128).
+int64_t av1_highbd_block_error_sse2(const tran_low_t *coeff,
+ const tran_low_t *dqcoeff,
+ intptr_t block_size, int64_t *ssz,
+ int bps) {
+ int i, j, test;
+ uint32_t temp[4];
+ __m128i max, min, cmp0, cmp1, cmp2, cmp3;
+ int64_t error = 0, sqcoeff = 0;
+ const int shift = 2 * (bps - 8);
+ const int rounding = shift > 0 ? 1 << (shift - 1) : 0;
+
+ for (i = 0; i < block_size; i += 8) {
+ // Load the data into xmm registers
+ __m128i mm_coeff = _mm_load_si128((__m128i *)(coeff + i));
+ __m128i mm_coeff2 = _mm_load_si128((__m128i *)(coeff + i + 4));
+ __m128i mm_dqcoeff = _mm_load_si128((__m128i *)(dqcoeff + i));
+ __m128i mm_dqcoeff2 = _mm_load_si128((__m128i *)(dqcoeff + i + 4));
+ // Check if any values require more than 15 bit
+ max = _mm_set1_epi32(0x3fff);
+ min = _mm_set1_epi32((int)0xffffc000);
+ cmp0 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff, max),
+ _mm_cmplt_epi32(mm_coeff, min));
+ cmp1 = _mm_xor_si128(_mm_cmpgt_epi32(mm_coeff2, max),
+ _mm_cmplt_epi32(mm_coeff2, min));
+ cmp2 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff, max),
+ _mm_cmplt_epi32(mm_dqcoeff, min));
+ cmp3 = _mm_xor_si128(_mm_cmpgt_epi32(mm_dqcoeff2, max),
+ _mm_cmplt_epi32(mm_dqcoeff2, min));
+ test = _mm_movemask_epi8(
+ _mm_or_si128(_mm_or_si128(cmp0, cmp1), _mm_or_si128(cmp2, cmp3)));
+
+ if (!test) {
+ // Fast path: every value fits in 15 bits, so the dwords can be packed
+ // to words without saturation and squared with pmaddwd.
+ __m128i mm_diff, error_sse2, sqcoeff_sse2;
+ mm_coeff = _mm_packs_epi32(mm_coeff, mm_coeff2);
+ mm_dqcoeff = _mm_packs_epi32(mm_dqcoeff, mm_dqcoeff2);
+ mm_diff = _mm_sub_epi16(mm_coeff, mm_dqcoeff);
+ error_sse2 = _mm_madd_epi16(mm_diff, mm_diff);
+ sqcoeff_sse2 = _mm_madd_epi16(mm_coeff, mm_coeff);
+ _mm_storeu_si128((__m128i *)temp, error_sse2);
+ error = error + temp[0] + temp[1] + temp[2] + temp[3];
+ _mm_storeu_si128((__m128i *)temp, sqcoeff_sse2);
+ sqcoeff += temp[0] + temp[1] + temp[2] + temp[3];
+ } else {
+ // Slow path: fall back to 64-bit scalar math for this group of 8.
+ for (j = 0; j < 8; j++) {
+ const int64_t diff = coeff[i + j] - dqcoeff[i + j];
+ error += diff * diff;
+ sqcoeff += (int64_t)coeff[i + j] * (int64_t)coeff[i + j];
+ }
+ }
+ }
+ assert(error >= 0 && sqcoeff >= 0);
+ error = (error + rounding) >> shift;
+ sqcoeff = (sqcoeff + rounding) >> shift;
+
+ *ssz = sqcoeff;
+ return error;
+}
diff --git a/third_party/aom/av1/encoder/x86/highbd_fwd_txfm_avx2.c b/third_party/aom/av1/encoder/x86/highbd_fwd_txfm_avx2.c
new file mode 100644
index 0000000000..9cdf21fc7c
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/highbd_fwd_txfm_avx2.c
@@ -0,0 +1,3132 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#include <assert.h>
+#include <immintrin.h> /*AVX2*/
+
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
+#include "aom_dsp/txfm_common.h"
+#include "aom_ports/mem.h"
+#include "aom_dsp/x86/txfm_common_sse2.h"
+#include "aom_dsp/x86/txfm_common_avx2.h"
+
+// Load an 8x8 block of int16 samples, optionally flipping vertically
+// (flipud) and/or horizontally (fliplr), widen each row to 8x32-bit lanes
+// and pre-scale by << shift. Rows must be 16-byte aligned (_mm_load_si128).
+static INLINE void load_buffer_8x8_avx2(const int16_t *input, __m256i *out,
+ int stride, int flipud, int fliplr,
+ int shift) {
+ __m128i out1[8];
+ if (!flipud) {
+ out1[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ out1[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ out1[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ out1[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ out1[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ out1[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ out1[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ out1[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+
+ } else {
+ // Vertical flip: store rows in reverse order.
+ out1[7] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+ out1[6] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+ out1[5] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+ out1[4] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+ out1[3] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+ out1[2] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+ out1[1] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+ out1[0] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+ }
+ if (!fliplr) {
+ out[0] = _mm256_cvtepi16_epi32(out1[0]);
+ out[1] = _mm256_cvtepi16_epi32(out1[1]);
+ out[2] = _mm256_cvtepi16_epi32(out1[2]);
+ out[3] = _mm256_cvtepi16_epi32(out1[3]);
+ out[4] = _mm256_cvtepi16_epi32(out1[4]);
+ out[5] = _mm256_cvtepi16_epi32(out1[5]);
+ out[6] = _mm256_cvtepi16_epi32(out1[6]);
+ out[7] = _mm256_cvtepi16_epi32(out1[7]);
+
+ } else {
+ // Horizontal flip: reverse the 8 words of each row before widening.
+ out[0] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[0]));
+ out[1] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[1]));
+ out[2] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[2]));
+ out[3] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[3]));
+ out[4] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[4]));
+ out[5] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[5]));
+ out[6] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[6]));
+ out[7] = _mm256_cvtepi16_epi32(mm_reverse_epi16(out1[7]));
+ }
+ out[0] = _mm256_slli_epi32(out[0], shift);
+ out[1] = _mm256_slli_epi32(out[1], shift);
+ out[2] = _mm256_slli_epi32(out[2], shift);
+ out[3] = _mm256_slli_epi32(out[3], shift);
+ out[4] = _mm256_slli_epi32(out[4], shift);
+ out[5] = _mm256_slli_epi32(out[5], shift);
+ out[6] = _mm256_slli_epi32(out[6], shift);
+ out[7] = _mm256_slli_epi32(out[7], shift);
+}
+// Round-to-nearest right shift of 8 vectors by `shift` bits: add
+// 1 << (shift - 1), then arithmetic shift. Requires shift >= 1.
+static INLINE void col_txfm_8x8_rounding(__m256i *in, int shift) {
+ const __m256i rounding = _mm256_set1_epi32(1 << (shift - 1));
+
+ in[0] = _mm256_add_epi32(in[0], rounding);
+ in[1] = _mm256_add_epi32(in[1], rounding);
+ in[2] = _mm256_add_epi32(in[2], rounding);
+ in[3] = _mm256_add_epi32(in[3], rounding);
+ in[4] = _mm256_add_epi32(in[4], rounding);
+ in[5] = _mm256_add_epi32(in[5], rounding);
+ in[6] = _mm256_add_epi32(in[6], rounding);
+ in[7] = _mm256_add_epi32(in[7], rounding);
+
+ in[0] = _mm256_srai_epi32(in[0], shift);
+ in[1] = _mm256_srai_epi32(in[1], shift);
+ in[2] = _mm256_srai_epi32(in[2], shift);
+ in[3] = _mm256_srai_epi32(in[3], shift);
+ in[4] = _mm256_srai_epi32(in[4], shift);
+ in[5] = _mm256_srai_epi32(in[5], shift);
+ in[6] = _mm256_srai_epi32(in[6], shift);
+ in[7] = _mm256_srai_epi32(in[7], shift);
+}
+// Load an 8-wide, 16-tall block as two stacked 8x8 loads. For a vertical
+// flip the top/bottom halves are swapped here and each 8x8 load also flips
+// its own rows (flipud is passed through).
+static INLINE void load_buffer_8x16_avx2(const int16_t *input, __m256i *out,
+ int stride, int flipud, int fliplr,
+ int shift) {
+ const int16_t *topL = input;
+ const int16_t *botL = input + 8 * stride;
+
+ const int16_t *tmp;
+
+ if (flipud) {
+ tmp = topL;
+ topL = botL;
+ botL = tmp;
+ }
+ load_buffer_8x8_avx2(topL, out, stride, flipud, fliplr, shift);
+ load_buffer_8x8_avx2(botL, out + 8, stride, flipud, fliplr, shift);
+}
+// Load a 16-wide, `height`-tall block of int16 samples, widening each row
+// into two __m256i of 8x32-bit lanes (out[i*outstride] = left half,
+// out[i*outstride+1] = right half). Supports vertical/horizontal flips.
+// No pre-shift is applied here. height must be <= 64 (size of out1).
+static INLINE void load_buffer_16xn_avx2(const int16_t *input, __m256i *out,
+ int stride, int height, int outstride,
+ int flipud, int fliplr) {
+ __m256i out1[64];
+ if (!flipud) {
+ for (int i = 0; i < height; i++) {
+ out1[i] = _mm256_loadu_si256((const __m256i *)(input + i * stride));
+ }
+ } else {
+ for (int i = 0; i < height; i++) {
+ out1[(height - 1) - i] =
+ _mm256_loadu_si256((const __m256i *)(input + i * stride));
+ }
+ }
+ if (!fliplr) {
+ for (int i = 0; i < height; i++) {
+ out[i * outstride] =
+ _mm256_cvtepi16_epi32(_mm256_castsi256_si128(out1[i]));
+ out[i * outstride + 1] =
+ _mm256_cvtepi16_epi32(_mm256_extractf128_si256(out1[i], 1));
+ }
+ } else {
+ // Horizontal flip: reverse each 128-bit half and swap the halves.
+ for (int i = 0; i < height; i++) {
+ out[i * outstride + 1] = _mm256_cvtepi16_epi32(
+ mm_reverse_epi16(_mm256_castsi256_si128(out1[i])));
+ out[i * outstride + 0] = _mm256_cvtepi16_epi32(
+ mm_reverse_epi16(_mm256_extractf128_si256(out1[i], 1)));
+ }
+ }
+}
+
+// Transpose an 8x8 tile of 32-bit elements. `instride`/`outstride` are in
+// units of __m256i (one register per row), allowing the tile to live inside
+// a larger interleaved buffer.
+static void fwd_txfm_transpose_8x8_avx2(const __m256i *in, __m256i *out,
+ const int instride,
+ const int outstride) {
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m256i x0, x1;
+
+ // 32-bit interleave of adjacent row pairs.
+ u0 = _mm256_unpacklo_epi32(in[0 * instride], in[1 * instride]);
+ u1 = _mm256_unpackhi_epi32(in[0 * instride], in[1 * instride]);
+
+ u2 = _mm256_unpacklo_epi32(in[2 * instride], in[3 * instride]);
+ u3 = _mm256_unpackhi_epi32(in[2 * instride], in[3 * instride]);
+
+ u4 = _mm256_unpacklo_epi32(in[4 * instride], in[5 * instride]);
+ u5 = _mm256_unpackhi_epi32(in[4 * instride], in[5 * instride]);
+
+ u6 = _mm256_unpacklo_epi32(in[6 * instride], in[7 * instride]);
+ u7 = _mm256_unpackhi_epi32(in[6 * instride], in[7 * instride]);
+
+ // 64-bit interleave, then 128-bit lane swap to finish the transpose.
+ x0 = _mm256_unpacklo_epi64(u0, u2);
+ x1 = _mm256_unpacklo_epi64(u4, u6);
+ out[0 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+ out[4 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+
+ x0 = _mm256_unpackhi_epi64(u0, u2);
+ x1 = _mm256_unpackhi_epi64(u4, u6);
+ out[1 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+ out[5 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+
+ x0 = _mm256_unpacklo_epi64(u1, u3);
+ x1 = _mm256_unpacklo_epi64(u5, u7);
+ out[2 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+ out[6 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+
+ x0 = _mm256_unpackhi_epi64(u1, u3);
+ x1 = _mm256_unpackhi_epi64(u5, u7);
+ out[3 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x20);
+ out[7 * outstride] = _mm256_permute2f128_si256(x0, x1, 0x31);
+}
+// Apply a signed round-shift to `size` vectors spaced `stride` apart:
+// bit < 0 -> round-to-nearest right shift by -bit; bit > 0 -> left shift by
+// bit; bit == 0 -> no-op.
+static INLINE void round_shift_32_8xn_avx2(__m256i *in, int size, int bit,
+ int stride) {
+ if (bit < 0) {
+ bit = -bit;
+ __m256i round = _mm256_set1_epi32(1 << (bit - 1));
+ for (int i = 0; i < size; ++i) {
+ in[stride * i] = _mm256_add_epi32(in[stride * i], round);
+ in[stride * i] = _mm256_srai_epi32(in[stride * i], bit);
+ }
+ } else if (bit > 0) {
+ for (int i = 0; i < size; ++i) {
+ in[stride * i] = _mm256_slli_epi32(in[stride * i], bit);
+ }
+ }
+}
+// Store `out_size` vectors to `out`, advancing by `stride` int32s per
+// vector. Uses aligned stores, so `out` must be 32-byte aligned and stride a
+// multiple of 8.
+static INLINE void store_buffer_avx2(const __m256i *const in, int32_t *out,
+ const int stride, const int out_size) {
+ for (int i = 0; i < out_size; ++i) {
+ _mm256_store_si256((__m256i *)(out), in[i]);
+ out += stride;
+ }
+}
+// Transpose a 16x16 block stored as two __m256i per row (stride 2) by
+// transposing its four 8x8 tiles and swapping the off-diagonal tiles.
+static INLINE void fwd_txfm_transpose_16x16_avx2(const __m256i *in,
+ __m256i *out) {
+ fwd_txfm_transpose_8x8_avx2(&in[0], &out[0], 2, 2);
+ fwd_txfm_transpose_8x8_avx2(&in[1], &out[16], 2, 2);
+ fwd_txfm_transpose_8x8_avx2(&in[16], &out[1], 2, 2);
+ fwd_txfm_transpose_8x8_avx2(&in[17], &out[17], 2, 2);
+}
+
+// Half butterfly: per 32-bit lane, compute (w0*n0 + w1*n1 + rounding) >> bit.
+static INLINE __m256i av1_half_btf_avx2(const __m256i *w0, const __m256i *n0,
+ const __m256i *w1, const __m256i *n1,
+ const __m256i *rounding, int bit) {
+ __m256i x, y;
+
+ x = _mm256_mullo_epi32(*w0, *n0);
+ y = _mm256_mullo_epi32(*w1, *n1);
+ x = _mm256_add_epi32(x, y);
+ x = _mm256_add_epi32(x, *rounding);
+ x = _mm256_srai_epi32(x, bit);
+ return x;
+}
+// Full butterfly rotation, type 0:
+//   out0 = round_shift(w0*in0 + w1*in1, bit)
+//   out1 = round_shift(w1*in0 - w0*in1, bit)
+// This variant takes scalar weights and broadcasts them.
+#define btf_32_avx2_type0(w0, w1, in0, in1, out0, out1, bit) \
+ do { \
+ const __m256i ww0 = _mm256_set1_epi32(w0); \
+ const __m256i ww1 = _mm256_set1_epi32(w1); \
+ const __m256i in0_w0 = _mm256_mullo_epi32(in0, ww0); \
+ const __m256i in1_w1 = _mm256_mullo_epi32(in1, ww1); \
+ out0 = _mm256_add_epi32(in0_w0, in1_w1); \
+ round_shift_32_8xn_avx2(&out0, 1, -bit, 1); \
+ const __m256i in0_w1 = _mm256_mullo_epi32(in0, ww1); \
+ const __m256i in1_w0 = _mm256_mullo_epi32(in1, ww0); \
+ out1 = _mm256_sub_epi32(in0_w1, in1_w0); \
+ round_shift_32_8xn_avx2(&out1, 1, -bit, 1); \
+ } while (0)
+
+// Same rotation as btf_32_avx2_type0, but the weights are already broadcast
+// vectors and the rounding constant `r` (1 << (bit - 1)) is supplied by the
+// caller, avoiding repeated setup inside tight loops.
+#define btf_32_type0_avx2_new(ww0, ww1, in0, in1, out0, out1, r, bit) \
+ do { \
+ const __m256i in0_w0 = _mm256_mullo_epi32(in0, ww0); \
+ const __m256i in1_w1 = _mm256_mullo_epi32(in1, ww1); \
+ out0 = _mm256_add_epi32(in0_w0, in1_w1); \
+ out0 = _mm256_add_epi32(out0, r); \
+ out0 = _mm256_srai_epi32(out0, bit); \
+ const __m256i in0_w1 = _mm256_mullo_epi32(in0, ww1); \
+ const __m256i in1_w0 = _mm256_mullo_epi32(in1, ww0); \
+ out1 = _mm256_sub_epi32(in0_w1, in1_w0); \
+ out1 = _mm256_add_epi32(out1, r); \
+ out1 = _mm256_srai_epi32(out1, bit); \
+ } while (0)
+
+// 1-D column/row transform kernel: strides are in units of __m256i.
+typedef void (*transform_1d_avx2)(__m256i *in, __m256i *out,
+ const int8_t cos_bit, int instride,
+ int outstride);
+// 8-point forward DCT on 32-bit lanes (each __m256i carries 8 columns).
+// Processes `col_num` column groups; outputs are written in the permuted
+// order indicated by the buf0[] comments below. `bit` selects the cospi
+// table and the rounding precision.
+static void fdct8_avx2(__m256i *in, __m256i *out, const int8_t bit,
+ const int col_num, const int outstride) {
+ const int32_t *cospi = cospi_arr(bit);
+ const __m256i cospi32 = _mm256_set1_epi32(cospi[32]);
+ const __m256i cospim32 = _mm256_set1_epi32(-cospi[32]);
+ const __m256i cospi48 = _mm256_set1_epi32(cospi[48]);
+ const __m256i cospi16 = _mm256_set1_epi32(cospi[16]);
+ const __m256i cospi56 = _mm256_set1_epi32(cospi[56]);
+ const __m256i cospi8 = _mm256_set1_epi32(cospi[8]);
+ const __m256i cospi24 = _mm256_set1_epi32(cospi[24]);
+ const __m256i cospi40 = _mm256_set1_epi32(cospi[40]);
+ const __m256i rnding = _mm256_set1_epi32(1 << (bit - 1));
+ __m256i u[8], v[8];
+ for (int col = 0; col < col_num; ++col) {
+ // stage 1: butterfly add/sub of mirrored inputs
+ u[0] = _mm256_add_epi32(in[0 * col_num + col], in[7 * col_num + col]);
+ v[7] = _mm256_sub_epi32(in[0 * col_num + col], in[7 * col_num + col]);
+ u[1] = _mm256_add_epi32(in[1 * col_num + col], in[6 * col_num + col]);
+ u[6] = _mm256_sub_epi32(in[1 * col_num + col], in[6 * col_num + col]);
+ u[2] = _mm256_add_epi32(in[2 * col_num + col], in[5 * col_num + col]);
+ u[5] = _mm256_sub_epi32(in[2 * col_num + col], in[5 * col_num + col]);
+ u[3] = _mm256_add_epi32(in[3 * col_num + col], in[4 * col_num + col]);
+ v[4] = _mm256_sub_epi32(in[3 * col_num + col], in[4 * col_num + col]);
+ v[0] = _mm256_add_epi32(u[0], u[3]);
+ v[3] = _mm256_sub_epi32(u[0], u[3]);
+ v[1] = _mm256_add_epi32(u[1], u[2]);
+ v[2] = _mm256_sub_epi32(u[1], u[2]);
+
+ v[5] = _mm256_mullo_epi32(u[5], cospim32);
+ v[6] = _mm256_mullo_epi32(u[6], cospi32);
+ v[5] = _mm256_add_epi32(v[5], v[6]);
+ v[5] = _mm256_add_epi32(v[5], rnding);
+ v[5] = _mm256_srai_epi32(v[5], bit);
+
+ u[0] = _mm256_mullo_epi32(u[5], cospi32);
+ v[6] = _mm256_mullo_epi32(u[6], cospim32);
+ v[6] = _mm256_sub_epi32(u[0], v[6]);
+ v[6] = _mm256_add_epi32(v[6], rnding);
+ v[6] = _mm256_srai_epi32(v[6], bit);
+
+ // stage 3
+ // type 0
+ v[0] = _mm256_mullo_epi32(v[0], cospi32);
+ v[1] = _mm256_mullo_epi32(v[1], cospi32);
+ u[0] = _mm256_add_epi32(v[0], v[1]);
+ u[0] = _mm256_add_epi32(u[0], rnding);
+ u[0] = _mm256_srai_epi32(u[0], bit);
+
+ u[1] = _mm256_sub_epi32(v[0], v[1]);
+ u[1] = _mm256_add_epi32(u[1], rnding);
+ u[1] = _mm256_srai_epi32(u[1], bit);
+
+ // type 1
+ v[0] = _mm256_mullo_epi32(v[2], cospi48);
+ v[1] = _mm256_mullo_epi32(v[3], cospi16);
+ u[2] = _mm256_add_epi32(v[0], v[1]);
+ u[2] = _mm256_add_epi32(u[2], rnding);
+ u[2] = _mm256_srai_epi32(u[2], bit);
+
+ v[0] = _mm256_mullo_epi32(v[2], cospi16);
+ v[1] = _mm256_mullo_epi32(v[3], cospi48);
+ u[3] = _mm256_sub_epi32(v[1], v[0]);
+ u[3] = _mm256_add_epi32(u[3], rnding);
+ u[3] = _mm256_srai_epi32(u[3], bit);
+
+ u[4] = _mm256_add_epi32(v[4], v[5]);
+ u[5] = _mm256_sub_epi32(v[4], v[5]);
+ u[6] = _mm256_sub_epi32(v[7], v[6]);
+ u[7] = _mm256_add_epi32(v[7], v[6]);
+
+ // stage 4
+ // stage 5
+ v[0] = _mm256_mullo_epi32(u[4], cospi56);
+ v[1] = _mm256_mullo_epi32(u[7], cospi8);
+ v[0] = _mm256_add_epi32(v[0], v[1]);
+ v[0] = _mm256_add_epi32(v[0], rnding);
+ out[1 * outstride + col] = _mm256_srai_epi32(v[0], bit); // buf0[4]
+
+ v[0] = _mm256_mullo_epi32(u[4], cospi8);
+ v[1] = _mm256_mullo_epi32(u[7], cospi56);
+ v[0] = _mm256_sub_epi32(v[1], v[0]);
+ v[0] = _mm256_add_epi32(v[0], rnding);
+ out[7 * outstride + col] = _mm256_srai_epi32(v[0], bit); // buf0[7]
+
+ v[0] = _mm256_mullo_epi32(u[5], cospi24);
+ v[1] = _mm256_mullo_epi32(u[6], cospi40);
+ v[0] = _mm256_add_epi32(v[0], v[1]);
+ v[0] = _mm256_add_epi32(v[0], rnding);
+ out[5 * outstride + col] = _mm256_srai_epi32(v[0], bit); // buf0[5]
+
+ v[0] = _mm256_mullo_epi32(u[5], cospi40);
+ v[1] = _mm256_mullo_epi32(u[6], cospi24);
+ v[0] = _mm256_sub_epi32(v[1], v[0]);
+ v[0] = _mm256_add_epi32(v[0], rnding);
+ out[3 * outstride + col] = _mm256_srai_epi32(v[0], bit); // buf0[6]
+
+ out[0 * outstride + col] = u[0]; // buf0[0]
+ out[4 * outstride + col] = u[1]; // buf0[1]
+ out[2 * outstride + col] = u[2]; // buf0[2]
+ out[6 * outstride + col] = u[3]; // buf0[3]
+ }
+}
+// 8-point forward ADST on 32-bit lanes (each __m256i carries 8 columns).
+// Same calling convention as fdct8_avx2; the final stage writes the outputs
+// in the ADST-specific permuted/negated order.
+// NOTE(review): the `(void)col_num;` below looks stale -- col_num IS used by
+// the loop; consider removing the cast.
+static void fadst8_avx2(__m256i *in, __m256i *out, const int8_t bit,
+ const int col_num, const int outstirde) {
+ (void)col_num;
+ const int32_t *cospi = cospi_arr(bit);
+ const __m256i cospi32 = _mm256_set1_epi32(cospi[32]);
+ const __m256i cospi16 = _mm256_set1_epi32(cospi[16]);
+ const __m256i cospim16 = _mm256_set1_epi32(-cospi[16]);
+ const __m256i cospi48 = _mm256_set1_epi32(cospi[48]);
+ const __m256i cospim48 = _mm256_set1_epi32(-cospi[48]);
+ const __m256i cospi4 = _mm256_set1_epi32(cospi[4]);
+ const __m256i cospim4 = _mm256_set1_epi32(-cospi[4]);
+ const __m256i cospi60 = _mm256_set1_epi32(cospi[60]);
+ const __m256i cospi20 = _mm256_set1_epi32(cospi[20]);
+ const __m256i cospim20 = _mm256_set1_epi32(-cospi[20]);
+ const __m256i cospi44 = _mm256_set1_epi32(cospi[44]);
+ const __m256i cospi28 = _mm256_set1_epi32(cospi[28]);
+ const __m256i cospi36 = _mm256_set1_epi32(cospi[36]);
+ const __m256i cospim36 = _mm256_set1_epi32(-cospi[36]);
+ const __m256i cospi52 = _mm256_set1_epi32(cospi[52]);
+ const __m256i cospim52 = _mm256_set1_epi32(-cospi[52]);
+ const __m256i cospi12 = _mm256_set1_epi32(cospi[12]);
+ const __m256i rnding = _mm256_set1_epi32(1 << (bit - 1));
+ const __m256i zero = _mm256_setzero_si256();
+ __m256i u0, u1, u2, u3, u4, u5, u6, u7;
+ __m256i v0, v1, v2, v3, v4, v5, v6, v7;
+ __m256i x, y;
+ for (int col = 0; col < col_num; ++col) {
+ // stage 1: permute and sign-flip the inputs
+ u0 = in[0 * col_num + col];
+ u1 = _mm256_sub_epi32(zero, in[7 * col_num + col]);
+ u2 = _mm256_sub_epi32(zero, in[3 * col_num + col]);
+ u3 = in[4 * col_num + col];
+ u4 = _mm256_sub_epi32(zero, in[1 * col_num + col]);
+ u5 = in[6 * col_num + col];
+ u6 = in[2 * col_num + col];
+ u7 = _mm256_sub_epi32(zero, in[5 * col_num + col]);
+
+ // stage 2
+ v0 = u0;
+ v1 = u1;
+
+ x = _mm256_mullo_epi32(u2, cospi32);
+ y = _mm256_mullo_epi32(u3, cospi32);
+ v2 = _mm256_add_epi32(x, y);
+ v2 = _mm256_add_epi32(v2, rnding);
+ v2 = _mm256_srai_epi32(v2, bit);
+
+ v3 = _mm256_sub_epi32(x, y);
+ v3 = _mm256_add_epi32(v3, rnding);
+ v3 = _mm256_srai_epi32(v3, bit);
+
+ v4 = u4;
+ v5 = u5;
+
+ x = _mm256_mullo_epi32(u6, cospi32);
+ y = _mm256_mullo_epi32(u7, cospi32);
+ v6 = _mm256_add_epi32(x, y);
+ v6 = _mm256_add_epi32(v6, rnding);
+ v6 = _mm256_srai_epi32(v6, bit);
+
+ v7 = _mm256_sub_epi32(x, y);
+ v7 = _mm256_add_epi32(v7, rnding);
+ v7 = _mm256_srai_epi32(v7, bit);
+
+ // stage 3
+ u0 = _mm256_add_epi32(v0, v2);
+ u1 = _mm256_add_epi32(v1, v3);
+ u2 = _mm256_sub_epi32(v0, v2);
+ u3 = _mm256_sub_epi32(v1, v3);
+ u4 = _mm256_add_epi32(v4, v6);
+ u5 = _mm256_add_epi32(v5, v7);
+ u6 = _mm256_sub_epi32(v4, v6);
+ u7 = _mm256_sub_epi32(v5, v7);
+
+ // stage 4
+ v0 = u0;
+ v1 = u1;
+ v2 = u2;
+ v3 = u3;
+
+ x = _mm256_mullo_epi32(u4, cospi16);
+ y = _mm256_mullo_epi32(u5, cospi48);
+ v4 = _mm256_add_epi32(x, y);
+ v4 = _mm256_add_epi32(v4, rnding);
+ v4 = _mm256_srai_epi32(v4, bit);
+
+ x = _mm256_mullo_epi32(u4, cospi48);
+ y = _mm256_mullo_epi32(u5, cospim16);
+ v5 = _mm256_add_epi32(x, y);
+ v5 = _mm256_add_epi32(v5, rnding);
+ v5 = _mm256_srai_epi32(v5, bit);
+
+ x = _mm256_mullo_epi32(u6, cospim48);
+ y = _mm256_mullo_epi32(u7, cospi16);
+ v6 = _mm256_add_epi32(x, y);
+ v6 = _mm256_add_epi32(v6, rnding);
+ v6 = _mm256_srai_epi32(v6, bit);
+
+ x = _mm256_mullo_epi32(u6, cospi16);
+ y = _mm256_mullo_epi32(u7, cospi48);
+ v7 = _mm256_add_epi32(x, y);
+ v7 = _mm256_add_epi32(v7, rnding);
+ v7 = _mm256_srai_epi32(v7, bit);
+
+ // stage 5
+ u0 = _mm256_add_epi32(v0, v4);
+ u1 = _mm256_add_epi32(v1, v5);
+ u2 = _mm256_add_epi32(v2, v6);
+ u3 = _mm256_add_epi32(v3, v7);
+ u4 = _mm256_sub_epi32(v0, v4);
+ u5 = _mm256_sub_epi32(v1, v5);
+ u6 = _mm256_sub_epi32(v2, v6);
+ u7 = _mm256_sub_epi32(v3, v7);
+
+ // stage 6: final odd-cosine rotations
+ x = _mm256_mullo_epi32(u0, cospi4);
+ y = _mm256_mullo_epi32(u1, cospi60);
+ v0 = _mm256_add_epi32(x, y);
+ v0 = _mm256_add_epi32(v0, rnding);
+ v0 = _mm256_srai_epi32(v0, bit);
+
+ x = _mm256_mullo_epi32(u0, cospi60);
+ y = _mm256_mullo_epi32(u1, cospim4);
+ v1 = _mm256_add_epi32(x, y);
+ v1 = _mm256_add_epi32(v1, rnding);
+ v1 = _mm256_srai_epi32(v1, bit);
+
+ x = _mm256_mullo_epi32(u2, cospi20);
+ y = _mm256_mullo_epi32(u3, cospi44);
+ v2 = _mm256_add_epi32(x, y);
+ v2 = _mm256_add_epi32(v2, rnding);
+ v2 = _mm256_srai_epi32(v2, bit);
+
+ x = _mm256_mullo_epi32(u2, cospi44);
+ y = _mm256_mullo_epi32(u3, cospim20);
+ v3 = _mm256_add_epi32(x, y);
+ v3 = _mm256_add_epi32(v3, rnding);
+ v3 = _mm256_srai_epi32(v3, bit);
+
+ x = _mm256_mullo_epi32(u4, cospi36);
+ y = _mm256_mullo_epi32(u5, cospi28);
+ v4 = _mm256_add_epi32(x, y);
+ v4 = _mm256_add_epi32(v4, rnding);
+ v4 = _mm256_srai_epi32(v4, bit);
+
+ x = _mm256_mullo_epi32(u4, cospi28);
+ y = _mm256_mullo_epi32(u5, cospim36);
+ v5 = _mm256_add_epi32(x, y);
+ v5 = _mm256_add_epi32(v5, rnding);
+ v5 = _mm256_srai_epi32(v5, bit);
+
+ x = _mm256_mullo_epi32(u6, cospi52);
+ y = _mm256_mullo_epi32(u7, cospi12);
+ v6 = _mm256_add_epi32(x, y);
+ v6 = _mm256_add_epi32(v6, rnding);
+ v6 = _mm256_srai_epi32(v6, bit);
+
+ x = _mm256_mullo_epi32(u6, cospi12);
+ y = _mm256_mullo_epi32(u7, cospim52);
+ v7 = _mm256_add_epi32(x, y);
+ v7 = _mm256_add_epi32(v7, rnding);
+ v7 = _mm256_srai_epi32(v7, bit);
+
+ // stage 7
+ out[0 * outstirde + col] = v1;
+ out[1 * outstirde + col] = v6;
+ out[2 * outstirde + col] = v3;
+ out[3 * outstirde + col] = v4;
+ out[4 * outstirde + col] = v5;
+ out[5 * outstirde + col] = v2;
+ out[6 * outstirde + col] = v7;
+ out[7 * outstirde + col] = v0;
+ }
+}
+// Identity transform for size 8: each output is the input scaled by 2
+// (x + x). `bit` and `outstride` are unused; out is written contiguously,
+// 8 vectors per column group.
+static void idtx8_avx2(__m256i *in, __m256i *out, const int8_t bit, int col_num,
+ int outstride) {
+ (void)bit;
+ (void)outstride;
+ int num_iters = 8 * col_num;
+ for (int i = 0; i < num_iters; i += 8) {
+ out[i] = _mm256_add_epi32(in[i], in[i]);
+ out[i + 1] = _mm256_add_epi32(in[i + 1], in[i + 1]);
+ out[i + 2] = _mm256_add_epi32(in[i + 2], in[i + 2]);
+ out[i + 3] = _mm256_add_epi32(in[i + 3], in[i + 3]);
+ out[i + 4] = _mm256_add_epi32(in[i + 4], in[i + 4]);
+ out[i + 5] = _mm256_add_epi32(in[i + 5], in[i + 5]);
+ out[i + 6] = _mm256_add_epi32(in[i + 6], in[i + 6]);
+ out[i + 7] = _mm256_add_epi32(in[i + 7], in[i + 7]);
+ }
+}
+// 2-D forward 8x8 transform, high-bitdepth AVX2 path.
+// For every TX_TYPE the pipeline is the same: load 8x8 samples (applying the
+// up/down and/or left/right flip the type requires), run the column 1-D
+// transform, round by shift[1], transpose, run the row 1-D transform, and
+// store 8x8 int32 coefficients to `coeff`.
+// NOTE(review): `bd` is accepted for API symmetry but unused — presumably the
+// 8x8 shift table is bitdepth-independent; confirm against av1_fwd_txfm_shift_ls.
+void av1_fwd_txfm2d_8x8_avx2(const int16_t *input, int32_t *coeff, int stride,
+ TX_TYPE tx_type, int bd) {
+ __m256i in[8], out[8];
+ const TX_SIZE tx_size = TX_8X8;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int width = tx_size_wide[tx_size];
+ const int width_div8 = (width >> 3);
+
+ switch (tx_type) {
+ // DCT in both directions; no flips on load.
+ case DCT_DCT:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case ADST_DCT:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case DCT_ADST:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case ADST_ADST:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ // FLIPADST variants express the flip at load time (args 4/5 of the load):
+ // the column flip is ud_flip=1, the row flip is lr_flip=1.
+ case FLIPADST_DCT:
+ load_buffer_8x8_avx2(input, in, stride, 1, 0, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case DCT_FLIPADST:
+ load_buffer_8x8_avx2(input, in, stride, 0, 1, shift[0]);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case FLIPADST_FLIPADST:
+ load_buffer_8x8_avx2(input, in, stride, 1, 1, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case ADST_FLIPADST:
+ load_buffer_8x8_avx2(input, in, stride, 0, 1, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case FLIPADST_ADST:
+ load_buffer_8x8_avx2(input, in, stride, 1, 0, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ // Identity / mixed-identity types: idtx8 is the scaling identity 1-D
+ // transform; these cases pass the *col* cos-bit to both passes.
+ case IDTX:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case V_DCT:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case H_DCT:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fdct8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case V_ADST:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case H_ADST:
+ load_buffer_8x8_avx2(input, in, stride, 0, 0, shift[0]);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case V_FLIPADST:
+ load_buffer_8x8_avx2(input, in, stride, 1, 0, shift[0]);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ case H_FLIPADST:
+ load_buffer_8x8_avx2(input, in, stride, 0, 1, shift[0]);
+ idtx8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ fwd_txfm_transpose_8x8_avx2(out, in, width_div8, width_div8);
+ fadst8_avx2(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+ width_div8);
+ store_buffer_avx2(out, coeff, 8, 8);
+ break;
+ default: assert(0);
+ }
+ (void)bd;
+}
+
+// 1-D forward 16-point DCT over `col_num` vertical strips of __m256i lanes.
+// `in`/`out` are laid out row-major with `col_num` (resp. `outstride`)
+// vectors per row; element r of strip c lives at in[r * col_num + c].
+// `bit` selects the cosine table precision; every butterfly that multiplies
+// by a cospi constant adds `rnding` (1 << (bit-1)) and shifts right by `bit`.
+// Outputs are written in the standard fdct16 coefficient order (see the
+// final store block), not in natural index order.
+static void fdct16_avx2(__m256i *in, __m256i *out, const int8_t bit,
+ const int col_num, const int outstride) {
+ const int32_t *cospi = cospi_arr(bit);
+ const __m256i cospi32 = _mm256_set1_epi32(cospi[32]);
+ const __m256i cospim32 = _mm256_set1_epi32(-cospi[32]);
+ const __m256i cospi48 = _mm256_set1_epi32(cospi[48]);
+ const __m256i cospi16 = _mm256_set1_epi32(cospi[16]);
+ const __m256i cospim48 = _mm256_set1_epi32(-cospi[48]);
+ const __m256i cospim16 = _mm256_set1_epi32(-cospi[16]);
+ const __m256i cospi56 = _mm256_set1_epi32(cospi[56]);
+ const __m256i cospi8 = _mm256_set1_epi32(cospi[8]);
+ const __m256i cospi24 = _mm256_set1_epi32(cospi[24]);
+ const __m256i cospi40 = _mm256_set1_epi32(cospi[40]);
+ const __m256i cospi60 = _mm256_set1_epi32(cospi[60]);
+ const __m256i cospi4 = _mm256_set1_epi32(cospi[4]);
+ const __m256i cospi28 = _mm256_set1_epi32(cospi[28]);
+ const __m256i cospi36 = _mm256_set1_epi32(cospi[36]);
+ const __m256i cospi44 = _mm256_set1_epi32(cospi[44]);
+ const __m256i cospi20 = _mm256_set1_epi32(cospi[20]);
+ const __m256i cospi12 = _mm256_set1_epi32(cospi[12]);
+ const __m256i cospi52 = _mm256_set1_epi32(cospi[52]);
+ const __m256i rnding = _mm256_set1_epi32(1 << (bit - 1));
+ __m256i u[16], v[16], x;
+ int col;
+
+ // Calculate the column 0, 1, 2, 3
+ for (col = 0; col < col_num; ++col) {
+ // stage 0
+ // stage 1
+ // Butterfly: pair element k with element 15-k (sum feeds the even half,
+ // difference feeds the odd half of the DCT).
+ u[0] = _mm256_add_epi32(in[0 * col_num + col], in[15 * col_num + col]);
+ u[15] = _mm256_sub_epi32(in[0 * col_num + col], in[15 * col_num + col]);
+ u[1] = _mm256_add_epi32(in[1 * col_num + col], in[14 * col_num + col]);
+ u[14] = _mm256_sub_epi32(in[1 * col_num + col], in[14 * col_num + col]);
+ u[2] = _mm256_add_epi32(in[2 * col_num + col], in[13 * col_num + col]);
+ u[13] = _mm256_sub_epi32(in[2 * col_num + col], in[13 * col_num + col]);
+ u[3] = _mm256_add_epi32(in[3 * col_num + col], in[12 * col_num + col]);
+ u[12] = _mm256_sub_epi32(in[3 * col_num + col], in[12 * col_num + col]);
+ u[4] = _mm256_add_epi32(in[4 * col_num + col], in[11 * col_num + col]);
+ u[11] = _mm256_sub_epi32(in[4 * col_num + col], in[11 * col_num + col]);
+ u[5] = _mm256_add_epi32(in[5 * col_num + col], in[10 * col_num + col]);
+ u[10] = _mm256_sub_epi32(in[5 * col_num + col], in[10 * col_num + col]);
+ u[6] = _mm256_add_epi32(in[6 * col_num + col], in[9 * col_num + col]);
+ u[9] = _mm256_sub_epi32(in[6 * col_num + col], in[9 * col_num + col]);
+ u[7] = _mm256_add_epi32(in[7 * col_num + col], in[8 * col_num + col]);
+ u[8] = _mm256_sub_epi32(in[7 * col_num + col], in[8 * col_num + col]);
+
+ // stage 2
+ v[0] = _mm256_add_epi32(u[0], u[7]);
+ v[7] = _mm256_sub_epi32(u[0], u[7]);
+ v[1] = _mm256_add_epi32(u[1], u[6]);
+ v[6] = _mm256_sub_epi32(u[1], u[6]);
+ v[2] = _mm256_add_epi32(u[2], u[5]);
+ v[5] = _mm256_sub_epi32(u[2], u[5]);
+ v[3] = _mm256_add_epi32(u[3], u[4]);
+ v[4] = _mm256_sub_epi32(u[3], u[4]);
+ v[8] = u[8];
+ v[9] = u[9];
+
+ // Rotations by cospi32 on the (10,13) and (11,12) pairs; each result is
+ // rounded and shifted back down by `bit`.
+ v[10] = _mm256_mullo_epi32(u[10], cospim32);
+ x = _mm256_mullo_epi32(u[13], cospi32);
+ v[10] = _mm256_add_epi32(v[10], x);
+ v[10] = _mm256_add_epi32(v[10], rnding);
+ v[10] = _mm256_srai_epi32(v[10], bit);
+
+ v[13] = _mm256_mullo_epi32(u[10], cospi32);
+ x = _mm256_mullo_epi32(u[13], cospim32);
+ v[13] = _mm256_sub_epi32(v[13], x);
+ v[13] = _mm256_add_epi32(v[13], rnding);
+ v[13] = _mm256_srai_epi32(v[13], bit);
+
+ v[11] = _mm256_mullo_epi32(u[11], cospim32);
+ x = _mm256_mullo_epi32(u[12], cospi32);
+ v[11] = _mm256_add_epi32(v[11], x);
+ v[11] = _mm256_add_epi32(v[11], rnding);
+ v[11] = _mm256_srai_epi32(v[11], bit);
+
+ v[12] = _mm256_mullo_epi32(u[11], cospi32);
+ x = _mm256_mullo_epi32(u[12], cospim32);
+ v[12] = _mm256_sub_epi32(v[12], x);
+ v[12] = _mm256_add_epi32(v[12], rnding);
+ v[12] = _mm256_srai_epi32(v[12], bit);
+ v[14] = u[14];
+ v[15] = u[15];
+
+ // stage 3
+ u[0] = _mm256_add_epi32(v[0], v[3]);
+ u[3] = _mm256_sub_epi32(v[0], v[3]);
+ u[1] = _mm256_add_epi32(v[1], v[2]);
+ u[2] = _mm256_sub_epi32(v[1], v[2]);
+ u[4] = v[4];
+
+ u[5] = _mm256_mullo_epi32(v[5], cospim32);
+ x = _mm256_mullo_epi32(v[6], cospi32);
+ u[5] = _mm256_add_epi32(u[5], x);
+ u[5] = _mm256_add_epi32(u[5], rnding);
+ u[5] = _mm256_srai_epi32(u[5], bit);
+
+ u[6] = _mm256_mullo_epi32(v[5], cospi32);
+ x = _mm256_mullo_epi32(v[6], cospim32);
+ u[6] = _mm256_sub_epi32(u[6], x);
+ u[6] = _mm256_add_epi32(u[6], rnding);
+ u[6] = _mm256_srai_epi32(u[6], bit);
+
+ u[7] = v[7];
+ u[8] = _mm256_add_epi32(v[8], v[11]);
+ u[11] = _mm256_sub_epi32(v[8], v[11]);
+ u[9] = _mm256_add_epi32(v[9], v[10]);
+ u[10] = _mm256_sub_epi32(v[9], v[10]);
+ u[12] = _mm256_sub_epi32(v[15], v[12]);
+ u[15] = _mm256_add_epi32(v[15], v[12]);
+ u[13] = _mm256_sub_epi32(v[14], v[13]);
+ u[14] = _mm256_add_epi32(v[14], v[13]);
+
+ // stage 4
+ u[0] = _mm256_mullo_epi32(u[0], cospi32);
+ u[1] = _mm256_mullo_epi32(u[1], cospi32);
+ v[0] = _mm256_add_epi32(u[0], u[1]);
+ v[0] = _mm256_add_epi32(v[0], rnding);
+ v[0] = _mm256_srai_epi32(v[0], bit);
+
+ v[1] = _mm256_sub_epi32(u[0], u[1]);
+ v[1] = _mm256_add_epi32(v[1], rnding);
+ v[1] = _mm256_srai_epi32(v[1], bit);
+
+ v[2] = _mm256_mullo_epi32(u[2], cospi48);
+ x = _mm256_mullo_epi32(u[3], cospi16);
+ v[2] = _mm256_add_epi32(v[2], x);
+ v[2] = _mm256_add_epi32(v[2], rnding);
+ v[2] = _mm256_srai_epi32(v[2], bit);
+
+ v[3] = _mm256_mullo_epi32(u[2], cospi16);
+ x = _mm256_mullo_epi32(u[3], cospi48);
+ v[3] = _mm256_sub_epi32(x, v[3]);
+ v[3] = _mm256_add_epi32(v[3], rnding);
+ v[3] = _mm256_srai_epi32(v[3], bit);
+
+ v[4] = _mm256_add_epi32(u[4], u[5]);
+ v[5] = _mm256_sub_epi32(u[4], u[5]);
+ v[6] = _mm256_sub_epi32(u[7], u[6]);
+ v[7] = _mm256_add_epi32(u[7], u[6]);
+ v[8] = u[8];
+
+ v[9] = _mm256_mullo_epi32(u[9], cospim16);
+ x = _mm256_mullo_epi32(u[14], cospi48);
+ v[9] = _mm256_add_epi32(v[9], x);
+ v[9] = _mm256_add_epi32(v[9], rnding);
+ v[9] = _mm256_srai_epi32(v[9], bit);
+
+ v[14] = _mm256_mullo_epi32(u[9], cospi48);
+ x = _mm256_mullo_epi32(u[14], cospim16);
+ v[14] = _mm256_sub_epi32(v[14], x);
+ v[14] = _mm256_add_epi32(v[14], rnding);
+ v[14] = _mm256_srai_epi32(v[14], bit);
+
+ v[10] = _mm256_mullo_epi32(u[10], cospim48);
+ x = _mm256_mullo_epi32(u[13], cospim16);
+ v[10] = _mm256_add_epi32(v[10], x);
+ v[10] = _mm256_add_epi32(v[10], rnding);
+ v[10] = _mm256_srai_epi32(v[10], bit);
+
+ v[13] = _mm256_mullo_epi32(u[10], cospim16);
+ x = _mm256_mullo_epi32(u[13], cospim48);
+ v[13] = _mm256_sub_epi32(v[13], x);
+ v[13] = _mm256_add_epi32(v[13], rnding);
+ v[13] = _mm256_srai_epi32(v[13], bit);
+
+ v[11] = u[11];
+ v[12] = u[12];
+ v[15] = u[15];
+
+ // stage 5
+ u[0] = v[0];
+ u[1] = v[1];
+ u[2] = v[2];
+ u[3] = v[3];
+
+ u[4] = _mm256_mullo_epi32(v[4], cospi56);
+ x = _mm256_mullo_epi32(v[7], cospi8);
+ u[4] = _mm256_add_epi32(u[4], x);
+ u[4] = _mm256_add_epi32(u[4], rnding);
+ u[4] = _mm256_srai_epi32(u[4], bit);
+
+ u[7] = _mm256_mullo_epi32(v[4], cospi8);
+ x = _mm256_mullo_epi32(v[7], cospi56);
+ u[7] = _mm256_sub_epi32(x, u[7]);
+ u[7] = _mm256_add_epi32(u[7], rnding);
+ u[7] = _mm256_srai_epi32(u[7], bit);
+
+ u[5] = _mm256_mullo_epi32(v[5], cospi24);
+ x = _mm256_mullo_epi32(v[6], cospi40);
+ u[5] = _mm256_add_epi32(u[5], x);
+ u[5] = _mm256_add_epi32(u[5], rnding);
+ u[5] = _mm256_srai_epi32(u[5], bit);
+
+ u[6] = _mm256_mullo_epi32(v[5], cospi40);
+ x = _mm256_mullo_epi32(v[6], cospi24);
+ u[6] = _mm256_sub_epi32(x, u[6]);
+ u[6] = _mm256_add_epi32(u[6], rnding);
+ u[6] = _mm256_srai_epi32(u[6], bit);
+
+ u[8] = _mm256_add_epi32(v[8], v[9]);
+ u[9] = _mm256_sub_epi32(v[8], v[9]);
+ u[10] = _mm256_sub_epi32(v[11], v[10]);
+ u[11] = _mm256_add_epi32(v[11], v[10]);
+ u[12] = _mm256_add_epi32(v[12], v[13]);
+ u[13] = _mm256_sub_epi32(v[12], v[13]);
+ u[14] = _mm256_sub_epi32(v[15], v[14]);
+ u[15] = _mm256_add_epi32(v[15], v[14]);
+
+ // stage 6
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
+ v[4] = u[4];
+ v[5] = u[5];
+ v[6] = u[6];
+ v[7] = u[7];
+
+ v[8] = _mm256_mullo_epi32(u[8], cospi60);
+ x = _mm256_mullo_epi32(u[15], cospi4);
+ v[8] = _mm256_add_epi32(v[8], x);
+ v[8] = _mm256_add_epi32(v[8], rnding);
+ v[8] = _mm256_srai_epi32(v[8], bit);
+
+ v[15] = _mm256_mullo_epi32(u[8], cospi4);
+ x = _mm256_mullo_epi32(u[15], cospi60);
+ v[15] = _mm256_sub_epi32(x, v[15]);
+ v[15] = _mm256_add_epi32(v[15], rnding);
+ v[15] = _mm256_srai_epi32(v[15], bit);
+
+ v[9] = _mm256_mullo_epi32(u[9], cospi28);
+ x = _mm256_mullo_epi32(u[14], cospi36);
+ v[9] = _mm256_add_epi32(v[9], x);
+ v[9] = _mm256_add_epi32(v[9], rnding);
+ v[9] = _mm256_srai_epi32(v[9], bit);
+
+ v[14] = _mm256_mullo_epi32(u[9], cospi36);
+ x = _mm256_mullo_epi32(u[14], cospi28);
+ v[14] = _mm256_sub_epi32(x, v[14]);
+ v[14] = _mm256_add_epi32(v[14], rnding);
+ v[14] = _mm256_srai_epi32(v[14], bit);
+
+ v[10] = _mm256_mullo_epi32(u[10], cospi44);
+ x = _mm256_mullo_epi32(u[13], cospi20);
+ v[10] = _mm256_add_epi32(v[10], x);
+ v[10] = _mm256_add_epi32(v[10], rnding);
+ v[10] = _mm256_srai_epi32(v[10], bit);
+
+ v[13] = _mm256_mullo_epi32(u[10], cospi20);
+ x = _mm256_mullo_epi32(u[13], cospi44);
+ v[13] = _mm256_sub_epi32(x, v[13]);
+ v[13] = _mm256_add_epi32(v[13], rnding);
+ v[13] = _mm256_srai_epi32(v[13], bit);
+
+ v[11] = _mm256_mullo_epi32(u[11], cospi12);
+ x = _mm256_mullo_epi32(u[12], cospi52);
+ v[11] = _mm256_add_epi32(v[11], x);
+ v[11] = _mm256_add_epi32(v[11], rnding);
+ v[11] = _mm256_srai_epi32(v[11], bit);
+
+ v[12] = _mm256_mullo_epi32(u[11], cospi52);
+ x = _mm256_mullo_epi32(u[12], cospi12);
+ v[12] = _mm256_sub_epi32(x, v[12]);
+ v[12] = _mm256_add_epi32(v[12], rnding);
+ v[12] = _mm256_srai_epi32(v[12], bit);
+
+ // Final permutation: map the butterfly lanes to DCT output indices
+ // (out[0]=DC, even/odd halves interleaved per the fdct16 flow graph).
+ out[0 * outstride + col] = v[0];
+ out[1 * outstride + col] = v[8];
+ out[2 * outstride + col] = v[4];
+ out[3 * outstride + col] = v[12];
+ out[4 * outstride + col] = v[2];
+ out[5 * outstride + col] = v[10];
+ out[6 * outstride + col] = v[6];
+ out[7 * outstride + col] = v[14];
+ out[8 * outstride + col] = v[1];
+ out[9 * outstride + col] = v[9];
+ out[10 * outstride + col] = v[5];
+ out[11 * outstride + col] = v[13];
+ out[12 * outstride + col] = v[3];
+ out[13 * outstride + col] = v[11];
+ out[14 * outstride + col] = v[7];
+ out[15 * outstride + col] = v[15];
+ }
+}
+// 1-D forward 16-point ADST over `num_cols` strips of __m256i lanes.
+// Same layout convention as fdct16_avx2: element r of strip c lives at
+// in[r * num_cols + c]. Stage 1 applies the ADST input permutation with
+// sign flips (negations via subtraction from `zero`); later stages are
+// half-butterfly rotations through av1_half_btf_avx2, each rounded by
+// `rnding` and shifted by `bit`. Stage 9 writes outputs in the reversed /
+// interleaved order the AV1 fadst16 flow graph defines.
+static void fadst16_avx2(__m256i *in, __m256i *out, const int8_t bit,
+ const int num_cols, const int outstride) {
+ const int32_t *cospi = cospi_arr(bit);
+ const __m256i cospi32 = _mm256_set1_epi32(cospi[32]);
+ const __m256i cospi48 = _mm256_set1_epi32(cospi[48]);
+ const __m256i cospi16 = _mm256_set1_epi32(cospi[16]);
+ const __m256i cospim16 = _mm256_set1_epi32(-cospi[16]);
+ const __m256i cospim48 = _mm256_set1_epi32(-cospi[48]);
+ const __m256i cospi8 = _mm256_set1_epi32(cospi[8]);
+ const __m256i cospi56 = _mm256_set1_epi32(cospi[56]);
+ const __m256i cospim56 = _mm256_set1_epi32(-cospi[56]);
+ const __m256i cospim8 = _mm256_set1_epi32(-cospi[8]);
+ const __m256i cospi24 = _mm256_set1_epi32(cospi[24]);
+ const __m256i cospim24 = _mm256_set1_epi32(-cospi[24]);
+ const __m256i cospim40 = _mm256_set1_epi32(-cospi[40]);
+ const __m256i cospi40 = _mm256_set1_epi32(cospi[40]);
+ const __m256i cospi2 = _mm256_set1_epi32(cospi[2]);
+ const __m256i cospi62 = _mm256_set1_epi32(cospi[62]);
+ const __m256i cospim2 = _mm256_set1_epi32(-cospi[2]);
+ const __m256i cospi10 = _mm256_set1_epi32(cospi[10]);
+ const __m256i cospi54 = _mm256_set1_epi32(cospi[54]);
+ const __m256i cospim10 = _mm256_set1_epi32(-cospi[10]);
+ const __m256i cospi18 = _mm256_set1_epi32(cospi[18]);
+ const __m256i cospi46 = _mm256_set1_epi32(cospi[46]);
+ const __m256i cospim18 = _mm256_set1_epi32(-cospi[18]);
+ const __m256i cospi26 = _mm256_set1_epi32(cospi[26]);
+ const __m256i cospi38 = _mm256_set1_epi32(cospi[38]);
+ const __m256i cospim26 = _mm256_set1_epi32(-cospi[26]);
+ const __m256i cospi34 = _mm256_set1_epi32(cospi[34]);
+ const __m256i cospi30 = _mm256_set1_epi32(cospi[30]);
+ const __m256i cospim34 = _mm256_set1_epi32(-cospi[34]);
+ const __m256i cospi42 = _mm256_set1_epi32(cospi[42]);
+ const __m256i cospi22 = _mm256_set1_epi32(cospi[22]);
+ const __m256i cospim42 = _mm256_set1_epi32(-cospi[42]);
+ const __m256i cospi50 = _mm256_set1_epi32(cospi[50]);
+ const __m256i cospi14 = _mm256_set1_epi32(cospi[14]);
+ const __m256i cospim50 = _mm256_set1_epi32(-cospi[50]);
+ const __m256i cospi58 = _mm256_set1_epi32(cospi[58]);
+ const __m256i cospi6 = _mm256_set1_epi32(cospi[6]);
+ const __m256i cospim58 = _mm256_set1_epi32(-cospi[58]);
+ const __m256i rnding = _mm256_set1_epi32(1 << (bit - 1));
+ const __m256i zero = _mm256_setzero_si256();
+
+ __m256i u[16], v[16], x, y;
+ int col;
+
+ for (col = 0; col < num_cols; ++col) {
+ // stage 0
+ // stage 1
+ // Input permutation + sign pattern (negation written as 0 - x).
+ u[0] = in[0 * num_cols + col];
+ u[1] = _mm256_sub_epi32(zero, in[15 * num_cols + col]);
+ u[2] = _mm256_sub_epi32(zero, in[7 * num_cols + col]);
+ u[3] = in[8 * num_cols + col];
+ u[4] = _mm256_sub_epi32(zero, in[3 * num_cols + col]);
+ u[5] = in[12 * num_cols + col];
+ u[6] = in[4 * num_cols + col];
+ u[7] = _mm256_sub_epi32(zero, in[11 * num_cols + col]);
+ u[8] = _mm256_sub_epi32(zero, in[1 * num_cols + col]);
+ u[9] = in[14 * num_cols + col];
+ u[10] = in[6 * num_cols + col];
+ u[11] = _mm256_sub_epi32(zero, in[9 * num_cols + col]);
+ u[12] = in[2 * num_cols + col];
+ u[13] = _mm256_sub_epi32(zero, in[13 * num_cols + col]);
+ u[14] = _mm256_sub_epi32(zero, in[5 * num_cols + col]);
+ u[15] = in[10 * num_cols + col];
+
+ // stage 2
+ // 45-degree (cospi32) rotations on pairs (2,3), (6,7), (10,11), (14,15).
+ v[0] = u[0];
+ v[1] = u[1];
+
+ x = _mm256_mullo_epi32(u[2], cospi32);
+ y = _mm256_mullo_epi32(u[3], cospi32);
+ v[2] = _mm256_add_epi32(x, y);
+ v[2] = _mm256_add_epi32(v[2], rnding);
+ v[2] = _mm256_srai_epi32(v[2], bit);
+
+ v[3] = _mm256_sub_epi32(x, y);
+ v[3] = _mm256_add_epi32(v[3], rnding);
+ v[3] = _mm256_srai_epi32(v[3], bit);
+
+ v[4] = u[4];
+ v[5] = u[5];
+
+ x = _mm256_mullo_epi32(u[6], cospi32);
+ y = _mm256_mullo_epi32(u[7], cospi32);
+ v[6] = _mm256_add_epi32(x, y);
+ v[6] = _mm256_add_epi32(v[6], rnding);
+ v[6] = _mm256_srai_epi32(v[6], bit);
+
+ v[7] = _mm256_sub_epi32(x, y);
+ v[7] = _mm256_add_epi32(v[7], rnding);
+ v[7] = _mm256_srai_epi32(v[7], bit);
+
+ v[8] = u[8];
+ v[9] = u[9];
+
+ x = _mm256_mullo_epi32(u[10], cospi32);
+ y = _mm256_mullo_epi32(u[11], cospi32);
+ v[10] = _mm256_add_epi32(x, y);
+ v[10] = _mm256_add_epi32(v[10], rnding);
+ v[10] = _mm256_srai_epi32(v[10], bit);
+
+ v[11] = _mm256_sub_epi32(x, y);
+ v[11] = _mm256_add_epi32(v[11], rnding);
+ v[11] = _mm256_srai_epi32(v[11], bit);
+
+ v[12] = u[12];
+ v[13] = u[13];
+
+ x = _mm256_mullo_epi32(u[14], cospi32);
+ y = _mm256_mullo_epi32(u[15], cospi32);
+ v[14] = _mm256_add_epi32(x, y);
+ v[14] = _mm256_add_epi32(v[14], rnding);
+ v[14] = _mm256_srai_epi32(v[14], bit);
+
+ v[15] = _mm256_sub_epi32(x, y);
+ v[15] = _mm256_add_epi32(v[15], rnding);
+ v[15] = _mm256_srai_epi32(v[15], bit);
+
+ // stage 3
+ u[0] = _mm256_add_epi32(v[0], v[2]);
+ u[1] = _mm256_add_epi32(v[1], v[3]);
+ u[2] = _mm256_sub_epi32(v[0], v[2]);
+ u[3] = _mm256_sub_epi32(v[1], v[3]);
+ u[4] = _mm256_add_epi32(v[4], v[6]);
+ u[5] = _mm256_add_epi32(v[5], v[7]);
+ u[6] = _mm256_sub_epi32(v[4], v[6]);
+ u[7] = _mm256_sub_epi32(v[5], v[7]);
+ u[8] = _mm256_add_epi32(v[8], v[10]);
+ u[9] = _mm256_add_epi32(v[9], v[11]);
+ u[10] = _mm256_sub_epi32(v[8], v[10]);
+ u[11] = _mm256_sub_epi32(v[9], v[11]);
+ u[12] = _mm256_add_epi32(v[12], v[14]);
+ u[13] = _mm256_add_epi32(v[13], v[15]);
+ u[14] = _mm256_sub_epi32(v[12], v[14]);
+ u[15] = _mm256_sub_epi32(v[13], v[15]);
+
+ // stage 4
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
+ v[4] = av1_half_btf_avx2(&cospi16, &u[4], &cospi48, &u[5], &rnding, bit);
+ v[5] = av1_half_btf_avx2(&cospi48, &u[4], &cospim16, &u[5], &rnding, bit);
+ v[6] = av1_half_btf_avx2(&cospim48, &u[6], &cospi16, &u[7], &rnding, bit);
+ v[7] = av1_half_btf_avx2(&cospi16, &u[6], &cospi48, &u[7], &rnding, bit);
+ v[8] = u[8];
+ v[9] = u[9];
+ v[10] = u[10];
+ v[11] = u[11];
+ v[12] = av1_half_btf_avx2(&cospi16, &u[12], &cospi48, &u[13], &rnding, bit);
+ v[13] =
+ av1_half_btf_avx2(&cospi48, &u[12], &cospim16, &u[13], &rnding, bit);
+ v[14] =
+ av1_half_btf_avx2(&cospim48, &u[14], &cospi16, &u[15], &rnding, bit);
+ v[15] = av1_half_btf_avx2(&cospi16, &u[14], &cospi48, &u[15], &rnding, bit);
+
+ // stage 5
+ u[0] = _mm256_add_epi32(v[0], v[4]);
+ u[1] = _mm256_add_epi32(v[1], v[5]);
+ u[2] = _mm256_add_epi32(v[2], v[6]);
+ u[3] = _mm256_add_epi32(v[3], v[7]);
+ u[4] = _mm256_sub_epi32(v[0], v[4]);
+ u[5] = _mm256_sub_epi32(v[1], v[5]);
+ u[6] = _mm256_sub_epi32(v[2], v[6]);
+ u[7] = _mm256_sub_epi32(v[3], v[7]);
+ u[8] = _mm256_add_epi32(v[8], v[12]);
+ u[9] = _mm256_add_epi32(v[9], v[13]);
+ u[10] = _mm256_add_epi32(v[10], v[14]);
+ u[11] = _mm256_add_epi32(v[11], v[15]);
+ u[12] = _mm256_sub_epi32(v[8], v[12]);
+ u[13] = _mm256_sub_epi32(v[9], v[13]);
+ u[14] = _mm256_sub_epi32(v[10], v[14]);
+ u[15] = _mm256_sub_epi32(v[11], v[15]);
+
+ // stage 6
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
+ v[4] = u[4];
+ v[5] = u[5];
+ v[6] = u[6];
+ v[7] = u[7];
+ v[8] = av1_half_btf_avx2(&cospi8, &u[8], &cospi56, &u[9], &rnding, bit);
+ v[9] = av1_half_btf_avx2(&cospi56, &u[8], &cospim8, &u[9], &rnding, bit);
+ v[10] = av1_half_btf_avx2(&cospi40, &u[10], &cospi24, &u[11], &rnding, bit);
+ v[11] =
+ av1_half_btf_avx2(&cospi24, &u[10], &cospim40, &u[11], &rnding, bit);
+ v[12] = av1_half_btf_avx2(&cospim56, &u[12], &cospi8, &u[13], &rnding, bit);
+ v[13] = av1_half_btf_avx2(&cospi8, &u[12], &cospi56, &u[13], &rnding, bit);
+ v[14] =
+ av1_half_btf_avx2(&cospim24, &u[14], &cospi40, &u[15], &rnding, bit);
+ v[15] = av1_half_btf_avx2(&cospi40, &u[14], &cospi24, &u[15], &rnding, bit);
+
+ // stage 7
+ u[0] = _mm256_add_epi32(v[0], v[8]);
+ u[1] = _mm256_add_epi32(v[1], v[9]);
+ u[2] = _mm256_add_epi32(v[2], v[10]);
+ u[3] = _mm256_add_epi32(v[3], v[11]);
+ u[4] = _mm256_add_epi32(v[4], v[12]);
+ u[5] = _mm256_add_epi32(v[5], v[13]);
+ u[6] = _mm256_add_epi32(v[6], v[14]);
+ u[7] = _mm256_add_epi32(v[7], v[15]);
+ u[8] = _mm256_sub_epi32(v[0], v[8]);
+ u[9] = _mm256_sub_epi32(v[1], v[9]);
+ u[10] = _mm256_sub_epi32(v[2], v[10]);
+ u[11] = _mm256_sub_epi32(v[3], v[11]);
+ u[12] = _mm256_sub_epi32(v[4], v[12]);
+ u[13] = _mm256_sub_epi32(v[5], v[13]);
+ u[14] = _mm256_sub_epi32(v[6], v[14]);
+ u[15] = _mm256_sub_epi32(v[7], v[15]);
+
+ // stage 8
+ // Final odd-angle rotations (cospi2..cospi58 pairs).
+ v[0] = av1_half_btf_avx2(&cospi2, &u[0], &cospi62, &u[1], &rnding, bit);
+ v[1] = av1_half_btf_avx2(&cospi62, &u[0], &cospim2, &u[1], &rnding, bit);
+ v[2] = av1_half_btf_avx2(&cospi10, &u[2], &cospi54, &u[3], &rnding, bit);
+ v[3] = av1_half_btf_avx2(&cospi54, &u[2], &cospim10, &u[3], &rnding, bit);
+ v[4] = av1_half_btf_avx2(&cospi18, &u[4], &cospi46, &u[5], &rnding, bit);
+ v[5] = av1_half_btf_avx2(&cospi46, &u[4], &cospim18, &u[5], &rnding, bit);
+ v[6] = av1_half_btf_avx2(&cospi26, &u[6], &cospi38, &u[7], &rnding, bit);
+ v[7] = av1_half_btf_avx2(&cospi38, &u[6], &cospim26, &u[7], &rnding, bit);
+ v[8] = av1_half_btf_avx2(&cospi34, &u[8], &cospi30, &u[9], &rnding, bit);
+ v[9] = av1_half_btf_avx2(&cospi30, &u[8], &cospim34, &u[9], &rnding, bit);
+ v[10] = av1_half_btf_avx2(&cospi42, &u[10], &cospi22, &u[11], &rnding, bit);
+ v[11] =
+ av1_half_btf_avx2(&cospi22, &u[10], &cospim42, &u[11], &rnding, bit);
+ v[12] = av1_half_btf_avx2(&cospi50, &u[12], &cospi14, &u[13], &rnding, bit);
+ v[13] =
+ av1_half_btf_avx2(&cospi14, &u[12], &cospim50, &u[13], &rnding, bit);
+ v[14] = av1_half_btf_avx2(&cospi58, &u[14], &cospi6, &u[15], &rnding, bit);
+ v[15] = av1_half_btf_avx2(&cospi6, &u[14], &cospim58, &u[15], &rnding, bit);
+
+ // stage 9
+ // Output permutation defined by the fadst16 flow graph.
+ out[0 * outstride + col] = v[1];
+ out[1 * outstride + col] = v[14];
+ out[2 * outstride + col] = v[3];
+ out[3 * outstride + col] = v[12];
+ out[4 * outstride + col] = v[5];
+ out[5 * outstride + col] = v[10];
+ out[6 * outstride + col] = v[7];
+ out[7 * outstride + col] = v[8];
+ out[8 * outstride + col] = v[9];
+ out[9 * outstride + col] = v[6];
+ out[10 * outstride + col] = v[11];
+ out[11 * outstride + col] = v[4];
+ out[12 * outstride + col] = v[13];
+ out[13 * outstride + col] = v[2];
+ out[14 * outstride + col] = v[15];
+ out[15 * outstride + col] = v[0];
+ }
+}
+// 1-D 16-point identity transform: scales every element by 2*sqrt(2) in
+// fixed point (multiply by 2*NewSqrt2, round, shift by NewSqrt2Bits).
+// `bit` and `outstride` are unused — the identity needs no cos table and
+// writes out[] with the same flat layout as in[].
+static void idtx16_avx2(__m256i *in, __m256i *out, const int8_t bit,
+ int col_num, const int outstride) {
+ (void)bit;
+ (void)outstride;
+ __m256i fact = _mm256_set1_epi32(2 * NewSqrt2);
+ __m256i offset = _mm256_set1_epi32(1 << (NewSqrt2Bits - 1));
+ __m256i a_low;
+
+ // 16 rows per strip, col_num strips, processed as one flat array.
+ int num_iters = 16 * col_num;
+ for (int i = 0; i < num_iters; i++) {
+ a_low = _mm256_mullo_epi32(in[i], fact);
+ a_low = _mm256_add_epi32(a_low, offset);
+ out[i] = _mm256_srai_epi32(a_low, NewSqrt2Bits);
+ }
+}
+// Column (vertical) 16-point 1-D transform dispatch, indexed by TX_TYPE.
+// The column pass takes the *first* half of the 2-D type name (e.g.
+// ADST_DCT -> fadst16); H_* types are identity vertically.
+static const transform_1d_avx2 col_highbd_txfm8x16_arr[TX_TYPES] = {
+ fdct16_avx2, // DCT_DCT
+ fadst16_avx2, // ADST_DCT
+ fdct16_avx2, // DCT_ADST
+ fadst16_avx2, // ADST_ADST
+ fadst16_avx2, // FLIPADST_DCT
+ fdct16_avx2, // DCT_FLIPADST
+ fadst16_avx2, // FLIPADST_FLIPADST
+ fadst16_avx2, // ADST_FLIPADST
+ fadst16_avx2, // FLIPADST_ADST
+ idtx16_avx2, // IDTX
+ fdct16_avx2, // V_DCT
+ idtx16_avx2, // H_DCT
+ fadst16_avx2, // V_ADST
+ idtx16_avx2, // H_ADST
+ fadst16_avx2, // V_FLIPADST
+ idtx16_avx2 // H_FLIPADST
+};
+// Row (horizontal) 8-point 1-D transform dispatch, indexed by TX_TYPE.
+// The row pass takes the *second* half of the 2-D type name; V_* types are
+// identity horizontally. Flips are handled at load time, so FLIPADST maps
+// to the same fadst8 kernel as ADST.
+static const transform_1d_avx2 row_highbd_txfm8x8_arr[TX_TYPES] = {
+ fdct8_avx2, // DCT_DCT
+ fdct8_avx2, // ADST_DCT
+ fadst8_avx2, // DCT_ADST
+ fadst8_avx2, // ADST_ADST
+ fdct8_avx2, // FLIPADST_DCT
+ fadst8_avx2, // DCT_FLIPADST
+ fadst8_avx2, // FLIPADST_FLIPADST
+ fadst8_avx2, // ADST_FLIPADST
+ fadst8_avx2, // FLIPADST_ADST
+ idtx8_avx2, // IDTX
+ idtx8_avx2, // V_DCT
+ fdct8_avx2, // H_DCT
+ idtx8_avx2, // V_ADST
+ fadst8_avx2, // H_ADST
+ idtx8_avx2, // V_FLIPADST
+ fadst8_avx2 // H_FLIPADST
+};
+// 2-D forward 8x16 transform (high bitdepth, AVX2): 16-point column pass,
+// 8-point row pass. Rectangular blocks are rescaled by NewSqrt2 via
+// round_shift_rect_array_32_avx2 so the overall gain matches square sizes.
+// NOTE(review): both passes use the *col* cos bit; presumably row and col
+// bits are equal for TX_8X16 — confirm against av1_fwd_cos_bit_row.
+void av1_fwd_txfm2d_8x16_avx2(const int16_t *input, int32_t *coeff, int stride,
+ TX_TYPE tx_type, int bd) {
+ __m256i in[16], out[16];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X16];
+ const int txw_idx = get_txw_idx(TX_8X16);
+ const int txh_idx = get_txh_idx(TX_8X16);
+ const transform_1d_avx2 col_txfm = col_highbd_txfm8x16_arr[tx_type];
+ const transform_1d_avx2 row_txfm = row_highbd_txfm8x8_arr[tx_type];
+ const int8_t bit = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ load_buffer_8x16_avx2(input, in, stride, ud_flip, lr_flip, shift[0]);
+ col_txfm(in, out, bit, 1, 1);
+ // Round the two 8x8 halves of the 8x16 column-transform result.
+ col_txfm_8x8_rounding(out, -shift[1]);
+ col_txfm_8x8_rounding(&out[8], -shift[1]);
+ // Transpose each 8x8 half into the interleaved 2-strip layout the row
+ // transform expects (outstride 2, halves offset by one vector).
+ fwd_txfm_transpose_8x8_avx2(out, in, 1, 2);
+ fwd_txfm_transpose_8x8_avx2(&out[8], &in[1], 1, 2);
+ row_txfm(in, out, bit, 2, 2);
+ round_shift_rect_array_32_avx2(out, in, 16, -shift[2], NewSqrt2);
+ store_buffer_avx2(in, coeff, 8, 16);
+ (void)bd;
+}
+// Column (vertical) 8-point 1-D transform dispatch, indexed by TX_TYPE.
+// Column pass uses the first half of the 2-D type name; H_* types are
+// identity vertically.
+static const transform_1d_avx2 col_highbd_txfm8x8_arr[TX_TYPES] = {
+ fdct8_avx2, // DCT_DCT
+ fadst8_avx2, // ADST_DCT
+ fdct8_avx2, // DCT_ADST
+ fadst8_avx2, // ADST_ADST
+ fadst8_avx2, // FLIPADST_DCT
+ fdct8_avx2, // DCT_FLIPADST
+ fadst8_avx2, // FLIPADST_FLIPADST
+ fadst8_avx2, // ADST_FLIPADST
+ fadst8_avx2, // FLIPADST_ADST
+ idtx8_avx2, // IDTX
+ fdct8_avx2, // V_DCT
+ idtx8_avx2, // H_DCT
+ fadst8_avx2, // V_ADST
+ idtx8_avx2, // H_ADST
+ fadst8_avx2, // V_FLIPADST
+ idtx8_avx2 // H_FLIPADST
+};
+// Row (horizontal) 16-point 1-D transform dispatch, indexed by TX_TYPE.
+// Row pass uses the second half of the 2-D type name; V_* types are
+// identity horizontally. FLIPADST shares the fadst16 kernel (flip done on
+// load).
+static const transform_1d_avx2 row_highbd_txfm8x16_arr[TX_TYPES] = {
+ fdct16_avx2, // DCT_DCT
+ fdct16_avx2, // ADST_DCT
+ fadst16_avx2, // DCT_ADST
+ fadst16_avx2, // ADST_ADST
+ fdct16_avx2, // FLIPADST_DCT
+ fadst16_avx2, // DCT_FLIPADST
+ fadst16_avx2, // FLIPADST_FLIPADST
+ fadst16_avx2, // ADST_FLIPADST
+ fadst16_avx2, // FLIPADST_ADST
+ idtx16_avx2, // IDTX
+ idtx16_avx2, // V_DCT
+ fdct16_avx2, // H_DCT
+ idtx16_avx2, // V_ADST
+ fadst16_avx2, // H_ADST
+ idtx16_avx2, // V_FLIPADST
+ fadst16_avx2 // H_FLIPADST
+};
+// 2-D forward 16x8 transform (high bitdepth, AVX2): 8-point column pass over
+// two 8-lane strips, then a 16-point row pass; the rectangular result is
+// rescaled by NewSqrt2. Mirror image of av1_fwd_txfm2d_8x16_avx2.
+// NOTE(review): both passes use the *col* cos bit; presumably equal to the
+// row bit for TX_16X8 — confirm against av1_fwd_cos_bit_row.
+void av1_fwd_txfm2d_16x8_avx2(const int16_t *input, int32_t *coeff, int stride,
+ TX_TYPE tx_type, int bd) {
+ __m256i in[16], out[16];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X8];
+ const int txw_idx = get_txw_idx(TX_16X8);
+ const int txh_idx = get_txh_idx(TX_16X8);
+ const transform_1d_avx2 col_txfm = col_highbd_txfm8x8_arr[tx_type];
+ const transform_1d_avx2 row_txfm = row_highbd_txfm8x16_arr[tx_type];
+ const int8_t bit = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ // 8 rows of width 16 = two __m256i strips per row.
+ load_buffer_16xn_avx2(input, in, stride, 8, 2, ud_flip, lr_flip);
+ round_shift_32_8xn_avx2(in, 16, shift[0], 1);
+ col_txfm(in, out, bit, 2, 2);
+ round_shift_32_8xn_avx2(out, 16, shift[1], 1);
+ // Transpose the two interleaved 8x8 halves back to contiguous strips for
+ // the 16-point row transform.
+ fwd_txfm_transpose_8x8_avx2(out, in, 2, 1);
+ fwd_txfm_transpose_8x8_avx2(&out[1], &in[8], 2, 1);
+ row_txfm(in, out, bit, 1, 1);
+ round_shift_rect_array_32_avx2(out, out, 16, -shift[2], NewSqrt2);
+ store_buffer_avx2(out, coeff, 8, 16);
+ (void)bd;
+}
+void av1_fwd_txfm2d_16x16_avx2(const int16_t *input, int32_t *coeff, int stride,
+                               TX_TYPE tx_type, int bd) {
+  // 16x16 forward 2-D transform: column 1-D transform, intermediate rounding,
+  // 16x16 transpose, row 1-D transform, store. The previous 16-way switch
+  // duplicated this whole pipeline per tx_type; each case now only selects
+  // the two 1-D kernels and the up/down / left/right flip flags, and the
+  // pipeline itself is shared. Behavior per tx_type is unchanged.
+  __m256i in[32], out[32];
+  const TX_SIZE tx_size = TX_16X16;
+  const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+  const int txw_idx = get_txw_idx(tx_size);
+  const int txh_idx = get_txh_idx(tx_size);
+  const int width = tx_size_wide[tx_size];
+  const int height = tx_size_high[tx_size];
+  const int width_div8 = (width >> 3);
+  const int width_div16 = (width >> 4);
+  const int size = (height << 1);
+  transform_1d_avx2 col_txfm = NULL;
+  transform_1d_avx2 row_txfm = NULL;
+  int ud_flip = 0;  // flip the input upside-down (FLIPADST on columns)
+  int lr_flip = 0;  // flip the input left-right (FLIPADST on rows)
+  switch (tx_type) {
+    case DCT_DCT: col_txfm = fdct16_avx2; row_txfm = fdct16_avx2; break;
+    case ADST_DCT: col_txfm = fadst16_avx2; row_txfm = fdct16_avx2; break;
+    case DCT_ADST: col_txfm = fdct16_avx2; row_txfm = fadst16_avx2; break;
+    case ADST_ADST: col_txfm = fadst16_avx2; row_txfm = fadst16_avx2; break;
+    case FLIPADST_DCT:
+      col_txfm = fadst16_avx2;
+      row_txfm = fdct16_avx2;
+      ud_flip = 1;
+      break;
+    case DCT_FLIPADST:
+      col_txfm = fdct16_avx2;
+      row_txfm = fadst16_avx2;
+      lr_flip = 1;
+      break;
+    case FLIPADST_FLIPADST:
+      col_txfm = fadst16_avx2;
+      row_txfm = fadst16_avx2;
+      ud_flip = 1;
+      lr_flip = 1;
+      break;
+    case ADST_FLIPADST:
+      col_txfm = fadst16_avx2;
+      row_txfm = fadst16_avx2;
+      lr_flip = 1;
+      break;
+    case FLIPADST_ADST:
+      col_txfm = fadst16_avx2;
+      row_txfm = fadst16_avx2;
+      ud_flip = 1;
+      break;
+    case IDTX: col_txfm = idtx16_avx2; row_txfm = idtx16_avx2; break;
+    case V_DCT: col_txfm = fdct16_avx2; row_txfm = idtx16_avx2; break;
+    case H_DCT: col_txfm = idtx16_avx2; row_txfm = fdct16_avx2; break;
+    case V_ADST: col_txfm = fadst16_avx2; row_txfm = idtx16_avx2; break;
+    case H_ADST: col_txfm = idtx16_avx2; row_txfm = fadst16_avx2; break;
+    case V_FLIPADST:
+      col_txfm = fadst16_avx2;
+      row_txfm = idtx16_avx2;
+      ud_flip = 1;
+      break;
+    case H_FLIPADST:
+      col_txfm = idtx16_avx2;
+      row_txfm = fadst16_avx2;
+      lr_flip = 1;
+      break;
+    // Original code fell through its switch without transforming on an
+    // unknown tx_type; returning here preserves that (no stores happen).
+    default: assert(0); return;
+  }
+  load_buffer_16xn_avx2(input, in, stride, height, width_div8, ud_flip,
+                        lr_flip);
+  round_shift_32_8xn_avx2(in, size, shift[0], width_div16);
+  col_txfm(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], width_div8,
+           width_div8);
+  round_shift_32_8xn_avx2(out, size, shift[1], width_div16);
+  fwd_txfm_transpose_16x16_avx2(out, in);
+  row_txfm(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], width_div8,
+           width_div8);
+  store_buffer_avx2(out, coeff, 8, 32);
+  (void)bd;
+}
+// 32-point forward DCT over 8 independent 32-bit lanes (AVX2).
+// Reads input[0], input[instride], ..., input[31 * instride] and writes the
+// 32 transformed values to output with stride outstride, in the bit-reversed
+// index order produced by stage 9 below (0, 16, 8, 24, 4, 20, ...).
+// btf_32_avx2_type0 (defined earlier in this file) is the weighted
+// butterfly/rotation primitive used by the middle stages.
+static INLINE void fdct32_avx2(__m256i *input, __m256i *output,
+                               const int8_t cos_bit, const int instride,
+                               const int outstride) {
+  __m256i buf0[32];
+  __m256i buf1[32];
+  const int32_t *cospi;
+  int startidx = 0 * instride;
+  int endidx = 31 * instride;
+  // stage 0
+  // stage 1
+  // Cross add/sub of symmetric input pairs:
+  // buf1[i] = in[i] + in[31 - i], buf1[31 - i] = in[i] - in[31 - i].
+  buf1[0] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[31] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[1] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[30] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[2] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[29] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[3] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[28] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[4] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[27] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[5] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[26] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[6] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[25] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[7] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[24] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[8] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[23] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[9] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[22] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[10] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[21] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[11] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[20] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[12] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[19] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[13] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[18] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[14] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[17] = _mm256_sub_epi32(input[startidx], input[endidx]);
+  startidx += instride;
+  endidx -= instride;
+  buf1[15] = _mm256_add_epi32(input[startidx], input[endidx]);
+  buf1[16] = _mm256_sub_epi32(input[startidx], input[endidx]);
+
+  // stage 2
+  // cospi_arr(cos_bit) is re-fetched at every stage below; it always returns
+  // the same table for a given cos_bit (matches the scalar reference style).
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = _mm256_add_epi32(buf1[0], buf1[15]);
+  buf0[15] = _mm256_sub_epi32(buf1[0], buf1[15]);
+  buf0[1] = _mm256_add_epi32(buf1[1], buf1[14]);
+  buf0[14] = _mm256_sub_epi32(buf1[1], buf1[14]);
+  buf0[2] = _mm256_add_epi32(buf1[2], buf1[13]);
+  buf0[13] = _mm256_sub_epi32(buf1[2], buf1[13]);
+  buf0[3] = _mm256_add_epi32(buf1[3], buf1[12]);
+  buf0[12] = _mm256_sub_epi32(buf1[3], buf1[12]);
+  buf0[4] = _mm256_add_epi32(buf1[4], buf1[11]);
+  buf0[11] = _mm256_sub_epi32(buf1[4], buf1[11]);
+  buf0[5] = _mm256_add_epi32(buf1[5], buf1[10]);
+  buf0[10] = _mm256_sub_epi32(buf1[5], buf1[10]);
+  buf0[6] = _mm256_add_epi32(buf1[6], buf1[9]);
+  buf0[9] = _mm256_sub_epi32(buf1[6], buf1[9]);
+  buf0[7] = _mm256_add_epi32(buf1[7], buf1[8]);
+  buf0[8] = _mm256_sub_epi32(buf1[7], buf1[8]);
+  buf0[16] = buf1[16];
+  buf0[17] = buf1[17];
+  buf0[18] = buf1[18];
+  buf0[19] = buf1[19];
+  btf_32_avx2_type0(-cospi[32], cospi[32], buf1[20], buf1[27], buf0[20],
+                    buf0[27], cos_bit);
+  btf_32_avx2_type0(-cospi[32], cospi[32], buf1[21], buf1[26], buf0[21],
+                    buf0[26], cos_bit);
+  btf_32_avx2_type0(-cospi[32], cospi[32], buf1[22], buf1[25], buf0[22],
+                    buf0[25], cos_bit);
+  btf_32_avx2_type0(-cospi[32], cospi[32], buf1[23], buf1[24], buf0[23],
+                    buf0[24], cos_bit);
+  buf0[28] = buf1[28];
+  buf0[29] = buf1[29];
+  buf0[30] = buf1[30];
+  buf0[31] = buf1[31];
+
+  // stage 3
+  cospi = cospi_arr(cos_bit);
+  buf1[0] = _mm256_add_epi32(buf0[0], buf0[7]);
+  buf1[7] = _mm256_sub_epi32(buf0[0], buf0[7]);
+  buf1[1] = _mm256_add_epi32(buf0[1], buf0[6]);
+  buf1[6] = _mm256_sub_epi32(buf0[1], buf0[6]);
+  buf1[2] = _mm256_add_epi32(buf0[2], buf0[5]);
+  buf1[5] = _mm256_sub_epi32(buf0[2], buf0[5]);
+  buf1[3] = _mm256_add_epi32(buf0[3], buf0[4]);
+  buf1[4] = _mm256_sub_epi32(buf0[3], buf0[4]);
+  buf1[8] = buf0[8];
+  buf1[9] = buf0[9];
+  btf_32_avx2_type0(-cospi[32], cospi[32], buf0[10], buf0[13], buf1[10],
+                    buf1[13], cos_bit);
+  btf_32_avx2_type0(-cospi[32], cospi[32], buf0[11], buf0[12], buf1[11],
+                    buf1[12], cos_bit);
+  buf1[14] = buf0[14];
+  buf1[15] = buf0[15];
+  buf1[16] = _mm256_add_epi32(buf0[16], buf0[23]);
+  buf1[23] = _mm256_sub_epi32(buf0[16], buf0[23]);
+  buf1[17] = _mm256_add_epi32(buf0[17], buf0[22]);
+  buf1[22] = _mm256_sub_epi32(buf0[17], buf0[22]);
+  buf1[18] = _mm256_add_epi32(buf0[18], buf0[21]);
+  buf1[21] = _mm256_sub_epi32(buf0[18], buf0[21]);
+  buf1[19] = _mm256_add_epi32(buf0[19], buf0[20]);
+  buf1[20] = _mm256_sub_epi32(buf0[19], buf0[20]);
+  buf1[24] = _mm256_sub_epi32(buf0[31], buf0[24]);
+  buf1[31] = _mm256_add_epi32(buf0[31], buf0[24]);
+  buf1[25] = _mm256_sub_epi32(buf0[30], buf0[25]);
+  buf1[30] = _mm256_add_epi32(buf0[30], buf0[25]);
+  buf1[26] = _mm256_sub_epi32(buf0[29], buf0[26]);
+  buf1[29] = _mm256_add_epi32(buf0[29], buf0[26]);
+  buf1[27] = _mm256_sub_epi32(buf0[28], buf0[27]);
+  buf1[28] = _mm256_add_epi32(buf0[28], buf0[27]);
+
+  // stage 4
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = _mm256_add_epi32(buf1[0], buf1[3]);
+  buf0[3] = _mm256_sub_epi32(buf1[0], buf1[3]);
+  buf0[1] = _mm256_add_epi32(buf1[1], buf1[2]);
+  buf0[2] = _mm256_sub_epi32(buf1[1], buf1[2]);
+  buf0[4] = buf1[4];
+  btf_32_avx2_type0(-cospi[32], cospi[32], buf1[5], buf1[6], buf0[5], buf0[6],
+                    cos_bit);
+  buf0[7] = buf1[7];
+  buf0[8] = _mm256_add_epi32(buf1[8], buf1[11]);
+  buf0[11] = _mm256_sub_epi32(buf1[8], buf1[11]);
+  buf0[9] = _mm256_add_epi32(buf1[9], buf1[10]);
+  buf0[10] = _mm256_sub_epi32(buf1[9], buf1[10]);
+  buf0[12] = _mm256_sub_epi32(buf1[15], buf1[12]);
+  buf0[15] = _mm256_add_epi32(buf1[15], buf1[12]);
+  buf0[13] = _mm256_sub_epi32(buf1[14], buf1[13]);
+  buf0[14] = _mm256_add_epi32(buf1[14], buf1[13]);
+  buf0[16] = buf1[16];
+  buf0[17] = buf1[17];
+  btf_32_avx2_type0(-cospi[16], cospi[48], buf1[18], buf1[29], buf0[18],
+                    buf0[29], cos_bit);
+  btf_32_avx2_type0(-cospi[16], cospi[48], buf1[19], buf1[28], buf0[19],
+                    buf0[28], cos_bit);
+  btf_32_avx2_type0(-cospi[48], -cospi[16], buf1[20], buf1[27], buf0[20],
+                    buf0[27], cos_bit);
+  btf_32_avx2_type0(-cospi[48], -cospi[16], buf1[21], buf1[26], buf0[21],
+                    buf0[26], cos_bit);
+  buf0[22] = buf1[22];
+  buf0[23] = buf1[23];
+  buf0[24] = buf1[24];
+  buf0[25] = buf1[25];
+  buf0[30] = buf1[30];
+  buf0[31] = buf1[31];
+
+  // stage 5
+  cospi = cospi_arr(cos_bit);
+  btf_32_avx2_type0(cospi[32], cospi[32], buf0[0], buf0[1], buf1[0], buf1[1],
+                    cos_bit);
+  btf_32_avx2_type0(cospi[16], cospi[48], buf0[3], buf0[2], buf1[2], buf1[3],
+                    cos_bit);
+  buf1[4] = _mm256_add_epi32(buf0[4], buf0[5]);
+  buf1[5] = _mm256_sub_epi32(buf0[4], buf0[5]);
+  buf1[6] = _mm256_sub_epi32(buf0[7], buf0[6]);
+  buf1[7] = _mm256_add_epi32(buf0[7], buf0[6]);
+  buf1[8] = buf0[8];
+  btf_32_avx2_type0(-cospi[16], cospi[48], buf0[9], buf0[14], buf1[9], buf1[14],
+                    cos_bit);
+  btf_32_avx2_type0(-cospi[48], -cospi[16], buf0[10], buf0[13], buf1[10],
+                    buf1[13], cos_bit);
+  buf1[11] = buf0[11];
+  buf1[12] = buf0[12];
+  buf1[15] = buf0[15];
+  buf1[16] = _mm256_add_epi32(buf0[16], buf0[19]);
+  buf1[19] = _mm256_sub_epi32(buf0[16], buf0[19]);
+  buf1[17] = _mm256_add_epi32(buf0[17], buf0[18]);
+  buf1[18] = _mm256_sub_epi32(buf0[17], buf0[18]);
+  buf1[20] = _mm256_sub_epi32(buf0[23], buf0[20]);
+  buf1[23] = _mm256_add_epi32(buf0[23], buf0[20]);
+  buf1[21] = _mm256_sub_epi32(buf0[22], buf0[21]);
+  buf1[22] = _mm256_add_epi32(buf0[22], buf0[21]);
+  buf1[24] = _mm256_add_epi32(buf0[24], buf0[27]);
+  buf1[27] = _mm256_sub_epi32(buf0[24], buf0[27]);
+  buf1[25] = _mm256_add_epi32(buf0[25], buf0[26]);
+  buf1[26] = _mm256_sub_epi32(buf0[25], buf0[26]);
+  buf1[28] = _mm256_sub_epi32(buf0[31], buf0[28]);
+  buf1[31] = _mm256_add_epi32(buf0[31], buf0[28]);
+  buf1[29] = _mm256_sub_epi32(buf0[30], buf0[29]);
+  buf1[30] = _mm256_add_epi32(buf0[30], buf0[29]);
+
+  // stage 6
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = buf1[0];
+  buf0[1] = buf1[1];
+  buf0[2] = buf1[2];
+  buf0[3] = buf1[3];
+  btf_32_avx2_type0(cospi[8], cospi[56], buf1[7], buf1[4], buf0[4], buf0[7],
+                    cos_bit);
+  btf_32_avx2_type0(cospi[40], cospi[24], buf1[6], buf1[5], buf0[5], buf0[6],
+                    cos_bit);
+  buf0[8] = _mm256_add_epi32(buf1[8], buf1[9]);
+  buf0[9] = _mm256_sub_epi32(buf1[8], buf1[9]);
+  buf0[10] = _mm256_sub_epi32(buf1[11], buf1[10]);
+  buf0[11] = _mm256_add_epi32(buf1[11], buf1[10]);
+  buf0[12] = _mm256_add_epi32(buf1[12], buf1[13]);
+  buf0[13] = _mm256_sub_epi32(buf1[12], buf1[13]);
+  buf0[14] = _mm256_sub_epi32(buf1[15], buf1[14]);
+  buf0[15] = _mm256_add_epi32(buf1[15], buf1[14]);
+  buf0[16] = buf1[16];
+  btf_32_avx2_type0(-cospi[8], cospi[56], buf1[17], buf1[30], buf0[17],
+                    buf0[30], cos_bit);
+  btf_32_avx2_type0(-cospi[56], -cospi[8], buf1[18], buf1[29], buf0[18],
+                    buf0[29], cos_bit);
+  buf0[19] = buf1[19];
+  buf0[20] = buf1[20];
+  btf_32_avx2_type0(-cospi[40], cospi[24], buf1[21], buf1[26], buf0[21],
+                    buf0[26], cos_bit);
+  btf_32_avx2_type0(-cospi[24], -cospi[40], buf1[22], buf1[25], buf0[22],
+                    buf0[25], cos_bit);
+  buf0[23] = buf1[23];
+  buf0[24] = buf1[24];
+  buf0[27] = buf1[27];
+  buf0[28] = buf1[28];
+  buf0[31] = buf1[31];
+
+  // stage 7
+  cospi = cospi_arr(cos_bit);
+  buf1[0] = buf0[0];
+  buf1[1] = buf0[1];
+  buf1[2] = buf0[2];
+  buf1[3] = buf0[3];
+  buf1[4] = buf0[4];
+  buf1[5] = buf0[5];
+  buf1[6] = buf0[6];
+  buf1[7] = buf0[7];
+  btf_32_avx2_type0(cospi[4], cospi[60], buf0[15], buf0[8], buf1[8], buf1[15],
+                    cos_bit);
+  btf_32_avx2_type0(cospi[36], cospi[28], buf0[14], buf0[9], buf1[9], buf1[14],
+                    cos_bit);
+  btf_32_avx2_type0(cospi[20], cospi[44], buf0[13], buf0[10], buf1[10],
+                    buf1[13], cos_bit);
+  btf_32_avx2_type0(cospi[52], cospi[12], buf0[12], buf0[11], buf1[11],
+                    buf1[12], cos_bit);
+  buf1[16] = _mm256_add_epi32(buf0[16], buf0[17]);
+  buf1[17] = _mm256_sub_epi32(buf0[16], buf0[17]);
+  buf1[18] = _mm256_sub_epi32(buf0[19], buf0[18]);
+  buf1[19] = _mm256_add_epi32(buf0[19], buf0[18]);
+  buf1[20] = _mm256_add_epi32(buf0[20], buf0[21]);
+  buf1[21] = _mm256_sub_epi32(buf0[20], buf0[21]);
+  buf1[22] = _mm256_sub_epi32(buf0[23], buf0[22]);
+  buf1[23] = _mm256_add_epi32(buf0[23], buf0[22]);
+  buf1[24] = _mm256_add_epi32(buf0[24], buf0[25]);
+  buf1[25] = _mm256_sub_epi32(buf0[24], buf0[25]);
+  buf1[26] = _mm256_sub_epi32(buf0[27], buf0[26]);
+  buf1[27] = _mm256_add_epi32(buf0[27], buf0[26]);
+  buf1[28] = _mm256_add_epi32(buf0[28], buf0[29]);
+  buf1[29] = _mm256_sub_epi32(buf0[28], buf0[29]);
+  buf1[30] = _mm256_sub_epi32(buf0[31], buf0[30]);
+  buf1[31] = _mm256_add_epi32(buf0[31], buf0[30]);
+
+  // stage 8
+  cospi = cospi_arr(cos_bit);
+  buf0[0] = buf1[0];
+  buf0[1] = buf1[1];
+  buf0[2] = buf1[2];
+  buf0[3] = buf1[3];
+  buf0[4] = buf1[4];
+  buf0[5] = buf1[5];
+  buf0[6] = buf1[6];
+  buf0[7] = buf1[7];
+  buf0[8] = buf1[8];
+  buf0[9] = buf1[9];
+  buf0[10] = buf1[10];
+  buf0[11] = buf1[11];
+  buf0[12] = buf1[12];
+  buf0[13] = buf1[13];
+  buf0[14] = buf1[14];
+  buf0[15] = buf1[15];
+  btf_32_avx2_type0(cospi[2], cospi[62], buf1[31], buf1[16], buf0[16], buf0[31],
+                    cos_bit);
+  btf_32_avx2_type0(cospi[34], cospi[30], buf1[30], buf1[17], buf0[17],
+                    buf0[30], cos_bit);
+  btf_32_avx2_type0(cospi[18], cospi[46], buf1[29], buf1[18], buf0[18],
+                    buf0[29], cos_bit);
+  btf_32_avx2_type0(cospi[50], cospi[14], buf1[28], buf1[19], buf0[19],
+                    buf0[28], cos_bit);
+  btf_32_avx2_type0(cospi[10], cospi[54], buf1[27], buf1[20], buf0[20],
+                    buf0[27], cos_bit);
+  btf_32_avx2_type0(cospi[42], cospi[22], buf1[26], buf1[21], buf0[21],
+                    buf0[26], cos_bit);
+  btf_32_avx2_type0(cospi[26], cospi[38], buf1[25], buf1[22], buf0[22],
+                    buf0[25], cos_bit);
+  btf_32_avx2_type0(cospi[58], cospi[6], buf1[24], buf1[23], buf0[23], buf0[24],
+                    cos_bit);
+
+  startidx = 0 * outstride;
+  endidx = 31 * outstride;
+  // stage 9
+  // Write out in bit-reversed order: output row k gets buf0[bitrev5(k)],
+  // i.e. rows 0,1,2,3,... receive buf0[0],buf0[16],buf0[8],buf0[24],...
+  output[startidx] = buf0[0];
+  output[endidx] = buf0[31];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[16];
+  output[endidx] = buf0[15];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[8];
+  output[endidx] = buf0[23];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[24];
+  output[endidx] = buf0[7];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[4];
+  output[endidx] = buf0[27];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[20];
+  output[endidx] = buf0[11];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[12];
+  output[endidx] = buf0[19];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[28];
+  output[endidx] = buf0[3];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[2];
+  output[endidx] = buf0[29];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[18];
+  output[endidx] = buf0[13];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[10];
+  output[endidx] = buf0[21];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[26];
+  output[endidx] = buf0[5];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[6];
+  output[endidx] = buf0[25];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[22];
+  output[endidx] = buf0[9];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[14];
+  output[endidx] = buf0[17];
+  startidx += outstride;
+  endidx -= outstride;
+  output[startidx] = buf0[30];
+  output[endidx] = buf0[1];
+}
+// 32-point identity transform (AVX2): every coefficient is scaled by 4
+// (left shift by 2), matching the scalar IDTX scaling for 32-point rows and
+// columns. cos_bit is unused; the parameter exists only so this function can
+// be stored in the shared transform_1d_avx2 tables.
+static INLINE void idtx32x32_avx2(__m256i *input, __m256i *output,
+                                  const int8_t cos_bit, int instride,
+                                  int outstride) {
+  (void)cos_bit;
+  for (int row = 0; row < 32; ++row) {
+    output[row * outstride] = _mm256_slli_epi32(input[row * instride], 2);
+  }
+}
+// Column (vertical) 1-D 32-point transform kernels, indexed by TX_TYPE.
+// Only DCT_DCT and IDTX have a 32-point column kernel in this AVX2 path;
+// the NULL entries are tx_types this function table does not support.
+static const transform_1d_avx2 col_txfm8x32_arr[TX_TYPES] = {
+  fdct32_avx2,     // DCT_DCT
+  NULL,            // ADST_DCT
+  NULL,            // DCT_ADST
+  NULL,            // ADST_ADST
+  NULL,            // FLIPADST_DCT
+  NULL,            // DCT_FLIPADST
+  NULL,            // FLIPADST_FLIPADST
+  NULL,            // ADST_FLIPADST
+  NULL,            // FLIPADST_ADST
+  idtx32x32_avx2,  // IDTX
+  NULL,            // V_DCT
+  NULL,            // H_DCT
+  NULL,            // V_ADST
+  NULL,            // H_ADST
+  NULL,            // V_FLIPADST
+  NULL             // H_FLIPADST
+};
+// Row (horizontal) 1-D 32-point transform kernels, indexed by TX_TYPE.
+// Mirrors col_txfm8x32_arr: only DCT_DCT and IDTX are supported here and
+// NULL marks tx_types without a 32-point row kernel.
+static const transform_1d_avx2 row_txfm8x32_arr[TX_TYPES] = {
+  fdct32_avx2,     // DCT_DCT
+  NULL,            // ADST_DCT
+  NULL,            // DCT_ADST
+  NULL,            // ADST_ADST
+  NULL,            // FLIPADST_DCT
+  NULL,            // DCT_FLIPADST
+  NULL,            // FLIPADST_FLIPADST
+  NULL,            // ADST_FLIPADST
+  NULL,            // FLIPADST_ADST
+  idtx32x32_avx2,  // IDTX
+  NULL,            // V_DCT
+  NULL,            // H_DCT
+  NULL,            // V_ADST
+  NULL,            // H_ADST
+  NULL,            // V_FLIPADST
+  NULL             // H_FLIPADST
+};
+void av1_fwd_txfm2d_32x32_avx2(const int16_t *input, int32_t *output,
+                               int stride, TX_TYPE tx_type, int bd) {
+  (void)bd;
+  // 32x32 forward 2-D transform: column pass into buf0, an 8x8-tile
+  // transpose into buf1, then the row pass in place. Each pass walks the
+  // block in 16-sample slabs made of two interleaved 8-lane columns.
+  __m256i buf0[128], buf1[128];
+  const int tx_size = TX_32X32;
+  const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+  const int txw_idx = get_txw_idx(tx_size);
+  const int txh_idx = get_txh_idx(tx_size);
+  const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+  const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+  const int width = tx_size_wide[tx_size];
+  const int height = tx_size_high[tx_size];
+  const transform_1d_avx2 col_txfm = col_txfm8x32_arr[tx_type];
+  const transform_1d_avx2 row_txfm = row_txfm8x32_arr[tx_type];
+  const int width_div16 = (width >> 4);
+  const int width_div8 = (width >> 3);
+
+  // Column pass: load, pre-round, 1-D column transform, mid-round. The two
+  // lanes of a slab occupy disjoint buf0 entries, so each is processed
+  // independently.
+  for (int blk = 0; blk < width_div16; ++blk) {
+    __m256i *slab = &buf0[blk << 1];
+    load_buffer_16xn_avx2(input + (blk << 4), slab, stride, height, width_div8,
+                          0, 0);
+    for (int lane = 0; lane < 2; ++lane) {
+      round_shift_32_8xn_avx2(slab + lane, height, shift[0], width_div8);
+      col_txfm(slab + lane, slab + lane, cos_bit_col, width_div8, width_div8);
+      round_shift_32_8xn_avx2(slab + lane, height, shift[1], width_div8);
+    }
+  }
+
+  // Transpose 8x8 tiles from buf0 into buf1.
+  for (int row = 0; row < height; row += 8) {
+    for (int col = 0; col < width_div8; ++col) {
+      fwd_txfm_transpose_8x8_avx2(&buf0[row * width_div8 + col],
+                                  &buf1[col * 8 * width_div8 + (row >> 3)],
+                                  width_div8, width_div8);
+    }
+  }
+
+  // Row pass plus the final rounding shift, again lane by lane.
+  for (int blk = 0; blk < width_div16; ++blk) {
+    for (int lane = 0; lane < 2; ++lane) {
+      __m256i *col = &buf1[(blk << 1) + lane];
+      row_txfm(col, col, cos_bit_row, width_div8, width_div8);
+      round_shift_32_8xn_avx2(col, height, shift[2], width_div8);
+    }
+  }
+
+  store_buffer_avx2(buf1, output, 8, 128);
+}
+// Stage 2 of the forward 64-point DCT butterfly network (AVX2):
+// cross add/sub of the symmetric halves x1[0..15] / x1[31..16], pass-through
+// of x1[32..39] and x1[56..63], and (-cos, +cos)(pi/4) rotation butterflies
+// on the x1[40..55] pairs via btf_32_type0_avx2_new (defined elsewhere in
+// this file).
+// NOTE(review): "__rounding" is a reserved identifier (leading double
+// underscore, C11 7.1.3); consider renaming in a follow-up.
+static INLINE void fdct64_stage2_avx2(__m256i *x1, __m256i *x2,
+                                      __m256i *cospi_m32, __m256i *cospi_p32,
+                                      const __m256i *__rounding,
+                                      int8_t cos_bit) {
+  x2[0] = _mm256_add_epi32(x1[0], x1[31]);
+  x2[31] = _mm256_sub_epi32(x1[0], x1[31]);
+  x2[1] = _mm256_add_epi32(x1[1], x1[30]);
+  x2[30] = _mm256_sub_epi32(x1[1], x1[30]);
+  x2[2] = _mm256_add_epi32(x1[2], x1[29]);
+  x2[29] = _mm256_sub_epi32(x1[2], x1[29]);
+  x2[3] = _mm256_add_epi32(x1[3], x1[28]);
+  x2[28] = _mm256_sub_epi32(x1[3], x1[28]);
+  x2[4] = _mm256_add_epi32(x1[4], x1[27]);
+  x2[27] = _mm256_sub_epi32(x1[4], x1[27]);
+  x2[5] = _mm256_add_epi32(x1[5], x1[26]);
+  x2[26] = _mm256_sub_epi32(x1[5], x1[26]);
+  x2[6] = _mm256_add_epi32(x1[6], x1[25]);
+  x2[25] = _mm256_sub_epi32(x1[6], x1[25]);
+  x2[7] = _mm256_add_epi32(x1[7], x1[24]);
+  x2[24] = _mm256_sub_epi32(x1[7], x1[24]);
+  x2[8] = _mm256_add_epi32(x1[8], x1[23]);
+  x2[23] = _mm256_sub_epi32(x1[8], x1[23]);
+  x2[9] = _mm256_add_epi32(x1[9], x1[22]);
+  x2[22] = _mm256_sub_epi32(x1[9], x1[22]);
+  x2[10] = _mm256_add_epi32(x1[10], x1[21]);
+  x2[21] = _mm256_sub_epi32(x1[10], x1[21]);
+  x2[11] = _mm256_add_epi32(x1[11], x1[20]);
+  x2[20] = _mm256_sub_epi32(x1[11], x1[20]);
+  x2[12] = _mm256_add_epi32(x1[12], x1[19]);
+  x2[19] = _mm256_sub_epi32(x1[12], x1[19]);
+  x2[13] = _mm256_add_epi32(x1[13], x1[18]);
+  x2[18] = _mm256_sub_epi32(x1[13], x1[18]);
+  x2[14] = _mm256_add_epi32(x1[14], x1[17]);
+  x2[17] = _mm256_sub_epi32(x1[14], x1[17]);
+  x2[15] = _mm256_add_epi32(x1[15], x1[16]);
+  x2[16] = _mm256_sub_epi32(x1[15], x1[16]);
+  x2[32] = x1[32];
+  x2[33] = x1[33];
+  x2[34] = x1[34];
+  x2[35] = x1[35];
+  x2[36] = x1[36];
+  x2[37] = x1[37];
+  x2[38] = x1[38];
+  x2[39] = x1[39];
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[40], x1[55], x2[40], x2[55],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[41], x1[54], x2[41], x2[54],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[42], x1[53], x2[42], x2[53],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[43], x1[52], x2[43], x2[52],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[44], x1[51], x2[44], x2[51],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[45], x1[50], x2[45], x2[50],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[46], x1[49], x2[46], x2[49],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x1[47], x1[48], x2[47], x2[48],
+                        *__rounding, cos_bit);
+  x2[56] = x1[56];
+  x2[57] = x1[57];
+  x2[58] = x1[58];
+  x2[59] = x1[59];
+  x2[60] = x1[60];
+  x2[61] = x1[61];
+  x2[62] = x1[62];
+  x2[63] = x1[63];
+}
+// Stage 3 of the forward 64-point DCT butterfly network (AVX2):
+// add/sub of x2[0..7] against x2[15..8]; pass-through of x2[16..19] and
+// x2[28..31] with (+-cos)(pi/4) rotations on x2[20..27]; and add/sub of the
+// symmetric halves of the odd block x2[32..63].
+static INLINE void fdct64_stage3_avx2(__m256i *x2, __m256i *x3,
+                                      __m256i *cospi_m32, __m256i *cospi_p32,
+                                      const __m256i *__rounding,
+                                      int8_t cos_bit) {
+  x3[0] = _mm256_add_epi32(x2[0], x2[15]);
+  x3[15] = _mm256_sub_epi32(x2[0], x2[15]);
+  x3[1] = _mm256_add_epi32(x2[1], x2[14]);
+  x3[14] = _mm256_sub_epi32(x2[1], x2[14]);
+  x3[2] = _mm256_add_epi32(x2[2], x2[13]);
+  x3[13] = _mm256_sub_epi32(x2[2], x2[13]);
+  x3[3] = _mm256_add_epi32(x2[3], x2[12]);
+  x3[12] = _mm256_sub_epi32(x2[3], x2[12]);
+  x3[4] = _mm256_add_epi32(x2[4], x2[11]);
+  x3[11] = _mm256_sub_epi32(x2[4], x2[11]);
+  x3[5] = _mm256_add_epi32(x2[5], x2[10]);
+  x3[10] = _mm256_sub_epi32(x2[5], x2[10]);
+  x3[6] = _mm256_add_epi32(x2[6], x2[9]);
+  x3[9] = _mm256_sub_epi32(x2[6], x2[9]);
+  x3[7] = _mm256_add_epi32(x2[7], x2[8]);
+  x3[8] = _mm256_sub_epi32(x2[7], x2[8]);
+  x3[16] = x2[16];
+  x3[17] = x2[17];
+  x3[18] = x2[18];
+  x3[19] = x2[19];
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[20], x2[27], x3[20], x3[27],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[21], x2[26], x3[21], x3[26],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[22], x2[25], x3[22], x3[25],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x2[23], x2[24], x3[23], x3[24],
+                        *__rounding, cos_bit);
+  x3[28] = x2[28];
+  x3[29] = x2[29];
+  x3[30] = x2[30];
+  x3[31] = x2[31];
+  x3[32] = _mm256_add_epi32(x2[32], x2[47]);
+  x3[47] = _mm256_sub_epi32(x2[32], x2[47]);
+  x3[33] = _mm256_add_epi32(x2[33], x2[46]);
+  x3[46] = _mm256_sub_epi32(x2[33], x2[46]);
+  x3[34] = _mm256_add_epi32(x2[34], x2[45]);
+  x3[45] = _mm256_sub_epi32(x2[34], x2[45]);
+  x3[35] = _mm256_add_epi32(x2[35], x2[44]);
+  x3[44] = _mm256_sub_epi32(x2[35], x2[44]);
+  x3[36] = _mm256_add_epi32(x2[36], x2[43]);
+  x3[43] = _mm256_sub_epi32(x2[36], x2[43]);
+  x3[37] = _mm256_add_epi32(x2[37], x2[42]);
+  x3[42] = _mm256_sub_epi32(x2[37], x2[42]);
+  x3[38] = _mm256_add_epi32(x2[38], x2[41]);
+  x3[41] = _mm256_sub_epi32(x2[38], x2[41]);
+  x3[39] = _mm256_add_epi32(x2[39], x2[40]);
+  x3[40] = _mm256_sub_epi32(x2[39], x2[40]);
+  x3[48] = _mm256_sub_epi32(x2[63], x2[48]);
+  x3[63] = _mm256_add_epi32(x2[63], x2[48]);
+  x3[49] = _mm256_sub_epi32(x2[62], x2[49]);
+  x3[62] = _mm256_add_epi32(x2[62], x2[49]);
+  x3[50] = _mm256_sub_epi32(x2[61], x2[50]);
+  x3[61] = _mm256_add_epi32(x2[61], x2[50]);
+  x3[51] = _mm256_sub_epi32(x2[60], x2[51]);
+  x3[60] = _mm256_add_epi32(x2[60], x2[51]);
+  x3[52] = _mm256_sub_epi32(x2[59], x2[52]);
+  x3[59] = _mm256_add_epi32(x2[59], x2[52]);
+  x3[53] = _mm256_sub_epi32(x2[58], x2[53]);
+  x3[58] = _mm256_add_epi32(x2[58], x2[53]);
+  x3[54] = _mm256_sub_epi32(x2[57], x2[54]);
+  x3[57] = _mm256_add_epi32(x2[57], x2[54]);
+  x3[55] = _mm256_sub_epi32(x2[56], x2[55]);
+  x3[56] = _mm256_add_epi32(x2[56], x2[55]);
+}
+// Stage 4 of the forward 64-point DCT butterfly network (AVX2):
+// add/sub on x3[0..7]; pi/4 rotations on x3[10..13]; add/sub over the
+// x3[16..31] halves; and, in the odd block, pass-through of x3[32..35],
+// x3[44..51] and x3[60..63] with cospi16/cospi48 rotations on x3[36..43]
+// (and their mirror partners x3[52..59]).
+static INLINE void fdct64_stage4_avx2(__m256i *x3, __m256i *x4,
+                                      __m256i *cospi_m32, __m256i *cospi_p32,
+                                      __m256i *cospi_m16, __m256i *cospi_p48,
+                                      __m256i *cospi_m48,
+                                      const __m256i *__rounding,
+                                      int8_t cos_bit) {
+  x4[0] = _mm256_add_epi32(x3[0], x3[7]);
+  x4[7] = _mm256_sub_epi32(x3[0], x3[7]);
+  x4[1] = _mm256_add_epi32(x3[1], x3[6]);
+  x4[6] = _mm256_sub_epi32(x3[1], x3[6]);
+  x4[2] = _mm256_add_epi32(x3[2], x3[5]);
+  x4[5] = _mm256_sub_epi32(x3[2], x3[5]);
+  x4[3] = _mm256_add_epi32(x3[3], x3[4]);
+  x4[4] = _mm256_sub_epi32(x3[3], x3[4]);
+  x4[8] = x3[8];
+  x4[9] = x3[9];
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x3[10], x3[13], x4[10], x4[13],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x3[11], x3[12], x4[11], x4[12],
+                        *__rounding, cos_bit);
+  x4[14] = x3[14];
+  x4[15] = x3[15];
+  x4[16] = _mm256_add_epi32(x3[16], x3[23]);
+  x4[23] = _mm256_sub_epi32(x3[16], x3[23]);
+  x4[17] = _mm256_add_epi32(x3[17], x3[22]);
+  x4[22] = _mm256_sub_epi32(x3[17], x3[22]);
+  x4[18] = _mm256_add_epi32(x3[18], x3[21]);
+  x4[21] = _mm256_sub_epi32(x3[18], x3[21]);
+  x4[19] = _mm256_add_epi32(x3[19], x3[20]);
+  x4[20] = _mm256_sub_epi32(x3[19], x3[20]);
+  x4[24] = _mm256_sub_epi32(x3[31], x3[24]);
+  x4[31] = _mm256_add_epi32(x3[31], x3[24]);
+  x4[25] = _mm256_sub_epi32(x3[30], x3[25]);
+  x4[30] = _mm256_add_epi32(x3[30], x3[25]);
+  x4[26] = _mm256_sub_epi32(x3[29], x3[26]);
+  x4[29] = _mm256_add_epi32(x3[29], x3[26]);
+  x4[27] = _mm256_sub_epi32(x3[28], x3[27]);
+  x4[28] = _mm256_add_epi32(x3[28], x3[27]);
+  x4[32] = x3[32];
+  x4[33] = x3[33];
+  x4[34] = x3[34];
+  x4[35] = x3[35];
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[36], x3[59], x4[36], x4[59],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[37], x3[58], x4[37], x4[58],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[38], x3[57], x4[38], x4[57],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x3[39], x3[56], x4[39], x4[56],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[40], x3[55], x4[40], x4[55],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[41], x3[54], x4[41], x4[54],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[42], x3[53], x4[42], x4[53],
+                        *__rounding, cos_bit);
+  btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x3[43], x3[52], x4[43], x4[52],
+                        *__rounding, cos_bit);
+  x4[44] = x3[44];
+  x4[45] = x3[45];
+  x4[46] = x3[46];
+  x4[47] = x3[47];
+  x4[48] = x3[48];
+  x4[49] = x3[49];
+  x4[50] = x3[50];
+  x4[51] = x3[51];
+  x4[60] = x3[60];
+  x4[61] = x3[61];
+  x4[62] = x3[62];
+  x4[63] = x3[63];
+}
+static INLINE void fdct64_stage5_avx2(__m256i *x4, __m256i *x5,
+ __m256i *cospi_m32, __m256i *cospi_p32,
+ __m256i *cospi_m16, __m256i *cospi_p48,
+ __m256i *cospi_m48,
+ const __m256i *__rounding,
+ int8_t cos_bit) {
+ x5[0] = _mm256_add_epi32(x4[0], x4[3]);
+ x5[3] = _mm256_sub_epi32(x4[0], x4[3]);
+ x5[1] = _mm256_add_epi32(x4[1], x4[2]);
+ x5[2] = _mm256_sub_epi32(x4[1], x4[2]);
+ x5[4] = x4[4];
+ btf_32_type0_avx2_new(*cospi_m32, *cospi_p32, x4[5], x4[6], x5[5], x5[6],
+ *__rounding, cos_bit);
+ x5[7] = x4[7];
+ x5[8] = _mm256_add_epi32(x4[8], x4[11]);
+ x5[11] = _mm256_sub_epi32(x4[8], x4[11]);
+ x5[9] = _mm256_add_epi32(x4[9], x4[10]);
+ x5[10] = _mm256_sub_epi32(x4[9], x4[10]);
+ x5[12] = _mm256_sub_epi32(x4[15], x4[12]);
+ x5[15] = _mm256_add_epi32(x4[15], x4[12]);
+ x5[13] = _mm256_sub_epi32(x4[14], x4[13]);
+ x5[14] = _mm256_add_epi32(x4[14], x4[13]);
+ x5[16] = x4[16];
+ x5[17] = x4[17];
+ btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x4[18], x4[29], x5[18], x5[29],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x4[19], x4[28], x5[19], x5[28],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x4[20], x4[27], x5[20], x5[27],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x4[21], x4[26], x5[21], x5[26],
+ *__rounding, cos_bit);
+ x5[22] = x4[22];
+ x5[23] = x4[23];
+ x5[24] = x4[24];
+ x5[25] = x4[25];
+ x5[30] = x4[30];
+ x5[31] = x4[31];
+ x5[32] = _mm256_add_epi32(x4[32], x4[39]);
+ x5[39] = _mm256_sub_epi32(x4[32], x4[39]);
+ x5[33] = _mm256_add_epi32(x4[33], x4[38]);
+ x5[38] = _mm256_sub_epi32(x4[33], x4[38]);
+ x5[34] = _mm256_add_epi32(x4[34], x4[37]);
+ x5[37] = _mm256_sub_epi32(x4[34], x4[37]);
+ x5[35] = _mm256_add_epi32(x4[35], x4[36]);
+ x5[36] = _mm256_sub_epi32(x4[35], x4[36]);
+ x5[40] = _mm256_sub_epi32(x4[47], x4[40]);
+ x5[47] = _mm256_add_epi32(x4[47], x4[40]);
+ x5[41] = _mm256_sub_epi32(x4[46], x4[41]);
+ x5[46] = _mm256_add_epi32(x4[46], x4[41]);
+ x5[42] = _mm256_sub_epi32(x4[45], x4[42]);
+ x5[45] = _mm256_add_epi32(x4[45], x4[42]);
+ x5[43] = _mm256_sub_epi32(x4[44], x4[43]);
+ x5[44] = _mm256_add_epi32(x4[44], x4[43]);
+ x5[48] = _mm256_add_epi32(x4[48], x4[55]);
+ x5[55] = _mm256_sub_epi32(x4[48], x4[55]);
+ x5[49] = _mm256_add_epi32(x4[49], x4[54]);
+ x5[54] = _mm256_sub_epi32(x4[49], x4[54]);
+ x5[50] = _mm256_add_epi32(x4[50], x4[53]);
+ x5[53] = _mm256_sub_epi32(x4[50], x4[53]);
+ x5[51] = _mm256_add_epi32(x4[51], x4[52]);
+ x5[52] = _mm256_sub_epi32(x4[51], x4[52]);
+ x5[56] = _mm256_sub_epi32(x4[63], x4[56]);
+ x5[63] = _mm256_add_epi32(x4[63], x4[56]);
+ x5[57] = _mm256_sub_epi32(x4[62], x4[57]);
+ x5[62] = _mm256_add_epi32(x4[62], x4[57]);
+ x5[58] = _mm256_sub_epi32(x4[61], x4[58]);
+ x5[61] = _mm256_add_epi32(x4[61], x4[58]);
+ x5[59] = _mm256_sub_epi32(x4[60], x4[59]);
+ x5[60] = _mm256_add_epi32(x4[60], x4[59]);
+}
+static INLINE void fdct64_stage6_avx2(
+ __m256i *x5, __m256i *x6, __m256i *cospi_p16, __m256i *cospi_p32,
+ __m256i *cospi_m16, __m256i *cospi_p48, __m256i *cospi_m48,
+ __m256i *cospi_m08, __m256i *cospi_p56, __m256i *cospi_m56,
+ __m256i *cospi_m40, __m256i *cospi_p24, __m256i *cospi_m24,
+ const __m256i *__rounding, int8_t cos_bit) {
+ btf_32_type0_avx2_new(*cospi_p32, *cospi_p32, x5[0], x5[1], x6[0], x6[1],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_p16, *cospi_p48, x5[3], x5[2], x6[2], x6[3],
+ *__rounding, cos_bit);
+ x6[4] = _mm256_add_epi32(x5[4], x5[5]);
+ x6[5] = _mm256_sub_epi32(x5[4], x5[5]);
+ x6[6] = _mm256_sub_epi32(x5[7], x5[6]);
+ x6[7] = _mm256_add_epi32(x5[7], x5[6]);
+ x6[8] = x5[8];
+ btf_32_type0_avx2_new(*cospi_m16, *cospi_p48, x5[9], x5[14], x6[9], x6[14],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m48, *cospi_m16, x5[10], x5[13], x6[10], x6[13],
+ *__rounding, cos_bit);
+ x6[11] = x5[11];
+ x6[12] = x5[12];
+ x6[15] = x5[15];
+ x6[16] = _mm256_add_epi32(x5[16], x5[19]);
+ x6[19] = _mm256_sub_epi32(x5[16], x5[19]);
+ x6[17] = _mm256_add_epi32(x5[17], x5[18]);
+ x6[18] = _mm256_sub_epi32(x5[17], x5[18]);
+ x6[20] = _mm256_sub_epi32(x5[23], x5[20]);
+ x6[23] = _mm256_add_epi32(x5[23], x5[20]);
+ x6[21] = _mm256_sub_epi32(x5[22], x5[21]);
+ x6[22] = _mm256_add_epi32(x5[22], x5[21]);
+ x6[24] = _mm256_add_epi32(x5[24], x5[27]);
+ x6[27] = _mm256_sub_epi32(x5[24], x5[27]);
+ x6[25] = _mm256_add_epi32(x5[25], x5[26]);
+ x6[26] = _mm256_sub_epi32(x5[25], x5[26]);
+ x6[28] = _mm256_sub_epi32(x5[31], x5[28]);
+ x6[31] = _mm256_add_epi32(x5[31], x5[28]);
+ x6[29] = _mm256_sub_epi32(x5[30], x5[29]);
+ x6[30] = _mm256_add_epi32(x5[30], x5[29]);
+ x6[32] = x5[32];
+ x6[33] = x5[33];
+ btf_32_type0_avx2_new(*cospi_m08, *cospi_p56, x5[34], x5[61], x6[34], x6[61],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m08, *cospi_p56, x5[35], x5[60], x6[35], x6[60],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m56, *cospi_m08, x5[36], x5[59], x6[36], x6[59],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m56, *cospi_m08, x5[37], x5[58], x6[37], x6[58],
+ *__rounding, cos_bit);
+ x6[38] = x5[38];
+ x6[39] = x5[39];
+ x6[40] = x5[40];
+ x6[41] = x5[41];
+ btf_32_type0_avx2_new(*cospi_m40, *cospi_p24, x5[42], x5[53], x6[42], x6[53],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m40, *cospi_p24, x5[43], x5[52], x6[43], x6[52],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m24, *cospi_m40, x5[44], x5[51], x6[44], x6[51],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m24, *cospi_m40, x5[45], x5[50], x6[45], x6[50],
+ *__rounding, cos_bit);
+ x6[46] = x5[46];
+ x6[47] = x5[47];
+ x6[48] = x5[48];
+ x6[49] = x5[49];
+ x6[54] = x5[54];
+ x6[55] = x5[55];
+ x6[56] = x5[56];
+ x6[57] = x5[57];
+ x6[62] = x5[62];
+ x6[63] = x5[63];
+}
+static INLINE void fdct64_stage7_avx2(__m256i *x6, __m256i *x7,
+ __m256i *cospi_p08, __m256i *cospi_p56,
+ __m256i *cospi_p40, __m256i *cospi_p24,
+ __m256i *cospi_m08, __m256i *cospi_m56,
+ __m256i *cospi_m40, __m256i *cospi_m24,
+ const __m256i *__rounding,
+ int8_t cos_bit) {
+ x7[0] = x6[0];
+ x7[1] = x6[1];
+ x7[2] = x6[2];
+ x7[3] = x6[3];
+ btf_32_type0_avx2_new(*cospi_p08, *cospi_p56, x6[7], x6[4], x7[4], x7[7],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_p40, *cospi_p24, x6[6], x6[5], x7[5], x7[6],
+ *__rounding, cos_bit);
+ x7[8] = _mm256_add_epi32(x6[8], x6[9]);
+ x7[9] = _mm256_sub_epi32(x6[8], x6[9]);
+ x7[10] = _mm256_sub_epi32(x6[11], x6[10]);
+ x7[11] = _mm256_add_epi32(x6[11], x6[10]);
+ x7[12] = _mm256_add_epi32(x6[12], x6[13]);
+ x7[13] = _mm256_sub_epi32(x6[12], x6[13]);
+ x7[14] = _mm256_sub_epi32(x6[15], x6[14]);
+ x7[15] = _mm256_add_epi32(x6[15], x6[14]);
+ x7[16] = x6[16];
+ btf_32_type0_avx2_new(*cospi_m08, *cospi_p56, x6[17], x6[30], x7[17], x7[30],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m56, *cospi_m08, x6[18], x6[29], x7[18], x7[29],
+ *__rounding, cos_bit);
+ x7[19] = x6[19];
+ x7[20] = x6[20];
+ btf_32_type0_avx2_new(*cospi_m40, *cospi_p24, x6[21], x6[26], x7[21], x7[26],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(*cospi_m24, *cospi_m40, x6[22], x6[25], x7[22], x7[25],
+ *__rounding, cos_bit);
+ x7[23] = x6[23];
+ x7[24] = x6[24];
+ x7[27] = x6[27];
+ x7[28] = x6[28];
+ x7[31] = x6[31];
+ x7[32] = _mm256_add_epi32(x6[32], x6[35]);
+ x7[35] = _mm256_sub_epi32(x6[32], x6[35]);
+ x7[33] = _mm256_add_epi32(x6[33], x6[34]);
+ x7[34] = _mm256_sub_epi32(x6[33], x6[34]);
+ x7[36] = _mm256_sub_epi32(x6[39], x6[36]);
+ x7[39] = _mm256_add_epi32(x6[39], x6[36]);
+ x7[37] = _mm256_sub_epi32(x6[38], x6[37]);
+ x7[38] = _mm256_add_epi32(x6[38], x6[37]);
+ x7[40] = _mm256_add_epi32(x6[40], x6[43]);
+ x7[43] = _mm256_sub_epi32(x6[40], x6[43]);
+ x7[41] = _mm256_add_epi32(x6[41], x6[42]);
+ x7[42] = _mm256_sub_epi32(x6[41], x6[42]);
+ x7[44] = _mm256_sub_epi32(x6[47], x6[44]);
+ x7[47] = _mm256_add_epi32(x6[47], x6[44]);
+ x7[45] = _mm256_sub_epi32(x6[46], x6[45]);
+ x7[46] = _mm256_add_epi32(x6[46], x6[45]);
+ x7[48] = _mm256_add_epi32(x6[48], x6[51]);
+ x7[51] = _mm256_sub_epi32(x6[48], x6[51]);
+ x7[49] = _mm256_add_epi32(x6[49], x6[50]);
+ x7[50] = _mm256_sub_epi32(x6[49], x6[50]);
+ x7[52] = _mm256_sub_epi32(x6[55], x6[52]);
+ x7[55] = _mm256_add_epi32(x6[55], x6[52]);
+ x7[53] = _mm256_sub_epi32(x6[54], x6[53]);
+ x7[54] = _mm256_add_epi32(x6[54], x6[53]);
+ x7[56] = _mm256_add_epi32(x6[56], x6[59]);
+ x7[59] = _mm256_sub_epi32(x6[56], x6[59]);
+ x7[57] = _mm256_add_epi32(x6[57], x6[58]);
+ x7[58] = _mm256_sub_epi32(x6[57], x6[58]);
+ x7[60] = _mm256_sub_epi32(x6[63], x6[60]);
+ x7[63] = _mm256_add_epi32(x6[63], x6[60]);
+ x7[61] = _mm256_sub_epi32(x6[62], x6[61]);
+ x7[62] = _mm256_add_epi32(x6[62], x6[61]);
+}
+static INLINE void fdct64_stage8_avx2(__m256i *x7, __m256i *x8,
+ const int32_t *cospi,
+ const __m256i *__rounding,
+ int8_t cos_bit) {
+ __m256i cospi_p60 = _mm256_set1_epi32(cospi[60]);
+ __m256i cospi_p04 = _mm256_set1_epi32(cospi[4]);
+ __m256i cospi_p28 = _mm256_set1_epi32(cospi[28]);
+ __m256i cospi_p36 = _mm256_set1_epi32(cospi[36]);
+ __m256i cospi_p44 = _mm256_set1_epi32(cospi[44]);
+ __m256i cospi_p20 = _mm256_set1_epi32(cospi[20]);
+ __m256i cospi_p12 = _mm256_set1_epi32(cospi[12]);
+ __m256i cospi_p52 = _mm256_set1_epi32(cospi[52]);
+ __m256i cospi_m04 = _mm256_set1_epi32(-cospi[4]);
+ __m256i cospi_m60 = _mm256_set1_epi32(-cospi[60]);
+ __m256i cospi_m36 = _mm256_set1_epi32(-cospi[36]);
+ __m256i cospi_m28 = _mm256_set1_epi32(-cospi[28]);
+ __m256i cospi_m20 = _mm256_set1_epi32(-cospi[20]);
+ __m256i cospi_m44 = _mm256_set1_epi32(-cospi[44]);
+ __m256i cospi_m52 = _mm256_set1_epi32(-cospi[52]);
+ __m256i cospi_m12 = _mm256_set1_epi32(-cospi[12]);
+
+ x8[0] = x7[0];
+ x8[1] = x7[1];
+ x8[2] = x7[2];
+ x8[3] = x7[3];
+ x8[4] = x7[4];
+ x8[5] = x7[5];
+ x8[6] = x7[6];
+ x8[7] = x7[7];
+
+ btf_32_type0_avx2_new(cospi_p04, cospi_p60, x7[15], x7[8], x8[8], x8[15],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p36, cospi_p28, x7[14], x7[9], x8[9], x8[14],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p20, cospi_p44, x7[13], x7[10], x8[10], x8[13],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p52, cospi_p12, x7[12], x7[11], x8[11], x8[12],
+ *__rounding, cos_bit);
+ x8[16] = _mm256_add_epi32(x7[16], x7[17]);
+ x8[17] = _mm256_sub_epi32(x7[16], x7[17]);
+ x8[18] = _mm256_sub_epi32(x7[19], x7[18]);
+ x8[19] = _mm256_add_epi32(x7[19], x7[18]);
+ x8[20] = _mm256_add_epi32(x7[20], x7[21]);
+ x8[21] = _mm256_sub_epi32(x7[20], x7[21]);
+ x8[22] = _mm256_sub_epi32(x7[23], x7[22]);
+ x8[23] = _mm256_add_epi32(x7[23], x7[22]);
+ x8[24] = _mm256_add_epi32(x7[24], x7[25]);
+ x8[25] = _mm256_sub_epi32(x7[24], x7[25]);
+ x8[26] = _mm256_sub_epi32(x7[27], x7[26]);
+ x8[27] = _mm256_add_epi32(x7[27], x7[26]);
+ x8[28] = _mm256_add_epi32(x7[28], x7[29]);
+ x8[29] = _mm256_sub_epi32(x7[28], x7[29]);
+ x8[30] = _mm256_sub_epi32(x7[31], x7[30]);
+ x8[31] = _mm256_add_epi32(x7[31], x7[30]);
+ x8[32] = x7[32];
+ btf_32_type0_avx2_new(cospi_m04, cospi_p60, x7[33], x7[62], x8[33], x8[62],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_m60, cospi_m04, x7[34], x7[61], x8[34], x8[61],
+ *__rounding, cos_bit);
+ x8[35] = x7[35];
+ x8[36] = x7[36];
+ btf_32_type0_avx2_new(cospi_m36, cospi_p28, x7[37], x7[58], x8[37], x8[58],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_m28, cospi_m36, x7[38], x7[57], x8[38], x8[57],
+ *__rounding, cos_bit);
+ x8[39] = x7[39];
+ x8[40] = x7[40];
+ btf_32_type0_avx2_new(cospi_m20, cospi_p44, x7[41], x7[54], x8[41], x8[54],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_m44, cospi_m20, x7[42], x7[53], x8[42], x8[53],
+ *__rounding, cos_bit);
+ x8[43] = x7[43];
+ x8[44] = x7[44];
+ btf_32_type0_avx2_new(cospi_m52, cospi_p12, x7[45], x7[50], x8[45], x8[50],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_m12, cospi_m52, x7[46], x7[49], x8[46], x8[49],
+ *__rounding, cos_bit);
+ x8[47] = x7[47];
+ x8[48] = x7[48];
+ x8[51] = x7[51];
+ x8[52] = x7[52];
+ x8[55] = x7[55];
+ x8[56] = x7[56];
+ x8[59] = x7[59];
+ x8[60] = x7[60];
+ x8[63] = x7[63];
+}
+static INLINE void fdct64_stage9_avx2(__m256i *x8, __m256i *x9,
+ const int32_t *cospi,
+ const __m256i *__rounding,
+ int8_t cos_bit) {
+ __m256i cospi_p62 = _mm256_set1_epi32(cospi[62]);
+ __m256i cospi_p02 = _mm256_set1_epi32(cospi[2]);
+ __m256i cospi_p30 = _mm256_set1_epi32(cospi[30]);
+ __m256i cospi_p34 = _mm256_set1_epi32(cospi[34]);
+ __m256i cospi_p46 = _mm256_set1_epi32(cospi[46]);
+ __m256i cospi_p18 = _mm256_set1_epi32(cospi[18]);
+ __m256i cospi_p14 = _mm256_set1_epi32(cospi[14]);
+ __m256i cospi_p50 = _mm256_set1_epi32(cospi[50]);
+ __m256i cospi_p54 = _mm256_set1_epi32(cospi[54]);
+ __m256i cospi_p10 = _mm256_set1_epi32(cospi[10]);
+ __m256i cospi_p22 = _mm256_set1_epi32(cospi[22]);
+ __m256i cospi_p42 = _mm256_set1_epi32(cospi[42]);
+ __m256i cospi_p38 = _mm256_set1_epi32(cospi[38]);
+ __m256i cospi_p26 = _mm256_set1_epi32(cospi[26]);
+ __m256i cospi_p06 = _mm256_set1_epi32(cospi[6]);
+ __m256i cospi_p58 = _mm256_set1_epi32(cospi[58]);
+
+ x9[0] = x8[0];
+ x9[1] = x8[1];
+ x9[2] = x8[2];
+ x9[3] = x8[3];
+ x9[4] = x8[4];
+ x9[5] = x8[5];
+ x9[6] = x8[6];
+ x9[7] = x8[7];
+ x9[8] = x8[8];
+ x9[9] = x8[9];
+ x9[10] = x8[10];
+ x9[11] = x8[11];
+ x9[12] = x8[12];
+ x9[13] = x8[13];
+ x9[14] = x8[14];
+ x9[15] = x8[15];
+ btf_32_type0_avx2_new(cospi_p02, cospi_p62, x8[31], x8[16], x9[16], x9[31],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p34, cospi_p30, x8[30], x8[17], x9[17], x9[30],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p18, cospi_p46, x8[29], x8[18], x9[18], x9[29],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p50, cospi_p14, x8[28], x8[19], x9[19], x9[28],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p10, cospi_p54, x8[27], x8[20], x9[20], x9[27],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p42, cospi_p22, x8[26], x8[21], x9[21], x9[26],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p26, cospi_p38, x8[25], x8[22], x9[22], x9[25],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p58, cospi_p06, x8[24], x8[23], x9[23], x9[24],
+ *__rounding, cos_bit);
+ x9[32] = _mm256_add_epi32(x8[32], x8[33]);
+ x9[33] = _mm256_sub_epi32(x8[32], x8[33]);
+ x9[34] = _mm256_sub_epi32(x8[35], x8[34]);
+ x9[35] = _mm256_add_epi32(x8[35], x8[34]);
+ x9[36] = _mm256_add_epi32(x8[36], x8[37]);
+ x9[37] = _mm256_sub_epi32(x8[36], x8[37]);
+ x9[38] = _mm256_sub_epi32(x8[39], x8[38]);
+ x9[39] = _mm256_add_epi32(x8[39], x8[38]);
+ x9[40] = _mm256_add_epi32(x8[40], x8[41]);
+ x9[41] = _mm256_sub_epi32(x8[40], x8[41]);
+ x9[42] = _mm256_sub_epi32(x8[43], x8[42]);
+ x9[43] = _mm256_add_epi32(x8[43], x8[42]);
+ x9[44] = _mm256_add_epi32(x8[44], x8[45]);
+ x9[45] = _mm256_sub_epi32(x8[44], x8[45]);
+ x9[46] = _mm256_sub_epi32(x8[47], x8[46]);
+ x9[47] = _mm256_add_epi32(x8[47], x8[46]);
+ x9[48] = _mm256_add_epi32(x8[48], x8[49]);
+ x9[49] = _mm256_sub_epi32(x8[48], x8[49]);
+ x9[50] = _mm256_sub_epi32(x8[51], x8[50]);
+ x9[51] = _mm256_add_epi32(x8[51], x8[50]);
+ x9[52] = _mm256_add_epi32(x8[52], x8[53]);
+ x9[53] = _mm256_sub_epi32(x8[52], x8[53]);
+ x9[54] = _mm256_sub_epi32(x8[55], x8[54]);
+ x9[55] = _mm256_add_epi32(x8[55], x8[54]);
+ x9[56] = _mm256_add_epi32(x8[56], x8[57]);
+ x9[57] = _mm256_sub_epi32(x8[56], x8[57]);
+ x9[58] = _mm256_sub_epi32(x8[59], x8[58]);
+ x9[59] = _mm256_add_epi32(x8[59], x8[58]);
+ x9[60] = _mm256_add_epi32(x8[60], x8[61]);
+ x9[61] = _mm256_sub_epi32(x8[60], x8[61]);
+ x9[62] = _mm256_sub_epi32(x8[63], x8[62]);
+ x9[63] = _mm256_add_epi32(x8[63], x8[62]);
+}
+static INLINE void fdct64_stage10_avx2(__m256i *x9, __m256i *x10,
+ const int32_t *cospi,
+ const __m256i *__rounding,
+ int8_t cos_bit) {
+ __m256i cospi_p63 = _mm256_set1_epi32(cospi[63]);
+ __m256i cospi_p01 = _mm256_set1_epi32(cospi[1]);
+ __m256i cospi_p31 = _mm256_set1_epi32(cospi[31]);
+ __m256i cospi_p33 = _mm256_set1_epi32(cospi[33]);
+ __m256i cospi_p47 = _mm256_set1_epi32(cospi[47]);
+ __m256i cospi_p17 = _mm256_set1_epi32(cospi[17]);
+ __m256i cospi_p15 = _mm256_set1_epi32(cospi[15]);
+ __m256i cospi_p49 = _mm256_set1_epi32(cospi[49]);
+ __m256i cospi_p55 = _mm256_set1_epi32(cospi[55]);
+ __m256i cospi_p09 = _mm256_set1_epi32(cospi[9]);
+ __m256i cospi_p23 = _mm256_set1_epi32(cospi[23]);
+ __m256i cospi_p41 = _mm256_set1_epi32(cospi[41]);
+ __m256i cospi_p39 = _mm256_set1_epi32(cospi[39]);
+ __m256i cospi_p25 = _mm256_set1_epi32(cospi[25]);
+ __m256i cospi_p07 = _mm256_set1_epi32(cospi[7]);
+ __m256i cospi_p57 = _mm256_set1_epi32(cospi[57]);
+ __m256i cospi_p59 = _mm256_set1_epi32(cospi[59]);
+ __m256i cospi_p05 = _mm256_set1_epi32(cospi[5]);
+ __m256i cospi_p27 = _mm256_set1_epi32(cospi[27]);
+ __m256i cospi_p37 = _mm256_set1_epi32(cospi[37]);
+ __m256i cospi_p43 = _mm256_set1_epi32(cospi[43]);
+ __m256i cospi_p21 = _mm256_set1_epi32(cospi[21]);
+ __m256i cospi_p11 = _mm256_set1_epi32(cospi[11]);
+ __m256i cospi_p53 = _mm256_set1_epi32(cospi[53]);
+ __m256i cospi_p51 = _mm256_set1_epi32(cospi[51]);
+ __m256i cospi_p13 = _mm256_set1_epi32(cospi[13]);
+ __m256i cospi_p19 = _mm256_set1_epi32(cospi[19]);
+ __m256i cospi_p45 = _mm256_set1_epi32(cospi[45]);
+ __m256i cospi_p35 = _mm256_set1_epi32(cospi[35]);
+ __m256i cospi_p29 = _mm256_set1_epi32(cospi[29]);
+ __m256i cospi_p03 = _mm256_set1_epi32(cospi[3]);
+ __m256i cospi_p61 = _mm256_set1_epi32(cospi[61]);
+
+ x10[0] = x9[0];
+ x10[1] = x9[1];
+ x10[2] = x9[2];
+ x10[3] = x9[3];
+ x10[4] = x9[4];
+ x10[5] = x9[5];
+ x10[6] = x9[6];
+ x10[7] = x9[7];
+ x10[8] = x9[8];
+ x10[9] = x9[9];
+ x10[10] = x9[10];
+ x10[11] = x9[11];
+ x10[12] = x9[12];
+ x10[13] = x9[13];
+ x10[14] = x9[14];
+ x10[15] = x9[15];
+ x10[16] = x9[16];
+ x10[17] = x9[17];
+ x10[18] = x9[18];
+ x10[19] = x9[19];
+ x10[20] = x9[20];
+ x10[21] = x9[21];
+ x10[22] = x9[22];
+ x10[23] = x9[23];
+ x10[24] = x9[24];
+ x10[25] = x9[25];
+ x10[26] = x9[26];
+ x10[27] = x9[27];
+ x10[28] = x9[28];
+ x10[29] = x9[29];
+ x10[30] = x9[30];
+ x10[31] = x9[31];
+ btf_32_type0_avx2_new(cospi_p01, cospi_p63, x9[63], x9[32], x10[32], x10[63],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p33, cospi_p31, x9[62], x9[33], x10[33], x10[62],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p17, cospi_p47, x9[61], x9[34], x10[34], x10[61],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p49, cospi_p15, x9[60], x9[35], x10[35], x10[60],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p09, cospi_p55, x9[59], x9[36], x10[36], x10[59],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p41, cospi_p23, x9[58], x9[37], x10[37], x10[58],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p25, cospi_p39, x9[57], x9[38], x10[38], x10[57],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p57, cospi_p07, x9[56], x9[39], x10[39], x10[56],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p05, cospi_p59, x9[55], x9[40], x10[40], x10[55],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p37, cospi_p27, x9[54], x9[41], x10[41], x10[54],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p21, cospi_p43, x9[53], x9[42], x10[42], x10[53],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p53, cospi_p11, x9[52], x9[43], x10[43], x10[52],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p13, cospi_p51, x9[51], x9[44], x10[44], x10[51],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p45, cospi_p19, x9[50], x9[45], x10[45], x10[50],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p29, cospi_p35, x9[49], x9[46], x10[46], x10[49],
+ *__rounding, cos_bit);
+ btf_32_type0_avx2_new(cospi_p61, cospi_p03, x9[48], x9[47], x10[47], x10[48],
+ *__rounding, cos_bit);
+}
+static void fdct64_avx2(__m256i *input, __m256i *output, int8_t cos_bit,
+ const int instride, const int outstride) {
+ const int32_t *cospi = cospi_arr(cos_bit);
+ const __m256i __rounding = _mm256_set1_epi32(1 << (cos_bit - 1));
+ __m256i cospi_m32 = _mm256_set1_epi32(-cospi[32]);
+ __m256i cospi_p32 = _mm256_set1_epi32(cospi[32]);
+ __m256i cospi_m16 = _mm256_set1_epi32(-cospi[16]);
+ __m256i cospi_p48 = _mm256_set1_epi32(cospi[48]);
+ __m256i cospi_m48 = _mm256_set1_epi32(-cospi[48]);
+ __m256i cospi_p16 = _mm256_set1_epi32(cospi[16]);
+ __m256i cospi_m08 = _mm256_set1_epi32(-cospi[8]);
+ __m256i cospi_p56 = _mm256_set1_epi32(cospi[56]);
+ __m256i cospi_m56 = _mm256_set1_epi32(-cospi[56]);
+ __m256i cospi_m40 = _mm256_set1_epi32(-cospi[40]);
+ __m256i cospi_p24 = _mm256_set1_epi32(cospi[24]);
+ __m256i cospi_m24 = _mm256_set1_epi32(-cospi[24]);
+ __m256i cospi_p08 = _mm256_set1_epi32(cospi[8]);
+ __m256i cospi_p40 = _mm256_set1_epi32(cospi[40]);
+
+ int startidx = 0 * instride;
+ int endidx = 63 * instride;
+ // stage 1
+ __m256i x1[64];
+ x1[0] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[63] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[1] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[62] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[2] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[61] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[3] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[60] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[4] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[59] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[5] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[58] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[6] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[57] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[7] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[56] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[8] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[55] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[9] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[54] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[10] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[53] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[11] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[52] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[12] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[51] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[13] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[50] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[14] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[49] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[15] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[48] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[16] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[47] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[17] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[46] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[18] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[45] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[19] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[44] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[20] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[43] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[21] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[42] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[22] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[41] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[23] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[40] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[24] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[39] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[25] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[38] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[26] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[37] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[27] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[36] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[28] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[35] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[29] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[34] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[30] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[33] = _mm256_sub_epi32(input[startidx], input[endidx]);
+ startidx += instride;
+ endidx -= instride;
+ x1[31] = _mm256_add_epi32(input[startidx], input[endidx]);
+ x1[32] = _mm256_sub_epi32(input[startidx], input[endidx]);
+
+ // stage 2
+ __m256i x2[64];
+ fdct64_stage2_avx2(x1, x2, &cospi_m32, &cospi_p32, &__rounding, cos_bit);
+ // stage 3
+ fdct64_stage3_avx2(x2, x1, &cospi_m32, &cospi_p32, &__rounding, cos_bit);
+ // stage 4
+ fdct64_stage4_avx2(x1, x2, &cospi_m32, &cospi_p32, &cospi_m16, &cospi_p48,
+ &cospi_m48, &__rounding, cos_bit);
+ // stage 5
+ fdct64_stage5_avx2(x2, x1, &cospi_m32, &cospi_p32, &cospi_m16, &cospi_p48,
+ &cospi_m48, &__rounding, cos_bit);
+ // stage 6
+ fdct64_stage6_avx2(x1, x2, &cospi_p16, &cospi_p32, &cospi_m16, &cospi_p48,
+ &cospi_m48, &cospi_m08, &cospi_p56, &cospi_m56, &cospi_m40,
+ &cospi_p24, &cospi_m24, &__rounding, cos_bit);
+ // stage 7
+ fdct64_stage7_avx2(x2, x1, &cospi_p08, &cospi_p56, &cospi_p40, &cospi_p24,
+ &cospi_m08, &cospi_m56, &cospi_m40, &cospi_m24,
+ &__rounding, cos_bit);
+ // stage 8
+ fdct64_stage8_avx2(x1, x2, cospi, &__rounding, cos_bit);
+ // stage 9
+ fdct64_stage9_avx2(x2, x1, cospi, &__rounding, cos_bit);
+ // stage 10
+ fdct64_stage10_avx2(x1, x2, cospi, &__rounding, cos_bit);
+
+ startidx = 0 * outstride;
+ endidx = 63 * outstride;
+
+ // stage 11
+ output[startidx] = x2[0];
+ output[endidx] = x2[63];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[32];
+ output[endidx] = x2[31];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[16];
+ output[endidx] = x2[47];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[48];
+ output[endidx] = x2[15];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[8];
+ output[endidx] = x2[55];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[40];
+ output[endidx] = x2[23];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[24];
+ output[endidx] = x2[39];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[56];
+ output[endidx] = x2[7];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[4];
+ output[endidx] = x2[59];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[36];
+ output[endidx] = x2[27];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[20];
+ output[endidx] = x2[43];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[52];
+ output[endidx] = x2[11];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[12];
+ output[endidx] = x2[51];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[44];
+ output[endidx] = x2[19];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[28];
+ output[endidx] = x2[35];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[60];
+ output[endidx] = x2[3];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[2];
+ output[endidx] = x2[61];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[34];
+ output[endidx] = x2[29];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[18];
+ output[endidx] = x2[45];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[50];
+ output[endidx] = x2[13];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[10];
+ output[endidx] = x2[53];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[42];
+ output[endidx] = x2[21];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[26];
+ output[endidx] = x2[37];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[58];
+ output[endidx] = x2[5];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[6];
+ output[endidx] = x2[57];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[38];
+ output[endidx] = x2[25];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[22];
+ output[endidx] = x2[41];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[54];
+ output[endidx] = x2[9];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[14];
+ output[endidx] = x2[49];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[46];
+ output[endidx] = x2[17];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[30];
+ output[endidx] = x2[33];
+ startidx += outstride;
+ endidx -= outstride;
+ output[startidx] = x2[62];
+ output[endidx] = x2[1];
+}
+void av1_fwd_txfm2d_64x64_avx2(const int16_t *input, int32_t *output,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)bd;
+ (void)tx_type;
+ assert(tx_type == DCT_DCT);
+ const TX_SIZE tx_size = TX_64X64;
+ __m256i buf0[512], buf1[512];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[tx_size];
+ const int txw_idx = get_txw_idx(tx_size);
+ const int txh_idx = get_txh_idx(tx_size);
+ const int cos_bit_col = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ const int cos_bit_row = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int width = tx_size_wide[tx_size];
+ const int height = tx_size_high[tx_size];
+ const transform_1d_avx2 col_txfm = fdct64_avx2;
+ const transform_1d_avx2 row_txfm = fdct64_avx2;
+ const int width_div16 = (width >> 4);
+ const int width_div8 = (width >> 3);
+ int r, c;
+ for (int i = 0; i < width_div16; i++) {
+ load_buffer_16xn_avx2(input + (i << 4), &buf0[i << 1], stride, height,
+ width_div8, 0, 0);
+ round_shift_32_8xn_avx2(&buf0[i << 1], height, shift[0], width_div8);
+ round_shift_32_8xn_avx2(&buf0[(i << 1) + 1], height, shift[0], width_div8);
+ col_txfm(&buf0[i << 1], &buf0[i << 1], cos_bit_col, width_div8, width_div8);
+ col_txfm(&buf0[(i << 1) + 1], &buf0[(i << 1) + 1], cos_bit_col, width_div8,
+ width_div8);
+ round_shift_32_8xn_avx2(&buf0[i << 1], height, shift[1], width_div8);
+ round_shift_32_8xn_avx2(&buf0[(i << 1) + 1], height, shift[1], width_div8);
+ }
+
+ for (r = 0; r < height; r += 8) {
+ for (c = 0; c < width_div8; c++) {
+ fwd_txfm_transpose_8x8_avx2(&buf0[r * width_div8 + c],
+ &buf1[c * 8 * width_div8 + (r >> 3)],
+ width_div8, width_div8);
+ }
+ }
+
+ for (int i = 0; i < 2; i++) {
+ row_txfm(&buf1[i << 1], &buf0[i << 1], cos_bit_row, width_div8,
+ width_div16);
+ row_txfm(&buf1[(i << 1) + 1], &buf0[(i << 1) + 1], cos_bit_row, width_div8,
+ width_div16);
+ round_shift_32_8xn_avx2(&buf0[i << 1], (height >> 1), shift[2],
+ width_div16);
+ round_shift_32_8xn_avx2(&buf0[(i << 1) + 1], (height >> 1), shift[2],
+ width_div16);
+ }
+
+ store_buffer_avx2(buf0, output, 8, 128);
+}
diff --git a/third_party/aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c b/third_party/aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c
new file mode 100644
index 0000000000..158b4ae439
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/highbd_fwd_txfm_sse4.c
@@ -0,0 +1,2629 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+#include <assert.h>
+#include <smmintrin.h> /* SSE4.1 */
+
+#include "aom_dsp/txfm_common.h"
+#include "aom_dsp/x86/transpose_sse2.h"
+#include "aom_dsp/x86/txfm_common_sse2.h"
+#include "aom_ports/mem.h"
+#include "av1/common/av1_txfm.h"
+#include "av1/common/x86/highbd_txfm_utility_sse4.h"
+#include "av1/encoder/av1_fwd_txfm1d_cfg.h"
+#include "av1/encoder/x86/av1_txfm1d_sse4.h"
+#include "config/aom_config.h"
+#include "config/av1_rtcd.h"
+
+// Stores `out_size` vectors of four 32-bit coefficients to `out`, one
+// 128-bit store per vector, advancing `stride` int32 elements each time.
+// Uses aligned stores, so each destination address must be 16-byte aligned.
+static INLINE void store_output_w4(int32_t *const out, const __m128i *const in,
+                                   const int stride, const int out_size) {
+  for (int i = 0; i < out_size; ++i) {
+    _mm_store_si128((__m128i *)(out + i * stride), in[i]);
+  }
+}
+
+// 4x4 forward Walsh-Hadamard transform (lossless coding path).
+// Loads four rows of 4 int16 samples, widens them to 32 bits, then applies
+// the same butterfly pass twice with a transpose in between so the second
+// pass covers the other dimension. The results are scaled up by
+// UNIT_QUANT_SHIFT before being written contiguously to `output`.
+void av1_fwht4x4_sse4_1(const int16_t *input, tran_low_t *output, int stride) {
+  __m128i in[4];
+  in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+  in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+  in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+
+  // Convert to int32_t.
+  __m128i op[4];
+  op[0] = _mm_cvtepi16_epi32(in[0]);
+  op[1] = _mm_cvtepi16_epi32(in[1]);
+  op[2] = _mm_cvtepi16_epi32(in[2]);
+  op[3] = _mm_cvtepi16_epi32(in[3]);
+
+  for (int i = 0; i < 2; ++i) {
+    __m128i a1 = op[0];
+    __m128i b1 = op[1];
+    __m128i c1 = op[2];
+    __m128i d1 = op[3];
+    __m128i e1;
+
+    // WHT butterfly; matches the scalar av1_fwht4x4_c lifting steps.
+    a1 = _mm_add_epi32(a1, b1);  // a1 += b1
+    d1 = _mm_sub_epi32(d1, c1);  // d1 = d1 - c1
+    e1 = _mm_sub_epi32(a1, d1);  // e1 = (a1 - d1) >> 1
+    e1 = _mm_srai_epi32(e1, 1);
+    b1 = _mm_sub_epi32(e1, b1);  // b1 = e1 - b1
+    c1 = _mm_sub_epi32(e1, c1);  // c1 = e1 - c1
+    a1 = _mm_sub_epi32(a1, c1);  // a1 -= c1
+    d1 = _mm_add_epi32(d1, b1);  // d1 += b1
+
+    op[0] = a1;
+    op[1] = c1;
+    op[2] = d1;
+    op[3] = b1;
+
+    // Transpose after the first pass only, so pass 2 runs on columns.
+    if (i == 0) {
+      transpose_32bit_4x4(op, op);
+    }
+  }
+
+  op[0] = _mm_slli_epi32(op[0], UNIT_QUANT_SHIFT);
+  op[1] = _mm_slli_epi32(op[1], UNIT_QUANT_SHIFT);
+  op[2] = _mm_slli_epi32(op[2], UNIT_QUANT_SHIFT);
+  op[3] = _mm_slli_epi32(op[3], UNIT_QUANT_SHIFT);
+
+  _mm_storeu_si128((__m128i *)(output + 0), op[0]);
+  _mm_storeu_si128((__m128i *)(output + 4), op[1]);
+  _mm_storeu_si128((__m128i *)(output + 8), op[2]);
+  _mm_storeu_si128((__m128i *)(output + 12), op[3]);
+}
+
+// Loads a 4x4 block of int16 residuals into four 32-bit-lane vectors.
+// flipud reverses the row order, fliplr reverses the 4 samples within each
+// row (for the FLIPADST transform variants), and each widened value is
+// pre-scaled left by `shift` (the stage-0 shift of the transform config).
+static INLINE void load_buffer_4x4(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr,
+                                   int shift) {
+  if (!flipud) {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+  } else {
+    in[0] = _mm_loadl_epi64((const __m128i *)(input + 3 * stride));
+    in[1] = _mm_loadl_epi64((const __m128i *)(input + 2 * stride));
+    in[2] = _mm_loadl_epi64((const __m128i *)(input + 1 * stride));
+    in[3] = _mm_loadl_epi64((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    // 0x1b = shuffle pattern (0,1,2,3)->(3,2,1,0) on the low four words.
+    in[0] = _mm_shufflelo_epi16(in[0], 0x1b);
+    in[1] = _mm_shufflelo_epi16(in[1], 0x1b);
+    in[2] = _mm_shufflelo_epi16(in[2], 0x1b);
+    in[3] = _mm_shufflelo_epi16(in[3], 0x1b);
+  }
+
+  // Sign-extend 16-bit samples to 32-bit lanes.
+  in[0] = _mm_cvtepi16_epi32(in[0]);
+  in[1] = _mm_cvtepi16_epi32(in[1]);
+  in[2] = _mm_cvtepi16_epi32(in[2]);
+  in[3] = _mm_cvtepi16_epi32(in[3]);
+
+  in[0] = _mm_slli_epi32(in[0], shift);
+  in[1] = _mm_slli_epi32(in[1], shift);
+  in[2] = _mm_slli_epi32(in[2], shift);
+  in[3] = _mm_slli_epi32(in[3], shift);
+}
+
+// We only use stage-2 bit;
+// shift[0] is used in load_buffer_4x4()
+// shift[1] is used in txfm_func_col()
+// shift[2] is used in txfm_func_row()
+// 1-D 4-point forward DCT over 32-bit lanes (4 independent columns per
+// vector). `num_col` is the stride, in vectors, between successive rows of
+// the input so the same kernel serves interleaved multi-column layouts.
+// Each butterfly product is rounded with (1 << (bit - 1)) and then
+// arithmetic-shifted right by `bit`.
+static void fdct4x4_sse4_1(__m128i *in, __m128i *out, int bit,
+                           const int num_col) {
+  const int32_t *cospi = cospi_arr(bit);
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  __m128i s0, s1, s2, s3;
+  __m128i u0, u1, u2, u3;
+  __m128i v0, v1, v2, v3;
+
+  // Stage 1: butterflies pairing row 0 with row 3 and row 1 with row 2.
+  int endidx = 3 * num_col;
+  s0 = _mm_add_epi32(in[0], in[endidx]);
+  s3 = _mm_sub_epi32(in[0], in[endidx]);
+  endidx -= num_col;
+  s1 = _mm_add_epi32(in[num_col], in[endidx]);
+  s2 = _mm_sub_epi32(in[num_col], in[endidx]);
+
+  // btf_32_sse4_1_type0(cospi32, cospi32, s[01], u[02], bit);
+  u0 = _mm_mullo_epi32(s0, cospi32);
+  u1 = _mm_mullo_epi32(s1, cospi32);
+  u2 = _mm_add_epi32(u0, u1);
+  v0 = _mm_sub_epi32(u0, u1);
+
+  u3 = _mm_add_epi32(u2, rnding);
+  v1 = _mm_add_epi32(v0, rnding);
+
+  u0 = _mm_srai_epi32(u3, bit);
+  u2 = _mm_srai_epi32(v1, bit);
+
+  // btf_32_sse4_1_type1(cospi48, cospi16, s[23], u[13], bit);
+  v0 = _mm_mullo_epi32(s2, cospi48);
+  v1 = _mm_mullo_epi32(s3, cospi16);
+  v2 = _mm_add_epi32(v0, v1);
+
+  v3 = _mm_add_epi32(v2, rnding);
+  u1 = _mm_srai_epi32(v3, bit);
+
+  v0 = _mm_mullo_epi32(s2, cospi16);
+  v1 = _mm_mullo_epi32(s3, cospi48);
+  v2 = _mm_sub_epi32(v1, v0);
+
+  v3 = _mm_add_epi32(v2, rnding);
+  u3 = _mm_srai_epi32(v3, bit);
+
+  // Note: shift[1] and shift[2] are zeros
+
+  out[0] = u0;
+  out[1] = u1;
+  out[2] = u2;
+  out[3] = u3;
+}
+
+// Writes the four coefficient vectors of a 4x4 block contiguously
+// (16 int32 values) to `output` using aligned stores.
+static INLINE void write_buffer_4x4(__m128i *res, int32_t *output) {
+  _mm_store_si128((__m128i *)(output + 0 * 4), res[0]);
+  _mm_store_si128((__m128i *)(output + 1 * 4), res[1]);
+  _mm_store_si128((__m128i *)(output + 2 * 4), res[2]);
+  _mm_store_si128((__m128i *)(output + 3 * 4), res[3]);
+}
+
+// 1-D 4-point forward ADST over 32-bit lanes, built from the sinpi_arr()
+// constant table. `num_col` is the vector stride between successive input
+// rows (same layout convention as fdct4x4_sse4_1). Each output is rounded
+// with (1 << (bit - 1)) and arithmetic-shifted right by `bit`.
+static void fadst4x4_sse4_1(__m128i *in, __m128i *out, int bit,
+                            const int num_col) {
+  const int32_t *sinpi = sinpi_arr(bit);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  const __m128i sinpi1 = _mm_set1_epi32((int)sinpi[1]);
+  const __m128i sinpi2 = _mm_set1_epi32((int)sinpi[2]);
+  const __m128i sinpi3 = _mm_set1_epi32((int)sinpi[3]);
+  const __m128i sinpi4 = _mm_set1_epi32((int)sinpi[4]);
+  __m128i t;
+  __m128i s0, s1, s2, s3, s4, s5, s6, s7;
+  __m128i x0, x1, x2, x3;
+  __m128i u0, u1, u2, u3;
+
+  // Products of each input row with the sinpi constants.
+  int idx = 0 * num_col;
+  s0 = _mm_mullo_epi32(in[idx], sinpi1);
+  s1 = _mm_mullo_epi32(in[idx], sinpi4);
+  t = _mm_add_epi32(in[idx], in[idx + num_col]);
+  idx += num_col;
+  s2 = _mm_mullo_epi32(in[idx], sinpi2);
+  s3 = _mm_mullo_epi32(in[idx], sinpi1);
+  idx += num_col;
+  s4 = _mm_mullo_epi32(in[idx], sinpi3);
+  idx += num_col;
+  s5 = _mm_mullo_epi32(in[idx], sinpi4);
+  s6 = _mm_mullo_epi32(in[idx], sinpi2);
+  s7 = _mm_sub_epi32(t, in[idx]);  // in[0] + in[1] - in[3]
+
+  // Combine into the four ADST basis projections.
+  t = _mm_add_epi32(s0, s2);
+  x0 = _mm_add_epi32(t, s5);
+  x1 = _mm_mullo_epi32(s7, sinpi3);
+  t = _mm_sub_epi32(s1, s3);
+  x2 = _mm_add_epi32(t, s6);
+  x3 = s4;
+
+  s0 = _mm_add_epi32(x0, x3);
+  s1 = x1;
+  s2 = _mm_sub_epi32(x2, x3);
+  t = _mm_sub_epi32(x2, x0);
+  s3 = _mm_add_epi32(t, x3);
+
+  // Round and shift down to `bit` precision.
+  u0 = _mm_add_epi32(s0, rnding);
+  u0 = _mm_srai_epi32(u0, bit);
+
+  u1 = _mm_add_epi32(s1, rnding);
+  u1 = _mm_srai_epi32(u1, bit);
+
+  u2 = _mm_add_epi32(s2, rnding);
+  u2 = _mm_srai_epi32(u2, bit);
+
+  u3 = _mm_add_epi32(s3, rnding);
+  u3 = _mm_srai_epi32(u3, bit);
+
+  out[0] = u0;
+  out[1] = u1;
+  out[2] = u2;
+  out[3] = u3;
+}
+// 4-point identity transform: scales each row by NewSqrt2 with rounding
+// (x * NewSqrt2 + (1 << (NewSqrt2Bits - 1))) >> NewSqrt2Bits.
+// `bit` is unused for the identity path; `col_num` is the vector stride
+// between rows.
+static void idtx4x4_sse4_1(__m128i *in, __m128i *out, int bit, int col_num) {
+  (void)bit;
+  __m128i fact = _mm_set1_epi32(NewSqrt2);
+  __m128i offset = _mm_set1_epi32(1 << (NewSqrt2Bits - 1));
+  __m128i a_low;
+
+  for (int i = 0; i < 4; i++) {
+    a_low = _mm_mullo_epi32(in[i * col_num], fact);
+    a_low = _mm_add_epi32(a_low, offset);
+    out[i] = _mm_srai_epi32(a_low, NewSqrt2Bits);
+  }
+}
+// 2-D 4x4 high-bitdepth forward transform dispatcher. Every case follows
+// the same pipeline: load (applying the up/down and left/right flips that
+// the FLIPADST variants require), 1-D column transform, transpose, 1-D row
+// transform, then store 16 int32 coefficients. `bd` is unused: the 4x4
+// path is bit-depth independent here.
+void av1_fwd_txfm2d_4x4_sse4_1(const int16_t *input, int32_t *coeff,
+                               int input_stride, TX_TYPE tx_type, int bd) {
+  __m128i in[4];
+  const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X4];
+  const int txw_idx = get_txw_idx(TX_4X4);
+  const int txh_idx = get_txh_idx(TX_4X4);
+
+  switch (tx_type) {
+    case DCT_DCT:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case ADST_DCT:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case DCT_ADST:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case ADST_ADST:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case FLIPADST_DCT:
+      load_buffer_4x4(input, in, input_stride, 1, 0, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case DCT_FLIPADST:
+      load_buffer_4x4(input, in, input_stride, 0, 1, shift[0]);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case FLIPADST_FLIPADST:
+      load_buffer_4x4(input, in, input_stride, 1, 1, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case ADST_FLIPADST:
+      load_buffer_4x4(input, in, input_stride, 0, 1, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case FLIPADST_ADST:
+      load_buffer_4x4(input, in, input_stride, 1, 0, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case IDTX:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case V_DCT:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case H_DCT:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fdct4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case V_ADST:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case H_ADST:
+      load_buffer_4x4(input, in, input_stride, 0, 0, shift[0]);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_col[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    // NOTE(review): V_FLIPADST/H_FLIPADST pass the *row* cos-bit to both 1-D
+    // stages; presumably av1_fwd_cos_bit_col and av1_fwd_cos_bit_row agree
+    // for TX_4X4 so this is harmless -- confirm against those tables.
+    case V_FLIPADST:
+      load_buffer_4x4(input, in, input_stride, 1, 0, shift[0]);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    case H_FLIPADST:
+      load_buffer_4x4(input, in, input_stride, 0, 1, shift[0]);
+      idtx4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      transpose_32bit_4x4(in, in);
+      fadst4x4_sse4_1(in, in, av1_fwd_cos_bit_row[txw_idx][txh_idx], 1);
+      write_buffer_4x4(in, coeff);
+      break;
+    default: assert(0);
+  }
+  (void)bd;
+}
+
+// Loads an 8x8 block of int16 residuals with optional vertical (flipud)
+// and horizontal (fliplr) flips, then widens to sixteen 32-bit-lane
+// vectors: in[2k] holds the left 4 samples of row k, in[2k+1] the right 4.
+// All values are pre-scaled left by `shift`.
+// The expansion proceeds from rows 4-7 (into in[8..15]) down to row 0 so
+// that each 16-bit source vector is consumed before its slot is
+// overwritten with widened data.
+static INLINE void load_buffer_8x8(const int16_t *input, __m128i *in,
+                                   int stride, int flipud, int fliplr,
+                                   int shift) {
+  __m128i u;
+  if (!flipud) {
+    in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+    in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+    in[4] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in[5] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in[6] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in[7] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+  } else {
+    in[0] = _mm_load_si128((const __m128i *)(input + 7 * stride));
+    in[1] = _mm_load_si128((const __m128i *)(input + 6 * stride));
+    in[2] = _mm_load_si128((const __m128i *)(input + 5 * stride));
+    in[3] = _mm_load_si128((const __m128i *)(input + 4 * stride));
+    in[4] = _mm_load_si128((const __m128i *)(input + 3 * stride));
+    in[5] = _mm_load_si128((const __m128i *)(input + 2 * stride));
+    in[6] = _mm_load_si128((const __m128i *)(input + 1 * stride));
+    in[7] = _mm_load_si128((const __m128i *)(input + 0 * stride));
+  }
+
+  if (fliplr) {
+    in[0] = mm_reverse_epi16(in[0]);
+    in[1] = mm_reverse_epi16(in[1]);
+    in[2] = mm_reverse_epi16(in[2]);
+    in[3] = mm_reverse_epi16(in[3]);
+    in[4] = mm_reverse_epi16(in[4]);
+    in[5] = mm_reverse_epi16(in[5]);
+    in[6] = mm_reverse_epi16(in[6]);
+    in[7] = mm_reverse_epi16(in[7]);
+  }
+
+  // Widen rows 4..7 first: their destinations (in[8..15]) don't alias the
+  // 16-bit sources still pending in in[0..7].
+  u = _mm_unpackhi_epi64(in[4], in[4]);
+  in[8] = _mm_cvtepi16_epi32(in[4]);
+  in[9] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[5], in[5]);
+  in[10] = _mm_cvtepi16_epi32(in[5]);
+  in[11] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[6], in[6]);
+  in[12] = _mm_cvtepi16_epi32(in[6]);
+  in[13] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[7], in[7]);
+  in[14] = _mm_cvtepi16_epi32(in[7]);
+  in[15] = _mm_cvtepi16_epi32(u);
+
+  // Rows 3..0 are widened in descending order for the same reason.
+  u = _mm_unpackhi_epi64(in[3], in[3]);
+  in[6] = _mm_cvtepi16_epi32(in[3]);
+  in[7] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[2], in[2]);
+  in[4] = _mm_cvtepi16_epi32(in[2]);
+  in[5] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[1], in[1]);
+  in[2] = _mm_cvtepi16_epi32(in[1]);
+  in[3] = _mm_cvtepi16_epi32(u);
+
+  u = _mm_unpackhi_epi64(in[0], in[0]);
+  in[0] = _mm_cvtepi16_epi32(in[0]);
+  in[1] = _mm_cvtepi16_epi32(u);
+
+  in[0] = _mm_slli_epi32(in[0], shift);
+  in[1] = _mm_slli_epi32(in[1], shift);
+  in[2] = _mm_slli_epi32(in[2], shift);
+  in[3] = _mm_slli_epi32(in[3], shift);
+  in[4] = _mm_slli_epi32(in[4], shift);
+  in[5] = _mm_slli_epi32(in[5], shift);
+  in[6] = _mm_slli_epi32(in[6], shift);
+  in[7] = _mm_slli_epi32(in[7], shift);
+
+  in[8] = _mm_slli_epi32(in[8], shift);
+  in[9] = _mm_slli_epi32(in[9], shift);
+  in[10] = _mm_slli_epi32(in[10], shift);
+  in[11] = _mm_slli_epi32(in[11], shift);
+  in[12] = _mm_slli_epi32(in[12], shift);
+  in[13] = _mm_slli_epi32(in[13], shift);
+  in[14] = _mm_slli_epi32(in[14], shift);
+  in[15] = _mm_slli_epi32(in[15], shift);
+}
+
+// In-place rounded right shift of all 16 vectors of an 8x8 block:
+// x = (x + (1 << (shift - 1))) >> shift. Requires shift >= 1.
+static INLINE void col_txfm_8x8_rounding(__m128i *in, int shift) {
+  const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
+
+  in[0] = _mm_add_epi32(in[0], rounding);
+  in[1] = _mm_add_epi32(in[1], rounding);
+  in[2] = _mm_add_epi32(in[2], rounding);
+  in[3] = _mm_add_epi32(in[3], rounding);
+  in[4] = _mm_add_epi32(in[4], rounding);
+  in[5] = _mm_add_epi32(in[5], rounding);
+  in[6] = _mm_add_epi32(in[6], rounding);
+  in[7] = _mm_add_epi32(in[7], rounding);
+  in[8] = _mm_add_epi32(in[8], rounding);
+  in[9] = _mm_add_epi32(in[9], rounding);
+  in[10] = _mm_add_epi32(in[10], rounding);
+  in[11] = _mm_add_epi32(in[11], rounding);
+  in[12] = _mm_add_epi32(in[12], rounding);
+  in[13] = _mm_add_epi32(in[13], rounding);
+  in[14] = _mm_add_epi32(in[14], rounding);
+  in[15] = _mm_add_epi32(in[15], rounding);
+
+  in[0] = _mm_srai_epi32(in[0], shift);
+  in[1] = _mm_srai_epi32(in[1], shift);
+  in[2] = _mm_srai_epi32(in[2], shift);
+  in[3] = _mm_srai_epi32(in[3], shift);
+  in[4] = _mm_srai_epi32(in[4], shift);
+  in[5] = _mm_srai_epi32(in[5], shift);
+  in[6] = _mm_srai_epi32(in[6], shift);
+  in[7] = _mm_srai_epi32(in[7], shift);
+  in[8] = _mm_srai_epi32(in[8], shift);
+  in[9] = _mm_srai_epi32(in[9], shift);
+  in[10] = _mm_srai_epi32(in[10], shift);
+  in[11] = _mm_srai_epi32(in[11], shift);
+  in[12] = _mm_srai_epi32(in[12], shift);
+  in[13] = _mm_srai_epi32(in[13], shift);
+  in[14] = _mm_srai_epi32(in[14], shift);
+  in[15] = _mm_srai_epi32(in[15], shift);
+}
+
+// In-place rounded right shift of 8 vectors (a 4x8 block):
+// x = (x + (1 << (shift - 1))) >> shift. Requires shift >= 1.
+static INLINE void col_txfm_4x8_rounding(__m128i *in, int shift) {
+  const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
+
+  in[0] = _mm_add_epi32(in[0], rounding);
+  in[1] = _mm_add_epi32(in[1], rounding);
+  in[2] = _mm_add_epi32(in[2], rounding);
+  in[3] = _mm_add_epi32(in[3], rounding);
+  in[4] = _mm_add_epi32(in[4], rounding);
+  in[5] = _mm_add_epi32(in[5], rounding);
+  in[6] = _mm_add_epi32(in[6], rounding);
+  in[7] = _mm_add_epi32(in[7], rounding);
+
+  in[0] = _mm_srai_epi32(in[0], shift);
+  in[1] = _mm_srai_epi32(in[1], shift);
+  in[2] = _mm_srai_epi32(in[2], shift);
+  in[3] = _mm_srai_epi32(in[3], shift);
+  in[4] = _mm_srai_epi32(in[4], shift);
+  in[5] = _mm_srai_epi32(in[5], shift);
+  in[6] = _mm_srai_epi32(in[6], shift);
+  in[7] = _mm_srai_epi32(in[7], shift);
+}
+
+// Writes the 16 coefficient vectors of an 8x8 block contiguously
+// (64 int32 values) to `output` using aligned stores.
+static INLINE void write_buffer_8x8(const __m128i *res, int32_t *output) {
+  _mm_store_si128((__m128i *)(output + 0 * 4), res[0]);
+  _mm_store_si128((__m128i *)(output + 1 * 4), res[1]);
+  _mm_store_si128((__m128i *)(output + 2 * 4), res[2]);
+  _mm_store_si128((__m128i *)(output + 3 * 4), res[3]);
+
+  _mm_store_si128((__m128i *)(output + 4 * 4), res[4]);
+  _mm_store_si128((__m128i *)(output + 5 * 4), res[5]);
+  _mm_store_si128((__m128i *)(output + 6 * 4), res[6]);
+  _mm_store_si128((__m128i *)(output + 7 * 4), res[7]);
+
+  _mm_store_si128((__m128i *)(output + 8 * 4), res[8]);
+  _mm_store_si128((__m128i *)(output + 9 * 4), res[9]);
+  _mm_store_si128((__m128i *)(output + 10 * 4), res[10]);
+  _mm_store_si128((__m128i *)(output + 11 * 4), res[11]);
+
+  _mm_store_si128((__m128i *)(output + 12 * 4), res[12]);
+  _mm_store_si128((__m128i *)(output + 13 * 4), res[13]);
+  _mm_store_si128((__m128i *)(output + 14 * 4), res[14]);
+  _mm_store_si128((__m128i *)(output + 15 * 4), res[15]);
+}
+
+// Writes 8 rows of 8 coefficients (two vectors per row) to `output` with
+// `stride` int32 elements between rows. Uses unaligned stores, so no
+// alignment requirement is placed on `output` or `stride`.
+static INLINE void write_buffer_16x8(const __m128i *res, int32_t *output,
+                                     const int stride) {
+  _mm_storeu_si128((__m128i *)(output), res[0]);
+  _mm_storeu_si128((__m128i *)(output + 4), res[1]);
+  _mm_storeu_si128((__m128i *)(output + stride), res[2]);
+  _mm_storeu_si128((__m128i *)(output + stride + 4), res[3]);
+
+  _mm_storeu_si128((__m128i *)(output + (stride * 2)), res[4]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 2) + 4), res[5]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 3)), res[6]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 3) + 4), res[7]);
+
+  _mm_storeu_si128((__m128i *)(output + (stride * 4)), res[8]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 4) + 4), res[9]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 5)), res[10]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 5) + 4), res[11]);
+
+  _mm_storeu_si128((__m128i *)(output + (stride * 6)), res[12]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 6) + 4), res[13]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 7)), res[14]);
+  _mm_storeu_si128((__m128i *)(output + (stride * 7) + 4), res[15]);
+}
+
+// 1-D 8-point forward DCT over one 4-lane column group. `col_num` is the
+// vector stride between successive input rows, and the outputs are written
+// with the same stride; results land in frequency order (the inline buf0
+// comments record the intermediate-buffer index of each output).
+static void fdct4x8_sse4_1(__m128i *in, __m128i *out, int bit,
+                           const int col_num) {
+  const int32_t *cospi = cospi_arr(bit);
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+  const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+  const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+  const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  __m128i u[8], v[8];
+
+  int startidx = 0 * col_num;
+  int endidx = 7 * col_num;
+  // Even 8 points 0, 2, ..., 14
+  // stage 0
+  // stage 1
+  u[0] = _mm_add_epi32(in[startidx], in[endidx]);
+  v[7] = _mm_sub_epi32(in[startidx], in[endidx]);  // v[7]
+  startidx += col_num;
+  endidx -= col_num;
+  u[1] = _mm_add_epi32(in[startidx], in[endidx]);
+  u[6] = _mm_sub_epi32(in[startidx], in[endidx]);
+  startidx += col_num;
+  endidx -= col_num;
+  u[2] = _mm_add_epi32(in[startidx], in[endidx]);
+  u[5] = _mm_sub_epi32(in[startidx], in[endidx]);
+  startidx += col_num;
+  endidx -= col_num;
+  u[3] = _mm_add_epi32(in[startidx], in[endidx]);
+  v[4] = _mm_sub_epi32(in[startidx], in[endidx]);  // v[4]
+
+  // stage 2
+  v[0] = _mm_add_epi32(u[0], u[3]);
+  v[3] = _mm_sub_epi32(u[0], u[3]);
+  v[1] = _mm_add_epi32(u[1], u[2]);
+  v[2] = _mm_sub_epi32(u[1], u[2]);
+
+  v[5] = _mm_mullo_epi32(u[5], cospim32);
+  v[6] = _mm_mullo_epi32(u[6], cospi32);
+  v[5] = _mm_add_epi32(v[5], v[6]);
+  v[5] = _mm_add_epi32(v[5], rnding);
+  v[5] = _mm_srai_epi32(v[5], bit);
+
+  u[0] = _mm_mullo_epi32(u[5], cospi32);
+  v[6] = _mm_mullo_epi32(u[6], cospim32);
+  v[6] = _mm_sub_epi32(u[0], v[6]);
+  v[6] = _mm_add_epi32(v[6], rnding);
+  v[6] = _mm_srai_epi32(v[6], bit);
+
+  // stage 3
+  // type 0
+  v[0] = _mm_mullo_epi32(v[0], cospi32);
+  v[1] = _mm_mullo_epi32(v[1], cospi32);
+  u[0] = _mm_add_epi32(v[0], v[1]);
+  u[0] = _mm_add_epi32(u[0], rnding);
+  u[0] = _mm_srai_epi32(u[0], bit);
+
+  u[1] = _mm_sub_epi32(v[0], v[1]);
+  u[1] = _mm_add_epi32(u[1], rnding);
+  u[1] = _mm_srai_epi32(u[1], bit);
+
+  // type 1
+  v[0] = _mm_mullo_epi32(v[2], cospi48);
+  v[1] = _mm_mullo_epi32(v[3], cospi16);
+  u[2] = _mm_add_epi32(v[0], v[1]);
+  u[2] = _mm_add_epi32(u[2], rnding);
+  u[2] = _mm_srai_epi32(u[2], bit);
+
+  v[0] = _mm_mullo_epi32(v[2], cospi16);
+  v[1] = _mm_mullo_epi32(v[3], cospi48);
+  u[3] = _mm_sub_epi32(v[1], v[0]);
+  u[3] = _mm_add_epi32(u[3], rnding);
+  u[3] = _mm_srai_epi32(u[3], bit);
+
+  u[4] = _mm_add_epi32(v[4], v[5]);
+  u[5] = _mm_sub_epi32(v[4], v[5]);
+  u[6] = _mm_sub_epi32(v[7], v[6]);
+  u[7] = _mm_add_epi32(v[7], v[6]);
+
+  // stage 4
+  // stage 5
+  v[0] = _mm_mullo_epi32(u[4], cospi56);
+  v[1] = _mm_mullo_epi32(u[7], cospi8);
+  v[0] = _mm_add_epi32(v[0], v[1]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[1 * col_num] = _mm_srai_epi32(v[0], bit);  // buf0[4]
+
+  v[0] = _mm_mullo_epi32(u[4], cospi8);
+  v[1] = _mm_mullo_epi32(u[7], cospi56);
+  v[0] = _mm_sub_epi32(v[1], v[0]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[7 * col_num] = _mm_srai_epi32(v[0], bit);  // buf0[7]
+
+  v[0] = _mm_mullo_epi32(u[5], cospi24);
+  v[1] = _mm_mullo_epi32(u[6], cospi40);
+  v[0] = _mm_add_epi32(v[0], v[1]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[5 * col_num] = _mm_srai_epi32(v[0], bit);  // buf0[5]
+
+  v[0] = _mm_mullo_epi32(u[5], cospi40);
+  v[1] = _mm_mullo_epi32(u[6], cospi24);
+  v[0] = _mm_sub_epi32(v[1], v[0]);
+  v[0] = _mm_add_epi32(v[0], rnding);
+  out[3 * col_num] = _mm_srai_epi32(v[0], bit);  // buf0[6]
+
+  out[0 * col_num] = u[0];  // buf0[0]
+  out[4 * col_num] = u[1];  // buf0[1]
+  out[2 * col_num] = u[2];  // buf0[2]
+  out[6 * col_num] = u[3];  // buf0[3]
+}
+
+// Full-width 8-point DCT: runs the 4-lane kernel on both halves of each
+// row (vectors at even offsets hold lanes 0-3, odd offsets lanes 4-7).
+static void fdct8x8_sse4_1(__m128i *in, __m128i *out, int bit,
+                           const int col_num) {
+  fdct4x8_sse4_1(in, out, bit, col_num);
+  fdct4x8_sse4_1(in + 1, out + 1, bit, col_num);
+}
+
+// 1-D 8-point forward ADST over 32-bit lanes. Processes `col_num` vector
+// columns per call; rows of the same column are `col_num` vectors apart.
+// Each multiply stage rounds with (1 << (bit - 1)) then shifts right by
+// `bit`. Stage 7 writes the outputs in the ADST's permuted/negated order.
+static void fadst8x8_sse4_1(__m128i *in, __m128i *out, int bit,
+                            const int col_num) {
+  const int32_t *cospi = cospi_arr(bit);
+  const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+  const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+  const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
+  const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+  const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+  const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
+  const __m128i cospim4 = _mm_set1_epi32(-cospi[4]);
+  const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
+  const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
+  const __m128i cospim20 = _mm_set1_epi32(-cospi[20]);
+  const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
+  const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
+  const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
+  const __m128i cospim36 = _mm_set1_epi32(-cospi[36]);
+  const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
+  const __m128i cospim52 = _mm_set1_epi32(-cospi[52]);
+  const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
+  const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+  const __m128i zero = _mm_setzero_si128();
+  __m128i u0, u1, u2, u3, u4, u5, u6, u7;
+  __m128i v0, v1, v2, v3, v4, v5, v6, v7;
+  __m128i x, y;
+  int col;
+
+  // Note:
+  // Even column: 0, 2, ..., 14
+  // Odd column: 1, 3, ..., 15
+  // one even column plus one odd column constructs one row (8 coeffs)
+  // total we have 8 rows (8x8).
+  for (col = 0; col < col_num; ++col) {
+    // stage 0
+    // stage 1: reorder inputs and negate rows 7, 3, 1 and 5.
+    u0 = in[col_num * 0 + col];
+    u1 = _mm_sub_epi32(zero, in[col_num * 7 + col]);
+    u2 = _mm_sub_epi32(zero, in[col_num * 3 + col]);
+    u3 = in[col_num * 4 + col];
+    u4 = _mm_sub_epi32(zero, in[col_num * 1 + col]);
+    u5 = in[col_num * 6 + col];
+    u6 = in[col_num * 2 + col];
+    u7 = _mm_sub_epi32(zero, in[col_num * 5 + col]);
+
+    // stage 2
+    v0 = u0;
+    v1 = u1;
+
+    x = _mm_mullo_epi32(u2, cospi32);
+    y = _mm_mullo_epi32(u3, cospi32);
+    v2 = _mm_add_epi32(x, y);
+    v2 = _mm_add_epi32(v2, rnding);
+    v2 = _mm_srai_epi32(v2, bit);
+
+    v3 = _mm_sub_epi32(x, y);
+    v3 = _mm_add_epi32(v3, rnding);
+    v3 = _mm_srai_epi32(v3, bit);
+
+    v4 = u4;
+    v5 = u5;
+
+    x = _mm_mullo_epi32(u6, cospi32);
+    y = _mm_mullo_epi32(u7, cospi32);
+    v6 = _mm_add_epi32(x, y);
+    v6 = _mm_add_epi32(v6, rnding);
+    v6 = _mm_srai_epi32(v6, bit);
+
+    v7 = _mm_sub_epi32(x, y);
+    v7 = _mm_add_epi32(v7, rnding);
+    v7 = _mm_srai_epi32(v7, bit);
+
+    // stage 3
+    u0 = _mm_add_epi32(v0, v2);
+    u1 = _mm_add_epi32(v1, v3);
+    u2 = _mm_sub_epi32(v0, v2);
+    u3 = _mm_sub_epi32(v1, v3);
+    u4 = _mm_add_epi32(v4, v6);
+    u5 = _mm_add_epi32(v5, v7);
+    u6 = _mm_sub_epi32(v4, v6);
+    u7 = _mm_sub_epi32(v5, v7);
+
+    // stage 4
+    v0 = u0;
+    v1 = u1;
+    v2 = u2;
+    v3 = u3;
+
+    x = _mm_mullo_epi32(u4, cospi16);
+    y = _mm_mullo_epi32(u5, cospi48);
+    v4 = _mm_add_epi32(x, y);
+    v4 = _mm_add_epi32(v4, rnding);
+    v4 = _mm_srai_epi32(v4, bit);
+
+    x = _mm_mullo_epi32(u4, cospi48);
+    y = _mm_mullo_epi32(u5, cospim16);
+    v5 = _mm_add_epi32(x, y);
+    v5 = _mm_add_epi32(v5, rnding);
+    v5 = _mm_srai_epi32(v5, bit);
+
+    x = _mm_mullo_epi32(u6, cospim48);
+    y = _mm_mullo_epi32(u7, cospi16);
+    v6 = _mm_add_epi32(x, y);
+    v6 = _mm_add_epi32(v6, rnding);
+    v6 = _mm_srai_epi32(v6, bit);
+
+    x = _mm_mullo_epi32(u6, cospi16);
+    y = _mm_mullo_epi32(u7, cospi48);
+    v7 = _mm_add_epi32(x, y);
+    v7 = _mm_add_epi32(v7, rnding);
+    v7 = _mm_srai_epi32(v7, bit);
+
+    // stage 5
+    u0 = _mm_add_epi32(v0, v4);
+    u1 = _mm_add_epi32(v1, v5);
+    u2 = _mm_add_epi32(v2, v6);
+    u3 = _mm_add_epi32(v3, v7);
+    u4 = _mm_sub_epi32(v0, v4);
+    u5 = _mm_sub_epi32(v1, v5);
+    u6 = _mm_sub_epi32(v2, v6);
+    u7 = _mm_sub_epi32(v3, v7);
+
+    // stage 6: final rotations by odd multiples of pi/32.
+    x = _mm_mullo_epi32(u0, cospi4);
+    y = _mm_mullo_epi32(u1, cospi60);
+    v0 = _mm_add_epi32(x, y);
+    v0 = _mm_add_epi32(v0, rnding);
+    v0 = _mm_srai_epi32(v0, bit);
+
+    x = _mm_mullo_epi32(u0, cospi60);
+    y = _mm_mullo_epi32(u1, cospim4);
+    v1 = _mm_add_epi32(x, y);
+    v1 = _mm_add_epi32(v1, rnding);
+    v1 = _mm_srai_epi32(v1, bit);
+
+    x = _mm_mullo_epi32(u2, cospi20);
+    y = _mm_mullo_epi32(u3, cospi44);
+    v2 = _mm_add_epi32(x, y);
+    v2 = _mm_add_epi32(v2, rnding);
+    v2 = _mm_srai_epi32(v2, bit);
+
+    x = _mm_mullo_epi32(u2, cospi44);
+    y = _mm_mullo_epi32(u3, cospim20);
+    v3 = _mm_add_epi32(x, y);
+    v3 = _mm_add_epi32(v3, rnding);
+    v3 = _mm_srai_epi32(v3, bit);
+
+    x = _mm_mullo_epi32(u4, cospi36);
+    y = _mm_mullo_epi32(u5, cospi28);
+    v4 = _mm_add_epi32(x, y);
+    v4 = _mm_add_epi32(v4, rnding);
+    v4 = _mm_srai_epi32(v4, bit);
+
+    x = _mm_mullo_epi32(u4, cospi28);
+    y = _mm_mullo_epi32(u5, cospim36);
+    v5 = _mm_add_epi32(x, y);
+    v5 = _mm_add_epi32(v5, rnding);
+    v5 = _mm_srai_epi32(v5, bit);
+
+    x = _mm_mullo_epi32(u6, cospi52);
+    y = _mm_mullo_epi32(u7, cospi12);
+    v6 = _mm_add_epi32(x, y);
+    v6 = _mm_add_epi32(v6, rnding);
+    v6 = _mm_srai_epi32(v6, bit);
+
+    x = _mm_mullo_epi32(u6, cospi12);
+    y = _mm_mullo_epi32(u7, cospim52);
+    v7 = _mm_add_epi32(x, y);
+    v7 = _mm_add_epi32(v7, rnding);
+    v7 = _mm_srai_epi32(v7, bit);
+
+    // stage 7: output permutation.
+    out[col_num * 0 + col] = v1;
+    out[col_num * 1 + col] = v6;
+    out[col_num * 2 + col] = v3;
+    out[col_num * 3 + col] = v4;
+    out[col_num * 4 + col] = v5;
+    out[col_num * 5 + col] = v2;
+    out[col_num * 6 + col] = v7;
+    out[col_num * 7 + col] = v0;
+  }
+}
+// 8-point identity transform: doubles every coefficient (the 8x8 identity
+// scale factor of 2, implemented as x + x). `bit` is unused; `col_num`
+// selects how many 8-vector column groups are processed.
+static void idtx8x8_sse4_1(__m128i *in, __m128i *out, int bit, int col_num) {
+  (void)bit;
+
+  for (int i = 0; i < col_num; i += 1) {
+    out[0 + 8 * i] = _mm_add_epi32(in[0 + 8 * i], in[0 + 8 * i]);
+    out[1 + 8 * i] = _mm_add_epi32(in[1 + 8 * i], in[1 + 8 * i]);
+    out[2 + 8 * i] = _mm_add_epi32(in[2 + 8 * i], in[2 + 8 * i]);
+    out[3 + 8 * i] = _mm_add_epi32(in[3 + 8 * i], in[3 + 8 * i]);
+    out[4 + 8 * i] = _mm_add_epi32(in[4 + 8 * i], in[4 + 8 * i]);
+    out[5 + 8 * i] = _mm_add_epi32(in[5 + 8 * i], in[5 + 8 * i]);
+    out[6 + 8 * i] = _mm_add_epi32(in[6 + 8 * i], in[6 + 8 * i]);
+    out[7 + 8 * i] = _mm_add_epi32(in[7 + 8 * i], in[7 + 8 * i]);
+  }
+}
+#if !CONFIG_REALTIME_ONLY
+// Identity transform for the 32x8 path: doubles every coefficient across
+// two vector columns by eight rows. Both `bit` and `col_num` are unused
+// (the layout is fixed for this shape).
+static void idtx32x8_sse4_1(__m128i *in, __m128i *out, int bit, int col_num) {
+  (void)bit;
+  (void)col_num;
+  for (int j = 0; j < 2; j++) {
+    out[j + 8 * 0] = _mm_add_epi32(in[j + 8 * 0], in[j + 8 * 0]);
+    out[j + 8 * 1] = _mm_add_epi32(in[j + 8 * 1], in[j + 8 * 1]);
+    out[j + 8 * 2] = _mm_add_epi32(in[j + 8 * 2], in[j + 8 * 2]);
+    out[j + 8 * 3] = _mm_add_epi32(in[j + 8 * 3], in[j + 8 * 3]);
+    out[j + 8 * 4] = _mm_add_epi32(in[j + 8 * 4], in[j + 8 * 4]);
+    out[j + 8 * 5] = _mm_add_epi32(in[j + 8 * 5], in[j + 8 * 5]);
+    out[j + 8 * 6] = _mm_add_epi32(in[j + 8 * 6], in[j + 8 * 6]);
+    out[j + 8 * 7] = _mm_add_epi32(in[j + 8 * 7], in[j + 8 * 7]);
+  }
+}
+#endif
+// 2-D forward transform for an 8x8 block of high-bitdepth residuals
+// (SSE4.1). Pipeline for every tx_type: load + pre-shift (shift[0]),
+// column transform, rounding by -shift[1], transpose, row transform,
+// store to `coeff`. FLIPADST variants are realized by flipping the input
+// rows/columns at load time. `bd` is unused here.
+// NOTE(review): the IDTX/V_*/H_* cases pass av1_fwd_cos_bit_col for the
+// row (second) pass where other cases use av1_fwd_cos_bit_row — for
+// TX_8X8 the two tables appear interchangeable, but confirm upstream.
+void av1_fwd_txfm2d_8x8_sse4_1(const int16_t *input, int32_t *coeff, int stride,
+ TX_TYPE tx_type, int bd) {
+ __m128i in[16], out[16];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X8];
+ const int txw_idx = get_txw_idx(TX_8X8);
+ const int txh_idx = get_txh_idx(TX_8X8);
+
+ switch (tx_type) {
+ case DCT_DCT:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case ADST_DCT:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case DCT_ADST:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case ADST_ADST:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case FLIPADST_DCT:
+ // flipud=1: vertical flip implements the FLIPADST column transform.
+ load_buffer_8x8(input, in, stride, 1, 0, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case DCT_FLIPADST:
+ load_buffer_8x8(input, in, stride, 0, 1, shift[0]);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case FLIPADST_FLIPADST:
+ load_buffer_8x8(input, in, stride, 1, 1, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case ADST_FLIPADST:
+ load_buffer_8x8(input, in, stride, 0, 1, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case FLIPADST_ADST:
+ load_buffer_8x8(input, in, stride, 1, 0, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case IDTX:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case V_DCT:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case H_DCT:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fdct8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case V_ADST:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case H_ADST:
+ load_buffer_8x8(input, in, stride, 0, 0, shift[0]);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case V_FLIPADST:
+ load_buffer_8x8(input, in, stride, 1, 0, shift[0]);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ case H_FLIPADST:
+ load_buffer_8x8(input, in, stride, 0, 1, shift[0]);
+ idtx8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ col_txfm_8x8_rounding(out, -shift[1]);
+ transpose_8x8(out, in);
+ fadst8x8_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], 2);
+ write_buffer_8x8(out, coeff);
+ break;
+ default: assert(0);
+ }
+ (void)bd;
+}
+
+// Hybrid Transform 16x16
+
+// Re-interleave four 8x8 sub-blocks (stored as four contiguous groups of 16
+// __m128i each in `in`) into a single 16x16 layout in `out`: each output row
+// takes two vectors from a left sub-block and two from the matching right
+// sub-block (offset +16 in `in`).
+static INLINE void convert_8x8_to_16x16(const __m128i *in, __m128i *out) {
+ int row_index = 0;
+ int dst_index = 0;
+ int src_index = 0;
+
+ // row 0, 1, .., 7
+ do {
+ out[dst_index] = in[src_index];
+ out[dst_index + 1] = in[src_index + 1];
+ out[dst_index + 2] = in[src_index + 16];
+ out[dst_index + 3] = in[src_index + 17];
+ dst_index += 4;
+ src_index += 2;
+ row_index += 1;
+ } while (row_index < 8);
+
+ // row 8, 9, ..., 15
+ // Skip over the top-right sub-block to reach the bottom-left one.
+ src_index += 16;
+ do {
+ out[dst_index] = in[src_index];
+ out[dst_index + 1] = in[src_index + 1];
+ out[dst_index + 2] = in[src_index + 16];
+ out[dst_index + 3] = in[src_index + 17];
+ dst_index += 4;
+ src_index += 2;
+ row_index += 1;
+ } while (row_index < 16);
+}
+
+// Load a 16x16 block of int16 residuals as four 8x8 quadrants, apply the
+// requested vertical/horizontal flips by swapping quadrant base pointers
+// (load_buffer_8x8 flips within each quadrant), pre-shift by `shift`, and
+// emit the combined 16x16 layout into `out`.
+static INLINE void load_buffer_16x16(const int16_t *input, __m128i *out,
+ int stride, int flipud, int fliplr,
+ int shift) {
+ __m128i in[64];
+ // Load 4 8x8 blocks
+ const int16_t *topL = input;
+ const int16_t *topR = input + 8;
+ const int16_t *botL = input + 8 * stride;
+ const int16_t *botR = input + 8 * stride + 8;
+
+ const int16_t *tmp;
+
+ if (flipud) {
+ // Swap left columns
+ tmp = topL;
+ topL = botL;
+ botL = tmp;
+ // Swap right columns
+ tmp = topR;
+ topR = botR;
+ botR = tmp;
+ }
+
+ if (fliplr) {
+ // Swap top rows
+ tmp = topL;
+ topL = topR;
+ topR = tmp;
+ // Swap bottom rows
+ tmp = botL;
+ botL = botR;
+ botR = tmp;
+ }
+
+ // load first 8 columns
+ load_buffer_8x8(topL, &in[0], stride, flipud, fliplr, shift);
+ load_buffer_8x8(botL, &in[32], stride, flipud, fliplr, shift);
+
+ // load second 8 columns
+ load_buffer_8x8(topR, &in[16], stride, flipud, fliplr, shift);
+ load_buffer_8x8(botR, &in[48], stride, flipud, fliplr, shift);
+
+ // Interleave the four quadrants into 16x16 row order.
+ convert_8x8_to_16x16(in, out);
+}
+
+// Load an 8x16 block as two stacked 8x8 halves. A vertical flip swaps the
+// half base pointers; load_buffer_8x8 handles the flip inside each half.
+static INLINE void load_buffer_8x16(const int16_t *input, __m128i *out,
+ int stride, int flipud, int fliplr,
+ int shift) {
+ const int16_t *topL = input;
+ const int16_t *botL = input + 8 * stride;
+
+ const int16_t *tmp;
+
+ if (flipud) {
+ tmp = topL;
+ topL = botL;
+ botL = tmp;
+ }
+
+ load_buffer_8x8(topL, out, stride, flipud, fliplr, shift);
+ load_buffer_8x8(botL, out + 16, stride, flipud, fliplr, shift);
+}
+
+// Load an 8x4 block as two side-by-side 4x4 halves. A horizontal flip swaps
+// the half base pointers; load_buffer_4x4 handles the flip inside each half.
+static INLINE void load_buffer_8x4(const int16_t *input, __m128i *out,
+ int stride, int flipud, int fliplr,
+ int shift) {
+ const int16_t *topL = input;
+ const int16_t *topR = input + 4;
+
+ const int16_t *tmp;
+
+ if (fliplr) {
+ tmp = topL;
+ topL = topR;
+ topR = tmp;
+ }
+
+ load_buffer_4x4(topL, out, stride, flipud, fliplr, shift);
+ load_buffer_4x4(topR, out + 4, stride, flipud, fliplr, shift);
+}
+
+// Load a 16x4 block as two side-by-side 8x4 halves. A horizontal flip swaps
+// the half base pointers; load_buffer_8x4 handles the flip inside each half.
+static INLINE void load_buffer_16x4(const int16_t *input, __m128i *out,
+ int stride, int flipud, int fliplr,
+ int shift) {
+ const int16_t *topL = input;
+ const int16_t *topR = input + 8;
+
+ const int16_t *tmp;
+
+ if (fliplr) {
+ tmp = topL;
+ topL = topR;
+ topR = tmp;
+ }
+
+ load_buffer_8x4(topL, out, stride, flipud, fliplr, shift);
+ load_buffer_8x4(topR, out + 8, stride, flipud, fliplr, shift);
+}
+
+// Load a 4x8 block as two stacked 4x4 halves. A vertical flip swaps the
+// half base pointers; load_buffer_4x4 handles the flip inside each half.
+static INLINE void load_buffer_4x8(const int16_t *input, __m128i *out,
+ int stride, int flipud, int fliplr,
+ int shift) {
+ const int16_t *topL = input;
+ const int16_t *botL = input + 4 * stride;
+
+ const int16_t *tmp;
+
+ if (flipud) {
+ tmp = topL;
+ topL = botL;
+ botL = tmp;
+ }
+
+ load_buffer_4x4(topL, out, stride, flipud, fliplr, shift);
+ load_buffer_4x4(botL, out + 4, stride, flipud, fliplr, shift);
+}
+
+#if !CONFIG_REALTIME_ONLY
+// Load a 4x16 block as two stacked 4x8 halves. A vertical flip swaps the
+// half base pointers; load_buffer_4x8 handles the flip inside each half.
+static INLINE void load_buffer_4x16(const int16_t *input, __m128i *out,
+ const int stride, const int flipud,
+ const int fliplr, const int shift) {
+ const int16_t *topL = input;
+ const int16_t *botL = input + 8 * stride;
+
+ const int16_t *tmp;
+
+ if (flipud) {
+ tmp = topL;
+ topL = botL;
+ botL = tmp;
+ }
+ load_buffer_4x8(topL, out, stride, flipud, fliplr, shift);
+ load_buffer_4x8(botL, out + 8, stride, flipud, fliplr, shift);
+}
+#endif
+
+// Load a 32-wide block of `height` rows, one row per iteration, producing
+// 8 __m128i vectors per row. Each row is read as two load_buffer_4x4 calls
+// with an internal stride of 4 — i.e. 16 contiguous int16 values are treated
+// as a 4x4 tile (NOTE(review): relies on that contiguous-tile reading of
+// load_buffer_4x4; confirm against its definition).
+static INLINE void load_buffer_32x8n(const int16_t *input, __m128i *out,
+ int stride, int flipud, int fliplr,
+ int shift, const int height) {
+ const int16_t *in = input;
+ __m128i *output = out;
+ for (int col = 0; col < height; col++) {
+ in = input + col * stride;
+ output = out + col * 8;
+ load_buffer_4x4(in, output, 4, flipud, fliplr, shift);
+ load_buffer_4x4((in + 16), (output + 4), 4, flipud, fliplr, shift);
+ }
+}
+
+// Forward 16-point DCT over 32-bit lanes (SSE4.1), as a 6-stage butterfly
+// network. Data layout: in/out[row * col_num + col], each element holding
+// four interleaved columns; `col_num` is the number of such 4-wide column
+// groups. `bit` selects the cosine table (cospi_arr) and every multiply is
+// rounded with rnding = 1 << (bit - 1) and shifted right by `bit`.
+static void fdct16x16_sse4_1(__m128i *in, __m128i *out, int bit,
+ const int col_num) {
+ const int32_t *cospi = cospi_arr(bit);
+ const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+ const __m128i cospim32 = _mm_set1_epi32(-cospi[32]);
+ const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+ const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+ const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+ const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
+ const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+ const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+ const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+ const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+ const __m128i cospi60 = _mm_set1_epi32(cospi[60]);
+ const __m128i cospi4 = _mm_set1_epi32(cospi[4]);
+ const __m128i cospi28 = _mm_set1_epi32(cospi[28]);
+ const __m128i cospi36 = _mm_set1_epi32(cospi[36]);
+ const __m128i cospi44 = _mm_set1_epi32(cospi[44]);
+ const __m128i cospi20 = _mm_set1_epi32(cospi[20]);
+ const __m128i cospi12 = _mm_set1_epi32(cospi[12]);
+ const __m128i cospi52 = _mm_set1_epi32(cospi[52]);
+ const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+ __m128i u[16], v[16], x;
+ int col;
+
+ // Calculate the column 0, 1, 2, 3
+ for (col = 0; col < col_num; ++col) {
+ // stage 0
+ // stage 1: butterfly of mirrored input pairs (row k with row 15-k).
+ u[0] = _mm_add_epi32(in[0 * col_num + col], in[15 * col_num + col]);
+ u[15] = _mm_sub_epi32(in[0 * col_num + col], in[15 * col_num + col]);
+ u[1] = _mm_add_epi32(in[1 * col_num + col], in[14 * col_num + col]);
+ u[14] = _mm_sub_epi32(in[1 * col_num + col], in[14 * col_num + col]);
+ u[2] = _mm_add_epi32(in[2 * col_num + col], in[13 * col_num + col]);
+ u[13] = _mm_sub_epi32(in[2 * col_num + col], in[13 * col_num + col]);
+ u[3] = _mm_add_epi32(in[3 * col_num + col], in[12 * col_num + col]);
+ u[12] = _mm_sub_epi32(in[3 * col_num + col], in[12 * col_num + col]);
+ u[4] = _mm_add_epi32(in[4 * col_num + col], in[11 * col_num + col]);
+ u[11] = _mm_sub_epi32(in[4 * col_num + col], in[11 * col_num + col]);
+ u[5] = _mm_add_epi32(in[5 * col_num + col], in[10 * col_num + col]);
+ u[10] = _mm_sub_epi32(in[5 * col_num + col], in[10 * col_num + col]);
+ u[6] = _mm_add_epi32(in[6 * col_num + col], in[9 * col_num + col]);
+ u[9] = _mm_sub_epi32(in[6 * col_num + col], in[9 * col_num + col]);
+ u[7] = _mm_add_epi32(in[7 * col_num + col], in[8 * col_num + col]);
+ u[8] = _mm_sub_epi32(in[7 * col_num + col], in[8 * col_num + col]);
+
+ // stage 2
+ v[0] = _mm_add_epi32(u[0], u[7]);
+ v[7] = _mm_sub_epi32(u[0], u[7]);
+ v[1] = _mm_add_epi32(u[1], u[6]);
+ v[6] = _mm_sub_epi32(u[1], u[6]);
+ v[2] = _mm_add_epi32(u[2], u[5]);
+ v[5] = _mm_sub_epi32(u[2], u[5]);
+ v[3] = _mm_add_epi32(u[3], u[4]);
+ v[4] = _mm_sub_epi32(u[3], u[4]);
+ v[8] = u[8];
+ v[9] = u[9];
+
+ // Rotations by cospi[32] with round-and-shift by `bit`.
+ v[10] = _mm_mullo_epi32(u[10], cospim32);
+ x = _mm_mullo_epi32(u[13], cospi32);
+ v[10] = _mm_add_epi32(v[10], x);
+ v[10] = _mm_add_epi32(v[10], rnding);
+ v[10] = _mm_srai_epi32(v[10], bit);
+
+ v[13] = _mm_mullo_epi32(u[10], cospi32);
+ x = _mm_mullo_epi32(u[13], cospim32);
+ v[13] = _mm_sub_epi32(v[13], x);
+ v[13] = _mm_add_epi32(v[13], rnding);
+ v[13] = _mm_srai_epi32(v[13], bit);
+
+ v[11] = _mm_mullo_epi32(u[11], cospim32);
+ x = _mm_mullo_epi32(u[12], cospi32);
+ v[11] = _mm_add_epi32(v[11], x);
+ v[11] = _mm_add_epi32(v[11], rnding);
+ v[11] = _mm_srai_epi32(v[11], bit);
+
+ v[12] = _mm_mullo_epi32(u[11], cospi32);
+ x = _mm_mullo_epi32(u[12], cospim32);
+ v[12] = _mm_sub_epi32(v[12], x);
+ v[12] = _mm_add_epi32(v[12], rnding);
+ v[12] = _mm_srai_epi32(v[12], bit);
+ v[14] = u[14];
+ v[15] = u[15];
+
+ // stage 3
+ u[0] = _mm_add_epi32(v[0], v[3]);
+ u[3] = _mm_sub_epi32(v[0], v[3]);
+ u[1] = _mm_add_epi32(v[1], v[2]);
+ u[2] = _mm_sub_epi32(v[1], v[2]);
+ u[4] = v[4];
+
+ u[5] = _mm_mullo_epi32(v[5], cospim32);
+ x = _mm_mullo_epi32(v[6], cospi32);
+ u[5] = _mm_add_epi32(u[5], x);
+ u[5] = _mm_add_epi32(u[5], rnding);
+ u[5] = _mm_srai_epi32(u[5], bit);
+
+ u[6] = _mm_mullo_epi32(v[5], cospi32);
+ x = _mm_mullo_epi32(v[6], cospim32);
+ u[6] = _mm_sub_epi32(u[6], x);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
+
+ u[7] = v[7];
+ u[8] = _mm_add_epi32(v[8], v[11]);
+ u[11] = _mm_sub_epi32(v[8], v[11]);
+ u[9] = _mm_add_epi32(v[9], v[10]);
+ u[10] = _mm_sub_epi32(v[9], v[10]);
+ u[12] = _mm_sub_epi32(v[15], v[12]);
+ u[15] = _mm_add_epi32(v[15], v[12]);
+ u[13] = _mm_sub_epi32(v[14], v[13]);
+ u[14] = _mm_add_epi32(v[14], v[13]);
+
+ // stage 4
+ u[0] = _mm_mullo_epi32(u[0], cospi32);
+ u[1] = _mm_mullo_epi32(u[1], cospi32);
+ v[0] = _mm_add_epi32(u[0], u[1]);
+ v[0] = _mm_add_epi32(v[0], rnding);
+ v[0] = _mm_srai_epi32(v[0], bit);
+
+ v[1] = _mm_sub_epi32(u[0], u[1]);
+ v[1] = _mm_add_epi32(v[1], rnding);
+ v[1] = _mm_srai_epi32(v[1], bit);
+
+ v[2] = _mm_mullo_epi32(u[2], cospi48);
+ x = _mm_mullo_epi32(u[3], cospi16);
+ v[2] = _mm_add_epi32(v[2], x);
+ v[2] = _mm_add_epi32(v[2], rnding);
+ v[2] = _mm_srai_epi32(v[2], bit);
+
+ v[3] = _mm_mullo_epi32(u[2], cospi16);
+ x = _mm_mullo_epi32(u[3], cospi48);
+ v[3] = _mm_sub_epi32(x, v[3]);
+ v[3] = _mm_add_epi32(v[3], rnding);
+ v[3] = _mm_srai_epi32(v[3], bit);
+
+ v[4] = _mm_add_epi32(u[4], u[5]);
+ v[5] = _mm_sub_epi32(u[4], u[5]);
+ v[6] = _mm_sub_epi32(u[7], u[6]);
+ v[7] = _mm_add_epi32(u[7], u[6]);
+ v[8] = u[8];
+
+ v[9] = _mm_mullo_epi32(u[9], cospim16);
+ x = _mm_mullo_epi32(u[14], cospi48);
+ v[9] = _mm_add_epi32(v[9], x);
+ v[9] = _mm_add_epi32(v[9], rnding);
+ v[9] = _mm_srai_epi32(v[9], bit);
+
+ v[14] = _mm_mullo_epi32(u[9], cospi48);
+ x = _mm_mullo_epi32(u[14], cospim16);
+ v[14] = _mm_sub_epi32(v[14], x);
+ v[14] = _mm_add_epi32(v[14], rnding);
+ v[14] = _mm_srai_epi32(v[14], bit);
+
+ v[10] = _mm_mullo_epi32(u[10], cospim48);
+ x = _mm_mullo_epi32(u[13], cospim16);
+ v[10] = _mm_add_epi32(v[10], x);
+ v[10] = _mm_add_epi32(v[10], rnding);
+ v[10] = _mm_srai_epi32(v[10], bit);
+
+ v[13] = _mm_mullo_epi32(u[10], cospim16);
+ x = _mm_mullo_epi32(u[13], cospim48);
+ v[13] = _mm_sub_epi32(v[13], x);
+ v[13] = _mm_add_epi32(v[13], rnding);
+ v[13] = _mm_srai_epi32(v[13], bit);
+
+ v[11] = u[11];
+ v[12] = u[12];
+ v[15] = u[15];
+
+ // stage 5
+ u[0] = v[0];
+ u[1] = v[1];
+ u[2] = v[2];
+ u[3] = v[3];
+
+ u[4] = _mm_mullo_epi32(v[4], cospi56);
+ x = _mm_mullo_epi32(v[7], cospi8);
+ u[4] = _mm_add_epi32(u[4], x);
+ u[4] = _mm_add_epi32(u[4], rnding);
+ u[4] = _mm_srai_epi32(u[4], bit);
+
+ u[7] = _mm_mullo_epi32(v[4], cospi8);
+ x = _mm_mullo_epi32(v[7], cospi56);
+ u[7] = _mm_sub_epi32(x, u[7]);
+ u[7] = _mm_add_epi32(u[7], rnding);
+ u[7] = _mm_srai_epi32(u[7], bit);
+
+ u[5] = _mm_mullo_epi32(v[5], cospi24);
+ x = _mm_mullo_epi32(v[6], cospi40);
+ u[5] = _mm_add_epi32(u[5], x);
+ u[5] = _mm_add_epi32(u[5], rnding);
+ u[5] = _mm_srai_epi32(u[5], bit);
+
+ u[6] = _mm_mullo_epi32(v[5], cospi40);
+ x = _mm_mullo_epi32(v[6], cospi24);
+ u[6] = _mm_sub_epi32(x, u[6]);
+ u[6] = _mm_add_epi32(u[6], rnding);
+ u[6] = _mm_srai_epi32(u[6], bit);
+
+ u[8] = _mm_add_epi32(v[8], v[9]);
+ u[9] = _mm_sub_epi32(v[8], v[9]);
+ u[10] = _mm_sub_epi32(v[11], v[10]);
+ u[11] = _mm_add_epi32(v[11], v[10]);
+ u[12] = _mm_add_epi32(v[12], v[13]);
+ u[13] = _mm_sub_epi32(v[12], v[13]);
+ u[14] = _mm_sub_epi32(v[15], v[14]);
+ u[15] = _mm_add_epi32(v[15], v[14]);
+
+ // stage 6
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
+ v[4] = u[4];
+ v[5] = u[5];
+ v[6] = u[6];
+ v[7] = u[7];
+
+ v[8] = _mm_mullo_epi32(u[8], cospi60);
+ x = _mm_mullo_epi32(u[15], cospi4);
+ v[8] = _mm_add_epi32(v[8], x);
+ v[8] = _mm_add_epi32(v[8], rnding);
+ v[8] = _mm_srai_epi32(v[8], bit);
+
+ v[15] = _mm_mullo_epi32(u[8], cospi4);
+ x = _mm_mullo_epi32(u[15], cospi60);
+ v[15] = _mm_sub_epi32(x, v[15]);
+ v[15] = _mm_add_epi32(v[15], rnding);
+ v[15] = _mm_srai_epi32(v[15], bit);
+
+ v[9] = _mm_mullo_epi32(u[9], cospi28);
+ x = _mm_mullo_epi32(u[14], cospi36);
+ v[9] = _mm_add_epi32(v[9], x);
+ v[9] = _mm_add_epi32(v[9], rnding);
+ v[9] = _mm_srai_epi32(v[9], bit);
+
+ v[14] = _mm_mullo_epi32(u[9], cospi36);
+ x = _mm_mullo_epi32(u[14], cospi28);
+ v[14] = _mm_sub_epi32(x, v[14]);
+ v[14] = _mm_add_epi32(v[14], rnding);
+ v[14] = _mm_srai_epi32(v[14], bit);
+
+ v[10] = _mm_mullo_epi32(u[10], cospi44);
+ x = _mm_mullo_epi32(u[13], cospi20);
+ v[10] = _mm_add_epi32(v[10], x);
+ v[10] = _mm_add_epi32(v[10], rnding);
+ v[10] = _mm_srai_epi32(v[10], bit);
+
+ v[13] = _mm_mullo_epi32(u[10], cospi20);
+ x = _mm_mullo_epi32(u[13], cospi44);
+ v[13] = _mm_sub_epi32(x, v[13]);
+ v[13] = _mm_add_epi32(v[13], rnding);
+ v[13] = _mm_srai_epi32(v[13], bit);
+
+ v[11] = _mm_mullo_epi32(u[11], cospi12);
+ x = _mm_mullo_epi32(u[12], cospi52);
+ v[11] = _mm_add_epi32(v[11], x);
+ v[11] = _mm_add_epi32(v[11], rnding);
+ v[11] = _mm_srai_epi32(v[11], bit);
+
+ v[12] = _mm_mullo_epi32(u[11], cospi52);
+ x = _mm_mullo_epi32(u[12], cospi12);
+ v[12] = _mm_sub_epi32(x, v[12]);
+ v[12] = _mm_add_epi32(v[12], rnding);
+ v[12] = _mm_srai_epi32(v[12], bit);
+
+ // Final permuted store: frequency-ordered output rows.
+ out[0 * col_num + col] = v[0];
+ out[1 * col_num + col] = v[8];
+ out[2 * col_num + col] = v[4];
+ out[3 * col_num + col] = v[12];
+ out[4 * col_num + col] = v[2];
+ out[5 * col_num + col] = v[10];
+ out[6 * col_num + col] = v[6];
+ out[7 * col_num + col] = v[14];
+ out[8 * col_num + col] = v[1];
+ out[9 * col_num + col] = v[9];
+ out[10 * col_num + col] = v[5];
+ out[11 * col_num + col] = v[13];
+ out[12 * col_num + col] = v[3];
+ out[13 * col_num + col] = v[11];
+ out[14 * col_num + col] = v[7];
+ out[15 * col_num + col] = v[15];
+ }
+}
+
+// Forward 16-point ADST over 32-bit lanes (SSE4.1), as a 9-stage network.
+// Same data layout as fdct16x16_sse4_1: in/out[row * num_cols + col] with
+// four interleaved columns per vector. Rotation pairs use half_btf_sse4_1,
+// which applies the shared rnding/srai-by-bit fixed-point rounding.
+static void fadst16x16_sse4_1(__m128i *in, __m128i *out, int bit,
+ const int num_cols) {
+ const int32_t *cospi = cospi_arr(bit);
+ const __m128i cospi32 = _mm_set1_epi32(cospi[32]);
+ const __m128i cospi48 = _mm_set1_epi32(cospi[48]);
+ const __m128i cospi16 = _mm_set1_epi32(cospi[16]);
+ const __m128i cospim16 = _mm_set1_epi32(-cospi[16]);
+ const __m128i cospim48 = _mm_set1_epi32(-cospi[48]);
+ const __m128i cospi8 = _mm_set1_epi32(cospi[8]);
+ const __m128i cospi56 = _mm_set1_epi32(cospi[56]);
+ const __m128i cospim56 = _mm_set1_epi32(-cospi[56]);
+ const __m128i cospim8 = _mm_set1_epi32(-cospi[8]);
+ const __m128i cospi24 = _mm_set1_epi32(cospi[24]);
+ const __m128i cospim24 = _mm_set1_epi32(-cospi[24]);
+ const __m128i cospim40 = _mm_set1_epi32(-cospi[40]);
+ const __m128i cospi40 = _mm_set1_epi32(cospi[40]);
+ const __m128i cospi2 = _mm_set1_epi32(cospi[2]);
+ const __m128i cospi62 = _mm_set1_epi32(cospi[62]);
+ const __m128i cospim2 = _mm_set1_epi32(-cospi[2]);
+ const __m128i cospi10 = _mm_set1_epi32(cospi[10]);
+ const __m128i cospi54 = _mm_set1_epi32(cospi[54]);
+ const __m128i cospim10 = _mm_set1_epi32(-cospi[10]);
+ const __m128i cospi18 = _mm_set1_epi32(cospi[18]);
+ const __m128i cospi46 = _mm_set1_epi32(cospi[46]);
+ const __m128i cospim18 = _mm_set1_epi32(-cospi[18]);
+ const __m128i cospi26 = _mm_set1_epi32(cospi[26]);
+ const __m128i cospi38 = _mm_set1_epi32(cospi[38]);
+ const __m128i cospim26 = _mm_set1_epi32(-cospi[26]);
+ const __m128i cospi34 = _mm_set1_epi32(cospi[34]);
+ const __m128i cospi30 = _mm_set1_epi32(cospi[30]);
+ const __m128i cospim34 = _mm_set1_epi32(-cospi[34]);
+ const __m128i cospi42 = _mm_set1_epi32(cospi[42]);
+ const __m128i cospi22 = _mm_set1_epi32(cospi[22]);
+ const __m128i cospim42 = _mm_set1_epi32(-cospi[42]);
+ const __m128i cospi50 = _mm_set1_epi32(cospi[50]);
+ const __m128i cospi14 = _mm_set1_epi32(cospi[14]);
+ const __m128i cospim50 = _mm_set1_epi32(-cospi[50]);
+ const __m128i cospi58 = _mm_set1_epi32(cospi[58]);
+ const __m128i cospi6 = _mm_set1_epi32(cospi[6]);
+ const __m128i cospim58 = _mm_set1_epi32(-cospi[58]);
+ const __m128i rnding = _mm_set1_epi32(1 << (bit - 1));
+ const __m128i zero = _mm_setzero_si128();
+
+ __m128i u[16], v[16], x, y;
+ int col;
+
+ for (col = 0; col < num_cols; ++col) {
+ // stage 0
+ // stage 1: input permutation with sign flips (0 - x negates a lane).
+ u[0] = in[0 * num_cols + col];
+ u[1] = _mm_sub_epi32(zero, in[15 * num_cols + col]);
+ u[2] = _mm_sub_epi32(zero, in[7 * num_cols + col]);
+ u[3] = in[8 * num_cols + col];
+ u[4] = _mm_sub_epi32(zero, in[3 * num_cols + col]);
+ u[5] = in[12 * num_cols + col];
+ u[6] = in[4 * num_cols + col];
+ u[7] = _mm_sub_epi32(zero, in[11 * num_cols + col]);
+ u[8] = _mm_sub_epi32(zero, in[1 * num_cols + col]);
+ u[9] = in[14 * num_cols + col];
+ u[10] = in[6 * num_cols + col];
+ u[11] = _mm_sub_epi32(zero, in[9 * num_cols + col]);
+ u[12] = in[2 * num_cols + col];
+ u[13] = _mm_sub_epi32(zero, in[13 * num_cols + col]);
+ u[14] = _mm_sub_epi32(zero, in[5 * num_cols + col]);
+ u[15] = in[10 * num_cols + col];
+
+ // stage 2: cospi[32] rotations on odd pairs, with round-and-shift.
+ v[0] = u[0];
+ v[1] = u[1];
+
+ x = _mm_mullo_epi32(u[2], cospi32);
+ y = _mm_mullo_epi32(u[3], cospi32);
+ v[2] = _mm_add_epi32(x, y);
+ v[2] = _mm_add_epi32(v[2], rnding);
+ v[2] = _mm_srai_epi32(v[2], bit);
+
+ v[3] = _mm_sub_epi32(x, y);
+ v[3] = _mm_add_epi32(v[3], rnding);
+ v[3] = _mm_srai_epi32(v[3], bit);
+
+ v[4] = u[4];
+ v[5] = u[5];
+
+ x = _mm_mullo_epi32(u[6], cospi32);
+ y = _mm_mullo_epi32(u[7], cospi32);
+ v[6] = _mm_add_epi32(x, y);
+ v[6] = _mm_add_epi32(v[6], rnding);
+ v[6] = _mm_srai_epi32(v[6], bit);
+
+ v[7] = _mm_sub_epi32(x, y);
+ v[7] = _mm_add_epi32(v[7], rnding);
+ v[7] = _mm_srai_epi32(v[7], bit);
+
+ v[8] = u[8];
+ v[9] = u[9];
+
+ x = _mm_mullo_epi32(u[10], cospi32);
+ y = _mm_mullo_epi32(u[11], cospi32);
+ v[10] = _mm_add_epi32(x, y);
+ v[10] = _mm_add_epi32(v[10], rnding);
+ v[10] = _mm_srai_epi32(v[10], bit);
+
+ v[11] = _mm_sub_epi32(x, y);
+ v[11] = _mm_add_epi32(v[11], rnding);
+ v[11] = _mm_srai_epi32(v[11], bit);
+
+ v[12] = u[12];
+ v[13] = u[13];
+
+ x = _mm_mullo_epi32(u[14], cospi32);
+ y = _mm_mullo_epi32(u[15], cospi32);
+ v[14] = _mm_add_epi32(x, y);
+ v[14] = _mm_add_epi32(v[14], rnding);
+ v[14] = _mm_srai_epi32(v[14], bit);
+
+ v[15] = _mm_sub_epi32(x, y);
+ v[15] = _mm_add_epi32(v[15], rnding);
+ v[15] = _mm_srai_epi32(v[15], bit);
+
+ // stage 3: butterflies at distance 2.
+ u[0] = _mm_add_epi32(v[0], v[2]);
+ u[1] = _mm_add_epi32(v[1], v[3]);
+ u[2] = _mm_sub_epi32(v[0], v[2]);
+ u[3] = _mm_sub_epi32(v[1], v[3]);
+ u[4] = _mm_add_epi32(v[4], v[6]);
+ u[5] = _mm_add_epi32(v[5], v[7]);
+ u[6] = _mm_sub_epi32(v[4], v[6]);
+ u[7] = _mm_sub_epi32(v[5], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[10]);
+ u[9] = _mm_add_epi32(v[9], v[11]);
+ u[10] = _mm_sub_epi32(v[8], v[10]);
+ u[11] = _mm_sub_epi32(v[9], v[11]);
+ u[12] = _mm_add_epi32(v[12], v[14]);
+ u[13] = _mm_add_epi32(v[13], v[15]);
+ u[14] = _mm_sub_epi32(v[12], v[14]);
+ u[15] = _mm_sub_epi32(v[13], v[15]);
+
+ // stage 4: cospi[16]/cospi[48] rotations via half_btf_sse4_1.
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
+ v[4] = half_btf_sse4_1(&cospi16, &u[4], &cospi48, &u[5], &rnding, bit);
+ v[5] = half_btf_sse4_1(&cospi48, &u[4], &cospim16, &u[5], &rnding, bit);
+ v[6] = half_btf_sse4_1(&cospim48, &u[6], &cospi16, &u[7], &rnding, bit);
+ v[7] = half_btf_sse4_1(&cospi16, &u[6], &cospi48, &u[7], &rnding, bit);
+ v[8] = u[8];
+ v[9] = u[9];
+ v[10] = u[10];
+ v[11] = u[11];
+ v[12] = half_btf_sse4_1(&cospi16, &u[12], &cospi48, &u[13], &rnding, bit);
+ v[13] = half_btf_sse4_1(&cospi48, &u[12], &cospim16, &u[13], &rnding, bit);
+ v[14] = half_btf_sse4_1(&cospim48, &u[14], &cospi16, &u[15], &rnding, bit);
+ v[15] = half_btf_sse4_1(&cospi16, &u[14], &cospi48, &u[15], &rnding, bit);
+
+ // stage 5: butterflies at distance 4.
+ u[0] = _mm_add_epi32(v[0], v[4]);
+ u[1] = _mm_add_epi32(v[1], v[5]);
+ u[2] = _mm_add_epi32(v[2], v[6]);
+ u[3] = _mm_add_epi32(v[3], v[7]);
+ u[4] = _mm_sub_epi32(v[0], v[4]);
+ u[5] = _mm_sub_epi32(v[1], v[5]);
+ u[6] = _mm_sub_epi32(v[2], v[6]);
+ u[7] = _mm_sub_epi32(v[3], v[7]);
+ u[8] = _mm_add_epi32(v[8], v[12]);
+ u[9] = _mm_add_epi32(v[9], v[13]);
+ u[10] = _mm_add_epi32(v[10], v[14]);
+ u[11] = _mm_add_epi32(v[11], v[15]);
+ u[12] = _mm_sub_epi32(v[8], v[12]);
+ u[13] = _mm_sub_epi32(v[9], v[13]);
+ u[14] = _mm_sub_epi32(v[10], v[14]);
+ u[15] = _mm_sub_epi32(v[11], v[15]);
+
+ // stage 6: cospi[8]/[24]/[40]/[56] rotations.
+ v[0] = u[0];
+ v[1] = u[1];
+ v[2] = u[2];
+ v[3] = u[3];
+ v[4] = u[4];
+ v[5] = u[5];
+ v[6] = u[6];
+ v[7] = u[7];
+ v[8] = half_btf_sse4_1(&cospi8, &u[8], &cospi56, &u[9], &rnding, bit);
+ v[9] = half_btf_sse4_1(&cospi56, &u[8], &cospim8, &u[9], &rnding, bit);
+ v[10] = half_btf_sse4_1(&cospi40, &u[10], &cospi24, &u[11], &rnding, bit);
+ v[11] = half_btf_sse4_1(&cospi24, &u[10], &cospim40, &u[11], &rnding, bit);
+ v[12] = half_btf_sse4_1(&cospim56, &u[12], &cospi8, &u[13], &rnding, bit);
+ v[13] = half_btf_sse4_1(&cospi8, &u[12], &cospi56, &u[13], &rnding, bit);
+ v[14] = half_btf_sse4_1(&cospim24, &u[14], &cospi40, &u[15], &rnding, bit);
+ v[15] = half_btf_sse4_1(&cospi40, &u[14], &cospi24, &u[15], &rnding, bit);
+
+ // stage 7: butterflies at distance 8.
+ u[0] = _mm_add_epi32(v[0], v[8]);
+ u[1] = _mm_add_epi32(v[1], v[9]);
+ u[2] = _mm_add_epi32(v[2], v[10]);
+ u[3] = _mm_add_epi32(v[3], v[11]);
+ u[4] = _mm_add_epi32(v[4], v[12]);
+ u[5] = _mm_add_epi32(v[5], v[13]);
+ u[6] = _mm_add_epi32(v[6], v[14]);
+ u[7] = _mm_add_epi32(v[7], v[15]);
+ u[8] = _mm_sub_epi32(v[0], v[8]);
+ u[9] = _mm_sub_epi32(v[1], v[9]);
+ u[10] = _mm_sub_epi32(v[2], v[10]);
+ u[11] = _mm_sub_epi32(v[3], v[11]);
+ u[12] = _mm_sub_epi32(v[4], v[12]);
+ u[13] = _mm_sub_epi32(v[5], v[13]);
+ u[14] = _mm_sub_epi32(v[6], v[14]);
+ u[15] = _mm_sub_epi32(v[7], v[15]);
+
+ // stage 8: final odd-cosine rotations.
+ v[0] = half_btf_sse4_1(&cospi2, &u[0], &cospi62, &u[1], &rnding, bit);
+ v[1] = half_btf_sse4_1(&cospi62, &u[0], &cospim2, &u[1], &rnding, bit);
+ v[2] = half_btf_sse4_1(&cospi10, &u[2], &cospi54, &u[3], &rnding, bit);
+ v[3] = half_btf_sse4_1(&cospi54, &u[2], &cospim10, &u[3], &rnding, bit);
+ v[4] = half_btf_sse4_1(&cospi18, &u[4], &cospi46, &u[5], &rnding, bit);
+ v[5] = half_btf_sse4_1(&cospi46, &u[4], &cospim18, &u[5], &rnding, bit);
+ v[6] = half_btf_sse4_1(&cospi26, &u[6], &cospi38, &u[7], &rnding, bit);
+ v[7] = half_btf_sse4_1(&cospi38, &u[6], &cospim26, &u[7], &rnding, bit);
+ v[8] = half_btf_sse4_1(&cospi34, &u[8], &cospi30, &u[9], &rnding, bit);
+ v[9] = half_btf_sse4_1(&cospi30, &u[8], &cospim34, &u[9], &rnding, bit);
+ v[10] = half_btf_sse4_1(&cospi42, &u[10], &cospi22, &u[11], &rnding, bit);
+ v[11] = half_btf_sse4_1(&cospi22, &u[10], &cospim42, &u[11], &rnding, bit);
+ v[12] = half_btf_sse4_1(&cospi50, &u[12], &cospi14, &u[13], &rnding, bit);
+ v[13] = half_btf_sse4_1(&cospi14, &u[12], &cospim50, &u[13], &rnding, bit);
+ v[14] = half_btf_sse4_1(&cospi58, &u[14], &cospi6, &u[15], &rnding, bit);
+ v[15] = half_btf_sse4_1(&cospi6, &u[14], &cospim58, &u[15], &rnding, bit);
+
+ // stage 9: permuted store of the results.
+ out[0 * num_cols + col] = v[1];
+ out[1 * num_cols + col] = v[14];
+ out[2 * num_cols + col] = v[3];
+ out[3 * num_cols + col] = v[12];
+ out[4 * num_cols + col] = v[5];
+ out[5 * num_cols + col] = v[10];
+ out[6 * num_cols + col] = v[7];
+ out[7 * num_cols + col] = v[8];
+ out[8 * num_cols + col] = v[9];
+ out[9 * num_cols + col] = v[6];
+ out[10 * num_cols + col] = v[11];
+ out[11 * num_cols + col] = v[4];
+ out[12 * num_cols + col] = v[13];
+ out[13 * num_cols + col] = v[2];
+ out[14 * num_cols + col] = v[15];
+ out[15 * num_cols + col] = v[0];
+ }
+}
+
+// Apply the inter-pass rounding shift to a 16x16 block (64 __m128i).
+static void col_txfm_16x16_rounding(__m128i *in, int shift) {
+ // Note:
+ // We split 16x16 rounding into 4 sections of 8x8 rounding,
+ // instead of 4 columns
+ col_txfm_8x8_rounding(&in[0], shift);
+ col_txfm_8x8_rounding(&in[16], shift);
+ col_txfm_8x8_rounding(&in[32], shift);
+ col_txfm_8x8_rounding(&in[48], shift);
+}
+
+// Apply the inter-pass rounding shift to an 8x16 block as two 8x8 halves.
+static void col_txfm_8x16_rounding(__m128i *in, int shift) {
+ col_txfm_8x8_rounding(&in[0], shift);
+ col_txfm_8x8_rounding(&in[16], shift);
+}
+
+// Store 16x16 coefficients to `output` as four consecutive 8x8 stores;
+// each 8x8 store covers 16 vectors * 4 lanes = 64 int32 values.
+static void write_buffer_16x16(const __m128i *in, int32_t *output) {
+ const int size_8x8 = 16 * 4;
+ write_buffer_8x8(&in[0], output);
+ output += size_8x8;
+ write_buffer_8x8(&in[16], output);
+ output += size_8x8;
+ write_buffer_8x8(&in[32], output);
+ output += size_8x8;
+ write_buffer_8x8(&in[48], output);
+}
+// Identity transform for 16-point rows: scale each coefficient by
+// 2 * NewSqrt2 in fixed point, rounding with 1 << (NewSqrt2Bits - 1)
+// before the arithmetic shift. `bit` is unused (signature compatibility).
+static void idtx16x16_sse4_1(__m128i *in, __m128i *out, int bit, int col_num) {
+ (void)bit;
+ __m128i fact = _mm_set1_epi32(2 * NewSqrt2);
+ __m128i offset = _mm_set1_epi32(1 << (NewSqrt2Bits - 1));
+ __m128i a_low;
+
+ // 16 rows per 4-wide column group.
+ int num_iters = 16 * col_num;
+ for (int i = 0; i < num_iters; i++) {
+ a_low = _mm_mullo_epi32(in[i], fact);
+ a_low = _mm_add_epi32(a_low, offset);
+ out[i] = _mm_srai_epi32(a_low, NewSqrt2Bits);
+ }
+}
+void av1_fwd_txfm2d_16x16_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[64], out[64];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X16];
+ const int txw_idx = get_txw_idx(TX_16X16);
+ const int txh_idx = get_txh_idx(TX_16X16);
+ const int col_num = 4;
+ switch (tx_type) {
+ case DCT_DCT:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case ADST_DCT:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case DCT_ADST:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case ADST_ADST:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case FLIPADST_DCT:
+ load_buffer_16x16(input, in, stride, 1, 0, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case DCT_FLIPADST:
+ load_buffer_16x16(input, in, stride, 0, 1, shift[0]);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case FLIPADST_FLIPADST:
+ load_buffer_16x16(input, in, stride, 1, 1, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case ADST_FLIPADST:
+ load_buffer_16x16(input, in, stride, 0, 1, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case FLIPADST_ADST:
+ load_buffer_16x16(input, in, stride, 1, 0, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case IDTX:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case V_DCT:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case H_DCT:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fdct16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case V_ADST:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case H_ADST:
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case V_FLIPADST:
+ load_buffer_16x16(input, in, stride, 1, 0, shift[0]);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx],
+ col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx], col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ case H_FLIPADST:
+ load_buffer_16x16(input, in, stride, 0, 1, shift[0]);
+ idtx16x16_sse4_1(in, out, av1_fwd_cos_bit_col[txw_idx][txh_idx], col_num);
+ col_txfm_16x16_rounding(out, -shift[1]);
+ transpose_16x16(out, in);
+ fadst16x16_sse4_1(in, out, av1_fwd_cos_bit_row[txw_idx][txh_idx],
+ col_num);
+ write_buffer_16x16(out, coeff);
+ break;
+ default: assert(0);
+ }
+ (void)bd;
+}
+
+// Copies `size` __m128i vectors from `out` to `in`, reversing the order of
+// even/odd vector pairs (used for the left-right flip of a transposed row
+// buffer before the row transform).
+// Even indices map via in[size - 2 - i] = out[i]; odd via in[size - i].
+// The original hard-coded `30` in the even-index mapping, which is only
+// correct when size == 32 (the sole size used by the caller); this is the
+// size-general form and behaves identically for size == 32.
+static INLINE void flip_buf_sse4_1(__m128i *in, __m128i *out, int size) {
+  for (int i = 0; i < size; i += 2) in[size - 2 - i] = out[i];
+  for (int i = 1; i < size; i += 2) in[size - i] = out[i];
+}
+
+// Column (vertical) 8-point 1-D transform kernels, indexed by TX_TYPE.
+static const fwd_transform_1d_sse4_1 col_highbd_txfm8x8_arr[TX_TYPES] = {
+ fdct8x8_sse4_1, // DCT_DCT
+ fadst8x8_sse4_1, // ADST_DCT
+ fdct8x8_sse4_1, // DCT_ADST
+ fadst8x8_sse4_1, // ADST_ADST
+ fadst8x8_sse4_1, // FLIPADST_DCT
+ fdct8x8_sse4_1, // DCT_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_FLIPADST
+ fadst8x8_sse4_1, // ADST_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_ADST
+ idtx8x8_sse4_1, // IDTX
+ fdct8x8_sse4_1, // V_DCT
+ idtx8x8_sse4_1, // H_DCT
+ fadst8x8_sse4_1, // V_ADST
+ idtx8x8_sse4_1, // H_ADST
+ fadst8x8_sse4_1, // V_FLIPADST
+ idtx8x8_sse4_1 // H_FLIPADST
+};
+#if !CONFIG_REALTIME_ONLY
+// Row (horizontal) 8-point kernels for 32x8-shaped blocks, indexed by
+// TX_TYPE. Only DCT_DCT and IDTX are supported here; the remaining entries
+// are NULL and must not be selected by callers.
+static const fwd_transform_1d_sse4_1 row_highbd_txfm32x8_arr[TX_TYPES] = {
+ fdct8x8_sse4_1, // DCT_DCT
+ NULL, // ADST_DCT
+ NULL, // DCT_ADST
+ NULL, // ADST_ADST
+ NULL, // FLIPADST_DCT
+ NULL, // DCT_FLIPADST
+ NULL, // FLIPADST_FLIPADST
+ NULL, // ADST_FLIPADST
+ NULL, // FLIPADST_ADST
+ idtx32x8_sse4_1, // IDTX
+ NULL, // V_DCT
+ NULL, // H_DCT
+ NULL, // V_ADST
+ NULL, // H_ADST
+ NULL, // V_FLIPADST
+ NULL, // H_FLIPADST
+};
+#endif
+// Column 8-point kernels for 4x8-shaped blocks, indexed by TX_TYPE
+// (4-wide DCT variant where the column transform is a DCT).
+static const fwd_transform_1d_sse4_1 col_highbd_txfm4x8_arr[TX_TYPES] = {
+ fdct4x8_sse4_1, // DCT_DCT
+ fadst8x8_sse4_1, // ADST_DCT
+ fdct4x8_sse4_1, // DCT_ADST
+ fadst8x8_sse4_1, // ADST_ADST
+ fadst8x8_sse4_1, // FLIPADST_DCT
+ fdct4x8_sse4_1, // DCT_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_FLIPADST
+ fadst8x8_sse4_1, // ADST_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_ADST
+ idtx8x8_sse4_1, // IDTX
+ fdct4x8_sse4_1, // V_DCT
+ idtx8x8_sse4_1, // H_DCT
+ fadst8x8_sse4_1, // V_ADST
+ idtx8x8_sse4_1, // H_ADST
+ fadst8x8_sse4_1, // V_FLIPADST
+ idtx8x8_sse4_1 // H_FLIPADST
+};
+
+// Row (horizontal) 16-point kernels, indexed by TX_TYPE.
+static const fwd_transform_1d_sse4_1 row_highbd_txfm8x16_arr[TX_TYPES] = {
+ fdct16x16_sse4_1, // DCT_DCT
+ fdct16x16_sse4_1, // ADST_DCT
+ fadst16x16_sse4_1, // DCT_ADST
+ fadst16x16_sse4_1, // ADST_ADST
+ fdct16x16_sse4_1, // FLIPADST_DCT
+ fadst16x16_sse4_1, // DCT_FLIPADST
+ fadst16x16_sse4_1, // FLIPADST_FLIPADST
+ fadst16x16_sse4_1, // ADST_FLIPADST
+ fadst16x16_sse4_1, // FLIPADST_ADST
+ idtx16x16_sse4_1, // IDTX
+ idtx16x16_sse4_1, // V_DCT
+ fdct16x16_sse4_1, // H_DCT
+ idtx16x16_sse4_1, // V_ADST
+ fadst16x16_sse4_1, // H_ADST
+ idtx16x16_sse4_1, // V_FLIPADST
+ fadst16x16_sse4_1 // H_FLIPADST
+};
+
+// Column (vertical) 16-point kernels, indexed by TX_TYPE.
+static const fwd_transform_1d_sse4_1 col_highbd_txfm8x16_arr[TX_TYPES] = {
+ fdct16x16_sse4_1, // DCT_DCT
+ fadst16x16_sse4_1, // ADST_DCT
+ fdct16x16_sse4_1, // DCT_ADST
+ fadst16x16_sse4_1, // ADST_ADST
+ fadst16x16_sse4_1, // FLIPADST_DCT
+ fdct16x16_sse4_1, // DCT_FLIPADST
+ fadst16x16_sse4_1, // FLIPADST_FLIPADST
+ fadst16x16_sse4_1, // ADST_FLIPADST
+ fadst16x16_sse4_1, // FLIPADST_ADST
+ idtx16x16_sse4_1, // IDTX
+ fdct16x16_sse4_1, // V_DCT
+ idtx16x16_sse4_1, // H_DCT
+ fadst16x16_sse4_1, // V_ADST
+ idtx16x16_sse4_1, // H_ADST
+ fadst16x16_sse4_1, // V_FLIPADST
+ idtx16x16_sse4_1 // H_FLIPADST
+};
+// Row (horizontal) 8-point kernels, indexed by TX_TYPE.
+static const fwd_transform_1d_sse4_1 row_highbd_txfm8x8_arr[TX_TYPES] = {
+ fdct8x8_sse4_1, // DCT_DCT
+ fdct8x8_sse4_1, // ADST_DCT
+ fadst8x8_sse4_1, // DCT_ADST
+ fadst8x8_sse4_1, // ADST_ADST
+ fdct8x8_sse4_1, // FLIPADST_DCT
+ fadst8x8_sse4_1, // DCT_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_FLIPADST
+ fadst8x8_sse4_1, // ADST_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_ADST
+ idtx8x8_sse4_1, // IDTX
+ idtx8x8_sse4_1, // V_DCT
+ fdct8x8_sse4_1, // H_DCT
+ idtx8x8_sse4_1, // V_ADST
+ fadst8x8_sse4_1, // H_ADST
+ idtx8x8_sse4_1, // V_FLIPADST
+ fadst8x8_sse4_1 // H_FLIPADST
+};
+
+// Row 8-point kernels for 8x4-shaped blocks, indexed by TX_TYPE
+// (4-wide DCT variant where the row transform is a DCT).
+static const fwd_transform_1d_sse4_1 row_highbd_txfm4x8_arr[TX_TYPES] = {
+ fdct4x8_sse4_1, // DCT_DCT
+ fdct4x8_sse4_1, // ADST_DCT
+ fadst8x8_sse4_1, // DCT_ADST
+ fadst8x8_sse4_1, // ADST_ADST
+ fdct4x8_sse4_1, // FLIPADST_DCT
+ fadst8x8_sse4_1, // DCT_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_FLIPADST
+ fadst8x8_sse4_1, // ADST_FLIPADST
+ fadst8x8_sse4_1, // FLIPADST_ADST
+ idtx8x8_sse4_1, // IDTX
+ idtx8x8_sse4_1, // V_DCT
+ fdct4x8_sse4_1, // H_DCT
+ idtx8x8_sse4_1, // V_ADST
+ fadst8x8_sse4_1, // H_ADST
+ idtx8x8_sse4_1, // V_FLIPADST
+ fadst8x8_sse4_1 // H_FLIPADST
+};
+
+// Row (horizontal) 4-point kernels, indexed by TX_TYPE.
+static const fwd_transform_1d_sse4_1 row_highbd_txfm4x4_arr[TX_TYPES] = {
+ fdct4x4_sse4_1, // DCT_DCT
+ fdct4x4_sse4_1, // ADST_DCT
+ fadst4x4_sse4_1, // DCT_ADST
+ fadst4x4_sse4_1, // ADST_ADST
+ fdct4x4_sse4_1, // FLIPADST_DCT
+ fadst4x4_sse4_1, // DCT_FLIPADST
+ fadst4x4_sse4_1, // FLIPADST_FLIPADST
+ fadst4x4_sse4_1, // ADST_FLIPADST
+ fadst4x4_sse4_1, // FLIPADST_ADST
+ idtx4x4_sse4_1, // IDTX
+ idtx4x4_sse4_1, // V_DCT
+ fdct4x4_sse4_1, // H_DCT
+ idtx4x4_sse4_1, // V_ADST
+ fadst4x4_sse4_1, // H_ADST
+ idtx4x4_sse4_1, // V_FLIPADST
+ fadst4x4_sse4_1 // H_FLIPADST
+};
+
+// Column (vertical) 4-point kernels, indexed by TX_TYPE.
+static const fwd_transform_1d_sse4_1 col_highbd_txfm4x4_arr[TX_TYPES] = {
+ fdct4x4_sse4_1, // DCT_DCT
+ fadst4x4_sse4_1, // ADST_DCT
+ fdct4x4_sse4_1, // DCT_ADST
+ fadst4x4_sse4_1, // ADST_ADST
+ fadst4x4_sse4_1, // FLIPADST_DCT
+ fdct4x4_sse4_1, // DCT_FLIPADST
+ fadst4x4_sse4_1, // FLIPADST_FLIPADST
+ fadst4x4_sse4_1, // ADST_FLIPADST
+ fadst4x4_sse4_1, // FLIPADST_ADST
+ idtx4x4_sse4_1, // IDTX
+ fdct4x4_sse4_1, // V_DCT
+ idtx4x4_sse4_1, // H_DCT
+ fadst4x4_sse4_1, // V_ADST
+ idtx4x4_sse4_1, // H_ADST
+ fadst4x4_sse4_1, // V_FLIPADST
+ idtx4x4_sse4_1 // H_FLIPADST
+};
+
+// Column 32-point kernels, indexed by TX_TYPE. Only DCT_DCT and IDTX are
+// supported for 32-point columns; NULL entries must not be selected.
+static const fwd_transform_1d_sse4_1 col_highbd_txfm8x32_arr[TX_TYPES] = {
+ av1_fdct32_sse4_1, // DCT_DCT
+ NULL, // ADST_DCT
+ NULL, // DCT_ADST
+ NULL, // ADST_ADST
+ NULL, // FLIPADST_DCT
+ NULL, // DCT_FLIPADST
+ NULL, // FLIPADST_FLIPADST
+ NULL, // ADST_FLIPADST
+ NULL, // FLIPADST_ADST
+ av1_idtx32_sse4_1, // IDTX
+ NULL, // V_DCT
+ NULL, // H_DCT
+ NULL, // V_ADST
+ NULL, // H_ADST
+ NULL, // V_FLIPADST
+ NULL // H_FLIPADST
+};
+
+// Row 16-point kernels used by the Nx32 paths. Only DCT_DCT and IDTX are
+// supported; NULL entries must not be selected.
+static const fwd_transform_1d_sse4_1 row_highbd_txfm8x32_arr[TX_TYPES] = {
+ fdct16x16_sse4_1, // DCT_DCT
+ NULL, // ADST_DCT
+ NULL, // DCT_ADST
+ NULL, // ADST_ADST
+ NULL, // FLIPADST_DCT
+ NULL, // DCT_FLIPADST
+ NULL, // FLIPADST_FLIPADST
+ NULL, // ADST_FLIPADST
+ NULL, // FLIPADST_ADST
+ idtx16x16_sse4_1, // IDTX
+ NULL, // V_DCT
+ NULL, // H_DCT
+ NULL, // V_ADST
+ NULL, // H_ADST
+ NULL, // V_FLIPADST
+ NULL // H_FLIPADST
+};
+
+// Forward 2-D transform for a 16x8 high-bitdepth block.
+// Pipeline: per-half column transform + rounding + transpose, optional
+// left-right flip of the transposed buffer, 16-point row transform, then
+// rectangular rescale by NewSqrt2 before writing `coeff`.
+void av1_fwd_txfm2d_16x8_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[32], out[32];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X8];
+ const int txw_idx = get_txw_idx(TX_16X8);
+ const int txh_idx = get_txh_idx(TX_16X8);
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x8_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm8x16_arr[tx_type];
+ int bit = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ // Column pass on the left and right 8x8 halves; results land in `out`
+ // at 16-vector offsets per half after the transpose.
+ for (int i = 0; i < 2; i++) {
+ load_buffer_8x8(input + i * 8, in, stride, ud_flip, 0, shift[0]);
+ col_txfm(in, in, bit, 2);
+ col_txfm_8x8_rounding(in, -shift[1]);
+ transpose_8x8(in, out + i * 16);
+ }
+
+ // Row pass; lr_flip is applied by reversing the buffer first.
+ if (lr_flip) {
+ flip_buf_sse4_1(in, out, 32);
+ row_txfm(in, out, bit, 2);
+ } else {
+ row_txfm(out, out, bit, 2);
+ }
+
+ for (int i = 0; i < 2; i++) {
+ av1_round_shift_rect_array_32_sse4_1(out + i * 16, in, 16, -shift[2],
+ NewSqrt2);
+ write_buffer_8x8(in, coeff + i * 64);
+ }
+ (void)bd;
+}
+
+// Forward 2-D transform for an 8x16 high-bitdepth block.
+// Column pass (16-point) over the whole block, then two 8-point row passes
+// with rectangular rescale by NewSqrt2; flips are handled at load time.
+void av1_fwd_txfm2d_8x16_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[32], out[32];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X16];
+ const int txw_idx = get_txw_idx(TX_8X16);
+ const int txh_idx = get_txh_idx(TX_8X16);
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x16_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm8x8_arr[tx_type];
+ int bit = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ load_buffer_8x16(input, in, stride, ud_flip, lr_flip, shift[0]);
+ col_txfm(in, in, bit, 2);
+ col_txfm_8x16_rounding(in, -shift[1]);
+ // Transpose the top and bottom 8x8 halves independently.
+ transpose_8x8(in, out);
+ transpose_8x8(in + 16, out + 16);
+
+ for (int i = 0; i < 2; i++) {
+ row_txfm(out + i * 16, out, bit, 2);
+ av1_round_shift_rect_array_32_sse4_1(out, out, 16, -shift[2], NewSqrt2);
+ write_buffer_16x8(out, coeff + i * 8, 16);
+ }
+ (void)bd;
+}
+
+#if !CONFIG_REALTIME_ONLY
+// Forward 2-D transform for a 4x16 high-bitdepth block.
+// 16-point column pass, rounding, transpose, then four 4-point row passes
+// whose 4-wide outputs are stored via store_output_w4. No rectangular
+// rescale is applied here (4x16 uses a power-of-two area).
+void av1_fwd_txfm2d_4x16_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[16];
+ __m128i *outcoeff128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X16];
+ const int txw_idx = get_txw_idx(TX_4X16);
+ const int txh_idx = get_txh_idx(TX_4X16);
+ const int txfm_size_col = tx_size_wide[TX_4X16];
+ const int txfm_size_row = tx_size_high[TX_4X16];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x16_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm4x4_arr[tx_type];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ // col transform
+ load_buffer_4x16(input, in, stride, ud_flip, lr_flip, shift[0]);
+ col_txfm(in, outcoeff128, bitcol, 1);
+ col_txfm_8x8_rounding(outcoeff128, -shift[1]);
+ transpose_8nx8n(outcoeff128, in, txfm_size_col, txfm_size_row);
+
+ // row transform
+ for (int i = 0; i < 4; i++) {
+ __m128i tmp[4];
+ row_txfm(in + i, tmp, bitrow, txfm_size_row >> 2);
+ store_output_w4(coeff + i * 4, tmp, txfm_size_row, txfm_size_col);
+ }
+ (void)bd;
+}
+#endif
+
+// Forward 2-D transform for a 16x4 high-bitdepth block.
+// Four independent 4-point column transforms (each transposed in place),
+// one shared rounding pass, then a single 16-point row transform writing
+// straight into `coeff`. No rectangular rescale (power-of-two area).
+void av1_fwd_txfm2d_16x4_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[16];
+ __m128i *outcoeff128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X4];
+ const int txw_idx = get_txw_idx(TX_16X4);
+ const int txh_idx = get_txh_idx(TX_16X4);
+ const int txfm_size_col = tx_size_wide[TX_16X4];
+ const int txfm_size_row = tx_size_high[TX_16X4];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm4x4_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm8x16_arr[tx_type];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ // col transform
+ load_buffer_16x4(input, in, stride, ud_flip, lr_flip, shift[0]);
+
+ for (int i = 0; i < (txfm_size_col >> 2); i++) {
+ __m128i *cur_in = &in[i * txfm_size_row];
+ col_txfm(cur_in, cur_in, bitcol, 1);
+ transpose_32bit_4x4(cur_in, cur_in);
+ }
+ col_txfm_8x8_rounding(in, -shift[1]);
+
+ // row transform
+ row_txfm(in, outcoeff128, bitrow, 1);
+ (void)bd;
+}
+
+// Forward 2-D transform for a 16x32 high-bitdepth block (DCT_DCT/IDTX only,
+// per the NULL entries in the 8x32 kernel tables).
+// 32-point column pass over 4 vector lanes, rounding in two 16x16 chunks,
+// transpose, 16-point row pass, then rectangular rescale by NewSqrt2.
+void av1_fwd_txfm2d_16x32_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[128];
+ __m128i *outcoef128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X32];
+ const int txw_idx = get_txw_idx(TX_16X32);
+ const int txh_idx = get_txh_idx(TX_16X32);
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x32_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm8x32_arr[tx_type];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+
+ // column transform
+ load_buffer_16x16(input, in, stride, 0, 0, shift[0]);
+ load_buffer_16x16(input + 16 * stride, in + 64, stride, 0, 0, shift[0]);
+
+ for (int i = 0; i < 4; i++) {
+ col_txfm((in + i), (in + i), bitcol, 4);
+ }
+ col_txfm_16x16_rounding(&in[0], -shift[1]);
+ col_txfm_16x16_rounding(&in[64], -shift[1]);
+ transpose_8nx8n(in, outcoef128, 16, 32);
+
+ // row transform
+ row_txfm(outcoef128, in, bitrow, 8);
+ av1_round_shift_rect_array_32_sse4_1(in, outcoef128, 128, -shift[2],
+ NewSqrt2);
+ (void)bd;
+}
+
+// Forward 2-D transform for a 32x64 high-bitdepth block. tx_type is ignored:
+// only DCT_DCT is defined for this size, so fdct64/fdct32 are called
+// directly. Ends with a NewSqrt2 rectangular rescale of the kept outputs.
+void av1_fwd_txfm2d_32x64_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)tx_type;
+ __m128i in[512];
+ __m128i *outcoef128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X64];
+ const int txw_idx = get_txw_idx(TX_32X64);
+ const int txh_idx = get_txh_idx(TX_32X64);
+ const int txfm_size_col = tx_size_wide[TX_32X64];
+ const int txfm_size_row = tx_size_high[TX_32X64];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int num_row = txfm_size_row >> 2;
+ const int num_col = txfm_size_col >> 2;
+
+ // column transform
+ load_buffer_32x8n(input, in, stride, 0, 0, shift[0], txfm_size_row);
+ for (int i = 0; i < num_col; i++) {
+ av1_fdct64_sse4_1((in + i), (in + i), bitcol, num_col, num_col);
+ }
+ for (int i = 0; i < num_col; i++) {
+ col_txfm_16x16_rounding((in + i * txfm_size_row), -shift[1]);
+ }
+ transpose_8nx8n(in, outcoef128, txfm_size_col, txfm_size_row);
+
+ // row transform
+ for (int i = 0; i < num_row; i++) {
+ av1_fdct32_sse4_1((outcoef128 + i), (in + i), bitrow, num_row);
+ }
+ // Rescale and pack: only 8 vectors per 16-vector stripe are kept
+ // (the top 32 of 64 row outputs are written back).
+ for (int i = 0; i < txfm_size_col; i++) {
+ av1_round_shift_rect_array_32_sse4_1(in + i * 16, outcoef128 + i * 8, 8,
+ -shift[2], NewSqrt2);
+ }
+ (void)bd;
+}
+
+// Forward 2-D transform for a 64x32 high-bitdepth block. tx_type is ignored:
+// only DCT_DCT is defined for this size (fdct32 columns, fdct64 rows),
+// followed by a NewSqrt2 rectangular rescale of all 512 output vectors.
+void av1_fwd_txfm2d_64x32_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ (void)tx_type;
+ __m128i in[512];
+ __m128i *outcoef128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_64X32];
+ const int txw_idx = get_txw_idx(TX_64X32);
+ const int txh_idx = get_txh_idx(TX_64X32);
+ const int txfm_size_col = tx_size_wide[TX_64X32];
+ const int txfm_size_row = tx_size_high[TX_64X32];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const int num_row = txfm_size_row >> 2;
+ const int num_col = txfm_size_col >> 2;
+
+ // column transform
+ // Load each 64-wide input row as four 4x4 tiles (16 vectors per row).
+ for (int i = 0; i < 32; i++) {
+ load_buffer_4x4(input + 0 + i * stride, in + 0 + i * 16, 4, 0, 0, shift[0]);
+ load_buffer_4x4(input + 16 + i * stride, in + 4 + i * 16, 4, 0, 0,
+ shift[0]);
+ load_buffer_4x4(input + 32 + i * stride, in + 8 + i * 16, 4, 0, 0,
+ shift[0]);
+ load_buffer_4x4(input + 48 + i * stride, in + 12 + i * 16, 4, 0, 0,
+ shift[0]);
+ }
+
+ for (int i = 0; i < num_col; i++) {
+ av1_fdct32_sse4_1((in + i), (in + i), bitcol, num_col);
+ }
+
+ for (int i = 0; i < num_row; i++) {
+ col_txfm_16x16_rounding((in + i * txfm_size_col), -shift[1]);
+ }
+ transpose_8nx8n(in, outcoef128, txfm_size_col, txfm_size_row);
+
+ // row transform
+ for (int i = 0; i < num_row; i++) {
+ av1_fdct64_sse4_1((outcoef128 + i), (in + i), bitrow, num_row, num_row);
+ }
+ av1_round_shift_rect_array_32_sse4_1(in, outcoef128, 512, -shift[2],
+ NewSqrt2);
+ (void)bd;
+}
+
+// Forward 2-D transform for a 32x16 high-bitdepth block (DCT_DCT/IDTX only).
+// Note the kernel tables are deliberately swapped relative to 16x32: the
+// 16-point table drives the columns and the 32-point table the rows.
+void av1_fwd_txfm2d_32x16_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[128];
+ __m128i *outcoef128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X16];
+ const int txw_idx = get_txw_idx(TX_32X16);
+ const int txh_idx = get_txh_idx(TX_32X16);
+ const fwd_transform_1d_sse4_1 col_txfm = row_highbd_txfm8x32_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = col_highbd_txfm8x32_arr[tx_type];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+
+ // column transform
+ load_buffer_32x8n(input, in, stride, 0, 0, shift[0], 16);
+ col_txfm(in, in, bitcol, 8);
+ col_txfm_16x16_rounding(&in[0], -shift[1]);
+ col_txfm_16x16_rounding(&in[64], -shift[1]);
+ transpose_8nx8n(in, outcoef128, 32, 16);
+
+ // row transform
+ for (int i = 0; i < 4; i++) {
+ row_txfm((outcoef128 + i), (in + i), bitrow, 4);
+ }
+ av1_round_shift_rect_array_32_sse4_1(in, outcoef128, 128, -shift[2],
+ NewSqrt2);
+ (void)bd;
+}
+
+#if !CONFIG_REALTIME_ONLY
+// Forward 2-D transform for an 8x32 high-bitdepth block (DCT_DCT/IDTX only).
+// 32-point column pass, rounding, transpose, then 8-point row passes done
+// in place on the coefficient buffer. No rectangular rescale
+// (power-of-two area).
+void av1_fwd_txfm2d_8x32_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[64];
+ __m128i *outcoef128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X32];
+ const int txw_idx = get_txw_idx(TX_8X32);
+ const int txh_idx = get_txh_idx(TX_8X32);
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm8x32_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm32x8_arr[tx_type];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+
+ const int txfm_size_col = tx_size_wide[TX_8X32];
+ const int txfm_size_row = tx_size_high[TX_8X32];
+ const int num_col = txfm_size_col >> 2;
+
+ // column transform
+ load_buffer_8x16(input, in, stride, 0, 0, shift[0]);
+ load_buffer_8x16(input + (txfm_size_row >> 1) * stride, in + txfm_size_row,
+ stride, 0, 0, shift[0]);
+
+ for (int i = 0; i < num_col; i++) {
+ col_txfm((in + i), (in + i), bitcol, num_col);
+ }
+ col_txfm_16x16_rounding(in, -shift[1]);
+ transpose_8nx8n(in, outcoef128, txfm_size_col, txfm_size_row);
+
+ // row transform
+ for (int i = 0; i < txfm_size_col; i += 2) {
+ row_txfm((outcoef128 + i), (outcoef128 + i), bitrow, txfm_size_col);
+ }
+ (void)bd;
+}
+
+// Forward 2-D transform for a 32x8 high-bitdepth block (DCT_DCT/IDTX only).
+// 8-point column pass, rounding, transpose, then 32-point row passes in
+// place. No rectangular rescale (power-of-two area).
+void av1_fwd_txfm2d_32x8_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[64];
+ __m128i *outcoef128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_32X8];
+ const int txw_idx = get_txw_idx(TX_32X8);
+ const int txh_idx = get_txh_idx(TX_32X8);
+ const fwd_transform_1d_sse4_1 col_txfm = row_highbd_txfm32x8_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = col_highbd_txfm8x32_arr[tx_type];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+
+ const int txfm_size_col = tx_size_wide[TX_32X8];
+ const int txfm_size_row = tx_size_high[TX_32X8];
+ // NOTE(review): despite the name, num_col is derived from the row size
+ // (txfm_size_row >> 2 == 2) and is used as the row-pass instance count.
+ const int num_col = txfm_size_row >> 2;
+
+ // column transform
+ load_buffer_32x8n(input, in, stride, 0, 0, shift[0], 8);
+ for (int i = 0; i < txfm_size_row; i += 2) {
+ col_txfm((in + i), (in + i), bitcol, txfm_size_row);
+ }
+
+ col_txfm_16x16_rounding(&in[0], -shift[1]);
+ transpose_8nx8n(in, outcoef128, txfm_size_col, txfm_size_row);
+
+ // row transform
+ for (int i = 0; i < num_col; i++) {
+ row_txfm((outcoef128 + i), (outcoef128 + i), bitrow, num_col);
+ }
+ (void)bd;
+}
+#endif
+
+// Forward 2-D transform for a 4x8 high-bitdepth block.
+// 8-point column pass on the 4-wide data, rounding, then two 4-point row
+// passes, each transposed, rescaled by NewSqrt2 and stored 4 values wide.
+void av1_fwd_txfm2d_4x8_sse4_1(const int16_t *input, int32_t *coeff, int stride,
+ TX_TYPE tx_type, int bd) {
+ __m128i in[8];
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_4X8];
+ const int txw_idx = get_txw_idx(TX_4X8);
+ const int txh_idx = get_txh_idx(TX_4X8);
+ const int txfm_size_col = tx_size_wide[TX_4X8];
+ const int txfm_size_row = tx_size_high[TX_4X8];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm4x8_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm4x4_arr[tx_type];
+
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+
+ load_buffer_4x8(input, in, stride, ud_flip, lr_flip, shift[0]);
+ col_txfm(in, in, bitcol, 1);
+ col_txfm_4x8_rounding(in, -shift[1]);
+
+ for (int i = 0; i < 2; i++) {
+ __m128i *cur_in = &in[i * 4];
+ transpose_32bit_4x4(cur_in, cur_in);
+ row_txfm(cur_in, cur_in, bitrow, 1);
+ av1_round_shift_rect_array_32_sse4_1(cur_in, cur_in, txfm_size_col,
+ -shift[2], NewSqrt2);
+ store_output_w4(coeff + i * 4, cur_in, txfm_size_row, 4);
+ }
+ (void)bd;
+}
+
+// Forward 2-D transform for an 8x4 high-bitdepth block.
+// Two 4-point column transforms (each transposed in place), rounding, then
+// a single 8-point row pass with NewSqrt2 rectangular rescale into `coeff`.
+void av1_fwd_txfm2d_8x4_sse4_1(const int16_t *input, int32_t *coeff, int stride,
+ TX_TYPE tx_type, int bd) {
+ __m128i in[8];
+ __m128i *outcoeff128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_8X4];
+ const int txw_idx = get_txw_idx(TX_8X4);
+ const int txh_idx = get_txh_idx(TX_8X4);
+ const int txfm_size_col = tx_size_wide[TX_8X4];
+ const int txfm_size_row = tx_size_high[TX_8X4];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ const fwd_transform_1d_sse4_1 col_txfm = col_highbd_txfm4x4_arr[tx_type];
+ const fwd_transform_1d_sse4_1 row_txfm = row_highbd_txfm4x8_arr[tx_type];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ // col transform
+ load_buffer_8x4(input, in, stride, ud_flip, lr_flip, shift[0]);
+ for (int i = 0; i < 2; i++) {
+ __m128i *cur_in = &in[i * txfm_size_row];
+ col_txfm(cur_in, cur_in, bitcol, 1);
+ transpose_32bit_4x4(cur_in, cur_in);
+ }
+ col_txfm_4x8_rounding(in, -shift[1]);
+
+ // row transform
+ row_txfm(in, outcoeff128, bitrow, 1);
+ av1_round_shift_rect_array_32_sse4_1(outcoeff128, outcoeff128, txfm_size_col,
+ -shift[2], NewSqrt2);
+ (void)bd;
+}
+
+#if !CONFIG_REALTIME_ONLY
+// Forward 2-D transform for a 16x64 high-bitdepth block (DCT only: fdct64
+// columns, fdct16 rows). Only the top 32 of 64 column outputs are row
+// transformed (transpose height is 32), matching AV1's 64-point coefficient
+// zero-out.
+void av1_fwd_txfm2d_16x64_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[256];
+ __m128i *outcoeff128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_16X64];
+ const int txw_idx = get_txw_idx(TX_16X64);
+ const int txh_idx = get_txh_idx(TX_16X64);
+ const int txfm_size_col = tx_size_wide[TX_16X64];
+ const int txfm_size_row = tx_size_high[TX_16X64];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ const int num_col = txfm_size_col >> 2;
+ // col transform
+ for (int i = 0; i < txfm_size_row; i += num_col) {
+ load_buffer_4x4(input + (i + 0) * stride, in + (i + 0) * num_col, num_col,
+ ud_flip, lr_flip, shift[0]);
+ load_buffer_4x4(input + (i + 1) * stride, in + (i + 1) * num_col, num_col,
+ ud_flip, lr_flip, shift[0]);
+ load_buffer_4x4(input + (i + 2) * stride, in + (i + 2) * num_col, num_col,
+ ud_flip, lr_flip, shift[0]);
+ load_buffer_4x4(input + (i + 3) * stride, in + (i + 3) * num_col, num_col,
+ ud_flip, lr_flip, shift[0]);
+ }
+
+ for (int i = 0; i < num_col; i++) {
+ av1_fdct64_sse4_1(in + i, outcoeff128 + i, bitcol, num_col, num_col);
+ }
+
+ col_txfm_16x16_rounding(outcoeff128, -shift[1]);
+ col_txfm_16x16_rounding(outcoeff128 + 64, -shift[1]);
+ col_txfm_16x16_rounding(outcoeff128 + 128, -shift[1]);
+ col_txfm_16x16_rounding(outcoeff128 + 192, -shift[1]);
+
+ transpose_8nx8n(outcoeff128, in, txfm_size_col, 32);
+ fdct16x16_sse4_1(in, outcoeff128, bitrow, 8);
+ (void)bd;
+}
+
+// Forward 2-D transform for a 64x16 high-bitdepth block (DCT only: fdct16
+// columns, fdct64 rows). The final memset zeroes the right half of each
+// output row, matching AV1's 64-point coefficient zero-out.
+void av1_fwd_txfm2d_64x16_sse4_1(const int16_t *input, int32_t *coeff,
+ int stride, TX_TYPE tx_type, int bd) {
+ __m128i in[256];
+ __m128i *outcoeff128 = (__m128i *)coeff;
+ const int8_t *shift = av1_fwd_txfm_shift_ls[TX_64X16];
+ const int txw_idx = get_txw_idx(TX_64X16);
+ const int txh_idx = get_txh_idx(TX_64X16);
+ const int txfm_size_col = tx_size_wide[TX_64X16];
+ const int txfm_size_row = tx_size_high[TX_64X16];
+ int bitcol = av1_fwd_cos_bit_col[txw_idx][txh_idx];
+ int bitrow = av1_fwd_cos_bit_row[txw_idx][txh_idx];
+ int ud_flip, lr_flip;
+ get_flip_cfg(tx_type, &ud_flip, &lr_flip);
+ // col transform
+ // Load each 64-wide input row as four 4x4 tiles.
+ for (int i = 0; i < txfm_size_row; i++) {
+ load_buffer_4x4(input + 0 + i * stride, in + 0 + i * txfm_size_row, 4,
+ ud_flip, lr_flip, shift[0]);
+ load_buffer_4x4(input + 16 + i * stride, in + 4 + i * txfm_size_row, 4,
+ ud_flip, lr_flip, shift[0]);
+ load_buffer_4x4(input + 32 + i * stride, in + 8 + i * txfm_size_row, 4,
+ ud_flip, lr_flip, shift[0]);
+ load_buffer_4x4(input + 48 + i * stride, in + 12 + i * txfm_size_row, 4,
+ ud_flip, lr_flip, shift[0]);
+ }
+
+ fdct16x16_sse4_1(in, outcoeff128, bitcol, txfm_size_row);
+ col_txfm_16x16_rounding(outcoeff128, -shift[1]);
+ col_txfm_16x16_rounding(outcoeff128 + 64, -shift[1]);
+ col_txfm_16x16_rounding(outcoeff128 + 128, -shift[1]);
+ col_txfm_16x16_rounding(outcoeff128 + 192, -shift[1]);
+
+ transpose_8nx8n(outcoeff128, in, txfm_size_col, txfm_size_row);
+ for (int i = 0; i < 4; i++) {
+ av1_fdct64_sse4_1(in + i, outcoeff128 + i, bitrow, 4, 4);
+ }
+ memset(coeff + txfm_size_row * 32, 0, txfm_size_row * 32 * sizeof(*coeff));
+ (void)bd;
+}
+#endif
diff --git a/third_party/aom/av1/encoder/x86/highbd_temporal_filter_avx2.c b/third_party/aom/av1/encoder/x86/highbd_temporal_filter_avx2.c
new file mode 100644
index 0000000000..ca448ca37b
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/highbd_temporal_filter_avx2.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (c) 2020, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <immintrin.h>
+
+#include "config/av1_rtcd.h"
+#include "aom_dsp/mathutils.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/temporal_filter.h"
+
+#define SSE_STRIDE (BW + 4)
+
+// Masks selecting 5 consecutive 32-bit lanes starting at lane i; used by
+// xx_mask_and_hadd() to isolate the 5-wide horizontal window for column i.
+DECLARE_ALIGNED(32, static const uint32_t, sse_bytemask[4][8]) = {
+  { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0, 0 },
+  { 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0 },
+  { 0, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0 },
+  { 0, 0, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }
+};
+
+// Computes per-pixel squared error between two 16-wide high bit-depth
+// blocks and stores 32-bit results into frame_sse. Results are written at a
+// +2 column offset so the window code below can read two padding columns on
+// the left (SSE_STRIDE = BW + 4).
+static AOM_FORCE_INLINE void get_squared_error_16x16_avx2(
+    const uint16_t *frame1, const unsigned int stride, const uint16_t *frame2,
+    const unsigned int stride2, const int block_width, const int block_height,
+    uint32_t *frame_sse, const unsigned int sse_stride) {
+  (void)block_width;  // Always 16 on this path.
+  const uint16_t *src1 = frame1;
+  const uint16_t *src2 = frame2;
+  uint32_t *dst = frame_sse + 2;
+  for (int i = 0; i < block_height; i++) {
+    __m256i v_src1 = _mm256_loadu_si256((__m256i *)src1);
+    __m256i v_src2 = _mm256_loadu_si256((__m256i *)src2);
+    __m256i v_diff = _mm256_sub_epi16(v_src1, v_src2);
+    // Reconstruct the full 32-bit square from the low and high 16-bit
+    // halves of the 16x16 multiply.
+    __m256i v_mullo = _mm256_mullo_epi16(v_diff, v_diff);
+    __m256i v_mulhi = _mm256_mulhi_epi16(v_diff, v_diff);
+
+    __m256i v_lo = _mm256_unpacklo_epi16(v_mullo, v_mulhi);
+    __m256i v_hi = _mm256_unpackhi_epi16(v_mullo, v_mulhi);
+    // Swap 128-bit lanes so the 32-bit squares land in pixel order:
+    // diff_lo holds pixels 0..7, diff_hi holds pixels 8..15.
+    __m256i diff_lo =
+        _mm256_inserti128_si256(v_lo, _mm256_extracti128_si256(v_hi, 0), 1);
+    __m256i diff_hi =
+        _mm256_inserti128_si256(v_hi, _mm256_extracti128_si256(v_lo, 1), 0);
+
+    _mm256_storeu_si256((__m256i *)dst, diff_lo);
+    dst += 8;
+    _mm256_storeu_si256((__m256i *)dst, diff_hi);
+
+    src1 += stride, src2 += stride2;
+    dst += sse_stride - 8;
+  }
+}
+
+// 32-wide variant of get_squared_error_16x16_avx2: processes each row as
+// two 16-pixel halves. Output is offset by 2 columns for left padding.
+static AOM_FORCE_INLINE void get_squared_error_32x32_avx2(
+    const uint16_t *frame1, const unsigned int stride, const uint16_t *frame2,
+    const unsigned int stride2, const int block_width, const int block_height,
+    uint32_t *frame_sse, const unsigned int sse_stride) {
+  (void)block_width;  // Always 32 on this path.
+  const uint16_t *src1 = frame1;
+  const uint16_t *src2 = frame2;
+  uint32_t *dst = frame_sse + 2;
+  for (int i = 0; i < block_height; i++) {
+    // Pixels 0..15.
+    __m256i v_src1 = _mm256_loadu_si256((__m256i *)src1);
+    __m256i v_src2 = _mm256_loadu_si256((__m256i *)src2);
+    __m256i v_diff = _mm256_sub_epi16(v_src1, v_src2);
+    // Reconstruct full 32-bit squares from the 16x16 multiply halves.
+    __m256i v_mullo = _mm256_mullo_epi16(v_diff, v_diff);
+    __m256i v_mulhi = _mm256_mulhi_epi16(v_diff, v_diff);
+
+    __m256i v_lo = _mm256_unpacklo_epi16(v_mullo, v_mulhi);
+    __m256i v_hi = _mm256_unpackhi_epi16(v_mullo, v_mulhi);
+    // Lane swap so squares are stored in pixel order.
+    __m256i diff_lo =
+        _mm256_inserti128_si256(v_lo, _mm256_extracti128_si256(v_hi, 0), 1);
+    __m256i diff_hi =
+        _mm256_inserti128_si256(v_hi, _mm256_extracti128_si256(v_lo, 1), 0);
+
+    _mm256_storeu_si256((__m256i *)dst, diff_lo);
+    _mm256_storeu_si256((__m256i *)(dst + 8), diff_hi);
+
+    // Pixels 16..31.
+    v_src1 = _mm256_loadu_si256((__m256i *)(src1 + 16));
+    v_src2 = _mm256_loadu_si256((__m256i *)(src2 + 16));
+    v_diff = _mm256_sub_epi16(v_src1, v_src2);
+    v_mullo = _mm256_mullo_epi16(v_diff, v_diff);
+    v_mulhi = _mm256_mulhi_epi16(v_diff, v_diff);
+
+    v_lo = _mm256_unpacklo_epi16(v_mullo, v_mulhi);
+    v_hi = _mm256_unpackhi_epi16(v_mullo, v_mulhi);
+    diff_lo =
+        _mm256_inserti128_si256(v_lo, _mm256_extracti128_si256(v_hi, 0), 1);
+    diff_hi =
+        _mm256_inserti128_si256(v_hi, _mm256_extracti128_si256(v_lo, 1), 0);
+
+    _mm256_storeu_si256((__m256i *)(dst + 16), diff_lo);
+    _mm256_storeu_si256((__m256i *)(dst + 24), diff_hi);
+
+    src1 += stride;
+    src2 += stride2;
+    dst += sse_stride;
+  }
+}
+
+// Loads 8 SSE values starting at src and replaces the two left padding
+// lanes with copies of the first real value (edge replication for col 0).
+static AOM_FORCE_INLINE void xx_load_and_pad_left(uint32_t *src,
+                                                  __m256i *v256tmp) {
+  *v256tmp = _mm256_loadu_si256((__m256i *)src);
+  // For the first column, replicate the first element twice to the left
+  __m256i v256tmp1 = _mm256_shuffle_epi32(*v256tmp, 0xEA);
+  *v256tmp = _mm256_inserti128_si256(*v256tmp,
+                                     _mm256_extracti128_si256(v256tmp1, 0), 0);
+}
+
+// Loads 8 SSE values starting at src and replaces the two right padding
+// lanes with copies of the last real value (edge replication for the last
+// column group).
+static AOM_FORCE_INLINE void xx_load_and_pad_right(uint32_t *src,
+                                                   __m256i *v256tmp) {
+  *v256tmp = _mm256_loadu_si256((__m256i *)src);
+  // For the last column, replicate the last element twice to the right
+  __m256i v256tmp1 = _mm256_shuffle_epi32(*v256tmp, 0x54);
+  *v256tmp = _mm256_inserti128_si256(*v256tmp,
+                                     _mm256_extracti128_si256(v256tmp1, 1), 1);
+}
+
+// Masks the 5 lanes belonging to window column i (via sse_bytemask[i]) and
+// returns their horizontal sum as a scalar.
+static AOM_FORCE_INLINE int32_t xx_mask_and_hadd(__m256i vsum, int i) {
+  // Mask the required 5 values inside the vector
+  __m256i vtmp = _mm256_and_si256(vsum, *(__m256i *)sse_bytemask[i]);
+  __m128i v128a, v128b;
+  // Extract 256b as two 128b registers A and B
+  v128a = _mm256_castsi256_si128(vtmp);
+  v128b = _mm256_extracti128_si256(vtmp, 1);
+  // A = [A0+B0, A1+B1, A2+B2, A3+B3]
+  v128a = _mm_add_epi32(v128a, v128b);
+  // B = [A2+B2, A3+B3, 0, 0]
+  v128b = _mm_srli_si128(v128a, 8);
+  // A = [A0+B0+A2+B2, A1+B1+A3+B3, X, X]
+  v128a = _mm_add_epi32(v128a, v128b);
+  // B = [A1+B1+A3+B3, 0, 0, 0]
+  v128b = _mm_srli_si128(v128a, 4);
+  // A = [A0+B0+A2+B2+A1+B1+A3+B3, X, X, X]
+  v128a = _mm_add_epi32(v128a, v128b);
+  return _mm_extract_epi32(v128a, 0);
+}
+
+// Applies the temporal filter to one plane of a high bit-depth block (16x16
+// or 32x32). First fills frame_sse with per-pixel squared errors (padded by
+// 2 columns on each side); then, for every pixel, forms the 5x5 window sum
+// with edge replication, combines it with the subblock MSEs and decay
+// factors into an exponential weight, and accumulates the weighted frame2
+// pixel into accumulator[]/count[].
+static void highbd_apply_temporal_filter(
+    const uint16_t *frame1, const unsigned int stride, const uint16_t *frame2,
+    const unsigned int stride2, const int block_width, const int block_height,
+    const int *subblock_mses, unsigned int *accumulator, uint16_t *count,
+    uint32_t *frame_sse, uint32_t *luma_sse_sum, int bd,
+    const double inv_num_ref_pixels, const double decay_factor,
+    const double inv_factor, const double weight_factor, double *d_factor,
+    int tf_wgt_calc_lvl) {
+  assert(((block_width == 16) || (block_width == 32)) &&
+         ((block_height == 16) || (block_height == 32)));
+
+  // 5x5 window sums of squared error per pixel.
+  uint32_t acc_5x5_sse[BH][BW];
+
+  if (block_width == 32) {
+    get_squared_error_32x32_avx2(frame1, stride, frame2, stride2, block_width,
+                                 block_height, frame_sse, SSE_STRIDE);
+  } else {
+    get_squared_error_16x16_avx2(frame1, stride, frame2, stride2, block_width,
+                                 block_height, frame_sse, SSE_STRIDE);
+  }
+
+  // Sliding window of 5 consecutive SSE rows for the current column group.
+  __m256i vsrc[5];
+
+  // Traverse 4 columns at a time
+  // First and last columns will require padding
+  // Left-edge group (columns 0..3): rows loaded with left padding.
+  int col;
+  uint32_t *src = frame_sse;
+  for (int i = 2; i < 5; i++) {
+    xx_load_and_pad_left(src, &vsrc[i]);
+    src += SSE_STRIDE;
+  }
+
+  // Copy first row to first 2 vectors
+  vsrc[0] = vsrc[2];
+  vsrc[1] = vsrc[2];
+
+  for (int row = 0; row < block_height - 3; row++) {
+    __m256i vsum1 = _mm256_add_epi32(vsrc[0], vsrc[1]);
+    __m256i vsum2 = _mm256_add_epi32(vsrc[2], vsrc[3]);
+    __m256i vsum3 = _mm256_add_epi32(vsum1, vsum2);
+    __m256i vsum = _mm256_add_epi32(vsum3, vsrc[4]);
+
+    // Shift the window down by one row.
+    for (int i = 0; i < 4; i++) {
+      vsrc[i] = vsrc[i + 1];
+    }
+
+    xx_load_and_pad_left(src, &vsrc[4]);
+    src += SSE_STRIDE;
+
+    acc_5x5_sse[row][0] = xx_mask_and_hadd(vsum, 0);
+    acc_5x5_sse[row][1] = xx_mask_and_hadd(vsum, 1);
+    acc_5x5_sse[row][2] = xx_mask_and_hadd(vsum, 2);
+    acc_5x5_sse[row][3] = xx_mask_and_hadd(vsum, 3);
+  }
+  // Bottom 3 rows: no further rows to load, so the shifted window
+  // effectively repeats the last row (bottom edge replication).
+  for (int row = block_height - 3; row < block_height; row++) {
+    __m256i vsum1 = _mm256_add_epi32(vsrc[0], vsrc[1]);
+    __m256i vsum2 = _mm256_add_epi32(vsrc[2], vsrc[3]);
+    __m256i vsum3 = _mm256_add_epi32(vsum1, vsum2);
+    __m256i vsum = _mm256_add_epi32(vsum3, vsrc[4]);
+
+    for (int i = 0; i < 4; i++) {
+      vsrc[i] = vsrc[i + 1];
+    }
+
+    acc_5x5_sse[row][0] = xx_mask_and_hadd(vsum, 0);
+    acc_5x5_sse[row][1] = xx_mask_and_hadd(vsum, 1);
+    acc_5x5_sse[row][2] = xx_mask_and_hadd(vsum, 2);
+    acc_5x5_sse[row][3] = xx_mask_and_hadd(vsum, 3);
+  }
+  // Interior column groups: no padding needed, plain loads.
+  for (col = 4; col < block_width - 4; col += 4) {
+    src = frame_sse + col;
+
+    // Load and pad(for first and last col) 3 rows from the top
+    for (int i = 2; i < 5; i++) {
+      vsrc[i] = _mm256_loadu_si256((__m256i *)src);
+      src += SSE_STRIDE;
+    }
+
+    // Copy first row to first 2 vectors
+    vsrc[0] = vsrc[2];
+    vsrc[1] = vsrc[2];
+
+    for (int row = 0; row < block_height - 3; row++) {
+      __m256i vsum1 = _mm256_add_epi32(vsrc[0], vsrc[1]);
+      __m256i vsum2 = _mm256_add_epi32(vsrc[2], vsrc[3]);
+      __m256i vsum3 = _mm256_add_epi32(vsum1, vsum2);
+      __m256i vsum = _mm256_add_epi32(vsum3, vsrc[4]);
+
+      for (int i = 0; i < 4; i++) {
+        vsrc[i] = vsrc[i + 1];
+      }
+
+      vsrc[4] = _mm256_loadu_si256((__m256i *)src);
+
+      src += SSE_STRIDE;
+
+      acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum, 0);
+      acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum, 1);
+      acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum, 2);
+      acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum, 3);
+    }
+    for (int row = block_height - 3; row < block_height; row++) {
+      __m256i vsum1 = _mm256_add_epi32(vsrc[0], vsrc[1]);
+      __m256i vsum2 = _mm256_add_epi32(vsrc[2], vsrc[3]);
+      __m256i vsum3 = _mm256_add_epi32(vsum1, vsum2);
+      __m256i vsum = _mm256_add_epi32(vsum3, vsrc[4]);
+
+      for (int i = 0; i < 4; i++) {
+        vsrc[i] = vsrc[i + 1];
+      }
+
+      acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum, 0);
+      acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum, 1);
+      acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum, 2);
+      acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum, 3);
+    }
+  }
+
+  // Right-edge group (last 4 columns): rows loaded with right padding.
+  src = frame_sse + col;
+
+  // Load and pad(for first and last col) 3 rows from the top
+  for (int i = 2; i < 5; i++) {
+    xx_load_and_pad_right(src, &vsrc[i]);
+    src += SSE_STRIDE;
+  }
+
+  // Copy first row to first 2 vectors
+  vsrc[0] = vsrc[2];
+  vsrc[1] = vsrc[2];
+
+  for (int row = 0; row < block_height - 3; row++) {
+    __m256i vsum1 = _mm256_add_epi32(vsrc[0], vsrc[1]);
+    __m256i vsum2 = _mm256_add_epi32(vsrc[2], vsrc[3]);
+    __m256i vsum3 = _mm256_add_epi32(vsum1, vsum2);
+    __m256i vsum = _mm256_add_epi32(vsum3, vsrc[4]);
+
+    for (int i = 0; i < 4; i++) {
+      vsrc[i] = vsrc[i + 1];
+    }
+
+    xx_load_and_pad_right(src, &vsrc[4]);
+    src += SSE_STRIDE;
+
+    acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum, 0);
+    acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum, 1);
+    acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum, 2);
+    acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum, 3);
+  }
+  for (int row = block_height - 3; row < block_height; row++) {
+    __m256i vsum1 = _mm256_add_epi32(vsrc[0], vsrc[1]);
+    __m256i vsum2 = _mm256_add_epi32(vsrc[2], vsrc[3]);
+    __m256i vsum3 = _mm256_add_epi32(vsum1, vsum2);
+    __m256i vsum = _mm256_add_epi32(vsum3, vsrc[4]);
+
+    for (int i = 0; i < 4; i++) {
+      vsrc[i] = vsrc[i + 1];
+    }
+
+    acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum, 0);
+    acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum, 1);
+    acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum, 2);
+    acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum, 3);
+  }
+
+  // Hoist per-subblock factors out of the pixel loops.
+  double subblock_mses_scaled[4];
+  double d_factor_decayed[4];
+  for (int idx = 0; idx < 4; idx++) {
+    subblock_mses_scaled[idx] = subblock_mses[idx] * inv_factor;
+    d_factor_decayed[idx] = d_factor[idx] * decay_factor;
+  }
+  // tf_wgt_calc_lvl == 0 uses exact exp(); otherwise approx_exp() with
+  // float rounding. The two branches are otherwise identical.
+  if (tf_wgt_calc_lvl == 0) {
+    for (int i = 0, k = 0; i < block_height; i++) {
+      const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+      for (int j = 0; j < block_width; j++, k++) {
+        const int pixel_value = frame2[i * stride2 + j];
+        uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+
+        // Scale down the difference for high bit depth input.
+        diff_sse >>= ((bd - 8) * 2);
+
+        const double window_error = diff_sse * inv_num_ref_pixels;
+        const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+
+        const double combined_error =
+            weight_factor * window_error + subblock_mses_scaled[subblock_idx];
+
+        double scaled_error = combined_error * d_factor_decayed[subblock_idx];
+        scaled_error = AOMMIN(scaled_error, 7);
+        const int weight = (int)(exp(-scaled_error) * TF_WEIGHT_SCALE);
+
+        count[k] += weight;
+        accumulator[k] += weight * pixel_value;
+      }
+    }
+  } else {
+    for (int i = 0, k = 0; i < block_height; i++) {
+      const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+      for (int j = 0; j < block_width; j++, k++) {
+        const int pixel_value = frame2[i * stride2 + j];
+        uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+
+        // Scale down the difference for high bit depth input.
+        diff_sse >>= ((bd - 8) * 2);
+
+        const double window_error = diff_sse * inv_num_ref_pixels;
+        const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+
+        const double combined_error =
+            weight_factor * window_error + subblock_mses_scaled[subblock_idx];
+
+        double scaled_error = combined_error * d_factor_decayed[subblock_idx];
+        scaled_error = AOMMIN(scaled_error, 7);
+        const float fweight =
+            approx_exp((float)-scaled_error) * TF_WEIGHT_SCALE;
+        const int weight = iroundpf(fweight);
+
+        count[k] += weight;
+        accumulator[k] += weight * pixel_value;
+      }
+    }
+  }
+}
+
+// AVX2 entry point of the high bit-depth temporal filter. For each plane it
+// derives the per-plane decay factors, optionally pre-computes the luma SSE
+// contribution for the chroma planes, and dispatches to
+// highbd_apply_temporal_filter() to accumulate weighted predictor pixels
+// into accum[]/count[].
+void av1_highbd_apply_temporal_filter_avx2(
+    const YV12_BUFFER_CONFIG *frame_to_filter, const MACROBLOCKD *mbd,
+    const BLOCK_SIZE block_size, const int mb_row, const int mb_col,
+    const int num_planes, const double *noise_levels, const MV *subblock_mvs,
+    const int *subblock_mses, const int q_factor, const int filter_strength,
+    int tf_wgt_calc_lvl, const uint8_t *pred, uint32_t *accum,
+    uint16_t *count) {
+  const int is_high_bitdepth = frame_to_filter->flags & YV12_FLAG_HIGHBITDEPTH;
+  // Note: the messages previously said "sse2" — a copy/paste from the SSE2
+  // file; this is the AVX2 implementation.
+  assert(block_size == BLOCK_32X32 && "Only support 32x32 block with avx2!");
+  assert(TF_WINDOW_LENGTH == 5 && "Only support window length 5 with avx2!");
+  assert(num_planes >= 1 && num_planes <= MAX_MB_PLANE);
+  (void)is_high_bitdepth;
+
+  const int mb_height = block_size_high[block_size];
+  const int mb_width = block_size_wide[block_size];
+  const int frame_height = frame_to_filter->y_crop_height;
+  const int frame_width = frame_to_filter->y_crop_width;
+  const int min_frame_size = AOMMIN(frame_height, frame_width);
+  // Variables to simplify combined error calculation.
+  const double inv_factor = 1.0 / ((TF_WINDOW_BLOCK_BALANCE_WEIGHT + 1) *
+                                   TF_SEARCH_ERROR_NORM_WEIGHT);
+  const double weight_factor =
+      (double)TF_WINDOW_BLOCK_BALANCE_WEIGHT * inv_factor;
+  // Adjust filtering based on q.
+  // Larger q -> stronger filtering -> larger weight.
+  // Smaller q -> weaker filtering -> smaller weight.
+  double q_decay = pow((double)q_factor / TF_Q_DECAY_THRESHOLD, 2);
+  q_decay = CLIP(q_decay, 1e-5, 1);
+  if (q_factor >= TF_QINDEX_CUTOFF) {
+    // Max q_factor is 255, therefore the upper bound of q_decay is 8.
+    // We do not need a clip here.
+    q_decay = 0.5 * pow((double)q_factor / 64, 2);
+  }
+  // Smaller strength -> smaller filtering weight.
+  double s_decay = pow((double)filter_strength / TF_STRENGTH_THRESHOLD, 2);
+  s_decay = CLIP(s_decay, 1e-5, 1);
+  double d_factor[4] = { 0 };
+  uint32_t frame_sse[SSE_STRIDE * BH] = { 0 };
+  uint32_t luma_sse_sum[BW * BH] = { 0 };
+  uint16_t *pred1 = CONVERT_TO_SHORTPTR(pred);
+
+  for (int subblock_idx = 0; subblock_idx < 4; subblock_idx++) {
+    // Larger motion vector -> smaller filtering weight.
+    const MV mv = subblock_mvs[subblock_idx];
+    const double distance = sqrt(pow(mv.row, 2) + pow(mv.col, 2));
+    double distance_threshold = min_frame_size * TF_SEARCH_DISTANCE_THRESHOLD;
+    distance_threshold = AOMMAX(distance_threshold, 1);
+    d_factor[subblock_idx] = distance / distance_threshold;
+    d_factor[subblock_idx] = AOMMAX(d_factor[subblock_idx], 1);
+  }
+
+  // Handle planes in sequence.
+  int plane_offset = 0;
+  for (int plane = 0; plane < num_planes; ++plane) {
+    const uint32_t plane_h = mb_height >> mbd->plane[plane].subsampling_y;
+    const uint32_t plane_w = mb_width >> mbd->plane[plane].subsampling_x;
+    const uint32_t frame_stride = frame_to_filter->strides[plane == 0 ? 0 : 1];
+    const int frame_offset = mb_row * plane_h * frame_stride + mb_col * plane_w;
+
+    const uint16_t *ref =
+        CONVERT_TO_SHORTPTR(frame_to_filter->buffers[plane]) + frame_offset;
+    const int ss_x_shift =
+        mbd->plane[plane].subsampling_x - mbd->plane[AOM_PLANE_Y].subsampling_x;
+    const int ss_y_shift =
+        mbd->plane[plane].subsampling_y - mbd->plane[AOM_PLANE_Y].subsampling_y;
+    const int num_ref_pixels = TF_WINDOW_LENGTH * TF_WINDOW_LENGTH +
+                               ((plane) ? (1 << (ss_x_shift + ss_y_shift)) : 0);
+    const double inv_num_ref_pixels = 1.0 / num_ref_pixels;
+    // Larger noise -> larger filtering weight.
+    const double n_decay = 0.5 + log(2 * noise_levels[plane] + 5.0);
+    // Decay factors for non-local mean approach.
+    const double decay_factor = 1 / (n_decay * q_decay * s_decay);
+
+    // Filter U-plane and V-plane using Y-plane. This is because motion
+    // search is only done on Y-plane, so the information from Y-plane
+    // will be more accurate. The luma sse sum is reused in both chroma
+    // planes.
+    if (plane == AOM_PLANE_U) {
+      for (unsigned int i = 0, k = 0; i < plane_h; i++) {
+        for (unsigned int j = 0; j < plane_w; j++, k++) {
+          for (int ii = 0; ii < (1 << ss_y_shift); ++ii) {
+            for (int jj = 0; jj < (1 << ss_x_shift); ++jj) {
+              const int yy = (i << ss_y_shift) + ii;  // Y-coord on Y-plane.
+              const int xx = (j << ss_x_shift) + jj;  // X-coord on Y-plane.
+              luma_sse_sum[i * BW + j] += frame_sse[yy * SSE_STRIDE + xx + 2];
+            }
+          }
+        }
+      }
+    }
+
+    highbd_apply_temporal_filter(
+        ref, frame_stride, pred1 + plane_offset, plane_w, plane_w, plane_h,
+        subblock_mses, accum + plane_offset, count + plane_offset, frame_sse,
+        luma_sse_sum, mbd->bd, inv_num_ref_pixels, decay_factor, inv_factor,
+        weight_factor, d_factor, tf_wgt_calc_lvl);
+    plane_offset += plane_h * plane_w;
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/highbd_temporal_filter_sse2.c b/third_party/aom/av1/encoder/x86/highbd_temporal_filter_sse2.c
new file mode 100644
index 0000000000..2032847083
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/highbd_temporal_filter_sse2.c
@@ -0,0 +1,341 @@
+/*
+ * Copyright (c) 2020, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h>
+
+#include "config/av1_rtcd.h"
+#include "aom_dsp/mathutils.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/temporal_filter.h"
+
+// For the squared error buffer, keep a padding for 4 samples
+#define SSE_STRIDE (BW + 4)
+
+// Mask pairs selecting 5 consecutive 32-bit lanes starting at lane i,
+// spread across two 128-bit vectors; used by xx_mask_and_hadd() below.
+DECLARE_ALIGNED(32, static const uint32_t, sse_bytemask_2x4[4][2][4]) = {
+  { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+    { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 } },
+  { { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+    { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 } },
+  { { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF },
+    { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 } },
+  { { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF },
+    { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF } }
+};
+
+// Computes per-pixel squared error between two high bit-depth blocks, 8
+// pixels per iteration, storing 32-bit results into frame_sse at a +2
+// column offset (left padding for the 5x5 window code).
+static void get_squared_error(const uint16_t *frame1, const unsigned int stride,
+                              const uint16_t *frame2,
+                              const unsigned int stride2, const int block_width,
+                              const int block_height, uint32_t *frame_sse,
+                              const unsigned int dst_stride) {
+  const uint16_t *src1 = frame1;
+  const uint16_t *src2 = frame2;
+  uint32_t *dst = frame_sse;
+
+  for (int i = 0; i < block_height; i++) {
+    for (int j = 0; j < block_width; j += 8) {
+      __m128i vsrc1 = _mm_loadu_si128((__m128i *)(src1 + j));
+      __m128i vsrc2 = _mm_loadu_si128((__m128i *)(src2 + j));
+
+      __m128i vdiff = _mm_sub_epi16(vsrc1, vsrc2);
+      // Reconstruct full 32-bit squares from the low/high halves of the
+      // 16x16 multiply.
+      __m128i vmullo = _mm_mullo_epi16(vdiff, vdiff);
+      __m128i vmullh = _mm_mulhi_epi16(vdiff, vdiff);
+
+      __m128i vres1 = _mm_unpacklo_epi16(vmullo, vmullh);
+      __m128i vres2 = _mm_unpackhi_epi16(vmullo, vmullh);
+
+      // +2 offset leaves two padding columns on the left.
+      _mm_storeu_si128((__m128i *)(dst + j + 2), vres1);
+      _mm_storeu_si128((__m128i *)(dst + j + 6), vres2);
+    }
+
+    src1 += stride;
+    src2 += stride2;
+    dst += dst_stride;
+  }
+}
+
+// Loads 8 SSE values as two 128-bit vectors, replicating the edge value
+// into the padding lanes when col is the first or last 4-column group.
+static void xx_load_and_pad(uint32_t *src, __m128i *dstvec, int col,
+                            int block_width) {
+  __m128i vtmp1 = _mm_loadu_si128((__m128i *)src);
+  __m128i vtmp2 = _mm_loadu_si128((__m128i *)(src + 4));
+  // For the first column, replicate the first element twice to the left
+  dstvec[0] = (col) ? vtmp1 : _mm_shuffle_epi32(vtmp1, 0xEA);
+  // For the last column, replicate the last element twice to the right
+  dstvec[1] = (col < block_width - 4) ? vtmp2 : _mm_shuffle_epi32(vtmp2, 0x54);
+}
+
+// Masks the 5 lanes belonging to window column i (via sse_bytemask_2x4[i])
+// across the two 128-bit vectors and returns their horizontal sum.
+static int32_t xx_mask_and_hadd(__m128i vsum1, __m128i vsum2, int i) {
+  __m128i veca, vecb;
+  // Mask and obtain the required 5 values inside the vector
+  veca = _mm_and_si128(vsum1, *(__m128i *)sse_bytemask_2x4[i][0]);
+  vecb = _mm_and_si128(vsum2, *(__m128i *)sse_bytemask_2x4[i][1]);
+  // A = [A0+B0, A1+B1, A2+B2, A3+B3]
+  veca = _mm_add_epi32(veca, vecb);
+  // B = [A2+B2, A3+B3, 0, 0]
+  vecb = _mm_srli_si128(veca, 8);
+  // A = [A0+B0+A2+B2, A1+B1+A3+B3, X, X]
+  veca = _mm_add_epi32(veca, vecb);
+  // B = [A1+B1+A3+B3, 0, 0, 0]
+  vecb = _mm_srli_si128(veca, 4);
+  // A = [A0+B0+A2+B2+A1+B1+A3+B3, X, X, X]
+  veca = _mm_add_epi32(veca, vecb);
+  return _mm_cvtsi128_si32(veca);
+}
+
+// SSE2 variant of the per-plane high bit-depth temporal filter (16x16 or
+// 32x32 blocks). Fills frame_sse with per-pixel squared errors (2-column
+// pad on each side), forms 5x5 window sums with edge replication — each
+// 4-column group is held as a pair of 128-bit vectors — then converts the
+// sums into exponential weights and accumulates weighted frame2 pixels
+// into accumulator[]/count[].
+static void highbd_apply_temporal_filter(
+    const uint16_t *frame1, const unsigned int stride, const uint16_t *frame2,
+    const unsigned int stride2, const int block_width, const int block_height,
+    const int *subblock_mses, unsigned int *accumulator, uint16_t *count,
+    uint32_t *frame_sse, uint32_t *luma_sse_sum, int bd,
+    const double inv_num_ref_pixels, const double decay_factor,
+    const double inv_factor, const double weight_factor, double *d_factor,
+    int tf_wgt_calc_lvl) {
+  assert(((block_width == 16) || (block_width == 32)) &&
+         ((block_height == 16) || (block_height == 32)));
+
+  // 5x5 window sums of squared error per pixel.
+  uint32_t acc_5x5_sse[BH][BW];
+
+  get_squared_error(frame1, stride, frame2, stride2, block_width, block_height,
+                    frame_sse, SSE_STRIDE);
+
+  // Sliding window of 5 consecutive SSE rows; each row is a pair of
+  // 128-bit vectors covering 8 values (4 real columns plus padding).
+  __m128i vsrc[5][2];
+
+  // Traverse 4 columns at a time
+  // First and last columns will require padding
+  for (int col = 0; col < block_width; col += 4) {
+    uint32_t *src = frame_sse + col;
+
+    // Load and pad(for first and last col) 3 rows from the top
+    for (int i = 2; i < 5; i++) {
+      xx_load_and_pad(src, vsrc[i], col, block_width);
+      src += SSE_STRIDE;
+    }
+
+    // Padding for top 2 rows
+    vsrc[0][0] = vsrc[2][0];
+    vsrc[0][1] = vsrc[2][1];
+    vsrc[1][0] = vsrc[2][0];
+    vsrc[1][1] = vsrc[2][1];
+
+    for (int row = 0; row < block_height - 3; row++) {
+      __m128i vsum11 = _mm_add_epi32(vsrc[0][0], vsrc[1][0]);
+      __m128i vsum12 = _mm_add_epi32(vsrc[2][0], vsrc[3][0]);
+      __m128i vsum13 = _mm_add_epi32(vsum11, vsum12);
+      __m128i vsum1 = _mm_add_epi32(vsum13, vsrc[4][0]);
+
+      __m128i vsum21 = _mm_add_epi32(vsrc[0][1], vsrc[1][1]);
+      __m128i vsum22 = _mm_add_epi32(vsrc[2][1], vsrc[3][1]);
+      __m128i vsum23 = _mm_add_epi32(vsum21, vsum22);
+      __m128i vsum2 = _mm_add_epi32(vsum23, vsrc[4][1]);
+
+      // Shift the window down by one row.
+      vsrc[0][0] = vsrc[1][0];
+      vsrc[0][1] = vsrc[1][1];
+      vsrc[1][0] = vsrc[2][0];
+      vsrc[1][1] = vsrc[2][1];
+      vsrc[2][0] = vsrc[3][0];
+      vsrc[2][1] = vsrc[3][1];
+      vsrc[3][0] = vsrc[4][0];
+      vsrc[3][1] = vsrc[4][1];
+
+      // Load next row
+      xx_load_and_pad(src, vsrc[4], col, block_width);
+      src += SSE_STRIDE;
+
+      acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum1, vsum2, 0);
+      acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum1, vsum2, 1);
+      acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum1, vsum2, 2);
+      acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum1, vsum2, 3);
+    }
+    // Bottom 3 rows: no further rows to load; the shifted window repeats
+    // the last row (bottom edge replication).
+    for (int row = block_height - 3; row < block_height; row++) {
+      __m128i vsum11 = _mm_add_epi32(vsrc[0][0], vsrc[1][0]);
+      __m128i vsum12 = _mm_add_epi32(vsrc[2][0], vsrc[3][0]);
+      __m128i vsum13 = _mm_add_epi32(vsum11, vsum12);
+      __m128i vsum1 = _mm_add_epi32(vsum13, vsrc[4][0]);
+
+      __m128i vsum21 = _mm_add_epi32(vsrc[0][1], vsrc[1][1]);
+      __m128i vsum22 = _mm_add_epi32(vsrc[2][1], vsrc[3][1]);
+      __m128i vsum23 = _mm_add_epi32(vsum21, vsum22);
+      __m128i vsum2 = _mm_add_epi32(vsum23, vsrc[4][1]);
+
+      vsrc[0][0] = vsrc[1][0];
+      vsrc[0][1] = vsrc[1][1];
+      vsrc[1][0] = vsrc[2][0];
+      vsrc[1][1] = vsrc[2][1];
+      vsrc[2][0] = vsrc[3][0];
+      vsrc[2][1] = vsrc[3][1];
+      vsrc[3][0] = vsrc[4][0];
+      vsrc[3][1] = vsrc[4][1];
+
+      acc_5x5_sse[row][col] = xx_mask_and_hadd(vsum1, vsum2, 0);
+      acc_5x5_sse[row][col + 1] = xx_mask_and_hadd(vsum1, vsum2, 1);
+      acc_5x5_sse[row][col + 2] = xx_mask_and_hadd(vsum1, vsum2, 2);
+      acc_5x5_sse[row][col + 3] = xx_mask_and_hadd(vsum1, vsum2, 3);
+    }
+  }
+
+  // Hoist per-subblock factors out of the pixel loops.
+  double subblock_mses_scaled[4];
+  double d_factor_decayed[4];
+  for (int idx = 0; idx < 4; idx++) {
+    subblock_mses_scaled[idx] = subblock_mses[idx] * inv_factor;
+    d_factor_decayed[idx] = d_factor[idx] * decay_factor;
+  }
+  // tf_wgt_calc_lvl == 0 uses exact exp(); otherwise approx_exp() with
+  // float rounding. The two branches are otherwise identical.
+  if (tf_wgt_calc_lvl == 0) {
+    for (int i = 0, k = 0; i < block_height; i++) {
+      const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+      for (int j = 0; j < block_width; j++, k++) {
+        const int pixel_value = frame2[i * stride2 + j];
+        uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+
+        // Scale down the difference for high bit depth input.
+        diff_sse >>= ((bd - 8) * 2);
+
+        const double window_error = diff_sse * inv_num_ref_pixels;
+        const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+
+        const double combined_error =
+            weight_factor * window_error + subblock_mses_scaled[subblock_idx];
+
+        double scaled_error = combined_error * d_factor_decayed[subblock_idx];
+        scaled_error = AOMMIN(scaled_error, 7);
+        const int weight = (int)(exp(-scaled_error) * TF_WEIGHT_SCALE);
+
+        count[k] += weight;
+        accumulator[k] += weight * pixel_value;
+      }
+    }
+  } else {
+    for (int i = 0, k = 0; i < block_height; i++) {
+      const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+      for (int j = 0; j < block_width; j++, k++) {
+        const int pixel_value = frame2[i * stride2 + j];
+        uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+
+        // Scale down the difference for high bit depth input.
+        diff_sse >>= ((bd - 8) * 2);
+
+        const double window_error = diff_sse * inv_num_ref_pixels;
+        const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+
+        const double combined_error =
+            weight_factor * window_error + subblock_mses_scaled[subblock_idx];
+
+        double scaled_error = combined_error * d_factor_decayed[subblock_idx];
+        scaled_error = AOMMIN(scaled_error, 7);
+        const float fweight =
+            approx_exp((float)-scaled_error) * TF_WEIGHT_SCALE;
+        const int weight = iroundpf(fweight);
+
+        count[k] += weight;
+        accumulator[k] += weight * pixel_value;
+      }
+    }
+  }
+}
+
+// SSE2 entry point of the high bit-depth temporal filter. For each plane it
+// derives the per-plane decay factors, optionally pre-computes the luma SSE
+// contribution for the chroma planes, and dispatches to
+// highbd_apply_temporal_filter() to accumulate weighted predictor pixels
+// into accum[]/count[].
+void av1_highbd_apply_temporal_filter_sse2(
+    const YV12_BUFFER_CONFIG *frame_to_filter, const MACROBLOCKD *mbd,
+    const BLOCK_SIZE block_size, const int mb_row, const int mb_col,
+    const int num_planes, const double *noise_levels, const MV *subblock_mvs,
+    const int *subblock_mses, const int q_factor, const int filter_strength,
+    int tf_wgt_calc_lvl, const uint8_t *pred, uint32_t *accum,
+    uint16_t *count) {
+  const int is_high_bitdepth = frame_to_filter->flags & YV12_FLAG_HIGHBITDEPTH;
+  assert(block_size == BLOCK_32X32 && "Only support 32x32 block with sse2!");
+  assert(TF_WINDOW_LENGTH == 5 && "Only support window length 5 with sse2!");
+  assert(num_planes >= 1 && num_planes <= MAX_MB_PLANE);
+  (void)is_high_bitdepth;
+
+  const int mb_height = block_size_high[block_size];
+  const int mb_width = block_size_wide[block_size];
+  const int frame_height = frame_to_filter->y_crop_height;
+  const int frame_width = frame_to_filter->y_crop_width;
+  const int min_frame_size = AOMMIN(frame_height, frame_width);
+  // Variables to simplify combined error calculation.
+  const double inv_factor = 1.0 / ((TF_WINDOW_BLOCK_BALANCE_WEIGHT + 1) *
+                                   TF_SEARCH_ERROR_NORM_WEIGHT);
+  const double weight_factor =
+      (double)TF_WINDOW_BLOCK_BALANCE_WEIGHT * inv_factor;
+  // Adjust filtering based on q.
+  // Larger q -> stronger filtering -> larger weight.
+  // Smaller q -> weaker filtering -> smaller weight.
+  double q_decay = pow((double)q_factor / TF_Q_DECAY_THRESHOLD, 2);
+  q_decay = CLIP(q_decay, 1e-5, 1);
+  if (q_factor >= TF_QINDEX_CUTOFF) {
+    // Max q_factor is 255, therefore the upper bound of q_decay is 8.
+    // We do not need a clip here.
+    q_decay = 0.5 * pow((double)q_factor / 64, 2);
+  }
+  // Smaller strength -> smaller filtering weight.
+  double s_decay = pow((double)filter_strength / TF_STRENGTH_THRESHOLD, 2);
+  s_decay = CLIP(s_decay, 1e-5, 1);
+  double d_factor[4] = { 0 };
+  uint32_t frame_sse[SSE_STRIDE * BH] = { 0 };
+  uint32_t luma_sse_sum[BW * BH] = { 0 };
+  uint16_t *pred1 = CONVERT_TO_SHORTPTR(pred);
+
+  for (int subblock_idx = 0; subblock_idx < 4; subblock_idx++) {
+    // Larger motion vector -> smaller filtering weight.
+    const MV mv = subblock_mvs[subblock_idx];
+    const double distance = sqrt(pow(mv.row, 2) + pow(mv.col, 2));
+    double distance_threshold = min_frame_size * TF_SEARCH_DISTANCE_THRESHOLD;
+    distance_threshold = AOMMAX(distance_threshold, 1);
+    d_factor[subblock_idx] = distance / distance_threshold;
+    d_factor[subblock_idx] = AOMMAX(d_factor[subblock_idx], 1);
+  }
+
+  // Handle planes in sequence.
+  int plane_offset = 0;
+  for (int plane = 0; plane < num_planes; ++plane) {
+    const uint32_t plane_h = mb_height >> mbd->plane[plane].subsampling_y;
+    const uint32_t plane_w = mb_width >> mbd->plane[plane].subsampling_x;
+    const uint32_t frame_stride = frame_to_filter->strides[plane == 0 ? 0 : 1];
+    const int frame_offset = mb_row * plane_h * frame_stride + mb_col * plane_w;
+
+    const uint16_t *ref =
+        CONVERT_TO_SHORTPTR(frame_to_filter->buffers[plane]) + frame_offset;
+    // Use AOM_PLANE_Y for clarity and consistency with the AVX2
+    // implementation (plane 0 is the luma plane).
+    const int ss_x_shift =
+        mbd->plane[plane].subsampling_x - mbd->plane[AOM_PLANE_Y].subsampling_x;
+    const int ss_y_shift =
+        mbd->plane[plane].subsampling_y - mbd->plane[AOM_PLANE_Y].subsampling_y;
+    const int num_ref_pixels = TF_WINDOW_LENGTH * TF_WINDOW_LENGTH +
+                               ((plane) ? (1 << (ss_x_shift + ss_y_shift)) : 0);
+    const double inv_num_ref_pixels = 1.0 / num_ref_pixels;
+    // Larger noise -> larger filtering weight.
+    const double n_decay = 0.5 + log(2 * noise_levels[plane] + 5.0);
+    // Decay factors for non-local mean approach.
+    const double decay_factor = 1 / (n_decay * q_decay * s_decay);
+
+    // Filter U-plane and V-plane using Y-plane. This is because motion
+    // search is only done on Y-plane, so the information from Y-plane
+    // will be more accurate. The luma sse sum is reused in both chroma
+    // planes.
+    if (plane == AOM_PLANE_U) {
+      for (unsigned int i = 0, k = 0; i < plane_h; i++) {
+        for (unsigned int j = 0; j < plane_w; j++, k++) {
+          for (int ii = 0; ii < (1 << ss_y_shift); ++ii) {
+            for (int jj = 0; jj < (1 << ss_x_shift); ++jj) {
+              const int yy = (i << ss_y_shift) + ii;  // Y-coord on Y-plane.
+              const int xx = (j << ss_x_shift) + jj;  // X-coord on Y-plane.
+              luma_sse_sum[i * BW + j] += frame_sse[yy * SSE_STRIDE + xx + 2];
+            }
+          }
+        }
+      }
+    }
+
+    highbd_apply_temporal_filter(
+        ref, frame_stride, pred1 + plane_offset, plane_w, plane_w, plane_h,
+        subblock_mses, accum + plane_offset, count + plane_offset, frame_sse,
+        luma_sse_sum, mbd->bd, inv_num_ref_pixels, decay_factor, inv_factor,
+        weight_factor, d_factor, tf_wgt_calc_lvl);
+    plane_offset += plane_h * plane_w;
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/ml_avx2.c b/third_party/aom/av1/encoder/x86/ml_avx2.c
new file mode 100644
index 0000000000..6432708416
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/ml_avx2.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <stdbool.h>
+#include <assert.h>
+#include <immintrin.h>
+
+#include "config/av1_rtcd.h"
+#include "av1/encoder/ml.h"
+#include "av1/encoder/x86/ml_sse3.h"
+
+#define CALC_OUTPUT_FOR_2ROWS \
+ const int index = weight_idx + (2 * i * tot_num_inputs); \
+ const __m256 weight0 = _mm256_loadu_ps(&weights[index]); \
+ const __m256 weight1 = _mm256_loadu_ps(&weights[index + tot_num_inputs]); \
+ const __m256 mul0 = _mm256_mul_ps(inputs256, weight0); \
+ const __m256 mul1 = _mm256_mul_ps(inputs256, weight1); \
+ hadd[i] = _mm256_hadd_ps(mul0, mul1);
+
+static INLINE void nn_propagate_8to1(
+ const float *const inputs, const float *const weights,
+ const float *const bias, int num_inputs_to_process, int tot_num_inputs,
+ int num_outputs, float *const output_nodes, int is_clip_required) {
+ // Process one output row at a time.
+ for (int out = 0; out < num_outputs; out++) {
+ __m256 in_result = _mm256_setzero_ps();
+ float bias_val = bias[out];
+ for (int in = 0; in < num_inputs_to_process; in += 8) {
+ const __m256 inputs256 = _mm256_loadu_ps(&inputs[in]);
+ const int weight_idx = in + (out * tot_num_inputs);
+ const __m256 weight0 = _mm256_loadu_ps(&weights[weight_idx]);
+ const __m256 mul0 = _mm256_mul_ps(inputs256, weight0);
+ in_result = _mm256_add_ps(in_result, mul0);
+ }
+ const __m128 low_128 = _mm256_castps256_ps128(in_result);
+ const __m128 high_128 = _mm256_extractf128_ps(in_result, 1);
+ const __m128 sum_par_0 = _mm_add_ps(low_128, high_128);
+ const __m128 sum_par_1 = _mm_hadd_ps(sum_par_0, sum_par_0);
+ const __m128 sum_tot =
+ _mm_add_ps(_mm_shuffle_ps(sum_par_1, sum_par_1, 0x99), sum_par_1);
+
+ bias_val += _mm_cvtss_f32(sum_tot);
+ if (is_clip_required) bias_val = AOMMAX(bias_val, 0);
+ output_nodes[out] = bias_val;
+ }
+}
+
+static INLINE void nn_propagate_8to4(
+ const float *const inputs, const float *const weights,
+ const float *const bias, int num_inputs_to_process, int tot_num_inputs,
+ int num_outputs, float *const output_nodes, int is_clip_required) {
+ __m256 hadd[2];
+ for (int out = 0; out < num_outputs; out += 4) {
+ __m128 bias_reg = _mm_loadu_ps(&bias[out]);
+ __m128 in_result = _mm_setzero_ps();
+ for (int in = 0; in < num_inputs_to_process; in += 8) {
+ const __m256 inputs256 = _mm256_loadu_ps(&inputs[in]);
+ const int weight_idx = in + (out * tot_num_inputs);
+ // Process two output row at a time.
+ for (int i = 0; i < 2; i++) {
+ CALC_OUTPUT_FOR_2ROWS
+ }
+
+ const __m256 sum_par = _mm256_hadd_ps(hadd[0], hadd[1]);
+ const __m128 low_128 = _mm256_castps256_ps128(sum_par);
+ const __m128 high_128 = _mm256_extractf128_ps(sum_par, 1);
+ const __m128 result = _mm_add_ps(low_128, high_128);
+
+ in_result = _mm_add_ps(in_result, result);
+ }
+
+ in_result = _mm_add_ps(in_result, bias_reg);
+ if (is_clip_required) in_result = _mm_max_ps(in_result, _mm_setzero_ps());
+ _mm_storeu_ps(&output_nodes[out], in_result);
+ }
+}
+
+static INLINE void nn_propagate_8to8(
+ const float *const inputs, const float *const weights,
+ const float *const bias, int num_inputs_to_process, int tot_num_inputs,
+ int num_outputs, float *const output_nodes, int is_clip_required) {
+ __m256 hadd[4];
+ for (int out = 0; out < num_outputs; out += 8) {
+ __m256 bias_reg = _mm256_loadu_ps(&bias[out]);
+ __m256 in_result = _mm256_setzero_ps();
+ for (int in = 0; in < num_inputs_to_process; in += 8) {
+ const __m256 inputs256 = _mm256_loadu_ps(&inputs[in]);
+ const int weight_idx = in + (out * tot_num_inputs);
+ // Process two output rows at a time.
+ for (int i = 0; i < 4; i++) {
+ CALC_OUTPUT_FOR_2ROWS
+ }
+ const __m256 hh0 = _mm256_hadd_ps(hadd[0], hadd[1]);
+ const __m256 hh1 = _mm256_hadd_ps(hadd[2], hadd[3]);
+
+ __m256 ht_0 = _mm256_permute2f128_ps(hh0, hh1, 0x20);
+ __m256 ht_1 = _mm256_permute2f128_ps(hh0, hh1, 0x31);
+
+ __m256 result = _mm256_add_ps(ht_0, ht_1);
+ in_result = _mm256_add_ps(in_result, result);
+ }
+ in_result = _mm256_add_ps(in_result, bias_reg);
+ if (is_clip_required)
+ in_result = _mm256_max_ps(in_result, _mm256_setzero_ps());
+ _mm256_storeu_ps(&output_nodes[out], in_result);
+ }
+}
+
+static INLINE void nn_propagate_input_multiple_of_8(
+ const float *const inputs, const float *const weights,
+ const float *const bias, int num_inputs_to_process, int tot_num_inputs,
+ bool is_output_layer, int num_outputs, float *const output_nodes) {
+ // The saturation of output is considered for hidden layer which is not equal
+ // to final hidden layer.
+ const int is_clip_required =
+ !is_output_layer && num_inputs_to_process == tot_num_inputs;
+ if (num_outputs % 8 == 0) {
+ nn_propagate_8to8(inputs, weights, bias, num_inputs_to_process,
+ tot_num_inputs, num_outputs, output_nodes,
+ is_clip_required);
+ } else if (num_outputs % 4 == 0) {
+ nn_propagate_8to4(inputs, weights, bias, num_inputs_to_process,
+ tot_num_inputs, num_outputs, output_nodes,
+ is_clip_required);
+ } else {
+ nn_propagate_8to1(inputs, weights, bias, num_inputs_to_process,
+ tot_num_inputs, num_outputs, output_nodes,
+ is_clip_required);
+ }
+}
+
+void av1_nn_predict_avx2(const float *input_nodes,
+ const NN_CONFIG *const nn_config, int reduce_prec,
+ float *const output) {
+ float buf[2][NN_MAX_NODES_PER_LAYER];
+ int buf_index = 0;
+ int num_inputs = nn_config->num_inputs;
+ assert(num_inputs > 0 && num_inputs <= NN_MAX_NODES_PER_LAYER);
+
+ for (int layer = 0; layer <= nn_config->num_hidden_layers; layer++) {
+ const float *layer_weights = nn_config->weights[layer];
+ const float *layer_bias = nn_config->bias[layer];
+ bool is_output_layer = layer == nn_config->num_hidden_layers;
+ float *const output_nodes = is_output_layer ? output : &buf[buf_index][0];
+ const int num_outputs = is_output_layer
+ ? nn_config->num_outputs
+ : nn_config->num_hidden_nodes[layer];
+ assert(num_outputs > 0 && num_outputs <= NN_MAX_NODES_PER_LAYER);
+
+ // Process input multiple of 8 using AVX2 intrinsic.
+ if (num_inputs % 8 == 0) {
+ nn_propagate_input_multiple_of_8(input_nodes, layer_weights, layer_bias,
+ num_inputs, num_inputs, is_output_layer,
+ num_outputs, output_nodes);
+ } else {
+ // When number of inputs is not multiple of 8, use hybrid approach of AVX2
+ // and SSE3 based on the need.
+ const int in_mul_8 = num_inputs / 8;
+ const int num_inputs_to_process = in_mul_8 * 8;
+ int bias_is_considered = 0;
+ if (in_mul_8) {
+ nn_propagate_input_multiple_of_8(
+ input_nodes, layer_weights, layer_bias, num_inputs_to_process,
+ num_inputs, is_output_layer, num_outputs, output_nodes);
+ bias_is_considered = 1;
+ }
+
+ const float *out_temp = bias_is_considered ? output_nodes : layer_bias;
+ const int input_remaining = num_inputs % 8;
+ if (input_remaining % 4 == 0 && num_outputs % 8 == 0) {
+ for (int out = 0; out < num_outputs; out += 8) {
+ __m128 out_h = _mm_loadu_ps(&out_temp[out + 4]);
+ __m128 out_l = _mm_loadu_ps(&out_temp[out]);
+ for (int in = in_mul_8 * 8; in < num_inputs; in += 4) {
+ av1_nn_propagate_4to8_sse3(&input_nodes[in],
+ &layer_weights[out * num_inputs + in],
+ &out_h, &out_l, num_inputs);
+ }
+ if (!is_output_layer) {
+ const __m128 zero = _mm_setzero_ps();
+ out_h = _mm_max_ps(out_h, zero);
+ out_l = _mm_max_ps(out_l, zero);
+ }
+ _mm_storeu_ps(&output_nodes[out + 4], out_h);
+ _mm_storeu_ps(&output_nodes[out], out_l);
+ }
+ } else if (input_remaining % 4 == 0 && num_outputs % 4 == 0) {
+ for (int out = 0; out < num_outputs; out += 4) {
+ __m128 outputs = _mm_loadu_ps(&out_temp[out]);
+ for (int in = in_mul_8 * 8; in < num_inputs; in += 4) {
+ av1_nn_propagate_4to4_sse3(&input_nodes[in],
+ &layer_weights[out * num_inputs + in],
+ &outputs, num_inputs);
+ }
+ if (!is_output_layer) outputs = _mm_max_ps(outputs, _mm_setzero_ps());
+ _mm_storeu_ps(&output_nodes[out], outputs);
+ }
+ } else if (input_remaining % 4 == 0) {
+ for (int out = 0; out < num_outputs; out++) {
+ __m128 outputs = _mm_load1_ps(&out_temp[out]);
+ for (int in = in_mul_8 * 8; in < num_inputs; in += 4) {
+ av1_nn_propagate_4to1_sse3(&input_nodes[in],
+ &layer_weights[out * num_inputs + in],
+ &outputs);
+ }
+ if (!is_output_layer) outputs = _mm_max_ps(outputs, _mm_setzero_ps());
+ output_nodes[out] = _mm_cvtss_f32(outputs);
+ }
+ } else {
+ // Use SSE instructions for scalar operations to avoid the latency
+ // of swapping between SIMD and FPU modes.
+ for (int out = 0; out < num_outputs; out++) {
+ __m128 outputs = _mm_load1_ps(&out_temp[out]);
+ for (int in_node = in_mul_8 * 8; in_node < num_inputs; in_node++) {
+ __m128 input = _mm_load1_ps(&input_nodes[in_node]);
+ __m128 weight =
+ _mm_load1_ps(&layer_weights[num_inputs * out + in_node]);
+ outputs = _mm_add_ps(outputs, _mm_mul_ps(input, weight));
+ }
+ if (!is_output_layer) outputs = _mm_max_ps(outputs, _mm_setzero_ps());
+ output_nodes[out] = _mm_cvtss_f32(outputs);
+ }
+ }
+ }
+ // Before processing the next layer, treat the output of current layer as
+ // input to next layer.
+ input_nodes = output_nodes;
+ num_inputs = num_outputs;
+ buf_index = 1 - buf_index;
+ }
+ if (reduce_prec) av1_nn_output_prec_reduce(output, nn_config->num_outputs);
+}
diff --git a/third_party/aom/av1/encoder/x86/ml_sse3.c b/third_party/aom/av1/encoder/x86/ml_sse3.c
new file mode 100644
index 0000000000..4748a68d38
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/ml_sse3.c
@@ -0,0 +1,336 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <stdbool.h>
+#include <assert.h>
+
+#include "config/av1_rtcd.h"
+#include "av1/encoder/ml.h"
+#include "av1/encoder/x86/ml_sse3.h"
+
+// In order to avoid the high-latency of swapping between FPU and SIMD
+// operations, we keep the result in a 128-bit register even though we only
+// care about a single value.
+static void nn_propagate_8to1(const float *const inputs,
+ const float *const weights,
+ __m128 *const output) {
+ const __m128 inputs_h = _mm_loadu_ps(&inputs[4]);
+ const __m128 inputs_l = _mm_loadu_ps(inputs);
+
+ const __m128 weights_h = _mm_loadu_ps(&weights[4]);
+ const __m128 weights_l = _mm_loadu_ps(weights);
+
+ const __m128 mul_h = _mm_mul_ps(inputs_h, weights_h);
+ const __m128 mul_l = _mm_mul_ps(inputs_l, weights_l);
+ // [7 6 5 4] [3 2 1 0] (weight and input indices)
+
+ const __m128 vadd = _mm_add_ps(mul_l, mul_h);
+ // [7+3 6+2 5+1 4+0]
+ const __m128 hadd1 = _mm_hadd_ps(vadd, vadd);
+ // [7+6+3+2 5+4+1+0 7+6+3+2 5+4+1+0]
+ const __m128 hadd2 = _mm_hadd_ps(hadd1, hadd1);
+ // [7+6+5+4+3+2+1+0 7+6+5+4+3+2+1+0 7+6+5+4+3+2+1+0 7+6+5+4+3+2+1+0]
+ *output = _mm_add_ps(*output, hadd2);
+}
+
+void av1_nn_propagate_4to1_sse3(const float *const inputs,
+ const float *const weights,
+ __m128 *const output) {
+ const __m128 inputs128 = _mm_loadu_ps(inputs);
+
+ const __m128 weights128 = _mm_loadu_ps(weights);
+
+ const __m128 mul = _mm_mul_ps(inputs128, weights128);
+ // [3 2 1 0] (weight and input indices)
+
+ const __m128 hadd1 = _mm_hadd_ps(mul, mul);
+ // [3+2 1+0 3+2 1+0]
+ const __m128 hadd2 = _mm_hadd_ps(hadd1, hadd1);
+ // [3+2+1+0 3+2+1+0 3+2+1+0 3+2+1+0]
+ *output = _mm_add_ps(*output, hadd2);
+}
+
+void av1_nn_propagate_4to4_sse3(const float *const inputs,
+ const float *const weights,
+ __m128 *const outputs, const int num_inputs) {
+ const __m128 inputs128 = _mm_loadu_ps(inputs);
+
+ __m128 hadd[2];
+ for (int i = 0; i < 2; i++) { // For each pair of outputs
+ const __m128 weight0 = _mm_loadu_ps(&weights[2 * i * num_inputs]);
+ const __m128 mul0 = _mm_mul_ps(weight0, inputs128);
+ const __m128 weight1 = _mm_loadu_ps(&weights[(2 * i + 1) * num_inputs]);
+ const __m128 mul1 = _mm_mul_ps(weight1, inputs128);
+ hadd[i] = _mm_hadd_ps(mul0, mul1);
+ }
+ // hadd[0] = [7+6 5+4 3+2 1+0] (weight indices)
+ // hadd[1] = [15+14 13+12 11+10 9+8]
+
+ const __m128 hh = _mm_hadd_ps(hadd[0], hadd[1]);
+ // [15+14+13+12 11+10+9+8 7+6+5+4 3+2+1+0]
+
+ *outputs = _mm_add_ps(*outputs, hh);
+}
+
+void av1_nn_propagate_4to8_sse3(const float *const inputs,
+ const float *const weights, __m128 *const out_h,
+ __m128 *const out_l, const int num_inputs) {
+ const __m128 inputs128 = _mm_loadu_ps(inputs);
+
+ __m128 hadd[4];
+ for (int i = 0; i < 4; i++) { // For each pair of outputs
+ const __m128 weight0 = _mm_loadu_ps(&weights[2 * i * num_inputs]);
+ const __m128 weight1 = _mm_loadu_ps(&weights[(2 * i + 1) * num_inputs]);
+ const __m128 mul0 = _mm_mul_ps(inputs128, weight0);
+ const __m128 mul1 = _mm_mul_ps(inputs128, weight1);
+ hadd[i] = _mm_hadd_ps(mul0, mul1);
+ }
+ // hadd[0] = [7+6 5+4 3+2 1+0] (weight indices)
+ // hadd[1] = [15+14 13+12 11+10 9+8]
+ // hadd[2] = [23+22 21+20 19+18 17+16]
+ // hadd[3] = [31+30 29+28 27+26 25+24]
+
+ const __m128 hh0 = _mm_hadd_ps(hadd[0], hadd[1]);
+ // [15+14+13+12 11+10+9+8 7+6+5+4 3+2+1+0]
+ const __m128 hh1 = _mm_hadd_ps(hadd[2], hadd[3]);
+ // [31+30+29+28 27+26+25+24 23+22+21+20 19+18+17+16]
+
+ *out_h = _mm_add_ps(*out_h, hh1);
+ *out_l = _mm_add_ps(*out_l, hh0);
+}
+
+static void nn_propagate_8to4(const float *const inputs,
+ const float *const weights, __m128 *const outputs,
+ const int num_inputs) {
+ const __m128 inputs_h = _mm_loadu_ps(inputs + 4);
+ const __m128 inputs_l = _mm_loadu_ps(inputs);
+ // [7 6 5 4] [3 2 1 0] (input indices)
+
+ __m128 add[4];
+ for (int i = 0; i < 4; i++) { // For each output:
+ const __m128 weight_h = _mm_loadu_ps(&weights[i * num_inputs + 4]);
+ const __m128 weight_l = _mm_loadu_ps(&weights[i * num_inputs]);
+ const __m128 mul_h = _mm_mul_ps(inputs_h, weight_h);
+ const __m128 mul_l = _mm_mul_ps(inputs_l, weight_l);
+ add[i] = _mm_add_ps(mul_l, mul_h);
+ }
+ // add[0] = [7+3 6+2 5+1 4+0]
+ // add[1] = [15+11 14+10 13+9 12+8]
+ // add[2] = [23+19 22+18 21+17 20+16]
+ // add[3] = [31+27 30+26 29+25 28+24]
+
+ const __m128 hadd_h = _mm_hadd_ps(add[2], add[3]);
+ // [31+30+27+26 29+28+25+24 23+22+19+18 21+20+17+16]
+ const __m128 hadd_l = _mm_hadd_ps(add[0], add[1]);
+ // [15+14+11+10 13+12+9+8 7+6+3+2 5+4+1+0]
+
+ const __m128 haddhadd = _mm_hadd_ps(hadd_l, hadd_h);
+ // [31+30+29+28+27+26+25+24 23+22+21+20+19+18+17+16
+ // 15+14+13+12+11+10+9+8 7+6+5+4+3+2+1+0]
+
+ *outputs = _mm_add_ps(*outputs, haddhadd);
+}
+
+static void nn_activate8(__m128 *out_h, __m128 *out_l) {
+ const __m128 zero = _mm_setzero_ps();
+ *out_h = _mm_max_ps(*out_h, zero);
+ *out_l = _mm_max_ps(*out_l, zero);
+}
+
+static void nn_activate4(__m128 *x) { *x = _mm_max_ps(*x, _mm_setzero_ps()); }
+
+// Calculate prediction based on the given input features and neural net config.
+// Assume there are no more than NN_MAX_NODES_PER_LAYER nodes in each hidden
+// layer.
+void av1_nn_predict_sse3(const float *input_nodes,
+ const NN_CONFIG *const nn_config, int reduce_prec,
+ float *const output) {
+ float buf[2][NN_MAX_NODES_PER_LAYER];
+ int buf_index = 0;
+ int num_inputs = nn_config->num_inputs;
+
+ // Hidden layers, except the final iteration is the output layer.
+ for (int layer = 0; layer <= nn_config->num_hidden_layers; layer++) {
+ const float *layer_weights = nn_config->weights[layer];
+ const float *layer_bias = nn_config->bias[layer];
+ bool output_layer = (layer == nn_config->num_hidden_layers);
+ float *const output_nodes = output_layer ? output : &buf[buf_index][0];
+ const int num_outputs = output_layer ? nn_config->num_outputs
+ : nn_config->num_hidden_nodes[layer];
+
+ if (num_inputs % 4 == 0 && num_outputs % 8 == 0) {
+ for (int out = 0; out < num_outputs; out += 8) {
+ __m128 out_h = _mm_loadu_ps(&layer_bias[out + 4]);
+ __m128 out_l = _mm_loadu_ps(&layer_bias[out]);
+ for (int in = 0; in < num_inputs; in += 4) {
+ av1_nn_propagate_4to8_sse3(&input_nodes[in],
+ &layer_weights[out * num_inputs + in],
+ &out_h, &out_l, num_inputs);
+ }
+ if (!output_layer) nn_activate8(&out_h, &out_l);
+ _mm_storeu_ps(&output_nodes[out + 4], out_h);
+ _mm_storeu_ps(&output_nodes[out], out_l);
+ }
+ } else if (num_inputs % 8 == 0 && num_outputs % 4 == 0) {
+ for (int out = 0; out < num_outputs; out += 4) {
+ __m128 outputs = _mm_loadu_ps(&layer_bias[out]);
+ for (int in = 0; in < num_inputs; in += 8) {
+ nn_propagate_8to4(&input_nodes[in],
+ &layer_weights[out * num_inputs + in], &outputs,
+ num_inputs);
+ }
+ if (!output_layer) nn_activate4(&outputs);
+ _mm_storeu_ps(&output_nodes[out], outputs);
+ }
+ } else if (num_inputs % 4 == 0 && num_outputs % 4 == 0) {
+ for (int out = 0; out < num_outputs; out += 4) {
+ __m128 outputs = _mm_loadu_ps(&layer_bias[out]);
+ for (int in = 0; in < num_inputs; in += 4) {
+ av1_nn_propagate_4to4_sse3(&input_nodes[in],
+ &layer_weights[out * num_inputs + in],
+ &outputs, num_inputs);
+ }
+ if (!output_layer) nn_activate4(&outputs);
+ _mm_storeu_ps(&output_nodes[out], outputs);
+ }
+ } else if (num_inputs % 8 == 0) {
+ for (int out = 0; out < num_outputs; out++) {
+ __m128 total = _mm_load1_ps(&layer_bias[out]);
+ for (int in = 0; in < num_inputs; in += 8) {
+ nn_propagate_8to1(&input_nodes[in],
+ &layer_weights[out * num_inputs + in], &total);
+ }
+ if (!output_layer) nn_activate4(&total);
+ output_nodes[out] = _mm_cvtss_f32(total);
+ }
+ } else if (num_inputs % 4 == 0) {
+ for (int out = 0; out < num_outputs; out++) {
+ __m128 total = _mm_load1_ps(&layer_bias[out]);
+ for (int in = 0; in < num_inputs; in += 4) {
+ av1_nn_propagate_4to1_sse3(
+ &input_nodes[in], &layer_weights[out * num_inputs + in], &total);
+ }
+ if (!output_layer) nn_activate4(&total);
+ output_nodes[out] = _mm_cvtss_f32(total);
+ }
+ } else {
+ // Use SSE instructions for scalar operations to avoid the latency of
+ // swapping between SIMD and FPU modes.
+ for (int out = 0; out < num_outputs; out++) {
+ __m128 total = _mm_load1_ps(&layer_bias[out]);
+ for (int in_node = 0; in_node < num_inputs; in_node++) {
+ __m128 input = _mm_load1_ps(&input_nodes[in_node]);
+ __m128 weight =
+ _mm_load1_ps(&layer_weights[num_inputs * out + in_node]);
+ total = _mm_add_ps(total, _mm_mul_ps(input, weight));
+ }
+ if (!output_layer) nn_activate4(&total);
+ output_nodes[out] = _mm_cvtss_f32(total);
+ }
+ }
+ input_nodes = output_nodes;
+ num_inputs = num_outputs;
+ buf_index = 1 - buf_index;
+ }
+ if (reduce_prec) av1_nn_output_prec_reduce(output, nn_config->num_outputs);
+}
+
+// Based on N. N. Schraudolph. A Fast, Compact Approximation of the Exponential
+// Function. Neural Computation, 11(4):853–862, 1999.
+static AOM_INLINE __m128 approx_exp(__m128 y) {
+#define A ((1 << 23) / 0.69314718056f) // (1 << 23) / ln(2)
+#define B \
+ 127 // Offset for the exponent according to IEEE floating point standard.
+#define C 60801 // Magic number controls the accuracy of approximation
+ const __m128 multiplier = _mm_set1_ps(A);
+ const __m128i offset = _mm_set1_epi32(B * (1 << 23) - C);
+
+ y = _mm_mul_ps(y, multiplier);
+ y = _mm_castsi128_ps(_mm_add_epi32(_mm_cvtps_epi32(y), offset));
+ return y;
+#undef A
+#undef B
+#undef C
+}
+
+static AOM_INLINE __m128 reduce_max(__m128 reg) {
+ __m128 tmp_reg;
+
+ tmp_reg = _mm_shuffle_ps(reg, reg, 0x4e); // 01 00 11 10
+ reg = _mm_max_ps(reg, tmp_reg);
+
+ tmp_reg = _mm_shuffle_ps(reg, reg, 0xb1); // 10 11 00 01
+ reg = _mm_max_ps(reg, tmp_reg);
+
+ return reg;
+}
+
+static AOM_INLINE __m128 reduce_sum(__m128 reg) {
+ __m128 tmp_reg;
+
+ tmp_reg = _mm_shuffle_ps(reg, reg, 0x4e); // 01 00 11 10
+ reg = _mm_add_ps(reg, tmp_reg);
+
+ tmp_reg = _mm_shuffle_ps(reg, reg, 0xb1); // 10 11 00 01
+ reg = _mm_add_ps(reg, tmp_reg);
+
+ return reg;
+}
+
+void av1_nn_fast_softmax_16_sse3(const float *input, float *output) {
+ // Clips at -10 to avoid underflowing
+ const __m128 clipper = _mm_set1_ps(-10.0f);
+
+ // Load in 16 values
+ __m128 in_0 = _mm_loadu_ps(&input[0]);
+ __m128 in_1 = _mm_loadu_ps(&input[4]);
+ __m128 in_2 = _mm_loadu_ps(&input[8]);
+ __m128 in_3 = _mm_loadu_ps(&input[12]);
+
+ // Get the max
+ __m128 max_0 = _mm_max_ps(in_0, in_1);
+ __m128 max_1 = _mm_max_ps(in_2, in_3);
+
+ max_0 = _mm_max_ps(max_0, max_1);
+ max_0 = reduce_max(max_0);
+
+ // Subtract the max off and clip
+ in_0 = _mm_sub_ps(in_0, max_0);
+ in_1 = _mm_sub_ps(in_1, max_0);
+ in_2 = _mm_sub_ps(in_2, max_0);
+ in_3 = _mm_sub_ps(in_3, max_0);
+
+ in_0 = _mm_max_ps(in_0, clipper);
+ in_1 = _mm_max_ps(in_1, clipper);
+ in_2 = _mm_max_ps(in_2, clipper);
+ in_3 = _mm_max_ps(in_3, clipper);
+
+ // Exponentiate and compute the denominator
+ __m128 sum = in_0 = approx_exp(in_0);
+ in_1 = approx_exp(in_1);
+ sum = _mm_add_ps(sum, in_1);
+ in_2 = approx_exp(in_2);
+ sum = _mm_add_ps(sum, in_2);
+ in_3 = approx_exp(in_3);
+ sum = _mm_add_ps(sum, in_3);
+ sum = reduce_sum(sum);
+
+ // Divide to get the probability
+ in_0 = _mm_div_ps(in_0, sum);
+ in_1 = _mm_div_ps(in_1, sum);
+ in_2 = _mm_div_ps(in_2, sum);
+ in_3 = _mm_div_ps(in_3, sum);
+
+ _mm_storeu_ps(&output[0], in_0);
+ _mm_storeu_ps(&output[4], in_1);
+ _mm_storeu_ps(&output[8], in_2);
+ _mm_storeu_ps(&output[12], in_3);
+}
diff --git a/third_party/aom/av1/encoder/x86/ml_sse3.h b/third_party/aom/av1/encoder/x86/ml_sse3.h
new file mode 100644
index 0000000000..f41a2474af
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/ml_sse3.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AV1_ENCODER_X86_ML_SSE3_H_
+#define AOM_AV1_ENCODER_X86_ML_SSE3_H_
+
+#include <pmmintrin.h>
+
+void av1_nn_propagate_4to1_sse3(const float *const inputs,
+ const float *const weights,
+ __m128 *const output);
+
+void av1_nn_propagate_4to4_sse3(const float *const inputs,
+ const float *const weights,
+ __m128 *const outputs, const int num_inputs);
+
+void av1_nn_propagate_4to8_sse3(const float *const inputs,
+ const float *const weights, __m128 *const out_h,
+ __m128 *const out_l, const int num_inputs);
+
+#endif // AOM_AV1_ENCODER_X86_ML_SSE3_H_
diff --git a/third_party/aom/av1/encoder/x86/pickrst_avx2.c b/third_party/aom/av1/encoder/x86/pickrst_avx2.c
new file mode 100644
index 0000000000..6658ed39a8
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/pickrst_avx2.c
@@ -0,0 +1,2348 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <immintrin.h> // AVX2
+#include "aom_dsp/x86/mem_sse2.h"
+#include "aom_dsp/x86/synonyms.h"
+#include "aom_dsp/x86/synonyms_avx2.h"
+#include "aom_dsp/x86/transpose_sse2.h"
+
+#include "config/av1_rtcd.h"
+#include "av1/common/restoration.h"
+#include "av1/encoder/pickrst.h"
+
+#if CONFIG_AV1_HIGHBITDEPTH
+static INLINE void acc_stat_highbd_avx2(int64_t *dst, const uint16_t *dgd,
+ const __m256i *shuffle,
+ const __m256i *dgd_ijkl) {
+ // Load two 128-bit chunks from dgd
+ const __m256i s0 = _mm256_inserti128_si256(
+ _mm256_castsi128_si256(_mm_loadu_si128((__m128i *)dgd)),
+ _mm_loadu_si128((__m128i *)(dgd + 4)), 1);
+ // s0 = [11 10 9 8 7 6 5 4] [7 6 5 4 3 2 1 0] as u16 (values are dgd indices)
+ // The weird order is so the shuffle stays within 128-bit lanes
+
+ // Shuffle 16x u16 values within lanes according to the mask:
+ // [0 1 1 2 2 3 3 4] [0 1 1 2 2 3 3 4]
+ // (Actually we shuffle u8 values as there's no 16-bit shuffle)
+ const __m256i s1 = _mm256_shuffle_epi8(s0, *shuffle);
+ // s1 = [8 7 7 6 6 5 5 4] [4 3 3 2 2 1 1 0] as u16 (values are dgd indices)
+
+ // Multiply 16x 16-bit integers in dgd_ijkl and s1, resulting in 16x 32-bit
+ // integers then horizontally add pairs of these integers resulting in 8x
+ // 32-bit integers
+ const __m256i d0 = _mm256_madd_epi16(*dgd_ijkl, s1);
+ // d0 = [a b c d] [e f g h] as u32
+
+ // Take the lower-half of d0, extend to u64, add it on to dst (H)
+ const __m256i d0l = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(d0, 0));
+ // d0l = [a b] [c d] as u64
+ const __m256i dst0 = yy_load_256(dst);
+ yy_store_256(dst, _mm256_add_epi64(d0l, dst0));
+
+ // Take the upper-half of d0, extend to u64, add it on to dst (H)
+ const __m256i d0h = _mm256_cvtepu32_epi64(_mm256_extracti128_si256(d0, 1));
+ // d0h = [e f] [g h] as u64
+ const __m256i dst1 = yy_load_256(dst + 4);
+ yy_store_256(dst + 4, _mm256_add_epi64(d0h, dst1));
+}
+
+static INLINE void acc_stat_highbd_win7_one_line_avx2(
+ const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
+ int dgd_stride, const __m256i *shuffle, int32_t *sumX,
+ int32_t sumY[WIENER_WIN][WIENER_WIN], int64_t M_int[WIENER_WIN][WIENER_WIN],
+ int64_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
+ int j, k, l;
+ const int wiener_win = WIENER_WIN;
+ // Main loop handles two pixels at a time
+ // We can assume that h_start is even, since it will always be aligned to
+ // a tile edge + some number of restoration units, and both of those will
+ // be 64-pixel aligned.
+ // However, at the edge of the image, h_end may be odd, so we need to handle
+ // that case correctly.
+ assert(h_start % 2 == 0);
+ const int h_end_even = h_end & ~1;
+ const int has_odd_pixel = h_end & 1;
+ for (j = h_start; j < h_end_even; j += 2) {
+ const uint16_t X1 = src[j];
+ const uint16_t X2 = src[j + 1];
+ *sumX += X1 + X2;
+ const uint16_t *dgd_ij = dgd + j;
+ for (k = 0; k < wiener_win; k++) {
+ const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
+ for (l = 0; l < wiener_win; l++) {
+ int64_t *H_ = &H_int[(l * wiener_win + k)][0];
+ const uint16_t D1 = dgd_ijk[l];
+ const uint16_t D2 = dgd_ijk[l + 1];
+ sumY[k][l] += D1 + D2;
+ M_int[k][l] += D1 * X1 + D2 * X2;
+
+ // Load two u16 values from dgd_ijkl combined as a u32,
+ // then broadcast to 8x u32 slots of a 256
+ const __m256i dgd_ijkl = _mm256_set1_epi32(loadu_int32(dgd_ijk + l));
+ // dgd_ijkl = [y x y x y x y x] [y x y x y x y x] where each is a u16
+
+ acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ }
+ }
+ }
+ // If the width is odd, add in the final pixel
+ if (has_odd_pixel) {
+ const uint16_t X1 = src[j];
+ *sumX += X1;
+ const uint16_t *dgd_ij = dgd + j;
+ for (k = 0; k < wiener_win; k++) {
+ const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
+ for (l = 0; l < wiener_win; l++) {
+ int64_t *H_ = &H_int[(l * wiener_win + k)][0];
+ const uint16_t D1 = dgd_ijk[l];
+ sumY[k][l] += D1;
+ M_int[k][l] += D1 * X1;
+
+ // The `acc_stat_highbd_avx2` function wants its input to have
+ // interleaved copies of two pixels, but we only have one. However, the
+ // pixels are (effectively) used as inputs to a multiply-accumulate. So
+ // if we set the extra pixel slot to 0, then it is effectively ignored.
+ const __m256i dgd_ijkl = _mm256_set1_epi32((int)D1);
+
+ acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ acc_stat_highbd_avx2(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
+ &dgd_ijkl);
+ }
+ }
+ }
+}
+
+static INLINE void compute_stats_highbd_win7_opt_avx2(
+ const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
+ int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
+ int64_t *H, aom_bit_depth_t bit_depth) {
+ int i, j, k, l, m, n;
+ const int wiener_win = WIENER_WIN;
+ const int pixel_count = (h_end - h_start) * (v_end - v_start);
+ const int wiener_win2 = wiener_win * wiener_win;
+ const int wiener_halfwin = (wiener_win >> 1);
+ const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+ const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
+ const uint16_t avg =
+ find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+ int64_t M_int[WIENER_WIN][WIENER_WIN] = { { 0 } };
+ DECLARE_ALIGNED(32, int64_t, H_int[WIENER_WIN2][WIENER_WIN * 8]) = { { 0 } };
+ int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
+ int32_t sumX = 0;
+ const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
+
+ const __m256i shuffle = yy_loadu_256(g_shuffle_stats_highbd_data);
+ for (j = v_start; j < v_end; j += 64) {
+ const int vert_end = AOMMIN(64, v_end - j) + j;
+ for (i = j; i < vert_end; i++) {
+ acc_stat_highbd_win7_one_line_avx2(
+ dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
+ dgd_stride, &shuffle, &sumX, sumY, M_int, H_int);
+ }
+ }
+
+ uint8_t bit_depth_divider = 1;
+ if (bit_depth == AOM_BITS_12)
+ bit_depth_divider = 16;
+ else if (bit_depth == AOM_BITS_10)
+ bit_depth_divider = 4;
+
+ const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
+ for (k = 0; k < wiener_win; k++) {
+ for (l = 0; l < wiener_win; l++) {
+ const int32_t idx0 = l * wiener_win + k;
+ M[idx0] = (M_int[k][l] +
+ (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
+ bit_depth_divider;
+ int64_t *H_ = H + idx0 * wiener_win2;
+ int64_t *H_int_ = &H_int[idx0][0];
+ for (m = 0; m < wiener_win; m++) {
+ for (n = 0; n < wiener_win; n++) {
+ H_[m * wiener_win + n] =
+ (H_int_[n * 8 + m] +
+ (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
+ bit_depth_divider;
+ }
+ }
+ }
+ }
+}
+
// Accumulates one image row's worth of Wiener-filter statistics for the
// 5x5 (chroma) window:
//   *sumX    += source pixels            (plain sum)
//   sumY     += degraded window pixels   (plain sums, one per tap)
//   M_int    += dgd * src                (cross-correlation)
//   H_int    += dgd * dgd'               (auto-covariance, accumulated by
//                                         acc_stat_highbd_avx2)
// Pixels are processed two at a time; a trailing odd pixel is handled in a
// separate pass with the second multiply operand zeroed.
static INLINE void acc_stat_highbd_win5_one_line_avx2(
    const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
    int dgd_stride, const __m256i *shuffle, int32_t *sumX,
    int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
    int64_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
    int64_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
  int j, k, l;
  const int wiener_win = WIENER_WIN_CHROMA;
  // Main loop handles two pixels at a time
  // We can assume that h_start is even, since it will always be aligned to
  // a tile edge + some number of restoration units, and both of those will
  // be 64-pixel aligned.
  // However, at the edge of the image, h_end may be odd, so we need to handle
  // that case correctly.
  assert(h_start % 2 == 0);
  const int h_end_even = h_end & ~1;
  const int has_odd_pixel = h_end & 1;
  for (j = h_start; j < h_end_even; j += 2) {
    const uint16_t X1 = src[j];
    const uint16_t X2 = src[j + 1];
    *sumX += X1 + X2;
    const uint16_t *dgd_ij = dgd + j;
    for (k = 0; k < wiener_win; k++) {
      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
      for (l = 0; l < wiener_win; l++) {
        // H_int rows are indexed (l * wiener_win + k), i.e. transposed
        // relative to M_int[k][l]; each row has 8 slots per tap.
        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
        const uint16_t D1 = dgd_ijk[l];
        const uint16_t D2 = dgd_ijk[l + 1];
        sumY[k][l] += D1 + D2;
        M_int[k][l] += D1 * X1 + D2 * X2;

        // Load two u16 values from dgd_ijkl combined as a u32,
        // then broadcast to 8x u32 slots of a 256
        const __m256i dgd_ijkl = _mm256_set1_epi32(loadu_int32(dgd_ijk + l));
        // dgd_ijkl = [x y x y x y x y] [x y x y x y x y] where each is a u16

        acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
                             &dgd_ijkl);
      }
    }
  }
  // If the width is odd, add in the final pixel
  if (has_odd_pixel) {
    const uint16_t X1 = src[j];
    *sumX += X1;
    const uint16_t *dgd_ij = dgd + j;
    for (k = 0; k < wiener_win; k++) {
      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
      for (l = 0; l < wiener_win; l++) {
        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
        const uint16_t D1 = dgd_ijk[l];
        sumY[k][l] += D1;
        M_int[k][l] += D1 * X1;

        // The `acc_stat_highbd_avx2` function wants its input to have
        // interleaved copies of two pixels, but we only have one. However, the
        // pixels are (effectively) used as inputs to a multiply-accumulate. So
        // if we set the extra pixel slot to 0, then it is effectively ignored.
        const __m256i dgd_ijkl = _mm256_set1_epi32((int)D1);

        acc_stat_highbd_avx2(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
                             &dgd_ijkl);
        acc_stat_highbd_avx2(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
                             &dgd_ijkl);
      }
    }
  }
}
+
// Computes the Wiener filter statistics for high bit-depth input with the
// 5x5 (chroma) window. Raw integer sums are gathered per line by
// acc_stat_highbd_win5_one_line_avx2(); the final M (cross-correlation) and
// H (auto-covariance) entries are then mean-compensated using `avg` and
// scaled down by a bit-depth dependent divisor.
static INLINE void compute_stats_highbd_win5_opt_avx2(
    const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
    int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
    int64_t *H, aom_bit_depth_t bit_depth) {
  int i, j, k, l, m, n;
  const int wiener_win = WIENER_WIN_CHROMA;
  const int pixel_count = (h_end - h_start) * (v_end - v_start);
  const int wiener_win2 = wiener_win * wiener_win;
  const int wiener_halfwin = (wiener_win >> 1);
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
  const uint16_t avg =
      find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);

  int64_t M_int64[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
  DECLARE_ALIGNED(
      32, int64_t,
      H_int64[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) = { { 0 } };
  int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
  int32_t sumX = 0;
  // Point at the top-left corner of the 5x5 window around each pixel.
  const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;

  const __m256i shuffle = yy_loadu_256(g_shuffle_stats_highbd_data);
  // Walk the vertical range in chunks of up to 64 rows.
  // NOTE(review): the accumulators are not reset between chunks, so the
  // chunking does not change the result here — presumably it mirrors the
  // structure of the low bit-depth variant; confirm.
  for (j = v_start; j < v_end; j += 64) {
    const int vert_end = AOMMIN(64, v_end - j) + j;
    for (i = j; i < vert_end; i++) {
      acc_stat_highbd_win5_one_line_avx2(
          dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
          dgd_stride, &shuffle, &sumX, sumY, M_int64, H_int64);
    }
  }

  // Normalization: divide by 16 for 12-bit input, by 4 for 10-bit, by 1
  // otherwise.
  uint8_t bit_depth_divider = 1;
  if (bit_depth == AOM_BITS_12)
    bit_depth_divider = 16;
  else if (bit_depth == AOM_BITS_10)
    bit_depth_divider = 4;

  // Convert the raw sums E[x*y] into mean-compensated statistics
  // E[x*y] - avg*(E[x] + E[y]) + avg^2*N, then apply the divisor.
  const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
  for (k = 0; k < wiener_win; k++) {
    for (l = 0; l < wiener_win; l++) {
      const int32_t idx0 = l * wiener_win + k;
      M[idx0] = (M_int64[k][l] +
                 (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
                bit_depth_divider;
      int64_t *H_ = H + idx0 * wiener_win2;
      // H_int64 rows hold 8 slots per tap, hence the n * 8 indexing.
      int64_t *H_int_ = &H_int64[idx0][0];
      for (m = 0; m < wiener_win; m++) {
        for (n = 0; n < wiener_win; n++) {
          H_[m * wiener_win + n] =
              (H_int_[n * 8 + m] +
               (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
              bit_depth_divider;
        }
      }
    }
  }
}
+
+void av1_compute_stats_highbd_avx2(int wiener_win, const uint8_t *dgd8,
+ const uint8_t *src8, int h_start, int h_end,
+ int v_start, int v_end, int dgd_stride,
+ int src_stride, int64_t *M, int64_t *H,
+ aom_bit_depth_t bit_depth) {
+ if (wiener_win == WIENER_WIN) {
+ compute_stats_highbd_win7_opt_avx2(dgd8, src8, h_start, h_end, v_start,
+ v_end, dgd_stride, src_stride, M, H,
+ bit_depth);
+ } else if (wiener_win == WIENER_WIN_CHROMA) {
+ compute_stats_highbd_win5_opt_avx2(dgd8, src8, h_start, h_end, v_start,
+ v_end, dgd_stride, src_stride, M, H,
+ bit_depth);
+ } else {
+ av1_compute_stats_highbd_c(wiener_win, dgd8, src8, h_start, h_end, v_start,
+ v_end, dgd_stride, src_stride, M, H, bit_depth);
+ }
+}
+#endif // CONFIG_AV1_HIGHBITDEPTH
+
+static INLINE void madd_and_accum_avx2(__m256i src, __m256i dgd, __m256i *sum) {
+ *sum = _mm256_add_epi32(*sum, _mm256_madd_epi16(src, dgd));
+}
+
+static INLINE __m256i convert_and_add_avx2(__m256i src) {
+ const __m256i s0 = _mm256_cvtepi32_epi64(_mm256_castsi256_si128(src));
+ const __m256i s1 = _mm256_cvtepi32_epi64(_mm256_extracti128_si256(src, 1));
+ return _mm256_add_epi64(s0, s1);
+}
+
+static INLINE __m256i hadd_four_32_to_64_avx2(__m256i src0, __m256i src1,
+ __m256i *src2, __m256i *src3) {
+ // 00 01 10 11 02 03 12 13
+ const __m256i s_0 = _mm256_hadd_epi32(src0, src1);
+ // 20 21 30 31 22 23 32 33
+ const __m256i s_1 = _mm256_hadd_epi32(*src2, *src3);
+ // 00+01 10+11 20+21 30+31 02+03 12+13 22+23 32+33
+ const __m256i s_2 = _mm256_hadd_epi32(s_0, s_1);
+ return convert_and_add_avx2(s_2);
+}
+
+static INLINE __m128i add_64bit_lvl_avx2(__m256i src0, __m256i src1) {
+ // 00 10 02 12
+ const __m256i t0 = _mm256_unpacklo_epi64(src0, src1);
+ // 01 11 03 13
+ const __m256i t1 = _mm256_unpackhi_epi64(src0, src1);
+ // 00+01 10+11 02+03 12+13
+ const __m256i sum = _mm256_add_epi64(t0, t1);
+ // 00+01 10+11
+ const __m128i sum0 = _mm256_castsi256_si128(sum);
+ // 02+03 12+13
+ const __m128i sum1 = _mm256_extracti128_si256(sum, 1);
+ // 00+01+02+03 10+11+12+13
+ return _mm_add_epi64(sum0, sum1);
+}
+
+static INLINE __m128i convert_32_to_64_add_avx2(__m256i src0, __m256i src1) {
+ // 00 01 02 03
+ const __m256i s0 = convert_and_add_avx2(src0);
+ // 10 11 12 13
+ const __m256i s1 = convert_and_add_avx2(src1);
+ return add_64bit_lvl_avx2(s0, s1);
+}
+
+static INLINE int32_t calc_sum_of_register(__m256i src) {
+ const __m128i src_l = _mm256_castsi256_si128(src);
+ const __m128i src_h = _mm256_extracti128_si256(src, 1);
+ const __m128i sum = _mm_add_epi32(src_l, src_h);
+ const __m128i dst0 = _mm_add_epi32(sum, _mm_srli_si128(sum, 8));
+ const __m128i dst1 = _mm_add_epi32(dst0, _mm_srli_si128(dst0, 4));
+ return _mm_cvtsi128_si32(dst1);
+}
+
+static INLINE void transpose_64bit_4x4_avx2(const __m256i *const src,
+ __m256i *const dst) {
+ // Unpack 64 bit elements. Goes from:
+ // src[0]: 00 01 02 03
+ // src[1]: 10 11 12 13
+ // src[2]: 20 21 22 23
+ // src[3]: 30 31 32 33
+ // to:
+ // reg0: 00 10 02 12
+ // reg1: 20 30 22 32
+ // reg2: 01 11 03 13
+ // reg3: 21 31 23 33
+ const __m256i reg0 = _mm256_unpacklo_epi64(src[0], src[1]);
+ const __m256i reg1 = _mm256_unpacklo_epi64(src[2], src[3]);
+ const __m256i reg2 = _mm256_unpackhi_epi64(src[0], src[1]);
+ const __m256i reg3 = _mm256_unpackhi_epi64(src[2], src[3]);
+
+ // Unpack 64 bit elements resulting in:
+ // dst[0]: 00 10 20 30
+ // dst[1]: 01 11 21 31
+ // dst[2]: 02 12 22 32
+ // dst[3]: 03 13 23 33
+ dst[0] = _mm256_inserti128_si256(reg0, _mm256_castsi256_si128(reg1), 1);
+ dst[1] = _mm256_inserti128_si256(reg2, _mm256_castsi256_si128(reg3), 1);
+ dst[2] = _mm256_inserti128_si256(reg1, _mm256_extracti128_si256(reg0, 1), 0);
+ dst[3] = _mm256_inserti128_si256(reg3, _mm256_extracti128_si256(reg2, 1), 0);
+}
+
// When we load 32 values of int8_t type and need less than 32 values for
// processing, the below mask is used to make the extra values zero.
static const int8_t mask_8bit[32] = {
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,  // 16 bytes
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,   // 16 bytes
};

// When we load 16 values of int16_t type and need less than 16 values for
// processing, the below mask is used to make the extra values zero.
static const int16_t mask_16bit[32] = {
  -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
  -1,  // 16 elements (32 bytes)
  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,  0,
  0,  // 16 elements (32 bytes)
};
+
// Returns the (truncated) average pixel value of the 8-bit rectangle
// [h_start, h_end) x [v_start, v_end). Rows are summed 32 pixels at a time
// using SAD against zero; a partially filled final load is masked so pixels
// beyond the requested width do not contribute.
static INLINE uint8_t calc_dgd_buf_avg_avx2(const uint8_t *src, int32_t h_start,
                                            int32_t h_end, int32_t v_start,
                                            int32_t v_end, int32_t stride) {
  const uint8_t *src_temp = src + v_start * stride + h_start;
  const __m256i zero = _mm256_setzero_si256();
  const int32_t width = h_end - h_start;
  const int32_t height = v_end - v_start;
  const int32_t wd_beyond_mul32 = width & 31;
  const int32_t wd_mul32 = width - wd_beyond_mul32;
  __m128i mask_low, mask_high;
  __m256i ss = zero;

  // When width is not multiple of 32, it still loads 32 and to make the data
  // which is extra (beyond required) as zero using the below mask.
  if (wd_beyond_mul32 >= 16) {
    // Low 16 lanes are fully valid; mask only the high half.
    mask_low = _mm_set1_epi8(-1);
    mask_high = _mm_loadu_si128((__m128i *)(&mask_8bit[32 - wd_beyond_mul32]));
  } else {
    // Fewer than 16 valid lanes; the high half is entirely discarded.
    mask_low = _mm_loadu_si128((__m128i *)(&mask_8bit[16 - wd_beyond_mul32]));
    mask_high = _mm_setzero_si128();
  }
  const __m256i mask =
      _mm256_inserti128_si256(_mm256_castsi128_si256(mask_low), mask_high, 1);

  int32_t proc_ht = 0;
  do {
    // Process width in multiple of 32.
    int32_t proc_wd = 0;
    while (proc_wd < wd_mul32) {
      const __m256i s_0 = _mm256_loadu_si256((__m256i *)(src_temp + proc_wd));
      // SAD against zero sums each group of 8 pixels into a 64-bit lane.
      const __m256i sad_0 = _mm256_sad_epu8(s_0, zero);
      ss = _mm256_add_epi32(ss, sad_0);
      proc_wd += 32;
    }

    // Process the remaining width.
    if (wd_beyond_mul32) {
      const __m256i s_0 = _mm256_loadu_si256((__m256i *)(src_temp + proc_wd));
      const __m256i s_m_0 = _mm256_and_si256(s_0, mask);
      const __m256i sad_0 = _mm256_sad_epu8(s_m_0, zero);
      ss = _mm256_add_epi32(ss, sad_0);
    }
    src_temp += stride;
    proc_ht++;
  } while (proc_ht < height);

  const uint32_t sum = calc_sum_of_register(ss);
  const uint8_t avg = sum / (width * height);
  return avg;
}
+
+// Fill (src-avg) or (dgd-avg) buffers. Note that when n = (width % 16) is not
+// 0, it writes (16 - n) more data than required.
+static INLINE void sub_avg_block_avx2(const uint8_t *src, int32_t src_stride,
+ uint8_t avg, int32_t width,
+ int32_t height, int16_t *dst,
+ int32_t dst_stride,
+ int use_downsampled_wiener_stats) {
+ const __m256i avg_reg = _mm256_set1_epi16(avg);
+
+ int32_t proc_ht = 0;
+ do {
+ int ds_factor =
+ use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
+ if (use_downsampled_wiener_stats &&
+ (height - proc_ht < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
+ ds_factor = height - proc_ht;
+ }
+
+ int32_t proc_wd = 0;
+ while (proc_wd < width) {
+ const __m128i s = _mm_loadu_si128((__m128i *)(src + proc_wd));
+ const __m256i ss = _mm256_cvtepu8_epi16(s);
+ const __m256i d = _mm256_sub_epi16(ss, avg_reg);
+ _mm256_storeu_si256((__m256i *)(dst + proc_wd), d);
+ proc_wd += 16;
+ }
+
+ src += ds_factor * src_stride;
+ dst += ds_factor * dst_stride;
+ proc_ht += ds_factor;
+ } while (proc_ht < height);
+}
+
// Fills lower-triangular elements of H buffer from upper triangular elements
// of the same. Operates on 4x4 tiles of 64-bit values: each upper-triangle
// tile is transposed and written to the mirrored position below the
// diagonal. Iterates in steps of 4, which covers both supported windows
// (wiener_win2 - 1 is 24 for 5x5 and 48 for 7x7).
static INLINE void fill_lower_triag_elements_avx2(const int32_t wiener_win2,
                                                  int64_t *const H) {
  for (int32_t i = 0; i < wiener_win2 - 1; i += 4) {
    __m256i in[4], out[4];

    // The first tile of each band straddles the diagonal, so only the
    // strictly-lower parts of the transposed columns are stored below
    // (1, 2 and 4 elements respectively).
    in[0] = _mm256_loadu_si256((__m256i *)(H + (i + 0) * wiener_win2 + i + 1));
    in[1] = _mm256_loadu_si256((__m256i *)(H + (i + 1) * wiener_win2 + i + 1));
    in[2] = _mm256_loadu_si256((__m256i *)(H + (i + 2) * wiener_win2 + i + 1));
    in[3] = _mm256_loadu_si256((__m256i *)(H + (i + 3) * wiener_win2 + i + 1));

    transpose_64bit_4x4_avx2(in, out);

    _mm_storel_epi64((__m128i *)(H + (i + 1) * wiener_win2 + i),
                     _mm256_castsi256_si128(out[0]));
    _mm_storeu_si128((__m128i *)(H + (i + 2) * wiener_win2 + i),
                     _mm256_castsi256_si128(out[1]));
    _mm256_storeu_si256((__m256i *)(H + (i + 3) * wiener_win2 + i), out[2]);
    _mm256_storeu_si256((__m256i *)(H + (i + 4) * wiener_win2 + i), out[3]);

    // The remaining tiles of the band lie entirely above the diagonal and
    // are mirrored whole.
    for (int32_t j = i + 5; j < wiener_win2; j += 4) {
      in[0] = _mm256_loadu_si256((__m256i *)(H + (i + 0) * wiener_win2 + j));
      in[1] = _mm256_loadu_si256((__m256i *)(H + (i + 1) * wiener_win2 + j));
      in[2] = _mm256_loadu_si256((__m256i *)(H + (i + 2) * wiener_win2 + j));
      in[3] = _mm256_loadu_si256((__m256i *)(H + (i + 3) * wiener_win2 + j));

      transpose_64bit_4x4_avx2(in, out);

      _mm256_storeu_si256((__m256i *)(H + (j + 0) * wiener_win2 + i), out[0]);
      _mm256_storeu_si256((__m256i *)(H + (j + 1) * wiener_win2 + i), out[1]);
      _mm256_storeu_si256((__m256i *)(H + (j + 2) * wiener_win2 + i), out[2]);
      _mm256_storeu_si256((__m256i *)(H + (j + 3) * wiener_win2 + i), out[3]);
    }
  }
}
+
// Fill H buffer based on loop_count: for each of the `loop_count` window
// rows, multiply-accumulate `dgd_mul_df` (expected in the expansion scope)
// with the dgd row at (d + g * d_stride) into sum_h[g]. Also expects
// `d_stride` and `sum_h` in the expansion scope.
#define INIT_H_VALUES(d, loop_count)                           \
  for (int g = 0; g < (loop_count); g++) {                     \
    const __m256i dgd0 =                                       \
        _mm256_loadu_si256((__m256i *)((d) + (g * d_stride))); \
    madd_and_accum_avx2(dgd_mul_df, dgd0, &sum_h[g]);          \
  }
+
// Fill M & H buffer: for each of `wiener_win` window rows, accumulate
// src*dgd products into sum_m[g] and dgd*dgd products into sum_h[g].
// Expects `wiener_win`, `d_stride`, `src_mul_df`, `dgd_mul_df`, `sum_m` and
// `sum_h` in the expansion scope.
#define INIT_MH_VALUES(d)                                      \
  for (int g = 0; g < wiener_win; g++) {                       \
    const __m256i dgds_0 =                                     \
        _mm256_loadu_si256((__m256i *)((d) + (g * d_stride))); \
    madd_and_accum_avx2(src_mul_df, dgds_0, &sum_m[g]);        \
    madd_and_accum_avx2(dgd_mul_df, dgds_0, &sum_h[g]);        \
  }
+
// Update the dgd pointers appropriately for H-row `i` and reset the
// accumulation state. Declares j, d_window, d_current_row, proc_ht and
// sum_h in the expansion scope; expects i, d, d_stride, v_start,
// downsample_factor and use_downsampled_wiener_stats to already exist.
#define INITIALIZATION(wiener_window_sz)                                 \
  j = i / (wiener_window_sz);                                            \
  const int16_t *d_window = d + j;                                       \
  const int16_t *d_current_row =                                         \
      d + j + ((i % (wiener_window_sz)) * d_stride);                     \
  int proc_ht = v_start;                                                 \
  downsample_factor =                                                    \
      use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1; \
  __m256i sum_h[wiener_window_sz];                                       \
  memset(sum_h, 0, sizeof(sum_h));
+
// Update the downsample factor appropriately: the final group of rows may
// be shorter than WIENER_STATS_DOWNSAMPLE_FACTOR, in which case each of
// those rows is weighted by the leftover row count instead. Also declares
// proc_wd and the df_reg weight vector in the expansion scope.
#define UPDATE_DOWNSAMPLE_FACTOR                              \
  int proc_wd = 0;                                            \
  if (use_downsampled_wiener_stats &&                         \
      ((v_end - proc_ht) < WIENER_STATS_DOWNSAMPLE_FACTOR)) { \
    downsample_factor = v_end - proc_ht;                      \
  }                                                           \
  const __m256i df_reg = _mm256_set1_epi16(downsample_factor);
+
// Processes the remaining 'j' iterations for H-row `i` in the 5x5 window:
// for each j it re-walks the vertical range, accumulating dgd*dgd products
// into sum_h[0..4] and storing five 64-bit results at
// H[i * wiener_win2 + wiener_win * j] (four via one vector store, the
// fifth via a scalar 64-bit store).
#define CALCULATE_REMAINING_H_WIN5                                             \
  while (j < wiener_win) {                                                     \
    d_window = d;                                                              \
    d_current_row = d + (i / wiener_win) + ((i % wiener_win) * d_stride);      \
    const __m256i zero = _mm256_setzero_si256();                               \
    sum_h[0] = zero;                                                           \
    sum_h[1] = zero;                                                           \
    sum_h[2] = zero;                                                           \
    sum_h[3] = zero;                                                           \
    sum_h[4] = zero;                                                           \
                                                                               \
    proc_ht = v_start;                                                         \
    downsample_factor =                                                        \
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;     \
    do {                                                                       \
      UPDATE_DOWNSAMPLE_FACTOR;                                                \
                                                                               \
      /* Process the amount of width multiple of 16.*/                         \
      while (proc_wd < wd_mul16) {                                             \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);            \
        INIT_H_VALUES(d_window + j + proc_wd, 5)                               \
                                                                               \
        proc_wd += 16;                                                         \
      };                                                                       \
                                                                               \
      /* Process the remaining width here. */                                  \
      if (wd_beyond_mul16) {                                                   \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);                  \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);       \
        INIT_H_VALUES(d_window + j + proc_wd, 5)                               \
      }                                                                        \
      proc_ht += downsample_factor;                                            \
      d_window += downsample_factor * d_stride;                                \
      d_current_row += downsample_factor * d_stride;                           \
    } while (proc_ht < v_end);                                                 \
    const __m256i s_h0 =                                                       \
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);     \
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + (wiener_win * j)), \
                        s_h0);                                                 \
    const __m256i s_m_h = convert_and_add_avx2(sum_h[4]);                      \
    const __m128i s_m_h0 = add_64bit_lvl_avx2(s_m_h, s_m_h);                   \
    _mm_storel_epi64(                                                          \
        (__m128i *)(H + (i * wiener_win2) + (wiener_win * j) + 4), s_m_h0);    \
    j++;                                                                       \
  }
+
// Processes the remaining 'j' iterations for H-row `i` in the 7x7 window:
// analogous to CALCULATE_REMAINING_H_WIN5, but accumulates seven sums and
// stores seven 64-bit results per j. sum_h[6] is passed twice to the second
// reduction so the four-register helper can be reused for the last three
// sums; the duplicated fourth output lands one slot past the needed entries
// and is overwritten by later fills.
#define CALCULATE_REMAINING_H_WIN7                                             \
  while (j < wiener_win) {                                                     \
    d_window = d;                                                              \
    d_current_row = d + (i / wiener_win) + ((i % wiener_win) * d_stride);      \
    const __m256i zero = _mm256_setzero_si256();                               \
    sum_h[0] = zero;                                                           \
    sum_h[1] = zero;                                                           \
    sum_h[2] = zero;                                                           \
    sum_h[3] = zero;                                                           \
    sum_h[4] = zero;                                                           \
    sum_h[5] = zero;                                                           \
    sum_h[6] = zero;                                                           \
                                                                               \
    proc_ht = v_start;                                                         \
    downsample_factor =                                                        \
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;     \
    do {                                                                       \
      UPDATE_DOWNSAMPLE_FACTOR;                                                \
                                                                               \
      /* Process the amount of width multiple of 16.*/                         \
      while (proc_wd < wd_mul16) {                                             \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);            \
        INIT_H_VALUES(d_window + j + proc_wd, 7)                               \
                                                                               \
        proc_wd += 16;                                                         \
      };                                                                       \
                                                                               \
      /* Process the remaining width here. */                                  \
      if (wd_beyond_mul16) {                                                   \
        const __m256i dgd =                                                    \
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));          \
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);                  \
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);       \
        INIT_H_VALUES(d_window + j + proc_wd, 7)                               \
      }                                                                        \
      proc_ht += downsample_factor;                                            \
      d_window += downsample_factor * d_stride;                                \
      d_current_row += downsample_factor * d_stride;                           \
    } while (proc_ht < v_end);                                                 \
    const __m256i s_h1 =                                                       \
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);     \
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + (wiener_win * j)), \
                        s_h1);                                                 \
    const __m256i s_h2 =                                                       \
        hadd_four_32_to_64_avx2(sum_h[4], sum_h[5], &sum_h[6], &sum_h[6]);     \
    _mm256_storeu_si256(                                                       \
        (__m256i *)(H + (i * wiener_win2) + (wiener_win * j) + 4), s_h2);      \
    j++;                                                                       \
  }
+
+// The buffers H(auto-covariance) and M(cross-correlation) are used to estimate
+// the filter tap values required for wiener filtering. Here, the buffer H is of
+// size ((wiener_window_size^2)*(wiener_window_size^2)) and M is of size
+// (wiener_window_size*wiener_window_size). H is a symmetric matrix where the
+// value above the diagonal (upper triangle) are equal to the values below the
+// diagonal (lower triangle). The calculation of elements/stats of H(upper
+// triangle) and M is done in steps as described below where each step fills
+// specific values of H and M.
+// Once the upper triangular elements of H matrix are derived, the same will be
+// copied to lower triangular using the function
+// fill_lower_triag_elements_avx2().
// Example: Wiener window size = WIENER_WIN_CHROMA (5)
// M buffer = [M0 M1 M2 ---- M23 M24]
// H buffer = Hxy (x-row, y-column)
// [H00 H01 H02 ---- H023 H024]
// [H10 H11 H12 ---- H123 H124]
// [H20 H21 H22 ---- H223 H224]
// [H30 H31 H32 ---- H323 H324]
// [H40 H41 H42 ---- H423 H424]
// [H50 H51 H52 ---- H523 H524]
// [H60 H61 H62 ---- H623 H624]
+// ||
+// ||
+// [H230 H231 H232 ---- H2323 H2324]
+// [H240 H241 H242 ---- H2423 H2424]
+// In Step 1, whole M buffers (i.e., M0 to M24) and the first row of H (i.e.,
+// H00 to H024) is filled. The remaining rows of H buffer are filled through
+// steps 2 to 6.
// Computes M (cross-correlation) and H (auto-covariance) for the 5x5
// (chroma) Wiener window from the mean-subtracted int16 buffers `d`
// (degraded) and `s` (source). When use_downsampled_wiener_stats is set,
// only every WIENER_STATS_DOWNSAMPLE_FACTOR'th row is visited, with each
// visited row weighted by its downsample factor. Only the upper triangle
// of H is computed directly; fill_lower_triag_elements_avx2() mirrors it
// at the end.
static void compute_stats_win5_avx2(const int16_t *const d, int32_t d_stride,
                                    const int16_t *const s, int32_t s_stride,
                                    int32_t width, int v_start, int v_end,
                                    int64_t *const M, int64_t *const H,
                                    int use_downsampled_wiener_stats) {
  const int32_t wiener_win = WIENER_WIN_CHROMA;
  const int32_t wiener_win2 = wiener_win * wiener_win;
  // Amount of width which is beyond multiple of 16. This case is handled
  // appropriately to process only the required width towards the end.
  const int32_t wd_mul16 = width & ~15;
  const int32_t wd_beyond_mul16 = width - wd_mul16;
  const __m256i mask =
      _mm256_loadu_si256((__m256i *)(&mask_16bit[16 - wd_beyond_mul16]));
  int downsample_factor;

  // Step 1: Full M (i.e., M0 to M24) and first row H (i.e., H00 to H024)
  // values are filled here. Here, the loop over 'j' is executed for values 0
  // to 4 (wiener_win-1). When the loop executed for a specific 'j', 5 values of
  // M and H are filled as shown below.
  // j=0: M0-M4 and H00-H04, j=1: M5-M9 and H05-H09 are filled etc,.
  int j = 0;
  do {
    const int16_t *s_t = s;
    const int16_t *d_t = d;
    __m256i sum_m[WIENER_WIN_CHROMA] = { _mm256_setzero_si256() };
    __m256i sum_h[WIENER_WIN_CHROMA] = { _mm256_setzero_si256() };
    downsample_factor =
        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
    int proc_ht = v_start;
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
        const __m256i src_mul_df = _mm256_mullo_epi16(src, df_reg);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_MH_VALUES(d_t + j + proc_wd)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
        const __m256i src_mask = _mm256_and_si256(src, mask);
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i src_mul_df = _mm256_mullo_epi16(src_mask, df_reg);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_MH_VALUES(d_t + j + proc_wd)
      }
      proc_ht += downsample_factor;
      s_t += downsample_factor * s_stride;
      d_t += downsample_factor * d_stride;
    } while (proc_ht < v_end);

    // s_m_h packs the fifth totals of both M and H: the low 64 bits go to
    // M[...+4] and the high 64 bits to H[...+4].
    const __m256i s_m =
        hadd_four_32_to_64_avx2(sum_m[0], sum_m[1], &sum_m[2], &sum_m[3]);
    const __m128i s_m_h = convert_32_to_64_add_avx2(sum_m[4], sum_h[4]);
    _mm256_storeu_si256((__m256i *)(M + wiener_win * j), s_m);
    _mm_storel_epi64((__m128i *)&M[wiener_win * j + 4], s_m_h);

    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + wiener_win * j), s_h);
    _mm_storeh_epi64((__m128i *)&H[wiener_win * j + 4], s_m_h);
  } while (++j < wiener_win);

  // The below steps are designed to fill remaining rows of H buffer. Here, aim
  // is to fill only upper triangle elements correspond to each row and lower
  // triangle elements are copied from upper-triangle elements. Also, as
  // mentioned in Step 1, the core function is designed to fill 5
  // elements/stats/values of H buffer.
  //
  // Step 2: Here, the rows 1, 6, 11, 16 and 21 are filled. As we need to fill
  // only upper-triangle elements, H10 from row1, H60-H64 and H65 from row6,etc,
  // are need not be filled. As the core function process 5 values, in first
  // iteration of 'j' only 4 values to be filled i.e., H11-H14 from row1,H66-H69
  // from row6, etc.
  for (int i = 1; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th iteration
    // from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 4)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 4)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);

    // process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 3: Here, the rows 2, 7, 12, 17 and 22 are filled. As we need to fill
  // only upper-triangle elements, H20-H21 from row2, H70-H74 and H75-H76 from
  // row7, etc, are need not be filled. As the core function process 5 values,
  // in first iteration of 'j' only 3 values to be filled i.e., H22-H24 from
  // row2, H77-H79 from row7, etc.
  for (int i = 2; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th iteration
    // from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 3)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 3)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m256i s_h =
        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);

    // process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 4: Here, the rows 3, 8, 13, 18 and 23 are filled. As we need to fill
  // only upper-triangle elements, H30-H32 from row3, H80-H84 and H85-H87 from
  // row8, etc, are need not be filled. As the core function process 5 values,
  // in first iteration of 'j' only 2 values to be filled i.e., H33-H34 from
  // row3, H88-89 from row8, etc.
  for (int i = 3; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th iteration
    // from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)

    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 2)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 2)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m128i s_h = convert_32_to_64_add_avx2(sum_h[0], sum_h[1]);
    _mm_storeu_si128((__m128i *)(H + (i * wiener_win2) + i), s_h);

    // process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 5: Here, the rows 4, 9, 14, 19 and 24 are filled. As we need to fill
  // only upper-triangle elements, H40-H43 from row4, H90-H94 and H95-H98 from
  // row9, etc, are need not be filled. As the core function process 5 values,
  // in first iteration of 'j' only 1 values to be filled i.e., H44 from row4,
  // H99 from row9, etc.
  for (int i = 4; i < wiener_win2; i += wiener_win) {
    // Update the dgd pointers appropriately and also derive the 'j'th iteration
    // from where the H buffer filling needs to be started.
    INITIALIZATION(WIENER_WIN_CHROMA)
    do {
      UPDATE_DOWNSAMPLE_FACTOR

      // Process the amount of width multiple of 16.
      while (proc_wd < wd_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 1)

        proc_wd += 16;
      }

      // Process the remaining width here.
      if (wd_beyond_mul16) {
        const __m256i dgd =
            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 1)
      }
      proc_ht += downsample_factor;
      d_window += downsample_factor * d_stride;
      d_current_row += downsample_factor * d_stride;
    } while (proc_ht < v_end);
    const __m128i s_h = convert_32_to_64_add_avx2(sum_h[0], sum_h[1]);
    _mm_storeu_si128((__m128i *)(H + (i * wiener_win2) + i), s_h);

    // process the remaining 'j' iterations.
    j++;
    CALCULATE_REMAINING_H_WIN5
  }

  // Step 6: Here, the rows 5, 10, 15 and 20 are filled. As we need to fill only
  // upper-triangle elements, H50-H54 from row5, H100-H104 and H105-H109 from
  // row10,etc, are need not be filled. The first iteration of 'j' fills H55-H59
  // from row5 and H1010-H1014 from row10, etc.
  for (int i = 5; i < wiener_win2; i += wiener_win) {
    // Derive j'th iteration from where the H buffer filling needs to be
    // started.
    j = i / wiener_win;
    int shift = 0;
    do {
      // Update the dgd pointers appropriately.
      int proc_ht = v_start;
      const int16_t *d_window = d + (i / wiener_win);
      const int16_t *d_current_row =
          d + (i / wiener_win) + ((i % wiener_win) * d_stride);
      downsample_factor =
          use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
      __m256i sum_h[WIENER_WIN_CHROMA] = { _mm256_setzero_si256() };
      do {
        UPDATE_DOWNSAMPLE_FACTOR

        // Process the amount of width multiple of 16.
        while (proc_wd < wd_mul16) {
          const __m256i dgd =
              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
          INIT_H_VALUES(d_window + shift + proc_wd, 5)

          proc_wd += 16;
        }

        // Process the remaining width here.
        if (wd_beyond_mul16) {
          const __m256i dgd =
              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
          const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
          INIT_H_VALUES(d_window + shift + proc_wd, 5)
        }
        proc_ht += downsample_factor;
        d_window += downsample_factor * d_stride;
        d_current_row += downsample_factor * d_stride;
      } while (proc_ht < v_end);

      const __m256i s_h =
          hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
      _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + (wiener_win * j)),
                          s_h);
      const __m256i s_m_h = convert_and_add_avx2(sum_h[4]);
      const __m128i s_m_h0 = add_64bit_lvl_avx2(s_m_h, s_m_h);
      _mm_storel_epi64(
          (__m128i *)(H + (i * wiener_win2) + (wiener_win * j) + 4), s_m_h0);
      shift++;
    } while (++j < wiener_win);
  }

  // Mirror the computed upper triangle into the lower triangle.
  fill_lower_triag_elements_avx2(wiener_win2, H);
}
+
+// The buffers H(auto-covariance) and M(cross-correlation) are used to estimate
+// the filter tap values required for wiener filtering. Here, the buffer H is of
+// size ((wiener_window_size^2)*(wiener_window_size^2)) and M is of size
+// (wiener_window_size*wiener_window_size). H is a symmetric matrix where the
+// value above the diagonal (upper triangle) are equal to the values below the
+// diagonal (lower triangle). The calculation of elements/stats of H(upper
+// triangle) and M is done in steps as described below where each step fills
+// specific values of H and M.
+// Example:
+// Wiener window size = WIENER_WIN (7)
+// M buffer = [M0 M1 M2 ---- M47 M48]
+// H buffer = Hxy (x-row, y-column)
+// [H00 H01 H02 ---- H047 H048]
+// [H10 H11 H12 ---- H147 H148]
+// [H20 H21 H22 ---- H247 H248]
+// [H30 H31 H32 ---- H347 H348]
+// [H40 H41 H42 ---- H447 H448]
+// [H50 H51 H52 ---- H547 H548]
+// [H60 H61 H62 ---- H647 H648]
+// ||
+// ||
+// [H470 H471 H472 ---- H4747 H4748]
+// [H480 H481 H482 ---- H4847 H4848]
+// In Step 1, the whole M buffer (i.e., M0 to M48) and the first row of H
+// (i.e., H00 to H048) are filled. The remaining rows of H buffer are filled
+// through steps 2 to 8.
+static void compute_stats_win7_avx2(const int16_t *const d, int32_t d_stride,
+                                    const int16_t *const s, int32_t s_stride,
+                                    int32_t width, int v_start, int v_end,
+                                    int64_t *const M, int64_t *const H,
+                                    int use_downsampled_wiener_stats) {
+  const int32_t wiener_win = WIENER_WIN;
+  const int32_t wiener_win2 = wiener_win * wiener_win;
+  // Amount of width which is beyond multiple of 16. This case is handled
+  // appropriately to process only the required width towards the end.
+  const int32_t wd_mul16 = width & ~15;
+  const int32_t wd_beyond_mul16 = width - wd_mul16;
+  const __m256i mask =
+      _mm256_loadu_si256((__m256i *)(&mask_16bit[16 - wd_beyond_mul16]));
+  // Per-row weight: WIENER_STATS_DOWNSAMPLE_FACTOR when downsampling, else 1.
+  int downsample_factor;
+
+  // Step 1: Full M (i.e., M0 to M48) and first row H (i.e., H00 to H048)
+  // values are filled here. Here, the loop over 'j' is executed for values 0
+  // to 6. When the loop executed for a specific 'j', 7 values of M and H are
+  // filled as shown below.
+  // j=0: M0-M6 and H00-H06, j=1: M7-M13 and H07-H013 are filled etc.
+  int j = 0;
+  do {
+    const int16_t *s_t = s;
+    const int16_t *d_t = d;
+    __m256i sum_m[WIENER_WIN] = { _mm256_setzero_si256() };
+    __m256i sum_h[WIENER_WIN] = { _mm256_setzero_si256() };
+    downsample_factor =
+        use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
+    int proc_ht = v_start;
+    do {
+      // NOTE(review): proc_wd and df_reg used below are introduced by this
+      // macro (they are not declared in this function) — see its definition.
+      UPDATE_DOWNSAMPLE_FACTOR
+
+      // Process the amount of width multiple of 16.
+      while (proc_wd < wd_mul16) {
+        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
+        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
+        const __m256i src_mul_df = _mm256_mullo_epi16(src, df_reg);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+        INIT_MH_VALUES(d_t + j + proc_wd)
+
+        proc_wd += 16;
+      }
+
+      // Process the remaining width with out-of-range lanes masked to zero.
+      if (wd_beyond_mul16) {
+        const __m256i src = _mm256_loadu_si256((__m256i *)(s_t + proc_wd));
+        const __m256i dgd = _mm256_loadu_si256((__m256i *)(d_t + proc_wd));
+        const __m256i src_mask = _mm256_and_si256(src, mask);
+        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+        const __m256i src_mul_df = _mm256_mullo_epi16(src_mask, df_reg);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+        INIT_MH_VALUES(d_t + j + proc_wd)
+      }
+      proc_ht += downsample_factor;
+      s_t += downsample_factor * s_stride;
+      d_t += downsample_factor * d_stride;
+    } while (proc_ht < v_end);
+
+    // Only 7 accumulators exist, so the second 4-lane horizontal add is fed
+    // sum_m[6]/sum_h[6] twice; the duplicated result lane is never stored.
+    const __m256i s_m0 =
+        hadd_four_32_to_64_avx2(sum_m[0], sum_m[1], &sum_m[2], &sum_m[3]);
+    const __m256i s_m1 =
+        hadd_four_32_to_64_avx2(sum_m[4], sum_m[5], &sum_m[6], &sum_m[6]);
+    _mm256_storeu_si256((__m256i *)(M + wiener_win * j + 0), s_m0);
+    _mm_storeu_si128((__m128i *)(M + wiener_win * j + 4),
+                     _mm256_castsi256_si128(s_m1));
+    _mm_storel_epi64((__m128i *)&M[wiener_win * j + 6],
+                     _mm256_extracti128_si256(s_m1, 1));
+
+    const __m256i sh_0 =
+        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+    const __m256i sh_1 =
+        hadd_four_32_to_64_avx2(sum_h[4], sum_h[5], &sum_h[6], &sum_h[6]);
+    _mm256_storeu_si256((__m256i *)(H + wiener_win * j + 0), sh_0);
+    _mm_storeu_si128((__m128i *)(H + wiener_win * j + 4),
+                     _mm256_castsi256_si128(sh_1));
+    _mm_storel_epi64((__m128i *)&H[wiener_win * j + 6],
+                     _mm256_extracti128_si256(sh_1, 1));
+  } while (++j < wiener_win);
+
+  // The below steps are designed to fill remaining rows of H buffer. Here, aim
+  // is to fill only upper triangle elements correspond to each row and lower
+  // triangle elements are copied from upper-triangle elements. Also, as
+  // mentioned in Step 1, the core function is designed to fill 7
+  // elements/stats/values of H buffer.
+  //
+  // Step 2: Here, the rows 1, 8, 15, 22, 29, 36 and 43 are filled. As we need
+  // to fill only upper-triangle elements, H10 from row1, H80-H86 and H87 from
+  // row8, etc. need not be filled. As the core function process 7 values,
+  // in first iteration of 'j' only 6 values to be filled i.e., H11-H16 from
+  // row1 and H88-H813 from row8, etc.
+  for (int i = 1; i < wiener_win2; i += wiener_win) {
+    // Update the dgd pointers appropriately and also derive the 'j'th iteration
+    // from where the H buffer filling needs to be started.
+    // NOTE(review): INITIALIZATION appears to declare d_window, d_current_row,
+    // sum_h[], proc_ht and set 'j' for this row group — confirm in the macro.
+    INITIALIZATION(WIENER_WIN)
+
+    do {
+      UPDATE_DOWNSAMPLE_FACTOR
+
+      // Process the amount of width multiple of 16.
+      while (proc_wd < wd_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 6)
+
+        proc_wd += 16;
+      }
+
+      // Process the remaining width here.
+      if (wd_beyond_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (1 * d_stride), 6)
+      }
+      proc_ht += downsample_factor;
+      d_window += downsample_factor * d_stride;
+      d_current_row += downsample_factor * d_stride;
+    } while (proc_ht < v_end);
+    const __m256i s_h =
+        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);
+    const __m128i s_h0 = convert_32_to_64_add_avx2(sum_h[4], sum_h[5]);
+    _mm_storeu_si128((__m128i *)(H + (i * wiener_win2) + i + 4), s_h0);
+
+    // process the remaining 'j' iterations.
+    j++;
+    CALCULATE_REMAINING_H_WIN7
+  }
+
+  // Step 3: Here, the rows 2, 9, 16, 23, 30, 37 and 44 are filled. As we need
+  // to fill only upper-triangle elements, H20-H21 from row2, H90-H96 and
+  // H97-H98 from row9, etc. need not be filled. As the core function
+  // process 7 values, in first iteration of 'j' only 5 values to be filled
+  // i.e., H22-H26 from row2 and H99-H913 from row9, etc.
+  for (int i = 2; i < wiener_win2; i += wiener_win) {
+    // Update the dgd pointers appropriately and also derive the 'j'th iteration
+    // from where the H buffer filling needs to be started.
+    INITIALIZATION(WIENER_WIN)
+    do {
+      UPDATE_DOWNSAMPLE_FACTOR
+
+      // Process the amount of width multiple of 16.
+      while (proc_wd < wd_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 5)
+
+        proc_wd += 16;
+      }
+
+      // Process the remaining width here.
+      if (wd_beyond_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (2 * d_stride), 5)
+      }
+      proc_ht += downsample_factor;
+      d_window += downsample_factor * d_stride;
+      d_current_row += downsample_factor * d_stride;
+    } while (proc_ht < v_end);
+    const __m256i s_h =
+        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);
+    const __m256i s_m_h = convert_and_add_avx2(sum_h[4]);
+    const __m128i s_m_h0 = add_64bit_lvl_avx2(s_m_h, s_m_h);
+    _mm_storel_epi64((__m128i *)(H + (i * wiener_win2) + i + 4), s_m_h0);
+
+    // process the remaining 'j' iterations.
+    j++;
+    CALCULATE_REMAINING_H_WIN7
+  }
+
+  // Step 4: Here, the rows 3, 10, 17, 24, 31, 38 and 45 are filled. As we need
+  // to fill only upper-triangle elements, H30-H32 from row3, H100-H106 and
+  // H107-H109 from row10, etc. need not be filled. As the core function
+  // process 7 values, in first iteration of 'j' only 4 values to be filled
+  // i.e., H33-H36 from row3 and H1010-H1013 from row10, etc.
+  for (int i = 3; i < wiener_win2; i += wiener_win) {
+    // Update the dgd pointers appropriately and also derive the 'j'th iteration
+    // from where the H buffer filling needs to be started.
+    INITIALIZATION(WIENER_WIN)
+
+    do {
+      UPDATE_DOWNSAMPLE_FACTOR
+
+      // Process the amount of width multiple of 16.
+      while (proc_wd < wd_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 4)
+
+        proc_wd += 16;
+      }
+
+      // Process the remaining width here.
+      if (wd_beyond_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (3 * d_stride), 4)
+      }
+      proc_ht += downsample_factor;
+      d_window += downsample_factor * d_stride;
+      d_current_row += downsample_factor * d_stride;
+    } while (proc_ht < v_end);
+    const __m256i s_h =
+        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);
+
+    // process the remaining 'j' iterations.
+    j++;
+    CALCULATE_REMAINING_H_WIN7
+  }
+
+  // Step 5: Here, the rows 4, 11, 18, 25, 32, 39 and 46 are filled. As we need
+  // to fill only upper-triangle elements, H40-H43 from row4, H110-H116 and
+  // H117-H1110 from row11, etc. need not be filled. As the core function
+  // process 7 values, in first iteration of 'j' only 3 values to be filled
+  // i.e., H44-H46 from row4 and H1111-H1113 from row11, etc.
+  for (int i = 4; i < wiener_win2; i += wiener_win) {
+    // Update the dgd pointers appropriately and also derive the 'j'th iteration
+    // from where the H buffer filling needs to be started.
+    INITIALIZATION(WIENER_WIN)
+
+    do {
+      UPDATE_DOWNSAMPLE_FACTOR
+
+      // Process the amount of width multiple of 16.
+      while (proc_wd < wd_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 3)
+
+        proc_wd += 16;
+      }
+
+      // Process the remaining width here.
+      if (wd_beyond_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (4 * d_stride), 3)
+      }
+      proc_ht += downsample_factor;
+      d_window += downsample_factor * d_stride;
+      d_current_row += downsample_factor * d_stride;
+    } while (proc_ht < v_end);
+    const __m256i s_h =
+        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);
+
+    // process the remaining 'j' iterations.
+    j++;
+    CALCULATE_REMAINING_H_WIN7
+  }
+
+  // Step 6: Here, the rows 5, 12, 19, 26, 33, 40 and 47 are filled. As we need
+  // to fill only upper-triangle elements, H50-H54 from row5, H120-H126 and
+  // H127-H1211 from row12, etc. need not be filled. As the core function
+  // process 7 values, in first iteration of 'j' only 2 values to be filled
+  // i.e., H55-H56 from row5 and H1212-H1213 from row12, etc.
+  for (int i = 5; i < wiener_win2; i += wiener_win) {
+    // Update the dgd pointers appropriately and also derive the 'j'th iteration
+    // from where the H buffer filling needs to be started.
+    INITIALIZATION(WIENER_WIN)
+    do {
+      UPDATE_DOWNSAMPLE_FACTOR
+
+      // Process the amount of width multiple of 16.
+      while (proc_wd < wd_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (5 * d_stride), 2)
+
+        proc_wd += 16;
+      }
+
+      // Process the remaining width here.
+      if (wd_beyond_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (5 * d_stride), 2)
+      }
+      proc_ht += downsample_factor;
+      d_window += downsample_factor * d_stride;
+      d_current_row += downsample_factor * d_stride;
+    } while (proc_ht < v_end);
+    const __m256i s_h =
+        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+    _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + i), s_h);
+
+    // process the remaining 'j' iterations.
+    j++;
+    CALCULATE_REMAINING_H_WIN7
+  }
+
+  // Step 7: Here, the rows 6, 13, 20, 27, 34, 41 and 48 are filled. As we need
+  // to fill only upper-triangle elements, H60-H65 from row6, H130-H136 and
+  // H137-H1312 from row13, etc. need not be filled. As the core function
+  // process 7 values, in first iteration of 'j' only 1 value to be filled
+  // i.e., H66 from row6 and H1313 from row13, etc.
+  for (int i = 6; i < wiener_win2; i += wiener_win) {
+    // Update the dgd pointers appropriately and also derive the 'j'th iteration
+    // from where the H buffer filling needs to be started.
+    INITIALIZATION(WIENER_WIN)
+    do {
+      UPDATE_DOWNSAMPLE_FACTOR
+
+      // Process the amount of width multiple of 16.
+      while (proc_wd < wd_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (6 * d_stride), 1)
+
+        proc_wd += 16;
+      }
+
+      // Process the remaining width here.
+      if (wd_beyond_mul16) {
+        const __m256i dgd =
+            _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+        const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+        const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+        INIT_H_VALUES(d_window + proc_wd + (6 * d_stride), 1)
+      }
+      proc_ht += downsample_factor;
+      d_window += downsample_factor * d_stride;
+      d_current_row += downsample_factor * d_stride;
+    } while (proc_ht < v_end);
+    const __m256i s_h =
+        hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+    xx_storel_64(&H[(i * wiener_win2) + i], _mm256_castsi256_si128(s_h));
+
+    // process the remaining 'j' iterations.
+    j++;
+    CALCULATE_REMAINING_H_WIN7
+  }
+
+  // Step 8: Here, the rows 7, 14, 21, 28, 35 and 42 are filled. As we need
+  // to fill only upper-triangle elements, H70-H75 from row7, H140-H146 and
+  // H147-H1413 from row14, etc. need not be filled. The first iteration of
+  // 'j' fills H77-H713 from row7 and H1414-H1420 from row14, etc.
+  for (int i = 7; i < wiener_win2; i += wiener_win) {
+    // Derive j'th iteration from where the H buffer filling needs to be
+    // started.
+    j = i / wiener_win;
+    int shift = 0;
+    do {
+      // Update the dgd pointers appropriately.
+      int proc_ht = v_start;
+      const int16_t *d_window = d + (i / WIENER_WIN);
+      const int16_t *d_current_row =
+          d + (i / WIENER_WIN) + ((i % WIENER_WIN) * d_stride);
+      downsample_factor =
+          use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
+      __m256i sum_h[WIENER_WIN] = { _mm256_setzero_si256() };
+      do {
+        UPDATE_DOWNSAMPLE_FACTOR
+
+        // Process the amount of width multiple of 16.
+        while (proc_wd < wd_mul16) {
+          const __m256i dgd =
+              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd, df_reg);
+          INIT_H_VALUES(d_window + shift + proc_wd, 7)
+
+          proc_wd += 16;
+        }
+
+        // Process the remaining width here.
+        if (wd_beyond_mul16) {
+          const __m256i dgd =
+              _mm256_loadu_si256((__m256i *)(d_current_row + proc_wd));
+          const __m256i dgd_mask = _mm256_and_si256(dgd, mask);
+          const __m256i dgd_mul_df = _mm256_mullo_epi16(dgd_mask, df_reg);
+          INIT_H_VALUES(d_window + shift + proc_wd, 7)
+        }
+        proc_ht += downsample_factor;
+        d_window += downsample_factor * d_stride;
+        d_current_row += downsample_factor * d_stride;
+      } while (proc_ht < v_end);
+
+      // As in Step 1, sum_h[6] is passed twice to pad the 4-lane hadd; the
+      // duplicated lane is not stored.
+      const __m256i sh_0 =
+          hadd_four_32_to_64_avx2(sum_h[0], sum_h[1], &sum_h[2], &sum_h[3]);
+      const __m256i sh_1 =
+          hadd_four_32_to_64_avx2(sum_h[4], sum_h[5], &sum_h[6], &sum_h[6]);
+      _mm256_storeu_si256((__m256i *)(H + (i * wiener_win2) + (wiener_win * j)),
+                          sh_0);
+      _mm_storeu_si128(
+          (__m128i *)(H + (i * wiener_win2) + (wiener_win * j) + 4),
+          _mm256_castsi256_si128(sh_1));
+      _mm_storel_epi64((__m128i *)&H[(i * wiener_win2) + (wiener_win * j) + 6],
+                       _mm256_extracti128_si256(sh_1, 1));
+      shift++;
+    } while (++j < wiener_win);
+  }
+
+  fill_lower_triag_elements_avx2(wiener_win2, H);
+}
+
+// Top-level entry for Wiener-filter statistics (M, H) collection. Prepares
+// mean-subtracted working buffers and dispatches to the window-size-specific
+// SIMD kernel, falling back to C for unsupported window sizes.
+void av1_compute_stats_avx2(int wiener_win, const uint8_t *dgd,
+                            const uint8_t *src, int16_t *dgd_avg,
+                            int16_t *src_avg, int h_start, int h_end,
+                            int v_start, int v_end, int dgd_stride,
+                            int src_stride, int64_t *M, int64_t *H,
+                            int use_downsampled_wiener_stats) {
+  const int is_win7 = (wiener_win == WIENER_WIN);
+  const int is_win5 = (wiener_win == WIENER_WIN_CHROMA);
+  if (!is_win7 && !is_win5) {
+    // SIMD kernels exist only for the 5x5 (chroma) and 7x7 (luma) windows;
+    // any other window size is handled by the C implementation.
+    av1_compute_stats_c(wiener_win, dgd, src, dgd_avg, src_avg, h_start, h_end,
+                        v_start, v_end, dgd_stride, src_stride, M, H,
+                        use_downsampled_wiener_stats);
+    return;
+  }
+
+  const int32_t half_win = wiener_win >> 1;
+  const int32_t width = h_end - h_start;
+  const int32_t height = v_end - v_start;
+  // Working strides are rounded up to a multiple of 16 (16-bit lanes per
+  // AVX2 register).
+  const int32_t d_stride = (width + 2 * half_win + 15) & ~15;
+  const int32_t s_stride = (width + 15) & ~15;
+  const uint8_t avg =
+      calc_dgd_buf_avg_avx2(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+  // (src - avg) buffer; whether rows are downsampled is decided by the
+  // 'use_downsampled_wiener_stats' speed feature.
+  sub_avg_block_avx2(src + v_start * src_stride + h_start, src_stride, avg,
+                     width, height, src_avg, s_stride,
+                     use_downsampled_wiener_stats);
+
+  // (dgd - avg) buffer, padded by the filter half-window on all sides; this
+  // feeds the H (auto-covariance) accumulation.
+  sub_avg_block_avx2(
+      dgd + (v_start - half_win) * dgd_stride + h_start - half_win, dgd_stride,
+      avg, width + 2 * half_win, height + 2 * half_win, dgd_avg, d_stride, 0);
+
+  if (is_win7) {
+    compute_stats_win7_avx2(dgd_avg, d_stride, src_avg, s_stride, width,
+                            v_start, v_end, M, H, use_downsampled_wiener_stats);
+  } else {
+    compute_stats_win5_avx2(dgd_avg, d_stride, src_avg, s_stride, width,
+                            v_start, v_end, M, H, use_downsampled_wiener_stats);
+  }
+}
+
+// Broadcasts the interleaved 16-bit pair (a, b) across all lanes: each
+// 32-bit element holds a in its low half and b in its high half.
+static INLINE __m256i pair_set_epi16(int a, int b) {
+  const uint32_t lo = (uint16_t)a;
+  const uint32_t hi = ((uint32_t)b) << 16;
+  return _mm256_set1_epi32((int32_t)(lo | hi));
+}
+
+// Returns the sum of squared error between the projected restoration output
+// and the source block for low-bitdepth input. flt0/flt1 appear to be the two
+// self-guided filter outputs carrying SGRPROJ_RST_BITS of extra precision,
+// and xq[] the projection coefficients; which of the three paths runs depends
+// on the radii enabled in params->r[].
+int64_t av1_lowbd_pixel_proj_error_avx2(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
+  int i, j, k;
+  const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
+  const __m256i rounding = _mm256_set1_epi32(1 << (shift - 1));
+  __m256i sum64 = _mm256_setzero_si256();
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  int64_t err = 0;
+  if (params->r[0] > 0 && params->r[1] > 0) {
+    // Both filters active: v = xq[0] * (flt0 - u) + xq[1] * (flt1 - u),
+    // evaluated 16 pixels at a time (one madd per 8 pixels).
+    __m256i xq_coeff = pair_set_epi16(xq[0], xq[1]);
+    for (i = 0; i < height; ++i) {
+      __m256i sum32 = _mm256_setzero_si256();
+      for (j = 0; j <= width - 16; j += 16) {
+        const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j));
+        const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j));
+        // Pack the 32-bit filter values to 16 bits; the 0xd8 permute corrects
+        // the lane interleave produced by the in-lane 256-bit pack.
+        const __m256i flt0_16b = _mm256_permute4x64_epi64(
+            _mm256_packs_epi32(yy_loadu_256(flt0 + j),
+                               yy_loadu_256(flt0 + j + 8)),
+            0xd8);
+        const __m256i flt1_16b = _mm256_permute4x64_epi64(
+            _mm256_packs_epi32(yy_loadu_256(flt1 + j),
+                               yy_loadu_256(flt1 + j + 8)),
+            0xd8);
+        // u = dat << SGRPROJ_RST_BITS (pixel scaled to filter precision).
+        const __m256i u0 = _mm256_slli_epi16(d0, SGRPROJ_RST_BITS);
+        const __m256i flt0_0_sub_u = _mm256_sub_epi16(flt0_16b, u0);
+        const __m256i flt1_0_sub_u = _mm256_sub_epi16(flt1_16b, u0);
+        const __m256i v0 = _mm256_madd_epi16(
+            xq_coeff, _mm256_unpacklo_epi16(flt0_0_sub_u, flt1_0_sub_u));
+        const __m256i v1 = _mm256_madd_epi16(
+            xq_coeff, _mm256_unpackhi_epi16(flt0_0_sub_u, flt1_0_sub_u));
+        const __m256i vr0 =
+            _mm256_srai_epi32(_mm256_add_epi32(v0, rounding), shift);
+        const __m256i vr1 =
+            _mm256_srai_epi32(_mm256_add_epi32(v1, rounding), shift);
+        // e = round(v >> shift) + dat - src, squared and accumulated.
+        const __m256i e0 = _mm256_sub_epi16(
+            _mm256_add_epi16(_mm256_packs_epi32(vr0, vr1), d0), s0);
+        const __m256i err0 = _mm256_madd_epi16(e0, e0);
+        sum32 = _mm256_add_epi32(sum32, err0);
+      }
+      // Scalar tail for the last width % 16 pixels.
+      for (k = j; k < width; ++k) {
+        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
+        int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
+        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+      flt0 += flt0_stride;
+      flt1 += flt1_stride;
+      // Widen the per-row 32-bit sums to 64 bits before accumulating so the
+      // running total cannot overflow across rows.
+      const __m256i sum64_0 =
+          _mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32));
+      const __m256i sum64_1 =
+          _mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1));
+      sum64 = _mm256_add_epi64(sum64, sum64_0);
+      sum64 = _mm256_add_epi64(sum64, sum64_1);
+    }
+  } else if (params->r[0] > 0 || params->r[1] > 0) {
+    // Exactly one filter active. The coefficient pair
+    // (xq_active, -xq_active << SGRPROJ_RST_BITS) applied to (flt, dat) lets
+    // one madd compute xq_active * (flt - (dat << SGRPROJ_RST_BITS)).
+    const int xq_active = (params->r[0] > 0) ? xq[0] : xq[1];
+    const __m256i xq_coeff =
+        pair_set_epi16(xq_active, -xq_active * (1 << SGRPROJ_RST_BITS));
+    const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
+    const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
+    for (i = 0; i < height; ++i) {
+      __m256i sum32 = _mm256_setzero_si256();
+      for (j = 0; j <= width - 16; j += 16) {
+        const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j));
+        const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j));
+        const __m256i flt_16b = _mm256_permute4x64_epi64(
+            _mm256_packs_epi32(yy_loadu_256(flt + j),
+                               yy_loadu_256(flt + j + 8)),
+            0xd8);
+        const __m256i v0 =
+            _mm256_madd_epi16(xq_coeff, _mm256_unpacklo_epi16(flt_16b, d0));
+        const __m256i v1 =
+            _mm256_madd_epi16(xq_coeff, _mm256_unpackhi_epi16(flt_16b, d0));
+        const __m256i vr0 =
+            _mm256_srai_epi32(_mm256_add_epi32(v0, rounding), shift);
+        const __m256i vr1 =
+            _mm256_srai_epi32(_mm256_add_epi32(v1, rounding), shift);
+        const __m256i e0 = _mm256_sub_epi16(
+            _mm256_add_epi16(_mm256_packs_epi32(vr0, vr1), d0), s0);
+        const __m256i err0 = _mm256_madd_epi16(e0, e0);
+        sum32 = _mm256_add_epi32(sum32, err0);
+      }
+      // Scalar tail for the last width % 16 pixels.
+      for (k = j; k < width; ++k) {
+        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
+        int32_t v = xq_active * (flt[k] - u);
+        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+      flt += flt_stride;
+      const __m256i sum64_0 =
+          _mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32));
+      const __m256i sum64_1 =
+          _mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1));
+      sum64 = _mm256_add_epi64(sum64, sum64_0);
+      sum64 = _mm256_add_epi64(sum64, sum64_1);
+    }
+  } else {
+    // Neither filter active: plain sum of squared differences dat vs src.
+    // NOTE(review): sum32 accumulates over the whole block here (not per
+    // row); this assumes block sizes small enough not to overflow 32 bits —
+    // confirm against the maximum restoration-unit size.
+    __m256i sum32 = _mm256_setzero_si256();
+    for (i = 0; i < height; ++i) {
+      for (j = 0; j <= width - 16; j += 16) {
+        const __m256i d0 = _mm256_cvtepu8_epi16(xx_loadu_128(dat + j));
+        const __m256i s0 = _mm256_cvtepu8_epi16(xx_loadu_128(src + j));
+        const __m256i diff0 = _mm256_sub_epi16(d0, s0);
+        const __m256i err0 = _mm256_madd_epi16(diff0, diff0);
+        sum32 = _mm256_add_epi32(sum32, err0);
+      }
+      for (k = j; k < width; ++k) {
+        const int32_t e = (int32_t)(dat[k]) - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+    }
+    const __m256i sum64_0 =
+        _mm256_cvtepi32_epi64(_mm256_castsi256_si128(sum32));
+    const __m256i sum64_1 =
+        _mm256_cvtepi32_epi64(_mm256_extracti128_si256(sum32, 1));
+    sum64 = _mm256_add_epi64(sum64_0, sum64_1);
+  }
+  // Horizontal reduction of the four 64-bit partial sums.
+  int64_t sum[4];
+  yy_storeu_256(sum, sum64);
+  err += sum[0] + sum[1] + sum[2] + sum[3];
+  return err;
+}
+
+// When params->r[0] > 0 and params->r[1] > 0. In this case all elements of
+// C and H need to be computed. H is the 2x2 auto-correlation of the two
+// filter residuals (f - u) and C their cross-correlation with (s - u), each
+// normalized by the pixel count at the end.
+static AOM_INLINE void calc_proj_params_r0_r1_avx2(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  __m256i h00, h01, h11, c0, c1;
+  const __m256i zero = _mm256_setzero_si256();
+  h01 = h11 = c0 = c1 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    // Processes 8 pixels per iteration; assumes width is a multiple of 8 —
+    // TODO(review): confirm callers guarantee this.
+    for (int j = 0; j < width; j += 8) {
+      const __m256i u_load = _mm256_cvtepu8_epi32(
+          _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
+      const __m256i s_load = _mm256_cvtepu8_epi32(
+          _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
+      __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
+      __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
+      // Scale pixels to filter precision, then form the residuals.
+      __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      s = _mm256_sub_epi32(s, d);
+      f1 = _mm256_sub_epi32(f1, d);
+      f2 = _mm256_sub_epi32(f2, d);
+
+      // _mm256_mul_epi32 multiplies only the even 32-bit lanes, so each
+      // product is computed twice: once as-is, and once with both operands
+      // shifted right by 32 to bring the odd lanes into even position.
+      const __m256i h00_even = _mm256_mul_epi32(f1, f1);
+      const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
+                                               _mm256_srli_epi64(f1, 32));
+      h00 = _mm256_add_epi64(h00, h00_even);
+      h00 = _mm256_add_epi64(h00, h00_odd);
+
+      const __m256i h01_even = _mm256_mul_epi32(f1, f2);
+      const __m256i h01_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
+                                               _mm256_srli_epi64(f2, 32));
+      h01 = _mm256_add_epi64(h01, h01_even);
+      h01 = _mm256_add_epi64(h01, h01_odd);
+
+      const __m256i h11_even = _mm256_mul_epi32(f2, f2);
+      const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
+                                               _mm256_srli_epi64(f2, 32));
+      h11 = _mm256_add_epi64(h11, h11_even);
+      h11 = _mm256_add_epi64(h11, h11_odd);
+
+      const __m256i c0_even = _mm256_mul_epi32(f1, s);
+      const __m256i c0_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
+      c0 = _mm256_add_epi64(c0, c0_even);
+      c0 = _mm256_add_epi64(c0, c0_odd);
+
+      const __m256i c1_even = _mm256_mul_epi32(f2, s);
+      const __m256i c1_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
+      c1 = _mm256_add_epi64(c1, c1_even);
+      c1 = _mm256_add_epi64(c1, c1_odd);
+    }
+  }
+
+  // Pairwise 64-bit reduction: interleave the two accumulators, add, then
+  // fold the upper 128-bit half onto the lower one.
+  __m256i c_low = _mm256_unpacklo_epi64(c0, c1);
+  const __m256i c_high = _mm256_unpackhi_epi64(c0, c1);
+  c_low = _mm256_add_epi64(c_low, c_high);
+  const __m128i c_128bit = _mm_add_epi64(_mm256_extracti128_si256(c_low, 1),
+                                         _mm256_castsi256_si128(c_low));
+
+  __m256i h0x_low = _mm256_unpacklo_epi64(h00, h01);
+  const __m256i h0x_high = _mm256_unpackhi_epi64(h00, h01);
+  h0x_low = _mm256_add_epi64(h0x_low, h0x_high);
+  const __m128i h0x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h0x_low, 1),
+                                           _mm256_castsi256_si128(h0x_low));
+
+  // Using the symmetric properties of H, calculations of H[1][0] are not
+  // needed.
+  __m256i h1x_low = _mm256_unpacklo_epi64(zero, h11);
+  const __m256i h1x_high = _mm256_unpackhi_epi64(zero, h11);
+  h1x_low = _mm256_add_epi64(h1x_low, h1x_high);
+  const __m128i h1x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h1x_low, 1),
+                                           _mm256_castsi256_si128(h1x_low));
+
+  xx_storeu_128(C, c_128bit);
+  xx_storeu_128(H[0], h0x_128bit);
+  xx_storeu_128(H[1], h1x_128bit);
+
+  // Normalize the statistics by the pixel count.
+  H[0][0] /= size;
+  H[0][1] /= size;
+  H[1][1] /= size;
+
+  // Since H is a symmetric matrix
+  H[1][0] = H[0][1];
+  C[0] /= size;
+  C[1] /= size;
+}
+
+// When only params->r[0] > 0. In this case only H[0][0] and C[0] are
+// non-zero and need to be computed: the auto-correlation of the flt0
+// residual and its cross-correlation with the source residual, normalized
+// by the pixel count.
+static AOM_INLINE void calc_proj_params_r0_avx2(const uint8_t *src8, int width,
+                                                int height, int src_stride,
+                                                const uint8_t *dat8,
+                                                int dat_stride, int32_t *flt0,
+                                                int flt0_stride,
+                                                int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  __m256i h00, c0;
+  const __m256i zero = _mm256_setzero_si256();
+  c0 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    // 8 pixels per iteration; assumes width is a multiple of 8 —
+    // TODO(review): confirm callers guarantee this.
+    for (int j = 0; j < width; j += 8) {
+      const __m256i u_load = _mm256_cvtepu8_epi32(
+          _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
+      const __m256i s_load = _mm256_cvtepu8_epi32(
+          _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
+      __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
+      __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      s = _mm256_sub_epi32(s, d);
+      f1 = _mm256_sub_epi32(f1, d);
+
+      // Even/odd lane split: _mm256_mul_epi32 only multiplies even lanes.
+      const __m256i h00_even = _mm256_mul_epi32(f1, f1);
+      const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
+                                               _mm256_srli_epi64(f1, 32));
+      h00 = _mm256_add_epi64(h00, h00_even);
+      h00 = _mm256_add_epi64(h00, h00_odd);
+
+      const __m256i c0_even = _mm256_mul_epi32(f1, s);
+      const __m256i c0_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
+      c0 = _mm256_add_epi64(c0, c0_even);
+      c0 = _mm256_add_epi64(c0, c0_odd);
+    }
+  }
+  // Reduce the four 64-bit partial sums of each accumulator to one value.
+  const __m128i h00_128bit = _mm_add_epi64(_mm256_extracti128_si256(h00, 1),
+                                           _mm256_castsi256_si128(h00));
+  const __m128i h00_val =
+      _mm_add_epi64(h00_128bit, _mm_srli_si128(h00_128bit, 8));
+
+  const __m128i c0_128bit = _mm_add_epi64(_mm256_extracti128_si256(c0, 1),
+                                          _mm256_castsi256_si128(c0));
+  const __m128i c0_val = _mm_add_epi64(c0_128bit, _mm_srli_si128(c0_128bit, 8));
+
+  // Store {value, 0} pairs so the unused C[1] / H[0][1] slots are zeroed.
+  const __m128i c = _mm_unpacklo_epi64(c0_val, _mm256_castsi256_si128(zero));
+  const __m128i h0x = _mm_unpacklo_epi64(h00_val, _mm256_castsi256_si128(zero));
+
+  xx_storeu_128(C, c);
+  xx_storeu_128(H[0], h0x);
+
+  H[0][0] /= size;
+  C[0] /= size;
+}
+
+// When only params->r[1] > 0. In this case only H[1][1] and C[1] are
+// non-zero and need to be computed: the auto-correlation of the flt1
+// residual and its cross-correlation with the source residual, normalized
+// by the pixel count.
+static AOM_INLINE void calc_proj_params_r1_avx2(const uint8_t *src8, int width,
+                                                int height, int src_stride,
+                                                const uint8_t *dat8,
+                                                int dat_stride, int32_t *flt1,
+                                                int flt1_stride,
+                                                int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  __m256i h11, c1;
+  const __m256i zero = _mm256_setzero_si256();
+  c1 = h11 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    // 8 pixels per iteration; assumes width is a multiple of 8 —
+    // TODO(review): confirm callers guarantee this.
+    for (int j = 0; j < width; j += 8) {
+      const __m256i u_load = _mm256_cvtepu8_epi32(
+          _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
+      const __m256i s_load = _mm256_cvtepu8_epi32(
+          _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
+      __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
+      __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      s = _mm256_sub_epi32(s, d);
+      f2 = _mm256_sub_epi32(f2, d);
+
+      // Even/odd lane split: _mm256_mul_epi32 only multiplies even lanes.
+      const __m256i h11_even = _mm256_mul_epi32(f2, f2);
+      const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
+                                               _mm256_srli_epi64(f2, 32));
+      h11 = _mm256_add_epi64(h11, h11_even);
+      h11 = _mm256_add_epi64(h11, h11_odd);
+
+      const __m256i c1_even = _mm256_mul_epi32(f2, s);
+      const __m256i c1_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
+      c1 = _mm256_add_epi64(c1, c1_even);
+      c1 = _mm256_add_epi64(c1, c1_odd);
+    }
+  }
+
+  // Reduce the four 64-bit partial sums of each accumulator to one value.
+  const __m128i h11_128bit = _mm_add_epi64(_mm256_extracti128_si256(h11, 1),
+                                           _mm256_castsi256_si128(h11));
+  const __m128i h11_val =
+      _mm_add_epi64(h11_128bit, _mm_srli_si128(h11_128bit, 8));
+
+  const __m128i c1_128bit = _mm_add_epi64(_mm256_extracti128_si256(c1, 1),
+                                          _mm256_castsi256_si128(c1));
+  const __m128i c1_val = _mm_add_epi64(c1_128bit, _mm_srli_si128(c1_128bit, 8));
+
+  // Store {0, value} pairs so the unused C[0] / H[1][0] slots are zeroed.
+  const __m128i c = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), c1_val);
+  const __m128i h1x = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), h11_val);
+
+  xx_storeu_128(C, c);
+  xx_storeu_128(H[1], h1x);
+
+  H[1][1] /= size;
+  C[1] /= size;
+}
+
+// AVX2 variant of av1_calc_proj_params_c.  Routes to the accumulator
+// specialized for the set of enabled self-guided filters; when neither
+// filter is enabled, H and C are left untouched.
+void av1_calc_proj_params_avx2(const uint8_t *src8, int width, int height,
+                               int src_stride, const uint8_t *dat8,
+                               int dat_stride, int32_t *flt0, int flt0_stride,
+                               int32_t *flt1, int flt1_stride, int64_t H[2][2],
+                               int64_t C[2], const sgr_params_type *params) {
+  const int r0_enabled = params->r[0] > 0;
+  const int r1_enabled = params->r[1] > 0;
+  if (r0_enabled && r1_enabled) {
+    calc_proj_params_r0_r1_avx2(src8, width, height, src_stride, dat8,
+                                dat_stride, flt0, flt0_stride, flt1,
+                                flt1_stride, H, C);
+  } else if (r0_enabled) {
+    calc_proj_params_r0_avx2(src8, width, height, src_stride, dat8, dat_stride,
+                             flt0, flt0_stride, H, C);
+  } else if (r1_enabled) {
+    calc_proj_params_r1_avx2(src8, width, height, src_stride, dat8, dat_stride,
+                             flt1, flt1_stride, H, C);
+  }
+}
+
+// Accumulates the full 2x2 matrix H and vector C of the self-guided
+// projection least-squares fit when both r[0] and r[1] filters are enabled
+// (high-bitdepth path).  All outputs are normalized by the pixel count and
+// H[1][0] is filled in from H[0][1] by symmetry.
+// NOTE(review): width is assumed to be a multiple of 8 (no remainder loop)
+// and the 16-bit rows are assumed 16-byte aligned (_mm_load_si128) --
+// confirm with callers.
+static AOM_INLINE void calc_proj_params_r0_r1_high_bd_avx2(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
+  __m256i h00, h01, h11, c0, c1;
+  const __m256i zero = _mm256_setzero_si256();
+  h01 = h11 = c0 = c1 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 8) {
+      // Widen 8 pixels of the degraded and source images from u16 to i32.
+      const __m256i u_load = _mm256_cvtepu16_epi32(
+          _mm_load_si128((__m128i *)(dat + i * dat_stride + j)));
+      const __m256i s_load = _mm256_cvtepu16_epi32(
+          _mm_load_si128((__m128i *)(src + i * src_stride + j)));
+      __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
+      __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
+      // Scale pixels to filtered-image precision and form the residuals
+      // s = src - dat, f1 = flt0 - dat, f2 = flt1 - dat.
+      __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      s = _mm256_sub_epi32(s, d);
+      f1 = _mm256_sub_epi32(f1, d);
+      f2 = _mm256_sub_epi32(f2, d);
+
+      // Each dot product below is accumulated in i64 by multiplying the
+      // even and odd 32-bit lanes separately (32x32 -> 64 bit products).
+      const __m256i h00_even = _mm256_mul_epi32(f1, f1);
+      const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
+                                               _mm256_srli_epi64(f1, 32));
+      h00 = _mm256_add_epi64(h00, h00_even);
+      h00 = _mm256_add_epi64(h00, h00_odd);
+
+      const __m256i h01_even = _mm256_mul_epi32(f1, f2);
+      const __m256i h01_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
+                                               _mm256_srli_epi64(f2, 32));
+      h01 = _mm256_add_epi64(h01, h01_even);
+      h01 = _mm256_add_epi64(h01, h01_odd);
+
+      const __m256i h11_even = _mm256_mul_epi32(f2, f2);
+      const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
+                                               _mm256_srli_epi64(f2, 32));
+      h11 = _mm256_add_epi64(h11, h11_even);
+      h11 = _mm256_add_epi64(h11, h11_odd);
+
+      const __m256i c0_even = _mm256_mul_epi32(f1, s);
+      const __m256i c0_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
+      c0 = _mm256_add_epi64(c0, c0_even);
+      c0 = _mm256_add_epi64(c0, c0_odd);
+
+      const __m256i c1_even = _mm256_mul_epi32(f2, s);
+      const __m256i c1_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
+      c1 = _mm256_add_epi64(c1, c1_even);
+      c1 = _mm256_add_epi64(c1, c1_odd);
+    }
+  }
+
+  // Horizontal i64 reductions; accumulators are interleaved in pairs so one
+  // 128-bit store writes both elements of each output row.
+  __m256i c_low = _mm256_unpacklo_epi64(c0, c1);
+  const __m256i c_high = _mm256_unpackhi_epi64(c0, c1);
+  c_low = _mm256_add_epi64(c_low, c_high);
+  const __m128i c_128bit = _mm_add_epi64(_mm256_extracti128_si256(c_low, 1),
+                                         _mm256_castsi256_si128(c_low));
+
+  __m256i h0x_low = _mm256_unpacklo_epi64(h00, h01);
+  const __m256i h0x_high = _mm256_unpackhi_epi64(h00, h01);
+  h0x_low = _mm256_add_epi64(h0x_low, h0x_high);
+  const __m128i h0x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h0x_low, 1),
+                                           _mm256_castsi256_si128(h0x_low));
+
+  // Using the symmetric properties of H, calculations of H[1][0] are not
+  // needed.
+  __m256i h1x_low = _mm256_unpacklo_epi64(zero, h11);
+  const __m256i h1x_high = _mm256_unpackhi_epi64(zero, h11);
+  h1x_low = _mm256_add_epi64(h1x_low, h1x_high);
+  const __m128i h1x_128bit = _mm_add_epi64(_mm256_extracti128_si256(h1x_low, 1),
+                                           _mm256_castsi256_si128(h1x_low));
+
+  xx_storeu_128(C, c_128bit);
+  xx_storeu_128(H[0], h0x_128bit);
+  xx_storeu_128(H[1], h1x_128bit);
+
+  H[0][0] /= size;
+  H[0][1] /= size;
+  H[1][1] /= size;
+
+  // Since H is a symmetric matrix
+  H[1][0] = H[0][1];
+  C[0] /= size;
+  C[1] /= size;
+}
+
+// Accumulates H[0][0] and C[0] only (r[0]-enabled-only case) for the
+// high-bitdepth self-guided projection fit; the unused r[1] slots of C and
+// H[0] are zeroed by the final unpack against a zero vector.  Results are
+// normalized by the pixel count.
+// NOTE(review): width is assumed to be a multiple of 8 and the 16-bit rows
+// 16-byte aligned -- confirm with callers.
+static AOM_INLINE void calc_proj_params_r0_high_bd_avx2(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
+  __m256i h00, c0;
+  const __m256i zero = _mm256_setzero_si256();
+  c0 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 8) {
+      // Widen 8 pixels of the degraded and source images from u16 to i32.
+      const __m256i u_load = _mm256_cvtepu16_epi32(
+          _mm_load_si128((__m128i *)(dat + i * dat_stride + j)));
+      const __m256i s_load = _mm256_cvtepu16_epi32(
+          _mm_load_si128((__m128i *)(src + i * src_stride + j)));
+      __m256i f1 = _mm256_loadu_si256((__m256i *)(flt0 + i * flt0_stride + j));
+      // Scale pixels to filtered-image precision and form residuals
+      // s = src - dat, f1 = flt0 - dat.
+      __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      s = _mm256_sub_epi32(s, d);
+      f1 = _mm256_sub_epi32(f1, d);
+
+      // H[0][0] += f1*f1 and C[0] += f1*s, accumulated in i64 via separate
+      // even/odd-lane 32x32 -> 64 bit multiplies.
+      const __m256i h00_even = _mm256_mul_epi32(f1, f1);
+      const __m256i h00_odd = _mm256_mul_epi32(_mm256_srli_epi64(f1, 32),
+                                               _mm256_srli_epi64(f1, 32));
+      h00 = _mm256_add_epi64(h00, h00_even);
+      h00 = _mm256_add_epi64(h00, h00_odd);
+
+      const __m256i c0_even = _mm256_mul_epi32(f1, s);
+      const __m256i c0_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f1, 32), _mm256_srli_epi64(s, 32));
+      c0 = _mm256_add_epi64(c0, c0_even);
+      c0 = _mm256_add_epi64(c0, c0_odd);
+    }
+  }
+  // Horizontal reduction of the four i64 lanes of each accumulator.
+  const __m128i h00_128bit = _mm_add_epi64(_mm256_extracti128_si256(h00, 1),
+                                           _mm256_castsi256_si128(h00));
+  const __m128i h00_val =
+      _mm_add_epi64(h00_128bit, _mm_srli_si128(h00_128bit, 8));
+
+  const __m128i c0_128bit = _mm_add_epi64(_mm256_extracti128_si256(c0, 1),
+                                          _mm256_castsi256_si128(c0));
+  const __m128i c0_val = _mm_add_epi64(c0_128bit, _mm_srli_si128(c0_128bit, 8));
+
+  // Pack {value, 0} pairs so C[1] and H[0][1] are written as zero.
+  const __m128i c = _mm_unpacklo_epi64(c0_val, _mm256_castsi256_si128(zero));
+  const __m128i h0x = _mm_unpacklo_epi64(h00_val, _mm256_castsi256_si128(zero));
+
+  xx_storeu_128(C, c);
+  xx_storeu_128(H[0], h0x);
+
+  H[0][0] /= size;
+  C[0] /= size;
+}
+
+// Accumulates H[1][1] and C[1] only (r[1]-enabled-only case) for the
+// high-bitdepth self-guided projection fit; the unused r[0] slots of C and
+// H[1] are zeroed by the final unpack against a zero vector.  Results are
+// normalized by the pixel count.
+// NOTE(review): width is assumed to be a multiple of 8 and the 16-bit rows
+// 16-byte aligned -- confirm with callers.
+static AOM_INLINE void calc_proj_params_r1_high_bd_avx2(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt1, int flt1_stride,
+    int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
+  __m256i h11, c1;
+  const __m256i zero = _mm256_setzero_si256();
+  c1 = h11 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 8) {
+      // Widen 8 pixels of the degraded and source images from u16 to i32.
+      const __m256i u_load = _mm256_cvtepu16_epi32(
+          _mm_load_si128((__m128i *)(dat + i * dat_stride + j)));
+      const __m256i s_load = _mm256_cvtepu16_epi32(
+          _mm_load_si128((__m128i *)(src + i * src_stride + j)));
+      __m256i f2 = _mm256_loadu_si256((__m256i *)(flt1 + i * flt1_stride + j));
+      // Scale pixels to filtered-image precision and form residuals
+      // s = src - dat, f2 = flt1 - dat.
+      __m256i d = _mm256_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m256i s = _mm256_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      s = _mm256_sub_epi32(s, d);
+      f2 = _mm256_sub_epi32(f2, d);
+
+      // H[1][1] += f2*f2 and C[1] += f2*s, accumulated in i64 via separate
+      // even/odd-lane 32x32 -> 64 bit multiplies.
+      const __m256i h11_even = _mm256_mul_epi32(f2, f2);
+      const __m256i h11_odd = _mm256_mul_epi32(_mm256_srli_epi64(f2, 32),
+                                               _mm256_srli_epi64(f2, 32));
+      h11 = _mm256_add_epi64(h11, h11_even);
+      h11 = _mm256_add_epi64(h11, h11_odd);
+
+      const __m256i c1_even = _mm256_mul_epi32(f2, s);
+      const __m256i c1_odd =
+          _mm256_mul_epi32(_mm256_srli_epi64(f2, 32), _mm256_srli_epi64(s, 32));
+      c1 = _mm256_add_epi64(c1, c1_even);
+      c1 = _mm256_add_epi64(c1, c1_odd);
+    }
+  }
+
+  // Horizontal reduction of the four i64 lanes of each accumulator.
+  const __m128i h11_128bit = _mm_add_epi64(_mm256_extracti128_si256(h11, 1),
+                                           _mm256_castsi256_si128(h11));
+  const __m128i h11_val =
+      _mm_add_epi64(h11_128bit, _mm_srli_si128(h11_128bit, 8));
+
+  const __m128i c1_128bit = _mm_add_epi64(_mm256_extracti128_si256(c1, 1),
+                                          _mm256_castsi256_si128(c1));
+  const __m128i c1_val = _mm_add_epi64(c1_128bit, _mm_srli_si128(c1_128bit, 8));
+
+  // Pack {0, value} pairs so C[0] and H[1][0] are written as zero.
+  const __m128i c = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), c1_val);
+  const __m128i h1x = _mm_unpacklo_epi64(_mm256_castsi256_si128(zero), h11_val);
+
+  xx_storeu_128(C, c);
+  xx_storeu_128(H[1], h1x);
+
+  H[1][1] /= size;
+  C[1] /= size;
+}
+
+// AVX2 variant of av1_calc_proj_params_high_bd_c.  Routes to the
+// accumulator specialized for the set of enabled self-guided filters;
+// when neither filter is enabled, H and C are left untouched.
+void av1_calc_proj_params_high_bd_avx2(const uint8_t *src8, int width,
+                                       int height, int src_stride,
+                                       const uint8_t *dat8, int dat_stride,
+                                       int32_t *flt0, int flt0_stride,
+                                       int32_t *flt1, int flt1_stride,
+                                       int64_t H[2][2], int64_t C[2],
+                                       const sgr_params_type *params) {
+  const int r0_enabled = params->r[0] > 0;
+  const int r1_enabled = params->r[1] > 0;
+  if (r0_enabled && r1_enabled) {
+    calc_proj_params_r0_r1_high_bd_avx2(src8, width, height, src_stride, dat8,
+                                        dat_stride, flt0, flt0_stride, flt1,
+                                        flt1_stride, H, C);
+  } else if (r0_enabled) {
+    calc_proj_params_r0_high_bd_avx2(src8, width, height, src_stride, dat8,
+                                     dat_stride, flt0, flt0_stride, H, C);
+  } else if (r1_enabled) {
+    calc_proj_params_r1_high_bd_avx2(src8, width, height, src_stride, dat8,
+                                     dat_stride, flt1, flt1_stride, H, C);
+  }
+}
+
+#if CONFIG_AV1_HIGHBITDEPTH
+// Returns the sum of squared errors between the source image and the
+// self-guided-restoration output for high-bitdepth input, i.e. the SSE of
+//   dat + ROUND(xq . (flt - (dat << SGRPROJ_RST_BITS)), shift)  vs  src.
+// Three paths: both filters enabled, exactly one enabled, and none (plain
+// SSE between dat and src).  Each path vectorizes the bulk of a row and
+// finishes the remainder with scalar code.
+int64_t av1_highbd_pixel_proj_error_avx2(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
+  int i, j, k;
+  const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
+  const __m256i rounding = _mm256_set1_epi32(1 << (shift - 1));
+  __m256i sum64 = _mm256_setzero_si256();
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
+  int64_t err = 0;
+  if (params->r[0] > 0 && params->r[1] > 0) {  // Both filters are enabled
+    const __m256i xq0 = _mm256_set1_epi32(xq[0]);
+    const __m256i xq1 = _mm256_set1_epi32(xq[1]);
+    for (i = 0; i < height; ++i) {
+      // Per-row i32 accumulator; widened to i64 once per row so it cannot
+      // overflow for any practical row width.
+      __m256i sum32 = _mm256_setzero_si256();
+      for (j = 0; j <= width - 16; j += 16) {  // Process 16 pixels at a time
+        // Load 16 pixels each from source image and corrupted image
+        const __m256i s0 = yy_loadu_256(src + j);
+        const __m256i d0 = yy_loadu_256(dat + j);
+        // s0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16 (indices)
+
+        // Shift-up each pixel to match filtered image scaling
+        const __m256i u0 = _mm256_slli_epi16(d0, SGRPROJ_RST_BITS);
+
+        // Split u0 into two halves and pad each from u16 to i32
+        const __m256i u0l = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(u0));
+        const __m256i u0h =
+            _mm256_cvtepu16_epi32(_mm256_extracti128_si256(u0, 1));
+        // u0h, u0l = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as u32
+
+        // Load 16 pixels from each filtered image
+        const __m256i flt0l = yy_loadu_256(flt0 + j);
+        const __m256i flt0h = yy_loadu_256(flt0 + j + 8);
+        const __m256i flt1l = yy_loadu_256(flt1 + j);
+        const __m256i flt1h = yy_loadu_256(flt1 + j + 8);
+        // flt?l, flt?h = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as u32
+
+        // Subtract shifted corrupt image from each filtered image
+        const __m256i flt0l_subu = _mm256_sub_epi32(flt0l, u0l);
+        const __m256i flt0h_subu = _mm256_sub_epi32(flt0h, u0h);
+        const __m256i flt1l_subu = _mm256_sub_epi32(flt1l, u0l);
+        const __m256i flt1h_subu = _mm256_sub_epi32(flt1h, u0h);
+
+        // Multiply basis vectors by appropriate coefficients
+        const __m256i v0l = _mm256_mullo_epi32(flt0l_subu, xq0);
+        const __m256i v0h = _mm256_mullo_epi32(flt0h_subu, xq0);
+        const __m256i v1l = _mm256_mullo_epi32(flt1l_subu, xq1);
+        const __m256i v1h = _mm256_mullo_epi32(flt1h_subu, xq1);
+
+        // Add together the contributions from the two basis vectors
+        const __m256i vl = _mm256_add_epi32(v0l, v1l);
+        const __m256i vh = _mm256_add_epi32(v0h, v1h);
+
+        // Right-shift v with appropriate rounding
+        const __m256i vrl =
+            _mm256_srai_epi32(_mm256_add_epi32(vl, rounding), shift);
+        const __m256i vrh =
+            _mm256_srai_epi32(_mm256_add_epi32(vh, rounding), shift);
+        // vrh, vrl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0]
+
+        // Saturate each i32 to an i16 then combine both halves
+        // The permute (control=[3 1 2 0]) fixes weird ordering from AVX lanes
+        const __m256i vr =
+            _mm256_permute4x64_epi64(_mm256_packs_epi32(vrl, vrh), 0xd8);
+        // intermediate = [15 14 13 12 7 6 5 4] [11 10 9 8 3 2 1 0]
+        // vr = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0]
+
+        // Add twin-subspace-sgr-filter to corrupt image then subtract source
+        const __m256i e0 = _mm256_sub_epi16(_mm256_add_epi16(vr, d0), s0);
+
+        // Calculate squared error and add adjacent values
+        const __m256i err0 = _mm256_madd_epi16(e0, e0);
+
+        sum32 = _mm256_add_epi32(sum32, err0);
+      }
+
+      // Widen the row's i32 partial sums to i64 and fold into sum64.
+      const __m256i sum32l =
+          _mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32));
+      sum64 = _mm256_add_epi64(sum64, sum32l);
+      const __m256i sum32h =
+          _mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1));
+      sum64 = _mm256_add_epi64(sum64, sum32h);
+
+      // Process remaining pixels in this row (modulo 16)
+      for (k = j; k < width; ++k) {
+        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
+        int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
+        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+      flt0 += flt0_stride;
+      flt1 += flt1_stride;
+    }
+  } else if (params->r[0] > 0 || params->r[1] > 0) {  // Only one filter enabled
+    // Fold the (flt - (dat << RST_BITS)) * xq expression into two mullo ops
+    // by pre-negating and pre-scaling the coefficient applied to dat.
+    const int32_t xq_on = (params->r[0] > 0) ? xq[0] : xq[1];
+    const __m256i xq_active = _mm256_set1_epi32(xq_on);
+    const __m256i xq_inactive =
+        _mm256_set1_epi32(-xq_on * (1 << SGRPROJ_RST_BITS));
+    const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
+    const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
+    for (i = 0; i < height; ++i) {
+      __m256i sum32 = _mm256_setzero_si256();
+      for (j = 0; j <= width - 16; j += 16) {
+        // Load 16 pixels from source image
+        const __m256i s0 = yy_loadu_256(src + j);
+        // s0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16
+
+        // Load 16 pixels from corrupted image and pad each u16 to i32
+        const __m256i d0 = yy_loadu_256(dat + j);
+        const __m256i d0h =
+            _mm256_cvtepu16_epi32(_mm256_extracti128_si256(d0, 1));
+        const __m256i d0l = _mm256_cvtepu16_epi32(_mm256_castsi256_si128(d0));
+        // d0 = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16
+        // d0h, d0l = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32
+
+        // Load 16 pixels from the filtered image
+        const __m256i flth = yy_loadu_256(flt + j + 8);
+        const __m256i fltl = yy_loadu_256(flt + j);
+        // flth, fltl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32
+
+        const __m256i flth_xq = _mm256_mullo_epi32(flth, xq_active);
+        const __m256i fltl_xq = _mm256_mullo_epi32(fltl, xq_active);
+        const __m256i d0h_xq = _mm256_mullo_epi32(d0h, xq_inactive);
+        const __m256i d0l_xq = _mm256_mullo_epi32(d0l, xq_inactive);
+
+        const __m256i vh = _mm256_add_epi32(flth_xq, d0h_xq);
+        const __m256i vl = _mm256_add_epi32(fltl_xq, d0l_xq);
+
+        // Shift this down with appropriate rounding
+        const __m256i vrh =
+            _mm256_srai_epi32(_mm256_add_epi32(vh, rounding), shift);
+        const __m256i vrl =
+            _mm256_srai_epi32(_mm256_add_epi32(vl, rounding), shift);
+        // vrh, vrl = [15 14 13 12] [11 10 9 8], [7 6 5 4] [3 2 1 0] as i32
+
+        // Saturate each i32 to an i16 then combine both halves
+        // The permute (control=[3 1 2 0]) fixes weird ordering from AVX lanes
+        const __m256i vr =
+            _mm256_permute4x64_epi64(_mm256_packs_epi32(vrl, vrh), 0xd8);
+        // intermediate = [15 14 13 12 7 6 5 4] [11 10 9 8 3 2 1 0] as u16
+        // vr = [15 14 13 12 11 10 9 8] [7 6 5 4 3 2 1 0] as u16
+
+        // Subtract twin-subspace-sgr filtered from source image to get error
+        const __m256i e0 = _mm256_sub_epi16(_mm256_add_epi16(vr, d0), s0);
+
+        // Calculate squared error and add adjacent values
+        const __m256i err0 = _mm256_madd_epi16(e0, e0);
+
+        sum32 = _mm256_add_epi32(sum32, err0);
+      }
+
+      // Widen the row's i32 partial sums to i64 and fold into sum64.
+      const __m256i sum32l =
+          _mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32));
+      sum64 = _mm256_add_epi64(sum64, sum32l);
+      const __m256i sum32h =
+          _mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1));
+      sum64 = _mm256_add_epi64(sum64, sum32h);
+
+      // Process remaining pixels in this row (modulo 16)
+      for (k = j; k < width; ++k) {
+        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
+        int32_t v = xq_on * (flt[k] - u);
+        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+      flt += flt_stride;
+    }
+  } else {  // Neither filter is enabled
+    for (i = 0; i < height; ++i) {
+      __m256i sum32 = _mm256_setzero_si256();
+      for (j = 0; j <= width - 32; j += 32) {
+        // Load 2x16 u16 from source image
+        const __m256i s0l = yy_loadu_256(src + j);
+        const __m256i s0h = yy_loadu_256(src + j + 16);
+
+        // Load 2x16 u16 from corrupted image
+        const __m256i d0l = yy_loadu_256(dat + j);
+        const __m256i d0h = yy_loadu_256(dat + j + 16);
+
+        // Subtract corrupted image from source image
+        const __m256i diffl = _mm256_sub_epi16(d0l, s0l);
+        const __m256i diffh = _mm256_sub_epi16(d0h, s0h);
+
+        // Square error and add adjacent values
+        const __m256i err0l = _mm256_madd_epi16(diffl, diffl);
+        const __m256i err0h = _mm256_madd_epi16(diffh, diffh);
+
+        sum32 = _mm256_add_epi32(sum32, err0l);
+        sum32 = _mm256_add_epi32(sum32, err0h);
+      }
+
+      // Widen the row's i32 partial sums to i64 and fold into sum64.
+      const __m256i sum32l =
+          _mm256_cvtepu32_epi64(_mm256_castsi256_si128(sum32));
+      sum64 = _mm256_add_epi64(sum64, sum32l);
+      const __m256i sum32h =
+          _mm256_cvtepu32_epi64(_mm256_extracti128_si256(sum32, 1));
+      sum64 = _mm256_add_epi64(sum64, sum32h);
+
+      // Process remaining pixels (modulo 32, since this path consumes 32
+      // pixels per vector iteration)
+      for (k = j; k < width; ++k) {
+        const int32_t e = (int32_t)(dat[k]) - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+    }
+  }
+
+  // Sum 4 values from sum64l and sum64h into err
+  int64_t sum[4];
+  yy_storeu_256(sum, sum64);
+  err += sum[0] + sum[1] + sum[2] + sum[3];
+  return err;
+}
+#endif // CONFIG_AV1_HIGHBITDEPTH
diff --git a/third_party/aom/av1/encoder/x86/pickrst_sse4.c b/third_party/aom/av1/encoder/x86/pickrst_sse4.c
new file mode 100644
index 0000000000..50db305802
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/pickrst_sse4.c
@@ -0,0 +1,1483 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h>
+#include <smmintrin.h>  // SSE4.1 intrinsics (_mm_cvtepu8_epi16 etc.)
+#include "aom_dsp/x86/mem_sse2.h"
+#include "aom_dsp/x86/synonyms.h"
+
+#include "config/av1_rtcd.h"
+#include "av1/common/restoration.h"
+#include "av1/encoder/pickrst.h"
+
+// Multiply-accumulate one 8-entry slice of the Wiener H matrix:
+// shuffle 16 bytes of src into interleaved pixel pairs, widen u8 -> u16,
+// then dst[0..7] += madd(kl, pairs) as i32.
+static INLINE void acc_stat_sse41(int32_t *dst, const uint8_t *src,
+                                  const __m128i *shuffle, const __m128i *kl) {
+  const __m128i pixels = _mm_shuffle_epi8(xx_loadu_128(src), *shuffle);
+  const __m128i lo = _mm_cvtepu8_epi16(pixels);
+  const __m128i hi = _mm_cvtepu8_epi16(_mm_srli_si128(pixels, 8));
+  const __m128i acc0 =
+      _mm_add_epi32(xx_loadu_128(dst), _mm_madd_epi16(*kl, lo));
+  const __m128i acc1 =
+      _mm_add_epi32(xx_loadu_128(dst + 4), _mm_madd_epi16(*kl, hi));
+  xx_storeu_128(dst, acc0);
+  xx_storeu_128(dst + 4, acc1);
+}
+
+// Accumulates one image row's contribution to the Wiener statistics for a
+// 7x7 window (8-bit path): pixel sum sumX, windowed sums sumY, the
+// cross-correlation M_int, and the auto-correlation H_int (stored 8-wide per
+// (k,l) pair for the SIMD inner kernel).  Pixels are consumed two at a time,
+// with a scalar-style tail for an odd h_end.
+static INLINE void acc_stat_win7_one_line_sse4_1(
+    const uint8_t *dgd, const uint8_t *src, int h_start, int h_end,
+    int dgd_stride, const __m128i *shuffle, int32_t *sumX,
+    int32_t sumY[WIENER_WIN][WIENER_WIN], int32_t M_int[WIENER_WIN][WIENER_WIN],
+    int32_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
+  const int wiener_win = 7;
+  int j, k, l;
+  // Main loop handles two pixels at a time
+  // We can assume that h_start is even, since it will always be aligned to
+  // a tile edge + some number of restoration units, and both of those will
+  // be 64-pixel aligned.
+  // However, at the edge of the image, h_end may be odd, so we need to handle
+  // that case correctly.
+  assert(h_start % 2 == 0);
+  const int h_end_even = h_end & ~1;
+  const int has_odd_pixel = h_end & 1;
+  for (j = h_start; j < h_end_even; j += 2) {
+    const uint8_t *dgd_ij = dgd + j;
+    const uint8_t X1 = src[j];
+    const uint8_t X2 = src[j + 1];
+    *sumX += X1 + X2;
+    for (k = 0; k < wiener_win; k++) {
+      const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        int32_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint8_t D1 = dgd_ijk[l];
+        const uint8_t D2 = dgd_ijk[l + 1];
+        sumY[k][l] += D1 + D2;
+        M_int[k][l] += D1 * X1 + D2 * X2;
+
+        // Broadcast the (D1, D2) pixel pair, widened to u16, as the
+        // multiplier for all seven window rows below.
+        const __m128i kl =
+            _mm_cvtepu8_epi16(_mm_set1_epi16(loadu_int16(dgd_ijk + l)));
+        acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle, &kl);
+      }
+    }
+  }
+  // If the width is odd, add in the final pixel
+  if (has_odd_pixel) {
+    const uint8_t *dgd_ij = dgd + j;
+    const uint8_t X1 = src[j];
+    *sumX += X1;
+    for (k = 0; k < wiener_win; k++) {
+      const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        int32_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint8_t D1 = dgd_ijk[l];
+        sumY[k][l] += D1;
+        M_int[k][l] += D1 * X1;
+
+        // The `acc_stat_sse41` function wants its input to have interleaved
+        // copies of two pixels, but we only have one. However, the pixels
+        // are (effectively) used as inputs to a multiply-accumulate.
+        // So if we set the extra pixel slot to 0, then it is effectively
+        // ignored.
+        const __m128i kl = _mm_cvtepu8_epi16(_mm_set1_epi16((int16_t)D1));
+        acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle, &kl);
+      }
+    }
+  }
+}
+
+// Computes the Wiener-filter statistics M (cross-correlation) and H
+// (auto-correlation) over the region for a 7x7 window, 8-bit path.  Rows are
+// processed in chunks of 64 with i32 accumulators that are flushed into i64
+// totals after each chunk, so the 32-bit sums cannot overflow.  Row
+// statistics may be computed on a downsampled set of rows and rescaled by
+// the sampling factor.  Final M/H are de-meaned around the region average.
+static INLINE void compute_stats_win7_opt_sse4_1(
+    const uint8_t *dgd, const uint8_t *src, int h_start, int h_end, int v_start,
+    int v_end, int dgd_stride, int src_stride, int64_t *M, int64_t *H,
+    int use_downsampled_wiener_stats) {
+  int i, j, k, l, m, n;
+  const int wiener_win = WIENER_WIN;
+  const int pixel_count = (h_end - h_start) * (v_end - v_start);
+  const int wiener_win2 = wiener_win * wiener_win;
+  const int wiener_halfwin = (wiener_win >> 1);
+  const uint8_t avg =
+      find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+  // Per-chunk i32 accumulators, per-row scratch, and i64 running totals.
+  int32_t M_int32[WIENER_WIN][WIENER_WIN] = { { 0 } };
+  int32_t M_int32_row[WIENER_WIN][WIENER_WIN] = { { 0 } };
+  int64_t M_int64[WIENER_WIN][WIENER_WIN] = { { 0 } };
+  int32_t H_int32[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
+  int32_t H_int32_row[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
+  int64_t H_int64[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
+  int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
+  int32_t sumX = 0;
+  // Top-left of the window centered on the first region pixel.
+  const uint8_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
+  int downsample_factor =
+      use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
+  int32_t sumX_row = 0;
+  int32_t sumY_row[WIENER_WIN][WIENER_WIN] = { { 0 } };
+
+  const __m128i shuffle = xx_loadu_128(g_shuffle_stats_data);
+  for (j = v_start; j < v_end; j += 64) {
+    const int vert_end = AOMMIN(64, v_end - j) + j;
+    for (i = j; i < vert_end; i = i + downsample_factor) {
+      // Shrink the step near the chunk edge so no rows are skipped.
+      if (use_downsampled_wiener_stats &&
+          (vert_end - i < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
+        downsample_factor = vert_end - i;
+      }
+      sumX_row = 0;
+      memset(sumY_row, 0, sizeof(int32_t) * WIENER_WIN * WIENER_WIN);
+      memset(M_int32_row, 0, sizeof(int32_t) * WIENER_WIN * WIENER_WIN);
+      memset(H_int32_row, 0, sizeof(int32_t) * WIENER_WIN2 * (WIENER_WIN * 8));
+      acc_stat_win7_one_line_sse4_1(
+          dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
+          dgd_stride, &shuffle, &sumX_row, sumY_row, M_int32_row, H_int32_row);
+      sumX += sumX_row * downsample_factor;
+      // Scale M matrix based on the downsampling factor
+      for (k = 0; k < wiener_win; ++k) {
+        for (l = 0; l < wiener_win; ++l) {
+          sumY[k][l] += (sumY_row[k][l] * downsample_factor);
+          M_int32[k][l] += (M_int32_row[k][l] * downsample_factor);
+        }
+      }
+      // Scale H matrix based on the downsampling factor
+      for (k = 0; k < WIENER_WIN2; ++k) {
+        for (l = 0; l < WIENER_WIN * 8; ++l) {
+          H_int32[k][l] += (H_int32_row[k][l] * downsample_factor);
+        }
+      }
+    }
+    // Flush the 64-row i32 chunk sums into the i64 totals and reset them.
+    for (k = 0; k < wiener_win; ++k) {
+      for (l = 0; l < wiener_win; ++l) {
+        M_int64[k][l] += M_int32[k][l];
+        M_int32[k][l] = 0;
+      }
+    }
+    for (k = 0; k < WIENER_WIN2; ++k) {
+      for (l = 0; l < WIENER_WIN * 8; ++l) {
+        H_int64[k][l] += H_int32[k][l];
+        H_int32[k][l] = 0;
+      }
+    }
+  }
+
+  // De-mean: statistics were accumulated on raw pixels; correct them as if
+  // avg had been subtracted from every sample.
+  const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
+  for (k = 0; k < wiener_win; k++) {
+    for (l = 0; l < wiener_win; l++) {
+      const int32_t idx0 = l * wiener_win + k;
+      M[idx0] =
+          M_int64[k][l] + (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]));
+      int64_t *H_ = H + idx0 * wiener_win2;
+      int64_t *H_int_ = &H_int64[idx0][0];
+      for (m = 0; m < wiener_win; m++) {
+        for (n = 0; n < wiener_win; n++) {
+          H_[m * wiener_win + n] = H_int_[n * 8 + m] + avg_square_sum -
+                                   (int64_t)avg * (sumY[k][l] + sumY[n][m]);
+        }
+      }
+    }
+  }
+}
+
+#if CONFIG_AV1_HIGHBITDEPTH
+// High-bitdepth multiply-accumulate for one 8-entry slice of the Wiener H
+// matrix: dst[0..7] += madd(dgd_ijkl, shuffled dgd pixel pairs), with the
+// i32 products widened to i64 before accumulation.
+static INLINE void acc_stat_highbd_sse41(int64_t *dst, const uint16_t *dgd,
+                                         const __m128i *shuffle,
+                                         const __m128i *dgd_ijkl) {
+  // Load 256 bits from dgd in two chunks
+  const __m128i s0l = xx_loadu_128(dgd)
+  const __m128i s0h = xx_loadu_128(dgd + 4);
+  // s0l = [7 6 5 4 3 2 1 0] as u16 values (dgd indices)
+  // s0h = [11 10 9 8 7 6 5 4] as u16 values (dgd indices)
+  // (Slightly strange order so we can apply the same shuffle to both halves)
+
+  // Shuffle the u16 values in each half (actually using 8-bit shuffle mask)
+  const __m128i s1l = _mm_shuffle_epi8(s0l, *shuffle);
+  const __m128i s1h = _mm_shuffle_epi8(s0h, *shuffle);
+  // s1l = [4 3 3 2 2 1 1 0] as u16 values (dgd indices)
+  // s1h = [8 7 7 6 6 5 5 4] as u16 values (dgd indices)
+
+  // Multiply s1 by dgd_ijkl resulting in 8x u32 values
+  // Horizontally add pairs of u32 resulting in 4x u32
+  const __m128i dl = _mm_madd_epi16(*dgd_ijkl, s1l);
+  const __m128i dh = _mm_madd_epi16(*dgd_ijkl, s1h);
+  // dl = [d c b a] as u32 values
+  // dh = [h g f e] as u32 values
+
+  // Add these 8x u32 results on to dst in four parts
+  const __m128i dll = _mm_cvtepu32_epi64(dl);
+  const __m128i dlh = _mm_cvtepu32_epi64(_mm_srli_si128(dl, 8));
+  const __m128i dhl = _mm_cvtepu32_epi64(dh);
+  const __m128i dhh = _mm_cvtepu32_epi64(_mm_srli_si128(dh, 8));
+  // dll = [b a] as u64 values, etc.
+
+  const __m128i rll = _mm_add_epi64(xx_loadu_128(dst), dll);
+  xx_storeu_128(dst, rll);
+  const __m128i rlh = _mm_add_epi64(xx_loadu_128(dst + 2), dlh);
+  xx_storeu_128(dst + 2, rlh);
+  const __m128i rhl = _mm_add_epi64(xx_loadu_128(dst + 4), dhl);
+  xx_storeu_128(dst + 4, rhl);
+  const __m128i rhh = _mm_add_epi64(xx_loadu_128(dst + 6), dhh);
+  xx_storeu_128(dst + 6, rhh);
+}
+
+// High-bitdepth version of acc_stat_win7_one_line: accumulates one image
+// row's contribution to the 7x7-window Wiener statistics (sumX, sumY, the
+// i64 cross-correlation M_int and auto-correlation H_int), consuming pixel
+// pairs with a trailing odd-pixel case.
+static INLINE void acc_stat_highbd_win7_one_line_sse4_1(
+    const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
+    int dgd_stride, const __m128i *shuffle, int32_t *sumX,
+    int32_t sumY[WIENER_WIN][WIENER_WIN], int64_t M_int[WIENER_WIN][WIENER_WIN],
+    int64_t H_int[WIENER_WIN2][WIENER_WIN * 8]) {
+  int j, k, l;
+  const int wiener_win = WIENER_WIN;
+  // Main loop handles two pixels at a time
+  // We can assume that h_start is even, since it will always be aligned to
+  // a tile edge + some number of restoration units, and both of those will
+  // be 64-pixel aligned.
+  // However, at the edge of the image, h_end may be odd, so we need to handle
+  // that case correctly.
+  assert(h_start % 2 == 0);
+  const int h_end_even = h_end & ~1;
+  const int has_odd_pixel = h_end & 1;
+  for (j = h_start; j < h_end_even; j += 2) {
+    const uint16_t X1 = src[j];
+    const uint16_t X2 = src[j + 1];
+    *sumX += X1 + X2;
+    const uint16_t *dgd_ij = dgd + j;
+    for (k = 0; k < wiener_win; k++) {
+      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint16_t D1 = dgd_ijk[l];
+        const uint16_t D2 = dgd_ijk[l + 1];
+        sumY[k][l] += D1 + D2;
+        M_int[k][l] += D1 * X1 + D2 * X2;
+
+        // Load two u16 values from dgd as a single u32
+        // Then broadcast to 4x u32 slots of a 128
+        const __m128i dgd_ijkl = _mm_set1_epi32(loadu_int32(dgd_ijk + l));
+        // dgd_ijkl = [y x y x y x y x] as u16
+
+        acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+      }
+    }
+  }
+  // If the width is odd, add in the final pixel
+  if (has_odd_pixel) {
+    const uint16_t X1 = src[j];
+    *sumX += X1;
+    const uint16_t *dgd_ij = dgd + j;
+    for (k = 0; k < wiener_win; k++) {
+      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint16_t D1 = dgd_ijk[l];
+        sumY[k][l] += D1;
+        M_int[k][l] += D1 * X1;
+
+        // The `acc_stat_highbd_sse41` function wants its input to have
+        // interleaved copies of two pixels, but we only have one. However, the
+        // pixels are (effectively) used as inputs to a multiply-accumulate. So
+        // if we set the extra pixel slot to 0, then it is effectively ignored.
+        const __m128i dgd_ijkl = _mm_set1_epi32((int)D1);
+
+        acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 5 * 8, dgd_ij + 5 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 6 * 8, dgd_ij + 6 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+      }
+    }
+  }
+}
+
+// High-bitdepth Wiener statistics for a 7x7 window.  M and H are
+// accumulated directly in i64 (no 32-bit chunk staging needed), de-meaned
+// around the region average, and scaled down by a bit-depth-dependent
+// divider (16 for 12-bit input, 4 for 10-bit, 1 for 8-bit).
+static INLINE void compute_stats_highbd_win7_opt_sse4_1(
+    const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
+    int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
+    int64_t *H, aom_bit_depth_t bit_depth) {
+  int i, j, k, l, m, n;
+  const int wiener_win = WIENER_WIN;
+  const int pixel_count = (h_end - h_start) * (v_end - v_start);
+  const int wiener_win2 = wiener_win * wiener_win;
+  const int wiener_halfwin = (wiener_win >> 1);
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
+  const uint16_t avg =
+      find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+  int64_t M_int[WIENER_WIN][WIENER_WIN] = { { 0 } };
+  int64_t H_int[WIENER_WIN2][WIENER_WIN * 8] = { { 0 } };
+  int32_t sumY[WIENER_WIN][WIENER_WIN] = { { 0 } };
+  int32_t sumX = 0;
+  // Top-left of the window centered on the first region pixel.
+  const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
+
+  // Load just half of the 256-bit shuffle control used for the AVX2 version
+  const __m128i shuffle = xx_loadu_128(g_shuffle_stats_highbd_data);
+  for (j = v_start; j < v_end; j += 64) {
+    const int vert_end = AOMMIN(64, v_end - j) + j;
+    for (i = j; i < vert_end; i++) {
+      acc_stat_highbd_win7_one_line_sse4_1(
+          dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
+          dgd_stride, &shuffle, &sumX, sumY, M_int, H_int);
+    }
+  }
+
+  // Normalize stats back toward 8-bit precision for the solver.
+  uint8_t bit_depth_divider = 1;
+  if (bit_depth == AOM_BITS_12)
+    bit_depth_divider = 16;
+  else if (bit_depth == AOM_BITS_10)
+    bit_depth_divider = 4;
+
+  // De-mean: statistics were accumulated on raw pixels; correct them as if
+  // avg had been subtracted from every sample.
+  const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
+  for (k = 0; k < wiener_win; k++) {
+    for (l = 0; l < wiener_win; l++) {
+      const int32_t idx0 = l * wiener_win + k;
+      M[idx0] = (M_int[k][l] +
+                 (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
+                bit_depth_divider;
+      int64_t *H_ = H + idx0 * wiener_win2;
+      int64_t *H_int_ = &H_int[idx0][0];
+      for (m = 0; m < wiener_win; m++) {
+        for (n = 0; n < wiener_win; n++) {
+          H_[m * wiener_win + n] =
+              (H_int_[n * 8 + m] +
+               (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
+              bit_depth_divider;
+        }
+      }
+    }
+  }
+}
+
+// Accumulates one image row's worth of Wiener statistics for the 5x5
+// (chroma) window, high-bitdepth path.  Per pixel pair it updates sumX
+// (sum of source pixels), sumY (per-tap sums of degraded pixels), M_int
+// (degraded-times-source cross products) and the 8-wide interleaved H_int
+// auto-correlation accumulator consumed by
+// compute_stats_highbd_win5_opt_sse4_1().
+static INLINE void acc_stat_highbd_win5_one_line_sse4_1(
+    const uint16_t *dgd, const uint16_t *src, int h_start, int h_end,
+    int dgd_stride, const __m128i *shuffle, int32_t *sumX,
+    int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
+    int64_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
+    int64_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
+  int j, k, l;
+  const int wiener_win = WIENER_WIN_CHROMA;
+  // Main loop handles two pixels at a time
+  // We can assume that h_start is even, since it will always be aligned to
+  // a tile edge + some number of restoration units, and both of those will
+  // be 64-pixel aligned.
+  // However, at the edge of the image, h_end may be odd, so we need to handle
+  // that case correctly.
+  assert(h_start % 2 == 0);
+  const int h_end_even = h_end & ~1;
+  const int has_odd_pixel = h_end & 1;
+  for (j = h_start; j < h_end_even; j += 2) {
+    const uint16_t X1 = src[j];
+    const uint16_t X2 = src[j + 1];
+    *sumX += X1 + X2;
+    const uint16_t *dgd_ij = dgd + j;
+    for (k = 0; k < wiener_win; k++) {
+      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        // H_int rows are indexed transposed (l * wiener_win + k) to match
+        // the final output ordering in the caller.
+        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint16_t D1 = dgd_ijk[l];
+        const uint16_t D2 = dgd_ijk[l + 1];
+        sumY[k][l] += D1 + D2;
+        M_int[k][l] += D1 * X1 + D2 * X2;
+
+        // Load two u16 values from dgd as a single u32
+        // then broadcast to 4x u32 slots of a 128
+        const __m128i dgd_ijkl = _mm_set1_epi32(loadu_int32(dgd_ijk + l));
+        // dgd_ijkl = [y x y x y x y x] as u16
+
+        acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+      }
+    }
+  }
+  // If the width is odd, add in the final pixel
+  if (has_odd_pixel) {
+    const uint16_t X1 = src[j];
+    *sumX += X1;
+    const uint16_t *dgd_ij = dgd + j;
+    for (k = 0; k < wiener_win; k++) {
+      const uint16_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        int64_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint16_t D1 = dgd_ijk[l];
+        sumY[k][l] += D1;
+        M_int[k][l] += D1 * X1;
+
+        // The `acc_stat_highbd_sse41` function wants its input to have
+        // interleaved copies of two pixels, but we only have one. However, the
+        // pixels are (effectively) used as inputs to a multiply-accumulate. So
+        // if we set the extra pixel slot to 0, then it is effectively ignored.
+        const __m128i dgd_ijkl = _mm_set1_epi32((int)D1);
+
+        acc_stat_highbd_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+        acc_stat_highbd_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle,
+                              &dgd_ijkl);
+      }
+    }
+  }
+}
+
+// Computes the Wiener filter statistics M (cross-correlation) and H
+// (auto-correlation) for the 5x5 chroma window over high-bitdepth data,
+// in the region [h_start, h_end) x [v_start, v_end).  Output layout and
+// normalization match av1_compute_stats_highbd_c().
+static INLINE void compute_stats_highbd_win5_opt_sse4_1(
+    const uint8_t *dgd8, const uint8_t *src8, int h_start, int h_end,
+    int v_start, int v_end, int dgd_stride, int src_stride, int64_t *M,
+    int64_t *H, aom_bit_depth_t bit_depth) {
+  int i, j, k, l, m, n;
+  const int wiener_win = WIENER_WIN_CHROMA;
+  const int pixel_count = (h_end - h_start) * (v_end - v_start);
+  const int wiener_win2 = wiener_win * wiener_win;
+  const int wiener_halfwin = (wiener_win >> 1);
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dgd = CONVERT_TO_SHORTPTR(dgd8);
+  const uint16_t avg =
+      find_average_highbd(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+  // Raw (not mean-subtracted) accumulators; the mean is folded out at the
+  // end.  H_int uses the 8-wide interleaved layout written by
+  // acc_stat_highbd_sse41 and de-interleaved below (H_int_[n * 8 + m]).
+  int64_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
+  int64_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
+  int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
+  int32_t sumX = 0;
+  const uint16_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
+
+  // Load just half of the 256-bit shuffle control used for the AVX2 version
+  const __m128i shuffle = xx_loadu_128(g_shuffle_stats_highbd_data);
+  for (j = v_start; j < v_end; j += 64) {
+    const int vert_end = AOMMIN(64, v_end - j) + j;
+    for (i = j; i < vert_end; i++) {
+      acc_stat_highbd_win5_one_line_sse4_1(
+          dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
+          dgd_stride, &shuffle, &sumX, sumY, M_int, H_int);
+    }
+  }
+
+  // Scale the statistics down for 10-/12-bit input, mirroring the divisor
+  // used by av1_compute_stats_highbd_c().
+  uint8_t bit_depth_divider = 1;
+  if (bit_depth == AOM_BITS_12)
+    bit_depth_divider = 16;
+  else if (bit_depth == AOM_BITS_10)
+    bit_depth_divider = 4;
+
+  // Fold the mean out of the raw sums using
+  //   sum((d - avg)(s - avg)) = sum(d*s) - avg*(sum(d) + sum(s)) + avg^2*N.
+  // M and H are emitted transposed (idx0 = l * wiener_win + k) to match the
+  // C reference layout.
+  const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
+  for (k = 0; k < wiener_win; k++) {
+    for (l = 0; l < wiener_win; l++) {
+      const int32_t idx0 = l * wiener_win + k;
+      M[idx0] = (M_int[k][l] +
+                 (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]))) /
+                bit_depth_divider;
+      int64_t *H_ = H + idx0 * wiener_win2;
+      int64_t *H_int_ = &H_int[idx0][0];
+      for (m = 0; m < wiener_win; m++) {
+        for (n = 0; n < wiener_win; n++) {
+          H_[m * wiener_win + n] =
+              (H_int_[n * 8 + m] +
+               (avg_square_sum - (int64_t)avg * (sumY[k][l] + sumY[n][m]))) /
+              bit_depth_divider;
+        }
+      }
+    }
+  }
+}
+
+void av1_compute_stats_highbd_sse4_1(int wiener_win, const uint8_t *dgd8,
+ const uint8_t *src8, int h_start,
+ int h_end, int v_start, int v_end,
+ int dgd_stride, int src_stride, int64_t *M,
+ int64_t *H, aom_bit_depth_t bit_depth) {
+ if (wiener_win == WIENER_WIN) {
+ compute_stats_highbd_win7_opt_sse4_1(dgd8, src8, h_start, h_end, v_start,
+ v_end, dgd_stride, src_stride, M, H,
+ bit_depth);
+ } else if (wiener_win == WIENER_WIN_CHROMA) {
+ compute_stats_highbd_win5_opt_sse4_1(dgd8, src8, h_start, h_end, v_start,
+ v_end, dgd_stride, src_stride, M, H,
+ bit_depth);
+ } else {
+ av1_compute_stats_highbd_c(wiener_win, dgd8, src8, h_start, h_end, v_start,
+ v_end, dgd_stride, src_stride, M, H, bit_depth);
+ }
+}
+#endif // CONFIG_AV1_HIGHBITDEPTH
+
+// Accumulates one image row's worth of Wiener statistics for the 5x5
+// (chroma) window, 8-bit path, two pixels per iteration.  Outputs are
+// 32-bit accumulators; the caller is responsible for flushing them into
+// 64-bit totals often enough that they cannot overflow.
+static INLINE void acc_stat_win5_one_line_sse4_1(
+    const uint8_t *dgd, const uint8_t *src, int h_start, int h_end,
+    int dgd_stride, const __m128i *shuffle, int32_t *sumX,
+    int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
+    int32_t M_int[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA],
+    int32_t H_int[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8]) {
+  const int wiener_win = WIENER_WIN_CHROMA;
+  int j, k, l;
+  // Main loop handles two pixels at a time
+  // We can assume that h_start is even, since it will always be aligned to
+  // a tile edge + some number of restoration units, and both of those will
+  // be 64-pixel aligned.
+  // However, at the edge of the image, h_end may be odd, so we need to handle
+  // that case correctly.
+  assert(h_start % 2 == 0);
+  const int h_end_even = h_end & ~1;
+  const int has_odd_pixel = h_end & 1;
+  for (j = h_start; j < h_end_even; j += 2) {
+    const uint8_t *dgd_ij = dgd + j;
+    const uint8_t X1 = src[j];
+    const uint8_t X2 = src[j + 1];
+    *sumX += X1 + X2;
+    for (k = 0; k < wiener_win; k++) {
+      const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        // H_int rows are indexed transposed (l * wiener_win + k) to match
+        // the final output ordering in the caller.
+        int32_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint8_t D1 = dgd_ijk[l];
+        const uint8_t D2 = dgd_ijk[l + 1];
+        sumY[k][l] += D1 + D2;
+        M_int[k][l] += D1 * X1 + D2 * X2;
+
+        // Broadcast the two neighbouring dgd pixels, zero-extended to u16,
+        // into all lanes for the multiply-accumulate helper.
+        const __m128i kl =
+            _mm_cvtepu8_epi16(_mm_set1_epi16(loadu_int16(dgd_ijk + l)));
+        acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
+      }
+    }
+  }
+  // If the width is odd, add in the final pixel
+  if (has_odd_pixel) {
+    const uint8_t *dgd_ij = dgd + j;
+    const uint8_t X1 = src[j];
+    *sumX += X1;
+    for (k = 0; k < wiener_win; k++) {
+      const uint8_t *dgd_ijk = dgd_ij + k * dgd_stride;
+      for (l = 0; l < wiener_win; l++) {
+        int32_t *H_ = &H_int[(l * wiener_win + k)][0];
+        const uint8_t D1 = dgd_ijk[l];
+        sumY[k][l] += D1;
+        M_int[k][l] += D1 * X1;
+
+        // The `acc_stat_sse41` function wants its input to have interleaved
+        // copies of two pixels, but we only have one. However, the pixels
+        // are (effectively) used as inputs to a multiply-accumulate.
+        // So if we set the extra pixel slot to 0, then it is effectively
+        // ignored.
+        const __m128i kl = _mm_cvtepu8_epi16(_mm_set1_epi16((int16_t)D1));
+        acc_stat_sse41(H_ + 0 * 8, dgd_ij + 0 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 1 * 8, dgd_ij + 1 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 2 * 8, dgd_ij + 2 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 3 * 8, dgd_ij + 3 * dgd_stride, shuffle, &kl);
+        acc_stat_sse41(H_ + 4 * 8, dgd_ij + 4 * dgd_stride, shuffle, &kl);
+      }
+    }
+  }
+}
+
+// Computes Wiener statistics M and H for the 5x5 (chroma) window over
+// 8-bit data.  Row statistics are gathered in 32-bit accumulators
+// (M_int32/H_int32) and flushed into 64-bit totals every 64 rows so the
+// 32-bit intermediates cannot overflow.  With
+// use_downsampled_wiener_stats set, only every
+// WIENER_STATS_DOWNSAMPLE_FACTOR-th row is visited and its contribution is
+// scaled up by the downsampling factor.
+static INLINE void compute_stats_win5_opt_sse4_1(
+    const uint8_t *dgd, const uint8_t *src, int h_start, int h_end, int v_start,
+    int v_end, int dgd_stride, int src_stride, int64_t *M, int64_t *H,
+    int use_downsampled_wiener_stats) {
+  int i, j, k, l, m, n;
+  const int wiener_win = WIENER_WIN_CHROMA;
+  const int pixel_count = (h_end - h_start) * (v_end - v_start);
+  const int wiener_win2 = wiener_win * wiener_win;
+  const int wiener_halfwin = (wiener_win >> 1);
+  const uint8_t avg =
+      find_average(dgd, h_start, h_end, v_start, v_end, dgd_stride);
+
+  // *_row hold a single (possibly downsampled) row, *_int32 a 64-row chunk,
+  // and *_int64 the running full-frame totals.
+  int32_t M_int32[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
+  int32_t M_int32_row[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
+  int64_t M_int64[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
+  int32_t H_int32[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
+  int32_t H_int32_row[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
+  int64_t H_int64[WIENER_WIN2_CHROMA][WIENER_WIN_CHROMA * 8] = { { 0 } };
+  int32_t sumY[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
+  int32_t sumX = 0;
+  const uint8_t *dgd_win = dgd - wiener_halfwin * dgd_stride - wiener_halfwin;
+  int downsample_factor =
+      use_downsampled_wiener_stats ? WIENER_STATS_DOWNSAMPLE_FACTOR : 1;
+  int32_t sumX_row = 0;
+  int32_t sumY_row[WIENER_WIN_CHROMA][WIENER_WIN_CHROMA] = { { 0 } };
+
+  const __m128i shuffle = xx_loadu_128(g_shuffle_stats_data);
+  for (j = v_start; j < v_end; j += 64) {
+    const int vert_end = AOMMIN(64, v_end - j) + j;
+    for (i = j; i < vert_end; i = i + downsample_factor) {
+      // Near the bottom of a chunk there may be fewer rows left than the
+      // nominal factor; scale by the actual number of remaining rows.
+      if (use_downsampled_wiener_stats &&
+          (vert_end - i < WIENER_STATS_DOWNSAMPLE_FACTOR)) {
+        downsample_factor = vert_end - i;
+      }
+      sumX_row = 0;
+      memset(sumY_row, 0,
+             sizeof(int32_t) * WIENER_WIN_CHROMA * WIENER_WIN_CHROMA);
+      memset(M_int32_row, 0,
+             sizeof(int32_t) * WIENER_WIN_CHROMA * WIENER_WIN_CHROMA);
+      memset(H_int32_row, 0,
+             sizeof(int32_t) * WIENER_WIN2_CHROMA * (WIENER_WIN_CHROMA * 8));
+      acc_stat_win5_one_line_sse4_1(
+          dgd_win + i * dgd_stride, src + i * src_stride, h_start, h_end,
+          dgd_stride, &shuffle, &sumX_row, sumY_row, M_int32_row, H_int32_row);
+      sumX += sumX_row * downsample_factor;
+      // Scale M matrix based on the downsampling factor
+      for (k = 0; k < wiener_win; ++k) {
+        for (l = 0; l < wiener_win; ++l) {
+          sumY[k][l] += (sumY_row[k][l] * downsample_factor);
+          M_int32[k][l] += (M_int32_row[k][l] * downsample_factor);
+        }
+      }
+      // Scale H matrix based on the downsampling factor
+      for (k = 0; k < WIENER_WIN_CHROMA * WIENER_WIN_CHROMA; ++k) {
+        for (l = 0; l < WIENER_WIN_CHROMA * 8; ++l) {
+          H_int32[k][l] += (H_int32_row[k][l] * downsample_factor);
+        }
+      }
+    }
+    // Flush the 32-bit chunk accumulators into the 64-bit totals before
+    // the next 64-row chunk, so they cannot overflow.
+    for (k = 0; k < wiener_win; ++k) {
+      for (l = 0; l < wiener_win; ++l) {
+        M_int64[k][l] += M_int32[k][l];
+        M_int32[k][l] = 0;
+      }
+    }
+    for (k = 0; k < WIENER_WIN_CHROMA * WIENER_WIN_CHROMA; ++k) {
+      for (l = 0; l < WIENER_WIN_CHROMA * 8; ++l) {
+        H_int64[k][l] += H_int32[k][l];
+        H_int32[k][l] = 0;
+      }
+    }
+  }
+
+  // Fold the mean out of the raw sums using
+  //   sum((d - avg)(s - avg)) = sum(d*s) - avg*(sum(d) + sum(s)) + avg^2*N.
+  // M and H are emitted transposed (idx0 = l * wiener_win + k) to match the
+  // C reference layout.
+  const int64_t avg_square_sum = (int64_t)avg * (int64_t)avg * pixel_count;
+  for (k = 0; k < wiener_win; k++) {
+    for (l = 0; l < wiener_win; l++) {
+      const int32_t idx0 = l * wiener_win + k;
+      M[idx0] =
+          M_int64[k][l] + (avg_square_sum - (int64_t)avg * (sumX + sumY[k][l]));
+      int64_t *H_ = H + idx0 * wiener_win2;
+      int64_t *H_int_ = &H_int64[idx0][0];
+      for (m = 0; m < wiener_win; m++) {
+        for (n = 0; n < wiener_win; n++) {
+          H_[m * wiener_win + n] = H_int_[n * 8 + m] + avg_square_sum -
+                                   (int64_t)avg * (sumY[k][l] + sumY[n][m]);
+        }
+      }
+    }
+  }
+}
+void av1_compute_stats_sse4_1(int wiener_win, const uint8_t *dgd,
+ const uint8_t *src, int16_t *dgd_avg,
+ int16_t *src_avg, int h_start, int h_end,
+ int v_start, int v_end, int dgd_stride,
+ int src_stride, int64_t *M, int64_t *H,
+ int use_downsampled_wiener_stats) {
+ if (wiener_win == WIENER_WIN) {
+ compute_stats_win7_opt_sse4_1(dgd, src, h_start, h_end, v_start, v_end,
+ dgd_stride, src_stride, M, H,
+ use_downsampled_wiener_stats);
+ } else if (wiener_win == WIENER_WIN_CHROMA) {
+ compute_stats_win5_opt_sse4_1(dgd, src, h_start, h_end, v_start, v_end,
+ dgd_stride, src_stride, M, H,
+ use_downsampled_wiener_stats);
+ } else {
+ av1_compute_stats_c(wiener_win, dgd, src, dgd_avg, src_avg, h_start, h_end,
+ v_start, v_end, dgd_stride, src_stride, M, H,
+ use_downsampled_wiener_stats);
+ }
+}
+
+static INLINE __m128i pair_set_epi16(int a, int b) {
+ return _mm_set1_epi32((int32_t)(((uint16_t)(a)) | (((uint32_t)(b)) << 16)));
+}
+
+// Returns the sum of squared errors between the source frame and the
+// self-guided restoration output reconstructed from `dat` plus the
+// projected filter residuals (flt0/flt1 weighted by xq), 8-bit path.
+// Three specializations: both SGR filters enabled, exactly one enabled,
+// and neither (plain SSE between dat and src).  Rows are processed 8 (or
+// 16) pixels at a time with a scalar loop for the remainder.
+int64_t av1_lowbd_pixel_proj_error_sse4_1(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
+  int i, j, k;
+  const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
+  const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
+  __m128i sum64 = _mm_setzero_si128();
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  int64_t err = 0;
+  if (params->r[0] > 0 && params->r[1] > 0) {
+    // Both filters active: error term is
+    //   round((xq[0]*(flt0-u) + xq[1]*(flt1-u)) >> shift) + dat - src.
+    __m128i xq_coeff = pair_set_epi16(xq[0], xq[1]);
+    for (i = 0; i < height; ++i) {
+      __m128i sum32 = _mm_setzero_si128();
+      for (j = 0; j <= width - 8; j += 8) {
+        const __m128i d0 = _mm_cvtepu8_epi16(xx_loadl_64(dat + j));
+        const __m128i s0 = _mm_cvtepu8_epi16(xx_loadl_64(src + j));
+        // Narrow the 32-bit filter outputs to 16 bits with saturation.
+        // NOTE(review): assumes flt values and flt - u fit in int16 given
+        // the SGRPROJ bit ranges -- confirm against the filter bounds.
+        const __m128i flt0_16b =
+            _mm_packs_epi32(xx_loadu_128(flt0 + j), xx_loadu_128(flt0 + j + 4));
+        const __m128i flt1_16b =
+            _mm_packs_epi32(xx_loadu_128(flt1 + j), xx_loadu_128(flt1 + j + 4));
+        const __m128i u0 = _mm_slli_epi16(d0, SGRPROJ_RST_BITS);
+        const __m128i flt0_0_sub_u = _mm_sub_epi16(flt0_16b, u0);
+        const __m128i flt1_0_sub_u = _mm_sub_epi16(flt1_16b, u0);
+        // madd pairs each (flt0-u, flt1-u) with (xq[0], xq[1]) and sums to
+        // 32 bits.
+        const __m128i v0 = _mm_madd_epi16(
+            xq_coeff, _mm_unpacklo_epi16(flt0_0_sub_u, flt1_0_sub_u));
+        const __m128i v1 = _mm_madd_epi16(
+            xq_coeff, _mm_unpackhi_epi16(flt0_0_sub_u, flt1_0_sub_u));
+        const __m128i vr0 = _mm_srai_epi32(_mm_add_epi32(v0, rounding), shift);
+        const __m128i vr1 = _mm_srai_epi32(_mm_add_epi32(v1, rounding), shift);
+        const __m128i e0 =
+            _mm_sub_epi16(_mm_add_epi16(_mm_packs_epi32(vr0, vr1), d0), s0);
+        const __m128i err0 = _mm_madd_epi16(e0, e0);
+        sum32 = _mm_add_epi32(sum32, err0);
+      }
+      // Scalar tail for widths not a multiple of 8.
+      for (k = j; k < width; ++k) {
+        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
+        int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
+        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+      flt0 += flt0_stride;
+      flt1 += flt1_stride;
+      // Widen the per-row 32-bit partial sums to 64 bits before they can
+      // overflow across rows.
+      const __m128i sum64_0 = _mm_cvtepi32_epi64(sum32);
+      const __m128i sum64_1 = _mm_cvtepi32_epi64(_mm_srli_si128(sum32, 8));
+      sum64 = _mm_add_epi64(sum64, sum64_0);
+      sum64 = _mm_add_epi64(sum64, sum64_1);
+    }
+  } else if (params->r[0] > 0 || params->r[1] > 0) {
+    // Exactly one filter active: fold the `-xq * u` term into the second
+    // madd coefficient so a single madd computes xq*flt - xq*(dat<<bits).
+    const int xq_active = (params->r[0] > 0) ? xq[0] : xq[1];
+    const __m128i xq_coeff =
+        pair_set_epi16(xq_active, -xq_active * (1 << SGRPROJ_RST_BITS));
+    const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
+    const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
+    for (i = 0; i < height; ++i) {
+      __m128i sum32 = _mm_setzero_si128();
+      for (j = 0; j <= width - 8; j += 8) {
+        const __m128i d0 = _mm_cvtepu8_epi16(xx_loadl_64(dat + j));
+        const __m128i s0 = _mm_cvtepu8_epi16(xx_loadl_64(src + j));
+        const __m128i flt_16b =
+            _mm_packs_epi32(xx_loadu_128(flt + j), xx_loadu_128(flt + j + 4));
+        const __m128i v0 =
+            _mm_madd_epi16(xq_coeff, _mm_unpacklo_epi16(flt_16b, d0));
+        const __m128i v1 =
+            _mm_madd_epi16(xq_coeff, _mm_unpackhi_epi16(flt_16b, d0));
+        const __m128i vr0 = _mm_srai_epi32(_mm_add_epi32(v0, rounding), shift);
+        const __m128i vr1 = _mm_srai_epi32(_mm_add_epi32(v1, rounding), shift);
+        const __m128i e0 =
+            _mm_sub_epi16(_mm_add_epi16(_mm_packs_epi32(vr0, vr1), d0), s0);
+        const __m128i err0 = _mm_madd_epi16(e0, e0);
+        sum32 = _mm_add_epi32(sum32, err0);
+      }
+      for (k = j; k < width; ++k) {
+        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
+        int32_t v = xq_active * (flt[k] - u);
+        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+      flt += flt_stride;
+      const __m128i sum64_0 = _mm_cvtepi32_epi64(sum32);
+      const __m128i sum64_1 = _mm_cvtepi32_epi64(_mm_srli_si128(sum32, 8));
+      sum64 = _mm_add_epi64(sum64, sum64_0);
+      sum64 = _mm_add_epi64(sum64, sum64_1);
+    }
+  } else {
+    // No filters active: plain sum of squared differences between dat and
+    // src, 16 pixels per iteration.  Max per-pixel error is 255^2, so the
+    // 32-bit lanes are safe for the whole frame at practical sizes.
+    __m128i sum32 = _mm_setzero_si128();
+    for (i = 0; i < height; ++i) {
+      for (j = 0; j <= width - 16; j += 16) {
+        const __m128i d = xx_loadu_128(dat + j);
+        const __m128i s = xx_loadu_128(src + j);
+        const __m128i d0 = _mm_cvtepu8_epi16(d);
+        const __m128i d1 = _mm_cvtepu8_epi16(_mm_srli_si128(d, 8));
+        const __m128i s0 = _mm_cvtepu8_epi16(s);
+        const __m128i s1 = _mm_cvtepu8_epi16(_mm_srli_si128(s, 8));
+        const __m128i diff0 = _mm_sub_epi16(d0, s0);
+        const __m128i diff1 = _mm_sub_epi16(d1, s1);
+        const __m128i err0 = _mm_madd_epi16(diff0, diff0);
+        const __m128i err1 = _mm_madd_epi16(diff1, diff1);
+        sum32 = _mm_add_epi32(sum32, err0);
+        sum32 = _mm_add_epi32(sum32, err1);
+      }
+      for (k = j; k < width; ++k) {
+        const int32_t e = (int32_t)(dat[k]) - src[k];
+        err += ((int64_t)e * e);
+      }
+      dat += dat_stride;
+      src += src_stride;
+    }
+    const __m128i sum64_0 = _mm_cvtepi32_epi64(sum32);
+    const __m128i sum64_1 = _mm_cvtepi32_epi64(_mm_srli_si128(sum32, 8));
+    sum64 = _mm_add_epi64(sum64_0, sum64_1);
+  }
+  // Combine the two 64-bit vector lanes with the scalar-tail total.
+  int64_t sum[2];
+  xx_storeu_128(sum, sum64);
+  err += sum[0] + sum[1];
+  return err;
+}
+
+// When params->r[0] > 0 and params->r[1] > 0. In this case all elements of
+// C and H need to be computed.
+// Builds the normal-equations matrix H (2x2) and vector C (2) for the
+// least-squares fit of the SGR projection coefficients, 8-bit path.
+// NOTE(review): pixels are loaded 4 at a time with no tail loop, so this
+// assumes width is a multiple of 4 -- confirm with the callers.
+static AOM_INLINE void calc_proj_params_r0_r1_sse4_1(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  __m128i h00, h01, h11, c0, c1;
+  const __m128i zero = _mm_setzero_si128();
+  h01 = h11 = c0 = c1 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 4) {
+      // Zero-extend 4 bytes of dat/src to 32-bit lanes.
+      const __m128i u_load = _mm_cvtepu8_epi32(
+          _mm_cvtsi32_si128(*((int *)(dat + i * dat_stride + j))));
+      const __m128i s_load = _mm_cvtepu8_epi32(
+          _mm_cvtsi32_si128(*((int *)(src + i * src_stride + j))));
+      __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
+      __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
+      __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      // Work with residuals: s = src - dat, f1/f2 = flt - dat (all in the
+      // upscaled SGRPROJ domain).
+      s = _mm_sub_epi32(s, d);
+      f1 = _mm_sub_epi32(f1, d);
+      f2 = _mm_sub_epi32(f2, d);
+
+      // _mm_mul_epi32 multiplies the even 32-bit lanes to 64-bit products;
+      // shifting by 32 exposes the odd lanes for the second multiply.
+      const __m128i h00_even = _mm_mul_epi32(f1, f1);
+      const __m128i h00_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
+      h00 = _mm_add_epi64(h00, h00_even);
+      h00 = _mm_add_epi64(h00, h00_odd);
+
+      const __m128i h01_even = _mm_mul_epi32(f1, f2);
+      const __m128i h01_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f2, 32));
+      h01 = _mm_add_epi64(h01, h01_even);
+      h01 = _mm_add_epi64(h01, h01_odd);
+
+      const __m128i h11_even = _mm_mul_epi32(f2, f2);
+      const __m128i h11_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
+      h11 = _mm_add_epi64(h11, h11_even);
+      h11 = _mm_add_epi64(h11, h11_odd);
+
+      const __m128i c0_even = _mm_mul_epi32(f1, s);
+      const __m128i c0_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
+      c0 = _mm_add_epi64(c0, c0_even);
+      c0 = _mm_add_epi64(c0, c0_odd);
+
+      const __m128i c1_even = _mm_mul_epi32(f2, s);
+      const __m128i c1_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
+      c1 = _mm_add_epi64(c1, c1_even);
+      c1 = _mm_add_epi64(c1, c1_odd);
+    }
+  }
+
+  // Horizontal 64-bit reductions: pair the accumulators so one add folds
+  // both lanes of each.
+  __m128i c_low = _mm_unpacklo_epi64(c0, c1);
+  const __m128i c_high = _mm_unpackhi_epi64(c0, c1);
+  c_low = _mm_add_epi64(c_low, c_high);
+
+  __m128i h0x_low = _mm_unpacklo_epi64(h00, h01);
+  const __m128i h0x_high = _mm_unpackhi_epi64(h00, h01);
+  h0x_low = _mm_add_epi64(h0x_low, h0x_high);
+
+  // Using the symmetric properties of H, calculations of H[1][0] are not
+  // needed.
+  __m128i h1x_low = _mm_unpacklo_epi64(zero, h11);
+  const __m128i h1x_high = _mm_unpackhi_epi64(zero, h11);
+  h1x_low = _mm_add_epi64(h1x_low, h1x_high);
+
+  xx_storeu_128(C, c_low);
+  xx_storeu_128(H[0], h0x_low);
+  xx_storeu_128(H[1], h1x_low);
+
+  // Normalize by the pixel count.
+  H[0][0] /= size;
+  H[0][1] /= size;
+  H[1][1] /= size;
+
+  // Since H is a symmetric matrix
+  H[1][0] = H[0][1];
+  C[0] /= size;
+  C[1] /= size;
+}
+
+// When only params->r[0] > 0. In this case only H[0][0] and C[0] are
+// non-zero and need to be computed.
+// NOTE(review): loads 4 pixels per iteration with no tail loop, so width
+// is assumed to be a multiple of 4 -- confirm with the callers.
+static AOM_INLINE void calc_proj_params_r0_sse4_1(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  __m128i h00, c0;
+  const __m128i zero = _mm_setzero_si128();
+  c0 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 4) {
+      const __m128i u_load = _mm_cvtepu8_epi32(
+          _mm_cvtsi32_si128(*((int *)(dat + i * dat_stride + j))));
+      const __m128i s_load = _mm_cvtepu8_epi32(
+          _mm_cvtsi32_si128(*((int *)(src + i * src_stride + j))));
+      __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
+      __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      // Residuals relative to dat in the upscaled SGRPROJ domain.
+      s = _mm_sub_epi32(s, d);
+      f1 = _mm_sub_epi32(f1, d);
+
+      // Even/odd lane split for 32x32->64-bit multiplies.
+      const __m128i h00_even = _mm_mul_epi32(f1, f1);
+      const __m128i h00_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
+      h00 = _mm_add_epi64(h00, h00_even);
+      h00 = _mm_add_epi64(h00, h00_odd);
+
+      const __m128i c0_even = _mm_mul_epi32(f1, s);
+      const __m128i c0_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
+      c0 = _mm_add_epi64(c0, c0_even);
+      c0 = _mm_add_epi64(c0, c0_odd);
+    }
+  }
+  // Fold the two 64-bit lanes of each accumulator.
+  const __m128i h00_val = _mm_add_epi64(h00, _mm_srli_si128(h00, 8));
+
+  const __m128i c0_val = _mm_add_epi64(c0, _mm_srli_si128(c0, 8));
+
+  // Store {value, 0} so the unused H[0][1]/C[1] slots are zeroed.
+  const __m128i c = _mm_unpacklo_epi64(c0_val, zero);
+  const __m128i h0x = _mm_unpacklo_epi64(h00_val, zero);
+
+  xx_storeu_128(C, c);
+  xx_storeu_128(H[0], h0x);
+
+  H[0][0] /= size;
+  C[0] /= size;
+}
+
+// When only params->r[1] > 0. In this case only H[1][1] and C[1] are
+// non-zero and need to be computed.
+// NOTE(review): loads 4 pixels per iteration with no tail loop, so width
+// is assumed to be a multiple of 4 -- confirm with the callers.
+static AOM_INLINE void calc_proj_params_r1_sse4_1(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt1, int flt1_stride,
+    int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint8_t *src = src8;
+  const uint8_t *dat = dat8;
+  __m128i h11, c1;
+  const __m128i zero = _mm_setzero_si128();
+  c1 = h11 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 4) {
+      const __m128i u_load = _mm_cvtepu8_epi32(
+          _mm_cvtsi32_si128(*((int *)(dat + i * dat_stride + j))));
+      const __m128i s_load = _mm_cvtepu8_epi32(
+          _mm_cvtsi32_si128(*((int *)(src + i * src_stride + j))));
+      __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
+      __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      // Residuals relative to dat in the upscaled SGRPROJ domain.
+      s = _mm_sub_epi32(s, d);
+      f2 = _mm_sub_epi32(f2, d);
+
+      // Even/odd lane split for 32x32->64-bit multiplies.
+      const __m128i h11_even = _mm_mul_epi32(f2, f2);
+      const __m128i h11_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
+      h11 = _mm_add_epi64(h11, h11_even);
+      h11 = _mm_add_epi64(h11, h11_odd);
+
+      const __m128i c1_even = _mm_mul_epi32(f2, s);
+      const __m128i c1_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
+      c1 = _mm_add_epi64(c1, c1_even);
+      c1 = _mm_add_epi64(c1, c1_odd);
+    }
+  }
+
+  // Fold the two 64-bit lanes of each accumulator.
+  const __m128i h11_val = _mm_add_epi64(h11, _mm_srli_si128(h11, 8));
+
+  const __m128i c1_val = _mm_add_epi64(c1, _mm_srli_si128(c1, 8));
+
+  // Store {0, value} so the unused H[1][0]/C[0] slots are zeroed.
+  const __m128i c = _mm_unpacklo_epi64(zero, c1_val);
+  const __m128i h1x = _mm_unpacklo_epi64(zero, h11_val);
+
+  xx_storeu_128(C, c);
+  xx_storeu_128(H[1], h1x);
+
+  H[1][1] /= size;
+  C[1] /= size;
+}
+
+// SSE4.1 variant of av1_calc_proj_params_c.
+void av1_calc_proj_params_sse4_1(const uint8_t *src8, int width, int height,
+ int src_stride, const uint8_t *dat8,
+ int dat_stride, int32_t *flt0, int flt0_stride,
+ int32_t *flt1, int flt1_stride,
+ int64_t H[2][2], int64_t C[2],
+ const sgr_params_type *params) {
+ if ((params->r[0] > 0) && (params->r[1] > 0)) {
+ calc_proj_params_r0_r1_sse4_1(src8, width, height, src_stride, dat8,
+ dat_stride, flt0, flt0_stride, flt1,
+ flt1_stride, H, C);
+ } else if (params->r[0] > 0) {
+ calc_proj_params_r0_sse4_1(src8, width, height, src_stride, dat8,
+ dat_stride, flt0, flt0_stride, H, C);
+ } else if (params->r[1] > 0) {
+ calc_proj_params_r1_sse4_1(src8, width, height, src_stride, dat8,
+ dat_stride, flt1, flt1_stride, H, C);
+ }
+}
+
+// High-bitdepth variant of calc_proj_params_r0_r1_sse4_1: both SGR filters
+// active, so every element of H and C is computed.  Identical structure to
+// the 8-bit version except pixels are loaded as uint16 via
+// CONVERT_TO_SHORTPTR.
+// NOTE(review): loads 4 pixels per iteration with no tail loop, so width
+// is assumed to be a multiple of 4 -- confirm with the callers.
+static AOM_INLINE void calc_proj_params_r0_r1_high_bd_sse4_1(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int32_t *flt1, int flt1_stride, int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
+  __m128i h00, h01, h11, c0, c1;
+  const __m128i zero = _mm_setzero_si128();
+  h01 = h11 = c0 = c1 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 4) {
+      // Zero-extend 4 u16 pixels of dat/src to 32-bit lanes.
+      const __m128i u_load = _mm_cvtepu16_epi32(
+          _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
+      const __m128i s_load = _mm_cvtepu16_epi32(
+          _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
+      __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
+      __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
+      __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      // Residuals relative to dat in the upscaled SGRPROJ domain.
+      s = _mm_sub_epi32(s, d);
+      f1 = _mm_sub_epi32(f1, d);
+      f2 = _mm_sub_epi32(f2, d);
+
+      // Even/odd lane split for 32x32->64-bit multiplies.
+      const __m128i h00_even = _mm_mul_epi32(f1, f1);
+      const __m128i h00_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
+      h00 = _mm_add_epi64(h00, h00_even);
+      h00 = _mm_add_epi64(h00, h00_odd);
+
+      const __m128i h01_even = _mm_mul_epi32(f1, f2);
+      const __m128i h01_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f2, 32));
+      h01 = _mm_add_epi64(h01, h01_even);
+      h01 = _mm_add_epi64(h01, h01_odd);
+
+      const __m128i h11_even = _mm_mul_epi32(f2, f2);
+      const __m128i h11_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
+      h11 = _mm_add_epi64(h11, h11_even);
+      h11 = _mm_add_epi64(h11, h11_odd);
+
+      const __m128i c0_even = _mm_mul_epi32(f1, s);
+      const __m128i c0_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
+      c0 = _mm_add_epi64(c0, c0_even);
+      c0 = _mm_add_epi64(c0, c0_odd);
+
+      const __m128i c1_even = _mm_mul_epi32(f2, s);
+      const __m128i c1_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
+      c1 = _mm_add_epi64(c1, c1_even);
+      c1 = _mm_add_epi64(c1, c1_odd);
+    }
+  }
+
+  // Horizontal 64-bit reductions: pair the accumulators so one add folds
+  // both lanes of each.
+  __m128i c_low = _mm_unpacklo_epi64(c0, c1);
+  const __m128i c_high = _mm_unpackhi_epi64(c0, c1);
+  c_low = _mm_add_epi64(c_low, c_high);
+
+  __m128i h0x_low = _mm_unpacklo_epi64(h00, h01);
+  const __m128i h0x_high = _mm_unpackhi_epi64(h00, h01);
+  h0x_low = _mm_add_epi64(h0x_low, h0x_high);
+
+  // Using the symmetric properties of H, calculations of H[1][0] are not
+  // needed.
+  __m128i h1x_low = _mm_unpacklo_epi64(zero, h11);
+  const __m128i h1x_high = _mm_unpackhi_epi64(zero, h11);
+  h1x_low = _mm_add_epi64(h1x_low, h1x_high);
+
+  xx_storeu_128(C, c_low);
+  xx_storeu_128(H[0], h0x_low);
+  xx_storeu_128(H[1], h1x_low);
+
+  // Normalize by the pixel count.
+  H[0][0] /= size;
+  H[0][1] /= size;
+  H[1][1] /= size;
+
+  // Since H is a symmetric matrix
+  H[1][0] = H[0][1];
+  C[0] /= size;
+  C[1] /= size;
+}
+
+// When only params->r[0] > 0. In this case only H[0][0] and C[0] are
+// non-zero and need to be computed.
+// High-bitdepth variant: pixels are loaded as uint16 via
+// CONVERT_TO_SHORTPTR.
+// NOTE(review): loads 4 pixels per iteration with no tail loop, so width
+// is assumed to be a multiple of 4 -- confirm with the callers.
+static AOM_INLINE void calc_proj_params_r0_high_bd_sse4_1(
+    const uint8_t *src8, int width, int height, int src_stride,
+    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
+    int64_t H[2][2], int64_t C[2]) {
+  const int size = width * height;
+  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
+  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
+  __m128i h00, c0;
+  const __m128i zero = _mm_setzero_si128();
+  c0 = h00 = zero;
+
+  for (int i = 0; i < height; ++i) {
+    for (int j = 0; j < width; j += 4) {
+      const __m128i u_load = _mm_cvtepu16_epi32(
+          _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
+      const __m128i s_load = _mm_cvtepu16_epi32(
+          _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
+      __m128i f1 = _mm_loadu_si128((__m128i *)(flt0 + i * flt0_stride + j));
+      __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
+      __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
+      // Residuals relative to dat in the upscaled SGRPROJ domain.
+      s = _mm_sub_epi32(s, d);
+      f1 = _mm_sub_epi32(f1, d);
+
+      // Even/odd lane split for 32x32->64-bit multiplies.
+      const __m128i h00_even = _mm_mul_epi32(f1, f1);
+      const __m128i h00_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(f1, 32));
+      h00 = _mm_add_epi64(h00, h00_even);
+      h00 = _mm_add_epi64(h00, h00_odd);
+
+      const __m128i c0_even = _mm_mul_epi32(f1, s);
+      const __m128i c0_odd =
+          _mm_mul_epi32(_mm_srli_epi64(f1, 32), _mm_srli_epi64(s, 32));
+      c0 = _mm_add_epi64(c0, c0_even);
+      c0 = _mm_add_epi64(c0, c0_odd);
+    }
+  }
+  // Fold the two 64-bit lanes of each accumulator.
+  const __m128i h00_val = _mm_add_epi64(h00, _mm_srli_si128(h00, 8));
+
+  const __m128i c0_val = _mm_add_epi64(c0, _mm_srli_si128(c0, 8));
+
+  // Store {value, 0} so the unused H[0][1]/C[1] slots are zeroed.
+  const __m128i c = _mm_unpacklo_epi64(c0_val, zero);
+  const __m128i h0x = _mm_unpacklo_epi64(h00_val, zero);
+
+  xx_storeu_128(C, c);
+  xx_storeu_128(H[0], h0x);
+
+  H[0][0] /= size;
+  C[0] /= size;
+}
+
// When only params->r[1] > 0. In this case only H[1][1] and C[1] are
// non-zero and need to be computed.
static AOM_INLINE void calc_proj_params_r1_high_bd_sse4_1(
    const uint8_t *src8, int width, int height, int src_stride,
    const uint8_t *dat8, int dat_stride, int32_t *flt1, int flt1_stride,
    int64_t H[2][2], int64_t C[2]) {
  const int size = width * height;
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
  // Two-lane 64-bit running accumulators for H[1][1] and C[1].
  __m128i h11, c1;
  const __m128i zero = _mm_setzero_si128();
  c1 = h11 = zero;

  // NOTE(review): as in the r0 variant, width is assumed to be a multiple
  // of 4 (no scalar tail) -- confirm against callers.
  for (int i = 0; i < height; ++i) {
    for (int j = 0; j < width; j += 4) {
      // Load 4 u16 pixels from the degraded and source frames, widened to
      // i32 lanes.
      const __m128i u_load = _mm_cvtepu16_epi32(
          _mm_loadl_epi64((__m128i *)(dat + i * dat_stride + j)));
      const __m128i s_load = _mm_cvtepu16_epi32(
          _mm_loadl_epi64((__m128i *)(src + i * src_stride + j)));
      __m128i f2 = _mm_loadu_si128((__m128i *)(flt1 + i * flt1_stride + j));
      // Scale the pixels up into the SGRPROJ fixed-point domain.
      __m128i d = _mm_slli_epi32(u_load, SGRPROJ_RST_BITS);
      __m128i s = _mm_slli_epi32(s_load, SGRPROJ_RST_BITS);
      s = _mm_sub_epi32(s, d);    // s  = source - degraded
      f2 = _mm_sub_epi32(f2, d);  // f2 = filtered - degraded (basis vector)

      // H[1][1] += f2 . f2 via the even/odd lane split required by
      // _mm_mul_epi32 (which multiplies only the even i32 lanes).
      const __m128i h11_even = _mm_mul_epi32(f2, f2);
      const __m128i h11_odd =
          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(f2, 32));
      h11 = _mm_add_epi64(h11, h11_even);
      h11 = _mm_add_epi64(h11, h11_odd);

      // C[1] += f2 . s, same lane split.
      const __m128i c1_even = _mm_mul_epi32(f2, s);
      const __m128i c1_odd =
          _mm_mul_epi32(_mm_srli_epi64(f2, 32), _mm_srli_epi64(s, 32));
      c1 = _mm_add_epi64(c1, c1_even);
      c1 = _mm_add_epi64(c1, c1_odd);
    }
  }

  // Horizontal reduction of the two 64-bit lanes.
  const __m128i h11_val = _mm_add_epi64(h11, _mm_srli_si128(h11, 8));

  const __m128i c1_val = _mm_add_epi64(c1, _mm_srli_si128(c1, 8));

  // Pack as {0, value} so one store writes {C[0]=0, C[1]} and
  // {H[1][0]=0, H[1][1]}.
  const __m128i c = _mm_unpacklo_epi64(zero, c1_val);
  const __m128i h1x = _mm_unpacklo_epi64(zero, h11_val);

  xx_storeu_128(C, c);
  xx_storeu_128(H[1], h1x);

  // Normalize by the number of pixels.
  H[1][1] /= size;
  C[1] /= size;
}
+
+// SSE4.1 variant of av1_calc_proj_params_high_bd_c.
+void av1_calc_proj_params_high_bd_sse4_1(const uint8_t *src8, int width,
+ int height, int src_stride,
+ const uint8_t *dat8, int dat_stride,
+ int32_t *flt0, int flt0_stride,
+ int32_t *flt1, int flt1_stride,
+ int64_t H[2][2], int64_t C[2],
+ const sgr_params_type *params) {
+ if ((params->r[0] > 0) && (params->r[1] > 0)) {
+ calc_proj_params_r0_r1_high_bd_sse4_1(src8, width, height, src_stride, dat8,
+ dat_stride, flt0, flt0_stride, flt1,
+ flt1_stride, H, C);
+ } else if (params->r[0] > 0) {
+ calc_proj_params_r0_high_bd_sse4_1(src8, width, height, src_stride, dat8,
+ dat_stride, flt0, flt0_stride, H, C);
+ } else if (params->r[1] > 0) {
+ calc_proj_params_r1_high_bd_sse4_1(src8, width, height, src_stride, dat8,
+ dat_stride, flt1, flt1_stride, H, C);
+ }
+}
+
+#if CONFIG_AV1_HIGHBITDEPTH
// SSE4.1 implementation of av1_highbd_pixel_proj_error_c.  Returns the sum
// of squared error between the source and the restored block
//   dat + ((xq[0] * (flt0 - dat') + xq[1] * (flt1 - dat')) >> shift),
// where dat' = dat << SGRPROJ_RST_BITS, with dedicated paths for both, one,
// or neither of the two self-guided filters being enabled.
int64_t av1_highbd_pixel_proj_error_sse4_1(
    const uint8_t *src8, int width, int height, int src_stride,
    const uint8_t *dat8, int dat_stride, int32_t *flt0, int flt0_stride,
    int32_t *flt1, int flt1_stride, int xq[2], const sgr_params_type *params) {
  int i, j, k;
  const int32_t shift = SGRPROJ_RST_BITS + SGRPROJ_PRJ_BITS;
  const __m128i rounding = _mm_set1_epi32(1 << (shift - 1));
  __m128i sum64 = _mm_setzero_si128();
  const uint16_t *src = CONVERT_TO_SHORTPTR(src8);
  const uint16_t *dat = CONVERT_TO_SHORTPTR(dat8);
  int64_t err = 0;
  if (params->r[0] > 0 && params->r[1] > 0) {  // Both filters are enabled
    const __m128i xq0 = _mm_set1_epi32(xq[0]);
    const __m128i xq1 = _mm_set1_epi32(xq[1]);

    for (i = 0; i < height; ++i) {
      // Per-row i32 accumulator, flushed to the i64 accumulator after each
      // row to avoid 32-bit overflow.
      __m128i sum32 = _mm_setzero_si128();
      for (j = 0; j <= width - 8; j += 8) {
        // Load 8x pixels from source image
        const __m128i s0 = xx_loadu_128(src + j);
        // s0 = [7 6 5 4 3 2 1 0] as i16 (indices of src[])

        // Load 8x pixels from corrupted image
        const __m128i d0 = xx_loadu_128(dat + j);
        // d0 = [7 6 5 4 3 2 1 0] as i16 (indices of dat[])

        // Shift each pixel value up by SGRPROJ_RST_BITS
        const __m128i u0 = _mm_slli_epi16(d0, SGRPROJ_RST_BITS);

        // Split u0 into two halves and pad each from u16 to i32
        const __m128i u0l = _mm_cvtepu16_epi32(u0);
        const __m128i u0h = _mm_cvtepu16_epi32(_mm_srli_si128(u0, 8));
        // u0h = [7 6 5 4] as i32, u0l = [3 2 1 0] as i32, all dat[] indices

        // Load 8 pixels from first and second filtered images
        const __m128i flt0l = xx_loadu_128(flt0 + j);
        const __m128i flt0h = xx_loadu_128(flt0 + j + 4);
        const __m128i flt1l = xx_loadu_128(flt1 + j);
        const __m128i flt1h = xx_loadu_128(flt1 + j + 4);
        // flt0 = [7 6 5 4] [3 2 1 0] as i32 (indices of flt0+j)
        // flt1 = [7 6 5 4] [3 2 1 0] as i32 (indices of flt1+j)

        // Subtract shifted corrupt image from each filtered image
        // This gives our two basis vectors for the projection
        const __m128i flt0l_subu = _mm_sub_epi32(flt0l, u0l);
        const __m128i flt0h_subu = _mm_sub_epi32(flt0h, u0h);
        const __m128i flt1l_subu = _mm_sub_epi32(flt1l, u0l);
        const __m128i flt1h_subu = _mm_sub_epi32(flt1h, u0h);
        // flt?h_subu = [ f[7]-u[7] f[6]-u[6] f[5]-u[5] f[4]-u[4] ] as i32
        // flt?l_subu = [ f[3]-u[3] f[2]-u[2] f[1]-u[1] f[0]-u[0] ] as i32

        // Multiply each basis vector by the corresponding coefficient
        const __m128i v0l = _mm_mullo_epi32(flt0l_subu, xq0);
        const __m128i v0h = _mm_mullo_epi32(flt0h_subu, xq0);
        const __m128i v1l = _mm_mullo_epi32(flt1l_subu, xq1);
        const __m128i v1h = _mm_mullo_epi32(flt1h_subu, xq1);

        // Add together the contribution from each scaled basis vector
        const __m128i vl = _mm_add_epi32(v0l, v1l);
        const __m128i vh = _mm_add_epi32(v0h, v1h);

        // Right-shift v with appropriate rounding
        const __m128i vrl = _mm_srai_epi32(_mm_add_epi32(vl, rounding), shift);
        const __m128i vrh = _mm_srai_epi32(_mm_add_epi32(vh, rounding), shift);

        // Saturate each i32 value to i16 and combine lower and upper halves
        const __m128i vr = _mm_packs_epi32(vrl, vrh);

        // Add twin-subspace-sgr-filter to corrupt image then subtract source
        const __m128i e0 = _mm_sub_epi16(_mm_add_epi16(vr, d0), s0);

        // Calculate squared error and add adjacent values
        const __m128i err0 = _mm_madd_epi16(e0, e0);

        sum32 = _mm_add_epi32(sum32, err0);
      }

      // Widen the row's i32 partial sums to i64 and accumulate.
      const __m128i sum32l = _mm_cvtepu32_epi64(sum32);
      sum64 = _mm_add_epi64(sum64, sum32l);
      const __m128i sum32h = _mm_cvtepu32_epi64(_mm_srli_si128(sum32, 8));
      sum64 = _mm_add_epi64(sum64, sum32h);

      // Process remaining pixels in this row (modulo 8)
      for (k = j; k < width; ++k) {
        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
        int32_t v = xq[0] * (flt0[k] - u) + xq[1] * (flt1[k] - u);
        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
        err += ((int64_t)e * e);
      }
      dat += dat_stride;
      src += src_stride;
      flt0 += flt0_stride;
      flt1 += flt1_stride;
    }
  } else if (params->r[0] > 0 || params->r[1] > 0) {  // Only one filter enabled
    const int32_t xq_on = (params->r[0] > 0) ? xq[0] : xq[1];
    const __m128i xq_active = _mm_set1_epi32(xq_on);
    // xq_inactive folds the (dat << SGRPROJ_RST_BITS) subtraction into the
    // multiply: flt * xq + dat * (-xq << SGRPROJ_RST_BITS)
    //         = xq * (flt - (dat << SGRPROJ_RST_BITS)).
    const __m128i xq_inactive =
        _mm_set1_epi32(-xq_on * (1 << SGRPROJ_RST_BITS));
    const int32_t *flt = (params->r[0] > 0) ? flt0 : flt1;
    const int flt_stride = (params->r[0] > 0) ? flt0_stride : flt1_stride;
    for (i = 0; i < height; ++i) {
      // Per-row i32 accumulator, flushed per row as above.
      __m128i sum32 = _mm_setzero_si128();
      for (j = 0; j <= width - 8; j += 8) {
        // Load 8x pixels from source image
        const __m128i s0 = xx_loadu_128(src + j);
        // s0 = [7 6 5 4 3 2 1 0] as u16 (indices of src[])

        // Load 8x pixels from corrupted image and pad each u16 to i32
        const __m128i d0 = xx_loadu_128(dat + j);
        const __m128i d0h = _mm_cvtepu16_epi32(_mm_srli_si128(d0, 8));
        const __m128i d0l = _mm_cvtepu16_epi32(d0);
        // d0h, d0l = [7 6 5 4], [3 2 1 0] as u32 (indices of dat[])

        // Load 8 pixels from the filtered image
        const __m128i flth = xx_loadu_128(flt + j + 4);
        const __m128i fltl = xx_loadu_128(flt + j);
        // flth, fltl = [7 6 5 4], [3 2 1 0] as i32 (indices of flt+j)

        const __m128i flth_xq = _mm_mullo_epi32(flth, xq_active);
        const __m128i fltl_xq = _mm_mullo_epi32(fltl, xq_active);
        const __m128i d0h_xq = _mm_mullo_epi32(d0h, xq_inactive);
        const __m128i d0l_xq = _mm_mullo_epi32(d0l, xq_inactive);

        const __m128i vh = _mm_add_epi32(flth_xq, d0h_xq);
        const __m128i vl = _mm_add_epi32(fltl_xq, d0l_xq);
        // vh = [ xq0(f[7]-d[7]) xq0(f[6]-d[6]) xq0(f[5]-d[5]) xq0(f[4]-d[4]) ]
        // vl = [ xq0(f[3]-d[3]) xq0(f[2]-d[2]) xq0(f[1]-d[1]) xq0(f[0]-d[0]) ]

        // Shift this down with appropriate rounding
        const __m128i vrh = _mm_srai_epi32(_mm_add_epi32(vh, rounding), shift);
        const __m128i vrl = _mm_srai_epi32(_mm_add_epi32(vl, rounding), shift);

        // Saturate vr0 and vr1 from i32 to i16 then pack together
        const __m128i vr = _mm_packs_epi32(vrl, vrh);

        // Subtract twin-subspace-sgr filtered from source image to get error
        const __m128i e0 = _mm_sub_epi16(_mm_add_epi16(vr, d0), s0);

        // Calculate squared error and add adjacent values
        const __m128i err0 = _mm_madd_epi16(e0, e0);

        sum32 = _mm_add_epi32(sum32, err0);
      }

      const __m128i sum32l = _mm_cvtepu32_epi64(sum32);
      sum64 = _mm_add_epi64(sum64, sum32l);
      const __m128i sum32h = _mm_cvtepu32_epi64(_mm_srli_si128(sum32, 8));
      sum64 = _mm_add_epi64(sum64, sum32h);

      // Process remaining pixels in this row (modulo 8)
      for (k = j; k < width; ++k) {
        const int32_t u = (int32_t)(dat[k] << SGRPROJ_RST_BITS);
        int32_t v = xq_on * (flt[k] - u);
        const int32_t e = ROUND_POWER_OF_TWO(v, shift) + dat[k] - src[k];
        err += ((int64_t)e * e);
      }
      dat += dat_stride;
      src += src_stride;
      flt += flt_stride;
    }
  } else {  // Neither filter is enabled
    // Plain SSE over dat vs src; no projection term is applied.
    for (i = 0; i < height; ++i) {
      __m128i sum32 = _mm_setzero_si128();
      for (j = 0; j <= width - 16; j += 16) {
        // Load 2x8 u16 from source image
        const __m128i s0 = xx_loadu_128(src + j);
        const __m128i s1 = xx_loadu_128(src + j + 8);
        // Load 2x8 u16 from corrupted image
        const __m128i d0 = xx_loadu_128(dat + j);
        const __m128i d1 = xx_loadu_128(dat + j + 8);

        // Subtract corrupted image from source image
        const __m128i diff0 = _mm_sub_epi16(d0, s0);
        const __m128i diff1 = _mm_sub_epi16(d1, s1);

        // Square error and add adjacent values
        const __m128i err0 = _mm_madd_epi16(diff0, diff0);
        const __m128i err1 = _mm_madd_epi16(diff1, diff1);

        sum32 = _mm_add_epi32(sum32, err0);
        sum32 = _mm_add_epi32(sum32, err1);
      }

      const __m128i sum32l = _mm_cvtepu32_epi64(sum32);
      sum64 = _mm_add_epi64(sum64, sum32l);
      const __m128i sum32h = _mm_cvtepu32_epi64(_mm_srli_si128(sum32, 8));
      sum64 = _mm_add_epi64(sum64, sum32h);

      // Process remaining pixels in this row (modulo 16)
      for (k = j; k < width; ++k) {
        const int32_t e = (int32_t)(dat[k]) - src[k];
        err += ((int64_t)e * e);
      }
      dat += dat_stride;
      src += src_stride;
    }
  }

  // Sum 4 values from sum64l and sum64h into err
  int64_t sum[2];
  xx_storeu_128(sum, sum64);
  err += sum[0] + sum[1];
  return err;
}
+#endif // CONFIG_AV1_HIGHBITDEPTH
diff --git a/third_party/aom/av1/encoder/x86/rdopt_avx2.c b/third_party/aom/av1/encoder/x86/rdopt_avx2.c
new file mode 100644
index 0000000000..a0ab3940c0
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/rdopt_avx2.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <immintrin.h>
+#include "aom_dsp/x86/mem_sse2.h"
+#include "aom_dsp/x86/synonyms_avx2.h"
+
+#include "config/av1_rtcd.h"
+#include "av1/encoder/rdopt.h"
+
// Process horizontal and vertical correlations in a 4x4 block of pixels.
// We actually use the 4x4 pixels to calculate correlations corresponding to
// the top-left 3x3 pixels, so this function must be called with 1x1 overlap,
// moving the window along/down by 3 pixels at a time.
INLINE static void horver_correlation_4x4(const int16_t *diff, int stride,
                                          __m256i *xy_sum_32,
                                          __m256i *xz_sum_32, __m256i *x_sum_32,
                                          __m256i *x2_sum_32) {
  // Pixels in this 4x4   [ a b c d ]
  // are referred to as:  [ e f g h ]
  //                      [ i j k l ]
  //                      [ m n o p ]

  const __m256i pixels = _mm256_set_epi64x(
      loadu_int64(&diff[0 * stride]), loadu_int64(&diff[1 * stride]),
      loadu_int64(&diff[2 * stride]), loadu_int64(&diff[3 * stride]));
  // pixels = [d c b a h g f e] [l k j i p o n m] as i16

  // Shifting each 64-bit row left by one i16 lane aligns every pixel with
  // its left neighbour and zeroes the first column, which restricts the
  // sums below to the top-left 3x3 window.
  const __m256i slli = _mm256_slli_epi64(pixels, 16);
  // slli = [c b a 0 g f e 0] [k j i 0 o n m 0] as i16

  // Horizontal (x*y) products: each pixel times its right neighbour.
  const __m256i madd_xy = _mm256_madd_epi16(pixels, slli);
  // madd_xy = [bc+cd ab fg+gh ef] [jk+kl ij no+op mn] as i32
  *xy_sum_32 = _mm256_add_epi32(*xy_sum_32, madd_xy);

  // Permute control [3 2] [1 0] => [2 1] [0 0], 0b10010000 = 0x90
  const __m256i perm = _mm256_permute4x64_epi64(slli, 0x90);
  // perm = [g f e 0 k j i 0] [o n m 0 o n m 0] as i16

  // Vertical (x*z) products: each row times the row below.  The duplicated
  // bottom row produces mm/nn/oo terms which the caller discards.
  const __m256i madd_xz = _mm256_madd_epi16(slli, perm);
  // madd_xz = [cg+bf ae gk+fj ei] [ko+jn im oo+nn mm] as i32
  *xz_sum_32 = _mm256_add_epi32(*xz_sum_32, madd_xz);

  // Sum every element in slli (and then also their squares)
  const __m256i madd1_slli = _mm256_madd_epi16(slli, _mm256_set1_epi16(1));
  // madd1_slli = [c+b a g+f e] [k+j i o+n m] as i32
  *x_sum_32 = _mm256_add_epi32(*x_sum_32, madd1_slli);

  const __m256i madd_slli = _mm256_madd_epi16(slli, slli);
  // madd_slli = [cc+bb aa gg+ff ee] [kk+jj ii oo+nn mm] as i32
  *x2_sum_32 = _mm256_add_epi32(*x2_sum_32, madd_slli);
}
+
// Computes the normalized first-order horizontal (*hcorr) and vertical
// (*vcorr) correlation coefficients of the residual block `diff`.  The AVX2
// kernel accumulates the products over the interior in 4x4 tiles stepped by
// 3 in each direction; the remaining 1-2 final rows/columns are handled in
// scalar code, and first/final row/column sums are subtracted at the end to
// form the shifted-window statistics.
void av1_get_horver_correlation_full_avx2(const int16_t *diff, int stride,
                                          int width, int height, float *hcorr,
                                          float *vcorr) {
  // The following notation is used:
  // x - current pixel
  // y - right neighbour pixel
  // z - below neighbour pixel
  // w - down-right neighbour pixel
  int64_t xy_sum = 0, xz_sum = 0;
  int64_t x_sum = 0, x2_sum = 0;

  // Process horizontal and vertical correlations through the body in 4x4
  // blocks. This excludes the final row and column and possibly one extra
  // column depending how 3 divides into width and height
  int32_t xy_xz_tmp[8] = { 0 }, x_x2_tmp[8] = { 0 };
  __m256i xy_sum_32 = _mm256_setzero_si256();
  __m256i xz_sum_32 = _mm256_setzero_si256();
  __m256i x_sum_32 = _mm256_setzero_si256();
  __m256i x2_sum_32 = _mm256_setzero_si256();
  for (int i = 0; i <= height - 4; i += 3) {
    for (int j = 0; j <= width - 4; j += 3) {
      horver_correlation_4x4(&diff[i * stride + j], stride, &xy_sum_32,
                             &xz_sum_32, &x_sum_32, &x2_sum_32);
    }
    // Flush the i32 accumulators into the i64 scalar sums once per strip of
    // tiles (presumably to keep the 32-bit lanes from overflowing).  Only
    // the lanes corresponding to the top-left 3x3 window of each tile are
    // accumulated; the duplicated-row lanes are skipped.
    const __m256i hadd_xy_xz = _mm256_hadd_epi32(xy_sum_32, xz_sum_32);
    // hadd_xy_xz = [ae+bf+cg ei+fj+gk ab+bc+cd ef+fg+gh]
    //              [im+jn+ko mm+nn+oo ij+jk+kl mn+no+op] as i32
    yy_storeu_256(xy_xz_tmp, hadd_xy_xz);
    xy_sum += (int64_t)xy_xz_tmp[5] + xy_xz_tmp[4] + xy_xz_tmp[1];
    xz_sum += (int64_t)xy_xz_tmp[7] + xy_xz_tmp[6] + xy_xz_tmp[3];

    const __m256i hadd_x_x2 = _mm256_hadd_epi32(x_sum_32, x2_sum_32);
    // hadd_x_x2 = [aa+bb+cc ee+ff+gg a+b+c e+f+g]
    //             [ii+jj+kk mm+nn+oo i+j+k m+n+o] as i32
    yy_storeu_256(x_x2_tmp, hadd_x_x2);
    x_sum += (int64_t)x_x2_tmp[5] + x_x2_tmp[4] + x_x2_tmp[1];
    x2_sum += (int64_t)x_x2_tmp[7] + x_x2_tmp[6] + x_x2_tmp[3];

    xy_sum_32 = _mm256_setzero_si256();
    xz_sum_32 = _mm256_setzero_si256();
    x_sum_32 = _mm256_setzero_si256();
    x2_sum_32 = _mm256_setzero_si256();
  }

  // x_sum now covers every pixel except the final 1-2 rows and 1-2 cols
  int64_t x_finalrow = 0, x_finalcol = 0, x2_finalrow = 0, x2_finalcol = 0;

  // Do we have 2 rows remaining or just the one? Note that width and height
  // are powers of 2, so each modulo 3 must be 1 or 2.
  if (height % 3 == 1) {  // Just horiz corrs on the final row
    const int16_t x0 = diff[(height - 1) * stride];
    x_sum += x0;
    x_finalrow += x0;
    x2_sum += x0 * x0;
    x2_finalrow += x0 * x0;
    for (int j = 0; j < width - 1; ++j) {
      const int16_t x = diff[(height - 1) * stride + j];
      const int16_t y = diff[(height - 1) * stride + j + 1];
      xy_sum += x * y;
      x_sum += y;
      x2_sum += y * y;
      x_finalrow += y;
      x2_finalrow += y * y;
    }
  } else {  // Two rows remaining to do
    const int16_t x0 = diff[(height - 2) * stride];
    const int16_t z0 = diff[(height - 1) * stride];
    x_sum += x0 + z0;
    x2_sum += x0 * x0 + z0 * z0;
    x_finalrow += z0;
    x2_finalrow += z0 * z0;
    for (int j = 0; j < width - 1; ++j) {
      const int16_t x = diff[(height - 2) * stride + j];
      const int16_t y = diff[(height - 2) * stride + j + 1];
      const int16_t z = diff[(height - 1) * stride + j];
      const int16_t w = diff[(height - 1) * stride + j + 1];

      // Horizontal and vertical correlations for the penultimate row:
      xy_sum += x * y;
      xz_sum += x * z;

      // Now just horizontal correlations for the final row:
      xy_sum += z * w;

      x_sum += y + w;
      x2_sum += y * y + w * w;
      x_finalrow += w;
      x2_finalrow += w * w;
    }
  }

  // Do we have 2 columns remaining or just the one?
  if (width % 3 == 1) {  // Just vert corrs on the final col
    const int16_t x0 = diff[width - 1];
    x_sum += x0;
    x_finalcol += x0;
    x2_sum += x0 * x0;
    x2_finalcol += x0 * x0;
    for (int i = 0; i < height - 1; ++i) {
      const int16_t x = diff[i * stride + width - 1];
      const int16_t z = diff[(i + 1) * stride + width - 1];
      xz_sum += x * z;
      x_finalcol += z;
      x2_finalcol += z * z;
      // So the bottom-right elements don't get counted twice:
      if (i < height - (height % 3 == 1 ? 2 : 3)) {
        x_sum += z;
        x2_sum += z * z;
      }
    }
  } else {  // Two cols remaining
    const int16_t x0 = diff[width - 2];
    const int16_t y0 = diff[width - 1];
    x_sum += x0 + y0;
    x2_sum += x0 * x0 + y0 * y0;
    x_finalcol += y0;
    x2_finalcol += y0 * y0;
    for (int i = 0; i < height - 1; ++i) {
      const int16_t x = diff[i * stride + width - 2];
      const int16_t y = diff[i * stride + width - 1];
      const int16_t z = diff[(i + 1) * stride + width - 2];
      const int16_t w = diff[(i + 1) * stride + width - 1];

      // Horizontal and vertical correlations for the penultimate col:
      // Skip these on the last iteration of this loop if we also had two
      // rows remaining, otherwise the final horizontal and vertical correlation
      // get erroneously processed twice
      if (i < height - 2 || height % 3 == 1) {
        xy_sum += x * y;
        xz_sum += x * z;
      }

      x_finalcol += w;
      x2_finalcol += w * w;
      // So the bottom-right elements don't get counted twice:
      if (i < height - (height % 3 == 1 ? 2 : 3)) {
        x_sum += z + w;
        x2_sum += z * z + w * w;
      }

      // Now just vertical correlations for the final column:
      xz_sum += y * w;
    }
  }

  // Calculate the simple sums and squared-sums
  int64_t x_firstrow = 0, x_firstcol = 0;
  int64_t x2_firstrow = 0, x2_firstcol = 0;

  for (int j = 0; j < width; ++j) {
    x_firstrow += diff[j];
    x2_firstrow += diff[j] * diff[j];
  }
  for (int i = 0; i < height; ++i) {
    x_firstcol += diff[i * stride];
    x2_firstcol += diff[i * stride] * diff[i * stride];
  }

  // Shifted-window sums: e.g. the horizontal "x" window is everything except
  // the final column, and its "y" partner is everything except the first.
  int64_t xhor_sum = x_sum - x_finalcol;
  int64_t xver_sum = x_sum - x_finalrow;
  int64_t y_sum = x_sum - x_firstcol;
  int64_t z_sum = x_sum - x_firstrow;
  int64_t x2hor_sum = x2_sum - x2_finalcol;
  int64_t x2ver_sum = x2_sum - x2_finalrow;
  int64_t y2_sum = x2_sum - x2_firstcol;
  int64_t z2_sum = x2_sum - x2_firstrow;

  const float num_hor = (float)(height * (width - 1));
  const float num_ver = (float)((height - 1) * width);

  // Scaled (co)variances: var * n, computed in float.
  const float xhor_var_n = x2hor_sum - (xhor_sum * xhor_sum) / num_hor;
  const float xver_var_n = x2ver_sum - (xver_sum * xver_sum) / num_ver;

  const float y_var_n = y2_sum - (y_sum * y_sum) / num_hor;
  const float z_var_n = z2_sum - (z_sum * z_sum) / num_ver;

  const float xy_var_n = xy_sum - (xhor_sum * y_sum) / num_hor;
  const float xz_var_n = xz_sum - (xver_sum * z_sum) / num_ver;

  // Pearson correlation, clamped to [0, 1]; degenerate (zero-variance)
  // blocks report full correlation.
  if (xhor_var_n > 0 && y_var_n > 0) {
    *hcorr = xy_var_n / sqrtf(xhor_var_n * y_var_n);
    *hcorr = *hcorr < 0 ? 0 : *hcorr;
  } else {
    *hcorr = 1.0;
  }
  if (xver_var_n > 0 && z_var_n > 0) {
    *vcorr = xz_var_n / sqrtf(xver_var_n * z_var_n);
    *vcorr = *vcorr < 0 ? 0 : *vcorr;
  } else {
    *vcorr = 1.0;
  }
}
diff --git a/third_party/aom/av1/encoder/x86/rdopt_sse4.c b/third_party/aom/av1/encoder/x86/rdopt_sse4.c
new file mode 100644
index 0000000000..12ac146195
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/rdopt_sse4.c
@@ -0,0 +1,272 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h>
+#include "aom_dsp/x86/synonyms.h"
+
+#include "config/av1_rtcd.h"
+#include "av1/encoder/rdopt.h"
+
// Process horizontal and vertical correlations in a 4x4 block of pixels.
// We actually use the 4x4 pixels to calculate correlations corresponding to
// the top-left 3x3 pixels, so this function must be called with 1x1 overlap,
// moving the window along/down by 3 pixels at a time.
INLINE static void horver_correlation_4x4(const int16_t *diff, int stride,
                                          __m128i *xy_sum_32,
                                          __m128i *xz_sum_32, __m128i *x_sum_32,
                                          __m128i *x2_sum_32) {
  // Pixels in this 4x4   [ a b c d ]
  // are referred to as:  [ e f g h ]
  //                      [ i j k l ]
  //                      [ m n o p ]

  // NOTE(review): these casts type-pun int16 rows as int64 loads; the AVX2
  // sibling uses loadu_int64 for the same purpose -- confirm alignment /
  // aliasing assumptions hold on all supported compilers.
  const __m128i pixelsa = _mm_set_epi64x(*(int64_t *)&diff[0 * stride],
                                         *(int64_t *)&diff[2 * stride]);
  const __m128i pixelsb = _mm_set_epi64x(*(int64_t *)&diff[1 * stride],
                                         *(int64_t *)&diff[3 * stride]);
  // pixelsa = [d c b a l k j i] as i16
  // pixelsb = [h g f e p o n m] as i16

  // Shifting each 64-bit row left by one i16 lane aligns every pixel with
  // its left neighbour and zeroes the first column, restricting the sums to
  // the top-left 3x3 window.
  const __m128i slli_a = _mm_slli_epi64(pixelsa, 16);
  const __m128i slli_b = _mm_slli_epi64(pixelsb, 16);
  // slli_a = [c b a 0 k j i 0] as i16
  // slli_b = [g f e 0 o n m 0] as i16

  // Horizontal (x*y) products: each pixel times its right neighbour.
  const __m128i xy_madd_a = _mm_madd_epi16(pixelsa, slli_a);
  const __m128i xy_madd_b = _mm_madd_epi16(pixelsb, slli_b);
  // xy_madd_a = [bc+cd ab jk+kl ij] as i32
  // xy_madd_b = [fg+gh ef no+op mn] as i32

  const __m128i xy32 = _mm_hadd_epi32(xy_madd_b, xy_madd_a);
  // xy32 = [ab+bc+cd ij+jk+kl ef+fg+gh mn+no+op] as i32
  *xy_sum_32 = _mm_add_epi32(*xy_sum_32, xy32);

  // Vertical (x*z) products: each row times the row below it.
  const __m128i xz_madd_a = _mm_madd_epi16(slli_a, slli_b);
  // xz_madd_a = [bf+cg ae jn+ko im] i32

  const __m128i swap_b = _mm_srli_si128(slli_b, 8);
  // swap_b = [0 0 0 0 g f e 0] as i16
  const __m128i xz_madd_b = _mm_madd_epi16(slli_a, swap_b);
  // xz_madd_b = [0 0 gk+fj ei] i32

  const __m128i xz32 = _mm_hadd_epi32(xz_madd_b, xz_madd_a);
  // xz32 = [ae+bf+cg im+jn+ko 0 ei+fj+gk] i32
  *xz_sum_32 = _mm_add_epi32(*xz_sum_32, xz32);

  // Now calculate the straight sums, x_sum += a+b+c+e+f+g+i+j+k
  // (sum up every element in slli_a and swap_b)
  const __m128i sum_slli_a = _mm_hadd_epi16(slli_a, slli_a);
  const __m128i sum_slli_a32 = _mm_cvtepi16_epi32(sum_slli_a);
  // sum_slli_a32 = [c+b a k+j i] as i32
  const __m128i swap_b32 = _mm_cvtepi16_epi32(swap_b);
  // swap_b32 = [g f e 0] as i32
  *x_sum_32 = _mm_add_epi32(*x_sum_32, sum_slli_a32);
  *x_sum_32 = _mm_add_epi32(*x_sum_32, swap_b32);
  // sum = [c+b+g a+f k+j+e i] as i32

  // Also sum their squares
  const __m128i slli_a_2 = _mm_madd_epi16(slli_a, slli_a);
  const __m128i swap_b_2 = _mm_madd_epi16(swap_b, swap_b);
  // slli_a_2 = [c2+b2 a2 k2+j2 i2]
  // swap_b_2 = [0 0 g2+f2 e2]
  const __m128i sum2 = _mm_hadd_epi32(slli_a_2, swap_b_2);
  // sum2 = [0 g2+f2+e2 c2+b2+a2 k2+j2+i2]
  *x2_sum_32 = _mm_add_epi32(*x2_sum_32, sum2);
}
+
// SSE4.1 counterpart of av1_get_horver_correlation_full_avx2: computes the
// normalized first-order horizontal (*hcorr) and vertical (*vcorr)
// correlation coefficients of the residual block `diff`.  The SIMD kernel
// covers the interior in 4x4 tiles stepped by 3; the final 1-2 rows/columns
// are handled by the scalar code below.
void av1_get_horver_correlation_full_sse4_1(const int16_t *diff, int stride,
                                            int width, int height, float *hcorr,
                                            float *vcorr) {
  // The following notation is used:
  // x - current pixel
  // y - right neighbour pixel
  // z - below neighbour pixel
  // w - down-right neighbour pixel
  int64_t xy_sum = 0, xz_sum = 0;
  int64_t x_sum = 0, x2_sum = 0;

  // Process horizontal and vertical correlations through the body in 4x4
  // blocks. This excludes the final row and column and possibly one extra
  // column depending how 3 divides into width and height
  int32_t xy_tmp[4] = { 0 }, xz_tmp[4] = { 0 };
  int32_t x_tmp[4] = { 0 }, x2_tmp[4] = { 0 };
  __m128i xy_sum_32 = _mm_setzero_si128();
  __m128i xz_sum_32 = _mm_setzero_si128();
  __m128i x_sum_32 = _mm_setzero_si128();
  __m128i x2_sum_32 = _mm_setzero_si128();
  for (int i = 0; i <= height - 4; i += 3) {
    for (int j = 0; j <= width - 4; j += 3) {
      horver_correlation_4x4(&diff[i * stride + j], stride, &xy_sum_32,
                             &xz_sum_32, &x_sum_32, &x2_sum_32);
    }
    // Flush the i32 accumulators into the i64 scalar sums once per strip of
    // tiles (presumably to keep the 32-bit lanes from overflowing).  Lanes
    // that hold padding/duplicate terms are excluded from each sum.
    xx_storeu_128(xy_tmp, xy_sum_32);
    xx_storeu_128(xz_tmp, xz_sum_32);
    xx_storeu_128(x_tmp, x_sum_32);
    xx_storeu_128(x2_tmp, x2_sum_32);
    xy_sum += (int64_t)xy_tmp[3] + xy_tmp[2] + xy_tmp[1];
    xz_sum += (int64_t)xz_tmp[3] + xz_tmp[2] + xz_tmp[0];
    x_sum += (int64_t)x_tmp[3] + x_tmp[2] + x_tmp[1] + x_tmp[0];
    x2_sum += (int64_t)x2_tmp[2] + x2_tmp[1] + x2_tmp[0];
    xy_sum_32 = _mm_setzero_si128();
    xz_sum_32 = _mm_setzero_si128();
    x_sum_32 = _mm_setzero_si128();
    x2_sum_32 = _mm_setzero_si128();
  }

  // x_sum now covers every pixel except the final 1-2 rows and 1-2 cols
  int64_t x_finalrow = 0, x_finalcol = 0, x2_finalrow = 0, x2_finalcol = 0;

  // Do we have 2 rows remaining or just the one? Note that width and height
  // are powers of 2, so each modulo 3 must be 1 or 2.
  if (height % 3 == 1) {  // Just horiz corrs on the final row
    const int16_t x0 = diff[(height - 1) * stride];
    x_sum += x0;
    x_finalrow += x0;
    x2_sum += x0 * x0;
    x2_finalrow += x0 * x0;
    for (int j = 0; j < width - 1; ++j) {
      const int16_t x = diff[(height - 1) * stride + j];
      const int16_t y = diff[(height - 1) * stride + j + 1];
      xy_sum += x * y;
      x_sum += y;
      x2_sum += y * y;
      x_finalrow += y;
      x2_finalrow += y * y;
    }
  } else {  // Two rows remaining to do
    const int16_t x0 = diff[(height - 2) * stride];
    const int16_t z0 = diff[(height - 1) * stride];
    x_sum += x0 + z0;
    x2_sum += x0 * x0 + z0 * z0;
    x_finalrow += z0;
    x2_finalrow += z0 * z0;
    for (int j = 0; j < width - 1; ++j) {
      const int16_t x = diff[(height - 2) * stride + j];
      const int16_t y = diff[(height - 2) * stride + j + 1];
      const int16_t z = diff[(height - 1) * stride + j];
      const int16_t w = diff[(height - 1) * stride + j + 1];

      // Horizontal and vertical correlations for the penultimate row:
      xy_sum += x * y;
      xz_sum += x * z;

      // Now just horizontal correlations for the final row:
      xy_sum += z * w;

      x_sum += y + w;
      x2_sum += y * y + w * w;
      x_finalrow += w;
      x2_finalrow += w * w;
    }
  }

  // Do we have 2 columns remaining or just the one?
  if (width % 3 == 1) {  // Just vert corrs on the final col
    const int16_t x0 = diff[width - 1];
    x_sum += x0;
    x_finalcol += x0;
    x2_sum += x0 * x0;
    x2_finalcol += x0 * x0;
    for (int i = 0; i < height - 1; ++i) {
      const int16_t x = diff[i * stride + width - 1];
      const int16_t z = diff[(i + 1) * stride + width - 1];
      xz_sum += x * z;
      x_finalcol += z;
      x2_finalcol += z * z;
      // So the bottom-right elements don't get counted twice:
      if (i < height - (height % 3 == 1 ? 2 : 3)) {
        x_sum += z;
        x2_sum += z * z;
      }
    }
  } else {  // Two cols remaining
    const int16_t x0 = diff[width - 2];
    const int16_t y0 = diff[width - 1];
    x_sum += x0 + y0;
    x2_sum += x0 * x0 + y0 * y0;
    x_finalcol += y0;
    x2_finalcol += y0 * y0;
    for (int i = 0; i < height - 1; ++i) {
      const int16_t x = diff[i * stride + width - 2];
      const int16_t y = diff[i * stride + width - 1];
      const int16_t z = diff[(i + 1) * stride + width - 2];
      const int16_t w = diff[(i + 1) * stride + width - 1];

      // Horizontal and vertical correlations for the penultimate col:
      // Skip these on the last iteration of this loop if we also had two
      // rows remaining, otherwise the final horizontal and vertical correlation
      // get erroneously processed twice
      if (i < height - 2 || height % 3 == 1) {
        xy_sum += x * y;
        xz_sum += x * z;
      }

      x_finalcol += w;
      x2_finalcol += w * w;
      // So the bottom-right elements don't get counted twice:
      if (i < height - (height % 3 == 1 ? 2 : 3)) {
        x_sum += z + w;
        x2_sum += z * z + w * w;
      }

      // Now just vertical correlations for the final column:
      xz_sum += y * w;
    }
  }

  // Calculate the simple sums and squared-sums
  int64_t x_firstrow = 0, x_firstcol = 0;
  int64_t x2_firstrow = 0, x2_firstcol = 0;

  for (int j = 0; j < width; ++j) {
    x_firstrow += diff[j];
    x2_firstrow += diff[j] * diff[j];
  }
  for (int i = 0; i < height; ++i) {
    x_firstcol += diff[i * stride];
    x2_firstcol += diff[i * stride] * diff[i * stride];
  }

  // Shifted-window sums: e.g. the horizontal "x" window is everything except
  // the final column, and its "y" partner is everything except the first.
  int64_t xhor_sum = x_sum - x_finalcol;
  int64_t xver_sum = x_sum - x_finalrow;
  int64_t y_sum = x_sum - x_firstcol;
  int64_t z_sum = x_sum - x_firstrow;
  int64_t x2hor_sum = x2_sum - x2_finalcol;
  int64_t x2ver_sum = x2_sum - x2_finalrow;
  int64_t y2_sum = x2_sum - x2_firstcol;
  int64_t z2_sum = x2_sum - x2_firstrow;

  const float num_hor = (float)(height * (width - 1));
  const float num_ver = (float)((height - 1) * width);

  // Scaled (co)variances: var * n, computed in float.
  const float xhor_var_n = x2hor_sum - (xhor_sum * xhor_sum) / num_hor;
  const float xver_var_n = x2ver_sum - (xver_sum * xver_sum) / num_ver;

  const float y_var_n = y2_sum - (y_sum * y_sum) / num_hor;
  const float z_var_n = z2_sum - (z_sum * z_sum) / num_ver;

  const float xy_var_n = xy_sum - (xhor_sum * y_sum) / num_hor;
  const float xz_var_n = xz_sum - (xver_sum * z_sum) / num_ver;

  // Pearson correlation, clamped to [0, 1]; degenerate (zero-variance)
  // blocks report full correlation.
  if (xhor_var_n > 0 && y_var_n > 0) {
    *hcorr = xy_var_n / sqrtf(xhor_var_n * y_var_n);
    *hcorr = *hcorr < 0 ? 0 : *hcorr;
  } else {
    *hcorr = 1.0;
  }
  if (xver_var_n > 0 && z_var_n > 0) {
    *vcorr = xz_var_n / sqrtf(xver_var_n * z_var_n);
    *vcorr = *vcorr < 0 ? 0 : *vcorr;
  } else {
    *vcorr = 1.0;
  }
}
diff --git a/third_party/aom/av1/encoder/x86/reconinter_enc_sse2.c b/third_party/aom/av1/encoder/x86/reconinter_enc_sse2.c
new file mode 100644
index 0000000000..a492483721
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/reconinter_enc_sse2.c
@@ -0,0 +1,347 @@
+/*
+ * Copyright (c) 2021, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+#include "config/aom_scale_rtcd.h"
+
+#include "aom/aom_integer.h"
+#include "aom_dsp/blend.h"
+#include "aom_dsp/x86/mem_sse2.h"
+#include "aom_dsp/x86/synonyms.h"
+
+#include "av1/common/av1_common_int.h"
+#include "av1/common/blockd.h"
+#include "av1/common/mvref_common.h"
+#include "av1/common/obmc.h"
+#include "av1/common/reconinter.h"
+#include "av1/common/reconintra.h"
+#include "av1/encoder/reconinter_enc.h"
+
+// Build an upsampled (sub-pel interpolated) prediction of a width x height
+// block from `ref` into `comp_pred`; comp_pred is written contiguously
+// (stride == width). If xd is non-NULL and the reference frame is scaled,
+// defer to the generic scaled inter-predictor instead of the SIMD paths.
+void aom_upsampled_pred_sse2(MACROBLOCKD *xd, const struct AV1Common *const cm,
+                             int mi_row, int mi_col, const MV *const mv,
+                             uint8_t *comp_pred, int width, int height,
+                             int subpel_x_q3, int subpel_y_q3,
+                             const uint8_t *ref, int ref_stride,
+                             int subpel_search) {
+  // expect xd == NULL only in tests
+  if (xd != NULL) {
+    const MB_MODE_INFO *mi = xd->mi[0];
+    const int ref_num = 0;
+    const int is_intrabc = is_intrabc_block(mi);
+    const struct scale_factors *const sf =
+        is_intrabc ? &cm->sf_identity : xd->block_ref_scale_factors[ref_num];
+    const int is_scaled = av1_is_scaled(sf);
+
+    if (is_scaled) {
+      // Scaled reference: the plain copy/convolve paths below do not handle
+      // resampling, so build the predictor through the generic path.
+      int plane = 0;
+      const int mi_x = mi_col * MI_SIZE;
+      const int mi_y = mi_row * MI_SIZE;
+      const struct macroblockd_plane *const pd = &xd->plane[plane];
+      const struct buf_2d *const dst_buf = &pd->dst;
+      const struct buf_2d *const pre_buf =
+          is_intrabc ? dst_buf : &pd->pre[ref_num];
+
+      InterPredParams inter_pred_params;
+      inter_pred_params.conv_params = get_conv_params(0, plane, xd->bd);
+      const int_interpfilters filters =
+          av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
+      av1_init_inter_params(
+          &inter_pred_params, width, height, mi_y >> pd->subsampling_y,
+          mi_x >> pd->subsampling_x, pd->subsampling_x, pd->subsampling_y,
+          xd->bd, is_cur_buf_hbd(xd), is_intrabc, sf, pre_buf, filters);
+      av1_enc_build_one_inter_predictor(comp_pred, width, mv,
+                                        &inter_pred_params);
+      return;
+    }
+  }
+
+  const InterpFilterParams *filter = av1_get_filter(subpel_search);
+  // (TODO:yunqing) 2-tap case uses 4-tap functions since there is no SIMD for
+  // 2-tap yet.
+  int filter_taps = (subpel_search <= USE_4_TAPS) ? 4 : SUBPEL_TAPS;
+
+  if (!subpel_x_q3 && !subpel_y_q3) {
+    // Integer-pel position: plain copy, specialized on block width.
+    if (width >= 16) {
+      int i;
+      assert(!(width & 15));
+      /*Read 16 pixels one row at a time.*/
+      for (i = 0; i < height; i++) {
+        int j;
+        for (j = 0; j < width; j += 16) {
+          xx_storeu_128(comp_pred, xx_loadu_128(ref));
+          comp_pred += 16;
+          ref += 16;
+        }
+        ref += ref_stride - width;
+      }
+    } else if (width >= 8) {
+      int i;
+      assert(!(width & 7));
+      assert(!(height & 1));
+      /*Read 8 pixels two rows at a time.*/
+      for (i = 0; i < height; i += 2) {
+        __m128i s0 = xx_loadl_64(ref + 0 * ref_stride);
+        __m128i s1 = xx_loadl_64(ref + 1 * ref_stride);
+        xx_storeu_128(comp_pred, _mm_unpacklo_epi64(s0, s1));
+        comp_pred += 16;
+        ref += 2 * ref_stride;
+      }
+    } else {
+      int i;
+      assert(!(width & 3));
+      assert(!(height & 3));
+      /*Read 4 pixels four rows at a time.*/
+      // Each iteration consumes four source rows and emits 16 output bytes,
+      // so the row counter must advance by 4 (stepping by 1 would process
+      // 4x the block height and overrun comp_pred).
+      for (i = 0; i < height; i += 4) {
+        const __m128i row0 = xx_loadl_64(ref + 0 * ref_stride);
+        const __m128i row1 = xx_loadl_64(ref + 1 * ref_stride);
+        const __m128i row2 = xx_loadl_64(ref + 2 * ref_stride);
+        const __m128i row3 = xx_loadl_64(ref + 3 * ref_stride);
+        const __m128i reg = _mm_unpacklo_epi64(_mm_unpacklo_epi32(row0, row1),
+                                               _mm_unpacklo_epi32(row2, row3));
+        xx_storeu_128(comp_pred, reg);
+        comp_pred += 16;
+        ref += 4 * ref_stride;
+      }
+    }
+  } else if (!subpel_y_q3) {
+    // Horizontal-only sub-pel motion.
+    const int16_t *const kernel =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+    aom_convolve8_horiz(ref, ref_stride, comp_pred, width, kernel, 16, NULL, -1,
+                        width, height);
+  } else if (!subpel_x_q3) {
+    // Vertical-only sub-pel motion.
+    const int16_t *const kernel =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+    aom_convolve8_vert(ref, ref_stride, comp_pred, width, NULL, -1, kernel, 16,
+                       width, height);
+  } else {
+    // Diagonal sub-pel motion: horizontal pass into `temp`, then vertical.
+    DECLARE_ALIGNED(16, uint8_t,
+                    temp[((MAX_SB_SIZE * 2 + 16) + 16) * MAX_SB_SIZE]);
+    const int16_t *const kernel_x =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+    const int16_t *const kernel_y =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+    const uint8_t *ref_start = ref - ref_stride * ((filter_taps >> 1) - 1);
+    uint8_t *temp_start_horiz = (subpel_search <= USE_4_TAPS)
+                                    ? temp + (filter_taps >> 1) * MAX_SB_SIZE
+                                    : temp;
+    uint8_t *temp_start_vert = temp + MAX_SB_SIZE * ((filter->taps >> 1) - 1);
+    int intermediate_height =
+        (((height - 1) * 8 + subpel_y_q3) >> 3) + filter_taps;
+    assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+    aom_convolve8_horiz(ref_start, ref_stride, temp_start_horiz, MAX_SB_SIZE,
+                        kernel_x, 16, NULL, -1, width, intermediate_height);
+    aom_convolve8_vert(temp_start_vert, MAX_SB_SIZE, comp_pred, width, NULL, -1,
+                       kernel_y, 16, width, height);
+  }
+}
+
+#if CONFIG_AV1_HIGHBITDEPTH
+// Distance-weighted average of two high-bitdepth pixel vectors (8 x uint16):
+//   result = (p0 * w0 + p1 * w1 + round) >> DIST_PRECISION_BITS
+// Stored unaligned to `result`. Uses saturating unsigned adds; the assert
+// documents the precision bound the weighted sums rely on.
+static INLINE void highbd_compute_dist_wtd_comp_avg(__m128i *p0, __m128i *p1,
+                                                    const __m128i *w0,
+                                                    const __m128i *w1,
+                                                    const __m128i *r,
+                                                    void *const result) {
+  assert(DIST_PRECISION_BITS <= 4);
+  __m128i mult0 = _mm_mullo_epi16(*p0, *w0);
+  __m128i mult1 = _mm_mullo_epi16(*p1, *w1);
+  __m128i sum = _mm_adds_epu16(mult0, mult1);
+  __m128i round = _mm_adds_epu16(sum, *r);
+  __m128i shift = _mm_srli_epi16(round, DIST_PRECISION_BITS);
+
+  xx_storeu_128(result, shift);
+}
+
+// High-bitdepth variant of aom_upsampled_pred_sse2: builds an upsampled
+// prediction of a width x height block from `ref8` into `comp_pred8` (both
+// CONVERT_TO_BYTEPTR-style pointers to uint16 pixels). Falls back to the
+// generic scaled inter-predictor when the reference frame is scaled.
+void aom_highbd_upsampled_pred_sse2(MACROBLOCKD *xd,
+                                    const struct AV1Common *const cm,
+                                    int mi_row, int mi_col, const MV *const mv,
+                                    uint8_t *comp_pred8, int width, int height,
+                                    int subpel_x_q3, int subpel_y_q3,
+                                    const uint8_t *ref8, int ref_stride, int bd,
+                                    int subpel_search) {
+  // expect xd == NULL only in tests
+  if (xd != NULL) {
+    const MB_MODE_INFO *mi = xd->mi[0];
+    const int ref_num = 0;
+    const int is_intrabc = is_intrabc_block(mi);
+    const struct scale_factors *const sf =
+        is_intrabc ? &cm->sf_identity : xd->block_ref_scale_factors[ref_num];
+    const int is_scaled = av1_is_scaled(sf);
+
+    if (is_scaled) {
+      // Scaled reference: use the generic (non-SIMD-copy) predictor path.
+      int plane = 0;
+      const int mi_x = mi_col * MI_SIZE;
+      const int mi_y = mi_row * MI_SIZE;
+      const struct macroblockd_plane *const pd = &xd->plane[plane];
+      const struct buf_2d *const dst_buf = &pd->dst;
+      const struct buf_2d *const pre_buf =
+          is_intrabc ? dst_buf : &pd->pre[ref_num];
+
+      InterPredParams inter_pred_params;
+      inter_pred_params.conv_params = get_conv_params(0, plane, xd->bd);
+      const int_interpfilters filters =
+          av1_broadcast_interp_filter(EIGHTTAP_REGULAR);
+      av1_init_inter_params(
+          &inter_pred_params, width, height, mi_y >> pd->subsampling_y,
+          mi_x >> pd->subsampling_x, pd->subsampling_x, pd->subsampling_y,
+          xd->bd, is_cur_buf_hbd(xd), is_intrabc, sf, pre_buf, filters);
+      av1_enc_build_one_inter_predictor(comp_pred8, width, mv,
+                                        &inter_pred_params);
+      return;
+    }
+  }
+
+  const InterpFilterParams *filter = av1_get_filter(subpel_search);
+  // 2-tap search uses the 4-tap SIMD kernels (no dedicated 2-tap SIMD).
+  int filter_taps = (subpel_search <= USE_4_TAPS) ? 4 : SUBPEL_TAPS;
+  if (!subpel_x_q3 && !subpel_y_q3) {
+    // Integer-pel position: plain 16-bit pixel copy.
+    uint16_t *ref = CONVERT_TO_SHORTPTR(ref8);
+    uint16_t *comp_pred = CONVERT_TO_SHORTPTR(comp_pred8);
+    if (width >= 8) {
+      int i;
+      assert(!(width & 7));
+      /*Read 8 pixels one row at a time.*/
+      for (i = 0; i < height; i++) {
+        int j;
+        for (j = 0; j < width; j += 8) {
+          __m128i s0 = _mm_loadu_si128((const __m128i *)ref);
+          _mm_storeu_si128((__m128i *)comp_pred, s0);
+          comp_pred += 8;
+          ref += 8;
+        }
+        ref += ref_stride - width;
+      }
+    } else {
+      int i;
+      assert(!(width & 3));
+      /*Read 4 pixels two rows at a time.*/
+      for (i = 0; i < height; i += 2) {
+        __m128i s0 = _mm_loadl_epi64((const __m128i *)ref);
+        __m128i s1 = _mm_loadl_epi64((const __m128i *)(ref + ref_stride));
+        __m128i t0 = _mm_unpacklo_epi64(s0, s1);
+        _mm_storeu_si128((__m128i *)comp_pred, t0);
+        comp_pred += 8;
+        ref += 2 * ref_stride;
+      }
+    }
+  } else if (!subpel_y_q3) {
+    // Horizontal-only sub-pel motion.
+    const int16_t *const kernel =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+    aom_highbd_convolve8_horiz(ref8, ref_stride, comp_pred8, width, kernel, 16,
+                               NULL, -1, width, height, bd);
+  } else if (!subpel_x_q3) {
+    // Vertical-only sub-pel motion.
+    const int16_t *const kernel =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+    aom_highbd_convolve8_vert(ref8, ref_stride, comp_pred8, width, NULL, -1,
+                              kernel, 16, width, height, bd);
+  } else {
+    // Diagonal sub-pel motion: horizontal pass into `temp`, then vertical.
+    // NOTE(review): temp has (MAX_SB_SIZE + 32) rows, while the assert below
+    // allows intermediate_height up to (MAX_SB_SIZE * 2 + 32) — the 8-bit
+    // variant sizes its buffer to the larger bound. Confirm against the
+    // maximum height actually passed by callers.
+    DECLARE_ALIGNED(16, uint16_t,
+                    temp[((MAX_SB_SIZE + 16) + 16) * MAX_SB_SIZE]);
+    const int16_t *const kernel_x =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_x_q3 << 1);
+    const int16_t *const kernel_y =
+        av1_get_interp_filter_subpel_kernel(filter, subpel_y_q3 << 1);
+    const uint8_t *ref_start = ref8 - ref_stride * ((filter_taps >> 1) - 1);
+    uint16_t *temp_start_horiz = (subpel_search <= USE_4_TAPS)
+                                     ? temp + (filter_taps >> 1) * MAX_SB_SIZE
+                                     : temp;
+    uint16_t *temp_start_vert = temp + MAX_SB_SIZE * ((filter->taps >> 1) - 1);
+    const int intermediate_height =
+        (((height - 1) * 8 + subpel_y_q3) >> 3) + filter_taps;
+    assert(intermediate_height <= (MAX_SB_SIZE * 2 + 16) + 16);
+    aom_highbd_convolve8_horiz(
+        ref_start, ref_stride, CONVERT_TO_BYTEPTR(temp_start_horiz),
+        MAX_SB_SIZE, kernel_x, 16, NULL, -1, width, intermediate_height, bd);
+    aom_highbd_convolve8_vert(CONVERT_TO_BYTEPTR(temp_start_vert), MAX_SB_SIZE,
+                              comp_pred8, width, NULL, -1, kernel_y, 16, width,
+                              height, bd);
+  }
+}
+
+// Build the high-bitdepth upsampled prediction in comp_pred8, then average
+// it with `pred8` in place, processing 8 uint16 pixels (one XMM register)
+// per iteration.
+void aom_highbd_comp_avg_upsampled_pred_sse2(
+    MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col,
+    const MV *const mv, uint8_t *comp_pred8, const uint8_t *pred8, int width,
+    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref8,
+    int ref_stride, int bd, int subpel_search) {
+  aom_highbd_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred8, width,
+                            height, subpel_x_q3, subpel_y_q3, ref8, ref_stride,
+                            bd, subpel_search);
+  uint16_t *pred16 = CONVERT_TO_SHORTPTR(pred8);
+  uint16_t *dst16 = CONVERT_TO_SHORTPTR(comp_pred8);
+  /*The total number of pixels must be a multiple of 8 (e.g., 4x4).*/
+  assert(!(width * height & 7));
+  const int num_vecs = width * height >> 3;
+  for (int k = 0; k < num_vecs; ++k) {
+    const __m128i up = _mm_loadu_si128((const __m128i *)dst16);
+    const __m128i pr = _mm_loadu_si128((const __m128i *)pred16);
+    _mm_storeu_si128((__m128i *)dst16, _mm_avg_epu16(up, pr));
+    dst16 += 8;
+    pred16 += 8;
+  }
+}
+
+// Build the high-bitdepth upsampled prediction in comp_pred8, then combine
+// it with `pred8` in place using the distance-weighted average defined by
+// jcp_param (fwd/bck offsets), 8 uint16 pixels per iteration.
+void aom_highbd_dist_wtd_comp_avg_upsampled_pred_sse2(
+    MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col,
+    const MV *const mv, uint8_t *comp_pred8, const uint8_t *pred8, int width,
+    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref8,
+    int ref_stride, int bd, const DIST_WTD_COMP_PARAMS *jcp_param,
+    int subpel_search) {
+  uint16_t *pred = CONVERT_TO_SHORTPTR(pred8);
+  int n;
+  int i;
+  aom_highbd_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred8, width,
+                            height, subpel_x_q3, subpel_y_q3, ref8, ref_stride,
+                            bd, subpel_search);
+  assert(!(width * height & 7));
+  n = width * height >> 3;  // Number of 8-pixel vectors.
+
+  // Broadcast the two weights and the rounding constant once, outside the
+  // per-vector loop.
+  const int16_t wt0 = (int16_t)jcp_param->fwd_offset;
+  const int16_t wt1 = (int16_t)jcp_param->bck_offset;
+  const __m128i w0 = _mm_set1_epi16(wt0);
+  const __m128i w1 = _mm_set1_epi16(wt1);
+  const int16_t round = (int16_t)((1 << DIST_PRECISION_BITS) >> 1);
+  const __m128i r = _mm_set1_epi16(round);
+
+  uint16_t *comp_pred16 = CONVERT_TO_SHORTPTR(comp_pred8);
+  for (i = 0; i < n; i++) {
+    __m128i p0 = xx_loadu_128(comp_pred16);
+    __m128i p1 = xx_loadu_128(pred);
+
+    // comp_pred16 = (p0*w0 + p1*w1 + round) >> DIST_PRECISION_BITS
+    highbd_compute_dist_wtd_comp_avg(&p0, &p1, &w0, &w1, &r, comp_pred16);
+
+    comp_pred16 += 8;
+    pred += 8;
+  }
+}
+#endif // CONFIG_AV1_HIGHBITDEPTH
+
+// Build the upsampled prediction in comp_pred, then average it with `pred`
+// in place, 16 pixels (one XMM register) per iteration via _mm_avg_epu8.
+void aom_comp_avg_upsampled_pred_sse2(
+    MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col,
+    const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
+    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref,
+    int ref_stride, int subpel_search) {
+  aom_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred, width, height,
+                     subpel_x_q3, subpel_y_q3, ref, ref_stride, subpel_search);
+  /*The total number of pixels must be a multiple of 16 (e.g., 4x4).*/
+  assert(!(width * height & 15));
+  const int num_vecs = width * height >> 4;
+  for (int k = 0; k < num_vecs; ++k) {
+    const __m128i up = xx_loadu_128(comp_pred);
+    const __m128i pr = xx_loadu_128(pred);
+    xx_storeu_128(comp_pred, _mm_avg_epu8(up, pr));
+    pred += 16;
+    comp_pred += 16;
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/reconinter_enc_ssse3.c b/third_party/aom/av1/encoder/x86/reconinter_enc_ssse3.c
new file mode 100644
index 0000000000..df7aa95855
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/reconinter_enc_ssse3.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (c) 2021, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h> // SSE2
+#include <tmmintrin.h>
+
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+#include "config/av1_rtcd.h"
+
+#include "aom_dsp/x86/synonyms.h"
+
+// Distance-weighted average of two 16 x uint8 pixel vectors:
+//   result = (p0*w0 + p1*w1 + round) >> DIST_PRECISION_BITS
+// The weights are packed pairwise in `w` so that _mm_maddubs_epi16 (unsigned
+// pixels x signed weights, horizontally summed in pairs) computes the
+// weighted sum for interleaved p0/p1 bytes in one instruction.
+static INLINE void compute_dist_wtd_avg(__m128i *p0, __m128i *p1,
+                                        const __m128i *w, const __m128i *r,
+                                        void *const result) {
+  // Low 8 pixels: interleave, weight, round, shift.
+  __m128i p_lo = _mm_unpacklo_epi8(*p0, *p1);
+  __m128i mult_lo = _mm_maddubs_epi16(p_lo, *w);
+  __m128i round_lo = _mm_add_epi16(mult_lo, *r);
+  __m128i shift_lo = _mm_srai_epi16(round_lo, DIST_PRECISION_BITS);
+
+  // High 8 pixels.
+  __m128i p_hi = _mm_unpackhi_epi8(*p0, *p1);
+  __m128i mult_hi = _mm_maddubs_epi16(p_hi, *w);
+  __m128i round_hi = _mm_add_epi16(mult_hi, *r);
+  __m128i shift_hi = _mm_srai_epi16(round_hi, DIST_PRECISION_BITS);
+
+  // Repack to 16 bytes with unsigned saturation and store unaligned.
+  xx_storeu_128(result, _mm_packus_epi16(shift_lo, shift_hi));
+}
+
+// Build the upsampled prediction in comp_pred, then combine it with `pred`
+// in place using the distance-weighted average from jcp_param, 16 pixels
+// per iteration (SSSE3 _mm_maddubs_epi16 path).
+void aom_dist_wtd_comp_avg_upsampled_pred_ssse3(
+    MACROBLOCKD *xd, const struct AV1Common *const cm, int mi_row, int mi_col,
+    const MV *const mv, uint8_t *comp_pred, const uint8_t *pred, int width,
+    int height, int subpel_x_q3, int subpel_y_q3, const uint8_t *ref,
+    int ref_stride, const DIST_WTD_COMP_PARAMS *jcp_param, int subpel_search) {
+  int n;
+  int i;
+  aom_upsampled_pred(xd, cm, mi_row, mi_col, mv, comp_pred, width, height,
+                     subpel_x_q3, subpel_y_q3, ref, ref_stride, subpel_search);
+  /*The total number of pixels must be a multiple of 16 (e.g., 4x4).*/
+  assert(!(width * height & 15));
+  n = width * height >> 4;
+
+  // Interleave the two weights byte-wise (w0,w1,w0,w1,...) to match the
+  // unpacklo/unpackhi pixel interleaving inside compute_dist_wtd_avg.
+  const int8_t w0 = (int8_t)jcp_param->fwd_offset;
+  const int8_t w1 = (int8_t)jcp_param->bck_offset;
+  const __m128i w = _mm_set_epi8(w1, w0, w1, w0, w1, w0, w1, w0, w1, w0, w1, w0,
+                                 w1, w0, w1, w0);
+  const int16_t round = (int16_t)((1 << DIST_PRECISION_BITS) >> 1);
+  const __m128i r = _mm_set1_epi16(round);
+
+  for (i = 0; i < n; i++) {
+    __m128i p0 = xx_loadu_128(comp_pred);
+    __m128i p1 = xx_loadu_128(pred);
+
+    compute_dist_wtd_avg(&p0, &p1, &w, &r, comp_pred);
+
+    comp_pred += 16;
+    pred += 16;
+  }
+}
diff --git a/third_party/aom/av1/encoder/x86/temporal_filter_avx2.c b/third_party/aom/av1/encoder/x86/temporal_filter_avx2.c
new file mode 100644
index 0000000000..752d6f3f0b
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/temporal_filter_avx2.c
@@ -0,0 +1,647 @@
+/*
+ * Copyright (c) 2019, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <immintrin.h>
+
+#include "config/av1_rtcd.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/temporal_filter.h"
+
+// Row stride of the per-block squared-error buffer: block width plus two
+// columns of padding so a 5-wide window can be read at the edges.
+#define SSE_STRIDE (BW + 2)
+
+// Lane masks selecting 5 consecutive 32-bit values at offsets 0..3 of a
+// 256-bit vector (used to sum a 5-wide window per output pixel).
+DECLARE_ALIGNED(32, static const uint32_t, sse_bytemask[4][8]) = {
+  { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0, 0 },
+  { 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0, 0 },
+  { 0, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0 },
+  { 0, 0, 0, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF }
+};
+
+// 16-bit shuffle masks that replicate the first (resp. last) element to pad
+// the leftmost (resp. rightmost) column of a block.
+DECLARE_ALIGNED(32, static const uint8_t, shufflemask_16b[2][16]) = {
+  { 0, 1, 0, 1, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 },
+  { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 10, 11, 10, 11 }
+};
+
+// |AC + GI + 2*DF| where the arguments are pre-computed column differences.
+#define CALC_X_GRADIENT(AC, GI, DF, out) \
+  out = _mm256_abs_epi16( \
+      _mm256_add_epi16(_mm256_add_epi16(AC, GI), _mm256_slli_epi16(DF, 1)));
+
+// |AC - GI + 2*BH| where AC/GI are row sums and BH is a row difference.
+#define CALC_Y_GRADIENT(AC, GI, BH, out) \
+  out = _mm256_abs_epi16( \
+      _mm256_add_epi16(_mm256_sub_epi16(AC, GI), _mm256_slli_epi16(BH, 1)));
+
+// Estimate the noise level of one image plane: at every interior pixel whose
+// Sobel gradient magnitude is below edge_thresh, accumulate |Laplacian| and
+// a count; the final estimate is (sum / (6 * count)) * SQRT_PI_BY_2.
+// Returns -1.0 when fewer than 16 smooth pixels were found (unreliable).
+double av1_estimate_noise_from_single_plane_avx2(const uint8_t *src, int height,
+                                                 int width, int stride,
+                                                 int edge_thresh) {
+  int count = 0;
+  int64_t accum = 0;
+  // w32 stores width multiple of 32.
+  const int w32 = (width - 1) & ~0x1f;
+  const __m256i zero = _mm256_setzero_si256();
+  const __m256i edge_threshold = _mm256_set1_epi16(edge_thresh);
+  __m256i num_accumulator = zero;
+  __m256i sum_accumulator = zero;
+
+  // A | B | C
+  // D | E | F
+  // G | H | I
+  // g_x = (A - C) + (G - I) + 2*(D - F)
+  // g_y = (A + C) - (G + I) + 2*(B - H)
+  // v = 4*E - 2*(D+F+B+H) + (A+C+G+I)
+
+  // Process the width multiple of 32 here.
+  for (int w = 1; w < w32; w += 32) {
+    int h = 1;
+    const int start_idx = h * stride + w;
+    const int stride_0 = start_idx - stride;
+
+    __m256i num_accum_row_lvl = zero;
+    // Prime the 3x3 window: load the top two rows (A..C and D..F) once;
+    // each loop iteration below loads only the new bottom row (G..I).
+    const __m256i A = _mm256_loadu_si256((__m256i *)(&src[stride_0 - 1]));
+    const __m256i C = _mm256_loadu_si256((__m256i *)(&src[stride_0 + 1]));
+    const __m256i D = _mm256_loadu_si256((__m256i *)(&src[start_idx - 1]));
+    const __m256i F = _mm256_loadu_si256((__m256i *)(&src[start_idx + 1]));
+    __m256i B = _mm256_loadu_si256((__m256i *)(&src[stride_0]));
+    __m256i E = _mm256_loadu_si256((__m256i *)(&src[start_idx]));
+
+    // Widen bytes to 16-bit lanes (lo/hi halves of each 256-bit register).
+    const __m256i A_lo = _mm256_unpacklo_epi8(A, zero);
+    const __m256i A_hi = _mm256_unpackhi_epi8(A, zero);
+    const __m256i C_lo = _mm256_unpacklo_epi8(C, zero);
+    const __m256i C_hi = _mm256_unpackhi_epi8(C, zero);
+    const __m256i D_lo = _mm256_unpacklo_epi8(D, zero);
+    const __m256i D_hi = _mm256_unpackhi_epi8(D, zero);
+    const __m256i F_lo = _mm256_unpacklo_epi8(F, zero);
+    const __m256i F_hi = _mm256_unpackhi_epi8(F, zero);
+
+    __m256i sub_AC_lo = _mm256_sub_epi16(A_lo, C_lo);
+    __m256i sub_AC_hi = _mm256_sub_epi16(A_hi, C_hi);
+    __m256i sum_AC_lo = _mm256_add_epi16(A_lo, C_lo);
+    __m256i sum_AC_hi = _mm256_add_epi16(A_hi, C_hi);
+    __m256i sub_DF_lo = _mm256_sub_epi16(D_lo, F_lo);
+    __m256i sub_DF_hi = _mm256_sub_epi16(D_hi, F_hi);
+    __m256i sum_DF_lo = _mm256_add_epi16(D_lo, F_lo);
+    __m256i sum_DF_hi = _mm256_add_epi16(D_hi, F_hi);
+
+    for (; h < height - 1; h++) {
+      __m256i sum_GI_lo, sub_GI_lo, sum_GI_hi, sub_GI_hi, gx_lo, gy_lo, gx_hi,
+          gy_hi;
+      const int k = h * stride + w;
+      const __m256i G = _mm256_loadu_si256((__m256i *)(&src[k + stride - 1]));
+      const __m256i H = _mm256_loadu_si256((__m256i *)(&src[k + stride]));
+      const __m256i I = _mm256_loadu_si256((__m256i *)(&src[k + stride + 1]));
+
+      const __m256i B_lo = _mm256_unpacklo_epi8(B, zero);
+      const __m256i B_hi = _mm256_unpackhi_epi8(B, zero);
+      const __m256i G_lo = _mm256_unpacklo_epi8(G, zero);
+      const __m256i G_hi = _mm256_unpackhi_epi8(G, zero);
+      const __m256i I_lo = _mm256_unpacklo_epi8(I, zero);
+      const __m256i I_hi = _mm256_unpackhi_epi8(I, zero);
+      const __m256i H_lo = _mm256_unpacklo_epi8(H, zero);
+      const __m256i H_hi = _mm256_unpackhi_epi8(H, zero);
+
+      sub_GI_lo = _mm256_sub_epi16(G_lo, I_lo);
+      sub_GI_hi = _mm256_sub_epi16(G_hi, I_hi);
+      sum_GI_lo = _mm256_add_epi16(G_lo, I_lo);
+      sum_GI_hi = _mm256_add_epi16(G_hi, I_hi);
+      const __m256i sub_BH_lo = _mm256_sub_epi16(B_lo, H_lo);
+      const __m256i sub_BH_hi = _mm256_sub_epi16(B_hi, H_hi);
+
+      CALC_X_GRADIENT(sub_AC_lo, sub_GI_lo, sub_DF_lo, gx_lo)
+      CALC_Y_GRADIENT(sum_AC_lo, sum_GI_lo, sub_BH_lo, gy_lo)
+
+      // Gradient magnitude approximation: |g_x| + |g_y|.
+      const __m256i ga_lo = _mm256_add_epi16(gx_lo, gy_lo);
+
+      CALC_X_GRADIENT(sub_AC_hi, sub_GI_hi, sub_DF_hi, gx_hi)
+      CALC_Y_GRADIENT(sum_AC_hi, sum_GI_hi, sub_BH_hi, gy_hi)
+
+      const __m256i ga_hi = _mm256_add_epi16(gx_hi, gy_hi);
+
+      // Per-lane 0xFFFF where the pixel is "smooth" (gradient < threshold).
+      __m256i cmp_lo = _mm256_cmpgt_epi16(edge_threshold, ga_lo);
+      __m256i cmp_hi = _mm256_cmpgt_epi16(edge_threshold, ga_hi);
+      const __m256i comp_reg = _mm256_add_epi16(cmp_lo, cmp_hi);
+
+      // v = 4*E -2*(D+F+B+H) + (A+C+G+I)
+      // Skip the Laplacian entirely when no lane passed the edge test.
+      if (_mm256_movemask_epi8(comp_reg) != 0) {
+        const __m256i sum_BH_lo = _mm256_add_epi16(B_lo, H_lo);
+        const __m256i sum_BH_hi = _mm256_add_epi16(B_hi, H_hi);
+
+        // 2*(D+F+B+H)
+        const __m256i sum_DFBH_lo =
+            _mm256_slli_epi16(_mm256_add_epi16(sum_DF_lo, sum_BH_lo), 1);
+        // (A+C+G+I)
+        const __m256i sum_ACGI_lo = _mm256_add_epi16(sum_AC_lo, sum_GI_lo);
+        const __m256i sum_DFBH_hi =
+            _mm256_slli_epi16(_mm256_add_epi16(sum_DF_hi, sum_BH_hi), 1);
+        const __m256i sum_ACGI_hi = _mm256_add_epi16(sum_AC_hi, sum_GI_hi);
+
+        // Convert E register values from 8bit to 16bit
+        const __m256i E_lo = _mm256_unpacklo_epi8(E, zero);
+        const __m256i E_hi = _mm256_unpackhi_epi8(E, zero);
+
+        // 4*E - 2*(D+F+B+H)+ (A+C+G+I)
+        const __m256i var_lo_0 = _mm256_abs_epi16(_mm256_add_epi16(
+            _mm256_sub_epi16(_mm256_slli_epi16(E_lo, 2), sum_DFBH_lo),
+            sum_ACGI_lo));
+        const __m256i var_hi_0 = _mm256_abs_epi16(_mm256_add_epi16(
+            _mm256_sub_epi16(_mm256_slli_epi16(E_hi, 2), sum_DFBH_hi),
+            sum_ACGI_hi));
+        // Turn the 0xFFFF masks into 0/1 lanes, then zero out edge pixels.
+        cmp_lo = _mm256_srli_epi16(cmp_lo, 15);
+        cmp_hi = _mm256_srli_epi16(cmp_hi, 15);
+        const __m256i var_lo = _mm256_mullo_epi16(var_lo_0, cmp_lo);
+        const __m256i var_hi = _mm256_mullo_epi16(var_hi_0, cmp_hi);
+
+        num_accum_row_lvl = _mm256_add_epi16(num_accum_row_lvl, cmp_lo);
+        num_accum_row_lvl = _mm256_add_epi16(num_accum_row_lvl, cmp_hi);
+
+        sum_accumulator = _mm256_add_epi32(sum_accumulator,
+                                           _mm256_unpacklo_epi16(var_lo, zero));
+        sum_accumulator = _mm256_add_epi32(sum_accumulator,
+                                           _mm256_unpackhi_epi16(var_lo, zero));
+        sum_accumulator = _mm256_add_epi32(sum_accumulator,
+                                           _mm256_unpacklo_epi16(var_hi, zero));
+        sum_accumulator = _mm256_add_epi32(sum_accumulator,
+                                           _mm256_unpackhi_epi16(var_hi, zero));
+      }
+      // Roll the window down one row: current middle-row terms become the
+      // top-row terms, bottom-row terms become the middle-row terms.
+      sub_AC_lo = sub_DF_lo;
+      sub_AC_hi = sub_DF_hi;
+      sub_DF_lo = sub_GI_lo;
+      sub_DF_hi = sub_GI_hi;
+      sum_AC_lo = sum_DF_lo;
+      sum_AC_hi = sum_DF_hi;
+      sum_DF_lo = sum_GI_lo;
+      sum_DF_hi = sum_GI_hi;
+      B = E;
+      E = H;
+    }
+    // Widen the per-row 16-bit counts to 32-bit before they can overflow.
+    const __m256i num_0 = _mm256_unpacklo_epi16(num_accum_row_lvl, zero);
+    const __m256i num_1 = _mm256_unpackhi_epi16(num_accum_row_lvl, zero);
+    num_accumulator =
+        _mm256_add_epi32(num_accumulator, _mm256_add_epi32(num_0, num_1));
+  }
+
+  // Process the remaining width here.
+  for (int h = 1; h < height - 1; ++h) {
+    for (int w = w32 + 1; w < width - 1; ++w) {
+      const int k = h * stride + w;
+
+      // Compute sobel gradients
+      const int g_x = (src[k - stride - 1] - src[k - stride + 1]) +
+                      (src[k + stride - 1] - src[k + stride + 1]) +
+                      2 * (src[k - 1] - src[k + 1]);
+      const int g_y = (src[k - stride - 1] - src[k + stride - 1]) +
+                      (src[k - stride + 1] - src[k + stride + 1]) +
+                      2 * (src[k - stride] - src[k + stride]);
+      const int ga = abs(g_x) + abs(g_y);
+
+      if (ga < edge_thresh) {
+        // Find Laplacian
+        const int v =
+            4 * src[k] -
+            2 * (src[k - 1] + src[k + 1] + src[k - stride] + src[k + stride]) +
+            (src[k - stride - 1] + src[k - stride + 1] + src[k + stride - 1] +
+             src[k + stride + 1]);
+        accum += abs(v);
+        ++count;
+      }
+    }
+  }
+
+  // Horizontally reduce the vector accumulators into scalar accum/count.
+  // s0 s1 n0 n1 s2 s3 n2 n3
+  __m256i sum_avx = _mm256_hadd_epi32(sum_accumulator, num_accumulator);
+  __m128i sum_avx_lo = _mm256_castsi256_si128(sum_avx);
+  __m128i sum_avx_hi = _mm256_extractf128_si256(sum_avx, 1);
+  // s0+s2 s1+s3 n0+n2 n1+n3
+  __m128i sum_avx_1 = _mm_add_epi32(sum_avx_lo, sum_avx_hi);
+  // s0+s2+s1+s3 n0+n2+n1+n3
+  __m128i result = _mm_add_epi32(_mm_srli_si128(sum_avx_1, 4), sum_avx_1);
+
+  accum += _mm_cvtsi128_si32(result);
+  count += _mm_extract_epi32(result, 2);
+
+  // If very few smooth pels, return -1 since the estimate is unreliable.
+  return (count < 16) ? -1.0 : (double)accum / (6 * count) * SQRT_PI_BY_2;
+}
+
+// Per-pixel squared difference of two 16-wide blocks: for each row, widens
+// 16 bytes of each frame to 16-bit lanes and stores (f1 - f2)^2 into
+// frame_sse (stride sse_stride), zeroing the two padding columns after each
+// row so later 5-wide window reads never touch uninitialized memory.
+static AOM_FORCE_INLINE void get_squared_error_16x16_avx2(
+    const uint8_t *frame1, const unsigned int stride, const uint8_t *frame2,
+    const unsigned int stride2, const int block_width, const int block_height,
+    uint16_t *frame_sse, const unsigned int sse_stride) {
+  (void)block_width;
+  const uint8_t *src1 = frame1;
+  const uint8_t *src2 = frame2;
+  uint16_t *dst = frame_sse;
+  for (int i = 0; i < block_height; i++) {
+    __m128i vf1_128, vf2_128;
+    __m256i vf1, vf2, vdiff1, vsqdiff1;
+
+    vf1_128 = _mm_loadu_si128((__m128i *)(src1));
+    vf2_128 = _mm_loadu_si128((__m128i *)(src2));
+    vf1 = _mm256_cvtepu8_epi16(vf1_128);
+    vf2 = _mm256_cvtepu8_epi16(vf2_128);
+    vdiff1 = _mm256_sub_epi16(vf1, vf2);
+    vsqdiff1 = _mm256_mullo_epi16(vdiff1, vdiff1);
+
+    _mm256_storeu_si256((__m256i *)(dst), vsqdiff1);
+    // Set zero to uninitialized memory to avoid uninitialized loads later
+    *(int *)(dst + 16) = _mm_cvtsi128_si32(_mm_setzero_si128());
+
+    src1 += stride, src2 += stride2;
+    dst += sse_stride;
+  }
+}
+
+// Per-pixel squared difference of two 32-wide blocks. Computes |f1 - f2|
+// first as saturating unsigned bytes (max - min), widens to 16-bit, squares,
+// and stores two 16-lane vectors per row into frame_sse; the two padding
+// columns after each row are zeroed for later 5-wide window reads.
+static AOM_FORCE_INLINE void get_squared_error_32x32_avx2(
+    const uint8_t *frame1, const unsigned int stride, const uint8_t *frame2,
+    const unsigned int stride2, const int block_width, const int block_height,
+    uint16_t *frame_sse, const unsigned int sse_stride) {
+  (void)block_width;
+  const uint8_t *src1 = frame1;
+  const uint8_t *src2 = frame2;
+  uint16_t *dst = frame_sse;
+  for (int i = 0; i < block_height; i++) {
+    __m256i vsrc1, vsrc2, vmin, vmax, vdiff, vdiff1, vdiff2, vres1, vres2;
+
+    vsrc1 = _mm256_loadu_si256((__m256i *)src1);
+    vsrc2 = _mm256_loadu_si256((__m256i *)src2);
+    // |a - b| without widening: max(a,b) - min(a,b) in unsigned bytes.
+    vmax = _mm256_max_epu8(vsrc1, vsrc2);
+    vmin = _mm256_min_epu8(vsrc1, vsrc2);
+    vdiff = _mm256_subs_epu8(vmax, vmin);
+
+    __m128i vtmp1 = _mm256_castsi256_si128(vdiff);
+    __m128i vtmp2 = _mm256_extracti128_si256(vdiff, 1);
+    vdiff1 = _mm256_cvtepu8_epi16(vtmp1);
+    vdiff2 = _mm256_cvtepu8_epi16(vtmp2);
+
+    vres1 = _mm256_mullo_epi16(vdiff1, vdiff1);
+    vres2 = _mm256_mullo_epi16(vdiff2, vdiff2);
+    _mm256_storeu_si256((__m256i *)(dst), vres1);
+    _mm256_storeu_si256((__m256i *)(dst + 16), vres2);
+    // Set zero to uninitialized memory to avoid uninitialized loads later
+    *(int *)(dst + 32) = _mm_cvtsi128_si32(_mm_setzero_si128());
+
+    src1 += stride;
+    src2 += stride2;
+    dst += sse_stride;
+  }
+}
+
+// Load 8 uint16 values and widen them to 8 x uint32. At the first and last
+// 4-column group of the block, replicate the edge element so the 5-wide
+// window sum has valid (clamped) neighbors beyond the block boundary.
+static AOM_FORCE_INLINE __m256i xx_load_and_pad(uint16_t *src, int col,
+                                                int block_width) {
+  __m128i v128tmp = _mm_loadu_si128((__m128i *)(src));
+  if (col == 0) {
+    // For the first column, replicate the first element twice to the left
+    v128tmp = _mm_shuffle_epi8(v128tmp, *(__m128i *)shufflemask_16b[0]);
+  }
+  if (col == block_width - 4) {
+    // For the last column, replicate the last element twice to the right
+    v128tmp = _mm_shuffle_epi8(v128tmp, *(__m128i *)shufflemask_16b[1]);
+  }
+  return _mm256_cvtepu16_epi32(v128tmp);
+}
+
+// Sum the 5 consecutive 32-bit lanes of vsum starting at lane offset i
+// (i in 0..3), selected via sse_bytemask, and return the scalar total.
+static AOM_FORCE_INLINE int32_t xx_mask_and_hadd(__m256i vsum, int i) {
+  // Mask the required 5 values inside the vector
+  __m256i vtmp = _mm256_and_si256(vsum, *(__m256i *)sse_bytemask[i]);
+  __m128i v128a, v128b;
+  // Extract 256b as two 128b registers A and B
+  v128a = _mm256_castsi256_si128(vtmp);
+  v128b = _mm256_extracti128_si256(vtmp, 1);
+  // A = [A0+B0, A1+B1, A2+B2, A3+B3]
+  v128a = _mm_add_epi32(v128a, v128b);
+  // B = [A2+B2, A3+B3, 0, 0]
+  v128b = _mm_srli_si128(v128a, 8);
+  // A = [A0+B0+A2+B2, A1+B1+A3+B3, X, X]
+  v128a = _mm_add_epi32(v128a, v128b);
+  // B = [A1+B1+A3+B3, 0, 0, 0]
+  v128b = _mm_srli_si128(v128a, 4);
+  // A = [A0+B0+A2+B2+A1+B1+A3+B3, X, X, X]
+  v128a = _mm_add_epi32(v128a, v128b);
+  return _mm_extract_epi32(v128a, 0);
+}
+
+// AVX2 implementation of approx_exp()
+static AOM_INLINE __m256 approx_exp_avx2(__m256 y) {
+#define A ((1 << 23) / 0.69314718056f) // (1 << 23) / ln(2)
+#define B \
+ 127 // Offset for the exponent according to IEEE floating point standard.
+#define C 60801 // Magic number controls the accuracy of approximation
+ const __m256 multiplier = _mm256_set1_ps(A);
+ const __m256i offset = _mm256_set1_epi32(B * (1 << 23) - C);
+
+ y = _mm256_mul_ps(y, multiplier);
+ y = _mm256_castsi256_ps(_mm256_add_epi32(_mm256_cvttps_epi32(y), offset));
+ return y;
+#undef A
+#undef B
+#undef C
+}
+
+static void apply_temporal_filter(
+ const uint8_t *frame1, const unsigned int stride, const uint8_t *frame2,
+ const unsigned int stride2, const int block_width, const int block_height,
+ const int *subblock_mses, unsigned int *accumulator, uint16_t *count,
+ uint16_t *frame_sse, uint32_t *luma_sse_sum,
+ const double inv_num_ref_pixels, const double decay_factor,
+ const double inv_factor, const double weight_factor, double *d_factor,
+ int tf_wgt_calc_lvl) {
+ assert(((block_width == 16) || (block_width == 32)) &&
+ ((block_height == 16) || (block_height == 32)));
+
+ uint32_t acc_5x5_sse[BH][BW];
+
+ if (block_width == 32) {
+ get_squared_error_32x32_avx2(frame1, stride, frame2, stride2, block_width,
+ block_height, frame_sse, SSE_STRIDE);
+ } else {
+ get_squared_error_16x16_avx2(frame1, stride, frame2, stride2, block_width,
+ block_height, frame_sse, SSE_STRIDE);
+ }
+
+ __m256i vsrc[5];
+
+ // Traverse 4 columns at a time
+ // First and last columns will require padding
+ for (int col = 0; col < block_width; col += 4) {
+ uint16_t *src = (col) ? frame_sse + col - 2 : frame_sse;
+
+ // Load and pad(for first and last col) 3 rows from the top
+ for (int i = 2; i < 5; i++) {
+ vsrc[i] = xx_load_and_pad(src, col, block_width);
+ src += SSE_STRIDE;
+ }
+
+ // Copy first row to first 2 vectors
+ vsrc[0] = vsrc[2];
+ vsrc[1] = vsrc[2];
+
+ for (int row = 0; row < block_height; row++) {
+ __m256i vsum = _mm256_setzero_si256();
+
+ // Add 5 consecutive rows
+ for (int i = 0; i < 5; i++) {
+ vsum = _mm256_add_epi32(vsum, vsrc[i]);
+ }
+
+ // Push all elements by one element to the top
+ for (int i = 0; i < 4; i++) {
+ vsrc[i] = vsrc[i + 1];
+ }
+
+ // Load next row to the last element
+ if (row <= block_height - 4) {
+ vsrc[4] = xx_load_and_pad(src, col, block_width);
+ src += SSE_STRIDE;
+ } else {
+ vsrc[4] = vsrc[3];
+ }
+
+ // Accumulate the sum horizontally
+ for (int i = 0; i < 4; i++) {
+ acc_5x5_sse[row][col + i] = xx_mask_and_hadd(vsum, i);
+ }
+ }
+ }
+
+ double subblock_mses_scaled[4];
+ double d_factor_decayed[4];
+ for (int idx = 0; idx < 4; idx++) {
+ subblock_mses_scaled[idx] = subblock_mses[idx] * inv_factor;
+ d_factor_decayed[idx] = d_factor[idx] * decay_factor;
+ }
+ if (tf_wgt_calc_lvl == 0) {
+ for (int i = 0, k = 0; i < block_height; i++) {
+ const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+ for (int j = 0; j < block_width; j++, k++) {
+ const int pixel_value = frame2[i * stride2 + j];
+ uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+
+ const double window_error = diff_sse * inv_num_ref_pixels;
+ const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+ const double combined_error =
+ weight_factor * window_error + subblock_mses_scaled[subblock_idx];
+
+ double scaled_error = combined_error * d_factor_decayed[subblock_idx];
+ scaled_error = AOMMIN(scaled_error, 7);
+ const int weight = (int)(exp(-scaled_error) * TF_WEIGHT_SCALE);
+
+ count[k] += weight;
+ accumulator[k] += weight * pixel_value;
+ }
+ }
+ } else {
+ __m256d subblock_mses_reg[4];
+ __m256d d_factor_mul_n_decay_qr_invs[4];
+ const __m256 zero = _mm256_set1_ps(0.0f);
+ const __m256 point_five = _mm256_set1_ps(0.5f);
+ const __m256 seven = _mm256_set1_ps(7.0f);
+ const __m256d inv_num_ref_pixel_256bit = _mm256_set1_pd(inv_num_ref_pixels);
+ const __m256d weight_factor_256bit = _mm256_set1_pd(weight_factor);
+ const __m256 tf_weight_scale = _mm256_set1_ps((float)TF_WEIGHT_SCALE);
+ // Maintain registers to hold mse and d_factor at subblock level.
+ subblock_mses_reg[0] = _mm256_set1_pd(subblock_mses_scaled[0]);
+ subblock_mses_reg[1] = _mm256_set1_pd(subblock_mses_scaled[1]);
+ subblock_mses_reg[2] = _mm256_set1_pd(subblock_mses_scaled[2]);
+ subblock_mses_reg[3] = _mm256_set1_pd(subblock_mses_scaled[3]);
+ d_factor_mul_n_decay_qr_invs[0] = _mm256_set1_pd(d_factor_decayed[0]);
+ d_factor_mul_n_decay_qr_invs[1] = _mm256_set1_pd(d_factor_decayed[1]);
+ d_factor_mul_n_decay_qr_invs[2] = _mm256_set1_pd(d_factor_decayed[2]);
+ d_factor_mul_n_decay_qr_invs[3] = _mm256_set1_pd(d_factor_decayed[3]);
+
+ for (int i = 0; i < block_height; i++) {
+ const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+ uint32_t *luma_sse_sum_temp = luma_sse_sum + i * BW;
+ for (int j = 0; j < block_width; j += 8) {
+ const __m256i acc_sse =
+ _mm256_lddqu_si256((__m256i *)(acc_5x5_sse[i] + j));
+ const __m256i luma_sse =
+ _mm256_lddqu_si256((__m256i *)((luma_sse_sum_temp + j)));
+
+ // uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+ const __m256i diff_sse = _mm256_add_epi32(acc_sse, luma_sse);
+
+ const __m256d diff_sse_pd_1 =
+ _mm256_cvtepi32_pd(_mm256_castsi256_si128(diff_sse));
+ const __m256d diff_sse_pd_2 =
+ _mm256_cvtepi32_pd(_mm256_extracti128_si256(diff_sse, 1));
+
+ // const double window_error = diff_sse * inv_num_ref_pixels;
+ const __m256d window_error_1 =
+ _mm256_mul_pd(diff_sse_pd_1, inv_num_ref_pixel_256bit);
+ const __m256d window_error_2 =
+ _mm256_mul_pd(diff_sse_pd_2, inv_num_ref_pixel_256bit);
+
+ // const int subblock_idx = y_blk_raster_offset + (j >= block_width /
+ // 2);
+ const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+ const __m256d blk_error = subblock_mses_reg[subblock_idx];
+
+ // const double combined_error =
+ // weight_factor *window_error + subblock_mses_scaled[subblock_idx];
+ const __m256d combined_error_1 = _mm256_add_pd(
+ _mm256_mul_pd(window_error_1, weight_factor_256bit), blk_error);
+
+ const __m256d combined_error_2 = _mm256_add_pd(
+ _mm256_mul_pd(window_error_2, weight_factor_256bit), blk_error);
+
+ // d_factor_decayed[subblock_idx]
+ const __m256d d_fact_mul_n_decay =
+ d_factor_mul_n_decay_qr_invs[subblock_idx];
+
+ // double scaled_error = combined_error *
+ // d_factor_decayed[subblock_idx];
+ const __m256d scaled_error_1 =
+ _mm256_mul_pd(combined_error_1, d_fact_mul_n_decay);
+ const __m256d scaled_error_2 =
+ _mm256_mul_pd(combined_error_2, d_fact_mul_n_decay);
+
+ const __m128 scaled_error_ps_1 = _mm256_cvtpd_ps(scaled_error_1);
+ const __m128 scaled_error_ps_2 = _mm256_cvtpd_ps(scaled_error_2);
+
+ const __m256 scaled_error_ps = _mm256_insertf128_ps(
+ _mm256_castps128_ps256(scaled_error_ps_1), scaled_error_ps_2, 0x1);
+
+ // scaled_error = AOMMIN(scaled_error, 7);
+ const __m256 scaled_diff_ps = _mm256_min_ps(scaled_error_ps, seven);
+ const __m256 minus_scaled_diff_ps = _mm256_sub_ps(zero, scaled_diff_ps);
+ // const int weight =
+ //(int)(approx_exp((float)-scaled_error) * TF_WEIGHT_SCALE + 0.5f);
+ const __m256 exp_result = approx_exp_avx2(minus_scaled_diff_ps);
+ const __m256 scale_weight_exp_result =
+ _mm256_mul_ps(exp_result, tf_weight_scale);
+ const __m256 round_result =
+ _mm256_add_ps(scale_weight_exp_result, point_five);
+ __m256i weights_in_32bit = _mm256_cvttps_epi32(round_result);
+
+ __m128i weights_in_16bit =
+ _mm_packus_epi32(_mm256_castsi256_si128(weights_in_32bit),
+ _mm256_extractf128_si256(weights_in_32bit, 0x1));
+
+ // count[k] += weight;
+ // accumulator[k] += weight * pixel_value;
+ const int stride_idx = i * stride2 + j;
+ const __m128i count_array =
+ _mm_loadu_si128((__m128i *)(count + stride_idx));
+ _mm_storeu_si128((__m128i *)(count + stride_idx),
+ _mm_add_epi16(count_array, weights_in_16bit));
+
+ const __m256i accumulator_array =
+ _mm256_loadu_si256((__m256i *)(accumulator + stride_idx));
+ const __m128i pred_values =
+ _mm_loadl_epi64((__m128i *)(frame2 + stride_idx));
+
+ const __m256i pred_values_u32 = _mm256_cvtepu8_epi32(pred_values);
+ const __m256i mull_frame2_weight_u32 =
+ _mm256_mullo_epi32(pred_values_u32, weights_in_32bit);
+ _mm256_storeu_si256(
+ (__m256i *)(accumulator + stride_idx),
+ _mm256_add_epi32(accumulator_array, mull_frame2_weight_u32));
+ }
+ }
+ }
+}
+
+// AVX2 entry point for temporal filtering of one 32x32 predictor block
+// (low bit-depth input only). For every plane it derives a per-pixel
+// filter weight that decays exponentially with the windowed squared
+// error, the subblock MSE, the motion-vector distance factor and the
+// q/strength decay, then accumulates weight * predictor into `accum`
+// and the weight itself into `count`.
+void av1_apply_temporal_filter_avx2(
+ const YV12_BUFFER_CONFIG *frame_to_filter, const MACROBLOCKD *mbd,
+ const BLOCK_SIZE block_size, const int mb_row, const int mb_col,
+ const int num_planes, const double *noise_levels, const MV *subblock_mvs,
+ const int *subblock_mses, const int q_factor, const int filter_strength,
+ int tf_wgt_calc_lvl, const uint8_t *pred, uint32_t *accum,
+ uint16_t *count) {
+ const int is_high_bitdepth = frame_to_filter->flags & YV12_FLAG_HIGHBITDEPTH;
+ assert(block_size == BLOCK_32X32 && "Only support 32x32 block with avx2!");
+ assert(TF_WINDOW_LENGTH == 5 && "Only support window length 5 with avx2!");
+ assert(!is_high_bitdepth && "Only support low bit-depth with avx2!");
+ assert(num_planes >= 1 && num_planes <= MAX_MB_PLANE);
+ (void)is_high_bitdepth;
+
+ const int mb_height = block_size_high[block_size];
+ const int mb_width = block_size_wide[block_size];
+ const int frame_height = frame_to_filter->y_crop_height;
+ const int frame_width = frame_to_filter->y_crop_width;
+ const int min_frame_size = AOMMIN(frame_height, frame_width);
+ // Variables to simplify combined error calculation.
+ const double inv_factor = 1.0 / ((TF_WINDOW_BLOCK_BALANCE_WEIGHT + 1) *
+ TF_SEARCH_ERROR_NORM_WEIGHT);
+ const double weight_factor =
+ (double)TF_WINDOW_BLOCK_BALANCE_WEIGHT * inv_factor;
+ // Adjust filtering based on q.
+ // Larger q -> stronger filtering -> larger weight.
+ // Smaller q -> weaker filtering -> smaller weight.
+ double q_decay = pow((double)q_factor / TF_Q_DECAY_THRESHOLD, 2);
+ q_decay = CLIP(q_decay, 1e-5, 1);
+ if (q_factor >= TF_QINDEX_CUTOFF) {
+ // Max q_factor is 255, therefore the upper bound of q_decay is 8.
+ // We do not need a clip here.
+ q_decay = 0.5 * pow((double)q_factor / 64, 2);
+ }
+ // Smaller strength -> smaller filtering weight.
+ double s_decay = pow((double)filter_strength / TF_STRENGTH_THRESHOLD, 2);
+ s_decay = CLIP(s_decay, 1e-5, 1);
+ double d_factor[4] = { 0 };
+ // Scratch buffers reused across planes: per-pixel squared error (rows
+ // padded to SSE_STRIDE) and the summed co-located luma SSE for chroma.
+ uint16_t frame_sse[SSE_STRIDE * BH] = { 0 };
+ uint32_t luma_sse_sum[BW * BH] = { 0 };
+
+ for (int subblock_idx = 0; subblock_idx < 4; subblock_idx++) {
+ // Larger motion vector -> smaller filtering weight.
+ const MV mv = subblock_mvs[subblock_idx];
+ const double distance = sqrt(pow(mv.row, 2) + pow(mv.col, 2));
+ double distance_threshold = min_frame_size * TF_SEARCH_DISTANCE_THRESHOLD;
+ distance_threshold = AOMMAX(distance_threshold, 1);
+ d_factor[subblock_idx] = distance / distance_threshold;
+ d_factor[subblock_idx] = AOMMAX(d_factor[subblock_idx], 1);
+ }
+
+ // Handle planes in sequence.
+ int plane_offset = 0;
+ for (int plane = 0; plane < num_planes; ++plane) {
+ const uint32_t plane_h = mb_height >> mbd->plane[plane].subsampling_y;
+ const uint32_t plane_w = mb_width >> mbd->plane[plane].subsampling_x;
+ const uint32_t frame_stride = frame_to_filter->strides[plane == 0 ? 0 : 1];
+ const int frame_offset = mb_row * plane_h * frame_stride + mb_col * plane_w;
+
+ const uint8_t *ref = frame_to_filter->buffers[plane] + frame_offset;
+ const int ss_x_shift =
+ mbd->plane[plane].subsampling_x - mbd->plane[AOM_PLANE_Y].subsampling_x;
+ const int ss_y_shift =
+ mbd->plane[plane].subsampling_y - mbd->plane[AOM_PLANE_Y].subsampling_y;
+ // Pixels in the 5x5 window plus, for chroma planes, the co-located luma
+ // samples folded in through luma_sse_sum.
+ const int num_ref_pixels = TF_WINDOW_LENGTH * TF_WINDOW_LENGTH +
+ ((plane) ? (1 << (ss_x_shift + ss_y_shift)) : 0);
+ const double inv_num_ref_pixels = 1.0 / num_ref_pixels;
+ // Larger noise -> larger filtering weight.
+ const double n_decay = 0.5 + log(2 * noise_levels[plane] + 5.0);
+ // Decay factors for non-local mean approach.
+ const double decay_factor = 1 / (n_decay * q_decay * s_decay);
+
+ // Filter U-plane and V-plane using Y-plane. This is because motion
+ // search is only done on Y-plane, so the information from Y-plane
+ // will be more accurate. The luma sse sum is reused in both chroma
+ // planes.
+ if (plane == AOM_PLANE_U) {
+ for (unsigned int i = 0, k = 0; i < plane_h; i++) {
+ for (unsigned int j = 0; j < plane_w; j++, k++) {
+ for (int ii = 0; ii < (1 << ss_y_shift); ++ii) {
+ for (int jj = 0; jj < (1 << ss_x_shift); ++jj) {
+ const int yy = (i << ss_y_shift) + ii; // Y-coord on Y-plane.
+ const int xx = (j << ss_x_shift) + jj; // X-coord on Y-plane.
+ luma_sse_sum[i * BW + j] += frame_sse[yy * SSE_STRIDE + xx];
+ }
+ }
+ }
+ }
+ }
+
+ apply_temporal_filter(ref, frame_stride, pred + plane_offset, plane_w,
+ plane_w, plane_h, subblock_mses, accum + plane_offset,
+ count + plane_offset, frame_sse, luma_sse_sum,
+ inv_num_ref_pixels, decay_factor, inv_factor,
+ weight_factor, d_factor, tf_wgt_calc_lvl);
+ plane_offset += plane_h * plane_w;
+ }
+}
diff --git a/third_party/aom/av1/encoder/x86/temporal_filter_sse2.c b/third_party/aom/av1/encoder/x86/temporal_filter_sse2.c
new file mode 100644
index 0000000000..842d3b13c8
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/temporal_filter_sse2.c
@@ -0,0 +1,320 @@
+/*
+ * Copyright (c) 2019, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <emmintrin.h>
+
+#include "config/av1_rtcd.h"
+#include "aom_dsp/mathutils.h"
+#include "av1/encoder/encoder.h"
+#include "av1/encoder/temporal_filter.h"
+
+// For the squared error buffer, keep a padding for 4 samples
+#define SSE_STRIDE (BW + 4)
+
+// Masks selecting a 5-lane (TF_WINDOW_LENGTH) window of 32-bit values that
+// starts at lane i (0 <= i < 4) of an 8-lane quantity held in two 4-lane
+// vectors: [i][0] masks the low vector, [i][1] the high vector.
+DECLARE_ALIGNED(32, static const uint32_t, sse_bytemask_2x4[4][2][4]) = {
+ { { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000 } },
+ { { 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000, 0x00000000 } },
+ { { 0x00000000, 0x00000000, 0xFFFFFFFF, 0xFFFFFFFF },
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0x00000000 } },
+ { { 0x00000000, 0x00000000, 0x00000000, 0xFFFFFFFF },
+ { 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF } }
+};
+
+// Computes per-pixel squared differences between frame1 and frame2 for a
+// block_width x block_height block (block_width processed 16 pixels at a
+// time). The 16-bit results are written with a 2-sample column offset into
+// frame_sse so the 5-tap filter can read a left/right border; the 2-sample
+// borders themselves are zeroed to avoid reading uninitialized memory.
+static void get_squared_error(const uint8_t *frame1, const unsigned int stride,
+ const uint8_t *frame2, const unsigned int stride2,
+ const int block_width, const int block_height,
+ uint16_t *frame_sse,
+ const unsigned int dst_stride) {
+ const uint8_t *src1 = frame1;
+ const uint8_t *src2 = frame2;
+ uint16_t *dst = frame_sse;
+
+ for (int i = 0; i < block_height; i++) {
+ for (int j = 0; j < block_width; j += 16) {
+ // Set zero to uninitialized memory to avoid uninitialized loads later
+ *(int *)(dst) = _mm_cvtsi128_si32(_mm_setzero_si128());
+
+ __m128i vsrc1 = _mm_loadu_si128((__m128i *)(src1 + j));
+ __m128i vsrc2 = _mm_loadu_si128((__m128i *)(src2 + j));
+
+ // |src1 - src2| for unsigned bytes via max/min + saturating subtract.
+ __m128i vmax = _mm_max_epu8(vsrc1, vsrc2);
+ __m128i vmin = _mm_min_epu8(vsrc1, vsrc2);
+ __m128i vdiff = _mm_subs_epu8(vmax, vmin);
+
+ // Widen the absolute differences to 16 bits and square them.
+ __m128i vzero = _mm_setzero_si128();
+ __m128i vdiff1 = _mm_unpacklo_epi8(vdiff, vzero);
+ __m128i vdiff2 = _mm_unpackhi_epi8(vdiff, vzero);
+
+ __m128i vres1 = _mm_mullo_epi16(vdiff1, vdiff1);
+ __m128i vres2 = _mm_mullo_epi16(vdiff2, vdiff2);
+
+ // Store at a +2 column offset to leave room for the left border.
+ _mm_storeu_si128((__m128i *)(dst + j + 2), vres1);
+ _mm_storeu_si128((__m128i *)(dst + j + 10), vres2);
+ }
+
+ // Set zero to uninitialized memory to avoid uninitialized loads later
+ *(int *)(dst + block_width + 2) = _mm_cvtsi128_si32(_mm_setzero_si128());
+
+ src1 += stride;
+ src2 += stride2;
+ dst += dst_stride;
+ }
+}
+
+// Loads 8 consecutive 16-bit SSE values from src, zero-extends them into
+// two 4x32-bit vectors (dstvec[0] = low half, dstvec[1] = high half), and
+// replicates the edge sample for the first/last column so the horizontal
+// 5-tap window never reads outside the block.
+static void xx_load_and_pad(uint16_t *src, __m128i *dstvec, int col,
+ int block_width) {
+ __m128i vtmp = _mm_loadu_si128((__m128i *)src);
+ __m128i vzero = _mm_setzero_si128();
+ __m128i vtmp1 = _mm_unpacklo_epi16(vtmp, vzero);
+ __m128i vtmp2 = _mm_unpackhi_epi16(vtmp, vzero);
+ // For the first column, replicate the first element twice to the left
+ dstvec[0] = (col) ? vtmp1 : _mm_shuffle_epi32(vtmp1, 0xEA);
+ // For the last column, replicate the last element twice to the right
+ dstvec[1] = (col < block_width - 4) ? vtmp2 : _mm_shuffle_epi32(vtmp2, 0x54);
+}
+
+// Returns the sum of the 5 consecutive 32-bit lanes starting at lane i
+// (0 <= i < 4) of the 8-lane value held in (vsum1, vsum2), using
+// sse_bytemask_2x4 to zero the lanes outside the window before the
+// horizontal reduction.
+static int32_t xx_mask_and_hadd(__m128i vsum1, __m128i vsum2, int i) {
+ __m128i veca, vecb;
+ // Mask and obtain the required 5 values inside the vector
+ veca = _mm_and_si128(vsum1, *(__m128i *)sse_bytemask_2x4[i][0]);
+ vecb = _mm_and_si128(vsum2, *(__m128i *)sse_bytemask_2x4[i][1]);
+ // A = [A0+B0, A1+B1, A2+B2, A3+B3]
+ veca = _mm_add_epi32(veca, vecb);
+ // B = [A2+B2, A3+B3, 0, 0]
+ vecb = _mm_srli_si128(veca, 8);
+ // A = [A0+B0+A2+B2, A1+B1+A3+B3, X, X]
+ veca = _mm_add_epi32(veca, vecb);
+ // B = [A1+B1+A3+B3, 0, 0, 0]
+ vecb = _mm_srli_si128(veca, 4);
+ // A = [A0+B0+A2+B2+A1+B1+A3+B3, X, X, X]
+ veca = _mm_add_epi32(veca, vecb);
+ return _mm_cvtsi128_si32(veca);
+}
+
+// Core SSE2 temporal filter for a single plane: computes, for every pixel,
+// the sum of squared errors over an edge-padded 5x5 window (plus the
+// co-located luma SSE for chroma), converts it to an exponentially
+// decaying weight, and accumulates the weight into `count` and
+// weight * predictor into `accumulator`.
+static void apply_temporal_filter(
+ const uint8_t *frame1, const unsigned int stride, const uint8_t *frame2,
+ const unsigned int stride2, const int block_width, const int block_height,
+ const int *subblock_mses, unsigned int *accumulator, uint16_t *count,
+ uint16_t *frame_sse, uint32_t *luma_sse_sum,
+ const double inv_num_ref_pixels, const double decay_factor,
+ const double inv_factor, const double weight_factor, double *d_factor,
+ int tf_wgt_calc_lvl) {
+ assert(((block_width == 16) || (block_width == 32)) &&
+ ((block_height == 16) || (block_height == 32)));
+
+ // Per-pixel sum of squared errors over the 5x5 window.
+ uint32_t acc_5x5_sse[BH][BW];
+
+ get_squared_error(frame1, stride, frame2, stride2, block_width, block_height,
+ frame_sse, SSE_STRIDE);
+
+ // Sliding window of 5 rows; each row is 8 values split over 2 vectors.
+ __m128i vsrc[5][2];
+
+ // Traverse 4 columns at a time
+ // First and last columns will require padding
+ for (int col = 0; col < block_width; col += 4) {
+ uint16_t *src = frame_sse + col;
+
+ // Load and pad(for first and last col) 3 rows from the top
+ for (int i = 2; i < 5; i++) {
+ xx_load_and_pad(src, vsrc[i], col, block_width);
+ src += SSE_STRIDE;
+ }
+
+ // Padding for top 2 rows
+ vsrc[0][0] = vsrc[2][0];
+ vsrc[0][1] = vsrc[2][1];
+ vsrc[1][0] = vsrc[2][0];
+ vsrc[1][1] = vsrc[2][1];
+
+ for (int row = 0; row < block_height; row++) {
+ __m128i vsum1 = _mm_setzero_si128();
+ __m128i vsum2 = _mm_setzero_si128();
+
+ // Add 5 consecutive rows
+ for (int i = 0; i < 5; i++) {
+ vsum1 = _mm_add_epi32(vsrc[i][0], vsum1);
+ vsum2 = _mm_add_epi32(vsrc[i][1], vsum2);
+ }
+
+ // Push all elements by one element to the top
+ for (int i = 0; i < 4; i++) {
+ vsrc[i][0] = vsrc[i + 1][0];
+ vsrc[i][1] = vsrc[i + 1][1];
+ }
+
+ if (row <= block_height - 4) {
+ // Load next row
+ xx_load_and_pad(src, vsrc[4], col, block_width);
+ src += SSE_STRIDE;
+ } else {
+ // Padding for bottom 2 rows
+ vsrc[4][0] = vsrc[3][0];
+ vsrc[4][1] = vsrc[3][1];
+ }
+
+ // Accumulate the sum horizontally
+ for (int i = 0; i < 4; i++) {
+ acc_5x5_sse[row][col + i] = xx_mask_and_hadd(vsum1, vsum2, i);
+ }
+ }
+ }
+
+ // Pre-scale the per-subblock terms once, outside the pixel loops.
+ double subblock_mses_scaled[4];
+ double d_factor_decayed[4];
+ for (int idx = 0; idx < 4; idx++) {
+ subblock_mses_scaled[idx] = subblock_mses[idx] * inv_factor;
+ d_factor_decayed[idx] = d_factor[idx] * decay_factor;
+ }
+ // tf_wgt_calc_lvl == 0 uses the exact exp(); otherwise the faster
+ // approx_exp() with rounding is used for the weight computation.
+ if (tf_wgt_calc_lvl == 0) {
+ for (int i = 0, k = 0; i < block_height; i++) {
+ const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+ for (int j = 0; j < block_width; j++, k++) {
+ const int pixel_value = frame2[i * stride2 + j];
+ uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+
+ const double window_error = diff_sse * inv_num_ref_pixels;
+ const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+ const double combined_error =
+ weight_factor * window_error + subblock_mses_scaled[subblock_idx];
+
+ double scaled_error = combined_error * d_factor_decayed[subblock_idx];
+ scaled_error = AOMMIN(scaled_error, 7);
+ const int weight = (int)(exp(-scaled_error) * TF_WEIGHT_SCALE);
+
+ count[k] += weight;
+ accumulator[k] += weight * pixel_value;
+ }
+ }
+ } else {
+ for (int i = 0, k = 0; i < block_height; i++) {
+ const int y_blk_raster_offset = (i >= block_height / 2) * 2;
+ for (int j = 0; j < block_width; j++, k++) {
+ const int pixel_value = frame2[i * stride2 + j];
+ uint32_t diff_sse = acc_5x5_sse[i][j] + luma_sse_sum[i * BW + j];
+
+ const double window_error = diff_sse * inv_num_ref_pixels;
+ const int subblock_idx = y_blk_raster_offset + (j >= block_width / 2);
+ const double combined_error =
+ weight_factor * window_error + subblock_mses_scaled[subblock_idx];
+
+ double scaled_error = combined_error * d_factor_decayed[subblock_idx];
+ scaled_error = AOMMIN(scaled_error, 7);
+ const float fweight =
+ approx_exp((float)-scaled_error) * TF_WEIGHT_SCALE;
+ const int weight = iroundpf(fweight);
+ count[k] += weight;
+ accumulator[k] += weight * pixel_value;
+ }
+ }
+ }
+}
+
+// SSE2 entry point for temporal filtering of one 32x32 predictor block
+// (low bit-depth input only). Mirrors the AVX2 version: for every plane
+// it derives a per-pixel filter weight that decays exponentially with the
+// windowed squared error, subblock MSE, motion-vector distance factor and
+// q/strength decay, then accumulates weight * predictor into `accum` and
+// the weight itself into `count`.
+void av1_apply_temporal_filter_sse2(
+ const YV12_BUFFER_CONFIG *frame_to_filter, const MACROBLOCKD *mbd,
+ const BLOCK_SIZE block_size, const int mb_row, const int mb_col,
+ const int num_planes, const double *noise_levels, const MV *subblock_mvs,
+ const int *subblock_mses, const int q_factor, const int filter_strength,
+ int tf_wgt_calc_lvl, const uint8_t *pred, uint32_t *accum,
+ uint16_t *count) {
+ const int is_high_bitdepth = frame_to_filter->flags & YV12_FLAG_HIGHBITDEPTH;
+ assert(block_size == BLOCK_32X32 && "Only support 32x32 block with sse2!");
+ assert(TF_WINDOW_LENGTH == 5 && "Only support window length 5 with sse2!");
+ assert(!is_high_bitdepth && "Only support low bit-depth with sse2!");
+ assert(num_planes >= 1 && num_planes <= MAX_MB_PLANE);
+ (void)is_high_bitdepth;
+
+ const int mb_height = block_size_high[block_size];
+ const int mb_width = block_size_wide[block_size];
+ const int frame_height = frame_to_filter->y_crop_height;
+ const int frame_width = frame_to_filter->y_crop_width;
+ const int min_frame_size = AOMMIN(frame_height, frame_width);
+ // Variables to simplify combined error calculation.
+ const double inv_factor = 1.0 / ((TF_WINDOW_BLOCK_BALANCE_WEIGHT + 1) *
+ TF_SEARCH_ERROR_NORM_WEIGHT);
+ const double weight_factor =
+ (double)TF_WINDOW_BLOCK_BALANCE_WEIGHT * inv_factor;
+ // Adjust filtering based on q.
+ // Larger q -> stronger filtering -> larger weight.
+ // Smaller q -> weaker filtering -> smaller weight.
+ double q_decay = pow((double)q_factor / TF_Q_DECAY_THRESHOLD, 2);
+ q_decay = CLIP(q_decay, 1e-5, 1);
+ if (q_factor >= TF_QINDEX_CUTOFF) {
+ // Max q_factor is 255, therefore the upper bound of q_decay is 8.
+ // We do not need a clip here.
+ q_decay = 0.5 * pow((double)q_factor / 64, 2);
+ }
+ // Smaller strength -> smaller filtering weight.
+ double s_decay = pow((double)filter_strength / TF_STRENGTH_THRESHOLD, 2);
+ s_decay = CLIP(s_decay, 1e-5, 1);
+ double d_factor[4] = { 0 };
+ // Scratch buffers reused across planes: per-pixel squared error (rows
+ // padded to SSE_STRIDE) and the summed co-located luma SSE for chroma.
+ uint16_t frame_sse[SSE_STRIDE * BH] = { 0 };
+ uint32_t luma_sse_sum[BW * BH] = { 0 };
+
+ for (int subblock_idx = 0; subblock_idx < 4; subblock_idx++) {
+ // Larger motion vector -> smaller filtering weight.
+ const MV mv = subblock_mvs[subblock_idx];
+ const double distance = sqrt(pow(mv.row, 2) + pow(mv.col, 2));
+ double distance_threshold = min_frame_size * TF_SEARCH_DISTANCE_THRESHOLD;
+ distance_threshold = AOMMAX(distance_threshold, 1);
+ d_factor[subblock_idx] = distance / distance_threshold;
+ d_factor[subblock_idx] = AOMMAX(d_factor[subblock_idx], 1);
+ }
+
+ // Handle planes in sequence.
+ int plane_offset = 0;
+ for (int plane = 0; plane < num_planes; ++plane) {
+ const uint32_t plane_h = mb_height >> mbd->plane[plane].subsampling_y;
+ const uint32_t plane_w = mb_width >> mbd->plane[plane].subsampling_x;
+ const uint32_t frame_stride = frame_to_filter->strides[plane == 0 ? 0 : 1];
+ const int frame_offset = mb_row * plane_h * frame_stride + mb_col * plane_w;
+
+ const uint8_t *ref = frame_to_filter->buffers[plane] + frame_offset;
+ const int ss_x_shift =
+ mbd->plane[plane].subsampling_x - mbd->plane[AOM_PLANE_Y].subsampling_x;
+ const int ss_y_shift =
+ mbd->plane[plane].subsampling_y - mbd->plane[AOM_PLANE_Y].subsampling_y;
+ // Pixels in the 5x5 window plus, for chroma planes, the co-located luma
+ // samples folded in through luma_sse_sum.
+ const int num_ref_pixels = TF_WINDOW_LENGTH * TF_WINDOW_LENGTH +
+ ((plane) ? (1 << (ss_x_shift + ss_y_shift)) : 0);
+ const double inv_num_ref_pixels = 1.0 / num_ref_pixels;
+ // Larger noise -> larger filtering weight.
+ const double n_decay = 0.5 + log(2 * noise_levels[plane] + 5.0);
+ // Decay factors for non-local mean approach.
+ const double decay_factor = 1 / (n_decay * q_decay * s_decay);
+
+ // Filter U-plane and V-plane using Y-plane. This is because motion
+ // search is only done on Y-plane, so the information from Y-plane
+ // will be more accurate. The luma sse sum is reused in both chroma
+ // planes.
+ if (plane == AOM_PLANE_U) {
+ for (unsigned int i = 0, k = 0; i < plane_h; i++) {
+ for (unsigned int j = 0; j < plane_w; j++, k++) {
+ for (int ii = 0; ii < (1 << ss_y_shift); ++ii) {
+ for (int jj = 0; jj < (1 << ss_x_shift); ++jj) {
+ const int yy = (i << ss_y_shift) + ii; // Y-coord on Y-plane.
+ const int xx = (j << ss_x_shift) + jj; // X-coord on Y-plane.
+ // +2 accounts for the left border stored by get_squared_error.
+ luma_sse_sum[i * BW + j] += frame_sse[yy * SSE_STRIDE + xx + 2];
+ }
+ }
+ }
+ }
+ }
+
+ apply_temporal_filter(ref, frame_stride, pred + plane_offset, plane_w,
+ plane_w, plane_h, subblock_mses, accum + plane_offset,
+ count + plane_offset, frame_sse, luma_sse_sum,
+ inv_num_ref_pixels, decay_factor, inv_factor,
+ weight_factor, d_factor, tf_wgt_calc_lvl);
+ plane_offset += plane_h * plane_w;
+ }
+}
diff --git a/third_party/aom/av1/encoder/x86/wedge_utils_avx2.c b/third_party/aom/av1/encoder/x86/wedge_utils_avx2.c
new file mode 100644
index 0000000000..9cde860534
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/wedge_utils_avx2.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <immintrin.h>
+#include <smmintrin.h>
+
+#include "aom_dsp/x86/synonyms.h"
+#include "aom_dsp/x86/synonyms_avx2.h"
+#include "aom/aom_integer.h"
+
+#include "av1/common/reconinter.h"
+
+#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
+
+/**
+ * See av1_wedge_sse_from_residuals_c
+ *
+ * Each madd lane computes d[i] * m[i] + r1[i] * MAX_MASK_VALUE; the
+ * results are saturating-packed to 16 bits, squared and summed with a
+ * second madd, and accumulated in 64-bit lanes. N must be a multiple
+ * of 64.
+ */
+uint64_t av1_wedge_sse_from_residuals_avx2(const int16_t *r1, const int16_t *d,
+ const uint8_t *m, int N) {
+ // Walk the arrays with a negative index so the loop test is just `n != 0`.
+ int n = -N;
+
+ uint64_t csse;
+
+ const __m256i v_mask_max_w = _mm256_set1_epi16(MAX_MASK_VALUE);
+ const __m256i v_zext_q = yy_set1_64_from_32i(~0);
+
+ __m256i v_acc0_q = _mm256_setzero_si256();
+
+ assert(N % 64 == 0);
+
+ r1 += N;
+ d += N;
+ m += N;
+
+ do {
+ const __m256i v_r0_w = _mm256_lddqu_si256((__m256i *)(r1 + n));
+ const __m256i v_d0_w = _mm256_lddqu_si256((__m256i *)(d + n));
+ const __m128i v_m01_b = _mm_lddqu_si128((__m128i *)(m + n));
+
+ // Interleave (d, r1) and (m, MAX_MASK_VALUE) so madd pairs them up.
+ const __m256i v_rd0l_w = _mm256_unpacklo_epi16(v_d0_w, v_r0_w);
+ const __m256i v_rd0h_w = _mm256_unpackhi_epi16(v_d0_w, v_r0_w);
+ const __m256i v_m0_w = _mm256_cvtepu8_epi16(v_m01_b);
+
+ const __m256i v_m0l_w = _mm256_unpacklo_epi16(v_m0_w, v_mask_max_w);
+ const __m256i v_m0h_w = _mm256_unpackhi_epi16(v_m0_w, v_mask_max_w);
+
+ const __m256i v_t0l_d = _mm256_madd_epi16(v_rd0l_w, v_m0l_w);
+ const __m256i v_t0h_d = _mm256_madd_epi16(v_rd0h_w, v_m0h_w);
+
+ const __m256i v_t0_w = _mm256_packs_epi32(v_t0l_d, v_t0h_d);
+
+ // Square and horizontally pair-sum the 16-bit terms.
+ const __m256i v_sq0_d = _mm256_madd_epi16(v_t0_w, v_t0_w);
+
+ // Pairwise-widen: add each odd 32-bit lane to its zero-extended even
+ // neighbor so the accumulator works in 64-bit lanes.
+ const __m256i v_sum0_q = _mm256_add_epi64(
+ _mm256_and_si256(v_sq0_d, v_zext_q), _mm256_srli_epi64(v_sq0_d, 32));
+
+ v_acc0_q = _mm256_add_epi64(v_acc0_q, v_sum0_q);
+
+ n += 16;
+ } while (n);
+
+ // Reduce the four 64-bit lanes to a single scalar.
+ v_acc0_q = _mm256_add_epi64(v_acc0_q, _mm256_srli_si256(v_acc0_q, 8));
+ __m128i v_acc_q_0 = _mm256_castsi256_si128(v_acc0_q);
+ __m128i v_acc_q_1 = _mm256_extracti128_si256(v_acc0_q, 1);
+ v_acc_q_0 = _mm_add_epi64(v_acc_q_0, v_acc_q_1);
+#if AOM_ARCH_X86_64
+ csse = (uint64_t)_mm_extract_epi64(v_acc_q_0, 0);
+#else
+ xx_storel_64(&csse, v_acc_q_0);
+#endif
+
+ return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
+}
+
+/**
+ * See av1_wedge_sign_from_residuals_c
+ *
+ * Computes the dot product of ds and m with 32-bit partial sums
+ * (sign-extended to 64 bits for the final reduction) and returns 1 iff
+ * it exceeds `limit`. N must be a multiple of 64.
+ */
+int8_t av1_wedge_sign_from_residuals_avx2(const int16_t *ds, const uint8_t *m,
+ int N, int64_t limit) {
+ int64_t acc;
+ __m256i v_acc0_d = _mm256_setzero_si256();
+
+ // Input size limited to 8192 by the use of 32 bit accumulators and m
+ // being between [0, 64]. Overflow might happen at larger sizes,
+ // though it is practically impossible on real video input.
+ assert(N < 8192);
+ assert(N % 64 == 0);
+
+ do {
+ const __m256i v_m01_b = _mm256_lddqu_si256((__m256i *)(m));
+ const __m256i v_m23_b = _mm256_lddqu_si256((__m256i *)(m + 32));
+
+ const __m256i v_d0_w = _mm256_lddqu_si256((__m256i *)(ds));
+ const __m256i v_d1_w = _mm256_lddqu_si256((__m256i *)(ds + 16));
+ const __m256i v_d2_w = _mm256_lddqu_si256((__m256i *)(ds + 32));
+ const __m256i v_d3_w = _mm256_lddqu_si256((__m256i *)(ds + 48));
+
+ // Widen the 64 mask bytes to 16-bit lanes.
+ const __m256i v_m0_w =
+ _mm256_cvtepu8_epi16(_mm256_castsi256_si128(v_m01_b));
+ const __m256i v_m1_w =
+ _mm256_cvtepu8_epi16(_mm256_extracti128_si256(v_m01_b, 1));
+ const __m256i v_m2_w =
+ _mm256_cvtepu8_epi16(_mm256_castsi256_si128(v_m23_b));
+ const __m256i v_m3_w =
+ _mm256_cvtepu8_epi16(_mm256_extracti128_si256(v_m23_b, 1));
+
+ // madd pair-sums ds[i] * m[i] into 32-bit lanes.
+ const __m256i v_p0_d = _mm256_madd_epi16(v_d0_w, v_m0_w);
+ const __m256i v_p1_d = _mm256_madd_epi16(v_d1_w, v_m1_w);
+ const __m256i v_p2_d = _mm256_madd_epi16(v_d2_w, v_m2_w);
+ const __m256i v_p3_d = _mm256_madd_epi16(v_d3_w, v_m3_w);
+
+ const __m256i v_p01_d = _mm256_add_epi32(v_p0_d, v_p1_d);
+ const __m256i v_p23_d = _mm256_add_epi32(v_p2_d, v_p3_d);
+
+ const __m256i v_p0123_d = _mm256_add_epi32(v_p01_d, v_p23_d);
+
+ v_acc0_d = _mm256_add_epi32(v_acc0_d, v_p0123_d);
+
+ ds += 64;
+ m += 64;
+
+ N -= 64;
+ } while (N);
+
+ // Sign-extend the eight 32-bit partial sums to 64 bits and reduce.
+ __m256i v_sign_d = _mm256_srai_epi32(v_acc0_d, 31);
+ v_acc0_d = _mm256_add_epi64(_mm256_unpacklo_epi32(v_acc0_d, v_sign_d),
+ _mm256_unpackhi_epi32(v_acc0_d, v_sign_d));
+
+ __m256i v_acc_q = _mm256_add_epi64(v_acc0_d, _mm256_srli_si256(v_acc0_d, 8));
+
+ __m128i v_acc_q_0 = _mm256_castsi256_si128(v_acc_q);
+ __m128i v_acc_q_1 = _mm256_extracti128_si256(v_acc_q, 1);
+ v_acc_q_0 = _mm_add_epi64(v_acc_q_0, v_acc_q_1);
+
+#if AOM_ARCH_X86_64
+ acc = _mm_extract_epi64(v_acc_q_0, 0);
+#else
+ xx_storel_64(&acc, v_acc_q_0);
+#endif
+
+ return acc > limit;
+}
+
+/**
+ * av1_wedge_compute_delta_squares_c
+ *
+ * Writes d[i] = saturate16(a[i]^2 - b[i]^2) for N elements (N a multiple
+ * of 64): interleaved (a, b) pairs are madd-multiplied by (a, -b) so each
+ * 32-bit lane yields a^2 - b^2, then packed back to 16 bits with
+ * saturation. NOTE: uses aligned stores, so d must be 32-byte aligned.
+ */
+void av1_wedge_compute_delta_squares_avx2(int16_t *d, const int16_t *a,
+ const int16_t *b, int N) {
+ // Per 32-bit pair: low 16-bit lane = +1, high lane = -1 (for sign flip).
+ const __m256i v_neg_w = _mm256_set1_epi32((int)0xffff0001);
+
+ assert(N % 64 == 0);
+
+ do {
+ const __m256i v_a0_w = _mm256_lddqu_si256((__m256i *)(a));
+ const __m256i v_b0_w = _mm256_lddqu_si256((__m256i *)(b));
+ const __m256i v_a1_w = _mm256_lddqu_si256((__m256i *)(a + 16));
+ const __m256i v_b1_w = _mm256_lddqu_si256((__m256i *)(b + 16));
+ const __m256i v_a2_w = _mm256_lddqu_si256((__m256i *)(a + 32));
+ const __m256i v_b2_w = _mm256_lddqu_si256((__m256i *)(b + 32));
+ const __m256i v_a3_w = _mm256_lddqu_si256((__m256i *)(a + 48));
+ const __m256i v_b3_w = _mm256_lddqu_si256((__m256i *)(b + 48));
+
+ // Interleave into (a, b) pairs per 32-bit lane.
+ const __m256i v_ab0l_w = _mm256_unpacklo_epi16(v_a0_w, v_b0_w);
+ const __m256i v_ab0h_w = _mm256_unpackhi_epi16(v_a0_w, v_b0_w);
+ const __m256i v_ab1l_w = _mm256_unpacklo_epi16(v_a1_w, v_b1_w);
+ const __m256i v_ab1h_w = _mm256_unpackhi_epi16(v_a1_w, v_b1_w);
+ const __m256i v_ab2l_w = _mm256_unpacklo_epi16(v_a2_w, v_b2_w);
+ const __m256i v_ab2h_w = _mm256_unpackhi_epi16(v_a2_w, v_b2_w);
+ const __m256i v_ab3l_w = _mm256_unpacklo_epi16(v_a3_w, v_b3_w);
+ const __m256i v_ab3h_w = _mm256_unpackhi_epi16(v_a3_w, v_b3_w);
+
+ // Negate top word of pairs
+ const __m256i v_abl0n_w = _mm256_sign_epi16(v_ab0l_w, v_neg_w);
+ const __m256i v_abh0n_w = _mm256_sign_epi16(v_ab0h_w, v_neg_w);
+ const __m256i v_abl1n_w = _mm256_sign_epi16(v_ab1l_w, v_neg_w);
+ const __m256i v_abh1n_w = _mm256_sign_epi16(v_ab1h_w, v_neg_w);
+ const __m256i v_abl2n_w = _mm256_sign_epi16(v_ab2l_w, v_neg_w);
+ const __m256i v_abh2n_w = _mm256_sign_epi16(v_ab2h_w, v_neg_w);
+ const __m256i v_abl3n_w = _mm256_sign_epi16(v_ab3l_w, v_neg_w);
+ const __m256i v_abh3n_w = _mm256_sign_epi16(v_ab3h_w, v_neg_w);
+
+ // madd((a, b), (a, -b)) = a*a - b*b per 32-bit lane.
+ const __m256i v_r0l_w = _mm256_madd_epi16(v_ab0l_w, v_abl0n_w);
+ const __m256i v_r0h_w = _mm256_madd_epi16(v_ab0h_w, v_abh0n_w);
+ const __m256i v_r1l_w = _mm256_madd_epi16(v_ab1l_w, v_abl1n_w);
+ const __m256i v_r1h_w = _mm256_madd_epi16(v_ab1h_w, v_abh1n_w);
+ const __m256i v_r2l_w = _mm256_madd_epi16(v_ab2l_w, v_abl2n_w);
+ const __m256i v_r2h_w = _mm256_madd_epi16(v_ab2h_w, v_abh2n_w);
+ const __m256i v_r3l_w = _mm256_madd_epi16(v_ab3l_w, v_abl3n_w);
+ const __m256i v_r3h_w = _mm256_madd_epi16(v_ab3h_w, v_abh3n_w);
+
+ // Pack the 32-bit differences back to 16 bits with signed saturation.
+ const __m256i v_r0_w = _mm256_packs_epi32(v_r0l_w, v_r0h_w);
+ const __m256i v_r1_w = _mm256_packs_epi32(v_r1l_w, v_r1h_w);
+ const __m256i v_r2_w = _mm256_packs_epi32(v_r2l_w, v_r2h_w);
+ const __m256i v_r3_w = _mm256_packs_epi32(v_r3l_w, v_r3h_w);
+
+ _mm256_store_si256((__m256i *)(d), v_r0_w);
+ _mm256_store_si256((__m256i *)(d + 16), v_r1_w);
+ _mm256_store_si256((__m256i *)(d + 32), v_r2_w);
+ _mm256_store_si256((__m256i *)(d + 48), v_r3_w);
+
+ a += 64;
+ b += 64;
+ d += 64;
+ N -= 64;
+ } while (N);
+}
diff --git a/third_party/aom/av1/encoder/x86/wedge_utils_sse2.c b/third_party/aom/av1/encoder/x86/wedge_utils_sse2.c
new file mode 100644
index 0000000000..d7ac2223f2
--- /dev/null
+++ b/third_party/aom/av1/encoder/x86/wedge_utils_sse2.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <immintrin.h>
+
+#include "aom_dsp/x86/synonyms.h"
+
+#include "aom/aom_integer.h"
+
+#include "av1/common/reconinter.h"
+
+#define MAX_MASK_VALUE (1 << WEDGE_WEIGHT_BITS)
+
+/**
+ * See av1_wedge_sse_from_residuals_c
+ *
+ * For each element computes t = MAX_MASK_VALUE * r1[i] + m[i] * d[i],
+ * saturates t to int16 range, and accumulates t*t; the total is rounded
+ * down by 2 * WEDGE_WEIGHT_BITS before being returned.
+ * N must be a positive multiple of 64 (asserted below); 16 elements are
+ * processed per loop iteration.
+ */
+uint64_t av1_wedge_sse_from_residuals_sse2(const int16_t *r1, const int16_t *d,
+                                           const uint8_t *m, int N) {
+  // Negative-index loop: pointers are advanced past the end of the
+  // buffers and n counts up from -N to 0, so the loop condition is a
+  // simple test against zero.
+  int n = -N;
+  int n8 = n + 8;
+
+  uint64_t csse;
+
+  const __m128i v_mask_max_w = _mm_set1_epi16(MAX_MASK_VALUE);
+  // 0x00000000ffffffff per 64-bit lane: used to isolate the low 32 bits
+  // of each 64-bit lane when widening 32-bit sums to 64 bits.
+  const __m128i v_zext_q = xx_set1_64_from_32i(~0);
+
+  __m128i v_acc0_q = _mm_setzero_si128();
+
+  assert(N % 64 == 0);
+
+  r1 += N;
+  d += N;
+  m += N;
+
+  do {
+    const __m128i v_r0_w = xx_load_128(r1 + n);
+    const __m128i v_r1_w = xx_load_128(r1 + n8);
+    const __m128i v_d0_w = xx_load_128(d + n);
+    const __m128i v_d1_w = xx_load_128(d + n8);
+    const __m128i v_m01_b = xx_load_128(m + n);
+
+    // Interleave (d, r1) pairs so that a single madd below can compute
+    // m*d + MAX_MASK_VALUE*r1 for each element.
+    const __m128i v_rd0l_w = _mm_unpacklo_epi16(v_d0_w, v_r0_w);
+    const __m128i v_rd0h_w = _mm_unpackhi_epi16(v_d0_w, v_r0_w);
+    const __m128i v_rd1l_w = _mm_unpacklo_epi16(v_d1_w, v_r1_w);
+    const __m128i v_rd1h_w = _mm_unpackhi_epi16(v_d1_w, v_r1_w);
+    // Widen the 8-bit mask values to 16 bits.
+    const __m128i v_m0_w = _mm_unpacklo_epi8(v_m01_b, _mm_setzero_si128());
+    const __m128i v_m1_w = _mm_unpackhi_epi8(v_m01_b, _mm_setzero_si128());
+
+    // Pair each mask value with the constant MAX_MASK_VALUE, matching the
+    // (d, r1) interleave above.
+    const __m128i v_m0l_w = _mm_unpacklo_epi16(v_m0_w, v_mask_max_w);
+    const __m128i v_m0h_w = _mm_unpackhi_epi16(v_m0_w, v_mask_max_w);
+    const __m128i v_m1l_w = _mm_unpacklo_epi16(v_m1_w, v_mask_max_w);
+    const __m128i v_m1h_w = _mm_unpackhi_epi16(v_m1_w, v_mask_max_w);
+
+    // t = m * d + MAX_MASK_VALUE * r1 (one 32-bit result per element).
+    const __m128i v_t0l_d = _mm_madd_epi16(v_rd0l_w, v_m0l_w);
+    const __m128i v_t0h_d = _mm_madd_epi16(v_rd0h_w, v_m0h_w);
+    const __m128i v_t1l_d = _mm_madd_epi16(v_rd1l_w, v_m1l_w);
+    const __m128i v_t1h_d = _mm_madd_epi16(v_rd1h_w, v_m1h_w);
+
+    // Signed-saturating pack implements the clamp of t to int16 range.
+    const __m128i v_t0_w = _mm_packs_epi32(v_t0l_d, v_t0h_d);
+    const __m128i v_t1_w = _mm_packs_epi32(v_t1l_d, v_t1h_d);
+
+    // Square and horizontally add adjacent pairs: t*t sums, 32-bit lanes.
+    const __m128i v_sq0_d = _mm_madd_epi16(v_t0_w, v_t0_w);
+    const __m128i v_sq1_d = _mm_madd_epi16(v_t1_w, v_t1_w);
+
+    // Widen each pair of 32-bit sums to 64 bits (low lane zero-extended
+    // via the mask, high lane brought down by the shift) before adding,
+    // so the accumulator cannot overflow 32 bits.
+    const __m128i v_sum0_q = _mm_add_epi64(_mm_and_si128(v_sq0_d, v_zext_q),
+                                           _mm_srli_epi64(v_sq0_d, 32));
+    const __m128i v_sum1_q = _mm_add_epi64(_mm_and_si128(v_sq1_d, v_zext_q),
+                                           _mm_srli_epi64(v_sq1_d, 32));
+
+    v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum0_q);
+    v_acc0_q = _mm_add_epi64(v_acc0_q, v_sum1_q);
+
+    n8 += 16;
+    n += 16;
+  } while (n);
+
+  // Fold the two 64-bit accumulator lanes into one.
+  v_acc0_q = _mm_add_epi64(v_acc0_q, _mm_srli_si128(v_acc0_q, 8));
+
+#if AOM_ARCH_X86_64
+  csse = (uint64_t)_mm_cvtsi128_si64(v_acc0_q);
+#else
+  // 32-bit targets have no 64-bit move from XMM; spill through memory.
+  xx_storel_64(&csse, v_acc0_q);
+#endif
+
+  return ROUND_POWER_OF_TWO(csse, 2 * WEDGE_WEIGHT_BITS);
+}
+
+/**
+ * See av1_wedge_sign_from_residuals_c
+ *
+ * Computes acc = SUM(m[i] * ds[i]) over N elements and returns 1 if the
+ * sum exceeds 'limit', 0 otherwise. ds is presumably the per-pixel delta
+ * of squared residuals produced by av1_wedge_compute_delta_squares_*
+ * (see that function below) — confirm against callers.
+ * N must be a positive multiple of 64 (asserted below); 64 elements are
+ * processed per loop iteration.
+ */
+int8_t av1_wedge_sign_from_residuals_sse2(const int16_t *ds, const uint8_t *m,
+                                          int N, int64_t limit) {
+  int64_t acc;
+
+  __m128i v_sign_d;
+  __m128i v_acc0_d = _mm_setzero_si128();
+  __m128i v_acc1_d = _mm_setzero_si128();
+  __m128i v_acc_q;
+
+  // Input size limited to 8192 by the use of 32 bit accumulators and m
+  // being between [0, 64]. Overflow might happen at larger sizes,
+  // though it is practically impossible on real video input.
+  assert(N < 8192);
+  assert(N % 64 == 0);
+
+  do {
+    const __m128i v_m01_b = xx_load_128(m);
+    const __m128i v_m23_b = xx_load_128(m + 16);
+    const __m128i v_m45_b = xx_load_128(m + 32);
+    const __m128i v_m67_b = xx_load_128(m + 48);
+
+    const __m128i v_d0_w = xx_load_128(ds);
+    const __m128i v_d1_w = xx_load_128(ds + 8);
+    const __m128i v_d2_w = xx_load_128(ds + 16);
+    const __m128i v_d3_w = xx_load_128(ds + 24);
+    const __m128i v_d4_w = xx_load_128(ds + 32);
+    const __m128i v_d5_w = xx_load_128(ds + 40);
+    const __m128i v_d6_w = xx_load_128(ds + 48);
+    const __m128i v_d7_w = xx_load_128(ds + 56);
+
+    // Widen the 8-bit mask values to 16 bits to line up with ds.
+    const __m128i v_m0_w = _mm_unpacklo_epi8(v_m01_b, _mm_setzero_si128());
+    const __m128i v_m1_w = _mm_unpackhi_epi8(v_m01_b, _mm_setzero_si128());
+    const __m128i v_m2_w = _mm_unpacklo_epi8(v_m23_b, _mm_setzero_si128());
+    const __m128i v_m3_w = _mm_unpackhi_epi8(v_m23_b, _mm_setzero_si128());
+    const __m128i v_m4_w = _mm_unpacklo_epi8(v_m45_b, _mm_setzero_si128());
+    const __m128i v_m5_w = _mm_unpackhi_epi8(v_m45_b, _mm_setzero_si128());
+    const __m128i v_m6_w = _mm_unpacklo_epi8(v_m67_b, _mm_setzero_si128());
+    const __m128i v_m7_w = _mm_unpackhi_epi8(v_m67_b, _mm_setzero_si128());
+
+    // m * ds with horizontal add of adjacent pairs -> 32-bit lanes.
+    const __m128i v_p0_d = _mm_madd_epi16(v_d0_w, v_m0_w);
+    const __m128i v_p1_d = _mm_madd_epi16(v_d1_w, v_m1_w);
+    const __m128i v_p2_d = _mm_madd_epi16(v_d2_w, v_m2_w);
+    const __m128i v_p3_d = _mm_madd_epi16(v_d3_w, v_m3_w);
+    const __m128i v_p4_d = _mm_madd_epi16(v_d4_w, v_m4_w);
+    const __m128i v_p5_d = _mm_madd_epi16(v_d5_w, v_m5_w);
+    const __m128i v_p6_d = _mm_madd_epi16(v_d6_w, v_m6_w);
+    const __m128i v_p7_d = _mm_madd_epi16(v_d7_w, v_m7_w);
+
+    // Reduction tree over the eight partial-product vectors.
+    const __m128i v_p01_d = _mm_add_epi32(v_p0_d, v_p1_d);
+    const __m128i v_p23_d = _mm_add_epi32(v_p2_d, v_p3_d);
+    const __m128i v_p45_d = _mm_add_epi32(v_p4_d, v_p5_d);
+    const __m128i v_p67_d = _mm_add_epi32(v_p6_d, v_p7_d);
+
+    const __m128i v_p0123_d = _mm_add_epi32(v_p01_d, v_p23_d);
+    const __m128i v_p4567_d = _mm_add_epi32(v_p45_d, v_p67_d);
+
+    // Two independent 32-bit accumulators (see the N < 8192 note above).
+    v_acc0_d = _mm_add_epi32(v_acc0_d, v_p0123_d);
+    v_acc1_d = _mm_add_epi32(v_acc1_d, v_p4567_d);
+
+    ds += 64;
+    m += 64;
+
+    N -= 64;
+  } while (N);
+
+  // Sign-extend each 32-bit accumulator lane to 64 bits (interleave with
+  // its comparison-derived sign mask) and pairwise add.
+  v_sign_d = _mm_cmplt_epi32(v_acc0_d, _mm_setzero_si128());
+  v_acc0_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc0_d, v_sign_d),
+                           _mm_unpackhi_epi32(v_acc0_d, v_sign_d));
+
+  v_sign_d = _mm_cmplt_epi32(v_acc1_d, _mm_setzero_si128());
+  v_acc1_d = _mm_add_epi64(_mm_unpacklo_epi32(v_acc1_d, v_sign_d),
+                           _mm_unpackhi_epi32(v_acc1_d, v_sign_d));
+
+  v_acc_q = _mm_add_epi64(v_acc0_d, v_acc1_d);
+
+  // Fold the two 64-bit lanes into one.
+  v_acc_q = _mm_add_epi64(v_acc_q, _mm_srli_si128(v_acc_q, 8));
+
+#if AOM_ARCH_X86_64
+  acc = _mm_cvtsi128_si64(v_acc_q);
+#else
+  // 32-bit targets have no 64-bit move from XMM; spill through memory.
+  xx_storel_64(&acc, v_acc_q);
+#endif
+
+  return acc > limit;
+}
+
+// Negate under mask: for each 16-bit lane, returns -v where the
+// corresponding mask lane is 0xffff and v unchanged where it is 0
+// (two's complement: (v ^ m) - m).
+static INLINE __m128i negm_epi16(__m128i v_v_w, __m128i v_mask_w) {
+  return _mm_sub_epi16(_mm_xor_si128(v_v_w, v_mask_w), v_mask_w);
+}
+
+/**
+ * av1_wedge_compute_delta_squares_c
+ *
+ * Computes d[i] = a[i]*a[i] - b[i]*b[i] for each element, with the result
+ * saturated to int16 range. N must be a positive multiple of 64
+ * (asserted below); 32 elements are processed per loop iteration.
+ */
+void av1_wedge_compute_delta_squares_sse2(int16_t *d, const int16_t *a,
+                                          const int16_t *b, int N) {
+  // 0xffff in the odd 16-bit lanes only; after the (a, b) interleaves
+  // below, b occupies the odd lanes, so negm_epi16 negates just b.
+  const __m128i v_neg_w = _mm_set_epi16((short)0xffff, 0, (short)0xffff, 0,
+                                        (short)0xffff, 0, (short)0xffff, 0);
+
+  assert(N % 64 == 0);
+
+  do {
+    const __m128i v_a0_w = xx_load_128(a);
+    const __m128i v_b0_w = xx_load_128(b);
+    const __m128i v_a1_w = xx_load_128(a + 8);
+    const __m128i v_b1_w = xx_load_128(b + 8);
+    const __m128i v_a2_w = xx_load_128(a + 16);
+    const __m128i v_b2_w = xx_load_128(b + 16);
+    const __m128i v_a3_w = xx_load_128(a + 24);
+    const __m128i v_b3_w = xx_load_128(b + 24);
+
+    // Interleave to (a, b) pairs: a in even lanes, b in odd lanes.
+    const __m128i v_ab0l_w = _mm_unpacklo_epi16(v_a0_w, v_b0_w);
+    const __m128i v_ab0h_w = _mm_unpackhi_epi16(v_a0_w, v_b0_w);
+    const __m128i v_ab1l_w = _mm_unpacklo_epi16(v_a1_w, v_b1_w);
+    const __m128i v_ab1h_w = _mm_unpackhi_epi16(v_a1_w, v_b1_w);
+    const __m128i v_ab2l_w = _mm_unpacklo_epi16(v_a2_w, v_b2_w);
+    const __m128i v_ab2h_w = _mm_unpackhi_epi16(v_a2_w, v_b2_w);
+    const __m128i v_ab3l_w = _mm_unpacklo_epi16(v_a3_w, v_b3_w);
+    const __m128i v_ab3h_w = _mm_unpackhi_epi16(v_a3_w, v_b3_w);
+
+    // Negate top word of pairs
+    const __m128i v_abl0n_w = negm_epi16(v_ab0l_w, v_neg_w);
+    const __m128i v_abh0n_w = negm_epi16(v_ab0h_w, v_neg_w);
+    const __m128i v_abl1n_w = negm_epi16(v_ab1l_w, v_neg_w);
+    const __m128i v_abh1n_w = negm_epi16(v_ab1h_w, v_neg_w);
+    const __m128i v_abl2n_w = negm_epi16(v_ab2l_w, v_neg_w);
+    const __m128i v_abh2n_w = negm_epi16(v_ab2h_w, v_neg_w);
+    const __m128i v_abl3n_w = negm_epi16(v_ab3l_w, v_neg_w);
+    const __m128i v_abh3n_w = negm_epi16(v_ab3h_w, v_neg_w);
+
+    // madd of (a, b) with (a, -b) yields a*a - b*b per 32-bit lane.
+    const __m128i v_r0l_w = _mm_madd_epi16(v_ab0l_w, v_abl0n_w);
+    const __m128i v_r0h_w = _mm_madd_epi16(v_ab0h_w, v_abh0n_w);
+    const __m128i v_r1l_w = _mm_madd_epi16(v_ab1l_w, v_abl1n_w);
+    const __m128i v_r1h_w = _mm_madd_epi16(v_ab1h_w, v_abh1n_w);
+    const __m128i v_r2l_w = _mm_madd_epi16(v_ab2l_w, v_abl2n_w);
+    const __m128i v_r2h_w = _mm_madd_epi16(v_ab2h_w, v_abh2n_w);
+    const __m128i v_r3l_w = _mm_madd_epi16(v_ab3l_w, v_abl3n_w);
+    const __m128i v_r3h_w = _mm_madd_epi16(v_ab3h_w, v_abh3n_w);
+
+    // Signed-saturating pack back to int16 implements the clamp.
+    const __m128i v_r0_w = _mm_packs_epi32(v_r0l_w, v_r0h_w);
+    const __m128i v_r1_w = _mm_packs_epi32(v_r1l_w, v_r1h_w);
+    const __m128i v_r2_w = _mm_packs_epi32(v_r2l_w, v_r2h_w);
+    const __m128i v_r3_w = _mm_packs_epi32(v_r3l_w, v_r3h_w);
+
+    xx_store_128(d, v_r0_w);
+    xx_store_128(d + 8, v_r1_w);
+    xx_store_128(d + 16, v_r2_w);
+    xx_store_128(d + 24, v_r3_w);
+
+    a += 32;
+    b += 32;
+    d += 32;
+    N -= 32;
+  } while (N);
+}