path: root/third_party/aom/aom_dsp/flow_estimation/x86
Diffstat (limited to '')
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c | 148
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c | 171
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c      | 417
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c      | 424
4 files changed, 750 insertions, 410 deletions
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
index 87c76fa13b..ff69ae75f5 100644
--- a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
@@ -17,64 +17,112 @@
#include "aom_ports/mem.h"
#include "aom_dsp/flow_estimation/corner_match.h"
-DECLARE_ALIGNED(16, static const uint8_t,
- byte_mask[16]) = { 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 0, 0, 0 };
-#if MATCH_SZ != 13
-#error "Need to change byte_mask in corner_match_sse4.c if MATCH_SZ != 13"
+DECLARE_ALIGNED(32, static const uint16_t, ones_array[16]) = { 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1, 1, 1,
+ 1, 1, 1, 1 };
+
+#if MATCH_SZ != 16
+#error "Need to apply pixel mask in corner_match_avx2.c if MATCH_SZ != 16"
#endif
-/* Compute corr(frame1, frame2) * MATCH_SZ * stddev(frame1), where the
-correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
-of each image, centered at (x1, y1) and (x2, y2) respectively.
+/* Compute mean and standard deviation of pixels in a window of size
+ MATCH_SZ by MATCH_SZ centered at (x, y).
+ Store results into *mean and *one_over_stddev
+
+ Note: The output of this function is scaled by MATCH_SZ, as in
+ *mean = MATCH_SZ * <true mean> and
+ *one_over_stddev = 1 / (MATCH_SZ * <true stddev>)
+
+ Combined with the fact that we return 1/stddev rather than the standard
+ deviation itself, this allows us to completely avoid divisions in
+ aom_compute_correlation, which is much hotter than this function is.
+
+ Returns true if this feature point is usable, false otherwise.
*/
-double av1_compute_cross_correlation_avx2(const unsigned char *frame1,
- int stride1, int x1, int y1,
- const unsigned char *frame2,
- int stride2, int x2, int y2) {
- int i, stride1_i = 0, stride2_i = 0;
- __m256i temp1, sum_vec, sumsq2_vec, cross_vec, v, v1_1, v2_1;
- const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
- const __m256i zero = _mm256_setzero_si256();
- __m128i v1, v2;
-
- sum_vec = zero;
- sumsq2_vec = zero;
- cross_vec = zero;
+bool aom_compute_mean_stddev_avx2(const unsigned char *frame, int stride, int x,
+ int y, double *mean,
+ double *one_over_stddev) {
+ __m256i sum_vec = _mm256_setzero_si256();
+ __m256i sumsq_vec = _mm256_setzero_si256();
+
+ frame += (y - MATCH_SZ_BY2) * stride + (x - MATCH_SZ_BY2);
+
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m256i v = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i *)frame));
+
+ sum_vec = _mm256_add_epi16(sum_vec, v);
+ sumsq_vec = _mm256_add_epi32(sumsq_vec, _mm256_madd_epi16(v, v));
+
+ frame += stride;
+ }
+
+ // Reduce sum_vec and sumsq_vec into single values
+ // Start by reducing each vector to 8x32-bit values, hadd() to perform 8
+ // additions, sum vertically to do 4 more, then the last 2 in scalar code.
+ const __m256i ones = _mm256_load_si256((__m256i *)ones_array);
+ const __m256i partial_sum = _mm256_madd_epi16(sum_vec, ones);
+ const __m256i tmp_8x32 = _mm256_hadd_epi32(partial_sum, sumsq_vec);
+ const __m128i tmp_4x32 = _mm_add_epi32(_mm256_extracti128_si256(tmp_8x32, 0),
+ _mm256_extracti128_si256(tmp_8x32, 1));
+ const int sum =
+ _mm_extract_epi32(tmp_4x32, 0) + _mm_extract_epi32(tmp_4x32, 1);
+ const int sumsq =
+ _mm_extract_epi32(tmp_4x32, 2) + _mm_extract_epi32(tmp_4x32, 3);
+
+ *mean = (double)sum / MATCH_SZ;
+ const double variance = sumsq - (*mean) * (*mean);
+ if (variance < MIN_FEATURE_VARIANCE) {
+ *one_over_stddev = 0.0;
+ return false;
+ }
+ *one_over_stddev = 1.0 / sqrt(variance);
+ return true;
+}
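// For reference, a minimal scalar sketch of the same computation (not part of
// the patch; the helper name is illustrative). It makes the MATCH_SZ scaling
// described above explicit: the stored mean is MATCH_SZ * <true mean>, and the
// stored reciprocal is 1 / (MATCH_SZ * <true stddev>), because
// sumsq - (sum / MATCH_SZ)^2 == MATCH_SZ^2 * <true variance>.
static bool compute_mean_stddev_scalar(const unsigned char *frame, int stride,
                                       int x, int y, double *mean,
                                       double *one_over_stddev) {
  int sum = 0, sumsq = 0;
  for (int i = 0; i < MATCH_SZ; ++i) {
    for (int j = 0; j < MATCH_SZ; ++j) {
      const int px =
          frame[(y - MATCH_SZ_BY2 + i) * stride + (x - MATCH_SZ_BY2 + j)];
      sum += px;
      sumsq += px * px;
    }
  }
  *mean = (double)sum / MATCH_SZ;
  const double variance = sumsq - (*mean) * (*mean);
  if (variance < MIN_FEATURE_VARIANCE) {
    *one_over_stddev = 0.0;
    return false;
  }
  *one_over_stddev = 1.0 / sqrt(variance);
  return true;
}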
+
+/* Compute corr(frame1, frame2) over a window of size MATCH_SZ by MATCH_SZ.
+ To save on computation, the mean and (1 divided by the) standard deviation
+ of the window in each frame are precomputed and passed into this function
+ as arguments.
+*/
+double aom_compute_correlation_avx2(const unsigned char *frame1, int stride1,
+ int x1, int y1, double mean1,
+ double one_over_stddev1,
+ const unsigned char *frame2, int stride2,
+ int x2, int y2, double mean2,
+ double one_over_stddev2) {
+ __m256i cross_vec = _mm256_setzero_si256();
frame1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
frame2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);
- for (i = 0; i < MATCH_SZ; ++i) {
- v1 = _mm_and_si128(_mm_loadu_si128((__m128i *)&frame1[stride1_i]), mask);
- v1_1 = _mm256_cvtepu8_epi16(v1);
- v2 = _mm_and_si128(_mm_loadu_si128((__m128i *)&frame2[stride2_i]), mask);
- v2_1 = _mm256_cvtepu8_epi16(v2);
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m256i v1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i *)frame1));
+ const __m256i v2 = _mm256_cvtepu8_epi16(_mm_loadu_si128((__m128i *)frame2));
- v = _mm256_insertf128_si256(_mm256_castsi128_si256(v1), v2, 1);
- sumsq2_vec = _mm256_add_epi32(sumsq2_vec, _mm256_madd_epi16(v2_1, v2_1));
+ cross_vec = _mm256_add_epi32(cross_vec, _mm256_madd_epi16(v1, v2));
- sum_vec = _mm256_add_epi16(sum_vec, _mm256_sad_epu8(v, zero));
- cross_vec = _mm256_add_epi32(cross_vec, _mm256_madd_epi16(v1_1, v2_1));
- stride1_i += stride1;
- stride2_i += stride2;
+ frame1 += stride1;
+ frame2 += stride2;
}
- __m256i sum_vec1 = _mm256_srli_si256(sum_vec, 8);
- sum_vec = _mm256_add_epi32(sum_vec, sum_vec1);
- int sum1_acc = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_vec));
- int sum2_acc = _mm256_extract_epi32(sum_vec, 4);
-
- __m256i unp_low = _mm256_unpacklo_epi64(sumsq2_vec, cross_vec);
- __m256i unp_hig = _mm256_unpackhi_epi64(sumsq2_vec, cross_vec);
- temp1 = _mm256_add_epi32(unp_low, unp_hig);
-
- __m128i low_sumsq = _mm256_castsi256_si128(temp1);
- low_sumsq = _mm_add_epi32(low_sumsq, _mm256_extractf128_si256(temp1, 1));
- low_sumsq = _mm_add_epi32(low_sumsq, _mm_srli_epi64(low_sumsq, 32));
- int sumsq2_acc = _mm_cvtsi128_si32(low_sumsq);
- int cross_acc = _mm_extract_epi32(low_sumsq, 2);
-
- int var2 = sumsq2_acc * MATCH_SZ_SQ - sum2_acc * sum2_acc;
- int cov = cross_acc * MATCH_SZ_SQ - sum1_acc * sum2_acc;
- return cov / sqrt((double)var2);
+
+ // Sum cross_vec into a single value
+ const __m128i tmp = _mm_add_epi32(_mm256_extracti128_si256(cross_vec, 0),
+ _mm256_extracti128_si256(cross_vec, 1));
+ const int cross = _mm_extract_epi32(tmp, 0) + _mm_extract_epi32(tmp, 1) +
+ _mm_extract_epi32(tmp, 2) + _mm_extract_epi32(tmp, 3);
+
+ // Note: In theory, the calculations here "should" be
+ // covariance = cross / N^2 - mean1 * mean2
+ // correlation = covariance / (stddev1 * stddev2).
+ //
+ // However, because of the scaling in aom_compute_mean_stddev, the
+ // lines below actually calculate
+ // covariance * N^2 = cross - (mean1 * N) * (mean2 * N)
+ // correlation = (covariance * N^2) / ((stddev1 * N) * (stddev2 * N))
+ //
+ // ie. we have removed the need for a division, and still end up with the
+ // correct unscaled correlation (ie, in the range [-1, +1])
+ const double covariance = cross - mean1 * mean2;
+ const double correlation = covariance * (one_over_stddev1 * one_over_stddev2);
+ return correlation;
}
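// A scalar sketch of the same correlation (illustrative, not part of the
// patch). With the pre-scaled mean and one_over_stddev from above, the
// division-free expression still yields the ordinary normalized correlation
// in [-1, +1]:
//   cross            = sum(p1 * p2) over the window
//   covariance * N^2 = cross - (N * mean1) * (N * mean2)
//   correlation      = (covariance * N^2) / ((N * stddev1) * (N * stddev2))
static double compute_correlation_scalar(
    const unsigned char *frame1, int stride1, int x1, int y1, double mean1,
    double one_over_stddev1, const unsigned char *frame2, int stride2, int x2,
    int y2, double mean2, double one_over_stddev2) {
  int cross = 0;
  for (int i = 0; i < MATCH_SZ; ++i) {
    for (int j = 0; j < MATCH_SZ; ++j) {
      const int p1 =
          frame1[(y1 - MATCH_SZ_BY2 + i) * stride1 + (x1 - MATCH_SZ_BY2 + j)];
      const int p2 =
          frame2[(y2 - MATCH_SZ_BY2 + i) * stride2 + (x2 - MATCH_SZ_BY2 + j)];
      cross += p1 * p2;
    }
  }
  const double covariance = cross - mean1 * mean2;
  return covariance * (one_over_stddev1 * one_over_stddev2);
}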
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
index b3cb5bc5fd..bff7db6d2f 100644
--- a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
@@ -21,84 +21,125 @@
#include "aom_ports/mem.h"
#include "aom_dsp/flow_estimation/corner_match.h"
-DECLARE_ALIGNED(16, static const uint8_t,
- byte_mask[16]) = { 255, 255, 255, 255, 255, 255, 255, 255,
- 255, 255, 255, 255, 255, 0, 0, 0 };
-#if MATCH_SZ != 13
-#error "Need to change byte_mask in corner_match_sse4.c if MATCH_SZ != 13"
+DECLARE_ALIGNED(16, static const uint16_t, ones_array[8]) = { 1, 1, 1, 1,
+ 1, 1, 1, 1 };
+
+#if MATCH_SZ != 16
+#error "Need to apply pixel mask in corner_match_sse4.c if MATCH_SZ != 16"
#endif
-/* Compute corr(frame1, frame2) * MATCH_SZ * stddev(frame1), where the
- correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
- of each image, centered at (x1, y1) and (x2, y2) respectively.
+/* Compute mean and standard deviation of pixels in a window of size
+ MATCH_SZ by MATCH_SZ centered at (x, y).
+ Store results into *mean and *one_over_stddev
+
+ Note: The output of this function is scaled by MATCH_SZ, as in
+ *mean = MATCH_SZ * <true mean> and
+ *one_over_stddev = 1 / (MATCH_SZ * <true stddev>)
+
+ Combined with the fact that we return 1/stddev rather than the standard
+ deviation itself, this allows us to completely avoid divisions in
+ aom_compute_correlation, which is much hotter than this function is.
+
+ Returns true if this feature point is usable, false otherwise.
+*/
+bool aom_compute_mean_stddev_sse4_1(const unsigned char *frame, int stride,
+ int x, int y, double *mean,
+ double *one_over_stddev) {
+ // 8 16-bit partial sums of pixels
+ // Each lane sums at most 2*MATCH_SZ pixels, which can have values up to 255,
+ // and is therefore at most 2*MATCH_SZ*255, which is > 2^8 but < 2^16.
+ // Thus this value is safe to store in 16 bits.
+ __m128i sum_vec = _mm_setzero_si128();
+
+ // 8 32-bit partial sums of squares
+ __m128i sumsq_vec_l = _mm_setzero_si128();
+ __m128i sumsq_vec_r = _mm_setzero_si128();
+
+ frame += (y - MATCH_SZ_BY2) * stride + (x - MATCH_SZ_BY2);
+
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m128i v = _mm_loadu_si128((__m128i *)frame);
+ const __m128i v_l = _mm_cvtepu8_epi16(v);
+ const __m128i v_r = _mm_cvtepu8_epi16(_mm_srli_si128(v, 8));
+
+ sum_vec = _mm_add_epi16(sum_vec, _mm_add_epi16(v_l, v_r));
+ sumsq_vec_l = _mm_add_epi32(sumsq_vec_l, _mm_madd_epi16(v_l, v_l));
+ sumsq_vec_r = _mm_add_epi32(sumsq_vec_r, _mm_madd_epi16(v_r, v_r));
+
+ frame += stride;
+ }
+
+ // Reduce sum_vec and sumsq_vec into single values
+ // Start by reducing each vector to 4x32-bit values, hadd() to perform four
+ // additions, then perform the last two additions in scalar code.
+ const __m128i ones = _mm_load_si128((__m128i *)ones_array);
+ const __m128i partial_sum = _mm_madd_epi16(sum_vec, ones);
+ const __m128i partial_sumsq = _mm_add_epi32(sumsq_vec_l, sumsq_vec_r);
+ const __m128i tmp = _mm_hadd_epi32(partial_sum, partial_sumsq);
+ const int sum = _mm_extract_epi32(tmp, 0) + _mm_extract_epi32(tmp, 1);
+ const int sumsq = _mm_extract_epi32(tmp, 2) + _mm_extract_epi32(tmp, 3);
+
+ *mean = (double)sum / MATCH_SZ;
+ const double variance = sumsq - (*mean) * (*mean);
+ if (variance < MIN_FEATURE_VARIANCE) {
+ *one_over_stddev = 0.0;
+ return false;
+ }
+ *one_over_stddev = 1.0 / sqrt(variance);
+ return true;
+}
+
+/* Compute corr(frame1, frame2) over a window of size MATCH_SZ by MATCH_SZ.
+ To save on computation, the mean and (1 divided by the) standard deviation
+ of the window in each frame are precomputed and passed into this function
+ as arguments.
*/
-double av1_compute_cross_correlation_sse4_1(const unsigned char *frame1,
- int stride1, int x1, int y1,
- const unsigned char *frame2,
- int stride2, int x2, int y2) {
- int i;
- // 2 16-bit partial sums in lanes 0, 4 (== 2 32-bit partial sums in lanes 0,
- // 2)
- __m128i sum1_vec = _mm_setzero_si128();
- __m128i sum2_vec = _mm_setzero_si128();
- // 4 32-bit partial sums of squares
- __m128i sumsq2_vec = _mm_setzero_si128();
- __m128i cross_vec = _mm_setzero_si128();
-
- const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
- const __m128i zero = _mm_setzero_si128();
+double aom_compute_correlation_sse4_1(const unsigned char *frame1, int stride1,
+ int x1, int y1, double mean1,
+ double one_over_stddev1,
+ const unsigned char *frame2, int stride2,
+ int x2, int y2, double mean2,
+ double one_over_stddev2) {
+ // 8 32-bit partial sums of products
+ __m128i cross_vec_l = _mm_setzero_si128();
+ __m128i cross_vec_r = _mm_setzero_si128();
frame1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
frame2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);
- for (i = 0; i < MATCH_SZ; ++i) {
- const __m128i v1 =
- _mm_and_si128(_mm_loadu_si128((__m128i *)&frame1[i * stride1]), mask);
- const __m128i v2 =
- _mm_and_si128(_mm_loadu_si128((__m128i *)&frame2[i * stride2]), mask);
-
- // Using the 'sad' intrinsic here is a bit faster than adding
- // v1_l + v1_r and v2_l + v2_r, plus it avoids the need for a 16->32 bit
- // conversion step later, for a net speedup of ~10%
- sum1_vec = _mm_add_epi16(sum1_vec, _mm_sad_epu8(v1, zero));
- sum2_vec = _mm_add_epi16(sum2_vec, _mm_sad_epu8(v2, zero));
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ const __m128i v1 = _mm_loadu_si128((__m128i *)frame1);
+ const __m128i v2 = _mm_loadu_si128((__m128i *)frame2);
const __m128i v1_l = _mm_cvtepu8_epi16(v1);
const __m128i v1_r = _mm_cvtepu8_epi16(_mm_srli_si128(v1, 8));
const __m128i v2_l = _mm_cvtepu8_epi16(v2);
const __m128i v2_r = _mm_cvtepu8_epi16(_mm_srli_si128(v2, 8));
- sumsq2_vec = _mm_add_epi32(
- sumsq2_vec,
- _mm_add_epi32(_mm_madd_epi16(v2_l, v2_l), _mm_madd_epi16(v2_r, v2_r)));
- cross_vec = _mm_add_epi32(
- cross_vec,
- _mm_add_epi32(_mm_madd_epi16(v1_l, v2_l), _mm_madd_epi16(v1_r, v2_r)));
+ cross_vec_l = _mm_add_epi32(cross_vec_l, _mm_madd_epi16(v1_l, v2_l));
+ cross_vec_r = _mm_add_epi32(cross_vec_r, _mm_madd_epi16(v1_r, v2_r));
+
+ frame1 += stride1;
+ frame2 += stride2;
}
- // Now we can treat the four registers (sum1_vec, sum2_vec, sumsq2_vec,
- // cross_vec)
- // as holding 4 32-bit elements each, which we want to sum horizontally.
- // We do this by transposing and then summing vertically.
- __m128i tmp_0 = _mm_unpacklo_epi32(sum1_vec, sum2_vec);
- __m128i tmp_1 = _mm_unpackhi_epi32(sum1_vec, sum2_vec);
- __m128i tmp_2 = _mm_unpacklo_epi32(sumsq2_vec, cross_vec);
- __m128i tmp_3 = _mm_unpackhi_epi32(sumsq2_vec, cross_vec);
-
- __m128i tmp_4 = _mm_unpacklo_epi64(tmp_0, tmp_2);
- __m128i tmp_5 = _mm_unpackhi_epi64(tmp_0, tmp_2);
- __m128i tmp_6 = _mm_unpacklo_epi64(tmp_1, tmp_3);
- __m128i tmp_7 = _mm_unpackhi_epi64(tmp_1, tmp_3);
-
- __m128i res =
- _mm_add_epi32(_mm_add_epi32(tmp_4, tmp_5), _mm_add_epi32(tmp_6, tmp_7));
-
- int sum1 = _mm_extract_epi32(res, 0);
- int sum2 = _mm_extract_epi32(res, 1);
- int sumsq2 = _mm_extract_epi32(res, 2);
- int cross = _mm_extract_epi32(res, 3);
-
- int var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
- int cov = cross * MATCH_SZ_SQ - sum1 * sum2;
- return cov / sqrt((double)var2);
+ // Sum cross_vec into a single value
+ const __m128i tmp = _mm_add_epi32(cross_vec_l, cross_vec_r);
+ const int cross = _mm_extract_epi32(tmp, 0) + _mm_extract_epi32(tmp, 1) +
+ _mm_extract_epi32(tmp, 2) + _mm_extract_epi32(tmp, 3);
+
+ // Note: In theory, the calculations here "should" be
+ // covariance = cross / N^2 - mean1 * mean2
+ // correlation = covariance / (stddev1 * stddev2).
+ //
+ // However, because of the scaling in aom_compute_mean_stddev, the
+ // lines below actually calculate
+ // covariance * N^2 = cross - (mean1 * N) * (mean2 * N)
+ // correlation = (covariance * N^2) / ((stddev1 * N) * (stddev2 * N))
+ //
+ // ie. we have removed the need for a division, and still end up with the
+ // correct unscaled correlation (ie, in the range [-1, +1])
+ const double covariance = cross - mean1 * mean2;
+ const double correlation = covariance * (one_over_stddev1 * one_over_stddev2);
+ return correlation;
}
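// Illustrative caller pattern (a sketch, not code from this patch; the struct
// and helper names are hypothetical). The per-window statistics are computed
// once per feature point, so the hot correlation routine runs with no division
// or square root:
typedef struct {
  int x, y;
  double mean;
  double one_over_stddev;
  bool valid;
} WindowStats;

static WindowStats get_window_stats(const unsigned char *frame, int stride,
                                    int x, int y) {
  WindowStats s = { x, y, 0.0, 0.0, false };
  s.valid = aom_compute_mean_stddev_sse4_1(frame, stride, x, y, &s.mean,
                                           &s.one_over_stddev);
  return s;
}

// Then, for each candidate pair (a in frame1, b in frame2):
//   if (a.valid && b.valid) {
//     const double corr = aom_compute_correlation_sse4_1(
//         frame1, stride1, a.x, a.y, a.mean, a.one_over_stddev,
//         frame2, stride2, b.x, b.y, b.mean, b.one_over_stddev);
//     // corr lies in [-1, +1]; larger values indicate a better match.
//   }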
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c
new file mode 100644
index 0000000000..ad5a1bd7c6
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_avx2.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+#include <math.h>
+#include <immintrin.h>
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/flow_estimation/disflow.h"
+#include "aom_dsp/x86/synonyms.h"
+#include "aom_dsp/x86/synonyms_avx2.h"
+
+#include "config/aom_dsp_rtcd.h"
+
+#if DISFLOW_PATCH_SIZE != 8
+#error "Need to change disflow_avx2.c if DISFLOW_PATCH_SIZE != 8"
+#endif
+
+// Compute horizontal and vertical kernels and return them packed into a
+// register. The coefficient ordering is:
+// h0, h1, v0, v1, h2, h3, v2, v3
+// This is chosen because it takes less work than fully separating the kernels,
+// but it is separated enough that we can pick out each coefficient pair in the
+// main compute_flow_at_point function
+static INLINE __m128i compute_cubic_kernels(double u, double v) {
+ const __m128d x = _mm_set_pd(v, u);
+
+ const __m128d x2 = _mm_mul_pd(x, x);
+ const __m128d x3 = _mm_mul_pd(x2, x);
+
+ // Macro to multiply a value v by a constant coefficient c
+#define MULC(c, v) _mm_mul_pd(_mm_set1_pd(c), v)
+
+ // Compute floating-point kernel
+ // Note: To ensure results are bit-identical to the C code, we need to perform
+ // exactly the same sequence of operations here as in the C code.
+ __m128d k0 = _mm_sub_pd(_mm_add_pd(MULC(-0.5, x), x2), MULC(0.5, x3));
+ __m128d k1 =
+ _mm_add_pd(_mm_sub_pd(_mm_set1_pd(1.0), MULC(2.5, x2)), MULC(1.5, x3));
+ __m128d k2 =
+ _mm_sub_pd(_mm_add_pd(MULC(0.5, x), MULC(2.0, x2)), MULC(1.5, x3));
+ __m128d k3 = _mm_add_pd(MULC(-0.5, x2), MULC(0.5, x3));
+#undef MULC
+
+ // Integerize
+ __m128d prec = _mm_set1_pd((double)(1 << DISFLOW_INTERP_BITS));
+
+ k0 = _mm_round_pd(_mm_mul_pd(k0, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k1 = _mm_round_pd(_mm_mul_pd(k1, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k2 = _mm_round_pd(_mm_mul_pd(k2, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k3 = _mm_round_pd(_mm_mul_pd(k3, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+ const __m128i c0 = _mm_cvtpd_epi32(k0);
+ const __m128i c1 = _mm_cvtpd_epi32(k1);
+ const __m128i c2 = _mm_cvtpd_epi32(k2);
+ const __m128i c3 = _mm_cvtpd_epi32(k3);
+
+ // Rearrange results and convert down to 16 bits, giving the target output
+ // ordering
+ const __m128i c01 = _mm_unpacklo_epi32(c0, c1);
+ const __m128i c23 = _mm_unpacklo_epi32(c2, c3);
+ return _mm_packs_epi32(c01, c23);
+}
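// For reference, the scalar kernel this vectorizes (matching the C helper the
// SSE4.1 file previously used; shown here only as an illustrative sketch).
// For a fractional offset x in [0, 1], the four bicubic taps are
//   k0 = -0.5*x +     x^2 - 0.5*x^3
//   k1 =  1.0   - 2.5*x^2 + 1.5*x^3
//   k2 =  0.5*x + 2.0*x^2 - 1.5*x^3
//   k3 =        - 0.5*x^2 + 0.5*x^3
// each scaled by (1 << DISFLOW_INTERP_BITS) and rounded to nearest.
static void get_cubic_kernel_int_scalar(double x, int16_t kernel[4]) {
  const double x2 = x * x;
  const double x3 = x2 * x;
  const double k[4] = { -0.5 * x + x2 - 0.5 * x3, 1.0 - 2.5 * x2 + 1.5 * x3,
                        0.5 * x + 2.0 * x2 - 1.5 * x3, -0.5 * x2 + 0.5 * x3 };
  for (int i = 0; i < 4; ++i) {
    kernel[i] = (int16_t)rint(k[i] * (1 << DISFLOW_INTERP_BITS));
  }
}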
+
+// Compare two regions of width x height pixels, one rooted at position
+// (x, y) in src and the other at (x + u, y + v) in ref.
+// This function computes the flow update vector b for the patch: b[0]
+// accumulates sum(dx * dt) and b[1] accumulates sum(dy * dt), where dt is
+// the difference between the warped reference patch and the source patch.
+//
+// TODO(rachelbarker): Test speed/quality impact of using bilinear interpolation
+// instead of bicubic interpolation
+static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
+ int width, int height, int stride, int x,
+ int y, double u, double v,
+ const int16_t *dx, const int16_t *dy,
+ int *b) {
+ const __m256i zero = _mm256_setzero_si256();
+
+ // Accumulate 8 32-bit partial sums for each element of b
+ // These will be flattened at the end.
+ __m256i b0_acc = _mm256_setzero_si256();
+ __m256i b1_acc = _mm256_setzero_si256();
+
+ // Split offset into integer and fractional parts, and compute cubic
+ // interpolation kernels
+ const int u_int = (int)floor(u);
+ const int v_int = (int)floor(v);
+ const double u_frac = u - floor(u);
+ const double v_frac = v - floor(v);
+
+ const __m128i kernels = compute_cubic_kernels(u_frac, v_frac);
+
+ // Storage for intermediate values between the two convolution directions
+ // In the AVX2 implementation, this needs a dummy row at the end, because
+ // we generate 2 rows at a time but the total number of rows is odd.
+ // So we generate one more row than we actually need.
+ DECLARE_ALIGNED(32, int16_t,
+ tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 4)]);
+ int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE; // Offset by one row
+
+ // Clamp coordinates so that all pixels we fetch will remain within the
+ // allocated border region, but allow them to go far enough out that
+ // the border pixels' values do not change.
+ // Since we are calculating an 8x8 block, the bottom-right pixel
+ // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic
+ // interpolation has 4 taps, meaning that the output of pixel
+ // (x_w, y_w) depends on the pixels in the range
+ // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]).
+ //
+ // Thus the most extreme coordinates which will be fetched are
+ // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9).
+ const int x0 = clamp(x + u_int, -9, width);
+ const int y0 = clamp(y + v_int, -9, height);
+
+ // Horizontal convolution
+
+ // Prepare the kernel vectors
+ // We split the kernel into two vectors with kernel indices:
+ // 0, 1, 0, 1, 0, 1, 0, 1, and
+ // 2, 3, 2, 3, 2, 3, 2, 3
+ __m256i h_kernel_01 = _mm256_broadcastd_epi32(kernels);
+ __m256i h_kernel_23 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 8));
+
+ __m256i round_const_h = _mm256_set1_epi32(1 << (DISFLOW_INTERP_BITS - 6 - 1));
+
+ for (int i = -1; i < DISFLOW_PATCH_SIZE + 2; i += 2) {
+ const int y_w = y0 + i;
+ const uint8_t *ref_row = &ref[y_w * stride + (x0 - 1)];
+ int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
+
+ // Load this row of pixels.
+ // For an 8x8 patch, we need to load the 8 image pixels + 3 extras,
+ // for a total of 11 pixels. Here we load 16 pixels, but only use
+ // the first 11.
+ __m256i row =
+ yy_loadu2_128((__m128i *)(ref_row + stride), (__m128i *)ref_row);
+
+ // Expand pixels to int16s
+ // We must use unpacks here, as we have one row in each 128-bit lane
+ // and want to handle each of those independently.
+ // This is in contrast to _mm256_cvtepu8_epi16(), which takes a single
+ // 128-bit input and widens it to 256 bits.
+ __m256i px_0to7_i16 = _mm256_unpacklo_epi8(row, zero);
+ __m256i px_4to10_i16 =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row, 4), zero);
+
+ // Compute first four outputs
+ // input pixels 0, 1, 1, 2, 2, 3, 3, 4
+ // * kernel 0, 1, 0, 1, 0, 1, 0, 1
+ __m256i px0 =
+ _mm256_unpacklo_epi16(px_0to7_i16, _mm256_srli_si256(px_0to7_i16, 2));
+ // input pixels 2, 3, 3, 4, 4, 5, 5, 6
+ // * kernel 2, 3, 2, 3, 2, 3, 2, 3
+ __m256i px1 = _mm256_unpacklo_epi16(_mm256_srli_si256(px_0to7_i16, 4),
+ _mm256_srli_si256(px_0to7_i16, 6));
+ // Convolve with kernel and sum 2x2 boxes to form first 4 outputs
+ __m256i sum0 = _mm256_add_epi32(_mm256_madd_epi16(px0, h_kernel_01),
+ _mm256_madd_epi16(px1, h_kernel_23));
+
+ __m256i out0 = _mm256_srai_epi32(_mm256_add_epi32(sum0, round_const_h),
+ DISFLOW_INTERP_BITS - 6);
+
+ // Compute second four outputs
+ __m256i px2 =
+ _mm256_unpacklo_epi16(px_4to10_i16, _mm256_srli_si256(px_4to10_i16, 2));
+ __m256i px3 = _mm256_unpacklo_epi16(_mm256_srli_si256(px_4to10_i16, 4),
+ _mm256_srli_si256(px_4to10_i16, 6));
+ __m256i sum1 = _mm256_add_epi32(_mm256_madd_epi16(px2, h_kernel_01),
+ _mm256_madd_epi16(px3, h_kernel_23));
+
+ // Round by just enough bits that the result is
+ // guaranteed to fit into an i16. Then the next stage can use 16 x 16 -> 32
+ // bit multiplies, which should be a fair bit faster than 32 x 32 -> 32 bit
+ // multiplies.
+ // This means shifting down so we have 6 extra bits, for a maximum value
+ // of +18360, which can occur if u_frac == 0.5 and the input pixels are
+ // {0, 255, 255, 0}.
+ __m256i out1 = _mm256_srai_epi32(_mm256_add_epi32(sum1, round_const_h),
+ DISFLOW_INTERP_BITS - 6);
+
+ _mm256_storeu_si256((__m256i *)tmp_row, _mm256_packs_epi32(out0, out1));
+ }
+
+ // Vertical convolution
+ const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
+ __m256i round_const_v = _mm256_set1_epi32(1 << (round_bits - 1));
+
+ __m256i v_kernel_01 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 4));
+ __m256i v_kernel_23 = _mm256_broadcastd_epi32(_mm_srli_si128(kernels, 12));
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
+ int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
+
+ // Load 5 rows of 8 x 16-bit values, and pack into 4 registers
+ // holding rows {0, 1}, {1, 2}, {2, 3}, {3, 4}
+ __m128i row0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
+ __m128i row1 = _mm_loadu_si128((__m128i *)tmp_row);
+ __m128i row2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
+ __m128i row3 =
+ _mm_loadu_si128((__m128i *)(tmp_row + 2 * DISFLOW_PATCH_SIZE));
+ __m128i row4 =
+ _mm_loadu_si128((__m128i *)(tmp_row + 3 * DISFLOW_PATCH_SIZE));
+
+ __m256i px0 = _mm256_set_m128i(row1, row0);
+ __m256i px1 = _mm256_set_m128i(row2, row1);
+ __m256i px2 = _mm256_set_m128i(row3, row2);
+ __m256i px3 = _mm256_set_m128i(row4, row3);
+
+ // We want to calculate px0 * v_kernel[0] + px1 * v_kernel[1] + ... ,
+ // but each multiply expands its output to 32 bits. So we need to be
+ // a little clever about how we do this
+ __m256i sum0 = _mm256_add_epi32(
+ _mm256_madd_epi16(_mm256_unpacklo_epi16(px0, px1), v_kernel_01),
+ _mm256_madd_epi16(_mm256_unpacklo_epi16(px2, px3), v_kernel_23));
+ __m256i sum1 = _mm256_add_epi32(
+ _mm256_madd_epi16(_mm256_unpackhi_epi16(px0, px1), v_kernel_01),
+ _mm256_madd_epi16(_mm256_unpackhi_epi16(px2, px3), v_kernel_23));
+
+ __m256i sum0_rounded =
+ _mm256_srai_epi32(_mm256_add_epi32(sum0, round_const_v), round_bits);
+ __m256i sum1_rounded =
+ _mm256_srai_epi32(_mm256_add_epi32(sum1, round_const_v), round_bits);
+
+ __m256i warped = _mm256_packs_epi32(sum0_rounded, sum1_rounded);
+ __m128i src_pixels_u8 = xx_loadu_2x64(&src[(y + i + 1) * stride + x],
+ &src[(y + i) * stride + x]);
+ __m256i src_pixels =
+ _mm256_slli_epi16(_mm256_cvtepu8_epi16(src_pixels_u8), 3);
+
+ // Calculate delta from the target patch
+ __m256i dt = _mm256_sub_epi16(warped, src_pixels);
+
+ // Load 2x8 elements each of dx and dt, to pair with the 2x8 elements of dt
+ // that we have just computed. Then compute 2x8 partial sums of dx * dt
+ // and dy * dt, implicitly sum to give 2x4 partial sums of each, and
+ // accumulate.
+ __m256i dx_row = _mm256_loadu_si256((__m256i *)&dx[i * DISFLOW_PATCH_SIZE]);
+ __m256i dy_row = _mm256_loadu_si256((__m256i *)&dy[i * DISFLOW_PATCH_SIZE]);
+ b0_acc = _mm256_add_epi32(b0_acc, _mm256_madd_epi16(dx_row, dt));
+ b1_acc = _mm256_add_epi32(b1_acc, _mm256_madd_epi16(dy_row, dt));
+ }
+
+ // Flatten the two sets of partial sums to find the final value of b
+ // We need to set b[0] = sum(b0_acc), b[1] = sum(b1_acc).
+ // We need to do 14 additions in total; a `hadd` instruction can take care
+ // of eight of them, then a vertical sum can do four more, leaving two
+ // scalar additions.
+ __m256i partial_sum_256 = _mm256_hadd_epi32(b0_acc, b1_acc);
+ __m128i partial_sum =
+ _mm_add_epi32(_mm256_extracti128_si256(partial_sum_256, 0),
+ _mm256_extracti128_si256(partial_sum_256, 1));
+ b[0] = _mm_extract_epi32(partial_sum, 0) + _mm_extract_epi32(partial_sum, 1);
+ b[1] = _mm_extract_epi32(partial_sum, 2) + _mm_extract_epi32(partial_sum, 3);
+}
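// In scalar terms, the accumulation above computes the following (illustrative
// sketch; the helper and the `warped` buffer are hypothetical, standing in for
// the bicubic interpolation result which the SIMD code keeps at 8x scale):
static void accumulate_flow_vector_scalar(const int16_t *warped,
                                          const uint8_t *src, int stride,
                                          int x, int y, const int16_t *dx,
                                          const int16_t *dy, int *b) {
  b[0] = 0;
  b[1] = 0;
  for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
    for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
      // dt = (warped reference pixel) - (source pixel), both at 8x scale
      const int dt = warped[i * DISFLOW_PATCH_SIZE + j] -
                     (src[(y + i) * stride + (x + j)] << 3);
      b[0] += dx[i * DISFLOW_PATCH_SIZE + j] * dt;
      b[1] += dy[i * DISFLOW_PATCH_SIZE + j] * dt;
    }
  }
}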
+
+// Compute the x and y gradients of the source patch in a single pass,
+// and store into dx and dy respectively.
+static INLINE void sobel_filter(const uint8_t *src, int src_stride, int16_t *dx,
+ int16_t *dy) {
+ const __m256i zero = _mm256_setzero_si256();
+
+ // Loop setup: Load the first two rows (of 10 input rows) and apply
+ // the horizontal parts of the two filters
+ __m256i row_m1_0 =
+ yy_loadu2_128((__m128i *)(src - 1), (__m128i *)(src - src_stride - 1));
+ __m256i row_m1_0_a = _mm256_unpacklo_epi8(row_m1_0, zero);
+ __m256i row_m1_0_b =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_m1_0, 1), zero);
+ __m256i row_m1_0_c =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_m1_0, 2), zero);
+
+ __m256i row_m1_0_hsmooth =
+ _mm256_add_epi16(_mm256_add_epi16(row_m1_0_a, row_m1_0_c),
+ _mm256_slli_epi16(row_m1_0_b, 1));
+ __m256i row_m1_0_hdiff = _mm256_sub_epi16(row_m1_0_a, row_m1_0_c);
+
+ // Main loop: For each pair of output rows (i, i+1):
+ // * Load rows (i+1, i+2) and apply both horizontal filters
+ // * Apply vertical filters and store results
+ // * Shift rows for next iteration
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
+ // Load rows (i+1, i+2) and apply both horizontal filters
+ const __m256i row_p1_p2 =
+ yy_loadu2_128((__m128i *)(src + (i + 2) * src_stride - 1),
+ (__m128i *)(src + (i + 1) * src_stride - 1));
+ const __m256i row_p1_p2_a = _mm256_unpacklo_epi8(row_p1_p2, zero);
+ const __m256i row_p1_p2_b =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_p1_p2, 1), zero);
+ const __m256i row_p1_p2_c =
+ _mm256_unpacklo_epi8(_mm256_srli_si256(row_p1_p2, 2), zero);
+
+ const __m256i row_p1_p2_hsmooth =
+ _mm256_add_epi16(_mm256_add_epi16(row_p1_p2_a, row_p1_p2_c),
+ _mm256_slli_epi16(row_p1_p2_b, 1));
+ const __m256i row_p1_p2_hdiff = _mm256_sub_epi16(row_p1_p2_a, row_p1_p2_c);
+
+ // Apply vertical filters and store results
+ // dx = vertical smooth(horizontal diff(input))
+ // dy = vertical diff(horizontal smooth(input))
+ const __m256i row_0_p1_hdiff =
+ _mm256_permute2x128_si256(row_m1_0_hdiff, row_p1_p2_hdiff, 0x21);
+ const __m256i dx_row =
+ _mm256_add_epi16(_mm256_add_epi16(row_m1_0_hdiff, row_p1_p2_hdiff),
+ _mm256_slli_epi16(row_0_p1_hdiff, 1));
+ const __m256i dy_row =
+ _mm256_sub_epi16(row_m1_0_hsmooth, row_p1_p2_hsmooth);
+
+ _mm256_storeu_si256((__m256i *)(dx + i * DISFLOW_PATCH_SIZE), dx_row);
+ _mm256_storeu_si256((__m256i *)(dy + i * DISFLOW_PATCH_SIZE), dy_row);
+
+ // Shift rows for next iteration
+ // This allows a lot of work to be reused, reducing the number of
+ // horizontal filtering operations from 2*3*8 = 48 to 2*10 = 20
+ row_m1_0_hsmooth = row_p1_p2_hsmooth;
+ row_m1_0_hdiff = row_p1_p2_hdiff;
+ }
+}
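// The two gradients being fused above use the separable 3-tap kernels
// {1, 0, -1} (difference) and {1, 2, 1} (smoothing):
//   dx = vertical smooth(horizontal diff(src))
//   dy = vertical diff(horizontal smooth(src))
// A direct, unfused scalar version (illustrative sketch only):
static void sobel_filter_scalar(const uint8_t *src, int stride, int16_t *dx,
                                int16_t *dy) {
  for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
    for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
      int hdiff[3], hsmooth[3];
      for (int k = -1; k <= 1; ++k) {
        const uint8_t *row = src + (i + k) * stride;
        hdiff[k + 1] = row[j - 1] - row[j + 1];
        hsmooth[k + 1] = row[j - 1] + 2 * row[j] + row[j + 1];
      }
      dx[i * DISFLOW_PATCH_SIZE + j] = hdiff[0] + 2 * hdiff[1] + hdiff[2];
      dy[i * DISFLOW_PATCH_SIZE + j] = hsmooth[0] - hsmooth[2];
    }
  }
}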
+
+static INLINE void compute_flow_matrix(const int16_t *dx, int dx_stride,
+ const int16_t *dy, int dy_stride,
+ double *M) {
+ __m256i acc[4] = { 0 };
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i += 2) {
+ __m256i dx_row = _mm256_loadu_si256((__m256i *)&dx[i * dx_stride]);
+ __m256i dy_row = _mm256_loadu_si256((__m256i *)&dy[i * dy_stride]);
+
+ acc[0] = _mm256_add_epi32(acc[0], _mm256_madd_epi16(dx_row, dx_row));
+ acc[1] = _mm256_add_epi32(acc[1], _mm256_madd_epi16(dx_row, dy_row));
+ // Don't compute acc[2], as it should be equal to acc[1]
+ acc[3] = _mm256_add_epi32(acc[3], _mm256_madd_epi16(dy_row, dy_row));
+ }
+
+ // Condense sums
+ __m256i partial_sum_0 = _mm256_hadd_epi32(acc[0], acc[1]);
+ __m256i partial_sum_1 = _mm256_hadd_epi32(acc[1], acc[3]);
+ __m256i result_256 = _mm256_hadd_epi32(partial_sum_0, partial_sum_1);
+ __m128i result = _mm_add_epi32(_mm256_extracti128_si256(result_256, 0),
+ _mm256_extracti128_si256(result_256, 1));
+
+ // Apply regularization
+ // We follow the standard regularization method of adding `k * I` before
+ // inverting. This ensures that the matrix will be invertible.
+ //
+ // Setting the regularization strength k to 1 seems to work well here, as
+ // typical values coming from the other equations are very large (1e5 to
+ // 1e6, with an upper limit of around 6e7, at the time of writing).
+ // It also preserves the property that all matrix values are whole numbers,
+ // which is convenient for integerized SIMD implementation.
+ result = _mm_add_epi32(result, _mm_set_epi32(1, 0, 0, 1));
+
+ // Convert results to doubles and store
+ _mm256_storeu_pd(M, _mm256_cvtepi32_pd(result));
+}
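// Scalar form of the matrix assembled above (illustrative; this mirrors the
// CHECK_RESULTS cross-check that the SSE4.1 file previously carried):
//   M = [ sum(dx*dx) + 1   sum(dx*dy)     ]
//       [ sum(dx*dy)       sum(dy*dy) + 1 ]
static void compute_flow_matrix_scalar(const int16_t *dx, int dx_stride,
                                       const int16_t *dy, int dy_stride,
                                       double *M) {
  int tmp[4] = { 0 };
  for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
    for (int j = 0; j < DISFLOW_PATCH_SIZE; j++) {
      tmp[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
      tmp[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
      // tmp[2] equals tmp[1] by symmetry
      tmp[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];
    }
  }
  // Regularization: add the identity so the matrix is always invertible
  tmp[0] += 1;
  tmp[3] += 1;
  tmp[2] = tmp[1];
  for (int i = 0; i < 4; i++) M[i] = (double)tmp[i];
}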
+
+// Try to invert the matrix M
+// Note: Due to the nature of how a least-squares matrix is constructed, all of
+// the eigenvalues will be >= 0, and therefore det M >= 0 as well.
+// The regularization term `+ k * I` further ensures that det M >= k^2.
+// As mentioned in compute_flow_matrix(), here we use k = 1, so det M >= 1.
+// So we don't have to worry about non-invertible matrices here.
+static INLINE void invert_2x2(const double *M, double *M_inv) {
+ double det = (M[0] * M[3]) - (M[1] * M[2]);
+ assert(det >= 1);
+ const double det_inv = 1 / det;
+
+ M_inv[0] = M[3] * det_inv;
+ M_inv[1] = -M[1] * det_inv;
+ M_inv[2] = -M[2] * det_inv;
+ M_inv[3] = M[0] * det_inv;
+}
+
+void aom_compute_flow_at_point_avx2(const uint8_t *src, const uint8_t *ref,
+ int x, int y, int width, int height,
+ int stride, double *u, double *v) {
+ DECLARE_ALIGNED(32, double, M[4]);
+ DECLARE_ALIGNED(32, double, M_inv[4]);
+ DECLARE_ALIGNED(32, int16_t, dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
+ DECLARE_ALIGNED(32, int16_t, dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
+ int b[2];
+
+ // Compute gradients within this patch
+ const uint8_t *src_patch = &src[y * stride + x];
+ sobel_filter(src_patch, stride, dx, dy);
+
+ compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
+ invert_2x2(M, M_inv);
+
+ for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
+ compute_flow_vector(src, ref, width, height, stride, x, y, *u, *v, dx, dy,
+ b);
+
+ // Solve flow equations to find a better estimate for the flow vector
+ // at this point
+ const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1];
+ const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1];
+ *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2);
+ *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2);
+
+ if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) {
+ // Stop iteration when we're close to convergence
+ break;
+ }
+ }
+}
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
index 2c5effd638..e0a4bd040c 100644
--- a/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
@@ -1,13 +1,12 @@
/*
- * Copyright (c) 2022, Alliance for Open Media. All rights reserved
+ * Copyright (c) 2024, Alliance for Open Media. All rights reserved
*
- * This source code is subject to the terms of the BSD 3-Clause Clear License
- * and the Alliance for Open Media Patent License 1.0. If the BSD 3-Clause Clear
- * License was not distributed with this source code in the LICENSE file, you
- * can obtain it at aomedia.org/license/software-license/bsd-3-c-c/. If the
- * Alliance for Open Media Patent License 1.0 was not distributed with this
- * source code in the PATENTS file, you can obtain it at
- * aomedia.org/license/patent-license/.
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <assert.h>
@@ -20,46 +19,59 @@
#include "config/aom_dsp_rtcd.h"
-// Internal cross-check against C code
-// If you set this to 1 and compile in debug mode, then the outputs of the two
-// convolution stages will be checked against the plain C version of the code,
-// and an assertion will be fired if the results differ.
-#define CHECK_RESULTS 0
-
-// Note: Max sum(+ve coefficients) = 1.125 * scale
-static INLINE void get_cubic_kernel_dbl(double x, double kernel[4]) {
- // Check that the fractional position is in range.
- //
- // Note: x is calculated from, e.g., `u_frac = u - floor(u)`.
- // Mathematically, this implies that 0 <= x < 1. However, in practice it is
- // possible to have x == 1 due to floating point rounding. This is fine,
- // and we still interpolate correctly if we allow x = 1.
- assert(0 <= x && x <= 1);
-
- double x2 = x * x;
- double x3 = x2 * x;
- kernel[0] = -0.5 * x + x2 - 0.5 * x3;
- kernel[1] = 1.0 - 2.5 * x2 + 1.5 * x3;
- kernel[2] = 0.5 * x + 2.0 * x2 - 1.5 * x3;
- kernel[3] = -0.5 * x2 + 0.5 * x3;
-}
-
-static INLINE void get_cubic_kernel_int(double x, int16_t kernel[4]) {
- double kernel_dbl[4];
- get_cubic_kernel_dbl(x, kernel_dbl);
-
- kernel[0] = (int16_t)rint(kernel_dbl[0] * (1 << DISFLOW_INTERP_BITS));
- kernel[1] = (int16_t)rint(kernel_dbl[1] * (1 << DISFLOW_INTERP_BITS));
- kernel[2] = (int16_t)rint(kernel_dbl[2] * (1 << DISFLOW_INTERP_BITS));
- kernel[3] = (int16_t)rint(kernel_dbl[3] * (1 << DISFLOW_INTERP_BITS));
-}
-
-#if CHECK_RESULTS
-static INLINE int get_cubic_value_int(const int *p, const int16_t kernel[4]) {
- return kernel[0] * p[0] + kernel[1] * p[1] + kernel[2] * p[2] +
- kernel[3] * p[3];
+#if DISFLOW_PATCH_SIZE != 8
+#error "Need to change disflow_sse4.c if DISFLOW_PATCH_SIZE != 8"
+#endif
+
+// Compute horizontal and vertical kernels and return them packed into a
+// register. The coefficient ordering is:
+// h0, h1, v0, v1, h2, h3, v2, v3
+// This is chosen because it takes less work than fully separating the kernels,
+// but it is separated enough that we can pick out each coefficient pair in the
+// main compute_flow_at_point function
+static INLINE __m128i compute_cubic_kernels(double u, double v) {
+ const __m128d x = _mm_set_pd(v, u);
+
+ const __m128d x2 = _mm_mul_pd(x, x);
+ const __m128d x3 = _mm_mul_pd(x2, x);
+
+ // Macro to multiply a value v by a constant coefficient c
+#define MULC(c, v) _mm_mul_pd(_mm_set1_pd(c), v)
+
+ // Compute floating-point kernel
+ // Note: To ensure results are bit-identical to the C code, we need to perform
+ // exactly the same sequence of operations here as in the C code.
+ __m128d k0 = _mm_sub_pd(_mm_add_pd(MULC(-0.5, x), x2), MULC(0.5, x3));
+ __m128d k1 =
+ _mm_add_pd(_mm_sub_pd(_mm_set1_pd(1.0), MULC(2.5, x2)), MULC(1.5, x3));
+ __m128d k2 =
+ _mm_sub_pd(_mm_add_pd(MULC(0.5, x), MULC(2.0, x2)), MULC(1.5, x3));
+ __m128d k3 = _mm_add_pd(MULC(-0.5, x2), MULC(0.5, x3));
+#undef MULC
+
+ // Integerize
+ __m128d prec = _mm_set1_pd((double)(1 << DISFLOW_INTERP_BITS));
+
+ k0 = _mm_round_pd(_mm_mul_pd(k0, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k1 = _mm_round_pd(_mm_mul_pd(k1, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k2 = _mm_round_pd(_mm_mul_pd(k2, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+ k3 = _mm_round_pd(_mm_mul_pd(k3, prec),
+ _MM_FROUND_TO_NEAREST_INT | _MM_FROUND_NO_EXC);
+
+ const __m128i c0 = _mm_cvtpd_epi32(k0);
+ const __m128i c1 = _mm_cvtpd_epi32(k1);
+ const __m128i c2 = _mm_cvtpd_epi32(k2);
+ const __m128i c3 = _mm_cvtpd_epi32(k3);
+
+ // Rearrange results and convert down to 16 bits, giving the target output
+ // ordering
+ const __m128i c01 = _mm_unpacklo_epi32(c0, c1);
+ const __m128i c23 = _mm_unpacklo_epi32(c2, c3);
+ return _mm_packs_epi32(c01, c23);
}
-#endif // CHECK_RESULTS
// Compare two regions of width x height pixels, one rooted at position
// (x, y) in src and the other at (x + u, y + v) in ref.
@@ -80,10 +92,6 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
// These will be flattened at the end.
__m128i b0_acc = _mm_setzero_si128();
__m128i b1_acc = _mm_setzero_si128();
-#if CHECK_RESULTS
- // Also keep a running sum using the C algorithm, for cross-checking
- int c_result[2] = { 0 };
-#endif // CHECK_RESULTS
// Split offset into integer and fractional parts, and compute cubic
// interpolation kernels
@@ -92,13 +100,11 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
const double u_frac = u - floor(u);
const double v_frac = v - floor(v);
- int16_t h_kernel[4];
- int16_t v_kernel[4];
- get_cubic_kernel_int(u_frac, h_kernel);
- get_cubic_kernel_int(v_frac, v_kernel);
+ const __m128i kernels = compute_cubic_kernels(u_frac, v_frac);
// Storage for intermediate values between the two convolution directions
- int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)];
+ DECLARE_ALIGNED(16, int16_t,
+ tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)]);
int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE; // Offset by one row
// Clamp coordinates so that all pixels we fetch will remain within the
@@ -121,8 +127,8 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
// We split the kernel into two vectors with kernel indices:
// 0, 1, 0, 1, 0, 1, 0, 1, and
// 2, 3, 2, 3, 2, 3, 2, 3
- __m128i h_kernel_01 = xx_set2_epi16(h_kernel[0], h_kernel[1]);
- __m128i h_kernel_23 = xx_set2_epi16(h_kernel[2], h_kernel[3]);
+ __m128i h_kernel_01 = _mm_set1_epi32(_mm_extract_epi32(kernels, 0));
+ __m128i h_kernel_23 = _mm_set1_epi32(_mm_extract_epi32(kernels, 2));
__m128i round_const_h = _mm_set1_epi32(1 << (DISFLOW_INTERP_BITS - 6 - 1));
@@ -141,10 +147,6 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
__m128i px_0to7_i16 = _mm_cvtepu8_epi16(row);
__m128i px_4to10_i16 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 4));
- // Relevant multiply instruction
- // This multiplies pointwise, then sums in pairs.
- //_mm_madd_epi16();
-
// Compute first four outputs
// input pixels 0, 1, 1, 2, 2, 3, 3, 4
// * kernel 0, 1, 0, 1, 0, 1, 0, 1
@@ -180,43 +182,14 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
DISFLOW_INTERP_BITS - 6);
_mm_storeu_si128((__m128i *)tmp_row, _mm_packs_epi32(out0, out1));
-
-#if CHECK_RESULTS && !defined(NDEBUG)
- // Cross-check
- for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
- const int x_w = x0 + j;
- int arr[4];
-
- arr[0] = (int)ref[y_w * stride + (x_w - 1)];
- arr[1] = (int)ref[y_w * stride + (x_w + 0)];
- arr[2] = (int)ref[y_w * stride + (x_w + 1)];
- arr[3] = (int)ref[y_w * stride + (x_w + 2)];
-
- // Apply kernel and round, keeping 6 extra bits of precision.
- //
- // 6 is the maximum allowable number of extra bits which will avoid
- // the intermediate values overflowing an int16_t. The most extreme
- // intermediate value occurs when:
- // * The input pixels are [0, 255, 255, 0]
- // * u_frac = 0.5
- // In this case, the un-scaled output is 255 * 1.125 = 286.875.
- // As an integer with 6 fractional bits, that is 18360, which fits
- // in an int16_t. But with 7 fractional bits it would be 36720,
- // which is too large.
- const int c_value = ROUND_POWER_OF_TWO(get_cubic_value_int(arr, h_kernel),
- DISFLOW_INTERP_BITS - 6);
- (void)c_value; // Suppress warnings
- assert(tmp_row[j] == c_value);
- }
-#endif // CHECK_RESULTS
}
// Vertical convolution
const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
__m128i round_const_v = _mm_set1_epi32(1 << (round_bits - 1));
- __m128i v_kernel_01 = xx_set2_epi16(v_kernel[0], v_kernel[1]);
- __m128i v_kernel_23 = xx_set2_epi16(v_kernel[2], v_kernel[3]);
+ __m128i v_kernel_01 = _mm_set1_epi32(_mm_extract_epi32(kernels, 1));
+ __m128i v_kernel_23 = _mm_set1_epi32(_mm_extract_epi32(kernels, 3));
for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
@@ -259,30 +232,6 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
__m128i dy_row = _mm_loadu_si128((__m128i *)&dy[i * DISFLOW_PATCH_SIZE]);
b0_acc = _mm_add_epi32(b0_acc, _mm_madd_epi16(dx_row, dt));
b1_acc = _mm_add_epi32(b1_acc, _mm_madd_epi16(dy_row, dt));
-
-#if CHECK_RESULTS
- int16_t dt_arr[8];
- memcpy(dt_arr, &dt, 8 * sizeof(*dt_arr));
- for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
- int16_t *p = &tmp[i * DISFLOW_PATCH_SIZE + j];
- int arr[4] = { p[-DISFLOW_PATCH_SIZE], p[0], p[DISFLOW_PATCH_SIZE],
- p[2 * DISFLOW_PATCH_SIZE] };
- const int result = get_cubic_value_int(arr, v_kernel);
-
- // Apply kernel and round.
- // This time, we have to round off the 6 extra bits which were kept
- // earlier, but we also want to keep DISFLOW_DERIV_SCALE_LOG2 extra bits
- // of precision to match the scale of the dx and dy arrays.
- const int c_warped = ROUND_POWER_OF_TWO(result, round_bits);
- const int c_src_px = src[(x + j) + (y + i) * stride] << 3;
- const int c_dt = c_warped - c_src_px;
-
- assert(dt_arr[j] == c_dt);
-
- c_result[0] += dx[i * DISFLOW_PATCH_SIZE + j] * c_dt;
- c_result[1] += dy[i * DISFLOW_PATCH_SIZE + j] * c_dt;
- }
-#endif // CHECK_RESULTS
}
// Flatten the two sets of partial sums to find the final value of b
@@ -292,156 +241,66 @@ static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
__m128i partial_sum = _mm_hadd_epi32(b0_acc, b1_acc);
b[0] = _mm_extract_epi32(partial_sum, 0) + _mm_extract_epi32(partial_sum, 1);
b[1] = _mm_extract_epi32(partial_sum, 2) + _mm_extract_epi32(partial_sum, 3);
-
-#if CHECK_RESULTS
- assert(b[0] == c_result[0]);
- assert(b[1] == c_result[1]);
-#endif // CHECK_RESULTS
}
-static INLINE void sobel_filter_x(const uint8_t *src, int src_stride,
- int16_t *dst, int dst_stride) {
- int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
- int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;
-#if CHECK_RESULTS
- const int taps = 3;
-#endif // CHECK_RESULTS
-
- // Horizontal filter
- // As the kernel is simply {1, 0, -1}, we implement this as simply
- // out[x] = image[x-1] - image[x+1]
- // rather than doing a "proper" convolution operation
- for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) {
- const uint8_t *src_row = src + y * src_stride;
- int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
-
- // Load pixels and expand to 16 bits
- __m128i row = _mm_loadu_si128((__m128i *)(src_row - 1));
- __m128i px0 = _mm_cvtepu8_epi16(row);
- __m128i px2 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
-
- __m128i out = _mm_sub_epi16(px0, px2);
-
- // Store to intermediate array
- _mm_storeu_si128((__m128i *)tmp_row, out);
-
-#if CHECK_RESULTS
- // Cross-check
- static const int16_t h_kernel[3] = { 1, 0, -1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += h_kernel[k] * src_row[x + k - 1];
- }
- (void)sum;
- assert(tmp_row[x] == sum);
- }
-#endif // CHECK_RESULTS
- }
-
- // Vertical filter
- // Here the kernel is {1, 2, 1}, which can be implemented
- // with simple sums rather than multiplies and adds.
- // In order to minimize dependency chains, we evaluate in the order
- // (image[y - 1] + image[y + 1]) + (image[y] << 1)
- // This way, the first addition and the shift can happen in parallel
- for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) {
- const int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
- int16_t *dst_row = dst + y * dst_stride;
-
- __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
- __m128i px1 = _mm_loadu_si128((__m128i *)tmp_row);
- __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
-
- __m128i out =
- _mm_add_epi16(_mm_add_epi16(px0, px2), _mm_slli_epi16(px1, 1));
-
- _mm_storeu_si128((__m128i *)dst_row, out);
-
-#if CHECK_RESULTS
- static const int16_t v_kernel[3] = { 1, 2, 1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x];
- }
- (void)sum;
- assert(dst_row[x] == sum);
- }
-#endif // CHECK_RESULTS
- }
-}
-
-static INLINE void sobel_filter_y(const uint8_t *src, int src_stride,
- int16_t *dst, int dst_stride) {
- int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
- int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;
-#if CHECK_RESULTS
- const int taps = 3;
-#endif // CHECK_RESULTS
-
- // Horizontal filter
- // Here the kernel is {1, 2, 1}, which can be implemented
- // with simple sums rather than multiplies and adds.
- // In order to minimize dependency chains, we evaluate in the order
- // (image[y - 1] + image[y + 1]) + (image[y] << 1)
- // This way, the first addition and the shift can happen in parallel
- for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) {
- const uint8_t *src_row = src + y * src_stride;
- int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
-
- // Load pixels and expand to 16 bits
- __m128i row = _mm_loadu_si128((__m128i *)(src_row - 1));
- __m128i px0 = _mm_cvtepu8_epi16(row);
- __m128i px1 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 1));
- __m128i px2 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
-
- __m128i out =
- _mm_add_epi16(_mm_add_epi16(px0, px2), _mm_slli_epi16(px1, 1));
-
- // Store to intermediate array
- _mm_storeu_si128((__m128i *)tmp_row, out);
-
-#if CHECK_RESULTS
- // Cross-check
- static const int16_t h_kernel[3] = { 1, 2, 1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += h_kernel[k] * src_row[x + k - 1];
- }
- (void)sum;
- assert(tmp_row[x] == sum);
- }
-#endif // CHECK_RESULTS
- }
-
- // Vertical filter
- // As the kernel is simply {1, 0, -1}, we implement this as simply
- // out[x] = image[x-1] - image[x+1]
- // rather than doing a "proper" convolution operation
- for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) {
- const int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
- int16_t *dst_row = dst + y * dst_stride;
-
- __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
- __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
-
- __m128i out = _mm_sub_epi16(px0, px2);
-
- _mm_storeu_si128((__m128i *)dst_row, out);
-
-#if CHECK_RESULTS
- static const int16_t v_kernel[3] = { 1, 0, -1 };
- for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
- int sum = 0;
- for (int k = 0; k < taps; ++k) {
- sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x];
- }
- (void)sum;
- assert(dst_row[x] == sum);
- }
-#endif // CHECK_RESULTS
+// Compute the x and y gradients of the source patch in a single pass,
+// and store into dx and dy respectively.
+static INLINE void sobel_filter(const uint8_t *src, int src_stride, int16_t *dx,
+ int16_t *dy) {
+ // Loop setup: Load the first two rows (of 10 input rows) and apply
+ // the horizontal parts of the two filters
+ __m128i row_m1 = _mm_loadu_si128((__m128i *)(src - src_stride - 1));
+ __m128i row_m1_a = _mm_cvtepu8_epi16(row_m1);
+ __m128i row_m1_b = _mm_cvtepu8_epi16(_mm_srli_si128(row_m1, 1));
+ __m128i row_m1_c = _mm_cvtepu8_epi16(_mm_srli_si128(row_m1, 2));
+
+ __m128i row_m1_hsmooth = _mm_add_epi16(_mm_add_epi16(row_m1_a, row_m1_c),
+ _mm_slli_epi16(row_m1_b, 1));
+ __m128i row_m1_hdiff = _mm_sub_epi16(row_m1_a, row_m1_c);
+
+ __m128i row = _mm_loadu_si128((__m128i *)(src - 1));
+ __m128i row_a = _mm_cvtepu8_epi16(row);
+ __m128i row_b = _mm_cvtepu8_epi16(_mm_srli_si128(row, 1));
+ __m128i row_c = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
+
+ __m128i row_hsmooth =
+ _mm_add_epi16(_mm_add_epi16(row_a, row_c), _mm_slli_epi16(row_b, 1));
+ __m128i row_hdiff = _mm_sub_epi16(row_a, row_c);
+
+ // Main loop: For each of the 8 output rows:
+ // * Load row i+1 and apply both horizontal filters
+ // * Apply vertical filters and store results
+ // * Shift rows for next iteration
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ // Load row i+1 and apply both horizontal filters
+ const __m128i row_p1 =
+ _mm_loadu_si128((__m128i *)(src + (i + 1) * src_stride - 1));
+ const __m128i row_p1_a = _mm_cvtepu8_epi16(row_p1);
+ const __m128i row_p1_b = _mm_cvtepu8_epi16(_mm_srli_si128(row_p1, 1));
+ const __m128i row_p1_c = _mm_cvtepu8_epi16(_mm_srli_si128(row_p1, 2));
+
+ const __m128i row_p1_hsmooth = _mm_add_epi16(
+ _mm_add_epi16(row_p1_a, row_p1_c), _mm_slli_epi16(row_p1_b, 1));
+ const __m128i row_p1_hdiff = _mm_sub_epi16(row_p1_a, row_p1_c);
+
+ // Apply vertical filters and store results
+ // dx = vertical smooth(horizontal diff(input))
+ // dy = vertical diff(horizontal smooth(input))
+ const __m128i dx_row =
+ _mm_add_epi16(_mm_add_epi16(row_m1_hdiff, row_p1_hdiff),
+ _mm_slli_epi16(row_hdiff, 1));
+ const __m128i dy_row = _mm_sub_epi16(row_m1_hsmooth, row_p1_hsmooth);
+
+ _mm_storeu_si128((__m128i *)(dx + i * DISFLOW_PATCH_SIZE), dx_row);
+ _mm_storeu_si128((__m128i *)(dy + i * DISFLOW_PATCH_SIZE), dy_row);
+
+ // Shift rows for next iteration
+ // This allows a lot of work to be reused, reducing the number of
+ // horizontal filtering operations from 2*3*8 = 48 to 2*10 = 20
+ row_m1_hsmooth = row_hsmooth;
+ row_m1_hdiff = row_hdiff;
+ row_hsmooth = row_p1_hsmooth;
+ row_hdiff = row_p1_hdiff;
}
}
@@ -476,30 +335,6 @@ static INLINE void compute_flow_matrix(const int16_t *dx, int dx_stride,
// which is convenient for integerized SIMD implementation.
result = _mm_add_epi32(result, _mm_set_epi32(1, 0, 0, 1));
-#if CHECK_RESULTS
- int tmp[4] = { 0 };
-
- for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
- for (int j = 0; j < DISFLOW_PATCH_SIZE; j++) {
- tmp[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
- tmp[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
- // Don't compute tmp[2], as it should be equal to tmp[1]
- tmp[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];
- }
- }
-
- // Apply regularization
- tmp[0] += 1;
- tmp[3] += 1;
-
- tmp[2] = tmp[1];
-
- assert(tmp[0] == _mm_extract_epi32(result, 0));
- assert(tmp[1] == _mm_extract_epi32(result, 1));
- assert(tmp[2] == _mm_extract_epi32(result, 2));
- assert(tmp[3] == _mm_extract_epi32(result, 3));
-#endif // CHECK_RESULTS
-
// Convert results to doubles and store
_mm_storeu_pd(M, _mm_cvtepi32_pd(result));
_mm_storeu_pd(M + 2, _mm_cvtepi32_pd(_mm_srli_si128(result, 8)));
@@ -525,16 +360,15 @@ static INLINE void invert_2x2(const double *M, double *M_inv) {
void aom_compute_flow_at_point_sse4_1(const uint8_t *src, const uint8_t *ref,
int x, int y, int width, int height,
int stride, double *u, double *v) {
- double M[4];
- double M_inv[4];
+ DECLARE_ALIGNED(16, double, M[4]);
+ DECLARE_ALIGNED(16, double, M_inv[4]);
+ DECLARE_ALIGNED(16, int16_t, dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
+ DECLARE_ALIGNED(16, int16_t, dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE]);
int b[2];
- int16_t dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
- int16_t dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
// Compute gradients within this patch
const uint8_t *src_patch = &src[y * stride + x];
- sobel_filter_x(src_patch, stride, dx, DISFLOW_PATCH_SIZE);
- sobel_filter_y(src_patch, stride, dy, DISFLOW_PATCH_SIZE);
+ sobel_filter(src_patch, stride, dx, dy);
compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
invert_2x2(M, M_inv);