author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-19 00:47:55 +0000
commit:    26a029d407be480d791972afb5975cf62c9360a6
tree:      f435a8308119effd964b339f76abb83a57c29483 /third_party/aom/aom_dsp/flow_estimation
parent:    Initial commit.
Adding upstream version 124.0.1. (tag: upstream/124.0.1)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'third_party/aom/aom_dsp/flow_estimation')
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/arm/disflow_neon.c      | 368
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_detect.c         | 167
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_detect.h         |  80
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_match.c          | 259
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/corner_match.h          |  41
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/disflow.c               | 823
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/disflow.h               | 106
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/flow_estimation.c       |  60
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/flow_estimation.h       |  95
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/ransac.c                | 484
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/ransac.h                |  35
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c |  80
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c | 104
-rw-r--r--  third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c      | 558
14 files changed, 3260 insertions, 0 deletions
diff --git a/third_party/aom/aom_dsp/flow_estimation/arm/disflow_neon.c b/third_party/aom/aom_dsp/flow_estimation/arm/disflow_neon.c
new file mode 100644
index 0000000000..ee42be7393
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/arm/disflow_neon.c
@@ -0,0 +1,368 @@
+/*
+ * Copyright (c) 2023, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include "aom_dsp/flow_estimation/disflow.h"
+
+#include <arm_neon.h>
+#include <math.h>
+
+#include "aom_dsp/arm/mem_neon.h"
+#include "aom_dsp/arm/sum_neon.h"
+#include "config/aom_config.h"
+#include "config/aom_dsp_rtcd.h"
+
+static INLINE void get_cubic_kernel_dbl(double x, double kernel[4]) {
+ // Check that the fractional position is in range.
+ //
+ // Note: x is calculated from (eg.) `u_frac = u - floor(u)`.
+ // Mathematically, this implies that 0 <= x < 1. However, in practice it is
+ // possible to have x == 1 due to floating point rounding. This is fine,
+ // and we still interpolate correctly if we allow x = 1.
+ assert(0 <= x && x <= 1);
+
+ double x2 = x * x;
+ double x3 = x2 * x;
+ kernel[0] = -0.5 * x + x2 - 0.5 * x3;
+ kernel[1] = 1.0 - 2.5 * x2 + 1.5 * x3;
+ kernel[2] = 0.5 * x + 2.0 * x2 - 1.5 * x3;
+ kernel[3] = -0.5 * x2 + 0.5 * x3;
+}
+
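+// A quick check of the kernel above (illustrative; not part of the original
+// file): the four taps always sum to 1, since the x, x^2 and x^3 terms
+// cancel, and at the endpoints the kernel degenerates to a copy:
+//   get_cubic_kernel_dbl(0.0) -> { 0, 1, 0, 0 }
+//   get_cubic_kernel_dbl(1.0) -> { 0, 0, 1, 0 }
+// so the interpolation passes exactly through the input samples.
+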
+static INLINE void get_cubic_kernel_int(double x, int kernel[4]) {
+ double kernel_dbl[4];
+ get_cubic_kernel_dbl(x, kernel_dbl);
+
+ kernel[0] = (int)rint(kernel_dbl[0] * (1 << DISFLOW_INTERP_BITS));
+ kernel[1] = (int)rint(kernel_dbl[1] * (1 << DISFLOW_INTERP_BITS));
+ kernel[2] = (int)rint(kernel_dbl[2] * (1 << DISFLOW_INTERP_BITS));
+ kernel[3] = (int)rint(kernel_dbl[3] * (1 << DISFLOW_INTERP_BITS));
+}
+
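+// Worked example (assuming DISFLOW_INTERP_BITS == 14, per disflow.h):
+// for x = 0.5, get_cubic_kernel_dbl() gives
+//   { -0.0625, 0.5625, 0.5625, -0.0625 },
+// so the integer kernel is { -1024, 9216, 9216, -1024 }, which sums to
+// exactly 1 << 14. Note that because each tap is rounded independently,
+// the taps are not guaranteed to sum to 1 << DISFLOW_INTERP_BITS for
+// every value of x.
+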
+// Warp a DISFLOW_PATCH_SIZE x DISFLOW_PATCH_SIZE patch of ref, rooted at
+// (x + u, y + v), using bicubic interpolation, and store the per-pixel
+// differences between this warped patch and the source patch rooted at
+// (x, y) in src into the dt array.
+static INLINE void compute_flow_error(const uint8_t *src, const uint8_t *ref,
+ int width, int height, int stride, int x,
+ int y, double u, double v, int16_t *dt) {
+ // Split offset into integer and fractional parts, and compute cubic
+ // interpolation kernels
+ const int u_int = (int)floor(u);
+ const int v_int = (int)floor(v);
+ const double u_frac = u - floor(u);
+ const double v_frac = v - floor(v);
+
+ int h_kernel[4];
+ int v_kernel[4];
+ get_cubic_kernel_int(u_frac, h_kernel);
+ get_cubic_kernel_int(v_frac, v_kernel);
+
+ int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)];
+
+ // Clamp coordinates so that all pixels we fetch will remain within the
+ // allocated border region, but allow them to go far enough out that
+ // the border pixels' values do not change.
+ // Since we are calculating an 8x8 block, the bottom-right pixel
+ // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic
+ // interpolation has 4 taps, meaning that the output of pixel
+ // (x_w, y_w) depends on the pixels in the range
+ // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]).
+ //
+ // Thus the most extreme coordinates which will be fetched are
+ // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9).
+ const int x0 = clamp(x + u_int, -9, width);
+ const int y0 = clamp(y + v_int, -9, height);
+
+ // Horizontal convolution.
+ const uint8_t *ref_start = ref + (y0 - 1) * stride + (x0 - 1);
+ int16x4_t h_filter = vmovn_s32(vld1q_s32(h_kernel));
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE + 3; ++i) {
+ uint8x16_t r = vld1q_u8(ref_start + i * stride);
+ uint16x8_t r0 = vmovl_u8(vget_low_u8(r));
+ uint16x8_t r1 = vmovl_u8(vget_high_u8(r));
+
+ int16x8_t s0 = vreinterpretq_s16_u16(r0);
+ int16x8_t s1 = vreinterpretq_s16_u16(vextq_u16(r0, r1, 1));
+ int16x8_t s2 = vreinterpretq_s16_u16(vextq_u16(r0, r1, 2));
+ int16x8_t s3 = vreinterpretq_s16_u16(vextq_u16(r0, r1, 3));
+
+ int32x4_t sum_lo = vmull_lane_s16(vget_low_s16(s0), h_filter, 0);
+ sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s1), h_filter, 1);
+ sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s2), h_filter, 2);
+ sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(s3), h_filter, 3);
+
+ int32x4_t sum_hi = vmull_lane_s16(vget_high_s16(s0), h_filter, 0);
+ sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s1), h_filter, 1);
+ sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s2), h_filter, 2);
+ sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(s3), h_filter, 3);
+
+ // 6 is the maximum allowable number of extra bits which will avoid
+ // the intermediate values overflowing an int16_t. The most extreme
+ // intermediate value occurs when:
+ // * The input pixels are [0, 255, 255, 0]
+ // * u_frac = 0.5
+ // In this case, the un-scaled output is 255 * 1.125 = 286.875.
+ // As an integer with 6 fractional bits, that is 18360, which fits
+ // in an int16_t. But with 7 fractional bits it would be 36720,
+ // which is too large.
+
+ int16x8_t sum = vcombine_s16(vrshrn_n_s32(sum_lo, DISFLOW_INTERP_BITS - 6),
+ vrshrn_n_s32(sum_hi, DISFLOW_INTERP_BITS - 6));
+ vst1q_s16(tmp_ + i * DISFLOW_PATCH_SIZE, sum);
+ }
+
+ // Vertical convolution.
+ int16x4_t v_filter = vmovn_s32(vld1q_s32(v_kernel));
+ int16_t *tmp_start = tmp_ + DISFLOW_PATCH_SIZE;
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
+ int16x8_t t0 = vld1q_s16(tmp_start + (i - 1) * DISFLOW_PATCH_SIZE);
+ int16x8_t t1 = vld1q_s16(tmp_start + i * DISFLOW_PATCH_SIZE);
+ int16x8_t t2 = vld1q_s16(tmp_start + (i + 1) * DISFLOW_PATCH_SIZE);
+ int16x8_t t3 = vld1q_s16(tmp_start + (i + 2) * DISFLOW_PATCH_SIZE);
+
+ int32x4_t sum_lo = vmull_lane_s16(vget_low_s16(t0), v_filter, 0);
+ sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(t1), v_filter, 1);
+ sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(t2), v_filter, 2);
+ sum_lo = vmlal_lane_s16(sum_lo, vget_low_s16(t3), v_filter, 3);
+
+ int32x4_t sum_hi = vmull_lane_s16(vget_high_s16(t0), v_filter, 0);
+ sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(t1), v_filter, 1);
+ sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(t2), v_filter, 2);
+ sum_hi = vmlal_lane_s16(sum_hi, vget_high_s16(t3), v_filter, 3);
+
+ uint8x8_t s = vld1_u8(src + (i + y) * stride + x);
+ int16x8_t s_s16 = vreinterpretq_s16_u16(vshll_n_u8(s, 3));
+
+ // This time, we have to round off the 6 extra bits which were kept
+ // earlier, but we also want to keep DISFLOW_DERIV_SCALE_LOG2 extra bits
+ // of precision to match the scale of the dx and dy arrays.
+ sum_lo = vrshrq_n_s32(sum_lo,
+ DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2);
+ sum_hi = vrshrq_n_s32(sum_hi,
+ DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2);
+ int32x4_t err_lo = vsubw_s16(sum_lo, vget_low_s16(s_s16));
+ int32x4_t err_hi = vsubw_s16(sum_hi, vget_high_s16(s_s16));
+ vst1q_s16(dt + i * DISFLOW_PATCH_SIZE,
+ vcombine_s16(vmovn_s32(err_lo), vmovn_s32(err_hi)));
+ }
+}
+
+static INLINE void sobel_filter_x(const uint8_t *src, int src_stride,
+ int16_t *dst, int dst_stride) {
+ int16_t tmp[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
+
+ // Horizontal filter, using kernel {1, 0, -1}.
+ const uint8_t *src_start = src - 1 * src_stride - 1;
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE + 2; i++) {
+ uint8x16_t s = vld1q_u8(src_start + i * src_stride);
+ uint8x8_t s0 = vget_low_u8(s);
+ uint8x8_t s2 = vget_low_u8(vextq_u8(s, s, 2));
+
+ // Given that the kernel is {1, 0, -1} the convolution is a simple
+ // subtraction.
+ int16x8_t diff = vreinterpretq_s16_u16(vsubl_u8(s0, s2));
+
+ vst1q_s16(tmp + i * DISFLOW_PATCH_SIZE, diff);
+ }
+
+ // Vertical filter, using kernel {1, 2, 1}.
+ // This kernel can be split into two 2-tap kernels of value {1, 1}.
+ // That way we need only 3 add operations to perform the convolution, and
+ // the second partial sum can be reused for the next row.
+ int16x8_t s0 = vld1q_s16(tmp);
+ int16x8_t s1 = vld1q_s16(tmp + DISFLOW_PATCH_SIZE);
+ int16x8_t sum01 = vaddq_s16(s0, s1);
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ int16x8_t s2 = vld1q_s16(tmp + (i + 2) * DISFLOW_PATCH_SIZE);
+
+ int16x8_t sum12 = vaddq_s16(s1, s2);
+ int16x8_t sum = vaddq_s16(sum01, sum12);
+
+ vst1q_s16(dst + i * dst_stride, sum);
+
+ sum01 = sum12;
+ s1 = s2;
+ }
+}
+
+static INLINE void sobel_filter_y(const uint8_t *src, int src_stride,
+ int16_t *dst, int dst_stride) {
+ int16_t tmp[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
+
+ // Horizontal filter, using kernel {1, 2, 1}.
+ // This kernel can be split into two 2-tap kernels of value {1, 1}.
+ // That way we need only 3 add operations to perform the convolution.
+ const uint8_t *src_start = src - 1 * src_stride - 1;
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE + 2; i++) {
+ uint8x16_t s = vld1q_u8(src_start + i * src_stride);
+ uint8x8_t s0 = vget_low_u8(s);
+ uint8x8_t s1 = vget_low_u8(vextq_u8(s, s, 1));
+ uint8x8_t s2 = vget_low_u8(vextq_u8(s, s, 2));
+
+ uint16x8_t sum01 = vaddl_u8(s0, s1);
+ uint16x8_t sum12 = vaddl_u8(s1, s2);
+ uint16x8_t sum = vaddq_u16(sum01, sum12);
+
+ vst1q_s16(tmp + i * DISFLOW_PATCH_SIZE, vreinterpretq_s16_u16(sum));
+ }
+
+ // Vertical filter, using kernel {1, 0, -1}.
+ // Load the whole block at once to avoid redundant loads during convolution.
+ int16x8_t t[10];
+ load_s16_8x10(tmp, DISFLOW_PATCH_SIZE, &t[0], &t[1], &t[2], &t[3], &t[4],
+ &t[5], &t[6], &t[7], &t[8], &t[9]);
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ // Given that the kernel is {1, 0, -1} the convolution is a simple
+ // subtraction.
+ int16x8_t diff = vsubq_s16(t[i], t[i + 2]);
+
+ vst1q_s16(dst + i * dst_stride, diff);
+ }
+}
+
+// Computes the components of the system of equations used to solve for
+// a flow vector.
+//
+// The flow equations are a least-squares system, derived as follows:
+//
+// For each pixel in the patch, we calculate the current error `dt`,
+// and the x and y gradients `dx` and `dy` of the source patch.
+// This means that, to first order, the squared error for this pixel is
+//
+// (dt + u * dx + v * dy)^2
+//
+// where (u, v) are the incremental changes to the flow vector.
+//
+// We then want to find the values of u and v which minimize the sum
+// of the squared error across all pixels. Conveniently, this fits exactly
+// into the form of a least squares problem, with one equation
+//
+// u * dx + v * dy = -dt
+//
+// for each pixel.
+//
+// Summing across all pixels in a square window of size DISFLOW_PATCH_SIZE,
+// and absorbing the - sign elsewhere, this results in the least squares system
+//
+// M = |sum(dx * dx) sum(dx * dy)|
+// |sum(dx * dy) sum(dy * dy)|
+//
+// b = |sum(dx * dt)|
+// |sum(dy * dt)|
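+//
+// The flow step is then (u, v) += M^-1 * b; this is applied, scaled by
+// DISFLOW_STEP_SIZE and clamped, in aom_compute_flow_at_point_neon() below.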
+static INLINE void compute_flow_matrix(const int16_t *dx, int dx_stride,
+ const int16_t *dy, int dy_stride,
+ double *M_inv) {
+ int32x4_t sum[4] = { vdupq_n_s32(0), vdupq_n_s32(0), vdupq_n_s32(0),
+ vdupq_n_s32(0) };
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ int16x8_t x = vld1q_s16(dx + i * dx_stride);
+ int16x8_t y = vld1q_s16(dy + i * dy_stride);
+ sum[0] = vmlal_s16(sum[0], vget_low_s16(x), vget_low_s16(x));
+ sum[0] = vmlal_s16(sum[0], vget_high_s16(x), vget_high_s16(x));
+
+ sum[1] = vmlal_s16(sum[1], vget_low_s16(x), vget_low_s16(y));
+ sum[1] = vmlal_s16(sum[1], vget_high_s16(x), vget_high_s16(y));
+
+ sum[3] = vmlal_s16(sum[3], vget_low_s16(y), vget_low_s16(y));
+ sum[3] = vmlal_s16(sum[3], vget_high_s16(y), vget_high_s16(y));
+ }
+ sum[2] = sum[1];
+
+ int32x4_t res = horizontal_add_4d_s32x4(sum);
+
+ // Apply regularization
+ // We follow the standard regularization method of adding `k * I` before
+ // inverting. This ensures that the matrix will be invertible.
+ //
+ // Setting the regularization strength k to 1 seems to work well here, as
+ // typical values coming from the other equations are very large (1e5 to
+ // 1e6, with an upper limit of around 6e7, at the time of writing).
+ // It also preserves the property that all matrix values are whole numbers,
+ // which is convenient for integerized SIMD implementation.
+
+ double M0 = (double)vgetq_lane_s32(res, 0) + 1;
+ double M1 = (double)vgetq_lane_s32(res, 1);
+ double M2 = (double)vgetq_lane_s32(res, 2);
+ double M3 = (double)vgetq_lane_s32(res, 3) + 1;
+
+ // Invert matrix M.
+ double det = (M0 * M3) - (M1 * M2);
+ assert(det >= 1);
+ const double det_inv = 1 / det;
+
+ M_inv[0] = M3 * det_inv;
+ M_inv[1] = -M1 * det_inv;
+ M_inv[2] = -M2 * det_inv;
+ M_inv[3] = M0 * det_inv;
+}
+
+static INLINE void compute_flow_vector(const int16_t *dx, int dx_stride,
+ const int16_t *dy, int dy_stride,
+ const int16_t *dt, int dt_stride,
+ int *b) {
+ int32x4_t b_s32[2] = { vdupq_n_s32(0), vdupq_n_s32(0) };
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ int16x8_t dx16 = vld1q_s16(dx + i * dx_stride);
+ int16x8_t dy16 = vld1q_s16(dy + i * dy_stride);
+ int16x8_t dt16 = vld1q_s16(dt + i * dt_stride);
+
+ b_s32[0] = vmlal_s16(b_s32[0], vget_low_s16(dx16), vget_low_s16(dt16));
+ b_s32[0] = vmlal_s16(b_s32[0], vget_high_s16(dx16), vget_high_s16(dt16));
+
+ b_s32[1] = vmlal_s16(b_s32[1], vget_low_s16(dy16), vget_low_s16(dt16));
+ b_s32[1] = vmlal_s16(b_s32[1], vget_high_s16(dy16), vget_high_s16(dt16));
+ }
+
+ int32x4_t b_red = horizontal_add_2d_s32(b_s32[0], b_s32[1]);
+ vst1_s32(b, add_pairwise_s32x4(b_red));
+}
+
+void aom_compute_flow_at_point_neon(const uint8_t *src, const uint8_t *ref,
+ int x, int y, int width, int height,
+ int stride, double *u, double *v) {
+ double M_inv[4];
+ int b[2];
+ int16_t dt[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
+ int16_t dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
+ int16_t dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
+
+ // Compute gradients within this patch
+ const uint8_t *src_patch = &src[y * stride + x];
+ sobel_filter_x(src_patch, stride, dx, DISFLOW_PATCH_SIZE);
+ sobel_filter_y(src_patch, stride, dy, DISFLOW_PATCH_SIZE);
+
+ compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M_inv);
+
+ for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
+ compute_flow_error(src, ref, width, height, stride, x, y, *u, *v, dt);
+ compute_flow_vector(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, dt,
+ DISFLOW_PATCH_SIZE, b);
+
+ // Solve flow equations to find a better estimate for the flow vector
+ // at this point
+ const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1];
+ const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1];
+ *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2);
+ *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2);
+
+ if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) {
+ // Stop iteration when we're close to convergence
+ break;
+ }
+ }
+}
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_detect.c b/third_party/aom/aom_dsp/flow_estimation/corner_detect.c
new file mode 100644
index 0000000000..284d1bd7b8
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_detect.c
@@ -0,0 +1,167 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include <memory.h>
+#include <math.h>
+#include <assert.h>
+
+#include "third_party/fastfeat/fast.h"
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/flow_estimation/corner_detect.h"
+#include "aom_mem/aom_mem.h"
+#include "av1/common/common.h"
+
+#define FAST_BARRIER 18
+
+size_t av1_get_corner_list_size(void) { return sizeof(CornerList); }
+
+CornerList *av1_alloc_corner_list(void) {
+ CornerList *corners = (CornerList *)aom_calloc(1, sizeof(*corners));
+ if (!corners) {
+ return NULL;
+ }
+
+ corners->valid = false;
+#if CONFIG_MULTITHREAD
+ pthread_mutex_init(&corners->mutex, NULL);
+#endif // CONFIG_MULTITHREAD
+ return corners;
+}
+
+static bool compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
+ const uint8_t *buf = pyr->layers[0].buffer;
+ int width = pyr->layers[0].width;
+ int height = pyr->layers[0].height;
+ int stride = pyr->layers[0].stride;
+
+ int *scores = NULL;
+ int num_corners;
+ xy *const frame_corners_xy = aom_fast9_detect_nonmax(
+ buf, width, height, stride, FAST_BARRIER, &scores, &num_corners);
+ if (num_corners < 0) return false;
+
+ if (num_corners <= MAX_CORNERS) {
+ // Use all detected corners
+ if (num_corners != 0) {
+ memcpy(corners->corners, frame_corners_xy,
+ sizeof(*frame_corners_xy) * num_corners);
+ }
+ corners->num_corners = num_corners;
+ } else {
+ // There are more than MAX_CORNERS corners available, so pick out a subset
+ // of the sharpest corners, as these will be the most useful for flow
+ // estimation
+ int histogram[256];
+ av1_zero(histogram);
+ for (int i = 0; i < num_corners; i++) {
+ assert(FAST_BARRIER <= scores[i] && scores[i] <= 255);
+ histogram[scores[i]] += 1;
+ }
+
+ int threshold = -1;
+ int found_corners = 0;
+ for (int bucket = 255; bucket >= 0; bucket--) {
+ if (found_corners + histogram[bucket] > MAX_CORNERS) {
+ // Set threshold here
+ threshold = bucket;
+ break;
+ }
+ found_corners += histogram[bucket];
+ }
+ assert(threshold != -1 && "Failed to select a valid threshold");
+
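+ // Worked example of the threshold selection above (illustrative): suppose
+ // buckets 255 down to 100 together hold 4000 corners, and bucket 99 holds
+ // another 200. Adding bucket 99 would exceed MAX_CORNERS (4096), so
+ // threshold = 99, and the loop below keeps exactly the 4000 corners with
+ // scores > 99.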
+ int copied_corners = 0;
+ for (int i = 0; i < num_corners; i++) {
+ if (scores[i] > threshold) {
+ assert(copied_corners < MAX_CORNERS);
+ corners->corners[2 * copied_corners + 0] = frame_corners_xy[i].x;
+ corners->corners[2 * copied_corners + 1] = frame_corners_xy[i].y;
+ copied_corners += 1;
+ }
+ }
+ assert(copied_corners == found_corners);
+ corners->num_corners = copied_corners;
+ }
+
+ free(scores);
+ free(frame_corners_xy);
+ return true;
+}
+
+bool av1_compute_corner_list(const ImagePyramid *pyr, CornerList *corners) {
+ assert(corners);
+
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&corners->mutex);
+#endif // CONFIG_MULTITHREAD
+
+ if (!corners->valid) {
+ corners->valid = compute_corner_list(pyr, corners);
+ }
+ bool valid = corners->valid;
+
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&corners->mutex);
+#endif // CONFIG_MULTITHREAD
+ return valid;
+}
+
+#ifndef NDEBUG
+// Check if a corner list has already been computed.
+// This is mostly a debug helper - as it is necessary to hold corners->mutex
+// while reading the valid flag, we cannot just write:
+// assert(corners->valid);
+// This function allows the check to be correctly written as:
+// assert(aom_is_corner_list_valid(corners));
+bool aom_is_corner_list_valid(CornerList *corners) {
+ assert(corners);
+
+ // Per the comments in the CornerList struct, we must take this mutex
+ // before reading or writing the "valid" flag, and hold it while computing
+ // the corner list, to ensure proper behaviour if multiple threads call
+ // this function simultaneously
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&corners->mutex);
+#endif // CONFIG_MULTITHREAD
+
+ bool valid = corners->valid;
+
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&corners->mutex);
+#endif // CONFIG_MULTITHREAD
+
+ return valid;
+}
+#endif
+
+void av1_invalidate_corner_list(CornerList *corners) {
+ if (corners) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_lock(&corners->mutex);
+#endif // CONFIG_MULTITHREAD
+ corners->valid = false;
+#if CONFIG_MULTITHREAD
+ pthread_mutex_unlock(&corners->mutex);
+#endif // CONFIG_MULTITHREAD
+ }
+}
+
+void av1_free_corner_list(CornerList *corners) {
+ if (corners) {
+#if CONFIG_MULTITHREAD
+ pthread_mutex_destroy(&corners->mutex);
+#endif // CONFIG_MULTITHREAD
+ aom_free(corners);
+ }
+}
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_detect.h b/third_party/aom/aom_dsp/flow_estimation/corner_detect.h
new file mode 100644
index 0000000000..d05846ce5d
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_detect.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AOM_DSP_FLOW_ESTIMATION_CORNER_DETECT_H_
+#define AOM_AOM_DSP_FLOW_ESTIMATION_CORNER_DETECT_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <memory.h>
+
+#include "aom_dsp/pyramid.h"
+#include "aom_util/aom_thread.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_CORNERS 4096
+
+typedef struct corner_list {
+#if CONFIG_MULTITHREAD
+ // Mutex which is used to prevent the corner list from being computed twice
+ // at the same time
+ //
+ // Semantics:
+ // * This mutex must be held whenever reading or writing the `valid` flag
+ //
+ // * This mutex must also be held while computing the corner list,
+ // to ensure that only one thread may do so at a time.
+ //
+ // * However, once you have read the valid flag and seen a true value,
+ // it is safe to drop the mutex and read from the remaining fields.
+ // This is because, once the corner list is computed, its contents
+ // will not be changed until the parent frame buffer is recycled,
+ // which will not happen until there are no more outstanding references
+ // to the frame buffer.
+ pthread_mutex_t mutex;
+#endif // CONFIG_MULTITHREAD
+ // Flag indicating whether the corner list contains valid data
+ bool valid;
+ // Number of corners found
+ int num_corners;
+ // (x, y) coordinates of each corner
+ int corners[2 * MAX_CORNERS];
+} CornerList;
+
+size_t av1_get_corner_list_size(void);
+
+CornerList *av1_alloc_corner_list(void);
+
+bool av1_compute_corner_list(const ImagePyramid *pyr, CornerList *corners);
+
+#ifndef NDEBUG
+// Check if a corner list has already been computed.
+// This is mostly a debug helper - as it is necessary to hold corners->mutex
+// while reading the valid flag, we cannot just write:
+// assert(corners->valid);
+// This function allows the check to be correctly written as:
+// assert(aom_is_corner_list_valid(corners));
+bool aom_is_corner_list_valid(CornerList *corners);
+#endif
+
+void av1_invalidate_corner_list(CornerList *corners);
+
+void av1_free_corner_list(CornerList *corners);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AOM_AOM_DSP_FLOW_ESTIMATION_CORNER_DETECT_H_
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_match.c b/third_party/aom/aom_dsp/flow_estimation/corner_match.c
new file mode 100644
index 0000000000..cef719b68d
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_match.c
@@ -0,0 +1,259 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <stdlib.h>
+#include <memory.h>
+#include <math.h>
+
+#include "config/aom_dsp_rtcd.h"
+
+#include "aom_dsp/flow_estimation/corner_detect.h"
+#include "aom_dsp/flow_estimation/corner_match.h"
+#include "aom_dsp/flow_estimation/flow_estimation.h"
+#include "aom_dsp/flow_estimation/ransac.h"
+#include "aom_dsp/pyramid.h"
+#include "aom_scale/yv12config.h"
+
+#define SEARCH_SZ 9
+#define SEARCH_SZ_BY2 ((SEARCH_SZ - 1) / 2)
+
+#define THRESHOLD_NCC 0.75
+
+/* Compute var(frame) * MATCH_SZ_SQ * MATCH_SZ_SQ over a MATCH_SZ by MATCH_SZ
+   window of frame, centered at (x, y). This equals
+   (sum of squares) * MATCH_SZ_SQ - (sum)^2 over the window.
+*/
+static double compute_variance(const unsigned char *frame, int stride, int x,
+ int y) {
+ int sum = 0;
+ int sumsq = 0;
+ int var;
+ int i, j;
+ for (i = 0; i < MATCH_SZ; ++i)
+ for (j = 0; j < MATCH_SZ; ++j) {
+ sum += frame[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)];
+ sumsq += frame[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)] *
+ frame[(i + y - MATCH_SZ_BY2) * stride + (j + x - MATCH_SZ_BY2)];
+ }
+ var = sumsq * MATCH_SZ_SQ - sum * sum;
+ return (double)var;
+}
+
+/* Compute corr(frame1, frame2) * MATCH_SZ_SQ * stddev(frame1), where the
+   correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
+   of each image, centered at (x1, y1) and (x2, y2) respectively.
+*/
+double av1_compute_cross_correlation_c(const unsigned char *frame1, int stride1,
+ int x1, int y1,
+ const unsigned char *frame2, int stride2,
+ int x2, int y2) {
+ int v1, v2;
+ int sum1 = 0;
+ int sum2 = 0;
+ int sumsq2 = 0;
+ int cross = 0;
+ int var2, cov;
+ int i, j;
+ for (i = 0; i < MATCH_SZ; ++i)
+ for (j = 0; j < MATCH_SZ; ++j) {
+ v1 = frame1[(i + y1 - MATCH_SZ_BY2) * stride1 + (j + x1 - MATCH_SZ_BY2)];
+ v2 = frame2[(i + y2 - MATCH_SZ_BY2) * stride2 + (j + x2 - MATCH_SZ_BY2)];
+ sum1 += v1;
+ sum2 += v2;
+ sumsq2 += v2 * v2;
+ cross += v1 * v2;
+ }
+ var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
+ cov = cross * MATCH_SZ_SQ - sum1 * sum2;
+ return cov / sqrt((double)var2);
+}
+
+static int is_eligible_point(int pointx, int pointy, int width, int height) {
+ return (pointx >= MATCH_SZ_BY2 && pointy >= MATCH_SZ_BY2 &&
+ pointx + MATCH_SZ_BY2 < width && pointy + MATCH_SZ_BY2 < height);
+}
+
+static int is_eligible_distance(int point1x, int point1y, int point2x,
+ int point2y, int width, int height) {
+ const int thresh = (width < height ? height : width) >> 4;
+ return ((point1x - point2x) * (point1x - point2x) +
+ (point1y - point2y) * (point1y - point2y)) <= thresh * thresh;
+}
+
+static void improve_correspondence(const unsigned char *src,
+ const unsigned char *ref, int width,
+ int height, int src_stride, int ref_stride,
+ Correspondence *correspondences,
+ int num_correspondences) {
+ int i;
+ for (i = 0; i < num_correspondences; ++i) {
+ int x, y, best_x = 0, best_y = 0;
+ double best_match_ncc = 0.0;
+ // For this algorithm, all points have integer coordinates.
+ // It's a little more efficient to convert them to ints once,
+ // before the inner loops
+ int x0 = (int)correspondences[i].x;
+ int y0 = (int)correspondences[i].y;
+ int rx0 = (int)correspondences[i].rx;
+ int ry0 = (int)correspondences[i].ry;
+ for (y = -SEARCH_SZ_BY2; y <= SEARCH_SZ_BY2; ++y) {
+ for (x = -SEARCH_SZ_BY2; x <= SEARCH_SZ_BY2; ++x) {
+ double match_ncc;
+ if (!is_eligible_point(rx0 + x, ry0 + y, width, height)) continue;
+ if (!is_eligible_distance(x0, y0, rx0 + x, ry0 + y, width, height))
+ continue;
+ match_ncc = av1_compute_cross_correlation(src, src_stride, x0, y0, ref,
+ ref_stride, rx0 + x, ry0 + y);
+ if (match_ncc > best_match_ncc) {
+ best_match_ncc = match_ncc;
+ best_y = y;
+ best_x = x;
+ }
+ }
+ }
+ correspondences[i].rx += best_x;
+ correspondences[i].ry += best_y;
+ }
+ for (i = 0; i < num_correspondences; ++i) {
+ int x, y, best_x = 0, best_y = 0;
+ double best_match_ncc = 0.0;
+ int x0 = (int)correspondences[i].x;
+ int y0 = (int)correspondences[i].y;
+ int rx0 = (int)correspondences[i].rx;
+ int ry0 = (int)correspondences[i].ry;
+ for (y = -SEARCH_SZ_BY2; y <= SEARCH_SZ_BY2; ++y)
+ for (x = -SEARCH_SZ_BY2; x <= SEARCH_SZ_BY2; ++x) {
+ double match_ncc;
+ if (!is_eligible_point(x0 + x, y0 + y, width, height)) continue;
+ if (!is_eligible_distance(x0 + x, y0 + y, rx0, ry0, width, height))
+ continue;
+ match_ncc = av1_compute_cross_correlation(
+ ref, ref_stride, rx0, ry0, src, src_stride, x0 + x, y0 + y);
+ if (match_ncc > best_match_ncc) {
+ best_match_ncc = match_ncc;
+ best_y = y;
+ best_x = x;
+ }
+ }
+ correspondences[i].x += best_x;
+ correspondences[i].y += best_y;
+ }
+}
+
+static int determine_correspondence(const unsigned char *src,
+ const int *src_corners, int num_src_corners,
+ const unsigned char *ref,
+ const int *ref_corners, int num_ref_corners,
+ int width, int height, int src_stride,
+ int ref_stride,
+ Correspondence *correspondences) {
+ // TODO(sarahparker) Improve this to include 2-way match
+ int i, j;
+ int num_correspondences = 0;
+ for (i = 0; i < num_src_corners; ++i) {
+ double best_match_ncc = 0.0;
+ double template_norm;
+ int best_match_j = -1;
+ if (!is_eligible_point(src_corners[2 * i], src_corners[2 * i + 1], width,
+ height))
+ continue;
+ for (j = 0; j < num_ref_corners; ++j) {
+ double match_ncc;
+ if (!is_eligible_point(ref_corners[2 * j], ref_corners[2 * j + 1], width,
+ height))
+ continue;
+ if (!is_eligible_distance(src_corners[2 * i], src_corners[2 * i + 1],
+ ref_corners[2 * j], ref_corners[2 * j + 1],
+ width, height))
+ continue;
+ match_ncc = av1_compute_cross_correlation(
+ src, src_stride, src_corners[2 * i], src_corners[2 * i + 1], ref,
+ ref_stride, ref_corners[2 * j], ref_corners[2 * j + 1]);
+ if (match_ncc > best_match_ncc) {
+ best_match_ncc = match_ncc;
+ best_match_j = j;
+ }
+ }
+ // Note: We want to test if the best correlation is >= THRESHOLD_NCC,
+ // but need to account for the normalization in
+ // av1_compute_cross_correlation.
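+ // Specifically, av1_compute_cross_correlation() returns cov / sqrt(var2)
+ // (see its definition above), while the fully normalized correlation is
+ // cov / (sqrt(var1) * sqrt(var2)). template_norm below plays the role of
+ // var1, so testing
+ //   best_match_ncc > THRESHOLD_NCC * sqrt(template_norm)
+ // is equivalent to testing NCC > THRESHOLD_NCC, while only computing one
+ // variance per src corner rather than one per candidate pair.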
+ template_norm = compute_variance(src, src_stride, src_corners[2 * i],
+ src_corners[2 * i + 1]);
+ if (best_match_ncc > THRESHOLD_NCC * sqrt(template_norm)) {
+ correspondences[num_correspondences].x = src_corners[2 * i];
+ correspondences[num_correspondences].y = src_corners[2 * i + 1];
+ correspondences[num_correspondences].rx = ref_corners[2 * best_match_j];
+ correspondences[num_correspondences].ry =
+ ref_corners[2 * best_match_j + 1];
+ num_correspondences++;
+ }
+ }
+ improve_correspondence(src, ref, width, height, src_stride, ref_stride,
+ correspondences, num_correspondences);
+ return num_correspondences;
+}
+
+bool av1_compute_global_motion_feature_match(
+ TransformationType type, YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *ref,
+ int bit_depth, MotionModel *motion_models, int num_motion_models,
+ bool *mem_alloc_failed) {
+ int num_correspondences;
+ Correspondence *correspondences;
+ ImagePyramid *src_pyramid = src->y_pyramid;
+ CornerList *src_corners = src->corners;
+ ImagePyramid *ref_pyramid = ref->y_pyramid;
+ CornerList *ref_corners = ref->corners;
+
+ // Precompute information we will need about each frame
+ if (!aom_compute_pyramid(src, bit_depth, src_pyramid)) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+ if (!av1_compute_corner_list(src_pyramid, src_corners)) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+ if (!aom_compute_pyramid(ref, bit_depth, ref_pyramid)) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+ if (!av1_compute_corner_list(ref_pyramid, ref_corners)) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+
+ const uint8_t *src_buffer = src_pyramid->layers[0].buffer;
+ const int src_width = src_pyramid->layers[0].width;
+ const int src_height = src_pyramid->layers[0].height;
+ const int src_stride = src_pyramid->layers[0].stride;
+
+ const uint8_t *ref_buffer = ref_pyramid->layers[0].buffer;
+ assert(ref_pyramid->layers[0].width == src_width);
+ assert(ref_pyramid->layers[0].height == src_height);
+ const int ref_stride = ref_pyramid->layers[0].stride;
+
+ // find correspondences between the two images
+ correspondences = (Correspondence *)aom_malloc(src_corners->num_corners *
+ sizeof(*correspondences));
+ if (!correspondences) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+ num_correspondences = determine_correspondence(
+ src_buffer, src_corners->corners, src_corners->num_corners, ref_buffer,
+ ref_corners->corners, ref_corners->num_corners, src_width, src_height,
+ src_stride, ref_stride, correspondences);
+
+ bool result = ransac(correspondences, num_correspondences, type,
+ motion_models, num_motion_models, mem_alloc_failed);
+
+ aom_free(correspondences);
+ return result;
+}
diff --git a/third_party/aom/aom_dsp/flow_estimation/corner_match.h b/third_party/aom/aom_dsp/flow_estimation/corner_match.h
new file mode 100644
index 0000000000..4435d2c767
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/corner_match.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AOM_DSP_FLOW_ESTIMATION_CORNER_MATCH_H_
+#define AOM_AOM_DSP_FLOW_ESTIMATION_CORNER_MATCH_H_
+
+#include <stdbool.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+
+#include "aom_dsp/flow_estimation/corner_detect.h"
+#include "aom_dsp/flow_estimation/flow_estimation.h"
+#include "aom_scale/yv12config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MATCH_SZ 13
+#define MATCH_SZ_BY2 ((MATCH_SZ - 1) / 2)
+#define MATCH_SZ_SQ (MATCH_SZ * MATCH_SZ)
+
+bool av1_compute_global_motion_feature_match(
+ TransformationType type, YV12_BUFFER_CONFIG *src, YV12_BUFFER_CONFIG *ref,
+ int bit_depth, MotionModel *motion_models, int num_motion_models,
+ bool *mem_alloc_failed);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AOM_AOM_DSP_FLOW_ESTIMATION_CORNER_MATCH_H_
diff --git a/third_party/aom/aom_dsp/flow_estimation/disflow.c b/third_party/aom/aom_dsp/flow_estimation/disflow.c
new file mode 100644
index 0000000000..147a8ab3b3
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/disflow.c
@@ -0,0 +1,823 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+// Dense Inverse Search flow algorithm
+// Paper: https://arxiv.org/abs/1603.03590
+
+#include <assert.h>
+#include <math.h>
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/flow_estimation/corner_detect.h"
+#include "aom_dsp/flow_estimation/disflow.h"
+#include "aom_dsp/flow_estimation/ransac.h"
+#include "aom_dsp/pyramid.h"
+#include "aom_mem/aom_mem.h"
+
+#include "config/aom_dsp_rtcd.h"
+
+// Amount to downsample the flow field by.
+// eg. DOWNSAMPLE_SHIFT = 2 (DOWNSAMPLE_FACTOR == 4) means we calculate
+// one flow point for each 4x4 pixel region of the frame
+// Must be a power of 2
+#define DOWNSAMPLE_SHIFT 3
+#define DOWNSAMPLE_FACTOR (1 << DOWNSAMPLE_SHIFT)
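+// With DOWNSAMPLE_SHIFT == 3 as set here, DOWNSAMPLE_FACTOR == 8, so one
+// flow point is calculated for each 8x8 pixel region of the frame.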
+
+// Filters used when upscaling the flow field from one pyramid level
+// to another. See upscale_flow_component for details on kernel selection
+#define FLOW_UPSCALE_TAPS 4
+
+// Number of outermost flow field entries (on each edge) which can't be
+// computed, because the patch they correspond to extends outside of the
+// frame
+// The border is (DISFLOW_PATCH_SIZE >> 1) pixels, which is
+// (DISFLOW_PATCH_SIZE >> 1) >> DOWNSAMPLE_SHIFT many flow field entries
+#define FLOW_BORDER_INNER ((DISFLOW_PATCH_SIZE >> 1) >> DOWNSAMPLE_SHIFT)
+
+// Number of extra padding entries on each side of the flow field.
+// These samples are added so that we do not need to apply clamping when
+// interpolating or upsampling the flow field
+#define FLOW_BORDER_OUTER (FLOW_UPSCALE_TAPS / 2)
+
+// When downsampling the flow field, each flow field entry covers a square
+// region of pixels in the image pyramid. This value is equal to the position
+// of the center of that region, as an offset from the top/left edge.
+//
+// Note: Using ((DOWNSAMPLE_FACTOR - 1) / 2) is equivalent to the more
+// natural expression ((DOWNSAMPLE_FACTOR / 2) - 1),
+// unless DOWNSAMPLE_FACTOR == 1 (ie, no downsampling), in which case
+// this gives the correct offset of 0 instead of -1.
+#define UPSAMPLE_CENTER_OFFSET ((DOWNSAMPLE_FACTOR - 1) / 2)
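+// With DOWNSAMPLE_FACTOR == 8, this offset is (8 - 1) / 2 == 3.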
+
+static double flow_upscale_filter[2][FLOW_UPSCALE_TAPS] = {
+ // Cubic interpolation kernels for phase=0.75 and phase=0.25, respectively
+ { -3 / 128., 29 / 128., 111 / 128., -9 / 128. },
+ { -9 / 128., 111 / 128., 29 / 128., -3 / 128. }
+};
+
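+// These taps can be checked against get_cubic_kernel_dbl() below: for
+// x = 0.75 that function gives { -3/128, 29/128, 111/128, -9/128 }, and for
+// x = 0.25 it gives { -9/128, 111/128, 29/128, -3/128 }, matching the two
+// rows above. See upscale_flow_component() for why these phases are needed.
+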
+static INLINE void get_cubic_kernel_dbl(double x, double kernel[4]) {
+ // Check that the fractional position is in range.
+ //
+ // Note: x is calculated from (eg.) `u_frac = u - floor(u)`.
+ // Mathematically, this implies that 0 <= x < 1. However, in practice it is
+ // possible to have x == 1 due to floating point rounding. This is fine,
+ // and we still interpolate correctly if we allow x = 1.
+ assert(0 <= x && x <= 1);
+
+ double x2 = x * x;
+ double x3 = x2 * x;
+ kernel[0] = -0.5 * x + x2 - 0.5 * x3;
+ kernel[1] = 1.0 - 2.5 * x2 + 1.5 * x3;
+ kernel[2] = 0.5 * x + 2.0 * x2 - 1.5 * x3;
+ kernel[3] = -0.5 * x2 + 0.5 * x3;
+}
+
+static INLINE void get_cubic_kernel_int(double x, int kernel[4]) {
+ double kernel_dbl[4];
+ get_cubic_kernel_dbl(x, kernel_dbl);
+
+ kernel[0] = (int)rint(kernel_dbl[0] * (1 << DISFLOW_INTERP_BITS));
+ kernel[1] = (int)rint(kernel_dbl[1] * (1 << DISFLOW_INTERP_BITS));
+ kernel[2] = (int)rint(kernel_dbl[2] * (1 << DISFLOW_INTERP_BITS));
+ kernel[3] = (int)rint(kernel_dbl[3] * (1 << DISFLOW_INTERP_BITS));
+}
+
+static INLINE double get_cubic_value_dbl(const double *p,
+ const double kernel[4]) {
+ return kernel[0] * p[0] + kernel[1] * p[1] + kernel[2] * p[2] +
+ kernel[3] * p[3];
+}
+
+static INLINE int get_cubic_value_int(const int *p, const int kernel[4]) {
+ return kernel[0] * p[0] + kernel[1] * p[1] + kernel[2] * p[2] +
+ kernel[3] * p[3];
+}
+
+static INLINE double bicubic_interp_one(const double *arr, int stride,
+ const double h_kernel[4],
+ const double v_kernel[4]) {
+ double tmp[1 * 4];
+
+ // Horizontal convolution
+ for (int i = -1; i < 3; ++i) {
+ tmp[i + 1] = get_cubic_value_dbl(&arr[i * stride - 1], h_kernel);
+ }
+
+ // Vertical convolution
+ return get_cubic_value_dbl(tmp, v_kernel);
+}
+
+static int determine_disflow_correspondence(const ImagePyramid *src_pyr,
+ const ImagePyramid *ref_pyr,
+ CornerList *corners,
+ const FlowField *flow,
+ Correspondence *correspondences) {
+ const int width = flow->width;
+ const int height = flow->height;
+ const int stride = flow->stride;
+
+ int num_correspondences = 0;
+ for (int i = 0; i < corners->num_corners; ++i) {
+ const int x0 = corners->corners[2 * i];
+ const int y0 = corners->corners[2 * i + 1];
+
+ // Offset points, to compensate for the fact that (say) a flow field entry
+ // at horizontal index i, is nominally associated with the pixel at
+ // horizontal coordinate (i << DOWNSAMPLE_FACTOR) + UPSAMPLE_CENTER_OFFSET
+ // This offset must be applied before we split the coordinate into integer
+ // and fractional parts, in order for the interpolation to be correct.
+ const int x = x0 - UPSAMPLE_CENTER_OFFSET;
+ const int y = y0 - UPSAMPLE_CENTER_OFFSET;
+
+ // Split the pixel coordinates into integer flow field coordinates and
+ // an offset for interpolation
+ const int flow_x = x >> DOWNSAMPLE_SHIFT;
+ const double flow_sub_x =
+ (x & (DOWNSAMPLE_FACTOR - 1)) / (double)DOWNSAMPLE_FACTOR;
+ const int flow_y = y >> DOWNSAMPLE_SHIFT;
+ const double flow_sub_y =
+ (y & (DOWNSAMPLE_FACTOR - 1)) / (double)DOWNSAMPLE_FACTOR;
+
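+ // Worked example (with DOWNSAMPLE_SHIFT == 3, so UPSAMPLE_CENTER_OFFSET
+ // == 3): a corner at x0 = 100 gives x = 97, so flow_x = 97 >> 3 = 12 and
+ // flow_sub_x = (97 & 7) / 8.0 = 0.125.
+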
+ // Exclude points which would sample from the outer border of the flow
+ // field, as this would give lower-quality results.
+ //
+ // Note: As we never read from the border region at pyramid level 0, we
+ // can skip filling it in. If the conditions here are removed, or any
+ // other logic is added which reads from this border region, then
+ // compute_flow_field() will need to be modified to call
+ // fill_flow_field_borders() at pyramid level 0 to set up the correct
+ // border data.
+ if (flow_x < 1 || (flow_x + 2) >= width) continue;
+ if (flow_y < 1 || (flow_y + 2) >= height) continue;
+
+ double h_kernel[4];
+ double v_kernel[4];
+ get_cubic_kernel_dbl(flow_sub_x, h_kernel);
+ get_cubic_kernel_dbl(flow_sub_y, v_kernel);
+
+ double flow_u = bicubic_interp_one(&flow->u[flow_y * stride + flow_x],
+ stride, h_kernel, v_kernel);
+ double flow_v = bicubic_interp_one(&flow->v[flow_y * stride + flow_x],
+ stride, h_kernel, v_kernel);
+
+ // Refine the interpolated flow vector one last time
+ const int patch_tl_x = x0 - DISFLOW_PATCH_CENTER;
+ const int patch_tl_y = y0 - DISFLOW_PATCH_CENTER;
+ aom_compute_flow_at_point(
+ src_pyr->layers[0].buffer, ref_pyr->layers[0].buffer, patch_tl_x,
+ patch_tl_y, src_pyr->layers[0].width, src_pyr->layers[0].height,
+ src_pyr->layers[0].stride, &flow_u, &flow_v);
+
+ // Use original points (without offsets) when filling in correspondence
+ // array
+ correspondences[num_correspondences].x = x0;
+ correspondences[num_correspondences].y = y0;
+ correspondences[num_correspondences].rx = x0 + flow_u;
+ correspondences[num_correspondences].ry = y0 + flow_v;
+ num_correspondences++;
+ }
+ return num_correspondences;
+}
+
+// Warp a DISFLOW_PATCH_SIZE x DISFLOW_PATCH_SIZE patch of ref, rooted at
+// (x + u, y + v), and compute the per-pixel errors dt against the source
+// patch rooted at (x, y) in src. These errors are then accumulated against
+// the gradients dx and dy to form the vector b of the least-squares system
+// (see the comment above compute_flow_matrix() for the derivation).
+static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
+ int width, int height, int stride, int x,
+ int y, double u, double v,
+ const int16_t *dx, const int16_t *dy,
+ int *b) {
+ memset(b, 0, 2 * sizeof(*b));
+
+ // Split offset into integer and fractional parts, and compute cubic
+ // interpolation kernels
+ const int u_int = (int)floor(u);
+ const int v_int = (int)floor(v);
+ const double u_frac = u - floor(u);
+ const double v_frac = v - floor(v);
+
+ int h_kernel[4];
+ int v_kernel[4];
+ get_cubic_kernel_int(u_frac, h_kernel);
+ get_cubic_kernel_int(v_frac, v_kernel);
+
+ // Storage for intermediate values between the two convolution directions
+ int tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)];
+ int *tmp = tmp_ + DISFLOW_PATCH_SIZE; // Offset by one row
+
+ // Clamp coordinates so that all pixels we fetch will remain within the
+ // allocated border region, but allow them to go far enough out that
+ // the border pixels' values do not change.
+ // Since we are calculating an 8x8 block, the bottom-right pixel
+ // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic
+ // interpolation has 4 taps, meaning that the output of pixel
+ // (x_w, y_w) depends on the pixels in the range
+ // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]).
+ //
+ // Thus the most extreme coordinates which will be fetched are
+ // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9).
+ const int x0 = clamp(x + u_int, -9, width);
+ const int y0 = clamp(y + v_int, -9, height);
+
+ // Horizontal convolution
+ for (int i = -1; i < DISFLOW_PATCH_SIZE + 2; ++i) {
+ const int y_w = y0 + i;
+ for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
+ const int x_w = x0 + j;
+ int arr[4];
+
+ arr[0] = (int)ref[y_w * stride + (x_w - 1)];
+ arr[1] = (int)ref[y_w * stride + (x_w + 0)];
+ arr[2] = (int)ref[y_w * stride + (x_w + 1)];
+ arr[3] = (int)ref[y_w * stride + (x_w + 2)];
+
+ // Apply kernel and round, keeping 6 extra bits of precision.
+ //
+ // 6 is the maximum allowable number of extra bits which will avoid
+ // the intermediate values overflowing an int16_t. The most extreme
+ // intermediate value occurs when:
+ // * The input pixels are [0, 255, 255, 0]
+ // * u_frac = 0.5
+ // In this case, the un-scaled output is 255 * 1.125 = 286.875.
+ // As an integer with 6 fractional bits, that is 18360, which fits
+ // in an int16_t. But with 7 fractional bits it would be 36720,
+ // which is too large.
+ tmp[i * DISFLOW_PATCH_SIZE + j] = ROUND_POWER_OF_TWO(
+ get_cubic_value_int(arr, h_kernel), DISFLOW_INTERP_BITS - 6);
+ }
+ }
+
+ // Vertical convolution
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
+ for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
+ const int *p = &tmp[i * DISFLOW_PATCH_SIZE + j];
+ const int arr[4] = { p[-DISFLOW_PATCH_SIZE], p[0], p[DISFLOW_PATCH_SIZE],
+ p[2 * DISFLOW_PATCH_SIZE] };
+ const int result = get_cubic_value_int(arr, v_kernel);
+
+ // Apply kernel and round.
+ // This time, we have to round off the 6 extra bits which were kept
+ // earlier, but we also want to keep DISFLOW_DERIV_SCALE_LOG2 extra bits
+ // of precision to match the scale of the dx and dy arrays.
+ const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
+ const int warped = ROUND_POWER_OF_TWO(result, round_bits);
+ const int src_px = src[(x + j) + (y + i) * stride] << 3;
+ const int dt = warped - src_px;
+ b[0] += dx[i * DISFLOW_PATCH_SIZE + j] * dt;
+ b[1] += dy[i * DISFLOW_PATCH_SIZE + j] * dt;
+ }
+ }
+}
+
+static INLINE void sobel_filter(const uint8_t *src, int src_stride,
+ int16_t *dst, int dst_stride, int dir) {
+ int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
+ int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;
+
+ // Sobel filter kernel
+ // This must have an overall scale factor equal to DISFLOW_DERIV_SCALE,
+ // in order to produce correctly scaled outputs.
+ // To work out the scale factor, we multiply two factors:
+ //
+ // * For the derivative filter (sobel_a), comparing our filter
+ // image[x - 1] - image[x + 1]
+ // to the standard form
+ // d/dx image[x] = image[x+1] - image[x]
+ // tells us that we're actually calculating -2 * d/dx image[x]
+ //
+ // * For the smoothing filter (sobel_b), all coefficients are positive
+ // so the scale factor is just the sum of the coefficients
+ //
+ // Thus we need to make sure that DISFLOW_DERIV_SCALE = 2 * sum(sobel_b)
+ // (and take care of the - sign from sobel_a elsewhere)
+ static const int16_t sobel_a[3] = { 1, 0, -1 };
+ static const int16_t sobel_b[3] = { 1, 2, 1 };
+ const int taps = 3;
+
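+ // With sobel_b = {1, 2, 1}, sum(sobel_b) == 4, so the combined scale
+ // factor is 2 * 4 == 8; i.e. DISFLOW_DERIV_SCALE_LOG2 is assumed to be 3
+ // in disflow.h.
+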
+ // horizontal filter
+ const int16_t *h_kernel = dir ? sobel_a : sobel_b;
+
+ for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) {
+ for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
+ int sum = 0;
+ for (int k = 0; k < taps; ++k) {
+ sum += h_kernel[k] * src[y * src_stride + (x + k - 1)];
+ }
+ tmp[y * DISFLOW_PATCH_SIZE + x] = sum;
+ }
+ }
+
+ // vertical filter
+ const int16_t *v_kernel = dir ? sobel_b : sobel_a;
+
+ for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) {
+ for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
+ int sum = 0;
+ for (int k = 0; k < taps; ++k) {
+ sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x];
+ }
+ dst[y * dst_stride + x] = sum;
+ }
+ }
+}
+
+// Computes the components of the system of equations used to solve for
+// a flow vector.
+//
+// The flow equations are a least-squares system, derived as follows:
+//
+// For each pixel in the patch, we calculate the current error `dt`,
+// and the x and y gradients `dx` and `dy` of the source patch.
+// This means that, to first order, the squared error for this pixel is
+//
+// (dt + u * dx + v * dy)^2
+//
+// where (u, v) are the incremental changes to the flow vector.
+//
+// We then want to find the values of u and v which minimize the sum
+// of the squared error across all pixels. Conveniently, this fits exactly
+// into the form of a least squares problem, with one equation
+//
+// u * dx + v * dy = -dt
+//
+// for each pixel.
+//
+// Summing across all pixels in a square window of size DISFLOW_PATCH_SIZE,
+// and absorbing the - sign elsewhere, this results in the least squares system
+//
+// M = |sum(dx * dx) sum(dx * dy)|
+// |sum(dx * dy) sum(dy * dy)|
+//
+// b = |sum(dx * dt)|
+// |sum(dy * dt)|
+static INLINE void compute_flow_matrix(const int16_t *dx, int dx_stride,
+ const int16_t *dy, int dy_stride,
+ double *M) {
+ int tmp[4] = { 0 };
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ for (int j = 0; j < DISFLOW_PATCH_SIZE; j++) {
+ tmp[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
+ tmp[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
+ // Don't compute tmp[2], as it should be equal to tmp[1]
+ tmp[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];
+ }
+ }
+
+ // Apply regularization
+ // We follow the standard regularization method of adding `k * I` before
+ // inverting. This ensures that the matrix will be invertible.
+ //
+ // Setting the regularization strength k to 1 seems to work well here, as
+ // typical values coming from the other equations are very large (1e5 to
+ // 1e6, with an upper limit of around 6e7, at the time of writing).
+ // It also preserves the property that all matrix values are whole numbers,
+ // which is convenient for integerized SIMD implementation.
+ tmp[0] += 1;
+ tmp[3] += 1;
+
+ tmp[2] = tmp[1];
+
+ M[0] = (double)tmp[0];
+ M[1] = (double)tmp[1];
+ M[2] = (double)tmp[2];
+ M[3] = (double)tmp[3];
+}
+
+// Try to invert the matrix M
+// Note: Due to the nature of how a least-squares matrix is constructed, all of
+// the eigenvalues will be >= 0, and therefore det M >= 0 as well.
+// The regularization term `+ k * I` further ensures that det M >= k^2.
+// As mentioned in compute_flow_matrix(), here we use k = 1, so det M >= 1.
+// So we don't have to worry about non-invertible matrices here.
+static INLINE void invert_2x2(const double *M, double *M_inv) {
+ double det = (M[0] * M[3]) - (M[1] * M[2]);
+ assert(det >= 1);
+ const double det_inv = 1 / det;
+
+ M_inv[0] = M[3] * det_inv;
+ M_inv[1] = -M[1] * det_inv;
+ M_inv[2] = -M[2] * det_inv;
+ M_inv[3] = M[0] * det_inv;
+}
+
+void aom_compute_flow_at_point_c(const uint8_t *src, const uint8_t *ref, int x,
+ int y, int width, int height, int stride,
+ double *u, double *v) {
+ double M[4];
+ double M_inv[4];
+ int b[2];
+ int16_t dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
+ int16_t dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
+
+ // Compute gradients within this patch
+ const uint8_t *src_patch = &src[y * stride + x];
+ sobel_filter(src_patch, stride, dx, DISFLOW_PATCH_SIZE, 1);
+ sobel_filter(src_patch, stride, dy, DISFLOW_PATCH_SIZE, 0);
+
+ compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
+ invert_2x2(M, M_inv);
+
+ for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
+ compute_flow_vector(src, ref, width, height, stride, x, y, *u, *v, dx, dy,
+ b);
+
+ // Solve flow equations to find a better estimate for the flow vector
+ // at this point
+ const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1];
+ const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1];
+ *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2);
+ *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2);
+
+ if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) {
+ // Stop iteration when we're close to convergence
+ break;
+ }
+ }
+}
+
+static void fill_flow_field_borders(double *flow, int width, int height,
+ int stride) {
+ // Calculate the bounds of the rectangle which was filled in by
+ // compute_flow_field() before calling this function.
+ // These indices are inclusive on both ends.
+ const int left_index = FLOW_BORDER_INNER;
+ const int right_index = (width - FLOW_BORDER_INNER - 1);
+ const int top_index = FLOW_BORDER_INNER;
+ const int bottom_index = (height - FLOW_BORDER_INNER - 1);
+
+ // Left area
+ for (int i = top_index; i <= bottom_index; i += 1) {
+ double *row = flow + i * stride;
+ const double left = row[left_index];
+ for (int j = -FLOW_BORDER_OUTER; j < left_index; j++) {
+ row[j] = left;
+ }
+ }
+
+ // Right area
+ for (int i = top_index; i <= bottom_index; i += 1) {
+ double *row = flow + i * stride;
+ const double right = row[right_index];
+ for (int j = right_index + 1; j < width + FLOW_BORDER_OUTER; j++) {
+ row[j] = right;
+ }
+ }
+
+ // Top area
+ const double *top_row = flow + top_index * stride - FLOW_BORDER_OUTER;
+ for (int i = -FLOW_BORDER_OUTER; i < top_index; i++) {
+ double *row = flow + i * stride - FLOW_BORDER_OUTER;
+ size_t length = width + 2 * FLOW_BORDER_OUTER;
+ memcpy(row, top_row, length * sizeof(*row));
+ }
+
+ // Bottom area
+ const double *bottom_row = flow + bottom_index * stride - FLOW_BORDER_OUTER;
+ for (int i = bottom_index + 1; i < height + FLOW_BORDER_OUTER; i++) {
+ double *row = flow + i * stride - FLOW_BORDER_OUTER;
+ size_t length = width + 2 * FLOW_BORDER_OUTER;
+ memcpy(row, bottom_row, length * sizeof(*row));
+ }
+}
+
+// Upscale one component of the flow field, from a size of
+// cur_width x cur_height to a size of (2*cur_width) x (2*cur_height), storing
+// the result back into the same buffer. This function also scales the flow
+// vector by 2, so that when we move to the next pyramid level down, the implied
+// motion vector is the same.
+//
+// The temporary buffer tmpbuf must be large enough to hold an intermediate
+// array of size stride * cur_height, *plus* FLOW_BORDER_OUTER rows above and
+// below. In other words, indices from -FLOW_BORDER_OUTER * stride to
+// (cur_height + FLOW_BORDER_OUTER) * stride - 1 must be valid.
+//
+// Note that the same stride is used for u before and after upscaling
+// and for the temporary buffer, for simplicity.
+//
+// A note on phasing:
+//
+// The flow fields at two adjacent pyramid levels are offset from each other,
+// and we need to account for this in the construction of the interpolation
+// kernels.
+//
+// Consider an 8x8 pixel patch at pyramid level n. This is split into four
+// patches at pyramid level n-1. Bringing these patches back up to pyramid level
+// n, each sub-patch covers 4x4 pixels, and between them they cover the same
+// 8x8 region.
+//
+// Therefore, at pyramid level n, two adjacent patches look like this:
+//
+// + - - - - - - - + - - - - - - - +
+// | | |
+// | x x | x x |
+// | | |
+// | # | # |
+// | | |
+// | x x | x x |
+// | | |
+// + - - - - - - - + - - - - - - - +
+//
+// where # marks the center of a patch at pyramid level n (the input to this
+// function), and x marks the center of a patch at pyramid level n-1 (the output
+// of this function).
+//
+// By counting pixels (marked by +, -, and |), we can see that the flow vectors
+// at pyramid level n-1 are offset relative to the flow vectors at pyramid
+// level n, by 1/4 of the larger (input) patch size. Therefore, our
+// interpolation kernels need to have phases of 0.25 and 0.75.
+//
+// In addition, in order to handle the frame edges correctly, we need to
+// generate one output vector to the left and one to the right of each input
+// vector, even though these must be interpolated using different source points.
+static void upscale_flow_component(double *flow, int cur_width, int cur_height,
+ int stride, double *tmpbuf) {
+ const int half_len = FLOW_UPSCALE_TAPS / 2;
+
+ // Check that the outer border is large enough to avoid needing to clamp
+ // the source locations
+ assert(half_len <= FLOW_BORDER_OUTER);
+
+ // Horizontal upscale and multiply by 2
+ for (int i = 0; i < cur_height; i++) {
+ for (int j = 0; j < cur_width; j++) {
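+ // Left output pixel is 0.25 units to the left of the input pixel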
+ double left = 0;
+ for (int k = -half_len; k < half_len; k++) {
+ left +=
+ flow[i * stride + (j + k)] * flow_upscale_filter[0][k + half_len];
+ }
+ tmpbuf[i * stride + (2 * j + 0)] = 2.0 * left;
+
+ // Right output pixel is 0.25 units to the right of the input pixel
+ double right = 0;
+ for (int k = -(half_len - 1); k < (half_len + 1); k++) {
+ right += flow[i * stride + (j + k)] *
+ flow_upscale_filter[1][k + (half_len - 1)];
+ }
+ tmpbuf[i * stride + (2 * j + 1)] = 2.0 * right;
+ }
+ }
+
+ // Fill in top and bottom borders of tmpbuf
+ const double *top_row = &tmpbuf[0];
+ for (int i = -FLOW_BORDER_OUTER; i < 0; i++) {
+ double *row = &tmpbuf[i * stride];
+ memcpy(row, top_row, 2 * cur_width * sizeof(*row));
+ }
+
+ const double *bottom_row = &tmpbuf[(cur_height - 1) * stride];
+ for (int i = cur_height; i < cur_height + FLOW_BORDER_OUTER; i++) {
+ double *row = &tmpbuf[i * stride];
+ memcpy(row, bottom_row, 2 * cur_width * sizeof(*row));
+ }
+
+ // Vertical upscale
+ int upscaled_width = cur_width * 2;
+ for (int i = 0; i < cur_height; i++) {
+ for (int j = 0; j < upscaled_width; j++) {
+ double top = 0;
+ for (int k = -half_len; k < half_len; k++) {
+ top +=
+ tmpbuf[(i + k) * stride + j] * flow_upscale_filter[0][k + half_len];
+ }
+ flow[(2 * i) * stride + j] = top;
+
+ double bottom = 0;
+ for (int k = -(half_len - 1); k < (half_len + 1); k++) {
+ bottom += tmpbuf[(i + k) * stride + j] *
+ flow_upscale_filter[1][k + (half_len - 1)];
+ }
+ flow[(2 * i + 1) * stride + j] = bottom;
+ }
+ }
+}
+
+// Note: this function requires that flow->u and flow->v are zero-initialized,
+// so that refinement at the coarsest pyramid level starts from a zero motion
+// estimate; alloc_flow_field() guarantees this by using aom_calloc().
+static bool compute_flow_field(const ImagePyramid *src_pyr,
+ const ImagePyramid *ref_pyr, FlowField *flow) {
+ bool mem_status = true;
+ assert(src_pyr->n_levels == ref_pyr->n_levels);
+
+ double *flow_u = flow->u;
+ double *flow_v = flow->v;
+
+ double *tmpbuf0;
+ double *tmpbuf;
+
+ if (src_pyr->n_levels < 2) {
+ // tmpbuf not needed
+ tmpbuf0 = NULL;
+ tmpbuf = NULL;
+ } else {
+ // This line must match the calculation of cur_flow_height below
+ const int layer1_height = src_pyr->layers[1].height >> DOWNSAMPLE_SHIFT;
+
+ const size_t tmpbuf_size =
+ (layer1_height + 2 * FLOW_BORDER_OUTER) * flow->stride;
+ tmpbuf0 = aom_malloc(tmpbuf_size * sizeof(*tmpbuf0));
+ if (!tmpbuf0) {
+ mem_status = false;
+ goto free_tmpbuf;
+ }
+ tmpbuf = tmpbuf0 + FLOW_BORDER_OUTER * flow->stride;
+ }
+
+ // Compute flow field from coarsest to finest level of the pyramid
+ //
+ // Note: We stop after refining pyramid level 1 and interpolating it to
+ // generate an initial flow field at level 0. We do *not* refine the dense
+ // flow field at level 0. Instead, we wait until we have generated
+ // correspondences by interpolating this flow field, and then refine the
+ // correspondences themselves. This is both faster and gives better output
+ // compared to refining the flow field at level 0 and then interpolating.
+ for (int level = src_pyr->n_levels - 1; level >= 1; --level) {
+ const PyramidLayer *cur_layer = &src_pyr->layers[level];
+ const int cur_width = cur_layer->width;
+ const int cur_height = cur_layer->height;
+ const int cur_stride = cur_layer->stride;
+
+ const uint8_t *src_buffer = cur_layer->buffer;
+ const uint8_t *ref_buffer = ref_pyr->layers[level].buffer;
+
+ const int cur_flow_width = cur_width >> DOWNSAMPLE_SHIFT;
+ const int cur_flow_height = cur_height >> DOWNSAMPLE_SHIFT;
+ const int cur_flow_stride = flow->stride;
+
+ for (int i = FLOW_BORDER_INNER; i < cur_flow_height - FLOW_BORDER_INNER;
+ i += 1) {
+ for (int j = FLOW_BORDER_INNER; j < cur_flow_width - FLOW_BORDER_INNER;
+ j += 1) {
+ const int flow_field_idx = i * cur_flow_stride + j;
+
+ // Calculate the position of a patch of size DISFLOW_PATCH_SIZE pixels,
+ // which is centered on the region covered by this flow field entry
+ const int patch_center_x =
+ (j << DOWNSAMPLE_SHIFT) + UPSAMPLE_CENTER_OFFSET; // In pixels
+ const int patch_center_y =
+ (i << DOWNSAMPLE_SHIFT) + UPSAMPLE_CENTER_OFFSET; // In pixels
+ const int patch_tl_x = patch_center_x - DISFLOW_PATCH_CENTER;
+ const int patch_tl_y = patch_center_y - DISFLOW_PATCH_CENTER;
+ assert(patch_tl_x >= 0);
+ assert(patch_tl_y >= 0);
+
+ aom_compute_flow_at_point(src_buffer, ref_buffer, patch_tl_x,
+ patch_tl_y, cur_width, cur_height, cur_stride,
+ &flow_u[flow_field_idx],
+ &flow_v[flow_field_idx]);
+ }
+ }
+
+ // Fill in the areas which we haven't explicitly computed, with copies
+ // of the outermost values which we did compute
+ fill_flow_field_borders(flow_u, cur_flow_width, cur_flow_height,
+ cur_flow_stride);
+ fill_flow_field_borders(flow_v, cur_flow_width, cur_flow_height,
+ cur_flow_stride);
+
+ if (level > 0) {
+ const int upscale_flow_width = cur_flow_width << 1;
+ const int upscale_flow_height = cur_flow_height << 1;
+ const int upscale_stride = flow->stride;
+
+ upscale_flow_component(flow_u, cur_flow_width, cur_flow_height,
+ cur_flow_stride, tmpbuf);
+ upscale_flow_component(flow_v, cur_flow_width, cur_flow_height,
+ cur_flow_stride, tmpbuf);
+
+ // Upsampling always doubles the flow field size exactly, so if the next
+ // pyramid level's flow field is one column wider and/or one row taller,
+ // fill in the missing rightmost column / bottommost row here by copying
+ // the nearest computed column/row
+ const PyramidLayer *next_layer = &src_pyr->layers[level - 1];
+ const int next_flow_width = next_layer->width >> DOWNSAMPLE_SHIFT;
+ const int next_flow_height = next_layer->height >> DOWNSAMPLE_SHIFT;
+
+ // Rightmost column
+ if (next_flow_width > upscale_flow_width) {
+ assert(next_flow_width == upscale_flow_width + 1);
+ for (int i = 0; i < upscale_flow_height; i++) {
+ const int index = i * upscale_stride + upscale_flow_width;
+ flow_u[index] = flow_u[index - 1];
+ flow_v[index] = flow_v[index - 1];
+ }
+ }
+
+ // Bottommost row
+ if (next_flow_height > upscale_flow_height) {
+ assert(next_flow_height == upscale_flow_height + 1);
+ for (int j = 0; j < next_flow_width; j++) {
+ const int index = upscale_flow_height * upscale_stride + j;
+ flow_u[index] = flow_u[index - upscale_stride];
+ flow_v[index] = flow_v[index - upscale_stride];
+ }
+ }
+ }
+ }
+
+free_tmpbuf:
+ aom_free(tmpbuf0);
+ return mem_status;
+}
+
+static FlowField *alloc_flow_field(int frame_width, int frame_height) {
+ FlowField *flow = (FlowField *)aom_malloc(sizeof(FlowField));
+ if (flow == NULL) return NULL;
+
+ // Calculate the size of the bottom (largest) layer of the flow pyramid
+ flow->width = frame_width >> DOWNSAMPLE_SHIFT;
+ flow->height = frame_height >> DOWNSAMPLE_SHIFT;
+ flow->stride = flow->width + 2 * FLOW_BORDER_OUTER;
+
+ const size_t flow_size =
+ flow->stride * (size_t)(flow->height + 2 * FLOW_BORDER_OUTER);
+
+ flow->buf0 = aom_calloc(2 * flow_size, sizeof(*flow->buf0));
+ if (!flow->buf0) {
+ aom_free(flow);
+ return NULL;
+ }
+
+ flow->u = flow->buf0 + FLOW_BORDER_OUTER * flow->stride + FLOW_BORDER_OUTER;
+ flow->v = flow->u + flow_size;
+
+ return flow;
+}
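+
+// Note on the layout produced by alloc_flow_field(): buf0 holds two
+// consecutive planes of stride * (height + 2 * FLOW_BORDER_OUTER) doubles.
+// u and v each point at the first interior sample of their plane, so indices
+// from -FLOW_BORDER_OUTER * stride - FLOW_BORDER_OUTER upwards are valid,
+// which is what fill_flow_field_borders() and upscale_flow_component() rely
+// on.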
+
+static void free_flow_field(FlowField *flow) {
+ aom_free(flow->buf0);
+ aom_free(flow);
+}
+
+// Compute flow field between `src` and `ref`, and then use that flow to
+// compute a global motion model relating the two frames.
+//
+// Following the convention in flow_estimation.h, the flow vectors are computed
+// at fixed points in `src` and point to the corresponding locations in `ref`,
+// regardless of the temporal ordering of the frames.
+bool av1_compute_global_motion_disflow(TransformationType type,
+ YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *ref, int bit_depth,
+ MotionModel *motion_models,
+ int num_motion_models,
+ bool *mem_alloc_failed) {
+ // Precompute information we will need about each frame
+ ImagePyramid *src_pyramid = src->y_pyramid;
+ CornerList *src_corners = src->corners;
+ ImagePyramid *ref_pyramid = ref->y_pyramid;
+ if (!aom_compute_pyramid(src, bit_depth, src_pyramid)) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+ if (!av1_compute_corner_list(src_pyramid, src_corners)) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+ if (!aom_compute_pyramid(ref, bit_depth, ref_pyramid)) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+
+ const int src_width = src_pyramid->layers[0].width;
+ const int src_height = src_pyramid->layers[0].height;
+ assert(ref_pyramid->layers[0].width == src_width);
+ assert(ref_pyramid->layers[0].height == src_height);
+
+ FlowField *flow = alloc_flow_field(src_width, src_height);
+ if (!flow) {
+ *mem_alloc_failed = true;
+ return false;
+ }
+
+ if (!compute_flow_field(src_pyramid, ref_pyramid, flow)) {
+ *mem_alloc_failed = true;
+ free_flow_field(flow);
+ return false;
+ }
+
+ // find correspondences between the two images using the flow field
+ Correspondence *correspondences =
+ aom_malloc(src_corners->num_corners * sizeof(*correspondences));
+ if (!correspondences) {
+ *mem_alloc_failed = true;
+ free_flow_field(flow);
+ return false;
+ }
+
+ const int num_correspondences = determine_disflow_correspondence(
+ src_pyramid, ref_pyramid, src_corners, flow, correspondences);
+
+ bool result = ransac(correspondences, num_correspondences, type,
+ motion_models, num_motion_models, mem_alloc_failed);
+
+ aom_free(correspondences);
+ free_flow_field(flow);
+ return result;
+}
diff --git a/third_party/aom/aom_dsp/flow_estimation/disflow.h b/third_party/aom/aom_dsp/flow_estimation/disflow.h
new file mode 100644
index 0000000000..ef877b638c
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/disflow.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AOM_DSP_FLOW_ESTIMATION_DISFLOW_H_
+#define AOM_AOM_DSP_FLOW_ESTIMATION_DISFLOW_H_
+
+#include <stdbool.h>
+
+#include "aom_dsp/flow_estimation/flow_estimation.h"
+#include "aom_dsp/rect.h"
+#include "aom_scale/yv12config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+// Number of pyramid levels in disflow computation
+#define DISFLOW_PYRAMID_LEVELS 12
+
+// Size of square patches in the disflow dense grid
+// Must be a power of 2
+#define DISFLOW_PATCH_SIZE_LOG2 3
+#define DISFLOW_PATCH_SIZE (1 << DISFLOW_PATCH_SIZE_LOG2)
+// Center point of square patch
+#define DISFLOW_PATCH_CENTER ((DISFLOW_PATCH_SIZE / 2) - 1)
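+// e.g. for DISFLOW_PATCH_SIZE == 8, DISFLOW_PATCH_CENTER == 3, so a patch
+// covering pixels [x, x + 7] is treated as centered on pixel x + 3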
+
+// Overall scale of the `dx`, `dy` and `dt` arrays in the disflow code
+// In other words, the various derivatives are calculated with an internal
+// precision of (8 + DISFLOW_DERIV_SCALE_LOG2) bits, from an 8-bit input.
+//
+// This must be carefully synchronized with the code in sobel_filter()
+// (which fills the dx and dy arrays) and compute_flow_error() (which
+// fills dt); see the comments in those functions for more details
+#define DISFLOW_DERIV_SCALE_LOG2 3
+#define DISFLOW_DERIV_SCALE (1 << DISFLOW_DERIV_SCALE_LOG2)
+
+// Scale factor applied to each step in the main refinement loop
+//
+// This should be <= 1.0 to avoid overshoot. Values below 1.0
+// may help in some cases, but slow convergence overall, so
+// will require careful tuning.
+// TODO(rachelbarker): Tune this value
+#define DISFLOW_STEP_SIZE 1.0
+
+// Step size at which we should terminate iteration
+// The idea here is that, if we take a step which is much smaller than 1px in
+// size, then the values won't change much from iteration to iteration, so
+// many future steps will also be small, and that won't have much effect
+// on the ultimate result. So we can terminate early.
+//
+// To look at it another way, when we take a small step, that means that
+// either we're near to convergence (so can stop), or we're stuck in a
+// shallow valley and will take many iterations to get unstuck.
+//
+// Solving the latter properly requires fancier methods, such as "gradient
+// descent with momentum". For now, we terminate to avoid wasting a ton of
+// time on points which are either nearly-converged or stuck.
+//
+// Terminating at 1/8 px seems to give good results for global motion estimation
+#define DISFLOW_STEP_SIZE_THRESOLD (1. / 8.)
+
+// Max number of iterations if warp convergence is not found
+#define DISFLOW_MAX_ITR 4
+
+// Internal precision of cubic interpolation filters
+// The limiting factor here is that:
+// * Before integerizing, the maximum value of any kernel tap is 1.0
+// * After integerizing, each tap must fit into an int16_t.
+// Thus the largest multiplier we can get away with is 2^14 = 16384,
+// as 2^15 = 32768 is too large to fit in an int16_t.
+#define DISFLOW_INTERP_BITS 14
+
+typedef struct {
+ // Start of allocation for u and v buffers
+ double *buf0;
+
+ // x and y directions of flow, per patch
+ double *u;
+ double *v;
+
+ // Sizes of the above arrays
+ int width;
+ int height;
+ int stride;
+} FlowField;
+
+bool av1_compute_global_motion_disflow(TransformationType type,
+ YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *ref, int bit_depth,
+ MotionModel *motion_models,
+ int num_motion_models,
+ bool *mem_alloc_failed);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AOM_AOM_DSP_FLOW_ESTIMATION_DISFLOW_H_
diff --git a/third_party/aom/aom_dsp/flow_estimation/flow_estimation.c b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.c
new file mode 100644
index 0000000000..0f47f86f55
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <assert.h>
+
+#include "aom_dsp/flow_estimation/corner_detect.h"
+#include "aom_dsp/flow_estimation/corner_match.h"
+#include "aom_dsp/flow_estimation/disflow.h"
+#include "aom_dsp/flow_estimation/flow_estimation.h"
+#include "aom_ports/mem.h"
+#include "aom_scale/yv12config.h"
+
+// For each global motion method, how many pyramid levels should we allocate?
+// Note that this is a maximum, and fewer levels will be allocated if the frame
+// is not large enough to need all of the specified levels
+const int global_motion_pyr_levels[GLOBAL_MOTION_METHODS] = {
+ 1, // GLOBAL_MOTION_METHOD_FEATURE_MATCH
+ 16, // GLOBAL_MOTION_METHOD_DISFLOW
+};
+
+// clang-format off
+const double kIdentityParams[MAX_PARAMDIM] = {
+ 0.0, 0.0, 1.0, 0.0, 0.0, 1.0
+};
+// clang-format on
+
+// Compute a global motion model between the given source and ref frames.
+//
+// As is standard for video codecs, the resulting model maps from (x, y)
+// coordinates in `src` to the corresponding points in `ref`, regardless
+// of the temporal order of the two frames.
+//
+// Returns true if global motion estimation succeeded, false if not.
+// The output models should only be used if this function succeeds.
+bool aom_compute_global_motion(TransformationType type, YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *ref, int bit_depth,
+ GlobalMotionMethod gm_method,
+ MotionModel *motion_models,
+ int num_motion_models, bool *mem_alloc_failed) {
+ switch (gm_method) {
+ case GLOBAL_MOTION_METHOD_FEATURE_MATCH:
+ return av1_compute_global_motion_feature_match(
+ type, src, ref, bit_depth, motion_models, num_motion_models,
+ mem_alloc_failed);
+ case GLOBAL_MOTION_METHOD_DISFLOW:
+ return av1_compute_global_motion_disflow(type, src, ref, bit_depth,
+ motion_models, num_motion_models,
+ mem_alloc_failed);
+ default: assert(0 && "Unknown global motion estimation type");
+ }
+ return false;
+}
diff --git a/third_party/aom/aom_dsp/flow_estimation/flow_estimation.h b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.h
new file mode 100644
index 0000000000..2dfae24980
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/flow_estimation.h
@@ -0,0 +1,95 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AOM_DSP_FLOW_ESTIMATION_H_
+#define AOM_AOM_DSP_FLOW_ESTIMATION_H_
+
+#include "aom_dsp/pyramid.h"
+#include "aom_dsp/flow_estimation/corner_detect.h"
+#include "aom_ports/mem.h"
+#include "aom_scale/yv12config.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define MAX_PARAMDIM 6
+#define MIN_INLIER_PROB 0.1
+
+/* clang-format off */
+enum {
+ IDENTITY = 0, // identity transformation, 0-parameter
+ TRANSLATION = 1, // translational motion, 2-parameter
+ ROTZOOM = 2, // simplified affine with rotation + zoom only, 4-parameter
+ AFFINE = 3, // affine, 6-parameter
+ TRANS_TYPES,
+} UENUM1BYTE(TransformationType);
+/* clang-format on */
+
+// number of parameters used by each transformation in TransformationTypes
+static const int trans_model_params[TRANS_TYPES] = { 0, 2, 4, 6 };
+
+// Available methods which can be used for global motion estimation
+typedef enum {
+ GLOBAL_MOTION_METHOD_FEATURE_MATCH,
+ GLOBAL_MOTION_METHOD_DISFLOW,
+ GLOBAL_MOTION_METHOD_LAST = GLOBAL_MOTION_METHOD_DISFLOW,
+ GLOBAL_MOTION_METHODS
+} GlobalMotionMethod;
+
+typedef struct {
+ double params[MAX_PARAMDIM];
+ int *inliers;
+ int num_inliers;
+} MotionModel;
+
+// Data structure to store a single correspondence point during global
+// motion search.
+//
+// A correspondence (x, y) -> (rx, ry) means that point (x, y) in the
+// source frame corresponds to point (rx, ry) in the ref frame.
+typedef struct {
+ double x, y;
+ double rx, ry;
+} Correspondence;
+
+// For each global motion method, how many pyramid levels should we allocate?
+// Note that this is a maximum, and fewer levels will be allocated if the frame
+// is not large enough to need all of the specified levels
+extern const int global_motion_pyr_levels[GLOBAL_MOTION_METHODS];
+
+// Which global motion method should we use in practice?
+// Disflow is both faster and gives better results than feature matching in
+// practically all cases, so we use disflow by default
+static const GlobalMotionMethod default_global_motion_method =
+ GLOBAL_MOTION_METHOD_DISFLOW;
+
+extern const double kIdentityParams[MAX_PARAMDIM];
+
+// Compute a global motion model between the given source and ref frames.
+//
+// As is standard for video codecs, the resulting model maps from (x, y)
+// coordinates in `src` to the corresponding points in `ref`, regardless
+// of the temporal order of the two frames.
+//
+// Returns true if global motion estimation succeeded, false if not.
+// The output models should only be used if this function succeeds.
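+//
+// Usage sketch (illustrative only, not part of the API contract): the caller
+// owns the `inliers` buffer of each output model, e.g.
+//
+//   MotionModel model = { 0 };
+//   model.inliers = aom_malloc(2 * sizeof(*model.inliers) * MAX_CORNERS);
+//   bool mem_alloc_failed = false;
+//   bool ok = aom_compute_global_motion(ROTZOOM, src, ref, bit_depth,
+//                                       GLOBAL_MOTION_METHOD_DISFLOW,
+//                                       &model, 1, &mem_alloc_failed);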
+bool aom_compute_global_motion(TransformationType type, YV12_BUFFER_CONFIG *src,
+ YV12_BUFFER_CONFIG *ref, int bit_depth,
+ GlobalMotionMethod gm_method,
+ MotionModel *motion_models,
+ int num_motion_models, bool *mem_alloc_failed);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AOM_AOM_DSP_FLOW_ESTIMATION_H_
diff --git a/third_party/aom/aom_dsp/flow_estimation/ransac.c b/third_party/aom/aom_dsp/flow_estimation/ransac.c
new file mode 100644
index 0000000000..b88a07b023
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/ransac.c
@@ -0,0 +1,484 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <memory.h>
+#include <math.h>
+#include <time.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <assert.h>
+
+#include "aom_dsp/flow_estimation/ransac.h"
+#include "aom_dsp/mathutils.h"
+#include "aom_mem/aom_mem.h"
+
+// TODO(rachelbarker): Remove dependence on code in av1/encoder/
+#include "av1/encoder/random.h"
+
+#define MAX_MINPTS 4
+#define MINPTS_MULTIPLIER 5
+
+#define INLIER_THRESHOLD 1.25
+#define INLIER_THRESHOLD_SQUARED (INLIER_THRESHOLD * INLIER_THRESHOLD)
+#define NUM_TRIALS 20
+
+// Flag to enable functions for finding TRANSLATION type models.
+//
+// These models are currently not considered due to a spec bug (see comments
+// in gm_get_motion_vector() in av1/common/mv.h). Thus we don't need to
+// compile the corresponding search functions, but we keep the source around,
+// disabled, for completeness.
+#define ALLOW_TRANSLATION_MODELS 0
+
+////////////////////////////////////////////////////////////////////////////////
+// ransac
+typedef bool (*IsDegenerateFunc)(double *p);
+typedef bool (*FindTransformationFunc)(int points, const double *points1,
+ const double *points2, double *params);
+typedef void (*ProjectPointsFunc)(const double *mat, const double *points,
+ double *proj, int n, int stride_points,
+ int stride_proj);
+
+// vtable-like structure which stores all of the information needed by RANSAC
+// for a particular model type
+typedef struct {
+ IsDegenerateFunc is_degenerate;
+ FindTransformationFunc find_transformation;
+ ProjectPointsFunc project_points;
+ int minpts;
+} RansacModelInfo;
+
+#if ALLOW_TRANSLATION_MODELS
+static void project_points_translation(const double *mat, const double *points,
+ double *proj, int n, int stride_points,
+ int stride_proj) {
+ int i;
+ for (i = 0; i < n; ++i) {
+ const double x = *(points++), y = *(points++);
+ *(proj++) = x + mat[0];
+ *(proj++) = y + mat[1];
+ points += stride_points - 2;
+ proj += stride_proj - 2;
+ }
+}
+#endif // ALLOW_TRANSLATION_MODELS
+
+static void project_points_affine(const double *mat, const double *points,
+ double *proj, int n, int stride_points,
+ int stride_proj) {
+ int i;
+ for (i = 0; i < n; ++i) {
+ const double x = *(points++), y = *(points++);
+ *(proj++) = mat[2] * x + mat[3] * y + mat[0];
+ *(proj++) = mat[4] * x + mat[5] * y + mat[1];
+ points += stride_points - 2;
+ proj += stride_proj - 2;
+ }
+}
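+
+// Note (illustrative): with the parameter layout used above,
+// params = { tx, ty, a, b, c, d } maps
+//   (x, y) -> (a * x + b * y + tx, c * x + d * y + ty),
+// so kIdentityParams = { 0, 0, 1, 0, 0, 1 } is the identity mapping.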
+
+#if ALLOW_TRANSLATION_MODELS
+static bool find_translation(int np, const double *pts1, const double *pts2,
+ double *params) {
+ double sumx = 0;
+ double sumy = 0;
+
+ for (int i = 0; i < np; ++i) {
+ double dx = *(pts2++);
+ double dy = *(pts2++);
+ double sx = *(pts1++);
+ double sy = *(pts1++);
+
+ sumx += dx - sx;
+ sumy += dy - sy;
+ }
+
+ params[0] = sumx / np;
+ params[1] = sumy / np;
+ params[2] = 1;
+ params[3] = 0;
+ params[4] = 0;
+ params[5] = 1;
+ return true;
+}
+#endif // ALLOW_TRANSLATION_MODELS
+
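+// Fit a rotation + zoom (similarity) model. From the rows accumulated below,
+// the model being solved for is
+//   dx = p0 + p2 * sx + p3 * sy
+//   dy = p1 - p3 * sx + p2 * sy
+// i.e. the 2x2 part is [[p2, p3], [-p3, p2]], a scaled rotation, which is
+// why only four parameters are needed.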
+static bool find_rotzoom(int np, const double *pts1, const double *pts2,
+ double *params) {
+ const int n = 4; // Size of least-squares problem
+ double mat[4 * 4]; // Accumulator for A'A
+ double y[4]; // Accumulator for A'b
+ double a[4]; // Single row of A
+ double b; // Single element of b
+
+ least_squares_init(mat, y, n);
+ for (int i = 0; i < np; ++i) {
+ double dx = *(pts2++);
+ double dy = *(pts2++);
+ double sx = *(pts1++);
+ double sy = *(pts1++);
+
+ a[0] = 1;
+ a[1] = 0;
+ a[2] = sx;
+ a[3] = sy;
+ b = dx;
+ least_squares_accumulate(mat, y, a, b, n);
+
+ a[0] = 0;
+ a[1] = 1;
+ a[2] = sy;
+ a[3] = -sx;
+ b = dy;
+ least_squares_accumulate(mat, y, a, b, n);
+ }
+
+ // Fill in params[0] .. params[3] with output model
+ if (!least_squares_solve(mat, y, params, n)) {
+ return false;
+ }
+
+ // Fill in remaining parameters
+ params[4] = -params[3];
+ params[5] = params[2];
+
+ return true;
+}
+
+static bool find_affine(int np, const double *pts1, const double *pts2,
+ double *params) {
+ // Note: The least squares problem for affine models is 6-dimensional,
+ // but it splits into two independent 3-dimensional subproblems.
+ // Solving these two subproblems separately and recombining at the end
+ // results in less total computation than solving the 6-dimensional
+ // problem directly.
+ //
+ // The two subproblems correspond to all the parameters which contribute
+ // to the x output of the model, and all the parameters which contribute
+ // to the y output, respectively.
+
+ const int n = 3; // Size of each least-squares problem
+ double mat[2][3 * 3]; // Accumulator for A'A
+ double y[2][3]; // Accumulator for A'b
+ double x[2][3]; // Output vector
+ double a[2][3]; // Single row of A
+ double b[2]; // Single element of b
+
+ least_squares_init(mat[0], y[0], n);
+ least_squares_init(mat[1], y[1], n);
+ for (int i = 0; i < np; ++i) {
+ double dx = *(pts2++);
+ double dy = *(pts2++);
+ double sx = *(pts1++);
+ double sy = *(pts1++);
+
+ a[0][0] = 1;
+ a[0][1] = sx;
+ a[0][2] = sy;
+ b[0] = dx;
+ least_squares_accumulate(mat[0], y[0], a[0], b[0], n);
+
+ a[1][0] = 1;
+ a[1][1] = sx;
+ a[1][2] = sy;
+ b[1] = dy;
+ least_squares_accumulate(mat[1], y[1], a[1], b[1], n);
+ }
+
+ if (!least_squares_solve(mat[0], y[0], x[0], n)) {
+ return false;
+ }
+ if (!least_squares_solve(mat[1], y[1], x[1], n)) {
+ return false;
+ }
+
+ // Rearrange least squares result to form output model
+ params[0] = x[0][0];
+ params[1] = x[1][0];
+ params[2] = x[0][1];
+ params[3] = x[0][2];
+ params[4] = x[1][1];
+ params[5] = x[1][2];
+
+ return true;
+}
+
+typedef struct {
+ int num_inliers;
+ double sse; // Sum of squared errors of inliers
+ int *inlier_indices;
+} RANSAC_MOTION;
+
+// Return -1 if 'a' is a better motion, 1 if 'b' is better, 0 otherwise.
+static int compare_motions(const void *arg_a, const void *arg_b) {
+ const RANSAC_MOTION *motion_a = (RANSAC_MOTION *)arg_a;
+ const RANSAC_MOTION *motion_b = (RANSAC_MOTION *)arg_b;
+
+ if (motion_a->num_inliers > motion_b->num_inliers) return -1;
+ if (motion_a->num_inliers < motion_b->num_inliers) return 1;
+ if (motion_a->sse < motion_b->sse) return -1;
+ if (motion_a->sse > motion_b->sse) return 1;
+ return 0;
+}
+
+static bool is_better_motion(const RANSAC_MOTION *motion_a,
+ const RANSAC_MOTION *motion_b) {
+ return compare_motions(motion_a, motion_b) < 0;
+}
+
+static void copy_points_at_indices(double *dest, const double *src,
+ const int *indices, int num_points) {
+ for (int i = 0; i < num_points; ++i) {
+ const int index = indices[i];
+ dest[i * 2] = src[index * 2];
+ dest[i * 2 + 1] = src[index * 2 + 1];
+ }
+}
+
+// Returns true on success, false on error
+static bool ransac_internal(const Correspondence *matched_points, int npoints,
+ MotionModel *motion_models, int num_desired_motions,
+ const RansacModelInfo *model_info,
+ bool *mem_alloc_failed) {
+ assert(npoints >= 0);
+ int i = 0;
+ int minpts = model_info->minpts;
+ bool ret_val = true;
+
+ unsigned int seed = (unsigned int)npoints;
+
+ int indices[MAX_MINPTS] = { 0 };
+
+ double *points1, *points2;
+ double *corners1, *corners2;
+ double *projected_corners;
+
+ // Store information for the num_desired_motions best transformations found
+ // and the worst motion among them, as well as the motion currently under
+ // consideration.
+ RANSAC_MOTION *motions, *worst_kept_motion = NULL;
+ RANSAC_MOTION current_motion;
+
+ // Store the parameters and the indices of the inlier points for the motion
+ // currently under consideration.
+ double params_this_motion[MAX_PARAMDIM];
+
+ if (npoints < minpts * MINPTS_MULTIPLIER || npoints == 0) {
+ return false;
+ }
+
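+ // e.g. with npoints == 100, MIN_INLIER_PROB == 0.1 and minpts == 3, a
+ // candidate model must explain at least max(10, 3) == 10 correspondences
+ // in order to be kept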
+ int min_inliers = AOMMAX((int)(MIN_INLIER_PROB * npoints), minpts);
+
+ points1 = (double *)aom_malloc(sizeof(*points1) * npoints * 2);
+ points2 = (double *)aom_malloc(sizeof(*points2) * npoints * 2);
+ corners1 = (double *)aom_malloc(sizeof(*corners1) * npoints * 2);
+ corners2 = (double *)aom_malloc(sizeof(*corners2) * npoints * 2);
+ projected_corners =
+ (double *)aom_malloc(sizeof(*projected_corners) * npoints * 2);
+ motions =
+ (RANSAC_MOTION *)aom_calloc(num_desired_motions, sizeof(RANSAC_MOTION));
+
+ // Allocate one large buffer which will be carved up to store the inlier
+ // indices for the current motion plus the num_desired_motions many
+ // output models
+ // This allows us to keep the allocation/deallocation logic simple, without
+ // having to (for example) check that `motions` is non-null before allocating
+ // the inlier arrays
+ int *inlier_buffer = (int *)aom_malloc(sizeof(*inlier_buffer) * npoints *
+ (num_desired_motions + 1));
+
+ if (!(points1 && points2 && corners1 && corners2 && projected_corners &&
+ motions && inlier_buffer)) {
+ ret_val = false;
+ *mem_alloc_failed = true;
+ goto finish_ransac;
+ }
+
+ // Once all our allocations are known-good, we can fill in our structures
+ worst_kept_motion = motions;
+
+ for (i = 0; i < num_desired_motions; ++i) {
+ motions[i].inlier_indices = inlier_buffer + i * npoints;
+ }
+ memset(&current_motion, 0, sizeof(current_motion));
+ current_motion.inlier_indices = inlier_buffer + num_desired_motions * npoints;
+
+ for (i = 0; i < npoints; ++i) {
+ corners1[2 * i + 0] = matched_points[i].x;
+ corners1[2 * i + 1] = matched_points[i].y;
+ corners2[2 * i + 0] = matched_points[i].rx;
+ corners2[2 * i + 1] = matched_points[i].ry;
+ }
+
+ for (int trial_count = 0; trial_count < NUM_TRIALS; trial_count++) {
+ lcg_pick(npoints, minpts, indices, &seed);
+
+ copy_points_at_indices(points1, corners1, indices, minpts);
+ copy_points_at_indices(points2, corners2, indices, minpts);
+
+ if (model_info->is_degenerate(points1)) {
+ continue;
+ }
+
+ if (!model_info->find_transformation(minpts, points1, points2,
+ params_this_motion)) {
+ continue;
+ }
+
+ model_info->project_points(params_this_motion, corners1, projected_corners,
+ npoints, 2, 2);
+
+ current_motion.num_inliers = 0;
+ double sse = 0.0;
+ for (i = 0; i < npoints; ++i) {
+ double dx = projected_corners[i * 2] - corners2[i * 2];
+ double dy = projected_corners[i * 2 + 1] - corners2[i * 2 + 1];
+ double squared_error = dx * dx + dy * dy;
+
+ if (squared_error < INLIER_THRESHOLD_SQUARED) {
+ current_motion.inlier_indices[current_motion.num_inliers++] = i;
+ sse += squared_error;
+ }
+ }
+
+ if (current_motion.num_inliers < min_inliers) {
+ // Reject models with too few inliers
+ continue;
+ }
+
+ current_motion.sse = sse;
+ if (is_better_motion(&current_motion, worst_kept_motion)) {
+ // This motion is better than the worst currently kept motion. Remember
+ // the inlier points and sse. The parameters for each kept motion
+ // will be recomputed later using only the inliers.
+ worst_kept_motion->num_inliers = current_motion.num_inliers;
+ worst_kept_motion->sse = current_motion.sse;
+
+ // Rather than copying the (potentially many) inlier indices from
+ // current_motion.inlier_indices to worst_kept_motion->inlier_indices,
+ // we can swap the underlying pointers.
+ //
+ // This is okay because the next time current_motion.inlier_indices
+ // is used will be in the next trial, where we ignore its previous
+ // contents anyway. And both arrays will be deallocated together at the
+ // end of this function, so there are no lifetime issues.
+ int *tmp = worst_kept_motion->inlier_indices;
+ worst_kept_motion->inlier_indices = current_motion.inlier_indices;
+ current_motion.inlier_indices = tmp;
+
+ // Determine the new worst kept motion and its num_inliers and sse.
+ for (i = 0; i < num_desired_motions; ++i) {
+ if (is_better_motion(worst_kept_motion, &motions[i])) {
+ worst_kept_motion = &motions[i];
+ }
+ }
+ }
+ }
+
+ // Sort the motions, best first.
+ qsort(motions, num_desired_motions, sizeof(RANSAC_MOTION), compare_motions);
+
+ // Recompute the motions using only the inliers.
+ for (i = 0; i < num_desired_motions; ++i) {
+ int num_inliers = motions[i].num_inliers;
+ if (num_inliers > 0) {
+ assert(num_inliers >= minpts);
+
+ copy_points_at_indices(points1, corners1, motions[i].inlier_indices,
+ num_inliers);
+ copy_points_at_indices(points2, corners2, motions[i].inlier_indices,
+ num_inliers);
+
+ if (!model_info->find_transformation(num_inliers, points1, points2,
+ motion_models[i].params)) {
+ // In the unlikely event that this model fitting fails,
+ // we don't have a good fallback. So just clear the output
+ // model and move on
+ memcpy(motion_models[i].params, kIdentityParams,
+ MAX_PARAMDIM * sizeof(*(motion_models[i].params)));
+ motion_models[i].num_inliers = 0;
+ continue;
+ }
+
+ // Populate inliers array
+ for (int j = 0; j < num_inliers; j++) {
+ int index = motions[i].inlier_indices[j];
+ const Correspondence *corr = &matched_points[index];
+ motion_models[i].inliers[2 * j + 0] = (int)rint(corr->x);
+ motion_models[i].inliers[2 * j + 1] = (int)rint(corr->y);
+ }
+ motion_models[i].num_inliers = num_inliers;
+ } else {
+ memcpy(motion_models[i].params, kIdentityParams,
+ MAX_PARAMDIM * sizeof(*(motion_models[i].params)));
+ motion_models[i].num_inliers = 0;
+ }
+ }
+
+finish_ransac:
+ aom_free(inlier_buffer);
+ aom_free(motions);
+ aom_free(projected_corners);
+ aom_free(corners2);
+ aom_free(corners1);
+ aom_free(points2);
+ aom_free(points1);
+
+ return ret_val;
+}
+
+static bool is_collinear3(double *p1, double *p2, double *p3) {
+ static const double collinear_eps = 1e-3;
+ const double v =
+ (p2[0] - p1[0]) * (p3[1] - p1[1]) - (p2[1] - p1[1]) * (p3[0] - p1[0]);
+ return fabs(v) < collinear_eps;
+}
+
+#if ALLOW_TRANSLATION_MODELS
+static bool is_degenerate_translation(double *p) {
+ return (p[0] - p[2]) * (p[0] - p[2]) + (p[1] - p[3]) * (p[1] - p[3]) <= 2;
+}
+#endif // ALLOW_TRANSLATION_MODELS
+
+static bool is_degenerate_affine(double *p) {
+ return is_collinear3(p, p + 2, p + 4);
+}
+
+static const RansacModelInfo ransac_model_info[TRANS_TYPES] = {
+ // IDENTITY
+ { NULL, NULL, NULL, 0 },
+// TRANSLATION
+#if ALLOW_TRANSLATION_MODELS
+ { is_degenerate_translation, find_translation, project_points_translation,
+ 3 },
+#else
+ { NULL, NULL, NULL, 0 },
+#endif
+ // ROTZOOM
+ { is_degenerate_affine, find_rotzoom, project_points_affine, 3 },
+ // AFFINE
+ { is_degenerate_affine, find_affine, project_points_affine, 3 },
+};
+
+// Returns true on success, false on error
+bool ransac(const Correspondence *matched_points, int npoints,
+ TransformationType type, MotionModel *motion_models,
+ int num_desired_motions, bool *mem_alloc_failed) {
+#if ALLOW_TRANSLATION_MODELS
+ assert(type > IDENTITY && type < TRANS_TYPES);
+#else
+ assert(type > TRANSLATION && type < TRANS_TYPES);
+#endif // ALLOW_TRANSLATION_MODELS
+
+ return ransac_internal(matched_points, npoints, motion_models,
+ num_desired_motions, &ransac_model_info[type],
+ mem_alloc_failed);
+}
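+
+#if 0
+// Usage sketch (illustrative only, excluded from the build): fit a single
+// ROTZOOM model. As ransac_internal() shows, the caller must give each output
+// model an `inliers` buffer with room for 2 ints per correspondence, and
+// ransac() fails unless npoints >= minpts * MINPTS_MULTIPLIER (15 here).
+static bool fit_one_rotzoom(const Correspondence *corrs, int npoints,
+ int *inlier_buf, MotionModel *model) {
+ bool mem_alloc_failed = false;
+ model->inliers = inlier_buf;
+ return ransac(corrs, npoints, ROTZOOM, model, 1, &mem_alloc_failed);
+}
+#endif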
diff --git a/third_party/aom/aom_dsp/flow_estimation/ransac.h b/third_party/aom/aom_dsp/flow_estimation/ransac.h
new file mode 100644
index 0000000000..0529b6e13c
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/ransac.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (c) 2016, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#ifndef AOM_AOM_DSP_FLOW_ESTIMATION_RANSAC_H_
+#define AOM_AOM_DSP_FLOW_ESTIMATION_RANSAC_H_
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <math.h>
+#include <memory.h>
+#include <stdbool.h>
+
+#include "aom_dsp/flow_estimation/flow_estimation.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+bool ransac(const Correspondence *matched_points, int npoints,
+ TransformationType type, MotionModel *motion_models,
+ int num_desired_motions, bool *mem_alloc_failed);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif // AOM_AOM_DSP_FLOW_ESTIMATION_RANSAC_H_
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
new file mode 100644
index 0000000000..87c76fa13b
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_avx2.c
@@ -0,0 +1,80 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <math.h>
+
+#include <immintrin.h>
+#include "config/aom_dsp_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "aom_dsp/flow_estimation/corner_match.h"
+
+DECLARE_ALIGNED(16, static const uint8_t,
+ byte_mask[16]) = { 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 0, 0, 0 };
+#if MATCH_SZ != 13
+#error "Need to change byte_mask in corner_match_sse4.c if MATCH_SZ != 13"
+#endif
+
+/* Compute corr(frame1, frame2) * MATCH_SZ * stddev(frame1), where the
+correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
+of each image, centered at (x1, y1) and (x2, y2) respectively.
+*/
+double av1_compute_cross_correlation_avx2(const unsigned char *frame1,
+ int stride1, int x1, int y1,
+ const unsigned char *frame2,
+ int stride2, int x2, int y2) {
+ int i, stride1_i = 0, stride2_i = 0;
+ __m256i temp1, sum_vec, sumsq2_vec, cross_vec, v, v1_1, v2_1;
+ const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
+ const __m256i zero = _mm256_setzero_si256();
+ __m128i v1, v2;
+
+ sum_vec = zero;
+ sumsq2_vec = zero;
+ cross_vec = zero;
+
+ frame1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
+ frame2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);
+
+ for (i = 0; i < MATCH_SZ; ++i) {
+ v1 = _mm_and_si128(_mm_loadu_si128((__m128i *)&frame1[stride1_i]), mask);
+ v1_1 = _mm256_cvtepu8_epi16(v1);
+ v2 = _mm_and_si128(_mm_loadu_si128((__m128i *)&frame2[stride2_i]), mask);
+ v2_1 = _mm256_cvtepu8_epi16(v2);
+
+ v = _mm256_insertf128_si256(_mm256_castsi128_si256(v1), v2, 1);
+ sumsq2_vec = _mm256_add_epi32(sumsq2_vec, _mm256_madd_epi16(v2_1, v2_1));
+
+ sum_vec = _mm256_add_epi16(sum_vec, _mm256_sad_epu8(v, zero));
+ cross_vec = _mm256_add_epi32(cross_vec, _mm256_madd_epi16(v1_1, v2_1));
+ stride1_i += stride1;
+ stride2_i += stride2;
+ }
+ __m256i sum_vec1 = _mm256_srli_si256(sum_vec, 8);
+ sum_vec = _mm256_add_epi32(sum_vec, sum_vec1);
+ int sum1_acc = _mm_cvtsi128_si32(_mm256_castsi256_si128(sum_vec));
+ int sum2_acc = _mm256_extract_epi32(sum_vec, 4);
+
+ __m256i unp_low = _mm256_unpacklo_epi64(sumsq2_vec, cross_vec);
+ __m256i unp_hig = _mm256_unpackhi_epi64(sumsq2_vec, cross_vec);
+ temp1 = _mm256_add_epi32(unp_low, unp_hig);
+
+ __m128i low_sumsq = _mm256_castsi256_si128(temp1);
+ low_sumsq = _mm_add_epi32(low_sumsq, _mm256_extractf128_si256(temp1, 1));
+ low_sumsq = _mm_add_epi32(low_sumsq, _mm_srli_epi64(low_sumsq, 32));
+ int sumsq2_acc = _mm_cvtsi128_si32(low_sumsq);
+ int cross_acc = _mm_extract_epi32(low_sumsq, 2);
+
+ int var2 = sumsq2_acc * MATCH_SZ_SQ - sum2_acc * sum2_acc;
+ int cov = cross_acc * MATCH_SZ_SQ - sum1_acc * sum2_acc;
+ return cov / sqrt((double)var2);
+}
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
new file mode 100644
index 0000000000..b3cb5bc5fd
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/corner_match_sse4.c
@@ -0,0 +1,104 @@
+/*
+ * Copyright (c) 2018, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 2 Clause License and
+ * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
+ * was not distributed with this source code in the LICENSE file, you can
+ * obtain it at www.aomedia.org/license/software. If the Alliance for Open
+ * Media Patent License 1.0 was not distributed with this source code in the
+ * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
+ */
+
+#include <stdlib.h>
+#include <memory.h>
+#include <math.h>
+#include <assert.h>
+
+#include <smmintrin.h>
+
+#include "config/aom_dsp_rtcd.h"
+
+#include "aom_ports/mem.h"
+#include "aom_dsp/flow_estimation/corner_match.h"
+
+DECLARE_ALIGNED(16, static const uint8_t,
+ byte_mask[16]) = { 255, 255, 255, 255, 255, 255, 255, 255,
+ 255, 255, 255, 255, 255, 0, 0, 0 };
+#if MATCH_SZ != 13
+#error "Need to change byte_mask in corner_match_sse4.c if MATCH_SZ != 13"
+#endif
+
+/* Compute corr(frame1, frame2) * MATCH_SZ * stddev(frame1), where the
+ correlation/standard deviation are taken over MATCH_SZ by MATCH_SZ windows
+ of each image, centered at (x1, y1) and (x2, y2) respectively.
+*/
+double av1_compute_cross_correlation_sse4_1(const unsigned char *frame1,
+ int stride1, int x1, int y1,
+ const unsigned char *frame2,
+ int stride2, int x2, int y2) {
+ int i;
+ // Each register holds two 16-bit partial sums, in 16-bit lanes 0 and 4
+ // (equivalently, 32-bit lanes 0 and 2)
+ __m128i sum1_vec = _mm_setzero_si128();
+ __m128i sum2_vec = _mm_setzero_si128();
+ // 4 32-bit partial sums of squares
+ __m128i sumsq2_vec = _mm_setzero_si128();
+ __m128i cross_vec = _mm_setzero_si128();
+
+ const __m128i mask = _mm_load_si128((__m128i *)byte_mask);
+ const __m128i zero = _mm_setzero_si128();
+
+ frame1 += (y1 - MATCH_SZ_BY2) * stride1 + (x1 - MATCH_SZ_BY2);
+ frame2 += (y2 - MATCH_SZ_BY2) * stride2 + (x2 - MATCH_SZ_BY2);
+
+ for (i = 0; i < MATCH_SZ; ++i) {
+ const __m128i v1 =
+ _mm_and_si128(_mm_loadu_si128((__m128i *)&frame1[i * stride1]), mask);
+ const __m128i v2 =
+ _mm_and_si128(_mm_loadu_si128((__m128i *)&frame2[i * stride2]), mask);
+
+ // Using the 'sad' intrinsic here is a bit faster than adding
+ // v1_l + v1_r and v2_l + v2_r, plus it avoids the need for a 16->32 bit
+ // conversion step later, for a net speedup of ~10%
+ sum1_vec = _mm_add_epi16(sum1_vec, _mm_sad_epu8(v1, zero));
+ sum2_vec = _mm_add_epi16(sum2_vec, _mm_sad_epu8(v2, zero));
+
+ const __m128i v1_l = _mm_cvtepu8_epi16(v1);
+ const __m128i v1_r = _mm_cvtepu8_epi16(_mm_srli_si128(v1, 8));
+ const __m128i v2_l = _mm_cvtepu8_epi16(v2);
+ const __m128i v2_r = _mm_cvtepu8_epi16(_mm_srli_si128(v2, 8));
+
+ sumsq2_vec = _mm_add_epi32(
+ sumsq2_vec,
+ _mm_add_epi32(_mm_madd_epi16(v2_l, v2_l), _mm_madd_epi16(v2_r, v2_r)));
+ cross_vec = _mm_add_epi32(
+ cross_vec,
+ _mm_add_epi32(_mm_madd_epi16(v1_l, v2_l), _mm_madd_epi16(v1_r, v2_r)));
+ }
+
+ // Now we can treat the four registers (sum1_vec, sum2_vec, sumsq2_vec,
+ // cross_vec) as each holding 4 32-bit elements, which we want to sum
+ // horizontally. We do this by transposing and then summing vertically.
+ __m128i tmp_0 = _mm_unpacklo_epi32(sum1_vec, sum2_vec);
+ __m128i tmp_1 = _mm_unpackhi_epi32(sum1_vec, sum2_vec);
+ __m128i tmp_2 = _mm_unpacklo_epi32(sumsq2_vec, cross_vec);
+ __m128i tmp_3 = _mm_unpackhi_epi32(sumsq2_vec, cross_vec);
+
+ __m128i tmp_4 = _mm_unpacklo_epi64(tmp_0, tmp_2);
+ __m128i tmp_5 = _mm_unpackhi_epi64(tmp_0, tmp_2);
+ __m128i tmp_6 = _mm_unpacklo_epi64(tmp_1, tmp_3);
+ __m128i tmp_7 = _mm_unpackhi_epi64(tmp_1, tmp_3);
+
+ __m128i res =
+ _mm_add_epi32(_mm_add_epi32(tmp_4, tmp_5), _mm_add_epi32(tmp_6, tmp_7));
+
+ int sum1 = _mm_extract_epi32(res, 0);
+ int sum2 = _mm_extract_epi32(res, 1);
+ int sumsq2 = _mm_extract_epi32(res, 2);
+ int cross = _mm_extract_epi32(res, 3);
+
+ int var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
+ int cov = cross * MATCH_SZ_SQ - sum1 * sum2;
+ return cov / sqrt((double)var2);
+}
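+
+#if 0
+// Scalar sketch of the computation above (illustrative only, excluded from
+// the build); both SIMD versions accumulate exactly these four window sums
+// before combining them:
+static double cross_correlation_scalar_sketch(const unsigned char *frame1,
+ int stride1, int x1, int y1,
+ const unsigned char *frame2,
+ int stride2, int x2, int y2) {
+ int sum1 = 0, sum2 = 0, sumsq2 = 0, cross = 0;
+ for (int i = 0; i < MATCH_SZ; ++i) {
+ for (int j = 0; j < MATCH_SZ; ++j) {
+ const int v1 =
+ frame1[(y1 - MATCH_SZ_BY2 + i) * stride1 + (x1 - MATCH_SZ_BY2 + j)];
+ const int v2 =
+ frame2[(y2 - MATCH_SZ_BY2 + i) * stride2 + (x2 - MATCH_SZ_BY2 + j)];
+ sum1 += v1;
+ sum2 += v2;
+ sumsq2 += v2 * v2;
+ cross += v1 * v2;
+ }
+ }
+ const int var2 = sumsq2 * MATCH_SZ_SQ - sum2 * sum2;
+ const int cov = cross * MATCH_SZ_SQ - sum1 * sum2;
+ return cov / sqrt((double)var2);
+}
+#endif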
diff --git a/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
new file mode 100644
index 0000000000..d2b04c1973
--- /dev/null
+++ b/third_party/aom/aom_dsp/flow_estimation/x86/disflow_sse4.c
@@ -0,0 +1,558 @@
+/*
+ * Copyright (c) 2022, Alliance for Open Media. All rights reserved
+ *
+ * This source code is subject to the terms of the BSD 3-Clause Clear License
+ * and the Alliance for Open Media Patent License 1.0. If the BSD 3-Clause Clear
+ * License was not distributed with this source code in the LICENSE file, you
+ * can obtain it at aomedia.org/license/software-license/bsd-3-c-c/. If the
+ * Alliance for Open Media Patent License 1.0 was not distributed with this
+ * source code in the PATENTS file, you can obtain it at
+ * aomedia.org/license/patent-license/.
+ */
+
+#include <assert.h>
+#include <math.h>
+#include <smmintrin.h>
+
+#include "aom_dsp/aom_dsp_common.h"
+#include "aom_dsp/flow_estimation/disflow.h"
+#include "aom_dsp/x86/synonyms.h"
+
+#include "config/aom_dsp_rtcd.h"
+
+// Internal cross-check against C code
+// If you set this to 1 and compile in debug mode, then the outputs of the two
+// convolution stages will be checked against the plain C version of the code,
+// and an assertion will be fired if the results differ.
+#define CHECK_RESULTS 0
+
+// Note: Max sum(+ve coefficients) = 1.125 * scale
+static INLINE void get_cubic_kernel_dbl(double x, double kernel[4]) {
+ // Check that the fractional position is in range.
+ //
+ // Note: x is calculated from (e.g.) `u_frac = u - floor(u)`.
+ // Mathematically, this implies that 0 <= x < 1. However, in practice it is
+ // possible to have x == 1 due to floating point rounding. This is fine,
+ // and we still interpolate correctly if we allow x = 1.
+ assert(0 <= x && x <= 1);
+
+ double x2 = x * x;
+ double x3 = x2 * x;
+ kernel[0] = -0.5 * x + x2 - 0.5 * x3;
+ kernel[1] = 1.0 - 2.5 * x2 + 1.5 * x3;
+ kernel[2] = 0.5 * x + 2.0 * x2 - 1.5 * x3;
+ kernel[3] = -0.5 * x2 + 0.5 * x3;
+}
+
+static INLINE void get_cubic_kernel_int(double x, int16_t kernel[4]) {
+ double kernel_dbl[4];
+ get_cubic_kernel_dbl(x, kernel_dbl);
+
+ kernel[0] = (int16_t)rint(kernel_dbl[0] * (1 << DISFLOW_INTERP_BITS));
+ kernel[1] = (int16_t)rint(kernel_dbl[1] * (1 << DISFLOW_INTERP_BITS));
+ kernel[2] = (int16_t)rint(kernel_dbl[2] * (1 << DISFLOW_INTERP_BITS));
+ kernel[3] = (int16_t)rint(kernel_dbl[3] * (1 << DISFLOW_INTERP_BITS));
+}
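+
+// Illustrative check: for x == 0.5, get_cubic_kernel_dbl() yields
+// { -0.0625, 0.5625, 0.5625, -0.0625 }, so the integerized kernel is
+// { -1024, 9216, 9216, -1024 }, summing to exactly 1 << DISFLOW_INTERP_BITS.
+// (The real-valued kernel sums to 1 at every phase; after rounding, the
+// integer sum can differ by a unit or two.)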
+
+#if CHECK_RESULTS
+static INLINE int get_cubic_value_int(const int *p, const int16_t kernel[4]) {
+ return kernel[0] * p[0] + kernel[1] * p[1] + kernel[2] * p[2] +
+ kernel[3] * p[3];
+}
+#endif // CHECK_RESULTS
+
+// Compute the flow vector `b` for a single DISFLOW_PATCH_SIZE x
+// DISFLOW_PATCH_SIZE patch: warp the patch of `ref` rooted at (x + u, y + v)
+// using bicubic interpolation, take the per-pixel difference `dt` against
+// the patch of `src` rooted at (x, y), and accumulate
+//   b[0] = sum(dx * dt), b[1] = sum(dy * dt)
+// over the patch, where dx and dy are the precomputed Sobel derivatives.
+//
+// TODO(rachelbarker): Test speed/quality impact of using bilinear
+// interpolation instead of bicubic interpolation
+static INLINE void compute_flow_vector(const uint8_t *src, const uint8_t *ref,
+ int width, int height, int stride, int x,
+ int y, double u, double v,
+ const int16_t *dx, const int16_t *dy,
+ int *b) {
+ // This function is written to do 8x8 convolutions only
+ assert(DISFLOW_PATCH_SIZE == 8);
+
+ // Accumulate 4 32-bit partial sums for each element of b
+ // These will be flattened at the end.
+ __m128i b0_acc = _mm_setzero_si128();
+ __m128i b1_acc = _mm_setzero_si128();
+#if CHECK_RESULTS
+ // Also keep a running sum using the C algorithm, for cross-checking
+ int c_result[2] = { 0 };
+#endif // CHECK_RESULTS
+
+ // Split offset into integer and fractional parts, and compute cubic
+ // interpolation kernels
+ const int u_int = (int)floor(u);
+ const int v_int = (int)floor(v);
+ const double u_frac = u - floor(u);
+ const double v_frac = v - floor(v);
+
+ int16_t h_kernel[4];
+ int16_t v_kernel[4];
+ get_cubic_kernel_int(u_frac, h_kernel);
+ get_cubic_kernel_int(v_frac, v_kernel);
+
+ // Storage for intermediate values between the two convolution directions
+ int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 3)];
+ int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE; // Offset by one row
+
+ // Clamp coordinates so that all pixels we fetch will remain within the
+ // allocated border region, but allow them to go far enough out that
+ // the border pixels' values do not change.
+ // Since we are calculating an 8x8 block, the bottom-right pixel
+ // in the block has coordinates (x0 + 7, y0 + 7). Then, the cubic
+ // interpolation has 4 taps, meaning that the output of pixel
+ // (x_w, y_w) depends on the pixels in the range
+ // ([x_w - 1, x_w + 2], [y_w - 1, y_w + 2]).
+ //
+ // Thus the most extreme coordinates which will be fetched are
+ // (x0 - 1, y0 - 1) and (x0 + 9, y0 + 9).
+ const int x0 = clamp(x + u_int, -9, width);
+ const int y0 = clamp(y + v_int, -9, height);
+
+ // Horizontal convolution
+
+ // Prepare the kernel vectors
+ // We split the kernel into two vectors with kernel indices:
+ // 0, 1, 0, 1, 0, 1, 0, 1, and
+ // 2, 3, 2, 3, 2, 3, 2, 3
+ __m128i h_kernel_01 = xx_set2_epi16(h_kernel[0], h_kernel[1]);
+ __m128i h_kernel_23 = xx_set2_epi16(h_kernel[2], h_kernel[3]);
+
+ __m128i round_const_h = _mm_set1_epi32(1 << (DISFLOW_INTERP_BITS - 6 - 1));
+
+ for (int i = -1; i < DISFLOW_PATCH_SIZE + 2; ++i) {
+ const int y_w = y0 + i;
+ const uint8_t *ref_row = &ref[y_w * stride + (x0 - 1)];
+ int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
+
+ // Load this row of pixels.
+ // For an 8x8 patch, we need to load the 8 image pixels + 3 extras,
+ // for a total of 11 pixels. Here we load 16 pixels, but only use
+ // the first 11.
+ __m128i row = _mm_loadu_si128((__m128i *)ref_row);
+
+ // Expand pixels to int16s
+ __m128i px_0to7_i16 = _mm_cvtepu8_epi16(row);
+ __m128i px_4to10_i16 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 4));
+
+ // The convolution below uses _mm_madd_epi16, which multiplies pointwise
+ // and then sums adjacent pairs of 16-bit lanes.
+
+ // Compute first four outputs
+ // input pixels 0, 1, 1, 2, 2, 3, 3, 4
+ // * kernel 0, 1, 0, 1, 0, 1, 0, 1
+ __m128i px0 =
+ _mm_unpacklo_epi16(px_0to7_i16, _mm_srli_si128(px_0to7_i16, 2));
+ // input pixels 2, 3, 3, 4, 4, 5, 5, 6
+ // * kernel 2, 3, 2, 3, 2, 3, 2, 3
+ __m128i px1 = _mm_unpacklo_epi16(_mm_srli_si128(px_0to7_i16, 4),
+ _mm_srli_si128(px_0to7_i16, 6));
+ // Convolve with kernel and sum 2x2 boxes to form first 4 outputs
+ __m128i sum0 = _mm_add_epi32(_mm_madd_epi16(px0, h_kernel_01),
+ _mm_madd_epi16(px1, h_kernel_23));
+
+ __m128i out0 = _mm_srai_epi32(_mm_add_epi32(sum0, round_const_h),
+ DISFLOW_INTERP_BITS - 6);
+
+ // Compute second four outputs
+ __m128i px2 =
+ _mm_unpacklo_epi16(px_4to10_i16, _mm_srli_si128(px_4to10_i16, 2));
+ __m128i px3 = _mm_unpacklo_epi16(_mm_srli_si128(px_4to10_i16, 4),
+ _mm_srli_si128(px_4to10_i16, 6));
+ __m128i sum1 = _mm_add_epi32(_mm_madd_epi16(px2, h_kernel_01),
+ _mm_madd_epi16(px3, h_kernel_23));
+
+ // Round by just enough bits that the result is guaranteed to fit into an
+ // i16, so that the next stage can use 16 x 16 -> 32 bit multiplies, which
+ // are a fair bit faster than 32 x 32 -> 32 bit multiplies.
+ // This means shifting down so we have 6 extra bits, for a maximum value
+ // of +18360, which can occur if u_frac == 0.5 and the input pixels are
+ // {0, 255, 255, 0}.
+ __m128i out1 = _mm_srai_epi32(_mm_add_epi32(sum1, round_const_h),
+ DISFLOW_INTERP_BITS - 6);
+
+ _mm_storeu_si128((__m128i *)tmp_row, _mm_packs_epi32(out0, out1));
+
+#if CHECK_RESULTS && !defined(NDEBUG)
+ // Cross-check
+ for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
+ const int x_w = x0 + j;
+ int arr[4];
+
+ arr[0] = (int)ref[y_w * stride + (x_w - 1)];
+ arr[1] = (int)ref[y_w * stride + (x_w + 0)];
+ arr[2] = (int)ref[y_w * stride + (x_w + 1)];
+ arr[3] = (int)ref[y_w * stride + (x_w + 2)];
+
+ // Apply kernel and round, keeping 6 extra bits of precision.
+ //
+ // 6 is the maximum allowable number of extra bits which will avoid
+ // the intermediate values overflowing an int16_t. The most extreme
+ // intermediate value occurs when:
+ // * The input pixels are [0, 255, 255, 0]
+ // * u_frac = 0.5
+ // In this case, the un-scaled output is 255 * 1.125 = 286.875.
+ // As an integer with 6 fractional bits, that is 18360, which fits
+ // in an int16_t. But with 7 fractional bits it would be 36720,
+ // which is too large.
+ const int c_value = ROUND_POWER_OF_TWO(get_cubic_value_int(arr, h_kernel),
+ DISFLOW_INTERP_BITS - 6);
+ (void)c_value; // Suppress warnings
+ assert(tmp_row[j] == c_value);
+ }
+#endif // CHECK_RESULTS
+ }
+
+ // Vertical convolution
+ const int round_bits = DISFLOW_INTERP_BITS + 6 - DISFLOW_DERIV_SCALE_LOG2;
+ __m128i round_const_v = _mm_set1_epi32(1 << (round_bits - 1));
+
+ __m128i v_kernel_01 = xx_set2_epi16(v_kernel[0], v_kernel[1]);
+ __m128i v_kernel_23 = xx_set2_epi16(v_kernel[2], v_kernel[3]);
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; ++i) {
+ int16_t *tmp_row = &tmp[i * DISFLOW_PATCH_SIZE];
+
+ // Load 4 rows of 8 x 16-bit values
+ __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
+ __m128i px1 = _mm_loadu_si128((__m128i *)tmp_row);
+ __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
+ __m128i px3 =
+ _mm_loadu_si128((__m128i *)(tmp_row + 2 * DISFLOW_PATCH_SIZE));
+
+ // We want to calculate px0 * v_kernel[0] + px1 * v_kernel[1] + ... ,
+ // but each multiply expands its output to 32 bits. So we need to be
+ // a little clever about how we do this
+ __m128i sum0 = _mm_add_epi32(
+ _mm_madd_epi16(_mm_unpacklo_epi16(px0, px1), v_kernel_01),
+ _mm_madd_epi16(_mm_unpacklo_epi16(px2, px3), v_kernel_23));
+ __m128i sum1 = _mm_add_epi32(
+ _mm_madd_epi16(_mm_unpackhi_epi16(px0, px1), v_kernel_01),
+ _mm_madd_epi16(_mm_unpackhi_epi16(px2, px3), v_kernel_23));
+
+ __m128i sum0_rounded =
+ _mm_srai_epi32(_mm_add_epi32(sum0, round_const_v), round_bits);
+ __m128i sum1_rounded =
+ _mm_srai_epi32(_mm_add_epi32(sum1, round_const_v), round_bits);
+
+ __m128i warped = _mm_packs_epi32(sum0_rounded, sum1_rounded);
+ __m128i src_pixels_u8 =
+ _mm_loadl_epi64((__m128i *)&src[(y + i) * stride + x]);
+ __m128i src_pixels = _mm_slli_epi16(_mm_cvtepu8_epi16(src_pixels_u8), 3);
+
+ // Calculate delta from the target patch
+ __m128i dt = _mm_sub_epi16(warped, src_pixels);
+
+ // Load 8 elements each of dx and dy, to pair with the 8 elements of dt
+ // that we have just computed. Then compute 8 partial sums of dx * dt
+ // and dy * dt, implicitly sum to give 4 partial sums of each, and
+ // accumulate.
+ __m128i dx_row = _mm_loadu_si128((__m128i *)&dx[i * DISFLOW_PATCH_SIZE]);
+ __m128i dy_row = _mm_loadu_si128((__m128i *)&dy[i * DISFLOW_PATCH_SIZE]);
+ b0_acc = _mm_add_epi32(b0_acc, _mm_madd_epi16(dx_row, dt));
+ b1_acc = _mm_add_epi32(b1_acc, _mm_madd_epi16(dy_row, dt));
+
+#if CHECK_RESULTS
+ int16_t dt_arr[8];
+ memcpy(dt_arr, &dt, 8 * sizeof(*dt_arr));
+ for (int j = 0; j < DISFLOW_PATCH_SIZE; ++j) {
+ int16_t *p = &tmp[i * DISFLOW_PATCH_SIZE + j];
+ int arr[4] = { p[-DISFLOW_PATCH_SIZE], p[0], p[DISFLOW_PATCH_SIZE],
+ p[2 * DISFLOW_PATCH_SIZE] };
+ const int result = get_cubic_value_int(arr, v_kernel);
+
+ // Apply kernel and round.
+ // This time, we have to round off the 6 extra bits which were kept
+ // earlier, but we also want to keep DISFLOW_DERIV_SCALE_LOG2 extra bits
+ // of precision to match the scale of the dx and dy arrays.
+ const int c_warped = ROUND_POWER_OF_TWO(result, round_bits);
+ const int c_src_px = src[(x + j) + (y + i) * stride] << 3;
+ const int c_dt = c_warped - c_src_px;
+
+ assert(dt_arr[j] == c_dt);
+
+ c_result[0] += dx[i * DISFLOW_PATCH_SIZE + j] * c_dt;
+ c_result[1] += dy[i * DISFLOW_PATCH_SIZE + j] * c_dt;
+ }
+#endif // CHECK_RESULTS
+ }
+
+ // Flatten the two sets of partial sums to find the final value of b
+ // We need to set b[0] = sum(b0_acc), b[1] = sum(b1_acc).
+ // We need to do 6 additions in total; a `hadd` instruction can take care
+ // of four of them, leaving two scalar additions.
+ __m128i partial_sum = _mm_hadd_epi32(b0_acc, b1_acc);
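+ // After the hadd, partial_sum holds { b0[0]+b0[1], b0[2]+b0[3],
+ // b1[0]+b1[1], b1[2]+b1[3] }, so each of b[0] and b[1] needs one more
+ // scalar addition.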
+ b[0] = _mm_extract_epi32(partial_sum, 0) + _mm_extract_epi32(partial_sum, 1);
+ b[1] = _mm_extract_epi32(partial_sum, 2) + _mm_extract_epi32(partial_sum, 3);
+
+#if CHECK_RESULTS
+ assert(b[0] == c_result[0]);
+ assert(b[1] == c_result[1]);
+#endif // CHECK_RESULTS
+}
+
+static INLINE void sobel_filter_x(const uint8_t *src, int src_stride,
+ int16_t *dst, int dst_stride) {
+ int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
+ int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;
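+ // The intermediate buffer has one extra row above and one below the
+ // patch, since the vertical pass below reads rows y - 1 .. y + 1.
+ // `tmp` points one row into the buffer, so the row at index
+ // -DISFLOW_PATCH_SIZE is still in bounds.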
+#if CHECK_RESULTS
+ const int taps = 3;
+#endif // CHECK_RESULTS
+
+ // Horizontal filter
+ // As the kernel is simply {1, 0, -1}, we implement this as simply
+ // out[x] = image[x-1] - image[x+1]
+ // rather than doing a "proper" convolution operation
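+ // Together with the vertical {1, 2, 1} smoothing pass below, this
+ // separable pair is equivalent to convolving with the full 3x3 Sobel
+ // x-gradient kernel (sign convention: left minus right):
+ // { 1, 0, -1 }
+ // { 2, 0, -2 }
+ // { 1, 0, -1 }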
+ for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) {
+ const uint8_t *src_row = src + y * src_stride;
+ int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
+
+ // Load pixels and expand to 16 bits
+ __m128i row = _mm_loadu_si128((__m128i *)(src_row - 1));
+ __m128i px0 = _mm_cvtepu8_epi16(row);
+ __m128i px2 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
+
+ __m128i out = _mm_sub_epi16(px0, px2);
+
+ // Store to intermediate array
+ _mm_storeu_si128((__m128i *)tmp_row, out);
+
+#if CHECK_RESULTS
+ // Cross-check
+ static const int16_t h_kernel[3] = { 1, 0, -1 };
+ for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
+ int sum = 0;
+ for (int k = 0; k < taps; ++k) {
+ sum += h_kernel[k] * src_row[x + k - 1];
+ }
+ (void)sum;
+ assert(tmp_row[x] == sum);
+ }
+#endif // CHECK_RESULTS
+ }
+
+ // Vertical filter
+ // Here the kernel is {1, 2, 1}, which can be implemented
+ // with simple sums rather than multiplies and adds.
+ // In order to minimize dependency chains, we evaluate in the order
+ // (image[y - 1] + image[y + 1]) + (image[y] << 1)
+ // This way, the first addition and the shift can happen in parallel
+ for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) {
+ const int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
+ int16_t *dst_row = dst + y * dst_stride;
+
+ __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
+ __m128i px1 = _mm_loadu_si128((__m128i *)tmp_row);
+ __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
+
+ __m128i out =
+ _mm_add_epi16(_mm_add_epi16(px0, px2), _mm_slli_epi16(px1, 1));
+
+ _mm_storeu_si128((__m128i *)dst_row, out);
+
+#if CHECK_RESULTS
+ static const int16_t v_kernel[3] = { 1, 2, 1 };
+ for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
+ int sum = 0;
+ for (int k = 0; k < taps; ++k) {
+ sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x];
+ }
+ (void)sum;
+ assert(dst_row[x] == sum);
+ }
+#endif // CHECK_RESULTS
+ }
+}
+
+static INLINE void sobel_filter_y(const uint8_t *src, int src_stride,
+ int16_t *dst, int dst_stride) {
+ int16_t tmp_[DISFLOW_PATCH_SIZE * (DISFLOW_PATCH_SIZE + 2)];
+ int16_t *tmp = tmp_ + DISFLOW_PATCH_SIZE;
+#if CHECK_RESULTS
+ const int taps = 3;
+#endif // CHECK_RESULTS
+
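+ // This function is the transpose of sobel_filter_x: the {1, 2, 1}
+ // smoothing is applied horizontally and the {1, 0, -1} difference
+ // vertically, and the intermediate buffer again carries one guard row
+ // above and below the patch.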
+ // Horizontal filter
+ // Here the kernel is {1, 2, 1}, which can be implemented
+ // with simple sums rather than multiplies and adds.
+ // In order to minimize dependency chains, we evaluate in the order
+ // (image[x - 1] + image[x + 1]) + (image[x] << 1)
+ // This way, the first addition and the shift can happen in parallel
+ for (int y = -1; y < DISFLOW_PATCH_SIZE + 1; ++y) {
+ const uint8_t *src_row = src + y * src_stride;
+ int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
+
+ // Load pixels and expand to 16 bits
+ __m128i row = _mm_loadu_si128((__m128i *)(src_row - 1));
+ __m128i px0 = _mm_cvtepu8_epi16(row);
+ __m128i px1 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 1));
+ __m128i px2 = _mm_cvtepu8_epi16(_mm_srli_si128(row, 2));
+
+ __m128i out =
+ _mm_add_epi16(_mm_add_epi16(px0, px2), _mm_slli_epi16(px1, 1));
+
+ // Store to intermediate array
+ _mm_storeu_si128((__m128i *)tmp_row, out);
+
+#if CHECK_RESULTS
+ // Cross-check
+ static const int16_t h_kernel[3] = { 1, 2, 1 };
+ for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
+ int sum = 0;
+ for (int k = 0; k < taps; ++k) {
+ sum += h_kernel[k] * src_row[x + k - 1];
+ }
+ (void)sum;
+ assert(tmp_row[x] == sum);
+ }
+#endif // CHECK_RESULTS
+ }
+
+ // Vertical filter
+ // As the kernel is simply {1, 0, -1}, we implement this as simply
+ // out[y] = image[y-1] - image[y+1]
+ // rather than doing a "proper" convolution operation
+ for (int y = 0; y < DISFLOW_PATCH_SIZE; ++y) {
+ const int16_t *tmp_row = tmp + y * DISFLOW_PATCH_SIZE;
+ int16_t *dst_row = dst + y * dst_stride;
+
+ __m128i px0 = _mm_loadu_si128((__m128i *)(tmp_row - DISFLOW_PATCH_SIZE));
+ __m128i px2 = _mm_loadu_si128((__m128i *)(tmp_row + DISFLOW_PATCH_SIZE));
+
+ __m128i out = _mm_sub_epi16(px0, px2);
+
+ _mm_storeu_si128((__m128i *)dst_row, out);
+
+#if CHECK_RESULTS
+ static const int16_t v_kernel[3] = { 1, 0, -1 };
+ for (int x = 0; x < DISFLOW_PATCH_SIZE; ++x) {
+ int sum = 0;
+ for (int k = 0; k < taps; ++k) {
+ sum += v_kernel[k] * tmp[(y + k - 1) * DISFLOW_PATCH_SIZE + x];
+ }
+ (void)sum;
+ assert(dst_row[x] == sum);
+ }
+#endif // CHECK_RESULTS
+ }
+}
+
+static INLINE void compute_flow_matrix(const int16_t *dx, int dx_stride,
+ const int16_t *dy, int dy_stride,
+ double *M) {
+ __m128i acc[4] = { 0 };
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ __m128i dx_row = _mm_loadu_si128((__m128i *)&dx[i * dx_stride]);
+ __m128i dy_row = _mm_loadu_si128((__m128i *)&dy[i * dy_stride]);
+
+ acc[0] = _mm_add_epi32(acc[0], _mm_madd_epi16(dx_row, dx_row));
+ acc[1] = _mm_add_epi32(acc[1], _mm_madd_epi16(dx_row, dy_row));
+ // Don't compute acc[2], as it should be equal to acc[1]
+ acc[3] = _mm_add_epi32(acc[3], _mm_madd_epi16(dy_row, dy_row));
+ }
+
+ // Condense sums
+ __m128i partial_sum_0 = _mm_hadd_epi32(acc[0], acc[1]);
+ __m128i partial_sum_1 = _mm_hadd_epi32(acc[1], acc[3]);
+ __m128i result = _mm_hadd_epi32(partial_sum_0, partial_sum_1);
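+ // After the three hadds, `result` holds the 2x2 matrix in row-major
+ // order: { sum(dx * dx), sum(dx * dy), sum(dx * dy), sum(dy * dy) }
+ // in lanes 0..3 (cross-checked against the scalar sums below).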
+
+ // Apply regularization
+ // We follow the standard regularization method of adding `k * I` before
+ // inverting. This ensures that the matrix will be invertible.
+ //
+ // Setting the regularization strength k to 1 seems to work well here, as
+ // the unregularized matrix entries are typically very large (1e5 to
+ // 1e6, with an upper limit of around 6e7, at the time of writing).
+ // It also preserves the property that all matrix values are whole numbers,
+ // which is convenient for integerized SIMD implementation.
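+ // Note: _mm_set_epi32 takes its arguments highest-lane first, so this
+ // adds 1 to lanes 0 and 3, i.e. to the diagonal entries M[0] and M[3].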
+ result = _mm_add_epi32(result, _mm_set_epi32(1, 0, 0, 1));
+
+#if CHECK_RESULTS
+ int tmp[4] = { 0 };
+
+ for (int i = 0; i < DISFLOW_PATCH_SIZE; i++) {
+ for (int j = 0; j < DISFLOW_PATCH_SIZE; j++) {
+ tmp[0] += dx[i * dx_stride + j] * dx[i * dx_stride + j];
+ tmp[1] += dx[i * dx_stride + j] * dy[i * dy_stride + j];
+ // Don't compute tmp[2], as it should be equal to tmp[1]
+ tmp[3] += dy[i * dy_stride + j] * dy[i * dy_stride + j];
+ }
+ }
+
+ // Apply regularization
+ tmp[0] += 1;
+ tmp[3] += 1;
+
+ tmp[2] = tmp[1];
+
+ assert(tmp[0] == _mm_extract_epi32(result, 0));
+ assert(tmp[1] == _mm_extract_epi32(result, 1));
+ assert(tmp[2] == _mm_extract_epi32(result, 2));
+ assert(tmp[3] == _mm_extract_epi32(result, 3));
+#endif // CHECK_RESULTS
+
+ // Convert results to doubles and store
+ _mm_storeu_pd(M, _mm_cvtepi32_pd(result));
+ _mm_storeu_pd(M + 2, _mm_cvtepi32_pd(_mm_srli_si128(result, 8)));
+}
+
+// Try to invert the matrix M
+// Note: Due to the nature of how a least-squares matrix is constructed, all of
+// the eigenvalues will be >= 0, and therefore det M >= 0 as well.
+// The regularization term `+ k * I` further ensures that det M >= k^2.
+// As mentioned in compute_flow_matrix(), here we use k = 1, so det M >= 1.
+// So we don't have to worry about non-invertible matrices here.
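+ // For M = [ a, b; c, d ], the inverse is
+ // M_inv = (1 / det) * [ d, -b; -c, a ], with det = a * d - b * c.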
+static INLINE void invert_2x2(const double *M, double *M_inv) {
+ double det = (M[0] * M[3]) - (M[1] * M[2]);
+ assert(det >= 1);
+ const double det_inv = 1 / det;
+
+ M_inv[0] = M[3] * det_inv;
+ M_inv[1] = -M[1] * det_inv;
+ M_inv[2] = -M[2] * det_inv;
+ M_inv[3] = M[0] * det_inv;
+}
+
+void aom_compute_flow_at_point_sse4_1(const uint8_t *src, const uint8_t *ref,
+ int x, int y, int width, int height,
+ int stride, double *u, double *v) {
+ double M[4];
+ double M_inv[4];
+ int b[2];
+ int16_t dx[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
+ int16_t dy[DISFLOW_PATCH_SIZE * DISFLOW_PATCH_SIZE];
+
+ // Compute gradients within this patch
+ const uint8_t *src_patch = &src[y * stride + x];
+ sobel_filter_x(src_patch, stride, dx, DISFLOW_PATCH_SIZE);
+ sobel_filter_y(src_patch, stride, dy, DISFLOW_PATCH_SIZE);
+
+ compute_flow_matrix(dx, DISFLOW_PATCH_SIZE, dy, DISFLOW_PATCH_SIZE, M);
+ invert_2x2(M, M_inv);
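+ // The matrix M depends only on the source patch, so M and its inverse
+ // can be computed once here and reused for every iteration below.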
+
+ for (int itr = 0; itr < DISFLOW_MAX_ITR; itr++) {
+ compute_flow_vector(src, ref, width, height, stride, x, y, *u, *v, dx, dy,
+ b);
+
+ // Solve flow equations to find a better estimate for the flow vector
+ // at this point
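+ // i.e. solve the 2x2 system M * step = b using the precomputed
+ // inverse. The step is scaled by DISFLOW_STEP_SIZE, and each
+ // per-iteration update is clamped to +/-2 pixels to keep the
+ // refinement stable.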
+ const double step_u = M_inv[0] * b[0] + M_inv[1] * b[1];
+ const double step_v = M_inv[2] * b[0] + M_inv[3] * b[1];
+ *u += fclamp(step_u * DISFLOW_STEP_SIZE, -2, 2);
+ *v += fclamp(step_v * DISFLOW_STEP_SIZE, -2, 2);
+
+ if (fabs(step_u) + fabs(step_v) < DISFLOW_STEP_SIZE_THRESOLD) {
+ // Stop iteration when we're close to convergence
+ break;
+ }
+ }
+}