/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#ifndef AOM_AOM_DSP_ARM_BLEND_NEON_H_
#define AOM_AOM_DSP_ARM_BLEND_NEON_H_

#include <arm_neon.h>

#include "aom_dsp/blend.h"

static INLINE uint8x16_t alpha_blend_a64_u8x16(uint8x16_t m, uint8x16_t a,
                                               uint8x16_t b) {
  const uint8x16_t m_inv = vsubq_u8(vdupq_n_u8(AOM_BLEND_A64_MAX_ALPHA), m);

  uint16x8_t blend_u16_lo = vmull_u8(vget_low_u8(m), vget_low_u8(a));
  uint16x8_t blend_u16_hi = vmull_u8(vget_high_u8(m), vget_high_u8(a));

  blend_u16_lo = vmlal_u8(blend_u16_lo, vget_low_u8(m_inv), vget_low_u8(b));
  blend_u16_hi = vmlal_u8(blend_u16_hi, vget_high_u8(m_inv), vget_high_u8(b));

  uint8x8_t blend_u8_lo = vrshrn_n_u16(blend_u16_lo, AOM_BLEND_A64_ROUND_BITS);
  uint8x8_t blend_u8_hi = vrshrn_n_u16(blend_u16_hi, AOM_BLEND_A64_ROUND_BITS);

  return vcombine_u8(blend_u8_lo, blend_u8_hi);
}
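
// Usage sketch (illustrative only: `mask`, `src0`, `src1` and `dst` are
// hypothetical caller-side pointers, not part of this header), blending one
// 16-pixel run of 8-bit pixels; alpha_blend_a64_u8x8 below is the analogous
// helper for 8-pixel runs:
//
//   const uint8x16_t m = vld1q_u8(mask);
//   const uint8x16_t a = vld1q_u8(src0);
//   const uint8x16_t b = vld1q_u8(src1);
//   vst1q_u8(dst, alpha_blend_a64_u8x16(m, a, b));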

static INLINE uint8x8_t alpha_blend_a64_u8x8(uint8x8_t m, uint8x8_t a,
                                             uint8x8_t b) {
  const uint8x8_t m_inv = vsub_u8(vdup_n_u8(AOM_BLEND_A64_MAX_ALPHA), m);

  uint16x8_t blend_u16 = vmull_u8(m, a);

  blend_u16 = vmlal_u8(blend_u16, m_inv, b);

  return vrshrn_n_u16(blend_u16, AOM_BLEND_A64_ROUND_BITS);
}

#if CONFIG_AV1_HIGHBITDEPTH
static INLINE uint16x8_t alpha_blend_a64_u16x8(uint16x8_t m, uint16x8_t a,
                                               uint16x8_t b) {
  const uint16x8_t m_inv = vsubq_u16(vdupq_n_u16(AOM_BLEND_A64_MAX_ALPHA), m);

  uint32x4_t blend_u32_lo = vmull_u16(vget_low_u16(a), vget_low_u16(m));
  uint32x4_t blend_u32_hi = vmull_u16(vget_high_u16(a), vget_high_u16(m));

  blend_u32_lo = vmlal_u16(blend_u32_lo, vget_low_u16(b), vget_low_u16(m_inv));
  blend_u32_hi =
      vmlal_u16(blend_u32_hi, vget_high_u16(b), vget_high_u16(m_inv));

  uint16x4_t blend_u16_lo =
      vrshrn_n_u32(blend_u32_lo, AOM_BLEND_A64_ROUND_BITS);
  uint16x4_t blend_u16_hi =
      vrshrn_n_u32(blend_u32_hi, AOM_BLEND_A64_ROUND_BITS);

  return vcombine_u16(blend_u16_lo, blend_u16_hi);
}

static INLINE uint16x4_t alpha_blend_a64_u16x4(uint16x4_t m, uint16x4_t a,
                                               uint16x4_t b) {
  const uint16x4_t m_inv = vsub_u16(vdup_n_u16(AOM_BLEND_A64_MAX_ALPHA), m);

  uint32x4_t blend_u32 = vmull_u16(m, a);

  blend_u32 = vmlal_u16(blend_u32, m_inv, b);

  return vrshrn_n_u32(blend_u32, AOM_BLEND_A64_ROUND_BITS);
}
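
// High bitdepth usage sketch (illustrative only: `mask`, `src0`, `src1` and
// `dst` are hypothetical uint16_t pointers holding 10- or 12-bit samples),
// blending one 8-pixel run; alpha_blend_a64_u16x4 above is the analogous
// helper for 4-pixel runs:
//
//   const uint16x8_t m = vld1q_u16(mask);
//   const uint16x8_t a = vld1q_u16(src0);
//   const uint16x8_t b = vld1q_u16(src1);
//   vst1q_u16(dst, alpha_blend_a64_u16x8(m, a, b));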
#endif  // CONFIG_AV1_HIGHBITDEPTH

static INLINE uint8x8_t avg_blend_u8x8(uint8x8_t a, uint8x8_t b) {
  return vrhadd_u8(a, b);
}
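
// vrhadd_u8 (and vrhaddq_u8 below) computes the rounding average
// (a[i] + b[i] + 1) >> 1 per lane, i.e. the vector form of the scalar
// AOM_BLEND_AVG macro in aom_dsp/blend.h.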

static INLINE uint8x16_t avg_blend_u8x16(uint8x16_t a, uint8x16_t b) {
  return vrhaddq_u8(a, b);
}

static INLINE uint8x8_t avg_blend_pairwise_u8x8(uint8x8_t a, uint8x8_t b) {
  return vrshr_n_u8(vpadd_u8(a, b), 1);
}
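
// Note: vpadd_u8 wraps modulo 256, so avg_blend_pairwise_u8x8 (and the x16
// variant below) is only exact when each adjacent input pair sums to at most
// 255. That holds for the intended use on blend masks, whose values do not
// exceed AOM_BLEND_A64_MAX_ALPHA (64).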

static INLINE uint8x16_t avg_blend_pairwise_u8x16(uint8x16_t a, uint8x16_t b) {
#if AOM_ARCH_AARCH64
  return vrshrq_n_u8(vpaddq_u8(a, b), 1);
#else
  uint8x8_t sum_pairwise_a = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
  uint8x8_t sum_pairwise_b = vpadd_u8(vget_low_u8(b), vget_high_u8(b));
  return vrshrq_n_u8(vcombine_u8(sum_pairwise_a, sum_pairwise_b), 1);
#endif  // AOM_ARCH_AARCH64
}
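
// vpaddq_u8 is an AArch64-only intrinsic; the AArch32 path in
// avg_blend_pairwise_u8x16 above synthesizes the same 16-lane pairwise sum
// from two 8-lane vpadd_u8 operations.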

static INLINE uint8x8_t avg_blend_pairwise_u8x8_4(uint8x8_t a, uint8x8_t b,
                                                  uint8x8_t c, uint8x8_t d) {
  uint8x8_t a_c = vpadd_u8(a, c);
  uint8x8_t b_d = vpadd_u8(b, d);
  return vrshr_n_u8(vqadd_u8(a_c, b_d), 2);
}
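
// The saturating vqadd_u8 in the *_4 helpers matters at the top of the mask
// range: four values of AOM_BLEND_A64_MAX_ALPHA (64) sum to 256, which a
// plain vadd_u8 would wrap to 0. Saturating to 255 still yields the correct
// rounded average, since vrshr_n_u8 gives (255 + 2) >> 2 == 64, the same
// result a true sum of 256 would produce.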

static INLINE uint8x16_t avg_blend_pairwise_u8x16_4(uint8x16_t a, uint8x16_t b,
                                                    uint8x16_t c,
                                                    uint8x16_t d) {
#if AOM_ARCH_AARCH64
  uint8x16_t a_c = vpaddq_u8(a, c);
  uint8x16_t b_d = vpaddq_u8(b, d);
  return vrshrq_n_u8(vqaddq_u8(a_c, b_d), 2);
#else
  uint8x8_t sum_pairwise_a = vpadd_u8(vget_low_u8(a), vget_high_u8(a));
  uint8x8_t sum_pairwise_b = vpadd_u8(vget_low_u8(b), vget_high_u8(b));
  uint8x8_t sum_pairwise_c = vpadd_u8(vget_low_u8(c), vget_high_u8(c));
  uint8x8_t sum_pairwise_d = vpadd_u8(vget_low_u8(d), vget_high_u8(d));
  uint8x16_t a_c = vcombine_u8(sum_pairwise_a, sum_pairwise_c);
  uint8x16_t b_d = vcombine_u8(sum_pairwise_b, sum_pairwise_d);
  return vrshrq_n_u8(vqaddq_u8(a_c, b_d), 2);
#endif  // AOM_ARCH_AARCH64
}

#endif  // AOM_AOM_DSP_ARM_BLEND_NEON_H_