path: root/third_party/aom/aom_dsp/x86/quantize_avx2.c

/*
 * Copyright (c) 2022, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <immintrin.h>

#include "config/aom_dsp_rtcd.h"

#include "aom/aom_integer.h"
#include "aom_dsp/x86/quantize_x86.h"

static INLINE void load_b_values_avx2(const int16_t *zbin_ptr, __m256i *zbin,
                                      const int16_t *round_ptr, __m256i *round,
                                      const int16_t *quant_ptr, __m256i *quant,
                                      const int16_t *dequant_ptr,
                                      __m256i *dequant,
                                      const int16_t *shift_ptr, __m256i *shift,
                                      int log_scale) {
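  // Each input table stores the DC constant at index 0 followed by the AC
  // constant (the tables are at least 8 entries, with AC replicated). The
  // 0x54 qword permute below produces [q0 q1 q1 q1], so lane 0 of each
  // 256-bit vector holds the DC value and lanes 1..15 hold the AC value,
  // matching the first block of 16 coefficients (DC plus 15 AC). The cast
  // leaves the upper 128 bits undefined, but 0x54 only reads q0 and q1.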
  *zbin = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)zbin_ptr));
  *zbin = _mm256_permute4x64_epi64(*zbin, 0x54);
  if (log_scale > 0) {
    const __m256i rnd = _mm256_set1_epi16((int16_t)(1 << (log_scale - 1)));
    *zbin = _mm256_add_epi16(*zbin, rnd);
    *zbin = _mm256_srai_epi16(*zbin, log_scale);
  }
  // Subtracting 1 here eliminates a _mm256_cmpeq_epi16() instruction when
  // calculating the zbin mask (see quantize_b_logscale0_16() and
  // quantize_b_logscale_16()).
  *zbin = _mm256_sub_epi16(*zbin, _mm256_set1_epi16(1));

  *round = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)round_ptr));
  *round = _mm256_permute4x64_epi64(*round, 0x54);
  if (log_scale > 0) {
    const __m256i rnd = _mm256_set1_epi16((int16_t)(1 << (log_scale - 1)));
    *round = _mm256_add_epi16(*round, rnd);
    *round = _mm256_srai_epi16(*round, log_scale);
  }

  *quant = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)quant_ptr));
  *quant = _mm256_permute4x64_epi64(*quant, 0x54);
  *dequant =
      _mm256_castsi128_si256(_mm_load_si128((const __m128i *)dequant_ptr));
  *dequant = _mm256_permute4x64_epi64(*dequant, 0x54);
  *shift = _mm256_castsi128_si256(_mm_load_si128((const __m128i *)shift_ptr));
  *shift = _mm256_permute4x64_epi64(*shift, 0x54);
}

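// Pack two vectors of 8 32-bit coefficients down to 16 bits each. Note that
// _mm256_packs_epi32() packs within 128-bit lanes, so the result is ordered
// 0-3, 8-11, 4-7, 12-15. get_max_lane_eob() compensates by permuting iscan
// with 0xD8, and store_coefficients_avx2() undoes the interleave because
// unpacklo/unpackhi also operate within 128-bit lanes.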
static INLINE __m256i load_coefficients_avx2(const tran_low_t *coeff_ptr) {
  const __m256i coeff1 = _mm256_load_si256((__m256i *)coeff_ptr);
  const __m256i coeff2 = _mm256_load_si256((__m256i *)(coeff_ptr + 8));
  return _mm256_packs_epi32(coeff1, coeff2);
}

static INLINE void store_coefficients_avx2(__m256i coeff_vals,
                                           tran_low_t *coeff_ptr) {
  __m256i coeff_sign = _mm256_srai_epi16(coeff_vals, 15);
  __m256i coeff_vals_lo = _mm256_unpacklo_epi16(coeff_vals, coeff_sign);
  __m256i coeff_vals_hi = _mm256_unpackhi_epi16(coeff_vals, coeff_sign);
  _mm256_store_si256((__m256i *)coeff_ptr, coeff_vals_lo);
  _mm256_store_si256((__m256i *)(coeff_ptr + 8), coeff_vals_hi);
}

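// Quantize one block of 16 coefficients with log_scale == 0. Roughly, each
// lane performs the following scalar math (a sketch only; sign handling and
// saturation are elided):
//   if (abs(coeff) > zbin) {
//     tmp     = abs(coeff) + round;
//     qcoeff  = ((((tmp * quant) >> 16) + tmp) * quant_shift) >> 16;
//     dqcoeff = qcoeff * dequant;
//   } else {
//     qcoeff = dqcoeff = 0;
//   }
// If no lane exceeds its zbin, the whole block is stored as zeros and the
// multiplies are skipped.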
static AOM_FORCE_INLINE __m256i quantize_b_logscale0_16(
    const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
    tran_low_t *dqcoeff_ptr, __m256i *v_quant, __m256i *v_dequant,
    __m256i *v_round, __m256i *v_zbin, __m256i *v_quant_shift) {
  const __m256i v_coeff = load_coefficients_avx2(coeff_ptr);
  const __m256i v_abs_coeff = _mm256_abs_epi16(v_coeff);
  const __m256i v_zbin_mask = _mm256_cmpgt_epi16(v_abs_coeff, *v_zbin);

  if (_mm256_movemask_epi8(v_zbin_mask) == 0) {
    _mm256_store_si256((__m256i *)qcoeff_ptr, _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)dqcoeff_ptr, _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), _mm256_setzero_si256());
    return _mm256_setzero_si256();
  }

  // tmp = v_zbin_mask ? abs_coeff + round : 0 (saturating 16-bit add)
  const __m256i v_tmp_rnd =
      _mm256_and_si256(_mm256_adds_epi16(v_abs_coeff, *v_round), v_zbin_mask);
  //  tmp32 = (int)(((((tmp * quant_ptr[rc != 0]) >> 16) + tmp) *
  //                 quant_shift_ptr[rc != 0]) >>
  //                16);  // i.e. (16 - log_scale) with log_scale == 0
  const __m256i v_tmp32_a = _mm256_mulhi_epi16(v_tmp_rnd, *v_quant);
  const __m256i v_tmp32_b = _mm256_add_epi16(v_tmp32_a, v_tmp_rnd);
  const __m256i v_tmp32 = _mm256_mulhi_epi16(v_tmp32_b, *v_quant_shift);
  const __m256i v_nz_mask = _mm256_cmpgt_epi16(v_tmp32, _mm256_setzero_si256());
  const __m256i v_qcoeff = _mm256_sign_epi16(v_tmp32, v_coeff);
  const __m256i v_dqcoeff = _mm256_mullo_epi16(v_qcoeff, *v_dequant);
  store_coefficients_avx2(v_qcoeff, qcoeff_ptr);
  store_coefficients_avx2(v_dqcoeff, dqcoeff_ptr);
  return v_nz_mask;
}

static INLINE __m256i get_max_lane_eob(const int16_t *iscan, __m256i v_eobmax,
                                       __m256i v_mask) {
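  // v_mask is all-ones in lanes whose quantized value is nonzero. The 0xD8
  // permute reorders iscan into the same interleaved order as the packed
  // coefficients, and subtracting the mask (-1 or 0) converts iscan indices
  // to counts, so the running max tracks "last nonzero position + 1".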
  const __m256i v_iscan = _mm256_loadu_si256((const __m256i *)iscan);
  const __m256i v_iscan_perm = _mm256_permute4x64_epi64(v_iscan, 0xD8);
  const __m256i v_iscan_plus1 = _mm256_sub_epi16(v_iscan_perm, v_mask);
  const __m256i v_nz_iscan = _mm256_and_si256(v_iscan_plus1, v_mask);
  return _mm256_max_epi16(v_eobmax, v_nz_iscan);
}

static INLINE int16_t accumulate_eob256(__m256i eob256) {
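  // Horizontal max across all 16 lanes: fold 256 bits down to 128, then keep
  // halving the active width with shuffles; lanes 0 and 1 both end up
  // holding the maximum.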
  const __m128i eob_lo = _mm256_castsi256_si128(eob256);
  const __m128i eob_hi = _mm256_extracti128_si256(eob256, 1);
  __m128i eob = _mm_max_epi16(eob_lo, eob_hi);
  __m128i eob_shuffled = _mm_shuffle_epi32(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  eob_shuffled = _mm_shufflelo_epi16(eob, 0xe);
  eob = _mm_max_epi16(eob, eob_shuffled);
  eob_shuffled = _mm_shufflelo_epi16(eob, 0x1);
  eob = _mm_max_epi16(eob, eob_shuffled);
  return _mm_extract_epi16(eob, 1);
}

void aom_quantize_b_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                         const int16_t *zbin_ptr, const int16_t *round_ptr,
                         const int16_t *quant_ptr,
                         const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
                         tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr,
                         uint16_t *eob_ptr, const int16_t *scan,
                         const int16_t *iscan) {
  (void)scan;
  __m256i v_zbin, v_round, v_quant, v_dequant, v_quant_shift;
  __m256i v_eobmax = _mm256_setzero_si256();

  load_b_values_avx2(zbin_ptr, &v_zbin, round_ptr, &v_round, quant_ptr,
                     &v_quant, dequant_ptr, &v_dequant, quant_shift_ptr,
                     &v_quant_shift, 0);

  // Do DC and first 15 AC.
  __m256i v_nz_mask =
      quantize_b_logscale0_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, &v_quant,
                              &v_dequant, &v_round, &v_zbin, &v_quant_shift);

  v_eobmax = get_max_lane_eob(iscan, v_eobmax, v_nz_mask);

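  // All remaining blocks use only AC values; unpackhi_epi64 duplicates the
  // upper qword of each 128-bit half, leaving every lane with the AC
  // constant.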
  v_round = _mm256_unpackhi_epi64(v_round, v_round);
  v_quant = _mm256_unpackhi_epi64(v_quant, v_quant);
  v_dequant = _mm256_unpackhi_epi64(v_dequant, v_dequant);
  v_quant_shift = _mm256_unpackhi_epi64(v_quant_shift, v_quant_shift);
  v_zbin = _mm256_unpackhi_epi64(v_zbin, v_zbin);

  for (intptr_t count = n_coeffs - 16; count > 0; count -= 16) {
    coeff_ptr += 16;
    qcoeff_ptr += 16;
    dqcoeff_ptr += 16;
    iscan += 16;
    v_nz_mask =
        quantize_b_logscale0_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr, &v_quant,
                                &v_dequant, &v_round, &v_zbin, &v_quant_shift);

    v_eobmax = get_max_lane_eob(iscan, v_eobmax, v_nz_mask);
  }

  *eob_ptr = accumulate_eob256(v_eobmax);
}

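// Same flow as quantize_b_logscale0_16, generalized to log_scale 1 (32x32)
// and 2 (64x64). A sketch of the per-lane scalar math (sign handling
// elided):
//   qcoeff  = ((((tmp * quant) >> 16) + tmp) * quant_shift) >>
//             (16 - log_scale);
//   dqcoeff = (qcoeff * dequant) >> log_scale;
// Because the right shifts are now by less than 16 bits, the results no
// longer come straight from _mm256_mulhi_epi16(); each product is
// reassembled from its high and low 16-bit halves below.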
static AOM_FORCE_INLINE __m256i quantize_b_logscale_16(
    const tran_low_t *coeff_ptr, tran_low_t *qcoeff_ptr,
    tran_low_t *dqcoeff_ptr, __m256i *v_quant, __m256i *v_dequant,
    __m256i *v_round, __m256i *v_zbin, __m256i *v_quant_shift, int log_scale) {
  const __m256i v_coeff = load_coefficients_avx2(coeff_ptr);
  const __m256i v_abs_coeff = _mm256_abs_epi16(v_coeff);
  const __m256i v_zbin_mask = _mm256_cmpgt_epi16(v_abs_coeff, *v_zbin);

  if (_mm256_movemask_epi8(v_zbin_mask) == 0) {
    _mm256_store_si256((__m256i *)qcoeff_ptr, _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)dqcoeff_ptr, _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)(qcoeff_ptr + 8), _mm256_setzero_si256());
    _mm256_store_si256((__m256i *)(dqcoeff_ptr + 8), _mm256_setzero_si256());
    return _mm256_setzero_si256();
  }

  // tmp = v_zbin_mask ? (int64_t)abs_coeff + log_scaled_round : 0
  const __m256i v_tmp_rnd =
      _mm256_and_si256(_mm256_adds_epi16(v_abs_coeff, *v_round), v_zbin_mask);
  //  tmp32 = (int)(((((tmp * quant_ptr[rc != 0]) >> 16) + tmp) *
  //                 quant_shift_ptr[rc != 0]) >>
  //                (16 - log_scale));  // no qmatrix, so no AOM_QM_BITS term
  const __m256i v_tmp32_a = _mm256_mulhi_epi16(v_tmp_rnd, *v_quant);
  const __m256i v_tmp32_b = _mm256_add_epi16(v_tmp32_a, v_tmp_rnd);
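  // For a 32-bit product P, P >> (16 - log_scale) is rebuilt from 16-bit
  // halves as (P_hi << log_scale) | (P_lo >> (16 - log_scale)).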
  const __m256i v_tmp32_hi = _mm256_slli_epi16(
      _mm256_mulhi_epi16(v_tmp32_b, *v_quant_shift), log_scale);
  const __m256i v_tmp32_lo = _mm256_srli_epi16(
      _mm256_mullo_epi16(v_tmp32_b, *v_quant_shift), 16 - log_scale);
  const __m256i v_tmp32 = _mm256_or_si256(v_tmp32_hi, v_tmp32_lo);
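  // Likewise P >> log_scale is (P_hi << (16 - log_scale)) | (P_lo >> log_scale).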
  const __m256i v_dqcoeff_hi = _mm256_slli_epi16(
      _mm256_mulhi_epi16(v_tmp32, *v_dequant), 16 - log_scale);
  const __m256i v_dqcoeff_lo =
      _mm256_srli_epi16(_mm256_mullo_epi16(v_tmp32, *v_dequant), log_scale);
  const __m256i v_dqcoeff =
      _mm256_sign_epi16(_mm256_or_si256(v_dqcoeff_hi, v_dqcoeff_lo), v_coeff);
  const __m256i v_qcoeff = _mm256_sign_epi16(v_tmp32, v_coeff);
  const __m256i v_nz_mask = _mm256_cmpgt_epi16(v_tmp32, _mm256_setzero_si256());
  store_coefficients_avx2(v_qcoeff, qcoeff_ptr);
  store_coefficients_avx2(v_dqcoeff, dqcoeff_ptr);
  return v_nz_mask;
}

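// Shared driver for the scaled, no-quantization-matrix paths. The 32x32 and
// 64x64 entry points below call this with log_scale 1 and 2 respectively.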
static AOM_FORCE_INLINE void quantize_b_no_qmatrix_avx2(
    const tran_low_t *coeff_ptr, intptr_t n_coeffs, const int16_t *zbin_ptr,
    const int16_t *round_ptr, const int16_t *quant_ptr,
    const int16_t *quant_shift_ptr, tran_low_t *qcoeff_ptr,
    tran_low_t *dqcoeff_ptr, const int16_t *dequant_ptr, uint16_t *eob_ptr,
    const int16_t *iscan, int log_scale) {
  __m256i v_zbin, v_round, v_quant, v_dequant, v_quant_shift;
  __m256i v_eobmax = _mm256_setzero_si256();

  load_b_values_avx2(zbin_ptr, &v_zbin, round_ptr, &v_round, quant_ptr,
                     &v_quant, dequant_ptr, &v_dequant, quant_shift_ptr,
                     &v_quant_shift, log_scale);

  // Do DC and first 15 AC.
  __m256i v_nz_mask = quantize_b_logscale_16(
      coeff_ptr, qcoeff_ptr, dqcoeff_ptr, &v_quant, &v_dequant, &v_round,
      &v_zbin, &v_quant_shift, log_scale);

  v_eobmax = get_max_lane_eob(iscan, v_eobmax, v_nz_mask);

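  // Switch every vector to its all-AC form for the remaining blocks, as in
  // aom_quantize_b_avx2().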
  v_round = _mm256_unpackhi_epi64(v_round, v_round);
  v_quant = _mm256_unpackhi_epi64(v_quant, v_quant);
  v_dequant = _mm256_unpackhi_epi64(v_dequant, v_dequant);
  v_quant_shift = _mm256_unpackhi_epi64(v_quant_shift, v_quant_shift);
  v_zbin = _mm256_unpackhi_epi64(v_zbin, v_zbin);

  for (intptr_t count = n_coeffs - 16; count > 0; count -= 16) {
    coeff_ptr += 16;
    qcoeff_ptr += 16;
    dqcoeff_ptr += 16;
    iscan += 16;
    v_nz_mask = quantize_b_logscale_16(coeff_ptr, qcoeff_ptr, dqcoeff_ptr,
                                       &v_quant, &v_dequant, &v_round, &v_zbin,
                                       &v_quant_shift, log_scale);

    v_eobmax = get_max_lane_eob(iscan, v_eobmax, v_nz_mask);
  }

  *eob_ptr = accumulate_eob256(v_eobmax);
}

void aom_quantize_b_32x32_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                               const int16_t *zbin_ptr,
                               const int16_t *round_ptr,
                               const int16_t *quant_ptr,
                               const int16_t *quant_shift_ptr,
                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
                               const int16_t *scan, const int16_t *iscan) {
  (void)scan;
  quantize_b_no_qmatrix_avx2(coeff_ptr, n_coeffs, zbin_ptr, round_ptr,
                             quant_ptr, quant_shift_ptr, qcoeff_ptr,
                             dqcoeff_ptr, dequant_ptr, eob_ptr, iscan, 1);
}

void aom_quantize_b_64x64_avx2(const tran_low_t *coeff_ptr, intptr_t n_coeffs,
                               const int16_t *zbin_ptr,
                               const int16_t *round_ptr,
                               const int16_t *quant_ptr,
                               const int16_t *quant_shift_ptr,
                               tran_low_t *qcoeff_ptr, tran_low_t *dqcoeff_ptr,
                               const int16_t *dequant_ptr, uint16_t *eob_ptr,
                               const int16_t *scan, const int16_t *iscan) {
  (void)scan;
  quantize_b_no_qmatrix_avx2(coeff_ptr, n_coeffs, zbin_ptr, round_ptr,
                             quant_ptr, quant_shift_ptr, qcoeff_ptr,
                             dqcoeff_ptr, dequant_ptr, eob_ptr, iscan, 2);
}