/*
* Copyright (c) 2018, Alliance for Open Media. All rights reserved
*
* This source code is subject to the terms of the BSD 2 Clause License and
* the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
* was not distributed with this source code in the LICENSE file, you can
* obtain it at www.aomedia.org/license/software. If the Alliance for Open
* Media Patent License 1.0 was not distributed with this source code in the
* PATENTS file, you can obtain it at www.aomedia.org/license/patent.
*/
#include <assert.h>
#include <emmintrin.h>  // SSE2
#include <smmintrin.h>  // SSE4.1
#include <immintrin.h>  // AVX2

#include "aom/aom_integer.h"
#include "aom_dsp/x86/mem_sse2.h"
#include "aom_dsp/x86/synonyms.h"
#include "aom_dsp/x86/synonyms_avx2.h"
#include "av1/common/onyxc_int.h"
#include "av1/common/txb_common.h"
void av1_txb_init_levels_avx2(const tran_low_t *const coeff, const int width,
                              const int height, uint8_t *const levels) {
  const int stride = width + TX_PAD_HOR;
  const __m256i y_zeros = _mm256_setzero_si256();
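
  // Zero the TX_PAD_TOP padding rows above the block. The stores always
  // write whole 32-byte registers, so the last one may run past pre_buf_end
  // into the body of `levels`; that region is overwritten by the main loop
  // below, so the overrun is harmless.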
  const int32_t pre_len = sizeof(*levels) * TX_PAD_TOP * stride;
  uint8_t *pre_buf = levels - TX_PAD_TOP * stride;
  uint8_t *pre_buf_end = pre_buf + pre_len;
  do {
    yy_storeu_256(pre_buf, y_zeros);
    pre_buf += 32;
  } while (pre_buf < pre_buf_end);
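
  // Zero the TX_PAD_BOTTOM padding rows below the block. The start address is
  // rounded down so that whole 32-byte stores end exactly at bottom_buf_end;
  // any extra bytes zeroed before the padding fall inside the body of
  // `levels` and are likewise overwritten by the main loop.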
  const int32_t bottom_len = sizeof(*levels) * (TX_PAD_BOTTOM * stride);
  uint8_t *bottom_buf_end = levels + (height + TX_PAD_BOTTOM) * stride;
  uint8_t *bottom_buf = bottom_buf_end - ((bottom_len + 31) & (~31));
  do {
    yy_storeu_256(bottom_buf, y_zeros);
    bottom_buf += 32;
  } while (bottom_buf < bottom_buf_end);

  int i = 0;
  uint8_t *ls = levels;
  const tran_low_t *cf = coeff;
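
  // Lane note: _mm256_packs_epi32 and _mm256_packs_epi16 pack within each
  // 128-bit lane independently, which interleaves the rows. The shuffle /
  // permute steps with control 0xd8 (element order 0, 2, 1, 3) undo that
  // interleave; the signed packing also saturates each absolute value to
  // INT8_MAX.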
  if (width == 4) {
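    // Sixteen coefficients (four 4-wide rows) per iteration. Packing the
    // 8-bit absolute values against y_zeros supplies the TX_PAD_HOR zero
    // columns, so after reordering, a single 32-byte store writes four
    // complete padded rows (stride is 8 in this case).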
    do {
      const __m256i c0 = yy_loadu_256(cf);
      const __m256i c1 = yy_loadu_256(cf + 8);
      const __m256i abs01 = _mm256_abs_epi16(_mm256_packs_epi32(c0, c1));
      const __m256i abs01_8 = _mm256_packs_epi16(abs01, y_zeros);
      const __m256i res_ = _mm256_shuffle_epi32(abs01_8, 0xd8);
      const __m256i res = _mm256_permute4x64_epi64(res_, 0xd8);
      yy_storeu_256(ls, res);
      ls += 32;
      cf += 16;
      i += 4;
    } while (i < height);
  } else if (width == 8) {
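    // Four 8-wide rows (32 coefficients) per iteration. After reordering,
    // `res` holds the four rows as consecutive 8-byte groups: each group is
    // written with a 64-bit store, and the 4-byte horizontal padding after
    // each row is zeroed with a 32-bit store.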
    do {
      const __m256i coeffA = yy_loadu_256(cf);
      const __m256i coeffB = yy_loadu_256(cf + 8);
      const __m256i coeffC = yy_loadu_256(cf + 16);
      const __m256i coeffD = yy_loadu_256(cf + 24);
      const __m256i coeffAB = _mm256_packs_epi32(coeffA, coeffB);
      const __m256i coeffCD = _mm256_packs_epi32(coeffC, coeffD);
      const __m256i absAB = _mm256_abs_epi16(coeffAB);
      const __m256i absCD = _mm256_abs_epi16(coeffCD);
      const __m256i absABCD = _mm256_packs_epi16(absAB, absCD);
      const __m256i res_ = _mm256_permute4x64_epi64(absABCD, 0xd8);
      const __m256i res = _mm256_shuffle_epi32(res_, 0xd8);
      const __m128i res0 = _mm256_castsi256_si128(res);
      const __m128i res1 = _mm256_extracti128_si256(res, 1);
      xx_storel_64(ls, res0);
      *(int32_t *)(ls + width) = 0;
      xx_storel_64(ls + stride, _mm_srli_si128(res0, 8));
      *(int32_t *)(ls + width + stride) = 0;
      xx_storel_64(ls + stride * 2, res1);
      *(int32_t *)(ls + width + stride * 2) = 0;
      xx_storel_64(ls + stride * 3, _mm_srli_si128(res1, 8));
      *(int32_t *)(ls + width + stride * 3) = 0;
      cf += 32;
      ls += stride << 2;
      i += 4;
    } while (i < height);
  } else if (width == 16) {
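    // Two 16-wide rows (32 coefficients) per iteration: one 128-bit store per
    // row, with the horizontal padding zeroed by a separate 32-bit store.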
    do {
      const __m256i coeffA = yy_loadu_256(cf);
      const __m256i coeffB = yy_loadu_256(cf + 8);
      const __m256i coeffC = yy_loadu_256(cf + 16);
      const __m256i coeffD = yy_loadu_256(cf + 24);
      const __m256i coeffAB = _mm256_packs_epi32(coeffA, coeffB);
      const __m256i coeffCD = _mm256_packs_epi32(coeffC, coeffD);
      const __m256i absAB = _mm256_abs_epi16(coeffAB);
      const __m256i absCD = _mm256_abs_epi16(coeffCD);
      const __m256i absABCD = _mm256_packs_epi16(absAB, absCD);
      const __m256i res_ = _mm256_permute4x64_epi64(absABCD, 0xd8);
      const __m256i res = _mm256_shuffle_epi32(res_, 0xd8);
      xx_storeu_128(ls, _mm256_castsi256_si128(res));
      xx_storeu_128(ls + stride, _mm256_extracti128_si256(res, 1));
      cf += 32;
      *(int32_t *)(ls + width) = 0;
      *(int32_t *)(ls + stride + width) = 0;
      ls += stride << 1;
      i += 2;
    } while (i < height);
  } else {
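    // Remaining case, width == 32: one full row per 32-byte store, with only
    // the 4 padding bytes after each row needing a separate zeroing store.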
    do {
      const __m256i coeffA = yy_loadu_256(cf);
      const __m256i coeffB = yy_loadu_256(cf + 8);
      const __m256i coeffC = yy_loadu_256(cf + 16);
      const __m256i coeffD = yy_loadu_256(cf + 24);
      const __m256i coeffAB = _mm256_packs_epi32(coeffA, coeffB);
      const __m256i coeffCD = _mm256_packs_epi32(coeffC, coeffD);
      const __m256i absAB = _mm256_abs_epi16(coeffAB);
      const __m256i absCD = _mm256_abs_epi16(coeffCD);
      const __m256i absABCD = _mm256_packs_epi16(absAB, absCD);
      const __m256i res_ = _mm256_permute4x64_epi64(absABCD, 0xd8);
      const __m256i res = _mm256_shuffle_epi32(res_, 0xd8);
      yy_storeu_256(ls, res);
      cf += 32;
      *(int32_t *)(ls + width) = 0;
      ls += stride;
      i += 1;
    } while (i < height);
  }
}
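
// For reference, a scalar sketch of the computation above. The helper name is
// illustrative and the body is simplified from the C reference
// (av1_txb_init_levels_c); it assumes the TX_PAD_* layout from
// av1/common/txb_common.h.
//
//   static void txb_init_levels_scalar(const tran_low_t *coeff, int width,
//                                      int height, uint8_t *levels) {
//     const int stride = width + TX_PAD_HOR;
//     memset(levels - TX_PAD_TOP * stride, 0, TX_PAD_TOP * stride);
//     memset(levels + height * stride, 0, TX_PAD_BOTTOM * stride);
//     for (int i = 0; i < height; i++) {
//       for (int j = 0; j < width; j++) {
//         levels[i * stride + j] =
//             (uint8_t)AOMMIN(abs(coeff[i * width + j]), INT8_MAX);
//       }
//       memset(levels + i * stride + width, 0, TX_PAD_HOR);
//     }
//   }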