GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/libwebp/src/dsp/enc_sse41.c
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// SSE4 version of some encoding functions.
//
// Author: Skal ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_SSE41)
#include <smmintrin.h>
#include <stdlib.h>  // for abs()

#include "src/dsp/common_sse2.h"
#include "src/enc/vp8i_enc.h"

//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms.

static void CollectHistogram_SSE41(const uint8_t* WEBP_RESTRICT ref,
                                   const uint8_t* WEBP_RESTRICT pred,
                                   int start_block, int end_block,
                                   VP8Histogram* WEBP_RESTRICT const histo) {
  const __m128i max_coeff_thresh = _mm_set1_epi16(MAX_COEFF_THRESH);
  int j;
  int distribution[MAX_COEFF_THRESH + 1] = { 0 };
  for (j = start_block; j < end_block; ++j) {
    int16_t out[16];
    int k;

    VP8FTransform(ref + VP8DspScan[j], pred + VP8DspScan[j], out);

    // Convert coefficients to bin (within out[]).
    {
      // Load.
      const __m128i out0 = _mm_loadu_si128((__m128i*)&out[0]);
      const __m128i out1 = _mm_loadu_si128((__m128i*)&out[8]);
      // v = abs(out) >> 3
      const __m128i abs0 = _mm_abs_epi16(out0);
      const __m128i abs1 = _mm_abs_epi16(out1);
      const __m128i v0 = _mm_srai_epi16(abs0, 3);
      const __m128i v1 = _mm_srai_epi16(abs1, 3);
      // bin = min(v, MAX_COEFF_THRESH)
      const __m128i bin0 = _mm_min_epi16(v0, max_coeff_thresh);
      const __m128i bin1 = _mm_min_epi16(v1, max_coeff_thresh);
      // Store.
      _mm_storeu_si128((__m128i*)&out[0], bin0);
      _mm_storeu_si128((__m128i*)&out[8], bin1);
    }

    // Use the bins to update the distribution.
    for (k = 0; k < 16; ++k) {
      ++distribution[out[k]];
    }
  }
  VP8SetHistogramData(distribution, histo);
}
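
// (Reference sketch, added for clarity; not part of the original file.)
// The SIMD block above is essentially equivalent to this scalar binning of
// the transformed coefficients:
//   for (k = 0; k < 16; ++k) {
//     int v = abs(out[k]) >> 3;                        // coarse magnitude
//     if (v > MAX_COEFF_THRESH) v = MAX_COEFF_THRESH;  // saturate
//     ++distribution[v];
//   }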

//------------------------------------------------------------------------------
// Texture distortion
//
// We try to match the spectral content (weighted) between source and
// reconstructed samples.

// Hadamard transform
// Returns the weighted sum of the absolute value of transformed coefficients.
// w[] contains a row-major 4 by 4 symmetric matrix.
static int TTransform_SSE41(const uint8_t* inA, const uint8_t* inB,
                            const uint16_t* const w) {
  int32_t sum[4];
  __m128i tmp_0, tmp_1, tmp_2, tmp_3;

  // Load and combine inputs.
  {
    const __m128i inA_0 = _mm_loadu_si128((const __m128i*)&inA[BPS * 0]);
    const __m128i inA_1 = _mm_loadu_si128((const __m128i*)&inA[BPS * 1]);
    const __m128i inA_2 = _mm_loadu_si128((const __m128i*)&inA[BPS * 2]);
    // In SSE4.1, with gcc 4.8 at least (maybe other versions),
    // _mm_loadu_si128 is faster than _mm_loadl_epi64. But for the last lump
    // of inA and inB, _mm_loadl_epi64 is still used to avoid an
    // out-of-bounds read.
    const __m128i inA_3 = _mm_loadl_epi64((const __m128i*)&inA[BPS * 3]);
    const __m128i inB_0 = _mm_loadu_si128((const __m128i*)&inB[BPS * 0]);
    const __m128i inB_1 = _mm_loadu_si128((const __m128i*)&inB[BPS * 1]);
    const __m128i inB_2 = _mm_loadu_si128((const __m128i*)&inB[BPS * 2]);
    const __m128i inB_3 = _mm_loadl_epi64((const __m128i*)&inB[BPS * 3]);

    // Combine inA and inB (we'll do two transforms in parallel).
    const __m128i inAB_0 = _mm_unpacklo_epi32(inA_0, inB_0);
    const __m128i inAB_1 = _mm_unpacklo_epi32(inA_1, inB_1);
    const __m128i inAB_2 = _mm_unpacklo_epi32(inA_2, inB_2);
    const __m128i inAB_3 = _mm_unpacklo_epi32(inA_3, inB_3);
    tmp_0 = _mm_cvtepu8_epi16(inAB_0);
    tmp_1 = _mm_cvtepu8_epi16(inAB_1);
    tmp_2 = _mm_cvtepu8_epi16(inAB_2);
    tmp_3 = _mm_cvtepu8_epi16(inAB_3);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33
  }
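  // (Layout note, added for clarity: each 4x4 row spans 4 bytes, so
  // _mm_unpacklo_epi32 pairs one 32-bit row of inA with the matching row of
  // inB in its low 8 bytes, and _mm_cvtepu8_epi16 zero-extends those bytes
  // into the 8 words drawn above: four samples of A, then four of B.)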

  // Vertical pass first to avoid a transpose (vertical and horizontal passes
  // are commutative because w/kWeightY is symmetric) and subsequent transpose.
  {
    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);
    // a00 a01 a02 a03   b00 b01 b02 b03
    // a10 a11 a12 a13   b10 b11 b12 b13
    // a20 a21 a22 a23   b20 b21 b22 b23
    // a30 a31 a32 a33   b30 b31 b32 b33

    // Transpose the two 4x4.
    VP8Transpose_2_4x4_16b(&b0, &b1, &b2, &b3, &tmp_0, &tmp_1, &tmp_2, &tmp_3);
  }

  // Horizontal pass and difference of weighted sums.
  {
    // Load all inputs.
    const __m128i w_0 = _mm_loadu_si128((const __m128i*)&w[0]);
    const __m128i w_8 = _mm_loadu_si128((const __m128i*)&w[8]);

    // Calculate a and b (two 4x4 at once).
    const __m128i a0 = _mm_add_epi16(tmp_0, tmp_2);
    const __m128i a1 = _mm_add_epi16(tmp_1, tmp_3);
    const __m128i a2 = _mm_sub_epi16(tmp_1, tmp_3);
    const __m128i a3 = _mm_sub_epi16(tmp_0, tmp_2);
    const __m128i b0 = _mm_add_epi16(a0, a1);
    const __m128i b1 = _mm_add_epi16(a3, a2);
    const __m128i b2 = _mm_sub_epi16(a3, a2);
    const __m128i b3 = _mm_sub_epi16(a0, a1);

    // Separate the transforms of inA and inB.
    __m128i A_b0 = _mm_unpacklo_epi64(b0, b1);
    __m128i A_b2 = _mm_unpacklo_epi64(b2, b3);
    __m128i B_b0 = _mm_unpackhi_epi64(b0, b1);
    __m128i B_b2 = _mm_unpackhi_epi64(b2, b3);

    A_b0 = _mm_abs_epi16(A_b0);
    A_b2 = _mm_abs_epi16(A_b2);
    B_b0 = _mm_abs_epi16(B_b0);
    B_b2 = _mm_abs_epi16(B_b2);

    // weighted sums
    A_b0 = _mm_madd_epi16(A_b0, w_0);
    A_b2 = _mm_madd_epi16(A_b2, w_8);
    B_b0 = _mm_madd_epi16(B_b0, w_0);
    B_b2 = _mm_madd_epi16(B_b2, w_8);
    A_b0 = _mm_add_epi32(A_b0, A_b2);
    B_b0 = _mm_add_epi32(B_b0, B_b2);

    // difference of weighted sums
    A_b2 = _mm_sub_epi32(A_b0, B_b0);
    _mm_storeu_si128((__m128i*)&sum[0], A_b2);
  }
  return sum[0] + sum[1] + sum[2] + sum[3];
}
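
// (Reference sketch, added for clarity; not part of the original file.)
// For one 4x4 block, each pass above is this 4-point butterfly, applied
// first down the columns and then along the rows:
//   a0 = in0 + in2;  a1 = in1 + in3;  a2 = in1 - in3;  a3 = in0 - in2;
//   b0 = a0 + a1;    b1 = a3 + a2;    b2 = a3 - a2;    b3 = a0 - a1;
// The return value is sum(w[i] * |T(inA)[i]|) - sum(w[i] * |T(inB)[i]|),
// computed for both blocks in a single pass.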

static int Disto4x4_SSE41(const uint8_t* WEBP_RESTRICT const a,
                          const uint8_t* WEBP_RESTRICT const b,
                          const uint16_t* WEBP_RESTRICT const w) {
  const int diff_sum = TTransform_SSE41(a, b, w);
  return abs(diff_sum) >> 5;
}
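
// (Note, added for clarity: TTransform_SSE41 already returns the difference
// of the two weighted sums, so only one abs() and shift are needed here.
// The >> 5 is the fixed normalization used by the scalar implementation.)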

static int Disto16x16_SSE41(const uint8_t* WEBP_RESTRICT const a,
                            const uint8_t* WEBP_RESTRICT const b,
                            const uint16_t* WEBP_RESTRICT const w) {
  int D = 0;
  int x, y;
  for (y = 0; y < 16 * BPS; y += 4 * BPS) {
    for (x = 0; x < 16; x += 4) {
      D += Disto4x4_SSE41(a + x + y, b + x + y, w);
    }
  }
  return D;
}
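
// (Note, added for clarity: a 16x16 macroblock is scored as the sum of its
// sixteen 4x4 sub-blocks; y steps four rows at a time through the BPS-wide
// sample plane while x steps four columns.)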

//------------------------------------------------------------------------------
// Quantization
//

// Generates a pshufb constant for shuffling 16b words.
#define PSHUFB_CST(A,B,C,D,E,F,G,H) \
  _mm_set_epi8(2 * (H) + 1, 2 * (H) + 0, 2 * (G) + 1, 2 * (G) + 0, \
               2 * (F) + 1, 2 * (F) + 0, 2 * (E) + 1, 2 * (E) + 0, \
               2 * (D) + 1, 2 * (D) + 0, 2 * (C) + 1, 2 * (C) + 0, \
               2 * (B) + 1, 2 * (B) + 0, 2 * (A) + 1, 2 * (A) + 0)
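
// (Note, added for clarity: argument N of PSHUFB_CST selects 16-bit source
// word N for that output slot by naming its two bytes, 2*N and 2*N+1.
// Passing -1 produces control bytes with the high bit set, which makes
// pshufb write zero into that slot.)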

static WEBP_INLINE int DoQuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
                                             const uint16_t* const sharpen,
                                             const VP8Matrix* const mtx) {
  const __m128i max_coeff_2047 = _mm_set1_epi16(MAX_LEVEL);
  const __m128i zero = _mm_setzero_si128();
  __m128i out0, out8;
  __m128i packed_out;

  // Load all inputs.
  __m128i in0 = _mm_loadu_si128((__m128i*)&in[0]);
  __m128i in8 = _mm_loadu_si128((__m128i*)&in[8]);
  const __m128i iq0 = _mm_loadu_si128((const __m128i*)&mtx->iq_[0]);
  const __m128i iq8 = _mm_loadu_si128((const __m128i*)&mtx->iq_[8]);
  const __m128i q0 = _mm_loadu_si128((const __m128i*)&mtx->q_[0]);
  const __m128i q8 = _mm_loadu_si128((const __m128i*)&mtx->q_[8]);

  // coeff = abs(in)
  __m128i coeff0 = _mm_abs_epi16(in0);
  __m128i coeff8 = _mm_abs_epi16(in8);

  // coeff = abs(in) + sharpen
  if (sharpen != NULL) {
    const __m128i sharpen0 = _mm_loadu_si128((const __m128i*)&sharpen[0]);
    const __m128i sharpen8 = _mm_loadu_si128((const __m128i*)&sharpen[8]);
    coeff0 = _mm_add_epi16(coeff0, sharpen0);
    coeff8 = _mm_add_epi16(coeff8, sharpen8);
  }

  // out = (coeff * iQ + B) >> QFIX
  {
    // doing calculations with 32b precision (QFIX=17)
    // out = (coeff * iQ)
    const __m128i coeff_iQ0H = _mm_mulhi_epu16(coeff0, iq0);
    const __m128i coeff_iQ0L = _mm_mullo_epi16(coeff0, iq0);
    const __m128i coeff_iQ8H = _mm_mulhi_epu16(coeff8, iq8);
    const __m128i coeff_iQ8L = _mm_mullo_epi16(coeff8, iq8);
    __m128i out_00 = _mm_unpacklo_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_04 = _mm_unpackhi_epi16(coeff_iQ0L, coeff_iQ0H);
    __m128i out_08 = _mm_unpacklo_epi16(coeff_iQ8L, coeff_iQ8H);
    __m128i out_12 = _mm_unpackhi_epi16(coeff_iQ8L, coeff_iQ8H);
    // out = (coeff * iQ + B)
    const __m128i bias_00 = _mm_loadu_si128((const __m128i*)&mtx->bias_[0]);
    const __m128i bias_04 = _mm_loadu_si128((const __m128i*)&mtx->bias_[4]);
    const __m128i bias_08 = _mm_loadu_si128((const __m128i*)&mtx->bias_[8]);
    const __m128i bias_12 = _mm_loadu_si128((const __m128i*)&mtx->bias_[12]);
    out_00 = _mm_add_epi32(out_00, bias_00);
    out_04 = _mm_add_epi32(out_04, bias_04);
    out_08 = _mm_add_epi32(out_08, bias_08);
    out_12 = _mm_add_epi32(out_12, bias_12);
    // out = QUANTDIV(coeff, iQ, B, QFIX)
    out_00 = _mm_srai_epi32(out_00, QFIX);
    out_04 = _mm_srai_epi32(out_04, QFIX);
    out_08 = _mm_srai_epi32(out_08, QFIX);
    out_12 = _mm_srai_epi32(out_12, QFIX);

    // pack result as 16b
    out0 = _mm_packs_epi32(out_00, out_04);
    out8 = _mm_packs_epi32(out_08, out_12);

    // if (coeff > 2047) coeff = 2047
    out0 = _mm_min_epi16(out0, max_coeff_2047);
    out8 = _mm_min_epi16(out8, max_coeff_2047);
  }
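  // (Note, added for clarity: _mm_mullo_epi16 and _mm_mulhi_epu16 provide
  // the low and high 16 bits of each unsigned 16x16->32-bit product; the
  // unpack pairs above reassemble them into full 32-bit lanes so that the
  // bias addition and the >> QFIX shift keep full precision.)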

  // put sign back
  out0 = _mm_sign_epi16(out0, in0);
  out8 = _mm_sign_epi16(out8, in8);

  // in = out * Q
  in0 = _mm_mullo_epi16(out0, q0);
  in8 = _mm_mullo_epi16(out8, q8);

  _mm_storeu_si128((__m128i*)&in[0], in0);
  _mm_storeu_si128((__m128i*)&in[8], in8);

  // Zigzag the output before storing it. The re-ordering is:
  //    0  1  2  3  4  5  6  7 | 8  9 10 11 12 13 14 15
  // -> 0  1  4 [8] 5  2  3  6 | 9 12 13 10 [7] 11 14 15
  // Only the two bracketed entries ([8] and [7]) cross the register
  // boundary, so we use pshufb instead of pshuflo/pshufhi.
  {
    const __m128i kCst_lo = PSHUFB_CST(0, 1, 4, -1, 5, 2, 3, 6);
    const __m128i kCst_7 = PSHUFB_CST(-1, -1, -1, -1, 7, -1, -1, -1);
    const __m128i tmp_lo = _mm_shuffle_epi8(out0, kCst_lo);
    const __m128i tmp_7 = _mm_shuffle_epi8(out0, kCst_7);   // extract #7
    const __m128i kCst_hi = PSHUFB_CST(1, 4, 5, 2, -1, 3, 6, 7);
    const __m128i kCst_8 = PSHUFB_CST(-1, -1, -1, 0, -1, -1, -1, -1);
    const __m128i tmp_hi = _mm_shuffle_epi8(out8, kCst_hi);
    const __m128i tmp_8 = _mm_shuffle_epi8(out8, kCst_8);   // extract #8
    const __m128i out_z0 = _mm_or_si128(tmp_lo, tmp_8);
    const __m128i out_z8 = _mm_or_si128(tmp_hi, tmp_7);
    _mm_storeu_si128((__m128i*)&out[0], out_z0);
    _mm_storeu_si128((__m128i*)&out[8], out_z8);
    packed_out = _mm_packs_epi16(out_z0, out_z8);
  }

  // detect if all 'out' values are zeroes or not
  return (_mm_movemask_epi8(_mm_cmpeq_epi8(packed_out, zero)) != 0xffff);
}
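
// (Reference sketch, added for clarity; not part of the original file.)
// Per coefficient the function computes, with j the natural position and n
// the zigzag position (the reordering listed in the comment above):
//   for (n = 0; n < 16; ++n) {
//     const int j = kZigzag[n];
//     int v = abs(in[j]) + (sharpen != NULL ? sharpen[j] : 0);
//     v = (v * mtx->iq_[j] + mtx->bias_[j]) >> QFIX;      // QUANTDIV
//     if (v > MAX_LEVEL) v = MAX_LEVEL;                   // clamp to 2047
//     out[n] = (in[j] > 0) ? v : (in[j] < 0) ? -v : 0;    // _mm_sign_epi16
//     in[j] = out[n] * mtx->q_[j];   // dequantized, back in natural order
//   }
// The return value is 1 if any quantized level is nonzero, 0 otherwise.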

#undef PSHUFB_CST

static int QuantizeBlock_SSE41(int16_t in[16], int16_t out[16],
                               const VP8Matrix* WEBP_RESTRICT const mtx) {
  return DoQuantizeBlock_SSE41(in, out, &mtx->sharpen_[0], mtx);
}

static int QuantizeBlockWHT_SSE41(int16_t in[16], int16_t out[16],
                                  const VP8Matrix* WEBP_RESTRICT const mtx) {
  return DoQuantizeBlock_SSE41(in, out, NULL, mtx);
}

static int Quantize2Blocks_SSE41(int16_t in[32], int16_t out[32],
                                 const VP8Matrix* WEBP_RESTRICT const mtx) {
  int nz;
  const uint16_t* const sharpen = &mtx->sharpen_[0];
  nz  = DoQuantizeBlock_SSE41(in + 0 * 16, out + 0 * 16, sharpen, mtx) << 0;
  nz |= DoQuantizeBlock_SSE41(in + 1 * 16, out + 1 * 16, sharpen, mtx) << 1;
  return nz;
}
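
// (Note, added for clarity: Quantize2Blocks_SSE41 packs the two per-block
// "has nonzero levels" flags into a bitmask: bit 0 for coefficients 0..15,
// bit 1 for coefficients 16..31. The WHT variant quantizes without the
// sharpening bias by passing sharpen == NULL.)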

//------------------------------------------------------------------------------
// Entry point

extern void VP8EncDspInitSSE41(void);
WEBP_TSAN_IGNORE_FUNCTION void VP8EncDspInitSSE41(void) {
  VP8CollectHistogram = CollectHistogram_SSE41;
  VP8EncQuantizeBlock = QuantizeBlock_SSE41;
  VP8EncQuantize2Blocks = Quantize2Blocks_SSE41;
  VP8EncQuantizeBlockWHT = QuantizeBlockWHT_SSE41;
  VP8TDisto4x4 = Disto4x4_SSE41;
  VP8TDisto16x16 = Disto16x16_SSE41;
}
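
// (Note, added for clarity: this initializer is expected to be invoked from
// libwebp's generic encoder DSP setup once runtime CPU detection reports
// SSE4.1 support; it rebinds the global function pointers to the variants
// defined above.)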

#else  // !WEBP_USE_SSE41

WEBP_DSP_INIT_STUB(VP8EncDspInitSSE41)

#endif  // WEBP_USE_SSE41