// Copyright 2016 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// MSA version of dsp functions
//
// Author(s): Prashant Patil ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_MSA)

#include "src/dsp/msa_macro.h"

//------------------------------------------------------------------------------
// Transforms

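// The two 1-D passes below use VP8's fixed-point IDCT constants:
//   20091 ~ (sqrt(2) * cos(pi / 8) - 1) * 2^16
//   35468 ~  sqrt(2) * sin(pi / 8) * 2^16
// As a reference, one 1-D pass in scalar form (an illustrative sketch only,
// not part of the build; MULT1()/MULT2() are defined right after the macro):
//   a = in0 + in2;                 b = in0 - in2;
//   c = MULT2(in1) - MULT1(in3);   d = MULT1(in1) + MULT2(in3);
//   out0 = a + d;  out1 = b + c;  out2 = b - c;  out3 = a - d;
// BUTTERFLY_4() performs the final four additions/subtractions.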
#define IDCT_1D_W(in0, in1, in2, in3, out0, out1, out2, out3) {  \
  v4i32 a1_m, b1_m, c1_m, d1_m;                                  \
  v4i32 c_tmp1_m, c_tmp2_m, d_tmp1_m, d_tmp2_m;                  \
  const v4i32 cospi8sqrt2minus1 = __msa_fill_w(20091);           \
  const v4i32 sinpi8sqrt2 = __msa_fill_w(35468);                 \
                                                                 \
  a1_m = in0 + in2;                                              \
  b1_m = in0 - in2;                                              \
  c_tmp1_m = (in1 * sinpi8sqrt2) >> 16;                          \
  c_tmp2_m = in3 + ((in3 * cospi8sqrt2minus1) >> 16);            \
  c1_m = c_tmp1_m - c_tmp2_m;                                    \
  d_tmp1_m = in1 + ((in1 * cospi8sqrt2minus1) >> 16);            \
  d_tmp2_m = (in3 * sinpi8sqrt2) >> 16;                          \
  d1_m = d_tmp1_m + d_tmp2_m;                                    \
  BUTTERFLY_4(a1_m, b1_m, c1_m, d1_m, out0, out1, out2, out3);   \
}
#define MULT1(a) ((((a) * 20091) >> 16) + (a))
#define MULT2(a) (((a) * 35468) >> 16)

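// Full 4x4 inverse transform: a 1-D pass over the rows, a transpose, a 1-D
// pass over the columns, rounding by (x + 4) >> 3 via SRARI_W4_SW(..., 3),
// then the residual is added to the prediction already in 'dst' and the sum
// is clipped to [0, 255].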
static void TransformOne(const int16_t* in, uint8_t* dst) {
  v8i16 input0, input1;
  v4i32 in0, in1, in2, in3, hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3;
  v4i32 res0, res1, res2, res3;
  const v16i8 zero = { 0 };
  v16i8 dest0, dest1, dest2, dest3;

  LD_SH2(in, 8, input0, input1);
  UNPCK_SH_SW(input0, in0, in1);
  UNPCK_SH_SW(input1, in2, in3);
  IDCT_1D_W(in0, in1, in2, in3, hz0, hz1, hz2, hz3);
  TRANSPOSE4x4_SW_SW(hz0, hz1, hz2, hz3, hz0, hz1, hz2, hz3);
  IDCT_1D_W(hz0, hz1, hz2, hz3, vt0, vt1, vt2, vt3);
  SRARI_W4_SW(vt0, vt1, vt2, vt3, 3);
  TRANSPOSE4x4_SW_SW(vt0, vt1, vt2, vt3, vt0, vt1, vt2, vt3);
  LD_SB4(dst, BPS, dest0, dest1, dest2, dest3);
  ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3,
             res0, res1, res2, res3);
  ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3,
             res0, res1, res2, res3);
  ADD4(res0, vt0, res1, vt1, res2, vt2, res3, vt3, res0, res1, res2, res3);
  CLIP_SW4_0_255(res0, res1, res2, res3);
  PCKEV_B2_SW(res0, res1, res2, res3, vt0, vt1);
  res0 = (v4i32)__msa_pckev_b((v16i8)vt0, (v16i8)vt1);
  ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS);
}

static void TransformTwo(const int16_t* in, uint8_t* dst, int do_two) {
  TransformOne(in, dst);
  if (do_two) {
    TransformOne(in + 16, dst + 4);
  }
}

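// Inverse Walsh-Hadamard transform of the 16 DC coefficients. The outputs
// land 16 elements apart because 'out' is the base of 16 consecutive 4x4
// coefficient blocks (16 int16_t each); only each block's DC slot is written.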
static void TransformWHT(const int16_t* in, int16_t* out) {
  v8i16 input0, input1;
  const v8i16 mask0 = { 0, 1, 2, 3, 8, 9, 10, 11 };
  const v8i16 mask1 = { 4, 5, 6, 7, 12, 13, 14, 15 };
  const v8i16 mask2 = { 0, 4, 8, 12, 1, 5, 9, 13 };
  const v8i16 mask3 = { 3, 7, 11, 15, 2, 6, 10, 14 };
  v8i16 tmp0, tmp1, tmp2, tmp3;
  v8i16 out0, out1;

  LD_SH2(in, 8, input0, input1);
  input1 = SLDI_SH(input1, input1, 8);
  tmp0 = input0 + input1;
  tmp1 = input0 - input1;
  VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
  out0 = tmp2 + tmp3;
  out1 = tmp2 - tmp3;
  VSHF_H2_SH(out0, out1, out0, out1, mask2, mask3, input0, input1);
  tmp0 = input0 + input1;
  tmp1 = input0 - input1;
  VSHF_H2_SH(tmp0, tmp1, tmp0, tmp1, mask0, mask1, tmp2, tmp3);
  tmp0 = tmp2 + tmp3;
  tmp1 = tmp2 - tmp3;
  ADDVI_H2_SH(tmp0, 3, tmp1, 3, out0, out1);
  SRAI_H2_SH(out0, out1, 3);
  out[0] = __msa_copy_s_h(out0, 0);
  out[16] = __msa_copy_s_h(out0, 4);
  out[32] = __msa_copy_s_h(out1, 0);
  out[48] = __msa_copy_s_h(out1, 4);
  out[64] = __msa_copy_s_h(out0, 1);
  out[80] = __msa_copy_s_h(out0, 5);
  out[96] = __msa_copy_s_h(out1, 1);
  out[112] = __msa_copy_s_h(out1, 5);
  out[128] = __msa_copy_s_h(out0, 2);
  out[144] = __msa_copy_s_h(out0, 6);
  out[160] = __msa_copy_s_h(out1, 2);
  out[176] = __msa_copy_s_h(out1, 6);
  out[192] = __msa_copy_s_h(out0, 3);
  out[208] = __msa_copy_s_h(out0, 7);
  out[224] = __msa_copy_s_h(out1, 3);
  out[240] = __msa_copy_s_h(out1, 7);
}

static void TransformDC(const int16_t* in, uint8_t* dst) {
  const int DC = (in[0] + 4) >> 3;
  const v8i16 tmp0 = __msa_fill_h(DC);
  ADDBLK_ST4x4_UB(tmp0, tmp0, tmp0, tmp0, dst, BPS);
}

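// Fast path for blocks where only in[0], in[1] and in[4] are non-zero (DC
// plus the first horizontal and first vertical AC coefficient): both 1-D
// passes reduce to the closed forms computed with MULT1()/MULT2() below.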
static void TransformAC3(const int16_t* in, uint8_t* dst) {
  const int a = in[0] + 4;
  const int c4 = MULT2(in[4]);
  const int d4 = MULT1(in[4]);
  const int in2 = MULT2(in[1]);
  const int in3 = MULT1(in[1]);
  v4i32 tmp0 = { 0 };
  v4i32 out0 = __msa_fill_w(a + d4);
  v4i32 out1 = __msa_fill_w(a + c4);
  v4i32 out2 = __msa_fill_w(a - c4);
  v4i32 out3 = __msa_fill_w(a - d4);
  v4i32 res0, res1, res2, res3;
  const v4i32 zero = { 0 };
  v16u8 dest0, dest1, dest2, dest3;

  INSERT_W4_SW(in3, in2, -in2, -in3, tmp0);
  ADD4(out0, tmp0, out1, tmp0, out2, tmp0, out3, tmp0,
       out0, out1, out2, out3);
  SRAI_W4_SW(out0, out1, out2, out3, 3);
  LD_UB4(dst, BPS, dest0, dest1, dest2, dest3);
  ILVR_B4_SW(zero, dest0, zero, dest1, zero, dest2, zero, dest3,
             res0, res1, res2, res3);
  ILVR_H4_SW(zero, res0, zero, res1, zero, res2, zero, res3,
             res0, res1, res2, res3);
  ADD4(res0, out0, res1, out1, res2, out2, res3, out3, res0, res1, res2, res3);
  CLIP_SW4_0_255(res0, res1, res2, res3);
  PCKEV_B2_SW(res0, res1, res2, res3, out0, out1);
  res0 = (v4i32)__msa_pckev_b((v16i8)out0, (v16i8)out1);
  ST4x4_UB(res0, res0, 3, 2, 1, 0, dst, BPS);
}

//------------------------------------------------------------------------------
// Edge filtering functions

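// The filters below work in signed, saturating 8-bit arithmetic. XOR-ing an
// unsigned pixel with 0x80 re-biases [0, 255] onto [-128, 127], so
// __msa_adds_s_b()/__msa_subs_s_b() clamp exactly where the VP8 loop filter
// requires; the same XOR flips the result back afterwards.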
#define FLIP_SIGN2(in0, in1, out0, out1) {  \
  out0 = (v16i8)__msa_xori_b(in0, 0x80);    \
  out1 = (v16i8)__msa_xori_b(in1, 0x80);    \
}

#define FLIP_SIGN4(in0, in1, in2, in3, out0, out1, out2, out3) {  \
  FLIP_SIGN2(in0, in1, out0, out1);                               \
  FLIP_SIGN2(in2, in3, out2, out3);                               \
}

#define FILT_VAL(q0_m, p0_m, mask, filt) do {  \
  v16i8 q0_sub_p0;                             \
  q0_sub_p0 = __msa_subs_s_b(q0_m, p0_m);      \
  filt = __msa_adds_s_b(filt, q0_sub_p0);      \
  filt = __msa_adds_s_b(filt, q0_sub_p0);      \
  filt = __msa_adds_s_b(filt, q0_sub_p0);      \
  filt = filt & mask;                          \
} while (0)

#define FILT2(q_m, p_m, q, p) do {            \
  u_r = SRAI_H(temp1, 7);                     \
  u_r = __msa_sat_s_h(u_r, 7);                \
  u_l = SRAI_H(temp3, 7);                     \
  u_l = __msa_sat_s_h(u_l, 7);                \
  u = __msa_pckev_b((v16i8)u_l, (v16i8)u_r);  \
  q_m = __msa_subs_s_b(q_m, u);               \
  p_m = __msa_adds_s_b(p_m, u);               \
  q = __msa_xori_b((v16u8)q_m, 0x80);         \
  p = __msa_xori_b((v16u8)p_m, 0x80);         \
} while (0)

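// VP8's "normal" 4-tap filter applied across 16 lanes:
//   filt = clamp(p1 - q1) & hev
//   filt = clamp(filt + 3 * (q0 - p0)) & mask
//   q0 -= t1 and p0 += t2, with t1 = (filt + 4) >> 3, t2 = (filt + 3) >> 3;
// where hev is not set, p1/q1 are additionally nudged by (t1 + 1) >> 1.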
#define LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev) do {  \
  v16i8 p1_m, p0_m, q0_m, q1_m;                         \
  v16i8 filt, t1, t2;                                   \
  const v16i8 cnst4b = __msa_ldi_b(4);                  \
  const v16i8 cnst3b = __msa_ldi_b(3);                  \
                                                        \
  FLIP_SIGN4(p1, p0, q0, q1, p1_m, p0_m, q0_m, q1_m);   \
  filt = __msa_subs_s_b(p1_m, q1_m);                    \
  filt = filt & hev;                                    \
  FILT_VAL(q0_m, p0_m, mask, filt);                     \
  t1 = __msa_adds_s_b(filt, cnst4b);                    \
  t1 = SRAI_B(t1, 3);                                   \
  t2 = __msa_adds_s_b(filt, cnst3b);                    \
  t2 = SRAI_B(t2, 3);                                   \
  q0_m = __msa_subs_s_b(q0_m, t1);                      \
  q0 = __msa_xori_b((v16u8)q0_m, 0x80);                 \
  p0_m = __msa_adds_s_b(p0_m, t2);                      \
  p0 = __msa_xori_b((v16u8)p0_m, 0x80);                 \
  filt = __msa_srari_b(t1, 1);                          \
  hev = __msa_xori_b(hev, 0xff);                        \
  filt = filt & hev;                                    \
  q1_m = __msa_subs_s_b(q1_m, filt);                    \
  q1 = __msa_xori_b((v16u8)q1_m, 0x80);                 \
  p1_m = __msa_adds_s_b(p1_m, filt);                    \
  p1 = __msa_xori_b((v16u8)p1_m, 0x80);                 \
} while (0)

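// Macroblock-edge filter: lanes flagged by 'hev' get the same 4-tap
// adjustment as LPF_FILTER4_4W; the remaining lanes spread a wider
// correction w over three pixel pairs with the VP8 weights
//   q0/p0: (27 * w + 63) >> 7
//   q1/p1: (18 * w + 63) >> 7
//   q2/p2: ( 9 * w + 63) >> 7
// accumulated below as temp1/temp3 (9 * w + 63, then +9 * w twice).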
#define LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev) do {  \
  v16i8 p2_m, p1_m, p0_m, q2_m, q1_m, q0_m;                   \
  v16i8 u, filt, t1, t2, filt_sign;                           \
  v8i16 filt_r, filt_l, u_r, u_l;                             \
  v8i16 temp0, temp1, temp2, temp3;                           \
  const v16i8 cnst4b = __msa_ldi_b(4);                        \
  const v16i8 cnst3b = __msa_ldi_b(3);                        \
  const v8i16 cnst9h = __msa_ldi_h(9);                        \
  const v8i16 cnst63h = __msa_ldi_h(63);                      \
                                                              \
  FLIP_SIGN4(p1, p0, q0, q1, p1_m, p0_m, q0_m, q1_m);         \
  filt = __msa_subs_s_b(p1_m, q1_m);                          \
  FILT_VAL(q0_m, p0_m, mask, filt);                           \
  FLIP_SIGN2(p2, q2, p2_m, q2_m);                             \
  t2 = filt & hev;                                            \
  /* filt_val &= ~hev */                                      \
  hev = __msa_xori_b(hev, 0xff);                              \
  filt = filt & hev;                                          \
  t1 = __msa_adds_s_b(t2, cnst4b);                            \
  t1 = SRAI_B(t1, 3);                                         \
  t2 = __msa_adds_s_b(t2, cnst3b);                            \
  t2 = SRAI_B(t2, 3);                                         \
  q0_m = __msa_subs_s_b(q0_m, t1);                            \
  p0_m = __msa_adds_s_b(p0_m, t2);                            \
  filt_sign = __msa_clti_s_b(filt, 0);                        \
  ILVRL_B2_SH(filt_sign, filt, filt_r, filt_l);               \
  /* update q2/p2 */                                          \
  temp0 = filt_r * cnst9h;                                    \
  temp1 = temp0 + cnst63h;                                    \
  temp2 = filt_l * cnst9h;                                    \
  temp3 = temp2 + cnst63h;                                    \
  FILT2(q2_m, p2_m, q2, p2);                                  \
  /* update q1/p1 */                                          \
  temp1 = temp1 + temp0;                                      \
  temp3 = temp3 + temp2;                                      \
  FILT2(q1_m, p1_m, q1, p1);                                  \
  /* update q0/p0 */                                          \
  temp1 = temp1 + temp0;                                      \
  temp3 = temp3 + temp2;                                      \
  FILT2(q0_m, p0_m, q0, p0);                                  \
} while (0)

#define LPF_MASK_HEV(p3_in, p2_in, p1_in, p0_in,                 \
                     q0_in, q1_in, q2_in, q3_in,                 \
                     limit_in, b_limit_in, thresh_in,            \
                     hev_out, mask_out) do {                     \
  v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m;  \
  v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m;  \
  v16u8 flat_out;                                                \
                                                                 \
  /* absolute subtraction of pixel values */                     \
  p3_asub_p2_m = __msa_asub_u_b(p3_in, p2_in);                   \
  p2_asub_p1_m = __msa_asub_u_b(p2_in, p1_in);                   \
  p1_asub_p0_m = __msa_asub_u_b(p1_in, p0_in);                   \
  q1_asub_q0_m = __msa_asub_u_b(q1_in, q0_in);                   \
  q2_asub_q1_m = __msa_asub_u_b(q2_in, q1_in);                   \
  q3_asub_q2_m = __msa_asub_u_b(q3_in, q2_in);                   \
  p0_asub_q0_m = __msa_asub_u_b(p0_in, q0_in);                   \
  p1_asub_q1_m = __msa_asub_u_b(p1_in, q1_in);                   \
  /* calculation of hev */                                       \
  flat_out = __msa_max_u_b(p1_asub_p0_m, q1_asub_q0_m);          \
  hev_out = (thresh_in < flat_out);                              \
  /* calculation of mask */                                      \
  p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p0_asub_q0_m);     \
  p1_asub_q1_m = SRAI_B(p1_asub_q1_m, 1);                        \
  p0_asub_q0_m = __msa_adds_u_b(p0_asub_q0_m, p1_asub_q1_m);     \
  mask_out = (b_limit_in < p0_asub_q0_m);                        \
  mask_out = __msa_max_u_b(flat_out, mask_out);                  \
  p3_asub_p2_m = __msa_max_u_b(p3_asub_p2_m, p2_asub_p1_m);      \
  mask_out = __msa_max_u_b(p3_asub_p2_m, mask_out);              \
  q2_asub_q1_m = __msa_max_u_b(q2_asub_q1_m, q3_asub_q2_m);      \
  mask_out = __msa_max_u_b(q2_asub_q1_m, mask_out);              \
  mask_out = (limit_in < mask_out);                              \
  mask_out = __msa_xori_b(mask_out, 0xff);                       \
} while (0)

#define ST6x1_UB(in0, in0_idx, in1, in1_idx, pdst, stride) do {  \
  const uint16_t tmp0_h = __msa_copy_s_h((v8i16)in1, in1_idx);   \
  const uint32_t tmp0_w = __msa_copy_s_w((v4i32)in0, in0_idx);   \
  SW(tmp0_w, pdst);                                              \
  SH(tmp0_h, pdst + stride);                                     \
} while (0)

#define ST6x4_UB(in0, start_in0_idx, in1, start_in1_idx, pdst, stride) do {  \
  uint8_t* ptmp1 = (uint8_t*)pdst;                                           \
  ST6x1_UB(in0, start_in0_idx, in1, start_in1_idx, ptmp1, 4);                \
  ptmp1 += stride;                                                           \
  ST6x1_UB(in0, start_in0_idx + 1, in1, start_in1_idx + 1, ptmp1, 4);        \
  ptmp1 += stride;                                                           \
  ST6x1_UB(in0, start_in0_idx + 2, in1, start_in1_idx + 2, ptmp1, 4);        \
  ptmp1 += stride;                                                           \
  ST6x1_UB(in0, start_in0_idx + 3, in1, start_in1_idx + 3, ptmp1, 4);        \
} while (0)

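// "Simple" in-loop filter: only p0/q0 are adjusted (LPF_SIMPLE_FILT), and
// LPF_SIMPLE_MASK reduces the filter condition to
//   2 * |p0 - q0| + |p1 - q1| / 2 <= b_limit.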
#define LPF_SIMPLE_FILT(p1_in, p0_in, q0_in, q1_in, mask) do {       \
  v16i8 p1_m, p0_m, q0_m, q1_m, filt, filt1, filt2;                  \
  const v16i8 cnst4b = __msa_ldi_b(4);                               \
  const v16i8 cnst3b = __msa_ldi_b(3);                               \
                                                                     \
  FLIP_SIGN4(p1_in, p0_in, q0_in, q1_in, p1_m, p0_m, q0_m, q1_m);    \
  filt = __msa_subs_s_b(p1_m, q1_m);                                 \
  FILT_VAL(q0_m, p0_m, mask, filt);                                  \
  filt1 = __msa_adds_s_b(filt, cnst4b);                              \
  filt1 = SRAI_B(filt1, 3);                                          \
  filt2 = __msa_adds_s_b(filt, cnst3b);                              \
  filt2 = SRAI_B(filt2, 3);                                          \
  q0_m = __msa_subs_s_b(q0_m, filt1);                                \
  p0_m = __msa_adds_s_b(p0_m, filt2);                                \
  q0_in = __msa_xori_b((v16u8)q0_m, 0x80);                           \
  p0_in = __msa_xori_b((v16u8)p0_m, 0x80);                           \
} while (0)

#define LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask) do {    \
  v16u8 p1_a_sub_q1, p0_a_sub_q0;                              \
                                                               \
  p0_a_sub_q0 = __msa_asub_u_b(p0, q0);                        \
  p1_a_sub_q1 = __msa_asub_u_b(p1, q1);                        \
  p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1);    \
  p0_a_sub_q0 = __msa_adds_u_b(p0_a_sub_q0, p0_a_sub_q0);      \
  mask = __msa_adds_u_b(p0_a_sub_q0, p1_a_sub_q1);             \
  mask = (mask <= b_limit);                                    \
} while (0)

static void VFilter16(uint8_t* src, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptemp = src - 4 * stride;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 mask, hev;
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB8(ptemp, stride, p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  ptemp = src - 3 * stride;
  ST_UB4(p2, p1, p0, q0, ptemp, stride);
  ptemp += (4 * stride);
  ST_UB2(q1, q2, ptemp, stride);
}

static void HFilter16(uint8_t* src, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptmp = src - 4;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 mask, hev;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
  v16u8 row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);

  LD_UB8(ptmp, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  ptmp += (8 * stride);
  LD_UB8(ptmp, stride, row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
  ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
  ILVRL_B2_SH(q2, q1, tmp2, tmp5);
  ptmp = src - 3;
  ST6x1_UB(tmp3, 0, tmp2, 0, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp3, 1, tmp2, 1, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp3, 2, tmp2, 2, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp3, 3, tmp2, 3, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 0, tmp2, 4, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 1, tmp2, 5, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 2, tmp2, 6, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp4, 3, tmp2, 7, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 0, tmp5, 0, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 1, tmp5, 1, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 2, tmp5, 2, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp6, 3, tmp5, 3, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 0, tmp5, 4, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 1, tmp5, 5, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 2, tmp5, 6, ptmp, 4);
  ptmp += stride;
  ST6x1_UB(tmp7, 3, tmp5, 7, ptmp, 4);
}

// on three inner edges
static void VFilterHorEdge16i(uint8_t* src, int stride,
                              int b_limit, int limit, int thresh) {
  v16u8 mask, hev;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  const v16u8 thresh0 = (v16u8)__msa_fill_b(thresh);
  const v16u8 b_limit0 = (v16u8)__msa_fill_b(b_limit);
  const v16u8 limit0 = (v16u8)__msa_fill_b(limit);

  LD_UB8((src - 4 * stride), stride, p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  ST_UB4(p1, p0, q0, q1, (src - 2 * stride), stride);
}

static void VFilter16i(uint8_t* src_y, int stride,
                       int b_limit, int limit, int thresh) {
  VFilterHorEdge16i(src_y + 4 * stride, stride, b_limit, limit, thresh);
  VFilterHorEdge16i(src_y + 8 * stride, stride, b_limit, limit, thresh);
  VFilterHorEdge16i(src_y + 12 * stride, stride, b_limit, limit, thresh);
}

static void HFilterVertEdge16i(uint8_t* src, int stride,
                               int b_limit, int limit, int thresh) {
  v16u8 mask, hev;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7;
  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  const v16u8 thresh0 = (v16u8)__msa_fill_b(thresh);
  const v16u8 b_limit0 = (v16u8)__msa_fill_b(b_limit);
  const v16u8 limit0 = (v16u8)__msa_fill_b(limit);

  LD_UB8(src - 4, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(src - 4 + (8 * stride), stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit0, b_limit0, thresh0,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  ILVR_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp2, tmp3);
  ILVL_B2_SH(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp4, tmp5);
  src -= 2;
  ST4x8_UB(tmp2, tmp3, src, stride);
  src += (8 * stride);
  ST4x8_UB(tmp4, tmp5, src, stride);
}

static void HFilter16i(uint8_t* src_y, int stride,
                       int b_limit, int limit, int thresh) {
  HFilterVertEdge16i(src_y + 4, stride, b_limit, limit, thresh);
  HFilterVertEdge16i(src_y + 8, stride, b_limit, limit, thresh);
  HFilterVertEdge16i(src_y + 12, stride, b_limit, limit, thresh);
}

// 8-pixels wide variants, for chroma filtering
static void VFilter8(uint8_t* src_u, uint8_t* src_v, int stride,
                     int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptmp_src_u = src_u - 4 * stride;
  uint8_t* ptmp_src_v = src_v - 4 * stride;
  uint64_t p2_d, p1_d, p0_d, q0_d, q1_d, q2_d;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
  v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);

  LD_UB8(ptmp_src_u, stride, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
  LD_UB8(ptmp_src_v, stride, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
  ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
  ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  p2_d = __msa_copy_s_d((v2i64)p2, 0);
  p1_d = __msa_copy_s_d((v2i64)p1, 0);
  p0_d = __msa_copy_s_d((v2i64)p0, 0);
  q0_d = __msa_copy_s_d((v2i64)q0, 0);
  q1_d = __msa_copy_s_d((v2i64)q1, 0);
  q2_d = __msa_copy_s_d((v2i64)q2, 0);
  ptmp_src_u += stride;
  SD4(p2_d, p1_d, p0_d, q0_d, ptmp_src_u, stride);
  ptmp_src_u += (4 * stride);
  SD(q1_d, ptmp_src_u);
  ptmp_src_u += stride;
  SD(q2_d, ptmp_src_u);
  p2_d = __msa_copy_s_d((v2i64)p2, 1);
  p1_d = __msa_copy_s_d((v2i64)p1, 1);
  p0_d = __msa_copy_s_d((v2i64)p0, 1);
  q0_d = __msa_copy_s_d((v2i64)q0, 1);
  q1_d = __msa_copy_s_d((v2i64)q1, 1);
  q2_d = __msa_copy_s_d((v2i64)q2, 1);
  ptmp_src_v += stride;
  SD4(p2_d, p1_d, p0_d, q0_d, ptmp_src_v, stride);
  ptmp_src_v += (4 * stride);
  SD(q1_d, ptmp_src_v);
  ptmp_src_v += stride;
  SD(q2_d, ptmp_src_v);
}

static void HFilter8(uint8_t* src_u, uint8_t* src_v, int stride,
                     int b_limit_in, int limit_in, int thresh_in) {
  uint8_t* ptmp_src_u = src_u - 4;
  uint8_t* ptmp_src_v = src_v - 4;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
  v16u8 row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);

  LD_UB8(ptmp_src_u, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(ptmp_src_v, stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_MBFILTER(p2, p1, p0, q0, q1, q2, mask, hev);
  ILVR_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp3, tmp4);
  ILVL_B2_SH(p1, p2, q0, p0, tmp0, tmp1);
  ILVRL_H2_SH(tmp1, tmp0, tmp6, tmp7);
  ILVRL_B2_SH(q2, q1, tmp2, tmp5);
  ptmp_src_u += 1;
  ST6x4_UB(tmp3, 0, tmp2, 0, ptmp_src_u, stride);
  ptmp_src_u += 4 * stride;
  ST6x4_UB(tmp4, 0, tmp2, 4, ptmp_src_u, stride);
  ptmp_src_v += 1;
  ST6x4_UB(tmp6, 0, tmp5, 0, ptmp_src_v, stride);
  ptmp_src_v += 4 * stride;
  ST6x4_UB(tmp7, 0, tmp5, 4, ptmp_src_v, stride);
}

static void VFilter8i(uint8_t* src_u, uint8_t* src_v, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  uint64_t p1_d, p0_d, q0_d, q1_d;
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 p3_u, p2_u, p1_u, p0_u, q3_u, q2_u, q1_u, q0_u;
  v16u8 p3_v, p2_v, p1_v, p0_v, q3_v, q2_v, q1_v, q0_v;
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB8(src_u, stride, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u);
  src_u += (5 * stride);
  LD_UB8(src_v, stride, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v);
  src_v += (5 * stride);
  ILVR_D4_UB(p3_v, p3_u, p2_v, p2_u, p1_v, p1_u, p0_v, p0_u, p3, p2, p1, p0);
  ILVR_D4_UB(q0_v, q0_u, q1_v, q1_u, q2_v, q2_u, q3_v, q3_u, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  p1_d = __msa_copy_s_d((v2i64)p1, 0);
  p0_d = __msa_copy_s_d((v2i64)p0, 0);
  q0_d = __msa_copy_s_d((v2i64)q0, 0);
  q1_d = __msa_copy_s_d((v2i64)q1, 0);
  SD4(q1_d, q0_d, p0_d, p1_d, src_u, -stride);
  p1_d = __msa_copy_s_d((v2i64)p1, 1);
  p0_d = __msa_copy_s_d((v2i64)p0, 1);
  q0_d = __msa_copy_s_d((v2i64)q0, 1);
  q1_d = __msa_copy_s_d((v2i64)q1, 1);
  SD4(q1_d, q0_d, p0_d, p1_d, src_v, -stride);
}

static void HFilter8i(uint8_t* src_u, uint8_t* src_v, int stride,
                      int b_limit_in, int limit_in, int thresh_in) {
  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, mask, hev;
  v16u8 row0, row1, row2, row3, row4, row5, row6, row7, row8;
  v16u8 row9, row10, row11, row12, row13, row14, row15;
  v4i32 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5;
  const v16u8 thresh = (v16u8)__msa_fill_b(thresh_in);
  const v16u8 limit = (v16u8)__msa_fill_b(limit_in);
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB8(src_u, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(src_v, stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x8_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p3, p2, p1, p0, q0, q1, q2, q3);
  LPF_MASK_HEV(p3, p2, p1, p0, q0, q1, q2, q3, limit, b_limit, thresh,
               hev, mask);
  LPF_FILTER4_4W(p1, p0, q0, q1, mask, hev);
  ILVR_B2_SW(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SW(tmp1, tmp0, tmp2, tmp3);
  ILVL_B2_SW(p0, p1, q1, q0, tmp0, tmp1);
  ILVRL_H2_SW(tmp1, tmp0, tmp4, tmp5);
  src_u += 2;
  ST4x4_UB(tmp2, tmp2, 0, 1, 2, 3, src_u, stride);
  src_u += 4 * stride;
  ST4x4_UB(tmp3, tmp3, 0, 1, 2, 3, src_u, stride);
  src_v += 2;
  ST4x4_UB(tmp4, tmp4, 0, 1, 2, 3, src_v, stride);
  src_v += 4 * stride;
  ST4x4_UB(tmp5, tmp5, 0, 1, 2, 3, src_v, stride);
}

static void SimpleVFilter16(uint8_t* src, int stride, int b_limit_in) {
  v16u8 p1, p0, q1, q0, mask;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);

  LD_UB4(src - 2 * stride, stride, p1, p0, q0, q1);
  LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
  LPF_SIMPLE_FILT(p1, p0, q0, q1, mask);
  ST_UB2(p0, q0, src - stride, stride);
}

static void SimpleHFilter16(uint8_t* src, int stride, int b_limit_in) {
  v16u8 p1, p0, q1, q0, mask, row0, row1, row2, row3, row4, row5, row6, row7;
  v16u8 row8, row9, row10, row11, row12, row13, row14, row15;
  v8i16 tmp0, tmp1;
  const v16u8 b_limit = (v16u8)__msa_fill_b(b_limit_in);
  uint8_t* ptemp_src = src - 2;

  LD_UB8(ptemp_src, stride, row0, row1, row2, row3, row4, row5, row6, row7);
  LD_UB8(ptemp_src + 8 * stride, stride,
         row8, row9, row10, row11, row12, row13, row14, row15);
  TRANSPOSE16x4_UB_UB(row0, row1, row2, row3, row4, row5, row6, row7,
                      row8, row9, row10, row11, row12, row13, row14, row15,
                      p1, p0, q0, q1);
  LPF_SIMPLE_MASK(p1, p0, q0, q1, b_limit, mask);
  LPF_SIMPLE_FILT(p1, p0, q0, q1, mask);
  ILVRL_B2_SH(q0, p0, tmp1, tmp0);
  ptemp_src += 1;
  ST2x4_UB(tmp1, 0, ptemp_src, stride);
  ptemp_src += 4 * stride;
  ST2x4_UB(tmp1, 4, ptemp_src, stride);
  ptemp_src += 4 * stride;
  ST2x4_UB(tmp0, 0, ptemp_src, stride);
  ptemp_src += 4 * stride;
  ST2x4_UB(tmp0, 4, ptemp_src, stride);
  ptemp_src += 4 * stride;
}

static void SimpleVFilter16i(uint8_t* src_y, int stride, int b_limit_in) {
  SimpleVFilter16(src_y + 4 * stride, stride, b_limit_in);
  SimpleVFilter16(src_y + 8 * stride, stride, b_limit_in);
  SimpleVFilter16(src_y + 12 * stride, stride, b_limit_in);
}

static void SimpleHFilter16i(uint8_t* src_y, int stride, int b_limit_in) {
  SimpleHFilter16(src_y + 4, stride, b_limit_in);
  SimpleHFilter16(src_y + 8, stride, b_limit_in);
  SimpleHFilter16(src_y + 12, stride, b_limit_in);
}

//------------------------------------------------------------------------------
// Intra predictions
//------------------------------------------------------------------------------

// 4x4

static void DC4(uint8_t* dst) {   // DC
  uint32_t dc = 4;
  int i;
  for (i = 0; i < 4; ++i) dc += dst[i - BPS] + dst[-1 + i * BPS];
  dc >>= 3;
  dc = dc | (dc << 8) | (dc << 16) | (dc << 24);
  SW4(dc, dc, dc, dc, dst, BPS);
}

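// TrueMotion: pred(x, y) = clip(Left[y] + Top[x] - TopLeft). One vector of
// (Top - TopLeft) deltas is added to each splatted left-column pixel, then
// the four rows are clipped and packed back to bytes.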
static void TM4(uint8_t* dst) {
  const uint8_t* const ptemp = dst - BPS - 1;
  v8i16 T, d, r0, r1, r2, r3;
  const v16i8 zero = { 0 };
  const v8i16 TL = (v8i16)__msa_fill_h(ptemp[0 * BPS]);
  const v8i16 L0 = (v8i16)__msa_fill_h(ptemp[1 * BPS]);
  const v8i16 L1 = (v8i16)__msa_fill_h(ptemp[2 * BPS]);
  const v8i16 L2 = (v8i16)__msa_fill_h(ptemp[3 * BPS]);
  const v8i16 L3 = (v8i16)__msa_fill_h(ptemp[4 * BPS]);
  const v16u8 T1 = LD_UB(ptemp + 1);

  T = (v8i16)__msa_ilvr_b(zero, (v16i8)T1);
  d = T - TL;
  ADD4(d, L0, d, L1, d, L2, d, L3, r0, r1, r2, r3);
  CLIP_SH4_0_255(r0, r1, r2, r3);
  PCKEV_ST4x4_UB(r0, r1, r2, r3, dst, BPS);
}

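// VE4 smooths the row above with the (A + 2 * B + C + 2) >> 2 kernel. MSA
// has no 3-tap average, so it is assembled from two 2-tap ones:
// __msa_ave_u_b(A, C) = (A + C) >> 1 (truncating), then __msa_aver_u_b()
// adds B with +1 rounding, which yields the same value as the direct
// 3-tap computation. RD4/LD4 below reuse the same trick on shifted inputs.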
static void VE4(uint8_t* dst) {   // vertical
  const uint8_t* const ptop = dst - BPS - 1;
  const uint32_t val0 = LW(ptop + 0);
  const uint32_t val1 = LW(ptop + 4);
  uint32_t out;
  v16u8 A = { 0 }, B, C, AC, B2, R;

  INSERT_W2_UB(val0, val1, A);
  B = SLDI_UB(A, A, 1);
  C = SLDI_UB(A, A, 2);
  AC = __msa_ave_u_b(A, C);
  B2 = __msa_ave_u_b(B, B);
  R = __msa_aver_u_b(AC, B2);
  out = __msa_copy_s_w((v4i32)R, 0);
  SW4(out, out, out, out, dst, BPS);
}

static void RD4(uint8_t* dst) {   // Down-right
  const uint8_t* const ptop = dst - 1 - BPS;
  uint32_t val0 = LW(ptop + 0);
  uint32_t val1 = LW(ptop + 4);
  uint32_t val2, val3;
  v16u8 A, B, C, AC, B2, R, A1 = { 0 };

  INSERT_W2_UB(val0, val1, A1);
  A = SLDI_UB(A1, A1, 12);
  A = (v16u8)__msa_insert_b((v16i8)A, 3, ptop[1 * BPS]);
  A = (v16u8)__msa_insert_b((v16i8)A, 2, ptop[2 * BPS]);
  A = (v16u8)__msa_insert_b((v16i8)A, 1, ptop[3 * BPS]);
  A = (v16u8)__msa_insert_b((v16i8)A, 0, ptop[4 * BPS]);
  B = SLDI_UB(A, A, 1);
  C = SLDI_UB(A, A, 2);
  AC = __msa_ave_u_b(A, C);
  B2 = __msa_ave_u_b(B, B);
  R = __msa_aver_u_b(AC, B2);
  val3 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val2 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val1 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val0 = __msa_copy_s_w((v4i32)R, 0);
  SW4(val0, val1, val2, val3, dst, BPS);
}

static void LD4(uint8_t* dst) {   // Down-Left
  const uint8_t* const ptop = dst - BPS;
  uint32_t val0 = LW(ptop + 0);
  uint32_t val1 = LW(ptop + 4);
  uint32_t val2, val3;
  v16u8 A = { 0 }, B, C, AC, B2, R;

  INSERT_W2_UB(val0, val1, A);
  B = SLDI_UB(A, A, 1);
  C = SLDI_UB(A, A, 2);
  C = (v16u8)__msa_insert_b((v16i8)C, 6, ptop[7]);
  AC = __msa_ave_u_b(A, C);
  B2 = __msa_ave_u_b(B, B);
  R = __msa_aver_u_b(AC, B2);
  val0 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val1 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val2 = __msa_copy_s_w((v4i32)R, 0);
  R = SLDI_UB(R, R, 1);
  val3 = __msa_copy_s_w((v4i32)R, 0);
  SW4(val0, val1, val2, val3, dst, BPS);
}

// 16x16

static void DC16(uint8_t* dst) {   // DC
  uint32_t dc = 16;
  int i;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 dctop = __msa_hadd_u_h(rtop, rtop);
  v16u8 out;

  for (i = 0; i < 16; ++i) {
    dc += dst[-1 + i * BPS];
  }
  dc += HADD_UH_U32(dctop);
  out = (v16u8)__msa_fill_b(dc >> 5);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}

static void TM16(uint8_t* dst) {
  int j;
  v8i16 d1, d2;
  const v16i8 zero = { 0 };
  const v8i16 TL = (v8i16)__msa_fill_h(dst[-1 - BPS]);
  const v16i8 T = LD_SB(dst - BPS);

  ILVRL_B2_SH(zero, T, d1, d2);
  SUB2(d1, TL, d2, TL, d1, d2);
  for (j = 0; j < 16; j += 4) {
    v16i8 t0, t1, t2, t3;
    v8i16 r0, r1, r2, r3, r4, r5, r6, r7;
    const v8i16 L0 = (v8i16)__msa_fill_h(dst[-1 + 0 * BPS]);
    const v8i16 L1 = (v8i16)__msa_fill_h(dst[-1 + 1 * BPS]);
    const v8i16 L2 = (v8i16)__msa_fill_h(dst[-1 + 2 * BPS]);
    const v8i16 L3 = (v8i16)__msa_fill_h(dst[-1 + 3 * BPS]);
    ADD4(d1, L0, d1, L1, d1, L2, d1, L3, r0, r1, r2, r3);
    ADD4(d2, L0, d2, L1, d2, L2, d2, L3, r4, r5, r6, r7);
    CLIP_SH4_0_255(r0, r1, r2, r3);
    CLIP_SH4_0_255(r4, r5, r6, r7);
    PCKEV_B4_SB(r4, r0, r5, r1, r6, r2, r7, r3, t0, t1, t2, t3);
    ST_SB4(t0, t1, t2, t3, dst, BPS);
    dst += 4 * BPS;
  }
}

static void VE16(uint8_t* dst) {   // vertical
  const v16u8 rtop = LD_UB(dst - BPS);
  ST_UB8(rtop, rtop, rtop, rtop, rtop, rtop, rtop, rtop, dst, BPS);
  ST_UB8(rtop, rtop, rtop, rtop, rtop, rtop, rtop, rtop, dst + 8 * BPS, BPS);
}

static void HE16(uint8_t* dst) {   // horizontal
  int j;
  for (j = 16; j > 0; j -= 4) {
    const v16u8 L0 = (v16u8)__msa_fill_b(dst[-1 + 0 * BPS]);
    const v16u8 L1 = (v16u8)__msa_fill_b(dst[-1 + 1 * BPS]);
    const v16u8 L2 = (v16u8)__msa_fill_b(dst[-1 + 2 * BPS]);
    const v16u8 L3 = (v16u8)__msa_fill_b(dst[-1 + 3 * BPS]);
    ST_UB4(L0, L1, L2, L3, dst, BPS);
    dst += 4 * BPS;
  }
}

static void DC16NoTop(uint8_t* dst) {   // DC with top samples not available
  int j;
  uint32_t dc = 8;
  v16u8 out;

  for (j = 0; j < 16; ++j) {
    dc += dst[-1 + j * BPS];
  }
  out = (v16u8)__msa_fill_b(dc >> 4);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}

static void DC16NoLeft(uint8_t* dst) {   // DC with left samples not available
  uint32_t dc = 8;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 dctop = __msa_hadd_u_h(rtop, rtop);
  v16u8 out;

  dc += HADD_UH_U32(dctop);
  out = (v16u8)__msa_fill_b(dc >> 4);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}

static void DC16NoTopLeft(uint8_t* dst) {   // DC with nothing
  const v16u8 out = (v16u8)__msa_fill_b(0x80);
  ST_UB8(out, out, out, out, out, out, out, out, dst, BPS);
  ST_UB8(out, out, out, out, out, out, out, out, dst + 8 * BPS, BPS);
}

// Chroma

#define STORE8x8(out, dst) do {                 \
  SD4(out, out, out, out, dst + 0 * BPS, BPS);  \
  SD4(out, out, out, out, dst + 4 * BPS, BPS);  \
} while (0)

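// The __msa_hadd_u_h/_w/_d chain below pairwise-sums the eight top pixels
// into a single lane; combined with the scalar sum of the left column this
// gives dc = (top_sum + left_sum + 8) >> 4.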
static void DC8uv(uint8_t* dst) {   // DC
  uint32_t dc = 8;
  int i;
  uint64_t out;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 temp0 = __msa_hadd_u_h(rtop, rtop);
  const v4u32 temp1 = __msa_hadd_u_w(temp0, temp0);
  const v2u64 temp2 = __msa_hadd_u_d(temp1, temp1);
  v16u8 dctemp;

  for (i = 0; i < 8; ++i) {
    dc += dst[-1 + i * BPS];
  }
  dc += __msa_copy_s_w((v4i32)temp2, 0);
  dctemp = (v16u8)__msa_fill_b(dc >> 4);
  out = __msa_copy_s_d((v2i64)dctemp, 0);
  STORE8x8(out, dst);
}

static void TM8uv(uint8_t* dst) {
  int j;
  const v16i8 T1 = LD_SB(dst - BPS);
  const v16i8 zero = { 0 };
  const v8i16 T = (v8i16)__msa_ilvr_b(zero, T1);
  const v8i16 TL = (v8i16)__msa_fill_h(dst[-1 - BPS]);
  const v8i16 d = T - TL;

  for (j = 0; j < 8; j += 4) {
    v16i8 t0, t1;
    v8i16 r0 = (v8i16)__msa_fill_h(dst[-1 + 0 * BPS]);
    v8i16 r1 = (v8i16)__msa_fill_h(dst[-1 + 1 * BPS]);
    v8i16 r2 = (v8i16)__msa_fill_h(dst[-1 + 2 * BPS]);
    v8i16 r3 = (v8i16)__msa_fill_h(dst[-1 + 3 * BPS]);
    ADD4(d, r0, d, r1, d, r2, d, r3, r0, r1, r2, r3);
    CLIP_SH4_0_255(r0, r1, r2, r3);
    PCKEV_B2_SB(r1, r0, r3, r2, t0, t1);
    ST4x4_UB(t0, t1, 0, 2, 0, 2, dst, BPS);
    ST4x4_UB(t0, t1, 1, 3, 1, 3, dst + 4, BPS);
    dst += 4 * BPS;
  }
}

static void VE8uv(uint8_t* dst) {   // vertical
  const v16u8 rtop = LD_UB(dst - BPS);
  const uint64_t out = __msa_copy_s_d((v2i64)rtop, 0);
  STORE8x8(out, dst);
}

static void HE8uv(uint8_t* dst) {   // horizontal
  int j;
  for (j = 0; j < 8; j += 4) {
    const v16u8 L0 = (v16u8)__msa_fill_b(dst[-1 + 0 * BPS]);
    const v16u8 L1 = (v16u8)__msa_fill_b(dst[-1 + 1 * BPS]);
    const v16u8 L2 = (v16u8)__msa_fill_b(dst[-1 + 2 * BPS]);
    const v16u8 L3 = (v16u8)__msa_fill_b(dst[-1 + 3 * BPS]);
    const uint64_t out0 = __msa_copy_s_d((v2i64)L0, 0);
    const uint64_t out1 = __msa_copy_s_d((v2i64)L1, 0);
    const uint64_t out2 = __msa_copy_s_d((v2i64)L2, 0);
    const uint64_t out3 = __msa_copy_s_d((v2i64)L3, 0);
    SD4(out0, out1, out2, out3, dst, BPS);
    dst += 4 * BPS;
  }
}

static void DC8uvNoLeft(uint8_t* dst) {   // DC with no left samples
  const uint32_t dc = 4;
  const v16u8 rtop = LD_UB(dst - BPS);
  const v8u16 temp0 = __msa_hadd_u_h(rtop, rtop);
  const v4u32 temp1 = __msa_hadd_u_w(temp0, temp0);
  const v2u64 temp2 = __msa_hadd_u_d(temp1, temp1);
  const uint32_t sum_m = __msa_copy_s_w((v4i32)temp2, 0);
  const v16u8 dcval = (v16u8)__msa_fill_b((dc + sum_m) >> 3);
  const uint64_t out = __msa_copy_s_d((v2i64)dcval, 0);
  STORE8x8(out, dst);
}

static void DC8uvNoTop(uint8_t* dst) {   // DC with no top samples
  uint32_t dc = 4;
  int i;
  uint64_t out;
  v16u8 dctemp;

  for (i = 0; i < 8; ++i) {
    dc += dst[-1 + i * BPS];
  }
  dctemp = (v16u8)__msa_fill_b(dc >> 3);
  out = __msa_copy_s_d((v2i64)dctemp, 0);
  STORE8x8(out, dst);
}

static void DC8uvNoTopLeft(uint8_t* dst) {   // DC with nothing
  const uint64_t out = 0x8080808080808080ULL;
  STORE8x8(out, dst);
}

//------------------------------------------------------------------------------
// Entry point

extern void VP8DspInitMSA(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8DspInitMSA(void) {
  VP8TransformWHT = TransformWHT;
  VP8Transform = TransformTwo;
  VP8TransformDC = TransformDC;
  VP8TransformAC3 = TransformAC3;

  VP8VFilter16 = VFilter16;
  VP8HFilter16 = HFilter16;
  VP8VFilter16i = VFilter16i;
  VP8HFilter16i = HFilter16i;
  VP8VFilter8 = VFilter8;
  VP8HFilter8 = HFilter8;
  VP8VFilter8i = VFilter8i;
  VP8HFilter8i = HFilter8i;
  VP8SimpleVFilter16 = SimpleVFilter16;
  VP8SimpleHFilter16 = SimpleHFilter16;
  VP8SimpleVFilter16i = SimpleVFilter16i;
  VP8SimpleHFilter16i = SimpleHFilter16i;

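  // Only a subset of the 4x4 predictors has an MSA implementation; the
  // skipped indices (HE4, VR4, VL4, HD4, HU4) keep their portable C versions.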
  VP8PredLuma4[0] = DC4;
  VP8PredLuma4[1] = TM4;
  VP8PredLuma4[2] = VE4;
  VP8PredLuma4[4] = RD4;
  VP8PredLuma4[6] = LD4;
  VP8PredLuma16[0] = DC16;
  VP8PredLuma16[1] = TM16;
  VP8PredLuma16[2] = VE16;
  VP8PredLuma16[3] = HE16;
  VP8PredLuma16[4] = DC16NoTop;
  VP8PredLuma16[5] = DC16NoLeft;
  VP8PredLuma16[6] = DC16NoTopLeft;
  VP8PredChroma8[0] = DC8uv;
  VP8PredChroma8[1] = TM8uv;
  VP8PredChroma8[2] = VE8uv;
  VP8PredChroma8[3] = HE8uv;
  VP8PredChroma8[4] = DC8uvNoTop;
  VP8PredChroma8[5] = DC8uvNoLeft;
  VP8PredChroma8[6] = DC8uvNoTopLeft;
}

#else  // !WEBP_USE_MSA

WEBP_DSP_INIT_STUB(VP8DspInitMSA)

#endif  // WEBP_USE_MSA