Book a Demo!
CoCalc Logo Icon
StoreFeaturesDocsShareSupportNewsAboutPoliciesSign UpSign In
Tetragramm
GitHub Repository: Tetragramm/opencv
Path: blob/master/3rdparty/libwebp/src/dsp/msa_macro.h
16348 views
1
// Copyright 2016 Google Inc. All Rights Reserved.
2
//
3
// Use of this source code is governed by a BSD-style license
4
// that can be found in the COPYING file in the root of the source
5
// tree. An additional intellectual property rights grant can be found
6
// in the file PATENTS. All contributing project authors may
7
// be found in the AUTHORS file in the root of the source tree.
8
// -----------------------------------------------------------------------------
9
//
10
// MSA common macros
11
//
12
// Author(s): Prashant Patil ([email protected])
13
14
#ifndef WEBP_DSP_MSA_MACRO_H_
#define WEBP_DSP_MSA_MACRO_H_

#include <stdint.h>
#include <msa.h>

#if defined(__clang__)
#define CLANG_BUILD
#endif

// clang requires explicit __msa_* builtins, while gcc's vector extensions
// allow plain C operators on MSA vector types.  The two branches must
// implement the same operation.  All macro arguments are parenthesized so
// that compound expressions expand safely.
#ifdef CLANG_BUILD
#define ALPHAVAL (-1)   // equals 0xff when truncated to a byte
#define ADDVI_H(a, b) __msa_addvi_h((v8i16)(a), (b))
#define ADDVI_W(a, b) __msa_addvi_w((v4i32)(a), (b))
#define SRAI_B(a, b) __msa_srai_b((v16i8)(a), (b))
#define SRAI_H(a, b) __msa_srai_h((v8i16)(a), (b))
#define SRAI_W(a, b) __msa_srai_w((v4i32)(a), (b))
#define SRLI_H(a, b) __msa_srli_h((v8i16)(a), (b))
// Fixed: __msa_slli_b operates on byte vectors; the previous (v4i32) cast
// only compiled thanks to clang's lax vector conversions.
#define SLLI_B(a, b) __msa_slli_b((v16i8)(a), (b))
#define ANDI_B(a, b) __msa_andi_b((v16u8)(a), (b))
#define ORI_B(a, b) __msa_ori_b((v16u8)(a), (b))
#else
#define ALPHAVAL (0xff)
#define ADDVI_H(a, b) ((a) + (b))
#define ADDVI_W(a, b) ((a) + (b))
#define SRAI_B(a, b) ((a) >> (b))
#define SRAI_H(a, b) ((a) >> (b))
#define SRAI_W(a, b) ((a) >> (b))
// Fixed: SRLI is a logical *right* shift (the clang branch uses
// __msa_srli_h); this previously expanded to '(a << b)'.
// NOTE(review): '>>' is only a logical shift when 'a' has an unsigned
// element type — confirm all SRLI_H call sites pass unsigned vectors.
#define SRLI_H(a, b) ((a) >> (b))
#define SLLI_B(a, b) ((a) << (b))
#define ANDI_B(a, b) ((a) & (b))
#define ORI_B(a, b) ((a) | (b))
#endif
47
48
// Vector load: reinterpret 'psrc' as a pointer to the vector type RTYPE and
// dereference it.  The _B/_H/_W suffix documents the intended element width;
// the expansion is identical for all three.
#define LD_B(RTYPE, psrc) *((RTYPE*)(psrc))
#define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
#define LD_SB(...) LD_B(v16i8, __VA_ARGS__)

#define LD_H(RTYPE, psrc) *((RTYPE*)(psrc))
#define LD_UH(...) LD_H(v8u16, __VA_ARGS__)
#define LD_SH(...) LD_H(v8i16, __VA_ARGS__)

#define LD_W(RTYPE, psrc) *((RTYPE*)(psrc))
#define LD_UW(...) LD_W(v4u32, __VA_ARGS__)
#define LD_SW(...) LD_W(v4i32, __VA_ARGS__)

// Vector store: counterpart of LD_B/LD_H/LD_W above.
#define ST_B(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
#define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
#define ST_SB(...) ST_B(v16i8, __VA_ARGS__)

#define ST_H(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
#define ST_UH(...) ST_H(v8u16, __VA_ARGS__)
#define ST_SH(...) ST_H(v8i16, __VA_ARGS__)

#define ST_W(RTYPE, in, pdst) *((RTYPE*)(pdst)) = in
#define ST_UW(...) ST_W(v4u32, __VA_ARGS__)
#define ST_SW(...) ST_W(v4i32, __VA_ARGS__)

// Emits a scalar-load helper of type TYPE named FUNC_NAME, implemented with
// the given assembler instruction INSTR.  The "m" input constraint lets the
// assembler pick the addressing mode and tells the compiler the location is
// read.
// NOTE(review): "m"(*psrc_m) only declares a 1-byte read to the compiler
// even for multi-byte loads — presumably safe in context, but verify against
// aggressive alias analysis.
#define MSA_LOAD_FUNC(TYPE, INSTR, FUNC_NAME) \
static inline TYPE FUNC_NAME(const void* const psrc) { \
  const uint8_t* const psrc_m = (const uint8_t*)psrc; \
  TYPE val_m; \
  asm volatile ( \
    "" #INSTR " %[val_m], %[psrc_m] \n\t" \
    : [val_m] "=r" (val_m) \
    : [psrc_m] "m" (*psrc_m)); \
  return val_m; \
}

#define MSA_LOAD(psrc, FUNC_NAME) FUNC_NAME(psrc)

// Counterpart of MSA_LOAD_FUNC for scalar stores: "=m"(*pdst_m) marks the
// destination as written.
#define MSA_STORE_FUNC(TYPE, INSTR, FUNC_NAME) \
static inline void FUNC_NAME(TYPE val, void* const pdst) { \
  uint8_t* const pdst_m = (uint8_t*)pdst; \
  TYPE val_m = val; \
  asm volatile ( \
    " " #INSTR " %[val_m], %[pdst_m] \n\t" \
    : [pdst_m] "=m" (*pdst_m) \
    : [val_m] "r" (val_m)); \
}

#define MSA_STORE(val, pdst, FUNC_NAME) FUNC_NAME(val, pdst)
96
97
#if (__mips_isa_rev >= 6)
98
MSA_LOAD_FUNC(uint16_t, lh, msa_lh);
99
#define LH(psrc) MSA_LOAD(psrc, msa_lh)
100
MSA_LOAD_FUNC(uint32_t, lw, msa_lw);
101
#define LW(psrc) MSA_LOAD(psrc, msa_lw)
102
#if (__mips == 64)
103
MSA_LOAD_FUNC(uint64_t, ld, msa_ld);
104
#define LD(psrc) MSA_LOAD(psrc, msa_ld)
105
#else // !(__mips == 64)
106
#define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_lw)) << 32) | \
107
MSA_LOAD(psrc, msa_lw))
108
#endif // (__mips == 64)
109
110
MSA_STORE_FUNC(uint16_t, sh, msa_sh);
111
#define SH(val, pdst) MSA_STORE(val, pdst, msa_sh)
112
MSA_STORE_FUNC(uint32_t, sw, msa_sw);
113
#define SW(val, pdst) MSA_STORE(val, pdst, msa_sw)
114
MSA_STORE_FUNC(uint64_t, sd, msa_sd);
115
#define SD(val, pdst) MSA_STORE(val, pdst, msa_sd)
116
#else // !(__mips_isa_rev >= 6)
117
MSA_LOAD_FUNC(uint16_t, ulh, msa_ulh);
118
#define LH(psrc) MSA_LOAD(psrc, msa_ulh)
119
MSA_LOAD_FUNC(uint32_t, ulw, msa_ulw);
120
#define LW(psrc) MSA_LOAD(psrc, msa_ulw)
121
#if (__mips == 64)
122
MSA_LOAD_FUNC(uint64_t, uld, msa_uld);
123
#define LD(psrc) MSA_LOAD(psrc, msa_uld)
124
#else // !(__mips == 64)
125
#define LD(psrc) ((((uint64_t)MSA_LOAD(psrc + 4, msa_ulw)) << 32) | \
126
MSA_LOAD(psrc, msa_ulw))
127
#endif // (__mips == 64)
128
129
MSA_STORE_FUNC(uint16_t, ush, msa_ush);
130
#define SH(val, pdst) MSA_STORE(val, pdst, msa_ush)
131
MSA_STORE_FUNC(uint32_t, usw, msa_usw);
132
#define SW(val, pdst) MSA_STORE(val, pdst, msa_usw)
133
#define SD(val, pdst) do { \
134
uint8_t* const pdst_sd_m = (uint8_t*)(pdst); \
135
const uint32_t val0_m = (uint32_t)(val & 0x00000000FFFFFFFF); \
136
const uint32_t val1_m = (uint32_t)((val >> 32) & 0x00000000FFFFFFFF); \
137
SW(val0_m, pdst_sd_m); \
138
SW(val1_m, pdst_sd_m + 4); \
139
} while (0)
140
#endif // (__mips_isa_rev >= 6)
141
142
/* Description : Load 4 words with stride
 * Arguments   : Inputs  - psrc, stride
 *               Outputs - out0, out1, out2, out3
 * Details     : Load word in 'out0' from (psrc)
 *               Load word in 'out1' from (psrc + stride)
 *               Load word in 'out2' from (psrc + 2 * stride)
 *               Load word in 'out3' from (psrc + 3 * stride)
 * Note        : The pointer argument is parenthesized before the cast — a
 *               cast binds tighter than '+', so '(uint8_t*)p + i' would
 *               silently do byte arithmetic on a compound argument.
 */
#define LW4(psrc, stride, out0, out1, out2, out3) do { \
  const uint8_t* ptmp = (const uint8_t*)(psrc); \
  out0 = LW(ptmp); \
  ptmp += stride; \
  out1 = LW(ptmp); \
  ptmp += stride; \
  out2 = LW(ptmp); \
  ptmp += stride; \
  out3 = LW(ptmp); \
} while (0)

/* Description : Store words with stride
 * Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
 * Details     : Store word from 'in0' to (pdst)
 *               Store word from 'in1' to (pdst + stride)
 *               Store word from 'in2' to (pdst + 2 * stride)
 *               Store word from 'in3' to (pdst + 3 * stride)
 */
#define SW4(in0, in1, in2, in3, pdst, stride) do { \
  uint8_t* ptmp = (uint8_t*)(pdst); \
  SW(in0, ptmp); \
  ptmp += stride; \
  SW(in1, ptmp); \
  ptmp += stride; \
  SW(in2, ptmp); \
  ptmp += stride; \
  SW(in3, ptmp); \
} while (0)

// Same as SW4, storing only three words.
#define SW3(in0, in1, in2, pdst, stride) do { \
  uint8_t* ptmp = (uint8_t*)(pdst); \
  SW(in0, ptmp); \
  ptmp += stride; \
  SW(in1, ptmp); \
  ptmp += stride; \
  SW(in2, ptmp); \
} while (0)

// Same as SW4, storing only two words.
#define SW2(in0, in1, pdst, stride) do { \
  uint8_t* ptmp = (uint8_t*)(pdst); \
  SW(in0, ptmp); \
  ptmp += stride; \
  SW(in1, ptmp); \
} while (0)

/* Description : Store 4 double words with stride
 * Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
 * Details     : Store double word from 'in0' to (pdst)
 *               Store double word from 'in1' to (pdst + stride)
 *               Store double word from 'in2' to (pdst + 2 * stride)
 *               Store double word from 'in3' to (pdst + 3 * stride)
 */
#define SD4(in0, in1, in2, in3, pdst, stride) do { \
  uint8_t* ptmp = (uint8_t*)(pdst); \
  SD(in0, ptmp); \
  ptmp += stride; \
  SD(in1, ptmp); \
  ptmp += stride; \
  SD(in2, ptmp); \
  ptmp += stride; \
  SD(in3, ptmp); \
} while (0)
212
213
/* Description : Load vectors with 16 byte elements with stride
 * Arguments   : Inputs  - psrc, stride
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Load 16 byte elements in 'out0' from (psrc)
 *               Load 16 byte elements in 'out1' from (psrc + stride)
 */
#define LD_B2(RTYPE, psrc, stride, out0, out1) do { \
  out0 = LD_B(RTYPE, psrc); \
  out1 = LD_B(RTYPE, psrc + stride); \
} while (0)
#define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
#define LD_SB2(...) LD_B2(v16i8, __VA_ARGS__)

// Three strided rows: the first two via LD_B2, the third directly.
#define LD_B3(RTYPE, psrc, stride, out0, out1, out2) do { \
  LD_B2(RTYPE, psrc, stride, out0, out1); \
  out2 = LD_B(RTYPE, psrc + 2 * stride); \
} while (0)
#define LD_UB3(...) LD_B3(v16u8, __VA_ARGS__)
#define LD_SB3(...) LD_B3(v16i8, __VA_ARGS__)

// Four strided rows, composed from two LD_B2 pairs.
#define LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \
  LD_B2(RTYPE, psrc, stride, out0, out1); \
  LD_B2(RTYPE, psrc + 2 * stride, stride, out2, out3); \
} while (0)
#define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
#define LD_SB4(...) LD_B4(v16i8, __VA_ARGS__)

// Eight strided rows, composed from two LD_B4 groups.
#define LD_B8(RTYPE, psrc, stride, \
              out0, out1, out2, out3, out4, out5, out6, out7) do { \
  LD_B4(RTYPE, psrc, stride, out0, out1, out2, out3); \
  LD_B4(RTYPE, psrc + 4 * stride, stride, out4, out5, out6, out7); \
} while (0)
#define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__)
#define LD_SB8(...) LD_B8(v16i8, __VA_ARGS__)

/* Description : Load vectors with 8 halfword elements with stride
 * Arguments   : Inputs  - psrc, stride
 *               Outputs - out0, out1
 * Details     : Load 8 halfword elements in 'out0' from (psrc)
 *               Load 8 halfword elements in 'out1' from (psrc + stride)
 */
#define LD_H2(RTYPE, psrc, stride, out0, out1) do { \
  out0 = LD_H(RTYPE, psrc); \
  out1 = LD_H(RTYPE, psrc + stride); \
} while (0)
#define LD_UH2(...) LD_H2(v8u16, __VA_ARGS__)
#define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)

/* Description : Load vectors with 4 word elements with stride
 * Arguments   : Inputs  - psrc, stride
 *               Outputs - out0, out1, out2, out3
 * Details     : Load 4 word elements in 'out0' from (psrc + 0 * stride)
 *               Load 4 word elements in 'out1' from (psrc + 1 * stride)
 *               Load 4 word elements in 'out2' from (psrc + 2 * stride)
 *               Load 4 word elements in 'out3' from (psrc + 3 * stride)
 */
#define LD_W2(RTYPE, psrc, stride, out0, out1) do { \
  out0 = LD_W(RTYPE, psrc); \
  out1 = LD_W(RTYPE, psrc + stride); \
} while (0)
#define LD_UW2(...) LD_W2(v4u32, __VA_ARGS__)
#define LD_SW2(...) LD_W2(v4i32, __VA_ARGS__)

#define LD_W3(RTYPE, psrc, stride, out0, out1, out2) do { \
  LD_W2(RTYPE, psrc, stride, out0, out1); \
  out2 = LD_W(RTYPE, psrc + 2 * stride); \
} while (0)
#define LD_UW3(...) LD_W3(v4u32, __VA_ARGS__)
#define LD_SW3(...) LD_W3(v4i32, __VA_ARGS__)

#define LD_W4(RTYPE, psrc, stride, out0, out1, out2, out3) do { \
  LD_W2(RTYPE, psrc, stride, out0, out1); \
  LD_W2(RTYPE, psrc + 2 * stride, stride, out2, out3); \
} while (0)
#define LD_UW4(...) LD_W4(v4u32, __VA_ARGS__)
#define LD_SW4(...) LD_W4(v4i32, __VA_ARGS__)
290
291
/* Description : Store vectors of 16 byte elements with stride
 * Arguments   : Inputs - in0, in1, pdst, stride
 * Details     : Store 16 byte elements from 'in0' to (pdst)
 *               Store 16 byte elements from 'in1' to (pdst + stride)
 */
#define ST_B2(RTYPE, in0, in1, pdst, stride) do { \
  ST_B(RTYPE, in0, pdst); \
  ST_B(RTYPE, in1, pdst + stride); \
} while (0)
#define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
#define ST_SB2(...) ST_B2(v16i8, __VA_ARGS__)

// Four strided rows, composed from two ST_B2 pairs.
#define ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \
  ST_B2(RTYPE, in0, in1, pdst, stride); \
  ST_B2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
} while (0)
#define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
#define ST_SB4(...) ST_B4(v16i8, __VA_ARGS__)

// Eight strided rows, composed from two ST_B4 groups.
#define ST_B8(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7, \
              pdst, stride) do { \
  ST_B4(RTYPE, in0, in1, in2, in3, pdst, stride); \
  ST_B4(RTYPE, in4, in5, in6, in7, pdst + 4 * stride, stride); \
} while (0)
#define ST_UB8(...) ST_B8(v16u8, __VA_ARGS__)

/* Description : Store vectors of 4 word elements with stride
 * Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
 * Details     : Store 4 word elements from 'in0' to (pdst + 0 * stride)
 *               Store 4 word elements from 'in1' to (pdst + 1 * stride)
 *               Store 4 word elements from 'in2' to (pdst + 2 * stride)
 *               Store 4 word elements from 'in3' to (pdst + 3 * stride)
 */
#define ST_W2(RTYPE, in0, in1, pdst, stride) do { \
  ST_W(RTYPE, in0, pdst); \
  ST_W(RTYPE, in1, pdst + stride); \
} while (0)
#define ST_UW2(...) ST_W2(v4u32, __VA_ARGS__)
#define ST_SW2(...) ST_W2(v4i32, __VA_ARGS__)

#define ST_W3(RTYPE, in0, in1, in2, pdst, stride) do { \
  ST_W2(RTYPE, in0, in1, pdst, stride); \
  ST_W(RTYPE, in2, pdst + 2 * stride); \
} while (0)
#define ST_UW3(...) ST_W3(v4u32, __VA_ARGS__)
#define ST_SW3(...) ST_W3(v4i32, __VA_ARGS__)

#define ST_W4(RTYPE, in0, in1, in2, in3, pdst, stride) do { \
  ST_W2(RTYPE, in0, in1, pdst, stride); \
  ST_W2(RTYPE, in2, in3, pdst + 2 * stride, stride); \
} while (0)
#define ST_UW4(...) ST_W4(v4u32, __VA_ARGS__)
#define ST_SW4(...) ST_W4(v4i32, __VA_ARGS__)

/* Description : Store vectors of 8 halfword elements with stride
 * Arguments   : Inputs - in0, in1, pdst, stride
 * Details     : Store 8 halfword elements from 'in0' to (pdst)
 *               Store 8 halfword elements from 'in1' to (pdst + stride)
 */
#define ST_H2(RTYPE, in0, in1, pdst, stride) do { \
  ST_H(RTYPE, in0, pdst); \
  ST_H(RTYPE, in1, pdst + stride); \
} while (0)
#define ST_UH2(...) ST_H2(v8u16, __VA_ARGS__)
#define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
356
357
/* Description : Store 2x4 byte block to destination memory from input vector
 * Arguments   : Inputs - in, stidx, pdst, stride
 * Details     : Index 'stidx' halfword element from 'in' vector is copied to
 *               the GP register and stored to (pdst)
 *               Index 'stidx+1' halfword element from 'in' vector is copied to
 *               the GP register and stored to (pdst + stride)
 *               Index 'stidx+2' halfword element from 'in' vector is copied to
 *               the GP register and stored to (pdst + 2 * stride)
 *               Index 'stidx+3' halfword element from 'in' vector is copied to
 *               the GP register and stored to (pdst + 3 * stride)
 * Note        : 'pdst' is parenthesized before the cast so compound pointer
 *               arguments are not subjected to byte arithmetic.
 */
#define ST2x4_UB(in, stidx, pdst, stride) do { \
  uint8_t* pblk_2x4_m = (uint8_t*)(pdst); \
  const uint16_t out0_m = __msa_copy_s_h((v8i16)in, stidx); \
  const uint16_t out1_m = __msa_copy_s_h((v8i16)in, stidx + 1); \
  const uint16_t out2_m = __msa_copy_s_h((v8i16)in, stidx + 2); \
  const uint16_t out3_m = __msa_copy_s_h((v8i16)in, stidx + 3); \
  SH(out0_m, pblk_2x4_m); \
  pblk_2x4_m += stride; \
  SH(out1_m, pblk_2x4_m); \
  pblk_2x4_m += stride; \
  SH(out2_m, pblk_2x4_m); \
  pblk_2x4_m += stride; \
  SH(out3_m, pblk_2x4_m); \
} while (0)

/* Description : Store 4x4 byte block to destination memory from input vector
 * Arguments   : Inputs - in0, in1, pdst, stride
 * Details     : 'Idx0' word element from input vector 'in0' is copied to the
 *               GP register and stored to (pdst)
 *               'Idx1' word element from input vector 'in0' is copied to the
 *               GP register and stored to (pdst + stride)
 *               'Idx2' word element from input vector 'in1' is copied to the
 *               GP register and stored to (pdst + 2 * stride)
 *               'Idx3' word element from input vector 'in1' is copied to the
 *               GP register and stored to (pdst + 3 * stride)
 */
#define ST4x4_UB(in0, in1, idx0, idx1, idx2, idx3, pdst, stride) do { \
  uint8_t* const pblk_4x4_m = (uint8_t*)(pdst); \
  const uint32_t out0_m = __msa_copy_s_w((v4i32)in0, idx0); \
  const uint32_t out1_m = __msa_copy_s_w((v4i32)in0, idx1); \
  const uint32_t out2_m = __msa_copy_s_w((v4i32)in1, idx2); \
  const uint32_t out3_m = __msa_copy_s_w((v4i32)in1, idx3); \
  SW4(out0_m, out1_m, out2_m, out3_m, pblk_4x4_m, stride); \
} while (0)

// Stores a 4x8 block: the four words of 'in0' into the first four rows and
// the four words of 'in1' into the next four.
#define ST4x8_UB(in0, in1, pdst, stride) do { \
  uint8_t* const pblk_4x8 = (uint8_t*)(pdst); \
  ST4x4_UB(in0, in0, 0, 1, 2, 3, pblk_4x8, stride); \
  ST4x4_UB(in1, in1, 0, 1, 2, 3, pblk_4x8 + 4 * stride, stride); \
} while (0)
408
409
/* Description : Immediate number of elements to slide
 * Arguments   : Inputs  - in0, in1, slide_val
 *               Outputs - out
 *               Return Type - as per RTYPE
 * Details     : Byte elements from 'in1' vector are slid into 'in0' by
 *               value specified in the 'slide_val'
 */
// Fixed: the expansion previously ended in a stray line-continuation '\',
// which silently spliced the following source line into the macro.
#define SLDI_B(RTYPE, in0, in1, slide_val) \
  (RTYPE)__msa_sldi_b((v16i8)in0, (v16i8)in1, slide_val)

#define SLDI_UB(...) SLDI_B(v16u8, __VA_ARGS__)
#define SLDI_SB(...) SLDI_B(v16i8, __VA_ARGS__)
#define SLDI_SH(...) SLDI_B(v8i16, __VA_ARGS__)

/* Description : Shuffle byte vector elements as per mask vector
 * Arguments   : Inputs  - in0, in1, in2, in3, mask0, mask1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Byte elements from 'in0' & 'in1' are copied selectively to
 *               'out0' as per control vector 'mask0'
 */
#define VSHF_B(RTYPE, in0, in1, mask) \
  (RTYPE)__msa_vshf_b((v16i8)mask, (v16i8)in1, (v16i8)in0)

#define VSHF_UB(...) VSHF_B(v16u8, __VA_ARGS__)
#define VSHF_SB(...) VSHF_B(v16i8, __VA_ARGS__)
#define VSHF_UH(...) VSHF_B(v8u16, __VA_ARGS__)
#define VSHF_SH(...) VSHF_B(v8i16, __VA_ARGS__)

// Two independent shuffles: 'out0' from (in0, in1, mask0) and 'out1' from
// (in2, in3, mask1).
#define VSHF_B2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \
  out0 = VSHF_B(RTYPE, in0, in1, mask0); \
  out1 = VSHF_B(RTYPE, in2, in3, mask1); \
} while (0)
#define VSHF_B2_UB(...) VSHF_B2(v16u8, __VA_ARGS__)
#define VSHF_B2_SB(...) VSHF_B2(v16i8, __VA_ARGS__)
#define VSHF_B2_UH(...) VSHF_B2(v8u16, __VA_ARGS__)
#define VSHF_B2_SH(...) VSHF_B2(v8i16, __VA_ARGS__)
446
447
/* Description : Shuffle halfword vector elements as per mask vector
 * Arguments   : Inputs  - in0, in1, in2, in3, mask0, mask1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : halfword elements from 'in0' & 'in1' are copied selectively to
 *               'out0' as per control vector 'mask0'
 */
#define VSHF_H2(RTYPE, in0, in1, in2, in3, mask0, mask1, out0, out1) do { \
  out0 = (RTYPE)__msa_vshf_h((v8i16)mask0, (v8i16)in1, (v8i16)in0); \
  out1 = (RTYPE)__msa_vshf_h((v8i16)mask1, (v8i16)in3, (v8i16)in2); \
} while (0)
#define VSHF_H2_UH(...) VSHF_H2(v8u16, __VA_ARGS__)
#define VSHF_H2_SH(...) VSHF_H2(v8i16, __VA_ARGS__)

/* Description : Dot product of byte vector elements
 * Arguments   : Inputs  - mult0, mult1, cnst0, cnst1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Signed byte elements from 'mult0' are multiplied with
 *               signed byte elements from 'cnst0' producing a result
 *               twice the size of input i.e. signed halfword.
 *               The multiplication result of adjacent odd-even elements
 *               are added together and written to the 'out0' vector
 */
#define DOTP_SB2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  out0 = (RTYPE)__msa_dotp_s_h((v16i8)mult0, (v16i8)cnst0); \
  out1 = (RTYPE)__msa_dotp_s_h((v16i8)mult1, (v16i8)cnst1); \
} while (0)
#define DOTP_SB2_SH(...) DOTP_SB2(v8i16, __VA_ARGS__)

/* Description : Dot product of halfword vector elements
 * Arguments   : Inputs  - mult0, mult1, cnst0, cnst1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Signed halfword elements from 'mult0' are multiplied with
 *               signed halfword elements from 'cnst0' producing a result
 *               twice the size of input i.e. signed word.
 *               The multiplication result of adjacent odd-even elements
 *               are added together and written to the 'out0' vector
 */
#define DOTP_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  out0 = (RTYPE)__msa_dotp_s_w((v8i16)mult0, (v8i16)cnst0); \
  out1 = (RTYPE)__msa_dotp_s_w((v8i16)mult1, (v8i16)cnst1); \
} while (0)
#define DOTP_SH2_SW(...) DOTP_SH2(v4i32, __VA_ARGS__)

/* Description : Dot product of unsigned word vector elements
 * Arguments   : Inputs  - mult0, mult1, cnst0, cnst1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Unsigned word elements from 'mult0' are multiplied with
 *               unsigned word elements from 'cnst0' producing a result
 *               twice the size of input i.e. unsigned double word.
 *               The multiplication result of adjacent odd-even elements
 *               are added together and written to the 'out0' vector
 */
#define DOTP_UW2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  out0 = (RTYPE)__msa_dotp_u_d((v4u32)mult0, (v4u32)cnst0); \
  out1 = (RTYPE)__msa_dotp_u_d((v4u32)mult1, (v4u32)cnst1); \
} while (0)
#define DOTP_UW2_UD(...) DOTP_UW2(v2u64, __VA_ARGS__)

/* Description : Dot product & addition of halfword vector elements
 * Arguments   : Inputs  - mult0, mult1, cnst0, cnst1
 *               Outputs - out0, out1 (also read: accumulate into them)
 *               Return Type - as per RTYPE
 * Details     : Signed halfword elements from 'mult0' are multiplied with
 *               signed halfword elements from 'cnst0' producing a result
 *               twice the size of input i.e. signed word.
 *               The multiplication result of adjacent odd-even elements
 *               are added to the 'out0' vector
 */
#define DPADD_SH2(RTYPE, mult0, mult1, cnst0, cnst1, out0, out1) do { \
  out0 = (RTYPE)__msa_dpadd_s_w((v4i32)out0, (v8i16)mult0, (v8i16)cnst0); \
  out1 = (RTYPE)__msa_dpadd_s_w((v4i32)out1, (v8i16)mult1, (v8i16)cnst1); \
} while (0)
#define DPADD_SH2_SW(...) DPADD_SH2(v4i32, __VA_ARGS__)
524
525
/* Description : Clips all signed halfword elements of input vector
 *               between 0 & 255
 * Arguments   : Input/output - val
 *               Return Type - signed halfword
 * Details     : Implemented as max(val, 0) followed by min(val, 255).
 */
#define CLIP_SH_0_255(val) do { \
  const v8i16 max_m = __msa_ldi_h(255); \
  val = __msa_maxi_s_h((v8i16)val, 0); \
  val = __msa_min_s_h(max_m, (v8i16)val); \
} while (0)

#define CLIP_SH2_0_255(in0, in1) do { \
  CLIP_SH_0_255(in0); \
  CLIP_SH_0_255(in1); \
} while (0)

#define CLIP_SH4_0_255(in0, in1, in2, in3) do { \
  CLIP_SH2_0_255(in0, in1); \
  CLIP_SH2_0_255(in2, in3); \
} while (0)

/* Description : Clips all unsigned halfword elements of input vector
 *               between 0 & 255
 * Arguments   : Input/output - in
 *               Return Type - unsigned halfword
 * Note        : maxi_u with 0 is a no-op on unsigned data; presumably kept
 *               for symmetry with CLIP_SH_0_255.
 */
#define CLIP_UH_0_255(in) do { \
  const v8u16 max_m = (v8u16)__msa_ldi_h(255); \
  in = __msa_maxi_u_h((v8u16) in, 0); \
  in = __msa_min_u_h((v8u16) max_m, (v8u16) in); \
} while (0)

#define CLIP_UH2_0_255(in0, in1) do { \
  CLIP_UH_0_255(in0); \
  CLIP_UH_0_255(in1); \
} while (0)

/* Description : Clips all signed word elements of input vector
 *               between 0 & 255
 * Arguments   : Input/output - val
 *               Return Type - signed word
 */
#define CLIP_SW_0_255(val) do { \
  const v4i32 max_m = __msa_ldi_w(255); \
  val = __msa_maxi_s_w((v4i32)val, 0); \
  val = __msa_min_s_w(max_m, (v4i32)val); \
} while (0)

#define CLIP_SW4_0_255(in0, in1, in2, in3) do { \
  CLIP_SW_0_255(in0); \
  CLIP_SW_0_255(in1); \
  CLIP_SW_0_255(in2); \
  CLIP_SW_0_255(in3); \
} while (0)
580
581
/* Description : Horizontal addition of 4 signed word elements of input vector
 * Arguments   : Input  - in       (signed word vector)
 *               Output - sum_m    (i32 sum)
 *               Return Type - signed word (GP)
 * Details     : 4 signed word elements of 'in' vector are added together and
 *               the resulting integer sum is returned
 * Note        : WEBP_INLINE is defined elsewhere in the dsp module — this
 *               header relies on it being in scope before inclusion (TODO:
 *               confirm the include order).
 */
static WEBP_INLINE int32_t func_hadd_sw_s32(v4i32 in) {
  // Pairwise widen-add words to doublewords, then fold the two lanes.
  const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in);
  const v2i64 res1_m = __msa_splati_d(res0_m, 1);
  const v2i64 out = res0_m + res1_m;
  int32_t sum_m = __msa_copy_s_w((v4i32)out, 0);
  return sum_m;
}
#define HADD_SW_S32(in) func_hadd_sw_s32(in)

/* Description : Horizontal addition of 8 signed halfword elements
 * Arguments   : Input  - in       (signed halfword vector)
 *               Output - sum_m    (s32 sum)
 *               Return Type - signed word
 * Details     : 8 signed halfword elements of input vector are added
 *               together and the resulting integer sum is returned
 */
static WEBP_INLINE int32_t func_hadd_sh_s32(v8i16 in) {
  // Widen halfwords -> words -> doublewords, then fold the two lanes.
  const v4i32 res = __msa_hadd_s_w(in, in);
  const v2i64 res0 = __msa_hadd_s_d(res, res);
  const v2i64 res1 = __msa_splati_d(res0, 1);
  const v2i64 res2 = res0 + res1;
  const int32_t sum_m = __msa_copy_s_w((v4i32)res2, 0);
  return sum_m;
}
#define HADD_SH_S32(in) func_hadd_sh_s32(in)

/* Description : Horizontal addition of 8 unsigned halfword elements
 * Arguments   : Input  - in       (unsigned halfword vector)
 *               Output - sum_m    (u32 sum)
 *               Return Type - unsigned word
 * Details     : 8 unsigned halfword elements of input vector are added
 *               together and the resulting integer sum is returned
 */
static WEBP_INLINE uint32_t func_hadd_uh_u32(v8u16 in) {
  uint32_t sum_m;
  const v4u32 res_m = __msa_hadd_u_w(in, in);
  v2u64 res0_m = __msa_hadd_u_d(res_m, res_m);
  v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1);
  res0_m = res0_m + res1_m;
  // copy_s is used on the unsigned result; the low 32 bits are identical
  // either way.
  sum_m = __msa_copy_s_w((v4i32)res0_m, 0);
  return sum_m;
}
#define HADD_UH_U32(in) func_hadd_uh_u32(in)
631
632
/* Description : Horizontal addition of signed half word vector elements
 * Arguments   : Inputs  - in0, in1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Each signed odd half word element from 'in0' is added to
 *               even signed half word element from 'in0' (pairwise) and the
 *               halfword result is written in 'out0'
 */
#define HADD_SH2(RTYPE, in0, in1, out0, out1) do { \
  out0 = (RTYPE)__msa_hadd_s_w((v8i16)in0, (v8i16)in0); \
  out1 = (RTYPE)__msa_hadd_s_w((v8i16)in1, (v8i16)in1); \
} while (0)
#define HADD_SH2_SW(...) HADD_SH2(v4i32, __VA_ARGS__)

// Four-vector variant, composed from two HADD_SH2 pairs.
#define HADD_SH4(RTYPE, in0, in1, in2, in3, out0, out1, out2, out3) do { \
  HADD_SH2(RTYPE, in0, in1, out0, out1); \
  HADD_SH2(RTYPE, in2, in3, out2, out3); \
} while (0)
#define HADD_SH4_SW(...) HADD_SH4(v4i32, __VA_ARGS__)

/* Description : Horizontal subtraction of unsigned byte vector elements
 * Arguments   : Inputs  - in0, in1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Each unsigned odd byte element from 'in0' is subtracted from
 *               even unsigned byte element from 'in0' (pairwise) and the
 *               halfword result is written to 'out0'
 */
#define HSUB_UB2(RTYPE, in0, in1, out0, out1) do { \
  out0 = (RTYPE)__msa_hsub_u_h((v16u8)in0, (v16u8)in0); \
  out1 = (RTYPE)__msa_hsub_u_h((v16u8)in1, (v16u8)in1); \
} while (0)
#define HSUB_UB2_UH(...) HSUB_UB2(v8u16, __VA_ARGS__)
#define HSUB_UB2_SH(...) HSUB_UB2(v8i16, __VA_ARGS__)
#define HSUB_UB2_SW(...) HSUB_UB2(v4i32, __VA_ARGS__)
667
668
/* Description : Set element n input vector to GPR value
 * Arguments   : Inputs - in0, in1, in2, in3
 *               Output - out
 *               Return Type - as per RTYPE
 * Details     : Set element 0 in vector 'out' to value specified in 'in0'
 *               (remaining elements of 'out' are preserved)
 */
#define INSERT_W2(RTYPE, in0, in1, out) do { \
  out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
  out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
} while (0)
#define INSERT_W2_UB(...) INSERT_W2(v16u8, __VA_ARGS__)
#define INSERT_W2_SB(...) INSERT_W2(v16i8, __VA_ARGS__)

// Inserts four GPR words into word lanes 0..3 of 'out'.
#define INSERT_W4(RTYPE, in0, in1, in2, in3, out) do { \
  out = (RTYPE)__msa_insert_w((v4i32)out, 0, in0); \
  out = (RTYPE)__msa_insert_w((v4i32)out, 1, in1); \
  out = (RTYPE)__msa_insert_w((v4i32)out, 2, in2); \
  out = (RTYPE)__msa_insert_w((v4i32)out, 3, in3); \
} while (0)
#define INSERT_W4_UB(...) INSERT_W4(v16u8, __VA_ARGS__)
#define INSERT_W4_SB(...) INSERT_W4(v16i8, __VA_ARGS__)
#define INSERT_W4_SW(...) INSERT_W4(v4i32, __VA_ARGS__)

/* Description : Set element n of double word input vector to GPR value
 * Arguments   : Inputs - in0, in1
 *               Output - out
 *               Return Type - as per RTYPE
 * Details     : Set element 0 in vector 'out' to GPR value specified in 'in0'
 *               Set element 1 in vector 'out' to GPR value specified in 'in1'
 */
#define INSERT_D2(RTYPE, in0, in1, out) do { \
  out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
  out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
} while (0)
#define INSERT_D2_UB(...) INSERT_D2(v16u8, __VA_ARGS__)
#define INSERT_D2_SB(...) INSERT_D2(v16i8, __VA_ARGS__)
704
705
/* Description : Interleave even byte elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even byte elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 */
#define ILVEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  out0 = (RTYPE)__msa_ilvev_b((v16i8)in1, (v16i8)in0); \
  out1 = (RTYPE)__msa_ilvev_b((v16i8)in3, (v16i8)in2); \
} while (0)
#define ILVEV_B2_UB(...) ILVEV_B2(v16u8, __VA_ARGS__)
#define ILVEV_B2_SB(...) ILVEV_B2(v16i8, __VA_ARGS__)
#define ILVEV_B2_UH(...) ILVEV_B2(v8u16, __VA_ARGS__)
#define ILVEV_B2_SH(...) ILVEV_B2(v8i16, __VA_ARGS__)
#define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)

/* Description : Interleave odd byte elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Odd byte elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 */
#define ILVOD_B2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  out0 = (RTYPE)__msa_ilvod_b((v16i8)in1, (v16i8)in0); \
  out1 = (RTYPE)__msa_ilvod_b((v16i8)in3, (v16i8)in2); \
} while (0)
#define ILVOD_B2_UB(...) ILVOD_B2(v16u8, __VA_ARGS__)
#define ILVOD_B2_SB(...) ILVOD_B2(v16i8, __VA_ARGS__)
#define ILVOD_B2_UH(...) ILVOD_B2(v8u16, __VA_ARGS__)
#define ILVOD_B2_SH(...) ILVOD_B2(v8i16, __VA_ARGS__)
#define ILVOD_B2_SD(...) ILVOD_B2(v2i64, __VA_ARGS__)

/* Description : Interleave even halfword elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even halfword elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 */
#define ILVEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0); \
  out1 = (RTYPE)__msa_ilvev_h((v8i16)in3, (v8i16)in2); \
} while (0)
#define ILVEV_H2_UB(...) ILVEV_H2(v16u8, __VA_ARGS__)
#define ILVEV_H2_UH(...) ILVEV_H2(v8u16, __VA_ARGS__)
#define ILVEV_H2_SH(...) ILVEV_H2(v8i16, __VA_ARGS__)
#define ILVEV_H2_SW(...) ILVEV_H2(v4i32, __VA_ARGS__)

/* Description : Interleave odd halfword elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Odd halfword elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 */
#define ILVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do { \
  out0 = (RTYPE)__msa_ilvod_h((v8i16)in1, (v8i16)in0); \
  out1 = (RTYPE)__msa_ilvod_h((v8i16)in3, (v8i16)in2); \
} while (0)
#define ILVOD_H2_UB(...) ILVOD_H2(v16u8, __VA_ARGS__)
#define ILVOD_H2_UH(...) ILVOD_H2(v8u16, __VA_ARGS__)
#define ILVOD_H2_SH(...) ILVOD_H2(v8i16, __VA_ARGS__)
#define ILVOD_H2_SW(...) ILVOD_H2(v4i32, __VA_ARGS__)
770
771
/* Description : Interleave even word elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even word elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 */
#define ILVEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0);        \
  out1 = (RTYPE)__msa_ilvev_w((v4i32)in3, (v4i32)in2);        \
} while (0)
#define ILVEV_W2_UB(...) ILVEV_W2(v16u8, __VA_ARGS__)
#define ILVEV_W2_SB(...) ILVEV_W2(v16i8, __VA_ARGS__)
#define ILVEV_W2_UH(...) ILVEV_W2(v8u16, __VA_ARGS__)
#define ILVEV_W2_SD(...) ILVEV_W2(v2i64, __VA_ARGS__)
/* Description : Interleave even-odd word elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even word elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 *               Odd word elements of 'in2' and 'in3' are interleaved
 *               and written to 'out1'
 */
#define ILVEVOD_W2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvev_w((v4i32)in1, (v4i32)in0);          \
  out1 = (RTYPE)__msa_ilvod_w((v4i32)in3, (v4i32)in2);          \
} while (0)
#define ILVEVOD_W2_UB(...) ILVEVOD_W2(v16u8, __VA_ARGS__)
#define ILVEVOD_W2_UH(...) ILVEVOD_W2(v8u16, __VA_ARGS__)
#define ILVEVOD_W2_SH(...) ILVEVOD_W2(v8i16, __VA_ARGS__)
#define ILVEVOD_W2_SW(...) ILVEVOD_W2(v4i32, __VA_ARGS__)
/* Description : Interleave even-odd half-word elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even half-word elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 *               Odd half-word elements of 'in2' and 'in3' are interleaved
 *               and written to 'out1'
 */
#define ILVEVOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvev_h((v8i16)in1, (v8i16)in0);          \
  out1 = (RTYPE)__msa_ilvod_h((v8i16)in3, (v8i16)in2);          \
} while (0)
#define ILVEVOD_H2_UB(...) ILVEVOD_H2(v16u8, __VA_ARGS__)
#define ILVEVOD_H2_UH(...) ILVEVOD_H2(v8u16, __VA_ARGS__)
#define ILVEVOD_H2_SH(...) ILVEVOD_H2(v8i16, __VA_ARGS__)
#define ILVEVOD_H2_SW(...) ILVEVOD_H2(v4i32, __VA_ARGS__)
/* Description : Interleave even double word elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even double word elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'
 */
#define ILVEV_D2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvev_d((v2i64)in1, (v2i64)in0);        \
  out1 = (RTYPE)__msa_ilvev_d((v2i64)in3, (v2i64)in2);        \
} while (0)
#define ILVEV_D2_UB(...) ILVEV_D2(v16u8, __VA_ARGS__)
#define ILVEV_D2_SB(...) ILVEV_D2(v16i8, __VA_ARGS__)
#define ILVEV_D2_SW(...) ILVEV_D2(v4i32, __VA_ARGS__)
#define ILVEV_D2_SD(...) ILVEV_D2(v2i64, __VA_ARGS__)
/* Description : Interleave left half of byte elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Left half of byte elements of 'in0' and 'in1' are interleaved
 *               and written to 'out0'.
 */
#define ILVL_B2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1);        \
  out1 = (RTYPE)__msa_ilvl_b((v16i8)in2, (v16i8)in3);        \
} while (0)
#define ILVL_B2_UB(...) ILVL_B2(v16u8, __VA_ARGS__)
#define ILVL_B2_SB(...) ILVL_B2(v16i8, __VA_ARGS__)
#define ILVL_B2_UH(...) ILVL_B2(v8u16, __VA_ARGS__)
#define ILVL_B2_SH(...) ILVL_B2(v8i16, __VA_ARGS__)
#define ILVL_B2_SW(...) ILVL_B2(v4i32, __VA_ARGS__)
/* Description : Interleave right half of byte elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Right half of byte elements of 'in0' and 'in1' are interleaved
 *               and written to out0.
 */
#define ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1);        \
  out1 = (RTYPE)__msa_ilvr_b((v16i8)in2, (v16i8)in3);        \
} while (0)
#define ILVR_B2_UB(...) ILVR_B2(v16u8, __VA_ARGS__)
#define ILVR_B2_SB(...) ILVR_B2(v16i8, __VA_ARGS__)
#define ILVR_B2_UH(...) ILVR_B2(v8u16, __VA_ARGS__)
#define ILVR_B2_SH(...) ILVR_B2(v8i16, __VA_ARGS__)
#define ILVR_B2_SW(...) ILVR_B2(v4i32, __VA_ARGS__)

/* Four-pair variant: applies ILVR_B2 to two pairs of vector pairs. */
#define ILVR_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3) do {                    \
  ILVR_B2(RTYPE, in0, in1, in2, in3, out0, out1);               \
  ILVR_B2(RTYPE, in4, in5, in6, in7, out2, out3);               \
} while (0)
#define ILVR_B4_UB(...) ILVR_B4(v16u8, __VA_ARGS__)
#define ILVR_B4_SB(...) ILVR_B4(v16i8, __VA_ARGS__)
#define ILVR_B4_UH(...) ILVR_B4(v8u16, __VA_ARGS__)
#define ILVR_B4_SH(...) ILVR_B4(v8i16, __VA_ARGS__)
#define ILVR_B4_SW(...) ILVR_B4(v4i32, __VA_ARGS__)
/* Description : Interleave right half of halfword elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Right half of halfword elements of 'in0' and 'in1' are
 *               interleaved and written to 'out0'.
 */
#define ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1);        \
  out1 = (RTYPE)__msa_ilvr_h((v8i16)in2, (v8i16)in3);        \
} while (0)
#define ILVR_H2_UB(...) ILVR_H2(v16u8, __VA_ARGS__)
#define ILVR_H2_SH(...) ILVR_H2(v8i16, __VA_ARGS__)
#define ILVR_H2_SW(...) ILVR_H2(v4i32, __VA_ARGS__)

/* Four-pair variant: applies ILVR_H2 to two pairs of vector pairs. */
#define ILVR_H4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3) do {                    \
  ILVR_H2(RTYPE, in0, in1, in2, in3, out0, out1);               \
  ILVR_H2(RTYPE, in4, in5, in6, in7, out2, out3);               \
} while (0)
#define ILVR_H4_UB(...) ILVR_H4(v16u8, __VA_ARGS__)
#define ILVR_H4_SH(...) ILVR_H4(v8i16, __VA_ARGS__)
#define ILVR_H4_SW(...) ILVR_H4(v4i32, __VA_ARGS__)
/* Description : Interleave right half of double word elements from vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Right half of double word elements of 'in0' and 'in1' are
 *               interleaved and written to 'out0'.
 */
#define ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvr_d((v2i64)in0, (v2i64)in1);        \
  out1 = (RTYPE)__msa_ilvr_d((v2i64)in2, (v2i64)in3);        \
} while (0)
#define ILVR_D2_UB(...) ILVR_D2(v16u8, __VA_ARGS__)
#define ILVR_D2_SB(...) ILVR_D2(v16i8, __VA_ARGS__)
#define ILVR_D2_SH(...) ILVR_D2(v8i16, __VA_ARGS__)

/* Four-pair variant: applies ILVR_D2 to two pairs of vector pairs. */
#define ILVR_D4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                out0, out1, out2, out3) do {                    \
  ILVR_D2(RTYPE, in0, in1, in2, in3, out0, out1);               \
  ILVR_D2(RTYPE, in4, in5, in6, in7, out2, out3);               \
} while (0)
#define ILVR_D4_SB(...) ILVR_D4(v16i8, __VA_ARGS__)
#define ILVR_D4_UB(...) ILVR_D4(v16u8, __VA_ARGS__)
/* Description : Interleave both left and right half of input vectors
 * Arguments   : Inputs  - in0, in1
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Right half of byte elements from 'in0' and 'in1' are
 *               interleaved and written to 'out0'
 */
#define ILVRL_B2(RTYPE, in0, in1, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvr_b((v16i8)in0, (v16i8)in1);  \
  out1 = (RTYPE)__msa_ilvl_b((v16i8)in0, (v16i8)in1);  \
} while (0)
#define ILVRL_B2_UB(...) ILVRL_B2(v16u8, __VA_ARGS__)
#define ILVRL_B2_SB(...) ILVRL_B2(v16i8, __VA_ARGS__)
#define ILVRL_B2_UH(...) ILVRL_B2(v8u16, __VA_ARGS__)
#define ILVRL_B2_SH(...) ILVRL_B2(v8i16, __VA_ARGS__)
#define ILVRL_B2_SW(...) ILVRL_B2(v4i32, __VA_ARGS__)

/* Halfword variant: right/left interleave of halfword elements. */
#define ILVRL_H2(RTYPE, in0, in1, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvr_h((v8i16)in0, (v8i16)in1);  \
  out1 = (RTYPE)__msa_ilvl_h((v8i16)in0, (v8i16)in1);  \
} while (0)
#define ILVRL_H2_UB(...) ILVRL_H2(v16u8, __VA_ARGS__)
#define ILVRL_H2_SB(...) ILVRL_H2(v16i8, __VA_ARGS__)
#define ILVRL_H2_SH(...) ILVRL_H2(v8i16, __VA_ARGS__)
#define ILVRL_H2_SW(...) ILVRL_H2(v4i32, __VA_ARGS__)
#define ILVRL_H2_UW(...) ILVRL_H2(v4u32, __VA_ARGS__)

/* Word variant: right/left interleave of word elements. */
#define ILVRL_W2(RTYPE, in0, in1, out0, out1) do {  \
  out0 = (RTYPE)__msa_ilvr_w((v4i32)in0, (v4i32)in1);  \
  out1 = (RTYPE)__msa_ilvl_w((v4i32)in0, (v4i32)in1);  \
} while (0)
#define ILVRL_W2_UB(...) ILVRL_W2(v16u8, __VA_ARGS__)
#define ILVRL_W2_SH(...) ILVRL_W2(v8i16, __VA_ARGS__)
#define ILVRL_W2_SW(...) ILVRL_W2(v4i32, __VA_ARGS__)
#define ILVRL_W2_UW(...) ILVRL_W2(v4u32, __VA_ARGS__)
/* Description : Pack even byte elements of vector pairs
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even byte elements of 'in0' are copied to the left half of
 *               'out0' & even byte elements of 'in1' are copied to the right
 *               half of 'out0'.
 */
#define PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_pckev_b((v16i8)in0, (v16i8)in1);        \
  out1 = (RTYPE)__msa_pckev_b((v16i8)in2, (v16i8)in3);        \
} while (0)
#define PCKEV_B2_SB(...) PCKEV_B2(v16i8, __VA_ARGS__)
#define PCKEV_B2_UB(...) PCKEV_B2(v16u8, __VA_ARGS__)
#define PCKEV_B2_SH(...) PCKEV_B2(v8i16, __VA_ARGS__)
#define PCKEV_B2_SW(...) PCKEV_B2(v4i32, __VA_ARGS__)

/* Four-pair variant: applies PCKEV_B2 to two pairs of vector pairs. */
#define PCKEV_B4(RTYPE, in0, in1, in2, in3, in4, in5, in6, in7,  \
                 out0, out1, out2, out3) do {                    \
  PCKEV_B2(RTYPE, in0, in1, in2, in3, out0, out1);               \
  PCKEV_B2(RTYPE, in4, in5, in6, in7, out2, out3);               \
} while (0)
#define PCKEV_B4_SB(...) PCKEV_B4(v16i8, __VA_ARGS__)
#define PCKEV_B4_UB(...) PCKEV_B4(v16u8, __VA_ARGS__)
#define PCKEV_B4_SH(...) PCKEV_B4(v8i16, __VA_ARGS__)
#define PCKEV_B4_SW(...) PCKEV_B4(v4i32, __VA_ARGS__)
/* Description : Pack even halfword elements of vector pairs
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even halfword elements of 'in0' are copied to the left half of
 *               'out0' & even halfword elements of 'in1' are copied to the
 *               right half of 'out0'.
 */
#define PCKEV_H2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_pckev_h((v8i16)in0, (v8i16)in1);        \
  out1 = (RTYPE)__msa_pckev_h((v8i16)in2, (v8i16)in3);        \
} while (0)
#define PCKEV_H2_UH(...) PCKEV_H2(v8u16, __VA_ARGS__)
#define PCKEV_H2_SH(...) PCKEV_H2(v8i16, __VA_ARGS__)
#define PCKEV_H2_SW(...) PCKEV_H2(v4i32, __VA_ARGS__)
#define PCKEV_H2_UW(...) PCKEV_H2(v4u32, __VA_ARGS__)
/* Description : Pack even word elements of vector pairs
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Even word elements of 'in0' are copied to the left half of
 *               'out0' & even word elements of 'in1' are copied to the
 *               right half of 'out0'.
 */
#define PCKEV_W2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_pckev_w((v4i32)in0, (v4i32)in1);        \
  out1 = (RTYPE)__msa_pckev_w((v4i32)in2, (v4i32)in3);        \
} while (0)
#define PCKEV_W2_UH(...) PCKEV_W2(v8u16, __VA_ARGS__)
#define PCKEV_W2_SH(...) PCKEV_W2(v8i16, __VA_ARGS__)
#define PCKEV_W2_SW(...) PCKEV_W2(v4i32, __VA_ARGS__)
#define PCKEV_W2_UW(...) PCKEV_W2(v4u32, __VA_ARGS__)
/* Description : Pack odd halfword elements of vector pairs
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Odd halfword elements of 'in0' are copied to the left half of
 *               'out0' & odd halfword elements of 'in1' are copied to the
 *               right half of 'out0'.
 */
#define PCKOD_H2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_pckod_h((v8i16)in0, (v8i16)in1);        \
  out1 = (RTYPE)__msa_pckod_h((v8i16)in2, (v8i16)in3);        \
} while (0)
#define PCKOD_H2_UH(...) PCKOD_H2(v8u16, __VA_ARGS__)
#define PCKOD_H2_SH(...) PCKOD_H2(v8i16, __VA_ARGS__)
#define PCKOD_H2_SW(...) PCKOD_H2(v4i32, __VA_ARGS__)
#define PCKOD_H2_UW(...) PCKOD_H2(v4u32, __VA_ARGS__)
/* Description : Arithmetic immediate shift right all elements of word vector
 * Arguments   : Inputs  - in0, in1, shift
 *               Outputs - in place operation
 *               Return Type - as per input vector RTYPE
 * Details     : Each element of vector 'in0' is right shifted by 'shift' and
 *               the result is written in-place. 'shift' is a GP variable.
 */
#define SRAI_W2(RTYPE, in0, in1, shift_val) do {  \
  in0 = (RTYPE)SRAI_W(in0, shift_val);            \
  in1 = (RTYPE)SRAI_W(in1, shift_val);            \
} while (0)
#define SRAI_W2_SW(...) SRAI_W2(v4i32, __VA_ARGS__)
#define SRAI_W2_UW(...) SRAI_W2(v4u32, __VA_ARGS__)

/* Four-vector variant: applies SRAI_W2 to two vector pairs. */
#define SRAI_W4(RTYPE, in0, in1, in2, in3, shift_val) do {  \
  SRAI_W2(RTYPE, in0, in1, shift_val);                      \
  SRAI_W2(RTYPE, in2, in3, shift_val);                      \
} while (0)
#define SRAI_W4_SW(...) SRAI_W4(v4i32, __VA_ARGS__)
#define SRAI_W4_UW(...) SRAI_W4(v4u32, __VA_ARGS__)
/* Description : Arithmetic shift right all elements of half-word vector
 * Arguments   : Inputs  - in0, in1, shift
 *               Outputs - in place operation
 *               Return Type - as per input vector RTYPE
 * Details     : Each element of vector 'in0' is right shifted by 'shift' and
 *               the result is written in-place. 'shift' is a GP variable.
 */
#define SRAI_H2(RTYPE, in0, in1, shift_val) do {  \
  in0 = (RTYPE)SRAI_H(in0, shift_val);            \
  in1 = (RTYPE)SRAI_H(in1, shift_val);            \
} while (0)
#define SRAI_H2_SH(...) SRAI_H2(v8i16, __VA_ARGS__)
#define SRAI_H2_UH(...) SRAI_H2(v8u16, __VA_ARGS__)
/* Description : Arithmetic rounded shift right all elements of word vector
 * Arguments   : Inputs  - in0, in1, shift
 *               Outputs - in place operation
 *               Return Type - as per input vector RTYPE
 * Details     : Each element of vector 'in0' is right shifted by 'shift' and
 *               the result is written in-place. 'shift' is a GP variable.
 */
#define SRARI_W2(RTYPE, in0, in1, shift) do {     \
  in0 = (RTYPE)__msa_srari_w((v4i32)in0, shift);  \
  in1 = (RTYPE)__msa_srari_w((v4i32)in1, shift);  \
} while (0)
#define SRARI_W2_SW(...) SRARI_W2(v4i32, __VA_ARGS__)

/* Four-vector variant: applies SRARI_W2 to two vector pairs. */
#define SRARI_W4(RTYPE, in0, in1, in2, in3, shift) do {  \
  SRARI_W2(RTYPE, in0, in1, shift);                      \
  SRARI_W2(RTYPE, in2, in3, shift);                      \
} while (0)
#define SRARI_W4_SH(...) SRARI_W4(v8i16, __VA_ARGS__)
#define SRARI_W4_UW(...) SRARI_W4(v4u32, __VA_ARGS__)
#define SRARI_W4_SW(...) SRARI_W4(v4i32, __VA_ARGS__)
/* Description : Shift right arithmetic rounded double words
 * Arguments   : Inputs  - in0, in1, shift
 *               Outputs - in place operation
 *               Return Type - as per RTYPE
 * Details     : Each element of vector 'in0' is shifted right arithmetically by
 *               the number of bits in the corresponding element in the vector
 *               'shift'. The last discarded bit is added to shifted value for
 *               rounding and the result is written in-place.
 *               'shift' is a vector.
 */
#define SRAR_D2(RTYPE, in0, in1, shift) do {            \
  in0 = (RTYPE)__msa_srar_d((v2i64)in0, (v2i64)shift);  \
  in1 = (RTYPE)__msa_srar_d((v2i64)in1, (v2i64)shift);  \
} while (0)
#define SRAR_D2_SW(...) SRAR_D2(v4i32, __VA_ARGS__)
#define SRAR_D2_SD(...) SRAR_D2(v2i64, __VA_ARGS__)
#define SRAR_D2_UD(...) SRAR_D2(v2u64, __VA_ARGS__)

/* Four-vector variant: applies SRAR_D2 to two vector pairs. */
#define SRAR_D4(RTYPE, in0, in1, in2, in3, shift) do {  \
  SRAR_D2(RTYPE, in0, in1, shift);                      \
  SRAR_D2(RTYPE, in2, in3, shift);                      \
} while (0)
#define SRAR_D4_SD(...) SRAR_D4(v2i64, __VA_ARGS__)
#define SRAR_D4_UD(...) SRAR_D4(v2u64, __VA_ARGS__)
/* Description : Addition of 2 pairs of half-word vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 * Details     : Each element in 'in0' is added to 'in1' and result is written
 *               to 'out0'.
 */
#define ADDVI_H2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)ADDVI_H(in0, in1);                            \
  out1 = (RTYPE)ADDVI_H(in2, in3);                            \
} while (0)
#define ADDVI_H2_SH(...) ADDVI_H2(v8i16, __VA_ARGS__)
#define ADDVI_H2_UH(...) ADDVI_H2(v8u16, __VA_ARGS__)
/* Description : Addition of 2 pairs of word vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 * Details     : Each element in 'in0' is added to 'in1' and result is written
 *               to 'out0'.
 */
#define ADDVI_W2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)ADDVI_W(in0, in1);                            \
  out1 = (RTYPE)ADDVI_W(in2, in3);                            \
} while (0)
#define ADDVI_W2_SW(...) ADDVI_W2(v4i32, __VA_ARGS__)
/* Description : Fill 2 pairs of word vectors with GP registers
 * Arguments   : Inputs  - in0, in1
 *               Outputs - out0, out1
 * Details     : GP register in0 is replicated in each word element of out0
 *               GP register in1 is replicated in each word element of out1
 */
#define FILL_W2(RTYPE, in0, in1, out0, out1) do {  \
  out0 = (RTYPE)__msa_fill_w(in0);                 \
  out1 = (RTYPE)__msa_fill_w(in1);                 \
} while (0)
#define FILL_W2_SW(...) FILL_W2(v4i32, __VA_ARGS__)
/* Description : Addition of 2 pairs of vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 * Details     : Each element in 'in0' is added to 'in1' and result is written
 *               to 'out0'.
 */
#define ADD2(in0, in1, in2, in3, out0, out1) do {  \
  out0 = in0 + in1;                                \
  out1 = in2 + in3;                                \
} while (0)

/* Four-pair variant: applies ADD2 to two pairs of operand pairs. */
#define ADD4(in0, in1, in2, in3, in4, in5, in6, in7,  \
             out0, out1, out2, out3) do {             \
  ADD2(in0, in1, in2, in3, out0, out1);               \
  ADD2(in4, in5, in6, in7, out2, out3);               \
} while (0)
/* Description : Subtraction of 2 pairs of vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 * Details     : Each element in 'in1' is subtracted from 'in0' and result is
 *               written to 'out0'.
 */
#define SUB2(in0, in1, in2, in3, out0, out1) do {  \
  out0 = in0 - in1;                                \
  out1 = in2 - in3;                                \
} while (0)

/* Three-pair variant of SUB2. */
#define SUB3(in0, in1, in2, in3, in4, in5, out0, out1, out2) do {  \
  out0 = in0 - in1;                                                \
  out1 = in2 - in3;                                                \
  out2 = in4 - in5;                                                \
} while (0)

/* Four-pair variant of SUB2. */
#define SUB4(in0, in1, in2, in3, in4, in5, in6, in7,  \
             out0, out1, out2, out3) do {             \
  out0 = in0 - in1;                                   \
  out1 = in2 - in3;                                   \
  out2 = in4 - in5;                                   \
  out3 = in6 - in7;                                   \
} while (0)
/* Description : Addition - Subtraction of input vectors
 * Arguments   : Inputs  - in0, in1
 *               Outputs - out0, out1
 * Details     : Each element in 'in1' is added to 'in0' and result is
 *               written to 'out0'.
 *               Each element in 'in1' is subtracted from 'in0' and result is
 *               written to 'out1'.
 */
#define ADDSUB2(in0, in1, out0, out1) do {  \
  out0 = in0 + in1;                         \
  out1 = in0 - in1;                         \
} while (0)
/* Description : Multiplication of pairs of vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1
 * Details     : Each element from 'in0' is multiplied with elements from 'in1'
 *               and the result is written to 'out0'
 */
#define MUL2(in0, in1, in2, in3, out0, out1) do {  \
  out0 = in0 * in1;                                \
  out1 = in2 * in3;                                \
} while (0)

/* Four-pair variant: applies MUL2 to two pairs of operand pairs. */
#define MUL4(in0, in1, in2, in3, in4, in5, in6, in7,  \
             out0, out1, out2, out3) do {             \
  MUL2(in0, in1, in2, in3, out0, out1);               \
  MUL2(in4, in5, in6, in7, out2, out3);               \
} while (0)
/* Description : Sign extend halfword elements from right half of the vector
 * Arguments   : Input  - in  (halfword vector)
 *               Output - out (sign extended word vector)
 *               Return Type - signed word
 * Details     : Sign bit of halfword elements from input vector 'in' is
 *               extracted and interleaved with same vector 'in0' to generate
 *               4 word elements keeping sign intact
 */
#define UNPCK_R_SH_SW(in, out) do {                   \
  const v8i16 sign_m = __msa_clti_s_h((v8i16)in, 0);  \
  out = (v4i32)__msa_ilvr_h(sign_m, (v8i16)in);       \
} while (0)
/* Description : Sign extend halfword elements from input vector and return
 *               the result in pair of vectors
 * Arguments   : Input   - in  (halfword vector)
 *               Outputs - out0, out1 (sign extended word vectors)
 *               Return Type - signed word
 * Details     : Sign bit of halfword elements from input vector 'in' is
 *               extracted and interleaved right with same vector 'in0' to
 *               generate 4 signed word elements in 'out0'
 *               Then interleaved left with same vector 'in0' to
 *               generate 4 signed word elements in 'out1'
 */
#define UNPCK_SH_SW(in, out0, out1) do {             \
  const v8i16 tmp_m = __msa_clti_s_h((v8i16)in, 0);  \
  ILVRL_H2_SW(tmp_m, in, out0, out1);                \
} while (0)
/* Description : Butterfly of 4 input vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1, out2, out3
 * Details     : Butterfly operation (sum/difference cross pattern used by
 *               transform code).
 */
#define BUTTERFLY_4(in0, in1, in2, in3, out0, out1, out2, out3) do {  \
  out0 = in0 + in3;                                                   \
  out1 = in1 + in2;                                                   \
  out2 = in1 - in2;                                                   \
  out3 = in0 - in3;                                                   \
} while (0)
/* Description : Transpose 16x4 block into 4x16 with byte elements in vectors
 * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7,
 *                         in8, in9, in10, in11, in12, in13, in14, in15
 *               Outputs - out0, out1, out2, out3
 *               Return Type - unsigned byte
 */
#define TRANSPOSE16x4_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,    \
                            in8, in9, in10, in11, in12, in13, in14,    \
                            in15, out0, out1, out2, out3) do {         \
  v2i64 tmp0_m, tmp1_m, tmp2_m, tmp3_m, tmp4_m, tmp5_m;               \
  ILVEV_W2_SD(in0, in4, in8, in12, tmp2_m, tmp3_m);                   \
  ILVEV_W2_SD(in1, in5, in9, in13, tmp0_m, tmp1_m);                   \
  ILVEV_D2_UB(tmp2_m, tmp3_m, tmp0_m, tmp1_m, out1, out3);            \
  ILVEV_W2_SD(in2, in6, in10, in14, tmp4_m, tmp5_m);                  \
  ILVEV_W2_SD(in3, in7, in11, in15, tmp0_m, tmp1_m);                  \
  ILVEV_D2_SD(tmp4_m, tmp5_m, tmp0_m, tmp1_m, tmp2_m, tmp3_m);        \
  ILVEV_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m);            \
  ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out0, out2);          \
  ILVOD_B2_SD(out1, out3, tmp2_m, tmp3_m, tmp0_m, tmp1_m);            \
  ILVEVOD_H2_UB(tmp0_m, tmp1_m, tmp0_m, tmp1_m, out1, out3);          \
} while (0)
/* Description : Transpose 16x8 block into 8x16 with byte elements in vectors
 * Arguments   : Inputs  - in0, in1, in2, in3, in4, in5, in6, in7,
 *                         in8, in9, in10, in11, in12, in13, in14, in15
 *               Outputs - out0, out1, out2, out3, out4, out5, out6, out7
 *               Return Type - unsigned byte
 */
#define TRANSPOSE16x8_UB_UB(in0, in1, in2, in3, in4, in5, in6, in7,    \
                            in8, in9, in10, in11, in12, in13, in14,    \
                            in15, out0, out1, out2, out3, out4, out5,  \
                            out6, out7) do {                           \
  v8i16 tmp0_m, tmp1_m, tmp4_m, tmp5_m, tmp6_m, tmp7_m;               \
  v4i32 tmp2_m, tmp3_m;                                               \
  ILVEV_D2_UB(in0, in8, in1, in9, out7, out6);                        \
  ILVEV_D2_UB(in2, in10, in3, in11, out5, out4);                      \
  ILVEV_D2_UB(in4, in12, in5, in13, out3, out2);                      \
  ILVEV_D2_UB(in6, in14, in7, in15, out1, out0);                      \
  ILVEV_B2_SH(out7, out6, out5, out4, tmp0_m, tmp1_m);                \
  ILVOD_B2_SH(out7, out6, out5, out4, tmp4_m, tmp5_m);                \
  ILVEV_B2_UB(out3, out2, out1, out0, out5, out7);                    \
  ILVOD_B2_SH(out3, out2, out1, out0, tmp6_m, tmp7_m);                \
  ILVEV_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m);            \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out0, out4);          \
  ILVOD_H2_SW(tmp0_m, tmp1_m, out5, out7, tmp2_m, tmp3_m);            \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out2, out6);          \
  ILVEV_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m);        \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out1, out5);          \
  ILVOD_H2_SW(tmp4_m, tmp5_m, tmp6_m, tmp7_m, tmp2_m, tmp3_m);        \
  ILVEVOD_W2_UB(tmp2_m, tmp3_m, tmp2_m, tmp3_m, out3, out7);          \
} while (0)
/* Description : Transpose 4x4 block with word elements in vectors
 * Arguments   : Inputs  - in0, in1, in2, in3
 *               Outputs - out0, out1, out2, out3
 *               Return Type - as per RTYPE
 */
#define TRANSPOSE4x4_W(RTYPE, in0, in1, in2, in3,        \
                       out0, out1, out2, out3) do {      \
  v4i32 s0_m, s1_m, s2_m, s3_m;                          \
  ILVRL_W2_SW(in1, in0, s0_m, s1_m);                     \
  ILVRL_W2_SW(in3, in2, s2_m, s3_m);                     \
  out0 = (RTYPE)__msa_ilvr_d((v2i64)s2_m, (v2i64)s0_m);  \
  out1 = (RTYPE)__msa_ilvl_d((v2i64)s2_m, (v2i64)s0_m);  \
  out2 = (RTYPE)__msa_ilvr_d((v2i64)s3_m, (v2i64)s1_m);  \
  out3 = (RTYPE)__msa_ilvl_d((v2i64)s3_m, (v2i64)s1_m);  \
} while (0)
#define TRANSPOSE4x4_SW_SW(...) TRANSPOSE4x4_W(v4i32, __VA_ARGS__)
/* Description : Add block 4x4
 * Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
 * Details     : Least significant 4 bytes from each input vector are added to
 *               the destination bytes, clipped between 0-255 and stored.
 */
#define ADDBLK_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do {  \
  uint32_t src0_m, src1_m, src2_m, src3_m;                      \
  v8i16 inp0_m, inp1_m, res0_m, res1_m;                         \
  v16i8 dst0_m = { 0 };                                         \
  v16i8 dst1_m = { 0 };                                         \
  const v16i8 zero_m = { 0 };                                   \
  ILVR_D2_SH(in1, in0, in3, in2, inp0_m, inp1_m);               \
  LW4(pdst, stride, src0_m, src1_m, src2_m, src3_m);            \
  INSERT_W2_SB(src0_m, src1_m, dst0_m);                         \
  INSERT_W2_SB(src2_m, src3_m, dst1_m);                         \
  ILVR_B2_SH(zero_m, dst0_m, zero_m, dst1_m, res0_m, res1_m);   \
  ADD2(res0_m, inp0_m, res1_m, inp1_m, res0_m, res1_m);         \
  CLIP_SH2_0_255(res0_m, res1_m);                               \
  PCKEV_B2_SB(res0_m, res0_m, res1_m, res1_m, dst0_m, dst1_m);  \
  ST4x4_UB(dst0_m, dst1_m, 0, 1, 0, 1, pdst, stride);           \
} while (0)
/* Description : Pack even byte elements, extract 0 & 2 index words from pair
 *               of results and store 4 words in destination memory as per
 *               stride
 * Arguments   : Inputs - in0, in1, in2, in3, pdst, stride
 */
#define PCKEV_ST4x4_UB(in0, in1, in2, in3, pdst, stride) do {  \
  v16i8 tmp0_m, tmp1_m;                                        \
  PCKEV_B2_SB(in1, in0, in3, in2, tmp0_m, tmp1_m);             \
  ST4x4_UB(tmp0_m, tmp1_m, 0, 2, 0, 2, pdst, stride);          \
} while (0)
/* Description : average with rounding (in0 + in1 + 1) / 2.
 * Arguments   : Inputs  - in0, in1, in2, in3,
 *               Outputs - out0, out1
 *               Return Type - as per RTYPE
 * Details     : Each unsigned byte element from 'in0' vector is added with
 *               each unsigned byte element from 'in1' vector. Then the average
 *               with rounding is calculated and written to 'out0'
 */
#define AVER_UB2(RTYPE, in0, in1, in2, in3, out0, out1) do {  \
  out0 = (RTYPE)__msa_aver_u_b((v16u8)in0, (v16u8)in1);       \
  out1 = (RTYPE)__msa_aver_u_b((v16u8)in2, (v16u8)in3);       \
} while (0)
#define AVER_UB2_UB(...) AVER_UB2(v16u8, __VA_ARGS__)
#endif /* WEBP_DSP_MSA_MACRO_H_ */