// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// NEON variant of methods for lossless decoder
//
// Author: Skal ([email protected])

#include "src/dsp/dsp.h"

#if defined(WEBP_USE_NEON)

#include <arm_neon.h>

#include "src/dsp/lossless.h"
#include "src/dsp/neon.h"

//------------------------------------------------------------------------------
// Colorspace conversion functions

#if !defined(WORK_AROUND_GCC)
// gcc 4.6.0 (NDK-r9) had some trouble with this code, so it is only used
// with gcc 4.8.x and later.
static void ConvertBGRAToRGBA_NEON(const uint32_t* src,
                                   int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~15);
  for (; src < end; src += 16) {
    uint8x16x4_t pixel = vld4q_u8((uint8_t*)src);
    // swap B and R. (VSWP d0,d2 has no intrinsics equivalent!)
    const uint8x16_t tmp = pixel.val[0];
    pixel.val[0] = pixel.val[2];
    pixel.val[2] = tmp;
    vst4q_u8(dst, pixel);
    dst += 64;
  }
  VP8LConvertBGRAToRGBA_C(src, num_pixels & 15, dst);  // left-overs
}

static void ConvertBGRAToBGR_NEON(const uint32_t* src,
                                  int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~15);
  for (; src < end; src += 16) {
    const uint8x16x4_t pixel = vld4q_u8((uint8_t*)src);
    const uint8x16x3_t tmp = { { pixel.val[0], pixel.val[1], pixel.val[2] } };
    vst3q_u8(dst, tmp);
    dst += 48;
  }
  VP8LConvertBGRAToBGR_C(src, num_pixels & 15, dst);  // left-overs
}

static void ConvertBGRAToRGB_NEON(const uint32_t* src,
                                  int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~15);
  for (; src < end; src += 16) {
    const uint8x16x4_t pixel = vld4q_u8((uint8_t*)src);
    const uint8x16x3_t tmp = { { pixel.val[2], pixel.val[1], pixel.val[0] } };
    vst3q_u8(dst, tmp);
    dst += 48;
  }
  VP8LConvertBGRAToRGB_C(src, num_pixels & 15, dst);  // left-overs
}

#else  // WORK_AROUND_GCC

// gcc-4.6.0 fallback

static const uint8_t kRGBAShuffle[8] = { 2, 1, 0, 3, 6, 5, 4, 7 };

static void ConvertBGRAToRGBA_NEON(const uint32_t* src,
                                   int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~1);
  const uint8x8_t shuffle = vld1_u8(kRGBAShuffle);
  for (; src < end; src += 2) {
    const uint8x8_t pixels = vld1_u8((uint8_t*)src);
    vst1_u8(dst, vtbl1_u8(pixels, shuffle));
    dst += 8;
  }
  VP8LConvertBGRAToRGBA_C(src, num_pixels & 1, dst);  // left-overs
}

static const uint8_t kBGRShuffle[3][8] = {
  { 0, 1, 2, 4, 5, 6, 8, 9 },
  { 10, 12, 13, 14, 16, 17, 18, 20 },
  { 21, 22, 24, 25, 26, 28, 29, 30 }
};
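
// Each row of kBGRShuffle above holds byte indices into the 32 bytes loaded
// from eight BGRA pixels (see vtbl4_u8 in ConvertBGRAToBGR_NEON below):
// every fourth byte, i.e. the alpha at offsets 3, 7, 11, ..., is skipped,
// leaving the 24 B,G,R bytes in order.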

static void ConvertBGRAToBGR_NEON(const uint32_t* src,
                                  int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~7);
  const uint8x8_t shuffle0 = vld1_u8(kBGRShuffle[0]);
  const uint8x8_t shuffle1 = vld1_u8(kBGRShuffle[1]);
  const uint8x8_t shuffle2 = vld1_u8(kBGRShuffle[2]);
  for (; src < end; src += 8) {
    uint8x8x4_t pixels;
    INIT_VECTOR4(pixels,
                 vld1_u8((const uint8_t*)(src + 0)),
                 vld1_u8((const uint8_t*)(src + 2)),
                 vld1_u8((const uint8_t*)(src + 4)),
                 vld1_u8((const uint8_t*)(src + 6)));
    vst1_u8(dst + 0, vtbl4_u8(pixels, shuffle0));
    vst1_u8(dst + 8, vtbl4_u8(pixels, shuffle1));
    vst1_u8(dst + 16, vtbl4_u8(pixels, shuffle2));
    dst += 8 * 3;
  }
  VP8LConvertBGRAToBGR_C(src, num_pixels & 7, dst);  // left-overs
}

static const uint8_t kRGBShuffle[3][8] = {
  { 2, 1, 0, 6, 5, 4, 10, 9 },
  { 8, 14, 13, 12, 18, 17, 16, 22 },
  { 21, 20, 26, 25, 24, 30, 29, 28 }
};
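
// kRGBShuffle above uses the same indexing scheme as kBGRShuffle but also
// swaps the B and R bytes within each pixel (2,1,0 instead of 0,1,2), so the
// output is RGB rather than BGR.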

static void ConvertBGRAToRGB_NEON(const uint32_t* src,
                                  int num_pixels, uint8_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~7);
  const uint8x8_t shuffle0 = vld1_u8(kRGBShuffle[0]);
  const uint8x8_t shuffle1 = vld1_u8(kRGBShuffle[1]);
  const uint8x8_t shuffle2 = vld1_u8(kRGBShuffle[2]);
  for (; src < end; src += 8) {
    uint8x8x4_t pixels;
    INIT_VECTOR4(pixels,
                 vld1_u8((const uint8_t*)(src + 0)),
                 vld1_u8((const uint8_t*)(src + 2)),
                 vld1_u8((const uint8_t*)(src + 4)),
                 vld1_u8((const uint8_t*)(src + 6)));
    vst1_u8(dst + 0, vtbl4_u8(pixels, shuffle0));
    vst1_u8(dst + 8, vtbl4_u8(pixels, shuffle1));
    vst1_u8(dst + 16, vtbl4_u8(pixels, shuffle2));
    dst += 8 * 3;
  }
  VP8LConvertBGRAToRGB_C(src, num_pixels & 7, dst);  // left-overs
}

#endif  // !WORK_AROUND_GCC

//------------------------------------------------------------------------------
// Predictor Transform

#define LOAD_U32_AS_U8(IN) vreinterpret_u8_u32(vdup_n_u32((IN)))
#define LOAD_U32P_AS_U8(IN) vreinterpret_u8_u32(vld1_u32((IN)))
#define LOADQ_U32_AS_U8(IN) vreinterpretq_u8_u32(vdupq_n_u32((IN)))
#define LOADQ_U32P_AS_U8(IN) vreinterpretq_u8_u32(vld1q_u32((IN)))
#define GET_U8_AS_U32(IN) vget_lane_u32(vreinterpret_u32_u8((IN)), 0);
#define GETQ_U8_AS_U32(IN) vgetq_lane_u32(vreinterpretq_u32_u8((IN)), 0);
#define STOREQ_U8_AS_U32P(OUT, IN) vst1q_u32((OUT), vreinterpretq_u32_u8((IN)));
#define ROTATE32_LEFT(L) vextq_u8((L), (L), 12)  // D|C|B|A -> C|B|A|D

static WEBP_INLINE uint8x8_t Average2_u8_NEON(uint32_t a0, uint32_t a1) {
  const uint8x8_t A0 = LOAD_U32_AS_U8(a0);
  const uint8x8_t A1 = LOAD_U32_AS_U8(a1);
  return vhadd_u8(A0, A1);
}

static WEBP_INLINE uint32_t ClampedAddSubtractHalf_NEON(uint32_t c0,
                                                        uint32_t c1,
                                                        uint32_t c2) {
  const uint8x8_t avg = Average2_u8_NEON(c0, c1);
  // Subtract one from c2 when it is bigger than avg.
  const uint8x8_t C2 = LOAD_U32_AS_U8(c2);
  const uint8x8_t cmp = vcgt_u8(C2, avg);
  const uint8x8_t C2_1 = vadd_u8(C2, cmp);
  // Compute half of the difference between avg and c2.
  const int8x8_t diff_avg = vreinterpret_s8_u8(vhsub_u8(avg, C2_1));
  // Compute the sum with avg and saturate.
  const int16x8_t avg_16 = vreinterpretq_s16_u16(vmovl_u8(avg));
  const uint8x8_t res = vqmovun_s16(vaddw_s8(avg_16, diff_avg));
  const uint32_t output = GET_U8_AS_U32(res);
  return output;
}
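
// Note on ClampedAddSubtractHalf_NEON above: per channel it computes
// clamp(avg + (avg - c2) / 2) with avg = (c0 + c1) / 2. The conditional -1
// applied to c2 (via the 0xff comparison mask) compensates for vhsub_u8
// flooring the halved difference, so negative differences match the
// truncating division used by the plain-C version.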

static WEBP_INLINE uint32_t Average2_NEON(uint32_t a0, uint32_t a1) {
  const uint8x8_t avg_u8x8 = Average2_u8_NEON(a0, a1);
  const uint32_t avg = GET_U8_AS_U32(avg_u8x8);
  return avg;
}

static WEBP_INLINE uint32_t Average3_NEON(uint32_t a0, uint32_t a1,
                                          uint32_t a2) {
  const uint8x8_t avg0 = Average2_u8_NEON(a0, a2);
  const uint8x8_t A1 = LOAD_U32_AS_U8(a1);
  const uint32_t avg = GET_U8_AS_U32(vhadd_u8(avg0, A1));
  return avg;
}

static uint32_t Predictor5_NEON(uint32_t left, const uint32_t* const top) {
  return Average3_NEON(left, top[0], top[1]);
}
static uint32_t Predictor6_NEON(uint32_t left, const uint32_t* const top) {
  return Average2_NEON(left, top[-1]);
}
static uint32_t Predictor7_NEON(uint32_t left, const uint32_t* const top) {
  return Average2_NEON(left, top[0]);
}
static uint32_t Predictor13_NEON(uint32_t left, const uint32_t* const top) {
  return ClampedAddSubtractHalf_NEON(left, top[0], top[-1]);
}

// Batch versions of those functions.

// Predictor0: ARGB_BLACK.
static void PredictorAdd0_NEON(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* out) {
  int i;
  const uint8x16_t black = vreinterpretq_u8_u32(vdupq_n_u32(ARGB_BLACK));
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    const uint8x16_t res = vaddq_u8(src, black);
    STOREQ_U8_AS_U32P(&out[i], res);
  }
  VP8LPredictorsAdd_C[0](in + i, upper + i, num_pixels - i, out + i);
}

// Predictor1: left.
static void PredictorAdd1_NEON(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* out) {
  int i;
  const uint8x16_t zero = LOADQ_U32_AS_U8(0);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    // a | b | c | d
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    // 0 | a | b | c
    const uint8x16_t shift0 = vextq_u8(zero, src, 12);
    // a | a + b | b + c | c + d
    const uint8x16_t sum0 = vaddq_u8(src, shift0);
    // 0 | 0 | a | a + b
    const uint8x16_t shift1 = vextq_u8(zero, sum0, 8);
    // a | a + b | a + b + c | a + b + c + d
    const uint8x16_t sum1 = vaddq_u8(sum0, shift1);
    const uint8x16_t prev = LOADQ_U32_AS_U8(out[i - 1]);
    const uint8x16_t res = vaddq_u8(sum1, prev);
    STOREQ_U8_AS_U32P(&out[i], res);
  }
  VP8LPredictorsAdd_C[1](in + i, upper + i, num_pixels - i, out + i);
}
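
// Note: for predictor 1, out[x] = in[x] + out[x - 1], so the two
// shift-and-add steps above form a per-channel (mod 256) prefix sum of the
// four residuals; adding out[i - 1] to every lane then yields all four
// reconstructed pixels of the block in one go.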

// Macro that adds 32-bit integers from IN using mod 256 arithmetic
// per 8-bit channel.
#define GENERATE_PREDICTOR_1(X, IN) \
static void PredictorAdd##X##_NEON(const uint32_t* in, \
                                   const uint32_t* upper, int num_pixels, \
                                   uint32_t* out) { \
  int i; \
  for (i = 0; i + 4 <= num_pixels; i += 4) { \
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); \
    const uint8x16_t other = LOADQ_U32P_AS_U8(&(IN)); \
    const uint8x16_t res = vaddq_u8(src, other); \
    STOREQ_U8_AS_U32P(&out[i], res); \
  } \
  VP8LPredictorsAdd_C[(X)](in + i, upper + i, num_pixels - i, out + i); \
}
// Predictor2: Top.
GENERATE_PREDICTOR_1(2, upper[i])
// Predictor3: Top-right.
GENERATE_PREDICTOR_1(3, upper[i + 1])
// Predictor4: Top-left.
GENERATE_PREDICTOR_1(4, upper[i - 1])
#undef GENERATE_PREDICTOR_1

// Predictor5: average(average(left, TR), T)
#define DO_PRED5(LANE) do { \
  const uint8x16_t avgLTR = vhaddq_u8(L, TR); \
  const uint8x16_t avg = vhaddq_u8(avgLTR, T); \
  const uint8x16_t res = vaddq_u8(avg, src); \
  vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
  L = ROTATE32_LEFT(res); \
} while (0)
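
// Note: predictors that depend on 'left' cannot emit the four pixels of a
// block in parallel, since each reconstructed pixel is the 'left' input of
// the next one. Hence the per-lane store followed by ROTATE32_LEFT, which
// moves the pixel just computed in lane LANE into lane LANE + 1 of L, where
// the next DO_PRED* invocation expects it. DO_PRED67, DO_PRED10, DO_PRED11
// and DO_PRED13 below follow the same pattern.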

static void PredictorAdd5_NEON(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* out) {
  int i;
  uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i + 0]);
    const uint8x16_t TR = LOADQ_U32P_AS_U8(&upper[i + 1]);
    DO_PRED5(0);
    DO_PRED5(1);
    DO_PRED5(2);
    DO_PRED5(3);
  }
  VP8LPredictorsAdd_C[5](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED5

#define DO_PRED67(LANE) do { \
  const uint8x16_t avg = vhaddq_u8(L, top); \
  const uint8x16_t res = vaddq_u8(avg, src); \
  vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
  L = ROTATE32_LEFT(res); \
} while (0)

// Predictor6: average(left, TL)
static void PredictorAdd6_NEON(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* out) {
  int i;
  uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    const uint8x16_t top = LOADQ_U32P_AS_U8(&upper[i - 1]);
    DO_PRED67(0);
    DO_PRED67(1);
    DO_PRED67(2);
    DO_PRED67(3);
  }
  VP8LPredictorsAdd_C[6](in + i, upper + i, num_pixels - i, out + i);
}

// Predictor7: average(left, T)
static void PredictorAdd7_NEON(const uint32_t* in, const uint32_t* upper,
                               int num_pixels, uint32_t* out) {
  int i;
  uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    const uint8x16_t top = LOADQ_U32P_AS_U8(&upper[i]);
    DO_PRED67(0);
    DO_PRED67(1);
    DO_PRED67(2);
    DO_PRED67(3);
  }
  VP8LPredictorsAdd_C[7](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED67

#define GENERATE_PREDICTOR_2(X, IN) \
static void PredictorAdd##X##_NEON(const uint32_t* in, \
                                   const uint32_t* upper, int num_pixels, \
                                   uint32_t* out) { \
  int i; \
  for (i = 0; i + 4 <= num_pixels; i += 4) { \
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]); \
    const uint8x16_t Tother = LOADQ_U32P_AS_U8(&(IN)); \
    const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]); \
    const uint8x16_t avg = vhaddq_u8(T, Tother); \
    const uint8x16_t res = vaddq_u8(avg, src); \
    STOREQ_U8_AS_U32P(&out[i], res); \
  } \
  VP8LPredictorsAdd_C[(X)](in + i, upper + i, num_pixels - i, out + i); \
}
// Predictor8: average TL T.
GENERATE_PREDICTOR_2(8, upper[i - 1])
// Predictor9: average T TR.
GENERATE_PREDICTOR_2(9, upper[i + 1])
#undef GENERATE_PREDICTOR_2

// Predictor10: average of (average of (L,TL), average of (T, TR)).
#define DO_PRED10(LANE) do { \
  const uint8x16_t avgLTL = vhaddq_u8(L, TL); \
  const uint8x16_t avg = vhaddq_u8(avgTTR, avgLTL); \
  const uint8x16_t res = vaddq_u8(avg, src); \
  vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
  L = ROTATE32_LEFT(res); \
} while (0)

static void PredictorAdd10_NEON(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
    const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
    const uint8x16_t TR = LOADQ_U32P_AS_U8(&upper[i + 1]);
    const uint8x16_t avgTTR = vhaddq_u8(T, TR);
    DO_PRED10(0);
    DO_PRED10(1);
    DO_PRED10(2);
    DO_PRED10(3);
  }
  VP8LPredictorsAdd_C[10](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED10

// Predictor11: select.
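// pa = sum over channels of |L - TL| and pb = sum of |T - TL| (pb and the
// T-based sum are computed once per block in PredictorAdd11_NEON, pa per
// lane in DO_PRED11). The pixel is predicted from T when pa <= pb and from
// L otherwise; intuitively, |L - TL| estimates how well the vertical
// neighbour T would predict the current pixel, and |T - TL| how well L
// would.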
#define DO_PRED11(LANE) do { \
  const uint8x16_t sumLin = vaddq_u8(L, src);  /* in + L */ \
  const uint8x16_t pLTL = vabdq_u8(L, TL);  /* |L - TL| */ \
  const uint16x8_t sum_LTL = vpaddlq_u8(pLTL); \
  const uint32x4_t pa = vpaddlq_u16(sum_LTL); \
  const uint32x4_t mask = vcleq_u32(pa, pb); \
  const uint8x16_t res = \
      vbslq_u8(vreinterpretq_u8_u32(mask), sumTin, sumLin); \
  vst1q_lane_u32(&out[i + (LANE)], vreinterpretq_u32_u8(res), (LANE)); \
  L = ROTATE32_LEFT(res); \
} while (0)

static void PredictorAdd11_NEON(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
    const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
    const uint8x16_t pTTL = vabdq_u8(T, TL);  // |T - TL|
    const uint16x8_t sum_TTL = vpaddlq_u8(pTTL);
    const uint32x4_t pb = vpaddlq_u16(sum_TTL);
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    const uint8x16_t sumTin = vaddq_u8(T, src);  // in + T
    DO_PRED11(0);
    DO_PRED11(1);
    DO_PRED11(2);
    DO_PRED11(3);
  }
  VP8LPredictorsAdd_C[11](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED11

// Predictor12: ClampedAddSubtractFull.
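// Per channel this is clamp(L + (T - TL)): DO_PRED12 keeps the left pixel
// widened to 16 bits, adds the precomputed (T - TL) difference and saturates
// back to 8 bits with vqmovun_s16 before adding the residual from 'in'.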
#define DO_PRED12(DIFF, LANE) do { \
  const uint8x8_t pred = \
      vqmovun_s16(vaddq_s16(vreinterpretq_s16_u16(L), (DIFF))); \
  const uint8x8_t res = \
      vadd_u8(pred, (LANE <= 1) ? vget_low_u8(src) : vget_high_u8(src)); \
  const uint16x8_t res16 = vmovl_u8(res); \
  vst1_lane_u32(&out[i + (LANE)], vreinterpret_u32_u8(res), (LANE) & 1); \
  /* rotate in the left predictor for next iteration */ \
  L = vextq_u16(res16, res16, 4); \
} while (0)

static void PredictorAdd12_NEON(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  uint16x8_t L = vmovl_u8(LOAD_U32_AS_U8(out[-1]));
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    // load four pixels of source
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    // precompute the difference T - TL once and for all, stored as s16
    const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
    const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
    const int16x8_t diff_lo =
        vreinterpretq_s16_u16(vsubl_u8(vget_low_u8(T), vget_low_u8(TL)));
    const int16x8_t diff_hi =
        vreinterpretq_s16_u16(vsubl_u8(vget_high_u8(T), vget_high_u8(TL)));
    // loop over the four reconstructed pixels
    DO_PRED12(diff_lo, 0);
    DO_PRED12(diff_lo, 1);
    DO_PRED12(diff_hi, 2);
    DO_PRED12(diff_hi, 3);
  }
  VP8LPredictorsAdd_C[12](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED12

// Predictor13: ClampedAddSubtractHalf
#define DO_PRED13(LANE, LOW_OR_HI) do { \
  const uint8x16_t avg = vhaddq_u8(L, T); \
  const uint8x16_t cmp = vcgtq_u8(TL, avg); \
  const uint8x16_t TL_1 = vaddq_u8(TL, cmp); \
  /* Compute half of the difference between avg and TL'. */ \
  const int8x8_t diff_avg = \
      vreinterpret_s8_u8(LOW_OR_HI(vhsubq_u8(avg, TL_1))); \
  /* Compute the sum with avg and saturate. */ \
  const int16x8_t avg_16 = vreinterpretq_s16_u16(vmovl_u8(LOW_OR_HI(avg))); \
  const uint8x8_t delta = vqmovun_s16(vaddw_s8(avg_16, diff_avg)); \
  const uint8x8_t res = vadd_u8(LOW_OR_HI(src), delta); \
  const uint8x16_t res2 = vcombine_u8(res, res); \
  vst1_lane_u32(&out[i + (LANE)], vreinterpret_u32_u8(res), (LANE) & 1); \
  L = ROTATE32_LEFT(res2); \
} while (0)

static void PredictorAdd13_NEON(const uint32_t* in, const uint32_t* upper,
                                int num_pixels, uint32_t* out) {
  int i;
  uint8x16_t L = LOADQ_U32_AS_U8(out[-1]);
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t src = LOADQ_U32P_AS_U8(&in[i]);
    const uint8x16_t T = LOADQ_U32P_AS_U8(&upper[i]);
    const uint8x16_t TL = LOADQ_U32P_AS_U8(&upper[i - 1]);
    DO_PRED13(0, vget_low_u8);
    DO_PRED13(1, vget_low_u8);
    DO_PRED13(2, vget_high_u8);
    DO_PRED13(3, vget_high_u8);
  }
  VP8LPredictorsAdd_C[13](in + i, upper + i, num_pixels - i, out + i);
}
#undef DO_PRED13

#undef LOAD_U32_AS_U8
#undef LOAD_U32P_AS_U8
#undef LOADQ_U32_AS_U8
#undef LOADQ_U32P_AS_U8
#undef GET_U8_AS_U32
#undef GETQ_U8_AS_U32
#undef STOREQ_U8_AS_U32P
#undef ROTATE32_LEFT

//------------------------------------------------------------------------------
// Subtract-Green Transform

// vtbl?_u8 are marked unavailable for iOS arm64 with Xcode < 6.3, use
// non-standard versions there.
#if defined(__APPLE__) && defined(__aarch64__) && \
    defined(__apple_build_version__) && (__apple_build_version__ < 6020037)
#define USE_VTBLQ
#endif

#ifdef USE_VTBLQ
// 255 = byte will be zeroed
static const uint8_t kGreenShuffle[16] = {
  1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13, 255
};

static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb,
                                                  const uint8x16_t shuffle) {
  return vcombine_u8(vtbl1q_u8(argb, vget_low_u8(shuffle)),
                     vtbl1q_u8(argb, vget_high_u8(shuffle)));
}
#else  // !USE_VTBLQ
// 255 = byte will be zeroed
static const uint8_t kGreenShuffle[8] = { 1, 255, 1, 255, 5, 255, 5, 255 };

static WEBP_INLINE uint8x16_t DoGreenShuffle_NEON(const uint8x16_t argb,
                                                  const uint8x8_t shuffle) {
  return vcombine_u8(vtbl1_u8(vget_low_u8(argb), shuffle),
                     vtbl1_u8(vget_high_u8(argb), shuffle));
}
#endif  // USE_VTBLQ
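
// With the little-endian B,G,R,A byte order of the uint32_t pixels, the
// shuffles above build a (g, 0, g, 0) vector for each pixel, i.e. the green
// sample replicated at the blue and red byte positions. The single vaddq_u8
// in AddGreenToBlueAndRed_NEON below then adds green to both channels while
// leaving green and alpha untouched.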

static void AddGreenToBlueAndRed_NEON(const uint32_t* src, int num_pixels,
                                      uint32_t* dst) {
  const uint32_t* const end = src + (num_pixels & ~3);
#ifdef USE_VTBLQ
  const uint8x16_t shuffle = vld1q_u8(kGreenShuffle);
#else
  const uint8x8_t shuffle = vld1_u8(kGreenShuffle);
#endif
  for (; src < end; src += 4, dst += 4) {
    const uint8x16_t argb = vld1q_u8((const uint8_t*)src);
    const uint8x16_t greens = DoGreenShuffle_NEON(argb, shuffle);
    vst1q_u8((uint8_t*)dst, vaddq_u8(argb, greens));
  }
  // fallthrough and finish off with plain-C
  VP8LAddGreenToBlueAndRed_C(src, num_pixels & 3, dst);
}

//------------------------------------------------------------------------------
// Color Transform

static void TransformColorInverse_NEON(const VP8LMultipliers* const m,
                                       const uint32_t* const src,
                                       int num_pixels, uint32_t* dst) {
// sign-extended multiplying constants, pre-shifted by 6.
#define CST(X) (((int16_t)(m->X << 8)) >> 6)
  const int16_t rb[8] = {
    CST(green_to_blue_), CST(green_to_red_),
    CST(green_to_blue_), CST(green_to_red_),
    CST(green_to_blue_), CST(green_to_red_),
    CST(green_to_blue_), CST(green_to_red_)
  };
  const int16x8_t mults_rb = vld1q_s16(rb);
  const int16_t b2[8] = {
    0, CST(red_to_blue_), 0, CST(red_to_blue_),
    0, CST(red_to_blue_), 0, CST(red_to_blue_),
  };
  const int16x8_t mults_b2 = vld1q_s16(b2);
#undef CST
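  // Note on the fixed-point math: CST() evaluates to (int8_t)multiplier * 4,
  // and the shuffle below places each green sample in the high byte of a
  // 16-bit lane (i.e. g << 8). Since vqdmulhq_s16 computes (2 * a * b) >> 16,
  // the product works out to ((int8_t)g * (int8_t)multiplier) >> 5, the
  // color-transform delta defined by the lossless format.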
#ifdef USE_VTBLQ
  static const uint8_t kg0g0[16] = {
    255, 1, 255, 1, 255, 5, 255, 5, 255, 9, 255, 9, 255, 13, 255, 13
  };
  const uint8x16_t shuffle = vld1q_u8(kg0g0);
#else
  static const uint8_t k0g0g[8] = { 255, 1, 255, 1, 255, 5, 255, 5 };
  const uint8x8_t shuffle = vld1_u8(k0g0g);
#endif
  const uint32x4_t mask_ag = vdupq_n_u32(0xff00ff00u);
  int i;
  for (i = 0; i + 4 <= num_pixels; i += 4) {
    const uint8x16_t in = vld1q_u8((const uint8_t*)(src + i));
    const uint32x4_t a0g0 = vandq_u32(vreinterpretq_u32_u8(in), mask_ag);
    // 0 g 0 g
    const uint8x16_t greens = DoGreenShuffle_NEON(in, shuffle);
    // x dr x db1
    const int16x8_t A = vqdmulhq_s16(vreinterpretq_s16_u8(greens), mults_rb);
    // x r' x b'
    const int8x16_t B = vaddq_s8(vreinterpretq_s8_u8(in),
                                 vreinterpretq_s8_s16(A));
    // r' 0 b' 0
    const int16x8_t C = vshlq_n_s16(vreinterpretq_s16_s8(B), 8);
    // x db2 0 0
    const int16x8_t D = vqdmulhq_s16(C, mults_b2);
    // 0 x db2 0
    const uint32x4_t E = vshrq_n_u32(vreinterpretq_u32_s16(D), 8);
    // r' x b'' 0
    const int8x16_t F = vaddq_s8(vreinterpretq_s8_u32(E),
                                 vreinterpretq_s8_s16(C));
    // 0 r' 0 b''
    const uint16x8_t G = vshrq_n_u16(vreinterpretq_u16_s8(F), 8);
    const uint32x4_t out = vorrq_u32(vreinterpretq_u32_u16(G), a0g0);
    vst1q_u32(dst + i, out);
  }
  // Fall-back to C-version for left-overs.
  VP8LTransformColorInverse_C(m, src + i, num_pixels - i, dst + i);
}

#undef USE_VTBLQ

//------------------------------------------------------------------------------
// Entry point

extern void VP8LDspInitNEON(void);

WEBP_TSAN_IGNORE_FUNCTION void VP8LDspInitNEON(void) {
  VP8LPredictors[5] = Predictor5_NEON;
  VP8LPredictors[6] = Predictor6_NEON;
  VP8LPredictors[7] = Predictor7_NEON;
  VP8LPredictors[13] = Predictor13_NEON;

  VP8LPredictorsAdd[0] = PredictorAdd0_NEON;
  VP8LPredictorsAdd[1] = PredictorAdd1_NEON;
  VP8LPredictorsAdd[2] = PredictorAdd2_NEON;
  VP8LPredictorsAdd[3] = PredictorAdd3_NEON;
  VP8LPredictorsAdd[4] = PredictorAdd4_NEON;
  VP8LPredictorsAdd[5] = PredictorAdd5_NEON;
  VP8LPredictorsAdd[6] = PredictorAdd6_NEON;
  VP8LPredictorsAdd[7] = PredictorAdd7_NEON;
  VP8LPredictorsAdd[8] = PredictorAdd8_NEON;
  VP8LPredictorsAdd[9] = PredictorAdd9_NEON;
  VP8LPredictorsAdd[10] = PredictorAdd10_NEON;
  VP8LPredictorsAdd[11] = PredictorAdd11_NEON;
  VP8LPredictorsAdd[12] = PredictorAdd12_NEON;
  VP8LPredictorsAdd[13] = PredictorAdd13_NEON;

  VP8LConvertBGRAToRGBA = ConvertBGRAToRGBA_NEON;
  VP8LConvertBGRAToBGR = ConvertBGRAToBGR_NEON;
  VP8LConvertBGRAToRGB = ConvertBGRAToRGB_NEON;

  VP8LAddGreenToBlueAndRed = AddGreenToBlueAndRed_NEON;
  VP8LTransformColorInverse = TransformColorInverse_NEON;
}

#else  // !WEBP_USE_NEON

WEBP_DSP_INIT_STUB(VP8LDspInitNEON)

#endif  // WEBP_USE_NEON