GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/libwebp/src/enc/picture_csp_enc.c
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebPPicture utils for colorspace conversion
//
// Author: Skal ([email protected])

#include <assert.h>
#include <stdlib.h>
#include <math.h>

#include "sharpyuv/sharpyuv.h"
#include "sharpyuv/sharpyuv_csp.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/random_utils.h"
#include "src/utils/utils.h"
#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/yuv.h"
#include "src/dsp/cpu.h"

#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
#include <pthread.h>
#endif

// Comment out to disable gamma-compression during RGB->U/V averaging.
#define USE_GAMMA_COMPRESSION

// If defined, use table to compute x / alpha.
#define USE_INVERSE_ALPHA_TABLE

#ifdef WORDS_BIGENDIAN
// uint32_t 0xff000000 is 0xff,00,00,00 in memory
#define CHANNEL_OFFSET(i) (i)
#else
// uint32_t 0xff000000 is 0x00,00,00,ff in memory
#define CHANNEL_OFFSET(i) (3-(i))
#endif

#define ALPHA_OFFSET CHANNEL_OFFSET(0)
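// ALPHA_OFFSET is the byte index of the alpha channel within a packed 32-bit
// ARGB pixel as laid out in memory (0 on big-endian hosts, 3 on little-endian
// ones), so the alpha values can be scanned byte-wise from the argb buffer.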

//------------------------------------------------------------------------------
// Detection of non-trivial transparency

// Returns true if alpha[] has non-0xff values.
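// 'x_step' is the byte distance between two consecutive alpha samples in a
// row: 1 when alpha[] is a dedicated plane, 4 when it is interleaved within
// 32-bit ARGB pixels. 'y_step' is the row stride in bytes.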
static int CheckNonOpaque(const uint8_t* alpha, int width, int height,
                          int x_step, int y_step) {
  if (alpha == NULL) return 0;
  WebPInitAlphaProcessing();
  if (x_step == 1) {
    for (; height-- > 0; alpha += y_step) {
      if (WebPHasAlpha8b(alpha, width)) return 1;
    }
  } else {
    for (; height-- > 0; alpha += y_step) {
      if (WebPHasAlpha32b(alpha, width)) return 1;
    }
  }
  return 0;
}

// Checking for the presence of non-opaque alpha.
int WebPPictureHasTransparency(const WebPPicture* picture) {
  if (picture == NULL) return 0;
  if (picture->use_argb) {
    if (picture->argb != NULL) {
      return CheckNonOpaque((const uint8_t*)picture->argb + ALPHA_OFFSET,
                            picture->width, picture->height,
                            4, picture->argb_stride * sizeof(*picture->argb));
    }
    return 0;
  }
  return CheckNonOpaque(picture->a, picture->width, picture->height,
                        1, picture->a_stride);
}

//------------------------------------------------------------------------------
// Code for gamma correction

#if defined(USE_GAMMA_COMPRESSION)

// Gamma correction compensates loss of resolution during chroma subsampling.
#define GAMMA_FIX 12      // fixed-point precision for linear values
#define GAMMA_TAB_FIX 7   // fixed-point fractional bits precision
#define GAMMA_TAB_SIZE (1 << (GAMMA_FIX - GAMMA_TAB_FIX))
static const double kGamma = 0.80;
static const int kGammaScale = ((1 << GAMMA_FIX) - 1);
static const int kGammaTabScale = (1 << GAMMA_TAB_FIX);
static const int kGammaTabRounder = (1 << GAMMA_TAB_FIX >> 1);
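
// The two tables implement the gamma-compression round-trip used while
// averaging: GammaToLinear() maps an 8-bit value to a ~12-bit (GAMMA_FIX)
// linear value via kGammaToLinearTab[], the 2 or 4 linear samples of a block
// are summed, and LinearToGamma() maps the sum back to 8 bits by linear
// interpolation in the 33-entry kLinearToGammaTab[].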
static int kLinearToGammaTab[GAMMA_TAB_SIZE + 1];
static uint16_t kGammaToLinearTab[256];
static volatile int kGammaTablesOk = 0;
static void InitGammaTables(void);
extern VP8CPUInfo VP8GetCPUInfo;

WEBP_DSP_INIT_FUNC(InitGammaTables) {
  if (!kGammaTablesOk) {
    int v;
    const double scale = (double)(1 << GAMMA_TAB_FIX) / kGammaScale;
    const double norm = 1. / 255.;
    for (v = 0; v <= 255; ++v) {
      kGammaToLinearTab[v] =
          (uint16_t)(pow(norm * v, kGamma) * kGammaScale + .5);
    }
    for (v = 0; v <= GAMMA_TAB_SIZE; ++v) {
      kLinearToGammaTab[v] = (int)(255. * pow(scale * v, 1. / kGamma) + .5);
    }
    kGammaTablesOk = 1;
  }
}

static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
  return kGammaToLinearTab[v];
}

static WEBP_INLINE int Interpolate(int v) {
  const int tab_pos = v >> (GAMMA_TAB_FIX + 2);    // integer part
  const int x = v & ((kGammaTabScale << 2) - 1);   // fractional part
  const int v0 = kLinearToGammaTab[tab_pos];
  const int v1 = kLinearToGammaTab[tab_pos + 1];
  const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x);   // interpolate
  assert(tab_pos + 1 < GAMMA_TAB_SIZE + 1);
  return y;
}

// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
  const int y = Interpolate(base_value << shift);   // final uplifted value
  return (y + kGammaTabRounder) >> GAMMA_TAB_FIX;   // descale
}

#else

static void InitGammaTables(void) {}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { return v; }
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
  return (int)(base_value << shift);
}

#endif  // USE_GAMMA_COMPRESSION

//------------------------------------------------------------------------------
// RGB -> YUV conversion
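//
// When dithering is enabled (rg != NULL), the fixed rounding bias (YUV_HALF,
// or YUV_HALF << 2 for the wider U/V sums) is replaced by a pseudo-random
// term from VP8RandomBits(), so the rounding decision is dithered instead of
// always falling the same way.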

static int RGBToY(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToY(r, g, b, YUV_HALF)
                      : VP8RGBToY(r, g, b, VP8RandomBits(rg, YUV_FIX));
}

static int RGBToU(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToU(r, g, b, YUV_HALF << 2)
                      : VP8RGBToU(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}

static int RGBToV(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToV(r, g, b, YUV_HALF << 2)
                      : VP8RGBToV(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}

//------------------------------------------------------------------------------
// Sharp RGB->YUV conversion

static const int kMinDimensionIterativeConversion = 4;
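// Pictures smaller than this in either dimension never use the iterative
// ("sharp") conversion; ImportYUVAFromRGBA() falls back to the regular path.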

//------------------------------------------------------------------------------
// Main function

static int PreprocessARGB(const uint8_t* r_ptr,
                          const uint8_t* g_ptr,
                          const uint8_t* b_ptr,
                          int step, int rgb_stride,
                          WebPPicture* const picture) {
  const int ok = SharpYuvConvert(
      r_ptr, g_ptr, b_ptr, step, rgb_stride, /*rgb_bit_depth=*/8,
      picture->y, picture->y_stride, picture->u, picture->uv_stride, picture->v,
      picture->uv_stride, /*yuv_bit_depth=*/8, picture->width,
      picture->height, SharpYuvGetConversionMatrix(kSharpYuvMatrixWebp));
  if (!ok) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
  }
  return ok;
}

//------------------------------------------------------------------------------
// "Fast" regular RGB->YUV

#define SUM4(ptr, step) LinearToGamma(                     \
    GammaToLinear((ptr)[0]) +                              \
    GammaToLinear((ptr)[(step)]) +                         \
    GammaToLinear((ptr)[rgb_stride]) +                     \
    GammaToLinear((ptr)[rgb_stride + (step)]), 0)          \

#define SUM2(ptr) \
    LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[rgb_stride]), 1)

#define SUM2ALPHA(ptr) ((ptr)[0] + (ptr)[rgb_stride])
#define SUM4ALPHA(ptr) (SUM2ALPHA(ptr) + SUM2ALPHA((ptr) + 4))

#if defined(USE_INVERSE_ALPHA_TABLE)

static const int kAlphaFix = 19;
// Following table is (1 << kAlphaFix) / a. The (v * kInvAlpha[a]) >> kAlphaFix
// formula is then equal to v / a in most (99.6%) cases. Note that this table
// and constant are adjusted very tightly to fit 32b arithmetic.
// In particular, they use the fact that the operands for 'v / a' are actually
// derived as v = (a0.p0 + a1.p1 + a2.p2 + a3.p3) and a = a0 + a1 + a2 + a3
// with ai in [0..255] and pi in [0..1<<GAMMA_FIX). The constraint to avoid
// overflow is: GAMMA_FIX + kAlphaFix <= 31.
static const uint32_t kInvAlpha[4 * 0xff + 1] = {
  0,  /* alpha = 0 */
  524288, 262144, 174762, 131072, 104857, 87381, 74898, 65536,
  58254, 52428, 47662, 43690, 40329, 37449, 34952, 32768,
  30840, 29127, 27594, 26214, 24966, 23831, 22795, 21845,
  20971, 20164, 19418, 18724, 18078, 17476, 16912, 16384,
  15887, 15420, 14979, 14563, 14169, 13797, 13443, 13107,
  12787, 12483, 12192, 11915, 11650, 11397, 11155, 10922,
  10699, 10485, 10280, 10082, 9892, 9709, 9532, 9362,
  9198, 9039, 8886, 8738, 8594, 8456, 8322, 8192,
  8065, 7943, 7825, 7710, 7598, 7489, 7384, 7281,
  7182, 7084, 6990, 6898, 6808, 6721, 6636, 6553,
  6472, 6393, 6316, 6241, 6168, 6096, 6026, 5957,
  5890, 5825, 5761, 5698, 5637, 5577, 5518, 5461,
  5405, 5349, 5295, 5242, 5190, 5140, 5090, 5041,
  4993, 4946, 4899, 4854, 4809, 4766, 4723, 4681,
  4639, 4599, 4559, 4519, 4481, 4443, 4405, 4369,
  4332, 4297, 4262, 4228, 4194, 4161, 4128, 4096,
  4064, 4032, 4002, 3971, 3942, 3912, 3883, 3855,
  3826, 3799, 3771, 3744, 3718, 3692, 3666, 3640,
  3615, 3591, 3566, 3542, 3518, 3495, 3472, 3449,
  3426, 3404, 3382, 3360, 3339, 3318, 3297, 3276,
  3256, 3236, 3216, 3196, 3177, 3158, 3139, 3120,
  3102, 3084, 3066, 3048, 3030, 3013, 2995, 2978,
  2962, 2945, 2928, 2912, 2896, 2880, 2864, 2849,
  2833, 2818, 2803, 2788, 2774, 2759, 2744, 2730,
  2716, 2702, 2688, 2674, 2661, 2647, 2634, 2621,
  2608, 2595, 2582, 2570, 2557, 2545, 2532, 2520,
  2508, 2496, 2484, 2473, 2461, 2449, 2438, 2427,
  2416, 2404, 2394, 2383, 2372, 2361, 2351, 2340,
  2330, 2319, 2309, 2299, 2289, 2279, 2269, 2259,
  2250, 2240, 2231, 2221, 2212, 2202, 2193, 2184,
  2175, 2166, 2157, 2148, 2139, 2131, 2122, 2114,
  2105, 2097, 2088, 2080, 2072, 2064, 2056, 2048,
  2040, 2032, 2024, 2016, 2008, 2001, 1993, 1985,
  1978, 1971, 1963, 1956, 1949, 1941, 1934, 1927,
  1920, 1913, 1906, 1899, 1892, 1885, 1879, 1872,
  1865, 1859, 1852, 1846, 1839, 1833, 1826, 1820,
  1814, 1807, 1801, 1795, 1789, 1783, 1777, 1771,
  1765, 1759, 1753, 1747, 1741, 1736, 1730, 1724,
  1718, 1713, 1707, 1702, 1696, 1691, 1685, 1680,
  1675, 1669, 1664, 1659, 1653, 1648, 1643, 1638,
  1633, 1628, 1623, 1618, 1613, 1608, 1603, 1598,
  1593, 1588, 1583, 1579, 1574, 1569, 1565, 1560,
  1555, 1551, 1546, 1542, 1537, 1533, 1528, 1524,
  1519, 1515, 1510, 1506, 1502, 1497, 1493, 1489,
  1485, 1481, 1476, 1472, 1468, 1464, 1460, 1456,
  1452, 1448, 1444, 1440, 1436, 1432, 1428, 1424,
  1420, 1416, 1413, 1409, 1405, 1401, 1398, 1394,
  1390, 1387, 1383, 1379, 1376, 1372, 1368, 1365,
  1361, 1358, 1354, 1351, 1347, 1344, 1340, 1337,
  1334, 1330, 1327, 1323, 1320, 1317, 1314, 1310,
  1307, 1304, 1300, 1297, 1294, 1291, 1288, 1285,
  1281, 1278, 1275, 1272, 1269, 1266, 1263, 1260,
  1257, 1254, 1251, 1248, 1245, 1242, 1239, 1236,
  1233, 1230, 1227, 1224, 1222, 1219, 1216, 1213,
  1210, 1208, 1205, 1202, 1199, 1197, 1194, 1191,
  1188, 1186, 1183, 1180, 1178, 1175, 1172, 1170,
  1167, 1165, 1162, 1159, 1157, 1154, 1152, 1149,
  1147, 1144, 1142, 1139, 1137, 1134, 1132, 1129,
  1127, 1125, 1122, 1120, 1117, 1115, 1113, 1110,
  1108, 1106, 1103, 1101, 1099, 1096, 1094, 1092,
  1089, 1087, 1085, 1083, 1081, 1078, 1076, 1074,
  1072, 1069, 1067, 1065, 1063, 1061, 1059, 1057,
  1054, 1052, 1050, 1048, 1046, 1044, 1042, 1040,
  1038, 1036, 1034, 1032, 1030, 1028, 1026, 1024,
  1022, 1020, 1018, 1016, 1014, 1012, 1010, 1008,
  1006, 1004, 1002, 1000, 998, 996, 994, 992,
  991, 989, 987, 985, 983, 981, 979, 978,
  976, 974, 972, 970, 969, 967, 965, 963,
  961, 960, 958, 956, 954, 953, 951, 949,
  948, 946, 944, 942, 941, 939, 937, 936,
  934, 932, 931, 929, 927, 926, 924, 923,
  921, 919, 918, 916, 914, 913, 911, 910,
  908, 907, 905, 903, 902, 900, 899, 897,
  896, 894, 893, 891, 890, 888, 887, 885,
  884, 882, 881, 879, 878, 876, 875, 873,
  872, 870, 869, 868, 866, 865, 863, 862,
  860, 859, 858, 856, 855, 853, 852, 851,
  849, 848, 846, 845, 844, 842, 841, 840,
  838, 837, 836, 834, 833, 832, 830, 829,
  828, 826, 825, 824, 823, 821, 820, 819,
  817, 816, 815, 814, 812, 811, 810, 809,
  807, 806, 805, 804, 802, 801, 800, 799,
  798, 796, 795, 794, 793, 791, 790, 789,
  788, 787, 786, 784, 783, 782, 781, 780,
  779, 777, 776, 775, 774, 773, 772, 771,
  769, 768, 767, 766, 765, 764, 763, 762,
  760, 759, 758, 757, 756, 755, 754, 753,
  752, 751, 750, 748, 747, 746, 745, 744,
  743, 742, 741, 740, 739, 738, 737, 736,
  735, 734, 733, 732, 731, 730, 729, 728,
  727, 726, 725, 724, 723, 722, 721, 720,
  719, 718, 717, 716, 715, 714, 713, 712,
  711, 710, 709, 708, 707, 706, 705, 704,
  703, 702, 701, 700, 699, 699, 698, 697,
  696, 695, 694, 693, 692, 691, 690, 689,
  688, 688, 687, 686, 685, 684, 683, 682,
  681, 680, 680, 679, 678, 677, 676, 675,
  674, 673, 673, 672, 671, 670, 669, 668,
  667, 667, 666, 665, 664, 663, 662, 661,
  661, 660, 659, 658, 657, 657, 656, 655,
  654, 653, 652, 652, 651, 650, 649, 648,
  648, 647, 646, 645, 644, 644, 643, 642,
  641, 640, 640, 639, 638, 637, 637, 636,
  635, 634, 633, 633, 632, 631, 630, 630,
  629, 628, 627, 627, 626, 625, 624, 624,
  623, 622, 621, 621, 620, 619, 618, 618,
  617, 616, 616, 615, 614, 613, 613, 612,
  611, 611, 610, 609, 608, 608, 607, 606,
  606, 605, 604, 604, 603, 602, 601, 601,
  600, 599, 599, 598, 597, 597, 596, 595,
  595, 594, 593, 593, 592, 591, 591, 590,
  589, 589, 588, 587, 587, 586, 585, 585,
  584, 583, 583, 582, 581, 581, 580, 579,
  579, 578, 578, 577, 576, 576, 575, 574,
  574, 573, 572, 572, 571, 571, 570, 569,
  569, 568, 568, 567, 566, 566, 565, 564,
  564, 563, 563, 562, 561, 561, 560, 560,
  559, 558, 558, 557, 557, 556, 555, 555,
  554, 554, 553, 553, 552, 551, 551, 550,
  550, 549, 548, 548, 547, 547, 546, 546,
  545, 544, 544, 543, 543, 542, 542, 541,
  541, 540, 539, 539, 538, 538, 537, 537,
  536, 536, 535, 534, 534, 533, 533, 532,
  532, 531, 531, 530, 530, 529, 529, 528,
  527, 527, 526, 526, 525, 525, 524, 524,
  523, 523, 522, 522, 521, 521, 520, 520,
  519, 519, 518, 518, 517, 517, 516, 516,
  515, 515, 514, 514
};

// Note that LinearToGamma() expects the values to be premultiplied by 4,
// so we incorporate this factor 4 inside the DIVIDE_BY_ALPHA macro directly.
#define DIVIDE_BY_ALPHA(sum, a) (((sum) * kInvAlpha[(a)]) >> (kAlphaFix - 2))
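// DIVIDE_BY_ALPHA(sum, a) therefore approximates 4 * sum / a. For example,
// with a = 2 and sum = 510: kInvAlpha[2] = 262144 and
// (510 * 262144) >> (19 - 2) = 1020, which is exactly 4 * 510 / 2.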

#else

#define DIVIDE_BY_ALPHA(sum, a) (4 * (sum) / (a))

#endif  // USE_INVERSE_ALPHA_TABLE

static WEBP_INLINE int LinearToGammaWeighted(const uint8_t* src,
                                             const uint8_t* a_ptr,
                                             uint32_t total_a, int step,
                                             int rgb_stride) {
  const uint32_t sum =
      a_ptr[0] * GammaToLinear(src[0]) +
      a_ptr[step] * GammaToLinear(src[step]) +
      a_ptr[rgb_stride] * GammaToLinear(src[rgb_stride]) +
      a_ptr[rgb_stride + step] * GammaToLinear(src[rgb_stride + step]);
  assert(total_a > 0 && total_a <= 4 * 0xff);
#if defined(USE_INVERSE_ALPHA_TABLE)
  assert((uint64_t)sum * kInvAlpha[total_a] < ((uint64_t)1 << 32));
#endif
  return LinearToGamma(DIVIDE_BY_ALPHA(sum, total_a), 0);
}

static WEBP_INLINE void ConvertRowToY(const uint8_t* const r_ptr,
                                      const uint8_t* const g_ptr,
                                      const uint8_t* const b_ptr,
                                      int step,
                                      uint8_t* const dst_y,
                                      int width,
                                      VP8Random* const rg) {
  int i, j;
  for (i = 0, j = 0; i < width; i += 1, j += step) {
    dst_y[i] = RGBToY(r_ptr[j], g_ptr[j], b_ptr[j], rg);
  }
}

static WEBP_INLINE void AccumulateRGBA(const uint8_t* const r_ptr,
                                       const uint8_t* const g_ptr,
                                       const uint8_t* const b_ptr,
                                       const uint8_t* const a_ptr,
                                       int rgb_stride,
                                       uint16_t* dst, int width) {
  int i, j;
  // we loop over 2x2 blocks and produce one R/G/B/A value for each.
  for (i = 0, j = 0; i < (width >> 1); i += 1, j += 2 * 4, dst += 4) {
    const uint32_t a = SUM4ALPHA(a_ptr + j);
    int r, g, b;
    if (a == 4 * 0xff || a == 0) {
      r = SUM4(r_ptr + j, 4);
      g = SUM4(g_ptr + j, 4);
      b = SUM4(b_ptr + j, 4);
    } else {
      r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 4, rgb_stride);
      g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 4, rgb_stride);
      b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 4, rgb_stride);
    }
    dst[0] = r;
    dst[1] = g;
    dst[2] = b;
    dst[3] = a;
  }
  if (width & 1) {
    const uint32_t a = 2u * SUM2ALPHA(a_ptr + j);
    int r, g, b;
    if (a == 4 * 0xff || a == 0) {
      r = SUM2(r_ptr + j);
      g = SUM2(g_ptr + j);
      b = SUM2(b_ptr + j);
    } else {
      r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 0, rgb_stride);
      g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 0, rgb_stride);
      b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 0, rgb_stride);
    }
    dst[0] = r;
    dst[1] = g;
    dst[2] = b;
    dst[3] = a;
  }
}

static WEBP_INLINE void AccumulateRGB(const uint8_t* const r_ptr,
                                      const uint8_t* const g_ptr,
                                      const uint8_t* const b_ptr,
                                      int step, int rgb_stride,
                                      uint16_t* dst, int width) {
  int i, j;
  for (i = 0, j = 0; i < (width >> 1); i += 1, j += 2 * step, dst += 4) {
    dst[0] = SUM4(r_ptr + j, step);
    dst[1] = SUM4(g_ptr + j, step);
    dst[2] = SUM4(b_ptr + j, step);
    // MemorySanitizer may raise false positives with data that passes through
    // RGBA32PackedToPlanar_16b_SSE41() due to incorrect modeling of shuffles.
    // See https://crbug.com/webp/573.
#ifdef WEBP_MSAN
    dst[3] = 0;
#endif
  }
  if (width & 1) {
    dst[0] = SUM2(r_ptr + j);
    dst[1] = SUM2(g_ptr + j);
    dst[2] = SUM2(b_ptr + j);
#ifdef WEBP_MSAN
    dst[3] = 0;
#endif
  }
}

static WEBP_INLINE void ConvertRowsToUV(const uint16_t* rgb,
                                        uint8_t* const dst_u,
                                        uint8_t* const dst_v,
                                        int width,
                                        VP8Random* const rg) {
  int i;
  for (i = 0; i < width; i += 1, rgb += 4) {
    const int r = rgb[0], g = rgb[1], b = rgb[2];
    dst_u[i] = RGBToU(r, g, b, rg);
    dst_v[i] = RGBToV(r, g, b, rg);
  }
}

extern void SharpYuvInit(VP8CPUInfo cpu_info_func);
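
// Converts interleaved R/G/B(/A) rows into the picture's Y/U/V(/A) planes.
// Two paths exist: the iterative "sharp" conversion, delegated to
// SharpYuvConvert() through PreprocessARGB(), and the regular path below,
// which box-filters 2x2 blocks (alpha-weighted when a block is partially
// transparent) before computing U/V. WebPExtractAlpha() also reports whether
// the rows it just copied were fully opaque, which is what lets the loop skip
// the slower alpha-weighted averaging for such rows.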
static int ImportYUVAFromRGBA(const uint8_t* r_ptr,
                              const uint8_t* g_ptr,
                              const uint8_t* b_ptr,
                              const uint8_t* a_ptr,
                              int step,         // bytes per pixel
                              int rgb_stride,   // bytes per scanline
                              float dithering,
                              int use_iterative_conversion,
                              WebPPicture* const picture) {
  int y;
  const int width = picture->width;
  const int height = picture->height;
  const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride);
  const int is_rgb = (r_ptr < b_ptr);  // otherwise it's bgr

  picture->colorspace = has_alpha ? WEBP_YUV420A : WEBP_YUV420;
  picture->use_argb = 0;

  // disable smart conversion if source is too small (overkill).
  if (width < kMinDimensionIterativeConversion ||
      height < kMinDimensionIterativeConversion) {
    use_iterative_conversion = 0;
  }

  if (!WebPPictureAllocYUVA(picture)) {
    return 0;
  }
  if (has_alpha) {
    assert(step == 4);
#if defined(USE_GAMMA_COMPRESSION) && defined(USE_INVERSE_ALPHA_TABLE)
    assert(kAlphaFix + GAMMA_FIX <= 31);
#endif
  }

  if (use_iterative_conversion) {
    SharpYuvInit(VP8GetCPUInfo);
    if (!PreprocessARGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, picture)) {
      return 0;
    }
    if (has_alpha) {
      WebPExtractAlpha(a_ptr, rgb_stride, width, height,
                       picture->a, picture->a_stride);
    }
  } else {
    const int uv_width = (width + 1) >> 1;
    int use_dsp = (step == 3);  // use special function in this case
    // temporary storage for accumulated R/G/B values during conversion to U/V
    uint16_t* const tmp_rgb =
        (uint16_t*)WebPSafeMalloc(4 * uv_width, sizeof(*tmp_rgb));
    uint8_t* dst_y = picture->y;
    uint8_t* dst_u = picture->u;
    uint8_t* dst_v = picture->v;
    uint8_t* dst_a = picture->a;

    VP8Random base_rg;
    VP8Random* rg = NULL;
    if (dithering > 0.) {
      VP8InitRandom(&base_rg, dithering);
      rg = &base_rg;
      use_dsp = 0;   // can't use dsp in this case
    }
    WebPInitConvertARGBToYUV();
    InitGammaTables();

    if (tmp_rgb == NULL) {
      return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
    }

    // Downsample Y/U/V planes, two rows at a time
    for (y = 0; y < (height >> 1); ++y) {
      int rows_have_alpha = has_alpha;
      if (use_dsp) {
        if (is_rgb) {
          WebPConvertRGB24ToY(r_ptr, dst_y, width);
          WebPConvertRGB24ToY(r_ptr + rgb_stride,
                              dst_y + picture->y_stride, width);
        } else {
          WebPConvertBGR24ToY(b_ptr, dst_y, width);
          WebPConvertBGR24ToY(b_ptr + rgb_stride,
                              dst_y + picture->y_stride, width);
        }
      } else {
        ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg);
        ConvertRowToY(r_ptr + rgb_stride,
                      g_ptr + rgb_stride,
                      b_ptr + rgb_stride, step,
                      dst_y + picture->y_stride, width, rg);
      }
      dst_y += 2 * picture->y_stride;
      if (has_alpha) {
        rows_have_alpha &= !WebPExtractAlpha(a_ptr, rgb_stride, width, 2,
                                             dst_a, picture->a_stride);
        dst_a += 2 * picture->a_stride;
      }
      // Collect averaged R/G/B(/A)
      if (!rows_have_alpha) {
        AccumulateRGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, tmp_rgb, width);
      } else {
        AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, rgb_stride, tmp_rgb, width);
      }
      // Convert to U/V
      if (rg == NULL) {
        WebPConvertRGBA32ToUV(tmp_rgb, dst_u, dst_v, uv_width);
      } else {
        ConvertRowsToUV(tmp_rgb, dst_u, dst_v, uv_width, rg);
      }
      dst_u += picture->uv_stride;
      dst_v += picture->uv_stride;
      r_ptr += 2 * rgb_stride;
      b_ptr += 2 * rgb_stride;
      g_ptr += 2 * rgb_stride;
      if (has_alpha) a_ptr += 2 * rgb_stride;
    }
    if (height & 1) {    // extra last row
      int row_has_alpha = has_alpha;
      if (use_dsp) {
        if (r_ptr < b_ptr) {
          WebPConvertRGB24ToY(r_ptr, dst_y, width);
        } else {
          WebPConvertBGR24ToY(b_ptr, dst_y, width);
        }
      } else {
        ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg);
      }
      if (row_has_alpha) {
        row_has_alpha &= !WebPExtractAlpha(a_ptr, 0, width, 1, dst_a, 0);
      }
      // Collect averaged R/G/B(/A)
      if (!row_has_alpha) {
        // Collect averaged R/G/B
        AccumulateRGB(r_ptr, g_ptr, b_ptr, step, /* rgb_stride = */ 0,
                      tmp_rgb, width);
      } else {
        AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, /* rgb_stride = */ 0,
                       tmp_rgb, width);
      }
      if (rg == NULL) {
        WebPConvertRGBA32ToUV(tmp_rgb, dst_u, dst_v, uv_width);
      } else {
        ConvertRowsToUV(tmp_rgb, dst_u, dst_v, uv_width, rg);
      }
    }
    WebPSafeFree(tmp_rgb);
  }
  return 1;
}

#undef SUM4
#undef SUM2
#undef SUM4ALPHA
#undef SUM2ALPHA

//------------------------------------------------------------------------------
// call for ARGB->YUVA conversion

static int PictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace,
                             float dithering, int use_iterative_conversion) {
  if (picture == NULL) return 0;
  if (picture->argb == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  } else if ((colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
  } else {
    const uint8_t* const argb = (const uint8_t*)picture->argb;
    const uint8_t* const a = argb + CHANNEL_OFFSET(0);
    const uint8_t* const r = argb + CHANNEL_OFFSET(1);
    const uint8_t* const g = argb + CHANNEL_OFFSET(2);
    const uint8_t* const b = argb + CHANNEL_OFFSET(3);

    picture->colorspace = WEBP_YUV420;
    return ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride,
                              dithering, use_iterative_conversion, picture);
  }
}

int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace,
                                  float dithering) {
  return PictureARGBToYUVA(picture, colorspace, dithering, 0);
}

int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
  return PictureARGBToYUVA(picture, colorspace, 0.f, 0);
}

int WebPPictureSharpARGBToYUVA(WebPPicture* picture) {
  return PictureARGBToYUVA(picture, WEBP_YUV420, 0.f, 1);
}
// for backward compatibility
int WebPPictureSmartARGBToYUVA(WebPPicture* picture) {
  return WebPPictureSharpARGBToYUVA(picture);
}

//------------------------------------------------------------------------------
// call for YUVA -> ARGB conversion
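//
// The YUV420 planes are upsampled two output rows at a time with a line-pair
// upsampler; the first and last rows reuse the nearest chroma row. If the
// picture carries an alpha plane, its values are then written into the top
// byte of each ARGB word, replacing the default 0xff.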

int WebPPictureYUVAToARGB(WebPPicture* picture) {
  if (picture == NULL) return 0;
  if (picture->y == NULL || picture->u == NULL || picture->v == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if ((picture->colorspace & WEBP_CSP_ALPHA_BIT) && picture->a == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if ((picture->colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
  }
  // Allocate a new argb buffer (discarding the previous one).
  if (!WebPPictureAllocARGB(picture)) return 0;
  picture->use_argb = 1;

  // Convert
  {
    int y;
    const int width = picture->width;
    const int height = picture->height;
    const int argb_stride = 4 * picture->argb_stride;
    uint8_t* dst = (uint8_t*)picture->argb;
    const uint8_t* cur_u = picture->u, *cur_v = picture->v, *cur_y = picture->y;
    WebPUpsampleLinePairFunc upsample =
        WebPGetLinePairConverter(ALPHA_OFFSET > 0);

    // First row, with replicated top samples.
    upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
    cur_y += picture->y_stride;
    dst += argb_stride;
    // Center rows.
    for (y = 1; y + 1 < height; y += 2) {
      const uint8_t* const top_u = cur_u;
      const uint8_t* const top_v = cur_v;
      cur_u += picture->uv_stride;
      cur_v += picture->uv_stride;
      upsample(cur_y, cur_y + picture->y_stride, top_u, top_v, cur_u, cur_v,
               dst, dst + argb_stride, width);
      cur_y += 2 * picture->y_stride;
      dst += 2 * argb_stride;
    }
    // Last row (if needed), with replicated bottom samples.
    if (height > 1 && !(height & 1)) {
      upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
    }
    // Insert alpha values if needed, in replacement for the default 0xff ones.
    if (picture->colorspace & WEBP_CSP_ALPHA_BIT) {
      for (y = 0; y < height; ++y) {
        uint32_t* const argb_dst = picture->argb + y * picture->argb_stride;
        const uint8_t* const src = picture->a + y * picture->a_stride;
        int x;
        for (x = 0; x < width; ++x) {
          argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | ((uint32_t)src[x] << 24);
        }
      }
    }
  }
  return 1;
}

//------------------------------------------------------------------------------
// automatic import / conversion
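//
// Import() routes the packed RGB(A) input either to ImportYUVAFromRGBA()
// (when the target picture is YUV420) or into the ARGB buffer. In the ARGB
// case, little-endian BGRA input already matches the in-memory layout of the
// destination words (ALPHA_OFFSET == 3 with swap_rb), so whole rows can
// simply be memcpy'd.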

static int Import(WebPPicture* const picture,
                  const uint8_t* rgb, int rgb_stride,
                  int step, int swap_rb, int import_alpha) {
  int y;
  // swap_rb -> b,g,r,a , !swap_rb -> r,g,b,a
  const uint8_t* r_ptr = rgb + (swap_rb ? 2 : 0);
  const uint8_t* g_ptr = rgb + 1;
  const uint8_t* b_ptr = rgb + (swap_rb ? 0 : 2);
  const int width = picture->width;
  const int height = picture->height;

  if (abs(rgb_stride) < (import_alpha ? 4 : 3) * width) return 0;

  if (!picture->use_argb) {
    const uint8_t* a_ptr = import_alpha ? rgb + 3 : NULL;
    return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
                              0.f /* no dithering */, 0, picture);
  }
  if (!WebPPictureAlloc(picture)) return 0;

  VP8LDspInit();
  WebPInitAlphaProcessing();

  if (import_alpha) {
    // dst[] byte order is {a,r,g,b} for big-endian, {b,g,r,a} for little endian
    uint32_t* dst = picture->argb;
    const int do_copy = (ALPHA_OFFSET == 3) && swap_rb;
    assert(step == 4);
    if (do_copy) {
      for (y = 0; y < height; ++y) {
        memcpy(dst, rgb, width * 4);
        rgb += rgb_stride;
        dst += picture->argb_stride;
      }
    } else {
      for (y = 0; y < height; ++y) {
#ifdef WORDS_BIGENDIAN
        // BGRA or RGBA input order.
        const uint8_t* a_ptr = rgb + 3;
        WebPPackARGB(a_ptr, r_ptr, g_ptr, b_ptr, width, dst);
        r_ptr += rgb_stride;
        g_ptr += rgb_stride;
        b_ptr += rgb_stride;
#else
        // RGBA input order. Need to swap R and B.
        VP8LConvertBGRAToRGBA((const uint32_t*)rgb, width, (uint8_t*)dst);
#endif
        rgb += rgb_stride;
        dst += picture->argb_stride;
      }
    }
  } else {
    uint32_t* dst = picture->argb;
    assert(step >= 3);
    for (y = 0; y < height; ++y) {
      WebPPackRGB(r_ptr, g_ptr, b_ptr, width, step, dst);
      r_ptr += rgb_stride;
      g_ptr += rgb_stride;
      b_ptr += rgb_stride;
      dst += picture->argb_stride;
    }
  }
  return 1;
}

// Public API

#if !defined(WEBP_REDUCE_CSP)

int WebPPictureImportBGR(WebPPicture* picture,
                         const uint8_t* bgr, int bgr_stride) {
  return (picture != NULL && bgr != NULL)
             ? Import(picture, bgr, bgr_stride, 3, 1, 0)
             : 0;
}

int WebPPictureImportBGRA(WebPPicture* picture,
                          const uint8_t* bgra, int bgra_stride) {
  return (picture != NULL && bgra != NULL)
             ? Import(picture, bgra, bgra_stride, 4, 1, 1)
             : 0;
}


int WebPPictureImportBGRX(WebPPicture* picture,
                          const uint8_t* bgrx, int bgrx_stride) {
  return (picture != NULL && bgrx != NULL)
             ? Import(picture, bgrx, bgrx_stride, 4, 1, 0)
             : 0;
}

#endif  // WEBP_REDUCE_CSP

int WebPPictureImportRGB(WebPPicture* picture,
                         const uint8_t* rgb, int rgb_stride) {
  return (picture != NULL && rgb != NULL)
             ? Import(picture, rgb, rgb_stride, 3, 0, 0)
             : 0;
}

int WebPPictureImportRGBA(WebPPicture* picture,
                          const uint8_t* rgba, int rgba_stride) {
  return (picture != NULL && rgba != NULL)
             ? Import(picture, rgba, rgba_stride, 4, 0, 1)
             : 0;
}

int WebPPictureImportRGBX(WebPPicture* picture,
                          const uint8_t* rgbx, int rgbx_stride) {
  return (picture != NULL && rgbx != NULL)
             ? Import(picture, rgbx, rgbx_stride, 4, 0, 0)
             : 0;
}

//------------------------------------------------------------------------------