GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/libwebp/src/enc/picture_csp_enc.c
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// WebPPicture utils for colorspace conversion
//
// Author: Skal ([email protected])

#include <assert.h>
#include <math.h>
#include <stdlib.h>
#include <string.h>

#include "sharpyuv/sharpyuv.h"
#include "sharpyuv/sharpyuv_csp.h"
#include "src/dsp/cpu.h"
#include "src/dsp/dsp.h"
#include "src/dsp/lossless.h"
#include "src/dsp/yuv.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/random_utils.h"
#include "src/utils/utils.h"
#include "src/webp/encode.h"
#include "src/webp/types.h"

#if defined(WEBP_USE_THREAD) && !defined(_WIN32)
#include <pthread.h>
#endif

// Comment out to disable gamma-compression during RGB->U/V averaging
#define USE_GAMMA_COMPRESSION

// If defined, use table to compute x / alpha.
#define USE_INVERSE_ALPHA_TABLE

#ifdef WORDS_BIGENDIAN
// uint32_t 0xff000000 is 0xff,00,00,00 in memory
#define CHANNEL_OFFSET(i) (i)
#else
// uint32_t 0xff000000 is 0x00,00,00,ff in memory
#define CHANNEL_OFFSET(i) (3-(i))
#endif

#define ALPHA_OFFSET CHANNEL_OFFSET(0)
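
// Added note: with CHANNEL_OFFSET() above, indexing a uint32_t ARGB pixel as
// bytes yields A at CHANNEL_OFFSET(0), R at CHANNEL_OFFSET(1), G at
// CHANNEL_OFFSET(2) and B at CHANNEL_OFFSET(3) on either endianness; e.g. on
// little-endian hosts the in-memory byte order is b,g,r,a, so ALPHA_OFFSET
// is 3.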

//------------------------------------------------------------------------------
// Detection of non-trivial transparency

// Returns true if alpha[] has non-0xff values.
static int CheckNonOpaque(const uint8_t* alpha, int width, int height,
                          int x_step, int y_step) {
  if (alpha == NULL) return 0;
  WebPInitAlphaProcessing();
  if (x_step == 1) {
    for (; height-- > 0; alpha += y_step) {
      if (WebPHasAlpha8b(alpha, width)) return 1;
    }
  } else {
    for (; height-- > 0; alpha += y_step) {
      if (WebPHasAlpha32b(alpha, width)) return 1;
    }
  }
  return 0;
}

// Checking for the presence of non-opaque alpha.
int WebPPictureHasTransparency(const WebPPicture* picture) {
  if (picture == NULL) return 0;
  if (picture->use_argb) {
    if (picture->argb != NULL) {
      return CheckNonOpaque((const uint8_t*)picture->argb + ALPHA_OFFSET,
                            picture->width, picture->height,
                            4, picture->argb_stride * sizeof(*picture->argb));
    }
    return 0;
  }
  return CheckNonOpaque(picture->a, picture->width, picture->height,
                        1, picture->a_stride);
}
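
// Added note: CheckNonOpaque() treats x_step and y_step as byte strides, so
// the ARGB branch above scans the alpha byte of every packed pixel (step 4,
// row stride in bytes), while the planar branch walks the dedicated alpha
// plane with step 1.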

//------------------------------------------------------------------------------
// Code for gamma correction

#if defined(USE_GAMMA_COMPRESSION)

// Gamma correction compensates for the loss of resolution during chroma
// subsampling.
#define GAMMA_FIX 12      // fixed-point precision for linear values
#define GAMMA_TAB_FIX 7   // fixed-point fractional bits precision
#define GAMMA_TAB_SIZE (1 << (GAMMA_FIX - GAMMA_TAB_FIX))
static const double kGamma = 0.80;
static const int kGammaScale = ((1 << GAMMA_FIX) - 1);
static const int kGammaTabScale = (1 << GAMMA_TAB_FIX);
static const int kGammaTabRounder = (1 << GAMMA_TAB_FIX >> 1);

static int kLinearToGammaTab[GAMMA_TAB_SIZE + 1];
static uint16_t kGammaToLinearTab[256];
static volatile int kGammaTablesOk = 0;
static void InitGammaTables(void);
extern VP8CPUInfo VP8GetCPUInfo;

WEBP_DSP_INIT_FUNC(InitGammaTables) {
  if (!kGammaTablesOk) {
    int v;
    const double scale = (double)(1 << GAMMA_TAB_FIX) / kGammaScale;
    const double norm = 1. / 255.;
    for (v = 0; v <= 255; ++v) {
      kGammaToLinearTab[v] =
          (uint16_t)(pow(norm * v, kGamma) * kGammaScale + .5);
    }
    for (v = 0; v <= GAMMA_TAB_SIZE; ++v) {
      kLinearToGammaTab[v] = (int)(255. * pow(scale * v, 1. / kGamma) + .5);
    }
    kGammaTablesOk = 1;
  }
}

static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) {
  return kGammaToLinearTab[v];
}

static WEBP_INLINE int Interpolate(int v) {
  const int tab_pos = v >> (GAMMA_TAB_FIX + 2);    // integer part
  const int x = v & ((kGammaTabScale << 2) - 1);   // fractional part
  const int v0 = kLinearToGammaTab[tab_pos];
  const int v1 = kLinearToGammaTab[tab_pos + 1];
  const int y = v1 * x + v0 * ((kGammaTabScale << 2) - x);   // interpolate
  assert(tab_pos + 1 < GAMMA_TAB_SIZE + 1);
  return y;
}

// Convert a linear value 'v' to YUV_FIX+2 fixed-point precision
// U/V value, suitable for RGBToU/V calls.
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
  const int y = Interpolate(base_value << shift);   // final uplifted value
  return (y + kGammaTabRounder) >> GAMMA_TAB_FIX;   // descale
}

#else

static void InitGammaTables(void) {}
static WEBP_INLINE uint32_t GammaToLinear(uint8_t v) { return v; }
static WEBP_INLINE int LinearToGamma(uint32_t base_value, int shift) {
  return (int)(base_value << shift);
}

#endif  // USE_GAMMA_COMPRESSION
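
// Added worked example: with kGamma = 0.80, GAMMA_FIX = 12 and
// GAMMA_TAB_FIX = 7, GammaToLinear(255) is kGammaScale (4095). Summing a 2x2
// block of 255s (see the SUM4 macro below) gives 4 * 4095 = 16380, and
// LinearToGamma(16380, 0) returns roughly 1020, i.e. 4 * 255 in the
// YUV_FIX + 2 precision expected by RGBToU()/RGBToV().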

//------------------------------------------------------------------------------
// RGB -> YUV conversion

static int RGBToY(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToY(r, g, b, YUV_HALF)
                      : VP8RGBToY(r, g, b, VP8RandomBits(rg, YUV_FIX));
}

static int RGBToU(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToU(r, g, b, YUV_HALF << 2)
                      : VP8RGBToU(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}

static int RGBToV(int r, int g, int b, VP8Random* const rg) {
  return (rg == NULL) ? VP8RGBToV(r, g, b, YUV_HALF << 2)
                      : VP8RGBToV(r, g, b, VP8RandomBits(rg, YUV_FIX + 2));
}

//------------------------------------------------------------------------------
// Sharp RGB->YUV conversion

static const int kMinDimensionIterativeConversion = 4;

//------------------------------------------------------------------------------
// Main function

static int PreprocessARGB(const uint8_t* r_ptr,
                          const uint8_t* g_ptr,
                          const uint8_t* b_ptr,
                          int step, int rgb_stride,
                          WebPPicture* const picture) {
  const int ok = SharpYuvConvert(
      r_ptr, g_ptr, b_ptr, step, rgb_stride, /*rgb_bit_depth=*/8,
      picture->y, picture->y_stride, picture->u, picture->uv_stride, picture->v,
      picture->uv_stride, /*yuv_bit_depth=*/8, picture->width,
      picture->height, SharpYuvGetConversionMatrix(kSharpYuvMatrixWebp));
  if (!ok) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
  }
  return ok;
}

//------------------------------------------------------------------------------
// "Fast" regular RGB->YUV

#define SUM4(ptr, step) LinearToGamma(                     \
    GammaToLinear((ptr)[0]) +                              \
    GammaToLinear((ptr)[(step)]) +                         \
    GammaToLinear((ptr)[rgb_stride]) +                     \
    GammaToLinear((ptr)[rgb_stride + (step)]), 0)          \

#define SUM2(ptr) \
    LinearToGamma(GammaToLinear((ptr)[0]) + GammaToLinear((ptr)[rgb_stride]), 1)

#define SUM2ALPHA(ptr) ((ptr)[0] + (ptr)[rgb_stride])
#define SUM4ALPHA(ptr) (SUM2ALPHA(ptr) + SUM2ALPHA((ptr) + 4))
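
// Added note: SUM4 averages a 2x2 block in the gamma-decompressed (linear)
// domain and SUM2 a vertical pair of samples; the shift argument passed to
// LinearToGamma() (0 vs 1) scales the two-sample sum so that both macros
// return values in the same 4x-scaled range.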

#if defined(USE_INVERSE_ALPHA_TABLE)

static const int kAlphaFix = 19;
// Following table is (1 << kAlphaFix) / a. The (v * kInvAlpha[a]) >> kAlphaFix
// formula is then equal to v / a in most (99.6%) cases. Note that this table
// and constant are adjusted very tightly to fit 32b arithmetic.
// In particular, they use the fact that the operands for 'v / a' are actually
// derived as v = (a0.p0 + a1.p1 + a2.p2 + a3.p3) and a = a0 + a1 + a2 + a3
// with ai in [0..255] and pi in [0..1<<GAMMA_FIX). The constraint to avoid
// overflow is: GAMMA_FIX + kAlphaFix <= 31.
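// Added worked example: with GAMMA_FIX = 12 and kAlphaFix = 19, e.g. a = 1
// and v = 4095 gives v * kInvAlpha[1] = 4095 * 524288 = 2146959360, which is
// below 2^31, so the 32-bit product used below cannot overflow.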
static const uint32_t kInvAlpha[4 * 0xff + 1] = {
  0,  /* alpha = 0 */
  524288, 262144, 174762, 131072, 104857, 87381, 74898, 65536,
  58254, 52428, 47662, 43690, 40329, 37449, 34952, 32768,
  30840, 29127, 27594, 26214, 24966, 23831, 22795, 21845,
  20971, 20164, 19418, 18724, 18078, 17476, 16912, 16384,
  15887, 15420, 14979, 14563, 14169, 13797, 13443, 13107,
  12787, 12483, 12192, 11915, 11650, 11397, 11155, 10922,
  10699, 10485, 10280, 10082, 9892, 9709, 9532, 9362,
  9198, 9039, 8886, 8738, 8594, 8456, 8322, 8192,
  8065, 7943, 7825, 7710, 7598, 7489, 7384, 7281,
  7182, 7084, 6990, 6898, 6808, 6721, 6636, 6553,
  6472, 6393, 6316, 6241, 6168, 6096, 6026, 5957,
  5890, 5825, 5761, 5698, 5637, 5577, 5518, 5461,
  5405, 5349, 5295, 5242, 5190, 5140, 5090, 5041,
  4993, 4946, 4899, 4854, 4809, 4766, 4723, 4681,
  4639, 4599, 4559, 4519, 4481, 4443, 4405, 4369,
  4332, 4297, 4262, 4228, 4194, 4161, 4128, 4096,
  4064, 4032, 4002, 3971, 3942, 3912, 3883, 3855,
  3826, 3799, 3771, 3744, 3718, 3692, 3666, 3640,
  3615, 3591, 3566, 3542, 3518, 3495, 3472, 3449,
  3426, 3404, 3382, 3360, 3339, 3318, 3297, 3276,
  3256, 3236, 3216, 3196, 3177, 3158, 3139, 3120,
  3102, 3084, 3066, 3048, 3030, 3013, 2995, 2978,
  2962, 2945, 2928, 2912, 2896, 2880, 2864, 2849,
  2833, 2818, 2803, 2788, 2774, 2759, 2744, 2730,
  2716, 2702, 2688, 2674, 2661, 2647, 2634, 2621,
  2608, 2595, 2582, 2570, 2557, 2545, 2532, 2520,
  2508, 2496, 2484, 2473, 2461, 2449, 2438, 2427,
  2416, 2404, 2394, 2383, 2372, 2361, 2351, 2340,
  2330, 2319, 2309, 2299, 2289, 2279, 2269, 2259,
  2250, 2240, 2231, 2221, 2212, 2202, 2193, 2184,
  2175, 2166, 2157, 2148, 2139, 2131, 2122, 2114,
  2105, 2097, 2088, 2080, 2072, 2064, 2056, 2048,
  2040, 2032, 2024, 2016, 2008, 2001, 1993, 1985,
  1978, 1971, 1963, 1956, 1949, 1941, 1934, 1927,
  1920, 1913, 1906, 1899, 1892, 1885, 1879, 1872,
  1865, 1859, 1852, 1846, 1839, 1833, 1826, 1820,
  1814, 1807, 1801, 1795, 1789, 1783, 1777, 1771,
  1765, 1759, 1753, 1747, 1741, 1736, 1730, 1724,
  1718, 1713, 1707, 1702, 1696, 1691, 1685, 1680,
  1675, 1669, 1664, 1659, 1653, 1648, 1643, 1638,
  1633, 1628, 1623, 1618, 1613, 1608, 1603, 1598,
  1593, 1588, 1583, 1579, 1574, 1569, 1565, 1560,
  1555, 1551, 1546, 1542, 1537, 1533, 1528, 1524,
  1519, 1515, 1510, 1506, 1502, 1497, 1493, 1489,
  1485, 1481, 1476, 1472, 1468, 1464, 1460, 1456,
  1452, 1448, 1444, 1440, 1436, 1432, 1428, 1424,
  1420, 1416, 1413, 1409, 1405, 1401, 1398, 1394,
  1390, 1387, 1383, 1379, 1376, 1372, 1368, 1365,
  1361, 1358, 1354, 1351, 1347, 1344, 1340, 1337,
  1334, 1330, 1327, 1323, 1320, 1317, 1314, 1310,
  1307, 1304, 1300, 1297, 1294, 1291, 1288, 1285,
  1281, 1278, 1275, 1272, 1269, 1266, 1263, 1260,
  1257, 1254, 1251, 1248, 1245, 1242, 1239, 1236,
  1233, 1230, 1227, 1224, 1222, 1219, 1216, 1213,
  1210, 1208, 1205, 1202, 1199, 1197, 1194, 1191,
  1188, 1186, 1183, 1180, 1178, 1175, 1172, 1170,
  1167, 1165, 1162, 1159, 1157, 1154, 1152, 1149,
  1147, 1144, 1142, 1139, 1137, 1134, 1132, 1129,
  1127, 1125, 1122, 1120, 1117, 1115, 1113, 1110,
  1108, 1106, 1103, 1101, 1099, 1096, 1094, 1092,
  1089, 1087, 1085, 1083, 1081, 1078, 1076, 1074,
  1072, 1069, 1067, 1065, 1063, 1061, 1059, 1057,
  1054, 1052, 1050, 1048, 1046, 1044, 1042, 1040,
  1038, 1036, 1034, 1032, 1030, 1028, 1026, 1024,
  1022, 1020, 1018, 1016, 1014, 1012, 1010, 1008,
  1006, 1004, 1002, 1000, 998, 996, 994, 992,
  991, 989, 987, 985, 983, 981, 979, 978,
  976, 974, 972, 970, 969, 967, 965, 963,
  961, 960, 958, 956, 954, 953, 951, 949,
  948, 946, 944, 942, 941, 939, 937, 936,
  934, 932, 931, 929, 927, 926, 924, 923,
  921, 919, 918, 916, 914, 913, 911, 910,
  908, 907, 905, 903, 902, 900, 899, 897,
  896, 894, 893, 891, 890, 888, 887, 885,
  884, 882, 881, 879, 878, 876, 875, 873,
  872, 870, 869, 868, 866, 865, 863, 862,
  860, 859, 858, 856, 855, 853, 852, 851,
  849, 848, 846, 845, 844, 842, 841, 840,
  838, 837, 836, 834, 833, 832, 830, 829,
  828, 826, 825, 824, 823, 821, 820, 819,
  817, 816, 815, 814, 812, 811, 810, 809,
  807, 806, 805, 804, 802, 801, 800, 799,
  798, 796, 795, 794, 793, 791, 790, 789,
  788, 787, 786, 784, 783, 782, 781, 780,
  779, 777, 776, 775, 774, 773, 772, 771,
  769, 768, 767, 766, 765, 764, 763, 762,
  760, 759, 758, 757, 756, 755, 754, 753,
  752, 751, 750, 748, 747, 746, 745, 744,
  743, 742, 741, 740, 739, 738, 737, 736,
  735, 734, 733, 732, 731, 730, 729, 728,
  727, 726, 725, 724, 723, 722, 721, 720,
  719, 718, 717, 716, 715, 714, 713, 712,
  711, 710, 709, 708, 707, 706, 705, 704,
  703, 702, 701, 700, 699, 699, 698, 697,
  696, 695, 694, 693, 692, 691, 690, 689,
  688, 688, 687, 686, 685, 684, 683, 682,
  681, 680, 680, 679, 678, 677, 676, 675,
  674, 673, 673, 672, 671, 670, 669, 668,
  667, 667, 666, 665, 664, 663, 662, 661,
  661, 660, 659, 658, 657, 657, 656, 655,
  654, 653, 652, 652, 651, 650, 649, 648,
  648, 647, 646, 645, 644, 644, 643, 642,
  641, 640, 640, 639, 638, 637, 637, 636,
  635, 634, 633, 633, 632, 631, 630, 630,
  629, 628, 627, 627, 626, 625, 624, 624,
  623, 622, 621, 621, 620, 619, 618, 618,
  617, 616, 616, 615, 614, 613, 613, 612,
  611, 611, 610, 609, 608, 608, 607, 606,
  606, 605, 604, 604, 603, 602, 601, 601,
  600, 599, 599, 598, 597, 597, 596, 595,
  595, 594, 593, 593, 592, 591, 591, 590,
  589, 589, 588, 587, 587, 586, 585, 585,
  584, 583, 583, 582, 581, 581, 580, 579,
  579, 578, 578, 577, 576, 576, 575, 574,
  574, 573, 572, 572, 571, 571, 570, 569,
  569, 568, 568, 567, 566, 566, 565, 564,
  564, 563, 563, 562, 561, 561, 560, 560,
  559, 558, 558, 557, 557, 556, 555, 555,
  554, 554, 553, 553, 552, 551, 551, 550,
  550, 549, 548, 548, 547, 547, 546, 546,
  545, 544, 544, 543, 543, 542, 542, 541,
  541, 540, 539, 539, 538, 538, 537, 537,
  536, 536, 535, 534, 534, 533, 533, 532,
  532, 531, 531, 530, 530, 529, 529, 528,
  527, 527, 526, 526, 525, 525, 524, 524,
  523, 523, 522, 522, 521, 521, 520, 520,
  519, 519, 518, 518, 517, 517, 516, 516,
  515, 515, 514, 514
};

// Note that LinearToGamma() expects the values to be premultiplied by 4,
// so we incorporate this factor 4 inside the DIVIDE_BY_ALPHA macro directly.
#define DIVIDE_BY_ALPHA(sum, a) (((sum) * kInvAlpha[(a)]) >> (kAlphaFix - 2))

#else

#define DIVIDE_BY_ALPHA(sum, a) (4 * (sum) / (a))

#endif  // USE_INVERSE_ALPHA_TABLE

static WEBP_INLINE int LinearToGammaWeighted(const uint8_t* src,
                                             const uint8_t* a_ptr,
                                             uint32_t total_a, int step,
                                             int rgb_stride) {
  const uint32_t sum =
      a_ptr[0] * GammaToLinear(src[0]) +
      a_ptr[step] * GammaToLinear(src[step]) +
      a_ptr[rgb_stride] * GammaToLinear(src[rgb_stride]) +
      a_ptr[rgb_stride + step] * GammaToLinear(src[rgb_stride + step]);
  assert(total_a > 0 && total_a <= 4 * 0xff);
#if defined(USE_INVERSE_ALPHA_TABLE)
  assert((uint64_t)sum * kInvAlpha[total_a] < ((uint64_t)1 << 32));
#endif
  return LinearToGamma(DIVIDE_BY_ALPHA(sum, total_a), 0);
}

static WEBP_INLINE void ConvertRowToY(const uint8_t* const r_ptr,
                                      const uint8_t* const g_ptr,
                                      const uint8_t* const b_ptr,
                                      int step,
                                      uint8_t* const dst_y,
                                      int width,
                                      VP8Random* const rg) {
  int i, j;
  for (i = 0, j = 0; i < width; i += 1, j += step) {
    dst_y[i] = RGBToY(r_ptr[j], g_ptr[j], b_ptr[j], rg);
  }
}

static WEBP_INLINE void AccumulateRGBA(const uint8_t* const r_ptr,
                                       const uint8_t* const g_ptr,
                                       const uint8_t* const b_ptr,
                                       const uint8_t* const a_ptr,
                                       int rgb_stride,
                                       uint16_t* dst, int width) {
  int i, j;
  // we loop over 2x2 blocks and produce one R/G/B/A value for each.
  for (i = 0, j = 0; i < (width >> 1); i += 1, j += 2 * 4, dst += 4) {
    const uint32_t a = SUM4ALPHA(a_ptr + j);
    int r, g, b;
    if (a == 4 * 0xff || a == 0) {
      r = SUM4(r_ptr + j, 4);
      g = SUM4(g_ptr + j, 4);
      b = SUM4(b_ptr + j, 4);
    } else {
      r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 4, rgb_stride);
      g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 4, rgb_stride);
      b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 4, rgb_stride);
    }
    dst[0] = r;
    dst[1] = g;
    dst[2] = b;
    dst[3] = a;
  }
  if (width & 1) {
    const uint32_t a = 2u * SUM2ALPHA(a_ptr + j);
    int r, g, b;
    if (a == 4 * 0xff || a == 0) {
      r = SUM2(r_ptr + j);
      g = SUM2(g_ptr + j);
      b = SUM2(b_ptr + j);
    } else {
      r = LinearToGammaWeighted(r_ptr + j, a_ptr + j, a, 0, rgb_stride);
      g = LinearToGammaWeighted(g_ptr + j, a_ptr + j, a, 0, rgb_stride);
      b = LinearToGammaWeighted(b_ptr + j, a_ptr + j, a, 0, rgb_stride);
    }
    dst[0] = r;
    dst[1] = g;
    dst[2] = b;
    dst[3] = a;
  }
}
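
// Added note: in AccumulateRGBA() above, a block whose summed alpha is 0 or
// 4 * 0xff is uniformly transparent or uniformly opaque, so the alpha-weighted
// average degenerates to the plain SUM4()/SUM2() average and the cheaper
// unweighted path is taken.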

static WEBP_INLINE void AccumulateRGB(const uint8_t* const r_ptr,
                                      const uint8_t* const g_ptr,
                                      const uint8_t* const b_ptr,
                                      int step, int rgb_stride,
                                      uint16_t* dst, int width) {
  int i, j;
  for (i = 0, j = 0; i < (width >> 1); i += 1, j += 2 * step, dst += 4) {
    dst[0] = SUM4(r_ptr + j, step);
    dst[1] = SUM4(g_ptr + j, step);
    dst[2] = SUM4(b_ptr + j, step);
    // MemorySanitizer may raise false positives with data that passes through
    // RGBA32PackedToPlanar_16b_SSE41() due to incorrect modeling of shuffles.
    // See https://crbug.com/webp/573.
#ifdef WEBP_MSAN
    dst[3] = 0;
#endif
  }
  if (width & 1) {
    dst[0] = SUM2(r_ptr + j);
    dst[1] = SUM2(g_ptr + j);
    dst[2] = SUM2(b_ptr + j);
#ifdef WEBP_MSAN
    dst[3] = 0;
#endif
  }
}

static WEBP_INLINE void ConvertRowsToUV(const uint16_t* rgb,
                                        uint8_t* const dst_u,
                                        uint8_t* const dst_v,
                                        int width,
                                        VP8Random* const rg) {
  int i;
  for (i = 0; i < width; i += 1, rgb += 4) {
    const int r = rgb[0], g = rgb[1], b = rgb[2];
    dst_u[i] = RGBToU(r, g, b, rg);
    dst_v[i] = RGBToV(r, g, b, rg);
  }
}

extern void SharpYuvInit(VP8CPUInfo cpu_info_func);

static int ImportYUVAFromRGBA(const uint8_t* r_ptr,
                              const uint8_t* g_ptr,
                              const uint8_t* b_ptr,
                              const uint8_t* a_ptr,
                              int step,         // bytes per pixel
                              int rgb_stride,   // bytes per scanline
                              float dithering,
                              int use_iterative_conversion,
                              WebPPicture* const picture) {
  int y;
  const int width = picture->width;
  const int height = picture->height;
  const int has_alpha = CheckNonOpaque(a_ptr, width, height, step, rgb_stride);
  const int is_rgb = (r_ptr < b_ptr);  // otherwise it's bgr

  picture->colorspace = has_alpha ? WEBP_YUV420A : WEBP_YUV420;
  picture->use_argb = 0;

  // disable smart conversion if source is too small (overkill).
  if (width < kMinDimensionIterativeConversion ||
      height < kMinDimensionIterativeConversion) {
    use_iterative_conversion = 0;
  }

  if (!WebPPictureAllocYUVA(picture)) {
    return 0;
  }
  if (has_alpha) {
    assert(step == 4);
#if defined(USE_GAMMA_COMPRESSION) && defined(USE_INVERSE_ALPHA_TABLE)
    assert(kAlphaFix + GAMMA_FIX <= 31);
#endif
  }

  if (use_iterative_conversion) {
    SharpYuvInit(VP8GetCPUInfo);
    if (!PreprocessARGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, picture)) {
      return 0;
    }
    if (has_alpha) {
      WebPExtractAlpha(a_ptr, rgb_stride, width, height,
                       picture->a, picture->a_stride);
    }
  } else {
    const int uv_width = (width + 1) >> 1;
    int use_dsp = (step == 3);  // use special function in this case
    // temporary storage for accumulated R/G/B values during conversion to U/V
    uint16_t* const tmp_rgb =
        (uint16_t*)WebPSafeMalloc(4 * uv_width, sizeof(*tmp_rgb));
    uint8_t* dst_y = picture->y;
    uint8_t* dst_u = picture->u;
    uint8_t* dst_v = picture->v;
    uint8_t* dst_a = picture->a;

    VP8Random base_rg;
    VP8Random* rg = NULL;
    if (dithering > 0.) {
      VP8InitRandom(&base_rg, dithering);
      rg = &base_rg;
      use_dsp = 0;   // can't use dsp in this case
    }
    WebPInitConvertARGBToYUV();
    InitGammaTables();

    if (tmp_rgb == NULL) {
      return WebPEncodingSetError(picture, VP8_ENC_ERROR_OUT_OF_MEMORY);
    }

    // Downsample Y/U/V planes, two rows at a time
    for (y = 0; y < (height >> 1); ++y) {
      int rows_have_alpha = has_alpha;
      if (use_dsp) {
        if (is_rgb) {
          WebPConvertRGB24ToY(r_ptr, dst_y, width);
          WebPConvertRGB24ToY(r_ptr + rgb_stride,
                              dst_y + picture->y_stride, width);
        } else {
          WebPConvertBGR24ToY(b_ptr, dst_y, width);
          WebPConvertBGR24ToY(b_ptr + rgb_stride,
                              dst_y + picture->y_stride, width);
        }
      } else {
        ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg);
        ConvertRowToY(r_ptr + rgb_stride,
                      g_ptr + rgb_stride,
                      b_ptr + rgb_stride, step,
                      dst_y + picture->y_stride, width, rg);
      }
      dst_y += 2 * picture->y_stride;
      if (has_alpha) {
        rows_have_alpha &= !WebPExtractAlpha(a_ptr, rgb_stride, width, 2,
                                             dst_a, picture->a_stride);
        dst_a += 2 * picture->a_stride;
      }
      // Collect averaged R/G/B(/A)
      if (!rows_have_alpha) {
        AccumulateRGB(r_ptr, g_ptr, b_ptr, step, rgb_stride, tmp_rgb, width);
      } else {
        AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, rgb_stride, tmp_rgb, width);
      }
      // Convert to U/V
      if (rg == NULL) {
        WebPConvertRGBA32ToUV(tmp_rgb, dst_u, dst_v, uv_width);
      } else {
        ConvertRowsToUV(tmp_rgb, dst_u, dst_v, uv_width, rg);
      }
      dst_u += picture->uv_stride;
      dst_v += picture->uv_stride;
      r_ptr += 2 * rgb_stride;
      b_ptr += 2 * rgb_stride;
      g_ptr += 2 * rgb_stride;
      if (has_alpha) a_ptr += 2 * rgb_stride;
    }
    if (height & 1) {    // extra last row
      int row_has_alpha = has_alpha;
      if (use_dsp) {
        if (r_ptr < b_ptr) {
          WebPConvertRGB24ToY(r_ptr, dst_y, width);
        } else {
          WebPConvertBGR24ToY(b_ptr, dst_y, width);
        }
      } else {
        ConvertRowToY(r_ptr, g_ptr, b_ptr, step, dst_y, width, rg);
      }
      if (row_has_alpha) {
        row_has_alpha &= !WebPExtractAlpha(a_ptr, 0, width, 1, dst_a, 0);
      }
      // Collect averaged R/G/B(/A)
      if (!row_has_alpha) {
        // Collect averaged R/G/B
        AccumulateRGB(r_ptr, g_ptr, b_ptr, step, /* rgb_stride = */ 0,
                      tmp_rgb, width);
      } else {
        AccumulateRGBA(r_ptr, g_ptr, b_ptr, a_ptr, /* rgb_stride = */ 0,
                       tmp_rgb, width);
      }
      if (rg == NULL) {
        WebPConvertRGBA32ToUV(tmp_rgb, dst_u, dst_v, uv_width);
      } else {
        ConvertRowsToUV(tmp_rgb, dst_u, dst_v, uv_width, rg);
      }
    }
    WebPSafeFree(tmp_rgb);
  }
  return 1;
}
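
// Added note: in the non-iterative path above, Y (and A) are produced at full
// resolution from each input row, while U and V come from 2x2 blocks averaged
// into tmp_rgb[]; an odd final row is handled by passing rgb_stride = 0 so the
// row is effectively averaged with itself.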

#undef SUM4
#undef SUM2
#undef SUM4ALPHA
#undef SUM2ALPHA

//------------------------------------------------------------------------------
// call for ARGB->YUVA conversion

static int PictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace,
                             float dithering, int use_iterative_conversion) {
  if (picture == NULL) return 0;
  if (picture->argb == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  } else if ((colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
  } else {
    const uint8_t* const argb = (const uint8_t*)picture->argb;
    const uint8_t* const a = argb + CHANNEL_OFFSET(0);
    const uint8_t* const r = argb + CHANNEL_OFFSET(1);
    const uint8_t* const g = argb + CHANNEL_OFFSET(2);
    const uint8_t* const b = argb + CHANNEL_OFFSET(3);

    picture->colorspace = WEBP_YUV420;
    return ImportYUVAFromRGBA(r, g, b, a, 4, 4 * picture->argb_stride,
                              dithering, use_iterative_conversion, picture);
  }
}

int WebPPictureARGBToYUVADithered(WebPPicture* picture, WebPEncCSP colorspace,
                                  float dithering) {
  return PictureARGBToYUVA(picture, colorspace, dithering, 0);
}

int WebPPictureARGBToYUVA(WebPPicture* picture, WebPEncCSP colorspace) {
  return PictureARGBToYUVA(picture, colorspace, 0.f, 0);
}

int WebPPictureSharpARGBToYUVA(WebPPicture* picture) {
  return PictureARGBToYUVA(picture, WEBP_YUV420, 0.f, 1);
}
// for backward compatibility
int WebPPictureSmartARGBToYUVA(WebPPicture* picture) {
  return WebPPictureSharpARGBToYUVA(picture);
}

//------------------------------------------------------------------------------
// call for YUVA -> ARGB conversion

int WebPPictureYUVAToARGB(WebPPicture* picture) {
  if (picture == NULL) return 0;
  if (picture->y == NULL || picture->u == NULL || picture->v == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if ((picture->colorspace & WEBP_CSP_ALPHA_BIT) && picture->a == NULL) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_NULL_PARAMETER);
  }
  if ((picture->colorspace & WEBP_CSP_UV_MASK) != WEBP_YUV420) {
    return WebPEncodingSetError(picture, VP8_ENC_ERROR_INVALID_CONFIGURATION);
  }
  // Allocate a new argb buffer (discarding the previous one).
  if (!WebPPictureAllocARGB(picture)) return 0;
  picture->use_argb = 1;

  // Convert
  {
    int y;
    const int width = picture->width;
    const int height = picture->height;
    const int argb_stride = 4 * picture->argb_stride;
    uint8_t* dst = (uint8_t*)picture->argb;
    const uint8_t* cur_u = picture->u, *cur_v = picture->v, *cur_y = picture->y;
    WebPUpsampleLinePairFunc upsample =
        WebPGetLinePairConverter(ALPHA_OFFSET > 0);

    // First row, with replicated top samples.
    upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
    cur_y += picture->y_stride;
    dst += argb_stride;
    // Center rows.
    for (y = 1; y + 1 < height; y += 2) {
      const uint8_t* const top_u = cur_u;
      const uint8_t* const top_v = cur_v;
      cur_u += picture->uv_stride;
      cur_v += picture->uv_stride;
      upsample(cur_y, cur_y + picture->y_stride, top_u, top_v, cur_u, cur_v,
               dst, dst + argb_stride, width);
      cur_y += 2 * picture->y_stride;
      dst += 2 * argb_stride;
    }
    // Last row (if needed), with replicated bottom samples.
    if (height > 1 && !(height & 1)) {
      upsample(cur_y, NULL, cur_u, cur_v, cur_u, cur_v, dst, NULL, width);
    }
    // Insert alpha values if needed, replacing the default 0xff ones.
    if (picture->colorspace & WEBP_CSP_ALPHA_BIT) {
      for (y = 0; y < height; ++y) {
        uint32_t* const argb_dst = picture->argb + y * picture->argb_stride;
        const uint8_t* const src = picture->a + y * picture->a_stride;
        int x;
        for (x = 0; x < width; ++x) {
          argb_dst[x] = (argb_dst[x] & 0x00ffffffu) | ((uint32_t)src[x] << 24);
        }
      }
    }
  }
  return 1;
}

//------------------------------------------------------------------------------
// automatic import / conversion

static int Import(WebPPicture* const picture,
                  const uint8_t* rgb, int rgb_stride,
                  int step, int swap_rb, int import_alpha) {
  int y;
  // swap_rb -> b,g,r,a , !swap_rb -> r,g,b,a
  const uint8_t* r_ptr = rgb + (swap_rb ? 2 : 0);
  const uint8_t* g_ptr = rgb + 1;
  const uint8_t* b_ptr = rgb + (swap_rb ? 0 : 2);
  const int width = picture->width;
  const int height = picture->height;

  if (abs(rgb_stride) < (import_alpha ? 4 : 3) * width) return 0;

  if (!picture->use_argb) {
    const uint8_t* a_ptr = import_alpha ? rgb + 3 : NULL;
    return ImportYUVAFromRGBA(r_ptr, g_ptr, b_ptr, a_ptr, step, rgb_stride,
                              0.f /* no dithering */, 0, picture);
  }
  if (!WebPPictureAlloc(picture)) return 0;

  VP8LDspInit();
  WebPInitAlphaProcessing();

  if (import_alpha) {
    // dst[] byte order is {a,r,g,b} for big-endian, {b,g,r,a} for little endian
    uint32_t* dst = picture->argb;
    const int do_copy = (ALPHA_OFFSET == 3) && swap_rb;
    assert(step == 4);
    if (do_copy) {
      for (y = 0; y < height; ++y) {
        memcpy(dst, rgb, width * 4);
        rgb += rgb_stride;
        dst += picture->argb_stride;
      }
    } else {
      for (y = 0; y < height; ++y) {
#ifdef WORDS_BIGENDIAN
        // BGRA or RGBA input order.
        const uint8_t* a_ptr = rgb + 3;
        WebPPackARGB(a_ptr, r_ptr, g_ptr, b_ptr, width, dst);
        r_ptr += rgb_stride;
        g_ptr += rgb_stride;
        b_ptr += rgb_stride;
#else
        // RGBA input order. Need to swap R and B.
        VP8LConvertBGRAToRGBA((const uint32_t*)rgb, width, (uint8_t*)dst);
#endif
        rgb += rgb_stride;
        dst += picture->argb_stride;
      }
    }
  } else {
    uint32_t* dst = picture->argb;
    assert(step >= 3);
    for (y = 0; y < height; ++y) {
      WebPPackRGB(r_ptr, g_ptr, b_ptr, width, step, dst);
      r_ptr += rgb_stride;
      g_ptr += rgb_stride;
      b_ptr += rgb_stride;
      dst += picture->argb_stride;
    }
  }
  return 1;
}

// Public API

#if !defined(WEBP_REDUCE_CSP)

int WebPPictureImportBGR(WebPPicture* picture,
                         const uint8_t* bgr, int bgr_stride) {
  return (picture != NULL && bgr != NULL)
             ? Import(picture, bgr, bgr_stride, 3, 1, 0)
             : 0;
}

int WebPPictureImportBGRA(WebPPicture* picture,
                          const uint8_t* bgra, int bgra_stride) {
  return (picture != NULL && bgra != NULL)
             ? Import(picture, bgra, bgra_stride, 4, 1, 1)
             : 0;
}


int WebPPictureImportBGRX(WebPPicture* picture,
                          const uint8_t* bgrx, int bgrx_stride) {
  return (picture != NULL && bgrx != NULL)
             ? Import(picture, bgrx, bgrx_stride, 4, 1, 0)
             : 0;
}

#endif  // WEBP_REDUCE_CSP

int WebPPictureImportRGB(WebPPicture* picture,
                         const uint8_t* rgb, int rgb_stride) {
  return (picture != NULL && rgb != NULL)
             ? Import(picture, rgb, rgb_stride, 3, 0, 0)
             : 0;
}

int WebPPictureImportRGBA(WebPPicture* picture,
                          const uint8_t* rgba, int rgba_stride) {
  return (picture != NULL && rgba != NULL)
             ? Import(picture, rgba, rgba_stride, 4, 0, 1)
             : 0;
}

int WebPPictureImportRGBX(WebPPicture* picture,
                          const uint8_t* rgbx, int rgbx_stride) {
  return (picture != NULL && rgbx != NULL)
             ? Import(picture, rgbx, rgbx_stride, 4, 0, 0)
             : 0;
}
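
// Added usage sketch (not part of the library itself): a typical import path
// using only public entry points from this file and webp/encode.h might be
//   WebPPicture pic;
//   WebPPictureInit(&pic);
//   pic.width = width;
//   pic.height = height;
//   pic.use_argb = 1;
//   if (WebPPictureImportRGBA(&pic, rgba, 4 * width) &&
//       WebPPictureSharpARGBToYUVA(&pic)) {
//     // pic.y/u/v (and pic.a if the source has transparency) now hold the
//     // converted planes.
//   }
//   WebPPictureFree(&pic);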

//------------------------------------------------------------------------------