GitHub Repository: godotengine/godot
Path: blob/master/thirdparty/libwebp/src/enc/analysis_enc.c
// Copyright 2011 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// Macroblock analysis
//
// Author: Skal ([email protected])

#include <assert.h>
#include <stdlib.h>
#include <string.h>

#include "src/dec/common_dec.h"
#include "src/dsp/dsp.h"
#include "src/enc/vp8i_enc.h"
#include "src/utils/thread_utils.h"
#include "src/utils/utils.h"
#include "src/webp/encode.h"
#include "src/webp/types.h"

#define MAX_ITERS_K_MEANS 6

//------------------------------------------------------------------------------
// Smooth the segment map by replacing isolated blocks by the majority of
// their neighbours.

static void SmoothSegmentMap(VP8Encoder* const enc) {
  int n, x, y;
  const int w = enc->mb_w;
  const int h = enc->mb_h;
  const int majority_cnt_3_x_3_grid = 5;
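  // A block's segment is overridden only if at least 5 of its 8 neighbours
  // agree on a single segment, i.e. a majority of the 3x3 neighbourhood.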
  uint8_t* const tmp = (uint8_t*)WebPSafeMalloc(w * h, sizeof(*tmp));
  assert((uint64_t)(w * h) == (uint64_t)w * h);  // no overflow, as per spec

  if (tmp == NULL) return;
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      int cnt[NUM_MB_SEGMENTS] = { 0 };
      const VP8MBInfo* const mb = &enc->mb_info[x + w * y];
      int majority_seg = mb->segment;
      // Check the 8 neighbouring segment values.
      cnt[mb[-w - 1].segment]++;  // top-left
      cnt[mb[-w + 0].segment]++;  // top
      cnt[mb[-w + 1].segment]++;  // top-right
      cnt[mb[   - 1].segment]++;  // left
      cnt[mb[   + 1].segment]++;  // right
      cnt[mb[ w - 1].segment]++;  // bottom-left
      cnt[mb[ w + 0].segment]++;  // bottom
      cnt[mb[ w + 1].segment]++;  // bottom-right
      for (n = 0; n < NUM_MB_SEGMENTS; ++n) {
        if (cnt[n] >= majority_cnt_3_x_3_grid) {
          majority_seg = n;
          break;
        }
      }
      tmp[x + y * w] = majority_seg;
    }
  }
  for (y = 1; y < h - 1; ++y) {
    for (x = 1; x < w - 1; ++x) {
      VP8MBInfo* const mb = &enc->mb_info[x + w * y];
      mb->segment = tmp[x + y * w];
    }
  }
  WebPSafeFree(tmp);
}

//------------------------------------------------------------------------------
// set segment susceptibility 'alpha' / 'beta'

static WEBP_INLINE int clip(int v, int m, int M) {
  return (v < m) ? m : (v > M) ? M : v;
}

static void SetSegmentAlphas(VP8Encoder* const enc,
                             const int centers[NUM_MB_SEGMENTS],
                             int mid) {
  const int nb = enc->segment_hdr.num_segments;
  int min = centers[0], max = centers[0];
  int n;

  if (nb > 1) {
    for (n = 0; n < nb; ++n) {
      if (min > centers[n]) min = centers[n];
      if (max < centers[n]) max = centers[n];
    }
  }
  if (max == min) max = min + 1;
  assert(mid <= max && mid >= min);
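  // Linearly remap each center: 'alpha' is signed and relative to 'mid'
  // (clipped to [-127, 127]), 'beta' is relative to the smallest center
  // (clipped to [0, 255]).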
  for (n = 0; n < nb; ++n) {
    const int alpha = 255 * (centers[n] - mid) / (max - min);
    const int beta = 255 * (centers[n] - min) / (max - min);
    enc->dqm[n].alpha = clip(alpha, -127, 127);
    enc->dqm[n].beta = clip(beta, 0, 255);
  }
}

//------------------------------------------------------------------------------
// Compute susceptibility based on DCT-coeff histograms:
// the higher, the "easier" the macroblock is to compress.

#define MAX_ALPHA 255                // 8b of precision for susceptibilities.
#define ALPHA_SCALE (2 * MAX_ALPHA)  // scaling factor for alpha.
#define DEFAULT_ALPHA (-1)
#define IS_BETTER_ALPHA(alpha, best_alpha) ((alpha) > (best_alpha))

static int FinalAlphaValue(int alpha) {
  alpha = MAX_ALPHA - alpha;
  return clip(alpha, 0, MAX_ALPHA);
}

static int GetAlpha(const VP8Histogram* const histo) {
  // 'alpha' will later be clipped to [0..MAX_ALPHA] range, clamping outer
  // values which happen to be mostly noise. This leaves the maximum precision
  // for handling the useful small values which contribute most.
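  // Roughly: 'last_non_zero' tracks the largest coefficient magnitude seen and
  // 'max_value' the count of the most populated histogram bin, so the ratio
  // below grows for busy blocks and stays small for flat ones (it is inverted
  // later by FinalAlphaValue()). Example: last_non_zero = 10, max_value = 100
  // gives alpha = 510 * 10 / 100 = 51.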
  const int max_value = histo->max_value;
  const int last_non_zero = histo->last_non_zero;
  const int alpha =
      (max_value > 1) ? ALPHA_SCALE * last_non_zero / max_value : 0;
  return alpha;
}

static void InitHistogram(VP8Histogram* const histo) {
  histo->max_value = 0;
  histo->last_non_zero = 1;
}

//------------------------------------------------------------------------------
// Simplified k-Means, to assign Nb segments based on alpha-histogram

static void AssignSegments(VP8Encoder* const enc,
                           const int alphas[MAX_ALPHA + 1]) {
  // 'num_segments' is previously validated and <= NUM_MB_SEGMENTS, but an
  // explicit check is needed to avoid spurious warning about 'n + 1' exceeding
  // array bounds of 'centers' with some compilers (noticed with gcc-4.9).
  const int nb = (enc->segment_hdr.num_segments < NUM_MB_SEGMENTS) ?
                 enc->segment_hdr.num_segments : NUM_MB_SEGMENTS;
  int centers[NUM_MB_SEGMENTS];
  int weighted_average = 0;
  int map[MAX_ALPHA + 1];
  int a, n, k;
  int min_a = 0, max_a = MAX_ALPHA, range_a;
  // 'int' type is ok for histo, and won't overflow
  int accum[NUM_MB_SEGMENTS], dist_accum[NUM_MB_SEGMENTS];

  assert(nb >= 1);
  assert(nb <= NUM_MB_SEGMENTS);

  // bracket the input
  for (n = 0; n <= MAX_ALPHA && alphas[n] == 0; ++n) {}
  min_a = n;
  for (n = MAX_ALPHA; n > min_a && alphas[n] == 0; --n) {}
  max_a = n;
  range_a = max_a - min_a;

  // Spread initial centers evenly
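  // Center k lands at the midpoint of the k-th of 'nb' equal-width buckets:
  // min_a + (2k + 1) * range_a / (2 * nb). E.g. nb = 4 over [0, 240] gives
  // centers at 30, 90, 150, 210.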
  for (k = 0, n = 1; k < nb; ++k, n += 2) {
    assert(n < 2 * nb);
    centers[k] = min_a + (n * range_a) / (2 * nb);
  }

  for (k = 0; k < MAX_ITERS_K_MEANS; ++k) {  // few iters are enough
    int total_weight;
    int displaced;
    // Reset stats
    for (n = 0; n < nb; ++n) {
      accum[n] = 0;
      dist_accum[n] = 0;
    }
    // Assign nearest center for each 'a'
    n = 0;  // track the nearest center for current 'a'
    for (a = min_a; a <= max_a; ++a) {
      if (alphas[a]) {
        while (n + 1 < nb && abs(a - centers[n + 1]) < abs(a - centers[n])) {
          n++;
        }
        map[a] = n;
        // accumulate contribution into best centroid
        dist_accum[n] += a * alphas[a];
        accum[n] += alphas[a];
      }
    }
    // All points are classified. Move the centroids to the
    // center of their respective clouds.
    displaced = 0;
    weighted_average = 0;
    total_weight = 0;
    for (n = 0; n < nb; ++n) {
      if (accum[n]) {
        const int new_center = (dist_accum[n] + accum[n] / 2) / accum[n];
        displaced += abs(centers[n] - new_center);
        centers[n] = new_center;
        weighted_average += new_center * accum[n];
        total_weight += accum[n];
      }
    }
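    // 'weighted_average' is the alpha-weighted mean of the updated centers,
    // rounded; it is later passed to SetSegmentAlphas() as the 'mid' value.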
    weighted_average = (weighted_average + total_weight / 2) / total_weight;
    if (displaced < 5) break;  // no need to keep on looping...
  }

  // Map each original value to the closest centroid
  for (n = 0; n < enc->mb_w * enc->mb_h; ++n) {
    VP8MBInfo* const mb = &enc->mb_info[n];
    const int alpha = mb->alpha;
    mb->segment = map[alpha];
    mb->alpha = centers[map[alpha]];  // for the record.
  }

  if (nb > 1) {
    const int smooth = (enc->config->preprocessing & 1);
    if (smooth) SmoothSegmentMap(enc);
  }

  SetSegmentAlphas(enc, centers, weighted_average);  // pick some alphas.
}

//------------------------------------------------------------------------------
// Macroblock analysis: collect histogram for each mode, deduce the maximal
// susceptibility and set best modes for this macroblock.
// Segment assignment is done later.

// Number of modes to inspect for 'alpha' evaluation. We don't need to test all
// the possible modes during the analysis phase: we risk falling into a local
// optimum, or being subject to boundary effects.
#define MAX_INTRA16_MODE 2
#define MAX_INTRA4_MODE 2
#define MAX_UV_MODE 2

static int MBAnalyzeBestIntra16Mode(VP8EncIterator* const it) {
  const int max_mode = MAX_INTRA16_MODE;
  int mode;
  int best_alpha = DEFAULT_ALPHA;
  int best_mode = 0;

  VP8MakeLuma16Preds(it);
  for (mode = 0; mode < max_mode; ++mode) {
    VP8Histogram histo;
    int alpha;

    InitHistogram(&histo);
    VP8CollectHistogram(it->yuv_in + Y_OFF_ENC,
                        it->yuv_p + VP8I16ModeOffsets[mode],
                        0, 16, &histo);
    alpha = GetAlpha(&histo);
    if (IS_BETTER_ALPHA(alpha, best_alpha)) {
      best_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntra16Mode(it, best_mode);
  return best_alpha;
}

static int FastMBAnalyze(VP8EncIterator* const it) {
  // Empirical cut-off value, should be around 16 (~=block size). We use the
  // [8-17] range and favor intra4 at high quality, intra16 for low quality.
  const int q = (int)it->enc->config->quality;
  const uint32_t kThreshold = 8 + (17 - 8) * q / 100;
  int k;
  uint32_t dc[16], m, m2;
  for (k = 0; k < 16; k += 4) {
    VP8Mean16x4(it->yuv_in + Y_OFF_ENC + k * BPS, &dc[k]);
  }
  for (m = 0, m2 = 0, k = 0; k < 16; ++k) {
    m += dc[k];
    m2 += dc[k] * dc[k];
  }
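  // 'm' is the sum of the 16 per-4x4-block values from VP8Mean16x4() and 'm2'
  // the sum of their squares. By Cauchy-Schwarz, m * m <= 16 * m2, with
  // equality when all 16 values are equal, so the test below selects DC16 for
  // nearly uniform luma; a higher quality raises kThreshold and favors intra4.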
  if (kThreshold * m2 < m * m) {
    VP8SetIntra16Mode(it, 0);  // DC16
  } else {
    const uint8_t modes[16] = { 0 };  // DC4
    VP8SetIntra4Mode(it, modes);
  }
  return 0;
}

static int MBAnalyzeBestUVMode(VP8EncIterator* const it) {
  int best_alpha = DEFAULT_ALPHA;
  int smallest_alpha = 0;
  int best_mode = 0;
  const int max_mode = MAX_UV_MODE;
  int mode;

  VP8MakeChroma8Preds(it);
  for (mode = 0; mode < max_mode; ++mode) {
    VP8Histogram histo;
    int alpha;
    InitHistogram(&histo);
    VP8CollectHistogram(it->yuv_in + U_OFF_ENC,
                        it->yuv_p + VP8UVModeOffsets[mode],
                        16, 16 + 4 + 4, &histo);
    alpha = GetAlpha(&histo);
    if (IS_BETTER_ALPHA(alpha, best_alpha)) {
      best_alpha = alpha;
    }
    // The best prediction mode tends to be the one with the smallest alpha.
    if (mode == 0 || alpha < smallest_alpha) {
      smallest_alpha = alpha;
      best_mode = mode;
    }
  }
  VP8SetIntraUVMode(it, best_mode);
  return best_alpha;
}

static void MBAnalyze(VP8EncIterator* const it,
                      int alphas[MAX_ALPHA + 1],
                      int* const alpha, int* const uv_alpha) {
  const VP8Encoder* const enc = it->enc;
  int best_alpha, best_uv_alpha;

  VP8SetIntra16Mode(it, 0);  // default: Intra16, DC_PRED
  VP8SetSkip(it, 0);         // not skipped
  VP8SetSegment(it, 0);      // default segment, spec-wise.

  if (enc->method <= 1) {
    best_alpha = FastMBAnalyze(it);
  } else {
    best_alpha = MBAnalyzeBestIntra16Mode(it);
  }
  best_uv_alpha = MBAnalyzeBestUVMode(it);

  // Final susceptibility mix
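  // (a 3:1 luma/chroma weighted average, with rounding: (3 * y + uv + 2) / 4)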
  best_alpha = (3 * best_alpha + best_uv_alpha + 2) >> 2;
  best_alpha = FinalAlphaValue(best_alpha);
  alphas[best_alpha]++;
  it->mb->alpha = best_alpha;  // for later remapping.

  // Accumulate for later complexity analysis.
  *alpha += best_alpha;  // mixed susceptibility (not just luma)
  *uv_alpha += best_uv_alpha;
}

static void DefaultMBInfo(VP8MBInfo* const mb) {
  mb->type = 1;     // I16x16
  mb->uv_mode = 0;
  mb->skip = 0;     // not skipped
  mb->segment = 0;  // default segment
  mb->alpha = 0;
}

//------------------------------------------------------------------------------
// Main analysis loop:
// Collect all susceptibilities for each macroblock and record their
// distribution in alphas[]. Segments are assigned a-posteriori, based on
// this histogram.
// We also pick an intra16 prediction mode, which shouldn't be considered
// final except for fast-encode settings. We can also pick some intra4 modes
// and decide intra4/intra16, but that's almost always a bad choice at
// this stage.

static void ResetAllMBInfo(VP8Encoder* const enc) {
  int n;
  for (n = 0; n < enc->mb_w * enc->mb_h; ++n) {
    DefaultMBInfo(&enc->mb_info[n]);
  }
  // Default susceptibilities.
  enc->dqm[0].alpha = 0;
  enc->dqm[0].beta = 0;
  // Note: we can't compute this 'alpha' / 'uv_alpha' -> set to default value.
  enc->alpha = 0;
  enc->uv_alpha = 0;
  WebPReportProgress(enc->pic, enc->percent + 20, &enc->percent);
}

// struct used to collect job result
typedef struct {
  WebPWorker worker;
  int alphas[MAX_ALPHA + 1];
  int alpha, uv_alpha;
  VP8EncIterator it;
  int delta_progress;
} SegmentJob;

// main work call
static int DoSegmentsJob(void* arg1, void* arg2) {
  SegmentJob* const job = (SegmentJob*)arg1;
  VP8EncIterator* const it = (VP8EncIterator*)arg2;
  int ok = 1;
  if (!VP8IteratorIsDone(it)) {
    uint8_t tmp[32 + WEBP_ALIGN_CST];
    uint8_t* const scratch = (uint8_t*)WEBP_ALIGN(tmp);
    do {
      // Let's pretend we have perfect lossless reconstruction.
      VP8IteratorImport(it, scratch);
      MBAnalyze(it, job->alphas, &job->alpha, &job->uv_alpha);
      ok = VP8IteratorProgress(it, job->delta_progress);
    } while (ok && VP8IteratorNext(it));
  }
  return ok;
}

#ifdef WEBP_USE_THREAD
static void MergeJobs(const SegmentJob* const src, SegmentJob* const dst) {
  int i;
  for (i = 0; i <= MAX_ALPHA; ++i) dst->alphas[i] += src->alphas[i];
  dst->alpha += src->alpha;
  dst->uv_alpha += src->uv_alpha;
}
#endif

// initialize the job struct with some tasks to perform
static void InitSegmentJob(VP8Encoder* const enc, SegmentJob* const job,
                           int start_row, int end_row) {
  WebPGetWorkerInterface()->Init(&job->worker);
  job->worker.data1 = job;
  job->worker.data2 = &job->it;
  job->worker.hook = DoSegmentsJob;
  VP8IteratorInit(enc, &job->it);
  VP8IteratorSetRow(&job->it, start_row);
  VP8IteratorSetCountDown(&job->it, (end_row - start_row) * enc->mb_w);
  memset(job->alphas, 0, sizeof(job->alphas));
  job->alpha = 0;
  job->uv_alpha = 0;
  // only one of the two jobs can record the progress, since we don't
  // expect the user's hook to be multi-thread safe
  job->delta_progress = (start_row == 0) ? 20 : 0;
}

// main entry point
int VP8EncAnalyze(VP8Encoder* const enc) {
  int ok = 1;
  const int do_segments =
      enc->config->emulate_jpeg_size ||  // We need the complexity evaluation.
      (enc->segment_hdr.num_segments > 1) ||
      (enc->method <= 1);  // for method 0 - 1, we need preds[] to be filled.
  if (do_segments) {
    const int last_row = enc->mb_h;
    const int total_mb = last_row * enc->mb_w;
#ifdef WEBP_USE_THREAD
    // We give a little more than half the work to the main thread.
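    // split_row = ceil(9 * last_row / 16), i.e. roughly 56% of the rows.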
    const int split_row = (9 * last_row + 15) >> 4;
    const int kMinSplitRow = 2;  // minimal rows needed for mt to be worth it
    const int do_mt = (enc->thread_level > 0) && (split_row >= kMinSplitRow);
#else
    const int do_mt = 0;
#endif
    const WebPWorkerInterface* const worker_interface =
        WebPGetWorkerInterface();
    SegmentJob main_job;
    if (do_mt) {
#ifdef WEBP_USE_THREAD
      SegmentJob side_job;
      // Note the use of '&' instead of '&&' because we must call the functions
      // no matter what.
      InitSegmentJob(enc, &main_job, 0, split_row);
      InitSegmentJob(enc, &side_job, split_row, last_row);
      // we don't need to call Reset() on main_job.worker, since we're calling
      // WebPWorkerExecute() on it
      ok &= worker_interface->Reset(&side_job.worker);
      // launch the two jobs in parallel
      if (ok) {
        worker_interface->Launch(&side_job.worker);
        worker_interface->Execute(&main_job.worker);
        ok &= worker_interface->Sync(&side_job.worker);
        ok &= worker_interface->Sync(&main_job.worker);
      }
      worker_interface->End(&side_job.worker);
      if (ok) MergeJobs(&side_job, &main_job);  // merge results together
#endif  // WEBP_USE_THREAD
    } else {
      // Even for single-thread case, we use the generic Worker tools.
      InitSegmentJob(enc, &main_job, 0, last_row);
      worker_interface->Execute(&main_job.worker);
      ok &= worker_interface->Sync(&main_job.worker);
    }
    worker_interface->End(&main_job.worker);
    if (ok) {
      enc->alpha = main_job.alpha / total_mb;
      enc->uv_alpha = main_job.uv_alpha / total_mb;
      AssignSegments(enc, main_job.alphas);
    }
  } else {  // Use only one default segment.
    ResetAllMBInfo(enc);
  }
  if (!ok) {
    return WebPEncodingSetError(enc->pic,
                                VP8_ENC_ERROR_OUT_OF_MEMORY);  // imprecise
  }
  return ok;
}