Path: blob/master/thirdparty/libwebp/src/enc/histogram_enc.c
9913 views
// Copyright 2012 Google Inc. All Rights Reserved.1//2// Use of this source code is governed by a BSD-style license3// that can be found in the COPYING file in the root of the source4// tree. An additional intellectual property rights grant can be found5// in the file PATENTS. All contributing project authors may6// be found in the AUTHORS file in the root of the source tree.7// -----------------------------------------------------------------------------8//9// Author: Jyrki Alakuijala ([email protected])10//11#ifdef HAVE_CONFIG_H12#include "src/webp/config.h"13#endif1415#include <string.h>1617#include "src/dsp/lossless.h"18#include "src/dsp/lossless_common.h"19#include "src/enc/backward_references_enc.h"20#include "src/enc/histogram_enc.h"21#include "src/enc/vp8i_enc.h"22#include "src/utils/utils.h"2324// Number of partitions for the three dominant (literal, red and blue) symbol25// costs.26#define NUM_PARTITIONS 427// The size of the bin-hash corresponding to the three dominant costs.28#define BIN_SIZE (NUM_PARTITIONS * NUM_PARTITIONS * NUM_PARTITIONS)29// Maximum number of histograms allowed in greedy combining algorithm.30#define MAX_HISTO_GREEDY 1003132// Return the size of the histogram for a given cache_bits.33static int GetHistogramSize(int cache_bits) {34const int literal_size = VP8LHistogramNumCodes(cache_bits);35const size_t total_size = sizeof(VP8LHistogram) + sizeof(int) * literal_size;36assert(total_size <= (size_t)0x7fffffff);37return (int)total_size;38}3940static void HistogramClear(VP8LHistogram* const p) {41uint32_t* const literal = p->literal_;42const int cache_bits = p->palette_code_bits_;43const int histo_size = GetHistogramSize(cache_bits);44memset(p, 0, histo_size);45p->palette_code_bits_ = cache_bits;46p->literal_ = literal;47}4849// Swap two histogram pointers.50static void HistogramSwap(VP8LHistogram** const A, VP8LHistogram** const B) {51VP8LHistogram* const tmp = *A;52*A = *B;53*B = tmp;54}5556static void HistogramCopy(const VP8LHistogram* const src,57VP8LHistogram* const dst) {58uint32_t* const dst_literal = dst->literal_;59const int dst_cache_bits = dst->palette_code_bits_;60const int literal_size = VP8LHistogramNumCodes(dst_cache_bits);61const int histo_size = GetHistogramSize(dst_cache_bits);62assert(src->palette_code_bits_ == dst_cache_bits);63memcpy(dst, src, histo_size);64dst->literal_ = dst_literal;65memcpy(dst->literal_, src->literal_, literal_size * sizeof(*dst->literal_));66}6768void VP8LFreeHistogram(VP8LHistogram* const histo) {69WebPSafeFree(histo);70}7172void VP8LFreeHistogramSet(VP8LHistogramSet* const histo) {73WebPSafeFree(histo);74}7576void VP8LHistogramStoreRefs(const VP8LBackwardRefs* const refs,77VP8LHistogram* const histo) {78VP8LRefsCursor c = VP8LRefsCursorInit(refs);79while (VP8LRefsCursorOk(&c)) {80VP8LHistogramAddSinglePixOrCopy(histo, c.cur_pos, NULL, 0);81VP8LRefsCursorNext(&c);82}83}8485void VP8LHistogramCreate(VP8LHistogram* const p,86const VP8LBackwardRefs* const refs,87int palette_code_bits) {88if (palette_code_bits >= 0) {89p->palette_code_bits_ = palette_code_bits;90}91HistogramClear(p);92VP8LHistogramStoreRefs(refs, p);93}9495void VP8LHistogramInit(VP8LHistogram* const p, int palette_code_bits,96int init_arrays) {97p->palette_code_bits_ = palette_code_bits;98if (init_arrays) {99HistogramClear(p);100} else {101p->trivial_symbol_ = 0;102p->bit_cost_ = 0;103p->literal_cost_ = 0;104p->red_cost_ = 0;105p->blue_cost_ = 0;106memset(p->is_used_, 0, sizeof(p->is_used_));107}108}109110VP8LHistogram* VP8LAllocateHistogram(int cache_bits) {111VP8LHistogram* histo = NULL;112const int total_size = GetHistogramSize(cache_bits);113uint8_t* const memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));114if (memory == NULL) return NULL;115histo = (VP8LHistogram*)memory;116// literal_ won't necessary be aligned.117histo->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));118VP8LHistogramInit(histo, cache_bits, /*init_arrays=*/ 0);119return histo;120}121122// Resets the pointers of the histograms to point to the bit buffer in the set.123static void HistogramSetResetPointers(VP8LHistogramSet* const set,124int cache_bits) {125int i;126const int histo_size = GetHistogramSize(cache_bits);127uint8_t* memory = (uint8_t*) (set->histograms);128memory += set->max_size * sizeof(*set->histograms);129for (i = 0; i < set->max_size; ++i) {130memory = (uint8_t*) WEBP_ALIGN(memory);131set->histograms[i] = (VP8LHistogram*) memory;132// literal_ won't necessary be aligned.133set->histograms[i]->literal_ = (uint32_t*)(memory + sizeof(VP8LHistogram));134memory += histo_size;135}136}137138// Returns the total size of the VP8LHistogramSet.139static size_t HistogramSetTotalSize(int size, int cache_bits) {140const int histo_size = GetHistogramSize(cache_bits);141return (sizeof(VP8LHistogramSet) + size * (sizeof(VP8LHistogram*) +142histo_size + WEBP_ALIGN_CST));143}144145VP8LHistogramSet* VP8LAllocateHistogramSet(int size, int cache_bits) {146int i;147VP8LHistogramSet* set;148const size_t total_size = HistogramSetTotalSize(size, cache_bits);149uint8_t* memory = (uint8_t*)WebPSafeMalloc(total_size, sizeof(*memory));150if (memory == NULL) return NULL;151152set = (VP8LHistogramSet*)memory;153memory += sizeof(*set);154set->histograms = (VP8LHistogram**)memory;155set->max_size = size;156set->size = size;157HistogramSetResetPointers(set, cache_bits);158for (i = 0; i < size; ++i) {159VP8LHistogramInit(set->histograms[i], cache_bits, /*init_arrays=*/ 0);160}161return set;162}163164void VP8LHistogramSetClear(VP8LHistogramSet* const set) {165int i;166const int cache_bits = set->histograms[0]->palette_code_bits_;167const int size = set->max_size;168const size_t total_size = HistogramSetTotalSize(size, cache_bits);169uint8_t* memory = (uint8_t*)set;170171memset(memory, 0, total_size);172memory += sizeof(*set);173set->histograms = (VP8LHistogram**)memory;174set->max_size = size;175set->size = size;176HistogramSetResetPointers(set, cache_bits);177for (i = 0; i < size; ++i) {178set->histograms[i]->palette_code_bits_ = cache_bits;179}180}181182// Removes the histogram 'i' from 'set' by setting it to NULL.183static void HistogramSetRemoveHistogram(VP8LHistogramSet* const set, int i,184int* const num_used) {185assert(set->histograms[i] != NULL);186set->histograms[i] = NULL;187--*num_used;188// If we remove the last valid one, shrink until the next valid one.189if (i == set->size - 1) {190while (set->size >= 1 && set->histograms[set->size - 1] == NULL) {191--set->size;192}193}194}195196// -----------------------------------------------------------------------------197198void VP8LHistogramAddSinglePixOrCopy(VP8LHistogram* const histo,199const PixOrCopy* const v,200int (*const distance_modifier)(int, int),201int distance_modifier_arg0) {202if (PixOrCopyIsLiteral(v)) {203++histo->alpha_[PixOrCopyLiteral(v, 3)];204++histo->red_[PixOrCopyLiteral(v, 2)];205++histo->literal_[PixOrCopyLiteral(v, 1)];206++histo->blue_[PixOrCopyLiteral(v, 0)];207} else if (PixOrCopyIsCacheIdx(v)) {208const int literal_ix =209NUM_LITERAL_CODES + NUM_LENGTH_CODES + PixOrCopyCacheIdx(v);210assert(histo->palette_code_bits_ != 0);211++histo->literal_[literal_ix];212} else {213int code, extra_bits;214VP8LPrefixEncodeBits(PixOrCopyLength(v), &code, &extra_bits);215++histo->literal_[NUM_LITERAL_CODES + code];216if (distance_modifier == NULL) {217VP8LPrefixEncodeBits(PixOrCopyDistance(v), &code, &extra_bits);218} else {219VP8LPrefixEncodeBits(220distance_modifier(distance_modifier_arg0, PixOrCopyDistance(v)),221&code, &extra_bits);222}223++histo->distance_[code];224}225}226227// -----------------------------------------------------------------------------228// Entropy-related functions.229230static WEBP_INLINE uint64_t BitsEntropyRefine(const VP8LBitEntropy* entropy) {231uint64_t mix;232if (entropy->nonzeros < 5) {233if (entropy->nonzeros <= 1) {234return 0;235}236// Two symbols, they will be 0 and 1 in a Huffman code.237// Let's mix in a bit of entropy to favor good clustering when238// distributions of these are combined.239if (entropy->nonzeros == 2) {240return DivRound(99 * ((uint64_t)entropy->sum << LOG_2_PRECISION_BITS) +241entropy->entropy,242100);243}244// No matter what the entropy says, we cannot be better than min_limit245// with Huffman coding. I am mixing a bit of entropy into the246// min_limit since it produces much better (~0.5 %) compression results247// perhaps because of better entropy clustering.248if (entropy->nonzeros == 3) {249mix = 950;250} else {251mix = 700; // nonzeros == 4.252}253} else {254mix = 627;255}256257{258uint64_t min_limit = (uint64_t)(2 * entropy->sum - entropy->max_val)259<< LOG_2_PRECISION_BITS;260min_limit =261DivRound(mix * min_limit + (1000 - mix) * entropy->entropy, 1000);262return (entropy->entropy < min_limit) ? min_limit : entropy->entropy;263}264}265266uint64_t VP8LBitsEntropy(const uint32_t* const array, int n) {267VP8LBitEntropy entropy;268VP8LBitsEntropyUnrefined(array, n, &entropy);269270return BitsEntropyRefine(&entropy);271}272273static uint64_t InitialHuffmanCost(void) {274// Small bias because Huffman code length is typically not stored in275// full length.276static const uint64_t kHuffmanCodeOfHuffmanCodeSize = CODE_LENGTH_CODES * 3;277// Subtract a bias of 9.1.278return (kHuffmanCodeOfHuffmanCodeSize << LOG_2_PRECISION_BITS) -279DivRound(91ll << LOG_2_PRECISION_BITS, 10);280}281282// Finalize the Huffman cost based on streak numbers and length type (<3 or >=3)283static uint64_t FinalHuffmanCost(const VP8LStreaks* const stats) {284// The constants in this function are empirical and got rounded from285// their original values in 1/8 when switched to 1/1024.286uint64_t retval = InitialHuffmanCost();287// Second coefficient: Many zeros in the histogram are covered efficiently288// by a run-length encode. Originally 2/8.289uint32_t retval_extra = stats->counts[0] * 1600 + 240 * stats->streaks[0][1];290// Second coefficient: Constant values are encoded less efficiently, but still291// RLE'ed. Originally 6/8.292retval_extra += stats->counts[1] * 2640 + 720 * stats->streaks[1][1];293// 0s are usually encoded more efficiently than non-0s.294// Originally 15/8.295retval_extra += 1840 * stats->streaks[0][0];296// Originally 26/8.297retval_extra += 3360 * stats->streaks[1][0];298return retval + ((uint64_t)retval_extra << (LOG_2_PRECISION_BITS - 10));299}300301// Get the symbol entropy for the distribution 'population'.302// Set 'trivial_sym', if there's only one symbol present in the distribution.303static uint64_t PopulationCost(const uint32_t* const population, int length,304uint32_t* const trivial_sym,305uint8_t* const is_used) {306VP8LBitEntropy bit_entropy;307VP8LStreaks stats;308VP8LGetEntropyUnrefined(population, length, &bit_entropy, &stats);309if (trivial_sym != NULL) {310*trivial_sym = (bit_entropy.nonzeros == 1) ? bit_entropy.nonzero_code311: VP8L_NON_TRIVIAL_SYM;312}313// The histogram is used if there is at least one non-zero streak.314*is_used = (stats.streaks[1][0] != 0 || stats.streaks[1][1] != 0);315316return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);317}318319// trivial_at_end is 1 if the two histograms only have one element that is320// non-zero: both the zero-th one, or both the last one.321static WEBP_INLINE uint64_t GetCombinedEntropy(const uint32_t* const X,322const uint32_t* const Y,323int length, int is_X_used,324int is_Y_used,325int trivial_at_end) {326VP8LStreaks stats;327if (trivial_at_end) {328// This configuration is due to palettization that transforms an indexed329// pixel into 0xff000000 | (pixel << 8) in VP8LBundleColorMap.330// BitsEntropyRefine is 0 for histograms with only one non-zero value.331// Only FinalHuffmanCost needs to be evaluated.332memset(&stats, 0, sizeof(stats));333// Deal with the non-zero value at index 0 or length-1.334stats.streaks[1][0] = 1;335// Deal with the following/previous zero streak.336stats.counts[0] = 1;337stats.streaks[0][1] = length - 1;338return FinalHuffmanCost(&stats);339} else {340VP8LBitEntropy bit_entropy;341if (is_X_used) {342if (is_Y_used) {343VP8LGetCombinedEntropyUnrefined(X, Y, length, &bit_entropy, &stats);344} else {345VP8LGetEntropyUnrefined(X, length, &bit_entropy, &stats);346}347} else {348if (is_Y_used) {349VP8LGetEntropyUnrefined(Y, length, &bit_entropy, &stats);350} else {351memset(&stats, 0, sizeof(stats));352stats.counts[0] = 1;353stats.streaks[0][length > 3] = length;354VP8LBitEntropyInit(&bit_entropy);355}356}357358return BitsEntropyRefine(&bit_entropy) + FinalHuffmanCost(&stats);359}360}361362// Estimates the Entropy + Huffman + other block overhead size cost.363uint64_t VP8LHistogramEstimateBits(VP8LHistogram* const p) {364return PopulationCost(p->literal_,365VP8LHistogramNumCodes(p->palette_code_bits_), NULL,366&p->is_used_[0]) +367PopulationCost(p->red_, NUM_LITERAL_CODES, NULL, &p->is_used_[1]) +368PopulationCost(p->blue_, NUM_LITERAL_CODES, NULL, &p->is_used_[2]) +369PopulationCost(p->alpha_, NUM_LITERAL_CODES, NULL, &p->is_used_[3]) +370PopulationCost(p->distance_, NUM_DISTANCE_CODES, NULL,371&p->is_used_[4]) +372((uint64_t)(VP8LExtraCost(p->literal_ + NUM_LITERAL_CODES,373NUM_LENGTH_CODES) +374VP8LExtraCost(p->distance_, NUM_DISTANCE_CODES))375<< LOG_2_PRECISION_BITS);376}377378// -----------------------------------------------------------------------------379// Various histogram combine/cost-eval functions380381// Set a + b in b, saturating at WEBP_INT64_MAX.382static WEBP_INLINE void SaturateAdd(uint64_t a, int64_t* b) {383if (*b < 0 || (int64_t)a <= WEBP_INT64_MAX - *b) {384*b += (int64_t)a;385} else {386*b = WEBP_INT64_MAX;387}388}389390// Returns 1 if the cost of the combined histogram is less than the threshold.391// Otherwise returns 0 and the cost is invalid due to early bail-out.392WEBP_NODISCARD static int GetCombinedHistogramEntropy(393const VP8LHistogram* const a, const VP8LHistogram* const b,394int64_t cost_threshold_in, uint64_t* cost) {395const int palette_code_bits = a->palette_code_bits_;396int trivial_at_end = 0;397const uint64_t cost_threshold = (uint64_t)cost_threshold_in;398assert(a->palette_code_bits_ == b->palette_code_bits_);399if (cost_threshold_in <= 0) return 0;400*cost = GetCombinedEntropy(a->literal_, b->literal_,401VP8LHistogramNumCodes(palette_code_bits),402a->is_used_[0], b->is_used_[0], 0);403*cost += (uint64_t)VP8LExtraCostCombined(a->literal_ + NUM_LITERAL_CODES,404b->literal_ + NUM_LITERAL_CODES,405NUM_LENGTH_CODES)406<< LOG_2_PRECISION_BITS;407if (*cost >= cost_threshold) return 0;408409if (a->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM &&410a->trivial_symbol_ == b->trivial_symbol_) {411// A, R and B are all 0 or 0xff.412const uint32_t color_a = (a->trivial_symbol_ >> 24) & 0xff;413const uint32_t color_r = (a->trivial_symbol_ >> 16) & 0xff;414const uint32_t color_b = (a->trivial_symbol_ >> 0) & 0xff;415if ((color_a == 0 || color_a == 0xff) &&416(color_r == 0 || color_r == 0xff) &&417(color_b == 0 || color_b == 0xff)) {418trivial_at_end = 1;419}420}421422*cost += GetCombinedEntropy(a->red_, b->red_, NUM_LITERAL_CODES,423a->is_used_[1], b->is_used_[1], trivial_at_end);424if (*cost >= cost_threshold) return 0;425426*cost += GetCombinedEntropy(a->blue_, b->blue_, NUM_LITERAL_CODES,427a->is_used_[2], b->is_used_[2], trivial_at_end);428if (*cost >= cost_threshold) return 0;429430*cost += GetCombinedEntropy(a->alpha_, b->alpha_, NUM_LITERAL_CODES,431a->is_used_[3], b->is_used_[3], trivial_at_end);432if (*cost >= cost_threshold) return 0;433434*cost += GetCombinedEntropy(a->distance_, b->distance_, NUM_DISTANCE_CODES,435a->is_used_[4], b->is_used_[4], 0);436*cost += (uint64_t)VP8LExtraCostCombined(a->distance_, b->distance_,437NUM_DISTANCE_CODES)438<< LOG_2_PRECISION_BITS;439if (*cost >= cost_threshold) return 0;440441return 1;442}443444static WEBP_INLINE void HistogramAdd(const VP8LHistogram* const a,445const VP8LHistogram* const b,446VP8LHistogram* const out) {447VP8LHistogramAdd(a, b, out);448out->trivial_symbol_ = (a->trivial_symbol_ == b->trivial_symbol_)449? a->trivial_symbol_450: VP8L_NON_TRIVIAL_SYM;451}452453// Performs out = a + b, computing the cost C(a+b) - C(a) - C(b) while comparing454// to the threshold value 'cost_threshold'. The score returned is455// Score = C(a+b) - C(a) - C(b), where C(a) + C(b) is known and fixed.456// Since the previous score passed is 'cost_threshold', we only need to compare457// the partial cost against 'cost_threshold + C(a) + C(b)' to possibly bail-out458// early.459// Returns 1 if the cost is less than the threshold.460// Otherwise returns 0 and the cost is invalid due to early bail-out.461WEBP_NODISCARD static int HistogramAddEval(const VP8LHistogram* const a,462const VP8LHistogram* const b,463VP8LHistogram* const out,464int64_t cost_threshold) {465uint64_t cost;466const uint64_t sum_cost = a->bit_cost_ + b->bit_cost_;467SaturateAdd(sum_cost, &cost_threshold);468if (!GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) return 0;469470HistogramAdd(a, b, out);471out->bit_cost_ = cost;472out->palette_code_bits_ = a->palette_code_bits_;473return 1;474}475476// Same as HistogramAddEval(), except that the resulting histogram477// is not stored. Only the cost C(a+b) - C(a) is evaluated. We omit478// the term C(b) which is constant over all the evaluations.479// Returns 1 if the cost is less than the threshold.480// Otherwise returns 0 and the cost is invalid due to early bail-out.481WEBP_NODISCARD static int HistogramAddThresh(const VP8LHistogram* const a,482const VP8LHistogram* const b,483int64_t cost_threshold,484int64_t* cost_out) {485uint64_t cost;486assert(a != NULL && b != NULL);487SaturateAdd(a->bit_cost_, &cost_threshold);488if (!GetCombinedHistogramEntropy(a, b, cost_threshold, &cost)) return 0;489490*cost_out = (int64_t)cost - (int64_t)a->bit_cost_;491return 1;492}493494// -----------------------------------------------------------------------------495496// The structure to keep track of cost range for the three dominant entropy497// symbols.498typedef struct {499uint64_t literal_max_;500uint64_t literal_min_;501uint64_t red_max_;502uint64_t red_min_;503uint64_t blue_max_;504uint64_t blue_min_;505} DominantCostRange;506507static void DominantCostRangeInit(DominantCostRange* const c) {508c->literal_max_ = 0;509c->literal_min_ = WEBP_UINT64_MAX;510c->red_max_ = 0;511c->red_min_ = WEBP_UINT64_MAX;512c->blue_max_ = 0;513c->blue_min_ = WEBP_UINT64_MAX;514}515516static void UpdateDominantCostRange(517const VP8LHistogram* const h, DominantCostRange* const c) {518if (c->literal_max_ < h->literal_cost_) c->literal_max_ = h->literal_cost_;519if (c->literal_min_ > h->literal_cost_) c->literal_min_ = h->literal_cost_;520if (c->red_max_ < h->red_cost_) c->red_max_ = h->red_cost_;521if (c->red_min_ > h->red_cost_) c->red_min_ = h->red_cost_;522if (c->blue_max_ < h->blue_cost_) c->blue_max_ = h->blue_cost_;523if (c->blue_min_ > h->blue_cost_) c->blue_min_ = h->blue_cost_;524}525526static void UpdateHistogramCost(VP8LHistogram* const h) {527uint32_t alpha_sym, red_sym, blue_sym;528const uint64_t alpha_cost =529PopulationCost(h->alpha_, NUM_LITERAL_CODES, &alpha_sym, &h->is_used_[3]);530const uint64_t distance_cost =531PopulationCost(h->distance_, NUM_DISTANCE_CODES, NULL, &h->is_used_[4]) +532((uint64_t)VP8LExtraCost(h->distance_, NUM_DISTANCE_CODES)533<< LOG_2_PRECISION_BITS);534const int num_codes = VP8LHistogramNumCodes(h->palette_code_bits_);535h->literal_cost_ =536PopulationCost(h->literal_, num_codes, NULL, &h->is_used_[0]) +537((uint64_t)VP8LExtraCost(h->literal_ + NUM_LITERAL_CODES,538NUM_LENGTH_CODES)539<< LOG_2_PRECISION_BITS);540h->red_cost_ =541PopulationCost(h->red_, NUM_LITERAL_CODES, &red_sym, &h->is_used_[1]);542h->blue_cost_ =543PopulationCost(h->blue_, NUM_LITERAL_CODES, &blue_sym, &h->is_used_[2]);544h->bit_cost_ = h->literal_cost_ + h->red_cost_ + h->blue_cost_ +545alpha_cost + distance_cost;546if ((alpha_sym | red_sym | blue_sym) == VP8L_NON_TRIVIAL_SYM) {547h->trivial_symbol_ = VP8L_NON_TRIVIAL_SYM;548} else {549h->trivial_symbol_ =550((uint32_t)alpha_sym << 24) | (red_sym << 16) | (blue_sym << 0);551}552}553554static int GetBinIdForEntropy(uint64_t min, uint64_t max, uint64_t val) {555const uint64_t range = max - min;556if (range > 0) {557const uint64_t delta = val - min;558return (int)((NUM_PARTITIONS - 1e-6) * delta / range);559} else {560return 0;561}562}563564static int GetHistoBinIndex(const VP8LHistogram* const h,565const DominantCostRange* const c, int low_effort) {566int bin_id = GetBinIdForEntropy(c->literal_min_, c->literal_max_,567h->literal_cost_);568assert(bin_id < NUM_PARTITIONS);569if (!low_effort) {570bin_id = bin_id * NUM_PARTITIONS571+ GetBinIdForEntropy(c->red_min_, c->red_max_, h->red_cost_);572bin_id = bin_id * NUM_PARTITIONS573+ GetBinIdForEntropy(c->blue_min_, c->blue_max_, h->blue_cost_);574assert(bin_id < BIN_SIZE);575}576return bin_id;577}578579// Construct the histograms from backward references.580static void HistogramBuild(581int xsize, int histo_bits, const VP8LBackwardRefs* const backward_refs,582VP8LHistogramSet* const image_histo) {583int x = 0, y = 0;584const int histo_xsize = VP8LSubSampleSize(xsize, histo_bits);585VP8LHistogram** const histograms = image_histo->histograms;586VP8LRefsCursor c = VP8LRefsCursorInit(backward_refs);587assert(histo_bits > 0);588VP8LHistogramSetClear(image_histo);589while (VP8LRefsCursorOk(&c)) {590const PixOrCopy* const v = c.cur_pos;591const int ix = (y >> histo_bits) * histo_xsize + (x >> histo_bits);592VP8LHistogramAddSinglePixOrCopy(histograms[ix], v, NULL, 0);593x += PixOrCopyLength(v);594while (x >= xsize) {595x -= xsize;596++y;597}598VP8LRefsCursorNext(&c);599}600}601602// Copies the histograms and computes its bit_cost.603static const uint32_t kInvalidHistogramSymbol = (uint32_t)(-1);604static void HistogramCopyAndAnalyze(VP8LHistogramSet* const orig_histo,605VP8LHistogramSet* const image_histo,606int* const num_used,607uint32_t* const histogram_symbols) {608int i, cluster_id;609int num_used_orig = *num_used;610VP8LHistogram** const orig_histograms = orig_histo->histograms;611VP8LHistogram** const histograms = image_histo->histograms;612assert(image_histo->max_size == orig_histo->max_size);613for (cluster_id = 0, i = 0; i < orig_histo->max_size; ++i) {614VP8LHistogram* const histo = orig_histograms[i];615UpdateHistogramCost(histo);616617// Skip the histogram if it is completely empty, which can happen for tiles618// with no information (when they are skipped because of LZ77).619if (!histo->is_used_[0] && !histo->is_used_[1] && !histo->is_used_[2]620&& !histo->is_used_[3] && !histo->is_used_[4]) {621// The first histogram is always used. If an histogram is empty, we set622// its id to be the same as the previous one: this will improve623// compressibility for later LZ77.624assert(i > 0);625HistogramSetRemoveHistogram(image_histo, i, num_used);626HistogramSetRemoveHistogram(orig_histo, i, &num_used_orig);627histogram_symbols[i] = kInvalidHistogramSymbol;628} else {629// Copy histograms from orig_histo[] to image_histo[].630HistogramCopy(histo, histograms[i]);631histogram_symbols[i] = cluster_id++;632assert(cluster_id <= image_histo->max_size);633}634}635}636637// Partition histograms to different entropy bins for three dominant (literal,638// red and blue) symbol costs and compute the histogram aggregate bit_cost.639static void HistogramAnalyzeEntropyBin(VP8LHistogramSet* const image_histo,640uint16_t* const bin_map,641int low_effort) {642int i;643VP8LHistogram** const histograms = image_histo->histograms;644const int histo_size = image_histo->size;645DominantCostRange cost_range;646DominantCostRangeInit(&cost_range);647648// Analyze the dominant (literal, red and blue) entropy costs.649for (i = 0; i < histo_size; ++i) {650if (histograms[i] == NULL) continue;651UpdateDominantCostRange(histograms[i], &cost_range);652}653654// bin-hash histograms on three of the dominant (literal, red and blue)655// symbol costs and store the resulting bin_id for each histogram.656for (i = 0; i < histo_size; ++i) {657// bin_map[i] is not set to a special value as its use will later be guarded658// by another (histograms[i] == NULL).659if (histograms[i] == NULL) continue;660bin_map[i] = GetHistoBinIndex(histograms[i], &cost_range, low_effort);661}662}663664// Merges some histograms with same bin_id together if it's advantageous.665// Sets the remaining histograms to NULL.666// 'combine_cost_factor' has to be divided by 100.667static void HistogramCombineEntropyBin(668VP8LHistogramSet* const image_histo, int* num_used,669const uint32_t* const clusters, uint16_t* const cluster_mappings,670VP8LHistogram* cur_combo, const uint16_t* const bin_map, int num_bins,671int32_t combine_cost_factor, int low_effort) {672VP8LHistogram** const histograms = image_histo->histograms;673int idx;674struct {675int16_t first; // position of the histogram that accumulates all676// histograms with the same bin_id677uint16_t num_combine_failures; // number of combine failures per bin_id678} bin_info[BIN_SIZE];679680assert(num_bins <= BIN_SIZE);681for (idx = 0; idx < num_bins; ++idx) {682bin_info[idx].first = -1;683bin_info[idx].num_combine_failures = 0;684}685686// By default, a cluster matches itself.687for (idx = 0; idx < *num_used; ++idx) cluster_mappings[idx] = idx;688for (idx = 0; idx < image_histo->size; ++idx) {689int bin_id, first;690if (histograms[idx] == NULL) continue;691bin_id = bin_map[idx];692first = bin_info[bin_id].first;693if (first == -1) {694bin_info[bin_id].first = idx;695} else if (low_effort) {696HistogramAdd(histograms[idx], histograms[first], histograms[first]);697HistogramSetRemoveHistogram(image_histo, idx, num_used);698cluster_mappings[clusters[idx]] = clusters[first];699} else {700// try to merge #idx into #first (both share the same bin_id)701const uint64_t bit_cost = histograms[idx]->bit_cost_;702const int64_t bit_cost_thresh =703-DivRound((int64_t)bit_cost * combine_cost_factor, 100);704if (HistogramAddEval(histograms[first], histograms[idx], cur_combo,705bit_cost_thresh)) {706// Try to merge two histograms only if the combo is a trivial one or707// the two candidate histograms are already non-trivial.708// For some images, 'try_combine' turns out to be false for a lot of709// histogram pairs. In that case, we fallback to combining710// histograms as usual to avoid increasing the header size.711const int try_combine =712(cur_combo->trivial_symbol_ != VP8L_NON_TRIVIAL_SYM) ||713((histograms[idx]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM) &&714(histograms[first]->trivial_symbol_ == VP8L_NON_TRIVIAL_SYM));715const int max_combine_failures = 32;716if (try_combine ||717bin_info[bin_id].num_combine_failures >= max_combine_failures) {718// move the (better) merged histogram to its final slot719HistogramSwap(&cur_combo, &histograms[first]);720HistogramSetRemoveHistogram(image_histo, idx, num_used);721cluster_mappings[clusters[idx]] = clusters[first];722} else {723++bin_info[bin_id].num_combine_failures;724}725}726}727}728if (low_effort) {729// for low_effort case, update the final cost when everything is merged730for (idx = 0; idx < image_histo->size; ++idx) {731if (histograms[idx] == NULL) continue;732UpdateHistogramCost(histograms[idx]);733}734}735}736737// Implement a Lehmer random number generator with a multiplicative constant of738// 48271 and a modulo constant of 2^31 - 1.739static uint32_t MyRand(uint32_t* const seed) {740*seed = (uint32_t)(((uint64_t)(*seed) * 48271u) % 2147483647u);741assert(*seed > 0);742return *seed;743}744745// -----------------------------------------------------------------------------746// Histogram pairs priority queue747748// Pair of histograms. Negative idx1 value means that pair is out-of-date.749typedef struct {750int idx1;751int idx2;752int64_t cost_diff;753uint64_t cost_combo;754} HistogramPair;755756typedef struct {757HistogramPair* queue;758int size;759int max_size;760} HistoQueue;761762static int HistoQueueInit(HistoQueue* const histo_queue, const int max_size) {763histo_queue->size = 0;764histo_queue->max_size = max_size;765// We allocate max_size + 1 because the last element at index "size" is766// used as temporary data (and it could be up to max_size).767histo_queue->queue = (HistogramPair*)WebPSafeMalloc(768histo_queue->max_size + 1, sizeof(*histo_queue->queue));769return histo_queue->queue != NULL;770}771772static void HistoQueueClear(HistoQueue* const histo_queue) {773assert(histo_queue != NULL);774WebPSafeFree(histo_queue->queue);775histo_queue->size = 0;776histo_queue->max_size = 0;777}778779// Pop a specific pair in the queue by replacing it with the last one780// and shrinking the queue.781static void HistoQueuePopPair(HistoQueue* const histo_queue,782HistogramPair* const pair) {783assert(pair >= histo_queue->queue &&784pair < (histo_queue->queue + histo_queue->size));785assert(histo_queue->size > 0);786*pair = histo_queue->queue[histo_queue->size - 1];787--histo_queue->size;788}789790// Check whether a pair in the queue should be updated as head or not.791static void HistoQueueUpdateHead(HistoQueue* const histo_queue,792HistogramPair* const pair) {793assert(pair->cost_diff < 0);794assert(pair >= histo_queue->queue &&795pair < (histo_queue->queue + histo_queue->size));796assert(histo_queue->size > 0);797if (pair->cost_diff < histo_queue->queue[0].cost_diff) {798// Replace the best pair.799const HistogramPair tmp = histo_queue->queue[0];800histo_queue->queue[0] = *pair;801*pair = tmp;802}803}804805// Update the cost diff and combo of a pair of histograms. This needs to be806// called when the histograms have been merged with a third one.807// Returns 1 if the cost diff is less than the threshold.808// Otherwise returns 0 and the cost is invalid due to early bail-out.809WEBP_NODISCARD static int HistoQueueUpdatePair(const VP8LHistogram* const h1,810const VP8LHistogram* const h2,811int64_t cost_threshold,812HistogramPair* const pair) {813const int64_t sum_cost = h1->bit_cost_ + h2->bit_cost_;814SaturateAdd(sum_cost, &cost_threshold);815if (!GetCombinedHistogramEntropy(h1, h2, cost_threshold, &pair->cost_combo)) {816return 0;817}818pair->cost_diff = (int64_t)pair->cost_combo - sum_cost;819return 1;820}821822// Create a pair from indices "idx1" and "idx2" provided its cost823// is inferior to "threshold", a negative entropy.824// It returns the cost of the pair, or 0 if it superior to threshold.825static int64_t HistoQueuePush(HistoQueue* const histo_queue,826VP8LHistogram** const histograms, int idx1,827int idx2, int64_t threshold) {828const VP8LHistogram* h1;829const VP8LHistogram* h2;830HistogramPair pair;831832// Stop here if the queue is full.833if (histo_queue->size == histo_queue->max_size) return 0;834assert(threshold <= 0);835if (idx1 > idx2) {836const int tmp = idx2;837idx2 = idx1;838idx1 = tmp;839}840pair.idx1 = idx1;841pair.idx2 = idx2;842h1 = histograms[idx1];843h2 = histograms[idx2];844845// Do not even consider the pair if it does not improve the entropy.846if (!HistoQueueUpdatePair(h1, h2, threshold, &pair)) return 0;847848histo_queue->queue[histo_queue->size++] = pair;849HistoQueueUpdateHead(histo_queue, &histo_queue->queue[histo_queue->size - 1]);850851return pair.cost_diff;852}853854// -----------------------------------------------------------------------------855856// Combines histograms by continuously choosing the one with the highest cost857// reduction.858static int HistogramCombineGreedy(VP8LHistogramSet* const image_histo,859int* const num_used) {860int ok = 0;861const int image_histo_size = image_histo->size;862int i, j;863VP8LHistogram** const histograms = image_histo->histograms;864// Priority queue of histogram pairs.865HistoQueue histo_queue;866867// image_histo_size^2 for the queue size is safe. If you look at868// HistogramCombineGreedy, and imagine that UpdateQueueFront always pushes869// data to the queue, you insert at most:870// - image_histo_size*(image_histo_size-1)/2 (the first two for loops)871// - image_histo_size - 1 in the last for loop at the first iteration of872// the while loop, image_histo_size - 2 at the second iteration ...873// therefore image_histo_size*(image_histo_size-1)/2 overall too874if (!HistoQueueInit(&histo_queue, image_histo_size * image_histo_size)) {875goto End;876}877878for (i = 0; i < image_histo_size; ++i) {879if (image_histo->histograms[i] == NULL) continue;880for (j = i + 1; j < image_histo_size; ++j) {881// Initialize queue.882if (image_histo->histograms[j] == NULL) continue;883HistoQueuePush(&histo_queue, histograms, i, j, 0);884}885}886887while (histo_queue.size > 0) {888const int idx1 = histo_queue.queue[0].idx1;889const int idx2 = histo_queue.queue[0].idx2;890HistogramAdd(histograms[idx2], histograms[idx1], histograms[idx1]);891histograms[idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;892893// Remove merged histogram.894HistogramSetRemoveHistogram(image_histo, idx2, num_used);895896// Remove pairs intersecting the just combined best pair.897for (i = 0; i < histo_queue.size;) {898HistogramPair* const p = histo_queue.queue + i;899if (p->idx1 == idx1 || p->idx2 == idx1 ||900p->idx1 == idx2 || p->idx2 == idx2) {901HistoQueuePopPair(&histo_queue, p);902} else {903HistoQueueUpdateHead(&histo_queue, p);904++i;905}906}907908// Push new pairs formed with combined histogram to the queue.909for (i = 0; i < image_histo->size; ++i) {910if (i == idx1 || image_histo->histograms[i] == NULL) continue;911HistoQueuePush(&histo_queue, image_histo->histograms, idx1, i, 0);912}913}914915ok = 1;916917End:918HistoQueueClear(&histo_queue);919return ok;920}921922// Perform histogram aggregation using a stochastic approach.923// 'do_greedy' is set to 1 if a greedy approach needs to be performed924// afterwards, 0 otherwise.925static int PairComparison(const void* idx1, const void* idx2) {926// To be used with bsearch: <0 when *idx1<*idx2, >0 if >, 0 when ==.927return (*(int*) idx1 - *(int*) idx2);928}929static int HistogramCombineStochastic(VP8LHistogramSet* const image_histo,930int* const num_used, int min_cluster_size,931int* const do_greedy) {932int j, iter;933uint32_t seed = 1;934int tries_with_no_success = 0;935const int outer_iters = *num_used;936const int num_tries_no_success = outer_iters / 2;937VP8LHistogram** const histograms = image_histo->histograms;938// Priority queue of histogram pairs. Its size of 'kHistoQueueSize'939// impacts the quality of the compression and the speed: the smaller the940// faster but the worse for the compression.941HistoQueue histo_queue;942const int kHistoQueueSize = 9;943int ok = 0;944// mapping from an index in image_histo with no NULL histogram to the full945// blown image_histo.946int* mappings;947948if (*num_used < min_cluster_size) {949*do_greedy = 1;950return 1;951}952953mappings = (int*) WebPSafeMalloc(*num_used, sizeof(*mappings));954if (mappings == NULL) return 0;955if (!HistoQueueInit(&histo_queue, kHistoQueueSize)) goto End;956// Fill the initial mapping.957for (j = 0, iter = 0; iter < image_histo->size; ++iter) {958if (histograms[iter] == NULL) continue;959mappings[j++] = iter;960}961assert(j == *num_used);962963// Collapse similar histograms in 'image_histo'.964for (iter = 0;965iter < outer_iters && *num_used >= min_cluster_size &&966++tries_with_no_success < num_tries_no_success;967++iter) {968int* mapping_index;969int64_t best_cost =970(histo_queue.size == 0) ? 0 : histo_queue.queue[0].cost_diff;971int best_idx1 = -1, best_idx2 = 1;972const uint32_t rand_range = (*num_used - 1) * (*num_used);973// (*num_used) / 2 was chosen empirically. Less means faster but worse974// compression.975const int num_tries = (*num_used) / 2;976977// Pick random samples.978for (j = 0; *num_used >= 2 && j < num_tries; ++j) {979int64_t curr_cost;980// Choose two different histograms at random and try to combine them.981const uint32_t tmp = MyRand(&seed) % rand_range;982uint32_t idx1 = tmp / (*num_used - 1);983uint32_t idx2 = tmp % (*num_used - 1);984if (idx2 >= idx1) ++idx2;985idx1 = mappings[idx1];986idx2 = mappings[idx2];987988// Calculate cost reduction on combination.989curr_cost =990HistoQueuePush(&histo_queue, histograms, idx1, idx2, best_cost);991if (curr_cost < 0) { // found a better pair?992best_cost = curr_cost;993// Empty the queue if we reached full capacity.994if (histo_queue.size == histo_queue.max_size) break;995}996}997if (histo_queue.size == 0) continue;998999// Get the best histograms.1000best_idx1 = histo_queue.queue[0].idx1;1001best_idx2 = histo_queue.queue[0].idx2;1002assert(best_idx1 < best_idx2);1003// Pop best_idx2 from mappings.1004mapping_index = (int*) bsearch(&best_idx2, mappings, *num_used,1005sizeof(best_idx2), &PairComparison);1006assert(mapping_index != NULL);1007memmove(mapping_index, mapping_index + 1, sizeof(*mapping_index) *1008((*num_used) - (mapping_index - mappings) - 1));1009// Merge the histograms and remove best_idx2 from the queue.1010HistogramAdd(histograms[best_idx2], histograms[best_idx1],1011histograms[best_idx1]);1012histograms[best_idx1]->bit_cost_ = histo_queue.queue[0].cost_combo;1013HistogramSetRemoveHistogram(image_histo, best_idx2, num_used);1014// Parse the queue and update each pair that deals with best_idx1,1015// best_idx2 or image_histo_size.1016for (j = 0; j < histo_queue.size;) {1017HistogramPair* const p = histo_queue.queue + j;1018const int is_idx1_best = p->idx1 == best_idx1 || p->idx1 == best_idx2;1019const int is_idx2_best = p->idx2 == best_idx1 || p->idx2 == best_idx2;1020int do_eval = 0;1021// The front pair could have been duplicated by a random pick so1022// check for it all the time nevertheless.1023if (is_idx1_best && is_idx2_best) {1024HistoQueuePopPair(&histo_queue, p);1025continue;1026}1027// Any pair containing one of the two best indices should only refer to1028// best_idx1. Its cost should also be updated.1029if (is_idx1_best) {1030p->idx1 = best_idx1;1031do_eval = 1;1032} else if (is_idx2_best) {1033p->idx2 = best_idx1;1034do_eval = 1;1035}1036// Make sure the index order is respected.1037if (p->idx1 > p->idx2) {1038const int tmp = p->idx2;1039p->idx2 = p->idx1;1040p->idx1 = tmp;1041}1042if (do_eval) {1043// Re-evaluate the cost of an updated pair.1044if (!HistoQueueUpdatePair(histograms[p->idx1], histograms[p->idx2], 0,1045p)) {1046HistoQueuePopPair(&histo_queue, p);1047continue;1048}1049}1050HistoQueueUpdateHead(&histo_queue, p);1051++j;1052}1053tries_with_no_success = 0;1054}1055*do_greedy = (*num_used <= min_cluster_size);1056ok = 1;10571058End:1059HistoQueueClear(&histo_queue);1060WebPSafeFree(mappings);1061return ok;1062}10631064// -----------------------------------------------------------------------------1065// Histogram refinement10661067// Find the best 'out' histogram for each of the 'in' histograms.1068// At call-time, 'out' contains the histograms of the clusters.1069// Note: we assume that out[]->bit_cost_ is already up-to-date.1070static void HistogramRemap(const VP8LHistogramSet* const in,1071VP8LHistogramSet* const out,1072uint32_t* const symbols) {1073int i;1074VP8LHistogram** const in_histo = in->histograms;1075VP8LHistogram** const out_histo = out->histograms;1076const int in_size = out->max_size;1077const int out_size = out->size;1078if (out_size > 1) {1079for (i = 0; i < in_size; ++i) {1080int best_out = 0;1081int64_t best_bits = WEBP_INT64_MAX;1082int k;1083if (in_histo[i] == NULL) {1084// Arbitrarily set to the previous value if unused to help future LZ77.1085symbols[i] = symbols[i - 1];1086continue;1087}1088for (k = 0; k < out_size; ++k) {1089int64_t cur_bits;1090if (HistogramAddThresh(out_histo[k], in_histo[i], best_bits,1091&cur_bits)) {1092best_bits = cur_bits;1093best_out = k;1094}1095}1096symbols[i] = best_out;1097}1098} else {1099assert(out_size == 1);1100for (i = 0; i < in_size; ++i) {1101symbols[i] = 0;1102}1103}11041105// Recompute each out based on raw and symbols.1106VP8LHistogramSetClear(out);1107out->size = out_size;11081109for (i = 0; i < in_size; ++i) {1110int idx;1111if (in_histo[i] == NULL) continue;1112idx = symbols[i];1113HistogramAdd(in_histo[i], out_histo[idx], out_histo[idx]);1114}1115}11161117static int32_t GetCombineCostFactor(int histo_size, int quality) {1118int32_t combine_cost_factor = 16;1119if (quality < 90) {1120if (histo_size > 256) combine_cost_factor /= 2;1121if (histo_size > 512) combine_cost_factor /= 2;1122if (histo_size > 1024) combine_cost_factor /= 2;1123if (quality <= 50) combine_cost_factor /= 2;1124}1125return combine_cost_factor;1126}11271128// Given a HistogramSet 'set', the mapping of clusters 'cluster_mapping' and the1129// current assignment of the cells in 'symbols', merge the clusters and1130// assign the smallest possible clusters values.1131static void OptimizeHistogramSymbols(const VP8LHistogramSet* const set,1132uint16_t* const cluster_mappings,1133uint32_t num_clusters,1134uint16_t* const cluster_mappings_tmp,1135uint32_t* const symbols) {1136uint32_t i, cluster_max;1137int do_continue = 1;1138// First, assign the lowest cluster to each pixel.1139while (do_continue) {1140do_continue = 0;1141for (i = 0; i < num_clusters; ++i) {1142int k;1143k = cluster_mappings[i];1144while (k != cluster_mappings[k]) {1145cluster_mappings[k] = cluster_mappings[cluster_mappings[k]];1146k = cluster_mappings[k];1147}1148if (k != cluster_mappings[i]) {1149do_continue = 1;1150cluster_mappings[i] = k;1151}1152}1153}1154// Create a mapping from a cluster id to its minimal version.1155cluster_max = 0;1156memset(cluster_mappings_tmp, 0,1157set->max_size * sizeof(*cluster_mappings_tmp));1158assert(cluster_mappings[0] == 0);1159// Re-map the ids.1160for (i = 0; i < (uint32_t)set->max_size; ++i) {1161int cluster;1162if (symbols[i] == kInvalidHistogramSymbol) continue;1163cluster = cluster_mappings[symbols[i]];1164assert(symbols[i] < num_clusters);1165if (cluster > 0 && cluster_mappings_tmp[cluster] == 0) {1166++cluster_max;1167cluster_mappings_tmp[cluster] = cluster_max;1168}1169symbols[i] = cluster_mappings_tmp[cluster];1170}11711172// Make sure all cluster values are used.1173cluster_max = 0;1174for (i = 0; i < (uint32_t)set->max_size; ++i) {1175if (symbols[i] == kInvalidHistogramSymbol) continue;1176if (symbols[i] <= cluster_max) continue;1177++cluster_max;1178assert(symbols[i] == cluster_max);1179}1180}11811182static void RemoveEmptyHistograms(VP8LHistogramSet* const image_histo) {1183uint32_t size;1184int i;1185for (i = 0, size = 0; i < image_histo->size; ++i) {1186if (image_histo->histograms[i] == NULL) continue;1187image_histo->histograms[size++] = image_histo->histograms[i];1188}1189image_histo->size = size;1190}11911192int VP8LGetHistoImageSymbols(int xsize, int ysize,1193const VP8LBackwardRefs* const refs, int quality,1194int low_effort, int histogram_bits, int cache_bits,1195VP8LHistogramSet* const image_histo,1196VP8LHistogram* const tmp_histo,1197uint32_t* const histogram_symbols,1198const WebPPicture* const pic, int percent_range,1199int* const percent) {1200const int histo_xsize =1201histogram_bits ? VP8LSubSampleSize(xsize, histogram_bits) : 1;1202const int histo_ysize =1203histogram_bits ? VP8LSubSampleSize(ysize, histogram_bits) : 1;1204const int image_histo_raw_size = histo_xsize * histo_ysize;1205VP8LHistogramSet* const orig_histo =1206VP8LAllocateHistogramSet(image_histo_raw_size, cache_bits);1207// Don't attempt linear bin-partition heuristic for1208// histograms of small sizes (as bin_map will be very sparse) and1209// maximum quality q==100 (to preserve the compression gains at that level).1210const int entropy_combine_num_bins = low_effort ? NUM_PARTITIONS : BIN_SIZE;1211int entropy_combine;1212uint16_t* const map_tmp =1213(uint16_t*)WebPSafeMalloc(2 * image_histo_raw_size, sizeof(*map_tmp));1214uint16_t* const cluster_mappings = map_tmp + image_histo_raw_size;1215int num_used = image_histo_raw_size;1216if (orig_histo == NULL || map_tmp == NULL) {1217WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);1218goto Error;1219}12201221// Construct the histograms from backward references.1222HistogramBuild(xsize, histogram_bits, refs, orig_histo);1223// Copies the histograms and computes its bit_cost.1224// histogram_symbols is optimized1225HistogramCopyAndAnalyze(orig_histo, image_histo, &num_used,1226histogram_symbols);12271228entropy_combine =1229(num_used > entropy_combine_num_bins * 2) && (quality < 100);12301231if (entropy_combine) {1232uint16_t* const bin_map = map_tmp;1233const int32_t combine_cost_factor =1234GetCombineCostFactor(image_histo_raw_size, quality);1235const uint32_t num_clusters = num_used;12361237HistogramAnalyzeEntropyBin(image_histo, bin_map, low_effort);1238// Collapse histograms with similar entropy.1239HistogramCombineEntropyBin(1240image_histo, &num_used, histogram_symbols, cluster_mappings, tmp_histo,1241bin_map, entropy_combine_num_bins, combine_cost_factor, low_effort);1242OptimizeHistogramSymbols(image_histo, cluster_mappings, num_clusters,1243map_tmp, histogram_symbols);1244}12451246// Don't combine the histograms using stochastic and greedy heuristics for1247// low-effort compression mode.1248if (!low_effort || !entropy_combine) {1249// cubic ramp between 1 and MAX_HISTO_GREEDY:1250const int threshold_size =1251(int)(1 + DivRound(quality * quality * quality * (MAX_HISTO_GREEDY - 1),1252100 * 100 * 100));1253int do_greedy;1254if (!HistogramCombineStochastic(image_histo, &num_used, threshold_size,1255&do_greedy)) {1256WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);1257goto Error;1258}1259if (do_greedy) {1260RemoveEmptyHistograms(image_histo);1261if (!HistogramCombineGreedy(image_histo, &num_used)) {1262WebPEncodingSetError(pic, VP8_ENC_ERROR_OUT_OF_MEMORY);1263goto Error;1264}1265}1266}12671268// Find the optimal map from original histograms to the final ones.1269RemoveEmptyHistograms(image_histo);1270HistogramRemap(orig_histo, image_histo, histogram_symbols);12711272if (!WebPReportProgress(pic, *percent + percent_range, percent)) {1273goto Error;1274}12751276Error:1277VP8LFreeHistogramSet(orig_histo);1278WebPSafeFree(map_tmp);1279return (pic->error_code == VP8_ENC_OK);1280}128112821283