Path: thirdparty/libwebp/src/mux/anim_encode.c
// Copyright 2014 Google Inc. All Rights Reserved.
//
// Use of this source code is governed by a BSD-style license
// that can be found in the COPYING file in the root of the source
// tree. An additional intellectual property rights grant can be found
// in the file PATENTS. All contributing project authors may
// be found in the AUTHORS file in the root of the source tree.
// -----------------------------------------------------------------------------
//
// AnimEncoder implementation.
//

#include <assert.h>
#include <limits.h>
#include <math.h>    // for pow()
#include <stdio.h>
#include <stdlib.h>  // for abs()
#include <string.h>

#include "src/mux/animi.h"
#include "src/utils/utils.h"
#include "src/webp/decode.h"
#include "src/webp/encode.h"
#include "src/webp/format_constants.h"
#include "src/webp/mux.h"
#include "src/webp/mux_types.h"
#include "src/webp/types.h"

#if defined(_MSC_VER) && _MSC_VER < 1900
#define snprintf _snprintf
#endif

#define ERROR_STR_MAX_LENGTH 100

//------------------------------------------------------------------------------
// Internal structs.

// Stores frame rectangle dimensions.
typedef struct {
  int x_offset, y_offset, width, height;
} FrameRectangle;

// Used to store two candidates of encoded data for an animation frame. One of
// the two will be chosen later.
typedef struct {
  WebPMuxFrameInfo sub_frame;  // Encoded frame rectangle.
  WebPMuxFrameInfo key_frame;  // Encoded frame if it is a key-frame.
  int is_key_frame;            // True if 'key_frame' has been chosen.
} EncodedFrame;

struct WebPAnimEncoder {
  const int canvas_width;                // Canvas width.
  const int canvas_height;               // Canvas height.
  const WebPAnimEncoderOptions options;  // Global encoding options.

  FrameRectangle prev_rect;         // Previous WebP frame rectangle.
  WebPConfig last_config;           // Cached in case a re-encode is needed.
  WebPConfig last_config_reversed;  // If 'last_config' uses lossless, then
                                    // this config uses lossy and vice versa;
                                    // only valid if 'options.allow_mixed'
                                    // is true.

  WebPPicture* curr_canvas;  // Only pointer; we don't own memory.

  // Canvas buffers.
  WebPPicture curr_canvas_copy;   // Possibly modified current canvas.
  int curr_canvas_copy_modified;  // True if pixels in 'curr_canvas_copy'
                                  // differ from those in 'curr_canvas'.

  WebPPicture prev_canvas;           // Previous canvas.
  WebPPicture prev_canvas_disposed;  // Previous canvas disposed to background.

  // Encoded data.
  EncodedFrame* encoded_frames;  // Array of encoded frames.
  size_t size;                   // Number of allocated frames.
  size_t start;                  // Frame start index.
  size_t count;                  // Number of valid frames.
  size_t flush_count;            // If >0, 'flush_count' frames starting from
                                 // 'start' are ready to be added to mux.

  // key-frame related.
  int64_t best_delta;         // min(canvas size - frame size) over the frames.
                              // Can be negative in certain cases due to
                              // transparent pixels in a frame.
  int keyframe;               // Index of selected key-frame relative to 'start'.
  int count_since_key_frame;  // Frames seen since the last key-frame.

  int first_timestamp;           // Timestamp of the first frame.
  int prev_timestamp;            // Timestamp of the last added frame.
  int prev_candidate_undecided;  // True if it's not yet decided if previous
                                 // frame would be a sub-frame or a key-frame.

  // Misc.
  int is_first_frame;  // True if first frame is yet to be added/being added.
  int got_null_frame;  // True if WebPAnimEncoderAdd() has already been called
                       // with a NULL frame.

  size_t in_frame_count;   // Number of input frames processed so far.
  size_t out_frame_count;  // Number of frames added to mux so far. This may be
                           // different from 'in_frame_count' due to merging.

  WebPMux* mux;                          // Muxer to assemble the WebP bitstream.
  char error_str[ERROR_STR_MAX_LENGTH];  // Error string. Empty if no error.
};

// -----------------------------------------------------------------------------
// Life of WebPAnimEncoder object.

#define DELTA_INFINITY (1ULL << 32)
#define KEYFRAME_NONE (-1)

// Reset the counters in the WebPAnimEncoder.
static void ResetCounters(WebPAnimEncoder* const enc) {
  enc->start = 0;
  enc->count = 0;
  enc->flush_count = 0;
  enc->best_delta = DELTA_INFINITY;
  enc->keyframe = KEYFRAME_NONE;
}

static void DisableKeyframes(WebPAnimEncoderOptions* const enc_options) {
  enc_options->kmax = INT_MAX;
  enc_options->kmin = enc_options->kmax - 1;
}

#define MAX_CACHED_FRAMES 30

static void SanitizeEncoderOptions(WebPAnimEncoderOptions* const enc_options) {
  int print_warning = enc_options->verbose;

  if (enc_options->minimize_size) {
    DisableKeyframes(enc_options);
  }

  if (enc_options->kmax == 1) {  // All frames will be key-frames.
    enc_options->kmin = 0;
    enc_options->kmax = 0;
    return;
  } else if (enc_options->kmax <= 0) {
    DisableKeyframes(enc_options);
    print_warning = 0;
  }

  if (enc_options->kmin >= enc_options->kmax) {
    enc_options->kmin = enc_options->kmax - 1;
    if (print_warning) {
      fprintf(stderr, "WARNING: Setting kmin = %d, so that kmin < kmax.\n",
              enc_options->kmin);
    }
  } else {
    const int kmin_limit = enc_options->kmax / 2 + 1;
    if (enc_options->kmin < kmin_limit && kmin_limit < enc_options->kmax) {
      // This ensures that enc.keyframe + kmin >= kmax is always true. So, we
      // can flush all the frames in the 'count_since_key_frame == kmax' case.
      enc_options->kmin = kmin_limit;
      if (print_warning) {
        fprintf(stderr,
                "WARNING: Setting kmin = %d, so that kmin >= kmax / 2 + 1.\n",
                enc_options->kmin);
      }
    }
  }
  // Limit the max number of frames that are allocated.
  if (enc_options->kmax - enc_options->kmin > MAX_CACHED_FRAMES) {
    enc_options->kmin = enc_options->kmax - MAX_CACHED_FRAMES;
    if (print_warning) {
      fprintf(stderr,
              "WARNING: Setting kmin = %d, so that kmax - kmin <= %d.\n",
              enc_options->kmin, MAX_CACHED_FRAMES);
    }
  }
  assert(enc_options->kmin < enc_options->kmax);
}

#undef MAX_CACHED_FRAMES

static void DefaultEncoderOptions(WebPAnimEncoderOptions* const enc_options) {
  enc_options->anim_params.loop_count = 0;
  enc_options->anim_params.bgcolor = 0xffffffff;  // White.
  enc_options->minimize_size = 0;
  DisableKeyframes(enc_options);
  enc_options->allow_mixed = 0;
  enc_options->verbose = 0;
}

int WebPAnimEncoderOptionsInitInternal(WebPAnimEncoderOptions* enc_options,
                                       int abi_version) {
  if (enc_options == NULL ||
      WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) {
    return 0;
  }
  DefaultEncoderOptions(enc_options);
  return 1;
}

// This value is used to match a later call to WebPReplaceTransparentPixels(),
// making it a no-op for lossless (see WebPEncode()).
#define TRANSPARENT_COLOR 0x00000000

static void ClearRectangle(WebPPicture* const picture,
                           int left, int top, int width, int height) {
  int j;
  for (j = top; j < top + height; ++j) {
    uint32_t* const dst = picture->argb + j * picture->argb_stride;
    int i;
    for (i = left; i < left + width; ++i) {
      dst[i] = TRANSPARENT_COLOR;
    }
  }
}

static void WebPUtilClearPic(WebPPicture* const picture,
                             const FrameRectangle* const rect) {
  if (rect != NULL) {
    ClearRectangle(picture, rect->x_offset, rect->y_offset,
                   rect->width, rect->height);
  } else {
    ClearRectangle(picture, 0, 0, picture->width, picture->height);
  }
}

static void MarkNoError(WebPAnimEncoder* const enc) {
  enc->error_str[0] = '\0';  // Empty string.
}

static void MarkError(WebPAnimEncoder* const enc, const char* str) {
  if (snprintf(enc->error_str, ERROR_STR_MAX_LENGTH, "%s.", str) < 0) {
    assert(0);  // FIX ME!
  }
}

static void MarkError2(WebPAnimEncoder* const enc,
                       const char* str, int error_code) {
  if (snprintf(enc->error_str, ERROR_STR_MAX_LENGTH, "%s: %d.", str,
               error_code) < 0) {
    assert(0);  // FIX ME!
  }
}

WebPAnimEncoder* WebPAnimEncoderNewInternal(
    int width, int height, const WebPAnimEncoderOptions* enc_options,
    int abi_version) {
  WebPAnimEncoder* enc;

  if (WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) {
    return NULL;
  }
  if (width <= 0 || height <= 0 ||
      (width * (uint64_t)height) >= MAX_IMAGE_AREA) {
    return NULL;
  }

  enc = (WebPAnimEncoder*)WebPSafeCalloc(1, sizeof(*enc));
  if (enc == NULL) return NULL;
  MarkNoError(enc);

  // Dimensions and options.
  *(int*)&enc->canvas_width = width;
  *(int*)&enc->canvas_height = height;
  if (enc_options != NULL) {
    *(WebPAnimEncoderOptions*)&enc->options = *enc_options;
    SanitizeEncoderOptions((WebPAnimEncoderOptions*)&enc->options);
  } else {
    DefaultEncoderOptions((WebPAnimEncoderOptions*)&enc->options);
  }

  // Canvas buffers.
  if (!WebPPictureInit(&enc->curr_canvas_copy) ||
      !WebPPictureInit(&enc->prev_canvas) ||
      !WebPPictureInit(&enc->prev_canvas_disposed)) {
    goto Err;
  }
  enc->curr_canvas_copy.width = width;
  enc->curr_canvas_copy.height = height;
  enc->curr_canvas_copy.use_argb = 1;
  if (!WebPPictureAlloc(&enc->curr_canvas_copy) ||
      !WebPPictureCopy(&enc->curr_canvas_copy, &enc->prev_canvas) ||
      !WebPPictureCopy(&enc->curr_canvas_copy, &enc->prev_canvas_disposed)) {
    goto Err;
  }
  WebPUtilClearPic(&enc->prev_canvas, NULL);
  enc->curr_canvas_copy_modified = 1;

  // Encoded frames.
  ResetCounters(enc);
  // Note: one extra storage is for the previous frame.
  enc->size = enc->options.kmax - enc->options.kmin + 1;
  // We need space for at least 2 frames. But when kmin, kmax are both zero,
  // enc->size will be 1. So we handle that special case below.
  if (enc->size < 2) enc->size = 2;
  enc->encoded_frames =
      (EncodedFrame*)WebPSafeCalloc(enc->size, sizeof(*enc->encoded_frames));
  if (enc->encoded_frames == NULL) goto Err;

  enc->mux = WebPMuxNew();
  if (enc->mux == NULL) goto Err;

  enc->count_since_key_frame = 0;
  enc->first_timestamp = 0;
  enc->prev_timestamp = 0;
  enc->prev_candidate_undecided = 0;
  enc->is_first_frame = 1;
  enc->got_null_frame = 0;

  return enc;  // All OK.

 Err:
  WebPAnimEncoderDelete(enc);
  return NULL;
}

// Release the data contained by 'encoded_frame'.
static void FrameRelease(EncodedFrame* const encoded_frame) {
  if (encoded_frame != NULL) {
    WebPDataClear(&encoded_frame->sub_frame.bitstream);
    WebPDataClear(&encoded_frame->key_frame.bitstream);
    memset(encoded_frame, 0, sizeof(*encoded_frame));
  }
}

void WebPAnimEncoderDelete(WebPAnimEncoder* enc) {
  if (enc != NULL) {
    WebPPictureFree(&enc->curr_canvas_copy);
    WebPPictureFree(&enc->prev_canvas);
    WebPPictureFree(&enc->prev_canvas_disposed);
    if (enc->encoded_frames != NULL) {
      size_t i;
      for (i = 0; i < enc->size; ++i) {
        FrameRelease(&enc->encoded_frames[i]);
      }
      WebPSafeFree(enc->encoded_frames);
    }
    WebPMuxDelete(enc->mux);
    WebPSafeFree(enc);
  }
}

// -----------------------------------------------------------------------------
// Frame addition.

// Returns cached frame at the given 'position'.
static EncodedFrame* GetFrame(const WebPAnimEncoder* const enc,
                              size_t position) {
  assert(enc->start + position < enc->size);
  return &enc->encoded_frames[enc->start + position];
}

typedef int (*ComparePixelsFunc)(const uint32_t*, int, const uint32_t*, int,
                                 int, int);

// Returns true if 'length' number of pixels in 'src' and 'dst' are equal,
// assuming the given step sizes between pixels.
// 'max_allowed_diff' is unused and only there to allow function pointer use.
static WEBP_INLINE int ComparePixelsLossless(const uint32_t* src, int src_step,
                                             const uint32_t* dst, int dst_step,
                                             int length, int max_allowed_diff) {
  (void)max_allowed_diff;
  assert(length > 0);
  while (length-- > 0) {
    if (*src != *dst) {
      return 0;
    }
    src += src_step;
    dst += dst_step;
  }
  return 1;
}

// Helper to check if each channel in 'src' and 'dst' is at most off by
// 'max_allowed_diff'.
static WEBP_INLINE int PixelsAreSimilar(uint32_t src, uint32_t dst,
                                        int max_allowed_diff) {
  const int src_a = (src >> 24) & 0xff;
  const int src_r = (src >> 16) & 0xff;
  const int src_g = (src >> 8) & 0xff;
  const int src_b = (src >> 0) & 0xff;
  const int dst_a = (dst >> 24) & 0xff;
  const int dst_r = (dst >> 16) & 0xff;
  const int dst_g = (dst >> 8) & 0xff;
  const int dst_b = (dst >> 0) & 0xff;

  return (src_a == dst_a) &&
         (abs(src_r - dst_r) * dst_a <= (max_allowed_diff * 255)) &&
         (abs(src_g - dst_g) * dst_a <= (max_allowed_diff * 255)) &&
         (abs(src_b - dst_b) * dst_a <= (max_allowed_diff * 255));
}

// Returns true if 'length' number of pixels in 'src' and 'dst' are within an
// error bound, assuming the given step sizes between pixels.
static WEBP_INLINE int ComparePixelsLossy(const uint32_t* src, int src_step,
                                          const uint32_t* dst, int dst_step,
                                          int length, int max_allowed_diff) {
  assert(length > 0);
  while (length-- > 0) {
    if (!PixelsAreSimilar(*src, *dst, max_allowed_diff)) {
      return 0;
    }
    src += src_step;
    dst += dst_step;
  }
  return 1;
}

static int IsEmptyRect(const FrameRectangle* const rect) {
  return (rect->width == 0) || (rect->height == 0);
}

static int QualityToMaxDiff(float quality) {
  const double val = pow(quality / 100., 0.5);
  const double max_diff = 31 * (1 - val) + 1 * val;
  return (int)(max_diff + 0.5);
}
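
// Illustrative mapping, derived from the formula above (added for clarity,
// not part of the original source): quality 0 -> 31, 25 -> 16, 50 -> 10,
// 75 -> 5, 100 -> 1. Higher quality therefore tolerates smaller per-channel
// differences when two pixels are compared in lossy mode.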

// Assumes that an initial valid guess of change rectangle 'rect' is passed.
static void MinimizeChangeRectangle(const WebPPicture* const src,
                                    const WebPPicture* const dst,
                                    FrameRectangle* const rect,
                                    int is_lossless, float quality) {
  int i, j;
  const ComparePixelsFunc compare_pixels =
      is_lossless ? ComparePixelsLossless : ComparePixelsLossy;
  const int max_allowed_diff_lossy = QualityToMaxDiff(quality);
  const int max_allowed_diff = is_lossless ? 0 : max_allowed_diff_lossy;

  // Assumption/correctness checks.
  assert(src->width == dst->width && src->height == dst->height);
  assert(rect->x_offset + rect->width <= dst->width);
  assert(rect->y_offset + rect->height <= dst->height);

  // Left boundary.
  for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
    const uint32_t* const src_argb =
        &src->argb[rect->y_offset * src->argb_stride + i];
    const uint32_t* const dst_argb =
        &dst->argb[rect->y_offset * dst->argb_stride + i];
    if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,
                       rect->height, max_allowed_diff)) {
      --rect->width;  // Redundant column.
      ++rect->x_offset;
    } else {
      break;
    }
  }
  if (rect->width == 0) goto NoChange;

  // Right boundary.
  for (i = rect->x_offset + rect->width - 1; i >= rect->x_offset; --i) {
    const uint32_t* const src_argb =
        &src->argb[rect->y_offset * src->argb_stride + i];
    const uint32_t* const dst_argb =
        &dst->argb[rect->y_offset * dst->argb_stride + i];
    if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,
                       rect->height, max_allowed_diff)) {
      --rect->width;  // Redundant column.
    } else {
      break;
    }
  }
  if (rect->width == 0) goto NoChange;

  // Top boundary.
  for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
    const uint32_t* const src_argb =
        &src->argb[j * src->argb_stride + rect->x_offset];
    const uint32_t* const dst_argb =
        &dst->argb[j * dst->argb_stride + rect->x_offset];
    if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width,
                       max_allowed_diff)) {
      --rect->height;  // Redundant row.
      ++rect->y_offset;
    } else {
      break;
    }
  }
  if (rect->height == 0) goto NoChange;

  // Bottom boundary.
  for (j = rect->y_offset + rect->height - 1; j >= rect->y_offset; --j) {
    const uint32_t* const src_argb =
        &src->argb[j * src->argb_stride + rect->x_offset];
    const uint32_t* const dst_argb =
        &dst->argb[j * dst->argb_stride + rect->x_offset];
    if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width,
                       max_allowed_diff)) {
      --rect->height;  // Redundant row.
    } else {
      break;
    }
  }
  if (rect->height == 0) goto NoChange;

  if (IsEmptyRect(rect)) {
 NoChange:
    rect->x_offset = 0;
    rect->y_offset = 0;
    rect->width = 0;
    rect->height = 0;
  }
}

// Snap rectangle to even offsets (and adjust dimensions if needed).
static WEBP_INLINE void SnapToEvenOffsets(FrameRectangle* const rect) {
  rect->width += (rect->x_offset & 1);
  rect->height += (rect->y_offset & 1);
  rect->x_offset &= ~1;
  rect->y_offset &= ~1;
}
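
// Worked example, derived from the logic above (added for illustration): a
// rectangle {x_offset = 3, y_offset = 5, width = 10, height = 8} becomes
// {2, 4, 11, 9}. Odd offsets are rounded down to even values and the
// corresponding dimension grows by one pixel, so the snapped rectangle still
// covers the original area.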

typedef struct {
  int should_try;               // Should try this set of parameters.
  int empty_rect_allowed;       // Frame with empty rectangle can be skipped.
  FrameRectangle rect_ll;       // Frame rectangle for lossless compression.
  WebPPicture sub_frame_ll;     // Sub-frame pic for lossless compression.
  FrameRectangle rect_lossy;    // Frame rectangle for lossy compression.
                                // Could be smaller than 'rect_ll' as pixels
                                // with small diffs can be ignored.
  WebPPicture sub_frame_lossy;  // Sub-frame pic for lossy compression.
} SubFrameParams;

static int SubFrameParamsInit(SubFrameParams* const params,
                              int should_try, int empty_rect_allowed) {
  params->should_try = should_try;
  params->empty_rect_allowed = empty_rect_allowed;
  if (!WebPPictureInit(&params->sub_frame_ll) ||
      !WebPPictureInit(&params->sub_frame_lossy)) {
    return 0;
  }
  return 1;
}

static void SubFrameParamsFree(SubFrameParams* const params) {
  WebPPictureFree(&params->sub_frame_ll);
  WebPPictureFree(&params->sub_frame_lossy);
}

// Given previous and current canvas, picks the optimal rectangle for the
// current frame based on 'is_lossless' and other parameters. Assumes that the
// initial guess 'rect' is valid.
static int GetSubRect(const WebPPicture* const prev_canvas,
                      const WebPPicture* const curr_canvas, int is_key_frame,
                      int is_first_frame, int empty_rect_allowed,
                      int is_lossless, float quality,
                      FrameRectangle* const rect,
                      WebPPicture* const sub_frame) {
  if (!is_key_frame || is_first_frame) {  // Optimize frame rectangle.
    // Note: This behaves as expected for first frame, as 'prev_canvas' is
    // initialized to a fully transparent canvas in the beginning.
    MinimizeChangeRectangle(prev_canvas, curr_canvas, rect,
                            is_lossless, quality);
  }

  if (IsEmptyRect(rect)) {
    if (empty_rect_allowed) {  // No need to get 'sub_frame'.
      return 1;
    } else {  // Force a 1x1 rectangle.
      rect->width = 1;
      rect->height = 1;
      assert(rect->x_offset == 0);
      assert(rect->y_offset == 0);
    }
  }

  SnapToEvenOffsets(rect);
  return WebPPictureView(curr_canvas, rect->x_offset, rect->y_offset,
                         rect->width, rect->height, sub_frame);
}

// Picks optimal frame rectangle for both lossless and lossy compression. The
// initial guess for frame rectangles will be the full canvas.
static int GetSubRects(const WebPPicture* const prev_canvas,
                       const WebPPicture* const curr_canvas, int is_key_frame,
                       int is_first_frame, float quality,
                       SubFrameParams* const params) {
  // Lossless frame rectangle.
  params->rect_ll.x_offset = 0;
  params->rect_ll.y_offset = 0;
  params->rect_ll.width = curr_canvas->width;
  params->rect_ll.height = curr_canvas->height;
  if (!GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
                  params->empty_rect_allowed, 1, quality,
                  &params->rect_ll, &params->sub_frame_ll)) {
    return 0;
  }
  // Lossy frame rectangle.
  params->rect_lossy = params->rect_ll;  // seed with lossless rect.
  return GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
                    params->empty_rect_allowed, 0, quality,
                    &params->rect_lossy, &params->sub_frame_lossy);
}

static WEBP_INLINE int clip(int v, int min_v, int max_v) {
  return (v < min_v) ? min_v : (v > max_v) ? max_v : v;
}

int WebPAnimEncoderRefineRect(
    const WebPPicture* const prev_canvas, const WebPPicture* const curr_canvas,
    int is_lossless, float quality, int* const x_offset, int* const y_offset,
    int* const width, int* const height) {
  FrameRectangle rect;
  int right, left, bottom, top;
  if (prev_canvas == NULL || curr_canvas == NULL ||
      prev_canvas->width != curr_canvas->width ||
      prev_canvas->height != curr_canvas->height ||
      !prev_canvas->use_argb || !curr_canvas->use_argb) {
    return 0;
  }
  right = clip(*x_offset + *width, 0, curr_canvas->width);
  left = clip(*x_offset, 0, curr_canvas->width - 1);
  bottom = clip(*y_offset + *height, 0, curr_canvas->height);
  top = clip(*y_offset, 0, curr_canvas->height - 1);
  rect.x_offset = left;
  rect.y_offset = top;
  rect.width = clip(right - left, 0, curr_canvas->width - rect.x_offset);
  rect.height = clip(bottom - top, 0, curr_canvas->height - rect.y_offset);
  MinimizeChangeRectangle(prev_canvas, curr_canvas, &rect, is_lossless,
                          quality);
  SnapToEvenOffsets(&rect);
  *x_offset = rect.x_offset;
  *y_offset = rect.y_offset;
  *width = rect.width;
  *height = rect.height;
  return 1;
}

static void DisposeFrameRectangle(int dispose_method,
                                  const FrameRectangle* const rect,
                                  WebPPicture* const curr_canvas) {
  assert(rect != NULL);
  if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {
    WebPUtilClearPic(curr_canvas, rect);
  }
}

static uint32_t RectArea(const FrameRectangle* const rect) {
  return (uint32_t)rect->width * rect->height;
}

static int IsLosslessBlendingPossible(const WebPPicture* const src,
                                      const WebPPicture* const dst,
                                      const FrameRectangle* const rect) {
  int i, j;
  assert(src->width == dst->width && src->height == dst->height);
  assert(rect->x_offset + rect->width <= dst->width);
  assert(rect->y_offset + rect->height <= dst->height);
  for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
    for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
      const uint32_t src_pixel = src->argb[j * src->argb_stride + i];
      const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i];
      const uint32_t dst_alpha = dst_pixel >> 24;
      if (dst_alpha != 0xff && src_pixel != dst_pixel) {
        // In this case, if we use blending, we can't attain the desired
        // 'dst_pixel' value for this pixel. So, blending is not possible.
        return 0;
      }
    }
  }
  return 1;
}

static int IsLossyBlendingPossible(const WebPPicture* const src,
                                   const WebPPicture* const dst,
                                   const FrameRectangle* const rect,
                                   float quality) {
  const int max_allowed_diff_lossy = QualityToMaxDiff(quality);
  int i, j;
  assert(src->width == dst->width && src->height == dst->height);
  assert(rect->x_offset + rect->width <= dst->width);
  assert(rect->y_offset + rect->height <= dst->height);
  for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
    for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
      const uint32_t src_pixel = src->argb[j * src->argb_stride + i];
      const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i];
      const uint32_t dst_alpha = dst_pixel >> 24;
      if (dst_alpha != 0xff &&
          !PixelsAreSimilar(src_pixel, dst_pixel, max_allowed_diff_lossy)) {
        // In this case, if we use blending, we can't attain the desired
        // 'dst_pixel' value for this pixel. So, blending is not possible.
        return 0;
      }
    }
  }
  return 1;
}

// For pixels in 'rect', replace those pixels in 'dst' that are the same as in
// 'src' by transparent pixels.
// Returns true if at least one pixel gets modified.
static int IncreaseTransparency(const WebPPicture* const src,
                                const FrameRectangle* const rect,
                                WebPPicture* const dst) {
  int i, j;
  int modified = 0;
  assert(src != NULL && dst != NULL && rect != NULL);
  assert(src->width == dst->width && src->height == dst->height);
  for (j = rect->y_offset; j < rect->y_offset + rect->height; ++j) {
    const uint32_t* const psrc = src->argb + j * src->argb_stride;
    uint32_t* const pdst = dst->argb + j * dst->argb_stride;
    for (i = rect->x_offset; i < rect->x_offset + rect->width; ++i) {
      if (psrc[i] == pdst[i] && pdst[i] != TRANSPARENT_COLOR) {
        pdst[i] = TRANSPARENT_COLOR;
        modified = 1;
      }
    }
  }
  return modified;
}

#undef TRANSPARENT_COLOR

// Replace similar blocks of pixels by a 'see-through' transparent block
// with uniform average color.
// Assumes lossy compression is being used.
// Returns true if at least one pixel gets modified.
static int FlattenSimilarBlocks(const WebPPicture* const src,
                                const FrameRectangle* const rect,
                                WebPPicture* const dst, float quality) {
  const int max_allowed_diff_lossy = QualityToMaxDiff(quality);
  int i, j;
  int modified = 0;
  const int block_size = 8;
  const int y_start = (rect->y_offset + block_size) & ~(block_size - 1);
  const int y_end = (rect->y_offset + rect->height) & ~(block_size - 1);
  const int x_start = (rect->x_offset + block_size) & ~(block_size - 1);
  const int x_end = (rect->x_offset + rect->width) & ~(block_size - 1);
  assert(src != NULL && dst != NULL && rect != NULL);
  assert(src->width == dst->width && src->height == dst->height);
  assert((block_size & (block_size - 1)) == 0);  // must be a power of 2
  // Iterate over each block and count similar pixels.
  for (j = y_start; j < y_end; j += block_size) {
    for (i = x_start; i < x_end; i += block_size) {
      int cnt = 0;
      int avg_r = 0, avg_g = 0, avg_b = 0;
      int x, y;
      const uint32_t* const psrc = src->argb + j * src->argb_stride + i;
      uint32_t* const pdst = dst->argb + j * dst->argb_stride + i;
      for (y = 0; y < block_size; ++y) {
        for (x = 0; x < block_size; ++x) {
          const uint32_t src_pixel = psrc[x + y * src->argb_stride];
          const int alpha = src_pixel >> 24;
          if (alpha == 0xff &&
              PixelsAreSimilar(src_pixel, pdst[x + y * dst->argb_stride],
                               max_allowed_diff_lossy)) {
            ++cnt;
            avg_r += (src_pixel >> 16) & 0xff;
            avg_g += (src_pixel >> 8) & 0xff;
            avg_b += (src_pixel >> 0) & 0xff;
          }
        }
      }
      // If we have a fully similar block, we replace it with an
      // average transparent block. This compresses better in lossy mode.
      if (cnt == block_size * block_size) {
        const uint32_t color = (0x00 << 24) |
                               ((avg_r / cnt) << 16) |
                               ((avg_g / cnt) << 8) |
                               ((avg_b / cnt) << 0);
        for (y = 0; y < block_size; ++y) {
          for (x = 0; x < block_size; ++x) {
            pdst[x + y * dst->argb_stride] = color;
          }
        }
        modified = 1;
      }
    }
  }
  return modified;
}

static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic,
                       WebPMemoryWriter* const memory) {
  pic->use_argb = 1;
  pic->writer = WebPMemoryWrite;
  pic->custom_ptr = memory;
  if (!WebPEncode(config, pic)) {
    return 0;
  }
  return 1;
}

// Struct representing a candidate encoded frame including its metadata.
typedef struct {
  WebPMemoryWriter mem;
  WebPMuxFrameInfo info;
  FrameRectangle rect;
  int evaluate;  // True if this candidate should be evaluated.
} Candidate;

// Generates a candidate encoded frame given a picture and metadata.
static WebPEncodingError EncodeCandidate(WebPPicture* const sub_frame,
                                         const FrameRectangle* const rect,
                                         const WebPConfig* const encoder_config,
                                         int use_blending,
                                         Candidate* const candidate) {
  WebPConfig config = *encoder_config;
  WebPEncodingError error_code = VP8_ENC_OK;
  assert(candidate != NULL);
  memset(candidate, 0, sizeof(*candidate));

  // Set frame rect and info.
  candidate->rect = *rect;
  candidate->info.id = WEBP_CHUNK_ANMF;
  candidate->info.x_offset = rect->x_offset;
  candidate->info.y_offset = rect->y_offset;
  candidate->info.dispose_method = WEBP_MUX_DISPOSE_NONE;  // Set later.
  candidate->info.blend_method =
      use_blending ? WEBP_MUX_BLEND : WEBP_MUX_NO_BLEND;
  candidate->info.duration = 0;  // Set in next call to WebPAnimEncoderAdd().

  // Encode picture.
  WebPMemoryWriterInit(&candidate->mem);

  if (!config.lossless && use_blending) {
    // Disable filtering to avoid blockiness in reconstructed frames at the
    // time of decoding.
    config.autofilter = 0;
    config.filter_strength = 0;
  }
  if (!EncodeFrame(&config, sub_frame, &candidate->mem)) {
    error_code = sub_frame->error_code;
    goto Err;
  }

  candidate->evaluate = 1;
  return error_code;

 Err:
  WebPMemoryWriterClear(&candidate->mem);
  return error_code;
}

static void CopyCurrentCanvas(WebPAnimEncoder* const enc) {
  if (enc->curr_canvas_copy_modified) {
    WebPCopyPixels(enc->curr_canvas, &enc->curr_canvas_copy);
    enc->curr_canvas_copy.progress_hook = enc->curr_canvas->progress_hook;
    enc->curr_canvas_copy.user_data = enc->curr_canvas->user_data;
    enc->curr_canvas_copy_modified = 0;
  }
}

enum {
  LL_DISP_NONE = 0,
  LL_DISP_BG,
  LOSSY_DISP_NONE,
  LOSSY_DISP_BG,
  CANDIDATE_COUNT
};

#define MIN_COLORS_LOSSY 31      // Don't try lossy below this threshold.
#define MAX_COLORS_LOSSLESS 194  // Don't try lossless above this threshold.

// Generates candidates for a given dispose method given pre-filled sub-frame
// 'params'.
static WebPEncodingError GenerateCandidates(
    WebPAnimEncoder* const enc, Candidate candidates[CANDIDATE_COUNT],
    WebPMuxAnimDispose dispose_method, int is_lossless, int is_key_frame,
    SubFrameParams* const params,
    const WebPConfig* const config_ll, const WebPConfig* const config_lossy) {
  WebPEncodingError error_code = VP8_ENC_OK;
  const int is_dispose_none = (dispose_method == WEBP_MUX_DISPOSE_NONE);
  Candidate* const candidate_ll =
      is_dispose_none ? &candidates[LL_DISP_NONE] : &candidates[LL_DISP_BG];
  Candidate* const candidate_lossy = is_dispose_none
                                         ? &candidates[LOSSY_DISP_NONE]
                                         : &candidates[LOSSY_DISP_BG];
  WebPPicture* const curr_canvas = &enc->curr_canvas_copy;
  const WebPPicture* const prev_canvas =
      is_dispose_none ? &enc->prev_canvas : &enc->prev_canvas_disposed;
  int use_blending_ll, use_blending_lossy;
  int evaluate_ll, evaluate_lossy;

  CopyCurrentCanvas(enc);
  use_blending_ll =
      !is_key_frame &&
      IsLosslessBlendingPossible(prev_canvas, curr_canvas, &params->rect_ll);
  use_blending_lossy =
      !is_key_frame &&
      IsLossyBlendingPossible(prev_canvas, curr_canvas, &params->rect_lossy,
                              config_lossy->quality);

  // Pick candidates to be tried.
  if (!enc->options.allow_mixed) {
    evaluate_ll = is_lossless;
    evaluate_lossy = !is_lossless;
  } else if (enc->options.minimize_size) {
    evaluate_ll = 1;
    evaluate_lossy = 1;
  } else {  // Use a heuristic for trying lossless and/or lossy compression.
    const int num_colors = WebPGetColorPalette(&params->sub_frame_ll, NULL);
    evaluate_ll = (num_colors < MAX_COLORS_LOSSLESS);
    evaluate_lossy = (num_colors >= MIN_COLORS_LOSSY);
  }

  // Generate candidates.
  if (evaluate_ll) {
    CopyCurrentCanvas(enc);
    if (use_blending_ll) {
      enc->curr_canvas_copy_modified =
          IncreaseTransparency(prev_canvas, &params->rect_ll, curr_canvas);
    }
    error_code = EncodeCandidate(&params->sub_frame_ll, &params->rect_ll,
                                 config_ll, use_blending_ll, candidate_ll);
    if (error_code != VP8_ENC_OK) return error_code;
  }
  if (evaluate_lossy) {
    CopyCurrentCanvas(enc);
    if (use_blending_lossy) {
      enc->curr_canvas_copy_modified =
          FlattenSimilarBlocks(prev_canvas, &params->rect_lossy, curr_canvas,
                               config_lossy->quality);
    }
    error_code =
        EncodeCandidate(&params->sub_frame_lossy, &params->rect_lossy,
                        config_lossy, use_blending_lossy, candidate_lossy);
    if (error_code != VP8_ENC_OK) return error_code;
    enc->curr_canvas_copy_modified = 1;
  }
  return error_code;
}

#undef MIN_COLORS_LOSSY
#undef MAX_COLORS_LOSSLESS

static void GetEncodedData(const WebPMemoryWriter* const memory,
                           WebPData* const encoded_data) {
  encoded_data->bytes = memory->mem;
  encoded_data->size = memory->size;
}

// Sets dispose method of the previous frame to be 'dispose_method'.
static void SetPreviousDisposeMethod(WebPAnimEncoder* const enc,
                                     WebPMuxAnimDispose dispose_method) {
  const size_t position = enc->count - 2;
  EncodedFrame* const prev_enc_frame = GetFrame(enc, position);
  assert(enc->count >= 2);  // As current and previous frames are in enc.

  if (enc->prev_candidate_undecided) {
    assert(dispose_method == WEBP_MUX_DISPOSE_NONE);
    prev_enc_frame->sub_frame.dispose_method = dispose_method;
    prev_enc_frame->key_frame.dispose_method = dispose_method;
  } else {
    WebPMuxFrameInfo* const prev_info = prev_enc_frame->is_key_frame
                                            ? &prev_enc_frame->key_frame
                                            : &prev_enc_frame->sub_frame;
    prev_info->dispose_method = dispose_method;
  }
}

static int IncreasePreviousDuration(WebPAnimEncoder* const enc, int duration) {
  const size_t position = enc->count - 1;
  EncodedFrame* const prev_enc_frame = GetFrame(enc, position);
  int new_duration;

  assert(enc->count >= 1);
  assert(!prev_enc_frame->is_key_frame ||
         prev_enc_frame->sub_frame.duration ==
             prev_enc_frame->key_frame.duration);
  assert(prev_enc_frame->sub_frame.duration ==
         (prev_enc_frame->sub_frame.duration & (MAX_DURATION - 1)));
  assert(duration == (duration & (MAX_DURATION - 1)));

  new_duration = prev_enc_frame->sub_frame.duration + duration;
  if (new_duration >= MAX_DURATION) {  // Special case.
    // Separate out previous frame from earlier merged frames to avoid overflow.
    // We add a 1x1 transparent frame for the previous frame, with blending on.
    const FrameRectangle rect = { 0, 0, 1, 1 };
    const uint8_t lossless_1x1_bytes[] = {
      0x52, 0x49, 0x46, 0x46, 0x14, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50,
      0x56, 0x50, 0x38, 0x4c, 0x08, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00,
      0x10, 0x88, 0x88, 0x08
    };
    const WebPData lossless_1x1 = {
      lossless_1x1_bytes, sizeof(lossless_1x1_bytes)
    };
    const uint8_t lossy_1x1_bytes[] = {
      0x52, 0x49, 0x46, 0x46, 0x40, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50,
      0x56, 0x50, 0x38, 0x58, 0x0a, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x4c, 0x50, 0x48, 0x02, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x56, 0x50, 0x38, 0x20, 0x18, 0x00, 0x00, 0x00,
      0x30, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x01, 0x00, 0x01, 0x00, 0x02, 0x00,
      0x34, 0x25, 0xa4, 0x00, 0x03, 0x70, 0x00, 0xfe, 0xfb, 0xfd, 0x50, 0x00
    };
    const WebPData lossy_1x1 = { lossy_1x1_bytes, sizeof(lossy_1x1_bytes) };
    const int can_use_lossless =
        (enc->last_config.lossless || enc->options.allow_mixed);
    EncodedFrame* const curr_enc_frame = GetFrame(enc, enc->count);
    curr_enc_frame->is_key_frame = 0;
    curr_enc_frame->sub_frame.id = WEBP_CHUNK_ANMF;
    curr_enc_frame->sub_frame.x_offset = 0;
    curr_enc_frame->sub_frame.y_offset = 0;
    curr_enc_frame->sub_frame.dispose_method = WEBP_MUX_DISPOSE_NONE;
    curr_enc_frame->sub_frame.blend_method = WEBP_MUX_BLEND;
    curr_enc_frame->sub_frame.duration = duration;
    if (!WebPDataCopy(can_use_lossless ? &lossless_1x1 : &lossy_1x1,
                      &curr_enc_frame->sub_frame.bitstream)) {
      return 0;
    }
    ++enc->count;
    ++enc->count_since_key_frame;
    enc->flush_count = enc->count - 1;
    enc->prev_candidate_undecided = 0;
    enc->prev_rect = rect;
  } else {  // Regular case.
    // Increase duration of the previous frame by 'duration'.
    prev_enc_frame->sub_frame.duration = new_duration;
    prev_enc_frame->key_frame.duration = new_duration;
  }
  return 1;
}

// Pick the candidate encoded frame with smallest size and release other
// candidates.
// TODO(later): Perhaps a rough SSIM/PSNR produced by the encoder should
// also be a criterion, in addition to sizes.
static void PickBestCandidate(WebPAnimEncoder* const enc,
                              Candidate* const candidates, int is_key_frame,
                              EncodedFrame* const encoded_frame) {
  int i;
  int best_idx = -1;
  size_t best_size = ~0;
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate) {
      const size_t candidate_size = candidates[i].mem.size;
      if (candidate_size < best_size) {
        best_idx = i;
        best_size = candidate_size;
      }
    }
  }
  assert(best_idx != -1);
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate) {
      if (i == best_idx) {
        WebPMuxFrameInfo* const dst = is_key_frame
                                          ? &encoded_frame->key_frame
                                          : &encoded_frame->sub_frame;
        *dst = candidates[i].info;
        GetEncodedData(&candidates[i].mem, &dst->bitstream);
        if (!is_key_frame) {
          // Note: Previous dispose method only matters for non-keyframes.
          // Also, we don't want to modify previous dispose method that was
          // selected when a non key-frame was assumed.
          const WebPMuxAnimDispose prev_dispose_method =
              (best_idx == LL_DISP_NONE || best_idx == LOSSY_DISP_NONE)
                  ? WEBP_MUX_DISPOSE_NONE
                  : WEBP_MUX_DISPOSE_BACKGROUND;
          SetPreviousDisposeMethod(enc, prev_dispose_method);
        }
        enc->prev_rect = candidates[i].rect;  // save for next frame.
      } else {
        WebPMemoryWriterClear(&candidates[i].mem);
        candidates[i].evaluate = 0;
      }
    }
  }
}

// Depending on the configuration, tries different compressions
// (lossy/lossless), dispose methods, blending methods etc. to encode the
// current frame and outputs the best one in 'encoded_frame'.
// 'frame_skipped' will be set to true if this frame should actually be
// skipped.
static WebPEncodingError SetFrame(WebPAnimEncoder* const enc,
                                  const WebPConfig* const config,
                                  int is_key_frame,
                                  EncodedFrame* const encoded_frame,
                                  int* const frame_skipped) {
  int i;
  WebPEncodingError error_code = VP8_ENC_OK;
  const WebPPicture* const curr_canvas = &enc->curr_canvas_copy;
  const WebPPicture* const prev_canvas = &enc->prev_canvas;
  Candidate candidates[CANDIDATE_COUNT];
  const int is_lossless = config->lossless;
  const int consider_lossless = is_lossless || enc->options.allow_mixed;
  const int consider_lossy = !is_lossless || enc->options.allow_mixed;
  const int is_first_frame = enc->is_first_frame;

  // First frame cannot be skipped as there is no 'previous frame' to merge it
  // to. So, empty rectangle is not allowed for the first frame.
  const int empty_rect_allowed_none = !is_first_frame;

  // Even if there is exact pixel match between 'disposed previous canvas' and
  // 'current canvas', we can't skip current frame, as there may not be exact
  // pixel match between 'previous canvas' and 'current canvas'. So, we don't
  // allow empty rectangle in this case.
  const int empty_rect_allowed_bg = 0;

  // If current frame is a key-frame, dispose method of previous frame doesn't
  // matter, so we don't try dispose to background.
  // Also, if key-frame insertion is on, and previous frame could be picked as
  // either a sub-frame or a key-frame, then we can't be sure about what frame
  // rectangle would be disposed. In that case too, we don't try dispose to
  // background.
  const int dispose_bg_possible =
      !is_key_frame && !enc->prev_candidate_undecided;

  SubFrameParams dispose_none_params;
  SubFrameParams dispose_bg_params;

  WebPConfig config_ll = *config;
  WebPConfig config_lossy = *config;
  config_ll.lossless = 1;
  config_lossy.lossless = 0;
  enc->last_config = *config;
  enc->last_config_reversed = config->lossless ? config_lossy : config_ll;
  *frame_skipped = 0;

  if (!SubFrameParamsInit(&dispose_none_params, 1, empty_rect_allowed_none) ||
      !SubFrameParamsInit(&dispose_bg_params, 0, empty_rect_allowed_bg)) {
    return VP8_ENC_ERROR_INVALID_CONFIGURATION;
  }

  memset(candidates, 0, sizeof(candidates));

  // Change-rectangle assuming previous frame was DISPOSE_NONE.
  if (!GetSubRects(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
                   config_lossy.quality, &dispose_none_params)) {
    error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
    goto Err;
  }

  if ((consider_lossless && IsEmptyRect(&dispose_none_params.rect_ll)) ||
      (consider_lossy && IsEmptyRect(&dispose_none_params.rect_lossy))) {
    // Don't encode the frame at all. Instead, the duration of the previous
    // frame will be increased later.
    assert(empty_rect_allowed_none);
    *frame_skipped = 1;
    goto End;
  }

  if (dispose_bg_possible) {
    // Change-rectangle assuming previous frame was DISPOSE_BACKGROUND.
    WebPPicture* const prev_canvas_disposed = &enc->prev_canvas_disposed;
    WebPCopyPixels(prev_canvas, prev_canvas_disposed);
    DisposeFrameRectangle(WEBP_MUX_DISPOSE_BACKGROUND, &enc->prev_rect,
                          prev_canvas_disposed);

    if (!GetSubRects(prev_canvas_disposed, curr_canvas, is_key_frame,
                     is_first_frame, config_lossy.quality,
                     &dispose_bg_params)) {
      error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
      goto Err;
    }
    assert(!IsEmptyRect(&dispose_bg_params.rect_ll));
    assert(!IsEmptyRect(&dispose_bg_params.rect_lossy));

    if (enc->options.minimize_size) {  // Try both dispose methods.
      dispose_bg_params.should_try = 1;
      dispose_none_params.should_try = 1;
    } else if ((is_lossless &&
                RectArea(&dispose_bg_params.rect_ll) <
                    RectArea(&dispose_none_params.rect_ll)) ||
               (!is_lossless &&
                RectArea(&dispose_bg_params.rect_lossy) <
                    RectArea(&dispose_none_params.rect_lossy))) {
      dispose_bg_params.should_try = 1;  // Pick DISPOSE_BACKGROUND.
      dispose_none_params.should_try = 0;
    }
  }

  if (dispose_none_params.should_try) {
    error_code = GenerateCandidates(
        enc, candidates, WEBP_MUX_DISPOSE_NONE, is_lossless, is_key_frame,
        &dispose_none_params, &config_ll, &config_lossy);
    if (error_code != VP8_ENC_OK) goto Err;
  }

  if (dispose_bg_params.should_try) {
    assert(!enc->is_first_frame);
    assert(dispose_bg_possible);
    error_code = GenerateCandidates(
        enc, candidates, WEBP_MUX_DISPOSE_BACKGROUND, is_lossless, is_key_frame,
        &dispose_bg_params, &config_ll, &config_lossy);
    if (error_code != VP8_ENC_OK) goto Err;
  }

  PickBestCandidate(enc, candidates, is_key_frame, encoded_frame);

  goto End;

 Err:
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate) {
      WebPMemoryWriterClear(&candidates[i].mem);
    }
  }

 End:
  SubFrameParamsFree(&dispose_none_params);
  SubFrameParamsFree(&dispose_bg_params);
  return error_code;
}

// Calculate the penalty incurred if we encode given frame as a key frame
// instead of a sub-frame.
static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) {
  return ((int64_t)encoded_frame->key_frame.bitstream.size -
          encoded_frame->sub_frame.bitstream.size);
}

static int CacheFrame(WebPAnimEncoder* const enc,
                      const WebPConfig* const config) {
  int ok = 0;
  int frame_skipped = 0;
  WebPEncodingError error_code = VP8_ENC_OK;
  const size_t position = enc->count;
  EncodedFrame* const encoded_frame = GetFrame(enc, position);

  ++enc->count;

  if (enc->is_first_frame) {  // Add this as a key-frame.
    error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped);
    if (error_code != VP8_ENC_OK) goto End;
    assert(frame_skipped == 0);  // First frame can't be skipped, even if empty.
    assert(position == 0 && enc->count == 1);
    encoded_frame->is_key_frame = 1;
    enc->flush_count = 0;
    enc->count_since_key_frame = 0;
    enc->prev_candidate_undecided = 0;
  } else {
    ++enc->count_since_key_frame;
    if (enc->count_since_key_frame <= enc->options.kmin) {
      // Add this as a frame rectangle.
      error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      if (frame_skipped) goto Skip;
      encoded_frame->is_key_frame = 0;
      enc->flush_count = enc->count - 1;
      enc->prev_candidate_undecided = 0;
    } else {
      int64_t curr_delta;
      FrameRectangle prev_rect_key, prev_rect_sub;

      // Add this as a frame rectangle to enc.
      error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      if (frame_skipped) goto Skip;
      prev_rect_sub = enc->prev_rect;

      // Add this as a key-frame to enc, too.
      error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      assert(frame_skipped == 0);  // Key-frame cannot be an empty rectangle.
      prev_rect_key = enc->prev_rect;

      // Analyze size difference of the two variants.
      curr_delta = KeyFramePenalty(encoded_frame);
      if (curr_delta <= enc->best_delta) {  // Pick this as the key-frame.
        if (enc->keyframe != KEYFRAME_NONE) {
          EncodedFrame* const old_keyframe = GetFrame(enc, enc->keyframe);
          assert(old_keyframe->is_key_frame);
          old_keyframe->is_key_frame = 0;
        }
        encoded_frame->is_key_frame = 1;
        enc->prev_candidate_undecided = 1;
        enc->keyframe = (int)position;
        enc->best_delta = curr_delta;
        enc->flush_count = enc->count - 1;  // We can flush previous frames.
      } else {
        encoded_frame->is_key_frame = 0;
        enc->prev_candidate_undecided = 0;
      }
      // Note: We need '>=' below because when kmin and kmax are both zero,
      // count_since_key_frame will always be > kmax.
      if (enc->count_since_key_frame >= enc->options.kmax) {
        enc->flush_count = enc->count - 1;
        enc->count_since_key_frame = 0;
        enc->keyframe = KEYFRAME_NONE;
        enc->best_delta = DELTA_INFINITY;
      }
      if (!enc->prev_candidate_undecided) {
        enc->prev_rect =
            encoded_frame->is_key_frame ? prev_rect_key : prev_rect_sub;
      }
    }
  }

  // Update previous to previous and previous canvases for next call.
  WebPCopyPixels(enc->curr_canvas, &enc->prev_canvas);
  enc->is_first_frame = 0;

 Skip:
  ok = 1;
  ++enc->in_frame_count;

 End:
  if (!ok || frame_skipped) {
    FrameRelease(encoded_frame);
    // We reset some counters, as the frame addition failed/was skipped.
    --enc->count;
    if (!enc->is_first_frame) --enc->count_since_key_frame;
    if (!ok) {
      MarkError2(enc, "ERROR adding frame. WebPEncodingError", error_code);
    }
  }
  enc->curr_canvas->error_code = error_code;  // report error_code
  assert(ok || error_code != VP8_ENC_OK);
  return ok;
}

static int FlushFrames(WebPAnimEncoder* const enc) {
  while (enc->flush_count > 0) {
    WebPMuxError err;
    EncodedFrame* const curr = GetFrame(enc, 0);
    const WebPMuxFrameInfo* const info =
        curr->is_key_frame ? &curr->key_frame : &curr->sub_frame;
    assert(enc->mux != NULL);
    err = WebPMuxPushFrame(enc->mux, info, 1);
    if (err != WEBP_MUX_OK) {
      MarkError2(enc, "ERROR adding frame. WebPMuxError", err);
      return 0;
    }
    if (enc->options.verbose) {
      fprintf(stderr, "INFO: Added frame. offset:%d,%d dispose:%d blend:%d\n",
              info->x_offset, info->y_offset, info->dispose_method,
              info->blend_method);
    }
    ++enc->out_frame_count;
    FrameRelease(curr);
    ++enc->start;
    --enc->flush_count;
    --enc->count;
    if (enc->keyframe != KEYFRAME_NONE) --enc->keyframe;
  }

  if (enc->count == 1 && enc->start != 0) {
    // Move enc->start to index 0.
    const int enc_start_tmp = (int)enc->start;
    EncodedFrame temp = enc->encoded_frames[0];
    enc->encoded_frames[0] = enc->encoded_frames[enc_start_tmp];
    enc->encoded_frames[enc_start_tmp] = temp;
    FrameRelease(&enc->encoded_frames[enc_start_tmp]);
    enc->start = 0;
  }
  return 1;
}

#undef DELTA_INFINITY
#undef KEYFRAME_NONE

int WebPAnimEncoderAdd(WebPAnimEncoder* enc, WebPPicture* frame, int timestamp,
                       const WebPConfig* encoder_config) {
  WebPConfig config;
  int ok;

  if (enc == NULL) {
    return 0;
  }
  MarkNoError(enc);

  if (!enc->is_first_frame) {
    // Make sure timestamps are non-decreasing (integer wrap-around is OK).
    const uint32_t prev_frame_duration =
        (uint32_t)timestamp - enc->prev_timestamp;
    if (prev_frame_duration >= MAX_DURATION) {
      if (frame != NULL) {
        frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
      }
      MarkError(enc, "ERROR adding frame: timestamps must be non-decreasing");
      return 0;
    }
    if (!IncreasePreviousDuration(enc, (int)prev_frame_duration)) {
      return 0;
    }
    // IncreasePreviousDuration() may add a frame to avoid exceeding
    // MAX_DURATION, which could cause CacheFrame() to overread
    // 'encoded_frames' before the next flush.
    if (enc->count == enc->size && !FlushFrames(enc)) {
      return 0;
    }
  } else {
    enc->first_timestamp = timestamp;
  }

  if (frame == NULL) {  // Special: last call.
    enc->got_null_frame = 1;
    enc->prev_timestamp = timestamp;
    return 1;
  }

  if (frame->width != enc->canvas_width ||
      frame->height != enc->canvas_height) {
    frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
    MarkError(enc, "ERROR adding frame: Invalid frame dimensions");
    return 0;
  }

  if (!frame->use_argb) {  // Convert frame from YUV(A) to ARGB.
    if (enc->options.verbose) {
      fprintf(stderr, "WARNING: Converting frame from YUV(A) to ARGB format; "
              "this incurs a small loss.\n");
    }
    if (!WebPPictureYUVAToARGB(frame)) {
      MarkError(enc, "ERROR converting frame from YUV(A) to ARGB");
      return 0;
    }
  }

  if (encoder_config != NULL) {
    if (!WebPValidateConfig(encoder_config)) {
      MarkError(enc, "ERROR adding frame: Invalid WebPConfig");
      return 0;
    }
    config = *encoder_config;
  } else {
    if (!WebPConfigInit(&config)) {
      MarkError(enc, "Cannot Init config");
      return 0;
    }
    config.lossless = 1;
  }
  assert(enc->curr_canvas == NULL);
  enc->curr_canvas = frame;  // Store reference.
  assert(enc->curr_canvas_copy_modified == 1);
  CopyCurrentCanvas(enc);

  ok = CacheFrame(enc, &config) && FlushFrames(enc);

  enc->curr_canvas = NULL;
  enc->curr_canvas_copy_modified = 1;
  if (ok) {
    enc->prev_timestamp = timestamp;
  }
  return ok;
}

// -----------------------------------------------------------------------------
// Bitstream assembly.

WEBP_NODISCARD static int DecodeFrameOntoCanvas(
    const WebPMuxFrameInfo* const frame, WebPPicture* const canvas) {
  const WebPData* const image = &frame->bitstream;
  WebPPicture sub_image;
  WebPDecoderConfig config;
  if (!WebPInitDecoderConfig(&config)) {
    return 0;
  }
  WebPUtilClearPic(canvas, NULL);
  if (WebPGetFeatures(image->bytes, image->size, &config.input) !=
      VP8_STATUS_OK) {
    return 0;
  }
  if (!WebPPictureView(canvas, frame->x_offset, frame->y_offset,
                       config.input.width, config.input.height, &sub_image)) {
    return 0;
  }
  config.output.is_external_memory = 1;
  config.output.colorspace = MODE_BGRA;
  config.output.u.RGBA.rgba = (uint8_t*)sub_image.argb;
  config.output.u.RGBA.stride = sub_image.argb_stride * 4;
  config.output.u.RGBA.size = config.output.u.RGBA.stride * sub_image.height;

  if (WebPDecode(image->bytes, image->size, &config) != VP8_STATUS_OK) {
    return 0;
  }
  return 1;
}

static int FrameToFullCanvas(WebPAnimEncoder* const enc,
                             const WebPMuxFrameInfo* const frame,
                             WebPData* const full_image) {
  WebPPicture* const canvas_buf = &enc->curr_canvas_copy;
  WebPMemoryWriter mem1, mem2;
  WebPMemoryWriterInit(&mem1);
  WebPMemoryWriterInit(&mem2);

  if (!DecodeFrameOntoCanvas(frame, canvas_buf)) goto Err;
  if (!EncodeFrame(&enc->last_config, canvas_buf, &mem1)) goto Err;
  GetEncodedData(&mem1, full_image);

  if (enc->options.allow_mixed) {
    if (!EncodeFrame(&enc->last_config_reversed, canvas_buf, &mem2)) goto Err;
    if (mem2.size < mem1.size) {
      GetEncodedData(&mem2, full_image);
      WebPMemoryWriterClear(&mem1);
    } else {
      WebPMemoryWriterClear(&mem2);
    }
  }
  return 1;

 Err:
  WebPMemoryWriterClear(&mem1);
  WebPMemoryWriterClear(&mem2);
  return 0;
}

// Convert a single-frame animation to a non-animated image if appropriate.
// TODO(urvang): Can we pick one of the two heuristically (based on frame
// rectangle and/or presence of alpha)?
static WebPMuxError OptimizeSingleFrame(WebPAnimEncoder* const enc,
                                        WebPData* const webp_data) {
  WebPMuxError err = WEBP_MUX_OK;
  int canvas_width, canvas_height;
  WebPMuxFrameInfo frame;
  WebPData full_image;
  WebPData webp_data2;
  WebPMux* const mux = WebPMuxCreate(webp_data, 0);
  if (mux == NULL) return WEBP_MUX_BAD_DATA;
  assert(enc->out_frame_count == 1);
  WebPDataInit(&frame.bitstream);
  WebPDataInit(&full_image);
  WebPDataInit(&webp_data2);

  err = WebPMuxGetFrame(mux, 1, &frame);
  if (err != WEBP_MUX_OK) goto End;
  if (frame.id != WEBP_CHUNK_ANMF) goto End;  // Non-animation: nothing to do.
  err = WebPMuxGetCanvasSize(mux, &canvas_width, &canvas_height);
  if (err != WEBP_MUX_OK) goto End;
  if (!FrameToFullCanvas(enc, &frame, &full_image)) {
    err = WEBP_MUX_BAD_DATA;
    goto End;
  }
  err = WebPMuxSetImage(mux, &full_image, 1);
  if (err != WEBP_MUX_OK) goto End;
  err = WebPMuxAssemble(mux, &webp_data2);
  if (err != WEBP_MUX_OK) goto End;

  if (webp_data2.size < webp_data->size) {  // Pick 'webp_data2' if smaller.
    WebPDataClear(webp_data);
    *webp_data = webp_data2;
    WebPDataInit(&webp_data2);
  }

 End:
  WebPDataClear(&frame.bitstream);
  WebPDataClear(&full_image);
  WebPMuxDelete(mux);
  WebPDataClear(&webp_data2);
  return err;
}

int WebPAnimEncoderAssemble(WebPAnimEncoder* enc, WebPData* webp_data) {
  WebPMux* mux;
  WebPMuxError err;

  if (enc == NULL) {
    return 0;
  }
  MarkNoError(enc);

  if (webp_data == NULL) {
    MarkError(enc, "ERROR assembling: NULL input");
    return 0;
  }

  if (enc->in_frame_count == 0) {
    MarkError(enc, "ERROR: No frames to assemble");
    return 0;
  }

  if (!enc->got_null_frame && enc->in_frame_count > 1 && enc->count > 0) {
    // Set duration of the last frame to the average duration of the previous
    // frames.
    const double delta_time =
        (uint32_t)enc->prev_timestamp - enc->first_timestamp;
    const int average_duration = (int)(delta_time / (enc->in_frame_count - 1));
    if (!IncreasePreviousDuration(enc, average_duration)) {
      return 0;
    }
  }

  // Flush any remaining frames.
  enc->flush_count = enc->count;
  if (!FlushFrames(enc)) {
    return 0;
  }

  // Set definitive canvas size.
  mux = enc->mux;
  err = WebPMuxSetCanvasSize(mux, enc->canvas_width, enc->canvas_height);
  if (err != WEBP_MUX_OK) goto Err;

  err = WebPMuxSetAnimationParams(mux, &enc->options.anim_params);
  if (err != WEBP_MUX_OK) goto Err;

  // Assemble into a WebP bitstream.
  err = WebPMuxAssemble(mux, webp_data);
  if (err != WEBP_MUX_OK) goto Err;

  if (enc->out_frame_count == 1) {
    err = OptimizeSingleFrame(enc, webp_data);
    if (err != WEBP_MUX_OK) goto Err;
  }
  return 1;

 Err:
  MarkError2(enc, "ERROR assembling WebP", err);
  return 0;
}

const char* WebPAnimEncoderGetError(WebPAnimEncoder* enc) {
  if (enc == NULL) return NULL;
  return enc->error_str;
}

WebPMuxError WebPAnimEncoderSetChunk(
    WebPAnimEncoder* enc, const char fourcc[4], const WebPData* chunk_data,
    int copy_data) {
  if (enc == NULL) return WEBP_MUX_INVALID_ARGUMENT;
  return WebPMuxSetChunk(enc->mux, fourcc, chunk_data, copy_data);
}

WebPMuxError WebPAnimEncoderGetChunk(
    const WebPAnimEncoder* enc, const char fourcc[4], WebPData* chunk_data) {
  if (enc == NULL) return WEBP_MUX_INVALID_ARGUMENT;
  return WebPMuxGetChunk(enc->mux, fourcc, chunk_data);
}

WebPMuxError WebPAnimEncoderDeleteChunk(
    WebPAnimEncoder* enc, const char fourcc[4]) {
  if (enc == NULL) return WEBP_MUX_INVALID_ARGUMENT;
  return WebPMuxDeleteChunk(enc->mux, fourcc);
}

// -----------------------------------------------------------------------------
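
// Illustrative usage sketch (added for reference, not part of the library
// source): a caller typically drives the public AnimEncoder API roughly as
// follows. Error handling is elided, and 'width', 'height', 'frame' and
// 'timestamp_ms' are placeholders for the caller's own data.
//
//   WebPAnimEncoderOptions enc_options;
//   WebPAnimEncoder* enc;
//   WebPData webp_data;
//   WebPAnimEncoderOptionsInit(&enc_options);
//   enc = WebPAnimEncoderNew(width, height, &enc_options);
//   // For each frame: fill a WebPPicture and add it with its start time.
//   WebPAnimEncoderAdd(enc, &frame, timestamp_ms, NULL);
//   // Signal the end time of the last frame, then assemble the bitstream.
//   WebPAnimEncoderAdd(enc, NULL, last_timestamp_ms, NULL);
//   WebPAnimEncoderAssemble(enc, &webp_data);
//   WebPAnimEncoderDelete(enc);
//   // ... use 'webp_data', then release it with WebPDataClear(&webp_data).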