Path: blob/master/3rdparty/libwebp/src/mux/anim_encode.c
16344 views
// Copyright 2014 Google Inc. All Rights Reserved.1//2// Use of this source code is governed by a BSD-style license3// that can be found in the COPYING file in the root of the source4// tree. An additional intellectual property rights grant can be found5// in the file PATENTS. All contributing project authors may6// be found in the AUTHORS file in the root of the source tree.7// -----------------------------------------------------------------------------8//9// AnimEncoder implementation.10//1112#include <assert.h>13#include <limits.h>14#include <math.h> // for pow()15#include <stdio.h>16#include <stdlib.h> // for abs()1718#include "src/mux/animi.h"19#include "src/utils/utils.h"20#include "src/webp/decode.h"21#include "src/webp/encode.h"22#include "src/webp/format_constants.h"23#include "src/webp/mux.h"2425#if defined(_MSC_VER) && _MSC_VER < 190026#define snprintf _snprintf27#endif2829#define ERROR_STR_MAX_LENGTH 1003031//------------------------------------------------------------------------------32// Internal structs.3334// Stores frame rectangle dimensions.35typedef struct {36int x_offset_, y_offset_, width_, height_;37} FrameRectangle;3839// Used to store two candidates of encoded data for an animation frame. 
One of40// the two will be chosen later.41typedef struct {42WebPMuxFrameInfo sub_frame_; // Encoded frame rectangle.43WebPMuxFrameInfo key_frame_; // Encoded frame if it is a key-frame.44int is_key_frame_; // True if 'key_frame' has been chosen.45} EncodedFrame;4647struct WebPAnimEncoder {48const int canvas_width_; // Canvas width.49const int canvas_height_; // Canvas height.50const WebPAnimEncoderOptions options_; // Global encoding options.5152FrameRectangle prev_rect_; // Previous WebP frame rectangle.53WebPConfig last_config_; // Cached in case a re-encode is needed.54WebPConfig last_config_reversed_; // If 'last_config_' uses lossless, then55// this config uses lossy and vice versa;56// only valid if 'options_.allow_mixed'57// is true.5859WebPPicture* curr_canvas_; // Only pointer; we don't own memory.6061// Canvas buffers.62WebPPicture curr_canvas_copy_; // Possibly modified current canvas.63int curr_canvas_copy_modified_; // True if pixels in 'curr_canvas_copy_'64// differ from those in 'curr_canvas_'.6566WebPPicture prev_canvas_; // Previous canvas.67WebPPicture prev_canvas_disposed_; // Previous canvas disposed to background.6869// Encoded data.70EncodedFrame* encoded_frames_; // Array of encoded frames.71size_t size_; // Number of allocated frames.72size_t start_; // Frame start index.73size_t count_; // Number of valid frames.74size_t flush_count_; // If >0, 'flush_count' frames starting from75// 'start' are ready to be added to mux.7677// key-frame related.78int64_t best_delta_; // min(canvas size - frame size) over the frames.79// Can be negative in certain cases due to80// transparent pixels in a frame.81int keyframe_; // Index of selected key-frame relative to 'start_'.82int count_since_key_frame_; // Frames seen since the last key-frame.8384int first_timestamp_; // Timestamp of the first frame.85int prev_timestamp_; // Timestamp of the last added frame.86int prev_candidate_undecided_; // True if it's not yet decided if previous87// frame would be a 
sub-frame or a key-frame.8889// Misc.90int is_first_frame_; // True if first frame is yet to be added/being added.91int got_null_frame_; // True if WebPAnimEncoderAdd() has already been called92// with a NULL frame.9394size_t in_frame_count_; // Number of input frames processed so far.95size_t out_frame_count_; // Number of frames added to mux so far. This may be96// different from 'in_frame_count_' due to merging.9798WebPMux* mux_; // Muxer to assemble the WebP bitstream.99char error_str_[ERROR_STR_MAX_LENGTH]; // Error string. Empty if no error.100};101102// -----------------------------------------------------------------------------103// Life of WebPAnimEncoder object.104105#define DELTA_INFINITY (1ULL << 32)106#define KEYFRAME_NONE (-1)107108// Reset the counters in the WebPAnimEncoder.109static void ResetCounters(WebPAnimEncoder* const enc) {110enc->start_ = 0;111enc->count_ = 0;112enc->flush_count_ = 0;113enc->best_delta_ = DELTA_INFINITY;114enc->keyframe_ = KEYFRAME_NONE;115}116117static void DisableKeyframes(WebPAnimEncoderOptions* const enc_options) {118enc_options->kmax = INT_MAX;119enc_options->kmin = enc_options->kmax - 1;120}121122#define MAX_CACHED_FRAMES 30123124static void SanitizeEncoderOptions(WebPAnimEncoderOptions* const enc_options) {125int print_warning = enc_options->verbose;126127if (enc_options->minimize_size) {128DisableKeyframes(enc_options);129}130131if (enc_options->kmax == 1) { // All frames will be key-frames.132enc_options->kmin = 0;133enc_options->kmax = 0;134return;135} else if (enc_options->kmax <= 0) {136DisableKeyframes(enc_options);137print_warning = 0;138}139140if (enc_options->kmin >= enc_options->kmax) {141enc_options->kmin = enc_options->kmax - 1;142if (print_warning) {143fprintf(stderr, "WARNING: Setting kmin = %d, so that kmin < kmax.\n",144enc_options->kmin);145}146} else {147const int kmin_limit = enc_options->kmax / 2 + 1;148if (enc_options->kmin < kmin_limit && kmin_limit < enc_options->kmax) {149// This ensures that 
enc.keyframe + kmin >= kmax is always true. So, we150// can flush all the frames in the 'count_since_key_frame == kmax' case.151enc_options->kmin = kmin_limit;152if (print_warning) {153fprintf(stderr,154"WARNING: Setting kmin = %d, so that kmin >= kmax / 2 + 1.\n",155enc_options->kmin);156}157}158}159// Limit the max number of frames that are allocated.160if (enc_options->kmax - enc_options->kmin > MAX_CACHED_FRAMES) {161enc_options->kmin = enc_options->kmax - MAX_CACHED_FRAMES;162if (print_warning) {163fprintf(stderr,164"WARNING: Setting kmin = %d, so that kmax - kmin <= %d.\n",165enc_options->kmin, MAX_CACHED_FRAMES);166}167}168assert(enc_options->kmin < enc_options->kmax);169}170171#undef MAX_CACHED_FRAMES172173static void DefaultEncoderOptions(WebPAnimEncoderOptions* const enc_options) {174enc_options->anim_params.loop_count = 0;175enc_options->anim_params.bgcolor = 0xffffffff; // White.176enc_options->minimize_size = 0;177DisableKeyframes(enc_options);178enc_options->allow_mixed = 0;179enc_options->verbose = 0;180}181182int WebPAnimEncoderOptionsInitInternal(WebPAnimEncoderOptions* enc_options,183int abi_version) {184if (enc_options == NULL ||185WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) {186return 0;187}188DefaultEncoderOptions(enc_options);189return 1;190}191192// This starting value is more fit to WebPCleanupTransparentAreaLossless().193#define TRANSPARENT_COLOR 0x00000000194195static void ClearRectangle(WebPPicture* const picture,196int left, int top, int width, int height) {197int j;198for (j = top; j < top + height; ++j) {199uint32_t* const dst = picture->argb + j * picture->argb_stride;200int i;201for (i = left; i < left + width; ++i) {202dst[i] = TRANSPARENT_COLOR;203}204}205}206207static void WebPUtilClearPic(WebPPicture* const picture,208const FrameRectangle* const rect) {209if (rect != NULL) {210ClearRectangle(picture, rect->x_offset_, rect->y_offset_,211rect->width_, rect->height_);212} else {213ClearRectangle(picture, 0, 0, 
picture->width, picture->height);214}215}216217static void MarkNoError(WebPAnimEncoder* const enc) {218enc->error_str_[0] = '\0'; // Empty string.219}220221static void MarkError(WebPAnimEncoder* const enc, const char* str) {222if (snprintf(enc->error_str_, ERROR_STR_MAX_LENGTH, "%s.", str) < 0) {223assert(0); // FIX ME!224}225}226227static void MarkError2(WebPAnimEncoder* const enc,228const char* str, int error_code) {229if (snprintf(enc->error_str_, ERROR_STR_MAX_LENGTH, "%s: %d.", str,230error_code) < 0) {231assert(0); // FIX ME!232}233}234235WebPAnimEncoder* WebPAnimEncoderNewInternal(236int width, int height, const WebPAnimEncoderOptions* enc_options,237int abi_version) {238WebPAnimEncoder* enc;239240if (WEBP_ABI_IS_INCOMPATIBLE(abi_version, WEBP_MUX_ABI_VERSION)) {241return NULL;242}243if (width <= 0 || height <= 0 ||244(width * (uint64_t)height) >= MAX_IMAGE_AREA) {245return NULL;246}247248enc = (WebPAnimEncoder*)WebPSafeCalloc(1, sizeof(*enc));249if (enc == NULL) return NULL;250// sanity inits, so we can call WebPAnimEncoderDelete():251enc->encoded_frames_ = NULL;252enc->mux_ = NULL;253MarkNoError(enc);254255// Dimensions and options.256*(int*)&enc->canvas_width_ = width;257*(int*)&enc->canvas_height_ = height;258if (enc_options != NULL) {259*(WebPAnimEncoderOptions*)&enc->options_ = *enc_options;260SanitizeEncoderOptions((WebPAnimEncoderOptions*)&enc->options_);261} else {262DefaultEncoderOptions((WebPAnimEncoderOptions*)&enc->options_);263}264265// Canvas buffers.266if (!WebPPictureInit(&enc->curr_canvas_copy_) ||267!WebPPictureInit(&enc->prev_canvas_) ||268!WebPPictureInit(&enc->prev_canvas_disposed_)) {269goto Err;270}271enc->curr_canvas_copy_.width = width;272enc->curr_canvas_copy_.height = height;273enc->curr_canvas_copy_.use_argb = 1;274if (!WebPPictureAlloc(&enc->curr_canvas_copy_) ||275!WebPPictureCopy(&enc->curr_canvas_copy_, &enc->prev_canvas_) ||276!WebPPictureCopy(&enc->curr_canvas_copy_, &enc->prev_canvas_disposed_)) {277goto 
Err;278}279WebPUtilClearPic(&enc->prev_canvas_, NULL);280enc->curr_canvas_copy_modified_ = 1;281282// Encoded frames.283ResetCounters(enc);284// Note: one extra storage is for the previous frame.285enc->size_ = enc->options_.kmax - enc->options_.kmin + 1;286// We need space for at least 2 frames. But when kmin, kmax are both zero,287// enc->size_ will be 1. So we handle that special case below.288if (enc->size_ < 2) enc->size_ = 2;289enc->encoded_frames_ =290(EncodedFrame*)WebPSafeCalloc(enc->size_, sizeof(*enc->encoded_frames_));291if (enc->encoded_frames_ == NULL) goto Err;292293enc->mux_ = WebPMuxNew();294if (enc->mux_ == NULL) goto Err;295296enc->count_since_key_frame_ = 0;297enc->first_timestamp_ = 0;298enc->prev_timestamp_ = 0;299enc->prev_candidate_undecided_ = 0;300enc->is_first_frame_ = 1;301enc->got_null_frame_ = 0;302303return enc; // All OK.304305Err:306WebPAnimEncoderDelete(enc);307return NULL;308}309310// Release the data contained by 'encoded_frame'.311static void FrameRelease(EncodedFrame* const encoded_frame) {312if (encoded_frame != NULL) {313WebPDataClear(&encoded_frame->sub_frame_.bitstream);314WebPDataClear(&encoded_frame->key_frame_.bitstream);315memset(encoded_frame, 0, sizeof(*encoded_frame));316}317}318319void WebPAnimEncoderDelete(WebPAnimEncoder* enc) {320if (enc != NULL) {321WebPPictureFree(&enc->curr_canvas_copy_);322WebPPictureFree(&enc->prev_canvas_);323WebPPictureFree(&enc->prev_canvas_disposed_);324if (enc->encoded_frames_ != NULL) {325size_t i;326for (i = 0; i < enc->size_; ++i) {327FrameRelease(&enc->encoded_frames_[i]);328}329WebPSafeFree(enc->encoded_frames_);330}331WebPMuxDelete(enc->mux_);332WebPSafeFree(enc);333}334}335336// -----------------------------------------------------------------------------337// Frame addition.338339// Returns cached frame at the given 'position'.340static EncodedFrame* GetFrame(const WebPAnimEncoder* const enc,341size_t position) {342assert(enc->start_ + position < enc->size_);343return 
&enc->encoded_frames_[enc->start_ + position];344}345346typedef int (*ComparePixelsFunc)(const uint32_t*, int, const uint32_t*, int,347int, int);348349// Returns true if 'length' number of pixels in 'src' and 'dst' are equal,350// assuming the given step sizes between pixels.351// 'max_allowed_diff' is unused and only there to allow function pointer use.352static WEBP_INLINE int ComparePixelsLossless(const uint32_t* src, int src_step,353const uint32_t* dst, int dst_step,354int length, int max_allowed_diff) {355(void)max_allowed_diff;356assert(length > 0);357while (length-- > 0) {358if (*src != *dst) {359return 0;360}361src += src_step;362dst += dst_step;363}364return 1;365}366367// Helper to check if each channel in 'src' and 'dst' is at most off by368// 'max_allowed_diff'.369static WEBP_INLINE int PixelsAreSimilar(uint32_t src, uint32_t dst,370int max_allowed_diff) {371const int src_a = (src >> 24) & 0xff;372const int src_r = (src >> 16) & 0xff;373const int src_g = (src >> 8) & 0xff;374const int src_b = (src >> 0) & 0xff;375const int dst_a = (dst >> 24) & 0xff;376const int dst_r = (dst >> 16) & 0xff;377const int dst_g = (dst >> 8) & 0xff;378const int dst_b = (dst >> 0) & 0xff;379380return (src_a == dst_a) &&381(abs(src_r - dst_r) * dst_a <= (max_allowed_diff * 255)) &&382(abs(src_g - dst_g) * dst_a <= (max_allowed_diff * 255)) &&383(abs(src_b - dst_b) * dst_a <= (max_allowed_diff * 255));384}385386// Returns true if 'length' number of pixels in 'src' and 'dst' are within an387// error bound, assuming the given step sizes between pixels.388static WEBP_INLINE int ComparePixelsLossy(const uint32_t* src, int src_step,389const uint32_t* dst, int dst_step,390int length, int max_allowed_diff) {391assert(length > 0);392while (length-- > 0) {393if (!PixelsAreSimilar(*src, *dst, max_allowed_diff)) {394return 0;395}396src += src_step;397dst += dst_step;398}399return 1;400}401402static int IsEmptyRect(const FrameRectangle* const rect) {403return (rect->width_ == 0) || 
(rect->height_ == 0);404}405406static int QualityToMaxDiff(float quality) {407const double val = pow(quality / 100., 0.5);408const double max_diff = 31 * (1 - val) + 1 * val;409return (int)(max_diff + 0.5);410}411412// Assumes that an initial valid guess of change rectangle 'rect' is passed.413static void MinimizeChangeRectangle(const WebPPicture* const src,414const WebPPicture* const dst,415FrameRectangle* const rect,416int is_lossless, float quality) {417int i, j;418const ComparePixelsFunc compare_pixels =419is_lossless ? ComparePixelsLossless : ComparePixelsLossy;420const int max_allowed_diff_lossy = QualityToMaxDiff(quality);421const int max_allowed_diff = is_lossless ? 0 : max_allowed_diff_lossy;422423// Sanity checks.424assert(src->width == dst->width && src->height == dst->height);425assert(rect->x_offset_ + rect->width_ <= dst->width);426assert(rect->y_offset_ + rect->height_ <= dst->height);427428// Left boundary.429for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) {430const uint32_t* const src_argb =431&src->argb[rect->y_offset_ * src->argb_stride + i];432const uint32_t* const dst_argb =433&dst->argb[rect->y_offset_ * dst->argb_stride + i];434if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,435rect->height_, max_allowed_diff)) {436--rect->width_; // Redundant column.437++rect->x_offset_;438} else {439break;440}441}442if (rect->width_ == 0) goto NoChange;443444// Right boundary.445for (i = rect->x_offset_ + rect->width_ - 1; i >= rect->x_offset_; --i) {446const uint32_t* const src_argb =447&src->argb[rect->y_offset_ * src->argb_stride + i];448const uint32_t* const dst_argb =449&dst->argb[rect->y_offset_ * dst->argb_stride + i];450if (compare_pixels(src_argb, src->argb_stride, dst_argb, dst->argb_stride,451rect->height_, max_allowed_diff)) {452--rect->width_; // Redundant column.453} else {454break;455}456}457if (rect->width_ == 0) goto NoChange;458459// Top boundary.460for (j = rect->y_offset_; j < rect->y_offset_ 
+ rect->height_; ++j) {461const uint32_t* const src_argb =462&src->argb[j * src->argb_stride + rect->x_offset_];463const uint32_t* const dst_argb =464&dst->argb[j * dst->argb_stride + rect->x_offset_];465if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width_,466max_allowed_diff)) {467--rect->height_; // Redundant row.468++rect->y_offset_;469} else {470break;471}472}473if (rect->height_ == 0) goto NoChange;474475// Bottom boundary.476for (j = rect->y_offset_ + rect->height_ - 1; j >= rect->y_offset_; --j) {477const uint32_t* const src_argb =478&src->argb[j * src->argb_stride + rect->x_offset_];479const uint32_t* const dst_argb =480&dst->argb[j * dst->argb_stride + rect->x_offset_];481if (compare_pixels(src_argb, 1, dst_argb, 1, rect->width_,482max_allowed_diff)) {483--rect->height_; // Redundant row.484} else {485break;486}487}488if (rect->height_ == 0) goto NoChange;489490if (IsEmptyRect(rect)) {491NoChange:492rect->x_offset_ = 0;493rect->y_offset_ = 0;494rect->width_ = 0;495rect->height_ = 0;496}497}498499// Snap rectangle to even offsets (and adjust dimensions if needed).500static WEBP_INLINE void SnapToEvenOffsets(FrameRectangle* const rect) {501rect->width_ += (rect->x_offset_ & 1);502rect->height_ += (rect->y_offset_ & 1);503rect->x_offset_ &= ~1;504rect->y_offset_ &= ~1;505}506507typedef struct {508int should_try_; // Should try this set of parameters.509int empty_rect_allowed_; // Frame with empty rectangle can be skipped.510FrameRectangle rect_ll_; // Frame rectangle for lossless compression.511WebPPicture sub_frame_ll_; // Sub-frame pic for lossless compression.512FrameRectangle rect_lossy_; // Frame rectangle for lossy compression.513// Could be smaller than rect_ll_ as pixels514// with small diffs can be ignored.515WebPPicture sub_frame_lossy_; // Sub-frame pic for lossless compression.516} SubFrameParams;517518static int SubFrameParamsInit(SubFrameParams* const params,519int should_try, int empty_rect_allowed) {520params->should_try_ = 
should_try;521params->empty_rect_allowed_ = empty_rect_allowed;522if (!WebPPictureInit(¶ms->sub_frame_ll_) ||523!WebPPictureInit(¶ms->sub_frame_lossy_)) {524return 0;525}526return 1;527}528529static void SubFrameParamsFree(SubFrameParams* const params) {530WebPPictureFree(¶ms->sub_frame_ll_);531WebPPictureFree(¶ms->sub_frame_lossy_);532}533534// Given previous and current canvas, picks the optimal rectangle for the535// current frame based on 'is_lossless' and other parameters. Assumes that the536// initial guess 'rect' is valid.537static int GetSubRect(const WebPPicture* const prev_canvas,538const WebPPicture* const curr_canvas, int is_key_frame,539int is_first_frame, int empty_rect_allowed,540int is_lossless, float quality,541FrameRectangle* const rect,542WebPPicture* const sub_frame) {543if (!is_key_frame || is_first_frame) { // Optimize frame rectangle.544// Note: This behaves as expected for first frame, as 'prev_canvas' is545// initialized to a fully transparent canvas in the beginning.546MinimizeChangeRectangle(prev_canvas, curr_canvas, rect,547is_lossless, quality);548}549550if (IsEmptyRect(rect)) {551if (empty_rect_allowed) { // No need to get 'sub_frame'.552return 1;553} else { // Force a 1x1 rectangle.554rect->width_ = 1;555rect->height_ = 1;556assert(rect->x_offset_ == 0);557assert(rect->y_offset_ == 0);558}559}560561SnapToEvenOffsets(rect);562return WebPPictureView(curr_canvas, rect->x_offset_, rect->y_offset_,563rect->width_, rect->height_, sub_frame);564}565566// Picks optimal frame rectangle for both lossless and lossy compression. 
The567// initial guess for frame rectangles will be the full canvas.568static int GetSubRects(const WebPPicture* const prev_canvas,569const WebPPicture* const curr_canvas, int is_key_frame,570int is_first_frame, float quality,571SubFrameParams* const params) {572// Lossless frame rectangle.573params->rect_ll_.x_offset_ = 0;574params->rect_ll_.y_offset_ = 0;575params->rect_ll_.width_ = curr_canvas->width;576params->rect_ll_.height_ = curr_canvas->height;577if (!GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame,578params->empty_rect_allowed_, 1, quality,579¶ms->rect_ll_, ¶ms->sub_frame_ll_)) {580return 0;581}582// Lossy frame rectangle.583params->rect_lossy_ = params->rect_ll_; // seed with lossless rect.584return GetSubRect(prev_canvas, curr_canvas, is_key_frame, is_first_frame,585params->empty_rect_allowed_, 0, quality,586¶ms->rect_lossy_, ¶ms->sub_frame_lossy_);587}588589static WEBP_INLINE int clip(int v, int min_v, int max_v) {590return (v < min_v) ? min_v : (v > max_v) ? 
max_v : v;591}592593int WebPAnimEncoderRefineRect(594const WebPPicture* const prev_canvas, const WebPPicture* const curr_canvas,595int is_lossless, float quality, int* const x_offset, int* const y_offset,596int* const width, int* const height) {597FrameRectangle rect;598const int right = clip(*x_offset + *width, 0, curr_canvas->width);599const int left = clip(*x_offset, 0, curr_canvas->width - 1);600const int bottom = clip(*y_offset + *height, 0, curr_canvas->height);601const int top = clip(*y_offset, 0, curr_canvas->height - 1);602if (prev_canvas == NULL || curr_canvas == NULL ||603prev_canvas->width != curr_canvas->width ||604prev_canvas->height != curr_canvas->height ||605!prev_canvas->use_argb || !curr_canvas->use_argb) {606return 0;607}608rect.x_offset_ = left;609rect.y_offset_ = top;610rect.width_ = clip(right - left, 0, curr_canvas->width - rect.x_offset_);611rect.height_ = clip(bottom - top, 0, curr_canvas->height - rect.y_offset_);612MinimizeChangeRectangle(prev_canvas, curr_canvas, &rect, is_lossless,613quality);614SnapToEvenOffsets(&rect);615*x_offset = rect.x_offset_;616*y_offset = rect.y_offset_;617*width = rect.width_;618*height = rect.height_;619return 1;620}621622static void DisposeFrameRectangle(int dispose_method,623const FrameRectangle* const rect,624WebPPicture* const curr_canvas) {625assert(rect != NULL);626if (dispose_method == WEBP_MUX_DISPOSE_BACKGROUND) {627WebPUtilClearPic(curr_canvas, rect);628}629}630631static uint32_t RectArea(const FrameRectangle* const rect) {632return (uint32_t)rect->width_ * rect->height_;633}634635static int IsLosslessBlendingPossible(const WebPPicture* const src,636const WebPPicture* const dst,637const FrameRectangle* const rect) {638int i, j;639assert(src->width == dst->width && src->height == dst->height);640assert(rect->x_offset_ + rect->width_ <= dst->width);641assert(rect->y_offset_ + rect->height_ <= dst->height);642for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) {643for (i = 
rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) {644const uint32_t src_pixel = src->argb[j * src->argb_stride + i];645const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i];646const uint32_t dst_alpha = dst_pixel >> 24;647if (dst_alpha != 0xff && src_pixel != dst_pixel) {648// In this case, if we use blending, we can't attain the desired649// 'dst_pixel' value for this pixel. So, blending is not possible.650return 0;651}652}653}654return 1;655}656657static int IsLossyBlendingPossible(const WebPPicture* const src,658const WebPPicture* const dst,659const FrameRectangle* const rect,660float quality) {661const int max_allowed_diff_lossy = QualityToMaxDiff(quality);662int i, j;663assert(src->width == dst->width && src->height == dst->height);664assert(rect->x_offset_ + rect->width_ <= dst->width);665assert(rect->y_offset_ + rect->height_ <= dst->height);666for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) {667for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) {668const uint32_t src_pixel = src->argb[j * src->argb_stride + i];669const uint32_t dst_pixel = dst->argb[j * dst->argb_stride + i];670const uint32_t dst_alpha = dst_pixel >> 24;671if (dst_alpha != 0xff &&672!PixelsAreSimilar(src_pixel, dst_pixel, max_allowed_diff_lossy)) {673// In this case, if we use blending, we can't attain the desired674// 'dst_pixel' value for this pixel. 
So, blending is not possible.675return 0;676}677}678}679return 1;680}681682// For pixels in 'rect', replace those pixels in 'dst' that are same as 'src' by683// transparent pixels.684// Returns true if at least one pixel gets modified.685static int IncreaseTransparency(const WebPPicture* const src,686const FrameRectangle* const rect,687WebPPicture* const dst) {688int i, j;689int modified = 0;690assert(src != NULL && dst != NULL && rect != NULL);691assert(src->width == dst->width && src->height == dst->height);692for (j = rect->y_offset_; j < rect->y_offset_ + rect->height_; ++j) {693const uint32_t* const psrc = src->argb + j * src->argb_stride;694uint32_t* const pdst = dst->argb + j * dst->argb_stride;695for (i = rect->x_offset_; i < rect->x_offset_ + rect->width_; ++i) {696if (psrc[i] == pdst[i] && pdst[i] != TRANSPARENT_COLOR) {697pdst[i] = TRANSPARENT_COLOR;698modified = 1;699}700}701}702return modified;703}704705#undef TRANSPARENT_COLOR706707// Replace similar blocks of pixels by a 'see-through' transparent block708// with uniform average color.709// Assumes lossy compression is being used.710// Returns true if at least one pixel gets modified.711static int FlattenSimilarBlocks(const WebPPicture* const src,712const FrameRectangle* const rect,713WebPPicture* const dst, float quality) {714const int max_allowed_diff_lossy = QualityToMaxDiff(quality);715int i, j;716int modified = 0;717const int block_size = 8;718const int y_start = (rect->y_offset_ + block_size) & ~(block_size - 1);719const int y_end = (rect->y_offset_ + rect->height_) & ~(block_size - 1);720const int x_start = (rect->x_offset_ + block_size) & ~(block_size - 1);721const int x_end = (rect->x_offset_ + rect->width_) & ~(block_size - 1);722assert(src != NULL && dst != NULL && rect != NULL);723assert(src->width == dst->width && src->height == dst->height);724assert((block_size & (block_size - 1)) == 0); // must be a power of 2725// Iterate over each block and count similar pixels.726for (j = y_start; j 
< y_end; j += block_size) {
    for (i = x_start; i < x_end; i += block_size) {
      int cnt = 0;
      int avg_r = 0, avg_g = 0, avg_b = 0;
      int x, y;
      const uint32_t* const psrc = src->argb + j * src->argb_stride + i;
      uint32_t* const pdst = dst->argb + j * dst->argb_stride + i;
      // Count pixels in this block that are opaque AND close enough to the
      // corresponding destination pixel, accumulating their average color.
      for (y = 0; y < block_size; ++y) {
        for (x = 0; x < block_size; ++x) {
          const uint32_t src_pixel = psrc[x + y * src->argb_stride];
          const int alpha = src_pixel >> 24;
          if (alpha == 0xff &&
              PixelsAreSimilar(src_pixel, pdst[x + y * dst->argb_stride],
                               max_allowed_diff_lossy)) {
            ++cnt;
            avg_r += (src_pixel >> 16) & 0xff;
            avg_g += (src_pixel >> 8) & 0xff;
            avg_b += (src_pixel >> 0) & 0xff;
          }
        }
      }
      // If we have a fully similar block, we replace it with an
      // average transparent block. This compresses better in lossy mode.
      if (cnt == block_size * block_size) {
        const uint32_t color = (0x00 << 24) |
                               ((avg_r / cnt) << 16) |
                               ((avg_g / cnt) << 8) |
                               ((avg_b / cnt) << 0);
        for (y = 0; y < block_size; ++y) {
          for (x = 0; x < block_size; ++x) {
            pdst[x + y * dst->argb_stride] = color;
          }
        }
        modified = 1;
      }
    }
  }
  return modified;
}

// Encodes 'pic' with 'config' into 'memory' using the in-memory writer.
// Returns false on encoding failure (details stay in pic->error_code).
static int EncodeFrame(const WebPConfig* const config, WebPPicture* const pic,
                       WebPMemoryWriter* const memory) {
  pic->use_argb = 1;
  pic->writer = WebPMemoryWrite;
  pic->custom_ptr = memory;
  if (!WebPEncode(config, pic)) {
    return 0;
  }
  return 1;
}

// Struct representing a candidate encoded frame including its metadata.
typedef struct {
  WebPMemoryWriter mem_;
  WebPMuxFrameInfo info_;
  FrameRectangle rect_;
  int evaluate_;  // True if this candidate should be evaluated.
} Candidate;

// Generates a candidate encoded frame given a picture and metadata.
// On success the candidate owns the encoded bytes in 'candidate->mem_';
// on failure that memory is released before returning.
static WebPEncodingError EncodeCandidate(WebPPicture* const sub_frame,
                                         const FrameRectangle* const rect,
                                         const WebPConfig* const encoder_config,
                                         int use_blending,
                                         Candidate* const candidate) {
  WebPConfig config = *encoder_config;
  WebPEncodingError error_code = VP8_ENC_OK;
  assert(candidate != NULL);
  memset(candidate, 0, sizeof(*candidate));

  // Set frame rect and info.
  candidate->rect_ = *rect;
  candidate->info_.id = WEBP_CHUNK_ANMF;
  candidate->info_.x_offset = rect->x_offset_;
  candidate->info_.y_offset = rect->y_offset_;
  candidate->info_.dispose_method = WEBP_MUX_DISPOSE_NONE;  // Set later.
  candidate->info_.blend_method =
      use_blending ? WEBP_MUX_BLEND : WEBP_MUX_NO_BLEND;
  candidate->info_.duration = 0;  // Set in next call to WebPAnimEncoderAdd().

  // Encode picture.
  WebPMemoryWriterInit(&candidate->mem_);

  if (!config.lossless && use_blending) {
    // Disable filtering to avoid blockiness in reconstructed frames at the
    // time of decoding.
    config.autofilter = 0;
    config.filter_strength = 0;
  }
  if (!EncodeFrame(&config, sub_frame, &candidate->mem_)) {
    error_code = sub_frame->error_code;
    goto Err;
  }

  candidate->evaluate_ = 1;
  return error_code;

 Err:
  WebPMemoryWriterClear(&candidate->mem_);
  return error_code;
}

// Refreshes 'curr_canvas_copy_' from 'curr_canvas_' if the copy was marked
// modified (e.g. by IncreaseTransparency()/FlattenSimilarBlocks()).
static void CopyCurrentCanvas(WebPAnimEncoder* const enc) {
  if (enc->curr_canvas_copy_modified_) {
    WebPCopyPixels(enc->curr_canvas_, &enc->curr_canvas_copy_);
    enc->curr_canvas_copy_.progress_hook = enc->curr_canvas_->progress_hook;
    enc->curr_canvas_copy_.user_data = enc->curr_canvas_->user_data;
    enc->curr_canvas_copy_modified_ = 0;
  }
}

// Indices into the candidate array: lossless/lossy crossed with the assumed
// dispose method of the previous frame (NONE vs BACKGROUND).
enum {
  LL_DISP_NONE = 0,
  LL_DISP_BG,
  LOSSY_DISP_NONE,
  LOSSY_DISP_BG,
  CANDIDATE_COUNT
};

#define MIN_COLORS_LOSSY     31  // Don't try lossy below this threshold.
#define MAX_COLORS_LOSSLESS 194  // Don't try lossless above this threshold.

// Generates candidates for a given dispose method given pre-filled sub-frame
// 'params'.
static WebPEncodingError GenerateCandidates(
    WebPAnimEncoder* const enc, Candidate candidates[CANDIDATE_COUNT],
    WebPMuxAnimDispose dispose_method, int is_lossless, int is_key_frame,
    SubFrameParams* const params,
    const WebPConfig* const config_ll, const WebPConfig* const config_lossy) {
  WebPEncodingError error_code = VP8_ENC_OK;
  const int is_dispose_none = (dispose_method == WEBP_MUX_DISPOSE_NONE);
  Candidate* const candidate_ll =
      is_dispose_none ? &candidates[LL_DISP_NONE] : &candidates[LL_DISP_BG];
  Candidate* const candidate_lossy = is_dispose_none
                                     ? &candidates[LOSSY_DISP_NONE]
                                     : &candidates[LOSSY_DISP_BG];
  WebPPicture* const curr_canvas = &enc->curr_canvas_copy_;
  const WebPPicture* const prev_canvas =
      is_dispose_none ? &enc->prev_canvas_ : &enc->prev_canvas_disposed_;
  int use_blending_ll, use_blending_lossy;
  int evaluate_ll, evaluate_lossy;

  CopyCurrentCanvas(enc);
  use_blending_ll =
      !is_key_frame &&
      IsLosslessBlendingPossible(prev_canvas, curr_canvas, &params->rect_ll_);
  use_blending_lossy =
      !is_key_frame &&
      IsLossyBlendingPossible(prev_canvas, curr_canvas, &params->rect_lossy_,
                              config_lossy->quality);

  // Pick candidates to be tried.
  if (!enc->options_.allow_mixed) {
    evaluate_ll = is_lossless;
    evaluate_lossy = !is_lossless;
  } else if (enc->options_.minimize_size) {
    evaluate_ll = 1;
    evaluate_lossy = 1;
  } else {  // Use a heuristic for trying lossless and/or lossy compression.
    const int num_colors = WebPGetColorPalette(&params->sub_frame_ll_, NULL);
    evaluate_ll = (num_colors < MAX_COLORS_LOSSLESS);
    evaluate_lossy = (num_colors >= MIN_COLORS_LOSSY);
  }

  // Generate candidates.
  if (evaluate_ll) {
    CopyCurrentCanvas(enc);
    if (use_blending_ll) {
      enc->curr_canvas_copy_modified_ =
          IncreaseTransparency(prev_canvas, &params->rect_ll_, curr_canvas);
    }
    error_code = EncodeCandidate(&params->sub_frame_ll_, &params->rect_ll_,
                                 config_ll, use_blending_ll, candidate_ll);
    if (error_code != VP8_ENC_OK) return error_code;
  }
  if (evaluate_lossy) {
    CopyCurrentCanvas(enc);
    if (use_blending_lossy) {
      enc->curr_canvas_copy_modified_ =
          FlattenSimilarBlocks(prev_canvas, &params->rect_lossy_, curr_canvas,
                               config_lossy->quality);
    }
    error_code =
        EncodeCandidate(&params->sub_frame_lossy_, &params->rect_lossy_,
                        config_lossy, use_blending_lossy, candidate_lossy);
    if (error_code != VP8_ENC_OK) return error_code;
    enc->curr_canvas_copy_modified_ = 1;
  }
  return error_code;
}

#undef MIN_COLORS_LOSSY
#undef MAX_COLORS_LOSSLESS

// Exposes the writer's buffer as a WebPData view (no copy; 'memory' still
// owns the bytes).
static void GetEncodedData(const WebPMemoryWriter* const memory,
                           WebPData* const encoded_data) {
  encoded_data->bytes = memory->mem;
  encoded_data->size = memory->size;
}

// Sets dispose method of the previous frame to be 'dispose_method'.
static void SetPreviousDisposeMethod(WebPAnimEncoder* const enc,
                                     WebPMuxAnimDispose dispose_method) {
  const size_t position = enc->count_ - 2;
  EncodedFrame* const prev_enc_frame = GetFrame(enc, position);
  assert(enc->count_ >= 2);  // As current and previous frames are in enc.

  if (enc->prev_candidate_undecided_) {
    // Previous frame may still become either a sub-frame or a key-frame, so
    // keep both variants consistent.
    assert(dispose_method == WEBP_MUX_DISPOSE_NONE);
    prev_enc_frame->sub_frame_.dispose_method = dispose_method;
    prev_enc_frame->key_frame_.dispose_method = dispose_method;
  } else {
    WebPMuxFrameInfo* const prev_info = prev_enc_frame->is_key_frame_
                                        ? &prev_enc_frame->key_frame_
                                        : &prev_enc_frame->sub_frame_;
    prev_info->dispose_method = dispose_method;
  }
}

// Adds 'duration' to the previous frame's duration. If that would reach
// MAX_DURATION, a canned 1x1 transparent frame is appended instead so the
// stored duration never overflows the format's limit. Returns false on
// allocation failure.
static int IncreasePreviousDuration(WebPAnimEncoder* const enc, int duration) {
  const size_t position = enc->count_ - 1;
  EncodedFrame* const prev_enc_frame = GetFrame(enc, position);
  int new_duration;

  assert(enc->count_ >= 1);
  assert(prev_enc_frame->sub_frame_.duration ==
         prev_enc_frame->key_frame_.duration);
  assert(prev_enc_frame->sub_frame_.duration ==
         (prev_enc_frame->sub_frame_.duration & (MAX_DURATION - 1)));
  assert(duration == (duration & (MAX_DURATION - 1)));

  new_duration = prev_enc_frame->sub_frame_.duration + duration;
  if (new_duration >= MAX_DURATION) {  // Special case.
    // Separate out previous frame from earlier merged frames to avoid overflow.
    // We add a 1x1 transparent frame for the previous frame, with blending on.
    const FrameRectangle rect = { 0, 0, 1, 1 };
    // Pre-encoded 1x1 transparent WebP bitstreams (lossless and lossy
    // variants), used verbatim as the inserted frame's payload.
    const uint8_t lossless_1x1_bytes[] = {
      0x52, 0x49, 0x46, 0x46, 0x14, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50,
      0x56, 0x50, 0x38, 0x4c, 0x08, 0x00, 0x00, 0x00, 0x2f, 0x00, 0x00, 0x00,
      0x10, 0x88, 0x88, 0x08
    };
    const WebPData lossless_1x1 = {
      lossless_1x1_bytes, sizeof(lossless_1x1_bytes)
    };
    const uint8_t lossy_1x1_bytes[] = {
      0x52, 0x49, 0x46, 0x46, 0x40, 0x00, 0x00, 0x00, 0x57, 0x45, 0x42, 0x50,
      0x56, 0x50, 0x38, 0x58, 0x0a, 0x00, 0x00, 0x00, 0x10, 0x00, 0x00, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x41, 0x4c, 0x50, 0x48, 0x02, 0x00,
      0x00, 0x00, 0x00, 0x00, 0x56, 0x50, 0x38, 0x20, 0x18, 0x00, 0x00, 0x00,
      0x30, 0x01, 0x00, 0x9d, 0x01, 0x2a, 0x01, 0x00, 0x01, 0x00, 0x02, 0x00,
      0x34, 0x25, 0xa4, 0x00, 0x03, 0x70, 0x00, 0xfe, 0xfb, 0xfd, 0x50, 0x00
    };
    const WebPData lossy_1x1 = { lossy_1x1_bytes, sizeof(lossy_1x1_bytes) };
    const int can_use_lossless =
        (enc->last_config_.lossless || enc->options_.allow_mixed);
    EncodedFrame* const curr_enc_frame = GetFrame(enc, enc->count_);
    curr_enc_frame->is_key_frame_ = 0;
    curr_enc_frame->sub_frame_.id = WEBP_CHUNK_ANMF;
    curr_enc_frame->sub_frame_.x_offset = 0;
    curr_enc_frame->sub_frame_.y_offset = 0;
    curr_enc_frame->sub_frame_.dispose_method = WEBP_MUX_DISPOSE_NONE;
    curr_enc_frame->sub_frame_.blend_method = WEBP_MUX_BLEND;
    curr_enc_frame->sub_frame_.duration = duration;
    if (!WebPDataCopy(can_use_lossless ? &lossless_1x1 : &lossy_1x1,
                      &curr_enc_frame->sub_frame_.bitstream)) {
      return 0;
    }
    ++enc->count_;
    ++enc->count_since_key_frame_;
    enc->flush_count_ = enc->count_ - 1;
    enc->prev_candidate_undecided_ = 0;
    enc->prev_rect_ = rect;
  } else {  // Regular case.
    // Increase duration of the previous frame by 'duration'.
    prev_enc_frame->sub_frame_.duration = new_duration;
    prev_enc_frame->key_frame_.duration = new_duration;
  }
  return 1;
}

// Pick the candidate encoded frame with smallest size and release other
// candidates.
// TODO(later): Perhaps a rough SSIM/PSNR produced by the encoder should
// also be a criteria, in addition to sizes.
static void PickBestCandidate(WebPAnimEncoder* const enc,
                              Candidate* const candidates, int is_key_frame,
                              EncodedFrame* const encoded_frame) {
  int i;
  int best_idx = -1;
  size_t best_size = ~0;  // Start at SIZE_MAX so any candidate wins.
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate_) {
      const size_t candidate_size = candidates[i].mem_.size;
      if (candidate_size < best_size) {
        best_idx = i;
        best_size = candidate_size;
      }
    }
  }
  assert(best_idx != -1);
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate_) {
      if (i == best_idx) {
        WebPMuxFrameInfo* const dst = is_key_frame
                                      ? &encoded_frame->key_frame_
                                      : &encoded_frame->sub_frame_;
        *dst = candidates[i].info_;
        // Ownership of the winning candidate's bytes moves to 'dst'.
        GetEncodedData(&candidates[i].mem_, &dst->bitstream);
        if (!is_key_frame) {
          // Note: Previous dispose method only matters for non-keyframes.
          // Also, we don't want to modify previous dispose method that was
          // selected when a non key-frame was assumed.
          const WebPMuxAnimDispose prev_dispose_method =
              (best_idx == LL_DISP_NONE || best_idx == LOSSY_DISP_NONE)
                  ? WEBP_MUX_DISPOSE_NONE
                  : WEBP_MUX_DISPOSE_BACKGROUND;
          SetPreviousDisposeMethod(enc, prev_dispose_method);
        }
        enc->prev_rect_ = candidates[i].rect_;  // save for next frame.
      } else {
        WebPMemoryWriterClear(&candidates[i].mem_);
        candidates[i].evaluate_ = 0;
      }
    }
  }
}

// Depending on the configuration, tries different compressions
// (lossy/lossless), dispose methods, blending methods etc to encode the current
// frame and outputs the best one in 'encoded_frame'.
// 'frame_skipped' will be set to true if this frame should actually be skipped.
static WebPEncodingError SetFrame(WebPAnimEncoder* const enc,
                                  const WebPConfig* const config,
                                  int is_key_frame,
                                  EncodedFrame* const encoded_frame,
                                  int* const frame_skipped) {
  int i;
  WebPEncodingError error_code = VP8_ENC_OK;
  const WebPPicture* const curr_canvas = &enc->curr_canvas_copy_;
  const WebPPicture* const prev_canvas = &enc->prev_canvas_;
  Candidate candidates[CANDIDATE_COUNT];
  const int is_lossless = config->lossless;
  const int consider_lossless = is_lossless || enc->options_.allow_mixed;
  const int consider_lossy = !is_lossless || enc->options_.allow_mixed;
  const int is_first_frame = enc->is_first_frame_;

  // First frame cannot be skipped as there is no 'previous frame' to merge it
  // to. So, empty rectangle is not allowed for the first frame.
  const int empty_rect_allowed_none = !is_first_frame;

  // Even if there is exact pixel match between 'disposed previous canvas' and
  // 'current canvas', we can't skip current frame, as there may not be exact
  // pixel match between 'previous canvas' and 'current canvas'. So, we don't
  // allow empty rectangle in this case.
  const int empty_rect_allowed_bg = 0;

  // If current frame is a key-frame, dispose method of previous frame doesn't
  // matter, so we don't try dispose to background.
  // Also, if key-frame insertion is on, and previous frame could be picked as
  // either a sub-frame or a key-frame, then we can't be sure about what frame
  // rectangle would be disposed. In that case too, we don't try dispose to
  // background.
  const int dispose_bg_possible =
      !is_key_frame && !enc->prev_candidate_undecided_;

  SubFrameParams dispose_none_params;
  SubFrameParams dispose_bg_params;

  WebPConfig config_ll = *config;
  WebPConfig config_lossy = *config;
  config_ll.lossless = 1;
  config_lossy.lossless = 0;
  enc->last_config_ = *config;
  enc->last_config_reversed_ = config->lossless ? config_lossy : config_ll;
  *frame_skipped = 0;

  if (!SubFrameParamsInit(&dispose_none_params, 1, empty_rect_allowed_none) ||
      !SubFrameParamsInit(&dispose_bg_params, 0, empty_rect_allowed_bg)) {
    return VP8_ENC_ERROR_INVALID_CONFIGURATION;
  }

  memset(candidates, 0, sizeof(candidates));

  // Change-rectangle assuming previous frame was DISPOSE_NONE.
  if (!GetSubRects(prev_canvas, curr_canvas, is_key_frame, is_first_frame,
                   config_lossy.quality, &dispose_none_params)) {
    error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
    goto Err;
  }

  if ((consider_lossless && IsEmptyRect(&dispose_none_params.rect_ll_)) ||
      (consider_lossy && IsEmptyRect(&dispose_none_params.rect_lossy_))) {
    // Don't encode the frame at all. Instead, the duration of the previous
    // frame will be increased later.
    assert(empty_rect_allowed_none);
    *frame_skipped = 1;
    goto End;
  }

  if (dispose_bg_possible) {
    // Change-rectangle assuming previous frame was DISPOSE_BACKGROUND.
    WebPPicture* const prev_canvas_disposed = &enc->prev_canvas_disposed_;
    WebPCopyPixels(prev_canvas, prev_canvas_disposed);
    DisposeFrameRectangle(WEBP_MUX_DISPOSE_BACKGROUND, &enc->prev_rect_,
                          prev_canvas_disposed);

    if (!GetSubRects(prev_canvas_disposed, curr_canvas, is_key_frame,
                     is_first_frame, config_lossy.quality,
                     &dispose_bg_params)) {
      error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
      goto Err;
    }
    assert(!IsEmptyRect(&dispose_bg_params.rect_ll_));
    assert(!IsEmptyRect(&dispose_bg_params.rect_lossy_));

    if (enc->options_.minimize_size) {  // Try both dispose methods.
      dispose_bg_params.should_try_ = 1;
      dispose_none_params.should_try_ = 1;
    } else if ((is_lossless &&
                RectArea(&dispose_bg_params.rect_ll_) <
                    RectArea(&dispose_none_params.rect_ll_)) ||
               (!is_lossless &&
                RectArea(&dispose_bg_params.rect_lossy_) <
                    RectArea(&dispose_none_params.rect_lossy_))) {
      dispose_bg_params.should_try_ = 1;  // Pick DISPOSE_BACKGROUND.
      dispose_none_params.should_try_ = 0;
    }
  }

  if (dispose_none_params.should_try_) {
    error_code = GenerateCandidates(
        enc, candidates, WEBP_MUX_DISPOSE_NONE, is_lossless, is_key_frame,
        &dispose_none_params, &config_ll, &config_lossy);
    if (error_code != VP8_ENC_OK) goto Err;
  }

  if (dispose_bg_params.should_try_) {
    assert(!enc->is_first_frame_);
    assert(dispose_bg_possible);
    error_code = GenerateCandidates(
        enc, candidates, WEBP_MUX_DISPOSE_BACKGROUND, is_lossless, is_key_frame,
        &dispose_bg_params, &config_ll, &config_lossy);
    if (error_code != VP8_ENC_OK) goto Err;
  }

  PickBestCandidate(enc, candidates, is_key_frame, encoded_frame);

  goto End;

 Err:
  for (i = 0; i < CANDIDATE_COUNT; ++i) {
    if (candidates[i].evaluate_) {
      WebPMemoryWriterClear(&candidates[i].mem_);
    }
  }

 End:
  SubFrameParamsFree(&dispose_none_params);
  SubFrameParamsFree(&dispose_bg_params);
  return error_code;
}

// Calculate the penalty incurred if we encode given frame as a key frame
// instead of a sub-frame.
static int64_t KeyFramePenalty(const EncodedFrame* const encoded_frame) {
  return ((int64_t)encoded_frame->key_frame_.bitstream.size -
          encoded_frame->sub_frame_.bitstream.size);
}

// Encodes the current canvas into the frame cache, deciding between
// sub-frame and key-frame variants per the kmin/kmax heuristic, and updates
// flush/key-frame bookkeeping. Returns false on failure (error reported via
// MarkError2() and the canvas' error_code).
static int CacheFrame(WebPAnimEncoder* const enc,
                      const WebPConfig* const config) {
  int ok = 0;
  int frame_skipped = 0;
  WebPEncodingError error_code = VP8_ENC_OK;
  const size_t position = enc->count_;
  EncodedFrame* const encoded_frame = GetFrame(enc, position);

  ++enc->count_;

  if (enc->is_first_frame_) {  // Add this as a key-frame.
    error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped);
    if (error_code != VP8_ENC_OK) goto End;
    assert(frame_skipped == 0);  // First frame can't be skipped, even if empty.
    assert(position == 0 && enc->count_ == 1);
    encoded_frame->is_key_frame_ = 1;
    enc->flush_count_ = 0;
    enc->count_since_key_frame_ = 0;
    enc->prev_candidate_undecided_ = 0;
  } else {
    ++enc->count_since_key_frame_;
    if (enc->count_since_key_frame_ <= enc->options_.kmin) {
      // Add this as a frame rectangle.
      error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      if (frame_skipped) goto Skip;
      encoded_frame->is_key_frame_ = 0;
      enc->flush_count_ = enc->count_ - 1;
      enc->prev_candidate_undecided_ = 0;
    } else {
      int64_t curr_delta;
      FrameRectangle prev_rect_key, prev_rect_sub;

      // Add this as a frame rectangle to enc.
      error_code = SetFrame(enc, config, 0, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      if (frame_skipped) goto Skip;
      prev_rect_sub = enc->prev_rect_;

      // Add this as a key-frame to enc, too.
      error_code = SetFrame(enc, config, 1, encoded_frame, &frame_skipped);
      if (error_code != VP8_ENC_OK) goto End;
      assert(frame_skipped == 0);  // Key-frame cannot be an empty rectangle.
      prev_rect_key = enc->prev_rect_;

      // Analyze size difference of the two variants.
      curr_delta = KeyFramePenalty(encoded_frame);
      if (curr_delta <= enc->best_delta_) {  // Pick this as the key-frame.
        if (enc->keyframe_ != KEYFRAME_NONE) {
          // Demote the previously selected key-frame candidate.
          EncodedFrame* const old_keyframe = GetFrame(enc, enc->keyframe_);
          assert(old_keyframe->is_key_frame_);
          old_keyframe->is_key_frame_ = 0;
        }
        encoded_frame->is_key_frame_ = 1;
        enc->prev_candidate_undecided_ = 1;
        enc->keyframe_ = (int)position;
        enc->best_delta_ = curr_delta;
        enc->flush_count_ = enc->count_ - 1;  // We can flush previous frames.
      } else {
        encoded_frame->is_key_frame_ = 0;
        enc->prev_candidate_undecided_ = 0;
      }
      // Note: We need '>=' below because when kmin and kmax are both zero,
      // count_since_key_frame will always be > kmax.
      if (enc->count_since_key_frame_ >= enc->options_.kmax) {
        enc->flush_count_ = enc->count_ - 1;
        enc->count_since_key_frame_ = 0;
        enc->keyframe_ = KEYFRAME_NONE;
        enc->best_delta_ = DELTA_INFINITY;
      }
      if (!enc->prev_candidate_undecided_) {
        enc->prev_rect_ =
            encoded_frame->is_key_frame_ ? prev_rect_key : prev_rect_sub;
      }
    }
  }

  // Update previous to previous and previous canvases for next call.
  WebPCopyPixels(enc->curr_canvas_, &enc->prev_canvas_);
  enc->is_first_frame_ = 0;

 Skip:
  ok = 1;
  ++enc->in_frame_count_;

 End:
  if (!ok || frame_skipped) {
    FrameRelease(encoded_frame);
    // We reset some counters, as the frame addition failed/was skipped.
    --enc->count_;
    if (!enc->is_first_frame_) --enc->count_since_key_frame_;
    if (!ok) {
      MarkError2(enc, "ERROR adding frame. WebPEncodingError", error_code);
    }
  }
  enc->curr_canvas_->error_code = error_code;  // report error_code
  assert(ok || error_code != VP8_ENC_OK);
  return ok;
}

// Pushes the first 'flush_count_' cached frames into the mux, then compacts
// the cache so a lone remaining frame sits at index 0. Returns false on a
// mux error.
static int FlushFrames(WebPAnimEncoder* const enc) {
  while (enc->flush_count_ > 0) {
    WebPMuxError err;
    EncodedFrame* const curr = GetFrame(enc, 0);
    const WebPMuxFrameInfo* const info =
        curr->is_key_frame_ ? &curr->key_frame_ : &curr->sub_frame_;
    assert(enc->mux_ != NULL);
    err = WebPMuxPushFrame(enc->mux_, info, 1);
    if (err != WEBP_MUX_OK) {
      MarkError2(enc, "ERROR adding frame. WebPMuxError", err);
      return 0;
    }
    if (enc->options_.verbose) {
      fprintf(stderr, "INFO: Added frame. offset:%d,%d dispose:%d blend:%d\n",
              info->x_offset, info->y_offset, info->dispose_method,
              info->blend_method);
    }
    ++enc->out_frame_count_;
    FrameRelease(curr);
    ++enc->start_;
    --enc->flush_count_;
    --enc->count_;
    // Keep the key-frame index relative to the (advanced) start.
    if (enc->keyframe_ != KEYFRAME_NONE) --enc->keyframe_;
  }

  if (enc->count_ == 1 && enc->start_ != 0) {
    // Move enc->start to index 0.
    const int enc_start_tmp = (int)enc->start_;
    EncodedFrame temp = enc->encoded_frames_[0];
    enc->encoded_frames_[0] = enc->encoded_frames_[enc_start_tmp];
    enc->encoded_frames_[enc_start_tmp] = temp;
    FrameRelease(&enc->encoded_frames_[enc_start_tmp]);
    enc->start_ = 0;
  }
  return 1;
}

#undef DELTA_INFINITY
#undef KEYFRAME_NONE

// Public API: adds 'frame' at 'timestamp' (ms). A NULL 'frame' marks the
// last call and only records the final timestamp. Returns false on error;
// details via WebPAnimEncoderGetError().
int WebPAnimEncoderAdd(WebPAnimEncoder* enc, WebPPicture* frame, int timestamp,
                       const WebPConfig* encoder_config) {
  WebPConfig config;
  int ok;

  if (enc == NULL) {
    return 0;
  }
  MarkNoError(enc);

  if (!enc->is_first_frame_) {
    // Make sure timestamps are non-decreasing (integer wrap-around is OK).
    const uint32_t prev_frame_duration =
        (uint32_t)timestamp - enc->prev_timestamp_;
    if (prev_frame_duration >= MAX_DURATION) {
      if (frame != NULL) {
        frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
      }
      MarkError(enc, "ERROR adding frame: timestamps must be non-decreasing");
      return 0;
    }
    if (!IncreasePreviousDuration(enc, (int)prev_frame_duration)) {
      return 0;
    }
  } else {
    enc->first_timestamp_ = timestamp;
  }

  if (frame == NULL) {  // Special: last call.
    enc->got_null_frame_ = 1;
    enc->prev_timestamp_ = timestamp;
    return 1;
  }

  if (frame->width != enc->canvas_width_ ||
      frame->height != enc->canvas_height_) {
    frame->error_code = VP8_ENC_ERROR_INVALID_CONFIGURATION;
    MarkError(enc, "ERROR adding frame: Invalid frame dimensions");
    return 0;
  }

  if (!frame->use_argb) {  // Convert frame from YUV(A) to ARGB.
    if (enc->options_.verbose) {
      fprintf(stderr, "WARNING: Converting frame from YUV(A) to ARGB format; "
              "this incurs a small loss.\n");
    }
    if (!WebPPictureYUVAToARGB(frame)) {
      MarkError(enc, "ERROR converting frame from YUV(A) to ARGB");
      return 0;
    }
  }

  if (encoder_config != NULL) {
    if (!WebPValidateConfig(encoder_config)) {
      MarkError(enc, "ERROR adding frame: Invalid WebPConfig");
      return 0;
    }
    config = *encoder_config;
  } else {
    // Default to lossless when no config is supplied.
    WebPConfigInit(&config);
    config.lossless = 1;
  }
  assert(enc->curr_canvas_ == NULL);
  enc->curr_canvas_ = frame;  // Store reference.
  assert(enc->curr_canvas_copy_modified_ == 1);
  CopyCurrentCanvas(enc);

  ok = CacheFrame(enc, &config) && FlushFrames(enc);

  enc->curr_canvas_ = NULL;
  enc->curr_canvas_copy_modified_ = 1;
  if (ok) {
    enc->prev_timestamp_ = timestamp;
  }
  return ok;
}

// -----------------------------------------------------------------------------
// Bitstream assembly.

// Decodes 'frame' directly into its rectangle on 'canvas' (the rest of the
// canvas is cleared first). Returns false on feature-parse, view or decode
// failure.
static int DecodeFrameOntoCanvas(const WebPMuxFrameInfo* const frame,
                                 WebPPicture* const canvas) {
  const WebPData* const image = &frame->bitstream;
  WebPPicture sub_image;
  WebPDecoderConfig config;
  WebPInitDecoderConfig(&config);
  WebPUtilClearPic(canvas, NULL);
  if (WebPGetFeatures(image->bytes, image->size, &config.input) !=
      VP8_STATUS_OK) {
    return 0;
  }
  if (!WebPPictureView(canvas, frame->x_offset, frame->y_offset,
                       config.input.width, config.input.height, &sub_image)) {
    return 0;
  }
  // Decode straight into the canvas' ARGB buffer via the sub-image view.
  config.output.is_external_memory = 1;
  config.output.colorspace = MODE_BGRA;
  config.output.u.RGBA.rgba = (uint8_t*)sub_image.argb;
  config.output.u.RGBA.stride = sub_image.argb_stride * 4;
  config.output.u.RGBA.size = config.output.u.RGBA.stride * sub_image.height;

  if (WebPDecode(image->bytes, image->size, &config) != VP8_STATUS_OK) {
    return 0;
  }
  return 1;
}

// Re-encodes 'frame' as a full-canvas still image, picking the smaller of
// the cached config and its lossless/lossy counterpart when mixed mode is
// allowed. On success 'full_image' owns the encoded bytes.
static int FrameToFullCanvas(WebPAnimEncoder* const enc,
                             const WebPMuxFrameInfo* const frame,
                             WebPData* const full_image) {
  WebPPicture* const canvas_buf = &enc->curr_canvas_copy_;
  WebPMemoryWriter mem1, mem2;
  WebPMemoryWriterInit(&mem1);
  WebPMemoryWriterInit(&mem2);

  if (!DecodeFrameOntoCanvas(frame, canvas_buf)) goto Err;
  if (!EncodeFrame(&enc->last_config_, canvas_buf, &mem1)) goto Err;
  GetEncodedData(&mem1, full_image);

  if (enc->options_.allow_mixed) {
    if (!EncodeFrame(&enc->last_config_reversed_, canvas_buf, &mem2)) goto Err;
    if (mem2.size < mem1.size) {
      GetEncodedData(&mem2, full_image);
      WebPMemoryWriterClear(&mem1);
    } else {
      WebPMemoryWriterClear(&mem2);
    }
  }
  return 1;

 Err:
  WebPMemoryWriterClear(&mem1);
  WebPMemoryWriterClear(&mem2);
  return 0;
}

// Convert a single-frame animation to a non-animated image if appropriate.
// TODO(urvang): Can we pick one of the two heuristically (based on frame
// rectangle and/or presence of alpha)?
static WebPMuxError OptimizeSingleFrame(WebPAnimEncoder* const enc,
                                        WebPData* const webp_data) {
  WebPMuxError err = WEBP_MUX_OK;
  int canvas_width, canvas_height;
  WebPMuxFrameInfo frame;
  WebPData full_image;
  WebPData webp_data2;
  WebPMux* const mux = WebPMuxCreate(webp_data, 0);
  if (mux == NULL) return WEBP_MUX_BAD_DATA;
  assert(enc->out_frame_count_ == 1);
  WebPDataInit(&frame.bitstream);
  WebPDataInit(&full_image);
  WebPDataInit(&webp_data2);

  err = WebPMuxGetFrame(mux, 1, &frame);
  if (err != WEBP_MUX_OK) goto End;
  if (frame.id != WEBP_CHUNK_ANMF) goto End;  // Non-animation: nothing to do.
  err = WebPMuxGetCanvasSize(mux, &canvas_width, &canvas_height);
  if (err != WEBP_MUX_OK) goto End;
  if (!FrameToFullCanvas(enc, &frame, &full_image)) {
    err = WEBP_MUX_BAD_DATA;
    goto End;
  }
  err = WebPMuxSetImage(mux, &full_image, 1);
  if (err != WEBP_MUX_OK) goto End;
  err = WebPMuxAssemble(mux, &webp_data2);
  if (err != WEBP_MUX_OK) goto End;

  if (webp_data2.size < webp_data->size) {  // Pick 'webp_data2' if smaller.
    WebPDataClear(webp_data);
    *webp_data = webp_data2;
    WebPDataInit(&webp_data2);
  }

 End:
  WebPDataClear(&frame.bitstream);
  WebPDataClear(&full_image);
  WebPMuxDelete(mux);
  WebPDataClear(&webp_data2);
  return err;
}

// Public API: flushes remaining frames and assembles the final WebP
// bitstream into 'webp_data' (caller owns it). Returns false on error;
// details via WebPAnimEncoderGetError().
int WebPAnimEncoderAssemble(WebPAnimEncoder* enc, WebPData* webp_data) {
  WebPMux* mux;
  WebPMuxError err;

  if (enc == NULL) {
    return 0;
  }
  MarkNoError(enc);

  if (webp_data == NULL) {
    MarkError(enc, "ERROR assembling: NULL input");
    return 0;
  }

  if (enc->in_frame_count_ == 0) {
    MarkError(enc, "ERROR: No frames to assemble");
    return 0;
  }

  if (!enc->got_null_frame_ && enc->in_frame_count_ > 1 && enc->count_ > 0) {
    // set duration of the last frame to be avg of durations of previous frames.
    const double delta_time =
        (uint32_t)enc->prev_timestamp_ - enc->first_timestamp_;
    const int average_duration = (int)(delta_time / (enc->in_frame_count_ - 1));
    if (!IncreasePreviousDuration(enc, average_duration)) {
      return 0;
    }
  }

  // Flush any remaining frames.
  enc->flush_count_ = enc->count_;
  if (!FlushFrames(enc)) {
    return 0;
  }

  // Set definitive canvas size.
  mux = enc->mux_;
  err = WebPMuxSetCanvasSize(mux, enc->canvas_width_, enc->canvas_height_);
  if (err != WEBP_MUX_OK) goto Err;

  err = WebPMuxSetAnimationParams(mux, &enc->options_.anim_params);
  if (err != WEBP_MUX_OK) goto Err;

  // Assemble into a WebP bitstream.
  err = WebPMuxAssemble(mux, webp_data);
  if (err != WEBP_MUX_OK) goto Err;

  if (enc->out_frame_count_ == 1) {
    err = OptimizeSingleFrame(enc, webp_data);
    if (err != WEBP_MUX_OK) goto Err;
  }
  return 1;

 Err:
  MarkError2(enc, "ERROR assembling WebP", err);
  return 0;
}

// Public API: returns the last error message recorded on 'enc' (empty string
// if none), or NULL if 'enc' is NULL.
const char* WebPAnimEncoderGetError(WebPAnimEncoder* enc) {
  if (enc == NULL) return NULL;
  return enc->error_str_;
}

// -----------------------------------------------------------------------------