Path: blob/master/Utilities/cmliblzma/liblzma/common/block_buffer_encoder.c
// SPDX-License-Identifier: 0BSD

///////////////////////////////////////////////////////////////////////////////
//
/// \file       block_buffer_encoder.c
/// \brief      Single-call .xz Block encoder
//
//  Author:     Lasse Collin
//
///////////////////////////////////////////////////////////////////////////////

#include "block_buffer_encoder.h"
#include "block_encoder.h"
#include "filter_encoder.h"
#include "lzma2_encoder.h"
#include "check.h"


/// Estimate the maximum size of the Block Header and Check fields for
/// a Block that uses LZMA2 uncompressed chunks. We could use
/// lzma_block_header_size() but this is simpler.
///
/// Block Header Size + Block Flags + Compressed Size
/// + Uncompressed Size + Filter Flags for LZMA2 + CRC32 + Check
/// and round up to the next multiple of four to take Header Padding
/// into account.
#define HEADERS_BOUND ((1 + 1 + 2 * LZMA_VLI_BYTES_MAX + 3 + 4 \
		+ LZMA_CHECK_SIZE_MAX + 3) & ~3)


static uint64_t
lzma2_bound(uint64_t uncompressed_size)
{
	// Prevent integer overflow in overhead calculation.
	if (uncompressed_size > COMPRESSED_SIZE_MAX)
		return 0;

	// Calculate the exact overhead of the LZMA2 headers: Round
	// uncompressed_size up to the next multiple of LZMA2_CHUNK_MAX,
	// multiply by the size of per-chunk header, and add one byte for
	// the end marker.
	const uint64_t overhead = ((uncompressed_size + LZMA2_CHUNK_MAX - 1)
				/ LZMA2_CHUNK_MAX)
			* LZMA2_HEADER_UNCOMPRESSED + 1;

	// Catch the possible integer overflow.
	if (COMPRESSED_SIZE_MAX - overhead < uncompressed_size)
		return 0;

	return uncompressed_size + overhead;
}


extern uint64_t
lzma_block_buffer_bound64(uint64_t uncompressed_size)
{
	// If the data doesn't compress, we always use uncompressed
	// LZMA2 chunks.
	uint64_t lzma2_size = lzma2_bound(uncompressed_size);
	if (lzma2_size == 0)
		return 0;

	// Take Block Padding into account.
	lzma2_size = (lzma2_size + 3) & ~UINT64_C(3);

	// No risk of integer overflow because lzma2_bound() already takes
	// into account the size of the headers in the Block.
	return HEADERS_BOUND + lzma2_size;
}


extern LZMA_API(size_t)
lzma_block_buffer_bound(size_t uncompressed_size)
{
	uint64_t ret = lzma_block_buffer_bound64(uncompressed_size);

#if SIZE_MAX < UINT64_MAX
	// Catch the possible integer overflow on 32-bit systems.
	if (ret > SIZE_MAX)
		return 0;
#endif

	return ret;
}


static lzma_ret
block_encode_uncompressed(lzma_block *block, const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	// Use LZMA2 uncompressed chunks. We wouldn't need a dictionary at
	// all, but LZMA2 always requires a dictionary, so use the minimum
	// value to minimize memory usage of the decoder.
	lzma_options_lzma lzma2 = {
		.dict_size = LZMA_DICT_SIZE_MIN,
	};

	lzma_filter filters[2];
	filters[0].id = LZMA_FILTER_LZMA2;
	filters[0].options = &lzma2;
	filters[1].id = LZMA_VLI_UNKNOWN;

	// Set the above filter options to *block temporarily so that we can
	// encode the Block Header.
	lzma_filter *filters_orig = block->filters;
	block->filters = filters;

	if (lzma_block_header_size(block) != LZMA_OK) {
		block->filters = filters_orig;
		return LZMA_PROG_ERROR;
	}

	// Check that there's enough output space. The caller has already
	// set block->compressed_size to what lzma2_bound() has returned,
	// so we can reuse that value. We know that compressed_size is a
	// known valid VLI and header_size is a small value so their sum
	// will never overflow.
	assert(block->compressed_size == lzma2_bound(in_size));
	if (out_size - *out_pos
			< block->header_size + block->compressed_size) {
		block->filters = filters_orig;
		return LZMA_BUF_ERROR;
	}

	if (lzma_block_header_encode(block, out + *out_pos) != LZMA_OK) {
		block->filters = filters_orig;
		return LZMA_PROG_ERROR;
	}

	block->filters = filters_orig;
	*out_pos += block->header_size;

	// Encode the data using LZMA2 uncompressed chunks.
	size_t in_pos = 0;
	uint8_t control = 0x01; // Dictionary reset

	while (in_pos < in_size) {
		// Control byte: Indicate uncompressed chunk, of which
		// the first resets the dictionary.
		out[(*out_pos)++] = control;
		control = 0x02; // No dictionary reset

		// Size of the uncompressed chunk
		const size_t copy_size
				= my_min(in_size - in_pos, LZMA2_CHUNK_MAX);
		out[(*out_pos)++] = (copy_size - 1) >> 8;
		out[(*out_pos)++] = (copy_size - 1) & 0xFF;

		// The actual data
		assert(*out_pos + copy_size <= out_size);
		memcpy(out + *out_pos, in + in_pos, copy_size);

		in_pos += copy_size;
		*out_pos += copy_size;
	}

	// End marker
	out[(*out_pos)++] = 0x00;
	assert(*out_pos <= out_size);

	return LZMA_OK;
}


static lzma_ret
block_encode_normal(lzma_block *block, const lzma_allocator *allocator,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	// Find out the size of the Block Header.
	return_if_error(lzma_block_header_size(block));

	// Reserve space for the Block Header and skip it for now.
	if (out_size - *out_pos <= block->header_size)
		return LZMA_BUF_ERROR;

	const size_t out_start = *out_pos;
	*out_pos += block->header_size;

	// Limit out_size so that we stop encoding if the output would grow
	// bigger than what uncompressed Block would be.
	if (out_size - *out_pos > block->compressed_size)
		out_size = *out_pos + block->compressed_size;

	// TODO: In many common cases this could be optimized to use
	// significantly less memory.
	lzma_next_coder raw_encoder = LZMA_NEXT_CODER_INIT;
	lzma_ret ret = lzma_raw_encoder_init(
			&raw_encoder, allocator, block->filters);

	if (ret == LZMA_OK) {
		size_t in_pos = 0;
		ret = raw_encoder.code(raw_encoder.coder, allocator,
				in, &in_pos, in_size, out, out_pos, out_size,
				LZMA_FINISH);
	}

	// NOTE: This needs to be run even if lzma_raw_encoder_init() failed.
	lzma_next_end(&raw_encoder, allocator);

	if (ret == LZMA_STREAM_END) {
		// Compression was successful. Write the Block Header.
		block->compressed_size
				= *out_pos - (out_start + block->header_size);
		ret = lzma_block_header_encode(block, out + out_start);
		if (ret != LZMA_OK)
			ret = LZMA_PROG_ERROR;

	} else if (ret == LZMA_OK) {
		// Output buffer became full.
		ret = LZMA_BUF_ERROR;
	}

	// Reset *out_pos if something went wrong.
	if (ret != LZMA_OK)
		*out_pos = out_start;

	return ret;
}


static lzma_ret
block_buffer_encode(lzma_block *block, const lzma_allocator *allocator,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size,
		bool try_to_compress)
{
	// Validate the arguments.
	if (block == NULL || (in == NULL && in_size != 0) || out == NULL
			|| out_pos == NULL || *out_pos > out_size)
		return LZMA_PROG_ERROR;

	// The contents of the structure may depend on the version so
	// check the version before validating the contents of *block.
	if (block->version > 1)
		return LZMA_OPTIONS_ERROR;

	if ((unsigned int)(block->check) > LZMA_CHECK_ID_MAX
			|| (try_to_compress && block->filters == NULL))
		return LZMA_PROG_ERROR;

	if (!lzma_check_is_supported(block->check))
		return LZMA_UNSUPPORTED_CHECK;

	// Size of a Block has to be a multiple of four, so limit the size
	// here already. This way we don't need to check it again when adding
	// Block Padding.
	out_size -= (out_size - *out_pos) & 3;

	// Get the size of the Check field.
	const size_t check_size = lzma_check_size(block->check);
	assert(check_size != UINT32_MAX);

	// Reserve space for the Check field.
	if (out_size - *out_pos <= check_size)
		return LZMA_BUF_ERROR;

	out_size -= check_size;

	// Initialize block->uncompressed_size and calculate the worst-case
	// value for block->compressed_size.
	block->uncompressed_size = in_size;
	block->compressed_size = lzma2_bound(in_size);
	if (block->compressed_size == 0)
		return LZMA_DATA_ERROR;

	// Do the actual compression.
	lzma_ret ret = LZMA_BUF_ERROR;
	if (try_to_compress)
		ret = block_encode_normal(block, allocator,
				in, in_size, out, out_pos, out_size);

	if (ret != LZMA_OK) {
		// If the error was something else than output buffer
		// becoming full, return the error now.
		if (ret != LZMA_BUF_ERROR)
			return ret;

		// The data was incompressible (at least with the options
		// given to us) or the output buffer was too small. Use the
		// uncompressed chunks of LZMA2 to wrap the data into a valid
		// Block. If we haven't been given enough output space, even
		// this may fail.
		return_if_error(block_encode_uncompressed(block, in, in_size,
				out, out_pos, out_size));
	}

	assert(*out_pos <= out_size);

	// Block Padding. No buffer overflow here, because we already adjusted
	// out_size so that (out_size - out_start) is a multiple of four.
	// Thus, if the buffer is full, the loop body can never run.
	for (size_t i = (size_t)(block->compressed_size); i & 3; ++i) {
		assert(*out_pos < out_size);
		out[(*out_pos)++] = 0x00;
	}

	// If there's no Check field, we are done now.
	if (check_size > 0) {
		// Calculate the integrity check. We reserved space for
		// the Check field earlier so we don't need to check for
		// available output space here.
		lzma_check_state check;
		lzma_check_init(&check, block->check);
		lzma_check_update(&check, block->check, in, in_size);
		lzma_check_finish(&check, block->check);

		memcpy(block->raw_check, check.buffer.u8, check_size);
		memcpy(out + *out_pos, check.buffer.u8, check_size);
		*out_pos += check_size;
	}

	return LZMA_OK;
}


extern LZMA_API(lzma_ret)
lzma_block_buffer_encode(lzma_block *block, const lzma_allocator *allocator,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	return block_buffer_encode(block, allocator,
			in, in_size, out, out_pos, out_size, true);
}


#ifdef HAVE_SYMBOL_VERSIONS_LINUX
// This is for compatibility with binaries linked against liblzma that
// has been patched with xz-5.2.2-compat-libs.patch from RHEL/CentOS 7.
LZMA_SYMVER_API("lzma_block_uncomp_encode@XZ_5.2.2",
	lzma_ret, lzma_block_uncomp_encode_522)(lzma_block *block,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
		lzma_nothrow lzma_attr_warn_unused_result
		__attribute__((__alias__("lzma_block_uncomp_encode_52")));

LZMA_SYMVER_API("lzma_block_uncomp_encode@@XZ_5.2",
	lzma_ret, lzma_block_uncomp_encode_52)(lzma_block *block,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
		lzma_nothrow lzma_attr_warn_unused_result;

#define lzma_block_uncomp_encode lzma_block_uncomp_encode_52
#endif
extern LZMA_API(lzma_ret)
lzma_block_uncomp_encode(lzma_block *block,
		const uint8_t *in, size_t in_size,
		uint8_t *out, size_t *out_pos, size_t out_size)
{
	// It won't allocate any memory from heap so no need
	// for lzma_allocator.
	return block_buffer_encode(block, NULL,
			in, in_size, out, out_pos, out_size, false);
}
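Usage note (not part of the vendored file above): a minimal caller-side sketch of the single-call API this file implements, using only public <lzma.h> calls. The helper name encode_one_block, the CRC64 choice, and the error codes returned for the overflow and malloc cases are illustrative assumptions, not anything defined by liblzma.

#include <lzma.h>
#include <stdlib.h>

// Hypothetical helper: wrap `data` into one .xz Block. On success the
// caller owns *out_buf and *out_used holds the number of bytes written.
static lzma_ret
encode_one_block(const uint8_t *data, size_t data_size,
		uint8_t **out_buf, size_t *out_used)
{
	// Preset options for the LZMA2 filter.
	lzma_options_lzma opt;
	if (lzma_lzma_preset(&opt, LZMA_PRESET_DEFAULT))
		return LZMA_OPTIONS_ERROR;

	lzma_filter filters[2] = {
		{ .id = LZMA_FILTER_LZMA2, .options = &opt },
		{ .id = LZMA_VLI_UNKNOWN },
	};

	// The caller sets version, check, and filters; the encoder fills in
	// compressed_size, uncompressed_size, and the check value.
	lzma_block block = {
		.version = 0,
		.check = LZMA_CHECK_CRC64,
		.filters = filters,
	};

	// lzma_block_buffer_bound() is always enough, even for
	// incompressible input (uncompressed LZMA2 chunks are used then).
	// A return value of 0 means the input is too large; the error code
	// chosen here is arbitrary for this sketch.
	const size_t bound = lzma_block_buffer_bound(data_size);
	if (bound == 0)
		return LZMA_MEM_ERROR;

	uint8_t *buf = malloc(bound);
	if (buf == NULL)
		return LZMA_MEM_ERROR;

	size_t pos = 0;
	const lzma_ret ret = lzma_block_buffer_encode(
			&block, NULL, data, data_size, buf, &pos, bound);
	if (ret != LZMA_OK) {
		free(buf);
		return ret;
	}

	*out_buf = buf;
	*out_used = pos;
	return LZMA_OK;
}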