Path: blob/master/Utilities/cmzstd/lib/compress/zstd_cwksp.h
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and aligneds to align by 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_aligned_init_once,
    ZSTD_cwksp_alloc_aligned,
    ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                            ]
 * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Init once: these buffers need to be initialized at least once before
 *   use. They should be used when we want to skip memory initialization
 *   while not triggering memory checkers (like Valgrind) when reading from
 *   this memory without writing to it first.
 *   These buffers should be used carefully as they might contain data
 *   from previous compressions.
 *   Buffers are aligned to 64 bytes.
 *
 * - Aligned: these buffers don't require any initialization before they're
 *   used. The user of the buffer should make sure they write into a buffer
 *   location before reading from it.
 *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Init once / Tables
 * 3. Aligned / Tables
 * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;
    void* initOnceStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;

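/* Illustrative usage sketch (the names and sizes below are hypothetical, not part
 * of this header): a caller reserves space in the phase order documented above,
 * for example
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);
 *     obj   = ZSTD_cwksp_reserve_object(&ws, objSize);       // 1. Objects
 *     table = ZSTD_cwksp_reserve_table(&ws, tableSize);      // 2.-4. Tables
 *     tmp   = ZSTD_cwksp_reserve_aligned(&ws, alignedSize);  // 3. Aligned
 *     buf   = ZSTD_cwksp_reserve_buffer(&ws, bufferSize);    // 4. Buffers
 *
 * Reserving an earlier category after a later one (say, an object after a
 * buffer) fails, because the workspace only moves forward through its phases.
 */
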
/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void*  ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
    assert(ws->workspace <= ws->initOnceStart);
#if ZSTD_MEMORY_SANITIZER
    {
        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
#if defined(ZSTD_MSAN_PRINT)
        if(offset!=-1) {
            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
        }
#endif
        assert(offset==-1);
    };
#endif
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}

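/* A small worked example (assuming ASAN redzones are disabled; with redzones,
 * the two *_alloc_size results below grow by 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE):
 *     ZSTD_cwksp_align(100, 64)          == 128
 *     ZSTD_cwksp_aligned_alloc_size(100) == 128   // rounded up to the 64-byte grid
 *     ZSTD_cwksp_alloc_size(100)         == 100   // plain buffers are not rounded
 */
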
/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
     * bytes to align the beginning of the tables section and the end of the buffers;
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes < alignBytes);
    return bytes;
}

/**
 * Returns the initial value for allocStart which is used to determine the position from
 * which we can allocate from the end of the workspace.
 */
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
}

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating initOnce / tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);

            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void *const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void *const objectEnd = (BYTE *)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;   /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
                }
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 * This memory has been initialized at least once in the past.
 * This doesn't mean it has been initialized this time, and it might contain data from previous
 * operations.
 * The main usage is for algorithms that might need read access into uninitialized memory.
 * The algorithm must maintain safety under these conditions and must make sure it doesn't
 * leak any of the past data (directly or in side channels).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    if(ptr && ptr < ws->initOnceStart) {
        /* We assume the memory following the current allocation is either:
         * 1. Not usable as initOnce memory (end of workspace)
         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
         * 3. An ASAN redzone, in which case we don't want to write on it
         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
         * Note that we assume here that MSAN and ASAN cannot run at the same time. */
        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
        ws->initOnceStart = ptr;
    }
#if ZSTD_MEMORY_SANITIZER
    assert(__msan_test_shadow(ptr, bytes) == -1);
#endif
    return ptr;
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to re-use them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
    void* alloc;
    void* end;
    void* top;

    /* We can only start allocating tables after we are done reserving space for objects at the
     * start of the workspace */
    if(ws->phase < phase) {
        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
            return NULL;
        }
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1))== 0);
    return alloc;
}

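/* Hypothetical caller-side sketch: a hash table of (1 << hashLog) U32 entries
 * would be requested as
 *     U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, ((size_t)1 << hashLog) * sizeof(U32));
 * For hashLog >= 4 that size is already a multiple of ZSTD_CWKSP_ALIGNMENT_BYTES,
 * which the asserts above require.
 */
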
/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     * Since tableValidEnd space and initOnce space may overlap we don't poison
     * the initOnce portion as it breaks its promise. This means that this poisoning
     * check isn't always applied fully. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            __msan_poison(ws->objectEnd, size);
        } else {
            assert(ws->initOnceStart >= ws->objectEnd);
            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
        }
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

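/* The table-state helpers here and just below are typically used together; a
 * plausible reuse sequence (caller-side sketch, not a fixed API contract) is
 *     ZSTD_cwksp_mark_tables_dirty(ws);   // previous table contents can no longer be trusted
 *     ...
 *     ZSTD_cwksp_clean_tables(ws);        // zero only the portion not already marked clean
 * with ZSTD_cwksp_mark_tables_clean() used when the caller knows the table
 * contents once again satisfy the value bound.
 */
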
MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace except for the areas in which we expect memory re-use
     * without initialization (objects, valid tables area and init once
     * memory). */
    {
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
            __msan_poison(ws->tableValidEnd, size);
        }
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}

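/* A minimal lifecycle sketch for a dynamically allocated workspace (the exact
 * sizes and surrounding error handling are hypothetical):
 *
 *     ZSTD_cwksp ws;
 *     size_t const err = ZSTD_cwksp_create(&ws, neededBytes, ZSTD_defaultCMem);
 *     if (ZSTD_isError(err)) return err;
 *     ...                                      // reserve objects/tables/buffers, compress
 *     ZSTD_cwksp_clear(&ws);                   // reuse the same workspace for the next compression
 *     ...
 *     ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 *
 * A caller-provided buffer instead goes through ZSTD_cwksp_init() with
 * ZSTD_cwksp_static_alloc, and freeing that buffer stays the caller's
 * responsibility.
 */
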
/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
 * actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
    /* We have an alignment space between objects and tables, and between tables and buffers, so we can have up to twice
     * the alignment bytes difference between estimation and actual usage */
    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
           ZSTD_cwksp_used(ws) <= estimatedSpace;
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

#if defined (__cplusplus)
}
#endif

#endif  /* ZSTD_CWKSP_H */