Path: sys/contrib/zstd/lib/compress/zstd_cwksp.h
/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
*  Dependencies
***************************************/
#include "../common/zstd_internal.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
*  Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and aligneds to align by 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
*  Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_buffers,
    ZSTD_cwksp_alloc_aligned
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal data structures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate the process of allocating memory ranges
 * from this workspace for each internal data structure:
 *
 * - These different internal data structures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                         ]
 * [objects][tables ... ->] free space [<- ... aligned][<- ... buffers]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different data structures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Aligned: these buffers are used for various purposes that require 4 byte
 *   alignment, but don't require any initialization before they're used. These
 *   buffers are each aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Buffers
 * 3. Aligned/Tables
 *
 * Attempts to reserve objects of different types out of order will fail; see
 * the usage sketch following the struct below for the intended sequence.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
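/*
 * For illustration, here is a minimal sketch of the intended call sequence.
 * It is not part of the API and the sizes are made up; it only demonstrates
 * the required phase order (objects, then buffers, then aligned/tables):
 *
 *     static size_t mem[(1 << 20) / sizeof(size_t)];  // pointer-aligned backing storage
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, mem, sizeof(mem), ZSTD_cwksp_static_alloc);
 *     {   void* const obj   = ZSTD_cwksp_reserve_object(&ws, 64);         // phase 1
 *         BYTE* const buf   = ZSTD_cwksp_reserve_buffer(&ws, 4096);       // phase 2
 *         U32*  const table = (U32*)ZSTD_cwksp_reserve_table(&ws, 1024);  // phase 3
 *         assert(!ZSTD_cwksp_reserve_failed(&ws));
 *         (void)obj; (void)buf; (void)table;
 *     }
 */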
/*-*************************************
*  Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else does need to go through this, though.
 *
 * Do not use for sizing aligned buffers.
 * Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}
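/*
 * A worked example of the sizing helpers above, assuming the default 128-byte
 * redzone and 64-byte alignment (illustrative only):
 *
 *     ZSTD_cwksp_align(100, 64)          == 128
 *     ZSTD_cwksp_alloc_size(100)         == 100   // plain build
 *     ZSTD_cwksp_alloc_size(100)         == 356   // ASAN build: 100 + 2*128
 *     ZSTD_cwksp_aligned_alloc_size(100) == 128   // plain build
 *     ZSTD_cwksp_aligned_alloc_size(100) == 384   // ASAN build: 128 + 2*128
 */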
/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional n_1=[1, 64] bytes
     * to align the beginning of the tables section, as well as another n_2=[0, 63] bytes
     * to align the beginning of the aligned section.
     *
     * n_1 + n_2 == 64 bytes if the cwksp is freshly allocated, due to tables and
     * aligneds being sized in multiples of 64 bytes.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes != ZSTD_CWKSP_ALIGNMENT_BYTES);
    return bytes;
}
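/*
 * Worked example (illustrative): for a pointer p whose address ends in 0x28,
 * ZSTD_cwksp_bytes_to_align_ptr(p, 64) == (64 - 0x28) & 63 == 24; for a
 * pointer that is already 64-byte aligned it returns 0.
 */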
/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating buffers */
        if (ws->phase < ZSTD_cwksp_alloc_buffers &&
                phase >= ZSTD_cwksp_alloc_buffers) {
            ws->tableValidEnd = ws->objectEnd;
        }

        /* Going from allocating buffers to allocating aligneds/tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned &&
                phase >= ZSTD_cwksp_alloc_aligned) {
            {   /* Align the start of the "aligned" to 64 bytes. Use [1, 64] bytes. */
                size_t const bytesToAlign =
                    ZSTD_CWKSP_ALIGNMENT_BYTES - ZSTD_cwksp_bytes_to_align_ptr(ws->allocStart, ZSTD_CWKSP_ALIGNMENT_BYTES);
                DEBUGLOG(5, "reserving aligned alignment addtl space: %zu", bytesToAlign);
                ZSTD_STATIC_ASSERT((ZSTD_CWKSP_ALIGNMENT_BYTES & (ZSTD_CWKSP_ALIGNMENT_BYTES - 1)) == 0); /* power of 2 */
                RETURN_ERROR_IF(!ZSTD_cwksp_reserve_internal_buffer_space(ws, bytesToAlign),
                                memory_allocation, "aligned phase - alignment initial allocation failed!");
            }
            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void* const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;  /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
        }   }   }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr <= ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            __asan_unpoison_memory_region(alloc, bytes);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}
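/*
 * Illustrative sketch of a typical table reservation, roughly mirroring how
 * the match state sizes its hash table in zstd_compress.c (the hashLog value
 * here is made up):
 *
 *     size_t const hSize = ((size_t)1 << 17) * sizeof(U32);  // assumed hashLog == 17
 *     U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(&ws, hSize);
 */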
/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to reuse them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned;
    void* alloc;
    void* end;
    void* top;

    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
        return NULL;
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}
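/*
 * Sketch of how an enclosing context is carved out of its own workspace,
 * simplified from what ZSTD_initStaticCCtx() does (the `workspace` field
 * name on ZSTD_CCtx is an assumption of this sketch):
 *
 *     ZSTD_cwksp ws;
 *     ZSTD_CCtx* cctx;
 *     ZSTD_cwksp_init(&ws, buffer, bufferSize, ZSTD_cwksp_static_alloc);
 *     cctx = (ZSTD_CCtx*)ZSTD_cwksp_reserve_object(&ws, sizeof(ZSTD_CCtx));
 *     if (cctx == NULL) return NULL;
 *     ZSTD_cwksp_move(&cctx->workspace, &ws);  // cctx now owns its own cwksp
 */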
MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table re-use logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        __msan_poison(ws->objectEnd, size);
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd);
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}
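/*
 * Sketch of the table-reuse lifecycle (illustrative): when the index-bound
 * guarantee described at the top of this file can't be provided for the next
 * compression, the tables are marked dirty, and only the not-yet-clean region
 * is zeroed before use:
 *
 *     ZSTD_cwksp_mark_tables_dirty(&ws);  // table values may exceed the bound
 *     // ... later, before the tables are read ...
 *     ZSTD_cwksp_clean_tables(&ws);       // zeroes only [tableValidEnd, tableEnd)
 */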
/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context re-use logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace (or at least the non-static, non-table parts of it)
     * every time we start a new compression. */
    {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->tableValidEnd;
        __msan_poison(ws->tableValidEnd, size);
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ws->workspaceEnd;
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_buffers) {
        ws->phase = ZSTD_cwksp_alloc_buffers;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
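/*
 * Dynamic lifecycle sketch (illustrative; ZSTD_defaultCMem is the default
 * allocator from zstd_internal.h):
 *
 *     ZSTD_cwksp ws;
 *     if (ZSTD_isError(ZSTD_cwksp_create(&ws, 1 << 20, ZSTD_defaultCMem)))
 *         return 1;                   // OS allocation failed
 *     // ... reserve objects/buffers/tables, run a compression ...
 *     ZSTD_cwksp_clear(&ws);          // invalidate non-object allocations for reuse
 *     ZSTD_cwksp_free(&ws, ZSTD_defaultCMem);
 */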
/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
*  Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable
 * limit of the actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws,
                                                        size_t const estimatedSpace, int resizedWorkspace) {
    if (resizedWorkspace) {
        /* Resized/newly allocated wksp should have exact bounds */
        return ZSTD_cwksp_used(ws) == estimatedSpace;
    } else {
        /* Due to alignment, when reusing a workspace, we can actually consume 63 fewer or more bytes
         * than estimatedSpace. See the comments in ZSTD_cwksp_slack_space_required() for details.
         */
        return (ZSTD_cwksp_used(ws) >= estimatedSpace - 63) && (ZSTD_cwksp_used(ws) <= estimatedSpace + 63);
    }
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */