/* Path: blob/main/sys/contrib/zstd/lib/common/zstd_internal.h */
/*
 * Copyright (c) Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CCOMMON_H_MODULE
#define ZSTD_CCOMMON_H_MODULE

/* this module contains definitions which must be identical
 * across compression, decompression and dictBuilder.
 * It also contains a few functions useful to at least 2 of them
 * and which benefit from being inlined */

/*-*************************************
*  Dependencies
***************************************/
#include "compiler.h"
#include "cpu.h"
#include "mem.h"
#include "debug.h"        /* assert, DEBUGLOG, RAWLOG, g_debuglevel */
#include "error_private.h"
#define ZSTD_STATIC_LINKING_ONLY
#include "../zstd.h"
#define FSE_STATIC_LINKING_ONLY
#include "fse.h"
#define HUF_STATIC_LINKING_ONLY
#include "huf.h"
#ifndef XXH_STATIC_LINKING_ONLY
#  define XXH_STATIC_LINKING_ONLY   /* XXH64_state_t */
#endif
#include "xxhash.h"                 /* XXH_reset, update, digest */
#ifndef ZSTD_NO_TRACE
#  include "zstd_trace.h"
#else
#  define ZSTD_TRACE 0
#endif

#if defined (__cplusplus)
extern "C" {
#endif

/* ---- static assert (debug) --- */
#define ZSTD_STATIC_ASSERT(c) DEBUG_STATIC_ASSERT(c)
#define ZSTD_isError ERR_isError   /* for inlining */
#define FSE_isError  ERR_isError
#define HUF_isError  ERR_isError


/*-*************************************
*  shared macros
***************************************/
/* NOTE: classic multiple-evaluation hazard applies; do not pass expressions
 * with side effects to MIN/MAX/BOUNDED. */
#undef MIN
#undef MAX
#define MIN(a,b) ((a)<(b) ? (a) : (b))
#define MAX(a,b) ((a)>(b) ? (a) : (b))
#define BOUNDED(min,val,max) (MAX(min,MIN(val,max)))


/*-*************************************
*  Common constants
***************************************/
#define ZSTD_OPT_NUM    (1<<12)

#define ZSTD_REP_NUM      3                 /* number of repcodes */
static UNUSED_ATTR const U32 repStartValue[ZSTD_REP_NUM] = { 1, 4, 8 };

/* Unit suffixes: written postfix, e.g. `1 KB` expands to `1 *(1 <<10)`. */
#define KB *(1 <<10)
#define MB *(1 <<20)
#define GB *(1U<<30)

#define BIT7 128
#define BIT6  64
#define BIT5  32
#define BIT4  16
#define BIT1   2
#define BIT0   1

#define ZSTD_WINDOWLOG_ABSOLUTEMIN 10
/* Frame-header field widths, indexed by the 2-bit descriptor values. */
static UNUSED_ATTR const size_t ZSTD_fcs_fieldSize[4] = { 0, 2, 4, 8 };
static UNUSED_ATTR const size_t ZSTD_did_fieldSize[4] = { 0, 1, 2, 4 };

#define ZSTD_FRAMEIDSIZE 4   /* magic number size */

#define ZSTD_BLOCKHEADERSIZE 3   /* C standard doesn't allow `static const` variable to be init using another `static const` variable */
static UNUSED_ATTR const size_t ZSTD_blockHeaderSize = ZSTD_BLOCKHEADERSIZE;
typedef enum { bt_raw, bt_rle, bt_compressed, bt_reserved } blockType_e;

#define ZSTD_FRAMECHECKSUMSIZE 4

#define MIN_SEQUENCES_SIZE 1 /* nbSeq==0 */
#define MIN_CBLOCK_SIZE (1 /*litCSize*/ + 1 /* RLE or RAW */ + MIN_SEQUENCES_SIZE /* nbSeq==0 */)   /* for a non-null block */

#define HufLog 12
typedef enum { set_basic, set_rle, set_compressed, set_repeat } symbolEncodingType_e;

#define LONGNBSEQ 0x7F00

#define MINMATCH 3

#define Litbits  8
#define MaxLit ((1<<Litbits) - 1)
#define MaxML   52
#define MaxLL   35
#define DefaultMaxOff 28
#define MaxOff  31
#define MaxSeq MAX(MaxLL, MaxML)   /* Assumption : MaxOff < MaxLL,MaxML */
#define MLFSELog    9
#define LLFSELog    9
#define OffFSELog   8
#define MaxFSELog  MAX(MAX(MLFSELog, LLFSELog), OffFSELog)

#define ZSTD_MAX_HUF_HEADER_SIZE 128 /* header + <= 127 byte tree description */
/* Each table cannot take more than #symbols * FSELog bits */
#define ZSTD_MAX_FSE_HEADERS_SIZE (((MaxML + 1) * MLFSELog + (MaxLL + 1) * LLFSELog + (MaxOff + 1) * OffFSELog + 7) / 8)

/* Extra-bit counts per literal-length code (format-defined; must not change). */
static UNUSED_ATTR const U8 LL_bits[MaxLL+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 6, 7, 8, 9,10,11,12,
    13,14,15,16
};
/* Predefined FSE distribution for literal lengths (negative == low-probability symbol). */
static UNUSED_ATTR const S16 LL_defaultNorm[MaxLL+1] = {
     4, 3, 2, 2, 2, 2, 2, 2,
     2, 2, 2, 2, 2, 1, 1, 1,
     2, 2, 2, 2, 2, 2, 2, 2,
     2, 3, 2, 1, 1, 1, 1, 1,
    -1,-1,-1,-1
};
#define LL_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 LL_defaultNormLog = LL_DEFAULTNORMLOG;

/* Extra-bit counts per match-length code (format-defined; must not change). */
static UNUSED_ATTR const U8 ML_bits[MaxML+1] = {
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     0, 0, 0, 0, 0, 0, 0, 0,
     1, 1, 1, 1, 2, 2, 3, 3,
     4, 4, 5, 7, 8, 9,10,11,
    12,13,14,15,16
};
/* Predefined FSE distribution for match lengths. */
static UNUSED_ATTR const S16 ML_defaultNorm[MaxML+1] = {
     1, 4, 3, 2, 2, 2, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1,-1,-1,
    -1,-1,-1,-1,-1
};
#define ML_DEFAULTNORMLOG 6  /* for static allocation */
static UNUSED_ATTR const U32 ML_defaultNormLog = ML_DEFAULTNORMLOG;

/* Predefined FSE distribution for offset codes. */
static UNUSED_ATTR const S16 OF_defaultNorm[DefaultMaxOff+1] = {
     1, 1, 1, 1, 1, 1, 2, 2,
     2, 1, 1, 1, 1, 1, 1, 1,
     1, 1, 1, 1, 1, 1, 1, 1,
    -1,-1,-1,-1,-1
};
#define OF_DEFAULTNORMLOG 5  /* for static allocation */
static UNUSED_ATTR const U32 OF_defaultNormLog = OF_DEFAULTNORMLOG;


/*-*******************************************
*  Shared functions to include for inlining
*********************************************/
/* Copies exactly 8 bytes, dst and src assumed non-overlapping. */
static void ZSTD_copy8(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON)
    vst1_u8((uint8_t*)dst, vld1_u8((const uint8_t*)src));
#else
    ZSTD_memcpy(dst, src, 8);
#endif
}
#define COPY8(d,s) { ZSTD_copy8(d,s); d+=8; s+=8; }

/* Need to use memmove here since the literal buffer can now be located within
   the dst buffer.
   In circumstances where the op "catches up" to where the
   literal buffer is, there can be partial overlaps in this call on the final
   copy if the literal is being shifted by less than 16 bytes. */
static void ZSTD_copy16(void* dst, const void* src) {
#if defined(ZSTD_ARCH_ARM_NEON)
    vst1q_u8((uint8_t*)dst, vld1q_u8((const uint8_t*)src));
#elif defined(ZSTD_ARCH_X86_SSE2)
    _mm_storeu_si128((__m128i*)dst, _mm_loadu_si128((const __m128i*)src));
#elif defined(__clang__)
    ZSTD_memmove(dst, src, 16);
#else
    /* ZSTD_memmove is not inlined properly by gcc */
    BYTE copy16_buf[16];
    ZSTD_memcpy(copy16_buf, src, 16);
    ZSTD_memcpy(dst, copy16_buf, 16);
#endif
}
#define COPY16(d,s) { ZSTD_copy16(d,s); d+=16; s+=16; }

#define WILDCOPY_OVERLENGTH 32
#define WILDCOPY_VECLEN 16

typedef enum {
    ZSTD_no_overlap,
    ZSTD_overlap_src_before_dst
    /*  ZSTD_overlap_dst_before_src, */
} ZSTD_overlap_e;

/*! ZSTD_wildcopy() :
 *  Custom version of ZSTD_memcpy(), can over read/write up to WILDCOPY_OVERLENGTH bytes (if length==0).
 *  Destination buffers must therefore be over-allocated by WILDCOPY_OVERLENGTH.
 *  @param ovtype controls the overlap detection
 *         - ZSTD_no_overlap: The source and destination are guaranteed to be at least WILDCOPY_VECLEN bytes apart.
 *         - ZSTD_overlap_src_before_dst: The src and dst may overlap, but they MUST be at least 8 bytes apart.
 *           The src buffer must be before the dst buffer.
 */
MEM_STATIC FORCE_INLINE_ATTR
void ZSTD_wildcopy(void* dst, const void* src, ptrdiff_t length, ZSTD_overlap_e const ovtype)
{
    ptrdiff_t diff = (BYTE*)dst - (const BYTE*)src;
    const BYTE* ip = (const BYTE*)src;
    BYTE* op = (BYTE*)dst;
    BYTE* const oend = op + length;

    if (ovtype == ZSTD_overlap_src_before_dst && diff < WILDCOPY_VECLEN) {
        /* Handle short offset copies: 8-byte steps are safe because the
         * overlap contract guarantees src and dst are >= 8 bytes apart. */
        do {
            COPY8(op, ip)
        } while (op < oend);
    } else {
        assert(diff >= WILDCOPY_VECLEN || diff <= -WILDCOPY_VECLEN);
        /* Separate out the first COPY16() call because the copy length is
         * almost certain to be short, so the branches have different
         * probabilities. Since it is almost certain to be short, only do
         * one COPY16() in the first call. Then, do two calls per loop since
         * at that point it is more likely to have a high trip count.
         */
#ifdef __aarch64__
        do {
            COPY16(op, ip);
        }
        while (op < oend);
#else
        ZSTD_copy16(op, ip);
        if (16 >= length) return;
        op += 16;
        ip += 16;
        do {
            COPY16(op, ip);
            COPY16(op, ip);
        }
        while (op < oend);
#endif
    }
}

/* Copies MIN(dstCapacity, srcSize) bytes from src into dst and returns the
 * number of bytes actually copied (a bounded, non-overlapping memcpy). */
MEM_STATIC size_t ZSTD_limitCopy(void* dst, size_t dstCapacity, const void* src, size_t srcSize)
{
    size_t const length = MIN(dstCapacity, srcSize);
    if (length > 0) {
        ZSTD_memcpy(dst, src, length);
    }
    return length;
}

/* define "workspace is too large" as this number of times larger than needed */
#define ZSTD_WORKSPACETOOLARGE_FACTOR 3

/* when workspace is continuously too large
 * during at least this number of times,
 * context's memory usage is considered wasteful,
 * because it's sized to handle a worst case scenario which rarely happens.
 * In which case, resize it down to free some memory */
#define ZSTD_WORKSPACETOOLARGE_MAXDURATION 128

/* Controls whether the input/output buffer is buffered or stable.
 */
typedef enum {
    ZSTD_bm_buffered = 0,  /* Buffer the input/output */
    ZSTD_bm_stable = 1     /* ZSTD_inBuffer/ZSTD_outBuffer is stable */
} ZSTD_bufferMode_e;


/*-*******************************************
*  Private declarations
*********************************************/
/* One stored sequence: (offset code, literal run length, match length). */
typedef struct seqDef_s {
    U32 offBase;   /* offBase == Offset + ZSTD_REP_NUM, or repcode 1,2,3 */
    U16 litLength;
    U16 mlBase;    /* mlBase == matchLength - MINMATCH */
} seqDef;

/* Controls whether seqStore has a single "long" litLength or matchLength. See seqStore_t. */
typedef enum {
    ZSTD_llt_none = 0,             /* no longLengthType */
    ZSTD_llt_literalLength = 1,    /* represents a long literal */
    ZSTD_llt_matchLength = 2       /* represents a long match */
} ZSTD_longLengthType_e;

/* Accumulates the sequences and literals produced while compressing a block. */
typedef struct {
    seqDef* sequencesStart;
    seqDef* sequences;      /* ptr to end of sequences */
    BYTE* litStart;
    BYTE* lit;              /* ptr to end of literals */
    BYTE* llCode;
    BYTE* mlCode;
    BYTE* ofCode;
    size_t maxNbSeq;
    size_t maxNbLit;

    /* longLengthPos and longLengthType to allow us to represent either a single litLength or matchLength
     * in the seqStore that has a value larger than U16 (if it exists). To do so, we increment
     * the existing value of the litLength or matchLength by 0x10000.
     */
    ZSTD_longLengthType_e longLengthType;
    U32                   longLengthPos;  /* Index of the sequence to apply long length modification to */
} seqStore_t;

/* Fully-decoded lengths of one sequence (litLength, and matchLength with MINMATCH restored). */
typedef struct {
    U32 litLength;
    U32 matchLength;
} ZSTD_sequenceLength;

/**
 * Returns the ZSTD_sequenceLength for the given sequences.
 * It handles the decoding of long sequences
 * indicated by longLengthPos and longLengthType, and adds MINMATCH back to matchLength.
 */
MEM_STATIC ZSTD_sequenceLength ZSTD_getSequenceLength(seqStore_t const* seqStore, seqDef const* seq)
{
    ZSTD_sequenceLength seqLen;
    seqLen.litLength = seq->litLength;
    seqLen.matchLength = seq->mlBase + MINMATCH;
    if (seqStore->longLengthPos == (U32)(seq - seqStore->sequencesStart)) {
        /* NOTE(review): seqStore_t documents the long-length offset as 0x10000,
         * but 0xFFFF is added back here — confirm against ZSTD_storeSeq()'s encoding. */
        if (seqStore->longLengthType == ZSTD_llt_literalLength) {
            seqLen.litLength += 0xFFFF;
        }
        if (seqStore->longLengthType == ZSTD_llt_matchLength) {
            seqLen.matchLength += 0xFFFF;
        }
    }
    return seqLen;
}

/**
 * Contains the compressed frame size and an upper-bound for the decompressed frame size.
 * Note: before using `compressedSize`, check for errors using ZSTD_isError().
 *       similarly, before using `decompressedBound`, check for errors using:
 *          `decompressedBound != ZSTD_CONTENTSIZE_ERROR`
 */
typedef struct {
    size_t compressedSize;
    unsigned long long decompressedBound;
} ZSTD_frameSizeInfo;   /* decompress & legacy */

const seqStore_t* ZSTD_getSeqStore(const ZSTD_CCtx* ctx);   /* compress & dictBuilder */
void ZSTD_seqToCodes(const seqStore_t* seqStorePtr);   /* compress, dictBuilder, decodeCorpus (shouldn't get its definition from here) */

/* custom memory allocation functions */
void* ZSTD_customMalloc(size_t size, ZSTD_customMem customMem);
void* ZSTD_customCalloc(size_t size, ZSTD_customMem customMem);
void ZSTD_customFree(void* ptr, ZSTD_customMem customMem);


/* Returns the index of the highest set bit of `val` (val must be non-zero).
 * Dispatches to a compiler intrinsic when available, with a DeBruijn-multiply
 * software fallback. */
MEM_STATIC U32 ZSTD_highbit32(U32 val)   /* compress, dictBuilder, decodeCorpus */
{
    assert(val != 0);
    {
#   if defined(_MSC_VER)   /* Visual */
#       if STATIC_BMI2 == 1
            return _lzcnt_u32(val)^31;
#       else
            if (val != 0) {
                unsigned long r;
                _BitScanReverse(&r, val);
                return (unsigned)r;
            } else {
                /* Should not reach this code path */
                __assume(0);
            }
#       endif
#   elif defined(__GNUC__) && (__GNUC__ >= 3)   /* GCC Intrinsic */
        return __builtin_clz (val) ^ 31;
#   elif defined(__ICCARM__)    /* IAR Intrinsic */
        return 31 - __CLZ(val);
#   else   /* Software version */
        static const U32 DeBruijnClz[32] = { 0, 9, 1, 10, 13, 21, 2, 29, 11, 14, 16, 18, 22, 25, 3, 30, 8, 12, 20, 28, 15, 17, 24, 7, 19, 27, 23, 6, 26, 5, 4, 31 };
        U32 v = val;
        /* Smear the highest set bit downward, then use a DeBruijn multiply
         * to locate it. */
        v |= v >> 1;
        v |= v >> 2;
        v |= v >> 4;
        v |= v >> 8;
        v |= v >> 16;
        return DeBruijnClz[(v * 0x07C4ACDDU) >> 27];
#   endif
    }
}

/**
 * Counts the number of trailing zeros of a `size_t`.
 * Most compilers should support CTZ as a builtin. A backup
 * implementation is provided if the builtin isn't supported, but
 * it may not be terribly efficient.
 */
MEM_STATIC unsigned ZSTD_countTrailingZeros(size_t val)
{
    if (MEM_64bits()) {
#       if defined(_MSC_VER) && defined(_WIN64)
#           if STATIC_BMI2
                return _tzcnt_u64(val);
#           else
                if (val != 0) {
                    unsigned long r;
                    _BitScanForward64(&r, (U64)val);
                    return (unsigned)r;
                } else {
                    /* Should not reach this code path */
                    __assume(0);
                }
#           endif
#       elif defined(__GNUC__) && (__GNUC__ >= 4)
            return __builtin_ctzll((U64)val);
#       else
            /* Software fallback: isolate the lowest set bit with (val & -val),
             * then locate it via a DeBruijn multiply. */
            static const int DeBruijnBytePos[64] = {  0,  1,  2,  7,  3, 13,  8, 19,
                                                      4, 25, 14, 28,  9, 34, 20, 56,
                                                      5, 17, 26, 54, 15, 41, 29, 43,
                                                     10, 31, 38, 35, 21, 45, 49, 57,
                                                     63,  6, 12, 18, 24, 27, 33, 55,
                                                     16, 53, 40, 42, 30, 37, 44, 48,
                                                     62, 11, 23, 32, 52, 39, 36, 47,
                                                     61, 22, 51, 46, 60, 50, 59, 58 };
            return DeBruijnBytePos[((U64)((val & -(long long)val) * 0x0218A392CDABBD3FULL)) >> 58];
#       endif
    } else { /* 32 bits */
#       if defined(_MSC_VER)
            if (val != 0) {
                unsigned long r;
                _BitScanForward(&r, (U32)val);
                return (unsigned)r;
            } else {
                /* Should not reach this code path */
                __assume(0);
            }
#       elif defined(__GNUC__) && (__GNUC__ >= 3)
            return __builtin_ctz((U32)val);
#       else
            /* Software fallback, same DeBruijn technique in 32-bit form. */
            static const int DeBruijnBytePos[32] = {  0,  1, 28,  2, 29, 14, 24,  3,
                                                     30, 22, 20, 15, 25, 17,  4,  8,
                                                     31, 27, 13, 23, 21, 19, 16,  7,
                                                     26, 12, 18,  6, 11,  5, 10,  9 };
            return DeBruijnBytePos[((U32)((val & -(S32)val) * 0x077CB531U)) >> 27];
#       endif
    }
}


/* ZSTD_invalidateRepCodes() :
 * ensures next compression will not use repcodes from previous block.
 * Note : only works with regular variant;
 *        do not use with extDict variant ! */
void ZSTD_invalidateRepCodes(ZSTD_CCtx* cctx);   /* zstdmt, adaptive_compression (shouldn't get this definition from here) */


/* Decoded fields of a 3-byte block header. */
typedef struct {
    blockType_e blockType;
    U32 lastBlock;
    U32 origSize;
} blockProperties_t;   /* declared here for decompress and fullbench */

/*! ZSTD_getcBlockSize() :
 *  Provides the size of compressed block from block header `src` */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_getcBlockSize(const void* src, size_t srcSize,
                          blockProperties_t* bpPtr);

/*! ZSTD_decodeSeqHeaders() :
 *  decode sequence header from src */
/*  Used by: decompress, fullbench (does not get its definition from here) */
size_t ZSTD_decodeSeqHeaders(ZSTD_DCtx* dctx, int* nbSeqPtr,
                             const void* src, size_t srcSize);

/**
 * @returns true iff the CPU supports dynamic BMI2 dispatch.
 * Note: requires both BMI1 and BMI2, since the BMI2 fast paths use BMI1
 * instructions as well.
 */
MEM_STATIC int ZSTD_cpuSupportsBmi2(void)
{
    ZSTD_cpuid_t cpuid = ZSTD_cpuid();
    return ZSTD_cpuid_bmi1(cpuid) && ZSTD_cpuid_bmi2(cpuid);
}

#if defined (__cplusplus)
}
#endif

#endif   /* ZSTD_CCOMMON_H_MODULE */