/* Path: Utilities/cmzstd/lib/dictBuilder/zdict.c */
/*1* Copyright (c) Meta Platforms, Inc. and affiliates.2* All rights reserved.3*4* This source code is licensed under both the BSD-style license (found in the5* LICENSE file in the root directory of this source tree) and the GPLv2 (found6* in the COPYING file in the root directory of this source tree).7* You may select, at your option, one of the above-listed licenses.8*/91011/*-**************************************12* Tuning parameters13****************************************/14#define MINRATIO 4 /* minimum nb of apparition to be selected in dictionary */15#define ZDICT_MAX_SAMPLES_SIZE (2000U << 20)16#define ZDICT_MIN_SAMPLES_SIZE (ZDICT_CONTENTSIZE_MIN * MINRATIO)171819/*-**************************************20* Compiler Options21****************************************/22/* Unix Large Files support (>4GB) */23#define _FILE_OFFSET_BITS 6424#if (defined(__sun__) && (!defined(__LP64__))) /* Sun Solaris 32-bits requires specific definitions */25# ifndef _LARGEFILE_SOURCE26# define _LARGEFILE_SOURCE27# endif28#elif ! 
defined(__LP64__) /* No point defining Large file for 64 bit */29# ifndef _LARGEFILE64_SOURCE30# define _LARGEFILE64_SOURCE31# endif32#endif333435/*-*************************************36* Dependencies37***************************************/38#include <stdlib.h> /* malloc, free */39#include <string.h> /* memset */40#include <stdio.h> /* fprintf, fopen, ftello64 */41#include <time.h> /* clock */4243#ifndef ZDICT_STATIC_LINKING_ONLY44# define ZDICT_STATIC_LINKING_ONLY45#endif4647#include "../common/mem.h" /* read */48#include "../common/fse.h" /* FSE_normalizeCount, FSE_writeNCount */49#include "../common/huf.h" /* HUF_buildCTable, HUF_writeCTable */50#include "../common/zstd_internal.h" /* includes zstd.h */51#include "../common/xxhash.h" /* XXH64 */52#include "../compress/zstd_compress_internal.h" /* ZSTD_loadCEntropy() */53#include "../zdict.h"54#include "divsufsort.h"55#include "../common/bits.h" /* ZSTD_NbCommonBytes */565758/*-*************************************59* Constants60***************************************/61#define KB *(1 <<10)62#define MB *(1 <<20)63#define GB *(1U<<30)6465#define DICTLISTSIZE_DEFAULT 100006667#define NOISELENGTH 326869static const U32 g_selectivity_default = 9;707172/*-*************************************73* Console display74***************************************/75#undef DISPLAY76#define DISPLAY(...) do { fprintf(stderr, __VA_ARGS__); fflush( stderr ); } while (0)77#undef DISPLAYLEVEL78#define DISPLAYLEVEL(l, ...) 
do { if (notificationLevel>=l) { DISPLAY(__VA_ARGS__); } } while (0) /* 0 : no display; 1: errors; 2: default; 3: details; 4: debug */7980static clock_t ZDICT_clockSpan(clock_t nPrevious) { return clock() - nPrevious; }8182static void ZDICT_printHex(const void* ptr, size_t length)83{84const BYTE* const b = (const BYTE*)ptr;85size_t u;86for (u=0; u<length; u++) {87BYTE c = b[u];88if (c<32 || c>126) c = '.'; /* non-printable char */89DISPLAY("%c", c);90}91}929394/*-********************************************************95* Helper functions96**********************************************************/97unsigned ZDICT_isError(size_t errorCode) { return ERR_isError(errorCode); }9899const char* ZDICT_getErrorName(size_t errorCode) { return ERR_getErrorName(errorCode); }100101unsigned ZDICT_getDictID(const void* dictBuffer, size_t dictSize)102{103if (dictSize < 8) return 0;104if (MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return 0;105return MEM_readLE32((const char*)dictBuffer + 4);106}107108size_t ZDICT_getDictHeaderSize(const void* dictBuffer, size_t dictSize)109{110size_t headerSize;111if (dictSize <= 8 || MEM_readLE32(dictBuffer) != ZSTD_MAGIC_DICTIONARY) return ERROR(dictionary_corrupted);112113{ ZSTD_compressedBlockState_t* bs = (ZSTD_compressedBlockState_t*)malloc(sizeof(ZSTD_compressedBlockState_t));114U32* wksp = (U32*)malloc(HUF_WORKSPACE_SIZE);115if (!bs || !wksp) {116headerSize = ERROR(memory_allocation);117} else {118ZSTD_reset_compressedBlockState(bs);119headerSize = ZSTD_loadCEntropy(bs, wksp, dictBuffer, dictSize);120}121122free(bs);123free(wksp);124}125126return headerSize;127}128129/*-********************************************************130* Dictionary training functions131**********************************************************/132/*! 
ZDICT_count() :133Count the nb of common bytes between 2 pointers.134Note : this function presumes end of buffer followed by noisy guard band.135*/136static size_t ZDICT_count(const void* pIn, const void* pMatch)137{138const char* const pStart = (const char*)pIn;139for (;;) {140size_t const diff = MEM_readST(pMatch) ^ MEM_readST(pIn);141if (!diff) {142pIn = (const char*)pIn+sizeof(size_t);143pMatch = (const char*)pMatch+sizeof(size_t);144continue;145}146pIn = (const char*)pIn+ZSTD_NbCommonBytes(diff);147return (size_t)((const char*)pIn - pStart);148}149}150151152typedef struct {153U32 pos;154U32 length;155U32 savings;156} dictItem;157158static void ZDICT_initDictItem(dictItem* d)159{160d->pos = 1;161d->length = 0;162d->savings = (U32)(-1);163}164165166#define LLIMIT 64 /* heuristic determined experimentally */167#define MINMATCHLENGTH 7 /* heuristic determined experimentally */168static dictItem ZDICT_analyzePos(169BYTE* doneMarks,170const int* suffix, U32 start,171const void* buffer, U32 minRatio, U32 notificationLevel)172{173U32 lengthList[LLIMIT] = {0};174U32 cumulLength[LLIMIT] = {0};175U32 savings[LLIMIT] = {0};176const BYTE* b = (const BYTE*)buffer;177size_t maxLength = LLIMIT;178size_t pos = (size_t)suffix[start];179U32 end = start;180dictItem solution;181182/* init */183memset(&solution, 0, sizeof(solution));184doneMarks[pos] = 1;185186/* trivial repetition cases */187if ( (MEM_read16(b+pos+0) == MEM_read16(b+pos+2))188||(MEM_read16(b+pos+1) == MEM_read16(b+pos+3))189||(MEM_read16(b+pos+2) == MEM_read16(b+pos+4)) ) {190/* skip and mark segment */191U16 const pattern16 = MEM_read16(b+pos+4);192U32 u, patternEnd = 6;193while (MEM_read16(b+pos+patternEnd) == pattern16) patternEnd+=2 ;194if (b[pos+patternEnd] == b[pos+patternEnd-1]) patternEnd++;195for (u=1; u<patternEnd; u++)196doneMarks[pos+u] = 1;197return solution;198}199200/* look forward */201{ size_t length;202do {203end++;204length = ZDICT_count(b + pos, b + suffix[end]);205} while (length >= 
MINMATCHLENGTH);206}207208/* look backward */209{ size_t length;210do {211length = ZDICT_count(b + pos, b + *(suffix+start-1));212if (length >=MINMATCHLENGTH) start--;213} while(length >= MINMATCHLENGTH);214}215216/* exit if not found a minimum nb of repetitions */217if (end-start < minRatio) {218U32 idx;219for(idx=start; idx<end; idx++)220doneMarks[suffix[idx]] = 1;221return solution;222}223224{ int i;225U32 mml;226U32 refinedStart = start;227U32 refinedEnd = end;228229DISPLAYLEVEL(4, "\n");230DISPLAYLEVEL(4, "found %3u matches of length >= %i at pos %7u ", (unsigned)(end-start), MINMATCHLENGTH, (unsigned)pos);231DISPLAYLEVEL(4, "\n");232233for (mml = MINMATCHLENGTH ; ; mml++) {234BYTE currentChar = 0;235U32 currentCount = 0;236U32 currentID = refinedStart;237U32 id;238U32 selectedCount = 0;239U32 selectedID = currentID;240for (id =refinedStart; id < refinedEnd; id++) {241if (b[suffix[id] + mml] != currentChar) {242if (currentCount > selectedCount) {243selectedCount = currentCount;244selectedID = currentID;245}246currentID = id;247currentChar = b[ suffix[id] + mml];248currentCount = 0;249}250currentCount ++;251}252if (currentCount > selectedCount) { /* for last */253selectedCount = currentCount;254selectedID = currentID;255}256257if (selectedCount < minRatio)258break;259refinedStart = selectedID;260refinedEnd = refinedStart + selectedCount;261}262263/* evaluate gain based on new dict */264start = refinedStart;265pos = suffix[refinedStart];266end = start;267memset(lengthList, 0, sizeof(lengthList));268269/* look forward */270{ size_t length;271do {272end++;273length = ZDICT_count(b + pos, b + suffix[end]);274if (length >= LLIMIT) length = LLIMIT-1;275lengthList[length]++;276} while (length >=MINMATCHLENGTH);277}278279/* look backward */280{ size_t length = MINMATCHLENGTH;281while ((length >= MINMATCHLENGTH) & (start > 0)) {282length = ZDICT_count(b + pos, b + suffix[start - 1]);283if (length >= LLIMIT) length = LLIMIT - 1;284lengthList[length]++;285if (length >= 
MINMATCHLENGTH) start--;286}287}288289/* largest useful length */290memset(cumulLength, 0, sizeof(cumulLength));291cumulLength[maxLength-1] = lengthList[maxLength-1];292for (i=(int)(maxLength-2); i>=0; i--)293cumulLength[i] = cumulLength[i+1] + lengthList[i];294295for (i=LLIMIT-1; i>=MINMATCHLENGTH; i--) if (cumulLength[i]>=minRatio) break;296maxLength = i;297298/* reduce maxLength in case of final into repetitive data */299{ U32 l = (U32)maxLength;300BYTE const c = b[pos + maxLength-1];301while (b[pos+l-2]==c) l--;302maxLength = l;303}304if (maxLength < MINMATCHLENGTH) return solution; /* skip : no long-enough solution */305306/* calculate savings */307savings[5] = 0;308for (i=MINMATCHLENGTH; i<=(int)maxLength; i++)309savings[i] = savings[i-1] + (lengthList[i] * (i-3));310311DISPLAYLEVEL(4, "Selected dict at position %u, of length %u : saves %u (ratio: %.2f) \n",312(unsigned)pos, (unsigned)maxLength, (unsigned)savings[maxLength], (double)savings[maxLength] / (double)maxLength);313314solution.pos = (U32)pos;315solution.length = (U32)maxLength;316solution.savings = savings[maxLength];317318/* mark positions done */319{ U32 id;320for (id=start; id<end; id++) {321U32 p, pEnd, length;322U32 const testedPos = (U32)suffix[id];323if (testedPos == pos)324length = solution.length;325else {326length = (U32)ZDICT_count(b+pos, b+testedPos);327if (length > solution.length) length = solution.length;328}329pEnd = (U32)(testedPos + length);330for (p=testedPos; p<pEnd; p++)331doneMarks[p] = 1;332} } }333334return solution;335}336337338static int isIncluded(const void* in, const void* container, size_t length)339{340const char* const ip = (const char*) in;341const char* const into = (const char*) container;342size_t u;343344for (u=0; u<length; u++) { /* works because end of buffer is a noisy guard band */345if (ip[u] != into[u]) break;346}347348return u==length;349}350351/*! 
ZDICT_tryMerge() :352check if dictItem can be merged, do it if possible353@return : id of destination elt, 0 if not merged354*/355static U32 ZDICT_tryMerge(dictItem* table, dictItem elt, U32 eltNbToSkip, const void* buffer)356{357const U32 tableSize = table->pos;358const U32 eltEnd = elt.pos + elt.length;359const char* const buf = (const char*) buffer;360361/* tail overlap */362U32 u; for (u=1; u<tableSize; u++) {363if (u==eltNbToSkip) continue;364if ((table[u].pos > elt.pos) && (table[u].pos <= eltEnd)) { /* overlap, existing > new */365/* append */366U32 const addedLength = table[u].pos - elt.pos;367table[u].length += addedLength;368table[u].pos = elt.pos;369table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */370table[u].savings += elt.length / 8; /* rough approx bonus */371elt = table[u];372/* sort : improve rank */373while ((u>1) && (table[u-1].savings < elt.savings))374table[u] = table[u-1], u--;375table[u] = elt;376return u;377} }378379/* front overlap */380for (u=1; u<tableSize; u++) {381if (u==eltNbToSkip) continue;382383if ((table[u].pos + table[u].length >= elt.pos) && (table[u].pos < elt.pos)) { /* overlap, existing < new */384/* append */385int const addedLength = (int)eltEnd - (int)(table[u].pos + table[u].length);386table[u].savings += elt.length / 8; /* rough approx bonus */387if (addedLength > 0) { /* otherwise, elt fully included into existing */388table[u].length += addedLength;389table[u].savings += elt.savings * addedLength / elt.length; /* rough approx */390}391/* sort : improve rank */392elt = table[u];393while ((u>1) && (table[u-1].savings < elt.savings))394table[u] = table[u-1], u--;395table[u] = elt;396return u;397}398399if (MEM_read64(buf + table[u].pos) == MEM_read64(buf + elt.pos + 1)) {400if (isIncluded(buf + table[u].pos, buf + elt.pos + 1, table[u].length)) {401size_t const addedLength = MAX( (int)elt.length - (int)table[u].length , 1 );402table[u].pos = elt.pos;403table[u].savings += (U32)(elt.savings * 
addedLength / elt.length);404table[u].length = MIN(elt.length, table[u].length + 1);405return u;406}407}408}409410return 0;411}412413414static void ZDICT_removeDictItem(dictItem* table, U32 id)415{416/* convention : table[0].pos stores nb of elts */417U32 const max = table[0].pos;418U32 u;419if (!id) return; /* protection, should never happen */420for (u=id; u<max-1; u++)421table[u] = table[u+1];422table->pos--;423}424425426static void ZDICT_insertDictItem(dictItem* table, U32 maxSize, dictItem elt, const void* buffer)427{428/* merge if possible */429U32 mergeId = ZDICT_tryMerge(table, elt, 0, buffer);430if (mergeId) {431U32 newMerge = 1;432while (newMerge) {433newMerge = ZDICT_tryMerge(table, table[mergeId], mergeId, buffer);434if (newMerge) ZDICT_removeDictItem(table, mergeId);435mergeId = newMerge;436}437return;438}439440/* insert */441{ U32 current;442U32 nextElt = table->pos;443if (nextElt >= maxSize) nextElt = maxSize-1;444current = nextElt-1;445while (table[current].savings < elt.savings) {446table[current+1] = table[current];447current--;448}449table[current+1] = elt;450table->pos = nextElt+1;451}452}453454455static U32 ZDICT_dictSize(const dictItem* dictList)456{457U32 u, dictSize = 0;458for (u=1; u<dictList[0].pos; u++)459dictSize += dictList[u].length;460return dictSize;461}462463464static size_t ZDICT_trainBuffer_legacy(dictItem* dictList, U32 dictListSize,465const void* const buffer, size_t bufferSize, /* buffer must end with noisy guard band */466const size_t* fileSizes, unsigned nbFiles,467unsigned minRatio, U32 notificationLevel)468{469int* const suffix0 = (int*)malloc((bufferSize+2)*sizeof(*suffix0));470int* const suffix = suffix0+1;471U32* reverseSuffix = (U32*)malloc((bufferSize)*sizeof(*reverseSuffix));472BYTE* doneMarks = (BYTE*)malloc((bufferSize+16)*sizeof(*doneMarks)); /* +16 for overflow security */473U32* filePos = (U32*)malloc(nbFiles * sizeof(*filePos));474size_t result = 0;475clock_t displayClock = 0;476clock_t const refreshRate = 
CLOCKS_PER_SEC * 3 / 10;477478# undef DISPLAYUPDATE479# define DISPLAYUPDATE(l, ...) \480do { \481if (notificationLevel>=l) { \482if (ZDICT_clockSpan(displayClock) > refreshRate) { \483displayClock = clock(); \484DISPLAY(__VA_ARGS__); \485} \486if (notificationLevel>=4) fflush(stderr); \487} \488} while (0)489490/* init */491DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */492if (!suffix0 || !reverseSuffix || !doneMarks || !filePos) {493result = ERROR(memory_allocation);494goto _cleanup;495}496if (minRatio < MINRATIO) minRatio = MINRATIO;497memset(doneMarks, 0, bufferSize+16);498499/* limit sample set size (divsufsort limitation)*/500if (bufferSize > ZDICT_MAX_SAMPLES_SIZE) DISPLAYLEVEL(3, "sample set too large : reduced to %u MB ...\n", (unsigned)(ZDICT_MAX_SAMPLES_SIZE>>20));501while (bufferSize > ZDICT_MAX_SAMPLES_SIZE) bufferSize -= fileSizes[--nbFiles];502503/* sort */504DISPLAYLEVEL(2, "sorting %u files of total size %u MB ...\n", nbFiles, (unsigned)(bufferSize>>20));505{ int const divSuftSortResult = divsufsort((const unsigned char*)buffer, suffix, (int)bufferSize, 0);506if (divSuftSortResult != 0) { result = ERROR(GENERIC); goto _cleanup; }507}508suffix[bufferSize] = (int)bufferSize; /* leads into noise */509suffix0[0] = (int)bufferSize; /* leads into noise */510/* build reverse suffix sort */511{ size_t pos;512for (pos=0; pos < bufferSize; pos++)513reverseSuffix[suffix[pos]] = (U32)pos;514/* note filePos tracks borders between samples.515It's not used at this stage, but planned to become useful in a later update */516filePos[0] = 0;517for (pos=1; pos<nbFiles; pos++)518filePos[pos] = (U32)(filePos[pos-1] + fileSizes[pos-1]);519}520521DISPLAYLEVEL(2, "finding patterns ... 
\n");522DISPLAYLEVEL(3, "minimum ratio : %u \n", minRatio);523524{ U32 cursor; for (cursor=0; cursor < bufferSize; ) {525dictItem solution;526if (doneMarks[cursor]) { cursor++; continue; }527solution = ZDICT_analyzePos(doneMarks, suffix, reverseSuffix[cursor], buffer, minRatio, notificationLevel);528if (solution.length==0) { cursor++; continue; }529ZDICT_insertDictItem(dictList, dictListSize, solution, buffer);530cursor += solution.length;531DISPLAYUPDATE(2, "\r%4.2f %% \r", (double)cursor / (double)bufferSize * 100.0);532} }533534_cleanup:535free(suffix0);536free(reverseSuffix);537free(doneMarks);538free(filePos);539return result;540}541542543static void ZDICT_fillNoise(void* buffer, size_t length)544{545unsigned const prime1 = 2654435761U;546unsigned const prime2 = 2246822519U;547unsigned acc = prime1;548size_t p=0;549for (p=0; p<length; p++) {550acc *= prime2;551((unsigned char*)buffer)[p] = (unsigned char)(acc >> 21);552}553}554555556typedef struct557{558ZSTD_CDict* dict; /* dictionary */559ZSTD_CCtx* zc; /* working context */560void* workPlace; /* must be ZSTD_BLOCKSIZE_MAX allocated */561} EStats_ress_t;562563#define MAXREPOFFSET 1024564565static void ZDICT_countEStats(EStats_ress_t esr, const ZSTD_parameters* params,566unsigned* countLit, unsigned* offsetcodeCount, unsigned* matchlengthCount, unsigned* litlengthCount, U32* repOffsets,567const void* src, size_t srcSize,568U32 notificationLevel)569{570size_t const blockSizeMax = MIN (ZSTD_BLOCKSIZE_MAX, 1 << params->cParams.windowLog);571size_t cSize;572573if (srcSize > blockSizeMax) srcSize = blockSizeMax; /* protection vs large samples */574{ size_t const errorCode = ZSTD_compressBegin_usingCDict_deprecated(esr.zc, esr.dict);575if (ZSTD_isError(errorCode)) { DISPLAYLEVEL(1, "warning : ZSTD_compressBegin_usingCDict failed \n"); return; }576577}578cSize = ZSTD_compressBlock_deprecated(esr.zc, esr.workPlace, ZSTD_BLOCKSIZE_MAX, src, srcSize);579if (ZSTD_isError(cSize)) { DISPLAYLEVEL(3, "warning : could not 
compress sample size %u \n", (unsigned)srcSize); return; }580581if (cSize) { /* if == 0; block is not compressible */582const SeqStore_t* const seqStorePtr = ZSTD_getSeqStore(esr.zc);583584/* literals stats */585{ const BYTE* bytePtr;586for(bytePtr = seqStorePtr->litStart; bytePtr < seqStorePtr->lit; bytePtr++)587countLit[*bytePtr]++;588}589590/* seqStats */591{ U32 const nbSeq = (U32)(seqStorePtr->sequences - seqStorePtr->sequencesStart);592ZSTD_seqToCodes(seqStorePtr);593594{ const BYTE* codePtr = seqStorePtr->ofCode;595U32 u;596for (u=0; u<nbSeq; u++) offsetcodeCount[codePtr[u]]++;597}598599{ const BYTE* codePtr = seqStorePtr->mlCode;600U32 u;601for (u=0; u<nbSeq; u++) matchlengthCount[codePtr[u]]++;602}603604{ const BYTE* codePtr = seqStorePtr->llCode;605U32 u;606for (u=0; u<nbSeq; u++) litlengthCount[codePtr[u]]++;607}608609if (nbSeq >= 2) { /* rep offsets */610const SeqDef* const seq = seqStorePtr->sequencesStart;611U32 offset1 = seq[0].offBase - ZSTD_REP_NUM;612U32 offset2 = seq[1].offBase - ZSTD_REP_NUM;613if (offset1 >= MAXREPOFFSET) offset1 = 0;614if (offset2 >= MAXREPOFFSET) offset2 = 0;615repOffsets[offset1] += 3;616repOffsets[offset2] += 1;617} } }618}619620static size_t ZDICT_totalSampleSize(const size_t* fileSizes, unsigned nbFiles)621{622size_t total=0;623unsigned u;624for (u=0; u<nbFiles; u++) total += fileSizes[u];625return total;626}627628typedef struct { U32 offset; U32 count; } offsetCount_t;629630static void ZDICT_insertSortCount(offsetCount_t table[ZSTD_REP_NUM+1], U32 val, U32 count)631{632U32 u;633table[ZSTD_REP_NUM].offset = val;634table[ZSTD_REP_NUM].count = count;635for (u=ZSTD_REP_NUM; u>0; u--) {636offsetCount_t tmp;637if (table[u-1].count >= table[u].count) break;638tmp = table[u-1];639table[u-1] = table[u];640table[u] = tmp;641}642}643644/* ZDICT_flatLit() :645* rewrite `countLit` to contain a mostly flat but still compressible distribution of literals.646* necessary to avoid generating a non-compressible distribution that 
HUF_writeCTable() cannot encode.647*/648static void ZDICT_flatLit(unsigned* countLit)649{650int u;651for (u=1; u<256; u++) countLit[u] = 2;652countLit[0] = 4;653countLit[253] = 1;654countLit[254] = 1;655}656657#define OFFCODE_MAX 30 /* only applicable to first block */658static size_t ZDICT_analyzeEntropy(void* dstBuffer, size_t maxDstSize,659int compressionLevel,660const void* srcBuffer, const size_t* fileSizes, unsigned nbFiles,661const void* dictBuffer, size_t dictBufferSize,662unsigned notificationLevel)663{664unsigned countLit[256];665HUF_CREATE_STATIC_CTABLE(hufTable, 255);666unsigned offcodeCount[OFFCODE_MAX+1];667short offcodeNCount[OFFCODE_MAX+1];668U32 offcodeMax = ZSTD_highbit32((U32)(dictBufferSize + 128 KB));669unsigned matchLengthCount[MaxML+1];670short matchLengthNCount[MaxML+1];671unsigned litLengthCount[MaxLL+1];672short litLengthNCount[MaxLL+1];673U32 repOffset[MAXREPOFFSET];674offsetCount_t bestRepOffset[ZSTD_REP_NUM+1];675EStats_ress_t esr = { NULL, NULL, NULL };676ZSTD_parameters params;677U32 u, huffLog = 11, Offlog = OffFSELog, mlLog = MLFSELog, llLog = LLFSELog, total;678size_t pos = 0, errorCode;679size_t eSize = 0;680size_t const totalSrcSize = ZDICT_totalSampleSize(fileSizes, nbFiles);681size_t const averageSampleSize = totalSrcSize / (nbFiles + !nbFiles);682BYTE* dstPtr = (BYTE*)dstBuffer;683U32 wksp[HUF_CTABLE_WORKSPACE_SIZE_U32];684685/* init */686DEBUGLOG(4, "ZDICT_analyzeEntropy");687if (offcodeMax>OFFCODE_MAX) { eSize = ERROR(dictionaryCreation_failed); goto _cleanup; } /* too large dictionary */688for (u=0; u<256; u++) countLit[u] = 1; /* any character must be described */689for (u=0; u<=offcodeMax; u++) offcodeCount[u] = 1;690for (u=0; u<=MaxML; u++) matchLengthCount[u] = 1;691for (u=0; u<=MaxLL; u++) litLengthCount[u] = 1;692memset(repOffset, 0, sizeof(repOffset));693repOffset[1] = repOffset[4] = repOffset[8] = 1;694memset(bestRepOffset, 0, sizeof(bestRepOffset));695if (compressionLevel==0) compressionLevel = 
ZSTD_CLEVEL_DEFAULT;696params = ZSTD_getParams(compressionLevel, averageSampleSize, dictBufferSize);697698esr.dict = ZSTD_createCDict_advanced(dictBuffer, dictBufferSize, ZSTD_dlm_byRef, ZSTD_dct_rawContent, params.cParams, ZSTD_defaultCMem);699esr.zc = ZSTD_createCCtx();700esr.workPlace = malloc(ZSTD_BLOCKSIZE_MAX);701if (!esr.dict || !esr.zc || !esr.workPlace) {702eSize = ERROR(memory_allocation);703DISPLAYLEVEL(1, "Not enough memory \n");704goto _cleanup;705}706707/* collect stats on all samples */708for (u=0; u<nbFiles; u++) {709ZDICT_countEStats(esr, ¶ms,710countLit, offcodeCount, matchLengthCount, litLengthCount, repOffset,711(const char*)srcBuffer + pos, fileSizes[u],712notificationLevel);713pos += fileSizes[u];714}715716if (notificationLevel >= 4) {717/* writeStats */718DISPLAYLEVEL(4, "Offset Code Frequencies : \n");719for (u=0; u<=offcodeMax; u++) {720DISPLAYLEVEL(4, "%2u :%7u \n", u, offcodeCount[u]);721} }722723/* analyze, build stats, starting with literals */724{ size_t maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp));725if (HUF_isError(maxNbBits)) {726eSize = maxNbBits;727DISPLAYLEVEL(1, " HUF_buildCTable error \n");728goto _cleanup;729}730if (maxNbBits==8) { /* not compressible : will fail on HUF_writeCTable() */731DISPLAYLEVEL(2, "warning : pathological dataset : literals are not compressible : samples are noisy or too regular \n");732ZDICT_flatLit(countLit); /* replace distribution by a fake "mostly flat but still compressible" distribution, that HUF_writeCTable() can encode */733maxNbBits = HUF_buildCTable_wksp(hufTable, countLit, 255, huffLog, wksp, sizeof(wksp));734assert(maxNbBits==9);735}736huffLog = (U32)maxNbBits;737}738739/* looking for most common first offsets */740{ U32 offset;741for (offset=1; offset<MAXREPOFFSET; offset++)742ZDICT_insertSortCount(bestRepOffset, offset, repOffset[offset]);743}744/* note : the result of this phase should be used to better appreciate the impact on statistics 
*/745746total=0; for (u=0; u<=offcodeMax; u++) total+=offcodeCount[u];747errorCode = FSE_normalizeCount(offcodeNCount, Offlog, offcodeCount, total, offcodeMax, /* useLowProbCount */ 1);748if (FSE_isError(errorCode)) {749eSize = errorCode;750DISPLAYLEVEL(1, "FSE_normalizeCount error with offcodeCount \n");751goto _cleanup;752}753Offlog = (U32)errorCode;754755total=0; for (u=0; u<=MaxML; u++) total+=matchLengthCount[u];756errorCode = FSE_normalizeCount(matchLengthNCount, mlLog, matchLengthCount, total, MaxML, /* useLowProbCount */ 1);757if (FSE_isError(errorCode)) {758eSize = errorCode;759DISPLAYLEVEL(1, "FSE_normalizeCount error with matchLengthCount \n");760goto _cleanup;761}762mlLog = (U32)errorCode;763764total=0; for (u=0; u<=MaxLL; u++) total+=litLengthCount[u];765errorCode = FSE_normalizeCount(litLengthNCount, llLog, litLengthCount, total, MaxLL, /* useLowProbCount */ 1);766if (FSE_isError(errorCode)) {767eSize = errorCode;768DISPLAYLEVEL(1, "FSE_normalizeCount error with litLengthCount \n");769goto _cleanup;770}771llLog = (U32)errorCode;772773/* write result to buffer */774{ size_t const hhSize = HUF_writeCTable_wksp(dstPtr, maxDstSize, hufTable, 255, huffLog, wksp, sizeof(wksp));775if (HUF_isError(hhSize)) {776eSize = hhSize;777DISPLAYLEVEL(1, "HUF_writeCTable error \n");778goto _cleanup;779}780dstPtr += hhSize;781maxDstSize -= hhSize;782eSize += hhSize;783}784785{ size_t const ohSize = FSE_writeNCount(dstPtr, maxDstSize, offcodeNCount, OFFCODE_MAX, Offlog);786if (FSE_isError(ohSize)) {787eSize = ohSize;788DISPLAYLEVEL(1, "FSE_writeNCount error with offcodeNCount \n");789goto _cleanup;790}791dstPtr += ohSize;792maxDstSize -= ohSize;793eSize += ohSize;794}795796{ size_t const mhSize = FSE_writeNCount(dstPtr, maxDstSize, matchLengthNCount, MaxML, mlLog);797if (FSE_isError(mhSize)) {798eSize = mhSize;799DISPLAYLEVEL(1, "FSE_writeNCount error with matchLengthNCount \n");800goto _cleanup;801}802dstPtr += mhSize;803maxDstSize -= mhSize;804eSize += 
mhSize;805}806807{ size_t const lhSize = FSE_writeNCount(dstPtr, maxDstSize, litLengthNCount, MaxLL, llLog);808if (FSE_isError(lhSize)) {809eSize = lhSize;810DISPLAYLEVEL(1, "FSE_writeNCount error with litlengthNCount \n");811goto _cleanup;812}813dstPtr += lhSize;814maxDstSize -= lhSize;815eSize += lhSize;816}817818if (maxDstSize<12) {819eSize = ERROR(dstSize_tooSmall);820DISPLAYLEVEL(1, "not enough space to write RepOffsets \n");821goto _cleanup;822}823# if 0824MEM_writeLE32(dstPtr+0, bestRepOffset[0].offset);825MEM_writeLE32(dstPtr+4, bestRepOffset[1].offset);826MEM_writeLE32(dstPtr+8, bestRepOffset[2].offset);827#else828/* at this stage, we don't use the result of "most common first offset",829* as the impact of statistics is not properly evaluated */830MEM_writeLE32(dstPtr+0, repStartValue[0]);831MEM_writeLE32(dstPtr+4, repStartValue[1]);832MEM_writeLE32(dstPtr+8, repStartValue[2]);833#endif834eSize += 12;835836_cleanup:837ZSTD_freeCDict(esr.dict);838ZSTD_freeCCtx(esr.zc);839free(esr.workPlace);840841return eSize;842}843844845/**846* @returns the maximum repcode value847*/848static U32 ZDICT_maxRep(U32 const reps[ZSTD_REP_NUM])849{850U32 maxRep = reps[0];851int r;852for (r = 1; r < ZSTD_REP_NUM; ++r)853maxRep = MAX(maxRep, reps[r]);854return maxRep;855}856857size_t ZDICT_finalizeDictionary(void* dictBuffer, size_t dictBufferCapacity,858const void* customDictContent, size_t dictContentSize,859const void* samplesBuffer, const size_t* samplesSizes,860unsigned nbSamples, ZDICT_params_t params)861{862size_t hSize;863#define HBUFFSIZE 256 /* should prove large enough for all entropy headers */864BYTE header[HBUFFSIZE];865int const compressionLevel = (params.compressionLevel == 0) ? 
ZSTD_CLEVEL_DEFAULT : params.compressionLevel;866U32 const notificationLevel = params.notificationLevel;867/* The final dictionary content must be at least as large as the largest repcode */868size_t const minContentSize = (size_t)ZDICT_maxRep(repStartValue);869size_t paddingSize;870871/* check conditions */872DEBUGLOG(4, "ZDICT_finalizeDictionary");873if (dictBufferCapacity < dictContentSize) return ERROR(dstSize_tooSmall);874if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) return ERROR(dstSize_tooSmall);875876/* dictionary header */877MEM_writeLE32(header, ZSTD_MAGIC_DICTIONARY);878{ U64 const randomID = XXH64(customDictContent, dictContentSize, 0);879U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;880U32 const dictID = params.dictID ? params.dictID : compliantID;881MEM_writeLE32(header+4, dictID);882}883hSize = 8;884885/* entropy tables */886DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */887DISPLAYLEVEL(2, "statistics ... \n");888{ size_t const eSize = ZDICT_analyzeEntropy(header+hSize, HBUFFSIZE-hSize,889compressionLevel,890samplesBuffer, samplesSizes, nbSamples,891customDictContent, dictContentSize,892notificationLevel);893if (ZDICT_isError(eSize)) return eSize;894hSize += eSize;895}896897/* Shrink the content size if it doesn't fit in the buffer */898if (hSize + dictContentSize > dictBufferCapacity) {899dictContentSize = dictBufferCapacity - hSize;900}901902/* Pad the dictionary content with zeros if it is too small */903if (dictContentSize < minContentSize) {904RETURN_ERROR_IF(hSize + minContentSize > dictBufferCapacity, dstSize_tooSmall,905"dictBufferCapacity too small to fit max repcode");906paddingSize = minContentSize - dictContentSize;907} else {908paddingSize = 0;909}910911{912size_t const dictSize = hSize + paddingSize + dictContentSize;913914/* The dictionary consists of the header, optional padding, and the content.915* The padding comes before the content because the "best" position in the916* dictionary is the last 
byte.917*/918BYTE* const outDictHeader = (BYTE*)dictBuffer;919BYTE* const outDictPadding = outDictHeader + hSize;920BYTE* const outDictContent = outDictPadding + paddingSize;921922assert(dictSize <= dictBufferCapacity);923assert(outDictContent + dictContentSize == (BYTE*)dictBuffer + dictSize);924925/* First copy the customDictContent into its final location.926* `customDictContent` and `dictBuffer` may overlap, so we must927* do this before any other writes into the output buffer.928* Then copy the header & padding into the output buffer.929*/930memmove(outDictContent, customDictContent, dictContentSize);931memcpy(outDictHeader, header, hSize);932memset(outDictPadding, 0, paddingSize);933934return dictSize;935}936}937938939static size_t ZDICT_addEntropyTablesFromBuffer_advanced(940void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,941const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,942ZDICT_params_t params)943{944int const compressionLevel = (params.compressionLevel == 0) ? ZSTD_CLEVEL_DEFAULT : params.compressionLevel;945U32 const notificationLevel = params.notificationLevel;946size_t hSize = 8;947948/* calculate entropy tables */949DISPLAYLEVEL(2, "\r%70s\r", ""); /* clean display line */950DISPLAYLEVEL(2, "statistics ... \n");951{ size_t const eSize = ZDICT_analyzeEntropy((char*)dictBuffer+hSize, dictBufferCapacity-hSize,952compressionLevel,953samplesBuffer, samplesSizes, nbSamples,954(char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize,955notificationLevel);956if (ZDICT_isError(eSize)) return eSize;957hSize += eSize;958}959960/* add dictionary header (after entropy tables) */961MEM_writeLE32(dictBuffer, ZSTD_MAGIC_DICTIONARY);962{ U64 const randomID = XXH64((char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize, 0);963U32 const compliantID = (randomID % ((1U<<31)-32768)) + 32768;964U32 const dictID = params.dictID ? 
params.dictID : compliantID;965MEM_writeLE32((char*)dictBuffer+4, dictID);966}967968if (hSize + dictContentSize < dictBufferCapacity)969memmove((char*)dictBuffer + hSize, (char*)dictBuffer + dictBufferCapacity - dictContentSize, dictContentSize);970return MIN(dictBufferCapacity, hSize+dictContentSize);971}972973/*! ZDICT_trainFromBuffer_unsafe_legacy() :974* Warning : `samplesBuffer` must be followed by noisy guard band !!!975* @return : size of dictionary, or an error code which can be tested with ZDICT_isError()976*/977static size_t ZDICT_trainFromBuffer_unsafe_legacy(978void* dictBuffer, size_t maxDictSize,979const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,980ZDICT_legacy_params_t params)981{982U32 const dictListSize = MAX(MAX(DICTLISTSIZE_DEFAULT, nbSamples), (U32)(maxDictSize/16));983dictItem* const dictList = (dictItem*)malloc(dictListSize * sizeof(*dictList));984unsigned const selectivity = params.selectivityLevel == 0 ? g_selectivity_default : params.selectivityLevel;985unsigned const minRep = (selectivity > 30) ? 
MINRATIO : nbSamples >> selectivity;986size_t const targetDictSize = maxDictSize;987size_t const samplesBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);988size_t dictSize = 0;989U32 const notificationLevel = params.zParams.notificationLevel;990991/* checks */992if (!dictList) return ERROR(memory_allocation);993if (maxDictSize < ZDICT_DICTSIZE_MIN) { free(dictList); return ERROR(dstSize_tooSmall); } /* requested dictionary size is too small */994if (samplesBuffSize < ZDICT_MIN_SAMPLES_SIZE) { free(dictList); return ERROR(dictionaryCreation_failed); } /* not enough source to create dictionary */995996/* init */997ZDICT_initDictItem(dictList);998999/* build dictionary */1000ZDICT_trainBuffer_legacy(dictList, dictListSize,1001samplesBuffer, samplesBuffSize,1002samplesSizes, nbSamples,1003minRep, notificationLevel);10041005/* display best matches */1006if (params.zParams.notificationLevel>= 3) {1007unsigned const nb = MIN(25, dictList[0].pos);1008unsigned const dictContentSize = ZDICT_dictSize(dictList);1009unsigned u;1010DISPLAYLEVEL(3, "\n %u segments found, of total size %u \n", (unsigned)dictList[0].pos-1, dictContentSize);1011DISPLAYLEVEL(3, "list %u best segments \n", nb-1);1012for (u=1; u<nb; u++) {1013unsigned const pos = dictList[u].pos;1014unsigned const length = dictList[u].length;1015U32 const printedLength = MIN(40, length);1016if ((pos > samplesBuffSize) || ((pos + length) > samplesBuffSize)) {1017free(dictList);1018return ERROR(GENERIC); /* should never happen */1019}1020DISPLAYLEVEL(3, "%3u:%3u bytes at pos %8u, savings %7u bytes |",1021u, length, pos, (unsigned)dictList[u].savings);1022ZDICT_printHex((const char*)samplesBuffer+pos, printedLength);1023DISPLAYLEVEL(3, "| \n");1024} }102510261027/* create dictionary */1028{ unsigned dictContentSize = ZDICT_dictSize(dictList);1029if (dictContentSize < ZDICT_CONTENTSIZE_MIN) { free(dictList); return ERROR(dictionaryCreation_failed); } /* dictionary content too small */1030if (dictContentSize < 
targetDictSize/4) {1031DISPLAYLEVEL(2, "! warning : selected content significantly smaller than requested (%u < %u) \n", dictContentSize, (unsigned)maxDictSize);1032if (samplesBuffSize < 10 * targetDictSize)1033DISPLAYLEVEL(2, "! consider increasing the number of samples (total size : %u MB)\n", (unsigned)(samplesBuffSize>>20));1034if (minRep > MINRATIO) {1035DISPLAYLEVEL(2, "! consider increasing selectivity to produce larger dictionary (-s%u) \n", selectivity+1);1036DISPLAYLEVEL(2, "! note : larger dictionaries are not necessarily better, test its efficiency on samples \n");1037}1038}10391040if ((dictContentSize > targetDictSize*3) && (nbSamples > 2*MINRATIO) && (selectivity>1)) {1041unsigned proposedSelectivity = selectivity-1;1042while ((nbSamples >> proposedSelectivity) <= MINRATIO) { proposedSelectivity--; }1043DISPLAYLEVEL(2, "! note : calculated dictionary significantly larger than requested (%u > %u) \n", dictContentSize, (unsigned)maxDictSize);1044DISPLAYLEVEL(2, "! consider increasing dictionary size, or produce denser dictionary (-s%u) \n", proposedSelectivity);1045DISPLAYLEVEL(2, "! 
always test dictionary efficiency on real samples \n");1046}10471048/* limit dictionary size */1049{ U32 const max = dictList->pos; /* convention : nb of useful elts within dictList */1050U32 currentSize = 0;1051U32 n; for (n=1; n<max; n++) {1052currentSize += dictList[n].length;1053if (currentSize > targetDictSize) { currentSize -= dictList[n].length; break; }1054}1055dictList->pos = n;1056dictContentSize = currentSize;1057}10581059/* build dict content */1060{ U32 u;1061BYTE* ptr = (BYTE*)dictBuffer + maxDictSize;1062for (u=1; u<dictList->pos; u++) {1063U32 l = dictList[u].length;1064ptr -= l;1065if (ptr<(BYTE*)dictBuffer) { free(dictList); return ERROR(GENERIC); } /* should not happen */1066memcpy(ptr, (const char*)samplesBuffer+dictList[u].pos, l);1067} }10681069dictSize = ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, maxDictSize,1070samplesBuffer, samplesSizes, nbSamples,1071params.zParams);1072}10731074/* clean up */1075free(dictList);1076return dictSize;1077}107810791080/* ZDICT_trainFromBuffer_legacy() :1081* issue : samplesBuffer need to be followed by a noisy guard band.1082* work around : duplicate the buffer, and add the noise */1083size_t ZDICT_trainFromBuffer_legacy(void* dictBuffer, size_t dictBufferCapacity,1084const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples,1085ZDICT_legacy_params_t params)1086{1087size_t result;1088void* newBuff;1089size_t const sBuffSize = ZDICT_totalSampleSize(samplesSizes, nbSamples);1090if (sBuffSize < ZDICT_MIN_SAMPLES_SIZE) return 0; /* not enough content => no dictionary */10911092newBuff = malloc(sBuffSize + NOISELENGTH);1093if (!newBuff) return ERROR(memory_allocation);10941095memcpy(newBuff, samplesBuffer, sBuffSize);1096ZDICT_fillNoise((char*)newBuff + sBuffSize, NOISELENGTH); /* guard band, for end of buffer condition */10971098result =1099ZDICT_trainFromBuffer_unsafe_legacy(dictBuffer, dictBufferCapacity, newBuff,1100samplesSizes, nbSamples, 
params);1101free(newBuff);1102return result;1103}110411051106size_t ZDICT_trainFromBuffer(void* dictBuffer, size_t dictBufferCapacity,1107const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)1108{1109ZDICT_fastCover_params_t params;1110DEBUGLOG(3, "ZDICT_trainFromBuffer");1111memset(¶ms, 0, sizeof(params));1112params.d = 8;1113params.steps = 4;1114/* Use default level since no compression level information is available */1115params.zParams.compressionLevel = ZSTD_CLEVEL_DEFAULT;1116#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=1)1117params.zParams.notificationLevel = DEBUGLEVEL;1118#endif1119return ZDICT_optimizeTrainFromBuffer_fastCover(dictBuffer, dictBufferCapacity,1120samplesBuffer, samplesSizes, nbSamples,1121¶ms);1122}11231124size_t ZDICT_addEntropyTablesFromBuffer(void* dictBuffer, size_t dictContentSize, size_t dictBufferCapacity,1125const void* samplesBuffer, const size_t* samplesSizes, unsigned nbSamples)1126{1127ZDICT_params_t params;1128memset(¶ms, 0, sizeof(params));1129return ZDICT_addEntropyTablesFromBuffer_advanced(dictBuffer, dictContentSize, dictBufferCapacity,1130samplesBuffer, samplesSizes, nbSamples,1131params);1132}113311341135