Path: blob/master/Utilities/cmzstd/lib/compress/zstd_lazy.c
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"
#include "zstd_lazy.h"
#include "../common/bits.h" /* ZSTD_countTrailingZeros64 */

#define kLazySkippingStep 8


/*-*************************************
*  Binary Tree search
***************************************/

static void
ZSTD_updateDUBT(ZSTD_matchState_t* ms,
                const BYTE* ip, const BYTE* iend,
                U32 mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hashLog = cParams->hashLog;

    U32* const bt = ms->chainTable;
    U32 const btLog  = cParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;

    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    if (idx != target)
        DEBUGLOG(7, "ZSTD_updateDUBT, from %u to %u (dictLimit:%u)",
                    idx, target, ms->window.dictLimit);
    assert(ip + 8 <= iend);   /* condition for ZSTD_hashPtr */
    (void)iend;

    assert(idx >= ms->window.dictLimit);   /* condition for valid base+idx */
    for ( ; idx < target ; idx++) {
        size_t const h = ZSTD_hashPtr(base + idx, hashLog, mls);   /* assumption : ip + 8 <= iend */
        U32 const matchIndex = hashTable[h];

        U32* const nextCandidatePtr = bt + 2*(idx&btMask);
        U32* const sortMarkPtr = nextCandidatePtr + 1;

        DEBUGLOG(8, "ZSTD_updateDUBT: insert %u", idx);
        hashTable[h] = idx;   /* Update Hash Table */
        *nextCandidatePtr = matchIndex;   /* update BT like a chain */
        *sortMarkPtr = ZSTD_DUBT_UNSORTED_MARK;
    }
    ms->nextToUpdate = target;
}
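/* Note (editorial, not from upstream): the DUBT ("dynamic unsorted binary tree")
 * reuses the chain table as tree storage. Each position idx owns two U32 slots at
 * bt[2*(idx & btMask)]:
 *   - slot 0 : link to the next candidate (chain-style) while the node is unsorted,
 *              then the "smaller" child once the node has been sorted into the tree;
 *   - slot 1 : ZSTD_DUBT_UNSORTED_MARK while unsorted, then the "larger" child.
 * ZSTD_updateDUBT() only performs the cheap chain-style insertion; the actual
 * sorting is deferred to ZSTD_insertDUBT1(), invoked lazily from
 * ZSTD_DUBT_findBestMatch() on the positions a search actually visits.
 */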

/** ZSTD_insertDUBT1() :
 *  sort one already inserted but unsorted position
 *  assumption : curr >= btlow == (curr - btmask)
 *  doesn't fail */
static void
ZSTD_insertDUBT1(const ZSTD_matchState_t* ms,
                 U32 curr, const BYTE* inputEnd,
                 U32 nbCompares, U32 btLow,
                 const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const bt = ms->chainTable;
    U32 const btLog  = cParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;
    size_t commonLengthSmaller=0, commonLengthLarger=0;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const ip = (curr>=dictLimit) ? base + curr : dictBase + curr;
    const BYTE* const iend = (curr>=dictLimit) ? inputEnd : dictBase + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* match;
    U32* smallerPtr = bt + 2*(curr&btMask);
    U32* largerPtr  = smallerPtr + 1;
    U32 matchIndex = *smallerPtr;   /* this candidate is unsorted : next sorted candidate is reached through *smallerPtr, while *largerPtr contains previous unsorted candidate (which is already saved and can be overwritten) */
    U32 dummy32;   /* to be nullified at the end */
    U32 const windowValid = ms->window.lowLimit;
    U32 const maxDistance = 1U << cParams->windowLog;
    U32 const windowLow = (curr - windowValid > maxDistance) ? curr - maxDistance : windowValid;


    DEBUGLOG(8, "ZSTD_insertDUBT1(%u) (dictLimit=%u, lowLimit=%u)",
                curr, dictLimit, windowLow);
    assert(curr >= btLow);
    assert(ip < iend);   /* condition for ZSTD_count */

    for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
        U32* const nextPtr = bt + 2*(matchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        assert(matchIndex < curr);
        /* note : all candidates are now supposed sorted,
         * but it's still possible to have nextPtr[1] == ZSTD_DUBT_UNSORTED_MARK
         * when a real index has the same value as ZSTD_DUBT_UNSORTED_MARK */

        if ( (dictMode != ZSTD_extDict)
          || (matchIndex+matchLength >= dictLimit)  /* both in current segment*/
          || (curr < dictLimit) /* both in extDict */) {
            const BYTE* const mBase = ( (dictMode != ZSTD_extDict)
                                     || (matchIndex+matchLength >= dictLimit)) ?
                                        base : dictBase;
            assert( (matchIndex+matchLength >= dictLimit)   /* might be wrong if extDict is incorrectly set to 0 */
                 || (curr < dictLimit) );
            match = mBase + matchIndex;
            matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
        } else {
            match = dictBase + matchIndex;
            matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
            if (matchIndex+matchLength >= dictLimit)
                match = base + matchIndex;   /* preparation for next read of match[matchLength] */
        }

        DEBUGLOG(8, "ZSTD_insertDUBT1: comparing %u with %u : found %u common bytes ",
                    curr, matchIndex, (U32)matchLength);

        if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
            break;   /* drop , to guarantee consistency ; miss a bit of compression, but other solutions can corrupt tree */
        }

        if (match[matchLength] < ip[matchLength]) {  /* necessarily within buffer */
            /* match is smaller than current */
            *smallerPtr = matchIndex;             /* update smaller idx */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is smaller : next => %u",
                        matchIndex, btLow, nextPtr[1]);
            smallerPtr = nextPtr+1;               /* new "candidate" => larger than match, which was smaller than target */
            matchIndex = nextPtr[1];              /* new matchIndex, larger than previous and closer to current */
        } else {
            /* match is larger than current */
            *largerPtr = matchIndex;
            commonLengthLarger = matchLength;
            if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop searching */
            DEBUGLOG(8, "ZSTD_insertDUBT1: %u (>btLow=%u) is larger => %u",
                        matchIndex, btLow, nextPtr[0]);
            largerPtr = nextPtr;
            matchIndex = nextPtr[0];
    }   }

    *smallerPtr = *largerPtr = 0;
}
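/* Note (editorial): commonLengthSmaller / commonLengthLarger record the prefix
 * length already known to be shared with every candidate on the "smaller"
 * (resp. "larger") side of the tree. Starting each comparison at
 * MIN(commonLengthSmaller, commonLengthLarger) lets the insertion skip re-comparing
 * bytes that are guaranteed equal, which is what keeps the binary-tree strategy
 * affordable on repetitive data.
 */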


static size_t
ZSTD_DUBT_findBetterDictMatch (
        const ZSTD_matchState_t* ms,
        const BYTE* const ip, const BYTE* const iend,
        size_t* offsetPtr,
        size_t bestLength,
        U32 nbCompares,
        U32 const mls,
        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_matchState_t * const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dmsCParams = &dms->cParams;
    const U32 * const dictHashTable = dms->hashTable;
    U32 const hashLog = dmsCParams->hashLog;
    size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
    U32 dictMatchIndex = dictHashTable[h];

    const BYTE* const base = ms->window.base;
    const BYTE* const prefixStart = base + ms->window.dictLimit;
    U32 const curr = (U32)(ip-base);
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictEnd = dms->window.nextSrc;
    U32 const dictHighLimit = (U32)(dms->window.nextSrc - dms->window.base);
    U32 const dictLowLimit = dms->window.lowLimit;
    U32 const dictIndexDelta = ms->window.lowLimit - dictHighLimit;

    U32* const dictBt = dms->chainTable;
    U32 const btLog  = dmsCParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;
    U32 const btLow = (btMask >= dictHighLimit - dictLowLimit) ? dictLowLimit : dictHighLimit - btMask;

    size_t commonLengthSmaller=0, commonLengthLarger=0;

    (void)dictMode;
    assert(dictMode == ZSTD_dictMatchState);

    for (; nbCompares && (dictMatchIndex > dictLowLimit); --nbCompares) {
        U32* const nextPtr = dictBt + 2*(dictMatchIndex & btMask);
        size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
        const BYTE* match = dictBase + dictMatchIndex;
        matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
        if (dictMatchIndex+matchLength >= dictHighLimit)
            match = base + dictMatchIndex + dictIndexDelta;   /* to prepare for next usage of match[matchLength] */

        if (matchLength > bestLength) {
            U32 matchIndex = dictMatchIndex + dictIndexDelta;
            if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr-matchIndex+1) - ZSTD_highbit32((U32)offsetPtr[0]+1)) ) {
                DEBUGLOG(9, "ZSTD_DUBT_findBetterDictMatch(%u) : found better match length %u -> %u and offsetCode %u -> %u (dictMatchIndex %u, matchIndex %u)",
                    curr, (U32)bestLength, (U32)matchLength, (U32)*offsetPtr, OFFSET_TO_OFFBASE(curr - matchIndex), dictMatchIndex, matchIndex);
                bestLength = matchLength, *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
            }
            if (ip+matchLength == iend) {   /* reached end of input : ip[matchLength] is not valid, no way to know if it's larger or smaller than match */
                break;   /* drop, to guarantee consistency (miss a little bit of compression) */
            }
        }

        if (match[matchLength] < ip[matchLength]) {
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
            dictMatchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
        } else {
            /* match is larger than current */
            if (dictMatchIndex <= btLow) { break; }   /* beyond tree size, stop the search */
            commonLengthLarger = matchLength;
            dictMatchIndex = nextPtr[0];
        }
    }

    if (bestLength >= MINMATCH) {
        U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offsetPtr); (void)mIndex;
        DEBUGLOG(8, "ZSTD_DUBT_findBetterDictMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                    curr, (U32)bestLength, (U32)*offsetPtr, mIndex);
    }
    return bestLength;

}
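/* Note (editorial): the acceptance test
 *     4*(matchLength - bestLength) > highbit32(newOffset+1) - highbit32(oldOffset+1)
 * is a cost heuristic rather than an exact measure: a candidate that is longer but
 * much farther away is only preferred when the extra match length (weighted by 4)
 * outweighs the additional offset bits it would cost to encode. The same rule is
 * used below in ZSTD_DUBT_findBestMatch().
 */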


static size_t
ZSTD_DUBT_findBestMatch(ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iend,
                        size_t* offBasePtr,
                        U32 const mls,
                        const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hashLog = cParams->hashLog;
    size_t const h = ZSTD_hashPtr(ip, hashLog, mls);
    U32 matchIndex = hashTable[h];

    const BYTE* const base = ms->window.base;
    U32 const curr = (U32)(ip-base);
    U32 const windowLow = ZSTD_getLowestMatchIndex(ms, curr, cParams->windowLog);

    U32* const bt = ms->chainTable;
    U32 const btLog  = cParams->chainLog - 1;
    U32 const btMask = (1 << btLog) - 1;
    U32 const btLow = (btMask >= curr) ? 0 : curr - btMask;
    U32 const unsortLimit = MAX(btLow, windowLow);

    U32* nextCandidate = bt + 2*(matchIndex&btMask);
    U32* unsortedMark = bt + 2*(matchIndex&btMask) + 1;
    U32 nbCompares = 1U << cParams->searchLog;
    U32 nbCandidates = nbCompares;
    U32 previousCandidate = 0;

    DEBUGLOG(7, "ZSTD_DUBT_findBestMatch (%u) ", curr);
    assert(ip <= iend-8);   /* required for h calculation */
    assert(dictMode != ZSTD_dedicatedDictSearch);

    /* reach end of unsorted candidates list */
    while ( (matchIndex > unsortLimit)
         && (*unsortedMark == ZSTD_DUBT_UNSORTED_MARK)
         && (nbCandidates > 1) ) {
        DEBUGLOG(8, "ZSTD_DUBT_findBestMatch: candidate %u is unsorted",
                    matchIndex);
        *unsortedMark = previousCandidate;  /* the unsortedMark becomes a reversed chain, to move up back to original position */
        previousCandidate = matchIndex;
        matchIndex = *nextCandidate;
        nextCandidate = bt + 2*(matchIndex&btMask);
        unsortedMark = bt + 2*(matchIndex&btMask) + 1;
        nbCandidates --;
    }

    /* nullify last candidate if it's still unsorted
     * simplification, detrimental to compression ratio, beneficial for speed */
    if ( (matchIndex > unsortLimit)
      && (*unsortedMark==ZSTD_DUBT_UNSORTED_MARK) ) {
        DEBUGLOG(7, "ZSTD_DUBT_findBestMatch: nullify last unsorted candidate %u",
                    matchIndex);
        *nextCandidate = *unsortedMark = 0;
    }

    /* batch sort stacked candidates */
    matchIndex = previousCandidate;
    while (matchIndex) {  /* will end on matchIndex == 0 */
        U32* const nextCandidateIdxPtr = bt + 2*(matchIndex&btMask) + 1;
        U32 const nextCandidateIdx = *nextCandidateIdxPtr;
        ZSTD_insertDUBT1(ms, matchIndex, iend,
                         nbCandidates, unsortLimit, dictMode);
        matchIndex = nextCandidateIdx;
        nbCandidates++;
    }

    /* find longest match */
    {   size_t commonLengthSmaller = 0, commonLengthLarger = 0;
        const BYTE* const dictBase = ms->window.dictBase;
        const U32 dictLimit = ms->window.dictLimit;
        const BYTE* const dictEnd = dictBase + dictLimit;
        const BYTE* const prefixStart = base + dictLimit;
        U32* smallerPtr = bt + 2*(curr&btMask);
        U32* largerPtr  = bt + 2*(curr&btMask) + 1;
        U32 matchEndIdx = curr + 8 + 1;
        U32 dummy32;   /* to be nullified at the end */
        size_t bestLength = 0;

        matchIndex = hashTable[h];
        hashTable[h] = curr;   /* Update Hash Table */

        for (; nbCompares && (matchIndex > windowLow); --nbCompares) {
            U32* const nextPtr = bt + 2*(matchIndex & btMask);
            size_t matchLength = MIN(commonLengthSmaller, commonLengthLarger);   /* guaranteed minimum nb of common bytes */
            const BYTE* match;

            if ((dictMode != ZSTD_extDict) || (matchIndex+matchLength >= dictLimit)) {
                match = base + matchIndex;
                matchLength += ZSTD_count(ip+matchLength, match+matchLength, iend);
            } else {
                match = dictBase + matchIndex;
                matchLength += ZSTD_count_2segments(ip+matchLength, match+matchLength, iend, dictEnd, prefixStart);
                if (matchIndex+matchLength >= dictLimit)
                    match = base + matchIndex;   /* to prepare for next usage of match[matchLength] */
            }

            if (matchLength > bestLength) {
                if (matchLength > matchEndIdx - matchIndex)
                    matchEndIdx = matchIndex + (U32)matchLength;
                if ( (4*(int)(matchLength-bestLength)) > (int)(ZSTD_highbit32(curr - matchIndex + 1) - ZSTD_highbit32((U32)*offBasePtr)) )
                    bestLength = matchLength, *offBasePtr = OFFSET_TO_OFFBASE(curr - matchIndex);
                if (ip+matchLength == iend) {   /* equal : no way to know if inf or sup */
                    if (dictMode == ZSTD_dictMatchState) {
                        nbCompares = 0; /* in addition to avoiding checking any
                                         * further in this loop, make sure we
                                         * skip checking in the dictionary. */
                    }
                    break;   /* drop, to guarantee consistency (miss a little bit of compression) */
                }
            }

            if (match[matchLength] < ip[matchLength]) {
                /* match is smaller than current */
                *smallerPtr = matchIndex;             /* update smaller idx */
                commonLengthSmaller = matchLength;    /* all smaller will now have at least this guaranteed common length */
                if (matchIndex <= btLow) { smallerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                smallerPtr = nextPtr+1;               /* new "smaller" => larger of match */
                matchIndex = nextPtr[1];              /* new matchIndex larger than previous (closer to current) */
            } else {
                /* match is larger than current */
                *largerPtr = matchIndex;
                commonLengthLarger = matchLength;
                if (matchIndex <= btLow) { largerPtr=&dummy32; break; }   /* beyond tree size, stop the search */
                largerPtr = nextPtr;
                matchIndex = nextPtr[0];
        }   }

        *smallerPtr = *largerPtr = 0;

        assert(nbCompares <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
        if (dictMode == ZSTD_dictMatchState && nbCompares) {
            bestLength = ZSTD_DUBT_findBetterDictMatch(
                    ms, ip, iend,
                    offBasePtr, bestLength, nbCompares,
                    mls, dictMode);
        }

        assert(matchEndIdx > curr+8); /* ensure nextToUpdate is increased */
        ms->nextToUpdate = matchEndIdx - 8;   /* skip repetitive patterns */
        if (bestLength >= MINMATCH) {
            U32 const mIndex = curr - (U32)OFFBASE_TO_OFFSET(*offBasePtr); (void)mIndex;
            DEBUGLOG(8, "ZSTD_DUBT_findBestMatch(%u) : found match of length %u and offsetCode %u (pos %u)",
                        curr, (U32)bestLength, (U32)*offBasePtr, mIndex);
        }
        return bestLength;
    }
}


/** ZSTD_BtFindBestMatch() : Tree updater, providing best match */
FORCE_INLINE_TEMPLATE size_t
ZSTD_BtFindBestMatch( ZSTD_matchState_t* ms,
                const BYTE* const ip, const BYTE* const iLimit,
                      size_t* offBasePtr,
                const U32 mls /* template */,
                const ZSTD_dictMode_e dictMode)
{
    DEBUGLOG(7, "ZSTD_BtFindBestMatch");
    if (ip < ms->window.base + ms->nextToUpdate) return 0;   /* skipped area */
    ZSTD_updateDUBT(ms, ip, iLimit, mls);
    return ZSTD_DUBT_findBestMatch(ms, ip, iLimit, offBasePtr, mls, dictMode);
}
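/* Note (editorial): ZSTD_DUBT_findBestMatch() first walks the chain of candidates
 * still flagged ZSTD_DUBT_UNSORTED_MARK, reversing the links as it goes, then
 * batch-sorts them with ZSTD_insertDUBT1() before descending the (now sorted) tree.
 * Deferring the sort this way means positions that are never searched are never
 * sorted, which keeps ZSTD_updateDUBT() cheap.
 */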

/***********************************
*  Dedicated dict search
***********************************/

void ZSTD_dedicatedDictSearch_lazy_loadDictionary(ZSTD_matchState_t* ms, const BYTE* const ip)
{
    const BYTE* const base = ms->window.base;
    U32 const target = (U32)(ip - base);
    U32* const hashTable = ms->hashTable;
    U32* const chainTable = ms->chainTable;
    U32 const chainSize = 1 << ms->cParams.chainLog;
    U32 idx = ms->nextToUpdate;
    U32 const minChain = chainSize < target - idx ? target - chainSize : idx;
    U32 const bucketSize = 1 << ZSTD_LAZY_DDSS_BUCKET_LOG;
    U32 const cacheSize = bucketSize - 1;
    U32 const chainAttempts = (1 << ms->cParams.searchLog) - cacheSize;
    U32 const chainLimit = chainAttempts > 255 ? 255 : chainAttempts;

    /* We know the hashtable is oversized by a factor of `bucketSize`.
     * We are going to temporarily pretend `bucketSize == 1`, keeping only a
     * single entry. We will use the rest of the space to construct a temporary
     * chaintable.
     */
    U32 const hashLog = ms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
    U32* const tmpHashTable = hashTable;
    U32* const tmpChainTable = hashTable + ((size_t)1 << hashLog);
    U32 const tmpChainSize = (U32)((1 << ZSTD_LAZY_DDSS_BUCKET_LOG) - 1) << hashLog;
    U32 const tmpMinChain = tmpChainSize < target ? target - tmpChainSize : idx;
    U32 hashIdx;

    assert(ms->cParams.chainLog <= 24);
    assert(ms->cParams.hashLog > ms->cParams.chainLog);
    assert(idx != 0);
    assert(tmpMinChain <= minChain);

    /* fill conventional hash table and conventional chain table */
    for ( ; idx < target; idx++) {
        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch);
        if (idx >= tmpMinChain) {
            tmpChainTable[idx - tmpMinChain] = hashTable[h];
        }
        tmpHashTable[h] = idx;
    }

    /* sort chains into ddss chain table */
    {
        U32 chainPos = 0;
        for (hashIdx = 0; hashIdx < (1U << hashLog); hashIdx++) {
            U32 count;
            U32 countBeyondMinChain = 0;
            U32 i = tmpHashTable[hashIdx];
            for (count = 0; i >= tmpMinChain && count < cacheSize; count++) {
                /* skip through the chain to the first position that won't be
                 * in the hash cache bucket */
                if (i < minChain) {
                    countBeyondMinChain++;
                }
                i = tmpChainTable[i - tmpMinChain];
            }
            if (count == cacheSize) {
                for (count = 0; count < chainLimit;) {
                    if (i < minChain) {
                        if (!i || ++countBeyondMinChain > cacheSize) {
                            /* only allow pulling `cacheSize` number of entries
                             * into the cache or chainTable beyond `minChain`,
                             * to replace the entries pulled out of the
                             * chainTable into the cache. This lets us reach
                             * back further without increasing the total number
                             * of entries in the chainTable, guaranteeing the
                             * DDSS chain table will fit into the space
                             * allocated for the regular one. */
                            break;
                        }
                    }
                    chainTable[chainPos++] = i;
                    count++;
                    if (i < tmpMinChain) {
                        break;
                    }
                    i = tmpChainTable[i - tmpMinChain];
                }
            } else {
                count = 0;
            }
            if (count) {
                tmpHashTable[hashIdx] = ((chainPos - count) << 8) + count;
            } else {
                tmpHashTable[hashIdx] = 0;
            }
        }
        assert(chainPos <= chainSize); /* I believe this is guaranteed... */
    }

    /* move chain pointers into the last entry of each hash bucket */
    for (hashIdx = (1 << hashLog); hashIdx; ) {
        U32 const bucketIdx = --hashIdx << ZSTD_LAZY_DDSS_BUCKET_LOG;
        U32 const chainPackedPointer = tmpHashTable[hashIdx];
        U32 i;
        for (i = 0; i < cacheSize; i++) {
            hashTable[bucketIdx + i] = 0;
        }
        hashTable[bucketIdx + bucketSize - 1] = chainPackedPointer;
    }

    /* fill the buckets of the hash table */
    for (idx = ms->nextToUpdate; idx < target; idx++) {
        U32 const h = (U32)ZSTD_hashPtr(base + idx, hashLog, ms->cParams.minMatch)
                   << ZSTD_LAZY_DDSS_BUCKET_LOG;
        U32 i;
        /* Shift hash cache down 1. */
        for (i = cacheSize - 1; i; i--)
            hashTable[h + i] = hashTable[h + i - 1];
        hashTable[h] = idx;
    }

    ms->nextToUpdate = target;
}
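/* Note (editorial): after loading, each DDS hash bucket holds `bucketSize` U32
 * entries: the first `cacheSize = bucketSize - 1` slots cache the most recent
 * positions for that hash, and the last slot stores a packed pointer into the
 * dedicated chain table. The packing convention used above, and decoded in
 * ZSTD_dedicatedDictSearch_lazy_search(), is:
 *     packed      = (chainStart << 8) | chainLength;   // chainLength <= 255, fits in 8 bits
 *     chainStart  = packed >> 8;
 *     chainLength = packed & 0xFF;
 */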

/* Returns the longest match length found in the dedicated dict search structure.
 * If none are longer than the argument ml, then ml will be returned.
 */
FORCE_INLINE_TEMPLATE
size_t ZSTD_dedicatedDictSearch_lazy_search(size_t* offsetPtr, size_t ml, U32 nbAttempts,
                                            const ZSTD_matchState_t* const dms,
                                            const BYTE* const ip, const BYTE* const iLimit,
                                            const BYTE* const prefixStart, const U32 curr,
                                            const U32 dictLimit, const size_t ddsIdx) {
    const U32 ddsLowestIndex  = dms->window.dictLimit;
    const BYTE* const ddsBase = dms->window.base;
    const BYTE* const ddsEnd  = dms->window.nextSrc;
    const U32 ddsSize         = (U32)(ddsEnd - ddsBase);
    const U32 ddsIndexDelta   = dictLimit - ddsSize;
    const U32 bucketSize      = (1 << ZSTD_LAZY_DDSS_BUCKET_LOG);
    const U32 bucketLimit     = nbAttempts < bucketSize - 1 ? nbAttempts : bucketSize - 1;
    U32 ddsAttempt;
    U32 matchIndex;

    for (ddsAttempt = 0; ddsAttempt < bucketSize - 1; ddsAttempt++) {
        PREFETCH_L1(ddsBase + dms->hashTable[ddsIdx + ddsAttempt]);
    }

    {
        U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
        U32 const chainIndex = chainPackedPointer >> 8;

        PREFETCH_L1(&dms->chainTable[chainIndex]);
    }

    for (ddsAttempt = 0; ddsAttempt < bucketLimit; ddsAttempt++) {
        size_t currentMl=0;
        const BYTE* match;
        matchIndex = dms->hashTable[ddsIdx + ddsAttempt];
        match = ddsBase + matchIndex;

        if (!matchIndex) {
            return ml;
        }

        /* guaranteed by table construction */
        (void)ddsLowestIndex;
        assert(matchIndex >= ddsLowestIndex);
        assert(match+4 <= ddsEnd);
        if (MEM_read32(match) == MEM_read32(ip)) {
            /* assumption : matchIndex <= dictLimit-4 (by table construction) */
            currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
            if (ip+currentMl == iLimit) {
                /* best possible, avoids read overflow on next attempt */
                return ml;
            }
        }
    }

    {
        U32 const chainPackedPointer = dms->hashTable[ddsIdx + bucketSize - 1];
        U32 chainIndex = chainPackedPointer >> 8;
        U32 const chainLength = chainPackedPointer & 0xFF;
        U32 const chainAttempts = nbAttempts - ddsAttempt;
        U32 const chainLimit = chainAttempts > chainLength ? chainLength : chainAttempts;
        U32 chainAttempt;

        for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++) {
            PREFETCH_L1(ddsBase + dms->chainTable[chainIndex + chainAttempt]);
        }

        for (chainAttempt = 0 ; chainAttempt < chainLimit; chainAttempt++, chainIndex++) {
            size_t currentMl=0;
            const BYTE* match;
            matchIndex = dms->chainTable[chainIndex];
            match = ddsBase + matchIndex;

            /* guaranteed by table construction */
            assert(matchIndex >= ddsLowestIndex);
            assert(match+4 <= ddsEnd);
            if (MEM_read32(match) == MEM_read32(ip)) {
                /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, ddsEnd, prefixStart) + 4;
            }

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + ddsIndexDelta));
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }
        }
    }
    return ml;
}


/* *********************************
*  Hash Chain
***********************************/
#define NEXT_IN_CHAIN(d, mask)   chainTable[(d) & (mask)]
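/* Note (editorial): the hash-chain matchfinder keeps, for every recent position,
 * a link to the previous position that produced the same hash:
 *     hashTable[h]                : most recent position with hash h
 *     chainTable[pos & chainMask] : previous position with the same hash as pos
 * A search starts at hashTable[h] and follows NEXT_IN_CHAIN() links backwards,
 * stopping at the window limit, at minChain, or after (1 << searchLog) attempts.
 */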

/* Update chains up to ip (excluded)
   Assumption : always within prefix (i.e. not within extDict) */
FORCE_INLINE_TEMPLATE U32 ZSTD_insertAndFindFirstIndex_internal(
                        ZSTD_matchState_t* ms,
                        const ZSTD_compressionParameters* const cParams,
                        const BYTE* ip, U32 const mls, U32 const lazySkipping)
{
    U32* const hashTable  = ms->hashTable;
    const U32 hashLog = cParams->hashLog;
    U32* const chainTable = ms->chainTable;
    const U32 chainMask = (1 << cParams->chainLog) - 1;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    U32 idx = ms->nextToUpdate;

    while(idx < target) { /* catch up */
        size_t const h = ZSTD_hashPtr(base+idx, hashLog, mls);
        NEXT_IN_CHAIN(idx, chainMask) = hashTable[h];
        hashTable[h] = idx;
        idx++;
        /* Stop inserting every position when in the lazy skipping mode. */
        if (lazySkipping)
            break;
    }

    ms->nextToUpdate = target;
    return hashTable[ZSTD_hashPtr(ip, hashLog, mls)];
}

U32 ZSTD_insertAndFindFirstIndex(ZSTD_matchState_t* ms, const BYTE* ip) {
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    return ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, ms->cParams.minMatch, /* lazySkipping*/ 0);
}
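/* Note (editorial): ms->lazySkipping is set by the lazy parsers once they start
 * stepping over long incompressible stretches (more than kLazySkippingStep bytes
 * per step). In that mode only the positions actually searched are inserted into
 * the tables, which is why the catch-up loop above bails out after a single
 * insertion when lazySkipping is non-zero.
 */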

/* inlining is important to hardwire a hot branch (template emulation) */
FORCE_INLINE_TEMPLATE
size_t ZSTD_HcFindBestMatch(
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const chainTable = ms->chainTable;
    const U32 chainSize = (1 << cParams->chainLog);
    const U32 chainMask = chainSize-1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 curr = (U32)(ip-base);
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 lowestValid = ms->window.lowLimit;
    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    const U32 isDictionary = (ms->loadedDictEnd != 0);
    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
    const U32 minChain = curr > chainSize ? curr - chainSize : 0;
    U32 nbAttempts = 1U << cParams->searchLog;
    size_t ml=4-1;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const U32 ddsHashLog = dictMode == ZSTD_dedicatedDictSearch
                         ? dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG : 0;
    const size_t ddsIdx = dictMode == ZSTD_dedicatedDictSearch
                        ? ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG : 0;

    U32 matchIndex;

    if (dictMode == ZSTD_dedicatedDictSearch) {
        const U32* entry = &dms->hashTable[ddsIdx];
        PREFETCH_L1(entry);
    }

    /* HC4 match finder */
    matchIndex = ZSTD_insertAndFindFirstIndex_internal(ms, cParams, ip, mls, ms->lazySkipping);

    for ( ; (matchIndex>=lowLimit) & (nbAttempts>0) ; nbAttempts--) {
        size_t currentMl=0;
        if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
            const BYTE* const match = base + matchIndex;
            assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
            /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
            if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3))   /* potentially better */
                currentMl = ZSTD_count(ip, match, iLimit);
        } else {
            const BYTE* const match = dictBase + matchIndex;
            assert(match+4 <= dictEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
        }

        /* save best solution */
        if (currentMl > ml) {
            ml = currentMl;
            *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
            if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
        }

        if (matchIndex <= minChain) break;
        matchIndex = NEXT_IN_CHAIN(matchIndex, chainMask);
    }

    assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
    if (dictMode == ZSTD_dedicatedDictSearch) {
        ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts, dms,
                                                  ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
    } else if (dictMode == ZSTD_dictMatchState) {
        const U32* const dmsChainTable = dms->chainTable;
        const U32 dmsChainSize   = (1 << dms->cParams.chainLog);
        const U32 dmsChainMask   = dmsChainSize - 1;
        const U32 dmsLowestIndex = dms->window.dictLimit;
        const BYTE* const dmsBase = dms->window.base;
        const BYTE* const dmsEnd  = dms->window.nextSrc;
        const U32 dmsSize        = (U32)(dmsEnd - dmsBase);
        const U32 dmsIndexDelta  = dictLimit - dmsSize;
        const U32 dmsMinChain = dmsSize > dmsChainSize ? dmsSize - dmsChainSize : 0;

        matchIndex = dms->hashTable[ZSTD_hashPtr(ip, dms->cParams.hashLog, mls)];

        for ( ; (matchIndex>=dmsLowestIndex) & (nbAttempts>0) ; nbAttempts--) {
            size_t currentMl=0;
            const BYTE* const match = dmsBase + matchIndex;
            assert(match+4 <= dmsEnd);
            if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;

            /* save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                assert(curr > matchIndex + dmsIndexDelta);
                *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }

            if (matchIndex <= dmsMinChain) break;

            matchIndex = dmsChainTable[matchIndex & dmsChainMask];
        }
    }

    return ml;
}

/* *********************************
* (SIMD) Row-based matchfinder
***********************************/
/* Constants for row-based hash */
#define ZSTD_ROW_HASH_TAG_MASK ((1u << ZSTD_ROW_HASH_TAG_BITS) - 1)
#define ZSTD_ROW_HASH_MAX_ENTRIES 64    /* absolute maximum number of entries per row, for all configurations */

#define ZSTD_ROW_HASH_CACHE_MASK (ZSTD_ROW_HASH_CACHE_SIZE - 1)

typedef U64 ZSTD_VecMask;   /* Clarifies when we are interacting with a U64 representing a mask of matches */

/* ZSTD_VecMask_next():
 * Starting from the LSB, returns the idx of the next non-zero bit.
 * Basically counting the nb of trailing zeroes.
 */
MEM_STATIC U32 ZSTD_VecMask_next(ZSTD_VecMask val) {
    return ZSTD_countTrailingZeros64(val);
}
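/* Note (editorial): a ZSTD_VecMask is the result of comparing the 1-byte "tag" of
 * the current position against every tag stored in a row. Iterating over its set
 * bits with ZSTD_VecMask_next(), clearing each visited bit with
 * `matches &= matches - 1`, touches exactly the row entries whose tag matched, so
 * the full byte-by-byte comparison is only performed on those candidates.
 */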

/* ZSTD_row_nextIndex():
 * Returns the next index to insert at within a tagTable row, and updates the "head"
 * value to reflect the update. Essentially cycles backwards from [1, {entries per row})
 */
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextIndex(BYTE* const tagRow, U32 const rowMask) {
    U32 next = (*tagRow-1) & rowMask;
    next += (next == 0) ? rowMask : 0; /* skip first position */
    *tagRow = (BYTE)next;
    return next;
}

/* ZSTD_isAligned():
 * Checks that a pointer is aligned to "align" bytes which must be a power of 2.
 */
MEM_STATIC int ZSTD_isAligned(void const* ptr, size_t align) {
    assert((align & (align - 1)) == 0);
    return (((size_t)ptr) & (align - 1)) == 0;
}

/* ZSTD_row_prefetch():
 * Performs prefetching for the hashTable and tagTable at a given row.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_prefetch(U32 const* hashTable, BYTE const* tagTable, U32 const relRow, U32 const rowLog) {
    PREFETCH_L1(hashTable + relRow);
    if (rowLog >= 5) {
        PREFETCH_L1(hashTable + relRow + 16);
        /* Note: prefetching more of the hash table does not appear to be beneficial for 128-entry rows */
    }
    PREFETCH_L1(tagTable + relRow);
    if (rowLog == 6) {
        PREFETCH_L1(tagTable + relRow + 32);
    }
    assert(rowLog == 4 || rowLog == 5 || rowLog == 6);
    assert(ZSTD_isAligned(hashTable + relRow, 64));                 /* prefetched hash row always 64-byte aligned */
    assert(ZSTD_isAligned(tagTable + relRow, (size_t)1 << rowLog)); /* prefetched tagRow sits on correct multiple of bytes (32,64,128) */
}

/* ZSTD_row_fillHashCache():
 * Fill up the hash cache starting at idx, prefetching up to ZSTD_ROW_HASH_CACHE_SIZE entries,
 * but not beyond iLimit.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_fillHashCache(ZSTD_matchState_t* ms, const BYTE* base,
                                                  U32 const rowLog, U32 const mls,
                                                  U32 idx, const BYTE* const iLimit)
{
    U32 const* const hashTable = ms->hashTable;
    BYTE const* const tagTable = ms->tagTable;
    U32 const hashLog = ms->rowHashLog;
    U32 const maxElemsToPrefetch = (base + idx) > iLimit ? 0 : (U32)(iLimit - (base + idx) + 1);
    U32 const lim = idx + MIN(ZSTD_ROW_HASH_CACHE_SIZE, maxElemsToPrefetch);

    for (; idx < lim; ++idx) {
        U32 const hash = (U32)ZSTD_hashPtrSalted(base + idx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
        U32 const row = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
        ms->hashCache[idx & ZSTD_ROW_HASH_CACHE_MASK] = hash;
    }

    DEBUGLOG(6, "ZSTD_row_fillHashCache(): [%u %u %u %u %u %u %u %u]", ms->hashCache[0], ms->hashCache[1],
                                                     ms->hashCache[2], ms->hashCache[3], ms->hashCache[4],
                                                     ms->hashCache[5], ms->hashCache[6], ms->hashCache[7]);
}

/* ZSTD_row_nextCachedHash():
 * Returns the hash of base + idx, and replaces the hash in the hash cache with the byte at
 * base + idx + ZSTD_ROW_HASH_CACHE_SIZE. Also prefetches the appropriate rows from hashTable and tagTable.
 */
FORCE_INLINE_TEMPLATE U32 ZSTD_row_nextCachedHash(U32* cache, U32 const* hashTable,
                                                  BYTE const* tagTable, BYTE const* base,
                                                  U32 idx, U32 const hashLog,
                                                  U32 const rowLog, U32 const mls,
                                                  U64 const hashSalt)
{
    U32 const newHash = (U32)ZSTD_hashPtrSalted(base+idx+ZSTD_ROW_HASH_CACHE_SIZE, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
    U32 const row = (newHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
    ZSTD_row_prefetch(hashTable, tagTable, row, rowLog);
    {   U32 const hash = cache[idx & ZSTD_ROW_HASH_CACHE_MASK];
        cache[idx & ZSTD_ROW_HASH_CACHE_MASK] = newHash;
        return hash;
    }
}

/* ZSTD_row_update_internalImpl():
 * Updates the hash table with positions starting from updateStartIdx until updateEndIdx.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internalImpl(ZSTD_matchState_t* ms,
                                                        U32 updateStartIdx, U32 const updateEndIdx,
                                                        U32 const mls, U32 const rowLog,
                                                        U32 const rowMask, U32 const useCache)
{
    U32* const hashTable = ms->hashTable;
    BYTE* const tagTable = ms->tagTable;
    U32 const hashLog = ms->rowHashLog;
    const BYTE* const base = ms->window.base;

    DEBUGLOG(6, "ZSTD_row_update_internalImpl(): updateStartIdx=%u, updateEndIdx=%u", updateStartIdx, updateEndIdx);
    for (; updateStartIdx < updateEndIdx; ++updateStartIdx) {
        U32 const hash = useCache ? ZSTD_row_nextCachedHash(ms->hashCache, hashTable, tagTable, base, updateStartIdx, hashLog, rowLog, mls, ms->hashSalt)
                                  : (U32)ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt);
        U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        U32* const row = hashTable + relRow;
        BYTE* tagRow = tagTable + relRow;
        U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);

        assert(hash == ZSTD_hashPtrSalted(base + updateStartIdx, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, ms->hashSalt));
        tagRow[pos] = hash & ZSTD_ROW_HASH_TAG_MASK;
        row[pos] = updateStartIdx;
    }
}

/* ZSTD_row_update_internal():
 * Inserts the byte at ip into the appropriate position in the hash table, and updates ms->nextToUpdate.
 * Skips sections of long matches as is necessary.
 */
FORCE_INLINE_TEMPLATE void ZSTD_row_update_internal(ZSTD_matchState_t* ms, const BYTE* ip,
                                                    U32 const mls, U32 const rowLog,
                                                    U32 const rowMask, U32 const useCache)
{
    U32 idx = ms->nextToUpdate;
    const BYTE* const base = ms->window.base;
    const U32 target = (U32)(ip - base);
    const U32 kSkipThreshold = 384;
    const U32 kMaxMatchStartPositionsToUpdate = 96;
    const U32 kMaxMatchEndPositionsToUpdate = 32;

    if (useCache) {
        /* Only skip positions when using hash cache, i.e.
         * if we are loading a dict, don't skip anything.
         * If we decide to skip, then we only update a set number
         * of positions at the beginning and end of the match.
         */
        if (UNLIKELY(target - idx > kSkipThreshold)) {
            U32 const bound = idx + kMaxMatchStartPositionsToUpdate;
            ZSTD_row_update_internalImpl(ms, idx, bound, mls, rowLog, rowMask, useCache);
            idx = target - kMaxMatchEndPositionsToUpdate;
            ZSTD_row_fillHashCache(ms, base, rowLog, mls, idx, ip+1);
        }
    }
    assert(target >= idx);
    ZSTD_row_update_internalImpl(ms, idx, target, mls, rowLog, rowMask, useCache);
    ms->nextToUpdate = target;
}
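/* Note (editorial): the hash cache holds ZSTD_ROW_HASH_CACHE_SIZE hashes computed
 * ahead of the current position. When position idx is inserted,
 * ZSTD_row_nextCachedHash() returns the hash that was precomputed for idx and
 * replaces it with the hash of idx + ZSTD_ROW_HASH_CACHE_SIZE, prefetching that
 * future row. This hides the hashTable/tagTable cache misses behind useful work.
 */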

/* ZSTD_row_update():
 * External wrapper for ZSTD_row_update_internal(). Used for filling the hashtable during dictionary
 * processing.
 */
void ZSTD_row_update(ZSTD_matchState_t* const ms, const BYTE* ip) {
    const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);
    const U32 rowMask = (1u << rowLog) - 1;
    const U32 mls = MIN(ms->cParams.minMatch, 6 /* mls caps out at 6 */);

    DEBUGLOG(5, "ZSTD_row_update(), rowLog=%u", rowLog);
    ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 0 /* don't use cache */);
}

/* Returns the width of the group of bits that will be set to 1 for a match. Given that
 * not all architectures have an easy movemask instruction, this helps to iterate over
 * groups of bits easier and faster.
 */
FORCE_INLINE_TEMPLATE U32
ZSTD_row_matchMaskGroupWidth(const U32 rowEntries)
{
    assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
    assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
    (void)rowEntries;
#if defined(ZSTD_ARCH_ARM_NEON)
    /* NEON path only works for little endian */
    if (!MEM_isLittleEndian()) {
        return 1;
    }
    if (rowEntries == 16) {
        return 4;
    }
    if (rowEntries == 32) {
        return 2;
    }
    if (rowEntries == 64) {
        return 1;
    }
#endif
    return 1;
}

#if defined(ZSTD_ARCH_X86_SSE2)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getSSEMask(int nbChunks, const BYTE* const src, const BYTE tag, const U32 head)
{
    const __m128i comparisonMask = _mm_set1_epi8((char)tag);
    int matches[4] = {0};
    int i;
    assert(nbChunks == 1 || nbChunks == 2 || nbChunks == 4);
    for (i=0; i<nbChunks; i++) {
        const __m128i chunk = _mm_loadu_si128((const __m128i*)(const void*)(src + 16*i));
        const __m128i equalMask = _mm_cmpeq_epi8(chunk, comparisonMask);
        matches[i] = _mm_movemask_epi8(equalMask);
    }
    if (nbChunks == 1) return ZSTD_rotateRight_U16((U16)matches[0], head);
    if (nbChunks == 2) return ZSTD_rotateRight_U32((U32)matches[1] << 16 | (U32)matches[0], head);
    assert(nbChunks == 4);
    return ZSTD_rotateRight_U64((U64)matches[3] << 48 | (U64)matches[2] << 32 | (U64)matches[1] << 16 | (U64)matches[0], head);
}
#endif

#if defined(ZSTD_ARCH_ARM_NEON)
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getNEONMask(const U32 rowEntries, const BYTE* const src, const BYTE tag, const U32 headGrouped)
{
    assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
    if (rowEntries == 16) {
        /* vshrn_n_u16 shifts by 4 every u16 and narrows to 8 lower bits.
         * After that groups of 4 bits represent the equalMask. We lower
         * all bits except the highest in these groups by doing AND with
         * 0x88 = 0b10001000.
         */
        const uint8x16_t chunk = vld1q_u8(src);
        const uint16x8_t equalMask = vreinterpretq_u16_u8(vceqq_u8(chunk, vdupq_n_u8(tag)));
        const uint8x8_t res = vshrn_n_u16(equalMask, 4);
        const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0);
        return ZSTD_rotateRight_U64(matches, headGrouped) & 0x8888888888888888ull;
    } else if (rowEntries == 32) {
        /* Same idea as with rowEntries == 16 but doing AND with
         * 0x55 = 0b01010101.
         */
        const uint16x8x2_t chunk = vld2q_u16((const uint16_t*)(const void*)src);
        const uint8x16_t chunk0 = vreinterpretq_u8_u16(chunk.val[0]);
        const uint8x16_t chunk1 = vreinterpretq_u8_u16(chunk.val[1]);
        const uint8x16_t dup = vdupq_n_u8(tag);
        const uint8x8_t t0 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk0, dup)), 6);
        const uint8x8_t t1 = vshrn_n_u16(vreinterpretq_u16_u8(vceqq_u8(chunk1, dup)), 6);
        const uint8x8_t res = vsli_n_u8(t0, t1, 4);
        const U64 matches = vget_lane_u64(vreinterpret_u64_u8(res), 0) ;
        return ZSTD_rotateRight_U64(matches, headGrouped) & 0x5555555555555555ull;
    } else { /* rowEntries == 64 */
        const uint8x16x4_t chunk = vld4q_u8(src);
        const uint8x16_t dup = vdupq_n_u8(tag);
        const uint8x16_t cmp0 = vceqq_u8(chunk.val[0], dup);
        const uint8x16_t cmp1 = vceqq_u8(chunk.val[1], dup);
        const uint8x16_t cmp2 = vceqq_u8(chunk.val[2], dup);
        const uint8x16_t cmp3 = vceqq_u8(chunk.val[3], dup);

        const uint8x16_t t0 = vsriq_n_u8(cmp1, cmp0, 1);
        const uint8x16_t t1 = vsriq_n_u8(cmp3, cmp2, 1);
        const uint8x16_t t2 = vsriq_n_u8(t1, t0, 2);
        const uint8x16_t t3 = vsriq_n_u8(t2, t2, 4);
        const uint8x8_t t4 = vshrn_n_u16(vreinterpretq_u16_u8(t3), 4);
        const U64 matches = vget_lane_u64(vreinterpret_u64_u8(t4), 0);
        return ZSTD_rotateRight_U64(matches, headGrouped);
    }
}
#endif

/* Returns a ZSTD_VecMask (U64) that has the nth group (determined by
 * ZSTD_row_matchMaskGroupWidth) of bits set to 1 if the newly-computed "tag"
 * matches the hash at the nth position in a row of the tagTable.
 * Each row is a circular buffer beginning at the value of "headGrouped". So we
 * must rotate the "matches" bitfield to match up with the actual layout of the
 * entries within the hashTable */
FORCE_INLINE_TEMPLATE ZSTD_VecMask
ZSTD_row_getMatchMask(const BYTE* const tagRow, const BYTE tag, const U32 headGrouped, const U32 rowEntries)
{
    const BYTE* const src = tagRow;
    assert((rowEntries == 16) || (rowEntries == 32) || rowEntries == 64);
    assert(rowEntries <= ZSTD_ROW_HASH_MAX_ENTRIES);
    assert(ZSTD_row_matchMaskGroupWidth(rowEntries) * rowEntries <= sizeof(ZSTD_VecMask) * 8);

#if defined(ZSTD_ARCH_X86_SSE2)

    return ZSTD_row_getSSEMask(rowEntries / 16, src, tag, headGrouped);

#else /* SW or NEON-LE */

# if defined(ZSTD_ARCH_ARM_NEON)
    /* This NEON path only works for little endian - otherwise use SWAR below */
    if (MEM_isLittleEndian()) {
        return ZSTD_row_getNEONMask(rowEntries, src, tag, headGrouped);
    }
# endif /* ZSTD_ARCH_ARM_NEON */
    /* SWAR */
    {   const int chunkSize = sizeof(size_t);
        const size_t shiftAmount = ((chunkSize * 8) - chunkSize);
        const size_t xFF = ~((size_t)0);
        const size_t x01 = xFF / 0xFF;
        const size_t x80 = x01 << 7;
        const size_t splatChar = tag * x01;
        ZSTD_VecMask matches = 0;
        int i = rowEntries - chunkSize;
        assert((sizeof(size_t) == 4) || (sizeof(size_t) == 8));
        if (MEM_isLittleEndian()) { /* runtime check so have two loops */
            const size_t extractMagic = (xFF / 0x7F) >> chunkSize;
            do {
                size_t chunk = MEM_readST(&src[i]);
                chunk ^= splatChar;
                chunk = (((chunk | x80) - x01) | chunk) & x80;
                matches <<= chunkSize;
                matches |= (chunk * extractMagic) >> shiftAmount;
                i -= chunkSize;
            } while (i >= 0);
        } else { /* big endian: reverse bits during extraction */
            const size_t msb = xFF ^ (xFF >> 1);
            const size_t extractMagic = (msb / 0x1FF) | msb;
            do {
                size_t chunk = MEM_readST(&src[i]);
                chunk ^= splatChar;
                chunk = (((chunk | x80) - x01) | chunk) & x80;
                matches <<= chunkSize;
                matches |= ((chunk >> 7) * extractMagic) >> shiftAmount;
                i -= chunkSize;
            } while (i >= 0);
        }
        matches = ~matches;
        if (rowEntries == 16) {
            return ZSTD_rotateRight_U16((U16)matches, headGrouped);
        } else if (rowEntries == 32) {
            return ZSTD_rotateRight_U32((U32)matches, headGrouped);
        } else {
            return ZSTD_rotateRight_U64((U64)matches, headGrouped);
        }
    }
#endif
}
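/* Note (editorial): the SWAR fallback implements a branchless "byte == tag" test
 * one machine word at a time. With x01 = 0x0101...01 and x80 = 0x8080...80:
 *     chunk ^= splatChar;                             // matching bytes become 0x00
 *     chunk = (((chunk | x80) - x01) | chunk) & x80;  // 0x80 marks the non-matching bytes
 * The multiply by extractMagic then gathers one bit per byte into the top of the
 * word, and the final `matches = ~matches` flips them so a set bit means "tag equal".
 */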

/* The high-level approach of the SIMD row based match finder is as follows:
 * - Figure out where to insert the new entry:
 *      - Generate a hash from a byte along with an additional 1-byte "short hash". The additional byte is our "tag"
 *      - The hashTable is effectively split into groups or "rows" of 16 or 32 entries of U32, and the hash determines
 *        which row to insert into.
 *      - Determine the correct position within the row to insert the entry into. Each row of 16 or 32 can
 *        be considered as a circular buffer with a "head" index that resides in the tagTable.
 *      - Also insert the "tag" into the equivalent row and position in the tagTable.
 *          - Note: The tagTable has 17 or 33 1-byte entries per row, due to 16 or 32 tags, and 1 "head" entry.
 *                  The 17 or 33 entry rows are spaced out to occur every 32 or 64 bytes, respectively,
 *                  for alignment/performance reasons, leaving some bytes unused.
 * - Use SIMD to efficiently compare the tags in the tagTable to the 1-byte "short hash" and
 *   generate a bitfield that we can cycle through to check the collisions in the hash table.
 * - Pick the longest match.
 */
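/* Note (editorial): concretely, the salted hash is split as in ZSTD_RowFindBestMatch()
 * below:
 *     relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;  // which row (scaled by entries per row)
 *     tag    = hash & ZSTD_ROW_HASH_TAG_MASK;               // 1-byte short hash stored in the tagTable
 * so hashTable[relRow .. relRow + rowEntries - 1] holds that row's positions, and the
 * corresponding tagTable row holds the matching tags plus the row's "head" byte.
 */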
FORCE_INLINE_TEMPLATE
size_t ZSTD_RowFindBestMatch(
                        ZSTD_matchState_t* ms,
                        const BYTE* const ip, const BYTE* const iLimit,
                        size_t* offsetPtr,
                        const U32 mls, const ZSTD_dictMode_e dictMode,
                        const U32 rowLog)
{
    U32* const hashTable = ms->hashTable;
    BYTE* const tagTable = ms->tagTable;
    U32* const hashCache = ms->hashCache;
    const U32 hashLog = ms->rowHashLog;
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictEnd = dictBase + dictLimit;
    const U32 curr = (U32)(ip-base);
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 lowestValid = ms->window.lowLimit;
    const U32 withinMaxDistance = (curr - lowestValid > maxDistance) ? curr - maxDistance : lowestValid;
    const U32 isDictionary = (ms->loadedDictEnd != 0);
    const U32 lowLimit = isDictionary ? lowestValid : withinMaxDistance;
    const U32 rowEntries = (1U << rowLog);
    const U32 rowMask = rowEntries - 1;
    const U32 cappedSearchLog = MIN(cParams->searchLog, rowLog); /* nb of searches is capped at nb entries per row */
    const U32 groupWidth = ZSTD_row_matchMaskGroupWidth(rowEntries);
    const U64 hashSalt = ms->hashSalt;
    U32 nbAttempts = 1U << cappedSearchLog;
    size_t ml=4-1;
    U32 hash;

    /* DMS/DDS variables that may be referenced later */
    const ZSTD_matchState_t* const dms = ms->dictMatchState;

    /* Initialize the following variables to satisfy static analyzer */
    size_t ddsIdx = 0;
    U32 ddsExtraAttempts = 0; /* cctx hash tables are limited in searches, but allow extra searches into DDS */
    U32 dmsTag = 0;
    U32* dmsRow = NULL;
    BYTE* dmsTagRow = NULL;

    if (dictMode == ZSTD_dedicatedDictSearch) {
        const U32 ddsHashLog = dms->cParams.hashLog - ZSTD_LAZY_DDSS_BUCKET_LOG;
        {   /* Prefetch DDS hashtable entry */
            ddsIdx = ZSTD_hashPtr(ip, ddsHashLog, mls) << ZSTD_LAZY_DDSS_BUCKET_LOG;
            PREFETCH_L1(&dms->hashTable[ddsIdx]);
        }
        ddsExtraAttempts = cParams->searchLog > rowLog ? 1U << (cParams->searchLog - rowLog) : 0;
    }

    if (dictMode == ZSTD_dictMatchState) {
        /* Prefetch DMS rows */
        U32* const dmsHashTable = dms->hashTable;
        BYTE* const dmsTagTable = dms->tagTable;
        U32 const dmsHash = (U32)ZSTD_hashPtr(ip, dms->rowHashLog + ZSTD_ROW_HASH_TAG_BITS, mls);
        U32 const dmsRelRow = (dmsHash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        dmsTag = dmsHash & ZSTD_ROW_HASH_TAG_MASK;
        dmsTagRow = (BYTE*)(dmsTagTable + dmsRelRow);
        dmsRow = dmsHashTable + dmsRelRow;
        ZSTD_row_prefetch(dmsHashTable, dmsTagTable, dmsRelRow, rowLog);
    }

    /* Update the hashTable and tagTable up to (but not including) ip */
    if (!ms->lazySkipping) {
        ZSTD_row_update_internal(ms, ip, mls, rowLog, rowMask, 1 /* useCache */);
        hash = ZSTD_row_nextCachedHash(hashCache, hashTable, tagTable, base, curr, hashLog, rowLog, mls, hashSalt);
    } else {
        /* Stop inserting every position when in the lazy skipping mode.
         * The hash cache is also not kept up to date in this mode.
         */
        hash = (U32)ZSTD_hashPtrSalted(ip, hashLog + ZSTD_ROW_HASH_TAG_BITS, mls, hashSalt);
        ms->nextToUpdate = curr;
    }
    ms->hashSaltEntropy += hash; /* collect salt entropy */

    {   /* Get the hash for ip, compute the appropriate row */
        U32 const relRow = (hash >> ZSTD_ROW_HASH_TAG_BITS) << rowLog;
        U32 const tag = hash & ZSTD_ROW_HASH_TAG_MASK;
        U32* const row = hashTable + relRow;
        BYTE* tagRow = (BYTE*)(tagTable + relRow);
        U32 const headGrouped = (*tagRow & rowMask) * groupWidth;
        U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
        size_t numMatches = 0;
        size_t currMatch = 0;
        ZSTD_VecMask matches = ZSTD_row_getMatchMask(tagRow, (BYTE)tag, headGrouped, rowEntries);

        /* Cycle through the matches and prefetch */
        for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
            U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
            U32 const matchIndex = row[matchPos];
            if(matchPos == 0) continue;
            assert(numMatches < rowEntries);
            if (matchIndex < lowLimit)
                break;
            if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
                PREFETCH_L1(base + matchIndex);
            } else {
                PREFETCH_L1(dictBase + matchIndex);
            }
            matchBuffer[numMatches++] = matchIndex;
            --nbAttempts;
        }

        /* Speed opt: insert current byte into hashtable too. This allows us to avoid one iteration of the loop
           in ZSTD_row_update_internal() at the next search. */
        {
            U32 const pos = ZSTD_row_nextIndex(tagRow, rowMask);
            tagRow[pos] = (BYTE)tag;
            row[pos] = ms->nextToUpdate++;
        }

        /* Return the longest match */
        for (; currMatch < numMatches; ++currMatch) {
            U32 const matchIndex = matchBuffer[currMatch];
            size_t currentMl=0;
            assert(matchIndex < curr);
            assert(matchIndex >= lowLimit);

            if ((dictMode != ZSTD_extDict) || matchIndex >= dictLimit) {
                const BYTE* const match = base + matchIndex;
                assert(matchIndex >= dictLimit);   /* ensures this is true if dictMode != ZSTD_extDict */
                /* read 4B starting from (match + ml + 1 - sizeof(U32)) */
                if (MEM_read32(match + ml - 3) == MEM_read32(ip + ml - 3))   /* potentially better */
                    currentMl = ZSTD_count(ip, match, iLimit);
            } else {
                const BYTE* const match = dictBase + matchIndex;
                assert(match+4 <= dictEnd);
                if (MEM_read32(match) == MEM_read32(ip))   /* assumption : matchIndex <= dictLimit-4 (by table construction) */
                    currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dictEnd, prefixStart) + 4;
            }

            /* Save best solution */
            if (currentMl > ml) {
                ml = currentMl;
                *offsetPtr = OFFSET_TO_OFFBASE(curr - matchIndex);
                if (ip+currentMl == iLimit) break; /* best possible, avoids read overflow on next attempt */
            }
        }
    }

    assert(nbAttempts <= (1U << ZSTD_SEARCHLOG_MAX)); /* Check we haven't underflowed. */
    if (dictMode == ZSTD_dedicatedDictSearch) {
        ml = ZSTD_dedicatedDictSearch_lazy_search(offsetPtr, ml, nbAttempts + ddsExtraAttempts, dms,
                                                  ip, iLimit, prefixStart, curr, dictLimit, ddsIdx);
    } else if (dictMode == ZSTD_dictMatchState) {
        /* TODO: Measure and potentially add prefetching to DMS */
        const U32 dmsLowestIndex  = dms->window.dictLimit;
        const BYTE* const dmsBase = dms->window.base;
        const BYTE* const dmsEnd  = dms->window.nextSrc;
        const U32 dmsSize         = (U32)(dmsEnd - dmsBase);
        const U32 dmsIndexDelta   = dictLimit - dmsSize;

        {   U32 const headGrouped = (*dmsTagRow & rowMask) * groupWidth;
            U32 matchBuffer[ZSTD_ROW_HASH_MAX_ENTRIES];
            size_t numMatches = 0;
            size_t currMatch = 0;
            ZSTD_VecMask matches = ZSTD_row_getMatchMask(dmsTagRow, (BYTE)dmsTag, headGrouped, rowEntries);

            for (; (matches > 0) && (nbAttempts > 0); matches &= (matches - 1)) {
                U32 const matchPos = ((headGrouped + ZSTD_VecMask_next(matches)) / groupWidth) & rowMask;
                U32 const matchIndex = dmsRow[matchPos];
                if(matchPos == 0) continue;
                if (matchIndex < dmsLowestIndex)
                    break;
                PREFETCH_L1(dmsBase + matchIndex);
                matchBuffer[numMatches++] = matchIndex;
                --nbAttempts;
            }

            /* Return the longest match */
            for (; currMatch < numMatches; ++currMatch) {
                U32 const matchIndex = matchBuffer[currMatch];
                size_t currentMl=0;
                assert(matchIndex >= dmsLowestIndex);
                assert(matchIndex < curr);

                {   const BYTE* const match = dmsBase + matchIndex;
                    assert(match+4 <= dmsEnd);
                    if (MEM_read32(match) == MEM_read32(ip))
                        currentMl = ZSTD_count_2segments(ip+4, match+4, iLimit, dmsEnd, prefixStart) + 4;
                }

                if (currentMl > ml) {
                    ml = currentMl;
                    assert(curr > matchIndex + dmsIndexDelta);
                    *offsetPtr = OFFSET_TO_OFFBASE(curr - (matchIndex + dmsIndexDelta));
                    if (ip+currentMl == iLimit) break;
                }
            }
        }
    }
    return ml;
}


/**
 * Generate search functions templated on (dictMode, mls, rowLog).
 * These functions are outlined for code size & compilation time.
 * ZSTD_searchMax() dispatches to the correct implementation function.
 *
 * TODO: The start of the search function involves loading and calculating a
 * bunch of constants from the ZSTD_matchState_t. These computations could be
 * done in an initialization function, and saved somewhere in the match state.
 * Then we could pass a pointer to the saved state instead of the match state,
 * and avoid duplicate computations.
 *
 * TODO: Move the match re-winding into searchMax. This improves compression
 * ratio, and unlocks further simplifications with the next TODO.
 *
 * TODO: Try moving the repcode search into searchMax. After the re-winding
 * and repcode search are in searchMax, there is no more logic in the match
 * finder loop that requires knowledge about the dictMode. So we should be
 * able to avoid force inlining it, and we can join the extDict loop with
 * the single segment loop. It should go in searchMax instead of its own
 * function to avoid having multiple virtual function calls per search.
 */

#define ZSTD_BT_SEARCH_FN(dictMode, mls)  ZSTD_BtFindBestMatch_##dictMode##_##mls
#define ZSTD_HC_SEARCH_FN(dictMode, mls)  ZSTD_HcFindBestMatch_##dictMode##_##mls
#define ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog) ZSTD_RowFindBestMatch_##dictMode##_##mls##_##rowLog

#define ZSTD_SEARCH_FN_ATTRS FORCE_NOINLINE

#define GEN_ZSTD_BT_SEARCH_FN(dictMode, mls)                                           \
    ZSTD_SEARCH_FN_ATTRS size_t ZSTD_BT_SEARCH_FN(dictMode, mls)(                      \
            ZSTD_matchState_t* ms,                                                     \
            const BYTE* ip, const BYTE* const iLimit,                                  \
            size_t* offBasePtr)                                                        \
    {                                                                                  \
        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls);                           \
        return ZSTD_BtFindBestMatch(ms, ip, iLimit, offBasePtr, mls, ZSTD_##dictMode); \
    }                                                                                  \

#define GEN_ZSTD_HC_SEARCH_FN(dictMode, mls)                                          \
    ZSTD_SEARCH_FN_ATTRS size_t ZSTD_HC_SEARCH_FN(dictMode, mls)(                     \
            ZSTD_matchState_t* ms,                                                    \
            const BYTE* ip, const BYTE* const iLimit,                                 \
            size_t* offsetPtr)                                                        \
    {                                                                                 \
        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls);                          \
        return ZSTD_HcFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode); \
    }                                                                                 \

#define GEN_ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)                                          \
    ZSTD_SEARCH_FN_ATTRS size_t ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(                     \
            ZSTD_matchState_t* ms,                                                             \
            const BYTE* ip, const BYTE* const iLimit,                                          \
            size_t* offsetPtr)                                                                 \
    {                                                                                          \
        assert(MAX(4, MIN(6, ms->cParams.minMatch)) == mls);                                   \
        assert(MAX(4, MIN(6, ms->cParams.searchLog)) == rowLog);                               \
        return ZSTD_RowFindBestMatch(ms, ip, iLimit, offsetPtr, mls, ZSTD_##dictMode, rowLog); \
    }                                                                                          \

#define ZSTD_FOR_EACH_ROWLOG(X, dictMode, mls) \
    X(dictMode, mls, 4)                        \
    X(dictMode, mls, 5)                        \
    X(dictMode, mls, 6)

#define ZSTD_FOR_EACH_MLS_ROWLOG(X, dictMode) \
    ZSTD_FOR_EACH_ROWLOG(X, dictMode, 4)      \
    ZSTD_FOR_EACH_ROWLOG(X, dictMode, 5)      \
    ZSTD_FOR_EACH_ROWLOG(X, dictMode, 6)

#define ZSTD_FOR_EACH_MLS(X, dictMode) \
    X(dictMode, 4)                     \
    X(dictMode, 5)                     \
    X(dictMode, 6)

#define ZSTD_FOR_EACH_DICT_MODE(X, ...) \
    X(__VA_ARGS__, noDict)              \
    X(__VA_ARGS__, extDict)             \
    X(__VA_ARGS__, dictMatchState)      \
    X(__VA_ARGS__, dedicatedDictSearch)

/* Generate row search fns for each combination of (dictMode, mls, rowLog) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)
/* Generate binary Tree search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)
/* Generate hash chain search fns for each combination of (dictMode, mls) */
ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)
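/* Note (editorial): each generator expands to one outlined wrapper per
 * (dictMode, mls[, rowLog]) combination. For example, GEN_ZSTD_ROW_SEARCH_FN(noDict, 4, 4)
 * defines
 *     size_t ZSTD_RowFindBestMatch_noDict_4_4(ZSTD_matchState_t* ms,
 *                                             const BYTE* ip, const BYTE* const iLimit,
 *                                             size_t* offsetPtr);
 * which simply forwards to ZSTD_RowFindBestMatch() with compile-time-constant
 * template parameters, letting the compiler specialize the hot loop.
 */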
\1389X(__VA_ARGS__, noDict) \1390X(__VA_ARGS__, extDict) \1391X(__VA_ARGS__, dictMatchState) \1392X(__VA_ARGS__, dedicatedDictSearch)13931394/* Generate row search fns for each combination of (dictMode, mls, rowLog) */1395ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS_ROWLOG, GEN_ZSTD_ROW_SEARCH_FN)1396/* Generate binary Tree search fns for each combination of (dictMode, mls) */1397ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_BT_SEARCH_FN)1398/* Generate hash chain search fns for each combination of (dictMode, mls) */1399ZSTD_FOR_EACH_DICT_MODE(ZSTD_FOR_EACH_MLS, GEN_ZSTD_HC_SEARCH_FN)14001401typedef enum { search_hashChain=0, search_binaryTree=1, search_rowHash=2 } searchMethod_e;14021403#define GEN_ZSTD_CALL_BT_SEARCH_FN(dictMode, mls) \1404case mls: \1405return ZSTD_BT_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);1406#define GEN_ZSTD_CALL_HC_SEARCH_FN(dictMode, mls) \1407case mls: \1408return ZSTD_HC_SEARCH_FN(dictMode, mls)(ms, ip, iend, offsetPtr);1409#define GEN_ZSTD_CALL_ROW_SEARCH_FN(dictMode, mls, rowLog) \1410case rowLog: \1411return ZSTD_ROW_SEARCH_FN(dictMode, mls, rowLog)(ms, ip, iend, offsetPtr);14121413#define ZSTD_SWITCH_MLS(X, dictMode) \1414switch (mls) { \1415ZSTD_FOR_EACH_MLS(X, dictMode) \1416}14171418#define ZSTD_SWITCH_ROWLOG(dictMode, mls) \1419case mls: \1420switch (rowLog) { \1421ZSTD_FOR_EACH_ROWLOG(GEN_ZSTD_CALL_ROW_SEARCH_FN, dictMode, mls) \1422} \1423ZSTD_UNREACHABLE; \1424break;14251426#define ZSTD_SWITCH_SEARCH_METHOD(dictMode) \1427switch (searchMethod) { \1428case search_hashChain: \1429ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_HC_SEARCH_FN, dictMode) \1430break; \1431case search_binaryTree: \1432ZSTD_SWITCH_MLS(GEN_ZSTD_CALL_BT_SEARCH_FN, dictMode) \1433break; \1434case search_rowHash: \1435ZSTD_SWITCH_MLS(ZSTD_SWITCH_ROWLOG, dictMode) \1436break; \1437} \1438ZSTD_UNREACHABLE;14391440/**1441* Searches for the longest match at @p ip.1442* Dispatches to the correct implementation function based on the1443* (searchMethod, dictMode, mls, rowLog). We use switch statements1444* here instead of using an indirect function call through a function1445* pointer because after Spectre and Meltdown mitigations, indirect1446* function calls can be very costly, especially in the kernel.1447*1448* NOTE: dictMode and searchMethod should be templated, so those switch1449* statements should be optimized out. 

/**
 * Searches for the longest match at @p ip.
 * Dispatches to the correct implementation function based on the
 * (searchMethod, dictMode, mls, rowLog). We use switch statements
 * here instead of using an indirect function call through a function
 * pointer because after Spectre and Meltdown mitigations, indirect
 * function calls can be very costly, especially in the kernel.
 *
 * NOTE: dictMode and searchMethod should be templated, so those switch
 * statements should be optimized out. Only the mls & rowLog switches
 * should be left.
 *
 * @param ms The match state.
 * @param ip The position to search at.
 * @param iend The end of the input data.
 * @param[out] offsetPtr Stores the match offset into this pointer.
 * @param mls The minimum search length, in the range [4, 6].
 * @param rowLog The row log (if applicable), in the range [4, 6].
 * @param searchMethod The search method to use (templated).
 * @param dictMode The dictMode (templated).
 *
 * @returns The length of the longest match found, or < mls if no match is found.
 * If a match is found its offset is stored in @p offsetPtr.
 */
FORCE_INLINE_TEMPLATE size_t ZSTD_searchMax(
    ZSTD_matchState_t* ms,
    const BYTE* ip,
    const BYTE* iend,
    size_t* offsetPtr,
    U32 const mls,
    U32 const rowLog,
    searchMethod_e const searchMethod,
    ZSTD_dictMode_e const dictMode)
{
    if (dictMode == ZSTD_noDict) {
        ZSTD_SWITCH_SEARCH_METHOD(noDict)
    } else if (dictMode == ZSTD_extDict) {
        ZSTD_SWITCH_SEARCH_METHOD(extDict)
    } else if (dictMode == ZSTD_dictMatchState) {
        ZSTD_SWITCH_SEARCH_METHOD(dictMatchState)
    } else if (dictMode == ZSTD_dedicatedDictSearch) {
        ZSTD_SWITCH_SEARCH_METHOD(dedicatedDictSearch)
    }
    ZSTD_UNREACHABLE;
    return 0;
}
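
/* Usage sketch (illustrative, not upstream code): every caller passes
 * searchMethod and dictMode as compile-time constants, e.g. a row-based
 * noDict parser effectively performs
 *   size_t ofb;
 *   size_t const ml = ZSTD_searchMax(ms, ip, iend, &ofb, mls, rowLog,
 *                                    search_rowHash, ZSTD_noDict);
 * and, because ZSTD_searchMax is force-inlined, the dictMode and
 * searchMethod switches fold away, leaving only the mls/rowLog dispatch. */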

/* *******************************
*  Common parser - lazy strategy
*********************************/

FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_lazy_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const searchMethod_e searchMethod, const U32 depth,
                        ZSTD_dictMode_e const dictMode)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = (searchMethod == search_rowHash) ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 prefixLowestIndex = ms->window.dictLimit;
    const BYTE* const prefixLowest = base + prefixLowestIndex;
    const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
    const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);

    U32 offset_1 = rep[0], offset_2 = rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    const int isDMS = dictMode == ZSTD_dictMatchState;
    const int isDDS = dictMode == ZSTD_dedicatedDictSearch;
    const int isDxS = isDMS || isDDS;
    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const U32 dictLowestIndex      = isDxS ? dms->window.dictLimit : 0;
    const BYTE* const dictBase     = isDxS ? dms->window.base : NULL;
    const BYTE* const dictLowest   = isDxS ? dictBase + dictLowestIndex : NULL;
    const BYTE* const dictEnd      = isDxS ? dms->window.nextSrc : NULL;
    const U32 dictIndexDelta       = isDxS ?
                                     prefixLowestIndex - (U32)(dictEnd - dictBase) :
                                     0;
    const U32 dictAndPrefixLength = (U32)((ip - prefixLowest) + (dictEnd - dictLowest));

    DEBUGLOG(5, "ZSTD_compressBlock_lazy_generic (dictMode=%u) (searchFunc=%u)", (U32)dictMode, (U32)searchMethod);
    ip += (dictAndPrefixLength == 0);
    if (dictMode == ZSTD_noDict) {
        U32 const curr = (U32)(ip - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, ms->cParams.windowLog);
        U32 const maxRep = curr - windowLow;
        if (offset_2 > maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }
    if (isDxS) {
        /* dictMatchState repCode checks don't currently handle repCode == 0
         * disabling. */
        assert(offset_1 <= dictAndPrefixLength);
        assert(offset_2 <= dictAndPrefixLength);
    }

    /* Reset the lazy skipping state */
    ms->lazySkipping = 0;

    if (searchMethod == search_rowHash) {
        ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
    }

    /* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
     * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
     */
    __asm__(".p2align 5");
#endif
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offBase = REPCODE1_TO_OFFBASE;
        const BYTE* start=ip+1;
        DEBUGLOG(7, "search baseline (depth 0)");

        /* check repCode */
        if (isDxS) {
            const U32 repIndex = (U32)(ip - base) + 1 - offset_1;
            const BYTE* repMatch = ((dictMode == ZSTD_dictMatchState || dictMode == ZSTD_dedicatedDictSearch)
                                && repIndex < prefixLowestIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
                const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                if (depth==0) goto _storeSequence;
            }
        }
        if ( dictMode == ZSTD_noDict
          && ((offset_1 > 0) & (MEM_read32(ip+1-offset_1) == MEM_read32(ip+1)))) {
            matchLength = ZSTD_count(ip+1+4, ip+1+4-offset_1, iend) + 4;
            if (depth==0) goto _storeSequence;
        }

        /* first search (depth 0) */
        {   size_t offbaseFound = 999999999;
            size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &offbaseFound, mls, rowLog, searchMethod, dictMode);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offBase = offbaseFound;
        }
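
        /* Note on the skip-ahead below (illustrative arithmetic, assuming
         * kSearchStrength == 8 as defined in zstd_compress_internal.h):
         * step = ((ip - anchor) >> 8) + 1, so step exceeds kLazySkippingStep (8)
         * once ip - anchor >= 8 * 256 = 2KB, which matches the "2KB without a
         * match" threshold mentioned in the comment below. */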
        if (matchLength < 4) {
            size_t const step = ((size_t)(ip-anchor) >> kSearchStrength) + 1;   /* jump faster over incompressible sections */
            ip += step;
            /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
             * In this mode we stop inserting every position into our tables, and only insert
             * positions that we search, which is one in step positions.
             * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
             * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
             * triggered once we've gone 2KB without finding any matches.
             */
            ms->lazySkipping = step > kLazySkippingStep;
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            DEBUGLOG(7, "search depth 1");
            ip ++;
            if ( (dictMode == ZSTD_noDict)
              && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                int const gain2 = (int)(mlRep * 3);
                int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
                if ((mlRep >= 4) && (gain2 > gain1))
                    matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
            }
            if (isDxS) {
                const U32 repIndex = (U32)(ip - base) - offset_1;
                const BYTE* repMatch = repIndex < prefixLowestIndex ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
                if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                    && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                    size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                    int const gain2 = (int)(mlRep * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
                }
            }
            {   size_t ofbCandidate=999999999;
                size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offBase = ofbCandidate, start = ip;
                    continue;   /* search a better one */
            }   }
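
            /* Worked example of the gain comparison above (illustrative numbers):
             * with a current match of length 5 whose offBase has highbit 20
             * (roughly a 1 MB offset), gain1 = 5*4 - 20 + 4 = 4; a new candidate
             * of length 6 whose offBase has highbit 6 scores gain2 = 6*4 - 6 = 18,
             * so the much closer match wins despite being only one byte longer.
             * The +4 here (and +7 at depth 2 below) biases toward keeping the
             * match already found. */
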
            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                DEBUGLOG(7, "search depth 2");
                ip ++;
                if ( (dictMode == ZSTD_noDict)
                  && (offBase) && ((offset_1>0) & (MEM_read32(ip) == MEM_read32(ip - offset_1)))) {
                    size_t const mlRep = ZSTD_count(ip+4, ip+4-offset_1, iend) + 4;
                    int const gain2 = (int)(mlRep * 4);
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
                    if ((mlRep >= 4) && (gain2 > gain1))
                        matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
                }
                if (isDxS) {
                    const U32 repIndex = (U32)(ip - base) - offset_1;
                    const BYTE* repMatch = repIndex < prefixLowestIndex ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
                    if (((U32)((prefixLowestIndex-1) - repIndex) >= 3 /* intentional underflow */)
                        && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                        const BYTE* repMatchEnd = repIndex < prefixLowestIndex ? dictEnd : iend;
                        size_t const mlRep = ZSTD_count_2segments(ip+4, repMatch+4, iend, repMatchEnd, prefixLowest) + 4;
                        int const gain2 = (int)(mlRep * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
                        if ((mlRep >= 4) && (gain2 > gain1))
                            matchLength = mlRep, offBase = REPCODE1_TO_OFFBASE, start = ip;
                    }
                }
                {   size_t ofbCandidate=999999999;
                    size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, dictMode);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offBase = ofbCandidate, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }

        /* NOTE:
         * Pay attention that `start[-value]` can lead to strange undefined behavior
         * notably if `value` is unsigned, resulting in a large positive `-value`.
         */
        /* catch up */
        if (OFFBASE_IS_OFFSET(offBase)) {
            if (dictMode == ZSTD_noDict) {
                while ( ((start > anchor) & (start - OFFBASE_TO_OFFSET(offBase) > prefixLowest))
                     && (start[-1] == (start-OFFBASE_TO_OFFSET(offBase))[-1]) )  /* only search for offset within prefix */
                    { start--; matchLength++; }
            }
            if (isDxS) {
                U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
                const BYTE* match = (matchIndex < prefixLowestIndex) ? dictBase + matchIndex - dictIndexDelta : base + matchIndex;
                const BYTE* const mStart = (matchIndex < prefixLowestIndex) ? dictLowest : prefixLowest;
                while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            }
            offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
        }
        /* store sequence */
_storeSequence:
        {   size_t const litLength = (size_t)(start - anchor);
            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
            anchor = ip = start + matchLength;
        }
        if (ms->lazySkipping) {
            /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
            if (searchMethod == search_rowHash) {
                ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
            }
            ms->lazySkipping = 0;
        }
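
        /* The immediate repcode checks below try to extend the run right after a
         * stored sequence: as long as the 4 bytes at the new anchor equal the bytes
         * at distance offset_2, a zero-literal repcode sequence is emitted and the
         * offsets are swapped, without running the full match finder again. */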
        /* check immediate repcode */
        if (isDxS) {
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex = current2 - offset_2;
                const BYTE* repMatch = repIndex < prefixLowestIndex ?
                        dictBase - dictIndexDelta + repIndex :
                        base + repIndex;
                if ( ((U32)((prefixLowestIndex-1) - (U32)repIndex) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex < prefixLowestIndex ? dictEnd : iend;
                    matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd2, prefixLowest) + 4;
                    offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
                    ip += matchLength;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }

        if (dictMode == ZSTD_noDict) {
            while ( ((ip <= ilimit) & (offset_2>0))
                 && (MEM_read32(ip) == MEM_read32(ip - offset_2)) ) {
                /* store sequence */
                matchLength = ZSTD_count(ip+4, ip+4-offset_2, iend) + 4;
                offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase;   /* swap repcodes */
                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
    }   }   }

    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved1;
    rep[1] = offset_2 ? offset_2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


size_t ZSTD_compressBlock_btlazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy2(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_noDict);
}

size_t ZSTD_compressBlock_greedy(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_noDict);
}

size_t ZSTD_compressBlock_btlazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_lazy2_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_lazy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_greedy_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dictMatchState);
}


size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2, ZSTD_dedicatedDictSearch);
}

size_t ZSTD_compressBlock_lazy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1, ZSTD_dedicatedDictSearch);
}

size_t ZSTD_compressBlock_greedy_dedicatedDictSearch(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0, ZSTD_dedicatedDictSearch);
}

/* Row-based matchfinder */
size_t ZSTD_compressBlock_lazy2_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_noDict);
}

size_t ZSTD_compressBlock_greedy_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_noDict);
}

size_t ZSTD_compressBlock_lazy2_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_lazy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dictMatchState);
}

size_t ZSTD_compressBlock_greedy_dictMatchState_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dictMatchState);
}


size_t ZSTD_compressBlock_lazy2_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2, ZSTD_dedicatedDictSearch);
}

size_t ZSTD_compressBlock_lazy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1, ZSTD_dedicatedDictSearch);
}

size_t ZSTD_compressBlock_greedy_dedicatedDictSearch_row(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    return ZSTD_compressBlock_lazy_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0, ZSTD_dedicatedDictSearch);
}
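
/* Note: the wrappers above instantiate ZSTD_compressBlock_lazy_generic with a
 * fixed (searchMethod, depth, dictMode) triple: "greedy"/"lazy"/"lazy2" map to
 * depth 0/1/2, the "bt" prefix selects search_binaryTree, the "_row" suffix
 * selects search_rowHash, and plain names use search_hashChain. The extDict
 * variants below go through a separate generic function instead. */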

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_lazy_extDict_generic(
                        ZSTD_matchState_t* ms, seqStore_t* seqStore,
                        U32 rep[ZSTD_REP_NUM],
                        const void* src, size_t srcSize,
                        const searchMethod_e searchMethod, const U32 depth)
{
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = searchMethod == search_rowHash ? iend - 8 - ZSTD_ROW_HASH_CACHE_SIZE : iend - 8;
    const BYTE* const base = ms->window.base;
    const U32 dictLimit = ms->window.dictLimit;
    const BYTE* const prefixStart = base + dictLimit;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const dictEnd  = dictBase + dictLimit;
    const BYTE* const dictStart  = dictBase + ms->window.lowLimit;
    const U32 windowLog = ms->cParams.windowLog;
    const U32 mls = BOUNDED(4, ms->cParams.minMatch, 6);
    const U32 rowLog = BOUNDED(4, ms->cParams.searchLog, 6);

    U32 offset_1 = rep[0], offset_2 = rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_lazy_extDict_generic (searchFunc=%u)", (U32)searchMethod);

    /* Reset the lazy skipping state */
    ms->lazySkipping = 0;

    /* init */
    ip += (ip == prefixStart);
    if (searchMethod == search_rowHash) {
        ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
    }

    /* Match Loop */
#if defined(__GNUC__) && defined(__x86_64__)
    /* I've measured a random 5% speed loss on levels 5 & 6 (greedy) when the
     * code alignment is perturbed. To fix the instability align the loop on 32-bytes.
     */
    __asm__(".p2align 5");
#endif
    while (ip < ilimit) {
        size_t matchLength=0;
        size_t offBase = REPCODE1_TO_OFFBASE;
        const BYTE* start=ip+1;
        U32 curr = (U32)(ip-base);

        /* check repCode */
        {   const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr+1, windowLog);
            const U32 repIndex = (U32)(curr+1 - offset_1);
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow */
               & (offset_1 <= curr+1 - windowLow) ) /* note: we are searching at curr+1 */
            if (MEM_read32(ip+1) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                if (depth==0) goto _storeSequence;
        }   }

        /* first search (depth 0) */
        {   size_t ofbCandidate = 999999999;
            size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
            if (ml2 > matchLength)
                matchLength = ml2, start = ip, offBase = ofbCandidate;
        }

        if (matchLength < 4) {
            size_t const step = ((size_t)(ip-anchor) >> kSearchStrength);
            ip += step + 1;   /* jump faster over incompressible sections */
            /* Enter the lazy skipping mode once we are skipping more than 8 bytes at a time.
             * In this mode we stop inserting every position into our tables, and only insert
             * positions that we search, which is one in step positions.
             * The exact cutoff is flexible, I've just chosen a number that is reasonably high,
             * so we minimize the compression ratio loss in "normal" scenarios. This mode gets
             * triggered once we've gone 2KB without finding any matches.
             */
            ms->lazySkipping = step > kLazySkippingStep;
            continue;
        }

        /* let's try to find a better solution */
        if (depth>=1)
        while (ip<ilimit) {
            ip ++;
            curr++;
            /* check repCode */
            if (offBase) {
                const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
                const U32 repIndex = (U32)(curr - offset_1);
                const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                const BYTE* const repMatch = repBase + repIndex;
                if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
                   & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
                if (MEM_read32(ip) == MEM_read32(repMatch)) {
                    /* repcode detected */
                    const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                    size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                    int const gain2 = (int)(repLength * 3);
                    int const gain1 = (int)(matchLength*3 - ZSTD_highbit32((U32)offBase) + 1);
                    if ((repLength >= 4) && (gain2 > gain1))
                        matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
            }   }

            /* search match, depth 1 */
            {   size_t ofbCandidate = 999999999;
                size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
                int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
                int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 4);
                if ((ml2 >= 4) && (gain2 > gain1)) {
                    matchLength = ml2, offBase = ofbCandidate, start = ip;
                    continue;   /* search a better one */
            }   }

            /* let's find an even better one */
            if ((depth==2) && (ip<ilimit)) {
                ip ++;
                curr++;
                /* check repCode */
                if (offBase) {
                    const U32 windowLow = ZSTD_getLowestMatchIndex(ms, curr, windowLog);
                    const U32 repIndex = (U32)(curr - offset_1);
                    const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
                    const BYTE* const repMatch = repBase + repIndex;
                    if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
                       & (offset_1 <= curr - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
                    if (MEM_read32(ip) == MEM_read32(repMatch)) {
                        /* repcode detected */
                        const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                        size_t const repLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                        int const gain2 = (int)(repLength * 4);
                        int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 1);
                        if ((repLength >= 4) && (gain2 > gain1))
                            matchLength = repLength, offBase = REPCODE1_TO_OFFBASE, start = ip;
                }   }

                /* search match, depth 2 */
                {   size_t ofbCandidate = 999999999;
                    size_t const ml2 = ZSTD_searchMax(ms, ip, iend, &ofbCandidate, mls, rowLog, searchMethod, ZSTD_extDict);
                    int const gain2 = (int)(ml2*4 - ZSTD_highbit32((U32)ofbCandidate));   /* raw approx */
                    int const gain1 = (int)(matchLength*4 - ZSTD_highbit32((U32)offBase) + 7);
                    if ((ml2 >= 4) && (gain2 > gain1)) {
                        matchLength = ml2, offBase = ofbCandidate, start = ip;
                        continue;
            }   }   }
            break;  /* nothing found : store previous solution */
        }
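
        /* Note on the catch-up below: in extDict mode the selected match may live
         * in the old segment (dictBase..dictEnd) rather than the current prefix,
         * which is why the backward extension is bounded by mStart (dictStart or
         * prefixStart) and the forward counting above uses ZSTD_count_2segments
         * to continue past the dictEnd boundary into the prefix. */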
        /* catch up */
        if (OFFBASE_IS_OFFSET(offBase)) {
            U32 const matchIndex = (U32)((size_t)(start-base) - OFFBASE_TO_OFFSET(offBase));
            const BYTE* match = (matchIndex < dictLimit) ? dictBase + matchIndex : base + matchIndex;
            const BYTE* const mStart = (matchIndex < dictLimit) ? dictStart : prefixStart;
            while ((start>anchor) && (match>mStart) && (start[-1] == match[-1])) { start--; match--; matchLength++; }  /* catch up */
            offset_2 = offset_1; offset_1 = (U32)OFFBASE_TO_OFFSET(offBase);
        }

        /* store sequence */
_storeSequence:
        {   size_t const litLength = (size_t)(start - anchor);
            ZSTD_storeSeq(seqStore, litLength, anchor, iend, (U32)offBase, matchLength);
            anchor = ip = start + matchLength;
        }
        if (ms->lazySkipping) {
            /* We've found a match, disable lazy skipping mode, and refill the hash cache. */
            if (searchMethod == search_rowHash) {
                ZSTD_row_fillHashCache(ms, base, rowLog, mls, ms->nextToUpdate, ilimit);
            }
            ms->lazySkipping = 0;
        }

        /* check immediate repcode */
        while (ip <= ilimit) {
            const U32 repCurrent = (U32)(ip-base);
            const U32 windowLow = ZSTD_getLowestMatchIndex(ms, repCurrent, windowLog);
            const U32 repIndex = repCurrent - offset_2;
            const BYTE* const repBase = repIndex < dictLimit ? dictBase : base;
            const BYTE* const repMatch = repBase + repIndex;
            if ( ((U32)((dictLimit-1) - repIndex) >= 3) /* intentional overflow : do not test positions overlapping 2 memory segments */
               & (offset_2 <= repCurrent - windowLow) ) /* equivalent to `curr > repIndex >= windowLow` */
            if (MEM_read32(ip) == MEM_read32(repMatch)) {
                /* repcode detected we should take it */
                const BYTE* const repEnd = repIndex < dictLimit ? dictEnd : iend;
                matchLength = ZSTD_count_2segments(ip+4, repMatch+4, iend, repEnd, prefixStart) + 4;
                offBase = offset_2; offset_2 = offset_1; offset_1 = (U32)offBase;   /* swap offset history */
                ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, matchLength);
                ip += matchLength;
                anchor = ip;
                continue;   /* faster when present ... (?) */
*/2092}2093break;2094} }20952096/* Save reps for next block */2097rep[0] = offset_1;2098rep[1] = offset_2;20992100/* Return the last literals size */2101return (size_t)(iend - anchor);2102}210321042105size_t ZSTD_compressBlock_greedy_extDict(2106ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],2107void const* src, size_t srcSize)2108{2109return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 0);2110}21112112size_t ZSTD_compressBlock_lazy_extDict(2113ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],2114void const* src, size_t srcSize)21152116{2117return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 1);2118}21192120size_t ZSTD_compressBlock_lazy2_extDict(2121ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],2122void const* src, size_t srcSize)21232124{2125return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_hashChain, 2);2126}21272128size_t ZSTD_compressBlock_btlazy2_extDict(2129ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],2130void const* src, size_t srcSize)21312132{2133return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_binaryTree, 2);2134}21352136size_t ZSTD_compressBlock_greedy_extDict_row(2137ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],2138void const* src, size_t srcSize)2139{2140return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 0);2141}21422143size_t ZSTD_compressBlock_lazy_extDict_row(2144ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],2145void const* src, size_t srcSize)21462147{2148return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 1);2149}21502151size_t ZSTD_compressBlock_lazy2_extDict_row(2152ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],2153void const* src, size_t srcSize)2154{2155return ZSTD_compressBlock_lazy_extDict_generic(ms, seqStore, rep, src, srcSize, search_rowHash, 2);2156}215721582159