Path: blob/master/Utilities/cmzstd/lib/compress/zstd_fast.c
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"

static void ZSTD_fillHashTableForCDict(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_full for filling CDict tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_full);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        {   size_t const hashAndTag = ZSTD_hashPtr(ip, hBits, mls);
            ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr);   }

        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hashAndTag = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hashAndTag >> ZSTD_SHORT_CACHE_TAG_BITS] == 0) {  /* not yet filled */
                    ZSTD_writeTaggedIndex(hashTable, hashAndTag, curr + p);
    }   }   }   }
}
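
/* Added illustration (not part of upstream zstd; compiled out): a minimal
 * sketch of the tagged-entry scheme the CDict fill above relies on. The hash
 * is computed with hashLog + ZSTD_SHORT_CACHE_TAG_BITS bits; the high bits
 * index the table while the low bits form a short tag packed next to the
 * position index, letting a search reject most stale slots without reading
 * the input. The helper names and the exact packing below are assumptions
 * made for illustration, not upstream definitions. */
#if 0
static U32 packTaggedIndex_sketch(size_t hashAndTag, U32 index)
{
    U32 const tagMask = (1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1;
    /* tag in the low bits, position index in the high bits */
    return (index << ZSTD_SHORT_CACHE_TAG_BITS) | ((U32)hashAndTag & tagMask);
}
static int tagsMatch_sketch(U32 packedEntry, size_t hashAndTag)
{
    U32 const tagMask = (1u << ZSTD_SHORT_CACHE_TAG_BITS) - 1;
    return (packedEntry & tagMask) == ((U32)hashAndTag & tagMask);
}
#endif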

static void ZSTD_fillHashTableForCCtx(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32  const hBits = cParams->hashLog;
    U32  const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Currently, we always use ZSTD_dtlm_fast for filling CCtx tables.
     * Feel free to remove this assert if there's a good reason! */
    assert(dtlm == ZSTD_dtlm_fast);

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const curr = (U32)(ip - base);
        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
        hashTable[hash0] = curr;
        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) {  /* not yet filled */
                    hashTable[hash] = curr + p;
    }   }   }   }
}

void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm,
                        ZSTD_tableFillPurpose_e tfp)
{
    if (tfp == ZSTD_tfp_forCDict) {
        ZSTD_fillHashTableForCDict(ms, end, dtlm);
    } else {
        ZSTD_fillHashTableForCCtx(ms, end, dtlm);
    }
}


/**
 * If you squint hard enough (and ignore repcodes), the search operation at any
 * given position is broken into 4 stages:
 *
 * 1. Hash   (map position to hash value via input read)
 * 2. Lookup (map hash val to index via hashtable read)
 * 3. Load   (map index to value at that position via input read)
 * 4. Compare
 *
 * Each of these steps involves a memory read at an address which is computed
 * from the previous step. This means these steps must be sequenced and their
 * latencies are cumulative.
 *
 * Rather than do 1->2->3->4 sequentially for a single position before moving
 * onto the next, this implementation interleaves these operations across the
 * next few positions:
 *
 * R = Repcode Read & Compare
 * H = Hash
 * T = Table Lookup
 * M = Match Read & Compare
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | ... M
 * N+1 | ...   TM
 * N+2 |     R H   T M
 * N+3 |         H     TM
 * N+4 |             R H   T M
 * N+5 |                 H   ...
 * N+6 |                   R ...
 *
 * This is very much analogous to the pipelining of execution in a CPU. And just
 * like a CPU, we have to dump the pipeline when we find a match (i.e., take a
 * branch).
 *
 * When this happens, we throw away our current state, and do the following prep
 * to re-enter the loop:
 *
 * Pos | Time -->
 * ----+-------------------
 * N   | H T
 * N+1 |   H
 *
 * This is also the work we do at the beginning to enter the loop initially.
 */
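
/* Added sketch (not part of upstream zstd; compiled out): the interleaving
 * idea above in its simplest form. While the match for position i is being
 * loaded and compared, the hash for position i+1 has already been computed,
 * so its table lookup can issue without waiting on the compare. The real
 * loop below additionally handles repcodes, step acceleration, and careful
 * write-back ordering; this sketch also assumes every table entry is a valid
 * in-prefix index, which the real code must check against prefixStartIndex. */
#if 0
static const BYTE* pipelinedProbe_sketch(const BYTE* ip, const BYTE* ilimit,
                                         U32* hashTable, const BYTE* base,
                                         U32 hlog, U32 mls)
{
    size_t hashNext = ZSTD_hashPtr(ip, hlog, mls);       /* H for first pos */
    while (ip < ilimit) {
        size_t const hashCur = hashNext;
        U32 const idx = hashTable[hashCur];              /* T: table lookup */
        hashNext = ZSTD_hashPtr(ip + 1, hlog, mls);      /* H for next pos, overlapping M */
        hashTable[hashCur] = (U32)(ip - base);           /* write back current pos */
        if (MEM_read32(base + idx) == MEM_read32(ip))    /* M: load + compare */
            return ip;                     /* match found: dump the pipeline */
        ip++;
    }
    return NULL;
}
#endif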
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_noDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = hasStep ? (cParams->targetLength + !(cParams->targetLength) + 1) : 2;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;

    const BYTE* anchor = istart;
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;

    U32 rep_offset1 = rep[0];
    U32 rep_offset2 = rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    U32 mval; /* src value at match idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;

    /* ip0 and ip1 are always adjacent. The targetLength skipping and
     * uncompressibility acceleration is applied to every other position,
     * matching the behavior of #1562. step therefore represents the gap
     * between pairs of positions, from ip0 to ip2 or ip1 to ip3. */
    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
    ip0 += (ip0 == prefixStart);
    {   U32 const curr = (U32)(ip0 - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, curr, cParams->windowLog);
        U32 const maxRep = curr - windowLow;
        if (rep_offset2 > maxRep) offsetSaved2 = rep_offset2, rep_offset2 = 0;
        if (rep_offset1 > maxRep) offsetSaved1 = rep_offset1, rep_offset1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];

    do {
        /* load repcode match for ip[2] */
        const U32 rval = MEM_read32(ip2 - rep_offset1);

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* check repcode at ip[2] */
        if ((MEM_read32(ip2) == rval) & (rep_offset1 > 0)) {
            ip0 = ip2;
            match0 = ip0 - rep_offset1;
            mLength = ip0[-1] == match0[-1];
            ip0 -= mLength;
            match0 -= mLength;
            offcode = REPCODE1_TO_OFFBASE;
            mLength += 4;

            /* First write next hash table entry; we've already calculated it.
             * This write is known to be safe because ip1 is before the
             * repcode (ip2). */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _match;
        }

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */

            /* First write next hash table entry; we've already calculated it.
             * This write is known to be safe because ip1 == ip0 + 1, so
             * we know we will resume searching after ip1 */
            hashTable[hash1] = (U32)(ip1 - base);

            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        /* load match for ip[0] */
        if (idx >= prefixStartIndex) {
            mval = MEM_read32(base + idx);
        } else {
            mval = MEM_read32(ip0) ^ 1; /* guaranteed to not match. */
        }

        /* check match at ip[0] */
        if (MEM_read32(ip0) == mval) {
            /* found a match! */

            /* first write next hash table entry; we've already calculated it */
            if (step <= 4) {
                /* We need to avoid writing an index into the hash table >= the
                 * position at which we will pick up our searching after we've
                 * taken this match.
                 *
                 * The minimum possible match has length 4, so the earliest ip0
                 * can be after we take this match will be the current ip0 + 4.
                 * ip1 is ip0 + step - 1. If ip1 is >= ip0 + 4, we can't safely
                 * write this position.
                 */
                hashTable[hash1] = (U32)(ip1 - base);
            }

            goto _offset;
        }

        /* lookup ip[1] */
        idx = hashTable[hash1];

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* When the repcodes are outside of the prefix, we set them to zero before the loop.
     * When the offsets are still zero, we need to restore them after the block to have a correct
     * repcode history. If only one offset was invalid, it is easy. The tricky case is when both
     * offsets were invalid. We need to figure out which offset to refill with.
     *     - If both offsets are zero they are in the same order.
     *     - If both offsets are non-zero, we won't restore the offsets from `offsetSaved[12]`.
     *     - If only one is zero, we need to decide which offset to restore.
     *         - If rep_offset1 is non-zero, then rep_offset2 must be offsetSaved1.
     *         - It is impossible for rep_offset2 to be non-zero while rep_offset1 is zero.
     *
     * So if rep_offset1 started invalid (offsetSaved1 != 0) and became valid (rep_offset1 != 0), then
     * set rep[0] = rep_offset1 and rep[1] = offsetSaved1.
     */
    offsetSaved2 = ((offsetSaved1 != 0) && (rep_offset1 != 0)) ? offsetSaved1 : offsetSaved2;
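
    /* Added worked example (not upstream): suppose rep[] = {24, 96} on entry,
     * but maxRep is only 10, so both offsets were zeroed above with
     * offsetSaved1 = 24 and offsetSaved2 = 96. If the loop then finds one real
     * match at offset 57, rep_offset1 becomes 57 while rep_offset2 inherits
     * the old rep_offset1, which is still 0. The assignment above then routes
     * offsetSaved1 into offsetSaved2, so the block exits with rep[0] = 57 and
     * rep[1] = 24: the old rep[0], demoted one slot by the new match. */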

    /* save reps for next block */
    rep[0] = rep_offset1 ? rep_offset1 : offsetSaved1;
    rep[1] = rep_offset2 ? rep_offset2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx */

    /* Compute the offset code. */
    match0 = base + idx;
    rep_offset2 = rep_offset1;
    rep_offset1 = (U32)(ip0-match0);
    offcode = OFFSET_TO_OFFBASE(rep_offset1);
    mLength = 4;

    /* Count the backwards match length. */
    while (((ip0>anchor) & (match0>prefixStart)) && (ip0[-1] == match0[-1])) {
        ip0--;
        match0--;
        mLength++;
    }

_match: /* Requires: ip0, match0, offcode */

    /* Count the forward length. */
    mLength += ZSTD_count(ip0 + mLength, match0 + mLength, iend);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        if (rep_offset2 > 0) { /* rep_offset2==0 means rep_offset2 is invalidated */
            while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - rep_offset2)) ) {
                /* store sequence */
                size_t const rLength = ZSTD_count(ip0+4, ip0+4-rep_offset2, iend) + 4;
                { U32 const tmpOff = rep_offset2; rep_offset2 = rep_offset1; rep_offset1 = tmpOff; } /* swap rep_offset2 <=> rep_offset1 */
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += rLength;
                ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, REPCODE1_TO_OFFBASE, rLength);
                anchor = ip0;
                continue;   /* faster when present (confirmed on gcc-8) ... (?) */
        }   }   }

    goto _start;
}

#define ZSTD_GEN_FAST_FN(dictMode, mls, step)                                                            \
    static size_t ZSTD_compressBlock_fast_##dictMode##_##mls##_##step(                                   \
            ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],                          \
            void const* src, size_t srcSize)                                                             \
    {                                                                                                    \
        return ZSTD_compressBlock_fast_##dictMode##_generic(ms, seqStore, rep, src, srcSize, mls, step); \
    }

ZSTD_GEN_FAST_FN(noDict, 4, 1)
ZSTD_GEN_FAST_FN(noDict, 5, 1)
ZSTD_GEN_FAST_FN(noDict, 6, 1)
ZSTD_GEN_FAST_FN(noDict, 7, 1)

ZSTD_GEN_FAST_FN(noDict, 4, 0)
ZSTD_GEN_FAST_FN(noDict, 5, 0)
ZSTD_GEN_FAST_FN(noDict, 6, 0)
ZSTD_GEN_FAST_FN(noDict, 7, 0)

size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    if (ms->cParams.targetLength > 1) {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_1(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_1(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_1(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_1(ms, seqStore, rep, src, srcSize);
        }
    } else {
        switch(mls)
        {
        default: /* includes case 3 */
        case 4 :
            return ZSTD_compressBlock_fast_noDict_4_0(ms, seqStore, rep, src, srcSize);
        case 5 :
            return ZSTD_compressBlock_fast_noDict_5_0(ms, seqStore, rep, src, srcSize);
        case 6 :
            return ZSTD_compressBlock_fast_noDict_6_0(ms, seqStore, rep, src, srcSize);
        case 7 :
            return ZSTD_compressBlock_fast_noDict_7_0(ms, seqStore, rep, src, srcSize);
        }
    }
}
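
/* Added note (not upstream): the hasStep specialization chosen above pairs
 * with the stepSize formula at the top of ZSTD_compressBlock_fast_noDict_generic.
 * With targetLength <= 1 the _0 variants run with a fixed stepSize of 2; with
 * targetLength == 3, for example, the _1 variants run with
 * stepSize = 3 + !(3) + 1 = 4. The !(targetLength) term appears to matter only
 * for targetLength == 0, which this dispatcher never routes to a _1 variant. */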

FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip0 = istart;
    const BYTE* ip1 = ip0 + stepSize; /* we assert below that stepSize >= 1 */
    const BYTE* anchor = istart;
    const U32   prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex       = dms->window.dictLimit;
    const BYTE* const dictBase     = dms->window.base;
    const BYTE* const dictStart    = dictBase + dictStartIndex;
    const BYTE* const dictEnd      = dms->window.nextSrc;
    const U32 dictIndexDelta       = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength  = (U32)(istart - prefixStart + dictEnd - dictStart);
    const U32 dictHBits            = dictCParams->hashLog + ZSTD_SHORT_CACHE_TAG_BITS;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    if (ms->prefetchCDictTables) {
        size_t const hashTableBytes = (((size_t)1) << dictCParams->hashLog) * sizeof(U32);
        PREFETCH_AREA(dictHashTable, hashTableBytes)
    }

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip0 += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

    /* Outer search loop */
    assert(stepSize >= 1);
    while (ip1 <= ilimit) {   /* repcode check at (ip0 + 1) is safe because ip0 < ip1 */
        size_t mLength;
        size_t hash0 = ZSTD_hashPtr(ip0, hlog, mls);

        size_t const dictHashAndTag0 = ZSTD_hashPtr(ip0, dictHBits, mls);
        U32 dictMatchIndexAndTag = dictHashTable[dictHashAndTag0 >> ZSTD_SHORT_CACHE_TAG_BITS];
        int dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag0);

        U32 matchIndex = hashTable[hash0];
        U32 curr = (U32)(ip0 - base);
        size_t step = stepSize;
        const size_t kStepIncr = 1 << kSearchStrength;
        const BYTE* nextStep = ip0 + kStepIncr;

        /* Inner search loop */
        while (1) {
            const BYTE* match = base + matchIndex;
            const U32 repIndex = curr + 1 - offset_1;
            const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                                   dictBase + (repIndex - dictIndexDelta) :
                                   base + repIndex;
            const size_t hash1 = ZSTD_hashPtr(ip1, hlog, mls);
            size_t const dictHashAndTag1 = ZSTD_hashPtr(ip1, dictHBits, mls);
            hashTable[hash0] = curr;   /* update hash table */

            if (((U32) ((prefixStartIndex - 1) - repIndex) >=
                 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
                && (MEM_read32(repMatch) == MEM_read32(ip0 + 1))) {
                const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                mLength = ZSTD_count_2segments(ip0 + 1 + 4, repMatch + 4, iend, repMatchEnd, prefixStart) + 4;
                ip0++;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, REPCODE1_TO_OFFBASE, mLength);
                break;
            }

            if (dictTagsMatch) {
                /* Found a possible dict match */
                const U32 dictMatchIndex = dictMatchIndexAndTag >> ZSTD_SHORT_CACHE_TAG_BITS;
                const BYTE* dictMatch = dictBase + dictMatchIndex;
                if (dictMatchIndex > dictStartIndex &&
                    MEM_read32(dictMatch) == MEM_read32(ip0)) {
                    /* To replicate extDict parse behavior, we only use dict matches when the normal matchIndex is invalid */
                    if (matchIndex <= prefixStartIndex) {
                        U32 const offset = (U32) (curr - dictMatchIndex - dictIndexDelta);
                        mLength = ZSTD_count_2segments(ip0 + 4, dictMatch + 4, iend, dictEnd, prefixStart) + 4;
                        while (((ip0 > anchor) & (dictMatch > dictStart))
                             && (ip0[-1] == dictMatch[-1])) {
                            ip0--;
                            dictMatch--;
                            mLength++;
                        } /* catch up */
                        offset_2 = offset_1;
                        offset_1 = offset;
                        ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                        break;
                    }
                }
            }

            if (matchIndex > prefixStartIndex && MEM_read32(match) == MEM_read32(ip0)) {
                /* found a regular match */
                U32 const offset = (U32) (ip0 - match);
                mLength = ZSTD_count(ip0 + 4, match + 4, iend) + 4;
                while (((ip0 > anchor) & (match > prefixStart))
                     && (ip0[-1] == match[-1])) {
                    ip0--;
                    match--;
                    mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t) (ip0 - anchor), anchor, iend, OFFSET_TO_OFFBASE(offset), mLength);
                break;
            }

            /* Prepare for next iteration */
            dictMatchIndexAndTag = dictHashTable[dictHashAndTag1 >> ZSTD_SHORT_CACHE_TAG_BITS];
            dictTagsMatch = ZSTD_comparePackedTags(dictMatchIndexAndTag, dictHashAndTag1);
            matchIndex = hashTable[hash1];

            if (ip1 >= nextStep) {
                step++;
                nextStep += kStepIncr;
            }
            ip0 = ip1;
            ip1 = ip1 + step;
            if (ip1 > ilimit) goto _cleanup;

            curr = (U32)(ip0 - base);
            hash0 = hash1;
        }   /* end inner search loop */

        /* match found */
        assert(mLength);
        ip0 += mLength;
        anchor = ip0;

        if (ip0 <= ilimit) {
            /* Fill Table */
            assert(base+curr+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+curr+2, hlog, mls)] = curr+2;  /* here because curr+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            /* check immediate repcode */
            while (ip0 <= ilimit) {
                U32 const current2 = (U32)(ip0-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip0))) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = current2;
                    ip0 += repLength2;
                    anchor = ip0;
                    continue;
                }
                break;
            }
        }

        /* Prepare for next iteration */
        assert(ip0 == anchor);
        ip1 = ip0 + stepSize;
    }

_cleanup:
    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}


ZSTD_GEN_FAST_FN(dictMatchState, 4, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 5, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 6, 0)
ZSTD_GEN_FAST_FN(dictMatchState, 7, 0)

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_7_0(ms, seqStore, rep, src, srcSize);
    }
}
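
/* Added sketch (not part of upstream zstd; compiled out): the two-segment
 * match counting that ZSTD_count_2segments performs for the dictMatchState
 * parse above and the extDict parse below. A match that starts in the
 * dictionary segment may run off its end (mEnd) and continue at the start of
 * the prefix segment (iStart). The names and the exact upstream signature
 * are assumptions for illustration. */
#if 0
static size_t count2Segments_sketch(const BYTE* ip, const BYTE* match,
                                    const BYTE* iEnd, const BYTE* mEnd,
                                    const BYTE* iStart)
{
    const BYTE* const ipStart = ip;
    /* first segment: compare until the match side reaches its segment end */
    while (ip < iEnd && match < mEnd && *ip == *match) { ip++; match++; }
    /* if the match ran off the dictionary, continue against the prefix */
    if (match == mEnd) {
        match = iStart;
        while (ip < iEnd && *ip == *match) { ip++; match++; }
    }
    return (size_t)(ip - ipStart);
}
#endif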

static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls, U32 const hasStep)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* anchor = istart;
    const U32   endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32   lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32   dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32   dictLimit = ms->window.dictLimit;
    const U32   prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved1 = 0, offsetSaved2 = 0;

    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* ip2;
    const BYTE* ip3;
    U32 current0;


    size_t hash0; /* hash for ip0 */
    size_t hash1; /* hash for ip1 */
    U32 idx; /* match idx for ip0 */
    const BYTE* idxBase; /* base pointer for idx */

    U32 offcode;
    const BYTE* match0;
    size_t mLength;
    const BYTE* matchEnd = 0; /* initialize to avoid warning, assert != 0 later */

    size_t step;
    const BYTE* nextStep;
    const size_t kStepIncr = (1 << (kSearchStrength - 1));

    (void)hasStep; /* not currently specialized on whether it's accelerated */

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast(ms, seqStore, rep, src, srcSize);

    {   U32 const curr = (U32)(ip0 - base);
        U32 const maxRep = curr - dictStartIndex;
        if (offset_2 >= maxRep) offsetSaved2 = offset_2, offset_2 = 0;
        if (offset_1 >= maxRep) offsetSaved1 = offset_1, offset_1 = 0;
    }

    /* start each op */
_start: /* Requires: ip0 */

    step = stepSize;
    nextStep = ip0 + kStepIncr;

    /* calculate positions, ip0 - anchor == 0, so we skip step calc */
    ip1 = ip0 + 1;
    ip2 = ip0 + step;
    ip3 = ip2 + 1;

    if (ip3 >= ilimit) {
        goto _cleanup;
    }

    hash0 = ZSTD_hashPtr(ip0, hlog, mls);
    hash1 = ZSTD_hashPtr(ip1, hlog, mls);

    idx = hashTable[hash0];
    idxBase = idx < prefixStartIndex ? dictBase : base;

    do {
        {   /* load repcode match for ip[2] */
            U32 const current2 = (U32)(ip2 - base);
            U32 const repIndex = current2 - offset_1;
            const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
            U32 rval;
            if ( ((U32)(prefixStartIndex - repIndex) >= 4) /* intentional underflow */
               & (offset_1 > 0) ) {
                rval = MEM_read32(repBase + repIndex);
            } else {
                rval = MEM_read32(ip2) ^ 1; /* guaranteed to not match. */
            }

            /* write back hash table entry */
            current0 = (U32)(ip0 - base);
            hashTable[hash0] = current0;

            /* check repcode at ip[2] */
            if (MEM_read32(ip2) == rval) {
                ip0 = ip2;
                match0 = repBase + repIndex;
                matchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
                assert((match0 != prefixStart) & (match0 != dictStart));
                mLength = ip0[-1] == match0[-1];
                ip0 -= mLength;
                match0 -= mLength;
                offcode = REPCODE1_TO_OFFBASE;
                mLength += 4;
                goto _match;
        }   }

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip3;

        /* write back hash table entry */
        current0 = (U32)(ip0 - base);
        hashTable[hash0] = current0;

        {   /* load match for ip[0] */
            U32 const mval = idx >= dictStartIndex ?
                    MEM_read32(idxBase + idx) :
                    MEM_read32(ip0) ^ 1; /* guaranteed not to match */

            /* check match at ip[0] */
            if (MEM_read32(ip0) == mval) {
                /* found a match! */
                goto _offset;
        }   }

        /* lookup ip[1] */
        idx = hashTable[hash1];
        idxBase = idx < prefixStartIndex ? dictBase : base;

        /* hash ip[2] */
        hash0 = hash1;
        hash1 = ZSTD_hashPtr(ip2, hlog, mls);

        /* advance to next positions */
        ip0 = ip1;
        ip1 = ip2;
        ip2 = ip0 + step;
        ip3 = ip1 + step;

        /* calculate step */
        if (ip2 >= nextStep) {
            step++;
            PREFETCH_L1(ip1 + 64);
            PREFETCH_L1(ip1 + 128);
            nextStep += kStepIncr;
        }
    } while (ip3 < ilimit);

_cleanup:
    /* Note that there are probably still a couple positions we could search.
     * However, it seems to be a meaningful performance hit to try to search
     * them. So let's not. */

    /* If offset_1 started invalid (offsetSaved1 != 0) and became valid (offset_1 != 0),
     * rotate saved offsets. See comment in ZSTD_compressBlock_fast_noDict for more context. */
    offsetSaved2 = ((offsetSaved1 != 0) && (offset_1 != 0)) ? offsetSaved1 : offsetSaved2;

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved1;
    rep[1] = offset_2 ? offset_2 : offsetSaved2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);

_offset: /* Requires: ip0, idx, idxBase */

    /* Compute the offset code. */
    {   U32 const offset = current0 - idx;
        const BYTE* const lowMatchPtr = idx < prefixStartIndex ? dictStart : prefixStart;
        matchEnd = idx < prefixStartIndex ? dictEnd : iend;
        match0 = idxBase + idx;
        offset_2 = offset_1;
        offset_1 = offset;
        offcode = OFFSET_TO_OFFBASE(offset);
        mLength = 4;

        /* Count the backwards match length. */
        while (((ip0>anchor) & (match0>lowMatchPtr)) && (ip0[-1] == match0[-1])) {
            ip0--;
            match0--;
            mLength++;
    }   }

_match: /* Requires: ip0, match0, offcode, matchEnd */

    /* Count the forward length. */
    assert(matchEnd != 0);
    mLength += ZSTD_count_2segments(ip0 + mLength, match0 + mLength, iend, matchEnd, prefixStart);

    ZSTD_storeSeq(seqStore, (size_t)(ip0 - anchor), anchor, iend, offcode, mLength);

    ip0 += mLength;
    anchor = ip0;

    /* write next hash table entry */
    if (ip1 < ip0) {
        hashTable[hash1] = (U32)(ip1 - base);
    }

    /* Fill table and check for immediate repcode. */
    if (ip0 <= ilimit) {
        /* Fill Table */
        assert(base+current0+2 > istart);  /* check base overflow */
        hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
        hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

        while (ip0 <= ilimit) {
            U32 const repIndex2 = (U32)(ip0-base) - offset_2;
            const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
            if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 > 0))  /* intentional underflow */
               && (MEM_read32(repMatch2) == MEM_read32(ip0)) ) {
                const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                size_t const repLength2 = ZSTD_count_2segments(ip0+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
                ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, REPCODE1_TO_OFFBASE, repLength2);
                hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                ip0 += repLength2;
                anchor = ip0;
                continue;
            }
            break;
    }   }

    goto _start;
}

ZSTD_GEN_FAST_FN(extDict, 4, 0)
ZSTD_GEN_FAST_FN(extDict, 5, 0)
ZSTD_GEN_FAST_FN(extDict, 6, 0)
ZSTD_GEN_FAST_FN(extDict, 7, 0)

size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_4_0(ms, seqStore, rep, src, srcSize);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_5_0(ms, seqStore, rep, src, srcSize);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_6_0(ms, seqStore, rep, src, srcSize);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_7_0(ms, seqStore, rep, src, srcSize);
    }
}