GitHub Repository: freebsd/freebsd-src
Path: blob/main/sys/contrib/openzfs/module/zstd/lib/compress/zstd_fast.c
// SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0-only
/*
 * Copyright (c) 2016-2020, Yann Collet, Facebook, Inc.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#include "zstd_compress_internal.h"  /* ZSTD_hashPtr, ZSTD_count, ZSTD_storeSeq */
#include "zstd_fast.h"

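/* ZSTD_fillHashTable() :
 * Pre-loads the single-probe hash table with positions from content already
 * in the window (typically a dictionary or prefix), from ms->nextToUpdate up
 * to `end`, so the search loops below can find matches into it. */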
void ZSTD_fillHashTable(ZSTD_matchState_t* ms,
                        const void* const end,
                        ZSTD_dictTableLoadMethod_e dtlm)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hBits = cParams->hashLog;
    U32 const mls = cParams->minMatch;
    const BYTE* const base = ms->window.base;
    const BYTE* ip = base + ms->nextToUpdate;
    const BYTE* const iend = ((const BYTE*)end) - HASH_READ_SIZE;
    const U32 fastHashFillStep = 3;

    /* Always insert every fastHashFillStep position into the hash table.
     * Insert the other positions if their hash entry is empty.
     */
    for ( ; ip + fastHashFillStep < iend + 2; ip += fastHashFillStep) {
        U32 const current = (U32)(ip - base);
        size_t const hash0 = ZSTD_hashPtr(ip, hBits, mls);
        hashTable[hash0] = current;
        if (dtlm == ZSTD_dtlm_fast) continue;
        /* Only load extra positions for ZSTD_dtlm_full */
        {   U32 p;
            for (p = 1; p < fastHashFillStep; ++p) {
                size_t const hash = ZSTD_hashPtr(ip + p, hBits, mls);
                if (hashTable[hash] == 0) {  /* not yet filled */
                    hashTable[hash] = current + p;
    }   }   }   }
}

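/* ZSTD_compressBlock_fast_generic() :
 * Greedy match finder for the "fast" strategy over the current prefix only
 * (no attached dictionary, no external segment). Two positions (ip0, ip1)
 * are probed per iteration, and the stride grows the longer the loop goes
 * without finding a match. */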
FORCE_INLINE_TEMPLATE size_t
ZSTD_compressBlock_fast_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize,
        U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    size_t const stepSize = cParams->targetLength + !(cParams->targetLength) + 1;
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    /* We check ip0 (ip + 0) and ip1 (ip + 1) each loop */
    const BYTE* ip0 = istart;
    const BYTE* ip1;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32 prefixStartIndex = ZSTD_getLowestPrefixIndex(ms, endIndex, cParams->windowLog);
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_generic");
    ip0 += (ip0 == prefixStart);
    ip1 = ip0 + 1;
    {   U32 const current = (U32)(ip0 - base);
        U32 const windowLow = ZSTD_getLowestPrefixIndex(ms, current, cParams->windowLog);
        U32 const maxRep = current - windowLow;
        if (offset_2 > maxRep) offsetSaved = offset_2, offset_2 = 0;
        if (offset_1 > maxRep) offsetSaved = offset_1, offset_1 = 0;
    }
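
    /* Repcodes inherited from the previous block that would reference data
     * before the valid window are disabled (set to 0) above; offsetSaved
     * keeps one such value so it can be restored into rep[] at block end. */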

    /* Main Search Loop */
#ifdef __INTEL_COMPILER
    /* From intel 'The vector pragma indicates that the loop should be
     * vectorized if it is legal to do so'. Can be used together with
     * #pragma ivdep (but have opted to exclude that because intel
     * warns against using it).*/
#pragma vector always
#endif
    while (ip1 < ilimit) {   /* < instead of <=, because check at ip0+2 */
        size_t mLength;
        BYTE const* ip2 = ip0 + 2;
        size_t const h0 = ZSTD_hashPtr(ip0, hlog, mls);
        U32 const val0 = MEM_read32(ip0);
        size_t const h1 = ZSTD_hashPtr(ip1, hlog, mls);
        U32 const val1 = MEM_read32(ip1);
        U32 const current0 = (U32)(ip0-base);
        U32 const current1 = (U32)(ip1-base);
        U32 const matchIndex0 = hashTable[h0];
        U32 const matchIndex1 = hashTable[h1];
        BYTE const* repMatch = ip2 - offset_1;
        const BYTE* match0 = base + matchIndex0;
        const BYTE* match1 = base + matchIndex1;
        U32 offcode;

#if defined(__aarch64__)
        PREFETCH_L1(ip0+256);
#endif

        hashTable[h0] = current0;   /* update hash table */
        hashTable[h1] = current1;   /* update hash table */

        assert(ip0 + 1 == ip1);

        if ((offset_1 > 0) & (MEM_read32(repMatch) == MEM_read32(ip2))) {
            mLength = (ip2[-1] == repMatch[-1]) ? 1 : 0;
            ip0 = ip2 - mLength;
            match0 = repMatch - mLength;
            mLength += 4;
            offcode = 0;
            goto _match;
        }
        if ((matchIndex0 > prefixStartIndex) && MEM_read32(match0) == val0) {
            /* found a regular match */
            goto _offset;
        }
        if ((matchIndex1 > prefixStartIndex) && MEM_read32(match1) == val1) {
            /* found a regular match after one literal */
            ip0 = ip1;
            match0 = match1;
            goto _offset;
        }
        {   size_t const step = ((size_t)(ip0-anchor) >> (kSearchStrength - 1)) + stepSize;
            assert(step >= 2);
            ip0 += step;
            ip1 += step;
            continue;
        }
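        /* Fall-through pair : _offset turns a fresh match into an offset code
         * and extends it backwards; _match (also reached directly on a
         * repcode hit) extends the match forwards and records the sequence. */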
_offset: /* Requires: ip0, match0 */
        /* Compute the offset code */
        offset_2 = offset_1;
        offset_1 = (U32)(ip0-match0);
        offcode = offset_1 + ZSTD_REP_MOVE;
        mLength = 4;
        /* Count the backwards match length */
        while (((ip0>anchor) & (match0>prefixStart))
             && (ip0[-1] == match0[-1])) { ip0--; match0--; mLength++; }   /* catch up */

_match: /* Requires: ip0, match0, offcode */
        /* Count the forward length */
        mLength += ZSTD_count(ip0+mLength, match0+mLength, iend);
        ZSTD_storeSeq(seqStore, (size_t)(ip0-anchor), anchor, iend, offcode, mLength-MINMATCH);
        /* match found */
        ip0 += mLength;
        anchor = ip0;

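        /* After emitting a sequence : back-fill two hash entries that the
         * jump skipped over, then greedily consume any immediately-repeating
         * matches at offset_2 before resuming the main search. */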
        if (ip0 <= ilimit) {
            /* Fill Table */
            assert(base+current0+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+current0+2, hlog, mls)] = current0+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip0-2, hlog, mls)] = (U32)(ip0-2-base);

            if (offset_2 > 0) { /* offset_2==0 means offset_2 is invalidated */
                while ( (ip0 <= ilimit) && (MEM_read32(ip0) == MEM_read32(ip0 - offset_2)) ) {
                    /* store sequence */
                    size_t const rLength = ZSTD_count(ip0+4, ip0+4-offset_2, iend) + 4;
                    { U32 const tmpOff = offset_2; offset_2 = offset_1; offset_1 = tmpOff; }   /* swap offset_2 <=> offset_1 */
                    hashTable[ZSTD_hashPtr(ip0, hlog, mls)] = (U32)(ip0-base);
                    ip0 += rLength;
                    ZSTD_storeSeq(seqStore, 0 /*litLen*/, anchor, iend, 0 /*offCode*/, rLength-MINMATCH);
                    anchor = ip0;
                    continue;   /* faster when present (confirmed on gcc-8) ... (?) */
        }   }   }
        ip1 = ip0 + 1;
    }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

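/* Dispatch on minMatch (mls) : each case instantiates the inlined generic
 * search with a compile-time constant, so the hot loop is specialized per
 * match length. */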
size_t ZSTD_compressBlock_fast(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState == NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, 7);
    }
}

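/* ZSTD_compressBlock_fast_dictMatchState_generic() :
 * Same greedy search, with an attached dictionary (dms). When the local hash
 * table offers no usable candidate, the dictionary's own hash table is
 * probed, and matches and repcodes may span the dictionary/prefix boundary
 * (hence ZSTD_count_2segments()). */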
FORCE_INLINE_TEMPLATE
size_t ZSTD_compressBlock_fast_dictMatchState_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 prefixStartIndex = ms->window.dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - HASH_READ_SIZE;
    U32 offset_1=rep[0], offset_2=rep[1];
    U32 offsetSaved = 0;

    const ZSTD_matchState_t* const dms = ms->dictMatchState;
    const ZSTD_compressionParameters* const dictCParams = &dms->cParams;
    const U32* const dictHashTable = dms->hashTable;
    const U32 dictStartIndex = dms->window.dictLimit;
    const BYTE* const dictBase = dms->window.base;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const BYTE* const dictEnd = dms->window.nextSrc;
    const U32 dictIndexDelta = prefixStartIndex - (U32)(dictEnd - dictBase);
    const U32 dictAndPrefixLength = (U32)(ip - prefixStart + dictEnd - dictStart);
    const U32 dictHLog = dictCParams->hashLog;

    /* if a dictionary is still attached, it necessarily means that
     * it is within window size. So we just check it. */
    const U32 maxDistance = 1U << cParams->windowLog;
    const U32 endIndex = (U32)((size_t)(ip - base) + srcSize);
    assert(endIndex - prefixStartIndex <= maxDistance);
    (void)maxDistance; (void)endIndex;   /* these variables are not used when assert() is disabled */

    /* ensure there will be no underflow
     * when translating a dict index into a local index */
    assert(prefixStartIndex >= (U32)(dictEnd - dictBase));

    /* init */
    DEBUGLOG(5, "ZSTD_compressBlock_fast_dictMatchState_generic");
    ip += (dictAndPrefixLength == 0);
    /* dictMatchState repCode checks don't currently handle repCode == 0
     * disabling. */
    assert(offset_1 <= dictAndPrefixLength);
    assert(offset_2 <= dictAndPrefixLength);

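    /* Dictionary and prefix share one index space : a dictionary index d
     * corresponds to local index d + dictIndexDelta, which is how repIndex
     * values below prefixStartIndex are translated back into dict pointers. */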
    /* Main Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because repcode check at (ip+1) */
        size_t mLength;
        size_t const h = ZSTD_hashPtr(ip, hlog, mls);
        U32 const current = (U32)(ip-base);
        U32 const matchIndex = hashTable[h];
        const BYTE* match = base + matchIndex;
        const U32 repIndex = current + 1 - offset_1;
        const BYTE* repMatch = (repIndex < prefixStartIndex) ?
                               dictBase + (repIndex - dictIndexDelta) :
                               base + repIndex;
        hashTable[h] = current;   /* update hash table */

        if ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow : ensure repIndex isn't overlapping dict + prefix */
          && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            mLength = ZSTD_count_2segments(ip+1+4, repMatch+4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, mLength-MINMATCH);
        } else if ( (matchIndex <= prefixStartIndex) ) {
            size_t const dictHash = ZSTD_hashPtr(ip, dictHLog, mls);
            U32 const dictMatchIndex = dictHashTable[dictHash];
            const BYTE* dictMatch = dictBase + dictMatchIndex;
            if (dictMatchIndex <= dictStartIndex ||
                MEM_read32(dictMatch) != MEM_read32(ip)) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            } else {
                /* found a dict match */
                U32 const offset = (U32)(current-dictMatchIndex-dictIndexDelta);
                mLength = ZSTD_count_2segments(ip+4, dictMatch+4, iend, dictEnd, prefixStart) + 4;
                while (((ip>anchor) & (dictMatch>dictStart))
                     && (ip[-1] == dictMatch[-1])) {
                    ip--; dictMatch--; mLength++;
                } /* catch up */
                offset_2 = offset_1;
                offset_1 = offset;
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
            }
        } else if (MEM_read32(match) != MEM_read32(ip)) {
            /* it's not a match, and we're not going to check the dictionary */
            assert(stepSize >= 1);
            ip += ((ip-anchor) >> kSearchStrength) + stepSize;
            continue;
        } else {
            /* found a regular match */
            U32 const offset = (U32)(ip-match);
            mLength = ZSTD_count(ip+4, match+4, iend) + 4;
            while (((ip>anchor) & (match>prefixStart))
                 && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
            offset_2 = offset_1;
            offset_1 = offset;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
        }

        /* match found */
        ip += mLength;
        anchor = ip;

        if (ip <= ilimit) {
            /* Fill Table */
            assert(base+current+2 > istart);  /* check base overflow */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;  /* here because current+2 could be > iend-8 */
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);

            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* repMatch2 = repIndex2 < prefixStartIndex ?
                        dictBase - dictIndexDelta + repIndex2 :
                        base + repIndex2;
                if ( ((U32)((prefixStartIndex-1) - (U32)repIndex2) >= 3 /* intentional overflow */)
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    U32 tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset;   /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0, anchor, iend, 0, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
            }
        }
    }

    /* save reps for next block */
    rep[0] = offset_1 ? offset_1 : offsetSaved;
    rep[1] = offset_2 ? offset_2 : offsetSaved;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

size_t ZSTD_compressBlock_fast_dictMatchState(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    assert(ms->dictMatchState != NULL);
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_dictMatchState_generic(ms, seqStore, rep, src, srcSize, 7);
    }
}

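/* ZSTD_compressBlock_fast_extDict_generic() :
 * Streaming variant with history split across two segments : indices below
 * prefixStartIndex live in the old window (dictBase), later ones in the
 * current window (base). Matches may start in the old segment and extend
 * across the boundary via ZSTD_count_2segments(). */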
static size_t ZSTD_compressBlock_fast_extDict_generic(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize, U32 const mls)
{
    const ZSTD_compressionParameters* const cParams = &ms->cParams;
    U32* const hashTable = ms->hashTable;
    U32 const hlog = cParams->hashLog;
    /* support stepSize of 0 */
    U32 const stepSize = cParams->targetLength + !(cParams->targetLength);
    const BYTE* const base = ms->window.base;
    const BYTE* const dictBase = ms->window.dictBase;
    const BYTE* const istart = (const BYTE*)src;
    const BYTE* ip = istart;
    const BYTE* anchor = istart;
    const U32 endIndex = (U32)((size_t)(istart - base) + srcSize);
    const U32 lowLimit = ZSTD_getLowestMatchIndex(ms, endIndex, cParams->windowLog);
    const U32 dictStartIndex = lowLimit;
    const BYTE* const dictStart = dictBase + dictStartIndex;
    const U32 dictLimit = ms->window.dictLimit;
    const U32 prefixStartIndex = dictLimit < lowLimit ? lowLimit : dictLimit;
    const BYTE* const prefixStart = base + prefixStartIndex;
    const BYTE* const dictEnd = dictBase + prefixStartIndex;
    const BYTE* const iend = istart + srcSize;
    const BYTE* const ilimit = iend - 8;
    U32 offset_1=rep[0], offset_2=rep[1];

    DEBUGLOG(5, "ZSTD_compressBlock_fast_extDict_generic (offset_1=%u)", offset_1);

    /* switch to "regular" variant if extDict is invalidated due to maxDistance */
    if (prefixStartIndex == dictStartIndex)
        return ZSTD_compressBlock_fast_generic(ms, seqStore, rep, src, srcSize, mls);

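    /* Every candidate is resolved per-index : matchBase/repBase select
     * dictBase for indices below prefixStartIndex and base otherwise, and
     * the extra `offset < current - dictStartIndex` guards reject candidates
     * older than the valid window. */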
    /* Search Loop */
    while (ip < ilimit) {   /* < instead of <=, because (ip+1) */
        const size_t h = ZSTD_hashPtr(ip, hlog, mls);
        const U32 matchIndex = hashTable[h];
        const BYTE* const matchBase = matchIndex < prefixStartIndex ? dictBase : base;
        const BYTE* match = matchBase + matchIndex;
        const U32 current = (U32)(ip-base);
        const U32 repIndex = current + 1 - offset_1;
        const BYTE* const repBase = repIndex < prefixStartIndex ? dictBase : base;
        const BYTE* const repMatch = repBase + repIndex;
        hashTable[h] = current;   /* update hash table */
        DEBUGLOG(7, "offset_1 = %u , current = %u", offset_1, current);

        if ( ( ((U32)((prefixStartIndex-1) - repIndex) >= 3) /* intentional underflow */
             & (offset_1 < current+1 - dictStartIndex) ) /* note: we are searching at current+1 */
           && (MEM_read32(repMatch) == MEM_read32(ip+1)) ) {
            const BYTE* const repMatchEnd = repIndex < prefixStartIndex ? dictEnd : iend;
            size_t const rLength = ZSTD_count_2segments(ip+1 +4, repMatch +4, iend, repMatchEnd, prefixStart) + 4;
            ip++;
            ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, 0, rLength-MINMATCH);
            ip += rLength;
            anchor = ip;
        } else {
            if ( (matchIndex < dictStartIndex) ||
                 (MEM_read32(match) != MEM_read32(ip)) ) {
                assert(stepSize >= 1);
                ip += ((ip-anchor) >> kSearchStrength) + stepSize;
                continue;
            }
            {   const BYTE* const matchEnd = matchIndex < prefixStartIndex ? dictEnd : iend;
                const BYTE* const lowMatchPtr = matchIndex < prefixStartIndex ? dictStart : prefixStart;
                U32 const offset = current - matchIndex;
                size_t mLength = ZSTD_count_2segments(ip+4, match+4, iend, matchEnd, prefixStart) + 4;
                while (((ip>anchor) & (match>lowMatchPtr)) && (ip[-1] == match[-1])) { ip--; match--; mLength++; }   /* catch up */
                offset_2 = offset_1; offset_1 = offset;   /* update offset history */
                ZSTD_storeSeq(seqStore, (size_t)(ip-anchor), anchor, iend, offset + ZSTD_REP_MOVE, mLength-MINMATCH);
                ip += mLength;
                anchor = ip;
        }   }

        if (ip <= ilimit) {
            /* Fill Table */
            hashTable[ZSTD_hashPtr(base+current+2, hlog, mls)] = current+2;
            hashTable[ZSTD_hashPtr(ip-2, hlog, mls)] = (U32)(ip-2-base);
            /* check immediate repcode */
            while (ip <= ilimit) {
                U32 const current2 = (U32)(ip-base);
                U32 const repIndex2 = current2 - offset_2;
                const BYTE* const repMatch2 = repIndex2 < prefixStartIndex ? dictBase + repIndex2 : base + repIndex2;
                if ( (((U32)((prefixStartIndex-1) - repIndex2) >= 3) & (offset_2 < current - dictStartIndex))  /* intentional overflow */
                   && (MEM_read32(repMatch2) == MEM_read32(ip)) ) {
                    const BYTE* const repEnd2 = repIndex2 < prefixStartIndex ? dictEnd : iend;
                    size_t const repLength2 = ZSTD_count_2segments(ip+4, repMatch2+4, iend, repEnd2, prefixStart) + 4;
                    { U32 const tmpOffset = offset_2; offset_2 = offset_1; offset_1 = tmpOffset; }  /* swap offset_2 <=> offset_1 */
                    ZSTD_storeSeq(seqStore, 0 /*litlen*/, anchor, iend, 0 /*offcode*/, repLength2-MINMATCH);
                    hashTable[ZSTD_hashPtr(ip, hlog, mls)] = current2;
                    ip += repLength2;
                    anchor = ip;
                    continue;
                }
                break;
    }   }   }

    /* save reps for next block */
    rep[0] = offset_1;
    rep[1] = offset_2;

    /* Return the last literals size */
    return (size_t)(iend - anchor);
}

size_t ZSTD_compressBlock_fast_extDict(
        ZSTD_matchState_t* ms, seqStore_t* seqStore, U32 rep[ZSTD_REP_NUM],
        void const* src, size_t srcSize)
{
    U32 const mls = ms->cParams.minMatch;
    switch(mls)
    {
    default: /* includes case 3 */
    case 4 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 4);
    case 5 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 5);
    case 6 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 6);
    case 7 :
        return ZSTD_compressBlock_fast_extDict_generic(ms, seqStore, rep, src, srcSize, 7);
    }
}