GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmzstd/lib/compress/zstdmt_compress.c
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */


/* ====== Compiler specifics ====== */
#if defined(_MSC_VER)
#  pragma warning(disable : 4204)   /* disable: C4204: non-constant aggregate initializer */
#endif


/* ====== Constants ====== */
#define ZSTDMT_OVERLAPLOG_DEFAULT 0


/* ====== Dependencies ====== */
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customCalloc, ZSTD_customFree */
#include "../common/zstd_deps.h"    /* ZSTD_memcpy, ZSTD_memset, INT_MAX, UINT_MAX */
#include "../common/mem.h"          /* MEM_STATIC */
#include "../common/pool.h"         /* threadpool */
#include "../common/threading.h"    /* mutex */
#include "zstd_compress_internal.h" /* MIN, ERROR, ZSTD_*, ZSTD_highbit32 */
#include "zstd_ldm.h"
#include "zstdmt_compress.h"

/* Guards code to support resizing the SeqPool.
 * We will want to resize the SeqPool to save memory in the future.
 * Until then, comment the code out since it is unused.
 */
#define ZSTD_RESIZE_SEQPOOL 0

/* ====== Debug ====== */
#if defined(DEBUGLEVEL) && (DEBUGLEVEL>=2) \
    && !defined(_MSC_VER) \
    && !defined(__MINGW32__)

#  include <stdio.h>
#  include <unistd.h>
#  include <sys/times.h>

#  define DEBUG_PRINTHEX(l,p,n) {                                 \
    unsigned debug_u;                                             \
    for (debug_u=0; debug_u<(n); debug_u++)                       \
        RAWLOG(l, "%02X ", ((const unsigned char*)(p))[debug_u]); \
    RAWLOG(l, " \n");                                             \
}

static unsigned long long GetCurrentClockTimeMicroseconds(void)
{
    static clock_t _ticksPerSecond = 0;
    if (_ticksPerSecond <= 0) _ticksPerSecond = sysconf(_SC_CLK_TCK);

    {   struct tms junk; clock_t newTicks = (clock_t) times(&junk);
        return ((((unsigned long long)newTicks)*(1000000))/_ticksPerSecond);
}   }

#define MUTEX_WAIT_TIME_DLEVEL 6
#define ZSTD_PTHREAD_MUTEX_LOCK(mutex) {                                        \
    if (DEBUGLEVEL >= MUTEX_WAIT_TIME_DLEVEL) {                                 \
        unsigned long long const beforeTime = GetCurrentClockTimeMicroseconds(); \
        ZSTD_pthread_mutex_lock(mutex);                                         \
        {   unsigned long long const afterTime = GetCurrentClockTimeMicroseconds(); \
            unsigned long long const elapsedTime = (afterTime-beforeTime);      \
            if (elapsedTime > 1000) {  /* or whatever threshold you like; I'm using 1 millisecond here */ \
                DEBUGLOG(MUTEX_WAIT_TIME_DLEVEL, "Thread took %llu microseconds to acquire mutex %s \n", \
                         elapsedTime, #mutex);                                  \
        }   }                                                                   \
    } else {                                                                    \
        ZSTD_pthread_mutex_lock(mutex);                                         \
    }                                                                           \
}

#else

#  define ZSTD_PTHREAD_MUTEX_LOCK(m) ZSTD_pthread_mutex_lock(m)
#  define DEBUG_PRINTHEX(l,p,n) {}

#endif


/* ===== Buffer Pool ===== */
/* a single Buffer Pool can be invoked from multiple threads in parallel */

typedef struct buffer_s {
    void* start;
    size_t capacity;
} buffer_t;

static const buffer_t g_nullBuffer = { NULL, 0 };

typedef struct ZSTDMT_bufferPool_s {
    ZSTD_pthread_mutex_t poolMutex;
    size_t bufferSize;
    unsigned totalBuffers;
    unsigned nbBuffers;
    ZSTD_customMem cMem;
    buffer_t bTable[1];   /* variable size */
} ZSTDMT_bufferPool;
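
/* note : bTable[1] plus the over-allocation in ZSTDMT_createBufferPool() below
 * emulates a flexible array member in pre-C99 style : the pool is allocated as
 * sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t),
 * so bTable effectively provides room for maxNbBuffers entries */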

static ZSTDMT_bufferPool* ZSTDMT_createBufferPool(unsigned maxNbBuffers, ZSTD_customMem cMem)
{
    ZSTDMT_bufferPool* const bufPool = (ZSTDMT_bufferPool*)ZSTD_customCalloc(
        sizeof(ZSTDMT_bufferPool) + (maxNbBuffers-1) * sizeof(buffer_t), cMem);
    if (bufPool==NULL) return NULL;
    if (ZSTD_pthread_mutex_init(&bufPool->poolMutex, NULL)) {
        ZSTD_customFree(bufPool, cMem);
        return NULL;
    }
    bufPool->bufferSize = 64 KB;
    bufPool->totalBuffers = maxNbBuffers;
    bufPool->nbBuffers = 0;
    bufPool->cMem = cMem;
    return bufPool;
}

static void ZSTDMT_freeBufferPool(ZSTDMT_bufferPool* bufPool)
{
    unsigned u;
    DEBUGLOG(3, "ZSTDMT_freeBufferPool (address:%08X)", (U32)(size_t)bufPool);
    if (!bufPool) return;   /* compatibility with free on NULL */
    for (u=0; u<bufPool->totalBuffers; u++) {
        DEBUGLOG(4, "free buffer %2u (address:%08X)", u, (U32)(size_t)bufPool->bTable[u].start);
        ZSTD_customFree(bufPool->bTable[u].start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_destroy(&bufPool->poolMutex);
    ZSTD_customFree(bufPool, bufPool->cMem);
}

/* only works at initialization, not during compression */
static size_t ZSTDMT_sizeof_bufferPool(ZSTDMT_bufferPool* bufPool)
{
    size_t const poolSize = sizeof(*bufPool)
                            + (bufPool->totalBuffers - 1) * sizeof(buffer_t);
    unsigned u;
    size_t totalBufferSize = 0;
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    for (u=0; u<bufPool->totalBuffers; u++)
        totalBufferSize += bufPool->bTable[u].capacity;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);

    return poolSize + totalBufferSize;
}

/* ZSTDMT_setBufferSize() :
 * all future buffers provided by this buffer pool will have _at least_ this size
 * note : it's better for all buffers to have the same size,
 * as they then become freely interchangeable, reducing malloc/free calls and memory fragmentation */
static void ZSTDMT_setBufferSize(ZSTDMT_bufferPool* const bufPool, size_t const bSize)
{
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    DEBUGLOG(4, "ZSTDMT_setBufferSize: bSize = %u", (U32)bSize);
    bufPool->bufferSize = bSize;
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
}


static ZSTDMT_bufferPool* ZSTDMT_expandBufferPool(ZSTDMT_bufferPool* srcBufPool, unsigned maxNbBuffers)
{
    if (srcBufPool==NULL) return NULL;
    if (srcBufPool->totalBuffers >= maxNbBuffers) /* good enough */
        return srcBufPool;
    /* need a larger buffer pool */
    {   ZSTD_customMem const cMem = srcBufPool->cMem;
        size_t const bSize = srcBufPool->bufferSize;   /* forward parameters */
        ZSTDMT_bufferPool* newBufPool;
        ZSTDMT_freeBufferPool(srcBufPool);
        newBufPool = ZSTDMT_createBufferPool(maxNbBuffers, cMem);
        if (newBufPool==NULL) return newBufPool;
        ZSTDMT_setBufferSize(newBufPool, bSize);
        return newBufPool;
    }
}

/** ZSTDMT_getBuffer() :
 *  assumption : bufPool must be valid
 * @return : a buffer, with start pointer and size
 *  note: allocation may fail, in this case, start==NULL and size==0 */
static buffer_t ZSTDMT_getBuffer(ZSTDMT_bufferPool* bufPool)
{
    size_t const bSize = bufPool->bufferSize;
    DEBUGLOG(5, "ZSTDMT_getBuffer: bSize = %u", (U32)bufPool->bufferSize);
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers) {   /* try to use an existing buffer */
        buffer_t const buf = bufPool->bTable[--(bufPool->nbBuffers)];
        size_t const availBufferSize = buf.capacity;
        bufPool->bTable[bufPool->nbBuffers] = g_nullBuffer;
        if ((availBufferSize >= bSize) & ((availBufferSize>>3) <= bSize)) {
            /* large enough, but not too much */
            DEBUGLOG(5, "ZSTDMT_getBuffer: provide buffer %u of size %u",
                        bufPool->nbBuffers, (U32)buf.capacity);
            ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
            return buf;
        }
        /* size conditions not respected : scratch this buffer, create new one */
        DEBUGLOG(5, "ZSTDMT_getBuffer: existing buffer does not meet size conditions => freeing");
        ZSTD_customFree(buf.start, bufPool->cMem);
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* create new buffer */
    DEBUGLOG(5, "ZSTDMT_getBuffer: create a new buffer");
    {   buffer_t buffer;
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
        buffer.start = start;   /* note : start can be NULL if malloc fails ! */
        buffer.capacity = (start==NULL) ? 0 : bSize;
        if (start==NULL) {
            DEBUGLOG(5, "ZSTDMT_getBuffer: buffer allocation failure !!");
        } else {
            DEBUGLOG(5, "ZSTDMT_getBuffer: created buffer of size %u", (U32)bSize);
        }
        return buffer;
    }
}

#if ZSTD_RESIZE_SEQPOOL
/** ZSTDMT_resizeBuffer() :
 * assumption : bufPool must be valid
 * @return : a buffer that is at least the buffer pool buffer size.
 *           If a reallocation happens, the data in the input buffer is copied.
 */
static buffer_t ZSTDMT_resizeBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buffer)
{
    size_t const bSize = bufPool->bufferSize;
    if (buffer.capacity < bSize) {
        void* const start = ZSTD_customMalloc(bSize, bufPool->cMem);
        buffer_t newBuffer;
        newBuffer.start = start;
        newBuffer.capacity = start == NULL ? 0 : bSize;
        if (start != NULL) {
            assert(newBuffer.capacity >= buffer.capacity);
            ZSTD_memcpy(newBuffer.start, buffer.start, buffer.capacity);
            DEBUGLOG(5, "ZSTDMT_resizeBuffer: created buffer of size %u", (U32)bSize);
            return newBuffer;
        }
        DEBUGLOG(5, "ZSTDMT_resizeBuffer: buffer allocation failure !!");
    }
    return buffer;
}
#endif

/* store buffer for later re-use, up to pool capacity */
static void ZSTDMT_releaseBuffer(ZSTDMT_bufferPool* bufPool, buffer_t buf)
{
    DEBUGLOG(5, "ZSTDMT_releaseBuffer");
    if (buf.start == NULL) return;   /* compatible with release on NULL */
    ZSTD_pthread_mutex_lock(&bufPool->poolMutex);
    if (bufPool->nbBuffers < bufPool->totalBuffers) {
        bufPool->bTable[bufPool->nbBuffers++] = buf;   /* stored for later use */
        DEBUGLOG(5, "ZSTDMT_releaseBuffer: stored buffer of size %u in slot %u",
                    (U32)buf.capacity, (U32)(bufPool->nbBuffers-1));
        ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
        return;
    }
    ZSTD_pthread_mutex_unlock(&bufPool->poolMutex);
    /* Reached bufferPool capacity (should not happen) */
    DEBUGLOG(5, "ZSTDMT_releaseBuffer: pool capacity reached => freeing ");
    ZSTD_customFree(buf.start, bufPool->cMem);
}

/* We need 2 output buffers per worker since each dstBuff must be flushed after it is released.
 * The 3 additional buffers are as follows:
 *   1 buffer for input loading
 *   1 buffer for "next input" when submitting current one
 *   1 buffer stuck in queue */
#define BUF_POOL_MAX_NB_BUFFERS(nbWorkers) (2*(nbWorkers) + 3)
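/* e.g. : with nbWorkers==4, the buffer pool can retain up to 2*4 + 3 = 11 buffers */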

/* After a worker releases its rawSeqStore, it is immediately ready for reuse.
 * So we only need one seq buffer per worker. */
#define SEQ_POOL_MAX_NB_BUFFERS(nbWorkers) (nbWorkers)

/* ===== Seq Pool Wrapper ====== */

typedef ZSTDMT_bufferPool ZSTDMT_seqPool;

static size_t ZSTDMT_sizeof_seqPool(ZSTDMT_seqPool* seqPool)
{
    return ZSTDMT_sizeof_bufferPool(seqPool);
}

static rawSeqStore_t bufferToSeq(buffer_t buffer)
{
    rawSeqStore_t seq = kNullRawSeqStore;
    seq.seq = (rawSeq*)buffer.start;
    seq.capacity = buffer.capacity / sizeof(rawSeq);
    return seq;
}

static buffer_t seqToBuffer(rawSeqStore_t seq)
{
    buffer_t buffer;
    buffer.start = seq.seq;
    buffer.capacity = seq.capacity * sizeof(rawSeq);
    return buffer;
}

static rawSeqStore_t ZSTDMT_getSeq(ZSTDMT_seqPool* seqPool)
{
    if (seqPool->bufferSize == 0) {
        return kNullRawSeqStore;
    }
    return bufferToSeq(ZSTDMT_getBuffer(seqPool));
}

#if ZSTD_RESIZE_SEQPOOL
static rawSeqStore_t ZSTDMT_resizeSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    return bufferToSeq(ZSTDMT_resizeBuffer(seqPool, seqToBuffer(seq)));
}
#endif

static void ZSTDMT_releaseSeq(ZSTDMT_seqPool* seqPool, rawSeqStore_t seq)
{
    ZSTDMT_releaseBuffer(seqPool, seqToBuffer(seq));
}

static void ZSTDMT_setNbSeq(ZSTDMT_seqPool* const seqPool, size_t const nbSeq)
{
    ZSTDMT_setBufferSize(seqPool, nbSeq * sizeof(rawSeq));
}

static ZSTDMT_seqPool* ZSTDMT_createSeqPool(unsigned nbWorkers, ZSTD_customMem cMem)
{
    ZSTDMT_seqPool* const seqPool = ZSTDMT_createBufferPool(SEQ_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
    if (seqPool == NULL) return NULL;
    ZSTDMT_setNbSeq(seqPool, 0);
    return seqPool;
}

static void ZSTDMT_freeSeqPool(ZSTDMT_seqPool* seqPool)
{
    ZSTDMT_freeBufferPool(seqPool);
}

static ZSTDMT_seqPool* ZSTDMT_expandSeqPool(ZSTDMT_seqPool* pool, U32 nbWorkers)
{
    return ZSTDMT_expandBufferPool(pool, SEQ_POOL_MAX_NB_BUFFERS(nbWorkers));
}


/* ===== CCtx Pool ===== */
/* a single CCtx Pool can be invoked from multiple threads in parallel */

typedef struct {
    ZSTD_pthread_mutex_t poolMutex;
    int totalCCtx;
    int availCCtx;
    ZSTD_customMem cMem;
    ZSTD_CCtx* cctx[1];   /* variable size */
} ZSTDMT_CCtxPool;

/* note : all CCtx borrowed from the pool should be released back to the pool _before_ freeing the pool */
static void ZSTDMT_freeCCtxPool(ZSTDMT_CCtxPool* pool)
{
    int cid;
    for (cid=0; cid<pool->totalCCtx; cid++)
        ZSTD_freeCCtx(pool->cctx[cid]);   /* note : compatible with free on NULL */
    ZSTD_pthread_mutex_destroy(&pool->poolMutex);
    ZSTD_customFree(pool, pool->cMem);
}

/* ZSTDMT_createCCtxPool() :
 * implies nbWorkers >= 1 , checked by caller ZSTDMT_createCCtx() */
static ZSTDMT_CCtxPool* ZSTDMT_createCCtxPool(int nbWorkers,
                                              ZSTD_customMem cMem)
{
    ZSTDMT_CCtxPool* const cctxPool = (ZSTDMT_CCtxPool*) ZSTD_customCalloc(
        sizeof(ZSTDMT_CCtxPool) + (nbWorkers-1)*sizeof(ZSTD_CCtx*), cMem);
    assert(nbWorkers > 0);
    if (!cctxPool) return NULL;
    if (ZSTD_pthread_mutex_init(&cctxPool->poolMutex, NULL)) {
        ZSTD_customFree(cctxPool, cMem);
        return NULL;
    }
    cctxPool->cMem = cMem;
    cctxPool->totalCCtx = nbWorkers;
    cctxPool->availCCtx = 1;   /* at least one cctx for single-thread mode */
    cctxPool->cctx[0] = ZSTD_createCCtx_advanced(cMem);
    if (!cctxPool->cctx[0]) { ZSTDMT_freeCCtxPool(cctxPool); return NULL; }
    DEBUGLOG(3, "cctxPool created, with %u workers", nbWorkers);
    return cctxPool;
}

static ZSTDMT_CCtxPool* ZSTDMT_expandCCtxPool(ZSTDMT_CCtxPool* srcPool,
                                              int nbWorkers)
{
    if (srcPool==NULL) return NULL;
    if (nbWorkers <= srcPool->totalCCtx) return srcPool;   /* good enough */
    /* need a larger cctx pool */
    {   ZSTD_customMem const cMem = srcPool->cMem;
        ZSTDMT_freeCCtxPool(srcPool);
        return ZSTDMT_createCCtxPool(nbWorkers, cMem);
    }
}

/* only works during initialization phase, not during compression */
static size_t ZSTDMT_sizeof_CCtxPool(ZSTDMT_CCtxPool* cctxPool)
{
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    {   unsigned const nbWorkers = cctxPool->totalCCtx;
        size_t const poolSize = sizeof(*cctxPool)
                                + (nbWorkers-1) * sizeof(ZSTD_CCtx*);
        unsigned u;
        size_t totalCCtxSize = 0;
        for (u=0; u<nbWorkers; u++) {
            totalCCtxSize += ZSTD_sizeof_CCtx(cctxPool->cctx[u]);
        }
        ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
        assert(nbWorkers > 0);
        return poolSize + totalCCtxSize;
    }
}

static ZSTD_CCtx* ZSTDMT_getCCtx(ZSTDMT_CCtxPool* cctxPool)
{
    DEBUGLOG(5, "ZSTDMT_getCCtx");
    ZSTD_pthread_mutex_lock(&cctxPool->poolMutex);
    if (cctxPool->availCCtx) {
        cctxPool->availCCtx--;
        {   ZSTD_CCtx* const cctx = cctxPool->cctx[cctxPool->availCCtx];
            ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
            return cctx;
    }   }
    ZSTD_pthread_mutex_unlock(&cctxPool->poolMutex);
    DEBUGLOG(5, "create one more CCtx");
    return ZSTD_createCCtx_advanced(cctxPool->cMem);   /* note : can be NULL, when creation fails ! */
}

static void ZSTDMT_releaseCCtx(ZSTDMT_CCtxPool* pool, ZSTD_CCtx* cctx)
{
    if (cctx==NULL) return;   /* compatibility with release on NULL */
    ZSTD_pthread_mutex_lock(&pool->poolMutex);
    if (pool->availCCtx < pool->totalCCtx)
        pool->cctx[pool->availCCtx++] = cctx;
    else {
        /* pool overflow : should not happen, since totalCCtx==nbWorkers */
        DEBUGLOG(4, "CCtx pool overflow : free cctx");
        ZSTD_freeCCtx(cctx);
    }
    ZSTD_pthread_mutex_unlock(&pool->poolMutex);
}

/* ==== Serial State ==== */

typedef struct {
    void const* start;
    size_t size;
} range_t;

typedef struct {
    /* All variables in the struct are protected by mutex. */
    ZSTD_pthread_mutex_t mutex;
    ZSTD_pthread_cond_t cond;
    ZSTD_CCtx_params params;
    ldmState_t ldmState;
    XXH64_state_t xxhState;
    unsigned nextJobID;
    /* Protects ldmWindow.
     * Must be acquired after the main mutex when acquiring both.
     */
    ZSTD_pthread_mutex_t ldmWindowMutex;
    ZSTD_pthread_cond_t ldmWindowCond;   /* Signaled when ldmWindow is updated */
    ZSTD_window_t ldmWindow;   /* A thread-safe copy of ldmState.window */
} serialState_t;

static int
ZSTDMT_serialState_reset(serialState_t* serialState,
                         ZSTDMT_seqPool* seqPool,
                         ZSTD_CCtx_params params,
                         size_t jobSize,
                         const void* dict, size_t const dictSize,
                         ZSTD_dictContentType_e dictContentType)
{
    /* Adjust parameters */
    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
        DEBUGLOG(4, "LDM window size = %u KB", (1U << params.cParams.windowLog) >> 10);
        ZSTD_ldm_adjustParameters(&params.ldmParams, &params.cParams);
        assert(params.ldmParams.hashLog >= params.ldmParams.bucketSizeLog);
        assert(params.ldmParams.hashRateLog < 32);
    } else {
        ZSTD_memset(&params.ldmParams, 0, sizeof(params.ldmParams));
    }
    serialState->nextJobID = 0;
    if (params.fParams.checksumFlag)
        XXH64_reset(&serialState->xxhState, 0);
    if (params.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_customMem cMem = params.customMem;
        unsigned const hashLog = params.ldmParams.hashLog;
        size_t const hashSize = ((size_t)1 << hashLog) * sizeof(ldmEntry_t);
        unsigned const bucketLog =
            params.ldmParams.hashLog - params.ldmParams.bucketSizeLog;
        unsigned const prevBucketLog =
            serialState->params.ldmParams.hashLog -
            serialState->params.ldmParams.bucketSizeLog;
        size_t const numBuckets = (size_t)1 << bucketLog;
        /* Size the seq pool tables */
        ZSTDMT_setNbSeq(seqPool, ZSTD_ldm_getMaxNbSeq(params.ldmParams, jobSize));
        /* Reset the window */
        ZSTD_window_init(&serialState->ldmState.window);
        /* Resize tables and output space if necessary. */
        if (serialState->ldmState.hashTable == NULL || serialState->params.ldmParams.hashLog < hashLog) {
            ZSTD_customFree(serialState->ldmState.hashTable, cMem);
            serialState->ldmState.hashTable = (ldmEntry_t*)ZSTD_customMalloc(hashSize, cMem);
        }
        if (serialState->ldmState.bucketOffsets == NULL || prevBucketLog < bucketLog) {
            ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
            serialState->ldmState.bucketOffsets = (BYTE*)ZSTD_customMalloc(numBuckets, cMem);
        }
        if (!serialState->ldmState.hashTable || !serialState->ldmState.bucketOffsets)
            return 1;
        /* Zero the tables */
        ZSTD_memset(serialState->ldmState.hashTable, 0, hashSize);
        ZSTD_memset(serialState->ldmState.bucketOffsets, 0, numBuckets);

        /* Update window state and fill hash table with dict */
        serialState->ldmState.loadedDictEnd = 0;
        if (dictSize > 0) {
            if (dictContentType == ZSTD_dct_rawContent) {
                BYTE const* const dictEnd = (const BYTE*)dict + dictSize;
                ZSTD_window_update(&serialState->ldmState.window, dict, dictSize, /* forceNonContiguous */ 0);
                ZSTD_ldm_fillHashTable(&serialState->ldmState, (const BYTE*)dict, dictEnd, &params.ldmParams);
                serialState->ldmState.loadedDictEnd = params.forceWindow ? 0 : (U32)(dictEnd - serialState->ldmState.window.base);
            } else {
                /* don't even load anything */
            }
        }

        /* Initialize serialState's copy of ldmWindow. */
        serialState->ldmWindow = serialState->ldmState.window;
    }

    serialState->params = params;
    serialState->params.jobSize = (U32)jobSize;
    return 0;
}

static int ZSTDMT_serialState_init(serialState_t* serialState)
{
    int initError = 0;
    ZSTD_memset(serialState, 0, sizeof(*serialState));
    initError |= ZSTD_pthread_mutex_init(&serialState->mutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->cond, NULL);
    initError |= ZSTD_pthread_mutex_init(&serialState->ldmWindowMutex, NULL);
    initError |= ZSTD_pthread_cond_init(&serialState->ldmWindowCond, NULL);
    return initError;
}

static void ZSTDMT_serialState_free(serialState_t* serialState)
{
    ZSTD_customMem cMem = serialState->params.customMem;
    ZSTD_pthread_mutex_destroy(&serialState->mutex);
    ZSTD_pthread_cond_destroy(&serialState->cond);
    ZSTD_pthread_mutex_destroy(&serialState->ldmWindowMutex);
    ZSTD_pthread_cond_destroy(&serialState->ldmWindowCond);
    ZSTD_customFree(serialState->ldmState.hashTable, cMem);
    ZSTD_customFree(serialState->ldmState.bucketOffsets, cMem);
}

static void ZSTDMT_serialState_update(serialState_t* serialState,
                                      ZSTD_CCtx* jobCCtx, rawSeqStore_t seqStore,
                                      range_t src, unsigned jobID)
{
    /* Wait for our turn */
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    while (serialState->nextJobID < jobID) {
        DEBUGLOG(5, "wait for serialState->cond");
        ZSTD_pthread_cond_wait(&serialState->cond, &serialState->mutex);
    }
    /* A future job may error and skip our job */
    if (serialState->nextJobID == jobID) {
        /* It is now our turn, do any processing necessary */
        if (serialState->params.ldmParams.enableLdm == ZSTD_ps_enable) {
            size_t error;
            assert(seqStore.seq != NULL && seqStore.pos == 0 &&
                   seqStore.size == 0 && seqStore.capacity > 0);
            assert(src.size <= serialState->params.jobSize);
            ZSTD_window_update(&serialState->ldmState.window, src.start, src.size, /* forceNonContiguous */ 0);
            error = ZSTD_ldm_generateSequences(
                &serialState->ldmState, &seqStore,
                &serialState->params.ldmParams, src.start, src.size);
            /* We provide a large enough buffer to never fail. */
            assert(!ZSTD_isError(error)); (void)error;
            /* Update ldmWindow to match the ldmState.window and signal the main
             * thread if it is waiting for a buffer.
             */
            ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
            serialState->ldmWindow = serialState->ldmState.window;
            ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
            ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
        }
        if (serialState->params.fParams.checksumFlag && src.size > 0)
            XXH64_update(&serialState->xxhState, src.start, src.size);
    }
    /* Now it is the next job's turn */
    serialState->nextJobID++;
    ZSTD_pthread_cond_broadcast(&serialState->cond);
    ZSTD_pthread_mutex_unlock(&serialState->mutex);

    if (seqStore.size > 0) {
        size_t const err = ZSTD_referenceExternalSequences(
            jobCCtx, seqStore.seq, seqStore.size);
        assert(serialState->params.ldmParams.enableLdm == ZSTD_ps_enable);
        assert(!ZSTD_isError(err));
        (void)err;
    }
}

static void ZSTDMT_serialState_ensureFinished(serialState_t* serialState,
                                              unsigned jobID, size_t cSize)
{
    ZSTD_PTHREAD_MUTEX_LOCK(&serialState->mutex);
    if (serialState->nextJobID <= jobID) {
        assert(ZSTD_isError(cSize)); (void)cSize;
        DEBUGLOG(5, "Skipping past job %u because of error", jobID);
        serialState->nextJobID = jobID + 1;
        ZSTD_pthread_cond_broadcast(&serialState->cond);

        ZSTD_PTHREAD_MUTEX_LOCK(&serialState->ldmWindowMutex);
        ZSTD_window_clear(&serialState->ldmWindow);
        ZSTD_pthread_cond_signal(&serialState->ldmWindowCond);
        ZSTD_pthread_mutex_unlock(&serialState->ldmWindowMutex);
    }
    ZSTD_pthread_mutex_unlock(&serialState->mutex);

}


/* ------------------------------------------ */
/* ===== Worker thread ===== */
/* ------------------------------------------ */

static const range_t kNullRange = { NULL, 0 };

typedef struct {
    size_t consumed;                    /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx */
    size_t cSize;                       /* SHARED - set0 by mtctx, then modified by worker AND read by mtctx, then set0 by mtctx */
    ZSTD_pthread_mutex_t job_mutex;     /* Thread-safe - used by mtctx and worker */
    ZSTD_pthread_cond_t job_cond;       /* Thread-safe - used by mtctx and worker */
    ZSTDMT_CCtxPool* cctxPool;          /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_bufferPool* bufPool;         /* Thread-safe - used by mtctx and (all) workers */
    ZSTDMT_seqPool* seqPool;            /* Thread-safe - used by mtctx and (all) workers */
    serialState_t* serial;              /* Thread-safe - used by mtctx and (all) workers */
    buffer_t dstBuff;                   /* set by worker (or mtctx), then read by worker & mtctx, then modified by mtctx => no barrier */
    range_t prefix;                     /* set by mtctx, then read by worker & mtctx => no barrier */
    range_t src;                        /* set by mtctx, then read by worker & mtctx => no barrier */
    unsigned jobID;                     /* set by mtctx, then read by worker => no barrier */
    unsigned firstJob;                  /* set by mtctx, then read by worker => no barrier */
    unsigned lastJob;                   /* set by mtctx, then read by worker => no barrier */
    ZSTD_CCtx_params params;            /* set by mtctx, then read by worker => no barrier */
    const ZSTD_CDict* cdict;            /* set by mtctx, then read by worker => no barrier */
    unsigned long long fullFrameSize;   /* set by mtctx, then read by worker => no barrier */
    size_t dstFlushed;                  /* used only by mtctx */
    unsigned frameChecksumNeeded;       /* used only by mtctx */
} ZSTDMT_jobDescription;

#define JOB_ERROR(e) {                          \
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);   \
    job->cSize = e;                             \
    ZSTD_pthread_mutex_unlock(&job->job_mutex); \
    goto _endJob;                               \
}
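/* note : zstd error codes and compressed sizes share the size_t domain,
 * so storing an error code into job->cSize marks the whole job as failed;
 * readers test it with ZSTD_isError() (see end of ZSTDMT_compressionJob) */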

/* ZSTDMT_compressionJob() is a POOL_function type */
static void ZSTDMT_compressionJob(void* jobDescription)
{
    ZSTDMT_jobDescription* const job = (ZSTDMT_jobDescription*)jobDescription;
    ZSTD_CCtx_params jobParams = job->params;   /* do not modify job->params ! copy it, modify the copy */
    ZSTD_CCtx* const cctx = ZSTDMT_getCCtx(job->cctxPool);
    rawSeqStore_t rawSeqStore = ZSTDMT_getSeq(job->seqPool);
    buffer_t dstBuff = job->dstBuff;
    size_t lastCBlockSize = 0;

    /* resources */
    if (cctx==NULL) JOB_ERROR(ERROR(memory_allocation));
    if (dstBuff.start == NULL) {   /* streaming job : doesn't provide a dstBuffer */
        dstBuff = ZSTDMT_getBuffer(job->bufPool);
        if (dstBuff.start==NULL) JOB_ERROR(ERROR(memory_allocation));
        job->dstBuff = dstBuff;   /* this value can be read in ZSTDMT_flush, when it copies the whole job */
    }
    if (jobParams.ldmParams.enableLdm == ZSTD_ps_enable && rawSeqStore.seq == NULL)
        JOB_ERROR(ERROR(memory_allocation));

    /* Don't compute the checksum for chunks, since we compute it externally,
     * but write it in the header.
     */
    if (job->jobID != 0) jobParams.fParams.checksumFlag = 0;
    /* Don't run LDM for the chunks, since we handle it externally */
    jobParams.ldmParams.enableLdm = ZSTD_ps_disable;
    /* Correct nbWorkers to 0. */
    jobParams.nbWorkers = 0;


    /* init */
    if (job->cdict) {
        size_t const initError = ZSTD_compressBegin_advanced_internal(cctx, NULL, 0, ZSTD_dct_auto, ZSTD_dtlm_fast, job->cdict, &jobParams, job->fullFrameSize);
        assert(job->firstJob);   /* only allowed for first job */
        if (ZSTD_isError(initError)) JOB_ERROR(initError);
    } else {   /* srcStart points at reloaded section */
        U64 const pledgedSrcSize = job->firstJob ? job->fullFrameSize : job->src.size;
        {   size_t const forceWindowError = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_forceMaxWindow, !job->firstJob);
            if (ZSTD_isError(forceWindowError)) JOB_ERROR(forceWindowError);
        }
        if (!job->firstJob) {
            size_t const err = ZSTD_CCtxParams_setParameter(&jobParams, ZSTD_c_deterministicRefPrefix, 0);
            if (ZSTD_isError(err)) JOB_ERROR(err);
        }
        {   size_t const initError = ZSTD_compressBegin_advanced_internal(cctx,
                        job->prefix.start, job->prefix.size, ZSTD_dct_rawContent, /* load dictionary in "content-only" mode (no header analysis) */
                        ZSTD_dtlm_fast,
                        NULL, /*cdict*/
                        &jobParams, pledgedSrcSize);
            if (ZSTD_isError(initError)) JOB_ERROR(initError);
    }   }

    /* Perform serial step as early as possible, but after CCtx initialization */
    ZSTDMT_serialState_update(job->serial, cctx, rawSeqStore, job->src, job->jobID);

    if (!job->firstJob) {   /* flush and overwrite frame header when it's not first job */
        size_t const hSize = ZSTD_compressContinue_public(cctx, dstBuff.start, dstBuff.capacity, job->src.start, 0);
        if (ZSTD_isError(hSize)) JOB_ERROR(hSize);
        DEBUGLOG(5, "ZSTDMT_compressionJob: flush and overwrite %u bytes of frame header (not first job)", (U32)hSize);
        ZSTD_invalidateRepCodes(cctx);
    }

    /* compress */
    {   size_t const chunkSize = 4*ZSTD_BLOCKSIZE_MAX;
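        /* note : ZSTD_BLOCKSIZE_MAX is 128 KB, so each full chunk is 512 KB ;
         * e.g. a 1 MB job runs one full 512 KB chunk through the loop below,
         * and its remaining 512 KB goes through the "last block" path */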
        int const nbChunks = (int)((job->src.size + (chunkSize-1)) / chunkSize);
        const BYTE* ip = (const BYTE*) job->src.start;
        BYTE* const ostart = (BYTE*)dstBuff.start;
        BYTE* op = ostart;
        BYTE* oend = op + dstBuff.capacity;
        int chunkNb;
        if (sizeof(size_t) > sizeof(int)) assert(job->src.size < ((size_t)INT_MAX) * chunkSize);   /* check overflow */
        DEBUGLOG(5, "ZSTDMT_compressionJob: compress %u bytes in %i blocks", (U32)job->src.size, nbChunks);
        assert(job->cSize == 0);
        for (chunkNb = 1; chunkNb < nbChunks; chunkNb++) {
            size_t const cSize = ZSTD_compressContinue_public(cctx, op, oend-op, ip, chunkSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            ip += chunkSize;
            op += cSize; assert(op < oend);
            /* stats */
            ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
            job->cSize += cSize;
            job->consumed = chunkSize * chunkNb;
            DEBUGLOG(5, "ZSTDMT_compressionJob: compress new block : cSize==%u bytes (total: %u)",
                        (U32)cSize, (U32)job->cSize);
            ZSTD_pthread_cond_signal(&job->job_cond);   /* warns some more data is ready to be flushed */
            ZSTD_pthread_mutex_unlock(&job->job_mutex);
        }
        /* last block */
        assert(chunkSize > 0);
        assert((chunkSize & (chunkSize - 1)) == 0);   /* chunkSize must be power of 2 for mask==(chunkSize-1) to work */
        if ((nbChunks > 0) | job->lastJob /*must output a "last block" flag*/ ) {
            size_t const lastBlockSize1 = job->src.size & (chunkSize-1);
            size_t const lastBlockSize = ((lastBlockSize1==0) & (job->src.size>=chunkSize)) ? chunkSize : lastBlockSize1;
            size_t const cSize = (job->lastJob) ?
                 ZSTD_compressEnd_public(cctx, op, oend-op, ip, lastBlockSize) :
                 ZSTD_compressContinue_public(cctx, op, oend-op, ip, lastBlockSize);
            if (ZSTD_isError(cSize)) JOB_ERROR(cSize);
            lastCBlockSize = cSize;
    }   }
    if (!job->firstJob) {
        /* Double check that we don't have an ext-dict, because then our
         * repcode invalidation doesn't work.
         */
        assert(!ZSTD_window_hasExtDict(cctx->blockState.matchState.window));
    }
    ZSTD_CCtx_trace(cctx, 0);

_endJob:
    ZSTDMT_serialState_ensureFinished(job->serial, job->jobID, job->cSize);
    if (job->prefix.size > 0)
        DEBUGLOG(5, "Finished with prefix: %zx", (size_t)job->prefix.start);
    DEBUGLOG(5, "Finished with source: %zx", (size_t)job->src.start);
    /* release resources */
    ZSTDMT_releaseSeq(job->seqPool, rawSeqStore);
    ZSTDMT_releaseCCtx(job->cctxPool, cctx);
    /* report */
    ZSTD_PTHREAD_MUTEX_LOCK(&job->job_mutex);
    if (ZSTD_isError(job->cSize)) assert(lastCBlockSize == 0);
    job->cSize += lastCBlockSize;
    job->consumed = job->src.size;   /* when job->consumed == job->src.size , compression job is presumed completed */
    ZSTD_pthread_cond_signal(&job->job_cond);
    ZSTD_pthread_mutex_unlock(&job->job_mutex);
}


/* ------------------------------------------ */
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */

typedef struct {
    range_t prefix;   /* read-only non-owned prefix buffer */
    buffer_t buffer;
    size_t filled;
} inBuff_t;

typedef struct {
    BYTE* buffer;     /* The round input buffer. All jobs get references
                       * to pieces of the buffer. ZSTDMT_tryGetInputRange()
                       * handles handing out job input buffers, and makes
                       * sure it doesn't overlap with any pieces still in use.
                       */
    size_t capacity;  /* The capacity of buffer. */
    size_t pos;       /* The position of the current inBuff in the round
                       * buffer. Updated past the end of the inBuff once
                       * the inBuff is sent to the worker thread.
                       * pos <= capacity.
                       */
} roundBuff_t;

static const roundBuff_t kNullRoundBuff = {NULL, 0, 0};

#define RSYNC_LENGTH 32
/* Don't create chunks smaller than the zstd block size.
 * This stops us from regressing compression ratio too much,
 * and ensures our output fits in ZSTD_compressBound().
 *
 * If this is shrunk < ZSTD_BLOCKSIZELOG_MIN then
 * ZSTD_COMPRESSBOUND() will need to be updated.
 */
#define RSYNC_MIN_BLOCK_LOG ZSTD_BLOCKSIZELOG_MAX
#define RSYNC_MIN_BLOCK_SIZE (1<<RSYNC_MIN_BLOCK_LOG)
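/* note : ZSTD_BLOCKSIZELOG_MAX is 17 (zstd.h), so the minimum rsync chunk size is 128 KB */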

typedef struct {
    U64 hash;
    U64 hitMask;
    U64 primePower;
} rsyncState_t;

struct ZSTDMT_CCtx_s {
    POOL_ctx* factory;
    ZSTDMT_jobDescription* jobs;
    ZSTDMT_bufferPool* bufPool;
    ZSTDMT_CCtxPool* cctxPool;
    ZSTDMT_seqPool* seqPool;
    ZSTD_CCtx_params params;
    size_t targetSectionSize;
    size_t targetPrefixSize;
    int jobReady;   /* 1 => one job is already prepared, but pool has shortage of workers. Don't create a new job. */
    inBuff_t inBuff;
    roundBuff_t roundBuff;
    serialState_t serial;
    rsyncState_t rsync;
    unsigned jobIDMask;
    unsigned doneJobID;
    unsigned nextJobID;
    unsigned frameEnded;
    unsigned allJobsCompleted;
    unsigned long long frameContentSize;
    unsigned long long consumed;
    unsigned long long produced;
    ZSTD_customMem cMem;
    ZSTD_CDict* cdictLocal;
    const ZSTD_CDict* cdict;
    unsigned providedFactory: 1;
};

static void ZSTDMT_freeJobsTable(ZSTDMT_jobDescription* jobTable, U32 nbJobs, ZSTD_customMem cMem)
{
    U32 jobNb;
    if (jobTable == NULL) return;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        ZSTD_pthread_mutex_destroy(&jobTable[jobNb].job_mutex);
        ZSTD_pthread_cond_destroy(&jobTable[jobNb].job_cond);
    }
    ZSTD_customFree(jobTable, cMem);
}

/* ZSTDMT_createJobsTable()
 * allocate and init a job table.
 * update *nbJobsPtr to next power of 2 value, as size of table */
static ZSTDMT_jobDescription* ZSTDMT_createJobsTable(U32* nbJobsPtr, ZSTD_customMem cMem)
{
    U32 const nbJobsLog2 = ZSTD_highbit32(*nbJobsPtr) + 1;
    U32 const nbJobs = 1 << nbJobsLog2;
    U32 jobNb;
    ZSTDMT_jobDescription* const jobTable = (ZSTDMT_jobDescription*)
        ZSTD_customCalloc(nbJobs * sizeof(ZSTDMT_jobDescription), cMem);
    int initError = 0;
    if (jobTable==NULL) return NULL;
    *nbJobsPtr = nbJobs;
    for (jobNb=0; jobNb<nbJobs; jobNb++) {
        initError |= ZSTD_pthread_mutex_init(&jobTable[jobNb].job_mutex, NULL);
        initError |= ZSTD_pthread_cond_init(&jobTable[jobNb].job_cond, NULL);
    }
    if (initError != 0) {
        ZSTDMT_freeJobsTable(jobTable, nbJobs, cMem);
        return NULL;
    }
    return jobTable;
}
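/* e.g. : *nbJobsPtr==10 => ZSTD_highbit32(10)==3, so nbJobs = 1<<4 = 16 */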

static size_t ZSTDMT_expandJobsTable (ZSTDMT_CCtx* mtctx, U32 nbWorkers) {
    U32 nbJobs = nbWorkers + 2;
    if (nbJobs > mtctx->jobIDMask+1) {   /* need more job capacity */
        ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
        mtctx->jobIDMask = 0;
        mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, mtctx->cMem);
        if (mtctx->jobs==NULL) return ERROR(memory_allocation);
        assert((nbJobs != 0) && ((nbJobs & (nbJobs - 1)) == 0));   /* ensure nbJobs is a power of 2 */
        mtctx->jobIDMask = nbJobs - 1;
    }
    return 0;
}


/* ZSTDMT_CCtxParam_setNbWorkers():
 * Internal use only */
static size_t ZSTDMT_CCtxParam_setNbWorkers(ZSTD_CCtx_params* params, unsigned nbWorkers)
{
    return ZSTD_CCtxParams_setParameter(params, ZSTD_c_nbWorkers, (int)nbWorkers);
}

MEM_STATIC ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced_internal(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
    ZSTDMT_CCtx* mtctx;
    U32 nbJobs = nbWorkers + 2;
    int initError;
    DEBUGLOG(3, "ZSTDMT_createCCtx_advanced (nbWorkers = %u)", nbWorkers);

    if (nbWorkers < 1) return NULL;
    nbWorkers = MIN(nbWorkers , ZSTDMT_NBWORKERS_MAX);
    if ((cMem.customAlloc!=NULL) ^ (cMem.customFree!=NULL))
        /* invalid custom allocator */
        return NULL;

    mtctx = (ZSTDMT_CCtx*) ZSTD_customCalloc(sizeof(ZSTDMT_CCtx), cMem);
    if (!mtctx) return NULL;
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    mtctx->cMem = cMem;
    mtctx->allJobsCompleted = 1;
    if (pool != NULL) {
        mtctx->factory = pool;
        mtctx->providedFactory = 1;
    }
    else {
        mtctx->factory = POOL_create_advanced(nbWorkers, 0, cMem);
        mtctx->providedFactory = 0;
    }
    mtctx->jobs = ZSTDMT_createJobsTable(&nbJobs, cMem);
    assert(nbJobs > 0); assert((nbJobs & (nbJobs - 1)) == 0);   /* ensure nbJobs is a power of 2 */
    mtctx->jobIDMask = nbJobs - 1;
    mtctx->bufPool = ZSTDMT_createBufferPool(BUF_POOL_MAX_NB_BUFFERS(nbWorkers), cMem);
    mtctx->cctxPool = ZSTDMT_createCCtxPool(nbWorkers, cMem);
    mtctx->seqPool = ZSTDMT_createSeqPool(nbWorkers, cMem);
    initError = ZSTDMT_serialState_init(&mtctx->serial);
    mtctx->roundBuff = kNullRoundBuff;
    if (!mtctx->factory | !mtctx->jobs | !mtctx->bufPool | !mtctx->cctxPool | !mtctx->seqPool | initError) {
        ZSTDMT_freeCCtx(mtctx);
        return NULL;
    }
    DEBUGLOG(3, "mt_cctx created, for %u threads", nbWorkers);
    return mtctx;
}

ZSTDMT_CCtx* ZSTDMT_createCCtx_advanced(unsigned nbWorkers, ZSTD_customMem cMem, ZSTD_threadPool* pool)
{
#ifdef ZSTD_MULTITHREAD
    return ZSTDMT_createCCtx_advanced_internal(nbWorkers, cMem, pool);
#else
    (void)nbWorkers;
    (void)cMem;
    (void)pool;
    return NULL;
#endif
}


/* ZSTDMT_releaseAllJobResources() :
 * note : ensure all workers are killed first ! */
static void ZSTDMT_releaseAllJobResources(ZSTDMT_CCtx* mtctx)
{
    unsigned jobID;
    DEBUGLOG(3, "ZSTDMT_releaseAllJobResources");
    for (jobID=0; jobID <= mtctx->jobIDMask; jobID++) {
        /* Copy the mutex/cond out */
        ZSTD_pthread_mutex_t const mutex = mtctx->jobs[jobID].job_mutex;
        ZSTD_pthread_cond_t const cond = mtctx->jobs[jobID].job_cond;

        DEBUGLOG(4, "job%02u: release dst address %08X", jobID, (U32)(size_t)mtctx->jobs[jobID].dstBuff.start);
        ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[jobID].dstBuff);

        /* Clear the job description, but keep the mutex/cond */
        ZSTD_memset(&mtctx->jobs[jobID], 0, sizeof(mtctx->jobs[jobID]));
        mtctx->jobs[jobID].job_mutex = mutex;
        mtctx->jobs[jobID].job_cond = cond;
    }
    mtctx->inBuff.buffer = g_nullBuffer;
    mtctx->inBuff.filled = 0;
    mtctx->allJobsCompleted = 1;
}

static void ZSTDMT_waitForAllJobsCompleted(ZSTDMT_CCtx* mtctx)
{
    DEBUGLOG(4, "ZSTDMT_waitForAllJobsCompleted");
    while (mtctx->doneJobID < mtctx->nextJobID) {
        unsigned const jobID = mtctx->doneJobID & mtctx->jobIDMask;
        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[jobID].job_mutex);
        while (mtctx->jobs[jobID].consumed < mtctx->jobs[jobID].src.size) {
            DEBUGLOG(4, "waiting for jobCompleted signal from job %u", mtctx->doneJobID);   /* we want to block when waiting for data to flush */
            ZSTD_pthread_cond_wait(&mtctx->jobs[jobID].job_cond, &mtctx->jobs[jobID].job_mutex);
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[jobID].job_mutex);
        mtctx->doneJobID++;
    }
}

size_t ZSTDMT_freeCCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx==NULL) return 0;   /* compatible with free on NULL */
    if (!mtctx->providedFactory)
        POOL_free(mtctx->factory);   /* stop and free worker threads */
    ZSTDMT_releaseAllJobResources(mtctx);   /* release job resources into pools first */
    ZSTDMT_freeJobsTable(mtctx->jobs, mtctx->jobIDMask+1, mtctx->cMem);
    ZSTDMT_freeBufferPool(mtctx->bufPool);
    ZSTDMT_freeCCtxPool(mtctx->cctxPool);
    ZSTDMT_freeSeqPool(mtctx->seqPool);
    ZSTDMT_serialState_free(&mtctx->serial);
    ZSTD_freeCDict(mtctx->cdictLocal);
    if (mtctx->roundBuff.buffer)
        ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
    ZSTD_customFree(mtctx, mtctx->cMem);
    return 0;
}

size_t ZSTDMT_sizeof_CCtx(ZSTDMT_CCtx* mtctx)
{
    if (mtctx == NULL) return 0;   /* supports sizeof NULL */
    return sizeof(*mtctx)
            + POOL_sizeof(mtctx->factory)
            + ZSTDMT_sizeof_bufferPool(mtctx->bufPool)
            + (mtctx->jobIDMask+1) * sizeof(ZSTDMT_jobDescription)
            + ZSTDMT_sizeof_CCtxPool(mtctx->cctxPool)
            + ZSTDMT_sizeof_seqPool(mtctx->seqPool)
            + ZSTD_sizeof_CDict(mtctx->cdictLocal)
            + mtctx->roundBuff.capacity;
}


/* ZSTDMT_resize() :
 * @return : error code if fails, 0 on success */
static size_t ZSTDMT_resize(ZSTDMT_CCtx* mtctx, unsigned nbWorkers)
{
    if (POOL_resize(mtctx->factory, nbWorkers)) return ERROR(memory_allocation);
    FORWARD_IF_ERROR( ZSTDMT_expandJobsTable(mtctx, nbWorkers) , "");
    mtctx->bufPool = ZSTDMT_expandBufferPool(mtctx->bufPool, BUF_POOL_MAX_NB_BUFFERS(nbWorkers));
    if (mtctx->bufPool == NULL) return ERROR(memory_allocation);
    mtctx->cctxPool = ZSTDMT_expandCCtxPool(mtctx->cctxPool, nbWorkers);
    if (mtctx->cctxPool == NULL) return ERROR(memory_allocation);
    mtctx->seqPool = ZSTDMT_expandSeqPool(mtctx->seqPool, nbWorkers);
    if (mtctx->seqPool == NULL) return ERROR(memory_allocation);
    ZSTDMT_CCtxParam_setNbWorkers(&mtctx->params, nbWorkers);
    return 0;
}


/*! ZSTDMT_updateCParams_whileCompressing() :
 *  Updates a selected set of compression parameters, remaining compatible with currently active frame.
 *  New parameters will be applied to next compression job. */
void ZSTDMT_updateCParams_whileCompressing(ZSTDMT_CCtx* mtctx, const ZSTD_CCtx_params* cctxParams)
{
    U32 const saved_wlog = mtctx->params.cParams.windowLog;   /* Do not modify windowLog while compressing */
    int const compressionLevel = cctxParams->compressionLevel;
    DEBUGLOG(5, "ZSTDMT_updateCParams_whileCompressing (level:%i)",
             compressionLevel);
    mtctx->params.compressionLevel = compressionLevel;
    {   ZSTD_compressionParameters cParams = ZSTD_getCParamsFromCCtxParams(cctxParams, ZSTD_CONTENTSIZE_UNKNOWN, 0, ZSTD_cpm_noAttachDict);
        cParams.windowLog = saved_wlog;
        mtctx->params.cParams = cParams;
    }
}

/* ZSTDMT_getFrameProgression():
 * tells how much data has been consumed (input) and produced (output) for current frame.
 * able to count progression inside worker threads.
 * Note : mutex will be acquired during statistics collection inside workers. */
ZSTD_frameProgression ZSTDMT_getFrameProgression(ZSTDMT_CCtx* mtctx)
{
    ZSTD_frameProgression fps;
    DEBUGLOG(5, "ZSTDMT_getFrameProgression");
    fps.ingested = mtctx->consumed + mtctx->inBuff.filled;
    fps.consumed = mtctx->consumed;
    fps.produced = fps.flushed = mtctx->produced;
    fps.currentJobID = mtctx->nextJobID;
    fps.nbActiveWorkers = 0;
    {   unsigned jobNb;
        unsigned lastJobNb = mtctx->nextJobID + mtctx->jobReady; assert(mtctx->jobReady <= 1);
        DEBUGLOG(6, "ZSTDMT_getFrameProgression: jobs: from %u to <%u (jobReady:%u)",
                    mtctx->doneJobID, lastJobNb, mtctx->jobReady)
        for (jobNb = mtctx->doneJobID ; jobNb < lastJobNb ; jobNb++) {
            unsigned const wJobID = jobNb & mtctx->jobIDMask;
            ZSTDMT_jobDescription* jobPtr = &mtctx->jobs[wJobID];
            ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
            {   size_t const cResult = jobPtr->cSize;
                size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
                size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
                assert(flushed <= produced);
                fps.ingested += jobPtr->src.size;
                fps.consumed += jobPtr->consumed;
                fps.produced += produced;
                fps.flushed += flushed;
                fps.nbActiveWorkers += (jobPtr->consumed < jobPtr->src.size);
            }
            ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        }
    }
    return fps;
}


size_t ZSTDMT_toFlushNow(ZSTDMT_CCtx* mtctx)
{
    size_t toFlush;
    unsigned const jobID = mtctx->doneJobID;
    assert(jobID <= mtctx->nextJobID);
    if (jobID == mtctx->nextJobID) return 0;   /* no active job => nothing to flush */

    /* look into oldest non-fully-flushed job */
    {   unsigned const wJobID = jobID & mtctx->jobIDMask;
        ZSTDMT_jobDescription* const jobPtr = &mtctx->jobs[wJobID];
        ZSTD_pthread_mutex_lock(&jobPtr->job_mutex);
        {   size_t const cResult = jobPtr->cSize;
            size_t const produced = ZSTD_isError(cResult) ? 0 : cResult;
            size_t const flushed = ZSTD_isError(cResult) ? 0 : jobPtr->dstFlushed;
            assert(flushed <= produced);
            assert(jobPtr->consumed <= jobPtr->src.size);
            toFlush = produced - flushed;
            /* if toFlush==0, nothing is available to flush.
             * However, jobID is expected to still be active:
             * if jobID was already completed and fully flushed,
             * ZSTDMT_flushProduced() should have already moved onto next job.
             * Therefore, some input has not yet been consumed. */
            if (toFlush==0) {
                assert(jobPtr->consumed < jobPtr->src.size);
            }
        }
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
    }

    return toFlush;
}


/* ------------------------------------------ */
/* ===== Multi-threaded compression ===== */
/* ------------------------------------------ */

static unsigned ZSTDMT_computeTargetJobLog(const ZSTD_CCtx_params* params)
{
    unsigned jobLog;
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
        /* In Long Range Mode, the windowLog is typically oversized.
         * In that case, it's preferable to determine the jobSize
         * based on cycleLog instead. */
        jobLog = MAX(21, ZSTD_cycleLog(params->cParams.chainLog, params->cParams.strategy) + 3);
    } else {
        jobLog = MAX(20, params->cParams.windowLog + 2);
    }
    return MIN(jobLog, (unsigned)ZSTDMT_JOBLOG_MAX);
}
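/* e.g. : without LDM, windowLog==24 gives jobLog = MAX(20, 26) = 26,
 * i.e. a target job size of 64 MB, subject to the ZSTDMT_JOBLOG_MAX cap */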

static int ZSTDMT_overlapLog_default(ZSTD_strategy strat)
{
    switch(strat)
    {
        case ZSTD_btultra2:
            return 9;
        case ZSTD_btultra:
        case ZSTD_btopt:
            return 8;
        case ZSTD_btlazy2:
        case ZSTD_lazy2:
            return 7;
        case ZSTD_lazy:
        case ZSTD_greedy:
        case ZSTD_dfast:
        case ZSTD_fast:
        default:;
    }
    return 6;
}

static int ZSTDMT_overlapLog(int ovlog, ZSTD_strategy strat)
{
    assert(0 <= ovlog && ovlog <= 9);
    if (ovlog == 0) return ZSTDMT_overlapLog_default(strat);
    return ovlog;
}

static size_t ZSTDMT_computeOverlapSize(const ZSTD_CCtx_params* params)
{
    int const overlapRLog = 9 - ZSTDMT_overlapLog(params->overlapLog, params->cParams.strategy);
    int ovLog = (overlapRLog >= 8) ? 0 : (params->cParams.windowLog - overlapRLog);
    assert(0 <= overlapRLog && overlapRLog <= 8);
    if (params->ldmParams.enableLdm == ZSTD_ps_enable) {
        /* In Long Range Mode, the windowLog is typically oversized.
         * In that case, it's preferable to determine the jobSize
         * based on chainLog instead.
         * Then, ovLog becomes a fraction of the jobSize, rather than windowSize */
        ovLog = MIN(params->cParams.windowLog, ZSTDMT_computeTargetJobLog(params) - 2)
                - overlapRLog;
    }
    assert(0 <= ovLog && ovLog <= ZSTD_WINDOWLOG_MAX);
    DEBUGLOG(4, "overlapLog : %i", params->overlapLog);
    DEBUGLOG(4, "overlap size : %i", 1 << ovLog);
    return (ovLog==0) ? 0 : (size_t)1 << ovLog;
}
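/* e.g. : overlapLog==0 with strategy ZSTD_btopt resolves to the default 8,
 * so overlapRLog = 9-8 = 1 and the overlap is windowSize/2 (when LDM is off) */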

/* ====================================== */
/* ======= Streaming API ======= */
/* ====================================== */

size_t ZSTDMT_initCStream_internal(
        ZSTDMT_CCtx* mtctx,
        const void* dict, size_t dictSize, ZSTD_dictContentType_e dictContentType,
        const ZSTD_CDict* cdict, ZSTD_CCtx_params params,
        unsigned long long pledgedSrcSize)
{
    DEBUGLOG(4, "ZSTDMT_initCStream_internal (pledgedSrcSize=%u, nbWorkers=%u, cctxPool=%u)",
             (U32)pledgedSrcSize, params.nbWorkers, mtctx->cctxPool->totalCCtx);

    /* params are supposed to be at least partially validated at this point */
    assert(!ZSTD_isError(ZSTD_checkCParams(params.cParams)));
    assert(!((dict) && (cdict)));   /* either dict or cdict, not both */

    /* init */
    if (params.nbWorkers != mtctx->params.nbWorkers)
        FORWARD_IF_ERROR( ZSTDMT_resize(mtctx, params.nbWorkers) , "");

    if (params.jobSize != 0 && params.jobSize < ZSTDMT_JOBSIZE_MIN) params.jobSize = ZSTDMT_JOBSIZE_MIN;
    if (params.jobSize > (size_t)ZSTDMT_JOBSIZE_MAX) params.jobSize = (size_t)ZSTDMT_JOBSIZE_MAX;

    DEBUGLOG(4, "ZSTDMT_initCStream_internal: %u workers", params.nbWorkers);

    if (mtctx->allJobsCompleted == 0) {   /* previous compression not correctly finished */
        ZSTDMT_waitForAllJobsCompleted(mtctx);
        ZSTDMT_releaseAllJobResources(mtctx);
        mtctx->allJobsCompleted = 1;
    }

    mtctx->params = params;
    mtctx->frameContentSize = pledgedSrcSize;
    if (dict) {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = ZSTD_createCDict_advanced(dict, dictSize,
                                                      ZSTD_dlm_byCopy, dictContentType, /* note : a loadPrefix becomes an internal CDict */
                                                      params.cParams, mtctx->cMem);
        mtctx->cdict = mtctx->cdictLocal;
        if (mtctx->cdictLocal == NULL) return ERROR(memory_allocation);
    } else {
        ZSTD_freeCDict(mtctx->cdictLocal);
        mtctx->cdictLocal = NULL;
        mtctx->cdict = cdict;
    }

    mtctx->targetPrefixSize = ZSTDMT_computeOverlapSize(&params);
    DEBUGLOG(4, "overlapLog=%i => %u KB", params.overlapLog, (U32)(mtctx->targetPrefixSize>>10));
    mtctx->targetSectionSize = params.jobSize;
    if (mtctx->targetSectionSize == 0) {
        mtctx->targetSectionSize = 1ULL << ZSTDMT_computeTargetJobLog(&params);
    }
    assert(mtctx->targetSectionSize <= (size_t)ZSTDMT_JOBSIZE_MAX);

    if (params.rsyncable) {
        /* Aim for the targetSectionSize as the average job size. */
        U32 const jobSizeKB = (U32)(mtctx->targetSectionSize >> 10);
        U32 const rsyncBits = (assert(jobSizeKB >= 1), ZSTD_highbit32(jobSizeKB) + 10);
        /* We refuse to create jobs < RSYNC_MIN_BLOCK_SIZE bytes, so make sure our
         * expected job size is at least 4x larger. */
        assert(rsyncBits >= RSYNC_MIN_BLOCK_LOG + 2);
        DEBUGLOG(4, "rsyncLog = %u", rsyncBits);
        mtctx->rsync.hash = 0;
        mtctx->rsync.hitMask = (1ULL << rsyncBits) - 1;
        mtctx->rsync.primePower = ZSTD_rollingHash_primePower(RSYNC_LENGTH);
    }
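    /* e.g. : targetSectionSize==4 MB => jobSizeKB==4096, rsyncBits = 12+10 = 22,
     * so a synchronization point is found on average every 2^22 bytes = 4 MB */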
1281
if (mtctx->targetSectionSize < mtctx->targetPrefixSize) mtctx->targetSectionSize = mtctx->targetPrefixSize; /* job size must be >= overlap size */
1282
DEBUGLOG(4, "Job Size : %u KB (note : set to %u)", (U32)(mtctx->targetSectionSize>>10), (U32)params.jobSize);
1283
DEBUGLOG(4, "inBuff Size : %u KB", (U32)(mtctx->targetSectionSize>>10));
1284
ZSTDMT_setBufferSize(mtctx->bufPool, ZSTD_compressBound(mtctx->targetSectionSize));
1285
{
1286
/* If ldm is enabled we need windowSize space. */
1287
size_t const windowSize = mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable ? (1U << mtctx->params.cParams.windowLog) : 0;
1288
/* Two buffers of slack, plus extra space for the overlap
1289
* This is the minimum slack that LDM works with. One extra because
1290
* flush might waste up to targetSectionSize-1 bytes. Another extra
1291
* for the overlap (if > 0), then one to fill which doesn't overlap
1292
* with the LDM window.
1293
*/
1294
size_t const nbSlackBuffers = 2 + (mtctx->targetPrefixSize > 0);
1295
size_t const slackSize = mtctx->targetSectionSize * nbSlackBuffers;
1296
/* Compute the total size, and always have enough slack */
1297
size_t const nbWorkers = MAX(mtctx->params.nbWorkers, 1);
1298
size_t const sectionsSize = mtctx->targetSectionSize * nbWorkers;
1299
size_t const capacity = MAX(windowSize, sectionsSize) + slackSize;
1300
if (mtctx->roundBuff.capacity < capacity) {
1301
if (mtctx->roundBuff.buffer)
1302
ZSTD_customFree(mtctx->roundBuff.buffer, mtctx->cMem);
1303
mtctx->roundBuff.buffer = (BYTE*)ZSTD_customMalloc(capacity, mtctx->cMem);
1304
if (mtctx->roundBuff.buffer == NULL) {
1305
mtctx->roundBuff.capacity = 0;
1306
return ERROR(memory_allocation);
1307
}
1308
mtctx->roundBuff.capacity = capacity;
1309
}
1310
}
1311
DEBUGLOG(4, "roundBuff capacity : %u KB", (U32)(mtctx->roundBuff.capacity>>10));
1312
mtctx->roundBuff.pos = 0;
1313
mtctx->inBuff.buffer = g_nullBuffer;
1314
mtctx->inBuff.filled = 0;
1315
mtctx->inBuff.prefix = kNullRange;
1316
mtctx->doneJobID = 0;
1317
mtctx->nextJobID = 0;
1318
mtctx->frameEnded = 0;
1319
mtctx->allJobsCompleted = 0;
1320
mtctx->consumed = 0;
1321
mtctx->produced = 0;
1322
if (ZSTDMT_serialState_reset(&mtctx->serial, mtctx->seqPool, params, mtctx->targetSectionSize,
1323
dict, dictSize, dictContentType))
1324
return ERROR(memory_allocation);
1325
return 0;
1326
}
1327
1328
1329
/* ZSTDMT_writeLastEmptyBlock()
1330
* Write a single empty block with an end-of-frame to finish a frame.
1331
* Job must be created from streaming variant.
1332
* This function is always successful if expected conditions are fulfilled.
1333
*/
1334
static void ZSTDMT_writeLastEmptyBlock(ZSTDMT_jobDescription* job)
1335
{
1336
assert(job->lastJob == 1);
1337
assert(job->src.size == 0); /* last job is empty -> will be simplified into a last empty block */
1338
assert(job->firstJob == 0); /* cannot be first job, as it also needs to create frame header */
1339
assert(job->dstBuff.start == NULL); /* invoked from streaming variant only (otherwise, dstBuff might be user's output) */
1340
job->dstBuff = ZSTDMT_getBuffer(job->bufPool);
1341
if (job->dstBuff.start == NULL) {
1342
job->cSize = ERROR(memory_allocation);
1343
return;
1344
}
1345
assert(job->dstBuff.capacity >= ZSTD_blockHeaderSize); /* no buffer should ever be that small */
1346
job->src = kNullRange;
1347
job->cSize = ZSTD_writeLastEmptyBlock(job->dstBuff.start, job->dstBuff.capacity);
1348
assert(!ZSTD_isError(job->cSize));
1349
assert(job->consumed == 0);
1350
}
1351
1352
static size_t ZSTDMT_createCompressionJob(ZSTDMT_CCtx* mtctx, size_t srcSize, ZSTD_EndDirective endOp)
{
    unsigned const jobID = mtctx->nextJobID & mtctx->jobIDMask;
    int const endFrame = (endOp == ZSTD_e_end);

    if (mtctx->nextJobID > mtctx->doneJobID + mtctx->jobIDMask) {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: will not create new job : table is full");
        assert((mtctx->nextJobID & mtctx->jobIDMask) == (mtctx->doneJobID & mtctx->jobIDMask));
        return 0;
    }

    if (!mtctx->jobReady) {
        BYTE const* src = (BYTE const*)mtctx->inBuff.buffer.start;
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: preparing job %u to compress %u bytes with %u preload ",
                    mtctx->nextJobID, (U32)srcSize, (U32)mtctx->inBuff.prefix.size);
        mtctx->jobs[jobID].src.start = src;
        mtctx->jobs[jobID].src.size = srcSize;
        assert(mtctx->inBuff.filled >= srcSize);
        mtctx->jobs[jobID].prefix = mtctx->inBuff.prefix;
        mtctx->jobs[jobID].consumed = 0;
        mtctx->jobs[jobID].cSize = 0;
        mtctx->jobs[jobID].params = mtctx->params;
        mtctx->jobs[jobID].cdict = mtctx->nextJobID==0 ? mtctx->cdict : NULL;
        mtctx->jobs[jobID].fullFrameSize = mtctx->frameContentSize;
        mtctx->jobs[jobID].dstBuff = g_nullBuffer;
        mtctx->jobs[jobID].cctxPool = mtctx->cctxPool;
        mtctx->jobs[jobID].bufPool = mtctx->bufPool;
        mtctx->jobs[jobID].seqPool = mtctx->seqPool;
        mtctx->jobs[jobID].serial = &mtctx->serial;
        mtctx->jobs[jobID].jobID = mtctx->nextJobID;
        mtctx->jobs[jobID].firstJob = (mtctx->nextJobID==0);
        mtctx->jobs[jobID].lastJob = endFrame;
        mtctx->jobs[jobID].frameChecksumNeeded = mtctx->params.fParams.checksumFlag && endFrame && (mtctx->nextJobID>0);
        mtctx->jobs[jobID].dstFlushed = 0;

        /* Update the round buffer position, and release the input buffer so it can be reset */
        mtctx->roundBuff.pos += srcSize;
        mtctx->inBuff.buffer = g_nullBuffer;
        mtctx->inBuff.filled = 0;
        /* Set the prefix for the next job */
        if (!endFrame) {
            size_t const newPrefixSize = MIN(srcSize, mtctx->targetPrefixSize);
            mtctx->inBuff.prefix.start = src + srcSize - newPrefixSize;
            mtctx->inBuff.prefix.size = newPrefixSize;
        } else {   /* endFrame==1 => no need for another input buffer */
            mtctx->inBuff.prefix = kNullRange;
            mtctx->frameEnded = endFrame;
            if (mtctx->nextJobID == 0) {
                /* single-job exception : checksum is already calculated directly within the worker thread */
                mtctx->params.fParams.checksumFlag = 0;
        }   }

        if ( (srcSize == 0)
          && (mtctx->nextJobID>0) /* a single job must also write the frame header */ ) {
            DEBUGLOG(5, "ZSTDMT_createCompressionJob: creating a last empty block to end frame");
            assert(endOp == ZSTD_e_end);  /* only possible case : need to end the frame with an empty last block */
            ZSTDMT_writeLastEmptyBlock(mtctx->jobs + jobID);
            mtctx->nextJobID++;
            return 0;
        }
    }

    DEBUGLOG(5, "ZSTDMT_createCompressionJob: posting job %u : %u bytes  (end:%u, jobNb == %u (mod:%u))",
                mtctx->nextJobID,
                (U32)mtctx->jobs[jobID].src.size,
                mtctx->jobs[jobID].lastJob,
                mtctx->nextJobID,
                jobID);
    if (POOL_tryAdd(mtctx->factory, ZSTDMT_compressionJob, &mtctx->jobs[jobID])) {
        mtctx->nextJobID++;
        mtctx->jobReady = 0;
    } else {
        DEBUGLOG(5, "ZSTDMT_createCompressionJob: no worker available for job %u", mtctx->nextJobID);
        mtctx->jobReady = 1;
    }
    return 0;
}

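/* Illustrative note (editor's addition, not part of the original source) :
 * the job table is a ring buffer whose size is a power of two, so
 * `jobID & jobIDMask` maps the ever-increasing nextJobID/doneJobID counters
 * onto table slots. Worked example, assuming 4 slots (jobIDMask == 3) :
 *
 *     doneJobID == 5, nextJobID == 8  -> 3 jobs in flight, slot = 8 & 3 = 0
 *     doneJobID == 5, nextJobID == 9  -> 9 > 5 + 3, the table is full : the
 *                                        new job must wait until job 5 is
 *                                        fully flushed.
 *
 * When the table is full, nextJobID and doneJobID necessarily point at the
 * same slot (9 & 3 == 5 & 3 == 1), which is what the assert() in the
 * full-table branch above checks.
 */
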
/*! ZSTDMT_flushProduced() :
 *  flush whatever data has been produced but not yet flushed in the current job.
 *  move to the next job if the current one is fully flushed.
 * `output` : `pos` will be updated with the amount of data flushed.
 * `blockToFlush` : if >0, the function will block and wait if there is no data available to flush.
 * @return : amount of data remaining within the internal buffer, 0 if no more, 1 if unknown but > 0, or an error code */
static size_t ZSTDMT_flushProduced(ZSTDMT_CCtx* mtctx, ZSTD_outBuffer* output, unsigned blockToFlush, ZSTD_EndDirective end)
{
    unsigned const wJobID = mtctx->doneJobID & mtctx->jobIDMask;
    DEBUGLOG(5, "ZSTDMT_flushProduced (blocking:%u , job %u <= %u)",
                blockToFlush, mtctx->doneJobID, mtctx->nextJobID);
    assert(output->size >= output->pos);

    ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
    if (  blockToFlush
      && (mtctx->doneJobID < mtctx->nextJobID) ) {
        assert(mtctx->jobs[wJobID].dstFlushed <= mtctx->jobs[wJobID].cSize);
        while (mtctx->jobs[wJobID].dstFlushed == mtctx->jobs[wJobID].cSize) {  /* nothing to flush */
            if (mtctx->jobs[wJobID].consumed == mtctx->jobs[wJobID].src.size) {
                DEBUGLOG(5, "job %u is completely consumed (%u == %u) => don't wait for cond, there will be none",
                            mtctx->doneJobID, (U32)mtctx->jobs[wJobID].consumed, (U32)mtctx->jobs[wJobID].src.size);
                break;
            }
            DEBUGLOG(5, "waiting for something to flush from job %u (currently flushed: %u bytes)",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
            ZSTD_pthread_cond_wait(&mtctx->jobs[wJobID].job_cond, &mtctx->jobs[wJobID].job_mutex);  /* block when there is nothing to flush but more is still expected */
    }   }

    /* try to flush something */
    {   size_t cSize = mtctx->jobs[wJobID].cSize;                  /* shared */
        size_t const srcConsumed = mtctx->jobs[wJobID].consumed;   /* shared */
        size_t const srcSize = mtctx->jobs[wJobID].src.size;       /* read-only : could be read after unlocking the mutex, but declared here to respect no-declaration-after-statement */
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);
        if (ZSTD_isError(cSize)) {
            DEBUGLOG(5, "ZSTDMT_flushProduced: job %u : compression error detected : %s",
                        mtctx->doneJobID, ZSTD_getErrorName(cSize));
            ZSTDMT_waitForAllJobsCompleted(mtctx);
            ZSTDMT_releaseAllJobResources(mtctx);
            return cSize;
        }
        /* add frame checksum if necessary (can only happen once) */
        assert(srcConsumed <= srcSize);
        if ( (srcConsumed == srcSize)   /* job completed -> worker no longer active */
          && mtctx->jobs[wJobID].frameChecksumNeeded ) {
            U32 const checksum = (U32)XXH64_digest(&mtctx->serial.xxhState);
            DEBUGLOG(4, "ZSTDMT_flushProduced: writing checksum : %08X \n", checksum);
            MEM_writeLE32((char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].cSize, checksum);
            cSize += 4;
            mtctx->jobs[wJobID].cSize += 4;  /* can write this shared value, as the worker is no longer active */
            mtctx->jobs[wJobID].frameChecksumNeeded = 0;
        }

        if (cSize > 0) {   /* compression is ongoing or completed */
            size_t const toFlush = MIN(cSize - mtctx->jobs[wJobID].dstFlushed, output->size - output->pos);
            DEBUGLOG(5, "ZSTDMT_flushProduced: Flushing %u bytes from job %u (completion:%u/%u, generated:%u)",
                        (U32)toFlush, mtctx->doneJobID, (U32)srcConsumed, (U32)srcSize, (U32)cSize);
            assert(mtctx->doneJobID < mtctx->nextJobID);
            assert(cSize >= mtctx->jobs[wJobID].dstFlushed);
            assert(mtctx->jobs[wJobID].dstBuff.start != NULL);
            if (toFlush > 0) {
                ZSTD_memcpy((char*)output->dst + output->pos,
                    (const char*)mtctx->jobs[wJobID].dstBuff.start + mtctx->jobs[wJobID].dstFlushed,
                    toFlush);
            }
            output->pos += toFlush;
            mtctx->jobs[wJobID].dstFlushed += toFlush;   /* can write : this value is only used by mtctx */

            if ( (srcConsumed == srcSize)    /* job is completed */
              && (mtctx->jobs[wJobID].dstFlushed == cSize) ) {   /* output buffer fully flushed => free this job position */
                DEBUGLOG(5, "Job %u completed (%u bytes), moving to next one",
                        mtctx->doneJobID, (U32)mtctx->jobs[wJobID].dstFlushed);
                ZSTDMT_releaseBuffer(mtctx->bufPool, mtctx->jobs[wJobID].dstBuff);
                DEBUGLOG(5, "dstBuffer released");
                mtctx->jobs[wJobID].dstBuff = g_nullBuffer;
                mtctx->jobs[wJobID].cSize = 0;   /* ensure this job slot is considered "not started" in future checks */
                mtctx->consumed += srcSize;
                mtctx->produced += cSize;
                mtctx->doneJobID++;
        }   }

        /* return value : how many bytes are left within the internal buffer ; reported as 1 when unknown but > 0 */
        if (cSize > mtctx->jobs[wJobID].dstFlushed) return (cSize - mtctx->jobs[wJobID].dstFlushed);
        if (srcSize > srcConsumed) return 1;   /* current job not completely compressed */
    }
    if (mtctx->doneJobID < mtctx->nextJobID) return 1;   /* some more jobs are ongoing */
    if (mtctx->jobReady) return 1;   /* one job is ready to be pushed, just not yet in the list */
    if (mtctx->inBuff.filled > 0) return 1;   /* input is not empty, and still needs to be turned into a job */
    mtctx->allJobsCompleted = mtctx->frameEnded;   /* all jobs are entirely flushed => if this one was the last one, the frame is completed */
    if (end == ZSTD_e_end) return !mtctx->frameEnded;  /* for ZSTD_e_end, the question becomes : is the frame completed ? instead of : are the internal buffers fully flushed ? */
    return 0;   /* internal buffers fully flushed */
}

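/* Illustrative note (editor's addition, not part of the original source) :
 * a caller typically drains ZSTDMT_flushProduced() in a loop, treating 0 as
 * "fully flushed" and any other non-error value as "call again with more
 * output room". A minimal sketch of such a hypothetical caller, assuming
 * `mtctx` and a ZSTD_outBuffer `out` are already initialized :
 *
 *     size_t remaining;
 *     do {
 *         remaining = ZSTDMT_flushProduced(mtctx, &out, 1, ZSTD_e_flush);
 *         if (ZSTD_isError(remaining)) return remaining;  // propagate error
 *     } while (remaining != 0 && out.pos < out.size);     // stop when flushed, or out of room
 */
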
/**
 * Returns the range of data used by the earliest job that is not yet complete.
 * If the data of the first job is broken up into two segments, we cover both
 * sections.
 */
static range_t ZSTDMT_getInputDataInUse(ZSTDMT_CCtx* mtctx)
{
    unsigned const firstJobID = mtctx->doneJobID;
    unsigned const lastJobID = mtctx->nextJobID;
    unsigned jobID;

    for (jobID = firstJobID; jobID < lastJobID; ++jobID) {
        unsigned const wJobID = jobID & mtctx->jobIDMask;
        size_t consumed;

        ZSTD_PTHREAD_MUTEX_LOCK(&mtctx->jobs[wJobID].job_mutex);
        consumed = mtctx->jobs[wJobID].consumed;
        ZSTD_pthread_mutex_unlock(&mtctx->jobs[wJobID].job_mutex);

        if (consumed < mtctx->jobs[wJobID].src.size) {
            range_t range = mtctx->jobs[wJobID].prefix;
            if (range.size == 0) {
                /* Empty prefix */
                range = mtctx->jobs[wJobID].src;
            }
            /* Job source in multiple segments not supported yet */
            assert(range.start <= mtctx->jobs[wJobID].src.start);
            return range;
        }
    }
    return kNullRange;
}

/**
 * Returns non-zero iff buffer and range overlap.
 */
static int ZSTDMT_isOverlapped(buffer_t buffer, range_t range)
{
    BYTE const* const bufferStart = (BYTE const*)buffer.start;
    BYTE const* const rangeStart = (BYTE const*)range.start;

    if (rangeStart == NULL || bufferStart == NULL)
        return 0;

    {
        BYTE const* const bufferEnd = bufferStart + buffer.capacity;
        BYTE const* const rangeEnd = rangeStart + range.size;

        /* Empty ranges cannot overlap */
        if (bufferStart == bufferEnd || rangeStart == rangeEnd)
            return 0;

        return bufferStart < rangeEnd && rangeStart < bufferEnd;
    }
}

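/* Illustrative note (editor's addition, not part of the original source) :
 * the test above is the classic half-open interval predicate :
 * [bufferStart, bufferEnd) and [rangeStart, rangeEnd) intersect iff
 * bufferStart < rangeEnd && rangeStart < bufferEnd. For example, [0, 10) and
 * [10, 20) do not overlap (they only touch), while [0, 11) and [10, 20)
 * share the byte at offset 10.
 */
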
static int ZSTDMT_doesOverlapWindow(buffer_t buffer, ZSTD_window_t window)
{
    range_t extDict;
    range_t prefix;

    DEBUGLOG(5, "ZSTDMT_doesOverlapWindow");
    extDict.start = window.dictBase + window.lowLimit;
    extDict.size = window.dictLimit - window.lowLimit;

    prefix.start = window.base + window.dictLimit;
    prefix.size = window.nextSrc - (window.base + window.dictLimit);
    DEBUGLOG(5, "extDict [0x%zx, 0x%zx)",
                (size_t)extDict.start,
                (size_t)extDict.start + extDict.size);
    DEBUGLOG(5, "prefix [0x%zx, 0x%zx)",
                (size_t)prefix.start,
                (size_t)prefix.start + prefix.size);

    return ZSTDMT_isOverlapped(buffer, extDict)
        || ZSTDMT_isOverlapped(buffer, prefix);
}

static void ZSTDMT_waitForLdmComplete(ZSTDMT_CCtx* mtctx, buffer_t buffer)
{
    if (mtctx->params.ldmParams.enableLdm == ZSTD_ps_enable) {
        ZSTD_pthread_mutex_t* mutex = &mtctx->serial.ldmWindowMutex;
        DEBUGLOG(5, "ZSTDMT_waitForLdmComplete");
        DEBUGLOG(5, "source [0x%zx, 0x%zx)",
                    (size_t)buffer.start,
                    (size_t)buffer.start + buffer.capacity);
        ZSTD_PTHREAD_MUTEX_LOCK(mutex);
        while (ZSTDMT_doesOverlapWindow(buffer, mtctx->serial.ldmWindow)) {
            DEBUGLOG(5, "Waiting for LDM to finish...");
            ZSTD_pthread_cond_wait(&mtctx->serial.ldmWindowCond, mutex);
        }
        DEBUGLOG(6, "Done waiting for LDM to finish");
        ZSTD_pthread_mutex_unlock(mutex);
    }
}

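/* Illustrative note (editor's addition, not part of the original source) :
 * the loop above is a standard condition-variable wait. The serial stage
 * that advances serial.ldmWindow is expected to signal serial.ldmWindowCond
 * after each update, re-waking this thread so it can re-check the overlap
 * predicate under the same mutex.
 */
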
/**
 * Attempts to set the inBuff to the next section to fill.
 * If any part of the new section is still in use we give up.
 * Returns non-zero if the buffer is filled.
 */
static int ZSTDMT_tryGetInputRange(ZSTDMT_CCtx* mtctx)
{
    range_t const inUse = ZSTDMT_getInputDataInUse(mtctx);
    size_t const spaceLeft = mtctx->roundBuff.capacity - mtctx->roundBuff.pos;
    size_t const target = mtctx->targetSectionSize;
    buffer_t buffer;

    DEBUGLOG(5, "ZSTDMT_tryGetInputRange");
    assert(mtctx->inBuff.buffer.start == NULL);
    assert(mtctx->roundBuff.capacity >= target);

    if (spaceLeft < target) {
        /* ZSTD_invalidateRepCodes() doesn't work for extDict variants.
         * Simply copy the prefix to the beginning in that case.
         */
        BYTE* const start = (BYTE*)mtctx->roundBuff.buffer;
        size_t const prefixSize = mtctx->inBuff.prefix.size;

        buffer.start = start;
        buffer.capacity = prefixSize;
        if (ZSTDMT_isOverlapped(buffer, inUse)) {
            DEBUGLOG(5, "Waiting for buffer...");
            return 0;
        }
        ZSTDMT_waitForLdmComplete(mtctx, buffer);
        ZSTD_memmove(start, mtctx->inBuff.prefix.start, prefixSize);
        mtctx->inBuff.prefix.start = start;
        mtctx->roundBuff.pos = prefixSize;
    }
    buffer.start = mtctx->roundBuff.buffer + mtctx->roundBuff.pos;
    buffer.capacity = target;

    if (ZSTDMT_isOverlapped(buffer, inUse)) {
        DEBUGLOG(5, "Waiting for buffer...");
        return 0;
    }
    assert(!ZSTDMT_isOverlapped(buffer, mtctx->inBuff.prefix));

    ZSTDMT_waitForLdmComplete(mtctx, buffer);

    DEBUGLOG(5, "Using prefix range [%zx, %zx)",
                (size_t)mtctx->inBuff.prefix.start,
                (size_t)mtctx->inBuff.prefix.start + mtctx->inBuff.prefix.size);
    DEBUGLOG(5, "Using source range [%zx, %zx)",
                (size_t)buffer.start,
                (size_t)buffer.start + buffer.capacity);


    mtctx->inBuff.buffer = buffer;
    mtctx->inBuff.filled = 0;
    assert(mtctx->roundBuff.pos + buffer.capacity <= mtctx->roundBuff.capacity);
    return 1;
}

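/* Illustrative note (editor's addition, not part of the original source) :
 * the round buffer is consumed left to right and wraps back to offset 0 once
 * fewer than targetSectionSize bytes remain at the tail. Worked example,
 * assuming a 1 MB round buffer, a 256 KB target section and a 128 KB prefix :
 *
 *     pos == 896 KB -> spaceLeft == 128 KB < 256 KB, so the 128 KB prefix is
 *     moved (ZSTD_memmove, since the ranges may overlap) to offset 0, pos
 *     becomes 128 KB, and the next section is filled at [128 KB, 384 KB).
 *
 * Both the prefix copy and the new section first verify that no in-flight
 * job still reads those bytes, and that the LDM window has moved past them.
 */
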
typedef struct {
    size_t toLoad;  /* The number of bytes to load from the input. */
    int flush;      /* Boolean indicating whether we must flush because we found a synchronization point. */
} syncPoint_t;

/**
 * Searches through the input for a synchronization point. If one is found, we
 * will instruct the caller to flush, and return the number of bytes to load.
 * Otherwise, we will load as many bytes as possible and instruct the caller
 * to continue as normal.
 */
static syncPoint_t
findSynchronizationPoint(ZSTDMT_CCtx const* mtctx, ZSTD_inBuffer const input)
{
    BYTE const* const istart = (BYTE const*)input.src + input.pos;
    U64 const primePower = mtctx->rsync.primePower;
    U64 const hitMask = mtctx->rsync.hitMask;

    syncPoint_t syncPoint;
    U64 hash;
    BYTE const* prev;
    size_t pos;

    syncPoint.toLoad = MIN(input.size - input.pos, mtctx->targetSectionSize - mtctx->inBuff.filled);
    syncPoint.flush = 0;
    if (!mtctx->params.rsyncable)
        /* Rsync is disabled. */
        return syncPoint;
    if (mtctx->inBuff.filled + input.size - input.pos < RSYNC_MIN_BLOCK_SIZE)
        /* We don't emit synchronization points when doing so would produce
         * blocks that are too small. Here we don't have enough input to reach
         * that minimum, so don't look for one.
         */
        return syncPoint;
    if (mtctx->inBuff.filled + syncPoint.toLoad < RSYNC_LENGTH)
        /* Not enough data to compute the hash.
         * We will miss any synchronization points within this RSYNC_LENGTH
         * byte window. However, since the hash depends only on the internal
         * buffers, if the state is already synchronized, we will remain
         * synchronized. Additionally, the probability that we miss a
         * synchronization point is low: RSYNC_LENGTH / targetSectionSize.
         */
        return syncPoint;
    /* Initialize the loop variables. */
    if (mtctx->inBuff.filled < RSYNC_MIN_BLOCK_SIZE) {
        /* We don't need to scan the first RSYNC_MIN_BLOCK_SIZE positions
         * because they can't possibly be a sync point. So we can start
         * part way through the input buffer.
         */
        pos = RSYNC_MIN_BLOCK_SIZE - mtctx->inBuff.filled;
        if (pos >= RSYNC_LENGTH) {
            prev = istart + pos - RSYNC_LENGTH;
            hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
        } else {
            assert(mtctx->inBuff.filled >= RSYNC_LENGTH);
            prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
            hash = ZSTD_rollingHash_compute(prev + pos, (RSYNC_LENGTH - pos));
            hash = ZSTD_rollingHash_append(hash, istart, pos);
        }
    } else {
        /* We have enough bytes buffered to initialize the hash,
         * and have processed enough bytes to find a sync point.
         * Start scanning at the beginning of the input.
         */
        assert(mtctx->inBuff.filled >= RSYNC_MIN_BLOCK_SIZE);
        assert(RSYNC_MIN_BLOCK_SIZE >= RSYNC_LENGTH);
        pos = 0;
        prev = (BYTE const*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled - RSYNC_LENGTH;
        hash = ZSTD_rollingHash_compute(prev, RSYNC_LENGTH);
        if ((hash & hitMask) == hitMask) {
            /* We're already at a sync point so don't load any more until
             * we're able to flush this sync point.
             * This likely happened because the job table was full so we
             * couldn't add our job.
             */
            syncPoint.toLoad = 0;
            syncPoint.flush = 1;
            return syncPoint;
        }
    }
    /* Starting with the hash of the previous RSYNC_LENGTH bytes, roll
     * through the input. If we hit a synchronization point, then cut the
     * job off, and tell the compressor to flush the job. Otherwise, load
     * all the bytes and continue as normal.
     * If we go too long without a synchronization point (targetSectionSize)
     * then a block will be emitted anyway, but this is okay, since if we
     * are already synchronized we will remain synchronized.
     */
    assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
    for (; pos < syncPoint.toLoad; ++pos) {
        BYTE const toRemove = pos < RSYNC_LENGTH ? prev[pos] : istart[pos - RSYNC_LENGTH];
        /* This assert is very expensive, and Debian compiles with asserts enabled.
         * So disable it for now. We can get similar coverage by checking it at the
         * beginning & end of the loop.
         * assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
         */
        hash = ZSTD_rollingHash_rotate(hash, toRemove, istart[pos], primePower);
        assert(mtctx->inBuff.filled + pos >= RSYNC_MIN_BLOCK_SIZE);
        if ((hash & hitMask) == hitMask) {
            syncPoint.toLoad = pos + 1;
            syncPoint.flush = 1;
            ++pos;  /* for the final assert below */
            break;
        }
    }
    assert(pos < RSYNC_LENGTH || ZSTD_rollingHash_compute(istart + pos - RSYNC_LENGTH, RSYNC_LENGTH) == hash);
    return syncPoint;
}

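/* Illustrative note (editor's addition, not part of the original source) :
 * the scan above is a Rabin-Karp style rolling hash. Conceptually, for a
 * window of RSYNC_LENGTH bytes and some prime P :
 *
 *     hash(i+1) == (hash(i) - in[i] * P^(RSYNC_LENGTH-1)) * P + in[i+RSYNC_LENGTH]
 *
 * (the actual ZSTD_rollingHash_rotate() helper applies a per-byte offset as
 * well, but the structure is the same ; primePower caches P^(RSYNC_LENGTH-1)
 * so the removal term is a single multiply). A position is a synchronization
 * point when the bits selected by hitMask are all ones, which on random data
 * happens with probability 2^-popcount(hitMask) per position, yielding
 * average block sizes of about 2^popcount(hitMask) bytes.
 */
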
size_t ZSTDMT_nextInputSizeHint(const ZSTDMT_CCtx* mtctx)
{
    size_t hintInSize = mtctx->targetSectionSize - mtctx->inBuff.filled;
    if (hintInSize==0) hintInSize = mtctx->targetSectionSize;
    return hintInSize;
}

/** ZSTDMT_compressStream_generic() :
 *  internal use only - exposed to be invoked from zstd_compress.c
 *  assumption : output and input are valid (pos <= size)
 * @return : minimum amount of data remaining to flush, 0 if none */
size_t ZSTDMT_compressStream_generic(ZSTDMT_CCtx* mtctx,
                                     ZSTD_outBuffer* output,
                                     ZSTD_inBuffer* input,
                                     ZSTD_EndDirective endOp)
{
    unsigned forwardInputProgress = 0;
    DEBUGLOG(5, "ZSTDMT_compressStream_generic (endOp=%u, srcSize=%u)",
                (U32)endOp, (U32)(input->size - input->pos));
    assert(output->pos <= output->size);
    assert(input->pos  <= input->size);

    if ((mtctx->frameEnded) && (endOp==ZSTD_e_continue)) {
        /* current frame being ended. Only flush/end are allowed */
        return ERROR(stage_wrong);
    }

    /* fill input buffer */
    if ( (!mtctx->jobReady)
      && (input->size > input->pos) ) {   /* support NULL input */
        if (mtctx->inBuff.buffer.start == NULL) {
            assert(mtctx->inBuff.filled == 0);  /* Can't fill an empty buffer */
            if (!ZSTDMT_tryGetInputRange(mtctx)) {
                /* It is only possible for this operation to fail if there are
                 * still compression jobs ongoing.
                 */
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange failed");
                assert(mtctx->doneJobID != mtctx->nextJobID);
            } else
                DEBUGLOG(5, "ZSTDMT_tryGetInputRange completed successfully : mtctx->inBuff.buffer.start = %p", mtctx->inBuff.buffer.start);
        }
        if (mtctx->inBuff.buffer.start != NULL) {
            syncPoint_t const syncPoint = findSynchronizationPoint(mtctx, *input);
            if (syncPoint.flush && endOp == ZSTD_e_continue) {
                endOp = ZSTD_e_flush;
            }
            assert(mtctx->inBuff.buffer.capacity >= mtctx->targetSectionSize);
            DEBUGLOG(5, "ZSTDMT_compressStream_generic: adding %u bytes on top of %u to buffer of size %u",
                        (U32)syncPoint.toLoad, (U32)mtctx->inBuff.filled, (U32)mtctx->targetSectionSize);
            ZSTD_memcpy((char*)mtctx->inBuff.buffer.start + mtctx->inBuff.filled, (const char*)input->src + input->pos, syncPoint.toLoad);
            input->pos += syncPoint.toLoad;
            mtctx->inBuff.filled += syncPoint.toLoad;
            forwardInputProgress = syncPoint.toLoad>0;
        }
    }
    if ((input->pos < input->size) && (endOp == ZSTD_e_end)) {
        /* Can't end yet because the input is not fully consumed.
         * We are in one of these cases:
         * - mtctx->inBuff is NULL & empty: we couldn't get an input buffer, so don't create a new job.
         * - We filled the input buffer: flush this job but don't end the frame.
         * - We hit a synchronization point: flush this job but don't end the frame.
         */
        assert(mtctx->inBuff.filled == 0 || mtctx->inBuff.filled == mtctx->targetSectionSize || mtctx->params.rsyncable);
        endOp = ZSTD_e_flush;
    }

    if ( (mtctx->jobReady)
      || (mtctx->inBuff.filled >= mtctx->targetSectionSize)  /* filled enough : let's compress */
      || ((endOp != ZSTD_e_continue) && (mtctx->inBuff.filled > 0))  /* something to flush : let's go */
      || ((endOp == ZSTD_e_end) && (!mtctx->frameEnded)) ) {   /* must finish the frame with a zero-size block */
        size_t const jobSize = mtctx->inBuff.filled;
        assert(mtctx->inBuff.filled <= mtctx->targetSectionSize);
        FORWARD_IF_ERROR( ZSTDMT_createCompressionJob(mtctx, jobSize, endOp) , "");
    }

    /* check for potential compressed data ready to be flushed */
    {   size_t const remainingToFlush = ZSTDMT_flushProduced(mtctx, output, !forwardInputProgress, endOp);   /* block if there was no forward input progress */
        if (input->pos < input->size) return MAX(remainingToFlush, 1);  /* input not fully consumed : do not end flush yet */
        DEBUGLOG(5, "end of ZSTDMT_compressStream_generic: remainingToFlush = %u", (U32)remainingToFlush);
        return remainingToFlush;
    }
}
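/* Illustrative note (editor's addition, not part of the original source) :
 * a typical caller drives this function in a loop until the frame is done.
 * A minimal sketch of such a hypothetical caller, assuming `mtctx`, a
 * ZSTD_inBuffer `in` and a ZSTD_outBuffer `out` are already initialized :
 *
 *     for (;;) {
 *         size_t const remaining = ZSTDMT_compressStream_generic(mtctx, &out, &in, ZSTD_e_end);
 *         if (ZSTD_isError(remaining)) return remaining;  // propagate error
 *         if (remaining == 0) break;                      // frame fully flushed
 *         // make room in `out` (e.g. write its contents out), then loop again
 *     }
 */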