GitHub Repository: Kitware/CMake
Path: blob/master/Utilities/cmzstd/lib/compress/zstd_cwksp.h
/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
* Dependencies
***************************************/
#include "../common/allocations.h" /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"
#include "../common/compiler.h" /* ZSTD_isPower2 */

/*-*************************************
* Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Set our tables and aligneds to align by 64 bytes */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
* Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_aligned_init_once,
    ZSTD_cwksp_alloc_aligned,
    ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate that process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't be provided, they have to clear the area.
 *
 *   - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                         ... workspace ...                            ]
 * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Init once: these buffers need to be initialized at least once before
 *   use. They should be used when we want to skip memory initialization
 *   while not triggering memory checkers (like Valgrind) when reading from
 *   this memory without writing to it first.
 *   These buffers should be used carefully as they might contain data
 *   from previous compressions.
 *   Buffers are aligned to 64 bytes.
 *
 * - Aligned: these buffers don't require any initialization before they're
 *   used. The user of the buffer should make sure they write into a buffer
 *   location before reading from it.
 *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Init once / Tables
 * 3. Aligned / Tables
 * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;
    void* initOnceStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
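
/*
 * Illustrative sketch (not part of the original header): a minimal end-to-end
 * use of the allocation phases documented above. The workspace size, the
 * placeholder exampleObject_t type and the table/buffer sizes are hypothetical;
 * only the ZSTD_cwksp_* calls are real functions from this file. Reservations
 * must follow the documented order: objects -> init once / tables -> aligned
 * -> buffers.
 */
#if 0
typedef struct { void* opaque; size_t value; } exampleObject_t;  /* hypothetical object type */

static void example_cwksp_usage(void* mem, size_t memSize)
{
    ZSTD_cwksp ws;
    ZSTD_cwksp_init(&ws, mem, memSize, ZSTD_cwksp_static_alloc);

    {   /* 1. Objects: fixed-size structures, reserved exactly once */
        exampleObject_t* const obj =
            (exampleObject_t*)ZSTD_cwksp_reserve_object(&ws, sizeof(exampleObject_t));
        /* 2. Tables: U32 arrays whose values stay bounded across reuses */
        U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(&ws, (1u << 17) * sizeof(U32));
        /* 3. Aligned: 64-byte aligned scratch, written before it is read */
        void* const scratch = ZSTD_cwksp_reserve_aligned64(&ws, 4096);
        /* 4. Buffers: no alignment or initialization requirements */
        BYTE* const litBuffer = ZSTD_cwksp_reserve_buffer(&ws, 1 << 16);

        (void)obj; (void)hashTable; (void)scratch; (void)litBuffer;
        /* Any reservation that did not fit marks the whole workspace as failed. */
        assert(!ZSTD_cwksp_reserve_failed(&ws));
    }
}
#endif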

/*-*************************************
* Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
    assert(ws->workspace <= ws->initOnceStart);
#if ZSTD_MEMORY_SANITIZER
    {
        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
        (void)offset;
#if defined(ZSTD_MSAN_PRINT)
        if(offset!=-1) {
            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
        }
#endif
        assert(offset==-1);
    };
#endif
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t align) {
    size_t const mask = align - 1;
    assert(ZSTD_isPower2(align));
    return (size + mask) & ~mask;
}
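
/*
 * Illustrative sketch (not from the original source): how ZSTD_cwksp_align()
 * rounds a size up to the next multiple of a power-of-2 alignment. The
 * concrete sizes below are arbitrary examples.
 */
#if 0
static void example_cwksp_align(void)
{
    assert(ZSTD_cwksp_align(100, 64) == 128);  /* rounded up to the next multiple of 64 */
    assert(ZSTD_cwksp_align(128, 64) == 128);  /* already aligned: unchanged */
    assert(ZSTD_cwksp_align(0, 64) == 0);      /* zero stays zero */
    assert(ZSTD_cwksp_align(1, 8) == 8);       /* also used for sizeof(void*) rounding */
}
#endif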

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned64_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}

MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size, size_t alignment) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, alignment));
}

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned64_alloc_size(size_t size) {
    return ZSTD_cwksp_aligned_alloc_size(size, ZSTD_CWKSP_ALIGNMENT_BYTES);
}

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
     * bytes to align the beginning of the tables section and the end of the buffers section.
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
    return slackSpace;
}
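
/*
 * Illustrative sketch (not from the original source): how a caller might budget
 * a workspace with the sizing helpers above. The element counts are made-up
 * examples; real estimates live in ZSTD_estimateCCtxSize() and friends.
 */
#if 0
static size_t example_cwksp_estimate(void)
{
    size_t const tableSize  = (size_t)(1u << 17) * sizeof(U32);  /* hypothetical hash table */
    size_t const bufferSize = (size_t)(1u << 16);                /* hypothetical byte buffer */
    return tableSize                              /* tables aren't redzoned, the raw size is enough */
         + ZSTD_cwksp_aligned64_alloc_size(4096)  /* "aligned" scratch, rounded up to 64 bytes */
         + ZSTD_cwksp_alloc_size(bufferSize)      /* unaligned buffer (+ redzones under ASAN) */
         + ZSTD_cwksp_slack_space_required();     /* alignment slack the wksp itself needs */
}
#endif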


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert(ZSTD_isPower2(alignBytes));
    assert(bytes < alignBytes);
    return bytes;
}

/**
 * Returns the initial value for allocStart which is used to determine the position from
 * which we can allocate from the end of the workspace.
 */
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws)
{
    char* endPtr = (char*)ws->workspaceEnd;
    assert(ZSTD_isPower2(ZSTD_CWKSP_ALIGNMENT_BYTES));
    endPtr = endPtr - ((size_t)endPtr % ZSTD_CWKSP_ALIGNMENT_BYTES);
    return (void*)endPtr;
}
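
/*
 * Illustrative sketch (not from the original source): the two alignment helpers
 * round in opposite directions. ZSTD_cwksp_bytes_to_align_ptr() reports how
 * many bytes to advance a pointer so it becomes 64-byte aligned (used for the
 * start of the tables section), while ZSTD_cwksp_initialAllocStart() rounds the
 * end of the workspace down to a 64-byte boundary. The address below is a
 * made-up example value.
 */
#if 0
static void example_cwksp_alignment_helpers(ZSTD_cwksp* ws)
{
    void* const p = (void*)(size_t)0x1010;  /* 16 bytes past a 64-byte boundary */
    size_t const pad = ZSTD_cwksp_bytes_to_align_ptr(p, ZSTD_CWKSP_ALIGNMENT_BYTES);
    assert(pad == 48);                      /* 0x1010 + 48 == 0x1040, a multiple of 64 */

    /* allocStart begins at the largest 64-byte boundary <= workspaceEnd */
    assert((size_t)ZSTD_cwksp_initialAllocStart(ws) % ZSTD_CWKSP_ALIGNMENT_BYTES == 0);
    assert((char*)ZSTD_cwksp_initialAllocStart(ws) <= (char*)ws->workspaceEnd);
}
#endif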

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving [0x%p]:%zd bytes; %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating initOnce / tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);

            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void *const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void *const objectEnd = (BYTE *) alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;   /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
                }
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}
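
/*
 * Illustrative sketch (not from the original source): the phase machinery only
 * moves forward, so reserving a "later" category and then an "earlier" one
 * fails. The sizes below are arbitrary.
 */
#if 0
static void example_cwksp_phase_order(ZSTD_cwksp* ws)
{
    /* Reserving a table moves the workspace past the objects phase... */
    void* const table = ZSTD_cwksp_reserve_table(ws, 64 * sizeof(U32));
    /* ...after which an object reservation (the earliest phase) is rejected. */
    void* const obj = ZSTD_cwksp_reserve_object(ws, 2 * sizeof(void*));
    (void)table;
    assert(obj == NULL);
    assert(ZSTD_cwksp_reserve_failed(ws));
}
#endif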

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE *)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 * This memory has been initialized at least once in the past.
 * This doesn't mean it has been initialized this time, and it might contain data from previous
 * operations.
 * The main usage is for algorithms that might need read access into uninitialized memory.
 * The algorithm must maintain safety under these conditions and must make sure it doesn't
 * leak any of the past data (directly or in side channels).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    if(ptr && ptr < ws->initOnceStart) {
        /* We assume the memory following the current allocation is either:
         * 1. Not usable as initOnce memory (end of workspace)
         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
         * 3. An ASAN redzone, in which case we don't want to write on it
         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
         * Note that we assume here that MSAN and ASAN cannot run at the same time. */
        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
        ws->initOnceStart = ptr;
    }
#if ZSTD_MEMORY_SANITIZER
    assert(__msan_test_shadow(ptr, bytes) == -1);
#endif
    return ptr;
}
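
/*
 * Illustrative sketch (not from the original source): the intended usage
 * pattern for "init once" memory. The contents may be stale values from a
 * previous compression, so a caller clamps or validates what it reads instead
 * of re-memset()-ing the whole buffer. The table size and the clamping rule
 * are hypothetical.
 */
#if 0
static void example_cwksp_init_once(ZSTD_cwksp* ws, U32 maxValidIndex)
{
    size_t const nbEntries = (size_t)1 << 10;
    U32* const table = (U32*)ZSTD_cwksp_reserve_aligned_init_once(ws, nbEntries * sizeof(U32));
    if (table == NULL) return;
    {   /* Reads are safe (every byte was zeroed at least once), but values may
         * be stale, so each entry is range-checked before use. */
        size_t n;
        for (n = 0; n < nbEntries; n++) {
            if (table[n] > maxValidIndex) table[n] = 0;
        }
    }
}
#endif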

/**
 * Reserves and returns memory sized on and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned64(ZSTD_cwksp* ws, size_t bytes)
{
    void* const ptr = ZSTD_cwksp_reserve_internal(ws,
                        ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                        ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to reuse them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
    void* alloc;
    void* end;
    void* top;

    /* We can only start allocating tables after we are done reserving space for objects at the
     * start of the workspace */
    if(ws->phase < phase) {
        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
            return NULL;
        }
    }
    alloc = ws->tableEnd;
    end = (BYTE *)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}
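
/*
 * Illustrative sketch (not from the original source): reserving a match-finder
 * table sized from a hypothetical hashLog. Table sizes must be multiples of
 * 64 bytes (and of sizeof(U32)), which any power-of-2 entry count of at least
 * 16 satisfies.
 */
#if 0
static U32* example_cwksp_reserve_hash_table(ZSTD_cwksp* ws, unsigned hashLog)
{
    size_t const tableSize = ((size_t)1 << hashLog) * sizeof(U32);  /* e.g. hashLog=17 -> 512 KB */
    U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, tableSize);
    /* Unlike "aligned" reservations, tables may be reused across compressions
     * without memset(), as long as their values stay within the valid range
     * (see ZSTD_cwksp_clean_tables() and ZSTD_cwksp_mark_tables_dirty() below). */
    return hashTable;
}
#endif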

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE *)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}
/**
 * with alignment control
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object_aligned(ZSTD_cwksp* ws, size_t byteSize, size_t alignment)
{
    size_t const mask = alignment - 1;
    size_t const surplus = (alignment > sizeof(void*)) ? alignment - sizeof(void*) : 0;
    void* const start = ZSTD_cwksp_reserve_object(ws, byteSize + surplus);
    if (start == NULL) return NULL;
    if (surplus == 0) return start;
    assert(ZSTD_isPower2(alignment));
    return (void*)(((size_t)start + surplus) & ~mask);
}
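
/*
 * Illustrative sketch (not from the original source): reserving a 64-byte
 * aligned object during workspace setup. The 256-byte size is a made-up
 * example; it only needs to be a multiple of sizeof(void*).
 */
#if 0
static void* example_cwksp_reserve_aligned_object(ZSTD_cwksp* ws)
{
    /* Over-reserves by (alignment - sizeof(void*)) bytes, then rounds the
     * returned pointer up to the requested 64-byte boundary. */
    return ZSTD_cwksp_reserve_object_aligned(ws, 256, 64);
}
#endif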

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table reuse logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     * Since tableValidEnd space and initOnce space may overlap we don't poison
     * the initOnce portion as that would break its promise. This means that this
     * poisoning check isn't always applied fully. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            __msan_poison(ws->objectEnd, size);
        } else {
            assert(ws->initOnceStart >= ws->objectEnd);
            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
        }
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context reuse logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace except for the areas in which we expect memory reuse
     * without initialization (objects, valid tables area and init once
     * memory). */
    {
        if((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
            __msan_poison(ws->tableValidEnd, size);
        }
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}
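
/*
 * Illustrative sketch (not from the original source): reusing one workspace for
 * another compression with the same parameters. Objects survive, tables keep
 * their storage (only their contents may need cleaning), and buffer/aligned
 * reservations are simply redone after the clear. The sizes are made-up
 * examples.
 */
#if 0
static void example_cwksp_reuse(ZSTD_cwksp* ws)
{
    /* Invalidate buffer/aligned/table reservations from the previous run. */
    ZSTD_cwksp_clear(ws);

    {   /* Re-reserve the table in the same spot, then zero only the portion
         * that is not already known to hold in-range values. */
        U32* const hashTable = (U32*)ZSTD_cwksp_reserve_table(ws, ((size_t)1 << 12) * sizeof(U32));
        ZSTD_cwksp_clean_tables(ws);
        (void)hashTable;
    }

    {   /* Buffers get fresh positions computed from the end of the workspace. */
        BYTE* const seqBuffer = ZSTD_cwksp_reserve_buffer(ws, 1 << 15);
        (void)seqBuffer;
    }
}
#endif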

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0); /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void *ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
    if (ptr != NULL && customMem.customFree != NULL) {
        __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws));
    }
#endif
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}
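
/*
 * Illustrative sketch (not from the original source): the dynamic lifecycle.
 * An all-NULL ZSTD_customMem selects the default allocator; the 1 MB size is a
 * made-up example (real callers size the workspace with the estimate helpers).
 */
#if 0
static void example_cwksp_lifecycle(void)
{
    ZSTD_customMem const cmem = { NULL, NULL, NULL };  /* default malloc/free */
    ZSTD_cwksp ws;
    if (ZSTD_isError(ZSTD_cwksp_create(&ws, (size_t)1 << 20, cmem)))
        return;
    /* ... reserve objects / tables / aligned / buffers, run a compression ... */
    ZSTD_cwksp_free(&ws, cmem);  /* frees the buffer and zeroes the ZSTD_cwksp */
}
#endif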

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
* Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable limit of the
 * actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp *const ws, size_t const estimatedSpace) {
    /* We have an alignment space between objects and tables, and between tables and buffers, so we can have
     * up to twice the alignment bytes difference between estimation and actual usage */
    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
           ZSTD_cwksp_used(ws) <= estimatedSpace;
}


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
        ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}
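
/*
 * Illustrative sketch (not from the original source): how the "too large"
 * checks are meant to drive workspace resizing. A workspace only counts as
 * wasteful after it has been oversized (by ZSTD_WORKSPACETOOLARGE_FACTOR) for
 * more than ZSTD_WORKSPACETOOLARGE_MAXDURATION consecutive uses. The helper
 * name and its return-value convention are hypothetical.
 */
#if 0
static int example_cwksp_should_shrink(ZSTD_cwksp* ws, size_t neededSpace)
{
    ZSTD_cwksp_bump_oversized_duration(ws, neededSpace);  /* track consecutive oversized uses */
    if (ZSTD_cwksp_check_wasteful(ws, neededSpace)) {
        return 1;  /* caller should free and re-create a smaller workspace */
    }
    return 0;      /* keep the current workspace; resizing would cost more than it saves */
}
#endif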

#endif /* ZSTD_CWKSP_H */