/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

#ifndef ZSTD_CWKSP_H
#define ZSTD_CWKSP_H

/*-*************************************
* Dependencies
***************************************/
#include "../common/allocations.h"  /* ZSTD_customMalloc, ZSTD_customFree */
#include "../common/zstd_internal.h"
#include "../common/portability_macros.h"

#if defined (__cplusplus)
extern "C" {
#endif

/*-*************************************
* Constants
***************************************/

/* Since the workspace is effectively its own little malloc implementation /
 * arena, when we run under ASAN, we should similarly insert redzones between
 * each internal element of the workspace, so ASAN will catch overruns that
 * reach outside an object but that stay inside the workspace.
 *
 * This defines the size of that redzone.
 */
#ifndef ZSTD_CWKSP_ASAN_REDZONE_SIZE
#define ZSTD_CWKSP_ASAN_REDZONE_SIZE 128
#endif


/* Tables and "aligned" allocations are aligned on 64-byte boundaries. */
#define ZSTD_CWKSP_ALIGNMENT_BYTES 64

/*-*************************************
* Structures
***************************************/
typedef enum {
    ZSTD_cwksp_alloc_objects,
    ZSTD_cwksp_alloc_aligned_init_once,
    ZSTD_cwksp_alloc_aligned,
    ZSTD_cwksp_alloc_buffers
} ZSTD_cwksp_alloc_phase_e;

/**
 * Used to describe whether the workspace is statically allocated (and will not
 * necessarily ever be freed), or if it's dynamically allocated and we can
 * expect a well-formed caller to free this.
 */
typedef enum {
    ZSTD_cwksp_dynamic_alloc,
    ZSTD_cwksp_static_alloc
} ZSTD_cwksp_static_alloc_e;

/**
 * Zstd fits all its internal datastructures into a single contiguous buffer,
 * so that it only needs to perform a single OS allocation (or so that a buffer
 * can be provided to it and it can perform no allocations at all). This buffer
 * is called the workspace.
 *
 * Several optimizations complicate the process of allocating memory ranges
 * from this workspace for each internal datastructure:
 *
 * - These different internal datastructures have different setup requirements:
 *
 *   - The static objects need to be cleared once and can then be trivially
 *     reused for each compression.
 *
 *   - Various buffers don't need to be initialized at all--they are always
 *     written into before they're read.
 *
 *   - The matchstate tables have a unique requirement that they don't need
 *     their memory to be totally cleared, but they do need the memory to have
 *     some bound, i.e., a guarantee that all values in the memory they've been
 *     allocated are less than some maximum value (which is the starting value
 *     for the indices that they will then use for compression). When this
 *     guarantee is provided to them, they can use the memory without any setup
 *     work. When it can't, they have to clear the area.
 *
 * - These buffers also have different alignment requirements.
 *
 * - We would like to reuse the objects in the workspace for multiple
 *   compressions without having to perform any expensive reallocation or
 *   reinitialization work.
 *
 * - We would like to be able to efficiently reuse the workspace across
 *   multiple compressions **even when the compression parameters change** and
 *   we need to resize some of the objects (where possible).
 *
 * To attempt to manage this buffer, given these constraints, the ZSTD_cwksp
 * abstraction was created. It works as follows:
 *
 * Workspace Layout:
 *
 * [                        ... workspace ...                           ]
 * [objects][tables ->] free space [<- buffers][<- aligned][<- init once]
 *
 * The various objects that live in the workspace are divided into the
 * following categories, and are allocated separately:
 *
 * - Static objects: this is optionally the enclosing ZSTD_CCtx or ZSTD_CDict,
 *   so that literally everything fits in a single buffer. Note: if present,
 *   this must be the first object in the workspace, since ZSTD_customFree{CCtx,
 *   CDict}() rely on a pointer comparison to see whether one or two frees are
 *   required.
 *
 * - Fixed size objects: these are fixed-size, fixed-count objects that are
 *   nonetheless "dynamically" allocated in the workspace so that we can
 *   control how they're initialized separately from the broader ZSTD_CCtx.
 *   Examples:
 *   - Entropy Workspace
 *   - 2 x ZSTD_compressedBlockState_t
 *   - CDict dictionary contents
 *
 * - Tables: these are any of several different datastructures (hash tables,
 *   chain tables, binary trees) that all respect a common format: they are
 *   uint32_t arrays, all of whose values are between 0 and (nextSrc - base).
 *   Their sizes depend on the cparams. These tables are 64-byte aligned.
 *
 * - Init once: these buffers require to be initialized at least once before
 *   use. They should be used when we want to skip memory initialization
 *   while not triggering memory checkers (like Valgrind) when reading from
 *   this memory without writing to it first.
 *   These buffers should be used carefully as they might contain data
 *   from previous compressions.
 *   Buffers are aligned to 64 bytes.
 *
 * - Aligned: these buffers don't require any initialization before they're
 *   used. The user of the buffer should make sure they write into a buffer
 *   location before reading from it.
 *   Buffers are aligned to 64 bytes.
 *
 * - Buffers: these buffers are used for various purposes that don't require
 *   any alignment or initialization before they're used. This means they can
 *   be moved around at no cost for a new compression.
 *
 * Allocating Memory:
 *
 * The various types of objects must be allocated in order, so they can be
 * correctly packed into the workspace buffer. That order is:
 *
 * 1. Objects
 * 2. Init once / Tables
 * 3. Aligned / Tables
 * 4. Buffers / Tables
 *
 * Attempts to reserve objects of different types out of order will fail.
 */
typedef struct {
    void* workspace;
    void* workspaceEnd;

    void* objectEnd;
    void* tableEnd;
    void* tableValidEnd;
    void* allocStart;
    void* initOnceStart;

    BYTE allocFailed;
    int workspaceOversizedDuration;
    ZSTD_cwksp_alloc_phase_e phase;
    ZSTD_cwksp_static_alloc_e isStatic;
} ZSTD_cwksp;
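
/* A minimal usage sketch (illustrative only, not part of this header's API
 * surface; sizes are hypothetical): reservations must follow the phase order
 * documented above.
 *
 *     ZSTD_cwksp ws;   // assume already init'ed over some buffer
 *     void* const obj   = ZSTD_cwksp_reserve_object(&ws, 128);         // 1. Objects
 *     U32*  const table = (U32*)ZSTD_cwksp_reserve_table(&ws, 4096);   // 2. Tables
 *     void* const algnd = ZSTD_cwksp_reserve_aligned(&ws, 256);        // 3. Aligned
 *     BYTE* const buf   = ZSTD_cwksp_reserve_buffer(&ws, 512);         // 4. Buffers
 *
 * Reserving in the reverse order would violate the phase ordering (caught by
 * an assert() in debug builds), and a reservation that doesn't fit sets
 * ws.allocFailed instead of returning valid memory.
 */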

/*-*************************************
* Functions
***************************************/

MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws);
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws);

MEM_STATIC void ZSTD_cwksp_assert_internal_consistency(ZSTD_cwksp* ws) {
    (void)ws;
    assert(ws->workspace <= ws->objectEnd);
    assert(ws->objectEnd <= ws->tableEnd);
    assert(ws->objectEnd <= ws->tableValidEnd);
    assert(ws->tableEnd <= ws->allocStart);
    assert(ws->tableValidEnd <= ws->allocStart);
    assert(ws->allocStart <= ws->workspaceEnd);
    assert(ws->initOnceStart <= ZSTD_cwksp_initialAllocStart(ws));
    assert(ws->workspace <= ws->initOnceStart);
#if ZSTD_MEMORY_SANITIZER
    {
        intptr_t const offset = __msan_test_shadow(ws->initOnceStart,
            (U8*)ZSTD_cwksp_initialAllocStart(ws) - (U8*)ws->initOnceStart);
        (void)offset;
#if defined(ZSTD_MSAN_PRINT)
        if (offset != -1) {
            __msan_print_shadow((U8*)ws->initOnceStart + offset - 8, 32);
        }
#endif
        assert(offset == -1);
    };
#endif
}

/**
 * Align must be a power of 2.
 */
MEM_STATIC size_t ZSTD_cwksp_align(size_t size, size_t const align) {
    size_t const mask = align - 1;
    assert((align & mask) == 0);
    return (size + mask) & ~mask;
}
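
/* For example, with a 64-byte alignment (a minimal worked example):
 *     ZSTD_cwksp_align( 1, 64) ==  64
 *     ZSTD_cwksp_align(64, 64) ==  64
 *     ZSTD_cwksp_align(65, 64) == 128
 */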

/**
 * Use this to determine how much space in the workspace we will consume to
 * allocate this object. (Normally it should be exactly the size of the object,
 * but under special conditions, like ASAN, where we pad each object, it might
 * be larger.)
 *
 * Since tables aren't currently redzoned, you don't need to call through this
 * to figure out how much space you need for the matchState tables. Everything
 * else is, though.
 *
 * Do not use for sizing aligned buffers. Instead, use ZSTD_cwksp_aligned_alloc_size().
 */
MEM_STATIC size_t ZSTD_cwksp_alloc_size(size_t size) {
    if (size == 0)
        return 0;
#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    return size + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#else
    return size;
#endif
}
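
/* Worked example (values assume the default ZSTD_CWKSP_ASAN_REDZONE_SIZE of
 * 128): under ASAN, ZSTD_cwksp_alloc_size(1000) == 1000 + 2*128 == 1256;
 * in a regular build it is simply 1000. */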

/**
 * Returns an adjusted alloc size that is the nearest larger multiple of 64 bytes.
 * Used to determine the number of bytes required for a given "aligned".
 */
MEM_STATIC size_t ZSTD_cwksp_aligned_alloc_size(size_t size) {
    return ZSTD_cwksp_alloc_size(ZSTD_cwksp_align(size, ZSTD_CWKSP_ALIGNMENT_BYTES));
}

/**
 * Returns the amount of additional space the cwksp must allocate
 * for internal purposes (currently only alignment).
 */
MEM_STATIC size_t ZSTD_cwksp_slack_space_required(void) {
    /* For alignment, the wksp will always allocate an additional 2*ZSTD_CWKSP_ALIGNMENT_BYTES
     * bytes to align the beginning of the tables section and the end of the buffers;
     */
    size_t const slackSpace = ZSTD_CWKSP_ALIGNMENT_BYTES * 2;
    return slackSpace;
}


/**
 * Return the number of additional bytes required to align a pointer to the given number of bytes.
 * alignBytes must be a power of two.
 */
MEM_STATIC size_t ZSTD_cwksp_bytes_to_align_ptr(void* ptr, const size_t alignBytes) {
    size_t const alignBytesMask = alignBytes - 1;
    size_t const bytes = (alignBytes - ((size_t)ptr & (alignBytesMask))) & alignBytesMask;
    assert((alignBytes & alignBytesMask) == 0);
    assert(bytes < alignBytes);
    return bytes;
}
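
/* Worked example for a 64-byte alignment (addresses are illustrative):
 *     (size_t)ptr & 63 ==  0  ->  0 bytes of padding needed (already aligned)
 *     (size_t)ptr & 63 ==  1  -> 63 bytes of padding needed
 *     (size_t)ptr & 63 == 63  ->  1 byte  of padding needed
 */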

/**
 * Returns the initial value for allocStart which is used to determine the position from
 * which we can allocate from the end of the workspace.
 */
MEM_STATIC void* ZSTD_cwksp_initialAllocStart(ZSTD_cwksp* ws) {
    return (void*)((size_t)ws->workspaceEnd & ~(ZSTD_CWKSP_ALIGNMENT_BYTES-1));
}

/**
 * Internal function. Do not use directly.
 * Reserves the given number of bytes within the aligned/buffer segment of the wksp,
 * which counts from the end of the wksp (as opposed to the object/table segment).
 *
 * Returns a pointer to the beginning of that space.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal_buffer_space(ZSTD_cwksp* ws, size_t const bytes)
{
    void* const alloc = (BYTE*)ws->allocStart - bytes;
    void* const bottom = ws->tableEnd;
    DEBUGLOG(5, "cwksp: reserving %p %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(alloc >= bottom);
    if (alloc < bottom) {
        DEBUGLOG(4, "cwksp: alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    /* the area is reserved from the end of wksp.
     * If it overlaps with tableValidEnd, it voids guarantees on values' range */
    if (alloc < ws->tableValidEnd) {
        ws->tableValidEnd = alloc;
    }
    ws->allocStart = alloc;
    return alloc;
}

/**
 * Moves the cwksp to the next phase, and does any necessary allocations.
 * cwksp initialization must necessarily go through each phase in order.
 * Returns 0 on success, or a zstd error code.
 */
MEM_STATIC size_t
ZSTD_cwksp_internal_advance_phase(ZSTD_cwksp* ws, ZSTD_cwksp_alloc_phase_e phase)
{
    assert(phase >= ws->phase);
    if (phase > ws->phase) {
        /* Going from allocating objects to allocating initOnce / tables */
        if (ws->phase < ZSTD_cwksp_alloc_aligned_init_once &&
            phase >= ZSTD_cwksp_alloc_aligned_init_once) {
            ws->tableValidEnd = ws->objectEnd;
            ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);

            {   /* Align the start of the tables to 64 bytes. Use [0, 63] bytes */
                void* const alloc = ws->objectEnd;
                size_t const bytesToAlign = ZSTD_cwksp_bytes_to_align_ptr(alloc, ZSTD_CWKSP_ALIGNMENT_BYTES);
                void* const objectEnd = (BYTE*)alloc + bytesToAlign;
                DEBUGLOG(5, "reserving table alignment addtl space: %zu", bytesToAlign);
                RETURN_ERROR_IF(objectEnd > ws->workspaceEnd, memory_allocation,
                                "table phase - alignment initial allocation failed!");
                ws->objectEnd = objectEnd;
                ws->tableEnd = objectEnd;   /* table area starts being empty */
                if (ws->tableValidEnd < ws->tableEnd) {
                    ws->tableValidEnd = ws->tableEnd;
                }
            }
        }
        ws->phase = phase;
        ZSTD_cwksp_assert_internal_consistency(ws);
    }
    return 0;
}

/**
 * Returns whether this object/buffer/etc was allocated in this workspace.
 */
MEM_STATIC int ZSTD_cwksp_owns_buffer(const ZSTD_cwksp* ws, const void* ptr)
{
    return (ptr != NULL) && (ws->workspace <= ptr) && (ptr < ws->workspaceEnd);
}

/**
 * Internal function. Do not use directly.
 */
MEM_STATIC void*
ZSTD_cwksp_reserve_internal(ZSTD_cwksp* ws, size_t bytes, ZSTD_cwksp_alloc_phase_e phase)
{
    void* alloc;
    if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase)) || bytes == 0) {
        return NULL;
    }

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    bytes += 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    alloc = ZSTD_cwksp_reserve_internal_buffer_space(ws, bytes);

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    if (alloc) {
        alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
        if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
            /* We need to keep the redzone poisoned while unpoisoning the bytes that
             * are actually allocated. */
            __asan_unpoison_memory_region(alloc, bytes - 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE);
        }
    }
#endif

    return alloc;
}

/**
 * Reserves and returns unaligned memory.
 */
MEM_STATIC BYTE* ZSTD_cwksp_reserve_buffer(ZSTD_cwksp* ws, size_t bytes)
{
    return (BYTE*)ZSTD_cwksp_reserve_internal(ws, bytes, ZSTD_cwksp_alloc_buffers);
}

/**
 * Reserves and returns memory sized and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 * This memory has been initialized at least once in the past.
 * This doesn't mean it has been initialized this time, and it might contain data from previous
 * operations.
 * The main usage is for algorithms that might need read access into uninitialized memory.
 * The algorithm must maintain safety under these conditions and must make sure it doesn't
 * leak any of the past data (directly or in side channels).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned_init_once(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const alignedBytes = ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES);
    void* ptr = ZSTD_cwksp_reserve_internal(ws, alignedBytes, ZSTD_cwksp_alloc_aligned_init_once);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    if (ptr && ptr < ws->initOnceStart) {
        /* We assume the memory following the current allocation is either:
         * 1. Not usable as initOnce memory (end of workspace)
         * 2. Another initOnce buffer that has been allocated before (and so was previously memset)
         * 3. An ASAN redzone, in which case we don't want to write on it
         * For these reasons it should be fine to not explicitly zero every byte up to ws->initOnceStart.
         * Note that we assume here that MSAN and ASAN cannot run at the same time. */
        ZSTD_memset(ptr, 0, MIN((size_t)((U8*)ws->initOnceStart - (U8*)ptr), alignedBytes));
        ws->initOnceStart = ptr;
    }
#if ZSTD_MEMORY_SANITIZER
    assert(__msan_test_shadow(ptr, bytes) == -1);
#endif
    return ptr;
}
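
/* Sketch of a caller pattern (hypothetical names and sizes): an algorithm
 * whose correctness only needs "every slot was written at some point", not
 * "every slot was written this compression", might do:
 *
 *     U32* const tagTable = (U32*)ZSTD_cwksp_reserve_aligned_init_once(&ws, n * sizeof(U32));
 *     // On first hand-out the range is zeroed; on reuse it may hold values
 *     // from a previous compression, and the caller must stay safe and must
 *     // not leak those stale contents.
 */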

/**
 * Reserves and returns memory sized and aligned on ZSTD_CWKSP_ALIGNMENT_BYTES (64 bytes).
 */
MEM_STATIC void* ZSTD_cwksp_reserve_aligned(ZSTD_cwksp* ws, size_t bytes)
{
    void* ptr = ZSTD_cwksp_reserve_internal(ws, ZSTD_cwksp_align(bytes, ZSTD_CWKSP_ALIGNMENT_BYTES),
                                            ZSTD_cwksp_alloc_aligned);
    assert(((size_t)ptr & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return ptr;
}

/**
 * Aligned on 64 bytes. These buffers have the special property that
 * their values remain constrained, allowing us to reuse them without
 * memset()-ing them.
 */
MEM_STATIC void* ZSTD_cwksp_reserve_table(ZSTD_cwksp* ws, size_t bytes)
{
    const ZSTD_cwksp_alloc_phase_e phase = ZSTD_cwksp_alloc_aligned_init_once;
    void* alloc;
    void* end;
    void* top;

    /* We can only start allocating tables after we are done reserving space for objects at the
     * start of the workspace */
    if (ws->phase < phase) {
        if (ZSTD_isError(ZSTD_cwksp_internal_advance_phase(ws, phase))) {
            return NULL;
        }
    }
    alloc = ws->tableEnd;
    end = (BYTE*)alloc + bytes;
    top = ws->allocStart;

    DEBUGLOG(5, "cwksp: reserving %p table %zd bytes, %zd bytes remaining",
        alloc, bytes, ZSTD_cwksp_available_space(ws) - bytes);
    assert((bytes & (sizeof(U32)-1)) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    assert(end <= top);
    if (end > top) {
        DEBUGLOG(4, "cwksp: table alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->tableEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    assert((bytes & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    assert(((size_t)alloc & (ZSTD_CWKSP_ALIGNMENT_BYTES-1)) == 0);
    return alloc;
}
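
/* Example constraint (illustrative): a hash table of (1 << 17) U32 entries
 * reserves (1 << 17) * sizeof(U32) bytes, which satisfies both the
 * sizeof(U32)-multiple and the 64-byte-multiple asserts above. */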

/**
 * Aligned on sizeof(void*).
 * Note : should happen only once, at workspace first initialization
 */
MEM_STATIC void* ZSTD_cwksp_reserve_object(ZSTD_cwksp* ws, size_t bytes)
{
    size_t const roundedBytes = ZSTD_cwksp_align(bytes, sizeof(void*));
    void* alloc = ws->objectEnd;
    void* end = (BYTE*)alloc + roundedBytes;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* over-reserve space */
    end = (BYTE*)end + 2 * ZSTD_CWKSP_ASAN_REDZONE_SIZE;
#endif

    DEBUGLOG(4,
        "cwksp: reserving %p object %zd bytes (rounded to %zd), %zd bytes remaining",
        alloc, bytes, roundedBytes, ZSTD_cwksp_available_space(ws) - roundedBytes);
    assert((size_t)alloc % ZSTD_ALIGNOF(void*) == 0);
    assert(bytes % ZSTD_ALIGNOF(void*) == 0);
    ZSTD_cwksp_assert_internal_consistency(ws);
    /* we must be in the first phase, no advance is possible */
    if (ws->phase != ZSTD_cwksp_alloc_objects || end > ws->workspaceEnd) {
        DEBUGLOG(3, "cwksp: object alloc failed!");
        ws->allocFailed = 1;
        return NULL;
    }
    ws->objectEnd = end;
    ws->tableEnd = end;
    ws->tableValidEnd = end;

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* Move alloc so there's ZSTD_CWKSP_ASAN_REDZONE_SIZE unused space on
     * either side. */
    alloc = (BYTE*)alloc + ZSTD_CWKSP_ASAN_REDZONE_SIZE;
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        __asan_unpoison_memory_region(alloc, bytes);
    }
#endif

    return alloc;
}

MEM_STATIC void ZSTD_cwksp_mark_tables_dirty(ZSTD_cwksp* ws)
{
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_dirty");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the table reuse logic is sound, and that we don't
     * access table space that we haven't cleaned, we re-"poison" the table
     * space every time we mark it dirty.
     * Since tableValidEnd space and initOnce space may overlap we don't poison
     * the initOnce portion as that would break its promise. This means that this
     * poisoning check isn't always applied fully. */
    {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        assert(__msan_test_shadow(ws->objectEnd, size) == -1);
        if ((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            __msan_poison(ws->objectEnd, size);
        } else {
            assert(ws->initOnceStart >= ws->objectEnd);
            __msan_poison(ws->objectEnd, (BYTE*)ws->initOnceStart - (BYTE*)ws->objectEnd);
        }
    }
#endif

    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    ws->tableValidEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC void ZSTD_cwksp_mark_tables_clean(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_mark_tables_clean");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ws->tableValidEnd = ws->tableEnd;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Zero the part of the allocated tables not already marked clean.
 */
MEM_STATIC void ZSTD_cwksp_clean_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: ZSTD_cwksp_clean_tables");
    assert(ws->tableValidEnd >= ws->objectEnd);
    assert(ws->tableValidEnd <= ws->allocStart);
    if (ws->tableValidEnd < ws->tableEnd) {
        ZSTD_memset(ws->tableValidEnd, 0, (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->tableValidEnd));
    }
    ZSTD_cwksp_mark_tables_clean(ws);
}

/**
 * Invalidates table allocations.
 * All other allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear_tables(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing tables!");

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->tableValidEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ZSTD_cwksp_assert_internal_consistency(ws);
}

/**
 * Invalidates all buffer, aligned, and table allocations.
 * Object allocations remain valid.
 */
MEM_STATIC void ZSTD_cwksp_clear(ZSTD_cwksp* ws) {
    DEBUGLOG(4, "cwksp: clearing!");

#if ZSTD_MEMORY_SANITIZER && !defined (ZSTD_MSAN_DONT_POISON_WORKSPACE)
    /* To validate that the context reuse logic is sound, and that we don't
     * access stuff that this compression hasn't initialized, we re-"poison"
     * the workspace except for the areas in which we expect memory reuse
     * without initialization (objects, valid tables area and init once
     * memory). */
    {
        if ((BYTE*)ws->tableValidEnd < (BYTE*)ws->initOnceStart) {
            size_t size = (BYTE*)ws->initOnceStart - (BYTE*)ws->tableValidEnd;
            __msan_poison(ws->tableValidEnd, size);
        }
    }
#endif

#if ZSTD_ADDRESS_SANITIZER && !defined (ZSTD_ASAN_DONT_POISON_WORKSPACE)
    /* We don't do this when the workspace is statically allocated, because
     * when that is the case, we have no capability to hook into the end of the
     * workspace's lifecycle to unpoison the memory.
     */
    if (ws->isStatic == ZSTD_cwksp_dynamic_alloc) {
        size_t size = (BYTE*)ws->workspaceEnd - (BYTE*)ws->objectEnd;
        __asan_poison_memory_region(ws->objectEnd, size);
    }
#endif

    ws->tableEnd = ws->objectEnd;
    ws->allocStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->allocFailed = 0;
    if (ws->phase > ZSTD_cwksp_alloc_aligned_init_once) {
        ws->phase = ZSTD_cwksp_alloc_aligned_init_once;
    }
    ZSTD_cwksp_assert_internal_consistency(ws);
}

MEM_STATIC size_t ZSTD_cwksp_sizeof(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->workspace);
}

MEM_STATIC size_t ZSTD_cwksp_used(const ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->tableEnd - (BYTE*)ws->workspace)
         + (size_t)((BYTE*)ws->workspaceEnd - (BYTE*)ws->allocStart);
}

/**
 * The provided workspace takes ownership of the buffer [start, start+size).
 * Any existing values in the workspace are ignored (the previously managed
 * buffer, if present, must be separately freed).
 */
MEM_STATIC void ZSTD_cwksp_init(ZSTD_cwksp* ws, void* start, size_t size, ZSTD_cwksp_static_alloc_e isStatic) {
    DEBUGLOG(4, "cwksp: init'ing workspace with %zd bytes", size);
    assert(((size_t)start & (sizeof(void*)-1)) == 0);   /* ensure correct alignment */
    ws->workspace = start;
    ws->workspaceEnd = (BYTE*)start + size;
    ws->objectEnd = ws->workspace;
    ws->tableValidEnd = ws->objectEnd;
    ws->initOnceStart = ZSTD_cwksp_initialAllocStart(ws);
    ws->phase = ZSTD_cwksp_alloc_objects;
    ws->isStatic = isStatic;
    ZSTD_cwksp_clear(ws);
    ws->workspaceOversizedDuration = 0;
    ZSTD_cwksp_assert_internal_consistency(ws);
}
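
/* Illustrative sketch (buffer name and sizes are hypothetical): setting up a
 * cwksp over a caller-provided static buffer, in the style of the
 * ZSTD_initStatic*() entry points, then checking for exhaustion:
 *
 *     static BYTE kWksp[1 << 20];   // assumed suitably aligned for void*
 *     ZSTD_cwksp ws;
 *     ZSTD_cwksp_init(&ws, kWksp, sizeof(kWksp), ZSTD_cwksp_static_alloc);
 *     {   BYTE* const buf = ZSTD_cwksp_reserve_buffer(&ws, 4096);
 *         if (ZSTD_cwksp_reserve_failed(&ws)) {
 *             // ... fall back or report a memory_allocation error ...
 *         }
 *     }
 */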

MEM_STATIC size_t ZSTD_cwksp_create(ZSTD_cwksp* ws, size_t size, ZSTD_customMem customMem) {
    void* workspace = ZSTD_customMalloc(size, customMem);
    DEBUGLOG(4, "cwksp: creating new workspace with %zd bytes", size);
    RETURN_ERROR_IF(workspace == NULL, memory_allocation, "NULL pointer!");
    ZSTD_cwksp_init(ws, workspace, size, ZSTD_cwksp_dynamic_alloc);
    return 0;
}

MEM_STATIC void ZSTD_cwksp_free(ZSTD_cwksp* ws, ZSTD_customMem customMem) {
    void* ptr = ws->workspace;
    DEBUGLOG(4, "cwksp: freeing workspace");
#if ZSTD_MEMORY_SANITIZER && !defined(ZSTD_MSAN_DONT_POISON_WORKSPACE)
    if (ptr != NULL && customMem.customFree != NULL) {
        __msan_unpoison(ptr, ZSTD_cwksp_sizeof(ws));
    }
#endif
    ZSTD_memset(ws, 0, sizeof(ZSTD_cwksp));
    ZSTD_customFree(ptr, customMem);
}

/**
 * Moves the management of a workspace from one cwksp to another. The src cwksp
 * is left in an invalid state (src must be re-init()'ed before it's used again).
 */
MEM_STATIC void ZSTD_cwksp_move(ZSTD_cwksp* dst, ZSTD_cwksp* src) {
    *dst = *src;
    ZSTD_memset(src, 0, sizeof(ZSTD_cwksp));
}

MEM_STATIC int ZSTD_cwksp_reserve_failed(const ZSTD_cwksp* ws) {
    return ws->allocFailed;
}

/*-*************************************
* Functions Checking Free Space
***************************************/

/* ZSTD_cwksp_estimated_space_within_bounds() :
 * Returns whether the estimated space needed for a wksp is within an acceptable
 * limit of the actual amount of space used.
 */
MEM_STATIC int ZSTD_cwksp_estimated_space_within_bounds(const ZSTD_cwksp* const ws, size_t const estimatedSpace) {
    /* We have an alignment space between objects and tables, and between tables
     * and buffers, so we can have up to twice the alignment bytes difference
     * between estimation and actual usage */
    return (estimatedSpace - ZSTD_cwksp_slack_space_required()) <= ZSTD_cwksp_used(ws) &&
           ZSTD_cwksp_used(ws) <= estimatedSpace;
}
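
/* Worked example (assuming the 2 * 64-byte slack defined above): for an
 * estimate of 10240 bytes, actual usage anywhere in [10240 - 128, 10240]
 * is considered within bounds. */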


MEM_STATIC size_t ZSTD_cwksp_available_space(ZSTD_cwksp* ws) {
    return (size_t)((BYTE*)ws->allocStart - (BYTE*)ws->tableEnd);
}

MEM_STATIC int ZSTD_cwksp_check_available(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_available_space(ws) >= additionalNeededSpace;
}

MEM_STATIC int ZSTD_cwksp_check_too_large(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_available(
                ws, additionalNeededSpace * ZSTD_WORKSPACETOOLARGE_FACTOR);
}

MEM_STATIC int ZSTD_cwksp_check_wasteful(ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    return ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)
        && ws->workspaceOversizedDuration > ZSTD_WORKSPACETOOLARGE_MAXDURATION;
}

MEM_STATIC void ZSTD_cwksp_bump_oversized_duration(
        ZSTD_cwksp* ws, size_t additionalNeededSpace) {
    if (ZSTD_cwksp_check_too_large(ws, additionalNeededSpace)) {
        ws->workspaceOversizedDuration++;
    } else {
        ws->workspaceOversizedDuration = 0;
    }
}

#if defined (__cplusplus)
}
#endif

#endif /* ZSTD_CWKSP_H */