/*
 * Copyright (c) Meta Platforms, Inc. and affiliates.
 * All rights reserved.
 *
 * This source code is licensed under both the BSD-style license (found in the
 * LICENSE file in the root directory of this source tree) and the GPLv2 (found
 * in the COPYING file in the root directory of this source tree).
 * You may select, at your option, one of the above-listed licenses.
 */

/* *****************************************************************************
 * Constructs a dictionary using a heuristic based on the following paper:
 *
 * Liao, Petri, Moffat, Wirth
 * Effective Construction of Relative Lempel-Ziv Dictionaries
 * Published in WWW 2016.
 *
 * Adapted from code originally written by @ot (Giuseppe Ottaviano).
 ******************************************************************************/

/*-*************************************
*  Dependencies
***************************************/
#include <stdio.h>  /* fprintf */
#include <stdlib.h> /* malloc, free, qsort */
#include <string.h> /* memset */
#include <time.h>   /* clock */

#ifndef ZDICT_STATIC_LINKING_ONLY
#  define ZDICT_STATIC_LINKING_ONLY
#endif

#include "../common/mem.h" /* read */
#include "../common/pool.h" /* POOL_ctx */
#include "../common/threading.h" /* ZSTD_pthread_mutex_t */
#include "../common/zstd_internal.h" /* includes zstd.h */
#include "../common/bits.h" /* ZSTD_highbit32 */
#include "../zdict.h"
#include "cover.h"

/*-*************************************
*  Constants
***************************************/
/**
* 32-bit indexes are used to reference samples, so the total samples size is
* limited to 4 GB on 64-bit builds.
* For 32-bit builds we choose 1 GB.
* Most 32-bit platforms have 2 GB of user-mode addressable space, and we allocate
* a large contiguous buffer, so 1 GB is already a high limit.
*/
#define COVER_MAX_SAMPLES_SIZE (sizeof(size_t) == 8 ? ((unsigned)-1) : ((unsigned)1 GB))
#define COVER_DEFAULT_SPLITPOINT 1.0

/*-*************************************
*  Console display
***************************************/
#ifndef LOCALDISPLAYLEVEL
static int g_displayLevel = 0;
#endif
#undef  DISPLAY
#define DISPLAY(...)                                                           \
  {                                                                            \
    fprintf(stderr, __VA_ARGS__);                                              \
    fflush(stderr);                                                            \
  }
#undef  LOCALDISPLAYLEVEL
#define LOCALDISPLAYLEVEL(displayLevel, l, ...)                                \
  if (displayLevel >= l) {                                                     \
    DISPLAY(__VA_ARGS__);                                                      \
  } /* 0 : no display;   1: errors;   2: default;  3: details;  4: debug */
#undef  DISPLAYLEVEL
#define DISPLAYLEVEL(l, ...) LOCALDISPLAYLEVEL(g_displayLevel, l, __VA_ARGS__)

#ifndef LOCALDISPLAYUPDATE
static const clock_t g_refreshRate = CLOCKS_PER_SEC * 15 / 100;
static clock_t g_time = 0;
#endif
#undef  LOCALDISPLAYUPDATE
#define LOCALDISPLAYUPDATE(displayLevel, l, ...)                               \
  if (displayLevel >= l) {                                                     \
    if ((clock() - g_time > g_refreshRate) || (displayLevel >= 4)) {           \
      g_time = clock();                                                        \
      DISPLAY(__VA_ARGS__);                                                    \
    }                                                                          \
  }
#undef  DISPLAYUPDATE
#define DISPLAYUPDATE(l, ...) LOCALDISPLAYUPDATE(g_displayLevel, l, __VA_ARGS__)

/*-*************************************
* Hash table
***************************************
* A small specialized hash map for storing activeDmers.
* The map does not resize, so if it becomes full it will loop forever.
* Thus, the map must be large enough to store every value.
* The map implements linear probing and keeps its load less than 0.5.
*/

#define MAP_EMPTY_VALUE ((U32)-1)
typedef struct COVER_map_pair_t_s {
  U32 key;
  U32 value;
} COVER_map_pair_t;

typedef struct COVER_map_s {
  COVER_map_pair_t *data;
  U32 sizeLog;
  U32 size;
  U32 sizeMask;
} COVER_map_t;

/**
 * Clear the map.
 */
static void COVER_map_clear(COVER_map_t *map) {
  memset(map->data, MAP_EMPTY_VALUE, map->size * sizeof(COVER_map_pair_t));
}

/**
 * Initializes a map of the given size.
 * Returns 1 on success and 0 on failure.
 * The map must be destroyed with COVER_map_destroy().
 * The map is only guaranteed to be large enough to hold size elements.
 */
static int COVER_map_init(COVER_map_t *map, U32 size) {
  map->sizeLog = ZSTD_highbit32(size) + 2;
  map->size = (U32)1 << map->sizeLog;
  map->sizeMask = map->size - 1;
  map->data = (COVER_map_pair_t *)malloc(map->size * sizeof(COVER_map_pair_t));
  if (!map->data) {
    map->sizeLog = 0;
    map->size = 0;
    return 0;
  }
  COVER_map_clear(map);
  return 1;
}
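
/* Sizing illustration (hypothetical values, not used by the code):
 * COVER_map_init() rounds the requested capacity up to a power of two at
 * least 4x larger, so the load factor stays below 0.5.  The dmer map is
 * initialized with size = k - d + 1; with k = 1024 and d = 8 the request is
 * 1017, ZSTD_highbit32(1017) = 9, sizeLog = 11, and the table gets 2048
 * slots (1017 / 2048 < 0.5).
 */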

/**
 * Internal hash function
 */
static const U32 COVER_prime4bytes = 2654435761U;
static U32 COVER_map_hash(COVER_map_t *map, U32 key) {
  return (key * COVER_prime4bytes) >> (32 - map->sizeLog);
}

/**
 * Helper function that returns the index that a key should be placed into.
 */
static U32 COVER_map_index(COVER_map_t *map, U32 key) {
  const U32 hash = COVER_map_hash(map, key);
  U32 i;
  for (i = hash;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *pos = &map->data[i];
    if (pos->value == MAP_EMPTY_VALUE) {
      return i;
    }
    if (pos->key == key) {
      return i;
    }
  }
}

/**
 * Returns the pointer to the value for key.
 * If key is not in the map, it is inserted and the value is set to 0.
 * The map must not be full.
 */
static U32 *COVER_map_at(COVER_map_t *map, U32 key) {
  COVER_map_pair_t *pos = &map->data[COVER_map_index(map, key)];
  if (pos->value == MAP_EMPTY_VALUE) {
    pos->key = key;
    pos->value = 0;
  }
  return &pos->value;
}

/**
 * Deletes key from the map if present.
 */
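/* The deletion below uses backward-shift repair of the probe chain instead of
 * tombstones: after emptying a slot, each following occupied slot is moved
 * back into the hole whenever its probe distance permits, so lookups never
 * need to scan past deleted markers.
 */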
static void COVER_map_remove(COVER_map_t *map, U32 key) {
  U32 i = COVER_map_index(map, key);
  COVER_map_pair_t *del = &map->data[i];
  U32 shift = 1;
  if (del->value == MAP_EMPTY_VALUE) {
    return;
  }
  for (i = (i + 1) & map->sizeMask;; i = (i + 1) & map->sizeMask) {
    COVER_map_pair_t *const pos = &map->data[i];
    /* If the position is empty we are done */
    if (pos->value == MAP_EMPTY_VALUE) {
      del->value = MAP_EMPTY_VALUE;
      return;
    }
    /* If pos can be moved to del do so */
    if (((i - COVER_map_hash(map, pos->key)) & map->sizeMask) >= shift) {
      del->key = pos->key;
      del->value = pos->value;
      del = pos;
      shift = 1;
    } else {
      ++shift;
    }
  }
}

/**
 * Destroys a map that was initialized with COVER_map_init().
 */
static void COVER_map_destroy(COVER_map_t *map) {
  if (map->data) {
    free(map->data);
  }
  map->data = NULL;
  map->size = 0;
}

/*-*************************************
* Context
***************************************/

typedef struct {
  const BYTE *samples;
  size_t *offsets;
  const size_t *samplesSizes;
  size_t nbSamples;
  size_t nbTrainSamples;
  size_t nbTestSamples;
  U32 *suffix;
  size_t suffixSize;
  U32 *freqs;
  U32 *dmerAt;
  unsigned d;
} COVER_ctx_t;

/* We need a global context for qsort... */
static COVER_ctx_t *g_coverCtx = NULL;

/*-*************************************
*  Helper functions
***************************************/

/**
 * Returns the sum of the sample sizes.
 */
size_t COVER_sum(const size_t *samplesSizes, unsigned nbSamples) {
  size_t sum = 0;
  unsigned i;
  for (i = 0; i < nbSamples; ++i) {
    sum += samplesSizes[i];
  }
  return sum;
}

/**
 * Returns a negative number if the dmer at lp is less than the dmer at rp,
 * 0 if the dmers at lp and rp are equal,
 * and a positive number if the dmer at lp is greater than the dmer at rp.
 */
static int COVER_cmp(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U32 const lhs = *(U32 const *)lp;
  U32 const rhs = *(U32 const *)rp;
  return memcmp(ctx->samples + lhs, ctx->samples + rhs, ctx->d);
}
/**
 * Faster version for d <= 8.
 */
static int COVER_cmp8(COVER_ctx_t *ctx, const void *lp, const void *rp) {
  U64 const mask = (ctx->d == 8) ? (U64)-1 : (((U64)1 << (8 * ctx->d)) - 1);
  U64 const lhs = MEM_readLE64(ctx->samples + *(U32 const *)lp) & mask;
  U64 const rhs = MEM_readLE64(ctx->samples + *(U32 const *)rp) & mask;
  if (lhs < rhs) {
    return -1;
  }
  return (lhs > rhs);
}
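
/* Note: COVER_cmp8() orders dmers by their little-endian integer value, which
 * is not the same order as the byte-wise memcmp() in COVER_cmp().  That is
 * fine: the relative order of *different* dmers never matters, only that
 * equal dmers end up adjacent after sorting (and, within a group, stay in
 * ascending position order, which the pointer tie-break in the strict
 * comparators below guarantees).
 */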

/**
 * Same as COVER_cmp() except ties are broken by pointer value
 * NOTE: g_coverCtx must be set to call this function.  A global is required because
 * qsort doesn't take an opaque pointer.
 */
static int WIN_CDECL COVER_strict_cmp(const void *lp, const void *rp) {
  int result = COVER_cmp(g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}
/**
 * Faster version for d <= 8.
 */
static int WIN_CDECL COVER_strict_cmp8(const void *lp, const void *rp) {
  int result = COVER_cmp8(g_coverCtx, lp, rp);
  if (result == 0) {
    result = lp < rp ? -1 : 1;
  }
  return result;
}

/**
 * Returns the first pointer in [first, last) whose element does not compare
 * less than value.  If no such element exists it returns last.
 */
static const size_t *COVER_lower_bound(const size_t* first, const size_t* last,
                                       size_t value) {
  size_t count = (size_t)(last - first);
  assert(last >= first);
  while (count != 0) {
    size_t step = count / 2;
    const size_t *ptr = first;
    ptr += step;
    if (*ptr < value) {
      first = ++ptr;
      count -= step + 1;
    } else {
      count = step;
    }
  }
  return first;
}

/**
 * Generic groupBy function.
 * Groups an array sorted by cmp into groups with equivalent values.
 * Calls grp for each group.
 */
static void
COVER_groupBy(const void *data, size_t count, size_t size, COVER_ctx_t *ctx,
              int (*cmp)(COVER_ctx_t *, const void *, const void *),
              void (*grp)(COVER_ctx_t *, const void *, const void *)) {
  const BYTE *ptr = (const BYTE *)data;
  size_t num = 0;
  while (num < count) {
    const BYTE *grpEnd = ptr + size;
    ++num;
    while (num < count && cmp(ctx, ptr, grpEnd) == 0) {
      grpEnd += size;
      ++num;
    }
    grp(ctx, ptr, grpEnd);
    ptr = grpEnd;
  }
}
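
/* Illustration (hypothetical data, not used by the code): if the sorted array
 * of dmer positions compares as [A, A, B, C, C, C], COVER_groupBy() invokes
 * grp() three times, over the half-open ranges [0, 2), [2, 3), and [3, 6).
 * Each call receives pointers into the array, so grp() can derive both the
 * group's start index and its length.
 */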

/*-*************************************
*  Cover functions
***************************************/

/**
 * Called on each group of positions with the same dmer.
 * Counts the frequency of each dmer and saves it in the suffix array.
 * Fills `ctx->dmerAt`.
 */
static void COVER_group(COVER_ctx_t *ctx, const void *group,
                        const void *groupEnd) {
  /* The group consists of all the positions with the same first d bytes. */
  const U32 *grpPtr = (const U32 *)group;
  const U32 *grpEnd = (const U32 *)groupEnd;
  /* The dmerId is how we will reference this dmer.
   * This allows us to map the whole dmer space to a much smaller space, the
   * size of the suffix array.
   */
  const U32 dmerId = (U32)(grpPtr - ctx->suffix);
  /* Count the number of samples this dmer shows up in */
  U32 freq = 0;
  /* Details */
  const size_t *curOffsetPtr = ctx->offsets;
  const size_t *offsetsEnd = ctx->offsets + ctx->nbSamples;
  /* Once *grpPtr >= curSampleEnd this occurrence of the dmer is in a
   * different sample than the last.
   */
  size_t curSampleEnd = ctx->offsets[0];
  for (; grpPtr != grpEnd; ++grpPtr) {
    /* Save the dmerId for this position so we can get back to it. */
    ctx->dmerAt[*grpPtr] = dmerId;
    /* Dictionaries only help for the first reference to the dmer.
     * After that zstd can reference the match from the previous reference.
     * So only count each dmer once for each sample it is in.
     */
    if (*grpPtr < curSampleEnd) {
      continue;
    }
    freq += 1;
    /* Binary search to find the end of the sample *grpPtr is in.
     * In the common case that grpPtr + 1 == grpEnd we can skip the binary
     * search because the loop is over.
     */
    if (grpPtr + 1 != grpEnd) {
      const size_t *sampleEndPtr =
          COVER_lower_bound(curOffsetPtr, offsetsEnd, *grpPtr);
      curSampleEnd = *sampleEndPtr;
      curOffsetPtr = sampleEndPtr + 1;
    }
  }
  /* At this point we are never going to look at this segment of the suffix
   * array again.  We take advantage of this fact to save memory.
   * We store the frequency of the dmer in the first position of the group,
   * which is dmerId.
   */
  ctx->suffix[dmerId] = freq;
}


/**
 * Selects the best segment in an epoch.
 * Segments are scored according to the function:
 *
 * Let F(d) be the frequency of dmer d.
 * Let S_i be the dmer at position i of segment S which has length k.
 *
 *     Score(S) = F(S_1) + F(S_2) + ... + F(S_{k-d+1})
 *
 * Once the dmer d is in the dictionary we set F(d) = 0.
 */
static COVER_segment_t COVER_selectSegment(const COVER_ctx_t *ctx, U32 *freqs,
                                           COVER_map_t *activeDmers, U32 begin,
                                           U32 end,
                                           ZDICT_cover_params_t parameters) {
  /* Constants */
  const U32 k = parameters.k;
  const U32 d = parameters.d;
  const U32 dmersInK = k - d + 1;
  /* Try each segment (activeSegment) and save the best (bestSegment) */
  COVER_segment_t bestSegment = {0, 0, 0};
  COVER_segment_t activeSegment;
  /* Reset the activeDmers in the segment */
  COVER_map_clear(activeDmers);
  /* The activeSegment starts at the beginning of the epoch. */
  activeSegment.begin = begin;
  activeSegment.end = begin;
  activeSegment.score = 0;
  /* Slide the activeSegment through the whole epoch.
   * Save the best segment in bestSegment.
   */
  while (activeSegment.end < end) {
    /* The dmerId for the dmer at the next position */
    U32 newDmer = ctx->dmerAt[activeSegment.end];
    /* The entry in activeDmers for this dmerId */
    U32 *newDmerOcc = COVER_map_at(activeDmers, newDmer);
    /* If the dmer isn't already present in the segment add its score. */
    if (*newDmerOcc == 0) {
      /* The paper suggests using the L-0.5 norm, but experiments show that it
       * doesn't help.
       */
      activeSegment.score += freqs[newDmer];
    }
    /* Add the dmer to the segment */
    activeSegment.end += 1;
    *newDmerOcc += 1;

    /* If the window is now too large, drop the first position */
    if (activeSegment.end - activeSegment.begin == dmersInK + 1) {
      U32 delDmer = ctx->dmerAt[activeSegment.begin];
      U32 *delDmerOcc = COVER_map_at(activeDmers, delDmer);
      activeSegment.begin += 1;
      *delDmerOcc -= 1;
      /* If this is the last occurrence of the dmer, subtract its score */
      if (*delDmerOcc == 0) {
        COVER_map_remove(activeDmers, delDmer);
        activeSegment.score -= freqs[delDmer];
      }
    }

    /* If this segment is the best so far save it */
    if (activeSegment.score > bestSegment.score) {
      bestSegment = activeSegment;
    }
  }
  {
    /* Trim off the zero frequency head and tail from the segment. */
    U32 newBegin = bestSegment.end;
    U32 newEnd = bestSegment.begin;
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      U32 freq = freqs[ctx->dmerAt[pos]];
      if (freq != 0) {
        newBegin = MIN(newBegin, pos);
        newEnd = pos + 1;
      }
    }
    bestSegment.begin = newBegin;
    bestSegment.end = newEnd;
  }
  {
    /* Zero out the frequency of each dmer covered by the chosen segment. */
    U32 pos;
    for (pos = bestSegment.begin; pos != bestSegment.end; ++pos) {
      freqs[ctx->dmerAt[pos]] = 0;
    }
  }
  return bestSegment;
}
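
/* Worked example (hypothetical numbers): suppose dmersInK = 3 and the dmers
 * at epoch positions 0..3 are all distinct with frequencies [4, 0, 3, 3].
 * The sliding window scores positions [0,3) as 4+0+3 = 7 and [1,4) as
 * 0+3+3 = 6, so the first window wins.  If a dmer appears twice inside one
 * window, its frequency is added only once (the activeDmers occurrence count
 * handles this).  Finally the winning segment's dmer frequencies are zeroed,
 * so later epochs do not select the same content again.
 */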

/**
 * Check the validity of the parameters.
 * Returns non-zero if the parameters are valid and 0 otherwise.
 */
static int COVER_checkParameters(ZDICT_cover_params_t parameters,
                                 size_t maxDictSize) {
  /* k and d are required parameters */
  if (parameters.d == 0 || parameters.k == 0) {
    return 0;
  }
  /* k <= maxDictSize */
  if (parameters.k > maxDictSize) {
    return 0;
  }
  /* d <= k */
  if (parameters.d > parameters.k) {
    return 0;
  }
  /* 0 < splitPoint <= 1 */
  if (parameters.splitPoint <= 0 || parameters.splitPoint > 1){
    return 0;
  }
  return 1;
}

/**
 * Clean up a context initialized with `COVER_ctx_init()`.
 */
static void COVER_ctx_destroy(COVER_ctx_t *ctx) {
  if (!ctx) {
    return;
  }
  if (ctx->suffix) {
    free(ctx->suffix);
    ctx->suffix = NULL;
  }
  if (ctx->freqs) {
    free(ctx->freqs);
    ctx->freqs = NULL;
  }
  if (ctx->dmerAt) {
    free(ctx->dmerAt);
    ctx->dmerAt = NULL;
  }
  if (ctx->offsets) {
    free(ctx->offsets);
    ctx->offsets = NULL;
  }
}

/**
 * Prepare a context for dictionary building.
 * The context is only dependent on the parameter `d` and can be used multiple
 * times.
 * Returns 0 on success, or an error code on error.
 * The context must be destroyed with `COVER_ctx_destroy()`.
 */
static size_t COVER_ctx_init(COVER_ctx_t *ctx, const void *samplesBuffer,
                          const size_t *samplesSizes, unsigned nbSamples,
                          unsigned d, double splitPoint)
{
  const BYTE *const samples = (const BYTE *)samplesBuffer;
  const size_t totalSamplesSize = COVER_sum(samplesSizes, nbSamples);
  /* Split samples into testing and training sets */
  const unsigned nbTrainSamples = splitPoint < 1.0 ? (unsigned)((double)nbSamples * splitPoint) : nbSamples;
  const unsigned nbTestSamples = splitPoint < 1.0 ? nbSamples - nbTrainSamples : nbSamples;
  const size_t trainingSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes, nbTrainSamples) : totalSamplesSize;
  const size_t testSamplesSize = splitPoint < 1.0 ? COVER_sum(samplesSizes + nbTrainSamples, nbTestSamples) : totalSamplesSize;
  /* Checks */
  if (totalSamplesSize < MAX(d, sizeof(U64)) ||
      totalSamplesSize >= (size_t)COVER_MAX_SAMPLES_SIZE) {
    DISPLAYLEVEL(1, "Total samples size is too large (%u MB), maximum size is %u MB\n",
                 (unsigned)(totalSamplesSize>>20), (COVER_MAX_SAMPLES_SIZE >> 20));
    return ERROR(srcSize_wrong);
  }
  /* Check if there are at least 5 training samples */
  if (nbTrainSamples < 5) {
    DISPLAYLEVEL(1, "Total number of training samples is %u and is invalid.", nbTrainSamples);
    return ERROR(srcSize_wrong);
  }
  /* Check if there's at least one testing sample */
  if (nbTestSamples < 1) {
    DISPLAYLEVEL(1, "Total number of testing samples is %u and is invalid.", nbTestSamples);
    return ERROR(srcSize_wrong);
  }
  /* Zero the context */
  memset(ctx, 0, sizeof(*ctx));
  DISPLAYLEVEL(2, "Training on %u samples of total size %u\n", nbTrainSamples,
               (unsigned)trainingSamplesSize);
  DISPLAYLEVEL(2, "Testing on %u samples of total size %u\n", nbTestSamples,
               (unsigned)testSamplesSize);
  ctx->samples = samples;
  ctx->samplesSizes = samplesSizes;
  ctx->nbSamples = nbSamples;
  ctx->nbTrainSamples = nbTrainSamples;
  ctx->nbTestSamples = nbTestSamples;
  /* Partial suffix array */
  ctx->suffixSize = trainingSamplesSize - MAX(d, sizeof(U64)) + 1;
  ctx->suffix = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* Maps index to the dmerID */
  ctx->dmerAt = (U32 *)malloc(ctx->suffixSize * sizeof(U32));
  /* The offsets of each file */
  ctx->offsets = (size_t *)malloc((nbSamples + 1) * sizeof(size_t));
  if (!ctx->suffix || !ctx->dmerAt || !ctx->offsets) {
    DISPLAYLEVEL(1, "Failed to allocate scratch buffers\n");
    COVER_ctx_destroy(ctx);
    return ERROR(memory_allocation);
  }
  ctx->freqs = NULL;
  ctx->d = d;

  /* Fill offsets from the samplesSizes */
  {
    U32 i;
    ctx->offsets[0] = 0;
    for (i = 1; i <= nbSamples; ++i) {
      ctx->offsets[i] = ctx->offsets[i - 1] + samplesSizes[i - 1];
    }
  }
  DISPLAYLEVEL(2, "Constructing partial suffix array\n");
  {
    /* suffix is a partial suffix array.
     * It only sorts suffixes by their first parameters.d bytes.
     * The sort is stable, so each dmer group is sorted by position in input.
     */
    U32 i;
    for (i = 0; i < ctx->suffixSize; ++i) {
      ctx->suffix[i] = i;
    }
    /* qsort doesn't take an opaque pointer, so pass as a global.
     * On OpenBSD qsort() is not guaranteed to be stable, their mergesort() is.
     */
    g_coverCtx = ctx;
#if defined(__OpenBSD__)
    mergesort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#else
    qsort(ctx->suffix, ctx->suffixSize, sizeof(U32),
          (ctx->d <= 8 ? &COVER_strict_cmp8 : &COVER_strict_cmp));
#endif
  }
  DISPLAYLEVEL(2, "Computing frequencies\n");
  /* For each dmer group (group of positions with the same first d bytes):
   * 1. For each position we set dmerAt[position] = dmerID.  The dmerID is
   *    (groupBeginPtr - suffix).  This allows us to go from position to
   *    dmerID so we can look up values in freq.
   * 2. We calculate how many samples the dmer occurs in and save it in
   *    freqs[dmerId].
   */
  COVER_groupBy(ctx->suffix, ctx->suffixSize, sizeof(U32), ctx,
                (ctx->d <= 8 ? &COVER_cmp8 : &COVER_cmp), &COVER_group);
  ctx->freqs = ctx->suffix;
  ctx->suffix = NULL;
  return 0;
}
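
/* Data-layout illustration (toy input, ignoring the minimum-size checks):
 * for a single sample "abab" with d = 2, the dmers at positions 0..2 are
 * "ab", "ba", "ab".  After sorting, suffix = [0, 2, 1]; the group {0, 2}
 * starts at index 0, so dmerAt[0] = dmerAt[2] = 0, and the group {1} starts
 * at index 2, so dmerAt[1] = 2.  COVER_group() then stores per-sample
 * frequencies at the group heads: freqs[0] = 1 and freqs[2] = 1, because each
 * dmer occurs in exactly one sample (duplicates within a sample count once).
 */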

void COVER_warnOnSmallCorpus(size_t maxDictSize, size_t nbDmers, int displayLevel)
{
  const double ratio = (double)nbDmers / (double)maxDictSize;
  if (ratio >= 10) {
      return;
  }
  LOCALDISPLAYLEVEL(displayLevel, 1,
                    "WARNING: The maximum dictionary size %u is too large "
                    "compared to the source size %u! "
                    "size(source)/size(dictionary) = %f, but it should be >= "
                    "10! This may lead to a subpar dictionary! We recommend "
                    "training on sources at least 10x, and preferably 100x "
                    "the size of the dictionary! \n", (U32)maxDictSize,
                    (U32)nbDmers, ratio);
}

COVER_epoch_info_t COVER_computeEpochs(U32 maxDictSize,
                                       U32 nbDmers, U32 k, U32 passes)
{
  const U32 minEpochSize = k * 10;
  COVER_epoch_info_t epochs;
  epochs.num = MAX(1, maxDictSize / k / passes);
  epochs.size = nbDmers / epochs.num;
  if (epochs.size >= minEpochSize) {
      assert(epochs.size * epochs.num <= nbDmers);
      return epochs;
  }
  epochs.size = MIN(minEpochSize, nbDmers);
  epochs.num = nbDmers / epochs.size;
  assert(epochs.size * epochs.num <= nbDmers);
  return epochs;
}
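
/* Worked example (hypothetical values): with maxDictSize = 112640 (110 KB),
 * k = 1024, passes = 4, and nbDmers = 1000000, the first guess is
 * epochs.num = 112640 / 1024 / 4 = 27 and epochs.size = 1000000 / 27 = 37037.
 * Since 37037 >= minEpochSize (10 * k = 10240), that split is kept.  If the
 * corpus were tiny, the fallback instead fixes epochs.size at minEpochSize
 * (capped at nbDmers) and derives epochs.num from it.
 */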

/**
 * Given the prepared context build the dictionary.
 */
static size_t COVER_buildDictionary(const COVER_ctx_t *ctx, U32 *freqs,
                                    COVER_map_t *activeDmers, void *dictBuffer,
                                    size_t dictBufferCapacity,
                                    ZDICT_cover_params_t parameters) {
  BYTE *const dict = (BYTE *)dictBuffer;
  size_t tail = dictBufferCapacity;
  /* Divide the data into epochs. We will select one segment from each epoch. */
  const COVER_epoch_info_t epochs = COVER_computeEpochs(
      (U32)dictBufferCapacity, (U32)ctx->suffixSize, parameters.k, 4);
  const size_t maxZeroScoreRun = MAX(10, MIN(100, epochs.num >> 3));
  size_t zeroScoreRun = 0;
  size_t epoch;
  DISPLAYLEVEL(2, "Breaking content into %u epochs of size %u\n",
                (U32)epochs.num, (U32)epochs.size);
  /* Loop through the epochs until there are no more segments or the dictionary
   * is full.
   */
  for (epoch = 0; tail > 0; epoch = (epoch + 1) % epochs.num) {
    const U32 epochBegin = (U32)(epoch * epochs.size);
    const U32 epochEnd = epochBegin + epochs.size;
    size_t segmentSize;
    /* Select a segment */
    COVER_segment_t segment = COVER_selectSegment(
        ctx, freqs, activeDmers, epochBegin, epochEnd, parameters);
    /* If the segment covers no dmers, then we are out of content.
     * There may be new content in other epochs, so continue for some time.
     */
    if (segment.score == 0) {
      if (++zeroScoreRun >= maxZeroScoreRun) {
          break;
      }
      continue;
    }
    zeroScoreRun = 0;
    /* Trim the segment if necessary and if it is too small then we are done */
    segmentSize = MIN(segment.end - segment.begin + parameters.d - 1, tail);
    if (segmentSize < parameters.d) {
      break;
    }
    /* We fill the dictionary from the back to allow the best segments to be
     * referenced with the smallest offsets.
     */
    tail -= segmentSize;
    memcpy(dict + tail, ctx->samples + segment.begin, segmentSize);
    DISPLAYUPDATE(
        2, "\r%u%%       ",
        (unsigned)(((dictBufferCapacity - tail) * 100) / dictBufferCapacity));
  }
  DISPLAYLEVEL(2, "\r%79s\r", "");
  return tail;
}

ZDICTLIB_STATIC_API size_t ZDICT_trainFromBuffer_cover(
    void *dictBuffer, size_t dictBufferCapacity,
    const void *samplesBuffer, const size_t *samplesSizes, unsigned nbSamples,
    ZDICT_cover_params_t parameters)
{
  BYTE* const dict = (BYTE*)dictBuffer;
  COVER_ctx_t ctx;
  COVER_map_t activeDmers;
  parameters.splitPoint = 1.0;
  /* Initialize global data */
  g_displayLevel = (int)parameters.zParams.notificationLevel;
  /* Checks */
  if (!COVER_checkParameters(parameters, dictBufferCapacity)) {
    DISPLAYLEVEL(1, "Cover parameters incorrect\n");
    return ERROR(parameter_outOfBound);
  }
  if (nbSamples == 0) {
    DISPLAYLEVEL(1, "Cover must have at least one input file\n");
    return ERROR(srcSize_wrong);
  }
  if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
    DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
                 ZDICT_DICTSIZE_MIN);
    return ERROR(dstSize_tooSmall);
  }
  /* Initialize context and activeDmers */
  {
    size_t const initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples,
                      parameters.d, parameters.splitPoint);
    if (ZSTD_isError(initVal)) {
      return initVal;
    }
  }
  COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, g_displayLevel);
  if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
    DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
    COVER_ctx_destroy(&ctx);
    return ERROR(memory_allocation);
  }

  DISPLAYLEVEL(2, "Building dictionary\n");
  {
    const size_t tail =
        COVER_buildDictionary(&ctx, ctx.freqs, &activeDmers, dictBuffer,
                              dictBufferCapacity, parameters);
    const size_t dictionarySize = ZDICT_finalizeDictionary(
        dict, dictBufferCapacity, dict + tail, dictBufferCapacity - tail,
        samplesBuffer, samplesSizes, nbSamples, parameters.zParams);
    if (!ZSTD_isError(dictionarySize)) {
      DISPLAYLEVEL(2, "Constructed dictionary of size %u\n",
                   (unsigned)dictionarySize);
    }
    COVER_ctx_destroy(&ctx);
    COVER_map_destroy(&activeDmers);
    return dictionarySize;
  }
}
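
/* Usage sketch (illustrative caller code, not part of this file): a client
 * that has concatenated its samples into one buffer and recorded their sizes
 * might call the trainer like this, with hypothetical k/d values:
 *
 *   ZDICT_cover_params_t params;
 *   memset(&params, 0, sizeof(params));
 *   params.k = 1024;   // segment size
 *   params.d = 8;      // dmer size, d <= k
 *   params.zParams.compressionLevel = 3;
 *   {   size_t const dictSize = ZDICT_trainFromBuffer_cover(
 *               dictBuffer, dictCapacity,
 *               samplesBuffer, samplesSizes, nbSamples, params);
 *       if (ZDICT_isError(dictSize)) { handle the error }
 *   }
 *
 * k and d must be set explicitly here; ZDICT_optimizeTrainFromBuffer_cover()
 * searches over them instead.
 */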



size_t COVER_checkTotalCompressedSize(const ZDICT_cover_params_t parameters,
                                    const size_t *samplesSizes, const BYTE *samples,
                                    size_t *offsets,
                                    size_t nbTrainSamples, size_t nbSamples,
                                    BYTE *const dict, size_t dictBufferCapacity) {
  size_t totalCompressedSize = ERROR(GENERIC);
  /* Pointers */
  ZSTD_CCtx *cctx;
  ZSTD_CDict *cdict;
  void *dst;
  /* Local variables */
  size_t dstCapacity;
  size_t i;
  /* Allocate dst with enough space to compress the maximum sized sample */
  {
    size_t maxSampleSize = 0;
    i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
    for (; i < nbSamples; ++i) {
      maxSampleSize = MAX(samplesSizes[i], maxSampleSize);
    }
    dstCapacity = ZSTD_compressBound(maxSampleSize);
    dst = malloc(dstCapacity);
  }
  /* Create the cctx and cdict */
  cctx = ZSTD_createCCtx();
  cdict = ZSTD_createCDict(dict, dictBufferCapacity,
                           parameters.zParams.compressionLevel);
  if (!dst || !cctx || !cdict) {
    goto _compressCleanup;
  }
  /* Compress each sample and sum their sizes (or error) */
  totalCompressedSize = dictBufferCapacity;
  i = parameters.splitPoint < 1.0 ? nbTrainSamples : 0;
  for (; i < nbSamples; ++i) {
    const size_t size = ZSTD_compress_usingCDict(
        cctx, dst, dstCapacity, samples + offsets[i],
        samplesSizes[i], cdict);
    if (ZSTD_isError(size)) {
      totalCompressedSize = size;
      goto _compressCleanup;
    }
    totalCompressedSize += size;
  }
_compressCleanup:
  ZSTD_freeCCtx(cctx);
  ZSTD_freeCDict(cdict);
  if (dst) {
    free(dst);
  }
  return totalCompressedSize;
}


/**
 * Initialize the `COVER_best_t`.
 */
void COVER_best_init(COVER_best_t *best) {
  if (best==NULL) return; /* compatible with init on NULL */
  (void)ZSTD_pthread_mutex_init(&best->mutex, NULL);
  (void)ZSTD_pthread_cond_init(&best->cond, NULL);
  best->liveJobs = 0;
  best->dict = NULL;
  best->dictSize = 0;
  best->compressedSize = (size_t)-1;
  memset(&best->parameters, 0, sizeof(best->parameters));
}

/**
 * Wait until liveJobs == 0.
 */
void COVER_best_wait(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  while (best->liveJobs != 0) {
    ZSTD_pthread_cond_wait(&best->cond, &best->mutex);
  }
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Call COVER_best_wait() and then destroy the COVER_best_t.
 */
void COVER_best_destroy(COVER_best_t *best) {
  if (!best) {
    return;
  }
  COVER_best_wait(best);
  if (best->dict) {
    free(best->dict);
  }
  ZSTD_pthread_mutex_destroy(&best->mutex);
  ZSTD_pthread_cond_destroy(&best->cond);
}

/**
 * Called when a thread is about to be launched.
 * Increments liveJobs.
 */
void COVER_best_start(COVER_best_t *best) {
  if (!best) {
    return;
  }
  ZSTD_pthread_mutex_lock(&best->mutex);
  ++best->liveJobs;
  ZSTD_pthread_mutex_unlock(&best->mutex);
}

/**
 * Called when a thread finishes executing, on both error and success.
 * Decrements liveJobs and signals any waiting threads if liveJobs == 0.
 * If this dictionary is the best so far, save it and its parameters.
 */
void COVER_best_finish(COVER_best_t* best,
                      ZDICT_cover_params_t parameters,
                      COVER_dictSelection_t selection)
{
  void* dict = selection.dictContent;
  size_t compressedSize = selection.totalCompressedSize;
  size_t dictSize = selection.dictSize;
  if (!best) {
    return;
  }
  {
    size_t liveJobs;
    ZSTD_pthread_mutex_lock(&best->mutex);
    --best->liveJobs;
    liveJobs = best->liveJobs;
    /* If the new dictionary is better */
    if (compressedSize < best->compressedSize) {
      /* Allocate space if necessary */
      if (!best->dict || best->dictSize < dictSize) {
        if (best->dict) {
          free(best->dict);
        }
        best->dict = malloc(dictSize);
        if (!best->dict) {
          best->compressedSize = ERROR(GENERIC);
          best->dictSize = 0;
          ZSTD_pthread_cond_signal(&best->cond);
          ZSTD_pthread_mutex_unlock(&best->mutex);
          return;
        }
      }
      /* Save the dictionary, parameters, and size */
      if (dict) {
        memcpy(best->dict, dict, dictSize);
        best->dictSize = dictSize;
        best->parameters = parameters;
        best->compressedSize = compressedSize;
      }
    }
    if (liveJobs == 0) {
      ZSTD_pthread_cond_broadcast(&best->cond);
    }
    ZSTD_pthread_mutex_unlock(&best->mutex);
  }
}
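
/* Lifecycle sketch for the COVER_best_t coordination above (illustrative):
 * the parameter-search driver calls COVER_best_init() once, then
 * COVER_best_start() before launching each trial job; every job ends by
 * calling COVER_best_finish(), which records the candidate if it compressed
 * the test set better than any previous one and wakes waiters when the last
 * job completes.  The driver then calls COVER_best_wait() (or
 * COVER_best_destroy(), which waits first) before reading best->dict.
 */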
957*01826a49SYabin Cui 
setDictSelection(BYTE * buf,size_t s,size_t csz)958*01826a49SYabin Cui static COVER_dictSelection_t setDictSelection(BYTE* buf, size_t s, size_t csz)
959*01826a49SYabin Cui {
960*01826a49SYabin Cui     COVER_dictSelection_t ds;
961*01826a49SYabin Cui     ds.dictContent = buf;
962*01826a49SYabin Cui     ds.dictSize = s;
963*01826a49SYabin Cui     ds.totalCompressedSize = csz;
964*01826a49SYabin Cui     return ds;
965*01826a49SYabin Cui }
966*01826a49SYabin Cui 
COVER_dictSelectionError(size_t error)967*01826a49SYabin Cui COVER_dictSelection_t COVER_dictSelectionError(size_t error) {
968*01826a49SYabin Cui     return setDictSelection(NULL, 0, error);
969*01826a49SYabin Cui }
970*01826a49SYabin Cui 
COVER_dictSelectionIsError(COVER_dictSelection_t selection)971*01826a49SYabin Cui unsigned COVER_dictSelectionIsError(COVER_dictSelection_t selection) {
972*01826a49SYabin Cui   return (ZSTD_isError(selection.totalCompressedSize) || !selection.dictContent);
973*01826a49SYabin Cui }
974*01826a49SYabin Cui 
COVER_dictSelectionFree(COVER_dictSelection_t selection)975*01826a49SYabin Cui void COVER_dictSelectionFree(COVER_dictSelection_t selection){
976*01826a49SYabin Cui   free(selection.dictContent);
977*01826a49SYabin Cui }
978*01826a49SYabin Cui 
979*01826a49SYabin Cui COVER_dictSelection_t COVER_selectDict(BYTE* customDictContent, size_t dictBufferCapacity,
980*01826a49SYabin Cui         size_t dictContentSize, const BYTE* samplesBuffer, const size_t* samplesSizes, unsigned nbFinalizeSamples,
981*01826a49SYabin Cui         size_t nbCheckSamples, size_t nbSamples, ZDICT_cover_params_t params, size_t* offsets, size_t totalCompressedSize) {
982*01826a49SYabin Cui 
983*01826a49SYabin Cui   size_t largestDict = 0;
984*01826a49SYabin Cui   size_t largestCompressed = 0;
985*01826a49SYabin Cui   BYTE* customDictContentEnd = customDictContent + dictContentSize;
986*01826a49SYabin Cui 
987*01826a49SYabin Cui   BYTE* largestDictbuffer = (BYTE*)malloc(dictBufferCapacity);
988*01826a49SYabin Cui   BYTE* candidateDictBuffer = (BYTE*)malloc(dictBufferCapacity);
989*01826a49SYabin Cui   double regressionTolerance = ((double)params.shrinkDictMaxRegression / 100.0) + 1.00;
990*01826a49SYabin Cui 
991*01826a49SYabin Cui   if (!largestDictbuffer || !candidateDictBuffer) {
992*01826a49SYabin Cui     free(largestDictbuffer);
993*01826a49SYabin Cui     free(candidateDictBuffer);
994*01826a49SYabin Cui     return COVER_dictSelectionError(dictContentSize);
995*01826a49SYabin Cui   }
996*01826a49SYabin Cui 
997*01826a49SYabin Cui   /* Initial dictionary size and compressed size */
998*01826a49SYabin Cui   memcpy(largestDictbuffer, customDictContent, dictContentSize);
999*01826a49SYabin Cui   dictContentSize = ZDICT_finalizeDictionary(
1000*01826a49SYabin Cui     largestDictbuffer, dictBufferCapacity, customDictContent, dictContentSize,
1001*01826a49SYabin Cui     samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
1002*01826a49SYabin Cui 
1003*01826a49SYabin Cui   if (ZDICT_isError(dictContentSize)) {
1004*01826a49SYabin Cui     free(largestDictbuffer);
1005*01826a49SYabin Cui     free(candidateDictBuffer);
1006*01826a49SYabin Cui     return COVER_dictSelectionError(dictContentSize);
1007*01826a49SYabin Cui   }
1008*01826a49SYabin Cui 
1009*01826a49SYabin Cui   totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
1010*01826a49SYabin Cui                                                        samplesBuffer, offsets,
1011*01826a49SYabin Cui                                                        nbCheckSamples, nbSamples,
1012*01826a49SYabin Cui                                                        largestDictbuffer, dictContentSize);
1013*01826a49SYabin Cui 
1014*01826a49SYabin Cui   if (ZSTD_isError(totalCompressedSize)) {
1015*01826a49SYabin Cui     free(largestDictbuffer);
1016*01826a49SYabin Cui     free(candidateDictBuffer);
1017*01826a49SYabin Cui     return COVER_dictSelectionError(totalCompressedSize);
1018*01826a49SYabin Cui   }
1019*01826a49SYabin Cui 
1020*01826a49SYabin Cui   if (params.shrinkDict == 0) {
1021*01826a49SYabin Cui     free(candidateDictBuffer);
1022*01826a49SYabin Cui     return setDictSelection(largestDictbuffer, dictContentSize, totalCompressedSize);
1023*01826a49SYabin Cui   }
1024*01826a49SYabin Cui 
1025*01826a49SYabin Cui   largestDict = dictContentSize;
1026*01826a49SYabin Cui   largestCompressed = totalCompressedSize;
1027*01826a49SYabin Cui   dictContentSize = ZDICT_DICTSIZE_MIN;
1028*01826a49SYabin Cui 
1029*01826a49SYabin Cui   /* Largest dict is initially at least ZDICT_DICTSIZE_MIN */
1030*01826a49SYabin Cui   while (dictContentSize < largestDict) {
1031*01826a49SYabin Cui     memcpy(candidateDictBuffer, largestDictbuffer, largestDict);
1032*01826a49SYabin Cui     dictContentSize = ZDICT_finalizeDictionary(
1033*01826a49SYabin Cui       candidateDictBuffer, dictBufferCapacity, customDictContentEnd - dictContentSize, dictContentSize,
1034*01826a49SYabin Cui       samplesBuffer, samplesSizes, nbFinalizeSamples, params.zParams);
1035*01826a49SYabin Cui 
1036*01826a49SYabin Cui     if (ZDICT_isError(dictContentSize)) {
1037*01826a49SYabin Cui       free(largestDictbuffer);
1038*01826a49SYabin Cui       free(candidateDictBuffer);
1039*01826a49SYabin Cui       return COVER_dictSelectionError(dictContentSize);
1040*01826a49SYabin Cui 
1041*01826a49SYabin Cui     }
1042*01826a49SYabin Cui 
1043*01826a49SYabin Cui     totalCompressedSize = COVER_checkTotalCompressedSize(params, samplesSizes,
1044*01826a49SYabin Cui                                                          samplesBuffer, offsets,
1045*01826a49SYabin Cui                                                          nbCheckSamples, nbSamples,
1046*01826a49SYabin Cui                                                          candidateDictBuffer, dictContentSize);
1047*01826a49SYabin Cui 
1048*01826a49SYabin Cui     if (ZSTD_isError(totalCompressedSize)) {
1049*01826a49SYabin Cui       free(largestDictbuffer);
1050*01826a49SYabin Cui       free(candidateDictBuffer);
1051*01826a49SYabin Cui       return COVER_dictSelectionError(totalCompressedSize);
1052*01826a49SYabin Cui     }
1053*01826a49SYabin Cui 
1054*01826a49SYabin Cui     if ((double)totalCompressedSize <= (double)largestCompressed * regressionTolerance) {
1055*01826a49SYabin Cui       free(largestDictbuffer);
1056*01826a49SYabin Cui       return setDictSelection( candidateDictBuffer, dictContentSize, totalCompressedSize );
1057*01826a49SYabin Cui     }
1058*01826a49SYabin Cui     dictContentSize *= 2;
1059*01826a49SYabin Cui   }
1060*01826a49SYabin Cui   dictContentSize = largestDict;
1061*01826a49SYabin Cui   totalCompressedSize = largestCompressed;
1062*01826a49SYabin Cui   free(candidateDictBuffer);
1063*01826a49SYabin Cui   return setDictSelection( largestDictbuffer, dictContentSize, totalCompressedSize );
1064*01826a49SYabin Cui }
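/*
 * Editor's note (illustrative sketch, not part of zstd): COVER_selectDict keeps
 * the full-size dictionary as a baseline, then retries with candidate sizes that
 * double from ZDICT_DICTSIZE_MIN upward. A smaller candidate is accepted as soon
 * as its total compressed size regresses by no more than shrinkDictMaxRegression
 * percent relative to the baseline. The helper below isolates that acceptance
 * test; the toy_* name is hypothetical.
 */
#if 0
#include <stddef.h>

/* Returns nonzero if candidateCSize is within maxRegressionPct percent of
 * baselineCSize, mirroring the tolerance check in the shrink loop above. */
static int toy_withinRegression(size_t candidateCSize, size_t baselineCSize,
                                unsigned maxRegressionPct)
{
  const double tolerance = ((double)maxRegressionPct / 100.0) + 1.00;
  return (double)candidateCSize <= (double)baselineCSize * tolerance;
}
#endif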
1065*01826a49SYabin Cui 
1066*01826a49SYabin Cui /**
1067*01826a49SYabin Cui  * Parameters for COVER_tryParameters().
1068*01826a49SYabin Cui  */
1069*01826a49SYabin Cui typedef struct COVER_tryParameters_data_s {
1070*01826a49SYabin Cui   const COVER_ctx_t *ctx;
1071*01826a49SYabin Cui   COVER_best_t *best;
1072*01826a49SYabin Cui   size_t dictBufferCapacity;
1073*01826a49SYabin Cui   ZDICT_cover_params_t parameters;
1074*01826a49SYabin Cui } COVER_tryParameters_data_t;
1075*01826a49SYabin Cui 
1076*01826a49SYabin Cui /**
1077*01826a49SYabin Cui  * Tries a set of parameters and updates the COVER_best_t with the results.
1078*01826a49SYabin Cui  * This function is thread safe if zstd is compiled with multithreaded support.
1079*01826a49SYabin Cui  * It takes its parameters as an *OWNING* opaque pointer to support threading.
1080*01826a49SYabin Cui  */
1081*01826a49SYabin Cui static void COVER_tryParameters(void *opaque)
1082*01826a49SYabin Cui {
1083*01826a49SYabin Cui   /* Save parameters as local variables */
1084*01826a49SYabin Cui   COVER_tryParameters_data_t *const data = (COVER_tryParameters_data_t*)opaque;
1085*01826a49SYabin Cui   const COVER_ctx_t *const ctx = data->ctx;
1086*01826a49SYabin Cui   const ZDICT_cover_params_t parameters = data->parameters;
1087*01826a49SYabin Cui   size_t dictBufferCapacity = data->dictBufferCapacity;
1088*01826a49SYabin Cui   size_t totalCompressedSize = ERROR(GENERIC);
1089*01826a49SYabin Cui   /* Allocate space for hash table, dict, and freqs */
1090*01826a49SYabin Cui   COVER_map_t activeDmers;
1091*01826a49SYabin Cui   BYTE* const dict = (BYTE*)malloc(dictBufferCapacity);
1092*01826a49SYabin Cui   COVER_dictSelection_t selection = COVER_dictSelectionError(ERROR(GENERIC));
1093*01826a49SYabin Cui   U32* const freqs = (U32*)malloc(ctx->suffixSize * sizeof(U32));
1094*01826a49SYabin Cui   if (!COVER_map_init(&activeDmers, parameters.k - parameters.d + 1)) {
1095*01826a49SYabin Cui     DISPLAYLEVEL(1, "Failed to allocate dmer map: out of memory\n");
1096*01826a49SYabin Cui     goto _cleanup;
1097*01826a49SYabin Cui   }
1098*01826a49SYabin Cui   if (!dict || !freqs) {
1099*01826a49SYabin Cui     DISPLAYLEVEL(1, "Failed to allocate buffers: out of memory\n");
1100*01826a49SYabin Cui     goto _cleanup;
1101*01826a49SYabin Cui   }
1102*01826a49SYabin Cui   /* Copy the frequencies because we need to modify them */
1103*01826a49SYabin Cui   memcpy(freqs, ctx->freqs, ctx->suffixSize * sizeof(U32));
1104*01826a49SYabin Cui   /* Build the dictionary */
1105*01826a49SYabin Cui   {
1106*01826a49SYabin Cui     const size_t tail = COVER_buildDictionary(ctx, freqs, &activeDmers, dict,
1107*01826a49SYabin Cui                                               dictBufferCapacity, parameters);
1108*01826a49SYabin Cui     selection = COVER_selectDict(dict + tail, dictBufferCapacity, dictBufferCapacity - tail,
1109*01826a49SYabin Cui         ctx->samples, ctx->samplesSizes, (unsigned)ctx->nbTrainSamples, ctx->nbTrainSamples, ctx->nbSamples, parameters, ctx->offsets,
1110*01826a49SYabin Cui         totalCompressedSize);
1111*01826a49SYabin Cui 
1112*01826a49SYabin Cui     if (COVER_dictSelectionIsError(selection)) {
1113*01826a49SYabin Cui       DISPLAYLEVEL(1, "Failed to select dictionary\n");
1114*01826a49SYabin Cui       goto _cleanup;
1115*01826a49SYabin Cui     }
1116*01826a49SYabin Cui   }
1117*01826a49SYabin Cui _cleanup:
1118*01826a49SYabin Cui   free(dict);
1119*01826a49SYabin Cui   COVER_best_finish(data->best, parameters, selection);
1120*01826a49SYabin Cui   free(data);
1121*01826a49SYabin Cui   COVER_map_destroy(&activeDmers);
1122*01826a49SYabin Cui   COVER_dictSelectionFree(selection);
1123*01826a49SYabin Cui   free(freqs);
1124*01826a49SYabin Cui }
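/*
 * Editor's note (illustrative sketch, not part of zstd): COVER_tryParameters
 * takes an *owning* heap-allocated argument block so the same callback can run
 * either inline or queued on a thread pool, and the worker (not the caller)
 * frees it. The standalone pattern looks roughly like this, with hypothetical
 * toy_* names and POSIX threads standing in for POOL_add.
 */
#if 0
#include <pthread.h>
#include <stdlib.h>

typedef struct { int k; int d; } toy_job_t;

static void toy_work(void* opaque) {
  toy_job_t* job = (toy_job_t*)opaque;
  /* ... use job->k and job->d ... */
  free(job);                          /* the worker owns and releases the block */
}

static void* toy_thread(void* opaque) { toy_work(opaque); return NULL; }

static int toy_submit(int k, int d, int threaded) {
  toy_job_t* job = (toy_job_t*)malloc(sizeof(*job));
  if (!job) return -1;
  job->k = k;
  job->d = d;
  if (threaded) {
    pthread_t t;
    if (pthread_create(&t, NULL, toy_thread, job)) { free(job); return -1; }
    pthread_detach(t);
  } else {
    toy_work(job);                    /* synchronous fallback, like the !pool path */
  }
  return 0;
}
#endif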
1125*01826a49SYabin Cui 
1126*01826a49SYabin Cui ZDICTLIB_STATIC_API size_t ZDICT_optimizeTrainFromBuffer_cover(
1127*01826a49SYabin Cui     void* dictBuffer, size_t dictBufferCapacity, const void* samplesBuffer,
1128*01826a49SYabin Cui     const size_t* samplesSizes, unsigned nbSamples,
1129*01826a49SYabin Cui     ZDICT_cover_params_t* parameters)
1130*01826a49SYabin Cui {
1131*01826a49SYabin Cui   /* constants */
1132*01826a49SYabin Cui   const unsigned nbThreads = parameters->nbThreads;
1133*01826a49SYabin Cui   const double splitPoint =
1134*01826a49SYabin Cui       parameters->splitPoint <= 0.0 ? COVER_DEFAULT_SPLITPOINT : parameters->splitPoint;
1135*01826a49SYabin Cui   const unsigned kMinD = parameters->d == 0 ? 6 : parameters->d;
1136*01826a49SYabin Cui   const unsigned kMaxD = parameters->d == 0 ? 8 : parameters->d;
1137*01826a49SYabin Cui   const unsigned kMinK = parameters->k == 0 ? 50 : parameters->k;
1138*01826a49SYabin Cui   const unsigned kMaxK = parameters->k == 0 ? 2000 : parameters->k;
1139*01826a49SYabin Cui   const unsigned kSteps = parameters->steps == 0 ? 40 : parameters->steps;
1140*01826a49SYabin Cui   const unsigned kStepSize = MAX((kMaxK - kMinK) / kSteps, 1);
1141*01826a49SYabin Cui   const unsigned kIterations =
1142*01826a49SYabin Cui       (1 + (kMaxD - kMinD) / 2) * (1 + (kMaxK - kMinK) / kStepSize);
1143*01826a49SYabin Cui   const unsigned shrinkDict = 0;
1144*01826a49SYabin Cui   /* Local variables */
1145*01826a49SYabin Cui   const int displayLevel = parameters->zParams.notificationLevel;
1146*01826a49SYabin Cui   unsigned iteration = 1;
1147*01826a49SYabin Cui   unsigned d;
1148*01826a49SYabin Cui   unsigned k;
1149*01826a49SYabin Cui   COVER_best_t best;
1150*01826a49SYabin Cui   POOL_ctx *pool = NULL;
1151*01826a49SYabin Cui   int warned = 0;
1152*01826a49SYabin Cui 
1153*01826a49SYabin Cui   /* Checks */
1154*01826a49SYabin Cui   if (splitPoint <= 0 || splitPoint > 1) {
1155*01826a49SYabin Cui     LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
1156*01826a49SYabin Cui     return ERROR(parameter_outOfBound);
1157*01826a49SYabin Cui   }
1158*01826a49SYabin Cui   if (kMinK < kMaxD || kMaxK < kMinK) {
1159*01826a49SYabin Cui     LOCALDISPLAYLEVEL(displayLevel, 1, "Incorrect parameters\n");
1160*01826a49SYabin Cui     return ERROR(parameter_outOfBound);
1161*01826a49SYabin Cui   }
1162*01826a49SYabin Cui   if (nbSamples == 0) {
1163*01826a49SYabin Cui     DISPLAYLEVEL(1, "Cover must have at least one input file\n");
1164*01826a49SYabin Cui     return ERROR(srcSize_wrong);
1165*01826a49SYabin Cui   }
1166*01826a49SYabin Cui   if (dictBufferCapacity < ZDICT_DICTSIZE_MIN) {
1167*01826a49SYabin Cui     DISPLAYLEVEL(1, "dictBufferCapacity must be at least %u\n",
1168*01826a49SYabin Cui                  ZDICT_DICTSIZE_MIN);
1169*01826a49SYabin Cui     return ERROR(dstSize_tooSmall);
1170*01826a49SYabin Cui   }
1171*01826a49SYabin Cui   if (nbThreads > 1) {
1172*01826a49SYabin Cui     pool = POOL_create(nbThreads, 1);
1173*01826a49SYabin Cui     if (!pool) {
1174*01826a49SYabin Cui       return ERROR(memory_allocation);
1175*01826a49SYabin Cui     }
1176*01826a49SYabin Cui   }
1177*01826a49SYabin Cui   /* Initialization */
1178*01826a49SYabin Cui   COVER_best_init(&best);
1179*01826a49SYabin Cui   /* Turn down global display level to clean up display at level 2 and below */
1180*01826a49SYabin Cui   g_displayLevel = displayLevel == 0 ? 0 : displayLevel - 1;
1181*01826a49SYabin Cui   /* Loop through d first because each new value needs a new context */
1182*01826a49SYabin Cui   LOCALDISPLAYLEVEL(displayLevel, 2, "Trying %u different sets of parameters\n",
1183*01826a49SYabin Cui                     kIterations);
1184*01826a49SYabin Cui   for (d = kMinD; d <= kMaxD; d += 2) {
1185*01826a49SYabin Cui     /* Initialize the context for this value of d */
1186*01826a49SYabin Cui     COVER_ctx_t ctx;
1187*01826a49SYabin Cui     LOCALDISPLAYLEVEL(displayLevel, 3, "d=%u\n", d);
1188*01826a49SYabin Cui     {
1189*01826a49SYabin Cui       const size_t initVal = COVER_ctx_init(&ctx, samplesBuffer, samplesSizes, nbSamples, d, splitPoint);
1190*01826a49SYabin Cui       if (ZSTD_isError(initVal)) {
1191*01826a49SYabin Cui         LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to initialize context\n");
1192*01826a49SYabin Cui         COVER_best_destroy(&best);
1193*01826a49SYabin Cui         POOL_free(pool);
1194*01826a49SYabin Cui         return initVal;
1195*01826a49SYabin Cui       }
1196*01826a49SYabin Cui     }
1197*01826a49SYabin Cui     if (!warned) {
1198*01826a49SYabin Cui       COVER_warnOnSmallCorpus(dictBufferCapacity, ctx.suffixSize, displayLevel);
1199*01826a49SYabin Cui       warned = 1;
1200*01826a49SYabin Cui     }
1201*01826a49SYabin Cui     /* Loop through k reusing the same context */
1202*01826a49SYabin Cui     for (k = kMinK; k <= kMaxK; k += kStepSize) {
1203*01826a49SYabin Cui       /* Prepare the arguments */
1204*01826a49SYabin Cui       COVER_tryParameters_data_t *data = (COVER_tryParameters_data_t *)malloc(
1205*01826a49SYabin Cui           sizeof(COVER_tryParameters_data_t));
1206*01826a49SYabin Cui       LOCALDISPLAYLEVEL(displayLevel, 3, "k=%u\n", k);
1207*01826a49SYabin Cui       if (!data) {
1208*01826a49SYabin Cui         LOCALDISPLAYLEVEL(displayLevel, 1, "Failed to allocate parameters\n");
1209*01826a49SYabin Cui         COVER_best_destroy(&best);
1210*01826a49SYabin Cui         COVER_ctx_destroy(&ctx);
1211*01826a49SYabin Cui         POOL_free(pool);
1212*01826a49SYabin Cui         return ERROR(memory_allocation);
1213*01826a49SYabin Cui       }
1214*01826a49SYabin Cui       data->ctx = &ctx;
1215*01826a49SYabin Cui       data->best = &best;
1216*01826a49SYabin Cui       data->dictBufferCapacity = dictBufferCapacity;
1217*01826a49SYabin Cui       data->parameters = *parameters;
1218*01826a49SYabin Cui       data->parameters.k = k;
1219*01826a49SYabin Cui       data->parameters.d = d;
1220*01826a49SYabin Cui       data->parameters.splitPoint = splitPoint;
1221*01826a49SYabin Cui       data->parameters.steps = kSteps;
1222*01826a49SYabin Cui       data->parameters.shrinkDict = shrinkDict;
1223*01826a49SYabin Cui       data->parameters.zParams.notificationLevel = g_displayLevel;
1224*01826a49SYabin Cui       /* Check the parameters */
1225*01826a49SYabin Cui       if (!COVER_checkParameters(data->parameters, dictBufferCapacity)) {
1226*01826a49SYabin Cui         DISPLAYLEVEL(1, "Cover parameters incorrect\n");
1227*01826a49SYabin Cui         free(data);
1228*01826a49SYabin Cui         continue;
1229*01826a49SYabin Cui       }
1230*01826a49SYabin Cui       /* Call the function and pass ownership of data to it */
1231*01826a49SYabin Cui       COVER_best_start(&best);
1232*01826a49SYabin Cui       if (pool) {
1233*01826a49SYabin Cui         POOL_add(pool, &COVER_tryParameters, data);
1234*01826a49SYabin Cui       } else {
1235*01826a49SYabin Cui         COVER_tryParameters(data);
1236*01826a49SYabin Cui       }
1237*01826a49SYabin Cui       /* Print status */
1238*01826a49SYabin Cui       LOCALDISPLAYUPDATE(displayLevel, 2, "\r%u%%       ",
1239*01826a49SYabin Cui                          (unsigned)((iteration * 100) / kIterations));
1240*01826a49SYabin Cui       ++iteration;
1241*01826a49SYabin Cui     }
1242*01826a49SYabin Cui     COVER_best_wait(&best);
1243*01826a49SYabin Cui     COVER_ctx_destroy(&ctx);
1244*01826a49SYabin Cui   }
1245*01826a49SYabin Cui   LOCALDISPLAYLEVEL(displayLevel, 2, "\r%79s\r", "");
1246*01826a49SYabin Cui   /* Fill the output buffer and parameters with the results of the best parameters */
1247*01826a49SYabin Cui   {
1248*01826a49SYabin Cui     const size_t dictSize = best.dictSize;
1249*01826a49SYabin Cui     if (ZSTD_isError(best.compressedSize)) {
1250*01826a49SYabin Cui       const size_t compressedSize = best.compressedSize;
1251*01826a49SYabin Cui       COVER_best_destroy(&best);
1252*01826a49SYabin Cui       POOL_free(pool);
1253*01826a49SYabin Cui       return compressedSize;
1254*01826a49SYabin Cui     }
1255*01826a49SYabin Cui     *parameters = best.parameters;
1256*01826a49SYabin Cui     memcpy(dictBuffer, best.dict, dictSize);
1257*01826a49SYabin Cui     COVER_best_destroy(&best);
1258*01826a49SYabin Cui     POOL_free(pool);
1259*01826a49SYabin Cui     return dictSize;
1260*01826a49SYabin Cui   }
1261*01826a49SYabin Cui }
1262
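/*
 * Editor's note (illustrative sketch, not part of zstd): a minimal caller of the
 * optimizer above. Leaving k, d and steps at 0 lets the grid search choose them;
 * on success the winning parameters are written back into `params`. Buffer sizes,
 * the include path, and the toy_* wrapper name are placeholders.
 */
#if 0
#define ZDICT_STATIC_LINKING_ONLY
#include <stdio.h>
#include <string.h>
#include "../zdict.h"

static size_t toy_trainCoverDict(void* dictBuffer, size_t dictCapacity,
                                 const void* samples, const size_t* sampleSizes,
                                 unsigned nbSamples)
{
  ZDICT_cover_params_t params;
  memset(&params, 0, sizeof(params));   /* k = d = steps = 0 => full parameter search */
  params.nbThreads = 4;                 /* only used if zstd was built with threading */
  params.zParams.compressionLevel = 3;
  params.zParams.notificationLevel = 2; /* progress output on stderr */
  {
    const size_t dictSize = ZDICT_optimizeTrainFromBuffer_cover(
        dictBuffer, dictCapacity, samples, sampleSizes, nbSamples, &params);
    if (ZDICT_isError(dictSize)) {
      fprintf(stderr, "training failed: %s\n", ZDICT_getErrorName(dictSize));
      return 0;
    }
    fprintf(stderr, "best k=%u d=%u, dictionary is %zu bytes\n",
            params.k, params.d, dictSize);
    return dictSize;
  }
}
#endif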