/* xref: /aosp_15_r20/external/mesa3d/src/util/ralloc.c (revision 6104692788411f58d303aa86923a9ff6ecaded22) */
/*
 * Copyright © 2010 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 */

#include <assert.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#include "util/list.h"
#include "util/macros.h"
#include "util/u_math.h"
#include "util/u_printf.h"

#include "ralloc.h"

#define CANARY 0x5A1106

#if defined(__LP64__) || defined(_WIN64)
#define HEADER_ALIGN 16
#else
#define HEADER_ALIGN 8
#endif

/* Align the header's size so that ralloc() allocations will return with the
 * same alignment as a libc malloc would have (8 on 32-bit GLIBC, 16 on
 * 64-bit), avoiding performance penalties on x86 and alignment faults on
 * ARM.
 */
struct ralloc_header
{
   alignas(HEADER_ALIGN)

#ifndef NDEBUG
   /* A canary value used to determine whether a pointer is ralloc'd. */
   unsigned canary;
   unsigned size;
#endif

   struct ralloc_header *parent;

   /* The first child (head of a linked list) */
   struct ralloc_header *child;

   /* Linked list of siblings */
   struct ralloc_header *prev;
   struct ralloc_header *next;

   void (*destructor)(void *);
};

typedef struct ralloc_header ralloc_header;
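
/* A minimal usage sketch (illustrative, not part of the library): every
 * allocation is a node in this tree, so freeing a context frees everything
 * parented beneath it.  "struct foo" is a hypothetical caller type and
 * ralloc() is the convenience macro from ralloc.h:
 *
 *    void *ctx = ralloc_context(NULL);
 *    struct foo *f = ralloc(ctx, struct foo);  // child of ctx
 *    char *s = ralloc_strdup(ctx, "hello");    // sibling of f
 *    ralloc_free(ctx);                         // frees ctx, f and s
 */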

static void unlink_block(ralloc_header *info);
static void unsafe_free(ralloc_header *info);

static ralloc_header *
get_header(const void *ptr)
{
   ralloc_header *info = (ralloc_header *) (((char *) ptr) -
					    sizeof(ralloc_header));
   assert(info->canary == CANARY);
   return info;
}

#define PTR_FROM_HEADER(info) (((char *) info) + sizeof(ralloc_header))

static void
add_child(ralloc_header *parent, ralloc_header *info)
{
   if (parent != NULL) {
      info->parent = parent;
      info->next = parent->child;
      parent->child = info;

      if (info->next != NULL)
	 info->next->prev = info;
   }
}

void *
ralloc_context(const void *ctx)
{
   return ralloc_size(ctx, 0);
}

void *
ralloc_size(const void *ctx, size_t size)
{
   /* malloc doesn't always align to 16 bytes, even on 64-bit systems.
    * From Android bionic/tests/malloc_test.cpp:
    *  - Allocations of a size that rounds up to a multiple of 16 bytes
    *    must have at least 16 byte alignment.
    *  - Allocations of a size that rounds up to a multiple of 8 bytes and
    *    not 16 bytes, are only required to have at least 8 byte alignment.
    */
   void *block = malloc(align64(size + sizeof(ralloc_header),
                                alignof(ralloc_header)));
   ralloc_header *info;
   ralloc_header *parent;

   if (unlikely(block == NULL))
      return NULL;

   info = (ralloc_header *) block;
   /* measurements have shown that calloc is slower (because of
    * the multiplication overflow checking?), so clear things
    * manually
    */
   info->parent = NULL;
   info->child = NULL;
   info->prev = NULL;
   info->next = NULL;
   info->destructor = NULL;

   parent = ctx != NULL ? get_header(ctx) : NULL;

   add_child(parent, info);

#ifndef NDEBUG
   info->canary = CANARY;
   info->size = size;
#endif

   return PTR_FROM_HEADER(info);
}

void *
rzalloc_size(const void *ctx, size_t size)
{
   void *ptr = ralloc_size(ctx, size);

   if (likely(ptr))
      memset(ptr, 0, size);

   return ptr;
}

/* helper function - assumes ptr != NULL */
static void *
resize(void *ptr, size_t size)
{
   ralloc_header *child, *old, *info;

   old = get_header(ptr);
   info = realloc(old, align64(size + sizeof(ralloc_header),
                               alignof(ralloc_header)));

   if (info == NULL)
      return NULL;

   /* Update parent and sibling's links to the reallocated node. */
   if (info != old && info->parent != NULL) {
      if (info->parent->child == old)
	 info->parent->child = info;

      if (info->prev != NULL)
	 info->prev->next = info;

      if (info->next != NULL)
	 info->next->prev = info;
   }

   /* Update child->parent links for all children */
   for (child = info->child; child != NULL; child = child->next)
      child->parent = info;

   return PTR_FROM_HEADER(info);
}

void *
reralloc_size(const void *ctx, void *ptr, size_t size)
{
   if (unlikely(ptr == NULL))
      return ralloc_size(ctx, size);

   assert(ralloc_parent(ptr) == ctx);
   return resize(ptr, size);
}

void *
rerzalloc_size(const void *ctx, void *ptr, size_t old_size, size_t new_size)
{
   if (unlikely(ptr == NULL))
      return rzalloc_size(ctx, new_size);

   assert(ralloc_parent(ptr) == ctx);
   ptr = resize(ptr, new_size);

   if (new_size > old_size)
      memset((char *)ptr + old_size, 0, new_size - old_size);

   return ptr;
}

void *
ralloc_array_size(const void *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return ralloc_size(ctx, size * count);
}

void *
rzalloc_array_size(const void *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return rzalloc_size(ctx, size * count);
}

void *
reralloc_array_size(const void *ctx, void *ptr, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return reralloc_size(ctx, ptr, size * count);
}

void *
rerzalloc_array_size(const void *ctx, void *ptr, size_t size,
                     unsigned old_count, unsigned new_count)
{
   if (new_count > SIZE_MAX/size)
      return NULL;

   return rerzalloc_size(ctx, ptr, size * old_count, size * new_count);
}
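
/* Illustrative sketch of the resize helpers, assuming the reralloc()/
 * rerzalloc() convenience macros from ralloc.h; "ctx" and "items" are
 * hypothetical:
 *
 *    int *items = ralloc_array(ctx, int, 8);
 *    items = reralloc(ctx, items, int, 16);       // first 8 ints preserved
 *    items = rerzalloc(ctx, items, int, 16, 32);  // ints 16..31 zeroed
 */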

void
ralloc_free(void *ptr)
{
   ralloc_header *info;

   if (ptr == NULL)
      return;

   info = get_header(ptr);
   unlink_block(info);
   unsafe_free(info);
}

static void
unlink_block(ralloc_header *info)
{
   /* Unlink from parent & siblings */
   if (info->parent != NULL) {
      if (info->parent->child == info)
	 info->parent->child = info->next;

      if (info->prev != NULL)
	 info->prev->next = info->next;

      if (info->next != NULL)
	 info->next->prev = info->prev;
   }
   info->parent = NULL;
   info->prev = NULL;
   info->next = NULL;
}

static void
unsafe_free(ralloc_header *info)
{
   /* Recursively free any children...don't waste time unlinking them. */
   ralloc_header *temp;
   while (info->child != NULL) {
      temp = info->child;
      info->child = temp->next;
      unsafe_free(temp);
   }

   /* Free the block itself.  Call the destructor first, if any. */
   if (info->destructor != NULL)
      info->destructor(PTR_FROM_HEADER(info));

   free(info);
}

void
ralloc_steal(const void *new_ctx, void *ptr)
{
   ralloc_header *info, *parent;

   if (unlikely(ptr == NULL))
      return;

   info = get_header(ptr);
   parent = new_ctx ? get_header(new_ctx) : NULL;

   unlink_block(info);

   add_child(parent, info);
}

void
ralloc_adopt(const void *new_ctx, void *old_ctx)
{
   ralloc_header *new_info, *old_info, *child;

   if (unlikely(old_ctx == NULL))
      return;

   old_info = get_header(old_ctx);
   new_info = get_header(new_ctx);

   /* If there are no children, bail. */
   if (unlikely(old_info->child == NULL))
      return;

   /* Set all the children's parent to new_ctx; get a pointer to the last child. */
   for (child = old_info->child; child->next != NULL; child = child->next) {
      child->parent = new_info;
   }
   child->parent = new_info;

   /* Connect the two lists together; parent them to new_ctx; make old_ctx empty. */
   child->next = new_info->child;
   if (child->next)
      child->next->prev = child;
   new_info->child = old_info->child;
   old_info->child = NULL;
}
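
/* Illustrative re-parenting patterns ("tmp", "result" and "long_lived" are
 * hypothetical contexts/pointers):
 *
 *    ralloc_steal(long_lived, result);   // keep just this allocation
 *    ralloc_adopt(long_lived, tmp);      // or move *all* children of tmp
 *    ralloc_free(tmp);                   // result (and adoptees) survive
 *
 * ralloc_adopt leaves old_ctx itself valid but childless, which is how
 * gc_sweep_start() below reuses it to quarantine a whole generation.
 */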

void *
ralloc_parent(const void *ptr)
{
   ralloc_header *info;

   if (unlikely(ptr == NULL))
      return NULL;

   info = get_header(ptr);
   return info->parent ? PTR_FROM_HEADER(info->parent) : NULL;
}

void
ralloc_set_destructor(const void *ptr, void(*destructor)(void *))
{
   ralloc_header *info = get_header(ptr);
   info->destructor = destructor;
}
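
/* A minimal destructor sketch ("struct holder" and "close_file" are
 * hypothetical).  Note that unsafe_free() above frees a block's children
 * before running the block's own destructor:
 *
 *    static void close_file(void *ptr) {
 *       fclose(((struct holder *) ptr)->f);
 *    }
 *
 *    struct holder *h = ralloc(ctx, struct holder);
 *    h->f = fopen(path, "r");
 *    ralloc_set_destructor(h, close_file);
 *    // ... later, freeing h (or an ancestor) invokes close_file(h).
 */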

void *
ralloc_memdup(const void *ctx, const void *mem, size_t n)
{
   void *ptr = ralloc_size(ctx, n);

   if (unlikely(ptr == NULL))
      return NULL;

   memcpy(ptr, mem, n);
   return ptr;
}

char *
ralloc_strdup(const void *ctx, const char *str)
{
   size_t n;
   char *ptr;

   if (unlikely(str == NULL))
      return NULL;

   n = strlen(str);
   ptr = ralloc_array(ctx, char, n + 1);
   memcpy(ptr, str, n);
   ptr[n] = '\0';
   return ptr;
}

char *
ralloc_strndup(const void *ctx, const char *str, size_t max)
{
   size_t n;
   char *ptr;

   if (unlikely(str == NULL))
      return NULL;

   n = strnlen(str, max);
   ptr = ralloc_array(ctx, char, n + 1);
   memcpy(ptr, str, n);
   ptr[n] = '\0';
   return ptr;
}

/* helper routine for strcat/strncat - n is the exact amount to copy */
static bool
cat(char **dest, const char *str, size_t n)
{
   char *both;
   size_t existing_length;
   assert(dest != NULL && *dest != NULL);

   existing_length = strlen(*dest);
   both = resize(*dest, existing_length + n + 1);
   if (unlikely(both == NULL))
      return false;

   memcpy(both + existing_length, str, n);
   both[existing_length + n] = '\0';

   *dest = both;
   return true;
}


bool
ralloc_strcat(char **dest, const char *str)
{
   return cat(dest, str, strlen(str));
}

bool
ralloc_strncat(char **dest, const char *str, size_t n)
{
   return cat(dest, str, strnlen(str, n));
}

bool
ralloc_str_append(char **dest, const char *str,
                  size_t existing_length, size_t str_size)
{
   char *both;
   assert(dest != NULL && *dest != NULL);

   both = resize(*dest, existing_length + str_size + 1);
   if (unlikely(both == NULL))
      return false;

   memcpy(both + existing_length, str, str_size);
   both[existing_length + str_size] = '\0';

   *dest = both;

   return true;
}

char *
ralloc_asprintf(const void *ctx, const char *fmt, ...)
{
   char *ptr;
   va_list args;
   va_start(args, fmt);
   ptr = ralloc_vasprintf(ctx, fmt, args);
   va_end(args);
   return ptr;
}

char *
ralloc_vasprintf(const void *ctx, const char *fmt, va_list args)
{
   size_t size = u_printf_length(fmt, args) + 1;

   char *ptr = ralloc_size(ctx, size);
   if (ptr != NULL)
      vsnprintf(ptr, size, fmt, args);

   return ptr;
}

bool
ralloc_asprintf_append(char **str, const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = ralloc_vasprintf_append(str, fmt, args);
   va_end(args);
   return success;
}

bool
ralloc_vasprintf_append(char **str, const char *fmt, va_list args)
{
   size_t existing_length;
   assert(str != NULL);
   existing_length = *str ? strlen(*str) : 0;
   return ralloc_vasprintf_rewrite_tail(str, &existing_length, fmt, args);
}

bool
ralloc_asprintf_rewrite_tail(char **str, size_t *start, const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = ralloc_vasprintf_rewrite_tail(str, start, fmt, args);
   va_end(args);
   return success;
}

bool
ralloc_vasprintf_rewrite_tail(char **str, size_t *start, const char *fmt,
			      va_list args)
{
   size_t new_length;
   char *ptr;

   assert(str != NULL);

   if (unlikely(*str == NULL)) {
      /* Assuming a NULL context is probably bad, but it's expected behavior. */
      *str = ralloc_vasprintf(NULL, fmt, args);
      *start = strlen(*str);
      return true;
   }

   new_length = u_printf_length(fmt, args);

   ptr = resize(*str, *start + new_length + 1);
   if (unlikely(ptr == NULL))
      return false;

   vsnprintf(ptr + *start, new_length + 1, fmt, args);
   *str = ptr;
   *start += new_length;
   return true;
}
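
/* Illustrative use of the printf-style helpers above ("ctx", "n" and "m"
 * are hypothetical):
 *
 *    char *msg = ralloc_asprintf(ctx, "%d errors", n);
 *    ralloc_asprintf_append(&msg, ", %d warnings", m);
 *
 * The rewrite_tail variants print at a caller-tracked offset instead of
 * calling strlen() on every append, which matters when building long
 * strings piecewise.
 */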

/***************************************************************************
 * GC context.
 ***************************************************************************
 */

/* The maximum size of an object that will be allocated from a slab
 * freelist; larger objects fall back to a plain ralloc allocation.
 */
#define MAX_FREELIST_SIZE 512

/* Allocations small enough to be allocated from a freelist will be aligned up
 * to this size.
 */
#define FREELIST_ALIGNMENT 32

#define NUM_FREELIST_BUCKETS (MAX_FREELIST_SIZE / FREELIST_ALIGNMENT)
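
/* Worked example of the bucket math (derived from the constants above):
 * there are 512/32 = 16 buckets, and bucket N holds objects of
 * 32*(N+1) bytes.  A request whose header-inclusive, aligned size is
 * 104 bytes maps to gc_bucket_for_size(104) = (104-1)/32 = bucket 3,
 * i.e. a 128-byte slot.
 */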

/* The size of a slab. */
#define SLAB_SIZE (32 * 1024)

#define GC_CONTEXT_CANARY 0xAF6B6C83
#define GC_CANARY 0xAF6B5B72

enum gc_flags {
   IS_USED = (1 << 0),
   CURRENT_GENERATION = (1 << 1),
   IS_PADDING = (1 << 7),
};

typedef struct
{
#ifndef NDEBUG
   /* A canary value used to determine whether a pointer is allocated using gc_alloc. */
   unsigned canary;
#endif

   uint16_t slab_offset;
   uint8_t bucket;
   uint8_t flags;

   /* The last padding byte must have IS_PADDING set and is used to store the amount of padding. If
    * there is no padding, the IS_PADDING bit of "flags" is unset and "flags" is checked instead.
    * Because of this, "flags" must be the last member of this struct.
    */
   uint8_t padding[];
} gc_block_header;

/* This structure is at the start of the slab. Objects inside a slab are
 * allocated using a freelist backed by a simple linear allocator.
 */
typedef struct gc_slab {
   alignas(HEADER_ALIGN)

   gc_ctx *ctx;

   /* Objects are allocated using either linear or freelist allocation. "next_available" is the
    * pointer used for linear allocation, while "freelist" is the next free object for freelist
    * allocation.
    */
   char *next_available;
   gc_block_header *freelist;

   /* Slabs that handle the same-sized objects. */
   struct list_head link;

   /* Free slabs that handle the same-sized objects. */
   struct list_head free_link;

   /* Number of allocated and free objects, recorded so that we can free the slab if it
    * becomes empty or add one to the freelist if it's no longer full.
    */
   unsigned num_allocated;
   unsigned num_free;
} gc_slab;

struct gc_ctx {
#ifndef NDEBUG
   unsigned canary;
#endif

   /* Array of slabs for fixed-size allocations. Each slab tracks allocations
    * of specific sized blocks. User allocations are rounded up to the nearest
    * fixed size. slabs[N] contains allocations of size
    * FREELIST_ALIGNMENT * (N + 1).
    */
   struct {
      /* List of slabs in this bucket. */
      struct list_head slabs;

      /* List of slabs with free space in this bucket, so we can quickly choose one when
       * allocating.
       */
      struct list_head free_slabs;
   } slabs[NUM_FREELIST_BUCKETS];

   uint8_t current_gen;
   void *rubbish;
};

static gc_block_header *
get_gc_header(const void *ptr)
{
   uint8_t *c_ptr = (uint8_t *)ptr;

   /* Adjust for padding added to ensure alignment of the allocation. There might also be padding
    * added by the compiler into gc_block_header, but that isn't counted in the IS_PADDING byte.
    */
   if (c_ptr[-1] & IS_PADDING)
      c_ptr -= c_ptr[-1] & ~IS_PADDING;

   c_ptr -= sizeof(gc_block_header);

   gc_block_header *info = (gc_block_header *)c_ptr;
   assert(info->canary == GC_CANARY);
   return info;
}
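
/* Illustrative layout of a gc allocation as decoded above (not normative):
 *
 *    [gc_block_header][alignment padding][user data ...]
 *                                        ^ ptr, ptr[-1] is the byte before
 *
 * When padding was inserted, its final byte (ptr[-1]) holds
 * IS_PADDING | padding_size; otherwise ptr[-1] is the "flags" member
 * itself, whose IS_PADDING bit is never set.
 */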

static gc_block_header *
get_gc_freelist_next(gc_block_header *ptr)
{
   gc_block_header *next;
   /* work around possible strict aliasing bug using memcpy */
   memcpy(&next, (void*)(ptr + 1), sizeof(next));
   return next;
}

static void
set_gc_freelist_next(gc_block_header *ptr, gc_block_header *next)
{
   memcpy((void*)(ptr + 1), &next, sizeof(next));
}

static gc_slab *
get_gc_slab(gc_block_header *header)
{
   return (gc_slab *)((char *)header - header->slab_offset);
}

gc_ctx *
gc_context(const void *parent)
{
   gc_ctx *ctx = rzalloc(parent, gc_ctx);
   for (unsigned i = 0; i < NUM_FREELIST_BUCKETS; i++) {
      list_inithead(&ctx->slabs[i].slabs);
      list_inithead(&ctx->slabs[i].free_slabs);
   }
#ifndef NDEBUG
   ctx->canary = GC_CONTEXT_CANARY;
#endif
   return ctx;
}

static_assert(UINT32_MAX >= MAX_FREELIST_SIZE, "Freelist sizes use uint32_t");

static uint32_t
gc_bucket_obj_size(uint32_t bucket)
{
   return (bucket + 1) * FREELIST_ALIGNMENT;
}

static uint32_t
gc_bucket_for_size(uint32_t size)
{
   return (size - 1) / FREELIST_ALIGNMENT;
}

static_assert(UINT32_MAX >= SLAB_SIZE, "SLAB_SIZE uses uint32_t");

static uint32_t
gc_bucket_num_objs(uint32_t bucket)
{
   return (SLAB_SIZE - sizeof(gc_slab)) / gc_bucket_obj_size(bucket);
}

static gc_block_header *
alloc_from_slab(gc_slab *slab, uint32_t bucket)
{
   uint32_t size = gc_bucket_obj_size(bucket);
   gc_block_header *header;
   if (slab->freelist) {
      /* Prioritize already-allocated chunks, since they probably have a page
       * backing them.
       */
      header = slab->freelist;
      slab->freelist = get_gc_freelist_next(slab->freelist);
   } else if (slab->next_available + size <= ((char *) slab) + SLAB_SIZE) {
      header = (gc_block_header *) slab->next_available;
      header->slab_offset = (char *) header - (char *) slab;
      header->bucket = bucket;
      slab->next_available += size;
   } else {
      return NULL;
   }

   slab->num_allocated++;
   slab->num_free--;
   if (!slab->num_free)
      list_del(&slab->free_link);
   return header;
}

static void
free_slab(gc_slab *slab)
{
   if (list_is_linked(&slab->free_link))
      list_del(&slab->free_link);
   list_del(&slab->link);
   ralloc_free(slab);
}

static void
free_from_slab(gc_block_header *header, bool keep_empty_slabs)
{
   gc_slab *slab = get_gc_slab(header);

   if (slab->num_allocated == 1 && !(keep_empty_slabs && list_is_singular(&slab->free_link))) {
      /* Free the slab if this is the last object. */
      free_slab(slab);
      return;
   } else if (slab->num_free == 0) {
      list_add(&slab->free_link, &slab->ctx->slabs[header->bucket].free_slabs);
   } else {
      /* Keep the free list sorted by the number of free objects in ascending order. By preferring
       * to allocate from the slab with the fewest free objects, we help free the slabs with many
       * free objects.
       */
      while (slab->free_link.next != &slab->ctx->slabs[header->bucket].free_slabs &&
             slab->num_free > list_entry(slab->free_link.next, gc_slab, free_link)->num_free) {
         gc_slab *next = list_entry(slab->free_link.next, gc_slab, free_link);

         /* Move "slab" to after "next". */
         list_move_to(&slab->free_link, &next->free_link);
      }
   }

   set_gc_freelist_next(header, slab->freelist);
   slab->freelist = header;

   slab->num_allocated--;
   slab->num_free++;
}

static uint32_t
get_slab_size(uint32_t bucket)
{
   /* SLAB_SIZE rounded down to a multiple of the object size so that it's not larger than what can
    * be used.
    */
   uint32_t obj_size = gc_bucket_obj_size(bucket);
   uint32_t num_objs = gc_bucket_num_objs(bucket);
   return align((uint32_t)sizeof(gc_slab) + num_objs * obj_size, alignof(gc_slab));
}

static gc_slab *
create_slab(gc_ctx *ctx, unsigned bucket)
{
   gc_slab *slab = ralloc_size(ctx, get_slab_size(bucket));
   if (unlikely(!slab))
      return NULL;

   slab->ctx = ctx;
   slab->freelist = NULL;
   slab->next_available = (char*)(slab + 1);
   slab->num_allocated = 0;
   slab->num_free = gc_bucket_num_objs(bucket);

   list_addtail(&slab->link, &ctx->slabs[bucket].slabs);
   list_addtail(&slab->free_link, &ctx->slabs[bucket].free_slabs);

   return slab;
}

void *
gc_alloc_size(gc_ctx *ctx, size_t size, size_t alignment)
{
   assert(ctx);
   assert(util_is_power_of_two_nonzero_uintptr(alignment));

   alignment = MAX2(alignment, alignof(gc_block_header));

   /* Alignment will add at most align-alignof(gc_block_header) bytes of padding to the header, and
    * the IS_PADDING byte can only encode up to 127.
    */
   assert((alignment - alignof(gc_block_header)) <= 127);

   /* We can only align as high as the slab is. */
   assert(alignment <= HEADER_ALIGN);

   size_t header_size = align64(sizeof(gc_block_header), alignment);
   size = align64(size, alignment);
   size += header_size;

   gc_block_header *header = NULL;
   if (size <= MAX_FREELIST_SIZE) {
      uint32_t bucket = gc_bucket_for_size((uint32_t)size);
      if (list_is_empty(&ctx->slabs[bucket].free_slabs) && !create_slab(ctx, bucket))
         return NULL;
      gc_slab *slab = list_first_entry(&ctx->slabs[bucket].free_slabs, gc_slab, free_link);
      header = alloc_from_slab(slab, bucket);
   } else {
      header = ralloc_size(ctx, size);
      if (unlikely(!header))
         return NULL;
      /* Mark the header as allocated directly, so we know to actually free it. */
      header->bucket = NUM_FREELIST_BUCKETS;
   }

   header->flags = ctx->current_gen | IS_USED;
#ifndef NDEBUG
   header->canary = GC_CANARY;
#endif

   uint8_t *ptr = (uint8_t *)header + header_size;
   if ((header_size - 1) != offsetof(gc_block_header, flags))
      ptr[-1] = IS_PADDING | (header_size - sizeof(gc_block_header));

   assert(((uintptr_t)ptr & (alignment - 1)) == 0);
   return ptr;
}

void *
gc_zalloc_size(gc_ctx *ctx, size_t size, size_t alignment)
{
   void *ptr = gc_alloc_size(ctx, size, alignment);

   if (likely(ptr))
      memset(ptr, 0, size);

   return ptr;
}

void
gc_free(void *ptr)
{
   if (!ptr)
      return;

   gc_block_header *header = get_gc_header(ptr);
   header->flags &= ~IS_USED;

   if (header->bucket < NUM_FREELIST_BUCKETS)
      free_from_slab(header, true);
   else
      ralloc_free(header);
}

gc_ctx *gc_get_context(void *ptr)
{
   gc_block_header *header = get_gc_header(ptr);

   if (header->bucket < NUM_FREELIST_BUCKETS)
      return get_gc_slab(header)->ctx;
   else
      return ralloc_parent(header);
}

void
gc_sweep_start(gc_ctx *ctx)
{
   ctx->current_gen ^= CURRENT_GENERATION;

   ctx->rubbish = ralloc_context(NULL);
   ralloc_adopt(ctx->rubbish, ctx);
}

void
gc_mark_live(gc_ctx *ctx, const void *mem)
{
   gc_block_header *header = get_gc_header(mem);
   if (header->bucket < NUM_FREELIST_BUCKETS)
      header->flags ^= CURRENT_GENERATION;
   else
      ralloc_steal(ctx, header);
}

void
gc_sweep_end(gc_ctx *ctx)
{
   assert(ctx->rubbish);

   for (unsigned i = 0; i < NUM_FREELIST_BUCKETS; i++) {
      unsigned obj_size = gc_bucket_obj_size(i);
      list_for_each_entry_safe(gc_slab, slab, &ctx->slabs[i].slabs, link) {
         if (!slab->num_allocated) {
            free_slab(slab);
            continue;
         }

         for (char *ptr = (char*)(slab + 1); ptr != slab->next_available; ptr += obj_size) {
            gc_block_header *header = (gc_block_header *)ptr;
            if (!(header->flags & IS_USED))
               continue;
            if ((header->flags & CURRENT_GENERATION) == ctx->current_gen)
               continue;

            bool last = slab->num_allocated == 1;

            header->flags &= ~IS_USED;
            free_from_slab(header, false);

            if (last)
               break;
         }
      }
   }

   for (unsigned i = 0; i < NUM_FREELIST_BUCKETS; i++) {
      list_for_each_entry(gc_slab, slab, &ctx->slabs[i].slabs, link) {
         assert(slab->num_allocated > 0); /* free_from_slab() should free it otherwise */
         ralloc_steal(ctx, slab);
      }
   }

   ralloc_free(ctx->rubbish);
   ctx->rubbish = NULL;
}
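
/* A hedged sketch of a full collection cycle using the entry points above;
 * "roots" and "num_roots" are hypothetical caller data.  Anything allocated
 * from "gc" that is not marked between start and end is freed:
 *
 *    gc_sweep_start(gc);
 *    for (unsigned i = 0; i < num_roots; i++)
 *       gc_mark_live(gc, roots[i]);
 *    gc_sweep_end(gc);
 */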

/***************************************************************************
 * Linear allocator for short-lived allocations.
 ***************************************************************************
 *
 * The allocator consists of a parent node (2K buffer), which requires
 * a ralloc parent, and child nodes (allocations). Child nodes can't be freed
 * directly, because the parent doesn't track them. You have to release
 * the parent node in order to release all its children.
 *
 * The allocator uses a fixed-size buffer with a monotonically increasing
 * offset after each allocation. When the buffer is full, another buffer
 * is allocated, using the linear parent node as the ralloc parent.
 *
 * The linear parent node is always the first buffer and keeps track of all
 * other buffers.
 */
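
/* Illustrative usage ("rctx" is a hypothetical ralloc context).  Children
 * cannot be freed individually; the whole context goes at once:
 *
 *    linear_ctx *lin = linear_context(rctx);
 *    char *buf = linear_alloc_child(lin, 64);
 *    ...
 *    linear_free_context(lin);   // releases buf and every other child
 */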

#define SUBALLOC_ALIGNMENT 8
#define LMAGIC_CONTEXT 0x87b9c7d3
#define LMAGIC_NODE    0x87b910d3

struct linear_ctx {

   alignas(HEADER_ALIGN)

#ifndef NDEBUG
   unsigned magic;   /* for debugging */
#endif
   unsigned min_buffer_size;

   unsigned offset;  /* points to the first unused byte in the latest buffer */
   unsigned size;    /* size of the latest buffer */
   void *latest;     /* the only buffer that has free space */
};

typedef struct linear_ctx linear_ctx;

#ifndef NDEBUG
struct linear_node_canary {
   alignas(HEADER_ALIGN)
   unsigned magic;
   unsigned offset;  /* points to the first unused byte in *this* buffer */
};

typedef struct linear_node_canary linear_node_canary;

static linear_node_canary *
get_node_canary(void *ptr)
{
   return (void *)((char *)ptr - sizeof(linear_node_canary));
}
#endif

static unsigned
get_node_canary_size()
{
#ifndef NDEBUG
   return sizeof(linear_node_canary);
#else
   return 0;
#endif
}

void *
linear_alloc_child(linear_ctx *ctx, unsigned size)
{
   assert(ctx->magic == LMAGIC_CONTEXT);
   assert(get_node_canary(ctx->latest)->magic == LMAGIC_NODE);
   assert(get_node_canary(ctx->latest)->offset == ctx->offset);

   size = ALIGN_POT(size, SUBALLOC_ALIGNMENT);

   if (unlikely(ctx->offset + size > ctx->size)) {
      /* allocate a new node */
      unsigned node_size = size;
      if (likely(node_size < ctx->min_buffer_size))
         node_size = ctx->min_buffer_size;

      const unsigned canary_size = get_node_canary_size();
      const unsigned full_size = canary_size + node_size;

      /* linear context is also a ralloc context */
      char *ptr = ralloc_size(ctx, full_size);
      if (unlikely(!ptr))
         return NULL;

#ifndef NDEBUG
      linear_node_canary *canary = (void *) ptr;
      canary->magic = LMAGIC_NODE;
      canary->offset = 0;
#endif

      /* If the new buffer is going to be full, don't update `latest`
       * pointer.  Either the current one is also full, so doesn't
       * matter, or the current one is not full, so there's still chance
       * to use that space.
       */
      if (unlikely(size == node_size)) {
#ifndef NDEBUG
         canary->offset = size;
#endif
         assert((uintptr_t)(ptr + canary_size) % SUBALLOC_ALIGNMENT == 0);
         return ptr + canary_size;
      }

      ctx->offset = 0;
      ctx->size = node_size;
      ctx->latest = ptr + canary_size;
   }

   void *ptr = (char *)ctx->latest + ctx->offset;
   ctx->offset += size;

#ifndef NDEBUG
   linear_node_canary *canary = get_node_canary(ctx->latest);
   canary->offset += size;
#endif

   assert((uintptr_t)ptr % SUBALLOC_ALIGNMENT == 0);
   return ptr;
}

linear_ctx *
linear_context(void *ralloc_ctx)
{
   const linear_opts opts = {0};
   return linear_context_with_opts(ralloc_ctx, &opts);
}

linear_ctx *
linear_context_with_opts(void *ralloc_ctx, const linear_opts *opts)
{
   linear_ctx *ctx;

   if (unlikely(!ralloc_ctx))
      return NULL;

   const unsigned default_min_buffer_size = 2048;
   const unsigned min_buffer_size =
      MAX2(ALIGN_POT(opts->min_buffer_size, default_min_buffer_size),
           default_min_buffer_size);

   const unsigned size = min_buffer_size;
   const unsigned canary_size = get_node_canary_size();
   const unsigned full_size =
      sizeof(linear_ctx) + canary_size + size;

   ctx = ralloc_size(ralloc_ctx, full_size);
   if (unlikely(!ctx))
      return NULL;

   ctx->min_buffer_size = min_buffer_size;

   ctx->offset = 0;
   ctx->size = size;
   ctx->latest = (char *)&ctx[1] + canary_size;
#ifndef NDEBUG
   ctx->magic = LMAGIC_CONTEXT;
   linear_node_canary *canary = get_node_canary(ctx->latest);
   canary->magic = LMAGIC_NODE;
   canary->offset = 0;
#endif

   return ctx;
}

void *
linear_zalloc_child(linear_ctx *ctx, unsigned size)
{
   void *ptr = linear_alloc_child(ctx, size);

   if (likely(ptr))
      memset(ptr, 0, size);
   return ptr;
}

void
linear_free_context(linear_ctx *ctx)
{
   if (unlikely(!ctx))
      return;

   assert(ctx->magic == LMAGIC_CONTEXT);

   /* Linear context is also the ralloc parent of extra nodes. */
   ralloc_free(ctx);
}

void
ralloc_steal_linear_context(void *new_ralloc_ctx, linear_ctx *ctx)
{
   if (unlikely(!ctx))
      return;

   assert(ctx->magic == LMAGIC_CONTEXT);

   /* Linear context is also the ralloc parent of extra nodes. */
   ralloc_steal(new_ralloc_ctx, ctx);
}

void *
ralloc_parent_of_linear_context(linear_ctx *ctx)
{
   assert(ctx->magic == LMAGIC_CONTEXT);
   return PTR_FROM_HEADER(get_header(ctx)->parent);
}

/* All code below is pretty much copied from ralloc and only the alloc
 * calls are different.
 */

char *
linear_strdup(linear_ctx *ctx, const char *str)
{
   unsigned n;
   char *ptr;

   if (unlikely(!str))
      return NULL;

   n = strlen(str);
   ptr = linear_alloc_child(ctx, n + 1);
   if (unlikely(!ptr))
      return NULL;

   memcpy(ptr, str, n);
   ptr[n] = '\0';
   return ptr;
}

char *
linear_asprintf(linear_ctx *ctx, const char *fmt, ...)
{
   char *ptr;
   va_list args;
   va_start(args, fmt);
   ptr = linear_vasprintf(ctx, fmt, args);
   va_end(args);
   return ptr;
}

char *
linear_vasprintf(linear_ctx *ctx, const char *fmt, va_list args)
{
   unsigned size = u_printf_length(fmt, args) + 1;

   char *ptr = linear_alloc_child(ctx, size);
   if (ptr != NULL)
      vsnprintf(ptr, size, fmt, args);

   return ptr;
}

bool
linear_asprintf_append(linear_ctx *ctx, char **str, const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = linear_vasprintf_append(ctx, str, fmt, args);
   va_end(args);
   return success;
}

bool
linear_vasprintf_append(linear_ctx *ctx, char **str, const char *fmt, va_list args)
{
   size_t existing_length;
   assert(str != NULL);
   existing_length = *str ? strlen(*str) : 0;
   return linear_vasprintf_rewrite_tail(ctx, str, &existing_length, fmt, args);
}

bool
linear_asprintf_rewrite_tail(linear_ctx *ctx, char **str, size_t *start,
                             const char *fmt, ...)
{
   bool success;
   va_list args;
   va_start(args, fmt);
   success = linear_vasprintf_rewrite_tail(ctx, str, start, fmt, args);
   va_end(args);
   return success;
}

bool
linear_vasprintf_rewrite_tail(linear_ctx *ctx, char **str, size_t *start,
                              const char *fmt, va_list args)
{
   size_t new_length;
   char *ptr;

   assert(str != NULL);

   if (unlikely(*str == NULL)) {
      *str = linear_vasprintf(ctx, fmt, args);
      *start = strlen(*str);
      return true;
   }

   new_length = u_printf_length(fmt, args);

   ptr = linear_alloc_child(ctx, *start + new_length + 1);
   if (unlikely(ptr == NULL))
      return false;

   memcpy(ptr, *str, *start);

   vsnprintf(ptr + *start, new_length + 1, fmt, args);
   *str = ptr;
   *start += new_length;
   return true;
}

/* helper routine for strcat/strncat - n is the exact amount to copy */
static bool
linear_cat(linear_ctx *ctx, char **dest, const char *str, unsigned n)
{
   char *both;
   unsigned existing_length;
   assert(dest != NULL && *dest != NULL);

   existing_length = strlen(*dest);
   both = linear_alloc_child(ctx, existing_length + n + 1);
   if (unlikely(both == NULL))
      return false;

   memcpy(both, *dest, existing_length);
   memcpy(both + existing_length, str, n);
   both[existing_length + n] = '\0';

   *dest = both;
   return true;
}

bool
linear_strcat(linear_ctx *ctx, char **dest, const char *str)
{
   return linear_cat(ctx, dest, str, strlen(str));
}

void *
linear_alloc_child_array(linear_ctx *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return linear_alloc_child(ctx, size * count);
}

void *
linear_zalloc_child_array(linear_ctx *ctx, size_t size, unsigned count)
{
   if (count > SIZE_MAX/size)
      return NULL;

   return linear_zalloc_child(ctx, size * count);
}

typedef struct {
   FILE *f;
   unsigned indent;

   unsigned ralloc_count;
   unsigned linear_count;
   unsigned gc_count;

   /* These don't include padding or metadata from suballocators. */
   unsigned content_bytes;
   unsigned ralloc_metadata_bytes;
   unsigned linear_metadata_bytes;
   unsigned gc_metadata_bytes;

   bool inside_linear;
   bool inside_gc;
} ralloc_print_info_state;

static void
ralloc_print_info_helper(ralloc_print_info_state *state, const ralloc_header *info)
{
   FILE *f = state->f;

   if (f) {
      for (unsigned i = 0; i < state->indent; i++) fputc(' ', f);
      fprintf(f, "%p", info);
   }

   /* TODO: Account for padding used in various places. */

#ifndef NDEBUG
   assert(info->canary == CANARY);
   if (f) fprintf(f, " (%d bytes)", info->size);
   state->content_bytes += info->size;
   state->ralloc_metadata_bytes += sizeof(ralloc_header);

   const void *ptr = PTR_FROM_HEADER(info);
   const linear_ctx *lin_ctx = ptr;
   const gc_ctx *gc_ctx = ptr;

   if (lin_ctx->magic == LMAGIC_CONTEXT) {
      if (f) fprintf(f, " (linear context)");
      assert(!state->inside_gc && !state->inside_linear);
      state->inside_linear = true;
      state->linear_metadata_bytes += sizeof(linear_ctx);
      state->content_bytes -= sizeof(linear_ctx);
      state->linear_count++;
   } else if (gc_ctx->canary == GC_CONTEXT_CANARY) {
      if (f) fprintf(f, " (gc context)");
      assert(!state->inside_gc && !state->inside_linear);
      state->inside_gc = true;
      state->gc_metadata_bytes += sizeof(gc_block_header);
   } else if (state->inside_linear) {
      const linear_node_canary *lin_node = ptr;
      if (lin_node->magic == LMAGIC_NODE) {
         if (f) fprintf(f, " (linear node buffer)");
         state->content_bytes -= sizeof(linear_node_canary);
         state->linear_metadata_bytes += sizeof(linear_node_canary);
         state->linear_count++;
      }
   } else if (state->inside_gc) {
      if (f) fprintf(f, " (gc slab or large block)");
      state->gc_count++;
   }
#endif

   state->ralloc_count++;
   if (f) fprintf(f, "\n");

   const ralloc_header *c = info->child;
   state->indent += 2;
   while (c != NULL) {
      ralloc_print_info_helper(state, c);
      c = c->next;
   }
   state->indent -= 2;

#ifndef NDEBUG
   if (lin_ctx->magic == LMAGIC_CONTEXT) state->inside_linear = false;
   else if (gc_ctx->canary == GC_CONTEXT_CANARY) state->inside_gc = false;
#endif
}

void
ralloc_print_info(FILE *f, const void *p, unsigned flags)
{
   ralloc_print_info_state state = {
      .f =  ((flags & RALLOC_PRINT_INFO_SUMMARY_ONLY) == 1) ? NULL : f,
   };

   const ralloc_header *info = get_header(p);
   ralloc_print_info_helper(&state, info);

   fprintf(f, "==== RALLOC INFO ptr=%p info=%p\n"
              "ralloc allocations    = %d\n"
              "  - linear            = %d\n"
              "  - gc                = %d\n"
              "  - other             = %d\n",
              p, info,
              state.ralloc_count,
              state.linear_count,
              state.gc_count,
              state.ralloc_count - state.linear_count - state.gc_count);

   if (state.content_bytes) {
      fprintf(f,
              "content bytes         = %d\n"
              "ralloc metadata bytes = %d\n"
              "linear metadata bytes = %d\n",
              state.content_bytes,
              state.ralloc_metadata_bytes,
              state.linear_metadata_bytes);
   }

   fprintf(f, "====\n");
}
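
/* Example invocation (sketch): dump the allocation tree under "ctx" to
 * stderr, or pass RALLOC_PRINT_INFO_SUMMARY_ONLY in flags to print only
 * the summary totals:
 *
 *    ralloc_print_info(stderr, ctx, 0);
 */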