/*
 * Copyright (c) 2015 Google, Inc. All rights reserved
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <trace.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <kernel/thread.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <lib/cmpctmalloc.h>
#include <lib/heap.h>
#include <lib/page_alloc.h>

// Malloc implementation tuned for space.
//
// All allocation and freeing happens under a single global mutex.  Freelist
// entries are kept in linked lists, with 8 bucket sizes per binary order of
// magnitude.  The allocation header is two words, and free areas are eagerly
// coalesced when they are freed.

#ifdef DEBUG
#define CMPCT_DEBUG
#endif

#define LOCAL_TRACE 0

#define ALLOC_FILL 0x99
#define FREE_FILL 0x77
#define PADDING_FILL 0x55

#if WITH_KERNEL_VM && !defined(HEAP_GROW_SIZE)
#define HEAP_GROW_SIZE (1 * 1024 * 1024) /* Grow aggressively */
#elif !defined(HEAP_GROW_SIZE)
#define HEAP_GROW_SIZE (4 * 1024) /* Grow less aggressively */
#endif

STATIC_ASSERT(IS_PAGE_ALIGNED(HEAP_GROW_SIZE));

// Individual allocations above 4MB are just fetched directly from the page
// allocator via heap_grow().
#define HEAP_ALLOC_VIRTUAL_BITS 22

// When we grow the heap we have to have somewhere in the freelist to put the
// resulting freelist entry, so the growth increment must not exceed the
// largest bucketed size.
STATIC_ASSERT(HEAP_GROW_SIZE <= (1u << HEAP_ALLOC_VIRTUAL_BITS));

// Buckets for allocations.  The smallest 15 buckets are 8, 16, 24, etc. up to
// 120 bytes.  After that we round up to the nearest size that can be written
// /^0*1...0*$/ in binary, giving 8 buckets per order of binary magnitude.  The
// freelist entries in a given bucket have at least the given size, plus the
// header size.  On 64 bit the 8 byte bucket is unused, since the smallest free
// area needs 16 bytes after the header to hold the freelist links, but we keep
// it for simplicity.
#define NUMBER_OF_BUCKETS (1 + 15 + (HEAP_ALLOC_VIRTUAL_BITS - 7) * 8)
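// With HEAP_ALLOC_VIRTUAL_BITS == 22 this works out to 1 + 15 + 15 * 8 == 136
// buckets.  For example, the eight buckets covering the 256-511 byte range
// hold free areas of at least 256, 288, 320, 352, 384, 416, 448 and 480 bytes.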

// All individual memory areas on the heap start with this.
typedef struct header_struct {
    struct header_struct *left;  // Pointer to the previous area in memory order.
    size_t size;
} header_t;

typedef struct free_struct {
    header_t header;
    struct free_struct *next;
    struct free_struct *prev;
} free_t;

struct heap {
    size_t size;
    size_t remaining;
    mutex_t lock;
    free_t *free_lists[NUMBER_OF_BUCKETS];
    // One bit per bucket, telling us whether that bucket's freelist has any
    // entries in it.
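    // Bucket i uses bit (31 - (i & 31)) of word i / 32 (most significant bit
    // first), so find_nonempty_bucket() can search with a count-leading-zeros
    // scan.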
#define BUCKET_WORDS (((NUMBER_OF_BUCKETS) + 31) >> 5)
    uint32_t free_list_bits[BUCKET_WORDS];
};

// Heap static vars.
static struct heap theheap;

static ssize_t heap_grow(size_t len, free_t **bucket);

static void lock(void)
{
    mutex_acquire(&theheap.lock);
}

static void unlock(void)
{
    mutex_release(&theheap.lock);
}

static void dump_free(header_t *header)
{
    dprintf(INFO, "\t\tbase %p, end 0x%lx, len 0x%zx\n", header, (vaddr_t)header + header->size, header->size);
}

void cmpct_dump(void)
{
    lock();
    dprintf(INFO, "Heap dump (using cmpctmalloc):\n");
    dprintf(INFO, "\tsize %lu, remaining %lu\n",
            (unsigned long)theheap.size,
            (unsigned long)theheap.remaining);

    dprintf(INFO, "\tfree list:\n");
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        bool header_printed = false;
        free_t *free_area = theheap.free_lists[i];
        for (; free_area != NULL; free_area = free_area->next) {
            ASSERT(free_area != free_area->next);
            if (!header_printed) {
                dprintf(INFO, "\tbucket %d\n", i);
                header_printed = true;
            }
            dump_free(&free_area->header);
        }
    }
    unlock();
}

// Operates in sizes that don't include the allocation header.
static int size_to_index_helper(
    size_t size, size_t *rounded_up_out, int adjust, int increment)
{
    // First buckets are simply 8-spaced up to 128.
    if (size <= 128) {
        if (sizeof(size_t) == 8u && size <= sizeof(free_t) - sizeof(header_t)) {
            *rounded_up_out = sizeof(free_t) - sizeof(header_t);
        } else {
            *rounded_up_out = size;
        }
        // No allocation is smaller than 8 bytes, so the first bucket is for 8
        // byte spaces (not including the header).  For 64 bit, the free list
        // struct is 16 bytes larger than the header, so no allocation can be
        // smaller than that (otherwise how to free it), but we have empty 8
        // and 16 byte buckets for simplicity.
        return (size >> 3) - 1;
    }

    // We are going to go up to the next size to round up, but if we hit a
    // bucket size exactly we don't want to go up. By subtracting 8 here, we
    // will do the right thing (the carry propagates up for the round numbers
    // we are interested in).
    size += adjust;
    // After 128 the buckets are logarithmically spaced, every 16 up to 256,
    // every 32 up to 512 etc.  This can be thought of as rows of 8 buckets.
    // GCC intrinsic count-leading-zeros.
    // Eg. 128-255 has 24 leading zeros and we want row to be 4.
    unsigned row = sizeof(size_t) * 8 - 4 - __builtin_clzl(size);
    // For row 4 we want to shift down 4 bits.
    unsigned column = (size >> row) & 7;
    int row_column = (row << 3) | column;
    row_column += increment;
    size = (8 + (row_column & 7)) << (row_column >> 3);
    *rounded_up_out = size;
    // We start with 15 buckets, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96,
    // 104, 112, 120.  Then we have row 4, sizes 128 and up, with the
    // row-column 8 and up.
    int answer = row_column + 15 - 32;
    DEBUG_ASSERT(answer < NUMBER_OF_BUCKETS);
    return answer;
}
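
// Worked example: cmpct_alloc(250) rounds the request up to 256 bytes, and
// size_to_index_allocating() returns bucket 23, whose entries are all at least
// 256 bytes.  Freeing a 256 byte area files it in bucket 23 as well, but a
// 248 byte free area goes one bucket down (bucket 22, 240 bytes), because it
// could not satisfy a 256 byte request.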

// Round up size to next bucket when allocating.
static int size_to_index_allocating(size_t size, size_t *rounded_up_out)
{
    size_t rounded = round_up(size, 8);
    return size_to_index_helper(rounded, rounded_up_out, -8, 1);
}

// Round down size to next bucket when freeing.
static int size_to_index_freeing(size_t size)
{
    size_t unused;
    return size_to_index_helper(size, &unused, 0, 0);
}

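// An area is marked as free by setting the low bit of its header's left
// pointer.  Every area is at least 8 byte aligned, so that bit is never part
// of a real address.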
inline header_t *tag_as_free(void *left)
{
    return (header_t *)((uintptr_t)left | 1);
}

inline bool is_tagged_as_free(header_t *header)
{
    return ((uintptr_t)(header->left) & 1) != 0;
}

inline header_t *untag(void *left)
{
    return (header_t *)((uintptr_t)left & ~1);
}

inline header_t *right_header(header_t *header)
{
    return (header_t *)((char *)header + header->size);
}

inline static void set_free_list_bit(int index)
{
    theheap.free_list_bits[index >> 5] |= (1u << (31 - (index & 0x1f)));
}

inline static void clear_free_list_bit(int index)
{
    theheap.free_list_bits[index >> 5] &= ~(1u << (31 - (index & 0x1f)));
}

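// Returns the index of the first non-empty bucket at or above |index|, or -1
// if every bucket from |index| upwards is empty.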
static int find_nonempty_bucket(int index)
{
    uint32_t mask = (1u << (31 - (index & 0x1f))) - 1;
    mask = mask * 2 + 1;
    mask &= theheap.free_list_bits[index >> 5];
    if (mask != 0) return (index & ~0x1f) + __builtin_clz(mask);
    for (index = round_up(index + 1, 32); index <= NUMBER_OF_BUCKETS; index += 32) {
        mask = theheap.free_list_bits[index >> 5];
        if (mask != 0u) return index + __builtin_clz(mask);
    }
    return -1;
}

static bool is_start_of_os_allocation(header_t *header)
{
    return header->left == untag(NULL);
}

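// Creates a free area of |size| bytes (header included) at |address|, whose
// left neighbour in memory is |left|, and pushes it onto the front of the
// appropriate bucket's freelist.  If |bucket| is non-NULL that list is used
// instead of picking one by size.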
static void create_free_area(void *address, void *left, size_t size, free_t **bucket)
{
    free_t *free_area = (free_t *)address;
    free_area->header.size = size;
    free_area->header.left = tag_as_free(left);
    if (bucket == NULL) {
        int index = size_to_index_freeing(size - sizeof(header_t));
        set_free_list_bit(index);
        bucket = &theheap.free_lists[index];
    }
    free_t *old_head = *bucket;
    if (old_head != NULL) old_head->prev = free_area;
    free_area->next = old_head;
    free_area->prev = NULL;
    *bucket = free_area;
    theheap.remaining += size;
#ifdef CMPCT_DEBUG
    memset(free_area + 1, FREE_FILL, size - sizeof(free_t));
#endif
}

static bool is_end_of_os_allocation(char *address)
{
    return ((header_t *)address)->size == 0;
}

static void free_to_os(header_t *header, size_t size)
{
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
    page_free(header, size >> PAGE_SIZE_SHIFT);
    theheap.size -= size;
}

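// Frees the area of |size| bytes at |address|.  If the area, together with the
// sentinels on either side, makes up a whole OS allocation, the pages are
// handed back to the page allocator; otherwise it becomes a freelist entry.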
static void free_memory(void *address, void *left, size_t size)
{
    left = untag(left);
    if (IS_PAGE_ALIGNED(left) &&
            is_start_of_os_allocation(left) &&
            is_end_of_os_allocation((char *)address + size)) {
        free_to_os(left, size + ((header_t *)left)->size + sizeof(header_t));
    } else {
        create_free_area(address, left, size, NULL);
    }
}

static void unlink_free(free_t *free_area, int bucket)
{
    theheap.remaining -= free_area->header.size;
    ASSERT(theheap.remaining < 4000000000u);
    free_t *next = free_area->next;
    free_t *prev = free_area->prev;
    if (theheap.free_lists[bucket] == free_area) {
        theheap.free_lists[bucket] = next;
        if (next == NULL) clear_free_list_bit(bucket);
    }
    if (prev != NULL) prev->next = next;
    if (next != NULL) next->prev = prev;
}

static void unlink_free_unknown_bucket(free_t *free_area)
{
    unlink_free(free_area, size_to_index_freeing(free_area->header.size - sizeof(header_t)));
}

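// Writes an in-use allocation header at |address| + |offset|, recording |size|
// and the |left| neighbour, and returns the payload pointer just past the
// header.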
static void *create_allocation_header(
    void *address, size_t offset, size_t size, void *left)
{
    header_t *standalone = (header_t *)((char *)address + offset);
    standalone->left = untag(left);
    standalone->size = size;
    return standalone + 1;
}

static void FixLeftPointer(header_t *right, header_t *new_left)
{
    int tag = (uintptr_t)right->left & 1;
    right->left = (header_t *)(((uintptr_t)new_left & ~1) | tag);
}

static void WasteFreeMemory(void)
{
    while (theheap.remaining != 0) cmpct_alloc(1);
}

// If we just make a big allocation it gets rounded off.  If we actually
// want to use a reasonably accurate amount of memory for test purposes, we
// have to do many small allocations.
static void *TestTrimHelper(ssize_t target)
{
    char *answer = NULL;
    size_t remaining = theheap.remaining;
    while (theheap.remaining - target > 512) {
        char *next_block = cmpct_alloc(8 + ((theheap.remaining - target) >> 2));
        *(char **)next_block = answer;
        answer = next_block;
        if (theheap.remaining > remaining) return answer;
        // Abandon attempt to hit particular freelist entry size if we accidentally got more memory
        // from the OS.
        remaining = theheap.remaining;
    }
    return answer;
}

static void TestTrimFreeHelper(char *block)
{
    while (block) {
        char *next_block = *(char **)block;
        cmpct_free(block);
        block = next_block;
    }
}

static void cmpct_test_trim(void)
{
    WasteFreeMemory();

    size_t test_sizes[200];
    int sizes = 0;

    for (size_t s = 1; s < PAGE_SIZE * 4; s = (s + 1) * 1.1) {
        test_sizes[sizes++] = s;
        ASSERT(sizes < 200);
    }
    for (ssize_t s = -32; s <= 32; s += 8) {
        test_sizes[sizes++] = PAGE_SIZE + s;
        ASSERT(sizes < 200);
    }

    // Test allocations at the start of an OS allocation.
    for (int with_second_alloc = 0; with_second_alloc < 2; with_second_alloc++) {
        for (int i = 0; i < sizes; i++) {
            size_t s = test_sizes[i];

            char *a, *a2 = NULL;
            a = cmpct_alloc(s);
            if (with_second_alloc) {
                a2 = cmpct_alloc(1);
                if (s < PAGE_SIZE >> 1) {
                    // It is the intention of the test that a is at the start of an OS allocation
                    // and that a2 is "right after" it.  Otherwise we are not testing what I
                    // thought.  OS allocations are certainly not smaller than a page, so check in
                    // that case.
                    ASSERT((uintptr_t)(a2 - a) < s * 1.13 + 48);
                }
            }
            cmpct_trim();
            size_t remaining = theheap.remaining;
            // We should have < 1 page on either side of the a allocation.
            ASSERT(remaining < PAGE_SIZE * 2);
            cmpct_free(a);
            if (with_second_alloc) {
                // Now only a2 is holding onto the OS allocation.
                ASSERT(theheap.remaining > remaining);
            } else {
                ASSERT(theheap.remaining == 0);
            }
            remaining = theheap.remaining;
            cmpct_trim();
            ASSERT(theheap.remaining <= remaining);
            // If a was at least one page then the trim should have freed up that page.
            if (s >= PAGE_SIZE && with_second_alloc) ASSERT(theheap.remaining < remaining);
            if (with_second_alloc) cmpct_free(a2);
        }
        ASSERT(theheap.remaining == 0);
    }

    ASSERT(theheap.remaining == 0);

    // Now test allocations near the end of an OS allocation.
    for (ssize_t wobble = -64; wobble <= 64; wobble += 8) {
        for (int i = 0; i < sizes; i++) {
            size_t s = test_sizes[i];

            if ((ssize_t)s + wobble < 0) continue;

            char *start_of_os_alloc = cmpct_alloc(1);

            // If the OS allocations are very small this test does not make sense.
            if (theheap.remaining <= s + wobble) {
                cmpct_free(start_of_os_alloc);
                continue;
            }

            char *big_bit_in_the_middle = TestTrimHelper(s + wobble);
            size_t remaining = theheap.remaining;

            // If the remaining is big we started a new OS allocation and the test
            // makes no sense.
            if (remaining > 128 + s * 1.13 + wobble) {
                cmpct_free(start_of_os_alloc);
                TestTrimFreeHelper(big_bit_in_the_middle);
                continue;
            }

            cmpct_free(start_of_os_alloc);
            remaining = theheap.remaining;

            // This trim should sometimes trim a page off the end of the OS allocation.
            cmpct_trim();
            ASSERT(theheap.remaining <= remaining);
            remaining = theheap.remaining;

            // We should have < 1 page on either side of the big allocation.
            ASSERT(remaining < PAGE_SIZE * 2);

            TestTrimFreeHelper(big_bit_in_the_middle);
        }
    }
}


static void cmpct_test_buckets(void)
{
    size_t rounded;
    unsigned bucket;
    // Check for the 8-spaced buckets up to 128.
    for (unsigned i = 1; i <= 128; i++) {
        // Round up when allocating.
        bucket = size_to_index_allocating(i, &rounded);
        unsigned expected = (round_up(i, 8) >> 3) - 1;
        ASSERT(bucket == expected);
        ASSERT(IS_ALIGNED(rounded, 8));
        ASSERT(rounded >= i);
        if (i >= sizeof(free_t) - sizeof(header_t)) {
            // Once we get above the size of the free area struct (4 words), we
            // won't round up much for these small sizes.
            ASSERT(rounded - i < 8);
        }
        // Only rounded sizes are freed.
        if ((i & 7) == 0) {
            // Up to size 128 we have exact buckets for each multiple of 8.
            ASSERT(bucket == (unsigned)size_to_index_freeing(i));
        }
    }
    int bucket_base = 7;
    for (unsigned j = 16; j < 1024; j *= 2, bucket_base += 8) {
        // Note the "<=", which means we test the powers of 2 twice, checking
        // that both ways of calculating the bucket number agree.
        for (unsigned i = j * 8; i <= j * 16; i++) {
            // Round up to j multiple in this range when allocating.
            bucket = size_to_index_allocating(i, &rounded);
            unsigned expected = bucket_base + round_up(i, j) / j;
            ASSERT(bucket == expected);
            ASSERT(IS_ALIGNED(rounded, j));
            ASSERT(rounded >= i);
            ASSERT(rounded - i < j);
            // Only 8-rounded sizes are freed or chopped off the end of a free area
            // when allocating.
            if ((i & 7) == 0) {
                // When freeing, if we don't hit the size of the bucket precisely,
                // we have to put the free space into a smaller bucket, because
                // the buckets have entries that will always be big enough for
                // the corresponding allocation size (so we don't have to
                // traverse the free chains to find a big enough one).
                if ((i % j) == 0) {
                    ASSERT((int)bucket == size_to_index_freeing(i));
                } else {
                    ASSERT((int)bucket - 1 == size_to_index_freeing(i));
                }
            }
        }
    }
}

static void cmpct_test_get_back_newly_freed_helper(size_t size)
{
    void *allocated = cmpct_alloc(size);
    if (allocated == NULL) return;
    char *allocated2 = cmpct_alloc(8);
    char *expected_position = (char *)allocated + size;
    if (allocated2 < expected_position || allocated2 > expected_position + 128) {
        // If the allocated2 allocation is not in the same OS allocation as the
        // first allocation then the test may not work as expected (the memory
        // may be returned to the OS when we free the first allocation, and we
        // might not get it back).
        cmpct_free(allocated);
        cmpct_free(allocated2);
        return;
    }

    cmpct_free(allocated);
    void *allocated3 = cmpct_alloc(size);
    // To avoid churn and fragmentation we would want to get the newly freed
    // memory back again when we allocate the same size shortly after.
    ASSERT(allocated3 == allocated);
    cmpct_free(allocated2);
    cmpct_free(allocated3);
}

static void cmpct_test_get_back_newly_freed(void)
{
    size_t increment = 16;
    for (size_t i = 128; i <= 0x8000000; i *= 2, increment *= 2) {
        for (size_t j = i; j < i * 2; j += increment) {
            cmpct_test_get_back_newly_freed_helper(i - 8);
            cmpct_test_get_back_newly_freed_helper(i);
            cmpct_test_get_back_newly_freed_helper(i + 1);
        }
    }
    for (size_t i = 1024; i <= 2048; i++) {
        cmpct_test_get_back_newly_freed_helper(i);
    }
}

static void cmpct_test_return_to_os(void)
{
    cmpct_trim();
    size_t remaining = theheap.remaining;
    // This goes in a new OS allocation since the trim above removed any free
    // area big enough to contain it.
    void *a = cmpct_alloc(5000);
    void *b = cmpct_alloc(2500);
    cmpct_free(a);
    cmpct_free(b);
    // If things work as expected the new allocation is at the start of an OS
    // allocation.  There's just one sentinel and one header to the left of it.
    // If that's not the case then the allocation was met from some space in
    // the middle of an OS allocation, and our test won't work as expected, so
    // bail out.
    if (((uintptr_t)a & (PAGE_SIZE - 1)) != sizeof(header_t) * 2) return;
    // No trim needed when the entire OS allocation is free.
    ASSERT(remaining == theheap.remaining);
}

void cmpct_test(void)
{
    cmpct_test_buckets();
    cmpct_test_get_back_newly_freed();
    cmpct_test_return_to_os();
    cmpct_test_trim();
    cmpct_dump();
    void *ptr[16];

    ptr[0] = cmpct_alloc(8);
    ptr[1] = cmpct_alloc(32);
    ptr[2] = cmpct_alloc(7);
    cmpct_trim();
    ptr[3] = cmpct_alloc(0);
    ptr[4] = cmpct_alloc(98713);
    ptr[5] = cmpct_alloc(16);

    cmpct_free(ptr[5]);
    cmpct_free(ptr[1]);
    cmpct_free(ptr[3]);
    cmpct_free(ptr[0]);
    cmpct_free(ptr[4]);
    cmpct_free(ptr[2]);

    cmpct_dump();
    cmpct_trim();
    cmpct_dump();

    int i;
    for (i=0; i < 16; i++)
        ptr[i] = 0;

    for (i=0; i < 32768; i++) {
        unsigned int index = (unsigned int)rand() % 16;

        if ((i % (16*1024)) == 0)
            printf("pass %d\n", i);

//      printf("index 0x%x\n", index);
        if (ptr[index]) {
//          printf("freeing ptr[0x%x] = %p\n", index, ptr[index]);
            cmpct_free(ptr[index]);
            ptr[index] = 0;
        }
        unsigned int align = 1 << ((unsigned int)rand() % 8);
        ptr[index] = cmpct_memalign((unsigned int)rand() % 32768, align);
//      printf("ptr[0x%x] = %p, align 0x%x\n", index, ptr[index], align);

        DEBUG_ASSERT(((addr_t)ptr[index] % align) == 0);
//      cmpct_dump();
    }

    for (i=0; i < 16; i++) {
        if (ptr[i])
            cmpct_free(ptr[i]);
    }

    cmpct_dump();
}

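// Requests too big for any bucket come straight from the page allocator via
// heap_grow(): the whole newly grown free area becomes a single allocation and
// is never entered into a bucket.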
static void *large_alloc(size_t size)
{
#ifdef CMPCT_DEBUG
    size_t requested_size = size;
#endif
    size = round_up(size, 8);
    free_t *free_area = NULL;
    lock();
    if (heap_grow(size, &free_area) < 0) {
        // Don't return with the heap lock still held.
        unlock();
        return NULL;
    }
    void *result =
        create_allocation_header(free_area, 0, free_area->header.size, free_area->header.left);
    // Normally the 'remaining free space' counter would be decremented when we
    // unlink the free area from its bucket.  However in this case the free
    // area was too big to go in any bucket and we had it in our own
    // "free_area" variable so there is no unlinking and we have to adjust the
    // counter here.
    theheap.remaining -= free_area->header.size;
    unlock();
#ifdef CMPCT_DEBUG
    memset(result, ALLOC_FILL, requested_size);
    memset((char *)result + requested_size, PADDING_FILL,
        free_area->header.size - (requested_size + sizeof(header_t)));
#endif
    return result;
}

void cmpct_trim(void)
{
    // Look at free list entries that are at least as large as one page plus a
    // header. They might be at the start or the end of a block, so we can trim
    // them and free the page(s).
    lock();
    for (int bucket = size_to_index_freeing(PAGE_SIZE);
            bucket < NUMBER_OF_BUCKETS;
            bucket++) {
        free_t *next;
        for (free_t *free_area = theheap.free_lists[bucket];
                free_area != NULL;
                free_area = next) {
            DEBUG_ASSERT(free_area->header.size >= PAGE_SIZE + sizeof(header_t));
            next = free_area->next;
            header_t *right = right_header(&free_area->header);
            if (is_end_of_os_allocation((char *)right)) {
                char *old_os_allocation_end = (char *)round_up((uintptr_t)right, PAGE_SIZE);
                // The page will end with a smaller free list entry and a header-sized sentinel.
                char *new_os_allocation_end =
                    (char *)round_up((uintptr_t)free_area + sizeof(header_t) + sizeof(free_t), PAGE_SIZE);
                size_t freed_up = old_os_allocation_end - new_os_allocation_end;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // Rare, because we only look at large freelist entries, but unlucky rounding
                // could mean we can't actually free anything here.
                if (freed_up == 0) continue;
                unlink_free(free_area, bucket);
                size_t new_free_size = free_area->header.size - freed_up;
                DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                // Right sentinel, not free, stops attempts to coalesce right.
                create_allocation_header(free_area, new_free_size, 0, free_area);
                // Also puts it in the correct bucket.
                create_free_area(free_area, untag(free_area->header.left), new_free_size, NULL);
                page_free(new_os_allocation_end, freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            } else if (is_start_of_os_allocation(untag(free_area->header.left))) {
                char *old_os_allocation_start =
                    (char *)round_down((uintptr_t)free_area, PAGE_SIZE);
                // For the sentinel, we need at least one header-size of space between the page
                // edge and the first allocation to the right of the free area.
                char *new_os_allocation_start =
                    (char *)round_down((uintptr_t)(right - 1), PAGE_SIZE);
                size_t freed_up = new_os_allocation_start - old_os_allocation_start;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // This should not happen because we only look at the large free list buckets.
                if (freed_up == 0) continue;
                unlink_free(free_area, bucket);
                size_t sentinel_size = sizeof(header_t);
                size_t new_free_size = free_area->header.size - freed_up;
                if (new_free_size < sizeof(free_t)) {
                    sentinel_size += new_free_size;
                    new_free_size = 0;
                }
                // Left sentinel, not free, stops attempts to coalesce left.
                create_allocation_header(new_os_allocation_start, 0, sentinel_size, NULL);
                if (new_free_size == 0) {
                    FixLeftPointer(right, (header_t *)new_os_allocation_start);
                } else {
                    DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                    char *new_free = new_os_allocation_start + sentinel_size;
                    // Also puts it in the correct bucket.
                    create_free_area(new_free, new_os_allocation_start, new_free_size, NULL);
                    FixLeftPointer(right, (header_t *)new_free);
                }
                page_free(old_os_allocation_start, freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            }
        }
    }
    unlock();
}

void *cmpct_alloc(size_t size)
{
    if (size == 0u) return NULL;

    if (size + sizeof(header_t) > (1u << HEAP_ALLOC_VIRTUAL_BITS)) return large_alloc(size);

    size_t rounded_up;
    int start_bucket = size_to_index_allocating(size, &rounded_up);

    rounded_up += sizeof(header_t);

    lock();
    int bucket = find_nonempty_bucket(start_bucket);
    if (bucket == -1) {
        // Grow heap by at least 12% if we can.
        size_t growby = MIN(1u << HEAP_ALLOC_VIRTUAL_BITS,
                            MAX(theheap.size >> 3,
                                MAX(HEAP_GROW_SIZE, rounded_up)));
        while (heap_grow(growby, NULL) < 0) {
            if (growby <= rounded_up) {
                unlock();
                return NULL;
            }
            growby = MAX(growby >> 1, rounded_up);
        }
        bucket = find_nonempty_bucket(start_bucket);
    }
    free_t *head = theheap.free_lists[bucket];
    size_t left_over = head->header.size - rounded_up;
    // We can't carve off the rest for a new free space if it's smaller than the
    // free-list linked structure.  We also don't carve it off if it's less than
    // 1.6% the size of the allocation.  This is to avoid small long-lived
    // allocations being placed right next to large allocations, hindering
    // coalescing and returning pages to the OS.
    if (left_over >= sizeof(free_t) && left_over > (size >> 6)) {
        header_t *right = right_header(&head->header);
        unlink_free(head, bucket);
        void *free = (char *)head + rounded_up;
        create_free_area(free, head, left_over, NULL);
        FixLeftPointer(right, (header_t *)free);
        head->header.size -= left_over;
    } else {
        unlink_free(head, bucket);
    }
    void *result =
        create_allocation_header(head, 0, head->header.size, head->header.left);
#ifdef CMPCT_DEBUG
    memset(result, ALLOC_FILL, size);
    memset(((char *)result) + size, PADDING_FILL, rounded_up - size - sizeof(header_t));
#endif
    unlock();
    return result;
}

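// Over-allocates by the alignment plus bookkeeping space, finds the first
// sufficiently aligned payload address within that block, splits off and frees
// the unaligned front part, and returns the aligned payload.  The slack after
// the aligned allocation is left attached for now (see the TODO below).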
void *cmpct_memalign(size_t size, size_t alignment)
{
    if (alignment < 8) return cmpct_alloc(size);
    size_t padded_size =
        size + alignment + sizeof(free_t) + sizeof(header_t);
    char *unaligned = (char *)cmpct_alloc(padded_size);
    if (unaligned == NULL) return NULL;
    lock();
    size_t mask = alignment - 1;
    uintptr_t payload_int = (uintptr_t)unaligned + sizeof(free_t) +
                            sizeof(header_t) + mask;
    char *payload = (char *)(payload_int & ~mask);
    if (unaligned != payload) {
        header_t *unaligned_header = (header_t *)unaligned - 1;
        header_t *header = (header_t *)payload - 1;
        size_t left_over = payload - unaligned;
        create_allocation_header(
            header, 0, unaligned_header->size - left_over, unaligned_header);
        header_t *right = right_header(unaligned_header);
        unaligned_header->size = left_over;
        FixLeftPointer(right, header);
        unlock();
        cmpct_free(unaligned);
    } else {
        unlock();
    }
    // TODO: Free the part after the aligned allocation.
    return payload;
}

void cmpct_free(void *payload)
{
    if (payload == NULL) return;
    header_t *header = (header_t *)payload - 1;
    DEBUG_ASSERT(!is_tagged_as_free(header));  // Double free!
    size_t size = header->size;
    lock();
    header_t *left = header->left;
    if (left != NULL && is_tagged_as_free(left)) {
        // Coalesce with left free object.
        unlink_free_unknown_bucket((free_t *)left);
        header_t *right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce both sides.
            unlink_free_unknown_bucket((free_t *)right);
            header_t *right_right = right_header(right);
            FixLeftPointer(right_right, left);
            free_memory(left, left->left, left->size + size + right->size);
        } else {
            // Coalesce only left.
            FixLeftPointer(right, left);
            free_memory(left, left->left, left->size + size);
        }
    } else {
        header_t *right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce only right.
            header_t *right_right = right_header(right);
            unlink_free_unknown_bucket((free_t *)right);
            FixLeftPointer(right_right, header);
            free_memory(header, left, size + right->size);
        } else {
            free_memory(header, left, size);
        }
    }
    unlock();
}

void *cmpct_realloc(void *payload, size_t size)
{
    if (payload == NULL) return cmpct_alloc(size);
    header_t *header = (header_t *)payload - 1;
    size_t old_size = header->size - sizeof(header_t);
    void *new_payload = cmpct_alloc(size);
    // On allocation failure leave the original allocation untouched.
    if (new_payload == NULL) return NULL;
    memcpy(new_payload, payload, MIN(size, old_size));
    cmpct_free(payload);
    return new_payload;
}

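// Carves a newly obtained block of memory into three parts: a left sentinel
// header (not free), one free area covering the middle, and a zero-sized right
// sentinel header (not free).  The sentinels stop coalescing from walking off
// either end of the OS allocation.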
static void add_to_heap(void *new_area, size_t size, free_t **bucket)
{
    void *top = (char *)new_area + size;
    header_t *left_sentinel = (header_t *)new_area;
    // Not free, stops attempts to coalesce left.
    create_allocation_header(left_sentinel, 0, sizeof(header_t), NULL);
    header_t *new_header = left_sentinel + 1;
    size_t free_size = size - 2 * sizeof(header_t);
    create_free_area(new_header, left_sentinel, free_size, bucket);
    header_t *right_sentinel = (header_t *)(top - sizeof(header_t));
    // Not free, stops attempts to coalesce right.
    create_allocation_header(right_sentinel, 0, 0, new_header);
}

// Create a new free-list entry of at least size bytes (including the
// allocation header).  Called with the lock held, except during init.
static ssize_t heap_grow(size_t size, free_t **bucket)
{
    // The new free list entry will have a header on each side (the
    // sentinels) so we need to grow the gross heap size by this much more.
    size += 2 * sizeof(header_t);
    size = round_up(size, PAGE_SIZE);
    void *ptr = page_alloc(size >> PAGE_SIZE_SHIFT, PAGE_ALLOC_ANY_ARENA);
    if (ptr == NULL) return -1;
    theheap.size += size;
    LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);
    add_to_heap(ptr, size, bucket);
    return size;
}

void cmpct_init(void)
{
    LTRACE_ENTRY;

    // Create a mutex.
    mutex_init(&theheap.lock);

    // Initialize the free list.
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        theheap.free_lists[i] = NULL;
    }
    for (int i = 0; i < BUCKET_WORDS; i++) {
        theheap.free_list_bits[i] = 0;
    }

    size_t initial_alloc = HEAP_GROW_SIZE - 2 * sizeof(header_t);

    theheap.remaining = 0;

    heap_grow(initial_alloc, NULL);
}