/*
 * Copyright (c) 2008-2009,2012-2015 Travis Geiselbrecht
 * Copyright (c) 2009 Corey Tabaka
 *
 * Permission is hereby granted, free of charge, to any person obtaining
 * a copy of this software and associated documentation files
 * (the "Software"), to deal in the Software without restriction,
 * including without limitation the rights to use, copy, modify, merge,
 * publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so,
 * subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be
 * included in all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
 * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
 * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
 * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
 * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
 */
#include <debug.h>
#include <trace.h>
#include <assert.h>
#include <err.h>
#include <list.h>
#include <rand.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <kernel/mutex.h>
#include <lib/miniheap.h>
#include <lib/heap.h>
#include <lib/page_alloc.h>
#include <inttypes.h>

#define LOCAL_TRACE 0

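// Debug heap support: when DEBUG_HEAP is enabled, freshly allocated memory
// is filled with ALLOC_FILL, freed memory with FREE_FILL, and PADDING_SIZE
// bytes of PADDING_FILL are written after every allocation so that
// miniheap_free() can detect writes past the end of a buffer.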
#define DEBUG_HEAP 0
#define ALLOC_FILL 0x99
#define FREE_FILL 0x77
#define PADDING_FILL 0x55
#define PADDING_SIZE 64

// whether or not the heap will try to trim itself every time a free happens
#ifndef MINIHEAP_AUTOTRIM
#define MINIHEAP_AUTOTRIM 0
#endif

#define HEAP_MAGIC (0x48454150) // 'HEAP'

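// Free memory is tracked as an address-sorted list of free_heap_chunk
// headers that live inside the free regions themselves, which is why no
// chunk (and thus no allocation) can be smaller than
// sizeof(struct free_heap_chunk).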
struct free_heap_chunk {
    struct list_node node;
    size_t len;
};

struct heap {
    void *base;
    size_t len;
    size_t remaining;
    size_t low_watermark;
    mutex_t lock;
    struct list_node free_list;
};

// heap static vars
static struct heap theheap;

// structure placed at the beginning of every allocation
struct alloc_struct_begin {
#if LK_DEBUGLEVEL > 1
    unsigned int magic;
#endif
    void *ptr;
    size_t size;
#if DEBUG_HEAP
    void *padding_start;
    size_t padding_size;
#endif
};
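
// Resulting layout of a chunk handed out by miniheap_memalign() (the
// alignment slack only exists when the caller asked for alignment, the
// padding only when DEBUG_HEAP is enabled):
//
//   chunk base: [slack][struct alloc_struct_begin][user data][padding]
//                                                 ^
//                                                 returned pointer
//
// as->ptr records the chunk base and as->size the full chunk length, so
// miniheap_free() can reconstitute the original chunk.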

static ssize_t heap_grow(size_t len);

static void dump_free_chunk(struct free_heap_chunk *chunk)
{
    dprintf(INFO, "\t\tbase %p, end 0x%" PRIxVADDR ", len 0x%zx\n", chunk, (vaddr_t)chunk + chunk->len, chunk->len);
}

void miniheap_dump(void)
{
    dprintf(INFO, "Heap dump (using miniheap):\n");
    dprintf(INFO, "\tbase %p, len 0x%zx\n", theheap.base, theheap.len);
    dprintf(INFO, "\tfree list:\n");

    mutex_acquire(&theheap.lock);

    struct free_heap_chunk *chunk;
    list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
        dump_free_chunk(chunk);
    }
    mutex_release(&theheap.lock);
}

// try to insert this free chunk into the free list, consuming the chunk by merging it with
// nearby ones if possible. Returns the base of whatever chunk it became in the list.
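// For example, if the list holds [0x1f00,0x2000) and [0x2100,0x2200) and we
// insert [0x2000,0x2100), the new chunk is merged with both neighbors and
// the function returns a single chunk covering [0x1f00,0x2200).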
static struct free_heap_chunk *heap_insert_free_chunk(struct free_heap_chunk *chunk)
{
#if LK_DEBUGLEVEL > INFO
    vaddr_t chunk_end = (vaddr_t)chunk + chunk->len;
#endif

    LTRACEF("chunk ptr %p, size 0x%zx\n", chunk, chunk->len);

    struct free_heap_chunk *next_chunk;
    struct free_heap_chunk *last_chunk;

    mutex_acquire(&theheap.lock);

    theheap.remaining += chunk->len;

    // walk through the list, finding the node to insert before
    // note: the list_for_every_entry macro leaves next_chunk NULL when the
    // loop terminates, either because it walked past the last element
    // (entry->next == list) or because the list is empty
    list_for_every_entry(&theheap.free_list, next_chunk, struct free_heap_chunk, node) {
        if (chunk < next_chunk) {
#if LK_DEBUGLEVEL > INFO
            DEBUG_ASSERT(chunk_end <= (vaddr_t)next_chunk);
#endif

            list_add_before(&next_chunk->node, &chunk->node);

            goto try_merge;
        }
    }

    // walked off the end of the list, add it at the tail
    list_add_tail(&theheap.free_list, &chunk->node);

    // try to merge with the previous chunk
try_merge:
    last_chunk = list_prev_type(&theheap.free_list, &chunk->node, struct free_heap_chunk, node);
    if (last_chunk) {
        if ((vaddr_t)last_chunk + last_chunk->len == (vaddr_t)chunk) {
            // easy, just extend the previous chunk
            last_chunk->len += chunk->len;

            // remove ourselves from the list
            list_delete(&chunk->node);

            // set the chunk pointer to the newly extended chunk, in case
            // it needs to merge with the next chunk below
            chunk = last_chunk;
        }
    }

    // try to merge with the next chunk
    if (next_chunk) {
        if ((vaddr_t)chunk + chunk->len == (vaddr_t)next_chunk) {
            // extend our chunk
            chunk->len += next_chunk->len;

            // remove it from the list
            list_delete(&next_chunk->node);
        }
    }

    mutex_release(&theheap.lock);

    return chunk;
}

static struct free_heap_chunk *heap_create_free_chunk(void *ptr, size_t len, bool allow_debug)
{
    DEBUG_ASSERT(ptr);
    DEBUG_ASSERT((len % sizeof(void *)) == 0); // size must be aligned on pointer boundary

#if DEBUG_HEAP
    if (allow_debug)
        memset(ptr, FREE_FILL, len);
#endif

    struct free_heap_chunk *chunk = (struct free_heap_chunk *)ptr;
    chunk->len = len;

    return chunk;
}

void *miniheap_malloc(size_t size)
{
    // The maximum alignment that C needs is 8 bytes on the 32-bit platforms we
    // care about and 16 bytes on the 64-bit platforms we care about. In
    // practice, the kernel itself probably doesn't need 16-byte alignment, but
    // be conservative. For smaller allocations we could potentially get away
    // with smaller alignments, but miniheap_memalign currently rounds smaller
    // alignments up to 16 bytes. So good enough.
    return miniheap_memalign(sizeof(void *) * 2, size);
}

void *miniheap_memalign(unsigned int alignment, size_t size)
{
    void *ptr;
#if DEBUG_HEAP
    size_t original_size = size;
#endif

    LTRACEF("size %zu, align %u\n", size, alignment);

    // alignment must be a power of 2
    if (alignment && (alignment & (alignment - 1))) {
        return NULL;
    }

    // we always put a size field + base pointer + magic in front of the allocation
    size += sizeof(struct alloc_struct_begin);
#if DEBUG_HEAP
    size += PADDING_SIZE;
#endif

    // make sure we allocate at least the size of a struct free_heap_chunk so that
    // when we free it, we can create a struct free_heap_chunk and stick it
    // in the spot
    if (size < sizeof(struct free_heap_chunk))
        size = sizeof(struct free_heap_chunk);

    // round up size to a multiple of the native pointer size
    size = round_up(size, sizeof(void *));

    // deal with nonzero alignments
    if (alignment > 0) {
        if (alignment < 16)
            alignment = 16;

        // add alignment for worst case fit
        size += alignment;
    }
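
    // at this point size covers the worst case; e.g. (ignoring DEBUG_HEAP
    // padding) a 32-byte request with 64-byte alignment becomes
    // 32 + sizeof(struct alloc_struct_begin) rounded up to a pointer-size
    // multiple, plus 64 bytes of slack, so an aligned pointer with the
    // header immediately in front of it always fits inside the chunk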

retry:
    mutex_acquire(&theheap.lock);

    // walk through the list
    ptr = NULL;
    struct free_heap_chunk *chunk;
    list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
        DEBUG_ASSERT((chunk->len % sizeof(void *)) == 0); // len should always be a multiple of pointer size

        // is it big enough to service our allocation?
        if (chunk->len >= size) {
            ptr = chunk;

            // remove it from the list
            struct list_node *next_node = list_next(&theheap.free_list, &chunk->node);
            list_delete(&chunk->node);

            if (chunk->len > size + sizeof(struct free_heap_chunk)) {
                // there's enough space in this chunk to create a new one after the allocation
                struct free_heap_chunk *newchunk = heap_create_free_chunk((uint8_t *)ptr + size, chunk->len - size, true);

                // truncate this chunk
                chunk->len -= chunk->len - size;

                // add the new one where chunk used to be
                if (next_node)
                    list_add_before(next_node, &newchunk->node);
                else
                    list_add_tail(&theheap.free_list, &newchunk->node);
            }

            // the allocated size is actually the length of this chunk, not the size requested
            DEBUG_ASSERT(chunk->len >= size);
            size = chunk->len;

#if DEBUG_HEAP
            memset(ptr, ALLOC_FILL, size);
#endif

            ptr = (void *)((addr_t)ptr + sizeof(struct alloc_struct_begin));

            // align the output if requested
            if (alignment > 0) {
                ptr = (void *)round_up((addr_t)ptr, (addr_t)alignment);
            }

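            // stash the bookkeeping header in the space immediately before
            // the pointer we hand out; miniheap_free() steps back over it
            // to recover the original chunk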
            struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
            as--;
#if LK_DEBUGLEVEL > 1
            as->magic = HEAP_MAGIC;
#endif
            as->ptr = (void *)chunk;
            as->size = size;
            theheap.remaining -= size;

            if (theheap.remaining < theheap.low_watermark) {
                theheap.low_watermark = theheap.remaining;
            }
#if DEBUG_HEAP
            as->padding_start = ((uint8_t *)ptr + original_size);
            as->padding_size = (((addr_t)chunk + size) - ((addr_t)ptr + original_size));
            // printf("padding start %p, size %u, chunk %p, size %u\n", as->padding_start, as->padding_size, chunk, size);

            memset(as->padding_start, PADDING_FILL, as->padding_size);
#endif

            break;
        }
    }

    mutex_release(&theheap.lock);

    /* try to grow the heap if we can */
    if (ptr == NULL) {
        ssize_t err = heap_grow(size);
        /*
         * Retry as long as heap_grow succeeds. This can happen multiple times
         * if another thread is allocating at the same time and uses the newly
         * grown chunk before we reacquire the heap lock.
         */
        if (err >= 0) {
            goto retry;
        }
    }

    LTRACEF("returning ptr %p\n", ptr);

    return ptr;
}

void *miniheap_realloc(void *ptr, size_t size)
{
    /* slow implementation */
    if (!ptr)
        return miniheap_malloc(size);
    if (size == 0) {
        miniheap_free(ptr);
        return NULL;
    }

    // XXX better implementation
    void *p = miniheap_malloc(size);
    if (!p)
        return NULL;

    // bound the copy to the number of bytes actually available in the old
    // allocation (from ptr to the end of its chunk, recovered from the
    // header in front of it) so that growing an allocation doesn't read
    // past the end of the old one
    struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
    as--;
    size_t copy_size = ((addr_t)as->ptr + as->size) - (addr_t)ptr;
    if (copy_size > size)
        copy_size = size;
    memcpy(p, ptr, copy_size);
    miniheap_free(ptr);

    return p;
}

void miniheap_free(void *ptr)
{
    if (!ptr)
        return;

    LTRACEF("ptr %p\n", ptr);

    // recover the allocation header placed just before the pointer
    struct alloc_struct_begin *as = (struct alloc_struct_begin *)ptr;
    as--;

#if LK_DEBUGLEVEL > 1
    DEBUG_ASSERT(as->magic == HEAP_MAGIC);
#endif

#if DEBUG_HEAP
    {
        uint i;
        uint8_t *pad = (uint8_t *)as->padding_start;

        for (i = 0; i < as->padding_size; i++) {
            if (pad[i] != PADDING_FILL) {
                printf("free at %p scribbled outside the lines:\n", ptr);
                hexdump(pad, as->padding_size);
                panic("die\n");
            }
        }
    }
#endif

    LTRACEF("allocation was %zu bytes long at ptr %p\n", as->size, as->ptr);

    // looks good, create a free chunk and add it to the pool
    heap_insert_free_chunk(heap_create_free_chunk(as->ptr, as->size, true));

#if MINIHEAP_AUTOTRIM
    miniheap_trim();
#endif
}

void miniheap_trim(void)
{
    LTRACE_ENTRY;

    mutex_acquire(&theheap.lock);

    // walk through the list, finding free chunks that can be returned to the page allocator
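    // e.g. with 4 KiB pages, a free chunk spanning [0x10020, 0x13000) hands
    // the whole pages [0x11000, 0x13000) back to the page allocator and
    // shrinks the chunk to [0x10020, 0x11000), provided the leftover pieces
    // are still big enough to hold a struct free_heap_chunk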
    struct free_heap_chunk *chunk;
    struct free_heap_chunk *next_chunk;
    list_for_every_entry_safe(&theheap.free_list, chunk, next_chunk, struct free_heap_chunk, node) {
        LTRACEF("looking at chunk %p, len 0x%zx\n", chunk, chunk->len);

        uintptr_t start = (uintptr_t)chunk;
        uintptr_t end = start + chunk->len;
        DEBUG_ASSERT(end > start); // make sure it doesn't wrap the address space and has a positive len

        // compute the page aligned region in this free block (if any)
        uintptr_t start_page = round_up(start, PAGE_SIZE);
        uintptr_t end_page = round_down(end, PAGE_SIZE);
        DEBUG_ASSERT(end_page <= end);
        DEBUG_ASSERT(start_page >= start);

        LTRACEF("start page 0x%" PRIxPTR ", end page 0x%" PRIxPTR "\n", start_page, end_page);

retry:
        // see if the free block encompasses at least one page
        if (unlikely(end_page > start_page)) {
            LTRACEF("could trim: start 0x%" PRIxPTR ", end 0x%" PRIxPTR "\n", start_page, end_page);

            // cases where the start of the block is already page aligned
            if (start_page == start) {
                // look for the special case where we're going to completely remove the chunk
                if (end_page == end) {
                    LTRACEF("special case, free chunk completely covers page(s)\n");
                    list_delete(&chunk->node);
                    goto free_chunk;
                }
            } else {
                // the start of the block is not page aligned,
                // will there be enough space before the block if we trim?
                if (start_page - start < sizeof(struct free_heap_chunk)) {
                    LTRACEF("not enough space for free chunk before\n");
                    start_page += PAGE_SIZE;
                    goto retry;
                }
            }

            // do we need to split the free block and create a new block afterwards?
            if (end_page < end) {
                size_t new_chunk_size = end - end_page;
                LTRACEF("will have to split, new chunk will be 0x%zx bytes long\n", new_chunk_size);

                // if there's not enough space afterwards for a free chunk, we can't free the last page
                if (new_chunk_size < sizeof(struct free_heap_chunk)) {
                    LTRACEF("not enough space for free chunk afterwards\n");
                    end_page -= PAGE_SIZE;
                    goto retry;
                }

                // trim the new space off the end of the current chunk
                chunk->len -= new_chunk_size;
                end = end_page;

                // create a new chunk after the one we're trimming
                struct free_heap_chunk *new_chunk = heap_create_free_chunk((void *)end_page, new_chunk_size, false);

                // link it with the current block
                list_add_after(&chunk->node, &new_chunk->node);
            }

            // check again to see if we are now completely covering a block
            if (start_page == start && end_page == end) {
                LTRACEF("special case, after splitting off new chunk, free chunk completely covers page(s)\n");
                list_delete(&chunk->node);
                goto free_chunk;
            }

            // trim the size of the block
            chunk->len -= end_page - start_page;

free_chunk:
            // return it to the page allocator
            LTRACEF("returning %p size 0x%" PRIxPTR " to the page allocator\n", (void *)start_page, end_page - start_page);
            page_free((void *)start_page, (end_page - start_page) / PAGE_SIZE);

            // tweak accounting
            theheap.remaining -= end_page - start_page;
        }
    }

    mutex_release(&theheap.lock);
}

void miniheap_get_stats(struct miniheap_stats *ptr)
{
    struct free_heap_chunk *chunk;

    ptr->heap_start = theheap.base;
    ptr->heap_len = theheap.len;
    ptr->heap_free = 0;
    ptr->heap_max_chunk = 0;

    mutex_acquire(&theheap.lock);

    list_for_every_entry(&theheap.free_list, chunk, struct free_heap_chunk, node) {
        ptr->heap_free += chunk->len;

        if (chunk->len > ptr->heap_max_chunk) {
            ptr->heap_max_chunk = chunk->len;
        }
    }

    ptr->heap_low_watermark = theheap.low_watermark;

    mutex_release(&theheap.lock);
}

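// grow the heap by at least |size| bytes: pull whole pages from the page
// allocator, hand them to the free list, and widen the recorded heap bounds;
// returns the number of bytes added, or ERR_NO_MEMORY on failure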
static ssize_t heap_grow(size_t size)
{
    size = round_up(size, PAGE_SIZE);
    void *ptr = page_alloc(size / PAGE_SIZE, PAGE_ALLOC_ANY_ARENA);
    if (!ptr) {
        TRACEF("failed to grow kernel heap by 0x%zx bytes\n", size);
        return ERR_NO_MEMORY;
    }

    LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);

    heap_insert_free_chunk(heap_create_free_chunk(ptr, size, true));

    /* change the heap start and end variables */
    if ((uintptr_t)ptr < (uintptr_t)theheap.base || theheap.base == 0)
        theheap.base = ptr;

    uintptr_t endptr = (uintptr_t)ptr + size;
    if (endptr > (uintptr_t)theheap.base + theheap.len) {
        theheap.len = endptr - (uintptr_t)theheap.base;
    }

    return size;
}
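// seed the heap with an initial region; a zero-length region leaves the heap
// empty, in which case the first allocation grows it on demand via heap_grow()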
void miniheap_init(void *ptr, size_t len)
{
    DEBUG_ASSERT(ptr);
    LTRACEF("ptr %p, len %zu\n", ptr, len);

    // create a mutex
    mutex_init(&theheap.lock);

    // initialize the free list
    list_initialize(&theheap.free_list);

    // set the heap range
    theheap.base = ptr;
    theheap.len = len;
    theheap.remaining = 0; // will get set by heap_insert_free_chunk()
    theheap.low_watermark = 0;

    // if passed a default range, use it
    if (len > 0)
        heap_insert_free_chunk(heap_create_free_chunk(ptr, len, true));
}