/*

   american fuzzy lop++ - dislocator, an abusive allocator
   -----------------------------------------------------

   Originally written by Michal Zalewski

   Copyright 2016 Google Inc. All rights reserved.
   Copyright 2019-2024 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     http://www.apache.org/licenses/LICENSE-2.0

   This is a companion library that can be used as a drop-in replacement
   for the libc allocator in the fuzzed binaries. See README.dislocator.md for
   more info.

 */
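
/* A quick usage sketch (not normative; README.dislocator.md is the
   authoritative reference). The library is meant to be preloaded into the
   target, e.g.:

     LD_PRELOAD=/path/to/libdislocator.so ./target <testcase

   or, under afl-fuzz, via the AFL_PRELOAD environment variable:

     AFL_PRELOAD=/path/to/libdislocator.so afl-fuzz -i in -o out -- ./target @@

*/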

#define _GNU_SOURCE
#include <stdio.h>
#include <stdlib.h>
#include <stddef.h>
#include <string.h>
#include <limits.h>
#include <errno.h>
#include <sys/mman.h>

#ifdef __APPLE__
#include <mach/vm_statistics.h>
#endif

#ifdef __FreeBSD__
#include <sys/param.h>
#endif

#if (defined(__linux__) && !defined(__ANDROID__)) || defined(__HAIKU__)
#include <unistd.h>
#include <sys/prctl.h>
#ifdef __linux__
#include <sys/syscall.h>
#include <malloc.h>
#endif
#ifdef __NR_getrandom
#define arc4random_buf(p, l)                       \
  do {                                             \
                                                   \
    ssize_t rd = syscall(__NR_getrandom, p, l, 0); \
    if (rd != l) DEBUGF("getrandom failed");       \
                                                   \
  } while (0)

#else
#include <time.h>
#define arc4random_buf(p, l)     \
  do {                           \
                                 \
    srand(time(NULL));           \
    u32 i;                       \
    u8 *ptr = (u8 *)p;           \
    for (i = 0; i < l; i++)      \
      ptr[i] = rand() % INT_MAX; \
                                 \
  } while (0)

#endif
#ifndef PR_SET_VMA
#define PR_SET_VMA 0x53564d41
#define PR_SET_VMA_ANON_NAME 0
#endif
#endif
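
/* Note that either arc4random_buf() fallback above is only expanded where the
   macro is actually used further down, e.g. (a sketch mirroring the call in
   __dislocator_init):

     u32 canary;
     arc4random_buf(&canary, sizeof(canary));

*/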

#include "config.h"
#include "types.h"

#if __STDC_VERSION__ < 201112L || \
    (defined(__FreeBSD__) && __FreeBSD_version < 1200000)
// use this hack if not C11
typedef struct {

  long long   __ll;
  long double __ld;

} max_align_t;

#endif

#define ALLOC_ALIGN_SIZE (_Alignof(max_align_t))

#ifndef PAGE_SIZE
#define PAGE_SIZE 4096
#endif                                                        /* !PAGE_SIZE */

#ifndef MAP_ANONYMOUS
#define MAP_ANONYMOUS MAP_ANON
#endif                                                    /* !MAP_ANONYMOUS */

/* Parenthesized so that uses such as `rlen % SUPER_PAGE_SIZE` parse as
   intended; a bare `1 << 21` would bind incorrectly there. */
#define SUPER_PAGE_SIZE (1 << 21)

/* Error / message handling: */

#define DEBUGF(_x...)                 \
  do {                                \
                                      \
    if (alloc_verbose) {              \
                                      \
      if (++call_depth == 1) {        \
                                      \
        fprintf(stderr, "[AFL] " _x); \
        fprintf(stderr, "\n");        \
                                      \
      }                               \
      call_depth--;                   \
                                      \
    }                                 \
                                      \
  } while (0)

#define FATAL(_x...)                    \
  do {                                  \
                                        \
    if (++call_depth == 1) {            \
                                        \
      fprintf(stderr, "*** [AFL] " _x); \
      fprintf(stderr, " ***\n");        \
      abort();                          \
                                        \
    }                                   \
    call_depth--;                       \
                                        \
  } while (0)

/* Macro to count the number of pages needed to store a buffer: */

#define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE)
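
/* For instance, with the default PAGE_SIZE of 4096:

     PG_COUNT(1)    == 1
     PG_COUNT(4096) == 1
     PG_COUNT(4097) == 2

*/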

/* Canary & clobber bytes: */

#define ALLOC_CANARY 0xAACCAACC
#define ALLOC_CLOBBER 0xCC

#define TAIL_ALLOC_CANARY 0xAC

/* Accessors for the metadata words stored immediately below the pointer
   handed out to the user: the 32-bit canary and the allocation length. */
#define PTR_C(_p) (((u32 *)(_p))[-1])
#define PTR_L(_p) (((u32 *)(_p))[-2])
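
/* In other words, for a pointer p returned by this allocator (a sketch of the
   layout; see __dislocator_alloc() below):

     ((u32 *)p)[-2]   requested length  (PTR_L)
     ((u32 *)p)[-1]   allocation canary (PTR_C)
     p[0 .. len-1]    user data, ending flush against a PROT_NONE guard page

*/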

/* Configurable stuff (use AFL_LD_* to set): */

static size_t max_mem = MAX_ALLOC;        /* Max heap usage to permit       */
static u8     alloc_verbose,              /* Additional debug messages      */
    hard_fail,                            /* abort() when max_mem exceeded? */
    no_calloc_over,                   /* Don't abort on calloc() overflows? */
    align_allocations;                /* Force alignment to sizeof(void *)  */

#if defined __OpenBSD__ || defined __APPLE__
#define __thread
#warning no thread support available
#endif
static _Atomic size_t total_mem;          /* Currently allocated mem        */

static __thread u32 call_depth;         /* To avoid recursion via fprintf() */
static u32          alloc_canary;

/* This is the main alloc function. It allocates one page more than necessary,
   sets that tailing page to PROT_NONE, and then offsets the returned address
   so that the buffer ends right at that boundary. Since it always uses mmap(),
   the returned memory will be zeroed. */
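
/* The resulting mapping, sketched for a request of len bytes (rlen is len
   after optional alignment):

     |<- unused slack ->|<- len | canary ->|<- user buffer (rlen) ->|<- PROT_NONE ->|
      page start          8-byte header     ^ returned pointer       guard page

   The first out-of-bounds read or write past the end of the buffer hits the
   guard page and faults immediately. */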

static void *__dislocator_alloc(size_t len) {

  u8    *ret, *base;
  size_t tlen;
  int    flags, protflags, fd, sp;

  if (total_mem + len > max_mem || total_mem + len < total_mem) {

    if (hard_fail) FATAL("total allocs exceed %zu MB", max_mem / 1024 / 1024);

    DEBUGF("total allocs exceed %zu MB, returning NULL", max_mem / 1024 / 1024);

    return NULL;

  }

  size_t rlen;
  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1)))
    rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
  else
    rlen = len;

  /* We will also store buffer length and a canary below the actual buffer, so
     let's add 8 bytes for that. */

  base = NULL;
  tlen = (1 + PG_COUNT(rlen + 8)) * PAGE_SIZE;
  protflags = PROT_READ | PROT_WRITE;
  flags = MAP_PRIVATE | MAP_ANONYMOUS;
  fd = -1;
#if defined(PROT_MAX)
  // apply when sysctl vm.imply_prot_max is set to 1
  // no-op otherwise
  protflags |= PROT_MAX(PROT_READ | PROT_WRITE);
#endif
#if defined(USEHUGEPAGE)
  sp = (rlen >= SUPER_PAGE_SIZE && !(rlen % SUPER_PAGE_SIZE));

#if defined(__APPLE__)
  if (sp) fd = VM_FLAGS_SUPERPAGE_SIZE_2MB;
#elif defined(__linux__)
  if (sp) flags |= MAP_HUGETLB;
#elif defined(__FreeBSD__)
  if (sp) flags |= MAP_ALIGNED_SUPER;
#elif defined(__sun)
  if (sp) {

    base = (void *)(caddr_t)(1 << 21);
    flags |= MAP_ALIGN;

  }

#endif
#else
  (void)sp;
#endif

  ret = (u8 *)mmap(base, tlen, protflags, flags, fd, 0);
#if defined(USEHUGEPAGE)
  /* We try one more time with regular call */
  if (ret == MAP_FAILED) {

#if defined(__APPLE__)
    fd = -1;
#elif defined(__linux__)
    flags &= ~MAP_HUGETLB;           /* ~, not -: clear only the flag bit */
#elif defined(__FreeBSD__)
    flags &= ~MAP_ALIGNED_SUPER;
#elif defined(__sun)
    flags &= ~MAP_ALIGN;
#endif
    ret = (u8 *)mmap(NULL, tlen, protflags, flags, fd, 0);

  }

#endif

  if (ret == MAP_FAILED) {

    if (hard_fail) FATAL("mmap() failed on alloc (OOM?)");

    DEBUGF("mmap() failed on alloc (OOM?)");

    return NULL;

  }

#if defined(USENAMEDPAGE)
#if defined(__linux__)
  // in the /proc/<pid>/maps file, the anonymous page appears as
  // `<start>-<end> ---p 00000000 00:00 0 [anon:libdislocator]`
  if (prctl(PR_SET_VMA, PR_SET_VMA_ANON_NAME, (unsigned long)ret, tlen,
            (unsigned long)"libdislocator") < 0) {

    DEBUGF("prctl() failed");

  }

#endif
#endif

  /* Set PROT_NONE on the last page. */

  if (mprotect(ret + PG_COUNT(rlen + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE))
    FATAL("mprotect() failed when allocating memory");

  /* Offset the return pointer so that it's right-aligned to the page
     boundary. */

  ret += PAGE_SIZE * PG_COUNT(rlen + 8) - rlen - 8;

  /* Store allocation metadata. */

  ret += 8;

  PTR_L(ret) = len;
  PTR_C(ret) = alloc_canary;

  total_mem += len;

  if (rlen != len) {

    size_t i;
    for (i = len; i < rlen; ++i)
      ret[i] = TAIL_ALLOC_CANARY;

  }

  return ret;

}
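
/* A worked example with the default 4096-byte pages: for len == 100 (and no
   forced alignment, so rlen == 100), tlen is (1 + PG_COUNT(108)) * 4096 ==
   8192. The second page is made PROT_NONE, and the returned pointer is
   page_base + 4096 - 100, so bytes [0, 99] are usable and byte 100 is the
   first byte of the guard page. */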

/* The "user-facing" wrapper for calloc(). This just checks for overflows and
   displays debug messages if requested. */

__attribute__((malloc)) __attribute__((alloc_size(1, 2))) void *calloc(
    size_t elem_len, size_t elem_cnt) {

  void *ret;

  size_t len = elem_len * elem_cnt;

  /* Perform some sanity checks to detect obvious issues... */

  if (elem_cnt && len / elem_cnt != elem_len) {

    if (no_calloc_over) {

      DEBUGF("calloc(%zu, %zu) would overflow, returning NULL", elem_len,
             elem_cnt);
      return NULL;

    }

    FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);

  }

  ret = __dislocator_alloc(len);

  DEBUGF("calloc(%zu, %zu) = %p [%zu total]", elem_len, elem_cnt, ret,
         total_mem);

  return ret;

}

/* The wrapper for malloc(). Roughly the same, but also clobbers the returned
   memory (unlike calloc(), malloc() is not guaranteed to return zeroed
   memory). */

__attribute__((malloc)) __attribute__((alloc_size(1))) void *malloc(
    size_t len) {

  void *ret;

  ret = __dislocator_alloc(len);

  DEBUGF("malloc(%zu) = %p [%zu total]", len, ret, total_mem);

  if (ret && len) memset(ret, ALLOC_CLOBBER, len);

  return ret;

}

/* The wrapper for free(). This simply marks the entire region as PROT_NONE.
   If the region is already freed, the code will segfault during the attempt to
   read the canary. Not very graceful, but works, right? */

void free(void *ptr) {

  u32 len;

  DEBUGF("free(%p)", ptr);

  if (!ptr) return;

  if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on free()");

  len = PTR_L(ptr);

  total_mem -= len;
  u8 *ptr_ = ptr;

  if (align_allocations && (len & (ALLOC_ALIGN_SIZE - 1))) {

    size_t rlen = (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;
    for (; len < rlen; ++len)
      if (ptr_[len] != TAIL_ALLOC_CANARY)
        FATAL("bad tail allocator canary on free()");

  }

  /* Protect everything. Note that the extra page at the end is already
     set as PROT_NONE, so we don't need to touch that. */

  ptr_ -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8;

  if (mprotect(ptr_ - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE))
    FATAL("mprotect() failed when freeing memory");

  /* Keep the mapping; this is wasteful, but prevents ptr reuse. */

}
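
/* Consequently, a use-after-free becomes a deterministic crash instead of
   silent corruption (a sketch):

     char *p = malloc(16);
     free(p);                // whole region is now PROT_NONE
     p[0] = 'x';             // -> SIGSEGV right here

*/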

/* Realloc is pretty straightforward, too. We forcibly allocate a new buffer,
   move data, and then free (aka mprotect()) the original one. */

__attribute__((alloc_size(2))) void *realloc(void *ptr, size_t len) {

  void *ret;

  ret = malloc(len);

  if (ret && ptr) {

    if (PTR_C(ptr) != alloc_canary) FATAL("bad allocator canary on realloc()");
    // Here the tail canary check is delayed to free()

    memcpy(ret, ptr, MIN(len, PTR_L(ptr)));
    free(ptr);

  }

  DEBUGF("realloc(%p, %zu) = %p [%zu total]", ptr, len, ret, total_mem);

  return ret;

}

/* For posix_memalign() we mainly validate the alignment argument; rounding
   the size up to a multiple of the alignment then turns it into a normal
   request. */

int posix_memalign(void **ptr, size_t align, size_t len) {

  // if (*ptr == NULL) return EINVAL; // (andrea) Why? I comment it out for now
  /* POSIX: align must be a power of two and a multiple of sizeof(void *). */
  if (align < sizeof(void *) || (align & (align - 1))) return EINVAL;
  if (len == 0) {

    *ptr = NULL;
    return 0;

  }

  size_t rem = len % align;
  if (rem) len += align - rem;

  *ptr = __dislocator_alloc(len);

  if (!*ptr) return ENOMEM;

  memset(*ptr, ALLOC_CLOBBER, len);

  DEBUGF("posix_memalign(%p %zu, %zu) [*ptr = %p]", ptr, align, len, *ptr);

  return 0;

}
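
/* Why rounding up suffices (a sketch): __dislocator_alloc() places the end of
   the buffer exactly at a page boundary, so for any power-of-two align up to
   PAGE_SIZE, page_end - len is align-aligned whenever len is a multiple of
   align. E.g.:

     void *p = NULL;
     if (posix_memalign(&p, 64, 100) == 0 && p) {

       // len was rounded up to 128, so p == page_end - 128, a
       // 64-byte-aligned address

     }

*/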

/* just the non-POSIX fashion */

__attribute__((malloc)) __attribute__((alloc_size(2))) void *memalign(
    size_t align, size_t len) {

  void *ret = NULL;

  if (posix_memalign(&ret, align, len)) {

    DEBUGF("memalign(%zu, %zu) failed", align, len);

  }

  return ret;

}

/* The C11 cousin of memalign(), only stricter: the size must be a multiple
   of the alignment. */

__attribute__((malloc)) __attribute__((alloc_size(2))) void *aligned_alloc(
    size_t align, size_t len) {

  void *ret = NULL;

  if (!align || (len % align)) return NULL;

  if (posix_memalign(&ret, align, len)) {

    DEBUGF("aligned_alloc(%zu, %zu) failed", align, len);

  }

  return ret;

}

/* BSD-specific API; the main addition over realloc() is checking the
   count * size multiplication for overflow. */

__attribute__((alloc_size(2, 3))) void *reallocarray(void *ptr, size_t elem_len,
                                                     size_t elem_cnt) {

  /* OpenBSD-style check: the division is only needed when either operand is
     large enough for the product to possibly overflow. */
  const size_t elem_lim = 1UL << (sizeof(size_t) * 4);
  const size_t elem_tot = elem_len * elem_cnt;
  void        *ret = NULL;

  if ((elem_len >= elem_lim || elem_cnt >= elem_lim) && elem_len > 0 &&
      elem_cnt > (SIZE_MAX / elem_len)) {

    DEBUGF("reallocarray size overflow (%zu)", elem_tot);

  } else {

    ret = realloc(ptr, elem_tot);

  }

  return ret;

}
/* NetBSD-style variant: ptr actually points at the pointer to update, and
   the return value is 0 on success. The original code operated on a local
   copy of ptr; dereferencing it is assumed to be the intended behavior. */

int reallocarr(void *ptr, size_t elem_len, size_t elem_cnt) {

  void       **pp = (void **)ptr;
  const size_t elem_tot = elem_len * elem_cnt;

  if (elem_tot == 0) {

    /* Treat a zero-sized request like realloc(p, 0): drop the old buffer. */
    free(*pp);
    *pp = NULL;
    return 0;

  }

  void *ret = reallocarray(*pp, elem_len, elem_cnt);
  if (!ret) return -1;

  *pp = ret;
  return 0;

}
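
/* Intended usage, NetBSD-style (a sketch):

     int *arr = NULL;
     if (reallocarr(&arr, 10, sizeof(int)) == 0) {

       // arr now has room for ten ints
       free(arr);

     }

*/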

#if defined(__APPLE__)
size_t malloc_size(const void *ptr) {

#elif !defined(__ANDROID__)
size_t malloc_usable_size(void *ptr) {

#else
size_t malloc_usable_size(const void *ptr) {

#endif

  return ptr ? PTR_L(ptr) : 0;

}

#if defined(__APPLE__)
size_t malloc_good_size(size_t len) {

  return (len & ~(ALLOC_ALIGN_SIZE - 1)) + ALLOC_ALIGN_SIZE;

}

#endif

__attribute__((constructor)) void __dislocator_init(void) {

  char *tmp = getenv("AFL_LD_LIMIT_MB");

  if (tmp) {

    char              *tok;
    unsigned long long mmem;

    errno = 0;                  /* strtoull() reports ERANGE via errno only */
    mmem = strtoull(tmp, &tok, 10);
    if (*tok != '\0' || errno == ERANGE || mmem > SIZE_MAX / 1024 / 1024)
      FATAL("Bad value for AFL_LD_LIMIT_MB");
    max_mem = mmem * 1024 * 1024;

  }

  alloc_canary = ALLOC_CANARY;
  tmp = getenv("AFL_RANDOM_ALLOC_CANARY");

  if (tmp) arc4random_buf(&alloc_canary, sizeof(alloc_canary));

  alloc_verbose = !!getenv("AFL_LD_VERBOSE");
  hard_fail = !!getenv("AFL_LD_HARD_FAIL");
  no_calloc_over = !!getenv("AFL_LD_NO_CALLOC_OVER");
  align_allocations = !!getenv("AFL_ALIGNED_ALLOC");

}
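
/* A typical way to drive the knobs above (a sketch):

     AFL_LD_LIMIT_MB=512 AFL_LD_VERBOSE=1 AFL_LD_HARD_FAIL=1 \
       LD_PRELOAD=./libdislocator.so ./target <testcase

*/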

/* NetBSD fault-handler-specific API subset */

void (*esetfunc(void (*fn)(int, const char *, ...)))(int, const char *, ...) {

  /* Might not be meaningful to implement; upper calls already report errors */
  return NULL;

}

void *emalloc(size_t len) {

  return malloc(len);

}

void *ecalloc(size_t elem_len, size_t elem_cnt) {

  return calloc(elem_len, elem_cnt);

}

void *erealloc(void *ptr, size_t len) {

  return realloc(ptr, len);

}