1 /*
2 american fuzzy lop++ - instrumentation bootstrap
3 ------------------------------------------------
4
5 Copyright 2015, 2016 Google Inc. All rights reserved.
6 Copyright 2019-2024 AFLplusplus Project. All rights reserved.
7
8 Licensed under the Apache License, Version 2.0 (the "License");
9 you may not use this file except in compliance with the License.
10 You may obtain a copy of the License at:
11
12 https://www.apache.org/licenses/LICENSE-2.0
13
14
15 */
16
17 #ifdef __AFL_CODE_COVERAGE
18 #ifndef _GNU_SOURCE
19 #define _GNU_SOURCE
20 #endif
21 #ifndef __USE_GNU
22 #define __USE_GNU
23 #endif
24 #include <dlfcn.h>
25
26 __attribute__((weak)) void __sanitizer_symbolize_pc(void *, const char *fmt,
27 char *out_buf,
28 size_t out_buf_size);
29 #endif
30
31 #ifdef __ANDROID__
32 #include "android-ashmem.h"
33 #endif
34 #include "config.h"
35 #include "types.h"
36 #include "cmplog.h"
37 #include "llvm-alternative-coverage.h"
38
39 #define XXH_INLINE_ALL
40 #include "xxhash.h"
41 #undef XXH_INLINE_ALL
42
43 #include <stdio.h>
44 #include <stdlib.h>
45 #include <signal.h>
46 #include <unistd.h>
47 #include <string.h>
48 #include <assert.h>
49 #include <stdint.h>
50 #include <stddef.h>
51 #include <limits.h>
52 #include <errno.h>
53
54 #include <sys/mman.h>
55 #if !defined(__HAIKU__) && !defined(__OpenBSD__)
56 #include <sys/syscall.h>
57 #endif
58 #ifndef USEMMAP
59 #include <sys/shm.h>
60 #endif
61 #include <sys/wait.h>
62 #include <sys/types.h>
63
64 #if !__GNUC__
65 #include "llvm/Config/llvm-config.h"
66 #endif
67
68 #ifdef __linux__
69 #include "snapshot-inl.h"
70 #endif
71
72 /* This is a somewhat ugly hack for the experimental 'trace-pc-guard' mode.
73 Basically, we need to make sure that the forkserver is initialized after
74 the LLVM-generated runtime initialization pass, not before. */
75
76 #ifndef MAP_FIXED_NOREPLACE
77 #ifdef MAP_EXCL
#define MAP_FIXED_NOREPLACE (MAP_EXCL | MAP_FIXED)
79 #else
80 #define MAP_FIXED_NOREPLACE MAP_FIXED
81 #endif
82 #endif
83
84 #define CTOR_PRIO 3
85 #define EARLY_FS_PRIO 5
86
87 #include <sys/mman.h>
88 #include <fcntl.h>
89
90 /* Globals needed by the injected instrumentation. The __afl_area_initial region
91 is used for instrumentation output before __afl_map_shm() has a chance to
92 run. It will end up as .comm, so it shouldn't be too wasteful. */
93
94 #if defined(__HAIKU__)
95 extern ssize_t _kern_write(int fd, off_t pos, const void *buffer,
96 size_t bufferSize);
97 #endif // HAIKU
98
99 char *strcasestr(const char *haystack, const char *needle);
100
101 static u8 __afl_area_initial[MAP_INITIAL_SIZE];
102 static u8 *__afl_area_ptr_dummy = __afl_area_initial;
103 static u8 *__afl_area_ptr_backup = __afl_area_initial;
104
105 u8 *__afl_area_ptr = __afl_area_initial;
106 u8 *__afl_dictionary;
107 u8 *__afl_fuzz_ptr;
108 static u32 __afl_fuzz_len_dummy;
109 u32 *__afl_fuzz_len = &__afl_fuzz_len_dummy;
110 int __afl_sharedmem_fuzzing __attribute__((weak));
111
112 u32 __afl_final_loc;
113 u32 __afl_map_size = MAP_SIZE;
114 u32 __afl_dictionary_len;
115 u64 __afl_map_addr;
116 u32 __afl_first_final_loc;
117
118 #ifdef __AFL_CODE_COVERAGE
119 typedef struct afl_module_info_t afl_module_info_t;
120
121 struct afl_module_info_t {
122
123 // A unique id starting with 0
124 u32 id;
125
126 // Name and base address of the module
127 char *name;
128 uintptr_t base_address;
129
130 // PC Guard start/stop
131 u32 *start;
132 u32 *stop;
133
134 // PC Table begin/end
135 const uintptr_t *pcs_beg;
136 const uintptr_t *pcs_end;
137
138 u8 mapped;
139
140 afl_module_info_t *next;
141
142 };
143
144 typedef struct {
145
146 uintptr_t PC, PCFlags;
147
148 } PCTableEntry;
149
150 afl_module_info_t *__afl_module_info = NULL;
151
152 u32 __afl_pcmap_size = 0;
153 uintptr_t *__afl_pcmap_ptr = NULL;
154
155 typedef struct {
156
157 uintptr_t start;
158 u32 len;
159
160 } FilterPCEntry;
161
162 u32 __afl_filter_pcs_size = 0;
163 FilterPCEntry *__afl_filter_pcs = NULL;
164 u8 *__afl_filter_pcs_module = NULL;
165
166 #endif // __AFL_CODE_COVERAGE
167
168 /* 1 if we are running in afl, and the forkserver was started, else 0 */
169 u32 __afl_connected = 0;
170
171 // for the __AFL_COVERAGE_ON/__AFL_COVERAGE_OFF features to work:
172 int __afl_selective_coverage __attribute__((weak));
173 int __afl_selective_coverage_start_off __attribute__((weak));
174 static int __afl_selective_coverage_temp = 1;
175
176 #if defined(__ANDROID__) || defined(__HAIKU__) || defined(NO_TLS)
177 PREV_LOC_T __afl_prev_loc[NGRAM_SIZE_MAX];
178 PREV_LOC_T __afl_prev_caller[CTX_MAX_K];
179 u32 __afl_prev_ctx;
180 #else
181 __thread PREV_LOC_T __afl_prev_loc[NGRAM_SIZE_MAX];
182 __thread PREV_LOC_T __afl_prev_caller[CTX_MAX_K];
183 __thread u32 __afl_prev_ctx;
184 #endif
185
186 struct cmp_map *__afl_cmp_map;
187 struct cmp_map *__afl_cmp_map_backup;
188
189 /* Child pid? */
190
191 static s32 child_pid;
192 static void (*old_sigterm_handler)(int) = 0;
193
194 /* Running in persistent mode? */
195
196 static u8 is_persistent;
197
198 /* Are we in sancov mode? */
199
200 static u8 _is_sancov;
201
202 /* Debug? */
203
204 /*static*/ u32 __afl_debug;
205
206 /* Already initialized markers */
207
208 u32 __afl_already_initialized_shm;
209 u32 __afl_already_initialized_forkserver;
210 u32 __afl_already_initialized_first;
211 u32 __afl_already_initialized_second;
212 u32 __afl_already_initialized_early;
213 u32 __afl_already_initialized_init;
214
215 /* Dummy pipe for area_is_valid() */
216
217 static int __afl_dummy_fd[2] = {2, 2};
218
219 /* ensure we kill the child on termination */
220
static void at_exit(int signal) {
222
223 if (unlikely(child_pid > 0)) {
224
225 kill(child_pid, SIGKILL);
226 waitpid(child_pid, NULL, 0);
227 child_pid = -1;
228
229 }
230
231 _exit(0);
232
233 }
234
235 #define default_hash(a, b) XXH3_64bits(a, b)
236
237 /* Uninspired gcc plugin instrumentation */
238
void __afl_trace(const u32 x) {
240
241 PREV_LOC_T prev = __afl_prev_loc[0];
242 __afl_prev_loc[0] = (x >> 1);
243
244 u8 *p = &__afl_area_ptr[prev ^ x];
245
246 #if 1 /* enable for neverZero feature. */
247 #if __GNUC__
248 u8 c = __builtin_add_overflow(*p, 1, p);
249 *p += c;
250 #else
251 *p += 1 + ((u8)(1 + *p) == 0);
252 #endif
253 #else
254 ++*p;
255 #endif
256
257 return;
258
259 }
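
/* Illustrative sketch (not part of the runtime): how the NeverZero update in
   __afl_trace() above behaves. With a plain `++*p`, a counter that wraps from
   255 to 0 looks like "edge never hit" to afl-fuzz; the carry trick skips the
   zero value. This assumes a GCC/Clang-style __builtin_add_overflow(), as in
   the #if branch above. */
#if 0
  #include <stdint.h>
  #include <stdio.h>

static void neverzero_inc(uint8_t *p) {

  uint8_t carry = __builtin_add_overflow(*p, 1, p);  // carry is 1 only on wrap
  *p += carry;                                       // so 255 -> 1, never 0

}

int main(void) {

  uint8_t v = 254;
  neverzero_inc(&v);           // v == 255
  neverzero_inc(&v);           // v == 1, the zero value is skipped
  printf("%u\n", v);
  return 0;

}

#endif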
260
261 /* Error reporting to forkserver controller */
262
static void send_forkserver_error(int error) {
264
265 u32 status;
266 if (!error || error > 0xffff) return;
267 status = (FS_OPT_ERROR | FS_OPT_SET_ERROR(error));
268 if (write(FORKSRV_FD + 1, (char *)&status, 4) != 4) { return; }
269
270 }
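
/* Illustrative sketch (not part of the runtime): the error report above is a
   single 4-byte status word with the FS_OPT_ERROR flag set and the error code
   packed in via FS_OPT_SET_ERROR(). The FS_OPT_GET_ERROR() name below is
   assumed to be the matching unpacking macro from types.h and is shown here
   only to make the encoding concrete. */
#if 0
static void encode_and_decode(int error) {

  u32 status = (FS_OPT_ERROR | FS_OPT_SET_ERROR(error));  // what this runtime sends

  if (status & FS_OPT_ERROR) {

    int decoded = FS_OPT_GET_ERROR(status);               // what the fuzzer side reads back
    fprintf(stderr, "forkserver reported error %d\n", decoded);

  }

}

#endif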
271
272 /* SHM fuzzing setup. */
273
static void __afl_map_shm_fuzz() {
275
276 char *id_str = getenv(SHM_FUZZ_ENV_VAR);
277
278 if (__afl_debug) {
279
280 fprintf(stderr, "DEBUG: fuzzcase shmem %s\n", id_str ? id_str : "none");
281
282 }
283
284 if (id_str) {
285
286 u8 *map = NULL;
287
288 #ifdef USEMMAP
289 const char *shm_file_path = id_str;
290 int shm_fd = -1;
291
292 /* create the shared memory segment as if it was a file */
293 shm_fd = shm_open(shm_file_path, O_RDWR, DEFAULT_PERMISSION);
294 if (shm_fd == -1) {
295
296 fprintf(stderr, "shm_open() failed for fuzz\n");
297 send_forkserver_error(FS_ERROR_SHM_OPEN);
298 exit(1);
299
300 }
301
302 map =
303 (u8 *)mmap(0, MAX_FILE + sizeof(u32), PROT_READ, MAP_SHARED, shm_fd, 0);
304
305 #else
306 u32 shm_id = atoi(id_str);
307 map = (u8 *)shmat(shm_id, NULL, 0);
308
309 #endif
310
311 /* Whooooops. */
312
313 if (!map || map == (void *)-1) {
314
315 perror("Could not access fuzzing shared memory");
316 send_forkserver_error(FS_ERROR_SHM_OPEN);
317 exit(1);
318
319 }
320
321 __afl_fuzz_len = (u32 *)map;
322 __afl_fuzz_ptr = map + sizeof(u32);
323
324 if (__afl_debug) {
325
326 fprintf(stderr, "DEBUG: successfully got fuzzing shared memory\n");
327
328 }
329
330 } else {
331
332 fprintf(stderr, "Error: variable for fuzzing shared memory is not set\n");
333 send_forkserver_error(FS_ERROR_SHM_OPEN);
334 exit(1);
335
336 }
337
338 }
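
/* Illustrative sketch (not part of the runtime): the fuzzing shmem mapped
   above is a 32-bit length immediately followed by the testcase bytes, which
   is why __afl_fuzz_len points at the start of the mapping and __afl_fuzz_ptr
   at offset sizeof(u32). A consumer inside the target would read it roughly
   like this; process_input() is a hypothetical harness entry point. */
#if 0
static void consume_current_testcase(void) {

  u32 len = *__afl_fuzz_len;               // length written by afl-fuzz for this run
  u8 *data = __afl_fuzz_ptr;               // testcase bytes, at most MAX_FILE of them

  if (len > MAX_FILE) { len = MAX_FILE; }  // defensive clamp, mirrors afl-fuzz limits

  process_input(data, len);                // hypothetical target entry point

}

#endif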
339
340 /* SHM setup. */
341
static void __afl_map_shm(void) {
343
344 if (__afl_already_initialized_shm) return;
345 __afl_already_initialized_shm = 1;
346
347 // if we are not running in afl ensure the map exists
348 if (!__afl_area_ptr) { __afl_area_ptr = __afl_area_ptr_dummy; }
349
350 char *id_str = getenv(SHM_ENV_VAR);
351
352 if (__afl_final_loc) {
353
354 __afl_map_size = ++__afl_final_loc; // as we count starting 0
355
356 if (getenv("AFL_DUMP_MAP_SIZE")) {
357
358 printf("%u\n", __afl_map_size);
359 exit(-1);
360
361 }
362
363 if (__afl_final_loc > MAP_SIZE) {
364
365 char *ptr;
366 u32 val = 0;
367 if ((ptr = getenv("AFL_MAP_SIZE")) != NULL) { val = atoi(ptr); }
368 if (val < __afl_final_loc) {
369
370 if (__afl_final_loc > FS_OPT_MAX_MAPSIZE) {
371
372 if (!getenv("AFL_QUIET"))
373 fprintf(stderr,
374 "Error: AFL++ tools *require* to set AFL_MAP_SIZE to %u "
375 "to be able to run this instrumented program!\n",
376 __afl_final_loc);
377
378 if (id_str) {
379
380 send_forkserver_error(FS_ERROR_MAP_SIZE);
381 exit(-1);
382
383 }
384
385 } else {
386
387 if (__afl_final_loc > MAP_INITIAL_SIZE && !getenv("AFL_QUIET")) {
388
389 fprintf(stderr,
390 "Warning: AFL++ tools might need to set AFL_MAP_SIZE to %u "
391 "to be able to run this instrumented program if this "
392 "crashes!\n",
393 __afl_final_loc);
394
395 }
396
397 }
398
399 }
400
401 }
402
403 } else {
404
405 if (getenv("AFL_DUMP_MAP_SIZE")) {
406
407 printf("%u\n", MAP_SIZE);
408 exit(-1);
409
410 }
411
412 }
413
414 if (__afl_sharedmem_fuzzing && (!id_str || !getenv(SHM_FUZZ_ENV_VAR) ||
415 fcntl(FORKSRV_FD, F_GETFD) == -1 ||
416 fcntl(FORKSRV_FD + 1, F_GETFD) == -1)) {
417
418 if (__afl_debug) {
419
420 fprintf(stderr,
421 "DEBUG: running not inside afl-fuzz, disabling shared memory "
422 "testcases\n");
423
424 }
425
426 __afl_sharedmem_fuzzing = 0;
427
428 }
429
430 if (!id_str) {
431
432 u32 val = 0;
433 u8 *ptr;
434
435 if ((ptr = getenv("AFL_MAP_SIZE")) != NULL) { val = atoi(ptr); }
436
437 if (val > MAP_INITIAL_SIZE) {
438
439 __afl_map_size = val;
440
441 } else {
442
443 if (__afl_first_final_loc > MAP_INITIAL_SIZE) {
444
445 // done in second stage constructor
446 __afl_map_size = __afl_first_final_loc;
447
448 } else {
449
450 __afl_map_size = MAP_INITIAL_SIZE;
451
452 }
453
454 }
455
456 if (__afl_map_size > MAP_INITIAL_SIZE && __afl_final_loc < __afl_map_size) {
457
458 __afl_final_loc = __afl_map_size;
459
460 }
461
462 if (__afl_debug) {
463
464 fprintf(stderr, "DEBUG: (0) init map size is %u to %p\n", __afl_map_size,
465 __afl_area_ptr_dummy);
466
467 }
468
469 }
470
471 /* If we're running under AFL, attach to the appropriate region, replacing the
472 early-stage __afl_area_initial region that is needed to allow some really
473 hacky .init code to work correctly in projects such as OpenSSL. */
474
475 if (__afl_debug) {
476
477 fprintf(
478 stderr,
479 "DEBUG: (1) id_str %s, __afl_area_ptr %p, __afl_area_initial %p, "
480 "__afl_area_ptr_dummy %p, __afl_map_addr 0x%llx, MAP_SIZE %u, "
481 "__afl_final_loc %u, __afl_map_size %u, max_size_forkserver %u/0x%x\n",
482 id_str == NULL ? "<null>" : id_str, __afl_area_ptr, __afl_area_initial,
483 __afl_area_ptr_dummy, __afl_map_addr, MAP_SIZE, __afl_final_loc,
484 __afl_map_size, FS_OPT_MAX_MAPSIZE, FS_OPT_MAX_MAPSIZE);
485
486 }
487
488 if (id_str) {
489
490 if (__afl_area_ptr && __afl_area_ptr != __afl_area_initial &&
491 __afl_area_ptr != __afl_area_ptr_dummy) {
492
493 if (__afl_map_addr) {
494
495 munmap((void *)__afl_map_addr, __afl_final_loc);
496
497 } else {
498
499 free(__afl_area_ptr);
500
501 }
502
503 __afl_area_ptr = __afl_area_ptr_dummy;
504
505 }
506
507 #ifdef USEMMAP
508 const char *shm_file_path = id_str;
509 int shm_fd = -1;
510 unsigned char *shm_base = NULL;
511
512 /* create the shared memory segment as if it was a file */
513 shm_fd = shm_open(shm_file_path, O_RDWR, DEFAULT_PERMISSION);
514 if (shm_fd == -1) {
515
516 fprintf(stderr, "shm_open() failed\n");
517 send_forkserver_error(FS_ERROR_SHM_OPEN);
518 exit(1);
519
520 }
521
522 /* map the shared memory segment to the address space of the process */
523 if (__afl_map_addr) {
524
525 shm_base =
526 mmap((void *)__afl_map_addr, __afl_map_size, PROT_READ | PROT_WRITE,
527 MAP_FIXED_NOREPLACE | MAP_SHARED, shm_fd, 0);
528
529 } else {
530
531 shm_base = mmap(0, __afl_map_size, PROT_READ | PROT_WRITE, MAP_SHARED,
532 shm_fd, 0);
533
534 }
535
536 close(shm_fd);
537 shm_fd = -1;
538
539 if (shm_base == MAP_FAILED) {
540
541 fprintf(stderr, "mmap() failed\n");
542 perror("mmap for map");
543
544 if (__afl_map_addr)
545 send_forkserver_error(FS_ERROR_MAP_ADDR);
546 else
547 send_forkserver_error(FS_ERROR_MMAP);
548
549 exit(2);
550
551 }
552
553 __afl_area_ptr = shm_base;
554 #else
555 u32 shm_id = atoi(id_str);
556
557 if (__afl_map_size && __afl_map_size > MAP_SIZE) {
558
559 u8 *map_env = (u8 *)getenv("AFL_MAP_SIZE");
560 if (!map_env || atoi((char *)map_env) < MAP_SIZE) {
561
562 fprintf(stderr, "FS_ERROR_MAP_SIZE\n");
563 send_forkserver_error(FS_ERROR_MAP_SIZE);
564 _exit(1);
565
566 }
567
568 }
569
570 __afl_area_ptr = (u8 *)shmat(shm_id, (void *)__afl_map_addr, 0);
571
572 /* Whooooops. */
573
574 if (!__afl_area_ptr || __afl_area_ptr == (void *)-1) {
575
576 if (__afl_map_addr)
577 send_forkserver_error(FS_ERROR_MAP_ADDR);
578 else
579 send_forkserver_error(FS_ERROR_SHMAT);
580
581 perror("shmat for map");
582 _exit(1);
583
584 }
585
586 #endif
587
588 /* Write something into the bitmap so that even with low AFL_INST_RATIO,
589 our parent doesn't give up on us. */
590
591 __afl_area_ptr[0] = 1;
592
593 } else if ((!__afl_area_ptr || __afl_area_ptr == __afl_area_initial) &&
594
595 __afl_map_addr) {
596
597 __afl_area_ptr = (u8 *)mmap(
598 (void *)__afl_map_addr, __afl_map_size, PROT_READ | PROT_WRITE,
599 MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
600
601 if (__afl_area_ptr == MAP_FAILED) {
602
603 fprintf(stderr, "can not acquire mmap for address %p\n",
604 (void *)__afl_map_addr);
605 send_forkserver_error(FS_ERROR_SHM_OPEN);
606 exit(1);
607
608 }
609
610 } else if (__afl_final_loc > MAP_INITIAL_SIZE &&
611
612 __afl_final_loc > __afl_first_final_loc) {
613
614 if (__afl_area_initial != __afl_area_ptr_dummy) {
615
616 free(__afl_area_ptr_dummy);
617
618 }
619
620 __afl_area_ptr_dummy = (u8 *)malloc(__afl_final_loc);
621 __afl_area_ptr = __afl_area_ptr_dummy;
622 __afl_map_size = __afl_final_loc;
623
624 if (!__afl_area_ptr_dummy) {
625
626 fprintf(stderr,
627 "Error: AFL++ could not acquire %u bytes of memory, exiting!\n",
628 __afl_final_loc);
629 exit(-1);
630
631 }
632
633 } // else: nothing to be done
634
635 __afl_area_ptr_backup = __afl_area_ptr;
636
637 if (__afl_debug) {
638
639 fprintf(stderr,
640 "DEBUG: (2) id_str %s, __afl_area_ptr %p, __afl_area_initial %p, "
641 "__afl_area_ptr_dummy %p, __afl_map_addr 0x%llx, MAP_SIZE "
642 "%u, __afl_final_loc %u, __afl_map_size %u, "
643 "max_size_forkserver %u/0x%x\n",
644 id_str == NULL ? "<null>" : id_str, __afl_area_ptr,
645 __afl_area_initial, __afl_area_ptr_dummy, __afl_map_addr, MAP_SIZE,
646 __afl_final_loc, __afl_map_size, FS_OPT_MAX_MAPSIZE,
647 FS_OPT_MAX_MAPSIZE);
648
649 }
650
651 if (__afl_selective_coverage) {
652
653 if (__afl_map_size > MAP_INITIAL_SIZE) {
654
655 __afl_area_ptr_dummy = (u8 *)malloc(__afl_map_size);
656
657 if (__afl_area_ptr_dummy) {
658
659 if (__afl_selective_coverage_start_off) {
660
661 __afl_area_ptr = __afl_area_ptr_dummy;
662
663 }
664
665 } else {
666
667 fprintf(stderr, "Error: __afl_selective_coverage failed!\n");
668 __afl_selective_coverage = 0;
669 // continue;
670
671 }
672
673 }
674
675 }
676
677 id_str = getenv(CMPLOG_SHM_ENV_VAR);
678
679 if (__afl_debug) {
680
681 fprintf(stderr, "DEBUG: cmplog id_str %s\n",
682 id_str == NULL ? "<null>" : id_str);
683
684 }
685
686 if (id_str) {
687
688 // /dev/null doesn't work so we use /dev/urandom
689 if ((__afl_dummy_fd[1] = open("/dev/urandom", O_WRONLY)) < 0) {
690
691 if (pipe(__afl_dummy_fd) < 0) { __afl_dummy_fd[1] = 1; }
692
693 }
694
695 #ifdef USEMMAP
696 const char *shm_file_path = id_str;
697 int shm_fd = -1;
698 struct cmp_map *shm_base = NULL;
699
700 /* create the shared memory segment as if it was a file */
701 shm_fd = shm_open(shm_file_path, O_RDWR, DEFAULT_PERMISSION);
702 if (shm_fd == -1) {
703
704 perror("shm_open() failed\n");
705 send_forkserver_error(FS_ERROR_SHM_OPEN);
706 exit(1);
707
708 }
709
710 /* map the shared memory segment to the address space of the process */
711 shm_base = mmap(0, sizeof(struct cmp_map), PROT_READ | PROT_WRITE,
712 MAP_SHARED, shm_fd, 0);
713 if (shm_base == MAP_FAILED) {
714
715 close(shm_fd);
716 shm_fd = -1;
717
718 fprintf(stderr, "mmap() failed\n");
719 send_forkserver_error(FS_ERROR_SHM_OPEN);
720 exit(2);
721
722 }
723
724 __afl_cmp_map = shm_base;
725 #else
726 u32 shm_id = atoi(id_str);
727
728 __afl_cmp_map = (struct cmp_map *)shmat(shm_id, NULL, 0);
729 #endif
730
731 __afl_cmp_map_backup = __afl_cmp_map;
732
733 if (!__afl_cmp_map || __afl_cmp_map == (void *)-1) {
734
735 perror("shmat for cmplog");
736 send_forkserver_error(FS_ERROR_SHM_OPEN);
737 _exit(1);
738
739 }
740
741 }
742
743 #ifdef __AFL_CODE_COVERAGE
744 char *pcmap_id_str = getenv("__AFL_PCMAP_SHM_ID");
745
746 if (pcmap_id_str) {
747
748 __afl_pcmap_size = __afl_map_size * sizeof(void *);
749 u32 shm_id = atoi(pcmap_id_str);
750
751 __afl_pcmap_ptr = (uintptr_t *)shmat(shm_id, NULL, 0);
752
753 if (__afl_debug) {
754
755 fprintf(stderr, "DEBUG: Received %p via shmat for pcmap\n",
756 __afl_pcmap_ptr);
757
758 }
759
760 }
761
762 #endif // __AFL_CODE_COVERAGE
763
764 }
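
/* Illustrative sketch (not part of the runtime): roughly what the fuzzer side
   of the SysV path does before starting the target, i.e. create a segment,
   export its id via SHM_ENV_VAR and attach its own view of the coverage
   bitmap. This is a simplified, assumption-level sketch and not the actual
   afl-fuzz shared-memory code. */
#if 0
static u8 *setup_shm_for_target(size_t map_size) {

  int shm_id = shmget(IPC_PRIVATE, map_size, IPC_CREAT | IPC_EXCL | 0600);
  if (shm_id < 0) { return NULL; }

  char id_str[32];
  snprintf(id_str, sizeof(id_str), "%d", shm_id);
  setenv(SHM_ENV_VAR, id_str, 1);       // the target picks this up in __afl_map_shm()

  return (u8 *)shmat(shm_id, NULL, 0);  // fuzzer's own mapping of the bitmap

}

#endif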
765
766 /* unmap SHM. */
767
static void __afl_unmap_shm(void) {
769
770 if (!__afl_already_initialized_shm) return;
771
772 #ifdef __AFL_CODE_COVERAGE
773 if (__afl_pcmap_size) {
774
775 shmdt((void *)__afl_pcmap_ptr);
776 __afl_pcmap_ptr = NULL;
777 __afl_pcmap_size = 0;
778
779 }
780
781 #endif // __AFL_CODE_COVERAGE
782
783 char *id_str = getenv(SHM_ENV_VAR);
784
785 if (id_str) {
786
787 #ifdef USEMMAP
788
789 munmap((void *)__afl_area_ptr, __afl_map_size);
790
791 #else
792
793 shmdt((void *)__afl_area_ptr);
794
795 #endif
796
797 } else if ((!__afl_area_ptr || __afl_area_ptr == __afl_area_initial) &&
798
799 __afl_map_addr) {
800
801 munmap((void *)__afl_map_addr, __afl_map_size);
802
803 }
804
805 __afl_area_ptr = __afl_area_ptr_dummy;
806
807 id_str = getenv(CMPLOG_SHM_ENV_VAR);
808
809 if (id_str) {
810
811 #ifdef USEMMAP
812
813 munmap((void *)__afl_cmp_map, __afl_map_size);
814
815 #else
816
817 shmdt((void *)__afl_cmp_map);
818
819 #endif
820
821 __afl_cmp_map = NULL;
822 __afl_cmp_map_backup = NULL;
823
824 }
825
826 __afl_already_initialized_shm = 0;
827
828 }
829
830 #define write_error(text) write_error_with_location(text, __FILE__, __LINE__)
831
void write_error_with_location(char *text, char *filename, int linenumber) {
833
834 u8 *o = getenv("__AFL_OUT_DIR");
835 char *e = strerror(errno);
836
837 if (o) {
838
839 char buf[4096];
840 snprintf(buf, sizeof(buf), "%s/error.txt", o);
841 FILE *f = fopen(buf, "a");
842
843 if (f) {
844
845 fprintf(f, "File %s, line %d: Error(%s): %s\n", filename, linenumber,
846 text, e);
847 fclose(f);
848
849 }
850
851 }
852
853 fprintf(stderr, "File %s, line %d: Error(%s): %s\n", filename, linenumber,
854 text, e);
855
856 }
857
858 #ifdef __linux__
static void __afl_start_snapshots(void) {
860
861 static u8 tmp[4] = {0, 0, 0, 0};
862 u32 status = 0;
863 u32 already_read_first = 0;
864 u32 was_killed;
865
866 u8 child_stopped = 0;
867
868 void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL);
869
870 /* Phone home and tell the parent that we're OK. If parent isn't there,
871 assume we're not running in forkserver mode and just execute program. */
872
873 status |= (FS_OPT_ENABLED | FS_OPT_SNAPSHOT | FS_OPT_NEWCMPLOG);
874 if (__afl_sharedmem_fuzzing) { status |= FS_OPT_SHDMEM_FUZZ; }
875 if (__afl_map_size <= FS_OPT_MAX_MAPSIZE)
876 status |= (FS_OPT_SET_MAPSIZE(__afl_map_size) | FS_OPT_MAPSIZE);
877 if (__afl_dictionary_len && __afl_dictionary) { status |= FS_OPT_AUTODICT; }
878 memcpy(tmp, &status, 4);
879
880 if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; }
881
882 if (__afl_sharedmem_fuzzing || (__afl_dictionary_len && __afl_dictionary)) {
883
884 if (read(FORKSRV_FD, &was_killed, 4) != 4) {
885
886 write_error("read to afl-fuzz");
887 _exit(1);
888
889 }
890
891 if (__afl_debug) {
892
893 fprintf(stderr, "DEBUG: target forkserver recv: %08x\n", was_killed);
894
895 }
896
897 if ((was_killed & (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) ==
898 (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) {
899
900 __afl_map_shm_fuzz();
901
902 }
903
904 if ((was_killed & (FS_OPT_ENABLED | FS_OPT_AUTODICT)) ==
905 (FS_OPT_ENABLED | FS_OPT_AUTODICT) &&
906 __afl_dictionary_len && __afl_dictionary) {
907
// great, let's pass the dictionary through the forkserver FD
909 u32 len = __afl_dictionary_len, offset = 0;
910 s32 ret;
911
912 if (write(FORKSRV_FD + 1, &len, 4) != 4) {
913
914 write(2, "Error: could not send dictionary len\n",
915 strlen("Error: could not send dictionary len\n"));
916 _exit(1);
917
918 }
919
920 while (len != 0) {
921
922 ret = write(FORKSRV_FD + 1, __afl_dictionary + offset, len);
923
924 if (ret < 1) {
925
926 write(2, "Error: could not send dictionary\n",
927 strlen("Error: could not send dictionary\n"));
928 _exit(1);
929
930 }
931
932 len -= ret;
933 offset += ret;
934
935 }
936
937 } else {
938
// this forkserver does not understand extended option passing,
// or it does not want the dictionary
941 if (!__afl_fuzz_ptr) already_read_first = 1;
942
943 }
944
945 }
946
947 while (1) {
948
949 int status;
950
951 if (already_read_first) {
952
953 already_read_first = 0;
954
955 } else {
956
957 /* Wait for parent by reading from the pipe. Abort if read fails. */
958 if (read(FORKSRV_FD, &was_killed, 4) != 4) {
959
960 write_error("reading from afl-fuzz");
961 _exit(1);
962
963 }
964
965 }
966
967 #ifdef _AFL_DOCUMENT_MUTATIONS
968 if (__afl_fuzz_ptr) {
969
970 static uint32_t counter = 0;
971 char fn[32];
972 sprintf(fn, "%09u:forkserver", counter);
973 s32 fd_doc = open(fn, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
974 if (fd_doc >= 0) {
975
976 if (write(fd_doc, __afl_fuzz_ptr, *__afl_fuzz_len) != *__afl_fuzz_len) {
977
978 fprintf(stderr, "write of mutation file failed: %s\n", fn);
979 unlink(fn);
980
981 }
982
983 close(fd_doc);
984
985 }
986
987 counter++;
988
989 }
990
991 #endif
992
993 /* If we stopped the child in persistent mode, but there was a race
994 condition and afl-fuzz already issued SIGKILL, write off the old
995 process. */
996
997 if (child_stopped && was_killed) {
998
999 child_stopped = 0;
1000 if (waitpid(child_pid, &status, 0) < 0) {
1001
1002 write_error("child_stopped && was_killed");
1003 _exit(1); // TODO why exit?
1004
1005 }
1006
1007 }
1008
1009 if (!child_stopped) {
1010
1011 /* Once woken up, create a clone of our process. */
1012
1013 child_pid = fork();
1014 if (child_pid < 0) {
1015
1016 write_error("fork");
1017 _exit(1);
1018
1019 }
1020
1021 /* In child process: close fds, resume execution. */
1022
1023 if (!child_pid) {
1024
1025 //(void)nice(-20); // does not seem to improve
1026
1027 signal(SIGCHLD, old_sigchld_handler);
1028 signal(SIGTERM, old_sigterm_handler);
1029
1030 close(FORKSRV_FD);
1031 close(FORKSRV_FD + 1);
1032
1033 if (!afl_snapshot_take(AFL_SNAPSHOT_MMAP | AFL_SNAPSHOT_FDS |
1034 AFL_SNAPSHOT_REGS | AFL_SNAPSHOT_EXIT)) {
1035
1036 raise(SIGSTOP);
1037
1038 }
1039
1040 __afl_area_ptr[0] = 1;
1041 memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
1042
1043 return;
1044
1045 }
1046
1047 } else {
1048
1049 /* Special handling for persistent mode: if the child is alive but
1050 currently stopped, simply restart it with SIGCONT. */
1051
1052 kill(child_pid, SIGCONT);
1053 child_stopped = 0;
1054
1055 }
1056
1057 /* In parent process: write PID to pipe, then wait for child. */
1058
1059 if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) {
1060
1061 write_error("write to afl-fuzz");
1062 _exit(1);
1063
1064 }
1065
1066 if (waitpid(child_pid, &status, WUNTRACED) < 0) {
1067
1068 write_error("waitpid");
1069 _exit(1);
1070
1071 }
1072
1073 /* In persistent mode, the child stops itself with SIGSTOP to indicate
1074 a successful run. In this case, we want to wake it up without forking
1075 again. */
1076
1077 if (WIFSTOPPED(status)) child_stopped = 1;
1078
1079 /* Relay wait status to pipe, then loop back. */
1080
1081 if (write(FORKSRV_FD + 1, &status, 4) != 4) {
1082
1083 write_error("writing to afl-fuzz");
1084 _exit(1);
1085
1086 }
1087
1088 }
1089
1090 }
1091
1092 #endif
1093
1094 /* Fork server logic. */
1095
static void __afl_start_forkserver(void) {
1097
1098 if (__afl_already_initialized_forkserver) return;
1099 __afl_already_initialized_forkserver = 1;
1100
1101 struct sigaction orig_action;
1102 sigaction(SIGTERM, NULL, &orig_action);
1103 old_sigterm_handler = orig_action.sa_handler;
1104 signal(SIGTERM, at_exit);
1105
1106 #ifdef __linux__
1107 if (/*!is_persistent &&*/ !__afl_cmp_map && !getenv("AFL_NO_SNAPSHOT") &&
1108 afl_snapshot_init() >= 0) {
1109
1110 __afl_start_snapshots();
1111 return;
1112
1113 }
1114
1115 #endif
1116
1117 u8 tmp[4] = {0, 0, 0, 0};
1118 u32 status_for_fsrv = 0;
1119 u32 already_read_first = 0;
1120 u32 was_killed;
1121
1122 u8 child_stopped = 0;
1123
1124 void (*old_sigchld_handler)(int) = signal(SIGCHLD, SIG_DFL);
1125
1126 if (__afl_map_size <= FS_OPT_MAX_MAPSIZE) {
1127
1128 status_for_fsrv |= (FS_OPT_SET_MAPSIZE(__afl_map_size) | FS_OPT_MAPSIZE);
1129
1130 }
1131
1132 if (__afl_dictionary_len && __afl_dictionary) {
1133
1134 status_for_fsrv |= FS_OPT_AUTODICT;
1135
1136 }
1137
1138 if (__afl_sharedmem_fuzzing) { status_for_fsrv |= FS_OPT_SHDMEM_FUZZ; }
1139 if (status_for_fsrv) {
1140
1141 status_for_fsrv |= (FS_OPT_ENABLED | FS_OPT_NEWCMPLOG);
1142
1143 }
1144
1145 memcpy(tmp, &status_for_fsrv, 4);
1146
1147 /* Phone home and tell the parent that we're OK. If parent isn't there,
1148 assume we're not running in forkserver mode and just execute program. */
1149
1150 if (write(FORKSRV_FD + 1, tmp, 4) != 4) { return; }
1151
1152 __afl_connected = 1;
1153
1154 if (__afl_sharedmem_fuzzing || (__afl_dictionary_len && __afl_dictionary)) {
1155
1156 if (read(FORKSRV_FD, &was_killed, 4) != 4) _exit(1);
1157
1158 if (__afl_debug) {
1159
1160 fprintf(stderr, "DEBUG: target forkserver recv: %08x\n", was_killed);
1161
1162 }
1163
1164 if ((was_killed & (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) ==
1165 (FS_OPT_ENABLED | FS_OPT_SHDMEM_FUZZ)) {
1166
1167 __afl_map_shm_fuzz();
1168
1169 }
1170
1171 if ((was_killed & (FS_OPT_ENABLED | FS_OPT_AUTODICT)) ==
1172 (FS_OPT_ENABLED | FS_OPT_AUTODICT) &&
1173 __afl_dictionary_len && __afl_dictionary) {
1174
// great, let's pass the dictionary through the forkserver FD
1176 u32 len = __afl_dictionary_len, offset = 0;
1177
1178 if (write(FORKSRV_FD + 1, &len, 4) != 4) {
1179
1180 write(2, "Error: could not send dictionary len\n",
1181 strlen("Error: could not send dictionary len\n"));
1182 _exit(1);
1183
1184 }
1185
1186 while (len != 0) {
1187
1188 s32 ret;
1189 ret = write(FORKSRV_FD + 1, __afl_dictionary + offset, len);
1190
1191 if (ret < 1) {
1192
1193 write(2, "Error: could not send dictionary\n",
1194 strlen("Error: could not send dictionary\n"));
1195 _exit(1);
1196
1197 }
1198
1199 len -= ret;
1200 offset += ret;
1201
1202 }
1203
1204 } else {
1205
// this forkserver does not understand extended option passing,
// or it does not want the dictionary
1208 if (!__afl_fuzz_ptr) already_read_first = 1;
1209
1210 }
1211
1212 }
1213
1214 while (1) {
1215
1216 int status;
1217
1218 /* Wait for parent by reading from the pipe. Abort if read fails. */
1219
1220 if (already_read_first) {
1221
1222 already_read_first = 0;
1223
1224 } else {
1225
1226 if (read(FORKSRV_FD, &was_killed, 4) != 4) {
1227
1228 // write_error("read from afl-fuzz");
1229 _exit(1);
1230
1231 }
1232
1233 }
1234
1235 #ifdef _AFL_DOCUMENT_MUTATIONS
1236 if (__afl_fuzz_ptr) {
1237
1238 static uint32_t counter = 0;
1239 char fn[32];
1240 sprintf(fn, "%09u:forkserver", counter);
1241 s32 fd_doc = open(fn, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
1242 if (fd_doc >= 0) {
1243
1244 if (write(fd_doc, __afl_fuzz_ptr, *__afl_fuzz_len) != *__afl_fuzz_len) {
1245
1246 fprintf(stderr, "write of mutation file failed: %s\n", fn);
1247 unlink(fn);
1248
1249 }
1250
1251 close(fd_doc);
1252
1253 }
1254
1255 counter++;
1256
1257 }
1258
1259 #endif
1260
1261 /* If we stopped the child in persistent mode, but there was a race
1262 condition and afl-fuzz already issued SIGKILL, write off the old
1263 process. */
1264
1265 if (child_stopped && was_killed) {
1266
1267 child_stopped = 0;
1268 if (waitpid(child_pid, &status, 0) < 0) {
1269
1270 write_error("child_stopped && was_killed");
1271 _exit(1);
1272
1273 }
1274
1275 }
1276
1277 if (!child_stopped) {
1278
1279 /* Once woken up, create a clone of our process. */
1280
1281 child_pid = fork();
1282 if (child_pid < 0) {
1283
1284 write_error("fork");
1285 _exit(1);
1286
1287 }
1288
1289 /* In child process: close fds, resume execution. */
1290
1291 if (!child_pid) {
1292
1293 //(void)nice(-20);
1294
1295 signal(SIGCHLD, old_sigchld_handler);
1296 signal(SIGTERM, old_sigterm_handler);
1297
1298 close(FORKSRV_FD);
1299 close(FORKSRV_FD + 1);
1300 return;
1301
1302 }
1303
1304 } else {
1305
1306 /* Special handling for persistent mode: if the child is alive but
1307 currently stopped, simply restart it with SIGCONT. */
1308
1309 kill(child_pid, SIGCONT);
1310 child_stopped = 0;
1311
1312 }
1313
1314 /* In parent process: write PID to pipe, then wait for child. */
1315
1316 if (write(FORKSRV_FD + 1, &child_pid, 4) != 4) {
1317
1318 write_error("write to afl-fuzz");
1319 _exit(1);
1320
1321 }
1322
1323 if (waitpid(child_pid, &status, is_persistent ? WUNTRACED : 0) < 0) {
1324
1325 write_error("waitpid");
1326 _exit(1);
1327
1328 }
1329
1330 /* In persistent mode, the child stops itself with SIGSTOP to indicate
1331 a successful run. In this case, we want to wake it up without forking
1332 again. */
1333
1334 if (WIFSTOPPED(status)) child_stopped = 1;
1335
1336 /* Relay wait status to pipe, then loop back. */
1337
1338 if (write(FORKSRV_FD + 1, &status, 4) != 4) {
1339
1340 write_error("writing to afl-fuzz");
1341 _exit(1);
1342
1343 }
1344
1345 }
1346
1347 }
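
/* Illustrative sketch (not part of the runtime): the other half of the
   handshake implemented above, as seen from a minimal controller. Per run the
   controller writes one 4-byte word (the "was_killed" value read above), then
   reads back the child pid and the waitpid() status the loop relays.
   ctl_write_fd and st_read_fd are hypothetical names for the fuzzer's ends of
   the FORKSRV_FD / FORKSRV_FD + 1 pipes. */
#if 0
static int run_one(int ctl_write_fd, int st_read_fd) {

  u32 was_killed = 0, pid = 0, status = 0;

  if (write(ctl_write_fd, &was_killed, 4) != 4) return -1;  // wake the forkserver
  if (read(st_read_fd, &pid, 4) != 4) return -1;            // pid of the fresh child
  if (read(st_read_fd, &status, 4) != 4) return -1;         // its waitpid() status

  return WIFSIGNALED(status) ? 1 : 0;                       // 1 == child died from a signal

}

#endif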
1348
1349 /* A simplified persistent mode handler, used as explained in
1350 * README.llvm.md. */
1351
int __afl_persistent_loop(unsigned int max_cnt) {
1353
1354 static u8 first_pass = 1;
1355 static u32 cycle_cnt;
1356
1357 if (first_pass) {
1358
1359 /* Make sure that every iteration of __AFL_LOOP() starts with a clean slate.
1360 On subsequent calls, the parent will take care of that, but on the first
1361 iteration, it's our job to erase any trace of whatever happened
1362 before the loop. */
1363
1364 memset(__afl_area_ptr, 0, __afl_map_size);
1365 __afl_area_ptr[0] = 1;
1366 memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
1367
1368 cycle_cnt = max_cnt;
1369 first_pass = 0;
1370 __afl_selective_coverage_temp = 1;
1371
1372 return 1;
1373
1374 } else if (--cycle_cnt) {
1375
1376 raise(SIGSTOP);
1377
1378 __afl_area_ptr[0] = 1;
1379 memset(__afl_prev_loc, 0, NGRAM_SIZE_MAX * sizeof(PREV_LOC_T));
1380 __afl_selective_coverage_temp = 1;
1381
1382 return 1;
1383
1384 } else {
1385
1386 /* When exiting __AFL_LOOP(), make sure that the subsequent code that
1387 follows the loop is not traced. We do that by pivoting back to the
1388 dummy output region. */
1389
1390 __afl_area_ptr = __afl_area_ptr_dummy;
1391
1392 return 0;
1393
1394 }
1395
1396 }
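
/* Usage sketch (not part of the runtime): __afl_persistent_loop() is normally
   reached through the __AFL_LOOP() macro from a target harness, as described
   in the persistent mode documentation. The 10000 iteration count and the
   target_one_input() function below are illustrative only. */
#if 0
__AFL_FUZZ_INIT();

int main(void) {

  #ifdef __AFL_HAVE_MANUAL_CONTROL
  __AFL_INIT();                                  // optional deferred forkserver
  #endif

  unsigned char *buf = __AFL_FUZZ_TESTCASE_BUF;  // fetch after __AFL_INIT()

  while (__AFL_LOOP(10000)) {                    // drives __afl_persistent_loop(10000)

    int len = __AFL_FUZZ_TESTCASE_LEN;
    target_one_input(buf, len);                  // hypothetical harness function

  }

  return 0;

}

#endif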
1397
1398 /* This one can be called from user code when deferred forkserver mode
1399 is enabled. */
1400
void __afl_manual_init(void) {
1402
1403 static u8 init_done;
1404
1405 if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) {
1406
1407 init_done = 1;
1408 is_persistent = 0;
1409 __afl_sharedmem_fuzzing = 0;
1410 if (__afl_area_ptr == NULL) __afl_area_ptr = __afl_area_ptr_dummy;
1411
1412 if (__afl_debug) {
1413
1414 fprintf(stderr,
1415 "DEBUG: disabled instrumentation because of "
1416 "AFL_DISABLE_LLVM_INSTRUMENTATION\n");
1417
1418 }
1419
1420 }
1421
1422 if (!init_done) {
1423
1424 __afl_start_forkserver();
1425 init_done = 1;
1426
1427 }
1428
1429 }
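
/* Usage sketch (not part of the runtime): in deferred forkserver mode the
   constructor below returns early because DEFER_ENV_VAR is set, so the
   harness decides where the fork point goes, typically after expensive
   one-time setup. __AFL_INIT() ultimately calls __afl_manual_init();
   expensive_setup() and fuzz_entry() are hypothetical placeholders. */
#if 0
int main(int argc, char **argv) {

  expensive_setup();              // config parsing, model loading, etc.; runs once

  #ifdef __AFL_HAVE_MANUAL_CONTROL
  __AFL_INIT();                   // forkserver starts here, after the setup
  #endif

  return fuzz_entry(argc, argv);  // hypothetical per-input work

}

#endif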
1430
1431 /* Initialization of the forkserver - latest possible */
1432
__attribute__((constructor())) void __afl_auto_init(void) {
1434
1435 if (__afl_already_initialized_init) { return; }
1436
1437 #ifdef __ANDROID__
1438 // Disable handlers in linker/debuggerd, check include/debuggerd/handler.h
1439 signal(SIGABRT, SIG_DFL);
1440 signal(SIGBUS, SIG_DFL);
1441 signal(SIGFPE, SIG_DFL);
1442 signal(SIGILL, SIG_DFL);
1443 signal(SIGSEGV, SIG_DFL);
1444 signal(SIGSTKFLT, SIG_DFL);
1445 signal(SIGSYS, SIG_DFL);
1446 signal(SIGTRAP, SIG_DFL);
1447 #endif
1448
1449 __afl_already_initialized_init = 1;
1450
1451 if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
1452
1453 if (getenv(DEFER_ENV_VAR)) return;
1454
1455 __afl_manual_init();
1456
1457 }
1458
1459 /* Optionally run an early forkserver */
1460
__attribute__((constructor(EARLY_FS_PRIO))) void __early_forkserver(void) {
1462
1463 if (getenv("AFL_EARLY_FORKSERVER")) { __afl_auto_init(); }
1464
1465 }
1466
1467 /* Initialization of the shmem - earliest possible because of LTO fixed mem. */
1468
__attribute__((constructor(CTOR_PRIO))) void __afl_auto_early(void) {
1470
1471 if (__afl_already_initialized_early) return;
1472 __afl_already_initialized_early = 1;
1473
1474 is_persistent = !!getenv(PERSIST_ENV_VAR);
1475
1476 if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
1477
1478 __afl_map_shm();
1479
1480 }
1481
1482 /* preset __afl_area_ptr #2 */
1483
__attribute__((constructor(1))) void __afl_auto_second(void) {
1485
1486 if (__afl_already_initialized_second) return;
1487 __afl_already_initialized_second = 1;
1488
1489 if (getenv("AFL_DEBUG")) {
1490
1491 __afl_debug = 1;
1492 fprintf(stderr, "DEBUG: debug enabled\n");
1493 fprintf(stderr, "DEBUG: AFL++ afl-compiler-rt" VERSION "\n");
1494
1495 }
1496
1497 if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
1498 u8 *ptr;
1499
1500 if (__afl_final_loc > MAP_INITIAL_SIZE) {
1501
1502 __afl_first_final_loc = __afl_final_loc + 1;
1503
1504 if (__afl_area_ptr && __afl_area_ptr != __afl_area_initial)
1505 free(__afl_area_ptr);
1506
1507 if (__afl_map_addr)
1508 ptr = (u8 *)mmap((void *)__afl_map_addr, __afl_first_final_loc,
1509 PROT_READ | PROT_WRITE,
1510 MAP_FIXED_NOREPLACE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1511 else
1512 ptr = (u8 *)malloc(__afl_first_final_loc);
1513
1514 if (ptr && (ssize_t)ptr != -1) {
1515
1516 __afl_area_ptr = ptr;
1517 __afl_area_ptr_dummy = __afl_area_ptr;
1518 __afl_area_ptr_backup = __afl_area_ptr;
1519
1520 }
1521
1522 }
1523
1524 } // ptr memleak report is a false positive
1525
1526 /* preset __afl_area_ptr #1 - at constructor level 0 global variables have
1527 not been set */
1528
__attribute__((constructor(0))) void __afl_auto_first(void) {
1530
1531 if (__afl_already_initialized_first) return;
1532 __afl_already_initialized_first = 1;
1533
1534 if (getenv("AFL_DISABLE_LLVM_INSTRUMENTATION")) return;
1535
1536 /*
1537 u8 *ptr = (u8 *)malloc(MAP_INITIAL_SIZE);
1538
1539 if (ptr && (ssize_t)ptr != -1) {
1540
1541 __afl_area_ptr = ptr;
1542 __afl_area_ptr_backup = __afl_area_ptr;
1543
1544 }
1545
1546 */
1547
1548 } // ptr memleak report is a false positive
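
/* Illustrative sketch (not part of the runtime): the constructors above rely
   on GCC/Clang priority ordering, where a smaller number runs earlier. So
   __afl_auto_first (0) precedes __afl_auto_second (1), __afl_auto_early
   (CTOR_PRIO == 3) and __early_forkserver (EARLY_FS_PRIO == 5), with the
   unprioritized __afl_auto_init running after all of them. The stand-alone
   demo below uses priorities >= 101 because 0..100 are formally reserved and
   draw a compiler warning, which the runtime above knowingly accepts. */
#if 0
  #include <stdio.h>

__attribute__((constructor(101))) static void ctor_a(void) { puts("a: 101"); }
__attribute__((constructor(102))) static void ctor_b(void) { puts("b: 102"); }
__attribute__((constructor)) static void ctor_c(void) { puts("c: default, last"); }

int main(void) { return 0; }  // prints a, b, c before main() runs

#endif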
1549
1550 /* The following stuff deals with supporting -fsanitize-coverage=trace-pc-guard.
1551 It remains non-operational in the traditional, plugin-backed LLVM mode.
1552 For more info about 'trace-pc-guard', see README.llvm.md.
1553
1554 The first function (__sanitizer_cov_trace_pc_guard) is called back on every
1555 edge (as opposed to every basic block). */
1556
void __sanitizer_cov_trace_pc_guard(uint32_t *guard) {
1558
1559 // For stability analysis, if you want to know to which function unstable
1560 // edge IDs belong - uncomment, recompile+install llvm_mode, recompile
1561 // the target. libunwind and libbacktrace are better solutions.
1562 // Set AFL_DEBUG_CHILD=1 and run afl-fuzz with 2>file to capture
1563 // the backtrace output
1564 /*
1565 uint32_t unstable[] = { ... unstable edge IDs };
1566 uint32_t idx;
void *bt[256];
for (idx = 0; idx < sizeof(unstable) / sizeof(uint32_t); idx++) {
1569
1570 if (unstable[idx] == __afl_area_ptr[*guard]) {
1571
1572 int bt_size = backtrace(bt, 256);
1573 if (bt_size > 0) {
1574
1575 char **bt_syms = backtrace_symbols(bt, bt_size);
1576 if (bt_syms) {
1577
1578 fprintf(stderr, "DEBUG: edge=%u caller=%s\n", unstable[idx],
1579 bt_syms[0]);
1580 free(bt_syms);
1581
1582 }
1583
1584 }
1585
1586 }
1587
1588 }
1589
1590 */
1591
1592 #if (LLVM_VERSION_MAJOR < 9)
1593
1594 __afl_area_ptr[*guard]++;
1595
1596 #else
1597
1598 __afl_area_ptr[*guard] =
1599 __afl_area_ptr[*guard] + 1 + (__afl_area_ptr[*guard] == 255 ? 1 : 0);
1600
1601 #endif
1602
1603 }
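
/* Illustrative sketch (not part of the runtime): a minimal stand-alone
   trace-pc-guard client, useful to see when clang invokes the two callbacks.
   Compile the snippet as its own file, build the code under test with
   `clang -fsanitize-coverage=trace-pc-guard`, and link them together instead
   of the AFL++ runtime. Printing from the hot callback is for demonstration
   only. */
#if 0
  #include <stdint.h>
  #include <stdio.h>

void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {

  static uint32_t n = 1;
  if (start == stop || *start) return;                 // module already initialized
  for (uint32_t *g = start; g < stop; g++) *g = n++;   // assign unique edge ids

}

void __sanitizer_cov_trace_pc_guard(uint32_t *guard) {

  if (*guard) fprintf(stderr, "edge %u hit\n", *guard);

}

#endif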
1604
1605 #ifdef __AFL_CODE_COVERAGE
void afl_read_pc_filter_file(const char *filter_file) {
1607
1608 FILE *file;
1609 char ch;
1610
1611 file = fopen(filter_file, "r");
1612 if (file == NULL) {
1613
1614 perror("Error opening file");
1615 return;
1616
1617 }
1618
1619 // Check how many PCs we expect to read
1620 while ((ch = fgetc(file)) != EOF) {
1621
1622 if (ch == '\n') { __afl_filter_pcs_size++; }
1623
1624 }
1625
1626 // Rewind to actually read the PCs
1627 fseek(file, 0, SEEK_SET);
1628
1629 __afl_filter_pcs = malloc(__afl_filter_pcs_size * sizeof(FilterPCEntry));
1630 if (!__afl_filter_pcs) {
1631
1632 perror("Error allocating PC array");
1633 return;
1634
1635 }
1636
1637 for (size_t i = 0; i < __afl_filter_pcs_size; i++) {
1638
1639 fscanf(file, "%lx", &(__afl_filter_pcs[i].start));
1640 ch = fgetc(file); // Read tab
1641 fscanf(file, "%u", &(__afl_filter_pcs[i].len));
1642 ch = fgetc(file); // Read tab
1643
1644 if (!__afl_filter_pcs_module) {
1645
1646 // Read the module name and store it.
1647 // TODO: We only support one module here right now although
1648 // there is technically no reason to support multiple modules
1649 // in one go.
1650 size_t max_module_len = 255;
1651 size_t i = 0;
1652 __afl_filter_pcs_module = malloc(max_module_len);
1653 while (i < max_module_len - 1 &&
1654 (__afl_filter_pcs_module[i] = fgetc(file)) != '\t') {
1655
1656 ++i;
1657
1658 }
1659
1660 __afl_filter_pcs_module[i] = '\0';
1661 fprintf(stderr, "DEBUGXXX: Read module name %s\n",
1662 __afl_filter_pcs_module);
1663
1664 }
1665
1666 while ((ch = fgetc(file)) != '\n' && ch != EOF)
1667 ;
1668
1669 }
1670
1671 fclose(file);
1672
1673 }
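
/* Illustrative sketch (not part of the runtime): the on-disk format the
   parser above expects, inferred from its fscanf()/fgetc() calls. One entry
   per line: hex start offset, TAB, decimal length, TAB, module name, TAB,
   anything else (e.g. the symbolized name), newline. A line might look like
   "1a2b30<TAB>64<TAB>libtarget.so<TAB>parse_header". The emitter below simply
   mirrors that format; the field values are made up. */
#if 0
static void emit_filter_entry(FILE *out, uintptr_t start, u32 len,
                              const char *module, const char *sym) {

  fprintf(out, "%lx\t%u\t%s\t%s\n", (unsigned long)start, len, module, sym);

}

#endif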
1674
u32 locate_in_pcs(uintptr_t needle, u32 *index) {
1676
1677 size_t lower_bound = 0;
1678 size_t upper_bound = __afl_filter_pcs_size - 1;
1679
1680 while (lower_bound < __afl_filter_pcs_size && lower_bound <= upper_bound) {
1681
1682 size_t current_index = lower_bound + (upper_bound - lower_bound) / 2;
1683
1684 if (__afl_filter_pcs[current_index].start <= needle) {
1685
1686 if (__afl_filter_pcs[current_index].start +
1687 __afl_filter_pcs[current_index].len >
1688 needle) {
1689
1690 // Hit
1691 *index = current_index;
1692 return 1;
1693
1694 } else {
1695
1696 lower_bound = current_index + 1;
1697
1698 }
1699
1700 } else {
1701
1702 if (!current_index) { break; }
1703 upper_bound = current_index - 1;
1704
1705 }
1706
1707 }
1708
1709 return 0;
1710
1711 }
1712
void __sanitizer_cov_pcs_init(const uintptr_t *pcs_beg,
1714 const uintptr_t *pcs_end) {
1715
1716 // If for whatever reason, we cannot get dlinfo here, then pc_guard_init also
1717 // couldn't get it and we'd end up attributing to the wrong module.
1718 Dl_info dlinfo;
1719 if (!dladdr(__builtin_return_address(0), &dlinfo)) {
1720
1721 fprintf(stderr,
1722 "WARNING: Ignoring __sanitizer_cov_pcs_init callback due to "
1723 "missing module info\n");
1724 return;
1725
1726 }
1727
1728 if (__afl_debug) {
1729
1730 fprintf(
1731 stderr,
1732 "DEBUG: (%u) __sanitizer_cov_pcs_init called for module %s with %ld "
1733 "PCs\n",
1734 getpid(), dlinfo.dli_fname, pcs_end - pcs_beg);
1735
1736 }
1737
1738 afl_module_info_t *last_module_info = __afl_module_info;
1739 while (last_module_info && last_module_info->next) {
1740
1741 last_module_info = last_module_info->next;
1742
1743 }
1744
1745 if (!last_module_info) {
1746
1747 fprintf(stderr,
1748 "ERROR: __sanitizer_cov_pcs_init called with no module info?!\n");
1749 abort();
1750
1751 }
1752
1753 if (strcmp(dlinfo.dli_fname, last_module_info->name)) {
1754
1755 // This can happen with modules being loaded after the forkserver
1756 // where we decide to not track the module. In that case we must
1757 // not track it here either.
1758 fprintf(
1759 stderr,
1760 "WARNING: __sanitizer_cov_pcs_init module info mismatch: %s vs %s\n",
1761 dlinfo.dli_fname, last_module_info->name);
1762 return;
1763
1764 }
1765
1766 last_module_info->pcs_beg = pcs_beg;
1767 last_module_info->pcs_end = pcs_end;
1768
1769 // This is a direct filter based on symbolizing inside the runtime.
1770 // It should only be used with smaller binaries to avoid long startup
1771 // times. Currently, this only supports a single token to scan for.
1772 const char *pc_filter = getenv("AFL_PC_FILTER");
1773
1774 // This is a much faster PC filter based on pre-symbolized input data
1775 // that is sorted for fast lookup through binary search. This method
1776 // of filtering is suitable even for very large binaries.
1777 const char *pc_filter_file = getenv("AFL_PC_FILTER_FILE");
1778 if (pc_filter_file && !__afl_filter_pcs) {
1779
1780 afl_read_pc_filter_file(pc_filter_file);
1781
1782 }
1783
1784 // Now update the pcmap. If this is the last module coming in, after all
1785 // pre-loaded code, then this will also map all of our delayed previous
1786 // modules.
1787 //
1788 for (afl_module_info_t *mod_info = __afl_module_info; mod_info;
1789 mod_info = mod_info->next) {
1790
1791 if (mod_info->mapped) { continue; }
1792
1793 if (!mod_info->start) {
1794
1795 fprintf(stderr,
1796 "ERROR: __sanitizer_cov_pcs_init called with mod_info->start == "
1797 "NULL (%s)\n",
1798 mod_info->name);
1799 abort();
1800
1801 }
1802
1803 PCTableEntry *start = (PCTableEntry *)(mod_info->pcs_beg);
1804 PCTableEntry *end = (PCTableEntry *)(mod_info->pcs_end);
1805
1806 if (!*mod_info->stop) { continue; }
1807
1808 u32 in_module_index = 0;
1809
1810 while (start < end) {
1811
1812 if (*mod_info->start + in_module_index >= __afl_map_size) {
1813
1814 fprintf(stderr,
1815 "ERROR: __sanitizer_cov_pcs_init out of bounds?! Start: %u "
1816 "Stop: %u Map Size: %u (%s)\n",
1817 *mod_info->start, *mod_info->stop, __afl_map_size,
1818 mod_info->name);
1819 abort();
1820
1821 }
1822
1823 u32 orig_start_index = *mod_info->start;
1824
1825 uintptr_t PC = start->PC;
1826
1827 // This is what `GetPreviousInstructionPc` in sanitizer runtime does
1828 // for x86/x86-64. Needs more work for ARM and other archs.
1829 PC = PC - 1;
1830
1831 // Calculate relative offset in module
1832 PC = PC - mod_info->base_address;
1833
1834 if (__afl_pcmap_ptr) {
1835
1836 __afl_pcmap_ptr[orig_start_index + in_module_index] = PC;
1837
1838 }
1839
1840 if (pc_filter) {
1841
1842 char PcDescr[1024];
1843 // This function is a part of the sanitizer run-time.
1844 // To use it, link with AddressSanitizer or other sanitizer.
1845 __sanitizer_symbolize_pc((void *)start->PC, "%p %F %L", PcDescr,
1846 sizeof(PcDescr));
1847
1848 if (strstr(PcDescr, pc_filter)) {
1849
1850 if (__afl_debug)
1851 fprintf(
1852 stderr,
1853 "DEBUG: Selective instrumentation match: %s (PC %p Index %u)\n",
1854 PcDescr, (void *)start->PC,
1855 *(mod_info->start + in_module_index));
1856 // No change to guard needed
1857
1858 } else {
1859
1860 // Null out the guard to disable this edge
1861 *(mod_info->start + in_module_index) = 0;
1862
1863 }
1864
1865 }
1866
1867 if (__afl_filter_pcs && strstr(mod_info->name, __afl_filter_pcs_module)) {
1868
1869 u32 result_index;
1870 if (locate_in_pcs(PC, &result_index)) {
1871
1872 if (__afl_debug)
1873 fprintf(stderr,
1874 "DEBUG: Selective instrumentation match: (PC %lx File "
1875 "Index %u PC Index %u)\n",
1876 PC, result_index, in_module_index);
1877
1878 } else {
1879
1880 // Null out the guard to disable this edge
1881 *(mod_info->start + in_module_index) = 0;
1882
1883 }
1884
1885 }
1886
1887 start++;
1888 in_module_index++;
1889
1890 }
1891
1892 mod_info->mapped = 1;
1893
1894 if (__afl_debug) {
1895
1896 fprintf(stderr,
1897 "DEBUG: __sanitizer_cov_pcs_init successfully mapped %s with %u "
1898 "PCs\n",
1899 mod_info->name, in_module_index);
1900
1901 }
1902
1903 }
1904
1905 }
1906
1907 #endif // __AFL_CODE_COVERAGE
1908
1909 /* Init callback. Populates instrumentation IDs. Note that we're using
1910 ID of 0 as a special value to indicate non-instrumented bits. That may
1911 still touch the bitmap, but in a fairly harmless way. */
1912
void __sanitizer_cov_trace_pc_guard_init(uint32_t *start, uint32_t *stop) {
1914
1915 u32 inst_ratio = 100;
1916 char *x;
1917
1918 _is_sancov = 1;
1919
1920 if (!getenv("AFL_DUMP_MAP_SIZE")) {
1921
1922 __afl_auto_first();
1923 __afl_auto_second();
1924 __afl_auto_early();
1925
1926 }
1927
1928 if (__afl_debug) {
1929
1930 fprintf(
1931 stderr,
1932 "DEBUG: Running __sanitizer_cov_trace_pc_guard_init: %p-%p (%lu edges) "
1933 "after_fs=%u *start=%u\n",
1934 start, stop, (unsigned long)(stop - start),
1935 __afl_already_initialized_forkserver, *start);
1936
1937 }
1938
1939 if (start == stop || *start) { return; }
1940
1941 #ifdef __AFL_CODE_COVERAGE
1942 u32 *orig_start = start;
1943 afl_module_info_t *mod_info = NULL;
1944
1945 Dl_info dlinfo;
1946 if (dladdr(__builtin_return_address(0), &dlinfo)) {
1947
1948 if (__afl_already_initialized_forkserver) {
1949
1950 fprintf(stderr, "[pcmap] Error: Module was not preloaded: %s\n",
1951 dlinfo.dli_fname);
1952
1953 } else {
1954
1955 afl_module_info_t *last_module_info = __afl_module_info;
1956 while (last_module_info && last_module_info->next) {
1957
1958 last_module_info = last_module_info->next;
1959
1960 }
1961
1962 mod_info = malloc(sizeof(afl_module_info_t));
1963
1964 mod_info->id = last_module_info ? last_module_info->id + 1 : 0;
1965 mod_info->name = strdup(dlinfo.dli_fname);
1966 mod_info->base_address = (uintptr_t)dlinfo.dli_fbase;
1967 mod_info->start = NULL;
1968 mod_info->stop = NULL;
1969 mod_info->pcs_beg = NULL;
1970 mod_info->pcs_end = NULL;
1971 mod_info->mapped = 0;
1972 mod_info->next = NULL;
1973
1974 if (last_module_info) {
1975
1976 last_module_info->next = mod_info;
1977
1978 } else {
1979
1980 __afl_module_info = mod_info;
1981
1982 }
1983
1984 if (__afl_debug) {
1985
1986 fprintf(stderr, "[pcmap] Module: %s Base Address: %p\n",
1987 dlinfo.dli_fname, dlinfo.dli_fbase);
1988
1989 }
1990
1991 }
1992
1993 } else {
1994
1995 fprintf(stderr, "[pcmap] dladdr call failed\n");
1996
1997 }
1998
1999 #endif // __AFL_CODE_COVERAGE
2000
2001 x = getenv("AFL_INST_RATIO");
2002 if (x) {
2003
2004 inst_ratio = (u32)atoi(x);
2005
2006 if (!inst_ratio || inst_ratio > 100) {
2007
2008 fprintf(stderr, "[-] ERROR: Invalid AFL_INST_RATIO (must be 1-100).\n");
2009 abort();
2010
2011 }
2012
2013 }
2014
2015 // If a dlopen of an instrumented library happens after the forkserver then
2016 // we have a problem as we cannot increase the coverage map anymore.
2017 if (__afl_already_initialized_forkserver) {
2018
2019 if (!getenv("AFL_IGNORE_PROBLEMS")) {
2020
2021 fprintf(
2022 stderr,
2023 "[-] FATAL: forkserver is already up, but an instrumented dlopen() "
2024 "library loaded afterwards. You must AFL_PRELOAD such libraries to "
2025 "be able to fuzz them or LD_PRELOAD to run outside of afl-fuzz.\n"
2026 "To ignore this set AFL_IGNORE_PROBLEMS=1 but this will lead to "
2027 "ambiguous coverage data.\n"
2028 "In addition, you can set AFL_IGNORE_PROBLEMS_COVERAGE=1 to "
2029 "ignore the additional coverage instead (use with caution!).\n");
2030 abort();
2031
2032 } else {
2033
2034 u8 ignore_dso_after_fs = !!getenv("AFL_IGNORE_PROBLEMS_COVERAGE");
2035 if (__afl_debug && ignore_dso_after_fs) {
2036
2037 fprintf(stderr,
2038 "DEBUG: Ignoring coverage from dynamically loaded code\n");
2039
2040 }
2041
2042 static u32 offset = 5;
2043
2044 while (start < stop) {
2045
2046 if (!ignore_dso_after_fs &&
2047 (likely(inst_ratio == 100) || R(100) < inst_ratio)) {
2048
2049 *(start++) = offset;
2050
2051 } else {
2052
2053 *(start++) = 0; // write to map[0]
2054
2055 }
2056
2057 if (unlikely(++offset >= __afl_final_loc)) { offset = 5; }
2058
2059 }
2060
2061 }
2062
2063 return; // we are done for this special case
2064
2065 }
2066
2067 /* Make sure that the first element in the range is always set - we use that
2068 to avoid duplicate calls (which can happen as an artifact of the underlying
2069 implementation in LLVM). */
2070
2071 if (__afl_final_loc < 5) __afl_final_loc = 5; // we skip the first 5 entries
2072
2073 *(start++) = ++__afl_final_loc;
2074
2075 while (start < stop) {
2076
2077 if (likely(inst_ratio == 100) || R(100) < inst_ratio) {
2078
2079 *(start++) = ++__afl_final_loc;
2080
2081 } else {
2082
2083 *(start++) = 0; // write to map[0]
2084
2085 }
2086
2087 }
2088
2089 #ifdef __AFL_CODE_COVERAGE
2090 if (mod_info) {
2091
2092 if (!mod_info->start) {
2093
2094 mod_info->start = orig_start;
2095 mod_info->stop = stop - 1;
2096
2097 }
2098
2099 if (__afl_debug) {
2100
2101 fprintf(stderr, "DEBUG: [pcmap] Start Index: %u Stop Index: %u\n",
2102 *(mod_info->start), *(mod_info->stop));
2103
2104 }
2105
2106 }
2107
2108 #endif // __AFL_CODE_COVERAGE
2109
2110 if (__afl_debug) {
2111
2112 fprintf(stderr,
2113 "DEBUG: Done __sanitizer_cov_trace_pc_guard_init: __afl_final_loc "
2114 "= %u\n",
2115 __afl_final_loc);
2116
2117 }
2118
2119 if (__afl_already_initialized_shm) {
2120
2121 if (__afl_final_loc > __afl_map_size) {
2122
2123 if (__afl_debug) {
2124
2125 fprintf(stderr, "DEBUG: Reinit shm necessary (+%u)\n",
2126 __afl_final_loc - __afl_map_size);
2127
2128 }
2129
2130 __afl_unmap_shm();
2131 __afl_map_shm();
2132
2133 }
2134
2135 __afl_map_size = __afl_final_loc + 1;
2136
2137 }
2138
2139 }
2140
2141 ///// CmpLog instrumentation
2142
void __cmplog_ins_hook1(uint8_t arg1, uint8_t arg2, uint8_t attr) {
2144
2145 // fprintf(stderr, "hook1 arg0=%02x arg1=%02x attr=%u\n",
2146 // (u8) arg1, (u8) arg2, attr);
2147
2148 return;
2149
2150 /*
2151
2152 if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;
2153
2154 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2155 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2156
2157 u32 hits;
2158
2159 if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {
2160
2161 __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
2162 hits = 0;
2163 __afl_cmp_map->headers[k].hits = 1;
2164 __afl_cmp_map->headers[k].shape = 0;
2165
2166 } else {
2167
2168 hits = __afl_cmp_map->headers[k].hits++;
2169
2170 }
2171
2172 __afl_cmp_map->headers[k].attribute = attr;
2173
2174 hits &= CMP_MAP_H - 1;
2175 __afl_cmp_map->log[k][hits].v0 = arg1;
2176 __afl_cmp_map->log[k][hits].v1 = arg2;
2177
2178 */
2179
2180 }
2181
void __cmplog_ins_hook2(uint16_t arg1, uint16_t arg2, uint8_t attr) {
2183
2184 if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;
2185
2186 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2187 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2188
2189 u32 hits;
2190
2191 if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {
2192
2193 __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
2194 hits = 0;
2195 __afl_cmp_map->headers[k].hits = 1;
2196 __afl_cmp_map->headers[k].shape = 1;
2197
2198 } else {
2199
2200 hits = __afl_cmp_map->headers[k].hits++;
2201
2202 if (!__afl_cmp_map->headers[k].shape) {
2203
2204 __afl_cmp_map->headers[k].shape = 1;
2205
2206 }
2207
2208 }
2209
2210 __afl_cmp_map->headers[k].attribute = attr;
2211
2212 hits &= CMP_MAP_H - 1;
2213 __afl_cmp_map->log[k][hits].v0 = arg1;
2214 __afl_cmp_map->log[k][hits].v1 = arg2;
2215
2216 }
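
/* Illustrative sketch (not part of the runtime): what the hooks in this
   section record. For a target comparison like `if (x == 0x4142)` built with
   CmpLog, the pass emits a call such as __cmplog_ins_hook2(x, 0x4142, attr);
   the hook hashes its return address into a slot k of __afl_cmp_map and
   appends both operands, so afl-fuzz can later try inserting 0x4142 into the
   input (Redqueen-style input-to-state replacement). The reader below is a
   hypothetical consumer, shown only to make the layout concrete. */
#if 0
static void dump_slot(u32 k) {

  u32 hits = __afl_cmp_map->headers[k].hits;
  if (hits > CMP_MAP_H) hits = CMP_MAP_H;  // log[k] is a ring of CMP_MAP_H entries

  for (u32 i = 0; i < hits; i++) {

    fprintf(stderr, "cmp slot %u: v0=%llx v1=%llx\n", k,
            (unsigned long long)__afl_cmp_map->log[k][i].v0,
            (unsigned long long)__afl_cmp_map->log[k][i].v1);

  }

}

#endif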
2217
void __cmplog_ins_hook4(uint32_t arg1, uint32_t arg2, uint8_t attr) {
2219
2220 // fprintf(stderr, "hook4 arg0=%x arg1=%x attr=%u\n", arg1, arg2, attr);
2221
2222 if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;
2223
2224 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2225 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2226
2227 u32 hits;
2228
2229 if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {
2230
2231 __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
2232 hits = 0;
2233 __afl_cmp_map->headers[k].hits = 1;
2234 __afl_cmp_map->headers[k].shape = 3;
2235
2236 } else {
2237
2238 hits = __afl_cmp_map->headers[k].hits++;
2239
2240 if (__afl_cmp_map->headers[k].shape < 3) {
2241
2242 __afl_cmp_map->headers[k].shape = 3;
2243
2244 }
2245
2246 }
2247
2248 __afl_cmp_map->headers[k].attribute = attr;
2249
2250 hits &= CMP_MAP_H - 1;
2251 __afl_cmp_map->log[k][hits].v0 = arg1;
2252 __afl_cmp_map->log[k][hits].v1 = arg2;
2253
2254 }
2255
void __cmplog_ins_hook8(uint64_t arg1, uint64_t arg2, uint8_t attr) {
2257
2258 // fprintf(stderr, "hook8 arg0=%lx arg1=%lx attr=%u\n", arg1, arg2, attr);
2259
2260 if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;
2261
2262 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2263 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2264
2265 u32 hits;
2266
2267 if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {
2268
2269 __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
2270 hits = 0;
2271 __afl_cmp_map->headers[k].hits = 1;
2272 __afl_cmp_map->headers[k].shape = 7;
2273
2274 } else {
2275
2276 hits = __afl_cmp_map->headers[k].hits++;
2277
2278 if (__afl_cmp_map->headers[k].shape < 7) {
2279
2280 __afl_cmp_map->headers[k].shape = 7;
2281
2282 }
2283
2284 }
2285
2286 __afl_cmp_map->headers[k].attribute = attr;
2287
2288 hits &= CMP_MAP_H - 1;
2289 __afl_cmp_map->log[k][hits].v0 = arg1;
2290 __afl_cmp_map->log[k][hits].v1 = arg2;
2291
2292 }
2293
2294 #ifdef WORD_SIZE_64
// support for u24 to u120 via llvm _ExtInt(). size is in bytes minus 1
void __cmplog_ins_hookN(uint128_t arg1, uint128_t arg2, uint8_t attr,
2297 uint8_t size) {
2298
2299 // fprintf(stderr, "hookN arg0=%llx:%llx arg1=%llx:%llx bytes=%u attr=%u\n",
2300 // (u64)(arg1 >> 64), (u64)arg1, (u64)(arg2 >> 64), (u64)arg2, size + 1,
2301 // attr);
2302
2303 if (unlikely(!__afl_cmp_map || arg1 == arg2)) return;
2304
2305 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2306 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2307
2308 u32 hits;
2309
2310 if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {
2311
2312 __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
2313 hits = 0;
2314 __afl_cmp_map->headers[k].hits = 1;
2315 __afl_cmp_map->headers[k].shape = size;
2316
2317 } else {
2318
2319 hits = __afl_cmp_map->headers[k].hits++;
2320
2321 if (__afl_cmp_map->headers[k].shape < size) {
2322
2323 __afl_cmp_map->headers[k].shape = size;
2324
2325 }
2326
2327 }
2328
2329 __afl_cmp_map->headers[k].attribute = attr;
2330
2331 hits &= CMP_MAP_H - 1;
2332 __afl_cmp_map->log[k][hits].v0 = (u64)arg1;
2333 __afl_cmp_map->log[k][hits].v1 = (u64)arg2;
2334
2335 if (size > 7) {
2336
2337 __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64);
2338 __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64);
2339
2340 }
2341
2342 }
2343
void __cmplog_ins_hook16(uint128_t arg1, uint128_t arg2, uint8_t attr) {
2345
2346 if (likely(!__afl_cmp_map)) return;
2347
2348 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2349 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2350
2351 u32 hits;
2352
2353 if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {
2354
2355 __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
2356 hits = 0;
2357 __afl_cmp_map->headers[k].hits = 1;
2358 __afl_cmp_map->headers[k].shape = 15;
2359
2360 } else {
2361
2362 hits = __afl_cmp_map->headers[k].hits++;
2363
2364 if (__afl_cmp_map->headers[k].shape < 15) {
2365
2366 __afl_cmp_map->headers[k].shape = 15;
2367
2368 }
2369
2370 }
2371
2372 __afl_cmp_map->headers[k].attribute = attr;
2373
2374 hits &= CMP_MAP_H - 1;
2375 __afl_cmp_map->log[k][hits].v0 = (u64)arg1;
2376 __afl_cmp_map->log[k][hits].v1 = (u64)arg2;
2377 __afl_cmp_map->log[k][hits].v0_128 = (u64)(arg1 >> 64);
2378 __afl_cmp_map->log[k][hits].v1_128 = (u64)(arg2 >> 64);
2379
2380 }
2381
2382 #endif
2383
2384 void __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2) {
2385
2386 //__cmplog_ins_hook1(arg1, arg2, 0);
2387
2388 }
2389
2390 void __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2) {
2391
2392 //__cmplog_ins_hook1(arg1, arg2, 0);
2393
2394 }
2395
2396 void __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2) {
2397
2398 __cmplog_ins_hook2(arg1, arg2, 0);
2399
2400 }
2401
2402 void __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2) {
2403
2404 __cmplog_ins_hook2(arg1, arg2, 0);
2405
2406 }
2407
2408 void __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2) {
2409
2410 __cmplog_ins_hook4(arg1, arg2, 0);
2411
2412 }
2413
2414 void __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2) {
2415
2416 __cmplog_ins_hook4(arg1, arg2, 0);
2417
2418 }
2419
2420 void __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2) {
2421
2422 __cmplog_ins_hook8(arg1, arg2, 0);
2423
2424 }
2425
2426 void __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2) {
2427
2428 __cmplog_ins_hook8(arg1, arg2, 0);
2429
2430 }
2431
2432 #ifdef WORD_SIZE_64
2433 void __sanitizer_cov_trace_cmp16(uint128_t arg1, uint128_t arg2) {
2434
2435 __cmplog_ins_hook16(arg1, arg2, 0);
2436
2437 }
2438
2439 void __sanitizer_cov_trace_const_cmp16(uint128_t arg1, uint128_t arg2) {
2440
2441 __cmplog_ins_hook16(arg1, arg2, 0);
2442
2443 }
2444
2445 #endif
2446
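/* The wrappers above adapt SanitizerCoverage's trace-cmp callbacks
   (-fsanitize-coverage=trace-cmp) to the cmplog hooks; the *_const_* variants
   are emitted when one operand is a compile-time constant (typically passed
   as the first argument), and the 1-byte variants above are no-ops here (the
   hook call is commented out). A compiled-out illustration with made-up
   target code: */

#if 0

  uint32_t x = u32_from_input();
  if (x == 0xDEADBEEF) handle_magic();
  /* with trace-cmp instrumentation the compiler emits, roughly:
       __sanitizer_cov_trace_const_cmp4(0xDEADBEEF, x);
     which lands in __cmplog_ins_hook4() above. */

#endif
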
2447 void __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases) {
2448
2449 if (likely(!__afl_cmp_map)) return;
2450
2451 for (uint64_t i = 0; i < cases[0]; i++) {
2452
2453 uintptr_t k = (uintptr_t)__builtin_return_address(0) + i;
2454 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) &
2455 (CMP_MAP_W - 1));
2456
2457 u32 hits;
2458
2459 if (__afl_cmp_map->headers[k].type != CMP_TYPE_INS) {
2460
2461 __afl_cmp_map->headers[k].type = CMP_TYPE_INS;
2462 hits = 0;
2463 __afl_cmp_map->headers[k].hits = 1;
2464 __afl_cmp_map->headers[k].shape = 7;
2465
2466 } else {
2467
2468 hits = __afl_cmp_map->headers[k].hits++;
2469
2470 if (__afl_cmp_map->headers[k].shape < 7) {
2471
2472 __afl_cmp_map->headers[k].shape = 7;
2473
2474 }
2475
2476 }
2477
2478 __afl_cmp_map->headers[k].attribute = 1;
2479
2480 hits &= CMP_MAP_H - 1;
2481 __afl_cmp_map->log[k][hits].v0 = val;
2482 __afl_cmp_map->log[k][hits].v1 = cases[i + 2];
2483
2484 }
2485
2486 }
2487
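/* The cases[] array follows the SanitizerCoverage convention: cases[0] is the
   number of case constants, cases[1] is the bit width of val, and the
   constants themselves start at cases[2], hence the i + 2 indexing above.
   Compiled-out example with made-up target code: */

#if 0

  switch (x) {  /* x is a uint32_t derived from the input */

    case 7: f(); break;
    case 100: g(); break;

  }

  /* the compiler passes roughly cases[] = { 2, 32, 7, 100 }, and each loop
     iteration above logs one (val, case constant) pair. */

#endif
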
2488 __attribute__((weak)) void *__asan_region_is_poisoned(void *beg, size_t size) {
2489
2490 return NULL;
2491
2492 }
2493
2494 // POSIX shenanigan to see if an area is mapped.
2495 // If it is mapped as X-only, we have a problem, so maybe we should add a check
2496 // to avoid calling it on .text addresses
2497 static int area_is_valid(void *ptr, size_t len) {
2498
2499 if (unlikely(!ptr || __asan_region_is_poisoned(ptr, len))) { return 0; }
2500
2501 #ifdef __HAIKU__
2502 long r = _kern_write(__afl_dummy_fd[1], -1, ptr, len);
2503 #elif defined(__OpenBSD__)
2504 long r = write(__afl_dummy_fd[1], ptr, len);
2505 #else
2506 long r = syscall(SYS_write, __afl_dummy_fd[1], ptr, len);
2507 #endif // HAIKU, OPENBSD
2508
2509 if (r <= 0 || r > len) return 0;
2510
2511 // even if the write succeeds, this can be a false positive if we cross
2512 // a page boundary (who knows why).
2513
2514 char *p = (char *)ptr;
2515 long page_size = sysconf(_SC_PAGE_SIZE);
2516 char *page = (char *)((uintptr_t)p & ~(page_size - 1)) + page_size;
2517
2518 if (page > p + len) {
2519
2520 // no, not crossing a page boundary
2521 return (int)r;
2522
2523 } else {
2524
2525 // yes, it crosses a boundary, hence we can only return the length of the
2526 // rest of the first page; we cannot detect whether the next page is valid
2527 // or not, neither by SYS_write nor by msync() :-(
2528 return (int)(page - p);
2529
2530 }
2531
2532 }
2533
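/* The probe above relies on write(2) failing with EFAULT when the source
   buffer is not readable (the runtime goes through the raw syscall rather
   than libc where possible). A minimal, compiled-out sketch of the same
   trick; is_readable() is a made-up helper that uses a throwaway /dev/null
   fd instead of __afl_dummy_fd: */

#if 0

  #include <fcntl.h>
  #include <unistd.h>

  /* returns 1 if [ptr, ptr + len) is readable, 0 otherwise */
  static int is_readable(const void *ptr, size_t len) {

    int fd = open("/dev/null", O_WRONLY);
    if (fd < 0) return 0;
    ssize_t r = write(fd, ptr, len);  /* returns -1/EFAULT if unmapped */
    close(fd);
    return r == (ssize_t)len;

  }

  /* note: as the comments above explain, this is still only reliable up to
     the end of the first page that ptr touches. */

#endif
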
2534 /* hook for length-bounded string functions, e.g. strncmp, strncasecmp etc.
2535 Note that we ignore the len parameter and take longer strings if present. */
2536 void __cmplog_rtn_hook_strn(u8 *ptr1, u8 *ptr2, u64 len) {
2537
2538 // fprintf(stderr, "RTN1 %p %p %u\n", ptr1, ptr2, len);
2539 if (likely(!__afl_cmp_map)) return;
2540 if (unlikely(!len)) return;
2541 int len0 = MIN(len, 31);
2542 int len1 = strnlen(ptr1, len0);
2543 if (len1 < 31) len1 = area_is_valid(ptr1, len1 + 1);
2544 int len2 = strnlen(ptr2, len0);
2545 if (len2 < 31) len2 = area_is_valid(ptr2, len2 + 1);
2546 int l = MAX(len1, len2);
2547 if (l < 2) return;
2548
2549 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2550 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2551
2552 u32 hits;
2553
2554 if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
2555
2556 __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
2557 __afl_cmp_map->headers[k].hits = 1;
2558 __afl_cmp_map->headers[k].shape = l - 1;
2559 hits = 0;
2560
2561 } else {
2562
2563 hits = __afl_cmp_map->headers[k].hits++;
2564
2565 if (__afl_cmp_map->headers[k].shape < l) {
2566
2567 __afl_cmp_map->headers[k].shape = l - 1;
2568
2569 }
2570
2571 }
2572
2573 struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
2574 hits &= CMP_MAP_RTN_H - 1;
2575
2576 cmpfn[hits].v0_len = 0x80 + l;
2577 cmpfn[hits].v1_len = 0x80 + l;
2578 __builtin_memcpy(cmpfn[hits].v0, ptr1, len1);
2579 __builtin_memcpy(cmpfn[hits].v1, ptr2, len2);
2580 // fprintf(stderr, "RTN3\n");
2581
2582 }
2583
2584 /* hook for string functions, e.g. strcmp, strcasecmp etc. */
2585 void __cmplog_rtn_hook_str(u8 *ptr1, u8 *ptr2) {
2586
2587 // fprintf(stderr, "RTN1 %p %p\n", ptr1, ptr2);
2588 if (likely(!__afl_cmp_map)) return;
2589 if (unlikely(!ptr1 || !ptr2)) return;
2590 int len1 = strnlen(ptr1, 30) + 1;
2591 int len2 = strnlen(ptr2, 30) + 1;
2592 int l = MAX(len1, len2);
2593 if (l < 3) return;
2594
2595 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2596 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2597
2598 u32 hits;
2599
2600 if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
2601
2602 __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
2603 __afl_cmp_map->headers[k].hits = 1;
2604 __afl_cmp_map->headers[k].shape = l - 1;
2605 hits = 0;
2606
2607 } else {
2608
2609 hits = __afl_cmp_map->headers[k].hits++;
2610
2611 if (__afl_cmp_map->headers[k].shape < l) {
2612
2613 __afl_cmp_map->headers[k].shape = l - 1;
2614
2615 }
2616
2617 }
2618
2619 struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
2620 hits &= CMP_MAP_RTN_H - 1;
2621
2622 cmpfn[hits].v0_len = 0x80 + len1;
2623 cmpfn[hits].v1_len = 0x80 + len2;
2624 __builtin_memcpy(cmpfn[hits].v0, ptr1, len1);
2625 __builtin_memcpy(cmpfn[hits].v1, ptr2, len2);
2626 // fprintf(stderr, "RTN3\n");
2627
2628 }
2629
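/* Note on the length encoding used by the two string hooks above: v0_len and
   v1_len are stored as 0x80 + length, while the generic __cmplog_rtn_hook()
   below stores the plain length. The high bit therefore lets the consumer of
   the cmplog map distinguish string operands from raw memory operands. */
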
2630 /* hook function for all other func(ptr, ptr, ...) variants */
2631 void __cmplog_rtn_hook(u8 *ptr1, u8 *ptr2) {
2632
2633 /*
2634 u32 i;
2635 if (area_is_valid(ptr1, 31) <= 0 || area_is_valid(ptr2, 31) <= 0) return;
2636 fprintf(stderr, "rtn arg0=");
2637 for (i = 0; i < 32; i++)
2638 fprintf(stderr, "%02x", ptr1[i]);
2639 fprintf(stderr, " arg1=");
2640 for (i = 0; i < 32; i++)
2641 fprintf(stderr, "%02x", ptr2[i]);
2642 fprintf(stderr, "\n");
2643 */
2644
2645 // fprintf(stderr, "RTN1 %p %p\n", ptr1, ptr2);
2646 if (likely(!__afl_cmp_map)) return;
2647 int l1, l2;
2648 if ((l1 = area_is_valid(ptr1, 31)) <= 0 ||
2649 (l2 = area_is_valid(ptr2, 31)) <= 0)
2650 return;
2651 int len = MIN(31, MIN(l1, l2));
2652
2653 // fprintf(stderr, "RTN2 %u\n", len);
2654 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2655 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2656
2657 u32 hits;
2658
2659 if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
2660
2661 __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
2662 __afl_cmp_map->headers[k].hits = 1;
2663 __afl_cmp_map->headers[k].shape = len - 1;
2664 hits = 0;
2665
2666 } else {
2667
2668 hits = __afl_cmp_map->headers[k].hits++;
2669
2670 if (__afl_cmp_map->headers[k].shape < len) {
2671
2672 __afl_cmp_map->headers[k].shape = len - 1;
2673
2674 }
2675
2676 }
2677
2678 struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
2679 hits &= CMP_MAP_RTN_H - 1;
2680
2681 cmpfn[hits].v0_len = len;
2682 cmpfn[hits].v1_len = len;
2683 __builtin_memcpy(cmpfn[hits].v0, ptr1, len);
2684 __builtin_memcpy(cmpfn[hits].v1, ptr2, len);
2685 // fprintf(stderr, "RTN3\n");
2686
2687 }
2688
2689 /* hook for func(ptr, ptr, len, ...) style functions.
2690 Note that for the time being we ignore len, as it could be wrong
2691 information, and pass the pointers on to the standard binary rtn hook */
2692 void __cmplog_rtn_hook_n(u8 *ptr1, u8 *ptr2, u64 len) {
2693
2694 (void)(len);
2695 __cmplog_rtn_hook(ptr1, ptr2);
2696
2697 #if 0
2698 /*
2699 u32 i;
2700 if (area_is_valid(ptr1, 31) <= 0 || area_is_valid(ptr2, 31) <= 0) return;
2701 fprintf(stderr, "rtn_n len=%u arg0=", len);
2702 for (i = 0; i < len; i++)
2703 fprintf(stderr, "%02x", ptr1[i]);
2704 fprintf(stderr, " arg1=");
2705 for (i = 0; i < len; i++)
2706 fprintf(stderr, "%02x", ptr2[i]);
2707 fprintf(stderr, "\n");
2708 */
2709
2710 // fprintf(stderr, "RTN1 %p %p %u\n", ptr1, ptr2, len);
2711 if (likely(!__afl_cmp_map)) return;
2712 if (unlikely(!len)) return;
2713 int l = MIN(31, len);
2714
2715 if ((l = area_is_valid(ptr1, l)) <= 0 || (l = area_is_valid(ptr2, l)) <= 0)
2716 return;
2717
2718 // fprintf(stderr, "RTN2 %u\n", l);
2719 uintptr_t k = (uintptr_t)__builtin_return_address(0);
2720 k = (uintptr_t)(default_hash((u8 *)&k, sizeof(uintptr_t)) & (CMP_MAP_W - 1));
2721
2722 u32 hits;
2723
2724 if (__afl_cmp_map->headers[k].type != CMP_TYPE_RTN) {
2725
2726 __afl_cmp_map->headers[k].type = CMP_TYPE_RTN;
2727 __afl_cmp_map->headers[k].hits = 1;
2728 __afl_cmp_map->headers[k].shape = l - 1;
2729 hits = 0;
2730
2731 } else {
2732
2733 hits = __afl_cmp_map->headers[k].hits++;
2734
2735 if (__afl_cmp_map->headers[k].shape < l) {
2736
2737 __afl_cmp_map->headers[k].shape = l - 1;
2738
2739 }
2740
2741 }
2742
2743 struct cmpfn_operands *cmpfn = (struct cmpfn_operands *)__afl_cmp_map->log[k];
2744 hits &= CMP_MAP_RTN_H - 1;
2745
2746 cmpfn[hits].v0_len = l;
2747 cmpfn[hits].v1_len = l;
2748 __builtin_memcpy(cmpfn[hits].v0, ptr1, l);
2749 __builtin_memcpy(cmpfn[hits].v1, ptr2, l);
2750 // fprintf(stderr, "RTN3\n");
2751 #endif
2752
2753 }
2754
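/* These rtn hooks are placed by the cmplog routines pass in front of
   two-pointer library calls; which hook is used depends on the callee's
   prototype. Compiled-out sketch with made-up target code (input,
   expected_header and parse_body are illustrative; the actual call-site
   rewriting is done by the compiler pass): */

#if 0

  if (memcmp(input, expected_header, 16) == 0) parse_body(input);
  /* a length-taking comparison like this would typically be routed through
       __cmplog_rtn_hook_n(input, expected_header, 16);
     so both operands end up in the cmplog map for input-to-state mutation. */

#endif
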
2755 // gcc libstdc++
2756 // _ZNKSt7__cxx1112basic_stringIcSt11char_traitsIcESaIcEE7compareEPKc
2757 static u8 *get_gcc_stdstring(u8 *string) {
2758
2759 u32 *len = (u32 *)(string + 8);
2760
2761 if (*len < 16) { // in structure
2762
2763 return (string + 16);
2764
2765 } else { // in memory
2766
2767 u8 **ptr = (u8 **)string;
2768 return (*ptr);
2769
2770 }
2771
2772 }
2773
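/* get_gcc_stdstring() reads the C++11 libstdc++ std::string representation
   directly. A rough, compiled-out sketch of the 64-bit layout it assumes
   (field names are illustrative, not the real libstdc++ names): */

#if 0

  struct gcc_basic_string_sketch {

    char  *data;     // offset 0: points to buf for short strings
    size_t length;   // offset 8: read as *(u32 *)(string + 8) above
    char   buf[16];  // offset 16: in-place (SSO) storage when length < 16

  };

#endif
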
2774 // llvm libc++ _ZNKSt3__112basic_stringIcNS_11char_traitsIcEENS_9allocator
2775 // IcEEE7compareEmmPKcm
2776 static u8 *get_llvm_stdstring(u8 *string) {
2777
2778 // length is in: if ((string[0] & 1) == 0) u8 len = (string[0] >> 1);
2779 // or: if (string[0] & 1) u32 *len = (u32 *) (string + 8);
2780
2781 if (string[0] & 1) { // in memory
2782
2783 u8 **ptr = (u8 **)(string + 16);
2784 return (*ptr);
2785
2786 } else { // in structure
2787
2788 return (string + 1);
2789
2790 }
2791
2792 }
2793
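/* Likewise, get_llvm_stdstring() assumes the common little-endian libc++
   small-string layout: the low bit of byte 0 selects between the inline
   (short) form, whose characters start at offset 1, and the heap (long) form,
   whose data pointer sits at offset 16. Rough, compiled-out sketch; names and
   the exact inline capacity are illustrative only: */

#if 0

  union llvm_basic_string_sketch {

    struct {                         // (string[0] & 1) == 0: short/inline form

      unsigned char size_and_flag;   // size is string[0] >> 1
      char          data[23];        // characters start at offset 1

    } s;

    struct {                         // (string[0] & 1) == 1: long/heap form

      size_t cap_with_flag;          // low bit set
      size_t size;
      char  *data;                   // read at offset 16 above

    } l;

  };

#endif
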
2794 void __cmplog_rtn_gcc_stdstring_cstring(u8 *stdstring, u8 *cstring) {
2795
2796 if (likely(!__afl_cmp_map)) return;
2797 if (area_is_valid(stdstring, 32) <= 0 || area_is_valid(cstring, 32) <= 0)
2798 return;
2799
2800 __cmplog_rtn_hook(get_gcc_stdstring(stdstring), cstring);
2801
2802 }
2803
2804 void __cmplog_rtn_gcc_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {
2805
2806 if (likely(!__afl_cmp_map)) return;
2807 if (area_is_valid(stdstring1, 32) <= 0 || area_is_valid(stdstring2, 32) <= 0)
2808 return;
2809
2810 __cmplog_rtn_hook(get_gcc_stdstring(stdstring1),
2811 get_gcc_stdstring(stdstring2));
2812
2813 }
2814
2815 void __cmplog_rtn_llvm_stdstring_cstring(u8 *stdstring, u8 *cstring) {
2816
2817 if (likely(!__afl_cmp_map)) return;
2818 if (area_is_valid(stdstring, 32) <= 0 || area_is_valid(cstring, 32) <= 0)
2819 return;
2820
2821 __cmplog_rtn_hook(get_llvm_stdstring(stdstring), cstring);
2822
2823 }
2824
2825 void __cmplog_rtn_llvm_stdstring_stdstring(u8 *stdstring1, u8 *stdstring2) {
2826
2827 if (likely(!__afl_cmp_map)) return;
2828 if (area_is_valid(stdstring1, 32) <= 0 || area_is_valid(stdstring2, 32) <= 0)
2829 return;
2830
2831 __cmplog_rtn_hook(get_llvm_stdstring(stdstring1),
2832 get_llvm_stdstring(stdstring2));
2833
2834 }
2835
2836 /* COVERAGE manipulation features */
2837
2838 // this variable is then used in the shm setup to create an additional map
2839 // if __afl_map_size > MAP_SIZE or cmplog is used.
2840 // Especially with cmplog this would result in a ~260MB mem increase per
2841 // target run.
2842
2843 // disable coverage from this point onwards until turned on again
2844 void __afl_coverage_off() {
2845
2846 if (likely(__afl_selective_coverage)) {
2847
2848 __afl_area_ptr = __afl_area_ptr_dummy;
2849 __afl_cmp_map = NULL;
2850
2851 }
2852
2853 }
2854
2855 // enable coverage
2856 void __afl_coverage_on() {
2857
2858 if (likely(__afl_selective_coverage && __afl_selective_coverage_temp)) {
2859
2860 __afl_area_ptr = __afl_area_ptr_backup;
2861 if (__afl_cmp_map_backup) { __afl_cmp_map = __afl_cmp_map_backup; }
2862
2863 }
2864
2865 }
2866
2867 // discard all coverage up to this point
2868 void __afl_coverage_discard() {
2869
2870 memset(__afl_area_ptr_backup, 0, __afl_map_size);
2871 __afl_area_ptr_backup[0] = 1;
2872
2873 if (__afl_cmp_map) { memset(__afl_cmp_map, 0, sizeof(struct cmp_map)); }
2874
2875 }
2876
2877 // discard the testcase
2878 void __afl_coverage_skip() {
2879
2880 __afl_coverage_discard();
2881
2882 if (likely(is_persistent && __afl_selective_coverage)) {
2883
2884 __afl_coverage_off();
2885 __afl_selective_coverage_temp = 0;
2886
2887 } else {
2888
2889 exit(0);
2890
2891 }
2892
2893 }
2894
2895 // mark this area as especially interesting
2896 void __afl_coverage_interesting(u8 val, u32 id) {
2897
2898 __afl_area_ptr[id] = val;
2899
2900 }
2901
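/* The coverage control functions above back AFL++'s selective instrumentation
   API; targets normally reach them through the __AFL_COVERAGE_* convenience
   macros documented in the instrumentation README. Compiled-out harness
   sketch (sanity_check() and process() are made-up target functions): */

#if 0

  #include <stdint.h>
  #include <stddef.h>

  __AFL_COVERAGE();  /* required once, at file scope */

  int LLVMFuzzerTestOneInput(const uint8_t *data, size_t size) {

    __AFL_COVERAGE_OFF();  /* ignore coverage produced by the setup phase */

    if (!sanity_check(data, size)) {

      __AFL_COVERAGE_SKIP();  /* drop the whole test case */
      return 0;

    }

    __AFL_COVERAGE_ON();  /* collect coverage only for the interesting part */
    process(data, size);
    return 0;

  }

#endif
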
2902 void __afl_set_persistent_mode(u8 mode) {
2903
2904 is_persistent = mode;
2905
2906 }
2907
2908 // Marker: ADD_TO_INJECTIONS
2909
2910 void __afl_injection_sql(u8 *buf) {
2911
2912 if (likely(buf)) {
2913
2914 if (unlikely(strstr((char *)buf, "'\"\"'"))) {
2915
2916 fprintf(stderr, "ALERT: Detected SQL injection in query: %s\n", buf);
2917 abort();
2918
2919 }
2920
2921 }
2922
2923 }
2924
2925 void __afl_injection_ldap(u8 *buf) {
2926
2927 if (likely(buf)) {
2928
2929 if (unlikely(strstr((char *)buf, "*)(1=*))(|"))) {
2930
2931 fprintf(stderr, "ALERT: Detected LDAP injection in query: %s\n", buf);
2932 abort();
2933
2934 }
2935
2936 }
2937
2938 }
2939
2940 void __afl_injection_xss(u8 *buf) {
2941
2942 if (likely(buf)) {
2943
2944 if (unlikely(strstr((char *)buf, "1\"><\""))) {
2945
2946 fprintf(stderr, "ALERT: Detected XSS injection in content: %s\n", buf);
2947 abort();
2948
2949 }
2950
2951 }
2952
2953 }
2954
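/* The three checkers above implement AFL++'s injection detection feature:
   when a target is built with the corresponding AFL_LLVM_INJECTIONS_* option,
   calls to known SQL/LDAP/XSS sink functions are wrapped so the query or
   content string is passed through here first; if fuzz input carries the
   characteristic token checked above into such a sink unescaped, abort()
   turns it into a reportable crash. Compiled-out sketch of a vulnerable
   pattern this is meant to catch (db_t, db_exec and lookup_user are made-up
   names): */

#if 0

  /* built with AFL_LLVM_INJECTIONS_SQL=1 (or AFL_LLVM_INJECTIONS_ALL=1) */
  void lookup_user(db_t *db, const char *name_from_input) {

    char query[256];
    snprintf(query, sizeof(query), "SELECT * FROM users WHERE name = '%s';",
             name_from_input);
    db_exec(db, query);  /* wrapped call: __afl_injection_sql(query) runs first */

  }

#endif
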
2955 #undef write_error
2956
2957