/*
   american fuzzy lop++ - bitmap related routines
   ----------------------------------------------

   Originally written by Michal Zalewski

   Now maintained by Marc Heuse <[email protected]>,
   Heiko Eißfeldt <[email protected]> and
   Andrea Fioraldi <[email protected]>

   Copyright 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019-2024 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     https://www.apache.org/licenses/LICENSE-2.0

   This is the real deal: the program takes an instrumented binary and
   attempts a variety of basic fuzzing tricks, paying close attention to
   how they affect the execution path.

 */
25
26 #include "afl-fuzz.h"
27 #include <limits.h>
28 #if !defined NAME_MAX
29 #define NAME_MAX _XOPEN_NAME_MAX
30 #endif
31
32 /* Write bitmap to file. The bitmap is useful mostly for the secret
33 -B option, to focus a separate fuzzing session on a particular
34 interesting input without rediscovering all the others. */
35
void write_bitmap(afl_state_t *afl) {

  u8  path[PATH_MAX];
  s32 bitmap_fd;

  /* Skip the dump entirely if no new bits were found since the last call. */
  if (!afl->bitmap_changed) { return; }
  afl->bitmap_changed = 0;

  snprintf(path, PATH_MAX, "%s/fuzz_bitmap", afl->out_dir);

  bitmap_fd = open(path, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
  if (bitmap_fd < 0) { PFATAL("Unable to open '%s'", path); }

  /* Persist the virgin map so a later -B session can resume from it. */
  ck_write(bitmap_fd, afl->virgin_bits, afl->fsrv.map_size, path);

  close(bitmap_fd);

}
54
55 /* Count the number of bits set in the provided bitmap. Used for the status
56 screen several times every second, does not have to be fast. */
57
count_bits(afl_state_t * afl,u8 * mem)58 u32 count_bits(afl_state_t *afl, u8 *mem) {
59
60 u32 *ptr = (u32 *)mem;
61 u32 i = ((afl->fsrv.real_map_size + 3) >> 2);
62 u32 ret = 0;
63
64 while (i--) {
65
66 u32 v = *(ptr++);
67
68 /* This gets called on the inverse, virgin bitmap; optimize for sparse
69 data. */
70
71 if (likely(v == 0xffffffff)) {
72
73 ret += 32;
74 continue;
75
76 }
77
78 v -= ((v >> 1) & 0x55555555);
79 v = (v & 0x33333333) + ((v >> 2) & 0x33333333);
80 ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24;
81
82 }
83
84 return ret;
85
86 }
87
88 /* Count the number of bytes set in the bitmap. Called fairly sporadically,
89 mostly to update the status screen or calibrate and examine confirmed
90 new paths. */
91
count_bytes(afl_state_t * afl,u8 * mem)92 u32 count_bytes(afl_state_t *afl, u8 *mem) {
93
94 u32 *ptr = (u32 *)mem;
95 u32 i = ((afl->fsrv.real_map_size + 3) >> 2);
96 u32 ret = 0;
97
98 while (i--) {
99
100 u32 v = *(ptr++);
101
102 if (likely(!v)) { continue; }
103 if (v & 0x000000ffU) { ++ret; }
104 if (v & 0x0000ff00U) { ++ret; }
105 if (v & 0x00ff0000U) { ++ret; }
106 if (v & 0xff000000U) { ++ret; }
107
108 }
109
110 return ret;
111
112 }
113
114 /* Count the number of non-255 bytes set in the bitmap. Used strictly for the
115 status screen, several calls per second or so. */
116
count_non_255_bytes(afl_state_t * afl,u8 * mem)117 u32 count_non_255_bytes(afl_state_t *afl, u8 *mem) {
118
119 u32 *ptr = (u32 *)mem;
120 u32 i = ((afl->fsrv.real_map_size + 3) >> 2);
121 u32 ret = 0;
122
123 while (i--) {
124
125 u32 v = *(ptr++);
126
127 /* This is called on the virgin bitmap, so optimize for the most likely
128 case. */
129
130 if (likely(v == 0xffffffffU)) { continue; }
131 if ((v & 0x000000ffU) != 0x000000ffU) { ++ret; }
132 if ((v & 0x0000ff00U) != 0x0000ff00U) { ++ret; }
133 if ((v & 0x00ff0000U) != 0x00ff0000U) { ++ret; }
134 if ((v & 0xff000000U) != 0xff000000U) { ++ret; }
135
136 }
137
138 return ret;
139
140 }
141
142 /* Destructively simplify trace by eliminating hit count information
143 and replacing it with 0x80 or 0x01 depending on whether the tuple
144 is hit or not. Called on every new crash or timeout, should be
145 reasonably fast. */
/* Lookup table for trace simplification: a hit count of 0 maps to 0x01,
   any non-zero hit count maps to 0x80. Used when building the hang/crash
   bitmaps, where only hit-vs-not-hit matters. */
const u8 simplify_lookup[256] = {

  [0] = 1, [1 ... 255] = 128

};
151
152 /* Destructively classify execution counts in a trace. This is used as a
153 preprocessing step for any newly acquired traces. Called on every exec,
154 must be fast. */
155
/* Bucketing table mapping a raw 8-bit hit count to its coarse class
   (0, 1, 2, 4, 8, 16, 32, 64, 128). Collapsing exact counts into these
   buckets keeps small execution-count jitter from registering as new
   behavior. */
const u8 count_class_lookup8[256] = {

  [0] = 0,
  [1] = 1,
  [2] = 2,
  [3] = 4,
  [4 ... 7] = 8,
  [8 ... 15] = 16,
  [16 ... 31] = 32,
  [32 ... 127] = 64,
  [128 ... 255] = 128

};
169
170 u16 count_class_lookup16[65536];
171
init_count_class16(void)172 void init_count_class16(void) {
173
174 u32 b1, b2;
175
176 for (b1 = 0; b1 < 256; b1++) {
177
178 for (b2 = 0; b2 < 256; b2++) {
179
180 count_class_lookup16[(b1 << 8) + b2] =
181 (count_class_lookup8[b1] << 8) | count_class_lookup8[b2];
182
183 }
184
185 }
186
187 }
188
189 /* Import coverage processing routines. */
190
191 #ifdef WORD_SIZE_64
192 #include "coverage-64.h"
193 #else
194 #include "coverage-32.h"
195 #endif
196
197 /* Check if the current execution path brings anything new to the table.
198 Update virgin bits to reflect the finds. Returns 1 if the only change is
199 the hit-count for a particular tuple; 2 if there are new tuples seen.
200 Updates the map, so subsequent calls will always return 0.
201
202 This function is called after every exec() on a fairly large buffer, so
203 it needs to be fast. We do this in 32-bit and 64-bit flavors. */
204
inline u8 has_new_bits(afl_state_t *afl, u8 *virgin_map) {

#ifdef WORD_SIZE_64

  u64 *current = (u64 *)afl->fsrv.trace_bits;
  u64 *virgin = (u64 *)virgin_map;

  /* Round the map size up to whole 64-bit words. */
  u32 i = ((afl->fsrv.real_map_size + 7) >> 3);

#else

  u32 *current = (u32 *)afl->fsrv.trace_bits;
  u32 *virgin = (u32 *)virgin_map;

  /* Round the map size up to whole 32-bit words. */
  u32 i = ((afl->fsrv.real_map_size + 3) >> 2);

#endif /* ^WORD_SIZE_64 */

  u8 ret = 0;
  while (i--) {

    /* discover_word() (from coverage-64.h / coverage-32.h) raises ret to
       1 (new hit counts) or 2 (new tuples) and clears the matching virgin
       bits as a side effect. Only words with any trace activity matter. */
    if (unlikely(*current)) discover_word(&ret, current, virgin);

    current++;
    virgin++;

  }

  /* Mark the bitmap dirty for write_bitmap(), but only when we updated the
     main virgin map (not the hang/crash-specific ones). */
  if (unlikely(ret) && likely(virgin_map == afl->virgin_bits))
    afl->bitmap_changed = 1;

  return ret;

}
239
240 /* A combination of classify_counts and has_new_bits. If 0 is returned, then the
241 * trace bits are kept as-is. Otherwise, the trace bits are overwritten with
242 * classified values.
243 *
244 * This accelerates the processing: in most cases, no interesting behavior
245 * happen, and the trace bits will be discarded soon. This function optimizes
246 * for such cases: one-pass scan on trace bits without modifying anything. Only
247 * on rare cases it fall backs to the slow path: classify_counts() first, then
248 * return has_new_bits(). */
249
inline u8 has_new_bits_unclassified(afl_state_t *afl, u8 *virgin_map) {

  /* Handle the hot path first: no new coverage */
  u8 *end = afl->fsrv.trace_bits + afl->fsrv.map_size;

#ifdef WORD_SIZE_64

  /* skim() (coverage-64.h) does a read-only classify-and-compare pass;
     returning 0 means nothing new, so the trace can be left unmodified. */
  if (!skim((u64 *)virgin_map, (u64 *)afl->fsrv.trace_bits, (u64 *)end))
    return 0;

#else

  if (!skim((u32 *)virgin_map, (u32 *)afl->fsrv.trace_bits, (u32 *)end))
    return 0;

#endif /* ^WORD_SIZE_64 */

  /* Rare slow path: something looked new, so classify the counts in place
     and let has_new_bits() update the virgin map and report 1 or 2. */
  classify_counts(&afl->fsrv);
  return has_new_bits(afl, virgin_map);

}
270
271 /* Compact trace bytes into a smaller bitmap. We effectively just drop the
272 count information here. This is called only sporadically, for some
273 new paths. */
274
void minimize_bits(afl_state_t *afl, u8 *dst, u8 *src) {

  u32 pos;

  /* One output bit per trace byte: set the bit iff the byte was hit at
     all, discarding the hit-count information. */
  for (pos = 0; pos < afl->fsrv.map_size; pos++) {

    if (src[pos]) { dst[pos >> 3] |= 1 << (pos & 7); }

  }

}
287
288 #ifndef SIMPLE_FILES
289
290 /* Construct a file name for a new test case, capturing the operation
291 that led to its discovery. Returns a ptr to afl->describe_op_buf_256. */
292
u8 *describe_op(afl_state_t *afl, u8 new_bits, size_t max_description_len) {

  u8 is_timeout = 0;

  /* Callers piggyback a 0x80 "this was a timeout" flag on top of the
     new_bits value; strip it and remember it for the ",+tout" suffix. */
  if (new_bits & 0xf0) {

    new_bits -= 0x80;
    is_timeout = 1;

  }

  /* Never describe past the shared scratch buffer, regardless of the
     caller-supplied limit. */
  size_t real_max_len =
      MIN(max_description_len, sizeof(afl->describe_op_buf_256));
  u8 *ret = afl->describe_op_buf_256;

  if (unlikely(afl->syncing_party)) {

    /* Test case imported from another fuzzer instance. */
    sprintf(ret, "sync:%s,src:%06u", afl->syncing_party, afl->syncing_case);

  } else {

    sprintf(ret, "src:%06u", afl->current_entry);

    if (afl->splicing_with >= 0) {

      /* Second parent id when the case came from the splice stage. */
      sprintf(ret + strlen(ret), "+%06d", afl->splicing_with);

    }

    sprintf(ret + strlen(ret), ",time:%llu,execs:%llu",
            get_cur_time() + afl->prev_run_time - afl->start_time,
            afl->fsrv.total_execs);

    if (afl->current_custom_fuzz &&
        afl->current_custom_fuzz->afl_custom_describe) {

      /* We are currently in a custom mutator that supports afl_custom_describe,
       * use it! */

      size_t len_current = strlen(ret);
      ret[len_current++] = ',';
      ret[len_current] = '\0';

      /* Budget left for the custom text, reserving room for the possible
         ",+cov"/",+tout" suffixes appended below. */
      ssize_t size_left = real_max_len - len_current - strlen(",+cov") - 2;
      if (is_timeout) { size_left -= strlen(",+tout"); }
      if (unlikely(size_left <= 0)) FATAL("filename got too long");

      const char *custom_description =
          afl->current_custom_fuzz->afl_custom_describe(
              afl->current_custom_fuzz->data, size_left);
      if (!custom_description || !custom_description[0]) {

        DEBUGF("Error getting a description from afl_custom_describe");
        /* Take the stage name as description fallback */
        sprintf(ret + len_current, "op:%s", afl->stage_short);

      } else {

        /* We got a proper custom description, use it */
        strncat(ret + len_current, custom_description, size_left);

      }

    } else {

      /* Normal testcase descriptions start here */
      sprintf(ret + strlen(ret), ",op:%s", afl->stage_short);

      if (afl->stage_cur_byte >= 0) {

        /* Deterministic stage: record the mutated position and value. */
        sprintf(ret + strlen(ret), ",pos:%d", afl->stage_cur_byte);

        if (afl->stage_val_type != STAGE_VAL_NONE) {

          sprintf(ret + strlen(ret), ",val:%s%+d",
                  (afl->stage_val_type == STAGE_VAL_BE) ? "be:" : "",
                  afl->stage_cur_val);

        }

      } else {

        /* Havoc-style stages: record the number of stacked mutations. */
        sprintf(ret + strlen(ret), ",rep:%d", afl->stage_cur_val);

      }

    }

  }

  if (is_timeout) { strcat(ret, ",+tout"); }

  if (new_bits == 2) { strcat(ret, ",+cov"); }

  /* Hard stop rather than silently producing a truncated file name. */
  if (unlikely(strlen(ret) >= max_description_len))
    FATAL("describe string is too long");

  return ret;

}
393
394 #endif /* !SIMPLE_FILES */
395
396 /* Write a message accompanying the crash directory :-) */
397
/* Drop an explanatory README.txt into the crashes/ directory the first time
   a crash is saved. Best-effort: any I/O failure is silently ignored. */

void write_crash_readme(afl_state_t *afl) {

  u8    fn[PATH_MAX];
  s32   fd;
  FILE *f;

  u8 val_buf[STRINGIFY_VAL_SIZE_MAX];

  /* Bounded formatting (consistent with the other path builders in this
     file) so an oversized out_dir cannot overflow fn. */
  snprintf(fn, PATH_MAX, "%s/crashes/README.txt", afl->out_dir);

  /* O_EXCL: only the first crash ever creates the file. */
  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);

  /* Do not die on errors here - that would be impolite. */

  if (unlikely(fd < 0)) { return; }

  f = fdopen(fd, "w");

  if (unlikely(!f)) {

    close(fd);
    return;

  }

  fprintf(
      f,
      "Command line used to find this crash:\n\n"

      "%s\n\n"

      "If you can't reproduce a bug outside of afl-fuzz, be sure to set the "
      "same\n"
      "memory limit. The limit used for this fuzzing session was %s.\n\n"

      "Need a tool to minimize test cases before investigating the crashes or "
      "sending\n"
      "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n"

      "Found any cool bugs in open-source tools using afl-fuzz? If yes, please "
      "post\n"
      "to https://github.com/AFLplusplus/AFLplusplus/issues/286 once the "
      "issues\n"
      " are fixed :)\n\n",

      afl->orig_cmdline,
      stringify_mem_size(val_buf, sizeof(val_buf),
                         afl->fsrv.mem_limit << 20));  /* ignore errors */

  fclose(f);

}
450
451 /* Check if the result of an execve() during routine fuzzing is interesting,
452 save or queue the input test case for further analysis if so. Returns 1 if
453 entry is saved, 0 otherwise. */
454
u8 __attribute__((hot))
save_if_interesting(afl_state_t *afl, void *mem, u32 len, u8 fault) {

  /* mem/len: the test case just executed; fault: the FSRV_RUN_* outcome.
     Returns 1 if the case was added to the queue, 0 otherwise. */

  if (unlikely(len == 0)) { return 0; }

  /* AFL_IGNORE_TIMEOUTS: drop timeouts entirely, but still feed the path
     frequency table for the AFLFast-family schedules. */
  if (unlikely(fault == FSRV_RUN_TMOUT && afl->afl_env.afl_ignore_timeouts)) {

    if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {

      classify_counts(&afl->fsrv);
      u64 cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

      // Saturated increment
      if (likely(afl->n_fuzz[cksum % N_FUZZ_SIZE] < 0xFFFFFFFF))
        afl->n_fuzz[cksum % N_FUZZ_SIZE]++;

    }

    return 0;

  }

  u8  fn[PATH_MAX];
  u8 *queue_fn = "";
  u8  new_bits = 0, keeping = 0, res, classified = 0, is_timeout = 0,
      need_hash = 1;
  s32 fd;
  u64 cksum = 0;

  /* Update path frequency. */

  /* Generating a hash on every input is super expensive. Bad idea and should
     only be used for special schedules */
  if (likely(afl->schedule >= FAST && afl->schedule <= RARE)) {

    classify_counts(&afl->fsrv);
    classified = 1;
    need_hash = 0;

    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

    /* Saturated increment */
    if (likely(afl->n_fuzz[cksum % N_FUZZ_SIZE] < 0xFFFFFFFF))
      afl->n_fuzz[cksum % N_FUZZ_SIZE]++;

  }

  /* crash_mode is 0 in normal operation, so this branch handles the
     "run completed as expected" case. */
  if (likely(fault == afl->crash_mode)) {

    /* Keep only if there are new bits in the map, add to queue for
       future fuzzing, etc. */

    if (likely(classified)) {

      new_bits = has_new_bits(afl, afl->virgin_bits);

    } else {

      /* Fast read-only skim first; classifies only when something is new. */
      new_bits = has_new_bits_unclassified(afl, afl->virgin_bits);

      if (unlikely(new_bits)) { classified = 1; }

    }

    if (likely(!new_bits)) {

      if (unlikely(afl->crash_mode)) { ++afl->total_crashes; }
      return 0;

    }

  save_to_queue:                     /* also reached from the timeout path
                                        when AFL_KEEP_TIMEOUTS is set */

#ifndef SIMPLE_FILES

    queue_fn =
        alloc_printf("%s/queue/id:%06u,%s", afl->out_dir, afl->queued_items,
                     describe_op(afl, new_bits + is_timeout,
                                 NAME_MAX - strlen("id:000000,")));

#else

    queue_fn =
        alloc_printf("%s/queue/id_%06u", afl->out_dir, afl->queued_items);

#endif                                                    /* ^!SIMPLE_FILES */
    fd = open(queue_fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
    if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", queue_fn); }
    ck_write(fd, mem, len, queue_fn);
    close(fd);
    add_to_queue(afl, queue_fn, len, 0);

    /* New coverage in exploitation mode flips us back to exploration. */
    if (unlikely(afl->fuzz_mode) &&
        likely(afl->switch_fuzz_mode && !afl->non_instrumented_mode)) {

      if (afl->afl_env.afl_no_ui) {

        ACTF("New coverage found, switching back to exploration mode.");

      }

      afl->fuzz_mode = 0;

    }

#ifdef INTROSPECTION
    if (afl->custom_mutators_count && afl->current_custom_fuzz) {

      LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

        if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

          const char *ptr = el->afl_custom_introspection(el->data);

          if (ptr != NULL && *ptr != 0) {

            fprintf(afl->introspection_file, "QUEUE CUSTOM %s = %s\n", ptr,
                    afl->queue_top->fname);

          }

        }

      });

    } else if (afl->mutation[0] != 0) {

      fprintf(afl->introspection_file, "QUEUE %s = %s\n", afl->mutation,
              afl->queue_top->fname);

    }

#endif

    if (new_bits == 2) {

      /* 2 == brand new tuples, not just changed hit counts. */
      afl->queue_top->has_new_cov = 1;
      ++afl->queued_with_cov;

    }

    if (unlikely(need_hash && new_bits)) {

      /* due to classify counts we have to recalculate the checksum */
      afl->queue_top->exec_cksum =
          hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
      need_hash = 0;

    }

    /* For AFLFast schedules we update the new queue entry */
    if (likely(cksum)) {

      afl->queue_top->n_fuzz_entry = cksum % N_FUZZ_SIZE;
      afl->n_fuzz[afl->queue_top->n_fuzz_entry] = 1;

    }

    /* Try to calibrate inline; this also calls update_bitmap_score() when
       successful. */
    res = calibrate_case(afl, afl->queue_top, mem, afl->queue_cycle - 1, 0);

    if (unlikely(res == FSRV_RUN_ERROR)) {

      FATAL("Unable to execute target application");

    }

    if (likely(afl->q_testcase_max_cache_size)) {

      queue_testcase_store_mem(afl, afl->queue_top, mem);

    }

    keeping = 1;

  }

  /* Queue handling is done; now decide whether the fault itself (hang or
     crash) is unique enough to archive. */
  switch (fault) {

    case FSRV_RUN_TMOUT:

      /* Timeouts are not very interesting, but we're still obliged to keep
         a handful of samples. We use the presence of new bits in the
         hang-specific bitmap as a signal of uniqueness. In "non-instrumented"
         mode, we just keep everything. */

      ++afl->total_tmouts;

      if (afl->saved_hangs >= KEEP_UNIQUE_HANG) { return keeping; }

      if (likely(!afl->non_instrumented_mode)) {

        if (unlikely(!classified)) {

          classify_counts(&afl->fsrv);
          classified = 1;

        }

        /* Collapse hit counts to hit/no-hit before comparing against the
           hang-specific virgin map. */
        simplify_trace(afl, afl->fsrv.trace_bits);

        if (!has_new_bits(afl, afl->virgin_tmout)) { return keeping; }

      }

      is_timeout = 0x80;
#ifdef INTROSPECTION
      if (afl->custom_mutators_count && afl->current_custom_fuzz) {

        LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

          if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

            const char *ptr = el->afl_custom_introspection(el->data);

            if (ptr != NULL && *ptr != 0) {

              fprintf(afl->introspection_file,
                      "UNIQUE_TIMEOUT CUSTOM %s = %s\n", ptr,
                      afl->queue_top->fname);

            }

          }

        });

      } else if (afl->mutation[0] != 0) {

        fprintf(afl->introspection_file, "UNIQUE_TIMEOUT %s\n", afl->mutation);

      }

#endif

      /* Before saving, we make sure that it's a genuine hang by re-running
         the target with a more generous timeout (unless the default timeout
         is already generous). */

      if (afl->fsrv.exec_tmout < afl->hang_tmout) {

        u8  new_fault;
        u32 tmp_len = write_to_testcase(afl, &mem, len, 0);

        if (likely(tmp_len)) {

          len = tmp_len;

        } else {

          /* Custom mutator post-processing failed; retry with fixing on. */
          len = write_to_testcase(afl, &mem, len, 1);

        }

        new_fault = fuzz_run_target(afl, &afl->fsrv, afl->hang_tmout);
        classify_counts(&afl->fsrv);

        /* A corner case that one user reported bumping into: increasing the
           timeout actually uncovers a crash. Make sure we don't discard it if
           so. */

        if (!afl->stop_soon && new_fault == FSRV_RUN_CRASH) {

          goto keep_as_crash;

        }

        if (afl->stop_soon || new_fault != FSRV_RUN_TMOUT) {

          if (afl->afl_env.afl_keep_timeouts) {

            ++afl->saved_tmouts;
            goto save_to_queue;

          } else {

            return keeping;

          }

        }

      }

#ifndef SIMPLE_FILES

      snprintf(fn, PATH_MAX, "%s/hangs/id:%06llu,%s", afl->out_dir,
               afl->saved_hangs,
               describe_op(afl, 0, NAME_MAX - strlen("id:000000,")));

#else

      snprintf(fn, PATH_MAX, "%s/hangs/id_%06llu", afl->out_dir,
               afl->saved_hangs);

#endif                                                    /* ^!SIMPLE_FILES */

      ++afl->saved_hangs;

      afl->last_hang_time = get_cur_time();

      break;

    case FSRV_RUN_CRASH:

    keep_as_crash:

      /* This is handled in a manner roughly similar to timeouts,
         except for slightly different limits and no need to re-run test
         cases. */

      ++afl->total_crashes;

      if (afl->saved_crashes >= KEEP_UNIQUE_CRASH) { return keeping; }

      if (likely(!afl->non_instrumented_mode)) {

        if (unlikely(!classified)) {

          classify_counts(&afl->fsrv);
          classified = 1;

        }

        simplify_trace(afl, afl->fsrv.trace_bits);

        if (!has_new_bits(afl, afl->virgin_crash)) { return keeping; }

      }

      /* First crash ever: drop a README into crashes/ unless disabled. */
      if (unlikely(!afl->saved_crashes) &&
          (afl->afl_env.afl_no_crash_readme != 1)) {

        write_crash_readme(afl);

      }

#ifndef SIMPLE_FILES

      snprintf(fn, PATH_MAX, "%s/crashes/id:%06llu,sig:%02u,%s", afl->out_dir,
               afl->saved_crashes, afl->fsrv.last_kill_signal,
               describe_op(afl, 0, NAME_MAX - strlen("id:000000,sig:00,")));

#else

      snprintf(fn, PATH_MAX, "%s/crashes/id_%06llu_%02u", afl->out_dir,
               afl->saved_crashes, afl->fsrv.last_kill_signal);

#endif                                                    /* ^!SIMPLE_FILES */

      ++afl->saved_crashes;
#ifdef INTROSPECTION
      if (afl->custom_mutators_count && afl->current_custom_fuzz) {

        LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

          if (afl->current_custom_fuzz == el && el->afl_custom_introspection) {

            const char *ptr = el->afl_custom_introspection(el->data);

            if (ptr != NULL && *ptr != 0) {

              fprintf(afl->introspection_file, "UNIQUE_CRASH CUSTOM %s = %s\n",
                      ptr, afl->queue_top->fname);

            }

          }

        });

      } else if (afl->mutation[0] != 0) {

        fprintf(afl->introspection_file, "UNIQUE_CRASH %s\n", afl->mutation);

      }

#endif
      if (unlikely(afl->infoexec)) {

        // if the user wants to be informed on new crashes - do that
#if !TARGET_OS_IPHONE
        // we dont care if system errors, but we dont want a
        // compiler warning either
        // See
        // https://stackoverflow.com/questions/11888594/ignoring-return-values-in-c
        (void)(system(afl->infoexec) + 1);
#else
        WARNF("command execution unsupported");
#endif

      }

      afl->last_crash_time = get_cur_time();
      afl->last_crash_execs = afl->fsrv.total_execs;

      break;

    case FSRV_RUN_ERROR:
      FATAL("Unable to execute target application");

    default:
      return keeping;

  }

  /* If we're here, we apparently want to save the crash or hang
     test case, too. */

  fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
  if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", fn); }
  ck_write(fd, mem, len, fn);
  close(fd);

#ifdef __linux__
  /* In Nyx mode, also save the hypervisor's auxiliary log next to the
     crashing input. */
  if (afl->fsrv.nyx_mode && fault == FSRV_RUN_CRASH) {

    u8 fn_log[PATH_MAX];

    (void)(snprintf(fn_log, PATH_MAX, "%s.log", fn) + 1);
    fd = open(fn_log, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);
    if (unlikely(fd < 0)) { PFATAL("Unable to create '%s'", fn_log); }

    u32 nyx_aux_string_len = afl->fsrv.nyx_handlers->nyx_get_aux_string(
        afl->fsrv.nyx_runner, afl->fsrv.nyx_aux_string,
        afl->fsrv.nyx_aux_string_len);

    ck_write(fd, afl->fsrv.nyx_aux_string, nyx_aux_string_len, fn_log);
    close(fd);

  }

#endif

  return keeping;

}
893
894