/*
   american fuzzy lop++ - target execution related routines
   --------------------------------------------------------

   Originally written by Michal Zalewski

   Now maintained by Marc Heuse <[email protected]>,
                        Heiko Eißfeldt <[email protected]> and
                        Andrea Fioraldi <[email protected]> and
                        Dominik Maier <[email protected]>

   Copyright 2016, 2017 Google Inc. All rights reserved.
   Copyright 2019-2024 AFLplusplus Project. All rights reserved.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at:

     https://www.apache.org/licenses/LICENSE-2.0

   This is the real deal: the program takes an instrumented binary and
   attempts a variety of basic fuzzing tricks, paying close attention to
   how they affect the execution path.

 */

#include "afl-fuzz.h"
#include <sys/time.h>
#include <signal.h>
#include <limits.h>
#if !defined NAME_MAX
  #define NAME_MAX _XOPEN_NAME_MAX
#endif

#include "cmplog.h"

#ifdef PROFILING
u64 time_spent_working = 0;
#endif

/* Execute target application, monitoring for timeouts. Return status
   information. The called program will update afl->fsrv.trace_bits. */

fsrv_run_result_t __attribute__((hot))
fuzz_run_target(afl_state_t *afl, afl_forkserver_t *fsrv, u32 timeout) {

#ifdef PROFILING
  static u64      time_spent_start = 0;
  struct timespec spec;
  if (time_spent_start) {

    u64 current;
    clock_gettime(CLOCK_REALTIME, &spec);
    current = (spec.tv_sec * 1000000000) + spec.tv_nsec;
    time_spent_working += (current - time_spent_start);

  }

#endif

  fsrv_run_result_t res = afl_fsrv_run_target(fsrv, timeout, &afl->stop_soon);

  /* If a post_run() function is defined in a custom mutator, it is called
     after every execution of the target program. */
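  /* The hook signature, as invoked below, is simply:
       void afl_custom_post_run(void *data); */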

  if (unlikely(afl->custom_mutators_count)) {

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (unlikely(el->afl_custom_post_run)) {

        el->afl_custom_post_run(el->data);

      }

    });

  }

#ifdef PROFILING
  clock_gettime(CLOCK_REALTIME, &spec);
  time_spent_start = (spec.tv_sec * 1000000000) + spec.tv_nsec;
#endif

  return res;

}

/* Write modified data to file for testing. If afl->fsrv.out_file is set, the
   old file is unlinked and a new one is created. Otherwise, afl->fsrv.out_fd is
   rewound and truncated. */

u32 __attribute__((hot))
write_to_testcase(afl_state_t *afl, void **mem, u32 len, u32 fix) {

  u8 sent = 0;

  if (unlikely(afl->custom_mutators_count)) {

    ssize_t new_size = len;
    u8     *new_mem = *mem;
    u8     *new_buf = NULL;

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (el->afl_custom_post_process) {

        new_size =
            el->afl_custom_post_process(el->data, new_mem, new_size, &new_buf);

        if (unlikely(!new_buf || new_size <= 0)) {

          new_size = 0;
          new_buf = new_mem;
          // FATAL("Custom_post_process failed (ret: %lu)", (long
          // unsigned)new_size);

        } else {

          new_mem = new_buf;

        }

      }

    });
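
    /* For reference, each hook above follows the custom mutator API
       (see docs/custom_mutators.md):

         size_t afl_custom_post_process(void *data, unsigned char *buf,
                                        size_t buf_size,
                                        unsigned char **out_buf);

       A NULL *out_buf or a non-positive size is treated as "skip this
       input" rather than as a fatal error. */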

    if (unlikely(!new_size)) {

      // perform dummy runs (fix = 1), but skip all others
      if (fix) {

        new_size = len;

      } else {

        return 0;

      }

    }

    if (unlikely(new_size < afl->min_length && !fix)) {

      new_size = afl->min_length;

    } else if (unlikely(new_size > afl->max_length)) {

      new_size = afl->max_length;

    }

    if (new_mem != *mem && new_mem != NULL && new_size > 0) {

      new_buf = afl_realloc(AFL_BUF_PARAM(out_scratch), new_size);
      if (unlikely(!new_buf)) { PFATAL("alloc"); }
      memcpy(new_buf, new_mem, new_size);

      /* if AFL_POST_PROCESS_KEEP_ORIGINAL is set, save the original memory
         from before post-processing in new_mem so it can be restored later */
      if (unlikely(afl->afl_env.afl_post_process_keep_original)) {

        new_mem = *mem;

      }

      *mem = new_buf;
      afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));

    }

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (el->afl_custom_fuzz_send) {

        el->afl_custom_fuzz_send(el->data, *mem, new_size);
        sent = 1;

      }

    });
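
    /* afl_custom_fuzz_send lets a custom mutator deliver the test case
       itself (e.g. over a network socket); when any hook does so, the
       default file/shmem delivery below is skipped. */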

    if (likely(!sent)) {

      /* everything as planned. use the potentially new data. */
      afl_fsrv_write_to_testcase(&afl->fsrv, *mem, new_size);

      if (likely(!afl->afl_env.afl_post_process_keep_original)) {

        len = new_size;

      } else {

        /* restore the original memory which was saved in new_mem */
        *mem = new_mem;
        afl_swap_bufs(AFL_BUF_PARAM(out), AFL_BUF_PARAM(out_scratch));

      }

    }

  } else {                                   /* !afl->custom_mutators_count */

    if (unlikely(len < afl->min_length && !fix)) {

      len = afl->min_length;

    } else if (unlikely(len > afl->max_length)) {

      len = afl->max_length;

    }

    /* boring uncustom. */
    afl_fsrv_write_to_testcase(&afl->fsrv, *mem, len);

  }

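  /* When built with _AFL_DOCUMENT_MUTATIONS, every test case that reaches
     the target is additionally saved under <out_dir>/mutations/ so the
     mutation stream can be inspected offline. */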
#ifdef _AFL_DOCUMENT_MUTATIONS
  s32  doc_fd;
  char fn[PATH_MAX];
  snprintf(fn, PATH_MAX, "%s/mutations/%09u:%s", afl->out_dir,
           afl->document_counter++,
           describe_op(afl, 0, NAME_MAX - strlen("000000000:")));

  if ((doc_fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION)) >=
      0) {

    if (write(doc_fd, *mem, len) != len)
      PFATAL("write to mutation file failed: %s", fn);
    close(doc_fd);

  }

#endif

  return len;

}

/* The same, but with an adjustable gap. Used for trimming. */

static void write_with_gap(afl_state_t *afl, u8 *mem, u32 len, u32 skip_at,
                           u32 skip_len) {

  s32 fd = afl->fsrv.out_fd;
  u32 tail_len = len - skip_at - skip_len;

  /*
    This buffer is used to carry out post-processing (if present) after
    copying the test case with the gap removed. Post-processing can still
    fail, so its result is validated below.
  */
  u8 *mem_trimmed = afl_realloc(AFL_BUF_PARAM(out_scratch), len - skip_len + 1);
  if (unlikely(!mem_trimmed)) { PFATAL("alloc"); }

  ssize_t new_size = len - skip_len;
  u8     *new_mem = mem;

  bool post_process_skipped = true;

  if (unlikely(afl->custom_mutators_count)) {

    u8 *new_buf = NULL;
    new_mem = mem_trimmed;

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (el->afl_custom_post_process) {

        // We copy into mem_trimmed only if we actually have custom mutators
        // *with* post-processing installed

        if (post_process_skipped) {

          if (skip_at) { memcpy(mem_trimmed, (u8 *)mem, skip_at); }

          if (tail_len) {

            memcpy(mem_trimmed + skip_at, (u8 *)mem + skip_at + skip_len,
                   tail_len);

          }

          post_process_skipped = false;

        }

        new_size =
            el->afl_custom_post_process(el->data, new_mem, new_size, &new_buf);

        if (unlikely(!new_buf || new_size <= 0)) {

          new_size = 0;
          new_buf = new_mem;
          // FATAL("Custom_post_process failed (ret: %lu)", (long
          // unsigned)new_size);

        } else {

          new_mem = new_buf;

        }

      }

    });

  }

  if (likely(afl->fsrv.use_shmem_fuzz)) {

    if (!post_process_skipped) {

      // If we did post_processing, copy directly from the new_mem buffer

      memcpy(afl->fsrv.shmem_fuzz, new_mem, new_size);

    } else {

      memcpy(afl->fsrv.shmem_fuzz, mem, skip_at);

      memcpy(afl->fsrv.shmem_fuzz + skip_at, mem + skip_at + skip_len,
             tail_len);

    }

    *afl->fsrv.shmem_fuzz_len = new_size;
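
    /* With shared-memory fuzzing the test case is placed directly into a
       shm region that the forkserver reads, avoiding file I/O entirely;
       shmem_fuzz_len tells the target how many bytes are valid. */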

#ifdef _DEBUG
    if (afl->debug) {

      fprintf(
          stderr, "FS crc: %16llx len: %u\n",
          hash64(afl->fsrv.shmem_fuzz, *afl->fsrv.shmem_fuzz_len, HASH_CONST),
          *afl->fsrv.shmem_fuzz_len);
      fprintf(stderr, "SHM :");
      for (u32 i = 0; i < *afl->fsrv.shmem_fuzz_len; i++)
        fprintf(stderr, "%02x", afl->fsrv.shmem_fuzz[i]);
      fprintf(stderr, "\nORIG:");
      for (u32 i = 0; i < *afl->fsrv.shmem_fuzz_len; i++)
        fprintf(stderr, "%02x", (u8)((u8 *)mem)[i]);
      fprintf(stderr, "\n");

    }

#endif

    return;

  } else if (unlikely(!afl->fsrv.use_stdin)) {

    if (unlikely(afl->no_unlink)) {

      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_TRUNC,
                DEFAULT_PERMISSION);

    } else {

      unlink(afl->fsrv.out_file);                         /* Ignore errors. */
      fd = open(afl->fsrv.out_file, O_WRONLY | O_CREAT | O_EXCL,
                DEFAULT_PERMISSION);

    }

    if (fd < 0) { PFATAL("Unable to create '%s'", afl->fsrv.out_file); }

  } else {

    lseek(fd, 0, SEEK_SET);

  }

  if (!post_process_skipped) {

    ck_write(fd, new_mem, new_size, afl->fsrv.out_file);

  } else {

    ck_write(fd, mem, skip_at, afl->fsrv.out_file);

    ck_write(fd, mem + skip_at + skip_len, tail_len, afl->fsrv.out_file);

  }

  if (afl->fsrv.use_stdin) {

    if (ftruncate(fd, new_size)) { PFATAL("ftruncate() failed"); }
    lseek(fd, 0, SEEK_SET);

  } else {

    close(fd);

  }

}

/* Calibrate a new test case. This is done when processing the input directory
   to warn about flaky or otherwise problematic test cases early on; and when
   new paths are discovered to detect variable behavior and so on. */

u8 calibrate_case(afl_state_t *afl, struct queue_entry *q, u8 *use_mem,
                  u32 handicap, u8 from_queue) {

  u8 fault = 0, new_bits = 0, var_detected = 0, hnb = 0,
     first_run = (q->exec_cksum == 0);
  u64 start_us, stop_us, diff_us;
  s32 old_sc = afl->stage_cur, old_sm = afl->stage_max;
  u32 use_tmout = afl->fsrv.exec_tmout;
  u8 *old_sn = afl->stage_name;

  if (unlikely(afl->shm.cmplog_mode)) { q->exec_cksum = 0; }

  /* Be a bit more generous about timeouts when resuming sessions, or when
     trying to calibrate already-added finds. This helps avoid trouble due
     to intermittent latency. */

  if (!from_queue || afl->resuming_fuzz) {

    use_tmout = MAX(afl->fsrv.exec_tmout + CAL_TMOUT_ADD,
                    afl->fsrv.exec_tmout * CAL_TMOUT_PERC / 100);

  }
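
  /* i.e. use the larger of a fixed bump (CAL_TMOUT_ADD) and a percentage
     increase (CAL_TMOUT_PERC) over the configured timeout; both constants
     live in config.h. */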

  ++q->cal_failed;

  afl->stage_name = "calibration";
  afl->stage_max = afl->afl_env.afl_cal_fast ? CAL_CYCLES_FAST : CAL_CYCLES;

  /* Make sure the forkserver is up before we do anything, and let's not
     count its spin-up time toward binary calibration. */

  if (!afl->fsrv.fsrv_pid) {

    if (afl->fsrv.cmplog_binary &&
        afl->fsrv.init_child_func != cmplog_exec_child) {

      FATAL("BUG in afl-fuzz detected. Cmplog mode not set correctly.");

    }

    afl_fsrv_start(&afl->fsrv, afl->argv, &afl->stop_soon,
                   afl->afl_env.afl_debug_child);

    if (afl->fsrv.support_shmem_fuzz && !afl->fsrv.use_shmem_fuzz) {

      afl_shm_deinit(afl->shm_fuzz);
      ck_free(afl->shm_fuzz);
      afl->shm_fuzz = NULL;
      afl->fsrv.support_shmem_fuzz = 0;
      afl->fsrv.shmem_fuzz = NULL;

    }

  }

  /* we need a dummy run if this is LTO + cmplog */
  if (unlikely(afl->shm.cmplog_mode)) {

    (void)write_to_testcase(afl, (void **)&use_mem, q->len, 1);

    fault = fuzz_run_target(afl, &afl->fsrv, use_tmout);

    /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
       we want to bail out quickly. */

    if (afl->stop_soon || fault != afl->crash_mode) { goto abort_calibration; }

    if (!afl->non_instrumented_mode && !afl->stage_cur &&
        !count_bytes(afl, afl->fsrv.trace_bits)) {

      fault = FSRV_RUN_NOINST;
      goto abort_calibration;

    }

#ifdef INTROSPECTION
    if (unlikely(!q->bitsmap_size)) q->bitsmap_size = afl->bitsmap_size;
#endif

  }

  if (q->exec_cksum) {

    memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);
    hnb = has_new_bits(afl, afl->virgin_bits);
    if (hnb > new_bits) { new_bits = hnb; }

  }

  start_us = get_cur_time_us();

  for (afl->stage_cur = 0; afl->stage_cur < afl->stage_max; ++afl->stage_cur) {

    if (unlikely(afl->debug)) {

      DEBUGF("calibration stage %d/%d\n", afl->stage_cur + 1, afl->stage_max);

    }

    u64 cksum;

    (void)write_to_testcase(afl, (void **)&use_mem, q->len, 1);

    fault = fuzz_run_target(afl, &afl->fsrv, use_tmout);

    /* afl->stop_soon is set by the handler for Ctrl+C. When it's pressed,
       we want to bail out quickly. */

    if (afl->stop_soon || fault != afl->crash_mode) { goto abort_calibration; }

    if (!afl->non_instrumented_mode && !afl->stage_cur &&
        !count_bytes(afl, afl->fsrv.trace_bits)) {

      fault = FSRV_RUN_NOINST;
      goto abort_calibration;

    }

#ifdef INTROSPECTION
    if (unlikely(!q->bitsmap_size)) q->bitsmap_size = afl->bitsmap_size;
#endif

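    /* classify_counts() buckets the raw edge hit counts before hashing, so
       that small run-to-run count jitter does not register as a different
       execution path. */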
    classify_counts(&afl->fsrv);
    cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);
    if (q->exec_cksum != cksum) {

      hnb = has_new_bits(afl, afl->virgin_bits);
      if (hnb > new_bits) { new_bits = hnb; }

      if (q->exec_cksum) {

        u32 i;

        for (i = 0; i < afl->fsrv.map_size; ++i) {

          if (unlikely(!afl->var_bytes[i]) &&
              unlikely(afl->first_trace[i] != afl->fsrv.trace_bits[i])) {

            afl->var_bytes[i] = 1;
            // ignore the variable edge by setting it to fully discovered
            afl->virgin_bits[i] = 0;

          }

        }

        if (unlikely(!var_detected && !afl->afl_env.afl_no_warn_instability)) {

          // note: from_queue seems to only be set during initialization
          if (afl->afl_env.afl_no_ui || from_queue) {

            WARNF("instability detected during calibration");

          } else if (afl->debug) {

            DEBUGF("instability detected during calibration\n");

          }

        }

        var_detected = 1;
        afl->stage_max =
            afl->afl_env.afl_cal_fast ? CAL_CYCLES : CAL_CYCLES_LONG;

      } else {

        q->exec_cksum = cksum;
        memcpy(afl->first_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);

      }

    }

  }

  if (unlikely(afl->fixed_seed)) {

    diff_us = (u64)(afl->fsrv.exec_tmout - 1) * (u64)afl->stage_max;

  } else {

    stop_us = get_cur_time_us();
    diff_us = stop_us - start_us;
    if (unlikely(!diff_us)) { ++diff_us; }

  }

  afl->total_cal_us += diff_us;
  afl->total_cal_cycles += afl->stage_max;

  /* OK, let's collect some stats about the performance of this test case.
     This is used for fuzzing air time calculations in calculate_score(). */

  if (unlikely(!afl->stage_max)) {

    // Pretty sure this cannot happen, yet scan-build complains.
    FATAL("BUG: stage_max should not be 0 here! Please report this condition.");

  }

  q->exec_us = diff_us / afl->stage_max;
  q->bitmap_size = count_bytes(afl, afl->fsrv.trace_bits);
  q->handicap = handicap;
  q->cal_failed = 0;

  afl->total_bitmap_size += q->bitmap_size;
  ++afl->total_bitmap_entries;

  update_bitmap_score(afl, q);

  /* If this case didn't result in new output from the instrumentation, tell
     parent. This is a non-critical problem, but something to warn the user
     about. */

  if (!afl->non_instrumented_mode && first_run && !fault && !new_bits) {

    fault = FSRV_RUN_NOBITS;

  }

abort_calibration:

  if (new_bits == 2 && !q->has_new_cov) {

    q->has_new_cov = 1;
    ++afl->queued_with_cov;

  }

  /* Mark variable paths. */

  if (var_detected) {

    afl->var_byte_count = count_bytes(afl, afl->var_bytes);

    if (!q->var_behavior) {

      mark_as_variable(afl, q);
      ++afl->queued_variable;

    }

  }

  afl->stage_name = old_sn;
  afl->stage_cur = old_sc;
  afl->stage_max = old_sm;

  if (!first_run) { show_stats(afl); }

  return fault;

}

/* Grab interesting test cases from other fuzzers. */

void sync_fuzzers(afl_state_t *afl) {

  DIR           *sd;
  struct dirent *sd_ent;
  u32            sync_cnt = 0, synced = 0, entries = 0;
  u8             path[PATH_MAX + 1 + NAME_MAX];

  sd = opendir(afl->sync_dir);
  if (!sd) { PFATAL("Unable to open '%s'", afl->sync_dir); }

  afl->stage_max = afl->stage_cur = 0;
  afl->cur_depth = 0;

  /* Look at the entries created for every other fuzzer in the sync directory.
   */

  while ((sd_ent = readdir(sd))) {

    u8  qd_synced_path[PATH_MAX], qd_path[PATH_MAX];
    u32 min_accept = 0, next_min_accept = 0;

    s32 id_fd;

    /* Skip dot files and our own output directory. */

    if (sd_ent->d_name[0] == '.' || !strcmp(afl->sync_id, sd_ent->d_name)) {

      continue;

    }

    entries++;

    // secondary nodes only sync from the main node; the main syncs from everyone
    if (likely(afl->is_secondary_node)) {

      sprintf(qd_path, "%s/%s/is_main_node", afl->sync_dir, sd_ent->d_name);
      int res = access(qd_path, F_OK);
      if (unlikely(afl->is_main_node)) {  // an elected temporary main node

        if (likely(res == 0)) {  // there is another main node? downgrade.

          afl->is_main_node = 0;
          sprintf(qd_path, "%s/is_main_node", afl->out_dir);
          unlink(qd_path);

        }

      } else {

        if (likely(res != 0)) { continue; }

      }

    }

    synced++;

    /* document the attempt to sync to this instance */

    sprintf(qd_synced_path, "%s/.synced/%s.last", afl->out_dir, sd_ent->d_name);
    id_fd =
        open(qd_synced_path, O_RDWR | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);
    if (id_fd >= 0) close(id_fd);

    /* Skip anything that doesn't have a queue/ subdirectory. */

    sprintf(qd_path, "%s/%s/queue", afl->sync_dir, sd_ent->d_name);

    struct dirent **namelist = NULL;
    int             m = 0, n, o;

    n = scandir(qd_path, &namelist, NULL, alphasort);

    if (n < 1) {

      if (namelist) free(namelist);
      continue;

    }

    /* Retrieve the ID of the last seen test case. */

    sprintf(qd_synced_path, "%s/.synced/%s", afl->out_dir, sd_ent->d_name);

    id_fd = open(qd_synced_path, O_RDWR | O_CREAT, DEFAULT_PERMISSION);

    if (id_fd < 0) { PFATAL("Unable to create '%s'", qd_synced_path); }

    if (read(id_fd, &min_accept, sizeof(u32)) == sizeof(u32)) {

      next_min_accept = min_accept;
      lseek(id_fd, 0, SEEK_SET);

    }

    /* Show stats */

    snprintf(afl->stage_name_buf, STAGE_BUF_SIZE, "sync %u", ++sync_cnt);

    afl->stage_name = afl->stage_name_buf;
    afl->stage_cur = 0;
    afl->stage_max = 0;

    /* For every file queued by this fuzzer, parse ID and see if we have
       looked at it before; exec a test case if not. */

    u8 entry[12];
    sprintf(entry, "id:%06u", next_min_accept);

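    /* namelist is sorted by alphasort(), so scanning forward to the first
       name whose "id:NNNNNN" prefix matches next_min_accept skips every
       entry that was already imported on a previous sync. */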
    while (m < n) {

      if (strncmp(namelist[m]->d_name, entry, 9)) {

        m++;

      } else {

        break;

      }

    }

    if (m >= n) { goto close_sync; }  // nothing new

    for (o = m; o < n; o++) {

      s32         fd;
      struct stat st;

      snprintf(path, sizeof(path), "%s/%s", qd_path, namelist[o]->d_name);
      afl->syncing_case = next_min_accept;
      next_min_accept++;

      /* Allow this to fail in case the other fuzzer is resuming or so... */

      fd = open(path, O_RDONLY);

      if (fd < 0) { continue; }

      if (fstat(fd, &st)) { WARNF("fstat() failed"); }

      /* Ignore zero-sized or oversized files. */

      if (st.st_size && st.st_size <= MAX_FILE) {

        u8  fault;
        u8 *mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);

        if (mem == MAP_FAILED) { PFATAL("Unable to mmap '%s'", path); }

        /* See what happens. We rely on save_if_interesting() to catch major
           errors and save the test case. */

        (void)write_to_testcase(afl, (void **)&mem, st.st_size, 1);

        fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);

        if (afl->stop_soon) { goto close_sync; }

        afl->syncing_party = sd_ent->d_name;
        afl->queued_imported +=
            save_if_interesting(afl, mem, st.st_size, fault);
        afl->syncing_party = 0;

        munmap(mem, st.st_size);

      }

      close(fd);

    }

    ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path);

  close_sync:
    close(id_fd);
    if (n > 0)
      for (m = 0; m < n; m++)
        free(namelist[m]);
    free(namelist);

  }

  closedir(sd);

  // If we are a secondary and no main node was found to sync from, become
  // the main node ourselves
  if (unlikely(synced == 0) && likely(entries) &&
      likely(afl->is_secondary_node)) {

    // there is a small race condition here: another secondary may do the
    // same at the same time. If so, the first temporary main node to run
    // again will demote itself, so this is not an issue

    //    u8 path2[PATH_MAX];
    afl->is_main_node = 1;
    sprintf(path, "%s/is_main_node", afl->out_dir);
    int fd = open(path, O_CREAT | O_RDWR, 0644);
    if (fd >= 0) { close(fd); }

  }

  if (afl->foreign_sync_cnt) read_foreign_testcases(afl, 0);

  afl->last_sync_time = get_cur_time();
  afl->last_sync_cycle = afl->queue_cycle;

}

/* Trim all new test cases to save cycles when doing deterministic checks. The
   trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of
   file size, to keep the stage short and sweet. */

u8 trim_case(afl_state_t *afl, struct queue_entry *q, u8 *in_buf) {

  u32 orig_len = q->len;

  /* Custom mutator trimmer */
  if (afl->custom_mutators_count) {

    u8   trimmed_case = 0;
    bool custom_trimmed = false;

    LIST_FOREACH(&afl->custom_mutator_list, struct custom_mutator, {

      if (el->afl_custom_trim) {

        trimmed_case = trim_case_custom(afl, q, in_buf, el);
        custom_trimmed = true;

      }

    });

    if (orig_len != q->len || custom_trimmed) {

      queue_testcase_retake(afl, q, orig_len);

    }

    if (custom_trimmed) return trimmed_case;

  }

  u8  needs_write = 0, fault = 0;
  u32 trim_exec = 0;
  u32 remove_len;
  u32 len_p2;

  u8 val_bufs[2][STRINGIFY_VAL_SIZE_MAX];

  /* Although the trimmer will be less useful when variable behavior is
     detected, it will still work to some extent, so we don't check for
     this. */

  if (unlikely(q->len < 5)) { return 0; }

  afl->stage_name = afl->stage_name_buf;
  afl->bytes_trim_in += q->len;

  /* Select initial chunk len, starting with large steps. */

  len_p2 = next_pow2(q->len);

  remove_len = MAX(len_p2 / TRIM_START_STEPS, (u32)TRIM_MIN_BYTES);
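
  /* A worked example, assuming the config.h defaults of TRIM_START_STEPS =
     16, TRIM_END_STEPS = 1024 and TRIM_MIN_BYTES = 4: a 4096-byte input is
     first trimmed in 256-byte chunks, halving each round down to 4 bytes. */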

  /* Continue until the number of steps gets too high or the stepover
     gets too small. */

  while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, (u32)TRIM_MIN_BYTES)) {

    u32 remove_pos = remove_len;

    sprintf(afl->stage_name_buf, "trim %s/%s",
            u_stringify_int(val_bufs[0], remove_len),
            u_stringify_int(val_bufs[1], remove_len));

    afl->stage_cur = 0;
    afl->stage_max = q->len / remove_len;

    while (remove_pos < q->len) {

      u32 trim_avail = MIN(remove_len, q->len - remove_pos);
      u64 cksum;

      write_with_gap(afl, in_buf, q->len, remove_pos, trim_avail);

      fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);

      if (afl->stop_soon || fault == FSRV_RUN_ERROR) { goto abort_trimming; }

      /* Note that we don't keep track of crashes or hangs here; maybe TODO?
       */

      ++afl->trim_execs;
      classify_counts(&afl->fsrv);
      cksum = hash64(afl->fsrv.trace_bits, afl->fsrv.map_size, HASH_CONST);

      /* If the deletion had no impact on the trace, make it permanent. This
         isn't perfect for variable-path inputs, but we're just making a
         best-effort pass, so it's not a big deal if we end up with false
         negatives every now and then. */

      if (cksum == q->exec_cksum) {

        u32 move_tail = q->len - remove_pos - trim_avail;

        q->len -= trim_avail;
        len_p2 = next_pow2(q->len);

        memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail,
                move_tail);

        /* Let's save a clean trace, which will be needed by
           update_bitmap_score once we're done with the trimming stuff. */

        if (!needs_write) {

          needs_write = 1;
          memcpy(afl->clean_trace, afl->fsrv.trace_bits, afl->fsrv.map_size);

        }

      } else {

        remove_pos += remove_len;

      }

      /* Since this can be slow, update the screen every now and then. */

      if (!(trim_exec++ % afl->stats_update_freq)) { show_stats(afl); }
      ++afl->stage_cur;

    }

    remove_len >>= 1;

  }

  /* If we have made changes to in_buf, we also need to update the on-disk
     version of the test case. */

  if (needs_write) {

    s32 fd;

    if (unlikely(afl->no_unlink)) {

      fd = open(q->fname, O_WRONLY | O_CREAT | O_TRUNC, DEFAULT_PERMISSION);

      if (fd < 0) { PFATAL("Unable to create '%s'", q->fname); }

      u32 written = 0;
      while (written < q->len) {

        /* advance the source pointer on short writes */
        ssize_t result = write(fd, in_buf + written, q->len - written);
        if (result > 0) written += result;

      }

    } else {

      unlink(q->fname);                                    /* ignore errors */
      fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, DEFAULT_PERMISSION);

      if (fd < 0) { PFATAL("Unable to create '%s'", q->fname); }

      ck_write(fd, in_buf, q->len, q->fname);

    }

    close(fd);

    queue_testcase_retake_mem(afl, q, in_buf, q->len, orig_len);

    memcpy(afl->fsrv.trace_bits, afl->clean_trace, afl->fsrv.map_size);
    update_bitmap_score(afl, q);

  }

abort_trimming:

  afl->bytes_trim_out += q->len;
  return fault;

}

/* Write a modified test case, run program, process results. Handle
   error conditions, returning 1 if it's time to bail out. This is
   a helper function for fuzz_one(). */

u8 __attribute__((hot))
common_fuzz_stuff(afl_state_t *afl, u8 *out_buf, u32 len) {

  u8 fault;

  /* A zero return from write_to_testcase() means a custom post_process
     hook rejected the input, so there is nothing to execute. */
  if (unlikely((len = write_to_testcase(afl, (void **)&out_buf, len, 0)) ==
               0)) {

    return 0;

  }

  fault = fuzz_run_target(afl, &afl->fsrv, afl->fsrv.exec_tmout);

  if (afl->stop_soon) { return 1; }

  if (fault == FSRV_RUN_TMOUT) {

    if (afl->subseq_tmouts++ > TMOUT_LIMIT) {

      ++afl->cur_skipped_items;
      return 1;

    }

  } else {

    afl->subseq_tmouts = 0;

  }

  /* Users can hit us with SIGUSR1 to request the current input
     to be abandoned. */

  if (afl->skip_requested) {

    afl->skip_requested = 0;
    ++afl->cur_skipped_items;
    return 1;

  }

  /* This handles FSRV_RUN_ERROR for us: */

  afl->queued_discovered += save_if_interesting(afl, out_buf, len, fault);

  if (!(afl->stage_cur % afl->stats_update_freq) ||
      afl->stage_cur + 1 == afl->stage_max) {

    show_stats(afl);

  }

  return 0;

}
