OLD | NEW |
1 /* | 1 /* |
2 american fuzzy lop - fuzzer code | 2 american fuzzy lop - fuzzer code |
3 -------------------------------- | 3 -------------------------------- |
4 | 4 |
5 Written and maintained by Michal Zalewski <lcamtuf@google.com> | 5 Written and maintained by Michal Zalewski <lcamtuf@google.com> |
6 | 6 |
7 Forkserver design by Jann Horn <jannhorn@googlemail.com> | 7 Forkserver design by Jann Horn <jannhorn@googlemail.com> |
8 | 8 |
9 Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved. | 9 Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved. |
10 | 10 |
(...skipping 42 matching lines...) |
53 #include <sys/types.h> | 53 #include <sys/types.h> |
54 #include <sys/resource.h> | 54 #include <sys/resource.h> |
55 #include <sys/mman.h> | 55 #include <sys/mman.h> |
56 #include <sys/ioctl.h> | 56 #include <sys/ioctl.h> |
57 #include <sys/file.h> | 57 #include <sys/file.h> |
58 | 58 |
59 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) | 59 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) |
60 # include <sys/sysctl.h> | 60 # include <sys/sysctl.h> |
61 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ | 61 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ |
62 | 62 |
63 /* For supporting -Z on systems that have sched_setaffinity. */ | 63 /* For systems that have sched_setaffinity; right now just Linux, but one |
| 64 can hope... */ |
64 | 65 |
65 #ifdef __linux__ | 66 #ifdef __linux__ |
66 # define HAVE_AFFINITY 1 | 67 # define HAVE_AFFINITY 1 |
67 #endif /* __linux__ */ | 68 #endif /* __linux__ */ |
68 | 69 |
69 /* A toggle to export some variables when building as a library. Not very | 70 /* A toggle to export some variables when building as a library. Not very |
70 useful for the general public. */ | 71 useful for the general public. */ |
71 | 72 |
72 #ifdef AFL_LIB | 73 #ifdef AFL_LIB |
73 # define EXP_ST | 74 # define EXP_ST |
(...skipping 30 matching lines...) |
104 resuming_fuzz, /* Resuming an older fuzzing job? */ | 105 resuming_fuzz, /* Resuming an older fuzzing job? */ |
105 timeout_given, /* Specific timeout given? */ | 106 timeout_given, /* Specific timeout given? */ |
106 not_on_tty, /* stdout is not a tty */ | 107 not_on_tty, /* stdout is not a tty */ |
107 term_too_small, /* terminal dimensions too small */ | 108 term_too_small, /* terminal dimensions too small */ |
108 uses_asan, /* Target uses ASAN? */ | 109 uses_asan, /* Target uses ASAN? */ |
109 no_forkserver, /* Disable forkserver? */ | 110 no_forkserver, /* Disable forkserver? */ |
110 crash_mode, /* Crash mode! Yeah! */ | 111 crash_mode, /* Crash mode! Yeah! */ |
111 in_place_resume, /* Attempt in-place resume? */ | 112 in_place_resume, /* Attempt in-place resume? */ |
112 auto_changed, /* Auto-generated tokens changed? */ | 113 auto_changed, /* Auto-generated tokens changed? */ |
113 no_cpu_meter_red, /* Feng shui on the status screen */ | 114 no_cpu_meter_red, /* Feng shui on the status screen */ |
114 no_var_check, /* Don't detect variable behavior */ | |
115 shuffle_queue, /* Shuffle input queue? */ | 115 shuffle_queue, /* Shuffle input queue? */ |
116 bitmap_changed = 1, /* Time to update bitmap? */ | 116 bitmap_changed = 1, /* Time to update bitmap? */ |
117 qemu_mode, /* Running in QEMU mode? */ | 117 qemu_mode, /* Running in QEMU mode? */ |
118 skip_requested, /* Skip request, via SIGUSR1 */ | 118 skip_requested, /* Skip request, via SIGUSR1 */ |
119 run_over10m; /* Run time over 10 minutes? */ | 119 run_over10m, /* Run time over 10 minutes? */ |
| 120 persistent_mode; /* Running in persistent mode? */ |
120 | 121 |
121 static s32 out_fd, /* Persistent fd for out_file */ | 122 static s32 out_fd, /* Persistent fd for out_file */ |
122 dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ | 123 dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ |
123 dev_null_fd = -1, /* Persistent fd for /dev/null */ | 124 dev_null_fd = -1, /* Persistent fd for /dev/null */ |
124 fsrv_ctl_fd, /* Fork server control pipe (write) */ | 125 fsrv_ctl_fd, /* Fork server control pipe (write) */ |
125 fsrv_st_fd; /* Fork server status pipe (read) */ | 126 fsrv_st_fd; /* Fork server status pipe (read) */ |
126 | 127 |
127 static s32 forksrv_pid, /* PID of the fork server */ | 128 static s32 forksrv_pid, /* PID of the fork server */ |
128 child_pid = -1, /* PID of the fuzzed program */ | 129 child_pid = -1, /* PID of the fuzzed program */ |
129 out_dir_fd = -1; /* FD of the lock file */ | 130 out_dir_fd = -1; /* FD of the lock file */ |
130 | 131 |
131 EXP_ST u8* trace_bits; /* SHM with instrumentation bitmap */ | 132 EXP_ST u8* trace_bits; /* SHM with instrumentation bitmap */ |
132 | 133 |
133 EXP_ST u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ | 134 EXP_ST u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ |
134 virgin_hang[MAP_SIZE], /* Bits we haven't seen in hangs */ | 135 virgin_hang[MAP_SIZE], /* Bits we haven't seen in hangs */ |
135 virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ | 136 virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ |
136 | 137 |
| 138 static u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */ |
| 139 |
137 static s32 shm_id; /* ID of the SHM region */ | 140 static s32 shm_id; /* ID of the SHM region */ |
138 | 141 |
139 static volatile u8 stop_soon, /* Ctrl-C pressed? */ | 142 static volatile u8 stop_soon, /* Ctrl-C pressed? */ |
140 clear_screen = 1, /* Window resized? */ | 143 clear_screen = 1, /* Window resized? */ |
141 child_timed_out; /* Traced process timed out? */ | 144 child_timed_out; /* Traced process timed out? */ |
142 | 145 |
143 EXP_ST u32 queued_paths, /* Total number of queued testcases */ | 146 EXP_ST u32 queued_paths, /* Total number of queued testcases */ |
144 queued_variable, /* Testcases with variable behavior */ | 147 queued_variable, /* Testcases with variable behavior */ |
145 queued_at_start, /* Total number of initial inputs */ | 148 queued_at_start, /* Total number of initial inputs */ |
146 queued_discovered, /* Items discovered during this run */ | 149 queued_discovered, /* Items discovered during this run */ |
147 queued_imported, /* Items imported via -S */ | 150 queued_imported, /* Items imported via -S */ |
148 queued_favored, /* Paths deemed favorable */ | 151 queued_favored, /* Paths deemed favorable */ |
149 queued_with_cov, /* Paths with new coverage bytes */ | 152 queued_with_cov, /* Paths with new coverage bytes */ |
150 pending_not_fuzzed, /* Queued but not done yet */ | 153 pending_not_fuzzed, /* Queued but not done yet */ |
151 pending_favored, /* Pending favored paths */ | 154 pending_favored, /* Pending favored paths */ |
152 cur_skipped_paths, /* Abandoned inputs in cur cycle */ | 155 cur_skipped_paths, /* Abandoned inputs in cur cycle */ |
153 cur_depth, /* Current path depth */ | 156 cur_depth, /* Current path depth */ |
154 max_depth, /* Max path depth */ | 157 max_depth, /* Max path depth */ |
155 useless_at_start, /* Number of useless starting paths */ | 158 useless_at_start, /* Number of useless starting paths */ |
| 159 var_byte_count, /* Bitmap bytes with var behavior */ |
156 current_entry, /* Current queue entry ID */ | 160 current_entry, /* Current queue entry ID */ |
157 havoc_div = 1; /* Cycle count divisor for havoc */ | 161 havoc_div = 1; /* Cycle count divisor for havoc */ |
158 | 162 |
159 EXP_ST u64 total_crashes, /* Total number of crashes */ | 163 EXP_ST u64 total_crashes, /* Total number of crashes */ |
160 unique_crashes, /* Crashes with unique signatures */ | 164 unique_crashes, /* Crashes with unique signatures */ |
161 total_hangs, /* Total number of hangs */ | 165 total_hangs, /* Total number of hangs */ |
162 unique_hangs, /* Hangs with unique signatures */ | 166 unique_hangs, /* Hangs with unique signatures */ |
163 total_execs, /* Total execve() calls */ | 167 total_execs, /* Total execve() calls */ |
164 start_time, /* Unix start time (ms) */ | 168 start_time, /* Unix start time (ms) */ |
165 last_path_time, /* Time for most recent path (ms) */ | 169 last_path_time, /* Time for most recent path (ms) */ |
166 last_crash_time, /* Time for most recent crash (ms) */ | 170 last_crash_time, /* Time for most recent crash (ms) */ |
167 last_hang_time, /* Time for most recent hang (ms) */ | 171 last_hang_time, /* Time for most recent hang (ms) */ |
| 172 last_crash_execs, /* Exec counter at last crash */ |
168 queue_cycle, /* Queue round counter */ | 173 queue_cycle, /* Queue round counter */ |
169 cycles_wo_finds, /* Cycles without any new paths */ | 174 cycles_wo_finds, /* Cycles without any new paths */ |
170 trim_execs, /* Execs done to trim input files */ | 175 trim_execs, /* Execs done to trim input files */ |
171 bytes_trim_in, /* Bytes coming into the trimmer */ | 176 bytes_trim_in, /* Bytes coming into the trimmer */ |
172 bytes_trim_out, /* Bytes coming outa the trimmer */ | 177 bytes_trim_out, /* Bytes coming outa the trimmer */ |
173 blocks_eff_total, /* Blocks subject to effector maps */ | 178 blocks_eff_total, /* Blocks subject to effector maps */ |
174 blocks_eff_select; /* Blocks selected as fuzzable */ | 179 blocks_eff_select; /* Blocks selected as fuzzable */ |
175 | 180 |
176 static u32 subseq_hangs; /* Number of hangs in a row */ | 181 static u32 subseq_hangs; /* Number of hangs in a row */ |
177 | 182 |
178 static u8 *stage_name = "init", /* Name of the current fuzz stage */ | 183 static u8 *stage_name = "init", /* Name of the current fuzz stage */ |
179 *stage_short, /* Short stage name */ | 184 *stage_short, /* Short stage name */ |
180 *syncing_party; /* Currently syncing with... */ | 185 *syncing_party; /* Currently syncing with... */ |
181 | 186 |
182 static s32 stage_cur, stage_max; /* Stage progression */ | 187 static s32 stage_cur, stage_max; /* Stage progression */ |
183 static s32 splicing_with = -1; /* Splicing with which test case? */ | 188 static s32 splicing_with = -1; /* Splicing with which test case? */ |
184 | 189 |
| 190 static u32 master_id, master_max; /* Master instance job splitting */ |
| 191 |
185 static u32 syncing_case; /* Syncing with case #... */ | 192 static u32 syncing_case; /* Syncing with case #... */ |
186 | 193 |
187 static s32 stage_cur_byte, /* Byte offset of current stage op */ | 194 static s32 stage_cur_byte, /* Byte offset of current stage op */ |
188 stage_cur_val; /* Value used for stage op */ | 195 stage_cur_val; /* Value used for stage op */ |
189 | 196 |
190 static u8 stage_val_type; /* Value type (STAGE_VAL_*) */ | 197 static u8 stage_val_type; /* Value type (STAGE_VAL_*) */ |
191 | 198 |
192 static u64 stage_finds[32], /* Patterns found per fuzz stage */ | 199 static u64 stage_finds[32], /* Patterns found per fuzz stage */ |
193 stage_cycles[32]; /* Execs per fuzz stage */ | 200 stage_cycles[32]; /* Execs per fuzz stage */ |
194 | 201 |
195 static u32 rand_cnt; /* Random number counter */ | 202 static u32 rand_cnt; /* Random number counter */ |
196 | 203 |
197 static u64 total_cal_us, /* Total calibration time (us) */ | 204 static u64 total_cal_us, /* Total calibration time (us) */ |
198 total_cal_cycles; /* Total calibration cycles */ | 205 total_cal_cycles; /* Total calibration cycles */ |
199 | 206 |
200 static u64 total_bitmap_size, /* Total bit count for all bitmaps */ | 207 static u64 total_bitmap_size, /* Total bit count for all bitmaps */ |
201 total_bitmap_entries; /* Number of bitmaps counted */ | 208 total_bitmap_entries; /* Number of bitmaps counted */ |
202 | 209 |
203 static u32 cpu_core_count; /* CPU core count */ | 210 static s32 cpu_core_count; /* CPU core count */ |
204 | 211 |
205 #ifdef HAVE_AFFINITY | 212 #ifdef HAVE_AFFINITY |
206 | 213 |
207 static u8  use_affinity;          /* Using -Z */ | 214 static s32 cpu_aff = -1;              /* Selected CPU core */ |
208 | |
209 static u32 cpu_aff_main,          /* Affinity for main process */ |
210 cpu_aff_child; /* Affinity for fuzzed child */ | |
211 | 215 |
212 #endif /* HAVE_AFFINITY */ | 216 #endif /* HAVE_AFFINITY */ |
213 | 217 |
214 static FILE* plot_file; /* Gnuplot output file */ | 218 static FILE* plot_file; /* Gnuplot output file */ |
215 | 219 |
216 struct queue_entry { | 220 struct queue_entry { |
217 | 221 |
218 u8* fname; /* File name for the test case */ | 222 u8* fname; /* File name for the test case */ |
219 u32 len; /* Input length */ | 223 u32 len; /* Input length */ |
220 | 224 |
(...skipping 112 matching lines...) |
333 struct timeval tv; | 337 struct timeval tv; |
334 struct timezone tz; | 338 struct timezone tz; |
335 | 339 |
336 gettimeofday(&tv, &tz); | 340 gettimeofday(&tv, &tz); |
337 | 341 |
338 return (tv.tv_sec * 1000000ULL) + tv.tv_usec; | 342 return (tv.tv_sec * 1000000ULL) + tv.tv_usec; |
339 | 343 |
340 } | 344 } |
341 | 345 |
342 | 346 |
343 #ifdef HAVE_AFFINITY | |
344 | |
345 /* Set CPU affinity (on systems that support it). */ | |
346 | |
347 static void set_cpu_affinity(u32 cpu_id) { | |
348 | |
349 cpu_set_t c; | |
350 | |
351 CPU_ZERO(&c); | |
352 CPU_SET(cpu_id, &c); | |
353 | |
354 if (sched_setaffinity(0, sizeof(c), &c)) | |
355 PFATAL("sched_setaffinity failed"); | |
356 | |
357 } | |
358 | |
359 #endif /* HAVE_AFFINITY */ | |
360 | |
361 | |
362 /* Generate a random number (from 0 to limit - 1). This may | 347 /* Generate a random number (from 0 to limit - 1). This may |
363 have slight bias. */ | 348 have slight bias. */ |
364 | 349 |
365 static inline u32 UR(u32 limit) { | 350 static inline u32 UR(u32 limit) { |
366 | 351 |
367 if (!rand_cnt--) { | 352 if (unlikely(!rand_cnt--)) { |
368 | 353 |
369 u32 seed[2]; | 354 u32 seed[2]; |
370 | 355 |
371 ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); | 356 ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); |
372 | 357 |
373 srandom(seed[0]); | 358 srandom(seed[0]); |
374 rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG); | 359 rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG); |
375 | 360 |
376 } | 361 } |
377 | 362 |
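Note on the "slight bias" mentioned above: the value is ultimately produced as random() modulo limit (in the lines elided below), and whenever limit does not evenly divide the 2^31-wide range of random(), low remainders come up marginally more often. A minimal standalone sketch of that mapping (the function name is ours, not AFL's):

    #include <stdint.h>
    #include <stdlib.h>

    /* Illustrative only: the same modulo mapping used at the end of UR(),
       minus the reseeding logic. random() yields 2^31 equally likely values,
       so any two outputs of this function differ in probability by at most
       2^-31, a bias afl-fuzz deliberately tolerates for speed. */
    static uint32_t biased_ur(uint32_t limit) {

      return (uint32_t)random() % limit;

    }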
(...skipping 13 matching lines...) |
391 u32 j = i + UR(cnt - i); | 376 u32 j = i + UR(cnt - i); |
392 void *s = ptrs[i]; | 377 void *s = ptrs[i]; |
393 ptrs[i] = ptrs[j]; | 378 ptrs[i] = ptrs[j]; |
394 ptrs[j] = s; | 379 ptrs[j] = s; |
395 | 380 |
396 } | 381 } |
397 | 382 |
398 } | 383 } |
399 | 384 |
400 | 385 |
| 386 #ifdef HAVE_AFFINITY |
| 387 |
| 388 /* Scan the list of processes bound to specific cores and bind ourselves |
| 389    to a free one, if available. Assumes an upper bound of 4k CPUs. */ |
| 390 |
| 391 static void bind_to_free_cpu(void) { |
| 392 |
| 393 DIR* d; |
| 394 struct dirent* de; |
| 395 cpu_set_t c; |
| 396 |
| 397 u8 cpu_used[4096] = { 0 }; |
| 398 u32 i; |
| 399 |
| 400 if (cpu_core_count < 2) return; |
| 401 |
| 402 if (getenv("AFL_NO_AFFINITY")) { |
| 403 |
| 404 WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set)."); |
| 405 return; |
| 406 |
| 407 } |
| 408 |
| 409 d = opendir("/proc"); |
| 410 |
| 411 if (!d) { |
| 412 |
| 413 WARNF("Unable to access /proc - can't scan for free CPU cores."); |
| 414 return; |
| 415 |
| 416 } |
| 417 |
| 418 ACTF("Checking CPU core loadout..."); |
| 419 |
| 420 /* Introduce some jitter, in case multiple AFL tasks are doing the same |
| 421 thing at the same time... */ |
| 422 |
| 423 usleep(R(1000) * 250); |
| 424 |
| 425 /* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list. |
| 426 Flag all processes bound to a specific CPU using cpu_used[]. This will |
| 427 fail for some exotic binding setups, but is likely good enough in almost |
| 428 all real-world use cases. */ |
| 429 |
| 430 while ((de = readdir(d))) { |
| 431 |
| 432 u8* fn; |
| 433 FILE* f; |
| 434 u8 tmp[MAX_LINE]; |
| 435 u8 has_vmsize = 0; |
| 436 |
| 437 if (!isdigit(de->d_name[0])) continue; |
| 438 |
| 439 fn = alloc_printf("/proc/%s/status", de->d_name); |
| 440 |
| 441 if (!(f = fopen(fn, "r"))) { |
| 442 ck_free(fn); |
| 443 continue; |
| 444 } |
| 445 |
| 446 while (fgets(tmp, MAX_LINE, f)) { |
| 447 |
| 448 u32 hval; |
| 449 |
| 450 /* Processes without VmSize are probably kernel tasks. */ |
| 451 |
| 452 if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1; |
| 453 |
| 454 if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) && |
| 455 !strchr(tmp, '-') && !strchr(tmp, ',') && |
| 456 sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) && |
| 457 has_vmsize) { |
| 458 |
| 459 cpu_used[hval] = 1; |
| 460 break; |
| 461 |
| 462 } |
| 463 |
| 464 } |
| 465 |
| 466 ck_free(fn); |
| 467 fclose(f); |
| 468 |
| 469 } |
| 470 |
| 471 closedir(d); |
| 472 |
| 473 for (i = 0; i < cpu_core_count; i++) if (!cpu_used[i]) break; |
| 474 |
| 475 if (i == cpu_core_count) { |
| 476 |
| 477 SAYF("\n" cLRD "[-] " cRST |
| 478 "Uh-oh, looks like all %u CPU cores on your system are allocated to\n" |
| 479          "    other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n" |
| 480          "    another fuzzer on this machine is probably a bad plan, but if you are\n" |
| 481 " absolutely sure, you can set AFL_NO_AFFINITY and try again.\n", |
| 482 cpu_core_count); |
| 483 |
| 484 FATAL("No more free CPU cores"); |
| 485 |
| 486 } |
| 487 |
| 488 OKF("Found a free CPU core, binding to #%u.", i); |
| 489 |
| 490 cpu_aff = i; |
| 491 |
| 492 CPU_ZERO(&c); |
| 493 CPU_SET(i, &c); |
| 494 |
| 495 if (sched_setaffinity(0, sizeof(c), &c)) |
| 496 PFATAL("sched_setaffinity failed"); |
| 497 |
| 498 } |
| 499 |
| 500 #endif /* HAVE_AFFINITY */ |
| 501 |
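The new scan keys off single-CPU Cpus_allowed_list entries in /proc/<pid>/status: a line such as "Cpus_allowed_list:\t3" marks core 3 as taken, while ranges ("0-7") or comma lists ("2,5") are ignored because the process is not pinned to one core. A minimal sketch of that per-line test, under the same parsing rules (the helper name is ours):

    #include <stdio.h>
    #include <string.h>

    /* Returns the pinned CPU id if the status line names exactly one core,
       or -1 for unpinned processes and unrelated lines. Illustrative only. */
    static int parse_pinned_cpu(const char* line) {

      unsigned int cpu;

      if (strncmp(line, "Cpus_allowed_list:\t", 19)) return -1;
      if (strchr(line, '-') || strchr(line, ',')) return -1;
      if (sscanf(line + 19, "%u", &cpu) != 1) return -1;

      return (int)cpu;

    }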
401 #ifndef IGNORE_FINDS | 502 #ifndef IGNORE_FINDS |
402 | 503 |
403 /* Helper function to compare buffers; returns first and last differing offset. We | 504 /* Helper function to compare buffers; returns first and last differing offset. We |
404 use this to find reasonable locations for splicing two files. */ | 505 use this to find reasonable locations for splicing two files. */ |
405 | 506 |
406 static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { | 507 static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { |
407 | 508 |
408 s32 f_loc = -1; | 509 s32 f_loc = -1; |
409 s32 l_loc = -1; | 510 s32 l_loc = -1; |
410 u32 pos; | 511 u32 pos; |
(...skipping 350 matching lines...) |
761 | 862 |
762 | 863 |
763 /* Check if the current execution path brings anything new to the table. | 864 /* Check if the current execution path brings anything new to the table. |
764 Update virgin bits to reflect the finds. Returns 1 if the only change is | 865 Update virgin bits to reflect the finds. Returns 1 if the only change is |
765 the hit-count for a particular tuple; 2 if there are new tuples seen. | 866 the hit-count for a particular tuple; 2 if there are new tuples seen. |
766 Updates the map, so subsequent calls will always return 0. | 867 Updates the map, so subsequent calls will always return 0. |
767 | 868 |
768 This function is called after every exec() on a fairly large buffer, so | 869 This function is called after every exec() on a fairly large buffer, so |
769 it needs to be fast. We do this in 32-bit and 64-bit flavors. */ | 870 it needs to be fast. We do this in 32-bit and 64-bit flavors. */ |
770 | 871 |
771 #define FFL(_b) (0xffULL << ((_b) << 3)) | |
772 #define FF(_b) (0xff << ((_b) << 3)) | |
773 | |
774 static inline u8 has_new_bits(u8* virgin_map) { | 872 static inline u8 has_new_bits(u8* virgin_map) { |
775 | 873 |
776 #ifdef __x86_64__ | 874 #ifdef __x86_64__ |
777 | 875 |
778 u64* current = (u64*)trace_bits; | 876 u64* current = (u64*)trace_bits; |
779 u64* virgin = (u64*)virgin_map; | 877 u64* virgin = (u64*)virgin_map; |
780 | 878 |
781 u32 i = (MAP_SIZE >> 3); | 879 u32 i = (MAP_SIZE >> 3); |
782 | 880 |
783 #else | 881 #else |
784 | 882 |
785 u32* current = (u32*)trace_bits; | 883 u32* current = (u32*)trace_bits; |
786 u32* virgin = (u32*)virgin_map; | 884 u32* virgin = (u32*)virgin_map; |
787 | 885 |
788 u32 i = (MAP_SIZE >> 2); | 886 u32 i = (MAP_SIZE >> 2); |
789 | 887 |
790 #endif /* ^__x86_64__ */ | 888 #endif /* ^__x86_64__ */ |
791 | 889 |
792 u8 ret = 0; | 890 u8 ret = 0; |
793 | 891 |
794 while (i--) { | 892 while (i--) { |
795 | 893 |
796 #ifdef __x86_64__ | 894 /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap |
| 895 that have not been already cleared from the virgin map - since this will |
| 896 almost always be the case. */ |
797 | 897 |
798 u64 cur = *current; | 898 if (unlikely(*current) && unlikely(*current & *virgin)) { |
799 u64 vir = *virgin; | |
800 | 899 |
801 #else | 900 if (likely(ret < 2)) { |
802 | 901 |
803 u32 cur = *current; | 902 u8* cur = (u8*)current; |
804 u32 vir = *virgin; | 903 u8* vir = (u8*)virgin; |
805 | 904 |
806 #endif /* ^__x86_64__ */ | 905 /* Looks like we have not found any new bytes yet; see if any non-zero |
807 | 906 bytes in current[] are pristine in virgin[]. */ |
808 /* Optimize for *current == ~*virgin, since this will almost always be the | |
809 case. */ | |
810 | |
811 if (cur & vir) { | |
812 | |
813 if (ret < 2) { | |
814 | |
815 /* This trace did not have any new bytes yet; see if there's any | |
816 current[] byte that is non-zero when virgin[] is 0xff. */ | |
817 | 907 |
818 #ifdef __x86_64__ | 908 #ifdef __x86_64__ |
819 | 909 |
820 if (((cur & FFL(0)) && (vir & FFL(0)) == FFL(0)) || | 910 if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || |
821 ((cur & FFL(1)) && (vir & FFL(1)) == FFL(1)) || | 911 (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) || |
822 ((cur & FFL(2)) && (vir & FFL(2)) == FFL(2)) || | 912 (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) || |
823 ((cur & FFL(3)) && (vir & FFL(3)) == FFL(3)) || | 913 (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2; |
824 ((cur & FFL(4)) && (vir & FFL(4)) == FFL(4)) || | |
825 ((cur & FFL(5)) && (vir & FFL(5)) == FFL(5)) || | |
826 ((cur & FFL(6)) && (vir & FFL(6)) == FFL(6)) || | |
827 ((cur & FFL(7)) && (vir & FFL(7)) == FFL(7))) ret = 2; | |
828 else ret = 1; | 914 else ret = 1; |
829 | 915 |
830 #else | 916 #else |
831 | 917 |
832 if (((cur & FF(0)) && (vir & FF(0)) == FF(0)) || | 918 if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) || |
833 ((cur & FF(1)) && (vir & FF(1)) == FF(1)) || | 919 (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2; |
834 ((cur & FF(2)) && (vir & FF(2)) == FF(2)) || | |
835 ((cur & FF(3)) && (vir & FF(3)) == FF(3))) ret = 2; | |
836 else ret = 1; | 920 else ret = 1; |
837 | 921 |
838 #endif /* ^__x86_64__ */ | 922 #endif /* ^__x86_64__ */ |
839 | 923 |
840 } | 924 } |
841 | 925 |
842 *virgin = vir & ~cur; | 926 *virgin &= ~*current; |
843 | 927 |
844 } | 928 } |
845 | 929 |
846 current++; | 930 current++; |
847 virgin++; | 931 virgin++; |
848 | 932 |
849 } | 933 } |
850 | 934 |
851 if (ret && virgin_map == virgin_bits) bitmap_changed = 1; | 935 if (ret && virgin_map == virgin_bits) bitmap_changed = 1; |
852 | 936 |
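To make the return-value contract above concrete: a non-zero trace byte whose virgin counterpart is still 0xff means a tuple never hit before (return 2), while a non-zero trace byte whose virgin byte has already had some bits cleared only contributes a new hit count (return 1). A per-byte sketch of the same classification, minus the word-at-a-time vectorization (illustrative only):

    #include <stdint.h>

    /* 2 = brand new tuple, 1 = new hit count for a known tuple, 0 = nothing
       new. Clears the reported bits so the same find is not counted twice. */
    static uint8_t classify_byte(uint8_t cur, uint8_t* virgin) {

      uint8_t ret = 0;

      if (cur & *virgin) {

        ret = (*virgin == 0xff) ? 2 : 1;
        *virgin &= ~cur;

      }

      return ret;

    }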
(...skipping 27 matching lines...) |
880 v = (v & 0x33333333) + ((v >> 2) & 0x33333333); | 964 v = (v & 0x33333333) + ((v >> 2) & 0x33333333); |
881 ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24; | 965 ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24; |
882 | 966 |
883 } | 967 } |
884 | 968 |
885 return ret; | 969 return ret; |
886 | 970 |
887 } | 971 } |
888 | 972 |
889 | 973 |
| 974 #define FF(_b) (0xff << ((_b) << 3)) |
| 975 |
890 /* Count the number of bytes set in the bitmap. Called fairly sporadically, | 976 /* Count the number of bytes set in the bitmap. Called fairly sporadically, |
891 mostly to update the status screen or calibrate and examine confirmed | 977 mostly to update the status screen or calibrate and examine confirmed |
892 new paths. */ | 978 new paths. */ |
893 | 979 |
894 static u32 count_bytes(u8* mem) { | 980 static u32 count_bytes(u8* mem) { |
895 | 981 |
896 u32* ptr = (u32*)mem; | 982 u32* ptr = (u32*)mem; |
897 u32 i = (MAP_SIZE >> 2); | 983 u32 i = (MAP_SIZE >> 2); |
898 u32 ret = 0; | 984 u32 ret = 0; |
899 | 985 |
(...skipping 41 matching lines...) |
941 return ret; | 1027 return ret; |
942 | 1028 |
943 } | 1029 } |
944 | 1030 |
945 | 1031 |
946 /* Destructively simplify trace by eliminating hit count information | 1032 /* Destructively simplify trace by eliminating hit count information |
947 and replacing it with 0x80 or 0x01 depending on whether the tuple | 1033 and replacing it with 0x80 or 0x01 depending on whether the tuple |
948 is hit or not. Called on every new crash or hang, should be | 1034 is hit or not. Called on every new crash or hang, should be |
949 reasonably fast. */ | 1035 reasonably fast. */ |
950 | 1036 |
951 #define AREP4(_sym) (_sym), (_sym), (_sym), (_sym) | 1037 static const u8 simplify_lookup[256] = { |
952 #define AREP8(_sym) AREP4(_sym), AREP4(_sym) | |
953 #define AREP16(_sym) AREP8(_sym), AREP8(_sym) | |
954 #define AREP32(_sym) AREP16(_sym), AREP16(_sym) | |
955 #define AREP64(_sym) AREP32(_sym), AREP32(_sym) | |
956 #define AREP128(_sym) AREP64(_sym), AREP64(_sym) | |
957 | 1038 |
958 static u8 simplify_lookup[256] = { | 1039 [0] = 1, |
959 /* 4 */ 1, 128, 128, 128, | 1040 [1 ... 255] = 128 |
960 /* +4 */ AREP4(128), | 1041 |
961 /* +8 */ AREP8(128), | |
962 /* +16 */ AREP16(128), | |
963 /* +32 */ AREP32(128), | |
964 /* +64 */ AREP64(128), | |
965 /* +128 */ AREP128(128) | |
966 }; | 1042 }; |
967 | 1043 |
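The rewritten table uses range designators ("[1 ... 255] = 128"), a GCC/Clang extension to C99 designated initializers, in place of the old AREP* macro pile. For compilers without that extension, an equivalent table can be filled at startup; a portable sketch (names are ours):

    #include <stdint.h>

    static uint8_t simplify_lookup_portable[256];

    /* Builds the same contents as simplify_lookup above without the
       "[lo ... hi] =" extension; call once before use. */
    static void init_simplify_lookup(void) {

      simplify_lookup_portable[0] = 1;

      for (int i = 1; i < 256; i++)
        simplify_lookup_portable[i] = 128;

    }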
968 #ifdef __x86_64__ | 1044 #ifdef __x86_64__ |
969 | 1045 |
970 static void simplify_trace(u64* mem) { | 1046 static void simplify_trace(u64* mem) { |
971 | 1047 |
972 u32 i = MAP_SIZE >> 3; | 1048 u32 i = MAP_SIZE >> 3; |
973 | 1049 |
974 while (i--) { | 1050 while (i--) { |
975 | 1051 |
976 /* Optimize for sparse bitmaps. */ | 1052 /* Optimize for sparse bitmaps. */ |
977 | 1053 |
978 if (*mem) { | 1054 if (unlikely(*mem)) { |
979 | 1055 |
980 u8* mem8 = (u8*)mem; | 1056 u8* mem8 = (u8*)mem; |
981 | 1057 |
982 mem8[0] = simplify_lookup[mem8[0]]; | 1058 mem8[0] = simplify_lookup[mem8[0]]; |
983 mem8[1] = simplify_lookup[mem8[1]]; | 1059 mem8[1] = simplify_lookup[mem8[1]]; |
984 mem8[2] = simplify_lookup[mem8[2]]; | 1060 mem8[2] = simplify_lookup[mem8[2]]; |
985 mem8[3] = simplify_lookup[mem8[3]]; | 1061 mem8[3] = simplify_lookup[mem8[3]]; |
986 mem8[4] = simplify_lookup[mem8[4]]; | 1062 mem8[4] = simplify_lookup[mem8[4]]; |
987 mem8[5] = simplify_lookup[mem8[5]]; | 1063 mem8[5] = simplify_lookup[mem8[5]]; |
988 mem8[6] = simplify_lookup[mem8[6]]; | 1064 mem8[6] = simplify_lookup[mem8[6]]; |
(...skipping 10 matching lines...) |
999 #else | 1075 #else |
1000 | 1076 |
1001 static void simplify_trace(u32* mem) { | 1077 static void simplify_trace(u32* mem) { |
1002 | 1078 |
1003 u32 i = MAP_SIZE >> 2; | 1079 u32 i = MAP_SIZE >> 2; |
1004 | 1080 |
1005 while (i--) { | 1081 while (i--) { |
1006 | 1082 |
1007 /* Optimize for sparse bitmaps. */ | 1083 /* Optimize for sparse bitmaps. */ |
1008 | 1084 |
1009 if (*mem) { | 1085 if (unlikely(*mem)) { |
1010 | 1086 |
1011 u8* mem8 = (u8*)mem; | 1087 u8* mem8 = (u8*)mem; |
1012 | 1088 |
1013 mem8[0] = simplify_lookup[mem8[0]]; | 1089 mem8[0] = simplify_lookup[mem8[0]]; |
1014 mem8[1] = simplify_lookup[mem8[1]]; | 1090 mem8[1] = simplify_lookup[mem8[1]]; |
1015 mem8[2] = simplify_lookup[mem8[2]]; | 1091 mem8[2] = simplify_lookup[mem8[2]]; |
1016 mem8[3] = simplify_lookup[mem8[3]]; | 1092 mem8[3] = simplify_lookup[mem8[3]]; |
1017 | 1093 |
1018 } else *mem = 0x01010101; | 1094 } else *mem = 0x01010101; |
1019 | 1095 |
1020 mem++; | 1096 mem++; |
1021 } | 1097 } |
1022 | 1098 |
1023 } | 1099 } |
1024 | 1100 |
1025 #endif /* ^__x86_64__ */ | 1101 #endif /* ^__x86_64__ */ |
1026 | 1102 |
1027 | 1103 |
1028 /* Destructively classify execution counts in a trace. This is used as a | 1104 /* Destructively classify execution counts in a trace. This is used as a |
1029 preprocessing step for any newly acquired traces. Called on every exec, | 1105 preprocessing step for any newly acquired traces. Called on every exec, |
1030 must be fast. */ | 1106 must be fast. */ |
1031 | 1107 |
1032 static u8 count_class_lookup[256] = { | 1108 static const u8 count_class_lookup8[256] = { |
1033 | 1109 |
1034 /* 0 - 3: 4 */ 0, 1, 2, 4, | 1110 [0] = 0, |
1035 /* 4 - 7: +4 */ AREP4(8), | 1111 [1] = 1, |
1036 /* 8 - 15: +8 */ AREP8(16), | 1112 [2] = 2, |
1037 /* 16 - 31: +16 */ AREP16(32), | 1113 [3] = 4, |
1038 /* 32 - 127: +96 */ AREP64(64), AREP32(64), | 1114 [4 ... 7] = 8, |
1039 /* 128+: +128 */ AREP128(128) | 1115 [8 ... 15] = 16, |
| 1116 [16 ... 31] = 32, |
| 1117 [32 ... 127] = 64, |
| 1118 [128 ... 255] = 128 |
1040 | 1119 |
1041 }; | 1120 }; |
1042 | 1121 |
| 1122 static u16 count_class_lookup16[65536]; |
| 1123 |
| 1124 |
| 1125 static void init_count_class16(void) { |
| 1126 |
| 1127 u32 b1, b2; |
| 1128 |
| 1129 for (b1 = 0; b1 < 256; b1++) |
| 1130 for (b2 = 0; b2 < 256; b2++) |
| 1131 count_class_lookup16[(b1 << 8) + b2] = |
| 1132 (count_class_lookup8[b1] << 8) | |
| 1133 count_class_lookup8[b2]; |
| 1134 |
| 1135 } |
| 1136 |
| 1137 |
1043 #ifdef __x86_64__ | 1138 #ifdef __x86_64__ |
1044 | 1139 |
1045 static inline void classify_counts(u64* mem) { | 1140 static inline void classify_counts(u64* mem) { |
1046 | 1141 |
1047 u32 i = MAP_SIZE >> 3; | 1142 u32 i = MAP_SIZE >> 3; |
1048 | 1143 |
1049 while (i--) { | 1144 while (i--) { |
1050 | 1145 |
1051 /* Optimize for sparse bitmaps. */ | 1146 /* Optimize for sparse bitmaps. */ |
1052 | 1147 |
1053 if (*mem) { | 1148 if (unlikely(*mem)) { |
1054 | 1149 |
1055 u8* mem8 = (u8*)mem; | 1150 u16* mem16 = (u16*)mem; |
1056 | 1151 |
1057 mem8[0] = count_class_lookup[mem8[0]]; | 1152 mem16[0] = count_class_lookup16[mem16[0]]; |
1058 mem8[1] = count_class_lookup[mem8[1]]; | 1153 mem16[1] = count_class_lookup16[mem16[1]]; |
1059 mem8[2] = count_class_lookup[mem8[2]]; | 1154 mem16[2] = count_class_lookup16[mem16[2]]; |
1060 mem8[3] = count_class_lookup[mem8[3]]; | 1155 mem16[3] = count_class_lookup16[mem16[3]]; |
1061 mem8[4] = count_class_lookup[mem8[4]]; | |
1062 mem8[5] = count_class_lookup[mem8[5]]; | |
1063 mem8[6] = count_class_lookup[mem8[6]]; | |
1064 mem8[7] = count_class_lookup[mem8[7]]; | |
1065 | 1156 |
1066 } | 1157 } |
1067 | 1158 |
1068 mem++; | 1159 mem++; |
1069 | 1160 |
1070 } | 1161 } |
1071 | 1162 |
1072 } | 1163 } |
1073 | 1164 |
1074 #else | 1165 #else |
1075 | 1166 |
1076 static inline void classify_counts(u32* mem) { | 1167 static inline void classify_counts(u32* mem) { |
1077 | 1168 |
1078 u32 i = MAP_SIZE >> 2; | 1169 u32 i = MAP_SIZE >> 2; |
1079 | 1170 |
1080 while (i--) { | 1171 while (i--) { |
1081 | 1172 |
1082 /* Optimize for sparse bitmaps. */ | 1173 /* Optimize for sparse bitmaps. */ |
1083 | 1174 |
1084 if (*mem) { | 1175 if (unlikely(*mem)) { |
1085 | 1176 |
1086 u8* mem8 = (u8*)mem; | 1177 u16* mem16 = (u16*)mem; |
1087 | 1178 |
1088 mem8[0] = count_class_lookup[mem8[0]]; | 1179 mem16[0] = count_class_lookup16[mem16[0]]; |
1089 mem8[1] = count_class_lookup[mem8[1]]; | 1180 mem16[1] = count_class_lookup16[mem16[1]]; |
1090 mem8[2] = count_class_lookup[mem8[2]]; | |
1091 mem8[3] = count_class_lookup[mem8[3]]; | |
1092 | 1181 |
1093 } | 1182 } |
1094 | 1183 |
1095 mem++; | 1184 mem++; |
1096 | 1185 |
1097 } | 1186 } |
1098 | 1187 |
1099 } | 1188 } |
1100 | 1189 |
1101 #endif /* ^__x86_64__ */ | 1190 #endif /* ^__x86_64__ */ |
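The switch from count_class_lookup to count_class_lookup16 trades 128 KB of static data (65536 u16 entries) for half as many table lookups per trace word: each u16 load classifies two adjacent hit counters at once, because init_count_class16() stores (count_class_lookup8[hi] << 8) | count_class_lookup8[lo] for every byte pair. A small self-check of that equivalence, assuming the tables from this patch and a prior init_count_class16() call (not a standalone program):

    #include <assert.h>

    /* One 16-bit lookup must equal two independent 8-bit lookups packed
       back together; illustrative check for an arbitrary byte pair. */
    static void check_count_class_pair(u8 hi, u8 lo) {

      u16 pair  = (u16)((hi << 8) | lo);
      u16 via16 = count_class_lookup16[pair];
      u16 via8  = (u16)((count_class_lookup8[hi] << 8) | count_class_lookup8[lo]);

      assert(via16 == via8);

    }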
(...skipping 788 matching lines...) |
1890 if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed"); | 1979 if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed"); |
1891 | 1980 |
1892 forksrv_pid = fork(); | 1981 forksrv_pid = fork(); |
1893 | 1982 |
1894 if (forksrv_pid < 0) PFATAL("fork() failed"); | 1983 if (forksrv_pid < 0) PFATAL("fork() failed"); |
1895 | 1984 |
1896 if (!forksrv_pid) { | 1985 if (!forksrv_pid) { |
1897 | 1986 |
1898 struct rlimit r; | 1987 struct rlimit r; |
1899 | 1988 |
1900 #ifdef HAVE_AFFINITY | |
1901 if (use_affinity) set_cpu_affinity(cpu_aff_child); | |
1902 #endif /* HAVE_AFFINITY */ | |
1903 | |
1904 /* Umpf. On OpenBSD, the default fd limit for root users is set to | 1989 /* Umpf. On OpenBSD, the default fd limit for root users is set to |
1905 soft 128. Let's try to fix that... */ | 1990 soft 128. Let's try to fix that... */ |
1906 | 1991 |
1907 if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { | 1992 if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { |
1908 | 1993 |
1909 r.rlim_cur = FORKSRV_FD + 2; | 1994 r.rlim_cur = FORKSRV_FD + 2; |
1910 setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ | 1995 setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ |
1911 | 1996 |
1912 } | 1997 } |
1913 | 1998 |
(...skipping 278 matching lines...) |
2192 if (dumb_mode == 1 || no_forkserver) { | 2277 if (dumb_mode == 1 || no_forkserver) { |
2193 | 2278 |
2194 child_pid = fork(); | 2279 child_pid = fork(); |
2195 | 2280 |
2196 if (child_pid < 0) PFATAL("fork() failed"); | 2281 if (child_pid < 0) PFATAL("fork() failed"); |
2197 | 2282 |
2198 if (!child_pid) { | 2283 if (!child_pid) { |
2199 | 2284 |
2200 struct rlimit r; | 2285 struct rlimit r; |
2201 | 2286 |
2202 #ifdef HAVE_AFFINITY | |
2203 if (use_affinity) set_cpu_affinity(cpu_aff_child); | |
2204 #endif /* HAVE_AFFINITY */ | |
2205 | |
2206 if (mem_limit) { | 2287 if (mem_limit) { |
2207 | 2288 |
2208 r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; | 2289 r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; |
2209 | 2290 |
2210 #ifdef RLIMIT_AS | 2291 #ifdef RLIMIT_AS |
2211 | 2292 |
2212 setrlimit(RLIMIT_AS, &r); /* Ignore errors */ | 2293 setrlimit(RLIMIT_AS, &r); /* Ignore errors */ |
2213 | 2294 |
2214 #else | 2295 #else |
2215 | 2296 |
(...skipping 98 matching lines...) |
2314 | 2395 |
2315 if ((res = read(fsrv_st_fd, &status, 4)) != 4) { | 2396 if ((res = read(fsrv_st_fd, &status, 4)) != 4) { |
2316 | 2397 |
2317 if (stop_soon) return 0; | 2398 if (stop_soon) return 0; |
2318 RPFATAL(res, "Unable to communicate with fork server (OOM?)"); | 2399 RPFATAL(res, "Unable to communicate with fork server (OOM?)"); |
2319 | 2400 |
2320 } | 2401 } |
2321 | 2402 |
2322 } | 2403 } |
2323 | 2404 |
2324 child_pid = 0; | 2405 if (!WIFSTOPPED(status)) child_pid = 0; |
| 2406 |
2325 it.it_value.tv_sec = 0; | 2407 it.it_value.tv_sec = 0; |
2326 it.it_value.tv_usec = 0; | 2408 it.it_value.tv_usec = 0; |
2327 | 2409 |
2328 setitimer(ITIMER_REAL, &it, NULL); | 2410 setitimer(ITIMER_REAL, &it, NULL); |
2329 | 2411 |
2330 total_execs++; | 2412 total_execs++; |
2331 | 2413 |
2332 /* Any subsequent operations on trace_bits must not be moved by the | 2414 /* Any subsequent operations on trace_bits must not be moved by the |
2333 compiler below this point. Past this location, trace_bits[] behave | 2415 compiler below this point. Past this location, trace_bits[] behave |
2334 very normally and do not have to be treated as volatile. */ | 2416 very normally and do not have to be treated as volatile. */ |
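The WIFSTOPPED() check exists for the new persistent_mode flag: a persistent target stops itself after processing each input instead of exiting, so the fuzzer keeps child_pid and resumes the same process for the next round rather than spawning a fresh one. A rough target-side sketch of that stop/continue handshake (the general idea only, not AFL's actual persistent-mode helper; buffer size and iteration cap are arbitrary):

    #include <signal.h>
    #include <unistd.h>

    /* Hypothetical persistent-mode target: handle one input per iteration,
       then SIGSTOP ourselves; afl-fuzz sees WIFSTOPPED(), grades the run,
       and sends SIGCONT so the same process serves the next input. */
    int main(void) {

      char buf[4096];

      for (int i = 0; i < 1000; i++) {

        ssize_t len = read(0, buf, sizeof(buf));
        if (len <= 0) break;

        /* ...feed buf[0..len-1] to the code under test here... */

        raise(SIGSTOP);

      }

      return 0;

    }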
(...skipping 98 matching lines...) |
2433 | 2515 |
2434 static void show_stats(void); | 2516 static void show_stats(void); |
2435 | 2517 |
2436 /* Calibrate a new test case. This is done when processing the input directory | 2518 /* Calibrate a new test case. This is done when processing the input directory |
2437 to warn about flaky or otherwise problematic test cases early on; and when | 2519 to warn about flaky or otherwise problematic test cases early on; and when |
2438 new paths are discovered to detect variable behavior and so on. */ | 2520 new paths are discovered to detect variable behavior and so on. */ |
2439 | 2521 |
2440 static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, | 2522 static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, |
2441 u32 handicap, u8 from_queue) { | 2523 u32 handicap, u8 from_queue) { |
2442 | 2524 |
2443   u8  fault = 0, new_bits = 0, var_detected = 0, first_run = (q->exec_cksum == 0); | 2525   static u8 first_trace[MAP_SIZE]; |
| 2526 |
| 2527 u8 fault = 0, new_bits = 0, var_detected = 0, |
| 2528 first_run = (q->exec_cksum == 0); |
| 2529 |
2444 u64 start_us, stop_us; | 2530 u64 start_us, stop_us; |
2445 | 2531 |
2446 s32 old_sc = stage_cur, old_sm = stage_max, old_tmout = exec_tmout; | 2532 s32 old_sc = stage_cur, old_sm = stage_max, old_tmout = exec_tmout; |
2447 u8* old_sn = stage_name; | 2533 u8* old_sn = stage_name; |
2448 | 2534 |
2449 /* Be a bit more generous about timeouts when resuming sessions, or when | 2535 /* Be a bit more generous about timeouts when resuming sessions, or when |
2450 trying to calibrate already-added finds. This helps avoid trouble due | 2536 trying to calibrate already-added finds. This helps avoid trouble due |
2451 to intermittent latency. */ | 2537 to intermittent latency. */ |
2452 | 2538 |
2453 if (!from_queue || resuming_fuzz) | 2539 if (!from_queue || resuming_fuzz) |
2454 exec_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, | 2540 exec_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, |
2455 exec_tmout * CAL_TMOUT_PERC / 100); | 2541 exec_tmout * CAL_TMOUT_PERC / 100); |
2456 | 2542 |
2457 q->cal_failed++; | 2543 q->cal_failed++; |
2458 | 2544 |
2459 stage_name = "calibration"; | 2545 stage_name = "calibration"; |
2460 stage_max = no_var_check ? CAL_CYCLES_NO_VAR : CAL_CYCLES; | 2546 stage_max = CAL_CYCLES; |
2461 | 2547 |
2462 /* Make sure the forkserver is up before we do anything, and let's not | 2548 /* Make sure the forkserver is up before we do anything, and let's not |
2463 count its spin-up time toward binary calibration. */ | 2549 count its spin-up time toward binary calibration. */ |
2464 | 2550 |
2465 if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) | 2551 if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) |
2466 init_forkserver(argv); | 2552 init_forkserver(argv); |
2467 | 2553 |
| 2554 if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE); |
| 2555 |
2468 start_us = get_cur_time_us(); | 2556 start_us = get_cur_time_us(); |
2469 | 2557 |
2470 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { | 2558 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { |
2471 | 2559 |
2472 u32 cksum; | 2560 u32 cksum; |
2473 | 2561 |
2474 if (!first_run && !(stage_cur % stats_update_freq)) show_stats(); | 2562 if (!first_run && !(stage_cur % stats_update_freq)) show_stats(); |
2475 | 2563 |
2476 write_to_testcase(use_mem, q->len); | 2564 write_to_testcase(use_mem, q->len); |
2477 | 2565 |
2478 fault = run_target(argv); | 2566 fault = run_target(argv); |
2479 | 2567 |
2480 /* stop_soon is set by the handler for Ctrl+C. When it's pressed, | 2568 /* stop_soon is set by the handler for Ctrl+C. When it's pressed, |
2481 we want to bail out quickly. */ | 2569 we want to bail out quickly. */ |
2482 | 2570 |
2483 if (stop_soon || fault != crash_mode) goto abort_calibration; | 2571 if (stop_soon || fault != crash_mode) goto abort_calibration; |
2484 | 2572 |
2485 if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) { | 2573 if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) { |
2486 fault = FAULT_NOINST; | 2574 fault = FAULT_NOINST; |
2487 goto abort_calibration; | 2575 goto abort_calibration; |
2488 } | 2576 } |
2489 | 2577 |
2490 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); | 2578 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); |
2491 | 2579 |
2492 if (q->exec_cksum != cksum) { | 2580 if (q->exec_cksum != cksum) { |
2493 | 2581 |
2494 u8 hnb = has_new_bits(virgin_bits); | 2582 u8 hnb = has_new_bits(virgin_bits); |
2495 if (hnb > new_bits) new_bits = hnb; | 2583 if (hnb > new_bits) new_bits = hnb; |
2496 | 2584 |
2497 if (!no_var_check && q->exec_cksum) { | 2585 if (q->exec_cksum) { |
| 2586 |
| 2587 u32 i; |
| 2588 |
| 2589 for (i = 0; i < MAP_SIZE; i++) { |
| 2590 |
| 2591 if (!var_bytes[i] && first_trace[i] != trace_bits[i]) { |
| 2592 |
| 2593 var_bytes[i] = 1; |
| 2594 stage_max = CAL_CYCLES_LONG; |
| 2595 |
| 2596 } |
| 2597 |
| 2598 } |
2498 | 2599 |
2499 var_detected = 1; | 2600 var_detected = 1; |
2500 stage_max = CAL_CYCLES_LONG; | |
2501 | 2601 |
2502 } else q->exec_cksum = cksum; | 2602 } else { |
| 2603 |
| 2604 q->exec_cksum = cksum; |
| 2605 memcpy(first_trace, trace_bits, MAP_SIZE); |
| 2606 |
| 2607 } |
2503 | 2608 |
2504 } | 2609 } |
2505 | 2610 |
2506 } | 2611 } |
2507 | 2612 |
2508 stop_us = get_cur_time_us(); | 2613 stop_us = get_cur_time_us(); |
2509 | 2614 |
2510 total_cal_us += stop_us - start_us; | 2615 total_cal_us += stop_us - start_us; |
2511 total_cal_cycles += stage_max; | 2616 total_cal_cycles += stage_max; |
2512 | 2617 |
(...skipping 18 matching lines...) |
2531 | 2636 |
2532 abort_calibration: | 2637 abort_calibration: |
2533 | 2638 |
2534 if (new_bits == 2 && !q->has_new_cov) { | 2639 if (new_bits == 2 && !q->has_new_cov) { |
2535 q->has_new_cov = 1; | 2640 q->has_new_cov = 1; |
2536 queued_with_cov++; | 2641 queued_with_cov++; |
2537 } | 2642 } |
2538 | 2643 |
2539 /* Mark variable paths. */ | 2644 /* Mark variable paths. */ |
2540 | 2645 |
2541 if (var_detected && !q->var_behavior) { | 2646 if (var_detected) { |
2542 mark_as_variable(q); | 2647 |
2543 queued_variable++; | 2648 var_byte_count = count_bytes(var_bytes); |
| 2649 |
| 2650 if (!q->var_behavior) { |
| 2651 mark_as_variable(q); |
| 2652 queued_variable++; |
| 2653 } |
| 2654 |
2544 } | 2655 } |
2545 | 2656 |
2546 stage_name = old_sn; | 2657 stage_name = old_sn; |
2547 stage_cur = old_sc; | 2658 stage_cur = old_sc; |
2548 stage_max = old_sm; | 2659 stage_max = old_sm; |
2549 exec_tmout = old_tmout; | 2660 exec_tmout = old_tmout; |
2550 | 2661 |
2551 if (!first_run) show_stats(); | 2662 if (!first_run) show_stats(); |
2552 | 2663 |
2553 return fault; | 2664 return fault; |
(...skipping 568 matching lines...) |
3122 #else | 3233 #else |
3123 | 3234 |
3124 fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes, | 3235 fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes, |
3125 kill_signal); | 3236 kill_signal); |
3126 | 3237 |
3127 #endif /* ^!SIMPLE_FILES */ | 3238 #endif /* ^!SIMPLE_FILES */ |
3128 | 3239 |
3129 unique_crashes++; | 3240 unique_crashes++; |
3130 | 3241 |
3131 last_crash_time = get_cur_time(); | 3242 last_crash_time = get_cur_time(); |
| 3243 last_crash_execs = total_execs; |
3132 | 3244 |
3133 break; | 3245 break; |
3134 | 3246 |
3135 case FAULT_ERROR: FATAL("Unable to execute target application"); | 3247 case FAULT_ERROR: FATAL("Unable to execute target application"); |
3136 | 3248 |
3137 default: return keeping; | 3249 default: return keeping; |
3138 | 3250 |
3139 } | 3251 } |
3140 | 3252 |
3141 /* If we're here, we apparently want to save the crash or hang | 3253 /* If we're here, we apparently want to save the crash or hang |
(...skipping 77 matching lines...) |
3219 if (ret <= 4) return; | 3331 if (ret <= 4) return; |
3220 | 3332 |
3221 exec_tmout = ret; | 3333 exec_tmout = ret; |
3222 timeout_given = 3; | 3334 timeout_given = 3; |
3223 | 3335 |
3224 } | 3336 } |
3225 | 3337 |
3226 | 3338 |
3227 /* Update stats file for unattended monitoring. */ | 3339 /* Update stats file for unattended monitoring. */ |
3228 | 3340 |
3229 static void write_stats_file(double bitmap_cvg, double eps) { | 3341 static void write_stats_file(double bitmap_cvg, double stability, double eps) { |
3230 | 3342 |
3231 static double last_bcvg, last_eps; | 3343 static double last_bcvg, last_stab, last_eps; |
3232 | 3344 |
3233 u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); | 3345 u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); |
3234 s32 fd; | 3346 s32 fd; |
3235 FILE* f; | 3347 FILE* f; |
3236 | 3348 |
3237 fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); | 3349 fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); |
3238 | 3350 |
3239 if (fd < 0) PFATAL("Unable to create '%s'", fn); | 3351 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
3240 | 3352 |
3241 ck_free(fn); | 3353 ck_free(fn); |
3242 | 3354 |
3243 f = fdopen(fd, "w"); | 3355 f = fdopen(fd, "w"); |
3244 | 3356 |
3245 if (!f) PFATAL("fdopen() failed"); | 3357 if (!f) PFATAL("fdopen() failed"); |
3246 | 3358 |
3247 /* Keep last values in case we're called from another context | 3359 /* Keep last values in case we're called from another context |
3248 where exec/sec stats and such are not readily available. */ | 3360 where exec/sec stats and such are not readily available. */ |
3249 | 3361 |
3250 if (!bitmap_cvg && !eps) { | 3362 if (!bitmap_cvg && !stability && !eps) { |
3251 bitmap_cvg = last_bcvg; | 3363 bitmap_cvg = last_bcvg; |
| 3364 stability = last_stab; |
3252 eps = last_eps; | 3365 eps = last_eps; |
3253 } else { | 3366 } else { |
3254 last_bcvg = bitmap_cvg; | 3367 last_bcvg = bitmap_cvg; |
| 3368 last_stab = stability; |
3255 last_eps = eps; | 3369 last_eps = eps; |
3256 } | 3370 } |
3257 | 3371 |
3258 fprintf(f, "start_time : %llu\n" | 3372 fprintf(f, "start_time : %llu\n" |
3259 "last_update : %llu\n" | 3373 "last_update : %llu\n" |
3260 "fuzzer_pid : %u\n" | 3374 "fuzzer_pid : %u\n" |
3261 "cycles_done : %llu\n" | 3375 "cycles_done : %llu\n" |
3262 "execs_done : %llu\n" | 3376 "execs_done : %llu\n" |
3263 "execs_per_sec : %0.02f\n" | 3377 "execs_per_sec : %0.02f\n" |
3264 "paths_total : %u\n" | 3378 "paths_total : %u\n" |
3265 "paths_favored : %u\n" | 3379 "paths_favored : %u\n" |
3266 "paths_found : %u\n" | 3380 "paths_found : %u\n" |
3267 "paths_imported : %u\n" | 3381 "paths_imported : %u\n" |
3268 "max_depth : %u\n" | 3382 "max_depth : %u\n" |
3269 "cur_path : %u\n" | 3383 "cur_path : %u\n" |
3270 "pending_favs : %u\n" | 3384 "pending_favs : %u\n" |
3271 "pending_total : %u\n" | 3385 "pending_total : %u\n" |
3272 "variable_paths : %u\n" | 3386 "variable_paths : %u\n" |
3273 "bitmap_cvg : %0.02f%%\n" | 3387 "stability : %0.02f%%\n" |
3274 "unique_crashes : %llu\n" | 3388 "bitmap_cvg : %0.02f%%\n" |
3275 "unique_hangs : %llu\n" | 3389 "unique_crashes : %llu\n" |
3276 "last_path : %llu\n" | 3390 "unique_hangs : %llu\n" |
3277 "last_crash : %llu\n" | 3391 "last_path : %llu\n" |
3278 "last_hang : %llu\n" | 3392 "last_crash : %llu\n" |
3279 "exec_timeout : %u\n" | 3393 "last_hang : %llu\n" |
3280 "afl_banner : %s\n" | 3394 "execs_since_crash : %llu\n" |
3281 "afl_version : " VERSION "\n" | 3395 "exec_timeout : %u\n" |
3282 "command_line : %s\n", | 3396 "afl_banner : %s\n" |
| 3397 "afl_version : " VERSION "\n" |
| 3398 "command_line : %s\n", |
3283 start_time / 1000, get_cur_time() / 1000, getpid(), | 3399 start_time / 1000, get_cur_time() / 1000, getpid(), |
3284 queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, | 3400 queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, |
3285 queued_paths, queued_favored, queued_discovered, queued_imported, | 3401 queued_paths, queued_favored, queued_discovered, queued_imported, |
3286 max_depth, current_entry, pending_favored, pending_not_fuzzed, | 3402 max_depth, current_entry, pending_favored, pending_not_fuzzed, |
3287 queued_variable, bitmap_cvg, unique_crashes, unique_hangs, | 3403 queued_variable, stability, bitmap_cvg, unique_crashes, |
3288 last_path_time / 1000, last_crash_time / 1000, | 3404 unique_hangs, last_path_time / 1000, last_crash_time / 1000, |
3289 last_hang_time / 1000, exec_tmout, use_banner, orig_cmdline); | 3405 last_hang_time / 1000, total_execs - last_crash_execs, |
| 3406 exec_tmout, use_banner, orig_cmdline); |
3290 /* ignore errors */ | 3407 /* ignore errors */ |
3291 | 3408 |
3292 fclose(f); | 3409 fclose(f); |
3293 | 3410 |
3294 } | 3411 } |
3295 | 3412 |
3296 | 3413 |
3297 /* Update the plot file if there is a reason to. */ | 3414 /* Update the plot file if there is a reason to. */ |
3298 | 3415 |
3299 static void maybe_update_plot_file(double bitmap_cvg, double eps) { | 3416 static void maybe_update_plot_file(double bitmap_cvg, double eps) { |
(...skipping 402 matching lines...) |
3702 static void check_term_size(void); | 3819 static void check_term_size(void); |
3703 | 3820 |
3704 | 3821 |
3705 /* A spiffy retro stats screen! This is called every stats_update_freq | 3822 /* A spiffy retro stats screen! This is called every stats_update_freq |
3706 execve() calls, plus in several other circumstances. */ | 3823 execve() calls, plus in several other circumstances. */ |
3707 | 3824 |
3708 static void show_stats(void) { | 3825 static void show_stats(void) { |
3709 | 3826 |
3710 static u64 last_stats_ms, last_plot_ms, last_ms, last_execs; | 3827 static u64 last_stats_ms, last_plot_ms, last_ms, last_execs; |
3711 static double avg_exec; | 3828 static double avg_exec; |
3712 double t_byte_ratio; | 3829 double t_byte_ratio, stab_ratio; |
3713 | 3830 |
3714 u64 cur_ms; | 3831 u64 cur_ms; |
3715 u32 t_bytes, t_bits; | 3832 u32 t_bytes, t_bits; |
3716 | 3833 |
3717 u32 banner_len, banner_pad; | 3834 u32 banner_len, banner_pad; |
3718 u8 tmp[256]; | 3835 u8 tmp[256]; |
3719 | 3836 |
3720 cur_ms = get_cur_time(); | 3837 cur_ms = get_cur_time(); |
3721 | 3838 |
3722 /* If not enough time has passed since last UI update, bail out. */ | 3839 /* If not enough time has passed since last UI update, bail out. */ |
(...skipping 32 matching lines...) |
3755 /* Tell the callers when to contact us (as measured in execs). */ | 3872 /* Tell the callers when to contact us (as measured in execs). */ |
3756 | 3873 |
3757 stats_update_freq = avg_exec / (UI_TARGET_HZ * 10); | 3874 stats_update_freq = avg_exec / (UI_TARGET_HZ * 10); |
3758 if (!stats_update_freq) stats_update_freq = 1; | 3875 if (!stats_update_freq) stats_update_freq = 1; |
3759 | 3876 |
3760 /* Do some bitmap stats. */ | 3877 /* Do some bitmap stats. */ |
3761 | 3878 |
3762 t_bytes = count_non_255_bytes(virgin_bits); | 3879 t_bytes = count_non_255_bytes(virgin_bits); |
3763 t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE; | 3880 t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE; |
3764 | 3881 |
| 3882 if (t_bytes) |
| 3883 stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes; |
| 3884 else |
| 3885 stab_ratio = 100; |
| 3886 |
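Put differently, stability is the share of already-seen bitmap bytes (t_bytes) that have never been flagged in var_bytes[]; for example, 80 variable bytes out of 4,000 touched bytes yields 100 - 80 * 100 / 4000 = 98%. A trivial check of that arithmetic with made-up numbers:

    #include <assert.h>

    /* Same formula as above, with illustrative values only. */
    static void stability_example(void) {

      double t_bytes = 4000, var_byte_count = 80;
      double stab_ratio = 100 - var_byte_count * 100 / t_bytes;

      assert(stab_ratio == 98.0);

    }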
3765 /* Roughly every minute, update fuzzer stats and save auto tokens. */ | 3887 /* Roughly every minute, update fuzzer stats and save auto tokens. */ |
3766 | 3888 |
3767 if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) { | 3889 if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) { |
3768 | 3890 |
3769 last_stats_ms = cur_ms; | 3891 last_stats_ms = cur_ms; |
3770 write_stats_file(t_byte_ratio, avg_exec); | 3892 write_stats_file(t_byte_ratio, stab_ratio, avg_exec); |
3771 save_auto(); | 3893 save_auto(); |
3772 write_bitmap(); | 3894 write_bitmap(); |
3773 | 3895 |
3774 } | 3896 } |
3775 | 3897 |
3776 /* Every now and then, write plot data. */ | 3898 /* Every now and then, write plot data. */ |
3777 | 3899 |
3778 if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) { | 3900 if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) { |
3779 | 3901 |
3780 last_plot_ms = cur_ms; | 3902 last_plot_ms = cur_ms; |
(...skipping 141 matching lines...) |
3922 /* This gets funny because we want to print several variable-length variables | 4044 /* This gets funny because we want to print several variable-length variables |
3923 together, but then cram them into a fixed-width field - so we need to | 4045 together, but then cram them into a fixed-width field - so we need to |
3924 put them in a temporary buffer first. */ | 4046 put them in a temporary buffer first. */ |
3925 | 4047 |
3926 sprintf(tmp, "%s%s (%0.02f%%)", DI(current_entry), | 4048 sprintf(tmp, "%s%s (%0.02f%%)", DI(current_entry), |
3927 queue_cur->favored ? "" : "*", | 4049 queue_cur->favored ? "" : "*", |
3928 ((double)current_entry * 100) / queued_paths); | 4050 ((double)current_entry * 100) / queued_paths); |
3929 | 4051 |
3930 SAYF(bV bSTOP " now processing : " cRST "%-17s " bSTG bV bSTOP, tmp); | 4052 SAYF(bV bSTOP " now processing : " cRST "%-17s " bSTG bV bSTOP, tmp); |
3931 | 4053 |
3932 | 4054 sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) * |
3933 sprintf(tmp, "%s (%0.02f%%)", DI(t_bytes), t_byte_ratio); | 4055 100 / MAP_SIZE, t_byte_ratio); |
3934 | 4056 |
3935 SAYF(" map density : %s%-21s " bSTG bV "\n", t_byte_ratio > 70 ? cLRD : | 4057 SAYF(" map density : %s%-21s " bSTG bV "\n", t_byte_ratio > 70 ? cLRD : |
3936 ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp); | 4058 ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp); |
3937 | 4059 |
3938 sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths), | 4060 sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths), |
3939 ((double)cur_skipped_paths * 100) / queued_paths); | 4061 ((double)cur_skipped_paths * 100) / queued_paths); |
3940 | 4062 |
3941 SAYF(bV bSTOP " paths timed out : " cRST "%-17s " bSTG bV, tmp); | 4063 SAYF(bV bSTOP " paths timed out : " cRST "%-17s " bSTG bV, tmp); |
3942 | 4064 |
3943 sprintf(tmp, "%0.02f bits/tuple", | 4065 sprintf(tmp, "%0.02f bits/tuple", |
(...skipping 123 matching lines...) |
4067              DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO])); | 4189              DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO])); |
4068 | 4190 |
4069 SAYF(bV bSTOP " dictionary : " cRST "%-37s " bSTG bV bSTOP | 4191 SAYF(bV bSTOP " dictionary : " cRST "%-37s " bSTG bV bSTOP |
4070 " imported : " cRST "%-10s " bSTG bV "\n", tmp, | 4192 " imported : " cRST "%-10s " bSTG bV "\n", tmp, |
4071 sync_id ? DI(queued_imported) : (u8*)"n/a"); | 4193 sync_id ? DI(queued_imported) : (u8*)"n/a"); |
4072 | 4194 |
4073 sprintf(tmp, "%s/%s, %s/%s", | 4195 sprintf(tmp, "%s/%s, %s/%s", |
4074 DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]), | 4196 DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]), |
4075 DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE])); | 4197 DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE])); |
4076 | 4198 |
4077 SAYF(bV bSTOP " havoc : " cRST "%-37s " bSTG bV bSTOP | 4199 SAYF(bV bSTOP " havoc : " cRST "%-37s " bSTG bV bSTOP, tmp); |
4078 " variable : %s%-10s " bSTG bV "\n", tmp, queued_variable ? cLRD : cRST, | 4200 |
4079 no_var_check ? (u8*)"n/a" : DI(queued_variable)); | 4201 if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio); |
| 4202 else strcpy(tmp, "n/a"); |
| 4203 |
| 4204   SAYF(" stability : %s%-10s " bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40) |
| 4205 ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20)) |
| 4206 ? cMGN : cRST), tmp); |
4080 | 4207 |
4081 if (!bytes_trim_out) { | 4208 if (!bytes_trim_out) { |
4082 | 4209 |
4083 sprintf(tmp, "n/a, "); | 4210 sprintf(tmp, "n/a, "); |
4084 | 4211 |
4085 } else { | 4212 } else { |
4086 | 4213 |
4087 sprintf(tmp, "%0.02f%%/%s, ", | 4214 sprintf(tmp, "%0.02f%%/%s, ", |
4088 ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in, | 4215 ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in, |
4089 DI(trim_execs)); | 4216 DI(trim_execs)); |
(...skipping 35 matching lines...) |
4125 | 4252 |
4126 if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count) | 4253 if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count) |
4127 cpu_color = cLGN; | 4254 cpu_color = cLGN; |
4128 | 4255 |
4129 /* If we're clearly oversubscribed, use red. */ | 4256 /* If we're clearly oversubscribed, use red. */ |
4130 | 4257 |
4131 if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD; | 4258 if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD; |
4132 | 4259 |
4133 #ifdef HAVE_AFFINITY | 4260 #ifdef HAVE_AFFINITY |
4134 | 4261 |
4135 if (use_affinity) { | 4262 if (cpu_aff >= 0) { |
4136 | 4263 |
4137 SAYF(SP10 cGRA "[cpu@%02u:%s%3u%%" cGRA "]\r" cRST, | 4264 SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST, |
4138 MIN(cpu_aff_child, 99), cpu_color, | 4265 MIN(cpu_aff, 999), cpu_color, |
4139 MIN(cur_utilization, 999)); | 4266 MIN(cur_utilization, 999)); |
4140 | 4267 |
4141 } else { | 4268 } else { |
4142 | 4269 |
4143 SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, | 4270 SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, |
4144 cpu_color, MIN(cur_utilization, 999)); | 4271 cpu_color, MIN(cur_utilization, 999)); |
4145 | 4272 |
4146 } | 4273 } |
| 4274 |
4147 #else | 4275 #else |
4148 | 4276 |
4149 SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, | 4277 SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, |
4150 cpu_color, MIN(cur_utilization, 999)); | 4278 cpu_color, MIN(cur_utilization, 999)); |
4151 | 4279 |
4152 #endif /* ^HAVE_AFFINITY */ | 4280 #endif /* ^HAVE_AFFINITY */ |
4153 | 4281 |
4154 } else SAYF("\r"); | 4282 } else SAYF("\r"); |
4155 | 4283 |
4156 /* Hallelujah! */ | 4284 /* Hallelujah! */ |
(...skipping 722 matching lines...)
4879 | 5007 |
4880 orig_perf = perf_score = calculate_score(queue_cur); | 5008 orig_perf = perf_score = calculate_score(queue_cur); |
4881 | 5009 |
4882 /* Skip right away if -d is given, if we have done deterministic fuzzing on | 5010 /* Skip right away if -d is given, if we have done deterministic fuzzing on |
4883 this entry ourselves (was_fuzzed), or if it has gone through deterministic | 5011 this entry ourselves (was_fuzzed), or if it has gone through deterministic |
4884 testing in earlier, resumed runs (passed_det). */ | 5012 testing in earlier, resumed runs (passed_det). */ |
4885 | 5013 |
4886 if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) | 5014 if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) |
4887 goto havoc_stage; | 5015 goto havoc_stage; |
4888 | 5016 |
| 5017 /* Skip deterministic fuzzing if exec path checksum puts this out of scope |
| 5018 for this master instance. */ |
| 5019 |
| 5020 if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1) |
| 5021 goto havoc_stage; |
| 5022 |
4889 /********************************************* | 5023 /********************************************* |
4890 * SIMPLE BITFLIP (+dictionary construction) * | 5024 * SIMPLE BITFLIP (+dictionary construction) * |
4891 *********************************************/ | 5025 *********************************************/ |
4892 | 5026 |
4893 #define FLIP_BIT(_ar, _b) do { \ | 5027 #define FLIP_BIT(_ar, _b) do { \ |
4894 u8* _arf = (u8*)(_ar); \ | 5028 u8* _arf = (u8*)(_ar); \ |
4895 u32 _bf = (_b); \ | 5029 u32 _bf = (_b); \ |
4896 _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ | 5030 _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ |
4897 } while (0) | 5031 } while (0) |
4898 | 5032 |
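Quick worked example of FLIP_BIT, since the MSB-first bit numbering is easy to misread: bit _b lives in byte _b >> 3, and the mask 128 >> (_b & 7) counts bits from the most significant end. The driver below is just an illustration.

#include <stdio.h>
#include <stdint.h>

typedef uint8_t  u8;
typedef uint32_t u32;

#define FLIP_BIT(_ar, _b) do { \
    u8* _arf = (u8*)(_ar); \
    u32 _bf = (_b); \
    _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \
  } while (0)

int main(void) {

  u8 buf[2] = { 0, 0 };

  /* Bit 12: byte index 12 >> 3 = 1, mask 128 >> (12 & 7) = 0x08. */
  FLIP_BIT(buf, 12);

  printf("%02x %02x\n", buf[0], buf[1]);   /* prints "00 08" */
  return 0;

}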
(...skipping 149 matching lines...)
5048 } | 5182 } |
5049 | 5183 |
5050 new_hit_cnt = queued_paths + unique_crashes; | 5184 new_hit_cnt = queued_paths + unique_crashes; |
5051 | 5185 |
5052 stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; | 5186 stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; |
5053 stage_cycles[STAGE_FLIP4] += stage_max; | 5187 stage_cycles[STAGE_FLIP4] += stage_max; |
5054 | 5188 |
5055 /* Effector map setup. These macros calculate: | 5189 /* Effector map setup. These macros calculate: |
5056 | 5190 |
5057 EFF_APOS - position of a particular file offset in the map. | 5191 EFF_APOS - position of a particular file offset in the map. |
5058 EFF_ALEN - length of an map with a particular number of bytes. | 5192 EFF_ALEN - length of a map with a particular number of bytes. |
5059 EFF_SPAN_ALEN - map span for a sequence of bytes. | 5193 EFF_SPAN_ALEN - map span for a sequence of bytes. |
5060 | 5194 |
5061 */ | 5195 */ |
5062 | 5196 |
5063 #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) | 5197 #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) |
5064 #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) | 5198 #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) |
5065 #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) | 5199 #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) |
5066 #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) | 5200 #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) |
5067 | 5201 |
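Since the scaling macros above are compact, here is a worked example. It assumes EFF_MAP_SCALE2 is 3 (one effector slot per 8 file bytes), which appears to be the stock config.h value; treat that constant as an assumption of this sketch.

#include <stdio.h>

#define EFF_MAP_SCALE2 3   /* assumed value; the real one lives in config.h */

#define EFF_APOS(_p)          ((_p) >> EFF_MAP_SCALE2)
#define EFF_REM(_x)           ((_x) & ((1 << EFF_MAP_SCALE2) - 1))
#define EFF_ALEN(_l)          (EFF_APOS(_l) + !!EFF_REM(_l))
#define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1)

int main(void) {

  /* File offset 13 maps to slot 1; a 13-byte file needs 2 slots;
     a 4-byte span starting at offset 6 straddles slots 0 and 1. */
  printf("EFF_APOS(13)        = %d\n", EFF_APOS(13));         /* 1 */
  printf("EFF_ALEN(13)        = %d\n", EFF_ALEN(13));         /* 2 */
  printf("EFF_SPAN_ALEN(6, 4) = %d\n", EFF_SPAN_ALEN(6, 4));  /* 2 */

  return 0;

}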
5068 /* Initialize effector map for the next step (see comments below). Always | 5202 /* Initialize effector map for the next step (see comments below). Always |
(...skipping 1410 matching lines...)
6479 sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || | 6613 sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || |
6480 syncing_case < min_accept) continue; | 6614 syncing_case < min_accept) continue; |
6481 | 6615 |
6482 /* OK, sounds like a new one. Let's give it a try. */ | 6616 /* OK, sounds like a new one. Let's give it a try. */ |
6483 | 6617 |
6484 if (syncing_case >= next_min_accept) | 6618 if (syncing_case >= next_min_accept) |
6485 next_min_accept = syncing_case + 1; | 6619 next_min_accept = syncing_case + 1; |
6486 | 6620 |
6487 path = alloc_printf("%s/%s", qd_path, qd_ent->d_name); | 6621 path = alloc_printf("%s/%s", qd_path, qd_ent->d_name); |
6488 | 6622 |
| 6623 /* Allow this to fail in case the other fuzzer is resuming or so... */ |
| 6624 |
6489 fd = open(path, O_RDONLY); | 6625 fd = open(path, O_RDONLY); |
6490 if (fd < 0) PFATAL("Unable to open '%s'", path); | 6626 |
| 6627 if (fd < 0) { |
| 6628 ck_free(path); |
| 6629 continue; |
| 6630 } |
6491 | 6631 |
6492 if (fstat(fd, &st)) PFATAL("fstat() failed"); | 6632 if (fstat(fd, &st)) PFATAL("fstat() failed"); |
6493 | 6633 |
6494 /* Ignore zero-sized or oversized files. */ | 6634 /* Ignore zero-sized or oversized files. */ |
6495 | 6635 |
6496 if (st.st_size && st.st_size <= MAX_FILE) { | 6636 if (st.st_size && st.st_size <= MAX_FILE) { |
6497 | 6637 |
6498 u8 fault; | 6638 u8 fault; |
6499 u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); | 6639 u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); |
6500 | 6640 |
(...skipping 213 matching lines...)
6714 | 6854 |
6715 if (memmem(f_data, f_len, "libasan.so", 10) || | 6855 if (memmem(f_data, f_len, "libasan.so", 10) || |
6716 memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1; | 6856 memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1; |
6717 | 6857 |
6718 /* Detect persistent & deferred init signatures in the binary. */ | 6858 /* Detect persistent & deferred init signatures in the binary. */ |
6719 | 6859 |
6720 if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) { | 6860 if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) { |
6721 | 6861 |
6722 OKF(cPIN "Persistent mode binary detected."); | 6862 OKF(cPIN "Persistent mode binary detected."); |
6723 setenv(PERSIST_ENV_VAR, "1", 1); | 6863 setenv(PERSIST_ENV_VAR, "1", 1); |
6724 no_var_check = 1; | 6864 persistent_mode = 1; |
6725 | 6865 |
6726 } else if (getenv("AFL_PERSISTENT")) { | 6866 } else if (getenv("AFL_PERSISTENT")) { |
6727 | 6867 |
6728 WARNF("AFL_PERSISTENT is no longer supported and may misbehave!"); | 6868 WARNF("AFL_PERSISTENT is no longer supported and may misbehave!"); |
6729 | 6869 |
6730 } | 6870 } |
6731 | 6871 |
6732 if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) { | 6872 if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) { |
6733 | 6873 |
6734 OKF(cPIN "Deferred forkserver binary detected."); | 6874 OKF(cPIN "Deferred forkserver binary detected."); |
(...skipping 96 matching lines...)
6831 "Fuzzing behavior settings:\n\n" | 6971 "Fuzzing behavior settings:\n\n" |
6832 | 6972 |
6833 " -d - quick & dirty mode (skips deterministic steps)\n" | 6973 " -d - quick & dirty mode (skips deterministic steps)\n" |
6834 " -n - fuzz without instrumentation (dumb mode)\n" | 6974 " -n - fuzz without instrumentation (dumb mode)\n" |
6835 " -x dir - optional fuzzer dictionary (see README)\n\n" | 6975 " -x dir - optional fuzzer dictionary (see README)\n\n" |
6836 | 6976 |
6837 "Other stuff:\n\n" | 6977 "Other stuff:\n\n" |
6838 | 6978 |
6839 " -T text - text banner to show on the screen\n" | 6979 " -T text - text banner to show on the screen\n" |
6840 " -M / -S id - distributed mode (see parallel_fuzzing.txt)\n" | 6980 " -M / -S id - distributed mode (see parallel_fuzzing.txt)\n" |
6841 #ifdef HAVE_AFFINITY | |
6842 " -Z core_id - set CPU affinity (see perf_tips.txt)\n" | |
6843 #endif /* HAVE_AFFINITY */ | |
6844 " -C - crash exploration mode (the peruvian rabbit thing)\n\n
" | 6981 " -C - crash exploration mode (the peruvian rabbit thing)\n\n
" |
6845 | 6982 |
6846 "For additional tips, please consult %s/README.\n\n", | 6983 "For additional tips, please consult %s/README.\n\n", |
6847 | 6984 |
6848 argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); | 6985 argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); |
6849 | 6986 |
6850 exit(1); | 6987 exit(1); |
6851 | 6988 |
6852 } | 6989 } |
6853 | 6990 |
(...skipping 277 matching lines...)
7131 #else | 7268 #else |
7132 | 7269 |
7133 int s_name[2] = { CTL_HW, HW_NCPU }; | 7270 int s_name[2] = { CTL_HW, HW_NCPU }; |
7134 | 7271 |
7135 if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; | 7272 if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; |
7136 | 7273 |
7137 #endif /* ^__APPLE__ */ | 7274 #endif /* ^__APPLE__ */ |
7138 | 7275 |
7139 #else | 7276 #else |
7140 | 7277 |
7141 if (!cpu_core_count) { | 7278 #ifdef HAVE_AFFINITY |
7142 | 7279 |
7143 /* On Linux, a simple way is to look at /proc/stat, especially since we'd | 7280 cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); |
7144 be parsing it anyway for other reasons later on. But do this only if | |
7145 cpu_core_count hasn't been obtained before as a result of specifying | |
7146 -Z. */ | |
7147 | 7281 |
7148 FILE* f = fopen("/proc/stat", "r"); | 7282 #else |
7149 u8 tmp[1024]; | |
7150 | 7283 |
7151 if (!f) return; | 7284 FILE* f = fopen("/proc/stat", "r"); |
| 7285 u8 tmp[1024]; |
7152 | 7286 |
7153 while (fgets(tmp, sizeof(tmp), f)) | 7287 if (!f) return; |
7154 if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++; | |
7155 | 7288 |
7156 fclose(f); | 7289 while (fgets(tmp, sizeof(tmp), f)) |
7157 } | 7290 if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++; |
| 7291 |
| 7292 fclose(f); |
| 7293 |
| 7294 #endif /* ^HAVE_AFFINITY */ |
7158 | 7295 |
7159 #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ | 7296 #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ |
7160 | 7297 |
7161 if (cpu_core_count) { | 7298 if (cpu_core_count > 0) { |
7162 | 7299 |
7163 cur_runnable = (u32)get_runnable_processes(); | 7300 cur_runnable = (u32)get_runnable_processes(); |
7164 | 7301 |
7165 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) | 7302 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) |
7166 | 7303 |
7167 /* Add ourselves, since the 1-minute average doesn't include that yet. */ | 7304 /* Add ourselves, since the 1-minute average doesn't include that yet. */ |
7168 | 7305 |
7169 cur_runnable++; | 7306 cur_runnable++; |
7170 | 7307 |
7171 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ | 7308 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ |
7172 | 7309 |
7173 OKF("You have %u CPU cores and %u runnable tasks (utilization: %0.0f%%).", | 7310 OKF("You have %u CPU cores and %u runnable tasks (utilization: %0.0f%%).", |
7174 cpu_core_count, cur_runnable, cur_runnable * 100.0 / cpu_core_count); | 7311 cpu_core_count, cur_runnable, cur_runnable * 100.0 / cpu_core_count); |
7175 | 7312 |
7176 if (cpu_core_count > 1) { | 7313 if (cpu_core_count > 1) { |
7177 | 7314 |
7178 if (cur_runnable > cpu_core_count * 1.5) { | 7315 if (cur_runnable > cpu_core_count * 1.5) { |
7179 | 7316 |
7180 WARNF("System under apparent load, performance may be spotty."); | 7317 WARNF("System under apparent load, performance may be spotty."); |
7181 | 7318 |
7182 } else if (cur_runnable + 1 <= cpu_core_count) { | 7319 } else if (cur_runnable + 1 <= cpu_core_count) { |
7183 | 7320 |
7184 OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path); | 7321 OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path); |
7185 | 7322 |
7186 } | 7323 } |
7187 | 7324 |
7188 } | 7325 } |
7189 | 7326 |
7190 } else WARNF("Unable to figure out the number of CPU cores."); | 7327 } else { |
7191 | 7328 |
7192 #ifdef HAVE_AFFINITY | 7329 cpu_core_count = 0; |
| 7330 WARNF("Unable to figure out the number of CPU cores."); |
7193 | 7331 |
7194 if (use_affinity) | 7332 } |
7195 OKF("Using specified CPU affinity: main = %u, child = %u", | |
7196 cpu_aff_main, cpu_aff_child); | |
7197 else if (cpu_core_count > 1) | |
7198 OKF(cBRI "Try setting CPU affinity (-Z) for a performance boost!" cRST); | |
7199 | |
7200 #endif /* HAVE_AFFINITY */ | |
7201 | 7333 |
7202 } | 7334 } |
7203 | 7335 |
7204 | 7336 |
7205 /* Validate and fix up out_dir and sync_dir when using -S. */ | 7337 /* Validate and fix up out_dir and sync_dir when using -S. */ |
7206 | 7338 |
7207 static void fix_up_sync(void) { | 7339 static void fix_up_sync(void) { |
7208 | 7340 |
7209 u8* x = sync_id; | 7341 u8* x = sync_id; |
7210 | 7342 |
(...skipping 265 matching lines...)
7476 /* Main entry point */ | 7608 /* Main entry point */ |
7477 | 7609 |
7478 int main(int argc, char** argv) { | 7610 int main(int argc, char** argv) { |
7479 | 7611 |
7480 s32 opt; | 7612 s32 opt; |
7481 u64 prev_queued = 0; | 7613 u64 prev_queued = 0; |
7482 u32 sync_interval_cnt = 0, seek_to; | 7614 u32 sync_interval_cnt = 0, seek_to; |
7483 u8 *extras_dir = 0; | 7615 u8 *extras_dir = 0; |
7484 u8 mem_limit_given = 0; | 7616 u8 mem_limit_given = 0; |
7485 u8 exit_1 = !!getenv("AFL_BENCH_JUST_ONE"); | 7617 u8 exit_1 = !!getenv("AFL_BENCH_JUST_ONE"); |
| 7618 char** use_argv; |
7486 | 7619 |
7487 char** use_argv; | 7620 struct timeval tv; |
| 7621 struct timezone tz; |
7488 | 7622 |
7489 SAYF(cCYA "afl-fuzz " cBRI VERSION cRST " by <lcamtuf@google.com>\n"); | 7623 SAYF(cCYA "afl-fuzz " cBRI VERSION cRST " by <lcamtuf@google.com>\n"); |
7490 | 7624 |
7491 doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH; | 7625 doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH; |
7492 | 7626 |
7493 while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QZ:")) > 0) | 7627 gettimeofday(&tv, &tz); |
| 7628 srandom(tv.tv_sec ^ tv.tv_usec ^ getpid()); |
| 7629 |
| 7630 while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:Q")) > 0) |
7494 | 7631 |
7495 switch (opt) { | 7632 switch (opt) { |
7496 | 7633 |
7497 case 'i': | 7634 case 'i': /* input dir */ |
7498 | 7635 |
7499 if (in_dir) FATAL("Multiple -i options not supported"); | 7636 if (in_dir) FATAL("Multiple -i options not supported"); |
7500 in_dir = optarg; | 7637 in_dir = optarg; |
7501 | 7638 |
7502 if (!strcmp(in_dir, "-")) in_place_resume = 1; | 7639 if (!strcmp(in_dir, "-")) in_place_resume = 1; |
7503 | 7640 |
7504 break; | 7641 break; |
7505 | 7642 |
7506 case 'o': /* output dir */ | 7643 case 'o': /* output dir */ |
7507 | 7644 |
7508 if (out_dir) FATAL("Multiple -o options not supported"); | 7645 if (out_dir) FATAL("Multiple -o options not supported"); |
7509 out_dir = optarg; | 7646 out_dir = optarg; |
7510 break; | 7647 break; |
7511 | 7648 |
7512 case 'M': | 7649 case 'M': { /* master sync ID */ |
7513 | 7650 |
7514 force_deterministic = 1; | 7651 u8* c; |
7515 /* Fall through */ | |
7516 | 7652 |
7517 case 'S': /* sync ID */ | 7653 if (sync_id) FATAL("Multiple -S or -M options not supported"); |
| 7654 sync_id = optarg; |
| 7655 |
| 7656 if ((c = strchr(sync_id, ':'))) { |
| 7657 |
| 7658 *c = 0; |
| 7659 |
| 7660 if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 || |
| 7661 !master_id || !master_max || master_id > master_max || |
| 7662 master_max > 1000000) FATAL("Bogus master ID passed to -M"); |
| 7663 |
| 7664 } |
| 7665 |
| 7666 force_deterministic = 1; |
| 7667 |
| 7668 } |
| 7669 |
| 7670 break; |
| 7671 |
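To make the new -M syntax concrete: a standalone replay of the parsing in the case above, fed a hypothetical "fuzzer02:2/4" argument (instance 2 of a 4-way deterministic split). The sanity limits match the hunk; the sample ID is made up. If no ':' suffix is given, master_max stays 0 and the exec_cksum skip check earlier in the diff presumably never fires, so the instance behaves like a single classic master.

#include <stdio.h>
#include <string.h>

typedef unsigned int u32;

int main(void) {

  char arg[] = "fuzzer02:2/4";       /* what optarg might look like */
  u32 master_id = 0, master_max = 0;
  char* c = strchr(arg, ':');

  if (c) {

    *c = 0;                          /* arg now holds just the sync ID */

    if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
        !master_id || !master_max || master_id > master_max ||
        master_max > 1000000) {
      printf("Bogus master ID\n");
      return 1;
    }

  }

  printf("sync_id=%s, master_id=%u, master_max=%u\n",
         arg, master_id, master_max);
  return 0;

}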
| 7672 case 'S': |
7518 | 7673 |
7519 if (sync_id) FATAL("Multiple -S or -M options not supported"); | 7674 if (sync_id) FATAL("Multiple -S or -M options not supported"); |
7520 sync_id = optarg; | 7675 sync_id = optarg; |
7521 break; | 7676 break; |
7522 | 7677 |
7523 case 'f': /* target file */ | 7678 case 'f': /* target file */ |
7524 | 7679 |
7525 if (out_file) FATAL("Multiple -f options not supported"); | 7680 if (out_file) FATAL("Multiple -f options not supported"); |
7526 out_file = optarg; | 7681 out_file = optarg; |
7527 break; | 7682 break; |
7528 | 7683 |
7529 case 'x': | 7684 case 'x': /* dictionary */ |
7530 | 7685 |
7531 if (extras_dir) FATAL("Multiple -x options not supported"); | 7686 if (extras_dir) FATAL("Multiple -x options not supported"); |
7532 extras_dir = optarg; | 7687 extras_dir = optarg; |
7533 break; | 7688 break; |
7534 | 7689 |
7535 case 't': { | 7690 case 't': { /* timeout */ |
7536 | 7691 |
7537 u8 suffix = 0; | 7692 u8 suffix = 0; |
7538 | 7693 |
7539 if (timeout_given) FATAL("Multiple -t options not supported"); | 7694 if (timeout_given) FATAL("Multiple -t options not supported"); |
7540 | 7695 |
7541 if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 || | 7696 if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 || |
7542 optarg[0] == '-') FATAL("Bad syntax used for -t"); | 7697 optarg[0] == '-') FATAL("Bad syntax used for -t"); |
7543 | 7698 |
7544 if (exec_tmout < 5) FATAL("Dangerously low value of -t"); | 7699 if (exec_tmout < 5) FATAL("Dangerously low value of -t"); |
7545 | 7700 |
7546 if (suffix == '+') timeout_given = 2; else timeout_given = 1; | 7701 if (suffix == '+') timeout_given = 2; else timeout_given = 1; |
7547 | 7702 |
7548 break; | 7703 break; |
7549 | 7704 |
7550 } | 7705 } |
7551 | 7706 |
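Minor aside on the -t parsing just above: the trailing '+' only changes timeout_given (1 vs 2), and the explicit optarg[0] check is what rejects negative values that "%u" would otherwise accept. A tiny sketch with made-up inputs:

#include <stdio.h>

int main(void) {

  const char* samples[] = { "200", "200+", "-5" };
  int i;

  for (i = 0; i < 3; i++) {

    unsigned int exec_tmout = 0;
    char suffix = 0;

    if (sscanf(samples[i], "%u%c", &exec_tmout, &suffix) < 1 ||
        samples[i][0] == '-') {
      printf("'%s' -> bad syntax\n", samples[i]);
      continue;
    }

    printf("'%s' -> %u ms, timeout_given = %d\n", samples[i], exec_tmout,
           suffix == '+' ? 2 : 1);

  }

  return 0;

}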
7552 case 'm': { | 7707 case 'm': { /* mem limit */ |
7553 | 7708 |
7554 u8 suffix = 'M'; | 7709 u8 suffix = 'M'; |
7555 | 7710 |
7556 if (mem_limit_given) FATAL("Multiple -m options not supported"); | 7711 if (mem_limit_given) FATAL("Multiple -m options not supported"); |
7557 mem_limit_given = 1; | 7712 mem_limit_given = 1; |
7558 | 7713 |
7559 if (!strcmp(optarg, "none")) { | 7714 if (!strcmp(optarg, "none")) { |
7560 | 7715 |
7561 mem_limit = 0; | 7716 mem_limit = 0; |
7562 break; | 7717 break; |
(...skipping 16 matching lines...)
7579 | 7734 |
7580 if (mem_limit < 5) FATAL("Dangerously low value of -m"); | 7735 if (mem_limit < 5) FATAL("Dangerously low value of -m"); |
7581 | 7736 |
7582 if (sizeof(rlim_t) == 4 && mem_limit > 2000) | 7737 if (sizeof(rlim_t) == 4 && mem_limit > 2000) |
7583 FATAL("Value of -m out of range on 32-bit systems"); | 7738 FATAL("Value of -m out of range on 32-bit systems"); |
7584 | 7739 |
7585 } | 7740 } |
7586 | 7741 |
7587 break; | 7742 break; |
7588 | 7743 |
7589 #ifdef HAVE_AFFINITY | 7744 case 'd': /* skip deterministic */ |
7590 | |
7591 case 'Z': { | |
7592 | |
7593 s32 i; | |
7594 | |
7595 if (use_affinity) FATAL("Multiple -Z options not supported"); | |
7596 use_affinity = 1; | |
7597 | |
7598 cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); | |
7599 | |
7600 i = sscanf(optarg, "%u,%u", &cpu_aff_main, &cpu_aff_child); | |
7601 | |
7602 if (i < 1 || cpu_aff_main >= cpu_core_count) | |
7603 FATAL("Bogus primary core ID passed to -Z (expected 0-%u)", | |
7604 cpu_core_count - 1); | |
7605 | |
7606 if (i == 1) cpu_aff_child = cpu_aff_main; | |
7607 | |
7608 if (cpu_aff_child >= cpu_core_count) | |
7609 FATAL("Bogus secondary core ID passed to -Z (expected 0-%u)", | |
7610 cpu_core_count - 1); | |
7611 | |
7612 break; | |
7613 | |
7614 } | |
7615 | |
7616 #endif /* HAVE_AFFINITY */ | |
7617 | |
7618 case 'd': | |
7619 | 7745 |
7620 if (skip_deterministic) FATAL("Multiple -d options not supported"); | 7746 if (skip_deterministic) FATAL("Multiple -d options not supported"); |
7621 skip_deterministic = 1; | 7747 skip_deterministic = 1; |
7622 use_splicing = 1; | 7748 use_splicing = 1; |
7623 break; | 7749 break; |
7624 | 7750 |
7625 case 'B': | 7751 case 'B': /* load bitmap */ |
7626 | 7752 |
7627 /* This is a secret undocumented option! It is useful if you find | 7753 /* This is a secret undocumented option! It is useful if you find |
7628 an interesting test case during a normal fuzzing process, and want | 7754 an interesting test case during a normal fuzzing process, and want |
7629 to mutate it without rediscovering any of the test cases already | 7755 to mutate it without rediscovering any of the test cases already |
7630 found during an earlier run. | 7756 found during an earlier run. |
7631 | 7757 |
7632 To use this mode, you need to point -B to the fuzz_bitmap produced | 7758 To use this mode, you need to point -B to the fuzz_bitmap produced |
7633 by an earlier run for the exact same binary... and that's it. | 7759 by an earlier run for the exact same binary... and that's it. |
7634 | 7760 |
7635 I only used this once or twice to get variants of a particular | 7761 I only used this once or twice to get variants of a particular |
7636 file, so I'm not making this an official setting. */ | 7762 file, so I'm not making this an official setting. */ |
7637 | 7763 |
7638 if (in_bitmap) FATAL("Multiple -B options not supported"); | 7764 if (in_bitmap) FATAL("Multiple -B options not supported"); |
7639 | 7765 |
7640 in_bitmap = optarg; | 7766 in_bitmap = optarg; |
7641 read_bitmap(in_bitmap); | 7767 read_bitmap(in_bitmap); |
7642 break; | 7768 break; |
7643 | 7769 |
7644 case 'C': | 7770 case 'C': /* crash mode */ |
7645 | 7771 |
7646 if (crash_mode) FATAL("Multiple -C options not supported"); | 7772 if (crash_mode) FATAL("Multiple -C options not supported"); |
7647 crash_mode = FAULT_CRASH; | 7773 crash_mode = FAULT_CRASH; |
7648 break; | 7774 break; |
7649 | 7775 |
7650 case 'n': | 7776 case 'n': /* dumb mode */ |
7651 | 7777 |
7652 if (dumb_mode) FATAL("Multiple -n options not supported"); | 7778 if (dumb_mode) FATAL("Multiple -n options not supported"); |
7653 if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1; | 7779 if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1; |
7654 | 7780 |
7655 break; | 7781 break; |
7656 | 7782 |
7657 case 'T': | 7783 case 'T': /* banner */ |
7658 | 7784 |
7659 if (use_banner) FATAL("Multiple -T options not supported"); | 7785 if (use_banner) FATAL("Multiple -T options not supported"); |
7660 use_banner = optarg; | 7786 use_banner = optarg; |
7661 break; | 7787 break; |
7662 | 7788 |
7663 case 'Q': | 7789 case 'Q': /* QEMU mode */ |
7664 | 7790 |
7665 if (qemu_mode) FATAL("Multiple -Q options not supported"); | 7791 if (qemu_mode) FATAL("Multiple -Q options not supported"); |
7666 qemu_mode = 1; | 7792 qemu_mode = 1; |
7667 | 7793 |
7668 if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU; | 7794 if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU; |
7669 | 7795 |
7670 break; | 7796 break; |
7671 | 7797 |
7672 default: | 7798 default: |
7673 | 7799 |
7674 usage(argv[0]); | 7800 usage(argv[0]); |
7675 | 7801 |
7676 } | 7802 } |
7677 | 7803 |
7678 if (optind == argc || !in_dir || !out_dir) usage(argv[0]); | 7804 if (optind == argc || !in_dir || !out_dir) usage(argv[0]); |
7679 | 7805 |
7680 setup_signal_handlers(); | 7806 setup_signal_handlers(); |
7681 check_asan_opts(); | 7807 check_asan_opts(); |
7682 | 7808 |
7683 #ifdef HAVE_AFFINITY | |
7684 if (use_affinity) set_cpu_affinity(cpu_aff_main); | |
7685 #endif /* HAVE_AFFINITY */ | |
7686 | |
7687 if (sync_id) fix_up_sync(); | 7809 if (sync_id) fix_up_sync(); |
7688 | 7810 |
7689 if (!strcmp(in_dir, out_dir)) | 7811 if (!strcmp(in_dir, out_dir)) |
7690 FATAL("Input and output directories can't be the same"); | 7812 FATAL("Input and output directories can't be the same"); |
7691 | 7813 |
7692 if (dumb_mode) { | 7814 if (dumb_mode) { |
7693 | 7815 |
7694 if (crash_mode) FATAL("-C and -n are mutually exclusive"); | 7816 if (crash_mode) FATAL("-C and -n are mutually exclusive"); |
7695 if (qemu_mode) FATAL("-Q and -n are mutually exclusive"); | 7817 if (qemu_mode) FATAL("-Q and -n are mutually exclusive"); |
7696 | 7818 |
7697 } | 7819 } |
7698 | 7820 |
7699 if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1; | 7821 if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1; |
7700 if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1; | 7822 if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1; |
7701 if (getenv("AFL_NO_VAR_CHECK")) no_var_check = 1; | |
7702 if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1; | 7823 if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1; |
7703 | 7824 |
7704 if (dumb_mode == 2 && no_forkserver) | 7825 if (dumb_mode == 2 && no_forkserver) |
7705 FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive"); | 7826 FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive"); |
7706 | 7827 |
| 7828 if (getenv("AFL_PRELOAD")) { |
| 7829 setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1); |
| 7830 setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1); |
| 7831 } |
| 7832 |
7707 if (getenv("AFL_LD_PRELOAD")) | 7833 if (getenv("AFL_LD_PRELOAD")) |
7708 setenv("LD_PRELOAD", getenv("AFL_LD_PRELOAD"), 1); | 7834 FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD"); |
7709 | 7835 |
7710 save_cmdline(argc, argv); | 7836 save_cmdline(argc, argv); |
7711 | 7837 |
7712 fix_up_banner(argv[optind]); | 7838 fix_up_banner(argv[optind]); |
7713 | 7839 |
7714 check_if_tty(); | 7840 check_if_tty(); |
7715 | 7841 |
7716 get_core_count(); | 7842 get_core_count(); |
| 7843 |
| 7844 #ifdef HAVE_AFFINITY |
| 7845 bind_to_free_cpu(); |
| 7846 #endif /* HAVE_AFFINITY */ |
| 7847 |
7717 check_crash_handling(); | 7848 check_crash_handling(); |
7718 check_cpu_governor(); | 7849 check_cpu_governor(); |
7719 | 7850 |
7720 setup_post(); | 7851 setup_post(); |
7721 setup_shm(); | 7852 setup_shm(); |
| 7853 init_count_class16(); |
7722 | 7854 |
7723 setup_dirs_fds(); | 7855 setup_dirs_fds(); |
7724 read_testcases(); | 7856 read_testcases(); |
7725 load_auto(); | 7857 load_auto(); |
7726 | 7858 |
7727 pivot_inputs(); | 7859 pivot_inputs(); |
7728 | 7860 |
7729 if (extras_dir) load_extras(extras_dir); | 7861 if (extras_dir) load_extras(extras_dir); |
7730 | 7862 |
7731 if (!timeout_given) find_timeout(); | 7863 if (!timeout_given) find_timeout(); |
(...skipping 12 matching lines...)
7744 use_argv = argv + optind; | 7876 use_argv = argv + optind; |
7745 | 7877 |
7746 perform_dry_run(use_argv); | 7878 perform_dry_run(use_argv); |
7747 | 7879 |
7748 cull_queue(); | 7880 cull_queue(); |
7749 | 7881 |
7750 show_init_stats(); | 7882 show_init_stats(); |
7751 | 7883 |
7752 seek_to = find_start_position(); | 7884 seek_to = find_start_position(); |
7753 | 7885 |
7754 write_stats_file(0, 0); | 7886 write_stats_file(0, 0, 0); |
7755 save_auto(); | 7887 save_auto(); |
7756 | 7888 |
7757 if (stop_soon) goto stop_fuzzing; | 7889 if (stop_soon) goto stop_fuzzing; |
7758 | 7890 |
7759 /* Woop woop woop */ | 7891 /* Woop woop woop */ |
7760 | 7892 |
7761 if (!not_on_tty) { | 7893 if (!not_on_tty) { |
7762 sleep(4); | 7894 sleep(4); |
7763 start_time += 4000; | 7895 start_time += 4000; |
7764 if (stop_soon) goto stop_fuzzing; | 7896 if (stop_soon) goto stop_fuzzing; |
(...skipping 55 matching lines...)
7820 if (stop_soon) break; | 7952 if (stop_soon) break; |
7821 | 7953 |
7822 queue_cur = queue_cur->next; | 7954 queue_cur = queue_cur->next; |
7823 current_entry++; | 7955 current_entry++; |
7824 | 7956 |
7825 } | 7957 } |
7826 | 7958 |
7827 if (queue_cur) show_stats(); | 7959 if (queue_cur) show_stats(); |
7828 | 7960 |
7829 write_bitmap(); | 7961 write_bitmap(); |
7830 write_stats_file(0, 0); | 7962 write_stats_file(0, 0, 0); |
7831 save_auto(); | 7963 save_auto(); |
7832 | 7964 |
7833 stop_fuzzing: | 7965 stop_fuzzing: |
7834 | 7966 |
7835 SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST, | 7967 SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST, |
7836 stop_soon == 2 ? "programatically" : "by user"); | 7968 stop_soon == 2 ? "programmatically" : "by user"); |
7837 | 7969 |
7838 /* Running for more than 30 minutes but still doing first cycle? */ | 7970 /* Running for more than 30 minutes but still doing first cycle? */ |
7839 | 7971 |
7840 if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) { | 7972 if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) { |
7841 | 7973 |
7842 SAYF("\n" cYEL "[!] " cRST | 7974 SAYF("\n" cYEL "[!] " cRST |
7843 "Stopped during the first cycle, results may be incomplete.\n" | 7975 "Stopped during the first cycle, results may be incomplete.\n" |
7844 " (For info on resuming, see %s/README.)\n", doc_path); | 7976 " (For info on resuming, see %s/README.)\n", doc_path); |
7845 | 7977 |
7846 } | 7978 } |
7847 | 7979 |
7848 fclose(plot_file); | 7980 fclose(plot_file); |
7849 destroy_queue(); | 7981 destroy_queue(); |
7850 destroy_extras(); | 7982 destroy_extras(); |
7851 ck_free(target_path); | 7983 ck_free(target_path); |
7852 | 7984 |
7853 alloc_report(); | 7985 alloc_report(); |
7854 | 7986 |
7855 OKF("We're done here. Have a nice day!\n"); | 7987 OKF("We're done here. Have a nice day!\n"); |
7856 | 7988 |
7857 exit(0); | 7989 exit(0); |
7858 | 7990 |
7859 } | 7991 } |
7860 | 7992 |
7861 #endif /* !AFL_LIB */ | 7993 #endif /* !AFL_LIB */ |