OLD | NEW |
(Empty) | |
| 1 /* |
| 2 american fuzzy lop - fuzzer code |
| 3 -------------------------------- |
| 4 |
| 5 Written and maintained by Michal Zalewski <lcamtuf@google.com> |
| 6 |
| 7 Forkserver design by Jann Horn <jannhorn@googlemail.com> |
| 8 |
| 9 Copyright 2013, 2014, 2015, 2016 Google Inc. All rights reserved. |
| 10 |
| 11 Licensed under the Apache License, Version 2.0 (the "License"); |
| 12 you may not use this file except in compliance with the License. |
| 13 You may obtain a copy of the License at: |
| 14 |
| 15 http://www.apache.org/licenses/LICENSE-2.0 |
| 16 |
| 17 This is the real deal: the program takes an instrumented binary and |
| 18 attempts a variety of basic fuzzing tricks, paying close attention to |
| 19 how they affect the execution path. |
| 20 |
| 21 */ |
| 22 |
| 23 #define AFL_MAIN |
| 24 #define MESSAGES_TO_STDOUT |
| 25 |
| 26 #define _GNU_SOURCE |
| 27 #define _FILE_OFFSET_BITS 64 |
| 28 |
| 29 #include "config.h" |
| 30 #include "types.h" |
| 31 #include "debug.h" |
| 32 #include "alloc-inl.h" |
| 33 #include "hash.h" |
| 34 |
| 35 #include <stdio.h> |
| 36 #include <unistd.h> |
| 37 #include <stdlib.h> |
| 38 #include <string.h> |
| 39 #include <time.h> |
| 40 #include <errno.h> |
| 41 #include <signal.h> |
| 42 #include <dirent.h> |
| 43 #include <ctype.h> |
| 44 #include <fcntl.h> |
| 45 #include <termios.h> |
| 46 #include <dlfcn.h> |
| 47 #include <sched.h> |
| 48 |
| 49 #include <sys/wait.h> |
| 50 #include <sys/time.h> |
| 51 #include <sys/shm.h> |
| 52 #include <sys/stat.h> |
| 53 #include <sys/types.h> |
| 54 #include <sys/resource.h> |
| 55 #include <sys/mman.h> |
| 56 #include <sys/ioctl.h> |
| 57 #include <sys/file.h> |
| 58 |
| 59 #if defined(__APPLE__) || defined(__FreeBSD__) || defined(__OpenBSD__) |
| 60 # include <sys/sysctl.h> |
| 61 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ |
| 62 |
| 63 /* For supporting -Z on systems that have sched_setaffinity. */ |
| 64 |
| 65 #ifdef __linux__ |
| 66 # define HAVE_AFFINITY 1 |
| 67 #endif /* __linux__ */ |
| 68 |
| 69 /* A toggle to export some variables when building as a library. Not very |
| 70 useful for the general public. */ |
| 71 |
| 72 #ifdef AFL_LIB |
| 73 # define EXP_ST |
| 74 #else |
| 75 # define EXP_ST static |
| 76 #endif /* ^AFL_LIB */ |
| 77 |
| 78 /* Lots of globals, but mostly for the status UI and other things where it |
| 79 really makes no sense to haul them around as function parameters. */ |
| 80 |
| 81 |
| 82 EXP_ST u8 *in_dir, /* Input directory with test cases */ |
| 83 *out_file, /* File to fuzz, if any */ |
| 84 *out_dir, /* Working & output directory */ |
| 85 *sync_dir, /* Synchronization directory */ |
| 86 *sync_id, /* Fuzzer ID */ |
| 87 *use_banner, /* Display banner */ |
| 88 *in_bitmap, /* Input bitmap */ |
| 89 *doc_path, /* Path to documentation dir */ |
| 90 *target_path, /* Path to target binary */ |
| 91 *orig_cmdline; /* Original command line */ |
| 92 |
| 93 EXP_ST u32 exec_tmout = EXEC_TIMEOUT; /* Configurable exec timeout (ms) */ |
| 94 EXP_ST u64 mem_limit = MEM_LIMIT; /* Memory cap for child (MB) */ |
| 95 |
| 96 static u32 stats_update_freq = 1; /* Stats update frequency (execs) */ |
| 97 |
| 98 EXP_ST u8 skip_deterministic, /* Skip deterministic stages? */ |
| 99 force_deterministic, /* Force deterministic stages? */ |
| 100 use_splicing, /* Recombine input files? */ |
| 101 dumb_mode, /* Run in non-instrumented mode? */ |
| 102 score_changed, /* Scoring for favorites changed? */ |
| 103 kill_signal, /* Signal that killed the child */ |
| 104 resuming_fuzz, /* Resuming an older fuzzing job? */ |
| 105 timeout_given, /* Specific timeout given? */ |
| 106 not_on_tty, /* stdout is not a tty */ |
| 107 term_too_small, /* terminal dimensions too small */ |
| 108 uses_asan, /* Target uses ASAN? */ |
| 109 no_forkserver, /* Disable forkserver? */ |
| 110 crash_mode, /* Crash mode! Yeah! */ |
| 111 in_place_resume, /* Attempt in-place resume? */ |
| 112 auto_changed, /* Auto-generated tokens changed? */ |
| 113 no_cpu_meter_red, /* Feng shui on the status screen */ |
| 114 no_var_check, /* Don't detect variable behavior */ |
| 115 shuffle_queue, /* Shuffle input queue? */ |
| 116 bitmap_changed = 1, /* Time to update bitmap? */ |
| 117 qemu_mode, /* Running in QEMU mode? */ |
| 118 skip_requested, /* Skip request, via SIGUSR1 */ |
| 119 run_over10m; /* Run time over 10 minutes? */ |
| 120 |
| 121 static s32 out_fd, /* Persistent fd for out_file */ |
| 122 dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */ |
| 123 dev_null_fd = -1, /* Persistent fd for /dev/null */ |
| 124 fsrv_ctl_fd, /* Fork server control pipe (write) */ |
| 125 fsrv_st_fd; /* Fork server status pipe (read) */ |
| 126 |
| 127 static s32 forksrv_pid, /* PID of the fork server */ |
| 128 child_pid = -1, /* PID of the fuzzed program */ |
| 129 out_dir_fd = -1; /* FD of the lock file */ |
| 130 |
| 131 EXP_ST u8* trace_bits; /* SHM with instrumentation bitmap */ |
| 132 |
| 133 EXP_ST u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */ |
| 134 virgin_hang[MAP_SIZE], /* Bits we haven't seen in hangs */ |
| 135 virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */ |
| 136 |
| 137 static s32 shm_id; /* ID of the SHM region */ |
| 138 |
| 139 static volatile u8 stop_soon, /* Ctrl-C pressed? */ |
| 140 clear_screen = 1, /* Window resized? */ |
| 141 child_timed_out; /* Traced process timed out? */ |
| 142 |
| 143 EXP_ST u32 queued_paths, /* Total number of queued testcases */ |
| 144 queued_variable, /* Testcases with variable behavior */ |
| 145 queued_at_start, /* Total number of initial inputs */ |
| 146 queued_discovered, /* Items discovered during this run */ |
| 147 queued_imported, /* Items imported via -S */ |
| 148 queued_favored, /* Paths deemed favorable */ |
| 149 queued_with_cov, /* Paths with new coverage bytes */ |
| 150 pending_not_fuzzed, /* Queued but not done yet */ |
| 151 pending_favored, /* Pending favored paths */ |
| 152 cur_skipped_paths, /* Abandoned inputs in cur cycle */ |
| 153 cur_depth, /* Current path depth */ |
| 154 max_depth, /* Max path depth */ |
| 155 useless_at_start, /* Number of useless starting paths */ |
| 156 current_entry, /* Current queue entry ID */ |
| 157 havoc_div = 1; /* Cycle count divisor for havoc */ |
| 158 |
| 159 EXP_ST u64 total_crashes, /* Total number of crashes */ |
| 160 unique_crashes, /* Crashes with unique signatures */ |
| 161 total_hangs, /* Total number of hangs */ |
| 162 unique_hangs, /* Hangs with unique signatures */ |
| 163 total_execs, /* Total execve() calls */ |
| 164 start_time, /* Unix start time (ms) */ |
| 165 last_path_time, /* Time for most recent path (ms) */ |
| 166 last_crash_time, /* Time for most recent crash (ms) */ |
| 167 last_hang_time, /* Time for most recent hang (ms) */ |
| 168 queue_cycle, /* Queue round counter */ |
| 169 cycles_wo_finds, /* Cycles without any new paths */ |
| 170 trim_execs, /* Execs done to trim input files */ |
| 171 bytes_trim_in, /* Bytes coming into the trimmer */ |
| 172 bytes_trim_out, /* Bytes coming out of the trimmer */ |
| 173 blocks_eff_total, /* Blocks subject to effector maps */ |
| 174 blocks_eff_select; /* Blocks selected as fuzzable */ |
| 175 |
| 176 static u32 subseq_hangs; /* Number of hangs in a row */ |
| 177 |
| 178 static u8 *stage_name = "init", /* Name of the current fuzz stage */ |
| 179 *stage_short, /* Short stage name */ |
| 180 *syncing_party; /* Currently syncing with... */ |
| 181 |
| 182 static s32 stage_cur, stage_max; /* Stage progression */ |
| 183 static s32 splicing_with = -1; /* Splicing with which test case? */ |
| 184 |
| 185 static u32 syncing_case; /* Syncing with case #... */ |
| 186 |
| 187 static s32 stage_cur_byte, /* Byte offset of current stage op */ |
| 188 stage_cur_val; /* Value used for stage op */ |
| 189 |
| 190 static u8 stage_val_type; /* Value type (STAGE_VAL_*) */ |
| 191 |
| 192 static u64 stage_finds[32], /* Patterns found per fuzz stage */ |
| 193 stage_cycles[32]; /* Execs per fuzz stage */ |
| 194 |
| 195 static u32 rand_cnt; /* Random number counter */ |
| 196 |
| 197 static u64 total_cal_us, /* Total calibration time (us) */ |
| 198 total_cal_cycles; /* Total calibration cycles */ |
| 199 |
| 200 static u64 total_bitmap_size, /* Total bit count for all bitmaps */ |
| 201 total_bitmap_entries; /* Number of bitmaps counted */ |
| 202 |
| 203 static u32 cpu_core_count; /* CPU core count */ |
| 204 |
| 205 #ifdef HAVE_AFFINITY |
| 206 |
| 207 static u8 use_affinity; /* Using -Z */ |
| 208 |
| 209 static u32 cpu_aff_main, /* Affinity for main process */ |
| 210 cpu_aff_child; /* Affinity for fuzzed child */ |
| 211 |
| 212 #endif /* HAVE_AFFINITY */ |
| 213 |
| 214 static FILE* plot_file; /* Gnuplot output file */ |
| 215 |
| 216 struct queue_entry { |
| 217 |
| 218 u8* fname; /* File name for the test case */ |
| 219 u32 len; /* Input length */ |
| 220 |
| 221 u8 cal_failed, /* Calibration failed? */ |
| 222 trim_done, /* Trimmed? */ |
| 223 was_fuzzed, /* Had any fuzzing done yet? */ |
| 224 passed_det, /* Deterministic stages passed? */ |
| 225 has_new_cov, /* Triggers new coverage? */ |
| 226 var_behavior, /* Variable behavior? */ |
| 227 favored, /* Currently favored? */ |
| 228 fs_redundant; /* Marked as redundant in the fs? */ |
| 229 |
| 230 u32 bitmap_size, /* Number of bits set in bitmap */ |
| 231 exec_cksum; /* Checksum of the execution trace */ |
| 232 |
| 233 u64 exec_us, /* Execution time (us) */ |
| 234 handicap, /* Number of queue cycles behind */ |
| 235 depth; /* Path depth */ |
| 236 |
| 237 u8* trace_mini; /* Trace bytes, if kept */ |
| 238 u32 tc_ref; /* Trace bytes ref count */ |
| 239 |
| 240 struct queue_entry *next, /* Next element, if any */ |
| 241 *next_100; /* 100 elements ahead */ |
| 242 |
| 243 }; |
| 244 |
| 245 static struct queue_entry *queue, /* Fuzzing queue (linked list) */ |
| 246 *queue_cur, /* Current offset within the queue */ |
| 247 *queue_top, /* Top of the list */ |
| 248 *q_prev100; /* Previous 100 marker */ |
| 249 |
| 250 static struct queue_entry* |
| 251 top_rated[MAP_SIZE]; /* Top entries for bitmap bytes */ |
| 252 |
| 253 struct extra_data { |
| 254 u8* data; /* Dictionary token data */ |
| 255 u32 len; /* Dictionary token length */ |
| 256 u32 hit_cnt; /* Use count in the corpus */ |
| 257 }; |
| 258 |
| 259 static struct extra_data* extras; /* Extra tokens to fuzz with */ |
| 260 static u32 extras_cnt; /* Total number of tokens read */ |
| 261 |
| 262 static struct extra_data* a_extras; /* Automatically selected extras */ |
| 263 static u32 a_extras_cnt; /* Total number of tokens available */ |
| 264 |
| 265 static u8* (*post_handler)(u8* buf, u32* len); |
| 266 |
| 267 /* Interesting values, as per config.h */ |
| 268 |
| 269 static s8 interesting_8[] = { INTERESTING_8 }; |
| 270 static s16 interesting_16[] = { INTERESTING_8, INTERESTING_16 }; |
| 271 static s32 interesting_32[] = { INTERESTING_8, INTERESTING_16, INTERESTING_32 }; |
| 272 |
| 273 /* Fuzzing stages */ |
| 274 |
| 275 enum { |
| 276 /* 00 */ STAGE_FLIP1, |
| 277 /* 01 */ STAGE_FLIP2, |
| 278 /* 02 */ STAGE_FLIP4, |
| 279 /* 03 */ STAGE_FLIP8, |
| 280 /* 04 */ STAGE_FLIP16, |
| 281 /* 05 */ STAGE_FLIP32, |
| 282 /* 06 */ STAGE_ARITH8, |
| 283 /* 07 */ STAGE_ARITH16, |
| 284 /* 08 */ STAGE_ARITH32, |
| 285 /* 09 */ STAGE_INTEREST8, |
| 286 /* 10 */ STAGE_INTEREST16, |
| 287 /* 11 */ STAGE_INTEREST32, |
| 288 /* 12 */ STAGE_EXTRAS_UO, |
| 289 /* 13 */ STAGE_EXTRAS_UI, |
| 290 /* 14 */ STAGE_EXTRAS_AO, |
| 291 /* 15 */ STAGE_HAVOC, |
| 292 /* 16 */ STAGE_SPLICE |
| 293 }; |
| 294 |
| 295 /* Stage value types */ |
| 296 |
| 297 enum { |
| 298 /* 00 */ STAGE_VAL_NONE, |
| 299 /* 01 */ STAGE_VAL_LE, |
| 300 /* 02 */ STAGE_VAL_BE |
| 301 }; |
| 302 |
| 303 /* Execution status fault codes */ |
| 304 |
| 305 enum { |
| 306 /* 00 */ FAULT_NONE, |
| 307 /* 01 */ FAULT_HANG, |
| 308 /* 02 */ FAULT_CRASH, |
| 309 /* 03 */ FAULT_ERROR, |
| 310 /* 04 */ FAULT_NOINST, |
| 311 /* 05 */ FAULT_NOBITS |
| 312 }; |
| 313 |
| 314 |
| 315 /* Get unix time in milliseconds */ |
| 316 |
| 317 static u64 get_cur_time(void) { |
| 318 |
| 319 struct timeval tv; |
| 320 struct timezone tz; |
| 321 |
| 322 gettimeofday(&tv, &tz); |
| 323 |
| 324 return (tv.tv_sec * 1000ULL) + (tv.tv_usec / 1000); |
| 325 |
| 326 } |
| 327 |
| 328 |
| 329 /* Get unix time in microseconds */ |
| 330 |
| 331 static u64 get_cur_time_us(void) { |
| 332 |
| 333 struct timeval tv; |
| 334 struct timezone tz; |
| 335 |
| 336 gettimeofday(&tv, &tz); |
| 337 |
| 338 return (tv.tv_sec * 1000000ULL) + tv.tv_usec; |
| 339 |
| 340 } |
| 341 |
| 342 |
| 343 #ifdef HAVE_AFFINITY |
| 344 |
| 345 /* Set CPU affinity (on systems that support it). */ |
| 346 |
| 347 static void set_cpu_affinity(u32 cpu_id) { |
| 348 |
| 349 cpu_set_t c; |
| 350 |
| 351 CPU_ZERO(&c); |
| 352 CPU_SET(cpu_id, &c); |
| 353 |
| 354 if (sched_setaffinity(0, sizeof(c), &c)) |
| 355 PFATAL("sched_setaffinity failed"); |
| 356 |
| 357 } |
| 358 |
| 359 #endif /* HAVE_AFFINITY */ |
| 360 |
| 361 |
| 362 /* Generate a random number (from 0 to limit - 1). This may |
| 363 have slight bias. */ |
| 364 |
| 365 static inline u32 UR(u32 limit) { |
| 366 |
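| /* Reseed from /dev/urandom roughly every RESEED_RNG executions; the |
| "slight bias" noted above comes from the random() % limit below. */ |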
| 367 if (!rand_cnt--) { |
| 368 |
| 369 u32 seed[2]; |
| 370 |
| 371 ck_read(dev_urandom_fd, &seed, sizeof(seed), "/dev/urandom"); |
| 372 |
| 373 srandom(seed[0]); |
| 374 rand_cnt = (RESEED_RNG / 2) + (seed[1] % RESEED_RNG); |
| 375 |
| 376 } |
| 377 |
| 378 return random() % limit; |
| 379 |
| 380 } |
| 381 |
| 382 |
| 383 /* Shuffle an array of pointers. Might be slightly biased. */ |
| 384 |
| 385 static void shuffle_ptrs(void** ptrs, u32 cnt) { |
| 386 |
| 387 u32 i; |
| 388 |
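| /* Fisher-Yates-style pass; the loop ends one swap short of a full pass |
| and UR() has modulo bias, hence "slightly biased" above. */ |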
| 389 for (i = 0; i < cnt - 2; i++) { |
| 390 |
| 391 u32 j = i + UR(cnt - i); |
| 392 void *s = ptrs[i]; |
| 393 ptrs[i] = ptrs[j]; |
| 394 ptrs[j] = s; |
| 395 |
| 396 } |
| 397 |
| 398 } |
| 399 |
| 400 |
| 401 #ifndef IGNORE_FINDS |
| 402 |
| 403 /* Helper function to compare buffers; returns first and last differing offset. We |
| 404 use this to find reasonable locations for splicing two files. */ |
| 405 |
| 406 static void locate_diffs(u8* ptr1, u8* ptr2, u32 len, s32* first, s32* last) { |
| 407 |
| 408 s32 f_loc = -1; |
| 409 s32 l_loc = -1; |
| 410 u32 pos; |
| 411 |
| 412 for (pos = 0; pos < len; pos++) { |
| 413 |
| 414 if (*(ptr1++) != *(ptr2++)) { |
| 415 |
| 416 if (f_loc == -1) f_loc = pos; |
| 417 l_loc = pos; |
| 418 |
| 419 } |
| 420 |
| 421 } |
| 422 |
| 423 *first = f_loc; |
| 424 *last = l_loc; |
| 425 |
| 426 return; |
| 427 |
| 428 } |
| 429 |
| 430 #endif /* !IGNORE_FINDS */ |
| 431 |
| 432 |
| 433 /* Describe integer. Uses 12 cyclic static buffers for return values. The value |
| 434 returned should be five characters or less for all the integers we reasonably |
| 435 expect to see. */ |
| 436 |
| 437 static u8* DI(u64 val) { |
| 438 |
| 439 static u8 tmp[12][16]; |
| 440 static u8 cur; |
| 441 |
| 442 cur = (cur + 1) % 12; |
| 443 |
| 444 #define CHK_FORMAT(_divisor, _limit_mult, _fmt, _cast) do { \ |
| 445 if (val < (_divisor) * (_limit_mult)) { \ |
| 446 sprintf(tmp[cur], _fmt, ((_cast)val) / (_divisor)); \ |
| 447 return tmp[cur]; \ |
| 448 } \ |
| 449 } while (0) |
| 450 |
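| /* Worked example: CHK_FORMAT(1000, 99.95, "%0.01fk", double) matches any |
| val below 99950 and prints val / 1000.0 to one decimal, e.g. "99.9k". */ |
| |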
| 451 /* 0-9999 */ |
| 452 CHK_FORMAT(1, 10000, "%llu", u64); |
| 453 |
| 454 /* 10.0k - 99.9k */ |
| 455 CHK_FORMAT(1000, 99.95, "%0.01fk", double); |
| 456 |
| 457 /* 100k - 999k */ |
| 458 CHK_FORMAT(1000, 1000, "%lluk", u64); |
| 459 |
| 460 /* 1.00M - 9.99M */ |
| 461 CHK_FORMAT(1000 * 1000, 9.995, "%0.02fM", double); |
| 462 |
| 463 /* 10.0M - 99.9M */ |
| 464 CHK_FORMAT(1000 * 1000, 99.95, "%0.01fM", double); |
| 465 |
| 466 /* 100M - 999M */ |
| 467 CHK_FORMAT(1000 * 1000, 1000, "%lluM", u64); |
| 468 |
| 469 /* 1.00G - 9.99G */ |
| 470 CHK_FORMAT(1000LL * 1000 * 1000, 9.995, "%0.02fG", double); |
| 471 |
| 472 /* 10.0G - 99.9G */ |
| 473 CHK_FORMAT(1000LL * 1000 * 1000, 99.95, "%0.01fG", double); |
| 474 |
| 475 /* 100G - 999G */ |
| 476 CHK_FORMAT(1000LL * 1000 * 1000, 1000, "%lluG", u64); |
| 477 |
| 478 /* 1.00T - 9.99T */ |
| 479 CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 9.995, "%0.02fT", double); |
| 480 |
| 481 /* 10.0T - 99.9T */ |
| 482 CHK_FORMAT(1000LL * 1000 * 1000 * 1000, 99.95, "%0.01fT", double); |
| 483 |
| 484 /* 100T+ */ |
| 485 strcpy(tmp[cur], "infty"); |
| 486 return tmp[cur]; |
| 487 |
| 488 } |
| 489 |
| 490 |
| 491 /* Describe float. Similar to the above, except with a single |
| 492 static buffer. */ |
| 493 |
| 494 static u8* DF(double val) { |
| 495 |
| 496 static u8 tmp[16]; |
| 497 |
| 498 if (val < 99.995) { |
| 499 sprintf(tmp, "%0.02f", val); |
| 500 return tmp; |
| 501 } |
| 502 |
| 503 if (val < 999.95) { |
| 504 sprintf(tmp, "%0.01f", val); |
| 505 return tmp; |
| 506 } |
| 507 |
| 508 return DI((u64)val); |
| 509 |
| 510 } |
| 511 |
| 512 |
| 513 /* Describe integer as memory size. */ |
| 514 |
| 515 static u8* DMS(u64 val) { |
| 516 |
| 517 static u8 tmp[12][16]; |
| 518 static u8 cur; |
| 519 |
| 520 cur = (cur + 1) % 12; |
| 521 |
| 522 /* 0-9999 */ |
| 523 CHK_FORMAT(1, 10000, "%llu B", u64); |
| 524 |
| 525 /* 10.0k - 99.9k */ |
| 526 CHK_FORMAT(1024, 99.95, "%0.01f kB", double); |
| 527 |
| 528 /* 100k - 999k */ |
| 529 CHK_FORMAT(1024, 1000, "%llu kB", u64); |
| 530 |
| 531 /* 1.00M - 9.99M */ |
| 532 CHK_FORMAT(1024 * 1024, 9.995, "%0.02f MB", double); |
| 533 |
| 534 /* 10.0M - 99.9M */ |
| 535 CHK_FORMAT(1024 * 1024, 99.95, "%0.01f MB", double); |
| 536 |
| 537 /* 100M - 999M */ |
| 538 CHK_FORMAT(1024 * 1024, 1000, "%llu MB", u64); |
| 539 |
| 540 /* 1.00G - 9.99G */ |
| 541 CHK_FORMAT(1024LL * 1024 * 1024, 9.995, "%0.02f GB", double); |
| 542 |
| 543 /* 10.0G - 99.9G */ |
| 544 CHK_FORMAT(1024LL * 1024 * 1024, 99.95, "%0.01f GB", double); |
| 545 |
| 546 /* 100G - 999G */ |
| 547 CHK_FORMAT(1024LL * 1024 * 1024, 1000, "%llu GB", u64); |
| 548 |
| 549 /* 1.00T - 9.99T */ |
| 550 CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 9.995, "%0.02f TB", double); |
| 551 |
| 552 /* 10.0T - 99.9T */ |
| 553 CHK_FORMAT(1024LL * 1024 * 1024 * 1024, 99.95, "%0.01f TB", double); |
| 554 |
| 555 #undef CHK_FORMAT |
| 556 |
| 557 /* 100T+ */ |
| 558 strcpy(tmp[cur], "infty"); |
| 559 return tmp[cur]; |
| 560 |
| 561 } |
| 562 |
| 563 |
| 564 /* Describe time delta. Returns one static buffer, 34 chars or less. */ |
| 565 |
| 566 static u8* DTD(u64 cur_ms, u64 event_ms) { |
| 567 |
| 568 static u8 tmp[64]; |
| 569 u64 delta; |
| 570 s32 t_d, t_h, t_m, t_s; |
| 571 |
| 572 if (!event_ms) return "none seen yet"; |
| 573 |
| 574 delta = cur_ms - event_ms; |
| 575 |
| 576 t_d = delta / 1000 / 60 / 60 / 24; |
| 577 t_h = (delta / 1000 / 60 / 60) % 24; |
| 578 t_m = (delta / 1000 / 60) % 60; |
| 579 t_s = (delta / 1000) % 60; |
| 580 |
| 581 sprintf(tmp, "%s days, %u hrs, %u min, %u sec", DI(t_d), t_h, t_m, t_s); |
| 582 return tmp; |
| 583 |
| 584 } |
| 585 |
| 586 |
| 587 /* Mark deterministic checks as done for a particular queue entry. We use the |
| 588 .state file to avoid repeating deterministic fuzzing when resuming aborted |
| 589 scans. */ |
| 590 |
| 591 static void mark_as_det_done(struct queue_entry* q) { |
| 592 |
| 593 u8* fn = strrchr(q->fname, '/'); |
| 594 s32 fd; |
| 595 |
| 596 fn = alloc_printf("%s/queue/.state/deterministic_done/%s", out_dir, fn + 1); |
| 597 |
| 598 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 599 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
| 600 close(fd); |
| 601 |
| 602 ck_free(fn); |
| 603 |
| 604 q->passed_det = 1; |
| 605 |
| 606 } |
| 607 |
| 608 |
| 609 /* Mark as variable. Create symlinks if possible to make it easier to examine |
| 610 the files. */ |
| 611 |
| 612 static void mark_as_variable(struct queue_entry* q) { |
| 613 |
| 614 u8 *fn = strrchr(q->fname, '/') + 1, *ldest; |
| 615 |
| 616 ldest = alloc_printf("../../%s", fn); |
| 617 fn = alloc_printf("%s/queue/.state/variable_behavior/%s", out_dir, fn); |
| 618 |
| 619 if (symlink(ldest, fn)) { |
| 620 |
| 621 s32 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 622 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
| 623 close(fd); |
| 624 |
| 625 } |
| 626 |
| 627 ck_free(ldest); |
| 628 ck_free(fn); |
| 629 |
| 630 q->var_behavior = 1; |
| 631 |
| 632 } |
| 633 |
| 634 |
| 635 /* Mark / unmark as redundant (edge-only). This is not used for restoring state, |
| 636 but may be useful for post-processing datasets. */ |
| 637 |
| 638 static void mark_as_redundant(struct queue_entry* q, u8 state) { |
| 639 |
| 640 u8* fn; |
| 641 s32 fd; |
| 642 |
| 643 if (state == q->fs_redundant) return; |
| 644 |
| 645 q->fs_redundant = state; |
| 646 |
| 647 fn = strrchr(q->fname, '/'); |
| 648 fn = alloc_printf("%s/queue/.state/redundant_edges/%s", out_dir, fn + 1); |
| 649 |
| 650 if (state) { |
| 651 |
| 652 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 653 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
| 654 close(fd); |
| 655 |
| 656 } else { |
| 657 |
| 658 if (unlink(fn)) PFATAL("Unable to remove '%s'", fn); |
| 659 |
| 660 } |
| 661 |
| 662 ck_free(fn); |
| 663 |
| 664 } |
| 665 |
| 666 |
| 667 /* Append new test case to the queue. */ |
| 668 |
| 669 static void add_to_queue(u8* fname, u32 len, u8 passed_det) { |
| 670 |
| 671 struct queue_entry* q = ck_alloc(sizeof(struct queue_entry)); |
| 672 |
| 673 q->fname = fname; |
| 674 q->len = len; |
| 675 q->depth = cur_depth + 1; |
| 676 q->passed_det = passed_det; |
| 677 |
| 678 if (q->depth > max_depth) max_depth = q->depth; |
| 679 |
| 680 if (queue_top) { |
| 681 |
| 682 queue_top->next = q; |
| 683 queue_top = q; |
| 684 |
| 685 } else q_prev100 = queue = queue_top = q; |
| 686 |
| 687 queued_paths++; |
| 688 pending_not_fuzzed++; |
| 689 |
| 690 cycles_wo_finds = 0; |
| 691 |
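| /* Maintain the next_100 skip links, which let queue scans jump ahead |
| 100 entries at a time. */ |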
| 692 if (!(queued_paths % 100)) { |
| 693 |
| 694 q_prev100->next_100 = q; |
| 695 q_prev100 = q; |
| 696 |
| 697 } |
| 698 |
| 699 last_path_time = get_cur_time(); |
| 700 |
| 701 } |
| 702 |
| 703 |
| 704 /* Destroy the entire queue. */ |
| 705 |
| 706 EXP_ST void destroy_queue(void) { |
| 707 |
| 708 struct queue_entry *q = queue, *n; |
| 709 |
| 710 while (q) { |
| 711 |
| 712 n = q->next; |
| 713 ck_free(q->fname); |
| 714 ck_free(q->trace_mini); |
| 715 ck_free(q); |
| 716 q = n; |
| 717 |
| 718 } |
| 719 |
| 720 } |
| 721 |
| 722 |
| 723 /* Write bitmap to file. The bitmap is useful mostly for the secret |
| 724 -B option, to focus a separate fuzzing session on a particular |
| 725 interesting input without rediscovering all the others. */ |
| 726 |
| 727 EXP_ST void write_bitmap(void) { |
| 728 |
| 729 u8* fname; |
| 730 s32 fd; |
| 731 |
| 732 if (!bitmap_changed) return; |
| 733 bitmap_changed = 0; |
| 734 |
| 735 fname = alloc_printf("%s/fuzz_bitmap", out_dir); |
| 736 fd = open(fname, O_WRONLY | O_CREAT | O_TRUNC, 0600); |
| 737 |
| 738 if (fd < 0) PFATAL("Unable to open '%s'", fname); |
| 739 |
| 740 ck_write(fd, virgin_bits, MAP_SIZE, fname); |
| 741 |
| 742 close(fd); |
| 743 ck_free(fname); |
| 744 |
| 745 } |
| 746 |
| 747 |
| 748 /* Read bitmap from file. This is for the -B option again. */ |
| 749 |
| 750 EXP_ST void read_bitmap(u8* fname) { |
| 751 |
| 752 s32 fd = open(fname, O_RDONLY); |
| 753 |
| 754 if (fd < 0) PFATAL("Unable to open '%s'", fname); |
| 755 |
| 756 ck_read(fd, virgin_bits, MAP_SIZE, fname); |
| 757 |
| 758 close(fd); |
| 759 |
| 760 } |
| 761 |
| 762 |
| 763 /* Check if the current execution path brings anything new to the table. |
| 764 Update virgin bits to reflect the finds. Returns 1 if the only change is |
| 765 the hit-count for a particular tuple; 2 if there are new tuples seen. |
| 766 Updates the map, so subsequent calls will always return 0. |
| 767 |
| 768 This function is called after every exec() on a fairly large buffer, so |
| 769 it needs to be fast. We do this in 32-bit and 64-bit flavors. */ |
| 770 |
| 771 #define FFL(_b) (0xffULL << ((_b) << 3)) |
| 772 #define FF(_b) (0xff << ((_b) << 3)) |
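| /* FFL(n) / FF(n) mask byte lane n of a u64 / u32, i.e. one trace_bits[] |
| byte within the word being scanned. */ |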
| 773 |
| 774 static inline u8 has_new_bits(u8* virgin_map) { |
| 775 |
| 776 #ifdef __x86_64__ |
| 777 |
| 778 u64* current = (u64*)trace_bits; |
| 779 u64* virgin = (u64*)virgin_map; |
| 780 |
| 781 u32 i = (MAP_SIZE >> 3); |
| 782 |
| 783 #else |
| 784 |
| 785 u32* current = (u32*)trace_bits; |
| 786 u32* virgin = (u32*)virgin_map; |
| 787 |
| 788 u32 i = (MAP_SIZE >> 2); |
| 789 |
| 790 #endif /* ^__x86_64__ */ |
| 791 |
| 792 u8 ret = 0; |
| 793 |
| 794 while (i--) { |
| 795 |
| 796 #ifdef __x86_64__ |
| 797 |
| 798 u64 cur = *current; |
| 799 u64 vir = *virgin; |
| 800 |
| 801 #else |
| 802 |
| 803 u32 cur = *current; |
| 804 u32 vir = *virgin; |
| 805 |
| 806 #endif /* ^__x86_64__ */ |
| 807 |
| 808 /* Optimize for *current == ~*virgin, since this will almost always be the |
| 809 case. */ |
| 810 |
| 811 if (cur & vir) { |
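| /* A nonzero AND means this execution touched at least one still-virgin |
| bit somewhere in this word. */ |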
| 812 |
| 813 if (ret < 2) { |
| 814 |
| 815 /* This trace did not have any new bytes yet; see if there's any |
| 816 current[] byte that is non-zero when virgin[] is 0xff. */ |
| 817 |
| 818 #ifdef __x86_64__ |
| 819 |
| 820 if (((cur & FFL(0)) && (vir & FFL(0)) == FFL(0)) || |
| 821 ((cur & FFL(1)) && (vir & FFL(1)) == FFL(1)) || |
| 822 ((cur & FFL(2)) && (vir & FFL(2)) == FFL(2)) || |
| 823 ((cur & FFL(3)) && (vir & FFL(3)) == FFL(3)) || |
| 824 ((cur & FFL(4)) && (vir & FFL(4)) == FFL(4)) || |
| 825 ((cur & FFL(5)) && (vir & FFL(5)) == FFL(5)) || |
| 826 ((cur & FFL(6)) && (vir & FFL(6)) == FFL(6)) || |
| 827 ((cur & FFL(7)) && (vir & FFL(7)) == FFL(7))) ret = 2; |
| 828 else ret = 1; |
| 829 |
| 830 #else |
| 831 |
| 832 if (((cur & FF(0)) && (vir & FF(0)) == FF(0)) || |
| 833 ((cur & FF(1)) && (vir & FF(1)) == FF(1)) || |
| 834 ((cur & FF(2)) && (vir & FF(2)) == FF(2)) || |
| 835 ((cur & FF(3)) && (vir & FF(3)) == FF(3))) ret = 2; |
| 836 else ret = 1; |
| 837 |
| 838 #endif /* ^__x86_64__ */ |
| 839 |
| 840 } |
| 841 |
| 842 *virgin = vir & ~cur; |
| 843 |
| 844 } |
| 845 |
| 846 current++; |
| 847 virgin++; |
| 848 |
| 849 } |
| 850 |
| 851 if (ret && virgin_map == virgin_bits) bitmap_changed = 1; |
| 852 |
| 853 return ret; |
| 854 |
| 855 } |
| 856 |
| 857 |
| 858 /* Count the number of bits set in the provided bitmap. Used for the status |
| 859 screen several times every second, does not have to be fast. */ |
| 860 |
| 861 static u32 count_bits(u8* mem) { |
| 862 |
| 863 u32* ptr = (u32*)mem; |
| 864 u32 i = (MAP_SIZE >> 2); |
| 865 u32 ret = 0; |
| 866 |
| 867 while (i--) { |
| 868 |
| 869 u32 v = *(ptr++); |
| 870 |
| 871 /* This gets called on the inverse, virgin bitmap; optimize for sparse |
| 872 data. */ |
| 873 |
| 874 if (v == 0xffffffff) { |
| 875 ret += 32; |
| 876 continue; |
| 877 } |
| 878 |
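| /* Classic SWAR popcount: pairwise bit sums, then nibble sums, then one |
| multiply so the top byte of the product accumulates the total. */ |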
| 879 v -= ((v >> 1) & 0x55555555); |
| 880 v = (v & 0x33333333) + ((v >> 2) & 0x33333333); |
| 881 ret += (((v + (v >> 4)) & 0xF0F0F0F) * 0x01010101) >> 24; |
| 882 |
| 883 } |
| 884 |
| 885 return ret; |
| 886 |
| 887 } |
| 888 |
| 889 |
| 890 /* Count the number of bytes set in the bitmap. Called fairly sporadically, |
| 891 mostly to update the status screen or calibrate and examine confirmed |
| 892 new paths. */ |
| 893 |
| 894 static u32 count_bytes(u8* mem) { |
| 895 |
| 896 u32* ptr = (u32*)mem; |
| 897 u32 i = (MAP_SIZE >> 2); |
| 898 u32 ret = 0; |
| 899 |
| 900 while (i--) { |
| 901 |
| 902 u32 v = *(ptr++); |
| 903 |
| 904 if (!v) continue; |
| 905 if (v & FF(0)) ret++; |
| 906 if (v & FF(1)) ret++; |
| 907 if (v & FF(2)) ret++; |
| 908 if (v & FF(3)) ret++; |
| 909 |
| 910 } |
| 911 |
| 912 return ret; |
| 913 |
| 914 } |
| 915 |
| 916 |
| 917 /* Count the number of non-255 bytes set in the bitmap. Used strictly for the |
| 918 status screen, several calls per second or so. */ |
| 919 |
| 920 static u32 count_non_255_bytes(u8* mem) { |
| 921 |
| 922 u32* ptr = (u32*)mem; |
| 923 u32 i = (MAP_SIZE >> 2); |
| 924 u32 ret = 0; |
| 925 |
| 926 while (i--) { |
| 927 |
| 928 u32 v = *(ptr++); |
| 929 |
| 930 /* This is called on the virgin bitmap, so optimize for the most likely |
| 931 case. */ |
| 932 |
| 933 if (v == 0xffffffff) continue; |
| 934 if ((v & FF(0)) != FF(0)) ret++; |
| 935 if ((v & FF(1)) != FF(1)) ret++; |
| 936 if ((v & FF(2)) != FF(2)) ret++; |
| 937 if ((v & FF(3)) != FF(3)) ret++; |
| 938 |
| 939 } |
| 940 |
| 941 return ret; |
| 942 |
| 943 } |
| 944 |
| 945 |
| 946 /* Destructively simplify trace by eliminating hit count information |
| 947 and replacing it with 0x80 or 0x01 depending on whether the tuple |
| 948 is hit or not. Called on every new crash or hang, should be |
| 949 reasonably fast. */ |
| 950 |
| 951 #define AREP4(_sym) (_sym), (_sym), (_sym), (_sym) |
| 952 #define AREP8(_sym) AREP4(_sym), AREP4(_sym) |
| 953 #define AREP16(_sym) AREP8(_sym), AREP8(_sym) |
| 954 #define AREP32(_sym) AREP16(_sym), AREP16(_sym) |
| 955 #define AREP64(_sym) AREP32(_sym), AREP32(_sym) |
| 956 #define AREP128(_sym) AREP64(_sym), AREP64(_sym) |
| 957 |
| 958 static u8 simplify_lookup[256] = { |
| 959 /* 4 */ 1, 128, 128, 128, |
| 960 /* +4 */ AREP4(128), |
| 961 /* +8 */ AREP8(128), |
| 962 /* +16 */ AREP16(128), |
| 963 /* +32 */ AREP32(128), |
| 964 /* +64 */ AREP64(128), |
| 965 /* +128 */ AREP128(128) |
| 966 }; |
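| /* Net effect: a byte of 0 (tuple not hit) maps to 0x01, any nonzero hit |
| count maps to 0x80 - crash and hang signatures ignore counts entirely. */ |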
| 967 |
| 968 #ifdef __x86_64__ |
| 969 |
| 970 static void simplify_trace(u64* mem) { |
| 971 |
| 972 u32 i = MAP_SIZE >> 3; |
| 973 |
| 974 while (i--) { |
| 975 |
| 976 /* Optimize for sparse bitmaps. */ |
| 977 |
| 978 if (*mem) { |
| 979 |
| 980 u8* mem8 = (u8*)mem; |
| 981 |
| 982 mem8[0] = simplify_lookup[mem8[0]]; |
| 983 mem8[1] = simplify_lookup[mem8[1]]; |
| 984 mem8[2] = simplify_lookup[mem8[2]]; |
| 985 mem8[3] = simplify_lookup[mem8[3]]; |
| 986 mem8[4] = simplify_lookup[mem8[4]]; |
| 987 mem8[5] = simplify_lookup[mem8[5]]; |
| 988 mem8[6] = simplify_lookup[mem8[6]]; |
| 989 mem8[7] = simplify_lookup[mem8[7]]; |
| 990 |
| 991 } else *mem = 0x0101010101010101ULL; |
| 992 |
| 993 mem++; |
| 994 |
| 995 } |
| 996 |
| 997 } |
| 998 |
| 999 #else |
| 1000 |
| 1001 static void simplify_trace(u32* mem) { |
| 1002 |
| 1003 u32 i = MAP_SIZE >> 2; |
| 1004 |
| 1005 while (i--) { |
| 1006 |
| 1007 /* Optimize for sparse bitmaps. */ |
| 1008 |
| 1009 if (*mem) { |
| 1010 |
| 1011 u8* mem8 = (u8*)mem; |
| 1012 |
| 1013 mem8[0] = simplify_lookup[mem8[0]]; |
| 1014 mem8[1] = simplify_lookup[mem8[1]]; |
| 1015 mem8[2] = simplify_lookup[mem8[2]]; |
| 1016 mem8[3] = simplify_lookup[mem8[3]]; |
| 1017 |
| 1018 } else *mem = 0x01010101; |
| 1019 |
| 1020 mem++; |
| 1021 } |
| 1022 |
| 1023 } |
| 1024 |
| 1025 #endif /* ^__x86_64__ */ |
| 1026 |
| 1027 |
| 1028 /* Destructively classify execution counts in a trace. This is used as a |
| 1029 preprocessing step for any newly acquired traces. Called on every exec, |
| 1030 must be fast. */ |
| 1031 |
| 1032 static u8 count_class_lookup[256] = { |
| 1033 |
| 1034 /* 0 - 3: 4 */ 0, 1, 2, 4, |
| 1035 /* 4 - 7: +4 */ AREP4(8), |
| 1036 /* 8 - 15: +8 */ AREP8(16), |
| 1037 /* 16 - 31: +16 */ AREP16(32), |
| 1038 /* 32 - 127: +96 */ AREP64(64), AREP32(64), |
| 1039 /* 128+: +128 */ AREP128(128) |
| 1040 |
| 1041 }; |
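| /* Raw hit counts collapse into the buckets 1, 2, 3, 4-7, 8-15, 16-31, |
| 32-127 and 128+, so minor loop-count jitter between runs does not |
| show up as a new execution path. */ |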
| 1042 |
| 1043 #ifdef __x86_64__ |
| 1044 |
| 1045 static inline void classify_counts(u64* mem) { |
| 1046 |
| 1047 u32 i = MAP_SIZE >> 3; |
| 1048 |
| 1049 while (i--) { |
| 1050 |
| 1051 /* Optimize for sparse bitmaps. */ |
| 1052 |
| 1053 if (*mem) { |
| 1054 |
| 1055 u8* mem8 = (u8*)mem; |
| 1056 |
| 1057 mem8[0] = count_class_lookup[mem8[0]]; |
| 1058 mem8[1] = count_class_lookup[mem8[1]]; |
| 1059 mem8[2] = count_class_lookup[mem8[2]]; |
| 1060 mem8[3] = count_class_lookup[mem8[3]]; |
| 1061 mem8[4] = count_class_lookup[mem8[4]]; |
| 1062 mem8[5] = count_class_lookup[mem8[5]]; |
| 1063 mem8[6] = count_class_lookup[mem8[6]]; |
| 1064 mem8[7] = count_class_lookup[mem8[7]]; |
| 1065 |
| 1066 } |
| 1067 |
| 1068 mem++; |
| 1069 |
| 1070 } |
| 1071 |
| 1072 } |
| 1073 |
| 1074 #else |
| 1075 |
| 1076 static inline void classify_counts(u32* mem) { |
| 1077 |
| 1078 u32 i = MAP_SIZE >> 2; |
| 1079 |
| 1080 while (i--) { |
| 1081 |
| 1082 /* Optimize for sparse bitmaps. */ |
| 1083 |
| 1084 if (*mem) { |
| 1085 |
| 1086 u8* mem8 = (u8*)mem; |
| 1087 |
| 1088 mem8[0] = count_class_lookup[mem8[0]]; |
| 1089 mem8[1] = count_class_lookup[mem8[1]]; |
| 1090 mem8[2] = count_class_lookup[mem8[2]]; |
| 1091 mem8[3] = count_class_lookup[mem8[3]]; |
| 1092 |
| 1093 } |
| 1094 |
| 1095 mem++; |
| 1096 |
| 1097 } |
| 1098 |
| 1099 } |
| 1100 |
| 1101 #endif /* ^__x86_64__ */ |
| 1102 |
| 1103 |
| 1104 /* Get rid of shared memory (atexit handler). */ |
| 1105 |
| 1106 static void remove_shm(void) { |
| 1107 |
| 1108 shmctl(shm_id, IPC_RMID, NULL); |
| 1109 |
| 1110 } |
| 1111 |
| 1112 |
| 1113 /* Compact trace bytes into a smaller bitmap. We effectively just drop the |
| 1114 count information here. This is called only sporadically, for some |
| 1115 new paths. */ |
| 1116 |
| 1117 static void minimize_bits(u8* dst, u8* src) { |
| 1118 |
| 1119 u32 i = 0; |
| 1120 |
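| /* dst is expected to be a zeroed MAP_SIZE >> 3 buffer; each nonzero |
| source byte sets exactly one bit. */ |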
| 1121 while (i < MAP_SIZE) { |
| 1122 |
| 1123 if (*(src++)) dst[i >> 3] |= 1 << (i & 7); |
| 1124 i++; |
| 1125 |
| 1126 } |
| 1127 |
| 1128 } |
| 1129 |
| 1130 |
| 1131 /* When we bump into a new path, we call this to see if the path appears |
| 1132 more "favorable" than any of the existing ones. The purpose of the |
| 1133 "favorables" is to have a minimal set of paths that trigger all the bits |
| 1134 seen in the bitmap so far, and focus on fuzzing them at the expense of |
| 1135 the rest. |
| 1136 |
| 1137 The first step of the process is to maintain a list of top_rated[] entries |
| 1138 for every byte in the bitmap. We win that slot if there is no previous |
| 1139 contender, or if the contender has a more favorable speed x size factor. */ |
| 1140 |
| 1141 static void update_bitmap_score(struct queue_entry* q) { |
| 1142 |
| 1143 u32 i; |
| 1144 u64 fav_factor = q->exec_us * q->len; |
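| /* Lower exec-time x input-size product == more favorable entry. */ |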
| 1145 |
| 1146 /* For every byte set in trace_bits[], see if there is a previous winner, |
| 1147 and how it compares to us. */ |
| 1148 |
| 1149 for (i = 0; i < MAP_SIZE; i++) |
| 1150 |
| 1151 if (trace_bits[i]) { |
| 1152 |
| 1153 if (top_rated[i]) { |
| 1154 |
| 1155 /* Faster-executing or smaller test cases are favored. */ |
| 1156 |
| 1157 if (fav_factor > top_rated[i]->exec_us * top_rated[i]->len) continue; |
| 1158 |
| 1159 /* Looks like we're going to win. Decrease ref count for the |
| 1160 previous winner, discard its trace_bits[] if necessary. */ |
| 1161 |
| 1162 if (!--top_rated[i]->tc_ref) { |
| 1163 ck_free(top_rated[i]->trace_mini); |
| 1164 top_rated[i]->trace_mini = 0; |
| 1165 } |
| 1166 |
| 1167 } |
| 1168 |
| 1169 /* Insert ourselves as the new winner. */ |
| 1170 |
| 1171 top_rated[i] = q; |
| 1172 q->tc_ref++; |
| 1173 |
| 1174 if (!q->trace_mini) { |
| 1175 q->trace_mini = ck_alloc(MAP_SIZE >> 3); |
| 1176 minimize_bits(q->trace_mini, trace_bits); |
| 1177 } |
| 1178 |
| 1179 score_changed = 1; |
| 1180 |
| 1181 } |
| 1182 |
| 1183 } |
| 1184 |
| 1185 |
| 1186 /* The second part of the mechanism discussed above is a routine that |
| 1187 goes over top_rated[] entries, and then sequentially grabs winners for |
| 1188 previously-unseen bytes (temp_v) and marks them as favored, at least |
| 1189 until the next run. The favored entries are given more air time during |
| 1190 all fuzzing steps. */ |
| 1191 |
| 1192 static void cull_queue(void) { |
| 1193 |
| 1194 struct queue_entry* q; |
| 1195 static u8 temp_v[MAP_SIZE >> 3]; |
| 1196 u32 i; |
| 1197 |
| 1198 if (dumb_mode || !score_changed) return; |
| 1199 |
| 1200 score_changed = 0; |
| 1201 |
| 1202 memset(temp_v, 255, MAP_SIZE >> 3); |
| 1203 |
| 1204 queued_favored = 0; |
| 1205 pending_favored = 0; |
| 1206 |
| 1207 q = queue; |
| 1208 |
| 1209 while (q) { |
| 1210 q->favored = 0; |
| 1211 q = q->next; |
| 1212 } |
| 1213 |
| 1214 /* Let's see if anything in the bitmap isn't captured in temp_v. |
| 1215 If yes, and if it has a top_rated[] contender, let's use it. */ |
| 1216 |
| 1217 for (i = 0; i < MAP_SIZE; i++) |
| 1218 if (top_rated[i] && (temp_v[i >> 3] & (1 << (i & 7)))) { |
| 1219 |
| 1220 u32 j = MAP_SIZE >> 3; |
| 1221 |
| 1222 /* Remove all bits belonging to the current entry from temp_v. */ |
| 1223 |
| 1224 while (j--) |
| 1225 if (top_rated[i]->trace_mini[j]) |
| 1226 temp_v[j] &= ~top_rated[i]->trace_mini[j]; |
| 1227 |
| 1228 top_rated[i]->favored = 1; |
| 1229 queued_favored++; |
| 1230 |
| 1231 if (!top_rated[i]->was_fuzzed) pending_favored++; |
| 1232 |
| 1233 } |
| 1234 |
| 1235 q = queue; |
| 1236 |
| 1237 while (q) { |
| 1238 mark_as_redundant(q, !q->favored); |
| 1239 q = q->next; |
| 1240 } |
| 1241 |
| 1242 } |
| 1243 |
| 1244 |
| 1245 /* Configure shared memory and virgin_bits. This is called at startup. */ |
| 1246 |
| 1247 EXP_ST void setup_shm(void) { |
| 1248 |
| 1249 u8* shm_str; |
| 1250 |
| 1251 if (!in_bitmap) memset(virgin_bits, 255, MAP_SIZE); |
| 1252 |
| 1253 memset(virgin_hang, 255, MAP_SIZE); |
| 1254 memset(virgin_crash, 255, MAP_SIZE); |
| 1255 |
| 1256 shm_id = shmget(IPC_PRIVATE, MAP_SIZE, IPC_CREAT | IPC_EXCL | 0600); |
| 1257 |
| 1258 if (shm_id < 0) PFATAL("shmget() failed"); |
| 1259 |
| 1260 atexit(remove_shm); |
| 1261 |
| 1262 shm_str = alloc_printf("%d", shm_id); |
| 1263 |
| 1264 /* If somebody is asking us to fuzz instrumented binaries in dumb mode, |
| 1265 we don't want them to detect instrumentation, since we won't be sending |
| 1266 fork server commands. This should be replaced with better auto-detection |
| 1267 later on, perhaps? */ |
| 1268 |
| 1269 if (!dumb_mode) setenv(SHM_ENV_VAR, shm_str, 1); |
| 1270 |
| 1271 ck_free(shm_str); |
| 1272 |
| 1273 trace_bits = shmat(shm_id, NULL, 0); |
| 1274 |
| 1275 if (trace_bits == (void *)-1) PFATAL("shmat() failed"); |
| 1276 |
| 1277 } |
| 1278 |
| 1279 |
| 1280 /* Load postprocessor, if available. */ |
| 1281 |
| 1282 static void setup_post(void) { |
| 1283 |
| 1284 void* dh; |
| 1285 u8* fn = getenv("AFL_POST_LIBRARY"); |
| 1286 u32 tlen = 6; |
| 1287 |
| 1288 if (!fn) return; |
| 1289 |
| 1290 ACTF("Loading postprocessor from '%s'...", fn); |
| 1291 |
| 1292 dh = dlopen(fn, RTLD_NOW); |
| 1293 if (!dh) FATAL("%s", dlerror()); |
| 1294 |
| 1295 post_handler = dlsym(dh, "afl_postprocess"); |
| 1296 if (!post_handler) FATAL("Symbol 'afl_postprocess' not found."); |
| 1297 |
| 1298 /* Do a quick test. It's better to segfault now than later =) */ |
| 1299 |
| 1300 post_handler("hello", &tlen); |
| 1301 |
| 1302 OKF("Postprocessor installed successfully."); |
| 1303 |
| 1304 } |
| 1305 |
| 1306 |
| 1307 /* Read all testcases from the input directory, then queue them for testing. |
| 1308 Called at startup. */ |
| 1309 |
| 1310 static void read_testcases(void) { |
| 1311 |
| 1312 struct dirent **nl; |
| 1313 s32 nl_cnt; |
| 1314 u32 i; |
| 1315 u8* fn; |
| 1316 |
| 1317 /* Auto-detect non-in-place resumption attempts. */ |
| 1318 |
| 1319 fn = alloc_printf("%s/queue", in_dir); |
| 1320 if (!access(fn, F_OK)) in_dir = fn; else ck_free(fn); |
| 1321 |
| 1322 ACTF("Scanning '%s'...", in_dir); |
| 1323 |
| 1324 /* We use scandir() + alphasort() rather than readdir() because otherwise, |
| 1325 the ordering of test cases would vary somewhat randomly and would be |
| 1326 difficult to control. */ |
| 1327 |
| 1328 nl_cnt = scandir(in_dir, &nl, NULL, alphasort); |
| 1329 |
| 1330 if (nl_cnt < 0) { |
| 1331 |
| 1332 if (errno == ENOENT || errno == ENOTDIR) |
| 1333 |
| 1334 SAYF("\n" cLRD "[-] " cRST |
| 1335 "The input directory does not seem to be valid - try again. The fuzze
r needs\n" |
| 1336 " one or more test case to start with - ideally, a small file unde
r 1 kB\n" |
| 1337 " or so. The cases must be stored as regular files directly in the
input\n" |
| 1338 " directory.\n"); |
| 1339 |
| 1340 PFATAL("Unable to open '%s'", in_dir); |
| 1341 |
| 1342 } |
| 1343 |
| 1344 if (shuffle_queue && nl_cnt > 1) { |
| 1345 |
| 1346 ACTF("Shuffling queue..."); |
| 1347 shuffle_ptrs((void**)nl, nl_cnt); |
| 1348 |
| 1349 } |
| 1350 |
| 1351 for (i = 0; i < nl_cnt; i++) { |
| 1352 |
| 1353 struct stat st; |
| 1354 |
| 1355 u8* fn = alloc_printf("%s/%s", in_dir, nl[i]->d_name); |
| 1356 u8* dfn = alloc_printf("%s/.state/deterministic_done/%s", in_dir, nl[i]->d_name); |
| 1357 |
| 1358 u8 passed_det = 0; |
| 1359 |
| 1360 free(nl[i]); /* not tracked */ |
| 1361 |
| 1362 if (lstat(fn, &st) || access(fn, R_OK)) |
| 1363 PFATAL("Unable to access '%s'", fn); |
| 1364 |
| 1365 /* This also takes care of . and .. */ |
| 1366 |
| 1367 if (!S_ISREG(st.st_mode) || !st.st_size || strstr(fn, "/README.txt")) { |
| 1368 |
| 1369 ck_free(fn); |
| 1370 ck_free(dfn); |
| 1371 continue; |
| 1372 |
| 1373 } |
| 1374 |
| 1375 if (st.st_size > MAX_FILE) |
| 1376 FATAL("Test case '%s' is too big (%s, limit is %s)", fn, |
| 1377 DMS(st.st_size), DMS(MAX_FILE)); |
| 1378 |
| 1379 /* Check for metadata that indicates that deterministic fuzzing |
| 1380 is complete for this entry. We don't want to repeat deterministic |
| 1381 fuzzing when resuming aborted scans, because it would be pointless |
| 1382 and probably very time-consuming. */ |
| 1383 |
| 1384 if (!access(dfn, F_OK)) passed_det = 1; |
| 1385 ck_free(dfn); |
| 1386 |
| 1387 add_to_queue(fn, st.st_size, passed_det); |
| 1388 |
| 1389 } |
| 1390 |
| 1391 free(nl); /* not tracked */ |
| 1392 |
| 1393 if (!queued_paths) { |
| 1394 |
| 1395 SAYF("\n" cLRD "[-] " cRST |
| 1396 "Looks like there are no valid test cases in the input directory! The f
uzzer\n" |
| 1397 " needs one or more test case to start with - ideally, a small file
under\n" |
| 1398 " 1 kB or so. The cases must be stored as regular files directly in
the\n" |
| 1399 " input directory.\n"); |
| 1400 |
| 1401 FATAL("No usable test cases in '%s'", in_dir); |
| 1402 |
| 1403 } |
| 1404 |
| 1405 last_path_time = 0; |
| 1406 queued_at_start = queued_paths; |
| 1407 |
| 1408 } |
| 1409 |
| 1410 |
| 1411 /* Helper function for load_extras. */ |
| 1412 |
| 1413 static int compare_extras_len(const void* p1, const void* p2) { |
| 1414 struct extra_data *e1 = (struct extra_data*)p1, |
| 1415 *e2 = (struct extra_data*)p2; |
| 1416 |
| 1417 return e1->len - e2->len; |
| 1418 } |
| 1419 |
| 1420 static int compare_extras_use_d(const void* p1, const void* p2) { |
| 1421 struct extra_data *e1 = (struct extra_data*)p1, |
| 1422 *e2 = (struct extra_data*)p2; |
| 1423 |
| 1424 return e2->hit_cnt - e1->hit_cnt; |
| 1425 } |
| 1426 |
| 1427 |
| 1428 /* Read extras from a file, sort by size. */ |
| 1429 |
| 1430 static void load_extras_file(u8* fname, u32* min_len, u32* max_len, |
| 1431 u32 dict_level) { |
| 1432 |
| 1433 FILE* f; |
| 1434 u8 buf[MAX_LINE]; |
| 1435 u8 *lptr; |
| 1436 u32 cur_line = 0; |
| 1437 |
| 1438 f = fopen(fname, "r"); |
| 1439 |
| 1440 if (!f) PFATAL("Unable to open '%s'", fname); |
| 1441 |
| 1442 while ((lptr = fgets(buf, MAX_LINE, f))) { |
| 1443 |
| 1444 u8 *rptr, *wptr; |
| 1445 u32 klen = 0; |
| 1446 |
| 1447 cur_line++; |
| 1448 |
| 1449 /* Trim on left and right. */ |
| 1450 |
| 1451 while (isspace(*lptr)) lptr++; |
| 1452 |
| 1453 rptr = lptr + strlen(lptr) - 1; |
| 1454 while (rptr >= lptr && isspace(*rptr)) rptr--; |
| 1455 rptr++; |
| 1456 *rptr = 0; |
| 1457 |
| 1458 /* Skip empty lines and comments. */ |
| 1459 |
| 1460 if (!*lptr || *lptr == '#') continue; |
| 1461 |
| 1462 /* All other lines must end with '"', which we can consume. */ |
| 1463 |
| 1464 rptr--; |
| 1465 |
| 1466 if (rptr < lptr || *rptr != '"') |
| 1467 FATAL("Malformed name=\"value\" pair in line %u.", cur_line); |
| 1468 |
| 1469 *rptr = 0; |
| 1470 |
| 1471 /* Skip alphanumerics and underscores (label). */ |
| 1472 |
| 1473 while (isalnum(*lptr) || *lptr == '_') lptr++; |
| 1474 |
| 1475 /* If @number follows, parse that. */ |
| 1476 |
| 1477 if (*lptr == '@') { |
| 1478 |
| 1479 lptr++; |
| 1480 if (atoi(lptr) > dict_level) continue; |
| 1481 while (isdigit(*lptr)) lptr++; |
| 1482 |
| 1483 } |
| 1484 |
| 1485 /* Skip whitespace and = signs. */ |
| 1486 |
| 1487 while (isspace(*lptr) || *lptr == '=') lptr++; |
| 1488 |
| 1489 /* Consume opening '"'. */ |
| 1490 |
| 1491 if (*lptr != '"') |
| 1492 FATAL("Malformed name=\"keyword\" pair in line %u.", cur_line); |
| 1493 |
| 1494 lptr++; |
| 1495 |
| 1496 if (!*lptr) FATAL("Empty keyword in line %u.", cur_line); |
| 1497 |
| 1498 /* Okay, let's allocate memory and copy data between "...", handling |
| 1499 \xNN escaping, \\, and \". */ |
| 1500 |
| 1501 extras = ck_realloc_block(extras, (extras_cnt + 1) * |
| 1502 sizeof(struct extra_data)); |
| 1503 |
| 1504 wptr = extras[extras_cnt].data = ck_alloc(rptr - lptr); |
| 1505 |
| 1506 while (*lptr) { |
| 1507 |
| 1508 char* hexdigits = "0123456789abcdef"; |
| 1509 |
| 1510 switch (*lptr) { |
| 1511 |
| 1512 case 1 ... 31: |
| 1513 case 128 ... 255: |
| 1514 FATAL("Non-printable characters in line %u.", cur_line); |
| 1515 |
| 1516 case '\\': |
| 1517 |
| 1518 lptr++; |
| 1519 |
| 1520 if (*lptr == '\\' || *lptr == '"') { |
| 1521 *(wptr++) = *(lptr++); |
| 1522 klen++; |
| 1523 break; |
| 1524 } |
| 1525 |
| 1526 if (*lptr != 'x' || !isxdigit(lptr[1]) || !isxdigit(lptr[2])) |
| 1527 FATAL("Invalid escaping (not \\xNN) in line %u.", cur_line); |
| 1528 |
| 1529 *(wptr++) = |
| 1530 ((strchr(hexdigits, tolower(lptr[1])) - hexdigits) << 4) | |
| 1531 (strchr(hexdigits, tolower(lptr[2])) - hexdigits); |
| 1532 |
| 1533 lptr += 3; |
| 1534 klen++; |
| 1535 |
| 1536 break; |
| 1537 |
| 1538 default: |
| 1539 |
| 1540 *(wptr++) = *(lptr++); |
| 1541 klen++; |
| 1542 |
| 1543 } |
| 1544 |
| 1545 } |
| 1546 |
| 1547 extras[extras_cnt].len = klen; |
| 1548 |
| 1549 if (extras[extras_cnt].len > MAX_DICT_FILE) |
| 1550 FATAL("Keyword too big in line %u (%s, limit is %s)", cur_line, |
| 1551 DMS(klen), DMS(MAX_DICT_FILE)); |
| 1552 |
| 1553 if (*min_len > klen) *min_len = klen; |
| 1554 if (*max_len < klen) *max_len = klen; |
| 1555 |
| 1556 extras_cnt++; |
| 1557 |
| 1558 } |
| 1559 |
| 1560 fclose(f); |
| 1561 |
| 1562 } |
| 1563 |
| 1564 |
| 1565 /* Read extras from the extras directory and sort them by size. */ |
| 1566 |
| 1567 static void load_extras(u8* dir) { |
| 1568 |
| 1569 DIR* d; |
| 1570 struct dirent* de; |
| 1571 u32 min_len = MAX_DICT_FILE, max_len = 0, dict_level = 0; |
| 1572 u8* x; |
| 1573 |
| 1574 /* If the name ends with @, extract level and continue. */ |
| 1575 |
| 1576 if ((x = strchr(dir, '@'))) { |
| 1577 |
| 1578 *x = 0; |
| 1579 dict_level = atoi(x + 1); |
| 1580 |
| 1581 } |
| 1582 |
| 1583 ACTF("Loading extra dictionary from '%s' (level %u)...", dir, dict_level); |
| 1584 |
| 1585 d = opendir(dir); |
| 1586 |
| 1587 if (!d) { |
| 1588 |
| 1589 if (errno == ENOTDIR) { |
| 1590 load_extras_file(dir, &min_len, &max_len, dict_level); |
| 1591 goto check_and_sort; |
| 1592 } |
| 1593 |
| 1594 PFATAL("Unable to open '%s'", dir); |
| 1595 |
| 1596 } |
| 1597 |
| 1598 if (x) FATAL("Dictionary levels not supported for directories."); |
| 1599 |
| 1600 while ((de = readdir(d))) { |
| 1601 |
| 1602 struct stat st; |
| 1603 u8* fn = alloc_printf("%s/%s", dir, de->d_name); |
| 1604 s32 fd; |
| 1605 |
| 1606 if (lstat(fn, &st) || access(fn, R_OK)) |
| 1607 PFATAL("Unable to access '%s'", fn); |
| 1608 |
| 1609 /* This also takes care of . and .. */ |
| 1610 if (!S_ISREG(st.st_mode) || !st.st_size) { |
| 1611 |
| 1612 ck_free(fn); |
| 1613 continue; |
| 1614 |
| 1615 } |
| 1616 |
| 1617 if (st.st_size > MAX_DICT_FILE) |
| 1618 FATAL("Extra '%s' is too big (%s, limit is %s)", fn, |
| 1619 DMS(st.st_size), DMS(MAX_DICT_FILE)); |
| 1620 |
| 1621 if (min_len > st.st_size) min_len = st.st_size; |
| 1622 if (max_len < st.st_size) max_len = st.st_size; |
| 1623 |
| 1624 extras = ck_realloc_block(extras, (extras_cnt + 1) * |
| 1625 sizeof(struct extra_data)); |
| 1626 |
| 1627 extras[extras_cnt].data = ck_alloc(st.st_size); |
| 1628 extras[extras_cnt].len = st.st_size; |
| 1629 |
| 1630 fd = open(fn, O_RDONLY); |
| 1631 |
| 1632 if (fd < 0) PFATAL("Unable to open '%s'", fn); |
| 1633 |
| 1634 ck_read(fd, extras[extras_cnt].data, st.st_size, fn); |
| 1635 |
| 1636 close(fd); |
| 1637 ck_free(fn); |
| 1638 |
| 1639 extras_cnt++; |
| 1640 |
| 1641 } |
| 1642 |
| 1643 closedir(d); |
| 1644 |
| 1645 check_and_sort: |
| 1646 |
| 1647 if (!extras_cnt) FATAL("No usable files in '%s'", dir); |
| 1648 |
| 1649 qsort(extras, extras_cnt, sizeof(struct extra_data), compare_extras_len); |
| 1650 |
| 1651 OKF("Loaded %u extra tokens, size range %s to %s.", extras_cnt, |
| 1652 DMS(min_len), DMS(max_len)); |
| 1653 |
| 1654 if (max_len > 32) |
| 1655 WARNF("Some tokens are relatively large (%s) - consider trimming.", |
| 1656 DMS(max_len)); |
| 1657 |
| 1658 if (extras_cnt > MAX_DET_EXTRAS) |
| 1659 WARNF("More than %u tokens - will use them probabilistically.", |
| 1660 MAX_DET_EXTRAS); |
| 1661 |
| 1662 } |
| 1663 |
| 1664 |
| 1665 |
| 1666 |
| 1667 /* Helper function for maybe_add_auto() */ |
| 1668 |
| 1669 static inline u8 memcmp_nocase(u8* m1, u8* m2, u32 len) { |
| 1670 |
| 1671 while (len--) if (tolower(*(m1++)) ^ tolower(*(m2++))) return 1; |
| 1672 return 0; |
| 1673 |
| 1674 } |
| 1675 |
| 1676 |
| 1677 /* Maybe add automatic extra. */ |
| 1678 |
| 1679 static void maybe_add_auto(u8* mem, u32 len) { |
| 1680 |
| 1681 u32 i; |
| 1682 |
| 1683 /* Allow users to specify that they don't want auto dictionaries. */ |
| 1684 |
| 1685 if (!MAX_AUTO_EXTRAS || !USE_AUTO_EXTRAS) return; |
| 1686 |
| 1687 /* Skip runs of identical bytes. */ |
| 1688 |
| 1689 for (i = 1; i < len; i++) |
| 1690 if (mem[0] ^ mem[i]) break; |
| 1691 |
| 1692 if (i == len) return; |
| 1693 |
| 1694 /* Reject builtin interesting values. */ |
| 1695 |
| 1696 if (len == 2) { |
| 1697 |
| 1698 i = sizeof(interesting_16) >> 1; |
| 1699 |
| 1700 while (i--) |
| 1701 if (*((u16*)mem) == interesting_16[i] || |
| 1702 *((u16*)mem) == SWAP16(interesting_16[i])) return; |
| 1703 |
| 1704 } |
| 1705 |
| 1706 if (len == 4) { |
| 1707 |
| 1708 i = sizeof(interesting_32) >> 2; |
| 1709 |
| 1710 while (i--) |
| 1711 if (*((u32*)mem) == interesting_32[i] || |
| 1712 *((u32*)mem) == SWAP32(interesting_32[i])) return; |
| 1713 |
| 1714 } |
| 1715 |
| 1716 /* Reject anything that matches existing extras. Do a case-insensitive |
| 1717 match. We optimize by exploiting the fact that extras[] are sorted |
| 1718 by size. */ |
| 1719 |
| 1720 for (i = 0; i < extras_cnt; i++) |
| 1721 if (extras[i].len >= len) break; |
| 1722 |
| 1723 for (; i < extras_cnt && extras[i].len == len; i++) |
| 1724 if (!memcmp_nocase(extras[i].data, mem, len)) return; |
| 1725 |
| 1726 /* Last but not least, check a_extras[] for matches. There are no |
| 1727 guarantees of a particular sort order. */ |
| 1728 |
| 1729 auto_changed = 1; |
| 1730 |
| 1731 for (i = 0; i < a_extras_cnt; i++) { |
| 1732 |
| 1733 if (a_extras[i].len == len && !memcmp_nocase(a_extras[i].data, mem, len)) { |
| 1734 |
| 1735 a_extras[i].hit_cnt++; |
| 1736 goto sort_a_extras; |
| 1737 |
| 1738 } |
| 1739 |
| 1740 } |
| 1741 |
| 1742 /* At this point, looks like we're dealing with a new entry. So, let's |
| 1743 append it if we have room. Otherwise, let's randomly evict some other |
| 1744 entry from the bottom half of the list. */ |
| 1745 |
| 1746 if (a_extras_cnt < MAX_AUTO_EXTRAS) { |
| 1747 |
| 1748 a_extras = ck_realloc_block(a_extras, (a_extras_cnt + 1) * |
| 1749 sizeof(struct extra_data)); |
| 1750 |
| 1751 a_extras[a_extras_cnt].data = ck_memdup(mem, len); |
| 1752 a_extras[a_extras_cnt].len = len; |
| 1753 a_extras_cnt++; |
| 1754 |
| 1755 } else { |
| 1756 |
| 1757 i = MAX_AUTO_EXTRAS / 2 + |
| 1758 UR((MAX_AUTO_EXTRAS + 1) / 2); |
| 1759 |
| 1760 ck_free(a_extras[i].data); |
| 1761 |
| 1762 a_extras[i].data = ck_memdup(mem, len); |
| 1763 a_extras[i].len = len; |
| 1764 a_extras[i].hit_cnt = 0; |
| 1765 |
| 1766 } |
| 1767 |
| 1768 sort_a_extras: |
| 1769 |
| 1770 /* First, sort all auto extras by use count, descending order. */ |
| 1771 |
| 1772 qsort(a_extras, a_extras_cnt, sizeof(struct extra_data), |
| 1773 compare_extras_use_d); |
| 1774 |
| 1775 /* Then, sort the top USE_AUTO_EXTRAS entries by size. */ |
| 1776 |
| 1777 qsort(a_extras, MIN(USE_AUTO_EXTRAS, a_extras_cnt), |
| 1778 sizeof(struct extra_data), compare_extras_len); |
| 1779 |
| 1780 } |
| 1781 |
| 1782 |
| 1783 /* Save automatically generated extras. */ |
| 1784 |
| 1785 static void save_auto(void) { |
| 1786 |
| 1787 u32 i; |
| 1788 |
| 1789 if (!auto_changed) return; |
| 1790 auto_changed = 0; |
| 1791 |
| 1792 for (i = 0; i < MIN(USE_AUTO_EXTRAS, a_extras_cnt); i++) { |
| 1793 |
| 1794 u8* fn = alloc_printf("%s/queue/.state/auto_extras/auto_%06u", out_dir, i); |
| 1795 s32 fd; |
| 1796 |
| 1797 fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); |
| 1798 |
| 1799 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
| 1800 |
| 1801 ck_write(fd, a_extras[i].data, a_extras[i].len, fn); |
| 1802 |
| 1803 close(fd); |
| 1804 ck_free(fn); |
| 1805 |
| 1806 } |
| 1807 |
| 1808 } |
| 1809 |
| 1810 |
| 1811 /* Load automatically generated extras. */ |
| 1812 |
| 1813 static void load_auto(void) { |
| 1814 |
| 1815 u32 i; |
| 1816 |
| 1817 for (i = 0; i < USE_AUTO_EXTRAS; i++) { |
| 1818 |
| 1819 u8 tmp[MAX_AUTO_EXTRA + 1]; |
| 1820 u8* fn = alloc_printf("%s/.state/auto_extras/auto_%06u", in_dir, i); |
| 1821 s32 fd, len; |
| 1822 |
| 1823 fd = open(fn, O_RDONLY, 0600); |
| 1824 |
| 1825 if (fd < 0) { |
| 1826 |
| 1827 if (errno != ENOENT) PFATAL("Unable to open '%s'", fn); |
| 1828 ck_free(fn); |
| 1829 break; |
| 1830 |
| 1831 } |
| 1832 |
| 1833 /* We read one byte more to cheaply detect tokens that are too |
| 1834 long (and skip them). */ |
| 1835 |
| 1836 len = read(fd, tmp, MAX_AUTO_EXTRA + 1); |
| 1837 |
| 1838 if (len < 0) PFATAL("Unable to read from '%s'", fn); |
| 1839 |
| 1840 if (len >= MIN_AUTO_EXTRA && len <= MAX_AUTO_EXTRA) |
| 1841 maybe_add_auto(tmp, len); |
| 1842 |
| 1843 close(fd); |
| 1844 ck_free(fn); |
| 1845 |
| 1846 } |
| 1847 |
| 1848 if (i) OKF("Loaded %u auto-discovered dictionary tokens.", i); |
| 1849 else OKF("No auto-generated dictionary tokens to reuse."); |
| 1850 |
| 1851 } |
| 1852 |
| 1853 |
| 1854 /* Destroy extras. */ |
| 1855 |
| 1856 static void destroy_extras(void) { |
| 1857 |
| 1858 u32 i; |
| 1859 |
| 1860 for (i = 0; i < extras_cnt; i++) |
| 1861 ck_free(extras[i].data); |
| 1862 |
| 1863 ck_free(extras); |
| 1864 |
| 1865 for (i = 0; i < a_extras_cnt; i++) |
| 1866 ck_free(a_extras[i].data); |
| 1867 |
| 1868 ck_free(a_extras); |
| 1869 |
| 1870 } |
| 1871 |
| 1872 |
| 1873 /* Spin up fork server (instrumented mode only). The idea is explained here: |
| 1874 |
| 1875 http://lcamtuf.blogspot.com/2014/10/fuzzing-binaries-without-execve.html |
| 1876 |
| 1877 In essence, the instrumentation allows us to skip execve(), and just keep |
| 1878 cloning a stopped child. So, we just execute once, and then send commands |
| 1879 through a pipe. The other part of this logic is in afl-as.h. */ |
| 1880 |
| 1881 EXP_ST void init_forkserver(char** argv) { |
| 1882 |
| 1883 static struct itimerval it; |
| 1884 int st_pipe[2], ctl_pipe[2]; |
| 1885 int status; |
| 1886 s32 rlen; |
| 1887 |
| 1888 ACTF("Spinning up the fork server..."); |
| 1889 |
| 1890 if (pipe(st_pipe) || pipe(ctl_pipe)) PFATAL("pipe() failed"); |
| 1891 |
| 1892 forksrv_pid = fork(); |
| 1893 |
| 1894 if (forksrv_pid < 0) PFATAL("fork() failed"); |
| 1895 |
| 1896 if (!forksrv_pid) { |
| 1897 |
| 1898 struct rlimit r; |
| 1899 |
| 1900 #ifdef HAVE_AFFINITY |
| 1901 if (use_affinity) set_cpu_affinity(cpu_aff_child); |
| 1902 #endif /* HAVE_AFFINITY */ |
| 1903 |
| 1904 /* Umpf. On OpenBSD, the default fd limit for root users is set to |
| 1905 soft 128. Let's try to fix that... */ |
| 1906 |
| 1907 if (!getrlimit(RLIMIT_NOFILE, &r) && r.rlim_cur < FORKSRV_FD + 2) { |
| 1908 |
| 1909 r.rlim_cur = FORKSRV_FD + 2; |
| 1910 setrlimit(RLIMIT_NOFILE, &r); /* Ignore errors */ |
| 1911 |
| 1912 } |
| 1913 |
| 1914 if (mem_limit) { |
| 1915 |
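| /* mem_limit is expressed in MB (see its declaration); the shift by 20 |
| converts it to bytes for setrlimit(). */ |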
| 1916 r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; |
| 1917 |
| 1918 #ifdef RLIMIT_AS |
| 1919 |
| 1920 setrlimit(RLIMIT_AS, &r); /* Ignore errors */ |
| 1921 |
| 1922 #else |
| 1923 |
| 1924 /* This takes care of OpenBSD, which doesn't have RLIMIT_AS, but |
| 1925 according to reliable sources, RLIMIT_DATA covers anonymous |
| 1926 maps - so we should be getting good protection against OOM bugs. */ |
| 1927 |
| 1928 setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ |
| 1929 |
| 1930 #endif /* ^RLIMIT_AS */ |
| 1931 |
| 1932 |
| 1933 } |
| 1934 |
| 1935 /* Dumping cores is slow and can lead to anomalies if SIGKILL is delivered |
| 1936 before the dump is complete. */ |
| 1937 |
| 1938 r.rlim_max = r.rlim_cur = 0; |
| 1939 |
| 1940 setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ |
| 1941 |
| 1942 /* Isolate the process and configure standard descriptors. If out_file is |
| 1943 specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ |
| 1944 |
| 1945 setsid(); |
| 1946 |
| 1947 dup2(dev_null_fd, 1); |
| 1948 dup2(dev_null_fd, 2); |
| 1949 |
| 1950 if (out_file) { |
| 1951 |
| 1952 dup2(dev_null_fd, 0); |
| 1953 |
| 1954 } else { |
| 1955 |
| 1956 dup2(out_fd, 0); |
| 1957 close(out_fd); |
| 1958 |
| 1959 } |
| 1960 |
| 1961 /* Set up control and status pipes, close the unneeded original fds. */ |
| 1962 |
| 1963 if (dup2(ctl_pipe[0], FORKSRV_FD) < 0) PFATAL("dup2() failed"); |
| 1964 if (dup2(st_pipe[1], FORKSRV_FD + 1) < 0) PFATAL("dup2() failed"); |
| 1965 |
| 1966 close(ctl_pipe[0]); |
| 1967 close(ctl_pipe[1]); |
| 1968 close(st_pipe[0]); |
| 1969 close(st_pipe[1]); |
| 1970 |
| 1971 close(out_dir_fd); |
| 1972 close(dev_null_fd); |
| 1973 close(dev_urandom_fd); |
| 1974 close(fileno(plot_file)); |
| 1975 |
| 1976 /* This should improve performance a bit, since it stops the linker from |
| 1977 doing extra work post-fork(). */ |
| 1978 |
| 1979 if (!getenv("LD_BIND_LAZY")) setenv("LD_BIND_NOW", "1", 0); |
| 1980 |
| 1981 /* Set sane defaults for ASAN if nothing else specified. */ |
| 1982 |
| 1983 setenv("ASAN_OPTIONS", "abort_on_error=1:" |
| 1984 "detect_leaks=0:" |
| 1985 "symbolize=0:" |
| 1986 "allocator_may_return_null=1", 0); |
| 1987 |
| 1988 /* MSAN is tricky, because it doesn't support abort_on_error=1 at this |
| 1989 point. So, we do this in a very hacky way. */ |
| 1990 |
| 1991 setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" |
| 1992 "symbolize=0:" |
| 1993 "abort_on_error=1:" |
| 1994 "allocator_may_return_null=1:" |
| 1995 "msan_track_origins=0", 0); |
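|     /* (Editor's note) The distinctive exit code set above is what lets |
|        run_target() treat an MSAN report as a crash: it checks for |
|        WEXITSTATUS(status) == MSAN_ERROR and returns FAULT_CRASH. */ |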
| 1996 |
| 1997 execv(target_path, argv); |
| 1998 |
| 1999 /* Use a distinctive bitmap signature to tell the parent about execv() |
| 2000 falling through. */ |
| 2001 |
| 2002 *(u32*)trace_bits = EXEC_FAIL_SIG; |
| 2003 exit(0); |
| 2004 |
| 2005 } |
| 2006 |
| 2007 /* Close the unneeded endpoints. */ |
| 2008 |
| 2009 close(ctl_pipe[0]); |
| 2010 close(st_pipe[1]); |
| 2011 |
| 2012 fsrv_ctl_fd = ctl_pipe[1]; |
| 2013 fsrv_st_fd = st_pipe[0]; |
| 2014 |
| 2015 /* Wait for the fork server to come up, but don't wait too long. */ |
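|   /* (Editor's note) "Too long" is exec_tmout * FORK_WAIT_MULT ms below; |
|      assuming the stock FORK_WAIT_MULT of 10 (see config.h), the default |
|      1000 ms timeout gives the fork server ten seconds to report in. */ |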
| 2016 |
| 2017 it.it_value.tv_sec = ((exec_tmout * FORK_WAIT_MULT) / 1000); |
| 2018 it.it_value.tv_usec = ((exec_tmout * FORK_WAIT_MULT) % 1000) * 1000; |
| 2019 |
| 2020 setitimer(ITIMER_REAL, &it, NULL); |
| 2021 |
| 2022 rlen = read(fsrv_st_fd, &status, 4); |
| 2023 |
| 2024 it.it_value.tv_sec = 0; |
| 2025 it.it_value.tv_usec = 0; |
| 2026 |
| 2027 setitimer(ITIMER_REAL, &it, NULL); |
| 2028 |
| 2029 /* If we have a four-byte "hello" message from the server, we're all set. |
| 2030 Otherwise, try to figure out what went wrong. */ |
| 2031 |
| 2032 if (rlen == 4) { |
| 2033 OKF("All right - fork server is up."); |
| 2034 return; |
| 2035 } |
| 2036 |
| 2037 if (child_timed_out) |
| 2038 FATAL("Timeout while initializing fork server (adjusting -t may help)"); |
| 2039 |
| 2040 if (waitpid(forksrv_pid, &status, 0) <= 0) |
| 2041 PFATAL("waitpid() failed"); |
| 2042 |
| 2043 if (WIFSIGNALED(status)) { |
| 2044 |
| 2045 if (mem_limit && mem_limit < 500 && uses_asan) { |
| 2046 |
| 2047 SAYF("\n" cLRD "[-] " cRST |
| 2048            "Whoops, the target binary crashed suddenly, before receiving any input\n" |
| 2049            "    from the fuzzer! Since it seems to be built with ASAN and you have a\n" |
| 2050            "    restrictive memory limit configured, this is expected; please read\n" |
| 2051            "    %s/notes_for_asan.txt for help.\n", doc_path); |
| 2052 |
| 2053 } else if (!mem_limit) { |
| 2054 |
| 2055 SAYF("\n" cLRD "[-] " cRST |
| 2056            "Whoops, the target binary crashed suddenly, before receiving any input\n" |
| 2057            "    from the fuzzer! There are several probable explanations:\n\n" |
| 2058  |
| 2059            "    - The binary is just buggy and explodes entirely on its own. If so, you\n" |
| 2060            "      need to fix the underlying problem or find a better replacement.\n\n" |
| 2061  |
| 2062 #ifdef __APPLE__ |
| 2063  |
| 2064            "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" |
| 2065            "      break afl-fuzz performance optimizations when running platform-specific\n" |
| 2066            "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" |
| 2067  |
| 2068 #endif /* __APPLE__ */ |
| 2069  |
| 2070            "    - Less likely, there is a horrible bug in the fuzzer. If other options\n" |
| 2071            "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n"); |
| 2072 |
| 2073 } else { |
| 2074 |
| 2075 SAYF("\n" cLRD "[-] " cRST |
| 2076            "Whoops, the target binary crashed suddenly, before receiving any input\n" |
| 2077            "    from the fuzzer! There are several probable explanations:\n\n" |
| 2078  |
| 2079            "    - The current memory limit (%s) is too restrictive, causing the\n" |
| 2080            "      target to hit an OOM condition in the dynamic linker. Try bumping up\n" |
| 2081            "      the limit with the -m setting in the command line. A simple way to\n" |
| 2082            "      confirm this diagnosis would be:\n\n" |
| 2083  |
| 2084 #ifdef RLIMIT_AS |
| 2085            "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n" |
| 2086 #else |
| 2087            "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n" |
| 2088 #endif /* ^RLIMIT_AS */ |
| 2089  |
| 2090            "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n" |
| 2091            "      estimate the required amount of virtual memory for the binary.\n\n" |
| 2092  |
| 2093            "    - The binary is just buggy and explodes entirely on its own. If so, you\n" |
| 2094            "      need to fix the underlying problem or find a better replacement.\n\n" |
| 2095  |
| 2096 #ifdef __APPLE__ |
| 2097  |
| 2098            "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" |
| 2099            "      break afl-fuzz performance optimizations when running platform-specific\n" |
| 2100            "      targets. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" |
| 2101  |
| 2102 #endif /* __APPLE__ */ |
| 2103  |
| 2104            "    - Less likely, there is a horrible bug in the fuzzer. If other options\n" |
| 2105            "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n", |
| 2106            DMS(mem_limit << 20), mem_limit - 1); |
| 2107 |
| 2108 } |
| 2109 |
| 2110 FATAL("Fork server crashed with signal %d", WTERMSIG(status)); |
| 2111 |
| 2112 } |
| 2113 |
| 2114 if (*(u32*)trace_bits == EXEC_FAIL_SIG) |
| 2115 FATAL("Unable to execute target application ('%s')", argv[0]); |
| 2116 |
| 2117 if (mem_limit && mem_limit < 500 && uses_asan) { |
| 2118 |
| 2119 SAYF("\n" cLRD "[-] " cRST |
| 2120          "Hmm, looks like the target binary terminated before we could complete a\n" |
| 2121          "    handshake with the injected code. Since it seems to be built with ASAN and\n" |
| 2122          "    you have a restrictive memory limit configured, this is expected; please\n" |
| 2123          "    read %s/notes_for_asan.txt for help.\n", doc_path); |
| 2124 |
| 2125 } else if (!mem_limit) { |
| 2126 |
| 2127 SAYF("\n" cLRD "[-] " cRST |
| 2128          "Hmm, looks like the target binary terminated before we could complete a\n" |
| 2129          "    handshake with the injected code. Perhaps there is a horrible bug in the\n" |
| 2130          "    fuzzer. Poke <lcamtuf@coredump.cx> for troubleshooting tips.\n"); |
| 2131 |
| 2132 } else { |
| 2133 |
| 2134 SAYF("\n" cLRD "[-] " cRST |
| 2135          "Hmm, looks like the target binary terminated before we could complete a\n" |
| 2136          "    handshake with the injected code. There are %s probable explanations:\n\n" |
| 2137  |
| 2138          "%s" |
| 2139          "    - The current memory limit (%s) is too restrictive, causing an OOM\n" |
| 2140          "      fault in the dynamic linker. This can be fixed with the -m option. A\n" |
| 2141          "      simple way to confirm the diagnosis may be:\n\n" |
| 2142  |
| 2143 #ifdef RLIMIT_AS |
| 2144          "      ( ulimit -Sv $[%llu << 10]; /path/to/fuzzed_app )\n\n" |
| 2145 #else |
| 2146          "      ( ulimit -Sd $[%llu << 10]; /path/to/fuzzed_app )\n\n" |
| 2147 #endif /* ^RLIMIT_AS */ |
| 2148  |
| 2149          "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n" |
| 2150          "      estimate the required amount of virtual memory for the binary.\n\n" |
| 2151  |
| 2152          "    - Less likely, there is a horrible bug in the fuzzer. If other options\n" |
| 2153          "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n", |
| 2154          getenv(DEFER_ENV_VAR) ? "three" : "two", |
| 2155          getenv(DEFER_ENV_VAR) ? |
| 2156          "    - You are using deferred forkserver, but __AFL_INIT() is never\n" |
| 2157          "      reached before the program terminates.\n\n" : "", |
| 2158          DMS(mem_limit << 20), mem_limit - 1); |
| 2159 |
| 2160 } |
| 2161 |
| 2162 FATAL("Fork server handshake failed"); |
| 2163 |
| 2164 } |
| 2165 |
| 2166 |
| 2167 /* Execute target application, monitoring for timeouts. Return status |
| 2168 information. The called program will update trace_bits[]. */ |
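| /* (Editor's note) In forkserver mode, the round trip below is: write four |
|    bytes (the prev_timed_out flag) to fsrv_ctl_fd, read the four-byte child |
|    PID from fsrv_st_fd, then read the four-byte waitpid() status once the |
|    run completes. */ |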
| 2169 |
| 2170 static u8 run_target(char** argv) { |
| 2171 |
| 2172 static struct itimerval it; |
| 2173 static u32 prev_timed_out = 0; |
| 2174 |
| 2175 int status = 0; |
| 2176 u32 tb4; |
| 2177 |
| 2178 child_timed_out = 0; |
| 2179 |
| 2180 /* After this memset, trace_bits[] are effectively volatile, so we |
| 2181 must prevent any earlier operations from venturing into that |
| 2182 territory. */ |
| 2183 |
| 2184 memset(trace_bits, 0, MAP_SIZE); |
| 2185 MEM_BARRIER(); |
| 2186 |
| 2187 /* If we're running in "dumb" mode, we can't rely on the fork server |
| 2188 logic compiled into the target program, so we will just keep calling |
| 2189 execve(). There is a bit of code duplication between here and |
| 2190 init_forkserver(), but c'est la vie. */ |
| 2191 |
| 2192 if (dumb_mode == 1 || no_forkserver) { |
| 2193 |
| 2194 child_pid = fork(); |
| 2195 |
| 2196 if (child_pid < 0) PFATAL("fork() failed"); |
| 2197 |
| 2198 if (!child_pid) { |
| 2199 |
| 2200 struct rlimit r; |
| 2201 |
| 2202 #ifdef HAVE_AFFINITY |
| 2203 if (use_affinity) set_cpu_affinity(cpu_aff_child); |
| 2204 #endif /* HAVE_AFFINITY */ |
| 2205 |
| 2206 if (mem_limit) { |
| 2207 |
| 2208 r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20; |
| 2209 |
| 2210 #ifdef RLIMIT_AS |
| 2211 |
| 2212 setrlimit(RLIMIT_AS, &r); /* Ignore errors */ |
| 2213 |
| 2214 #else |
| 2215 |
| 2216 setrlimit(RLIMIT_DATA, &r); /* Ignore errors */ |
| 2217 |
| 2218 #endif /* ^RLIMIT_AS */ |
| 2219 |
| 2220 } |
| 2221 |
| 2222 r.rlim_max = r.rlim_cur = 0; |
| 2223 |
| 2224 setrlimit(RLIMIT_CORE, &r); /* Ignore errors */ |
| 2225 |
| 2226 /* Isolate the process and configure standard descriptors. If out_file is |
| 2227 specified, stdin is /dev/null; otherwise, out_fd is cloned instead. */ |
| 2228 |
| 2229 setsid(); |
| 2230 |
| 2231 dup2(dev_null_fd, 1); |
| 2232 dup2(dev_null_fd, 2); |
| 2233 |
| 2234 if (out_file) { |
| 2235 |
| 2236 dup2(dev_null_fd, 0); |
| 2237 |
| 2238 } else { |
| 2239 |
| 2240 dup2(out_fd, 0); |
| 2241 close(out_fd); |
| 2242 |
| 2243 } |
| 2244 |
| 2245 /* On Linux, would be faster to use O_CLOEXEC. Maybe TODO. */ |
| 2246 |
| 2247 close(dev_null_fd); |
| 2248 close(out_dir_fd); |
| 2249 close(dev_urandom_fd); |
| 2250 close(fileno(plot_file)); |
| 2251 |
| 2252 /* Set sane defaults for ASAN if nothing else specified. */ |
| 2253 |
| 2254 setenv("ASAN_OPTIONS", "abort_on_error=1:" |
| 2255 "detect_leaks=0:" |
| 2256 "symbolize=0:" |
| 2257 "allocator_may_return_null=1", 0); |
| 2258 |
| 2259 setenv("MSAN_OPTIONS", "exit_code=" STRINGIFY(MSAN_ERROR) ":" |
| 2260 "symbolize=0:" |
| 2261 "msan_track_origins=0", 0); |
| 2262 |
| 2263 execv(target_path, argv); |
| 2264 |
| 2265 /* Use a distinctive bitmap value to tell the parent about execv() |
| 2266 falling through. */ |
| 2267 |
| 2268 *(u32*)trace_bits = EXEC_FAIL_SIG; |
| 2269 exit(0); |
| 2270 |
| 2271 } |
| 2272 |
| 2273 } else { |
| 2274 |
| 2275 s32 res; |
| 2276 |
| 2277 /* In non-dumb mode, we have the fork server up and running, so simply |
| 2278 tell it to have at it, and then read back PID. */ |
| 2279 |
| 2280 if ((res = write(fsrv_ctl_fd, &prev_timed_out, 4)) != 4) { |
| 2281 |
| 2282 if (stop_soon) return 0; |
| 2283 RPFATAL(res, "Unable to request new process from fork server (OOM?)"); |
| 2284 |
| 2285 } |
| 2286 |
| 2287 if ((res = read(fsrv_st_fd, &child_pid, 4)) != 4) { |
| 2288 |
| 2289 if (stop_soon) return 0; |
| 2290 RPFATAL(res, "Unable to request new process from fork server (OOM?)"); |
| 2291 |
| 2292 } |
| 2293 |
| 2294 if (child_pid <= 0) FATAL("Fork server is misbehaving (OOM?)"); |
| 2295 |
| 2296 } |
| 2297 |
| 2298   /* Configure timeout, as requested by user, then wait for child to terminate. */ |
| 2299 |
| 2300 it.it_value.tv_sec = (exec_tmout / 1000); |
| 2301 it.it_value.tv_usec = (exec_tmout % 1000) * 1000; |
| 2302 |
| 2303 setitimer(ITIMER_REAL, &it, NULL); |
| 2304 |
| 2305 /* The SIGALRM handler simply kills the child_pid and sets child_timed_out. */ |
| 2306 |
| 2307 if (dumb_mode == 1 || no_forkserver) { |
| 2308 |
| 2309 if (waitpid(child_pid, &status, 0) <= 0) PFATAL("waitpid() failed"); |
| 2310 |
| 2311 } else { |
| 2312 |
| 2313 s32 res; |
| 2314 |
| 2315 if ((res = read(fsrv_st_fd, &status, 4)) != 4) { |
| 2316 |
| 2317 if (stop_soon) return 0; |
| 2318 RPFATAL(res, "Unable to communicate with fork server (OOM?)"); |
| 2319 |
| 2320 } |
| 2321 |
| 2322 } |
| 2323 |
| 2324 child_pid = 0; |
| 2325 it.it_value.tv_sec = 0; |
| 2326 it.it_value.tv_usec = 0; |
| 2327 |
| 2328 setitimer(ITIMER_REAL, &it, NULL); |
| 2329 |
| 2330 total_execs++; |
| 2331 |
| 2332 /* Any subsequent operations on trace_bits must not be moved by the |
| 2333 compiler below this point. Past this location, trace_bits[] behave |
| 2334 very normally and do not have to be treated as volatile. */ |
| 2335 |
| 2336 MEM_BARRIER(); |
| 2337 |
| 2338 tb4 = *(u32*)trace_bits; |
| 2339 |
| 2340 #ifdef __x86_64__ |
| 2341 classify_counts((u64*)trace_bits); |
| 2342 #else |
| 2343 classify_counts((u32*)trace_bits); |
| 2344 #endif /* ^__x86_64__ */ |
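|   /* (Editor's note) classify_counts() buckets raw edge-hit counts into |
|      coarse power-of-two ranges (1, 2, 3, 4-7, 8-15, 16-31, 32-127, 128+), |
|      so small jitter in loop counts does not register as a new path. */ |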
| 2345 |
| 2346 prev_timed_out = child_timed_out; |
| 2347 |
| 2348 /* Report outcome to caller. */ |
| 2349 |
| 2350 if (child_timed_out) return FAULT_HANG; |
| 2351 |
| 2352 if (WIFSIGNALED(status) && !stop_soon) { |
| 2353 kill_signal = WTERMSIG(status); |
| 2354 return FAULT_CRASH; |
| 2355 } |
| 2356 |
| 2357 /* A somewhat nasty hack for MSAN, which doesn't support abort_on_error and |
| 2358 must use a special exit code. */ |
| 2359 |
| 2360 if (uses_asan && WEXITSTATUS(status) == MSAN_ERROR) { |
| 2361 kill_signal = 0; |
| 2362 return FAULT_CRASH; |
| 2363 } |
| 2364 |
| 2365 if ((dumb_mode == 1 || no_forkserver) && tb4 == EXEC_FAIL_SIG) |
| 2366 return FAULT_ERROR; |
| 2367 |
| 2368 return FAULT_NONE; |
| 2369 |
| 2370 } |
| 2371 |
| 2372 |
| 2373 /* Write modified data to file for testing. If out_file is set, the old file |
| 2374 is unlinked and a new one is created. Otherwise, out_fd is rewound and |
| 2375 truncated. */ |
| 2376 |
| 2377 static void write_to_testcase(void* mem, u32 len) { |
| 2378 |
| 2379 s32 fd = out_fd; |
| 2380 |
| 2381 if (out_file) { |
| 2382 |
| 2383 unlink(out_file); /* Ignore errors. */ |
| 2384 |
| 2385 fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 2386 |
| 2387 if (fd < 0) PFATAL("Unable to create '%s'", out_file); |
| 2388 |
| 2389 } else lseek(fd, 0, SEEK_SET); |
| 2390 |
| 2391 ck_write(fd, mem, len, out_file); |
| 2392 |
| 2393 if (!out_file) { |
| 2394 |
| 2395 if (ftruncate(fd, len)) PFATAL("ftruncate() failed"); |
| 2396 lseek(fd, 0, SEEK_SET); |
| 2397 |
| 2398 } else close(fd); |
| 2399 |
| 2400 } |
| 2401 |
| 2402 |
| 2403 /* The same, but with an adjustable gap. Used for trimming. */ |
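| /* (Editor's note) For example, len = 10, skip_at = 4, skip_len = 2 writes |
|    bytes 0-3 and 6-9, i.e. an 8-byte test case with the two-byte chunk at |
|    offset 4 trimmed out. */ |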
| 2404 |
| 2405 static void write_with_gap(void* mem, u32 len, u32 skip_at, u32 skip_len) { |
| 2406 |
| 2407 s32 fd = out_fd; |
| 2408 u32 tail_len = len - skip_at - skip_len; |
| 2409 |
| 2410 if (out_file) { |
| 2411 |
| 2412 unlink(out_file); /* Ignore errors. */ |
| 2413 |
| 2414 fd = open(out_file, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 2415 |
| 2416 if (fd < 0) PFATAL("Unable to create '%s'", out_file); |
| 2417 |
| 2418 } else lseek(fd, 0, SEEK_SET); |
| 2419 |
| 2420 if (skip_at) ck_write(fd, mem, skip_at, out_file); |
| 2421 |
| 2422 if (tail_len) ck_write(fd, mem + skip_at + skip_len, tail_len, out_file); |
| 2423 |
| 2424 if (!out_file) { |
| 2425 |
| 2426 if (ftruncate(fd, len - skip_len)) PFATAL("ftruncate() failed"); |
| 2427 lseek(fd, 0, SEEK_SET); |
| 2428 |
| 2429 } else close(fd); |
| 2430 |
| 2431 } |
| 2432 |
| 2433 |
| 2434 static void show_stats(void); |
| 2435 |
| 2436 /* Calibrate a new test case. This is done when processing the input directory |
| 2437 to warn about flaky or otherwise problematic test cases early on; and when |
| 2438 new paths are discovered to detect variable behavior and so on. */ |
| 2439 |
| 2440 static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem, |
| 2441 u32 handicap, u8 from_queue) { |
| 2442 |
| 2443   u8  fault = 0, new_bits = 0, var_detected = 0, first_run = (q->exec_cksum == 0); |
| 2444 u64 start_us, stop_us; |
| 2445 |
| 2446 s32 old_sc = stage_cur, old_sm = stage_max, old_tmout = exec_tmout; |
| 2447 u8* old_sn = stage_name; |
| 2448 |
| 2449 /* Be a bit more generous about timeouts when resuming sessions, or when |
| 2450 trying to calibrate already-added finds. This helps avoid trouble due |
| 2451 to intermittent latency. */ |
| 2452 |
| 2453 if (!from_queue || resuming_fuzz) |
| 2454 exec_tmout = MAX(exec_tmout + CAL_TMOUT_ADD, |
| 2455 exec_tmout * CAL_TMOUT_PERC / 100); |
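|   /* (Editor's note) E.g., assuming the stock config.h values of |
|      CAL_TMOUT_ADD = 50 and CAL_TMOUT_PERC = 125, a 1000 ms timeout is |
|      bumped to MAX(1000 + 50, 1000 * 125 / 100) = 1250 ms for calibration. */ |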
| 2456 |
| 2457 q->cal_failed++; |
| 2458 |
| 2459 stage_name = "calibration"; |
| 2460 stage_max = no_var_check ? CAL_CYCLES_NO_VAR : CAL_CYCLES; |
| 2461 |
| 2462 /* Make sure the forkserver is up before we do anything, and let's not |
| 2463 count its spin-up time toward binary calibration. */ |
| 2464 |
| 2465 if (dumb_mode != 1 && !no_forkserver && !forksrv_pid) |
| 2466 init_forkserver(argv); |
| 2467 |
| 2468 start_us = get_cur_time_us(); |
| 2469 |
| 2470 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { |
| 2471 |
| 2472 u32 cksum; |
| 2473 |
| 2474 if (!first_run && !(stage_cur % stats_update_freq)) show_stats(); |
| 2475 |
| 2476 write_to_testcase(use_mem, q->len); |
| 2477 |
| 2478 fault = run_target(argv); |
| 2479 |
| 2480 /* stop_soon is set by the handler for Ctrl+C. When it's pressed, |
| 2481 we want to bail out quickly. */ |
| 2482 |
| 2483 if (stop_soon || fault != crash_mode) goto abort_calibration; |
| 2484 |
| 2485 if (!dumb_mode && !stage_cur && !count_bytes(trace_bits)) { |
| 2486 fault = FAULT_NOINST; |
| 2487 goto abort_calibration; |
| 2488 } |
| 2489 |
| 2490 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); |
| 2491 |
| 2492 if (q->exec_cksum != cksum) { |
| 2493 |
| 2494 u8 hnb = has_new_bits(virgin_bits); |
| 2495 if (hnb > new_bits) new_bits = hnb; |
| 2496 |
| 2497 if (!no_var_check && q->exec_cksum) { |
| 2498 |
| 2499 var_detected = 1; |
| 2500 stage_max = CAL_CYCLES_LONG; |
| 2501 |
| 2502 } else q->exec_cksum = cksum; |
| 2503 |
| 2504 } |
| 2505 |
| 2506 } |
| 2507 |
| 2508 stop_us = get_cur_time_us(); |
| 2509 |
| 2510 total_cal_us += stop_us - start_us; |
| 2511 total_cal_cycles += stage_max; |
| 2512 |
| 2513 /* OK, let's collect some stats about the performance of this test case. |
| 2514 This is used for fuzzing air time calculations in calculate_score(). */ |
| 2515 |
| 2516 q->exec_us = (stop_us - start_us) / stage_max; |
| 2517 q->bitmap_size = count_bytes(trace_bits); |
| 2518 q->handicap = handicap; |
| 2519 q->cal_failed = 0; |
| 2520 |
| 2521 total_bitmap_size += q->bitmap_size; |
| 2522 total_bitmap_entries++; |
| 2523 |
| 2524 update_bitmap_score(q); |
| 2525 |
| 2526 /* If this case didn't result in new output from the instrumentation, tell |
| 2527 parent. This is a non-critical problem, but something to warn the user |
| 2528 about. */ |
| 2529 |
| 2530 if (!dumb_mode && first_run && !fault && !new_bits) fault = FAULT_NOBITS; |
| 2531 |
| 2532 abort_calibration: |
| 2533 |
| 2534 if (new_bits == 2 && !q->has_new_cov) { |
| 2535 q->has_new_cov = 1; |
| 2536 queued_with_cov++; |
| 2537 } |
| 2538 |
| 2539 /* Mark variable paths. */ |
| 2540 |
| 2541 if (var_detected && !q->var_behavior) { |
| 2542 mark_as_variable(q); |
| 2543 queued_variable++; |
| 2544 } |
| 2545 |
| 2546 stage_name = old_sn; |
| 2547 stage_cur = old_sc; |
| 2548 stage_max = old_sm; |
| 2549 exec_tmout = old_tmout; |
| 2550 |
| 2551 if (!first_run) show_stats(); |
| 2552 |
| 2553 return fault; |
| 2554 |
| 2555 } |
| 2556 |
| 2557 |
| 2558 /* Examine map coverage. Called once, for first test case. */ |
| 2559 |
| 2560 static void check_map_coverage(void) { |
| 2561 |
| 2562 u32 i; |
| 2563 |
| 2564 if (count_bytes(trace_bits) < 100) return; |
| 2565 |
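|   /* (Editor's note) If nothing past the midpoint of the map is ever hit, |
|      the target was likely built with an older AFL whose instrumentation |
|      only used the lower half of the bitmap - hence the warning below. */ |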
| 2566 for (i = (1 << (MAP_SIZE_POW2 - 1)); i < MAP_SIZE; i++) |
| 2567 if (trace_bits[i]) return; |
| 2568 |
| 2569 WARNF("Recompile binary with newer version of afl to improve coverage!"); |
| 2570 |
| 2571 } |
| 2572 |
| 2573 |
| 2574 /* Perform dry run of all test cases to confirm that the app is working as |
| 2575 expected. This is done only for the initial inputs, and only once. */ |
| 2576 |
| 2577 static void perform_dry_run(char** argv) { |
| 2578 |
| 2579 struct queue_entry* q = queue; |
| 2580 u32 cal_failures = 0; |
| 2581 u8* skip_crashes = getenv("AFL_SKIP_CRASHES"); |
| 2582 |
| 2583 while (q) { |
| 2584 |
| 2585 u8* use_mem; |
| 2586 u8 res; |
| 2587 s32 fd; |
| 2588 |
| 2589 u8* fn = strrchr(q->fname, '/') + 1; |
| 2590 |
| 2591 ACTF("Attempting dry run with '%s'...", fn); |
| 2592 |
| 2593 fd = open(q->fname, O_RDONLY); |
| 2594 if (fd < 0) PFATAL("Unable to open '%s'", q->fname); |
| 2595 |
| 2596 use_mem = ck_alloc_nozero(q->len); |
| 2597 |
| 2598 if (read(fd, use_mem, q->len) != q->len) |
| 2599 FATAL("Short read from '%s'", q->fname); |
| 2600 |
| 2601 close(fd); |
| 2602 |
| 2603 res = calibrate_case(argv, q, use_mem, 0, 1); |
| 2604 ck_free(use_mem); |
| 2605 |
| 2606 if (stop_soon) return; |
| 2607 |
| 2608 if (res == crash_mode || res == FAULT_NOBITS) |
| 2609 SAYF(cGRA " len = %u, map size = %u, exec speed = %llu us\n" cRST, |
| 2610 q->len, q->bitmap_size, q->exec_us); |
| 2611 |
| 2612 switch (res) { |
| 2613 |
| 2614 case FAULT_NONE: |
| 2615 |
| 2616 if (q == queue) check_map_coverage(); |
| 2617 |
| 2618 if (crash_mode) FATAL("Test case '%s' does *NOT* crash", fn); |
| 2619 |
| 2620 break; |
| 2621 |
| 2622 case FAULT_HANG: |
| 2623 |
| 2624 if (timeout_given) { |
| 2625 |
| 2626 /* The -t nn+ syntax in the command line sets timeout_given to '2' and |
| 2627 instructs afl-fuzz to tolerate but skip queue entries that time |
| 2628 out. */ |
| 2629 |
| 2630 if (timeout_given > 1) { |
| 2631 WARNF("Test case results in a hang (skipping)"); |
| 2632 q->cal_failed = CAL_CHANCES; |
| 2633 cal_failures++; |
| 2634 break; |
| 2635 } |
| 2636 |
| 2637 SAYF("\n" cLRD "[-] " cRST |
| 2638              "The program took more than %u ms to process one of the initial test cases.\n" |
| 2639              "    Usually, the right thing to do is to relax the -t option - or to delete it\n" |
| 2640              "    altogether and allow the fuzzer to auto-calibrate. That said, if you know\n" |
| 2641              "    what you are doing and want to simply skip the unruly test cases, append\n" |
| 2642              "    '+' at the end of the value passed to -t ('-t %u+').\n", exec_tmout, |
| 2643              exec_tmout); |
| 2644 |
| 2645 FATAL("Test case '%s' results in a hang", fn); |
| 2646 |
| 2647 } else { |
| 2648 |
| 2649 SAYF("\n" cLRD "[-] " cRST |
| 2650              "The program took more than %u ms to process one of the initial test cases.\n" |
| 2651              "    This is bad news; raising the limit with the -t option is possible, but\n" |
| 2652              "    will probably make the fuzzing process extremely slow.\n\n" |
| 2653  |
| 2654              "    If this test case is just a fluke, the other option is to just avoid it\n" |
| 2655              "    altogether, and find one that is less of a CPU hog.\n", exec_tmout); |
| 2656 |
| 2657 FATAL("Test case '%s' results in a hang", fn); |
| 2658 |
| 2659 } |
| 2660 |
| 2661 case FAULT_CRASH: |
| 2662 |
| 2663 if (crash_mode) break; |
| 2664 |
| 2665 if (skip_crashes) { |
| 2666 WARNF("Test case results in a crash (skipping)"); |
| 2667 q->cal_failed = CAL_CHANCES; |
| 2668 cal_failures++; |
| 2669 break; |
| 2670 } |
| 2671 |
| 2672 if (mem_limit) { |
| 2673 |
| 2674 SAYF("\n" cLRD "[-] " cRST |
| 2675              "Oops, the program crashed with one of the test cases provided. There are\n" |
| 2676              "    several possible explanations:\n\n" |
| 2677  |
| 2678              "    - The test case causes known crashes under normal working conditions. If\n" |
| 2679              "      so, please remove it. The fuzzer should be seeded with interesting\n" |
| 2680              "      inputs - but not ones that cause an outright crash.\n\n" |
| 2681  |
| 2682              "    - The current memory limit (%s) is too low for this program, causing\n" |
| 2683              "      it to die due to OOM when parsing valid files. To fix this, try\n" |
| 2684              "      bumping it up with the -m setting in the command line. If in doubt,\n" |
| 2685              "      try something along the lines of:\n\n" |
| 2686  |
| 2687 #ifdef RLIMIT_AS |
| 2688              "      ( ulimit -Sv $[%llu << 10]; /path/to/binary [...] <testcase )\n\n" |
| 2689 #else |
| 2690              "      ( ulimit -Sd $[%llu << 10]; /path/to/binary [...] <testcase )\n\n" |
| 2691 #endif /* ^RLIMIT_AS */ |
| 2692  |
| 2693              "      Tip: you can use http://jwilk.net/software/recidivm to quickly\n" |
| 2694              "      estimate the required amount of virtual memory for the binary. Also,\n" |
| 2695              "      if you are using ASAN, see %s/notes_for_asan.txt.\n\n" |
| 2696  |
| 2697 #ifdef __APPLE__ |
| 2698  |
| 2699              "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" |
| 2700              "      break afl-fuzz performance optimizations when running platform-specific\n" |
| 2701              "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" |
| 2702  |
| 2703 #endif /* __APPLE__ */ |
| 2704  |
| 2705              "    - Least likely, there is a horrible bug in the fuzzer. If other options\n" |
| 2706              "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n", |
| 2707              DMS(mem_limit << 20), mem_limit - 1, doc_path); |
| 2708 |
| 2709 } else { |
| 2710 |
| 2711 SAYF("\n" cLRD "[-] " cRST |
| 2712              "Oops, the program crashed with one of the test cases provided. There are\n" |
| 2713              "    several possible explanations:\n\n" |
| 2714  |
| 2715              "    - The test case causes known crashes under normal working conditions. If\n" |
| 2716              "      so, please remove it. The fuzzer should be seeded with interesting\n" |
| 2717              "      inputs - but not ones that cause an outright crash.\n\n" |
| 2718  |
| 2719 #ifdef __APPLE__ |
| 2720  |
| 2721              "    - On MacOS X, the semantics of fork() syscalls are non-standard and may\n" |
| 2722              "      break afl-fuzz performance optimizations when running platform-specific\n" |
| 2723              "      binaries. To fix this, set AFL_NO_FORKSRV=1 in the environment.\n\n" |
| 2724  |
| 2725 #endif /* __APPLE__ */ |
| 2726  |
| 2727              "    - Least likely, there is a horrible bug in the fuzzer. If other options\n" |
| 2728              "      fail, poke <lcamtuf@coredump.cx> for troubleshooting tips.\n"); |
| 2729 |
| 2730 } |
| 2731 |
| 2732 FATAL("Test case '%s' results in a crash", fn); |
| 2733 |
| 2734 case FAULT_ERROR: |
| 2735 |
| 2736 FATAL("Unable to execute target application ('%s')", argv[0]); |
| 2737 |
| 2738 case FAULT_NOINST: |
| 2739 |
| 2740 FATAL("No instrumentation detected"); |
| 2741 |
| 2742 case FAULT_NOBITS: |
| 2743 |
| 2744 useless_at_start++; |
| 2745 |
| 2746 if (!in_bitmap && !shuffle_queue) |
| 2747 WARNF("No new instrumentation output, test case may be useless."); |
| 2748 |
| 2749 break; |
| 2750 |
| 2751 } |
| 2752 |
| 2753 if (q->var_behavior) WARNF("Instrumentation output varies across runs."); |
| 2754 |
| 2755 q = q->next; |
| 2756 |
| 2757 } |
| 2758 |
| 2759 if (cal_failures) { |
| 2760 |
| 2761 if (cal_failures == queued_paths) |
| 2762 FATAL("All test cases time out%s, giving up!", |
| 2763 skip_crashes ? " or crash" : ""); |
| 2764 |
| 2765 WARNF("Skipped %u test cases (%0.02f%%) due to timeouts%s.", cal_failures, |
| 2766 ((double)cal_failures) * 100 / queued_paths, |
| 2767 skip_crashes ? " or crashes" : ""); |
| 2768 |
| 2769 if (cal_failures * 5 > queued_paths) |
| 2770 WARNF(cLRD "High percentage of rejected test cases, check settings!"); |
| 2771 |
| 2772 } |
| 2773 |
| 2774 OKF("All test cases processed."); |
| 2775 |
| 2776 } |
| 2777 |
| 2778 |
| 2779 /* Helper function: link() if possible, copy otherwise. */ |
| 2780 |
| 2781 static void link_or_copy(u8* old_path, u8* new_path) { |
| 2782 |
| 2783 s32 i = link(old_path, new_path); |
| 2784 s32 sfd, dfd; |
| 2785 u8* tmp; |
| 2786 |
| 2787 if (!i) return; |
| 2788 |
| 2789 sfd = open(old_path, O_RDONLY); |
| 2790 if (sfd < 0) PFATAL("Unable to open '%s'", old_path); |
| 2791 |
| 2792 dfd = open(new_path, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 2793 if (dfd < 0) PFATAL("Unable to create '%s'", new_path); |
| 2794 |
| 2795 tmp = ck_alloc(64 * 1024); |
| 2796 |
| 2797 while ((i = read(sfd, tmp, 64 * 1024)) > 0) |
| 2798 ck_write(dfd, tmp, i, new_path); |
| 2799 |
| 2800 if (i < 0) PFATAL("read() failed"); |
| 2801 |
| 2802 ck_free(tmp); |
| 2803 close(sfd); |
| 2804 close(dfd); |
| 2805 |
| 2806 } |
| 2807 |
| 2808 |
| 2809 static void nuke_resume_dir(void); |
| 2810 |
| 2811 /* Create hard links for input test cases in the output directory, choosing |
| 2812 good names and pivoting accordingly. */ |
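| /* (Editor's note) E.g., a seed named cool_input.bin is linked in as |
|    queue/id:000000,orig:cool_input.bin (or queue/id_000000 with |
|    SIMPLE_FILES), while a file already named id:000005,... keeps its name |
|    when it lands at queue position 5, signaling a resumed run. */ |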
| 2813 |
| 2814 static void pivot_inputs(void) { |
| 2815 |
| 2816 struct queue_entry* q = queue; |
| 2817 u32 id = 0; |
| 2818 |
| 2819 ACTF("Creating hard links for all input files..."); |
| 2820 |
| 2821 while (q) { |
| 2822 |
| 2823 u8 *nfn, *rsl = strrchr(q->fname, '/'); |
| 2824 u32 orig_id; |
| 2825 |
| 2826 if (!rsl) rsl = q->fname; else rsl++; |
| 2827 |
| 2828 /* If the original file name conforms to the syntax and the recorded |
| 2829 ID matches the one we'd assign, just use the original file name. |
| 2830 This is valuable for resuming fuzzing runs. */ |
| 2831 |
| 2832 #ifndef SIMPLE_FILES |
| 2833 # define CASE_PREFIX "id:" |
| 2834 #else |
| 2835 # define CASE_PREFIX "id_" |
| 2836 #endif /* ^!SIMPLE_FILES */ |
| 2837 |
| 2838 if (!strncmp(rsl, CASE_PREFIX, 3) && |
| 2839 sscanf(rsl + 3, "%06u", &orig_id) == 1 && orig_id == id) { |
| 2840 |
| 2841 u8* src_str; |
| 2842 u32 src_id; |
| 2843 |
| 2844 resuming_fuzz = 1; |
| 2845 nfn = alloc_printf("%s/queue/%s", out_dir, rsl); |
| 2846 |
| 2847 /* Since we're at it, let's also try to find parent and figure out the |
| 2848 appropriate depth for this entry. */ |
| 2849 |
| 2850 src_str = strchr(rsl + 3, ':'); |
| 2851 |
| 2852 if (src_str && sscanf(src_str + 1, "%06u", &src_id) == 1) { |
| 2853 |
| 2854 struct queue_entry* s = queue; |
| 2855 while (src_id-- && s) s = s->next; |
| 2856 if (s) q->depth = s->depth + 1; |
| 2857 |
| 2858 if (max_depth < q->depth) max_depth = q->depth; |
| 2859 |
| 2860 } |
| 2861 |
| 2862 } else { |
| 2863 |
| 2864 /* No dice - invent a new name, capturing the original one as a |
| 2865 substring. */ |
| 2866 |
| 2867 #ifndef SIMPLE_FILES |
| 2868 |
| 2869 u8* use_name = strstr(rsl, ",orig:"); |
| 2870 |
| 2871 if (use_name) use_name += 6; else use_name = rsl; |
| 2872 nfn = alloc_printf("%s/queue/id:%06u,orig:%s", out_dir, id, use_name); |
| 2873 |
| 2874 #else |
| 2875 |
| 2876 nfn = alloc_printf("%s/queue/id_%06u", out_dir, id); |
| 2877 |
| 2878 #endif /* ^!SIMPLE_FILES */ |
| 2879 |
| 2880 } |
| 2881 |
| 2882 /* Pivot to the new queue entry. */ |
| 2883 |
| 2884 link_or_copy(q->fname, nfn); |
| 2885 ck_free(q->fname); |
| 2886 q->fname = nfn; |
| 2887 |
| 2888 /* Make sure that the passed_det value carries over, too. */ |
| 2889 |
| 2890 if (q->passed_det) mark_as_det_done(q); |
| 2891 |
| 2892 q = q->next; |
| 2893 id++; |
| 2894 |
| 2895 } |
| 2896 |
| 2897 if (in_place_resume) nuke_resume_dir(); |
| 2898 |
| 2899 } |
| 2900 |
| 2901 |
| 2902 #ifndef SIMPLE_FILES |
| 2903 |
| 2904 /* Construct a file name for a new test case, capturing the operation |
| 2905 that led to its discovery. Uses a static buffer. */ |
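| /* (Editor's note) Typical results, per the format strings below: |
|    "src:000012,op:flip1,pos:307" for a deterministic bit flip, or |
|    "src:000004+000037,op:splice,rep:8,+cov" for a splice that also |
|    reached new coverage. */ |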
| 2906 |
| 2907 static u8* describe_op(u8 hnb) { |
| 2908 |
| 2909 static u8 ret[256]; |
| 2910 |
| 2911 if (syncing_party) { |
| 2912 |
| 2913 sprintf(ret, "sync:%s,src:%06u", syncing_party, syncing_case); |
| 2914 |
| 2915 } else { |
| 2916 |
| 2917 sprintf(ret, "src:%06u", current_entry); |
| 2918 |
| 2919 if (splicing_with >= 0) |
| 2920 sprintf(ret + strlen(ret), "+%06u", splicing_with); |
| 2921 |
| 2922 sprintf(ret + strlen(ret), ",op:%s", stage_short); |
| 2923 |
| 2924 if (stage_cur_byte >= 0) { |
| 2925 |
| 2926 sprintf(ret + strlen(ret), ",pos:%u", stage_cur_byte); |
| 2927 |
| 2928 if (stage_val_type != STAGE_VAL_NONE) |
| 2929 sprintf(ret + strlen(ret), ",val:%s%+d", |
| 2930 (stage_val_type == STAGE_VAL_BE) ? "be:" : "", |
| 2931 stage_cur_val); |
| 2932 |
| 2933 } else sprintf(ret + strlen(ret), ",rep:%u", stage_cur_val); |
| 2934 |
| 2935 } |
| 2936 |
| 2937 if (hnb == 2) strcat(ret, ",+cov"); |
| 2938 |
| 2939 return ret; |
| 2940 |
| 2941 } |
| 2942 |
| 2943 #endif /* !SIMPLE_FILES */ |
| 2944 |
| 2945 |
| 2946 /* Write a message accompanying the crash directory :-) */ |
| 2947 |
| 2948 static void write_crash_readme(void) { |
| 2949 |
| 2950 u8* fn = alloc_printf("%s/crashes/README.txt", out_dir); |
| 2951 s32 fd; |
| 2952 FILE* f; |
| 2953 |
| 2954 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 2955 ck_free(fn); |
| 2956 |
| 2957 /* Do not die on errors here - that would be impolite. */ |
| 2958 |
| 2959 if (fd < 0) return; |
| 2960 |
| 2961 f = fdopen(fd, "w"); |
| 2962 |
| 2963 if (!f) { |
| 2964 close(fd); |
| 2965 return; |
| 2966 } |
| 2967 |
| 2968 fprintf(f, "Command line used to find this crash:\n\n" |
| 2969 |
| 2970 "%s\n\n" |
| 2971 |
| 2972              "If you can't reproduce a bug outside of afl-fuzz, be sure to set the same\n" |
| 2973              "memory limit. The limit used for this fuzzing session was %s.\n\n" |
| 2974  |
| 2975              "Need a tool to minimize test cases before investigating the crashes or sending\n" |
| 2976              "them to a vendor? Check out the afl-tmin that comes with the fuzzer!\n\n" |
| 2977  |
| 2978              "Found any cool bugs in open-source tools using afl-fuzz? If yes, please drop\n" |
| 2979              "me a mail at <lcamtuf@coredump.cx> once the issues are fixed - I'd love to\n" |
| 2980 "add your finds to the gallery at:\n\n" |
| 2981 |
| 2982 " http://lcamtuf.coredump.cx/afl/\n\n" |
| 2983 |
| 2984 "Thanks :-)\n", |
| 2985 |
| 2986 orig_cmdline, DMS(mem_limit << 20)); /* ignore errors */ |
| 2987 |
| 2988 fclose(f); |
| 2989 |
| 2990 } |
| 2991 |
| 2992 |
| 2993 /* Check if the result of an execve() during routine fuzzing is interesting, |
| 2994 save or queue the input test case for further analysis if so. Returns 1 if |
| 2995 entry is saved, 0 otherwise. */ |
| 2996 |
| 2997 static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) { |
| 2998 |
| 2999 u8 *fn = ""; |
| 3000 u8 hnb; |
| 3001 s32 fd; |
| 3002 u8 keeping = 0, res; |
| 3003 |
| 3004 if (fault == crash_mode) { |
| 3005 |
| 3006 /* Keep only if there are new bits in the map, add to queue for |
| 3007 future fuzzing, etc. */ |
| 3008 |
| 3009 if (!(hnb = has_new_bits(virgin_bits))) { |
| 3010 if (crash_mode) total_crashes++; |
| 3011 return 0; |
| 3012 } |
| 3013 |
| 3014 #ifndef SIMPLE_FILES |
| 3015 |
| 3016 fn = alloc_printf("%s/queue/id:%06u,%s", out_dir, queued_paths, |
| 3017 describe_op(hnb)); |
| 3018 |
| 3019 #else |
| 3020 |
| 3021 fn = alloc_printf("%s/queue/id_%06u", out_dir, queued_paths); |
| 3022 |
| 3023 #endif /* ^!SIMPLE_FILES */ |
| 3024 |
| 3025 add_to_queue(fn, len, 0); |
| 3026 |
| 3027 if (hnb == 2) { |
| 3028 queue_top->has_new_cov = 1; |
| 3029 queued_with_cov++; |
| 3030 } |
| 3031 |
| 3032 queue_top->exec_cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); |
| 3033 |
| 3034 /* Try to calibrate inline; this also calls update_bitmap_score() when |
| 3035 successful. */ |
| 3036 |
| 3037 res = calibrate_case(argv, queue_top, mem, queue_cycle - 1, 0); |
| 3038 |
| 3039 if (res == FAULT_ERROR) |
| 3040 FATAL("Unable to execute target application"); |
| 3041 |
| 3042 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 3043 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
| 3044 ck_write(fd, mem, len, fn); |
| 3045 close(fd); |
| 3046 |
| 3047 keeping = 1; |
| 3048 |
| 3049 } |
| 3050 |
| 3051 switch (fault) { |
| 3052 |
| 3053 case FAULT_HANG: |
| 3054 |
| 3055 /* Hangs are not very interesting, but we're still obliged to keep |
| 3056 a handful of samples. We use the presence of new bits in the |
| 3057 hang-specific bitmap as a signal of uniqueness. In "dumb" mode, we |
| 3058 just keep everything. */ |
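|       /* (Editor's note) simplify_trace() collapses hit counts to a binary |
|          hit/not-hit view, so "new bits" here means a new set of edges was |
|          exercised, regardless of counts. */ |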
| 3059 |
| 3060 total_hangs++; |
| 3061 |
| 3062 if (unique_hangs >= KEEP_UNIQUE_HANG) return keeping; |
| 3063 |
| 3064 if (!dumb_mode) { |
| 3065 |
| 3066 #ifdef __x86_64__ |
| 3067 simplify_trace((u64*)trace_bits); |
| 3068 #else |
| 3069 simplify_trace((u32*)trace_bits); |
| 3070 #endif /* ^__x86_64__ */ |
| 3071 |
| 3072 if (!has_new_bits(virgin_hang)) return keeping; |
| 3073 |
| 3074 } |
| 3075 |
| 3076 #ifndef SIMPLE_FILES |
| 3077 |
| 3078 fn = alloc_printf("%s/hangs/id:%06llu,%s", out_dir, |
| 3079 unique_hangs, describe_op(0)); |
| 3080 |
| 3081 #else |
| 3082 |
| 3083 fn = alloc_printf("%s/hangs/id_%06llu", out_dir, |
| 3084 unique_hangs); |
| 3085 |
| 3086 #endif /* ^!SIMPLE_FILES */ |
| 3087 |
| 3088 unique_hangs++; |
| 3089 |
| 3090 last_hang_time = get_cur_time(); |
| 3091 |
| 3092 break; |
| 3093 |
| 3094 case FAULT_CRASH: |
| 3095 |
| 3096 /* This is handled in a manner roughly similar to hangs, |
| 3097 except for slightly different limits. */ |
| 3098 |
| 3099 total_crashes++; |
| 3100 |
| 3101 if (unique_crashes >= KEEP_UNIQUE_CRASH) return keeping; |
| 3102 |
| 3103 if (!dumb_mode) { |
| 3104 |
| 3105 #ifdef __x86_64__ |
| 3106 simplify_trace((u64*)trace_bits); |
| 3107 #else |
| 3108 simplify_trace((u32*)trace_bits); |
| 3109 #endif /* ^__x86_64__ */ |
| 3110 |
| 3111 if (!has_new_bits(virgin_crash)) return keeping; |
| 3112 |
| 3113 } |
| 3114 |
| 3115 if (!unique_crashes) write_crash_readme(); |
| 3116 |
| 3117 #ifndef SIMPLE_FILES |
| 3118 |
| 3119 fn = alloc_printf("%s/crashes/id:%06llu,sig:%02u,%s", out_dir, |
| 3120 unique_crashes, kill_signal, describe_op(0)); |
| 3121 |
| 3122 #else |
| 3123 |
| 3124 fn = alloc_printf("%s/crashes/id_%06llu_%02u", out_dir, unique_crashes, |
| 3125 kill_signal); |
| 3126 |
| 3127 #endif /* ^!SIMPLE_FILES */ |
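|       /* (Editor's note) e.g. crashes/id:000000,sig:11,src:000007,op:havoc, |
|          rep:4 for a SIGSEGV found in the havoc stage. */ |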
| 3128 |
| 3129 unique_crashes++; |
| 3130 |
| 3131 last_crash_time = get_cur_time(); |
| 3132 |
| 3133 break; |
| 3134 |
| 3135 case FAULT_ERROR: FATAL("Unable to execute target application"); |
| 3136 |
| 3137 default: return keeping; |
| 3138 |
| 3139 } |
| 3140 |
| 3141 /* If we're here, we apparently want to save the crash or hang |
| 3142 test case, too. */ |
| 3143 |
| 3144 fd = open(fn, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 3145 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
| 3146 ck_write(fd, mem, len, fn); |
| 3147 close(fd); |
| 3148 |
| 3149 ck_free(fn); |
| 3150 |
| 3151 return keeping; |
| 3152 |
| 3153 } |
| 3154 |
| 3155 |
| 3156 /* When resuming, try to find the queue position to start from. This makes sense |
| 3157 only when resuming, and when we can find the original fuzzer_stats. */ |
| 3158 |
| 3159 static u32 find_start_position(void) { |
| 3160 |
| 3161 static u8 tmp[4096]; /* Ought to be enough for anybody. */ |
| 3162 |
| 3163 u8 *fn, *off; |
| 3164 s32 fd, i; |
| 3165 u32 ret; |
| 3166 |
| 3167 if (!resuming_fuzz) return 0; |
| 3168 |
| 3169 if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); |
| 3170 else fn = alloc_printf("%s/../fuzzer_stats", in_dir); |
| 3171 |
| 3172 fd = open(fn, O_RDONLY); |
| 3173 ck_free(fn); |
| 3174 |
| 3175 if (fd < 0) return 0; |
| 3176 |
| 3177 i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ |
| 3178 close(fd); |
| 3179 |
| 3180   off = strstr(tmp, "cur_path       : "); |
| 3181 if (!off) return 0; |
| 3182 |
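|   /* (Editor's note) The stats keys are padded to 15 characters, so with |
|      the ": " separator the value starts 17 bytes past the match. */ |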
| 3183 ret = atoi(off + 17); |
| 3184 if (ret >= queued_paths) ret = 0; |
| 3185 return ret; |
| 3186 |
| 3187 } |
| 3188 |
| 3189 |
| 3190 /* The same, but for timeouts. The idea is that when resuming sessions without |
| 3191    -t given, we don't want to redo the timeout auto-scaling over and over |
| 3192    again, since random flukes could otherwise make it keep growing. */ |
| 3193 |
| 3194 static void find_timeout(void) { |
| 3195 |
| 3196 static u8 tmp[4096]; /* Ought to be enough for anybody. */ |
| 3197 |
| 3198 u8 *fn, *off; |
| 3199 s32 fd, i; |
| 3200 u32 ret; |
| 3201 |
| 3202 if (!resuming_fuzz) return; |
| 3203 |
| 3204 if (in_place_resume) fn = alloc_printf("%s/fuzzer_stats", out_dir); |
| 3205 else fn = alloc_printf("%s/../fuzzer_stats", in_dir); |
| 3206 |
| 3207 fd = open(fn, O_RDONLY); |
| 3208 ck_free(fn); |
| 3209 |
| 3210 if (fd < 0) return; |
| 3211 |
| 3212 i = read(fd, tmp, sizeof(tmp) - 1); (void)i; /* Ignore errors */ |
| 3213 close(fd); |
| 3214 |
| 3215   off = strstr(tmp, "exec_timeout   : "); |
| 3216 if (!off) return; |
| 3217 |
| 3218 ret = atoi(off + 17); |
| 3219 if (ret <= 4) return; |
| 3220 |
| 3221 exec_tmout = ret; |
| 3222 timeout_given = 3; |
| 3223 |
| 3224 } |
| 3225 |
| 3226 |
| 3227 /* Update stats file for unattended monitoring. */ |
| 3228 |
| 3229 static void write_stats_file(double bitmap_cvg, double eps) { |
| 3230 |
| 3231 static double last_bcvg, last_eps; |
| 3232 |
| 3233 u8* fn = alloc_printf("%s/fuzzer_stats", out_dir); |
| 3234 s32 fd; |
| 3235 FILE* f; |
| 3236 |
| 3237 fd = open(fn, O_WRONLY | O_CREAT | O_TRUNC, 0600); |
| 3238 |
| 3239 if (fd < 0) PFATAL("Unable to create '%s'", fn); |
| 3240 |
| 3241 ck_free(fn); |
| 3242 |
| 3243 f = fdopen(fd, "w"); |
| 3244 |
| 3245 if (!f) PFATAL("fdopen() failed"); |
| 3246 |
| 3247 /* Keep last values in case we're called from another context |
| 3248 where exec/sec stats and such are not readily available. */ |
| 3249 |
| 3250 if (!bitmap_cvg && !eps) { |
| 3251 bitmap_cvg = last_bcvg; |
| 3252 eps = last_eps; |
| 3253 } else { |
| 3254 last_bcvg = bitmap_cvg; |
| 3255 last_eps = eps; |
| 3256 } |
| 3257 |
| 3258   fprintf(f, "start_time     : %llu\n" |
| 3259              "last_update    : %llu\n" |
| 3260              "fuzzer_pid     : %u\n" |
| 3261              "cycles_done    : %llu\n" |
| 3262              "execs_done     : %llu\n" |
| 3263              "execs_per_sec  : %0.02f\n" |
| 3264              "paths_total    : %u\n" |
| 3265              "paths_favored  : %u\n" |
| 3266              "paths_found    : %u\n" |
| 3267              "paths_imported : %u\n" |
| 3268              "max_depth      : %u\n" |
| 3269              "cur_path       : %u\n" |
| 3270              "pending_favs   : %u\n" |
| 3271              "pending_total  : %u\n" |
| 3272              "variable_paths : %u\n" |
| 3273              "bitmap_cvg     : %0.02f%%\n" |
| 3274              "unique_crashes : %llu\n" |
| 3275              "unique_hangs   : %llu\n" |
| 3276              "last_path      : %llu\n" |
| 3277              "last_crash     : %llu\n" |
| 3278              "last_hang      : %llu\n" |
| 3279              "exec_timeout   : %u\n" |
| 3280              "afl_banner     : %s\n" |
| 3281              "afl_version    : " VERSION "\n" |
| 3282              "command_line   : %s\n", |
| 3283 start_time / 1000, get_cur_time() / 1000, getpid(), |
| 3284 queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps, |
| 3285 queued_paths, queued_favored, queued_discovered, queued_imported, |
| 3286 max_depth, current_entry, pending_favored, pending_not_fuzzed, |
| 3287 queued_variable, bitmap_cvg, unique_crashes, unique_hangs, |
| 3288 last_path_time / 1000, last_crash_time / 1000, |
| 3289 last_hang_time / 1000, exec_tmout, use_banner, orig_cmdline); |
| 3290 /* ignore errors */ |
| 3291 |
| 3292 fclose(f); |
| 3293 |
| 3294 } |
| 3295 |
| 3296 |
| 3297 /* Update the plot file if there is a reason to. */ |
| 3298 |
| 3299 static void maybe_update_plot_file(double bitmap_cvg, double eps) { |
| 3300 |
| 3301 static u32 prev_qp, prev_pf, prev_pnf, prev_ce, prev_md; |
| 3302 static u64 prev_qc, prev_uc, prev_uh; |
| 3303 |
| 3304 if (prev_qp == queued_paths && prev_pf == pending_favored && |
| 3305 prev_pnf == pending_not_fuzzed && prev_ce == current_entry && |
| 3306 prev_qc == queue_cycle && prev_uc == unique_crashes && |
| 3307 prev_uh == unique_hangs && prev_md == max_depth) return; |
| 3308 |
| 3309 prev_qp = queued_paths; |
| 3310 prev_pf = pending_favored; |
| 3311 prev_pnf = pending_not_fuzzed; |
| 3312 prev_ce = current_entry; |
| 3313 prev_qc = queue_cycle; |
| 3314 prev_uc = unique_crashes; |
| 3315 prev_uh = unique_hangs; |
| 3316 prev_md = max_depth; |
| 3317 |
| 3318 /* Fields in the file: |
| 3319 |
| 3320      unix_time, cycles_done, cur_path, paths_total, paths_not_fuzzed, |
| 3321      favored_not_fuzzed, bitmap_cvg, unique_crashes, unique_hangs, |
| 3322      max_depth, execs_per_sec */ |
| 3323 |
| 3324 fprintf(plot_file, |
| 3325 "%llu, %llu, %u, %u, %u, %u, %0.02f%%, %llu, %llu, %u, %0.02f\n", |
| 3326 get_cur_time() / 1000, queue_cycle - 1, current_entry, queued_paths, |
| 3327 pending_not_fuzzed, pending_favored, bitmap_cvg, unique_crashes, |
| 3328 unique_hangs, max_depth, eps); /* ignore errors */ |
| 3329 |
| 3330 fflush(plot_file); |
| 3331 |
| 3332 } |
| 3333 |
| 3334 |
| 3335 |
| 3336 /* A helper function for maybe_delete_out_dir(), deleting all prefixed |
| 3337 files in a directory. */ |
| 3338 |
| 3339 static u8 delete_files(u8* path, u8* prefix) { |
| 3340 |
| 3341 DIR* d; |
| 3342 struct dirent* d_ent; |
| 3343 |
| 3344 d = opendir(path); |
| 3345 |
| 3346 if (!d) return 0; |
| 3347 |
| 3348 while ((d_ent = readdir(d))) { |
| 3349 |
| 3350 if (d_ent->d_name[0] != '.' && (!prefix || |
| 3351 !strncmp(d_ent->d_name, prefix, strlen(prefix)))) { |
| 3352 |
| 3353 u8* fname = alloc_printf("%s/%s", path, d_ent->d_name); |
| 3354 if (unlink(fname)) PFATAL("Unable to delete '%s'", fname); |
| 3355 ck_free(fname); |
| 3356 |
| 3357 } |
| 3358 |
| 3359 } |
| 3360 |
| 3361 closedir(d); |
| 3362 |
| 3363 return !!rmdir(path); |
| 3364 |
| 3365 } |
| 3366 |
| 3367 |
| 3368 /* Get the number of runnable processes, with some simple smoothing. */ |
| 3369 |
| 3370 static double get_runnable_processes(void) { |
| 3371 |
| 3372 static double res; |
| 3373 |
| 3374 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) |
| 3375 |
| 3376 /* I don't see any portable sysctl or so that would quickly give us the |
| 3377 number of runnable processes; the 1-minute load average can be a |
| 3378 semi-decent approximation, though. */ |
| 3379 |
| 3380 if (getloadavg(&res, 1) != 1) return 0; |
| 3381 |
| 3382 #else |
| 3383 |
| 3384 /* On Linux, /proc/stat is probably the best way; load averages are |
| 3385 computed in funny ways and sometimes don't reflect extremely short-lived |
| 3386 processes well. */ |
| 3387 |
| 3388 FILE* f = fopen("/proc/stat", "r"); |
| 3389 u8 tmp[1024]; |
| 3390 u32 val = 0; |
| 3391 |
| 3392 if (!f) return 0; |
| 3393 |
| 3394 while (fgets(tmp, sizeof(tmp), f)) { |
| 3395 |
| 3396 if (!strncmp(tmp, "procs_running ", 14) || |
| 3397 !strncmp(tmp, "procs_blocked ", 14)) val += atoi(tmp + 14); |
| 3398 |
| 3399 } |
| 3400 |
| 3401 fclose(f); |
| 3402 |
| 3403 if (!res) { |
| 3404 |
| 3405 res = val; |
| 3406 |
| 3407 } else { |
| 3408 |
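|     /* (Editor's note) Exponential moving average: keep 1 - 1/AVG_SMOOTHING |
|        of the old value and blend in 1/AVG_SMOOTHING of the new sample. */ |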
| 3409 res = res * (1.0 - 1.0 / AVG_SMOOTHING) + |
| 3410 ((double)val) * (1.0 / AVG_SMOOTHING); |
| 3411 |
| 3412 } |
| 3413 |
| 3414 #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ |
| 3415 |
| 3416 return res; |
| 3417 |
| 3418 } |
| 3419 |
| 3420 |
| 3421 /* Delete the temporary directory used for in-place session resume. */ |
| 3422 |
| 3423 static void nuke_resume_dir(void) { |
| 3424 |
| 3425 u8* fn; |
| 3426 |
| 3427 fn = alloc_printf("%s/_resume/.state/deterministic_done", out_dir); |
| 3428 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3429 ck_free(fn); |
| 3430 |
| 3431 fn = alloc_printf("%s/_resume/.state/auto_extras", out_dir); |
| 3432 if (delete_files(fn, "auto_")) goto dir_cleanup_failed; |
| 3433 ck_free(fn); |
| 3434 |
| 3435 fn = alloc_printf("%s/_resume/.state/redundant_edges", out_dir); |
| 3436 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3437 ck_free(fn); |
| 3438 |
| 3439 fn = alloc_printf("%s/_resume/.state/variable_behavior", out_dir); |
| 3440 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3441 ck_free(fn); |
| 3442 |
| 3443 fn = alloc_printf("%s/_resume/.state", out_dir); |
| 3444 if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed; |
| 3445 ck_free(fn); |
| 3446 |
| 3447 fn = alloc_printf("%s/_resume", out_dir); |
| 3448 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3449 ck_free(fn); |
| 3450 |
| 3451 return; |
| 3452 |
| 3453 dir_cleanup_failed: |
| 3454 |
| 3455 FATAL("_resume directory cleanup failed"); |
| 3456 |
| 3457 } |
| 3458 |
| 3459 |
| 3460 /* Delete fuzzer output directory if we recognize it as ours, if the fuzzer |
| 3461 is not currently running, and if the last run time isn't too great. */ |
| 3462 |
| 3463 static void maybe_delete_out_dir(void) { |
| 3464 |
| 3465 FILE* f; |
| 3466 u8 *fn = alloc_printf("%s/fuzzer_stats", out_dir); |
| 3467 |
| 3468 /* See if the output directory is locked. If yes, bail out. If not, |
| 3469 create a lock that will persist for the lifetime of the process |
| 3470 (this requires leaving the descriptor open).*/ |
| 3471 |
| 3472 out_dir_fd = open(out_dir, O_RDONLY); |
| 3473 if (out_dir_fd < 0) PFATAL("Unable to open '%s'", out_dir); |
| 3474 |
| 3475 #ifndef __sun |
| 3476 |
| 3477 if (flock(out_dir_fd, LOCK_EX | LOCK_NB) && errno == EWOULDBLOCK) { |
| 3478 |
| 3479 SAYF("\n" cLRD "[-] " cRST |
| 3480          "Looks like the job output directory is being actively used by another\n" |
| 3481 " instance of afl-fuzz. You will need to choose a different %s\n" |
| 3482 " or stop the other process first.\n", |
| 3483 sync_id ? "fuzzer ID" : "output location"); |
| 3484 |
| 3485 FATAL("Directory '%s' is in use", out_dir); |
| 3486 |
| 3487 } |
| 3488 |
| 3489 #endif /* !__sun */ |
| 3490 |
| 3491 f = fopen(fn, "r"); |
| 3492 |
| 3493 if (f) { |
| 3494 |
| 3495 u64 start_time, last_update; |
| 3496 |
| 3497 if (fscanf(f, "start_time : %llu\n" |
| 3498 "last_update : %llu\n", &start_time, &last_update) != 2) |
| 3499 FATAL("Malformed data in '%s'", fn); |
| 3500 |
| 3501 fclose(f); |
| 3502 |
| 3503 /* Let's see how much work is at stake. */ |
| 3504 |
| 3505 if (!in_place_resume && last_update - start_time > OUTPUT_GRACE * 60) { |
| 3506 |
| 3507 SAYF("\n" cLRD "[-] " cRST |
| 3508          "The job output directory already exists and contains the results of more\n" |
| 3509          "    than %u minutes worth of fuzzing. To avoid data loss, afl-fuzz will *NOT*\n" |
| 3510          "    automatically delete this data for you.\n\n" |
| 3511  |
| 3512          "    If you wish to start a new session, remove or rename the directory manually,\n" |
| 3513          "    or specify a different output location for this job. To resume the old\n" |
| 3514          "    session, put '-' as the input directory in the command line ('-i -') and\n" |
| 3515          "    try again.\n", OUTPUT_GRACE); |
| 3516 |
| 3517 FATAL("At-risk data found in '%s'", out_dir); |
| 3518 |
| 3519 } |
| 3520 |
| 3521 } |
| 3522 |
| 3523 ck_free(fn); |
| 3524 |
| 3525 /* The idea for in-place resume is pretty simple: we temporarily move the old |
| 3526 queue/ to a new location that gets deleted once import to the new queue/ |
| 3527 is finished. If _resume/ already exists, the current queue/ may be |
| 3528 incomplete due to an earlier abort, so we want to use the old _resume/ |
| 3529 dir instead, and we let rename() fail silently. */ |
| 3530 |
| 3531 if (in_place_resume) { |
| 3532 |
| 3533 u8* orig_q = alloc_printf("%s/queue", out_dir); |
| 3534 |
| 3535 in_dir = alloc_printf("%s/_resume", out_dir); |
| 3536 |
| 3537 rename(orig_q, in_dir); /* Ignore errors */ |
| 3538 |
| 3539 OKF("Output directory exists, will attempt session resume."); |
| 3540 |
| 3541 ck_free(orig_q); |
| 3542 |
| 3543 } else { |
| 3544 |
| 3545 OKF("Output directory exists but deemed OK to reuse."); |
| 3546 |
| 3547 } |
| 3548 |
| 3549 ACTF("Deleting old session data..."); |
| 3550 |
| 3551 /* Okay, let's get the ball rolling! First, we need to get rid of the entries |
| 3552 in <out_dir>/.synced/.../id:*, if any are present. */ |
| 3553 |
| 3554 fn = alloc_printf("%s/.synced", out_dir); |
| 3555 if (delete_files(fn, NULL)) goto dir_cleanup_failed; |
| 3556 ck_free(fn); |
| 3557 |
| 3558 /* Next, we need to clean up <out_dir>/queue/.state/ subdirectories: */ |
| 3559 |
| 3560 fn = alloc_printf("%s/queue/.state/deterministic_done", out_dir); |
| 3561 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3562 ck_free(fn); |
| 3563 |
| 3564 fn = alloc_printf("%s/queue/.state/auto_extras", out_dir); |
| 3565 if (delete_files(fn, "auto_")) goto dir_cleanup_failed; |
| 3566 ck_free(fn); |
| 3567 |
| 3568 fn = alloc_printf("%s/queue/.state/redundant_edges", out_dir); |
| 3569 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3570 ck_free(fn); |
| 3571 |
| 3572 fn = alloc_printf("%s/queue/.state/variable_behavior", out_dir); |
| 3573 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3574 ck_free(fn); |
| 3575 |
| 3576 /* Then, get rid of the .state subdirectory itself (should be empty by now) |
| 3577 and everything matching <out_dir>/queue/id:*. */ |
| 3578 |
| 3579 fn = alloc_printf("%s/queue/.state", out_dir); |
| 3580 if (rmdir(fn) && errno != ENOENT) goto dir_cleanup_failed; |
| 3581 ck_free(fn); |
| 3582 |
| 3583 fn = alloc_printf("%s/queue", out_dir); |
| 3584 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3585 ck_free(fn); |
| 3586 |
| 3587 /* All right, let's do <out_dir>/crashes/id:* and <out_dir>/hangs/id:*. */ |
| 3588 |
| 3589 if (!in_place_resume) { |
| 3590 |
| 3591 fn = alloc_printf("%s/crashes/README.txt", out_dir); |
| 3592 unlink(fn); /* Ignore errors */ |
| 3593 ck_free(fn); |
| 3594 |
| 3595 } |
| 3596 |
| 3597 fn = alloc_printf("%s/crashes", out_dir); |
| 3598 |
| 3599 /* Make backup of the crashes directory if it's not empty and if we're |
| 3600 doing in-place resume. */ |
| 3601 |
| 3602 if (in_place_resume && rmdir(fn)) { |
| 3603 |
| 3604 time_t cur_t = time(0); |
| 3605 struct tm* t = localtime(&cur_t); |
| 3606 |
| 3607 #ifndef SIMPLE_FILES |
| 3608 |
| 3609 u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn, |
| 3610 t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, |
| 3611 t->tm_hour, t->tm_min, t->tm_sec); |
| 3612 |
| 3613 #else |
| 3614 |
| 3615 u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn, |
| 3616 t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, |
| 3617 t->tm_hour, t->tm_min, t->tm_sec); |
| 3618 |
| 3619 #endif /* ^!SIMPLE_FILES */ |
| 3620 |
| 3621 rename(fn, nfn); /* Ignore errors. */ |
| 3622 ck_free(nfn); |
| 3623 |
| 3624 } |
| 3625 |
| 3626 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3627 ck_free(fn); |
| 3628 |
| 3629 fn = alloc_printf("%s/hangs", out_dir); |
| 3630 |
| 3631 /* Backup hangs, too. */ |
| 3632 |
| 3633 if (in_place_resume && rmdir(fn)) { |
| 3634 |
| 3635 time_t cur_t = time(0); |
| 3636 struct tm* t = localtime(&cur_t); |
| 3637 |
| 3638 #ifndef SIMPLE_FILES |
| 3639 |
| 3640 u8* nfn = alloc_printf("%s.%04u-%02u-%02u-%02u:%02u:%02u", fn, |
| 3641 t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, |
| 3642 t->tm_hour, t->tm_min, t->tm_sec); |
| 3643 |
| 3644 #else |
| 3645 |
| 3646 u8* nfn = alloc_printf("%s_%04u%02u%02u%02u%02u%02u", fn, |
| 3647 t->tm_year + 1900, t->tm_mon + 1, t->tm_mday, |
| 3648 t->tm_hour, t->tm_min, t->tm_sec); |
| 3649 |
| 3650 #endif /* ^!SIMPLE_FILES */ |
| 3651 |
| 3652 rename(fn, nfn); /* Ignore errors. */ |
| 3653 ck_free(nfn); |
| 3654 |
| 3655 } |
| 3656 |
| 3657 if (delete_files(fn, CASE_PREFIX)) goto dir_cleanup_failed; |
| 3658 ck_free(fn); |
| 3659 |
| 3660 /* And now, for some finishing touches. */ |
| 3661 |
| 3662 fn = alloc_printf("%s/.cur_input", out_dir); |
| 3663 if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; |
| 3664 ck_free(fn); |
| 3665 |
| 3666 fn = alloc_printf("%s/fuzz_bitmap", out_dir); |
| 3667 if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; |
| 3668 ck_free(fn); |
| 3669 |
| 3670 if (!in_place_resume) { |
| 3671 fn = alloc_printf("%s/fuzzer_stats", out_dir); |
| 3672 if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; |
| 3673 ck_free(fn); |
| 3674 } |
| 3675 |
| 3676 fn = alloc_printf("%s/plot_data", out_dir); |
| 3677 if (unlink(fn) && errno != ENOENT) goto dir_cleanup_failed; |
| 3678 ck_free(fn); |
| 3679 |
| 3680 OKF("Output dir cleanup successful."); |
| 3681 |
| 3682 /* Wow... is that all? If yes, celebrate! */ |
| 3683 |
| 3684 return; |
| 3685 |
| 3686 dir_cleanup_failed: |
| 3687 |
| 3688 SAYF("\n" cLRD "[-] " cRST |
| 3689        "Whoops, the fuzzer tried to reuse your output directory, but bumped into\n" |
| 3690        "    some files that shouldn't be there or that couldn't be removed - so it\n" |
| 3691 " decided to abort! This happened while processing this path:\n\n" |
| 3692 |
| 3693 " %s\n\n" |
| 3694        "    Please examine and manually delete the files, or specify a different\n" |
| 3695 " output location for the tool.\n", fn); |
| 3696 |
| 3697 FATAL("Output directory cleanup failed"); |
| 3698 |
| 3699 } |
| 3700 |
| 3701 |
| 3702 static void check_term_size(void); |
| 3703 |
| 3704 |
| 3705 /* A spiffy retro stats screen! This is called every stats_update_freq |
| 3706 execve() calls, plus in several other circumstances. */ |
| 3707 |
| 3708 static void show_stats(void) { |
| 3709 |
| 3710 static u64 last_stats_ms, last_plot_ms, last_ms, last_execs; |
| 3711 static double avg_exec; |
| 3712 double t_byte_ratio; |
| 3713 |
| 3714 u64 cur_ms; |
| 3715 u32 t_bytes, t_bits; |
| 3716 |
| 3717 u32 banner_len, banner_pad; |
| 3718 u8 tmp[256]; |
| 3719 |
| 3720 cur_ms = get_cur_time(); |
| 3721 |
| 3722 /* If not enough time has passed since last UI update, bail out. */ |
| 3723 |
| 3724 if (cur_ms - last_ms < 1000 / UI_TARGET_HZ) return; |
| 3725 |
| 3726 /* Check if we're past the 10 minute mark. */ |
| 3727 |
| 3728 if (cur_ms - start_time > 10 * 60 * 1000) run_over10m = 1; |
| 3729 |
| 3730 /* Calculate smoothed exec speed stats. */ |
| 3731 |
| 3732 if (!last_execs) { |
| 3733 |
| 3734 avg_exec = ((double)total_execs) * 1000 / (cur_ms - start_time); |
| 3735 |
| 3736 } else { |
| 3737 |
| 3738 double cur_avg = ((double)(total_execs - last_execs)) * 1000 / |
| 3739 (cur_ms - last_ms); |
| 3740 |
| 3741 /* If there is a dramatic (5x+) jump in speed, reset the indicator |
| 3742 more quickly. */ |
| 3743 |
| 3744 if (cur_avg * 5 < avg_exec || cur_avg / 5 > avg_exec) |
| 3745 avg_exec = cur_avg; |
| 3746 |
| 3747 avg_exec = avg_exec * (1.0 - 1.0 / AVG_SMOOTHING) + |
| 3748 cur_avg * (1.0 / AVG_SMOOTHING); |
| 3749 |
| 3750 } |
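| |
|        /* A worked example of the smoothing above, assuming the stock config.h |
|           value AVG_SMOOTHING = 16: with avg_exec at 1000 execs/sec and a |
|           momentary cur_avg of 1160, the new estimate is 1000 * (15.0 / 16) + |
|           1160 * (1.0 / 16) = 1010 - each refresh nudges the average 1/16 of |
|           the way toward the instantaneous reading, unless the 5x guard above |
|           fires first. */ |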
| 3751 |
| 3752 last_ms = cur_ms; |
| 3753 last_execs = total_execs; |
| 3754 |
| 3755 /* Tell the callers when to contact us (as measured in execs). */ |
| 3756 |
| 3757 stats_update_freq = avg_exec / (UI_TARGET_HZ * 10); |
| 3758 if (!stats_update_freq) stats_update_freq = 1; |
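| |
|   /* E.g., assuming the stock UI_TARGET_HZ of 5: at ~5,000 execs/sec this |
|      works out to 5000 / 50 = 100, i.e., callers check in roughly fifty times |
|      a second, and the 1000 / UI_TARGET_HZ ms gate at the top of this |
|      function thins that down to about five actual redraws per second. */ |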
| 3759 |
| 3760 /* Do some bitmap stats. */ |
| 3761 |
| 3762 t_bytes = count_non_255_bytes(virgin_bits); |
| 3763 t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE; |
| 3764 |
| 3765 /* Roughly every minute, update fuzzer stats and save auto tokens. */ |
| 3766 |
| 3767 if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) { |
| 3768 |
| 3769 last_stats_ms = cur_ms; |
| 3770 write_stats_file(t_byte_ratio, avg_exec); |
| 3771 save_auto(); |
| 3772 write_bitmap(); |
| 3773 |
| 3774 } |
| 3775 |
| 3776 /* Every now and then, write plot data. */ |
| 3777 |
| 3778 if (cur_ms - last_plot_ms > PLOT_UPDATE_SEC * 1000) { |
| 3779 |
| 3780 last_plot_ms = cur_ms; |
| 3781 maybe_update_plot_file(t_byte_ratio, avg_exec); |
| 3782 |
| 3783 } |
| 3784 |
| 3785 /* Honor AFL_EXIT_WHEN_DONE and AFL_BENCH_UNTIL_CRASH. */ |
| 3786 |
| 3787 if (!dumb_mode && cycles_wo_finds > 20 && !pending_not_fuzzed && |
| 3788 getenv("AFL_EXIT_WHEN_DONE")) stop_soon = 2; |
| 3789 |
| 3790 if (total_crashes && getenv("AFL_BENCH_UNTIL_CRASH")) stop_soon = 2; |
| 3791 |
| 3792 /* If we're not on TTY, bail out. */ |
| 3793 |
| 3794 if (not_on_tty) return; |
| 3795 |
| 3796 /* Compute some mildly useful bitmap stats. */ |
| 3797 |
| 3798 t_bits = (MAP_SIZE << 3) - count_bits(virgin_bits); |
| 3799 |
| 3800 /* Now, for the visuals... */ |
| 3801 |
| 3802 if (clear_screen) { |
| 3803 |
| 3804 SAYF(TERM_CLEAR CURSOR_HIDE); |
| 3805 clear_screen = 0; |
| 3806 |
| 3807 check_term_size(); |
| 3808 |
| 3809 } |
| 3810 |
| 3811 SAYF(TERM_HOME); |
| 3812 |
| 3813 if (term_too_small) { |
| 3814 |
| 3815 SAYF(cBRI "Your terminal is too small to display the UI.\n" |
| 3816 "Please resize terminal window to at least 80x25.\n" cRST); |
| 3817 |
| 3818 return; |
| 3819 |
| 3820 } |
| 3821 |
| 3822 /* Let's start by drawing a centered banner. */ |
| 3823 |
| 3824 banner_len = (crash_mode ? 24 : 22) + strlen(VERSION) + strlen(use_banner); |
| 3825 banner_pad = (80 - banner_len) / 2; |
| 3826 memset(tmp, ' ', banner_pad); |
| 3827 |
| 3828 sprintf(tmp + banner_pad, "%s " cLCY VERSION cLGN |
| 3829 " (%s)", crash_mode ? cPIN "peruvian were-rabbit" : |
| 3830 cYEL "american fuzzy lop", use_banner); |
| 3831 |
| 3832 SAYF("\n%s\n\n", tmp); |
| 3833 |
| 3834 /* "Handy" shortcuts for drawing boxes... */ |
| 3835 |
| 3836 #define bSTG bSTART cGRA |
| 3837 #define bH2 bH bH |
| 3838 #define bH5 bH2 bH2 bH |
| 3839 #define bH10 bH5 bH5 |
| 3840 #define bH20 bH10 bH10 |
| 3841 #define bH30 bH20 bH10 |
| 3842 #define SP5        "     " |
| 3843 #define SP10 SP5 SP5 |
| 3844 #define SP20 SP10 SP10 |
| 3845 |
| 3846 /* Lord, forgive me this. */ |
| 3847 |
| 3848 SAYF(SET_G1 bSTG bLT bH bSTOP cCYA " process timing " bSTG bH30 bH5 bH2 bHB |
| 3849 bH bSTOP cCYA " overall results " bSTG bH5 bRT "\n"); |
| 3850 |
| 3851 if (dumb_mode) { |
| 3852 |
| 3853 strcpy(tmp, cRST); |
| 3854 |
| 3855 } else { |
| 3856 |
| 3857 /* First queue cycle: don't stop now! */ |
| 3858 if (queue_cycle == 1) strcpy(tmp, cMGN); else |
| 3859 |
| 3860 /* Subsequent cycles, but we're still making finds. */ |
| 3861 if (cycles_wo_finds < 3) strcpy(tmp, cYEL); else |
| 3862 |
| 3863 /* No finds for a long time and no test cases to try. */ |
| 3864 if (cycles_wo_finds > 20 && !pending_not_fuzzed) strcpy(tmp, cLGN); |
| 3865 |
| 3866 /* Default: cautiously OK to stop? */ |
| 3867 else strcpy(tmp, cLBL); |
| 3868 |
| 3869 } |
| 3870 |
| 3871 SAYF(bV bSTOP " run time : " cRST "%-34s " bSTG bV bSTOP |
| 3872 " cycles done : %s%-5s " bSTG bV "\n", |
| 3873 DTD(cur_ms, start_time), tmp, DI(queue_cycle - 1)); |
| 3874 |
| 3875 /* We want to warn people about not seeing new paths after a full cycle, |
| 3876 except when resuming fuzzing or running in non-instrumented mode. */ |
| 3877 |
| 3878 if (!dumb_mode && (last_path_time || resuming_fuzz || queue_cycle == 1 || |
| 3879 in_bitmap || crash_mode)) { |
| 3880 |
| 3881 SAYF(bV bSTOP " last new path : " cRST "%-34s ", |
| 3882 DTD(cur_ms, last_path_time)); |
| 3883 |
| 3884 } else { |
| 3885 |
| 3886 if (dumb_mode) |
| 3887 |
| 3888 SAYF(bV bSTOP " last new path : " cPIN "n/a" cRST |
| 3889 " (non-instrumented mode) "); |
| 3890 |
| 3891 else |
| 3892 |
| 3893 SAYF(bV bSTOP " last new path : " cRST "none yet " cLRD |
| 3894 "(odd, check syntax!) "); |
| 3895 |
| 3896 } |
| 3897 |
| 3898 SAYF(bSTG bV bSTOP " total paths : " cRST "%-5s " bSTG bV "\n", |
| 3899 DI(queued_paths)); |
| 3900 |
| 3901 /* Highlight crashes in red if found, denote going over the KEEP_UNIQUE_CRASH |
| 3902 limit with a '+' appended to the count. */ |
| 3903 |
| 3904 sprintf(tmp, "%s%s", DI(unique_crashes), |
| 3905 (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : ""); |
| 3906 |
| 3907 SAYF(bV bSTOP " last uniq crash : " cRST "%-34s " bSTG bV bSTOP |
| 3908 " uniq crashes : %s%-6s " bSTG bV "\n", |
| 3909 DTD(cur_ms, last_crash_time), unique_crashes ? cLRD : cRST, |
| 3910 tmp); |
| 3911 |
| 3912 sprintf(tmp, "%s%s", DI(unique_hangs), |
| 3913 (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); |
| 3914 |
| 3915 SAYF(bV bSTOP " last uniq hang : " cRST "%-34s " bSTG bV bSTOP |
| 3916 " uniq hangs : " cRST "%-6s " bSTG bV "\n", |
| 3917 DTD(cur_ms, last_hang_time), tmp); |
| 3918 |
| 3919 SAYF(bVR bH bSTOP cCYA " cycle progress " bSTG bH20 bHB bH bSTOP cCYA |
| 3920 " map coverage " bSTG bH bHT bH20 bH2 bH bVL "\n"); |
| 3921 |
| 3922 /* This gets funny because we want to print several variable-length variables |
| 3923 together, but then cram them into a fixed-width field - so we need to |
| 3924 put them in a temporary buffer first. */ |
| 3925 |
| 3926 sprintf(tmp, "%s%s (%0.02f%%)", DI(current_entry), |
| 3927 queue_cur->favored ? "" : "*", |
| 3928 ((double)current_entry * 100) / queued_paths); |
| 3929 |
| 3930 SAYF(bV bSTOP " now processing : " cRST "%-17s " bSTG bV bSTOP, tmp); |
| 3931 |
| 3932 |
| 3933 sprintf(tmp, "%s (%0.02f%%)", DI(t_bytes), t_byte_ratio); |
| 3934 |
| 3935 SAYF(" map density : %s%-21s " bSTG bV "\n", t_byte_ratio > 70 ? cLRD : |
| 3936 ((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp); |
| 3937 |
| 3938 sprintf(tmp, "%s (%0.02f%%)", DI(cur_skipped_paths), |
| 3939 ((double)cur_skipped_paths * 100) / queued_paths); |
| 3940 |
| 3941 SAYF(bV bSTOP " paths timed out : " cRST "%-17s " bSTG bV, tmp); |
| 3942 |
| 3943 sprintf(tmp, "%0.02f bits/tuple", |
| 3944 t_bytes ? (((double)t_bits) / t_bytes) : 0); |
| 3945 |
| 3946 SAYF(bSTOP " count coverage : " cRST "%-21s " bSTG bV "\n", tmp); |
| 3947 |
| 3948 SAYF(bVR bH bSTOP cCYA " stage progress " bSTG bH20 bX bH bSTOP cCYA |
| 3949 " findings in depth " bSTG bH20 bVL "\n"); |
| 3950 |
| 3951 sprintf(tmp, "%s (%0.02f%%)", DI(queued_favored), |
| 3952 ((double)queued_favored) * 100 / queued_paths); |
| 3953 |
| 3954 /* Yeah... it's still going on... halp? */ |
| 3955 |
| 3956 SAYF(bV bSTOP " now trying : " cRST "%-21s " bSTG bV bSTOP |
| 3957 " favored paths : " cRST "%-22s " bSTG bV "\n", stage_name, tmp); |
| 3958 |
| 3959 if (!stage_max) { |
| 3960 |
| 3961 sprintf(tmp, "%s/-", DI(stage_cur)); |
| 3962 |
| 3963 } else { |
| 3964 |
| 3965 sprintf(tmp, "%s/%s (%0.02f%%)", DI(stage_cur), DI(stage_max), |
| 3966 ((double)stage_cur) * 100 / stage_max); |
| 3967 |
| 3968 } |
| 3969 |
| 3970 SAYF(bV bSTOP " stage execs : " cRST "%-21s " bSTG bV bSTOP, tmp); |
| 3971 |
| 3972 sprintf(tmp, "%s (%0.02f%%)", DI(queued_with_cov), |
| 3973 ((double)queued_with_cov) * 100 / queued_paths); |
| 3974 |
| 3975 SAYF(" new edges on : " cRST "%-22s " bSTG bV "\n", tmp); |
| 3976 |
| 3977 sprintf(tmp, "%s (%s%s unique)", DI(total_crashes), DI(unique_crashes), |
| 3978 (unique_crashes >= KEEP_UNIQUE_CRASH) ? "+" : ""); |
| 3979 |
| 3980 if (crash_mode) { |
| 3981 |
| 3982 SAYF(bV bSTOP " total execs : " cRST "%-21s " bSTG bV bSTOP |
| 3983 " new crashes : %s%-22s " bSTG bV "\n", DI(total_execs), |
| 3984 unique_crashes ? cLRD : cRST, tmp); |
| 3985 |
| 3986 } else { |
| 3987 |
| 3988 SAYF(bV bSTOP " total execs : " cRST "%-21s " bSTG bV bSTOP |
| 3989 " total crashes : %s%-22s " bSTG bV "\n", DI(total_execs), |
| 3990 unique_crashes ? cLRD : cRST, tmp); |
| 3991 |
| 3992 } |
| 3993 |
| 3994 /* Show a warning about slow execution. */ |
| 3995 |
| 3996 if (avg_exec < 100) { |
| 3997 |
| 3998 sprintf(tmp, "%s/sec (%s)", DF(avg_exec), avg_exec < 20 ? |
| 3999 "zzzz..." : "slow!"); |
| 4000 |
| 4001 SAYF(bV bSTOP " exec speed : " cLRD "%-21s ", tmp); |
| 4002 |
| 4003 } else { |
| 4004 |
| 4005 sprintf(tmp, "%s/sec", DF(avg_exec)); |
| 4006 SAYF(bV bSTOP " exec speed : " cRST "%-21s ", tmp); |
| 4007 |
| 4008 } |
| 4009 |
| 4010 sprintf(tmp, "%s (%s%s unique)", DI(total_hangs), DI(unique_hangs), |
| 4011 (unique_hangs >= KEEP_UNIQUE_HANG) ? "+" : ""); |
| 4012 |
| 4013 SAYF (bSTG bV bSTOP " total hangs : " cRST "%-22s " bSTG bV "\n", tmp); |
| 4014 |
| 4015 /* Aaaalmost there... hold on! */ |
| 4016 |
| 4017 SAYF(bVR bH cCYA bSTOP " fuzzing strategy yields " bSTG bH10 bH bHT bH10 |
| 4018 bH5 bHB bH bSTOP cCYA " path geometry " bSTG bH5 bH2 bH bVL "\n"); |
| 4019 |
| 4020 if (skip_deterministic) { |
| 4021 |
| 4022 strcpy(tmp, "n/a, n/a, n/a"); |
| 4023 |
| 4024 } else { |
| 4025 |
| 4026 sprintf(tmp, "%s/%s, %s/%s, %s/%s", |
| 4027 DI(stage_finds[STAGE_FLIP1]), DI(stage_cycles[STAGE_FLIP1]), |
| 4028 DI(stage_finds[STAGE_FLIP2]), DI(stage_cycles[STAGE_FLIP2]), |
| 4029 DI(stage_finds[STAGE_FLIP4]), DI(stage_cycles[STAGE_FLIP4])); |
| 4030 |
| 4031 } |
| 4032 |
| 4033 SAYF(bV bSTOP " bit flips : " cRST "%-37s " bSTG bV bSTOP " levels : " |
| 4034 cRST "%-10s " bSTG bV "\n", tmp, DI(max_depth)); |
| 4035 |
| 4036 if (!skip_deterministic) |
| 4037 sprintf(tmp, "%s/%s, %s/%s, %s/%s", |
| 4038 DI(stage_finds[STAGE_FLIP8]), DI(stage_cycles[STAGE_FLIP8]), |
| 4039 DI(stage_finds[STAGE_FLIP16]), DI(stage_cycles[STAGE_FLIP16]), |
| 4040 DI(stage_finds[STAGE_FLIP32]), DI(stage_cycles[STAGE_FLIP32])); |
| 4041 |
| 4042 SAYF(bV bSTOP " byte flips : " cRST "%-37s " bSTG bV bSTOP " pending : " |
| 4043 cRST "%-10s " bSTG bV "\n", tmp, DI(pending_not_fuzzed)); |
| 4044 |
| 4045 if (!skip_deterministic) |
| 4046 sprintf(tmp, "%s/%s, %s/%s, %s/%s", |
| 4047 DI(stage_finds[STAGE_ARITH8]), DI(stage_cycles[STAGE_ARITH8]), |
| 4048 DI(stage_finds[STAGE_ARITH16]), DI(stage_cycles[STAGE_ARITH16]), |
| 4049 DI(stage_finds[STAGE_ARITH32]), DI(stage_cycles[STAGE_ARITH32])); |
| 4050 |
| 4051 SAYF(bV bSTOP " arithmetics : " cRST "%-37s " bSTG bV bSTOP " pend fav : " |
| 4052 cRST "%-10s " bSTG bV "\n", tmp, DI(pending_favored)); |
| 4053 |
| 4054 if (!skip_deterministic) |
| 4055 sprintf(tmp, "%s/%s, %s/%s, %s/%s", |
| 4056 DI(stage_finds[STAGE_INTEREST8]), DI(stage_cycles[STAGE_INTEREST8]), |
| 4057             DI(stage_finds[STAGE_INTEREST16]), DI(stage_cycles[STAGE_INTEREST16]), |
| 4058             DI(stage_finds[STAGE_INTEREST32]), DI(stage_cycles[STAGE_INTEREST32])); |
| 4059 |
| 4060 SAYF(bV bSTOP " known ints : " cRST "%-37s " bSTG bV bSTOP " own finds : " |
| 4061 cRST "%-10s " bSTG bV "\n", tmp, DI(queued_discovered)); |
| 4062 |
| 4063 if (!skip_deterministic) |
| 4064 sprintf(tmp, "%s/%s, %s/%s, %s/%s", |
| 4065 DI(stage_finds[STAGE_EXTRAS_UO]), DI(stage_cycles[STAGE_EXTRAS_UO]), |
| 4066 DI(stage_finds[STAGE_EXTRAS_UI]), DI(stage_cycles[STAGE_EXTRAS_UI]), |
| 4067             DI(stage_finds[STAGE_EXTRAS_AO]), DI(stage_cycles[STAGE_EXTRAS_AO])); |
| 4068 |
| 4069 SAYF(bV bSTOP " dictionary : " cRST "%-37s " bSTG bV bSTOP |
| 4070 " imported : " cRST "%-10s " bSTG bV "\n", tmp, |
| 4071 sync_id ? DI(queued_imported) : (u8*)"n/a"); |
| 4072 |
| 4073 sprintf(tmp, "%s/%s, %s/%s", |
| 4074 DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]), |
| 4075 DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE])); |
| 4076 |
| 4077 SAYF(bV bSTOP " havoc : " cRST "%-37s " bSTG bV bSTOP |
| 4078 " variable : %s%-10s " bSTG bV "\n", tmp, queued_variable ? cLRD : cRST, |
| 4079 no_var_check ? (u8*)"n/a" : DI(queued_variable)); |
| 4080 |
| 4081 if (!bytes_trim_out) { |
| 4082 |
| 4083 sprintf(tmp, "n/a, "); |
| 4084 |
| 4085 } else { |
| 4086 |
| 4087 sprintf(tmp, "%0.02f%%/%s, ", |
| 4088 ((double)(bytes_trim_in - bytes_trim_out)) * 100 / bytes_trim_in, |
| 4089 DI(trim_execs)); |
| 4090 |
| 4091 } |
| 4092 |
| 4093 if (!blocks_eff_total) { |
| 4094 |
| 4095 u8 tmp2[128]; |
| 4096 |
| 4097 sprintf(tmp2, "n/a"); |
| 4098 strcat(tmp, tmp2); |
| 4099 |
| 4100 } else { |
| 4101 |
| 4102 u8 tmp2[128]; |
| 4103 |
| 4104 sprintf(tmp2, "%0.02f%%", |
| 4105 ((double)(blocks_eff_total - blocks_eff_select)) * 100 / |
| 4106 blocks_eff_total); |
| 4107 |
| 4108 strcat(tmp, tmp2); |
| 4109 |
| 4110 } |
| 4111 |
| 4112 SAYF(bV bSTOP " trim : " cRST "%-37s " bSTG bVR bH20 bH2 bH2 bRB "\n" |
| 4113 bLB bH30 bH20 bH2 bH bRB bSTOP cRST RESET_G1, tmp); |
| 4114 |
| 4115 /* Provide some CPU utilization stats. */ |
| 4116 |
| 4117 if (cpu_core_count) { |
| 4118 |
| 4119 double cur_runnable = get_runnable_processes(); |
| 4120 u32 cur_utilization = cur_runnable * 100 / cpu_core_count; |
| 4121 |
| 4122 u8* cpu_color = cCYA; |
| 4123 |
| 4124 /* If we could still run one or more processes, use green. */ |
| 4125 |
| 4126 if (cpu_core_count > 1 && cur_runnable + 1 <= cpu_core_count) |
| 4127 cpu_color = cLGN; |
| 4128 |
| 4129 /* If we're clearly oversubscribed, use red. */ |
| 4130 |
| 4131 if (!no_cpu_meter_red && cur_utilization >= 150) cpu_color = cLRD; |
| 4132 |
| 4133 #ifdef HAVE_AFFINITY |
| 4134 |
| 4135 if (use_affinity) { |
| 4136 |
| 4137 SAYF(SP10 cGRA "[cpu@%02u:%s%3u%%" cGRA "]\r" cRST, |
| 4138 MIN(cpu_aff_child, 99), cpu_color, |
| 4139 MIN(cur_utilization, 999)); |
| 4140 |
| 4141 } else { |
| 4142 |
| 4143 SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, |
| 4144 cpu_color, MIN(cur_utilization, 999)); |
| 4145 |
| 4146 } |
| 4147 #else |
| 4148 |
| 4149 SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST, |
| 4150 cpu_color, MIN(cur_utilization, 999)); |
| 4151 |
| 4152 #endif /* ^HAVE_AFFINITY */ |
| 4153 |
| 4154 } else SAYF("\r"); |
| 4155 |
| 4156 /* Hallelujah! */ |
| 4157 |
| 4158 fflush(0); |
| 4159 |
| 4160 } |
| 4161 |
| 4162 |
| 4163 /* Display quick statistics at the end of processing the input directory, |
| 4164 plus a bunch of warnings. Some calibration stuff also ended up here, |
| 4165 along with several hardcoded constants. Maybe clean up eventually. */ |
| 4166 |
| 4167 static void show_init_stats(void) { |
| 4168 |
| 4169 struct queue_entry* q = queue; |
| 4170 u32 min_bits = 0, max_bits = 0; |
| 4171 u64 min_us = 0, max_us = 0; |
| 4172 u64 avg_us = 0; |
| 4173 u32 max_len = 0; |
| 4174 |
| 4175 if (total_cal_cycles) avg_us = total_cal_us / total_cal_cycles; |
| 4176 |
| 4177 while (q) { |
| 4178 |
| 4179 if (!min_us || q->exec_us < min_us) min_us = q->exec_us; |
| 4180 if (q->exec_us > max_us) max_us = q->exec_us; |
| 4181 |
| 4182 if (!min_bits || q->bitmap_size < min_bits) min_bits = q->bitmap_size; |
| 4183 if (q->bitmap_size > max_bits) max_bits = q->bitmap_size; |
| 4184 |
| 4185 if (q->len > max_len) max_len = q->len; |
| 4186 |
| 4187 q = q->next; |
| 4188 |
| 4189 } |
| 4190 |
| 4191 SAYF("\n"); |
| 4192 |
| 4193 if (avg_us > (qemu_mode ? 50000 : 10000)) |
| 4194 WARNF(cLRD "The target binary is pretty slow! See %s/perf_tips.txt.", |
| 4195 doc_path); |
| 4196 |
| 4197 /* Let's keep things moving with slow binaries. */ |
| 4198 |
| 4199 if (avg_us > 50000) havoc_div = 10; /* 0-19 execs/sec */ |
| 4200 else if (avg_us > 20000) havoc_div = 5; /* 20-49 execs/sec */ |
| 4201 else if (avg_us > 10000) havoc_div = 2; /* 50-100 execs/sec */ |
| 4202 |
| 4203 if (!resuming_fuzz) { |
| 4204 |
| 4205 if (max_len > 50 * 1024) |
| 4206 WARNF(cLRD "Some test cases are huge (%s) - see %s/perf_tips.txt!", |
| 4207 DMS(max_len), doc_path); |
| 4208 else if (max_len > 10 * 1024) |
| 4209 WARNF("Some test cases are big (%s) - see %s/perf_tips.txt.", |
| 4210 DMS(max_len), doc_path); |
| 4211 |
| 4212 if (useless_at_start && !in_bitmap) |
| 4213 WARNF(cLRD "Some test cases look useless. Consider using a smaller set."); |
| 4214 |
| 4215 if (queued_paths > 100) |
| 4216       WARNF(cLRD "You probably have far too many input files! Consider trimming down."); |
| 4217 else if (queued_paths > 20) |
| 4218 WARNF("You have lots of input files; try starting small."); |
| 4219 |
| 4220 } |
| 4221 |
| 4222 OKF("Here are some useful stats:\n\n" |
| 4223 |
| 4224 cGRA " Test case count : " cRST "%u favored, %u variable, %u total\n" |
| 4225       cGRA "    Bitmap range : " cRST "%u to %u bits (average: %0.02f bits)\n" |
| 4226 cGRA " Exec timing : " cRST "%s to %s us (average: %s us)\n", |
| 4227 queued_favored, queued_variable, queued_paths, min_bits, max_bits, |
| 4228       ((double)total_bitmap_size) / (total_bitmap_entries ? total_bitmap_entries : 1), |
| 4229 DI(min_us), DI(max_us), DI(avg_us)); |
| 4230 |
| 4231 if (!timeout_given) { |
| 4232 |
| 4233 /* Figure out the appropriate timeout. The basic idea is: 5x average or |
| 4234 1x max, rounded up to EXEC_TM_ROUND ms and capped at 1 second. |
| 4235 |
| 4236 If the program is slow, the multiplier is lowered to 2x or 3x, because |
| 4237 random scheduler jitter is less likely to have any impact, and because |
| 4238 our patience is wearing thin =) */ |
| 4239 |
| 4240 if (avg_us > 50000) exec_tmout = avg_us * 2 / 1000; |
| 4241 else if (avg_us > 10000) exec_tmout = avg_us * 3 / 1000; |
| 4242 else exec_tmout = avg_us * 5 / 1000; |
| 4243 |
| 4244 exec_tmout = MAX(exec_tmout, max_us / 1000); |
| 4245 exec_tmout = (exec_tmout + EXEC_TM_ROUND) / EXEC_TM_ROUND * EXEC_TM_ROUND; |
| 4246 |
| 4247 if (exec_tmout > EXEC_TIMEOUT) exec_tmout = EXEC_TIMEOUT; |
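| |
|     /* Worked example, assuming stock config.h values (EXEC_TM_ROUND = 20, |
|        EXEC_TIMEOUT = 1000): avg_us = 8000 gives 5 * 8 = 40 ms; with |
|        max_us = 30000, MAX(40, 30) stays 40 ms; rounding then yields |
|        (40 + 20) / 20 * 20 = 60 ms, well under the 1000 ms cap. */ |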
| 4248 |
| 4249 ACTF("No -t option specified, so I'll use exec timeout of %u ms.", |
| 4250 exec_tmout); |
| 4251 |
| 4252 timeout_given = 1; |
| 4253 |
| 4254 } else if (timeout_given == 3) { |
| 4255 |
| 4256 ACTF("Applying timeout settings from resumed session (%u ms).", exec_tmout); |
| 4257 |
| 4258 } |
| 4259 |
| 4260 OKF("All set and ready to roll!"); |
| 4261 |
| 4262 } |
| 4263 |
| 4264 |
| 4265 /* Find the first power of two greater than or equal to val. */ |
| 4266 |
| 4267 static u32 next_p2(u32 val) { |
| 4268 |
| 4269 u32 ret = 1; |
| 4270 while (val > ret) ret <<= 1; |
| 4271 return ret; |
| 4272 |
| 4273 } |
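| |
| /* E.g., next_p2(1) == 1, next_p2(5) == 8, and next_p2(4096) == 4096. */ |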
| 4274 |
| 4275 |
| 4276 /* Trim all new test cases to save cycles when doing deterministic checks. The |
| 4277 trimmer uses power-of-two increments somewhere between 1/16 and 1/1024 of |
| 4278 file size, to keep the stage short and sweet. */ |
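| |
| /* For example, with the stock config.h settings (TRIM_START_STEPS = 16, |
|    TRIM_END_STEPS = 1024, TRIM_MIN_BYTES = 4), a 4000-byte input rounds up |
|    to len_p2 = 4096: trimming starts with 4096 / 16 = 256-byte chunks and |
|    halves down to MAX(4096 / 1024, 4) = 4-byte chunks before giving up. */ |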
| 4279 |
| 4280 static u8 trim_case(char** argv, struct queue_entry* q, u8* in_buf) { |
| 4281 |
| 4282 static u8 tmp[64]; |
| 4283 static u8 clean_trace[MAP_SIZE]; |
| 4284 |
| 4285 u8 needs_write = 0, fault = 0; |
| 4286 u32 trim_exec = 0; |
| 4287 u32 remove_len; |
| 4288 u32 len_p2; |
| 4289 |
| 4290 /* Although the trimmer will be less useful when variable behavior is |
| 4291 detected, it will still work to some extent, so we don't check for |
| 4292 this. */ |
| 4293 |
| 4294 if (q->len < 5) return 0; |
| 4295 |
| 4296 stage_name = tmp; |
| 4297 bytes_trim_in += q->len; |
| 4298 |
| 4299 /* Select initial chunk len, starting with large steps. */ |
| 4300 |
| 4301 len_p2 = next_p2(q->len); |
| 4302 |
| 4303 remove_len = MAX(len_p2 / TRIM_START_STEPS, TRIM_MIN_BYTES); |
| 4304 |
| 4305 /* Continue until the number of steps gets too high or the stepover |
| 4306 gets too small. */ |
| 4307 |
| 4308 while (remove_len >= MAX(len_p2 / TRIM_END_STEPS, TRIM_MIN_BYTES)) { |
| 4309 |
| 4310 u32 remove_pos = remove_len; |
| 4311 |
| 4312 sprintf(tmp, "trim %s/%s", DI(remove_len), DI(remove_len)); |
| 4313 |
| 4314 stage_cur = 0; |
| 4315 stage_max = q->len / remove_len; |
| 4316 |
| 4317 while (remove_pos < q->len) { |
| 4318 |
| 4319 u32 trim_avail = MIN(remove_len, q->len - remove_pos); |
| 4320 u32 cksum; |
| 4321 |
| 4322 write_with_gap(in_buf, q->len, remove_pos, trim_avail); |
| 4323 |
| 4324 fault = run_target(argv); |
| 4325 trim_execs++; |
| 4326 |
| 4327 if (stop_soon || fault == FAULT_ERROR) goto abort_trimming; |
| 4328 |
| 4329 /* Note that we don't keep track of crashes or hangs here; maybe TODO? */ |
| 4330 |
| 4331 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); |
| 4332 |
| 4333 /* If the deletion had no impact on the trace, make it permanent. This |
| 4334 isn't perfect for variable-path inputs, but we're just making a |
| 4335 best-effort pass, so it's not a big deal if we end up with false |
| 4336 negatives every now and then. */ |
| 4337 |
| 4338 if (cksum == q->exec_cksum) { |
| 4339 |
| 4340 u32 move_tail = q->len - remove_pos - trim_avail; |
| 4341 |
| 4342 q->len -= trim_avail; |
| 4343 len_p2 = next_p2(q->len); |
| 4344 |
| 4345 memmove(in_buf + remove_pos, in_buf + remove_pos + trim_avail, |
| 4346 move_tail); |
| 4347 |
| 4348 /* Let's save a clean trace, which will be needed by |
| 4349 update_bitmap_score once we're done with the trimming stuff. */ |
| 4350 |
| 4351 if (!needs_write) { |
| 4352 |
| 4353 needs_write = 1; |
| 4354 memcpy(clean_trace, trace_bits, MAP_SIZE); |
| 4355 |
| 4356 } |
| 4357 |
| 4358 } else remove_pos += remove_len; |
| 4359 |
| 4360 /* Since this can be slow, update the screen every now and then. */ |
| 4361 |
| 4362 if (!(trim_exec++ % stats_update_freq)) show_stats(); |
| 4363 stage_cur++; |
| 4364 |
| 4365 } |
| 4366 |
| 4367 remove_len >>= 1; |
| 4368 |
| 4369 } |
| 4370 |
| 4371 /* If we have made changes to in_buf, we also need to update the on-disk |
| 4372 version of the test case. */ |
| 4373 |
| 4374 if (needs_write) { |
| 4375 |
| 4376 s32 fd; |
| 4377 |
| 4378 unlink(q->fname); /* ignore errors */ |
| 4379 |
| 4380 fd = open(q->fname, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 4381 |
| 4382 if (fd < 0) PFATAL("Unable to create '%s'", q->fname); |
| 4383 |
| 4384 ck_write(fd, in_buf, q->len, q->fname); |
| 4385 close(fd); |
| 4386 |
| 4387 memcpy(trace_bits, clean_trace, MAP_SIZE); |
| 4388 update_bitmap_score(q); |
| 4389 |
| 4390 } |
| 4391 |
| 4392 |
| 4393 |
| 4394 abort_trimming: |
| 4395 |
| 4396 bytes_trim_out += q->len; |
| 4397 return fault; |
| 4398 |
| 4399 } |
| 4400 |
| 4401 |
| 4402 /* Write a modified test case, run program, process results. Handle |
| 4403 error conditions, returning 1 if it's time to bail out. This is |
| 4404 a helper function for fuzz_one(). */ |
| 4405 |
| 4406 EXP_ST u8 common_fuzz_stuff(char** argv, u8* out_buf, u32 len) { |
| 4407 |
| 4408 u8 fault; |
| 4409 |
| 4410 if (post_handler) { |
| 4411 |
| 4412 out_buf = post_handler(out_buf, &len); |
| 4413 if (!out_buf || !len) return 0; |
| 4414 |
| 4415 } |
| 4416 |
| 4417 write_to_testcase(out_buf, len); |
| 4418 |
| 4419 fault = run_target(argv); |
| 4420 |
| 4421 if (stop_soon) return 1; |
| 4422 |
| 4423 if (fault == FAULT_HANG) { |
| 4424 |
| 4425 if (subseq_hangs++ > HANG_LIMIT) { |
| 4426 cur_skipped_paths++; |
| 4427 return 1; |
| 4428 } |
| 4429 |
| 4430 } else subseq_hangs = 0; |
| 4431 |
| 4432 /* Users can hit us with SIGUSR1 to request the current input |
| 4433 to be abandoned. */ |
| 4434 |
| 4435 if (skip_requested) { |
| 4436 |
| 4437 skip_requested = 0; |
| 4438 cur_skipped_paths++; |
| 4439 return 1; |
| 4440 |
| 4441 } |
| 4442 |
| 4443 /* This handles FAULT_ERROR for us: */ |
| 4444 |
| 4445 queued_discovered += save_if_interesting(argv, out_buf, len, fault); |
| 4446 |
| 4447 if (!(stage_cur % stats_update_freq) || stage_cur + 1 == stage_max) |
| 4448 show_stats(); |
| 4449 |
| 4450 return 0; |
| 4451 |
| 4452 } |
| 4453 |
| 4454 |
| 4455 /* Helper to choose random block len for block operations in fuzz_one(). |
| 4456    Doesn't return zero, provided that limit is > 0. */ |
| 4457 |
| 4458 static u32 choose_block_len(u32 limit) { |
| 4459 |
| 4460 u32 min_value, max_value; |
| 4461 u32 rlim = MIN(queue_cycle, 3); |
| 4462 |
| 4463 if (!run_over10m) rlim = 1; |
| 4464 |
| 4465 switch (UR(rlim)) { |
| 4466 |
| 4467 case 0: min_value = 1; |
| 4468 max_value = HAVOC_BLK_SMALL; |
| 4469 break; |
| 4470 |
| 4471 case 1: min_value = HAVOC_BLK_SMALL; |
| 4472 max_value = HAVOC_BLK_MEDIUM; |
| 4473 break; |
| 4474 |
| 4475 default: min_value = HAVOC_BLK_MEDIUM; |
| 4476 max_value = HAVOC_BLK_LARGE; |
| 4477 |
| 4478 |
| 4479 } |
| 4480 |
| 4481 if (min_value >= limit) min_value = 1; |
| 4482 |
| 4483 return min_value + UR(MIN(max_value, limit) - min_value + 1); |
| 4484 |
| 4485 } |
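| |
| /* E.g., with the stock HAVOC_BLK_* limits (32 / 128 / 1500 bytes): for the |
|    first ten minutes rlim stays at 1, so UR(rlim) is always 0 and every block |
|    is drawn from [1, MIN(32, limit)]; later cycles unlock the medium and |
|    large buckets. */ |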
| 4486 |
| 4487 |
| 4488 /* Calculate case desirability score to adjust the length of havoc fuzzing. |
| 4489 A helper function for fuzz_one(). Maybe some of these constants should |
| 4490 go into config.h. */ |
| 4491 |
| 4492 static u32 calculate_score(struct queue_entry* q) { |
| 4493 |
| 4494 u32 avg_exec_us = total_cal_us / total_cal_cycles; |
| 4495 u32 avg_bitmap_size = total_bitmap_size / total_bitmap_entries; |
| 4496 u32 perf_score = 100; |
| 4497 |
| 4498 /* Adjust score based on execution speed of this path, compared to the |
| 4499 global average. Multiplier ranges from 0.1x to 3x. Fast inputs are |
| 4500 less expensive to fuzz, so we're giving them more air time. */ |
| 4501 |
| 4502 if (q->exec_us * 0.1 > avg_exec_us) perf_score = 10; |
| 4503 else if (q->exec_us * 0.25 > avg_exec_us) perf_score = 25; |
| 4504 else if (q->exec_us * 0.5 > avg_exec_us) perf_score = 50; |
| 4505 else if (q->exec_us * 0.75 > avg_exec_us) perf_score = 75; |
| 4506 else if (q->exec_us * 4 < avg_exec_us) perf_score = 300; |
| 4507 else if (q->exec_us * 3 < avg_exec_us) perf_score = 200; |
| 4508 else if (q->exec_us * 2 < avg_exec_us) perf_score = 150; |
| 4509 |
| 4510 /* Adjust score based on bitmap size. The working theory is that better |
| 4511 coverage translates to better targets. Multiplier from 0.25x to 3x. */ |
| 4512 |
| 4513 if (q->bitmap_size * 0.3 > avg_bitmap_size) perf_score *= 3; |
| 4514 else if (q->bitmap_size * 0.5 > avg_bitmap_size) perf_score *= 2; |
| 4515 else if (q->bitmap_size * 0.75 > avg_bitmap_size) perf_score *= 1.5; |
| 4516 else if (q->bitmap_size * 3 < avg_bitmap_size) perf_score *= 0.25; |
| 4517 else if (q->bitmap_size * 2 < avg_bitmap_size) perf_score *= 0.5; |
| 4518 else if (q->bitmap_size * 1.5 < avg_bitmap_size) perf_score *= 0.75; |
| 4519 |
| 4520 /* Adjust score based on handicap. Handicap is proportional to how late |
| 4521 in the game we learned about this path. Latecomers are allowed to run |
| 4522 for a bit longer until they catch up with the rest. */ |
| 4523 |
| 4524 if (q->handicap >= 4) { |
| 4525 |
| 4526 perf_score *= 4; |
| 4527 q->handicap -= 4; |
| 4528 |
| 4529 } else if (q->handicap) { |
| 4530 |
| 4531 perf_score *= 2; |
| 4532 q->handicap--; |
| 4533 |
| 4534 } |
| 4535 |
| 4536 /* Final adjustment based on input depth, under the assumption that fuzzing |
| 4537 deeper test cases is more likely to reveal stuff that can't be |
| 4538 discovered with traditional fuzzers. */ |
| 4539 |
| 4540 switch (q->depth) { |
| 4541 |
| 4542 case 0 ... 3: break; |
| 4543 case 4 ... 7: perf_score *= 2; break; |
| 4544 case 8 ... 13: perf_score *= 4; break; |
| 4545 case 14 ... 25: perf_score *= 6; break; |
| 4546 default: perf_score *= 8; |
| 4547 |
| 4548 } |
| 4549 |
| 4550 /* Make sure that we don't go over limit. */ |
| 4551 |
| 4552 if (perf_score > HAVOC_MAX_MULT * 100) perf_score = HAVOC_MAX_MULT * 100; |
| 4553 |
| 4554 return perf_score; |
| 4555 |
| 4556 } |
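| |
| /* A quick worked example, assuming the stock HAVOC_MAX_MULT of 16: an entry |
|    with exec_us = 100 against avg_exec_us = 500 takes the *4 branch above |
|    (perf_score = 300); an average-sized bitmap and zero handicap leave that |
|    as-is; depth 5 then doubles it to 600, well under the 16 * 100 cap. */ |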
| 4557 |
| 4558 |
| 4559 /* Helper function to see if a particular change (xor_val = old ^ new) could |
| 4560 be a product of deterministic bit flips with the lengths and stepovers |
| 4561 attempted by afl-fuzz. This is used to avoid dupes in some of the |
| 4562 deterministic fuzzing operations that follow bit flips. We also |
| 4563 return 1 if xor_val is zero, which implies that the old and attempted new |
| 4564 values are identical and the exec would be a waste of time. */ |
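| |
| /* Two illustrative cases: 0x34 -> 0x3C gives xor_val = 0x08; three right |
|    shifts leave 1, matching the walking single-bit stage, so we return 1. |
|    By contrast, 0x12 -> 0x21 gives xor_val = 0x33 (sh = 0), which matches |
|    none of the flip patterns, so we return 0 and let the caller proceed. */ |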
| 4565 |
| 4566 static u8 could_be_bitflip(u32 xor_val) { |
| 4567 |
| 4568 u32 sh = 0; |
| 4569 |
| 4570 if (!xor_val) return 1; |
| 4571 |
| 4572   /* Shift right until the first set bit lands at position 0. */ |
| 4573 |
| 4574 while (!(xor_val & 1)) { sh++; xor_val >>= 1; } |
| 4575 |
| 4576 /* 1-, 2-, and 4-bit patterns are OK anywhere. */ |
| 4577 |
| 4578 if (xor_val == 1 || xor_val == 3 || xor_val == 15) return 1; |
| 4579 |
| 4580 /* 8-, 16-, and 32-bit patterns are OK only if shift factor is |
| 4581 divisible by 8, since that's the stepover for these ops. */ |
| 4582 |
| 4583 if (sh & 7) return 0; |
| 4584 |
| 4585 if (xor_val == 0xff || xor_val == 0xffff || xor_val == 0xffffffff) |
| 4586 return 1; |
| 4587 |
| 4588 return 0; |
| 4589 |
| 4590 } |
| 4591 |
| 4592 |
| 4593 /* Helper function to see if a particular value is reachable through |
| 4594 arithmetic operations. Used for similar purposes. */ |
| 4595 |
| 4596 static u8 could_be_arith(u32 old_val, u32 new_val, u8 blen) { |
| 4597 |
| 4598 u32 i, ov = 0, nv = 0, diffs = 0; |
| 4599 |
| 4600 if (old_val == new_val) return 1; |
| 4601 |
| 4602 /* See if one-byte adjustments to any byte could produce this result. */ |
| 4603 |
| 4604 for (i = 0; i < blen; i++) { |
| 4605 |
| 4606 u8 a = old_val >> (8 * i), |
| 4607 b = new_val >> (8 * i); |
| 4608 |
| 4609 if (a != b) { diffs++; ov = a; nv = b; } |
| 4610 |
| 4611 } |
| 4612 |
| 4613 /* If only one byte differs and the values are within range, return 1. */ |
| 4614 |
| 4615 if (diffs == 1) { |
| 4616 |
| 4617 if ((u8)(ov - nv) <= ARITH_MAX || |
| 4618 (u8)(nv - ov) <= ARITH_MAX) return 1; |
| 4619 |
| 4620 } |
| 4621 |
| 4622 if (blen == 1) return 0; |
| 4623 |
| 4624 /* See if two-byte adjustments to any byte would produce this result. */ |
| 4625 |
| 4626 diffs = 0; |
| 4627 |
| 4628 for (i = 0; i < blen / 2; i++) { |
| 4629 |
| 4630 u16 a = old_val >> (16 * i), |
| 4631 b = new_val >> (16 * i); |
| 4632 |
| 4633 if (a != b) { diffs++; ov = a; nv = b; } |
| 4634 |
| 4635 } |
| 4636 |
| 4637 /* If only one word differs and the values are within range, return 1. */ |
| 4638 |
| 4639 if (diffs == 1) { |
| 4640 |
| 4641 if ((u16)(ov - nv) <= ARITH_MAX || |
| 4642 (u16)(nv - ov) <= ARITH_MAX) return 1; |
| 4643 |
| 4644 ov = SWAP16(ov); nv = SWAP16(nv); |
| 4645 |
| 4646 if ((u16)(ov - nv) <= ARITH_MAX || |
| 4647 (u16)(nv - ov) <= ARITH_MAX) return 1; |
| 4648 |
| 4649 } |
| 4650 |
| 4651 /* Finally, let's do the same thing for dwords. */ |
| 4652 |
| 4653 if (blen == 4) { |
| 4654 |
| 4655 if ((u32)(old_val - new_val) <= ARITH_MAX || |
| 4656 (u32)(new_val - old_val) <= ARITH_MAX) return 1; |
| 4657 |
| 4658 new_val = SWAP32(new_val); |
| 4659 old_val = SWAP32(old_val); |
| 4660 |
| 4661 if ((u32)(old_val - new_val) <= ARITH_MAX || |
| 4662 (u32)(new_val - old_val) <= ARITH_MAX) return 1; |
| 4663 |
| 4664 } |
| 4665 |
| 4666 return 0; |
| 4667 |
| 4668 } |
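| |
| /* E.g., old_val = 0x10, new_val = 0x13, blen = 1: exactly one byte differs |
|    and 0x13 - 0x10 = 3 is within ARITH_MAX (35 in the stock config.h), so |
|    the arith stage would have tried this already and we return 1. */ |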
| 4669 |
| 4670 |
| 4671 /* Last but not least, a similar helper to see if insertion of an |
| 4672 interesting integer is redundant given the insertions done for |
| 4673 shorter blen. The last param (check_le) is set if the caller |
| 4674 already executed LE insertion for current blen and wants to see |
| 4675 if BE variant passed in new_val is unique. */ |
| 4676 |
| 4677 static u8 could_be_interest(u32 old_val, u32 new_val, u8 blen, u8 check_le) { |
| 4678 |
| 4679 u32 i, j; |
| 4680 |
| 4681 if (old_val == new_val) return 1; |
| 4682 |
| 4683 /* See if one-byte insertions from interesting_8 over old_val could |
| 4684 produce new_val. */ |
| 4685 |
| 4686 for (i = 0; i < blen; i++) { |
| 4687 |
| 4688 for (j = 0; j < sizeof(interesting_8); j++) { |
| 4689 |
| 4690 u32 tval = (old_val & ~(0xff << (i * 8))) | |
| 4691 (((u8)interesting_8[j]) << (i * 8)); |
| 4692 |
| 4693 if (new_val == tval) return 1; |
| 4694 |
| 4695 } |
| 4696 |
| 4697 } |
| 4698 |
| 4699 /* Bail out unless we're also asked to examine two-byte LE insertions |
| 4700 as a preparation for BE attempts. */ |
| 4701 |
| 4702 if (blen == 2 && !check_le) return 0; |
| 4703 |
| 4704 /* See if two-byte insertions over old_val could give us new_val. */ |
| 4705 |
| 4706 for (i = 0; i < blen - 1; i++) { |
| 4707 |
| 4708 for (j = 0; j < sizeof(interesting_16) / 2; j++) { |
| 4709 |
| 4710 u32 tval = (old_val & ~(0xffff << (i * 8))) | |
| 4711 (((u16)interesting_16[j]) << (i * 8)); |
| 4712 |
| 4713 if (new_val == tval) return 1; |
| 4714 |
| 4715 /* Continue here only if blen > 2. */ |
| 4716 |
| 4717 if (blen > 2) { |
| 4718 |
| 4719 tval = (old_val & ~(0xffff << (i * 8))) | |
| 4720 (SWAP16(interesting_16[j]) << (i * 8)); |
| 4721 |
| 4722 if (new_val == tval) return 1; |
| 4723 |
| 4724 } |
| 4725 |
| 4726 } |
| 4727 |
| 4728 } |
| 4729 |
| 4730 if (blen == 4 && check_le) { |
| 4731 |
| 4732 /* See if four-byte insertions could produce the same result |
| 4733 (LE only). */ |
| 4734 |
| 4735 for (j = 0; j < sizeof(interesting_32) / 4; j++) |
| 4736 if (new_val == (u32)interesting_32[j]) return 1; |
| 4737 |
| 4738 } |
| 4739 |
| 4740 return 0; |
| 4741 |
| 4742 } |
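| |
| /* E.g., old_val = 0x00000000, new_val = 0x0000007f, blen = 1: the stock |
|    interesting_8 table includes 127, so the one-byte loop above matches and |
|    we return 1 instead of burning another exec. */ |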
| 4743 |
| 4744 |
| 4745 /* Take the current entry from the queue, fuzz it for a while. This |
| 4746 function is a tad too long... returns 0 if fuzzed successfully, 1 if |
| 4747 skipped or bailed out. */ |
| 4748 |
| 4749 static u8 fuzz_one(char** argv) { |
| 4750 |
| 4751 s32 len, fd, temp_len, i, j; |
| 4752 u8 *in_buf, *out_buf, *orig_in, *ex_tmp, *eff_map = 0; |
| 4753 u64 havoc_queued, orig_hit_cnt, new_hit_cnt; |
| 4754 u32 splice_cycle = 0, perf_score = 100, orig_perf, prev_cksum, eff_cnt = 1; |
| 4755 |
| 4756 u8 ret_val = 1; |
| 4757 |
| 4758 u8 a_collect[MAX_AUTO_EXTRA]; |
| 4759 u32 a_len = 0; |
| 4760 |
| 4761 #ifdef IGNORE_FINDS |
| 4762 |
| 4763 /* In IGNORE_FINDS mode, skip any entries that weren't in the |
| 4764 initial data set. */ |
| 4765 |
| 4766 if (queue_cur->depth > 1) return 1; |
| 4767 |
| 4768 #else |
| 4769 |
| 4770 if (pending_favored) { |
| 4771 |
| 4772 /* If we have any favored, non-fuzzed new arrivals in the queue, |
| 4773 possibly skip to them at the expense of already-fuzzed or non-favored |
| 4774 cases. */ |
| 4775 |
| 4776 if ((queue_cur->was_fuzzed || !queue_cur->favored) && |
| 4777 UR(100) < SKIP_TO_NEW_PROB) return 1; |
| 4778 |
| 4779 } else if (!dumb_mode && !queue_cur->favored && queued_paths > 10) { |
| 4780 |
| 4781 /* Otherwise, still possibly skip non-favored cases, albeit less often. |
| 4782 The odds of skipping stuff are higher for already-fuzzed inputs and |
| 4783 lower for never-fuzzed entries. */ |
| 4784 |
| 4785 if (queue_cycle > 1 && !queue_cur->was_fuzzed) { |
| 4786 |
| 4787 if (UR(100) < SKIP_NFAV_NEW_PROB) return 1; |
| 4788 |
| 4789 } else { |
| 4790 |
| 4791 if (UR(100) < SKIP_NFAV_OLD_PROB) return 1; |
| 4792 |
| 4793 } |
| 4794 |
| 4795 } |
| 4796 |
| 4797 #endif /* ^IGNORE_FINDS */ |
| 4798 |
| 4799 if (not_on_tty) |
| 4800 ACTF("Fuzzing test case #%u (%u total)...", current_entry, queued_paths); |
| 4801 |
| 4802 /* Map the test case into memory. */ |
| 4803 |
| 4804 fd = open(queue_cur->fname, O_RDONLY); |
| 4805 |
| 4806 if (fd < 0) PFATAL("Unable to open '%s'", queue_cur->fname); |
| 4807 |
| 4808 len = queue_cur->len; |
| 4809 |
| 4810 orig_in = in_buf = mmap(0, len, PROT_READ | PROT_WRITE, MAP_PRIVATE, fd, 0); |
| 4811 |
| 4812 if (orig_in == MAP_FAILED) PFATAL("Unable to mmap '%s'", queue_cur->fname); |
| 4813 |
| 4814 close(fd); |
| 4815 |
| 4816 /* We could mmap() out_buf as MAP_PRIVATE, but we end up clobbering every |
| 4817 single byte anyway, so it wouldn't give us any performance or memory usage |
| 4818 benefits. */ |
| 4819 |
| 4820 out_buf = ck_alloc_nozero(len); |
| 4821 |
| 4822 subseq_hangs = 0; |
| 4823 |
| 4824 cur_depth = queue_cur->depth; |
| 4825 |
| 4826 /******************************************* |
| 4827 * CALIBRATION (only if failed earlier on) * |
| 4828 *******************************************/ |
| 4829 |
| 4830 if (queue_cur->cal_failed) { |
| 4831 |
| 4832 u8 res = FAULT_HANG; |
| 4833 |
| 4834 if (queue_cur->cal_failed < CAL_CHANCES) { |
| 4835 |
| 4836 res = calibrate_case(argv, queue_cur, in_buf, queue_cycle - 1, 0); |
| 4837 |
| 4838 if (res == FAULT_ERROR) |
| 4839 FATAL("Unable to execute target application"); |
| 4840 |
| 4841 } |
| 4842 |
| 4843 if (stop_soon || res != crash_mode) { |
| 4844 cur_skipped_paths++; |
| 4845 goto abandon_entry; |
| 4846 } |
| 4847 |
| 4848 } |
| 4849 |
| 4850 /************ |
| 4851 * TRIMMING * |
| 4852 ************/ |
| 4853 |
| 4854 if (!dumb_mode && !queue_cur->trim_done) { |
| 4855 |
| 4856 u8 res = trim_case(argv, queue_cur, in_buf); |
| 4857 |
| 4858 if (res == FAULT_ERROR) |
| 4859 FATAL("Unable to execute target application"); |
| 4860 |
| 4861 if (stop_soon) { |
| 4862 cur_skipped_paths++; |
| 4863 goto abandon_entry; |
| 4864 } |
| 4865 |
| 4866 /* Don't retry trimming, even if it failed. */ |
| 4867 |
| 4868 queue_cur->trim_done = 1; |
| 4869 |
| 4870 if (len != queue_cur->len) len = queue_cur->len; |
| 4871 |
| 4872 } |
| 4873 |
| 4874 memcpy(out_buf, in_buf, len); |
| 4875 |
| 4876 /********************* |
| 4877 * PERFORMANCE SCORE * |
| 4878 *********************/ |
| 4879 |
| 4880 orig_perf = perf_score = calculate_score(queue_cur); |
| 4881 |
| 4882 /* Skip right away if -d is given, if we have done deterministic fuzzing on |
| 4883 this entry ourselves (was_fuzzed), or if it has gone through deterministic |
| 4884 testing in earlier, resumed runs (passed_det). */ |
| 4885 |
| 4886 if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det) |
| 4887 goto havoc_stage; |
| 4888 |
| 4889 /********************************************* |
| 4890 * SIMPLE BITFLIP (+dictionary construction) * |
| 4891 *********************************************/ |
| 4892 |
| 4893 #define FLIP_BIT(_ar, _b) do { \ |
| 4894 u8* _arf = (u8*)(_ar); \ |
| 4895 u32 _bf = (_b); \ |
| 4896 _arf[(_bf) >> 3] ^= (128 >> ((_bf) & 7)); \ |
| 4897 } while (0) |
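| |
|   /* E.g., FLIP_BIT(out_buf, 10) XORs out_buf[10 >> 3] = out_buf[1] with |
|      128 >> (10 & 7) = 0x20 - i.e., it toggles bit 5 (counting from the |
|      least significant end) of the second byte. */ |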
| 4898 |
| 4899 /* Single walking bit. */ |
| 4900 |
| 4901 stage_short = "flip1"; |
| 4902 stage_max = len << 3; |
| 4903 stage_name = "bitflip 1/1"; |
| 4904 |
| 4905 stage_val_type = STAGE_VAL_NONE; |
| 4906 |
| 4907 orig_hit_cnt = queued_paths + unique_crashes; |
| 4908 |
| 4909 prev_cksum = queue_cur->exec_cksum; |
| 4910 |
| 4911 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { |
| 4912 |
| 4913 stage_cur_byte = stage_cur >> 3; |
| 4914 |
| 4915 FLIP_BIT(out_buf, stage_cur); |
| 4916 |
| 4917 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 4918 |
| 4919 FLIP_BIT(out_buf, stage_cur); |
| 4920 |
| 4921     /* While flipping the least significant bit in every byte, pull off an extra |
| 4922 trick to detect possible syntax tokens. In essence, the idea is that if |
| 4923 you have a binary blob like this: |
| 4924 |
| 4925 xxxxxxxxIHDRxxxxxxxx |
| 4926 |
| 4927 ...and changing the leading and trailing bytes causes variable or no |
| 4928 changes in program flow, but touching any character in the "IHDR" string |
| 4929 always produces the same, distinctive path, it's highly likely that |
| 4930 "IHDR" is an atomically-checked magic value of special significance to |
| 4931 the fuzzed format. |
| 4932 |
| 4933 We do this here, rather than as a separate stage, because it's a nice |
| 4934 way to keep the operation approximately "free" (i.e., no extra execs). |
| 4935 |
| 4936 Empirically, performing the check when flipping the least significant bit |
| 4937 is advantageous, compared to doing it at the time of more disruptive |
| 4938 changes, where the program flow may be affected in more violent ways. |
| 4939 |
| 4940 The caveat is that we won't generate dictionaries in the -d mode or -S |
| 4941 mode - but that's probably a fair trade-off. |
| 4942 |
| 4943 This won't work particularly well with paths that exhibit variable |
| 4944 behavior, but fails gracefully, so we'll carry out the checks anyway. |
| 4945 |
| 4946 */ |
| 4947 |
| 4948 if (!dumb_mode && (stage_cur & 7) == 7) { |
| 4949 |
| 4950 u32 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); |
| 4951 |
| 4952 if (stage_cur == stage_max - 1 && cksum == prev_cksum) { |
| 4953 |
| 4954 /* If at end of file and we are still collecting a string, grab the |
| 4955 final character and force output. */ |
| 4956 |
| 4957 if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; |
| 4958 a_len++; |
| 4959 |
| 4960 if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) |
| 4961 maybe_add_auto(a_collect, a_len); |
| 4962 |
| 4963 } else if (cksum != prev_cksum) { |
| 4964 |
| 4965 /* Otherwise, if the checksum has changed, see if we have something |
| 4966 worthwhile queued up, and collect that if the answer is yes. */ |
| 4967 |
| 4968 if (a_len >= MIN_AUTO_EXTRA && a_len <= MAX_AUTO_EXTRA) |
| 4969 maybe_add_auto(a_collect, a_len); |
| 4970 |
| 4971 a_len = 0; |
| 4972 prev_cksum = cksum; |
| 4973 |
| 4974 } |
| 4975 |
| 4976 /* Continue collecting string, but only if the bit flip actually made |
| 4977 any difference - we don't want no-op tokens. */ |
| 4978 |
| 4979 if (cksum != queue_cur->exec_cksum) { |
| 4980 |
| 4981         if (a_len < MAX_AUTO_EXTRA) a_collect[a_len] = out_buf[stage_cur >> 3]; |
| 4982 a_len++; |
| 4983 |
| 4984 } |
| 4985 |
| 4986 } |
| 4987 |
| 4988 } |
| 4989 |
| 4990 new_hit_cnt = queued_paths + unique_crashes; |
| 4991 |
| 4992 stage_finds[STAGE_FLIP1] += new_hit_cnt - orig_hit_cnt; |
| 4993 stage_cycles[STAGE_FLIP1] += stage_max; |
| 4994 |
| 4995 if (queue_cur->passed_det) goto havoc_stage; |
| 4996 |
| 4997 /* Two walking bits. */ |
| 4998 |
| 4999 stage_name = "bitflip 2/1"; |
| 5000 stage_short = "flip2"; |
| 5001 stage_max = (len << 3) - 1; |
| 5002 |
| 5003 orig_hit_cnt = new_hit_cnt; |
| 5004 |
| 5005 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { |
| 5006 |
| 5007 stage_cur_byte = stage_cur >> 3; |
| 5008 |
| 5009 FLIP_BIT(out_buf, stage_cur); |
| 5010 FLIP_BIT(out_buf, stage_cur + 1); |
| 5011 |
| 5012 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5013 |
| 5014 FLIP_BIT(out_buf, stage_cur); |
| 5015 FLIP_BIT(out_buf, stage_cur + 1); |
| 5016 |
| 5017 } |
| 5018 |
| 5019 new_hit_cnt = queued_paths + unique_crashes; |
| 5020 |
| 5021 stage_finds[STAGE_FLIP2] += new_hit_cnt - orig_hit_cnt; |
| 5022 stage_cycles[STAGE_FLIP2] += stage_max; |
| 5023 |
| 5024 /* Four walking bits. */ |
| 5025 |
| 5026 stage_name = "bitflip 4/1"; |
| 5027 stage_short = "flip4"; |
| 5028 stage_max = (len << 3) - 3; |
| 5029 |
| 5030 orig_hit_cnt = new_hit_cnt; |
| 5031 |
| 5032 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { |
| 5033 |
| 5034 stage_cur_byte = stage_cur >> 3; |
| 5035 |
| 5036 FLIP_BIT(out_buf, stage_cur); |
| 5037 FLIP_BIT(out_buf, stage_cur + 1); |
| 5038 FLIP_BIT(out_buf, stage_cur + 2); |
| 5039 FLIP_BIT(out_buf, stage_cur + 3); |
| 5040 |
| 5041 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5042 |
| 5043 FLIP_BIT(out_buf, stage_cur); |
| 5044 FLIP_BIT(out_buf, stage_cur + 1); |
| 5045 FLIP_BIT(out_buf, stage_cur + 2); |
| 5046 FLIP_BIT(out_buf, stage_cur + 3); |
| 5047 |
| 5048 } |
| 5049 |
| 5050 new_hit_cnt = queued_paths + unique_crashes; |
| 5051 |
| 5052 stage_finds[STAGE_FLIP4] += new_hit_cnt - orig_hit_cnt; |
| 5053 stage_cycles[STAGE_FLIP4] += stage_max; |
| 5054 |
| 5055 /* Effector map setup. These macros calculate: |
| 5056 |
| 5057 EFF_APOS - position of a particular file offset in the map. |
| 5058      EFF_ALEN      - length of a map with a particular number of bytes. |
| 5059 EFF_SPAN_ALEN - map span for a sequence of bytes. |
| 5060 |
| 5061 */ |
| 5062 |
| 5063 #define EFF_APOS(_p) ((_p) >> EFF_MAP_SCALE2) |
| 5064 #define EFF_REM(_x) ((_x) & ((1 << EFF_MAP_SCALE2) - 1)) |
| 5065 #define EFF_ALEN(_l) (EFF_APOS(_l) + !!EFF_REM(_l)) |
| 5066 #define EFF_SPAN_ALEN(_p, _l) (EFF_APOS((_p) + (_l) - 1) - EFF_APOS(_p) + 1) |
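| |
| /* With the stock EFF_MAP_SCALE2 of 3, one eff_map byte covers an 8-byte |
|    block of input: EFF_APOS(17) = 2, EFF_ALEN(17) = 2 + !!1 = 3, and |
|    EFF_SPAN_ALEN(6, 4) = EFF_APOS(9) - EFF_APOS(6) + 1 = 2. */ |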
| 5067 |
| 5068 /* Initialize effector map for the next step (see comments below). Always |
| 5069 flag first and last byte as doing something. */ |
| 5070 |
| 5071 eff_map = ck_alloc(EFF_ALEN(len)); |
| 5072 eff_map[0] = 1; |
| 5073 |
| 5074 if (EFF_APOS(len - 1) != 0) { |
| 5075 eff_map[EFF_APOS(len - 1)] = 1; |
| 5076 eff_cnt++; |
| 5077 } |
| 5078 |
| 5079 /* Walking byte. */ |
| 5080 |
| 5081 stage_name = "bitflip 8/8"; |
| 5082 stage_short = "flip8"; |
| 5083 stage_max = len; |
| 5084 |
| 5085 orig_hit_cnt = new_hit_cnt; |
| 5086 |
| 5087 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { |
| 5088 |
| 5089 stage_cur_byte = stage_cur; |
| 5090 |
| 5091 out_buf[stage_cur] ^= 0xFF; |
| 5092 |
| 5093 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5094 |
| 5095 /* We also use this stage to pull off a simple trick: we identify |
| 5096 bytes that seem to have no effect on the current execution path |
| 5097 even when fully flipped - and we skip them during more expensive |
| 5098 deterministic stages, such as arithmetics or known ints. */ |
| 5099 |
| 5100 if (!eff_map[EFF_APOS(stage_cur)]) { |
| 5101 |
| 5102 u32 cksum; |
| 5103 |
| 5104 /* If in dumb mode or if the file is very short, just flag everything |
| 5105 without wasting time on checksums. */ |
| 5106 |
| 5107 if (!dumb_mode && len >= EFF_MIN_LEN) |
| 5108 cksum = hash32(trace_bits, MAP_SIZE, HASH_CONST); |
| 5109 else |
| 5110 cksum = ~queue_cur->exec_cksum; |
| 5111 |
| 5112 if (cksum != queue_cur->exec_cksum) { |
| 5113 eff_map[EFF_APOS(stage_cur)] = 1; |
| 5114 eff_cnt++; |
| 5115 } |
| 5116 |
| 5117 } |
| 5118 |
| 5119 out_buf[stage_cur] ^= 0xFF; |
| 5120 |
| 5121 } |
| 5122 |
| 5123 /* If the effector map is more than EFF_MAX_PERC dense, just flag the |
| 5124 whole thing as worth fuzzing, since we wouldn't be saving much time |
| 5125 anyway. */ |
| 5126 |
| 5127 if (eff_cnt != EFF_ALEN(len) && |
| 5128 eff_cnt * 100 / EFF_ALEN(len) > EFF_MAX_PERC) { |
| 5129 |
| 5130 memset(eff_map, 1, EFF_ALEN(len)); |
| 5131 |
| 5132 blocks_eff_select += EFF_ALEN(len); |
| 5133 |
| 5134 } else { |
| 5135 |
| 5136 blocks_eff_select += eff_cnt; |
| 5137 |
| 5138 } |
| 5139 |
| 5140 blocks_eff_total += EFF_ALEN(len); |
| 5141 |
| 5142 new_hit_cnt = queued_paths + unique_crashes; |
| 5143 |
| 5144 stage_finds[STAGE_FLIP8] += new_hit_cnt - orig_hit_cnt; |
| 5145 stage_cycles[STAGE_FLIP8] += stage_max; |
| 5146 |
| 5147 /* Two walking bytes. */ |
| 5148 |
| 5149 if (len < 2) goto skip_bitflip; |
| 5150 |
| 5151 stage_name = "bitflip 16/8"; |
| 5152 stage_short = "flip16"; |
| 5153 stage_cur = 0; |
| 5154 stage_max = len - 1; |
| 5155 |
| 5156 orig_hit_cnt = new_hit_cnt; |
| 5157 |
| 5158 for (i = 0; i < len - 1; i++) { |
| 5159 |
| 5160 /* Let's consult the effector map... */ |
| 5161 |
| 5162 if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { |
| 5163 stage_max--; |
| 5164 continue; |
| 5165 } |
| 5166 |
| 5167 stage_cur_byte = i; |
| 5168 |
| 5169 *(u16*)(out_buf + i) ^= 0xFFFF; |
| 5170 |
| 5171 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5172 stage_cur++; |
| 5173 |
| 5174 *(u16*)(out_buf + i) ^= 0xFFFF; |
| 5175 |
| 5176 |
| 5177 } |
| 5178 |
| 5179 new_hit_cnt = queued_paths + unique_crashes; |
| 5180 |
| 5181 stage_finds[STAGE_FLIP16] += new_hit_cnt - orig_hit_cnt; |
| 5182 stage_cycles[STAGE_FLIP16] += stage_max; |
| 5183 |
| 5184 if (len < 4) goto skip_bitflip; |
| 5185 |
| 5186 /* Four walking bytes. */ |
| 5187 |
| 5188 stage_name = "bitflip 32/8"; |
| 5189 stage_short = "flip32"; |
| 5190 stage_cur = 0; |
| 5191 stage_max = len - 3; |
| 5192 |
| 5193 orig_hit_cnt = new_hit_cnt; |
| 5194 |
| 5195 for (i = 0; i < len - 3; i++) { |
| 5196 |
| 5197 /* Let's consult the effector map... */ |
| 5198 if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && |
| 5199 !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { |
| 5200 stage_max--; |
| 5201 continue; |
| 5202 } |
| 5203 |
| 5204 stage_cur_byte = i; |
| 5205 |
| 5206 *(u32*)(out_buf + i) ^= 0xFFFFFFFF; |
| 5207 |
| 5208 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5209 stage_cur++; |
| 5210 |
| 5211 *(u32*)(out_buf + i) ^= 0xFFFFFFFF; |
| 5212 |
| 5213 } |
| 5214 |
| 5215 new_hit_cnt = queued_paths + unique_crashes; |
| 5216 |
| 5217 stage_finds[STAGE_FLIP32] += new_hit_cnt - orig_hit_cnt; |
| 5218 stage_cycles[STAGE_FLIP32] += stage_max; |
| 5219 |
| 5220 skip_bitflip: |
| 5221 |
| 5222 /********************** |
| 5223 * ARITHMETIC INC/DEC * |
| 5224 **********************/ |
| 5225 |
| 5226 /* 8-bit arithmetics. */ |
| 5227 |
| 5228 stage_name = "arith 8/8"; |
| 5229 stage_short = "arith8"; |
| 5230 stage_cur = 0; |
| 5231 stage_max = 2 * len * ARITH_MAX; |
| 5232 |
| 5233 stage_val_type = STAGE_VAL_LE; |
| 5234 |
| 5235 orig_hit_cnt = new_hit_cnt; |
| 5236 |
| 5237 for (i = 0; i < len; i++) { |
| 5238 |
| 5239 u8 orig = out_buf[i]; |
| 5240 |
| 5241 /* Let's consult the effector map... */ |
| 5242 |
| 5243 if (!eff_map[EFF_APOS(i)]) { |
| 5244 stage_max -= 2 * ARITH_MAX; |
| 5245 continue; |
| 5246 } |
| 5247 |
| 5248 stage_cur_byte = i; |
| 5249 |
| 5250 for (j = 1; j <= ARITH_MAX; j++) { |
| 5251 |
| 5252 u8 r = orig ^ (orig + j); |
| 5253 |
| 5254 /* Do arithmetic operations only if the result couldn't be a product |
| 5255 of a bitflip. */ |
| 5256 |
| 5257 if (!could_be_bitflip(r)) { |
| 5258 |
| 5259 stage_cur_val = j; |
| 5260 out_buf[i] = orig + j; |
| 5261 |
| 5262 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5263 stage_cur++; |
| 5264 |
| 5265 } else stage_max--; |
| 5266 |
| 5267 r = orig ^ (orig - j); |
| 5268 |
| 5269 if (!could_be_bitflip(r)) { |
| 5270 |
| 5271 stage_cur_val = -j; |
| 5272 out_buf[i] = orig - j; |
| 5273 |
| 5274 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5275 stage_cur++; |
| 5276 |
| 5277 } else stage_max--; |
| 5278 |
| 5279 out_buf[i] = orig; |
| 5280 |
| 5281 } |
| 5282 |
| 5283 } |
| 5284 |
| 5285 new_hit_cnt = queued_paths + unique_crashes; |
| 5286 |
| 5287 stage_finds[STAGE_ARITH8] += new_hit_cnt - orig_hit_cnt; |
| 5288 stage_cycles[STAGE_ARITH8] += stage_max; |
| 5289 |
| 5290 /* 16-bit arithmetics, both endians. */ |
| 5291 |
| 5292 if (len < 2) goto skip_arith; |
| 5293 |
| 5294 stage_name = "arith 16/8"; |
| 5295 stage_short = "arith16"; |
| 5296 stage_cur = 0; |
| 5297 stage_max = 4 * (len - 1) * ARITH_MAX; |
| 5298 |
| 5299 orig_hit_cnt = new_hit_cnt; |
| 5300 |
| 5301 for (i = 0; i < len - 1; i++) { |
| 5302 |
| 5303 u16 orig = *(u16*)(out_buf + i); |
| 5304 |
| 5305 /* Let's consult the effector map... */ |
| 5306 |
| 5307 if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { |
| 5308 stage_max -= 4 * ARITH_MAX; |
| 5309 continue; |
| 5310 } |
| 5311 |
| 5312 stage_cur_byte = i; |
| 5313 |
| 5314 for (j = 1; j <= ARITH_MAX; j++) { |
| 5315 |
| 5316 u16 r1 = orig ^ (orig + j), |
| 5317 r2 = orig ^ (orig - j), |
| 5318 r3 = orig ^ SWAP16(SWAP16(orig) + j), |
| 5319 r4 = orig ^ SWAP16(SWAP16(orig) - j); |
| 5320 |
| 5321 /* Try little endian addition and subtraction first. Do it only |
| 5322 if the operation would affect more than one byte (hence the |
| 5323 & 0xff overflow checks) and if it couldn't be a product of |
| 5324 a bitflip. */ |
| 5325 |
| 5326 stage_val_type = STAGE_VAL_LE; |
| 5327 |
| 5328 if ((orig & 0xff) + j > 0xff && !could_be_bitflip(r1)) { |
| 5329 |
| 5330 stage_cur_val = j; |
| 5331 *(u16*)(out_buf + i) = orig + j; |
| 5332 |
| 5333 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5334 stage_cur++; |
| 5335 |
| 5336 } else stage_max--; |
| 5337 |
| 5338 if ((orig & 0xff) < j && !could_be_bitflip(r2)) { |
| 5339 |
| 5340 stage_cur_val = -j; |
| 5341 *(u16*)(out_buf + i) = orig - j; |
| 5342 |
| 5343 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5344 stage_cur++; |
| 5345 |
| 5346 } else stage_max--; |
| 5347 |
| 5348 /* Big endian comes next. Same deal. */ |
| 5349 |
| 5350 stage_val_type = STAGE_VAL_BE; |
| 5351 |
| 5352 |
| 5353 if ((orig >> 8) + j > 0xff && !could_be_bitflip(r3)) { |
| 5354 |
| 5355 stage_cur_val = j; |
| 5356 *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) + j); |
| 5357 |
| 5358 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5359 stage_cur++; |
| 5360 |
| 5361 } else stage_max--; |
| 5362 |
| 5363 if ((orig >> 8) < j && !could_be_bitflip(r4)) { |
| 5364 |
| 5365 stage_cur_val = -j; |
| 5366 *(u16*)(out_buf + i) = SWAP16(SWAP16(orig) - j); |
| 5367 |
| 5368 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5369 stage_cur++; |
| 5370 |
| 5371 } else stage_max--; |
| 5372 |
| 5373 *(u16*)(out_buf + i) = orig; |
| 5374 |
| 5375 } |
| 5376 |
| 5377 } |
| 5378 |
| 5379 new_hit_cnt = queued_paths + unique_crashes; |
| 5380 |
| 5381 stage_finds[STAGE_ARITH16] += new_hit_cnt - orig_hit_cnt; |
| 5382 stage_cycles[STAGE_ARITH16] += stage_max; |
| 5383 |
| 5384 /* 32-bit arithmetics, both endians. */ |
| 5385 |
| 5386 if (len < 4) goto skip_arith; |
| 5387 |
| 5388 stage_name = "arith 32/8"; |
| 5389 stage_short = "arith32"; |
| 5390 stage_cur = 0; |
| 5391 stage_max = 4 * (len - 3) * ARITH_MAX; |
| 5392 |
| 5393 orig_hit_cnt = new_hit_cnt; |
| 5394 |
| 5395 for (i = 0; i < len - 3; i++) { |
| 5396 |
| 5397 u32 orig = *(u32*)(out_buf + i); |
| 5398 |
| 5399 /* Let's consult the effector map... */ |
| 5400 |
| 5401 if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && |
| 5402 !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { |
| 5403 stage_max -= 4 * ARITH_MAX; |
| 5404 continue; |
| 5405 } |
| 5406 |
| 5407 stage_cur_byte = i; |
| 5408 |
| 5409 for (j = 1; j <= ARITH_MAX; j++) { |
| 5410 |
| 5411 u32 r1 = orig ^ (orig + j), |
| 5412 r2 = orig ^ (orig - j), |
| 5413 r3 = orig ^ SWAP32(SWAP32(orig) + j), |
| 5414 r4 = orig ^ SWAP32(SWAP32(orig) - j); |
| 5415 |
| 5416 /* Little endian first. Same deal as with 16-bit: we only want to |
| 5417 try if the operation would have effect on more than two bytes. */ |
| 5418 |
| 5419 stage_val_type = STAGE_VAL_LE; |
| 5420 |
| 5421 if ((orig & 0xffff) + j > 0xffff && !could_be_bitflip(r1)) { |
| 5422 |
| 5423 stage_cur_val = j; |
| 5424 *(u32*)(out_buf + i) = orig + j; |
| 5425 |
| 5426 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5427 stage_cur++; |
| 5428 |
| 5429 } else stage_max--; |
| 5430 |
| 5431 if ((orig & 0xffff) < j && !could_be_bitflip(r2)) { |
| 5432 |
| 5433 stage_cur_val = -j; |
| 5434 *(u32*)(out_buf + i) = orig - j; |
| 5435 |
| 5436 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5437 stage_cur++; |
| 5438 |
| 5439 } else stage_max--; |
| 5440 |
| 5441 /* Big endian next. */ |
| 5442 |
| 5443 stage_val_type = STAGE_VAL_BE; |
| 5444 |
| 5445 if ((SWAP32(orig) & 0xffff) + j > 0xffff && !could_be_bitflip(r3)) { |
| 5446 |
| 5447 stage_cur_val = j; |
| 5448 *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) + j); |
| 5449 |
| 5450 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5451 stage_cur++; |
| 5452 |
| 5453 } else stage_max--; |
| 5454 |
| 5455 if ((SWAP32(orig) & 0xffff) < j && !could_be_bitflip(r4)) { |
| 5456 |
| 5457 stage_cur_val = -j; |
| 5458 *(u32*)(out_buf + i) = SWAP32(SWAP32(orig) - j); |
| 5459 |
| 5460 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5461 stage_cur++; |
| 5462 |
| 5463 } else stage_max--; |
| 5464 |
| 5465 *(u32*)(out_buf + i) = orig; |
| 5466 |
| 5467 } |
| 5468 |
| 5469 } |
| 5470 |
| 5471 new_hit_cnt = queued_paths + unique_crashes; |
| 5472 |
| 5473 stage_finds[STAGE_ARITH32] += new_hit_cnt - orig_hit_cnt; |
| 5474 stage_cycles[STAGE_ARITH32] += stage_max; |
| 5475 |
| 5476 skip_arith: |
| 5477 |
| 5478 /********************** |
| 5479 * INTERESTING VALUES * |
| 5480 **********************/ |
| 5481 |
| 5482 stage_name = "interest 8/8"; |
| 5483 stage_short = "int8"; |
| 5484 stage_cur = 0; |
| 5485 stage_max = len * sizeof(interesting_8); |
| 5486 |
| 5487 stage_val_type = STAGE_VAL_LE; |
| 5488 |
| 5489 orig_hit_cnt = new_hit_cnt; |
| 5490 |
| 5491 /* Setting 8-bit integers. */ |
| 5492 |
| 5493 for (i = 0; i < len; i++) { |
| 5494 |
| 5495 u8 orig = out_buf[i]; |
| 5496 |
| 5497 /* Let's consult the effector map... */ |
| 5498 |
| 5499 if (!eff_map[EFF_APOS(i)]) { |
| 5500 stage_max -= sizeof(interesting_8); |
| 5501 continue; |
| 5502 } |
| 5503 |
| 5504 stage_cur_byte = i; |
| 5505 |
| 5506 for (j = 0; j < sizeof(interesting_8); j++) { |
| 5507 |
| 5508 /* Skip if the value could be a product of bitflips or arithmetics. */ |
| 5509 |
| 5510 if (could_be_bitflip(orig ^ (u8)interesting_8[j]) || |
| 5511 could_be_arith(orig, (u8)interesting_8[j], 1)) { |
| 5512 stage_max--; |
| 5513 continue; |
| 5514 } |
| 5515 |
| 5516 stage_cur_val = interesting_8[j]; |
| 5517 out_buf[i] = interesting_8[j]; |
| 5518 |
| 5519 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5520 |
| 5521 out_buf[i] = orig; |
| 5522 stage_cur++; |
| 5523 |
| 5524 } |
| 5525 |
| 5526 } |
| 5527 |
| 5528 new_hit_cnt = queued_paths + unique_crashes; |
| 5529 |
| 5530 stage_finds[STAGE_INTEREST8] += new_hit_cnt - orig_hit_cnt; |
| 5531 stage_cycles[STAGE_INTEREST8] += stage_max; |
| 5532 |
| 5533 /* Setting 16-bit integers, both endians. */ |
| 5534 |
| 5535 if (len < 2) goto skip_interest; |
| 5536 |
| 5537 stage_name = "interest 16/8"; |
| 5538 stage_short = "int16"; |
| 5539 stage_cur = 0; |
| 5540 stage_max = 2 * (len - 1) * (sizeof(interesting_16) >> 1); |
| 5541 |
| 5542 orig_hit_cnt = new_hit_cnt; |
| 5543 |
| 5544 for (i = 0; i < len - 1; i++) { |
| 5545 |
| 5546 u16 orig = *(u16*)(out_buf + i); |
| 5547 |
| 5548 /* Let's consult the effector map... */ |
| 5549 |
| 5550 if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)]) { |
| 5551 stage_max -= sizeof(interesting_16); |
| 5552 continue; |
| 5553 } |
| 5554 |
| 5555 stage_cur_byte = i; |
| 5556 |
| 5557 for (j = 0; j < sizeof(interesting_16) / 2; j++) { |
| 5558 |
| 5559 stage_cur_val = interesting_16[j]; |
| 5560 |
| 5561 /* Skip if this could be a product of a bitflip, arithmetics, |
| 5562 or single-byte interesting value insertion. */ |
| 5563 |
| 5564 if (!could_be_bitflip(orig ^ (u16)interesting_16[j]) && |
| 5565 !could_be_arith(orig, (u16)interesting_16[j], 2) && |
| 5566 !could_be_interest(orig, (u16)interesting_16[j], 2, 0)) { |
| 5567 |
| 5568 stage_val_type = STAGE_VAL_LE; |
| 5569 |
| 5570 *(u16*)(out_buf + i) = interesting_16[j]; |
| 5571 |
| 5572 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5573 stage_cur++; |
| 5574 |
| 5575 } else stage_max--; |
| 5576 |
| 5577 if ((u16)interesting_16[j] != SWAP16(interesting_16[j]) && |
| 5578 !could_be_bitflip(orig ^ SWAP16(interesting_16[j])) && |
| 5579 !could_be_arith(orig, SWAP16(interesting_16[j]), 2) && |
| 5580 !could_be_interest(orig, SWAP16(interesting_16[j]), 2, 1)) { |
| 5581 |
| 5582 stage_val_type = STAGE_VAL_BE; |
| 5583 |
| 5584 *(u16*)(out_buf + i) = SWAP16(interesting_16[j]); |
| 5585 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5586 stage_cur++; |
| 5587 |
| 5588 } else stage_max--; |
| 5589 |
| 5590 } |
| 5591 |
| 5592 *(u16*)(out_buf + i) = orig; |
| 5593 |
| 5594 } |
| 5595 |
| 5596 new_hit_cnt = queued_paths + unique_crashes; |
| 5597 |
| 5598 stage_finds[STAGE_INTEREST16] += new_hit_cnt - orig_hit_cnt; |
| 5599 stage_cycles[STAGE_INTEREST16] += stage_max; |
| 5600 |
| 5601 if (len < 4) goto skip_interest; |
| 5602 |
| 5603 /* Setting 32-bit integers, both endians. */ |
| 5604 |
| 5605 stage_name = "interest 32/8"; |
| 5606 stage_short = "int32"; |
| 5607 stage_cur = 0; |
| 5608 stage_max = 2 * (len - 3) * (sizeof(interesting_32) >> 2); |
| 5609 |
| 5610 orig_hit_cnt = new_hit_cnt; |
| 5611 |
| 5612 for (i = 0; i < len - 3; i++) { |
| 5613 |
| 5614 u32 orig = *(u32*)(out_buf + i); |
| 5615 |
| 5616 /* Let's consult the effector map... */ |
| 5617 |
| 5618 if (!eff_map[EFF_APOS(i)] && !eff_map[EFF_APOS(i + 1)] && |
| 5619 !eff_map[EFF_APOS(i + 2)] && !eff_map[EFF_APOS(i + 3)]) { |
| 5620 stage_max -= sizeof(interesting_32) >> 1; |
| 5621 continue; |
| 5622 } |
| 5623 |
| 5624 stage_cur_byte = i; |
| 5625 |
| 5626 for (j = 0; j < sizeof(interesting_32) / 4; j++) { |
| 5627 |
| 5628 stage_cur_val = interesting_32[j]; |
| 5629 |
| 5630 /* Skip if this could be a product of a bitflip, arithmetics, |
| 5631 or word interesting value insertion. */ |
| 5632 |
| 5633 if (!could_be_bitflip(orig ^ (u32)interesting_32[j]) && |
| 5634 !could_be_arith(orig, interesting_32[j], 4) && |
| 5635 !could_be_interest(orig, interesting_32[j], 4, 0)) { |
| 5636 |
| 5637 stage_val_type = STAGE_VAL_LE; |
| 5638 |
| 5639 *(u32*)(out_buf + i) = interesting_32[j]; |
| 5640 |
| 5641 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5642 stage_cur++; |
| 5643 |
| 5644 } else stage_max--; |
| 5645 |
| 5646 if ((u32)interesting_32[j] != SWAP32(interesting_32[j]) && |
| 5647 !could_be_bitflip(orig ^ SWAP32(interesting_32[j])) && |
| 5648 !could_be_arith(orig, SWAP32(interesting_32[j]), 4) && |
| 5649 !could_be_interest(orig, SWAP32(interesting_32[j]), 4, 1)) { |
| 5650 |
| 5651 stage_val_type = STAGE_VAL_BE; |
| 5652 |
| 5653 *(u32*)(out_buf + i) = SWAP32(interesting_32[j]); |
| 5654 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5655 stage_cur++; |
| 5656 |
| 5657 } else stage_max--; |
| 5658 |
| 5659 } |
| 5660 |
| 5661 *(u32*)(out_buf + i) = orig; |
| 5662 |
| 5663 } |
| 5664 |
| 5665 new_hit_cnt = queued_paths + unique_crashes; |
| 5666 |
| 5667 stage_finds[STAGE_INTEREST32] += new_hit_cnt - orig_hit_cnt; |
| 5668 stage_cycles[STAGE_INTEREST32] += stage_max; |
| 5669 |
| 5670 skip_interest: |
| 5671 |
| 5672 /******************** |
| 5673 * DICTIONARY STUFF * |
| 5674 ********************/ |
| 5675 |
| 5676 if (!extras_cnt) goto skip_user_extras; |
| 5677 |
| 5678 /* Overwrite with user-supplied extras. */ |
| 5679 |
| 5680 stage_name = "user extras (over)"; |
| 5681 stage_short = "ext_UO"; |
| 5682 stage_cur = 0; |
| 5683 stage_max = extras_cnt * len; |
| 5684 |
| 5685 stage_val_type = STAGE_VAL_NONE; |
| 5686 |
| 5687 orig_hit_cnt = new_hit_cnt; |
| 5688 |
| 5689 for (i = 0; i < len; i++) { |
| 5690 |
| 5691 u32 last_len = 0; |
| 5692 |
| 5693 stage_cur_byte = i; |
| 5694 |
| 5695 /* Extras are sorted by size, from smallest to largest. This means |
| 5696 that we don't have to worry about restoring the buffer in |
| 5697 between writes at a particular offset determined by the outer |
| 5698 loop. */ |
| 5699 |
| 5700 for (j = 0; j < extras_cnt; j++) { |
| 5701 |
| 5702 /* Skip extras probabilistically if extras_cnt > MAX_DET_EXTRAS. Also |
| 5703 skip them if there's no room to insert the payload, if the token |
| 5704 is redundant, or if its entire span has no bytes set in the effector |
| 5705 map. */ |
| 5706 |
| 5707 if ((extras_cnt > MAX_DET_EXTRAS && UR(extras_cnt) >= MAX_DET_EXTRAS) || |
| 5708 extras[j].len > len - i || |
| 5709 !memcmp(extras[j].data, out_buf + i, extras[j].len) || |
| 5710 !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, extras[j].len))) { |
| 5711 |
| 5712 stage_max--; |
| 5713 continue; |
| 5714 |
| 5715 } |
| 5716 |
| 5717 last_len = extras[j].len; |
| 5718 memcpy(out_buf + i, extras[j].data, last_len); |
| 5719 |
| 5720 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5721 |
| 5722 stage_cur++; |
| 5723 |
| 5724 } |
| 5725 |
| 5726 /* Restore all the clobbered memory. */ |
| 5727 memcpy(out_buf + i, in_buf + i, last_len); |
| 5728 |
| 5729 } |
| 5730 |
| 5731 new_hit_cnt = queued_paths + unique_crashes; |
| 5732 |
| 5733 stage_finds[STAGE_EXTRAS_UO] += new_hit_cnt - orig_hit_cnt; |
| 5734 stage_cycles[STAGE_EXTRAS_UO] += stage_max; |
| 5735 |
| 5736 /* Insertion of user-supplied extras. */ |
| 5737 |
| 5738 stage_name = "user extras (insert)"; |
| 5739 stage_short = "ext_UI"; |
| 5740 stage_cur = 0; |
| 5741 stage_max = extras_cnt * len; |
| 5742 |
| 5743 orig_hit_cnt = new_hit_cnt; |
| 5744 |
| 5745 ex_tmp = ck_alloc(len + MAX_DICT_FILE); |
| 5746 |
| 5747 for (i = 0; i < len; i++) { |
| 5748 |
| 5749 stage_cur_byte = i; |
| 5750 |
| 5751 for (j = 0; j < extras_cnt; j++) { |
| 5752 |
| 5753 if (len + extras[j].len > MAX_FILE) { |
| 5754 stage_max--; |
| 5755 continue; |
| 5756 } |
| 5757 |
| 5758 /* Insert token */ |
| 5759 memcpy(ex_tmp + i, extras[j].data, extras[j].len); |
| 5760 |
| 5761 /* Copy tail */ |
| 5762 memcpy(ex_tmp + i + extras[j].len, out_buf + i, len - i); |
| 5763 |
| 5764 if (common_fuzz_stuff(argv, ex_tmp, len + extras[j].len)) { |
| 5765 ck_free(ex_tmp); |
| 5766 goto abandon_entry; |
| 5767 } |
| 5768 |
| 5769 stage_cur++; |
| 5770 |
| 5771 } |
| 5772 |
| 5773 /* Copy head */ |
| 5774 ex_tmp[i] = out_buf[i]; |
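| /* The head of ex_tmp is extended one byte per pass of the outer loop, so |
| bytes 0..i are already in place when we insert at position i + 1. */ |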
| 5775 |
| 5776 } |
| 5777 |
| 5778 ck_free(ex_tmp); |
| 5779 |
| 5780 new_hit_cnt = queued_paths + unique_crashes; |
| 5781 |
| 5782 stage_finds[STAGE_EXTRAS_UI] += new_hit_cnt - orig_hit_cnt; |
| 5783 stage_cycles[STAGE_EXTRAS_UI] += stage_max; |
| 5784 |
| 5785 skip_user_extras: |
| 5786 |
| 5787 if (!a_extras_cnt) goto skip_extras; |
| 5788 |
| 5789 stage_name = "auto extras (over)"; |
| 5790 stage_short = "ext_AO"; |
| 5791 stage_cur = 0; |
| 5792 stage_max = MIN(a_extras_cnt, USE_AUTO_EXTRAS) * len; |
| 5793 |
| 5794 stage_val_type = STAGE_VAL_NONE; |
| 5795 |
| 5796 orig_hit_cnt = new_hit_cnt; |
| 5797 |
| 5798 for (i = 0; i < len; i++) { |
| 5799 |
| 5800 u32 last_len = 0; |
| 5801 |
| 5802 stage_cur_byte = i; |
| 5803 |
| 5804 for (j = 0; j < MIN(a_extras_cnt, USE_AUTO_EXTRAS); j++) { |
| 5805 |
| 5806 /* See the comment in the earlier code; extras are sorted by size. */ |
| 5807 |
| 5808 if (a_extras[j].len > len - i || |
| 5809 !memcmp(a_extras[j].data, out_buf + i, a_extras[j].len) || |
| 5810 !memchr(eff_map + EFF_APOS(i), 1, EFF_SPAN_ALEN(i, a_extras[j].len))) { |
| 5811 |
| 5812 stage_max--; |
| 5813 continue; |
| 5814 |
| 5815 } |
| 5816 |
| 5817 last_len = a_extras[j].len; |
| 5818 memcpy(out_buf + i, a_extras[j].data, last_len); |
| 5819 |
| 5820 if (common_fuzz_stuff(argv, out_buf, len)) goto abandon_entry; |
| 5821 |
| 5822 stage_cur++; |
| 5823 |
| 5824 } |
| 5825 |
| 5826 /* Restore all the clobbered memory. */ |
| 5827 memcpy(out_buf + i, in_buf + i, last_len); |
| 5828 |
| 5829 } |
| 5830 |
| 5831 new_hit_cnt = queued_paths + unique_crashes; |
| 5832 |
| 5833 stage_finds[STAGE_EXTRAS_AO] += new_hit_cnt - orig_hit_cnt; |
| 5834 stage_cycles[STAGE_EXTRAS_AO] += stage_max; |
| 5835 |
| 5836 skip_extras: |
| 5837 |
| 5838 /* If we made it here without jumping to havoc_stage or abandon_entry, |
| 5839 we're properly done with the deterministic steps and can mark the entry |
| 5840 as such in the .state/ directory. */ |
| 5841 |
| 5842 if (!queue_cur->passed_det) mark_as_det_done(queue_cur); |
| 5843 |
| 5844 /**************** |
| 5845 * RANDOM HAVOC * |
| 5846 ****************/ |
| 5847 |
| 5848 havoc_stage: |
| 5849 |
| 5850 stage_cur_byte = -1; |
| 5851 |
| 5852 /* The havoc stage mutation code is also invoked when splicing files; if the |
| 5853 splice_cycle variable is set, generate different descriptions and such. */ |
| 5854 |
| 5855 if (!splice_cycle) { |
| 5856 |
| 5857 stage_name = "havoc"; |
| 5858 stage_short = "havoc"; |
| 5859 stage_max = HAVOC_CYCLES * perf_score / havoc_div / 100; |
| 5860 |
| 5861 } else { |
| 5862 |
| 5863 static u8 tmp[32]; |
| 5864 |
| 5865 perf_score = orig_perf; |
| 5866 |
| 5867 sprintf(tmp, "splice %u", splice_cycle); |
| 5868 stage_name = tmp; |
| 5869 stage_short = "splice"; |
| 5870 stage_max = SPLICE_HAVOC * perf_score / havoc_div / 100; |
| 5871 |
| 5872 } |
| 5873 |
| 5874 if (stage_max < HAVOC_MIN) stage_max = HAVOC_MIN; |
| 5875 |
| 5876 temp_len = len; |
| 5877 |
| 5878 orig_hit_cnt = queued_paths + unique_crashes; |
| 5879 |
| 5880 havoc_queued = queued_paths; |
| 5881 |
| 5882 /* We essentially just do several thousand runs (depending on perf_score) |
| 5883 where we take the input file and make random stacked tweaks. */ |
| 5884 |
| 5885 for (stage_cur = 0; stage_cur < stage_max; stage_cur++) { |
| 5886 |
| 5887 u32 use_stacking = 1 << (1 + UR(HAVOC_STACK_POW2)); |
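| /* The number of stacked tweaks is a power of two between 2 and |
| 2^HAVOC_STACK_POW2; drawing the exponent uniformly biases the stage |
| toward lighter stacks of mutations. */ |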
| 5888 |
| 5889 stage_cur_val = use_stacking; |
| 5890 |
| 5891 for (i = 0; i < use_stacking; i++) { |
| 5892 |
| 5893 switch (UR(15 + ((extras_cnt + a_extras_cnt) ? 2 : 0))) { |
| 5894 |
| 5895 case 0: |
| 5896 |
| 5897 /* Flip a single bit somewhere. Spooky! */ |
| 5898 |
| 5899 FLIP_BIT(out_buf, UR(temp_len << 3)); |
| 5900 break; |
| 5901 |
| 5902 case 1: |
| 5903 |
| 5904 /* Set byte to interesting value. */ |
| 5905 |
| 5906 out_buf[UR(temp_len)] = interesting_8[UR(sizeof(interesting_8))]; |
| 5907 break; |
| 5908 |
| 5909 case 2: |
| 5910 |
| 5911 /* Set word to interesting value, randomly choosing endian. */ |
| 5912 |
| 5913 if (temp_len < 2) break; |
| 5914 |
| 5915 if (UR(2)) { |
| 5916 |
| 5917 *(u16*)(out_buf + UR(temp_len - 1)) = |
| 5918 interesting_16[UR(sizeof(interesting_16) >> 1)]; |
| 5919 |
| 5920 } else { |
| 5921 |
| 5922 *(u16*)(out_buf + UR(temp_len - 1)) = SWAP16( |
| 5923 interesting_16[UR(sizeof(interesting_16) >> 1)]); |
| 5924 |
| 5925 } |
| 5926 |
| 5927 break; |
| 5928 |
| 5929 case 3: |
| 5930 |
| 5931 /* Set dword to interesting value, randomly choosing endian. */ |
| 5932 |
| 5933 if (temp_len < 4) break; |
| 5934 |
| 5935 if (UR(2)) { |
| 5936 |
| 5937 *(u32*)(out_buf + UR(temp_len - 3)) = |
| 5938 interesting_32[UR(sizeof(interesting_32) >> 2)]; |
| 5939 |
| 5940 } else { |
| 5941 |
| 5942 *(u32*)(out_buf + UR(temp_len - 3)) = SWAP32( |
| 5943 interesting_32[UR(sizeof(interesting_32) >> 2)]); |
| 5944 |
| 5945 } |
| 5946 |
| 5947 break; |
| 5948 |
| 5949 case 4: |
| 5950 |
| 5951 /* Randomly subtract from byte. */ |
| 5952 |
| 5953 out_buf[UR(temp_len)] -= 1 + UR(ARITH_MAX); |
| 5954 break; |
| 5955 |
| 5956 case 5: |
| 5957 |
| 5958 /* Randomly add to byte. */ |
| 5959 |
| 5960 out_buf[UR(temp_len)] += 1 + UR(ARITH_MAX); |
| 5961 break; |
| 5962 |
| 5963 case 6: |
| 5964 |
| 5965 /* Randomly subtract from word, random endian. */ |
| 5966 |
| 5967 if (temp_len < 2) break; |
| 5968 |
| 5969 if (UR(2)) { |
| 5970 |
| 5971 u32 pos = UR(temp_len - 1); |
| 5972 |
| 5973 *(u16*)(out_buf + pos) -= 1 + UR(ARITH_MAX); |
| 5974 |
| 5975 } else { |
| 5976 |
| 5977 u32 pos = UR(temp_len - 1); |
| 5978 u16 num = 1 + UR(ARITH_MAX); |
| 5979 |
| 5980 *(u16*)(out_buf + pos) = |
| 5981 SWAP16(SWAP16(*(u16*)(out_buf + pos)) - num); |
| 5982 |
| 5983 } |
| 5984 |
| 5985 break; |
| 5986 |
| 5987 case 7: |
| 5988 |
| 5989 /* Randomly add to word, random endian. */ |
| 5990 |
| 5991 if (temp_len < 2) break; |
| 5992 |
| 5993 if (UR(2)) { |
| 5994 |
| 5995 u32 pos = UR(temp_len - 1); |
| 5996 |
| 5997 *(u16*)(out_buf + pos) += 1 + UR(ARITH_MAX); |
| 5998 |
| 5999 } else { |
| 6000 |
| 6001 u32 pos = UR(temp_len - 1); |
| 6002 u16 num = 1 + UR(ARITH_MAX); |
| 6003 |
| 6004 *(u16*)(out_buf + pos) = |
| 6005 SWAP16(SWAP16(*(u16*)(out_buf + pos)) + num); |
| 6006 |
| 6007 } |
| 6008 |
| 6009 break; |
| 6010 |
| 6011 case 8: |
| 6012 |
| 6013 /* Randomly subtract from dword, random endian. */ |
| 6014 |
| 6015 if (temp_len < 4) break; |
| 6016 |
| 6017 if (UR(2)) { |
| 6018 |
| 6019 u32 pos = UR(temp_len - 3); |
| 6020 |
| 6021 *(u32*)(out_buf + pos) -= 1 + UR(ARITH_MAX); |
| 6022 |
| 6023 } else { |
| 6024 |
| 6025 u32 pos = UR(temp_len - 3); |
| 6026 u32 num = 1 + UR(ARITH_MAX); |
| 6027 |
| 6028 *(u32*)(out_buf + pos) = |
| 6029 SWAP32(SWAP32(*(u32*)(out_buf + pos)) - num); |
| 6030 |
| 6031 } |
| 6032 |
| 6033 break; |
| 6034 |
| 6035 case 9: |
| 6036 |
| 6037 /* Randomly add to dword, random endian. */ |
| 6038 |
| 6039 if (temp_len < 4) break; |
| 6040 |
| 6041 if (UR(2)) { |
| 6042 |
| 6043 u32 pos = UR(temp_len - 3); |
| 6044 |
| 6045 *(u32*)(out_buf + pos) += 1 + UR(ARITH_MAX); |
| 6046 |
| 6047 } else { |
| 6048 |
| 6049 u32 pos = UR(temp_len - 3); |
| 6050 u32 num = 1 + UR(ARITH_MAX); |
| 6051 |
| 6052 *(u32*)(out_buf + pos) = |
| 6053 SWAP32(SWAP32(*(u32*)(out_buf + pos)) + num); |
| 6054 |
| 6055 } |
| 6056 |
| 6057 break; |
| 6058 |
| 6059 case 10: |
| 6060 |
| 6061 /* Just set a random byte to a random value. Because, |
| 6062 why not. We use XOR with 1-255 to eliminate the |
| 6063 possibility of a no-op. */ |
| 6064 |
| 6065 out_buf[UR(temp_len)] ^= 1 + UR(255); |
| 6066 break; |
| 6067 |
| 6068 case 11 ... 12: { |
| 6069 |
| 6070 /* Delete bytes. We're making this a bit more likely |
| 6071 than insertion (the next option) in hopes of keeping |
| 6072 files reasonably small. */ |
| 6073 |
| 6074 u32 del_from, del_len; |
| 6075 |
| 6076 if (temp_len < 2) break; |
| 6077 |
| 6078 /* Don't delete too much. */ |
| 6079 |
| 6080 del_len = choose_block_len(temp_len - 1); |
| 6081 |
| 6082 del_from = UR(temp_len - del_len + 1); |
| 6083 |
| 6084 memmove(out_buf + del_from, out_buf + del_from + del_len, |
| 6085 temp_len - del_from - del_len); |
| 6086 |
| 6087 temp_len -= del_len; |
| 6088 |
| 6089 break; |
| 6090 |
| 6091 } |
| 6092 |
| 6093 case 13: |
| 6094 |
| 6095 if (temp_len + HAVOC_BLK_LARGE < MAX_FILE) { |
| 6096 |
| 6097 /* Clone bytes (75%) or insert a block of constant bytes (25%). */ |
| 6098 |
| 6099 u32 clone_from, clone_to, clone_len; |
| 6100 u8* new_buf; |
| 6101 |
| 6102 clone_len = choose_block_len(temp_len); |
| 6103 |
| 6104 clone_from = UR(temp_len - clone_len + 1); |
| 6105 clone_to = UR(temp_len); |
| 6106 |
| 6107 new_buf = ck_alloc_nozero(temp_len + clone_len); |
| 6108 |
| 6109 /* Head */ |
| 6110 |
| 6111 memcpy(new_buf, out_buf, clone_to); |
| 6112 |
| 6113 /* Inserted part */ |
| 6114 |
| 6115 if (UR(4)) |
| 6116 memcpy(new_buf + clone_to, out_buf + clone_from, clone_len); |
| 6117 else |
| 6118 memset(new_buf + clone_to, UR(256), clone_len); |
| 6119 |
| 6120 /* Tail */ |
| 6121 memcpy(new_buf + clone_to + clone_len, out_buf + clone_to, |
| 6122 temp_len - clone_to); |
| 6123 |
| 6124 ck_free(out_buf); |
| 6125 out_buf = new_buf; |
| 6126 temp_len += clone_len; |
| 6127 |
| 6128 } |
| 6129 |
| 6130 break; |
| 6131 |
| 6132 case 14: { |
| 6133 |
| 6134 /* Overwrite bytes with a randomly selected chunk (75%) or fixed |
| 6135 bytes (25%). */ |
| 6136 |
| 6137 u32 copy_from, copy_to, copy_len; |
| 6138 |
| 6139 if (temp_len < 2) break; |
| 6140 |
| 6141 copy_len = choose_block_len(temp_len - 1); |
| 6142 |
| 6143 copy_from = UR(temp_len - copy_len + 1); |
| 6144 copy_to = UR(temp_len - copy_len + 1); |
| 6145 |
| 6146 if (UR(4)) { |
| 6147 |
| 6148 if (copy_from != copy_to) |
| 6149 memmove(out_buf + copy_to, out_buf + copy_from, copy_len); |
| 6150 |
| 6151 } else memset(out_buf + copy_to, UR(256), copy_len); |
| 6152 |
| 6153 break; |
| 6154 |
| 6155 } |
| 6156 |
| 6157 /* Values 15 and 16 can be selected only if there are any extras |
| 6158 present in the dictionaries. */ |
| 6159 |
| 6160 case 15: { |
| 6161 |
| 6162 /* Overwrite bytes with an extra. */ |
| 6163 |
| 6164 if (!extras_cnt || (a_extras_cnt && UR(2))) { |
| 6165 |
| 6166 /* No user-specified extras or odds in our favor. Let's use an |
| 6167 auto-detected one. */ |
| 6168 |
| 6169 u32 use_extra = UR(a_extras_cnt); |
| 6170 u32 extra_len = a_extras[use_extra].len; |
| 6171 u32 insert_at; |
| 6172 |
| 6173 if (extra_len > temp_len) break; |
| 6174 |
| 6175 insert_at = UR(temp_len - extra_len + 1); |
| 6176 memcpy(out_buf + insert_at, a_extras[use_extra].data, extra_len); |
| 6177 |
| 6178 } else { |
| 6179 |
| 6180 /* No auto extras or odds in our favor. Use the dictionary. */ |
| 6181 |
| 6182 u32 use_extra = UR(extras_cnt); |
| 6183 u32 extra_len = extras[use_extra].len; |
| 6184 u32 insert_at; |
| 6185 |
| 6186 if (extra_len > temp_len) break; |
| 6187 |
| 6188 insert_at = UR(temp_len - extra_len + 1); |
| 6189 memcpy(out_buf + insert_at, extras[use_extra].data, extra_len); |
| 6190 |
| 6191 } |
| 6192 |
| 6193 break; |
| 6194 |
| 6195 } |
| 6196 |
| 6197 case 16: { |
| 6198 |
| 6199 u32 use_extra, extra_len, insert_at = UR(temp_len); |
| 6200 u8* new_buf; |
| 6201 |
| 6202 /* Insert an extra. Do the same dice-rolling stuff as for the |
| 6203 previous case. */ |
| 6204 |
| 6205 if (!extras_cnt || (a_extras_cnt && UR(2))) { |
| 6206 |
| 6207 use_extra = UR(a_extras_cnt); |
| 6208 extra_len = a_extras[use_extra].len; |
| 6209 |
| 6210 if (temp_len + extra_len >= MAX_FILE) break; |
| 6211 |
| 6212 new_buf = ck_alloc_nozero(temp_len + extra_len); |
| 6213 |
| 6214 /* Head */ |
| 6215 memcpy(new_buf, out_buf, insert_at); |
| 6216 |
| 6217 /* Inserted part */ |
| 6218 memcpy(new_buf + insert_at, a_extras[use_extra].data, extra_len); |
| 6219 |
| 6220 } else { |
| 6221 |
| 6222 use_extra = UR(extras_cnt); |
| 6223 extra_len = extras[use_extra].len; |
| 6224 |
| 6225 if (temp_len + extra_len >= MAX_FILE) break; |
| 6226 |
| 6227 new_buf = ck_alloc_nozero(temp_len + extra_len); |
| 6228 |
| 6229 /* Head */ |
| 6230 memcpy(new_buf, out_buf, insert_at); |
| 6231 |
| 6232 /* Inserted part */ |
| 6233 memcpy(new_buf + insert_at, extras[use_extra].data, extra_len); |
| 6234 |
| 6235 } |
| 6236 |
| 6237 /* Tail */ |
| 6238 memcpy(new_buf + insert_at + extra_len, out_buf + insert_at, |
| 6239 temp_len - insert_at); |
| 6240 |
| 6241 ck_free(out_buf); |
| 6242 out_buf = new_buf; |
| 6243 temp_len += extra_len; |
| 6244 |
| 6245 break; |
| 6246 |
| 6247 } |
| 6248 |
| 6249 } |
| 6250 |
| 6251 } |
| 6252 |
| 6253 if (common_fuzz_stuff(argv, out_buf, temp_len)) |
| 6254 goto abandon_entry; |
| 6255 |
| 6256 /* out_buf might have been mangled a bit, so let's restore it to its |
| 6257 original size and shape. */ |
| 6258 |
| 6259 if (temp_len < len) out_buf = ck_realloc(out_buf, len); |
| 6260 temp_len = len; |
| 6261 memcpy(out_buf, in_buf, len); |
| 6262 |
| 6263 /* If we're finding new stuff, let's run for a bit longer, limits |
| 6264 permitting. */ |
| 6265 |
| 6266 if (queued_paths != havoc_queued) { |
| 6267 |
| 6268 if (perf_score <= HAVOC_MAX_MULT * 100) { |
| 6269 stage_max *= 2; |
| 6270 perf_score *= 2; |
| 6271 } |
| 6272 |
| 6273 havoc_queued = queued_paths; |
| 6274 |
| 6275 } |
| 6276 |
| 6277 } |
| 6278 |
| 6279 new_hit_cnt = queued_paths + unique_crashes; |
| 6280 |
| 6281 if (!splice_cycle) { |
| 6282 stage_finds[STAGE_HAVOC] += new_hit_cnt - orig_hit_cnt; |
| 6283 stage_cycles[STAGE_HAVOC] += stage_max; |
| 6284 } else { |
| 6285 stage_finds[STAGE_SPLICE] += new_hit_cnt - orig_hit_cnt; |
| 6286 stage_cycles[STAGE_SPLICE] += stage_max; |
| 6287 } |
| 6288 |
| 6289 #ifndef IGNORE_FINDS |
| 6290 |
| 6291 /************ |
| 6292 * SPLICING * |
| 6293 ************/ |
| 6294 |
| 6295 /* This is a last-resort strategy triggered by a full round with no findings. |
| 6296 It takes the current input file, randomly selects another input, and |
| 6297 splices them together at some offset, then relies on the havoc |
| 6298 code to mutate that blob. */ |
| 6299 |
| 6300 retry_splicing: |
| 6301 |
| 6302 if (use_splicing && splice_cycle++ < SPLICE_CYCLES && |
| 6303 queued_paths > 1 && queue_cur->len > 1) { |
| 6304 |
| 6305 struct queue_entry* target; |
| 6306 u32 tid, split_at; |
| 6307 u8* new_buf; |
| 6308 s32 f_diff, l_diff; |
| 6309 |
| 6310 /* First of all, if we've modified in_buf for havoc, let's clean that |
| 6311 up... */ |
| 6312 |
| 6313 if (in_buf != orig_in) { |
| 6314 ck_free(in_buf); |
| 6315 in_buf = orig_in; |
| 6316 len = queue_cur->len; |
| 6317 } |
| 6318 |
| 6319 /* Pick a random queue entry and seek to it. Don't splice with yourself. */ |
| 6320 |
| 6321 do { tid = UR(queued_paths); } while (tid == current_entry); |
| 6322 |
| 6323 splicing_with = tid; |
| 6324 target = queue; |
| 6325 |
| 6326 while (tid >= 100) { target = target->next_100; tid -= 100; } |
| 6327 while (tid--) target = target->next; |
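| /* The next_100 shortcut pointers let us hop through the singly-linked |
| queue a hundred entries at a time before walking the remainder. */ |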
| 6328 |
| 6329 /* Make sure that the target has a reasonable length. */ |
| 6330 |
| 6331 while (target && (target->len < 2 || target == queue_cur)) { |
| 6332 target = target->next; |
| 6333 splicing_with++; |
| 6334 } |
| 6335 |
| 6336 if (!target) goto retry_splicing; |
| 6337 |
| 6338 /* Read the testcase into a new buffer. */ |
| 6339 |
| 6340 fd = open(target->fname, O_RDONLY); |
| 6341 |
| 6342 if (fd < 0) PFATAL("Unable to open '%s'", target->fname); |
| 6343 |
| 6344 new_buf = ck_alloc_nozero(target->len); |
| 6345 |
| 6346 ck_read(fd, new_buf, target->len, target->fname); |
| 6347 |
| 6348 close(fd); |
| 6349 |
| 6350 /* Find a suitable splicing location, somewhere between the first and |
| 6351 the last differing byte. Bail out if the difference is just a single |
| 6352 byte or so. */ |
| 6353 |
| 6354 locate_diffs(in_buf, new_buf, MIN(len, target->len), &f_diff, &l_diff); |
| 6355 |
| 6356 if (f_diff < 0 || l_diff < 2 || f_diff == l_diff) { |
| 6357 ck_free(new_buf); |
| 6358 goto retry_splicing; |
| 6359 } |
| 6360 |
| 6361 /* Split somewhere between the first and last differing byte. */ |
| 6362 |
| 6363 split_at = f_diff + UR(l_diff - f_diff); |
| 6364 |
| 6365 /* Do the thing. */ |
| 6366 |
| 6367 len = target->len; |
| 6368 memcpy(new_buf, in_buf, split_at); |
| 6369 in_buf = new_buf; |
| 6370 |
| 6371 ck_free(out_buf); |
| 6372 out_buf = ck_alloc_nozero(len); |
| 6373 memcpy(out_buf, in_buf, len); |
| 6374 |
| 6375 goto havoc_stage; |
| 6376 |
| 6377 } |
| 6378 |
| 6379 #endif /* !IGNORE_FINDS */ |
| 6380 |
| 6381 ret_val = 0; |
| 6382 |
| 6383 abandon_entry: |
| 6384 |
| 6385 splicing_with = -1; |
| 6386 |
| 6387 /* Update pending_not_fuzzed count if we made it through the calibration |
| 6388 cycle and have not seen this entry before. */ |
| 6389 |
| 6390 if (!stop_soon && !queue_cur->cal_failed && !queue_cur->was_fuzzed) { |
| 6391 queue_cur->was_fuzzed = 1; |
| 6392 pending_not_fuzzed--; |
| 6393 if (queue_cur->favored) pending_favored--; |
| 6394 } |
| 6395 |
| 6396 munmap(orig_in, queue_cur->len); |
| 6397 |
| 6398 if (in_buf != orig_in) ck_free(in_buf); |
| 6399 ck_free(out_buf); |
| 6400 ck_free(eff_map); |
| 6401 |
| 6402 return ret_val; |
| 6403 |
| 6404 #undef FLIP_BIT |
| 6405 |
| 6406 } |
| 6407 |
| 6408 |
| 6409 /* Grab interesting test cases from other fuzzers. */ |
| 6410 |
| 6411 static void sync_fuzzers(char** argv) { |
| 6412 |
| 6413 DIR* sd; |
| 6414 struct dirent* sd_ent; |
| 6415 u32 sync_cnt = 0; |
| 6416 |
| 6417 sd = opendir(sync_dir); |
| 6418 if (!sd) PFATAL("Unable to open '%s'", sync_dir); |
| 6419 |
| 6420 stage_max = stage_cur = 0; |
| 6421 cur_depth = 0; |
| 6422 |
| 6423 /* Look at the entries created for every other fuzzer in the sync directory. */ |
| 6424 |
| 6425 while ((sd_ent = readdir(sd))) { |
| 6426 |
| 6427 static u8 stage_tmp[128]; |
| 6428 |
| 6429 DIR* qd; |
| 6430 struct dirent* qd_ent; |
| 6431 u8 *qd_path, *qd_synced_path; |
| 6432 u32 min_accept = 0, next_min_accept; |
| 6433 |
| 6434 s32 id_fd; |
| 6435 |
| 6436 /* Skip dot files and our own output directory. */ |
| 6437 |
| 6438 if (sd_ent->d_name[0] == '.' || !strcmp(sync_id, sd_ent->d_name)) continue; |
| 6439 |
| 6440 /* Skip anything that doesn't have a queue/ subdirectory. */ |
| 6441 |
| 6442 qd_path = alloc_printf("%s/%s/queue", sync_dir, sd_ent->d_name); |
| 6443 |
| 6444 if (!(qd = opendir(qd_path))) { |
| 6445 ck_free(qd_path); |
| 6446 continue; |
| 6447 } |
| 6448 |
| 6449 /* Retrieve the ID of the last seen test case. */ |
| 6450 |
| 6451 qd_synced_path = alloc_printf("%s/.synced/%s", out_dir, sd_ent->d_name); |
| 6452 |
| 6453 id_fd = open(qd_synced_path, O_RDWR | O_CREAT, 0600); |
| 6454 |
| 6455 if (id_fd < 0) PFATAL("Unable to create '%s'", qd_synced_path); |
| 6456 |
| 6457 if (read(id_fd, &min_accept, sizeof(u32)) > 0) |
| 6458 lseek(id_fd, 0, SEEK_SET); |
| 6459 |
| 6460 next_min_accept = min_accept; |
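| /* min_accept is a high-water mark kept in .synced/<source_fuzzer>: only |
| queue entries with IDs at or above it are inspected, so every sync pass |
| looks just at test cases we haven't seen before. */ |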
| 6461 |
| 6462 /* Show stats */ |
| 6463 |
| 6464 sprintf(stage_tmp, "sync %u", ++sync_cnt); |
| 6465 stage_name = stage_tmp; |
| 6466 stage_cur = 0; |
| 6467 stage_max = 0; |
| 6468 |
| 6469 /* For every file queued by this fuzzer, parse ID and see if we have looked at |
| 6470 it before; exec a test case if not. */ |
| 6471 |
| 6472 while ((qd_ent = readdir(qd))) { |
| 6473 |
| 6474 u8* path; |
| 6475 s32 fd; |
| 6476 struct stat st; |
| 6477 |
| 6478 if (qd_ent->d_name[0] == '.' || |
| 6479 sscanf(qd_ent->d_name, CASE_PREFIX "%06u", &syncing_case) != 1 || |
| 6480 syncing_case < min_accept) continue; |
| 6481 |
| 6482 /* OK, sounds like a new one. Let's give it a try. */ |
| 6483 |
| 6484 if (syncing_case >= next_min_accept) |
| 6485 next_min_accept = syncing_case + 1; |
| 6486 |
| 6487 path = alloc_printf("%s/%s", qd_path, qd_ent->d_name); |
| 6488 |
| 6489 fd = open(path, O_RDONLY); |
| 6490 if (fd < 0) PFATAL("Unable to open '%s'", path); |
| 6491 |
| 6492 if (fstat(fd, &st)) PFATAL("fstat() failed"); |
| 6493 |
| 6494 /* Ignore zero-sized or oversized files. */ |
| 6495 |
| 6496 if (st.st_size && st.st_size <= MAX_FILE) { |
| 6497 |
| 6498 u8 fault; |
| 6499 u8* mem = mmap(0, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0); |
| 6500 |
| 6501 if (mem == MAP_FAILED) PFATAL("Unable to mmap '%s'", path); |
| 6502 |
| 6503 /* See what happens. We rely on save_if_interesting() to catch major |
| 6504 errors and save the test case. */ |
| 6505 |
| 6506 write_to_testcase(mem, st.st_size); |
| 6507 |
| 6508 fault = run_target(argv); |
| 6509 |
| 6510 if (stop_soon) return; |
| 6511 |
| 6512 syncing_party = sd_ent->d_name; |
| 6513 queued_imported += save_if_interesting(argv, mem, st.st_size, fault); |
| 6514 syncing_party = 0; |
| 6515 |
| 6516 munmap(mem, st.st_size); |
| 6517 |
| 6518 if (!(stage_cur++ % stats_update_freq)) show_stats(); |
| 6519 |
| 6520 } |
| 6521 |
| 6522 ck_free(path); |
| 6523 close(fd); |
| 6524 |
| 6525 } |
| 6526 |
| 6527 ck_write(id_fd, &next_min_accept, sizeof(u32), qd_synced_path); |
| 6528 |
| 6529 close(id_fd); |
| 6530 closedir(qd); |
| 6531 ck_free(qd_path); |
| 6532 ck_free(qd_synced_path); |
| 6533 |
| 6534 } |
| 6535 |
| 6536 closedir(sd); |
| 6537 |
| 6538 } |
| 6539 |
| 6540 |
| 6541 /* Handle stop signal (Ctrl-C, etc). */ |
| 6542 |
| 6543 static void handle_stop_sig(int sig) { |
| 6544 |
| 6545 stop_soon = 1; |
| 6546 |
| 6547 if (child_pid > 0) kill(child_pid, SIGKILL); |
| 6548 if (forksrv_pid > 0) kill(forksrv_pid, SIGKILL); |
| 6549 |
| 6550 } |
| 6551 |
| 6552 |
| 6553 /* Handle skip request (SIGUSR1). */ |
| 6554 |
| 6555 static void handle_skipreq(int sig) { |
| 6556 |
| 6557 skip_requested = 1; |
| 6558 |
| 6559 } |
| 6560 |
| 6561 /* Handle timeout (SIGALRM). */ |
| 6562 |
| 6563 static void handle_timeout(int sig) { |
| 6564 |
| 6565 if (child_pid > 0) { |
| 6566 |
| 6567 child_timed_out = 1; |
| 6568 kill(child_pid, SIGKILL); |
| 6569 |
| 6570 } else if (child_pid == -1 && forksrv_pid > 0) { |
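| /* child_pid == -1 means no target is currently running, so the timeout |
| must have fired while waiting on the fork server itself (e.g. during |
| its startup handshake). */ |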
| 6571 |
| 6572 child_timed_out = 1; |
| 6573 kill(forksrv_pid, SIGKILL); |
| 6574 |
| 6575 } |
| 6576 |
| 6577 } |
| 6578 |
| 6579 |
| 6580 /* Do a PATH search and find the target binary to make sure that it exists |
| 6581 and isn't a shell script - a common and painful mistake. We also check for |
| 6582 a valid ELF header and for evidence of AFL instrumentation. */ |
| 6583 |
| 6584 EXP_ST void check_binary(u8* fname) { |
| 6585 |
| 6586 u8* env_path = 0; |
| 6587 struct stat st; |
| 6588 |
| 6589 s32 fd; |
| 6590 u8* f_data; |
| 6591 u32 f_len = 0; |
| 6592 |
| 6593 ACTF("Validating target binary..."); |
| 6594 |
| 6595 if (strchr(fname, '/') || !(env_path = getenv("PATH"))) { |
| 6596 |
| 6597 target_path = ck_strdup(fname); |
| 6598 if (stat(target_path, &st) || !S_ISREG(st.st_mode) || |
| 6599 !(st.st_mode & 0111) || (f_len = st.st_size) < 4) |
| 6600 FATAL("Program '%s' not found or not executable", fname); |
| 6601 |
| 6602 } else { |
| 6603 |
| 6604 while (env_path) { |
| 6605 |
| 6606 u8 *cur_elem, *delim = strchr(env_path, ':'); |
| 6607 |
| 6608 if (delim) { |
| 6609 |
| 6610 cur_elem = ck_alloc(delim - env_path + 1); |
| 6611 memcpy(cur_elem, env_path, delim - env_path); |
| 6612 delim++; |
| 6613 |
| 6614 } else cur_elem = ck_strdup(env_path); |
| 6615 |
| 6616 env_path = delim; |
| 6617 |
| 6618 if (cur_elem[0]) |
| 6619 target_path = alloc_printf("%s/%s", cur_elem, fname); |
| 6620 else |
| 6621 target_path = ck_strdup(fname); |
| 6622 |
| 6623 ck_free(cur_elem); |
| 6624 |
| 6625 if (!stat(target_path, &st) && S_ISREG(st.st_mode) && |
| 6626 (st.st_mode & 0111) && (f_len = st.st_size) >= 4) break; |
| 6627 |
| 6628 ck_free(target_path); |
| 6629 target_path = 0; |
| 6630 |
| 6631 } |
| 6632 |
| 6633 if (!target_path) FATAL("Program '%s' not found or not executable", fname); |
| 6634 |
| 6635 } |
| 6636 |
| 6637 if (getenv("AFL_SKIP_BIN_CHECK")) return; |
| 6638 |
| 6639 /* Check for blatant user errors. */ |
| 6640 |
| 6641 if ((!strncmp(target_path, "/tmp/", 5) && !strchr(target_path + 5, '/')) || |
| 6642 (!strncmp(target_path, "/var/tmp/", 9) && !strchr(target_path + 9, '/'))) |
| 6643 FATAL("Please don't keep binaries in /tmp or /var/tmp"); |
| 6644 |
| 6645 fd = open(target_path, O_RDONLY); |
| 6646 |
| 6647 if (fd < 0) PFATAL("Unable to open '%s'", target_path); |
| 6648 |
| 6649 f_data = mmap(0, f_len, PROT_READ, MAP_PRIVATE, fd, 0); |
| 6650 |
| 6651 if (f_data == MAP_FAILED) PFATAL("Unable to mmap file '%s'", target_path); |
| 6652 |
| 6653 close(fd); |
| 6654 |
| 6655 if (f_data[0] == '#' && f_data[1] == '!') { |
| 6656 |
| 6657 SAYF("\n" cLRD "[-] " cRST |
| 6658 "Oops, the target binary looks like a shell script. Some build systems
will\n" |
| 6659 " sometimes generate shell stubs for dynamically linked programs; tr
y static\n" |
| 6660 " library mode (./configure --disable-shared) if that's the case.\n\
n" |
| 6661 |
| 6662 " Another possible cause is that you are actually trying to use a sh
ell\n" |
| 6663 " wrapper around the fuzzed component. Invoking shell can slow down
the\n" |
| 6664 " fuzzing process by a factor of 20x or more; it's best to write the
wrapper\n" |
| 6665 " in a compiled language instead.\n"); |
| 6666 |
| 6667 FATAL("Program '%s' is a shell script", target_path); |
| 6668 |
| 6669 } |
| 6670 |
| 6671 #ifndef __APPLE__ |
| 6672 |
| 6673 if (f_data[0] != 0x7f || memcmp(f_data + 1, "ELF", 3)) |
| 6674 FATAL("Program '%s' is not an ELF binary", target_path); |
| 6675 |
| 6676 #else |
| 6677 |
| 6678 if (f_data[0] != 0xCF || f_data[1] != 0xFA || f_data[2] != 0xED) |
| 6679 FATAL("Program '%s' is not a 64-bit Mach-O binary", target_path); |
| 6680 |
| 6681 #endif /* ^!__APPLE__ */ |
| 6682 |
| 6683 if (!qemu_mode && !dumb_mode && |
| 6684 !memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { |
| 6685 |
| 6686 SAYF("\n" cLRD "[-] " cRST |
| 6687 "Looks like the target binary is not instrumented! The fuzzer depends o
n\n" |
| 6688 " compile-time instrumentation to isolate interesting test cases whi
le\n" |
| 6689 " mutating the input data. For more information, and for tips on how
to\n" |
| 6690 " instrument binaries, please see %s/README.\n\n" |
| 6691 |
| 6692 " When source code is not available, you may be able to leverage QEM
U\n" |
| 6693 " mode support. Consult the README for tips on how to enable this.\n
" |
| 6694 |
| 6695 " (It is also possible to use afl-fuzz as a traditional, \"dumb\" fu
zzer.\n" |
| 6696 " For that, you can use the -n option - but expect much worse result
s.)\n", |
| 6697 doc_path); |
| 6698 |
| 6699 FATAL("No instrumentation detected"); |
| 6700 |
| 6701 } |
| 6702 |
| 6703 if (qemu_mode && |
| 6704 memmem(f_data, f_len, SHM_ENV_VAR, strlen(SHM_ENV_VAR) + 1)) { |
| 6705 |
| 6706 SAYF("\n" cLRD "[-] " cRST |
| 6707 "This program appears to be instrumented with afl-gcc, but is being run
in\n" |
| 6708 " QEMU mode (-Q). This is probably not what you want - this setup wi
ll be\n" |
| 6709 " slow and offer no practical benefits.\n"); |
| 6710 |
| 6711 FATAL("Instrumentation found in -Q mode"); |
| 6712 |
| 6713 } |
| 6714 |
| 6715 if (memmem(f_data, f_len, "libasan.so", 10) || |
| 6716 memmem(f_data, f_len, "__msan_init", 11)) uses_asan = 1; |
| 6717 |
| 6718 /* Detect persistent & deferred init signatures in the binary. */ |
| 6719 |
| 6720 if (memmem(f_data, f_len, PERSIST_SIG, strlen(PERSIST_SIG) + 1)) { |
| 6721 |
| 6722 OKF(cPIN "Persistent mode binary detected."); |
| 6723 setenv(PERSIST_ENV_VAR, "1", 1); |
| 6724 no_var_check = 1; |
| 6725 |
| 6726 } else if (getenv("AFL_PERSISTENT")) { |
| 6727 |
| 6728 WARNF("AFL_PERSISTENT is no longer supported and may misbehave!"); |
| 6729 |
| 6730 } |
| 6731 |
| 6732 if (memmem(f_data, f_len, DEFER_SIG, strlen(DEFER_SIG) + 1)) { |
| 6733 |
| 6734 OKF(cPIN "Deferred forkserver binary detected."); |
| 6735 setenv(DEFER_ENV_VAR, "1", 1); |
| 6736 |
| 6737 } else if (getenv("AFL_DEFER_FORKSRV")) { |
| 6738 |
| 6739 WARNF("AFL_DEFER_FORKSRV is no longer supported and may misbehave!"); |
| 6740 |
| 6741 } |
| 6742 |
| 6743 if (munmap(f_data, f_len)) PFATAL("munmap() failed"); |
| 6744 |
| 6745 } |
| 6746 |
| 6747 |
| 6748 /* Trim and possibly create a banner for the run. */ |
| 6749 |
| 6750 static void fix_up_banner(u8* name) { |
| 6751 |
| 6752 if (!use_banner) { |
| 6753 |
| 6754 if (sync_id) { |
| 6755 |
| 6756 use_banner = sync_id; |
| 6757 |
| 6758 } else { |
| 6759 |
| 6760 u8* trim = strrchr(name, '/'); |
| 6761 if (!trim) use_banner = name; else use_banner = trim + 1; |
| 6762 |
| 6763 } |
| 6764 |
| 6765 } |
| 6766 |
| 6767 if (strlen(use_banner) > 40) { |
| 6768 |
| 6769 u8* tmp = ck_alloc(44); |
| 6770 sprintf(tmp, "%.40s...", use_banner); |
| 6771 use_banner = tmp; |
| 6772 |
| 6773 } |
| 6774 |
| 6775 } |
| 6776 |
| 6777 |
| 6778 /* Check if we're running on a TTY. */ |
| 6779 |
| 6780 static void check_if_tty(void) { |
| 6781 |
| 6782 struct winsize ws; |
| 6783 |
| 6784 if (ioctl(1, TIOCGWINSZ, &ws)) { |
| 6785 |
| 6786 if (errno == ENOTTY) { |
| 6787 OKF("Looks like we're not running on a tty, so I'll be a bit less verbose.
"); |
| 6788 not_on_tty = 1; |
| 6789 } |
| 6790 |
| 6791 return; |
| 6792 } |
| 6793 |
| 6794 } |
| 6795 |
| 6796 |
| 6797 /* Check terminal dimensions after resize. */ |
| 6798 |
| 6799 static void check_term_size(void) { |
| 6800 |
| 6801 struct winsize ws; |
| 6802 |
| 6803 term_too_small = 0; |
| 6804 |
| 6805 if (ioctl(1, TIOCGWINSZ, &ws)) return; |
| 6806 |
| 6807 if (ws.ws_row < 25 || ws.ws_col < 80) term_too_small = 1; |
| 6808 |
| 6809 } |
| 6810 |
| 6811 |
| 6812 |
| 6813 /* Display usage hints. */ |
| 6814 |
| 6815 static void usage(u8* argv0) { |
| 6816 |
| 6817 SAYF("\n%s [ options ] -- /path/to/fuzzed_app [ ... ]\n\n" |
| 6818 |
| 6819 "Required parameters:\n\n" |
| 6820 |
| 6821 " -i dir - input directory with test cases\n" |
| 6822 " -o dir - output directory for fuzzer findings\n\n" |
| 6823 |
| 6824 "Execution control settings:\n\n" |
| 6825 |
| 6826 " -f file - location read by the fuzzed program (stdin)\n" |
| 6827 " -t msec - timeout for each run (auto-scaled, 50-%u ms)\n" |
| 6828 " -m megs - memory limit for child process (%u MB)\n" |
| 6829 " -Q - use binary-only instrumentation (QEMU mode)\n\n" |
| 6830 |
| 6831 "Fuzzing behavior settings:\n\n" |
| 6832 |
| 6833 " -d - quick & dirty mode (skips deterministic steps)\n" |
| 6834 " -n - fuzz without instrumentation (dumb mode)\n" |
| 6835 " -x dir - optional fuzzer dictionary (see README)\n\n" |
| 6836 |
| 6837 "Other stuff:\n\n" |
| 6838 |
| 6839 " -T text - text banner to show on the screen\n" |
| 6840 " -M / -S id - distributed mode (see parallel_fuzzing.txt)\n" |
| 6841 #ifdef HAVE_AFFINITY |
| 6842 " -Z core_id - set CPU affinity (see perf_tips.txt)\n" |
| 6843 #endif /* HAVE_AFFINITY */ |
| 6844 " -C - crash exploration mode (the peruvian rabbit thing)\n\n
" |
| 6845 |
| 6846 "For additional tips, please consult %s/README.\n\n", |
| 6847 |
| 6848 argv0, EXEC_TIMEOUT, MEM_LIMIT, doc_path); |
| 6849 |
| 6850 exit(1); |
| 6851 |
| 6852 } |
| 6853 |
| 6854 |
| 6855 /* Prepare output directories and fds. */ |
| 6856 |
| 6857 EXP_ST void setup_dirs_fds(void) { |
| 6858 |
| 6859 u8* tmp; |
| 6860 s32 fd; |
| 6861 |
| 6862 ACTF("Setting up output directories..."); |
| 6863 |
| 6864 if (sync_id && mkdir(sync_dir, 0700) && errno != EEXIST) |
| 6865 PFATAL("Unable to create '%s'", sync_dir); |
| 6866 |
| 6867 if (mkdir(out_dir, 0700)) { |
| 6868 |
| 6869 if (errno != EEXIST) PFATAL("Unable to create '%s'", out_dir); |
| 6870 |
| 6871 maybe_delete_out_dir(); |
| 6872 |
| 6873 } else { |
| 6874 |
| 6875 if (in_place_resume) |
| 6876 FATAL("Resume attempted but old output directory not found"); |
| 6877 |
| 6878 out_dir_fd = open(out_dir, O_RDONLY); |
| 6879 |
| 6880 #ifndef __sun |
| 6881 |
| 6882 if (out_dir_fd < 0 || flock(out_dir_fd, LOCK_EX | LOCK_NB)) |
| 6883 PFATAL("Unable to flock() output directory."); |
| 6884 |
| 6885 #endif /* !__sun */ |
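| /* The exclusive, non-blocking flock() above keeps a second afl-fuzz |
| instance from being pointed at the same -o path while we're running. */ |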
| 6886 |
| 6887 } |
| 6888 |
| 6889 /* Queue directory for any starting & discovered paths. */ |
| 6890 |
| 6891 tmp = alloc_printf("%s/queue", out_dir); |
| 6892 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6893 ck_free(tmp); |
| 6894 |
| 6895 /* Top-level directory for queue metadata used for session |
| 6896 resume and related tasks. */ |
| 6897 |
| 6898 tmp = alloc_printf("%s/queue/.state/", out_dir); |
| 6899 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6900 ck_free(tmp); |
| 6901 |
| 6902 /* Directory for flagging queue entries that went through |
| 6903 deterministic fuzzing in the past. */ |
| 6904 |
| 6905 tmp = alloc_printf("%s/queue/.state/deterministic_done/", out_dir); |
| 6906 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6907 ck_free(tmp); |
| 6908 |
| 6909 /* Directory with the auto-selected dictionary entries. */ |
| 6910 |
| 6911 tmp = alloc_printf("%s/queue/.state/auto_extras/", out_dir); |
| 6912 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6913 ck_free(tmp); |
| 6914 |
| 6915 /* The set of paths currently deemed redundant. */ |
| 6916 |
| 6917 tmp = alloc_printf("%s/queue/.state/redundant_edges/", out_dir); |
| 6918 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6919 ck_free(tmp); |
| 6920 |
| 6921 /* The set of paths showing variable behavior. */ |
| 6922 |
| 6923 tmp = alloc_printf("%s/queue/.state/variable_behavior/", out_dir); |
| 6924 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6925 ck_free(tmp); |
| 6926 |
| 6927 /* Sync directory for keeping track of cooperating fuzzers. */ |
| 6928 |
| 6929 if (sync_id) { |
| 6930 |
| 6931 tmp = alloc_printf("%s/.synced/", out_dir); |
| 6932 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6933 ck_free(tmp); |
| 6934 |
| 6935 } |
| 6936 |
| 6937 /* All recorded crashes. */ |
| 6938 |
| 6939 tmp = alloc_printf("%s/crashes", out_dir); |
| 6940 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6941 ck_free(tmp); |
| 6942 |
| 6943 /* All recorded hangs. */ |
| 6944 |
| 6945 tmp = alloc_printf("%s/hangs", out_dir); |
| 6946 if (mkdir(tmp, 0700)) PFATAL("Unable to create '%s'", tmp); |
| 6947 ck_free(tmp); |
| 6948 |
| 6949 /* Generally useful file descriptors. */ |
| 6950 |
| 6951 dev_null_fd = open("/dev/null", O_RDWR); |
| 6952 if (dev_null_fd < 0) PFATAL("Unable to open /dev/null"); |
| 6953 |
| 6954 dev_urandom_fd = open("/dev/urandom", O_RDONLY); |
| 6955 if (dev_urandom_fd < 0) PFATAL("Unable to open /dev/urandom"); |
| 6956 |
| 6957 /* Gnuplot output file. */ |
| 6958 |
| 6959 tmp = alloc_printf("%s/plot_data", out_dir); |
| 6960 fd = open(tmp, O_WRONLY | O_CREAT | O_EXCL, 0600); |
| 6961 if (fd < 0) PFATAL("Unable to create '%s'", tmp); |
| 6962 ck_free(tmp); |
| 6963 |
| 6964 plot_file = fdopen(fd, "w"); |
| 6965 if (!plot_file) PFATAL("fdopen() failed"); |
| 6966 |
| 6967 fprintf(plot_file, "# unix_time, cycles_done, cur_path, paths_total, " |
| 6968 "pending_total, pending_favs, map_size, unique_crashes, " |
| 6969 "unique_hangs, max_depth, execs_per_sec\n"); |
| 6970 /* ignore errors */ |
| 6971 |
| 6972 } |
| 6973 |
| 6974 |
| 6975 /* Set up the output file for fuzzed data, if not using -f. */ |
| 6976 |
| 6977 EXP_ST void setup_stdio_file(void) { |
| 6978 |
| 6979 u8* fn = alloc_printf("%s/.cur_input", out_dir); |
| 6980 |
| 6981 unlink(fn); /* Ignore errors */ |
| 6982 |
| 6983 out_fd = open(fn, O_RDWR | O_CREAT | O_EXCL, 0600); |
| 6984 |
| 6985 if (out_fd < 0) PFATAL("Unable to create '%s'", fn); |
| 6986 |
| 6987 ck_free(fn); |
| 6988 |
| 6989 } |
| 6990 |
| 6991 |
| 6992 /* Make sure that core dumps don't go to a program. */ |
| 6993 |
| 6994 static void check_crash_handling(void) { |
| 6995 |
| 6996 #ifdef __APPLE__ |
| 6997 |
| 6998 /* Yuck! There appears to be no simple C API to query for the state of |
| 6999 loaded daemons on MacOS X, and I'm a bit hesitant to do something |
| 7000 more sophisticated, such as disabling crash reporting via Mach ports, |
| 7001 until I get a box to test the code. So, for now, we check for crash |
| 7002 reporting the awful way. */ |
| 7003 |
| 7004 if (system("launchctl list 2>/dev/null | grep -q '\\.ReportCrash$'")) return; |
| 7005 |
| 7006 SAYF("\n" cLRD "[-] " cRST |
| 7007 "Whoops, your system is configured to forward crash notifications to an\n
" |
| 7008 " external crash reporting utility. This will cause issues due to the\
n" |
| 7009 " extended delay between the fuzzed binary malfunctioning and this fac
t\n" |
| 7010 " being relayed to the fuzzer via the standard waitpid() API.\n\n" |
| 7011 " To avoid having crashes misinterpreted as hangs, please run the\n" |
| 7012 " following commands:\n\n" |
| 7013 |
| 7014 " SL=/System/Library; PL=com.apple.ReportCrash\n" |
| 7015 " launchctl unload -w ${SL}/LaunchAgents/${PL}.plist\n" |
| 7016 " sudo launchctl unload -w ${SL}/LaunchDaemons/${PL}.Root.plist\n"); |
| 7017 |
| 7018 if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) |
| 7019 FATAL("Crash reporter detected"); |
| 7020 |
| 7021 #else |
| 7022 |
| 7023 /* This is Linux specific, but I don't think there's anything equivalent on |
| 7024 *BSD, so we can just let it slide for now. */ |
| 7025 |
| 7026 s32 fd = open("/proc/sys/kernel/core_pattern", O_RDONLY); |
| 7027 u8 fchar; |
| 7028 |
| 7029 if (fd < 0) return; |
| 7030 |
| 7031 ACTF("Checking core_pattern..."); |
| 7032 |
| 7033 if (read(fd, &fchar, 1) == 1 && fchar == '|') { |
| 7034 |
| 7035 SAYF("\n" cLRD "[-] " cRST |
| 7036 "Hmm, your system is configured to send core dump notifications to an\n
" |
| 7037 " external utility. This will cause issues: there will be an extende
d delay\n" |
| 7038 " between stumbling upon a crash and having this information relayed
to the\n" |
| 7039 " fuzzer via the standard waitpid() API.\n\n" |
| 7040 |
| 7041 " To avoid having crashes misinterpreted as hangs, please log in as
root\n" |
| 7042 " and temporarily modify /proc/sys/kernel/core_pattern, like so:\n\n
" |
| 7043 |
| 7044 " echo core >/proc/sys/kernel/core_pattern\n"); |
| 7045 |
| 7046 if (!getenv("AFL_I_DONT_CARE_ABOUT_MISSING_CRASHES")) |
| 7047 FATAL("Pipe at the beginning of 'core_pattern'"); |
| 7048 |
| 7049 } |
| 7050 |
| 7051 close(fd); |
| 7052 |
| 7053 #endif /* ^__APPLE__ */ |
| 7054 |
| 7055 } |
| 7056 |
| 7057 |
| 7058 /* Check CPU governor. */ |
| 7059 |
| 7060 static void check_cpu_governor(void) { |
| 7061 |
| 7062 FILE* f; |
| 7063 u8 tmp[128]; |
| 7064 u64 min = 0, max = 0; |
| 7065 |
| 7066 if (getenv("AFL_SKIP_CPUFREQ")) return; |
| 7067 |
| 7068 f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_governor", "r"); |
| 7069 if (!f) return; |
| 7070 |
| 7071 ACTF("Checking CPU scaling governor..."); |
| 7072 |
| 7073 if (!fgets(tmp, 128, f)) PFATAL("fgets() failed"); |
| 7074 |
| 7075 fclose(f); |
| 7076 |
| 7077 if (!strncmp(tmp, "perf", 4)) return; |
| 7078 |
| 7079 f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_min_freq", "r"); |
| 7080 |
| 7081 if (f) { |
| 7082 if (fscanf(f, "%llu", &min) != 1) min = 0; |
| 7083 fclose(f); |
| 7084 } |
| 7085 |
| 7086 f = fopen("/sys/devices/system/cpu/cpu0/cpufreq/scaling_max_freq", "r"); |
| 7087 |
| 7088 if (f) { |
| 7089 if (fscanf(f, "%llu", &max) != 1) max = 0; |
| 7090 fclose(f); |
| 7091 } |
| 7092 |
| 7093 if (min == max) return; |
| 7094 |
| 7095 SAYF("\n" cLRD "[-] " cRST |
| 7096 "Whoops, your system uses on-demand CPU frequency scaling, adjusted\n" |
| 7097 " between %llu and %llu MHz. Unfortunately, the scaling algorithm in t
he\n" |
| 7098 " kernel is imperfect and can miss the short-lived processes spawned b
y\n" |
| 7099 " afl-fuzz. To keep things moving, run these commands as root:\n\n" |
| 7100 |
| 7101 " cd /sys/devices/system/cpu\n" |
| 7102 " echo performance | tee cpu*/cpufreq/scaling_governor\n\n" |
| 7103 |
| 7104 " You can later go back to the original state by replacing 'performanc
e' with\n" |
| 7105 " 'ondemand'. If you don't want to change the settings, set AFL_SKIP_C
PUFREQ\n" |
| 7106 " to make afl-fuzz skip this check - but expect some performance drop.
\n", |
| 7107 min / 1024, max / 1024); |
| 7108 |
| 7109 FATAL("Suboptimal CPU scaling governor"); |
| 7110 |
| 7111 } |
| 7112 |
| 7113 |
| 7114 /* Count the number of logical CPU cores. */ |
| 7115 |
| 7116 static void get_core_count(void) { |
| 7117 |
| 7118 u32 cur_runnable = 0; |
| 7119 |
| 7120 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) |
| 7121 |
| 7122 size_t s = sizeof(cpu_core_count); |
| 7123 |
| 7124 /* On *BSD systems, we can just use a sysctl to get the number of CPUs. */ |
| 7125 |
| 7126 #ifdef __APPLE__ |
| 7127 |
| 7128 if (sysctlbyname("hw.logicalcpu", &cpu_core_count, &s, NULL, 0) < 0) |
| 7129 return; |
| 7130 |
| 7131 #else |
| 7132 |
| 7133 int s_name[2] = { CTL_HW, HW_NCPU }; |
| 7134 |
| 7135 if (sysctl(s_name, 2, &cpu_core_count, &s, NULL, 0) < 0) return; |
| 7136 |
| 7137 #endif /* ^__APPLE__ */ |
| 7138 |
| 7139 #else |
| 7140 |
| 7141 if (!cpu_core_count) { |
| 7142 |
| 7143 /* On Linux, a simple way is to look at /proc/stat, especially since we'd |
| 7144 be parsing it anyway for other reasons later on. But do this only if |
| 7145 cpu_core_count hasn't been obtained before as a result of specifying |
| 7146 -Z. */ |
| 7147 |
| 7148 FILE* f = fopen("/proc/stat", "r"); |
| 7149 u8 tmp[1024]; |
| 7150 |
| 7151 if (!f) return; |
| 7152 |
| 7153 while (fgets(tmp, sizeof(tmp), f)) |
| 7154 if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++; |
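| /* Count only the per-core "cpu0", "cpu1", ... lines; the isdigit() check |
| skips the aggregate "cpu" line at the top of /proc/stat. */ |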
| 7155 |
| 7156 fclose(f); |
| 7157 } |
| 7158 |
| 7159 #endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */ |
| 7160 |
| 7161 if (cpu_core_count) { |
| 7162 |
| 7163 cur_runnable = (u32)get_runnable_processes(); |
| 7164 |
| 7165 #if defined(__APPLE__) || defined(__FreeBSD__) || defined (__OpenBSD__) |
| 7166 |
| 7167 /* Add ourselves, since the 1-minute average doesn't include that yet. */ |
| 7168 |
| 7169 cur_runnable++; |
| 7170 |
| 7171 #endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */ |
| 7172 |
| 7173 OKF("You have %u CPU cores and %u runnable tasks (utilization: %0.0f%%).", |
| 7174 cpu_core_count, cur_runnable, cur_runnable * 100.0 / cpu_core_count); |
| 7175 |
| 7176 if (cpu_core_count > 1) { |
| 7177 |
| 7178 if (cur_runnable > cpu_core_count * 1.5) { |
| 7179 |
| 7180 WARNF("System under apparent load, performance may be spotty."); |
| 7181 |
| 7182 } else if (cur_runnable + 1 <= cpu_core_count) { |
| 7183 |
| 7184 OKF("Try parallel jobs - see %s/parallel_fuzzing.txt.", doc_path); |
| 7185 |
| 7186 } |
| 7187 |
| 7188 } |
| 7189 |
| 7190 } else WARNF("Unable to figure out the number of CPU cores."); |
| 7191 |
| 7192 #ifdef HAVE_AFFINITY |
| 7193 |
| 7194 if (use_affinity) |
| 7195 OKF("Using specified CPU affinity: main = %u, child = %u", |
| 7196 cpu_aff_main, cpu_aff_child); |
| 7197 else if (cpu_core_count > 1) |
| 7198 OKF(cBRI "Try setting CPU affinity (-Z) for a performance boost!" cRST); |
| 7199 |
| 7200 #endif /* HAVE_AFFINITY */ |
| 7201 |
| 7202 } |
| 7203 |
| 7204 |
| 7205 /* Validate and fix up out_dir and sync_dir when using -S. */ |
| 7206 |
| 7207 static void fix_up_sync(void) { |
| 7208 |
| 7209 u8* x = sync_id; |
| 7210 |
| 7211 if (dumb_mode) |
| 7212 FATAL("-S / -M and -n are mutually exclusive"); |
| 7213 |
| 7214 if (skip_deterministic) { |
| 7215 |
| 7216 if (force_deterministic) |
| 7217 FATAL("use -S instead of -M -d"); |
| 7218 else |
| 7219 FATAL("-S already implies -d"); |
| 7220 |
| 7221 } |
| 7222 |
| 7223 while (*x) { |
| 7224 |
| 7225 if (!isalnum(*x) && *x != '_' && *x != '-') |
| 7226 FATAL("Non-alphanumeric fuzzer ID specified via -S or -M"); |
| 7227 |
| 7228 x++; |
| 7229 |
| 7230 } |
| 7231 |
| 7232 if (strlen(sync_id) > 32) FATAL("Fuzzer ID too long"); |
| 7233 |
| 7234 x = alloc_printf("%s/%s", out_dir, sync_id); |
| 7235 |
| 7236 sync_dir = out_dir; |
| 7237 out_dir = x; |
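| /* With -M or -S, the directory given via -o becomes the shared sync dir, |
| and this instance writes to its own <sync_dir>/<fuzzer_id> subdir. */ |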
| 7238 |
| 7239 if (!force_deterministic) { |
| 7240 skip_deterministic = 1; |
| 7241 use_splicing = 1; |
| 7242 } |
| 7243 |
| 7244 } |
| 7245 |
| 7246 |
| 7247 /* Handle screen resize (SIGWINCH). */ |
| 7248 |
| 7249 static void handle_resize(int sig) { |
| 7250 clear_screen = 1; |
| 7251 } |
| 7252 |
| 7253 |
| 7254 /* Check ASAN options. */ |
| 7255 |
| 7256 static void check_asan_opts(void) { |
| 7257 u8* x = getenv("ASAN_OPTIONS"); |
| 7258 |
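| /* abort_on_error=1 makes ASAN failures terminate the target with SIGABRT, |
| so that they register as crashes via waitpid(); symbolize=0 keeps the |
| external symbolizer from slowing down every crashing run. */ |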
| 7259 if (x) { |
| 7260 |
| 7261 if (!strstr(x, "abort_on_error=1")) |
| 7262 FATAL("Custom ASAN_OPTIONS set without abort_on_error=1 - please fix!"); |
| 7263 |
| 7264 if (!strstr(x, "symbolize=0")) |
| 7265 FATAL("Custom ASAN_OPTIONS set without symbolize=0 - please fix!"); |
| 7266 |
| 7267 } |
| 7268 |
| 7269 x = getenv("MSAN_OPTIONS"); |
| 7270 |
| 7271 if (x) { |
| 7272 |
| 7273 if (!strstr(x, "exit_code=" STRINGIFY(MSAN_ERROR))) |
| 7274 FATAL("Custom MSAN_OPTIONS set without exit_code=" |
| 7275 STRINGIFY(MSAN_ERROR) " - please fix!"); |
| 7276 |
| 7277 if (!strstr(x, "symbolize=0")) |
| 7278 FATAL("Custom MSAN_OPTIONS set without symbolize=0 - please fix!"); |
| 7279 |
| 7280 } |
| 7281 |
| 7282 } |
| 7283 |
| 7284 |
| 7285 /* Detect @@ in args. */ |
| 7286 |
| 7287 EXP_ST void detect_file_args(char** argv) { |
| 7288 |
| 7289 u32 i = 0; |
| 7290 u8* cwd = getcwd(NULL, 0); |
| 7291 |
| 7292 if (!cwd) PFATAL("getcwd() failed"); |
| 7293 |
| 7294 while (argv[i]) { |
| 7295 |
| 7296 u8* aa_loc = strstr(argv[i], "@@"); |
| 7297 |
| 7298 if (aa_loc) { |
| 7299 |
| 7300 u8 *aa_subst, *n_arg; |
| 7301 |
| 7302 /* If we don't have a file name chosen yet, use a safe default. */ |
| 7303 |
| 7304 if (!out_file) |
| 7305 out_file = alloc_printf("%s/.cur_input", out_dir); |
| 7306 |
| 7307 /* Be sure that we're always using fully-qualified paths. */ |
| 7308 |
| 7309 if (out_file[0] == '/') aa_subst = out_file; |
| 7310 else aa_subst = alloc_printf("%s/%s", cwd, out_file); |
| 7311 |
| 7312 /* Construct a replacement argv value. */ |
| 7313 |
| 7314 *aa_loc = 0; |
| 7315 n_arg = alloc_printf("%s%s%s", argv[i], aa_subst, aa_loc + 2); |
| 7316 argv[i] = n_arg; |
| 7317 *aa_loc = '@'; |
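| /* Writing 0 over the first '@' temporarily truncates argv[i] at the "@@" |
| marker so alloc_printf() copies just the prefix; the byte is restored |
| right after. */ |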
| 7318 |
| 7319 if (out_file[0] != '/') ck_free(aa_subst); |
| 7320 |
| 7321 } |
| 7322 |
| 7323 i++; |
| 7324 |
| 7325 } |
| 7326 |
| 7327 free(cwd); /* not tracked */ |
| 7328 |
| 7329 } |
| 7330 |
| 7331 |
| 7332 /* Set up signal handlers. More complicated than it needs to be, because libc |
| 7333 on Solaris doesn't resume interrupted reads(), sets SA_RESETHAND when you |
| 7334 call siginterrupt(), and does other stupid things. */ |
| 7335 |
| 7336 EXP_ST void setup_signal_handlers(void) { |
| 7337 |
| 7338 struct sigaction sa; |
| 7339 |
| 7340 sa.sa_handler = NULL; |
| 7341 sa.sa_flags = SA_RESTART; |
| 7342 sa.sa_sigaction = NULL; |
| 7343 |
| 7344 sigemptyset(&sa.sa_mask); |
| 7345 |
| 7346 /* Various ways of saying "stop". */ |
| 7347 |
| 7348 sa.sa_handler = handle_stop_sig; |
| 7349 sigaction(SIGHUP, &sa, NULL); |
| 7350 sigaction(SIGINT, &sa, NULL); |
| 7351 sigaction(SIGTERM, &sa, NULL); |
| 7352 |
| 7353 /* Exec timeout notifications. */ |
| 7354 |
| 7355 sa.sa_handler = handle_timeout; |
| 7356 sigaction(SIGALRM, &sa, NULL); |
| 7357 |
| 7358 /* Window resize */ |
| 7359 |
| 7360 sa.sa_handler = handle_resize; |
| 7361 sigaction(SIGWINCH, &sa, NULL); |
| 7362 |
| 7363 /* SIGUSR1: skip entry */ |
| 7364 |
| 7365 sa.sa_handler = handle_skipreq; |
| 7366 sigaction(SIGUSR1, &sa, NULL); |
| 7367 |
| 7368 /* Things we don't care about. */ |
| 7369 |
| 7370 sa.sa_handler = SIG_IGN; |
| 7371 sigaction(SIGTSTP, &sa, NULL); |
| 7372 sigaction(SIGPIPE, &sa, NULL); |
| 7373 |
| 7374 } |
| 7375 |
| 7376 |
| 7377 /* Rewrite argv for QEMU. */ |
| 7378 |
| 7379 static char** get_qemu_argv(u8* own_loc, char** argv, int argc) { |
| 7380 |
| 7381 char** new_argv = ck_alloc(sizeof(char*) * (argc + 4)); |
| 7382 u8 *tmp, *cp, *rsl, *own_copy; |
| 7383 |
| 7384 memcpy(new_argv + 3, argv + 1, sizeof(char*) * argc); |
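| /* Shift the original arguments (argv[1..argc], including the trailing |
| NULL) right by three slots to make room for the QEMU binary path, the |
| "--" separator, and the target path. */ |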
| 7385 |
| 7386 new_argv[2] = target_path; |
| 7387 new_argv[1] = "--"; |
| 7388 |
| 7389 /* Now we need to actually find the QEMU binary to put in argv[0]. */ |
| 7390 |
| 7391 tmp = getenv("AFL_PATH"); |
| 7392 |
| 7393 if (tmp) { |
| 7394 |
| 7395 cp = alloc_printf("%s/afl-qemu-trace", tmp); |
| 7396 |
| 7397 if (access(cp, X_OK)) |
| 7398       FATAL("Unable to find '%s'", cp);
| 7399 |
| 7400 target_path = new_argv[0] = cp; |
| 7401 return new_argv; |
| 7402 |
| 7403 } |
| 7404 |
| 7405 own_copy = ck_strdup(own_loc); |
| 7406 rsl = strrchr(own_copy, '/'); |
| 7407 |
| 7408 if (rsl) { |
| 7409 |
| 7410 *rsl = 0; |
| 7411 |
| 7412 cp = alloc_printf("%s/afl-qemu-trace", own_copy); |
| 7413 ck_free(own_copy); |
| 7414 |
| 7415 if (!access(cp, X_OK)) { |
| 7416 |
| 7417 target_path = new_argv[0] = cp; |
| 7418 return new_argv; |
| 7419 |
| 7420 } |
| 7421 |
| 7422 } else ck_free(own_copy); |
| 7423 |
| 7424 if (!access(BIN_PATH "/afl-qemu-trace", X_OK)) { |
| 7425 |
| 7426 target_path = new_argv[0] = ck_strdup(BIN_PATH "/afl-qemu-trace"); |
| 7427 return new_argv; |
| 7428 |
| 7429 } |
| 7430 |
| 7431 SAYF("\n" cLRD "[-] " cRST |
| 7432        "Oops, unable to find the 'afl-qemu-trace' binary. The binary must be built\n"
| 7433        "    separately by following the instructions in qemu_mode/README.qemu. If you\n"
| 7434        "    already have the binary installed, you may need to specify AFL_PATH in the\n"
| 7435        "    environment.\n\n"
| 7436 
| 7437        "    Of course, even without QEMU, afl-fuzz can still work with binaries that are\n"
| 7438        "    instrumented at compile time with afl-gcc. It is also possible to use it as a\n"
| 7439        "    traditional \"dumb\" fuzzer by specifying '-n' in the command line.\n");
| 7440 |
| 7441 FATAL("Failed to locate 'afl-qemu-trace'."); |
| 7442 |
| 7443 } |
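|
| /* Sketch of the rewritten vector (locations hypothetical): for
|    argv = { "./harness", "-x", "@@", NULL }, the result is
|
|      { ".../afl-qemu-trace", "--", target_path, "-x", "@@", NULL }
|
|    Slots 0..2 are filled in above; the memcpy() carries over the remaining
|    parameters along with the terminating NULL. */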
| 7444 |
| 7445 |
| 7446 /* Make a copy of the current command line. */ |
| 7447 |
| 7448 static void save_cmdline(u32 argc, char** argv) { |
| 7449 |
| 7450 u32 len = 1, i; |
| 7451 u8* buf; |
| 7452 |
| 7453 for (i = 0; i < argc; i++) |
| 7454 len += strlen(argv[i]) + 1; |
| 7455 |
| 7456 buf = orig_cmdline = ck_alloc(len); |
| 7457 |
| 7458 for (i = 0; i < argc; i++) { |
| 7459 |
| 7460 u32 l = strlen(argv[i]); |
| 7461 |
| 7462 memcpy(buf, argv[i], l); |
| 7463 buf += l; |
| 7464 |
| 7465 if (i != argc - 1) *(buf++) = ' '; |
| 7466 |
| 7467 } |
| 7468 |
| 7469 *buf = 0; |
| 7470 |
| 7471 } |
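|
| /* E.g., { "afl-fuzz", "-i", "in", "-o", "out" } is flattened into the
|    single NUL-terminated string "afl-fuzz -i in -o out", kept around for
|    later reporting. */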
| 7472 |
| 7473 |
| 7474 #ifndef AFL_LIB |
| 7475 |
| 7476 /* Main entry point */ |
| 7477 |
| 7478 int main(int argc, char** argv) { |
| 7479 |
| 7480 s32 opt; |
| 7481 u64 prev_queued = 0; |
| 7482 u32 sync_interval_cnt = 0, seek_to; |
| 7483 u8 *extras_dir = 0; |
| 7484 u8 mem_limit_given = 0; |
| 7485 u8 exit_1 = !!getenv("AFL_BENCH_JUST_ONE"); |
| 7486 |
| 7487 char** use_argv; |
| 7488 |
| 7489 SAYF(cCYA "afl-fuzz " cBRI VERSION cRST " by <lcamtuf@google.com>\n"); |
| 7490 |
| 7491 doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH; |
| 7492 |
| 7493 while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QZ:")) > 0) |
| 7494 |
| 7495 switch (opt) { |
| 7496 |
| 7497       case 'i': /* input dir */
| 7498 |
| 7499 if (in_dir) FATAL("Multiple -i options not supported"); |
| 7500 in_dir = optarg; |
| 7501 |
| 7502 if (!strcmp(in_dir, "-")) in_place_resume = 1; |
| 7503 |
| 7504 break; |
| 7505 |
| 7506 case 'o': /* output dir */ |
| 7507 |
| 7508 if (out_dir) FATAL("Multiple -o options not supported"); |
| 7509 out_dir = optarg; |
| 7510 break; |
| 7511 |
| 7512       case 'M': /* master sync ID */
| 7513 |
| 7514 force_deterministic = 1; |
| 7515 /* Fall through */ |
| 7516 |
| 7517 case 'S': /* sync ID */ |
| 7518 |
| 7519 if (sync_id) FATAL("Multiple -S or -M options not supported"); |
| 7520 sync_id = optarg; |
| 7521 break; |
| 7522 |
| 7523 case 'f': /* target file */ |
| 7524 |
| 7525 if (out_file) FATAL("Multiple -f options not supported"); |
| 7526 out_file = optarg; |
| 7527 break; |
| 7528 |
| 7529       case 'x': /* dictionary */
| 7530 |
| 7531 if (extras_dir) FATAL("Multiple -x options not supported"); |
| 7532 extras_dir = optarg; |
| 7533 break; |
| 7534 |
| 7535       case 't': { /* exec timeout */
| 7536 |
| 7537 u8 suffix = 0; |
| 7538 |
| 7539 if (timeout_given) FATAL("Multiple -t options not supported"); |
| 7540 |
| 7541 if (sscanf(optarg, "%u%c", &exec_tmout, &suffix) < 1 || |
| 7542 optarg[0] == '-') FATAL("Bad syntax used for -t"); |
| 7543 |
| 7544 if (exec_tmout < 5) FATAL("Dangerously low value of -t"); |
| 7545 |
| 7546 if (suffix == '+') timeout_given = 2; else timeout_given = 1; |
| 7547 |
| 7548 break; |
| 7549 |
| 7550 } |
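|
|       /* Examples: "-t 200" enforces a hard 200 ms limit per exec, while
|          "-t 200+" (timeout_given == 2) additionally permits skipping
|          test cases that time out instead of aborting the run. */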
| 7551 |
| 7552       case 'm': { /* mem limit */
| 7553 |
| 7554 u8 suffix = 'M'; |
| 7555 |
| 7556 if (mem_limit_given) FATAL("Multiple -m options not supported"); |
| 7557 mem_limit_given = 1; |
| 7558 |
| 7559 if (!strcmp(optarg, "none")) { |
| 7560 |
| 7561 mem_limit = 0; |
| 7562 break; |
| 7563 |
| 7564 } |
| 7565 |
| 7566 if (sscanf(optarg, "%llu%c", &mem_limit, &suffix) < 1 || |
| 7567 optarg[0] == '-') FATAL("Bad syntax used for -m"); |
| 7568 |
| 7569 switch (suffix) { |
| 7570 |
| 7571 case 'T': mem_limit *= 1024 * 1024; break; |
| 7572 case 'G': mem_limit *= 1024; break; |
| 7573 case 'k': mem_limit /= 1024; break; |
| 7574 case 'M': break; |
| 7575 |
| 7576 default: FATAL("Unsupported suffix or bad syntax for -m"); |
| 7577 |
| 7578 } |
| 7579 |
| 7580 if (mem_limit < 5) FATAL("Dangerously low value of -m"); |
| 7581 |
| 7582 if (sizeof(rlim_t) == 4 && mem_limit > 2000) |
| 7583 FATAL("Value of -m out of range on 32-bit systems"); |
| 7584 |
| 7585 } |
| 7586 |
| 7587 break; |
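|
|       /* The limit is stored in megabytes, so e.g. "-m 2G" yields
|          mem_limit == 2048 and "-m 1T" yields 1048576. Small "k" values
|          round down: "-m 512k" becomes 0 MB and trips the sanity check
|          above. */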
| 7588 |
| 7589 #ifdef HAVE_AFFINITY |
| 7590 |
| 7591       case 'Z': { /* CPU affinity */
| 7592 |
| 7593 s32 i; |
| 7594 |
| 7595 if (use_affinity) FATAL("Multiple -Z options not supported"); |
| 7596 use_affinity = 1; |
| 7597 |
| 7598 cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN); |
| 7599 |
| 7600 i = sscanf(optarg, "%u,%u", &cpu_aff_main, &cpu_aff_child); |
| 7601 |
| 7602 if (i < 1 || cpu_aff_main >= cpu_core_count) |
| 7603 FATAL("Bogus primary core ID passed to -Z (expected 0-%u)", |
| 7604 cpu_core_count - 1); |
| 7605 |
| 7606 if (i == 1) cpu_aff_child = cpu_aff_main; |
| 7607 |
| 7608 if (cpu_aff_child >= cpu_core_count) |
| 7609 FATAL("Bogus secondary core ID passed to -Z (expected 0-%u)", |
| 7610 cpu_core_count - 1); |
| 7611 |
| 7612 break; |
| 7613 |
| 7614 } |
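|
|       /* Examples: "-Z 2" pins both the fuzzer and its children to core 2;
|          "-Z 0,3" pins the fuzzer to core 0 and the forked targets to
|          core 3. The second field defaults to the first when omitted. */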
| 7615 |
| 7616 #endif /* HAVE_AFFINITY */ |
| 7617 |
| 7618       case 'd': /* skip deterministic */
| 7619 |
| 7620 if (skip_deterministic) FATAL("Multiple -d options not supported"); |
| 7621 skip_deterministic = 1; |
| 7622 use_splicing = 1; |
| 7623 break; |
| 7624 |
| 7625       case 'B': /* load bitmap */
| 7626 |
| 7627 /* This is a secret undocumented option! It is useful if you find |
| 7628 an interesting test case during a normal fuzzing process, and want |
| 7629 to mutate it without rediscovering any of the test cases already |
| 7630 found during an earlier run. |
| 7631 |
| 7632 To use this mode, you need to point -B to the fuzz_bitmap produced |
| 7633 by an earlier run for the exact same binary... and that's it. |
| 7634 |
| 7635 I only used this once or twice to get variants of a particular |
| 7636 file, so I'm not making this an official setting. */ |
| 7637 |
| 7638 if (in_bitmap) FATAL("Multiple -B options not supported"); |
| 7639 |
| 7640 in_bitmap = optarg; |
| 7641 read_bitmap(in_bitmap); |
| 7642 break; |
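|
|       /* A hypothetical invocation, reusing the bitmap from an earlier run
|          in out1/ so that only previously unseen paths are kept:
|
|            ./afl-fuzz -i in -o out2 -B out1/fuzz_bitmap -- ./target @@
|        */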
| 7643 |
| 7644       case 'C': /* crash mode */
| 7645 |
| 7646 if (crash_mode) FATAL("Multiple -C options not supported"); |
| 7647 crash_mode = FAULT_CRASH; |
| 7648 break; |
| 7649 |
| 7650       case 'n': /* dumb mode */
| 7651 |
| 7652 if (dumb_mode) FATAL("Multiple -n options not supported"); |
| 7653 if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1; |
| 7654 |
| 7655 break; |
| 7656 |
| 7657       case 'T': /* banner */
| 7658 |
| 7659 if (use_banner) FATAL("Multiple -T options not supported"); |
| 7660 use_banner = optarg; |
| 7661 break; |
| 7662 |
| 7663       case 'Q': /* QEMU mode */
| 7664 |
| 7665 if (qemu_mode) FATAL("Multiple -Q options not supported"); |
| 7666 qemu_mode = 1; |
| 7667 |
| 7668 if (!mem_limit_given) mem_limit = MEM_LIMIT_QEMU; |
| 7669 |
| 7670 break; |
| 7671 |
| 7672 default: |
| 7673 |
| 7674 usage(argv[0]); |
| 7675 |
| 7676 } |
| 7677 |
| 7678 if (optind == argc || !in_dir || !out_dir) usage(argv[0]); |
| 7679 |
| 7680 setup_signal_handlers(); |
| 7681 check_asan_opts(); |
| 7682 |
| 7683 #ifdef HAVE_AFFINITY |
| 7684 if (use_affinity) set_cpu_affinity(cpu_aff_main); |
| 7685 #endif /* HAVE_AFFINITY */ |
| 7686 |
| 7687 if (sync_id) fix_up_sync(); |
| 7688 |
| 7689 if (!strcmp(in_dir, out_dir)) |
| 7690 FATAL("Input and output directories can't be the same"); |
| 7691 |
| 7692 if (dumb_mode) { |
| 7693 |
| 7694 if (crash_mode) FATAL("-C and -n are mutually exclusive"); |
| 7695 if (qemu_mode) FATAL("-Q and -n are mutually exclusive"); |
| 7696 |
| 7697 } |
| 7698 |
| 7699 if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1; |
| 7700 if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1; |
| 7701 if (getenv("AFL_NO_VAR_CHECK")) no_var_check = 1; |
| 7702 if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1; |
| 7703 |
| 7704 if (dumb_mode == 2 && no_forkserver) |
| 7705 FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive"); |
| 7706 |
| 7707 if (getenv("AFL_LD_PRELOAD")) |
| 7708 setenv("LD_PRELOAD", getenv("AFL_LD_PRELOAD"), 1); |
| 7709 |
| 7710 save_cmdline(argc, argv); |
| 7711 |
| 7712 fix_up_banner(argv[optind]); |
| 7713 |
| 7714 check_if_tty(); |
| 7715 |
| 7716 get_core_count(); |
| 7717 check_crash_handling(); |
| 7718 check_cpu_governor(); |
| 7719 |
| 7720 setup_post(); |
| 7721 setup_shm(); |
| 7722 |
| 7723 setup_dirs_fds(); |
| 7724 read_testcases(); |
| 7725 load_auto(); |
| 7726 |
| 7727 pivot_inputs(); |
| 7728 |
| 7729 if (extras_dir) load_extras(extras_dir); |
| 7730 |
| 7731 if (!timeout_given) find_timeout(); |
| 7732 |
| 7733 detect_file_args(argv + optind + 1); |
| 7734 |
| 7735 if (!out_file) setup_stdio_file(); |
| 7736 |
| 7737 check_binary(argv[optind]); |
| 7738 |
| 7739 start_time = get_cur_time(); |
| 7740 |
| 7741 if (qemu_mode) |
| 7742 use_argv = get_qemu_argv(argv[0], argv + optind, argc - optind); |
| 7743 else |
| 7744 use_argv = argv + optind; |
| 7745 |
| 7746 perform_dry_run(use_argv); |
| 7747 |
| 7748 cull_queue(); |
| 7749 |
| 7750 show_init_stats(); |
| 7751 |
| 7752 seek_to = find_start_position(); |
| 7753 |
| 7754 write_stats_file(0, 0); |
| 7755 save_auto(); |
| 7756 |
| 7757 if (stop_soon) goto stop_fuzzing; |
| 7758 |
| 7759   /* Woop woop woop -- give the user a few seconds to glance at the init stats before the status screen takes over. */
| 7760 |
| 7761 if (!not_on_tty) { |
| 7762 sleep(4); |
| 7763 start_time += 4000; |
| 7764 if (stop_soon) goto stop_fuzzing; |
| 7765 } |
| 7766 |
| 7767 while (1) { |
| 7768 |
| 7769 u8 skipped_fuzz; |
| 7770 |
| 7771 cull_queue(); |
| 7772 |
| 7773 if (!queue_cur) { |
| 7774 |
| 7775 queue_cycle++; |
| 7776 current_entry = 0; |
| 7777 cur_skipped_paths = 0; |
| 7778 queue_cur = queue; |
| 7779 |
| 7780 while (seek_to) { |
| 7781 current_entry++; |
| 7782 seek_to--; |
| 7783 queue_cur = queue_cur->next; |
| 7784 } |
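|
|       /* seek_to is non-zero only when resuming; find_start_position()
|          recovers the last position from the previous run's stats, and the
|          loop above fast-forwards the queue to that entry. */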
| 7785 |
| 7786 show_stats(); |
| 7787 |
| 7788 if (not_on_tty) { |
| 7789 ACTF("Entering queue cycle %llu.", queue_cycle); |
| 7790 fflush(stdout); |
| 7791 } |
| 7792 |
| 7793 /* If we had a full queue cycle with no new finds, try |
| 7794 recombination strategies next. */ |
| 7795 |
| 7796 if (queued_paths == prev_queued) { |
| 7797 |
| 7798 if (use_splicing) cycles_wo_finds++; else use_splicing = 1; |
| 7799 |
| 7800 } else cycles_wo_finds = 0; |
| 7801 |
| 7802 prev_queued = queued_paths; |
| 7803 |
| 7804 if (sync_id && queue_cycle == 1 && getenv("AFL_IMPORT_FIRST")) |
| 7805 sync_fuzzers(use_argv); |
| 7806 |
| 7807 } |
| 7808 |
| 7809 skipped_fuzz = fuzz_one(use_argv); |
| 7810 |
| 7811 if (!stop_soon && sync_id && !skipped_fuzz) { |
| 7812 |
| 7813 if (!(sync_interval_cnt++ % SYNC_INTERVAL)) |
| 7814 sync_fuzzers(use_argv); |
| 7815 |
| 7816 } |
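|
|     /* With the default SYNC_INTERVAL of 5 (config.h), test cases from
|        other -M/-S instances are pulled in roughly once every five
|        fuzzed queue entries. */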
| 7817 |
| 7818 if (!stop_soon && exit_1) stop_soon = 2; |
| 7819 |
| 7820 if (stop_soon) break; |
| 7821 |
| 7822 queue_cur = queue_cur->next; |
| 7823 current_entry++; |
| 7824 |
| 7825 } |
| 7826 |
| 7827 if (queue_cur) show_stats(); |
| 7828 |
| 7829 write_bitmap(); |
| 7830 write_stats_file(0, 0); |
| 7831 save_auto(); |
| 7832 |
| 7833 stop_fuzzing: |
| 7834 |
| 7835 SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST, |
| 7836        stop_soon == 2 ? "programmatically" : "by user");
| 7837 |
| 7838 /* Running for more than 30 minutes but still doing first cycle? */ |
| 7839 |
| 7840 if (queue_cycle == 1 && get_cur_time() - start_time > 30 * 60 * 1000) { |
| 7841 |
| 7842 SAYF("\n" cYEL "[!] " cRST |
| 7843 "Stopped during the first cycle, results may be incomplete.\n" |
| 7844 " (For info on resuming, see %s/README.)\n", doc_path); |
| 7845 |
| 7846 } |
| 7847 |
| 7848 fclose(plot_file); |
| 7849 destroy_queue(); |
| 7850 destroy_extras(); |
| 7851 ck_free(target_path); |
| 7852 |
| 7853 alloc_report(); |
| 7854 |
| 7855 OKF("We're done here. Have a nice day!\n"); |
| 7856 |
| 7857 exit(0); |
| 7858 |
| 7859 } |
| 7860 |
| 7861 #endif /* !AFL_LIB */ |