Chromium Code Reviews

Unified Diff: third_party/afl/src/afl-fuzz.c

Issue 2238013002: Roll src/third_party/afl/src/ 2.14b..2.30b (16 versions). (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Note in "Local Modifications" that we have removed dictionaries/. Created 4 years, 4 months ago
Index: third_party/afl/src/afl-fuzz.c
diff --git a/third_party/afl/src/afl-fuzz.c b/third_party/afl/src/afl-fuzz.c
index 2d362933aa5c93379f228f2a9238bb477747e571..5ca8c8f02b8f7f3d0a18d3c560f5fd5f895640c9 100644
--- a/third_party/afl/src/afl-fuzz.c
+++ b/third_party/afl/src/afl-fuzz.c
@@ -60,7 +60,8 @@
# include <sys/sysctl.h>
#endif /* __APPLE__ || __FreeBSD__ || __OpenBSD__ */
-/* For supporting -Z on systems that have sched_setaffinity. */
+/* For systems that have sched_setaffinity; right now just Linux, but one
+ can hope... */
#ifdef __linux__
# define HAVE_AFFINITY 1
@@ -111,12 +112,12 @@ EXP_ST u8 skip_deterministic, /* Skip deterministic stages? */
in_place_resume, /* Attempt in-place resume? */
auto_changed, /* Auto-generated tokens changed? */
no_cpu_meter_red, /* Feng shui on the status screen */
- no_var_check, /* Don't detect variable behavior */
shuffle_queue, /* Shuffle input queue? */
bitmap_changed = 1, /* Time to update bitmap? */
qemu_mode, /* Running in QEMU mode? */
skip_requested, /* Skip request, via SIGUSR1 */
- run_over10m; /* Run time over 10 minutes? */
+ run_over10m, /* Run time over 10 minutes? */
+ persistent_mode; /* Running in persistent mode? */
static s32 out_fd, /* Persistent fd for out_file */
dev_urandom_fd = -1, /* Persistent fd for /dev/urandom */
@@ -134,6 +135,8 @@ EXP_ST u8 virgin_bits[MAP_SIZE], /* Regions yet untouched by fuzzing */
virgin_hang[MAP_SIZE], /* Bits we haven't seen in hangs */
virgin_crash[MAP_SIZE]; /* Bits we haven't seen in crashes */
+static u8 var_bytes[MAP_SIZE]; /* Bytes that appear to be variable */
+
static s32 shm_id; /* ID of the SHM region */
static volatile u8 stop_soon, /* Ctrl-C pressed? */
@@ -153,6 +156,7 @@ EXP_ST u32 queued_paths, /* Total number of queued testcases */
cur_depth, /* Current path depth */
max_depth, /* Max path depth */
useless_at_start, /* Number of useless starting paths */
+ var_byte_count, /* Bitmap bytes with var behavior */
current_entry, /* Current queue entry ID */
havoc_div = 1; /* Cycle count divisor for havoc */
@@ -165,6 +169,7 @@ EXP_ST u64 total_crashes, /* Total number of crashes */
last_path_time, /* Time for most recent path (ms) */
last_crash_time, /* Time for most recent crash (ms) */
last_hang_time, /* Time for most recent hang (ms) */
+ last_crash_execs, /* Exec counter at last crash */
queue_cycle, /* Queue round counter */
cycles_wo_finds, /* Cycles without any new paths */
trim_execs, /* Execs done to trim input files */
@@ -182,6 +187,8 @@ static u8 *stage_name = "init", /* Name of the current fuzz stage */
static s32 stage_cur, stage_max; /* Stage progression */
static s32 splicing_with = -1; /* Splicing with which test case? */
+static u32 master_id, master_max; /* Master instance job splitting */
+
static u32 syncing_case; /* Syncing with case #... */
static s32 stage_cur_byte, /* Byte offset of current stage op */
@@ -200,14 +207,11 @@ static u64 total_cal_us, /* Total calibration time (us) */
static u64 total_bitmap_size, /* Total bit count for all bitmaps */
total_bitmap_entries; /* Number of bitmaps counted */
-static u32 cpu_core_count; /* CPU core count */
+static s32 cpu_core_count; /* CPU core count */
#ifdef HAVE_AFFINITY
-static u8 use_affinity; /* Using -Z */
-
-static u32 cpu_aff_main, /* Affinity for main process */
- cpu_aff_child; /* Affinity for fuzzed child */
+static s32 cpu_aff = -1; /* Selected CPU core */
#endif /* HAVE_AFFINITY */
@@ -340,31 +344,12 @@ static u64 get_cur_time_us(void) {
}
-#ifdef HAVE_AFFINITY
-
-/* Set CPU affinity (on systems that support it). */
-
-static void set_cpu_affinity(u32 cpu_id) {
-
- cpu_set_t c;
-
- CPU_ZERO(&c);
- CPU_SET(cpu_id, &c);
-
- if (sched_setaffinity(0, sizeof(c), &c))
- PFATAL("sched_setaffinity failed");
-
-}
-
-#endif /* HAVE_AFFINITY */
-
-
/* Generate a random number (from 0 to limit - 1). This may
have slight bias. */
static inline u32 UR(u32 limit) {
- if (!rand_cnt--) {
+ if (unlikely(!rand_cnt--)) {
u32 seed[2];
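
The unlikely() hints that start appearing in this roll (here in UR(), and again in the trace-processing hunks below) are not defined in this file; the sketch below shows the usual __builtin_expect idiom they are assumed to map to in AFL's own headers.

/* Assumed shape of the branch-prediction hints behind the new unlikely()
   calls; the real definitions live in AFL's headers, not in this diff. */
#if defined(__GNUC__) || defined(__clang__)
#  define likely(x)   __builtin_expect(!!(x), 1)
#  define unlikely(x) __builtin_expect(!!(x), 0)
#else
#  define likely(x)   (x)
#  define unlikely(x) (x)
#endif
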
@@ -398,6 +383,122 @@ static void shuffle_ptrs(void** ptrs, u32 cnt) {
}
+#ifdef HAVE_AFFINITY
+
+/* Build a list of processes bound to specific cores. Returns -1 if nothing
+ can be found. Assumes an upper bound of 4k CPUs. */
+
+static void bind_to_free_cpu(void) {
+
+ DIR* d;
+ struct dirent* de;
+ cpu_set_t c;
+
+ u8 cpu_used[4096] = { 0 };
+ u32 i;
+
+ if (cpu_core_count < 2) return;
+
+ if (getenv("AFL_NO_AFFINITY")) {
+
+ WARNF("Not binding to a CPU core (AFL_NO_AFFINITY set).");
+ return;
+
+ }
+
+ d = opendir("/proc");
+
+ if (!d) {
+
+ WARNF("Unable to access /proc - can't scan for free CPU cores.");
+ return;
+
+ }
+
+ ACTF("Checking CPU core loadout...");
+
+ /* Introduce some jitter, in case multiple AFL tasks are doing the same
+ thing at the same time... */
+
+ usleep(R(1000) * 250);
+
+ /* Scan all /proc/<pid>/status entries, checking for Cpus_allowed_list.
+ Flag all processes bound to a specific CPU using cpu_used[]. This will
+ fail for some exotic binding setups, but is likely good enough in almost
+ all real-world use cases. */
+
+ while ((de = readdir(d))) {
+
+ u8* fn;
+ FILE* f;
+ u8 tmp[MAX_LINE];
+ u8 has_vmsize = 0;
+
+ if (!isdigit(de->d_name[0])) continue;
+
+ fn = alloc_printf("/proc/%s/status", de->d_name);
+
+ if (!(f = fopen(fn, "r"))) {
+ ck_free(fn);
+ continue;
+ }
+
+ while (fgets(tmp, MAX_LINE, f)) {
+
+ u32 hval;
+
+ /* Processes without VmSize are probably kernel tasks. */
+
+ if (!strncmp(tmp, "VmSize:\t", 8)) has_vmsize = 1;
+
+ if (!strncmp(tmp, "Cpus_allowed_list:\t", 19) &&
+ !strchr(tmp, '-') && !strchr(tmp, ',') &&
+ sscanf(tmp + 19, "%u", &hval) == 1 && hval < sizeof(cpu_used) &&
+ has_vmsize) {
+
+ cpu_used[hval] = 1;
+ break;
+
+ }
+
+ }
+
+ ck_free(fn);
+ fclose(f);
+
+ }
+
+ closedir(d);
+
+ for (i = 0; i < cpu_core_count; i++) if (!cpu_used[i]) break;
+
+ if (i == cpu_core_count) {
+
+ SAYF("\n" cLRD "[-] " cRST
+ "Uh-oh, looks like all %u CPU cores on your system are allocated to\n"
+ " other instances of afl-fuzz (or similar CPU-locked tasks). Starting\n"
+ " another fuzzer on this machine is probably a bad plan, but if you are\n"
+ " absolutely sure, you can set AFL_NO_AFFINITY and try again.\n",
+ cpu_core_count);
+
+ FATAL("No more free CPU cores");
+
+ }
+
+ OKF("Found a free CPU core, binding to #%u.", i);
+
+ cpu_aff = i;
+
+ CPU_ZERO(&c);
+ CPU_SET(i, &c);
+
+ if (sched_setaffinity(0, sizeof(c), &c))
+ PFATAL("sched_setaffinity failed");
+
+}
+
+#endif /* HAVE_AFFINITY */
+
#ifndef IGNORE_FINDS
/* Helper function to compare buffers; returns first and last differing offset. We
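
The new bind_to_free_cpu() replaces the old -Z flag: it scans /proc for processes already pinned to a single core and binds afl-fuzz to the first core left over. The snippet below is a minimal, Linux-only standalone check of the underlying primitive (sched_setaffinity / sched_getaffinity); it is illustrative and not part of the change.

/* Minimal Linux-only sketch of the affinity primitive used by
   bind_to_free_cpu(): pin the calling process to core 0, then read the
   mask back to confirm. Illustrative only. */
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>

int main(void) {

  cpu_set_t c;

  CPU_ZERO(&c);
  CPU_SET(0, &c);

  if (sched_setaffinity(0, sizeof(c), &c)) {
    perror("sched_setaffinity");
    return 1;
  }

  if (sched_getaffinity(0, sizeof(c), &c)) {
    perror("sched_getaffinity");
    return 1;
  }

  printf("Bound to core 0: %s (cores in mask: %d)\n",
         CPU_ISSET(0, &c) ? "yes" : "no", CPU_COUNT(&c));

  return 0;

}
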
@@ -768,9 +869,6 @@ EXP_ST void read_bitmap(u8* fname) {
This function is called after every exec() on a fairly large buffer, so
it needs to be fast. We do this in 32-bit and 64-bit flavors. */
-#define FFL(_b) (0xffULL << ((_b) << 3))
-#define FF(_b) (0xff << ((_b) << 3))
-
static inline u8 has_new_bits(u8* virgin_map) {
#ifdef __x86_64__
@@ -793,53 +891,39 @@ static inline u8 has_new_bits(u8* virgin_map) {
while (i--) {
-#ifdef __x86_64__
-
- u64 cur = *current;
- u64 vir = *virgin;
-
-#else
-
- u32 cur = *current;
- u32 vir = *virgin;
+ /* Optimize for (*current & *virgin) == 0 - i.e., no bits in current bitmap
+ that have not been already cleared from the virgin map - since this will
+ almost always be the case. */
-#endif /* ^__x86_64__ */
-
- /* Optimize for *current == ~*virgin, since this will almost always be the
- case. */
+ if (unlikely(*current) && unlikely(*current & *virgin)) {
- if (cur & vir) {
+ if (likely(ret < 2)) {
- if (ret < 2) {
+ u8* cur = (u8*)current;
+ u8* vir = (u8*)virgin;
- /* This trace did not have any new bytes yet; see if there's any
- current[] byte that is non-zero when virgin[] is 0xff. */
+ /* Looks like we have not found any new bytes yet; see if any non-zero
+ bytes in current[] are pristine in virgin[]. */
#ifdef __x86_64__
- if (((cur & FFL(0)) && (vir & FFL(0)) == FFL(0)) ||
- ((cur & FFL(1)) && (vir & FFL(1)) == FFL(1)) ||
- ((cur & FFL(2)) && (vir & FFL(2)) == FFL(2)) ||
- ((cur & FFL(3)) && (vir & FFL(3)) == FFL(3)) ||
- ((cur & FFL(4)) && (vir & FFL(4)) == FFL(4)) ||
- ((cur & FFL(5)) && (vir & FFL(5)) == FFL(5)) ||
- ((cur & FFL(6)) && (vir & FFL(6)) == FFL(6)) ||
- ((cur & FFL(7)) && (vir & FFL(7)) == FFL(7))) ret = 2;
+ if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+ (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff) ||
+ (cur[4] && vir[4] == 0xff) || (cur[5] && vir[5] == 0xff) ||
+ (cur[6] && vir[6] == 0xff) || (cur[7] && vir[7] == 0xff)) ret = 2;
else ret = 1;
#else
- if (((cur & FF(0)) && (vir & FF(0)) == FF(0)) ||
- ((cur & FF(1)) && (vir & FF(1)) == FF(1)) ||
- ((cur & FF(2)) && (vir & FF(2)) == FF(2)) ||
- ((cur & FF(3)) && (vir & FF(3)) == FF(3))) ret = 2;
+ if ((cur[0] && vir[0] == 0xff) || (cur[1] && vir[1] == 0xff) ||
+ (cur[2] && vir[2] == 0xff) || (cur[3] && vir[3] == 0xff)) ret = 2;
else ret = 1;
#endif /* ^__x86_64__ */
}
- *virgin = vir & ~cur;
+ *virgin &= ~*current;
}
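
The rewritten has_new_bits() first tests (*current & *virgin), which is zero for almost every word, and only then drops to byte granularity to distinguish a new hit count (ret = 1) from a brand-new tuple (ret = 2). Below is a small worked example of that classification on a single word, with made-up values; the real function additionally clears the seen bits via *virgin &= ~*current.

/* Worked example of the has_new_bits() classification on one 64-bit word:
   0 = nothing new, 1 = new hit count for a known tuple, 2 = a byte of
   current[] is non-zero while virgin[] is still 0xff. Values are made up. */
#include <stdint.h>
#include <stdio.h>

static uint8_t check_word(uint64_t current, uint64_t virgin) {

  uint8_t ret = 0;

  if (current && (current & virgin)) {

    uint8_t* cur = (uint8_t*)&current;
    uint8_t* vir = (uint8_t*)&virgin;

    for (int i = 0; i < 8; i++)
      if (cur[i] && vir[i] == 0xff) { ret = 2; break; }

    if (!ret) ret = 1;

  }

  return ret;

}

int main(void) {

  printf("%u\n", check_word(0x01, 0x00)); /* 0: tuple and count seen before */
  printf("%u\n", check_word(0x02, 0xfd)); /* 1: known tuple, new hit count  */
  printf("%u\n", check_word(0x01, 0xff)); /* 2: tuple never seen at all     */

  return 0;

}
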
@@ -887,6 +971,8 @@ static u32 count_bits(u8* mem) {
}
+#define FF(_b) (0xff << ((_b) << 3))
+
/* Count the number of bytes set in the bitmap. Called fairly sporadically,
mostly to update the status screen or calibrate and examine confirmed
new paths. */
@@ -948,21 +1034,11 @@ static u32 count_non_255_bytes(u8* mem) {
is hit or not. Called on every new crash or hang, should be
reasonably fast. */
-#define AREP4(_sym) (_sym), (_sym), (_sym), (_sym)
-#define AREP8(_sym) AREP4(_sym), AREP4(_sym)
-#define AREP16(_sym) AREP8(_sym), AREP8(_sym)
-#define AREP32(_sym) AREP16(_sym), AREP16(_sym)
-#define AREP64(_sym) AREP32(_sym), AREP32(_sym)
-#define AREP128(_sym) AREP64(_sym), AREP64(_sym)
-
-static u8 simplify_lookup[256] = {
- /* 4 */ 1, 128, 128, 128,
- /* +4 */ AREP4(128),
- /* +8 */ AREP8(128),
- /* +16 */ AREP16(128),
- /* +32 */ AREP32(128),
- /* +64 */ AREP64(128),
- /* +128 */ AREP128(128)
+static const u8 simplify_lookup[256] = {
+
+ [0] = 1,
+ [1 ... 255] = 128
+
};
#ifdef __x86_64__
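
The rebuilt lookup tables use range designated initializers ([a ... b] = v), a GNU C extension also accepted by Clang, in place of the old AREP* macros. A standalone illustration of the construct (not tied to AFL's tables):

/* Standalone illustration of GNU-style range designated initializers,
   the construct used for the rebuilt lookup tables above. */
#include <stdio.h>

static const unsigned char lut[256] = {

  [0]         = 1,
  [1 ... 255] = 128

};

int main(void) {

  printf("lut[0]=%d lut[1]=%d lut[255]=%d\n", lut[0], lut[1], lut[255]);
  /* Prints: lut[0]=1 lut[1]=128 lut[255]=128 */

  return 0;

}
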
@@ -975,7 +1051,7 @@ static void simplify_trace(u64* mem) {
/* Optimize for sparse bitmaps. */
- if (*mem) {
+ if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
@@ -1006,7 +1082,7 @@ static void simplify_trace(u32* mem) {
/* Optimize for sparse bitmaps. */
- if (*mem) {
+ if (unlikely(*mem)) {
u8* mem8 = (u8*)mem;
@@ -1029,17 +1105,36 @@ static void simplify_trace(u32* mem) {
preprocessing step for any newly acquired traces. Called on every exec,
must be fast. */
-static u8 count_class_lookup[256] = {
+static const u8 count_class_lookup8[256] = {
- /* 0 - 3: 4 */ 0, 1, 2, 4,
- /* 4 - 7: +4 */ AREP4(8),
- /* 8 - 15: +8 */ AREP8(16),
- /* 16 - 31: +16 */ AREP16(32),
- /* 32 - 127: +96 */ AREP64(64), AREP32(64),
- /* 128+: +128 */ AREP128(128)
+ [0] = 0,
+ [1] = 1,
+ [2] = 2,
+ [3] = 4,
+ [4 ... 7] = 8,
+ [8 ... 15] = 16,
+ [16 ... 31] = 32,
+ [32 ... 127] = 64,
+ [128 ... 255] = 128
};
+static u16 count_class_lookup16[65536];
+
+
+static void init_count_class16(void) {
+
+ u32 b1, b2;
+
+ for (b1 = 0; b1 < 256; b1++)
+ for (b2 = 0; b2 < 256; b2++)
+ count_class_lookup16[(b1 << 8) + b2] =
+ (count_class_lookup8[b1] << 8) |
+ count_class_lookup8[b2];
+
+}
+
+
#ifdef __x86_64__
static inline void classify_counts(u64* mem) {
@@ -1050,18 +1145,14 @@ static inline void classify_counts(u64* mem) {
/* Optimize for sparse bitmaps. */
- if (*mem) {
+ if (unlikely(*mem)) {
- u8* mem8 = (u8*)mem;
+ u16* mem16 = (u16*)mem;
- mem8[0] = count_class_lookup[mem8[0]];
- mem8[1] = count_class_lookup[mem8[1]];
- mem8[2] = count_class_lookup[mem8[2]];
- mem8[3] = count_class_lookup[mem8[3]];
- mem8[4] = count_class_lookup[mem8[4]];
- mem8[5] = count_class_lookup[mem8[5]];
- mem8[6] = count_class_lookup[mem8[6]];
- mem8[7] = count_class_lookup[mem8[7]];
+ mem16[0] = count_class_lookup16[mem16[0]];
+ mem16[1] = count_class_lookup16[mem16[1]];
+ mem16[2] = count_class_lookup16[mem16[2]];
+ mem16[3] = count_class_lookup16[mem16[3]];
}
@@ -1081,14 +1172,12 @@ static inline void classify_counts(u32* mem) {
/* Optimize for sparse bitmaps. */
- if (*mem) {
+ if (unlikely(*mem)) {
- u8* mem8 = (u8*)mem;
+ u16* mem16 = (u16*)mem;
- mem8[0] = count_class_lookup[mem8[0]];
- mem8[1] = count_class_lookup[mem8[1]];
- mem8[2] = count_class_lookup[mem8[2]];
- mem8[3] = count_class_lookup[mem8[3]];
+ mem16[0] = count_class_lookup16[mem16[0]];
+ mem16[1] = count_class_lookup16[mem16[1]];
}
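
classify_counts() now consumes the trace two bytes at a time: init_count_class16() expands the 256-entry bucket table into a 65536-entry one, so each 16-bit halfword needs a single lookup. Below is a self-contained sketch of the same idea; the harness and the memcpy-based loop are illustrative, while the real code indexes the shared-memory trace map directly.

/* Self-contained sketch of the 16-bit hit-count classification: build the
   65536-entry table from the 8-bit bucket table once, then classify a
   buffer two bytes per lookup. memcpy sidesteps alignment and aliasing
   concerns here; the real classify_counts() casts the trace map. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static const uint8_t count_class_lookup8[256] = {

  [0]           = 0,
  [1]           = 1,
  [2]           = 2,
  [3]           = 4,
  [4 ... 7]     = 8,
  [8 ... 15]    = 16,
  [16 ... 31]   = 32,
  [32 ... 127]  = 64,
  [128 ... 255] = 128

};

static uint16_t count_class_lookup16[65536];

static void init_count_class16(void) {

  uint32_t b1, b2;

  for (b1 = 0; b1 < 256; b1++)
    for (b2 = 0; b2 < 256; b2++)
      count_class_lookup16[(b1 << 8) + b2] =
        (count_class_lookup8[b1] << 8) | count_class_lookup8[b2];

}

static void classify_counts16(uint8_t* mem, size_t len) {

  for (size_t i = 0; i + 1 < len; i += 2) {

    uint16_t w;

    memcpy(&w, mem + i, 2);
    w = count_class_lookup16[w];
    memcpy(mem + i, &w, 2);

  }

}

int main(void) {

  uint8_t trace[8] = { 0, 1, 3, 5, 9, 40, 130, 255 };

  init_count_class16();
  classify_counts16(trace, sizeof(trace));

  for (int i = 0; i < 8; i++) printf("%d ", trace[i]);
  printf("\n"); /* Expected: 0 1 4 8 16 64 128 128 */

  return 0;

}
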
@@ -1897,10 +1986,6 @@ EXP_ST void init_forkserver(char** argv) {
struct rlimit r;
-#ifdef HAVE_AFFINITY
- if (use_affinity) set_cpu_affinity(cpu_aff_child);
-#endif /* HAVE_AFFINITY */
-
/* Umpf. On OpenBSD, the default fd limit for root users is set to
soft 128. Let's try to fix that... */
@@ -2199,10 +2284,6 @@ static u8 run_target(char** argv) {
struct rlimit r;
-#ifdef HAVE_AFFINITY
- if (use_affinity) set_cpu_affinity(cpu_aff_child);
-#endif /* HAVE_AFFINITY */
-
if (mem_limit) {
r.rlim_max = r.rlim_cur = ((rlim_t)mem_limit) << 20;
@@ -2321,7 +2402,8 @@ static u8 run_target(char** argv) {
}
- child_pid = 0;
+ if (!WIFSTOPPED(status)) child_pid = 0;
+
it.it_value.tv_sec = 0;
it.it_value.tv_usec = 0;
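
Keeping child_pid when WIFSTOPPED(status) is true matters for persistent mode, where the target is assumed to stop itself between runs rather than exit. A tiny standalone demo of the waitpid() semantics involved (not AFL code):

/* Standalone demo of the waitpid()/WIFSTOPPED() semantics behind the change:
   a child that stops itself is reported as stopped, not exited, so its PID
   must not be discarded. Illustrative only. */
#include <signal.h>
#include <stdio.h>
#include <sys/wait.h>
#include <unistd.h>

int main(void) {

  int status;
  pid_t pid = fork();

  if (!pid) {

    /* Child: stop itself, roughly like a persistent-mode target between runs. */
    raise(SIGSTOP);
    _exit(0);

  }

  waitpid(pid, &status, WUNTRACED);  /* observe the stop, not an exit */

  printf("WIFSTOPPED=%d WIFEXITED=%d\n", WIFSTOPPED(status), WIFEXITED(status));

  kill(pid, SIGCONT);                /* let the child finish and reap it */
  waitpid(pid, &status, 0);

  return 0;

}
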
@@ -2440,7 +2522,11 @@ static void show_stats(void);
static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
u32 handicap, u8 from_queue) {
- u8 fault = 0, new_bits = 0, var_detected = 0, first_run = (q->exec_cksum == 0);
+ static u8 first_trace[MAP_SIZE];
+
+ u8 fault = 0, new_bits = 0, var_detected = 0,
+ first_run = (q->exec_cksum == 0);
+
u64 start_us, stop_us;
s32 old_sc = stage_cur, old_sm = stage_max, old_tmout = exec_tmout;
@@ -2457,7 +2543,7 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
q->cal_failed++;
stage_name = "calibration";
- stage_max = no_var_check ? CAL_CYCLES_NO_VAR : CAL_CYCLES;
+ stage_max = CAL_CYCLES;
/* Make sure the forkserver is up before we do anything, and let's not
count its spin-up time toward binary calibration. */
@@ -2465,6 +2551,8 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
if (dumb_mode != 1 && !no_forkserver && !forksrv_pid)
init_forkserver(argv);
+ if (q->exec_cksum) memcpy(first_trace, trace_bits, MAP_SIZE);
+
start_us = get_cur_time_us();
for (stage_cur = 0; stage_cur < stage_max; stage_cur++) {
@@ -2494,12 +2582,29 @@ static u8 calibrate_case(char** argv, struct queue_entry* q, u8* use_mem,
u8 hnb = has_new_bits(virgin_bits);
if (hnb > new_bits) new_bits = hnb;
- if (!no_var_check && q->exec_cksum) {
+ if (q->exec_cksum) {
+
+ u32 i;
+
+ for (i = 0; i < MAP_SIZE; i++) {
+
+ if (!var_bytes[i] && first_trace[i] != trace_bits[i]) {
+
+ var_bytes[i] = 1;
+ stage_max = CAL_CYCLES_LONG;
+
+ }
+
+ }
var_detected = 1;
- stage_max = CAL_CYCLES_LONG;
- } else q->exec_cksum = cksum;
+ } else {
+
+ q->exec_cksum = cksum;
+ memcpy(first_trace, trace_bits, MAP_SIZE);
+
+ }
}
@@ -2538,9 +2643,15 @@ abort_calibration:
/* Mark variable paths. */
- if (var_detected && !q->var_behavior) {
- mark_as_variable(q);
- queued_variable++;
+ if (var_detected) {
+
+ var_byte_count = count_bytes(var_bytes);
+
+ if (!q->var_behavior) {
+ mark_as_variable(q);
+ queued_variable++;
+ }
+
}
stage_name = old_sn;
@@ -3129,6 +3240,7 @@ static u8 save_if_interesting(char** argv, void* mem, u32 len, u8 fault) {
unique_crashes++;
last_crash_time = get_cur_time();
+ last_crash_execs = total_execs;
break;
@@ -3226,9 +3338,9 @@ static void find_timeout(void) {
/* Update stats file for unattended monitoring. */
-static void write_stats_file(double bitmap_cvg, double eps) {
+static void write_stats_file(double bitmap_cvg, double stability, double eps) {
- static double last_bcvg, last_eps;
+ static double last_bcvg, last_stab, last_eps;
u8* fn = alloc_printf("%s/fuzzer_stats", out_dir);
s32 fd;
@@ -3247,46 +3359,51 @@ static void write_stats_file(double bitmap_cvg, double eps) {
/* Keep last values in case we're called from another context
where exec/sec stats and such are not readily available. */
- if (!bitmap_cvg && !eps) {
+ if (!bitmap_cvg && !stability && !eps) {
bitmap_cvg = last_bcvg;
+ stability = last_stab;
eps = last_eps;
} else {
last_bcvg = bitmap_cvg;
+ last_stab = stability;
last_eps = eps;
}
- fprintf(f, "start_time : %llu\n"
- "last_update : %llu\n"
- "fuzzer_pid : %u\n"
- "cycles_done : %llu\n"
- "execs_done : %llu\n"
- "execs_per_sec : %0.02f\n"
- "paths_total : %u\n"
- "paths_favored : %u\n"
- "paths_found : %u\n"
- "paths_imported : %u\n"
- "max_depth : %u\n"
- "cur_path : %u\n"
- "pending_favs : %u\n"
- "pending_total : %u\n"
- "variable_paths : %u\n"
- "bitmap_cvg : %0.02f%%\n"
- "unique_crashes : %llu\n"
- "unique_hangs : %llu\n"
- "last_path : %llu\n"
- "last_crash : %llu\n"
- "last_hang : %llu\n"
- "exec_timeout : %u\n"
- "afl_banner : %s\n"
- "afl_version : " VERSION "\n"
- "command_line : %s\n",
+ fprintf(f, "start_time : %llu\n"
+ "last_update : %llu\n"
+ "fuzzer_pid : %u\n"
+ "cycles_done : %llu\n"
+ "execs_done : %llu\n"
+ "execs_per_sec : %0.02f\n"
+ "paths_total : %u\n"
+ "paths_favored : %u\n"
+ "paths_found : %u\n"
+ "paths_imported : %u\n"
+ "max_depth : %u\n"
+ "cur_path : %u\n"
+ "pending_favs : %u\n"
+ "pending_total : %u\n"
+ "variable_paths : %u\n"
+ "stability : %0.02f%%\n"
+ "bitmap_cvg : %0.02f%%\n"
+ "unique_crashes : %llu\n"
+ "unique_hangs : %llu\n"
+ "last_path : %llu\n"
+ "last_crash : %llu\n"
+ "last_hang : %llu\n"
+ "execs_since_crash : %llu\n"
+ "exec_timeout : %u\n"
+ "afl_banner : %s\n"
+ "afl_version : " VERSION "\n"
+ "command_line : %s\n",
start_time / 1000, get_cur_time() / 1000, getpid(),
queue_cycle ? (queue_cycle - 1) : 0, total_execs, eps,
queued_paths, queued_favored, queued_discovered, queued_imported,
max_depth, current_entry, pending_favored, pending_not_fuzzed,
- queued_variable, bitmap_cvg, unique_crashes, unique_hangs,
- last_path_time / 1000, last_crash_time / 1000,
- last_hang_time / 1000, exec_tmout, use_banner, orig_cmdline);
+ queued_variable, stability, bitmap_cvg, unique_crashes,
+ unique_hangs, last_path_time / 1000, last_crash_time / 1000,
+ last_hang_time / 1000, total_execs - last_crash_execs,
+ exec_tmout, use_banner, orig_cmdline);
/* ignore errors */
fclose(f);
@@ -3709,7 +3826,7 @@ static void show_stats(void) {
static u64 last_stats_ms, last_plot_ms, last_ms, last_execs;
static double avg_exec;
- double t_byte_ratio;
+ double t_byte_ratio, stab_ratio;
u64 cur_ms;
u32 t_bytes, t_bits;
@@ -3762,12 +3879,17 @@ static void show_stats(void) {
t_bytes = count_non_255_bytes(virgin_bits);
t_byte_ratio = ((double)t_bytes * 100) / MAP_SIZE;
+ if (t_bytes)
+ stab_ratio = 100 - ((double)var_byte_count) * 100 / t_bytes;
+ else
+ stab_ratio = 100;
+
/* Roughly every minute, update fuzzer stats and save auto tokens. */
if (cur_ms - last_stats_ms > STATS_UPDATE_SEC * 1000) {
last_stats_ms = cur_ms;
- write_stats_file(t_byte_ratio, avg_exec);
+ write_stats_file(t_byte_ratio, stab_ratio, avg_exec);
save_auto();
write_bitmap();
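
The new stability figure is the share of bitmap bytes seen so far that never showed variable behavior across calibration re-runs. A tiny worked example of the computation, with made-up counts:

/* Worked example of the stability computation; t_bytes and var_byte_count
   values are made up for illustration. */
#include <stdio.h>

int main(void) {

  unsigned t_bytes        = 1800;  /* bytes ever set in the coverage bitmap */
  unsigned var_byte_count = 27;    /* bytes flagged as variable             */

  double stab_ratio = t_bytes
    ? 100 - ((double)var_byte_count) * 100 / t_bytes
    : 100;

  printf("stability : %0.02f%%\n", stab_ratio); /* -> 98.50% */

  return 0;

}
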
@@ -3929,8 +4051,8 @@ static void show_stats(void) {
SAYF(bV bSTOP " now processing : " cRST "%-17s " bSTG bV bSTOP, tmp);
-
- sprintf(tmp, "%s (%0.02f%%)", DI(t_bytes), t_byte_ratio);
+ sprintf(tmp, "%0.02f%% / %0.02f%%", ((double)queue_cur->bitmap_size) *
+ 100 / MAP_SIZE, t_byte_ratio);
SAYF(" map density : %s%-21s " bSTG bV "\n", t_byte_ratio > 70 ? cLRD :
((t_bytes < 200 && !dumb_mode) ? cPIN : cRST), tmp);
@@ -4074,9 +4196,14 @@ static void show_stats(void) {
DI(stage_finds[STAGE_HAVOC]), DI(stage_cycles[STAGE_HAVOC]),
DI(stage_finds[STAGE_SPLICE]), DI(stage_cycles[STAGE_SPLICE]));
- SAYF(bV bSTOP " havoc : " cRST "%-37s " bSTG bV bSTOP
- " variable : %s%-10s " bSTG bV "\n", tmp, queued_variable ? cLRD : cRST,
- no_var_check ? (u8*)"n/a" : DI(queued_variable));
+ SAYF(bV bSTOP " havoc : " cRST "%-37s " bSTG bV bSTOP, tmp);
+
+ if (t_bytes) sprintf(tmp, "%0.02f%%", stab_ratio);
+ else strcpy(tmp, "n/a");
+
+ SAYF(" stability : %s%-10s " bSTG bV "\n", (stab_ratio < 85 && var_byte_count > 40)
+ ? cLRD : ((queued_variable && (!persistent_mode || var_byte_count > 20))
+ ? cMGN : cRST), tmp);
if (!bytes_trim_out) {
@@ -4132,10 +4259,10 @@ static void show_stats(void) {
#ifdef HAVE_AFFINITY
- if (use_affinity) {
+ if (cpu_aff >= 0) {
- SAYF(SP10 cGRA "[cpu@%02u:%s%3u%%" cGRA "]\r" cRST,
- MIN(cpu_aff_child, 99), cpu_color,
+ SAYF(SP10 cGRA "[cpu%03u:%s%3u%%" cGRA "]\r" cRST,
+ MIN(cpu_aff, 999), cpu_color,
MIN(cur_utilization, 999));
} else {
@@ -4144,6 +4271,7 @@ static void show_stats(void) {
cpu_color, MIN(cur_utilization, 999));
}
+
#else
SAYF(SP10 cGRA " [cpu:%s%3u%%" cGRA "]\r" cRST,
@@ -4886,6 +5014,12 @@ static u8 fuzz_one(char** argv) {
if (skip_deterministic || queue_cur->was_fuzzed || queue_cur->passed_det)
goto havoc_stage;
+ /* Skip deterministic fuzzing if exec path checksum puts this out of scope
+ for this master instance. */
+
+ if (master_max && (queue_cur->exec_cksum % master_max) != master_id - 1)
+ goto havoc_stage;
+
/*********************************************
* SIMPLE BITFLIP (+dictionary construction) *
*********************************************/
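
The new master splitting assigns each queue entry's deterministic stages to exactly one -M instance based on its execution-path checksum. A minimal sketch of the partitioning rule with made-up checksums (variable names mirror the diff; the loop is illustrative):

/* Minimal sketch of the -M id/total partitioning rule: the instance for
   which cksum % master_max == master_id - 1 runs the deterministic stages;
   every other master skips ahead to havoc. Checksums below are made up. */
#include <stdint.h>
#include <stdio.h>

int main(void) {

  uint32_t master_id = 2, master_max = 3;         /* as if run with -M foo:2/3 */
  uint32_t cksums[]  = { 7, 8, 9, 0x1a2b3c4d };   /* hypothetical exec_cksum values */

  for (size_t i = 0; i < sizeof(cksums) / sizeof(cksums[0]); i++) {

    int skip_det = master_max && (cksums[i] % master_max) != master_id - 1;

    printf("cksum %08x -> %s\n", (unsigned)cksums[i],
           skip_det ? "skip deterministic stages" : "run deterministic stages");

  }

  return 0;

}
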
@@ -5055,7 +5189,7 @@ static u8 fuzz_one(char** argv) {
/* Effector map setup. These macros calculate:
EFF_APOS - position of a particular file offset in the map.
- EFF_ALEN - length of an map with a particular number of bytes.
+ EFF_ALEN - length of a map with a particular number of bytes.
EFF_SPAN_ALEN - map span for a sequence of bytes.
*/
@@ -6486,8 +6620,14 @@ static void sync_fuzzers(char** argv) {
path = alloc_printf("%s/%s", qd_path, qd_ent->d_name);
+ /* Allow this to fail in case the other fuzzer is resuming or so... */
+
fd = open(path, O_RDONLY);
- if (fd < 0) PFATAL("Unable to open '%s'", path);
+
+ if (fd < 0) {
+ ck_free(path);
+ continue;
+ }
if (fstat(fd, &st)) PFATAL("fstat() failed");
@@ -6721,7 +6861,7 @@ EXP_ST void check_binary(u8* fname) {
OKF(cPIN "Persistent mode binary detected.");
setenv(PERSIST_ENV_VAR, "1", 1);
- no_var_check = 1;
+ persistent_mode = 1;
} else if (getenv("AFL_PERSISTENT")) {
@@ -6838,9 +6978,6 @@ static void usage(u8* argv0) {
" -T text - text banner to show on the screen\n"
" -M / -S id - distributed mode (see parallel_fuzzing.txt)\n"
-#ifdef HAVE_AFFINITY
- " -Z core_id - set CPU affinity (see perf_tips.txt)\n"
-#endif /* HAVE_AFFINITY */
" -C - crash exploration mode (the peruvian rabbit thing)\n\n"
"For additional tips, please consult %s/README.\n\n",
@@ -7138,27 +7275,27 @@ static void get_core_count(void) {
#else
- if (!cpu_core_count) {
+#ifdef HAVE_AFFINITY
- /* On Linux, a simple way is to look at /proc/stat, especially since we'd
- be parsing it anyway for other reasons later on. But do this only if
- cpu_core_count hasn't been obtained before as a result of specifying
- -Z. */
+ cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);
- FILE* f = fopen("/proc/stat", "r");
- u8 tmp[1024];
+#else
- if (!f) return;
+ FILE* f = fopen("/proc/stat", "r");
+ u8 tmp[1024];
- while (fgets(tmp, sizeof(tmp), f))
- if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++;
+ if (!f) return;
- fclose(f);
- }
+ while (fgets(tmp, sizeof(tmp), f))
+ if (!strncmp(tmp, "cpu", 3) && isdigit(tmp[3])) cpu_core_count++;
+
+ fclose(f);
+
+#endif /* ^HAVE_AFFINITY */
#endif /* ^(__APPLE__ || __FreeBSD__ || __OpenBSD__) */
- if (cpu_core_count) {
+ if (cpu_core_count > 0) {
cur_runnable = (u32)get_runnable_processes();
@@ -7187,17 +7324,12 @@ static void get_core_count(void) {
}
- } else WARNF("Unable to figure out the number of CPU cores.");
-
-#ifdef HAVE_AFFINITY
+ } else {
- if (use_affinity)
- OKF("Using specified CPU affinity: main = %u, child = %u",
- cpu_aff_main, cpu_aff_child);
- else if (cpu_core_count > 1)
- OKF(cBRI "Try setting CPU affinity (-Z) for a performance boost!" cRST);
+ cpu_core_count = 0;
+ WARNF("Unable to figure out the number of CPU cores.");
-#endif /* HAVE_AFFINITY */
+ }
}
@@ -7483,18 +7615,23 @@ int main(int argc, char** argv) {
u8 *extras_dir = 0;
u8 mem_limit_given = 0;
u8 exit_1 = !!getenv("AFL_BENCH_JUST_ONE");
-
char** use_argv;
+ struct timeval tv;
+ struct timezone tz;
+
SAYF(cCYA "afl-fuzz " cBRI VERSION cRST " by <lcamtuf@google.com>\n");
doc_path = access(DOC_PATH, F_OK) ? "docs" : DOC_PATH;
- while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:QZ:")) > 0)
+ gettimeofday(&tv, &tz);
+ srandom(tv.tv_sec ^ tv.tv_usec ^ getpid());
+
+ while ((opt = getopt(argc, argv, "+i:o:f:m:t:T:dnCB:S:M:x:Q")) > 0)
switch (opt) {
- case 'i':
+ case 'i': /* input dir */
if (in_dir) FATAL("Multiple -i options not supported");
in_dir = optarg;
@@ -7509,12 +7646,30 @@ int main(int argc, char** argv) {
out_dir = optarg;
break;
- case 'M':
+ case 'M': { /* master sync ID */
+
+ u8* c;
+
+ if (sync_id) FATAL("Multiple -S or -M options not supported");
+ sync_id = optarg;
+
+ if ((c = strchr(sync_id, ':'))) {
+
+ *c = 0;
- force_deterministic = 1;
- /* Fall through */
+ if (sscanf(c + 1, "%u/%u", &master_id, &master_max) != 2 ||
+ !master_id || !master_max || master_id > master_max ||
+ master_max > 1000000) FATAL("Bogus master ID passed to -M");
- case 'S': /* sync ID */
+ }
+
+ force_deterministic = 1;
+
+ }
+
+ break;
+
+ case 'S':
if (sync_id) FATAL("Multiple -S or -M options not supported");
sync_id = optarg;
@@ -7526,13 +7681,13 @@ int main(int argc, char** argv) {
out_file = optarg;
break;
- case 'x':
+ case 'x': /* dictionary */
if (extras_dir) FATAL("Multiple -x options not supported");
extras_dir = optarg;
break;
- case 't': {
+ case 't': { /* timeout */
u8 suffix = 0;
@@ -7549,7 +7704,7 @@ int main(int argc, char** argv) {
}
- case 'm': {
+ case 'm': { /* mem limit */
u8 suffix = 'M';
@@ -7586,43 +7741,14 @@ int main(int argc, char** argv) {
break;
-#ifdef HAVE_AFFINITY
-
- case 'Z': {
-
- s32 i;
-
- if (use_affinity) FATAL("Multiple -Z options not supported");
- use_affinity = 1;
-
- cpu_core_count = sysconf(_SC_NPROCESSORS_ONLN);
-
- i = sscanf(optarg, "%u,%u", &cpu_aff_main, &cpu_aff_child);
-
- if (i < 1 || cpu_aff_main >= cpu_core_count)
- FATAL("Bogus primary core ID passed to -Z (expected 0-%u)",
- cpu_core_count - 1);
-
- if (i == 1) cpu_aff_child = cpu_aff_main;
-
- if (cpu_aff_child >= cpu_core_count)
- FATAL("Bogus secondary core ID passed to -Z (expected 0-%u)",
- cpu_core_count - 1);
-
- break;
-
- }
-
-#endif /* HAVE_AFFINITY */
-
- case 'd':
+ case 'd': /* skip deterministic */
if (skip_deterministic) FATAL("Multiple -d options not supported");
skip_deterministic = 1;
use_splicing = 1;
break;
- case 'B':
+ case 'B': /* load bitmap */
/* This is a secret undocumented option! It is useful if you find
an interesting test case during a normal fuzzing process, and want
@@ -7641,26 +7767,26 @@ int main(int argc, char** argv) {
read_bitmap(in_bitmap);
break;
- case 'C':
+ case 'C': /* crash mode */
if (crash_mode) FATAL("Multiple -C options not supported");
crash_mode = FAULT_CRASH;
break;
- case 'n':
+ case 'n': /* dumb mode */
if (dumb_mode) FATAL("Multiple -n options not supported");
if (getenv("AFL_DUMB_FORKSRV")) dumb_mode = 2; else dumb_mode = 1;
break;
- case 'T':
+ case 'T': /* banner */
if (use_banner) FATAL("Multiple -T options not supported");
use_banner = optarg;
break;
- case 'Q':
+ case 'Q': /* QEMU mode */
if (qemu_mode) FATAL("Multiple -Q options not supported");
qemu_mode = 1;
@@ -7680,10 +7806,6 @@ int main(int argc, char** argv) {
setup_signal_handlers();
check_asan_opts();
-#ifdef HAVE_AFFINITY
- if (use_affinity) set_cpu_affinity(cpu_aff_main);
-#endif /* HAVE_AFFINITY */
-
if (sync_id) fix_up_sync();
if (!strcmp(in_dir, out_dir))
@@ -7698,14 +7820,18 @@ int main(int argc, char** argv) {
if (getenv("AFL_NO_FORKSRV")) no_forkserver = 1;
if (getenv("AFL_NO_CPU_RED")) no_cpu_meter_red = 1;
- if (getenv("AFL_NO_VAR_CHECK")) no_var_check = 1;
if (getenv("AFL_SHUFFLE_QUEUE")) shuffle_queue = 1;
if (dumb_mode == 2 && no_forkserver)
FATAL("AFL_DUMB_FORKSRV and AFL_NO_FORKSRV are mutually exclusive");
+ if (getenv("AFL_PRELOAD")) {
+ setenv("LD_PRELOAD", getenv("AFL_PRELOAD"), 1);
+ setenv("DYLD_INSERT_LIBRARIES", getenv("AFL_PRELOAD"), 1);
+ }
+
if (getenv("AFL_LD_PRELOAD"))
- setenv("LD_PRELOAD", getenv("AFL_LD_PRELOAD"), 1);
+ FATAL("Use AFL_PRELOAD instead of AFL_LD_PRELOAD");
save_cmdline(argc, argv);
@@ -7714,11 +7840,17 @@ int main(int argc, char** argv) {
check_if_tty();
get_core_count();
+
+#ifdef HAVE_AFFINITY
+ bind_to_free_cpu();
+#endif /* HAVE_AFFINITY */
+
check_crash_handling();
check_cpu_governor();
setup_post();
setup_shm();
+ init_count_class16();
setup_dirs_fds();
read_testcases();
@@ -7751,7 +7883,7 @@ int main(int argc, char** argv) {
seek_to = find_start_position();
- write_stats_file(0, 0);
+ write_stats_file(0, 0, 0);
save_auto();
if (stop_soon) goto stop_fuzzing;
@@ -7827,13 +7959,13 @@ int main(int argc, char** argv) {
if (queue_cur) show_stats();
write_bitmap();
- write_stats_file(0, 0);
+ write_stats_file(0, 0, 0);
save_auto();
stop_fuzzing:
SAYF(CURSOR_SHOW cLRD "\n\n+++ Testing aborted %s +++\n" cRST,
- stop_soon == 2 ? "programatically" : "by user");
+ stop_soon == 2 ? "programmatically" : "by user");
/* Running for more than 30 minutes but still doing first cycle? */