Chromium Code Reviews

Side by Side Diff: third_party/afl/src/libdislocator/libdislocator.so.c

Issue 2238013002: Roll src/third_party/afl/src/ 2.14b..2.30b (16 versions). (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Note in "Local Modifications" that we have removed dictionaries/.
Created 4 years, 4 months ago
1 /*
2
3 american fuzzy lop - dislocator, an abusive allocator
4 -----------------------------------------------------
5
6 Written and maintained by Michal Zalewski <lcamtuf@google.com>
7
8 Copyright 2016 Google Inc. All rights reserved.
9
10 Licensed under the Apache License, Version 2.0 (the "License");
11 you may not use this file except in compliance with the License.
12 You may obtain a copy of the License at:
13
14 http://www.apache.org/licenses/LICENSE-2.0
15
16 This is a companion library that can be used as a drop-in replacement
17 for the libc allocator in the fuzzed binaries. See README.dislocator for
18 more info.
19
20 */
21
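For context, a minimal sketch of the kind of bug this allocator is built to surface, assuming the library is preloaded into the target binary (for example via LD_PRELOAD); the program name and sizes below are purely illustrative and not part of this CL. Because every buffer ends flush against a PROT_NONE guard page (see __dislocator_alloc below), even a one-byte heap overflow faults immediately instead of silently corrupting memory:

    /* overflow_demo.c -- hypothetical test program. Build it normally, then
       run it with libdislocator.so preloaded. */
    #include <stdlib.h>

    int main(void) {
      char* buf = malloc(16);
      if (!buf) return 1;
      buf[16] = 'x';   /* one byte past the end lands on the guard page -> SIGSEGV */
      free(buf);
      return 0;
    }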
22 #include <stdio.h>
23 #include <stdlib.h>
24 #include <string.h>
25 #include <limits.h>
26 #include <sys/mman.h>
27
28 #include "../config.h"
29 #include "../types.h"
30
31 #ifndef PAGE_SIZE
32 # define PAGE_SIZE 4096
33 #endif /* !PAGE_SIZE */
34
35 #ifndef MAP_ANONYMOUS
36 # define MAP_ANONYMOUS MAP_ANON
37 #endif /* !MAP_ANONYMOUS */
38
39 /* Error / message handling: */
40
41 #define DEBUGF(_x...) do { \
42 if (alloc_verbose) { \
43 if (++call_depth == 1) { \
44 fprintf(stderr, "[AFL] " _x); \
45 fprintf(stderr, "\n"); \
46 } \
47 call_depth--; \
48 } \
49 } while (0)
50
51 #define FATAL(_x...) do { \
52 if (++call_depth == 1) { \
53 fprintf(stderr, "*** [AFL] " _x); \
54 fprintf(stderr, " ***\n"); \
55 abort(); \
56 } \
57 call_depth--; \
58 } while (0)
59
60 /* Macro to count the number of pages needed to store a buffer: */
61
62 #define PG_COUNT(_l) (((_l) + (PAGE_SIZE - 1)) / PAGE_SIZE)
63
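As a quick check of the rounding, with the default 4096-byte PAGE_SIZE (worked arithmetic only):

    PG_COUNT(1)    == (1    + 4095) / 4096 == 1
    PG_COUNT(4096) == (4096 + 4095) / 4096 == 1
    PG_COUNT(4097) == (4097 + 4095) / 4096 == 2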
64 /* Canary & clobber bytes: */
65
66 #define ALLOC_CANARY 0xAACCAACC
67 #define ALLOC_CLOBBER 0x41
68
69 #define PTR_C(_p) (((u32*)(_p))[-1])
70 #define PTR_L(_p) (((u32*)(_p))[-2])
71
72 /* Configurable stuff (use AFL_LD_* to set): */
73
74 static u32 max_mem = MAX_ALLOC; /* Max heap usage to permit */
75 static u8 alloc_verbose, /* Additional debug messages */
76 hard_fail; /* abort() when max_mem exceeded? */
77
78 static __thread size_t total_mem; /* Currently allocated mem */
79
80 static __thread u32 call_depth; /* To avoid recursion via fprintf() */
81
82
83 /* This is the main alloc function. It allocates one page more than necessary,
84 sets that trailing page to PROT_NONE, and then offsets the returned pointer
85 so that it is right-aligned to that boundary. Since it always uses mmap(),
86 the returned memory will be zeroed. */
87
88 static void* __dislocator_alloc(size_t len) {
89
90 void* ret;
91
92 if (total_mem + len > max_mem) {
93
94 if (hard_fail)
95 FATAL("total allocs exceed %u MB", max_mem / 1024 / 1024);
96
97 DEBUGF("total allocs exceed %u MB, returning NULL",
98 max_mem / 1024 / 1024);
99
100 return NULL;
101
102 }
103
104 /* We will also store buffer length and a canary below the actual buffer, so
105 let's add 8 bytes for that. */
106
107 ret = mmap(NULL, (1 + PG_COUNT(len + 8)) * PAGE_SIZE, PROT_READ | PROT_WRITE,
108 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
109
110 if (ret == (void*)-1) {
111
112 if (hard_fail) FATAL("mmap() failed on alloc (OOM?)");
113
114 DEBUGF("mmap() failed on alloc (OOM?)");
115
116 return NULL;
117
118 }
119
120 /* Set PROT_NONE on the last page. */
121
122 if (mprotect(ret + PG_COUNT(len + 8) * PAGE_SIZE, PAGE_SIZE, PROT_NONE))
123 FATAL("mprotect() failed when allocating memory");
124
125 /* Offset the return pointer so that it's right-aligned to the page
126 boundary. */
127
128 ret += PAGE_SIZE * PG_COUNT(len + 8) - len - 8;
129
130 /* Store allocation metadata. */
131
132 ret += 8;
133
134 PTR_L(ret) = len;
135 PTR_C(ret) = ALLOC_CANARY;
136
137 total_mem += len;
138
139 return ret;
140
141 }
142
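To make the pointer arithmetic above concrete, one worked example (default 4096-byte PAGE_SIZE; the 100-byte length is arbitrary): for len = 100, PG_COUNT(108) is 1, so two pages are mapped and the second is made PROT_NONE. The returned pointer is placed so the buffer ends flush against that guard page, with the 8 metadata bytes immediately below it:

    base + 0    .. base + 3987   unused low part of the first page
    base + 3988 .. base + 3991   PTR_L(ret): stored length (100)
    base + 3992 .. base + 3995   PTR_C(ret): ALLOC_CANARY
    base + 3996 .. base + 4095   100-byte user buffer (ret points here)
    base + 4096 .. base + 8191   PROT_NONE guard page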
143
144 /* The "user-facing" wrapper for calloc(). This just checks for overflows and
145 displays debug messages if requested. */
146
147 void* calloc(size_t elem_len, size_t elem_cnt) {
148
149 void* ret;
150
151 size_t len = elem_len * elem_cnt;
152
153 /* Perform some sanity checks to detect obvious issues... */
154
155 if (elem_cnt && len / elem_cnt != elem_len)
156 FATAL("calloc(%zu, %zu) would overflow", elem_len, elem_cnt);
157
158 ret = __dislocator_alloc(len);
159
160 DEBUGF("calloc(%zu, %zu) = %p [%zu total]", elem_len, elem_cnt, ret,
161 total_mem);
162
163 return ret;
164
165 }
166
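One worked example of what the overflow check catches, assuming a 64-bit size_t (the arguments are purely illustrative): calloc(SIZE_MAX, 2) wraps, the division no longer recovers elem_len, and FATAL() fires instead of an undersized buffer being handed back:

    len            = SIZE_MAX * 2                  /* wraps modulo 2^64 to SIZE_MAX - 1 */
    len / elem_cnt = (SIZE_MAX - 1) / 2 = 2^63 - 1 /* != SIZE_MAX, so the check trips */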
167
168 /* The wrapper for malloc(). Roughly the same, also clobbers the returned
169 memory (unlike calloc(), malloc() is not guaranteed to return zeroed
170 memory). */
171
172 void* malloc(size_t len) {
173
174 void* ret;
175
176 ret = __dislocator_alloc(len);
177
178 DEBUGF("malloc(%zu) = %p [%zu total]", len, ret, total_mem);
179
180 if (ret && len) memset(ret, ALLOC_CLOBBER, len);
181
182 return ret;
183
184 }
185
186
187 /* The wrapper for free(). This simply marks the entire region as PROT_NONE.
188 If the region is already freed, the code will segfault during the attempt to
189 read the canary. Not very graceful, but works, right? */
190
191 void free(void* ptr) {
192
193 u32 len;
194
195 DEBUGF("free(%p)", ptr);
196
197 if (!ptr) return;
198
199 if (PTR_C(ptr) != ALLOC_CANARY) FATAL("bad allocator canary on free()");
200
201 len = PTR_L(ptr);
202
203 total_mem -= len;
204
205 /* Protect everything. Note that the extra page at the end is already
206 set as PROT_NONE, so we don't need to touch that. */
207
208 ptr -= PAGE_SIZE * PG_COUNT(len + 8) - len - 8;
209
210 if (mprotect(ptr - 8, PG_COUNT(len + 8) * PAGE_SIZE, PROT_NONE))
211 FATAL("mprotect() failed when freeing memory");
212
213 /* Keep the mapping; this is wasteful, but prevents ptr reuse. */
214
215 }
216
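The same guard-everything approach also turns use-after-free (and double free, via the now-unreadable canary) into an immediate fault. A minimal sketch, again assuming the library is preloaded; the program is purely illustrative:

    /* uaf_demo.c -- hypothetical test program. */
    #include <stdlib.h>

    int main(void) {
      char* p = malloc(32);
      if (!p) return 1;
      free(p);
      return p[0];   /* the whole region is PROT_NONE after free(), so this read faults */
    }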
217
218 /* Realloc is pretty straightforward, too. We forcibly reallocate the buffer,
219 move data, and then free (aka mprotect()) the original one. */
220
221 void* realloc(void* ptr, size_t len) {
222
223 void* ret;
224
225 ret = malloc(len);
226
227 if (ret && ptr) {
228
229 if (PTR_C(ptr) != ALLOC_CANARY) FATAL("bad allocator canary on realloc()");
230
231 memcpy(ret, ptr, MIN(len, PTR_L(ptr)));
232 free(ptr);
233
234 }
235
236 DEBUGF("realloc(%p, %zu) = %p [%zu total]", ptr, len, ret, total_mem);
237
238 return ret;
239
240 }
241
242
243 __attribute__((constructor)) void __dislocator_init(void) {
244
245 u8* tmp = getenv("AFL_LD_LIMIT_MB");
246
247 if (tmp) {
248
249 max_mem = atoi(tmp) * 1024 * 1024;
250 if (!max_mem) FATAL("Bad value for AFL_LD_LIMIT_MB");
251
252 }
253
254 alloc_verbose = !!getenv("AFL_LD_VERBOSE");
255 hard_fail = !!getenv("AFL_LD_HARD_FAIL");
256
257 }
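A small sketch of how these knobs behave at run time, assuming the library is preloaded with AFL_LD_LIMIT_MB=1 and AFL_LD_HARD_FAIL unset (hypothetical program and values): an over-limit request comes back as NULL, whereas with AFL_LD_HARD_FAIL=1 the same request would abort() via FATAL().

    /* limit_demo.c -- hypothetical test program. */
    #include <stdio.h>
    #include <stdlib.h>

    int main(void) {
      void* p = malloc(2 * 1024 * 1024);       /* 2 MB request against a 1 MB cap */
      printf("over-limit malloc -> %p\n", p);  /* expect a NULL pointer here */
      return 0;
    }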
