Chromium Code Reviews

Side by Side Diff: fusl/src/malloc/malloc.c

Issue 1714623002: [fusl] clang-format fusl (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: "headers too", created 4 years, 10 months ago
#define _GNU_SOURCE
#include <stdlib.h>
#include <string.h>
#include <limits.h>
#include <stdint.h>
#include <errno.h>
#include <sys/mman.h>
#include "libc.h"
#include "atomic.h"
#include "pthread_impl.h"

#if defined(__GNUC__) && defined(__PIC__)
#define inline inline __attribute__((always_inline))
#endif

void* __mmap(void*, size_t, int, int, int, off_t);
int __munmap(void*, size_t);
void* __mremap(void*, size_t, size_t, int, ...);
int __madvise(void*, size_t, int);

struct chunk {
  size_t psize, csize;
  struct chunk *next, *prev;
};

struct bin {
  volatile int lock[2];
  struct chunk* head;
  struct chunk* tail;
};

static struct {
  volatile uint64_t binmap;
  struct bin bins[64];
  volatile int free_lock[2];
} mal;
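/* Descriptive note on the state above: all allocator state lives in "mal": a
 * 64-bit binmap whose bit i is set when bin i is non-empty, 64 size-class
 * bins (each a circular doubly-linked list of free chunks guarded by its own
 * two-word lock), and free_lock, which serializes the final coalescing step
 * in free(). */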
#define SIZE_ALIGN (4 * sizeof(size_t))
#define SIZE_MASK (-SIZE_ALIGN)
#define OVERHEAD (2 * sizeof(size_t))
#define MMAP_THRESHOLD (0x1c00 * SIZE_ALIGN)
#define DONTCARE 16
#define RECLAIM 163840

#define CHUNK_SIZE(c) ((c)->csize & -2)
#define CHUNK_PSIZE(c) ((c)->psize & -2)
#define PREV_CHUNK(c) ((struct chunk*)((char*)(c)-CHUNK_PSIZE(c)))
#define NEXT_CHUNK(c) ((struct chunk*)((char*)(c) + CHUNK_SIZE(c)))
#define MEM_TO_CHUNK(p) (struct chunk*)((char*)(p)-OVERHEAD)
#define CHUNK_TO_MEM(c) (void*)((char*)(c) + OVERHEAD)
#define BIN_TO_CHUNK(i) (MEM_TO_CHUNK(&mal.bins[i].head))

#define C_INUSE ((size_t)1)

#define IS_MMAPPED(c) !((c)->csize & (C_INUSE))
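/* Chunk layout implied by the macros above: psize/csize hold the sizes of
 * the previous and current chunk, with bit 0 (C_INUSE) marking an in-use
 * chunk; each chunk's size is mirrored in the next chunk's psize, so the
 * heap can be walked in either direction. User memory starts OVERHEAD bytes
 * past the header (CHUNK_TO_MEM/MEM_TO_CHUNK). Chunks handed out by malloc
 * always carry C_INUSE unless they came straight from mmap, which is what
 * IS_MMAPPED tests. */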
/* Synchronization tools */

static inline void lock(volatile int* lk) {
  if (libc.threads_minus_1)
    while (a_swap(lk, 1))
      __wait(lk, lk + 1, 1, 1);
}

static inline void unlock(volatile int* lk) {
  if (lk[0]) {
    a_store(lk, 0);
    if (lk[1])
      __wake(lk, 1, 1);
  }
}

static inline void lock_bin(int i) {
  lock(mal.bins[i].lock);
  if (!mal.bins[i].head)
    mal.bins[i].head = mal.bins[i].tail = BIN_TO_CHUNK(i);
}

static inline void unlock_bin(int i) {
  unlock(mal.bins[i].lock);
}
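/* first_set returns the index of the lowest set bit; malloc applies it to a
 * masked copy of binmap to find the smallest non-empty bin that is large
 * enough. The #else branch is a disabled portable de Bruijn-sequence
 * fallback. */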
static int first_set(uint64_t x) {
#if 1
  return a_ctz_64(x);
#else
  static const char debruijn64[64] = {
      0, 1, 2, 53, 3, 7, 54, 27, 4, 38, 41, 8, 34, 55, 48, 28,
      62, 5, 39, 46, 44, 42, 22, 9, 24, 35, 59, 56, 49, 18, 29, 11,
      63, 52, 6, 26, 37, 40, 33, 47, 61, 45, 43, 21, 23, 58, 17, 10,
      51, 25, 36, 32, 60, 20, 57, 16, 50, 31, 19, 15, 30, 14, 13, 12};
  static const char debruijn32[32] = {
      0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
      31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14};
  if (sizeof(long) < 8) {
    uint32_t y = x;
    if (!y) {
      y = x >> 32;
      return 32 + debruijn32[(y & -y) * 0x076be629 >> 27];
    }
    return debruijn32[(y & -y) * 0x076be629 >> 27];
  }
  return debruijn64[(x & -x) * 0x022fdd63cc95386dull >> 58];
#endif
}
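/* bin_index maps a chunk size to one of the 64 bins: sizes up to
 * 33*SIZE_ALIGN map linearly onto bins 0-32, larger sizes map
 * logarithmically (four bins per power of two) by reading the value back
 * out of a float's exponent and top mantissa bits, and anything above
 * 0x1c00*SIZE_ALIGN lands in bin 63. bin_index_up is the same mapping
 * rounded up, used when searching for a bin that is guaranteed to fit. */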
static int bin_index(size_t x) {
  x = x / SIZE_ALIGN - 1;
  if (x <= 32)
    return x;
  if (x > 0x1c00)
    return 63;
  return ((union {
            float v;
            uint32_t r;
          }){(int)x}
              .r >>
          21) -
         496;
}

static int bin_index_up(size_t x) {
  x = x / SIZE_ALIGN - 1;
  if (x <= 32)
    return x;
  return (((union {
             float v;
             uint32_t r;
           }){(int)x}
               .r +
           0x1fffff) >>
          21) -
         496;
}
#if 0
void __dump_heap(int x)
{
  struct chunk *c;
  int i;
  for (c = (void *)mal.heap; CHUNK_SIZE(c); c = NEXT_CHUNK(c))
    fprintf(stderr, "base %p size %zu (%d) flags %d/%d\n",
            c, CHUNK_SIZE(c), bin_index(CHUNK_SIZE(c)),
            c->csize & 15,
            NEXT_CHUNK(c)->psize & 15);
  for (i=0; i<64; i++) {
    if (mal.bins[i].head != BIN_TO_CHUNK(i) && mal.bins[i].head) {
      fprintf(stderr, "bin %d: %p\n", i, mal.bins[i].head);
      if (!(mal.binmap & 1ULL<<i))
        fprintf(stderr, "missing from binmap!\n");
    } else if (mal.binmap & 1ULL<<i)
      fprintf(stderr, "binmap wrongly contains %d!\n", i);
  }
}
#endif
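/* expand_heap asks __expand_heap for more memory under its own lock and
 * frames the result as a chunk: if the new region is not contiguous with the
 * previous heap end, a zero-sized sentinel is written just below it, and a
 * zero-sized in-use footer chunk is always written at the new end so that
 * NEXT_CHUNK never walks past valid memory. */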
void* __expand_heap(size_t*);

static struct chunk* expand_heap(size_t n) {
  static int heap_lock[2];
  static void* end;
  void* p;
  struct chunk* w;

  /* The argument n already accounts for the caller's chunk
   * overhead needs, but if the heap can't be extended in-place,
   * we need room for an extra zero-sized sentinel chunk. */
  n += SIZE_ALIGN;

  lock(heap_lock);

  p = __expand_heap(&n);
  if (!p) {
    unlock(heap_lock);
    return 0;
  }

  /* If not just expanding existing space, we need to make a
   * new sentinel chunk below the allocated space. */
  if (p != end) {
    /* Valid/safe because of the prologue increment. */
    n -= SIZE_ALIGN;
    p = (char*)p + SIZE_ALIGN;
    w = MEM_TO_CHUNK(p);
    w->psize = 0 | C_INUSE;
  }

  /* Record new heap end and fill in footer. */
  end = (char*)p + n;
  w = MEM_TO_CHUNK(end);
  w->psize = n | C_INUSE;
  w->csize = 0 | C_INUSE;

  /* Fill in header, which may be new or may be replacing a
   * zero-size sentinel header at the old end-of-heap. */
  w = MEM_TO_CHUNK(p);
  w->csize = n | C_INUSE;

  unlock(heap_lock);

  return w;
}
static int adjust_size(size_t* n) {
  /* Result of pointer difference must fit in ptrdiff_t. */
  if (*n - 1 > PTRDIFF_MAX - SIZE_ALIGN - PAGE_SIZE) {
    if (*n) {
      errno = ENOMEM;
      return -1;
    } else {
      *n = SIZE_ALIGN;
      return 0;
    }
  }
  *n = (*n + OVERHEAD + SIZE_ALIGN - 1) & SIZE_MASK;
  return 0;
}

static void unbin(struct chunk* c, int i) {
  if (c->prev == c->next)
    a_and_64(&mal.binmap, ~(1ULL << i));
  c->prev->next = c->next;
  c->next->prev = c->prev;
  c->csize |= C_INUSE;
  NEXT_CHUNK(c)->psize |= C_INUSE;
}
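/* alloc_fwd/alloc_rev opportunistically claim the free chunk after/before c:
 * they read the neighbour's size, lock the bin that size belongs to, and
 * re-check the size before unbinning; if another thread changed the chunk in
 * the meantime, the loop retries with the new size, or gives up once the
 * neighbour is marked in use. */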
static int alloc_fwd(struct chunk* c) {
  int i;
  size_t k;
  while (!((k = c->csize) & C_INUSE)) {
    i = bin_index(k);
    lock_bin(i);
    if (c->csize == k) {
      unbin(c, i);
      unlock_bin(i);
      return 1;
    }
    unlock_bin(i);
  }
  return 0;
}

static int alloc_rev(struct chunk* c) {
  int i;
  size_t k;
  while (!((k = c->psize) & C_INUSE)) {
    i = bin_index(k);
    lock_bin(i);
    if (c->psize == k) {
      unbin(PREV_CHUNK(c), i);
      unlock_bin(i);
      return 1;
    }
    unlock_bin(i);
  }
  return 0;
}
/* pretrim - trims a chunk _prior_ to removing it from its bin.
 * Must be called with i as the ideal bin for size n, j the bin
 * for the _free_ chunk self, and bin j locked. */
static int pretrim(struct chunk* self, size_t n, int i, int j) {
  size_t n1;
  struct chunk *next, *split;

  /* We cannot pretrim if it would require re-binning. */
  if (j < 40)
    return 0;
  if (j < i + 3) {
    if (j != 63)
      return 0;
    n1 = CHUNK_SIZE(self);
    if (n1 - n <= MMAP_THRESHOLD)
      return 0;
  } else {
    n1 = CHUNK_SIZE(self);
  }
  if (bin_index(n1 - n) != j)
    return 0;

  next = NEXT_CHUNK(self);
  split = (void*)((char*)self + n);

  split->prev = self->prev;
  split->next = self->next;
  split->prev->next = split;
  split->next->prev = split;
  split->psize = n | C_INUSE;
  split->csize = n1 - n;
  next->psize = n1 - n;
  self->csize = n | C_INUSE;
  return 1;
}

static void trim(struct chunk* self, size_t n) {
  size_t n1 = CHUNK_SIZE(self);
  struct chunk *next, *split;

  if (n >= n1 - DONTCARE)
    return;

  next = NEXT_CHUNK(self);
  split = (void*)((char*)self + n);

  split->psize = n | C_INUSE;
  split->csize = n1 - n | C_INUSE;
  next->psize = n1 - n | C_INUSE;
  self->csize = n | C_INUSE;

  free(CHUNK_TO_MEM(split));
}
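/* Allocation strategy: requests above MMAP_THRESHOLD get a private mmap;
 * everything else is rounded up by adjust_size and served from the smallest
 * non-empty bin at or above bin_index_up(n), expanding the heap when binmap
 * shows nothing suitable, then trimmed back down. For example, with an
 * 8-byte size_t (SIZE_ALIGN 32, OVERHEAD 16) a 100-byte request becomes a
 * 128-byte chunk and bin_index_up(128) selects bin 3. */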
void* malloc(size_t n) {
  struct chunk* c;
  int i, j;

  if (adjust_size(&n) < 0)
    return 0;

  if (n > MMAP_THRESHOLD) {
    size_t len = n + OVERHEAD + PAGE_SIZE - 1 & -PAGE_SIZE;
    char* base = __mmap(0, len, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (base == (void*)-1)
      return 0;
    c = (void*)(base + SIZE_ALIGN - OVERHEAD);
    c->csize = len - (SIZE_ALIGN - OVERHEAD);
    c->psize = SIZE_ALIGN - OVERHEAD;
    return CHUNK_TO_MEM(c);
  }

  i = bin_index_up(n);
  for (;;) {
    uint64_t mask = mal.binmap & -(1ULL << i);
    if (!mask) {
      c = expand_heap(n);
      if (!c)
        return 0;
      if (alloc_rev(c)) {
        struct chunk* x = c;
        c = PREV_CHUNK(c);
        NEXT_CHUNK(x)->psize = c->csize = x->csize + CHUNK_SIZE(c);
      }
      break;
    }
    j = first_set(mask);
    lock_bin(j);
    c = mal.bins[j].head;
    if (c != BIN_TO_CHUNK(j)) {
      if (!pretrim(c, n, i, j))
        unbin(c, j);
      unlock_bin(j);
      break;
    }
    unlock_bin(j);
  }

  /* Now patch up in case we over-allocated */
  trim(c, n);

  return CHUNK_TO_MEM(c);
}
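/* __malloc0 is the zeroing allocation path (used as the back end for calloc
 * in musl-derived libcs): chunks obtained via mmap are already zero, and for
 * heap chunks the word-wise test avoids dirtying words that are already
 * zero. */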
void* __malloc0(size_t n) {
  void* p = malloc(n);
  if (p && !IS_MMAPPED(MEM_TO_CHUNK(p))) {
    size_t* z;
    n = (n + sizeof *z - 1) / sizeof *z;
    for (z = p; n; n--, z++)
      if (*z)
        *z = 0;
  }
  return p;
}
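/* realloc resizes mmapped allocations with mremap and in-heap allocations by
 * absorbing a free successor chunk (the backward merge is currently disabled,
 * see the FIXME below); only if that still is not enough does it fall back to
 * malloc + memcpy + free. */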
void* realloc(void* p, size_t n) {
  struct chunk *self, *next;
  size_t n0, n1;
  void* new;

  if (!p)
    return malloc(n);

  if (adjust_size(&n) < 0)
    return 0;

  self = MEM_TO_CHUNK(p);
  n1 = n0 = CHUNK_SIZE(self);

  if (IS_MMAPPED(self)) {
    size_t extra = self->psize;
    char* base = (char*)self - extra;
    size_t oldlen = n0 + extra;
    size_t newlen = n + extra;
    /* Crash on realloc of freed chunk */
    if (extra & 1)
      a_crash();
    if (newlen < PAGE_SIZE && (new = malloc(n))) {
      memcpy(new, p, n - OVERHEAD);
      free(p);
      return new;
    }
    newlen = (newlen + PAGE_SIZE - 1) & -PAGE_SIZE;
    if (oldlen == newlen)
      return p;
    base = __mremap(base, oldlen, newlen, MREMAP_MAYMOVE);
    if (base == (void*)-1)
      return newlen < oldlen ? p : 0;
    self = (void*)(base + extra);
    self->csize = newlen - extra;
    return CHUNK_TO_MEM(self);
  }

  next = NEXT_CHUNK(self);

  /* Crash on corrupted footer (likely from buffer overflow) */
  if (next->psize != self->csize)
    a_crash();

  /* Merge adjacent chunks if we need more space. This is not
   * a waste of time even if we fail to get enough space, because our
   * subsequent call to free would otherwise have to do the merge. */
  if (n > n1 && alloc_fwd(next)) {
    n1 += CHUNK_SIZE(next);
    next = NEXT_CHUNK(next);
  }
  /* FIXME: find what's wrong here and reenable it..? */
  if (0 && n > n1 && alloc_rev(self)) {
    self = PREV_CHUNK(self);
    n1 += CHUNK_SIZE(self);
  }
  self->csize = n1 | C_INUSE;
  next->psize = n1 | C_INUSE;

  /* If we got enough space, split off the excess and return */
  if (n <= n1) {
    // memmove(CHUNK_TO_MEM(self), p, n0-OVERHEAD);
    trim(self, n);
    return CHUNK_TO_MEM(self);
  }

  /* As a last resort, allocate a new chunk and copy to it. */
  new = malloc(n - OVERHEAD);
  if (!new)
    return 0;
  memcpy(new, p, n0 - OVERHEAD);
  free(CHUNK_TO_MEM(self));
  return new;
}
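/* free coalesces the chunk with any free neighbours, retrying until the
 * neighbourhood is stable while holding the target bin lock and free_lock,
 * then links the result into the bin for its final size. Large coalesced
 * chunks (per the RECLAIM threshold) additionally have their interior pages
 * returned to the kernel with MADV_DONTNEED. */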
void free(void* p) {
  struct chunk* self = MEM_TO_CHUNK(p);
  struct chunk* next;
  size_t final_size, new_size, size;
  int reclaim = 0;
  int i;

  if (!p)
    return;

  if (IS_MMAPPED(self)) {
    size_t extra = self->psize;
    char* base = (char*)self - extra;
    size_t len = CHUNK_SIZE(self) + extra;
    /* Crash on double free */
    if (extra & 1)
      a_crash();
    __munmap(base, len);
    return;
  }

  final_size = new_size = CHUNK_SIZE(self);
  next = NEXT_CHUNK(self);

  /* Crash on corrupted footer (likely from buffer overflow) */
  if (next->psize != self->csize)
    a_crash();

  for (;;) {
    if (self->psize & next->csize & C_INUSE) {
      self->csize = final_size | C_INUSE;
      next->psize = final_size | C_INUSE;
      i = bin_index(final_size);
      lock_bin(i);
      lock(mal.free_lock);
      if (self->psize & next->csize & C_INUSE)
        break;
      unlock(mal.free_lock);
      unlock_bin(i);
    }

    if (alloc_rev(self)) {
      self = PREV_CHUNK(self);
      size = CHUNK_SIZE(self);
      final_size += size;
      if (new_size + size > RECLAIM && (new_size + size ^ size) > size)
        reclaim = 1;
    }

    if (alloc_fwd(next)) {
      size = CHUNK_SIZE(next);
      final_size += size;
      if (new_size + size > RECLAIM && (new_size + size ^ size) > size)
        reclaim = 1;
      next = NEXT_CHUNK(next);
    }
  }

  if (!(mal.binmap & 1ULL << i))
    a_or_64(&mal.binmap, 1ULL << i);

  self->csize = final_size;
  next->psize = final_size;
  unlock(mal.free_lock);

  self->next = BIN_TO_CHUNK(i);
  self->prev = mal.bins[i].tail;
  self->next->prev = self;
  self->prev->next = self;

  /* Replace middle of large chunks with fresh zero pages */
  if (reclaim) {
    uintptr_t a = (uintptr_t)self + SIZE_ALIGN + PAGE_SIZE - 1 & -PAGE_SIZE;
    uintptr_t b = (uintptr_t)next - SIZE_ALIGN & -PAGE_SIZE;
#if 1
    __madvise((void*)a, b - a, MADV_DONTNEED);
#else
    __mmap((void*)a, b - a, PROT_READ | PROT_WRITE,
           MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
#endif
  }

  unlock_bin(i);
}