// Copyright (c) 2016, the Dartino project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE.md file.

// This code is ported from the LK repository. To keep the code in
// sync, the define FLETCH_TARGET_OS_LK selects the code from the LK
// repository. Without the define FLETCH_TARGET_OS_LK this code will
// build and link for the disco_fletch project.
#ifdef FLETCH_TARGET_OS_LK

#include <debug.h>
#include <trace.h>
#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <kernel/thread.h>
#include <kernel/mutex.h>
#include <kernel/spinlock.h>
#include <lib/cmpctmalloc.h>
#include <lib/heap.h>
#include <lib/page_alloc.h>

#else // FLETCH_TARGET_OS_LK

#include "platforms/stm/disco_fletch/src/cmpctmalloc.h"

#include <inttypes.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include "platforms/stm/disco_fletch/src/globals.h"

void* page_alloc(size_t pages);
void page_free(void* start, size_t pages);

typedef uintptr_t addr_t;
typedef uintptr_t vaddr_t;

#define LTRACEF(...)
#define LTRACE_ENTRY
#define DEBUG_ASSERT ASSERT
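// ASSERT expands to a loop whose body can never execute: "false &&"
// short-circuits, so the condition is compiled and type-checked but never
// evaluated at runtime.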
#define ASSERT(condition) \
  while (false && (condition)) { \
  }
#define STATIC_ASSERT(condition)
#define dprintf(...) fprintf(__VA_ARGS__)
#define INFO stdout

#endif // FLETCH_TARGET_OS_LK

// Malloc implementation tuned for space.
//
// Allocation takes place under a global mutex. Freelist entries are
// kept in linked lists with 8 different sizes per binary order of magnitude
// and the header size is two words with eager coalescing on free.
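//
// Area layout (see header_t and free_t below):
//
//   allocated area:  [ left | size ][ payload ... ]
//   free area:       [ left'| size ][ next | prev ][ unused ... ]
//
// 'left' points to the area immediately lower in memory. While an area is
// free, the low bit of its own 'left' pointer is set as a tag (see
// tag_as_free). 'size' includes the header, so the area starting at address
// a ends at a + size.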

#ifdef DEBUG
#define CMPCT_DEBUG
#endif

#ifdef FLETCH_TARGET_OS_LK
#define LOCAL_TRACE 0
#endif

#define ALLOC_FILL 0x99
#define FREE_FILL 0x77
#define PADDING_FILL 0x55

#ifdef FLETCH_TARGET_OS_LK
#if WITH_KERNEL_VM && !defined(HEAP_GROW_SIZE)
#define HEAP_GROW_SIZE (1 * 1024 * 1024) /* Grow aggressively */
#elif !defined(HEAP_GROW_SIZE)
#define HEAP_GROW_SIZE (4 * 1024) /* Grow less aggressively */
#endif
#else
#define HEAP_GROW_SIZE (4 * 1024) /* Grow less aggressively */
#endif

STATIC_ASSERT(IS_PAGE_ALIGNED(HEAP_GROW_SIZE));

// Individual allocations above 4Mbytes are just fetched directly from the
// block allocator.
#define HEAP_ALLOC_VIRTUAL_BITS 22

// When we grow the heap we have to have somewhere in the freelist to put the
// resulting freelist entry, so the freelist has to have a certain number of
// buckets.
STATIC_ASSERT(HEAP_GROW_SIZE <= (1u << HEAP_ALLOC_VIRTUAL_BITS));

// Buckets for allocations. The smallest 15 buckets are 8, 16, 24, etc. up to
// 120 bytes. After that we round up to the nearest size that can be written
// /^0*1...0*$/, giving 8 buckets per order of binary magnitude. The freelist
// entries in a given bucket have at least the given size, plus the header
// size. On 64 bit, the 8 byte bucket is useless, since the free list
// structure is 16 bytes larger than the header, but we keep it for
// simplicity.
#define NUMBER_OF_BUCKETS (1 + 15 + (HEAP_ALLOC_VIRTUAL_BITS - 7) * 8)
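// With HEAP_ALLOC_VIRTUAL_BITS == 22 this works out to 1 + 15 + 15 * 8 == 136
// buckets.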

// All individual memory areas on the heap start with this.
typedef struct header_struct {
    struct header_struct *left;  // Pointer to the previous area in memory order.
    size_t size;
} header_t;

typedef struct free_struct {
    header_t header;
    struct free_struct *next;
    struct free_struct *prev;
} free_t;

struct heap {
    size_t size;
    size_t remaining;
#ifdef FLETCH_TARGET_OS_LK
    mutex_t lock;
#endif
    free_t *free_lists[NUMBER_OF_BUCKETS];
    // We have some 32 bit words that tell us whether there is an entry in the
    // freelist.
#define BUCKET_WORDS (((NUMBER_OF_BUCKETS) + 31) >> 5)
    uint32_t free_list_bits[BUCKET_WORDS];
};
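
// Bucket b's flag is bit (31 - (b & 31)) of free_list_bits[b >> 5], i.e. the
// buckets are laid out MSB-first. This lets __builtin_clz on a masked word
// return the smallest nonempty bucket index directly (see
// find_nonempty_bucket below).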

// Heap static vars.
static struct heap theheap;

static ssize_t heap_grow(size_t len, free_t **bucket);

static void lock(void)
{
#ifdef FLETCH_TARGET_OS_LK
    mutex_acquire(&theheap.lock);
#endif
}

static void unlock(void)
{
#ifdef FLETCH_TARGET_OS_LK
    mutex_release(&theheap.lock);
#endif
}

static void dump_free(header_t *header)
{
    dprintf(INFO, "\t\tbase %p, end 0x%lx, len 0x%zx\n",
            header, (vaddr_t)header + header->size, header->size);
}

void cmpct_dump(void)
{
    lock();
    dprintf(INFO, "Heap dump (using cmpctmalloc):\n");
    dprintf(INFO, "\tsize %lu, remaining %lu\n",
            (unsigned long)theheap.size,
            (unsigned long)theheap.remaining);

    dprintf(INFO, "\tfree list:\n");
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        bool header_printed = false;
        free_t *free_area = theheap.free_lists[i];
        for (; free_area != NULL; free_area = free_area->next) {
            ASSERT(free_area != free_area->next);
            if (!header_printed) {
                dprintf(INFO, "\tbucket %d\n", i);
                header_printed = true;
            }
            dump_free(&free_area->header);
        }
    }
    unlock();
}

// Operates in sizes that don't include the allocation header.
static int size_to_index_helper(
    size_t size, size_t *rounded_up_out, int adjust, int increment)
{
    // First buckets are simply 8-spaced up to 128.
    if (size <= 128) {
        if (sizeof(size_t) == 8u && size <= sizeof(free_t) - sizeof(header_t)) {
            *rounded_up_out = sizeof(free_t) - sizeof(header_t);
        } else {
            *rounded_up_out = size;
        }
        // No allocation is smaller than 8 bytes, so the first bucket is for 8
        // byte spaces (not including the header). For 64 bit, the free list
        // struct is 16 bytes larger than the header, so no allocation can be
        // smaller than that (otherwise it could not be linked into a free
        // list when freed), but we have empty 8 and 16 byte buckets for
        // simplicity.
        return (size >> 3) - 1;
    }

    // We are going to go up to the next size to round up, but if we hit a
    // bucket size exactly we don't want to go up. By subtracting 8 here, we
    // will do the right thing (the carry propagates up for the round numbers
    // we are interested in).
    size += adjust;
    // After 128 the buckets are logarithmically spaced, every 16 up to 256,
    // every 32 up to 512 etc. This can be thought of as rows of 8 buckets.
    // GCC intrinsic count-leading-zeros.
    // E.g. 128-255 has 24 leading zeros and we want row to be 4.
    unsigned row = sizeof(size_t) * 8 - 4 - __builtin_clzl(size);
    // For row 4 we want to shift down 4 bits.
    unsigned column = (size >> row) & 7;
    int row_column = (row << 3) | column;
    row_column += increment;
    size = (8 + (row_column & 7)) << (row_column >> 3);
    *rounded_up_out = size;
    // We start with 15 buckets, 8, 16, 24, 32, 40, 48, 56, 64, 72, 80, 88, 96,
    // 104, 112, 120. Then we have row 4, sizes 128 and up, with the
    // row-column 8 and up.
    int answer = row_column + 15 - 32;
    DEBUG_ASSERT(answer < NUMBER_OF_BUCKETS);
    return answer;
}
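
// Worked example for size_to_index_helper: allocating 130 bytes rounds up to
// 136 first, and adjust (-8) takes it to 128. That puts row at 4 and column
// at 0, and increment bumps row_column to 33, so the size is rounded up to
// (8 + 1) << 4 == 144 and the bucket is 33 + 15 - 32 == 16. Freeing a
// 144-byte space also maps to bucket 16, so every entry found in that bucket
// is guaranteed large enough for a bucket-16 allocation.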

// Round up size to next bucket when allocating.
static int size_to_index_allocating(size_t size, size_t *rounded_up_out)
{
    size_t rounded = ROUNDUP(size, 8);
    return size_to_index_helper(rounded, rounded_up_out, -8, 1);
}

// Round down size to next bucket when freeing.
static int size_to_index_freeing(size_t size)
{
    size_t dummy;
    return size_to_index_helper(size, &dummy, 0, 0);
}

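// The low bit of an area's 'left' pointer tags the area itself as free.
// Headers are at least 4-byte aligned, so bit 0 of a genuine left pointer is
// always zero and can be borrowed for the tag.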
inline header_t *tag_as_free(void *left)
{
    return (header_t *)((uintptr_t)left | 1);
}

inline bool is_tagged_as_free(header_t *header)
{
    return ((uintptr_t)(header->left) & 1) != 0;
}

inline header_t *untag(void *left)
{
    return (header_t *)((uintptr_t)left & ~1);
}

inline header_t *right_header(header_t *header)
{
    return (header_t *)((char *)header + header->size);
}

inline static void set_free_list_bit(int index)
{
    theheap.free_list_bits[index >> 5] |= (1u << (31 - (index & 0x1f)));
}

inline static void clear_free_list_bit(int index)
{
    theheap.free_list_bits[index >> 5] &= ~(1u << (31 - (index & 0x1f)));
}

static int find_nonempty_bucket(int index)
{
    uint32_t mask = (1u << (31 - (index & 0x1f))) - 1;
    mask = mask * 2 + 1;
    mask &= theheap.free_list_bits[index >> 5];
    if (mask != 0) return (index & ~0x1f) + __builtin_clz(mask);
    for (index = ROUNDUP(index + 1, 32); index <= NUMBER_OF_BUCKETS; index += 32) {
        mask = theheap.free_list_bits[index >> 5];
        if (mask != 0u) return index + __builtin_clz(mask);
    }
    return -1;
}
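
// Example: with index == 20 and only bucket 22 nonempty, the mask above
// covers bits 11..0 of word 0 (buckets 20..31). ANDing with the word leaves
// only bit 9 set, and __builtin_clz of that is 22, the bucket we want.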

static bool is_start_of_os_allocation(header_t *header)
{
    return header->left == untag(NULL);
}

static void create_free_area(
    void *address, void *left, size_t size, free_t **bucket)
{
    free_t *free_area = (free_t *)address;
    free_area->header.size = size;
    free_area->header.left = tag_as_free(left);
    if (bucket == NULL) {
        int index = size_to_index_freeing(size - sizeof(header_t));
        set_free_list_bit(index);
        bucket = &theheap.free_lists[index];
    }
    free_t *old_head = *bucket;
    if (old_head != NULL) old_head->prev = free_area;
    free_area->next = old_head;
    free_area->prev = NULL;
    *bucket = free_area;
    theheap.remaining += size;
#ifdef CMPCT_DEBUG
    memset(free_area + 1, FREE_FILL, size - sizeof(free_t));
#endif
}

static bool is_end_of_os_allocation(char *address)
{
    return ((header_t *)address)->size == 0;
}

static void free_to_os(header_t *header, size_t size)
{
    DEBUG_ASSERT(IS_PAGE_ALIGNED(size));
    page_free(header, size >> PAGE_SIZE_SHIFT);
    theheap.size -= size;
}

static void free_memory(void *address, void *left, size_t size)
{
    left = untag(left);
    if (IS_PAGE_ALIGNED(left) &&
        is_start_of_os_allocation(left) &&
        is_end_of_os_allocation((char *)address + size)) {
        free_to_os(left, size + ((header_t *)left)->size + sizeof(header_t));
    } else {
        create_free_area(address, left, size, NULL);
    }
}

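// Takes a free area out of the doubly linked list for its bucket, clearing
// the bucket's bit in free_list_bits if the list becomes empty. The ASSERT
// catches the remaining-space counter wrapping around below zero.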
static void unlink_free(free_t *free_area, int bucket)
{
    theheap.remaining -= free_area->header.size;
    ASSERT(theheap.remaining < 4000000000u);
    free_t *next = free_area->next;
    free_t *prev = free_area->prev;
    if (theheap.free_lists[bucket] == free_area) {
        theheap.free_lists[bucket] = next;
        if (next == NULL) clear_free_list_bit(bucket);
    }
    if (prev != NULL) prev->next = next;
    if (next != NULL) next->prev = prev;
}

static void unlink_free_unknown_bucket(free_t *free_area)
{
    return unlink_free(
        free_area,
        size_to_index_freeing(free_area->header.size - sizeof(header_t)));
}

static void *create_allocation_header(
    void *address, size_t offset, size_t size, void *left)
{
    header_t *standalone = (header_t *)((char *)address + offset);
    standalone->left = untag(left);
    standalone->size = size;
    return standalone + 1;
}

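// Repoints right->left at new_left while preserving the free tag in the low
// bit of right->left.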
static void FixLeftPointer(header_t *right, header_t *new_left)
{
    int tag = (uintptr_t)right->left & 1;
    right->left = (header_t *)(((uintptr_t)new_left & ~1) | tag);
}

static void WasteFreeMemory(void)
{
    while (theheap.remaining != 0) cmpct_alloc(1);
}

// If we just make a big allocation it gets rounded off. If we actually
// want to use a reasonably accurate amount of memory for test purposes, we
// have to do many small allocations.
static void *TestTrimHelper(ssize_t target)
{
    char *answer = NULL;
    size_t remaining = theheap.remaining;
    while (theheap.remaining - target > 512) {
        char *next_block = cmpct_alloc(8 + ((theheap.remaining - target) >> 2));
        *(char**)next_block = answer;
        answer = next_block;
        if (theheap.remaining > remaining) return answer;
        // Abandon the attempt to hit a particular freelist entry size if we
        // accidentally got more memory from the OS.
        remaining = theheap.remaining;
    }
    return answer;
}

static void TestTrimFreeHelper(char *block)
{
    while (block) {
        char *next_block = *(char **)block;
        cmpct_free(block);
        block = next_block;
    }
}

392 | |
393 #ifdef FLETCH_TARGET_OS_LK | |
394 static void cmpct_test_trim(void) | |
395 #else | |
396 void cmpct_test_trim(void) | |
397 #endif | |
398 { | |
399 WasteFreeMemory(); | |
400 | |
401 size_t test_sizes[200]; | |
402 int sizes = 0; | |
403 | |
404 for (size_t s = 1; s < PAGE_SIZE * 4; s = (s + 1) * 1.1) { | |
405 test_sizes[sizes++] = s; | |
406 ASSERT(sizes < 200); | |
407 } | |
408 for (ssize_t s = -32; s <= 32; s += 8) { | |
409 test_sizes[sizes++] = PAGE_SIZE + s; | |
410 ASSERT(sizes < 200); | |
411 } | |
412 | |
413 // Test allocations at the start of an OS allocation. | |
414 for (int with_second_alloc = 0; with_second_alloc < 2; with_second_alloc++)
{ | |
415 for (int i = 0; i < sizes; i++) { | |
416 size_t s = test_sizes[i]; | |
417 | |
418 char *a, *a2 = NULL; | |
419 a = cmpct_alloc(s); | |
420 if (with_second_alloc) { | |
421 a2 = cmpct_alloc(1); | |
422 if (s < PAGE_SIZE >> 1) { | |
423 // It is the intention of the test that a is at the start of
an OS allocation | |
424 // and that a2 is "right after" it. Otherwise we are not te
sting what I | |
425 // thought. OS allocations are certainly not smaller than a
page, so check in | |
426 // that case. | |
427 ASSERT((uintptr_t)(a2 - a) < s * 1.13 + 48); | |
428 } | |
429 } | |
430 cmpct_trim(); | |
431 size_t remaining = theheap.remaining; | |
432 // We should have < 1 page on either side of the a allocation. | |
433 ASSERT(remaining < PAGE_SIZE * 2); | |
434 cmpct_free(a); | |
435 if (with_second_alloc) { | |
436 // Now only a2 is holding onto the OS allocation. | |
437 ASSERT(theheap.remaining > remaining); | |
438 } else { | |
439 ASSERT(theheap.remaining == 0); | |
440 } | |
441 remaining = theheap.remaining; | |
442 cmpct_trim(); | |
443 ASSERT(theheap.remaining <= remaining); | |
444 // If a was at least one page then the trim should have freed up tha
t page. | |
445 if (s >= PAGE_SIZE && with_second_alloc) ASSERT(theheap.remaining <
remaining); | |
446 if (with_second_alloc) cmpct_free(a2); | |
447 } | |
448 ASSERT(theheap.remaining == 0); | |
449 } | |
450 | |
    ASSERT(theheap.remaining == 0);

    // Now test allocations near the end of an OS allocation.
    for (ssize_t wobble = -64; wobble <= 64; wobble += 8) {
        for (int i = 0; i < sizes; i++) {
            size_t s = test_sizes[i];

            if ((ssize_t)s + wobble < 0) continue;

            char *start_of_os_alloc = cmpct_alloc(1);

            // If the OS allocations are very small this test does not make
            // sense.
            if (theheap.remaining <= s + wobble) {
                cmpct_free(start_of_os_alloc);
                continue;
            }

            char *big_bit_in_the_middle = TestTrimHelper(s + wobble);
            size_t remaining = theheap.remaining;

            // If the remaining free memory is large we started a new OS
            // allocation and the test makes no sense.
            if (remaining > 128 + s * 1.13 + wobble) {
                cmpct_free(start_of_os_alloc);
                TestTrimFreeHelper(big_bit_in_the_middle);
                continue;
            }

            cmpct_free(start_of_os_alloc);
            remaining = theheap.remaining;

            // This trim should sometimes trim a page off the end of the OS
            // allocation.
            cmpct_trim();
            ASSERT(theheap.remaining <= remaining);
            remaining = theheap.remaining;

            // We should have < 1 page on either side of the big allocation.
            ASSERT(remaining < PAGE_SIZE * 2);

            TestTrimFreeHelper(big_bit_in_the_middle);
        }
    }
}

#ifdef FLETCH_TARGET_OS_LK
static void cmpct_test_buckets(void)
#else
void cmpct_test_buckets(void)
#endif
{
    size_t rounded;
    unsigned bucket;
    // Check for the 8-spaced buckets up to 128.
    for (unsigned i = 1; i <= 128; i++) {
        // Round up when allocating.
        bucket = size_to_index_allocating(i, &rounded);
        unsigned expected = (ROUNDUP(i, 8) >> 3) - 1;
        ASSERT(bucket == expected);
        ASSERT(IS_ALIGNED(rounded, 8));
        ASSERT(rounded >= i);
        if (i >= sizeof(free_t) - sizeof(header_t)) {
            // Once we get above the size of the free area struct (4 words),
            // we won't round up much for these small sizes.
            ASSERT(rounded - i < 8);
        }
        // Only rounded sizes are freed.
        if ((i & 7) == 0) {
            // Up to size 128 we have exact buckets for each multiple of 8.
            ASSERT(bucket == (unsigned)size_to_index_freeing(i));
        }
    }
    int bucket_base = 7;
    for (unsigned j = 16; j < 1024; j *= 2, bucket_base += 8) {
        // Note the "<=", which means we test the powers of 2 twice, ensuring
        // that both ways of calculating the bucket number match.
        for (unsigned i = j * 8; i <= j * 16; i++) {
            // Round up to j multiple in this range when allocating.
            bucket = size_to_index_allocating(i, &rounded);
            unsigned expected = bucket_base + ROUNDUP(i, j) / j;
            ASSERT(bucket == expected);
            ASSERT(IS_ALIGNED(rounded, j));
            ASSERT(rounded >= i);
            ASSERT(rounded - i < j);
            // Only 8-rounded sizes are freed or chopped off the end of a
            // free area when allocating.
            if ((i & 7) == 0) {
                // When freeing, if we don't hit the size of the bucket
                // precisely, we have to put the free space into a smaller
                // bucket, because the buckets have entries that will always
                // be big enough for the corresponding allocation size (so we
                // don't have to traverse the free chains to find a big
                // enough one).
                if ((i % j) == 0) {
                    ASSERT((int)bucket == size_to_index_freeing(i));
                } else {
                    ASSERT((int)bucket - 1 == size_to_index_freeing(i));
                }
            }
        }
    }
}

static void cmpct_test_get_back_newly_freed_helper(size_t size)
{
    void *allocated = cmpct_alloc(size);
    if (allocated == NULL) return;
    char *allocated2 = cmpct_alloc(8);
    char *expected_position = (char *)allocated + size;
    if (allocated2 < expected_position || allocated2 > expected_position + 128) {
        // If the allocated2 allocation is not in the same OS allocation as the
        // first allocation then the test may not work as expected (the memory
        // may be returned to the OS when we free the first allocation, and we
        // might not get it back).
        cmpct_free(allocated);
        cmpct_free(allocated2);
        return;
    }

    cmpct_free(allocated);
    void *allocated3 = cmpct_alloc(size);
    // To avoid churn and fragmentation we would want to get the newly freed
    // memory back again when we allocate the same size shortly after.
    ASSERT(allocated3 == allocated);
    cmpct_free(allocated2);
    cmpct_free(allocated3);
}

#ifdef FLETCH_TARGET_OS_LK
static void cmpct_test_get_back_newly_freed(void)
#else
void cmpct_test_get_back_newly_freed(void)
#endif
{
    size_t increment = 16;
    for (size_t i = 128; i <= 0x8000000; i *= 2, increment *= 2) {
        for (size_t j = i; j < i * 2; j += increment) {
            cmpct_test_get_back_newly_freed_helper(i - 8);
            cmpct_test_get_back_newly_freed_helper(i);
            cmpct_test_get_back_newly_freed_helper(i + 1);
        }
    }
    for (size_t i = 1024; i <= 2048; i++) {
        cmpct_test_get_back_newly_freed_helper(i);
    }
}

#ifdef FLETCH_TARGET_OS_LK
static void cmpct_test_return_to_os(void)
#else
void cmpct_test_return_to_os(void)
#endif
{
    cmpct_trim();
    size_t remaining = theheap.remaining;
    // This goes in a new OS allocation since the trim above removed any free
    // area big enough to contain it.
    void *a = cmpct_alloc(5000);
    void *b = cmpct_alloc(2500);
    cmpct_free(a);
    cmpct_free(b);
    // If things work as expected the new allocation is at the start of an OS
    // allocation. There's just one sentinel and one header to the left of it.
    // If that's not the case then the allocation was met from some space in
    // the middle of an OS allocation, and our test won't work as expected, so
    // bail out.
    if (((uintptr_t)a & (PAGE_SIZE - 1)) != sizeof(header_t) * 2) return;
    // No trim needed when the entire OS allocation is free.
    ASSERT(remaining == theheap.remaining);
}

static void *large_alloc(size_t size)
{
#ifdef CMPCT_DEBUG
    size_t requested_size = size;
#endif
    size = ROUNDUP(size, 8);
    free_t *free_area = NULL;
    lock();
    if (heap_grow(size, &free_area) < 0) {
        // The OS refused to give us more pages.
        unlock();
        return NULL;
    }
    void *result =
        create_allocation_header(free_area, 0, free_area->header.size, free_area->header.left);
    // Normally the 'remaining free space' counter would be decremented when we
    // unlink the free area from its bucket. However in this case the free
    // area was too big to go in any bucket and we had it in our own
    // "free_area" variable so there is no unlinking and we have to adjust the
    // counter here.
    theheap.remaining -= free_area->header.size;
    unlock();
#ifdef CMPCT_DEBUG
    memset(result, ALLOC_FILL, requested_size);
    memset((char *)result + requested_size, PADDING_FILL,
           free_area->header.size - sizeof(header_t) - requested_size);
#endif
    return result;
}

void cmpct_trim(void)
{
    // Look at free list entries that are at least as large as one page plus a
    // header. They might be at the start or the end of a block, so we can trim
    // them and free the page(s).
    lock();
    for (int bucket = size_to_index_freeing(PAGE_SIZE);
         bucket < NUMBER_OF_BUCKETS;
         bucket++) {
        free_t *next;
        for (free_t *free_area = theheap.free_lists[bucket];
             free_area != NULL;
             free_area = next) {
            DEBUG_ASSERT(free_area->header.size >= PAGE_SIZE + sizeof(header_t));
            next = free_area->next;
            header_t *right = right_header(&free_area->header);
            if (is_end_of_os_allocation((char *)right)) {
                char *old_os_allocation_end =
                    (char *)ROUNDUP((uintptr_t)right, PAGE_SIZE);
                // The page will end with a smaller free list entry and a
                // header-sized sentinel.
                char *new_os_allocation_end = (char *)
                    ROUNDUP((uintptr_t)free_area + sizeof(header_t) + sizeof(free_t), PAGE_SIZE);
                size_t freed_up = old_os_allocation_end - new_os_allocation_end;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // Rare, because we only look at large freelist entries, but
                // unlucky rounding could mean we can't actually free anything
                // here.
                if (freed_up == 0) continue;
                unlink_free(free_area, bucket);
                size_t new_free_size = free_area->header.size - freed_up;
                DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                // Right sentinel, not free, stops attempts to coalesce right.
                create_allocation_header(free_area, new_free_size, 0, free_area);
                // Also puts it in the correct bucket.
                create_free_area(free_area, untag(free_area->header.left),
                                 new_free_size, NULL);
                page_free(new_os_allocation_end, freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            } else if (is_start_of_os_allocation(untag(free_area->header.left))) {
                char *old_os_allocation_start =
                    (char *)ROUNDDOWN((uintptr_t)free_area, PAGE_SIZE);
                // For the sentinel, we need at least one header-size of space
                // between the page edge and the first allocation to the right
                // of the free area.
                char *new_os_allocation_start =
                    (char *)ROUNDDOWN((uintptr_t)(right - 1), PAGE_SIZE);
                size_t freed_up = new_os_allocation_start - old_os_allocation_start;
                DEBUG_ASSERT(IS_PAGE_ALIGNED(freed_up));
                // This should not happen because we only look at the large
                // freelist buckets.
                if (freed_up == 0) continue;
                unlink_free(free_area, bucket);
                size_t sentinel_size = sizeof(header_t);
                size_t new_free_size = free_area->header.size - freed_up;
                if (new_free_size < sizeof(free_t)) {
                    sentinel_size += new_free_size;
                    new_free_size = 0;
                }
                // Left sentinel, not free, stops attempts to coalesce left.
                create_allocation_header(new_os_allocation_start, 0,
                                         sentinel_size, NULL);
                if (new_free_size == 0) {
                    FixLeftPointer(right, (header_t *)new_os_allocation_start);
                } else {
                    DEBUG_ASSERT(new_free_size >= sizeof(free_t));
                    char *new_free = new_os_allocation_start + sentinel_size;
                    // Also puts it in the correct bucket.
                    create_free_area(new_free, new_os_allocation_start,
                                     new_free_size, NULL);
                    FixLeftPointer(right, (header_t *)new_free);
                }
                page_free(old_os_allocation_start, freed_up >> PAGE_SIZE_SHIFT);
                theheap.size -= freed_up;
            }
        }
    }
    unlock();
}

void *cmpct_alloc(size_t size)
{
    if (size == 0u) return NULL;

    if (size + sizeof(header_t) > (1u << HEAP_ALLOC_VIRTUAL_BITS)) return large_alloc(size);

    size_t rounded_up;
    int start_bucket = size_to_index_allocating(size, &rounded_up);

    rounded_up += sizeof(header_t);

    lock();
    int bucket = find_nonempty_bucket(start_bucket);
    if (bucket == -1) {
        // Grow heap by at least 12% if we can.
        size_t growby = MIN(1u << HEAP_ALLOC_VIRTUAL_BITS,
                            MAX(theheap.size >> 3,
                                MAX(HEAP_GROW_SIZE, rounded_up)));
        while (heap_grow(growby, NULL) < 0) {
            if (growby <= rounded_up) {
                unlock();
                return NULL;
            }
            growby = MAX(growby >> 1, rounded_up);
        }
        bucket = find_nonempty_bucket(start_bucket);
    }
    free_t *head = theheap.free_lists[bucket];
    size_t left_over = head->header.size - rounded_up;
    // We can't carve off the rest for a new free space if it's smaller than
    // the free-list linked structure. We also don't carve it off if it's less
    // than 1.6% the size of the allocation. This is to avoid small long-lived
    // allocations being placed right next to large allocations, hindering
    // coalescing and returning pages to the OS.
    if (left_over >= sizeof(free_t) && left_over > (size >> 6)) {
        header_t *right = right_header(&head->header);
        unlink_free(head, bucket);
        void *free = (char *)head + rounded_up;
        create_free_area(free, head, left_over, NULL);
        FixLeftPointer(right, (header_t *)free);
        head->header.size -= left_over;
    } else {
        unlink_free(head, bucket);
    }
    void *result =
        create_allocation_header(head, 0, head->header.size, head->header.left);
#ifdef CMPCT_DEBUG
    memset(result, ALLOC_FILL, size);
    memset(((char *)result) + size, PADDING_FILL,
           rounded_up - size - sizeof(header_t));
#endif
    unlock();
    return result;
}

void *cmpct_memalign(size_t size, size_t alignment)
{
    if (alignment < 8) return cmpct_alloc(size);
    size_t padded_size =
        size + alignment + sizeof(free_t) + sizeof(header_t);
    char *unaligned = (char *)cmpct_alloc(padded_size);
    lock();
    size_t mask = alignment - 1;
    uintptr_t payload_int = (uintptr_t)unaligned + sizeof(free_t) +
                            sizeof(header_t) + mask;
    char *payload = (char *)(payload_int & ~mask);
    if (unaligned != payload) {
        header_t *unaligned_header = (header_t *)unaligned - 1;
        header_t *header = (header_t *)payload - 1;
        size_t left_over = payload - unaligned;
        create_allocation_header(
            header, 0, unaligned_header->size - left_over, unaligned_header);
        header_t *right = right_header(unaligned_header);
        unaligned_header->size = left_over;
        FixLeftPointer(right, header);
        unlock();
        cmpct_free(unaligned);
    } else {
        unlock();
    }
    // TODO: Free the part after the aligned allocation.
    return payload;
}
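
// Worked example for cmpct_memalign, assuming a 32-bit target where
// sizeof(header_t) == 8 and sizeof(free_t) == 16: a request for 100 bytes at
// 64-byte alignment allocates 100 + 64 + 16 + 8 == 188 bytes. If the
// unaligned payload lands at 0x1008, the aligned payload becomes
// (0x1008 + 16 + 8 + 63) & ~63 == 0x1040, and left_over == 0x38 bytes. The
// padding guarantees left_over >= sizeof(free_t) + sizeof(header_t), so the
// carved-off left part is always big enough to be freed as its own area.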

void cmpct_free(void *payload)
{
    if (payload == NULL) return;
    header_t *header = (header_t *)payload - 1;
    DEBUG_ASSERT(!is_tagged_as_free(header));  // Double free!
    size_t size = header->size;
    lock();
    header_t *left = header->left;
    if (left != NULL && is_tagged_as_free(left)) {
        // Coalesce with left free object.
        unlink_free_unknown_bucket((free_t *)left);
        header_t *right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce both sides.
            unlink_free_unknown_bucket((free_t *)right);
            header_t *right_right = right_header(right);
            FixLeftPointer(right_right, left);
            free_memory(left, left->left, left->size + size + right->size);
        } else {
            // Coalesce only left.
            FixLeftPointer(right, left);
            free_memory(left, left->left, left->size + size);
        }
    } else {
        header_t *right = right_header(header);
        if (is_tagged_as_free(right)) {
            // Coalesce only right.
            header_t *right_right = right_header(right);
            unlink_free_unknown_bucket((free_t *)right);
            FixLeftPointer(right_right, header);
            free_memory(header, left, size + right->size);
        } else {
            free_memory(header, left, size);
        }
    }
    unlock();
}

void *cmpct_realloc(void *payload, size_t size)
{
    if (payload == NULL) return cmpct_alloc(size);
    header_t *header = (header_t *)payload - 1;
    size_t old_size = header->size - sizeof(header_t);
    void *new_payload = cmpct_alloc(size);
    if (new_payload == NULL) return NULL;
    memcpy(new_payload, payload, MIN(size, old_size));
    cmpct_free(payload);
    return new_payload;
}

static void add_to_heap(void *new_area, size_t size, free_t **bucket)
{
    void *top = (char *)new_area + size;
    header_t *left_sentinel = (header_t *)new_area;
    // Not free, stops attempts to coalesce left.
    create_allocation_header(left_sentinel, 0, sizeof(header_t), NULL);
    header_t *new_header = left_sentinel + 1;
    size_t free_size = size - 2 * sizeof(header_t);
    create_free_area(new_header, left_sentinel, free_size, bucket);
    header_t *right_sentinel = (header_t *)(top - sizeof(header_t));
    // Not free, stops attempts to coalesce right.
    create_allocation_header(right_sentinel, 0, 0, new_header);
}
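
// After add_to_heap a fresh OS allocation looks like this:
//
//   [ left sentinel ][ free area ................. ][ right sentinel ]
//     size = sizeof(header_t)   size = gross size     size = 0
//     left = NULL               minus two headers     left = free area
//
// The sentinels are never free, so coalescing stops at the edges. Their
// left == NULL and size == 0 values are exactly what
// is_start_of_os_allocation and is_end_of_os_allocation test for.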

// Create a new free-list entry of at least size bytes (including the
// allocation header). Called with the lock held, apart from during init.
static ssize_t heap_grow(size_t size, free_t **bucket)
{
    // The new free list entry will have a header on each side (the
    // sentinels) so we need to grow the gross heap size by this much more.
    size += 2 * sizeof(header_t);
    size = ROUNDUP(size, PAGE_SIZE);
    void *ptr = page_alloc(size >> PAGE_SIZE_SHIFT);
    if (ptr == NULL) return -1;
    theheap.size += size;
    LTRACEF("growing heap by 0x%zx bytes, new ptr %p\n", size, ptr);
    add_to_heap(ptr, size, bucket);
    return size;
}

void cmpct_init(void)
{
    LTRACE_ENTRY;

    // Create a mutex.
#ifdef FLETCH_TARGET_OS_LK
    mutex_init(&theheap.lock);
#endif

    // Initialize the free list.
    for (int i = 0; i < NUMBER_OF_BUCKETS; i++) {
        theheap.free_lists[i] = NULL;
    }
    for (int i = 0; i < BUCKET_WORDS; i++) {
        theheap.free_list_bits[i] = 0;
    }

    size_t initial_alloc = HEAP_GROW_SIZE - 2 * sizeof(header_t);

    theheap.remaining = 0;

    heap_grow(initial_alloc, NULL);
}
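
// A minimal usage sketch (hypothetical caller, not part of this file):
//
//   cmpct_init();
//   void *p = cmpct_alloc(100);        // 8-byte-aligned allocation.
//   p = cmpct_realloc(p, 200);         // Grows by allocate-copy-free.
//   void *q = cmpct_memalign(64, 64);  // 64 bytes, 64-byte aligned.
//   cmpct_free(p);
//   cmpct_free(q);
//   cmpct_trim();                      // Return whole free pages to the OS.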