// Copyright (c) 2016, the Dartino project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE.md file.

#include "platforms/stm/disco_fletch/src/page_allocator.h"

#include <string.h>  // For memset.

#include "src/shared/assert.h"

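// Registers the memory region [start, start + size) as an arena and returns a
// one-bit bitmap identifying it. The returned bits can be or'ed together and
// passed to AllocatePages to restrict which arenas are searched.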
uint32_t PageAllocator::AddArena(const char* name, uintptr_t start,
                                 size_t size, uint8_t* map, size_t map_size) {
  for (int i = 0; i < kMaxArenas; i++) {
    if (arenas_[i].IsFree()) {
      arenas_[i].Initialize(name, start, size, map, map_size);
      return 1 << i;
    }
  }
  FATAL("Too many arenas added");
  return 0;
}

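// Allocates 'pages' consecutive pages from the first arena in 'arenas_bitmap'
// that can satisfy the request. Returns NULL if no arena has enough free
// space.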
void* PageAllocator::AllocatePages(size_t pages, uint32_t arenas_bitmap) {
  for (int i = 0; i < kMaxArenas; i++) {
    if ((arenas_bitmap & (1 << i)) != 0) {
      void* result = arenas_[i].AllocatePages(pages);
      if (result != NULL) return result;
    }
  }
  return NULL;
}

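// Returns 'pages' pages starting at 'start' to the arena that contains them.
// Freeing pages that were never allocated is a fatal error.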
void PageAllocator::FreePages(void* start, size_t pages) {
  ASSERT(IS_PAGE_ALIGNED(start));
  for (int i = 0; i < kMaxArenas; i++) {
    if (arenas_[i].ContainsPageAt(start)) {
      arenas_[i].FreePages(start, pages);
      return;
    }
  }
  FATAL("Free of unallocated pages");
}

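// Sets up the arena to cover the page-aligned part of
// [arena_start, arena_start + arena_size). The allocation map uses one byte
// per page and is either the caller-supplied buffer or carved out of the
// first page(s) of the arena itself.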
void PageAllocator::Arena::Initialize(const char* name, uintptr_t arena_start,
                                      size_t arena_size,
                                      uint8_t* map, size_t map_size) {
  uintptr_t start = ROUNDUP(arena_start, PAGE_SIZE);
  size_t size = ROUNDDOWN(arena_start + arena_size, PAGE_SIZE) - start;
  ASSERT(IS_PAGE_ALIGNED(start));
  ASSERT(IS_PAGE_ALIGNED(size));
  name_ = name;
  pages_ = size >> PAGE_SIZE_SHIFT;

  if (map != NULL && map_size >= pages_) {
    // There is a supplied map that can hold the state of all the pages.
    map_ = map;
    base_ = reinterpret_cast<uint8_t*>(start);
  } else {
    // Allocate a map with one byte per page from the beginning of the arena.
    // The pages used to hold the map are subtracted from the usable count.
    map_ = reinterpret_cast<uint8_t*>(start);
    pages_ -= ROUNDUP(pages_, PAGE_SIZE) / PAGE_SIZE;
    base_ = reinterpret_cast<uint8_t*>(PAGE_ALIGN(start + pages_));
  }
  memset(map_, 0, pages_);
}

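// First-fit search for a run of 'pages' consecutive free pages in the map.
// Marks the run as allocated and returns its start address, or NULL if no
// run is large enough.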
void* PageAllocator::Arena::AllocatePages(size_t pages) {
  if (pages == 0 || pages > pages_) return NULL;
  for (size_t i = 0; i < pages_ - pages + 1; i++) {
    bool found = true;
    for (size_t j = 0; j < pages; j++) {
      if (map_[i + j] != 0) {
        // Page i + j is in use; resume the search just past it.
        i += j;
        found = false;
        break;
      }
    }
    if (found) {
      memset(map_ + i, 1, pages);
      return base_ + (i << PAGE_SIZE_SHIFT);  // i * PAGE_SIZE.
    }
  }
  return NULL;
}

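// Marks 'pages' pages starting at 'start' as free again. The pages must
// currently be allocated.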
void PageAllocator::Arena::FreePages(void* start, size_t pages) {
  size_t index = (reinterpret_cast<uint8_t*>(start) - base_) >> PAGE_SIZE_SHIFT;
  for (size_t i = 0; i < pages; i++) {
    ASSERT(map_[index + i] != 0);
    map_[index + i] = 0;
  }
}