Chromium Code Reviews

Unified diff: runtime/vm/freelist.cc

Issue 136563002: Landing: Write protect executable pages in the VM. (Closed)
Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 10 months ago
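
This patch threads an is_protected flag through the free list so that allocation out of write-protected (read+execute) code pages temporarily flips the touched pages to read+write around each header update and then restores the stricter protection. The following is a minimal, self-contained sketch of that flip-write-restore pattern using POSIX mmap/mprotect rather than the VM's VirtualMemory wrapper; the page handling and names here are illustrative assumptions, not code from this CL.

// flip_write_restore.cc: illustration only; assumes a POSIX system.
#include <sys/mman.h>
#include <unistd.h>
#include <cassert>
#include <cstdint>
#include <cstring>

int main() {
  const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
  // One anonymous page stands in for a code page backing a free list block.
  void* page = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
                    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
  assert(page != MAP_FAILED);

  // Code pages normally stay read+execute.
  int status = mprotect(page, page_size, PROT_READ | PROT_EXEC);
  assert(status == 0);

  // They are flipped to read+write just long enough to update a header...
  status = mprotect(page, page_size, PROT_READ | PROT_WRITE);
  assert(status == 0);
  memset(page, 0, sizeof(uintptr_t));  // e.g. clear a next pointer

  // ...and then re-protected.
  status = mprotect(page, page_size, PROT_READ | PROT_EXEC);
  assert(status == 0);

  munmap(page, page_size);
  return 0;
}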
 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/freelist.h"

 #include <map>
 #include <utility>

 #include "vm/bit_set.h"
 #include "vm/object.h"
 #include "vm/raw_object.h"

 namespace dart {


 FreeListElement* FreeListElement::AsElement(uword addr, intptr_t size) {
+  // Precondition: the (page containing the) header of the element is
+  // writable.
   ASSERT(size >= kObjectAlignment);
   ASSERT(Utils::IsAligned(size, kObjectAlignment));

   FreeListElement* result = reinterpret_cast<FreeListElement*>(addr);

   uword tags = 0;
   tags = RawObject::SizeTag::update(size, tags);
   tags = RawObject::ClassIdTag::update(kFreeListElement, tags);

   result->tags_ = tags;
   if (size > RawObject::SizeTag::kMaxSizeTag) {
     *result->SizeAddress() = size;
   }
   result->set_next(NULL);
-
   return result;
+  // Postcondition: the (page containing the) header of the element is
+  // writable.
 }


 void FreeListElement::InitOnce() {
   ASSERT(sizeof(FreeListElement) == kObjectAlignment);
   ASSERT(OFFSET_OF(FreeListElement, tags_) == Object::tags_offset());
 }


 FreeList::FreeList() {
   Reset();
 }


 FreeList::~FreeList() {
   // Nothing to release.
 }


-uword FreeList::TryAllocate(intptr_t size) {
+uword FreeList::TryAllocate(intptr_t size, bool is_protected) {
+  // Precondition: is_protected is false or else all free list elements are
+  // in non-writable pages.
+
+  // Postcondition: if allocation succeeds, the allocated block is writable.
   int index = IndexForSize(size);
   if ((index != kNumLists) && free_map_.Test(index)) {
-    return reinterpret_cast<uword>(DequeueElement(index));
+    FreeListElement* element = DequeueElement(index);
+    if (is_protected) {
+      bool status =
+          VirtualMemory::Protect(reinterpret_cast<void*>(element),
+                                 size,
+                                 VirtualMemory::kReadWrite /*Execute*/);
+      ASSERT(status);
+    }
+    return reinterpret_cast<uword>(element);
   }

   if ((index + 1) < kNumLists) {
     intptr_t next_index = free_map_.Next(index + 1);
     if (next_index != -1) {
       // Dequeue an element from the list, split and enqueue the remainder in
       // the appropriate list.
       FreeListElement* element = DequeueElement(next_index);
-      SplitElementAfterAndEnqueue(element, size);
+      if (is_protected) {
+        // Make the allocated block and the header of the remainder element
+        // writable. The remainder will be non-writable if necessary after
+        // the call to SplitElementAfterAndEnqueue.
+        // If the remainder size is zero, only the element itself needs to
+        // be made writable.
+        intptr_t remainder_size = element->Size() - size;
+        intptr_t region_size = size +
+            ((remainder_size > 0) ? FreeListElement::kHeaderSize : 0);
+        bool status =
+            VirtualMemory::Protect(reinterpret_cast<void*>(element),
+                                   region_size,
+                                   VirtualMemory::kReadWrite /*Execute*/);
+        ASSERT(status);
+      }
+      SplitElementAfterAndEnqueue(element, size, is_protected);
       return reinterpret_cast<uword>(element);
     }
   }

   FreeListElement* previous = NULL;
   FreeListElement* current = free_lists_[kNumLists];
   while (current != NULL) {
     if (current->Size() >= size) {
       // Found an element large enough to hold the requested size. Dequeue,
       // split and enqueue the remainder.
+      if (is_protected) {
+        // Make the allocated block and the header of the remainder element
+        // writable. The remainder will be non-writable if necessary after
+        // the call to SplitElementAfterAndEnqueue.
+        intptr_t remainder_size = current->Size() - size;
+        intptr_t region_size = size +
+            ((remainder_size > 0) ? FreeListElement::kHeaderSize : 0);
+        bool status =
+            VirtualMemory::Protect(reinterpret_cast<void*>(current),
+                                   region_size,
+                                   VirtualMemory::kReadWrite /*Execute*/);
+        ASSERT(status);
+      }
       if (previous == NULL) {
         free_lists_[kNumLists] = current->next();
       } else {
+        // If the previous free list element's next field is protected, it
+        // needs to be unprotected before storing to it and reprotected
+        // after.
+        bool target_is_protected = false;
+        uword target_address = NULL;
+        if (is_protected) {
+          uword writable_start = reinterpret_cast<uword>(current);
+          uword writable_end =
+              writable_start + size + FreeListElement::kHeaderSize - 1;
+          target_address = previous->next_address();
+          target_is_protected =
+              !VirtualMemory::InSamePage(target_address, writable_start) &&
+              !VirtualMemory::InSamePage(target_address, writable_end);
+        }
+        if (target_is_protected) {
+          bool status =
+              VirtualMemory::Protect(reinterpret_cast<void*>(target_address),
+                                     kWordSize,
+                                     VirtualMemory::kReadWrite /*Execute*/);
+          ASSERT(status);
+        }
         previous->set_next(current->next());
+        if (target_is_protected) {
+          bool status =
+              VirtualMemory::Protect(reinterpret_cast<void*>(target_address),
+                                     kWordSize,
+                                     VirtualMemory::kReadExecute);
+          ASSERT(status);
+        }
       }
-      SplitElementAfterAndEnqueue(current, size);
+      SplitElementAfterAndEnqueue(current, size, is_protected);
       return reinterpret_cast<uword>(current);
     }
     previous = current;
     current = current->next();
   }
   return 0;
 }

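In the unlinking branch of TryAllocate above, previous->next_address() may itself live in a page that is still read+execute, so the patch flips it to read+write only when it falls outside the pages that were just made writable for the allocated block. A small sketch of that page test follows; the 4 KB page size and the helper names are assumptions for illustration, not the VM's VirtualMemory API.

// page_check.cc: illustration only.
#include <cassert>
#include <cstdint>

static const uintptr_t kAssumedPageSize = 4096;  // assumed page size

// Two addresses share a page iff they have the same page-aligned base,
// mirroring the idea behind VirtualMemory::InSamePage.
static bool InSamePage(uintptr_t a, uintptr_t b) {
  return (a & ~(kAssumedPageSize - 1)) == (b & ~(kAssumedPageSize - 1));
}

// The next field of the previous element needs its own protection flip only
// when it lies outside every page already made writable for the region
// [writable_start, writable_end].
static bool NeedsSeparateUnprotect(uintptr_t target_address,
                                   uintptr_t writable_start,
                                   uintptr_t writable_end) {
  return !InSamePage(target_address, writable_start) &&
         !InSamePage(target_address, writable_end);
}

int main() {
  // Target in the same page as the writable region: no extra flip needed.
  assert(!NeedsSeparateUnprotect(0x1008, 0x1040, 0x1fff));
  // Target in an unrelated page: flip, store, then re-protect.
  assert(NeedsSeparateUnprotect(0x5000, 0x1040, 0x1fff));
  return 0;
}
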
 void FreeList::Free(uword addr, intptr_t size) {
+  // Precondition required by AsElement and EnqueueElement: the (page
+  // containing the) header of the freed block should be writable. This is
+  // the case when called for newly allocated pages because they are
+  // allocated as writable. It is the case when called during GC sweeping
+  // because the entire heap is writable.
   intptr_t index = IndexForSize(size);
   FreeListElement* element = FreeListElement::AsElement(addr, size);
   EnqueueElement(element, index);
+  // Postcondition: the (page containing the) header is left writable.
 }


 void FreeList::Reset() {
   free_map_.Reset();
   for (int i = 0; i < (kNumLists + 1); i++) {
     free_lists_[i] = NULL;
   }
 }

(...skipping 101 matching lines...)
 }


 void FreeList::Print() const {
   PrintSmall();
   PrintLarge();
 }


 void FreeList::SplitElementAfterAndEnqueue(FreeListElement* element,
-                                           intptr_t size) {
+                                           intptr_t size,
+                                           bool is_protected) {
+  // Precondition required by AsElement and EnqueueElement: either
+  // element->Size() == size, or else the (page containing the) header of
+  // the remainder element starting at element + size is writable.
   intptr_t remainder_size = element->Size() - size;
   if (remainder_size == 0) return;

-  element = FreeListElement::AsElement(reinterpret_cast<uword>(element) + size,
-                                       remainder_size);
+  uword remainder_address = reinterpret_cast<uword>(element) + size;
+  element = FreeListElement::AsElement(remainder_address, remainder_size);
   intptr_t remainder_index = IndexForSize(remainder_size);
   EnqueueElement(element, remainder_index);
+
+  // Postcondition: when allocating in a protected page, the remainder
+  // element is no longer writable unless it is in the same page as the
+  // allocated element. (The allocated element is still writable, and the
+  // remainder element will be protected when the allocated one is).
+  if (is_protected &&
+      !VirtualMemory::InSamePage(remainder_address - 1, remainder_address)) {
+    bool status =
+        VirtualMemory::Protect(reinterpret_cast<void*>(remainder_address),
+                               remainder_size,
+                               VirtualMemory::kReadExecute);
+    ASSERT(status);
+  }
 }

 } // namespace dart
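
The new postcondition of SplitElementAfterAndEnqueue rests on a single page-boundary test: the remainder may be made read+execute again only when its header starts on a fresh page, that is, when the byte just before it belongs to a different page; otherwise its first page also holds the freshly allocated block and must stay writable. A self-contained sketch of that condition, with an assumed 4 KB page size and hypothetical helper names:

// remainder_check.cc: illustration only.
#include <cassert>
#include <cstdint>

static const uintptr_t kAssumedPageSize = 4096;  // assumed page size

// Mirrors the idea behind VirtualMemory::InSamePage.
static bool InSamePage(uintptr_t a, uintptr_t b) {
  return (a & ~(kAssumedPageSize - 1)) == (b & ~(kAssumedPageSize - 1));
}

// The remainder element can be re-protected only if it starts exactly at a
// page boundary; otherwise it shares its first page with the block that was
// just allocated, and that page must remain writable.
static bool RemainderCanBeReprotected(uintptr_t remainder_address) {
  return !InSamePage(remainder_address - 1, remainder_address);
}

int main() {
  assert(RemainderCanBeReprotected(0x2000));    // starts on a page boundary
  assert(!RemainderCanBeReprotected(0x2040));   // shares a page with the block
  return 0;
}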