OLD | NEW |
---|---|
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/pages.h" | 5 #include "vm/pages.h" |
6 | 6 |
7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
8 #include "vm/compiler_stats.h" | 8 #include "vm/compiler_stats.h" |
9 #include "vm/gc_marker.h" | 9 #include "vm/gc_marker.h" |
10 #include "vm/gc_sweeper.h" | 10 #include "vm/gc_sweeper.h" |
(...skipping 13 matching lines...) | |
24 DEFINE_FLAG(bool, print_free_list_after_gc, false, | 24 DEFINE_FLAG(bool, print_free_list_after_gc, false, |
25 "Print free list statistics after a GC"); | 25 "Print free list statistics after a GC"); |
26 DEFINE_FLAG(bool, collect_code, true, | 26 DEFINE_FLAG(bool, collect_code, true, |
27 "Attempt to GC infrequently used code."); | 27 "Attempt to GC infrequently used code."); |
28 DEFINE_FLAG(int, code_collection_interval_in_us, 30000000, | 28 DEFINE_FLAG(int, code_collection_interval_in_us, 30000000, |
29 "Time between attempts to collect unused code."); | 29 "Time between attempts to collect unused code."); |
30 DEFINE_FLAG(bool, log_code_drop, false, | 30 DEFINE_FLAG(bool, log_code_drop, false, |
31 "Emit a log message when pointers to unused code are dropped."); | 31 "Emit a log message when pointers to unused code are dropped."); |
32 DEFINE_FLAG(bool, always_drop_code, false, | 32 DEFINE_FLAG(bool, always_drop_code, false, |
33 "Always try to drop code if the function's usage counter is >= 0"); | 33 "Always try to drop code if the function's usage counter is >= 0"); |
34 DECLARE_FLAG(bool, write_protect_code); | |
34 | 35 |
35 HeapPage* HeapPage::Initialize(VirtualMemory* memory, PageType type) { | 36 HeapPage* HeapPage::Initialize(VirtualMemory* memory, PageType type) { |
36 ASSERT(memory->size() > VirtualMemory::PageSize()); | 37 ASSERT(memory->size() > VirtualMemory::PageSize()); |
37 bool is_executable = (type == kExecutable); | 38 bool is_executable = (type == kExecutable); |
38 memory->Commit(is_executable); | 39 memory->Commit(is_executable); |
39 | 40 |
40 HeapPage* result = reinterpret_cast<HeapPage*>(memory->address()); | 41 HeapPage* result = reinterpret_cast<HeapPage*>(memory->address()); |
41 result->memory_ = memory; | 42 result->memory_ = memory; |
42 result->next_ = NULL; | 43 result->next_ = NULL; |
43 result->executable_ = is_executable; | 44 result->executable_ = is_executable; |
(...skipping 59 matching lines...) | |
103 void HeapPage::WriteProtect(bool read_only) { | 104 void HeapPage::WriteProtect(bool read_only) { |
104 VirtualMemory::Protection prot; | 105 VirtualMemory::Protection prot; |
105 if (read_only) { | 106 if (read_only) { |
106 if (executable_) { | 107 if (executable_) { |
107 prot = VirtualMemory::kReadExecute; | 108 prot = VirtualMemory::kReadExecute; |
108 } else { | 109 } else { |
109 prot = VirtualMemory::kReadOnly; | 110 prot = VirtualMemory::kReadOnly; |
110 } | 111 } |
111 } else { | 112 } else { |
112 if (executable_) { | 113 if (executable_) { |
113 prot = VirtualMemory::kReadWriteExecute; | 114 prot = VirtualMemory::kReadWriteExecute; |
Ivan Posva 2014/02/03 05:44:35
This should not be a legal state.
Florian Schneider 2014/02/10 11:56:17
Done. Should rwx then be removed completely from t
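The resolution the reply points at is that an executable page never needs to be mapped read-write-execute: any caller that has to patch code first drops write protection, performs the write, and then restores the read-execute mapping, so a page is never writable and executable at the same time. Below is a minimal sketch of that discipline, using only the HeapPage API visible in this diff; the guard class itself is hypothetical and is not part of this CL.

    // Sketch only: scoped unprotect/reprotect around a code-page mutation.
    class ScopedWritableCodePage {
     public:
      explicit ScopedWritableCodePage(HeapPage* page) : page_(page) {
        // Drop write protection only for executable pages, and only when
        // the write-protect-code feature is enabled.
        if (FLAG_write_protect_code &&
            (page_->type() == HeapPage::kExecutable)) {
          page_->WriteProtect(false);
        }
      }
      ~ScopedWritableCodePage() {
        // Restore the read-execute mapping on scope exit.
        if (FLAG_write_protect_code &&
            (page_->type() == HeapPage::kExecutable)) {
          page_->WriteProtect(true);
        }
      }
     private:
      HeapPage* page_;
    };

With such a guard, the paired WriteProtect(false)/WriteProtect(true) calls that this CL threads through AllocatePage could be expressed as a single scoped object, which keeps the unprotect and reprotect steps from drifting apart.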
114 } else { | 115 } else { |
115 prot = VirtualMemory::kReadWrite; | 116 prot = VirtualMemory::kReadWrite; |
116 } | 117 } |
117 } | 118 } |
118 memory_->Protect(prot); | 119 bool status = memory_->Protect(prot); |
120 ASSERT(status); | |
119 } | 121 } |
120 | 122 |
121 | 123 |
122 PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words) | 124 PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words) |
123 : freelist_(), | 125 : freelist_(), |
124 heap_(heap), | 126 heap_(heap), |
125 pages_(NULL), | 127 pages_(NULL), |
126 pages_tail_(NULL), | 128 pages_tail_(NULL), |
127 large_pages_(NULL), | 129 large_pages_(NULL), |
128 max_capacity_in_words_(max_capacity_in_words), | 130 max_capacity_in_words_(max_capacity_in_words), |
(...skipping 19 matching lines...) | |
148 VirtualMemory::PageSize()); | 150 VirtualMemory::PageSize()); |
149 return page_size >> kWordSizeLog2; | 151 return page_size >> kWordSizeLog2; |
150 } | 152 } |
151 | 153 |
152 | 154 |
153 HeapPage* PageSpace::AllocatePage(HeapPage::PageType type) { | 155 HeapPage* PageSpace::AllocatePage(HeapPage::PageType type) { |
154 HeapPage* page = HeapPage::Allocate(kPageSizeInWords, type); | 156 HeapPage* page = HeapPage::Allocate(kPageSizeInWords, type); |
155 if (pages_ == NULL) { | 157 if (pages_ == NULL) { |
156 pages_ = page; | 158 pages_ = page; |
157 } else { | 159 } else { |
160 const bool is_protected = (pages_tail_->type() == HeapPage::kExecutable) | |
161 && FLAG_write_protect_code; | |
162 if (is_protected) { | |
163 pages_tail_->WriteProtect(false); | |
164 } | |
158 pages_tail_->set_next(page); | 165 pages_tail_->set_next(page); |
166 if (is_protected) { | |
167 pages_tail_->WriteProtect(true); | |
168 } | |
159 } | 169 } |
160 pages_tail_ = page; | 170 pages_tail_ = page; |
161 capacity_in_words_ += kPageSizeInWords; | 171 capacity_in_words_ += kPageSizeInWords; |
162 page->set_object_end(page->memory_->end()); | 172 page->set_object_end(page->memory_->end()); |
163 return page; | 173 return page; |
164 } | 174 } |
165 | 175 |
166 | 176 |
167 HeapPage* PageSpace::AllocateLargePage(intptr_t size, HeapPage::PageType type) { | 177 HeapPage* PageSpace::AllocateLargePage(intptr_t size, HeapPage::PageType type) { |
168 intptr_t page_size_in_words = LargePageSizeInWordsFor(size); | 178 intptr_t page_size_in_words = LargePageSizeInWordsFor(size); |
(...skipping 45 matching lines...) | |
214 } | 224 } |
215 | 225 |
216 | 226 |
217 uword PageSpace::TryAllocate(intptr_t size, | 227 uword PageSpace::TryAllocate(intptr_t size, |
218 HeapPage::PageType type, | 228 HeapPage::PageType type, |
219 GrowthPolicy growth_policy) { | 229 GrowthPolicy growth_policy) { |
220 ASSERT(size >= kObjectAlignment); | 230 ASSERT(size >= kObjectAlignment); |
221 ASSERT(Utils::IsAligned(size, kObjectAlignment)); | 231 ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
222 uword result = 0; | 232 uword result = 0; |
223 if (size < kAllocatablePageSize) { | 233 if (size < kAllocatablePageSize) { |
224 result = freelist_[type].TryAllocate(size); | 234 const bool is_protected = (type == HeapPage::kExecutable) |
235 && FLAG_write_protect_code; | |
236 result = freelist_[type].TryAllocate(size, is_protected); | |
225 if ((result == 0) && | 237 if ((result == 0) && |
226 (page_space_controller_.CanGrowPageSpace(size) || | 238 (page_space_controller_.CanGrowPageSpace(size) || |
227 growth_policy == kForceGrowth) && | 239 growth_policy == kForceGrowth) && |
228 CanIncreaseCapacityInWords(kPageSizeInWords)) { | 240 CanIncreaseCapacityInWords(kPageSizeInWords)) { |
229 HeapPage* page = AllocatePage(type); | 241 HeapPage* page = AllocatePage(type); |
230 ASSERT(page != NULL); | 242 ASSERT(page != NULL); |
231 // Start of the newly allocated page is the allocated object. | 243 // Start of the newly allocated page is the allocated object. |
232 result = page->object_start(); | 244 result = page->object_start(); |
233 // Enqueue the remainder in the free list. | 245 // Enqueue the remainder in the free list. |
234 uword free_start = result + size; | 246 uword free_start = result + size; |
(...skipping 205 matching lines...) | |
440 } | 452 } |
441 | 453 |
442 if (FLAG_verify_before_gc) { | 454 if (FLAG_verify_before_gc) { |
443 OS::PrintErr("Verifying before MarkSweep..."); | 455 OS::PrintErr("Verifying before MarkSweep..."); |
444 heap_->Verify(); | 456 heap_->Verify(); |
445 OS::PrintErr(" done.\n"); | 457 OS::PrintErr(" done.\n"); |
446 } | 458 } |
447 | 459 |
448 const int64_t start = OS::GetCurrentTimeMicros(); | 460 const int64_t start = OS::GetCurrentTimeMicros(); |
449 | 461 |
462 if (FLAG_write_protect_code) { | |
463 // Make code pages writable. | |
464 HeapPage* current_page = pages_; | |
465 while (current_page != NULL) { | |
466 if (current_page->type() == HeapPage::kExecutable) { | |
467 current_page->WriteProtect(false); | |
468 } | |
469 current_page = current_page->next(); | |
470 } | |
471 current_page = large_pages_; | |
472 while (current_page != NULL) { | |
473 if (current_page->type() == HeapPage::kExecutable) { | |
474 current_page->WriteProtect(false); | |
475 } | |
476 current_page = current_page->next(); | |
477 } | |
478 } | |
479 | |
450 // Mark all reachable old-gen objects. | 480 // Mark all reachable old-gen objects. |
451 bool collect_code = FLAG_collect_code && ShouldCollectCode(); | 481 bool collect_code = FLAG_collect_code && ShouldCollectCode(); |
452 GCMarker marker(heap_); | 482 GCMarker marker(heap_); |
453 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); | 483 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); |
454 | 484 |
455 int64_t mid1 = OS::GetCurrentTimeMicros(); | 485 int64_t mid1 = OS::GetCurrentTimeMicros(); |
456 | 486 |
457 // Reset the bump allocation page to unused. | 487 // Reset the bump allocation page to unused. |
458 // Reset the freelists and setup sweeping. | 488 // Reset the freelists and setup sweeping. |
459 freelist_[HeapPage::kData].Reset(); | 489 freelist_[HeapPage::kData].Reset(); |
(...skipping 29 matching lines...) | |
489 if (page_in_use == 0) { | 519 if (page_in_use == 0) { |
490 FreeLargePage(page, prev_page); | 520 FreeLargePage(page, prev_page); |
491 } else { | 521 } else { |
492 used_in_words += (page_in_use >> kWordSizeLog2); | 522 used_in_words += (page_in_use >> kWordSizeLog2); |
493 prev_page = page; | 523 prev_page = page; |
494 } | 524 } |
495 // Advance to the next page. | 525 // Advance to the next page. |
496 page = next_page; | 526 page = next_page; |
497 } | 527 } |
498 | 528 |
529 if (FLAG_write_protect_code) { | |
530 // Make code pages read-only. | |
531 HeapPage* current_page = pages_; | |
532 while (current_page != NULL) { | |
533 if (current_page->type() == HeapPage::kExecutable) { | |
534 current_page->WriteProtect(true); | |
535 } | |
536 current_page = current_page->next(); | |
537 } | |
538 current_page = large_pages_; | |
539 while (current_page != NULL) { | |
540 if (current_page->type() == HeapPage::kExecutable) { | |
541 current_page->WriteProtect(true); | |
542 } | |
543 current_page = current_page->next(); | |
544 } | |
545 } | |
546 | |
499 // Record data and print if requested. | 547 // Record data and print if requested. |
500 intptr_t used_before_in_words = used_in_words_; | 548 intptr_t used_before_in_words = used_in_words_; |
501 used_in_words_ = used_in_words; | 549 used_in_words_ = used_in_words; |
502 | 550 |
503 int64_t end = OS::GetCurrentTimeMicros(); | 551 int64_t end = OS::GetCurrentTimeMicros(); |
504 | 552 |
505 // Record signals for growth control. | 553 // Record signals for growth control. |
506 page_space_controller_.EvaluateGarbageCollection(used_before_in_words, | 554 page_space_controller_.EvaluateGarbageCollection(used_before_in_words, |
507 used_in_words, | 555 used_in_words, |
508 start, end); | 556 start, end); |
(...skipping 132 matching lines...) | |
641 return 0; | 689 return 0; |
642 } else { | 690 } else { |
643 ASSERT(total_time >= gc_time); | 691 ASSERT(total_time >= gc_time); |
644 int result= static_cast<int>((static_cast<double>(gc_time) / | 692 int result= static_cast<int>((static_cast<double>(gc_time) / |
645 static_cast<double>(total_time)) * 100); | 693 static_cast<double>(total_time)) * 100); |
646 return result; | 694 return result; |
647 } | 695 } |
648 } | 696 } |
649 | 697 |
650 } // namespace dart | 698 } // namespace dart |
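The mark-sweep path in this CL walks pages_ and large_pages_ with the same loop twice, once to drop write protection before marking and once to restore it after sweeping the large pages. One way to express that walk a single time is sketched below against the fields and flags visible in this diff; the helper name and its placement on PageSpace are assumptions, not something this CL adds.

    // Sketch only: toggle write protection on every executable page in both
    // the regular and the large page lists.
    void PageSpace::WriteProtectCode(bool read_only) {
      if (!FLAG_write_protect_code) {
        return;
      }
      for (HeapPage* page = pages_; page != NULL; page = page->next()) {
        if (page->type() == HeapPage::kExecutable) {
          page->WriteProtect(read_only);
        }
      }
      for (HeapPage* page = large_pages_; page != NULL; page = page->next()) {
        if (page->type() == HeapPage::kExecutable) {
          page->WriteProtect(read_only);
        }
      }
    }

MarkSweep would then call WriteProtectCode(false) before marking and WriteProtectCode(true) after the large-page sweep, keeping the unprotect/reprotect pair easy to audit in one place.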