OLD | NEW |
1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/pages.h" | 5 #include "vm/pages.h" |
6 | 6 |
7 #include "platform/assert.h" | 7 #include "platform/assert.h" |
8 #include "vm/compiler_stats.h" | 8 #include "vm/compiler_stats.h" |
9 #include "vm/gc_marker.h" | 9 #include "vm/gc_marker.h" |
10 #include "vm/gc_sweeper.h" | 10 #include "vm/gc_sweeper.h" |
(...skipping 13 matching lines...) Expand all Loading... |
// Tuning and diagnostic flags for the page space (old generation).
DEFINE_FLAG(bool, print_free_list_after_gc, false,
            "Print free list statistics after a GC");
DEFINE_FLAG(bool, collect_code, true,
            "Attempt to GC infrequently used code.");
DEFINE_FLAG(int, code_collection_interval_in_us, 30000000,
            "Time between attempts to collect unused code.");
DEFINE_FLAG(bool, log_code_drop, false,
            "Emit a log message when pointers to unused code are dropped.");
DEFINE_FLAG(bool, always_drop_code, false,
            "Always try to drop code if the function's usage counter is >= 0");
// Defined in another translation unit. When enabled, executable heap pages
// are kept read-only outside of the windows where the VM must mutate them
// (GC, page-list linking, free-list allocation) -- see AllocatePage and
// TryAllocate below.
DECLARE_FLAG(bool, write_protect_code);
34 | 35 |
35 HeapPage* HeapPage::Initialize(VirtualMemory* memory, PageType type) { | 36 HeapPage* HeapPage::Initialize(VirtualMemory* memory, PageType type) { |
36 ASSERT(memory->size() > VirtualMemory::PageSize()); | 37 ASSERT(memory->size() > VirtualMemory::PageSize()); |
37 bool is_executable = (type == kExecutable); | 38 bool is_executable = (type == kExecutable); |
38 memory->Commit(is_executable); | 39 memory->Commit(is_executable); |
39 | 40 |
40 HeapPage* result = reinterpret_cast<HeapPage*>(memory->address()); | 41 HeapPage* result = reinterpret_cast<HeapPage*>(memory->address()); |
41 result->memory_ = memory; | 42 result->memory_ = memory; |
42 result->next_ = NULL; | 43 result->next_ = NULL; |
43 result->executable_ = is_executable; | 44 result->executable_ = is_executable; |
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
102 | 103 |
103 void HeapPage::WriteProtect(bool read_only) { | 104 void HeapPage::WriteProtect(bool read_only) { |
104 VirtualMemory::Protection prot; | 105 VirtualMemory::Protection prot; |
105 if (read_only) { | 106 if (read_only) { |
106 if (executable_) { | 107 if (executable_) { |
107 prot = VirtualMemory::kReadExecute; | 108 prot = VirtualMemory::kReadExecute; |
108 } else { | 109 } else { |
109 prot = VirtualMemory::kReadOnly; | 110 prot = VirtualMemory::kReadOnly; |
110 } | 111 } |
111 } else { | 112 } else { |
112 if (executable_) { | 113 prot = VirtualMemory::kReadWrite; |
113 prot = VirtualMemory::kReadWriteExecute; | |
114 } else { | |
115 prot = VirtualMemory::kReadWrite; | |
116 } | |
117 } | 114 } |
118 memory_->Protect(prot); | 115 bool status = memory_->Protect(prot); |
| 116 ASSERT(status); |
119 } | 117 } |
120 | 118 |
121 | 119 |
122 PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words) | 120 PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words) |
123 : freelist_(), | 121 : freelist_(), |
124 heap_(heap), | 122 heap_(heap), |
125 pages_(NULL), | 123 pages_(NULL), |
126 pages_tail_(NULL), | 124 pages_tail_(NULL), |
127 large_pages_(NULL), | 125 large_pages_(NULL), |
128 max_capacity_in_words_(max_capacity_in_words), | 126 max_capacity_in_words_(max_capacity_in_words), |
(...skipping 19 matching lines...) Expand all Loading... |
148 VirtualMemory::PageSize()); | 146 VirtualMemory::PageSize()); |
149 return page_size >> kWordSizeLog2; | 147 return page_size >> kWordSizeLog2; |
150 } | 148 } |
151 | 149 |
152 | 150 |
// Allocates a fresh fixed-size HeapPage of the given type, appends it to the
// end of the page list, and grows the recorded capacity. Returns the new page
// with its object_end set so the entire page body is available for allocation.
HeapPage* PageSpace::AllocatePage(HeapPage::PageType type) {
  HeapPage* page = HeapPage::Allocate(kPageSizeInWords, type);
  if (pages_ == NULL) {
    pages_ = page;
  } else {
    // Linking the new page mutates the current tail's header. If the tail is
    // an executable page and code write-protection is on, that header is
    // read-only, so it must be unprotected around the set_next() call and
    // re-protected immediately afterwards.
    const bool is_protected = (pages_tail_->type() == HeapPage::kExecutable)
        && FLAG_write_protect_code;
    if (is_protected) {
      pages_tail_->WriteProtect(false);
    }
    pages_tail_->set_next(page);
    if (is_protected) {
      pages_tail_->WriteProtect(true);
    }
  }
  pages_tail_ = page;
  capacity_in_words_ += kPageSizeInWords;
  page->set_object_end(page->memory_->end());
  return page;
}
165 | 171 |
166 | 172 |
167 HeapPage* PageSpace::AllocateLargePage(intptr_t size, HeapPage::PageType type) { | 173 HeapPage* PageSpace::AllocateLargePage(intptr_t size, HeapPage::PageType type) { |
168 intptr_t page_size_in_words = LargePageSizeInWordsFor(size); | 174 intptr_t page_size_in_words = LargePageSizeInWordsFor(size); |
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
214 } | 220 } |
215 | 221 |
216 | 222 |
217 uword PageSpace::TryAllocate(intptr_t size, | 223 uword PageSpace::TryAllocate(intptr_t size, |
218 HeapPage::PageType type, | 224 HeapPage::PageType type, |
219 GrowthPolicy growth_policy) { | 225 GrowthPolicy growth_policy) { |
220 ASSERT(size >= kObjectAlignment); | 226 ASSERT(size >= kObjectAlignment); |
221 ASSERT(Utils::IsAligned(size, kObjectAlignment)); | 227 ASSERT(Utils::IsAligned(size, kObjectAlignment)); |
222 uword result = 0; | 228 uword result = 0; |
223 if (size < kAllocatablePageSize) { | 229 if (size < kAllocatablePageSize) { |
224 result = freelist_[type].TryAllocate(size); | 230 const bool is_protected = (type == HeapPage::kExecutable) |
| 231 && FLAG_write_protect_code; |
| 232 result = freelist_[type].TryAllocate(size, is_protected); |
225 if ((result == 0) && | 233 if ((result == 0) && |
226 (page_space_controller_.CanGrowPageSpace(size) || | 234 (page_space_controller_.CanGrowPageSpace(size) || |
227 growth_policy == kForceGrowth) && | 235 growth_policy == kForceGrowth) && |
228 CanIncreaseCapacityInWords(kPageSizeInWords)) { | 236 CanIncreaseCapacityInWords(kPageSizeInWords)) { |
229 HeapPage* page = AllocatePage(type); | 237 HeapPage* page = AllocatePage(type); |
230 ASSERT(page != NULL); | 238 ASSERT(page != NULL); |
231 // Start of the newly allocated page is the allocated object. | 239 // Start of the newly allocated page is the allocated object. |
232 result = page->object_start(); | 240 result = page->object_start(); |
233 // Enqueue the remainder in the free list. | 241 // Enqueue the remainder in the free list. |
234 uword free_start = result + size; | 242 uword free_start = result + size; |
(...skipping 205 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
440 } | 448 } |
441 | 449 |
442 if (FLAG_verify_before_gc) { | 450 if (FLAG_verify_before_gc) { |
443 OS::PrintErr("Verifying before MarkSweep..."); | 451 OS::PrintErr("Verifying before MarkSweep..."); |
444 heap_->Verify(); | 452 heap_->Verify(); |
445 OS::PrintErr(" done.\n"); | 453 OS::PrintErr(" done.\n"); |
446 } | 454 } |
447 | 455 |
448 const int64_t start = OS::GetCurrentTimeMicros(); | 456 const int64_t start = OS::GetCurrentTimeMicros(); |
449 | 457 |
| 458 if (FLAG_write_protect_code) { |
| 459 // Make code pages writable. |
| 460 HeapPage* current_page = pages_; |
| 461 while (current_page != NULL) { |
| 462 if (current_page->type() == HeapPage::kExecutable) { |
| 463 current_page->WriteProtect(false); |
| 464 } |
| 465 current_page = current_page->next(); |
| 466 } |
| 467 current_page = large_pages_; |
| 468 while (current_page != NULL) { |
| 469 if (current_page->type() == HeapPage::kExecutable) { |
| 470 current_page->WriteProtect(false); |
| 471 } |
| 472 current_page = current_page->next(); |
| 473 } |
| 474 } |
| 475 |
450 // Mark all reachable old-gen objects. | 476 // Mark all reachable old-gen objects. |
451 bool collect_code = FLAG_collect_code && ShouldCollectCode(); | 477 bool collect_code = FLAG_collect_code && ShouldCollectCode(); |
452 GCMarker marker(heap_); | 478 GCMarker marker(heap_); |
453 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); | 479 marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code); |
454 | 480 |
455 int64_t mid1 = OS::GetCurrentTimeMicros(); | 481 int64_t mid1 = OS::GetCurrentTimeMicros(); |
456 | 482 |
457 // Reset the bump allocation page to unused. | 483 // Reset the bump allocation page to unused. |
458 // Reset the freelists and setup sweeping. | 484 // Reset the freelists and setup sweeping. |
459 freelist_[HeapPage::kData].Reset(); | 485 freelist_[HeapPage::kData].Reset(); |
(...skipping 29 matching lines...) Expand all Loading... |
489 if (page_in_use == 0) { | 515 if (page_in_use == 0) { |
490 FreeLargePage(page, prev_page); | 516 FreeLargePage(page, prev_page); |
491 } else { | 517 } else { |
492 used_in_words += (page_in_use >> kWordSizeLog2); | 518 used_in_words += (page_in_use >> kWordSizeLog2); |
493 prev_page = page; | 519 prev_page = page; |
494 } | 520 } |
495 // Advance to the next page. | 521 // Advance to the next page. |
496 page = next_page; | 522 page = next_page; |
497 } | 523 } |
498 | 524 |
| 525 if (FLAG_write_protect_code) { |
| 526 // Make code pages read-only. |
| 527 HeapPage* current_page = pages_; |
| 528 while (current_page != NULL) { |
| 529 if (current_page->type() == HeapPage::kExecutable) { |
| 530 current_page->WriteProtect(true); |
| 531 } |
| 532 current_page = current_page->next(); |
| 533 } |
| 534 current_page = large_pages_; |
| 535 while (current_page != NULL) { |
| 536 if (current_page->type() == HeapPage::kExecutable) { |
| 537 current_page->WriteProtect(true); |
| 538 } |
| 539 current_page = current_page->next(); |
| 540 } |
| 541 } |
| 542 |
499 // Record data and print if requested. | 543 // Record data and print if requested. |
500 intptr_t used_before_in_words = used_in_words_; | 544 intptr_t used_before_in_words = used_in_words_; |
501 used_in_words_ = used_in_words; | 545 used_in_words_ = used_in_words; |
502 | 546 |
503 int64_t end = OS::GetCurrentTimeMicros(); | 547 int64_t end = OS::GetCurrentTimeMicros(); |
504 | 548 |
505 // Record signals for growth control. | 549 // Record signals for growth control. |
506 page_space_controller_.EvaluateGarbageCollection(used_before_in_words, | 550 page_space_controller_.EvaluateGarbageCollection(used_before_in_words, |
507 used_in_words, | 551 used_in_words, |
508 start, end); | 552 start, end); |
(...skipping 132 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
641 return 0; | 685 return 0; |
642 } else { | 686 } else { |
643 ASSERT(total_time >= gc_time); | 687 ASSERT(total_time >= gc_time); |
644 int result= static_cast<int>((static_cast<double>(gc_time) / | 688 int result= static_cast<int>((static_cast<double>(gc_time) / |
645 static_cast<double>(total_time)) * 100); | 689 static_cast<double>(total_time)) * 100); |
646 return result; | 690 return result; |
647 } | 691 } |
648 } | 692 } |
649 | 693 |
650 } // namespace dart | 694 } // namespace dart |
OLD | NEW |