Chromium Code Reviews

Unified diff: runtime/vm/pages.cc

Issue 503363005: - Add and enable concurrent sweeper. (Closed)
Base URL: http://dart.googlecode.com/svn/branches/bleeding_edge/dart/
Patch Set: Created 6 years, 3 months ago
// Copyright (c) 2012, the Dart project authors. Please see the AUTHORS file
// for details. All rights reserved. Use of this source code is governed by a
// BSD-style license that can be found in the LICENSE file.

#include "vm/pages.h"

#include "platform/assert.h"
#include "vm/compiler_stats.h"
#include "vm/gc_marker.h"
#include "vm/gc_sweeper.h"
(...skipping 15 matching lines...)
DEFINE_FLAG(bool, print_free_list_after_gc, false,
            "Print free list statistics after a GC");
DEFINE_FLAG(bool, collect_code, true,
            "Attempt to GC infrequently used code.");
DEFINE_FLAG(int, code_collection_interval_in_us, 30000000,
            "Time between attempts to collect unused code.");
DEFINE_FLAG(bool, log_code_drop, false,
            "Emit a log message when pointers to unused code are dropped.");
DEFINE_FLAG(bool, always_drop_code, false,
            "Always try to drop code if the function's usage counter is >= 0");
+DEFINE_FLAG(bool, concurrent_sweep, true,
+            "Concurrent sweep for old generation.");

HeapPage* HeapPage::Initialize(VirtualMemory* memory, PageType type) {
  ASSERT(memory->size() > VirtualMemory::PageSize());
  bool is_executable = (type == kExecutable);
  memory->Commit(is_executable);

  HeapPage* result = reinterpret_cast<HeapPage*>(memory->address());
  result->memory_ = memory;
  result->next_ = NULL;
  result->executable_ = is_executable;
(...skipping 68 matching lines...)
    prot = VirtualMemory::kReadWrite;
  }
  bool status = memory_->Protect(prot);
  ASSERT(status);
}


PageSpace::PageSpace(Heap* heap, intptr_t max_capacity_in_words)
    : freelist_(),
      heap_(heap),
+      pages_lock_(new Mutex),

koda 2014/08/26 23:34:48  Mutex -> Mutex()
Ivan Posva 2014/08/27 01:00:22  Done.

      pages_(NULL),
      pages_tail_(NULL),
+      exec_pages_(NULL),
+      exec_pages_tail_(NULL),
      large_pages_(NULL),
      max_capacity_in_words_(max_capacity_in_words),
      tasks_lock_(new Monitor()),
      tasks_(0),
      page_space_controller_(heap,
                             FLAG_heap_growth_space_ratio,
                             FLAG_heap_growth_rate,
                             FLAG_heap_growth_time_ratio),
      gc_time_micros_(0),
      collections_(0) {
}


PageSpace::~PageSpace() {
  {
    MonitorLocker ml(tasks_lock());
    while (tasks() > 0) {
      ml.Wait();
    }
  }
  FreePages(pages_);
+  FreePages(exec_pages_);
  FreePages(large_pages_);
+  delete pages_lock_;
  delete tasks_lock_;
}
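
Note for readers of this review: the destructor blocks on tasks_lock_ until every outstanding task has finished, and the matching decrement-and-notify appears at the end of MarkSweep below. A concurrent sweeper task would need the same epilogue when it completes. A minimal sketch of that task-side handshake, using only the tasks()/set_tasks()/tasks_lock() accessors visible in this file (the helper name is hypothetical):

    // Sketch only: how a background sweep signals completion so that
    // ~PageSpace() (or the next MarkSweep) can stop waiting.
    static void NotifySweepDone(PageSpace* old_space) {
      MonitorLocker ml(old_space->tasks_lock());
      old_space->set_tasks(old_space->tasks() - 1);
      ml.Notify();  // Wakes the ml.Wait() loop in ~PageSpace().
    }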


intptr_t PageSpace::LargePageSizeInWordsFor(intptr_t size) {
  intptr_t page_size = Utils::RoundUp(size + HeapPage::ObjectStartOffset(),
                                      VirtualMemory::PageSize());
  return page_size >> kWordSizeLog2;
}

HeapPage* PageSpace::AllocatePage(HeapPage::PageType type) {
  HeapPage* page = HeapPage::Allocate(kPageSizeInWords, type);
-  if (pages_ == NULL) {
-    pages_ = page;
-  } else {
-    const bool is_protected = (pages_tail_->type() == HeapPage::kExecutable)
-        && FLAG_write_protect_code;
-    if (is_protected) {
-      pages_tail_->WriteProtect(false);
-    }
-    pages_tail_->set_next(page);
-    if (is_protected) {
-      pages_tail_->WriteProtect(true);
-    }
-  }
-  pages_tail_ = page;
+
+  bool is_exec = (type == HeapPage::kExecutable);
+
+  MutexLocker ml(pages_lock_);
+  if (!is_exec) {
+    if (pages_ == NULL) {
+      pages_ = page;
+    } else {
+      pages_tail_->set_next(page);
+    }
+    pages_tail_ = page;
+  } else {
+    if (exec_pages_ == NULL) {
+      exec_pages_ = page;
+    } else {
+      const bool is_protected = is_exec && FLAG_write_protect_code;

koda 2014/08/26 23:34:49  "is_protected" relates to exec_pages_tail_, which
Ivan Posva 2014/08/27 01:00:22  Done.

+      if (is_protected) {
+        exec_pages_tail_->WriteProtect(false);
+      }
+      exec_pages_tail_->set_next(page);
+      if (is_protected) {
+        exec_pages_tail_->WriteProtect(true);
+      }
+    }
+    exec_pages_tail_ = page;
+  }
  usage_.capacity_in_words += kPageSizeInWords;
  page->set_object_end(page->memory_->end());
  return page;
}
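
For context: this code path relies on members that must be declared in pages.h, which is not part of this diff. A sketch of the declarations the .cc file assumes (names taken from the usage here; layout and access specifiers are guesses):

    // Hypothetical excerpt of pages.h after this change.
    class PageSpace {
      // ...
     private:
      Mutex* pages_lock_;          // Guards the three page lists below.
      HeapPage* pages_;            // Regular data pages; swept concurrently.
      HeapPage* pages_tail_;
      HeapPage* exec_pages_;       // Executable pages; swept eagerly.
      HeapPage* exec_pages_tail_;
      HeapPage* large_pages_;      // Oversized allocations of either type.
      // ...
    };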


HeapPage* PageSpace::AllocateLargePage(intptr_t size, HeapPage::PageType type) {
  intptr_t page_size_in_words = LargePageSizeInWordsFor(size);
  HeapPage* page = HeapPage::Allocate(page_size_in_words, type);
  page->set_next(large_pages_);
(...skipping 18 matching lines...)
  if (new_page_size_in_words < old_page_size_in_words) {
    memory->Truncate(memory->start(), new_page_size_in_words << kWordSizeLog2);
    usage_.capacity_in_words -= old_page_size_in_words;
    usage_.capacity_in_words += new_page_size_in_words;
    page->set_object_end(page->object_start() + new_object_size_in_bytes);
  }
}


void PageSpace::FreePage(HeapPage* page, HeapPage* previous_page) {
-  usage_.capacity_in_words -= (page->memory_->size() >> kWordSizeLog2);
-  // Remove the page from the list.
-  if (previous_page != NULL) {
-    previous_page->set_next(page->next());
-  } else {
-    pages_ = page->next();
-  }
-  if (page == pages_tail_) {
-    pages_tail_ = previous_page;
-  }
+  bool is_exec = (page->type() == HeapPage::kExecutable);
+  {
+    MutexLocker ml(pages_lock_);
+    usage_.capacity_in_words -= (page->memory_->size() >> kWordSizeLog2);
+    if (!is_exec) {
+      // Remove the page from the list of data pages.
+      if (previous_page != NULL) {
+        previous_page->set_next(page->next());
+      } else {
+        pages_ = page->next();
+      }
+      if (page == pages_tail_) {
+        pages_tail_ = previous_page;
+      }
+    } else {
+      // Remove the page from the list of executable pages.
+      if (previous_page != NULL) {
+        previous_page->set_next(page->next());
+      } else {
+        exec_pages_ = page->next();
+      }
+      if (page == exec_pages_tail_) {
+        exec_pages_tail_ = previous_page;
+      }
+    }
+  }
  // TODO(iposva): Consider adding to a pool of empty pages.
  page->Deallocate();
}


void PageSpace::FreeLargePage(HeapPage* page, HeapPage* previous_page) {
  usage_.capacity_in_words -= (page->memory_->size() >> kWordSizeLog2);
  // Remove the page from the list.
  if (previous_page != NULL) {
(...skipping 98 matching lines...)
}


void PageSpace::FreeExternal(intptr_t size) {
  intptr_t size_in_words = size >> kWordSizeLog2;
  usage_.external_in_words -= size_in_words;
}


bool PageSpace::Contains(uword addr) const {
+  MutexLocker ml(pages_lock_);
+  NoGCScope no_gc;
  HeapPage* page = pages_;
  while (page != NULL) {
    if (page->Contains(addr)) {
      return true;
    }
    page = NextPageAnySize(page);
  }
  return false;
}


bool PageSpace::Contains(uword addr, HeapPage::PageType type) const {
+  MutexLocker ml(pages_lock_);
+  NoGCScope no_gc;
  HeapPage* page = pages_;
  while (page != NULL) {
    if ((page->type() == type) && page->Contains(addr)) {
      return true;
    }
    page = NextPageAnySize(page);
  }
  return false;
}
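
Both Contains() overloads, and the visitors below, iterate with NextPageAnySize() while holding pages_lock_. That helper lives in pages.h (not shown in this diff) and, with executable pages split into their own list, presumably has to chain all three lists. A sketch of the assumed behavior:

    // Hypothetical: walk pages_, then exec_pages_, then large_pages_.
    // Caller holds pages_lock_.
    HeapPage* PageSpace::NextPageAnySize(HeapPage* page) const {
      if (page->next() != NULL) {
        return page->next();
      }
      if (page == pages_tail_) {
        return (exec_pages_ != NULL) ? exec_pages_ : large_pages_;
      }
      if (page == exec_pages_tail_) {
        return large_pages_;
      }
      return NULL;  // page was the last large page.
    }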


void PageSpace::StartEndAddress(uword* start, uword* end) const {
-  ASSERT(pages_ != NULL || large_pages_ != NULL);
+  MutexLocker ml(pages_lock_);
+  NoGCScope no_gc;
+  ASSERT((pages_ != NULL) || (exec_pages_ != NULL) || (large_pages_ != NULL));
  *start = static_cast<uword>(~0);
  *end = 0;
  for (HeapPage* page = pages_; page != NULL; page = NextPageAnySize(page)) {
    *start = Utils::Minimum(*start, page->object_start());
    *end = Utils::Maximum(*end, page->object_end());
  }
  ASSERT(*start != static_cast<uword>(~0));
  ASSERT(*end != 0);
}


void PageSpace::VisitObjects(ObjectVisitor* visitor) const {
+  MutexLocker ml(pages_lock_);
+  NoGCScope no_gc;
  HeapPage* page = pages_;
  while (page != NULL) {
    page->VisitObjects(visitor);
    page = NextPageAnySize(page);
  }
}


void PageSpace::VisitObjectPointers(ObjectPointerVisitor* visitor) const {
+  MutexLocker ml(pages_lock_);
+  NoGCScope no_gc;
  HeapPage* page = pages_;
  while (page != NULL) {
    page->VisitObjectPointers(visitor);
    page = NextPageAnySize(page);
  }
}


RawObject* PageSpace::FindObject(FindObjectVisitor* visitor,
                                 HeapPage::PageType type) const {
  ASSERT(Isolate::Current()->no_gc_scope_depth() != 0);
+  MutexLocker ml(pages_lock_);
+  NoGCScope no_gc;
  HeapPage* page = pages_;
  while (page != NULL) {
    if (page->type() == type) {
      RawObject* obj = page->FindObject(visitor);
      if (obj != Object::null()) {
        return obj;
      }
    }
    page = NextPageAnySize(page);
  }
  return Object::null();
}


void PageSpace::WriteProtect(bool read_only) {
+  MutexLocker ml(pages_lock_);
+  NoGCScope no_gc;
  HeapPage* page = pages_;
  while (page != NULL) {
    page->WriteProtect(read_only);
    page = NextPageAnySize(page);
  }
}


void PageSpace::PrintToJSONObject(JSONObject* object) {
  Isolate* isolate = Isolate::Current();
(...skipping 44 matching lines...)
  heap_map.AddProperty("unit_size_bytes",
                       static_cast<intptr_t>(kObjectAlignment));
  heap_map.AddProperty("page_size_bytes", kPageSizeInWords * kWordSize);
  {
    JSONObject class_list(&heap_map, "class_list");
    isolate->class_table()->PrintToJSONObject(&class_list);
  }
  {
    // "pages" is an array [page0, page1, ..., pageN], each page of the form
    // {"object_start": "0x...", "objects": [size, class id, size, ...]}
+    MutexLocker ml(pages_lock_);
+    NoGCScope no_gc;
    JSONArray all_pages(&heap_map, "pages");
    for (HeapPage* page = pages_; page != NULL; page = page->next()) {
      JSONObject page_container(&all_pages);
      page_container.AddPropertyF("object_start",
                                  "0x%" Px "", page->object_start());
      JSONArray page_map(&page_container, "objects");
      HeapMapAsJSONVisitor printer(&page_map);
      page->VisitObjects(&printer);
    }
+    for (HeapPage* page = exec_pages_; page != NULL; page = page->next()) {
+      JSONObject page_container(&all_pages);
+      page_container.AddPropertyF("object_start",
+                                  "0x%" Px "", page->object_start());
+      JSONArray page_map(&page_container, "objects");
+      HeapMapAsJSONVisitor printer(&page_map);
+      page->VisitObjects(&printer);
+    }
  }
}


bool PageSpace::ShouldCollectCode() {
  // Try to collect code if enough time has passed since the last attempt.
  const int64_t start = OS::GetCurrentTimeMicros();
  const int64_t last_code_collection_in_us =
      page_space_controller_.last_code_collection_in_us();

  if ((start - last_code_collection_in_us) >
      FLAG_code_collection_interval_in_us) {
    if (FLAG_log_code_drop) {
      OS::Print("Trying to detach code.\n");
    }
    page_space_controller_.set_last_code_collection_in_us(start);
    return true;
  }
  return false;
}


void PageSpace::WriteProtectCode(bool read_only) {
  if (FLAG_write_protect_code) {
-    HeapPage* current_page = pages_;
-    while (current_page != NULL) {
-      if (current_page->type() == HeapPage::kExecutable) {
-        current_page->WriteProtect(read_only);
+    MutexLocker ml(pages_lock_);
+    NoGCScope no_gc;
+    // No need to go through all of the data pages first.
+    HeapPage* page = exec_pages_;
+    while (page != NULL) {
+      ASSERT(page->type() == HeapPage::kExecutable);
+      page->WriteProtect(read_only);
+      page = page->next();
+    }
+    page = large_pages_;
+    while (page != NULL) {
+      if (page->type() == HeapPage::kExecutable) {
+        page->WriteProtect(read_only);
      }
-      current_page = NextPageAnySize(current_page);
+      page = page->next();
    }
  }
}


void PageSpace::MarkSweep(bool invoke_api_callbacks) {
  Isolate* isolate = heap_->isolate();
  ASSERT(isolate == Isolate::Current());

  // Wait for pending tasks to complete and then account for the driver task.
(...skipping 37 matching lines...)
  int64_t mid1 = OS::GetCurrentTimeMicros();

  // Reset the bump allocation page to unused.
  // Reset the freelists and setup sweeping.
  freelist_[HeapPage::kData].Reset();
  freelist_[HeapPage::kExecutable].Reset();

  int64_t mid2 = OS::GetCurrentTimeMicros();
  int64_t mid3 = 0;

-  GCSweeper sweeper(heap_);
+  {
+    GCSweeper sweeper(heap_);

    // During stop-the-world phases we should use bulk lock when adding elements
    // to the free list.
-  {
    MutexLocker mld(freelist_[HeapPage::kData].mutex());
    MutexLocker mle(freelist_[HeapPage::kExecutable].mutex());

+    // Large and executable pages are always swept immediately.
    HeapPage* prev_page = NULL;
-    HeapPage* page = pages_;
-    while (page != NULL) {
-      HeapPage* next_page = page->next();
-      bool page_in_use = sweeper.SweepPage(page, &freelist_[page->type()]);
-      if (page_in_use) {
-        prev_page = page;
-      } else {
-        FreePage(page, prev_page);
-      }
-      // Advance to the next page.
-      page = next_page;
-    }
-
-    mid3 = OS::GetCurrentTimeMicros();
-
-    prev_page = NULL;
-    page = large_pages_;
+    HeapPage* page = large_pages_;
    while (page != NULL) {
      HeapPage* next_page = page->next();
      const intptr_t words_to_end = sweeper.SweepLargePage(page);
      if (words_to_end == 0) {
        FreeLargePage(page, prev_page);
      } else {
        TruncateLargePage(page, words_to_end << kWordSizeLog2);
        prev_page = page;
      }
      // Advance to the next page.
      page = next_page;
    }

+    prev_page = NULL;
+    page = exec_pages_;
+    FreeList* freelist = &freelist_[HeapPage::kExecutable];
+    while (page != NULL) {
+      HeapPage* next_page = page->next();
+      bool page_in_use = sweeper.SweepPage(page, freelist);
+      if (page_in_use) {
+        prev_page = page;
+      } else {
+        FreePage(page, prev_page);
+      }
+      // Advance to the next page.
+      page = next_page;
+    }
+
+    mid3 = OS::GetCurrentTimeMicros();
+
+    if (!FLAG_concurrent_sweep) {
+      // Sweep all regular sized pages now.
+      prev_page = NULL;
+      page = pages_;
+      while (page != NULL) {
+        HeapPage* next_page = page->next();
+        bool page_in_use = sweeper.SweepPage(page, &freelist_[page->type()]);
+        if (page_in_use) {
+          prev_page = page;
+        } else {
+          FreePage(page, prev_page);
+        }
+        // Advance to the next page.
+        page = next_page;
+      }
+    } else {
+      // Start the concurrent sweeper task now.
+      GCSweeper::SweepConcurrent(
+          isolate, this, pages_, pages_tail_, &freelist_[HeapPage::kData]);
+    }
  }

  // Make code pages read-only.
  WriteProtectCode(true);

  int64_t end = OS::GetCurrentTimeMicros();

  // Record signals for growth control. Include size of external allocations.
  page_space_controller_.EvaluateGarbageCollection(usage_before, usage_,
                                                   start, end);
(...skipping 12 matching lines...)

  if (FLAG_verify_after_gc) {
    OS::PrintErr("Verifying after MarkSweep...");
    heap_->Verify();
    OS::PrintErr(" done.\n");
  }

  // Done, reset the task count.
  {
    MonitorLocker ml(tasks_lock());
-    ASSERT(tasks() == 1);
    set_tasks(tasks() - 1);
    ml.Notify();
  }
}
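
GCSweeper::SweepConcurrent is implemented in gc_sweeper.cc, which is not part of this file's diff. For readers of this review, a rough sketch of what the spawned task presumably does with the snapshot it is handed; everything beyond the call's five arguments and the APIs already used in this file is a guess, and access control is ignored:

    // Hypothetical task body for the concurrent sweeper; the real code
    // lives in vm/gc_sweeper.cc.
    void SweepConcurrentTask(Isolate* isolate, PageSpace* old_space,
                             HeapPage* first, HeapPage* last,
                             FreeList* freelist) {
      GCSweeper sweeper(isolate->heap());  // Assumed; pages.cc passes heap_.
      HeapPage* prev_page = NULL;
      HeapPage* page = first;
      while (page != NULL) {
        HeapPage* next_page = page->next();
        // Off the stop-the-world path, SweepPage presumably takes the
        // freelist's mutex per free run rather than a bulk lock.
        bool page_in_use = sweeper.SweepPage(page, freelist);
        if (page_in_use) {
          prev_page = page;
        } else {
          old_space->FreePage(page, prev_page);  // Locks pages_lock_ itself.
        }
        if (page == last) break;  // Only the pages snapshotted at GC time.
        page = next_page;
      }
      // Same completion handshake as the MarkSweep epilogue above.
      MonitorLocker ml(old_space->tasks_lock());
      old_space->set_tasks(old_space->tasks() - 1);
      ml.Notify();
    }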


PageSpaceController::PageSpaceController(Heap* heap,
                                         int heap_growth_ratio,
                                         int heap_growth_max,
                                         int garbage_collection_time_ratio)
(...skipping 12 matching lines...)


bool PageSpaceController::NeedsGarbageCollection(SpaceUsage after) const {
  if (!is_enabled_) {
    return false;
  }
  if (heap_growth_ratio_ == 100) {
    return false;
  }
  intptr_t capacity_increase_in_words =
      after.capacity_in_words - last_usage_.capacity_in_words;
-  ASSERT(capacity_increase_in_words >= 0);
+  // The concurrent sweeper might have freed more capacity than was allocated.
+  capacity_increase_in_words =
+      Utils::Maximum<intptr_t>(0, capacity_increase_in_words);
  capacity_increase_in_words =
      Utils::RoundUp(capacity_increase_in_words, PageSpace::kPageSizeInWords);
  intptr_t capacity_increase_in_pages =
      capacity_increase_in_words / PageSpace::kPageSizeInWords;
  double multiplier = 1.0;
  // To avoid waste, the first GC should be triggered before too long. After
  // kInitialTimeoutSeconds, gradually lower the capacity limit.
  static const double kInitialTimeoutSeconds = 1.00;
  if (history_.IsEmpty()) {
    double seconds_since_init = MicrosecondsToSeconds(
(...skipping 73 matching lines...)
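
A quick worked example of the clamp-and-round logic in the hunk above. kPageSizeInWords is defined in pages.h and is not visible in this diff; 64K words is an assumed value for illustration only:

    // Illustration only; assumes kPageSizeInWords == 64 * 1024 == 65536.
    // Case 1: the concurrent sweeper freed two pages since the last sample:
    //   increase = after - last = -2 * 65536  ->  clamped to 0
    //   -> capacity_increase_in_pages == 0 (no growth signal).
    // Case 2: a large page of 100000 words was allocated:
    //   increase = 100000  ->  RoundUp(100000, 65536) == 131072
    //   -> capacity_increase_in_pages == 131072 / 65536 == 2.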
    return 0;
  } else {
    ASSERT(total_time >= gc_time);
    int result = static_cast<int>((static_cast<double>(gc_time) /
                                   static_cast<double>(total_time)) * 100);
    return result;
  }
}

}  // namespace dart