Index: runtime/vm/pages.cc
diff --git a/runtime/vm/pages.cc b/runtime/vm/pages.cc
index 5d5df0e2d559398f7494b34059206646c3399233..57441c9016a26e3756e12782efe2a93257480e25 100644
--- a/runtime/vm/pages.cc
+++ b/runtime/vm/pages.cc
@@ -11,7 +11,7 @@
 #include "vm/lockers.h"
 #include "vm/object.h"
 #include "vm/os_thread.h"
-#include "vm/thread_registry.h"
+#include "vm/safepoint.h"
 #include "vm/verified_memory.h"
 #include "vm/virtual_memory.h"
@@ -791,6 +791,7 @@ void PageSpace::WriteProtectCode(bool read_only) {
 void PageSpace::MarkSweep(bool invoke_api_callbacks) {
+  Thread* thread = Thread::Current();
   Isolate* isolate = heap_->isolate();
   ASSERT(isolate == Isolate::Current());
@@ -798,114 +799,97 @@ void PageSpace::MarkSweep(bool invoke_api_callbacks) {
   {
     MonitorLocker locker(tasks_lock());
     while (tasks() > 0) {
-      locker.Wait();
+      locker.WaitWithSafepointCheck(thread);
     }
     set_tasks(1);
   }
-  // Ensure that all threads for this isolate are at a safepoint (either stopped
-  // or in native code). If two threads are racing at this point, the loser
-  // will continue with its collection after waiting for the winner to complete.
+  // Ensure that all threads for this isolate are at a safepoint (either
+  // stopped or in native code). If two threads are racing at this point, the
+  // loser will continue with its collection after waiting for the winner to
+  // complete.
   // TODO(koda): Consider moving SafepointThreads into allocation failure/retry
zra (2016/01/08 23:32:07): Remove TODO?
siva (2016/01/12 21:26:22): Done.
   // logic to avoid needless collections.
-  isolate->thread_registry()->SafepointThreads();
-
-  // Perform various cleanup that relies on no tasks interfering.
-  isolate->class_table()->FreeOldTables();
-
-  NoSafepointScope no_safepoints;
+  {
+    SafepointOperationScope safepoint_scope(thread);
-  if (FLAG_print_free_list_before_gc) {
-    OS::Print("Data Freelist (before GC):\n");
-    freelist_[HeapPage::kData].Print();
-    OS::Print("Executable Freelist (before GC):\n");
-    freelist_[HeapPage::kExecutable].Print();
-  }
+    // Perform various cleanup that relies on no tasks interfering.
+    isolate->class_table()->FreeOldTables();
-  if (FLAG_verify_before_gc) {
-    OS::PrintErr("Verifying before marking...");
-    heap_->VerifyGC();
-    OS::PrintErr(" done.\n");
-  }
+    NoSafepointScope no_safepoints;
-  const int64_t start = OS::GetCurrentTimeMicros();
+    if (FLAG_print_free_list_before_gc) {
+      OS::Print("Data Freelist (before GC):\n");
+      freelist_[HeapPage::kData].Print();
+      OS::Print("Executable Freelist (before GC):\n");
+      freelist_[HeapPage::kExecutable].Print();
+    }
-  // Make code pages writable.
-  WriteProtectCode(false);
+    if (FLAG_verify_before_gc) {
+      OS::PrintErr("Verifying before marking...");
+      heap_->VerifyGC();
+      OS::PrintErr(" done.\n");
+    }
-  // Save old value before GCMarker visits the weak persistent handles.
-  SpaceUsage usage_before = GetCurrentUsage();
+    const int64_t start = OS::GetCurrentTimeMicros();
-  // Mark all reachable old-gen objects.
-  bool collect_code = FLAG_collect_code && ShouldCollectCode();
-  GCMarker marker(heap_);
-  marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code);
-  usage_.used_in_words = marker.marked_words();
+    // Make code pages writable.
+    WriteProtectCode(false);
-  int64_t mid1 = OS::GetCurrentTimeMicros();
+    // Save old value before GCMarker visits the weak persistent handles.
+    SpaceUsage usage_before = GetCurrentUsage();
-  // Abandon the remainder of the bump allocation block.
-  AbandonBumpAllocation();
-  // Reset the freelists and setup sweeping.
-  freelist_[HeapPage::kData].Reset();
-  freelist_[HeapPage::kExecutable].Reset();
+    // Mark all reachable old-gen objects.
+    bool collect_code = FLAG_collect_code && ShouldCollectCode();
+    GCMarker marker(heap_);
+    marker.MarkObjects(isolate, this, invoke_api_callbacks, collect_code);
+    usage_.used_in_words = marker.marked_words();
-  int64_t mid2 = OS::GetCurrentTimeMicros();
-  int64_t mid3 = 0;
+    int64_t mid1 = OS::GetCurrentTimeMicros();
-  {
-    if (FLAG_verify_before_gc) {
-      OS::PrintErr("Verifying before sweeping...");
-      heap_->VerifyGC(kAllowMarked);
-      OS::PrintErr(" done.\n");
-    }
-    GCSweeper sweeper;
+    // Abandon the remainder of the bump allocation block.
+    AbandonBumpAllocation();
+    // Reset the freelists and setup sweeping.
+    freelist_[HeapPage::kData].Reset();
+    freelist_[HeapPage::kExecutable].Reset();
-    // During stop-the-world phases we should use bulk lock when adding elements
-    // to the free list.
-    MutexLocker mld(freelist_[HeapPage::kData].mutex());
-    MutexLocker mle(freelist_[HeapPage::kExecutable].mutex());
+    int64_t mid2 = OS::GetCurrentTimeMicros();
+    int64_t mid3 = 0;
-    // Large and executable pages are always swept immediately.
-    HeapPage* prev_page = NULL;
-    HeapPage* page = large_pages_;
-    while (page != NULL) {
-      HeapPage* next_page = page->next();
-      const intptr_t words_to_end = sweeper.SweepLargePage(page);
-      if (words_to_end == 0) {
-        FreeLargePage(page, prev_page);
-      } else {
-        TruncateLargePage(page, words_to_end << kWordSizeLog2);
-        prev_page = page;
+    {
+      if (FLAG_verify_before_gc) {
+        OS::PrintErr("Verifying before sweeping...");
+        heap_->VerifyGC(kAllowMarked);
+        OS::PrintErr(" done.\n");
       }
-      // Advance to the next page.
-      page = next_page;
-    }
+      GCSweeper sweeper;
-    prev_page = NULL;
-    page = exec_pages_;
-    FreeList* freelist = &freelist_[HeapPage::kExecutable];
-    while (page != NULL) {
-      HeapPage* next_page = page->next();
-      bool page_in_use = sweeper.SweepPage(page, freelist, true);
-      if (page_in_use) {
-        prev_page = page;
-      } else {
-        FreePage(page, prev_page);
-      }
-      // Advance to the next page.
-      page = next_page;
-    }
+      // During stop-the-world phases we should use bulk lock when adding
+      // elements to the free list.
+      MutexLocker mld(freelist_[HeapPage::kData].mutex());
+      MutexLocker mle(freelist_[HeapPage::kExecutable].mutex());
-    mid3 = OS::GetCurrentTimeMicros();
+      // Large and executable pages are always swept immediately.
+      HeapPage* prev_page = NULL;
+      HeapPage* page = large_pages_;
+      while (page != NULL) {
+        HeapPage* next_page = page->next();
+        const intptr_t words_to_end = sweeper.SweepLargePage(page);
+        if (words_to_end == 0) {
+          FreeLargePage(page, prev_page);
+        } else {
+          TruncateLargePage(page, words_to_end << kWordSizeLog2);
+          prev_page = page;
+        }
+        // Advance to the next page.
+        page = next_page;
+      }
-    if (!FLAG_concurrent_sweep) {
-      // Sweep all regular sized pages now.
       prev_page = NULL;
-      page = pages_;
+      page = exec_pages_;
+      FreeList* freelist = &freelist_[HeapPage::kExecutable];
       while (page != NULL) {
         HeapPage* next_page = page->next();
-        bool page_in_use = sweeper.SweepPage(
-            page, &freelist_[page->type()], true);
+        bool page_in_use = sweeper.SweepPage(page, freelist, true);
         if (page_in_use) {
           prev_page = page;
         } else {
@@ -914,47 +898,65 @@ void PageSpace::MarkSweep(bool invoke_api_callbacks) {
         // Advance to the next page.
         page = next_page;
       }
-      if (FLAG_verify_after_gc) {
-        OS::PrintErr("Verifying after sweeping...");
-        heap_->VerifyGC(kForbidMarked);
-        OS::PrintErr(" done.\n");
+
+      mid3 = OS::GetCurrentTimeMicros();
+
+      if (!FLAG_concurrent_sweep) {
+        // Sweep all regular sized pages now.
+        prev_page = NULL;
+        page = pages_;
+        while (page != NULL) {
+          HeapPage* next_page = page->next();
+          bool page_in_use = sweeper.SweepPage(
+              page, &freelist_[page->type()], true);
+          if (page_in_use) {
+            prev_page = page;
+          } else {
+            FreePage(page, prev_page);
+          }
+          // Advance to the next page.
+          page = next_page;
+        }
+        if (FLAG_verify_after_gc) {
+          OS::PrintErr("Verifying after sweeping...");
+          heap_->VerifyGC(kForbidMarked);
+          OS::PrintErr(" done.\n");
+        }
+      } else {
+        // Start the concurrent sweeper task now.
+        GCSweeper::SweepConcurrent(
+            isolate, pages_, pages_tail_, &freelist_[HeapPage::kData]);
       }
-    } else {
-      // Start the concurrent sweeper task now.
-      GCSweeper::SweepConcurrent(
-          isolate, pages_, pages_tail_, &freelist_[HeapPage::kData]);
     }
-  }
-  // Make code pages read-only.
-  WriteProtectCode(true);
+    // Make code pages read-only.
+    WriteProtectCode(true);
-  int64_t end = OS::GetCurrentTimeMicros();
+    int64_t end = OS::GetCurrentTimeMicros();
-  // Record signals for growth control. Include size of external allocations.
-  page_space_controller_.EvaluateGarbageCollection(usage_before,
-                                                   GetCurrentUsage(),
-                                                   start, end);
+    // Record signals for growth control. Include size of external allocations.
+    page_space_controller_.EvaluateGarbageCollection(usage_before,
+                                                     GetCurrentUsage(),
+                                                     start, end);
-  heap_->RecordTime(kMarkObjects, mid1 - start);
-  heap_->RecordTime(kResetFreeLists, mid2 - mid1);
-  heap_->RecordTime(kSweepPages, mid3 - mid2);
-  heap_->RecordTime(kSweepLargePages, end - mid3);
+    heap_->RecordTime(kMarkObjects, mid1 - start);
+    heap_->RecordTime(kResetFreeLists, mid2 - mid1);
+    heap_->RecordTime(kSweepPages, mid3 - mid2);
+    heap_->RecordTime(kSweepLargePages, end - mid3);
-  if (FLAG_print_free_list_after_gc) {
-    OS::Print("Data Freelist (after GC):\n");
-    freelist_[HeapPage::kData].Print();
-    OS::Print("Executable Freelist (after GC):\n");
-    freelist_[HeapPage::kExecutable].Print();
-  }
+    if (FLAG_print_free_list_after_gc) {
+      OS::Print("Data Freelist (after GC):\n");
+      freelist_[HeapPage::kData].Print();
+      OS::Print("Executable Freelist (after GC):\n");
+      freelist_[HeapPage::kExecutable].Print();
+    }
-  UpdateMaxUsed();
-  if (heap_ != NULL) {
-    heap_->UpdateGlobalMaxUsed();
+    UpdateMaxUsed();
+    if (heap_ != NULL) {
+      heap_->UpdateGlobalMaxUsed();
+    }
   }
-  isolate->thread_registry()->ResumeAllThreads();
-
   // Done, reset the task count.
   {
     MonitorLocker ml(tasks_lock());
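
The heart of this CL is replacing the explicit stop/resume pair (isolate->thread_registry()->SafepointThreads() before the collection, ResumeAllThreads() after it) with a braced SafepointOperationScope whose destructor resumes the world whenever control leaves the block. A minimal sketch of that RAII pattern; the class and method names below are illustrative stand-ins, not the VM's actual safepoint API:

#include <cstdio>

// Stand-in for the VM's thread registry.
class ThreadRegistry {
 public:
  void SafepointThreads() { std::puts("all mutators stopped"); }
  void ResumeAllThreads() { std::puts("all mutators resumed"); }
};

// RAII scope: the constructor brings every thread to a safepoint and
// the destructor resumes them, so no return path out of the GC can
// forget the matching resume call.
class ScopedSafepointOperation {
 public:
  explicit ScopedSafepointOperation(ThreadRegistry* registry)
      : registry_(registry) {
    registry_->SafepointThreads();
  }
  ~ScopedSafepointOperation() { registry_->ResumeAllThreads(); }

 private:
  ThreadRegistry* registry_;
};

void CollectGarbage(ThreadRegistry* registry) {
  ScopedSafepointOperation safepoint_scope(registry);
  // ... mark and sweep while the world is stopped ...
}  // Threads resume here, at the closing brace.

This is also why almost every body line of MarkSweep is re-indented by two spaces in the diff: the whole stop-the-world portion now lives inside the safepoint_scope block.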
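The change from locker.Wait() to locker.WaitWithSafepointCheck(thread) closes a related hole: a thread parked on tasks_lock() would otherwise be invisible to a concurrent safepoint operation, which must wait for every mutator to check in. The rough shape of such a wait, sketched with hypothetical EnterSafepoint/ExitSafepoint helpers rather than the VM's real signatures:

// Sketch only; Thread and Monitor are stand-ins for the VM's types.
struct Thread {
  void EnterSafepoint() { /* publish "I am at a safepoint" */ }
  void ExitSafepoint() { /* may block until a pending safepoint ends */ }
};

struct Monitor {
  void Wait() { /* block until notified */ }
};

// While parked on the monitor, the thread counts as being at a
// safepoint, so a safepoint operation elsewhere can proceed instead
// of deadlocking against the waiter.
void WaitWithSafepointCheck(Thread* thread, Monitor* monitor) {
  thread->EnterSafepoint();
  monitor->Wait();
  thread->ExitSafepoint();
}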
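Finally, the carried-over comment about using a "bulk lock" during stop-the-world phases refers to the two MutexLockers (mld and mle) that hold the freelist mutexes across the entire sweep, so each freed block is appended without a per-block lock/unlock; the trailing true argument to SweepPage is consistent with the caller already holding the lock. A toy illustration of the same idea, with a FreeList that is ours rather than the VM's:

#include <mutex>
#include <vector>

// Toy freelist: a mutex guarding a list of freed blocks.
struct FreeList {
  std::mutex mutex;
  std::vector<void*> blocks;
};

// One lock acquisition covers the whole phase; with many blocks per
// sweep this beats locking once per insertion.
void SweepWithBulkLock(FreeList* freelist,
                       const std::vector<void*>& freed) {
  std::lock_guard<std::mutex> bulk_lock(freelist->mutex);
  for (void* block : freed) {
    freelist->blocks.push_back(block);
  }
}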