Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 1614953002: [heap] Cleanup: Remove WAS_SWEPT flag. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Addressed comments | Created 4 years, 11 months ago
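This CL folds the old WAS_SWEPT page flag into a single per-page sweeping state. In the diff below, call sites switch from WasSwept()/SweepingCompleted() to SweepingDone(), and from parallel_sweeping_state() to concurrent_sweeping_state(), using the states kSweepingDone, kSweepingPending and kSweepingInProgress. The actual declarations live in src/heap/spaces.h (the next file in this change); what follows is only a minimal, hypothetical sketch of that page-side interface, where PageSketch and the std::atomic member are stand-ins rather than real V8 code.

#include <atomic>

// Hypothetical, simplified model of the page-side sweeping state assumed by
// this CL. Real declarations are in src/heap/spaces.h; V8 uses its own atomic
// wrapper instead of std::atomic. State names are taken from the diff below.
class PageSketch {
 public:
  enum ConcurrentSweepingState {
    kSweepingDone,        // page is swept and iterable
    kSweepingPending,     // page is queued on a sweeping_list
    kSweepingInProgress,  // a sweeper (main thread or task) owns the page
  };

  // Replaces WasSwept()/SweepingCompleted(): a page counts as swept exactly
  // when its state machine has reached kSweepingDone.
  bool SweepingDone() const {
    return concurrent_sweeping_state_.load() == kSweepingDone;
  }

  ConcurrentSweepingState concurrent_sweeping_state() const {
    return concurrent_sweeping_state_.load();
  }

  void set_concurrent_sweeping_state(ConcurrentSweepingState state) {
    concurrent_sweeping_state_.store(state);
  }

 private:
  std::atomic<ConcurrentSweepingState> concurrent_sweeping_state_{kSweepingDone};
};

With this, "page was swept" is no longer tracked redundantly in both a flag and a state machine; the state alone answers it.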
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 522 matching lines...)
533 new SweeperTask(heap(), heap()->code_space()), 533 new SweeperTask(heap(), heap()->code_space()),
534 v8::Platform::kShortRunningTask); 534 v8::Platform::kShortRunningTask);
535 V8::GetCurrentPlatform()->CallOnBackgroundThread( 535 V8::GetCurrentPlatform()->CallOnBackgroundThread(
536 new SweeperTask(heap(), heap()->map_space()), 536 new SweeperTask(heap(), heap()->map_space()),
537 v8::Platform::kShortRunningTask); 537 v8::Platform::kShortRunningTask);
538 } 538 }
539 539
540 540
541 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) { 541 void MarkCompactCollector::SweepOrWaitUntilSweepingCompleted(Page* page) {
542 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner()); 542 PagedSpace* owner = reinterpret_cast<PagedSpace*>(page->owner());
543 if (!page->SweepingCompleted()) { 543 if (!page->SweepingDone()) {
544 SweepInParallel(page, owner); 544 SweepInParallel(page, owner);
545 if (!page->SweepingCompleted()) { 545 if (!page->SweepingDone()) {
546 // We were not able to sweep that page, i.e., a concurrent 546 // We were not able to sweep that page, i.e., a concurrent
547 // sweeper thread currently owns this page. Wait for the sweeper 547 // sweeper thread currently owns this page. Wait for the sweeper
548 // thread to be done with this page. 548 // thread to be done with this page.
549 page->WaitUntilSweepingCompleted(); 549 page->WaitUntilSweepingCompleted();
550 } 550 }
551 } 551 }
552 } 552 }
553 553
554 554
555 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { 555 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) {
(...skipping 158 matching lines...)
714 PageIterator it(space); 714 PageIterator it(space);
715 while (it.has_next()) { 715 while (it.has_next()) {
716 Page* p = it.next(); 716 Page* p = it.next();
717 if (p->NeverEvacuate()) continue; 717 if (p->NeverEvacuate()) continue;
718 if (p->IsFlagSet(Page::POPULAR_PAGE)) { 718 if (p->IsFlagSet(Page::POPULAR_PAGE)) {
719 // This page had slots buffer overflow on previous GC, skip it. 719 // This page had slots buffer overflow on previous GC, skip it.
720 p->ClearFlag(Page::POPULAR_PAGE); 720 p->ClearFlag(Page::POPULAR_PAGE);
721 continue; 721 continue;
722 } 722 }
723 // Invariant: Evacuation candidates are just created when marking is 723 // Invariant: Evacuation candidates are just created when marking is
724 // started. At the end of a GC all evacuation candidates are cleared and 724 // started. This means that sweeping has finished. Furthermore, at the end
725 // their slot buffers are released. 725 // of a GC all evacuation candidates are cleared and their slot buffers are
726 // released.
726 CHECK(!p->IsEvacuationCandidate()); 727 CHECK(!p->IsEvacuationCandidate());
727 CHECK(p->slots_buffer() == NULL); 728 CHECK(p->slots_buffer() == nullptr);
729 CHECK(p->SweepingDone());
728 DCHECK(p->area_size() == area_size); 730 DCHECK(p->area_size() == area_size);
729 int live_bytes = 731 pages.push_back(std::make_pair(p->LiveBytesFromFreeList(), p));
730 p->WasSwept() ? p->LiveBytesFromFreeList() : p->LiveBytes();
731 pages.push_back(std::make_pair(live_bytes, p));
732 } 732 }
733 733
734 int candidate_count = 0; 734 int candidate_count = 0;
735 int total_live_bytes = 0; 735 int total_live_bytes = 0;
736 736
737 const bool reduce_memory = heap()->ShouldReduceMemory(); 737 const bool reduce_memory = heap()->ShouldReduceMemory();
738 if (FLAG_manual_evacuation_candidates_selection) { 738 if (FLAG_manual_evacuation_candidates_selection) {
739 for (size_t i = 0; i < pages.size(); i++) { 739 for (size_t i = 0; i < pages.size(); i++) {
740 Page* p = pages[i].second; 740 Page* p = pages[i].second;
741 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) { 741 if (p->IsFlagSet(MemoryChunk::FORCE_EVACUATION_CANDIDATE_FOR_TESTING)) {
(...skipping 2475 matching lines...)
3217 // might have stale store buffer entries that become "valid" again 3217 // might have stale store buffer entries that become "valid" again
3218 // after reusing the memory. Note that all existing store buffer 3218 // after reusing the memory. Note that all existing store buffer
3219 // entries of such pages are filtered before rescanning. 3219 // entries of such pages are filtered before rescanning.
3220 DCHECK(p->IsEvacuationCandidate()); 3220 DCHECK(p->IsEvacuationCandidate());
3221 p->SetFlag(Page::COMPACTION_WAS_ABORTED); 3221 p->SetFlag(Page::COMPACTION_WAS_ABORTED);
3222 p->set_scan_on_scavenge(true); 3222 p->set_scan_on_scavenge(true);
3223 abandoned_pages++; 3223 abandoned_pages++;
3224 break; 3224 break;
3225 case MemoryChunk::kCompactingFinalize: 3225 case MemoryChunk::kCompactingFinalize:
3226 DCHECK(p->IsEvacuationCandidate()); 3226 DCHECK(p->IsEvacuationCandidate());
3227 p->SetWasSwept(); 3227 DCHECK(p->SweepingDone());
3228 p->Unlink(); 3228 p->Unlink();
3229 break; 3229 break;
3230 case MemoryChunk::kCompactingDone: 3230 case MemoryChunk::kCompactingDone:
3231 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); 3231 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
3232 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3232 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3233 break; 3233 break;
3234 default: 3234 default:
3235 // We should not observe kCompactingInProgress, or kCompactingDone. 3235 // We should not observe kCompactingInProgress, or kCompactingDone.
3236 UNREACHABLE(); 3236 UNREACHABLE();
3237 } 3237 }
(...skipping 45 matching lines...)
3283 3283
3284 void MarkCompactCollector::EvacuatePages( 3284 void MarkCompactCollector::EvacuatePages(
3285 CompactionSpaceCollection* compaction_spaces, 3285 CompactionSpaceCollection* compaction_spaces,
3286 SlotsBuffer** evacuation_slots_buffer) { 3286 SlotsBuffer** evacuation_slots_buffer) {
3287 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces, 3287 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
3288 evacuation_slots_buffer); 3288 evacuation_slots_buffer);
3289 for (int i = 0; i < evacuation_candidates_.length(); i++) { 3289 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3290 Page* p = evacuation_candidates_[i]; 3290 Page* p = evacuation_candidates_[i];
3291 DCHECK(p->IsEvacuationCandidate() || 3291 DCHECK(p->IsEvacuationCandidate() ||
3292 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3292 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3293 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == 3293 DCHECK(p->SweepingDone());
3294 MemoryChunk::kSweepingDone);
3295 if (p->parallel_compaction_state().TrySetValue( 3294 if (p->parallel_compaction_state().TrySetValue(
3296 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { 3295 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3297 if (p->IsEvacuationCandidate()) { 3296 if (p->IsEvacuationCandidate()) {
3298 DCHECK_EQ(p->parallel_compaction_state().Value(), 3297 DCHECK_EQ(p->parallel_compaction_state().Value(),
3299 MemoryChunk::kCompactingInProgress); 3298 MemoryChunk::kCompactingInProgress);
3300 double start = heap()->MonotonicallyIncreasingTimeInMs(); 3299 double start = heap()->MonotonicallyIncreasingTimeInMs();
3301 intptr_t live_bytes = p->LiveBytes(); 3300 intptr_t live_bytes = p->LiveBytes();
3302 AlwaysAllocateScope always_allocate(isolate()); 3301 AlwaysAllocateScope always_allocate(isolate());
3303 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) { 3302 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
3304 p->parallel_compaction_state().SetValue( 3303 p->parallel_compaction_state().SetValue(
(...skipping 53 matching lines...)
3358 // Sweeps a page. After sweeping the page can be iterated. 3357 // Sweeps a page. After sweeping the page can be iterated.
3359 // Slots in live objects pointing into evacuation candidates are updated 3358 // Slots in live objects pointing into evacuation candidates are updated
3360 // if requested. 3359 // if requested.
3361 // Returns the size of the biggest continuous freed memory chunk in bytes. 3360 // Returns the size of the biggest continuous freed memory chunk in bytes.
3362 template <SweepingMode sweeping_mode, 3361 template <SweepingMode sweeping_mode,
3363 MarkCompactCollector::SweepingParallelism parallelism, 3362 MarkCompactCollector::SweepingParallelism parallelism,
3364 SkipListRebuildingMode skip_list_mode, 3363 SkipListRebuildingMode skip_list_mode,
3365 FreeSpaceTreatmentMode free_space_mode> 3364 FreeSpaceTreatmentMode free_space_mode>
3366 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p, 3365 static int Sweep(PagedSpace* space, FreeList* free_list, Page* p,
3367 ObjectVisitor* v) { 3366 ObjectVisitor* v) {
3368 DCHECK(!p->IsEvacuationCandidate() && !p->WasSwept()); 3367 DCHECK(!p->IsEvacuationCandidate() && !p->SweepingDone());
3369 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST, 3368 DCHECK_EQ(skip_list_mode == REBUILD_SKIP_LIST,
3370 space->identity() == CODE_SPACE); 3369 space->identity() == CODE_SPACE);
3371 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST)); 3370 DCHECK((p->skip_list() == NULL) || (skip_list_mode == REBUILD_SKIP_LIST));
3372 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD || 3371 DCHECK(parallelism == MarkCompactCollector::SWEEP_ON_MAIN_THREAD ||
3373 sweeping_mode == SWEEP_ONLY); 3372 sweeping_mode == SWEEP_ONLY);
3374 3373
3375 Address free_start = p->area_start(); 3374 Address free_start = p->area_start();
3376 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0); 3375 DCHECK(reinterpret_cast<intptr_t>(free_start) % (32 * kPointerSize) == 0);
3377 3376
3378 // If we use the skip list for code space pages, we have to lock the skip 3377 // If we use the skip list for code space pages, we have to lock the skip
(...skipping 42 matching lines...)
3421 Bitmap::Clear(p); 3420 Bitmap::Clear(p);
3422 3421
3423 if (free_start != p->area_end()) { 3422 if (free_start != p->area_end()) {
3424 int size = static_cast<int>(p->area_end() - free_start); 3423 int size = static_cast<int>(p->area_end() - free_start);
3425 if (free_space_mode == ZAP_FREE_SPACE) { 3424 if (free_space_mode == ZAP_FREE_SPACE) {
3426 memset(free_start, 0xcc, size); 3425 memset(free_start, 0xcc, size);
3427 } 3426 }
3428 freed_bytes = Free<parallelism>(space, free_list, free_start, size); 3427 freed_bytes = Free<parallelism>(space, free_list, free_start, size);
3429 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 3428 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
3430 } 3429 }
3431 3430 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3432 if (parallelism == MarkCompactCollector::SWEEP_IN_PARALLEL) {
3433 // When concurrent sweeping is active, the page will be marked after
3434 // sweeping by the main thread.
3435 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingFinalize);
3436 } else {
3437 p->SetWasSwept();
3438 }
3439 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); 3431 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes));
3440 } 3432 }
3441 3433
3442 3434
3443 void MarkCompactCollector::InvalidateCode(Code* code) { 3435 void MarkCompactCollector::InvalidateCode(Code* code) {
3444 if (heap_->incremental_marking()->IsCompacting() && 3436 if (heap_->incremental_marking()->IsCompacting() &&
3445 !ShouldSkipEvacuationSlotRecording(code)) { 3437 !ShouldSkipEvacuationSlotRecording(code)) {
3446 DCHECK(compacting_); 3438 DCHECK(compacting_);
3447 3439
3448 // If the object is white then no slots were recorded on it yet. 3440 // If the object is white then no slots were recorded on it yet.
(...skipping 98 matching lines...)
3547 } 3539 }
3548 } 3540 }
3549 3541
3550 3542
3551 void MarkCompactCollector::SweepAbortedPages() { 3543 void MarkCompactCollector::SweepAbortedPages() {
3552 // Second pass on aborted pages. 3544 // Second pass on aborted pages.
3553 for (int i = 0; i < evacuation_candidates_.length(); i++) { 3545 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3554 Page* p = evacuation_candidates_[i]; 3546 Page* p = evacuation_candidates_[i];
3555 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { 3547 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3556 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); 3548 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
3549 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3557 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3550 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3558 switch (space->identity()) { 3551 switch (space->identity()) {
3559 case OLD_SPACE: 3552 case OLD_SPACE:
3560 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, 3553 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3561 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); 3554 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3562 break; 3555 break;
3563 case CODE_SPACE: 3556 case CODE_SPACE:
3564 if (FLAG_zap_code_space) { 3557 if (FLAG_zap_code_space) {
3565 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST, 3558 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, REBUILD_SKIP_LIST,
3566 ZAP_FREE_SPACE>(space, NULL, p, nullptr); 3559 ZAP_FREE_SPACE>(space, NULL, p, nullptr);
(...skipping 142 matching lines...)
3709 } 3702 }
3710 } 3703 }
3711 3704
3712 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { 3705 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
3713 if (FLAG_gc_verbose) { 3706 if (FLAG_gc_verbose) {
3714 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", 3707 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
3715 reinterpret_cast<intptr_t>(p)); 3708 reinterpret_cast<intptr_t>(p));
3716 } 3709 }
3717 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3710 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3718 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 3711 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
3712 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3719 3713
3720 switch (space->identity()) { 3714 switch (space->identity()) {
3721 case OLD_SPACE: 3715 case OLD_SPACE:
3722 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, 3716 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
3723 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, 3717 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
3724 &updating_visitor); 3718 &updating_visitor);
3725 break; 3719 break;
3726 case CODE_SPACE: 3720 case CODE_SPACE:
3727 if (FLAG_zap_code_space) { 3721 if (FLAG_zap_code_space) {
3728 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, 3722 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
(...skipping 30 matching lines...)
3759 3753
3760 void MarkCompactCollector::ReleaseEvacuationCandidates() { 3754 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3761 int npages = evacuation_candidates_.length(); 3755 int npages = evacuation_candidates_.length();
3762 for (int i = 0; i < npages; i++) { 3756 for (int i = 0; i < npages; i++) {
3763 Page* p = evacuation_candidates_[i]; 3757 Page* p = evacuation_candidates_[i];
3764 if (!p->IsEvacuationCandidate()) continue; 3758 if (!p->IsEvacuationCandidate()) continue;
3765 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3759 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3766 space->Free(p->area_start(), p->area_size()); 3760 space->Free(p->area_start(), p->area_size());
3767 p->set_scan_on_scavenge(false); 3761 p->set_scan_on_scavenge(false);
3768 p->ResetLiveBytes(); 3762 p->ResetLiveBytes();
3769 CHECK(p->WasSwept()); 3763 CHECK(p->SweepingDone());
3770 space->ReleasePage(p); 3764 space->ReleasePage(p, true);
3771 } 3765 }
3772 evacuation_candidates_.Rewind(0); 3766 evacuation_candidates_.Rewind(0);
3773 compacting_ = false; 3767 compacting_ = false;
3774 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); 3768 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
3775 heap()->FreeQueuedChunks(); 3769 heap()->FreeQueuedChunks();
3776 } 3770 }
3777 3771
3778 3772
3779 int MarkCompactCollector::SweepInParallel(PagedSpace* space, 3773 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
3780 int required_freed_bytes, 3774 int required_freed_bytes,
(...skipping 14 matching lines...)
3795 } 3789 }
3796 } 3790 }
3797 return max_freed_overall; 3791 return max_freed_overall;
3798 } 3792 }
3799 3793
3800 3794
3801 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { 3795 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
3802 int max_freed = 0; 3796 int max_freed = 0;
3803 if (page->TryLock()) { 3797 if (page->TryLock()) {
3804 // If this page was already swept in the meantime, we can return here. 3798 // If this page was already swept in the meantime, we can return here.
3805 if (page->parallel_sweeping_state().Value() != 3799 if (page->concurrent_sweeping_state().Value() != Page::kSweepingPending) {
3806 MemoryChunk::kSweepingPending) {
3807 page->mutex()->Unlock(); 3800 page->mutex()->Unlock();
3808 return 0; 3801 return 0;
3809 } 3802 }
3810 page->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingInProgress); 3803 page->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3811 FreeList* free_list; 3804 FreeList* free_list;
3812 FreeList private_free_list(space); 3805 FreeList private_free_list(space);
3813 if (space->identity() == OLD_SPACE) { 3806 if (space->identity() == OLD_SPACE) {
3814 free_list = free_list_old_space_.get(); 3807 free_list = free_list_old_space_.get();
3815 max_freed = 3808 max_freed =
3816 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, 3809 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3817 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); 3810 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3818 } else if (space->identity() == CODE_SPACE) { 3811 } else if (space->identity() == CODE_SPACE) {
3819 free_list = free_list_code_space_.get(); 3812 free_list = free_list_code_space_.get();
3820 max_freed = 3813 max_freed =
3821 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST, 3814 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, REBUILD_SKIP_LIST,
3822 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); 3815 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3823 } else { 3816 } else {
3824 free_list = free_list_map_space_.get(); 3817 free_list = free_list_map_space_.get();
3825 max_freed = 3818 max_freed =
3826 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST, 3819 Sweep<SWEEP_ONLY, SWEEP_IN_PARALLEL, IGNORE_SKIP_LIST,
3827 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL); 3820 IGNORE_FREE_SPACE>(space, &private_free_list, page, NULL);
3828 } 3821 }
3829 free_list->Concatenate(&private_free_list); 3822 free_list->Concatenate(&private_free_list);
3823 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone);
3830 page->mutex()->Unlock(); 3824 page->mutex()->Unlock();
3831 } 3825 }
3832 return max_freed; 3826 return max_freed;
3833 } 3827 }
3834 3828
3835 3829
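SweepInParallel() above can run concurrently, both from background SweeperTasks and from the main thread (for example via SweepOrWaitUntilSweepingCompleted()), so it first claims the page: take the page mutex, check that the state is still kSweepingPending, sweep, and finish with kSweepingDone. Below is a minimal model of that claim protocol, reusing the hypothetical PageSketch from the sketch near the top of this review; TrySweepPage, the std::mutex parameter, and the elided sweep body are illustrative stand-ins, not V8 code.

#include <mutex>

// Hypothetical model of the claim protocol in SweepInParallel(); the real code
// uses the page's TryLock()/mutex and sweeps into a private free list that is
// concatenated into the space's shared free list afterwards.
bool TrySweepPage(PageSketch* page, std::mutex* page_mutex) {
  if (!page_mutex->try_lock()) return false;  // another sweeper holds the page
  if (page->concurrent_sweeping_state() != PageSketch::kSweepingPending) {
    page_mutex->unlock();  // already swept in the meantime
    return false;
  }
  page->set_concurrent_sweeping_state(PageSketch::kSweepingInProgress);
  // ... sweep dead objects, rebuild the free list ...
  page->set_concurrent_sweeping_state(PageSketch::kSweepingDone);
  page_mutex->unlock();
  return true;
}

The kSweepingPending check is what lets a second caller return immediately with 0 freed bytes when the page was already swept in the meantime.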
3836 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) { 3830 void MarkCompactCollector::StartSweepSpace(PagedSpace* space) {
3837 space->ClearStats(); 3831 space->ClearStats();
3838 3832
3839 PageIterator it(space); 3833 PageIterator it(space);
3840 3834
3841 int will_be_swept = 0; 3835 int will_be_swept = 0;
3842 bool unused_page_present = false; 3836 bool unused_page_present = false;
3843 3837
3844 while (it.has_next()) { 3838 while (it.has_next()) {
3845 Page* p = it.next(); 3839 Page* p = it.next();
3846 DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone); 3840 DCHECK(p->SweepingDone());
3847
3848 // Clear sweeping flags indicating that marking bits are still intact.
3849 p->ClearWasSwept();
3850 3841
3851 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || 3842 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION) ||
3852 p->IsEvacuationCandidate()) { 3843 p->IsEvacuationCandidate()) {
3853 // Will be processed in EvacuateNewSpaceAndCandidates. 3844 // Will be processed in EvacuateNewSpaceAndCandidates.
3854 DCHECK(evacuation_candidates_.length() > 0); 3845 DCHECK(evacuation_candidates_.length() > 0);
3855 continue; 3846 continue;
3856 } 3847 }
3857 3848
3858 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { 3849 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) {
3859 // We need to sweep the page to get it into an iterable state again. Note 3850 // We need to sweep the page to get it into an iterable state again. Note
3860 // that this adds unusable memory into the free list that is later on 3851 // that this adds unusable memory into the free list that is later on
3861 // (in the free list) dropped again. Since we only use the flag for 3852 // (in the free list) dropped again. Since we only use the flag for
3862 // testing this is fine. 3853 // testing this is fine.
3854 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3863 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, 3855 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3864 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); 3856 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3865 continue; 3857 continue;
3866 } 3858 }
3867 3859
3868 // One unused page is kept, all further are released before sweeping them. 3860 // One unused page is kept, all further are released before sweeping them.
3869 if (p->LiveBytes() == 0) { 3861 if (p->LiveBytes() == 0) {
3870 if (unused_page_present) { 3862 if (unused_page_present) {
3871 if (FLAG_gc_verbose) { 3863 if (FLAG_gc_verbose) {
3872 PrintIsolate(isolate(), "sweeping: released page: %p", p); 3864 PrintIsolate(isolate(), "sweeping: released page: %p", p);
3873 } 3865 }
3874 space->ReleasePage(p); 3866 space->ReleasePage(p, false);
3875 continue; 3867 continue;
3876 } 3868 }
3877 unused_page_present = true; 3869 unused_page_present = true;
3878 } 3870 }
3879 3871
3872 p->concurrent_sweeping_state().SetValue(Page::kSweepingPending);
3880 sweeping_list(space).push_back(p); 3873 sweeping_list(space).push_back(p);
3881 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingPending);
3882 int to_sweep = p->area_size() - p->LiveBytes(); 3874 int to_sweep = p->area_size() - p->LiveBytes();
3883 space->accounting_stats_.ShrinkSpace(to_sweep); 3875 space->accounting_stats_.ShrinkSpace(to_sweep);
3884 will_be_swept++; 3876 will_be_swept++;
3885 } 3877 }
3886 3878
3887 if (FLAG_gc_verbose) { 3879 if (FLAG_gc_verbose) {
3888 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", 3880 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d",
3889 AllocationSpaceName(space->identity()), will_be_swept); 3881 AllocationSpaceName(space->identity()), will_be_swept);
3890 } 3882 }
3891 std::sort(sweeping_list(space).begin(), sweeping_list(space).end(), 3883 std::sort(sweeping_list(space).begin(), sweeping_list(space).end(),
(...skipping 41 matching lines...)
3933 // buffer entries are already filtered out. We can just release the memory. 3925 heap()->FreeQueuedChunks();
3934 heap()->FreeQueuedChunks(); 3926 heap()->FreeQueuedChunks();
3935 3927
3936 if (FLAG_print_cumulative_gc_stat) { 3928 if (FLAG_print_cumulative_gc_stat) {
3937 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() - 3929 heap_->tracer()->AddSweepingTime(heap_->MonotonicallyIncreasingTimeInMs() -
3938 start_time); 3930 start_time);
3939 } 3931 }
3940 } 3932 }
3941 3933
3942 3934
3943 void MarkCompactCollector::ParallelSweepSpaceComplete(PagedSpace* space) {
3944 for (Page* p : sweeping_list(space)) {
3945 if (p->parallel_sweeping_state().Value() ==
3946 MemoryChunk::kSweepingFinalize) {
3947 p->parallel_sweeping_state().SetValue(MemoryChunk::kSweepingDone);
3948 p->SetWasSwept();
3949 }
3950 DCHECK(p->parallel_sweeping_state().Value() == MemoryChunk::kSweepingDone);
3951 }
3952 }
3953
3954
3955 void MarkCompactCollector::ParallelSweepSpacesComplete() { 3935 void MarkCompactCollector::ParallelSweepSpacesComplete() {
3956 ParallelSweepSpaceComplete(heap()->old_space());
3957 ParallelSweepSpaceComplete(heap()->code_space());
3958 ParallelSweepSpaceComplete(heap()->map_space());
3959 sweeping_list(heap()->old_space()).clear(); 3936 sweeping_list(heap()->old_space()).clear();
3960 sweeping_list(heap()->code_space()).clear(); 3937 sweeping_list(heap()->code_space()).clear();
3961 sweeping_list(heap()->map_space()).clear(); 3938 sweeping_list(heap()->map_space()).clear();
3962 } 3939 }
3963 3940
3964 3941
3965 // TODO(1466) ReportDeleteIfNeeded is not called currently. 3942 // TODO(1466) ReportDeleteIfNeeded is not called currently.
3966 // Our profiling tools do not expect intersections between 3943 // Our profiling tools do not expect intersections between
3967 // code objects. We should either reenable it or change our tools. 3944 // code objects. We should either reenable it or change our tools.
3968 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj, 3945 void MarkCompactCollector::ReportDeleteIfNeeded(HeapObject* obj,
(...skipping 60 matching lines...)
4029 MarkBit mark_bit = Marking::MarkBitFrom(host); 4006 MarkBit mark_bit = Marking::MarkBitFrom(host);
4030 if (Marking::IsBlack(mark_bit)) { 4007 if (Marking::IsBlack(mark_bit)) {
4031 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); 4008 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
4032 RecordRelocSlot(&rinfo, target); 4009 RecordRelocSlot(&rinfo, target);
4033 } 4010 }
4034 } 4011 }
4035 } 4012 }
4036 4013
4037 } // namespace internal 4014 } // namespace internal
4038 } // namespace v8 4015 } // namespace v8
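Taken together, the hunks above imply the following per-page lifecycle once WAS_SWEPT is gone: StartSweepSpace() queues the page as kSweepingPending, a sweeper moves it through kSweepingInProgress to kSweepingDone, and every caller that used to test WasSwept() now tests SweepingDone(), which is why the main-thread finalize pass ParallelSweepSpaceComplete() could be deleted. A schematic walkthrough in terms of the hypothetical PageSketch model from the top of this review; SweepingLifecycleSketch is illustrative only.

#include <cassert>

void SweepingLifecycleSketch(PageSketch* page) {
  // StartSweepSpace(): the page is queued on the space's sweeping_list.
  page->set_concurrent_sweeping_state(PageSketch::kSweepingPending);
  assert(!page->SweepingDone());

  // Sweep()/SweepInParallel(): a sweeper (main thread or background task)
  // claims the page, rebuilds its free list, and finishes the state machine
  // itself instead of leaving a kSweepingFinalize step for the main thread.
  page->set_concurrent_sweeping_state(PageSketch::kSweepingInProgress);
  page->set_concurrent_sweeping_state(PageSketch::kSweepingDone);

  // Callers that used to test WasSwept() now test SweepingDone().
  assert(page->SweepingDone());
}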