| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 299 matching lines...) | |
| 310 ClearNonLiveReferences(); | 310 ClearNonLiveReferences(); |
| 311 | 311 |
| 312 RecordObjectStats(); | 312 RecordObjectStats(); |
| 313 | 313 |
| 314 #ifdef VERIFY_HEAP | 314 #ifdef VERIFY_HEAP |
| 315 if (FLAG_verify_heap) { | 315 if (FLAG_verify_heap) { |
| 316 VerifyMarking(heap_); | 316 VerifyMarking(heap_); |
| 317 } | 317 } |
| 318 #endif | 318 #endif |
| 319 | 319 |
| 320 StartSweepSpaces(); | 320 SweepSpaces(); |
| 321 | 321 |
| 322 EvacuateNewSpaceAndCandidates(); | 322 EvacuateNewSpaceAndCandidates(); |
| 323 | 323 |
| 324 Finish(); | 324 Finish(); |
| 325 } | 325 } |
| 326 | 326 |
| 327 | 327 |
| 328 #ifdef VERIFY_HEAP | 328 #ifdef VERIFY_HEAP |
| 329 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { | 329 void MarkCompactCollector::VerifyMarkbitsAreClean(PagedSpace* space) { |
| 330 for (Page* p : *space) { | 330 for (Page* p : *space) { |
| (...skipping 109 matching lines...) | |
| 440 | 440 |
| 441 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 441 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
| 442 }; | 442 }; |
| 443 | 443 |
| 444 void MarkCompactCollector::Sweeper::StartSweeping() { | 444 void MarkCompactCollector::Sweeper::StartSweeping() { |
| 445 sweeping_in_progress_ = true; | 445 sweeping_in_progress_ = true; |
| 446 ForAllSweepingSpaces([this](AllocationSpace space) { | 446 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 447 std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(), | 447 std::sort(sweeping_list_[space].begin(), sweeping_list_[space].end(), |
| 448 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); | 448 [](Page* a, Page* b) { return a->LiveBytes() < b->LiveBytes(); }); |
| 449 }); | 449 }); |
| | 450 if (FLAG_concurrent_sweeping) { |
| | 451 ForAllSweepingSpaces([this](AllocationSpace space) { |
| | 452 if (space == NEW_SPACE) return; |
| | 453 StartSweepingHelper(space); |
| | 454 }); |
| | 455 } |
| 450 } | 456 } |
| 451 | 457 |
| 452 void MarkCompactCollector::Sweeper::StartSweeperTasks() { | 458 void MarkCompactCollector::Sweeper::StartSweepingHelper( |
| 453 if (FLAG_concurrent_sweeping && sweeping_in_progress_) { | 459 AllocationSpace space_to_start) { |
| 454 ForAllSweepingSpaces([this](AllocationSpace space) { | 460 num_sweeping_tasks_.Increment(1); |
| 455 if (space == NEW_SPACE) return; | 461 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 456 num_sweeping_tasks_.Increment(1); | 462 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space_to_start), |
| 457 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 463 v8::Platform::kShortRunningTask); |
| 458 new SweeperTask(this, &pending_sweeper_tasks_semaphore_, space), | |
| 459 v8::Platform::kShortRunningTask); | |
| 460 }); | |
| 461 } | |
| 462 } | 464 } |
| 463 | 465 |
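The `StartSweeping` hunk above sorts each space's sweeping list so that pages with the fewest live bytes come first: mostly-empty pages return the largest amount of free memory for the least sweeping work. A minimal standalone sketch of that ordering, assuming a simplified page type (`FakePage` and its `live_bytes` field are illustrative stand-ins, not the real `Page` interface):

```cpp
#include <algorithm>
#include <vector>

// Stand-in for v8::internal::Page, reduced to the one field the comparator reads.
struct FakePage {
  int live_bytes;
};

// Same ordering as the lambda passed to std::sort in StartSweeping():
// ascending by live bytes, so the emptiest pages are swept first.
void SortSweepingList(std::vector<FakePage*>* list) {
  std::sort(list->begin(), list->end(),
            [](const FakePage* a, const FakePage* b) {
              return a->live_bytes < b->live_bytes;
            });
}
```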
| 464 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( | 466 void MarkCompactCollector::Sweeper::SweepOrWaitUntilSweepingCompleted( |
| 465 Page* page) { | 467 Page* page) { |
| 466 if (!page->SweepingDone()) { | 468 if (!page->SweepingDone()) { |
| 467 ParallelSweepPage(page, page->owner()->identity()); | 469 ParallelSweepPage(page, page->owner()->identity()); |
| 468 if (!page->SweepingDone()) { | 470 if (!page->SweepingDone()) { |
| 469 // We were not able to sweep that page, i.e., a concurrent | 471 // We were not able to sweep that page, i.e., a concurrent |
| 470 // sweeper thread currently owns this page. Wait for the sweeper | 472 // sweeper thread currently owns this page. Wait for the sweeper |
| 471 // thread to be done with this page. | 473 // thread to be done with this page. |
| 472 page->WaitUntilSweepingCompleted(); | 474 page->WaitUntilSweepingCompleted(); |
| 473 } | 475 } |
| 474 } | 476 } |
| 475 } | 477 } |
| 476 | 478 |
| 477 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { | 479 void MarkCompactCollector::SweepAndRefill(CompactionSpace* space) { |
| 478 if (FLAG_concurrent_sweeping && | 480 if (FLAG_concurrent_sweeping && !sweeper().IsSweepingCompleted()) { |
| 479 !sweeper().IsSweepingCompleted(space->identity())) { | |
| 480 sweeper().ParallelSweepSpace(space->identity(), 0); | 481 sweeper().ParallelSweepSpace(space->identity(), 0); |
| 481 space->RefillFreeList(); | 482 space->RefillFreeList(); |
| 482 } | 483 } |
| 483 } | 484 } |
| 484 | 485 |
| 485 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) { | 486 Page* MarkCompactCollector::Sweeper::GetSweptPageSafe(PagedSpace* space) { |
| 486 base::LockGuard<base::Mutex> guard(&mutex_); | 487 base::LockGuard<base::Mutex> guard(&mutex_); |
| 487 SweptList& list = swept_list_[space->identity()]; | 488 SweptList& list = swept_list_[space->identity()]; |
| 488 if (list.length() > 0) { | 489 if (list.length() > 0) { |
| 489 return list.RemoveLast(); | 490 return list.RemoveLast(); |
| 490 } | 491 } |
| 491 return nullptr; | 492 return nullptr; |
| 492 } | 493 } |
| 493 | 494 |
| 494 void MarkCompactCollector::Sweeper::EnsureCompleted() { | 495 void MarkCompactCollector::Sweeper::EnsureCompleted() { |
| 495 if (!sweeping_in_progress_) return; | 496 if (!sweeping_in_progress_) return; |
| 496 | 497 |
| 497 // If sweeping is not completed or not running at all, we try to complete it | 498 // If sweeping is not completed or not running at all, we try to complete it |
| 498 // here. | 499 // here. |
| 499 ForAllSweepingSpaces([this](AllocationSpace space) { | 500 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
| 500 if (!FLAG_concurrent_sweeping || !this->IsSweepingCompleted(space)) { | 501 ForAllSweepingSpaces( |
| 501 ParallelSweepSpace(space, 0); | 502 [this](AllocationSpace space) { ParallelSweepSpace(space, 0); }); |
| 502 } | 503 } |
| 503 }); | |
| 504 | 504 |
| 505 if (FLAG_concurrent_sweeping) { | 505 if (FLAG_concurrent_sweeping) { |
| 506 while (num_sweeping_tasks_.Value() > 0) { | 506 while (num_sweeping_tasks_.Value() > 0) { |
| 507 pending_sweeper_tasks_semaphore_.Wait(); | 507 pending_sweeper_tasks_semaphore_.Wait(); |
| 508 num_sweeping_tasks_.Increment(-1); | 508 num_sweeping_tasks_.Increment(-1); |
| 509 } | 509 } |
| 510 } | 510 } |
| 511 | 511 |
| 512 ForAllSweepingSpaces([this](AllocationSpace space) { | 512 ForAllSweepingSpaces([this](AllocationSpace space) { |
| 513 if (space == NEW_SPACE) { | 513 if (space == NEW_SPACE) { |
| 514 swept_list_[NEW_SPACE].Clear(); | 514 swept_list_[NEW_SPACE].Clear(); |
| 515 } | 515 } |
| 516 DCHECK(sweeping_list_[space].empty()); | 516 DCHECK(sweeping_list_[space].empty()); |
| 517 }); | 517 }); |
| | 518 late_pages_ = false; |
| 518 sweeping_in_progress_ = false; | 519 sweeping_in_progress_ = false; |
| 519 } | 520 } |
| 520 | 521 |
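The completion handshake in `EnsureCompleted` pairs the `num_sweeping_tasks_` counter with `pending_sweeper_tasks_semaphore_`: each background sweeper task is expected to signal the semaphore when it finishes, and the main thread then waits exactly once per started task. Below is a simplified model of that pattern using standard C++20 primitives; V8 itself uses its own `base::Semaphore`, `base::AtomicNumber`, and platform task runner, so all names and types here are illustrative only.

```cpp
#include <atomic>
#include <semaphore>  // C++20
#include <thread>
#include <vector>

std::counting_semaphore<64> pending_tasks{0};  // signalled once per finished task
std::atomic<int> num_tasks{0};                 // incremented once per started task

void SweeperTaskBody() {
  // ... sweep pages from the per-space sweeping lists ...
  pending_tasks.release();  // signal completion, as a sweeper task presumably does
}

void StartOneTask(std::vector<std::thread>* pool) {
  num_tasks.fetch_add(1);
  pool->emplace_back(SweeperTaskBody);
}

void EnsureCompleted(std::vector<std::thread>* pool) {
  // Wait once per started task; each wait consumes one completion signal.
  while (num_tasks.load() > 0) {
    pending_tasks.acquire();
    num_tasks.fetch_sub(1);
  }
  for (std::thread& t : *pool) t.join();
}

int main() {
  std::vector<std::thread> pool;
  StartOneTask(&pool);
  StartOneTask(&pool);
  EnsureCompleted(&pool);
  return 0;
}
```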
| 521 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { | 522 void MarkCompactCollector::Sweeper::EnsureNewSpaceCompleted() { |
| 522 if (!sweeping_in_progress_) return; | 523 if (!sweeping_in_progress_) return; |
| 523 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted(NEW_SPACE)) { | 524 if (!FLAG_concurrent_sweeping || !IsSweepingCompleted()) { |
| 524 for (Page* p : *heap_->new_space()) { | 525 for (Page* p : *heap_->new_space()) { |
| 525 SweepOrWaitUntilSweepingCompleted(p); | 526 SweepOrWaitUntilSweepingCompleted(p); |
| 526 } | 527 } |
| 527 } | 528 } |
| 528 } | 529 } |
| 529 | 530 |
| 530 void MarkCompactCollector::EnsureSweepingCompleted() { | 531 void MarkCompactCollector::EnsureSweepingCompleted() { |
| 531 if (!sweeper().sweeping_in_progress()) return; | 532 if (!sweeper().sweeping_in_progress()) return; |
| 532 | 533 |
| 533 sweeper().EnsureCompleted(); | 534 sweeper().EnsureCompleted(); |
| 534 heap()->old_space()->RefillFreeList(); | 535 heap()->old_space()->RefillFreeList(); |
| 535 heap()->code_space()->RefillFreeList(); | 536 heap()->code_space()->RefillFreeList(); |
| 536 heap()->map_space()->RefillFreeList(); | 537 heap()->map_space()->RefillFreeList(); |
| 537 | 538 |
| 538 #ifdef VERIFY_HEAP | 539 #ifdef VERIFY_HEAP |
| 539 if (FLAG_verify_heap && !evacuation()) { | 540 if (FLAG_verify_heap && !evacuation()) { |
| 540 VerifyEvacuation(heap_); | 541 VerifyEvacuation(heap_); |
| 541 } | 542 } |
| 542 #endif | 543 #endif |
| 543 } | 544 } |
| 544 | 545 |
| 545 bool MarkCompactCollector::Sweeper::AreSweeperTasksRunning() { | 546 bool MarkCompactCollector::Sweeper::IsSweepingCompleted() { |
| 546 DCHECK(FLAG_concurrent_sweeping); | 547 DCHECK(FLAG_concurrent_sweeping); |
| 547 while (pending_sweeper_tasks_semaphore_.WaitFor( | 548 while (pending_sweeper_tasks_semaphore_.WaitFor( |
| 548 base::TimeDelta::FromSeconds(0))) { | 549 base::TimeDelta::FromSeconds(0))) { |
| 549 num_sweeping_tasks_.Increment(-1); | 550 num_sweeping_tasks_.Increment(-1); |
| 550 } | 551 } |
| 551 return num_sweeping_tasks_.Value() != 0; | 552 return num_sweeping_tasks_.Value() == 0; |
| 552 } | |
| 553 | |
| 554 bool MarkCompactCollector::Sweeper::IsSweepingCompleted(AllocationSpace space) { | |
| 555 DCHECK(FLAG_concurrent_sweeping); | |
| 556 if (AreSweeperTasksRunning()) return false; | |
| 557 base::LockGuard<base::Mutex> guard(&mutex_); | |
| 558 return sweeping_list_[space].empty(); | |
| 559 } | 553 } |
| 560 | 554 |
| 561 const char* AllocationSpaceName(AllocationSpace space) { | 555 const char* AllocationSpaceName(AllocationSpace space) { |
| 562 switch (space) { | 556 switch (space) { |
| 563 case NEW_SPACE: | 557 case NEW_SPACE: |
| 564 return "NEW_SPACE"; | 558 return "NEW_SPACE"; |
| 565 case OLD_SPACE: | 559 case OLD_SPACE: |
| 566 return "OLD_SPACE"; | 560 return "OLD_SPACE"; |
| 567 case CODE_SPACE: | 561 case CODE_SPACE: |
| 568 return "CODE_SPACE"; | 562 return "CODE_SPACE"; |
| (...skipping 263 matching lines...) | |
| 832 if (!was_marked_incrementally_ && FLAG_verify_heap) { | 826 if (!was_marked_incrementally_ && FLAG_verify_heap) { |
| 833 VerifyMarkbitsAreClean(); | 827 VerifyMarkbitsAreClean(); |
| 834 } | 828 } |
| 835 #endif | 829 #endif |
| 836 } | 830 } |
| 837 | 831 |
| 838 | 832 |
| 839 void MarkCompactCollector::Finish() { | 833 void MarkCompactCollector::Finish() { |
| 840 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH); | 834 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_FINISH); |
| 841 | 835 |
| 842 sweeper().StartSweeperTasks(); | 836 if (sweeper().contains_late_pages() && FLAG_concurrent_sweeping) { |
| | 837 // If we added some more pages during MC, we need to start at least one |
| | 838 // more task as all other tasks might already be finished. |
| | 839 sweeper().StartSweepingHelper(OLD_SPACE); |
| | 840 } |
| 843 | 841 |
| 844 // The hashing of weak_object_to_code_table is no longer valid. | 842 // The hashing of weak_object_to_code_table is no longer valid. |
| 845 heap()->weak_object_to_code_table()->Rehash( | 843 heap()->weak_object_to_code_table()->Rehash( |
| 846 heap()->isolate()->factory()->undefined_value()); | 844 heap()->isolate()->factory()->undefined_value()); |
| 847 | 845 |
| 848 // Clear the marking state of live large objects. | 846 // Clear the marking state of live large objects. |
| 849 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); | 847 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); |
| 850 | 848 |
| 851 #ifdef DEBUG | 849 #ifdef DEBUG |
| 852 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); | 850 DCHECK(state_ == SWEEP_SPACES || state_ == RELOCATE_OBJECTS); |
| (...skipping 2174 matching lines...) | |
| 3027 } | 3025 } |
| 3028 | 3026 |
| 3029 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | 3027 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
| 3030 intptr_t live_bytes) { | 3028 intptr_t live_bytes) { |
| 3031 if (!FLAG_parallel_compaction) return 1; | 3029 if (!FLAG_parallel_compaction) return 1; |
| 3032 // Compute the number of needed tasks based on a target compaction time, the | 3030 // Compute the number of needed tasks based on a target compaction time, the |
| 3033 // profiled compaction speed and marked live memory. | 3031 // profiled compaction speed and marked live memory. |
| 3034 // | 3032 // |
| 3035 // The number of parallel compaction tasks is limited by: | 3033 // The number of parallel compaction tasks is limited by: |
| 3036 // - #evacuation pages | 3034 // - #evacuation pages |
| 3037 // - #cores | 3035 // - (#cores - 1) |
| 3038 const double kTargetCompactionTimeInMs = .5; | 3036 const double kTargetCompactionTimeInMs = .5; |
| | 3037 const int kNumSweepingTasks = 3; |
| 3039 | 3038 |
| 3040 double compaction_speed = | 3039 double compaction_speed = |
| 3041 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3040 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
| 3042 | 3041 |
| 3043 const int available_cores = Max( | 3042 const int available_cores = Max( |
| 3044 1, static_cast<int>( | 3043 1, static_cast<int>( |
| 3045 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads())); | 3044 V8::GetCurrentPlatform()->NumberOfAvailableBackgroundThreads()) - |
| | 3045 kNumSweepingTasks - 1); |
| 3046 int tasks; | 3046 int tasks; |
| 3047 if (compaction_speed > 0) { | 3047 if (compaction_speed > 0) { |
| 3048 tasks = 1 + static_cast<int>(live_bytes / compaction_speed / | 3048 tasks = 1 + static_cast<int>(live_bytes / compaction_speed / |
| 3049 kTargetCompactionTimeInMs); | 3049 kTargetCompactionTimeInMs); |
| 3050 } else { | 3050 } else { |
| 3051 tasks = pages; | 3051 tasks = pages; |
| 3052 } | 3052 } |
| 3053 const int tasks_capped_pages = Min(pages, tasks); | 3053 const int tasks_capped_pages = Min(pages, tasks); |
| 3054 return Min(available_cores, tasks_capped_pages); | 3054 return Min(available_cores, tasks_capped_pages); |
| 3055 } | 3055 } |
| (...skipping 366 matching lines...) | |
| 3422 // still contain stale pointers. We only free the chunks after pointer updates | 3422 // still contain stale pointers. We only free the chunks after pointer updates |
| 3423 // to still have access to page headers. | 3423 // to still have access to page headers. |
| 3424 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); | 3424 heap()->memory_allocator()->unmapper()->FreeQueuedChunks(); |
| 3425 | 3425 |
| 3426 { | 3426 { |
| 3427 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3427 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
| 3428 | 3428 |
| 3429 for (Page* p : newspace_evacuation_candidates_) { | 3429 for (Page* p : newspace_evacuation_candidates_) { |
| 3430 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { | 3430 if (p->IsFlagSet(Page::PAGE_NEW_NEW_PROMOTION)) { |
| 3431 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); | 3431 p->ClearFlag(Page::PAGE_NEW_NEW_PROMOTION); |
| 3432 sweeper().AddPage(p->owner()->identity(), p); | 3432 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3433 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { | 3433 } else if (p->IsFlagSet(Page::PAGE_NEW_OLD_PROMOTION)) { |
| 3434 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); | 3434 p->ClearFlag(Page::PAGE_NEW_OLD_PROMOTION); |
| 3435 p->ForAllFreeListCategories( | 3435 p->ForAllFreeListCategories( |
| 3436 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); | 3436 [](FreeListCategory* category) { DCHECK(!category->is_linked()); }); |
| 3437 sweeper().AddPage(p->owner()->identity(), p); | 3437 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3438 } | 3438 } |
| 3439 } | 3439 } |
| 3440 newspace_evacuation_candidates_.Rewind(0); | 3440 newspace_evacuation_candidates_.Rewind(0); |
| 3441 | 3441 |
| 3442 for (Page* p : evacuation_candidates_) { | 3442 for (Page* p : evacuation_candidates_) { |
| 3443 // Important: skip list should be cleared only after roots were updated | 3443 // Important: skip list should be cleared only after roots were updated |
| 3444 // because root iteration traverses the stack and might have to find | 3444 // because root iteration traverses the stack and might have to find |
| 3445 // code objects from non-updated pc pointing into evacuation candidate. | 3445 // code objects from non-updated pc pointing into evacuation candidate. |
| 3446 SkipList* list = p->skip_list(); | 3446 SkipList* list = p->skip_list(); |
| 3447 if (list != NULL) list->Clear(); | 3447 if (list != NULL) list->Clear(); |
| 3448 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3448 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
| 3449 sweeper().AddPage(p->owner()->identity(), p); | 3449 sweeper().AddLatePage(p->owner()->identity(), p); |
| 3450 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); | 3450 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
| 3451 } | 3451 } |
| 3452 } | 3452 } |
| 3453 | 3453 |
| 3454 // Deallocate evacuated candidate pages. | 3454 // Deallocate evacuated candidate pages. |
| 3455 ReleaseEvacuationCandidates(); | 3455 ReleaseEvacuationCandidates(); |
| 3456 } | 3456 } |
| 3457 | 3457 |
| 3458 #ifdef VERIFY_HEAP | 3458 #ifdef VERIFY_HEAP |
| 3459 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { | 3459 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { |
| (...skipping 278 matching lines...) | |
| 3738 base::LockGuard<base::Mutex> guard(&mutex_); | 3738 base::LockGuard<base::Mutex> guard(&mutex_); |
| 3739 swept_list_[identity].Add(page); | 3739 swept_list_[identity].Add(page); |
| 3740 } | 3740 } |
| 3741 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3741 page->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
| 3742 page->mutex()->Unlock(); | 3742 page->mutex()->Unlock(); |
| 3743 } | 3743 } |
| 3744 return max_freed; | 3744 return max_freed; |
| 3745 } | 3745 } |
| 3746 | 3746 |
| 3747 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { | 3747 void MarkCompactCollector::Sweeper::AddPage(AllocationSpace space, Page* page) { |
| 3748 DCHECK(!FLAG_concurrent_sweeping || !AreSweeperTasksRunning()); | 3748 DCHECK(!sweeping_in_progress_); |
| 3749 PrepareToBeSweptPage(space, page); | 3749 PrepareToBeSweptPage(space, page); |
| 3750 sweeping_list_[space].push_back(page); | 3750 sweeping_list_[space].push_back(page); |
| 3751 } | 3751 } |
| 3752 | 3752 |
| | 3753 void MarkCompactCollector::Sweeper::AddLatePage(AllocationSpace space, |
| | 3754 Page* page) { |
| | 3755 DCHECK(sweeping_in_progress_); |
| | 3756 PrepareToBeSweptPage(space, page); |
| | 3757 late_pages_ = true; |
| | 3758 AddSweepingPageSafe(space, page); |
| | 3759 } |
| | 3760 |
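The split between `AddPage` and the reintroduced `AddLatePage` above encodes a threading invariant: `AddPage` may only run before sweeping starts (`DCHECK(!sweeping_in_progress_)`) and appends to the list directly, whereas `AddLatePage` runs while background tasks are already draining the same per-space lists, so it must append through the mutex-protected `AddSweepingPageSafe` and record `late_pages_` so that `Finish()` knows to start another task. A minimal sketch of the locked append; the container and types are simplified stand-ins, not V8's declarations:

```cpp
#include <mutex>
#include <vector>

struct Page;  // opaque stand-in

struct SweepingList {
  std::mutex mutex;
  std::vector<Page*> pages;

  // Late additions race with sweeper tasks popping from the same list,
  // so the append has to hold the sweeper mutex.
  void AddSweepingPageSafe(Page* page) {
    std::lock_guard<std::mutex> guard(mutex);
    pages.push_back(page);
  }
};
```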
| 3753 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, | 3761 void MarkCompactCollector::Sweeper::PrepareToBeSweptPage(AllocationSpace space, |
| 3754 Page* page) { | 3762 Page* page) { |
| 3755 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); | 3763 page->concurrent_sweeping_state().SetValue(Page::kSweepingPending); |
| 3756 DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes())); | 3764 DCHECK_GE(page->area_size(), static_cast<size_t>(page->LiveBytes())); |
| 3757 size_t to_sweep = page->area_size() - page->LiveBytes(); | 3765 size_t to_sweep = page->area_size() - page->LiveBytes(); |
| 3758 if (space != NEW_SPACE) | 3766 if (space != NEW_SPACE) |
| 3759 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); | 3767 heap_->paged_space(space)->accounting_stats_.ShrinkSpace(to_sweep); |
| 3760 } | 3768 } |
| 3761 | 3769 |
| 3762 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( | 3770 Page* MarkCompactCollector::Sweeper::GetSweepingPageSafe( |
| (...skipping 59 matching lines...) | |
| 3822 sweeper().AddPage(space->identity(), p); | 3830 sweeper().AddPage(space->identity(), p); |
| 3823 will_be_swept++; | 3831 will_be_swept++; |
| 3824 } | 3832 } |
| 3825 | 3833 |
| 3826 if (FLAG_gc_verbose) { | 3834 if (FLAG_gc_verbose) { |
| 3827 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", | 3835 PrintIsolate(isolate(), "sweeping: space=%s initialized_for_sweeping=%d", |
| 3828 AllocationSpaceName(space->identity()), will_be_swept); | 3836 AllocationSpaceName(space->identity()), will_be_swept); |
| 3829 } | 3837 } |
| 3830 } | 3838 } |
| 3831 | 3839 |
| 3832 void MarkCompactCollector::StartSweepSpaces() { | 3840 void MarkCompactCollector::SweepSpaces() { |
| 3833 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); | 3841 TRACE_GC(heap()->tracer(), GCTracer::Scope::MC_SWEEP); |
| 3834 #ifdef DEBUG | 3842 #ifdef DEBUG |
| 3835 state_ = SWEEP_SPACES; | 3843 state_ = SWEEP_SPACES; |
| 3836 #endif | 3844 #endif |
| 3837 | 3845 |
| 3838 { | 3846 { |
| 3839 { | 3847 { |
| 3840 GCTracer::Scope sweep_scope(heap()->tracer(), | 3848 GCTracer::Scope sweep_scope(heap()->tracer(), |
| 3841 GCTracer::Scope::MC_SWEEP_OLD); | 3849 GCTracer::Scope::MC_SWEEP_OLD); |
| 3842 StartSweepSpace(heap()->old_space()); | 3850 StartSweepSpace(heap()->old_space()); |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3892 // The target is always in old space, we don't have to record the slot in | 3900 // The target is always in old space, we don't have to record the slot in |
| 3893 // the old-to-new remembered set. | 3901 // the old-to-new remembered set. |
| 3894 DCHECK(!heap()->InNewSpace(target)); | 3902 DCHECK(!heap()->InNewSpace(target)); |
| 3895 RecordRelocSlot(host, &rinfo, target); | 3903 RecordRelocSlot(host, &rinfo, target); |
| 3896 } | 3904 } |
| 3897 } | 3905 } |
| 3898 } | 3906 } |
| 3899 | 3907 |
| 3900 } // namespace internal | 3908 } // namespace internal |
| 3901 } // namespace v8 | 3909 } // namespace v8 |