| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/code-stubs.h" | 8 #include "src/code-stubs.h" |
| 9 #include "src/compilation-cache.h" | 9 #include "src/compilation-cache.h" |
| 10 #include "src/cpu-profiler.h" | 10 #include "src/cpu-profiler.h" |
| (...skipping 27 matching lines...) Expand all Loading... |
| 38 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT | 38 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT |
| 39 #ifdef DEBUG | 39 #ifdef DEBUG |
| 40 state_(IDLE), | 40 state_(IDLE), |
| 41 #endif | 41 #endif |
| 42 sweep_precisely_(false), | 42 sweep_precisely_(false), |
| 43 reduce_memory_footprint_(false), | 43 reduce_memory_footprint_(false), |
| 44 abort_incremental_marking_(false), | 44 abort_incremental_marking_(false), |
| 45 marking_parity_(ODD_MARKING_PARITY), | 45 marking_parity_(ODD_MARKING_PARITY), |
| 46 compacting_(false), | 46 compacting_(false), |
| 47 was_marked_incrementally_(false), | 47 was_marked_incrementally_(false), |
| 48 sweeping_pending_(false), | 48 sweeping_in_progress_(false), |
| 49 pending_sweeper_jobs_semaphore_(0), | 49 pending_sweeper_jobs_semaphore_(0), |
| 50 sequential_sweeping_(false), | 50 sequential_sweeping_(false), |
| 51 tracer_(NULL), | 51 tracer_(NULL), |
| 52 migration_slots_buffer_(NULL), | 52 migration_slots_buffer_(NULL), |
| 53 heap_(heap), | 53 heap_(heap), |
| 54 code_flusher_(NULL), | 54 code_flusher_(NULL), |
| 55 have_code_to_deoptimize_(false) { } | 55 have_code_to_deoptimize_(false) { } |
| 56 | 56 |
| 57 #ifdef VERIFY_HEAP | 57 #ifdef VERIFY_HEAP |
| 58 class VerifyMarkingVisitor: public ObjectVisitor { | 58 class VerifyMarkingVisitor: public ObjectVisitor { |
| (...skipping 507 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 566 Heap* heap_; | 566 Heap* heap_; |
| 567 PagedSpace* space_; | 567 PagedSpace* space_; |
| 568 | 568 |
| 569 DISALLOW_COPY_AND_ASSIGN(SweeperTask); | 569 DISALLOW_COPY_AND_ASSIGN(SweeperTask); |
| 570 }; | 570 }; |
| 571 | 571 |
| 572 | 572 |
| 573 void MarkCompactCollector::StartSweeperThreads() { | 573 void MarkCompactCollector::StartSweeperThreads() { |
| 574 ASSERT(free_list_old_pointer_space_.get()->IsEmpty()); | 574 ASSERT(free_list_old_pointer_space_.get()->IsEmpty()); |
| 575 ASSERT(free_list_old_data_space_.get()->IsEmpty()); | 575 ASSERT(free_list_old_data_space_.get()->IsEmpty()); |
| 576 sweeping_pending_ = true; | 576 sweeping_in_progress_ = true; |
| 577 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { | 577 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { |
| 578 isolate()->sweeper_threads()[i]->StartSweeping(); | 578 isolate()->sweeper_threads()[i]->StartSweeping(); |
| 579 } | 579 } |
| 580 if (FLAG_job_based_sweeping) { | 580 if (FLAG_job_based_sweeping) { |
| 581 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 581 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 582 new SweeperTask(heap(), heap()->old_data_space()), | 582 new SweeperTask(heap(), heap()->old_data_space()), |
| 583 v8::Platform::kShortRunningTask); | 583 v8::Platform::kShortRunningTask); |
| 584 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 584 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 585 new SweeperTask(heap(), heap()->old_pointer_space()), | 585 new SweeperTask(heap(), heap()->old_pointer_space()), |
| 586 v8::Platform::kShortRunningTask); | 586 v8::Platform::kShortRunningTask); |
| 587 } | 587 } |
| 588 } | 588 } |
| 589 | 589 |
| 590 | 590 |
| 591 void MarkCompactCollector::WaitUntilSweepingCompleted() { | 591 void MarkCompactCollector::EnsureSweepingCompleted() { |
| 592 ASSERT(sweeping_pending_ == true); | 592 ASSERT(sweeping_in_progress_ == true); |
| 593 |
| 594 // If sweeping is not completed, we try to complete it here. If we do not |
| 595 // have sweeper threads, we have to complete sweeping here, since otherwise |
| 596 // we do not have a good indicator for a swept space. |
| 597 if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) { |
| 598 SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0); |
| 599 SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0); |
| 600 } |
| 601 |
| 593 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { | 602 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { |
| 594 isolate()->sweeper_threads()[i]->WaitForSweeperThread(); | 603 isolate()->sweeper_threads()[i]->WaitForSweeperThread(); |
| 595 } | 604 } |
| 596 if (FLAG_job_based_sweeping) { | 605 if (FLAG_job_based_sweeping) { |
| 597 // Wait twice for both jobs. | 606 // Wait twice for both jobs. |
| 598 pending_sweeper_jobs_semaphore_.Wait(); | 607 pending_sweeper_jobs_semaphore_.Wait(); |
| 599 pending_sweeper_jobs_semaphore_.Wait(); | 608 pending_sweeper_jobs_semaphore_.Wait(); |
| 600 } | 609 } |
| 601 ParallelSweepSpacesComplete(); | 610 ParallelSweepSpacesComplete(); |
| 602 sweeping_pending_ = false; | 611 sweeping_in_progress_ = false; |
| 603 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE)); | 612 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE)); |
| 604 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE)); | 613 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE)); |
| 605 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); | 614 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); |
| 606 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); | 615 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); |
| 607 } | 616 } |
| 608 | 617 |
| 609 | 618 |
| 610 bool MarkCompactCollector::IsSweepingCompleted() { | 619 bool MarkCompactCollector::IsSweepingCompleted() { |
| 611 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { | 620 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { |
| 612 if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) { | 621 if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) { |
| 613 return false; | 622 return false; |
| 614 } | 623 } |
| 615 } | 624 } |
| 625 |
| 616 if (FLAG_job_based_sweeping) { | 626 if (FLAG_job_based_sweeping) { |
| 617 if (!pending_sweeper_jobs_semaphore_.WaitFor( | 627 if (!pending_sweeper_jobs_semaphore_.WaitFor( |
| 618 base::TimeDelta::FromSeconds(0))) { | 628 base::TimeDelta::FromSeconds(0))) { |
| 619 return false; | 629 return false; |
| 620 } | 630 } |
| 621 pending_sweeper_jobs_semaphore_.Signal(); | 631 pending_sweeper_jobs_semaphore_.Signal(); |
| 622 } | 632 } |
| 633 |
| 623 return true; | 634 return true; |
| 624 } | 635 } |
| 625 | 636 |
| 626 | 637 |
| 627 void MarkCompactCollector::RefillFreeList(PagedSpace* space) { | 638 void MarkCompactCollector::RefillFreeList(PagedSpace* space) { |
| 628 FreeList* free_list; | 639 FreeList* free_list; |
| 629 | 640 |
| 630 if (space == heap()->old_pointer_space()) { | 641 if (space == heap()->old_pointer_space()) { |
| 631 free_list = free_list_old_pointer_space_.get(); | 642 free_list = free_list_old_pointer_space_.get(); |
| 632 } else if (space == heap()->old_data_space()) { | 643 } else if (space == heap()->old_data_space()) { |
| 633 free_list = free_list_old_data_space_.get(); | 644 free_list = free_list_old_data_space_.get(); |
| 634 } else { | 645 } else { |
| 635 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure | 646 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure |
| 636 // to only refill them for old data and pointer spaces. | 647 // to only refill them for old data and pointer spaces. |
| 637 return; | 648 return; |
| 638 } | 649 } |
| 639 | 650 |
| 640 intptr_t freed_bytes = space->free_list()->Concatenate(free_list); | 651 intptr_t freed_bytes = space->free_list()->Concatenate(free_list); |
| 641 space->AddToAccountingStats(freed_bytes); | 652 space->AddToAccountingStats(freed_bytes); |
| 642 space->DecrementUnsweptFreeBytes(freed_bytes); | 653 space->DecrementUnsweptFreeBytes(freed_bytes); |
| 643 } | 654 } |
| 644 | 655 |
| 645 | 656 |
| 646 bool MarkCompactCollector::AreSweeperThreadsActivated() { | 657 bool MarkCompactCollector::AreSweeperThreadsActivated() { |
| 647 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping; | 658 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping; |
| 648 } | 659 } |
| 649 | 660 |
| 650 | 661 |
| 651 bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) { | |
| 652 return (space == NULL || space->is_swept_concurrently()) && | |
| 653 sweeping_pending_; | |
| 654 } | |
| 655 | |
| 656 | |
| 657 void Marking::TransferMark(Address old_start, Address new_start) { | 662 void Marking::TransferMark(Address old_start, Address new_start) { |
| 658 // This is only used when resizing an object. | 663 // This is only used when resizing an object. |
| 659 ASSERT(MemoryChunk::FromAddress(old_start) == | 664 ASSERT(MemoryChunk::FromAddress(old_start) == |
| 660 MemoryChunk::FromAddress(new_start)); | 665 MemoryChunk::FromAddress(new_start)); |
| 661 | 666 |
| 662 if (!heap_->incremental_marking()->IsMarking()) return; | 667 if (!heap_->incremental_marking()->IsMarking()) return; |
| 663 | 668 |
| 664 // If the mark doesn't move, we don't check the color of the object. | 669 // If the mark doesn't move, we don't check the color of the object. |
| 665 // It doesn't matter whether the object is black, since it hasn't changed | 670 // It doesn't matter whether the object is black, since it hasn't changed |
| 666 // size, so the adjustment to the live data count will be zero anyway. | 671 // size, so the adjustment to the live data count will be zero anyway. |
| (...skipping 285 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 952 // variable. | 957 // variable. |
| 953 tracer_ = tracer; | 958 tracer_ = tracer; |
| 954 | 959 |
| 955 #ifdef DEBUG | 960 #ifdef DEBUG |
| 956 ASSERT(state_ == IDLE); | 961 ASSERT(state_ == IDLE); |
| 957 state_ = PREPARE_GC; | 962 state_ = PREPARE_GC; |
| 958 #endif | 963 #endif |
| 959 | 964 |
| 960 ASSERT(!FLAG_never_compact || !FLAG_always_compact); | 965 ASSERT(!FLAG_never_compact || !FLAG_always_compact); |
| 961 | 966 |
| 962 if (IsConcurrentSweepingInProgress()) { | 967 if (sweeping_in_progress()) { |
| 963 // Instead of waiting we could also abort the sweeper threads here. | 968 // Instead of waiting we could also abort the sweeper threads here. |
| 964 WaitUntilSweepingCompleted(); | 969 EnsureSweepingCompleted(); |
| 965 } | 970 } |
| 966 | 971 |
| 967 // Clear marking bits if incremental marking is aborted. | 972 // Clear marking bits if incremental marking is aborted. |
| 968 if (was_marked_incrementally_ && abort_incremental_marking_) { | 973 if (was_marked_incrementally_ && abort_incremental_marking_) { |
| 969 heap()->incremental_marking()->Abort(); | 974 heap()->incremental_marking()->Abort(); |
| 970 ClearMarkbits(); | 975 ClearMarkbits(); |
| 971 AbortCompaction(); | 976 AbortCompaction(); |
| 972 was_marked_incrementally_ = false; | 977 was_marked_incrementally_ = false; |
| 973 } | 978 } |
| 974 | 979 |
| (...skipping 3024 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 3999 cell = it.CurrentCell(); | 4004 cell = it.CurrentCell(); |
| 4000 if (*cell != 0) break; | 4005 if (*cell != 0) break; |
| 4001 } | 4006 } |
| 4002 | 4007 |
| 4003 if (it.Done()) { | 4008 if (it.Done()) { |
| 4004 size = p->area_end() - p->area_start(); | 4009 size = p->area_end() - p->area_start(); |
| 4005 freed_bytes = Free<mode>(space, free_list, p->area_start(), | 4010 freed_bytes = Free<mode>(space, free_list, p->area_start(), |
| 4006 static_cast<int>(size)); | 4011 static_cast<int>(size)); |
| 4007 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 4012 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 4008 ASSERT_EQ(0, p->LiveBytes()); | 4013 ASSERT_EQ(0, p->LiveBytes()); |
| 4009 return freed_bytes; | 4014 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 4010 } | 4015 } |
| 4011 | 4016 |
| 4012 // Grow the size of the start-of-page free space a little to get up to the | 4017 // Grow the size of the start-of-page free space a little to get up to the |
| 4013 // first live object. | 4018 // first live object. |
| 4014 Address free_end = StartOfLiveObject(cell_base, *cell); | 4019 Address free_end = StartOfLiveObject(cell_base, *cell); |
| 4015 // Free the first free space. | 4020 // Free the first free space. |
| 4016 size = free_end - p->area_start(); | 4021 size = free_end - p->area_start(); |
| 4017 freed_bytes = Free<mode>(space, free_list, p->area_start(), | 4022 freed_bytes = Free<mode>(space, free_list, p->area_start(), |
| 4018 static_cast<int>(size)); | 4023 static_cast<int>(size)); |
| 4019 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 4024 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4056 | 4061 |
| 4057 // Handle the free space at the end of the page. | 4062 // Handle the free space at the end of the page. |
| 4058 if (cell_base - free_start > 32 * kPointerSize) { | 4063 if (cell_base - free_start > 32 * kPointerSize) { |
| 4059 free_start = DigestFreeStart(free_start, free_start_cell); | 4064 free_start = DigestFreeStart(free_start, free_start_cell); |
| 4060 freed_bytes = Free<mode>(space, free_list, free_start, | 4065 freed_bytes = Free<mode>(space, free_list, free_start, |
| 4061 static_cast<int>(p->area_end() - free_start)); | 4066 static_cast<int>(p->area_end() - free_start)); |
| 4062 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 4067 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
| 4063 } | 4068 } |
| 4064 | 4069 |
| 4065 p->ResetLiveBytes(); | 4070 p->ResetLiveBytes(); |
| 4066 return max_freed_bytes; | 4071 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
| 4067 } | 4072 } |
| 4068 | 4073 |
| 4069 | 4074 |
| 4070 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | 4075 int MarkCompactCollector::SweepInParallel(PagedSpace* space, |
| 4071 int required_freed_bytes) { | 4076 int required_freed_bytes) { |
| 4072 PageIterator it(space); | 4077 PageIterator it(space); |
| 4073 FreeList* free_list = space == heap()->old_pointer_space() | 4078 FreeList* free_list = space == heap()->old_pointer_space() |
| 4074 ? free_list_old_pointer_space_.get() | 4079 ? free_list_old_pointer_space_.get() |
| 4075 : free_list_old_data_space_.get(); | 4080 : free_list_old_data_space_.get(); |
| 4076 FreeList private_free_list(space); | 4081 FreeList private_free_list(space); |
| (...skipping 13 matching lines...) Expand all Loading... |
| 4090 max_freed_overall = Max(max_freed, max_freed_overall); | 4095 max_freed_overall = Max(max_freed, max_freed_overall); |
| 4091 } | 4096 } |
| 4092 if (p == space->end_of_unswept_pages()) break; | 4097 if (p == space->end_of_unswept_pages()) break; |
| 4093 } | 4098 } |
| 4094 return max_freed_overall; | 4099 return max_freed_overall; |
| 4095 } | 4100 } |
| 4096 | 4101 |
| 4097 | 4102 |
| 4098 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { | 4103 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| 4099 space->set_is_iterable(sweeper == PRECISE); | 4104 space->set_is_iterable(sweeper == PRECISE); |
| 4100 space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE); | |
| 4101 space->ClearStats(); | 4105 space->ClearStats(); |
| 4102 | 4106 |
| 4103 // We defensively initialize end_of_unswept_pages_ here with the first page | 4107 // We defensively initialize end_of_unswept_pages_ here with the first page |
| 4104 // of the pages list. | 4108 // of the pages list. |
| 4105 space->set_end_of_unswept_pages(space->FirstPage()); | 4109 space->set_end_of_unswept_pages(space->FirstPage()); |
| 4106 | 4110 |
| 4107 PageIterator it(space); | 4111 PageIterator it(space); |
| 4108 | 4112 |
| 4109 int pages_swept = 0; | 4113 int pages_swept = 0; |
| 4110 bool unused_page_present = false; | 4114 bool unused_page_present = false; |
| (...skipping 24 matching lines...) Expand all Loading... |
| 4135 // Adjust unswept free bytes because releasing a page expects said | 4139 // Adjust unswept free bytes because releasing a page expects said |
| 4136 // counter to be accurate for unswept pages. | 4140 // counter to be accurate for unswept pages. |
| 4137 space->IncreaseUnsweptFreeBytes(p); | 4141 space->IncreaseUnsweptFreeBytes(p); |
| 4138 space->ReleasePage(p); | 4142 space->ReleasePage(p); |
| 4139 continue; | 4143 continue; |
| 4140 } | 4144 } |
| 4141 unused_page_present = true; | 4145 unused_page_present = true; |
| 4142 } | 4146 } |
| 4143 | 4147 |
| 4144 switch (sweeper) { | 4148 switch (sweeper) { |
| 4145 case CONSERVATIVE: { | |
| 4146 if (FLAG_gc_verbose) { | |
| 4147 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | |
| 4148 reinterpret_cast<intptr_t>(p)); | |
| 4149 } | |
| 4150 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | |
| 4151 pages_swept++; | |
| 4152 break; | |
| 4153 } | |
| 4154 case CONCURRENT_CONSERVATIVE: | 4149 case CONCURRENT_CONSERVATIVE: |
| 4155 case PARALLEL_CONSERVATIVE: { | 4150 case PARALLEL_CONSERVATIVE: { |
| 4156 if (!parallel_sweeping_active) { | 4151 if (!parallel_sweeping_active) { |
| 4157 if (FLAG_gc_verbose) { | 4152 if (FLAG_gc_verbose) { |
| 4158 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | 4153 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", |
| 4159 reinterpret_cast<intptr_t>(p)); | 4154 reinterpret_cast<intptr_t>(p)); |
| 4160 } | 4155 } |
| 4161 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); | 4156 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); |
| 4162 pages_swept++; | 4157 pages_swept++; |
| 4163 parallel_sweeping_active = true; | 4158 parallel_sweeping_active = true; |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4205 // Give pages that are queued to be freed back to the OS. | 4200 // Give pages that are queued to be freed back to the OS. |
| 4206 heap()->FreeQueuedChunks(); | 4201 heap()->FreeQueuedChunks(); |
| 4207 } | 4202 } |
| 4208 | 4203 |
| 4209 | 4204 |
| 4210 void MarkCompactCollector::SweepSpaces() { | 4205 void MarkCompactCollector::SweepSpaces() { |
| 4211 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); | 4206 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); |
| 4212 #ifdef DEBUG | 4207 #ifdef DEBUG |
| 4213 state_ = SWEEP_SPACES; | 4208 state_ = SWEEP_SPACES; |
| 4214 #endif | 4209 #endif |
| 4215 SweeperType how_to_sweep = CONSERVATIVE; | 4210 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; |
| 4216 if (AreSweeperThreadsActivated()) { | 4211 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; |
| 4217 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; | 4212 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; |
| 4218 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; | 4213 |
| 4219 } | |
| 4220 if (sweep_precisely_) how_to_sweep = PRECISE; | 4214 if (sweep_precisely_) how_to_sweep = PRECISE; |
| 4221 | 4215 |
| 4222 MoveEvacuationCandidatesToEndOfPagesList(); | 4216 MoveEvacuationCandidatesToEndOfPagesList(); |
| 4223 | 4217 |
| 4224 // Noncompacting collections simply sweep the spaces to clear the mark | 4218 // Noncompacting collections simply sweep the spaces to clear the mark |
| 4225 // bits and free the nonlive blocks (for old and map spaces). We sweep | 4219 // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 4226 // the map space last because freeing non-live maps overwrites them and | 4220 // the map space last because freeing non-live maps overwrites them and |
| 4227 // the other spaces rely on possibly non-live maps to get the sizes for | 4221 // the other spaces rely on possibly non-live maps to get the sizes for |
| 4228 // non-live objects. | 4222 // non-live objects. |
| 4229 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE); | 4223 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE); |
| 4230 { SequentialSweepingScope scope(this); | 4224 { SequentialSweepingScope scope(this); |
| 4231 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | 4225 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| 4232 SweepSpace(heap()->old_data_space(), how_to_sweep); | 4226 SweepSpace(heap()->old_data_space(), how_to_sweep); |
| 4233 } | 4227 } |
| 4234 | 4228 |
| 4235 if (how_to_sweep == PARALLEL_CONSERVATIVE || | 4229 if (how_to_sweep == PARALLEL_CONSERVATIVE || |
| 4236 how_to_sweep == CONCURRENT_CONSERVATIVE) { | 4230 how_to_sweep == CONCURRENT_CONSERVATIVE) { |
| 4237 StartSweeperThreads(); | 4231 StartSweeperThreads(); |
| 4238 } | 4232 } |
| 4239 | 4233 |
| 4240 if (how_to_sweep == PARALLEL_CONSERVATIVE) { | 4234 if (how_to_sweep == PARALLEL_CONSERVATIVE) { |
| 4241 WaitUntilSweepingCompleted(); | 4235 EnsureSweepingCompleted(); |
| 4242 } | 4236 } |
| 4243 } | 4237 } |
| 4244 RemoveDeadInvalidatedCode(); | 4238 RemoveDeadInvalidatedCode(); |
| 4245 | 4239 |
| 4246 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE); | 4240 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE); |
| 4247 SweepSpace(heap()->code_space(), PRECISE); | 4241 SweepSpace(heap()->code_space(), PRECISE); |
| 4248 } | 4242 } |
| 4249 | 4243 |
| 4250 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL); | 4244 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL); |
| 4251 SweepSpace(heap()->cell_space(), PRECISE); | 4245 SweepSpace(heap()->cell_space(), PRECISE); |
| (...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 4501 while (buffer != NULL) { | 4495 while (buffer != NULL) { |
| 4502 SlotsBuffer* next_buffer = buffer->next(); | 4496 SlotsBuffer* next_buffer = buffer->next(); |
| 4503 DeallocateBuffer(buffer); | 4497 DeallocateBuffer(buffer); |
| 4504 buffer = next_buffer; | 4498 buffer = next_buffer; |
| 4505 } | 4499 } |
| 4506 *buffer_address = NULL; | 4500 *buffer_address = NULL; |
| 4507 } | 4501 } |
| 4508 | 4502 |
| 4509 | 4503 |
| 4510 } } // namespace v8::internal | 4504 } } // namespace v8::internal |
| OLD | NEW |