Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(105)

Side by Side Diff: src/mark-compact.cc

Issue 393523002: Revert "Remove sequential sweeping mode and perform lazy sweeping when no sweeper threads are active" (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/mark-compact.h ('k') | src/spaces.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/v8.h" 5 #include "src/v8.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/code-stubs.h" 8 #include "src/code-stubs.h"
9 #include "src/compilation-cache.h" 9 #include "src/compilation-cache.h"
10 #include "src/cpu-profiler.h" 10 #include "src/cpu-profiler.h"
(...skipping 27 matching lines...) Expand all
38 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT 38 MarkCompactCollector::MarkCompactCollector(Heap* heap) : // NOLINT
39 #ifdef DEBUG 39 #ifdef DEBUG
40 state_(IDLE), 40 state_(IDLE),
41 #endif 41 #endif
42 sweep_precisely_(false), 42 sweep_precisely_(false),
43 reduce_memory_footprint_(false), 43 reduce_memory_footprint_(false),
44 abort_incremental_marking_(false), 44 abort_incremental_marking_(false),
45 marking_parity_(ODD_MARKING_PARITY), 45 marking_parity_(ODD_MARKING_PARITY),
46 compacting_(false), 46 compacting_(false),
47 was_marked_incrementally_(false), 47 was_marked_incrementally_(false),
48 sweeping_in_progress_(false), 48 sweeping_pending_(false),
49 pending_sweeper_jobs_semaphore_(0), 49 pending_sweeper_jobs_semaphore_(0),
50 sequential_sweeping_(false), 50 sequential_sweeping_(false),
51 tracer_(NULL), 51 tracer_(NULL),
52 migration_slots_buffer_(NULL), 52 migration_slots_buffer_(NULL),
53 heap_(heap), 53 heap_(heap),
54 code_flusher_(NULL), 54 code_flusher_(NULL),
55 have_code_to_deoptimize_(false) { } 55 have_code_to_deoptimize_(false) { }
56 56
57 #ifdef VERIFY_HEAP 57 #ifdef VERIFY_HEAP
58 class VerifyMarkingVisitor: public ObjectVisitor { 58 class VerifyMarkingVisitor: public ObjectVisitor {
(...skipping 507 matching lines...) Expand 10 before | Expand all | Expand 10 after
566 Heap* heap_; 566 Heap* heap_;
567 PagedSpace* space_; 567 PagedSpace* space_;
568 568
569 DISALLOW_COPY_AND_ASSIGN(SweeperTask); 569 DISALLOW_COPY_AND_ASSIGN(SweeperTask);
570 }; 570 };
571 571
572 572
573 void MarkCompactCollector::StartSweeperThreads() { 573 void MarkCompactCollector::StartSweeperThreads() {
574 ASSERT(free_list_old_pointer_space_.get()->IsEmpty()); 574 ASSERT(free_list_old_pointer_space_.get()->IsEmpty());
575 ASSERT(free_list_old_data_space_.get()->IsEmpty()); 575 ASSERT(free_list_old_data_space_.get()->IsEmpty());
576 sweeping_in_progress_ = true; 576 sweeping_pending_ = true;
577 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 577 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
578 isolate()->sweeper_threads()[i]->StartSweeping(); 578 isolate()->sweeper_threads()[i]->StartSweeping();
579 } 579 }
580 if (FLAG_job_based_sweeping) { 580 if (FLAG_job_based_sweeping) {
581 V8::GetCurrentPlatform()->CallOnBackgroundThread( 581 V8::GetCurrentPlatform()->CallOnBackgroundThread(
582 new SweeperTask(heap(), heap()->old_data_space()), 582 new SweeperTask(heap(), heap()->old_data_space()),
583 v8::Platform::kShortRunningTask); 583 v8::Platform::kShortRunningTask);
584 V8::GetCurrentPlatform()->CallOnBackgroundThread( 584 V8::GetCurrentPlatform()->CallOnBackgroundThread(
585 new SweeperTask(heap(), heap()->old_pointer_space()), 585 new SweeperTask(heap(), heap()->old_pointer_space()),
586 v8::Platform::kShortRunningTask); 586 v8::Platform::kShortRunningTask);
587 } 587 }
588 } 588 }
589 589
590 590
591 void MarkCompactCollector::EnsureSweepingCompleted() { 591 void MarkCompactCollector::WaitUntilSweepingCompleted() {
592 ASSERT(sweeping_in_progress_ == true); 592 ASSERT(sweeping_pending_ == true);
593
594 // If sweeping is not completed, we try to complete it here. If we do not
595 // have sweeper threads we have to complete since we do not have a good
596 // indicator for a swept space in that case.
597 if (!AreSweeperThreadsActivated() || !IsSweepingCompleted()) {
598 SweepInParallel(heap()->paged_space(OLD_DATA_SPACE), 0);
599 SweepInParallel(heap()->paged_space(OLD_POINTER_SPACE), 0);
600 }
601
602 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 593 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
603 isolate()->sweeper_threads()[i]->WaitForSweeperThread(); 594 isolate()->sweeper_threads()[i]->WaitForSweeperThread();
604 } 595 }
605 if (FLAG_job_based_sweeping) { 596 if (FLAG_job_based_sweeping) {
606 // Wait twice for both jobs. 597 // Wait twice for both jobs.
607 pending_sweeper_jobs_semaphore_.Wait(); 598 pending_sweeper_jobs_semaphore_.Wait();
608 pending_sweeper_jobs_semaphore_.Wait(); 599 pending_sweeper_jobs_semaphore_.Wait();
609 } 600 }
610 ParallelSweepSpacesComplete(); 601 ParallelSweepSpacesComplete();
611 sweeping_in_progress_ = false; 602 sweeping_pending_ = false;
612 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE)); 603 RefillFreeList(heap()->paged_space(OLD_DATA_SPACE));
613 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE)); 604 RefillFreeList(heap()->paged_space(OLD_POINTER_SPACE));
614 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes(); 605 heap()->paged_space(OLD_DATA_SPACE)->ResetUnsweptFreeBytes();
615 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes(); 606 heap()->paged_space(OLD_POINTER_SPACE)->ResetUnsweptFreeBytes();
616 } 607 }
617 608
618 609
619 bool MarkCompactCollector::IsSweepingCompleted() { 610 bool MarkCompactCollector::IsSweepingCompleted() {
620 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) { 611 for (int i = 0; i < isolate()->num_sweeper_threads(); i++) {
621 if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) { 612 if (!isolate()->sweeper_threads()[i]->SweepingCompleted()) {
622 return false; 613 return false;
623 } 614 }
624 } 615 }
625
626 if (FLAG_job_based_sweeping) { 616 if (FLAG_job_based_sweeping) {
627 if (!pending_sweeper_jobs_semaphore_.WaitFor( 617 if (!pending_sweeper_jobs_semaphore_.WaitFor(
628 base::TimeDelta::FromSeconds(0))) { 618 base::TimeDelta::FromSeconds(0))) {
629 return false; 619 return false;
630 } 620 }
631 pending_sweeper_jobs_semaphore_.Signal(); 621 pending_sweeper_jobs_semaphore_.Signal();
632 } 622 }
633
634 return true; 623 return true;
635 } 624 }
636 625
637 626
638 void MarkCompactCollector::RefillFreeList(PagedSpace* space) { 627 void MarkCompactCollector::RefillFreeList(PagedSpace* space) {
639 FreeList* free_list; 628 FreeList* free_list;
640 629
641 if (space == heap()->old_pointer_space()) { 630 if (space == heap()->old_pointer_space()) {
642 free_list = free_list_old_pointer_space_.get(); 631 free_list = free_list_old_pointer_space_.get();
643 } else if (space == heap()->old_data_space()) { 632 } else if (space == heap()->old_data_space()) {
644 free_list = free_list_old_data_space_.get(); 633 free_list = free_list_old_data_space_.get();
645 } else { 634 } else {
646 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure 635 // Any PagedSpace might invoke RefillFreeLists, so we need to make sure
647 // to only refill them for old data and pointer spaces. 636 // to only refill them for old data and pointer spaces.
648 return; 637 return;
649 } 638 }
650 639
651 intptr_t freed_bytes = space->free_list()->Concatenate(free_list); 640 intptr_t freed_bytes = space->free_list()->Concatenate(free_list);
652 space->AddToAccountingStats(freed_bytes); 641 space->AddToAccountingStats(freed_bytes);
653 space->DecrementUnsweptFreeBytes(freed_bytes); 642 space->DecrementUnsweptFreeBytes(freed_bytes);
654 } 643 }
655 644
656 645
657 bool MarkCompactCollector::AreSweeperThreadsActivated() { 646 bool MarkCompactCollector::AreSweeperThreadsActivated() {
658 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping; 647 return isolate()->sweeper_threads() != NULL || FLAG_job_based_sweeping;
659 } 648 }
660 649
661 650
651 bool MarkCompactCollector::IsConcurrentSweepingInProgress(PagedSpace* space) {
652 return (space == NULL || space->is_swept_concurrently()) &&
653 sweeping_pending_;
654 }
655
656
662 void Marking::TransferMark(Address old_start, Address new_start) { 657 void Marking::TransferMark(Address old_start, Address new_start) {
663 // This is only used when resizing an object. 658 // This is only used when resizing an object.
664 ASSERT(MemoryChunk::FromAddress(old_start) == 659 ASSERT(MemoryChunk::FromAddress(old_start) ==
665 MemoryChunk::FromAddress(new_start)); 660 MemoryChunk::FromAddress(new_start));
666 661
667 if (!heap_->incremental_marking()->IsMarking()) return; 662 if (!heap_->incremental_marking()->IsMarking()) return;
668 663
669 // If the mark doesn't move, we don't check the color of the object. 664 // If the mark doesn't move, we don't check the color of the object.
670 // It doesn't matter whether the object is black, since it hasn't changed 665 // It doesn't matter whether the object is black, since it hasn't changed
671 // size, so the adjustment to the live data count will be zero anyway. 666 // size, so the adjustment to the live data count will be zero anyway.
(...skipping 285 matching lines...) Expand 10 before | Expand all | Expand 10 after
957 // variable. 952 // variable.
958 tracer_ = tracer; 953 tracer_ = tracer;
959 954
960 #ifdef DEBUG 955 #ifdef DEBUG
961 ASSERT(state_ == IDLE); 956 ASSERT(state_ == IDLE);
962 state_ = PREPARE_GC; 957 state_ = PREPARE_GC;
963 #endif 958 #endif
964 959
965 ASSERT(!FLAG_never_compact || !FLAG_always_compact); 960 ASSERT(!FLAG_never_compact || !FLAG_always_compact);
966 961
967 if (sweeping_in_progress()) { 962 if (IsConcurrentSweepingInProgress()) {
968 // Instead of waiting we could also abort the sweeper threads here. 963 // Instead of waiting we could also abort the sweeper threads here.
969 EnsureSweepingCompleted(); 964 WaitUntilSweepingCompleted();
970 } 965 }
971 966
972 // Clear marking bits if incremental marking is aborted. 967 // Clear marking bits if incremental marking is aborted.
973 if (was_marked_incrementally_ && abort_incremental_marking_) { 968 if (was_marked_incrementally_ && abort_incremental_marking_) {
974 heap()->incremental_marking()->Abort(); 969 heap()->incremental_marking()->Abort();
975 ClearMarkbits(); 970 ClearMarkbits();
976 AbortCompaction(); 971 AbortCompaction();
977 was_marked_incrementally_ = false; 972 was_marked_incrementally_ = false;
978 } 973 }
979 974
(...skipping 3024 matching lines...) Expand 10 before | Expand all | Expand 10 after
4004 cell = it.CurrentCell(); 3999 cell = it.CurrentCell();
4005 if (*cell != 0) break; 4000 if (*cell != 0) break;
4006 } 4001 }
4007 4002
4008 if (it.Done()) { 4003 if (it.Done()) {
4009 size = p->area_end() - p->area_start(); 4004 size = p->area_end() - p->area_start();
4010 freed_bytes = Free<mode>(space, free_list, p->area_start(), 4005 freed_bytes = Free<mode>(space, free_list, p->area_start(),
4011 static_cast<int>(size)); 4006 static_cast<int>(size));
4012 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 4007 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4013 ASSERT_EQ(0, p->LiveBytes()); 4008 ASSERT_EQ(0, p->LiveBytes());
4014 return free_list->GuaranteedAllocatable(max_freed_bytes); 4009 return freed_bytes;
4015 } 4010 }
4016 4011
4017 // Grow the size of the start-of-page free space a little to get up to the 4012 // Grow the size of the start-of-page free space a little to get up to the
4018 // first live object. 4013 // first live object.
4019 Address free_end = StartOfLiveObject(cell_base, *cell); 4014 Address free_end = StartOfLiveObject(cell_base, *cell);
4020 // Free the first free space. 4015 // Free the first free space.
4021 size = free_end - p->area_start(); 4016 size = free_end - p->area_start();
4022 freed_bytes = Free<mode>(space, free_list, p->area_start(), 4017 freed_bytes = Free<mode>(space, free_list, p->area_start(),
4023 static_cast<int>(size)); 4018 static_cast<int>(size));
4024 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 4019 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
4061 4056
4062 // Handle the free space at the end of the page. 4057 // Handle the free space at the end of the page.
4063 if (cell_base - free_start > 32 * kPointerSize) { 4058 if (cell_base - free_start > 32 * kPointerSize) {
4064 free_start = DigestFreeStart(free_start, free_start_cell); 4059 free_start = DigestFreeStart(free_start, free_start_cell);
4065 freed_bytes = Free<mode>(space, free_list, free_start, 4060 freed_bytes = Free<mode>(space, free_list, free_start,
4066 static_cast<int>(p->area_end() - free_start)); 4061 static_cast<int>(p->area_end() - free_start));
4067 max_freed_bytes = Max(freed_bytes, max_freed_bytes); 4062 max_freed_bytes = Max(freed_bytes, max_freed_bytes);
4068 } 4063 }
4069 4064
4070 p->ResetLiveBytes(); 4065 p->ResetLiveBytes();
4071 return free_list->GuaranteedAllocatable(max_freed_bytes); 4066 return max_freed_bytes;
4072 } 4067 }
4073 4068
4074 4069
4075 int MarkCompactCollector::SweepInParallel(PagedSpace* space, 4070 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
4076 int required_freed_bytes) { 4071 int required_freed_bytes) {
4077 PageIterator it(space); 4072 PageIterator it(space);
4078 FreeList* free_list = space == heap()->old_pointer_space() 4073 FreeList* free_list = space == heap()->old_pointer_space()
4079 ? free_list_old_pointer_space_.get() 4074 ? free_list_old_pointer_space_.get()
4080 : free_list_old_data_space_.get(); 4075 : free_list_old_data_space_.get();
4081 FreeList private_free_list(space); 4076 FreeList private_free_list(space);
(...skipping 13 matching lines...) Expand all
4095 max_freed_overall = Max(max_freed, max_freed_overall); 4090 max_freed_overall = Max(max_freed, max_freed_overall);
4096 } 4091 }
4097 if (p == space->end_of_unswept_pages()) break; 4092 if (p == space->end_of_unswept_pages()) break;
4098 } 4093 }
4099 return max_freed_overall; 4094 return max_freed_overall;
4100 } 4095 }
4101 4096
4102 4097
4103 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { 4098 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) {
4104 space->set_is_iterable(sweeper == PRECISE); 4099 space->set_is_iterable(sweeper == PRECISE);
4100 space->set_is_swept_concurrently(sweeper == CONCURRENT_CONSERVATIVE);
4105 space->ClearStats(); 4101 space->ClearStats();
4106 4102
4107 // We defensively initialize end_of_unswept_pages_ here with the first page 4103 // We defensively initialize end_of_unswept_pages_ here with the first page
4108 // of the pages list. 4104 // of the pages list.
4109 space->set_end_of_unswept_pages(space->FirstPage()); 4105 space->set_end_of_unswept_pages(space->FirstPage());
4110 4106
4111 PageIterator it(space); 4107 PageIterator it(space);
4112 4108
4113 int pages_swept = 0; 4109 int pages_swept = 0;
4114 bool unused_page_present = false; 4110 bool unused_page_present = false;
(...skipping 24 matching lines...) Expand all
4139 // Adjust unswept free bytes because releasing a page expects said 4135 // Adjust unswept free bytes because releasing a page expects said
4140 // counter to be accurate for unswept pages. 4136 // counter to be accurate for unswept pages.
4141 space->IncreaseUnsweptFreeBytes(p); 4137 space->IncreaseUnsweptFreeBytes(p);
4142 space->ReleasePage(p); 4138 space->ReleasePage(p);
4143 continue; 4139 continue;
4144 } 4140 }
4145 unused_page_present = true; 4141 unused_page_present = true;
4146 } 4142 }
4147 4143
4148 switch (sweeper) { 4144 switch (sweeper) {
4145 case CONSERVATIVE: {
4146 if (FLAG_gc_verbose) {
4147 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4148 reinterpret_cast<intptr_t>(p));
4149 }
4150 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
4151 pages_swept++;
4152 break;
4153 }
4149 case CONCURRENT_CONSERVATIVE: 4154 case CONCURRENT_CONSERVATIVE:
4150 case PARALLEL_CONSERVATIVE: { 4155 case PARALLEL_CONSERVATIVE: {
4151 if (!parallel_sweeping_active) { 4156 if (!parallel_sweeping_active) {
4152 if (FLAG_gc_verbose) { 4157 if (FLAG_gc_verbose) {
4153 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", 4158 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n",
4154 reinterpret_cast<intptr_t>(p)); 4159 reinterpret_cast<intptr_t>(p));
4155 } 4160 }
4156 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p); 4161 SweepConservatively<SWEEP_ON_MAIN_THREAD>(space, NULL, p);
4157 pages_swept++; 4162 pages_swept++;
4158 parallel_sweeping_active = true; 4163 parallel_sweeping_active = true;
(...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after
4200 // Give pages that are queued to be freed back to the OS. 4205 // Give pages that are queued to be freed back to the OS.
4201 heap()->FreeQueuedChunks(); 4206 heap()->FreeQueuedChunks();
4202 } 4207 }
4203 4208
4204 4209
4205 void MarkCompactCollector::SweepSpaces() { 4210 void MarkCompactCollector::SweepSpaces() {
4206 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); 4211 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP);
4207 #ifdef DEBUG 4212 #ifdef DEBUG
4208 state_ = SWEEP_SPACES; 4213 state_ = SWEEP_SPACES;
4209 #endif 4214 #endif
4210 SweeperType how_to_sweep = CONCURRENT_CONSERVATIVE; 4215 SweeperType how_to_sweep = CONSERVATIVE;
4211 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE; 4216 if (AreSweeperThreadsActivated()) {
4212 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE; 4217 if (FLAG_parallel_sweeping) how_to_sweep = PARALLEL_CONSERVATIVE;
4213 4218 if (FLAG_concurrent_sweeping) how_to_sweep = CONCURRENT_CONSERVATIVE;
4219 }
4214 if (sweep_precisely_) how_to_sweep = PRECISE; 4220 if (sweep_precisely_) how_to_sweep = PRECISE;
4215 4221
4216 MoveEvacuationCandidatesToEndOfPagesList(); 4222 MoveEvacuationCandidatesToEndOfPagesList();
4217 4223
4218 // Noncompacting collections simply sweep the spaces to clear the mark 4224 // Noncompacting collections simply sweep the spaces to clear the mark
4219 // bits and free the nonlive blocks (for old and map spaces). We sweep 4225 // bits and free the nonlive blocks (for old and map spaces). We sweep
4220 // the map space last because freeing non-live maps overwrites them and 4226 // the map space last because freeing non-live maps overwrites them and
4221 // the other spaces rely on possibly non-live maps to get the sizes for 4227 // the other spaces rely on possibly non-live maps to get the sizes for
4222 // non-live objects. 4228 // non-live objects.
4223 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE); 4229 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_OLDSPACE);
4224 { SequentialSweepingScope scope(this); 4230 { SequentialSweepingScope scope(this);
4225 SweepSpace(heap()->old_pointer_space(), how_to_sweep); 4231 SweepSpace(heap()->old_pointer_space(), how_to_sweep);
4226 SweepSpace(heap()->old_data_space(), how_to_sweep); 4232 SweepSpace(heap()->old_data_space(), how_to_sweep);
4227 } 4233 }
4228 4234
4229 if (how_to_sweep == PARALLEL_CONSERVATIVE || 4235 if (how_to_sweep == PARALLEL_CONSERVATIVE ||
4230 how_to_sweep == CONCURRENT_CONSERVATIVE) { 4236 how_to_sweep == CONCURRENT_CONSERVATIVE) {
4231 StartSweeperThreads(); 4237 StartSweeperThreads();
4232 } 4238 }
4233 4239
4234 if (how_to_sweep == PARALLEL_CONSERVATIVE) { 4240 if (how_to_sweep == PARALLEL_CONSERVATIVE) {
4235 EnsureSweepingCompleted(); 4241 WaitUntilSweepingCompleted();
4236 } 4242 }
4237 } 4243 }
4238 RemoveDeadInvalidatedCode(); 4244 RemoveDeadInvalidatedCode();
4239 4245
4240 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE); 4246 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CODE);
4241 SweepSpace(heap()->code_space(), PRECISE); 4247 SweepSpace(heap()->code_space(), PRECISE);
4242 } 4248 }
4243 4249
4244 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL); 4250 { GCTracer::Scope sweep_scope(tracer_, GCTracer::Scope::MC_SWEEP_CELL);
4245 SweepSpace(heap()->cell_space(), PRECISE); 4251 SweepSpace(heap()->cell_space(), PRECISE);
(...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after
4495 while (buffer != NULL) { 4501 while (buffer != NULL) {
4496 SlotsBuffer* next_buffer = buffer->next(); 4502 SlotsBuffer* next_buffer = buffer->next();
4497 DeallocateBuffer(buffer); 4503 DeallocateBuffer(buffer);
4498 buffer = next_buffer; 4504 buffer = next_buffer;
4499 } 4505 }
4500 *buffer_address = NULL; 4506 *buffer_address = NULL;
4501 } 4507 }
4502 4508
4503 4509
4504 } } // namespace v8::internal 4510 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/mark-compact.h ('k') | src/spaces.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698