Chromium Code Reviews

Unified Diff: src/heap/mark-compact.cc

Issue 1356533002: Reland "[heap] Introduce parallel compaction algorithm." (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Fix accounting for moved free list memory Created 5 years, 3 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "src/heap/mark-compact.h"

 #include "src/base/atomicops.h"
 #include "src/base/bits.h"
 #include "src/code-stubs.h"
 #include "src/compilation-cache.h"
(...skipping 39 matching lines...)
     : // NOLINT
 #ifdef DEBUG
       state_(IDLE),
 #endif
       marking_parity_(ODD_MARKING_PARITY),
       compacting_(false),
       was_marked_incrementally_(false),
       sweeping_in_progress_(false),
       parallel_compaction_in_progress_(false),
       pending_sweeper_jobs_semaphore_(0),
-      pending_compaction_jobs_semaphore_(0),
+      pending_compaction_tasks_semaphore_(0),
+      concurrent_compaction_tasks_active_(0),
       evacuation_(false),
       slots_buffer_allocator_(nullptr),
       migration_slots_buffer_(nullptr),
       heap_(heap),
       marking_deque_memory_(NULL),
       marking_deque_memory_committed_(0),
       code_flusher_(NULL),
       have_code_to_deoptimize_(false) {
 }

(...skipping 396 matching lines...)
   for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
     Marking::MarkWhite(Marking::MarkBitFrom(obj));
     Page::FromAddress(obj->address())->ResetProgressBar();
     Page::FromAddress(obj->address())->ResetLiveBytes();
   }
 }


 class MarkCompactCollector::CompactionTask : public v8::Task {
  public:
-  explicit CompactionTask(Heap* heap) : heap_(heap) {}
+  explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
+      : heap_(heap), spaces_(spaces) {}

   virtual ~CompactionTask() {}

  private:
   // v8::Task overrides.
   void Run() override {
-    // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be
-    // called by one thread concurrently.
-    heap_->mark_compact_collector()->EvacuatePages();
+    heap_->mark_compact_collector()->EvacuatePages(spaces_);
     heap_->mark_compact_collector()
-        ->pending_compaction_jobs_semaphore_.Signal();
+        ->pending_compaction_tasks_semaphore_.Signal();
   }

   Heap* heap_;
+  CompactionSpaceCollection* spaces_;

   DISALLOW_COPY_AND_ASSIGN(CompactionTask);
 };
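Note: each CompactionTask now carries its own CompactionSpaceCollection, so a background task allocates only into thread-private spaces and the single shared-state operation is the final Signal(). A standalone sketch of that pattern, with C++20 std::counting_semaphore standing in for v8::base::Semaphore and a hypothetical Arena type in place of CompactionSpaceCollection (not V8 code):

    #include <semaphore>

    struct Arena {};  // stand-in for the task's private CompactionSpaceCollection

    std::counting_semaphore<64> pending_tasks(0);  // pending_compaction_tasks_semaphore_

    class TaskSketch {
     public:
      explicit TaskSketch(Arena* arena) : arena_(arena) {}
      void Run() {
        // ... evacuate pages, allocating only out of *arena_ ...
        pending_tasks.release();  // the single point of cross-thread communication
      }

     private:
      Arena* arena_;
    };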


 class MarkCompactCollector::SweeperTask : public v8::Task {
  public:
   SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}

   virtual ~SweeperTask() {}
(...skipping 2842 matching lines...)
   while (it.has_next()) {
     NewSpacePage* p = it.next();
     survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
   }

   heap_->IncrementYoungSurvivorsCounter(survivors_size);
   new_space->set_age_mark(new_space->top());
 }


-void MarkCompactCollector::EvacuateLiveObjectsFromPage(
+bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
     Page* p, PagedSpace* target_space) {
   AlwaysAllocateScope always_allocate(isolate());
   DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
-  p->SetWasSwept();

   int offsets[16];

   for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
     Address cell_base = it.CurrentCellBase();
     MarkBit::CellType* cell = it.CurrentCell();

     if (*cell == 0) continue;

     int live_objects = MarkWordToObjectStarts(*cell, offsets);
     for (int i = 0; i < live_objects; i++) {
       Address object_addr = cell_base + offsets[i] * kPointerSize;
       HeapObject* object = HeapObject::FromAddress(object_addr);
       DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));

       int size = object->Size();
       AllocationAlignment alignment = object->RequiredAlignment();
       HeapObject* target_object = nullptr;
       AllocationResult allocation = target_space->AllocateRaw(size, alignment);
       if (!allocation.To(&target_object)) {
-        // If allocation failed, use emergency memory and re-try allocation.
-        CHECK(target_space->HasEmergencyMemory());
-        target_space->UseEmergencyMemory();
-        allocation = target_space->AllocateRaw(size, alignment);
-      }
-      if (!allocation.To(&target_object)) {
-        // OS refused to give us memory.
-        V8::FatalProcessOutOfMemory("Evacuation");
-        return;
+        return false;
       }
-
       MigrateObject(target_object, object, size, target_space->identity());
       DCHECK(object->map_word().IsForwardingAddress());
     }

     // Clear marking bits for current cell.
     *cell = 0;
   }
   p->ResetLiveBytes();
+  return true;
 }
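Note: with emergency memory gone, a failed AllocateRaw no longer aborts the process; the function reports failure and the caller flags the page as aborted. Since some objects may already have moved when the allocation fails, false means "half evacuated", not "untouched". A toy model of the new contract (hypothetical names, not V8 code):

    #include <cstddef>
    #include <vector>

    // A "space" with finite capacity; TryAllocate mirrors AllocationResult::To.
    struct ToySpace {
      size_t capacity;
      size_t used = 0;
      bool TryAllocate(size_t size) {
        if (used + size > capacity) return false;
        used += size;
        return true;
      }
    };

    // Returns false as soon as an allocation fails; objects processed before
    // that point have already been "moved", so the caller must treat the page
    // as aborted rather than retry it from scratch.
    bool TryEvacuatePage(const std::vector<size_t>& live_object_sizes,
                         ToySpace* target) {
      for (size_t size : live_object_sizes) {
        if (!target->TryAllocate(size)) return false;
        // Real code migrates the object and installs a forwarding pointer here.
      }
      return true;
    }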


 void MarkCompactCollector::EvacuatePagesInParallel() {
+  if (evacuation_candidates_.length() == 0) return;
+
+  int num_tasks = 1;
+  if (FLAG_parallel_compaction) {
+    num_tasks = NumberOfParallelCompactionTasks();
+  }
+
+  // Set up compaction spaces.
+  CompactionSpaceCollection** compaction_spaces_for_tasks =
+      new CompactionSpaceCollection*[num_tasks];
+  for (int i = 0; i < num_tasks; i++) {
+    compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
+  }
+
+  compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory(
+      heap()->old_space());
+  compaction_spaces_for_tasks[0]
+      ->Get(CODE_SPACE)
+      ->MoveOverFreeMemory(heap()->code_space());
+
   parallel_compaction_in_progress_ = true;
-  V8::GetCurrentPlatform()->CallOnBackgroundThread(
-      new CompactionTask(heap()), v8::Platform::kShortRunningTask);
+  // Kick off parallel tasks.
+  for (int i = 1; i < num_tasks; i++) {
+    concurrent_compaction_tasks_active_++;
+    V8::GetCurrentPlatform()->CallOnBackgroundThread(
+        new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
+        v8::Platform::kShortRunningTask);
+  }
+
+  // Contribute in main thread. Counter and signal are in principle not needed.
+  concurrent_compaction_tasks_active_++;
+  EvacuatePages(compaction_spaces_for_tasks[0]);
+  pending_compaction_tasks_semaphore_.Signal();
+
+  WaitUntilCompactionCompleted();
+
+  // Merge back memory (compacted and unused) from compaction spaces.
+  for (int i = 0; i < num_tasks; i++) {
+    heap()->old_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
+    heap()->code_space()->MergeCompactionSpace(
+        compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
+    delete compaction_spaces_for_tasks[i];
+  }
+  delete[] compaction_spaces_for_tasks;
+
+  // Finalize sequentially.
+  const int num_pages = evacuation_candidates_.length();
+  int abandoned_pages = 0;
+  for (int i = 0; i < num_pages; i++) {
+    Page* p = evacuation_candidates_[i];
+    switch (p->parallel_compaction_state().Value()) {
+      case MemoryChunk::ParallelCompactingState::kCompactingAborted:
+        // We have partially compacted the page, i.e., some objects may have
+        // moved, others are still in place.
+        // We need to:
+        // - Leave the evacuation candidate flag for later processing of
+        //   slots buffer entries.
+        // - Leave the slots buffer there for processing of entries added by
+        //   the write barrier.
+        // - Rescan the page as slot recording in the migration buffer only
+        //   happens upon moving (which we potentially didn't do).
+        // - Leave the page in the list of pages of a space since we could not
+        //   fully evacuate it.
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetFlag(Page::RESCAN_ON_EVACUATION);
+        abandoned_pages++;
+        break;
+      case MemoryChunk::kCompactingFinalize:
+        DCHECK(p->IsEvacuationCandidate());
+        p->SetWasSwept();
+        p->Unlink();
+        break;
+      case MemoryChunk::kCompactingDone:
+        DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
+        DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
+        break;
+      default:
+        // We should not observe kCompactingInProgress here.
+        UNREACHABLE();
+    }
+    p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
+  }
+  if (num_pages > 0) {
+    if (FLAG_trace_fragmentation) {
+      if (abandoned_pages != 0) {
+        PrintF(
+            "  Abandoned (at least partially) %d out of %d page compactions due"
+            " to lack of memory\n",
+            abandoned_pages, num_pages);
+      } else {
+        PrintF("  Compacted %d pages\n", num_pages);
+      }
+    }
+  }
 }
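Note: this function is the heart of the reland: one private CompactionSpaceCollection per task, free-list memory moved over to task 0 up front, background tasks for indices 1..n-1, the main thread contributing as task 0, a semaphore-based join, and a strictly sequential merge-and-finalize phase. A compilable sketch of that fork-join shape, using std::thread and C++20 std::counting_semaphore in place of the V8 platform, with a hypothetical Arena standing in for CompactionSpaceCollection (not V8 code):

    #include <semaphore>
    #include <thread>
    #include <vector>

    struct Arena { std::vector<int> chunks; };  // per-task private allocation state

    std::counting_semaphore<64> done(0);        // pending_compaction_tasks_semaphore_

    void Evacuate(Arena* arena, int task_id) {
      arena->chunks.push_back(task_id);         // pretend evacuation work
      done.release();                           // Signal()
    }

    int main() {
      const int num_tasks = 4;
      std::vector<Arena> arenas(num_tasks);
      std::vector<std::thread> workers;
      for (int i = 1; i < num_tasks; i++)       // kick off background tasks
        workers.emplace_back(Evacuate, &arenas[i], i);
      Evacuate(&arenas[0], 0);                  // main thread contributes as task 0
      for (int i = 0; i < num_tasks; i++)       // WaitUntilCompactionCompleted()
        done.acquire();
      Arena merged;                             // merge back sequentially
      for (Arena& a : arenas)
        merged.chunks.insert(merged.chunks.end(), a.chunks.begin(),
                             a.chunks.end());
      for (std::thread& t : workers) t.join();
      return 0;
    }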


 void MarkCompactCollector::WaitUntilCompactionCompleted() {
-  pending_compaction_jobs_semaphore_.Wait();
+  while (concurrent_compaction_tasks_active_-- > 0) {
+    pending_compaction_tasks_semaphore_.Wait();
+  }
   parallel_compaction_in_progress_ = false;
 }


-void MarkCompactCollector::EvacuatePages() {
-  int npages = evacuation_candidates_.length();
-  int abandoned_pages = 0;
-  for (int i = 0; i < npages; i++) {
+void MarkCompactCollector::EvacuatePages(
+    CompactionSpaceCollection* compaction_spaces) {
+  for (int i = 0; i < evacuation_candidates_.length(); i++) {
     Page* p = evacuation_candidates_[i];
     DCHECK(p->IsEvacuationCandidate() ||
            p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
     DCHECK(static_cast<int>(p->parallel_sweeping()) ==
            MemoryChunk::SWEEPING_DONE);
-    PagedSpace* space = static_cast<PagedSpace*>(p->owner());
-    // Allocate emergency memory for the case when compaction fails due to out
-    // of memory.
-    if (!space->HasEmergencyMemory()) {
-      space->CreateEmergencyMemory();  // If the OS lets us.
-    }
-    if (p->IsEvacuationCandidate()) {
-      // During compaction we might have to request a new page in order to free
-      // up a page. Check that we actually got an emergency page above so we
-      // can guarantee that this succeeds.
-      if (space->HasEmergencyMemory()) {
-        EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner()));
-        // Unlink the page from the list of pages here. We must not iterate
-        // over that page later (e.g. when scan on scavenge pages are
-        // processed). The page itself will be freed later and is still
-        // reachable from the evacuation candidates list.
-        p->Unlink();
-      } else {
-        // Without room for expansion evacuation is not guaranteed to succeed.
-        // Pessimistically abandon unevacuated pages.
-        for (int j = i; j < npages; j++) {
-          Page* page = evacuation_candidates_[j];
-          slots_buffer_allocator_->DeallocateChain(
-              page->slots_buffer_address());
-          page->ClearEvacuationCandidate();
-          page->SetFlag(Page::RESCAN_ON_EVACUATION);
-        }
-        abandoned_pages = npages - i;
-        break;
+    if (p->parallel_compaction_state().TrySetValue(
+            MemoryChunk::kCompactingDone,
+            MemoryChunk::kCompactingInProgress)) {
+      if (p->IsEvacuationCandidate()) {
+        DCHECK_EQ(p->parallel_compaction_state().Value(),
+                  MemoryChunk::kCompactingInProgress);
+        if (EvacuateLiveObjectsFromPage(
+                p, compaction_spaces->Get(p->owner()->identity()))) {
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingFinalize);
+        } else {
+          p->parallel_compaction_state().SetValue(
+              MemoryChunk::kCompactingAborted);
+        }
+      } else {
+        // There could be popular pages in the list of evacuation candidates
+        // which we do not compact.
+        p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
       }
     }
   }
-  if (npages > 0) {
-    // Release emergency memory.
-    PagedSpaces spaces(heap());
-    for (PagedSpace* space = spaces.next(); space != NULL;
-         space = spaces.next()) {
-      if (space->HasEmergencyMemory()) {
-        space->FreeEmergencyMemory();
-      }
-    }
-    if (FLAG_trace_fragmentation) {
-      if (abandoned_pages != 0) {
-        PrintF(
-            "  Abandon %d out of %d page defragmentations due to lack of "
-            "memory\n",
-            abandoned_pages, npages);
-      } else {
-        PrintF("  Defragmented %d pages\n", npages);
-      }
-    }
-  }
 }
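Note: pages are claimed through an atomic kCompactingDone -> kCompactingInProgress transition, so any number of tasks can walk the full candidate list without locks; whichever task wins the transition owns the page, and each page ends up in exactly one finalize state. A sketch of the claiming idea with std::atomic in place of V8's AtomicValue (toy types, not V8 code):

    #include <atomic>

    enum State { kDone, kInProgress, kFinalize, kAborted };

    struct ToyPage {
      std::atomic<State> state{kDone};
      bool is_candidate = true;
    };

    // Mirrors TrySetValue(kCompactingDone, kCompactingInProgress).
    bool TryClaim(ToyPage* p) {
      State expected = kDone;
      return p->state.compare_exchange_strong(expected, kInProgress);
    }

    void ProcessPage(ToyPage* p, bool evacuation_succeeded) {
      if (!TryClaim(p)) return;  // another task already owns this page
      if (!p->is_candidate) {    // popular page: nothing to move
        p->state.store(kDone);
        return;
      }
      p->state.store(evacuation_succeeded ? kFinalize : kAborted);
    }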


 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
  public:
   virtual Object* RetainAs(Object* object) {
     if (object->IsHeapObject()) {
       HeapObject* heap_object = HeapObject::cast(object);
       MapWord map_word = heap_object->map_word();
       if (map_word.IsForwardingAddress()) {
(...skipping 165 matching lines...)
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_SWEEP_NEWSPACE);
     EvacuationScope evacuation_scope(this);
     EvacuateNewSpace();
   }

   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_EVACUATE_PAGES);
     EvacuationScope evacuation_scope(this);
-    if (FLAG_parallel_compaction) {
-      EvacuatePagesInParallel();
-      WaitUntilCompactionCompleted();
-    } else {
-      EvacuatePages();
-    }
+    EvacuatePagesInParallel();
   }

   // Second pass: find pointers to new space and update them.
   PointersUpdatingVisitor updating_visitor(heap());

   {
     GCTracer::Scope gc_scope(heap()->tracer(),
                              GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
     // Update pointers in to space.
     SemiSpaceIterator to_it(heap()->new_space());
(...skipping 39 matching lines...)
       Page* p = evacuation_candidates_[i];
       DCHECK(p->IsEvacuationCandidate() ||
              p->IsFlagSet(Page::RESCAN_ON_EVACUATION));

       if (p->IsEvacuationCandidate()) {
         UpdateSlotsRecordedIn(p->slots_buffer());
         if (FLAG_trace_fragmentation_verbose) {
           PrintF("  page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
                  SlotsBuffer::SizeOfChain(p->slots_buffer()));
         }
+        slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());

         // Important: skip list should be cleared only after roots were updated
         // because root iteration traverses the stack and might have to find
         // code objects from non-updated pc pointing into evacuation candidate.
         SkipList* list = p->skip_list();
         if (list != NULL) list->Clear();
-      } else {
+      }
+      if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
         if (FLAG_gc_verbose) {
           PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n",
                  reinterpret_cast<intptr_t>(p));
         }
         PagedSpace* space = static_cast<PagedSpace*>(p->owner());
         p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);

         switch (space->identity()) {
           case OLD_SPACE:
             Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                   IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
                                                        &updating_visitor);
             break;
           case CODE_SPACE:
             if (FLAG_zap_code_space) {
               Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                     REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p,
                                                        &updating_visitor);
             } else {
               Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD,
                     REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p,
                                                           &updating_visitor);
             }
             break;
           default:
             UNREACHABLE();
             break;
         }
       }
+      if (p->IsEvacuationCandidate() &&
+          p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) {
+        // Case where we've aborted compacting a page. Clear the flag here to
+        // avoid releasing the page later on.
+        p->ClearEvacuationCandidate();
+      }
     }
   }

   GCTracer::Scope gc_scope(heap()->tracer(),
                            GCTracer::Scope::MC_UPDATE_MISC_POINTERS);

   heap_->string_table()->Iterate(&updating_visitor);

   // Update pointers from external string table.
   heap_->UpdateReferencesInExternalStringTable(
(...skipping 26 matching lines...)


 void MarkCompactCollector::ReleaseEvacuationCandidates() {
   int npages = evacuation_candidates_.length();
   for (int i = 0; i < npages; i++) {
     Page* p = evacuation_candidates_[i];
     if (!p->IsEvacuationCandidate()) continue;
     PagedSpace* space = static_cast<PagedSpace*>(p->owner());
     space->Free(p->area_start(), p->area_size());
     p->set_scan_on_scavenge(false);
-    slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
     p->ResetLiveBytes();
     space->ReleasePage(p);
   }
   evacuation_candidates_.Rewind(0);
   compacting_ = false;
   heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
   heap()->FreeQueuedChunks();
 }
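Note: the DeallocateChain call moved out of this function and into the pointer-update pass above, where every evacuation candidate (including pages whose compaction was aborted and which are skipped here) is visited. The release criterion itself is simple; a toy restatement (not V8 code):

    #include <vector>

    // Only pages still flagged as candidates were fully evacuated and may be
    // freed; aborted and popular pages had the flag cleared earlier and stay.
    struct CandidatePage { bool is_evacuation_candidate; bool freed = false; };

    void ReleaseCandidates(std::vector<CandidatePage*>* candidates) {
      for (CandidatePage* p : *candidates) {
        if (!p->is_evacuation_candidate) continue;  // keep half-evacuated pages
        p->freed = true;                            // space->Free + ReleasePage
      }
      candidates->clear();                          // Rewind(0)
    }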


(...skipping 595 matching lines...)
   // EvacuateNewSpaceAndCandidates iterates over new space objects and for
   // ArrayBuffers either re-registers them as live or promotes them. This is
   // needed to properly free them.
   heap()->array_buffer_tracker()->FreeDead(false);

   // Clear the marking state of live large objects.
   heap_->lo_space()->ClearMarkingStateOfLiveObjects();

   // Deallocate evacuated candidate pages.
   ReleaseEvacuationCandidates();
-  CodeRange* code_range = heap()->isolate()->code_range();
-  if (code_range != NULL && code_range->valid()) {
-    code_range->ReserveEmergencyBlock();
-  }

   if (FLAG_print_cumulative_gc_stat) {
     heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() -
                                      start_time);
   }

 #ifdef VERIFY_HEAP
   if (FLAG_verify_heap && !sweeping_in_progress_) {
     VerifyEvacuation(heap());
   }
(...skipping 107 matching lines...)
     MarkBit mark_bit = Marking::MarkBitFrom(host);
     if (Marking::IsBlack(mark_bit)) {
       RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
       RecordRelocSlot(&rinfo, target);
     }
   }
 }

 }  // namespace internal
 }  // namespace v8