| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 39 matching lines...) |
| 50 : // NOLINT | 50 : // NOLINT |
| 51 #ifdef DEBUG | 51 #ifdef DEBUG |
| 52 state_(IDLE), | 52 state_(IDLE), |
| 53 #endif | 53 #endif |
| 54 marking_parity_(ODD_MARKING_PARITY), | 54 marking_parity_(ODD_MARKING_PARITY), |
| 55 compacting_(false), | 55 compacting_(false), |
| 56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
| 57 sweeping_in_progress_(false), | 57 sweeping_in_progress_(false), |
| 58 parallel_compaction_in_progress_(false), | 58 parallel_compaction_in_progress_(false), |
| 59 pending_sweeper_jobs_semaphore_(0), | 59 pending_sweeper_jobs_semaphore_(0), |
| 60 pending_compaction_tasks_semaphore_(0), | 60 pending_compaction_jobs_semaphore_(0), |
| 61 concurrent_compaction_tasks_active_(0), | |
| 62 evacuation_(false), | 61 evacuation_(false), |
| 63 slots_buffer_allocator_(nullptr), | 62 slots_buffer_allocator_(nullptr), |
| 64 migration_slots_buffer_(nullptr), | 63 migration_slots_buffer_(nullptr), |
| 65 heap_(heap), | 64 heap_(heap), |
| 66 marking_deque_memory_(NULL), | 65 marking_deque_memory_(NULL), |
| 67 marking_deque_memory_committed_(0), | 66 marking_deque_memory_committed_(0), |
| 68 code_flusher_(NULL), | 67 code_flusher_(NULL), |
| 69 have_code_to_deoptimize_(false) { | 68 have_code_to_deoptimize_(false) { |
| 70 } | 69 } |
| 71 | 70 |
| (...skipping 396 matching lines...) |
| 468 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 467 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 469 Marking::MarkWhite(Marking::MarkBitFrom(obj)); | 468 Marking::MarkWhite(Marking::MarkBitFrom(obj)); |
| 470 Page::FromAddress(obj->address())->ResetProgressBar(); | 469 Page::FromAddress(obj->address())->ResetProgressBar(); |
| 471 Page::FromAddress(obj->address())->ResetLiveBytes(); | 470 Page::FromAddress(obj->address())->ResetLiveBytes(); |
| 472 } | 471 } |
| 473 } | 472 } |
| 474 | 473 |
| 475 | 474 |
| 476 class MarkCompactCollector::CompactionTask : public v8::Task { | 475 class MarkCompactCollector::CompactionTask : public v8::Task { |
| 477 public: | 476 public: |
| 478 explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces) | 477 explicit CompactionTask(Heap* heap) : heap_(heap) {} |
| 479 : heap_(heap), spaces_(spaces) {} | |
| 480 | 478 |
| 481 virtual ~CompactionTask() {} | 479 virtual ~CompactionTask() {} |
| 482 | 480 |
| 483 private: | 481 private: |
| 484 // v8::Task overrides. | 482 // v8::Task overrides. |
| 485 void Run() override { | 483 void Run() override { |
| 486 heap_->mark_compact_collector()->EvacuatePages(spaces_); | 484 // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and may only |
| 485 // be called by a single thread at a time. |
| 486 heap_->mark_compact_collector()->EvacuatePages(); |
| 487 heap_->mark_compact_collector() | 487 heap_->mark_compact_collector() |
| 488 ->pending_compaction_tasks_semaphore_.Signal(); | 488 ->pending_compaction_jobs_semaphore_.Signal(); |
| 489 } | 489 } |
| 490 | 490 |
| 491 Heap* heap_; | 491 Heap* heap_; |
| 492 CompactionSpaceCollection* spaces_; | |
| 493 | 492 |
| 494 DISALLOW_COPY_AND_ASSIGN(CompactionTask); | 493 DISALLOW_COPY_AND_ASSIGN(CompactionTask); |
| 495 }; | 494 }; |
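Note: CompactionTask above signals pending_compaction_jobs_semaphore_ when it finishes, and the main thread later blocks on the same semaphore in WaitUntilCompactionCompleted(); SweeperTask below uses pending_sweeper_jobs_semaphore_ the same way. A minimal sketch of such a counting semaphore in portable C++ (illustrative only; V8 ships its own base::Semaphore):

    #include <condition_variable>
    #include <mutex>

    class Semaphore {
     public:
      explicit Semaphore(int count) : count_(count) {}
      // Called by the finished worker: increment and wake one waiter.
      void Signal() {
        std::lock_guard<std::mutex> lock(mutex_);
        ++count_;
        cv_.notify_one();
      }
      // Called by the coordinating thread: block until a signal arrives.
      void Wait() {
        std::unique_lock<std::mutex> lock(mutex_);
        cv_.wait(lock, [this] { return count_ > 0; });
        --count_;
      }
     private:
      std::mutex mutex_;
      std::condition_variable cv_;
      int count_;
    };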
| 496 | 495 |
| 497 | 496 |
| 498 class MarkCompactCollector::SweeperTask : public v8::Task { | 497 class MarkCompactCollector::SweeperTask : public v8::Task { |
| 499 public: | 498 public: |
| 500 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} | 499 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} |
| 501 | 500 |
| 502 virtual ~SweeperTask() {} | 501 virtual ~SweeperTask() {} |
| (...skipping 2814 matching lines...) |
| 3317 while (it.has_next()) { | 3316 while (it.has_next()) { |
| 3318 NewSpacePage* p = it.next(); | 3317 NewSpacePage* p = it.next(); |
| 3319 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); | 3318 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); |
| 3320 } | 3319 } |
| 3321 | 3320 |
| 3322 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 3321 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 3323 new_space->set_age_mark(new_space->top()); | 3322 new_space->set_age_mark(new_space->top()); |
| 3324 } | 3323 } |
| 3325 | 3324 |
| 3326 | 3325 |
| 3327 bool MarkCompactCollector::EvacuateLiveObjectsFromPage( | 3326 void MarkCompactCollector::EvacuateLiveObjectsFromPage( |
| 3328 Page* p, PagedSpace* target_space) { | 3327 Page* p, PagedSpace* target_space) { |
| 3329 AlwaysAllocateScope always_allocate(isolate()); | 3328 AlwaysAllocateScope always_allocate(isolate()); |
| 3330 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); | 3329 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3330 p->SetWasSwept(); |
| 3331 | 3331 |
| 3332 int offsets[16]; | 3332 int offsets[16]; |
| 3333 | 3333 |
| 3334 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | 3334 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
| 3335 Address cell_base = it.CurrentCellBase(); | 3335 Address cell_base = it.CurrentCellBase(); |
| 3336 MarkBit::CellType* cell = it.CurrentCell(); | 3336 MarkBit::CellType* cell = it.CurrentCell(); |
| 3337 | 3337 |
| 3338 if (*cell == 0) continue; | 3338 if (*cell == 0) continue; |
| 3339 | 3339 |
| 3340 int live_objects = MarkWordToObjectStarts(*cell, offsets); | 3340 int live_objects = MarkWordToObjectStarts(*cell, offsets); |
| 3341 for (int i = 0; i < live_objects; i++) { | 3341 for (int i = 0; i < live_objects; i++) { |
| 3342 Address object_addr = cell_base + offsets[i] * kPointerSize; | 3342 Address object_addr = cell_base + offsets[i] * kPointerSize; |
| 3343 HeapObject* object = HeapObject::FromAddress(object_addr); | 3343 HeapObject* object = HeapObject::FromAddress(object_addr); |
| 3344 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3344 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3345 | 3345 |
| 3346 int size = object->Size(); | 3346 int size = object->Size(); |
| 3347 AllocationAlignment alignment = object->RequiredAlignment(); | 3347 AllocationAlignment alignment = object->RequiredAlignment(); |
| 3348 HeapObject* target_object = nullptr; | 3348 HeapObject* target_object = nullptr; |
| 3349 AllocationResult allocation = target_space->AllocateRaw(size, alignment); | 3349 AllocationResult allocation = target_space->AllocateRaw(size, alignment); |
| 3350 if (!allocation.To(&target_object)) { | 3350 if (!allocation.To(&target_object)) { |
| 3351 return false; | 3351 // If allocation failed, use emergency memory and re-try allocation. |
| 3352 CHECK(target_space->HasEmergencyMemory()); |
| 3353 target_space->UseEmergencyMemory(); |
| 3354 allocation = target_space->AllocateRaw(size, alignment); |
| 3352 } | 3355 } |
| 3356 if (!allocation.To(&target_object)) { |
| 3357 // OS refused to give us memory. |
| 3358 V8::FatalProcessOutOfMemory("Evacuation"); |
| 3359 return; |
| 3360 } |
| 3361 |
| 3353 MigrateObject(target_object, object, size, target_space->identity()); | 3362 MigrateObject(target_object, object, size, target_space->identity()); |
| 3354 DCHECK(object->map_word().IsForwardingAddress()); | 3363 DCHECK(object->map_word().IsForwardingAddress()); |
| 3355 } | 3364 } |
| 3356 | 3365 |
| 3357 // Clear marking bits for current cell. | 3366 // Clear marking bits for current cell. |
| 3358 *cell = 0; | 3367 *cell = 0; |
| 3359 } | 3368 } |
| 3360 p->ResetLiveBytes(); | 3369 p->ResetLiveBytes(); |
| 3361 return true; | |
| 3362 } | 3370 } |
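The NEW column above restores the emergency-memory fallback: a failed allocation first consumes the space's pre-reserved emergency page and retries, and only if the retry also fails does the collector abort the process. The pattern, isolated as a helper (the wrapper function is hypothetical; the calls it makes are the ones appearing above):

    // Sketch of the retry-with-reserve pattern, assuming V8's PagedSpace
    // API as used above. Not a real V8 helper.
    HeapObject* AllocateForEvacuation(PagedSpace* target_space, int size,
                                      AllocationAlignment alignment) {
      HeapObject* target_object = nullptr;
      AllocationResult allocation = target_space->AllocateRaw(size, alignment);
      if (!allocation.To(&target_object)) {
        // First failure: release the emergency reserve and try again.
        CHECK(target_space->HasEmergencyMemory());
        target_space->UseEmergencyMemory();
        allocation = target_space->AllocateRaw(size, alignment);
      }
      if (!allocation.To(&target_object)) {
        // Even the reserve could not satisfy the request.
        V8::FatalProcessOutOfMemory("Evacuation");
      }
      return target_object;
    }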
| 3363 | 3371 |
| 3364 | 3372 |
| 3365 void MarkCompactCollector::EvacuatePagesInParallel() { | 3373 void MarkCompactCollector::EvacuatePagesInParallel() { |
| 3366 if (evacuation_candidates_.length() == 0) return; | |
| 3367 | |
| 3368 int num_tasks = 1; | |
| 3369 if (FLAG_parallel_compaction) { | |
| 3370 num_tasks = NumberOfParallelCompactionTasks(); | |
| 3371 } | |
| 3372 | |
| 3373 // Set up compaction spaces. | |
| 3374 CompactionSpaceCollection** compaction_spaces_for_tasks = | |
| 3375 new CompactionSpaceCollection*[num_tasks]; | |
| 3376 for (int i = 0; i < num_tasks; i++) { | |
| 3377 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); | |
| 3378 } | |
| 3379 | |
| 3380 compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory( | |
| 3381 heap()->old_space()); | |
| 3382 compaction_spaces_for_tasks[0] | |
| 3383 ->Get(CODE_SPACE) | |
| 3384 ->MoveOverFreeMemory(heap()->code_space()); | |
| 3385 | |
| 3386 parallel_compaction_in_progress_ = true; | 3374 parallel_compaction_in_progress_ = true; |
| 3387 // Kick off parallel tasks. | 3375 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 3388 for (int i = 1; i < num_tasks; i++) { | 3376 new CompactionTask(heap()), v8::Platform::kShortRunningTask); |
| 3389 concurrent_compaction_tasks_active_++; | |
| 3390 V8::GetCurrentPlatform()->CallOnBackgroundThread( | |
| 3391 new CompactionTask(heap(), compaction_spaces_for_tasks[i]), | |
| 3392 v8::Platform::kShortRunningTask); | |
| 3393 } | |
| 3394 | |
| 3395 // Contribute on the main thread. Counter and signal are in principle not needed. | |
| 3396 concurrent_compaction_tasks_active_++; | |
| 3397 EvacuatePages(compaction_spaces_for_tasks[0]); | |
| 3398 pending_compaction_tasks_semaphore_.Signal(); | |
| 3399 | |
| 3400 WaitUntilCompactionCompleted(); | |
| 3401 | |
| 3402 // Merge back memory (compacted and unused) from compaction spaces. | |
| 3403 for (int i = 0; i < num_tasks; i++) { | |
| 3404 heap()->old_space()->MergeCompactionSpace( | |
| 3405 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); | |
| 3406 heap()->code_space()->MergeCompactionSpace( | |
| 3407 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); | |
| 3408 delete compaction_spaces_for_tasks[i]; | |
| 3409 } | |
| 3410 delete[] compaction_spaces_for_tasks; | |
| 3411 | |
| 3412 // Finalize sequentially. | |
| 3413 const int num_pages = evacuation_candidates_.length(); | |
| 3414 int abandoned_pages = 0; | |
| 3415 for (int i = 0; i < num_pages; i++) { | |
| 3416 Page* p = evacuation_candidates_[i]; | |
| 3417 switch (p->parallel_compaction_state().Value()) { | |
| 3418 case MemoryChunk::ParallelCompactingState::kCompactingAborted: | |
| 3419 // We have partially compacted the page, i.e., some objects may have | |
| 3420 // moved, others are still in place. | |
| 3421 // We need to: | |
| 3422 // - Leave the evacuation candidate flag for later processing of | |
| 3423 // slots buffer entries. | |
| 3424 // - Leave the slots buffer there for processing of entries added by | |
| 3425 // the write barrier. | |
| 3426 // - Rescan the page as slot recording in the migration buffer only | |
| 3427 // happens upon moving (which we potentially didn't do). | |
| 3428 // - Leave the page in the list of pages of a space since we could not | |
| 3429 // fully evacuate it. | |
| 3430 DCHECK(p->IsEvacuationCandidate()); | |
| 3431 p->SetFlag(Page::RESCAN_ON_EVACUATION); | |
| 3432 abandoned_pages++; | |
| 3433 break; | |
| 3434 case MemoryChunk::kCompactingFinalize: | |
| 3435 DCHECK(p->IsEvacuationCandidate()); | |
| 3436 p->SetWasSwept(); | |
| 3437 p->Unlink(); | |
| 3438 break; | |
| 3439 case MemoryChunk::kCompactingDone: | |
| 3440 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); | |
| 3441 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | |
| 3442 break; | |
| 3443 default: | |
| 3444 // We should not observe kCompactingInProgress here. | |
| 3445 UNREACHABLE(); | |
| 3446 } | |
| 3447 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
| 3448 } | |
| 3449 if (num_pages > 0) { | |
| 3450 if (FLAG_trace_fragmentation) { | |
| 3451 if (abandoned_pages != 0) { | |
| 3452 PrintF( | |
| 3453 " Abandoned (at least partially) %d out of %d page compactions due" | |
| 3454 " to lack of memory\n", | |
| 3455 abandoned_pages, num_pages); | |
| 3456 } else { | |
| 3457 PrintF(" Compacted %d pages\n", num_pages); | |
| 3458 } | |
| 3459 } | |
| 3460 } | |
| 3461 } | 3377 } |
| 3462 | 3378 |
| 3463 | 3379 |
| 3464 void MarkCompactCollector::WaitUntilCompactionCompleted() { | 3380 void MarkCompactCollector::WaitUntilCompactionCompleted() { |
| 3465 while (concurrent_compaction_tasks_active_-- > 0) { | 3381 pending_compaction_jobs_semaphore_.Wait(); |
| 3466 pending_compaction_tasks_semaphore_.Wait(); | |
| 3467 } | |
| 3468 parallel_compaction_in_progress_ = false; | 3382 parallel_compaction_in_progress_ = false; |
| 3469 } | 3383 } |
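Two details worth flagging in the hunk above. First, the OLD wait loop's post-decrement test (while (concurrent_compaction_tasks_active_-- > 0)) decrements once more on the final, failing check, so the counter is left at -1 after the loop. Second, the NEW side avoids counting altogether by posting exactly one CompactionTask and doing a single Wait(). A sketch of a drain loop that would keep the OLD counter consistent (same names as the OLD column):

    // Wait for one semaphore signal per outstanding task; decrementing
    // inside the body leaves the counter at zero, not -1.
    while (concurrent_compaction_tasks_active_ > 0) {
      pending_compaction_tasks_semaphore_.Wait();
      concurrent_compaction_tasks_active_--;
    }
    parallel_compaction_in_progress_ = false;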
| 3470 | 3384 |
| 3471 | 3385 |
| 3472 void MarkCompactCollector::EvacuatePages( | 3386 void MarkCompactCollector::EvacuatePages() { |
| 3473 CompactionSpaceCollection* compaction_spaces) { | 3387 int npages = evacuation_candidates_.length(); |
| 3474 for (int i = 0; i < evacuation_candidates_.length(); i++) { | 3388 int abandoned_pages = 0; |
| 3389 for (int i = 0; i < npages; i++) { |
| 3475 Page* p = evacuation_candidates_[i]; | 3390 Page* p = evacuation_candidates_[i]; |
| 3476 DCHECK(p->IsEvacuationCandidate() || | 3391 DCHECK(p->IsEvacuationCandidate() || |
| 3477 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3392 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3478 DCHECK(static_cast<int>(p->parallel_sweeping()) == | 3393 DCHECK(static_cast<int>(p->parallel_sweeping()) == |
| 3479 MemoryChunk::SWEEPING_DONE); | 3394 MemoryChunk::SWEEPING_DONE); |
| 3480 if (p->parallel_compaction_state().TrySetValue( | 3395 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3481 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | 3396 // Allocate emergency memory for the case where compaction fails due to |
| 3482 if (p->IsEvacuationCandidate()) { | 3397 // lack of memory. |
| 3483 DCHECK_EQ(p->parallel_compaction_state().Value(), | 3398 if (!space->HasEmergencyMemory()) { |
| 3484 MemoryChunk::kCompactingInProgress); | 3399 space->CreateEmergencyMemory(); // If the OS lets us. |
| 3485 if (EvacuateLiveObjectsFromPage( | 3400 } |
| 3486 p, compaction_spaces->Get(p->owner()->identity()))) { | 3401 if (p->IsEvacuationCandidate()) { |
| 3487 p->parallel_compaction_state().SetValue( | 3402 // During compaction we might have to request a new page in order to free |
| 3488 MemoryChunk::kCompactingFinalize); | 3403 // up a page. Check that we actually got an emergency page above so we |
| 3489 } else { | 3404 // can guarantee that this succeeds. |
| 3490 p->parallel_compaction_state().SetValue( | 3405 if (space->HasEmergencyMemory()) { |
| 3491 MemoryChunk::kCompactingAborted); | 3406 EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner())); |
| 3407 // Unlink the page from the list of pages here. We must not iterate |
| 3408 // over that page later (e.g. when scan on scavenge pages are |
| 3409 // processed). The page itself will be freed later and is still |
| 3410 // reachable from the evacuation candidates list. |
| 3411 p->Unlink(); |
| 3412 } else { |
| 3413 // Without room for expansion evacuation is not guaranteed to succeed. |
| 3414 // Pessimistically abandon unevacuated pages. |
| 3415 for (int j = i; j < npages; j++) { |
| 3416 Page* page = evacuation_candidates_[j]; |
| 3417 slots_buffer_allocator_->DeallocateChain( |
| 3418 page->slots_buffer_address()); |
| 3419 page->ClearEvacuationCandidate(); |
| 3420 page->SetFlag(Page::RESCAN_ON_EVACUATION); |
| 3492 } | 3421 } |
| 3493 } else { | 3422 abandoned_pages = npages - i; |
| 3494 // There could be popular pages in the list of evacuation candidates | 3423 break; |
| 3495 // which we do not compact. | 3423 break; |
| 3496 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
| 3497 } | 3424 } |
| 3498 } | 3425 } |
| 3499 } | 3426 } |
| 3427 if (npages > 0) { |
| 3428 // Release emergency memory. |
| 3429 PagedSpaces spaces(heap()); |
| 3430 for (PagedSpace* space = spaces.next(); space != NULL; |
| 3431 space = spaces.next()) { |
| 3432 if (space->HasEmergencyMemory()) { |
| 3433 space->FreeEmergencyMemory(); |
| 3434 } |
| 3435 } |
| 3436 if (FLAG_trace_fragmentation) { |
| 3437 if (abandoned_pages != 0) { |
| 3438 PrintF( |
| 3439 " Abandon %d out of %d page defragmentations due to lack of " |
| 3440 "memory\n", |
| 3441 abandoned_pages, npages); |
| 3442 } else { |
| 3443 PrintF(" Defragmented %d pages\n", npages); |
| 3444 } |
| 3445 } |
| 3446 } |
| 3500 } | 3447 } |
| 3501 | 3448 |
| 3502 | 3449 |
| 3503 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3450 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| 3504 public: | 3451 public: |
| 3505 virtual Object* RetainAs(Object* object) { | 3452 virtual Object* RetainAs(Object* object) { |
| 3506 if (object->IsHeapObject()) { | 3453 if (object->IsHeapObject()) { |
| 3507 HeapObject* heap_object = HeapObject::cast(object); | 3454 HeapObject* heap_object = HeapObject::cast(object); |
| 3508 MapWord map_word = heap_object->map_word(); | 3455 MapWord map_word = heap_object->map_word(); |
| 3509 if (map_word.IsForwardingAddress()) { | 3456 if (map_word.IsForwardingAddress()) { |
| (...skipping 165 matching lines...) |
| 3675 GCTracer::Scope gc_scope(heap()->tracer(), | 3622 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3676 GCTracer::Scope::MC_SWEEP_NEWSPACE); | 3623 GCTracer::Scope::MC_SWEEP_NEWSPACE); |
| 3677 EvacuationScope evacuation_scope(this); | 3624 EvacuationScope evacuation_scope(this); |
| 3678 EvacuateNewSpace(); | 3625 EvacuateNewSpace(); |
| 3679 } | 3626 } |
| 3680 | 3627 |
| 3681 { | 3628 { |
| 3682 GCTracer::Scope gc_scope(heap()->tracer(), | 3629 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3683 GCTracer::Scope::MC_EVACUATE_PAGES); | 3630 GCTracer::Scope::MC_EVACUATE_PAGES); |
| 3684 EvacuationScope evacuation_scope(this); | 3631 EvacuationScope evacuation_scope(this); |
| 3685 EvacuatePagesInParallel(); | 3632 if (FLAG_parallel_compaction) { |
| 3633 EvacuatePagesInParallel(); |
| 3634 WaitUntilCompactionCompleted(); |
| 3635 } else { |
| 3636 EvacuatePages(); |
| 3637 } |
| 3686 } | 3638 } |
| 3687 | 3639 |
| 3688 // Second pass: find pointers to new space and update them. | 3640 // Second pass: find pointers to new space and update them. |
| 3689 PointersUpdatingVisitor updating_visitor(heap()); | 3641 PointersUpdatingVisitor updating_visitor(heap()); |
| 3690 | 3642 |
| 3691 { | 3643 { |
| 3692 GCTracer::Scope gc_scope(heap()->tracer(), | 3644 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3693 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); | 3645 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); |
| 3694 // Update pointers in to space. | 3646 // Update pointers in to space. |
| 3695 SemiSpaceIterator to_it(heap()->new_space()); | 3647 SemiSpaceIterator to_it(heap()->new_space()); |
| (...skipping 39 matching lines...) |
| 3735 Page* p = evacuation_candidates_[i]; | 3687 Page* p = evacuation_candidates_[i]; |
| 3736 DCHECK(p->IsEvacuationCandidate() || | 3688 DCHECK(p->IsEvacuationCandidate() || |
| 3737 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3689 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3738 | 3690 |
| 3739 if (p->IsEvacuationCandidate()) { | 3691 if (p->IsEvacuationCandidate()) { |
| 3740 UpdateSlotsRecordedIn(p->slots_buffer()); | 3692 UpdateSlotsRecordedIn(p->slots_buffer()); |
| 3741 if (FLAG_trace_fragmentation_verbose) { | 3693 if (FLAG_trace_fragmentation_verbose) { |
| 3742 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), | 3694 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), |
| 3743 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3695 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
| 3744 } | 3696 } |
| 3745 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | |
| 3746 | 3697 |
| 3747 // Important: skip list should be cleared only after roots were updated | 3698 // Important: skip list should be cleared only after roots were updated |
| 3748 // because root iteration traverses the stack and might have to find | 3699 // because root iteration traverses the stack and might have to find |
| 3749 // code objects from non-updated pc pointing into evacuation candidate. | 3700 // code objects from non-updated pc pointing into evacuation candidate. |
| 3750 SkipList* list = p->skip_list(); | 3701 SkipList* list = p->skip_list(); |
| 3751 if (list != NULL) list->Clear(); | 3702 if (list != NULL) list->Clear(); |
| 3752 } | 3703 } else { |
| 3753 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | |
| 3754 if (FLAG_gc_verbose) { | 3704 if (FLAG_gc_verbose) { |
| 3755 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | 3705 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 3756 reinterpret_cast<intptr_t>(p)); | 3706 reinterpret_cast<intptr_t>(p)); |
| 3757 } | 3707 } |
| 3758 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3708 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3759 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 3709 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 3760 | 3710 |
| 3761 switch (space->identity()) { | 3711 switch (space->identity()) { |
| 3762 case OLD_SPACE: | 3712 case OLD_SPACE: |
| 3763 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3713 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3764 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, | 3714 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, |
| 3765 &updating_visitor); | 3715 &updating_visitor); |
| 3766 break; | 3716 break; |
| 3767 case CODE_SPACE: | 3717 case CODE_SPACE: |
| 3768 if (FLAG_zap_code_space) { | 3718 if (FLAG_zap_code_space) { |
| 3769 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3719 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3770 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, | 3720 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, |
| 3771 &updating_visitor); | 3721 &updating_visitor); |
| 3772 } else { | 3722 } else { |
| 3773 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3723 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3774 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, | 3724 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, |
| 3775 &updating_visitor); | 3725 &updating_visitor); |
| 3776 } | 3726 } |
| 3777 break; | 3727 break; |
| 3778 default: | 3728 default: |
| 3779 UNREACHABLE(); | 3729 UNREACHABLE(); |
| 3780 break; | 3730 break; |
| 3781 } | 3731 } |
| 3782 } | 3732 } |
| 3783 if (p->IsEvacuationCandidate() && | |
| 3784 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | |
| 3785 // Case where we've aborted compacting a page. Clear the flag here to | |
| 3786 // avoid releasing the page later on. | |
| 3787 p->ClearEvacuationCandidate(); | |
| 3788 } | |
| 3789 } | 3733 } |
| 3790 } | 3734 } |
| 3791 | 3735 |
| 3792 GCTracer::Scope gc_scope(heap()->tracer(), | 3736 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3793 GCTracer::Scope::MC_UPDATE_MISC_POINTERS); | 3737 GCTracer::Scope::MC_UPDATE_MISC_POINTERS); |
| 3794 | 3738 |
| 3795 heap_->string_table()->Iterate(&updating_visitor); | 3739 heap_->string_table()->Iterate(&updating_visitor); |
| 3796 | 3740 |
| 3797 // Update pointers from external string table. | 3741 // Update pointers from external string table. |
| 3798 heap_->UpdateReferencesInExternalStringTable( | 3742 heap_->UpdateReferencesInExternalStringTable( |
| (...skipping 26 matching lines...) |
| 3825 | 3769 |
| 3826 | 3770 |
| 3827 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 3771 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
| 3828 int npages = evacuation_candidates_.length(); | 3772 int npages = evacuation_candidates_.length(); |
| 3829 for (int i = 0; i < npages; i++) { | 3773 for (int i = 0; i < npages; i++) { |
| 3830 Page* p = evacuation_candidates_[i]; | 3774 Page* p = evacuation_candidates_[i]; |
| 3831 if (!p->IsEvacuationCandidate()) continue; | 3775 if (!p->IsEvacuationCandidate()) continue; |
| 3832 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3776 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3833 space->Free(p->area_start(), p->area_size()); | 3777 space->Free(p->area_start(), p->area_size()); |
| 3834 p->set_scan_on_scavenge(false); | 3778 p->set_scan_on_scavenge(false); |
| 3779 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); |
| 3835 p->ResetLiveBytes(); | 3780 p->ResetLiveBytes(); |
| 3836 space->ReleasePage(p); | 3781 space->ReleasePage(p); |
| 3837 } | 3782 } |
| 3838 evacuation_candidates_.Rewind(0); | 3783 evacuation_candidates_.Rewind(0); |
| 3839 compacting_ = false; | 3784 compacting_ = false; |
| 3840 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); | 3785 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); |
| 3841 heap()->FreeQueuedChunks(); | 3786 heap()->FreeQueuedChunks(); |
| 3842 } | 3787 } |
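Note that the NEW side now frees each candidate's slots-buffer chain here, when the page itself is released, rather than during the pointer-update pass (compare OLD line 3745 above). A plausible shape for that chain walk, assuming the buffers form a singly linked list (illustrative sketch, not the actual V8 definition):

    // Walk the chain, return each buffer to the allocator, clear the head.
    void SlotsBufferAllocator::DeallocateChain(SlotsBuffer** buffer_address) {
      SlotsBuffer* buffer = *buffer_address;
      while (buffer != NULL) {
        SlotsBuffer* next_buffer = buffer->next();
        DeallocateBuffer(buffer);
        buffer = next_buffer;
      }
      *buffer_address = NULL;
    }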
| 3843 | 3788 |
| 3844 | 3789 |
| (...skipping 595 matching lines...) |
| 4440 // EvacuateNewSpaceAndCandidates iterates over new space objects and for | 4385 // EvacuateNewSpaceAndCandidates iterates over new space objects and for |
| 4441 // ArrayBuffers either re-registers them as live or promotes them. This is | 4386 // ArrayBuffers either re-registers them as live or promotes them. This is |
| 4442 // needed to properly free them. | 4387 // needed to properly free them. |
| 4443 heap()->array_buffer_tracker()->FreeDead(false); | 4388 heap()->array_buffer_tracker()->FreeDead(false); |
| 4444 | 4389 |
| 4445 // Clear the marking state of live large objects. | 4390 // Clear the marking state of live large objects. |
| 4446 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); | 4391 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); |
| 4447 | 4392 |
| 4448 // Deallocate evacuated candidate pages. | 4393 // Deallocate evacuated candidate pages. |
| 4449 ReleaseEvacuationCandidates(); | 4394 ReleaseEvacuationCandidates(); |
| 4395 CodeRange* code_range = heap()->isolate()->code_range(); |
| 4396 if (code_range != NULL && code_range->valid()) { |
| 4397 code_range->ReserveEmergencyBlock(); |
| 4398 } |
| 4450 | 4399 |
| 4451 if (FLAG_print_cumulative_gc_stat) { | 4400 if (FLAG_print_cumulative_gc_stat) { |
| 4452 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - | 4401 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - |
| 4453 start_time); | 4402 start_time); |
| 4454 } | 4403 } |
| 4455 | 4404 |
| 4456 #ifdef VERIFY_HEAP | 4405 #ifdef VERIFY_HEAP |
| 4457 if (FLAG_verify_heap && !sweeping_in_progress_) { | 4406 if (FLAG_verify_heap && !sweeping_in_progress_) { |
| 4458 VerifyEvacuation(heap()); | 4407 VerifyEvacuation(heap()); |
| 4459 } | 4408 } |
| (...skipping 107 matching lines...) |
| 4567 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4516 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 4568 if (Marking::IsBlack(mark_bit)) { | 4517 if (Marking::IsBlack(mark_bit)) { |
| 4569 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4518 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
| 4570 RecordRelocSlot(&rinfo, target); | 4519 RecordRelocSlot(&rinfo, target); |
| 4571 } | 4520 } |
| 4572 } | 4521 } |
| 4573 } | 4522 } |
| 4574 | 4523 |
| 4575 } // namespace internal | 4524 } // namespace internal |
| 4576 } // namespace v8 | 4525 } // namespace v8 |