| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
| 6 | 6 |
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
| 9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
| 10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
| (...skipping 39 matching lines...) |
| 50 : // NOLINT | 50 : // NOLINT |
| 51 #ifdef DEBUG | 51 #ifdef DEBUG |
| 52 state_(IDLE), | 52 state_(IDLE), |
| 53 #endif | 53 #endif |
| 54 marking_parity_(ODD_MARKING_PARITY), | 54 marking_parity_(ODD_MARKING_PARITY), |
| 55 compacting_(false), | 55 compacting_(false), |
| 56 was_marked_incrementally_(false), | 56 was_marked_incrementally_(false), |
| 57 sweeping_in_progress_(false), | 57 sweeping_in_progress_(false), |
| 58 parallel_compaction_in_progress_(false), | 58 parallel_compaction_in_progress_(false), |
| 59 pending_sweeper_jobs_semaphore_(0), | 59 pending_sweeper_jobs_semaphore_(0), |
| 60 pending_compaction_jobs_semaphore_(0), | 60 pending_compaction_tasks_semaphore_(0), |
| 61 concurrent_compaction_tasks_active_(0), |
| 61 evacuation_(false), | 62 evacuation_(false), |
| 62 slots_buffer_allocator_(nullptr), | 63 slots_buffer_allocator_(nullptr), |
| 63 migration_slots_buffer_(nullptr), | 64 migration_slots_buffer_(nullptr), |
| 64 heap_(heap), | 65 heap_(heap), |
| 65 marking_deque_memory_(NULL), | 66 marking_deque_memory_(NULL), |
| 66 marking_deque_memory_committed_(0), | 67 marking_deque_memory_committed_(0), |
| 67 code_flusher_(NULL), | 68 code_flusher_(NULL), |
| 68 have_code_to_deoptimize_(false) { | 69 have_code_to_deoptimize_(false) { |
| 69 } | 70 } |
| 70 | 71 |
| (...skipping 396 matching lines...) |
| 467 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 468 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 468 Marking::MarkWhite(Marking::MarkBitFrom(obj)); | 469 Marking::MarkWhite(Marking::MarkBitFrom(obj)); |
| 469 Page::FromAddress(obj->address())->ResetProgressBar(); | 470 Page::FromAddress(obj->address())->ResetProgressBar(); |
| 470 Page::FromAddress(obj->address())->ResetLiveBytes(); | 471 Page::FromAddress(obj->address())->ResetLiveBytes(); |
| 471 } | 472 } |
| 472 } | 473 } |
| 473 | 474 |
| 474 | 475 |
| 475 class MarkCompactCollector::CompactionTask : public v8::Task { | 476 class MarkCompactCollector::CompactionTask : public v8::Task { |
| 476 public: | 477 public: |
| 477 explicit CompactionTask(Heap* heap) : heap_(heap) {} | 478 explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces) |
| 479 : heap_(heap), spaces_(spaces) {} |
| 478 | 480 |
| 479 virtual ~CompactionTask() {} | 481 virtual ~CompactionTask() {} |
| 480 | 482 |
| 481 private: | 483 private: |
| 482 // v8::Task overrides. | 484 // v8::Task overrides. |
| 483 void Run() override { | 485 void Run() override { |
| 484 // TODO(mlippautz, hpayer): EvacuatePages is not thread-safe and can just be | 486 heap_->mark_compact_collector()->EvacuatePages(spaces_); |
| 485 // called by one thread concurrently. | |
| 486 heap_->mark_compact_collector()->EvacuatePages(); | |
| 487 heap_->mark_compact_collector() | 487 heap_->mark_compact_collector() |
| 488 ->pending_compaction_jobs_semaphore_.Signal(); | 488 ->pending_compaction_tasks_semaphore_.Signal(); |
| 489 } | 489 } |
| 490 | 490 |
| 491 Heap* heap_; | 491 Heap* heap_; |
| 492 CompactionSpaceCollection* spaces_; |
| 492 | 493 |
| 493 DISALLOW_COPY_AND_ASSIGN(CompactionTask); | 494 DISALLOW_COPY_AND_ASSIGN(CompactionTask); |
| 494 }; | 495 }; |
| 495 | 496 |
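Note on CompactionTask above: each task now carries its own CompactionSpaceCollection, so it can allocate into task-private compaction spaces without locking and only signals a semaphore when it finishes. A minimal stand-alone sketch of that fork/join shape, with std::thread, std::counting_semaphore, and a plain vector standing in for V8's platform tasks, base::Semaphore, and compaction spaces (all names here are illustrative, not V8 API):

```cpp
#include <semaphore>
#include <thread>
#include <vector>

std::counting_semaphore<64> pending_tasks(0);  // starts at 0, like the V8 member

void RunTask(std::vector<int>* private_space) {
  private_space->push_back(42);  // work against task-private state; no locks
  pending_tasks.release();       // analogous to the Signal() in Run()
}

int main() {
  const int kTasks = 4;
  std::vector<std::vector<int>> spaces(kTasks);  // one private space per task
  std::vector<std::thread> threads;
  for (int i = 0; i < kTasks; i++) threads.emplace_back(RunTask, &spaces[i]);
  for (int i = 0; i < kTasks; i++) pending_tasks.acquire();  // one Wait per task
  for (auto& t : threads) t.join();
  // A sequential merge of the private spaces would follow here, as in
  // EvacuatePagesInParallel below.
}
```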
| 496 | 497 |
| 497 class MarkCompactCollector::SweeperTask : public v8::Task { | 498 class MarkCompactCollector::SweeperTask : public v8::Task { |
| 498 public: | 499 public: |
| 499 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} | 500 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} |
| 500 | 501 |
| 501 virtual ~SweeperTask() {} | 502 virtual ~SweeperTask() {} |
| (...skipping 2814 matching lines...) |
| 3316 while (it.has_next()) { | 3317 while (it.has_next()) { |
| 3317 NewSpacePage* p = it.next(); | 3318 NewSpacePage* p = it.next(); |
| 3318 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); | 3319 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); |
| 3319 } | 3320 } |
| 3320 | 3321 |
| 3321 heap_->IncrementYoungSurvivorsCounter(survivors_size); | 3322 heap_->IncrementYoungSurvivorsCounter(survivors_size); |
| 3322 new_space->set_age_mark(new_space->top()); | 3323 new_space->set_age_mark(new_space->top()); |
| 3323 } | 3324 } |
| 3324 | 3325 |
| 3325 | 3326 |
| 3326 void MarkCompactCollector::EvacuateLiveObjectsFromPage( | 3327 bool MarkCompactCollector::EvacuateLiveObjectsFromPage( |
| 3327 Page* p, PagedSpace* target_space) { | 3328 Page* p, PagedSpace* target_space) { |
| 3328 AlwaysAllocateScope always_allocate(isolate()); | 3329 AlwaysAllocateScope always_allocate(isolate()); |
| 3329 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); | 3330 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3330 p->SetWasSwept(); | |
| 3331 | 3331 |
| 3332 int offsets[16]; | 3332 int offsets[16]; |
| 3333 | 3333 |
| 3334 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { | 3334 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { |
| 3335 Address cell_base = it.CurrentCellBase(); | 3335 Address cell_base = it.CurrentCellBase(); |
| 3336 MarkBit::CellType* cell = it.CurrentCell(); | 3336 MarkBit::CellType* cell = it.CurrentCell(); |
| 3337 | 3337 |
| 3338 if (*cell == 0) continue; | 3338 if (*cell == 0) continue; |
| 3339 | 3339 |
| 3340 int live_objects = MarkWordToObjectStarts(*cell, offsets); | 3340 int live_objects = MarkWordToObjectStarts(*cell, offsets); |
| 3341 for (int i = 0; i < live_objects; i++) { | 3341 for (int i = 0; i < live_objects; i++) { |
| 3342 Address object_addr = cell_base + offsets[i] * kPointerSize; | 3342 Address object_addr = cell_base + offsets[i] * kPointerSize; |
| 3343 HeapObject* object = HeapObject::FromAddress(object_addr); | 3343 HeapObject* object = HeapObject::FromAddress(object_addr); |
| 3344 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3344 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
| 3345 | 3345 |
| 3346 int size = object->Size(); | 3346 int size = object->Size(); |
| 3347 AllocationAlignment alignment = object->RequiredAlignment(); | 3347 AllocationAlignment alignment = object->RequiredAlignment(); |
| 3348 HeapObject* target_object = nullptr; | 3348 HeapObject* target_object = nullptr; |
| 3349 AllocationResult allocation = target_space->AllocateRaw(size, alignment); | 3349 AllocationResult allocation = target_space->AllocateRaw(size, alignment); |
| 3350 if (!allocation.To(&target_object)) { | 3350 if (!allocation.To(&target_object)) { |
| 3351 // If allocation failed, use emergency memory and re-try allocation. | 3351 return false; |
| 3352 CHECK(target_space->HasEmergencyMemory()); | |
| 3353 target_space->UseEmergencyMemory(); | |
| 3354 allocation = target_space->AllocateRaw(size, alignment); | |
| 3355 } | 3352 } |
| 3356 if (!allocation.To(&target_object)) { | |
| 3357 // OS refused to give us memory. | |
| 3358 V8::FatalProcessOutOfMemory("Evacuation"); | |
| 3359 return; | |
| 3360 } | |
| 3361 | |
| 3362 MigrateObject(target_object, object, size, target_space->identity()); | 3353 MigrateObject(target_object, object, size, target_space->identity()); |
| 3363 DCHECK(object->map_word().IsForwardingAddress()); | 3354 DCHECK(object->map_word().IsForwardingAddress()); |
| 3364 } | 3355 } |
| 3365 | 3356 |
| 3366 // Clear marking bits for current cell. | 3357 // Clear marking bits for current cell. |
| 3367 *cell = 0; | 3358 *cell = 0; |
| 3368 } | 3359 } |
| 3369 p->ResetLiveBytes(); | 3360 p->ResetLiveBytes(); |
| 3361 return true; |
| 3370 } | 3362 } |
| 3371 | 3363 |
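For context on EvacuateLiveObjectsFromPage: a mark-bit cell covers a run of words on the page, and MarkWordToObjectStarts expands the set bits of one cell into word offsets from the cell base, which is why targets are computed as cell_base + offsets[i] * kPointerSize. Note also that the function now reports allocation failure to its caller instead of dipping into emergency memory; that return value is what drives the abort path below. A hedged sketch of the bit decoding (illustrative, not V8's implementation; the 16-entry buffer in the real code is presumably enough because objects occupy at least two words):

```cpp
#include <cstdint>

// Each set bit in `cell` marks an object start; the decoded values are the
// bit positions, i.e. word offsets from the cell's base address.
int DecodeObjectStarts(uint32_t cell, int offsets[16]) {
  int count = 0;
  for (int bit = 0; cell != 0; bit++, cell >>= 1) {
    if (cell & 1) offsets[count++] = bit;
  }
  return count;
}
```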
| 3372 | 3364 |
| 3373 void MarkCompactCollector::EvacuatePagesInParallel() { | 3365 void MarkCompactCollector::EvacuatePagesInParallel() { |
| 3366 if (evacuation_candidates_.length() == 0) return; |
| 3367 |
| 3368 int num_tasks = 1; |
| 3369 if (FLAG_parallel_compaction) { |
| 3370 num_tasks = NumberOfParallelCompactionTasks(); |
| 3371 } |
| 3372 |
| 3373 // Set up compaction spaces. |
| 3374 CompactionSpaceCollection** compaction_spaces_for_tasks = |
| 3375 new CompactionSpaceCollection*[num_tasks]; |
| 3376 for (int i = 0; i < num_tasks; i++) { |
| 3377 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); |
| 3378 } |
| 3379 |
| 3380 compaction_spaces_for_tasks[0]->Get(OLD_SPACE)->MoveOverFreeMemory( |
| 3381 heap()->old_space()); |
| 3382 compaction_spaces_for_tasks[0] |
| 3383 ->Get(CODE_SPACE) |
| 3384 ->MoveOverFreeMemory(heap()->code_space()); |
| 3385 |
| 3374 parallel_compaction_in_progress_ = true; | 3386 parallel_compaction_in_progress_ = true; |
| 3375 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 3387 // Kick off parallel tasks. |
| 3376 new CompactionTask(heap()), v8::Platform::kShortRunningTask); | 3388 for (int i = 1; i < num_tasks; i++) { |
| 3389 concurrent_compaction_tasks_active_++; |
| 3390 V8::GetCurrentPlatform()->CallOnBackgroundThread( |
| 3391 new CompactionTask(heap(), compaction_spaces_for_tasks[i]), |
| 3392 v8::Platform::kShortRunningTask); |
| 3393 } |
| 3394 |
| 3395 // Contribute on the main thread. Counter and signal are in principle not needed. |
| 3396 concurrent_compaction_tasks_active_++; |
| 3397 EvacuatePages(compaction_spaces_for_tasks[0]); |
| 3398 pending_compaction_tasks_semaphore_.Signal(); |
| 3399 |
| 3400 WaitUntilCompactionCompleted(); |
| 3401 |
| 3402 // Merge back memory (compacted and unused) from compaction spaces. |
| 3403 for (int i = 0; i < num_tasks; i++) { |
| 3404 heap()->old_space()->MergeCompactionSpace( |
| 3405 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); |
| 3406 heap()->code_space()->MergeCompactionSpace( |
| 3407 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); |
| 3408 delete compaction_spaces_for_tasks[i]; |
| 3409 } |
| 3410 delete[] compaction_spaces_for_tasks; |
| 3411 |
| 3412 // Finalize sequentially. |
| 3413 const int num_pages = evacuation_candidates_.length(); |
| 3414 int abandoned_pages = 0; |
| 3415 for (int i = 0; i < num_pages; i++) { |
| 3416 Page* p = evacuation_candidates_[i]; |
| 3417 switch (p->parallel_compaction_state().Value()) { |
| 3418 case MemoryChunk::kCompactingAborted: |
| 3419 // We have partially compacted the page, i.e., some objects may have |
| 3420 // moved, others are still in place. |
| 3421 // We need to: |
| 3422 // - Leave the evacuation candidate flag for later processing of |
| 3423 // slots buffer entries. |
| 3424 // - Leave the slots buffer there for processing of entries added by |
| 3425 // the write barrier. |
| 3426 // - Rescan the page as slot recording in the migration buffer only |
| 3427 // happens upon moving (which we potentially didn't do). |
| 3428 // - Leave the page in the list of pages of a space since we could not |
| 3429 // fully evacuate it. |
| 3430 DCHECK(p->IsEvacuationCandidate()); |
| 3431 p->SetFlag(Page::RESCAN_ON_EVACUATION); |
| 3432 abandoned_pages++; |
| 3433 break; |
| 3434 case MemoryChunk::kCompactingFinalize: |
| 3435 DCHECK(p->IsEvacuationCandidate()); |
| 3436 p->SetWasSwept(); |
| 3437 p->Unlink(); |
| 3438 break; |
| 3439 case MemoryChunk::kCompactingDone: |
| 3440 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); |
| 3441 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3442 break; |
| 3443 default: |
| 3444 // We should not observe kCompactingInProgress here. |
| 3445 UNREACHABLE(); |
| 3446 } |
| 3447 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); |
| 3448 } |
| 3449 if (num_pages > 0) { |
| 3450 if (FLAG_trace_fragmentation) { |
| 3451 if (abandoned_pages != 0) { |
| 3452 PrintF( |
| 3453 " Abandoned (at least partially) %d out of %d page compactions due" |
| 3454 " to lack of memory\n", |
| 3455 abandoned_pages, num_pages); |
| 3456 } else { |
| 3457 PrintF(" Compacted %d pages\n", num_pages); |
| 3458 } |
| 3459 } |
| 3460 } |
| 3377 } | 3461 } |
| 3378 | 3462 |
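The sequential finalization above is driven by a small per-page state machine, reconstructed here from the diff: pages start out (and popular pages stay) in kCompactingDone, a compacting thread claims a page into kCompactingInProgress, and evacuation ends in kCompactingFinalize on success or kCompactingAborted when an allocation fails mid-page; the finalizer then resets every page to kCompactingDone. An illustrative encoding of those transitions (not V8 code):

```cpp
enum CompactingState {
  kCompactingDone,        // idle; also the terminal state for popular pages
  kCompactingInProgress,  // claimed by exactly one compacting thread
  kCompactingFinalize,    // fully evacuated: unlink page, mark it swept
  kCompactingAborted      // partially evacuated: rescan on evacuation
};

CompactingState NextState(CompactingState state, bool evacuation_succeeded) {
  switch (state) {
    case kCompactingDone:
      return kCompactingInProgress;  // the TrySetValue() claim
    case kCompactingInProgress:
      return evacuation_succeeded ? kCompactingFinalize : kCompactingAborted;
    default:
      return kCompactingDone;  // sequential finalizer resets the page
  }
}
```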
| 3379 | 3463 |
| 3380 void MarkCompactCollector::WaitUntilCompactionCompleted() { | 3464 void MarkCompactCollector::WaitUntilCompactionCompleted() { |
| 3381 pending_compaction_jobs_semaphore_.Wait(); | 3465 for (; concurrent_compaction_tasks_active_ > 0; concurrent_compaction_tasks_active_--) { |
| 3466 pending_compaction_tasks_semaphore_.Wait(); |
| 3467 } |
| 3382 parallel_compaction_in_progress_ = false; | 3468 parallel_compaction_in_progress_ = false; |
| 3383 } | 3469 } |
| 3384 | 3470 |
| 3385 | 3471 |
| 3386 void MarkCompactCollector::EvacuatePages() { | 3472 void MarkCompactCollector::EvacuatePages( |
| 3387 int npages = evacuation_candidates_.length(); | 3473 CompactionSpaceCollection* compaction_spaces) { |
| 3388 int abandoned_pages = 0; | 3474 for (int i = 0; i < evacuation_candidates_.length(); i++) { |
| 3389 for (int i = 0; i < npages; i++) { | |
| 3390 Page* p = evacuation_candidates_[i]; | 3475 Page* p = evacuation_candidates_[i]; |
| 3391 DCHECK(p->IsEvacuationCandidate() || | 3476 DCHECK(p->IsEvacuationCandidate() || |
| 3392 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3477 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3393 DCHECK(static_cast<int>(p->parallel_sweeping()) == | 3478 DCHECK(static_cast<int>(p->parallel_sweeping()) == |
| 3394 MemoryChunk::SWEEPING_DONE); | 3479 MemoryChunk::SWEEPING_DONE); |
| 3395 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3480 if (p->parallel_compaction_state().TrySetValue( |
| 3396 // Allocate emergency memory for the case when compaction fails due to out | 3481 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { |
| 3397 // of memory. | 3482 if (p->IsEvacuationCandidate()) { |
| 3398 if (!space->HasEmergencyMemory()) { | 3483 DCHECK_EQ(p->parallel_compaction_state().Value(), |
| 3399 space->CreateEmergencyMemory(); // If the OS lets us. | 3484 MemoryChunk::kCompactingInProgress); |
| 3400 } | 3485 if (EvacuateLiveObjectsFromPage( |
| 3401 if (p->IsEvacuationCandidate()) { | 3486 p, compaction_spaces->Get(p->owner()->identity()))) { |
| 3402 // During compaction we might have to request a new page in order to free | 3487 p->parallel_compaction_state().SetValue( |
| 3403 // up a page. Check that we actually got an emergency page above so we | 3488 MemoryChunk::kCompactingFinalize); |
| 3404 // can guarantee that this succeeds. | 3489 } else { |
| 3405 if (space->HasEmergencyMemory()) { | 3490 p->parallel_compaction_state().SetValue( |
| 3406 EvacuateLiveObjectsFromPage(p, static_cast<PagedSpace*>(p->owner())); | 3491 MemoryChunk::kCompactingAborted); |
| 3407 // Unlink the page from the list of pages here. We must not iterate | 3492 } |
| 3408 // over that page later (e.g. when scan on scavenge pages are | |
| 3409 // processed). The page itself will be freed later and is still | |
| 3410 // reachable from the evacuation candidates list. | |
| 3411 p->Unlink(); | |
| 3412 } else { | 3493 } else { |
| 3413 // Without room for expansion evacuation is not guaranteed to succeed. | 3494 // There could be popular pages in the list of evacuation candidates |
| 3414 // Pessimistically abandon unevacuated pages. | 3495 // which we do not compact. |
| 3415 for (int j = i; j < npages; j++) { | 3496 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); |
| 3416 Page* page = evacuation_candidates_[j]; | |
| 3417 slots_buffer_allocator_->DeallocateChain( | |
| 3418 page->slots_buffer_address()); | |
| 3419 page->ClearEvacuationCandidate(); | |
| 3420 page->SetFlag(Page::RESCAN_ON_EVACUATION); | |
| 3421 } | |
| 3422 abandoned_pages = npages - i; | |
| 3423 break; | |
| 3424 } | 3497 } |
| 3425 } | 3498 } |
| 3426 } | 3499 } |
| 3427 if (npages > 0) { | |
| 3428 // Release emergency memory. | |
| 3429 PagedSpaces spaces(heap()); | |
| 3430 for (PagedSpace* space = spaces.next(); space != NULL; | |
| 3431 space = spaces.next()) { | |
| 3432 if (space->HasEmergencyMemory()) { | |
| 3433 space->FreeEmergencyMemory(); | |
| 3434 } | |
| 3435 } | |
| 3436 if (FLAG_trace_fragmentation) { | |
| 3437 if (abandoned_pages != 0) { | |
| 3438 PrintF( | |
| 3439 " Abandon %d out of %d page defragmentations due to lack of " | |
| 3440 "memory\n", | |
| 3441 abandoned_pages, npages); | |
| 3442 } else { | |
| 3443 PrintF(" Defragmented %d pages\n", npages); | |
| 3444 } | |
| 3445 } | |
| 3446 } | |
| 3447 } | 3500 } |
| 3448 | 3501 |
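The TrySetValue call in EvacuatePages is what makes it safe to hand the same candidate list to several threads: whichever thread wins the compare-and-swap from kCompactingDone to kCompactingInProgress owns the page, and every other thread skips it. A stand-alone sketch of that claim pattern, with std::atomic standing in for V8's AtomicValue (names assumed from the diff):

```cpp
#include <atomic>

enum State { kCompactingDone, kCompactingInProgress };

struct PageState {
  std::atomic<State> state{kCompactingDone};

  // Returns true iff the calling thread won ownership of the page.
  bool TryClaim() {
    State expected = kCompactingDone;
    return state.compare_exchange_strong(expected, kCompactingInProgress);
  }
};
```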
| 3449 | 3502 |
| 3450 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3503 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
| 3451 public: | 3504 public: |
| 3452 virtual Object* RetainAs(Object* object) { | 3505 virtual Object* RetainAs(Object* object) { |
| 3453 if (object->IsHeapObject()) { | 3506 if (object->IsHeapObject()) { |
| 3454 HeapObject* heap_object = HeapObject::cast(object); | 3507 HeapObject* heap_object = HeapObject::cast(object); |
| 3455 MapWord map_word = heap_object->map_word(); | 3508 MapWord map_word = heap_object->map_word(); |
| 3456 if (map_word.IsForwardingAddress()) { | 3509 if (map_word.IsForwardingAddress()) { |
| (...skipping 165 matching lines...) |
| 3622 GCTracer::Scope gc_scope(heap()->tracer(), | 3675 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3623 GCTracer::Scope::MC_SWEEP_NEWSPACE); | 3676 GCTracer::Scope::MC_SWEEP_NEWSPACE); |
| 3624 EvacuationScope evacuation_scope(this); | 3677 EvacuationScope evacuation_scope(this); |
| 3625 EvacuateNewSpace(); | 3678 EvacuateNewSpace(); |
| 3626 } | 3679 } |
| 3627 | 3680 |
| 3628 { | 3681 { |
| 3629 GCTracer::Scope gc_scope(heap()->tracer(), | 3682 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3630 GCTracer::Scope::MC_EVACUATE_PAGES); | 3683 GCTracer::Scope::MC_EVACUATE_PAGES); |
| 3631 EvacuationScope evacuation_scope(this); | 3684 EvacuationScope evacuation_scope(this); |
| 3632 if (FLAG_parallel_compaction) { | 3685 EvacuatePagesInParallel(); |
| 3633 EvacuatePagesInParallel(); | |
| 3634 WaitUntilCompactionCompleted(); | |
| 3635 } else { | |
| 3636 EvacuatePages(); | |
| 3637 } | |
| 3638 } | 3686 } |
| 3639 | 3687 |
| 3640 // Second pass: find pointers to new space and update them. | 3688 // Second pass: find pointers to new space and update them. |
| 3641 PointersUpdatingVisitor updating_visitor(heap()); | 3689 PointersUpdatingVisitor updating_visitor(heap()); |
| 3642 | 3690 |
| 3643 { | 3691 { |
| 3644 GCTracer::Scope gc_scope(heap()->tracer(), | 3692 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3645 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); | 3693 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); |
| 3646 // Update pointers in to space. | 3694 // Update pointers in to space. |
| 3647 SemiSpaceIterator to_it(heap()->new_space()); | 3695 SemiSpaceIterator to_it(heap()->new_space()); |
| (...skipping 39 matching lines...) |
| 3687 Page* p = evacuation_candidates_[i]; | 3735 Page* p = evacuation_candidates_[i]; |
| 3688 DCHECK(p->IsEvacuationCandidate() || | 3736 DCHECK(p->IsEvacuationCandidate() || |
| 3689 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3737 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
| 3690 | 3738 |
| 3691 if (p->IsEvacuationCandidate()) { | 3739 if (p->IsEvacuationCandidate()) { |
| 3692 UpdateSlotsRecordedIn(p->slots_buffer()); | 3740 UpdateSlotsRecordedIn(p->slots_buffer()); |
| 3693 if (FLAG_trace_fragmentation_verbose) { | 3741 if (FLAG_trace_fragmentation_verbose) { |
| 3694 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), | 3742 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), |
| 3695 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3743 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
| 3696 } | 3744 } |
| 3745 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); |
| 3697 | 3746 |
| 3698 // Important: skip list should be cleared only after roots were updated | 3747 // Important: skip list should be cleared only after roots were updated |
| 3699 // because root iteration traverses the stack and might have to find | 3748 // because root iteration traverses the stack and might have to find |
| 3700 // code objects from non-updated pc pointing into evacuation candidate. | 3749 // code objects from non-updated pc pointing into evacuation candidate. |
| 3701 SkipList* list = p->skip_list(); | 3750 SkipList* list = p->skip_list(); |
| 3702 if (list != NULL) list->Clear(); | 3751 if (list != NULL) list->Clear(); |
| 3703 } else { | 3752 } |
| 3753 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 3704 if (FLAG_gc_verbose) { | 3754 if (FLAG_gc_verbose) { |
| 3705 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | 3755 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 3706 reinterpret_cast<intptr_t>(p)); | 3756 reinterpret_cast<intptr_t>(p)); |
| 3707 } | 3757 } |
| 3708 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3758 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3709 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 3759 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 3710 | 3760 |
| 3711 switch (space->identity()) { | 3761 switch (space->identity()) { |
| 3712 case OLD_SPACE: | 3762 case OLD_SPACE: |
| 3713 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3763 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3714 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, | 3764 IGNORE_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, |
| 3715 &updating_visitor); | 3765 &updating_visitor); |
| 3716 break; | 3766 break; |
| 3717 case CODE_SPACE: | 3767 case CODE_SPACE: |
| 3718 if (FLAG_zap_code_space) { | 3768 if (FLAG_zap_code_space) { |
| 3719 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3769 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3720 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, | 3770 REBUILD_SKIP_LIST, ZAP_FREE_SPACE>(space, NULL, p, |
| 3721 &updating_visitor); | 3771 &updating_visitor); |
| 3722 } else { | 3772 } else { |
| 3723 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, | 3773 Sweep<SWEEP_AND_VISIT_LIVE_OBJECTS, SWEEP_ON_MAIN_THREAD, |
| 3724 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, | 3774 REBUILD_SKIP_LIST, IGNORE_FREE_SPACE>(space, NULL, p, |
| 3725 &updating_visitor); | 3775 &updating_visitor); |
| 3726 } | 3776 } |
| 3727 break; | 3777 break; |
| 3728 default: | 3778 default: |
| 3729 UNREACHABLE(); | 3779 UNREACHABLE(); |
| 3730 break; | 3780 break; |
| 3731 } | 3781 } |
| 3732 } | 3782 } |
| 3783 if (p->IsEvacuationCandidate() && |
| 3784 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| 3785 // Case where we've aborted compacting a page. Clear the flag here to |
| 3786 // avoid releasing the page later on. |
| 3787 p->ClearEvacuationCandidate(); |
| 3788 } |
| 3733 } | 3789 } |
| 3734 } | 3790 } |
| 3735 | 3791 |
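The update-and-sweep loop above distinguishes three page outcomes purely by flag combination: fully evacuated candidates get their slots buffer processed and deallocated, aborted candidates are swept in place and then lose their candidate flag so they are not released later, and popular pages (rescan flag only) are swept in place. A small illustrative encoding of that triage (reconstructed from the diff, not V8 code):

```cpp
enum PageOutcome {
  kEvacuatedFully,  // candidate only: update slots, clear skip list
  kAbortedRescan,   // candidate + RESCAN_ON_EVACUATION: sweep in place,
                    // then ClearEvacuationCandidate()
  kPopularRescan    // RESCAN_ON_EVACUATION only: sweep in place
};

PageOutcome Classify(bool is_candidate, bool rescan_on_evacuation) {
  if (is_candidate && rescan_on_evacuation) return kAbortedRescan;
  if (is_candidate) return kEvacuatedFully;
  return kPopularRescan;
}
```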
| 3736 GCTracer::Scope gc_scope(heap()->tracer(), | 3792 GCTracer::Scope gc_scope(heap()->tracer(), |
| 3737 GCTracer::Scope::MC_UPDATE_MISC_POINTERS); | 3793 GCTracer::Scope::MC_UPDATE_MISC_POINTERS); |
| 3738 | 3794 |
| 3739 heap_->string_table()->Iterate(&updating_visitor); | 3795 heap_->string_table()->Iterate(&updating_visitor); |
| 3740 | 3796 |
| 3741 // Update pointers from external string table. | 3797 // Update pointers from external string table. |
| 3742 heap_->UpdateReferencesInExternalStringTable( | 3798 heap_->UpdateReferencesInExternalStringTable( |
| (...skipping 26 matching lines...) |
| 3769 | 3825 |
| 3770 | 3826 |
| 3771 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 3827 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
| 3772 int npages = evacuation_candidates_.length(); | 3828 int npages = evacuation_candidates_.length(); |
| 3773 for (int i = 0; i < npages; i++) { | 3829 for (int i = 0; i < npages; i++) { |
| 3774 Page* p = evacuation_candidates_[i]; | 3830 Page* p = evacuation_candidates_[i]; |
| 3775 if (!p->IsEvacuationCandidate()) continue; | 3831 if (!p->IsEvacuationCandidate()) continue; |
| 3776 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3832 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3777 space->Free(p->area_start(), p->area_size()); | 3833 space->Free(p->area_start(), p->area_size()); |
| 3778 p->set_scan_on_scavenge(false); | 3834 p->set_scan_on_scavenge(false); |
| 3779 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | |
| 3780 p->ResetLiveBytes(); | 3835 p->ResetLiveBytes(); |
| 3781 space->ReleasePage(p); | 3836 space->ReleasePage(p); |
| 3782 } | 3837 } |
| 3783 evacuation_candidates_.Rewind(0); | 3838 evacuation_candidates_.Rewind(0); |
| 3784 compacting_ = false; | 3839 compacting_ = false; |
| 3785 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); | 3840 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); |
| 3786 heap()->FreeQueuedChunks(); | 3841 heap()->FreeQueuedChunks(); |
| 3787 } | 3842 } |
| 3788 | 3843 |
| 3789 | 3844 |
| (...skipping 595 matching lines...) |
| 4385 // EvacuateNewSpaceAndCandidates iterates over new space objects and for | 4440 // EvacuateNewSpaceAndCandidates iterates over new space objects and for |
| 4386 // ArrayBuffers either re-registers them as live or promotes them. This is | 4441 // ArrayBuffers either re-registers them as live or promotes them. This is |
| 4387 // needed to properly free them. | 4442 // needed to properly free them. |
| 4388 heap()->array_buffer_tracker()->FreeDead(false); | 4443 heap()->array_buffer_tracker()->FreeDead(false); |
| 4389 | 4444 |
| 4390 // Clear the marking state of live large objects. | 4445 // Clear the marking state of live large objects. |
| 4391 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); | 4446 heap_->lo_space()->ClearMarkingStateOfLiveObjects(); |
| 4392 | 4447 |
| 4393 // Deallocate evacuated candidate pages. | 4448 // Deallocate evacuated candidate pages. |
| 4394 ReleaseEvacuationCandidates(); | 4449 ReleaseEvacuationCandidates(); |
| 4395 CodeRange* code_range = heap()->isolate()->code_range(); | |
| 4396 if (code_range != NULL && code_range->valid()) { | |
| 4397 code_range->ReserveEmergencyBlock(); | |
| 4398 } | |
| 4399 | 4450 |
| 4400 if (FLAG_print_cumulative_gc_stat) { | 4451 if (FLAG_print_cumulative_gc_stat) { |
| 4401 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - | 4452 heap_->tracer()->AddSweepingTime(base::OS::TimeCurrentMillis() - |
| 4402 start_time); | 4453 start_time); |
| 4403 } | 4454 } |
| 4404 | 4455 |
| 4405 #ifdef VERIFY_HEAP | 4456 #ifdef VERIFY_HEAP |
| 4406 if (FLAG_verify_heap && !sweeping_in_progress_) { | 4457 if (FLAG_verify_heap && !sweeping_in_progress_) { |
| 4407 VerifyEvacuation(heap()); | 4458 VerifyEvacuation(heap()); |
| 4408 } | 4459 } |
| (...skipping 107 matching lines...) |
| 4516 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4567 MarkBit mark_bit = Marking::MarkBitFrom(host); |
| 4517 if (Marking::IsBlack(mark_bit)) { | 4568 if (Marking::IsBlack(mark_bit)) { |
| 4518 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4569 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
| 4519 RecordRelocSlot(&rinfo, target); | 4570 RecordRelocSlot(&rinfo, target); |
| 4520 } | 4571 } |
| 4521 } | 4572 } |
| 4522 } | 4573 } |
| 4523 | 4574 |
| 4524 } // namespace internal | 4575 } // namespace internal |
| 4525 } // namespace v8 | 4576 } // namespace v8 |