| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 22 matching lines...) |
| 33 #include "execution.h" | 33 #include "execution.h" |
| 34 #include "gdb-jit.h" | 34 #include "gdb-jit.h" |
| 35 #include "global-handles.h" | 35 #include "global-handles.h" |
| 36 #include "heap-profiler.h" | 36 #include "heap-profiler.h" |
| 37 #include "ic-inl.h" | 37 #include "ic-inl.h" |
| 38 #include "incremental-marking.h" | 38 #include "incremental-marking.h" |
| 39 #include "mark-compact.h" | 39 #include "mark-compact.h" |
| 40 #include "objects-visiting.h" | 40 #include "objects-visiting.h" |
| 41 #include "objects-visiting-inl.h" | 41 #include "objects-visiting-inl.h" |
| 42 #include "stub-cache.h" | 42 #include "stub-cache.h" |
| 43 #include "sweeper-thread.h" |
| 43 | 44 |
| 44 namespace v8 { | 45 namespace v8 { |
| 45 namespace internal { | 46 namespace internal { |
| 46 | 47 |
| 47 | 48 |
| 48 const char* Marking::kWhiteBitPattern = "00"; | 49 const char* Marking::kWhiteBitPattern = "00"; |
| 49 const char* Marking::kBlackBitPattern = "10"; | 50 const char* Marking::kBlackBitPattern = "10"; |
| 50 const char* Marking::kGreyBitPattern = "11"; | 51 const char* Marking::kGreyBitPattern = "11"; |
| 51 const char* Marking::kImpossibleBitPattern = "01"; | 52 const char* Marking::kImpossibleBitPattern = "01"; |
| 52 | 53 |
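The four patterns above encode an object's color in two adjacent mark bits: the first character is the object's own mark bit, the second is the bit reached via MarkBit::Next() (the two are cleared together in the loop below). A minimal sketch of the decoding, with plain bools standing in for the real MarkBit class:

    // Illustrative decoder for the two-bit color encoding above; the real
    // predicates live on Marking (IsWhite/IsGrey/IsBlack). Not V8 code.
    #include <cstdio>

    enum Color { WHITE, BLACK, GREY, IMPOSSIBLE };

    static Color DecodeColor(bool mark_bit, bool next_bit) {
      if (!mark_bit) return next_bit ? IMPOSSIBLE : WHITE;  // "01" never occurs
      return next_bit ? GREY : BLACK;  // "11" = grey, "10" = black
    }

    int main() {
      // White: unmarked. Grey: marked but not yet scanned.
      // Black: marked and fully scanned.
      std::printf("%d %d %d\n",
                  DecodeColor(false, false),  // WHITE
                  DecodeColor(true, false),   // BLACK
                  DecodeColor(true, true));   // GREY
      return 0;
    }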
| (...skipping 443 matching lines...) |
| 496 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 497 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { |
| 497 MarkBit mark_bit = Marking::MarkBitFrom(obj); | 498 MarkBit mark_bit = Marking::MarkBitFrom(obj); |
| 498 mark_bit.Clear(); | 499 mark_bit.Clear(); |
| 499 mark_bit.Next().Clear(); | 500 mark_bit.Next().Clear(); |
| 500 Page::FromAddress(obj->address())->ResetProgressBar(); | 501 Page::FromAddress(obj->address())->ResetProgressBar(); |
| 501 Page::FromAddress(obj->address())->ResetLiveBytes(); | 502 Page::FromAddress(obj->address())->ResetLiveBytes(); |
| 502 } | 503 } |
| 503 } | 504 } |
| 504 | 505 |
| 505 | 506 |
| 507 void MarkCompactCollector::StartSweeperThreads() { |
| 508 SweeperThread::set_sweeping_pending(true); |
| 509 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| 510 heap()->isolate()->sweeper_threads()[i]->StartSweeping(); |
| 511 } |
| 512 } |
| 513 |
| 514 |
| 515 void MarkCompactCollector::WaitUntilSweepingCompleted() { |
| 516 if (SweeperThread::sweeping_pending()) { |
| 517 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| 518 heap()->isolate()->sweeper_threads()[i]->WaitForSweeperThread(); |
| 519 } |
| 520 SweeperThread::set_sweeping_pending(false); |
| 521 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_DATA_SPACE)); |
| 522 StealMemoryFromSweeperThreads(heap()->paged_space(OLD_POINTER_SPACE)); |
| 523 heap()->FreeQueuedChunks(); |
| 524 } |
| 525 } |
| 526 |
| 527 |
| 528 intptr_t MarkCompactCollector:: |
| 529 StealMemoryFromSweeperThreads(PagedSpace* space) { |
| 530 intptr_t freed_bytes = 0; |
| 531 for (int i = 0; i < FLAG_sweeper_threads; i++) { |
| 532 freed_bytes += heap()->isolate()->sweeper_threads()[i]->StealMemory(space); |
| 533 } |
| 534 return freed_bytes; |
| 535 } |
| 536 |
| 537 |
| 538 bool MarkCompactCollector::AreSweeperThreadsActivated() { |
| 539 return heap()->isolate()->sweeper_threads() != NULL; |
| 540 } |
| 541 |
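The three helpers above form a simple fork/join protocol: a global sweeping_pending flag is raised, each SweeperThread is started, and the collector later blocks in WaitUntilSweepingCompleted() before stealing the memory the workers freed. A hedged sketch of that pattern, recast with standard C++ threads rather than V8's own Thread and Semaphore classes (kNumSweepers and the worker body are illustrative stand-ins):

    // Sketch of the start/wait protocol in StartSweeperThreads() and
    // WaitUntilSweepingCompleted(); not V8's actual SweeperThread code.
    #include <atomic>
    #include <thread>
    #include <vector>

    constexpr int kNumSweepers = 2;  // stand-in for FLAG_sweeper_threads
    std::atomic<bool> sweeping_pending(false);

    void SweepPages(std::atomic<long>* freed_bytes) {
      // A real sweeper claims unswept pages and builds a private free list;
      // here we only record that some memory was freed.
      freed_bytes->fetch_add(4096);
    }

    int main() {
      std::atomic<long> freed_bytes(0);
      std::vector<std::thread> sweepers;
      sweeping_pending.store(true);          // StartSweeperThreads()
      for (int i = 0; i < kNumSweepers; i++) {
        sweepers.emplace_back(SweepPages, &freed_bytes);
      }
      if (sweeping_pending.load()) {         // WaitUntilSweepingCompleted()
        for (auto& t : sweepers) t.join();   // WaitForSweeperThread()
        sweeping_pending.store(false);
        // StealMemoryFromSweeperThreads() would now merge each worker's
        // private free list back into the owning space.
      }
      return freed_bytes.load() > 0 ? 0 : 1;
    }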
| 542 |
| 506 bool Marking::TransferMark(Address old_start, Address new_start) { | 543 bool Marking::TransferMark(Address old_start, Address new_start) { |
| 507 // This is only used when resizing an object. | 544 // This is only used when resizing an object. |
| 508 ASSERT(MemoryChunk::FromAddress(old_start) == | 545 ASSERT(MemoryChunk::FromAddress(old_start) == |
| 509 MemoryChunk::FromAddress(new_start)); | 546 MemoryChunk::FromAddress(new_start)); |
| 510 | 547 |
| 511 // If the mark doesn't move, we don't check the color of the object. | 548 // If the mark doesn't move, we don't check the color of the object. |
| 512 // It doesn't matter whether the object is black, since it hasn't changed | 549 // It doesn't matter whether the object is black, since it hasn't changed |
| 513 // size, so the adjustment to the live data count will be zero anyway. | 550 // size, so the adjustment to the live data count will be zero anyway. |
| 514 if (old_start == new_start) return false; | 551 if (old_start == new_start) return false; |
| 515 | 552 |
| (...skipping 282 matching lines...) |
| 798 // variable. | 835 // variable. |
| 799 tracer_ = tracer; | 836 tracer_ = tracer; |
| 800 | 837 |
| 801 #ifdef DEBUG | 838 #ifdef DEBUG |
| 802 ASSERT(state_ == IDLE); | 839 ASSERT(state_ == IDLE); |
| 803 state_ = PREPARE_GC; | 840 state_ = PREPARE_GC; |
| 804 #endif | 841 #endif |
| 805 | 842 |
| 806 ASSERT(!FLAG_never_compact || !FLAG_always_compact); | 843 ASSERT(!FLAG_never_compact || !FLAG_always_compact); |
| 807 | 844 |
| 845 if (AreSweeperThreadsActivated() && FLAG_concurrent_sweeping) { |
| 846 // Instead of waiting, we could also abort the sweeper threads here. |
| 847 WaitUntilSweepingCompleted(); |
| 848 } |
| 849 |
| 808 // Clear marking bits if incremental marking is aborted. | 850 // Clear marking bits if incremental marking is aborted. |
| 809 if (was_marked_incrementally_ && abort_incremental_marking_) { | 851 if (was_marked_incrementally_ && abort_incremental_marking_) { |
| 810 heap()->incremental_marking()->Abort(); | 852 heap()->incremental_marking()->Abort(); |
| 811 ClearMarkbits(); | 853 ClearMarkbits(); |
| 812 AbortCompaction(); | 854 AbortCompaction(); |
| 813 was_marked_incrementally_ = false; | 855 was_marked_incrementally_ = false; |
| 814 } | 856 } |
| 815 | 857 |
| 816 // Don't start compaction if we are in the middle of an incremental | 858 // Don't start compaction if we are in the middle of an incremental |
| 817 // marking cycle. We did not collect any slots. | 859 // marking cycle. We did not collect any slots. |
| (...skipping 2306 matching lines...) |
| 3124 } else { | 3166 } else { |
| 3125 if (FLAG_gc_verbose) { | 3167 if (FLAG_gc_verbose) { |
| 3126 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", | 3168 PrintF("Sweeping 0x%" V8PRIxPTR " during evacuation.\n", |
| 3127 reinterpret_cast<intptr_t>(p)); | 3169 reinterpret_cast<intptr_t>(p)); |
| 3128 } | 3170 } |
| 3129 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3171 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
| 3130 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 3172 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
| 3131 | 3173 |
| 3132 switch (space->identity()) { | 3174 switch (space->identity()) { |
| 3133 case OLD_DATA_SPACE: | 3175 case OLD_DATA_SPACE: |
| 3134 SweepConservatively(space, p); | 3176 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); |
| 3135 break; | 3177 break; |
| 3136 case OLD_POINTER_SPACE: | 3178 case OLD_POINTER_SPACE: |
| 3137 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( | 3179 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, IGNORE_SKIP_LIST>( |
| 3138 space, p, &updating_visitor); | 3180 space, p, &updating_visitor); |
| 3139 break; | 3181 break; |
| 3140 case CODE_SPACE: | 3182 case CODE_SPACE: |
| 3141 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( | 3183 SweepPrecisely<SWEEP_AND_VISIT_LIVE_OBJECTS, REBUILD_SKIP_LIST>( |
| 3142 space, p, &updating_visitor); | 3184 space, p, &updating_visitor); |
| 3143 break; | 3185 break; |
| 3144 default: | 3186 default: |
| (...skipping 338 matching lines...) |
| 3483 } | 3525 } |
| 3484 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; | 3526 uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1; |
| 3485 ASSERT((first_set_bit & cell) == first_set_bit); | 3527 ASSERT((first_set_bit & cell) == first_set_bit); |
| 3486 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); | 3528 int live_objects = MarkWordToObjectStarts(first_set_bit, offsets); |
| 3487 ASSERT(live_objects == 1); | 3529 ASSERT(live_objects == 1); |
| 3488 USE(live_objects); | 3530 USE(live_objects); |
| 3489 return block_address + offsets[0] * kPointerSize; | 3531 return block_address + offsets[0] * kPointerSize; |
| 3490 } | 3532 } |
| 3491 | 3533 |
| 3492 | 3534 |
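The expression ((cell ^ (cell - 1)) + 1) >> 1 above isolates the lowest set bit of cell, i.e. the mark bit of the first live object in that cell; it is equivalent to cell & -cell. A worked example:

    // Worked example of the lowest-set-bit trick in StartOfLiveObject() above.
    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t cell = 0xA8;  // binary 1010 1000
      uint32_t first_set_bit = ((cell ^ (cell - 1)) + 1) >> 1;
      // cell - 1          = 1010 0111 (flips the lowest set bit and below)
      // cell ^ (cell - 1) = 0000 1111 (keeps exactly the flipped bits)
      // + 1               = 0001 0000
      // >> 1              = 0000 1000 (the lowest set bit alone)
      assert(first_set_bit == 0x08);
      assert(first_set_bit == (cell & (0u - cell)));  // same as cell & -cell
      return 0;
    }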
| 3535 template<MarkCompactCollector::SweepingParallelism mode> |
| 3536 static intptr_t Free(PagedSpace* space, |
| 3537 FreeList* free_list, |
| 3538 Address start, |
| 3539 int size) { |
| 3540 if (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY) { |
| 3541 return space->Free(start, size); |
| 3542 } else { |
| 3543 return size - free_list->Free(start, size); |
| 3544 } |
| 3545 } |
| 3546 |
| 3547 |
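Free<mode> resolves the sequential/parallel split at compile time, so the sweeping loops pay no runtime branch and the parallel variant can route blocks to a thread-private free list instead of going through the space. Note the asymmetry: FreeList::Free reports the bytes it could not reuse, so size minus that result matches what the sequential path counts as freed. A self-contained sketch of the same dispatch pattern, with stub types standing in for PagedSpace and FreeList:

    // Compile-time dispatch on the sweeping mode, mirroring Free<mode>()
    // above. SpaceStub and FreeListStub are illustrative stand-ins.
    #include <cstdint>
    #include <cstdio>

    enum SweepingParallelism { SWEEP_SEQUENTIALLY, SWEEP_IN_PARALLEL };

    struct FreeListStub {
      // Like FreeList::Free, returns the bytes it could not reuse.
      intptr_t Free(intptr_t /*start*/, int size) { return size < 32 ? size : 0; }
    };

    struct SpaceStub {
      FreeListStub list;
      intptr_t Free(intptr_t start, int size) {
        return size - list.Free(start, size);  // usable bytes freed
      }
    };

    template <SweepingParallelism mode>
    static intptr_t FreeBlock(SpaceStub* space, FreeListStub* private_list,
                              intptr_t start, int size) {
      if (mode == SWEEP_SEQUENTIALLY) {
        return space->Free(start, size);                // shared free list
      } else {
        return size - private_list->Free(start, size);  // thread-private list
      }
    }

    int main() {
      SpaceStub space;
      FreeListStub private_list;
      // Both paths report the same "usable bytes freed" quantity.
      std::printf("%ld %ld\n",
                  static_cast<long>(
                      FreeBlock<SWEEP_SEQUENTIALLY>(&space, nullptr, 0, 64)),
                  static_cast<long>(
                      FreeBlock<SWEEP_IN_PARALLEL>(&space, &private_list, 0, 64)));
      return 0;
    }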
| 3493 // Sweeps a space conservatively. After this has been done the larger free | 3548 // Sweeps a space conservatively. After this has been done the larger free |
| 3494 // spaces have been put on the free list and the smaller ones have been | 3549 // spaces have been put on the free list and the smaller ones have been |
| 3495 // ignored and left untouched. A free space is always either ignored or put | 3550 // ignored and left untouched. A free space is always either ignored or put |
| 3496 // on the free list, never split up into two parts. This is important | 3551 // on the free list, never split up into two parts. This is important |
| 3497 // because it means that any FreeSpace maps left actually describe a region of | 3552 // because it means that any FreeSpace maps left actually describe a region of |
| 3498 // memory that can be ignored when scanning. Dead objects other than free | 3553 // memory that can be ignored when scanning. Dead objects other than free |
| 3499 // spaces will not contain the free space map. | 3554 // spaces will not contain the free space map. |
| 3500 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, Page* p) { | 3555 template<MarkCompactCollector::SweepingParallelism mode> |
| 3556 intptr_t MarkCompactCollector::SweepConservatively(PagedSpace* space, |
| 3557 FreeList* free_list, |
| 3558 Page* p) { |
| 3501 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); | 3559 ASSERT(!p->IsEvacuationCandidate() && !p->WasSwept()); |
| 3502 double start_time = 0.0; | 3560 ASSERT((mode == MarkCompactCollector::SWEEP_IN_PARALLEL && |
| 3503 if (FLAG_print_cumulative_gc_stat) { | 3561 free_list != NULL) || |
| 3504 start_time = OS::TimeCurrentMillis(); | 3562 (mode == MarkCompactCollector::SWEEP_SEQUENTIALLY && |
| 3505 } | 3563 free_list == NULL)); |
| 3506 | 3564 |
| 3507 MarkBit::CellType* cells = p->markbits()->cells(); | 3565 MarkBit::CellType* cells = p->markbits()->cells(); |
| 3508 p->MarkSweptConservatively(); | 3566 p->MarkSweptConservatively(); |
| 3509 | 3567 |
| 3510 int last_cell_index = | 3568 int last_cell_index = |
| 3511 Bitmap::IndexToCell( | 3569 Bitmap::IndexToCell( |
| 3512 Bitmap::CellAlignIndex( | 3570 Bitmap::CellAlignIndex( |
| 3513 p->AddressToMarkbitIndex(p->area_end()))); | 3571 p->AddressToMarkbitIndex(p->area_end()))); |
| 3514 | 3572 |
| 3515 int cell_index = | 3573 int cell_index = |
| 3516 Bitmap::IndexToCell( | 3574 Bitmap::IndexToCell( |
| 3517 Bitmap::CellAlignIndex( | 3575 Bitmap::CellAlignIndex( |
| 3518 p->AddressToMarkbitIndex(p->area_start()))); | 3576 p->AddressToMarkbitIndex(p->area_start()))); |
| 3519 | 3577 |
| 3520 intptr_t freed_bytes = 0; | 3578 intptr_t freed_bytes = 0; |
| 3521 | 3579 |
| 3522 // This is the start of the 32 word block that we are currently looking at. | 3580 // This is the start of the 32 word block that we are currently looking at. |
| 3523 Address block_address = p->area_start(); | 3581 Address block_address = p->area_start(); |
| 3524 | 3582 |
| 3525 // Skip over all the dead objects at the start of the page and mark them free. | 3583 // Skip over all the dead objects at the start of the page and mark them free. |
| 3526 for (; | 3584 for (; |
| 3527 cell_index < last_cell_index; | 3585 cell_index < last_cell_index; |
| 3528 cell_index++, block_address += 32 * kPointerSize) { | 3586 cell_index++, block_address += 32 * kPointerSize) { |
| 3529 if (cells[cell_index] != 0) break; | 3587 if (cells[cell_index] != 0) break; |
| 3530 } | 3588 } |
| 3531 size_t size = block_address - p->area_start(); | 3589 size_t size = block_address - p->area_start(); |
| 3532 if (cell_index == last_cell_index) { | 3590 if (cell_index == last_cell_index) { |
| 3533 freed_bytes += static_cast<int>(space->Free(p->area_start(), | 3591 freed_bytes += Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); |
| 3534 static_cast<int>(size))); | |
| 3535 ASSERT_EQ(0, p->LiveBytes()); | 3592 ASSERT_EQ(0, p->LiveBytes()); |
| 3536 return freed_bytes; | 3593 return freed_bytes; |
| 3537 } | 3594 } |
| 3538 // Grow the size of the start-of-page free space a little to get up to the | 3595 // Grow the size of the start-of-page free space a little to get up to the |
| 3539 // first live object. | 3596 // first live object. |
| 3540 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); | 3597 Address free_end = StartOfLiveObject(block_address, cells[cell_index]); |
| 3541 // Free the first free space. | 3598 // Free the first free space. |
| 3542 size = free_end - p->area_start(); | 3599 size = free_end - p->area_start(); |
| 3543 freed_bytes += space->Free(p->area_start(), | 3600 freed_bytes += Free<mode>(space, free_list, p->area_start(), static_cast<int>(size)); |
| 3544 static_cast<int>(size)); | 3601 |
| 3545 // The start of the current free area is represented in undigested form by | 3602 // The start of the current free area is represented in undigested form by |
| 3546 // the address of the last 32-word section that contained a live object and | 3603 // the address of the last 32-word section that contained a live object and |
| 3547 // the marking bitmap for that cell, which describes where the live object | 3604 // the marking bitmap for that cell, which describes where the live object |
| 3548 // started. Unless we find a large free space in the bitmap we will not | 3605 // started. Unless we find a large free space in the bitmap we will not |
| 3549 // digest this pair into a real address. We start the iteration here at the | 3606 // digest this pair into a real address. We start the iteration here at the |
| 3550 // first word in the marking bit map that indicates a live object. | 3607 // first word in the marking bit map that indicates a live object. |
| 3551 Address free_start = block_address; | 3608 Address free_start = block_address; |
| 3552 uint32_t free_start_cell = cells[cell_index]; | 3609 uint32_t free_start_cell = cells[cell_index]; |
| 3553 | 3610 |
| 3554 for ( ; | 3611 for ( ; |
| 3555 cell_index < last_cell_index; | 3612 cell_index < last_cell_index; |
| 3556 cell_index++, block_address += 32 * kPointerSize) { | 3613 cell_index++, block_address += 32 * kPointerSize) { |
| 3557 ASSERT(static_cast<unsigned>(cell_index) == | 3614 ASSERT(static_cast<unsigned>(cell_index) == |
| 3558 Bitmap::IndexToCell( | 3615 Bitmap::IndexToCell( |
| 3559 Bitmap::CellAlignIndex( | 3616 Bitmap::CellAlignIndex( |
| 3560 p->AddressToMarkbitIndex(block_address)))); | 3617 p->AddressToMarkbitIndex(block_address)))); |
| 3561 uint32_t cell = cells[cell_index]; | 3618 uint32_t cell = cells[cell_index]; |
| 3562 if (cell != 0) { | 3619 if (cell != 0) { |
| 3563 // We have a live object. Check approximately whether it is more than 32 | 3620 // We have a live object. Check approximately whether it is more than 32 |
| 3564 // words since the last live object. | 3621 // words since the last live object. |
| 3565 if (block_address - free_start > 32 * kPointerSize) { | 3622 if (block_address - free_start > 32 * kPointerSize) { |
| 3566 free_start = DigestFreeStart(free_start, free_start_cell); | 3623 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3567 if (block_address - free_start > 32 * kPointerSize) { | 3624 if (block_address - free_start > 32 * kPointerSize) { |
| 3568 // Now that we know the exact start of the free space it still looks | 3625 // Now that we know the exact start of the free space it still looks |
| 3569 // like we have a large enough free space to be worth bothering with, | 3626 // like we have a large enough free space to be worth bothering with, |
| 3570 // so now we need to find the start of the first live object at the | 3627 // so now we need to find the start of the first live object at the |
| 3571 // end of the free space. | 3628 // end of the free space. |
| 3572 free_end = StartOfLiveObject(block_address, cell); | 3629 free_end = StartOfLiveObject(block_address, cell); |
| 3573 freed_bytes += space->Free(free_start, | 3630 freed_bytes += Free<mode>(space, free_list, free_start, |
| 3574 static_cast<int>(free_end - free_start)); | 3631 static_cast<int>(free_end - free_start)); |
| 3575 } | 3632 } |
| 3576 } | 3633 } |
| 3577 // Update our undigested record of where the current free area started. | 3634 // Update our undigested record of where the current free area started. |
| 3578 free_start = block_address; | 3635 free_start = block_address; |
| 3579 free_start_cell = cell; | 3636 free_start_cell = cell; |
| 3580 // Clear marking bits for current cell. | 3637 // Clear marking bits for current cell. |
| 3581 cells[cell_index] = 0; | 3638 cells[cell_index] = 0; |
| 3582 } | 3639 } |
| 3583 } | 3640 } |
| 3584 | 3641 |
| 3585 // Handle the free space at the end of the page. | 3642 // Handle the free space at the end of the page. |
| 3586 if (block_address - free_start > 32 * kPointerSize) { | 3643 if (block_address - free_start > 32 * kPointerSize) { |
| 3587 free_start = DigestFreeStart(free_start, free_start_cell); | 3644 free_start = DigestFreeStart(free_start, free_start_cell); |
| 3588 freed_bytes += space->Free(free_start, | 3645 freed_bytes += Free<mode>(space, free_list, free_start, |
| 3589 static_cast<int>(block_address - free_start)); | 3646 static_cast<int>(block_address - free_start)); |
| 3590 } | 3647 } |
| 3591 | 3648 |
| 3592 p->ResetLiveBytes(); | 3649 p->ResetLiveBytes(); |
| 3650 return freed_bytes; |
| 3651 } |
| 3593 | 3652 |
| 3594 if (FLAG_print_cumulative_gc_stat) { | 3653 |
| 3595 space->heap()->AddSweepingTime(OS::TimeCurrentMillis() - start_time); | 3654 void MarkCompactCollector::SweepInParallel(PagedSpace* space, |
| 3655 FreeList* private_free_list, |
| 3656 FreeList* free_list) { |
| 3657 PageIterator it(space); |
| 3658 while (it.has_next()) { |
| 3659 Page* p = it.next(); |
| 3660 |
| 3661 if (p->TryParallelSweeping()) { |
| 3662 SweepConservatively<SWEEP_IN_PARALLEL>(space, private_free_list, p); |
| 3663 free_list->Concatenate(private_free_list); |
| 3664 } |
| 3596 } | 3665 } |
| 3597 return freed_bytes; | |
| 3598 } | 3666 } |
| 3599 | 3667 |
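SweepInParallel relies on two things to stay thread-safe: TryParallelSweeping() acts as a per-page claim token (the PARALLEL_CONSERVATIVE case below tags pages with set_parallel_sweeping(1), and claiming presumably flips that flag atomically so no two sweepers take the same page), and each sweeper fills a private free list that is merged into the shared one with a single Concatenate() call, confining contention to that merge. A hedged sketch of the claiming idea using an atomic exchange (PageStub is a stand-in, not V8's Page):

    // Illustrative page claiming, approximating what Page::TryParallelSweeping()
    // presumably does with the parallel_sweeping() flag.
    #include <atomic>

    struct PageStub {
      std::atomic<int> parallel_sweeping;  // 1 = tagged for parallel sweeping

      bool TryParallelSweeping() {
        // Only the sweeper that flips 1 -> 0 wins the page.
        int expected = 1;
        return parallel_sweeping.compare_exchange_strong(expected, 0);
      }
    };

    int main() {
      PageStub p;
      p.parallel_sweeping.store(1);           // set_parallel_sweeping(1)
      bool first = p.TryParallelSweeping();   // true: this sweeper claims it
      bool second = p.TryParallelSweeping();  // false: already claimed
      return (first && !second) ? 0 : 1;
    }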
| 3600 | 3668 |
| 3601 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { | 3669 void MarkCompactCollector::SweepSpace(PagedSpace* space, SweeperType sweeper) { |
| 3602 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || | 3670 space->set_was_swept_conservatively(sweeper == CONSERVATIVE || |
| 3603 sweeper == LAZY_CONSERVATIVE); | 3671 sweeper == LAZY_CONSERVATIVE); |
| 3604 | |
| 3605 space->ClearStats(); | 3672 space->ClearStats(); |
| 3606 | 3673 |
| 3607 PageIterator it(space); | 3674 PageIterator it(space); |
| 3608 | 3675 |
| 3609 intptr_t freed_bytes = 0; | 3676 intptr_t freed_bytes = 0; |
| 3610 int pages_swept = 0; | 3677 int pages_swept = 0; |
| 3611 bool lazy_sweeping_active = false; | 3678 bool lazy_sweeping_active = false; |
| 3612 bool unused_page_present = false; | 3679 bool unused_page_present = false; |
| 3613 | 3680 |
| 3614 while (it.has_next()) { | 3681 while (it.has_next()) { |
| 3615 Page* p = it.next(); | 3682 Page* p = it.next(); |
| 3616 | 3683 |
| 3684 ASSERT(p->parallel_sweeping() == 0); |
| 3617 // Clear sweeping flags indicating that marking bits are still intact. | 3685 // Clear sweeping flags indicating that marking bits are still intact. |
| 3618 p->ClearSweptPrecisely(); | 3686 p->ClearSweptPrecisely(); |
| 3619 p->ClearSweptConservatively(); | 3687 p->ClearSweptConservatively(); |
| 3620 | 3688 |
| 3621 if (p->IsEvacuationCandidate()) { | 3689 if (p->IsEvacuationCandidate()) { |
| 3622 ASSERT(evacuation_candidates_.length() > 0); | 3690 ASSERT(evacuation_candidates_.length() > 0); |
| 3623 continue; | 3691 continue; |
| 3624 } | 3692 } |
| 3625 | 3693 |
| 3626 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { | 3694 if (p->IsFlagSet(Page::RESCAN_ON_EVACUATION)) { |
| (...skipping 25 matching lines...) |
| 3652 space->IncreaseUnsweptFreeBytes(p); | 3720 space->IncreaseUnsweptFreeBytes(p); |
| 3653 continue; | 3721 continue; |
| 3654 } | 3722 } |
| 3655 | 3723 |
| 3656 switch (sweeper) { | 3724 switch (sweeper) { |
| 3657 case CONSERVATIVE: { | 3725 case CONSERVATIVE: { |
| 3658 if (FLAG_gc_verbose) { | 3726 if (FLAG_gc_verbose) { |
| 3659 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", | 3727 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively.\n", |
| 3660 reinterpret_cast<intptr_t>(p)); | 3728 reinterpret_cast<intptr_t>(p)); |
| 3661 } | 3729 } |
| 3662 SweepConservatively(space, p); | 3730 SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); |
| 3663 pages_swept++; | 3731 pages_swept++; |
| 3664 break; | 3732 break; |
| 3665 } | 3733 } |
| 3666 case LAZY_CONSERVATIVE: { | 3734 case LAZY_CONSERVATIVE: { |
| 3667 if (FLAG_gc_verbose) { | 3735 if (FLAG_gc_verbose) { |
| 3668 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n", | 3736 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively as needed.\n", |
| 3669 reinterpret_cast<intptr_t>(p)); | 3737 reinterpret_cast<intptr_t>(p)); |
| 3670 } | 3738 } |
| 3671 freed_bytes += SweepConservatively(space, p); | 3739 freed_bytes += SweepConservatively<SWEEP_SEQUENTIALLY>(space, NULL, p); |
| 3672 pages_swept++; | 3740 pages_swept++; |
| 3673 space->SetPagesToSweep(p->next_page()); | 3741 space->SetPagesToSweep(p->next_page()); |
| 3674 lazy_sweeping_active = true; | 3742 lazy_sweeping_active = true; |
| 3675 break; | 3743 break; |
| 3676 } | 3744 } |
| 3745 case PARALLEL_CONSERVATIVE: { |
| 3746 if (FLAG_gc_verbose) { |
| 3747 PrintF("Sweeping 0x%" V8PRIxPTR " conservatively in parallel.\n", |
| 3748 reinterpret_cast<intptr_t>(p)); |
| 3749 } |
| 3750 p->set_parallel_sweeping(1); |
| 3751 break; |
| 3752 } |
| 3677 case PRECISE: { | 3753 case PRECISE: { |
| 3678 if (FLAG_gc_verbose) { | 3754 if (FLAG_gc_verbose) { |
| 3679 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", | 3755 PrintF("Sweeping 0x%" V8PRIxPTR " precisely.\n", |
| 3680 reinterpret_cast<intptr_t>(p)); | 3756 reinterpret_cast<intptr_t>(p)); |
| 3681 } | 3757 } |
| 3682 if (space->identity() == CODE_SPACE) { | 3758 if (space->identity() == CODE_SPACE) { |
| 3683 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); | 3759 SweepPrecisely<SWEEP_ONLY, REBUILD_SKIP_LIST>(space, p, NULL); |
| 3684 } else { | 3760 } else { |
| 3685 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); | 3761 SweepPrecisely<SWEEP_ONLY, IGNORE_SKIP_LIST>(space, p, NULL); |
| 3686 } | 3762 } |
| (...skipping 19 matching lines...) |
| 3706 | 3782 |
| 3707 void MarkCompactCollector::SweepSpaces() { | 3783 void MarkCompactCollector::SweepSpaces() { |
| 3708 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); | 3784 GCTracer::Scope gc_scope(tracer_, GCTracer::Scope::MC_SWEEP); |
| 3709 #ifdef DEBUG | 3785 #ifdef DEBUG |
| 3710 state_ = SWEEP_SPACES; | 3786 state_ = SWEEP_SPACES; |
| 3711 #endif | 3787 #endif |
| 3712 SweeperType how_to_sweep = | 3788 SweeperType how_to_sweep = |
| 3713 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; | 3789 FLAG_lazy_sweeping ? LAZY_CONSERVATIVE : CONSERVATIVE; |
| 3714 if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE; | 3790 if (FLAG_expose_gc) how_to_sweep = CONSERVATIVE; |
| 3715 if (sweep_precisely_) how_to_sweep = PRECISE; | 3791 if (sweep_precisely_) how_to_sweep = PRECISE; |
| 3792 if (AreSweeperThreadsActivated()) how_to_sweep = PARALLEL_CONSERVATIVE; |
| 3716 // Noncompacting collections simply sweep the spaces to clear the mark | 3793 // Noncompacting collections simply sweep the spaces to clear the mark |
| 3717 // bits and free the nonlive blocks (for old and map spaces). We sweep | 3794 // bits and free the nonlive blocks (for old and map spaces). We sweep |
| 3718 // the map space last because freeing non-live maps overwrites them and | 3795 // the map space last because freeing non-live maps overwrites them and |
| 3719 // the other spaces rely on possibly non-live maps to get the sizes for | 3796 // the other spaces rely on possibly non-live maps to get the sizes for |
| 3720 // non-live objects. | 3797 // non-live objects. |
| 3798 |
| 3721 SweepSpace(heap()->old_pointer_space(), how_to_sweep); | 3799 SweepSpace(heap()->old_pointer_space(), how_to_sweep); |
| 3722 SweepSpace(heap()->old_data_space(), how_to_sweep); | 3800 SweepSpace(heap()->old_data_space(), how_to_sweep); |
| 3723 | 3801 |
| 3724 RemoveDeadInvalidatedCode(); | 3802 RemoveDeadInvalidatedCode(); |
| 3725 SweepSpace(heap()->code_space(), PRECISE); | 3803 SweepSpace(heap()->code_space(), PRECISE); |
| 3726 | 3804 |
| 3727 SweepSpace(heap()->cell_space(), PRECISE); | 3805 SweepSpace(heap()->cell_space(), PRECISE); |
| 3728 | 3806 |
| 3729 EvacuateNewSpaceAndCandidates(); | 3807 EvacuateNewSpaceAndCandidates(); |
| 3730 | 3808 |
| 3809 if (AreSweeperThreadsActivated()) { |
| 3810 // TODO(hpayer): The sweeper threads should be started after sweeping |
| 3811 // the old data space. |
| 3812 StartSweeperThreads(); |
| 3813 if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) { |
| 3814 WaitUntilSweepingCompleted(); |
| 3815 } |
| 3816 } |
| 3817 |
| 3731 // ClearNonLiveTransitions depends on precise sweeping of map space to | 3818 // ClearNonLiveTransitions depends on precise sweeping of map space to |
| 3732 // detect whether unmarked map became dead in this collection or in one | 3819 // detect whether unmarked map became dead in this collection or in one |
| 3733 // of the previous ones. | 3820 // of the previous ones. |
| 3734 SweepSpace(heap()->map_space(), PRECISE); | 3821 SweepSpace(heap()->map_space(), PRECISE); |
| 3735 | 3822 |
| 3736 // Deallocate unmarked objects and clear marked bits for marked objects. | 3823 // Deallocate unmarked objects and clear marked bits for marked objects. |
| 3737 heap_->lo_space()->FreeUnmarkedObjects(); | 3824 heap_->lo_space()->FreeUnmarkedObjects(); |
| 3738 } | 3825 } |
| 3739 | 3826 |
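The flag interplay above determines when the collector waits for the sweepers: pages are only tagged during SweepSpace(), the threads start after evacuation, and FLAG_parallel_sweeping without FLAG_concurrent_sweeping degenerates to a stop-the-world parallel sweep because SweepSpaces() blocks right away, while with concurrent sweeping the wait moves to the next cycle's Prepare(). A compact sketch of that decision (the FLAG_* variables stand in for V8's real command-line flags):

    // Sketch of the wait decision in SweepSpaces()/Prepare() above.
    #include <cstdio>

    bool FLAG_parallel_sweeping = true;
    bool FLAG_concurrent_sweeping = false;

    const char* WhenToWait(bool sweeper_threads_activated) {
      if (!sweeper_threads_activated) {
        return "no sweeper threads: sweep on the main thread";
      }
      if (FLAG_parallel_sweeping && !FLAG_concurrent_sweeping) {
        // Workers are started and joined inside the pause: parallel,
        // but still stop-the-world.
        return "wait at the end of SweepSpaces()";
      }
      // Workers keep sweeping while the mutator runs; the next GC pause
      // waits for them in Prepare() before touching mark bits again.
      return "wait in the next Prepare()";
    }

    int main() {
      std::printf("%s\n", WhenToWait(true));
      return 0;
    }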
| 3740 | 3827 |
| (...skipping 187 matching lines...) |
| 3928 while (buffer != NULL) { | 4015 while (buffer != NULL) { |
| 3929 SlotsBuffer* next_buffer = buffer->next(); | 4016 SlotsBuffer* next_buffer = buffer->next(); |
| 3930 DeallocateBuffer(buffer); | 4017 DeallocateBuffer(buffer); |
| 3931 buffer = next_buffer; | 4018 buffer = next_buffer; |
| 3932 } | 4019 } |
| 3933 *buffer_address = NULL; | 4020 *buffer_address = NULL; |
| 3934 } | 4021 } |
| 3935 | 4022 |
| 3936 | 4023 |
| 3937 } } // namespace v8::internal | 4024 } } // namespace v8::internal |