OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 1657 matching lines...) | |
1668 heap_->UpdateAllocationSite<Heap::kCached>(object, | 1668 heap_->UpdateAllocationSite<Heap::kCached>(object, |
1669 local_pretenuring_feedback_); | 1669 local_pretenuring_feedback_); |
1670 int size = object->Size(); | 1670 int size = object->Size(); |
1671 HeapObject* target_object = nullptr; | 1671 HeapObject* target_object = nullptr; |
1672 if (heap_->ShouldBePromoted(object->address(), size) && | 1672 if (heap_->ShouldBePromoted(object->address(), size) && |
1673 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object, | 1673 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object, |
1674 &target_object)) { | 1674 &target_object)) { |
1675 // If we end up needing more special cases, we should factor this out. | 1675 // If we end up needing more special cases, we should factor this out. |
1676 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { | 1676 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { |
1677 heap_->array_buffer_tracker()->Promote( | 1677 heap_->array_buffer_tracker()->Promote( |
1678 JSArrayBuffer::cast(target_object)); | 1678 JSArrayBuffer::cast(target_object), |
1679 reinterpret_cast<JSArrayBuffer*>(object)); | |
1679 } | 1680 } |
1680 promoted_size_ += size; | 1681 promoted_size_ += size; |
1681 return true; | 1682 return true; |
1682 } | 1683 } |
1683 HeapObject* target = nullptr; | 1684 HeapObject* target = nullptr; |
1684 AllocationSpace space = AllocateTargetObject(object, &target); | 1685 AllocationSpace space = AllocateTargetObject(object, &target); |
1685 MigrateObject(HeapObject::cast(target), object, size, space); | 1686 MigrateObject(HeapObject::cast(target), object, size, space); |
1686 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | 1687 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { |
1687 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | 1688 heap_->array_buffer_tracker()->SemiSpaceCopy( |
1689 JSArrayBuffer::cast(target), | |
1690 reinterpret_cast<JSArrayBuffer*>(object)); | |
1688 } | 1691 } |
1689 semispace_copied_size_ += size; | 1692 semispace_copied_size_ += size; |
1690 return true; | 1693 return true; |
1691 } | 1694 } |
1692 | 1695 |
1693 intptr_t promoted_size() { return promoted_size_; } | 1696 intptr_t promoted_size() { return promoted_size_; } |
1694 intptr_t semispace_copied_size() { return semispace_copied_size_; } | 1697 intptr_t semispace_copied_size() { return semispace_copied_size_; } |
1695 | 1698 |
1696 private: | 1699 private: |
1697 enum NewSpaceAllocationMode { | 1700 enum NewSpaceAllocationMode { |
(...skipping 106 matching lines...) | |
1804 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { | 1807 static void TryMoveToOldSpace(Page* page, PagedSpace* owner) { |
1805 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { | 1808 if (page->heap()->new_space()->ReplaceWithEmptyPage(page)) { |
1806 Page* new_page = Page::ConvertNewToOld(page, owner); | 1809 Page* new_page = Page::ConvertNewToOld(page, owner); |
1807 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); | 1810 new_page->SetFlag(Page::PAGE_NEW_OLD_PROMOTION); |
1808 } | 1811 } |
1809 } | 1812 } |
1810 | 1813 |
1811 inline bool Visit(HeapObject* object) { | 1814 inline bool Visit(HeapObject* object) { |
1812 if (V8_UNLIKELY(object->IsJSArrayBuffer())) { | 1815 if (V8_UNLIKELY(object->IsJSArrayBuffer())) { |
1813 object->GetHeap()->array_buffer_tracker()->Promote( | 1816 object->GetHeap()->array_buffer_tracker()->Promote( |
1814 JSArrayBuffer::cast(object)); | 1817 JSArrayBuffer::cast(object), JSArrayBuffer::cast(object)); |
1815 } | 1818 } |
1816 RecordMigratedSlotVisitor visitor; | 1819 RecordMigratedSlotVisitor visitor; |
1817 object->IterateBodyFast(&visitor); | 1820 object->IterateBodyFast(&visitor); |
1818 promoted_size_ += object->Size(); | 1821 promoted_size_ += object->Size(); |
1819 return true; | 1822 return true; |
1820 } | 1823 } |
1821 | 1824 |
1822 intptr_t promoted_size() { return promoted_size_; } | 1825 intptr_t promoted_size() { return promoted_size_; } |
1823 | 1826 |
1824 private: | 1827 private: |
1825 intptr_t promoted_size_; | 1828 intptr_t promoted_size_; |
1826 }; | 1829 }; |
1827 | 1830 |
1828 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1831 class MarkCompactCollector::EvacuateOldSpaceVisitor final |
1829 : public MarkCompactCollector::EvacuateVisitorBase { | 1832 : public MarkCompactCollector::EvacuateVisitorBase { |
1830 public: | 1833 public: |
1831 EvacuateOldSpaceVisitor(Heap* heap, | 1834 EvacuateOldSpaceVisitor(Heap* heap, |
1832 CompactionSpaceCollection* compaction_spaces) | 1835 CompactionSpaceCollection* compaction_spaces) |
1833 : EvacuateVisitorBase(heap, compaction_spaces) {} | 1836 : EvacuateVisitorBase(heap, compaction_spaces) {} |
1834 | 1837 |
1835 inline bool Visit(HeapObject* object) override { | 1838 inline bool Visit(HeapObject* object) override { |
1836 CompactionSpace* target_space = compaction_spaces_->Get( | 1839 CompactionSpace* target_space = compaction_spaces_->Get( |
1837 Page::FromAddress(object->address())->owner()->identity()); | 1840 Page::FromAddress(object->address())->owner()->identity()); |
1838 HeapObject* target_object = nullptr; | 1841 HeapObject* target_object = nullptr; |
1839 if (TryEvacuateObject(target_space, object, &target_object)) { | 1842 if (TryEvacuateObject(target_space, object, &target_object)) { |
1840 DCHECK(object->map_word().IsForwardingAddress()); | 1843 DCHECK(object->map_word().IsForwardingAddress()); |
1844 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { | |
1845 heap_->array_buffer_tracker()->Compact( | |
1846 JSArrayBuffer::cast(target_object), | |
1847 reinterpret_cast<JSArrayBuffer*>(object)); | |
1848 } | |
1841 return true; | 1849 return true; |
1842 } | 1850 } |
1851 if (V8_UNLIKELY(object->IsJSArrayBuffer())) { | |
1852 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object)); | |
Hannes Payer (out of office)
2016/05/11 11:12:34
Why do you need to mark it as live in the evacuation…
Michael Lippautz
2016/05/11 18:43:26
You are right, done.
| |
1853 } | |
1843 return false; | 1854 return false; |
1844 } | 1855 } |
1845 }; | 1856 }; |
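
For orientation: the ArrayBufferTracker entry points used by the visitors above, as a minimal declaration-only sketch inferred from the call sites in this patch. The parameter names and the comments are assumptions, not the actual header.

// Sketch inferred from the call sites in this CL; parameter names are guesses.
class JSArrayBuffer;

class ArrayBufferTracker {
 public:
  // New-space buffer promoted to old space. Both addresses are passed so the
  // entry can move from the source page's tracking to the target's; the page
  // promotion visitor passes the same object twice because objects do not
  // move when a whole page is converted from new to old space.
  void Promote(JSArrayBuffer* target, JSArrayBuffer* source);
  // Buffer copied between the new-space semispaces.
  void SemiSpaceCopy(JSArrayBuffer* target, JSArrayBuffer* source);
  // Old-space buffer relocated during compaction.
  void Compact(JSArrayBuffer* target, JSArrayBuffer* source);
  // Buffer that survives in place; only its liveness is recorded.
  void MarkLive(JSArrayBuffer* buffer);
};
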
1846 | 1857 |
1847 class MarkCompactCollector::EvacuateRecordOnlyVisitor final | 1858 class MarkCompactCollector::EvacuateRecordOnlyVisitor final |
1848 : public MarkCompactCollector::HeapObjectVisitor { | 1859 : public MarkCompactCollector::HeapObjectVisitor { |
1849 public: | 1860 public: |
1850 explicit EvacuateRecordOnlyVisitor(AllocationSpace space) : space_(space) {} | 1861 explicit EvacuateRecordOnlyVisitor(Heap* heap, AllocationSpace space) |
1862 : heap_(heap), space_(space) {} | |
1851 | 1863 |
1852 inline bool Visit(HeapObject* object) { | 1864 inline bool Visit(HeapObject* object) { |
1853 if (space_ == OLD_SPACE) { | 1865 if (space_ == OLD_SPACE) { |
1854 RecordMigratedSlotVisitor visitor; | 1866 RecordMigratedSlotVisitor visitor; |
1855 object->IterateBody(&visitor); | 1867 object->IterateBody(&visitor); |
1868 if (V8_UNLIKELY(object->IsJSArrayBuffer())) { | |
1869 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(object)); | |
1870 } | |
1856 } else { | 1871 } else { |
1857 DCHECK_EQ(space_, CODE_SPACE); | 1872 DCHECK_EQ(space_, CODE_SPACE); |
1858 // Add a typed slot for the whole code object. | 1873 // Add a typed slot for the whole code object. |
1859 RememberedSet<OLD_TO_OLD>::InsertTyped( | 1874 RememberedSet<OLD_TO_OLD>::InsertTyped( |
1860 Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT, | 1875 Page::FromAddress(object->address()), RELOCATED_CODE_OBJECT, |
1861 object->address()); | 1876 object->address()); |
1862 } | 1877 } |
1863 return true; | 1878 return true; |
1864 } | 1879 } |
1865 | 1880 |
1866 private: | 1881 private: |
1882 Heap* heap_; | |
1867 AllocationSpace space_; | 1883 AllocationSpace space_; |
1868 }; | 1884 }; |
1869 | 1885 |
1870 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 1886 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
1871 PageIterator it(space); | 1887 PageIterator it(space); |
1872 while (it.has_next()) { | 1888 while (it.has_next()) { |
1873 Page* p = it.next(); | 1889 Page* p = it.next(); |
1874 if (!p->IsFlagSet(Page::BLACK_PAGE)) { | 1890 if (!p->IsFlagSet(Page::BLACK_PAGE)) { |
1875 DiscoverGreyObjectsOnPage(p); | 1891 DiscoverGreyObjectsOnPage(p); |
1876 } | 1892 } |
(...skipping 1288 matching lines...) | |
3165 case kPageNewToOld: | 3181 case kPageNewToOld: |
3166 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor); | 3182 result = EvacuateSinglePage<kKeepMarking>(page, &new_space_page_visitor); |
3167 DCHECK(result); | 3183 DCHECK(result); |
3168 USE(result); | 3184 USE(result); |
3169 break; | 3185 break; |
3170 case kObjectsOldToOld: | 3186 case kObjectsOldToOld: |
3171 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_); | 3187 result = EvacuateSinglePage<kClearMarkbits>(page, &old_space_visitor_); |
3172 if (!result) { | 3188 if (!result) { |
3173 // Aborted compaction page. We can record slots here to have them | 3189 // Aborted compaction page. We can record slots here to have them |
3174 // processed in parallel later on. | 3190 // processed in parallel later on. |
3175 EvacuateRecordOnlyVisitor record_visitor(page->owner()->identity()); | 3191 EvacuateRecordOnlyVisitor record_visitor(page->heap(), |
3192 page->owner()->identity()); | |
3176 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor); | 3193 result = EvacuateSinglePage<kKeepMarking>(page, &record_visitor); |
3177 DCHECK(result); | 3194 DCHECK(result); |
3178 USE(result); | 3195 USE(result); |
3179 // We need to return failure here to indicate that we want this page | 3196 // We need to return failure here to indicate that we want this page |
3180 // added to the sweeper. | 3197 // added to the sweeper. |
3181 return false; | 3198 return false; |
3182 } | 3199 } |
3183 break; | 3200 break; |
3184 default: | 3201 default: |
3185 UNREACHABLE(); | 3202 UNREACHABLE(); |
(...skipping 222 matching lines...) | |
3408 Bitmap::Clear(p); | 3425 Bitmap::Clear(p); |
3409 | 3426 |
3410 if (free_start != p->area_end()) { | 3427 if (free_start != p->area_end()) { |
3411 int size = static_cast<int>(p->area_end() - free_start); | 3428 int size = static_cast<int>(p->area_end() - free_start); |
3412 if (free_space_mode == ZAP_FREE_SPACE) { | 3429 if (free_space_mode == ZAP_FREE_SPACE) { |
3413 memset(free_start, 0xcc, size); | 3430 memset(free_start, 0xcc, size); |
3414 } | 3431 } |
3415 freed_bytes = space->UnaccountedFree(free_start, size); | 3432 freed_bytes = space->UnaccountedFree(free_start, size); |
3416 max_freed_bytes = Max(freed_bytes, max_freed_bytes); | 3433 max_freed_bytes = Max(freed_bytes, max_freed_bytes); |
3417 } | 3434 } |
3435 p->FreeDeadArraybuffersAndResetTracker(); | |
3418 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3436 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3419 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); | 3437 return FreeList::GuaranteedAllocatable(static_cast<int>(max_freed_bytes)); |
3420 } | 3438 } |
3421 | 3439 |
3422 void MarkCompactCollector::InvalidateCode(Code* code) { | 3440 void MarkCompactCollector::InvalidateCode(Code* code) { |
3423 if (heap_->incremental_marking()->IsCompacting() && | 3441 if (heap_->incremental_marking()->IsCompacting() && |
3424 !ShouldSkipEvacuationSlotRecording(code)) { | 3442 !ShouldSkipEvacuationSlotRecording(code)) { |
3425 DCHECK(compacting_); | 3443 DCHECK(compacting_); |
3426 | 3444 |
3427 // If the object is white then no slots were recorded on it yet. | 3445 // If the object is white then no slots were recorded on it yet. |
(...skipping 122 matching lines...) | |
3550 // because root iteration traverses the stack and might have to find | 3568 // because root iteration traverses the stack and might have to find |
3551 // code objects from non-updated pc pointing into evacuation candidate. | 3569 // code objects from non-updated pc pointing into evacuation candidate. |
3552 SkipList* list = p->skip_list(); | 3570 SkipList* list = p->skip_list(); |
3553 if (list != NULL) list->Clear(); | 3571 if (list != NULL) list->Clear(); |
3554 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3572 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
3555 sweeper().AddLatePage(p->owner()->identity(), p); | 3573 sweeper().AddLatePage(p->owner()->identity(), p); |
3556 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); | 3574 p->ClearFlag(Page::COMPACTION_WAS_ABORTED); |
3557 } | 3575 } |
3558 } | 3576 } |
3559 | 3577 |
3560 // EvacuateNewSpaceAndCandidates iterates over new space objects and for | 3578 // Free up backing stores for array buffers that died in new space in this |
3561 // ArrayBuffers either re-registers them as live or promotes them. This is | 3579 // cycle. |
3562 // needed to properly free them. | 3580 heap()->array_buffer_tracker()->FreeDeadInNewSpace(); |
Hannes Payer (out of office)
2016/05/11 11:12:34
Aborted ones will be swept after compaction and FreeDead…
Michael Lippautz
2016/05/11 18:43:26
1) This call is about new space pages
2) "FreeDead
| |
3563 heap()->array_buffer_tracker()->FreeDead(false); | |
3564 | 3581 |
3565 // Deallocate evacuated candidate pages. | 3582 // Deallocate evacuated candidate pages. |
3566 ReleaseEvacuationCandidates(); | 3583 ReleaseEvacuationCandidates(); |
3567 } | 3584 } |
3568 | 3585 |
3569 #ifdef VERIFY_HEAP | 3586 #ifdef VERIFY_HEAP |
3570 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { | 3587 if (FLAG_verify_heap && !sweeper().sweeping_in_progress()) { |
3571 VerifyEvacuation(heap()); | 3588 VerifyEvacuation(heap()); |
3572 } | 3589 } |
3573 #endif | 3590 #endif |
(...skipping 249 matching lines...) | |
3823 PageIterator it(space); | 3840 PageIterator it(space); |
3824 | 3841 |
3825 int will_be_swept = 0; | 3842 int will_be_swept = 0; |
3826 bool unused_page_present = false; | 3843 bool unused_page_present = false; |
3827 | 3844 |
3828 while (it.has_next()) { | 3845 while (it.has_next()) { |
3829 Page* p = it.next(); | 3846 Page* p = it.next(); |
3830 DCHECK(p->SweepingDone()); | 3847 DCHECK(p->SweepingDone()); |
3831 | 3848 |
3832 if (p->IsEvacuationCandidate()) { | 3849 if (p->IsEvacuationCandidate()) { |
3850 p->FreeDeadArraybuffersAndResetTracker(); | |
3833 // Will be processed in EvacuateNewSpaceAndCandidates. | 3851 // Will be processed in EvacuateNewSpaceAndCandidates. |
3834 DCHECK(evacuation_candidates_.length() > 0); | 3852 DCHECK(evacuation_candidates_.length() > 0); |
3835 continue; | 3853 continue; |
3836 } | 3854 } |
3837 | 3855 |
3838 // We can not sweep black pages, since all mark bits are set for these | 3856 // We can not sweep black pages, since all mark bits are set for these |
3839 // pages. | 3857 // pages. |
3840 if (p->IsFlagSet(Page::BLACK_PAGE)) { | 3858 if (p->IsFlagSet(Page::BLACK_PAGE)) { |
3859 p->FreeDeadArraybuffersAndResetTracker(); | |
Hannes Payer (out of office)
2016/05/11 11:12:34
There will be no dead array buffers on a black page.
Michael Lippautz
2016/05/11 18:43:26
True, but we need the "*AndResetTracker" part of the…
| |
3841 Bitmap::Clear(p); | 3860 Bitmap::Clear(p); |
3842 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); | 3861 p->concurrent_sweeping_state().SetValue(Page::kSweepingDone); |
3843 p->ClearFlag(Page::BLACK_PAGE); | 3862 p->ClearFlag(Page::BLACK_PAGE); |
3844 // TODO(hpayer): Free unused memory of last black page. | 3863 // TODO(hpayer): Free unused memory of last black page. |
3845 continue; | 3864 continue; |
3846 } | 3865 } |
3847 | 3866 |
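
The black-page exchange above hinges on the reset half of the call: even when nothing on the page is dead, the per-page tracker must forget this cycle's liveness flags before the mark bits are cleared. A hypothetical body showing that split; the method name is from this CL, while all types and logic are invented for illustration.

#include <cstdlib>
#include <vector>

// Invented stand-ins; only FreeDeadArraybuffersAndResetTracker is a real name.
struct TrackedBuffer {
  void* backing_store;
  bool was_marked_live;
};

struct PageTracker {
  std::vector<TrackedBuffer> entries;
};

struct Page {
  PageTracker tracker_;

  void FreeDeadArraybuffersAndResetTracker() {
    for (TrackedBuffer& entry : tracker_.entries) {
      // On a black page every entry is live, so this loop frees nothing...
      if (!entry.was_marked_live) std::free(entry.backing_store);
    }
    // ...but the reset must still run: the liveness flags are only valid for
    // the cycle whose mark bits are about to be cleared.
    tracker_.entries.clear();
  }
};
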
3848 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { | 3867 if (p->IsFlagSet(Page::NEVER_ALLOCATE_ON_PAGE)) { |
3849 // We need to sweep the page to get it into an iterable state again. Note | 3868 // We need to sweep the page to get it into an iterable state again. Note |
3850 // that this adds unusable memory into the free list that is later on | 3869 // that this adds unusable memory into the free list that is later on |
(...skipping 96 matching lines...) | |
3947 MarkBit mark_bit = Marking::MarkBitFrom(host); | 3966 MarkBit mark_bit = Marking::MarkBitFrom(host); |
3948 if (Marking::IsBlack(mark_bit)) { | 3967 if (Marking::IsBlack(mark_bit)) { |
3949 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 3968 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
3950 RecordRelocSlot(host, &rinfo, target); | 3969 RecordRelocSlot(host, &rinfo, target); |
3951 } | 3970 } |
3952 } | 3971 } |
3953 } | 3972 } |
3954 | 3973 |
3955 } // namespace internal | 3974 } // namespace internal |
3956 } // namespace v8 | 3975 } // namespace v8 |