OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 302 matching lines...) | |
313 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() { | 313 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() { |
314 { | 314 { |
315 GCTracer::Scope gc_scope(heap()->tracer(), | 315 GCTracer::Scope gc_scope(heap()->tracer(), |
316 GCTracer::Scope::MC_CLEAR_STORE_BUFFER); | 316 GCTracer::Scope::MC_CLEAR_STORE_BUFFER); |
317 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); | 317 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); |
318 } | 318 } |
319 | 319 |
320 { | 320 { |
321 GCTracer::Scope gc_scope(heap()->tracer(), | 321 GCTracer::Scope gc_scope(heap()->tracer(), |
322 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER); | 322 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER); |
323 int number_of_pages = evacuation_candidates_.length(); | 323 for (Page* p : evacuation_candidates_) { |
324 for (int i = 0; i < number_of_pages; i++) { | |
325 Page* p = evacuation_candidates_[i]; | |
326 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); | 324 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); |
327 } | 325 } |
328 } | 326 } |
329 #ifdef VERIFY_HEAP | 327 #ifdef VERIFY_HEAP |
330 if (FLAG_verify_heap) { | 328 if (FLAG_verify_heap) { |
331 VerifyValidStoreAndSlotsBufferEntries(); | 329 VerifyValidStoreAndSlotsBufferEntries(); |
332 } | 330 } |
333 #endif | 331 #endif |
334 } | 332 } |
335 | 333 |
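A recurring mechanical change throughout this CL, visible first in the hunk above: index-based loops over `evacuation_candidates_` become C++11 range-based for loops. A sketch of the transformation, using `std::vector` as a stand-in for V8's `List<Page*>` (which the CL presumably equips with `begin()`/`end()` iterators):

```cpp
#include <vector>

struct Page {};

// Before: manual index bookkeeping.
void ProcessOld(std::vector<Page*>& evacuation_candidates) {
  int number_of_pages = static_cast<int>(evacuation_candidates.size());
  for (int i = 0; i < number_of_pages; i++) {
    Page* p = evacuation_candidates[i];
    (void)p;  // ... RemoveInvalidSlots(heap_, p->slots_buffer()) ...
  }
}

// After: the container's iterators drive the loop.
void ProcessNew(std::vector<Page*>& evacuation_candidates) {
  for (Page* p : evacuation_candidates) {
    (void)p;  // ... same body, minus the bookkeeping ...
  }
}
```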
(...skipping 488 matching lines...) | |
824 "compaction-selection: space=%s reduce_memory=%d pages=%d " | 822 "compaction-selection: space=%s reduce_memory=%d pages=%d " |
825 "total_live_bytes=%d\n", | 823 "total_live_bytes=%d\n", |
826 AllocationSpaceName(space->identity()), reduce_memory, | 824 AllocationSpaceName(space->identity()), reduce_memory, |
827 candidate_count, total_live_bytes / KB); | 825 candidate_count, total_live_bytes / KB); |
828 } | 826 } |
829 } | 827 } |
830 | 828 |
831 | 829 |
832 void MarkCompactCollector::AbortCompaction() { | 830 void MarkCompactCollector::AbortCompaction() { |
833 if (compacting_) { | 831 if (compacting_) { |
834 int npages = evacuation_candidates_.length(); | 832 for (Page* p : evacuation_candidates_) { |
835 for (int i = 0; i < npages; i++) { | |
836 Page* p = evacuation_candidates_[i]; | |
837 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | 833 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); |
838 p->ClearEvacuationCandidate(); | 834 p->ClearEvacuationCandidate(); |
839 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 835 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
840 } | 836 } |
841 compacting_ = false; | 837 compacting_ = false; |
842 evacuation_candidates_.Rewind(0); | 838 evacuation_candidates_.Rewind(0); |
843 } | 839 } |
844 DCHECK_EQ(0, evacuation_candidates_.length()); | 840 DCHECK_EQ(0, evacuation_candidates_.length()); |
845 } | 841 } |
846 | 842 |
(...skipping 694 matching lines...) | |
1541 class MarkCompactCollector::HeapObjectVisitor { | 1537 class MarkCompactCollector::HeapObjectVisitor { |
1542 public: | 1538 public: |
1543 virtual ~HeapObjectVisitor() {} | 1539 virtual ~HeapObjectVisitor() {} |
1544 virtual bool Visit(HeapObject* object) = 0; | 1540 virtual bool Visit(HeapObject* object) = 0; |
1545 }; | 1541 }; |
1546 | 1542 |
1547 | 1543 |
1548 class MarkCompactCollector::EvacuateVisitorBase | 1544 class MarkCompactCollector::EvacuateVisitorBase |
1549 : public MarkCompactCollector::HeapObjectVisitor { | 1545 : public MarkCompactCollector::HeapObjectVisitor { |
1550 public: | 1546 public: |
1551 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer) | 1547 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer, |
1552 : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {} | 1548 CompactionSpaceCollection* compaction_spaces) |
1549 : heap_(heap), | |
1550 evacuation_slots_buffer_(evacuation_slots_buffer), | |
1551 compaction_spaces_(compaction_spaces) {} | |
1553 | 1552 |
1554 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, | 1553 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, |
1555 HeapObject** target_object) { | 1554 HeapObject** target_object) { |
1556 int size = object->Size(); | 1555 int size = object->Size(); |
1557 AllocationAlignment alignment = object->RequiredAlignment(); | 1556 AllocationAlignment alignment = object->RequiredAlignment(); |
1558 AllocationResult allocation = target_space->AllocateRaw(size, alignment); | 1557 AllocationResult allocation = target_space->AllocateRaw(size, alignment); |
1559 if (allocation.To(target_object)) { | 1558 if (allocation.To(target_object)) { |
1560 heap_->mark_compact_collector()->MigrateObject( | 1559 heap_->mark_compact_collector()->MigrateObject( |
1561 *target_object, object, size, target_space->identity(), | 1560 *target_object, object, size, target_space->identity(), |
1562 evacuation_slots_buffer_); | 1561 evacuation_slots_buffer_, compaction_spaces_->local_store_buffer()); |
1563 return true; | 1562 return true; |
1564 } | 1563 } |
1565 return false; | 1564 return false; |
1566 } | 1565 } |
1567 | 1566 |
1568 protected: | 1567 protected: |
1569 Heap* heap_; | 1568 Heap* heap_; |
1570 SlotsBuffer** evacuation_slots_buffer_; | 1569 SlotsBuffer** evacuation_slots_buffer_; |
1570 CompactionSpaceCollection* compaction_spaces_; | |
1571 }; | 1571 }; |
1572 | 1572 |
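`EvacuateVisitorBase::TryEvacuateObject` (above) encodes an allocate-then-migrate contract: the object is copied only if the target space can satisfy the allocation at the required alignment, and a failed allocation is reported to the caller, which can then abort compaction of the page. A standalone sketch of that contract, with deliberately simplified stand-in types rather than V8's real classes:

```cpp
#include <cstdint>
#include <optional>

struct FakeObject {
  int size;
};

struct FakeSpace {
  int64_t free_bytes;
  int64_t next_address = 0x1000;
  // Analogue of AllocationResult: an address on success, std::nullopt
  // when the space cannot satisfy the request (IsRetry()).
  std::optional<int64_t> AllocateRaw(int size) {
    if (size > free_bytes) return std::nullopt;
    free_bytes -= size;
    int64_t result = next_address;
    next_address += size;
    return result;
  }
};

// Mirrors TryEvacuateObject: allocate first, migrate only on success.
bool TryEvacuate(FakeSpace* target, const FakeObject& obj,
                 int64_t* target_address) {
  if (auto addr = target->AllocateRaw(obj.size)) {
    *target_address = *addr;
    // ... here MigrateObject() would copy the payload and record any
    // outgoing slots into the evacuation slots buffer / local store
    // buffer, as in the patch ...
    return true;
  }
  return false;  // caller eventually marks the page kCompactingAborted
}
```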
1573 | 1573 |
1574 class MarkCompactCollector::EvacuateNewSpaceVisitor final | 1574 class MarkCompactCollector::EvacuateNewSpaceVisitor final |
1575 : public MarkCompactCollector::EvacuateVisitorBase { | 1575 : public MarkCompactCollector::EvacuateVisitorBase { |
1576 public: | 1576 public: |
1577 static const intptr_t kLabSize = 4 * KB; | 1577 static const intptr_t kLabSize = 4 * KB; |
1578 static const intptr_t kMaxLabObjectSize = 256; | 1578 static const intptr_t kMaxLabObjectSize = 256; |
1579 | 1579 |
1580 explicit EvacuateNewSpaceVisitor(Heap* heap, | 1580 explicit EvacuateNewSpaceVisitor(Heap* heap, |
1581 SlotsBuffer** evacuation_slots_buffer, | 1581 SlotsBuffer** evacuation_slots_buffer, |
1582 HashMap* local_pretenuring_feedback) | 1582 CompactionSpaceCollection* compaction_spaces) |
1583 : EvacuateVisitorBase(heap, evacuation_slots_buffer), | 1583 : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces), |
1584 buffer_(LocalAllocationBuffer::InvalidBuffer()), | 1584 buffer_(LocalAllocationBuffer::InvalidBuffer()), |
1585 space_to_allocate_(NEW_SPACE), | 1585 space_to_allocate_(NEW_SPACE), |
1586 promoted_size_(0), | 1586 promoted_size_(0), |
1587 semispace_copied_size_(0), | 1587 semispace_copied_size_(0), |
1588 local_pretenuring_feedback_(local_pretenuring_feedback) {} | 1588 local_pretenuring_feedback_( |
1589 compaction_spaces->local_pretenuring_feedback()) {} | |
1589 | 1590 |
1590 bool Visit(HeapObject* object) override { | 1591 bool Visit(HeapObject* object) override { |
1591 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_); | 1592 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_); |
1592 int size = object->Size(); | 1593 int size = object->Size(); |
1593 HeapObject* target_object = nullptr; | 1594 HeapObject* target_object = nullptr; |
1594 if (heap_->ShouldBePromoted(object->address(), size) && | 1595 if (heap_->ShouldBePromoted(object->address(), size) && |
1595 TryEvacuateObject(heap_->old_space(), object, &target_object)) { | 1596 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object, |
1597 &target_object)) { | |
1596 // If we end up needing more special cases, we should factor this out. | 1598 // If we end up needing more special cases, we should factor this out. |
1597 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { | 1599 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { |
1598 heap_->array_buffer_tracker()->Promote( | 1600 heap_->array_buffer_tracker()->Promote( |
1599 JSArrayBuffer::cast(target_object)); | 1601 JSArrayBuffer::cast(target_object)); |
1600 } | 1602 } |
1601 promoted_size_ += size; | 1603 promoted_size_ += size; |
1602 return true; | 1604 return true; |
1603 } | 1605 } |
1604 HeapObject* target = nullptr; | 1606 HeapObject* target = nullptr; |
1605 AllocationSpace space = AllocateTargetObject(object, &target); | 1607 AllocationSpace space = AllocateTargetObject(object, &target); |
1606 heap_->mark_compact_collector()->MigrateObject( | 1608 heap_->mark_compact_collector()->MigrateObject( |
1607 HeapObject::cast(target), object, size, space, | 1609 HeapObject::cast(target), object, size, space, |
1608 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_); | 1610 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_, |
1611 compaction_spaces_->local_store_buffer()); | |
1609 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | 1612 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { |
1610 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | 1613 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); |
1611 } | 1614 } |
1612 semispace_copied_size_ += size; | 1615 semispace_copied_size_ += size; |
1613 return true; | 1616 return true; |
1614 } | 1617 } |
1615 | 1618 |
1616 intptr_t promoted_size() { return promoted_size_; } | 1619 intptr_t promoted_size() { return promoted_size_; } |
1617 intptr_t semispace_copied_size() { return semispace_copied_size_; } | 1620 intptr_t semispace_copied_size() { return semispace_copied_size_; } |
1618 | 1621 |
(...skipping 51 matching lines...) | |
1670 if (allocation.IsRetry()) { | 1673 if (allocation.IsRetry()) { |
1671 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE; | 1674 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE; |
1672 } | 1675 } |
1673 } | 1676 } |
1674 } | 1677 } |
1675 return allocation; | 1678 return allocation; |
1676 } | 1679 } |
1677 | 1680 |
1678 inline AllocationResult AllocateInOldSpace(int size_in_bytes, | 1681 inline AllocationResult AllocateInOldSpace(int size_in_bytes, |
1679 AllocationAlignment alignment) { | 1682 AllocationAlignment alignment) { |
1680 AllocationResult allocation = | 1683 AllocationResult allocation = compaction_spaces_->Get(OLD_SPACE) |
1681 heap_->old_space()->AllocateRaw(size_in_bytes, alignment); | 1684 ->AllocateRaw(size_in_bytes, alignment); |
1682 if (allocation.IsRetry()) { | 1685 if (allocation.IsRetry()) { |
1683 FatalProcessOutOfMemory( | 1686 FatalProcessOutOfMemory( |
1684 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); | 1687 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); |
1685 } | 1688 } |
1686 return allocation; | 1689 return allocation; |
1687 } | 1690 } |
1688 | 1691 |
1689 inline AllocationResult AllocateInLab(int size_in_bytes, | 1692 inline AllocationResult AllocateInLab(int size_in_bytes, |
1690 AllocationAlignment alignment) { | 1693 AllocationAlignment alignment) { |
1691 AllocationResult allocation; | 1694 AllocationResult allocation; |
(...skipping 26 matching lines...) | |
1718 HashMap* local_pretenuring_feedback_; | 1721 HashMap* local_pretenuring_feedback_; |
1719 }; | 1722 }; |
1720 | 1723 |
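The `kLabSize`/`kMaxLabObjectSize` pair above drives the visitor's allocation strategy: objects up to 256 bytes are bump-allocated from a thread-local 4 KB buffer, so only buffer refills need to synchronize on the shared space. A minimal model of the idea; the constants come from the patch, but the `Lab` and refill types are simplified assumptions, not V8's `LocalAllocationBuffer` API:

```cpp
#include <cstdint>

constexpr intptr_t kLabSize = 4 * 1024;      // refill granularity
constexpr intptr_t kMaxLabObjectSize = 256;  // larger objects bypass the LAB

struct Lab {
  intptr_t top = 0;
  intptr_t limit = 0;  // top == limit models the "invalid buffer" state
  bool TryAllocate(intptr_t size, intptr_t* result) {
    if (top + size > limit) return false;
    *result = top;
    top += size;
    return true;
  }
};

// Refilling is the only step that must synchronize with other tasks;
// modeled here as bumping a shared cursor.
bool RefillLab(Lab* lab, intptr_t* shared_top, intptr_t shared_limit) {
  if (*shared_top + kLabSize > shared_limit) return false;
  lab->top = *shared_top;
  lab->limit = lab->top + kLabSize;
  *shared_top += kLabSize;  // in V8 this would be an atomic/locked bump
  return true;
}

bool AllocateSmall(Lab* lab, intptr_t size, intptr_t* shared_top,
                   intptr_t shared_limit, intptr_t* result) {
  if (size > kMaxLabObjectSize) return false;  // direct-allocation path
  if (lab->TryAllocate(size, result)) return true;
  return RefillLab(lab, shared_top, shared_limit) &&
         lab->TryAllocate(size, result);
}
```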
1721 | 1724 |
1722 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1725 class MarkCompactCollector::EvacuateOldSpaceVisitor final |
1723 : public MarkCompactCollector::EvacuateVisitorBase { | 1726 : public MarkCompactCollector::EvacuateVisitorBase { |
1724 public: | 1727 public: |
1725 EvacuateOldSpaceVisitor(Heap* heap, | 1728 EvacuateOldSpaceVisitor(Heap* heap, |
1726 CompactionSpaceCollection* compaction_spaces, | 1729 CompactionSpaceCollection* compaction_spaces, |
1727 SlotsBuffer** evacuation_slots_buffer) | 1730 SlotsBuffer** evacuation_slots_buffer) |
1728 : EvacuateVisitorBase(heap, evacuation_slots_buffer), | 1731 : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces) {} |
1729 compaction_spaces_(compaction_spaces) {} | |
1730 | 1732 |
1731 bool Visit(HeapObject* object) override { | 1733 bool Visit(HeapObject* object) override { |
1732 CompactionSpace* target_space = compaction_spaces_->Get( | 1734 CompactionSpace* target_space = compaction_spaces_->Get( |
1733 Page::FromAddress(object->address())->owner()->identity()); | 1735 Page::FromAddress(object->address())->owner()->identity()); |
1734 HeapObject* target_object = nullptr; | 1736 HeapObject* target_object = nullptr; |
1735 if (TryEvacuateObject(target_space, object, &target_object)) { | 1737 if (TryEvacuateObject(target_space, object, &target_object)) { |
1736 DCHECK(object->map_word().IsForwardingAddress()); | 1738 DCHECK(object->map_word().IsForwardingAddress()); |
1737 return true; | 1739 return true; |
1738 } | 1740 } |
1739 return false; | 1741 return false; |
1740 } | 1742 } |
1741 | 1743 |
1742 private: | 1744 private: |
1743 CompactionSpaceCollection* compaction_spaces_; | |
1744 }; | 1745 }; |
1745 | 1746 |
1746 | 1747 |
1747 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 1748 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { |
1748 PageIterator it(space); | 1749 PageIterator it(space); |
1749 while (it.has_next()) { | 1750 while (it.has_next()) { |
1750 Page* p = it.next(); | 1751 Page* p = it.next(); |
1751 DiscoverGreyObjectsOnPage(p); | 1752 DiscoverGreyObjectsOnPage(p); |
1752 if (marking_deque()->IsFull()) return; | 1753 if (marking_deque()->IsFull()) return; |
1753 } | 1754 } |
(...skipping 789 matching lines...) | |
2543 while (obj != Smi::FromInt(0)) { | 2544 while (obj != Smi::FromInt(0)) { |
2544 TransitionArray* array = TransitionArray::cast(obj); | 2545 TransitionArray* array = TransitionArray::cast(obj); |
2545 obj = array->next_link(); | 2546 obj = array->next_link(); |
2546 array->set_next_link(undefined, SKIP_WRITE_BARRIER); | 2547 array->set_next_link(undefined, SKIP_WRITE_BARRIER); |
2547 } | 2548 } |
2548 heap()->set_encountered_transition_arrays(Smi::FromInt(0)); | 2549 heap()->set_encountered_transition_arrays(Smi::FromInt(0)); |
2549 } | 2550 } |
2550 | 2551 |
2551 | 2552 |
2552 void MarkCompactCollector::RecordMigratedSlot( | 2553 void MarkCompactCollector::RecordMigratedSlot( |
2553 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) { | 2554 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer, |
2555 LocalStoreBuffer* local_store_buffer) { | |
2554 // When parallel compaction is in progress, store and slots buffer entries | 2556 // When parallel compaction is in progress, store and slots buffer entries |
2555 // require synchronization. | 2557 // require synchronization. |
2556 if (heap_->InNewSpace(value)) { | 2558 if (heap_->InNewSpace(value)) { |
2557 if (compaction_in_progress_) { | 2559 if (compaction_in_progress_) { |
2558 heap_->store_buffer()->MarkSynchronized(slot); | 2560 local_store_buffer->Record(slot); |
2559 } else { | 2561 } else { |
2560 heap_->store_buffer()->Mark(slot); | 2562 heap_->store_buffer()->Mark(slot); |
2561 } | 2563 } |
2562 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { | 2564 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { |
2563 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, | 2565 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, |
2564 reinterpret_cast<Object**>(slot), | 2566 reinterpret_cast<Object**>(slot), |
2565 SlotsBuffer::IGNORE_OVERFLOW); | 2567 SlotsBuffer::IGNORE_OVERFLOW); |
2566 } | 2568 } |
2567 } | 2569 } |
2568 | 2570 |
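This dispatch is the core of the synchronization change: while parallel compaction is running, each task records new-space slots into its own `LocalStoreBuffer`, and those buffers are replayed into the shared store buffer during sequential finalization, replacing the previous `MarkSynchronized()` path. A schematic of the three cases, with hypothetical minimal types standing in for the V8 ones:

```cpp
#include <vector>

using Address = void*;

struct LocalStoreBuffer {
  std::vector<Address> slots;
  void Record(Address a) { slots.push_back(a); }
};
struct StoreBuffer { void Mark(Address) {} };
struct SlotsBuffer { void Add(Address) {} };

enum class Target { kNewSpace, kEvacuationCandidate, kOther };

void RecordMigratedSlot(Target target, Address slot, bool parallel,
                        LocalStoreBuffer* local, StoreBuffer* shared,
                        SlotsBuffer* slots) {
  if (target == Target::kNewSpace) {
    if (parallel) {
      local->Record(slot);  // thread-local, no synchronization needed
    } else {
      shared->Mark(slot);   // single-threaded path writes directly
    }
  } else if (target == Target::kEvacuationCandidate) {
    slots->Add(slot);       // per-page slots buffer for pointer updates
  }
  // Target::kOther: nothing to record.
}
```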
(...skipping 61 matching lines...) | |
2630 if (!success) { | 2632 if (!success) { |
2631 EvictPopularEvacuationCandidate(target_page); | 2633 EvictPopularEvacuationCandidate(target_page); |
2632 } | 2634 } |
2633 } | 2635 } |
2634 } | 2636 } |
2635 | 2637 |
2636 | 2638 |
2637 class RecordMigratedSlotVisitor final : public ObjectVisitor { | 2639 class RecordMigratedSlotVisitor final : public ObjectVisitor { |
2638 public: | 2640 public: |
2639 RecordMigratedSlotVisitor(MarkCompactCollector* collector, | 2641 RecordMigratedSlotVisitor(MarkCompactCollector* collector, |
2640 SlotsBuffer** evacuation_slots_buffer) | 2642 SlotsBuffer** evacuation_slots_buffer, |
2643 LocalStoreBuffer* local_store_buffer) | |
2641 : collector_(collector), | 2644 : collector_(collector), |
2642 evacuation_slots_buffer_(evacuation_slots_buffer) {} | 2645 evacuation_slots_buffer_(evacuation_slots_buffer), |
2646 local_store_buffer_(local_store_buffer) {} | |
2643 | 2647 |
2644 V8_INLINE void VisitPointer(Object** p) override { | 2648 V8_INLINE void VisitPointer(Object** p) override { |
2645 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), | 2649 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), |
2646 evacuation_slots_buffer_); | 2650 evacuation_slots_buffer_, |
2651 local_store_buffer_); | |
2647 } | 2652 } |
2648 | 2653 |
2649 V8_INLINE void VisitPointers(Object** start, Object** end) override { | 2654 V8_INLINE void VisitPointers(Object** start, Object** end) override { |
2650 while (start < end) { | 2655 while (start < end) { |
2651 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), | 2656 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), |
2652 evacuation_slots_buffer_); | 2657 evacuation_slots_buffer_, |
2658 local_store_buffer_); | |
2653 ++start; | 2659 ++start; |
2654 } | 2660 } |
2655 } | 2661 } |
2656 | 2662 |
2657 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { | 2663 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { |
2658 if (collector_->compacting_) { | 2664 if (collector_->compacting_) { |
2659 Address code_entry = Memory::Address_at(code_entry_slot); | 2665 Address code_entry = Memory::Address_at(code_entry_slot); |
2660 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, | 2666 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, |
2661 evacuation_slots_buffer_); | 2667 evacuation_slots_buffer_); |
2662 } | 2668 } |
2663 } | 2669 } |
2664 | 2670 |
2665 private: | 2671 private: |
2666 MarkCompactCollector* collector_; | 2672 MarkCompactCollector* collector_; |
2667 SlotsBuffer** evacuation_slots_buffer_; | 2673 SlotsBuffer** evacuation_slots_buffer_; |
2674 LocalStoreBuffer* local_store_buffer_; | |
2668 }; | 2675 }; |
2669 | 2676 |
2670 | 2677 |
2671 // We scavenge new space simultaneously with sweeping. This is done in two | 2678 // We scavenge new space simultaneously with sweeping. This is done in two |
2672 // passes. | 2679 // passes. |
2673 // | 2680 // |
2674 // The first pass migrates all alive objects from one semispace to another or | 2681 // The first pass migrates all alive objects from one semispace to another or |
2675 // promotes them to old space. Forwarding address is written directly into | 2682 // promotes them to old space. Forwarding address is written directly into |
2676 // first word of object without any encoding. If object is dead we write | 2683 // first word of object without any encoding. If object is dead we write |
2677 // NULL as a forwarding address. | 2684 // NULL as a forwarding address. |
2678 // | 2685 // |
2679 // The second pass updates pointers to new space in all spaces. It is possible | 2686 // The second pass updates pointers to new space in all spaces. It is possible |
2680 // to encounter pointers to dead new space objects during traversal of pointers | 2687 // to encounter pointers to dead new space objects during traversal of pointers |
2681 // to new space. We should clear them to avoid encountering them during next | 2688 // to new space. We should clear them to avoid encountering them during next |
2682 // pointer iteration. This is an issue if the store buffer overflows and we | 2689 // pointer iteration. This is an issue if the store buffer overflows and we |
2683 // have to scan the entire old space, including dead objects, looking for | 2690 // have to scan the entire old space, including dead objects, looking for |
2684 // pointers to new space. | 2691 // pointers to new space. |
2685 void MarkCompactCollector::MigrateObject( | 2692 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src, |
2686 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest, | 2693 int size, AllocationSpace dest, |
2687 SlotsBuffer** evacuation_slots_buffer) { | 2694 SlotsBuffer** evacuation_slots_buffer, |
2695 LocalStoreBuffer* local_store_buffer) { | |
2688 Address dst_addr = dst->address(); | 2696 Address dst_addr = dst->address(); |
2689 Address src_addr = src->address(); | 2697 Address src_addr = src->address(); |
2690 DCHECK(heap()->AllowedToBeMigrated(src, dest)); | 2698 DCHECK(heap()->AllowedToBeMigrated(src, dest)); |
2691 DCHECK(dest != LO_SPACE); | 2699 DCHECK(dest != LO_SPACE); |
2692 if (dest == OLD_SPACE) { | 2700 if (dest == OLD_SPACE) { |
2693 DCHECK_OBJECT_SIZE(size); | 2701 DCHECK_OBJECT_SIZE(size); |
2694 DCHECK(evacuation_slots_buffer != nullptr); | 2702 DCHECK(evacuation_slots_buffer != nullptr); |
2695 DCHECK(IsAligned(size, kPointerSize)); | 2703 DCHECK(IsAligned(size, kPointerSize)); |
2696 | 2704 |
2697 heap()->MoveBlock(dst->address(), src->address(), size); | 2705 heap()->MoveBlock(dst->address(), src->address(), size); |
2698 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer); | 2706 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer, |
2707 local_store_buffer); | |
2699 dst->IterateBody(&visitor); | 2708 dst->IterateBody(&visitor); |
2700 } else if (dest == CODE_SPACE) { | 2709 } else if (dest == CODE_SPACE) { |
2701 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); | 2710 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); |
2702 DCHECK(evacuation_slots_buffer != nullptr); | 2711 DCHECK(evacuation_slots_buffer != nullptr); |
2703 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); | 2712 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); |
2704 heap()->MoveBlock(dst_addr, src_addr, size); | 2713 heap()->MoveBlock(dst_addr, src_addr, size); |
2705 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); | 2714 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); |
2706 Code::cast(dst)->Relocate(dst_addr - src_addr); | 2715 Code::cast(dst)->Relocate(dst_addr - src_addr); |
2707 } else { | 2716 } else { |
2708 DCHECK_OBJECT_SIZE(size); | 2717 DCHECK_OBJECT_SIZE(size); |
(...skipping 341 matching lines...) | |
3050 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3059 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
3051 | 3060 |
3052 // The target object is black but we don't know if the source slot is black. | 3061 // The target object is black but we don't know if the source slot is black. |
3053 // The source object could have died and the slot could be part of a free | 3062 // The source object could have died and the slot could be part of a free |
3054 // space. Use the mark bit iterator to find out about liveness of the slot. | 3063 // space. Use the mark bit iterator to find out about liveness of the slot. |
3055 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); | 3064 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); |
3056 } | 3065 } |
3057 | 3066 |
3058 | 3067 |
3059 void MarkCompactCollector::EvacuateNewSpacePrologue() { | 3068 void MarkCompactCollector::EvacuateNewSpacePrologue() { |
3060 // There are soft limits in the allocation code, designed trigger a mark | |
3061 // sweep collection by failing allocations. But since we are already in | |
3062 // a mark-sweep allocation, there is no sense in trying to trigger one. | |
3063 AlwaysAllocateScope scope(isolate()); | |
3064 | |
3065 NewSpace* new_space = heap()->new_space(); | 3069 NewSpace* new_space = heap()->new_space(); |
3066 | 3070 NewSpacePageIterator it(new_space->bottom(), new_space->top()); |
3067 // Store allocation range before flipping semispaces. | 3071 // Append the list of new space pages to be processed. |
3068 Address from_bottom = new_space->bottom(); | |
3069 Address from_top = new_space->top(); | |
3070 | |
3071 // Flip the semispaces. After flipping, to space is empty, from space has | |
3072 // live objects. | |
3073 new_space->Flip(); | |
3074 new_space->ResetAllocationInfo(); | |
3075 | |
3076 newspace_evacuation_candidates_.Clear(); | |
3077 NewSpacePageIterator it(from_bottom, from_top); | |
3078 while (it.has_next()) { | 3072 while (it.has_next()) { |
3079 newspace_evacuation_candidates_.Add(it.next()); | 3073 newspace_evacuation_candidates_.Add(it.next()); |
3080 } | 3074 } |
3075 new_space->Flip(); | |
3076 new_space->ResetAllocationInfo(); | |
3081 } | 3077 } |
3082 | 3078 |
3083 | 3079 |
3084 HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() { | 3080 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { |
3085 HashMap* local_pretenuring_feedback = new HashMap( | 3081 newspace_evacuation_candidates_.Rewind(0); |
3086 HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity); | |
3087 EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_, | |
3088 local_pretenuring_feedback); | |
3089 // First pass: traverse all objects in inactive semispace, remove marks, | |
3090 // migrate live objects and write forwarding addresses. This stage puts | |
3091 // new entries in the store buffer and may cause some pages to be marked | |
3092 // scan-on-scavenge. | |
3093 for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) { | |
3094 NewSpacePage* p = | |
3095 reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]); | |
3096 bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits); | |
3097 USE(ok); | |
3098 DCHECK(ok); | |
3099 } | |
3100 heap_->IncrementPromotedObjectsSize(new_space_visitor.promoted_size()); | |
3101 heap_->IncrementSemiSpaceCopiedObjectSize( | |
3102 new_space_visitor.semispace_copied_size()); | |
3103 heap_->IncrementYoungSurvivorsCounter( | |
3104 new_space_visitor.promoted_size() + | |
3105 new_space_visitor.semispace_copied_size()); | |
3106 return local_pretenuring_feedback; | |
3107 } | 3082 } |
3108 | 3083 |
3109 | 3084 |
3110 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( | 3085 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( |
3111 SlotsBuffer* evacuation_slots_buffer) { | 3086 SlotsBuffer* evacuation_slots_buffer) { |
3112 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); | 3087 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); |
3113 evacuation_slots_buffers_.Add(evacuation_slots_buffer); | 3088 evacuation_slots_buffers_.Add(evacuation_slots_buffer); |
3114 } | 3089 } |
3115 | 3090 |
3116 | 3091 |
3117 int MarkCompactCollector::NumberOfParallelCompactionTasks() { | 3092 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, |
3093 intptr_t live_bytes) { | |
3118 if (!FLAG_parallel_compaction) return 1; | 3094 if (!FLAG_parallel_compaction) return 1; |
3119 // Compute the number of needed tasks based on a target compaction time, the | 3095 // Compute the number of needed tasks based on a target compaction time, the |
3120 // profiled compaction speed and marked live memory. | 3096 // profiled compaction speed and marked live memory. |
3121 // | 3097 // |
3122 // The number of parallel compaction tasks is limited by: | 3098 // The number of parallel compaction tasks is limited by: |
3123 // - #evacuation pages | 3099 // - #evacuation pages |
3124 // - (#cores - 1) | 3100 // - (#cores - 1) |
3125 // - a hard limit | |
3126 const double kTargetCompactionTimeInMs = 1; | 3101 const double kTargetCompactionTimeInMs = 1; |
3127 const int kMaxCompactionTasks = 8; | 3102 const int kNumSweepingTasks = 3; |
3128 | 3103 |
3129 intptr_t compaction_speed = | 3104 intptr_t compaction_speed = |
3130 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3105 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3131 if (compaction_speed == 0) return 1; | |
3132 | 3106 |
3133 intptr_t live_bytes = 0; | 3107 const int cores = |
3134 for (Page* page : evacuation_candidates_) { | 3108 Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1); |
3135 live_bytes += page->LiveBytes(); | 3109 int tasks; |
3110 if (compaction_speed > 0) { | |
3111 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) / | |
3112 compaction_speed / kTargetCompactionTimeInMs); | |
3113 } else { | |
3114 tasks = pages; | |
3136 } | 3115 } |
3137 | 3116 const int tasks_capped_pages = Min(pages, tasks); |
3138 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1); | |
3139 const int tasks = | |
3140 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed / | |
3141 kTargetCompactionTimeInMs); | |
3142 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks); | |
3143 const int tasks_capped_cores = Min(cores, tasks_capped_pages); | 3117 const int tasks_capped_cores = Min(cores, tasks_capped_pages); |
3144 const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores); | 3118 return tasks_capped_cores; |
3145 return tasks_capped_hard; | |
3146 } | 3119 } |
3147 | 3120 |
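The rewritten heuristic replaces the hard `kMaxCompactionTasks = 8` cap with a cores-based cap that reserves threads for concurrent sweeping, and it now degrades to one task per page when no compaction-speed sample exists yet (previously it bailed out to a single task). A standalone model of the computation with one worked example; the constants mirror the patch, the rest is scaffolding:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>

int NumberOfParallelCompactionTasks(int pages, int64_t live_bytes,
                                    int64_t compaction_speed,  // bytes/ms
                                    int num_processors) {
  const double kTargetCompactionTimeInMs = 1;
  const int kNumSweepingTasks = 3;  // threads reserved for sweeping
  const int cores = std::max(1, num_processors - kNumSweepingTasks - 1);
  int tasks = (compaction_speed > 0)
                  ? 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                         compaction_speed /
                                         kTargetCompactionTimeInMs)
                  : pages;  // no speed profile yet: one task per page
  return std::min(cores, std::min(pages, tasks));
}

int main() {
  // E.g., 8 pages with 4 MB live data at 1 MB/ms on an 8-core machine:
  // tasks = 1 + 4096/1024/1 = 5, capped to min(8 pages, 5) = 5, then to
  // min(cores = 8 - 3 - 1 = 4, 5) = 4 tasks.
  std::printf("%d\n", NumberOfParallelCompactionTasks(8, 4 * 1024 * 1024,
                                                      1024 * 1024, 8));
}
```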
3148 | 3121 |
3149 void MarkCompactCollector::EvacuatePagesInParallel() { | 3122 void MarkCompactCollector::EvacuatePagesInParallel() { |
3150 const int num_pages = evacuation_candidates_.length(); | 3123 int num_pages = 0; |
3151 if (num_pages == 0) return; | 3124 intptr_t live_bytes = 0; |
3125 for (Page* page : evacuation_candidates_) { | |
3126 num_pages++; | |
3127 live_bytes += page->LiveBytes(); | |
3128 } | |
3129 for (NewSpacePage* page : newspace_evacuation_candidates_) { | |
3130 num_pages++; | |
3131 live_bytes += page->LiveBytes(); | |
3132 } | |
3133 DCHECK_GE(num_pages, 1); | |
3134 | |
3152 | 3135 |
3153 // Used for trace summary. | 3136 // Used for trace summary. |
3154 intptr_t live_bytes = 0; | |
3155 intptr_t compaction_speed = 0; | 3137 intptr_t compaction_speed = 0; |
3156 if (FLAG_trace_fragmentation) { | 3138 if (FLAG_trace_fragmentation) { |
3157 for (Page* page : evacuation_candidates_) { | |
3158 live_bytes += page->LiveBytes(); | |
3159 } | |
3160 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3139 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); |
3161 } | 3140 } |
3162 const int num_tasks = NumberOfParallelCompactionTasks(); | 3141 |
3142 const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes); | |
3163 | 3143 |
3164 // Set up compaction spaces. | 3144 // Set up compaction spaces. |
3165 CompactionSpaceCollection** compaction_spaces_for_tasks = | 3145 CompactionSpaceCollection** compaction_spaces_for_tasks = |
3166 new CompactionSpaceCollection*[num_tasks]; | 3146 new CompactionSpaceCollection*[num_tasks]; |
3167 for (int i = 0; i < num_tasks; i++) { | 3147 for (int i = 0; i < num_tasks; i++) { |
3168 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); | 3148 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); |
3169 } | 3149 } |
3170 | 3150 |
3171 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 3151 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, |
3172 num_tasks); | 3152 num_tasks); |
3173 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 3153 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, |
3174 num_tasks); | 3154 num_tasks); |
3175 | 3155 |
3176 uint32_t* task_ids = new uint32_t[num_tasks - 1]; | 3156 uint32_t* task_ids = new uint32_t[num_tasks - 1]; |
3177 // Kick off parallel tasks. | 3157 // Kick off parallel tasks. |
3178 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks); | 3158 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks); |
3179 // Wait for unfinished and not-yet-started tasks. | 3159 // Wait for unfinished and not-yet-started tasks. |
3180 WaitUntilCompactionCompleted(task_ids, num_tasks - 1); | 3160 WaitUntilCompactionCompleted(task_ids, num_tasks - 1); |
3181 delete[] task_ids; | 3161 delete[] task_ids; |
3182 | 3162 |
3183 double compaction_duration = 0.0; | 3163 double compaction_duration = 0.0; |
3184 intptr_t compacted_memory = 0; | 3164 intptr_t compacted_memory = 0; |
3185 // Merge back memory (compacted and unused) from compaction spaces. | 3165 // Merge back memory (compacted and unused) from compaction spaces and update |
3166 // pretenuring feedback. | |
3186 for (int i = 0; i < num_tasks; i++) { | 3167 for (int i = 0; i < num_tasks; i++) { |
3187 heap()->old_space()->MergeCompactionSpace( | 3168 heap()->old_space()->MergeCompactionSpace( |
3188 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); | 3169 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); |
3189 heap()->code_space()->MergeCompactionSpace( | 3170 heap()->code_space()->MergeCompactionSpace( |
3190 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); | 3171 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); |
3191 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted(); | 3172 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted(); |
3192 compaction_duration += compaction_spaces_for_tasks[i]->duration(); | 3173 compaction_duration += compaction_spaces_for_tasks[i]->duration(); |
3174 heap()->MergeAllocationSitePretenuringFeedback( | |
3175 *compaction_spaces_for_tasks[i]->local_pretenuring_feedback()); | |
3176 compaction_spaces_for_tasks[i]->local_store_buffer()->Process( | |
3177 heap()->store_buffer()); | |
3193 delete compaction_spaces_for_tasks[i]; | 3178 delete compaction_spaces_for_tasks[i]; |
3194 } | 3179 } |
3195 delete[] compaction_spaces_for_tasks; | 3180 delete[] compaction_spaces_for_tasks; |
3196 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory); | 3181 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory); |
3197 | 3182 |
3198 // Finalize sequentially. | 3183 // Finalize sequentially. |
3184 for (NewSpacePage* p : newspace_evacuation_candidates_) { | |
3185 DCHECK_EQ(p->parallel_compaction_state().Value(), | |
3186 MemoryChunk::kCompactingFinalize); | |
3187 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
3188 } | |
3189 | |
3199 int abandoned_pages = 0; | 3190 int abandoned_pages = 0; |
3200 for (int i = 0; i < num_pages; i++) { | 3191 for (Page* p : evacuation_candidates_) { |
3201 Page* p = evacuation_candidates_[i]; | |
3202 switch (p->parallel_compaction_state().Value()) { | 3192 switch (p->parallel_compaction_state().Value()) { |
3203 case MemoryChunk::ParallelCompactingState::kCompactingAborted: | 3193 case MemoryChunk::ParallelCompactingState::kCompactingAborted: |
3204 // We have partially compacted the page, i.e., some objects may have | 3194 // We have partially compacted the page, i.e., some objects may have |
3205 // moved, others are still in place. | 3195 // moved, others are still in place. |
3206 // We need to: | 3196 // We need to: |
3207 // - Leave the evacuation candidate flag for later processing of | 3197 // - Leave the evacuation candidate flag for later processing of |
3208 // slots buffer entries. | 3198 // slots buffer entries. |
3209 // - Leave the slots buffer there for processing of entries added by | 3199 // - Leave the slots buffer there for processing of entries added by |
3210 // the write barrier. | 3200 // the write barrier. |
3211 // - Rescan the page as slot recording in the migration buffer only | 3201 // - Rescan the page as slot recording in the migration buffer only |
(...skipping 12 matching lines...) Expand all Loading... | |
3224 case MemoryChunk::kCompactingFinalize: | 3214 case MemoryChunk::kCompactingFinalize: |
3225 DCHECK(p->IsEvacuationCandidate()); | 3215 DCHECK(p->IsEvacuationCandidate()); |
3226 p->SetWasSwept(); | 3216 p->SetWasSwept(); |
3227 p->Unlink(); | 3217 p->Unlink(); |
3228 break; | 3218 break; |
3229 case MemoryChunk::kCompactingDone: | 3219 case MemoryChunk::kCompactingDone: |
3230 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); | 3220 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); |
3231 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3221 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3232 break; | 3222 break; |
3233 default: | 3223 default: |
3234 // We should not observe kCompactingInProgress, or kCompactingDone. | 3224 // MemoryChunk::kCompactingInProgress. |
3235 UNREACHABLE(); | 3225 UNREACHABLE(); |
3236 } | 3226 } |
3237 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | 3227 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); |
3238 } | 3228 } |
3239 if (FLAG_trace_fragmentation) { | 3229 if (FLAG_trace_fragmentation) { |
3240 PrintIsolate(isolate(), | 3230 PrintIsolate(isolate(), |
3241 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " | 3231 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " |
3242 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX | 3232 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX |
3243 "d compaction_speed=%" V8_PTR_PREFIX "d\n", | 3233 "d compaction_speed=%" V8_PTR_PREFIX "d\n", |
3244 isolate()->time_millis_since_init(), FLAG_parallel_compaction, | 3234 isolate()->time_millis_since_init(), FLAG_parallel_compaction, |
(...skipping 28 matching lines...) | |
3273 // semaphore signal. | 3263 // semaphore signal. |
3274 for (int i = 0; i < len; i++) { | 3264 for (int i = 0; i < len; i++) { |
3275 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) { | 3265 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) { |
3276 pending_compaction_tasks_semaphore_.Wait(); | 3266 pending_compaction_tasks_semaphore_.Wait(); |
3277 } | 3267 } |
3278 } | 3268 } |
3279 compaction_in_progress_ = false; | 3269 compaction_in_progress_ = false; |
3280 } | 3270 } |
3281 | 3271 |
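`WaitUntilCompactionCompleted` (above) pairs `TryAbort()` with a semaphore: any task that cannot be aborted has already started, so the main thread waits for exactly one completion signal per such task. A sketch of the pattern using C++20 `std::counting_semaphore`; the claim/abort race resolution here is an assumption modeled on cancelable tasks in general, not V8's `CancelableTaskManager` internals:

```cpp
#include <atomic>
#include <semaphore>
#include <vector>

std::counting_semaphore<64> pending_tasks{0};

struct CancelableTask {
  std::atomic<bool> claimed{false};
  // Manager side: succeeds only if the worker has not claimed the task.
  bool TryAbort() { return !claimed.exchange(true); }
  // Worker side: runs only if the manager did not abort first.
  void RunIfNotAborted() {
    if (claimed.exchange(true)) return;  // aborted before it started
    // ... evacuate pages ...
    pending_tasks.release();  // exactly one signal per task that ran
  }
};

// Mirrors WaitUntilCompactionCompleted: abort what never ran, wait once
// for every task that did.
void WaitUntilCompleted(std::vector<CancelableTask>& tasks) {
  for (CancelableTask& task : tasks) {
    if (!task.TryAbort()) pending_tasks.acquire();
  }
}
```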
3282 | 3272 |
3273 bool MarkCompactCollector::EvacuateSinglePage( | |
3274 MemoryChunk* p, HeapObjectVisitor* visitor, | |
3275 CompactionSpaceCollection* compaction_spaces) { | |
3276 bool aborted = false; | |
3277 if (p->parallel_compaction_state().TrySetValue( | |
3278 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | |
3279 if (p->IsEvacuationCandidate() || p->InNewSpace()) { | |
3280 DCHECK_EQ(p->parallel_compaction_state().Value(), | |
3281 MemoryChunk::kCompactingInProgress); | |
3282 int saved_live_bytes = p->LiveBytes(); | |
3283 double evacuation_time; | |
3284 bool success; | |
3285 { | |
3286 AlwaysAllocateScope always_allocate(isolate()); | |
3287 TimedScope timed_scope(heap(), &evacuation_time); | |
3288 success = VisitLiveObjects(p, visitor, kClearMarkbits); | |
3289 } | |
3290 if (success) { | |
3291 compaction_spaces->ReportCompactionProgress(evacuation_time, | |
3292 saved_live_bytes); | |
3293 p->parallel_compaction_state().SetValue( | |
3294 MemoryChunk::kCompactingFinalize); | |
3295 } else { | |
3296 p->parallel_compaction_state().SetValue( | |
3297 MemoryChunk::kCompactingAborted); | |
3298 aborted = true; | |
3299 } | |
3300 } else { | |
3301 // There could be popular pages in the list of evacuation candidates | |
3302 // which we do compact. | |
3303 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
3304 } | |
3305 } | |
3306 return !aborted; | |
3307 } | |
3308 | |
3309 | |
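The new `EvacuateSinglePage` helper relies on the page's `parallel_compaction_state` as a claim word: a compare-and-swap from `kCompactingDone` to `kCompactingInProgress` elects exactly one task per page, which matters now that both new-space pages and old-space candidates flow through the same code path. The same idea with `std::atomic`; the state names are copied from the patch, everything else is a stand-in:

```cpp
#include <atomic>

enum CompactingState {
  kCompactingDone,
  kCompactingInProgress,
  kCompactingFinalize,
  kCompactingAborted,
};

struct PageState {
  std::atomic<int> state{kCompactingDone};
};

// Returns true iff this thread won the claim; mirrors
// parallel_compaction_state().TrySetValue(kCompactingDone,
//                                         kCompactingInProgress).
bool TryClaim(PageState* p) {
  int expected = kCompactingDone;
  return p->state.compare_exchange_strong(expected, kCompactingInProgress);
}

bool EvacuateSinglePage(PageState* p,
                        bool (*visit_live_objects)(PageState*)) {
  if (!TryClaim(p)) return true;  // another task owns this page
  bool success = visit_live_objects(p);
  p->state.store(success ? kCompactingFinalize : kCompactingAborted);
  return success;
}
```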
3283 void MarkCompactCollector::EvacuatePages( | 3310 void MarkCompactCollector::EvacuatePages( |
3284 CompactionSpaceCollection* compaction_spaces, | 3311 CompactionSpaceCollection* compaction_spaces, |
3285 SlotsBuffer** evacuation_slots_buffer) { | 3312 SlotsBuffer** evacuation_slots_buffer) { |
3286 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces, | 3313 EvacuateOldSpaceVisitor old_space_visitor(heap(), compaction_spaces, |
3287 evacuation_slots_buffer); | 3314 evacuation_slots_buffer); |
3288 for (int i = 0; i < evacuation_candidates_.length(); i++) { | 3315 EvacuateNewSpaceVisitor new_space_visitor(heap(), evacuation_slots_buffer, |
3289 Page* p = evacuation_candidates_[i]; | 3316 compaction_spaces); |
3317 for (NewSpacePage* p : newspace_evacuation_candidates_) { | |
3318 DCHECK(p->InNewSpace()); | |
3319 DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()), | |
3320 MemoryChunk::kSweepingDone); | |
3321 bool success = EvacuateSinglePage(p, &new_space_visitor, compaction_spaces); | |
3322 DCHECK(success); | |
3323 USE(success); | |
3324 } | |
3325 for (Page* p : evacuation_candidates_) { | |
3290 DCHECK(p->IsEvacuationCandidate() || | 3326 DCHECK(p->IsEvacuationCandidate() || |
3291 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3327 p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION)); |
3292 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == | 3328 DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()), |
3293 MemoryChunk::kSweepingDone); | 3329 MemoryChunk::kSweepingDone); |
3294 if (p->parallel_compaction_state().TrySetValue( | 3330 EvacuateSinglePage(p, &old_space_visitor, compaction_spaces); |
3295 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | |
3296 if (p->IsEvacuationCandidate()) { | |
3297 DCHECK_EQ(p->parallel_compaction_state().Value(), | |
3298 MemoryChunk::kCompactingInProgress); | |
3299 double start = heap()->MonotonicallyIncreasingTimeInMs(); | |
3300 intptr_t live_bytes = p->LiveBytes(); | |
3301 AlwaysAllocateScope always_allocate(isolate()); | |
3302 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) { | |
3303 p->ResetLiveBytes(); | |
3304 p->parallel_compaction_state().SetValue( | |
3305 MemoryChunk::kCompactingFinalize); | |
3306 compaction_spaces->ReportCompactionProgress( | |
3307 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); | |
3308 } else { | |
3309 p->parallel_compaction_state().SetValue( | |
3310 MemoryChunk::kCompactingAborted); | |
3311 } | |
3312 } else { | |
3313 // There could be popular pages in the list of evacuation candidates | |
3314 // which we do compact. | |
3315 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
3316 } | |
3317 } | |
3318 } | 3331 } |
3332 | |
3333 heap()->IncrementPromotedObjectsSize(new_space_visitor.promoted_size()); | |
3334 heap()->IncrementSemiSpaceCopiedObjectSize( | |
3335 new_space_visitor.semispace_copied_size()); | |
3336 heap()->IncrementYoungSurvivorsCounter( | |
3337 new_space_visitor.promoted_size() + | |
3338 new_space_visitor.semispace_copied_size()); | |
3319 } | 3339 } |
3320 | 3340 |
3321 | 3341 |
3322 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3342 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { |
3323 public: | 3343 public: |
3324 virtual Object* RetainAs(Object* object) { | 3344 virtual Object* RetainAs(Object* object) { |
3325 if (object->IsHeapObject()) { | 3345 if (object->IsHeapObject()) { |
3326 HeapObject* heap_object = HeapObject::cast(object); | 3346 HeapObject* heap_object = HeapObject::cast(object); |
3327 MapWord map_word = heap_object->map_word(); | 3347 MapWord map_word = heap_object->map_word(); |
3328 if (map_word.IsForwardingAddress()) { | 3348 if (map_word.IsForwardingAddress()) { |
(...skipping 132 matching lines...) | |
3461 // Return true if the given code is deoptimized or will be deoptimized. | 3481 // Return true if the given code is deoptimized or will be deoptimized. |
3462 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { | 3482 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { |
3463 return code->is_optimized_code() && code->marked_for_deoptimization(); | 3483 return code->is_optimized_code() && code->marked_for_deoptimization(); |
3464 } | 3484 } |
3465 | 3485 |
3466 | 3486 |
3467 void MarkCompactCollector::RemoveObjectSlots(Address start_slot, | 3487 void MarkCompactCollector::RemoveObjectSlots(Address start_slot, |
3468 Address end_slot) { | 3488 Address end_slot) { |
3469 // Remove entries by replacing them with an old-space slot containing a smi | 3489 // Remove entries by replacing them with an old-space slot containing a smi |
3470 // that is located in an unmovable page. | 3490 // that is located in an unmovable page. |
3471 int npages = evacuation_candidates_.length(); | 3491 for (Page* p : evacuation_candidates_) { |
3472 for (int i = 0; i < npages; i++) { | |
3473 Page* p = evacuation_candidates_[i]; | |
3474 DCHECK(p->IsEvacuationCandidate() || | 3492 DCHECK(p->IsEvacuationCandidate() || |
3475 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3493 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3476 if (p->IsEvacuationCandidate()) { | 3494 if (p->IsEvacuationCandidate()) { |
3477 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, | 3495 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, |
3478 end_slot); | 3496 end_slot); |
3479 } | 3497 } |
3480 } | 3498 } |
3481 } | 3499 } |
3482 | 3500 |
3483 | 3501 |
(...skipping 59 matching lines...) | |
3543 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3561 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); |
3544 Map* map = object->synchronized_map(); | 3562 Map* map = object->synchronized_map(); |
3545 int size = object->SizeFromMap(map); | 3563 int size = object->SizeFromMap(map); |
3546 object->IterateBody(map->instance_type(), size, visitor); | 3564 object->IterateBody(map->instance_type(), size, visitor); |
3547 } | 3565 } |
3548 } | 3566 } |
3549 | 3567 |
3550 | 3568 |
3551 void MarkCompactCollector::SweepAbortedPages() { | 3569 void MarkCompactCollector::SweepAbortedPages() { |
3552 // Second pass on aborted pages. | 3570 // Second pass on aborted pages. |
3553 for (int i = 0; i < evacuation_candidates_.length(); i++) { | 3571 for (Page* p : evacuation_candidates_) { |
3554 Page* p = evacuation_candidates_[i]; | |
3555 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3572 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { |
3556 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); | 3573 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); |
3557 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3574 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3558 switch (space->identity()) { | 3575 switch (space->identity()) { |
3559 case OLD_SPACE: | 3576 case OLD_SPACE: |
3560 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 3577 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, |
3561 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); | 3578 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); |
3562 break; | 3579 break; |
3563 case CODE_SPACE: | 3580 case CODE_SPACE: |
3564 if (FLAG_zap_code_space) { | 3581 if (FLAG_zap_code_space) { |
(...skipping 10 matching lines...) | |
3575 } | 3592 } |
3576 } | 3593 } |
3577 } | 3594 } |
3578 } | 3595 } |
3579 | 3596 |
3580 | 3597 |
3581 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3598 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { |
3582 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3599 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); |
3583 Heap::RelocationLock relocation_lock(heap()); | 3600 Heap::RelocationLock relocation_lock(heap()); |
3584 | 3601 |
3585 HashMap* local_pretenuring_feedback = nullptr; | |
3586 { | 3602 { |
3587 GCTracer::Scope gc_scope(heap()->tracer(), | 3603 GCTracer::Scope gc_scope(heap()->tracer(), |
3588 GCTracer::Scope::MC_EVACUATE_NEW_SPACE); | 3604 GCTracer::Scope::MC_EVACUATE_NEW_SPACE); |
3589 EvacuationScope evacuation_scope(this); | 3605 EvacuationScope evacuation_scope(this); |
3606 | |
3590 EvacuateNewSpacePrologue(); | 3607 EvacuateNewSpacePrologue(); |
3591 local_pretenuring_feedback = EvacuateNewSpaceInParallel(); | |
3592 heap_->new_space()->set_age_mark(heap_->new_space()->top()); | |
3593 } | |
3594 | |
3595 { | |
3596 GCTracer::Scope gc_scope(heap()->tracer(), | |
3597 GCTracer::Scope::MC_EVACUATE_CANDIDATES); | |
3598 EvacuationScope evacuation_scope(this); | |
3599 EvacuatePagesInParallel(); | 3608 EvacuatePagesInParallel(); |
3600 } | 3609 EvacuateNewSpaceEpilogue(); |
3601 | 3610 heap()->new_space()->set_age_mark(heap()->new_space()->top()); |
3602 { | |
3603 heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback); | |
3604 delete local_pretenuring_feedback; | |
3605 } | 3611 } |
3606 | 3612 |
3607 UpdatePointersAfterEvacuation(); | 3613 UpdatePointersAfterEvacuation(); |
3608 | 3614 |
3609 { | 3615 { |
3610 GCTracer::Scope gc_scope(heap()->tracer(), | 3616 GCTracer::Scope gc_scope(heap()->tracer(), |
3611 GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3617 GCTracer::Scope::MC_EVACUATE_CLEAN_UP); |
3612 // After updating all pointers, we can finally sweep the aborted pages, | 3618 // After updating all pointers, we can finally sweep the aborted pages, |
3613 // effectively overriding any forward pointers. | 3619 // effectively overriding any forward pointers. |
3614 SweepAbortedPages(); | 3620 SweepAbortedPages(); |
(...skipping 56 matching lines...) | |
3671 &updating_visitor); | 3677 &updating_visitor); |
3672 } | 3678 } |
3673 // Update roots. | 3679 // Update roots. |
3674 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 3680 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); |
3675 | 3681 |
3676 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), | 3682 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), |
3677 &Heap::ScavengeStoreBufferCallback); | 3683 &Heap::ScavengeStoreBufferCallback); |
3678 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); | 3684 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); |
3679 } | 3685 } |
3680 | 3686 |
3681 int npages = evacuation_candidates_.length(); | |
3682 { | 3687 { |
3683 GCTracer::Scope gc_scope( | 3688 GCTracer::Scope gc_scope( |
3684 heap()->tracer(), | 3689 heap()->tracer(), |
3685 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); | 3690 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); |
3686 for (int i = 0; i < npages; i++) { | 3691 for (Page* p : evacuation_candidates_) { |
3687 Page* p = evacuation_candidates_[i]; | |
3688 DCHECK(p->IsEvacuationCandidate() || | 3692 DCHECK(p->IsEvacuationCandidate() || |
3689 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3693 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); |
3690 | 3694 |
3691 if (p->IsEvacuationCandidate()) { | 3695 if (p->IsEvacuationCandidate()) { |
3692 UpdateSlotsRecordedIn(p->slots_buffer()); | 3696 UpdateSlotsRecordedIn(p->slots_buffer()); |
3693 if (FLAG_trace_fragmentation_verbose) { | 3697 if (FLAG_trace_fragmentation_verbose) { |
3694 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), | 3698 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), |
3695 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3699 SlotsBuffer::SizeOfChain(p->slots_buffer())); |
3696 } | 3700 } |
3697 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | 3701 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); |
(...skipping 53 matching lines...) | |
3751 heap_->UpdateReferencesInExternalStringTable( | 3755 heap_->UpdateReferencesInExternalStringTable( |
3752 &UpdateReferenceInExternalStringTableEntry); | 3756 &UpdateReferenceInExternalStringTableEntry); |
3753 | 3757 |
3754 EvacuationWeakObjectRetainer evacuation_object_retainer; | 3758 EvacuationWeakObjectRetainer evacuation_object_retainer; |
3755 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); | 3759 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); |
3756 } | 3760 } |
3757 } | 3761 } |
3758 | 3762 |
3759 | 3763 |
3760 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { | 3764 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { |
3761 int npages = evacuation_candidates_.length(); | 3765 for (Page* p : evacuation_candidates_) { |
3762 for (int i = 0; i < npages; i++) { | |
3763 Page* p = evacuation_candidates_[i]; | |
3764 if (!p->IsEvacuationCandidate()) continue; | 3766 if (!p->IsEvacuationCandidate()) continue; |
3765 p->Unlink(); | 3767 p->Unlink(); |
3766 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3768 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3767 p->InsertAfter(space->LastPage()); | 3769 p->InsertAfter(space->LastPage()); |
3768 } | 3770 } |
3769 } | 3771 } |
3770 | 3772 |
3771 | 3773 |
3772 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 3774 void MarkCompactCollector::ReleaseEvacuationCandidates() { |
3773 int npages = evacuation_candidates_.length(); | 3775 for (Page* p : evacuation_candidates_) { |
3774 for (int i = 0; i < npages; i++) { | |
3775 Page* p = evacuation_candidates_[i]; | |
3776 if (!p->IsEvacuationCandidate()) continue; | 3776 if (!p->IsEvacuationCandidate()) continue; |
3777 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3777 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); |
3778 space->Free(p->area_start(), p->area_size()); | 3778 space->Free(p->area_start(), p->area_size()); |
3779 p->set_scan_on_scavenge(false); | 3779 p->set_scan_on_scavenge(false); |
3780 p->ResetLiveBytes(); | 3780 p->ResetLiveBytes(); |
3781 CHECK(p->WasSwept()); | 3781 CHECK(p->WasSwept()); |
3782 space->ReleasePage(p); | 3782 space->ReleasePage(p); |
3783 } | 3783 } |
3784 evacuation_candidates_.Rewind(0); | 3784 evacuation_candidates_.Rewind(0); |
3785 compacting_ = false; | 3785 compacting_ = false; |
3786 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); | 3786 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); |
3787 heap()->FreeQueuedChunks(); | 3787 heap()->FreeQueuedChunks(); |
3788 } | 3788 } |
3789 | 3789 |
3790 | 3790 |
3791 int MarkCompactCollector::SweepInParallel(PagedSpace* space, | 3791 int MarkCompactCollector::SweepInParallel(PagedSpace* space, |
3792 int required_freed_bytes) { | 3792 int required_freed_bytes, |
3793 int max_pages) { | |
Hannes Payer (out of office), 2016/01/18 11:46:34: We should optimize for sweeping as many pages as possible.
Michael Lippautz, 2016/01/19 14:56:52: As discussed offline, this will go in separately:
| |
3794 int page_count = 0; | |
3793 int max_freed = 0; | 3795 int max_freed = 0; |
3794 int max_freed_overall = 0; | 3796 int max_freed_overall = 0; |
3795 PageIterator it(space); | 3797 PageIterator it(space); |
3796 while (it.has_next()) { | 3798 while (it.has_next()) { |
3797 Page* p = it.next(); | 3799 Page* p = it.next(); |
3798 max_freed = SweepInParallel(p, space); | 3800 max_freed = SweepInParallel(p, space); |
3799 DCHECK(max_freed >= 0); | 3801 DCHECK(max_freed >= 0); |
3800 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { | 3802 if ((required_freed_bytes > 0) && (max_freed >= required_freed_bytes)) { |
3801 return max_freed; | 3803 return max_freed; |
3802 } | 3804 } |
3803 max_freed_overall = Max(max_freed, max_freed_overall); | 3805 max_freed_overall = Max(max_freed, max_freed_overall); |
3804 if (p == space->end_of_unswept_pages()) break; | 3806 if (p == space->end_of_unswept_pages()) break; |
3807 page_count++; | |
3808 if ((max_pages > 0) && (page_count == max_pages)) { | |
3809 return max_freed; | |
3810 } | |
3805 } | 3811 } |
3806 return max_freed_overall; | 3812 return max_freed_overall; |
3807 } | 3813 } |
3808 | 3814 |
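The new `max_pages` parameter (see the review exchange above) bounds how many pages a single `SweepInParallel` call may process, so a caller can take a fixed slice of sweeping work. A reduced model of the loop's three exits, freed-enough, page budget reached, and list exhausted, which also preserves the original's quirk of returning the last page's `max_freed` (not the overall maximum) on the budget exit:

```cpp
#include <algorithm>
#include <utility>
#include <vector>

// Stand-in for sweeping a single page: frees everything that is not live
// and reports the largest freed block (grossly simplified).
int SweepPage(int live_bytes, int area_size) { return area_size - live_bytes; }

// (live_bytes, area_size) pairs stand in for the space's unswept pages.
int SweepInParallel(const std::vector<std::pair<int, int>>& pages,
                    int required_freed_bytes, int max_pages) {
  int page_count = 0;
  int max_freed_overall = 0;
  for (const auto& [live, area] : pages) {
    int max_freed = SweepPage(live, area);
    // Exit 1: freed enough to satisfy the pending allocation.
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes)
      return max_freed;
    max_freed_overall = std::max(max_freed, max_freed_overall);
    // Exit 2: the page budget introduced by this patch.
    if (max_pages > 0 && ++page_count == max_pages) return max_freed;
  }
  // Exit 3: ran out of unswept pages.
  return max_freed_overall;
}
```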
3809 | 3815 |
3810 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { | 3816 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { |
3811 int max_freed = 0; | 3817 int max_freed = 0; |
3812 if (page->TryLock()) { | 3818 if (page->TryLock()) { |
3813 // If this page was already swept in the meantime, we can return here. | 3819 // If this page was already swept in the meantime, we can return here. |
3814 if (page->parallel_sweeping_state().Value() != | 3820 if (page->parallel_sweeping_state().Value() != |
(...skipping 249 matching lines...) | |
4064 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4070 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4065 if (Marking::IsBlack(mark_bit)) { | 4071 if (Marking::IsBlack(mark_bit)) { |
4066 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4072 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); |
4067 RecordRelocSlot(&rinfo, target); | 4073 RecordRelocSlot(&rinfo, target); |
4068 } | 4074 } |
4069 } | 4075 } |
4070 } | 4076 } |
4071 | 4077 |
4072 } // namespace internal | 4078 } // namespace internal |
4073 } // namespace v8 | 4079 } // namespace v8 |