Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 1577853007: [heap] Parallel newspace evacuation, semispace copy, and compaction \o/ (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Various non-functional changes Created 4 years, 11 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
(...skipping 304 matching lines...)
315 GCTracer::Scope gc_scope(heap()->tracer(), 315 GCTracer::Scope gc_scope(heap()->tracer(),
316 GCTracer::Scope::MC_CLEAR_STORE_BUFFER); 316 GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
317 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); 317 heap_->store_buffer()->ClearInvalidStoreBufferEntries();
318 } 318 }
319 319
320 { 320 {
321 GCTracer::Scope gc_scope(heap()->tracer(), 321 GCTracer::Scope gc_scope(heap()->tracer(),
322 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER); 322 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
323 int number_of_pages = evacuation_candidates_.length(); 323 int number_of_pages = evacuation_candidates_.length();
324 for (int i = 0; i < number_of_pages; i++) { 324 for (int i = 0; i < number_of_pages; i++) {
325 Page* p = evacuation_candidates_[i]; 325 MemoryChunk* p = evacuation_candidates_[i];
326 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); 326 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
327 } 327 }
328 } 328 }
329 #ifdef VERIFY_HEAP 329 #ifdef VERIFY_HEAP
330 if (FLAG_verify_heap) { 330 if (FLAG_verify_heap) {
331 VerifyValidStoreAndSlotsBufferEntries(); 331 VerifyValidStoreAndSlotsBufferEntries();
332 } 332 }
333 #endif 333 #endif
334 } 334 }
335 335
(...skipping 490 matching lines...)
826 AllocationSpaceName(space->identity()), reduce_memory, 826 AllocationSpaceName(space->identity()), reduce_memory,
827 candidate_count, total_live_bytes / KB); 827 candidate_count, total_live_bytes / KB);
828 } 828 }
829 } 829 }
830 830
831 831
832 void MarkCompactCollector::AbortCompaction() { 832 void MarkCompactCollector::AbortCompaction() {
833 if (compacting_) { 833 if (compacting_) {
834 int npages = evacuation_candidates_.length(); 834 int npages = evacuation_candidates_.length();
835 for (int i = 0; i < npages; i++) { 835 for (int i = 0; i < npages; i++) {
836 Page* p = evacuation_candidates_[i]; 836 MemoryChunk* p = evacuation_candidates_[i];
837 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); 837 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
838 p->ClearEvacuationCandidate(); 838 p->ClearEvacuationCandidate();
839 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 839 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
840 } 840 }
841 compacting_ = false; 841 compacting_ = false;
842 evacuation_candidates_.Rewind(0); 842 evacuation_candidates_.Rewind(0);
843 } 843 }
844 DCHECK_EQ(0, evacuation_candidates_.length()); 844 DCHECK_EQ(0, evacuation_candidates_.length());
845 } 845 }
846 846
(...skipping 697 matching lines...)
1544 class MarkCompactCollector::HeapObjectVisitor { 1544 class MarkCompactCollector::HeapObjectVisitor {
1545 public: 1545 public:
1546 virtual ~HeapObjectVisitor() {} 1546 virtual ~HeapObjectVisitor() {}
1547 virtual bool Visit(HeapObject* object) = 0; 1547 virtual bool Visit(HeapObject* object) = 0;
1548 }; 1548 };
1549 1549
1550 1550
1551 class MarkCompactCollector::EvacuateVisitorBase 1551 class MarkCompactCollector::EvacuateVisitorBase
1552 : public MarkCompactCollector::HeapObjectVisitor { 1552 : public MarkCompactCollector::HeapObjectVisitor {
1553 public: 1553 public:
1554 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer) 1554 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer,
1555 : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {} 1555 CompactionSpaceCollection* compaction_spaces)
1556 : heap_(heap),
1557 evacuation_slots_buffer_(evacuation_slots_buffer),
1558 compaction_spaces_(compaction_spaces) {}
1556 1559
1557 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, 1560 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1558 HeapObject** target_object) { 1561 HeapObject** target_object) {
1559 int size = object->Size(); 1562 int size = object->Size();
1560 AllocationAlignment alignment = object->RequiredAlignment(); 1563 AllocationAlignment alignment = object->RequiredAlignment();
1561 AllocationResult allocation = target_space->AllocateRaw(size, alignment); 1564 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1562 if (allocation.To(target_object)) { 1565 if (allocation.To(target_object)) {
1563 heap_->mark_compact_collector()->MigrateObject( 1566 heap_->mark_compact_collector()->MigrateObject(
1564 *target_object, object, size, target_space->identity(), 1567 *target_object, object, size, target_space->identity(),
1565 evacuation_slots_buffer_); 1568 evacuation_slots_buffer_, compaction_spaces_->local_store_buffer());
1566 return true; 1569 return true;
1567 } 1570 }
1568 return false; 1571 return false;
1569 } 1572 }
1570 1573
1571 protected: 1574 protected:
1572 Heap* heap_; 1575 Heap* heap_;
1573 SlotsBuffer** evacuation_slots_buffer_; 1576 SlotsBuffer** evacuation_slots_buffer_;
1577 CompactionSpaceCollection* compaction_spaces_;
1574 }; 1578 };
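
The shape of TryEvacuateObject() above — reserve space in the target compaction space first, and only copy and record slots if the reservation succeeds — is what lets both visitors below fall back gracefully when a space runs out of room. A minimal, self-contained sketch of that control flow; Space, TryEvacuate and the bump-pointer allocator are hypothetical stand-ins, not V8 API:

#include <cstdint>
#include <cstdio>
#include <cstring>

// Hypothetical stand-ins for PagedSpace/HeapObject; not V8 API.
struct Space {
  uint8_t* top;
  uint8_t* limit;
  // Bump-pointer allocation; nullptr plays the role of an AllocationResult retry.
  uint8_t* AllocateRaw(int size) {
    if (top + size > limit) return nullptr;
    uint8_t* result = top;
    top += size;
    return result;
  }
};

// Reserve first, migrate only on success -- mirrors TryEvacuateObject().
bool TryEvacuate(Space* target, const uint8_t* object, int size,
                 uint8_t** target_object) {
  uint8_t* allocation = target->AllocateRaw(size);
  if (allocation == nullptr) return false;  // caller falls back / aborts the page
  std::memcpy(allocation, object, size);    // MigrateObject() analogue
  *target_object = allocation;
  return true;
}

int main() {
  uint8_t backing[64];
  Space space{backing, backing + sizeof(backing)};
  uint8_t object[16] = {42};
  uint8_t* moved = nullptr;
  std::printf("evacuated: %d\n", TryEvacuate(&space, object, 16, &moved));
}
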
1575 1579
1576 1580
1577 class MarkCompactCollector::EvacuateNewSpaceVisitor final 1581 class MarkCompactCollector::EvacuateNewSpaceVisitor final
1578 : public MarkCompactCollector::EvacuateVisitorBase { 1582 : public MarkCompactCollector::EvacuateVisitorBase {
1579 public: 1583 public:
1580 static const intptr_t kLabSize = 4 * KB; 1584 static const intptr_t kLabSize = 4 * KB;
1581 static const intptr_t kMaxLabObjectSize = 256; 1585 static const intptr_t kMaxLabObjectSize = 256;
1582 1586
1583 explicit EvacuateNewSpaceVisitor(Heap* heap, 1587 explicit EvacuateNewSpaceVisitor(Heap* heap,
1584 SlotsBuffer** evacuation_slots_buffer, 1588 SlotsBuffer** evacuation_slots_buffer,
1585 HashMap* local_pretenuring_feedback) 1589 CompactionSpaceCollection* compaction_spaces)
1586 : EvacuateVisitorBase(heap, evacuation_slots_buffer), 1590 : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces),
1587 buffer_(LocalAllocationBuffer::InvalidBuffer()), 1591 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1588 space_to_allocate_(NEW_SPACE), 1592 space_to_allocate_(NEW_SPACE),
1589 promoted_size_(0), 1593 promoted_size_(0),
1590 semispace_copied_size_(0), 1594 semispace_copied_size_(0),
1591 local_pretenuring_feedback_(local_pretenuring_feedback) {} 1595 local_pretenuring_feedback_(
1596 compaction_spaces->local_pretenuring_feedback()) {}
1592 1597
1593 bool Visit(HeapObject* object) override { 1598 bool Visit(HeapObject* object) override {
1594 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_); 1599 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
1595 int size = object->Size(); 1600 int size = object->Size();
1596 HeapObject* target_object = nullptr; 1601 HeapObject* target_object = nullptr;
1597 if (heap_->ShouldBePromoted(object->address(), size) && 1602 if (heap_->ShouldBePromoted(object->address(), size) &&
1598 TryEvacuateObject(heap_->old_space(), object, &target_object)) { 1603 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1604 &target_object)) {
1599 // If we end up needing more special cases, we should factor this out. 1605 // If we end up needing more special cases, we should factor this out.
1600 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { 1606 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1601 heap_->array_buffer_tracker()->Promote( 1607 heap_->array_buffer_tracker()->Promote(
1602 JSArrayBuffer::cast(target_object)); 1608 JSArrayBuffer::cast(target_object));
1603 } 1609 }
1604 promoted_size_ += size; 1610 promoted_size_ += size;
1605 return true; 1611 return true;
1606 } 1612 }
1607 HeapObject* target = nullptr; 1613 HeapObject* target = nullptr;
1608 AllocationSpace space = AllocateTargetObject(object, &target); 1614 AllocationSpace space = AllocateTargetObject(object, &target);
1609 heap_->mark_compact_collector()->MigrateObject( 1615 heap_->mark_compact_collector()->MigrateObject(
1610 HeapObject::cast(target), object, size, space, 1616 HeapObject::cast(target), object, size, space,
1611 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_); 1617 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
1618 compaction_spaces_->local_store_buffer());
1612 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { 1619 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1613 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); 1620 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1614 } 1621 }
1615 semispace_copied_size_ += size; 1622 semispace_copied_size_ += size;
1616 return true; 1623 return true;
1617 } 1624 }
1618 1625
1619 intptr_t promoted_size() { return promoted_size_; } 1626 intptr_t promoted_size() { return promoted_size_; }
1620 intptr_t semispace_copied_size() { return semispace_copied_size_; } 1627 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1621 1628
(...skipping 51 matching lines...)
1673 if (allocation.IsRetry()) { 1680 if (allocation.IsRetry()) {
1674 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE; 1681 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1675 } 1682 }
1676 } 1683 }
1677 } 1684 }
1678 return allocation; 1685 return allocation;
1679 } 1686 }
1680 1687
1681 inline AllocationResult AllocateInOldSpace(int size_in_bytes, 1688 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1682 AllocationAlignment alignment) { 1689 AllocationAlignment alignment) {
1683 AllocationResult allocation = 1690 AllocationResult allocation = compaction_spaces_->Get(OLD_SPACE)
1684 heap_->old_space()->AllocateRaw(size_in_bytes, alignment); 1691 ->AllocateRaw(size_in_bytes, alignment);
1685 if (allocation.IsRetry()) { 1692 if (allocation.IsRetry()) {
1686 FatalProcessOutOfMemory( 1693 FatalProcessOutOfMemory(
1687 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); 1694 "MarkCompactCollector: semi-space copy, fallback in old gen\n");
1688 } 1695 }
1689 return allocation; 1696 return allocation;
1690 } 1697 }
1691 1698
1692 inline AllocationResult AllocateInLab(int size_in_bytes, 1699 inline AllocationResult AllocateInLab(int size_in_bytes,
1693 AllocationAlignment alignment) { 1700 AllocationAlignment alignment) {
1694 AllocationResult allocation; 1701 AllocationResult allocation;
(...skipping 26 matching lines...)
1721 HashMap* local_pretenuring_feedback_; 1728 HashMap* local_pretenuring_feedback_;
1722 }; 1729 };
1723 1730
1724 1731
1725 class MarkCompactCollector::EvacuateOldSpaceVisitor final 1732 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1726 : public MarkCompactCollector::EvacuateVisitorBase { 1733 : public MarkCompactCollector::EvacuateVisitorBase {
1727 public: 1734 public:
1728 EvacuateOldSpaceVisitor(Heap* heap, 1735 EvacuateOldSpaceVisitor(Heap* heap,
1729 CompactionSpaceCollection* compaction_spaces, 1736 CompactionSpaceCollection* compaction_spaces,
1730 SlotsBuffer** evacuation_slots_buffer) 1737 SlotsBuffer** evacuation_slots_buffer)
1731 : EvacuateVisitorBase(heap, evacuation_slots_buffer), 1738 : EvacuateVisitorBase(heap, evacuation_slots_buffer, compaction_spaces) {}
1732 compaction_spaces_(compaction_spaces) {}
1733 1739
1734 bool Visit(HeapObject* object) override { 1740 bool Visit(HeapObject* object) override {
1735 CompactionSpace* target_space = compaction_spaces_->Get( 1741 CompactionSpace* target_space = compaction_spaces_->Get(
1736 Page::FromAddress(object->address())->owner()->identity()); 1742 Page::FromAddress(object->address())->owner()->identity());
1737 HeapObject* target_object = nullptr; 1743 HeapObject* target_object = nullptr;
1738 if (TryEvacuateObject(target_space, object, &target_object)) { 1744 if (TryEvacuateObject(target_space, object, &target_object)) {
1739 DCHECK(object->map_word().IsForwardingAddress()); 1745 DCHECK(object->map_word().IsForwardingAddress());
1740 return true; 1746 return true;
1741 } 1747 }
1742 return false; 1748 return false;
1743 } 1749 }
1744 1750
1745 private: 1751 private:
1746 CompactionSpaceCollection* compaction_spaces_;
1747 }; 1752 };
1748 1753
1749 1754
1750 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { 1755 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1751 PageIterator it(space); 1756 PageIterator it(space);
1752 while (it.has_next()) { 1757 while (it.has_next()) {
1753 Page* p = it.next(); 1758 Page* p = it.next();
1754 DiscoverGreyObjectsOnPage(p); 1759 DiscoverGreyObjectsOnPage(p);
1755 if (marking_deque()->IsFull()) return; 1760 if (marking_deque()->IsFull()) return;
1756 } 1761 }
(...skipping 789 matching lines...)
2546 while (obj != Smi::FromInt(0)) { 2551 while (obj != Smi::FromInt(0)) {
2547 TransitionArray* array = TransitionArray::cast(obj); 2552 TransitionArray* array = TransitionArray::cast(obj);
2548 obj = array->next_link(); 2553 obj = array->next_link();
2549 array->set_next_link(undefined, SKIP_WRITE_BARRIER); 2554 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2550 } 2555 }
2551 heap()->set_encountered_transition_arrays(Smi::FromInt(0)); 2556 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2552 } 2557 }
2553 2558
2554 2559
2555 void MarkCompactCollector::RecordMigratedSlot( 2560 void MarkCompactCollector::RecordMigratedSlot(
2556 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) { 2561 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
2562 LocalStoreBuffer* local_store_buffer) {
2557 // When parallel compaction is in progress, store and slots buffer entries 2563 // When parallel compaction is in progress, store and slots buffer entries
2558 // require synchronization. 2564 // require synchronization.
2559 if (heap_->InNewSpace(value)) { 2565 if (heap_->InNewSpace(value)) {
2560 if (compaction_in_progress_) { 2566 if (compaction_in_progress_) {
2561 heap_->store_buffer()->MarkSynchronized(slot); 2567 local_store_buffer->Record(slot);
2562 } else { 2568 } else {
2563 heap_->store_buffer()->Mark(slot); 2569 heap_->store_buffer()->Mark(slot);
2564 } 2570 }
2565 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { 2571 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2566 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, 2572 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2567 reinterpret_cast<Object**>(slot), 2573 reinterpret_cast<Object**>(slot),
2568 SlotsBuffer::IGNORE_OVERFLOW); 2574 SlotsBuffer::IGNORE_OVERFLOW);
2569 } 2575 }
2570 } 2576 }
2571 2577
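
The branch on compaction_in_progress_ is the heart of this patch set's synchronization story: while parallel evacuation runs, each task records old-to-new slots into its own LocalStoreBuffer, and the shared store buffer is only touched sequentially on the main thread afterwards (see the Process() calls in EvacuatePagesInParallel further down). A rough sketch of that pattern, with hypothetical StoreBuffer/LocalStoreBuffer types that only illustrate the idea:

#include <cstdint>
#include <cstdio>
#include <mutex>
#include <vector>

using Address = uintptr_t;

// Shared buffer: concurrent inserts would need the mutex.
struct StoreBuffer {
  std::mutex mutex;
  std::vector<Address> slots;
  void Mark(Address slot) {
    std::lock_guard<std::mutex> guard(mutex);
    slots.push_back(slot);
  }
};

// Per-task buffer: no locking while the compaction task runs.
struct LocalStoreBuffer {
  std::vector<Address> slots;
  void Record(Address slot) { slots.push_back(slot); }
  // Drained sequentially on the main thread after all tasks joined.
  void Process(StoreBuffer* store_buffer) {
    for (Address slot : slots) store_buffer->Mark(slot);
    slots.clear();
  }
};

int main() {
  StoreBuffer shared;
  LocalStoreBuffer local;
  local.Record(0x1000);    // parallel phase: no lock taken
  local.Record(0x2000);
  local.Process(&shared);  // merge phase: sequential
  std::printf("%zu slots drained\n", shared.slots.size());
}
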
(...skipping 61 matching lines...)
2633 if (!success) { 2639 if (!success) {
2634 EvictPopularEvacuationCandidate(target_page); 2640 EvictPopularEvacuationCandidate(target_page);
2635 } 2641 }
2636 } 2642 }
2637 } 2643 }
2638 2644
2639 2645
2640 class RecordMigratedSlotVisitor final : public ObjectVisitor { 2646 class RecordMigratedSlotVisitor final : public ObjectVisitor {
2641 public: 2647 public:
2642 RecordMigratedSlotVisitor(MarkCompactCollector* collector, 2648 RecordMigratedSlotVisitor(MarkCompactCollector* collector,
2643 SlotsBuffer** evacuation_slots_buffer) 2649 SlotsBuffer** evacuation_slots_buffer,
2650 LocalStoreBuffer* local_store_buffer)
2644 : collector_(collector), 2651 : collector_(collector),
2645 evacuation_slots_buffer_(evacuation_slots_buffer) {} 2652 evacuation_slots_buffer_(evacuation_slots_buffer),
2653 local_store_buffer_(local_store_buffer) {}
2646 2654
2647 V8_INLINE void VisitPointer(Object** p) override { 2655 V8_INLINE void VisitPointer(Object** p) override {
2648 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), 2656 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
2649 evacuation_slots_buffer_); 2657 evacuation_slots_buffer_,
2658 local_store_buffer_);
2650 } 2659 }
2651 2660
2652 V8_INLINE void VisitPointers(Object** start, Object** end) override { 2661 V8_INLINE void VisitPointers(Object** start, Object** end) override {
2653 while (start < end) { 2662 while (start < end) {
2654 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), 2663 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
2655 evacuation_slots_buffer_); 2664 evacuation_slots_buffer_,
2665 local_store_buffer_);
2656 ++start; 2666 ++start;
2657 } 2667 }
2658 } 2668 }
2659 2669
2660 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { 2670 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
2661 if (collector_->compacting_) { 2671 if (collector_->compacting_) {
2662 Address code_entry = Memory::Address_at(code_entry_slot); 2672 Address code_entry = Memory::Address_at(code_entry_slot);
2663 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, 2673 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
2664 evacuation_slots_buffer_); 2674 evacuation_slots_buffer_);
2665 } 2675 }
2666 } 2676 }
2667 2677
2668 private: 2678 private:
2669 MarkCompactCollector* collector_; 2679 MarkCompactCollector* collector_;
2670 SlotsBuffer** evacuation_slots_buffer_; 2680 SlotsBuffer** evacuation_slots_buffer_;
2681 LocalStoreBuffer* local_store_buffer_;
2671 }; 2682 };
2672 2683
2673 2684
2674 // We scavenge new space simultaneously with sweeping. This is done in two 2685 // We scavenge new space simultaneously with sweeping. This is done in two
2675 // passes. 2686 // passes.
2676 // 2687 //
2677 // The first pass migrates all alive objects from one semispace to another or 2688 // The first pass migrates all alive objects from one semispace to another or
2678 // promotes them to old space. Forwarding address is written directly into 2689 // promotes them to old space. Forwarding address is written directly into
2679 // first word of object without any encoding. If object is dead we write 2690 // first word of object without any encoding. If object is dead we write
2680 // NULL as a forwarding address. 2691 // NULL as a forwarding address.
2681 // 2692 //
2682 // The second pass updates pointers to new space in all spaces. It is possible 2693 // The second pass updates pointers to new space in all spaces. It is possible
2683 // to encounter pointers to dead new space objects during traversal of pointers 2694 // to encounter pointers to dead new space objects during traversal of pointers
2684 // to new space. We should clear them to avoid encountering them during next 2695 // to new space. We should clear them to avoid encountering them during next
2685 // pointer iteration. This is an issue if the store buffer overflows and we 2696 // pointer iteration. This is an issue if the store buffer overflows and we
2686 // have to scan the entire old space, including dead objects, looking for 2697 // have to scan the entire old space, including dead objects, looking for
2687 // pointers to new space. 2698 // pointers to new space.
2688 void MarkCompactCollector::MigrateObject( 2699 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2689 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest, 2700 int size, AllocationSpace dest,
2690 SlotsBuffer** evacuation_slots_buffer) { 2701 SlotsBuffer** evacuation_slots_buffer,
2702 LocalStoreBuffer* local_store_buffer) {
2691 Address dst_addr = dst->address(); 2703 Address dst_addr = dst->address();
2692 Address src_addr = src->address(); 2704 Address src_addr = src->address();
2693 DCHECK(heap()->AllowedToBeMigrated(src, dest)); 2705 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2694 DCHECK(dest != LO_SPACE); 2706 DCHECK(dest != LO_SPACE);
2695 if (dest == OLD_SPACE) { 2707 if (dest == OLD_SPACE) {
2696 DCHECK_OBJECT_SIZE(size); 2708 DCHECK_OBJECT_SIZE(size);
2697 DCHECK(evacuation_slots_buffer != nullptr); 2709 DCHECK(evacuation_slots_buffer != nullptr);
2698 DCHECK(IsAligned(size, kPointerSize)); 2710 DCHECK(IsAligned(size, kPointerSize));
2699 2711
2700 heap()->MoveBlock(dst->address(), src->address(), size); 2712 heap()->MoveBlock(dst->address(), src->address(), size);
2701 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer); 2713 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
2714 local_store_buffer);
2702 dst->IterateBody(&visitor); 2715 dst->IterateBody(&visitor);
2703 } else if (dest == CODE_SPACE) { 2716 } else if (dest == CODE_SPACE) {
2704 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); 2717 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
2705 DCHECK(evacuation_slots_buffer != nullptr); 2718 DCHECK(evacuation_slots_buffer != nullptr);
2706 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); 2719 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2707 heap()->MoveBlock(dst_addr, src_addr, size); 2720 heap()->MoveBlock(dst_addr, src_addr, size);
2708 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); 2721 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
2709 Code::cast(dst)->Relocate(dst_addr - src_addr); 2722 Code::cast(dst)->Relocate(dst_addr - src_addr);
2710 } else { 2723 } else {
2711 DCHECK_OBJECT_SIZE(size); 2724 DCHECK_OBJECT_SIZE(size);
(...skipping 341 matching lines...)
3053 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3066 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3054 3067
3055 // The target object is black but we don't know if the source slot is black. 3068 // The target object is black but we don't know if the source slot is black.
3056 // The source object could have died and the slot could be part of a free 3069 // The source object could have died and the slot could be part of a free
3057 // space. Use the mark bit iterator to find out about liveness of the slot. 3070 // space. Use the mark bit iterator to find out about liveness of the slot.
3058 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); 3071 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
3059 } 3072 }
3060 3073
3061 3074
3062 void MarkCompactCollector::EvacuateNewSpacePrologue() { 3075 void MarkCompactCollector::EvacuateNewSpacePrologue() {
3063 // There are soft limits in the allocation code, designed to trigger a mark
3064 // sweep collection by failing allocations. But since we are already in
3065 // a mark-sweep allocation, there is no sense in trying to trigger one.
3066 AlwaysAllocateScope scope(isolate());
3067
3068 NewSpace* new_space = heap()->new_space(); 3076 NewSpace* new_space = heap()->new_space();
3069 3077 NewSpacePageIterator it(new_space->bottom(), new_space->top());
3070 // Store allocation range before flipping semispaces. 3078 // Append the list of new space pages to be processed.
3071 Address from_bottom = new_space->bottom(); 3079 while (it.has_next()) {
3072 Address from_top = new_space->top(); 3080 evacuation_candidates_.Add(it.next());
ulan 2016/01/15 10:44:43 As discussed offline, it would be cleaner to have
Michael Lippautz 2016/01/15 13:09:52 Done.
3073 3081 }
3074 // Flip the semispaces. After flipping, to space is empty, from space has
3075 // live objects.
3076 new_space->Flip(); 3082 new_space->Flip();
3077 new_space->ResetAllocationInfo(); 3083 new_space->ResetAllocationInfo();
3084 }
3078 3085
3079 newspace_evacuation_candidates_.Clear(); 3086
3080 NewSpacePageIterator it(from_bottom, from_top); 3087 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
3081 while (it.has_next()) { 3088 // NewSpace pages have been appended to this list. We remove them by
3082 newspace_evacuation_candidates_.Add(it.next()); 3089 // iterating over the list from the end.
3090 MemoryChunk* p = nullptr;
3091 while (evacuation_candidates_.length() > 0 &&
3092 ((p = evacuation_candidates_.last()) != nullptr) && p->InNewSpace()) {
3093 evacuation_candidates_.Remove(evacuation_candidates_.length() - 1);
3083 } 3094 }
3084 } 3095 }
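
So with this patch the prologue appends the new-space pages to the shared evacuation_candidates_ list and the epilogue strips exactly those pages off the tail again, leaving the old-space candidates at the front untouched. A small sketch of that append/pop-from-the-tail discipline, using std::vector and a hypothetical Chunk in place of V8's List and MemoryChunk:

#include <cstdio>
#include <vector>

// Hypothetical page type; in_new_space stands in for MemoryChunk::InNewSpace().
struct Chunk { bool in_new_space; };

void Prologue(std::vector<Chunk*>* candidates,
              const std::vector<Chunk*>& new_space_pages) {
  // Old-space candidates are already in the list; new-space pages go last.
  for (Chunk* p : new_space_pages) candidates->push_back(p);
}

void Epilogue(std::vector<Chunk*>* candidates) {
  // Strip exactly the pages the prologue appended, from the tail.
  while (!candidates->empty() && candidates->back()->in_new_space) {
    candidates->pop_back();
  }
}

int main() {
  Chunk old_page{false}, new_page_1{true}, new_page_2{true};
  std::vector<Chunk*> candidates{&old_page};
  Prologue(&candidates, {&new_page_1, &new_page_2});
  Epilogue(&candidates);
  std::printf("%zu candidate(s) remain (old-space only)\n", candidates.size());
}
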
3085 3096
3086 3097
3087 HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
3088 HashMap* local_pretenuring_feedback = new HashMap(
3089 HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
3090 EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
3091 local_pretenuring_feedback);
3092 // First pass: traverse all objects in inactive semispace, remove marks,
3093 // migrate live objects and write forwarding addresses. This stage puts
3094 // new entries in the store buffer and may cause some pages to be marked
3095 // scan-on-scavenge.
3096 for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
3097 NewSpacePage* p =
3098 reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
3099 bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
3100 USE(ok);
3101 DCHECK(ok);
3102 }
3103 heap_->IncrementPromotedObjectsSize(
3104 static_cast<int>(new_space_visitor.promoted_size()));
3105 heap_->IncrementSemiSpaceCopiedObjectSize(
3106 static_cast<int>(new_space_visitor.semispace_copied_size()));
3107 heap_->IncrementYoungSurvivorsCounter(
3108 static_cast<int>(new_space_visitor.promoted_size()) +
3109 static_cast<int>(new_space_visitor.semispace_copied_size()));
3110 return local_pretenuring_feedback;
3111 }
3112
3113
3114 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( 3098 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
3115 SlotsBuffer* evacuation_slots_buffer) { 3099 SlotsBuffer* evacuation_slots_buffer) {
3116 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); 3100 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
3117 evacuation_slots_buffers_.Add(evacuation_slots_buffer); 3101 evacuation_slots_buffers_.Add(evacuation_slots_buffer);
3118 } 3102 }
3119 3103
3120 3104
3121 int MarkCompactCollector::NumberOfParallelCompactionTasks() { 3105 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
3122 if (!FLAG_parallel_compaction) return 1; 3106 if (!FLAG_parallel_compaction) return 1;
3123 // Compute the number of needed tasks based on a target compaction time, the 3107 // Compute the number of needed tasks based on a target compaction time, the
3124 // profiled compaction speed and marked live memory. 3108 // profiled compaction speed and marked live memory.
3125 // 3109 //
3126 // The number of parallel compaction tasks is limited by: 3110 // The number of parallel compaction tasks is limited by:
3127 // - #evacuation pages 3111 // - #evacuation pages
3128 // - (#cores - 1) 3112 // - (#cores - 1)
3129 // - a hard limit
3130 const double kTargetCompactionTimeInMs = 1; 3113 const double kTargetCompactionTimeInMs = 1;
3131 const int kMaxCompactionTasks = 8;
Michael Lippautz 2016/01/14 19:51:55 I removed the hard limit here. We are still capped
3132 3114
3133 intptr_t compaction_speed = 3115 intptr_t compaction_speed =
3134 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3116 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3135 if (compaction_speed == 0) return 1;
3136 3117
3137 intptr_t live_bytes = 0; 3118 intptr_t live_bytes = 0;
3138 for (Page* page : evacuation_candidates_) { 3119 for (MemoryChunk* chunk : evacuation_candidates_) {
3139 live_bytes += page->LiveBytes(); 3120 live_bytes += chunk->LiveBytes();
3140 } 3121 }
3141 3122
3142 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1); 3123 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
3143 const int tasks = 3124 int tasks;
3144 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed / 3125 if (compaction_speed > 0) {
3145 kTargetCompactionTimeInMs); 3126 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
3127 compaction_speed / kTargetCompactionTimeInMs);
3128 } else {
3129 tasks = evacuation_candidates_.length();
3130 }
3146 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks); 3131 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
3147 const int tasks_capped_cores = Min(cores, tasks_capped_pages); 3132 const int tasks_capped_cores = Min(cores, tasks_capped_pages);
3148 const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores); 3133 return tasks_capped_cores;
3149 return tasks_capped_hard;
3150 } 3134 }
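
The heuristic after this change, restated: aim for roughly kTargetCompactionTimeInMs of work per task based on the profiled compaction speed, fall back to one task per candidate page while no speed sample exists yet, and cap the result by the number of candidate pages and by cores - 1 (the hard cap of 8 tasks is gone). A standalone restatement of that computation; the free function and its parameters are an illustration, not the V8 signature:

#include <algorithm>
#include <cstdint>
#include <cstdio>

int NumberOfParallelCompactionTasks(int64_t live_bytes,
                                    int64_t compaction_speed,  // bytes per ms
                                    int candidate_pages, int processors) {
  const double kTargetCompactionTimeInMs = 1;
  const int cores = std::max(1, processors - 1);
  int tasks;
  if (compaction_speed > 0) {
    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                 compaction_speed / kTargetCompactionTimeInMs);
  } else {
    // No profiled speed yet: one task per candidate page, capped below.
    tasks = candidate_pages;
  }
  const int tasks_capped_pages = std::min(candidate_pages, tasks);
  return std::min(cores, tasks_capped_pages);  // capped by cores - 1
}

int main() {
  // e.g., 8 MB live on 12 candidate pages, ~4 MB/ms profiled speed, 8 cores.
  std::printf("%d tasks\n",
              NumberOfParallelCompactionTasks(8 << 20, 4 << 20, 12, 8));
}
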
3151 3135
3152 3136
3153 void MarkCompactCollector::EvacuatePagesInParallel() { 3137 void MarkCompactCollector::EvacuatePagesInParallel() {
3154 const int num_pages = evacuation_candidates_.length(); 3138 DCHECK_GE(evacuation_candidates_.length(), 1);
3155 if (num_pages == 0) return;
3156 3139
3157 // Used for trace summary. 3140 // Used for trace summary.
3158 intptr_t live_bytes = 0; 3141 intptr_t live_bytes = 0;
3159 intptr_t compaction_speed = 0; 3142 intptr_t compaction_speed = 0;
3160 if (FLAG_trace_fragmentation) { 3143 if (FLAG_trace_fragmentation) {
3161 for (Page* page : evacuation_candidates_) { 3144 for (MemoryChunk* page : evacuation_candidates_) {
3162 live_bytes += page->LiveBytes(); 3145 live_bytes += page->LiveBytes();
3163 } 3146 }
3164 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3147 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3165 } 3148 }
3166 const int num_tasks = NumberOfParallelCompactionTasks(); 3149 const int num_tasks = NumberOfParallelCompactionTasks();
3167 3150
3168 // Set up compaction spaces. 3151 // Set up compaction spaces.
3169 CompactionSpaceCollection** compaction_spaces_for_tasks = 3152 CompactionSpaceCollection** compaction_spaces_for_tasks =
3170 new CompactionSpaceCollection*[num_tasks]; 3153 new CompactionSpaceCollection*[num_tasks];
3171 for (int i = 0; i < num_tasks; i++) { 3154 for (int i = 0; i < num_tasks; i++) {
3172 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); 3155 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
3173 } 3156 }
3174 3157
3175 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, 3158 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3176 num_tasks); 3159 num_tasks);
3177 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, 3160 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3178 num_tasks); 3161 num_tasks);
3179 3162
3180 uint32_t* task_ids = new uint32_t[num_tasks - 1]; 3163 uint32_t* task_ids = new uint32_t[num_tasks - 1];
3181 // Kick off parallel tasks. 3164 // Kick off parallel tasks.
3182 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks); 3165 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
3183 // Wait for unfinished and not-yet-started tasks. 3166 // Wait for unfinished and not-yet-started tasks.
3184 WaitUntilCompactionCompleted(task_ids, num_tasks - 1); 3167 WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
3185 delete[] task_ids; 3168 delete[] task_ids;
3186 3169
3187 double compaction_duration = 0.0; 3170 double compaction_duration = 0.0;
3188 intptr_t compacted_memory = 0; 3171 intptr_t compacted_memory = 0;
3189 // Merge back memory (compacted and unused) from compaction spaces. 3172 // Merge back memory (compacted and unused) from compaction spaces and update
3173 // pretenuring feedback.
3190 for (int i = 0; i < num_tasks; i++) { 3174 for (int i = 0; i < num_tasks; i++) {
3191 heap()->old_space()->MergeCompactionSpace( 3175 heap()->old_space()->MergeCompactionSpace(
3192 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); 3176 compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
3193 heap()->code_space()->MergeCompactionSpace( 3177 heap()->code_space()->MergeCompactionSpace(
3194 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); 3178 compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
3195 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted(); 3179 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
3196 compaction_duration += compaction_spaces_for_tasks[i]->duration(); 3180 compaction_duration += compaction_spaces_for_tasks[i]->duration();
3181 heap()->MergeAllocationSitePretenuringFeedback(
3182 *compaction_spaces_for_tasks[i]->local_pretenuring_feedback());
3183 compaction_spaces_for_tasks[i]->local_store_buffer()->Process(
3184 heap()->store_buffer());
3197 delete compaction_spaces_for_tasks[i]; 3185 delete compaction_spaces_for_tasks[i];
3198 } 3186 }
3199 delete[] compaction_spaces_for_tasks; 3187 delete[] compaction_spaces_for_tasks;
3200 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory); 3188 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
3201 3189
3202 // Finalize sequentially. 3190 // Finalize sequentially.
3203 int abandoned_pages = 0; 3191 int abandoned_pages = 0;
3204 for (int i = 0; i < num_pages; i++) { 3192 for (MemoryChunk* p : evacuation_candidates_) {
3205 Page* p = evacuation_candidates_[i];
3206 switch (p->parallel_compaction_state().Value()) { 3193 switch (p->parallel_compaction_state().Value()) {
3207 case MemoryChunk::ParallelCompactingState::kCompactingAborted: 3194 case MemoryChunk::ParallelCompactingState::kCompactingAborted:
3208 // We have partially compacted the page, i.e., some objects may have 3195 // We have partially compacted the page, i.e., some objects may have
3209 // moved, others are still in place. 3196 // moved, others are still in place.
3210 // We need to: 3197 // We need to:
3211 // - Leave the evacuation candidate flag for later processing of 3198 // - Leave the evacuation candidate flag for later processing of
3212 // slots buffer entries. 3199 // slots buffer entries.
3213 // - Leave the slots buffer there for processing of entries added by 3200 // - Leave the slots buffer there for processing of entries added by
3214 // the write barrier. 3201 // the write barrier.
3215 // - Rescan the page as slot recording in the migration buffer only 3202 // - Rescan the page as slot recording in the migration buffer only
3216 // happens upon moving (which we potentially didn't do). 3203 // happens upon moving (which we potentially didn't do).
3217 // - Leave the page in the list of pages of a space since we could not 3204 // - Leave the page in the list of pages of a space since we could not
3218 // fully evacuate it. 3205 // fully evacuate it.
3219 // - Mark them for rescanning for store buffer entries as we otherwise 3206 // - Mark them for rescanning for store buffer entries as we otherwise
3220 // might have stale store buffer entries that become "valid" again 3207 // might have stale store buffer entries that become "valid" again
3221 // after reusing the memory. Note that all existing store buffer 3208 // after reusing the memory. Note that all existing store buffer
3222 // entries of such pages are filtered before rescanning. 3209 // entries of such pages are filtered before rescanning.
3223 DCHECK(p->IsEvacuationCandidate()); 3210 DCHECK(p->IsEvacuationCandidate());
3211 DCHECK(!p->InNewSpace());
3224 p->SetFlag(Page::COMPACTION_WAS_ABORTED); 3212 p->SetFlag(Page::COMPACTION_WAS_ABORTED);
3225 p->set_scan_on_scavenge(true); 3213 p->set_scan_on_scavenge(true);
3226 abandoned_pages++; 3214 abandoned_pages++;
3227 break; 3215 break;
3228 case MemoryChunk::kCompactingFinalize: 3216 case MemoryChunk::kCompactingFinalize:
3229 DCHECK(p->IsEvacuationCandidate()); 3217 DCHECK(p->IsEvacuationCandidate() || p->InNewSpace());
3230 p->SetWasSwept(); 3218 if (!p->InNewSpace()) {
3231 p->Unlink(); 3219 reinterpret_cast<Page*>(p)->SetWasSwept();
3220 p->Unlink();
3221 }
3232 break; 3222 break;
3233 case MemoryChunk::kCompactingDone: 3223 case MemoryChunk::kCompactingDone:
3234 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); 3224 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
3235 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3225 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3236 break; 3226 break;
3237 default: 3227 default:
3238 // We should not observe kCompactingInProgress, or kCompactingDone. 3228 // MemoryChunk::kCompactingInProgress.
3239 UNREACHABLE(); 3229 UNREACHABLE();
3240 } 3230 }
3241 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); 3231 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3242 } 3232 }
3243 if (FLAG_trace_fragmentation) { 3233 if (FLAG_trace_fragmentation) {
3244 PrintIsolate(isolate(), 3234 PrintIsolate(isolate(),
3245 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " 3235 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
3246 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX 3236 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
3247 "d compaction_speed=%" V8_PTR_PREFIX "d\n", 3237 "d compaction_speed=%" V8_PTR_PREFIX "d\n",
3248 isolate()->time_millis_since_init(), FLAG_parallel_compaction, 3238 isolate()->time_millis_since_init(), FLAG_parallel_compaction,
3249 num_pages, abandoned_pages, num_tasks, 3239 evacuation_candidates_.length(), abandoned_pages, num_tasks,
3250 base::SysInfo::NumberOfProcessors(), live_bytes, 3240 base::SysInfo::NumberOfProcessors(), live_bytes,
3251 compaction_speed); 3241 compaction_speed);
3252 } 3242 }
3253 } 3243 }
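
The surrounding orchestration is: split old and code space into one CompactionSpaceCollection per task up front, let the tasks run without shared mutable state, then merge everything back sequentially — compacted bytes, durations, pretenuring feedback, and the local store buffers. A reduced sketch of that fork/merge shape using std::thread and a hypothetical TaskState in place of CompactionSpaceCollection:

#include <cstdio>
#include <thread>
#include <vector>

// Hypothetical per-task state standing in for CompactionSpaceCollection.
struct TaskState {
  long bytes_compacted = 0;
  double duration_ms = 0;
};

int main() {
  const int num_tasks = 4;
  std::vector<TaskState> task_states(num_tasks);
  std::vector<std::thread> tasks;
  // Parallel phase: each task touches only its own state, so no locks.
  for (int i = 0; i < num_tasks; i++) {
    tasks.emplace_back([&task_states, i] {
      task_states[i].bytes_compacted = 1000 * (i + 1);
      task_states[i].duration_ms = 0.25;
    });
  }
  for (auto& t : tasks) t.join();
  // Sequential merge phase, mirroring MergeCompactionSpace()/Process() above.
  long total_bytes = 0;
  double total_duration = 0;
  for (const TaskState& s : task_states) {
    total_bytes += s.bytes_compacted;
    total_duration += s.duration_ms;
  }
  std::printf("compacted %ld bytes in %.2f ms (summed)\n", total_bytes,
              total_duration);
}
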
3254 3244
3255 3245
3256 void MarkCompactCollector::StartParallelCompaction( 3246 void MarkCompactCollector::StartParallelCompaction(
3257 CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids, 3247 CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
3258 int len) { 3248 int len) {
3259 compaction_in_progress_ = true; 3249 compaction_in_progress_ = true;
(...skipping 20 matching lines...)
3280 pending_compaction_tasks_semaphore_.Wait(); 3270 pending_compaction_tasks_semaphore_.Wait();
3281 } 3271 }
3282 } 3272 }
3283 compaction_in_progress_ = false; 3273 compaction_in_progress_ = false;
3284 } 3274 }
3285 3275
3286 3276
3287 void MarkCompactCollector::EvacuatePages( 3277 void MarkCompactCollector::EvacuatePages(
3288 CompactionSpaceCollection* compaction_spaces, 3278 CompactionSpaceCollection* compaction_spaces,
3289 SlotsBuffer** evacuation_slots_buffer) { 3279 SlotsBuffer** evacuation_slots_buffer) {
3290 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces, 3280 EvacuateOldSpaceVisitor old_space_visitor(heap(), compaction_spaces,
3291 evacuation_slots_buffer); 3281 evacuation_slots_buffer);
3292 for (int i = 0; i < evacuation_candidates_.length(); i++) { 3282 EvacuateNewSpaceVisitor new_space_visitor(heap(), evacuation_slots_buffer,
3293 Page* p = evacuation_candidates_[i]; 3283 compaction_spaces);
3284 // We run through the list in reverse order to process newspace pages first,
3285 // effectively reducing the number of old-to-new references and thus the
3286 // load on the store buffer. Note that processing is still interleaved.
3287 MemoryChunk* p = nullptr;
3288 for (int i = evacuation_candidates_.length() - 1; i >= 0; --i) {
3289 p = evacuation_candidates_[i];
3294 DCHECK(p->IsEvacuationCandidate() || 3290 DCHECK(p->IsEvacuationCandidate() ||
3295 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3291 p->IsFlagSet(Page::RESCAN_ON_EVACUATION) || p->InNewSpace());
3296 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == 3292 DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()),
3297 MemoryChunk::kSweepingDone); 3293 MemoryChunk::kSweepingDone);
3298 if (p->parallel_compaction_state().TrySetValue( 3294 if (p->parallel_compaction_state().TrySetValue(
3299 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { 3295 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3300 if (p->IsEvacuationCandidate()) { 3296 if (p->IsEvacuationCandidate() || p->InNewSpace()) {
3301 DCHECK_EQ(p->parallel_compaction_state().Value(), 3297 DCHECK_EQ(p->parallel_compaction_state().Value(),
3302 MemoryChunk::kCompactingInProgress); 3298 MemoryChunk::kCompactingInProgress);
3303 double start = heap()->MonotonicallyIncreasingTimeInMs(); 3299 int saved_live_bytes = p->LiveBytes();
3304 intptr_t live_bytes = p->LiveBytes(); 3300 double evacuation_time;
3305 AlwaysAllocateScope always_allocate(isolate()); 3301 bool success;
3306 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) { 3302 {
3303 AlwaysAllocateScope always_allocate(isolate());
3304 TimedScope timed_scope(heap(), &evacuation_time);
3305 success =
3306 p->InNewSpace()
3307 ? VisitLiveObjects(p, &new_space_visitor, kClearMarkbits)
3308 : VisitLiveObjects(p, &old_space_visitor, kClearMarkbits);
3309 }
3310 // New space evacuation bails out to a regular semispace copy in OOM
3311 // cases. A failing semispace copy fails hard, before reaching this
3312 // point.
3313 DCHECK(!p->InNewSpace() || success);
3314 if (success) {
3315 compaction_spaces->ReportCompactionProgress(evacuation_time,
3316 saved_live_bytes);
3307 p->ResetLiveBytes(); 3317 p->ResetLiveBytes();
3308 p->parallel_compaction_state().SetValue( 3318 p->parallel_compaction_state().SetValue(
3309 MemoryChunk::kCompactingFinalize); 3319 MemoryChunk::kCompactingFinalize);
3310 compaction_spaces->ReportCompactionProgress(
3311 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
3312 } else { 3320 } else {
3313 p->parallel_compaction_state().SetValue( 3321 p->parallel_compaction_state().SetValue(
3314 MemoryChunk::kCompactingAborted); 3322 MemoryChunk::kCompactingAborted);
3315 } 3323 }
3316 } else { 3324 } else {
3317 // There could be popular pages in the list of evacuation candidates 3325 // There could be popular pages in the list of evacuation candidates
3318 // which we do compact. 3326 // which we do compact.
3319 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); 3327 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3320 } 3328 }
3321 } 3329 }
3322 } 3330 }
3331
3332 heap()->IncrementPromotedObjectsSize(new_space_visitor.promoted_size());
3333 heap()->IncrementSemiSpaceCopiedObjectSize(
3334 new_space_visitor.semispace_copied_size());
3335 heap()->IncrementYoungSurvivorsCounter(
3336 new_space_visitor.promoted_size() +
3337 new_space_visitor.semispace_copied_size());
3323 } 3338 }
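
Because new-space pages now sit in the same candidate list that every compaction task walks, ownership of a page is decided by the compare-and-swap on parallel_compaction_state(): only the task that transitions kCompactingDone to kCompactingInProgress evacuates the page. A minimal sketch of that claim protocol with std::atomic; Chunk and TryClaim are hypothetical stand-ins:

#include <atomic>
#include <cstdio>
#include <thread>
#include <vector>

enum State { kCompactingDone, kCompactingInProgress, kCompactingFinalize, kCompactingAborted };

// Hypothetical page with the same claim protocol as parallel_compaction_state().
struct Chunk {
  std::atomic<State> state{kCompactingDone};
  bool TryClaim() {
    State expected = kCompactingDone;
    return state.compare_exchange_strong(expected, kCompactingInProgress);
  }
};

int main() {
  std::vector<Chunk> pages(16);
  std::atomic<int> claimed{0};
  auto worker = [&] {
    for (Chunk& p : pages) {
      if (p.TryClaim()) claimed++;  // exactly one task wins each page
    }
  };
  std::thread a(worker), b(worker);
  a.join();
  b.join();
  std::printf("claimed %d of %zu pages\n", claimed.load(), pages.size());
}
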
3324 3339
3325 3340
3326 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { 3341 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3327 public: 3342 public:
3328 virtual Object* RetainAs(Object* object) { 3343 virtual Object* RetainAs(Object* object) {
3329 if (object->IsHeapObject()) { 3344 if (object->IsHeapObject()) {
3330 HeapObject* heap_object = HeapObject::cast(object); 3345 HeapObject* heap_object = HeapObject::cast(object);
3331 MapWord map_word = heap_object->map_word(); 3346 MapWord map_word = heap_object->map_word();
3332 if (map_word.IsForwardingAddress()) { 3347 if (map_word.IsForwardingAddress()) {
(...skipping 134 matching lines...)
3467 return code->is_optimized_code() && code->marked_for_deoptimization(); 3482 return code->is_optimized_code() && code->marked_for_deoptimization();
3468 } 3483 }
3469 3484
3470 3485
3471 void MarkCompactCollector::RemoveObjectSlots(Address start_slot, 3486 void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
3472 Address end_slot) { 3487 Address end_slot) {
3473 // Remove entries by replacing them with an old-space slot containing a smi 3488 // Remove entries by replacing them with an old-space slot containing a smi
3474 // that is located in an unmovable page. 3489 // that is located in an unmovable page.
3475 int npages = evacuation_candidates_.length(); 3490 int npages = evacuation_candidates_.length();
3476 for (int i = 0; i < npages; i++) { 3491 for (int i = 0; i < npages; i++) {
3477 Page* p = evacuation_candidates_[i]; 3492 MemoryChunk* p = evacuation_candidates_[i];
3478 DCHECK(p->IsEvacuationCandidate() || 3493 DCHECK(p->IsEvacuationCandidate() ||
3479 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3494 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3480 if (p->IsEvacuationCandidate()) { 3495 if (p->IsEvacuationCandidate()) {
3481 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, 3496 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
3482 end_slot); 3497 end_slot);
3483 } 3498 }
3484 } 3499 }
3485 } 3500 }
3486 3501
3487 3502
(...skipping 60 matching lines...)
3548 Map* map = object->synchronized_map(); 3563 Map* map = object->synchronized_map();
3549 int size = object->SizeFromMap(map); 3564 int size = object->SizeFromMap(map);
3550 object->IterateBody(map->instance_type(), size, visitor); 3565 object->IterateBody(map->instance_type(), size, visitor);
3551 } 3566 }
3552 } 3567 }
3553 3568
3554 3569
3555 void MarkCompactCollector::SweepAbortedPages() { 3570 void MarkCompactCollector::SweepAbortedPages() {
3556 // Second pass on aborted pages. 3571 // Second pass on aborted pages.
3557 for (int i = 0; i < evacuation_candidates_.length(); i++) { 3572 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3558 Page* p = evacuation_candidates_[i]; 3573 Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
3559 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { 3574 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3560 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); 3575 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
3561 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3576 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3562 switch (space->identity()) { 3577 switch (space->identity()) {
3563 case OLD_SPACE: 3578 case OLD_SPACE:
3564 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, 3579 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3565 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); 3580 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3566 break; 3581 break;
3567 case CODE_SPACE: 3582 case CODE_SPACE:
3568 if (FLAG_zap_code_space) { 3583 if (FLAG_zap_code_space) {
(...skipping 10 matching lines...)
3579 } 3594 }
3580 } 3595 }
3581 } 3596 }
3582 } 3597 }
3583 3598
3584 3599
3585 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 3600 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3586 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); 3601 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3587 Heap::RelocationLock relocation_lock(heap()); 3602 Heap::RelocationLock relocation_lock(heap());
3588 3603
3589 HashMap* local_pretenuring_feedback = nullptr;
3590 { 3604 {
3591 GCTracer::Scope gc_scope(heap()->tracer(), 3605 GCTracer::Scope gc_scope(heap()->tracer(),
3592 GCTracer::Scope::MC_EVACUATE_NEW_SPACE); 3606 GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
3593 EvacuationScope evacuation_scope(this); 3607 EvacuationScope evacuation_scope(this);
3608
3594 EvacuateNewSpacePrologue(); 3609 EvacuateNewSpacePrologue();
3595 local_pretenuring_feedback = EvacuateNewSpaceInParallel();
3596 heap_->new_space()->set_age_mark(heap_->new_space()->top());
3597 }
3598
3599 {
3600 GCTracer::Scope gc_scope(heap()->tracer(),
3601 GCTracer::Scope::MC_EVACUATE_CANDIDATES);
3602 EvacuationScope evacuation_scope(this);
3603 EvacuatePagesInParallel(); 3610 EvacuatePagesInParallel();
3604 } 3611 EvacuateNewSpaceEpilogue();
3605 3612 heap()->new_space()->set_age_mark(heap()->new_space()->top());
3606 {
3607 heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
3608 delete local_pretenuring_feedback;
3609 } 3613 }
3610 3614
3611 UpdatePointersAfterEvacuation(); 3615 UpdatePointersAfterEvacuation();
3612 3616
3613 { 3617 {
3614 GCTracer::Scope gc_scope(heap()->tracer(), 3618 GCTracer::Scope gc_scope(heap()->tracer(),
3615 GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 3619 GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3616 // After updating all pointers, we can finally sweep the aborted pages, 3620 // After updating all pointers, we can finally sweep the aborted pages,
3617 // effectively overriding any forward pointers. 3621 // effectively overriding any forward pointers.
3618 SweepAbortedPages(); 3622 SweepAbortedPages();
(...skipping 62 matching lines...)
3681 &Heap::ScavengeStoreBufferCallback); 3685 &Heap::ScavengeStoreBufferCallback);
3682 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 3686 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3683 } 3687 }
3684 3688
3685 int npages = evacuation_candidates_.length(); 3689 int npages = evacuation_candidates_.length();
3686 { 3690 {
3687 GCTracer::Scope gc_scope( 3691 GCTracer::Scope gc_scope(
3688 heap()->tracer(), 3692 heap()->tracer(),
3689 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); 3693 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
3690 for (int i = 0; i < npages; i++) { 3694 for (int i = 0; i < npages; i++) {
3691 Page* p = evacuation_candidates_[i]; 3695 Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
3692 DCHECK(p->IsEvacuationCandidate() || 3696 DCHECK(p->IsEvacuationCandidate() ||
3693 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3697 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3694 3698
3695 if (p->IsEvacuationCandidate()) { 3699 if (p->IsEvacuationCandidate()) {
3696 UpdateSlotsRecordedIn(p->slots_buffer()); 3700 UpdateSlotsRecordedIn(p->slots_buffer());
3697 if (FLAG_trace_fragmentation_verbose) { 3701 if (FLAG_trace_fragmentation_verbose) {
3698 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), 3702 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3699 SlotsBuffer::SizeOfChain(p->slots_buffer())); 3703 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3700 } 3704 }
3701 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); 3705 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
(...skipping 55 matching lines...)
3757 3761
3758 EvacuationWeakObjectRetainer evacuation_object_retainer; 3762 EvacuationWeakObjectRetainer evacuation_object_retainer;
3759 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); 3763 heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
3760 } 3764 }
3761 } 3765 }
3762 3766
3763 3767
3764 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { 3768 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3765 int npages = evacuation_candidates_.length(); 3769 int npages = evacuation_candidates_.length();
3766 for (int i = 0; i < npages; i++) { 3770 for (int i = 0; i < npages; i++) {
3767 Page* p = evacuation_candidates_[i]; 3771 Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
3768 if (!p->IsEvacuationCandidate()) continue; 3772 if (!p->IsEvacuationCandidate()) continue;
3769 p->Unlink(); 3773 p->Unlink();
3770 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3774 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3771 p->InsertAfter(space->LastPage()); 3775 p->InsertAfter(space->LastPage());
3772 } 3776 }
3773 } 3777 }
3774 3778
3775 3779
3776 void MarkCompactCollector::ReleaseEvacuationCandidates() { 3780 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3777 int npages = evacuation_candidates_.length(); 3781 int npages = evacuation_candidates_.length();
3778 for (int i = 0; i < npages; i++) { 3782 for (int i = 0; i < npages; i++) {
3779 Page* p = evacuation_candidates_[i]; 3783 Page* p = reinterpret_cast<Page*>(evacuation_candidates_[i]);
3780 if (!p->IsEvacuationCandidate()) continue; 3784 if (!p->IsEvacuationCandidate()) continue;
3781 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3785 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3782 space->Free(p->area_start(), p->area_size()); 3786 space->Free(p->area_start(), p->area_size());
3783 p->set_scan_on_scavenge(false); 3787 p->set_scan_on_scavenge(false);
3784 p->ResetLiveBytes(); 3788 p->ResetLiveBytes();
3785 CHECK(p->WasSwept()); 3789 CHECK(p->WasSwept());
3786 space->ReleasePage(p); 3790 space->ReleasePage(p);
3787 } 3791 }
3788 evacuation_candidates_.Rewind(0); 3792 evacuation_candidates_.Rewind(0);
3789 compacting_ = false; 3793 compacting_ = false;
(...skipping 278 matching lines...)
4068 MarkBit mark_bit = Marking::MarkBitFrom(host); 4072 MarkBit mark_bit = Marking::MarkBitFrom(host);
4069 if (Marking::IsBlack(mark_bit)) { 4073 if (Marking::IsBlack(mark_bit)) {
4070 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); 4074 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
4071 RecordRelocSlot(&rinfo, target); 4075 RecordRelocSlot(&rinfo, target);
4072 } 4076 }
4073 } 4077 }
4074 } 4078 }
4075 4079
4076 } // namespace internal 4080 } // namespace internal
4077 } // namespace v8 4081 } // namespace v8