Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" | 
| 6 | 6 | 
| 7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" | 
| 8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" | 
| 9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" | 
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" | 
| 11 #include "src/compilation-cache.h" | 11 #include "src/compilation-cache.h" | 
| 12 #include "src/deoptimizer.h" | 12 #include "src/deoptimizer.h" | 
| 13 #include "src/execution.h" | 13 #include "src/execution.h" | 
| 14 #include "src/frames-inl.h" | 14 #include "src/frames-inl.h" | 
| 15 #include "src/gdb-jit.h" | 15 #include "src/gdb-jit.h" | 
| 16 #include "src/global-handles.h" | 16 #include "src/global-handles.h" | 
| 17 #include "src/heap/array-buffer-tracker.h" | 17 #include "src/heap/array-buffer-tracker.h" | 
| 18 #include "src/heap/gc-tracer.h" | 18 #include "src/heap/gc-tracer.h" | 
| 19 #include "src/heap/incremental-marking.h" | 19 #include "src/heap/incremental-marking.h" | 
| 20 #include "src/heap/mark-compact-inl.h" | 20 #include "src/heap/mark-compact-inl.h" | 
| 21 #include "src/heap/object-stats.h" | 21 #include "src/heap/object-stats.h" | 
| 22 #include "src/heap/objects-visiting.h" | 22 #include "src/heap/objects-visiting.h" | 
| 23 #include "src/heap/objects-visiting-inl.h" | 23 #include "src/heap/objects-visiting-inl.h" | 
| 24 #include "src/heap/slots-buffer.h" | 24 #include "src/heap/slots-buffer.h" | 
| 25 #include "src/heap/spaces-inl.h" | 25 #include "src/heap/spaces-inl.h" | 
| 26 #include "src/ic/ic.h" | 26 #include "src/ic/ic.h" | 
| 27 #include "src/ic/stub-cache.h" | 27 #include "src/ic/stub-cache.h" | 
| 28 #include "src/profiler/cpu-profiler.h" | 28 #include "src/profiler/cpu-profiler.h" | 
| 29 #include "src/utils-inl.h" | |
| 29 #include "src/v8.h" | 30 #include "src/v8.h" | 
| 30 | 31 | 
| 31 namespace v8 { | 32 namespace v8 { | 
| 32 namespace internal { | 33 namespace internal { | 
| 33 | 34 | 
| 34 | 35 | 
| 35 const char* Marking::kWhiteBitPattern = "00"; | 36 const char* Marking::kWhiteBitPattern = "00"; | 
| 36 const char* Marking::kBlackBitPattern = "11"; | 37 const char* Marking::kBlackBitPattern = "11"; | 
| 37 const char* Marking::kGreyBitPattern = "10"; | 38 const char* Marking::kGreyBitPattern = "10"; | 
| 38 const char* Marking::kImpossibleBitPattern = "01"; | 39 const char* Marking::kImpossibleBitPattern = "01"; | 
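
For readers skimming the patch, these four strings document how an object's color is packed into two consecutive bits of a page's marking bitmap: white objects are unmarked, grey objects are marked but not yet scanned, black objects are marked and scanned, and "01" can never occur. A minimal standalone sketch of the decoding, assuming the first character is the object's mark bit and the second the bit that follows it (this is not V8's MarkBit API):

```cpp
#include <cassert>

enum class Color { kWhite, kGrey, kBlack, kImpossible };

// (first, second) spells the pattern strings above left to right.
Color ColorFromBits(bool first, bool second) {
  if (first) return second ? Color::kBlack : Color::kGrey;  // "11" / "10"
  return second ? Color::kImpossible : Color::kWhite;       // "01" / "00"
}

int main() {
  assert(ColorFromBits(false, false) == Color::kWhite);      // "00"
  assert(ColorFromBits(true, false) == Color::kGrey);        // "10"
  assert(ColorFromBits(true, true) == Color::kBlack);        // "11"
  assert(ColorFromBits(false, true) == Color::kImpossible);  // "01"
  return 0;
}
```
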
| (...skipping 274 matching lines...) | |
| 313 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() { | 314 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() { | 
| 314 { | 315 { | 
| 315 GCTracer::Scope gc_scope(heap()->tracer(), | 316 GCTracer::Scope gc_scope(heap()->tracer(), | 
| 316 GCTracer::Scope::MC_CLEAR_STORE_BUFFER); | 317 GCTracer::Scope::MC_CLEAR_STORE_BUFFER); | 
| 317 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); | 318 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); | 
| 318 } | 319 } | 
| 319 | 320 | 
| 320 { | 321 { | 
| 321 GCTracer::Scope gc_scope(heap()->tracer(), | 322 GCTracer::Scope gc_scope(heap()->tracer(), | 
| 322 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER); | 323 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER); | 
| 323 int number_of_pages = evacuation_candidates_.length(); | 324 for (Page* p : evacuation_candidates_) { | 
| 324 for (int i = 0; i < number_of_pages; i++) { | |
| 325 Page* p = evacuation_candidates_[i]; | |
| 326 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); | 325 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); | 
| 327 } | 326 } | 
| 328 } | 327 } | 
| 329 #ifdef VERIFY_HEAP | 328 #ifdef VERIFY_HEAP | 
| 330 if (FLAG_verify_heap) { | 329 if (FLAG_verify_heap) { | 
| 331 VerifyValidStoreAndSlotsBufferEntries(); | 330 VerifyValidStoreAndSlotsBufferEntries(); | 
| 332 } | 331 } | 
| 333 #endif | 332 #endif | 
| 334 } | 333 } | 
| 335 | 334 | 
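
Each phase above is wrapped in a GCTracer::Scope, an RAII helper that samples the clock on construction and charges the elapsed time to the named phase on destruction. A hedged standalone sketch of that pattern (illustrative names, not V8's GCTracer):

```cpp
#include <chrono>
#include <cstdio>

// RAII phase timer: construction samples the clock, destruction reports
// the elapsed time for the phase.
class ScopedPhaseTimer {
 public:
  explicit ScopedPhaseTimer(const char* phase)
      : phase_(phase), start_(std::chrono::steady_clock::now()) {}
  ~ScopedPhaseTimer() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_).count();
    std::printf("%s: %lld us\n", phase_, static_cast<long long>(us));
  }

 private:
  const char* phase_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  {
    ScopedPhaseTimer timer("MC_CLEAR_STORE_BUFFER");
    // ... clear invalid store buffer entries ...
  }  // elapsed time is reported when the scope closes
  return 0;
}
```
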
| (...skipping 135 matching lines...) | |
| 471 | 470 | 
| 472 LargeObjectIterator it(heap_->lo_space()); | 471 LargeObjectIterator it(heap_->lo_space()); | 
| 473 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 472 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { | 
| 474 Marking::MarkWhite(Marking::MarkBitFrom(obj)); | 473 Marking::MarkWhite(Marking::MarkBitFrom(obj)); | 
| 475 Page::FromAddress(obj->address())->ResetProgressBar(); | 474 Page::FromAddress(obj->address())->ResetProgressBar(); | 
| 476 Page::FromAddress(obj->address())->ResetLiveBytes(); | 475 Page::FromAddress(obj->address())->ResetLiveBytes(); | 
| 477 } | 476 } | 
| 478 } | 477 } | 
| 479 | 478 | 
| 480 | 479 | 
| 481 class MarkCompactCollector::CompactionTask : public CancelableTask { | |
| 482 public: | |
| 483 explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces) | |
| 484 : CancelableTask(heap->isolate()), spaces_(spaces) {} | |
| 485 | |
| 486 virtual ~CompactionTask() {} | |
| 487 | |
| 488 private: | |
| 489 // v8::internal::CancelableTask overrides. | |
| 490 void RunInternal() override { | |
| 491 MarkCompactCollector* mark_compact = | |
| 492 isolate()->heap()->mark_compact_collector(); | |
| 493 SlotsBuffer* evacuation_slots_buffer = nullptr; | |
| 494 mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer); | |
| 495 mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer); | |
| 496 mark_compact->pending_compaction_tasks_semaphore_.Signal(); | |
| 497 } | |
| 498 | |
| 499 CompactionSpaceCollection* spaces_; | |
| 500 | |
| 501 DISALLOW_COPY_AND_ASSIGN(CompactionTask); | |
| 502 }; | |
| 503 | |
| 504 | |
| 505 class MarkCompactCollector::SweeperTask : public v8::Task { | 480 class MarkCompactCollector::SweeperTask : public v8::Task { | 
| 506 public: | 481 public: | 
| 507 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} | 482 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} | 
| 508 | 483 | 
| 509 virtual ~SweeperTask() {} | 484 virtual ~SweeperTask() {} | 
| 510 | 485 | 
| 511 private: | 486 private: | 
| 512 // v8::Task overrides. | 487 // v8::Task overrides. | 
| 513 void Run() override { | 488 void Run() override { | 
| 514 heap_->mark_compact_collector()->SweepInParallel(space_, 0); | 489 heap_->mark_compact_collector()->SweepInParallel(space_, 0); | 
| (...skipping 309 matching lines...) | |
| 824 "compaction-selection: space=%s reduce_memory=%d pages=%d " | 799 "compaction-selection: space=%s reduce_memory=%d pages=%d " | 
| 825 "total_live_bytes=%d\n", | 800 "total_live_bytes=%d\n", | 
| 826 AllocationSpaceName(space->identity()), reduce_memory, | 801 AllocationSpaceName(space->identity()), reduce_memory, | 
| 827 candidate_count, total_live_bytes / KB); | 802 candidate_count, total_live_bytes / KB); | 
| 828 } | 803 } | 
| 829 } | 804 } | 
| 830 | 805 | 
| 831 | 806 | 
| 832 void MarkCompactCollector::AbortCompaction() { | 807 void MarkCompactCollector::AbortCompaction() { | 
| 833 if (compacting_) { | 808 if (compacting_) { | 
| 834 int npages = evacuation_candidates_.length(); | 809 for (Page* p : evacuation_candidates_) { | 
| 835 for (int i = 0; i < npages; i++) { | |
| 836 Page* p = evacuation_candidates_[i]; | |
| 837 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | 810 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | 
| 838 p->ClearEvacuationCandidate(); | 811 p->ClearEvacuationCandidate(); | 
| 839 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 812 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 
| 840 } | 813 } | 
| 841 compacting_ = false; | 814 compacting_ = false; | 
| 842 evacuation_candidates_.Rewind(0); | 815 evacuation_candidates_.Rewind(0); | 
| 843 } | 816 } | 
| 844 DCHECK_EQ(0, evacuation_candidates_.length()); | 817 DCHECK_EQ(0, evacuation_candidates_.length()); | 
| 845 } | 818 } | 
| 846 | 819 | 
| (...skipping 694 matching lines...) | |
| 1541 class MarkCompactCollector::HeapObjectVisitor { | 1514 class MarkCompactCollector::HeapObjectVisitor { | 
| 1542 public: | 1515 public: | 
| 1543 virtual ~HeapObjectVisitor() {} | 1516 virtual ~HeapObjectVisitor() {} | 
| 1544 virtual bool Visit(HeapObject* object) = 0; | 1517 virtual bool Visit(HeapObject* object) = 0; | 
| 1545 }; | 1518 }; | 
| 1546 | 1519 | 
| 1547 | 1520 | 
| 1548 class MarkCompactCollector::EvacuateVisitorBase | 1521 class MarkCompactCollector::EvacuateVisitorBase | 
| 1549 : public MarkCompactCollector::HeapObjectVisitor { | 1522 : public MarkCompactCollector::HeapObjectVisitor { | 
| 1550 public: | 1523 public: | 
| 1551 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer) | 1524 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces, | 
| 1552 : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {} | 1525 SlotsBuffer** evacuation_slots_buffer, | 
| 1526 LocalStoreBuffer* local_store_buffer) | |
| 1527 : heap_(heap), | |
| 1528 evacuation_slots_buffer_(evacuation_slots_buffer), | |
| 1529 compaction_spaces_(compaction_spaces), | |
| 1530 local_store_buffer_(local_store_buffer) {} | |
| 1553 | 1531 | 
| 1554 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, | 1532 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, | 
| 1555 HeapObject** target_object) { | 1533 HeapObject** target_object) { | 
| 1556 int size = object->Size(); | 1534 int size = object->Size(); | 
| 1557 AllocationAlignment alignment = object->RequiredAlignment(); | 1535 AllocationAlignment alignment = object->RequiredAlignment(); | 
| 1558 AllocationResult allocation = target_space->AllocateRaw(size, alignment); | 1536 AllocationResult allocation = target_space->AllocateRaw(size, alignment); | 
| 1559 if (allocation.To(target_object)) { | 1537 if (allocation.To(target_object)) { | 
| 1560 heap_->mark_compact_collector()->MigrateObject( | 1538 heap_->mark_compact_collector()->MigrateObject( | 
| 1561 *target_object, object, size, target_space->identity(), | 1539 *target_object, object, size, target_space->identity(), | 
| 1562 evacuation_slots_buffer_); | 1540 evacuation_slots_buffer_, local_store_buffer_); | 
| 1563 return true; | 1541 return true; | 
| 1564 } | 1542 } | 
| 1565 return false; | 1543 return false; | 
| 1566 } | 1544 } | 
| 1567 | 1545 | 
| 1568 protected: | 1546 protected: | 
| 1569 Heap* heap_; | 1547 Heap* heap_; | 
| 1570 SlotsBuffer** evacuation_slots_buffer_; | 1548 SlotsBuffer** evacuation_slots_buffer_; | 
| 1549 CompactionSpaceCollection* compaction_spaces_; | |
| 1550 LocalStoreBuffer* local_store_buffer_; | |
| 1571 }; | 1551 }; | 
| 1572 | 1552 | 
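
TryEvacuateObject is the bump-pointer core of evacuation: ask the target (compaction) space for a block of the right size, and migrate only if the allocation succeeded; a failed allocation is reported to the caller, which can fall back or abort the page. A simplified standalone sketch under those assumptions (a stand-in Arena type instead of V8's spaces; alignment handling omitted):

```cpp
#include <cstddef>
#include <cstring>

// Stand-in for a compaction space: bump-pointer allocation over a range.
struct Arena {
  char* top;
  char* limit;
  void* TryAllocate(size_t size) {
    if (static_cast<size_t>(limit - top) < size) return nullptr;  // retry path
    void* result = top;
    top += size;
    return result;
  }
};

// Mirrors the shape of TryEvacuateObject: allocate in the target space,
// copy on success, and hand the new location back for slot recording.
bool TryEvacuate(Arena* target, const void* object, size_t size,
                 void** target_object) {
  void* dst = target->TryAllocate(size);
  if (dst == nullptr) return false;  // caller falls back or aborts the page
  std::memcpy(dst, object, size);    // MigrateObject also records slots
  *target_object = dst;
  return true;
}
```
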
| 1573 | 1553 | 
| 1574 class MarkCompactCollector::EvacuateNewSpaceVisitor final | 1554 class MarkCompactCollector::EvacuateNewSpaceVisitor final | 
| 1575 : public MarkCompactCollector::EvacuateVisitorBase { | 1555 : public MarkCompactCollector::EvacuateVisitorBase { | 
| 1576 public: | 1556 public: | 
| 1577 static const intptr_t kLabSize = 4 * KB; | 1557 static const intptr_t kLabSize = 4 * KB; | 
| 1578 static const intptr_t kMaxLabObjectSize = 256; | 1558 static const intptr_t kMaxLabObjectSize = 256; | 
| 1579 | 1559 | 
| 1580 explicit EvacuateNewSpaceVisitor(Heap* heap, | 1560 explicit EvacuateNewSpaceVisitor(Heap* heap, | 
| 1561 CompactionSpaceCollection* compaction_spaces, | |
| 1581 SlotsBuffer** evacuation_slots_buffer, | 1562 SlotsBuffer** evacuation_slots_buffer, | 
| 1563 LocalStoreBuffer* local_store_buffer, | |
| 1582 HashMap* local_pretenuring_feedback) | 1564 HashMap* local_pretenuring_feedback) | 
| 1583 : EvacuateVisitorBase(heap, evacuation_slots_buffer), | 1565 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer, | 
| 1566 local_store_buffer), | |
| 1584 buffer_(LocalAllocationBuffer::InvalidBuffer()), | 1567 buffer_(LocalAllocationBuffer::InvalidBuffer()), | 
| 1585 space_to_allocate_(NEW_SPACE), | 1568 space_to_allocate_(NEW_SPACE), | 
| 1586 promoted_size_(0), | 1569 promoted_size_(0), | 
| 1587 semispace_copied_size_(0), | 1570 semispace_copied_size_(0), | 
| 1588 local_pretenuring_feedback_(local_pretenuring_feedback) {} | 1571 local_pretenuring_feedback_(local_pretenuring_feedback) {} | 
| 1589 | 1572 | 
| 1590 bool Visit(HeapObject* object) override { | 1573 bool Visit(HeapObject* object) override { | 
| 1591 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_); | 1574 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_); | 
| 1592 int size = object->Size(); | 1575 int size = object->Size(); | 
| 1593 HeapObject* target_object = nullptr; | 1576 HeapObject* target_object = nullptr; | 
| 1594 if (heap_->ShouldBePromoted(object->address(), size) && | 1577 if (heap_->ShouldBePromoted(object->address(), size) && | 
| 1595 TryEvacuateObject(heap_->old_space(), object, &target_object)) { | 1578 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object, | 
| 1579 &target_object)) { | |
| 1596 // If we end up needing more special cases, we should factor this out. | 1580 // If we end up needing more special cases, we should factor this out. | 
| 1597 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { | 1581 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { | 
| 1598 heap_->array_buffer_tracker()->Promote( | 1582 heap_->array_buffer_tracker()->Promote( | 
| 1599 JSArrayBuffer::cast(target_object)); | 1583 JSArrayBuffer::cast(target_object)); | 
| 1600 } | 1584 } | 
| 1601 promoted_size_ += size; | 1585 promoted_size_ += size; | 
| 1602 return true; | 1586 return true; | 
| 1603 } | 1587 } | 
| 1604 HeapObject* target = nullptr; | 1588 HeapObject* target = nullptr; | 
| 1605 AllocationSpace space = AllocateTargetObject(object, &target); | 1589 AllocationSpace space = AllocateTargetObject(object, &target); | 
| 1606 heap_->mark_compact_collector()->MigrateObject( | 1590 heap_->mark_compact_collector()->MigrateObject( | 
| 1607 HeapObject::cast(target), object, size, space, | 1591 HeapObject::cast(target), object, size, space, | 
| 1608 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_); | 1592 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_, | 
| 1593 local_store_buffer_); | |

Hannes Payer (out of office), 2016/01/25 16:04:13:
Symmetrically to the slots buffer we should only p

Michael Lippautz, 2016/01/25 16:26:14:
Done.

| 1609 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | 1594 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { | 
| 1610 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | 1595 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); | 
| 1611 } | 1596 } | 
| 1612 semispace_copied_size_ += size; | 1597 semispace_copied_size_ += size; | 
| 1613 return true; | 1598 return true; | 
| 1614 } | 1599 } | 
| 1615 | 1600 | 
| 1616 intptr_t promoted_size() { return promoted_size_; } | 1601 intptr_t promoted_size() { return promoted_size_; } | 
| 1617 intptr_t semispace_copied_size() { return semispace_copied_size_; } | 1602 intptr_t semispace_copied_size() { return semispace_copied_size_; } | 
| 1618 | 1603 | 
| (...skipping 51 matching lines...) | |
| 1670 if (allocation.IsRetry()) { | 1655 if (allocation.IsRetry()) { | 
| 1671 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE; | 1656 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE; | 
| 1672 } | 1657 } | 
| 1673 } | 1658 } | 
| 1674 } | 1659 } | 
| 1675 return allocation; | 1660 return allocation; | 
| 1676 } | 1661 } | 
| 1677 | 1662 | 
| 1678 inline AllocationResult AllocateInOldSpace(int size_in_bytes, | 1663 inline AllocationResult AllocateInOldSpace(int size_in_bytes, | 
| 1679 AllocationAlignment alignment) { | 1664 AllocationAlignment alignment) { | 
| 1680 AllocationResult allocation = | 1665 AllocationResult allocation = compaction_spaces_->Get(OLD_SPACE) | 
| 1681 heap_->old_space()->AllocateRaw(size_in_bytes, alignment); | 1666 ->AllocateRaw(size_in_bytes, alignment); | 
| 1682 if (allocation.IsRetry()) { | 1667 if (allocation.IsRetry()) { | 
| 1683 FatalProcessOutOfMemory( | 1668 FatalProcessOutOfMemory( | 
| 1684 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); | 1669 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); | 
| 1685 } | 1670 } | 
| 1686 return allocation; | 1671 return allocation; | 
| 1687 } | 1672 } | 
| 1688 | 1673 | 
| 1689 inline AllocationResult AllocateInLab(int size_in_bytes, | 1674 inline AllocationResult AllocateInLab(int size_in_bytes, | 
| 1690 AllocationAlignment alignment) { | 1675 AllocationAlignment alignment) { | 
| 1691 AllocationResult allocation; | 1676 AllocationResult allocation; | 
| (...skipping 25 matching lines...) | |
| 1717 intptr_t semispace_copied_size_; | 1702 intptr_t semispace_copied_size_; | 
| 1718 HashMap* local_pretenuring_feedback_; | 1703 HashMap* local_pretenuring_feedback_; | 
| 1719 }; | 1704 }; | 
| 1720 | 1705 | 
| 1721 | 1706 | 
| 1722 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 1707 class MarkCompactCollector::EvacuateOldSpaceVisitor final | 
| 1723 : public MarkCompactCollector::EvacuateVisitorBase { | 1708 : public MarkCompactCollector::EvacuateVisitorBase { | 
| 1724 public: | 1709 public: | 
| 1725 EvacuateOldSpaceVisitor(Heap* heap, | 1710 EvacuateOldSpaceVisitor(Heap* heap, | 
| 1726 CompactionSpaceCollection* compaction_spaces, | 1711 CompactionSpaceCollection* compaction_spaces, | 
| 1727 SlotsBuffer** evacuation_slots_buffer) | 1712 SlotsBuffer** evacuation_slots_buffer, | 
| 1728 : EvacuateVisitorBase(heap, evacuation_slots_buffer), | 1713 LocalStoreBuffer* local_store_buffer) | 
| 1729 compaction_spaces_(compaction_spaces) {} | 1714 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer, | 
| 1715 local_store_buffer) {} | |
| 1730 | 1716 | 
| 1731 bool Visit(HeapObject* object) override { | 1717 bool Visit(HeapObject* object) override { | 
| 1732 CompactionSpace* target_space = compaction_spaces_->Get( | 1718 CompactionSpace* target_space = compaction_spaces_->Get( | 
| 1733 Page::FromAddress(object->address())->owner()->identity()); | 1719 Page::FromAddress(object->address())->owner()->identity()); | 
| 1734 HeapObject* target_object = nullptr; | 1720 HeapObject* target_object = nullptr; | 
| 1735 if (TryEvacuateObject(target_space, object, &target_object)) { | 1721 if (TryEvacuateObject(target_space, object, &target_object)) { | 
| 1736 DCHECK(object->map_word().IsForwardingAddress()); | 1722 DCHECK(object->map_word().IsForwardingAddress()); | 
| 1737 return true; | 1723 return true; | 
| 1738 } | 1724 } | 
| 1739 return false; | 1725 return false; | 
| 1740 } | 1726 } | 
| 1741 | |
| 1742 private: | |
| 1743 CompactionSpaceCollection* compaction_spaces_; | |
| 1744 }; | 1727 }; | 
| 1745 | 1728 | 
| 1746 | 1729 | 
| 1747 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 1730 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { | 
| 1748 PageIterator it(space); | 1731 PageIterator it(space); | 
| 1749 while (it.has_next()) { | 1732 while (it.has_next()) { | 
| 1750 Page* p = it.next(); | 1733 Page* p = it.next(); | 
| 1751 DiscoverGreyObjectsOnPage(p); | 1734 DiscoverGreyObjectsOnPage(p); | 
| 1752 if (marking_deque()->IsFull()) return; | 1735 if (marking_deque()->IsFull()) return; | 
| 1753 } | 1736 } | 
| (...skipping 789 matching lines...) | |
| 2543 while (obj != Smi::FromInt(0)) { | 2526 while (obj != Smi::FromInt(0)) { | 
| 2544 TransitionArray* array = TransitionArray::cast(obj); | 2527 TransitionArray* array = TransitionArray::cast(obj); | 
| 2545 obj = array->next_link(); | 2528 obj = array->next_link(); | 
| 2546 array->set_next_link(undefined, SKIP_WRITE_BARRIER); | 2529 array->set_next_link(undefined, SKIP_WRITE_BARRIER); | 
| 2547 } | 2530 } | 
| 2548 heap()->set_encountered_transition_arrays(Smi::FromInt(0)); | 2531 heap()->set_encountered_transition_arrays(Smi::FromInt(0)); | 
| 2549 } | 2532 } | 
| 2550 | 2533 | 
| 2551 | 2534 | 
| 2552 void MarkCompactCollector::RecordMigratedSlot( | 2535 void MarkCompactCollector::RecordMigratedSlot( | 
| 2553 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) { | 2536 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer, | 
| 2537 LocalStoreBuffer* local_store_buffer) { | |
| 2554 // When parallel compaction is in progress, store and slots buffer entries | 2538 // When parallel compaction is in progress, store and slots buffer entries | 
| 2555 // require synchronization. | 2539 // require synchronization. | 
| 2556 if (heap_->InNewSpace(value)) { | 2540 if (heap_->InNewSpace(value)) { | 
| 2557 if (compaction_in_progress_) { | 2541 if (compaction_in_progress_) { | 
| 2558 heap_->store_buffer()->MarkSynchronized(slot); | 2542 local_store_buffer->Record(slot); | 
| 2559 } else { | 2543 } else { | 
| 2560 heap_->store_buffer()->Mark(slot); | 2544 heap_->store_buffer()->Mark(slot); | 
| 2561 } | 2545 } | 
| 2562 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { | 2546 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { | 
| 2563 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, | 2547 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, | 
| 2564 reinterpret_cast<Object**>(slot), | 2548 reinterpret_cast<Object**>(slot), | 
| 2565 SlotsBuffer::IGNORE_OVERFLOW); | 2549 SlotsBuffer::IGNORE_OVERFLOW); | 
| 2566 } | 2550 } | 
| 2567 } | 2551 } | 
| 2568 | 2552 | 
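
The routing in RecordMigratedSlot is the heart of this patch: pointers to new space go into a store buffer, but during parallel compaction each task records them into its own LocalStoreBuffer (merged later on the main thread) rather than synchronizing on the global one; pointers into evacuation candidates still go to the per-task slots buffer. A condensed sketch of just that decision (stand-in names, not the V8 types):

```cpp
enum class SlotTarget { kStoreBuffer, kLocalStoreBuffer, kSlotsBuffer, kNone };

// Condensed form of the branches in RecordMigratedSlot().
SlotTarget ClassifyMigratedSlot(bool value_in_new_space,
                                bool value_on_evacuation_candidate,
                                bool compaction_in_progress) {
  if (value_in_new_space) {
    // The global store buffer is not safe to mutate from compaction
    // tasks, so parallel recording goes to a task-local buffer that is
    // processed on the main thread afterwards.
    return compaction_in_progress ? SlotTarget::kLocalStoreBuffer
                                  : SlotTarget::kStoreBuffer;
  }
  if (value_on_evacuation_candidate) return SlotTarget::kSlotsBuffer;
  return SlotTarget::kNone;
}
```
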
| (...skipping 61 matching lines...) | |
| 2630 if (!success) { | 2614 if (!success) { | 
| 2631 EvictPopularEvacuationCandidate(target_page); | 2615 EvictPopularEvacuationCandidate(target_page); | 
| 2632 } | 2616 } | 
| 2633 } | 2617 } | 
| 2634 } | 2618 } | 
| 2635 | 2619 | 
| 2636 | 2620 | 
| 2637 class RecordMigratedSlotVisitor final : public ObjectVisitor { | 2621 class RecordMigratedSlotVisitor final : public ObjectVisitor { | 
| 2638 public: | 2622 public: | 
| 2639 RecordMigratedSlotVisitor(MarkCompactCollector* collector, | 2623 RecordMigratedSlotVisitor(MarkCompactCollector* collector, | 
| 2640 SlotsBuffer** evacuation_slots_buffer) | 2624 SlotsBuffer** evacuation_slots_buffer, | 
| 2625 LocalStoreBuffer* local_store_buffer) | |
| 2641 : collector_(collector), | 2626 : collector_(collector), | 
| 2642 evacuation_slots_buffer_(evacuation_slots_buffer) {} | 2627 evacuation_slots_buffer_(evacuation_slots_buffer), | 
| 2628 local_store_buffer_(local_store_buffer) {} | |
| 2643 | 2629 | 
| 2644 V8_INLINE void VisitPointer(Object** p) override { | 2630 V8_INLINE void VisitPointer(Object** p) override { | 
| 2645 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), | 2631 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), | 
| 2646 evacuation_slots_buffer_); | 2632 evacuation_slots_buffer_, | 
| 2633 local_store_buffer_); | |
| 2647 } | 2634 } | 
| 2648 | 2635 | 
| 2649 V8_INLINE void VisitPointers(Object** start, Object** end) override { | 2636 V8_INLINE void VisitPointers(Object** start, Object** end) override { | 
| 2650 while (start < end) { | 2637 while (start < end) { | 
| 2651 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), | 2638 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), | 
| 2652 evacuation_slots_buffer_); | 2639 evacuation_slots_buffer_, | 
| 2640 local_store_buffer_); | |
| 2653 ++start; | 2641 ++start; | 
| 2654 } | 2642 } | 
| 2655 } | 2643 } | 
| 2656 | 2644 | 
| 2657 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { | 2645 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { | 
| 2658 if (collector_->compacting_) { | 2646 if (collector_->compacting_) { | 
| 2659 Address code_entry = Memory::Address_at(code_entry_slot); | 2647 Address code_entry = Memory::Address_at(code_entry_slot); | 
| 2660 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, | 2648 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, | 
| 2661 evacuation_slots_buffer_); | 2649 evacuation_slots_buffer_); | 
| 2662 } | 2650 } | 
| 2663 } | 2651 } | 
| 2664 | 2652 | 
| 2665 private: | 2653 private: | 
| 2666 MarkCompactCollector* collector_; | 2654 MarkCompactCollector* collector_; | 
| 2667 SlotsBuffer** evacuation_slots_buffer_; | 2655 SlotsBuffer** evacuation_slots_buffer_; | 
| 2656 LocalStoreBuffer* local_store_buffer_; | |
| 2668 }; | 2657 }; | 
| 2669 | 2658 | 
| 2670 | 2659 | 
| 2671 // We scavenge new space simultaneously with sweeping. This is done in two | 2660 // We scavenge new space simultaneously with sweeping. This is done in two | 
| 2672 // passes. | 2661 // passes. | 
| 2673 // | 2662 // | 
| 2674 // The first pass migrates all alive objects from one semispace to another or | 2663 // The first pass migrates all alive objects from one semispace to another or | 
| 2675 // promotes them to old space. Forwarding address is written directly into | 2664 // promotes them to old space. Forwarding address is written directly into | 
| 2676 // first word of object without any encoding. If object is dead we write | 2665 // first word of object without any encoding. If object is dead we write | 
| 2677 // NULL as a forwarding address. | 2666 // NULL as a forwarding address. | 
| 2678 // | 2667 // | 
| 2679 // The second pass updates pointers to new space in all spaces. It is possible | 2668 // The second pass updates pointers to new space in all spaces. It is possible | 
| 2680 // to encounter pointers to dead new space objects during traversal of pointers | 2669 // to encounter pointers to dead new space objects during traversal of pointers | 
| 2681 // to new space. We should clear them to avoid encountering them during next | 2670 // to new space. We should clear them to avoid encountering them during next | 
| 2682 // pointer iteration. This is an issue if the store buffer overflows and we | 2671 // pointer iteration. This is an issue if the store buffer overflows and we | 
| 2683 // have to scan the entire old space, including dead objects, looking for | 2672 // have to scan the entire old space, including dead objects, looking for | 
| 2684 // pointers to new space. | 2673 // pointers to new space. | 
| 2685 void MarkCompactCollector::MigrateObject( | 2674 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src, | 
| 2686 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest, | 2675 int size, AllocationSpace dest, | 
| 2687 SlotsBuffer** evacuation_slots_buffer) { | 2676 SlotsBuffer** evacuation_slots_buffer, | 
| 2677 LocalStoreBuffer* local_store_buffer) { | |
| 2688 Address dst_addr = dst->address(); | 2678 Address dst_addr = dst->address(); | 
| 2689 Address src_addr = src->address(); | 2679 Address src_addr = src->address(); | 
| 2690 DCHECK(heap()->AllowedToBeMigrated(src, dest)); | 2680 DCHECK(heap()->AllowedToBeMigrated(src, dest)); | 
| 2691 DCHECK(dest != LO_SPACE); | 2681 DCHECK(dest != LO_SPACE); | 
| 2692 if (dest == OLD_SPACE) { | 2682 if (dest == OLD_SPACE) { | 
| 2693 DCHECK_OBJECT_SIZE(size); | 2683 DCHECK_OBJECT_SIZE(size); | 
| 2694 DCHECK(evacuation_slots_buffer != nullptr); | 2684 DCHECK(evacuation_slots_buffer != nullptr); | 
| 2695 DCHECK(IsAligned(size, kPointerSize)); | 2685 DCHECK(IsAligned(size, kPointerSize)); | 
| 2696 | 2686 | 
| 2697 heap()->MoveBlock(dst->address(), src->address(), size); | 2687 heap()->MoveBlock(dst->address(), src->address(), size); | 
| 2698 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer); | 2688 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer, | 
| 2689 local_store_buffer); | |
| 2699 dst->IterateBody(&visitor); | 2690 dst->IterateBody(&visitor); | 
| 2700 } else if (dest == CODE_SPACE) { | 2691 } else if (dest == CODE_SPACE) { | 
| 2701 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); | 2692 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); | 
| 2702 DCHECK(evacuation_slots_buffer != nullptr); | 2693 DCHECK(evacuation_slots_buffer != nullptr); | 
| 2703 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); | 2694 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); | 
| 2704 heap()->MoveBlock(dst_addr, src_addr, size); | 2695 heap()->MoveBlock(dst_addr, src_addr, size); | 
| 2705 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); | 2696 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); | 
| 2706 Code::cast(dst)->Relocate(dst_addr - src_addr); | 2697 Code::cast(dst)->Relocate(dst_addr - src_addr); | 
| 2707 } else { | 2698 } else { | 
| 2708 DCHECK_OBJECT_SIZE(size); | 2699 DCHECK_OBJECT_SIZE(size); | 
| (...skipping 341 matching lines...) | |
| 3050 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3041 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 
| 3051 | 3042 | 
| 3052 // The target object is black but we don't know if the source slot is black. | 3043 // The target object is black but we don't know if the source slot is black. | 
| 3053 // The source object could have died and the slot could be part of a free | 3044 // The source object could have died and the slot could be part of a free | 
| 3054 // space. Use the mark bit iterator to find out about liveness of the slot. | 3045 // space. Use the mark bit iterator to find out about liveness of the slot. | 
| 3055 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); | 3046 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); | 
| 3056 } | 3047 } | 
| 3057 | 3048 | 
| 3058 | 3049 | 
| 3059 void MarkCompactCollector::EvacuateNewSpacePrologue() { | 3050 void MarkCompactCollector::EvacuateNewSpacePrologue() { | 
| 3060 // There are soft limits in the allocation code, designed trigger a mark | |
| 3061 // sweep collection by failing allocations. But since we are already in | |
| 3062 // a mark-sweep allocation, there is no sense in trying to trigger one. | |
| 3063 AlwaysAllocateScope scope(isolate()); | |
| 3064 | |
| 3065 NewSpace* new_space = heap()->new_space(); | 3051 NewSpace* new_space = heap()->new_space(); | 
| 3066 | 3052 NewSpacePageIterator it(new_space->bottom(), new_space->top()); | 
| 3067 // Store allocation range before flipping semispaces. | 3053 // Append the list of new space pages to be processed. | 
| 3068 Address from_bottom = new_space->bottom(); | |
| 3069 Address from_top = new_space->top(); | |
| 3070 | |
| 3071 // Flip the semispaces. After flipping, to space is empty, from space has | |
| 3072 // live objects. | |
| 3073 new_space->Flip(); | |
| 3074 new_space->ResetAllocationInfo(); | |
| 3075 | |
| 3076 newspace_evacuation_candidates_.Clear(); | |
| 3077 NewSpacePageIterator it(from_bottom, from_top); | |
| 3078 while (it.has_next()) { | 3054 while (it.has_next()) { | 
| 3079 newspace_evacuation_candidates_.Add(it.next()); | 3055 newspace_evacuation_candidates_.Add(it.next()); | 
| 3080 } | 3056 } | 
| 3057 new_space->Flip(); | |
| 3058 new_space->ResetAllocationInfo(); | |
| 3081 } | 3059 } | 
| 3082 | 3060 | 
| 3083 | 3061 | 
| 3084 HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() { | 3062 void MarkCompactCollector::EvacuateNewSpaceEpilogue() { | 
| 3085 HashMap* local_pretenuring_feedback = new HashMap( | 3063 newspace_evacuation_candidates_.Rewind(0); | 
| 3086 HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity); | |
| 3087 EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_, | |
| 3088 local_pretenuring_feedback); | |
| 3089 // First pass: traverse all objects in inactive semispace, remove marks, | |
| 3090 // migrate live objects and write forwarding addresses. This stage puts | |
| 3091 // new entries in the store buffer and may cause some pages to be marked | |
| 3092 // scan-on-scavenge. | |
| 3093 for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) { | |
| 3094 NewSpacePage* p = | |
| 3095 reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]); | |
| 3096 bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits); | |
| 3097 USE(ok); | |
| 3098 DCHECK(ok); | |
| 3099 } | |
| 3100 heap_->IncrementPromotedObjectsSize( | |
| 3101 static_cast<int>(new_space_visitor.promoted_size())); | |
| 3102 heap_->IncrementSemiSpaceCopiedObjectSize( | |
| 3103 static_cast<int>(new_space_visitor.semispace_copied_size())); | |
| 3104 heap_->IncrementYoungSurvivorsCounter( | |
| 3105 static_cast<int>(new_space_visitor.promoted_size()) + | |
| 3106 static_cast<int>(new_space_visitor.semispace_copied_size())); | |
| 3107 return local_pretenuring_feedback; | |
| 3108 } | 3064 } | 
| 3109 | 3065 | 
| 3110 | 3066 | 
| 3111 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( | 3067 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( | 
| 3112 SlotsBuffer* evacuation_slots_buffer) { | 3068 SlotsBuffer* evacuation_slots_buffer) { | 
| 3113 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); | 3069 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); | 
| 3114 evacuation_slots_buffers_.Add(evacuation_slots_buffer); | 3070 evacuation_slots_buffers_.Add(evacuation_slots_buffer); | 
| 3115 } | 3071 } | 
| 3116 | 3072 | 
| 3117 | 3073 | 
| 3118 int MarkCompactCollector::NumberOfParallelCompactionTasks() { | 3074 class MarkCompactCollector::Evacuator : public Malloced { | 
| 3075 public: | |
| 3076 Evacuator(MarkCompactCollector* collector, | |
| 3077 const List<Page*>& evacuation_candidates, | |
| 3078 const List<NewSpacePage*>& newspace_evacuation_candidates) | |
| 3079 : collector_(collector), | |
| 3080 evacuation_candidates_(evacuation_candidates), | |
| 3081 newspace_evacuation_candidates_(newspace_evacuation_candidates), | |
| 3082 compaction_spaces_(collector->heap()), | |
| 3083 local_slots_buffer_(nullptr), | |
| 3084 local_store_buffer_(), | |
| 3085 local_pretenuring_feedback_(HashMap::PointersMatch, | |
| 3086 kInitialLocalPretenuringFeedbackCapacity), | |
| 3087 new_space_visitor_(collector->heap(), &compaction_spaces_, | |
| 3088 &local_slots_buffer_, &local_store_buffer_, | |
| 3089 &local_pretenuring_feedback_), | |
| 3090 old_space_visitor_(collector->heap(), &compaction_spaces_, | |
| 3091 &local_slots_buffer_, &local_store_buffer_), | |
| 3092 duration_(0.0), | |
| 3093 bytes_compacted_(0), | |
| 3094 task_id_(0) {} | |
| 3095 | |
| 3096 // Evacuate the configured set of pages in parallel. | |
| 3097 inline void EvacuatePages(); | |
| 3098 | |
| 3099 // Merge back locally cached info sequentially. Note that this method needs | |
| 3100 // to be called from the main thread. | |
| 3101 inline void Finalize(); | |
| 3102 | |
| 3103 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; } | |
| 3104 | |
| 3105 uint32_t task_id() { return task_id_; } | |
| 3106 void set_task_id(uint32_t id) { task_id_ = id; } | |
| 3107 | |
| 3108 private: | |
| 3109 static const int kInitialLocalPretenuringFeedbackCapacity = 256; | |
| 3110 | |
| 3111 Heap* heap() { return collector_->heap(); } | |
| 3112 | |
| 3113 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) { | |
| 3114 duration_ += duration; | |
| 3115 bytes_compacted_ += bytes_compacted; | |
| 3116 } | |
| 3117 | |
| 3118 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor); | |
| 3119 | |
| 3120 MarkCompactCollector* collector_; | |
| 3121 | |
| 3122 // Pages to process. | |
| 3123 const List<Page*>& evacuation_candidates_; | |
| 3124 const List<NewSpacePage*>& newspace_evacuation_candidates_; | |
| 3125 | |
| 3126 // Locally cached collector data. | |
| 3127 CompactionSpaceCollection compaction_spaces_; | |
| 3128 SlotsBuffer* local_slots_buffer_; | |
| 3129 LocalStoreBuffer local_store_buffer_; | |
| 3130 HashMap local_pretenuring_feedback_; | |
| 3131 | |
| 3132 // Visitors for the corresponding spaces. | |
| 3133 EvacuateNewSpaceVisitor new_space_visitor_; | |
| 3134 EvacuateOldSpaceVisitor old_space_visitor_; | |
| 3135 | |
| 3136 // Bookkeeping info. | |
| 3137 double duration_; | |
| 3138 intptr_t bytes_compacted_; | |
| 3139 | |
| 3140 // Task id, if this evacuator is executed on a background task instead of | |
| 3141 // the main thread. Can be used to try to abort the task currently | |
| 3142 // scheduled to evacuate pages. | |
| 3143 uint32_t task_id_; | |
| 3144 }; | |
| 3145 | |
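
The new Evacuator bundles everything a compaction task touches into task-local state (compaction spaces, slots buffer, store buffer, pretenuring feedback): tasks never contend on global structures, and Finalize() merges the caches back sequentially on the main thread. A minimal standalone sketch of that accumulate-then-merge pattern (illustrative types, not the V8 classes):

```cpp
#include <cstdint>
#include <vector>

struct GlobalState {  // stand-in for heap-global buffers and counters
  std::vector<uintptr_t> store_buffer;
  intptr_t promoted_bytes = 0;
};

class TaskLocalState {  // stand-in for one Evacuator's caches
 public:
  // Hot path: runs on the task's thread with no locking.
  void RecordSlot(uintptr_t slot) { slots_.push_back(slot); }
  void ReportPromoted(intptr_t bytes) { promoted_ += bytes; }

  // Cold path: must run on the main thread, one evacuator at a time,
  // mirroring Evacuator::Finalize().
  void Finalize(GlobalState* global) {
    global->store_buffer.insert(global->store_buffer.end(), slots_.begin(),
                                slots_.end());
    global->promoted_bytes += promoted_;
  }

 private:
  std::vector<uintptr_t> slots_;
  intptr_t promoted_ = 0;
};
```
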
| 3146 | |
| 3147 bool MarkCompactCollector::Evacuator::EvacuateSinglePage( | |
| 3148 MemoryChunk* p, HeapObjectVisitor* visitor) { | |
| 3149 bool aborted = false; | |
| 3150 if (p->parallel_compaction_state().TrySetValue( | |
| 3151 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | |
| 3152 if (p->IsEvacuationCandidate() || p->InNewSpace()) { | |
| 3153 DCHECK_EQ(p->parallel_compaction_state().Value(), | |
| 3154 MemoryChunk::kCompactingInProgress); | |
| 3155 int saved_live_bytes = p->LiveBytes(); | |
| 3156 double evacuation_time; | |
| 3157 bool success; | |
| 3158 { | |
| 3159 AlwaysAllocateScope always_allocate(heap()->isolate()); | |
| 3160 TimedScope timed_scope(&evacuation_time); | |
| 3161 success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits); | |

Hannes Payer (out of office), 2016/01/25 16:04:13:
DCHECK that new space abortion cannot be aborted.

Michael Lippautz, 2016/01/25 16:26:14:
The caller DCHECKs appropriately that we never abo

Hannes Payer (out of office), 2016/01/25 17:07:23:
Acknowledged.

| 3162 } | |
| 3163 if (success) { | |
| 3164 ReportCompactionProgress(evacuation_time, saved_live_bytes); | |
| 3165 p->parallel_compaction_state().SetValue( | |
| 3166 MemoryChunk::kCompactingFinalize); | |
| 3167 } else { | |
| 3168 p->parallel_compaction_state().SetValue( | |
| 3169 MemoryChunk::kCompactingAborted); | |
| 3170 aborted = true; | |
| 3171 } | |
| 3172 } else { | |
| 3173 // There could be popular pages in the list of evacuation candidates | |

Hannes Payer (out of office), 2016/01/25 16:04:13:
not?

Michael Lippautz, 2016/01/25 16:26:15:
Done.

| 3174 // which we do compact. | |
| 3175 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
| 3176 } | |
| 3177 } | |
| 3178 return !aborted; | |
| 
 
Hannes Payer (out of office), 2016/01/25 16:04:13:
Just use success.

Michael Lippautz, 2016/01/25 16:26:15:
Can't do that because in theory we could abort an

Hannes Payer (out of office), 2016/01/25 17:07:23:
As discussed offline: it should be possible.

Michael Lippautz, 2016/01/25 17:10:55:
Done.

| 3179 } | |
| 3180 | |
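
EvacuateSinglePage claims a page by atomically flipping its compaction state from kCompactingDone to kCompactingInProgress, which guarantees each page is processed by exactly one task even though every evacuator walks the full candidate lists. A sketch with std::atomic standing in for MemoryChunk::parallel_compaction_state() (simplified: the popular-page branch that resets the state straight back to kCompactingDone is omitted):

```cpp
#include <atomic>

enum PageState { kCompactingDone, kCompactingInProgress,
                 kCompactingFinalize, kCompactingAborted };

// Returns false only if this task claimed the page and evacuation failed.
bool TryEvacuatePage(std::atomic<PageState>* state,
                     bool (*visit_live_objects)()) {
  PageState expected = kCompactingDone;
  if (!state->compare_exchange_strong(expected, kCompactingInProgress)) {
    return true;  // another task owns (or already finished) this page
  }
  const bool success = visit_live_objects();  // evacuate all live objects
  state->store(success ? kCompactingFinalize : kCompactingAborted);
  return success;
}
```
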
| 3181 | |
| 3182 void MarkCompactCollector::Evacuator::EvacuatePages() { | |
| 3183 for (NewSpacePage* p : newspace_evacuation_candidates_) { | |
| 3184 DCHECK(p->InNewSpace()); | |
| 3185 DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()), | |
| 3186 MemoryChunk::kSweepingDone); | |
| 3187 bool success = EvacuateSinglePage(p, &new_space_visitor_); | |
| 3188 DCHECK(success); | |
| 3189 USE(success); | |
| 3190 } | |
| 3191 for (Page* p : evacuation_candidates_) { | |
| 3192 DCHECK(p->IsEvacuationCandidate() || | |
| 3193 p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION)); | |
| 3194 DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()), | |
| 3195 MemoryChunk::kSweepingDone); | |
| 3196 EvacuateSinglePage(p, &old_space_visitor_); | |
| 3197 } | |
| 3198 } | |
| 3199 | |
| 3200 | |
| 3201 void MarkCompactCollector::Evacuator::Finalize() { | |
| 3202 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE)); | |
| 3203 heap()->code_space()->MergeCompactionSpace( | |
| 3204 compaction_spaces_.Get(CODE_SPACE)); | |
| 3205 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_); | |
| 3206 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size()); | |
| 3207 heap()->IncrementSemiSpaceCopiedObjectSize( | |
| 3208 new_space_visitor_.semispace_copied_size()); | |
| 3209 heap()->IncrementYoungSurvivorsCounter( | |
| 3210 new_space_visitor_.promoted_size() + | |
| 3211 new_space_visitor_.semispace_copied_size()); | |
| 3212 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_); | |
| 3213 local_store_buffer_.Process(heap()->store_buffer()); | |
| 3214 collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_); | |
| 3215 } | |
| 3216 | |
| 3217 | |
| 3218 class MarkCompactCollector::CompactionTask : public CancelableTask { | |
| 3219 public: | |
| 3220 explicit CompactionTask(Heap* heap, Evacuator* evacuator) | |
| 3221 : CancelableTask(heap->isolate()), evacuator_(evacuator) { | |
| 3222 evacuator->set_task_id(id()); | |
| 3223 } | |
| 3224 | |
| 3225 virtual ~CompactionTask() {} | |
| 3226 | |
| 3227 private: | |
| 3228 // v8::internal::CancelableTask overrides. | |
| 3229 void RunInternal() override { | |
| 3230 evacuator_->EvacuatePages(); | |
| 3231 isolate() | |
| 3232 ->heap() | |
| 
 
Hannes Payer (out of office), 2016/01/25 16:04:13:
Let's keep a reference to the heap since we are al

Michael Lippautz, 2016/01/25 16:26:15:
Done.

| 3233 ->mark_compact_collector() | |
| 3234 ->pending_compaction_tasks_semaphore_.Signal(); | |
| 3235 } | |
| 3236 | |
| 3237 Evacuator* evacuator_; | |
| 3238 | |
| 3239 DISALLOW_COPY_AND_ASSIGN(CompactionTask); | |
| 3240 }; | |
| 3241 | |
| 3242 | |
| 3243 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages, | |
| 3244 intptr_t live_bytes) { | |
| 3119 if (!FLAG_parallel_compaction) return 1; | 3245 if (!FLAG_parallel_compaction) return 1; | 
| 3120 // Compute the number of needed tasks based on a target compaction time, the | 3246 // Compute the number of needed tasks based on a target compaction time, the | 
| 3121 // profiled compaction speed and marked live memory. | 3247 // profiled compaction speed and marked live memory. | 
| 3122 // | 3248 // | 
| 3123 // The number of parallel compaction tasks is limited by: | 3249 // The number of parallel compaction tasks is limited by: | 
| 3124 // - #evacuation pages | 3250 // - #evacuation pages | 
| 3125 // - (#cores - 1) | 3251 // - (#cores - 1) | 
| 3126 // - a hard limit | |
| 3127 const double kTargetCompactionTimeInMs = 1; | 3252 const double kTargetCompactionTimeInMs = 1; | 
| 3128 const int kMaxCompactionTasks = 8; | 3253 const int kNumSweepingTasks = 3; | 
| 3129 | 3254 | 
| 3130 intptr_t compaction_speed = | 3255 intptr_t compaction_speed = | 
| 3131 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3256 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 
| 3132 if (compaction_speed == 0) return 1; | |
| 3133 | 3257 | 
| 3134 intptr_t live_bytes = 0; | 3258 const int cores = | 
| 
 
Hannes Payer (out of office), 2016/01/25 16:04:13:
call it available_cores

Michael Lippautz, 2016/01/25 16:26:15:
Done.

| 3135 for (Page* page : evacuation_candidates_) { | 3259 Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1); | 
| 3136 live_bytes += page->LiveBytes(); | 3260 int tasks; | 
| 3261 if (compaction_speed > 0) { | |
| 3262 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) / | |
| 3263 compaction_speed / kTargetCompactionTimeInMs); | |
| 3264 } else { | |
| 3265 tasks = pages; | |
| 3137 } | 3266 } | 
| 3138 | 3267 const int tasks_capped_pages = Min(pages, tasks); | 
| 3139 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1); | |
| 3140 const int tasks = | |
| 3141 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed / | |
| 3142 kTargetCompactionTimeInMs); | |
| 3143 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks); | |
| 3144 const int tasks_capped_cores = Min(cores, tasks_capped_pages); | 3268 const int tasks_capped_cores = Min(cores, tasks_capped_pages); | 
| 
 
Hannes Payer (out of office), 2016/01/25 16:04:13:
just return Min(cores, tasks_capped_pages);

Michael Lippautz, 2016/01/25 16:26:14:
Done.

| 3145 const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores); | 3269 return tasks_capped_cores; | 
| 3146 return tasks_capped_hard; | |
| 3147 } | 3270 } | 
| 3148 | 3271 | 
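
The heuristic sizes the task count so compaction hits the ~1 ms target given the profiled speed, caps it by the number of candidate pages, and then by the cores left over after reserving kNumSweepingTasks sweeper threads plus the main thread; with no speed sample yet it falls back to one task per page. A hedged restatement with made-up numbers:

```cpp
#include <algorithm>
#include <cstdint>

int NumberOfTasks(int pages, intptr_t live_bytes,
                  intptr_t compaction_speed /* bytes/ms, 0 if unknown */,
                  int processors) {
  const double kTargetCompactionTimeInMs = 1;
  const int kNumSweepingTasks = 3;
  const int cores = std::max(1, processors - kNumSweepingTasks - 1);
  int tasks = (compaction_speed > 0)
                  ? 1 + static_cast<int>(live_bytes / compaction_speed /
                                         kTargetCompactionTimeInMs)
                  : pages;  // no profile yet: one task per page
  return std::min(cores, std::min(pages, tasks));
}

// Example: 8 candidate pages, 4 MB live, 1 MB/ms profiled speed, 8 cores:
// tasks = 1 + 4 = 5; the page cap keeps 5; the core cap (8 - 3 - 1 = 4)
// yields 4 tasks.
```
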
| 3149 | 3272 | 
| 3150 void MarkCompactCollector::EvacuatePagesInParallel() { | 3273 void MarkCompactCollector::EvacuatePagesInParallel() { | 
| 3151 const int num_pages = evacuation_candidates_.length(); | 3274 int num_pages = 0; | 
| 3152 if (num_pages == 0) return; | 3275 intptr_t live_bytes = 0; | 
| 3276 for (Page* page : evacuation_candidates_) { | |
| 3277 num_pages++; | |
| 3278 live_bytes += page->LiveBytes(); | |
| 3279 } | |
| 3280 for (NewSpacePage* page : newspace_evacuation_candidates_) { | |
| 3281 num_pages++; | |
| 3282 live_bytes += page->LiveBytes(); | |
| 3283 } | |
| 3284 DCHECK_GE(num_pages, 1); | |
| 3285 | |
| 3153 | 3286 | 
| 3154 // Used for trace summary. | 3287 // Used for trace summary. | 
| 3155 intptr_t live_bytes = 0; | |
| 3156 intptr_t compaction_speed = 0; | 3288 intptr_t compaction_speed = 0; | 
| 3157 if (FLAG_trace_fragmentation) { | 3289 if (FLAG_trace_fragmentation) { | 
| 3158 for (Page* page : evacuation_candidates_) { | |
| 3159 live_bytes += page->LiveBytes(); | |
| 3160 } | |
| 3161 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 3290 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); | 
| 3162 } | 3291 } | 
| 3163 const int num_tasks = NumberOfParallelCompactionTasks(); | 3292 | 
| 3293 const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes); | |
| 3164 | 3294 | 
| 3165 // Set up compaction spaces. | 3295 // Set up compaction spaces. | 
| 3296 Evacuator** evacuators = new Evacuator*[num_tasks]; | |
| 3166 CompactionSpaceCollection** compaction_spaces_for_tasks = | 3297 CompactionSpaceCollection** compaction_spaces_for_tasks = | 
| 3167 new CompactionSpaceCollection*[num_tasks]; | 3298 new CompactionSpaceCollection*[num_tasks]; | 
| 3168 for (int i = 0; i < num_tasks; i++) { | 3299 for (int i = 0; i < num_tasks; i++) { | 
| 3169 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); | 3300 evacuators[i] = new Evacuator(this, evacuation_candidates_, | 
| 3301 newspace_evacuation_candidates_); | |
| 3302 compaction_spaces_for_tasks[i] = evacuators[i]->compaction_spaces(); | |
| 3170 } | 3303 } | 
| 3171 | |
| 3172 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 3304 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 
| 3173 num_tasks); | 3305 num_tasks); | 
| 3174 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 3306 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, | 
| 3175 num_tasks); | 3307 num_tasks); | 
| 3308 delete[] compaction_spaces_for_tasks; | |
| 3176 | 3309 | 
| 3177 uint32_t* task_ids = new uint32_t[num_tasks - 1]; | |
| 3178 // Kick off parallel tasks. | 3310 // Kick off parallel tasks. | 
| 3179 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks); | 3311 StartParallelCompaction(evacuators, num_tasks); | 
| 3180 // Wait for unfinished and not-yet-started tasks. | 3312 // Wait for unfinished and not-yet-started tasks. | 
| 3181 WaitUntilCompactionCompleted(task_ids, num_tasks - 1); | 3313 WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1); | 
| 3182 delete[] task_ids; | |
| 3183 | 3314 | 
| 3184 double compaction_duration = 0.0; | 3315 // Finalize local evacuators by merging back all locally cached data. | 
| 3185 intptr_t compacted_memory = 0; | |
| 3186 // Merge back memory (compacted and unused) from compaction spaces. | |
| 3187 for (int i = 0; i < num_tasks; i++) { | 3316 for (int i = 0; i < num_tasks; i++) { | 
| 3188 heap()->old_space()->MergeCompactionSpace( | 3317 evacuators[i]->Finalize(); | 
| 3189 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); | 3318 delete evacuators[i]; | 
| 3190 heap()->code_space()->MergeCompactionSpace( | |
| 3191 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); | |
| 3192 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted(); | |
| 3193 compaction_duration += compaction_spaces_for_tasks[i]->duration(); | |
| 3194 delete compaction_spaces_for_tasks[i]; | |
| 3195 } | 3319 } | 
| 3196 delete[] compaction_spaces_for_tasks; | 3320 delete[] evacuators; | 
| 3197 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory); | |
| 3198 | 3321 | 
| 3199 // Finalize sequentially. | 3322 // Finalize pages sequentially. | 
| 3323 for (NewSpacePage* p : newspace_evacuation_candidates_) { | |
| 3324 DCHECK_EQ(p->parallel_compaction_state().Value(), | |
| 3325 MemoryChunk::kCompactingFinalize); | |
| 3326 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
| 3327 } | |
| 3328 | |
| 3200 int abandoned_pages = 0; | 3329 int abandoned_pages = 0; | 
| 3201 for (int i = 0; i < num_pages; i++) { | 3330 for (Page* p : evacuation_candidates_) { | 
| 3202 Page* p = evacuation_candidates_[i]; | |
| 3203 switch (p->parallel_compaction_state().Value()) { | 3331 switch (p->parallel_compaction_state().Value()) { | 
| 3204 case MemoryChunk::ParallelCompactingState::kCompactingAborted: | 3332 case MemoryChunk::ParallelCompactingState::kCompactingAborted: | 
| 3205 // We have partially compacted the page, i.e., some objects may have | 3333 // We have partially compacted the page, i.e., some objects may have | 
| 3206 // moved, others are still in place. | 3334 // moved, others are still in place. | 
| 3207 // We need to: | 3335 // We need to: | 
| 3208 // - Leave the evacuation candidate flag for later processing of | 3336 // - Leave the evacuation candidate flag for later processing of | 
| 3209 // slots buffer entries. | 3337 // slots buffer entries. | 
| 3210 // - Leave the slots buffer there for processing of entries added by | 3338 // - Leave the slots buffer there for processing of entries added by | 
| 3211 // the write barrier. | 3339 // the write barrier. | 
| 3212 // - Rescan the page as slot recording in the migration buffer only | 3340 // - Rescan the page as slot recording in the migration buffer only | 
| (...skipping 12 matching lines...) Expand all Loading... | |
| 3225 case MemoryChunk::kCompactingFinalize: | 3353 case MemoryChunk::kCompactingFinalize: | 
| 3226 DCHECK(p->IsEvacuationCandidate()); | 3354 DCHECK(p->IsEvacuationCandidate()); | 
| 3227 p->SetWasSwept(); | 3355 p->SetWasSwept(); | 
| 3228 p->Unlink(); | 3356 p->Unlink(); | 
| 3229 break; | 3357 break; | 
| 3230 case MemoryChunk::kCompactingDone: | 3358 case MemoryChunk::kCompactingDone: | 
| 3231 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); | 3359 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); | 
| 3232 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3360 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 
| 3233 break; | 3361 break; | 
| 3234 default: | 3362 default: | 
| 3235 // We should not observe kCompactingInProgress, or kCompactingDone. | 3363 // MemoryChunk::kCompactingInProgress. | 
| 3236 UNREACHABLE(); | 3364 UNREACHABLE(); | 
| 3237 } | 3365 } | 
| 3238 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | 3366 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | 
| 3239 } | 3367 } | 
| 3240 if (FLAG_trace_fragmentation) { | 3368 if (FLAG_trace_fragmentation) { | 
| 3241 PrintIsolate(isolate(), | 3369 PrintIsolate(isolate(), | 
| 3242 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " | 3370 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " | 
| 3243 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX | 3371 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX | 
| 3244 "d compaction_speed=%" V8_PTR_PREFIX "d\n", | 3372 "d compaction_speed=%" V8_PTR_PREFIX "d\n", | 
| 3245 isolate()->time_millis_since_init(), FLAG_parallel_compaction, | 3373 isolate()->time_millis_since_init(), FLAG_parallel_compaction, | 
| 3246 num_pages, abandoned_pages, num_tasks, | 3374 num_pages, abandoned_pages, num_tasks, | 
| 3247 base::SysInfo::NumberOfProcessors(), live_bytes, | 3375 base::SysInfo::NumberOfProcessors(), live_bytes, | 
| 3248 compaction_speed); | 3376 compaction_speed); | 
| 3249 } | 3377 } | 
| 3250 } | 3378 } | 
| 3251 | 3379 | 
| 3252 | 3380 | 
| 3253 void MarkCompactCollector::StartParallelCompaction( | 3381 void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators, | 
| 3254 CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids, | 3382 int len) { | 
| 3255 int len) { | |
| 3256 compaction_in_progress_ = true; | 3383 compaction_in_progress_ = true; | 
| 3257 for (int i = 1; i < len; i++) { | 3384 for (int i = 1; i < len; i++) { | 
| 3258 CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]); | 3385 CompactionTask* task = new CompactionTask(heap(), evacuators[i]); | 
| 3259 task_ids[i - 1] = task->id(); | |
| 3260 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 3386 V8::GetCurrentPlatform()->CallOnBackgroundThread( | 
| 3261 task, v8::Platform::kShortRunningTask); | 3387 task, v8::Platform::kShortRunningTask); | 
| 3262 } | 3388 } | 
| 3263 | 3389 | 
| 3264 // Contribute in main thread. | 3390 // Contribute on main thread. | 
| 3265 EvacuatePages(compaction_spaces[0], &migration_slots_buffer_); | 3391 evacuators[0]->EvacuatePages(); | 
| 3266 } | 3392 } | 
| 3267 | 3393 | 
| 3268 | 3394 | 
| 3269 void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids, | 3395 void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators, | 
| 3270 int len) { | 3396 int len) { | 
| 3271 // Try to cancel compaction tasks that have not been run (as they might be | 3397 // Try to cancel compaction tasks that have not been run (as they might be | 
| 3272 // stuck in a worker queue). Tasks that cannot be canceled, have either | 3398 // stuck in a worker queue). Tasks that cannot be canceled, have either | 
| 3273 // already completed or are still running, hence we need to wait for their | 3399 // already completed or are still running, hence we need to wait for their | 
| 3274 // semaphore signal. | 3400 // semaphore signal. | 
| 3275 for (int i = 0; i < len; i++) { | 3401 for (int i = 0; i < len; i++) { | 
| 3276 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) { | 3402 if (!heap()->isolate()->cancelable_task_manager()->TryAbort( | 
| 3403 evacuators[i]->task_id())) { | |
| 3277 pending_compaction_tasks_semaphore_.Wait(); | 3404 pending_compaction_tasks_semaphore_.Wait(); | 
| 3278 } | 3405 } | 
| 3279 } | 3406 } | 
| 3280 compaction_in_progress_ = false; | 3407 compaction_in_progress_ = false; | 
| 3281 } | 3408 } | 
| 3282 | 3409 | 
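
WaitUntilCompactionCompleted joins the tasks without tracking which ones actually ran: anything still sitting in a worker queue is cancelled through the CancelableTaskManager, and for each task that could not be cancelled (already running or finished) the main thread consumes exactly one signal of the semaphore that CompactionTask posts from RunInternal(). A standalone sketch of that pattern (C++20 semaphore; try_abort stands in for TryAbort):

```cpp
#include <cstdint>
#include <functional>
#include <semaphore>
#include <vector>

// One acquire() per non-aborted task pairs with the single release()
// each task performs when it finishes running.
void JoinCompactionTasks(const std::vector<uint32_t>& task_ids,
                         const std::function<bool(uint32_t)>& try_abort,
                         std::counting_semaphore<>& pending_tasks) {
  for (uint32_t id : task_ids) {
    if (!try_abort(id)) {
      pending_tasks.acquire();  // wait for this task's completion signal
    }
  }
}
```
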
| 3283 | 3410 | 
| 3284 void MarkCompactCollector::EvacuatePages( | |
| 3285 CompactionSpaceCollection* compaction_spaces, | |
| 3286 SlotsBuffer** evacuation_slots_buffer) { | |
| 3287 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces, | |
| 3288 evacuation_slots_buffer); | |
| 3289 for (int i = 0; i < evacuation_candidates_.length(); i++) { | |
| 3290 Page* p = evacuation_candidates_[i]; | |
| 3291 DCHECK(p->IsEvacuationCandidate() || | |
| 3292 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | |
| 3293 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) == | |
| 3294 MemoryChunk::kSweepingDone); | |
| 3295 if (p->parallel_compaction_state().TrySetValue( | |
| 3296 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { | |
| 3297 if (p->IsEvacuationCandidate()) { | |
| 3298 DCHECK_EQ(p->parallel_compaction_state().Value(), | |
| 3299 MemoryChunk::kCompactingInProgress); | |
| 3300 double start = heap()->MonotonicallyIncreasingTimeInMs(); | |
| 3301 intptr_t live_bytes = p->LiveBytes(); | |
| 3302 AlwaysAllocateScope always_allocate(isolate()); | |
| 3303 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) { | |
| 3304 p->parallel_compaction_state().SetValue( | |
| 3305 MemoryChunk::kCompactingFinalize); | |
| 3306 compaction_spaces->ReportCompactionProgress( | |
| 3307 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes); | |
| 3308 } else { | |
| 3309 p->parallel_compaction_state().SetValue( | |
| 3310 MemoryChunk::kCompactingAborted); | |
| 3311 } | |
| 3312 } else { | |
| 3313 // There could be popular pages in the list of evacuation candidates | |
| 3314 // which we do not compact. | | 
| 3315 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); | |
| 3316 } | |
| 3317 } | |
| 3318 } | |
| 3319 } | |
| 3320 | |
| 3321 | |
| 3322 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 3411 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { | 
| 3323 public: | 3412 public: | 
| 3324 virtual Object* RetainAs(Object* object) { | 3413 virtual Object* RetainAs(Object* object) { | 
| 3325 if (object->IsHeapObject()) { | 3414 if (object->IsHeapObject()) { | 
| 3326 HeapObject* heap_object = HeapObject::cast(object); | 3415 HeapObject* heap_object = HeapObject::cast(object); | 
| 3327 MapWord map_word = heap_object->map_word(); | 3416 MapWord map_word = heap_object->map_word(); | 
| 3328 if (map_word.IsForwardingAddress()) { | 3417 if (map_word.IsForwardingAddress()) { | 
| 3329 return map_word.ToForwardingAddress(); | 3418 return map_word.ToForwardingAddress(); | 
| 3330 } | 3419 } | 
| 3331 } | 3420 } | 
| (...skipping 129 matching lines...) | | 
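`RetainAs` above retargets weak references through forwarding addresses: when an object is evacuated, its map word is overwritten with the new location, so a later reader can recover the moved object from the old one. A sketch under an assumed low-bit tag; V8's real `MapWord` encoding differs, and `HeapObjectStub` is illustrative only.

```cpp
#include <cstdint>

struct HeapObjectStub {
  // Either a map pointer or, after evacuation, a tagged forwarding address.
  uintptr_t map_word;

  bool IsForwardingAddress() const { return map_word & 1; }  // Assumed tag.
  HeapObjectStub* ToForwardingAddress() const {
    return reinterpret_cast<HeapObjectStub*>(map_word & ~uintptr_t{1});
  }
};

// Weak retention: follow the forwarding pointer if the object moved,
// otherwise keep the original reference.
HeapObjectStub* Retain(HeapObjectStub* object) {
  return object->IsForwardingAddress() ? object->ToForwardingAddress()
                                       : object;
}
```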
| 3461 // Return true if the given code is deoptimized or will be deoptimized. | 3550 // Return true if the given code is deoptimized or will be deoptimized. | 
| 3462 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { | 3551 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { | 
| 3463 return code->is_optimized_code() && code->marked_for_deoptimization(); | 3552 return code->is_optimized_code() && code->marked_for_deoptimization(); | 
| 3464 } | 3553 } | 
| 3465 | 3554 | 
| 3466 | 3555 | 
| 3467 void MarkCompactCollector::RemoveObjectSlots(Address start_slot, | 3556 void MarkCompactCollector::RemoveObjectSlots(Address start_slot, | 
| 3468 Address end_slot) { | 3557 Address end_slot) { | 
| 3469 // Remove entries by replacing them with the address of an old-space slot | 3558 // Remove entries by replacing them with the address of an old-space slot | 
| 3470 // that holds a smi and is located on an unmovable page. | 3559 // that holds a smi and is located on an unmovable page. | 
| 3471 int npages = evacuation_candidates_.length(); | 3560 for (Page* p : evacuation_candidates_) { | 
| 3472 for (int i = 0; i < npages; i++) { | |
| 3473 Page* p = evacuation_candidates_[i]; | |
| 3474 DCHECK(p->IsEvacuationCandidate() || | 3561 DCHECK(p->IsEvacuationCandidate() || | 
| 3475 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3562 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 
| 3476 if (p->IsEvacuationCandidate()) { | 3563 if (p->IsEvacuationCandidate()) { | 
| 3477 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, | 3564 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, | 
| 3478 end_slot); | 3565 end_slot); | 
| 3479 } | 3566 } | 
| 3480 } | 3567 } | 
| 3481 } | 3568 } | 
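The comment above describes invalidation by overwriting rather than compaction: entries that point into the removed range are redirected to a sentinel slot (one that holds a smi and can never move), so the buffer's size and ordering stay untouched. A minimal sketch with plain addresses; the names are illustrative, not the `SlotsBuffer` API.

```cpp
#include <cstdint>
#include <vector>

using Address = uintptr_t;

// Redirect every buffered slot inside [start, end) to a sentinel slot
// that lives on an unmovable page and holds a smi, so stale entries
// become harmless instead of having to be compacted away.
void RemoveObjectSlots(std::vector<Address>* buffer, Address start,
                       Address end, Address sentinel_slot) {
  for (Address& slot : *buffer) {
    if (slot >= start && slot < end) slot = sentinel_slot;
  }
}
```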
| 3482 | 3569 | 
| 3483 | 3570 | 
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3543 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 3630 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); | 
| 3544 Map* map = object->synchronized_map(); | 3631 Map* map = object->synchronized_map(); | 
| 3545 int size = object->SizeFromMap(map); | 3632 int size = object->SizeFromMap(map); | 
| 3546 object->IterateBody(map->instance_type(), size, visitor); | 3633 object->IterateBody(map->instance_type(), size, visitor); | 
| 3547 } | 3634 } | 
| 3548 } | 3635 } | 
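The visit step above derives everything from the object's map: the map is loaded with a synchronized read, the object's size is computed from it, and the body is iterated accordingly. A minimal sketch of map-driven field iteration, assuming a flat layout; `MapStub` and its fields are hypothetical, not V8's types.

```cpp
#include <cstdint>

struct MapStub {
  int instance_size;  // Total object size in bytes, determined by the map.
  int header_size;    // Bytes before the first tagged pointer field.
};

using Visitor = void (*)(uintptr_t* slot);

// Visit every tagged field of an object whose layout the map describes:
// because size and layout come from the map, one loop handles any shape.
void IterateBody(uintptr_t* object, const MapStub* map, Visitor visit) {
  uintptr_t* slot = object + map->header_size / sizeof(uintptr_t);
  uintptr_t* end = object + map->instance_size / sizeof(uintptr_t);
  for (; slot < end; ++slot) visit(slot);
}
```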
| 3549 | 3636 | 
| 3550 | 3637 | 
| 3551 void MarkCompactCollector::SweepAbortedPages() { | 3638 void MarkCompactCollector::SweepAbortedPages() { | 
| 3552 // Second pass on aborted pages. | 3639 // Second pass on aborted pages. | 
| 3553 for (int i = 0; i < evacuation_candidates_.length(); i++) { | 3640 for (Page* p : evacuation_candidates_) { | 
| 3554 Page* p = evacuation_candidates_[i]; | |
| 3555 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 3641 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { | 
| 3556 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); | 3642 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); | 
| 3557 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3643 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 
| 3558 switch (space->identity()) { | 3644 switch (space->identity()) { | 
| 3559 case OLD_SPACE: | 3645 case OLD_SPACE: | 
| 3560 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 3646 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, | 
| 3561 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); | 3647 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); | 
| 3562 break; | 3648 break; | 
| 3563 case CODE_SPACE: | 3649 case CODE_SPACE: | 
| 3564 if (FLAG_zap_code_space) { | 3650 if (FLAG_zap_code_space) { | 
| (...skipping 10 matching lines...) | | 
| 3575 } | 3661 } | 
| 3576 } | 3662 } | 
| 3577 } | 3663 } | 
| 3578 } | 3664 } | 
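`SweepAbortedPages` gives partially evacuated pages a regular sweep: objects that did move left unmarked memory behind (their originals now hold only forwarding words), and sweeping turns every unmarked gap back into free-list space. A deliberately simplified sketch of that reclamation, treating a page as a flat list of sized objects; `ObjectStub` and `SweepPage` are illustrative only.

```cpp
#include <cstddef>
#include <vector>

struct ObjectStub {
  std::size_t size;  // Bytes occupied by this object (or former object).
  bool marked;       // Live according to the marking phase.
};

// Returns the number of bytes handed back to the free list. Unmarked
// regions (dead objects and the originals of already-moved objects) are
// reclaimed, and mark bits are cleared for the next cycle.
std::size_t SweepPage(std::vector<ObjectStub>* page) {
  std::size_t freed = 0;
  for (ObjectStub& o : *page) {
    if (!o.marked) freed += o.size;
    o.marked = false;
  }
  return freed;
}
```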
| 3579 | 3665 | 
| 3580 | 3666 | 
| 3581 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 3667 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { | 
| 3582 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 3668 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); | 
| 3583 Heap::RelocationLock relocation_lock(heap()); | 3669 Heap::RelocationLock relocation_lock(heap()); | 
| 3584 | 3670 | 
| 3585 HashMap* local_pretenuring_feedback = nullptr; | |
| 3586 { | 3671 { | 
| 3587 GCTracer::Scope gc_scope(heap()->tracer(), | 3672 GCTracer::Scope gc_scope(heap()->tracer(), | 
| 3588 GCTracer::Scope::MC_EVACUATE_NEW_SPACE); | 3673 GCTracer::Scope::MC_EVACUATE_NEW_SPACE); | 
| 3589 EvacuationScope evacuation_scope(this); | 3674 EvacuationScope evacuation_scope(this); | 
| 3675 | |
| 3590 EvacuateNewSpacePrologue(); | 3676 EvacuateNewSpacePrologue(); | 
| 3591 local_pretenuring_feedback = EvacuateNewSpaceInParallel(); | |
| 3592 heap_->new_space()->set_age_mark(heap_->new_space()->top()); | |
| 3593 } | |
| 3594 | |
| 3595 { | |
| 3596 GCTracer::Scope gc_scope(heap()->tracer(), | |
| 3597 GCTracer::Scope::MC_EVACUATE_CANDIDATES); | |
| 3598 EvacuationScope evacuation_scope(this); | |
| 3599 EvacuatePagesInParallel(); | 3677 EvacuatePagesInParallel(); | 
| 3600 } | 3678 EvacuateNewSpaceEpilogue(); | 
| 3601 | 3679 heap()->new_space()->set_age_mark(heap()->new_space()->top()); | 
| 3602 { | |
| 3603 heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback); | |
| 3604 delete local_pretenuring_feedback; | |
| 3605 } | 3680 } | 
| 3606 | 3681 | 
| 3607 UpdatePointersAfterEvacuation(); | 3682 UpdatePointersAfterEvacuation(); | 
| 3608 | 3683 | 
| 3609 { | 3684 { | 
| 3610 GCTracer::Scope gc_scope(heap()->tracer(), | 3685 GCTracer::Scope gc_scope(heap()->tracer(), | 
| 3611 GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 3686 GCTracer::Scope::MC_EVACUATE_CLEAN_UP); | 
| 3612 // After updating all pointers, we can finally sweep the aborted pages, | 3687 // After updating all pointers, we can finally sweep the aborted pages, | 
| 3613 // effectively overwriting any forwarding pointers. | 3688 // effectively overwriting any forwarding pointers. | 
| 3614 SweepAbortedPages(); | 3689 SweepAbortedPages(); | 
| (...skipping 56 matching lines...) | | 
| 3671 &updating_visitor); | 3746 &updating_visitor); | 
| 3672 } | 3747 } | 
| 3673 // Update roots. | 3748 // Update roots. | 
| 3674 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 3749 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); | 
| 3675 | 3750 | 
| 3676 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), | 3751 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), | 
| 3677 &Heap::ScavengeStoreBufferCallback); | 3752 &Heap::ScavengeStoreBufferCallback); | 
| 3678 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); | 3753 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); | 
| 3679 } | 3754 } | 
| 3680 | 3755 | 
| 3681 int npages = evacuation_candidates_.length(); | |
| 3682 { | 3756 { | 
| 3683 GCTracer::Scope gc_scope( | 3757 GCTracer::Scope gc_scope( | 
| 3684 heap()->tracer(), | 3758 heap()->tracer(), | 
| 3685 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); | 3759 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); | 
| 3686 for (int i = 0; i < npages; i++) { | 3760 for (Page* p : evacuation_candidates_) { | 
| 3687 Page* p = evacuation_candidates_[i]; | |
| 3688 DCHECK(p->IsEvacuationCandidate() || | 3761 DCHECK(p->IsEvacuationCandidate() || | 
| 3689 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 3762 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); | 
| 3690 | 3763 | 
| 3691 if (p->IsEvacuationCandidate()) { | 3764 if (p->IsEvacuationCandidate()) { | 
| 3692 UpdateSlotsRecordedIn(p->slots_buffer()); | 3765 UpdateSlotsRecordedIn(p->slots_buffer()); | 
| 3693 if (FLAG_trace_fragmentation_verbose) { | 3766 if (FLAG_trace_fragmentation_verbose) { | 
| 3694 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), | 3767 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), | 
| 3695 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 3768 SlotsBuffer::SizeOfChain(p->slots_buffer())); | 
| 3696 } | 3769 } | 
| 3697 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | 3770 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); | 
| (...skipping 53 matching lines...) | | 
| 3751 heap_->UpdateReferencesInExternalStringTable( | 3824 heap_->UpdateReferencesInExternalStringTable( | 
| 3752 &UpdateReferenceInExternalStringTableEntry); | 3825 &UpdateReferenceInExternalStringTableEntry); | 
| 3753 | 3826 | 
| 3754 EvacuationWeakObjectRetainer evacuation_object_retainer; | 3827 EvacuationWeakObjectRetainer evacuation_object_retainer; | 
| 3755 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); | 3828 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); | 
| 3756 } | 3829 } | 
| 3757 } | 3830 } | 
| 3758 | 3831 | 
| 3759 | 3832 | 
| 3760 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 3833 void MarkCompactCollector::ReleaseEvacuationCandidates() { | 
| 3761 int npages = evacuation_candidates_.length(); | 3834 for (Page* p : evacuation_candidates_) { | 
| 3762 for (int i = 0; i < npages; i++) { | |
| 3763 Page* p = evacuation_candidates_[i]; | |
| 3764 if (!p->IsEvacuationCandidate()) continue; | 3835 if (!p->IsEvacuationCandidate()) continue; | 
| 3765 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 3836 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); | 
| 3766 space->Free(p->area_start(), p->area_size()); | 3837 space->Free(p->area_start(), p->area_size()); | 
| 3767 p->set_scan_on_scavenge(false); | 3838 p->set_scan_on_scavenge(false); | 
| 3768 p->ResetLiveBytes(); | 3839 p->ResetLiveBytes(); | 
| 3769 CHECK(p->WasSwept()); | 3840 CHECK(p->WasSwept()); | 
| 3770 space->ReleasePage(p); | 3841 space->ReleasePage(p); | 
| 3771 } | 3842 } | 
| 3772 evacuation_candidates_.Rewind(0); | 3843 evacuation_candidates_.Rewind(0); | 
| 3773 compacting_ = false; | 3844 compacting_ = false; | 
| (...skipping 255 matching lines...) | | 
| 4029 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4100 MarkBit mark_bit = Marking::MarkBitFrom(host); | 
| 4030 if (Marking::IsBlack(mark_bit)) { | 4101 if (Marking::IsBlack(mark_bit)) { | 
| 4031 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 4102 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); | 
| 4032 RecordRelocSlot(&rinfo, target); | 4103 RecordRelocSlot(&rinfo, target); | 
| 4033 } | 4104 } | 
| 4034 } | 4105 } | 
| 4035 } | 4106 } | 
| 4036 | 4107 | 
| 4037 } // namespace internal | 4108 } // namespace internal | 
| 4038 } // namespace v8 | 4109 } // namespace v8 | 