Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 1643473002: Revert of [heap] Parallel newspace evacuation, semispace copy, and compaction \o/ (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 4 years, 11 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
11 #include "src/compilation-cache.h" 11 #include "src/compilation-cache.h"
12 #include "src/deoptimizer.h" 12 #include "src/deoptimizer.h"
13 #include "src/execution.h" 13 #include "src/execution.h"
14 #include "src/frames-inl.h" 14 #include "src/frames-inl.h"
15 #include "src/gdb-jit.h" 15 #include "src/gdb-jit.h"
16 #include "src/global-handles.h" 16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-tracker.h" 17 #include "src/heap/array-buffer-tracker.h"
18 #include "src/heap/gc-tracer.h" 18 #include "src/heap/gc-tracer.h"
19 #include "src/heap/incremental-marking.h" 19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact-inl.h" 20 #include "src/heap/mark-compact-inl.h"
21 #include "src/heap/object-stats.h" 21 #include "src/heap/object-stats.h"
22 #include "src/heap/objects-visiting.h"
22 #include "src/heap/objects-visiting-inl.h" 23 #include "src/heap/objects-visiting-inl.h"
23 #include "src/heap/objects-visiting.h"
24 #include "src/heap/slots-buffer.h" 24 #include "src/heap/slots-buffer.h"
25 #include "src/heap/spaces-inl.h" 25 #include "src/heap/spaces-inl.h"
26 #include "src/ic/ic.h" 26 #include "src/ic/ic.h"
27 #include "src/ic/stub-cache.h" 27 #include "src/ic/stub-cache.h"
28 #include "src/profiler/cpu-profiler.h" 28 #include "src/profiler/cpu-profiler.h"
29 #include "src/utils-inl.h"
30 #include "src/v8.h" 29 #include "src/v8.h"
31 30
32 namespace v8 { 31 namespace v8 {
33 namespace internal { 32 namespace internal {
34 33
35 34
36 const char* Marking::kWhiteBitPattern = "00"; 35 const char* Marking::kWhiteBitPattern = "00";
37 const char* Marking::kBlackBitPattern = "11"; 36 const char* Marking::kBlackBitPattern = "11";
38 const char* Marking::kGreyBitPattern = "10"; 37 const char* Marking::kGreyBitPattern = "10";
39 const char* Marking::kImpossibleBitPattern = "01"; 38 const char* Marking::kImpossibleBitPattern = "01";
(...skipping 274 matching lines...)
314 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() { 313 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
315 { 314 {
316 GCTracer::Scope gc_scope(heap()->tracer(), 315 GCTracer::Scope gc_scope(heap()->tracer(),
317 GCTracer::Scope::MC_CLEAR_STORE_BUFFER); 316 GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
318 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); 317 heap_->store_buffer()->ClearInvalidStoreBufferEntries();
319 } 318 }
320 319
321 { 320 {
322 GCTracer::Scope gc_scope(heap()->tracer(), 321 GCTracer::Scope gc_scope(heap()->tracer(),
323 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER); 322 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
324 for (Page* p : evacuation_candidates_) { 323 int number_of_pages = evacuation_candidates_.length();
324 for (int i = 0; i < number_of_pages; i++) {
325 Page* p = evacuation_candidates_[i];
325 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); 326 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
326 } 327 }
327 } 328 }
328 #ifdef VERIFY_HEAP 329 #ifdef VERIFY_HEAP
329 if (FLAG_verify_heap) { 330 if (FLAG_verify_heap) {
330 VerifyValidStoreAndSlotsBufferEntries(); 331 VerifyValidStoreAndSlotsBufferEntries();
331 } 332 }
332 #endif 333 #endif
333 } 334 }
334 335
(...skipping 135 matching lines...)
470 471
471 LargeObjectIterator it(heap_->lo_space()); 472 LargeObjectIterator it(heap_->lo_space());
472 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 473 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
473 Marking::MarkWhite(Marking::MarkBitFrom(obj)); 474 Marking::MarkWhite(Marking::MarkBitFrom(obj));
474 Page::FromAddress(obj->address())->ResetProgressBar(); 475 Page::FromAddress(obj->address())->ResetProgressBar();
475 Page::FromAddress(obj->address())->ResetLiveBytes(); 476 Page::FromAddress(obj->address())->ResetLiveBytes();
476 } 477 }
477 } 478 }
478 479
479 480
481 class MarkCompactCollector::CompactionTask : public CancelableTask {
482 public:
483 explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
484 : CancelableTask(heap->isolate()), spaces_(spaces) {}
485
486 virtual ~CompactionTask() {}
487
488 private:
489 // v8::internal::CancelableTask overrides.
490 void RunInternal() override {
491 MarkCompactCollector* mark_compact =
492 isolate()->heap()->mark_compact_collector();
493 SlotsBuffer* evacuation_slots_buffer = nullptr;
494 mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
495 mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
496 mark_compact->pending_compaction_tasks_semaphore_.Signal();
497 }
498
499 CompactionSpaceCollection* spaces_;
500
501 DISALLOW_COPY_AND_ASSIGN(CompactionTask);
502 };
503
504
480 class MarkCompactCollector::SweeperTask : public v8::Task { 505 class MarkCompactCollector::SweeperTask : public v8::Task {
481 public: 506 public:
482 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} 507 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
483 508
484 virtual ~SweeperTask() {} 509 virtual ~SweeperTask() {}
485 510
486 private: 511 private:
487 // v8::Task overrides. 512 // v8::Task overrides.
488 void Run() override { 513 void Run() override {
489 heap_->mark_compact_collector()->SweepInParallel(space_, 0); 514 heap_->mark_compact_collector()->SweepInParallel(space_, 0);
(...skipping 309 matching lines...)
799 "compaction-selection: space=%s reduce_memory=%d pages=%d " 824 "compaction-selection: space=%s reduce_memory=%d pages=%d "
800 "total_live_bytes=%d\n", 825 "total_live_bytes=%d\n",
801 AllocationSpaceName(space->identity()), reduce_memory, 826 AllocationSpaceName(space->identity()), reduce_memory,
802 candidate_count, total_live_bytes / KB); 827 candidate_count, total_live_bytes / KB);
803 } 828 }
804 } 829 }
805 830
806 831
807 void MarkCompactCollector::AbortCompaction() { 832 void MarkCompactCollector::AbortCompaction() {
808 if (compacting_) { 833 if (compacting_) {
809 for (Page* p : evacuation_candidates_) { 834 int npages = evacuation_candidates_.length();
835 for (int i = 0; i < npages; i++) {
836 Page* p = evacuation_candidates_[i];
810 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); 837 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
811 p->ClearEvacuationCandidate(); 838 p->ClearEvacuationCandidate();
812 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 839 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
813 } 840 }
814 compacting_ = false; 841 compacting_ = false;
815 evacuation_candidates_.Rewind(0); 842 evacuation_candidates_.Rewind(0);
816 } 843 }
817 DCHECK_EQ(0, evacuation_candidates_.length()); 844 DCHECK_EQ(0, evacuation_candidates_.length());
818 } 845 }
819 846
(...skipping 694 matching lines...)
1514 class MarkCompactCollector::HeapObjectVisitor { 1541 class MarkCompactCollector::HeapObjectVisitor {
1515 public: 1542 public:
1516 virtual ~HeapObjectVisitor() {} 1543 virtual ~HeapObjectVisitor() {}
1517 virtual bool Visit(HeapObject* object) = 0; 1544 virtual bool Visit(HeapObject* object) = 0;
1518 }; 1545 };
1519 1546
1520 1547
1521 class MarkCompactCollector::EvacuateVisitorBase 1548 class MarkCompactCollector::EvacuateVisitorBase
1522 : public MarkCompactCollector::HeapObjectVisitor { 1549 : public MarkCompactCollector::HeapObjectVisitor {
1523 public: 1550 public:
1524 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces, 1551 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer)
1525 SlotsBuffer** evacuation_slots_buffer, 1552 : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {}
1526 LocalStoreBuffer* local_store_buffer)
1527 : heap_(heap),
1528 evacuation_slots_buffer_(evacuation_slots_buffer),
1529 compaction_spaces_(compaction_spaces),
1530 local_store_buffer_(local_store_buffer) {}
1531 1553
1532 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, 1554 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1533 HeapObject** target_object) { 1555 HeapObject** target_object) {
1534 int size = object->Size(); 1556 int size = object->Size();
1535 AllocationAlignment alignment = object->RequiredAlignment(); 1557 AllocationAlignment alignment = object->RequiredAlignment();
1536 AllocationResult allocation = target_space->AllocateRaw(size, alignment); 1558 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1537 if (allocation.To(target_object)) { 1559 if (allocation.To(target_object)) {
1538 heap_->mark_compact_collector()->MigrateObject( 1560 heap_->mark_compact_collector()->MigrateObject(
1539 *target_object, object, size, target_space->identity(), 1561 *target_object, object, size, target_space->identity(),
1540 evacuation_slots_buffer_, local_store_buffer_); 1562 evacuation_slots_buffer_);
1541 return true; 1563 return true;
1542 } 1564 }
1543 return false; 1565 return false;
1544 } 1566 }
1545 1567
1546 protected: 1568 protected:
1547 Heap* heap_; 1569 Heap* heap_;
1548 SlotsBuffer** evacuation_slots_buffer_; 1570 SlotsBuffer** evacuation_slots_buffer_;
1549 CompactionSpaceCollection* compaction_spaces_;
1550 LocalStoreBuffer* local_store_buffer_;
1551 }; 1571 };
1552 1572
1553 1573
1554 class MarkCompactCollector::EvacuateNewSpaceVisitor final 1574 class MarkCompactCollector::EvacuateNewSpaceVisitor final
1555 : public MarkCompactCollector::EvacuateVisitorBase { 1575 : public MarkCompactCollector::EvacuateVisitorBase {
1556 public: 1576 public:
1557 static const intptr_t kLabSize = 4 * KB; 1577 static const intptr_t kLabSize = 4 * KB;
1558 static const intptr_t kMaxLabObjectSize = 256; 1578 static const intptr_t kMaxLabObjectSize = 256;
1559 1579
1560 explicit EvacuateNewSpaceVisitor(Heap* heap, 1580 explicit EvacuateNewSpaceVisitor(Heap* heap,
1561 CompactionSpaceCollection* compaction_spaces,
1562 SlotsBuffer** evacuation_slots_buffer, 1581 SlotsBuffer** evacuation_slots_buffer,
1563 LocalStoreBuffer* local_store_buffer,
1564 HashMap* local_pretenuring_feedback) 1582 HashMap* local_pretenuring_feedback)
1565 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer, 1583 : EvacuateVisitorBase(heap, evacuation_slots_buffer),
1566 local_store_buffer),
1567 buffer_(LocalAllocationBuffer::InvalidBuffer()), 1584 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1568 space_to_allocate_(NEW_SPACE), 1585 space_to_allocate_(NEW_SPACE),
1569 promoted_size_(0), 1586 promoted_size_(0),
1570 semispace_copied_size_(0), 1587 semispace_copied_size_(0),
1571 local_pretenuring_feedback_(local_pretenuring_feedback) {} 1588 local_pretenuring_feedback_(local_pretenuring_feedback) {}
1572 1589
1573 bool Visit(HeapObject* object) override { 1590 bool Visit(HeapObject* object) override {
1574 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_); 1591 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
1575 int size = object->Size(); 1592 int size = object->Size();
1576 HeapObject* target_object = nullptr; 1593 HeapObject* target_object = nullptr;
1577 if (heap_->ShouldBePromoted(object->address(), size) && 1594 if (heap_->ShouldBePromoted(object->address(), size) &&
1578 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object, 1595 TryEvacuateObject(heap_->old_space(), object, &target_object)) {
1579 &target_object)) {
1580 // If we end up needing more special cases, we should factor this out. 1596 // If we end up needing more special cases, we should factor this out.
1581 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { 1597 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1582 heap_->array_buffer_tracker()->Promote( 1598 heap_->array_buffer_tracker()->Promote(
1583 JSArrayBuffer::cast(target_object)); 1599 JSArrayBuffer::cast(target_object));
1584 } 1600 }
1585 promoted_size_ += size; 1601 promoted_size_ += size;
1586 return true; 1602 return true;
1587 } 1603 }
1588 HeapObject* target = nullptr; 1604 HeapObject* target = nullptr;
1589 AllocationSpace space = AllocateTargetObject(object, &target); 1605 AllocationSpace space = AllocateTargetObject(object, &target);
1590 heap_->mark_compact_collector()->MigrateObject( 1606 heap_->mark_compact_collector()->MigrateObject(
1591 HeapObject::cast(target), object, size, space, 1607 HeapObject::cast(target), object, size, space,
1592 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_, 1608 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_);
1593 (space == NEW_SPACE) ? nullptr : local_store_buffer_);
1594 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { 1609 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1595 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); 1610 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1596 } 1611 }
1597 semispace_copied_size_ += size; 1612 semispace_copied_size_ += size;
1598 return true; 1613 return true;
1599 } 1614 }
1600 1615
1601 intptr_t promoted_size() { return promoted_size_; } 1616 intptr_t promoted_size() { return promoted_size_; }
1602 intptr_t semispace_copied_size() { return semispace_copied_size_; } 1617 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1603 1618
(...skipping 52 matching lines...)
1656 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE; 1671 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1657 } 1672 }
1658 } 1673 }
1659 } 1674 }
1660 return allocation; 1675 return allocation;
1661 } 1676 }
1662 1677
1663 inline AllocationResult AllocateInOldSpace(int size_in_bytes, 1678 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1664 AllocationAlignment alignment) { 1679 AllocationAlignment alignment) {
1665 AllocationResult allocation = 1680 AllocationResult allocation =
1666 compaction_spaces_->Get(OLD_SPACE)->AllocateRaw(size_in_bytes, 1681 heap_->old_space()->AllocateRaw(size_in_bytes, alignment);
1667 alignment);
1668 if (allocation.IsRetry()) { 1682 if (allocation.IsRetry()) {
1669 FatalProcessOutOfMemory( 1683 FatalProcessOutOfMemory(
1670 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); 1684 "MarkCompactCollector: semi-space copy, fallback in old gen\n");
1671 } 1685 }
1672 return allocation; 1686 return allocation;
1673 } 1687 }
1674 1688
1675 inline AllocationResult AllocateInLab(int size_in_bytes, 1689 inline AllocationResult AllocateInLab(int size_in_bytes,
1676 AllocationAlignment alignment) { 1690 AllocationAlignment alignment) {
1677 AllocationResult allocation; 1691 AllocationResult allocation;
(...skipping 25 matching lines...)
1703 intptr_t semispace_copied_size_; 1717 intptr_t semispace_copied_size_;
1704 HashMap* local_pretenuring_feedback_; 1718 HashMap* local_pretenuring_feedback_;
1705 }; 1719 };
1706 1720
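
The constants kLabSize and kMaxLabObjectSize above describe the local allocation buffer (LAB) scheme this visitor uses for semispace copies: objects up to 256 bytes are bump-allocated out of a thread-local 4 KB buffer that is refilled from the target space, while anything larger falls through to a direct allocation. A minimal standalone sketch of that idea, using illustrative names (LocalBuffer, Refill) rather than the V8 API:

// Sketch of a local allocation buffer: small objects are bump-allocated from
// a thread-local chunk, large objects bypass it. Sizes and names are
// illustrative only; this is not the V8 implementation.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstdlib>

struct LocalBuffer {
  static constexpr size_t kLabSize = 4 * 1024;      // refill granularity
  static constexpr size_t kMaxLabObjectSize = 256;  // larger objects skip the LAB
  uint8_t* top = nullptr;
  uint8_t* limit = nullptr;

  // Stand-in for claiming a fresh chunk from the target space.
  bool Refill() {
    top = static_cast<uint8_t*>(std::malloc(kLabSize));
    limit = (top != nullptr) ? top + kLabSize : nullptr;
    return top != nullptr;
  }

  void* Allocate(size_t size) {
    if (size > kMaxLabObjectSize) return std::malloc(size);  // direct path
    if (top == nullptr || static_cast<size_t>(limit - top) < size) {
      if (!Refill()) return nullptr;                         // out of memory
    }
    void* result = top;
    top += size;  // bump pointer; no synchronization, the buffer is task-local
    return result;
  }
};

int main() {
  LocalBuffer lab;
  void* small = lab.Allocate(64);    // served from the 4 KB buffer
  void* large = lab.Allocate(1024);  // bypasses the buffer
  std::printf("small=%p large=%p\n", small, large);
  return 0;
}

The point of the buffer is that refills, not individual object allocations, are the only operations that touch the shared space, which is what keeps per-task visitors cheap.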
1707 1721
1708 class MarkCompactCollector::EvacuateOldSpaceVisitor final 1722 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1709 : public MarkCompactCollector::EvacuateVisitorBase { 1723 : public MarkCompactCollector::EvacuateVisitorBase {
1710 public: 1724 public:
1711 EvacuateOldSpaceVisitor(Heap* heap, 1725 EvacuateOldSpaceVisitor(Heap* heap,
1712 CompactionSpaceCollection* compaction_spaces, 1726 CompactionSpaceCollection* compaction_spaces,
1713 SlotsBuffer** evacuation_slots_buffer, 1727 SlotsBuffer** evacuation_slots_buffer)
1714 LocalStoreBuffer* local_store_buffer) 1728 : EvacuateVisitorBase(heap, evacuation_slots_buffer),
1715 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer, 1729 compaction_spaces_(compaction_spaces) {}
1716 local_store_buffer) {}
1717 1730
1718 bool Visit(HeapObject* object) override { 1731 bool Visit(HeapObject* object) override {
1719 CompactionSpace* target_space = compaction_spaces_->Get( 1732 CompactionSpace* target_space = compaction_spaces_->Get(
1720 Page::FromAddress(object->address())->owner()->identity()); 1733 Page::FromAddress(object->address())->owner()->identity());
1721 HeapObject* target_object = nullptr; 1734 HeapObject* target_object = nullptr;
1722 if (TryEvacuateObject(target_space, object, &target_object)) { 1735 if (TryEvacuateObject(target_space, object, &target_object)) {
1723 DCHECK(object->map_word().IsForwardingAddress()); 1736 DCHECK(object->map_word().IsForwardingAddress());
1724 return true; 1737 return true;
1725 } 1738 }
1726 return false; 1739 return false;
1727 } 1740 }
1741
1742 private:
1743 CompactionSpaceCollection* compaction_spaces_;
1728 }; 1744 };
1729 1745
1730 1746
1731 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { 1747 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1732 PageIterator it(space); 1748 PageIterator it(space);
1733 while (it.has_next()) { 1749 while (it.has_next()) {
1734 Page* p = it.next(); 1750 Page* p = it.next();
1735 DiscoverGreyObjectsOnPage(p); 1751 DiscoverGreyObjectsOnPage(p);
1736 if (marking_deque()->IsFull()) return; 1752 if (marking_deque()->IsFull()) return;
1737 } 1753 }
(...skipping 787 matching lines...)
2525 HeapObject* undefined = heap()->undefined_value(); 2541 HeapObject* undefined = heap()->undefined_value();
2526 Object* obj = heap()->encountered_transition_arrays(); 2542 Object* obj = heap()->encountered_transition_arrays();
2527 while (obj != Smi::FromInt(0)) { 2543 while (obj != Smi::FromInt(0)) {
2528 TransitionArray* array = TransitionArray::cast(obj); 2544 TransitionArray* array = TransitionArray::cast(obj);
2529 obj = array->next_link(); 2545 obj = array->next_link();
2530 array->set_next_link(undefined, SKIP_WRITE_BARRIER); 2546 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2531 } 2547 }
2532 heap()->set_encountered_transition_arrays(Smi::FromInt(0)); 2548 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2533 } 2549 }
2534 2550
2551
2535 void MarkCompactCollector::RecordMigratedSlot( 2552 void MarkCompactCollector::RecordMigratedSlot(
2536 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer, 2553 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
2537 LocalStoreBuffer* local_store_buffer) {
2538 // When parallel compaction is in progress, store and slots buffer entries 2554 // When parallel compaction is in progress, store and slots buffer entries
2539 // require synchronization. 2555 // require synchronization.
2540 if (heap_->InNewSpace(value)) { 2556 if (heap_->InNewSpace(value)) {
2541 if (compaction_in_progress_) { 2557 if (compaction_in_progress_) {
2542 local_store_buffer->Record(slot); 2558 heap_->store_buffer()->MarkSynchronized(slot);
2543 } else { 2559 } else {
2544 heap_->store_buffer()->Mark(slot); 2560 heap_->store_buffer()->Mark(slot);
2545 } 2561 }
2546 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { 2562 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2547 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, 2563 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2548 reinterpret_cast<Object**>(slot), 2564 reinterpret_cast<Object**>(slot),
2549 SlotsBuffer::IGNORE_OVERFLOW); 2565 SlotsBuffer::IGNORE_OVERFLOW);
2550 } 2566 }
2551 } 2567 }
2552 2568
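
The comment in RecordMigratedSlot is the crux of the parallelism problem this CL reverts: several evacuation tasks can discover new-space pointers at the same time, so either every store-buffer insertion is synchronized (the MarkSynchronized path on the right) or each task records into a private buffer that is merged back later (the local_store_buffer_ path on the left). A standalone sketch of the buffer-locally-and-merge variant, in generic C++ with invented names (SharedStoreBuffer, Insert), not the V8 types:

#include <cstdint>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

using Address = uintptr_t;

// Shared sink that is only touched once per task, under a lock.
struct SharedStoreBuffer {
  std::mutex mutex;
  std::vector<Address> slots;
  void Insert(const std::vector<Address>& local) {
    std::lock_guard<std::mutex> guard(mutex);
    slots.insert(slots.end(), local.begin(), local.end());
  }
};

int main() {
  SharedStoreBuffer shared;
  auto evacuate = [&shared](Address base) {
    std::vector<Address> local;        // per-task slot buffer, no locking
    for (int i = 0; i < 1000; i++) {
      local.push_back(base + i * 8);   // "record" a migrated slot
    }
    shared.Insert(local);              // one synchronized merge per task
  };
  std::thread t1(evacuate, Address{0x10000});
  std::thread t2(evacuate, Address{0x20000});
  t1.join();
  t2.join();
  std::printf("recorded %zu slots\n", shared.slots.size());
  return 0;
}

Buffering locally trades one lock acquisition per task for one per recorded slot, which is why the code being reverted preferred it.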
(...skipping 61 matching lines...)
2614 if (!success) { 2630 if (!success) {
2615 EvictPopularEvacuationCandidate(target_page); 2631 EvictPopularEvacuationCandidate(target_page);
2616 } 2632 }
2617 } 2633 }
2618 } 2634 }
2619 2635
2620 2636
2621 class RecordMigratedSlotVisitor final : public ObjectVisitor { 2637 class RecordMigratedSlotVisitor final : public ObjectVisitor {
2622 public: 2638 public:
2623 RecordMigratedSlotVisitor(MarkCompactCollector* collector, 2639 RecordMigratedSlotVisitor(MarkCompactCollector* collector,
2624 SlotsBuffer** evacuation_slots_buffer, 2640 SlotsBuffer** evacuation_slots_buffer)
2625 LocalStoreBuffer* local_store_buffer)
2626 : collector_(collector), 2641 : collector_(collector),
2627 evacuation_slots_buffer_(evacuation_slots_buffer), 2642 evacuation_slots_buffer_(evacuation_slots_buffer) {}
2628 local_store_buffer_(local_store_buffer) {}
2629 2643
2630 V8_INLINE void VisitPointer(Object** p) override { 2644 V8_INLINE void VisitPointer(Object** p) override {
2631 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), 2645 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
2632 evacuation_slots_buffer_, 2646 evacuation_slots_buffer_);
2633 local_store_buffer_);
2634 } 2647 }
2635 2648
2636 V8_INLINE void VisitPointers(Object** start, Object** end) override { 2649 V8_INLINE void VisitPointers(Object** start, Object** end) override {
2637 while (start < end) { 2650 while (start < end) {
2638 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), 2651 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
2639 evacuation_slots_buffer_, 2652 evacuation_slots_buffer_);
2640 local_store_buffer_);
2641 ++start; 2653 ++start;
2642 } 2654 }
2643 } 2655 }
2644 2656
2645 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { 2657 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
2646 if (collector_->compacting_) { 2658 if (collector_->compacting_) {
2647 Address code_entry = Memory::Address_at(code_entry_slot); 2659 Address code_entry = Memory::Address_at(code_entry_slot);
2648 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, 2660 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
2649 evacuation_slots_buffer_); 2661 evacuation_slots_buffer_);
2650 } 2662 }
2651 } 2663 }
2652 2664
2653 private: 2665 private:
2654 MarkCompactCollector* collector_; 2666 MarkCompactCollector* collector_;
2655 SlotsBuffer** evacuation_slots_buffer_; 2667 SlotsBuffer** evacuation_slots_buffer_;
2656 LocalStoreBuffer* local_store_buffer_;
2657 }; 2668 };
2658 2669
2659 2670
2660 // We scavenge new space simultaneously with sweeping. This is done in two 2671 // We scavenge new space simultaneously with sweeping. This is done in two
2661 // passes. 2672 // passes.
2662 // 2673 //
2663 // The first pass migrates all alive objects from one semispace to another or 2674 // The first pass migrates all alive objects from one semispace to another or
2664 // promotes them to old space. Forwarding address is written directly into 2675 // promotes them to old space. Forwarding address is written directly into
2665 // first word of object without any encoding. If object is dead we write 2676 // first word of object without any encoding. If object is dead we write
2666 // NULL as a forwarding address. 2677 // NULL as a forwarding address.
2667 // 2678 //
2668 // The second pass updates pointers to new space in all spaces. It is possible 2679 // The second pass updates pointers to new space in all spaces. It is possible
2669 // to encounter pointers to dead new space objects during traversal of pointers 2680 // to encounter pointers to dead new space objects during traversal of pointers
2670 // to new space. We should clear them to avoid encountering them during next 2681 // to new space. We should clear them to avoid encountering them during next
2671 // pointer iteration. This is an issue if the store buffer overflows and we 2682 // pointer iteration. This is an issue if the store buffer overflows and we
2672 // have to scan the entire old space, including dead objects, looking for 2683 // have to scan the entire old space, including dead objects, looking for
2673 // pointers to new space. 2684 // pointers to new space.
2674 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src, 2685 void MarkCompactCollector::MigrateObject(
2675 int size, AllocationSpace dest, 2686 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
2676 SlotsBuffer** evacuation_slots_buffer, 2687 SlotsBuffer** evacuation_slots_buffer) {
2677 LocalStoreBuffer* local_store_buffer) {
2678 Address dst_addr = dst->address(); 2688 Address dst_addr = dst->address();
2679 Address src_addr = src->address(); 2689 Address src_addr = src->address();
2680 DCHECK(heap()->AllowedToBeMigrated(src, dest)); 2690 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2681 DCHECK(dest != LO_SPACE); 2691 DCHECK(dest != LO_SPACE);
2682 if (dest == OLD_SPACE) { 2692 if (dest == OLD_SPACE) {
2683 DCHECK_OBJECT_SIZE(size); 2693 DCHECK_OBJECT_SIZE(size);
2684 DCHECK(evacuation_slots_buffer != nullptr); 2694 DCHECK(evacuation_slots_buffer != nullptr);
2685 DCHECK(IsAligned(size, kPointerSize)); 2695 DCHECK(IsAligned(size, kPointerSize));
2686 2696
2687 heap()->MoveBlock(dst->address(), src->address(), size); 2697 heap()->MoveBlock(dst->address(), src->address(), size);
2688 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer, 2698 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer);
2689 local_store_buffer);
2690 dst->IterateBody(&visitor); 2699 dst->IterateBody(&visitor);
2691 } else if (dest == CODE_SPACE) { 2700 } else if (dest == CODE_SPACE) {
2692 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); 2701 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
2693 DCHECK(evacuation_slots_buffer != nullptr); 2702 DCHECK(evacuation_slots_buffer != nullptr);
2694 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); 2703 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2695 heap()->MoveBlock(dst_addr, src_addr, size); 2704 heap()->MoveBlock(dst_addr, src_addr, size);
2696 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); 2705 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
2697 Code::cast(dst)->Relocate(dst_addr - src_addr); 2706 Code::cast(dst)->Relocate(dst_addr - src_addr);
2698 } else { 2707 } else {
2699 DCHECK_OBJECT_SIZE(size); 2708 DCHECK_OBJECT_SIZE(size);
(...skipping 341 matching lines...)
3041 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3050 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3042 3051
3043 // The target object is black but we don't know if the source slot is black. 3052 // The target object is black but we don't know if the source slot is black.
3044 // The source object could have died and the slot could be part of a free 3053 // The source object could have died and the slot could be part of a free
3045 // space. Use the mark bit iterator to find out about liveness of the slot. 3054 // space. Use the mark bit iterator to find out about liveness of the slot.
3046 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); 3055 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
3047 } 3056 }
3048 3057
3049 3058
3050 void MarkCompactCollector::EvacuateNewSpacePrologue() { 3059 void MarkCompactCollector::EvacuateNewSpacePrologue() {
3060 // There are soft limits in the allocation code, designed to trigger a mark
3061 // sweep collection by failing allocations. But since we are already in
3062 // a mark-sweep allocation, there is no sense in trying to trigger one.
3063 AlwaysAllocateScope scope(isolate());
3064
3051 NewSpace* new_space = heap()->new_space(); 3065 NewSpace* new_space = heap()->new_space();
3052 NewSpacePageIterator it(new_space->bottom(), new_space->top()); 3066
3053 // Append the list of new space pages to be processed. 3067 // Store allocation range before flipping semispaces.
3068 Address from_bottom = new_space->bottom();
3069 Address from_top = new_space->top();
3070
3071 // Flip the semispaces. After flipping, to space is empty, from space has
3072 // live objects.
3073 new_space->Flip();
3074 new_space->ResetAllocationInfo();
3075
3076 newspace_evacuation_candidates_.Clear();
3077 NewSpacePageIterator it(from_bottom, from_top);
3054 while (it.has_next()) { 3078 while (it.has_next()) {
3055 newspace_evacuation_candidates_.Add(it.next()); 3079 newspace_evacuation_candidates_.Add(it.next());
3056 } 3080 }
3057 new_space->Flip();
3058 new_space->ResetAllocationInfo();
3059 }
3060
3061 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
3062 newspace_evacuation_candidates_.Rewind(0);
3063 } 3081 }
3064 3082
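
The restored prologue above is careful about ordering: the from-space range [bottom, top) is captured first, then the semispaces are flipped and the allocation info reset, and only then are the captured pages queued as evacuation candidates; reading bottom()/top() after Flip() would describe the new, empty allocation space instead. A toy illustration of that ordering with an invented ToySemiSpaces type (not the V8 NewSpace API):

#include <cstdio>
#include <utility>
#include <vector>

// Two semispaces; allocation always goes into `active`. Flip() swaps the
// roles, ResetAllocationInfo() resets the fresh allocation area.
struct ToySemiSpaces {
  std::vector<int> active{1, 2, 3};  // objects allocated since the last GC
  std::vector<int> inactive;
  size_t top() const { return active.size(); }
  void Flip() { std::swap(active, inactive); }
  void ResetAllocationInfo() { active.clear(); }
};

int main() {
  ToySemiSpaces s;
  size_t from_top = s.top();  // capture the range BEFORE flipping
  s.Flip();
  s.ResetAllocationInfo();
  // Evacuation now walks the captured range of the old from-space (held in
  // `inactive` after the flip) while new allocations go into `active`.
  std::printf("objects to evacuate: %zu (captured) vs %zu (top after flip)\n",
              from_top, s.top());
  return 0;
}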
3065 3083
3084 HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() {
3085 HashMap* local_pretenuring_feedback = new HashMap(
3086 HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
3087 EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
3088 local_pretenuring_feedback);
3089 // First pass: traverse all objects in inactive semispace, remove marks,
3090 // migrate live objects and write forwarding addresses. This stage puts
3091 // new entries in the store buffer and may cause some pages to be marked
3092 // scan-on-scavenge.
3093 for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
3094 NewSpacePage* p =
3095 reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
3096 bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
3097 USE(ok);
3098 DCHECK(ok);
3099 }
3100 heap_->IncrementPromotedObjectsSize(
3101 static_cast<int>(new_space_visitor.promoted_size()));
3102 heap_->IncrementSemiSpaceCopiedObjectSize(
3103 static_cast<int>(new_space_visitor.semispace_copied_size()));
3104 heap_->IncrementYoungSurvivorsCounter(
3105 static_cast<int>(new_space_visitor.promoted_size()) +
3106 static_cast<int>(new_space_visitor.semispace_copied_size()));
3107 return local_pretenuring_feedback;
3108 }
3109
3110
3066 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( 3111 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
3067 SlotsBuffer* evacuation_slots_buffer) { 3112 SlotsBuffer* evacuation_slots_buffer) {
3068 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); 3113 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
3069 evacuation_slots_buffers_.Add(evacuation_slots_buffer); 3114 evacuation_slots_buffers_.Add(evacuation_slots_buffer);
3070 } 3115 }
3071 3116
3072 class MarkCompactCollector::Evacuator : public Malloced {
3073 public:
3074 Evacuator(MarkCompactCollector* collector,
3075 const List<Page*>& evacuation_candidates,
3076 const List<NewSpacePage*>& newspace_evacuation_candidates)
3077 : collector_(collector),
3078 evacuation_candidates_(evacuation_candidates),
3079 newspace_evacuation_candidates_(newspace_evacuation_candidates),
3080 compaction_spaces_(collector->heap()),
3081 local_slots_buffer_(nullptr),
3082 local_store_buffer_(),
3083 local_pretenuring_feedback_(HashMap::PointersMatch,
3084 kInitialLocalPretenuringFeedbackCapacity),
3085 new_space_visitor_(collector->heap(), &compaction_spaces_,
3086 &local_slots_buffer_, &local_store_buffer_,
3087 &local_pretenuring_feedback_),
3088 old_space_visitor_(collector->heap(), &compaction_spaces_,
3089 &local_slots_buffer_, &local_store_buffer_),
3090 duration_(0.0),
3091 bytes_compacted_(0),
3092 task_id_(0) {}
3093 3117
3094 // Evacuate the configured set of pages in parallel. 3118 int MarkCompactCollector::NumberOfParallelCompactionTasks() {
3095 inline void EvacuatePages();
3096
3097 // Merge back locally cached info sequentially. Note that this method needs
3098 // to be called from the main thread.
3099 inline void Finalize();
3100
3101 CompactionSpaceCollection* compaction_spaces() { return &compaction_spaces_; }
3102
3103 uint32_t task_id() { return task_id_; }
3104 void set_task_id(uint32_t id) { task_id_ = id; }
3105
3106 private:
3107 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3108
3109 Heap* heap() { return collector_->heap(); }
3110
3111 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3112 duration_ += duration;
3113 bytes_compacted_ += bytes_compacted;
3114 }
3115
3116 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
3117
3118 MarkCompactCollector* collector_;
3119
3120 // Pages to process.
3121 const List<Page*>& evacuation_candidates_;
3122 const List<NewSpacePage*>& newspace_evacuation_candidates_;
3123
3124 // Locally cached collector data.
3125 CompactionSpaceCollection compaction_spaces_;
3126 SlotsBuffer* local_slots_buffer_;
3127 LocalStoreBuffer local_store_buffer_;
3128 HashMap local_pretenuring_feedback_;
3129
3130 // Visitors for the corresponding spaces.
3131 EvacuateNewSpaceVisitor new_space_visitor_;
3132 EvacuateOldSpaceVisitor old_space_visitor_;
3133
3134 // Book keeping info.
3135 double duration_;
3136 intptr_t bytes_compacted_;
3137
3138 // Task id, if this evacuator is executed on a background task instead of
3139 // the main thread. Can be used to try to abort the task currently scheduled
3140 // to executed to evacuate pages.
3141 uint32_t task_id_;
3142 };
3143
3144 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
3145 MemoryChunk* p, HeapObjectVisitor* visitor) {
3146 bool success = true;
3147 if (p->parallel_compaction_state().TrySetValue(
3148 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3149 if (p->IsEvacuationCandidate() || p->InNewSpace()) {
3150 DCHECK_EQ(p->parallel_compaction_state().Value(),
3151 MemoryChunk::kCompactingInProgress);
3152 int saved_live_bytes = p->LiveBytes();
3153 double evacuation_time;
3154 {
3155 AlwaysAllocateScope always_allocate(heap()->isolate());
3156 TimedScope timed_scope(&evacuation_time);
3157 success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
3158 }
3159 if (success) {
3160 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3161 p->parallel_compaction_state().SetValue(
3162 MemoryChunk::kCompactingFinalize);
3163 } else {
3164 p->parallel_compaction_state().SetValue(
3165 MemoryChunk::kCompactingAborted);
3166 }
3167 } else {
3168 // There could be popular pages in the list of evacuation candidates
3169 // which we do not compact.
3170 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3171 }
3172 }
3173 return success;
3174 }
3175
3176 void MarkCompactCollector::Evacuator::EvacuatePages() {
3177 for (NewSpacePage* p : newspace_evacuation_candidates_) {
3178 DCHECK(p->InNewSpace());
3179 DCHECK_EQ(p->concurrent_sweeping_state().Value(),
3180 NewSpacePage::kSweepingDone);
3181 bool success = EvacuateSinglePage(p, &new_space_visitor_);
3182 DCHECK(success);
3183 USE(success);
3184 }
3185 for (Page* p : evacuation_candidates_) {
3186 DCHECK(p->IsEvacuationCandidate() ||
3187 p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
3188 DCHECK_EQ(p->concurrent_sweeping_state().Value(), Page::kSweepingDone);
3189 EvacuateSinglePage(p, &old_space_visitor_);
3190 }
3191 }
3192
3193 void MarkCompactCollector::Evacuator::Finalize() {
3194 heap()->old_space()->MergeCompactionSpace(compaction_spaces_.Get(OLD_SPACE));
3195 heap()->code_space()->MergeCompactionSpace(
3196 compaction_spaces_.Get(CODE_SPACE));
3197 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3198 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
3199 heap()->IncrementSemiSpaceCopiedObjectSize(
3200 new_space_visitor_.semispace_copied_size());
3201 heap()->IncrementYoungSurvivorsCounter(
3202 new_space_visitor_.promoted_size() +
3203 new_space_visitor_.semispace_copied_size());
3204 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3205 local_store_buffer_.Process(heap()->store_buffer());
3206 collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
3207 }
3208
3209 class MarkCompactCollector::CompactionTask : public CancelableTask {
3210 public:
3211 explicit CompactionTask(Heap* heap, Evacuator* evacuator)
3212 : CancelableTask(heap->isolate()), heap_(heap), evacuator_(evacuator) {
3213 evacuator->set_task_id(id());
3214 }
3215
3216 virtual ~CompactionTask() {}
3217
3218 private:
3219 // v8::internal::CancelableTask overrides.
3220 void RunInternal() override {
3221 evacuator_->EvacuatePages();
3222 heap_->mark_compact_collector()
3223 ->pending_compaction_tasks_semaphore_.Signal();
3224 }
3225
3226 Heap* heap_;
3227 Evacuator* evacuator_;
3228
3229 DISALLOW_COPY_AND_ASSIGN(CompactionTask);
3230 };
3231
3232 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3233 intptr_t live_bytes) {
3234 if (!FLAG_parallel_compaction) return 1; 3119 if (!FLAG_parallel_compaction) return 1;
3235 // Compute the number of needed tasks based on a target compaction time, the 3120 // Compute the number of needed tasks based on a target compaction time, the
3236 // profiled compaction speed and marked live memory. 3121 // profiled compaction speed and marked live memory.
3237 // 3122 //
3238 // The number of parallel compaction tasks is limited by: 3123 // The number of parallel compaction tasks is limited by:
3239 // - #evacuation pages 3124 // - #evacuation pages
3240 // - (#cores - 1) 3125 // - (#cores - 1)
3126 // - a hard limit
3241 const double kTargetCompactionTimeInMs = 1; 3127 const double kTargetCompactionTimeInMs = 1;
3242 const int kNumSweepingTasks = 3; 3128 const int kMaxCompactionTasks = 8;
3243 3129
3244 intptr_t compaction_speed = 3130 intptr_t compaction_speed =
3245 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3131 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3132 if (compaction_speed == 0) return 1;
3246 3133
3247 const int available_cores = 3134 intptr_t live_bytes = 0;
3248 Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1); 3135 for (Page* page : evacuation_candidates_) {
3249 int tasks; 3136 live_bytes += page->LiveBytes();
3250 if (compaction_speed > 0) {
3251 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
3252 compaction_speed / kTargetCompactionTimeInMs);
3253 } else {
3254 tasks = pages;
3255 } 3137 }
3256 const int tasks_capped_pages = Min(pages, tasks); 3138
3257 return Min(available_cores, tasks_capped_pages); 3139 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
3140 const int tasks =
3141 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
3142 kTargetCompactionTimeInMs);
3143 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
3144 const int tasks_capped_cores = Min(cores, tasks_capped_pages);
3145 const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores);
3146 return tasks_capped_hard;
3258 } 3147 }
3259 3148
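
The replacement NumberOfParallelCompactionTasks above is simple enough to check by hand: with the 1 ms target, tasks = 1 + live_bytes / compaction_speed, then capped by the number of evacuation candidates, by cores - 1, and by the hard limit of 8. A small standalone recomputation with invented example numbers (12 pages, 6 MB live, 1 MB/ms, 8 cores gives min(12, 7, 7, 8) = 7 tasks):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Mirrors the capping chain of the restored NumberOfParallelCompactionTasks;
// the inputs below are invented example numbers, not measured data.
int NumberOfTasks(int pages, int64_t live_bytes, int64_t bytes_per_ms,
                  int cores) {
  const double kTargetCompactionTimeInMs = 1;
  const int kMaxCompactionTasks = 8;
  if (bytes_per_ms == 0) return 1;  // no profiled compaction speed yet
  int tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                   bytes_per_ms / kTargetCompactionTimeInMs);
  tasks = std::min(pages, tasks);                    // one task per page at most
  tasks = std::min(std::max(1, cores - 1), tasks);   // leave one core free
  return std::min(kMaxCompactionTasks, tasks);       // hard upper bound
}

int main() {
  // 12 candidate pages, 6 MB live, ~1 MB/ms compaction speed, 8 cores:
  // 1 + 6 = 7 tasks, capped to min(12, 7, 7, 8) = 7.
  std::printf("tasks = %d\n", NumberOfTasks(12, 6 << 20, 1 << 20, 8));
  return 0;
}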
3260 3149
3261 void MarkCompactCollector::EvacuatePagesInParallel() { 3150 void MarkCompactCollector::EvacuatePagesInParallel() {
3262 int num_pages = 0; 3151 const int num_pages = evacuation_candidates_.length();
3263 intptr_t live_bytes = 0; 3152 if (num_pages == 0) return;
3264 for (Page* page : evacuation_candidates_) {
3265 num_pages++;
3266 live_bytes += page->LiveBytes();
3267 }
3268 for (NewSpacePage* page : newspace_evacuation_candidates_) {
3269 num_pages++;
3270 live_bytes += page->LiveBytes();
3271 }
3272 DCHECK_GE(num_pages, 1);
3273 3153
3274 // Used for trace summary. 3154 // Used for trace summary.
3155 intptr_t live_bytes = 0;
3275 intptr_t compaction_speed = 0; 3156 intptr_t compaction_speed = 0;
3276 if (FLAG_trace_fragmentation) { 3157 if (FLAG_trace_fragmentation) {
3158 for (Page* page : evacuation_candidates_) {
3159 live_bytes += page->LiveBytes();
3160 }
3277 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3161 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3278 } 3162 }
3279 3163 const int num_tasks = NumberOfParallelCompactionTasks();
3280 const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
3281 3164
3282 // Set up compaction spaces. 3165 // Set up compaction spaces.
3283 Evacuator** evacuators = new Evacuator*[num_tasks];
3284 CompactionSpaceCollection** compaction_spaces_for_tasks = 3166 CompactionSpaceCollection** compaction_spaces_for_tasks =
3285 new CompactionSpaceCollection*[num_tasks]; 3167 new CompactionSpaceCollection*[num_tasks];
3286 for (int i = 0; i < num_tasks; i++) { 3168 for (int i = 0; i < num_tasks; i++) {
3287 evacuators[i] = new Evacuator(this, evacuation_candidates_, 3169 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap());
3288 newspace_evacuation_candidates_);
3289 compaction_spaces_for_tasks[i] = evacuators[i]->compaction_spaces();
3290 } 3170 }
3171
3291 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, 3172 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3292 num_tasks); 3173 num_tasks);
3293 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, 3174 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3294 num_tasks); 3175 num_tasks);
3176
3177 uint32_t* task_ids = new uint32_t[num_tasks - 1];
3178 // Kick off parallel tasks.
3179 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks);
3180 // Wait for unfinished and not-yet-started tasks.
3181 WaitUntilCompactionCompleted(task_ids, num_tasks - 1);
3182 delete[] task_ids;
3183
3184 double compaction_duration = 0.0;
3185 intptr_t compacted_memory = 0;
3186 // Merge back memory (compacted and unused) from compaction spaces.
3187 for (int i = 0; i < num_tasks; i++) {
3188 heap()->old_space()->MergeCompactionSpace(
3189 compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
3190 heap()->code_space()->MergeCompactionSpace(
3191 compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
3192 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
3193 compaction_duration += compaction_spaces_for_tasks[i]->duration();
3194 delete compaction_spaces_for_tasks[i];
3195 }
3295 delete[] compaction_spaces_for_tasks; 3196 delete[] compaction_spaces_for_tasks;
3197 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
3296 3198
3297 // Kick off parallel tasks. 3199 // Finalize sequentially.
3298 StartParallelCompaction(evacuators, num_tasks);
3299 // Wait for unfinished and not-yet-started tasks.
3300 WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
3301
3302 // Finalize local evacuators by merging back all locally cached data.
3303 for (int i = 0; i < num_tasks; i++) {
3304 evacuators[i]->Finalize();
3305 delete evacuators[i];
3306 }
3307 delete[] evacuators;
3308
3309 // Finalize pages sequentially.
3310 for (NewSpacePage* p : newspace_evacuation_candidates_) {
3311 DCHECK_EQ(p->parallel_compaction_state().Value(),
3312 MemoryChunk::kCompactingFinalize);
3313 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3314 }
3315
3316 int abandoned_pages = 0; 3200 int abandoned_pages = 0;
3317 for (Page* p : evacuation_candidates_) { 3201 for (int i = 0; i < num_pages; i++) {
3202 Page* p = evacuation_candidates_[i];
3318 switch (p->parallel_compaction_state().Value()) { 3203 switch (p->parallel_compaction_state().Value()) {
3319 case MemoryChunk::ParallelCompactingState::kCompactingAborted: 3204 case MemoryChunk::ParallelCompactingState::kCompactingAborted:
3320 // We have partially compacted the page, i.e., some objects may have 3205 // We have partially compacted the page, i.e., some objects may have
3321 // moved, others are still in place. 3206 // moved, others are still in place.
3322 // We need to: 3207 // We need to:
3323 // - Leave the evacuation candidate flag for later processing of 3208 // - Leave the evacuation candidate flag for later processing of
3324 // slots buffer entries. 3209 // slots buffer entries.
3325 // - Leave the slots buffer there for processing of entries added by 3210 // - Leave the slots buffer there for processing of entries added by
3326 // the write barrier. 3211 // the write barrier.
3327 // - Rescan the page as slot recording in the migration buffer only 3212 // - Rescan the page as slot recording in the migration buffer only
(...skipping 12 matching lines...)
3340 case MemoryChunk::kCompactingFinalize: 3225 case MemoryChunk::kCompactingFinalize:
3341 DCHECK(p->IsEvacuationCandidate()); 3226 DCHECK(p->IsEvacuationCandidate());
3342 DCHECK(p->SweepingDone()); 3227 DCHECK(p->SweepingDone());
3343 p->Unlink(); 3228 p->Unlink();
3344 break; 3229 break;
3345 case MemoryChunk::kCompactingDone: 3230 case MemoryChunk::kCompactingDone:
3346 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); 3231 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
3347 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3232 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3348 break; 3233 break;
3349 default: 3234 default:
3350 // MemoryChunk::kCompactingInProgress. 3235 // We should not observe kCompactingInProgress, or kCompactingDone.
3351 UNREACHABLE(); 3236 UNREACHABLE();
3352 } 3237 }
3353 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); 3238 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3354 } 3239 }
3355 if (FLAG_trace_fragmentation) { 3240 if (FLAG_trace_fragmentation) {
3356 PrintIsolate(isolate(), 3241 PrintIsolate(isolate(),
3357 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " 3242 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
3358 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX 3243 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
3359 "d compaction_speed=%" V8_PTR_PREFIX "d\n", 3244 "d compaction_speed=%" V8_PTR_PREFIX "d\n",
3360 isolate()->time_millis_since_init(), FLAG_parallel_compaction, 3245 isolate()->time_millis_since_init(), FLAG_parallel_compaction,
3361 num_pages, abandoned_pages, num_tasks, 3246 num_pages, abandoned_pages, num_tasks,
3362 base::SysInfo::NumberOfProcessors(), live_bytes, 3247 base::SysInfo::NumberOfProcessors(), live_bytes,
3363 compaction_speed); 3248 compaction_speed);
3364 } 3249 }
3365 } 3250 }
3366 3251
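
DivideUponCompactionSpaces and MergeCompactionSpace in the function above bracket the parallel phase: each task is handed a private compaction space to allocate evacuated objects into, so no locking is needed while copying, and whatever each task used or left over is folded back into the owning space sequentially afterwards. A generic sketch of that divide-then-merge shape, with an invented Arena type standing in for a compaction space:

#include <cstdio>
#include <thread>
#include <vector>

// Divide-then-merge sketch: each task gets a private arena so the parallel
// phase needs no locks; leftovers are merged back on one thread afterwards.
// `Arena` is an invented stand-in for a compaction space, not a V8 class.
struct Arena {
  size_t budget = 0;     // bytes handed to this task up front
  size_t allocated = 0;  // bytes the task actually used for evacuated objects
};

int main() {
  const int kTasks = 4;
  const size_t kFreeBytes = 1 << 20;  // pretend the space has 1 MB available
  std::vector<Arena> arenas(kTasks);
  for (Arena& arena : arenas) arena.budget = kFreeBytes / kTasks;  // divide

  std::vector<std::thread> tasks;
  for (int i = 0; i < kTasks; i++) {
    Arena* arena = &arenas[i];
    // Each "evacuation task" allocates only from its own arena.
    tasks.emplace_back([arena] { arena->allocated = arena->budget / 2; });
  }
  for (std::thread& t : tasks) t.join();

  size_t used = 0, returned = 0;
  for (const Arena& arena : arenas) {  // merge back sequentially
    used += arena.allocated;
    returned += arena.budget - arena.allocated;
  }
  std::printf("used=%zu bytes, returned=%zu bytes\n", used, returned);
  return 0;
}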
3367 void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators, 3252
3368 int len) { 3253 void MarkCompactCollector::StartParallelCompaction(
3254 CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids,
3255 int len) {
3369 compaction_in_progress_ = true; 3256 compaction_in_progress_ = true;
3370 for (int i = 1; i < len; i++) { 3257 for (int i = 1; i < len; i++) {
3371 CompactionTask* task = new CompactionTask(heap(), evacuators[i]); 3258 CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]);
3259 task_ids[i - 1] = task->id();
3372 V8::GetCurrentPlatform()->CallOnBackgroundThread( 3260 V8::GetCurrentPlatform()->CallOnBackgroundThread(
3373 task, v8::Platform::kShortRunningTask); 3261 task, v8::Platform::kShortRunningTask);
3374 } 3262 }
3375 3263
3376 // Contribute on main thread. 3264 // Contribute in main thread.
3377 evacuators[0]->EvacuatePages(); 3265 EvacuatePages(compaction_spaces[0], &migration_slots_buffer_);
3378 } 3266 }
3379 3267
3380 void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators, 3268
3269 void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids,
3381 int len) { 3270 int len) {
3382 // Try to cancel compaction tasks that have not been run (as they might be 3271 // Try to cancel compaction tasks that have not been run (as they might be
3383 // stuck in a worker queue). Tasks that cannot be canceled, have either 3272 // stuck in a worker queue). Tasks that cannot be canceled, have either
3384 // already completed or are still running, hence we need to wait for their 3273 // already completed or are still running, hence we need to wait for their
3385 // semaphore signal. 3274 // semaphore signal.
3386 for (int i = 0; i < len; i++) { 3275 for (int i = 0; i < len; i++) {
3387 if (!heap()->isolate()->cancelable_task_manager()->TryAbort( 3276 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) {
3388 evacuators[i]->task_id())) {
3389 pending_compaction_tasks_semaphore_.Wait(); 3277 pending_compaction_tasks_semaphore_.Wait();
3390 } 3278 }
3391 } 3279 }
3392 compaction_in_progress_ = false; 3280 compaction_in_progress_ = false;
3393 } 3281 }
3394 3282
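
WaitUntilCompactionCompleted above encodes a small joining protocol: tasks still sitting in the worker queue are aborted via the cancelable task manager, and for every task that could not be aborted (because it already ran or is still running) the main thread consumes exactly one semaphore signal. A standalone sketch of the same cancel-or-wait shape built from standard-library pieces; TryAbort here is a stub and not the real CancelableTaskManager API:

#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

// Tiny counting semaphore (std::counting_semaphore is C++20; this keeps the
// sketch at C++11). Signal() is what a finished task calls, Wait() is what
// the main thread calls once per task it failed to abort.
struct Semaphore {
  std::mutex m;
  std::condition_variable cv;
  int count = 0;
  void Signal() {
    { std::lock_guard<std::mutex> g(m); ++count; }
    cv.notify_one();
  }
  void Wait() {
    std::unique_lock<std::mutex> l(m);
    cv.wait(l, [this] { return count > 0; });
    --count;
  }
};

// Stand-in for the task manager's TryAbort: in this sketch no task can be
// aborted (they are all already running), so the main thread always waits.
bool TryAbort(int /*task_id*/) { return false; }

int main() {
  const int kTasks = 3;
  Semaphore pending;
  std::vector<std::thread> workers;
  for (int i = 0; i < kTasks; i++) {
    workers.emplace_back([&pending] {
      // ... evacuate pages ...
      pending.Signal();  // every task that runs signals exactly once
    });
  }
  for (int i = 0; i < kTasks; i++) {
    if (!TryAbort(i)) pending.Wait();  // wait only for tasks we could not abort
  }
  for (std::thread& t : workers) t.join();
  std::printf("compaction tasks joined\n");
  return 0;
}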
3395 3283
3284 void MarkCompactCollector::EvacuatePages(
3285 CompactionSpaceCollection* compaction_spaces,
3286 SlotsBuffer** evacuation_slots_buffer) {
3287 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
3288 evacuation_slots_buffer);
3289 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3290 Page* p = evacuation_candidates_[i];
3291 DCHECK(p->IsEvacuationCandidate() ||
3292 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3293 DCHECK(p->SweepingDone());
3294 if (p->parallel_compaction_state().TrySetValue(
3295 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3296 if (p->IsEvacuationCandidate()) {
3297 DCHECK_EQ(p->parallel_compaction_state().Value(),
3298 MemoryChunk::kCompactingInProgress);
3299 double start = heap()->MonotonicallyIncreasingTimeInMs();
3300 intptr_t live_bytes = p->LiveBytes();
3301 AlwaysAllocateScope always_allocate(isolate());
3302 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
3303 p->parallel_compaction_state().SetValue(
3304 MemoryChunk::kCompactingFinalize);
3305 compaction_spaces->ReportCompactionProgress(
3306 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
3307 } else {
3308 p->parallel_compaction_state().SetValue(
3309 MemoryChunk::kCompactingAborted);
3310 }
3311 } else {
3312 // There could be popular pages in the list of evacuation candidates
3313 // which we do compact.
3314 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3315 }
3316 }
3317 }
3318 }
3319
3320
3396 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { 3321 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3397 public: 3322 public:
3398 virtual Object* RetainAs(Object* object) { 3323 virtual Object* RetainAs(Object* object) {
3399 if (object->IsHeapObject()) { 3324 if (object->IsHeapObject()) {
3400 HeapObject* heap_object = HeapObject::cast(object); 3325 HeapObject* heap_object = HeapObject::cast(object);
3401 MapWord map_word = heap_object->map_word(); 3326 MapWord map_word = heap_object->map_word();
3402 if (map_word.IsForwardingAddress()) { 3327 if (map_word.IsForwardingAddress()) {
3403 return map_word.ToForwardingAddress(); 3328 return map_word.ToForwardingAddress();
3404 } 3329 }
3405 } 3330 }
(...skipping 122 matching lines...)
3528 // Return true if the given code is deoptimized or will be deoptimized. 3453 // Return true if the given code is deoptimized or will be deoptimized.
3529 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { 3454 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3530 return code->is_optimized_code() && code->marked_for_deoptimization(); 3455 return code->is_optimized_code() && code->marked_for_deoptimization();
3531 } 3456 }
3532 3457
3533 3458
3534 void MarkCompactCollector::RemoveObjectSlots(Address start_slot, 3459 void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
3535 Address end_slot) { 3460 Address end_slot) {
3536 // Remove entries by replacing them with an old-space slot containing a smi 3461 // Remove entries by replacing them with an old-space slot containing a smi
3537 // that is located in an unmovable page. 3462 // that is located in an unmovable page.
3538 for (Page* p : evacuation_candidates_) { 3463 int npages = evacuation_candidates_.length();
3464 for (int i = 0; i < npages; i++) {
3465 Page* p = evacuation_candidates_[i];
3539 DCHECK(p->IsEvacuationCandidate() || 3466 DCHECK(p->IsEvacuationCandidate() ||
3540 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3467 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3541 if (p->IsEvacuationCandidate()) { 3468 if (p->IsEvacuationCandidate()) {
3542 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, 3469 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
3543 end_slot); 3470 end_slot);
3544 } 3471 }
3545 } 3472 }
3546 } 3473 }
3547 3474
3548 3475
(...skipping 59 matching lines...)
3608 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3535 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3609 Map* map = object->synchronized_map(); 3536 Map* map = object->synchronized_map();
3610 int size = object->SizeFromMap(map); 3537 int size = object->SizeFromMap(map);
3611 object->IterateBody(map->instance_type(), size, visitor); 3538 object->IterateBody(map->instance_type(), size, visitor);
3612 } 3539 }
3613 } 3540 }
3614 3541
3615 3542
3616 void MarkCompactCollector::SweepAbortedPages() { 3543 void MarkCompactCollector::SweepAbortedPages() {
3617 // Second pass on aborted pages. 3544 // Second pass on aborted pages.
3618 for (Page* p : evacuation_candidates_) { 3545 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3546 Page* p = evacuation_candidates_[i];
3619 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { 3547 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3620 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); 3548 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
3621 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress); 3549 p->concurrent_sweeping_state().SetValue(Page::kSweepingInProgress);
3622 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3550 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3623 switch (space->identity()) { 3551 switch (space->identity()) {
3624 case OLD_SPACE: 3552 case OLD_SPACE:
3625 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, 3553 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3626 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); 3554 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3627 break; 3555 break;
3628 case CODE_SPACE: 3556 case CODE_SPACE:
(...skipping 11 matching lines...)
3640 } 3568 }
3641 } 3569 }
3642 } 3570 }
3643 } 3571 }
3644 3572
3645 3573
3646 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 3574 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3647 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); 3575 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3648 Heap::RelocationLock relocation_lock(heap()); 3576 Heap::RelocationLock relocation_lock(heap());
3649 3577
3578 HashMap* local_pretenuring_feedback = nullptr;
3650 { 3579 {
3651 GCTracer::Scope gc_scope(heap()->tracer(), 3580 GCTracer::Scope gc_scope(heap()->tracer(),
3652 GCTracer::Scope::MC_EVACUATE_NEW_SPACE); 3581 GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
3653 EvacuationScope evacuation_scope(this); 3582 EvacuationScope evacuation_scope(this);
3583 EvacuateNewSpacePrologue();
3584 local_pretenuring_feedback = EvacuateNewSpaceInParallel();
3585 heap_->new_space()->set_age_mark(heap_->new_space()->top());
3586 }
3654 3587
3655 EvacuateNewSpacePrologue(); 3588 {
3589 GCTracer::Scope gc_scope(heap()->tracer(),
3590 GCTracer::Scope::MC_EVACUATE_CANDIDATES);
3591 EvacuationScope evacuation_scope(this);
3656 EvacuatePagesInParallel(); 3592 EvacuatePagesInParallel();
3657 EvacuateNewSpaceEpilogue(); 3593 }
3658 heap()->new_space()->set_age_mark(heap()->new_space()->top()); 3594
3595 {
3596 heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
3597 delete local_pretenuring_feedback;
3659 } 3598 }
3660 3599
3661 UpdatePointersAfterEvacuation(); 3600 UpdatePointersAfterEvacuation();
3662 3601
3663 { 3602 {
3664 GCTracer::Scope gc_scope(heap()->tracer(), 3603 GCTracer::Scope gc_scope(heap()->tracer(),
3665 GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 3604 GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3666 // After updating all pointers, we can finally sweep the aborted pages, 3605 // After updating all pointers, we can finally sweep the aborted pages,
3667 // effectively overriding any forward pointers. 3606 // effectively overriding any forward pointers.
3668 SweepAbortedPages(); 3607 SweepAbortedPages();
(...skipping 56 matching lines...)
3725 &updating_visitor); 3664 &updating_visitor);
3726 } 3665 }
3727 // Update roots. 3666 // Update roots.
3728 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 3667 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3729 3668
3730 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), 3669 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3731 &Heap::ScavengeStoreBufferCallback); 3670 &Heap::ScavengeStoreBufferCallback);
3732 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 3671 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3733 } 3672 }
3734 3673
3674 int npages = evacuation_candidates_.length();
3735 { 3675 {
3736 GCTracer::Scope gc_scope( 3676 GCTracer::Scope gc_scope(
3737 heap()->tracer(), 3677 heap()->tracer(),
3738 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); 3678 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
3739 for (Page* p : evacuation_candidates_) { 3679 for (int i = 0; i < npages; i++) {
3680 Page* p = evacuation_candidates_[i];
3740 DCHECK(p->IsEvacuationCandidate() || 3681 DCHECK(p->IsEvacuationCandidate() ||
3741 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3682 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3742 3683
3743 if (p->IsEvacuationCandidate()) { 3684 if (p->IsEvacuationCandidate()) {
3744 UpdateSlotsRecordedIn(p->slots_buffer()); 3685 UpdateSlotsRecordedIn(p->slots_buffer());
3745 if (FLAG_trace_fragmentation_verbose) { 3686 if (FLAG_trace_fragmentation_verbose) {
3746 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), 3687 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3747 SlotsBuffer::SizeOfChain(p->slots_buffer())); 3688 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3748 } 3689 }
3749 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); 3690 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
(...skipping 54 matching lines...)
3804 heap_->UpdateReferencesInExternalStringTable( 3745 heap_->UpdateReferencesInExternalStringTable(
3805 &UpdateReferenceInExternalStringTableEntry); 3746 &UpdateReferenceInExternalStringTableEntry);
3806 3747
3807 EvacuationWeakObjectRetainer evacuation_object_retainer; 3748 EvacuationWeakObjectRetainer evacuation_object_retainer;
3808 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); 3749 heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
3809 } 3750 }
3810 } 3751 }
3811 3752
3812 3753
3813 void MarkCompactCollector::ReleaseEvacuationCandidates() { 3754 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3814 for (Page* p : evacuation_candidates_) { 3755 int npages = evacuation_candidates_.length();
3756 for (int i = 0; i < npages; i++) {
3757 Page* p = evacuation_candidates_[i];
3815 if (!p->IsEvacuationCandidate()) continue; 3758 if (!p->IsEvacuationCandidate()) continue;
3816 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3759 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3817 space->Free(p->area_start(), p->area_size()); 3760 space->Free(p->area_start(), p->area_size());
3818 p->set_scan_on_scavenge(false); 3761 p->set_scan_on_scavenge(false);
3819 p->ResetLiveBytes(); 3762 p->ResetLiveBytes();
3820 CHECK(p->SweepingDone()); 3763 CHECK(p->SweepingDone());
3821 space->ReleasePage(p, true); 3764 space->ReleasePage(p, true);
3822 } 3765 }
3823 evacuation_candidates_.Rewind(0); 3766 evacuation_candidates_.Rewind(0);
3824 compacting_ = false; 3767 compacting_ = false;
(...skipping 238 matching lines...)
4063 MarkBit mark_bit = Marking::MarkBitFrom(host); 4006 MarkBit mark_bit = Marking::MarkBitFrom(host);
4064 if (Marking::IsBlack(mark_bit)) { 4007 if (Marking::IsBlack(mark_bit)) {
4065 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); 4008 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
4066 RecordRelocSlot(&rinfo, target); 4009 RecordRelocSlot(&rinfo, target);
4067 } 4010 }
4068 } 4011 }
4069 } 4012 }
4070 4013
4071 } // namespace internal 4014 } // namespace internal
4072 } // namespace v8 4015 } // namespace v8