Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 1577853007: [heap] Parallel newspace evacuation, semispace copy, and compaction \o/ (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Refactoring Created 4 years, 11 months ago
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/base/sys-info.h" 9 #include "src/base/sys-info.h"
10 #include "src/code-stubs.h" 10 #include "src/code-stubs.h"
11 #include "src/compilation-cache.h" 11 #include "src/compilation-cache.h"
12 #include "src/deoptimizer.h" 12 #include "src/deoptimizer.h"
13 #include "src/execution.h" 13 #include "src/execution.h"
14 #include "src/frames-inl.h" 14 #include "src/frames-inl.h"
15 #include "src/gdb-jit.h" 15 #include "src/gdb-jit.h"
16 #include "src/global-handles.h" 16 #include "src/global-handles.h"
17 #include "src/heap/array-buffer-tracker.h" 17 #include "src/heap/array-buffer-tracker.h"
18 #include "src/heap/gc-tracer.h" 18 #include "src/heap/gc-tracer.h"
19 #include "src/heap/incremental-marking.h" 19 #include "src/heap/incremental-marking.h"
20 #include "src/heap/mark-compact-inl.h" 20 #include "src/heap/mark-compact-inl.h"
21 #include "src/heap/object-stats.h" 21 #include "src/heap/object-stats.h"
22 #include "src/heap/objects-visiting.h" 22 #include "src/heap/objects-visiting.h"
23 #include "src/heap/objects-visiting-inl.h" 23 #include "src/heap/objects-visiting-inl.h"
24 #include "src/heap/slots-buffer.h" 24 #include "src/heap/slots-buffer.h"
25 #include "src/heap/spaces-inl.h" 25 #include "src/heap/spaces-inl.h"
26 #include "src/ic/ic.h" 26 #include "src/ic/ic.h"
27 #include "src/ic/stub-cache.h" 27 #include "src/ic/stub-cache.h"
28 #include "src/profiler/cpu-profiler.h" 28 #include "src/profiler/cpu-profiler.h"
29 #include "src/utils-inl.h"
29 #include "src/v8.h" 30 #include "src/v8.h"
30 31
31 namespace v8 { 32 namespace v8 {
32 namespace internal { 33 namespace internal {
33 34
34 35
35 const char* Marking::kWhiteBitPattern = "00"; 36 const char* Marking::kWhiteBitPattern = "00";
36 const char* Marking::kBlackBitPattern = "11"; 37 const char* Marking::kBlackBitPattern = "11";
37 const char* Marking::kGreyBitPattern = "10"; 38 const char* Marking::kGreyBitPattern = "10";
38 const char* Marking::kImpossibleBitPattern = "01"; 39 const char* Marking::kImpossibleBitPattern = "01";
(...skipping 274 matching lines...)
313 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() { 314 void MarkCompactCollector::ClearInvalidStoreAndSlotsBufferEntries() {
314 { 315 {
315 GCTracer::Scope gc_scope(heap()->tracer(), 316 GCTracer::Scope gc_scope(heap()->tracer(),
316 GCTracer::Scope::MC_CLEAR_STORE_BUFFER); 317 GCTracer::Scope::MC_CLEAR_STORE_BUFFER);
317 heap_->store_buffer()->ClearInvalidStoreBufferEntries(); 318 heap_->store_buffer()->ClearInvalidStoreBufferEntries();
318 } 319 }
319 320
320 { 321 {
321 GCTracer::Scope gc_scope(heap()->tracer(), 322 GCTracer::Scope gc_scope(heap()->tracer(),
322 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER); 323 GCTracer::Scope::MC_CLEAR_SLOTS_BUFFER);
323 int number_of_pages = evacuation_candidates_.length(); 324 for (Page* p : evacuation_candidates_) {
324 for (int i = 0; i < number_of_pages; i++) {
325 Page* p = evacuation_candidates_[i];
326 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer()); 325 SlotsBuffer::RemoveInvalidSlots(heap_, p->slots_buffer());
327 } 326 }
328 } 327 }
329 #ifdef VERIFY_HEAP 328 #ifdef VERIFY_HEAP
330 if (FLAG_verify_heap) { 329 if (FLAG_verify_heap) {
331 VerifyValidStoreAndSlotsBufferEntries(); 330 VerifyValidStoreAndSlotsBufferEntries();
332 } 331 }
333 #endif 332 #endif
334 } 333 }
335 334
(...skipping 135 matching lines...)
471 470
472 LargeObjectIterator it(heap_->lo_space()); 471 LargeObjectIterator it(heap_->lo_space());
473 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) { 472 for (HeapObject* obj = it.Next(); obj != NULL; obj = it.Next()) {
474 Marking::MarkWhite(Marking::MarkBitFrom(obj)); 473 Marking::MarkWhite(Marking::MarkBitFrom(obj));
475 Page::FromAddress(obj->address())->ResetProgressBar(); 474 Page::FromAddress(obj->address())->ResetProgressBar();
476 Page::FromAddress(obj->address())->ResetLiveBytes(); 475 Page::FromAddress(obj->address())->ResetLiveBytes();
477 } 476 }
478 } 477 }
479 478
480 479
481 class MarkCompactCollector::CompactionTask : public CancelableTask {
482 public:
483 explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
484 : CancelableTask(heap->isolate()), spaces_(spaces) {}
485
486 virtual ~CompactionTask() {}
487
488 private:
489 // v8::internal::CancelableTask overrides.
490 void RunInternal() override {
491 MarkCompactCollector* mark_compact =
492 isolate()->heap()->mark_compact_collector();
493 SlotsBuffer* evacuation_slots_buffer = nullptr;
494 mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
495 mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
496 mark_compact->pending_compaction_tasks_semaphore_.Signal();
497 }
498
499 CompactionSpaceCollection* spaces_;
500
501 DISALLOW_COPY_AND_ASSIGN(CompactionTask);
502 };
503
504
505 class MarkCompactCollector::SweeperTask : public v8::Task { 480 class MarkCompactCollector::SweeperTask : public v8::Task {
506 public: 481 public:
507 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {} 482 SweeperTask(Heap* heap, PagedSpace* space) : heap_(heap), space_(space) {}
508 483
509 virtual ~SweeperTask() {} 484 virtual ~SweeperTask() {}
510 485
511 private: 486 private:
512 // v8::Task overrides. 487 // v8::Task overrides.
513 void Run() override { 488 void Run() override {
514 heap_->mark_compact_collector()->SweepInParallel(space_, 0); 489 heap_->mark_compact_collector()->SweepInParallel(space_, 0);
(...skipping 309 matching lines...)
824 "compaction-selection: space=%s reduce_memory=%d pages=%d " 799 "compaction-selection: space=%s reduce_memory=%d pages=%d "
825 "total_live_bytes=%d\n", 800 "total_live_bytes=%d\n",
826 AllocationSpaceName(space->identity()), reduce_memory, 801 AllocationSpaceName(space->identity()), reduce_memory,
827 candidate_count, total_live_bytes / KB); 802 candidate_count, total_live_bytes / KB);
828 } 803 }
829 } 804 }
830 805
831 806
832 void MarkCompactCollector::AbortCompaction() { 807 void MarkCompactCollector::AbortCompaction() {
833 if (compacting_) { 808 if (compacting_) {
834 int npages = evacuation_candidates_.length(); 809 for (Page* p : evacuation_candidates_) {
835 for (int i = 0; i < npages; i++) {
836 Page* p = evacuation_candidates_[i];
837 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); 810 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
838 p->ClearEvacuationCandidate(); 811 p->ClearEvacuationCandidate();
839 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION); 812 p->ClearFlag(MemoryChunk::RESCAN_ON_EVACUATION);
840 } 813 }
841 compacting_ = false; 814 compacting_ = false;
842 evacuation_candidates_.Rewind(0); 815 evacuation_candidates_.Rewind(0);
843 } 816 }
844 DCHECK_EQ(0, evacuation_candidates_.length()); 817 DCHECK_EQ(0, evacuation_candidates_.length());
845 } 818 }
846 819
(...skipping 694 matching lines...)
1541 class MarkCompactCollector::HeapObjectVisitor { 1514 class MarkCompactCollector::HeapObjectVisitor {
1542 public: 1515 public:
1543 virtual ~HeapObjectVisitor() {} 1516 virtual ~HeapObjectVisitor() {}
1544 virtual bool Visit(HeapObject* object) = 0; 1517 virtual bool Visit(HeapObject* object) = 0;
1545 }; 1518 };
1546 1519
1547 1520
1548 class MarkCompactCollector::EvacuateVisitorBase 1521 class MarkCompactCollector::EvacuateVisitorBase
1549 : public MarkCompactCollector::HeapObjectVisitor { 1522 : public MarkCompactCollector::HeapObjectVisitor {
1550 public: 1523 public:
1551 EvacuateVisitorBase(Heap* heap, SlotsBuffer** evacuation_slots_buffer) 1524 EvacuateVisitorBase(Heap* heap, CompactionSpaceCollection* compaction_spaces,
1552 : heap_(heap), evacuation_slots_buffer_(evacuation_slots_buffer) {} 1525 SlotsBuffer** evacuation_slots_buffer,
1526 LocalStoreBuffer* local_store_buffer)
1527 : heap_(heap),
1528 evacuation_slots_buffer_(evacuation_slots_buffer),
1529 compaction_spaces_(compaction_spaces),
1530 local_store_buffer_(local_store_buffer) {}
1553 1531
1554 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object, 1532 bool TryEvacuateObject(PagedSpace* target_space, HeapObject* object,
1555 HeapObject** target_object) { 1533 HeapObject** target_object) {
1556 int size = object->Size(); 1534 int size = object->Size();
1557 AllocationAlignment alignment = object->RequiredAlignment(); 1535 AllocationAlignment alignment = object->RequiredAlignment();
1558 AllocationResult allocation = target_space->AllocateRaw(size, alignment); 1536 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
1559 if (allocation.To(target_object)) { 1537 if (allocation.To(target_object)) {
1560 heap_->mark_compact_collector()->MigrateObject( 1538 heap_->mark_compact_collector()->MigrateObject(
1561 *target_object, object, size, target_space->identity(), 1539 *target_object, object, size, target_space->identity(),
1562 evacuation_slots_buffer_); 1540 evacuation_slots_buffer_, local_store_buffer_);
1563 return true; 1541 return true;
1564 } 1542 }
1565 return false; 1543 return false;
1566 } 1544 }
1567 1545
1568 protected: 1546 protected:
1569 Heap* heap_; 1547 Heap* heap_;
1570 SlotsBuffer** evacuation_slots_buffer_; 1548 SlotsBuffer** evacuation_slots_buffer_;
1549 CompactionSpaceCollection* compaction_spaces_;
1550 LocalStoreBuffer* local_store_buffer_;
1571 }; 1551 };
1572 1552
1573 1553
1574 class MarkCompactCollector::EvacuateNewSpaceVisitor final 1554 class MarkCompactCollector::EvacuateNewSpaceVisitor final
1575 : public MarkCompactCollector::EvacuateVisitorBase { 1555 : public MarkCompactCollector::EvacuateVisitorBase {
1576 public: 1556 public:
1577 static const intptr_t kLabSize = 4 * KB; 1557 static const intptr_t kLabSize = 4 * KB;
1578 static const intptr_t kMaxLabObjectSize = 256; 1558 static const intptr_t kMaxLabObjectSize = 256;
1579 1559
1580 explicit EvacuateNewSpaceVisitor(Heap* heap, 1560 explicit EvacuateNewSpaceVisitor(Heap* heap,
1561 CompactionSpaceCollection* compaction_spaces,
1581 SlotsBuffer** evacuation_slots_buffer, 1562 SlotsBuffer** evacuation_slots_buffer,
1563 LocalStoreBuffer* local_store_buffer,
1582 HashMap* local_pretenuring_feedback) 1564 HashMap* local_pretenuring_feedback)
1583 : EvacuateVisitorBase(heap, evacuation_slots_buffer), 1565 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
1566 local_store_buffer),
1584 buffer_(LocalAllocationBuffer::InvalidBuffer()), 1567 buffer_(LocalAllocationBuffer::InvalidBuffer()),
1585 space_to_allocate_(NEW_SPACE), 1568 space_to_allocate_(NEW_SPACE),
1586 promoted_size_(0), 1569 promoted_size_(0),
1587 semispace_copied_size_(0), 1570 semispace_copied_size_(0),
1588 local_pretenuring_feedback_(local_pretenuring_feedback) {} 1571 local_pretenuring_feedback_(local_pretenuring_feedback) {}
1589 1572
1590 bool Visit(HeapObject* object) override { 1573 bool Visit(HeapObject* object) override {
1591 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_); 1574 heap_->UpdateAllocationSite(object, local_pretenuring_feedback_);
1592 int size = object->Size(); 1575 int size = object->Size();
1593 HeapObject* target_object = nullptr; 1576 HeapObject* target_object = nullptr;
1594 if (heap_->ShouldBePromoted(object->address(), size) && 1577 if (heap_->ShouldBePromoted(object->address(), size) &&
1595 TryEvacuateObject(heap_->old_space(), object, &target_object)) { 1578 TryEvacuateObject(compaction_spaces_->Get(OLD_SPACE), object,
1579 &target_object)) {
1596 // If we end up needing more special cases, we should factor this out. 1580 // If we end up needing more special cases, we should factor this out.
1597 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) { 1581 if (V8_UNLIKELY(target_object->IsJSArrayBuffer())) {
1598 heap_->array_buffer_tracker()->Promote( 1582 heap_->array_buffer_tracker()->Promote(
1599 JSArrayBuffer::cast(target_object)); 1583 JSArrayBuffer::cast(target_object));
1600 } 1584 }
1601 promoted_size_ += size; 1585 promoted_size_ += size;
1602 return true; 1586 return true;
1603 } 1587 }
1604 HeapObject* target = nullptr; 1588 HeapObject* target = nullptr;
1605 AllocationSpace space = AllocateTargetObject(object, &target); 1589 AllocationSpace space = AllocateTargetObject(object, &target);
1606 heap_->mark_compact_collector()->MigrateObject( 1590 heap_->mark_compact_collector()->MigrateObject(
1607 HeapObject::cast(target), object, size, space, 1591 HeapObject::cast(target), object, size, space,
1608 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_); 1592 (space == NEW_SPACE) ? nullptr : evacuation_slots_buffer_,
1593 local_store_buffer_);
1609 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { 1594 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1610 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); 1595 heap_->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1611 } 1596 }
1612 semispace_copied_size_ += size; 1597 semispace_copied_size_ += size;
1613 return true; 1598 return true;
1614 } 1599 }
1615 1600
1616 intptr_t promoted_size() { return promoted_size_; } 1601 intptr_t promoted_size() { return promoted_size_; }
1617 intptr_t semispace_copied_size() { return semispace_copied_size_; } 1602 intptr_t semispace_copied_size() { return semispace_copied_size_; }
1618 1603
(...skipping 51 matching lines...)
1670 if (allocation.IsRetry()) { 1655 if (allocation.IsRetry()) {
1671 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE; 1656 if (mode == kStickyBailoutOldSpace) space_to_allocate_ = OLD_SPACE;
1672 } 1657 }
1673 } 1658 }
1674 } 1659 }
1675 return allocation; 1660 return allocation;
1676 } 1661 }
1677 1662
1678 inline AllocationResult AllocateInOldSpace(int size_in_bytes, 1663 inline AllocationResult AllocateInOldSpace(int size_in_bytes,
1679 AllocationAlignment alignment) { 1664 AllocationAlignment alignment) {
1680 AllocationResult allocation = 1665 AllocationResult allocation = compaction_spaces_->Get(OLD_SPACE)
1681 heap_->old_space()->AllocateRaw(size_in_bytes, alignment); 1666 ->AllocateRaw(size_in_bytes, alignment);
1682 if (allocation.IsRetry()) { 1667 if (allocation.IsRetry()) {
1683 FatalProcessOutOfMemory( 1668 FatalProcessOutOfMemory(
1684 "MarkCompactCollector: semi-space copy, fallback in old gen\n"); 1669 "MarkCompactCollector: semi-space copy, fallback in old gen\n");
1685 } 1670 }
1686 return allocation; 1671 return allocation;
1687 } 1672 }
1688 1673
1689 inline AllocationResult AllocateInLab(int size_in_bytes, 1674 inline AllocationResult AllocateInLab(int size_in_bytes,
1690 AllocationAlignment alignment) { 1675 AllocationAlignment alignment) {
1691 AllocationResult allocation; 1676 AllocationResult allocation;
(...skipping 25 matching lines...)
1717 intptr_t semispace_copied_size_; 1702 intptr_t semispace_copied_size_;
1718 HashMap* local_pretenuring_feedback_; 1703 HashMap* local_pretenuring_feedback_;
1719 }; 1704 };
1720 1705
1721 1706
1722 class MarkCompactCollector::EvacuateOldSpaceVisitor final 1707 class MarkCompactCollector::EvacuateOldSpaceVisitor final
1723 : public MarkCompactCollector::EvacuateVisitorBase { 1708 : public MarkCompactCollector::EvacuateVisitorBase {
1724 public: 1709 public:
1725 EvacuateOldSpaceVisitor(Heap* heap, 1710 EvacuateOldSpaceVisitor(Heap* heap,
1726 CompactionSpaceCollection* compaction_spaces, 1711 CompactionSpaceCollection* compaction_spaces,
1727 SlotsBuffer** evacuation_slots_buffer) 1712 SlotsBuffer** evacuation_slots_buffer,
1728 : EvacuateVisitorBase(heap, evacuation_slots_buffer), 1713 LocalStoreBuffer* local_store_buffer)
1729 compaction_spaces_(compaction_spaces) {} 1714 : EvacuateVisitorBase(heap, compaction_spaces, evacuation_slots_buffer,
1715 local_store_buffer) {}
1730 1716
1731 bool Visit(HeapObject* object) override { 1717 bool Visit(HeapObject* object) override {
1732 CompactionSpace* target_space = compaction_spaces_->Get( 1718 CompactionSpace* target_space = compaction_spaces_->Get(
1733 Page::FromAddress(object->address())->owner()->identity()); 1719 Page::FromAddress(object->address())->owner()->identity());
1734 HeapObject* target_object = nullptr; 1720 HeapObject* target_object = nullptr;
1735 if (TryEvacuateObject(target_space, object, &target_object)) { 1721 if (TryEvacuateObject(target_space, object, &target_object)) {
1736 DCHECK(object->map_word().IsForwardingAddress()); 1722 DCHECK(object->map_word().IsForwardingAddress());
1737 return true; 1723 return true;
1738 } 1724 }
1739 return false; 1725 return false;
1740 } 1726 }
1741 1727
1742 private: 1728 private:
Hannes Payer (out of office) 2016/01/20 13:19:39 The private section is not very useful anymore.
Michael Lippautz 2016/01/21 10:00:08 Done.
1743 CompactionSpaceCollection* compaction_spaces_;
1744 }; 1729 };
1745 1730
1746 1731
1747 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) { 1732 void MarkCompactCollector::DiscoverGreyObjectsInSpace(PagedSpace* space) {
1748 PageIterator it(space); 1733 PageIterator it(space);
1749 while (it.has_next()) { 1734 while (it.has_next()) {
1750 Page* p = it.next(); 1735 Page* p = it.next();
1751 DiscoverGreyObjectsOnPage(p); 1736 DiscoverGreyObjectsOnPage(p);
1752 if (marking_deque()->IsFull()) return; 1737 if (marking_deque()->IsFull()) return;
1753 } 1738 }
(...skipping 789 matching lines...)
2543 while (obj != Smi::FromInt(0)) { 2528 while (obj != Smi::FromInt(0)) {
2544 TransitionArray* array = TransitionArray::cast(obj); 2529 TransitionArray* array = TransitionArray::cast(obj);
2545 obj = array->next_link(); 2530 obj = array->next_link();
2546 array->set_next_link(undefined, SKIP_WRITE_BARRIER); 2531 array->set_next_link(undefined, SKIP_WRITE_BARRIER);
2547 } 2532 }
2548 heap()->set_encountered_transition_arrays(Smi::FromInt(0)); 2533 heap()->set_encountered_transition_arrays(Smi::FromInt(0));
2549 } 2534 }
2550 2535
2551 2536
2552 void MarkCompactCollector::RecordMigratedSlot( 2537 void MarkCompactCollector::RecordMigratedSlot(
2553 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) { 2538 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer,
2539 LocalStoreBuffer* local_store_buffer) {
2554 // When parallel compaction is in progress, store and slots buffer entries 2540 // When parallel compaction is in progress, store and slots buffer entries
2555 // require synchronization. 2541 // require synchronization.
2556 if (heap_->InNewSpace(value)) { 2542 if (heap_->InNewSpace(value)) {
2557 if (compaction_in_progress_) { 2543 if (compaction_in_progress_) {
2558 heap_->store_buffer()->MarkSynchronized(slot); 2544 local_store_buffer->Record(slot);
2559 } else { 2545 } else {
2560 heap_->store_buffer()->Mark(slot); 2546 heap_->store_buffer()->Mark(slot);
2561 } 2547 }
2562 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { 2548 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2563 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer, 2549 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2564 reinterpret_cast<Object**>(slot), 2550 reinterpret_cast<Object**>(slot),
2565 SlotsBuffer::IGNORE_OVERFLOW); 2551 SlotsBuffer::IGNORE_OVERFLOW);
2566 } 2552 }
2567 } 2553 }
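The comment above explains why migrated-slot recording needs synchronization while parallel compaction is in progress; the patch addresses this by writing new-space slots into a per-task LocalStoreBuffer (unsynchronized Record()) and merging it back into the global store buffer later on the main thread, in Evacuator::Finalize(). A minimal sketch of that record-locally-then-merge pattern (illustrative only, not V8 code; GlobalSlotSet and ThreadLocalSlotBuffer are placeholder names):

// Illustrative sketch only -- not V8 code. Each worker records slots into its
// own unsynchronized buffer; the lock is taken once, when merging.
#include <cstdint>
#include <mutex>
#include <unordered_set>
#include <vector>

using Address = uintptr_t;

class GlobalSlotSet {
 public:
  void Mark(Address slot) {  // synchronized / main-thread path
    std::lock_guard<std::mutex> guard(mutex_);
    slots_.insert(slot);
  }
  void MergeFrom(const std::vector<Address>& local) {
    std::lock_guard<std::mutex> guard(mutex_);
    slots_.insert(local.begin(), local.end());
  }

 private:
  std::mutex mutex_;
  std::unordered_set<Address> slots_;
};

class ThreadLocalSlotBuffer {
 public:
  void Record(Address slot) { slots_.push_back(slot); }  // no locking needed
  void Process(GlobalSlotSet* global) { global->MergeFrom(slots_); }

 private:
  std::vector<Address> slots_;
};

Each compaction task owns one such local buffer, so the hot Record() path stays lock-free and the synchronization cost is paid once per task when merging.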
2568 2554
(...skipping 61 matching lines...)
2630 if (!success) { 2616 if (!success) {
2631 EvictPopularEvacuationCandidate(target_page); 2617 EvictPopularEvacuationCandidate(target_page);
2632 } 2618 }
2633 } 2619 }
2634 } 2620 }
2635 2621
2636 2622
2637 class RecordMigratedSlotVisitor final : public ObjectVisitor { 2623 class RecordMigratedSlotVisitor final : public ObjectVisitor {
2638 public: 2624 public:
2639 RecordMigratedSlotVisitor(MarkCompactCollector* collector, 2625 RecordMigratedSlotVisitor(MarkCompactCollector* collector,
2640 SlotsBuffer** evacuation_slots_buffer) 2626 SlotsBuffer** evacuation_slots_buffer,
2627 LocalStoreBuffer* local_store_buffer)
2641 : collector_(collector), 2628 : collector_(collector),
2642 evacuation_slots_buffer_(evacuation_slots_buffer) {} 2629 evacuation_slots_buffer_(evacuation_slots_buffer),
2630 local_store_buffer_(local_store_buffer) {}
2643 2631
2644 V8_INLINE void VisitPointer(Object** p) override { 2632 V8_INLINE void VisitPointer(Object** p) override {
2645 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), 2633 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p),
2646 evacuation_slots_buffer_); 2634 evacuation_slots_buffer_,
2635 local_store_buffer_);
2647 } 2636 }
2648 2637
2649 V8_INLINE void VisitPointers(Object** start, Object** end) override { 2638 V8_INLINE void VisitPointers(Object** start, Object** end) override {
2650 while (start < end) { 2639 while (start < end) {
2651 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), 2640 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start),
2652 evacuation_slots_buffer_); 2641 evacuation_slots_buffer_,
2642 local_store_buffer_);
2653 ++start; 2643 ++start;
2654 } 2644 }
2655 } 2645 }
2656 2646
2657 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { 2647 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override {
2658 if (collector_->compacting_) { 2648 if (collector_->compacting_) {
2659 Address code_entry = Memory::Address_at(code_entry_slot); 2649 Address code_entry = Memory::Address_at(code_entry_slot);
2660 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, 2650 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
2661 evacuation_slots_buffer_); 2651 evacuation_slots_buffer_);
2662 } 2652 }
2663 } 2653 }
2664 2654
2665 private: 2655 private:
2666 MarkCompactCollector* collector_; 2656 MarkCompactCollector* collector_;
2667 SlotsBuffer** evacuation_slots_buffer_; 2657 SlotsBuffer** evacuation_slots_buffer_;
2658 LocalStoreBuffer* local_store_buffer_;
2668 }; 2659 };
2669 2660
2670 2661
2671 // We scavenge new space simultaneously with sweeping. This is done in two 2662 // We scavenge new space simultaneously with sweeping. This is done in two
2672 // passes. 2663 // passes.
2673 // 2664 //
2674 // The first pass migrates all alive objects from one semispace to another or 2665 // The first pass migrates all alive objects from one semispace to another or
2675 // promotes them to old space. Forwarding address is written directly into 2666 // promotes them to old space. Forwarding address is written directly into
2676 // first word of object without any encoding. If object is dead we write 2667 // first word of object without any encoding. If object is dead we write
2677 // NULL as a forwarding address. 2668 // NULL as a forwarding address.
2678 // 2669 //
2679 // The second pass updates pointers to new space in all spaces. It is possible 2670 // The second pass updates pointers to new space in all spaces. It is possible
2680 // to encounter pointers to dead new space objects during traversal of pointers 2671 // to encounter pointers to dead new space objects during traversal of pointers
2681 // to new space. We should clear them to avoid encountering them during next 2672 // to new space. We should clear them to avoid encountering them during next
2682 // pointer iteration. This is an issue if the store buffer overflows and we 2673 // pointer iteration. This is an issue if the store buffer overflows and we
2683 // have to scan the entire old space, including dead objects, looking for 2674 // have to scan the entire old space, including dead objects, looking for
2684 // pointers to new space. 2675 // pointers to new space.
2685 void MarkCompactCollector::MigrateObject( 2676 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src,
2686 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest, 2677 int size, AllocationSpace dest,
2687 SlotsBuffer** evacuation_slots_buffer) { 2678 SlotsBuffer** evacuation_slots_buffer,
2679 LocalStoreBuffer* local_store_buffer) {
2688 Address dst_addr = dst->address(); 2680 Address dst_addr = dst->address();
2689 Address src_addr = src->address(); 2681 Address src_addr = src->address();
2690 DCHECK(heap()->AllowedToBeMigrated(src, dest)); 2682 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2691 DCHECK(dest != LO_SPACE); 2683 DCHECK(dest != LO_SPACE);
2692 if (dest == OLD_SPACE) { 2684 if (dest == OLD_SPACE) {
2693 DCHECK_OBJECT_SIZE(size); 2685 DCHECK_OBJECT_SIZE(size);
2694 DCHECK(evacuation_slots_buffer != nullptr); 2686 DCHECK(evacuation_slots_buffer != nullptr);
2695 DCHECK(IsAligned(size, kPointerSize)); 2687 DCHECK(IsAligned(size, kPointerSize));
2696 2688
2697 heap()->MoveBlock(dst->address(), src->address(), size); 2689 heap()->MoveBlock(dst->address(), src->address(), size);
2698 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer); 2690 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer,
2691 local_store_buffer);
2699 dst->IterateBody(&visitor); 2692 dst->IterateBody(&visitor);
2700 } else if (dest == CODE_SPACE) { 2693 } else if (dest == CODE_SPACE) {
2701 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); 2694 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space());
2702 DCHECK(evacuation_slots_buffer != nullptr); 2695 DCHECK(evacuation_slots_buffer != nullptr);
2703 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); 2696 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2704 heap()->MoveBlock(dst_addr, src_addr, size); 2697 heap()->MoveBlock(dst_addr, src_addr, size);
2705 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); 2698 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
2706 Code::cast(dst)->Relocate(dst_addr - src_addr); 2699 Code::cast(dst)->Relocate(dst_addr - src_addr);
2707 } else { 2700 } else {
2708 DCHECK_OBJECT_SIZE(size); 2701 DCHECK_OBJECT_SIZE(size);
(...skipping 341 matching lines...)
3050 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3043 CHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3051 3044
3052 // The target object is black but we don't know if the source slot is black. 3045 // The target object is black but we don't know if the source slot is black.
3053 // The source object could have died and the slot could be part of a free 3046 // The source object could have died and the slot could be part of a free
3054 // space. Use the mark bit iterator to find out about liveness of the slot. 3047 // space. Use the mark bit iterator to find out about liveness of the slot.
3055 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot)); 3048 CHECK(IsSlotInBlackObjectSlow(Page::FromAddress(slot), slot));
3056 } 3049 }
3057 3050
3058 3051
3059 void MarkCompactCollector::EvacuateNewSpacePrologue() { 3052 void MarkCompactCollector::EvacuateNewSpacePrologue() {
3060 // There are soft limits in the allocation code, designed to trigger a mark
3061 // sweep collection by failing allocations. But since we are already in
3062 // a mark-sweep allocation, there is no sense in trying to trigger one.
3063 AlwaysAllocateScope scope(isolate());
3064
3065 NewSpace* new_space = heap()->new_space(); 3053 NewSpace* new_space = heap()->new_space();
3066 3054 NewSpacePageIterator it(new_space->bottom(), new_space->top());
3067 // Store allocation range before flipping semispaces. 3055 // Append the list of new space pages to be processed.
3068 Address from_bottom = new_space->bottom();
3069 Address from_top = new_space->top();
3070
3071 // Flip the semispaces. After flipping, to space is empty, from space has
3072 // live objects.
3073 new_space->Flip();
3074 new_space->ResetAllocationInfo();
3075
3076 newspace_evacuation_candidates_.Clear();
3077 NewSpacePageIterator it(from_bottom, from_top);
3078 while (it.has_next()) { 3056 while (it.has_next()) {
3079 newspace_evacuation_candidates_.Add(it.next()); 3057 newspace_evacuation_candidates_.Add(it.next());
3080 } 3058 }
3059 new_space->Flip();
3060 new_space->ResetAllocationInfo();
3081 } 3061 }
3082 3062
3083 3063
3084 HashMap* MarkCompactCollector::EvacuateNewSpaceInParallel() { 3064 void MarkCompactCollector::EvacuateNewSpaceEpilogue() {
3085 HashMap* local_pretenuring_feedback = new HashMap( 3065 newspace_evacuation_candidates_.Rewind(0);
3086 HashMap::PointersMatch, kInitialLocalPretenuringFeedbackCapacity);
3087 EvacuateNewSpaceVisitor new_space_visitor(heap(), &migration_slots_buffer_,
3088 local_pretenuring_feedback);
3089 // First pass: traverse all objects in inactive semispace, remove marks,
3090 // migrate live objects and write forwarding addresses. This stage puts
3091 // new entries in the store buffer and may cause some pages to be marked
3092 // scan-on-scavenge.
3093 for (int i = 0; i < newspace_evacuation_candidates_.length(); i++) {
3094 NewSpacePage* p =
3095 reinterpret_cast<NewSpacePage*>(newspace_evacuation_candidates_[i]);
3096 bool ok = VisitLiveObjects(p, &new_space_visitor, kClearMarkbits);
3097 USE(ok);
3098 DCHECK(ok);
3099 }
3100 heap_->IncrementPromotedObjectsSize(
3101 static_cast<int>(new_space_visitor.promoted_size()));
3102 heap_->IncrementSemiSpaceCopiedObjectSize(
3103 static_cast<int>(new_space_visitor.semispace_copied_size()));
3104 heap_->IncrementYoungSurvivorsCounter(
3105 static_cast<int>(new_space_visitor.promoted_size()) +
3106 static_cast<int>(new_space_visitor.semispace_copied_size()));
3107 return local_pretenuring_feedback;
3108 } 3066 }
3109 3067
3110 3068
3111 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized( 3069 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
3112 SlotsBuffer* evacuation_slots_buffer) { 3070 SlotsBuffer* evacuation_slots_buffer) {
3113 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_); 3071 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
3114 evacuation_slots_buffers_.Add(evacuation_slots_buffer); 3072 evacuation_slots_buffers_.Add(evacuation_slots_buffer);
3115 } 3073 }
3116 3074
3117 3075
3118 int MarkCompactCollector::NumberOfParallelCompactionTasks() { 3076 class MarkCompactCollector::Evacuator : public Malloced {
3077 public:
3078 Evacuator(MarkCompactCollector* collector,
3079 const List<Page*>& evacuation_candidates,
3080 const List<NewSpacePage*>& newspace_evacuation_candidates)
3081 : collector_(collector),
3082 evacuation_candidates_(evacuation_candidates),
3083 newspace_evacuation_candidates_(newspace_evacuation_candidates),
3084 local_compaction_spaces_(collector->heap()),
3085 local_slots_buffer_(nullptr),
3086 local_store_buffer_(),
3087 local_pretenuring_feedback_(HashMap::PointersMatch,
3088 kInitialLocalPretenuringFeedbackCapacity),
3089 new_space_visitor_(collector->heap(), &local_compaction_spaces_,
3090 &local_slots_buffer_, &local_store_buffer_,
3091 &local_pretenuring_feedback_),
3092 old_space_visitor_(collector->heap(), &local_compaction_spaces_,
3093 &local_slots_buffer_, &local_store_buffer_),
3094 duration_(0.0),
3095 bytes_compacted_(0),
3096 task_id_(0) {}
3097
3098 inline void EvacuatePages();
3099 inline void Finalize();
3100
3101 CompactionSpaceCollection* compaction_spaces() {
3102 return &local_compaction_spaces_;
3103 }
3104
3105 uint32_t task_id() { return task_id_; }
3106 void set_task_id(uint32_t id) { task_id_ = id; }
3107
3108 private:
3109 static const int kInitialLocalPretenuringFeedbackCapacity = 256;
3110
3111 Heap* heap() { return collector_->heap(); }
3112
3113 void ReportCompactionProgress(double duration, intptr_t bytes_compacted) {
3114 duration_ += duration;
3115 bytes_compacted_ += bytes_compacted;
3116 }
3117
3118 inline bool EvacuateSinglePage(MemoryChunk* p, HeapObjectVisitor* visitor);
3119
3120 MarkCompactCollector* collector_;
3121
3122 // Pages to process.
3123 const List<Page*>& evacuation_candidates_;
3124 const List<NewSpacePage*>& newspace_evacuation_candidates_;
3125
3126 // Locally cached collector data.
3127 CompactionSpaceCollection local_compaction_spaces_;
3128 SlotsBuffer* local_slots_buffer_;
3129 LocalStoreBuffer local_store_buffer_;
3130 HashMap local_pretenuring_feedback_;
3131
3132 // Visitors for the corresponding spaces.
3133 EvacuateNewSpaceVisitor new_space_visitor_;
3134 EvacuateOldSpaceVisitor old_space_visitor_;
3135
3137 // Bookkeeping info.
3137 double duration_;
3138 intptr_t bytes_compacted_;
3139
3140 // Task id, if this evacuator is executed on a background task instead of
3141 // the main thread. Can be used to try to abort the task currently scheduled
3142 // to executed to evacuate pages.
3143 uint32_t task_id_;
3144 };
3145
3146
3147 bool MarkCompactCollector::Evacuator::EvacuateSinglePage(
3148 MemoryChunk* p, HeapObjectVisitor* visitor) {
3149 bool aborted = false;
3150 if (p->parallel_compaction_state().TrySetValue(
3151 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3152 if (p->IsEvacuationCandidate() || p->InNewSpace()) {
3153 DCHECK_EQ(p->parallel_compaction_state().Value(),
3154 MemoryChunk::kCompactingInProgress);
3155 int saved_live_bytes = p->LiveBytes();
3156 double evacuation_time;
3157 bool success;
3158 {
3159 AlwaysAllocateScope always_allocate(heap()->isolate());
3160 TimedScope timed_scope(&evacuation_time);
3161 success = collector_->VisitLiveObjects(p, visitor, kClearMarkbits);
3162 }
3163 if (success) {
3164 ReportCompactionProgress(evacuation_time, saved_live_bytes);
3165 p->parallel_compaction_state().SetValue(
3166 MemoryChunk::kCompactingFinalize);
3167 } else {
3168 p->parallel_compaction_state().SetValue(
3169 MemoryChunk::kCompactingAborted);
3170 aborted = true;
3171 }
3172 } else {
3173 // There could be popular pages in the list of evacuation candidates
3174 // which we do compact.
3175 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3176 }
3177 }
3178 return !aborted;
3179 }
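EvacuateSinglePage claims a page by atomically flipping parallel_compaction_state() from kCompactingDone to kCompactingInProgress, so at most one evacuator (main thread or background task) processes any given page, and it then records the outcome as kCompactingFinalize or kCompactingAborted. A minimal sketch of that compare-and-swap claiming pattern, using std::atomic in place of V8's AtomicValue (all names here are hypothetical, not V8 code):

// Illustrative sketch only -- a per-page state machine claimed via CAS.
#include <atomic>

enum class CompactingState { kDone, kInProgress, kFinalize, kAborted };

struct PageState {
  std::atomic<CompactingState> state{CompactingState::kDone};
};

// Returns false if some other evacuator already claimed (or processed) the page.
bool TryEvacuatePage(PageState* page, bool (*evacuate)(PageState*)) {
  CompactingState expected = CompactingState::kDone;
  if (!page->state.compare_exchange_strong(expected,
                                           CompactingState::kInProgress)) {
    return false;  // already claimed by another evacuator
  }
  const bool success = evacuate(page);
  page->state.store(success ? CompactingState::kFinalize
                            : CompactingState::kAborted);
  return success;
}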
3180
3181
3182 void MarkCompactCollector::Evacuator::EvacuatePages() {
3183 for (NewSpacePage* p : newspace_evacuation_candidates_) {
3184 DCHECK(p->InNewSpace());
3185 DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()),
3186 MemoryChunk::kSweepingDone);
3187 bool success = EvacuateSinglePage(p, &new_space_visitor_);
3188 DCHECK(success);
3189 USE(success);
3190 }
3191 for (Page* p : evacuation_candidates_) {
3192 DCHECK(p->IsEvacuationCandidate() ||
3193 p->IsFlagSet(MemoryChunk::RESCAN_ON_EVACUATION));
3194 DCHECK_EQ(static_cast<int>(p->parallel_sweeping_state().Value()),
3195 MemoryChunk::kSweepingDone);
3196 EvacuateSinglePage(p, &old_space_visitor_);
3197 }
3198 }
3199
3200
3201 void MarkCompactCollector::Evacuator::Finalize() {
3202 heap()->old_space()->MergeCompactionSpace(
3203 local_compaction_spaces_.Get(OLD_SPACE));
3204 heap()->code_space()->MergeCompactionSpace(
3205 local_compaction_spaces_.Get(CODE_SPACE));
3206 heap()->tracer()->AddCompactionEvent(duration_, bytes_compacted_);
3207 heap()->IncrementPromotedObjectsSize(new_space_visitor_.promoted_size());
3208 heap()->IncrementSemiSpaceCopiedObjectSize(
3209 new_space_visitor_.semispace_copied_size());
3210 heap()->IncrementYoungSurvivorsCounter(
3211 new_space_visitor_.promoted_size() +
3212 new_space_visitor_.semispace_copied_size());
3213 heap()->MergeAllocationSitePretenuringFeedback(local_pretenuring_feedback_);
3214 local_store_buffer_.Process(heap()->store_buffer());
3215 collector_->AddEvacuationSlotsBufferSynchronized(local_slots_buffer_);
3216 }
3217
3218
3219 class MarkCompactCollector::CompactionTask : public CancelableTask {
3220 public:
3221 explicit CompactionTask(Heap* heap, Evacuator* evacuator)
3222 : CancelableTask(heap->isolate()), evacuator_(evacuator) {
3223 evacuator->set_task_id(id());
3224 }
3225
3226 virtual ~CompactionTask() {}
3227
3228 private:
3229 // v8::internal::CancelableTask overrides.
3230 void RunInternal() override {
3231 evacuator_->EvacuatePages();
3232 isolate()
3233 ->heap()
3234 ->mark_compact_collector()
3235 ->pending_compaction_tasks_semaphore_.Signal();
3236 }
3237
3238 Evacuator* evacuator_;
3239
3240 DISALLOW_COPY_AND_ASSIGN(CompactionTask);
3241 };
3242
3243
3244 int MarkCompactCollector::NumberOfParallelCompactionTasks(int pages,
3245 intptr_t live_bytes) {
3119 if (!FLAG_parallel_compaction) return 1; 3246 if (!FLAG_parallel_compaction) return 1;
3120 // Compute the number of needed tasks based on a target compaction time, the 3247 // Compute the number of needed tasks based on a target compaction time, the
3121 // profiled compaction speed and marked live memory. 3248 // profiled compaction speed and marked live memory.
3122 // 3249 //
3123 // The number of parallel compaction tasks is limited by: 3250 // The number of parallel compaction tasks is limited by:
3124 // - #evacuation pages 3251 // - #evacuation pages
3125 // - (#cores - 1) 3252 // - (#cores - 1)
3126 // - a hard limit
3127 const double kTargetCompactionTimeInMs = 1; 3253 const double kTargetCompactionTimeInMs = 1;
3128 const int kMaxCompactionTasks = 8; 3254 const int kNumSweepingTasks = 3;
3129 3255
3130 intptr_t compaction_speed = 3256 intptr_t compaction_speed =
3131 heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3257 heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3132 if (compaction_speed == 0) return 1;
3133 3258
3134 intptr_t live_bytes = 0; 3259 const int cores =
3135 for (Page* page : evacuation_candidates_) { 3260 Max(1, base::SysInfo::NumberOfProcessors() - kNumSweepingTasks - 1);
3136 live_bytes += page->LiveBytes(); 3261 int tasks;
3262 if (compaction_speed > 0) {
3263 tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
3264 compaction_speed / kTargetCompactionTimeInMs);
3265 } else {
3266 tasks = pages;
3137 } 3267 }
3138 3268 const int tasks_capped_pages = Min(pages, tasks);
3139 const int cores = Max(1, base::SysInfo::NumberOfProcessors() - 1);
3140 const int tasks =
3141 1 + static_cast<int>(static_cast<double>(live_bytes) / compaction_speed /
3142 kTargetCompactionTimeInMs);
3143 const int tasks_capped_pages = Min(evacuation_candidates_.length(), tasks);
3144 const int tasks_capped_cores = Min(cores, tasks_capped_pages); 3269 const int tasks_capped_cores = Min(cores, tasks_capped_pages);
3145 const int tasks_capped_hard = Min(kMaxCompactionTasks, tasks_capped_cores); 3270 return tasks_capped_cores;
3146 return tasks_capped_hard;
3147 } 3271 }
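The heuristic above sizes the task count from the measured compaction speed and the live bytes to be moved against a roughly 1 ms target, then caps it by the number of candidate pages and by the cores left over after reserving threads for concurrent sweeping. A standalone sketch of the same calculation (the helper name NumberOfCompactionTasks is assumed here; the constants mirror the values visible in the patch):

// Illustrative sketch of the task-count heuristic; not V8 code.
#include <algorithm>
#include <cstdint>

int NumberOfCompactionTasks(int pages, int64_t live_bytes,
                            int64_t bytes_per_ms,  // measured compaction speed
                            int available_cores) {
  const double kTargetCompactionTimeInMs = 1;
  int tasks;
  if (bytes_per_ms > 0) {
    // Enough tasks to move all live bytes within the target time budget.
    tasks = 1 + static_cast<int>(static_cast<double>(live_bytes) /
                                 static_cast<double>(bytes_per_ms) /
                                 kTargetCompactionTimeInMs);
  } else {
    // No speed estimate yet (e.g. first GC): one task per candidate page.
    tasks = pages;
  }
  const int tasks_capped_pages = std::min(pages, tasks);
  return std::min(std::max(1, available_cores), tasks_capped_pages);
}

For example, 8 MB of live bytes at a measured 1 MB/ms and a 1 ms budget yields 9 tasks before the page and core caps are applied.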
3148 3272
3149 3273
3150 void MarkCompactCollector::EvacuatePagesInParallel() { 3274 void MarkCompactCollector::EvacuatePagesInParallel() {
3151 const int num_pages = evacuation_candidates_.length(); 3275 int num_pages = 0;
3152 if (num_pages == 0) return; 3276 intptr_t live_bytes = 0;
3277 for (Page* page : evacuation_candidates_) {
3278 num_pages++;
3279 live_bytes += page->LiveBytes();
3280 }
3281 for (NewSpacePage* page : newspace_evacuation_candidates_) {
3282 num_pages++;
3283 live_bytes += page->LiveBytes();
3284 }
3285 DCHECK_GE(num_pages, 1);
3286
3153 3287
3154 // Used for trace summary. 3288 // Used for trace summary.
3155 intptr_t live_bytes = 0;
3156 intptr_t compaction_speed = 0; 3289 intptr_t compaction_speed = 0;
3157 if (FLAG_trace_fragmentation) { 3290 if (FLAG_trace_fragmentation) {
3158 for (Page* page : evacuation_candidates_) {
3159 live_bytes += page->LiveBytes();
3160 }
3161 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond(); 3291 compaction_speed = heap()->tracer()->CompactionSpeedInBytesPerMillisecond();
3162 } 3292 }
3163 const int num_tasks = NumberOfParallelCompactionTasks(); 3293
3294 const int num_tasks = NumberOfParallelCompactionTasks(num_pages, live_bytes);
3164 3295
3165 // Set up compaction spaces. 3296 // Set up compaction spaces.
3297 Evacuator** evacuators = new Evacuator*[num_tasks];
3166 CompactionSpaceCollection** compaction_spaces_for_tasks = 3298 CompactionSpaceCollection** compaction_spaces_for_tasks =
3167 new CompactionSpaceCollection*[num_tasks]; 3299 new CompactionSpaceCollection*[num_tasks];
3168 for (int i = 0; i < num_tasks; i++) { 3300 for (int i = 0; i < num_tasks; i++) {
3169 compaction_spaces_for_tasks[i] = new CompactionSpaceCollection(heap()); 3301 evacuators[i] = new Evacuator(this, evacuation_candidates_,
3302 newspace_evacuation_candidates_);
3303 compaction_spaces_for_tasks[i] = evacuators[i]->compaction_spaces();
3170 } 3304 }
3171
3172 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, 3305 heap()->old_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3173 num_tasks); 3306 num_tasks);
3174 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks, 3307 heap()->code_space()->DivideUponCompactionSpaces(compaction_spaces_for_tasks,
3175 num_tasks); 3308 num_tasks);
3309 delete[] compaction_spaces_for_tasks;
3176 3310
3177 uint32_t* task_ids = new uint32_t[num_tasks - 1];
3178 // Kick off parallel tasks. 3311 // Kick off parallel tasks.
3179 StartParallelCompaction(compaction_spaces_for_tasks, task_ids, num_tasks); 3312 StartParallelCompaction(evacuators, num_tasks);
3180 // Wait for unfinished and not-yet-started tasks. 3313 // Wait for unfinished and not-yet-started tasks.
3181 WaitUntilCompactionCompleted(task_ids, num_tasks - 1); 3314 WaitUntilCompactionCompleted(&evacuators[1], num_tasks - 1);
3182 delete[] task_ids;
3183 3315
3184 double compaction_duration = 0.0; 3316 // Finalize local evacuators by merging back all locally cached data.
3185 intptr_t compacted_memory = 0;
3186 // Merge back memory (compacted and unused) from compaction spaces.
3187 for (int i = 0; i < num_tasks; i++) { 3317 for (int i = 0; i < num_tasks; i++) {
3188 heap()->old_space()->MergeCompactionSpace( 3318 evacuators[i]->Finalize();
3189 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); 3319 delete evacuators[i];
3190 heap()->code_space()->MergeCompactionSpace(
3191 compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
3192 compacted_memory += compaction_spaces_for_tasks[i]->bytes_compacted();
3193 compaction_duration += compaction_spaces_for_tasks[i]->duration();
3194 delete compaction_spaces_for_tasks[i];
3195 } 3320 }
3196 delete[] compaction_spaces_for_tasks; 3321 delete[] evacuators;
3197 heap()->tracer()->AddCompactionEvent(compaction_duration, compacted_memory);
3198 3322
3199 // Finalize sequentially. 3323 // Finalize pages sequentially.
3324 for (NewSpacePage* p : newspace_evacuation_candidates_) {
3325 DCHECK_EQ(p->parallel_compaction_state().Value(),
3326 MemoryChunk::kCompactingFinalize);
3327 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3328 }
3329
3200 int abandoned_pages = 0; 3330 int abandoned_pages = 0;
3201 for (int i = 0; i < num_pages; i++) { 3331 for (Page* p : evacuation_candidates_) {
3202 Page* p = evacuation_candidates_[i];
3203 switch (p->parallel_compaction_state().Value()) { 3332 switch (p->parallel_compaction_state().Value()) {
3204 case MemoryChunk::ParallelCompactingState::kCompactingAborted: 3333 case MemoryChunk::ParallelCompactingState::kCompactingAborted:
3205 // We have partially compacted the page, i.e., some objects may have 3334 // We have partially compacted the page, i.e., some objects may have
3206 // moved, others are still in place. 3335 // moved, others are still in place.
3207 // We need to: 3336 // We need to:
3208 // - Leave the evacuation candidate flag for later processing of 3337 // - Leave the evacuation candidate flag for later processing of
3209 // slots buffer entries. 3338 // slots buffer entries.
3210 // - Leave the slots buffer there for processing of entries added by 3339 // - Leave the slots buffer there for processing of entries added by
3211 // the write barrier. 3340 // the write barrier.
3212 // - Rescan the page as slot recording in the migration buffer only 3341 // - Rescan the page as slot recording in the migration buffer only
(...skipping 12 matching lines...)
3225 case MemoryChunk::kCompactingFinalize: 3354 case MemoryChunk::kCompactingFinalize:
3226 DCHECK(p->IsEvacuationCandidate()); 3355 DCHECK(p->IsEvacuationCandidate());
3227 p->SetWasSwept(); 3356 p->SetWasSwept();
3228 p->Unlink(); 3357 p->Unlink();
3229 break; 3358 break;
3230 case MemoryChunk::kCompactingDone: 3359 case MemoryChunk::kCompactingDone:
3231 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE)); 3360 DCHECK(p->IsFlagSet(Page::POPULAR_PAGE));
3232 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3361 DCHECK(p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3233 break; 3362 break;
3234 default: 3363 default:
3235 // We should not observe kCompactingInProgress, or kCompactingDone. 3364 // MemoryChunk::kCompactingInProgress.
3236 UNREACHABLE(); 3365 UNREACHABLE();
3237 } 3366 }
3238 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); 3367 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3239 } 3368 }
3240 if (FLAG_trace_fragmentation) { 3369 if (FLAG_trace_fragmentation) {
3241 PrintIsolate(isolate(), 3370 PrintIsolate(isolate(),
3242 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d " 3371 "%8.0f ms: compaction: parallel=%d pages=%d aborted=%d "
3243 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX 3372 "tasks=%d cores=%d live_bytes=%" V8_PTR_PREFIX
3244 "d compaction_speed=%" V8_PTR_PREFIX "d\n", 3373 "d compaction_speed=%" V8_PTR_PREFIX "d\n",
3245 isolate()->time_millis_since_init(), FLAG_parallel_compaction, 3374 isolate()->time_millis_since_init(), FLAG_parallel_compaction,
3246 num_pages, abandoned_pages, num_tasks, 3375 num_pages, abandoned_pages, num_tasks,
3247 base::SysInfo::NumberOfProcessors(), live_bytes, 3376 base::SysInfo::NumberOfProcessors(), live_bytes,
3248 compaction_speed); 3377 compaction_speed);
3249 } 3378 }
3250 } 3379 }
3251 3380
3252 3381
3253 void MarkCompactCollector::StartParallelCompaction( 3382 void MarkCompactCollector::StartParallelCompaction(Evacuator** evacuators,
3254 CompactionSpaceCollection** compaction_spaces, uint32_t* task_ids, 3383 int len) {
3255 int len) {
3256 compaction_in_progress_ = true; 3384 compaction_in_progress_ = true;
3257 for (int i = 1; i < len; i++) { 3385 for (int i = 1; i < len; i++) {
3258 CompactionTask* task = new CompactionTask(heap(), compaction_spaces[i]); 3386 CompactionTask* task = new CompactionTask(heap(), evacuators[i]);
3259 task_ids[i - 1] = task->id();
3260 V8::GetCurrentPlatform()->CallOnBackgroundThread( 3387 V8::GetCurrentPlatform()->CallOnBackgroundThread(
3261 task, v8::Platform::kShortRunningTask); 3388 task, v8::Platform::kShortRunningTask);
3262 } 3389 }
3263 3390
3264 // Contribute in main thread. 3391 // Contribute on main thread.
3265 EvacuatePages(compaction_spaces[0], &migration_slots_buffer_); 3392 evacuators[0]->EvacuatePages();
3266 } 3393 }
3267 3394
3268 3395
3269 void MarkCompactCollector::WaitUntilCompactionCompleted(uint32_t* task_ids, 3396 void MarkCompactCollector::WaitUntilCompactionCompleted(Evacuator** evacuators,
3270 int len) { 3397 int len) {
3271 // Try to cancel compaction tasks that have not been run (as they might be 3398 // Try to cancel compaction tasks that have not been run (as they might be
3272 // stuck in a worker queue). Tasks that cannot be canceled, have either 3399 // stuck in a worker queue). Tasks that cannot be canceled, have either
3273 // already completed or are still running, hence we need to wait for their 3400 // already completed or are still running, hence we need to wait for their
3274 // semaphore signal. 3401 // semaphore signal.
3275 for (int i = 0; i < len; i++) { 3402 for (int i = 0; i < len; i++) {
3276 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(task_ids[i])) { 3403 if (!heap()->isolate()->cancelable_task_manager()->TryAbort(
3404 evacuators[i]->task_id())) {
3277 pending_compaction_tasks_semaphore_.Wait(); 3405 pending_compaction_tasks_semaphore_.Wait();
3278 } 3406 }
3279 } 3407 }
3280 compaction_in_progress_ = false; 3408 compaction_in_progress_ = false;
3281 } 3409 }
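WaitUntilCompactionCompleted tries to abort every queued background task via the cancelable task manager; a task that cannot be aborted is either running or already finished, so the main thread instead waits on the semaphore that each CompactionTask signals exactly once when RunInternal() returns. A small C++20 sketch of this abort-or-wait pattern, with std::counting_semaphore and an atomic flag standing in for base::Semaphore and the task manager (all names hypothetical, not V8 code):

// Illustrative abort-or-wait sketch; not V8 code.
#include <atomic>
#include <semaphore>
#include <vector>

struct Task {
  std::atomic<bool> started{false};
  // Returns true if the flag was still clear, i.e. the task is now considered
  // cancelled; a real worker would check the same flag before running.
  bool TryAbort() { return !started.exchange(true); }
};

// A worker calls this once when it finishes running a task.
void FinishTask(std::counting_semaphore<>* done) { done->release(); }

// Main thread: abort what never ran, wait once per task that did run.
void WaitUntilAllDone(const std::vector<Task*>& tasks,
                      std::counting_semaphore<>* done) {
  for (Task* task : tasks) {
    if (!task->TryAbort()) done->acquire();
  }
}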
3282 3410
3283 3411
3284 void MarkCompactCollector::EvacuatePages(
3285 CompactionSpaceCollection* compaction_spaces,
3286 SlotsBuffer** evacuation_slots_buffer) {
3287 EvacuateOldSpaceVisitor visitor(heap(), compaction_spaces,
3288 evacuation_slots_buffer);
3289 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3290 Page* p = evacuation_candidates_[i];
3291 DCHECK(p->IsEvacuationCandidate() ||
3292 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3293 DCHECK(static_cast<int>(p->parallel_sweeping_state().Value()) ==
3294 MemoryChunk::kSweepingDone);
3295 if (p->parallel_compaction_state().TrySetValue(
3296 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3297 if (p->IsEvacuationCandidate()) {
3298 DCHECK_EQ(p->parallel_compaction_state().Value(),
3299 MemoryChunk::kCompactingInProgress);
3300 double start = heap()->MonotonicallyIncreasingTimeInMs();
3301 intptr_t live_bytes = p->LiveBytes();
3302 AlwaysAllocateScope always_allocate(isolate());
3303 if (VisitLiveObjects(p, &visitor, kClearMarkbits)) {
3304 p->ResetLiveBytes();
3305 p->parallel_compaction_state().SetValue(
3306 MemoryChunk::kCompactingFinalize);
3307 compaction_spaces->ReportCompactionProgress(
3308 heap()->MonotonicallyIncreasingTimeInMs() - start, live_bytes);
3309 } else {
3310 p->parallel_compaction_state().SetValue(
3311 MemoryChunk::kCompactingAborted);
3312 }
3313 } else {
3314 // There could be popular pages in the list of evacuation candidates
3315 // which we do compact.
3316 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
3317 }
3318 }
3319 }
3320 }
3321
3322
3323 class EvacuationWeakObjectRetainer : public WeakObjectRetainer { 3412 class EvacuationWeakObjectRetainer : public WeakObjectRetainer {
3324 public: 3413 public:
3325 virtual Object* RetainAs(Object* object) { 3414 virtual Object* RetainAs(Object* object) {
3326 if (object->IsHeapObject()) { 3415 if (object->IsHeapObject()) {
3327 HeapObject* heap_object = HeapObject::cast(object); 3416 HeapObject* heap_object = HeapObject::cast(object);
3328 MapWord map_word = heap_object->map_word(); 3417 MapWord map_word = heap_object->map_word();
3329 if (map_word.IsForwardingAddress()) { 3418 if (map_word.IsForwardingAddress()) {
3330 return map_word.ToForwardingAddress(); 3419 return map_word.ToForwardingAddress();
3331 } 3420 }
3332 } 3421 }
(...skipping 129 matching lines...)
3462 // Return true if the given code is deoptimized or will be deoptimized. 3551 // Return true if the given code is deoptimized or will be deoptimized.
3463 bool MarkCompactCollector::WillBeDeoptimized(Code* code) { 3552 bool MarkCompactCollector::WillBeDeoptimized(Code* code) {
3464 return code->is_optimized_code() && code->marked_for_deoptimization(); 3553 return code->is_optimized_code() && code->marked_for_deoptimization();
3465 } 3554 }
3466 3555
3467 3556
3468 void MarkCompactCollector::RemoveObjectSlots(Address start_slot, 3557 void MarkCompactCollector::RemoveObjectSlots(Address start_slot,
3469 Address end_slot) { 3558 Address end_slot) {
3470 // Remove entries by replacing them with an old-space slot containing a smi 3559 // Remove entries by replacing them with an old-space slot containing a smi
3471 // that is located in an unmovable page. 3560 // that is located in an unmovable page.
3472 int npages = evacuation_candidates_.length(); 3561 for (Page* p : evacuation_candidates_) {
3473 for (int i = 0; i < npages; i++) {
3474 Page* p = evacuation_candidates_[i];
3475 DCHECK(p->IsEvacuationCandidate() || 3562 DCHECK(p->IsEvacuationCandidate() ||
3476 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3563 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3477 if (p->IsEvacuationCandidate()) { 3564 if (p->IsEvacuationCandidate()) {
3478 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot, 3565 SlotsBuffer::RemoveObjectSlots(heap_, p->slots_buffer(), start_slot,
3479 end_slot); 3566 end_slot);
3480 } 3567 }
3481 } 3568 }
3482 } 3569 }
3483 3570
3484 3571
(...skipping 59 matching lines...)
3544 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3631 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3545 Map* map = object->synchronized_map(); 3632 Map* map = object->synchronized_map();
3546 int size = object->SizeFromMap(map); 3633 int size = object->SizeFromMap(map);
3547 object->IterateBody(map->instance_type(), size, visitor); 3634 object->IterateBody(map->instance_type(), size, visitor);
3548 } 3635 }
3549 } 3636 }
3550 3637
3551 3638
3552 void MarkCompactCollector::SweepAbortedPages() { 3639 void MarkCompactCollector::SweepAbortedPages() {
3553 // Second pass on aborted pages. 3640 // Second pass on aborted pages.
3554 for (int i = 0; i < evacuation_candidates_.length(); i++) { 3641 for (Page* p : evacuation_candidates_) {
3555 Page* p = evacuation_candidates_[i];
3556 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) { 3642 if (p->IsFlagSet(Page::COMPACTION_WAS_ABORTED)) {
3557 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED); 3643 p->ClearFlag(MemoryChunk::COMPACTION_WAS_ABORTED);
3558 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3644 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3559 switch (space->identity()) { 3645 switch (space->identity()) {
3560 case OLD_SPACE: 3646 case OLD_SPACE:
3561 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST, 3647 Sweep<SWEEP_ONLY, SWEEP_ON_MAIN_THREAD, IGNORE_SKIP_LIST,
3562 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr); 3648 IGNORE_FREE_SPACE>(space, nullptr, p, nullptr);
3563 break; 3649 break;
3564 case CODE_SPACE: 3650 case CODE_SPACE:
3565 if (FLAG_zap_code_space) { 3651 if (FLAG_zap_code_space) {
(...skipping 10 matching lines...)
3576 } 3662 }
3577 } 3663 }
3578 } 3664 }
3579 } 3665 }
3580 3666
3581 3667
3582 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() { 3668 void MarkCompactCollector::EvacuateNewSpaceAndCandidates() {
3583 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE); 3669 GCTracer::Scope gc_scope(heap()->tracer(), GCTracer::Scope::MC_EVACUATE);
3584 Heap::RelocationLock relocation_lock(heap()); 3670 Heap::RelocationLock relocation_lock(heap());
3585 3671
3586 HashMap* local_pretenuring_feedback = nullptr;
3587 { 3672 {
3588 GCTracer::Scope gc_scope(heap()->tracer(), 3673 GCTracer::Scope gc_scope(heap()->tracer(),
3589 GCTracer::Scope::MC_EVACUATE_NEW_SPACE); 3674 GCTracer::Scope::MC_EVACUATE_NEW_SPACE);
3590 EvacuationScope evacuation_scope(this); 3675 EvacuationScope evacuation_scope(this);
3676
3591 EvacuateNewSpacePrologue(); 3677 EvacuateNewSpacePrologue();
3592 local_pretenuring_feedback = EvacuateNewSpaceInParallel();
3593 heap_->new_space()->set_age_mark(heap_->new_space()->top());
3594 }
3595
3596 {
3597 GCTracer::Scope gc_scope(heap()->tracer(),
3598 GCTracer::Scope::MC_EVACUATE_CANDIDATES);
3599 EvacuationScope evacuation_scope(this);
3600 EvacuatePagesInParallel(); 3678 EvacuatePagesInParallel();
3601 } 3679 EvacuateNewSpaceEpilogue();
3602 3680 heap()->new_space()->set_age_mark(heap()->new_space()->top());
3603 {
3604 heap_->MergeAllocationSitePretenuringFeedback(*local_pretenuring_feedback);
3605 delete local_pretenuring_feedback;
3606 } 3681 }
3607 3682
3608 UpdatePointersAfterEvacuation(); 3683 UpdatePointersAfterEvacuation();
3609 3684
3610 { 3685 {
3611 GCTracer::Scope gc_scope(heap()->tracer(), 3686 GCTracer::Scope gc_scope(heap()->tracer(),
3612 GCTracer::Scope::MC_EVACUATE_CLEAN_UP); 3687 GCTracer::Scope::MC_EVACUATE_CLEAN_UP);
3613 // After updating all pointers, we can finally sweep the aborted pages, 3688 // After updating all pointers, we can finally sweep the aborted pages,
3614 // effectively overriding any forward pointers. 3689 // effectively overriding any forward pointers.
3615 SweepAbortedPages(); 3690 SweepAbortedPages();
(...skipping 56 matching lines...)
3672 &updating_visitor); 3747 &updating_visitor);
3673 } 3748 }
3674 // Update roots. 3749 // Update roots.
3675 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE); 3750 heap_->IterateRoots(&updating_visitor, VISIT_ALL_IN_SWEEP_NEWSPACE);
3676 3751
3677 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), 3752 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3678 &Heap::ScavengeStoreBufferCallback); 3753 &Heap::ScavengeStoreBufferCallback);
3679 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 3754 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3680 } 3755 }
3681 3756
3682 int npages = evacuation_candidates_.length();
3683 { 3757 {
3684 GCTracer::Scope gc_scope( 3758 GCTracer::Scope gc_scope(
3685 heap()->tracer(), 3759 heap()->tracer(),
3686 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED); 3760 GCTracer::Scope::MC_EVACUATE_UPDATE_POINTERS_BETWEEN_EVACUATED);
3687 for (int i = 0; i < npages; i++) { 3761 for (Page* p : evacuation_candidates_) {
3688 Page* p = evacuation_candidates_[i];
3689 DCHECK(p->IsEvacuationCandidate() || 3762 DCHECK(p->IsEvacuationCandidate() ||
3690 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3763 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3691 3764
3692 if (p->IsEvacuationCandidate()) { 3765 if (p->IsEvacuationCandidate()) {
3693 UpdateSlotsRecordedIn(p->slots_buffer()); 3766 UpdateSlotsRecordedIn(p->slots_buffer());
3694 if (FLAG_trace_fragmentation_verbose) { 3767 if (FLAG_trace_fragmentation_verbose) {
3695 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p), 3768 PrintF(" page %p slots buffer: %d\n", reinterpret_cast<void*>(p),
3696 SlotsBuffer::SizeOfChain(p->slots_buffer())); 3769 SlotsBuffer::SizeOfChain(p->slots_buffer()));
3697 } 3770 }
3698 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address()); 3771 slots_buffer_allocator_->DeallocateChain(p->slots_buffer_address());
(...skipping 53 matching lines...)
3752 heap_->UpdateReferencesInExternalStringTable( 3825 heap_->UpdateReferencesInExternalStringTable(
3753 &UpdateReferenceInExternalStringTableEntry); 3826 &UpdateReferenceInExternalStringTableEntry);
3754 3827
3755 EvacuationWeakObjectRetainer evacuation_object_retainer; 3828 EvacuationWeakObjectRetainer evacuation_object_retainer;
3756 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); 3829 heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
3757 } 3830 }
3758 } 3831 }
3759 3832
3760 3833
3761 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { 3834 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3762 int npages = evacuation_candidates_.length(); 3835 for (Page* p : evacuation_candidates_) {
3763 for (int i = 0; i < npages; i++) {
3764 Page* p = evacuation_candidates_[i];
3765 if (!p->IsEvacuationCandidate()) continue; 3836 if (!p->IsEvacuationCandidate()) continue;
3766 p->Unlink(); 3837 p->Unlink();
3767 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3838 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3768 p->InsertAfter(space->LastPage()); 3839 p->InsertAfter(space->LastPage());
3769 } 3840 }
3770 } 3841 }
3771 3842
3772 3843
3773 void MarkCompactCollector::ReleaseEvacuationCandidates() { 3844 void MarkCompactCollector::ReleaseEvacuationCandidates() {
3774 int npages = evacuation_candidates_.length(); 3845 for (Page* p : evacuation_candidates_) {
3775 for (int i = 0; i < npages; i++) {
3776 Page* p = evacuation_candidates_[i];
3777 if (!p->IsEvacuationCandidate()) continue; 3846 if (!p->IsEvacuationCandidate()) continue;
3778 PagedSpace* space = static_cast<PagedSpace*>(p->owner()); 3847 PagedSpace* space = static_cast<PagedSpace*>(p->owner());
3779 space->Free(p->area_start(), p->area_size()); 3848 space->Free(p->area_start(), p->area_size());
3780 p->set_scan_on_scavenge(false); 3849 p->set_scan_on_scavenge(false);
3781 p->ResetLiveBytes(); 3850 p->ResetLiveBytes();
3782 CHECK(p->WasSwept()); 3851 CHECK(p->WasSwept());
3783 space->ReleasePage(p); 3852 space->ReleasePage(p);
3784 } 3853 }
3785 evacuation_candidates_.Rewind(0); 3854 evacuation_candidates_.Rewind(0);
3786 compacting_ = false; 3855 compacting_ = false;
3787 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages(); 3856 heap()->FilterStoreBufferEntriesOnAboutToBeFreedPages();
3788 heap()->FreeQueuedChunks(); 3857 heap()->FreeQueuedChunks();
3789 } 3858 }
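Note: ReleaseEvacuationCandidates returns each fully evacuated page's payload to its owning space before releasing the page, then resets the candidate list. A minimal sketch of the same sequence, with the heap types stubbed out for illustration (not the real Page/PagedSpace interfaces):

#include <cstddef>
#include <vector>

struct CandidateSketch {
  bool is_candidate = true;
  std::size_t area_size = 0;
};
struct SpaceSketch {
  std::size_t freed = 0;
  void Free(std::size_t bytes) { freed += bytes; }  // payload back to the free list
  void ReleasePage(CandidateSketch*) {}             // page back to the allocator
};

void ReleaseEvacuationCandidatesSketch(std::vector<CandidateSketch*>& candidates,
                                       SpaceSketch& space) {
  for (CandidateSketch* p : candidates) {
    if (!p->is_candidate) continue;
    space.Free(p->area_size);  // the whole usable area is free after evacuation
    space.ReleasePage(p);
  }
  candidates.clear();          // corresponds to evacuation_candidates_.Rewind(0)
}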
3790 3859
3791 3860
3792 int MarkCompactCollector::SweepInParallel(PagedSpace* space, 3861 int MarkCompactCollector::SweepInParallel(PagedSpace* space,
3793 int required_freed_bytes) { 3862 int required_freed_bytes,
3863 int max_pages) {
3864 int page_count = 0;
3794 int max_freed = 0; 3865 int max_freed = 0;
3795 int max_freed_overall = 0; 3866 int max_freed_overall = 0;
3796 PageIterator it(space); 3867 PageIterator it(space);
3797 while (it.has_next()) { 3868 while (it.has_next()) {
3798 Page* p = it.next(); 3869 Page* p = it.next();
3799 max_freed = SweepInParallel(p, space); 3870 max_freed = SweepInParallel(p, space);
3800 DCHECK(max_freed >= 0); 3871 DCHECK(max_freed >= 0);
3801 if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) { 3872 if ((required_freed_bytes > 0) && (max_freed >= required_freed_bytes)) {
3802 return max_freed; 3873 return max_freed;
3803 } 3874 }
3804 max_freed_overall = Max(max_freed, max_freed_overall); 3875 max_freed_overall = Max(max_freed, max_freed_overall);
3805 if (p == space->end_of_unswept_pages()) break; 3876 if (p == space->end_of_unswept_pages()) break;
3877 page_count++;
3878 if ((max_pages > 0) && (page_count == max_pages)) {
3879 return max_freed;
3880 }
3806 } 3881 }
3807 return max_freed_overall; 3882 return max_freed_overall;
3808 } 3883 }
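Note: the new max_pages parameter lets a sweeper task bound how many pages it processes in one call, while required_freed_bytes still short-circuits as soon as one page yields a large enough free block. A standalone sketch of just the loop's exit conditions (page iteration and per-page sweeping stubbed; the end_of_unswept_pages check is omitted here):

#include <algorithm>
#include <vector>

// Stub: per-page sweep returns the largest free block found on that page.
static int SweepOnePageSketch(int page_index) { return page_index * 16; }

int SweepInParallelSketch(const std::vector<int>& pages,
                          int required_freed_bytes, int max_pages) {
  int page_count = 0;
  int max_freed_overall = 0;
  for (int page : pages) {
    int max_freed = SweepOnePageSketch(page);
    if (required_freed_bytes > 0 && max_freed >= required_freed_bytes) {
      return max_freed;  // freed enough on this page alone
    }
    max_freed_overall = std::max(max_freed, max_freed_overall);
    if (max_pages > 0 && ++page_count == max_pages) {
      return max_freed;  // per-task page budget exhausted
    }
  }
  return max_freed_overall;
}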
3809 3884
3810 3885
3811 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) { 3886 int MarkCompactCollector::SweepInParallel(Page* page, PagedSpace* space) {
3812 int max_freed = 0; 3887 int max_freed = 0;
3813 if (page->TryLock()) { 3888 if (page->TryLock()) {
3814 // If this page was already swept in the meantime, we can return here. 3889 // If this page was already swept in the meantime, we can return here.
3815 if (page->parallel_sweeping_state().Value() != 3890 if (page->parallel_sweeping_state().Value() !=
(...skipping 249 matching lines...)
4065 MarkBit mark_bit = Marking::MarkBitFrom(host); 4140 MarkBit mark_bit = Marking::MarkBitFrom(host);
4066 if (Marking::IsBlack(mark_bit)) { 4141 if (Marking::IsBlack(mark_bit)) {
4067 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host); 4142 RelocInfo rinfo(isolate(), pc, RelocInfo::CODE_TARGET, 0, host);
4068 RecordRelocSlot(&rinfo, target); 4143 RecordRelocSlot(&rinfo, target);
4069 } 4144 }
4070 } 4145 }
4071 } 4146 }
4072 4147
4073 } // namespace internal 4148 } // namespace internal
4074 } // namespace v8 4149 } // namespace v8