Chromium Code Reviews

Side by Side Diff: src/heap/mark-compact.cc

Issue 1341973003: [heap] Scalable slots buffer for parallel compaction. (Closed)
Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Created 5 years, 3 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/mark-compact.h" 5 #include "src/heap/mark-compact.h"
6 6
7 #include "src/base/atomicops.h" 7 #include "src/base/atomicops.h"
8 #include "src/base/bits.h" 8 #include "src/base/bits.h"
9 #include "src/code-stubs.h" 9 #include "src/code-stubs.h"
10 #include "src/compilation-cache.h" 10 #include "src/compilation-cache.h"
(...skipping 465 matching lines...)
476 class MarkCompactCollector::CompactionTask : public v8::Task { 476 class MarkCompactCollector::CompactionTask : public v8::Task {
477 public: 477 public:
478 explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces) 478 explicit CompactionTask(Heap* heap, CompactionSpaceCollection* spaces)
479 : heap_(heap), spaces_(spaces) {} 479 : heap_(heap), spaces_(spaces) {}
480 480
481 virtual ~CompactionTask() {} 481 virtual ~CompactionTask() {}
482 482
483 private: 483 private:
484 // v8::Task overrides. 484 // v8::Task overrides.
485 void Run() override { 485 void Run() override {
486 heap_->mark_compact_collector()->EvacuatePages(spaces_); 486 MarkCompactCollector* mark_compact = heap_->mark_compact_collector();
487 heap_->mark_compact_collector() 487 SlotsBuffer* evacuation_slots_buffer = nullptr;
488 ->pending_compaction_tasks_semaphore_.Signal(); 488 mark_compact->EvacuatePages(spaces_, &evacuation_slots_buffer);
489 mark_compact->AddEvacuationSlotsBufferSynchronized(evacuation_slots_buffer);
490 mark_compact->pending_compaction_tasks_semaphore_.Signal();
489 } 491 }
490 492
491 Heap* heap_; 493 Heap* heap_;
492 CompactionSpaceCollection* spaces_; 494 CompactionSpaceCollection* spaces_;
493 495
494 DISALLOW_COPY_AND_ASSIGN(CompactionTask); 496 DISALLOW_COPY_AND_ASSIGN(CompactionTask);
495 }; 497 };
496 498
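The rewritten CompactionTask::Run above gives each background task its own SlotsBuffer: the task evacuates pages into that task-local buffer, hands the filled buffer back to the collector via AddEvacuationSlotsBufferSynchronized, and only then signals completion, so per-slot synchronization on a shared migration buffer is no longer needed. A minimal standalone sketch of this producer-side hand-off, using standard C++ with illustrative names (SlotChunk and Collector are not V8 types):

    #include <mutex>
    #include <thread>
    #include <vector>

    // Stand-in for SlotsBuffer; all names here are illustrative, not the V8 API.
    struct SlotChunk {
      std::vector<void*> slots;
    };

    class Collector {
     public:
      ~Collector() {
        for (SlotChunk* buffer : evacuation_slots_buffers_) delete buffer;
      }

      // Called once per finished task: the only synchronized step.
      void AddEvacuationSlotsBufferSynchronized(SlotChunk* buffer) {
        std::lock_guard<std::mutex> guard(mutex_);
        evacuation_slots_buffers_.push_back(buffer);
      }

      // Worker body: evacuate into a task-local buffer, then register it.
      void RunCompactionTask() {
        SlotChunk* local = new SlotChunk();  // task-local, so appends need no lock
        EvacuatePages(local);
        AddEvacuationSlotsBufferSynchronized(local);
      }

     private:
      void EvacuatePages(SlotChunk* buffer) {
        buffer->slots.push_back(nullptr);    // pretend one slot was recorded
      }

      std::mutex mutex_;
      std::vector<SlotChunk*> evacuation_slots_buffers_;
    };

    int main() {
      Collector collector;
      std::thread worker(&Collector::RunCompactionTask, &collector);
      collector.RunCompactionTask();         // the main thread contributes too
      worker.join();
      return 0;
    }

The only lock is taken once per task when the finished buffer is registered, which keeps the critical section short no matter how many slots a task records.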
497 499
498 class MarkCompactCollector::SweeperTask : public v8::Task { 500 class MarkCompactCollector::SweeperTask : public v8::Task {
(...skipping 1224 matching lines...)
1723 // Shouldn't happen. We are sweeping linearly, and to-space 1725 // Shouldn't happen. We are sweeping linearly, and to-space
1724 // has the same number of pages as from-space, so there is 1726 // has the same number of pages as from-space, so there is
1725 // always room. 1727 // always room.
1726 UNREACHABLE(); 1728 UNREACHABLE();
1727 } 1729 }
1728 allocation = new_space->AllocateRaw(size, alignment); 1730 allocation = new_space->AllocateRaw(size, alignment);
1729 DCHECK(!allocation.IsRetry()); 1731 DCHECK(!allocation.IsRetry());
1730 } 1732 }
1731 Object* target = allocation.ToObjectChecked(); 1733 Object* target = allocation.ToObjectChecked();
1732 1734
1733 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE); 1735 MigrateObject(HeapObject::cast(target), object, size, NEW_SPACE, nullptr);
1734 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { 1736 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
1735 heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target)); 1737 heap()->array_buffer_tracker()->MarkLive(JSArrayBuffer::cast(target));
1736 } 1738 }
1737 heap()->IncrementSemiSpaceCopiedObjectSize(size); 1739 heap()->IncrementSemiSpaceCopiedObjectSize(size);
1738 } 1740 }
1739 *cells = 0; 1741 *cells = 0;
1740 } 1742 }
1741 return survivors_size; 1743 return survivors_size;
1742 } 1744 }
1743 1745
(...skipping 807 matching lines...)
2551 Object* weak_cell_obj = heap()->encountered_weak_cells(); 2553 Object* weak_cell_obj = heap()->encountered_weak_cells();
2552 while (weak_cell_obj != Smi::FromInt(0)) { 2554 while (weak_cell_obj != Smi::FromInt(0)) {
2553 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj); 2555 WeakCell* weak_cell = reinterpret_cast<WeakCell*>(weak_cell_obj);
2554 weak_cell_obj = weak_cell->next(); 2556 weak_cell_obj = weak_cell->next();
2555 weak_cell->clear_next(heap()); 2557 weak_cell->clear_next(heap());
2556 } 2558 }
2557 heap()->set_encountered_weak_cells(Smi::FromInt(0)); 2559 heap()->set_encountered_weak_cells(Smi::FromInt(0));
2558 } 2560 }
2559 2561
2560 2562
2561 void MarkCompactCollector::RecordMigratedSlot(Object* value, Address slot) { 2563 void MarkCompactCollector::RecordMigratedSlot(
2564 Object* value, Address slot, SlotsBuffer** evacuation_slots_buffer) {
2562 // When parallel compaction is in progress, store and slots buffer entries 2565 // When parallel compaction is in progress, store and slots buffer entries
2563 // require synchronization. 2566 // require synchronization.
2564 if (heap_->InNewSpace(value)) { 2567 if (heap_->InNewSpace(value)) {
2565 if (parallel_compaction_in_progress_) { 2568 if (parallel_compaction_in_progress_) {
2566 heap_->store_buffer()->MarkSynchronized(slot); 2569 heap_->store_buffer()->MarkSynchronized(slot);
2567 } else { 2570 } else {
2568 heap_->store_buffer()->Mark(slot); 2571 heap_->store_buffer()->Mark(slot);
2569 } 2572 }
2570 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) { 2573 } else if (value->IsHeapObject() && IsOnEvacuationCandidate(value)) {
2571 if (parallel_compaction_in_progress_) { 2574 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2572 SlotsBuffer::AddToSynchronized( 2575 reinterpret_cast<Object**>(slot),
2573 slots_buffer_allocator_, &migration_slots_buffer_, 2576 SlotsBuffer::IGNORE_OVERFLOW);
2574 &migration_slots_buffer_mutex_, reinterpret_cast<Object**>(slot),
2575 SlotsBuffer::IGNORE_OVERFLOW);
2576 } else {
2577 SlotsBuffer::AddTo(slots_buffer_allocator_, &migration_slots_buffer_,
2578 reinterpret_cast<Object**>(slot),
2579 SlotsBuffer::IGNORE_OVERFLOW);
2580 }
2581 } 2577 }
2582 } 2578 }
2583 2579
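With a per-task buffer threaded through as evacuation_slots_buffer, RecordMigratedSlot no longer needs the AddToSynchronized branch: slots pointing at evacuation candidates go straight into the caller's buffer, while new-space pointers still go through the shared store buffer, which keeps its synchronized variant during parallel compaction. A hedged sketch of that branching; the predicates and types below are placeholders, not the V8 API:

    #include <cstdint>
    #include <vector>

    using Address = std::uintptr_t;

    // Stand-ins for the real heap queries and buffers; illustrative only.
    struct SlotsChunk { std::vector<Address> entries; };
    static bool InNewSpace(Address value) { return (value & 1u) != 0; }
    static bool IsOnEvacuationCandidate(Address value) { return (value & 2u) != 0; }
    static void StoreBufferMark(Address /*slot*/, bool /*synchronized*/) {}

    void RecordMigratedSlot(Address value, Address slot, bool parallel_in_progress,
                            SlotsChunk* task_local_buffer) {
      if (InNewSpace(value)) {
        // The store buffer is shared between tasks, so it still needs its
        // synchronized variant while parallel compaction is running.
        StoreBufferMark(slot, /*synchronized=*/parallel_in_progress);
      } else if (IsOnEvacuationCandidate(value)) {
        // The slots buffer is owned by the calling task: a plain append is enough.
        task_local_buffer->entries.push_back(slot);
      }
    }

    int main() {
      SlotsChunk buffer;
      RecordMigratedSlot(/*value=*/2, /*slot=*/0x100, /*parallel_in_progress=*/true,
                         &buffer);
      return buffer.entries.size() == 1 ? 0 : 1;
    }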
2584 2580
2585 void MarkCompactCollector::RecordMigratedCodeEntrySlot( 2581 void MarkCompactCollector::RecordMigratedCodeEntrySlot(
2586 Address code_entry, Address code_entry_slot) { 2582 Address code_entry, Address code_entry_slot,
2583 SlotsBuffer** evacuation_slots_buffer) {
2587 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) { 2584 if (Page::FromAddress(code_entry)->IsEvacuationCandidate()) {
2588 if (parallel_compaction_in_progress_) { 2585 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2589 SlotsBuffer::AddToSynchronized( 2586 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2590 slots_buffer_allocator_, &migration_slots_buffer_,
2591 &migration_slots_buffer_mutex_, SlotsBuffer::CODE_ENTRY_SLOT,
2592 code_entry_slot, SlotsBuffer::IGNORE_OVERFLOW);
2593 } else {
2594 SlotsBuffer::AddTo(slots_buffer_allocator_, &migration_slots_buffer_,
2595 SlotsBuffer::CODE_ENTRY_SLOT, code_entry_slot,
2596 SlotsBuffer::IGNORE_OVERFLOW);
2597 }
2598 }
2599 }
2600
2601
2602 void MarkCompactCollector::RecordMigratedCodeObjectSlot(Address code_object) {
2603 if (parallel_compaction_in_progress_) {
2604 SlotsBuffer::AddToSynchronized(
2605 slots_buffer_allocator_, &migration_slots_buffer_,
2606 &migration_slots_buffer_mutex_, SlotsBuffer::RELOCATED_CODE_OBJECT,
2607 code_object, SlotsBuffer::IGNORE_OVERFLOW);
2608 } else {
2609 SlotsBuffer::AddTo(slots_buffer_allocator_, &migration_slots_buffer_,
2610 SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
2611 SlotsBuffer::IGNORE_OVERFLOW); 2587 SlotsBuffer::IGNORE_OVERFLOW);
2612 } 2588 }
2613 } 2589 }
2614 2590
2615 2591
2592 void MarkCompactCollector::RecordMigratedCodeObjectSlot(
2593 Address code_object, SlotsBuffer** evacuation_slots_buffer) {
2594 SlotsBuffer::AddTo(slots_buffer_allocator_, evacuation_slots_buffer,
2595 SlotsBuffer::RELOCATED_CODE_OBJECT, code_object,
2596 SlotsBuffer::IGNORE_OVERFLOW);
2597 }
2598
2599
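The code-entry and relocated-code-object variants follow the same pattern, except that they append typed entries (CODE_ENTRY_SLOT, RELOCATED_CODE_OBJECT) rather than plain object slots. As a rough illustration of how typed and untyped entries can share one flat buffer, assuming a type tag word followed by the address (the real SlotsBuffer encoding may differ):

    #include <cstdint>
    #include <vector>

    // Minimal stand-in for a typed slots buffer; the two-word encoding below is
    // an assumption for illustration, not the actual V8 layout.
    enum SlotType : std::uintptr_t {
      CODE_ENTRY_SLOT = 1,
      RELOCATED_CODE_OBJECT = 2,
    };

    struct TypedSlotsBuffer {
      std::vector<std::uintptr_t> words;

      // Untyped entries are just the slot address.
      void AddUntyped(std::uintptr_t slot_address) { words.push_back(slot_address); }

      // Typed entries take two words: the type tag, then the address it applies to.
      void AddTyped(SlotType type, std::uintptr_t address) {
        words.push_back(static_cast<std::uintptr_t>(type));
        words.push_back(address);
      }
    };

    int main() {
      TypedSlotsBuffer buffer;
      buffer.AddTyped(CODE_ENTRY_SLOT, 0x1000);
      buffer.AddTyped(RELOCATED_CODE_OBJECT, 0x2000);
      buffer.AddUntyped(0x3000);
      return buffer.words.size() == 5 ? 0 : 1;
    }

For such a buffer to be decodable, the tag values must be distinguishable from real slot addresses; in this toy model small integers work because no valid heap address is that low.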
2616 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) { 2600 static inline SlotsBuffer::SlotType SlotTypeForRMode(RelocInfo::Mode rmode) {
2617 if (RelocInfo::IsCodeTarget(rmode)) { 2601 if (RelocInfo::IsCodeTarget(rmode)) {
2618 return SlotsBuffer::CODE_TARGET_SLOT; 2602 return SlotsBuffer::CODE_TARGET_SLOT;
2619 } else if (RelocInfo::IsCell(rmode)) { 2603 } else if (RelocInfo::IsCell(rmode)) {
2620 return SlotsBuffer::CELL_TARGET_SLOT; 2604 return SlotsBuffer::CELL_TARGET_SLOT;
2621 } else if (RelocInfo::IsEmbeddedObject(rmode)) { 2605 } else if (RelocInfo::IsEmbeddedObject(rmode)) {
2622 return SlotsBuffer::EMBEDDED_OBJECT_SLOT; 2606 return SlotsBuffer::EMBEDDED_OBJECT_SLOT;
2623 } else if (RelocInfo::IsDebugBreakSlot(rmode)) { 2607 } else if (RelocInfo::IsDebugBreakSlot(rmode)) {
2624 return SlotsBuffer::DEBUG_TARGET_SLOT; 2608 return SlotsBuffer::DEBUG_TARGET_SLOT;
2625 } 2609 }
(...skipping 42 matching lines...)
2668 // promotes them to old space. Forwarding address is written directly into 2652 // promotes them to old space. Forwarding address is written directly into
2669 // first word of object without any encoding. If object is dead we write 2653 // first word of object without any encoding. If object is dead we write
2670 // NULL as a forwarding address. 2654 // NULL as a forwarding address.
2671 // 2655 //
2672 // The second pass updates pointers to new space in all spaces. It is possible 2656 // The second pass updates pointers to new space in all spaces. It is possible
2673 // to encounter pointers to dead new space objects during traversal of pointers 2657 // to encounter pointers to dead new space objects during traversal of pointers
2674 // to new space. We should clear them to avoid encountering them during next 2658 // to new space. We should clear them to avoid encountering them during next
2675 // pointer iteration. This is an issue if the store buffer overflows and we 2659 // pointer iteration. This is an issue if the store buffer overflows and we
2676 // have to scan the entire old space, including dead objects, looking for 2660 // have to scan the entire old space, including dead objects, looking for
2677 // pointers to new space. 2661 // pointers to new space.
2678 void MarkCompactCollector::MigrateObject(HeapObject* dst, HeapObject* src, 2662 void MarkCompactCollector::MigrateObject(
2679 int size, AllocationSpace dest) { 2663 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest,
2664 SlotsBuffer** evacuation_slots_buffer) {
2680 Address dst_addr = dst->address(); 2665 Address dst_addr = dst->address();
2681 Address src_addr = src->address(); 2666 Address src_addr = src->address();
2682 DCHECK(heap()->AllowedToBeMigrated(src, dest)); 2667 DCHECK(heap()->AllowedToBeMigrated(src, dest));
2683 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize); 2668 DCHECK(dest != LO_SPACE && size <= Page::kMaxRegularHeapObjectSize);
2684 if (dest == OLD_SPACE) { 2669 if (dest == OLD_SPACE) {
2670 DCHECK(evacuation_slots_buffer != nullptr);
2685 DCHECK(IsAligned(size, kPointerSize)); 2671 DCHECK(IsAligned(size, kPointerSize));
2686 switch (src->ContentType()) { 2672 switch (src->ContentType()) {
2687 case HeapObjectContents::kTaggedValues: 2673 case HeapObjectContents::kTaggedValues:
2688 MigrateObjectTagged(dst, src, size); 2674 MigrateObjectTagged(dst, src, size, evacuation_slots_buffer);
2689 break; 2675 break;
2690 2676
2691 case HeapObjectContents::kMixedValues: 2677 case HeapObjectContents::kMixedValues:
2692 MigrateObjectMixed(dst, src, size); 2678 MigrateObjectMixed(dst, src, size, evacuation_slots_buffer);
2693 break; 2679 break;
2694 2680
2695 case HeapObjectContents::kRawValues: 2681 case HeapObjectContents::kRawValues:
2696 MigrateObjectRaw(dst, src, size); 2682 MigrateObjectRaw(dst, src, size);
2697 break; 2683 break;
2698 } 2684 }
2699 2685
2700 if (compacting_ && dst->IsJSFunction()) { 2686 if (compacting_ && dst->IsJSFunction()) {
2701 Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset; 2687 Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset;
2702 Address code_entry = Memory::Address_at(code_entry_slot); 2688 Address code_entry = Memory::Address_at(code_entry_slot);
2703 RecordMigratedCodeEntrySlot(code_entry, code_entry_slot); 2689 RecordMigratedCodeEntrySlot(code_entry, code_entry_slot,
2690 evacuation_slots_buffer);
2704 } 2691 }
2705 } else if (dest == CODE_SPACE) { 2692 } else if (dest == CODE_SPACE) {
2693 DCHECK(evacuation_slots_buffer != nullptr);
2706 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); 2694 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr));
2707 heap()->MoveBlock(dst_addr, src_addr, size); 2695 heap()->MoveBlock(dst_addr, src_addr, size);
2708 RecordMigratedCodeObjectSlot(dst_addr); 2696 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer);
2709 Code::cast(dst)->Relocate(dst_addr - src_addr); 2697 Code::cast(dst)->Relocate(dst_addr - src_addr);
2710 } else { 2698 } else {
2699 DCHECK(evacuation_slots_buffer == nullptr);
2711 DCHECK(dest == NEW_SPACE); 2700 DCHECK(dest == NEW_SPACE);
2712 heap()->MoveBlock(dst_addr, src_addr, size); 2701 heap()->MoveBlock(dst_addr, src_addr, size);
2713 } 2702 }
2714 heap()->OnMoveEvent(dst, src, size); 2703 heap()->OnMoveEvent(dst, src, size);
2715 Memory::Address_at(src_addr) = dst_addr; 2704 Memory::Address_at(src_addr) = dst_addr;
2716 } 2705 }
2717 2706
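MigrateObject now receives the destination buffer explicitly, and the new DCHECKs spell out the contract: OLD_SPACE and CODE_SPACE migrations must supply a buffer so their recorded slots can be replayed after evacuation, while NEW_SPACE migrations must not, since new-space pointers are tracked through the store buffer instead. A compact model of just that contract (the copy step and enum values are placeholders, not the V8 implementation):

    #include <cassert>
    #include <cstring>
    #include <vector>

    enum AllocationSpace { NEW_SPACE, OLD_SPACE, CODE_SPACE };
    struct SlotsChunk { std::vector<void*> slots; };

    void MigrateObject(void* dst, const void* src, int size, AllocationSpace dest,
                       SlotsChunk* evacuation_slots_buffer) {
      if (dest == OLD_SPACE || dest == CODE_SPACE) {
        // Slots recorded here are replayed after evacuation, so a buffer is required.
        assert(evacuation_slots_buffer != nullptr);
      } else {
        // New-space pointers are tracked through the store buffer instead.
        assert(dest == NEW_SPACE);
        assert(evacuation_slots_buffer == nullptr);
      }
      std::memcpy(dst, src, static_cast<std::size_t>(size));  // slot recording omitted
    }

    int main() {
      char src[16] = {0};
      char dst[16];
      SlotsChunk buffer;
      MigrateObject(dst, src, sizeof(src), OLD_SPACE, &buffer);
      MigrateObject(dst, src, sizeof(src), NEW_SPACE, nullptr);
      return 0;
    }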
2718 2707
2719 void MarkCompactCollector::MigrateObjectTagged(HeapObject* dst, HeapObject* src, 2708 void MarkCompactCollector::MigrateObjectTagged(
2720 int size) { 2709 HeapObject* dst, HeapObject* src, int size,
2710 SlotsBuffer** evacuation_slots_buffer) {
2721 Address src_slot = src->address(); 2711 Address src_slot = src->address();
2722 Address dst_slot = dst->address(); 2712 Address dst_slot = dst->address();
2723 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { 2713 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2724 Object* value = Memory::Object_at(src_slot); 2714 Object* value = Memory::Object_at(src_slot);
2725 Memory::Object_at(dst_slot) = value; 2715 Memory::Object_at(dst_slot) = value;
2726 RecordMigratedSlot(value, dst_slot); 2716 RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
2727 src_slot += kPointerSize; 2717 src_slot += kPointerSize;
2728 dst_slot += kPointerSize; 2718 dst_slot += kPointerSize;
2729 } 2719 }
2730 } 2720 }
2731 2721
2732 2722
2733 void MarkCompactCollector::MigrateObjectMixed(HeapObject* dst, HeapObject* src, 2723 void MarkCompactCollector::MigrateObjectMixed(
2734 int size) { 2724 HeapObject* dst, HeapObject* src, int size,
2725 SlotsBuffer** evacuation_slots_buffer) {
2735 if (src->IsFixedTypedArrayBase()) { 2726 if (src->IsFixedTypedArrayBase()) {
2736 heap()->MoveBlock(dst->address(), src->address(), size); 2727 heap()->MoveBlock(dst->address(), src->address(), size);
2737 Address base_pointer_slot = 2728 Address base_pointer_slot =
2738 dst->address() + FixedTypedArrayBase::kBasePointerOffset; 2729 dst->address() + FixedTypedArrayBase::kBasePointerOffset;
2739 RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot); 2730 RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot,
2731 evacuation_slots_buffer);
2740 } else if (src->IsBytecodeArray()) { 2732 } else if (src->IsBytecodeArray()) {
2741 heap()->MoveBlock(dst->address(), src->address(), size); 2733 heap()->MoveBlock(dst->address(), src->address(), size);
2742 Address constant_pool_slot = 2734 Address constant_pool_slot =
2743 dst->address() + BytecodeArray::kConstantPoolOffset; 2735 dst->address() + BytecodeArray::kConstantPoolOffset;
2744 RecordMigratedSlot(Memory::Object_at(constant_pool_slot), 2736 RecordMigratedSlot(Memory::Object_at(constant_pool_slot),
2745 constant_pool_slot); 2737 constant_pool_slot, evacuation_slots_buffer);
2746 } else if (src->IsJSArrayBuffer()) { 2738 } else if (src->IsJSArrayBuffer()) {
2747 heap()->MoveBlock(dst->address(), src->address(), size); 2739 heap()->MoveBlock(dst->address(), src->address(), size);
2748 2740
2749 // Visit inherited JSObject properties and byte length of ArrayBuffer 2741 // Visit inherited JSObject properties and byte length of ArrayBuffer
2750 Address regular_slot = 2742 Address regular_slot =
2751 dst->address() + JSArrayBuffer::BodyDescriptor::kStartOffset; 2743 dst->address() + JSArrayBuffer::BodyDescriptor::kStartOffset;
2752 Address regular_slots_end = 2744 Address regular_slots_end =
2753 dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize; 2745 dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize;
2754 while (regular_slot < regular_slots_end) { 2746 while (regular_slot < regular_slots_end) {
2755 RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot); 2747 RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot,
2748 evacuation_slots_buffer);
2756 regular_slot += kPointerSize; 2749 regular_slot += kPointerSize;
2757 } 2750 }
2758 2751
2759 // Skip backing store and visit just internal fields 2752 // Skip backing store and visit just internal fields
2760 Address internal_field_slot = dst->address() + JSArrayBuffer::kSize; 2753 Address internal_field_slot = dst->address() + JSArrayBuffer::kSize;
2761 Address internal_fields_end = 2754 Address internal_fields_end =
2762 dst->address() + JSArrayBuffer::kSizeWithInternalFields; 2755 dst->address() + JSArrayBuffer::kSizeWithInternalFields;
2763 while (internal_field_slot < internal_fields_end) { 2756 while (internal_field_slot < internal_fields_end) {
2764 RecordMigratedSlot(Memory::Object_at(internal_field_slot), 2757 RecordMigratedSlot(Memory::Object_at(internal_field_slot),
2765 internal_field_slot); 2758 internal_field_slot, evacuation_slots_buffer);
2766 internal_field_slot += kPointerSize; 2759 internal_field_slot += kPointerSize;
2767 } 2760 }
2768 } else if (FLAG_unbox_double_fields) { 2761 } else if (FLAG_unbox_double_fields) {
2769 Address dst_addr = dst->address(); 2762 Address dst_addr = dst->address();
2770 Address src_addr = src->address(); 2763 Address src_addr = src->address();
2771 Address src_slot = src_addr; 2764 Address src_slot = src_addr;
2772 Address dst_slot = dst_addr; 2765 Address dst_slot = dst_addr;
2773 2766
2774 LayoutDescriptorHelper helper(src->map()); 2767 LayoutDescriptorHelper helper(src->map());
2775 DCHECK(!helper.all_fields_tagged()); 2768 DCHECK(!helper.all_fields_tagged());
2776 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { 2769 for (int remaining = size / kPointerSize; remaining > 0; remaining--) {
2777 Object* value = Memory::Object_at(src_slot); 2770 Object* value = Memory::Object_at(src_slot);
2778 2771
2779 Memory::Object_at(dst_slot) = value; 2772 Memory::Object_at(dst_slot) = value;
2780 2773
2781 if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) { 2774 if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) {
2782 RecordMigratedSlot(value, dst_slot); 2775 RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer);
2783 } 2776 }
2784 2777
2785 src_slot += kPointerSize; 2778 src_slot += kPointerSize;
2786 dst_slot += kPointerSize; 2779 dst_slot += kPointerSize;
2787 } 2780 }
2788 } else { 2781 } else {
2789 UNREACHABLE(); 2782 UNREACHABLE();
2790 } 2783 }
2791 } 2784 }
2792 2785
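For objects with mixed contents (the FLAG_unbox_double_fields path above), only the word offsets that the layout descriptor reports as tagged are recorded; raw double fields are copied but never enter the slots buffer. A toy version of that selective recording, using a bitmask in place of LayoutDescriptorHelper (illustrative only):

    #include <cstdint>
    #include <vector>

    // A bitmask says which pointer-sized words of the object hold tagged values.
    void MigrateMixedObject(std::uintptr_t* dst, const std::uintptr_t* src,
                            int size_in_words, std::uint32_t tagged_mask,
                            std::vector<std::uintptr_t*>* recorded_slots) {
      for (int i = 0; i < size_in_words; i++) {
        dst[i] = src[i];                          // every word is copied
        if (tagged_mask & (1u << i)) {
          recorded_slots->push_back(&dst[i]);     // only tagged words are recorded
        }
      }
    }

    int main() {
      std::uintptr_t src[4] = {1, 2, 3, 4};
      std::uintptr_t dst[4];
      std::vector<std::uintptr_t*> recorded;
      MigrateMixedObject(dst, src, 4, /*tagged_mask=*/0x5, &recorded);  // words 0 and 2
      return recorded.size() == 2 ? 0 : 1;
    }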
(...skipping 294 matching lines...)
3087 bool MarkCompactCollector::TryPromoteObject(HeapObject* object, 3080 bool MarkCompactCollector::TryPromoteObject(HeapObject* object,
3088 int object_size) { 3081 int object_size) {
3089 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); 3082 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
3090 3083
3091 OldSpace* old_space = heap()->old_space(); 3084 OldSpace* old_space = heap()->old_space();
3092 3085
3093 HeapObject* target = nullptr; 3086 HeapObject* target = nullptr;
3094 AllocationAlignment alignment = object->RequiredAlignment(); 3087 AllocationAlignment alignment = object->RequiredAlignment();
3095 AllocationResult allocation = old_space->AllocateRaw(object_size, alignment); 3088 AllocationResult allocation = old_space->AllocateRaw(object_size, alignment);
3096 if (allocation.To(&target)) { 3089 if (allocation.To(&target)) {
3097 MigrateObject(target, object, object_size, old_space->identity()); 3090 MigrateObject(target, object, object_size, old_space->identity(),
3091 &migration_slots_buffer_);
3098 // If we end up needing more special cases, we should factor this out. 3092 // If we end up needing more special cases, we should factor this out.
3099 if (V8_UNLIKELY(target->IsJSArrayBuffer())) { 3093 if (V8_UNLIKELY(target->IsJSArrayBuffer())) {
3100 heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target)); 3094 heap()->array_buffer_tracker()->Promote(JSArrayBuffer::cast(target));
3101 } 3095 }
3102 heap()->IncrementPromotedObjectsSize(object_size); 3096 heap()->IncrementPromotedObjectsSize(object_size);
3103 return true; 3097 return true;
3104 } 3098 }
3105 3099
3106 return false; 3100 return false;
3107 } 3101 }
(...skipping 211 matching lines...)
3319 while (it.has_next()) { 3313 while (it.has_next()) {
3320 NewSpacePage* p = it.next(); 3314 NewSpacePage* p = it.next();
3321 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p); 3315 survivors_size += DiscoverAndEvacuateBlackObjectsOnPage(new_space, p);
3322 } 3316 }
3323 3317
3324 heap_->IncrementYoungSurvivorsCounter(survivors_size); 3318 heap_->IncrementYoungSurvivorsCounter(survivors_size);
3325 new_space->set_age_mark(new_space->top()); 3319 new_space->set_age_mark(new_space->top());
3326 } 3320 }
3327 3321
3328 3322
3323 void MarkCompactCollector::AddEvacuationSlotsBufferSynchronized(
3324 SlotsBuffer* evacuation_slots_buffer) {
3325 base::LockGuard<base::Mutex> lock_guard(&evacuation_slots_buffers_mutex_);
3326 evacuation_slots_buffers_.Add(evacuation_slots_buffer);
3327 }
3328
3329
3329 bool MarkCompactCollector::EvacuateLiveObjectsFromPage( 3330 bool MarkCompactCollector::EvacuateLiveObjectsFromPage(
3330 Page* p, PagedSpace* target_space) { 3331 Page* p, PagedSpace* target_space, SlotsBuffer** evacuation_slots_buffer) {
3331 AlwaysAllocateScope always_allocate(isolate()); 3332 AlwaysAllocateScope always_allocate(isolate());
3332 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept()); 3333 DCHECK(p->IsEvacuationCandidate() && !p->WasSwept());
3333 3334
3334 int offsets[16]; 3335 int offsets[16];
3335 3336
3336 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) { 3337 for (MarkBitCellIterator it(p); !it.Done(); it.Advance()) {
3337 Address cell_base = it.CurrentCellBase(); 3338 Address cell_base = it.CurrentCellBase();
3338 MarkBit::CellType* cell = it.CurrentCell(); 3339 MarkBit::CellType* cell = it.CurrentCell();
3339 3340
3340 if (*cell == 0) continue; 3341 if (*cell == 0) continue;
3341 3342
3342 int live_objects = MarkWordToObjectStarts(*cell, offsets); 3343 int live_objects = MarkWordToObjectStarts(*cell, offsets);
3343 for (int i = 0; i < live_objects; i++) { 3344 for (int i = 0; i < live_objects; i++) {
3344 Address object_addr = cell_base + offsets[i] * kPointerSize; 3345 Address object_addr = cell_base + offsets[i] * kPointerSize;
3345 HeapObject* object = HeapObject::FromAddress(object_addr); 3346 HeapObject* object = HeapObject::FromAddress(object_addr);
3346 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object))); 3347 DCHECK(Marking::IsBlack(Marking::MarkBitFrom(object)));
3347 3348
3348 int size = object->Size(); 3349 int size = object->Size();
3349 AllocationAlignment alignment = object->RequiredAlignment(); 3350 AllocationAlignment alignment = object->RequiredAlignment();
3350 HeapObject* target_object = nullptr; 3351 HeapObject* target_object = nullptr;
3351 AllocationResult allocation = target_space->AllocateRaw(size, alignment); 3352 AllocationResult allocation = target_space->AllocateRaw(size, alignment);
3352 if (!allocation.To(&target_object)) { 3353 if (!allocation.To(&target_object)) {
3353 return false; 3354 return false;
3354 } 3355 }
3355 MigrateObject(target_object, object, size, target_space->identity()); 3356
3357 MigrateObject(target_object, object, size, target_space->identity(),
3358 evacuation_slots_buffer);
3356 DCHECK(object->map_word().IsForwardingAddress()); 3359 DCHECK(object->map_word().IsForwardingAddress());
3357 } 3360 }
3358 3361
3359 // Clear marking bits for current cell. 3362 // Clear marking bits for current cell.
3360 *cell = 0; 3363 *cell = 0;
3361 } 3364 }
3362 p->ResetLiveBytes(); 3365 p->ResetLiveBytes();
3363 return true; 3366 return true;
3364 } 3367 }
3365 3368
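EvacuateLiveObjectsFromPage walks the page's mark-bit cells, turns each nonzero cell into object start offsets via MarkWordToObjectStarts, and migrates every live object into the target compaction space, now passing the caller's slots buffer along. A simplified, standalone version of the bit-to-offset step, assuming one mark bit per pointer-sized word (the real function is more involved):

    #include <cstdint>

    // Writes the word offsets of all set bits in one mark-bit cell into
    // `offsets` and returns how many live object starts were found.
    int MarkWordToObjectStarts(std::uint32_t cell, int* offsets) {
      int count = 0;
      for (int index = 0; index < 32; index++) {
        if (cell & (1u << index)) offsets[count++] = index;
      }
      return count;
    }

    int main() {
      int offsets[32];
      int live_objects = MarkWordToObjectStarts(0x92u, offsets);  // bits 1, 4, 7 set
      return (live_objects == 3 && offsets[0] == 1 && offsets[1] == 4 &&
              offsets[2] == 7) ? 0 : 1;
    }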
(...skipping 23 matching lines...)
3389 // Kick off parallel tasks. 3392 // Kick off parallel tasks.
3390 for (int i = 1; i < num_tasks; i++) { 3393 for (int i = 1; i < num_tasks; i++) {
3391 concurrent_compaction_tasks_active_++; 3394 concurrent_compaction_tasks_active_++;
3392 V8::GetCurrentPlatform()->CallOnBackgroundThread( 3395 V8::GetCurrentPlatform()->CallOnBackgroundThread(
3393 new CompactionTask(heap(), compaction_spaces_for_tasks[i]), 3396 new CompactionTask(heap(), compaction_spaces_for_tasks[i]),
3394 v8::Platform::kShortRunningTask); 3397 v8::Platform::kShortRunningTask);
3395 } 3398 }
3396 3399
3397 // Contribute in main thread. Counter and signal are in principle not needed. 3400 // Contribute in main thread. Counter and signal are in principle not needed.
3398 concurrent_compaction_tasks_active_++; 3401 concurrent_compaction_tasks_active_++;
3399 EvacuatePages(compaction_spaces_for_tasks[0]); 3402 EvacuatePages(compaction_spaces_for_tasks[0], &migration_slots_buffer_);
3400 pending_compaction_tasks_semaphore_.Signal(); 3403 pending_compaction_tasks_semaphore_.Signal();
3401 3404
3402 WaitUntilCompactionCompleted(); 3405 WaitUntilCompactionCompleted();
3403 3406
3404 // Merge back memory (compacted and unused) from compaction spaces. 3407 // Merge back memory (compacted and unused) from compaction spaces.
3405 for (int i = 0; i < num_tasks; i++) { 3408 for (int i = 0; i < num_tasks; i++) {
3406 heap()->old_space()->MergeCompactionSpace( 3409 heap()->old_space()->MergeCompactionSpace(
3407 compaction_spaces_for_tasks[i]->Get(OLD_SPACE)); 3410 compaction_spaces_for_tasks[i]->Get(OLD_SPACE));
3408 heap()->code_space()->MergeCompactionSpace( 3411 heap()->code_space()->MergeCompactionSpace(
3409 compaction_spaces_for_tasks[i]->Get(CODE_SPACE)); 3412 compaction_spaces_for_tasks[i]->Get(CODE_SPACE));
(...skipping 55 matching lines...)
3465 3468
3466 void MarkCompactCollector::WaitUntilCompactionCompleted() { 3469 void MarkCompactCollector::WaitUntilCompactionCompleted() {
3467 while (concurrent_compaction_tasks_active_-- > 0) { 3470 while (concurrent_compaction_tasks_active_-- > 0) {
3468 pending_compaction_tasks_semaphore_.Wait(); 3471 pending_compaction_tasks_semaphore_.Wait();
3469 } 3472 }
3470 parallel_compaction_in_progress_ = false; 3473 parallel_compaction_in_progress_ = false;
3471 } 3474 }
3472 3475
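EvacuatePagesInParallel spawns num_tasks - 1 background CompactionTasks, contributes on the main thread with compaction_spaces_for_tasks[0] and migration_slots_buffer_, and then WaitUntilCompactionCompleted blocks on the semaphore once per task before the per-task compaction spaces are merged back. A rough model of that fork-join shape in standard C++20; task bodies and the merging step are stubbed out, and none of this is V8 code:

    #include <semaphore>
    #include <thread>
    #include <vector>

    int main() {
      const int num_tasks = 4;
      std::counting_semaphore<> pending_tasks(0);
      std::vector<std::thread> workers;

      // Kick off num_tasks - 1 background tasks; each signals when it is done.
      for (int i = 1; i < num_tasks; i++) {
        workers.emplace_back([&pending_tasks] {
          // ...evacuate pages into a task-local compaction space and slots buffer...
          pending_tasks.release();
        });
      }

      // Contribute on the main thread, then signal for symmetry with the workers.
      // ...evacuate pages using the main thread's spaces and migration buffer...
      pending_tasks.release();

      // Wait until every contribution (including the main thread's) has signalled.
      for (int i = 0; i < num_tasks; i++) {
        pending_tasks.acquire();
      }

      for (std::thread& worker : workers) worker.join();
      // At this point the per-task compaction spaces would be merged back.
      return 0;
    }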
3473 3476
3474 void MarkCompactCollector::EvacuatePages( 3477 void MarkCompactCollector::EvacuatePages(
3475 CompactionSpaceCollection* compaction_spaces) { 3478 CompactionSpaceCollection* compaction_spaces,
3479 SlotsBuffer** evacuation_slots_buffer) {
3476 for (int i = 0; i < evacuation_candidates_.length(); i++) { 3480 for (int i = 0; i < evacuation_candidates_.length(); i++) {
3477 Page* p = evacuation_candidates_[i]; 3481 Page* p = evacuation_candidates_[i];
3478 DCHECK(p->IsEvacuationCandidate() || 3482 DCHECK(p->IsEvacuationCandidate() ||
3479 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3483 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3480 DCHECK(static_cast<int>(p->parallel_sweeping()) == 3484 DCHECK(static_cast<int>(p->parallel_sweeping()) ==
3481 MemoryChunk::SWEEPING_DONE); 3485 MemoryChunk::SWEEPING_DONE);
3482 if (p->parallel_compaction_state().TrySetValue( 3486 if (p->parallel_compaction_state().TrySetValue(
3483 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) { 3487 MemoryChunk::kCompactingDone, MemoryChunk::kCompactingInProgress)) {
3484 if (p->IsEvacuationCandidate()) { 3488 if (p->IsEvacuationCandidate()) {
3485 DCHECK_EQ(p->parallel_compaction_state().Value(), 3489 DCHECK_EQ(p->parallel_compaction_state().Value(),
3486 MemoryChunk::kCompactingInProgress); 3490 MemoryChunk::kCompactingInProgress);
3487 if (EvacuateLiveObjectsFromPage( 3491 if (EvacuateLiveObjectsFromPage(
3488 p, compaction_spaces->Get(p->owner()->identity()))) { 3492 p, compaction_spaces->Get(p->owner()->identity()),
3493 evacuation_slots_buffer)) {
3489 p->parallel_compaction_state().SetValue( 3494 p->parallel_compaction_state().SetValue(
3490 MemoryChunk::kCompactingFinalize); 3495 MemoryChunk::kCompactingFinalize);
3491 } else { 3496 } else {
3492 p->parallel_compaction_state().SetValue( 3497 p->parallel_compaction_state().SetValue(
3493 MemoryChunk::kCompactingAborted); 3498 MemoryChunk::kCompactingAborted);
3494 } 3499 }
3495 } else { 3500 } else {
3496 // There could be popular pages in the list of evacuation candidates 3501 // There could be popular pages in the list of evacuation candidates
3497 // which we do compact. 3502 // which we do compact.
3498 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone); 3503 p->parallel_compaction_state().SetValue(MemoryChunk::kCompactingDone);
(...skipping 181 matching lines...)
3680 EvacuateNewSpace(); 3685 EvacuateNewSpace();
3681 } 3686 }
3682 3687
3683 { 3688 {
3684 GCTracer::Scope gc_scope(heap()->tracer(), 3689 GCTracer::Scope gc_scope(heap()->tracer(),
3685 GCTracer::Scope::MC_EVACUATE_PAGES); 3690 GCTracer::Scope::MC_EVACUATE_PAGES);
3686 EvacuationScope evacuation_scope(this); 3691 EvacuationScope evacuation_scope(this);
3687 EvacuatePagesInParallel(); 3692 EvacuatePagesInParallel();
3688 } 3693 }
3689 3694
3695 {
3696 GCTracer::Scope gc_scope(heap()->tracer(),
3697 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3698 UpdateSlotsRecordedIn(migration_slots_buffer_);
3699 if (FLAG_trace_fragmentation_verbose) {
3700 PrintF(" migration slots buffer: %d\n",
3701 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3702 }
3703 slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
3704 DCHECK(migration_slots_buffer_ == NULL);
3705
3706 // TODO(hpayer): Process the slots buffers in parallel. This has to be done
3707 // after evacuation of all pages finishes.
3708 int buffers = evacuation_slots_buffers_.length();
3709 for (int i = 0; i < buffers; i++) {
3710 SlotsBuffer* buffer = evacuation_slots_buffers_[i];
3711 UpdateSlotsRecordedIn(buffer);
3712 slots_buffer_allocator_->DeallocateChain(&buffer);
3713 }
3714 evacuation_slots_buffers_.Rewind(0);
3715 }
3716
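The new block above is the consumer side of the hand-off: once all tasks have finished, the main thread replays the slots recorded in migration_slots_buffer_, then each buffer registered through AddEvacuationSlotsBufferSynchronized, deallocating every chain and rewinding the list before the remaining pointer-updating passes run. A sketch of that drain step with placeholder types (not the V8 SlotsBuffer API):

    #include <vector>

    struct SlotChunk { std::vector<void*> slots; };

    // Stand-in for UpdateSlotsRecordedIn: rewrite each pointer through its slot.
    void UpdateSlots(SlotChunk* buffer) {
      for (void* slot : buffer->slots) { (void)slot; }
    }

    void DrainEvacuationSlotsBuffers(SlotChunk** migration_buffer,
                                     std::vector<SlotChunk*>* evacuation_buffers) {
      // Main-thread buffer first, mirroring UpdateSlotsRecordedIn(migration_slots_buffer_).
      if (*migration_buffer != nullptr) {
        UpdateSlots(*migration_buffer);
        delete *migration_buffer;
        *migration_buffer = nullptr;
      }
      // Then every buffer the compaction tasks registered.
      for (SlotChunk* buffer : *evacuation_buffers) {
        UpdateSlots(buffer);
        delete buffer;
      }
      evacuation_buffers->clear();  // analogous to evacuation_slots_buffers_.Rewind(0)
    }

    int main() {
      SlotChunk* migration = new SlotChunk();
      std::vector<SlotChunk*> evacuation = {new SlotChunk(), new SlotChunk()};
      DrainEvacuationSlotsBuffers(&migration, &evacuation);
      return (migration == nullptr && evacuation.empty()) ? 0 : 1;
    }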
3690 // Second pass: find pointers to new space and update them. 3717 // Second pass: find pointers to new space and update them.
3691 PointersUpdatingVisitor updating_visitor(heap()); 3718 PointersUpdatingVisitor updating_visitor(heap());
3692 3719
3693 { 3720 {
3694 GCTracer::Scope gc_scope(heap()->tracer(), 3721 GCTracer::Scope gc_scope(heap()->tracer(),
3695 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS); 3722 GCTracer::Scope::MC_UPDATE_NEW_TO_NEW_POINTERS);
3696 // Update pointers in to space. 3723 // Update pointers in to space.
3697 SemiSpaceIterator to_it(heap()->new_space()); 3724 SemiSpaceIterator to_it(heap()->new_space());
3698 for (HeapObject* object = to_it.Next(); object != NULL; 3725 for (HeapObject* object = to_it.Next(); object != NULL;
3699 object = to_it.Next()) { 3726 object = to_it.Next()) {
(...skipping 11 matching lines...)
3711 } 3738 }
3712 3739
3713 { 3740 {
3714 GCTracer::Scope gc_scope(heap()->tracer(), 3741 GCTracer::Scope gc_scope(heap()->tracer(),
3715 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS); 3742 GCTracer::Scope::MC_UPDATE_OLD_TO_NEW_POINTERS);
3716 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(), 3743 StoreBufferRebuildScope scope(heap_, heap_->store_buffer(),
3717 &Heap::ScavengeStoreBufferCallback); 3744 &Heap::ScavengeStoreBufferCallback);
3718 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer); 3745 heap_->store_buffer()->IteratePointersToNewSpace(&UpdatePointer);
3719 } 3746 }
3720 3747
3721 {
3722 GCTracer::Scope gc_scope(heap()->tracer(),
3723 GCTracer::Scope::MC_UPDATE_POINTERS_TO_EVACUATED);
3724 UpdateSlotsRecordedIn(migration_slots_buffer_);
3725 if (FLAG_trace_fragmentation_verbose) {
3726 PrintF(" migration slots buffer: %d\n",
3727 SlotsBuffer::SizeOfChain(migration_slots_buffer_));
3728 }
3729 }
3730
3731 int npages = evacuation_candidates_.length(); 3748 int npages = evacuation_candidates_.length();
3732 { 3749 {
3733 GCTracer::Scope gc_scope( 3750 GCTracer::Scope gc_scope(
3734 heap()->tracer(), 3751 heap()->tracer(),
3735 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED); 3752 GCTracer::Scope::MC_UPDATE_POINTERS_BETWEEN_EVACUATED);
3736 for (int i = 0; i < npages; i++) { 3753 for (int i = 0; i < npages; i++) {
3737 Page* p = evacuation_candidates_[i]; 3754 Page* p = evacuation_candidates_[i];
3738 DCHECK(p->IsEvacuationCandidate() || 3755 DCHECK(p->IsEvacuationCandidate() ||
3739 p->IsFlagSet(Page::RESCAN_ON_EVACUATION)); 3756 p->IsFlagSet(Page::RESCAN_ON_EVACUATION));
3740 3757
(...skipping 57 matching lines...)
3798 3815
3799 // Update pointers from external string table. 3816 // Update pointers from external string table.
3800 heap_->UpdateReferencesInExternalStringTable( 3817 heap_->UpdateReferencesInExternalStringTable(
3801 &UpdateReferenceInExternalStringTableEntry); 3818 &UpdateReferenceInExternalStringTableEntry);
3802 3819
3803 EvacuationWeakObjectRetainer evacuation_object_retainer; 3820 EvacuationWeakObjectRetainer evacuation_object_retainer;
3804 heap()->ProcessAllWeakReferences(&evacuation_object_retainer); 3821 heap()->ProcessAllWeakReferences(&evacuation_object_retainer);
3805 3822
3806 heap_->isolate()->inner_pointer_to_code_cache()->Flush(); 3823 heap_->isolate()->inner_pointer_to_code_cache()->Flush();
3807 3824
3808 slots_buffer_allocator_->DeallocateChain(&migration_slots_buffer_);
3809 DCHECK(migration_slots_buffer_ == NULL);
3810
3811 // The hashing of weak_object_to_code_table is no longer valid. 3825 // The hashing of weak_object_to_code_table is no longer valid.
3812 heap()->weak_object_to_code_table()->Rehash( 3826 heap()->weak_object_to_code_table()->Rehash(
3813 heap()->isolate()->factory()->undefined_value()); 3827 heap()->isolate()->factory()->undefined_value());
3814 } 3828 }
3815 3829
3816 3830
3817 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() { 3831 void MarkCompactCollector::MoveEvacuationCandidatesToEndOfPagesList() {
3818 int npages = evacuation_candidates_.length(); 3832 int npages = evacuation_candidates_.length();
3819 for (int i = 0; i < npages; i++) { 3833 for (int i = 0; i < npages; i++) {
3820 Page* p = evacuation_candidates_[i]; 3834 Page* p = evacuation_candidates_[i];
(...skipping 748 matching lines...)
4569 MarkBit mark_bit = Marking::MarkBitFrom(host); 4583 MarkBit mark_bit = Marking::MarkBitFrom(host);
4570 if (Marking::IsBlack(mark_bit)) { 4584 if (Marking::IsBlack(mark_bit)) {
4571 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); 4585 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host);
4572 RecordRelocSlot(&rinfo, target); 4586 RecordRelocSlot(&rinfo, target);
4573 } 4587 }
4574 } 4588 }
4575 } 4589 }
4576 4590
4577 } // namespace internal 4591 } // namespace internal
4578 } // namespace v8 4592 } // namespace v8