OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/mark-compact.h" | 5 #include "src/heap/mark-compact.h" |
6 | 6 |
7 #include "src/base/atomicops.h" | 7 #include "src/base/atomicops.h" |
8 #include "src/base/bits.h" | 8 #include "src/base/bits.h" |
9 #include "src/base/sys-info.h" | 9 #include "src/base/sys-info.h" |
10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
(...skipping 2603 matching lines...) |
2614 bool success = SlotsBuffer::AddTo( | 2614 bool success = SlotsBuffer::AddTo( |
2615 slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type, | 2615 slots_buffer_allocator_, target_page->slots_buffer_address(), slot_type, |
2616 addr, SlotsBuffer::FAIL_ON_OVERFLOW); | 2616 addr, SlotsBuffer::FAIL_ON_OVERFLOW); |
2617 if (!success) { | 2617 if (!success) { |
2618 EvictPopularEvacuationCandidate(target_page); | 2618 EvictPopularEvacuationCandidate(target_page); |
2619 } | 2619 } |
2620 } | 2620 } |
2621 } | 2621 } |
2622 | 2622 |
2623 | 2623 |
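The context above shows the collector's response when slot recording overflows: AddTo fails rather than growing the buffer, and the too-popular target page is evicted from the set of evacuation candidates. A minimal self-contained sketch of that pattern, using made-up simplified types rather than V8's real SlotsBuffer API:

    // Toy model: a bounded slot buffer that refuses to grow (FAIL_ON_OVERFLOW),
    // and an eviction path that abandons compaction of an over-referenced page.
    #include <cstddef>
    #include <vector>

    struct Page {
      static constexpr std::size_t kMaxSlots = 4;  // tiny cap to force overflow
      std::vector<void*> slots;                    // stand-in for SlotsBuffer
      bool evacuation_candidate = true;
    };

    // Returns false instead of growing past the cap.
    bool AddToSlotsBuffer(Page* page, void* slot) {
      if (page->slots.size() >= Page::kMaxSlots) return false;
      page->slots.push_back(slot);
      return true;
    }

    // Too many pointers into this page: recording them all costs more than
    // simply not moving the page, so stop treating it as a candidate.
    void EvictPopularEvacuationCandidate(Page* page) {
      page->evacuation_candidate = false;
      page->slots.clear();
    }

    void RecordSlot(Page* target_page, void* slot) {
      if (!AddToSlotsBuffer(target_page, slot)) {
        EvictPopularEvacuationCandidate(target_page);
      }
    }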
| 2624 class RecordMigratedSlotVisitor final : public ObjectVisitor { |
| 2625 public: |
| 2626 RecordMigratedSlotVisitor(MarkCompactCollector* collector, |
| 2627 SlotsBuffer** evacuation_slots_buffer) |
| 2628 : collector_(collector), |
| 2629 evacuation_slots_buffer_(evacuation_slots_buffer) {} |
| 2630 |
| 2631 V8_INLINE void VisitPointer(Object** p) override { |
| 2632 collector_->RecordMigratedSlot(*p, reinterpret_cast<Address>(p), |
| 2633 evacuation_slots_buffer_); |
| 2634 } |
| 2635 |
| 2636 V8_INLINE void VisitPointers(Object** start, Object** end) override { |
| 2637 while (start < end) { |
| 2638 collector_->RecordMigratedSlot(*start, reinterpret_cast<Address>(start), |
| 2639 evacuation_slots_buffer_); |
| 2640 ++start; |
| 2641 } |
| 2642 } |
| 2643 |
| 2644 V8_INLINE void VisitCodeEntry(Address code_entry_slot) override { |
| 2645 if (collector_->compacting_) { |
| 2646 Address code_entry = Memory::Address_at(code_entry_slot); |
| 2647 collector_->RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, |
| 2648 evacuation_slots_buffer_); |
| 2649 } |
| 2650 } |
| 2651 |
| 2652 private: |
| 2653 MarkCompactCollector* collector_; |
| 2654 SlotsBuffer** evacuation_slots_buffer_; |
| 2655 }; |
| 2656 |
| 2657 |
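The new RecordMigratedSlotVisitor is the heart of this patch: instead of three hand-written migration paths that each re-derive which words of an object are tagged (see the deleted MigrateObjectTagged/Mixed/Raw further down), the migrated object's own body iteration reports its pointer slots to a single visitor. A simplified sketch of the idea, with hypothetical toy types standing in for V8's ObjectVisitor and HeapObject:

    #include <cstdint>
    #include <vector>

    using Address = uintptr_t;

    class ObjectVisitor {
     public:
      virtual ~ObjectVisitor() = default;
      virtual void VisitPointer(Address slot) = 0;
      virtual void VisitPointers(Address start, Address end) {
        for (Address s = start; s < end; s += sizeof(Address)) VisitPointer(s);
      }
    };

    // Analog of RecordMigratedSlotVisitor: records every tagged slot the
    // object reports, with no per-type layout knowledge of its own.
    class RecordSlotVisitor final : public ObjectVisitor {
     public:
      void VisitPointer(Address slot) override { slots_.push_back(slot); }
      const std::vector<Address>& slots() const { return slots_; }

     private:
      std::vector<Address> slots_;
    };

    // Toy object with tagged fields around a raw payload; its body
    // iteration visits only the tagged words, mirroring dst->IterateBody.
    struct ToyObject {
      Address tagged_a;
      uint64_t raw_payload[2];  // skipped during iteration
      Address tagged_b;

      void IterateBody(ObjectVisitor* v) {
        v->VisitPointer(reinterpret_cast<Address>(&tagged_a));
        v->VisitPointer(reinterpret_cast<Address>(&tagged_b));
      }
    };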
2624 // We scavenge new space simultaneously with sweeping. This is done in two | 2658 // We scavenge new space simultaneously with sweeping. This is done in two |
2625 // passes. | 2659 // passes. |
2626 // | 2660 // |
2627 // The first pass migrates all alive objects from one semispace to another or | 2661 // The first pass migrates all alive objects from one semispace to another or |
2628 // promotes them to old space. The forwarding address is written directly | 2662 // promotes them to old space. The forwarding address is written directly |
2629 // into the first word of the object without any encoding. If the object | 2663 // into the first word of the object without any encoding. If the object |
2630 // is dead, we write NULL as the forwarding address. | 2664 // is dead, we write NULL as the forwarding address. |
2631 // | 2665 // |
2632 // The second pass updates pointers to new space in all spaces. It is possible | 2666 // The second pass updates pointers to new space in all spaces. It is possible |
2633 // to encounter pointers to dead new space objects during traversal of pointers | 2667 // to encounter pointers to dead new space objects during traversal of pointers |
2634 // to new space. We should clear them to avoid encountering them during the | 2668 // to new space. We should clear them to avoid encountering them during the |
2635 // next pointer iteration. This is an issue if the store buffer overflows and | 2669 // next pointer iteration. This is an issue if the store buffer overflows and |
2636 // we have to scan the entire old space, including dead objects, looking for | 2670 // we have to scan the entire old space, including dead objects, looking for |
2637 // pointers to new space. | 2671 // pointers to new space. |
2638 void MarkCompactCollector::MigrateObject( | 2672 void MarkCompactCollector::MigrateObject( |
2639 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest, | 2673 HeapObject* dst, HeapObject* src, int size, AllocationSpace dest, |
2640 SlotsBuffer** evacuation_slots_buffer) { | 2674 SlotsBuffer** evacuation_slots_buffer) { |
2641 Address dst_addr = dst->address(); | 2675 Address dst_addr = dst->address(); |
2642 Address src_addr = src->address(); | 2676 Address src_addr = src->address(); |
2643 DCHECK(heap()->AllowedToBeMigrated(src, dest)); | 2677 DCHECK(heap()->AllowedToBeMigrated(src, dest)); |
2644 DCHECK(dest != LO_SPACE); | 2678 DCHECK(dest != LO_SPACE); |
2645 if (dest == OLD_SPACE) { | 2679 if (dest == OLD_SPACE) { |
2646 DCHECK_OBJECT_SIZE(size); | 2680 DCHECK_OBJECT_SIZE(size); |
2647 DCHECK(evacuation_slots_buffer != nullptr); | 2681 DCHECK(evacuation_slots_buffer != nullptr); |
2648 DCHECK(IsAligned(size, kPointerSize)); | 2682 DCHECK(IsAligned(size, kPointerSize)); |
2649 switch (src->ContentType()) { | |
2650 case HeapObjectContents::kTaggedValues: | |
2651 MigrateObjectTagged(dst, src, size, evacuation_slots_buffer); | |
2652 break; | |
2653 | 2683 |
2654 case HeapObjectContents::kMixedValues: | 2684 heap()->MoveBlock(dst->address(), src->address(), size); |
2655 MigrateObjectMixed(dst, src, size, evacuation_slots_buffer); | 2685 RecordMigratedSlotVisitor visitor(this, evacuation_slots_buffer); |
2656 break; | 2686 dst->IterateBody(&visitor); |
2657 | |
2658 case HeapObjectContents::kRawValues: | |
2659 MigrateObjectRaw(dst, src, size); | |
2660 break; | |
2661 } | |
2662 | |
2663 if (compacting_ && dst->IsJSFunction()) { | |
2664 Address code_entry_slot = dst->address() + JSFunction::kCodeEntryOffset; | |
2665 Address code_entry = Memory::Address_at(code_entry_slot); | |
2666 RecordMigratedCodeEntrySlot(code_entry, code_entry_slot, | |
2667 evacuation_slots_buffer); | |
2668 } | |
2669 } else if (dest == CODE_SPACE) { | 2687 } else if (dest == CODE_SPACE) { |
2670 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); | 2688 DCHECK_CODEOBJECT_SIZE(size, heap()->code_space()); |
2671 DCHECK(evacuation_slots_buffer != nullptr); | 2689 DCHECK(evacuation_slots_buffer != nullptr); |
2672 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); | 2690 PROFILE(isolate(), CodeMoveEvent(src_addr, dst_addr)); |
2673 heap()->MoveBlock(dst_addr, src_addr, size); | 2691 heap()->MoveBlock(dst_addr, src_addr, size); |
2674 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); | 2692 RecordMigratedCodeObjectSlot(dst_addr, evacuation_slots_buffer); |
2675 Code::cast(dst)->Relocate(dst_addr - src_addr); | 2693 Code::cast(dst)->Relocate(dst_addr - src_addr); |
2676 } else { | 2694 } else { |
2677 DCHECK_OBJECT_SIZE(size); | 2695 DCHECK_OBJECT_SIZE(size); |
2678 DCHECK(evacuation_slots_buffer == nullptr); | 2696 DCHECK(evacuation_slots_buffer == nullptr); |
2679 DCHECK(dest == NEW_SPACE); | 2697 DCHECK(dest == NEW_SPACE); |
2680 heap()->MoveBlock(dst_addr, src_addr, size); | 2698 heap()->MoveBlock(dst_addr, src_addr, size); |
2681 } | 2699 } |
2682 heap()->OnMoveEvent(dst, src, size); | 2700 heap()->OnMoveEvent(dst, src, size); |
2683 Memory::Address_at(src_addr) = dst_addr; | 2701 Memory::Address_at(src_addr) = dst_addr; |
2684 } | 2702 } |
2685 | 2703 |
2686 | 2704 |
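The comment before MigrateObject describes the forwarding-address convention linking the two passes. A sketch of that scheme under a simplified layout (plain addresses, no V8 types):

    #include <cstdint>

    using Address = uintptr_t;

    // First pass: the first word of the abandoned from-space copy is reused
    // as the forwarding slot, with no encoding, just as the final
    // 'Memory::Address_at(src_addr) = dst_addr' line above does.
    inline void SetForwardingAddress(Address old_object, Address new_object) {
      *reinterpret_cast<Address*>(old_object) = new_object;
    }

    inline Address GetForwardingAddress(Address old_object) {
      return *reinterpret_cast<Address*>(old_object);
    }

    // Second pass: rewrite a slot that pointed into from-space. A NULL
    // forwarding address marks a dead object, and clearing the slot keeps
    // later pointer iterations from seeing a stale from-space pointer.
    inline void UpdateSlot(Address* slot) {
      *slot = GetForwardingAddress(*slot);  // becomes 0 if the referent died
    }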
2687 void MarkCompactCollector::MigrateObjectTagged( | |
2688 HeapObject* dst, HeapObject* src, int size, | |
2689 SlotsBuffer** evacuation_slots_buffer) { | |
2690 Address src_slot = src->address(); | |
2691 Address dst_slot = dst->address(); | |
2692 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { | |
2693 Object* value = Memory::Object_at(src_slot); | |
2694 Memory::Object_at(dst_slot) = value; | |
2695 RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer); | |
2696 src_slot += kPointerSize; | |
2697 dst_slot += kPointerSize; | |
2698 } | |
2699 } | |
2700 | |
2701 | |
2702 void MarkCompactCollector::MigrateObjectMixed( | |
2703 HeapObject* dst, HeapObject* src, int size, | |
2704 SlotsBuffer** evacuation_slots_buffer) { | |
2705 if (src->IsFixedTypedArrayBase()) { | |
2706 heap()->MoveBlock(dst->address(), src->address(), size); | |
2707 Address base_pointer_slot = | |
2708 dst->address() + FixedTypedArrayBase::kBasePointerOffset; | |
2709 RecordMigratedSlot(Memory::Object_at(base_pointer_slot), base_pointer_slot, | |
2710 evacuation_slots_buffer); | |
2711 } else if (src->IsBytecodeArray()) { | |
2712 heap()->MoveBlock(dst->address(), src->address(), size); | |
2713 Address constant_pool_slot = | |
2714 dst->address() + BytecodeArray::kConstantPoolOffset; | |
2715 RecordMigratedSlot(Memory::Object_at(constant_pool_slot), | |
2716 constant_pool_slot, evacuation_slots_buffer); | |
2717 } else if (src->IsJSArrayBuffer()) { | |
2718 heap()->MoveBlock(dst->address(), src->address(), size); | |
2719 | |
2720 // Visit inherited JSObject properties and byte length of ArrayBuffer | |
2721 Address regular_slot = | |
2722 dst->address() + JSArrayBuffer::BodyDescriptor::kStartOffset; | |
2723 Address regular_slots_end = | |
2724 dst->address() + JSArrayBuffer::kByteLengthOffset + kPointerSize; | |
2725 while (regular_slot < regular_slots_end) { | |
2726 RecordMigratedSlot(Memory::Object_at(regular_slot), regular_slot, | |
2727 evacuation_slots_buffer); | |
2728 regular_slot += kPointerSize; | |
2729 } | |
2730 | |
2731 // Skip backing store and visit just internal fields | |
2732 Address internal_field_slot = dst->address() + JSArrayBuffer::kSize; | |
2733 Address internal_fields_end = | |
2734 dst->address() + JSArrayBuffer::kSizeWithInternalFields; | |
2735 while (internal_field_slot < internal_fields_end) { | |
2736 RecordMigratedSlot(Memory::Object_at(internal_field_slot), | |
2737 internal_field_slot, evacuation_slots_buffer); | |
2738 internal_field_slot += kPointerSize; | |
2739 } | |
2740 } else if (FLAG_unbox_double_fields) { | |
2741 Address dst_addr = dst->address(); | |
2742 Address src_addr = src->address(); | |
2743 Address src_slot = src_addr; | |
2744 Address dst_slot = dst_addr; | |
2745 | |
2746 LayoutDescriptorHelper helper(src->map()); | |
2747 DCHECK(!helper.all_fields_tagged()); | |
2748 for (int remaining = size / kPointerSize; remaining > 0; remaining--) { | |
2749 Object* value = Memory::Object_at(src_slot); | |
2750 | |
2751 Memory::Object_at(dst_slot) = value; | |
2752 | |
2753 if (helper.IsTagged(static_cast<int>(src_slot - src_addr))) { | |
2754 RecordMigratedSlot(value, dst_slot, evacuation_slots_buffer); | |
2755 } | |
2756 | |
2757 src_slot += kPointerSize; | |
2758 dst_slot += kPointerSize; | |
2759 } | |
2760 } else { | |
2761 UNREACHABLE(); | |
2762 } | |
2763 } | |
2764 | |
2765 | |
2766 void MarkCompactCollector::MigrateObjectRaw(HeapObject* dst, HeapObject* src, | |
2767 int size) { | |
2768 heap()->MoveBlock(dst->address(), src->address(), size); | |
2769 } | |
2770 | |
2771 | |
2772 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v, | 2705 static inline void UpdateSlot(Isolate* isolate, ObjectVisitor* v, |
2773 SlotsBuffer::SlotType slot_type, Address addr) { | 2706 SlotsBuffer::SlotType slot_type, Address addr) { |
2774 switch (slot_type) { | 2707 switch (slot_type) { |
2775 case SlotsBuffer::CODE_TARGET_SLOT: { | 2708 case SlotsBuffer::CODE_TARGET_SLOT: { |
2776 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL); | 2709 RelocInfo rinfo(addr, RelocInfo::CODE_TARGET, 0, NULL); |
2777 rinfo.Visit(isolate, v); | 2710 rinfo.Visit(isolate, v); |
2778 break; | 2711 break; |
2779 } | 2712 } |
2780 case SlotsBuffer::CELL_TARGET_SLOT: { | 2713 case SlotsBuffer::CELL_TARGET_SLOT: { |
2781 RelocInfo rinfo(addr, RelocInfo::CELL, 0, NULL); | 2714 RelocInfo rinfo(addr, RelocInfo::CELL, 0, NULL); |
(...skipping 338 matching lines...) |
3120 InstanceType type = object->map()->instance_type(); | 3053 InstanceType type = object->map()->instance_type(); |
3121 // Slots in maps and code can't be invalid because they are never | 3054 // Slots in maps and code can't be invalid because they are never |
3122 // shrunk. | 3055 // shrunk. |
3123 if (type == MAP_TYPE || type == CODE_TYPE) return true; | 3056 if (type == MAP_TYPE || type == CODE_TYPE) return true; |
3124 | 3057 |
3125 // Consider slots in objects that contain ONLY raw data as invalid. | 3058 // Consider slots in objects that contain ONLY raw data as invalid. |
3126 return false; | 3059 return false; |
3127 } | 3060 } |
3128 | 3061 |
3129 case HeapObjectContents::kMixedValues: { | 3062 case HeapObjectContents::kMixedValues: { |
| 3063 int offset = static_cast<int>(slot - object->address()); |
3130 if (object->IsFixedTypedArrayBase()) { | 3064 if (object->IsFixedTypedArrayBase()) { |
3131 return static_cast<int>(slot - object->address()) == | 3065 return offset == FixedTypedArrayBase::kBasePointerOffset; |
3132 FixedTypedArrayBase::kBasePointerOffset; | |
3133 } else if (object->IsBytecodeArray()) { | 3066 } else if (object->IsBytecodeArray()) { |
3134 return static_cast<int>(slot - object->address()) == | 3067 return offset == BytecodeArray::kConstantPoolOffset; |
3135 BytecodeArray::kConstantPoolOffset; | |
3136 } else if (object->IsJSArrayBuffer()) { | 3068 } else if (object->IsJSArrayBuffer()) { |
3137 int off = static_cast<int>(slot - object->address()); | 3069 return (offset >= JSArrayBuffer::kPropertiesOffset && |
3138 return (off >= JSArrayBuffer::BodyDescriptor::kStartOffset && | 3070 offset <= JSArrayBuffer::kByteLengthOffset) || |
3139 off <= JSArrayBuffer::kByteLengthOffset) || | 3071 (offset >= JSArrayBuffer::kSize && |
3140 (off >= JSArrayBuffer::kSize && | 3072 offset < JSArrayBuffer::kSizeWithInternalFields); |
3141 off < JSArrayBuffer::kSizeWithInternalFields); | |
3142 } else if (FLAG_unbox_double_fields) { | 3073 } else if (FLAG_unbox_double_fields) { |
3143 // Filter out slots that happen to point to unboxed double fields. | 3074 // Filter out slots that happen to point to unboxed double fields. |
3144 LayoutDescriptorHelper helper(object->map()); | 3075 LayoutDescriptorHelper helper(object->map()); |
3145 DCHECK(!helper.all_fields_tagged()); | 3076 DCHECK(!helper.all_fields_tagged()); |
3146 return helper.IsTagged(static_cast<int>(slot - object->address())); | 3077 return helper.IsTagged(offset); |
3147 } | 3078 } |
3148 break; | 3079 break; |
3149 } | 3080 } |
3150 } | 3081 } |
3151 UNREACHABLE(); | 3082 UNREACHABLE(); |
3152 return true; | 3083 return true; |
3153 } | 3084 } |
3154 | 3085 |
3155 | 3086 |
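The kMixedValues branch above validates a recorded slot purely by its byte offset within the object: each mixed-layout type has a known set of tagged offsets, and everything else is raw data that must be filtered out. A sketch of the same offset test with invented constants (the real offsets live in V8's object layouts):

    // Hypothetical layouts for illustration only.
    constexpr int kBasePointerOffset = 8;    // single tagged word, rest raw
    constexpr int kFirstTaggedStart = 8;     // e.g. properties..byte length
    constexpr int kFirstTaggedEnd = 40;
    constexpr int kSecondTaggedStart = 56;   // e.g. internal fields
    constexpr int kSecondTaggedEnd = 72;

    // Single-field case, like FixedTypedArrayBase's base pointer.
    inline bool IsValidSingleSlot(int offset) {
      return offset == kBasePointerOffset;
    }

    // Two tagged regions split by a raw field, like JSArrayBuffer's tagged
    // words around its untagged backing-store pointer.
    inline bool IsValidTwoRangeSlot(int offset) {
      return (offset >= kFirstTaggedStart && offset <= kFirstTaggedEnd) ||
             (offset >= kSecondTaggedStart && offset < kSecondTaggedEnd);
    }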
3156 void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot, | 3087 void MarkCompactCollector::VerifyIsSlotInLiveObject(Address slot, |
(...skipping 1418 matching lines...) |
4575 MarkBit mark_bit = Marking::MarkBitFrom(host); | 4506 MarkBit mark_bit = Marking::MarkBitFrom(host); |
4576 if (Marking::IsBlack(mark_bit)) { | 4507 if (Marking::IsBlack(mark_bit)) { |
4577 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 4508 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
4578 RecordRelocSlot(&rinfo, target); | 4509 RecordRelocSlot(&rinfo, target); |
4579 } | 4510 } |
4580 } | 4511 } |
4581 } | 4512 } |
4582 | 4513 |
4583 } // namespace internal | 4514 } // namespace internal |
4584 } // namespace v8 | 4515 } // namespace v8 |