OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/heap.h" | 5 #include "src/heap/heap.h" |
6 | 6 |
7 #include "src/accessors.h" | 7 #include "src/accessors.h" |
8 #include "src/api.h" | 8 #include "src/api.h" |
9 #include "src/ast/context-slot-cache.h" | 9 #include "src/ast/context-slot-cache.h" |
10 #include "src/base/bits.h" | 10 #include "src/base/bits.h" |
(...skipping 3165 matching lines...)
3176 | 3176 |
3177 void Heap::AdjustLiveBytes(HeapObject* object, int by) { | 3177 void Heap::AdjustLiveBytes(HeapObject* object, int by) { |
3178 // As long as the inspected object is black and we are currently not iterating | 3178 // As long as the inspected object is black and we are currently not iterating |
3179 // the heap using HeapIterator, we can update the live byte count. We cannot | 3179 // the heap using HeapIterator, we can update the live byte count. We cannot |
3180 // update while using HeapIterator because the iterator is temporarily | 3180 // update while using HeapIterator because the iterator is temporarily |
3181 // marking the whole object graph, without updating live bytes. | 3181 // marking the whole object graph, without updating live bytes. |
3182 if (lo_space()->Contains(object)) { | 3182 if (lo_space()->Contains(object)) { |
3183 lo_space()->AdjustLiveBytes(by); | 3183 lo_space()->AdjustLiveBytes(by); |
3184 } else if (!in_heap_iterator() && | 3184 } else if (!in_heap_iterator() && |
3185 !mark_compact_collector()->sweeping_in_progress() && | 3185 !mark_compact_collector()->sweeping_in_progress() && |
3186 ObjectMarking::IsBlack(object)) { | 3186 Marking::IsBlack(ObjectMarking::MarkBitFrom(object))) { |
3187 DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone()); | 3187 DCHECK(MemoryChunk::FromAddress(object->address())->SweepingDone()); |
3188 MemoryChunk::IncrementLiveBytes(object, by); | 3188 MemoryChunk::IncrementLiveBytes(object, by); |
3189 } | 3189 } |
3190 } | 3190 } |
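Throughout this diff the NEW column replaces the ObjectMarking convenience predicates with an explicit mark-bit lookup, as in AdjustLiveBytes above. A minimal sketch of the two styles, assuming (as the mechanical nature of the change suggests) that the wrapper simply composed the lookup and the predicate; all names are taken from the diff itself, so this compiles only against the V8-internal headers:

    // Sketch only: the two call styles seen in this diff, assumed equivalent.
    bool IsBlackWrapperStyle(HeapObject* object) {
      // OLD column: convenience wrapper on ObjectMarking.
      return ObjectMarking::IsBlack(object);
    }
    bool IsBlackExplicitStyle(HeapObject* object) {
      // NEW column: fetch the per-object mark bit, then test it.
      MarkBit mark_bit = ObjectMarking::MarkBitFrom(object);
      return Marking::IsBlack(mark_bit);
    }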
3191 | 3191 |
3192 | 3192 |
3193 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object, | 3193 FixedArrayBase* Heap::LeftTrimFixedArray(FixedArrayBase* object, |
3194 int elements_to_trim) { | 3194 int elements_to_trim) { |
3195 CHECK_NOT_NULL(object); | 3195 CHECK_NOT_NULL(object); |
3196 DCHECK(CanMoveObjectStart(object)); | |
3197 DCHECK(!object->IsFixedTypedArrayBase()); | 3196 DCHECK(!object->IsFixedTypedArrayBase()); |
3198 DCHECK(!object->IsByteArray()); | 3197 DCHECK(!object->IsByteArray()); |
3199 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize; | 3198 const int element_size = object->IsFixedArray() ? kPointerSize : kDoubleSize; |
3200 const int bytes_to_trim = elements_to_trim * element_size; | 3199 const int bytes_to_trim = elements_to_trim * element_size; |
3201 Map* map = object->map(); | 3200 Map* map = object->map(); |
3202 | 3201 |
3203 // For now this trick is only applied to objects in new and paged space. | 3202 // For now this trick is only applied to objects in new and paged space. |
3204 // In large object space the object's start must coincide with the | 3203 // In large object space the object's start must coincide with the |
3205 // chunk start, and thus the trick is just not applicable. | 3204 // chunk start, and thus the trick is just not applicable. |
3206 DCHECK(!lo_space()->Contains(object)); | 3205 DCHECK(!lo_space()->Contains(object)); |
(...skipping 30 matching lines...)
3237 Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object))) { | 3236 Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(object))) { |
3238 Page* page = Page::FromAddress(old_start); | 3237 Page* page = Page::FromAddress(old_start); |
3239 page->markbits()->ClearRange( | 3238 page->markbits()->ClearRange( |
3240 page->AddressToMarkbitIndex(old_start), | 3239 page->AddressToMarkbitIndex(old_start), |
3241 page->AddressToMarkbitIndex(old_start + bytes_to_trim)); | 3240 page->AddressToMarkbitIndex(old_start + bytes_to_trim)); |
3242 } | 3241 } |
3243 | 3242 |
3244 // Initialize the header of the trimmed array. Since left trimming is | 3243 // Initialize the header of the trimmed array. Since left trimming is |
3245 // only performed on pages which are not concurrently swept, creating a | 3244 // only performed on pages which are not concurrently swept, creating a |
3246 // filler object does not require synchronization. | 3245 // filler object does not require synchronization. |
| 3246 DCHECK(CanMoveObjectStart(object)); |
3247 Object** former_start = HeapObject::RawField(object, 0); | 3247 Object** former_start = HeapObject::RawField(object, 0); |
3248 int new_start_index = elements_to_trim * (element_size / kPointerSize); | 3248 int new_start_index = elements_to_trim * (element_size / kPointerSize); |
3249 former_start[new_start_index] = map; | 3249 former_start[new_start_index] = map; |
3250 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim); | 3250 former_start[new_start_index + 1] = Smi::FromInt(len - elements_to_trim); |
3251 | 3251 |
3252 FixedArrayBase* new_object = | 3252 FixedArrayBase* new_object = |
3253 FixedArrayBase::cast(HeapObject::FromAddress(new_start)); | 3253 FixedArrayBase::cast(HeapObject::FromAddress(new_start)); |
3254 | 3254 |
3255 // Maintain consistency of live bytes during incremental marking | 3255 // Maintain consistency of live bytes during incremental marking |
3256 AdjustLiveBytes(new_object, -bytes_to_trim); | 3256 AdjustLiveBytes(new_object, -bytes_to_trim); |
(...skipping 50 matching lines...)
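To make the pointer arithmetic in LeftTrimFixedArray concrete, here is a small, self-contained model of the trim computed above (plain C++, not V8 source; the addresses are fake and kPointerSize is assumed to be 8, as on a 64-bit build):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int kPointerSize = 8;
      const int elements_to_trim = 2;
      // FixedArray case: each element is one pointer wide.
      const int element_size = kPointerSize;
      const int bytes_to_trim = elements_to_trim * element_size;
      uintptr_t old_start = 0x10000;                    // fake object address
      uintptr_t new_start = old_start + bytes_to_trim;  // start moves forward
      // As in former_start[new_start_index] above, the map and the
      // Smi-encoded new length are rewritten at word offsets 0 and 1
      // from new_start; a filler covers [old_start, new_start).
      std::printf("trim %d bytes: %#lx -> %#lx\n", bytes_to_trim,
                  (unsigned long)old_start, (unsigned long)new_start);
      return 0;
    }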
3307 // We do not create a filler for objects in large object space. | 3307 // We do not create a filler for objects in large object space. |
3308 // TODO(hpayer): We should shrink the large object page if the size | 3308 // TODO(hpayer): We should shrink the large object page if the size |
3309 // of the object changed significantly. | 3309 // of the object changed significantly. |
3310 if (!lo_space()->Contains(object)) { | 3310 if (!lo_space()->Contains(object)) { |
3311 HeapObject* filler = | 3311 HeapObject* filler = |
3312 CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes); | 3312 CreateFillerObjectAt(new_end, bytes_to_trim, ClearRecordedSlots::kYes); |
3313 DCHECK_NOT_NULL(filler); | 3313 DCHECK_NOT_NULL(filler); |
3314 // Clear the mark bits of the black area that now belongs to the filler. | 3314 // Clear the mark bits of the black area that now belongs to the filler. |
3315 // This is an optimization. The sweeper will release black fillers anyway. | 3315 // This is an optimization. The sweeper will release black fillers anyway. |
3316 if (incremental_marking()->black_allocation() && | 3316 if (incremental_marking()->black_allocation() && |
3317 ObjectMarking::IsBlackOrGrey(filler)) { | 3317 Marking::IsBlackOrGrey(ObjectMarking::MarkBitFrom(filler))) { |
3318 Page* page = Page::FromAddress(new_end); | 3318 Page* page = Page::FromAddress(new_end); |
3319 page->markbits()->ClearRange( | 3319 page->markbits()->ClearRange( |
3320 page->AddressToMarkbitIndex(new_end), | 3320 page->AddressToMarkbitIndex(new_end), |
3321 page->AddressToMarkbitIndex(new_end + bytes_to_trim)); | 3321 page->AddressToMarkbitIndex(new_end + bytes_to_trim)); |
3322 } | 3322 } |
3323 } | 3323 } |
3324 | 3324 |
3325 // Initialize the header of the trimmed array. We store the new length | 3325 // Initialize the header of the trimmed array. We store the new length |
3326 // using a release store after creating a filler for the left-over space | 3326 // using a release store after creating a filler for the left-over space |
3327 // to avoid races with the sweeper thread. | 3327 // to avoid races with the sweeper thread. |
(...skipping 974 matching lines...)
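Both trim paths clear a range of mark bits via AddressToMarkbitIndex plus ClearRange. Below is a hedged, self-contained model of that idiom; the one-bit-per-pointer-sized-word granularity, the bitmap size, and all names here are assumptions, not V8's actual bitmap implementation:

    #include <bitset>
    #include <cstdint>

    const int kPointerSizeLog2 = 3;     // assumed 64-bit build
    std::bitset<1 << 15> markbits;      // stand-in for the per-page bitmap

    // Map an address range on the page to a bit range and zero it,
    // mirroring page->markbits()->ClearRange(...) in the diff.
    void ClearMarkRange(uintptr_t page_base, uintptr_t start, uintptr_t end) {
      for (uintptr_t a = start; a < end; a += (1 << kPointerSizeLog2)) {
        markbits.reset((a - page_base) >> kPointerSizeLog2);
      }
    }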
4302 | 4302 |
4303 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) { | 4303 void Heap::RegisterReservationsForBlackAllocation(Reservation* reservations) { |
4304 // TODO(hpayer): We do not have to iterate reservations on black objects | 4304 // TODO(hpayer): We do not have to iterate reservations on black objects |
4305 // for marking. We just have to execute the special visiting side effect | 4305 // for marking. We just have to execute the special visiting side effect |
4306 // code that adds objects to global data structures, e.g. for array buffers. | 4306 // code that adds objects to global data structures, e.g. for array buffers. |
4307 | 4307 |
4308 // Code space, map space, and large object space do not use black pages. | 4308 // Code space, map space, and large object space do not use black pages. |
4309 // Hence we first have to color all objects of the reservation black to avoid | 4309 // Hence we first have to color all objects of the reservation black to avoid |
4310 // unnecessary marking deque load. | 4310 // unnecessary marking deque load. |
4311 if (incremental_marking()->black_allocation()) { | 4311 if (incremental_marking()->black_allocation()) { |
4312 for (int i = CODE_SPACE; i < Serializer::kNumberOfSpaces; i++) { | 4312 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) { |
4313 const Heap::Reservation& res = reservations[i]; | 4313 const Heap::Reservation& res = reservations[i]; |
4314 for (auto& chunk : res) { | 4314 for (auto& chunk : res) { |
4315 Address addr = chunk.start; | 4315 Address addr = chunk.start; |
4316 while (addr < chunk.end) { | 4316 while (addr < chunk.end) { |
4317 HeapObject* obj = HeapObject::FromAddress(addr); | 4317 HeapObject* obj = HeapObject::FromAddress(addr); |
4318 ObjectMarking::WhiteToBlack(obj); | 4318 Marking::MarkBlack(ObjectMarking::MarkBitFrom(obj)); |
4319 addr += obj->Size(); | 4319 addr += obj->Size(); |
4320 } | 4320 } |
4321 } | 4321 } |
4322 } | 4322 } |
4323 // Iterate black objects in old space, code space, map space, and large | |
4324 // object space for side effects. | |
4325 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) { | 4323 for (int i = OLD_SPACE; i < Serializer::kNumberOfSpaces; i++) { |
4326 const Heap::Reservation& res = reservations[i]; | 4324 const Heap::Reservation& res = reservations[i]; |
4327 for (auto& chunk : res) { | 4325 for (auto& chunk : res) { |
4328 Address addr = chunk.start; | 4326 Address addr = chunk.start; |
4329 while (addr < chunk.end) { | 4327 while (addr < chunk.end) { |
4330 HeapObject* obj = HeapObject::FromAddress(addr); | 4328 HeapObject* obj = HeapObject::FromAddress(addr); |
4331 incremental_marking()->IterateBlackObject(obj); | 4329 incremental_marking()->IterateBlackObject(obj); |
4332 addr += obj->Size(); | 4330 addr += obj->Size(); |
4333 } | 4331 } |
4334 } | 4332 } |
(...skipping 558 matching lines...)
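RegisterReservationsForBlackAllocation walks each reserved chunk twice with the same idiom: objects in a chunk are laid out contiguously, so stepping by obj->Size() visits each one exactly once. A minimal, self-contained sketch of that walk with a hypothetical object type (not V8 source):

    #include <cstddef>

    struct FakeObject { size_t size; };  // hypothetical stand-in for HeapObject

    // Visit every object in [start, end), mirroring the addr += obj->Size()
    // loops above: each object begins where the previous one ends.
    template <typename Visitor>
    void WalkChunk(char* start, char* end, Visitor visit) {
      for (char* addr = start; addr < end; ) {
        FakeObject* obj = reinterpret_cast<FakeObject*>(addr);
        visit(obj);        // e.g. color black, or run the side-effect visitor
        addr += obj->size;
      }
    }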
4893 void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size, | 4891 void Heap::IterateAndScavengePromotedObject(HeapObject* target, int size, |
4894 bool was_marked_black) { | 4892 bool was_marked_black) { |
4895 // We are not collecting slots on new space objects during mutation, | 4893 // We are not collecting slots on new space objects during mutation, |
4896 // thus we have to scan for pointers to evacuation candidates when we | 4894 // thus we have to scan for pointers to evacuation candidates when we |
4897 // promote objects. But we should not record any slots in non-black | 4895 // promote objects. But we should not record any slots in non-black |
4898 // objects. Grey objects' slots would be rescanned. A white object | 4896 // objects. Grey objects' slots would be rescanned. A white object |
4899 // might not survive until the end of the collection, so it would be a | 4897 // might not survive until the end of the collection, so it would be a |
4900 // violation of the invariant to record its slots. | 4898 // violation of the invariant to record its slots. |
4901 bool record_slots = false; | 4899 bool record_slots = false; |
4902 if (incremental_marking()->IsCompacting()) { | 4900 if (incremental_marking()->IsCompacting()) { |
4903 record_slots = ObjectMarking::IsBlack(target); | 4901 MarkBit mark_bit = ObjectMarking::MarkBitFrom(target); |
| 4902 record_slots = Marking::IsBlack(mark_bit); |
4904 } | 4903 } |
4905 | 4904 |
4906 IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots); | 4905 IterateAndScavengePromotedObjectsVisitor visitor(this, target, record_slots); |
4907 if (target->IsJSFunction()) { | 4906 if (target->IsJSFunction()) { |
4908 // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for | 4907 // JSFunctions reachable through kNextFunctionLinkOffset are weak. Slots for |
4909 // these links are recorded during processing of weak lists. | 4908 // these links are recorded during processing of weak lists. |
4910 JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor); | 4909 JSFunction::BodyDescriptorWeakCode::IterateBody(target, size, &visitor); |
4911 } else { | 4910 } else { |
4912 target->IterateBody(target->map()->instance_type(), size, &visitor); | 4911 target->IterateBody(target->map()->instance_type(), size, &visitor); |
4913 } | 4912 } |
(...skipping 1217 matching lines...)
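The record_slots logic above encodes the tri-color rule spelled out in the comment: only black objects get slots recorded, since grey objects will be rescanned and white objects may not survive. A one-function sketch of that rule (hypothetical enum and function names, not V8 source):

    enum class Color { kWhite, kGrey, kBlack };

    // Record slots only when compacting and the promoted object is black;
    // grey objects are rescanned anyway, and recording slots of a white
    // object that dies would violate the remembered-set invariant.
    bool ShouldRecordSlots(bool compacting, Color color) {
      return compacting && color == Color::kBlack;
    }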
6131 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) { | 6130 explicit UnreachableObjectsFilter(Heap* heap) : heap_(heap) { |
6132 MarkReachableObjects(); | 6131 MarkReachableObjects(); |
6133 } | 6132 } |
6134 | 6133 |
6135 ~UnreachableObjectsFilter() { | 6134 ~UnreachableObjectsFilter() { |
6136 heap_->mark_compact_collector()->ClearMarkbits(); | 6135 heap_->mark_compact_collector()->ClearMarkbits(); |
6137 } | 6136 } |
6138 | 6137 |
6139 bool SkipObject(HeapObject* object) { | 6138 bool SkipObject(HeapObject* object) { |
6140 if (object->IsFiller()) return true; | 6139 if (object->IsFiller()) return true; |
6141 return ObjectMarking::IsWhite(object); | 6140 MarkBit mark_bit = ObjectMarking::MarkBitFrom(object); |
| 6141 return Marking::IsWhite(mark_bit); |
6142 } | 6142 } |
6143 | 6143 |
6144 private: | 6144 private: |
6145 class MarkingVisitor : public ObjectVisitor { | 6145 class MarkingVisitor : public ObjectVisitor { |
6146 public: | 6146 public: |
6147 MarkingVisitor() : marking_stack_(10) {} | 6147 MarkingVisitor() : marking_stack_(10) {} |
6148 | 6148 |
6149 void VisitPointers(Object** start, Object** end) override { | 6149 void VisitPointers(Object** start, Object** end) override { |
6150 for (Object** p = start; p < end; p++) { | 6150 for (Object** p = start; p < end; p++) { |
6151 if (!(*p)->IsHeapObject()) continue; | 6151 if (!(*p)->IsHeapObject()) continue; |
6152 HeapObject* obj = HeapObject::cast(*p); | 6152 HeapObject* obj = HeapObject::cast(*p); |
6153 // Use Marking instead of ObjectMarking to avoid adjusting live bytes | |
6154 // counter. | |
6155 MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj); | 6153 MarkBit mark_bit = ObjectMarking::MarkBitFrom(obj); |
6156 if (Marking::IsWhite(mark_bit)) { | 6154 if (Marking::IsWhite(mark_bit)) { |
6157 Marking::WhiteToBlack(mark_bit); | 6155 Marking::WhiteToBlack(mark_bit); |
6158 marking_stack_.Add(obj); | 6156 marking_stack_.Add(obj); |
6159 } | 6157 } |
6160 } | 6158 } |
6161 } | 6159 } |
6162 | 6160 |
6163 void TransitiveClosure() { | 6161 void TransitiveClosure() { |
6164 while (!marking_stack_.is_empty()) { | 6162 while (!marking_stack_.is_empty()) { |
(...skipping 452 matching lines...)
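MarkingVisitor above implements reachability as a white-to-black flip plus an explicit stack, with TransitiveClosure draining that stack. A self-contained sketch of the worklist pattern on a hypothetical Node graph (plain C++, not V8 source):

    #include <vector>

    struct Node {
      bool black = false;          // stand-in for the mark bit
      std::vector<Node*> edges;    // stand-in for VisitPointers targets
    };

    // Flip each white node to black exactly once and push it, then pop
    // until empty: every reachable node is visited exactly once.
    void TransitiveClosure(Node* root) {
      std::vector<Node*> stack;
      if (!root->black) { root->black = true; stack.push_back(root); }
      while (!stack.empty()) {
        Node* n = stack.back(); stack.pop_back();
        for (Node* m : n->edges) {
          if (!m->black) { m->black = true; stack.push_back(m); }
        }
      }
    }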
6617 } | 6615 } |
6618 | 6616 |
6619 | 6617 |
6620 // static | 6618 // static |
6621 int Heap::GetStaticVisitorIdForMap(Map* map) { | 6619 int Heap::GetStaticVisitorIdForMap(Map* map) { |
6622 return StaticVisitorBase::GetVisitorId(map); | 6620 return StaticVisitorBase::GetVisitorId(map); |
6623 } | 6621 } |
6624 | 6622 |
6625 } // namespace internal | 6623 } // namespace internal |
6626 } // namespace v8 | 6624 } // namespace v8 |