| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/heap/scavenger.h" | 5 #include "src/heap/scavenger.h" |
| 6 | 6 |
| 7 #include "src/contexts.h" | 7 #include "src/contexts.h" |
| 8 #include "src/heap/heap-inl.h" | 8 #include "src/heap/heap-inl.h" |
| 9 #include "src/heap/incremental-marking.h" | 9 #include "src/heap/incremental-marking.h" |
| 10 #include "src/heap/objects-visiting-inl.h" | 10 #include "src/heap/objects-visiting-inl.h" |
| (...skipping 182 matching lines...) | (...skipping 182 matching lines...) |
| 193 MigrateObject(heap, object, target, object_size); | 193 MigrateObject(heap, object, target, object_size); |
| 194 | 194 |
| 195 // Update slot to new target using CAS. A concurrent sweeper thread may | 195 // Update slot to new target using CAS. A concurrent sweeper thread may |
| 196 // filter the slot concurrently. | 196 // filter the slot concurrently. |
| 197 HeapObject* old = *slot; | 197 HeapObject* old = *slot; |
| 198 base::Release_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot), | 198 base::Release_CompareAndSwap(reinterpret_cast<base::AtomicWord*>(slot), |
| 199 reinterpret_cast<base::AtomicWord>(old), | 199 reinterpret_cast<base::AtomicWord>(old), |
| 200 reinterpret_cast<base::AtomicWord>(target)); | 200 reinterpret_cast<base::AtomicWord>(target)); |
| 201 | 201 |
| 202 if (object_contents == POINTER_OBJECT) { | 202 if (object_contents == POINTER_OBJECT) { |
| 203 heap->promotion_queue()->insert(target, object_size, | 203 // TODO(mlippautz): Query collector for marking state. |
| 204 ObjectMarking::IsBlack(object)); | 204 heap->promotion_queue()->insert( |
| | 205 target, object_size, |
| | 206 ObjectMarking::IsBlack(object, MarkingState::Internal(object))); |
| 205 } | 207 } |
| 206 heap->IncrementPromotedObjectsSize(object_size); | 208 heap->IncrementPromotedObjectsSize(object_size); |
| 207 return true; | 209 return true; |
| 208 } | 210 } |
| 209 return false; | 211 return false; |
| 210 } | 212 } |
| 211 | 213 |
| 212 template <ObjectContents object_contents, AllocationAlignment alignment> | 214 template <ObjectContents object_contents, AllocationAlignment alignment> |
| 213 static inline void EvacuateObject(Map* map, HeapObject** slot, | 215 static inline void EvacuateObject(Map* map, HeapObject** slot, |
| 214 HeapObject* object, int object_size) { | 216 HeapObject* object, int object_size) { |
| (...skipping 23 matching lines...) | (...skipping 23 matching lines...) |
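The hunk above publishes the forwarding target into the slot with a release compare-and-swap, because a concurrent sweeper thread may rewrite (filter) the slot at the same time; the actual change in this hunk is that `ObjectMarking::IsBlack` now takes an explicit `MarkingState` argument instead of relying on a default. Below is a minimal sketch of the release-CAS slot-update pattern using `std::atomic`; `HeapObject` and `UpdateSlot` are hypothetical stand-ins, not V8's `base::Release_CompareAndSwap` API.

```cpp
// Minimal sketch of the slot-update pattern above, using std::atomic in place
// of V8's base::Release_CompareAndSwap. Types are hypothetical stand-ins.
#include <atomic>

struct HeapObject {};

// Publish the forwarding target with release semantics. If another thread has
// already rewritten (filtered) the slot, the CAS fails and that thread's value
// is kept, which the original comment suggests is the intended outcome.
inline void UpdateSlot(std::atomic<HeapObject*>* slot, HeapObject* target) {
  HeapObject* old_value = slot->load(std::memory_order_relaxed);
  slot->compare_exchange_strong(old_value, target,
                                std::memory_order_release,
                                std::memory_order_relaxed);
}
```

Losing the race is acceptable here: the slot has already been rewritten by the concurrent filter, so the scavenger's value is simply dropped.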
| 238 static inline void EvacuateJSFunction(Map* map, HeapObject** slot, | 240 static inline void EvacuateJSFunction(Map* map, HeapObject** slot, |
| 239 HeapObject* object) { | 241 HeapObject* object) { |
| 240 ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object); | 242 ObjectEvacuationStrategy<POINTER_OBJECT>::Visit(map, slot, object); |
| 241 | 243 |
| 242 if (marks_handling == IGNORE_MARKS) return; | 244 if (marks_handling == IGNORE_MARKS) return; |
| 243 | 245 |
| 244 MapWord map_word = object->map_word(); | 246 MapWord map_word = object->map_word(); |
| 245 DCHECK(map_word.IsForwardingAddress()); | 247 DCHECK(map_word.IsForwardingAddress()); |
| 246 HeapObject* target = map_word.ToForwardingAddress(); | 248 HeapObject* target = map_word.ToForwardingAddress(); |
| 247 | 249 |
| 248 if (ObjectMarking::IsBlack(target)) { | 250 // TODO(mlippautz): Notify collector of this object so we don't have to |
| | 251 // retrieve the state out of thin air. |
| | 252 if (ObjectMarking::IsBlack(target, MarkingState::Internal(target))) { |
| 249 // This object is black and it might not be rescanned by marker. | 253 // This object is black and it might not be rescanned by marker. |
| 250 // We should explicitly record code entry slot for compaction because | 254 // We should explicitly record code entry slot for compaction because |
| 251 // promotion queue processing (IteratePromotedObjectPointers) will | 255 // promotion queue processing (IteratePromotedObjectPointers) will |
| 252 // miss it as it is not HeapObject-tagged. | 256 // miss it as it is not HeapObject-tagged. |
| 253 Address code_entry_slot = | 257 Address code_entry_slot = |
| 254 target->address() + JSFunction::kCodeEntryOffset; | 258 target->address() + JSFunction::kCodeEntryOffset; |
| 255 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot)); | 259 Code* code = Code::cast(Code::GetObjectFromEntryAddress(code_entry_slot)); |
| 256 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot( | 260 map->GetHeap()->mark_compact_collector()->RecordCodeEntrySlot( |
| 257 target, code_entry_slot, code); | 261 target, code_entry_slot, code); |
| 258 } | 262 } |
| (...skipping 209 matching lines...) | (...skipping 209 matching lines...) |
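The hunk above records the promoted JSFunction's code-entry slot explicitly when the target is already black, because that slot holds a raw (non-HeapObject-tagged) address and the promotion-queue pointer iteration would skip it. A small sketch of that distinction with hypothetical types (`PromotedFunction` and `CompactionSlots` are illustrative only, not V8 classes): a visitor that only walks tagged slots never sees the raw entry address, so it has to be handed to the slot recorder separately.

```cpp
// Hypothetical sketch: a pointer visitor that only walks tagged object slots
// never sees a raw code-entry address, so that slot is recorded separately
// for the compactor to fix up later.
#include <cstdint>
#include <vector>

struct HeapObject {};

struct PromotedFunction {
  std::vector<HeapObject**> tagged_slots;  // walked by the pointer visitor
  uintptr_t code_entry = 0;                // raw entry address: not a tagged slot
};

struct CompactionSlots {
  std::vector<uintptr_t*> code_entry_slots;
  void RecordCodeEntrySlot(uintptr_t* slot) { code_entry_slots.push_back(slot); }
};

inline void VisitPromoted(PromotedFunction* fn, CompactionSlots* slots,
                          void (*visit)(HeapObject**)) {
  // The regular iteration only sees tagged slots...
  for (HeapObject** slot : fn->tagged_slots) visit(slot);
  // ...so the raw code entry must be recorded explicitly, or the compactor
  // would never update it when the code object moves.
  slots->RecordCodeEntrySlot(&fn->code_entry);
}
```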
| 468 void ScavengeVisitor::ScavengePointer(Object** p) { | 472 void ScavengeVisitor::ScavengePointer(Object** p) { |
| 469 Object* object = *p; | 473 Object* object = *p; |
| 470 if (!heap_->InNewSpace(object)) return; | 474 if (!heap_->InNewSpace(object)) return; |
| 471 | 475 |
| 472 Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p), | 476 Scavenger::ScavengeObject(reinterpret_cast<HeapObject**>(p), |
| 473 reinterpret_cast<HeapObject*>(object)); | 477 reinterpret_cast<HeapObject*>(object)); |
| 474 } | 478 } |
| 475 | 479 |
| 476 } // namespace internal | 480 } // namespace internal |
| 477 } // namespace v8 | 481 } // namespace v8 |
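`ScavengeVisitor::ScavengePointer` above ignores pointers that do not target new space and hands everything else to `Scavenger::ScavengeObject`. A minimal sketch of that filter-then-scavenge shape with hypothetical types; `NewSpace` here is just an address-range check, not V8's class.

```cpp
// Hypothetical sketch of the filter-then-scavenge pattern used by the visitor.
struct HeapObject {};

// Stand-in for the new-space membership test: a simple address-range check.
struct NewSpace {
  const char* start = nullptr;
  const char* end = nullptr;
  bool Contains(const HeapObject* object) const {
    const char* p = reinterpret_cast<const char*>(object);
    return p >= start && p < end;
  }
};

// Pointers outside new space need no work during a scavenge; the rest are
// passed to the scavenge callback for copying/forwarding.
template <typename ScavengeFn>
void ScavengePointer(HeapObject** slot, const NewSpace& new_space,
                     ScavengeFn scavenge) {
  if (!new_space.Contains(*slot)) return;
  scavenge(slot);
}
```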