OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/v8.h" | 5 #include "src/v8.h" |
6 | 6 |
7 #include "src/incremental-marking.h" | 7 #include "src/heap/incremental-marking.h" |
8 | 8 |
9 #include "src/code-stubs.h" | 9 #include "src/code-stubs.h" |
10 #include "src/compilation-cache.h" | 10 #include "src/compilation-cache.h" |
11 #include "src/conversions.h" | 11 #include "src/conversions.h" |
12 #include "src/objects-visiting.h" | 12 #include "src/objects-visiting.h" |
13 #include "src/objects-visiting-inl.h" | 13 #include "src/objects-visiting-inl.h" |
14 | 14 |
15 namespace v8 { | 15 namespace v8 { |
16 namespace internal { | 16 namespace internal { |
17 | 17 |
18 | 18 |
19 IncrementalMarking::IncrementalMarking(Heap* heap) | 19 IncrementalMarking::IncrementalMarking(Heap* heap) |
20 : heap_(heap), | 20 : heap_(heap), |
21 state_(STOPPED), | 21 state_(STOPPED), |
22 marking_deque_memory_(NULL), | 22 marking_deque_memory_(NULL), |
23 marking_deque_memory_committed_(false), | 23 marking_deque_memory_committed_(false), |
24 steps_count_(0), | 24 steps_count_(0), |
25 old_generation_space_available_at_start_of_incremental_(0), | 25 old_generation_space_available_at_start_of_incremental_(0), |
26 old_generation_space_used_at_start_of_incremental_(0), | 26 old_generation_space_used_at_start_of_incremental_(0), |
27 should_hurry_(false), | 27 should_hurry_(false), |
28 marking_speed_(0), | 28 marking_speed_(0), |
29 allocated_(0), | 29 allocated_(0), |
30 no_marking_scope_depth_(0), | 30 no_marking_scope_depth_(0), |
31 unscanned_bytes_of_large_object_(0) {} | 31 unscanned_bytes_of_large_object_(0) {} |
32 | 32 |
33 | 33 |
34 void IncrementalMarking::TearDown() { | 34 void IncrementalMarking::TearDown() { delete marking_deque_memory_; } |
35 delete marking_deque_memory_; | |
36 } | |
37 | 35 |
38 | 36 |
39 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, | 37 void IncrementalMarking::RecordWriteSlow(HeapObject* obj, Object** slot, |
40 Object** slot, | |
41 Object* value) { | 38 Object* value) { |
42 if (BaseRecordWrite(obj, slot, value) && slot != NULL) { | 39 if (BaseRecordWrite(obj, slot, value) && slot != NULL) { |
43 MarkBit obj_bit = Marking::MarkBitFrom(obj); | 40 MarkBit obj_bit = Marking::MarkBitFrom(obj); |
44 if (Marking::IsBlack(obj_bit)) { | 41 if (Marking::IsBlack(obj_bit)) { |
45 // Object is not going to be rescanned, so we need to record the slot. | 42 // Object is not going to be rescanned, so we need to record the slot. |
46 heap_->mark_compact_collector()->RecordSlot( | 43 heap_->mark_compact_collector()->RecordSlot(HeapObject::RawField(obj, 0), |
47 HeapObject::RawField(obj, 0), slot, value); | 44 slot, value); |
48 } | 45 } |
49 } | 46 } |
50 } | 47 } |
51 | 48 |
52 | 49 |
53 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, | 50 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot, |
54 Object** slot, | |
55 Isolate* isolate) { | 51 Isolate* isolate) { |
56 DCHECK(obj->IsHeapObject()); | 52 DCHECK(obj->IsHeapObject()); |
57 IncrementalMarking* marking = isolate->heap()->incremental_marking(); | 53 IncrementalMarking* marking = isolate->heap()->incremental_marking(); |
58 | 54 |
59 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | 55 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); |
60 int counter = chunk->write_barrier_counter(); | 56 int counter = chunk->write_barrier_counter(); |
61 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { | 57 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { |
62 marking->write_barriers_invoked_since_last_step_ += | 58 marking->write_barriers_invoked_since_last_step_ += |
63 MemoryChunk::kWriteBarrierCounterGranularity - | 59 MemoryChunk::kWriteBarrierCounterGranularity - |
64 chunk->write_barrier_counter(); | 60 chunk->write_barrier_counter(); |
65 chunk->set_write_barrier_counter( | 61 chunk->set_write_barrier_counter( |
66 MemoryChunk::kWriteBarrierCounterGranularity); | 62 MemoryChunk::kWriteBarrierCounterGranularity); |
67 } | 63 } |
68 | 64 |
69 marking->RecordWrite(obj, slot, *slot); | 65 marking->RecordWrite(obj, slot, *slot); |
70 } | 66 } |
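
Aside on the counter logic above: the generated fast-path barrier only decrements a per-chunk countdown, and this slow path folds the consumed count into a global estimate once the countdown drops below half its granularity. A minimal standalone sketch of that arithmetic, with illustrative names (kGranularity, SlowPathBarrier) rather than V8's:

    // Standalone sketch (illustrative, not V8 code): estimating the total
    // number of write-barrier invocations from a per-chunk countdown.
    #include <cstdio>

    const int kGranularity = 1024;  // plays the role of
                                    // MemoryChunk::kWriteBarrierCounterGranularity

    struct Chunk {
      int counter = kGranularity;  // decremented by the fast-path barrier
    };

    int invocations_since_last_step = 0;

    void FastPathBarrier(Chunk* chunk) { chunk->counter--; }

    // Once the countdown falls below half the granularity, fold the consumed
    // count into the global estimate and refill, as RecordWriteFromCode does.
    void SlowPathBarrier(Chunk* chunk) {
      if (chunk->counter < kGranularity / 2) {
        invocations_since_last_step += kGranularity - chunk->counter;
        chunk->counter = kGranularity;
      }
    }

    int main() {
      Chunk chunk;
      for (int i = 0; i < 700; i++) FastPathBarrier(&chunk);
      SlowPathBarrier(&chunk);  // counter is 324 < 512, so all 700 are folded in
      std::printf("estimated invocations: %d\n", invocations_since_last_step);
      return 0;
    }
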
71 | 67 |
72 | 68 |
73 void IncrementalMarking::RecordCodeTargetPatch(Code* host, | 69 void IncrementalMarking::RecordCodeTargetPatch(Code* host, Address pc, |
74 Address pc, | |
75 HeapObject* value) { | 70 HeapObject* value) { |
76 if (IsMarking()) { | 71 if (IsMarking()) { |
77 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 72 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
78 RecordWriteIntoCode(host, &rinfo, value); | 73 RecordWriteIntoCode(host, &rinfo, value); |
79 } | 74 } |
80 } | 75 } |
81 | 76 |
82 | 77 |
83 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) { | 78 void IncrementalMarking::RecordCodeTargetPatch(Address pc, HeapObject* value) { |
84 if (IsMarking()) { | 79 if (IsMarking()) { |
85 Code* host = heap_->isolate()->inner_pointer_to_code_cache()-> | 80 Code* host = heap_->isolate() |
86 GcSafeFindCodeForInnerPointer(pc); | 81 ->inner_pointer_to_code_cache() |
| 82 ->GcSafeFindCodeForInnerPointer(pc); |
87 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); | 83 RelocInfo rinfo(pc, RelocInfo::CODE_TARGET, 0, host); |
88 RecordWriteIntoCode(host, &rinfo, value); | 84 RecordWriteIntoCode(host, &rinfo, value); |
89 } | 85 } |
90 } | 86 } |
91 | 87 |
92 | 88 |
93 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host, | 89 void IncrementalMarking::RecordWriteOfCodeEntrySlow(JSFunction* host, |
94 Object** slot, | 90 Object** slot, |
95 Code* value) { | 91 Code* value) { |
96 if (BaseRecordWrite(host, slot, value)) { | 92 if (BaseRecordWrite(host, slot, value)) { |
97 DCHECK(slot != NULL); | 93 DCHECK(slot != NULL); |
98 heap_->mark_compact_collector()-> | 94 heap_->mark_compact_collector()->RecordCodeEntrySlot( |
99 RecordCodeEntrySlot(reinterpret_cast<Address>(slot), value); | 95 reinterpret_cast<Address>(slot), value); |
100 } | 96 } |
101 } | 97 } |
102 | 98 |
103 | 99 |
104 void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj, | 100 void IncrementalMarking::RecordWriteIntoCodeSlow(HeapObject* obj, |
105 RelocInfo* rinfo, | 101 RelocInfo* rinfo, |
106 Object* value) { | 102 Object* value) { |
107 MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value)); | 103 MarkBit value_bit = Marking::MarkBitFrom(HeapObject::cast(value)); |
108 if (Marking::IsWhite(value_bit)) { | 104 if (Marking::IsWhite(value_bit)) { |
109 MarkBit obj_bit = Marking::MarkBitFrom(obj); | 105 MarkBit obj_bit = Marking::MarkBitFrom(obj); |
(...skipping 23 matching lines...)
133 if (Marking::IsBlack(mark_bit)) { | 129 if (Marking::IsBlack(mark_bit)) { |
134 MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), | 130 MemoryChunk::IncrementLiveBytesFromGC(heap_obj->address(), |
135 -heap_obj->Size()); | 131 -heap_obj->Size()); |
136 } | 132 } |
137 Marking::AnyToGrey(mark_bit); | 133 Marking::AnyToGrey(mark_bit); |
138 } | 134 } |
139 } | 135 } |
140 | 136 |
141 | 137 |
142 static inline void MarkBlackOrKeepGrey(HeapObject* heap_object, | 138 static inline void MarkBlackOrKeepGrey(HeapObject* heap_object, |
143 MarkBit mark_bit, | 139 MarkBit mark_bit, int size) { |
144 int size) { | |
145 DCHECK(!Marking::IsImpossible(mark_bit)); | 140 DCHECK(!Marking::IsImpossible(mark_bit)); |
146 if (mark_bit.Get()) return; | 141 if (mark_bit.Get()) return; |
147 mark_bit.Set(); | 142 mark_bit.Set(); |
148 MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); | 143 MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); |
149 DCHECK(Marking::IsBlack(mark_bit)); | 144 DCHECK(Marking::IsBlack(mark_bit)); |
150 } | 145 } |
151 | 146 |
152 | 147 |
153 static inline void MarkBlackOrKeepBlack(HeapObject* heap_object, | 148 static inline void MarkBlackOrKeepBlack(HeapObject* heap_object, |
154 MarkBit mark_bit, | 149 MarkBit mark_bit, int size) { |
155 int size) { | |
156 DCHECK(!Marking::IsImpossible(mark_bit)); | 150 DCHECK(!Marking::IsImpossible(mark_bit)); |
157 if (Marking::IsBlack(mark_bit)) return; | 151 if (Marking::IsBlack(mark_bit)) return; |
158 Marking::MarkBlack(mark_bit); | 152 Marking::MarkBlack(mark_bit); |
159 MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); | 153 MemoryChunk::IncrementLiveBytesFromGC(heap_object->address(), size); |
160 DCHECK(Marking::IsBlack(mark_bit)); | 154 DCHECK(Marking::IsBlack(mark_bit)); |
161 } | 155 } |
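
The two helpers above implement the usual tri-color scheme: white objects are unvisited, grey ones are discovered but not yet scanned, black ones are fully scanned, and a page's live-byte count is bumped exactly once, when an object first turns black. A toy model under that assumption (not V8's two-bit bitmap encoding):

    // Tri-color sketch: count live bytes exactly once, on the first
    // transition to black (mirrors MarkBlackOrKeepBlack above).
    #include <cassert>

    enum Color { WHITE, GREY, BLACK };

    struct Obj {
      Color color;
      int size;
    };

    int live_bytes = 0;

    void MarkBlackOrKeepBlack(Obj* obj) {
      if (obj->color == BLACK) return;  // already counted
      obj->color = BLACK;
      live_bytes += obj->size;
    }

    int main() {
      Obj o = {GREY, 64};
      MarkBlackOrKeepBlack(&o);
      MarkBlackOrKeepBlack(&o);  // idempotent: no double count
      assert(live_bytes == 64);
      return 0;
    }
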
162 | 156 |
163 | 157 |
164 class IncrementalMarkingMarkingVisitor | 158 class IncrementalMarkingMarkingVisitor |
165 : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> { | 159 : public StaticMarkingVisitor<IncrementalMarkingMarkingVisitor> { |
(...skipping 15 matching lines...)
181 chunk->owner()->identity() == LO_SPACE) { | 175 chunk->owner()->identity() == LO_SPACE) { |
182 chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR); | 176 chunk->SetFlag(MemoryChunk::HAS_PROGRESS_BAR); |
183 } | 177 } |
184 if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { | 178 if (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR)) { |
185 Heap* heap = map->GetHeap(); | 179 Heap* heap = map->GetHeap(); |
186 // When using a progress bar for large fixed arrays, scan only a chunk of | 180 // When using a progress bar for large fixed arrays, scan only a chunk of |
187 // the array and try to push it onto the marking deque again until it is | 181 // the array and try to push it onto the marking deque again until it is |
188 // fully scanned. Fall back to scanning it through to the end in case this | 182 // fully scanned. Fall back to scanning it through to the end in case this |
189 // fails because of a full deque. | 183 // fails because of a full deque. |
190 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); | 184 int object_size = FixedArray::BodyDescriptor::SizeOf(map, object); |
191 int start_offset = Max(FixedArray::BodyDescriptor::kStartOffset, | 185 int start_offset = |
192 chunk->progress_bar()); | 186 Max(FixedArray::BodyDescriptor::kStartOffset, chunk->progress_bar()); |
193 int end_offset = Min(object_size, | 187 int end_offset = |
194 start_offset + kProgressBarScanningChunk); | 188 Min(object_size, start_offset + kProgressBarScanningChunk); |
195 int already_scanned_offset = start_offset; | 189 int already_scanned_offset = start_offset; |
196 bool scan_until_end = false; | 190 bool scan_until_end = false; |
197 do { | 191 do { |
198 VisitPointersWithAnchor(heap, | 192 VisitPointersWithAnchor(heap, HeapObject::RawField(object, 0), |
199 HeapObject::RawField(object, 0), | |
200 HeapObject::RawField(object, start_offset), | 193 HeapObject::RawField(object, start_offset), |
201 HeapObject::RawField(object, end_offset)); | 194 HeapObject::RawField(object, end_offset)); |
202 start_offset = end_offset; | 195 start_offset = end_offset; |
203 end_offset = Min(object_size, end_offset + kProgressBarScanningChunk); | 196 end_offset = Min(object_size, end_offset + kProgressBarScanningChunk); |
204 scan_until_end = heap->incremental_marking()->marking_deque()->IsFull(); | 197 scan_until_end = heap->incremental_marking()->marking_deque()->IsFull(); |
205 } while (scan_until_end && start_offset < object_size); | 198 } while (scan_until_end && start_offset < object_size); |
206 chunk->set_progress_bar(start_offset); | 199 chunk->set_progress_bar(start_offset); |
207 if (start_offset < object_size) { | 200 if (start_offset < object_size) { |
208 heap->incremental_marking()->marking_deque()->UnshiftGrey(object); | 201 heap->incremental_marking()->marking_deque()->UnshiftGrey(object); |
209 heap->incremental_marking()->NotifyIncompleteScanOfObject( | 202 heap->incremental_marking()->NotifyIncompleteScanOfObject( |
(...skipping 28 matching lines...)
238 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { | 231 INLINE(static void VisitPointers(Heap* heap, Object** start, Object** end)) { |
239 for (Object** p = start; p < end; p++) { | 232 for (Object** p = start; p < end; p++) { |
240 Object* obj = *p; | 233 Object* obj = *p; |
241 if (obj->IsHeapObject()) { | 234 if (obj->IsHeapObject()) { |
242 heap->mark_compact_collector()->RecordSlot(start, p, obj); | 235 heap->mark_compact_collector()->RecordSlot(start, p, obj); |
243 MarkObject(heap, obj); | 236 MarkObject(heap, obj); |
244 } | 237 } |
245 } | 238 } |
246 } | 239 } |
247 | 240 |
248 INLINE(static void VisitPointersWithAnchor(Heap* heap, | 241 INLINE(static void VisitPointersWithAnchor(Heap* heap, Object** anchor, |
249 Object** anchor, | 242 Object** start, Object** end)) { |
250 Object** start, | |
251 Object** end)) { | |
252 for (Object** p = start; p < end; p++) { | 243 for (Object** p = start; p < end; p++) { |
253 Object* obj = *p; | 244 Object* obj = *p; |
254 if (obj->IsHeapObject()) { | 245 if (obj->IsHeapObject()) { |
255 heap->mark_compact_collector()->RecordSlot(anchor, p, obj); | 246 heap->mark_compact_collector()->RecordSlot(anchor, p, obj); |
256 MarkObject(heap, obj); | 247 MarkObject(heap, obj); |
257 } | 248 } |
258 } | 249 } |
259 } | 250 } |
260 | 251 |
261 // Marks the object grey and pushes it on the marking stack. | 252 // Marks the object grey and pushes it on the marking stack. |
(...skipping 20 matching lines...)
282 } | 273 } |
283 return false; | 274 return false; |
284 } | 275 } |
285 }; | 276 }; |
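
The progress-bar path in the visitor above amounts to: scan a large array a fixed-size chunk at a time, persist the offset reached, and requeue the object until it is fully scanned. A self-contained sketch of that loop, with made-up sizes standing in for kProgressBarScanningChunk and progress_bar():

    #include <algorithm>
    #include <cstdio>

    const int kChunk = 4;  // stands in for kProgressBarScanningChunk

    struct BigArray {
      int length;
      int progress;  // stands in for MemoryChunk::progress_bar()
    };

    void VisitSlice(const BigArray& a, int from, int to) {
      for (int i = from; i < to; i++) std::printf("scan slot %d\n", i);
    }

    // Scans one chunk; returns true when the whole array has been visited.
    bool ScanOneChunk(BigArray* a) {
      int end = std::min(a->length, a->progress + kChunk);
      VisitSlice(*a, a->progress, end);
      a->progress = end;
      return a->progress == a->length;
    }

    int main() {
      BigArray a = {10, 0};
      while (!ScanOneChunk(&a)) {
        // Here V8 would push the object back on the marking deque and
        // resume from a.progress on a later incremental step.
      }
      return 0;
    }
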
286 | 277 |
287 | 278 |
288 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { | 279 class IncrementalMarkingRootMarkingVisitor : public ObjectVisitor { |
289 public: | 280 public: |
290 explicit IncrementalMarkingRootMarkingVisitor( | 281 explicit IncrementalMarkingRootMarkingVisitor( |
291 IncrementalMarking* incremental_marking) | 282 IncrementalMarking* incremental_marking) |
292 : incremental_marking_(incremental_marking) { | 283 : incremental_marking_(incremental_marking) {} |
293 } | |
294 | 284 |
295 void VisitPointer(Object** p) { | 285 void VisitPointer(Object** p) { MarkObjectByPointer(p); } |
296 MarkObjectByPointer(p); | |
297 } | |
298 | 286 |
299 void VisitPointers(Object** start, Object** end) { | 287 void VisitPointers(Object** start, Object** end) { |
300 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); | 288 for (Object** p = start; p < end; p++) MarkObjectByPointer(p); |
301 } | 289 } |
302 | 290 |
303 private: | 291 private: |
304 void MarkObjectByPointer(Object** p) { | 292 void MarkObjectByPointer(Object** p) { |
305 Object* obj = *p; | 293 Object* obj = *p; |
306 if (!obj->IsHeapObject()) return; | 294 if (!obj->IsHeapObject()) return; |
307 | 295 |
(...skipping 19 matching lines...)
327 | 315 |
328 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk, | 316 void IncrementalMarking::SetOldSpacePageFlags(MemoryChunk* chunk, |
329 bool is_marking, | 317 bool is_marking, |
330 bool is_compacting) { | 318 bool is_compacting) { |
331 if (is_marking) { | 319 if (is_marking) { |
332 chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); | 320 chunk->SetFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); |
333 chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); | 321 chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
334 | 322 |
335 // It's difficult to filter out slots recorded for large objects. | 323 // It's difficult to filter out slots recorded for large objects. |
336 if (chunk->owner()->identity() == LO_SPACE && | 324 if (chunk->owner()->identity() == LO_SPACE && |
337 chunk->size() > static_cast<size_t>(Page::kPageSize) && | 325 chunk->size() > static_cast<size_t>(Page::kPageSize) && is_compacting) { |
338 is_compacting) { | |
339 chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION); | 326 chunk->SetFlag(MemoryChunk::RESCAN_ON_EVACUATION); |
340 } | 327 } |
341 } else if (chunk->owner()->identity() == CELL_SPACE || | 328 } else if (chunk->owner()->identity() == CELL_SPACE || |
342 chunk->owner()->identity() == PROPERTY_CELL_SPACE || | 329 chunk->owner()->identity() == PROPERTY_CELL_SPACE || |
343 chunk->scan_on_scavenge()) { | 330 chunk->scan_on_scavenge()) { |
344 chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); | 331 chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); |
345 chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); | 332 chunk->ClearFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
346 } else { | 333 } else { |
347 chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); | 334 chunk->ClearFlag(MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING); |
348 chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); | 335 chunk->SetFlag(MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING); |
(...skipping 89 matching lines...)
438 #ifndef DEBUG | 425 #ifndef DEBUG |
439 static const intptr_t kActivationThreshold = 8 * MB; | 426 static const intptr_t kActivationThreshold = 8 * MB; |
440 #else | 427 #else |
441 // TODO(gc) consider setting this to some low level so that some | 428 // TODO(gc) consider setting this to some low level so that some |
442 // debug tests run with incremental marking and some without. | 429 // debug tests run with incremental marking and some without. |
443 static const intptr_t kActivationThreshold = 0; | 430 static const intptr_t kActivationThreshold = 0; |
444 #endif | 431 #endif |
445 // Only start incremental marking in a safe state: 1) when incremental | 432 // Only start incremental marking in a safe state: 1) when incremental |
446 // marking is turned on, 2) when we are currently not in a GC, and | 433 // marking is turned on, 2) when we are currently not in a GC, and |
447 // 3) when we are currently not serializing or deserializing the heap. | 434 // 3) when we are currently not serializing or deserializing the heap. |
448 return FLAG_incremental_marking && | 435 return FLAG_incremental_marking && FLAG_incremental_marking_steps && |
449 FLAG_incremental_marking_steps && | 436 heap_->gc_state() == Heap::NOT_IN_GC && |
450 heap_->gc_state() == Heap::NOT_IN_GC && | 437 !heap_->isolate()->serializer_enabled() && |
451 !heap_->isolate()->serializer_enabled() && | 438 heap_->isolate()->IsInitialized() && |
452 heap_->isolate()->IsInitialized() && | 439 heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold; |
453 heap_->PromotedSpaceSizeOfObjects() > kActivationThreshold; | |
454 } | 440 } |
455 | 441 |
456 | 442 |
457 void IncrementalMarking::ActivateGeneratedStub(Code* stub) { | 443 void IncrementalMarking::ActivateGeneratedStub(Code* stub) { |
458 DCHECK(RecordWriteStub::GetMode(stub) == | 444 DCHECK(RecordWriteStub::GetMode(stub) == RecordWriteStub::STORE_BUFFER_ONLY); |
459 RecordWriteStub::STORE_BUFFER_ONLY); | |
460 | 445 |
461 if (!IsMarking()) { | 446 if (!IsMarking()) { |
462 // Initially the stub is generated in STORE_BUFFER_ONLY mode, thus | 447 // Initially the stub is generated in STORE_BUFFER_ONLY mode, thus |
463 // we don't need to do anything if incremental marking is | 448 // we don't need to do anything if incremental marking is |
464 // not active. | 449 // not active. |
465 } else if (IsCompacting()) { | 450 } else if (IsCompacting()) { |
466 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); | 451 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); |
467 } else { | 452 } else { |
468 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); | 453 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); |
469 } | 454 } |
470 } | 455 } |
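
So the record-write stub has three modes, chosen once marking starts. A hedged sketch of the mode selection (the enum mirrors RecordWriteStub's names; the real code patches generated stubs in place rather than calling a function):

    #include <cstdio>

    enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

    Mode PickMode(bool is_marking, bool is_compacting) {
      if (!is_marking) return STORE_BUFFER_ONLY;  // stubs start in this mode
      return is_compacting ? INCREMENTAL_COMPACTION : INCREMENTAL;
    }

    int main() {
      std::printf("%d\n", PickMode(true, false));  // 1: INCREMENTAL
      std::printf("%d\n", PickMode(true, true));   // 2: INCREMENTAL_COMPACTION
      return 0;
    }
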
471 | 456 |
472 | 457 |
473 static void PatchIncrementalMarkingRecordWriteStubs( | 458 static void PatchIncrementalMarkingRecordWriteStubs( |
474 Heap* heap, RecordWriteStub::Mode mode) { | 459 Heap* heap, RecordWriteStub::Mode mode) { |
475 UnseededNumberDictionary* stubs = heap->code_stubs(); | 460 UnseededNumberDictionary* stubs = heap->code_stubs(); |
476 | 461 |
477 int capacity = stubs->Capacity(); | 462 int capacity = stubs->Capacity(); |
478 for (int i = 0; i < capacity; i++) { | 463 for (int i = 0; i < capacity; i++) { |
479 Object* k = stubs->KeyAt(i); | 464 Object* k = stubs->KeyAt(i); |
480 if (stubs->IsKey(k)) { | 465 if (stubs->IsKey(k)) { |
481 uint32_t key = NumberToUint32(k); | 466 uint32_t key = NumberToUint32(k); |
482 | 467 |
483 if (CodeStub::MajorKeyFromKey(key) == | 468 if (CodeStub::MajorKeyFromKey(key) == CodeStub::RecordWrite) { |
484 CodeStub::RecordWrite) { | |
485 Object* e = stubs->ValueAt(i); | 469 Object* e = stubs->ValueAt(i); |
486 if (e->IsCode()) { | 470 if (e->IsCode()) { |
487 RecordWriteStub::Patch(Code::cast(e), mode); | 471 RecordWriteStub::Patch(Code::cast(e), mode); |
488 } | 472 } |
489 } | 473 } |
490 } | 474 } |
491 } | 475 } |
492 } | 476 } |
493 | 477 |
494 | 478 |
(...skipping 48 matching lines...)
543 heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold); | 527 heap_->new_space()->LowerInlineAllocationLimit(kAllocatedThreshold); |
544 } | 528 } |
545 | 529 |
546 | 530 |
547 void IncrementalMarking::StartMarking(CompactionFlag flag) { | 531 void IncrementalMarking::StartMarking(CompactionFlag flag) { |
548 if (FLAG_trace_incremental_marking) { | 532 if (FLAG_trace_incremental_marking) { |
549 PrintF("[IncrementalMarking] Start marking\n"); | 533 PrintF("[IncrementalMarking] Start marking\n"); |
550 } | 534 } |
551 | 535 |
552 is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) && | 536 is_compacting_ = !FLAG_never_compact && (flag == ALLOW_COMPACTION) && |
553 heap_->mark_compact_collector()->StartCompaction( | 537 heap_->mark_compact_collector()->StartCompaction( |
554 MarkCompactCollector::INCREMENTAL_COMPACTION); | 538 MarkCompactCollector::INCREMENTAL_COMPACTION); |
555 | 539 |
556 state_ = MARKING; | 540 state_ = MARKING; |
557 | 541 |
558 RecordWriteStub::Mode mode = is_compacting_ ? | 542 RecordWriteStub::Mode mode = is_compacting_ |
559 RecordWriteStub::INCREMENTAL_COMPACTION : RecordWriteStub::INCREMENTAL; | 543 ? RecordWriteStub::INCREMENTAL_COMPACTION |
| 544 : RecordWriteStub::INCREMENTAL; |
560 | 545 |
561 PatchIncrementalMarkingRecordWriteStubs(heap_, mode); | 546 PatchIncrementalMarkingRecordWriteStubs(heap_, mode); |
562 | 547 |
563 EnsureMarkingDequeIsCommitted(); | 548 EnsureMarkingDequeIsCommitted(); |
564 | 549 |
565 // Initialize marking stack. | 550 // Initialize marking stack. |
566 Address addr = static_cast<Address>(marking_deque_memory_->address()); | 551 Address addr = static_cast<Address>(marking_deque_memory_->address()); |
567 size_t size = marking_deque_memory_->size(); | 552 size_t size = marking_deque_memory_->size(); |
568 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; | 553 if (FLAG_force_marking_deque_overflows) size = 64 * kPointerSize; |
569 marking_deque_.Initialize(addr, addr + size); | 554 marking_deque_.Initialize(addr, addr + size); |
570 | 555 |
571 ActivateIncrementalWriteBarrier(); | 556 ActivateIncrementalWriteBarrier(); |
572 | 557 |
573 // Marking bits are cleared by the sweeper. | 558 // Marking bits are cleared by the sweeper. |
574 #ifdef VERIFY_HEAP | 559 #ifdef VERIFY_HEAP |
575 if (FLAG_verify_heap) { | 560 if (FLAG_verify_heap) { |
576 heap_->mark_compact_collector()->VerifyMarkbitsAreClean(); | 561 heap_->mark_compact_collector()->VerifyMarkbitsAreClean(); |
577 } | 562 } |
578 #endif | 563 #endif |
579 | 564 |
580 heap_->CompletelyClearInstanceofCache(); | 565 heap_->CompletelyClearInstanceofCache(); |
581 heap_->isolate()->compilation_cache()->MarkCompactPrologue(); | 566 heap_->isolate()->compilation_cache()->MarkCompactPrologue(); |
582 | 567 |
583 if (FLAG_cleanup_code_caches_at_gc) { | 568 if (FLAG_cleanup_code_caches_at_gc) { |
(...skipping 53 matching lines...)
637 (obj->IsFiller() && Marking::IsWhite(mark_bit))); | 622 (obj->IsFiller() && Marking::IsWhite(mark_bit))); |
638 #endif | 623 #endif |
639 } | 624 } |
640 } else if (obj->map() != filler_map) { | 625 } else if (obj->map() != filler_map) { |
641 // Skip one-word filler objects that appear on the | 626 // Skip one-word filler objects that appear on the |
642 // stack when we perform an in-place array shift. | 627 // stack when we perform an in-place array shift. |
643 array[new_top] = obj; | 628 array[new_top] = obj; |
644 new_top = ((new_top + 1) & mask); | 629 new_top = ((new_top + 1) & mask); |
645 DCHECK(new_top != marking_deque_.bottom()); | 630 DCHECK(new_top != marking_deque_.bottom()); |
646 #ifdef DEBUG | 631 #ifdef DEBUG |
647 MarkBit mark_bit = Marking::MarkBitFrom(obj); | 632 MarkBit mark_bit = Marking::MarkBitFrom(obj); |
648 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | 633 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); |
649 DCHECK(Marking::IsGrey(mark_bit) || | 634 DCHECK(Marking::IsGrey(mark_bit) || |
650 (obj->IsFiller() && Marking::IsWhite(mark_bit)) || | 635 (obj->IsFiller() && Marking::IsWhite(mark_bit)) || |
651 (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && | 636 (chunk->IsFlagSet(MemoryChunk::HAS_PROGRESS_BAR) && |
652 Marking::IsBlack(mark_bit))); | 637 Marking::IsBlack(mark_bit))); |
653 #endif | 638 #endif |
654 } | 639 } |
655 } | 640 } |
656 marking_deque_.set_top(new_top); | 641 marking_deque_.set_top(new_top); |
657 } | 642 } |
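
The deque-filtering loop above advances the index with (new_top + 1) & mask, which relies on the deque capacity being a power of two so the bitwise AND wraps the index without a modulo. A tiny sketch, assuming a capacity of 8:

    #include <cassert>

    int main() {
      const int kCapacity = 8;  // deque size, must be a power of two
      const int kMask = kCapacity - 1;
      int top = 6;
      for (int i = 0; i < 4; i++) top = (top + 1) & kMask;
      assert(top == 2);  // visits 7, 0, 1, 2: the index wraps for free
      return 0;
    }
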
658 | 643 |
659 | 644 |
660 void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) { | 645 void IncrementalMarking::VisitObject(Map* map, HeapObject* obj, int size) { |
661 MarkBit map_mark_bit = Marking::MarkBitFrom(map); | 646 MarkBit map_mark_bit = Marking::MarkBitFrom(map); |
662 if (Marking::IsWhite(map_mark_bit)) { | 647 if (Marking::IsWhite(map_mark_bit)) { |
(...skipping 168 matching lines...)
831 // in principle possible. | 816 // in principle possible. |
832 Start(PREVENT_COMPACTION); | 817 Start(PREVENT_COMPACTION); |
833 } else { | 818 } else { |
834 Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD); | 819 Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD); |
835 } | 820 } |
836 } | 821 } |
837 | 822 |
838 | 823 |
839 void IncrementalMarking::Step(intptr_t allocated_bytes, | 824 void IncrementalMarking::Step(intptr_t allocated_bytes, |
840 CompletionAction action) { | 825 CompletionAction action) { |
841 if (heap_->gc_state() != Heap::NOT_IN_GC || | 826 if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking || |
842 !FLAG_incremental_marking || | |
843 !FLAG_incremental_marking_steps || | 827 !FLAG_incremental_marking_steps || |
844 (state_ != SWEEPING && state_ != MARKING)) { | 828 (state_ != SWEEPING && state_ != MARKING)) { |
845 return; | 829 return; |
846 } | 830 } |
847 | 831 |
848 allocated_ += allocated_bytes; | 832 allocated_ += allocated_bytes; |
849 | 833 |
850 if (allocated_ < kAllocatedThreshold && | 834 if (allocated_ < kAllocatedThreshold && |
851 write_barriers_invoked_since_last_step_ < | 835 write_barriers_invoked_since_last_step_ < |
852 kWriteBarriersInvokedThreshold) { | 836 kWriteBarriersInvokedThreshold) { |
(...skipping 123 matching lines...)
976 bytes_rescanned_ = 0; | 960 bytes_rescanned_ = 0; |
977 marking_speed_ = kInitialMarkingSpeed; | 961 marking_speed_ = kInitialMarkingSpeed; |
978 bytes_scanned_ = 0; | 962 bytes_scanned_ = 0; |
979 write_barriers_invoked_since_last_step_ = 0; | 963 write_barriers_invoked_since_last_step_ = 0; |
980 } | 964 } |
981 | 965 |
982 | 966 |
983 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 967 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
984 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); | 968 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); |
985 } | 969 } |
986 | 970 } |
987 } } // namespace v8::internal | 971 } // namespace v8::internal |