// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef V8_INCREMENTAL_MARKING_H_
#define V8_INCREMENTAL_MARKING_H_


#include "src/execution.h"
#include "src/mark-compact.h"
#include "src/objects.h"

namespace v8 {
namespace internal {


class IncrementalMarking {
 public:
  enum State {
    STOPPED,
    SWEEPING,
    MARKING,
    COMPLETE
  };
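
  // The values are ordered: a marking cycle progresses from STOPPED towards
  // COMPLETE, and IsMarking() below depends on this ordering
  // (state() >= MARKING).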

  enum CompletionAction {
    GC_VIA_STACK_GUARD,
    NO_GC_VIA_STACK_GUARD
  };

  explicit IncrementalMarking(Heap* heap);

  static void Initialize();

  void TearDown();

  State state() {
    DCHECK(state_ == STOPPED || FLAG_incremental_marking);
    return state_;
  }

  bool should_hurry() { return should_hurry_; }
  void set_should_hurry(bool val) { should_hurry_ = val; }

  inline bool IsStopped() { return state() == STOPPED; }

  INLINE(bool IsMarking()) { return state() >= MARKING; }

  inline bool IsMarkingIncomplete() { return state() == MARKING; }

  inline bool IsComplete() { return state() == COMPLETE; }

  bool WorthActivating();

  enum CompactionFlag { ALLOW_COMPACTION, PREVENT_COMPACTION };

  void Start(CompactionFlag flag = ALLOW_COMPACTION);

  void Stop();

  void PrepareForScavenge();

  void UpdateMarkingDequeAfterScavenge();

  void Hurry();

  void Finalize();

  void Abort();

  void MarkingComplete(CompletionAction action);

  // It's hard to know how much work the incremental marker should do to make
  // progress in the face of the mutator creating new work for it. We start
  // off at a moderate rate of work and gradually increase the speed of the
  // incremental marker until it completes.
  // Do some marking every time this much memory has been allocated or that
  // many heavy (color-checking) write barriers have been invoked.
  static const intptr_t kAllocatedThreshold = 65536;
  static const intptr_t kWriteBarriersInvokedThreshold = 32768;
  // Start off by marking this many times more memory than has been allocated.
  static const intptr_t kInitialMarkingSpeed = 1;
  // But if we are promoting a lot of data we need to mark faster to keep up
  // with the data that is entering the old space through promotion.
  static const intptr_t kFastMarking = 3;
  // After this many steps we increase the marking/allocating factor.
  static const intptr_t kMarkingSpeedAccellerationInterval = 1024;
  // This is how much we increase the marking/allocating factor by.
  static const intptr_t kMarkingSpeedAccelleration = 2;
  static const intptr_t kMaxMarkingSpeed = 1000;
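
  // Sketch of how these constants are meant to interact (an illustration, not
  // the exact Step() arithmetic): a marking step is triggered after roughly
  // kAllocatedThreshold bytes of allocation or kWriteBarriersInvokedThreshold
  // heavy write barriers, and the amount of marking done per step is scaled by
  // the current marking speed.  The speed starts at kInitialMarkingSpeed, may
  // be raised to kFastMarking when promotion is high (see
  // NotifyOfHighPromotionRate below), and is increased by
  // kMarkingSpeedAccelleration every kMarkingSpeedAccellerationInterval steps,
  // but not beyond kMaxMarkingSpeed.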

  void OldSpaceStep(intptr_t allocated);

  void Step(intptr_t allocated, CompletionAction action);

  inline void RestartIfNotMarking() {
    if (state_ == COMPLETE) {
      state_ = MARKING;
      if (FLAG_trace_incremental_marking) {
        PrintF("[IncrementalMarking] Restarting (new grey objects)\n");
      }
    }
  }
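
  // Even after the marker has reached COMPLETE, the mutator can turn more
  // objects grey (for example through the write-barrier hooks declared below);
  // RestartIfNotMarking() above flips the state back to MARKING so that the
  // new grey objects get processed.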

  static void RecordWriteFromCode(HeapObject* obj,
                                  Object** slot,
                                  Isolate* isolate);

  // Record a slot for compaction. Returns false for objects that are
  // guaranteed to be rescanned or not guaranteed to survive.
  //
  // No slots in white objects should be recorded, as some slots are typed and
  // cannot be interpreted correctly if the underlying object does not survive
  // the incremental cycle (stays white).
  INLINE(bool BaseRecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWrite(HeapObject* obj, Object** slot, Object* value));
  INLINE(void RecordWriteIntoCode(HeapObject* obj,
                                  RelocInfo* rinfo,
                                  Object* value));
  INLINE(void RecordWriteOfCodeEntry(JSFunction* host,
                                     Object** slot,
                                     Code* value));
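
  // Typical call shape (an illustrative sketch, not a mandated contract):
  // after the mutator stores `value` into `*slot` of `obj`, generated code or
  // the runtime calls something like
  //   heap->incremental_marking()->RecordWrite(obj, slot, value);
  // so that the marker can account for the new reference and, when compacting,
  // record the slot.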


  void RecordWriteSlow(HeapObject* obj, Object** slot, Object* value);
  void RecordWriteIntoCodeSlow(HeapObject* obj,
                               RelocInfo* rinfo,
                               Object* value);
  void RecordWriteOfCodeEntrySlow(JSFunction* host, Object** slot, Code* value);
  void RecordCodeTargetPatch(Code* host, Address pc, HeapObject* value);
  void RecordCodeTargetPatch(Address pc, HeapObject* value);

  inline void RecordWrites(HeapObject* obj);

  inline void BlackToGreyAndUnshift(HeapObject* obj, MarkBit mark_bit);

  inline void WhiteToGreyAndPush(HeapObject* obj, MarkBit mark_bit);

  inline void SetOldSpacePageFlags(MemoryChunk* chunk) {
    SetOldSpacePageFlags(chunk, IsMarking(), IsCompacting());
  }

  inline void SetNewSpacePageFlags(NewSpacePage* chunk) {
    SetNewSpacePageFlags(chunk, IsMarking());
  }

  MarkingDeque* marking_deque() { return &marking_deque_; }

  bool IsCompacting() { return IsMarking() && is_compacting_; }

  void ActivateGeneratedStub(Code* stub);

  void NotifyOfHighPromotionRate() {
    if (IsMarking()) {
      if (marking_speed_ < kFastMarking) {
        if (FLAG_trace_gc) {
          PrintPID("Increasing marking speed to %d "
                   "due to high promotion rate\n",
                   static_cast<int>(kFastMarking));
        }
        marking_speed_ = kFastMarking;
      }
    }
  }

  void EnterNoMarkingScope() {
    no_marking_scope_depth_++;
  }

  void LeaveNoMarkingScope() {
    no_marking_scope_depth_--;
  }
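
  // EnterNoMarkingScope()/LeaveNoMarkingScope() are intended to be used as a
  // balanced pair around code during which no incremental marking work should
  // be performed; the depth counter lets such scopes nest.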

  void UncommitMarkingDeque();

  void NotifyIncompleteScanOfObject(int unscanned_bytes) {
    unscanned_bytes_of_large_object_ = unscanned_bytes;
  }

 private:
  int64_t SpaceLeftInOldSpace();

  void ResetStepCounters();

  void StartMarking(CompactionFlag flag);

  void ActivateIncrementalWriteBarrier(PagedSpace* space);
  static void ActivateIncrementalWriteBarrier(NewSpace* space);
  void ActivateIncrementalWriteBarrier();

  static void DeactivateIncrementalWriteBarrierForSpace(PagedSpace* space);
  static void DeactivateIncrementalWriteBarrierForSpace(NewSpace* space);
  void DeactivateIncrementalWriteBarrier();

  static void SetOldSpacePageFlags(MemoryChunk* chunk,
                                   bool is_marking,
                                   bool is_compacting);

  static void SetNewSpacePageFlags(NewSpacePage* chunk, bool is_marking);

  void EnsureMarkingDequeIsCommitted();

  INLINE(void ProcessMarkingDeque());

  INLINE(intptr_t ProcessMarkingDeque(intptr_t bytes_to_process));

  INLINE(void VisitObject(Map* map, HeapObject* obj, int size));

  Heap* heap_;

  State state_;
  bool is_compacting_;

  base::VirtualMemory* marking_deque_memory_;
  bool marking_deque_memory_committed_;
  MarkingDeque marking_deque_;

  int steps_count_;
  int64_t old_generation_space_available_at_start_of_incremental_;
  int64_t old_generation_space_used_at_start_of_incremental_;
  int64_t bytes_rescanned_;
  bool should_hurry_;
  int marking_speed_;
  intptr_t bytes_scanned_;
  intptr_t allocated_;
  intptr_t write_barriers_invoked_since_last_step_;

  int no_marking_scope_depth_;

  int unscanned_bytes_of_large_object_;

  DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking);
};

} }  // namespace v8::internal

#endif  // V8_INCREMENTAL_MARKING_H_