| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef V8_HEAP_INCREMENTAL_MARKING_H_ | 5 #ifndef V8_HEAP_INCREMENTAL_MARKING_H_ |
| 6 #define V8_HEAP_INCREMENTAL_MARKING_H_ | 6 #define V8_HEAP_INCREMENTAL_MARKING_H_ |
| 7 | 7 |
| 8 #include "src/cancelable-task.h" | 8 #include "src/cancelable-task.h" |
| 9 #include "src/execution.h" | 9 #include "src/execution.h" |
| 10 #include "src/heap/heap.h" | 10 #include "src/heap/heap.h" |
| (...skipping 90 matching lines...) |
| 101 CompletionAction completion_action, | 101 CompletionAction completion_action, |
| 102 ForceCompletionAction force_completion, | 102 ForceCompletionAction force_completion, |
| 103 StepOrigin step_origin); | 103 StepOrigin step_origin); |
| 104 | 104 |
| 105 // It's hard to know how much work the incremental marker should do to make | 105 // It's hard to know how much work the incremental marker should do to make |
| 106 // progress in the face of the mutator creating new work for it. We start | 106 // progress in the face of the mutator creating new work for it. We start |
| 107 // off at a moderate rate of work and gradually increase the speed of the | 107 // off at a moderate rate of work and gradually increase the speed of the |
| 108 // incremental marker until it completes. | 108 // incremental marker until it completes. |
| 109 // Do some marking every time this much memory has been allocated or that many | 109 // Do some marking every time this much memory has been allocated or that many |
| 110 // heavy (color-checking) write barriers have been invoked. | 110 // heavy (color-checking) write barriers have been invoked. |
| 111 static const intptr_t kAllocatedThreshold = 65536; | 111 static const size_t kAllocatedThreshold = 64 * KB; |
| 112 static const intptr_t kWriteBarriersInvokedThreshold = 32768; | |
| 113 // Start off by marking this many times more memory than has been allocated. | |
| 114 static const intptr_t kInitialMarkingSpeed = 1; | |
| 115 // But if we are promoting a lot of data we need to mark faster to keep up | |
| 116 // with the data that is entering the old space through promotion. | |
| 117 static const intptr_t kFastMarking = 3; | |
| 118 // After this many steps we increase the marking/allocating factor. | |
| 119 static const intptr_t kMarkingSpeedAccellerationInterval = 1024; | |
| 120 // This is how much we increase the marking/allocating factor by. | |
| 121 static const intptr_t kMarkingSpeedAccelleration = 2; | |
| 122 static const intptr_t kMaxMarkingSpeed = 1000; | |
| 123 | 112 |
| 124 static const intptr_t kStepSizeInMs = 1; | 113 static const int kStepSizeInMs = 1; |
| 114 static const int kMaxStepSizeInMs = 5; |
| 125 | 115 |
| 126 // This is the upper bound for how many times we allow finalization of | 116 // This is the upper bound for how many times we allow finalization of |
| 127 // incremental marking to be postponed. | 117 // incremental marking to be postponed. |
| 128 static const size_t kMaxIdleMarkingDelayCounter = 3; | 118 static const int kMaxIdleMarkingDelayCounter = 3; |
| 129 | 119 |
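
The comment above describes the pacing problem these constants address: do a bounded amount of marking every time roughly kAllocatedThreshold bytes have been allocated, and keep each step within a small wall-clock budget (which is what kStepSizeInMs and kMaxStepSizeInMs suggest). The following stand-alone sketch only illustrates that idea; it is not V8 code, and every name in it (MarkerPacer, MarkSmallBatch, ...) is invented for illustration.

// Hypothetical, self-contained illustration of threshold-driven marking
// steps; none of these names exist in V8.
#include <chrono>
#include <cstddef>

class MarkerPacer {
 public:
  static constexpr size_t kAllocatedThreshold = 64 * 1024;  // 64 KB
  static constexpr int kMaxStepSizeInMs = 5;

  // Called by an allocation observer with the bytes just allocated.
  void NotifyAllocation(size_t bytes) {
    bytes_allocated_ += bytes;
    if (bytes_allocated_ >= kAllocatedThreshold) {
      RunStep(bytes_allocated_);
      bytes_allocated_ = 0;
    }
  }

 private:
  // Mark objects until either the byte budget or the time budget runs out.
  void RunStep(size_t bytes_to_process) {
    using Clock = std::chrono::steady_clock;
    const auto deadline =
        Clock::now() + std::chrono::milliseconds(kMaxStepSizeInMs);
    size_t processed = 0;
    while (processed < bytes_to_process && Clock::now() < deadline) {
      processed += MarkSmallBatch();
    }
  }

  // Placeholder for "visit a few objects from the marking deque".
  size_t MarkSmallBatch() { return 1024; }

  size_t bytes_allocated_ = 0;
};

The real Step() declared below is richer: it takes a CompletionAction, a ForceCompletionAction and a StepOrigin, and returns how many bytes were actually processed.
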
| 130 void FinalizeSweeping(); | 120 void FinalizeSweeping(); |
| 131 | 121 |
| 132 void NotifyAllocatedBytes(intptr_t allocated_bytes); | 122 size_t Step(size_t bytes_to_process, CompletionAction action, |
| 133 | 123 ForceCompletionAction completion, StepOrigin step_origin); |
| 134 void Step(intptr_t bytes_to_process, CompletionAction action, | |
| 135 ForceCompletionAction completion, StepOrigin origin); | |
| 136 | 124 |
| 137 inline void RestartIfNotMarking(); | 125 inline void RestartIfNotMarking(); |
| 138 | 126 |
| 139 static void RecordWriteFromCode(HeapObject* obj, Object** slot, | 127 static void RecordWriteFromCode(HeapObject* obj, Object** slot, |
| 140 Isolate* isolate); | 128 Isolate* isolate); |
| 141 | 129 |
| 142 static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot, | 130 static void RecordWriteOfCodeEntryFromCode(JSFunction* host, Object** slot, |
| 143 Isolate* isolate); | 131 Isolate* isolate); |
| 144 | 132 |
| 145 // Record a slot for compaction. Returns false for objects that are | 133 // Record a slot for compaction. Returns false for objects that are |
| (...skipping 22 matching lines...) |
| 168 } | 156 } |
| 169 | 157 |
| 170 inline void SetNewSpacePageFlags(Page* chunk) { | 158 inline void SetNewSpacePageFlags(Page* chunk) { |
| 171 SetNewSpacePageFlags(chunk, IsMarking()); | 159 SetNewSpacePageFlags(chunk, IsMarking()); |
| 172 } | 160 } |
| 173 | 161 |
| 174 bool IsCompacting() { return IsMarking() && is_compacting_; } | 162 bool IsCompacting() { return IsMarking() && is_compacting_; } |
| 175 | 163 |
| 176 void ActivateGeneratedStub(Code* stub); | 164 void ActivateGeneratedStub(Code* stub); |
| 177 | 165 |
| 178 void NotifyOfHighPromotionRate(); | |
| 179 | |
| 180 void NotifyIncompleteScanOfObject(int unscanned_bytes) { | 166 void NotifyIncompleteScanOfObject(int unscanned_bytes) { |
| 181 unscanned_bytes_of_large_object_ = unscanned_bytes; | 167 unscanned_bytes_of_large_object_ = unscanned_bytes; |
| 182 } | 168 } |
| 183 | 169 |
| 184 void ClearIdleMarkingDelayCounter(); | 170 void ClearIdleMarkingDelayCounter(); |
| 185 | 171 |
| 186 bool IsIdleMarkingDelayCounterLimitReached(); | 172 bool IsIdleMarkingDelayCounterLimitReached(); |
| 187 | 173 |
| 188 static void MarkGrey(Heap* heap, HeapObject* object); | 174 static void MarkGrey(Heap* heap, HeapObject* object); |
| 189 | 175 |
| (...skipping 38 matching lines...) |
| 228 void AbortBlackAllocation(); | 214 void AbortBlackAllocation(); |
| 229 | 215 |
| 230 private: | 216 private: |
| 231 class Observer : public AllocationObserver { | 217 class Observer : public AllocationObserver { |
| 232 public: | 218 public: |
| 233 Observer(IncrementalMarking& incremental_marking, intptr_t step_size) | 219 Observer(IncrementalMarking& incremental_marking, intptr_t step_size) |
| 234 : AllocationObserver(step_size), | 220 : AllocationObserver(step_size), |
| 235 incremental_marking_(incremental_marking) {} | 221 incremental_marking_(incremental_marking) {} |
| 236 | 222 |
| 237 void Step(int bytes_allocated, Address, size_t) override { | 223 void Step(int bytes_allocated, Address, size_t) override { |
| 238 incremental_marking_.NotifyAllocatedBytes(bytes_allocated); | 224 incremental_marking_.AdvanceIncrementalMarkingOnAllocation(); |
| 239 } | 225 } |
| 240 | 226 |
| 241 private: | 227 private: |
| 242 IncrementalMarking& incremental_marking_; | 228 IncrementalMarking& incremental_marking_; |
| 243 }; | 229 }; |
| 244 | 230 |
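The Observer class above is the glue between allocation and marking: it is an AllocationObserver constructed with a step size, and its Step() hook now simply forwards to AdvanceIncrementalMarkingOnAllocation(). As a rough mental model only (simplified, not the actual V8 classes), an allocation observer works like this:

// Simplified model of the allocation-observer mechanism; the real
// AllocationObserver/Space types in V8 carry more state than this.
#include <cstddef>

class SimpleAllocationObserver {
 public:
  explicit SimpleAllocationObserver(size_t step_size)
      : step_size_(step_size) {}
  virtual ~SimpleAllocationObserver() = default;

  // Invoked by the owning space once roughly step_size_ bytes have been
  // allocated since the last notification.
  virtual void Step(size_t bytes_allocated) = 0;

  size_t step_size() const { return step_size_; }

 private:
  size_t step_size_;
};

class SimpleSpace {
 public:
  explicit SimpleSpace(SimpleAllocationObserver* observer)
      : observer_(observer) {}

  void Allocate(size_t bytes) {
    bytes_since_last_step_ += bytes;
    if (bytes_since_last_step_ >= observer_->step_size()) {
      observer_->Step(bytes_since_last_step_);
      bytes_since_last_step_ = 0;
    }
  }

 private:
  SimpleAllocationObserver* observer_;
  size_t bytes_since_last_step_ = 0;
};

The two Observer members added near the end of the class (new_generation_observer_ and old_generation_observer_) suggest one such observer per generation, so allocation in either new or old space can push marking forward.
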
| 245 int64_t SpaceLeftInOldSpace(); | 231 int64_t SpaceLeftInOldSpace(); |
| 246 | 232 |
| 247 void SpeedUp(); | |
| 248 | |
| 249 void ResetStepCounters(); | |
| 250 | |
| 251 void StartMarking(); | 233 void StartMarking(); |
| 252 | 234 |
| 253 void StartBlackAllocation(); | 235 void StartBlackAllocation(); |
| 254 void FinishBlackAllocation(); | 236 void FinishBlackAllocation(); |
| 255 | 237 |
| 256 void MarkRoots(); | 238 void MarkRoots(); |
| 257 void MarkObjectGroups(); | 239 void MarkObjectGroups(); |
| 258 void ProcessWeakCells(); | 240 void ProcessWeakCells(); |
| 259 // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to | 241 // Retain dying maps for <FLAG_retain_maps_for_n_gc> garbage collections to |
| 260 // increase chances of reusing of map transition tree in future. | 242 // increase chances of reusing of map transition tree in future. |
| (...skipping 15 matching lines...) |
| 276 INLINE(void ProcessMarkingDeque()); | 258 INLINE(void ProcessMarkingDeque()); |
| 277 | 259 |
| 278 INLINE(intptr_t ProcessMarkingDeque( | 260 INLINE(intptr_t ProcessMarkingDeque( |
| 279 intptr_t bytes_to_process, | 261 intptr_t bytes_to_process, |
| 280 ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION)); | 262 ForceCompletionAction completion = DO_NOT_FORCE_COMPLETION)); |
| 281 | 263 |
| 282 INLINE(void VisitObject(Map* map, HeapObject* obj, int size)); | 264 INLINE(void VisitObject(Map* map, HeapObject* obj, int size)); |
| 283 | 265 |
| 284 void IncrementIdleMarkingDelayCounter(); | 266 void IncrementIdleMarkingDelayCounter(); |
| 285 | 267 |
| 268 void AdvanceIncrementalMarkingOnAllocation(); |
| 269 |
| 270 size_t StepSizeToKeepUpWithAllocations(); |
| 271 size_t StepSizeToMakeProgress(); |
| 272 |
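
AdvanceIncrementalMarkingOnAllocation() and the two step-size helpers declared above hint at the new scheduling policy: one estimate keeps marking in step with what the mutator allocates, the other guarantees forward progress even when allocation is slow. A plausible way to combine them is to take the larger of the two; the sketch below is hypothetical (the names and tuning constant are assumed, not taken from incremental-marking.cc).

#include <algorithm>
#include <cstddef>

size_t StepSizeToKeepUpWithAllocationsSketch(size_t bytes_allocated) {
  // Mark at least as many bytes as the mutator allocated since the last step.
  return bytes_allocated;
}

size_t StepSizeToMakeProgressSketch(size_t remaining_marking_work) {
  // Mark a fixed fraction of the remaining work so the marker converges even
  // under low allocation rates.
  constexpr size_t kProgressDivisor = 256;  // assumed tuning constant
  return remaining_marking_work / kProgressDivisor;
}

size_t CombinedStepSize(size_t bytes_allocated, size_t remaining_marking_work) {
  return std::max(StepSizeToKeepUpWithAllocationsSketch(bytes_allocated),
                  StepSizeToMakeProgressSketch(remaining_marking_work));
}
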
| 286 Heap* heap_; | 273 Heap* heap_; |
| 287 | 274 |
| 288 Observer observer_; | 275 State state_; |
| 289 | 276 |
| 290 State state_; | 277 double start_time_ms_; |
| 278 size_t initial_old_generation_size_; |
| 279 size_t old_generation_allocation_counter_; |
| 280 size_t bytes_allocated_; |
| 281 size_t bytes_marked_ahead_of_schedule_; |
| 282 size_t unscanned_bytes_of_large_object_; |
| 283 |
| 284 int idle_marking_delay_counter_; |
| 285 int incremental_marking_finalization_rounds_; |
| 286 |
| 291 bool is_compacting_; | 287 bool is_compacting_; |
| 292 | |
| 293 int steps_count_; | |
| 294 int64_t old_generation_space_available_at_start_of_incremental_; | |
| 295 int64_t old_generation_space_used_at_start_of_incremental_; | |
| 296 int64_t bytes_rescanned_; | |
| 297 bool should_hurry_; | 288 bool should_hurry_; |
| 298 int marking_speed_; | |
| 299 intptr_t bytes_scanned_; | |
| 300 intptr_t allocated_; | |
| 301 intptr_t write_barriers_invoked_since_last_step_; | |
| 302 intptr_t bytes_marked_ahead_of_schedule_; | |
| 303 size_t idle_marking_delay_counter_; | |
| 304 | |
| 305 int unscanned_bytes_of_large_object_; | |
| 306 | |
| 307 bool was_activated_; | 289 bool was_activated_; |
| 308 | |
| 309 bool black_allocation_; | 290 bool black_allocation_; |
| 310 | |
| 311 bool finalize_marking_completed_; | 291 bool finalize_marking_completed_; |
| 312 | 292 |
| 313 int incremental_marking_finalization_rounds_; | |
| 314 | |
| 315 GCRequestType request_type_; | 293 GCRequestType request_type_; |
| 316 | 294 |
| 317 IncrementalMarkingJob incremental_marking_job_; | 295 IncrementalMarkingJob incremental_marking_job_; |
| 296 Observer new_generation_observer_; |
| 297 Observer old_generation_observer_; |
| 318 | 298 |
| 319 DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking); | 299 DISALLOW_IMPLICIT_CONSTRUCTORS(IncrementalMarking); |
| 320 }; | 300 }; |
| 321 } // namespace internal | 301 } // namespace internal |
| 322 } // namespace v8 | 302 } // namespace v8 |
| 323 | 303 |
| 324 #endif // V8_HEAP_INCREMENTAL_MARKING_H_ | 304 #endif // V8_HEAP_INCREMENTAL_MARKING_H_ |