OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/incremental-marking.h" | 5 #include "src/heap/incremental-marking.h" |
6 | 6 |
7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
8 #include "src/compilation-cache.h" | 8 #include "src/compilation-cache.h" |
9 #include "src/conversions.h" | 9 #include "src/conversions.h" |
10 #include "src/heap/gc-idle-time-handler.h" | 10 #include "src/heap/gc-idle-time-handler.h" |
(...skipping 12 matching lines...) |
23 return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD, | 23 return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD, |
24 IncrementalMarking::FORCE_MARKING, | 24 IncrementalMarking::FORCE_MARKING, |
25 IncrementalMarking::DO_NOT_FORCE_COMPLETION); | 25 IncrementalMarking::DO_NOT_FORCE_COMPLETION); |
26 } | 26 } |
27 | 27 |
28 IncrementalMarking::IncrementalMarking(Heap* heap) | 28 IncrementalMarking::IncrementalMarking(Heap* heap) |
29 : heap_(heap), | 29 : heap_(heap), |
30 observer_(*this, kAllocatedThreshold), | 30 observer_(*this, kAllocatedThreshold), |
31 state_(STOPPED), | 31 state_(STOPPED), |
32 is_compacting_(false), | 32 is_compacting_(false), |
33 steps_count_(0), | |
34 old_generation_space_available_at_start_of_incremental_(0), | |
35 old_generation_space_used_at_start_of_incremental_(0), | |
36 bytes_rescanned_(0), | |
37 should_hurry_(false), | 33 should_hurry_(false), |
38 marking_speed_(0), | |
39 bytes_scanned_(0), | |
40 allocated_(0), | 34 allocated_(0), |
41 write_barriers_invoked_since_last_step_(0), | 35 write_barriers_invoked_since_last_step_(0), |
42 idle_marking_delay_counter_(0), | 36 idle_marking_delay_counter_(0), |
43 unscanned_bytes_of_large_object_(0), | 37 unscanned_bytes_of_large_object_(0), |
44 was_activated_(false), | 38 was_activated_(false), |
45 black_allocation_(false), | 39 black_allocation_(false), |
46 finalize_marking_completed_(false), | 40 finalize_marking_completed_(false), |
47 incremental_marking_finalization_rounds_(0), | 41 incremental_marking_finalization_rounds_(0), |
48 request_type_(NONE) {} | 42 request_type_(NONE) {} |
49 | 43 |
(...skipping 24 matching lines...) |
74 | 68 |
75 | 69 |
76 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot, | 70 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot, |
77 Isolate* isolate) { | 71 Isolate* isolate) { |
78 DCHECK(obj->IsHeapObject()); | 72 DCHECK(obj->IsHeapObject()); |
79 IncrementalMarking* marking = isolate->heap()->incremental_marking(); | 73 IncrementalMarking* marking = isolate->heap()->incremental_marking(); |
80 | 74 |
81 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | 75 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); |
82 int counter = chunk->write_barrier_counter(); | 76 int counter = chunk->write_barrier_counter(); |
83 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { | 77 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { |
84 marking->write_barriers_invoked_since_last_step_ += | 78 marking->write_barriers_invoked_since_last_step_ = |
85 MemoryChunk::kWriteBarrierCounterGranularity - | 79 Min(kMaxWriteBarrierCounter, |
86 chunk->write_barrier_counter(); | 80 marking->write_barriers_invoked_since_last_step_ + |
| 81 MemoryChunk::kWriteBarrierCounterGranularity - |
| 82 chunk->write_barrier_counter()); |
87 chunk->set_write_barrier_counter( | 83 chunk->set_write_barrier_counter( |
88 MemoryChunk::kWriteBarrierCounterGranularity); | 84 MemoryChunk::kWriteBarrierCounterGranularity); |
89 } | 85 } |
90 | 86 |
91 marking->RecordWrite(obj, slot, *slot); | 87 marking->RecordWrite(obj, slot, *slot); |
92 } | 88 } |
93 | 89 |
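Aside on the hunk above: the write-barrier accounting now clamps its running total with Min(kMaxWriteBarrierCounter, ...), so a tight, non-allocating loop that hammers the write barrier can no longer inflate the next marking step without bound. A minimal standalone sketch of the clamping, assuming illustrative values for the granularity and the new cap (the real constants are defined in V8's headers and may differ):

#include <algorithm>
#include <cstdint>
#include <cstdio>

// Assumed stand-in values, not the real V8 constants.
const intptr_t kGranularity = 2 * 1024 * 1024;  // kWriteBarrierCounterGranularity
const intptr_t kMaxWriteBarrierCounter = 64 * kGranularity;

// Mirrors the new accumulation: credit the drained portion of the chunk's
// counter, but never let the running total exceed the cap.
intptr_t Accumulate(intptr_t invoked_since_last_step, intptr_t chunk_counter) {
  return std::min(kMaxWriteBarrierCounter,
                  invoked_since_last_step + kGranularity - chunk_counter);
}

int main() {
  intptr_t total = 0;
  // Simulate many chunks draining their counters just past the half-way mark,
  // as a write-barrier-heavy, non-allocating loop would.
  for (int i = 0; i < 1000; i++) {
    total = Accumulate(total, kGranularity / 2 - 1);
  }
  // Uncapped this would approach ~1 GB; the Min() pins it at the cap.
  std::printf("accumulated: %lld\n", static_cast<long long>(total));
  return 0;
}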
94 // static | 90 // static |
95 void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host, | 91 void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host, |
96 Object** slot, | 92 Object** slot, |
96 (...skipping 363 matching lines...) |
460 // we don't need to do anything if incremental marking is | 456 // we don't need to do anything if incremental marking is |
461 // not active. | 457 // not active. |
462 } else if (IsCompacting()) { | 458 } else if (IsCompacting()) { |
463 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); | 459 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); |
464 } else { | 460 } else { |
465 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); | 461 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); |
466 } | 462 } |
467 } | 463 } |
468 | 464 |
469 | 465 |
470 void IncrementalMarking::NotifyOfHighPromotionRate() { | |
471 if (IsMarking()) { | |
472 if (marking_speed_ < kFastMarking) { | |
473 if (FLAG_trace_gc) { | |
474 heap()->isolate()->PrintWithTimestamp( | |
475 "Increasing marking speed to %d " | |
476 "due to high promotion rate\n", | |
477 static_cast<int>(kFastMarking)); | |
478 } | |
479 marking_speed_ = kFastMarking; | |
480 } | |
481 } | |
482 } | |
483 | |
484 | |
485 static void PatchIncrementalMarkingRecordWriteStubs( | 466 static void PatchIncrementalMarkingRecordWriteStubs( |
486 Heap* heap, RecordWriteStub::Mode mode) { | 467 Heap* heap, RecordWriteStub::Mode mode) { |
487 UnseededNumberDictionary* stubs = heap->code_stubs(); | 468 UnseededNumberDictionary* stubs = heap->code_stubs(); |
488 | 469 |
489 int capacity = stubs->Capacity(); | 470 int capacity = stubs->Capacity(); |
490 Isolate* isolate = heap->isolate(); | 471 Isolate* isolate = heap->isolate(); |
491 for (int i = 0; i < capacity; i++) { | 472 for (int i = 0; i < capacity; i++) { |
492 Object* k = stubs->KeyAt(i); | 473 Object* k = stubs->KeyAt(i); |
493 if (stubs->IsKey(isolate, k)) { | 474 if (stubs->IsKey(isolate, k)) { |
494 uint32_t key = NumberToUint32(k); | 475 uint32_t key = NumberToUint32(k); |
(...skipping 567 matching lines...) |
1062 !heap()->mark_compact_collector()->marking_deque()->IsEmpty()); | 1043 !heap()->mark_compact_collector()->marking_deque()->IsEmpty()); |
1063 return remaining_time_in_ms; | 1044 return remaining_time_in_ms; |
1064 } | 1045 } |
1065 | 1046 |
1066 | 1047 |
1067 void IncrementalMarking::OldSpaceStep(intptr_t allocated) { | 1048 void IncrementalMarking::OldSpaceStep(intptr_t allocated) { |
1068 if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) { | 1049 if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) { |
1069 heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags, | 1050 heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags, |
1070 "old space step"); | 1051 "old space step"); |
1071 } else { | 1052 } else { |
1072 Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD); | 1053 Step(allocated, GC_VIA_STACK_GUARD); |
1073 } | 1054 } |
1074 } | 1055 } |
1075 | 1056 |
1076 | 1057 |
1077 void IncrementalMarking::SpeedUp() { | |
1078 bool speed_up = false; | |
1079 | |
1080 if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) { | |
1081 if (FLAG_trace_incremental_marking) { | |
1082 heap()->isolate()->PrintWithTimestamp( | |
1083 "[IncrementalMarking] Speed up marking after %d steps\n", | |
1084 static_cast<int>(kMarkingSpeedAccellerationInterval)); | |
1085 } | |
1086 speed_up = true; | |
1087 } | |
1088 | |
1089 bool space_left_is_very_small = | |
1090 (old_generation_space_available_at_start_of_incremental_ < 10 * MB); | |
1091 | |
1092 bool only_1_nth_of_space_that_was_available_still_left = | |
1093 (SpaceLeftInOldSpace() * (marking_speed_ + 1) < | |
1094 old_generation_space_available_at_start_of_incremental_); | |
1095 | |
1096 if (space_left_is_very_small || | |
1097 only_1_nth_of_space_that_was_available_still_left) { | |
1098 if (FLAG_trace_incremental_marking) | |
1099 heap()->isolate()->PrintWithTimestamp( | |
1100 "[IncrementalMarking] Speed up marking because of low space left\n"); | |
1101 speed_up = true; | |
1102 } | |
1103 | |
1104 bool size_of_old_space_multiplied_by_n_during_marking = | |
1105 (heap_->PromotedTotalSize() > | |
1106 (marking_speed_ + 1) * | |
1107 old_generation_space_used_at_start_of_incremental_); | |
1108 if (size_of_old_space_multiplied_by_n_during_marking) { | |
1109 speed_up = true; | |
1110 if (FLAG_trace_incremental_marking) { | |
1111 heap()->isolate()->PrintWithTimestamp( | |
1112 "[IncrementalMarking] Speed up marking because of heap size " | |
1113 "increase\n"); | |
1114 } | |
1115 } | |
1116 | |
1117 int64_t promoted_during_marking = | |
1118 heap_->PromotedTotalSize() - | |
1119 old_generation_space_used_at_start_of_incremental_; | |
1120 intptr_t delay = marking_speed_ * MB; | |
1121 intptr_t scavenge_slack = heap_->MaxSemiSpaceSize(); | |
1122 | |
1123 // We try to scan at least twice the speed at which we are allocating. | |
1124 if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { | |
1125 if (FLAG_trace_incremental_marking) { | |
1126 heap()->isolate()->PrintWithTimestamp( | |
1127 "[IncrementalMarking] Speed up marking because marker was not " | |
1128 "keeping up\n"); | |
1129 } | |
1130 speed_up = true; | |
1131 } | |
1132 | |
1133 if (speed_up) { | |
1134 if (state_ != MARKING) { | |
1135 if (FLAG_trace_incremental_marking) { | |
1136 heap()->isolate()->PrintWithTimestamp( | |
1137 "[IncrementalMarking] Postponing speeding up marking until marking " | |
1138 "starts\n"); | |
1139 } | |
1140 } else { | |
1141 marking_speed_ += kMarkingSpeedAccelleration; | |
1142 marking_speed_ = static_cast<int>( | |
1143 Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3))); | |
1144 if (FLAG_trace_incremental_marking) { | |
1145 heap()->isolate()->PrintWithTimestamp( | |
1146 "[IncrementalMarking] Marking speed increased to %d\n", | |
1147 marking_speed_); | |
1148 } | |
1149 } | |
1150 } | |
1151 } | |
1152 | |
1153 void IncrementalMarking::FinalizeSweeping() { | 1058 void IncrementalMarking::FinalizeSweeping() { |
1154 DCHECK(state_ == SWEEPING); | 1059 DCHECK(state_ == SWEEPING); |
1155 if (heap_->mark_compact_collector()->sweeping_in_progress() && | 1060 if (heap_->mark_compact_collector()->sweeping_in_progress() && |
1156 (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() || | 1061 (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() || |
1157 !FLAG_concurrent_sweeping)) { | 1062 !FLAG_concurrent_sweeping)) { |
1158 heap_->mark_compact_collector()->EnsureSweepingCompleted(); | 1063 heap_->mark_compact_collector()->EnsureSweepingCompleted(); |
1159 } | 1064 } |
1160 if (!heap_->mark_compact_collector()->sweeping_in_progress()) { | 1065 if (!heap_->mark_compact_collector()->sweeping_in_progress()) { |
1161 bytes_scanned_ = 0; | |
1162 StartMarking(); | 1066 StartMarking(); |
1163 } | 1067 } |
1164 } | 1068 } |
1165 | 1069 |
1166 intptr_t IncrementalMarking::Step(intptr_t allocated_bytes, | 1070 intptr_t IncrementalMarking::Step(intptr_t allocated_bytes, |
1167 CompletionAction action, | 1071 CompletionAction action, |
1168 ForceMarkingAction marking, | 1072 ForceMarkingAction marking, |
1169 ForceCompletionAction completion) { | 1073 ForceCompletionAction completion) { |
1170 DCHECK(allocated_bytes >= 0); | 1074 DCHECK(allocated_bytes >= 0); |
1171 | 1075 |
(...skipping 17 matching lines...) |
1189 } | 1093 } |
1190 | 1094 |
1191 intptr_t bytes_processed = 0; | 1095 intptr_t bytes_processed = 0; |
1192 { | 1096 { |
1193 HistogramTimerScope incremental_marking_scope( | 1097 HistogramTimerScope incremental_marking_scope( |
1194 heap_->isolate()->counters()->gc_incremental_marking()); | 1098 heap_->isolate()->counters()->gc_incremental_marking()); |
1195 TRACE_EVENT0("v8", "V8.GCIncrementalMarking"); | 1099 TRACE_EVENT0("v8", "V8.GCIncrementalMarking"); |
1196 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL); | 1100 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL); |
1197 double start = heap_->MonotonicallyIncreasingTimeInMs(); | 1101 double start = heap_->MonotonicallyIncreasingTimeInMs(); |
1198 | 1102 |
| 1103 // Make sure that the step size is large enough to justify the overhead |
| 1104 // of interrupting the generated code to perform the step. |
| 1105 intptr_t min_bytes_to_process = GCIdleTimeHandler::EstimateMarkingStepSize( |
| 1106 kMinIncrementalStepDurationInMs, |
| 1107 heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond()); |
1199 // The marking speed is driven either by the allocation rate or by the rate | 1108 // The marking speed is driven either by the allocation rate or by the rate |
1200 // at which we are having to check the color of objects in the write | 1109 // at which we are having to check the color of objects in the write |
1201 // barrier. | 1110 // barrier. |
1202 // It is possible for a tight non-allocating loop to run a lot of write | 1111 // It is possible for a tight non-allocating loop to run a lot of write |
1203 // barriers before we get here and check them (marking can only take place | 1112 // barriers before we get here and check them (marking can only take place |
1204 // on | 1113 // on allocation). |
1205 // allocation), so to reduce the lumpiness we don't use the write barriers | 1114 intptr_t bytes_to_process = Max( |
1206 // invoked since last step directly to determine the amount of work to do. | 1115 min_bytes_to_process, kBytesToMarkPerAllocatedByte * allocated_ + |
1207 intptr_t bytes_to_process = | 1116 kBytesToMarkPerWriteBarrier * |
1208 marking_speed_ * | 1117 write_barriers_invoked_since_last_step_); |
1209 Max(allocated_, write_barriers_invoked_since_last_step_); | |
1210 allocated_ = 0; | 1118 allocated_ = 0; |
1211 write_barriers_invoked_since_last_step_ = 0; | 1119 write_barriers_invoked_since_last_step_ = 0; |
1212 | 1120 |
1213 bytes_scanned_ += bytes_to_process; | |
1214 | |
1215 if (state_ == SWEEPING) { | 1121 if (state_ == SWEEPING) { |
1216 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING); | 1122 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING); |
1217 FinalizeSweeping(); | 1123 FinalizeSweeping(); |
1218 } | 1124 } |
1219 | 1125 |
1220 if (state_ == MARKING) { | 1126 if (state_ == MARKING) { |
1221 const bool incremental_wrapper_tracing = | 1127 const bool incremental_wrapper_tracing = |
1222 FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer(); | 1128 FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer(); |
1223 const bool process_wrappers = | 1129 const bool process_wrappers = |
1224 incremental_wrapper_tracing && | 1130 incremental_wrapper_tracing && |
(...skipping 28 matching lines...) |
1253 FinalizeMarking(action); | 1159 FinalizeMarking(action); |
1254 } else { | 1160 } else { |
1255 MarkingComplete(action); | 1161 MarkingComplete(action); |
1256 } | 1162 } |
1257 } else { | 1163 } else { |
1258 IncrementIdleMarkingDelayCounter(); | 1164 IncrementIdleMarkingDelayCounter(); |
1259 } | 1165 } |
1260 } | 1166 } |
1261 } | 1167 } |
1262 | 1168 |
1263 steps_count_++; | |
1264 | |
1265 // Speed up marking if we are marking too slow or if we are almost done | |
1266 // with marking. | |
1267 SpeedUp(); | |
1268 | |
1269 double end = heap_->MonotonicallyIncreasingTimeInMs(); | 1169 double end = heap_->MonotonicallyIncreasingTimeInMs(); |
1270 double duration = (end - start); | 1170 double duration = (end - start); |
1271 // Note that we report zero bytes here when sweeping was in progress or | 1171 // Note that we report zero bytes here when sweeping was in progress or |
1272 // when we just started incremental marking. In these cases we did not | 1172 // when we just started incremental marking. In these cases we did not |
1273 // process the marking deque. | 1173 // process the marking deque. |
1274 heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed); | 1174 heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed); |
1275 } | 1175 } |
1276 return bytes_processed; | 1176 return bytes_processed; |
1277 } | 1177 } |
1278 | 1178 |
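The sizing in Step() above replaces the old adaptive marking_speed_ multiplier with a fixed linear model: marking work grows with bytes allocated and with write-barrier activity, floored by a minimum step large enough to justify interrupting generated code. A back-of-the-envelope sketch, with assumed values for the two new ratio constants (the real kBytesToMarkPerAllocatedByte and kBytesToMarkPerWriteBarrier are declared in incremental-marking.h and may differ):

#include <algorithm>
#include <cstdint>
#include <cstdio>

int main() {
  // Assumed illustrative values, not the real V8 constants.
  const intptr_t kBytesToMarkPerAllocatedByte = 3;
  const intptr_t kBytesToMarkPerWriteBarrier = 10;
  // Stand-in for GCIdleTimeHandler::EstimateMarkingStepSize(...).
  const intptr_t min_bytes_to_process = 64 * 1024;

  const intptr_t allocated = 512 * 1024;      // allocated since last step
  const intptr_t write_barriers = 20 * 1024;  // barrier slow-path hits

  intptr_t bytes_to_process =
      std::max(min_bytes_to_process,
               kBytesToMarkPerAllocatedByte * allocated +
                   kBytesToMarkPerWriteBarrier * write_barriers);
  // 3 * 512 KiB + 10 * 20 KiB = 1536 KiB + 200 KiB = 1736 KiB.
  std::printf("bytes to mark this step: %lld\n",
              static_cast<long long>(bytes_to_process));
  return 0;
}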
1279 | 1179 |
1280 void IncrementalMarking::ResetStepCounters() { | 1180 void IncrementalMarking::ResetStepCounters() { |
1281 steps_count_ = 0; | 1181 allocated_ = 0; |
1282 old_generation_space_available_at_start_of_incremental_ = | |
1283 SpaceLeftInOldSpace(); | |
1284 old_generation_space_used_at_start_of_incremental_ = | |
1285 heap_->PromotedTotalSize(); | |
1286 bytes_rescanned_ = 0; | |
1287 marking_speed_ = kInitialMarkingSpeed; | |
1288 bytes_scanned_ = 0; | |
1289 write_barriers_invoked_since_last_step_ = 0; | 1182 write_barriers_invoked_since_last_step_ = 0; |
1290 } | 1183 } |
1291 | 1184 |
1292 | 1185 |
1293 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | 1186 int64_t IncrementalMarking::SpaceLeftInOldSpace() { |
1294 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); | 1187 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); |
1295 } | 1188 } |
1296 | 1189 |
1297 | 1190 |
1298 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() { | 1191 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() { |
1299 return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter; | 1192 return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter; |
1300 } | 1193 } |
1301 | 1194 |
1302 | 1195 |
1303 void IncrementalMarking::IncrementIdleMarkingDelayCounter() { | 1196 void IncrementalMarking::IncrementIdleMarkingDelayCounter() { |
1304 idle_marking_delay_counter_++; | 1197 idle_marking_delay_counter_++; |
1305 } | 1198 } |
1306 | 1199 |
1307 | 1200 |
1308 void IncrementalMarking::ClearIdleMarkingDelayCounter() { | 1201 void IncrementalMarking::ClearIdleMarkingDelayCounter() { |
1309 idle_marking_delay_counter_ = 0; | 1202 idle_marking_delay_counter_ = 0; |
1310 } | 1203 } |
1311 | 1204 |
1312 } // namespace internal | 1205 } // namespace internal |
1313 } // namespace v8 | 1206 } // namespace v8 |