Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(409)

Side by Side Diff: src/heap/incremental-marking.cc

Issue 2304613002: Revert of [heap] Simplify heuristics for incremental step size. (Closed)
Patch Set: Created 4 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/heap/incremental-marking.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "src/heap/incremental-marking.h" 5 #include "src/heap/incremental-marking.h"
6 6
7 #include "src/code-stubs.h" 7 #include "src/code-stubs.h"
8 #include "src/compilation-cache.h" 8 #include "src/compilation-cache.h"
9 #include "src/conversions.h" 9 #include "src/conversions.h"
10 #include "src/heap/gc-idle-time-handler.h" 10 #include "src/heap/gc-idle-time-handler.h"
(...skipping 12 matching lines...) Expand all
23 return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD, 23 return StepActions(IncrementalMarking::NO_GC_VIA_STACK_GUARD,
24 IncrementalMarking::FORCE_MARKING, 24 IncrementalMarking::FORCE_MARKING,
25 IncrementalMarking::DO_NOT_FORCE_COMPLETION); 25 IncrementalMarking::DO_NOT_FORCE_COMPLETION);
26 } 26 }
27 27
28 IncrementalMarking::IncrementalMarking(Heap* heap) 28 IncrementalMarking::IncrementalMarking(Heap* heap)
29 : heap_(heap), 29 : heap_(heap),
30 observer_(*this, kAllocatedThreshold), 30 observer_(*this, kAllocatedThreshold),
31 state_(STOPPED), 31 state_(STOPPED),
32 is_compacting_(false), 32 is_compacting_(false),
33 steps_count_(0),
34 old_generation_space_available_at_start_of_incremental_(0),
35 old_generation_space_used_at_start_of_incremental_(0),
36 bytes_rescanned_(0),
33 should_hurry_(false), 37 should_hurry_(false),
38 marking_speed_(0),
39 bytes_scanned_(0),
34 allocated_(0), 40 allocated_(0),
35 write_barriers_invoked_since_last_step_(0), 41 write_barriers_invoked_since_last_step_(0),
36 idle_marking_delay_counter_(0), 42 idle_marking_delay_counter_(0),
37 unscanned_bytes_of_large_object_(0), 43 unscanned_bytes_of_large_object_(0),
38 was_activated_(false), 44 was_activated_(false),
39 black_allocation_(false), 45 black_allocation_(false),
40 finalize_marking_completed_(false), 46 finalize_marking_completed_(false),
41 incremental_marking_finalization_rounds_(0), 47 incremental_marking_finalization_rounds_(0),
42 request_type_(NONE) {} 48 request_type_(NONE) {}
43 49
(...skipping 24 matching lines...) Expand all
68 74
69 75
70 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot, 76 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
71 Isolate* isolate) { 77 Isolate* isolate) {
72 DCHECK(obj->IsHeapObject()); 78 DCHECK(obj->IsHeapObject());
73 IncrementalMarking* marking = isolate->heap()->incremental_marking(); 79 IncrementalMarking* marking = isolate->heap()->incremental_marking();
74 80
75 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); 81 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
76 int counter = chunk->write_barrier_counter(); 82 int counter = chunk->write_barrier_counter();
77 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { 83 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
78 marking->write_barriers_invoked_since_last_step_ = 84 marking->write_barriers_invoked_since_last_step_ +=
79 Min(kMaxWriteBarrierCounter, 85 MemoryChunk::kWriteBarrierCounterGranularity -
80 marking->write_barriers_invoked_since_last_step_ + 86 chunk->write_barrier_counter();
81 MemoryChunk::kWriteBarrierCounterGranularity -
82 chunk->write_barrier_counter());
83 chunk->set_write_barrier_counter( 87 chunk->set_write_barrier_counter(
84 MemoryChunk::kWriteBarrierCounterGranularity); 88 MemoryChunk::kWriteBarrierCounterGranularity);
85 } 89 }
86 90
87 marking->RecordWrite(obj, slot, *slot); 91 marking->RecordWrite(obj, slot, *slot);
88 } 92 }
89 93
90 // static 94 // static
91 void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host, 95 void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
92 Object** slot, 96 Object** slot,
(...skipping 363 matching lines...) Expand 10 before | Expand all | Expand 10 after
456 // we don't need to do anything if incremental marking is 460 // we don't need to do anything if incremental marking is
457 // not active. 461 // not active.
458 } else if (IsCompacting()) { 462 } else if (IsCompacting()) {
459 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); 463 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
460 } else { 464 } else {
461 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); 465 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
462 } 466 }
463 } 467 }
464 468
465 469
470 void IncrementalMarking::NotifyOfHighPromotionRate() {
471 if (IsMarking()) {
472 if (marking_speed_ < kFastMarking) {
473 if (FLAG_trace_gc) {
474 heap()->isolate()->PrintWithTimestamp(
475 "Increasing marking speed to %d "
476 "due to high promotion rate\n",
477 static_cast<int>(kFastMarking));
478 }
479 marking_speed_ = kFastMarking;
480 }
481 }
482 }
483
484
466 static void PatchIncrementalMarkingRecordWriteStubs( 485 static void PatchIncrementalMarkingRecordWriteStubs(
467 Heap* heap, RecordWriteStub::Mode mode) { 486 Heap* heap, RecordWriteStub::Mode mode) {
468 UnseededNumberDictionary* stubs = heap->code_stubs(); 487 UnseededNumberDictionary* stubs = heap->code_stubs();
469 488
470 int capacity = stubs->Capacity(); 489 int capacity = stubs->Capacity();
471 Isolate* isolate = heap->isolate(); 490 Isolate* isolate = heap->isolate();
472 for (int i = 0; i < capacity; i++) { 491 for (int i = 0; i < capacity; i++) {
473 Object* k = stubs->KeyAt(i); 492 Object* k = stubs->KeyAt(i);
474 if (stubs->IsKey(isolate, k)) { 493 if (stubs->IsKey(isolate, k)) {
475 uint32_t key = NumberToUint32(k); 494 uint32_t key = NumberToUint32(k);
(...skipping 567 matching lines...) Expand 10 before | Expand all | Expand 10 after
1043 !heap()->mark_compact_collector()->marking_deque()->IsEmpty()); 1062 !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
1044 return remaining_time_in_ms; 1063 return remaining_time_in_ms;
1045 } 1064 }
1046 1065
1047 1066
// Reacts to `allocated` bytes of old-space allocation: either kick off
// incremental marking (when stopped and heuristics say it is worthwhile) or
// perform a marking step. The step size is scaled by
// kFastMarking / kInitialMarkingSpeed so old-space allocation drives marking
// at the "fast" rate relative to the initial speed (constants declared in
// the header — values not visible here).
void IncrementalMarking::OldSpaceStep(intptr_t allocated) {
  if (IsStopped() && ShouldActivateEvenWithoutIdleNotification()) {
    heap()->StartIncrementalMarking(Heap::kNoGCFlags, kNoGCCallbackFlags,
                                    "old space step");
  } else {
    Step(allocated * kFastMarking / kInitialMarkingSpeed, GC_VIA_STACK_GUARD);
  }
}
1056 1075
1057 1076
// Heuristic speed governor, run after every Step(). It decides whether to
// accelerate marking based on four independent triggers, then — only while
// in the MARKING state — raises marking_speed_ by kMarkingSpeedAccelleration
// (additive) followed by a 1.3x multiplicative bump, clamped to
// kMaxMarkingSpeed. Triggers:
//   1. periodic: every kMarkingSpeedAccellerationInterval-th step;
//   2. low headroom: < 10 MB left, or less than 1/(marking_speed_+1) of the
//      space available when marking started remains;
//   3. heap growth: promoted size exceeded (marking_speed_+1) x the size at
//      the start of marking;
//   4. falling behind: bytes promoted since the start outpace half the bytes
//      scanned, beyond a slack of one semispace plus marking_speed_ MB.
void IncrementalMarking::SpeedUp() {
  bool speed_up = false;

  // Trigger 1: unconditional periodic acceleration.
  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking after %d steps\n",
          static_cast<int>(kMarkingSpeedAccellerationInterval));
    }
    speed_up = true;
  }

  // Trigger 2: little old-space headroom left.
  bool space_left_is_very_small =
      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);

  bool only_1_nth_of_space_that_was_available_still_left =
      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
       old_generation_space_available_at_start_of_incremental_);

  if (space_left_is_very_small ||
      only_1_nth_of_space_that_was_available_still_left) {
    if (FLAG_trace_incremental_marking)
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking because of low space left\n");
    speed_up = true;
  }

  // Trigger 3: old space grew substantially while marking was in progress.
  bool size_of_old_space_multiplied_by_n_during_marking =
      (heap_->PromotedTotalSize() >
       (marking_speed_ + 1) *
           old_generation_space_used_at_start_of_incremental_);
  if (size_of_old_space_multiplied_by_n_during_marking) {
    speed_up = true;
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking because of heap size "
          "increase\n");
    }
  }

  // Trigger 4: marker not keeping up with the promotion rate.
  int64_t promoted_during_marking =
      heap_->PromotedTotalSize() -
      old_generation_space_used_at_start_of_incremental_;
  intptr_t delay = marking_speed_ * MB;
  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();

  // We try to scan at at least twice the speed that we are allocating.
  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Speed up marking because marker was not "
          "keeping up\n");
    }
    speed_up = true;
  }

  if (speed_up) {
    if (state_ != MARKING) {
      // Speeding up while still sweeping would be wasted; wait for MARKING.
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Postponing speeding up marking until marking "
            "starts\n");
      }
    } else {
      // Additive then multiplicative increase, capped at kMaxMarkingSpeed.
      marking_speed_ += kMarkingSpeedAccelleration;
      marking_speed_ = static_cast<int>(
          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
      if (FLAG_trace_incremental_marking) {
        heap()->isolate()->PrintWithTimestamp(
            "[IncrementalMarking] Marking speed increased to %d\n",
            marking_speed_);
      }
    }
  }
}
1152
1058 void IncrementalMarking::FinalizeSweeping() { 1153 void IncrementalMarking::FinalizeSweeping() {
1059 DCHECK(state_ == SWEEPING); 1154 DCHECK(state_ == SWEEPING);
1060 if (heap_->mark_compact_collector()->sweeping_in_progress() && 1155 if (heap_->mark_compact_collector()->sweeping_in_progress() &&
1061 (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() || 1156 (heap_->mark_compact_collector()->sweeper().IsSweepingCompleted() ||
1062 !FLAG_concurrent_sweeping)) { 1157 !FLAG_concurrent_sweeping)) {
1063 heap_->mark_compact_collector()->EnsureSweepingCompleted(); 1158 heap_->mark_compact_collector()->EnsureSweepingCompleted();
1064 } 1159 }
1065 if (!heap_->mark_compact_collector()->sweeping_in_progress()) { 1160 if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
1161 bytes_scanned_ = 0;
1066 StartMarking(); 1162 StartMarking();
1067 } 1163 }
1068 } 1164 }
1069 1165
1070 intptr_t IncrementalMarking::Step(intptr_t allocated_bytes, 1166 intptr_t IncrementalMarking::Step(intptr_t allocated_bytes,
1071 CompletionAction action, 1167 CompletionAction action,
1072 ForceMarkingAction marking, 1168 ForceMarkingAction marking,
1073 ForceCompletionAction completion) { 1169 ForceCompletionAction completion) {
1074 DCHECK(allocated_bytes >= 0); 1170 DCHECK(allocated_bytes >= 0);
1075 1171
(...skipping 17 matching lines...) Expand all
1093 } 1189 }
1094 1190
1095 intptr_t bytes_processed = 0; 1191 intptr_t bytes_processed = 0;
1096 { 1192 {
1097 HistogramTimerScope incremental_marking_scope( 1193 HistogramTimerScope incremental_marking_scope(
1098 heap_->isolate()->counters()->gc_incremental_marking()); 1194 heap_->isolate()->counters()->gc_incremental_marking());
1099 TRACE_EVENT0("v8", "V8.GCIncrementalMarking"); 1195 TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
1100 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL); 1196 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
1101 double start = heap_->MonotonicallyIncreasingTimeInMs(); 1197 double start = heap_->MonotonicallyIncreasingTimeInMs();
1102 1198
1103 // Make sure that the step size is large enough to justify the overhead
1104 // of interrupting the generated code to perform the step.
1105 intptr_t min_bytes_to_process = GCIdleTimeHandler::EstimateMarkingStepSize(
1106 kMinIncrementalStepDurationInMs,
1107 heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
1108 // The marking speed is driven either by the allocation rate or by the rate 1199 // The marking speed is driven either by the allocation rate or by the rate
1109 // at which we are having to check the color of objects in the write 1200 // at which we are having to check the color of objects in the write
1110 // barrier. 1201 // barrier.
1111 // It is possible for a tight non-allocating loop to run a lot of write 1202 // It is possible for a tight non-allocating loop to run a lot of write
1112 // barriers before we get here and check them (marking can only take place 1203 // barriers before we get here and check them (marking can only take place
1113 // on allocation). 1204 // on
1114 intptr_t bytes_to_process = Max( 1205 // allocation), so to reduce the lumpiness we don't use the write barriers
1115 min_bytes_to_process, kBytesToMarkPerAllocatedByte * allocated_ + 1206 // invoked since last step directly to determine the amount of work to do.
1116 kBytesToMarkPerWriteBarrier * 1207 intptr_t bytes_to_process =
1117 write_barriers_invoked_since_last_step_); 1208 marking_speed_ *
1209 Max(allocated_, write_barriers_invoked_since_last_step_);
1118 allocated_ = 0; 1210 allocated_ = 0;
1119 write_barriers_invoked_since_last_step_ = 0; 1211 write_barriers_invoked_since_last_step_ = 0;
1120 1212
1213 bytes_scanned_ += bytes_to_process;
1214
1121 if (state_ == SWEEPING) { 1215 if (state_ == SWEEPING) {
1122 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING); 1216 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
1123 FinalizeSweeping(); 1217 FinalizeSweeping();
1124 } 1218 }
1125 1219
1126 if (state_ == MARKING) { 1220 if (state_ == MARKING) {
1127 const bool incremental_wrapper_tracing = 1221 const bool incremental_wrapper_tracing =
1128 FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer(); 1222 FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
1129 const bool process_wrappers = 1223 const bool process_wrappers =
1130 incremental_wrapper_tracing && 1224 incremental_wrapper_tracing &&
(...skipping 28 matching lines...) Expand all
1159 FinalizeMarking(action); 1253 FinalizeMarking(action);
1160 } else { 1254 } else {
1161 MarkingComplete(action); 1255 MarkingComplete(action);
1162 } 1256 }
1163 } else { 1257 } else {
1164 IncrementIdleMarkingDelayCounter(); 1258 IncrementIdleMarkingDelayCounter();
1165 } 1259 }
1166 } 1260 }
1167 } 1261 }
1168 1262
1263 steps_count_++;
1264
1265 // Speed up marking if we are marking too slow or if we are almost done
1266 // with marking.
1267 SpeedUp();
1268
1169 double end = heap_->MonotonicallyIncreasingTimeInMs(); 1269 double end = heap_->MonotonicallyIncreasingTimeInMs();
1170 double duration = (end - start); 1270 double duration = (end - start);
1171 // Note that we report zero bytes here when sweeping was in progress or 1271 // Note that we report zero bytes here when sweeping was in progress or
1172 // when we just started incremental marking. In these cases we did not 1272 // when we just started incremental marking. In these cases we did not
1173 // process the marking deque. 1273 // process the marking deque.
1174 heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed); 1274 heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
1175 } 1275 }
1176 return bytes_processed; 1276 return bytes_processed;
1177 } 1277 }
1178 1278
1179 1279
1180 void IncrementalMarking::ResetStepCounters() { 1280 void IncrementalMarking::ResetStepCounters() {
1181 allocated_ = 0; 1281 steps_count_ = 0;
1282 old_generation_space_available_at_start_of_incremental_ =
1283 SpaceLeftInOldSpace();
1284 old_generation_space_used_at_start_of_incremental_ =
1285 heap_->PromotedTotalSize();
1286 bytes_rescanned_ = 0;
1287 marking_speed_ = kInitialMarkingSpeed;
1288 bytes_scanned_ = 0;
1182 write_barriers_invoked_since_last_step_ = 0; 1289 write_barriers_invoked_since_last_step_ = 0;
1183 } 1290 }
1184 1291
1185 1292
// Returns the old-generation headroom in bytes: the configured maximum old
// generation size minus the bytes currently occupied by promoted objects.
int64_t IncrementalMarking::SpaceLeftInOldSpace() {
  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
}
1189 1296
1190 1297
1191 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() { 1298 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
1192 return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter; 1299 return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
1193 } 1300 }
1194 1301
1195 1302
1196 void IncrementalMarking::IncrementIdleMarkingDelayCounter() { 1303 void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
1197 idle_marking_delay_counter_++; 1304 idle_marking_delay_counter_++;
1198 } 1305 }
1199 1306
1200 1307
1201 void IncrementalMarking::ClearIdleMarkingDelayCounter() { 1308 void IncrementalMarking::ClearIdleMarkingDelayCounter() {
1202 idle_marking_delay_counter_ = 0; 1309 idle_marking_delay_counter_ = 0;
1203 } 1310 }
1204 1311
1205 } // namespace internal 1312 } // namespace internal
1206 } // namespace v8 1313 } // namespace v8
OLDNEW
« no previous file with comments | « src/heap/incremental-marking.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698