OLD | NEW |
---|---|
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/incremental-marking.h" | 5 #include "src/heap/incremental-marking.h" |
6 | 6 |
7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
8 #include "src/compilation-cache.h" | 8 #include "src/compilation-cache.h" |
9 #include "src/conversions.h" | 9 #include "src/conversions.h" |
10 #include "src/heap/gc-idle-time-handler.h" | 10 #include "src/heap/gc-idle-time-handler.h" |
11 #include "src/heap/gc-tracer.h" | 11 #include "src/heap/gc-tracer.h" |
12 #include "src/heap/mark-compact-inl.h" | 12 #include "src/heap/mark-compact-inl.h" |
13 #include "src/heap/object-stats.h" | 13 #include "src/heap/object-stats.h" |
14 #include "src/heap/objects-visiting-inl.h" | 14 #include "src/heap/objects-visiting-inl.h" |
15 #include "src/heap/objects-visiting.h" | 15 #include "src/heap/objects-visiting.h" |
16 #include "src/tracing/trace-event.h" | 16 #include "src/tracing/trace-event.h" |
17 #include "src/v8.h" | 17 #include "src/v8.h" |
18 | 18 |
19 namespace v8 { | 19 namespace v8 { |
20 namespace internal { | 20 namespace internal { |
21 | 21 |
22 IncrementalMarking::IncrementalMarking(Heap* heap) | 22 IncrementalMarking::IncrementalMarking(Heap* heap) |
23 : heap_(heap), | 23 : heap_(heap), |
24 observer_(*this, kAllocatedThreshold), | |
25 state_(STOPPED), | 24 state_(STOPPED), |
25 initial_old_generation_size_(0), | |
26 bytes_marked_ahead_of_schedule_(0), | |
27 unscanned_bytes_of_large_object_(0), | |
28 idle_marking_delay_counter_(0), | |
29 incremental_marking_finalization_rounds_(0), | |
26 is_compacting_(false), | 30 is_compacting_(false), |
27 steps_count_(0), | |
28 old_generation_space_available_at_start_of_incremental_(0), | |
29 old_generation_space_used_at_start_of_incremental_(0), | |
30 bytes_rescanned_(0), | |
31 should_hurry_(false), | 31 should_hurry_(false), |
32 marking_speed_(0), | |
33 bytes_scanned_(0), | |
34 allocated_(0), | |
35 write_barriers_invoked_since_last_step_(0), | |
36 bytes_marked_ahead_of_schedule_(0), | |
37 idle_marking_delay_counter_(0), | |
38 unscanned_bytes_of_large_object_(0), | |
39 was_activated_(false), | 32 was_activated_(false), |
40 black_allocation_(false), | 33 black_allocation_(false), |
41 finalize_marking_completed_(false), | 34 finalize_marking_completed_(false), |
42 incremental_marking_finalization_rounds_(0), | 35 request_type_(NONE), |
43 request_type_(NONE) {} | 36 new_generation_observer_(*this, kAllocatedThreshold), |
37 old_generation_observer_(*this, kAllocatedThreshold) {} | |
44 | 38 |
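The two observers wired up in the constructor above are instances of a small helper that forwards allocation notifications into the marker. A minimal sketch of that plumbing, assuming V8's AllocationObserver interface from src/heap/spaces.h (the real helper is declared in incremental-marking.h and may differ in detail):

// Hedged sketch: modeled on the Observer helper declared in
// incremental-marking.h; details may differ from the actual header.
class Observer : public AllocationObserver {
 public:
  Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
      : AllocationObserver(step_size),
        incremental_marking_(incremental_marking) {}

  // Invoked by the owning space after roughly step_size bytes have
  // been allocated; drives one incremental marking step.
  void Step(int bytes_allocated, Address soon_object, size_t size) override {
    incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
  }

 private:
  IncrementalMarking& incremental_marking_;
};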
45 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) { | 39 bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) { |
46 HeapObject* value_heap_obj = HeapObject::cast(value); | 40 HeapObject* value_heap_obj = HeapObject::cast(value); |
47 MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj); | 41 MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj); |
48 DCHECK(!Marking::IsImpossible(value_bit)); | 42 DCHECK(!Marking::IsImpossible(value_bit)); |
49 | 43 |
50 MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj); | 44 MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj); |
51 DCHECK(!Marking::IsImpossible(obj_bit)); | 45 DCHECK(!Marking::IsImpossible(obj_bit)); |
52 bool is_black = Marking::IsBlack(obj_bit); | 46 bool is_black = Marking::IsBlack(obj_bit); |
53 | 47 |
(...skipping 10 matching lines...) | |
64 if (BaseRecordWrite(obj, value) && slot != NULL) { | 58 if (BaseRecordWrite(obj, value) && slot != NULL) { |
65 // Object is not going to be rescanned, so we need to record the slot. | 59 // Object is not going to be rescanned, so we need to record the slot. |
66 heap_->mark_compact_collector()->RecordSlot(obj, slot, value); | 60 heap_->mark_compact_collector()->RecordSlot(obj, slot, value); |
67 } | 61 } |
68 } | 62 } |
69 | 63 |
70 | 64 |
71 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot, | 65 void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot, |
72 Isolate* isolate) { | 66 Isolate* isolate) { |
73 DCHECK(obj->IsHeapObject()); | 67 DCHECK(obj->IsHeapObject()); |
74 IncrementalMarking* marking = isolate->heap()->incremental_marking(); | 68 IncrementalMarking* marking = isolate->heap()->incremental_marking(); |
Hannes Payer (out of office) 2016/09/26 14:32:05: Nice. Change to: isolate->heap()->incremental_mark
ulan 2016/09/27 17:02:08: Done.
| |
75 | |
76 MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address()); | |
77 int counter = chunk->write_barrier_counter(); | |
78 if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) { | |
79 marking->write_barriers_invoked_since_last_step_ += | |
80 MemoryChunk::kWriteBarrierCounterGranularity - | |
81 chunk->write_barrier_counter(); | |
82 chunk->set_write_barrier_counter( | |
83 MemoryChunk::kWriteBarrierCounterGranularity); | |
84 } | |
85 | |
86 marking->RecordWrite(obj, slot, *slot); | 69 marking->RecordWrite(obj, slot, *slot); |
87 } | 70 } |
88 | 71 |
89 // static | 72 // static |
90 void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host, | 73 void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host, |
91 Object** slot, | 74 Object** slot, |
92 Isolate* isolate) { | 75 Isolate* isolate) { |
93 DCHECK(host->IsJSFunction()); | 76 DCHECK(host->IsJSFunction()); |
94 IncrementalMarking* marking = isolate->heap()->incremental_marking(); | 77 IncrementalMarking* marking = isolate->heap()->incremental_marking(); |
95 Code* value = Code::cast( | 78 Code* value = Code::cast( |
(...skipping 359 matching lines...) | |
455 // we don't need to do anything if incremental marking is | 438 // we don't need to do anything if incremental marking is |
456 // not active. | 439 // not active. |
457 } else if (IsCompacting()) { | 440 } else if (IsCompacting()) { |
458 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); | 441 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION); |
459 } else { | 442 } else { |
460 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); | 443 RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL); |
461 } | 444 } |
462 } | 445 } |
463 | 446 |
464 | 447 |
465 void IncrementalMarking::NotifyOfHighPromotionRate() { | |
466 if (IsMarking()) { | |
467 if (marking_speed_ < kFastMarking) { | |
468 if (FLAG_trace_gc) { | |
469 heap()->isolate()->PrintWithTimestamp( | |
470 "Increasing marking speed to %d " | |
471 "due to high promotion rate\n", | |
472 static_cast<int>(kFastMarking)); | |
473 } | |
474 marking_speed_ = kFastMarking; | |
475 } | |
476 } | |
477 } | |
478 | |
479 | |
480 static void PatchIncrementalMarkingRecordWriteStubs( | 448 static void PatchIncrementalMarkingRecordWriteStubs( |
481 Heap* heap, RecordWriteStub::Mode mode) { | 449 Heap* heap, RecordWriteStub::Mode mode) { |
482 UnseededNumberDictionary* stubs = heap->code_stubs(); | 450 UnseededNumberDictionary* stubs = heap->code_stubs(); |
483 | 451 |
484 int capacity = stubs->Capacity(); | 452 int capacity = stubs->Capacity(); |
485 Isolate* isolate = heap->isolate(); | 453 Isolate* isolate = heap->isolate(); |
486 for (int i = 0; i < capacity; i++) { | 454 for (int i = 0; i < capacity; i++) { |
487 Object* k = stubs->KeyAt(i); | 455 Object* k = stubs->KeyAt(i); |
488 if (stubs->IsKey(isolate, k)) { | 456 if (stubs->IsKey(isolate, k)) { |
489 uint32_t key = NumberToUint32(k); | 457 uint32_t key = NumberToUint32(k); |
(...skipping 26 matching lines...) | |
516 DCHECK(heap_->gc_state() == Heap::NOT_IN_GC); | 484 DCHECK(heap_->gc_state() == Heap::NOT_IN_GC); |
517 DCHECK(!heap_->isolate()->serializer_enabled()); | 485 DCHECK(!heap_->isolate()->serializer_enabled()); |
518 | 486 |
519 Counters* counters = heap_->isolate()->counters(); | 487 Counters* counters = heap_->isolate()->counters(); |
520 | 488 |
521 counters->incremental_marking_reason()->AddSample( | 489 counters->incremental_marking_reason()->AddSample( |
522 static_cast<int>(gc_reason)); | 490 static_cast<int>(gc_reason)); |
523 HistogramTimerScope incremental_marking_scope( | 491 HistogramTimerScope incremental_marking_scope( |
524 counters->gc_incremental_marking_start()); | 492 counters->gc_incremental_marking_start()); |
525 TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart"); | 493 TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart"); |
526 ResetStepCounters(); | |
527 heap_->tracer()->NotifyIncrementalMarkingStart(); | 494 heap_->tracer()->NotifyIncrementalMarkingStart(); |
528 | 495 |
496 start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs(); | |
497 initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects(); | |
498 old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter(); | |
499 bytes_allocated_ = 0; | |
500 bytes_marked_ahead_of_schedule_ = 0; | |
501 should_hurry_ = false; | |
529 was_activated_ = true; | 502 was_activated_ = true; |
530 | 503 |
531 if (!heap_->mark_compact_collector()->sweeping_in_progress()) { | 504 if (!heap_->mark_compact_collector()->sweeping_in_progress()) { |
532 StartMarking(); | 505 StartMarking(); |
533 } else { | 506 } else { |
534 if (FLAG_trace_incremental_marking) { | 507 if (FLAG_trace_incremental_marking) { |
535 heap()->isolate()->PrintWithTimestamp( | 508 heap()->isolate()->PrintWithTimestamp( |
536 "[IncrementalMarking] Start sweeping.\n"); | 509 "[IncrementalMarking] Start sweeping.\n"); |
537 } | 510 } |
538 state_ = SWEEPING; | 511 state_ = SWEEPING; |
539 } | 512 } |
540 | 513 |
541 heap_->new_space()->AddAllocationObserver(&observer_); | 514 heap_->new_space()->AddAllocationObserver(&new_generation_observer_); |
Hannes Payer (out of office) 2016/09/26 14:32:05: Use the space iterator since we are iterating over
| |
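A minimal sketch of the space-iterator form the reviewer is suggesting, assuming V8's SpaceIterator helper from src/heap/heap.h (illustrative only, not necessarily the committed change):

// Hedged sketch of the suggested iteration over all spaces; assumes
// SpaceIterator and Space::AddAllocationObserver, and is not the
// committed code.
SpaceIterator it(heap_);
while (it.has_next()) {
  Space* space = it.next();
  if (space == heap_->new_space()) {
    space->AddAllocationObserver(&new_generation_observer_);
  } else {
    space->AddAllocationObserver(&old_generation_observer_);
  }
}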
515 heap_->code_space()->AddAllocationObserver(&old_generation_observer_); | |
516 heap_->lo_space()->AddAllocationObserver(&old_generation_observer_); | |
517 heap_->map_space()->AddAllocationObserver(&old_generation_observer_); | |
518 heap_->old_space()->AddAllocationObserver(&old_generation_observer_); | |
542 | 519 |
543 incremental_marking_job()->Start(heap_); | 520 incremental_marking_job()->Start(heap_); |
544 } | 521 } |
545 | 522 |
546 | 523 |
547 void IncrementalMarking::StartMarking() { | 524 void IncrementalMarking::StartMarking() { |
548 if (heap_->isolate()->serializer_enabled()) { | 525 if (heap_->isolate()->serializer_enabled()) { |
549 // Black allocation currently starts when we start incremental marking, | 526 // Black allocation currently starts when we start incremental marking, |
550 // but we cannot enable black allocation while deserializing. Hence, we | 527 // but we cannot enable black allocation while deserializing. Hence, we |
551 // have to delay the start of incremental marking in that case. | 528 // have to delay the start of incremental marking in that case. |
(...skipping 445 matching lines...) | |
997 static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB); | 974 static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB); |
998 int old_generation_limit_mb = | 975 int old_generation_limit_mb = |
999 static_cast<int>(heap()->old_generation_allocation_limit() / MB); | 976 static_cast<int>(heap()->old_generation_allocation_limit() / MB); |
1000 heap()->isolate()->PrintWithTimestamp( | 977 heap()->isolate()->PrintWithTimestamp( |
1001 "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, " | 978 "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, " |
1002 "overshoot %dMB\n", | 979 "overshoot %dMB\n", |
1003 old_generation_size_mb, old_generation_limit_mb, | 980 old_generation_size_mb, old_generation_limit_mb, |
1004 Max(0, old_generation_size_mb - old_generation_limit_mb)); | 981 Max(0, old_generation_size_mb - old_generation_limit_mb)); |
1005 } | 982 } |
1006 | 983 |
1007 heap_->new_space()->RemoveAllocationObserver(&observer_); | 984 heap_->new_space()->RemoveAllocationObserver(&new_generation_observer_); |
985 heap_->old_space()->RemoveAllocationObserver(&old_generation_observer_); | |
Hannes Payer (out of office) 2016/09/26 14:32:05: Use the space iterator since we are iterating over
ulan 2016/09/27 17:02:08: Done.
| |
986 heap_->code_space()->RemoveAllocationObserver(&old_generation_observer_); | |
987 heap_->map_space()->RemoveAllocationObserver(&old_generation_observer_); | |
988 heap_->lo_space()->RemoveAllocationObserver(&old_generation_observer_); | |
989 | |
1008 IncrementalMarking::set_should_hurry(false); | 990 IncrementalMarking::set_should_hurry(false); |
1009 ResetStepCounters(); | |
1010 if (IsMarking()) { | 991 if (IsMarking()) { |
1011 PatchIncrementalMarkingRecordWriteStubs(heap_, | 992 PatchIncrementalMarkingRecordWriteStubs(heap_, |
1012 RecordWriteStub::STORE_BUFFER_ONLY); | 993 RecordWriteStub::STORE_BUFFER_ONLY); |
1013 DeactivateIncrementalWriteBarrier(); | 994 DeactivateIncrementalWriteBarrier(); |
1014 } | 995 } |
1015 heap_->isolate()->stack_guard()->ClearGC(); | 996 heap_->isolate()->stack_guard()->ClearGC(); |
1016 state_ = STOPPED; | 997 state_ = STOPPED; |
1017 is_compacting_ = false; | 998 is_compacting_ = false; |
1018 FinishBlackAllocation(); | 999 FinishBlackAllocation(); |
1019 } | 1000 } |
(...skipping 57 matching lines...) | |
1077 do { | 1058 do { |
1078 Step(step_size_in_bytes, completion_action, force_completion, step_origin); | 1059 Step(step_size_in_bytes, completion_action, force_completion, step_origin); |
1079 remaining_time_in_ms = | 1060 remaining_time_in_ms = |
1080 deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs(); | 1061 deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs(); |
1081 } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() && | 1062 } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() && |
1082 !heap()->mark_compact_collector()->marking_deque()->IsEmpty()); | 1063 !heap()->mark_compact_collector()->marking_deque()->IsEmpty()); |
1083 return remaining_time_in_ms; | 1064 return remaining_time_in_ms; |
1084 } | 1065 } |
1085 | 1066 |
1086 | 1067 |
1087 void IncrementalMarking::SpeedUp() { | |
1088 bool speed_up = false; | |
1089 | |
1090 if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) { | |
1091 if (FLAG_trace_incremental_marking) { | |
1092 heap()->isolate()->PrintWithTimestamp( | |
1093 "[IncrementalMarking] Speed up marking after %d steps\n", | |
1094 static_cast<int>(kMarkingSpeedAccellerationInterval)); | |
1095 } | |
1096 speed_up = true; | |
1097 } | |
1098 | |
1099 bool space_left_is_very_small = | |
1100 (old_generation_space_available_at_start_of_incremental_ < 10 * MB); | |
1101 | |
1102 bool only_1_nth_of_space_that_was_available_still_left = | |
1103 (SpaceLeftInOldSpace() * (marking_speed_ + 1) < | |
1104 old_generation_space_available_at_start_of_incremental_); | |
1105 | |
1106 if (space_left_is_very_small || | |
1107 only_1_nth_of_space_that_was_available_still_left) { | |
1108 if (FLAG_trace_incremental_marking) | |
1109 heap()->isolate()->PrintWithTimestamp( | |
1110 "[IncrementalMarking] Speed up marking because of low space left\n"); | |
1111 speed_up = true; | |
1112 } | |
1113 | |
1114 bool size_of_old_space_multiplied_by_n_during_marking = | |
1115 (heap_->PromotedTotalSize() > | |
1116 (marking_speed_ + 1) * | |
1117 old_generation_space_used_at_start_of_incremental_); | |
1118 if (size_of_old_space_multiplied_by_n_during_marking) { | |
1119 speed_up = true; | |
1120 if (FLAG_trace_incremental_marking) { | |
1121 heap()->isolate()->PrintWithTimestamp( | |
1122 "[IncrementalMarking] Speed up marking because of heap size " | |
1123 "increase\n"); | |
1124 } | |
1125 } | |
1126 | |
1127 int64_t promoted_during_marking = | |
1128 heap_->PromotedTotalSize() - | |
1129 old_generation_space_used_at_start_of_incremental_; | |
1130 intptr_t delay = marking_speed_ * MB; | |
1131 intptr_t scavenge_slack = heap_->MaxSemiSpaceSize(); | |
1132 | |
1133 // We try to scan at least twice as fast as we are allocating. | |
1134 if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) { | |
1135 if (FLAG_trace_incremental_marking) { | |
1136 heap()->isolate()->PrintWithTimestamp( | |
1137 "[IncrementalMarking] Speed up marking because marker was not " | |
1138 "keeping up\n"); | |
1139 } | |
1140 speed_up = true; | |
1141 } | |
1142 | |
1143 if (speed_up) { | |
1144 if (state_ != MARKING) { | |
1145 if (FLAG_trace_incremental_marking) { | |
1146 heap()->isolate()->PrintWithTimestamp( | |
1147 "[IncrementalMarking] Postponing speeding up marking until marking " | |
1148 "starts\n"); | |
1149 } | |
1150 } else { | |
1151 marking_speed_ += kMarkingSpeedAccelleration; | |
1152 marking_speed_ = static_cast<int>( | |
1153 Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3))); | |
1154 if (FLAG_trace_incremental_marking) { | |
1155 heap()->isolate()->PrintWithTimestamp( | |
1156 "[IncrementalMarking] Marking speed increased to %d\n", | |
1157 marking_speed_); | |
1158 } | |
1159 } | |
1160 } | |
1161 } | |
1162 | |
1163 void IncrementalMarking::FinalizeSweeping() { | 1068 void IncrementalMarking::FinalizeSweeping() { |
1164 DCHECK(state_ == SWEEPING); | 1069 DCHECK(state_ == SWEEPING); |
1165 if (heap_->mark_compact_collector()->sweeping_in_progress() && | 1070 if (heap_->mark_compact_collector()->sweeping_in_progress() && |
1166 (!FLAG_concurrent_sweeping || | 1071 (!FLAG_concurrent_sweeping || |
1167 heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) { | 1072 heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) { |
1168 heap_->mark_compact_collector()->EnsureSweepingCompleted(); | 1073 heap_->mark_compact_collector()->EnsureSweepingCompleted(); |
1169 } | 1074 } |
1170 if (!heap_->mark_compact_collector()->sweeping_in_progress()) { | 1075 if (!heap_->mark_compact_collector()->sweeping_in_progress()) { |
1171 bytes_scanned_ = 0; | |
1172 StartMarking(); | 1076 StartMarking(); |
1173 } | 1077 } |
1174 } | 1078 } |
1175 | 1079 |
1176 void IncrementalMarking::NotifyAllocatedBytes(intptr_t allocated_bytes) { | 1080 size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() { |
1081 // Update bytes_allocated_ based on the allocation counter. | |
1082 size_t current_counter = heap_->OldGenerationAllocationCounter(); | |
Hannes Payer (out of office) 2016/09/26 14:32:05: Can we not use the same name for OldGenerationAllo
ulan 2016/09/27 17:02:08: Done.
| |
1083 bytes_allocated_ += current_counter - old_generation_allocation_counter_; | |
1084 old_generation_allocation_counter_ = current_counter; | |
1085 return bytes_allocated_; | |
1086 } | |
1087 | |
1088 size_t IncrementalMarking::StepSizeToMakeProgress() { | |
1089 // We increase step size gradually based on the time passed in order to | |
1090 // leave marking work to standalone tasks. The ramp up duration and the | |
1091 // target step count are chosen based on benchmarks. | |
1092 const int kRampUpIntervalMs = 300; | |
1093 const size_t kTargetStepCount = 128; | |
1094 size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount, | |
1095 IncrementalMarking::kAllocatedThreshold); | |
1096 double time_passed_ms = | |
1097 heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_; | |
1098 double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0); | |
1099 return static_cast<size_t>(factor * step_size); | |
1100 } | |
1101 | |
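To make the ramp-up in StepSizeToMakeProgress concrete, a worked example with assumed numbers: if initial_old_generation_size_ is 64 MB, then step_size = Max(64 MB / 128, kAllocatedThreshold) = 512 KB. At 150 ms after marking started, factor = Min(150 / 300, 1.0) = 0.5, so the function returns about 256 KB; from 300 ms onward it returns the full 512 KB.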
1102 void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() { | |
1177 if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking || | 1103 if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking || |
1178 (state_ != SWEEPING && state_ != MARKING)) { | 1104 (state_ != SWEEPING && state_ != MARKING)) { |
1179 return; | 1105 return; |
1180 } | 1106 } |
1181 | 1107 |
1182 allocated_ += allocated_bytes; | 1108 size_t bytes_to_process = |
1109 StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress(); | |
1183 | 1110 |
1184 if (allocated_ >= kAllocatedThreshold || | 1111 if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) { |
1185 write_barriers_invoked_since_last_step_ >= | 1112 // The first step after Scavenge will see many allocated bytes. |
1186 kWriteBarriersInvokedThreshold) { | 1113 // Cap the step size to distribute the marking work more uniformly. |
1187 // The marking speed is driven either by the allocation rate or by the rate | 1114 size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize( |
1188 // at which we are having to check the color of objects in the write | 1115 kMaxStepSizeInMs, |
1189 // barrier. | 1116 heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond()); |
1190 // It is possible for a tight non-allocating loop to run a lot of write | 1117 bytes_to_process = Min(bytes_to_process, max_step_size); |
Hannes Payer (out of office) 2016/09/26 14:32:05: Can we think of a way to avoid this big step after
ulan 2016/09/27 17:02:08: Acknowledged.
| |
1191 // barriers before we get here and check them (marking can only take place | 1118 |
1192 // on | 1119 intptr_t bytes_processed = 0; |
1193 // allocation), so to reduce the lumpiness we don't use the write barriers | 1120 if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) { |
1194 // invoked since last step directly to determine the amount of work to do. | 1121 // Steps performed in tasks have put us ahead of schedule. |
1195 intptr_t bytes_to_process = | 1122 // We skip processing of the marking deque here and thus |
1196 marking_speed_ * | 1123 // shift marking time from inside V8 to standalone tasks. |
1197 Max(allocated_, write_barriers_invoked_since_last_step_); | 1124 bytes_marked_ahead_of_schedule_ -= bytes_to_process; |
1198 Step(bytes_to_process, GC_VIA_STACK_GUARD, FORCE_COMPLETION, | 1125 bytes_processed = bytes_to_process; |
1199 StepOrigin::kV8); | 1126 } else { |
1127 bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD, | |
1128 FORCE_COMPLETION, StepOrigin::kV8); | |
1129 } | |
1130 bytes_allocated_ -= Min(bytes_allocated_, bytes_to_process); | |
1200 } | 1131 } |
1201 } | 1132 } |
1202 | 1133 |
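A worked example of the scheduling above, again with assumed numbers: if 3 MB were allocated in the old generation since the last step and StepSizeToMakeProgress() yields 512 KB, bytes_to_process is about 3.5 MB, possibly lowered by the EstimateMarkingStepSize cap. If background tasks have already banked at least that much in bytes_marked_ahead_of_schedule_, the step is paid entirely from that credit and the marking deque is not touched; otherwise Step() processes the bytes directly. Either way, bytes_allocated_ is reduced by the amount covered.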
1203 void IncrementalMarking::Step(intptr_t bytes_to_process, | 1134 size_t IncrementalMarking::Step(size_t bytes_to_process, |
1204 CompletionAction action, | 1135 CompletionAction action, |
1205 ForceCompletionAction completion, | 1136 ForceCompletionAction completion, |
1206 StepOrigin step_origin) { | 1137 StepOrigin step_origin) { |
1207 HistogramTimerScope incremental_marking_scope( | 1138 HistogramTimerScope incremental_marking_scope( |
1208 heap_->isolate()->counters()->gc_incremental_marking()); | 1139 heap_->isolate()->counters()->gc_incremental_marking()); |
1209 TRACE_EVENT0("v8", "V8.GCIncrementalMarking"); | 1140 TRACE_EVENT0("v8", "V8.GCIncrementalMarking"); |
1210 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL); | 1141 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL); |
1211 double start = heap_->MonotonicallyIncreasingTimeInMs(); | 1142 double start = heap_->MonotonicallyIncreasingTimeInMs(); |
1212 | 1143 |
1213 bytes_scanned_ += bytes_to_process; | |
1214 | |
1215 allocated_ = 0; | |
1216 write_barriers_invoked_since_last_step_ = 0; | |
1217 | |
1218 if (state_ == SWEEPING) { | 1144 if (state_ == SWEEPING) { |
1219 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING); | 1145 TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING); |
1220 FinalizeSweeping(); | 1146 FinalizeSweeping(); |
1221 } | 1147 } |
1222 | 1148 |
1223 intptr_t bytes_processed = 0; | 1149 size_t bytes_processed = 0; |
1224 if (state_ == MARKING) { | 1150 if (state_ == MARKING) { |
1225 const bool incremental_wrapper_tracing = | 1151 const bool incremental_wrapper_tracing = |
1226 FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer(); | 1152 FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer(); |
1227 const bool process_wrappers = | 1153 const bool process_wrappers = |
1228 incremental_wrapper_tracing && | 1154 incremental_wrapper_tracing && |
1229 (heap_->mark_compact_collector() | 1155 (heap_->mark_compact_collector() |
1230 ->RequiresImmediateWrapperProcessing() || | 1156 ->RequiresImmediateWrapperProcessing() || |
1231 heap_->mark_compact_collector()->marking_deque()->IsEmpty()); | 1157 heap_->mark_compact_collector()->marking_deque()->IsEmpty()); |
1232 bool wrapper_work_left = incremental_wrapper_tracing; | 1158 bool wrapper_work_left = incremental_wrapper_tracing; |
1233 if (!process_wrappers) { | 1159 if (!process_wrappers) { |
1234 if (step_origin == StepOrigin::kV8 && | 1160 bytes_processed = ProcessMarkingDeque(bytes_to_process); |
1235 bytes_marked_ahead_of_schedule_ >= bytes_to_process) { | 1161 if (step_origin == StepOrigin::kTask) { |
1236 // Steps performed in tasks have put us ahead of schedule. | 1162 bytes_marked_ahead_of_schedule_ += bytes_processed; |
1237 // We skip processing of the marking deque here and thus | |
1238 // shift marking time from inside V8 to standalone tasks. | |
1239 bytes_marked_ahead_of_schedule_ -= bytes_to_process; | |
1240 } else { | |
1241 bytes_processed = ProcessMarkingDeque(bytes_to_process); | |
1242 if (step_origin == StepOrigin::kTask) { | |
1243 bytes_marked_ahead_of_schedule_ += bytes_processed; | |
1244 } | |
1245 } | 1163 } |
1246 } else { | 1164 } else { |
1247 const double wrapper_deadline = | 1165 const double wrapper_deadline = |
1248 heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs; | 1166 heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs; |
1249 TRACE_GC(heap()->tracer(), | 1167 TRACE_GC(heap()->tracer(), |
1250 GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING); | 1168 GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING); |
1251 heap_->mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer(); | 1169 heap_->mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer(); |
1252 wrapper_work_left = | 1170 wrapper_work_left = |
1253 heap_->mark_compact_collector() | 1171 heap_->mark_compact_collector() |
1254 ->embedder_heap_tracer() | 1172 ->embedder_heap_tracer() |
(...skipping 11 matching lines...) | |
1266 FinalizeMarking(action); | 1184 FinalizeMarking(action); |
1267 } else { | 1185 } else { |
1268 MarkingComplete(action); | 1186 MarkingComplete(action); |
1269 } | 1187 } |
1270 } else { | 1188 } else { |
1271 IncrementIdleMarkingDelayCounter(); | 1189 IncrementIdleMarkingDelayCounter(); |
1272 } | 1190 } |
1273 } | 1191 } |
1274 } | 1192 } |
1275 | 1193 |
1276 steps_count_++; | |
1277 | |
1278 // Speed up marking if we are marking too slow or if we are almost done | |
1279 // with marking. | |
1280 SpeedUp(); | |
1281 | |
1282 double end = heap_->MonotonicallyIncreasingTimeInMs(); | 1194 double end = heap_->MonotonicallyIncreasingTimeInMs(); |
1283 double duration = (end - start); | 1195 double duration = (end - start); |
1284 // Note that we report zero bytes here when sweeping was in progress or | 1196 // Note that we report zero bytes here when sweeping was in progress or |
1285 // when we just started incremental marking. In these cases we did not | 1197 // when we just started incremental marking. In these cases we did not |
1286 // process the marking deque. | 1198 // process the marking deque. |
1287 heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed); | 1199 heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed); |
1288 if (FLAG_trace_incremental_marking) { | 1200 if (FLAG_trace_incremental_marking) { |
1289 heap_->isolate()->PrintWithTimestamp( | 1201 heap_->isolate()->PrintWithTimestamp( |
1290 "[IncrementalMarking] Step %s %d bytes (%d) in %.1f\n", | 1202 "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n", |
1291 step_origin == StepOrigin::kV8 ? "in v8" : "in task", | 1203 step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed, |
1292 static_cast<int>(bytes_processed), static_cast<int>(bytes_to_process), | 1204 bytes_to_process, duration); |
1293 duration); | |
1294 } | 1205 } |
1206 return bytes_processed; | |
1295 } | 1207 } |
1296 | 1208 |
1297 | 1209 |
1298 void IncrementalMarking::ResetStepCounters() { | |
1299 steps_count_ = 0; | |
1300 old_generation_space_available_at_start_of_incremental_ = | |
1301 SpaceLeftInOldSpace(); | |
1302 old_generation_space_used_at_start_of_incremental_ = | |
1303 heap_->PromotedTotalSize(); | |
1304 bytes_rescanned_ = 0; | |
1305 marking_speed_ = kInitialMarkingSpeed; | |
1306 bytes_scanned_ = 0; | |
1307 write_barriers_invoked_since_last_step_ = 0; | |
1308 bytes_marked_ahead_of_schedule_ = 0; | |
1309 } | |
1310 | |
1311 | |
1312 int64_t IncrementalMarking::SpaceLeftInOldSpace() { | |
1313 return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects(); | |
1314 } | |
1315 | |
1316 | |
1317 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() { | 1210 bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() { |
1318 return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter; | 1211 return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter; |
1319 } | 1212 } |
1320 | 1213 |
1321 | 1214 |
1322 void IncrementalMarking::IncrementIdleMarkingDelayCounter() { | 1215 void IncrementalMarking::IncrementIdleMarkingDelayCounter() { |
1323 idle_marking_delay_counter_++; | 1216 idle_marking_delay_counter_++; |
1324 } | 1217 } |
1325 | 1218 |
1326 | 1219 |
1327 void IncrementalMarking::ClearIdleMarkingDelayCounter() { | 1220 void IncrementalMarking::ClearIdleMarkingDelayCounter() { |
1328 idle_marking_delay_counter_ = 0; | 1221 idle_marking_delay_counter_ = 0; |
1329 } | 1222 } |
1330 | 1223 |
1331 } // namespace internal | 1224 } // namespace internal |
1332 } // namespace v8 | 1225 } // namespace v8 |