
Diff: src/heap/incremental-marking.cc (unified; '-' = removed, '+' = added)

Issue 2359903002: [heap] New heuristics for incremental marking step size. (Closed)
Patch Set: "use space iterator", created 4 years, 2 months ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/heap/incremental-marking.h"

#include "src/code-stubs.h"
#include "src/compilation-cache.h"
#include "src/conversions.h"
#include "src/heap/gc-idle-time-handler.h"
#include "src/heap/gc-tracer.h"
#include "src/heap/mark-compact-inl.h"
#include "src/heap/object-stats.h"
#include "src/heap/objects-visiting-inl.h"
#include "src/heap/objects-visiting.h"
#include "src/tracing/trace-event.h"
#include "src/v8.h"

namespace v8 {
namespace internal {

IncrementalMarking::IncrementalMarking(Heap* heap)
    : heap_(heap),
-      observer_(*this, kAllocatedThreshold),
      state_(STOPPED),
+      initial_old_generation_size_(0),
+      bytes_marked_ahead_of_schedule_(0),
+      unscanned_bytes_of_large_object_(0),
+      idle_marking_delay_counter_(0),
+      incremental_marking_finalization_rounds_(0),
      is_compacting_(false),
-      steps_count_(0),
-      old_generation_space_available_at_start_of_incremental_(0),
-      old_generation_space_used_at_start_of_incremental_(0),
-      bytes_rescanned_(0),
      should_hurry_(false),
-      marking_speed_(0),
-      bytes_scanned_(0),
-      allocated_(0),
-      write_barriers_invoked_since_last_step_(0),
-      bytes_marked_ahead_of_schedule_(0),
-      idle_marking_delay_counter_(0),
-      unscanned_bytes_of_large_object_(0),
      was_activated_(false),
      black_allocation_(false),
      finalize_marking_completed_(false),
-      incremental_marking_finalization_rounds_(0),
-      request_type_(NONE) {}
+      request_type_(NONE),
+      new_generation_observer_(*this, kAllocatedThreshold),
+      old_generation_observer_(*this, kAllocatedThreshold) {}

bool IncrementalMarking::BaseRecordWrite(HeapObject* obj, Object* value) {
  HeapObject* value_heap_obj = HeapObject::cast(value);
  MarkBit value_bit = ObjectMarking::MarkBitFrom(value_heap_obj);
  DCHECK(!Marking::IsImpossible(value_bit));

  MarkBit obj_bit = ObjectMarking::MarkBitFrom(obj);
  DCHECK(!Marking::IsImpossible(obj_bit));
  bool is_black = Marking::IsBlack(obj_bit);

(...skipping 10 matching lines...)
  if (BaseRecordWrite(obj, value) && slot != NULL) {
    // Object is not going to be rescanned, so we need to record the slot.
    heap_->mark_compact_collector()->RecordSlot(obj, slot, value);
  }
}


void IncrementalMarking::RecordWriteFromCode(HeapObject* obj, Object** slot,
                                             Isolate* isolate) {
  DCHECK(obj->IsHeapObject());
-  IncrementalMarking* marking = isolate->heap()->incremental_marking();
-
-  MemoryChunk* chunk = MemoryChunk::FromAddress(obj->address());
-  int counter = chunk->write_barrier_counter();
-  if (counter < (MemoryChunk::kWriteBarrierCounterGranularity / 2)) {
-    marking->write_barriers_invoked_since_last_step_ +=
-        MemoryChunk::kWriteBarrierCounterGranularity -
-        chunk->write_barrier_counter();
-    chunk->set_write_barrier_counter(
-        MemoryChunk::kWriteBarrierCounterGranularity);
-  }
-
-  marking->RecordWrite(obj, slot, *slot);
+  isolate->heap()->incremental_marking()->RecordWrite(obj, slot, *slot);
}

// static
void IncrementalMarking::RecordWriteOfCodeEntryFromCode(JSFunction* host,
                                                        Object** slot,
                                                        Isolate* isolate) {
  DCHECK(host->IsJSFunction());
  IncrementalMarking* marking = isolate->heap()->incremental_marking();
  Code* value = Code::cast(
      Code::GetObjectFromEntryAddress(reinterpret_cast<Address>(slot)));
(...skipping 358 matching lines...)
    // we don't need to do anything if incremental marking is
    // not active.
  } else if (IsCompacting()) {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL_COMPACTION);
  } else {
    RecordWriteStub::Patch(stub, RecordWriteStub::INCREMENTAL);
  }
}


-void IncrementalMarking::NotifyOfHighPromotionRate() {
-  if (IsMarking()) {
-    if (marking_speed_ < kFastMarking) {
-      if (FLAG_trace_gc) {
-        heap()->isolate()->PrintWithTimestamp(
-            "Increasing marking speed to %d "
-            "due to high promotion rate\n",
-            static_cast<int>(kFastMarking));
-      }
-      marking_speed_ = kFastMarking;
-    }
-  }
-}
-
-
static void PatchIncrementalMarkingRecordWriteStubs(
    Heap* heap, RecordWriteStub::Mode mode) {
  UnseededNumberDictionary* stubs = heap->code_stubs();

  int capacity = stubs->Capacity();
  Isolate* isolate = heap->isolate();
  for (int i = 0; i < capacity; i++) {
    Object* k = stubs->KeyAt(i);
    if (stubs->IsKey(isolate, k)) {
      uint32_t key = NumberToUint32(k);
(...skipping 26 matching lines...)
  DCHECK(heap_->gc_state() == Heap::NOT_IN_GC);
  DCHECK(!heap_->isolate()->serializer_enabled());

  Counters* counters = heap_->isolate()->counters();

  counters->incremental_marking_reason()->AddSample(
      static_cast<int>(gc_reason));
  HistogramTimerScope incremental_marking_scope(
      counters->gc_incremental_marking_start());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarkingStart");
-  ResetStepCounters();
  heap_->tracer()->NotifyIncrementalMarkingStart();

+  start_time_ms_ = heap()->MonotonicallyIncreasingTimeInMs();
+  initial_old_generation_size_ = heap_->PromotedSpaceSizeOfObjects();
+  old_generation_allocation_counter_ = heap_->OldGenerationAllocationCounter();
+  bytes_allocated_ = 0;
+  bytes_marked_ahead_of_schedule_ = 0;
+  should_hurry_ = false;
  was_activated_ = true;

  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
    StartMarking();
  } else {
    if (FLAG_trace_incremental_marking) {
      heap()->isolate()->PrintWithTimestamp(
          "[IncrementalMarking] Start sweeping.\n");
    }
    state_ = SWEEPING;
  }

-  heap_->new_space()->AddAllocationObserver(&observer_);
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->AddAllocationObserver(&new_generation_observer_);
+    } else {
+      space->AddAllocationObserver(&old_generation_observer_);
+    }
+  }

  incremental_marking_job()->Start(heap_);
}

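Note: with this change, Start() registers an allocation observer on every space, not just the new space, so old-generation allocation also drives marking steps. For orientation, here is a minimal sketch of how such an observer plausibly forwards allocation events into the marker; the real Observer class is declared in src/heap/incremental-marking.h, and the exact AllocationObserver::Step signature shown here is an assumption:

// Sketch only -- the real declaration lives in incremental-marking.h.
// Assumes AllocationObserver invokes Step() roughly every step_size
// allocated bytes in the space it is attached to.
class Observer : public AllocationObserver {
 public:
  Observer(IncrementalMarking& incremental_marking, intptr_t step_size)
      : AllocationObserver(step_size),
        incremental_marking_(incremental_marking) {}

  void Step(int bytes_allocated, Address, size_t) override {
    // Every observed allocation chunk funnels into the same entry point.
    incremental_marking_.AdvanceIncrementalMarkingOnAllocation();
  }

 private:
  IncrementalMarking& incremental_marking_;
};

Start() and Stop() then simply attach and detach two such instances, one for the new generation and one shared by all old-generation spaces.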

void IncrementalMarking::StartMarking() {
  if (heap_->isolate()->serializer_enabled()) {
    // Black allocation currently starts when we start incremental marking,
    // but we cannot enable black allocation while deserializing. Hence, we
    // have to delay the start of incremental marking in that case.
(...skipping 443 matching lines...)
        static_cast<int>(heap()->PromotedSpaceSizeOfObjects() / MB);
    int old_generation_limit_mb =
        static_cast<int>(heap()->old_generation_allocation_limit() / MB);
    heap()->isolate()->PrintWithTimestamp(
        "[IncrementalMarking] Stopping: old generation %dMB, limit %dMB, "
        "overshoot %dMB\n",
        old_generation_size_mb, old_generation_limit_mb,
        Max(0, old_generation_size_mb - old_generation_limit_mb));
  }

-  heap_->new_space()->RemoveAllocationObserver(&observer_);
+  SpaceIterator it(heap_);
+  while (it.has_next()) {
+    Space* space = it.next();
+    if (space == heap_->new_space()) {
+      space->RemoveAllocationObserver(&new_generation_observer_);
+    } else {
+      space->RemoveAllocationObserver(&old_generation_observer_);
+    }
+  }
+
  IncrementalMarking::set_should_hurry(false);
-  ResetStepCounters();
  if (IsMarking()) {
    PatchIncrementalMarkingRecordWriteStubs(heap_,
                                            RecordWriteStub::STORE_BUFFER_ONLY);
    DeactivateIncrementalWriteBarrier();
  }
  heap_->isolate()->stack_guard()->ClearGC();
  state_ = STOPPED;
  is_compacting_ = false;
  FinishBlackAllocation();
}
(...skipping 57 matching lines...)
  do {
    Step(step_size_in_bytes, completion_action, force_completion, step_origin);
    remaining_time_in_ms =
        deadline_in_ms - heap()->MonotonicallyIncreasingTimeInMs();
  } while (remaining_time_in_ms >= kStepSizeInMs && !IsComplete() &&
           !heap()->mark_compact_collector()->marking_deque()->IsEmpty());
  return remaining_time_in_ms;
}


-void IncrementalMarking::SpeedUp() {
-  bool speed_up = false;
-
-  if ((steps_count_ % kMarkingSpeedAccellerationInterval) == 0) {
-    if (FLAG_trace_incremental_marking) {
-      heap()->isolate()->PrintWithTimestamp(
-          "[IncrementalMarking] Speed up marking after %d steps\n",
-          static_cast<int>(kMarkingSpeedAccellerationInterval));
-    }
-    speed_up = true;
-  }
-
-  bool space_left_is_very_small =
-      (old_generation_space_available_at_start_of_incremental_ < 10 * MB);
-
-  bool only_1_nth_of_space_that_was_available_still_left =
-      (SpaceLeftInOldSpace() * (marking_speed_ + 1) <
-       old_generation_space_available_at_start_of_incremental_);
-
-  if (space_left_is_very_small ||
-      only_1_nth_of_space_that_was_available_still_left) {
-    if (FLAG_trace_incremental_marking)
-      heap()->isolate()->PrintWithTimestamp(
-          "[IncrementalMarking] Speed up marking because of low space left\n");
-    speed_up = true;
-  }
-
-  bool size_of_old_space_multiplied_by_n_during_marking =
-      (heap_->PromotedTotalSize() >
-       (marking_speed_ + 1) *
-           old_generation_space_used_at_start_of_incremental_);
-  if (size_of_old_space_multiplied_by_n_during_marking) {
-    speed_up = true;
-    if (FLAG_trace_incremental_marking) {
-      heap()->isolate()->PrintWithTimestamp(
-          "[IncrementalMarking] Speed up marking because of heap size "
-          "increase\n");
-    }
-  }
-
-  int64_t promoted_during_marking =
-      heap_->PromotedTotalSize() -
-      old_generation_space_used_at_start_of_incremental_;
-  intptr_t delay = marking_speed_ * MB;
-  intptr_t scavenge_slack = heap_->MaxSemiSpaceSize();
-
-  // We try to scan at least twice the speed that we are allocating.
-  if (promoted_during_marking > bytes_scanned_ / 2 + scavenge_slack + delay) {
-    if (FLAG_trace_incremental_marking) {
-      heap()->isolate()->PrintWithTimestamp(
-          "[IncrementalMarking] Speed up marking because marker was not "
-          "keeping up\n");
-    }
-    speed_up = true;
-  }
-
-  if (speed_up) {
-    if (state_ != MARKING) {
-      if (FLAG_trace_incremental_marking) {
-        heap()->isolate()->PrintWithTimestamp(
-            "[IncrementalMarking] Postponing speeding up marking until marking "
-            "starts\n");
-      }
-    } else {
-      marking_speed_ += kMarkingSpeedAccelleration;
-      marking_speed_ = static_cast<int>(
-          Min(kMaxMarkingSpeed, static_cast<intptr_t>(marking_speed_ * 1.3)));
-      if (FLAG_trace_incremental_marking) {
-        heap()->isolate()->PrintWithTimestamp(
-            "[IncrementalMarking] Marking speed increased to %d\n",
-            marking_speed_);
-      }
-    }
-  }
-}
-
void IncrementalMarking::FinalizeSweeping() {
  DCHECK(state_ == SWEEPING);
  if (heap_->mark_compact_collector()->sweeping_in_progress() &&
      (!FLAG_concurrent_sweeping ||
       heap_->mark_compact_collector()->sweeper().IsSweepingCompleted())) {
    heap_->mark_compact_collector()->EnsureSweepingCompleted();
  }
  if (!heap_->mark_compact_collector()->sweeping_in_progress()) {
-    bytes_scanned_ = 0;
    StartMarking();
  }
}

-void IncrementalMarking::NotifyAllocatedBytes(intptr_t allocated_bytes) {
+size_t IncrementalMarking::StepSizeToKeepUpWithAllocations() {
+  // Update bytes_allocated_ based on the allocation counter.
+  size_t current_counter = heap_->OldGenerationAllocationCounter();
+  bytes_allocated_ += current_counter - old_generation_allocation_counter_;
+  old_generation_allocation_counter_ = current_counter;
+  return bytes_allocated_;
+}
+
+size_t IncrementalMarking::StepSizeToMakeProgress() {
+  // We increase step size gradually based on the time passed in order to
+  // leave marking work to standalone tasks. The ramp up duration and the
+  // target step count are chosen based on benchmarks.
+  const int kRampUpIntervalMs = 300;
+  const size_t kTargetStepCount = 128;
+  size_t step_size = Max(initial_old_generation_size_ / kTargetStepCount,
+                         IncrementalMarking::kAllocatedThreshold);
+  double time_passed_ms =
+      heap_->MonotonicallyIncreasingTimeInMs() - start_time_ms_;
+  double factor = Min(time_passed_ms / kRampUpIntervalMs, 1.0);
+  return static_cast<size_t>(factor * step_size);
+}

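To make the ramp-up concrete, a worked example under the assumption that kAllocatedThreshold is 64 KB (its actual value is defined in incremental-marking.h and is not visible in this diff):

// initial_old_generation_size_ = 64 MB at Start():
//   step_size = Max(64 MB / 128, 64 KB) = 512 KB.
// 150 ms after Start(): factor = Min(150 / 300, 1.0) = 0.5,
//   so StepSizeToMakeProgress() returns 0.5 * 512 KB = 256 KB.
// From 300 ms on, the factor saturates at 1.0 and every step requests the
// full 512 KB, i.e. about kTargetStepCount (128) steps over the initial heap.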
+void IncrementalMarking::AdvanceIncrementalMarkingOnAllocation() {
  if (heap_->gc_state() != Heap::NOT_IN_GC || !FLAG_incremental_marking ||
      (state_ != SWEEPING && state_ != MARKING)) {
    return;
  }

-  allocated_ += allocated_bytes;
-
-  if (allocated_ >= kAllocatedThreshold ||
-      write_barriers_invoked_since_last_step_ >=
-          kWriteBarriersInvokedThreshold) {
-    // The marking speed is driven either by the allocation rate or by the
-    // rate at which we are having to check the color of objects in the
-    // write barrier.
-    // It is possible for a tight non-allocating loop to run a lot of write
-    // barriers before we get here and check them (marking can only take
-    // place on allocation), so to reduce the lumpiness we don't use the
-    // write barriers invoked since last step directly to determine the
-    // amount of work to do.
-    intptr_t bytes_to_process =
-        marking_speed_ *
-        Max(allocated_, write_barriers_invoked_since_last_step_);
-    Step(bytes_to_process, GC_VIA_STACK_GUARD, FORCE_COMPLETION,
-         StepOrigin::kV8);
+  size_t bytes_to_process =
+      StepSizeToKeepUpWithAllocations() + StepSizeToMakeProgress();
+
+  if (bytes_to_process >= IncrementalMarking::kAllocatedThreshold) {
+    // The first step after Scavenge will see many allocated bytes.
+    // Cap the step size to distribute the marking work more uniformly.
+    size_t max_step_size = GCIdleTimeHandler::EstimateMarkingStepSize(
+        kMaxStepSizeInMs,
+        heap()->tracer()->IncrementalMarkingSpeedInBytesPerMillisecond());
+    bytes_to_process = Min(bytes_to_process, max_step_size);
+
+    intptr_t bytes_processed = 0;
+    if (bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
+      // Steps performed in tasks have put us ahead of schedule.
+      // We skip processing of the marking deque here and thus
+      // shift marking time from inside V8 to standalone tasks.
+      bytes_marked_ahead_of_schedule_ -= bytes_to_process;
+      bytes_processed = bytes_to_process;
+    } else {
+      bytes_processed = Step(bytes_to_process, GC_VIA_STACK_GUARD,
+                             FORCE_COMPLETION, StepOrigin::kV8);
+    }
+    bytes_allocated_ -= Min(bytes_allocated_, bytes_to_process);
  }
}

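Taken together, the hunk above computes a budget of "allocation backlog plus progress step", capped by how much can be marked within kMaxStepSizeInMs at the observed marking speed. A self-contained sketch of that computation follows; every name in it is illustrative (a model of the logic, not V8 API):

#include <algorithm>
#include <cstddef>

// Toy model of the per-allocation marking budget; illustrative names only.
size_t MarkingBudget(size_t allocated_since_last_step,   // backlog, cf. StepSizeToKeepUpWithAllocations()
                     size_t progress_step,               // cf. StepSizeToMakeProgress()
                     double marking_speed_bytes_per_ms,  // measured by the GC tracer
                     double max_step_ms) {               // cf. kMaxStepSizeInMs
  size_t budget = allocated_since_last_step + progress_step;
  // Cap the budget so a single step never exceeds max_step_ms of marking
  // at the currently measured speed, mirroring the intent of
  // GCIdleTimeHandler::EstimateMarkingStepSize.
  size_t cap = static_cast<size_t>(marking_speed_bytes_per_ms * max_step_ms);
  return std::min(budget, cap);
}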
-void IncrementalMarking::Step(intptr_t bytes_to_process,
-                              CompletionAction action,
-                              ForceCompletionAction completion,
-                              StepOrigin step_origin) {
+size_t IncrementalMarking::Step(size_t bytes_to_process,
+                                CompletionAction action,
+                                ForceCompletionAction completion,
+                                StepOrigin step_origin) {
  HistogramTimerScope incremental_marking_scope(
      heap_->isolate()->counters()->gc_incremental_marking());
  TRACE_EVENT0("v8", "V8.GCIncrementalMarking");
  TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL);
  double start = heap_->MonotonicallyIncreasingTimeInMs();

-  bytes_scanned_ += bytes_to_process;
-
-  allocated_ = 0;
-  write_barriers_invoked_since_last_step_ = 0;
-
  if (state_ == SWEEPING) {
    TRACE_GC(heap_->tracer(), GCTracer::Scope::MC_INCREMENTAL_SWEEPING);
    FinalizeSweeping();
  }

-  intptr_t bytes_processed = 0;
+  size_t bytes_processed = 0;
  if (state_ == MARKING) {
    const bool incremental_wrapper_tracing =
        FLAG_incremental_marking_wrappers && heap_->UsingEmbedderHeapTracer();
    const bool process_wrappers =
        incremental_wrapper_tracing &&
        (heap_->mark_compact_collector()
             ->RequiresImmediateWrapperProcessing() ||
         heap_->mark_compact_collector()->marking_deque()->IsEmpty());
    bool wrapper_work_left = incremental_wrapper_tracing;
    if (!process_wrappers) {
-      if (step_origin == StepOrigin::kV8 &&
-          bytes_marked_ahead_of_schedule_ >= bytes_to_process) {
-        // Steps performed in tasks have put us ahead of schedule.
-        // We skip processing of the marking deque here and thus
-        // shift marking time from inside V8 to standalone tasks.
-        bytes_marked_ahead_of_schedule_ -= bytes_to_process;
-      } else {
-        bytes_processed = ProcessMarkingDeque(bytes_to_process);
-        if (step_origin == StepOrigin::kTask) {
-          bytes_marked_ahead_of_schedule_ += bytes_processed;
-        }
-      }
+      bytes_processed = ProcessMarkingDeque(bytes_to_process);
+      if (step_origin == StepOrigin::kTask) {
+        bytes_marked_ahead_of_schedule_ += bytes_processed;
+      }
      }
    } else {
      const double wrapper_deadline =
          heap_->MonotonicallyIncreasingTimeInMs() + kStepSizeInMs;
      TRACE_GC(heap()->tracer(),
               GCTracer::Scope::MC_INCREMENTAL_WRAPPER_TRACING);
      heap_->mark_compact_collector()->RegisterWrappersWithEmbedderHeapTracer();
      wrapper_work_left =
          heap_->mark_compact_collector()
              ->embedder_heap_tracer()
(...skipping 11 matching lines...)
          FinalizeMarking(action);
        } else {
          MarkingComplete(action);
        }
      } else {
        IncrementIdleMarkingDelayCounter();
      }
    }
  }

-  steps_count_++;
-
-  // Speed up marking if we are marking too slow or if we are almost done
-  // with marking.
-  SpeedUp();
-
  double end = heap_->MonotonicallyIncreasingTimeInMs();
  double duration = (end - start);
  // Note that we report zero bytes here when sweeping was in progress or
  // when we just started incremental marking. In these cases we did not
  // process the marking deque.
  heap_->tracer()->AddIncrementalMarkingStep(duration, bytes_processed);
  if (FLAG_trace_incremental_marking) {
    heap_->isolate()->PrintWithTimestamp(
-        "[IncrementalMarking] Step %s %d bytes (%d) in %.1f\n",
-        step_origin == StepOrigin::kV8 ? "in v8" : "in task",
-        static_cast<int>(bytes_processed), static_cast<int>(bytes_to_process),
-        duration);
+        "[IncrementalMarking] Step %s %zu bytes (%zu) in %.1f\n",
+        step_origin == StepOrigin::kV8 ? "in v8" : "in task", bytes_processed,
+        bytes_to_process, duration);
  }
+  return bytes_processed;
}


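Step() now returns the number of bytes it processed, so task-driven steps (StepOrigin::kTask) can bank credit that allocation-driven steps later spend without touching the marking deque. A toy model of that bookkeeping, with illustrative names only (not V8 API):

#include <cstddef>

// Toy model of the bytes_marked_ahead_of_schedule_ accounting.
struct ScheduleCredit {
  size_t ahead_of_schedule = 0;

  // StepOrigin::kTask: marking done in a standalone task earns credit.
  void OnTaskStep(size_t bytes_processed) {
    ahead_of_schedule += bytes_processed;
  }

  // StepOrigin::kV8 (allocation-driven): if earlier task steps fully prepay
  // the requested budget, skip inline marking entirely; otherwise mark the
  // whole budget inline.
  size_t OnAllocationStep(size_t bytes_to_process) {
    if (ahead_of_schedule >= bytes_to_process) {
      ahead_of_schedule -= bytes_to_process;
      return 0;  // nothing to mark inline
    }
    return bytes_to_process;  // mark the full budget inline
  }
};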
-void IncrementalMarking::ResetStepCounters() {
-  steps_count_ = 0;
-  old_generation_space_available_at_start_of_incremental_ =
-      SpaceLeftInOldSpace();
-  old_generation_space_used_at_start_of_incremental_ =
-      heap_->PromotedTotalSize();
-  bytes_rescanned_ = 0;
-  marking_speed_ = kInitialMarkingSpeed;
-  bytes_scanned_ = 0;
-  write_barriers_invoked_since_last_step_ = 0;
-  bytes_marked_ahead_of_schedule_ = 0;
-}
-
-
-int64_t IncrementalMarking::SpaceLeftInOldSpace() {
-  return heap_->MaxOldGenerationSize() - heap_->PromotedSpaceSizeOfObjects();
-}
-
-
bool IncrementalMarking::IsIdleMarkingDelayCounterLimitReached() {
  return idle_marking_delay_counter_ > kMaxIdleMarkingDelayCounter;
}


void IncrementalMarking::IncrementIdleMarkingDelayCounter() {
  idle_marking_delay_counter_++;
}


void IncrementalMarking::ClearIdleMarkingDelayCounter() {
  idle_marking_delay_counter_ = 0;
}

}  // namespace internal
}  // namespace v8