Index: src/heap/heap.cc
diff --git a/src/heap/heap.cc b/src/heap/heap.cc
index 47717c162b2ec561049e6f528d3a7aa2c288007d..fc36be8041f47bd6520f9a9094d92b4e6004f654 100644
--- a/src/heap/heap.cc
+++ b/src/heap/heap.cc
@@ -775,8 +775,7 @@ void Heap::PreprocessStackTraces() {
 void Heap::HandleGCRequest() {
   if (incremental_marking()->request_type() ==
       IncrementalMarking::COMPLETE_MARKING) {
-    CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt",
-                      incremental_marking()->CallbackFlags());
+    CollectAllGarbage(Heap::kNoGCFlags, "GC interrupt");
     return;
   }
   DCHECK(FLAG_overapproximate_weak_closure);
@@ -986,7 +985,7 @@ bool Heap::CollectGarbage(GarbageCollector collector, const char* gc_reason,
   if (!mark_compact_collector()->abort_incremental_marking() &&
       incremental_marking()->IsStopped() &&
       incremental_marking()->ShouldActivateEvenWithoutIdleNotification()) {
-    incremental_marking()->Start(kNoGCFlags, kNoGCCallbackFlags, "GC epilogue");
+    incremental_marking()->Start(kNoGCFlags);
   }
   return next_gc_likely_to_collect_more;
@@ -1013,18 +1012,9 @@ int Heap::NotifyContextDisposed(bool dependant_context) {
 }
-void Heap::StartIncrementalMarking(int gc_flags,
-                                   const GCCallbackFlags gc_callback_flags,
-                                   const char* reason) {
-  DCHECK(incremental_marking()->IsStopped());
-  incremental_marking()->Start(gc_flags, gc_callback_flags, reason);
-}
-
-
 void Heap::StartIdleIncrementalMarking() {
   gc_idle_time_handler_.ResetNoProgressCounter();
-  StartIncrementalMarking(kReduceMemoryFootprintMask, kNoGCCallbackFlags,
-                          "idle");
+  incremental_marking()->Start(kReduceMemoryFootprintMask);
 }
@@ -4772,63 +4762,27 @@ void Heap::MakeHeapIterable() {
 }
-static double ComputeMutatorUtilization(double mutator_speed, double gc_speed) {
-  const double kMinMutatorUtilization = 0.0;
-  const double kConservativeGcSpeedInBytesPerMillisecond = 200000;
-  if (mutator_speed == 0) return kMinMutatorUtilization;
-  if (gc_speed == 0) gc_speed = kConservativeGcSpeedInBytesPerMillisecond;
-  // Derivation:
-  // mutator_utilization = mutator_time / (mutator_time + gc_time)
-  // mutator_time = 1 / mutator_speed
-  // gc_time = 1 / gc_speed
-  // mutator_utilization = (1 / mutator_speed) /
-  //                       (1 / mutator_speed + 1 / gc_speed)
-  // mutator_utilization = gc_speed / (mutator_speed + gc_speed)
-  return gc_speed / (mutator_speed + gc_speed);
-}
-
-
-double Heap::YoungGenerationMutatorUtilization() {
+bool Heap::HasLowYoungGenerationAllocationRate() {
+  const double high_mutator_utilization = 0.993;
   double mutator_speed = static_cast<double>(
       tracer()->NewSpaceAllocationThroughputInBytesPerMillisecond());
   double gc_speed = static_cast<double>(
       tracer()->ScavengeSpeedInBytesPerMillisecond(kForSurvivedObjects));
-  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
-  if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Young generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
-  }
-  return result;
+  if (mutator_speed == 0 || gc_speed == 0) return false;
+  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+  return mutator_utilization > high_mutator_utilization;
 }
-double Heap::OldGenerationMutatorUtilization() {
+bool Heap::HasLowOldGenerationAllocationRate() {
+  const double high_mutator_utilization = 0.993;
   double mutator_speed = static_cast<double>(
       tracer()->OldGenerationAllocationThroughputInBytesPerMillisecond());
   double gc_speed = static_cast<double>(
      tracer()->CombinedMarkCompactSpeedInBytesPerMillisecond());
-  double result = ComputeMutatorUtilization(mutator_speed, gc_speed);
-  if (FLAG_trace_mutator_utilization) {
-    PrintIsolate(isolate(),
-                 "Old generation mutator utilization = %.3f ("
-                 "mutator_speed=%.f, gc_speed=%.f)\n",
-                 result, mutator_speed, gc_speed);
-  }
-  return result;
-}
-
-
-bool Heap::HasLowYoungGenerationAllocationRate() {
-  const double high_mutator_utilization = 0.993;
-  return YoungGenerationMutatorUtilization() > high_mutator_utilization;
-}
-
-
-bool Heap::HasLowOldGenerationAllocationRate() {
-  const double high_mutator_utilization = 0.993;
-  return OldGenerationMutatorUtilization() > high_mutator_utilization;
+  if (mutator_speed == 0 || gc_speed == 0) return false;
+  double mutator_utilization = gc_speed / (mutator_speed + gc_speed);
+  return mutator_utilization > high_mutator_utilization;
 }
@@ -4854,32 +4808,13 @@ bool Heap::HasHighFragmentation(intptr_t used, intptr_t committed) {
 void Heap::ReduceNewSpaceSize() {
-  // TODO(ulan): Unify this constant with the similar constant in
-  // GCIdleTimeHandler once the change is merged to 4.5.
-  static const size_t kLowAllocationThroughput = 1000;
-  size_t allocation_throughput =
-      tracer()->CurrentAllocationThroughputInBytesPerMillisecond();
-  if (FLAG_predictable || allocation_throughput == 0) return;
-  if (allocation_throughput < kLowAllocationThroughput) {
+  if (!FLAG_predictable && HasLowAllocationRate()) {
     new_space_.Shrink();
     UncommitFromSpace();
   }
 }
-void Heap::FinalizeIncrementalMarkingIfComplete(const char* comment) {
-  if (FLAG_overapproximate_weak_closure &&
-      (incremental_marking()->IsReadyToOverApproximateWeakClosure() ||
-       (!incremental_marking()->weak_closure_was_overapproximated() &&
-        mark_compact_collector_.marking_deque()->IsEmpty()))) {
-    OverApproximateWeakClosure(comment);
-  } else if (incremental_marking()->IsComplete() ||
-             (mark_compact_collector_.marking_deque()->IsEmpty())) {
-    CollectAllGarbage(kNoGCFlags, comment);
-  }
-}
-
-
 bool Heap::TryFinalizeIdleIncrementalMarking(
     double idle_time_in_ms, size_t size_of_objects,
     size_t final_incremental_mark_compact_speed_in_bytes_per_ms) {
@@ -4932,32 +4867,6 @@ GCIdleTimeHandler::HeapState Heap::ComputeHeapState() {
 }
-double Heap::AdvanceIncrementalMarking(
-    intptr_t step_size_in_bytes, double deadline_in_ms,
-    IncrementalMarking::StepActions step_actions) {
-  DCHECK(!incremental_marking()->IsStopped());
-
-  if (step_size_in_bytes == 0) {
-    step_size_in_bytes = GCIdleTimeHandler::EstimateMarkingStepSize(
-        static_cast<size_t>(GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs),
-        static_cast<size_t>(
-            tracer()->FinalIncrementalMarkCompactSpeedInBytesPerMillisecond()));
-  }
-
-  double remaining_time_in_ms = 0.0;
-  do {
-    incremental_marking()->Step(
-        step_size_in_bytes, step_actions.completion_action,
-        step_actions.force_marking, step_actions.force_completion);
-    remaining_time_in_ms = deadline_in_ms - MonotonicallyIncreasingTimeInMs();
-  } while (remaining_time_in_ms >=
-               2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
-           !incremental_marking()->IsComplete() &&
-           !mark_compact_collector_.marking_deque()->IsEmpty());
-  return remaining_time_in_ms;
-}
-
-
 bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
                                  GCIdleTimeHandler::HeapState heap_state,
                                  double deadline_in_ms) {
@@ -4967,9 +4876,19 @@ bool Heap::PerformIdleTimeAction(GCIdleTimeAction action,
       result = true;
       break;
     case DO_INCREMENTAL_MARKING: {
-      const double remaining_idle_time_in_ms =
-          AdvanceIncrementalMarking(action.parameter, deadline_in_ms,
-                                    IncrementalMarking::IdleStepActions());
+      DCHECK(!incremental_marking()->IsStopped());
+      double remaining_idle_time_in_ms = 0.0;
+      do {
+        incremental_marking()->Step(
+            action.parameter, IncrementalMarking::NO_GC_VIA_STACK_GUARD,
+            IncrementalMarking::FORCE_MARKING,
+            IncrementalMarking::DO_NOT_FORCE_COMPLETION);
+        remaining_idle_time_in_ms =
+            deadline_in_ms - MonotonicallyIncreasingTimeInMs();
+      } while (remaining_idle_time_in_ms >=
+                   2.0 * GCIdleTimeHandler::kIncrementalMarkingStepTimeInMs &&
+               !incremental_marking()->IsComplete() &&
+               !mark_compact_collector_.marking_deque()->IsEmpty());
       if (remaining_idle_time_in_ms > 0.0) {
         action.additional_work = TryFinalizeIdleIncrementalMarking(
             remaining_idle_time_in_ms, heap_state.size_of_objects,