Index: src/heap/gc-tracer.cc
diff --git a/src/heap/gc-tracer.cc b/src/heap/gc-tracer.cc
index 706d3546a2c5d574d7224e118395ca5c81ff6691..f90988cc2aeaa1b2ed27e5192ceb4a7bc35efb27 100644
--- a/src/heap/gc-tracer.cc
+++ b/src/heap/gc-tracer.cc
@@ -24,24 +24,30 @@ static intptr_t CountTotalHolesSize(Heap* heap) {
 GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope)
     : tracer_(tracer), scope_(scope) {
   start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs();
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
+    RuntimeCallStats::Enter(
+        tracer_->heap_->isolate()->counters()->tracing_runtime_call_stats(),
+        &timer_, &RuntimeCallStats::GC);

ulan
2016/09/08 12:03:37
Naive question: why can't we use the same counter
Camillo Bruni
2016/09/08 15:47:52
You're right. Previously they weren't mutually exclusive
lpy
2016/09/08 18:23:53
Done.
lpy
2016/09/08 18:23:53
right, this is also what fmeawad@ is suggesting. D
fmeawad
2016/09/07 17:31:28
This code is repeated 2 times, should we move it t
lpy
2016/09/08 18:23:53
I don't think we should make another enter and leave

+  } else if (FLAG_runtime_call_stats) {
+    // TODO(cbruni): remove once we fully moved to a trace-based system.
     RuntimeCallStats::Enter(
         tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_,
         &RuntimeCallStats::GC);
   }
-  // TODO(lpy): Add a tracing equivalent for the runtime call stats.
 }

 GCTracer::Scope::~Scope() {
   tracer_->AddScopeSample(
       scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_);
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
+    RuntimeCallStats::Leave(
+        tracer_->heap_->isolate()->counters()->tracing_runtime_call_stats(),
+        &timer_);
+  } else if (FLAG_runtime_call_stats) {
+    // TODO(cbruni): remove once we fully moved to a trace-based system.
     RuntimeCallStats::Leave(
         tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_);
   }
-  // TODO(lpy): Add a tracing equivalent for the runtime call stats.
 }

 const char* GCTracer::Scope::Name(ScopeId id) {
@@ -216,12 +222,15 @@ void GCTracer::Start(GarbageCollector collector,
   counters->aggregated_memory_heap_committed()->AddSample(start_time,
                                                           committed_memory);
   counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory);
-  // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
+    RuntimeCallStats::Enter(
+        heap_->isolate()->counters()->tracing_runtime_call_stats(), &timer_,
+        &RuntimeCallStats::GC);
+  } else if (FLAG_runtime_call_stats) {
+    // TODO(cbruni): remove once we fully moved to a trace-based system.
     RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(),
                             &timer_, &RuntimeCallStats::GC);
   }
-  // TODO(lpy): Add a tracing equivalent for the runtime call stats.
 }

 void GCTracer::MergeBaseline(const Event& baseline) {
@@ -322,12 +331,14 @@ void GCTracer::Stop(GarbageCollector collector) {
     heap_->PrintShortHeapStatistics();
   }

-  // TODO(cbruni): remove once we fully moved to a trace-based system.
-  if (FLAG_runtime_call_stats) {
+  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
+    RuntimeCallStats::Leave(
+        heap_->isolate()->counters()->tracing_runtime_call_stats(), &timer_);
+  } else if (FLAG_runtime_call_stats) {
+    // TODO(cbruni): remove once we fully moved to a trace-based system.
     RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(),
                             &timer_);
   }
-  // TODO(lpy): Add a tracing equivalent for the runtime call stats.
 }
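
A note on fmeawad's duplication comment: after this patch the same tracing-vs-flag selection around RuntimeCallStats::Enter/Leave appears in the Scope constructor and destructor as well as in GCTracer::Start and GCTracer::Stop. One way to fold the selection into a single place without introducing another enter/leave scope (which lpy wants to avoid) would be a small accessor on GCTracer that returns the currently active table. This is only a sketch against the names used in this patch, not part of the CL: RuntimeCallStatsTable() is a hypothetical helper, and the RuntimeCallStats* return type is assumed from how Enter and Leave are called above.

// Hypothetical helper, not in this CL: picks whichever RuntimeCallStats table
// is active right now, or nullptr if neither tracing nor the flag is enabled.
RuntimeCallStats* GCTracer::RuntimeCallStatsTable() {
  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
    return heap_->isolate()->counters()->tracing_runtime_call_stats();
  }
  if (FLAG_runtime_call_stats) {
    // Legacy flag-based path, kept behind FLAG_runtime_call_stats in the CL.
    return heap_->isolate()->counters()->runtime_call_stats();
  }
  return nullptr;
}

// Call sites then shrink to one Enter/Leave pair each, e.g. in GCTracer::Start
// (GCTracer::Scope would go through tracer_->RuntimeCallStatsTable()):
  if (RuntimeCallStats* stats = RuntimeCallStatsTable()) {
    RuntimeCallStats::Enter(stats, &timer_, &RuntimeCallStats::GC);
  }
// and in GCTracer::Stop:
  if (RuntimeCallStats* stats = RuntimeCallStatsTable()) {
    RuntimeCallStats::Leave(stats, &timer_);
  }

One caveat with any refactoring along these lines: the table must be chosen consistently for a matching Enter/Leave pair, so if the tracing category can toggle between the two calls, the chosen pointer would need to be remembered rather than re-evaluated at Leave time.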