OLD | NEW |
---|---|
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "src/heap/gc-tracer.h" | 5 #include "src/heap/gc-tracer.h" |
6 | 6 |
7 #include "src/counters.h" | 7 #include "src/counters.h" |
8 #include "src/heap/heap-inl.h" | 8 #include "src/heap/heap-inl.h" |
9 #include "src/isolate.h" | 9 #include "src/isolate.h" |
10 | 10 |
11 namespace v8 { | 11 namespace v8 { |
12 namespace internal { | 12 namespace internal { |
13 | 13 |
14 static intptr_t CountTotalHolesSize(Heap* heap) { | 14 static intptr_t CountTotalHolesSize(Heap* heap) { |
15 intptr_t holes_size = 0; | 15 intptr_t holes_size = 0; |
16 OldSpaces spaces(heap); | 16 OldSpaces spaces(heap); |
17 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { | 17 for (OldSpace* space = spaces.next(); space != NULL; space = spaces.next()) { |
18 holes_size += space->Waste() + space->Available(); | 18 holes_size += space->Waste() + space->Available(); |
19 } | 19 } |
20 return holes_size; | 20 return holes_size; |
21 } | 21 } |
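
Aside: `CountTotalHolesSize` above charges both `Waste()` (fragments too small to reuse) and `Available()` (free-list bytes) of every old space to "holes". A self-contained sketch of the same accounting with stand-in types (none of these names are V8 API):

```cpp
#include <cstdint>
#include <vector>

// Stand-in for the per-space fragmentation accounting done by
// CountTotalHolesSize above; Space and its fields are illustrative,
// not V8 API.
struct Space {
  std::intptr_t waste;      // Fragments too small to go on the free list.
  std::intptr_t available;  // Free-list bytes still usable for allocation.
};

std::intptr_t TotalHolesSize(const std::vector<Space>& old_spaces) {
  std::intptr_t holes = 0;
  for (const Space& space : old_spaces) {
    holes += space.waste + space.available;  // Both count as hole bytes.
  }
  return holes;
}
```
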
22 | 22 |
23 | 23 |
24 GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope) | 24 GCTracer::Scope::Scope(GCTracer* tracer, ScopeId scope) |
25 : tracer_(tracer), scope_(scope) { | 25 : tracer_(tracer), scope_(scope) { |
26 start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs(); | 26 start_time_ = tracer_->heap_->MonotonicallyIncreasingTimeInMs(); |
27 // TODO(cbruni): remove once we fully moved to a trace-based system. | 27 if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) { |
28 if (FLAG_runtime_call_stats) { | 28 RuntimeCallStats::Enter( |
| 29 tracer_->heap_->isolate()->counters()->tracing_runtime_call_stats(), |
ulan (2016/09/08 12:03:37): Naive question: why can't we use the same counter
Camillo Bruni (2016/09/08 15:47:52): You're right. Previously they weren't mutually exc
lpy (2016/09/08 18:23:53): Done.
lpy (2016/09/08 18:23:53): right, this is also what fmeawad@ is suggesting. D
| 30 &timer_, &RuntimeCallStats::GC); |
fmeawad (2016/09/07 17:31:28): This code is repeated 2 times, should we move it t
lpy (2016/09/08 18:23:53): I don't think we should make another enter and lea
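
A possible shape for the helper fmeawad@ asks about above, hedged as a sketch only (the helper name and the null-return convention are assumptions, not part of this CL): select the counter table once, and let callers guard `Enter`/`Leave` on the result.

```cpp
// Hypothetical helper, not part of this CL: centralizes the
// tracing-vs-flag choice that is otherwise repeated in Scope's
// constructor/destructor and in GCTracer::Start/Stop below.
RuntimeCallStats* SelectRuntimeCallStats(Isolate* isolate) {
  // Tracing-based stats take precedence when enabled; the
  // FLAG_runtime_call_stats path is the legacy one the TODOs plan to remove.
  if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) {
    return isolate->counters()->tracing_runtime_call_stats();
  }
  if (FLAG_runtime_call_stats) {
    return isolate->counters()->runtime_call_stats();
  }
  return nullptr;  // Neither mode active: callers skip Enter/Leave.
}
```
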
| 31 } else if (FLAG_runtime_call_stats) { |
| 32 // TODO(cbruni): remove once we fully moved to a trace-based system. |
29 RuntimeCallStats::Enter( | 33 RuntimeCallStats::Enter( |
30 tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_, | 34 tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_, |
31 &RuntimeCallStats::GC); | 35 &RuntimeCallStats::GC); |
32 } | 36 } |
33 // TODO(lpy): Add a tracing equivalent for the runtime call stats. | |
34 } | 37 } |
35 | 38 |
36 GCTracer::Scope::~Scope() { | 39 GCTracer::Scope::~Scope() { |
37 tracer_->AddScopeSample( | 40 tracer_->AddScopeSample( |
38 scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_); | 41 scope_, tracer_->heap_->MonotonicallyIncreasingTimeInMs() - start_time_); |
39 // TODO(cbruni): remove once we fully moved to a trace-based system. | 42 if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) { |
40 if (FLAG_runtime_call_stats) { | 43 RuntimeCallStats::Leave( |
| 44 tracer_->heap_->isolate()->counters()->tracing_runtime_call_stats(), |
| 45 &timer_); |
| 46 } else if (FLAG_runtime_call_stats) { |
| 47 // TODO(cbruni): remove once we fully moved to a trace-based system. |
41 RuntimeCallStats::Leave( | 48 RuntimeCallStats::Leave( |
42 tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_); | 49 tracer_->heap_->isolate()->counters()->runtime_call_stats(), &timer_); |
43 } | 50 } |
44 // TODO(lpy): Add a tracing equivalent for the runtime call stats. | |
45 } | 51 } |
46 | 52 |
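
The constructor/destructor pair above is an RAII timer: construction records a start time (and enters the GC counter), destruction records the elapsed sample. A self-contained illustration of the same pattern outside V8 (illustrative names only, nothing here is V8 API):

```cpp
#include <chrono>
#include <cstdio>

// Self-contained illustration of the RAII timing pattern used by
// GCTracer::Scope above.
class ScopedTimer {
 public:
  explicit ScopedTimer(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedTimer() {  // Sample is recorded when the scope exits.
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_)
                  .count();
    std::printf("%s: %lld us\n", name_, static_cast<long long>(us));
  }

 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

void Collect() {
  ScopedTimer timer("V8.GC_SCAVENGER");
  // ... collection work, timed until the end of this block ...
}
```
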
47 const char* GCTracer::Scope::Name(ScopeId id) { | 53 const char* GCTracer::Scope::Name(ScopeId id) { |
48 #define CASE(scope) \ | 54 #define CASE(scope) \ |
49 case Scope::scope: \ | 55 case Scope::scope: \ |
50 return "V8.GC_" #scope; | 56 return "V8.GC_" #scope; |
51 switch (id) { | 57 switch (id) { |
52 TRACER_SCOPES(CASE) | 58 TRACER_SCOPES(CASE) |
53 case Scope::NUMBER_OF_SCOPES: | 59 case Scope::NUMBER_OF_SCOPES: |
54 break; | 60 break; |
(...skipping 154 matching lines...) | |
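
`Scope::Name` above uses the X-macro idiom: the single `TRACER_SCOPES` list expands into both the `ScopeId` enumerators and the string table, so the two can never drift apart. A minimal self-contained version of the idiom (with a made-up scope list standing in for V8's `TRACER_SCOPES`):

```cpp
// Minimal X-macro demo mirroring Scope::Name above; DEMO_SCOPES is a
// made-up list, not V8's TRACER_SCOPES.
#define DEMO_SCOPES(V) \
  V(MC_MARK)           \
  V(MC_SWEEP)          \
  V(SCAVENGER_SCAVENGE)

enum DemoScopeId {
#define DEFINE_SCOPE(scope) scope,
  DEMO_SCOPES(DEFINE_SCOPE)
#undef DEFINE_SCOPE
      NUMBER_OF_SCOPES
};

const char* DemoName(DemoScopeId id) {
#define CASE(scope) \
  case scope:       \
    return "V8.GC_" #scope;
  switch (id) {
    DEMO_SCOPES(CASE)
    case NUMBER_OF_SCOPES:
      break;
  }
#undef CASE
  return "(unknown)";
}
```
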
209 Counters* counters = heap_->isolate()->counters(); | 215 Counters* counters = heap_->isolate()->counters(); |
210 | 216 |
211 if (collector == SCAVENGER) { | 217 if (collector == SCAVENGER) { |
212 counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason)); | 218 counters->scavenge_reason()->AddSample(static_cast<int>(gc_reason)); |
213 } else { | 219 } else { |
214 counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason)); | 220 counters->mark_compact_reason()->AddSample(static_cast<int>(gc_reason)); |
215 } | 221 } |
216 counters->aggregated_memory_heap_committed()->AddSample(start_time, | 222 counters->aggregated_memory_heap_committed()->AddSample(start_time, |
217 committed_memory); | 223 committed_memory); |
218 counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory); | 224 counters->aggregated_memory_heap_used()->AddSample(start_time, used_memory); |
219 // TODO(cbruni): remove once we fully moved to a trace-based system. | 225 if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) { |
220 if (FLAG_runtime_call_stats) { | 226 RuntimeCallStats::Enter( |
| 227 heap_->isolate()->counters()->tracing_runtime_call_stats(), &timer_, |
| 228 &RuntimeCallStats::GC); |
| 229 } else if (FLAG_runtime_call_stats) { |
| 230 // TODO(cbruni): remove once we fully moved to a trace-based system. |
221 RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(), | 231 RuntimeCallStats::Enter(heap_->isolate()->counters()->runtime_call_stats(), |
222 &timer_, &RuntimeCallStats::GC); | 232 &timer_, &RuntimeCallStats::GC); |
223 } | 233 } |
224 // TODO(lpy): Add a tracing equivalent for the runtime call stats. | |
225 } | 234 } |
226 | 235 |
227 void GCTracer::MergeBaseline(const Event& baseline) { | 236 void GCTracer::MergeBaseline(const Event& baseline) { |
228 current_.incremental_marking_bytes = | 237 current_.incremental_marking_bytes = |
229 current_.cumulative_incremental_marking_bytes - | 238 current_.cumulative_incremental_marking_bytes - |
230 baseline.cumulative_incremental_marking_bytes; | 239 baseline.cumulative_incremental_marking_bytes; |
231 current_.pure_incremental_marking_duration = | 240 current_.pure_incremental_marking_duration = |
232 current_.cumulative_pure_incremental_marking_duration - | 241 current_.cumulative_pure_incremental_marking_duration - |
233 baseline.cumulative_pure_incremental_marking_duration; | 242 baseline.cumulative_pure_incremental_marking_duration; |
234 for (int i = Scope::FIRST_INCREMENTAL_SCOPE; | 243 for (int i = Scope::FIRST_INCREMENTAL_SCOPE; |
(...skipping 80 matching lines...) | |
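
`MergeBaseline` above derives per-event numbers by subtracting a baseline snapshot from monotonically growing cumulative counters; for instance, if cumulative incremental-marking bytes were 10 MB at the baseline and 14 MB now, this event marked 4 MB. A standalone sketch of the pattern (illustrative names, not V8 API):

```cpp
#include <cstdint>

// Illustrative stand-in for the cumulative-counter deltas computed in
// MergeBaseline above.
struct Snapshot {
  std::int64_t cumulative_marking_bytes;
  double cumulative_marking_duration_ms;
};

struct EventStats {
  std::int64_t marking_bytes;
  double marking_duration_ms;
};

EventStats Delta(const Snapshot& baseline, const Snapshot& current) {
  // The counters only ever grow, so per-event values are simple differences.
  return {current.cumulative_marking_bytes - baseline.cumulative_marking_bytes,
          current.cumulative_marking_duration_ms -
              baseline.cumulative_marking_duration_ms};
}
```
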
315 if (FLAG_trace_gc_nvp) { | 324 if (FLAG_trace_gc_nvp) { |
316 PrintNVP(); | 325 PrintNVP(); |
317 } else { | 326 } else { |
318 Print(); | 327 Print(); |
319 } | 328 } |
320 | 329 |
321 if (FLAG_trace_gc) { | 330 if (FLAG_trace_gc) { |
322 heap_->PrintShortHeapStatistics(); | 331 heap_->PrintShortHeapStatistics(); |
323 } | 332 } |
324 | 333 |
325 // TODO(cbruni): remove once we fully moved to a trace-based system. | 334 if (TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED()) { |
326 if (FLAG_runtime_call_stats) { | 335 RuntimeCallStats::Leave( |
| 336 heap_->isolate()->counters()->tracing_runtime_call_stats(), &timer_); |
| 337 } else if (FLAG_runtime_call_stats) { |
| 338 // TODO(cbruni): remove once we fully moved to a trace-based system. |
327 RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(), | 339 RuntimeCallStats::Leave(heap_->isolate()->counters()->runtime_call_stats(), |
328 &timer_); | 340 &timer_); |
329 } | 341 } |
330 // TODO(lpy): Add a tracing equivalent for the runtime call stats. | |
331 } | 342 } |
332 | 343 |
333 | 344 |
334 void GCTracer::SampleAllocation(double current_ms, | 345 void GCTracer::SampleAllocation(double current_ms, |
335 size_t new_space_counter_bytes, | 346 size_t new_space_counter_bytes, |
336 size_t old_generation_counter_bytes) { | 347 size_t old_generation_counter_bytes) { |
337 if (allocation_time_ms_ == 0) { | 348 if (allocation_time_ms_ == 0) { |
338 // It is the first sample. | 349 // It is the first sample. |
339 allocation_time_ms_ = current_ms; | 350 allocation_time_ms_ = current_ms; |
340 new_space_allocation_counter_bytes_ = new_space_counter_bytes; | 351 new_space_allocation_counter_bytes_ = new_space_counter_bytes; |
(...skipping 493 matching lines...) | |
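
`SampleAllocation` above special-cases the first sample, which only records a baseline; later samples can turn counter deltas into a throughput. A standalone sketch of that sampling scheme (the rate computation after the first sample is an assumption based on the elided code, which is not shown in this hunk):

```cpp
#include <cstddef>

// Illustrative allocation-rate sampler mirroring the first-sample logic
// visible in SampleAllocation above; the rate math for later samples is
// an assumption, since that code is elided from this hunk.
struct AllocationSampler {
  double last_time_ms = 0;
  size_t last_counter_bytes = 0;

  // Returns bytes/ms since the previous sample; 0 for the first sample.
  double Sample(double now_ms, size_t counter_bytes) {
    if (last_time_ms == 0) {  // First sample: record the baseline only.
      last_time_ms = now_ms;
      last_counter_bytes = counter_bytes;
      return 0;
    }
    double dt = now_ms - last_time_ms;
    if (dt <= 0) return 0;  // Guard against zero/negative elapsed time.
    double rate = static_cast<double>(counter_bytes - last_counter_bytes) / dt;
    last_time_ms = now_ms;
    last_counter_bytes = counter_bytes;
    return rate;
  }
};
```
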
834 } | 845 } |
835 | 846 |
836 void GCTracer::ResetSurvivalEvents() { recorded_survival_ratios_.Reset(); } | 847 void GCTracer::ResetSurvivalEvents() { recorded_survival_ratios_.Reset(); } |
837 | 848 |
838 void GCTracer::NotifyIncrementalMarkingStart() { | 849 void GCTracer::NotifyIncrementalMarkingStart() { |
839 incremental_marking_start_time_ = heap_->MonotonicallyIncreasingTimeInMs(); | 850 incremental_marking_start_time_ = heap_->MonotonicallyIncreasingTimeInMs(); |
840 } | 851 } |
841 | 852 |
842 } // namespace internal | 853 } // namespace internal |
843 } // namespace v8 | 854 } // namespace v8 |
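
For context, a call site uses the scope from this file roughly as follows (hypothetical call site for illustration; only the constructor signature is taken from the diff above, and the specific scope id is assumed):

```cpp
// Hypothetical call site, for illustration only.
void RunMarkingPhase(GCTracer* tracer) {
  // The ctor enters the GC counter and records the start time; the sample
  // is added and the counter left again in ~Scope at the end of this block.
  GCTracer::Scope scope(tracer, GCTracer::Scope::MC_MARK);
  // ... marking work ...
}
```
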