Index: base/trace_event/trace_buffer.cc
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index e26e9fd28fa806cfedcc00093dd5b336f8486dc0..0a5770c3dbf0058715b64222bf1ae4db4ae1abf3 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -105,7 +105,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
 
   void EstimateTraceMemoryOverhead(
       TraceEventMemoryOverhead* overhead) override {
-    overhead->Add("TraceBufferRingBuffer", sizeof(*this));
+    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer, sizeof(*this));
     for (size_t queue_index = queue_head_; queue_index != queue_tail_;
          queue_index = NextQueueIndex(queue_index)) {
       size_t chunk_index = recyclable_chunks_queue_[queue_index];
@@ -221,7 +221,8 @@ class TraceBufferVector : public TraceBuffer {
         sizeof(*this) + max_chunks_ * sizeof(decltype(chunks_)::value_type);
     const size_t chunks_ptr_vector_resident_size =
         sizeof(*this) + chunks_.size() * sizeof(decltype(chunks_)::value_type);
-    overhead->Add("TraceBufferVector", chunks_ptr_vector_allocated_size,
+    overhead->Add(TraceEventMemoryOverhead::kTraceBuffer,
+                  chunks_ptr_vector_allocated_size,
                   chunks_ptr_vector_resident_size);
     for (size_t i = 0; i < chunks_.size(); ++i) {
       TraceBufferChunk* chunk = chunks_[i].get();
@@ -268,12 +269,13 @@ void TraceBufferChunk::EstimateTraceMemoryOverhead(
 
     // When estimating the size of TraceBufferChunk, exclude the array of trace
    // events, as they are computed individually below.
-    cached_overhead_estimate_->Add("TraceBufferChunk",
+    cached_overhead_estimate_->Add(TraceEventMemoryOverhead::kTraceBufferChunk,
                                    sizeof(*this) - sizeof(chunk_));
   }
 
   const size_t num_cached_estimated_events =
-      cached_overhead_estimate_->GetCount("TraceEvent");
+      cached_overhead_estimate_->GetCount(
+          TraceEventMemoryOverhead::kTraceEvent);
   DCHECK_LE(num_cached_estimated_events, size());
 
   if (IsFull() && num_cached_estimated_events == size()) {
@@ -291,7 +293,7 @@ void TraceBufferChunk::EstimateTraceMemoryOverhead(
     // changing as new TraceEvents are added to this chunk, so they are
     // computed on the fly.
     const size_t num_unused_trace_events = capacity() - size();
-    overhead->Add("TraceEvent (unused)",
+    overhead->Add(TraceEventMemoryOverhead::kUnusedTraceEvent,
                   num_unused_trace_events * sizeof(TraceEvent));
   }
 