Index: base/trace_event/trace_buffer.cc
diff --git a/base/trace_event/trace_buffer.cc b/base/trace_event/trace_buffer.cc
index 1a903e9d7dc4ebe17291acbab8236fbb0ee65f12..33cb60ff22f536f28158d125f1147848160b9244 100644
--- a/base/trace_event/trace_buffer.cc
+++ b/base/trace_event/trace_buffer.cc
@@ -294,7 +294,7 @@
     chunk_[i].Reset();
   next_free_ = 0;
   seq_ = new_seq;
-  cached_overhead_estimate_when_full_.reset();
+  cached_overhead_estimate_.reset();
 }
 
 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
@@ -313,27 +313,39 @@
 
 void TraceBufferChunk::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
-  if (cached_overhead_estimate_when_full_) {
-    DCHECK(IsFull());
-    overhead->Update(*cached_overhead_estimate_when_full_);
+  if (!cached_overhead_estimate_) {
+    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
+
+    // When estimating the size of TraceBufferChunk, exclude the array of trace
+    // events, as they are computed individually below.
+    cached_overhead_estimate_->Add("TraceBufferChunk",
+                                   sizeof(*this) - sizeof(chunk_));
+  }
+
+  const size_t num_cached_estimated_events =
+      cached_overhead_estimate_->GetCount("TraceEvent");
+  DCHECK_LE(num_cached_estimated_events, size());
+
+  if (IsFull() && num_cached_estimated_events == size()) {
+    overhead->Update(*cached_overhead_estimate_);
     return;
   }
 
-  // Cache the memory overhead estimate only if the chunk is full.
-  TraceEventMemoryOverhead* estimate = overhead;
+  for (size_t i = num_cached_estimated_events; i < size(); ++i)
+    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
+
   if (IsFull()) {
-    cached_overhead_estimate_when_full_.reset(new TraceEventMemoryOverhead);
-    estimate = cached_overhead_estimate_when_full_.get();
-  }
-
-  estimate->Add("TraceBufferChunk", sizeof(*this));
-  for (size_t i = 0; i < next_free_; ++i)
-    chunk_[i].EstimateTraceMemoryOverhead(estimate);
-
-  if (IsFull()) {
-    estimate->AddSelf();
-    overhead->Update(*estimate);
-  }
+    cached_overhead_estimate_->AddSelf();
+  } else {
+    // The unused TraceEvents in |chunk_| are not cached. They will keep
+    // changing as new TraceEvents are added to this chunk, so they are
+    // computed on the fly.
+    const size_t num_unused_trace_events = capacity() - size();
+    overhead->Add("TraceEvent (unused)",
+                  num_unused_trace_events * sizeof(TraceEvent));
+  }
+
+  overhead->Update(*cached_overhead_estimate_);
 }
 
 TraceResultBuffer::OutputCallback
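
Note for reviewers: below is a minimal, self-contained sketch of the incremental caching pattern this patch adopts. Instead of caching the overhead estimate only once the chunk is full (the old behavior), the estimate now survives across calls and each call walks only the events added since the previous one. All names in the sketch (SimpleOverhead, Chunk, Event, kCapacity) are hypothetical stand-ins, not the real Chromium types, and it omits AddSelf(), which in the real code folds the estimate object's own footprint into the cached total.

#include <cassert>
#include <cstddef>
#include <iostream>
#include <map>
#include <memory>
#include <string>

// Stand-in for TraceEventMemoryOverhead: per-category counts and byte sizes.
class SimpleOverhead {
 public:
  void Add(const std::string& category, size_t bytes) {
    counts_[category] += 1;
    bytes_[category] += bytes;
  }
  size_t GetCount(const std::string& category) const {
    auto it = counts_.find(category);
    return it == counts_.end() ? 0 : it->second;
  }
  // Merge another tally into this one.
  void Update(const SimpleOverhead& other) {
    for (const auto& kv : other.counts_) counts_[kv.first] += kv.second;
    for (const auto& kv : other.bytes_) bytes_[kv.first] += kv.second;
  }
  size_t TotalBytes() const {
    size_t total = 0;
    for (const auto& kv : bytes_) total += kv.second;
    return total;
  }

 private:
  std::map<std::string, size_t> counts_;
  std::map<std::string, size_t> bytes_;
};

struct Event {
  char payload[64];
  void EstimateOverhead(SimpleOverhead* overhead) const {
    overhead->Add("Event", sizeof(*this));
  }
};

class Chunk {
 public:
  static const size_t kCapacity = 4;

  void Push() { assert(size_ < kCapacity); ++size_; }
  size_t size() const { return size_; }
  bool IsFull() const { return size_ == kCapacity; }

  // Same shape as the patched EstimateTraceMemoryOverhead(): walk only the
  // events added since the previous call, folding them into a cache that
  // persists across calls.
  void EstimateOverhead(SimpleOverhead* overhead) {
    if (!cache_) {
      cache_.reset(new SimpleOverhead);
      // Exclude the event array; events are accounted for individually.
      cache_->Add("Chunk", sizeof(*this) - sizeof(events_));
    }
    const size_t num_cached = cache_->GetCount("Event");
    assert(num_cached <= size_);
    if (IsFull() && num_cached == size_) {
      overhead->Update(*cache_);  // Everything is cached; nothing to walk.
      return;
    }
    for (size_t i = num_cached; i < size_; ++i)
      events_[i].EstimateOverhead(cache_.get());
    if (!IsFull()) {
      // The unused tail shrinks as events arrive, so it is reported on the
      // fly rather than cached.
      overhead->Add("Event (unused)", (kCapacity - size_) * sizeof(Event));
    }
    overhead->Update(*cache_);
  }

 private:
  Event events_[kCapacity];
  size_t size_ = 0;
  std::unique_ptr<SimpleOverhead> cache_;
};

int main() {
  Chunk chunk;
  chunk.Push();
  chunk.Push();
  SimpleOverhead first;
  chunk.EstimateOverhead(&first);  // Walks events 0 and 1, caching both.
  chunk.Push();
  SimpleOverhead second;
  chunk.EstimateOverhead(&second);  // Walks only event 2.
  std::cout << "first: " << first.TotalBytes()
            << ", second: " << second.TotalBytes() << "\n";
  return 0;
}

The design choice worth noting: the unused tail of the event array is reported on the fly rather than cached, because its size shrinks every time an event is added, so caching it would immediately go stale; everything else about the chunk only grows monotonically and is therefore safe to cache incrementally.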