OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/trace_buffer.h" | 5 #include "base/trace_event/trace_buffer.h" |
6 | 6 |
7 #include "base/trace_event/trace_event_impl.h" | 7 #include "base/trace_event/trace_event_impl.h" |
8 | 8 |
9 namespace base { | 9 namespace base { |
10 namespace trace_event { | 10 namespace trace_event { |
(...skipping 276 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
287 | 287 |
288 TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {} | 288 TraceBufferChunk::TraceBufferChunk(uint32 seq) : next_free_(0), seq_(seq) {} |
289 | 289 |
290 TraceBufferChunk::~TraceBufferChunk() {} | 290 TraceBufferChunk::~TraceBufferChunk() {} |
291 | 291 |
292 void TraceBufferChunk::Reset(uint32 new_seq) { | 292 void TraceBufferChunk::Reset(uint32 new_seq) { |
293 for (size_t i = 0; i < next_free_; ++i) | 293 for (size_t i = 0; i < next_free_; ++i) |
294 chunk_[i].Reset(); | 294 chunk_[i].Reset(); |
295 next_free_ = 0; | 295 next_free_ = 0; |
296 seq_ = new_seq; | 296 seq_ = new_seq; |
297 cached_overhead_estimate_when_full_.reset(); | 297 cached_overhead_estimate_.reset(); |
298 } | 298 } |
299 | 299 |
300 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) { | 300 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) { |
301 DCHECK(!IsFull()); | 301 DCHECK(!IsFull()); |
302 *event_index = next_free_++; | 302 *event_index = next_free_++; |
303 return &chunk_[*event_index]; | 303 return &chunk_[*event_index]; |
304 } | 304 } |
305 | 305 |
306 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const { | 306 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const { |
307 scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_)); | 307 scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_)); |
308 cloned_chunk->next_free_ = next_free_; | 308 cloned_chunk->next_free_ = next_free_; |
309 for (size_t i = 0; i < next_free_; ++i) | 309 for (size_t i = 0; i < next_free_; ++i) |
310 cloned_chunk->chunk_[i].CopyFrom(chunk_[i]); | 310 cloned_chunk->chunk_[i].CopyFrom(chunk_[i]); |
311 return cloned_chunk.Pass(); | 311 return cloned_chunk.Pass(); |
312 } | 312 } |
313 | 313 |
314 void TraceBufferChunk::EstimateTraceMemoryOverhead( | 314 void TraceBufferChunk::EstimateTraceMemoryOverhead( |
315 TraceEventMemoryOverhead* overhead) { | 315 TraceEventMemoryOverhead* overhead) { |
316 if (cached_overhead_estimate_when_full_) { | 316 if (!cached_overhead_estimate_) { |
317 DCHECK(IsFull()); | 317 cached_overhead_estimate_.reset(new TraceEventMemoryOverhead); |
318 overhead->Update(*cached_overhead_estimate_when_full_); | 318 |
| 319 // When estimating the size of TraceBufferChunk, exclude the array of trace |
| 320 // events, as they are computed individually below. |
| 321 cached_overhead_estimate_->Add("TraceBufferChunk", |
| 322 sizeof(*this) - sizeof(chunk_)); |
| 323 } |
| 324 |
| 325 const size_t num_cached_estimated_events = |
| 326 cached_overhead_estimate_->GetCount("TraceEvent"); |
| 327 DCHECK_LE(num_cached_estimated_events, size()); |
| 328 |
| 329 if (IsFull() && num_cached_estimated_events == size()) { |
| 330 overhead->Update(*cached_overhead_estimate_); |
319 return; | 331 return; |
320 } | 332 } |
321 | 333 |
322 // Cache the memory overhead estimate only if the chunk is full. | 334 for (size_t i = num_cached_estimated_events; i < size(); ++i) |
323 TraceEventMemoryOverhead* estimate = overhead; | 335 chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get()); |
| 336 |
324 if (IsFull()) { | 337 if (IsFull()) { |
325 cached_overhead_estimate_when_full_.reset(new TraceEventMemoryOverhead); | 338 cached_overhead_estimate_->AddSelf(); |
326 estimate = cached_overhead_estimate_when_full_.get(); | 339 } else { |
| 340 // The unused TraceEvents in |chunk_| are not cached. They will keep |
| 341 // changing as new TraceEvents are added to this chunk, so they are |
| 342 // computed on the fly. |
| 343 const size_t num_unused_trace_events = capacity() - size(); |
| 344 overhead->Add("TraceEvent (unused)", |
| 345 num_unused_trace_events * sizeof(TraceEvent)); |
327 } | 346 } |
328 | 347 |
329 estimate->Add("TraceBufferChunk", sizeof(*this)); | 348 overhead->Update(*cached_overhead_estimate_); |
330 for (size_t i = 0; i < next_free_; ++i) | |
331 chunk_[i].EstimateTraceMemoryOverhead(estimate); | |
332 | |
333 if (IsFull()) { | |
334 estimate->AddSelf(); | |
335 overhead->Update(*estimate); | |
336 } | |
337 } | 349 } |
338 | 350 |
339 TraceResultBuffer::OutputCallback | 351 TraceResultBuffer::OutputCallback |
340 TraceResultBuffer::SimpleOutput::GetCallback() { | 352 TraceResultBuffer::SimpleOutput::GetCallback() { |
341 return Bind(&SimpleOutput::Append, Unretained(this)); | 353 return Bind(&SimpleOutput::Append, Unretained(this)); |
342 } | 354 } |
343 | 355 |
344 void TraceResultBuffer::SimpleOutput::Append( | 356 void TraceResultBuffer::SimpleOutput::Append( |
345 const std::string& json_trace_output) { | 357 const std::string& json_trace_output) { |
346 json_output += json_trace_output; | 358 json_output += json_trace_output; |
(...skipping 27 matching lines...) Expand all Loading... |
374 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { | 386 TraceBuffer* TraceBuffer::CreateTraceBufferRingBuffer(size_t max_chunks) { |
375 return new TraceBufferRingBuffer(max_chunks); | 387 return new TraceBufferRingBuffer(max_chunks); |
376 } | 388 } |
377 | 389 |
378 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { | 390 TraceBuffer* TraceBuffer::CreateTraceBufferVectorOfSize(size_t max_chunks) { |
379 return new TraceBufferVector(max_chunks); | 391 return new TraceBufferVector(max_chunks); |
380 } | 392 } |
381 | 393 |
382 } // namespace trace_event | 394 } // namespace trace_event |
383 } // namespace base | 395 } // namespace base |
OLD | NEW |