Chromium Code Reviews

Unified Diff: base/trace_event/trace_event_impl.cc

Issue 1251203003: [tracing] Fix, simplify and speed up accounting of TraceEvent memory overhead (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Get rid of redundant check (created 5 years, 5 months ago)
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/trace_event_impl.h"

 #include <algorithm>
 #include <cmath>

 #include "base/base_switches.h"
(...skipping 425 matching lines...)
 }

 TraceBufferChunk::~TraceBufferChunk() {
 }

 void TraceBufferChunk::Reset(uint32 new_seq) {
   for (size_t i = 0; i < next_free_; ++i)
     chunk_[i].Reset();
   next_free_ = 0;
   seq_ = new_seq;
-  cached_overhead_estimate_when_full_.reset();
+  cached_overhead_estimate_.reset();
 }

 TraceEvent* TraceBufferChunk::AddTraceEvent(size_t* event_index) {
   DCHECK(!IsFull());
   *event_index = next_free_++;
   return &chunk_[*event_index];
 }

 scoped_ptr<TraceBufferChunk> TraceBufferChunk::Clone() const {
   scoped_ptr<TraceBufferChunk> cloned_chunk(new TraceBufferChunk(seq_));
   cloned_chunk->next_free_ = next_free_;
   for (size_t i = 0; i < next_free_; ++i)
     cloned_chunk->chunk_[i].CopyFrom(chunk_[i]);
   return cloned_chunk.Pass();
 }

 void TraceBufferChunk::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
-  if (cached_overhead_estimate_when_full_) {
-    DCHECK(IsFull());
-    overhead->Update(*cached_overhead_estimate_when_full_);
+  if (!cached_overhead_estimate_) {
+    cached_overhead_estimate_.reset(new TraceEventMemoryOverhead);
+
+    // When estimating the size of TraceBufferChunk, exclude the array of trace
+    // events, as they are computed individually below.
+    cached_overhead_estimate_->Add("TraceBufferChunk",
+                                   sizeof(*this) - sizeof(chunk_));
+  }
+
+  const size_t num_cached_estimated_events =
+      cached_overhead_estimate_->GetCount("TraceEvent");
+  DCHECK_LE(num_cached_estimated_events, size());
+
+  if (IsFull() && num_cached_estimated_events == size()) {
+    overhead->Update(*cached_overhead_estimate_);
     return;
   }

-  // Cache the memory overhead estimate only if the chunk is full.
-  TraceEventMemoryOverhead* estimate = overhead;
+  for (size_t i = num_cached_estimated_events; i < size(); ++i)
+    chunk_[i].EstimateTraceMemoryOverhead(cached_overhead_estimate_.get());
+
   if (IsFull()) {
-    cached_overhead_estimate_when_full_.reset(new TraceEventMemoryOverhead);
-    estimate = cached_overhead_estimate_when_full_.get();
+    cached_overhead_estimate_->AddSelf();
+  } else {
+    // The unused TraceEvents in |chunks_| are not cached. They will keep
+    // changing as new TraceEvents are added to this chunk, so they are
+    // computed on the fly.
+    const size_t num_unused_trace_events = capacity() - size();
+    overhead->Add("TraceEvent (unused)",
+                  num_unused_trace_events * sizeof(TraceEvent));
   }

-  estimate->Add("TraceBufferChunk", sizeof(*this));
-  for (size_t i = 0; i < next_free_; ++i)
-    chunk_[i].EstimateTraceMemoryOverhead(estimate);
-
-  if (IsFull()) {
-    estimate->AddSelf();
-    overhead->Update(*estimate);
-  }
+  overhead->Update(*cached_overhead_estimate_);
 }

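Note on the new chunk-level scheme: the cache is no longer valid only once the chunk is full. Instead the chunk asks the cached estimate how many TraceEvents it has already folded in (GetCount("TraceEvent")) and, on each pass, estimates only the events appended since the previous pass; the still-unused slots are reported on the fly because their count keeps shrinking. A minimal standalone sketch of this incremental pattern follows; Overhead, Chunk, Event, and EstimateInto are illustrative stand-ins, not the Chromium API.

#include <cstddef>
#include <cstdio>
#include <map>
#include <memory>
#include <string>
#include <utility>

// Illustrative stand-in for TraceEventMemoryOverhead: per object type, it
// tracks how many objects were counted and their cumulative size.
class Overhead {
 public:
  void Add(const std::string& type, size_t bytes) {
    counts_[type].first += 1;
    counts_[type].second += bytes;
  }
  size_t GetCount(const std::string& type) const {
    auto it = counts_.find(type);
    return it == counts_.end() ? 0 : it->second.first;
  }
  // Merge |other| into this estimate, like overhead->Update(...).
  void Update(const Overhead& other) {
    for (const auto& entry : other.counts_) {
      counts_[entry.first].first += entry.second.first;
      counts_[entry.first].second += entry.second.second;
    }
  }

 private:
  // type -> (count, total bytes)
  std::map<std::string, std::pair<size_t, size_t>> counts_;
};

// Illustrative stand-in for TraceBufferChunk.
class Chunk {
 public:
  static constexpr size_t kCapacity = 64;
  struct Event { char payload[64]; };

  void Append() { ++size_; }  // pretend an event was written
  size_t size() const { return size_; }

  // Mirrors the new EstimateTraceMemoryOverhead(): resume estimation at
  // the first event not yet folded into the cache.
  void EstimateInto(Overhead* total) {
    if (!cache_) {
      cache_.reset(new Overhead);
      // Count the chunk itself once, excluding the event array, which is
      // accounted per event below.
      cache_->Add("Chunk", sizeof(*this) - sizeof(events_));
    }
    const size_t cached = cache_->GetCount("Event");
    for (size_t i = cached; i < size_; ++i)  // only the new events
      cache_->Add("Event", sizeof(events_[i]));
    if (size_ < kCapacity) {
      // Unused slots shrink as events arrive, so they are reported on the
      // fly instead of being cached.
      total->Add("Event (unused)", (kCapacity - size_) * sizeof(Event));
    }
    total->Update(*cache_);
  }

 private:
  Event events_[kCapacity];
  size_t size_ = 0;
  std::unique_ptr<Overhead> cache_;
};

int main() {
  Chunk chunk;
  chunk.Append();
  chunk.Append();
  Overhead pass1;
  chunk.EstimateInto(&pass1);  // estimates events 0 and 1, caches them
  chunk.Append();
  Overhead pass2;
  chunk.EstimateInto(&pass2);  // only event 2 is estimated anew
  std::printf("events counted: %zu\n", pass2.GetCount("Event"));  // prints 3
  return 0;
}

Under this scheme a full chunk costs a single Update() on later passes, and a partially full chunk only pays for the events added since the last dump, which is where the speedup in the CL title comes from.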
 // A helper class that allows the lock to be acquired in the middle of the scope
 // and unlocks at the end of scope if locked.
 class TraceLog::OptionalAutoLock {
  public:
   explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

   ~OptionalAutoLock() {
     if (locked_)
(...skipping 176 matching lines...)
   }
 }

 void TraceEvent::Reset() {
   // Only reset fields that won't be initialized in Initialize(), or that may
   // hold references to other objects.
   duration_ = TimeDelta::FromInternalValue(-1);
   parameter_copy_storage_ = NULL;
   for (int i = 0; i < kTraceMaxNumArgs; ++i)
     convertable_values_[i] = NULL;
-  cached_memory_overhead_estimate_.reset();
 }

 void TraceEvent::UpdateDuration(const TraceTicks& now,
                                 const ThreadTicks& thread_now) {
   DCHECK_EQ(duration_.ToInternalValue(), -1);
   duration_ = now - timestamp_;
   thread_duration_ = thread_now - thread_timestamp_;
 }

 void TraceEvent::EstimateTraceMemoryOverhead(
     TraceEventMemoryOverhead* overhead) {
-  if (!cached_memory_overhead_estimate_) {
-    cached_memory_overhead_estimate_.reset(new TraceEventMemoryOverhead);
-    cached_memory_overhead_estimate_->Add("TraceEvent", sizeof(*this));
-    // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory,
-    // could be shared by several events and we might overcount. In practice
-    // this is unlikely but it's worth checking.
-    if (parameter_copy_storage_) {
-      cached_memory_overhead_estimate_->AddRefCountedString(
-          *parameter_copy_storage_.get());
-    }
-    for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
-      if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE) {
-        convertable_values_[i]->EstimateTraceMemoryOverhead(
-            cached_memory_overhead_estimate_.get());
-      }
-    }
-    cached_memory_overhead_estimate_->AddSelf();
-  }
-  overhead->Update(*cached_memory_overhead_estimate_);
+  overhead->Add("TraceEvent", sizeof(*this));
+
+  // TODO(primiano): parameter_copy_storage_ is refcounted and, in theory,
+  // could be shared by several events and we might overcount. In practice
+  // this is unlikely but it's worth checking.
+  if (parameter_copy_storage_)
+    overhead->AddRefCountedString(*parameter_copy_storage_.get());
+
+  for (size_t i = 0; i < kTraceMaxNumArgs; ++i) {
+    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+      convertable_values_[i]->EstimateTraceMemoryOverhead(overhead);
+  }
 }
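Moving the caching from TraceEvent up to TraceBufferChunk also drops the per-event cached_memory_overhead_estimate_ member (see the Reset() hunk above), so each TraceEvent no longer carries a scoped_ptr purely for bookkeeping and its estimate becomes a straight pass over its fields. For reference, the estimator interface this change leans on, reconstructed from the call sites in this diff, is sketched below; the real declaration lives in base/trace_event/trace_event_memory_overhead.h, and the comments describe the apparent contract rather than verified behavior.

#include <cstddef>

namespace base { class RefCountedString; }  // forward declaration for the sketch

// Sketch of TraceEventMemoryOverhead as implied by this diff's call sites.
class TraceEventMemoryOverhead {
 public:
  // Record one object of the given type and allocated size.
  void Add(const char* object_type, size_t allocated_size_in_bytes);

  // Convenience for RefCountedString payloads such as
  // *parameter_copy_storage_.get().
  void AddRefCountedString(const base::RefCountedString& str);

  // Account for the memory used by this estimator object itself.
  void AddSelf();

  // Merge another estimate's per-type counts and sizes into this one.
  void Update(const TraceEventMemoryOverhead& other);

  // How many objects of |object_type| have been recorded so far; this is
  // what lets TraceBufferChunk resume estimation at the first uncached
  // TraceEvent.
  size_t GetCount(const char* object_type) const;
};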

 // static
 void TraceEvent::AppendValueAsJSON(unsigned char type,
                                    TraceEvent::TraceValue value,
                                    std::string* out) {
   switch (type) {
     case TRACE_VALUE_TYPE_BOOL:
       *out += value.as_bool ? "true" : "false";
       break;
(...skipping 1823 matching lines...)
 }

 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
   if (*category_group_enabled_) {
     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                 name_, event_handle_);
   }
 }

 }  // namespace trace_event_internal