Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(15)

Side by Side Diff: base/debug/trace_event_impl.cc

Issue 23556003: Implement about:tracing UI for the sampling profiler (Chromium part) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « base/debug/trace_event_impl.h ('k') | base/debug/trace_event_unittest.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/trace_event_impl.h" 5 #include "base/debug/trace_event_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/base_switches.h" 9 #include "base/base_switches.h"
10 #include "base/bind.h" 10 #include "base/bind.h"
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
47 47
48 namespace base { 48 namespace base {
49 namespace debug { 49 namespace debug {
50 50
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceEventVectorBufferSize = 250000;
// Capacity of the ring buffer used in record-continuously mode: a quarter
// of the vector buffer, overwriting the oldest events when full.
const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4;
// Number of events serialized per callback invocation when flushing to JSON.
const size_t kTraceEventBatchSize = 1000;
// Initial std::vector reservation for event buffers.
const size_t kTraceEventInitialBufferSize = 1024;
// Can store results for 30 seconds with 1 ms sampling interval.
const size_t kSamplingTraceEventBufferSize = 30000;
57 59
58 #define MAX_CATEGORY_GROUPS 100 60 #define MAX_CATEGORY_GROUPS 100
59 61
60 namespace { 62 namespace {
61 63
62 // Parallel arrays g_category_groups and g_category_group_enabled are separate 64 // Parallel arrays g_category_groups and g_category_group_enabled are separate
63 // so that a pointer to a member of g_category_group_enabled can be easily 65 // so that a pointer to a member of g_category_group_enabled can be easily
64 // converted to an index into g_category_groups. This allows macros to deal 66 // converted to an index into g_category_groups. This allows macros to deal
65 // only with char enabled pointers from g_category_group_enabled, and we can 67 // only with char enabled pointers from g_category_group_enabled, and we can
66 // convert internally to determine the category name from the char enabled 68 // convert internally to determine the category name from the char enabled
(...skipping 19 matching lines...) Expand all
86 g_current_thread_name = LAZY_INSTANCE_INITIALIZER; 88 g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
87 89
88 const char kRecordUntilFull[] = "record-until-full"; 90 const char kRecordUntilFull[] = "record-until-full";
89 const char kRecordContinuously[] = "record-continuously"; 91 const char kRecordContinuously[] = "record-continuously";
90 const char kEnableSampling[] = "enable-sampling"; 92 const char kEnableSampling[] = "enable-sampling";
91 93
92 } // namespace 94 } // namespace
93 95
94 class TraceBufferRingBuffer : public TraceBuffer { 96 class TraceBufferRingBuffer : public TraceBuffer {
95 public: 97 public:
96 TraceBufferRingBuffer() 98 TraceBufferRingBuffer(size_t buffer_size)
97 : unused_event_index_(0), 99 : unused_event_index_(0),
98 oldest_event_index_(0) { 100 oldest_event_index_(0),
101 buffer_size_(buffer_size) {
99 logged_events_.reserve(kTraceEventInitialBufferSize); 102 logged_events_.reserve(kTraceEventInitialBufferSize);
100 } 103 }
101 104
102 virtual ~TraceBufferRingBuffer() {} 105 virtual ~TraceBufferRingBuffer() {}
103 106
104 virtual void AddEvent(const TraceEvent& event) OVERRIDE { 107 virtual void AddEvent(const TraceEvent& event) OVERRIDE {
105 if (unused_event_index_ < Size()) 108 if (unused_event_index_ < Size())
106 logged_events_[unused_event_index_] = event; 109 logged_events_[unused_event_index_] = event;
107 else 110 else
108 logged_events_.push_back(event); 111 logged_events_.push_back(event);
109 112
110 unused_event_index_ = NextIndex(unused_event_index_); 113 unused_event_index_ = NextIndex(unused_event_index_, buffer_size_);
111 if (unused_event_index_ == oldest_event_index_) { 114 if (unused_event_index_ == oldest_event_index_) {
112 oldest_event_index_ = NextIndex(oldest_event_index_); 115 oldest_event_index_ = NextIndex(
116 oldest_event_index_, buffer_size_);
113 } 117 }
114 } 118 }
115 119
116 virtual bool HasMoreEvents() const OVERRIDE { 120 virtual bool HasMoreEvents() const OVERRIDE {
117 return oldest_event_index_ != unused_event_index_; 121 return oldest_event_index_ != unused_event_index_;
118 } 122 }
119 123
120 virtual const TraceEvent& NextEvent() OVERRIDE { 124 virtual const TraceEvent& NextEvent() OVERRIDE {
121 DCHECK(HasMoreEvents()); 125 DCHECK(HasMoreEvents());
122 126
123 size_t next = oldest_event_index_; 127 size_t next = oldest_event_index_;
124 oldest_event_index_ = NextIndex(oldest_event_index_); 128 oldest_event_index_ = NextIndex(oldest_event_index_, buffer_size_);
125 return GetEventAt(next); 129 return GetEventAt(next);
126 } 130 }
127 131
128 virtual bool IsFull() const OVERRIDE { 132 virtual bool IsFull() const OVERRIDE {
129 return false; 133 return false;
130 } 134 }
131 135
132 virtual size_t CountEnabledByName( 136 virtual size_t CountEnabledByName(
133 const unsigned char* category, 137 const unsigned char* category,
134 const std::string& event_name) const OVERRIDE { 138 const std::string& event_name) const OVERRIDE {
135 size_t notify_count = 0; 139 size_t notify_count = 0;
136 size_t index = oldest_event_index_; 140 size_t index = oldest_event_index_;
137 while (index != unused_event_index_) { 141 while (index != unused_event_index_) {
138 const TraceEvent& event = GetEventAt(index); 142 const TraceEvent& event = GetEventAt(index);
139 if (category == event.category_group_enabled() && 143 if (category == event.category_group_enabled() &&
140 strcmp(event_name.c_str(), event.name()) == 0) { 144 strcmp(event_name.c_str(), event.name()) == 0) {
141 ++notify_count; 145 ++notify_count;
142 } 146 }
143 index = NextIndex(index); 147 index = NextIndex(index, buffer_size_);
144 } 148 }
145 return notify_count; 149 return notify_count;
146 } 150 }
147 151
148 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE { 152 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
149 DCHECK(index < logged_events_.size()); 153 DCHECK(index < logged_events_.size());
150 return logged_events_[index]; 154 return logged_events_[index];
151 } 155 }
152 156
153 virtual size_t Size() const OVERRIDE { 157 virtual size_t Size() const OVERRIDE {
154 return logged_events_.size(); 158 return logged_events_.size();
155 } 159 }
156 160
157 virtual size_t Capacity() const OVERRIDE { 161 virtual size_t Capacity() const OVERRIDE {
158 return kTraceEventRingBufferSize; 162 return kTraceEventRingBufferSize;
159 } 163 }
160 164
165 virtual TraceBuffer* Clone() const OVERRIDE {
166 TraceBufferRingBuffer* clonedBuffer =
167 new TraceBufferRingBuffer(buffer_size_);
168 size_t index = oldest_event_index_;
169 while (index != unused_event_index_) {
170 const TraceEvent& event = GetEventAt(index);
171 clonedBuffer->AddEvent(event);
172 index = NextIndex(index, buffer_size_);
173 }
174 return clonedBuffer;
175 }
176
161 private: 177 private:
162 static size_t NextIndex(size_t index) { 178 static size_t NextIndex(size_t index, size_t buffer_size) {
163 index++; 179 index++;
164 if (index >= kTraceEventRingBufferSize) 180 if (index >= buffer_size)
165 index = 0; 181 index = 0;
166 return index; 182 return index;
167 } 183 }
168 184
169 size_t unused_event_index_; 185 size_t unused_event_index_;
170 size_t oldest_event_index_; 186 size_t oldest_event_index_;
187 size_t buffer_size_;
171 std::vector<TraceEvent> logged_events_; 188 std::vector<TraceEvent> logged_events_;
172 189
173 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer); 190 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
174 }; 191 };
175 192
176 class TraceBufferVector : public TraceBuffer { 193 class TraceBufferVector : public TraceBuffer {
177 public: 194 public:
178 TraceBufferVector() : current_iteration_index_(0) { 195 TraceBufferVector() : current_iteration_index_(0) {
179 logged_events_.reserve(kTraceEventInitialBufferSize); 196 logged_events_.reserve(kTraceEventInitialBufferSize);
180 } 197 }
(...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after
224 } 241 }
225 242
226 virtual size_t Size() const OVERRIDE { 243 virtual size_t Size() const OVERRIDE {
227 return logged_events_.size(); 244 return logged_events_.size();
228 } 245 }
229 246
230 virtual size_t Capacity() const OVERRIDE { 247 virtual size_t Capacity() const OVERRIDE {
231 return kTraceEventVectorBufferSize; 248 return kTraceEventVectorBufferSize;
232 } 249 }
233 250
251 virtual TraceBuffer* Clone() const OVERRIDE {
252 NOTIMPLEMENTED();
253 return NULL;
254 }
255
234 private: 256 private:
235 size_t current_iteration_index_; 257 size_t current_iteration_index_;
236 std::vector<TraceEvent> logged_events_; 258 std::vector<TraceEvent> logged_events_;
237 259
238 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector); 260 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
239 }; 261 };
240 262
241 class TraceBufferDiscardsEvents : public TraceBuffer { 263 class TraceBufferDiscardsEvents : public TraceBuffer {
242 public: 264 public:
243 virtual ~TraceBufferDiscardsEvents() { } 265 virtual ~TraceBufferDiscardsEvents() { }
(...skipping 16 matching lines...) Expand all
260 282
261 virtual size_t Size() const OVERRIDE { return 0; } 283 virtual size_t Size() const OVERRIDE { return 0; }
262 284
263 // As this buffer is never full, we can return any positive number. 285 // As this buffer is never full, we can return any positive number.
264 virtual size_t Capacity() const OVERRIDE { return 1; } 286 virtual size_t Capacity() const OVERRIDE { return 1; }
265 287
266 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE { 288 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
267 NOTREACHED(); 289 NOTREACHED();
268 return *static_cast<TraceEvent*>(NULL); 290 return *static_cast<TraceEvent*>(NULL);
269 } 291 }
292
293 virtual TraceBuffer* Clone() const OVERRIDE {
294 NOTIMPLEMENTED();
295 return NULL;
296 }
270 }; 297 };
271 298
272 //////////////////////////////////////////////////////////////////////////////// 299 ////////////////////////////////////////////////////////////////////////////////
273 // 300 //
274 // TraceEvent 301 // TraceEvent
275 // 302 //
276 //////////////////////////////////////////////////////////////////////////////// 303 ////////////////////////////////////////////////////////////////////////////////
277 304
278 namespace { 305 namespace {
279 306
(...skipping 365 matching lines...) Expand 10 before | Expand all | Expand 10 after
645 672
646 // This object must be created on the IO thread. 673 // This object must be created on the IO thread.
647 class TraceSamplingThread : public PlatformThread::Delegate { 674 class TraceSamplingThread : public PlatformThread::Delegate {
648 public: 675 public:
649 TraceSamplingThread(); 676 TraceSamplingThread();
650 virtual ~TraceSamplingThread(); 677 virtual ~TraceSamplingThread();
651 678
652 // Implementation of PlatformThread::Delegate: 679 // Implementation of PlatformThread::Delegate:
653 virtual void ThreadMain() OVERRIDE; 680 virtual void ThreadMain() OVERRIDE;
654 681
655 static void DefaultSampleCallback(TraceBucketData* bucekt_data); 682 static void DefaultSamplingCallback(TraceBucketData* bucekt_data);
683 static void ContinuousSamplingCallback(TraceBucketData* bucekt_data);
656 684
657 void Stop(); 685 void Stop();
658 void InstallWaitableEventForSamplingTesting(WaitableEvent* waitable_event); 686 void InstallWaitableEventForSamplingTesting(WaitableEvent* waitable_event);
659 687
660 private: 688 private:
661 friend class TraceLog; 689 friend class TraceLog;
662 690
663 void GetSamples(); 691 void GetSamples();
664 // Not thread-safe. Once the ThreadMain has been called, this can no longer 692 // Not thread-safe. Once the ThreadMain has been called, this can no longer
665 // be called. 693 // be called.
(...skipping 26 matching lines...) Expand all
692 while (!cancellation_flag_->IsSet()) { 720 while (!cancellation_flag_->IsSet()) {
693 PlatformThread::Sleep( 721 PlatformThread::Sleep(
694 TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds)); 722 TimeDelta::FromMicroseconds(kSamplingFrequencyMicroseconds));
695 GetSamples(); 723 GetSamples();
696 if (waitable_event_for_testing_.get()) 724 if (waitable_event_for_testing_.get())
697 waitable_event_for_testing_->Signal(); 725 waitable_event_for_testing_->Signal();
698 } 726 }
699 } 727 }
700 728
701 // static 729 // static
702 void TraceSamplingThread::DefaultSampleCallback(TraceBucketData* bucket_data) { 730 void TraceSamplingThread::DefaultSamplingCallback(
731 TraceBucketData* bucket_data) {
703 TRACE_EVENT_API_ATOMIC_WORD category_and_name = 732 TRACE_EVENT_API_ATOMIC_WORD category_and_name =
704 TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket); 733 TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
705 if (!category_and_name) 734 if (!category_and_name)
706 return; 735 return;
707 const char* const combined = 736 const char* const combined =
708 reinterpret_cast<const char* const>(category_and_name); 737 reinterpret_cast<const char* const>(category_and_name);
709 const char* category_group; 738 const char* category_group;
710 const char* name; 739 const char* name;
711 ExtractCategoryAndName(combined, &category_group, &name); 740 ExtractCategoryAndName(combined, &category_group, &name);
712 TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE, 741 TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
713 TraceLog::GetCategoryGroupEnabled(category_group), 742 TraceLog::GetCategoryGroupEnabled(category_group),
714 name, 0, 0, NULL, NULL, NULL, NULL, 0); 743 name, 0, 0, NULL, NULL, NULL, NULL, 0);
715 } 744 }
716 745
746 // static
747 void TraceSamplingThread::ContinuousSamplingCallback(
748 TraceBucketData* bucket_data) {
749 TRACE_EVENT_API_ATOMIC_WORD category_and_name =
750 TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
751 if (!category_and_name)
752 return;
753 const char* const combined =
754 reinterpret_cast<const char* const>(category_and_name);
755 const char* category_group;
756 const char* name;
757 ExtractCategoryAndName(combined, &category_group, &name);
758 TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
759 TraceLog::GetCategoryGroupEnabled(category_group),
760 name, 0, 0, NULL, NULL, NULL, NULL,
761 TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING);
762 }
763
717 void TraceSamplingThread::GetSamples() { 764 void TraceSamplingThread::GetSamples() {
718 for (size_t i = 0; i < sample_buckets_.size(); ++i) { 765 for (size_t i = 0; i < sample_buckets_.size(); ++i) {
719 TraceBucketData* bucket_data = &sample_buckets_[i]; 766 TraceBucketData* bucket_data = &sample_buckets_[i];
720 bucket_data->callback.Run(bucket_data); 767 bucket_data->callback.Run(bucket_data);
721 } 768 }
722 } 769 }
723 770
724 void TraceSamplingThread::RegisterSampleBucket( 771 void TraceSamplingThread::RegisterSampleBucket(
725 TRACE_EVENT_API_ATOMIC_WORD* bucket, 772 TRACE_EVENT_API_ATOMIC_WORD* bucket,
726 const char* const name, 773 const char* const name,
(...skipping 12 matching lines...) Expand all
739 786
// Asks ThreadMain's sampling loop to exit; the flag is checked once per
// sampling interval, so the thread stops after its current sleep.
void TraceSamplingThread::Stop() {
  cancellation_flag_->Set();
}
743 790
// Test hook: takes ownership of |waitable_event|, which ThreadMain signals
// after every sampling pass so tests can synchronize with the sampler.
void TraceSamplingThread::InstallWaitableEventForSamplingTesting(
    WaitableEvent* waitable_event) {
  waitable_event_for_testing_.reset(waitable_event);
}
748 795
// Binds a sampling bucket (an atomic word written by instrumented threads)
// to a human-readable name and the callback that converts the bucket's
// current value into a trace event.
TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                 const char* name,
                                 TraceSampleCallback callback)
    : bucket(bucket),
      bucket_name(name),
      callback(callback) {
}

TraceBucketData::~TraceBucketData() {
}
(...skipping 52 matching lines...) Expand 10 before | Expand all | Expand 10 after
812 } 858 }
813 } 859 }
814 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) 860 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
815 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. 861 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
816 862
817 return static_cast<Options>(ret); 863 return static_cast<Options>(ret);
818 } 864 }
819 865
820 TraceLog::TraceLog() 866 TraceLog::TraceLog()
821 : enable_count_(0), 867 : enable_count_(0),
868 continuous_sampling_enable_count_(0),
822 num_traces_recorded_(0), 869 num_traces_recorded_(0),
823 event_callback_(NULL), 870 event_callback_(NULL),
824 dispatching_to_observer_list_(false), 871 dispatching_to_observer_list_(false),
825 process_sort_index_(0), 872 process_sort_index_(0),
826 watch_category_(NULL), 873 watch_category_(NULL),
827 trace_options_(RECORD_UNTIL_FULL), 874 trace_options_(RECORD_UNTIL_FULL),
828 sampling_thread_handle_(0), 875 sampling_thread_handle_(0),
829 category_filter_(CategoryFilter::kDefaultCategoryFilterString) { 876 category_filter_(CategoryFilter::kDefaultCategoryFilterString) {
830 // Trace is enabled or disabled on one thread while other threads are 877 // Trace is enabled or disabled on one thread while other threads are
831 // accessing the enabled flag. We don't care whether edge-case events are 878 // accessing the enabled flag. We don't care whether edge-case events are
(...skipping 164 matching lines...) Expand 10 before | Expand all | Expand 10 after
996 num_traces_recorded_++; 1043 num_traces_recorded_++;
997 1044
998 category_filter_ = CategoryFilter(category_filter); 1045 category_filter_ = CategoryFilter(category_filter);
999 UpdateCategoryGroupEnabledFlags(); 1046 UpdateCategoryGroupEnabledFlags();
1000 1047
1001 if (options & ENABLE_SAMPLING) { 1048 if (options & ENABLE_SAMPLING) {
1002 sampling_thread_.reset(new TraceSamplingThread); 1049 sampling_thread_.reset(new TraceSamplingThread);
1003 sampling_thread_->RegisterSampleBucket( 1050 sampling_thread_->RegisterSampleBucket(
1004 &g_trace_state[0], 1051 &g_trace_state[0],
1005 "bucket0", 1052 "bucket0",
1006 Bind(&TraceSamplingThread::DefaultSampleCallback)); 1053 Bind(&TraceSamplingThread::DefaultSamplingCallback));
1007 sampling_thread_->RegisterSampleBucket( 1054 sampling_thread_->RegisterSampleBucket(
1008 &g_trace_state[1], 1055 &g_trace_state[1],
1009 "bucket1", 1056 "bucket1",
1010 Bind(&TraceSamplingThread::DefaultSampleCallback)); 1057 Bind(&TraceSamplingThread::DefaultSamplingCallback));
1011 sampling_thread_->RegisterSampleBucket( 1058 sampling_thread_->RegisterSampleBucket(
1012 &g_trace_state[2], 1059 &g_trace_state[2],
1013 "bucket2", 1060 "bucket2",
1014 Bind(&TraceSamplingThread::DefaultSampleCallback)); 1061 Bind(&TraceSamplingThread::DefaultSamplingCallback));
1015 if (!PlatformThread::Create( 1062 if (!PlatformThread::Create(
1016 0, sampling_thread_.get(), &sampling_thread_handle_)) { 1063 0, sampling_thread_.get(), &sampling_thread_handle_)) {
1017 DCHECK(false) << "failed to create thread"; 1064 DCHECK(false) << "failed to create thread";
1018 } 1065 }
1019 } 1066 }
1020 1067
1021 dispatching_to_observer_list_ = true; 1068 dispatching_to_observer_list_ = true;
1022 observer_list = enabled_state_observer_list_; 1069 observer_list = enabled_state_observer_list_;
1023 } 1070 }
1024 // Notify observers outside the lock in case they trigger trace events. 1071 // Notify observers outside the lock in case they trigger trace events.
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
1058 PlatformThread::Join(sampling_thread_handle_); 1105 PlatformThread::Join(sampling_thread_handle_);
1059 lock_.Acquire(); 1106 lock_.Acquire();
1060 sampling_thread_handle_ = PlatformThreadHandle(); 1107 sampling_thread_handle_ = PlatformThreadHandle();
1061 sampling_thread_.reset(); 1108 sampling_thread_.reset();
1062 } 1109 }
1063 1110
1064 category_filter_.Clear(); 1111 category_filter_.Clear();
1065 watch_category_ = NULL; 1112 watch_category_ = NULL;
1066 watch_event_name_ = ""; 1113 watch_event_name_ = "";
1067 UpdateCategoryGroupEnabledFlags(); 1114 UpdateCategoryGroupEnabledFlags();
1068 AddMetadataEvents(); 1115 AddMetadataEvents(0);
1069 1116
1070 dispatching_to_observer_list_ = true; 1117 dispatching_to_observer_list_ = true;
1071 observer_list = enabled_state_observer_list_; 1118 observer_list = enabled_state_observer_list_;
1072 } 1119 }
1073 1120
1074 // Dispatch to observers outside the lock in case the observer triggers a 1121 // Dispatch to observers outside the lock in case the observer triggers a
1075 // trace event. 1122 // trace event.
1076 for (size_t i = 0; i < observer_list.size(); ++i) 1123 for (size_t i = 0; i < observer_list.size(); ++i)
1077 observer_list[i]->OnTraceLogDisabled(); 1124 observer_list[i]->OnTraceLogDisabled();
1078 1125
1079 { 1126 {
1080 AutoLock lock(lock_); 1127 AutoLock lock(lock_);
1081 dispatching_to_observer_list_ = false; 1128 dispatching_to_observer_list_ = false;
1082 } 1129 }
1083 } 1130 }
1084 1131
1132 void TraceLog::SetContinuousSamplingEnabled() {
1133 AutoLock lock(lock_);
1134 if (continuous_sampling_enable_count_++ > 0)
1135 return;
1136
1137 continuous_sampling_logged_events_.reset(
1138 new TraceBufferRingBuffer(kSamplingTraceEventBufferSize));
1139
1140 continuous_sampling_thread_.reset(new TraceSamplingThread);
1141 continuous_sampling_thread_->RegisterSampleBucket(
1142 &g_trace_state[0],
1143 "bucket0",
1144 Bind(&TraceSamplingThread::ContinuousSamplingCallback));
1145 continuous_sampling_thread_->RegisterSampleBucket(
1146 &g_trace_state[1],
1147 "bucket1",
1148 Bind(&TraceSamplingThread::ContinuousSamplingCallback));
1149 continuous_sampling_thread_->RegisterSampleBucket(
1150 &g_trace_state[2],
1151 "bucket2",
1152 Bind(&TraceSamplingThread::ContinuousSamplingCallback));
1153 if (!PlatformThread::Create(
1154 0,
1155 continuous_sampling_thread_.get(),
1156 &continuous_sampling_thread_handle_)) {
1157 DCHECK(false) << "failed to create thread";
1158 }
1159 }
1160
// Stops continuous sampling. Disable calls are reference-counted: only the
// call that drops the count to zero actually stops and joins the thread.
void TraceLog::SetContinuousSamplingDisabled() {
  AutoLock lock(lock_);
  DCHECK(continuous_sampling_enable_count_ > 0);
  if (--continuous_sampling_enable_count_ != 0)
    return;

  if (continuous_sampling_thread_.get()) {
    continuous_sampling_thread_->Stop();
    // Drop lock_ while joining: the sampling thread may need lock_ (e.g. to
    // add its final trace events) before it can exit, so joining while
    // holding it could deadlock. AutoLock re-releases on scope exit, so the
    // lock must be re-acquired before returning.
    lock_.Release();
    PlatformThread::Join(continuous_sampling_thread_handle_);
    lock_.Acquire();
    continuous_sampling_thread_handle_ = PlatformThreadHandle();
    continuous_sampling_thread_.reset();
  }
}
1176
1085 int TraceLog::GetNumTracesRecorded() { 1177 int TraceLog::GetNumTracesRecorded() {
1086 AutoLock lock(lock_); 1178 AutoLock lock(lock_);
1087 if (enable_count_ == 0) 1179 if (enable_count_ == 0)
1088 return -1; 1180 return -1;
1089 return num_traces_recorded_; 1181 return num_traces_recorded_;
1090 } 1182 }
1091 1183
void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
  // NOTE(review): the observer list is mutated here without taking lock_,
  // while SetEnabled/SetDisabled copy it under lock -- presumably observers
  // are registered before tracing starts; verify against callers.
  enabled_state_observer_list_.push_back(listener);
}
(...skipping 21 matching lines...) Expand all
1116 } 1208 }
1117 1209
// Installs the callback used to report notifications (e.g. buffer full,
// watch event); guarded by lock_ so installation is atomic with readers.
void TraceLog::SetNotificationCallback(
    const TraceLog::NotificationCallback& cb) {
  AutoLock lock(lock_);
  notification_callback_ = cb;
}
1123 1215
1124 TraceBuffer* TraceLog::GetTraceBuffer() { 1216 TraceBuffer* TraceLog::GetTraceBuffer() {
1125 if (trace_options_ & RECORD_CONTINUOUSLY) 1217 if (trace_options_ & RECORD_CONTINUOUSLY)
1126 return new TraceBufferRingBuffer(); 1218 return new TraceBufferRingBuffer(kTraceEventRingBufferSize);
1127 else if (trace_options_ & ECHO_TO_CONSOLE) 1219 else if (trace_options_ & ECHO_TO_CONSOLE)
1128 return new TraceBufferDiscardsEvents(); 1220 return new TraceBufferDiscardsEvents();
1129 return new TraceBufferVector(); 1221 return new TraceBufferVector();
1130 } 1222 }
1131 1223
1132 void TraceLog::SetEventCallback(EventCallback cb) { 1224 void TraceLog::SetEventCallback(EventCallback cb) {
1133 AutoLock lock(lock_); 1225 AutoLock lock(lock_);
1134 event_callback_ = cb; 1226 event_callback_ = cb;
1135 }; 1227 };
1136 1228
(...skipping 20 matching lines...) Expand all
1157 &(json_events_str_ptr->data())); 1249 &(json_events_str_ptr->data()));
1158 1250
1159 if (!previous_logged_events->HasMoreEvents()) 1251 if (!previous_logged_events->HasMoreEvents())
1160 break; 1252 break;
1161 } 1253 }
1162 1254
1163 cb.Run(json_events_str_ptr); 1255 cb.Run(json_events_str_ptr);
1164 } 1256 }
1165 } 1257 }
1166 1258
1259 void TraceLog::FlushContinuousSamplingTracing(
1260 const TraceLog::OutputCallback& cb) {
1261 if (!continuous_sampling_enable_count_)
1262 return;
1263
1264 scoped_ptr<TraceBuffer> previous_logged_events;
1265 {
1266 AutoLock lock(lock_);
1267 AddMetadataEvents(TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING);
1268 previous_logged_events.reset(continuous_sampling_logged_events_->Clone());
1269 } // release lock
1270
1271 while (previous_logged_events->HasMoreEvents()) {
1272 scoped_refptr<RefCountedString> json_events_str_ptr =
1273 new RefCountedString();
1274
1275 for (size_t i = 0; i < kTraceEventBatchSize; ++i) {
1276 if (i > 0)
1277 *(&(json_events_str_ptr->data())) += ",";
1278
1279 previous_logged_events->NextEvent().AppendAsJSON(
1280 &(json_events_str_ptr->data()));
1281
1282 if (!previous_logged_events->HasMoreEvents())
1283 break;
1284 }
1285
1286 cb.Run(json_events_str_ptr);
1287 }
1288 }
1289
1167 void TraceLog::AddTraceEvent( 1290 void TraceLog::AddTraceEvent(
1168 char phase, 1291 char phase,
1169 const unsigned char* category_group_enabled, 1292 const unsigned char* category_group_enabled,
1170 const char* name, 1293 const char* name,
1171 unsigned long long id, 1294 unsigned long long id,
1172 int num_args, 1295 int num_args,
1173 const char** arg_names, 1296 const char** arg_names,
1174 const unsigned char* arg_types, 1297 const unsigned char* arg_types,
1175 const unsigned long long* arg_values, 1298 const unsigned long long* arg_values,
1176 scoped_ptr<ConvertableToTraceFormat> convertable_values[], 1299 scoped_ptr<ConvertableToTraceFormat> convertable_values[],
(...skipping 17 matching lines...) Expand all
1194 const char** arg_names, 1317 const char** arg_names,
1195 const unsigned char* arg_types, 1318 const unsigned char* arg_types,
1196 const unsigned long long* arg_values, 1319 const unsigned long long* arg_values,
1197 scoped_ptr<ConvertableToTraceFormat> convertable_values[], 1320 scoped_ptr<ConvertableToTraceFormat> convertable_values[],
1198 unsigned char flags) { 1321 unsigned char flags) {
1199 DCHECK(name); 1322 DCHECK(name);
1200 1323
1201 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) 1324 if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
1202 id ^= process_id_hash_; 1325 id ^= process_id_hash_;
1203 1326
1327 TraceBuffer* logged_events = (flags & TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING) ?
1328 continuous_sampling_logged_events_.get() : logged_events_.get();
1329
1204 #if defined(OS_ANDROID) 1330 #if defined(OS_ANDROID)
1205 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id, 1331 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
1206 num_args, arg_names, arg_types, arg_values, convertable_values, 1332 num_args, arg_names, arg_types, arg_values, convertable_values,
1207 flags); 1333 flags);
1208 #endif 1334 #endif
1209 1335
1210 if (!IsCategoryGroupEnabled(category_group_enabled)) 1336 // We enable all categories in continuous sampling tracing because it doesn't
1211 return; 1337 // make sense to filter categories in sampling tracing.
1338 if (!IsCategoryGroupEnabled(category_group_enabled) &&
1339 !(flags & TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING))
1340 return;
1212 1341
1213 TimeTicks now = timestamp - time_offset_; 1342 TimeTicks now = timestamp - time_offset_;
1214 base::TimeTicks thread_now; 1343 base::TimeTicks thread_now;
1215 if (base::TimeTicks::IsThreadNowSupported()) 1344 if (base::TimeTicks::IsThreadNowSupported())
1216 thread_now = base::TimeTicks::ThreadNow(); 1345 thread_now = base::TimeTicks::ThreadNow();
1217 EventCallback event_callback_copy; 1346 EventCallback event_callback_copy;
1218 1347
1219 NotificationHelper notifier(this); 1348 NotificationHelper notifier(this);
1220 1349
1221 // Check and update the current thread name only if the event is for the 1350 // Check and update the current thread name only if the event is for the
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after
1258 num_args, arg_names, arg_types, arg_values, 1387 num_args, arg_names, arg_types, arg_values,
1259 convertable_values, flags); 1388 convertable_values, flags);
1260 1389
1261 do { 1390 do {
1262 AutoLock lock(lock_); 1391 AutoLock lock(lock_);
1263 1392
1264 event_callback_copy = event_callback_; 1393 event_callback_copy = event_callback_;
1265 if (logged_events_->IsFull()) 1394 if (logged_events_->IsFull())
1266 break; 1395 break;
1267 1396
1268 logged_events_->AddEvent(trace_event); 1397 logged_events->AddEvent(trace_event);
1269 1398
1270 if (trace_options_ & ECHO_TO_CONSOLE) { 1399 if (trace_options_ & ECHO_TO_CONSOLE) {
1271 TimeDelta duration; 1400 TimeDelta duration;
1272 if (phase == TRACE_EVENT_PHASE_END) { 1401 if (phase == TRACE_EVENT_PHASE_END) {
1273 duration = timestamp - thread_event_start_times_[thread_id].top(); 1402 duration = timestamp - thread_event_start_times_[thread_id].top();
1274 thread_event_start_times_[thread_id].pop(); 1403 thread_event_start_times_[thread_id].pop();
1275 } 1404 }
1276 1405
1277 std::string thread_name = thread_names_[thread_id]; 1406 std::string thread_name = thread_names_[thread_id];
1278 if (thread_colors_.find(thread_name) == thread_colors_.end()) 1407 if (thread_colors_.find(thread_name) == thread_colors_.end())
(...skipping 107 matching lines...) Expand 10 before | Expand all | Expand 10 after
1386 thread_id, 1515 thread_id,
1387 TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA, 1516 TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
1388 &g_category_group_enabled[g_category_metadata], 1517 &g_category_group_enabled[g_category_metadata],
1389 metadata_name, trace_event_internal::kNoEventId, 1518 metadata_name, trace_event_internal::kNoEventId,
1390 num_args, &arg_name, &arg_type, &arg_value, NULL, 1519 num_args, &arg_name, &arg_type, &arg_value, NULL,
1391 TRACE_EVENT_FLAG_NONE)); 1520 TRACE_EVENT_FLAG_NONE));
1392 } 1521 }
1393 1522
1394 } 1523 }
1395 1524
1396 void TraceLog::AddMetadataEvents() { 1525 void TraceLog::AddMetadataEvents(unsigned char flags) {
1397 lock_.AssertAcquired(); 1526 lock_.AssertAcquired();
1398 1527
1528 TraceBuffer* logged_events = (flags & TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING) ?
1529 continuous_sampling_logged_events_.get() : logged_events_.get();
1530
1399 int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId()); 1531 int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
1400 if (process_sort_index_ != 0) { 1532 if (process_sort_index_ != 0) {
1401 AddMetadataEventToBuffer(logged_events_.get(), 1533 AddMetadataEventToBuffer(logged_events,
1402 current_thread_id, 1534 current_thread_id,
1403 "process_sort_index", "sort_index", 1535 "process_sort_index", "sort_index",
1404 process_sort_index_); 1536 process_sort_index_);
1405 } 1537 }
1406 1538
1407 if (process_name_.size()) { 1539 if (process_name_.size()) {
1408 AddMetadataEventToBuffer(logged_events_.get(), 1540 AddMetadataEventToBuffer(logged_events,
1409 current_thread_id, 1541 current_thread_id,
1410 "process_name", "name", 1542 "process_name", "name",
1411 process_name_); 1543 process_name_);
1412 } 1544 }
1413 1545
1414 if (process_labels_.size() > 0) { 1546 if (process_labels_.size() > 0) {
1415 std::vector<std::string> labels; 1547 std::vector<std::string> labels;
1416 for(base::hash_map<int, std::string>::iterator it = process_labels_.begin(); 1548 for(base::hash_map<int, std::string>::iterator it = process_labels_.begin();
1417 it != process_labels_.end(); 1549 it != process_labels_.end();
1418 it++) { 1550 it++) {
1419 labels.push_back(it->second); 1551 labels.push_back(it->second);
1420 } 1552 }
1421 AddMetadataEventToBuffer(logged_events_.get(), 1553 AddMetadataEventToBuffer(logged_events,
1422 current_thread_id, 1554 current_thread_id,
1423 "process_labels", "labels", 1555 "process_labels", "labels",
1424 JoinString(labels, ',')); 1556 JoinString(labels, ','));
1425 } 1557 }
1426 1558
1427 // Thread sort indices. 1559 // Thread sort indices.
1428 for(hash_map<int, int>::iterator it = thread_sort_indices_.begin(); 1560 for(hash_map<int, int>::iterator it = thread_sort_indices_.begin();
1429 it != thread_sort_indices_.end(); 1561 it != thread_sort_indices_.end();
1430 it++) { 1562 it++) {
1431 if (it->second == 0) 1563 if (it->second == 0)
1432 continue; 1564 continue;
1433 AddMetadataEventToBuffer(logged_events_.get(), 1565 AddMetadataEventToBuffer(logged_events,
1434 it->first, 1566 it->first,
1435 "thread_sort_index", "sort_index", 1567 "thread_sort_index", "sort_index",
1436 it->second); 1568 it->second);
1437 } 1569 }
1438 1570
1439 // Thread names. 1571 // Thread names.
1440 for(hash_map<int, std::string>::iterator it = thread_names_.begin(); 1572 for(hash_map<int, std::string>::iterator it = thread_names_.begin();
1441 it != thread_names_.end(); 1573 it != thread_names_.end();
1442 it++) { 1574 it++) {
1443 if (it->second.empty()) 1575 if (it->second.empty())
1444 continue; 1576 continue;
1445 AddMetadataEventToBuffer(logged_events_.get(), 1577
1578 AddMetadataEventToBuffer(logged_events,
1446 it->first, 1579 it->first,
1447 "thread_name", "name", 1580 "thread_name", "name",
1448 it->second); 1581 it->second);
1449 } 1582 }
1450 } 1583 }
1451 1584
1452 void TraceLog::InstallWaitableEventForSamplingTesting( 1585 void TraceLog::InstallWaitableEventForSamplingTesting(
1453 WaitableEvent* waitable_event) { 1586 WaitableEvent* waitable_event) {
1454 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event); 1587 if (sampling_thread_)
1588 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
1589 }
1590
1591 void TraceLog::InstallWaitableEventForContinuousSamplingTesting(
1592 WaitableEvent* waitable_event) {
1593 if (continuous_sampling_thread_) {
1594 continuous_sampling_thread_->
1595 InstallWaitableEventForSamplingTesting(waitable_event);
1596 }
1455 } 1597 }
1456 1598
1457 void TraceLog::DeleteForTesting() { 1599 void TraceLog::DeleteForTesting() {
1458 DeleteTraceLogForTesting::Delete(); 1600 DeleteTraceLogForTesting::Delete();
1459 } 1601 }
1460 1602
1461 void TraceLog::SetProcessID(int process_id) { 1603 void TraceLog::SetProcessID(int process_id) {
1462 process_id_ = process_id; 1604 process_id_ = process_id;
1463 // Create a FNV hash from the process ID for XORing. 1605 // Create a FNV hash from the process ID for XORing.
1464 // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details. 1606 // See http://isthe.com/chongo/tech/comp/fnv/ for algorithm details.
(...skipping 18 matching lines...) Expand all
1483 if(!current_label.length()) 1625 if(!current_label.length())
1484 return RemoveProcessLabel(label_id); 1626 return RemoveProcessLabel(label_id);
1485 1627
1486 AutoLock lock(lock_); 1628 AutoLock lock(lock_);
1487 process_labels_[label_id] = current_label; 1629 process_labels_[label_id] = current_label;
1488 } 1630 }
1489 1631
1490 void TraceLog::RemoveProcessLabel(int label_id) { 1632 void TraceLog::RemoveProcessLabel(int label_id) {
1491 AutoLock lock(lock_); 1633 AutoLock lock(lock_);
1492 base::hash_map<int, std::string>::iterator it = process_labels_.find( 1634 base::hash_map<int, std::string>::iterator it = process_labels_.find(
1493 label_id); 1635 label_id);
1494 if (it == process_labels_.end()) 1636 if (it == process_labels_.end())
1495 return; 1637 return;
1496 1638
1497 process_labels_.erase(it); 1639 process_labels_.erase(it);
1498 } 1640 }
1499 1641
1500 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) { 1642 void TraceLog::SetThreadSortIndex(PlatformThreadId thread_id, int sort_index) {
1501 AutoLock lock(lock_); 1643 AutoLock lock(lock_);
1502 thread_sort_indices_[static_cast<int>(thread_id)] = sort_index; 1644 thread_sort_indices_[static_cast<int>(thread_id)] = sort_index;
1503 } 1645 }
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after
1679 if (!category_group_enabled_) { 1821 if (!category_group_enabled_) {
1680 category_group_enabled_ = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("gpu"); 1822 category_group_enabled_ = TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("gpu");
1681 TRACE_EVENT_API_ATOMIC_STORE( 1823 TRACE_EVENT_API_ATOMIC_STORE(
1682 *event_uid, 1824 *event_uid,
1683 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(category_group_enabled_)); 1825 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(category_group_enabled_));
1684 } 1826 }
1685 if (*category_group_enabled_) { 1827 if (*category_group_enabled_) {
1686 name_ = name; 1828 name_ = name;
1687 TRACE_EVENT_API_ADD_TRACE_EVENT( 1829 TRACE_EVENT_API_ADD_TRACE_EVENT(
1688 TRACE_EVENT_PHASE_BEGIN, // phase 1830 TRACE_EVENT_PHASE_BEGIN, // phase
1689 category_group_enabled_, // category enabled 1831 category_group_enabled_, // category enabled
1690 name, // name 1832 name, // name
1691 0, // id 1833 0, // id
1692 0, // num_args 1834 0, // num_args
1693 NULL, // arg_names 1835 NULL, // arg_names
1694 NULL, // arg_types 1836 NULL, // arg_types
1695 NULL, // arg_values 1837 NULL, // arg_values
1696 NULL, // convertable_values 1838 NULL, // convertable_values
1697 TRACE_EVENT_FLAG_NONE); // flags 1839 TRACE_EVENT_FLAG_NONE); // flags
1698 } else { 1840 } else {
1699 category_group_enabled_ = NULL; 1841 category_group_enabled_ = NULL;
(...skipping 10 matching lines...) Expand all
1710 0, // num_args 1852 0, // num_args
1711 NULL, // arg_names 1853 NULL, // arg_names
1712 NULL, // arg_types 1854 NULL, // arg_types
1713 NULL, // arg_values 1855 NULL, // arg_values
1714 NULL, // convertable values 1856 NULL, // convertable values
1715 TRACE_EVENT_FLAG_NONE); // flags 1857 TRACE_EVENT_FLAG_NONE); // flags
1716 } 1858 }
1717 } 1859 }
1718 1860
1719 } // namespace trace_event_internal 1861 } // namespace trace_event_internal
OLDNEW
« no previous file with comments | « base/debug/trace_event_impl.h ('k') | base/debug/trace_event_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698