Chromium Code Reviews

Unified Diff: base/trace_event/trace_log.cc

Issue 1345223004: Remove trace event overhead reporting feature (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 3 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/trace_log.h"

 #include <algorithm>
 #include <cmath>

 #include "base/base_switches.h"
(...skipping 61 matching lines...)
 static_assert(
     kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
     "Too many vector buffer chunks");
 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

 // Can store results for 30 seconds with 1 ms sampling interval.
 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
 const size_t kEchoToConsoleTraceEventBufferChunks = 256;

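A sanity check on the sizing above (a sketch; kTraceBufferChunkSize is defined in trace_buffer.h, and the value of 64 events per chunk assumed here is not shown in this diff): 30 seconds of samples at 1 ms each is 30000 events, so kMonitorTraceEventBufferChunks = 30000 / 64 = 468 chunks after integer division.
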
-// The overhead of TraceEvent above this threshold will be reported in the
-// trace.
-const int kOverheadReportThresholdInMicroseconds = 50;
 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
 const int kThreadFlushTimeoutMs = 3000;

 #if !defined(OS_NACL)
 // These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
 #endif

 #define MAX_CATEGORY_GROUPS 100

 // Parallel arrays g_category_groups and g_category_group_enabled are separate
 // so that a pointer to a member of g_category_group_enabled can be easily
 // converted to an index into g_category_groups. This allows macros to deal
 // only with char enabled pointers from g_category_group_enabled, and we can
 // convert internally to determine the category name from the char enabled
 // pointer.
 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
     "toplevel",
     "tracing already shutdown",
     "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
105 "__metadata", 102 "__metadata"};
106 // For reporting trace_event overhead. For thread local event buffers only.
107 "trace_event_overhead"};

 // The enabled flag is char instead of bool so that the API can be used from C.
 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
 // Indexes here have to match the g_category_groups array indexes above.
 const int g_category_already_shutdown = 1;
 const int g_category_categories_exhausted = 2;
 const int g_category_metadata = 3;
-const int g_category_trace_event_overhead = 4;
-const int g_num_builtin_categories = 5;
+const int g_num_builtin_categories = 4;
 // Skip default categories.
 base::subtle::AtomicWord g_category_index = g_num_builtin_categories;

 // The name of the current thread. This is used to decide if the current
 // thread name has changed. We combine all the seen thread names into the
 // output name for the thread.
 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
     LAZY_INSTANCE_INITIALIZER;

 ThreadTicks ThreadNow() {
(...skipping 87 matching lines...)

 class TraceLog::ThreadLocalEventBuffer
     : public MessageLoop::DestructionObserver,
       public MemoryDumpProvider {
  public:
   ThreadLocalEventBuffer(TraceLog* trace_log);
   ~ThreadLocalEventBuffer() override;

   TraceEvent* AddTraceEvent(TraceEventHandle* handle);

-  void ReportOverhead(const TraceTicks& event_timestamp,
-                      const ThreadTicks& event_thread_timestamp);
-
   TraceEvent* GetEventByHandle(TraceEventHandle handle) {
     if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
         handle.chunk_index != chunk_index_)
       return NULL;

     return chunk_->GetEventAt(handle.event_index);
   }

   int generation() const { return generation_; }

  private:
   // MessageLoop::DestructionObserver
   void WillDestroyCurrentMessageLoop() override;

   // MemoryDumpProvider implementation.
   bool OnMemoryDump(const MemoryDumpArgs& args,
                     ProcessMemoryDump* pmd) override;

   void FlushWhileLocked();

   void CheckThisIsCurrentBuffer() const {
     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
   }

   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
   // as long as the thread exists.
   TraceLog* trace_log_;
   scoped_ptr<TraceBufferChunk> chunk_;
   size_t chunk_index_;
   int event_count_;
dsinclair 2015/09/17 13:12:28 Can we also remove event_count_?
Xianzhu 2015/09/17 16:01:49 Done.
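(Per the reply above, event_count_ was dropped in a later patch set; the snapshot in this diff still contains it.)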
-  TimeDelta overhead_;
   int generation_;

   DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
 };

 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
     : trace_log_(trace_log),
       chunk_index_(0),
       event_count_(0),
       generation_(trace_log->generation()) {
   // ThreadLocalEventBuffer is created only if the thread has a message loop, so
   // the following message_loop won't be NULL.
   MessageLoop* message_loop = MessageLoop::current();
   message_loop->AddDestructionObserver(this);

   // This is to report the local memory usage when memory-infra is enabled.
   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
       this, ThreadTaskRunnerHandle::Get());

   AutoLock lock(trace_log->lock_);
   trace_log->thread_message_loops_.insert(message_loop);
 }

 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
   CheckThisIsCurrentBuffer();
   MessageLoop::current()->RemoveDestructionObserver(this);
   MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

-  // Zero event_count_ happens in either of the following cases:
-  // - no event generated for the thread;
-  // - the thread has no message loop;
-  // - trace_event_overhead is disabled.
-  if (event_count_) {
-    InitializeMetadataEvent(AddTraceEvent(NULL),
-                            static_cast<int>(base::PlatformThread::CurrentId()),
-                            "overhead", "average_overhead",
-                            overhead_.InMillisecondsF() / event_count_);
-  }
-
   {
     AutoLock lock(trace_log_->lock_);
     FlushWhileLocked();
     trace_log_->thread_message_loops_.erase(MessageLoop::current());
   }
   trace_log_->thread_local_event_buffer_.Set(NULL);
 }

 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
     TraceEventHandle* handle) {
(...skipping 13 matching lines...)
     return NULL;

   size_t event_index;
   TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
   if (trace_event && handle)
     MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

   return trace_event;
 }

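For context, a sketch of the handle round trip through the two methods above (hypothetical caller code; thread_local_buffer stands in for the current thread's ThreadLocalEventBuffer):

    TraceEventHandle handle;
    TraceEvent* trace_event = thread_local_buffer->AddTraceEvent(&handle);
    if (trace_event) {
      // ... initialize the event ...
      // Later lookups go through the handle; GetEventByHandle returns NULL
      // if the chunk's sequence or index no longer matches, i.e. the slot
      // has been recycled.
      TraceEvent* same_event = thread_local_buffer->GetEventByHandle(handle);
    }
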
-void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
-    const TraceTicks& event_timestamp,
-    const ThreadTicks& event_thread_timestamp) {
-  if (!g_category_group_enabled[g_category_trace_event_overhead])
-    return;
-
-  CheckThisIsCurrentBuffer();
-
-  event_count_++;
-  ThreadTicks thread_now = ThreadNow();
-  TraceTicks now = trace_log_->OffsetNow();
-  TimeDelta overhead = now - event_timestamp;
-  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
-    TraceEvent* trace_event = AddTraceEvent(NULL);
-    if (trace_event) {
-      trace_event->Initialize(
-          static_cast<int>(PlatformThread::CurrentId()),
-          event_timestamp,
-          event_thread_timestamp,
-          TRACE_EVENT_PHASE_COMPLETE,
-          &g_category_group_enabled[g_category_trace_event_overhead],
-          "overhead",
-          trace_event_internal::kNoId,  // id
-          trace_event_internal::kNoId,  // context_id
-          trace_event_internal::kNoId,  // bind_id
-          ::trace_event_internal::kZeroNumArgs,
-          nullptr,
-          nullptr,
-          nullptr,
-          nullptr,
-          TRACE_EVENT_FLAG_NONE);
-      trace_event->UpdateDuration(now, thread_now);
-    }
-  }
-  overhead_ += overhead;
-}
-
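For orientation: the removed method above timed the bookkeeping of every traced event by comparing the event's start timestamp with OffsetNow(), accumulated each delta into overhead_, and emitted a synthetic COMPLETE event named "overhead" whenever a single delta reached kOverheadReportThresholdInMicroseconds (50 µs). The running per-thread total fed the average_overhead metadata event removed from the destructor earlier in this file.
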
 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
   delete this;
 }

 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
                                                     ProcessMemoryDump* pmd) {
   if (!chunk_)
     return true;
   std::string dump_base_name = StringPrintf(
       "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
(...skipping 250 matching lines...)
   } else {
     category_group_enabled =
         &g_category_group_enabled[g_category_categories_exhausted];
   }
   return category_group_enabled;
 }

 void TraceLog::GetKnownCategoryGroups(
     std::vector<std::string>* category_groups) {
   AutoLock lock(lock_);
-  category_groups->push_back(
-      g_category_groups[g_category_trace_event_overhead]);
   size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
   for (size_t i = g_num_builtin_categories; i < category_index; i++)
     category_groups->push_back(g_category_groups[i]);
 }

 void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
   std::vector<EnabledStateObserver*> observer_list;
   {
     AutoLock lock(lock_);

(...skipping 598 matching lines...)
   DCHECK(!timestamp.is_null());

   if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
     if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
         (flags & TRACE_EVENT_FLAG_FLOW_OUT))
       bind_id = MangleEventId(bind_id);
     id = MangleEventId(id);
   }

   TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
-  TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP
-                       ? OffsetNow()
-                       : offset_event_timestamp;
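The removed now computation had exactly one consumer, the ReportOverhead call deleted near the end of this function (below), so it goes along with it.
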
   ThreadTicks thread_now = ThreadNow();

   // |thread_local_event_buffer_| can be null if the current thread doesn't have
   // a message loop or the message loop is blocked.
   InitializeThreadLocalEventBufferIfSupported();
   auto thread_local_event_buffer = thread_local_event_buffer_.Get();

   // Check and update the current thread name only if the event is for the
   // current thread to avoid locks in most cases.
   if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
(...skipping 105 matching lines...)
         subtle::NoBarrier_Load(&event_callback_));
   if (event_callback) {
     event_callback(
         offset_event_timestamp,
         phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
         category_group_enabled, name, id, num_args, arg_names, arg_types,
         arg_values, flags);
   }
 }

-  if (thread_local_event_buffer)
-    thread_local_event_buffer->ReportOverhead(now, thread_now);
-
   return handle;
 }

 // May be called when a COMPLETE event ends and the unfinished event has been
 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                             const TraceTicks& timestamp,
                                             TraceEvent* trace_event) {
   AutoLock thread_info_lock(thread_info_lock_);

(...skipping 371 matching lines...)
 }

 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
   if (*category_group_enabled_) {
     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                 event_handle_);
   }
 }

 }  // namespace trace_event_internal
