Chromium Code Reviews

Unified Diff: base/trace_event/trace_log.cc

Issue 1345223004: Remove trace event overhead reporting feature (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Remove ThreadLocalEventBuffer::event_count_ Created 5 years, 3 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/trace_log.h"

 #include <algorithm>
 #include <cmath>

 #include "base/base_switches.h"
(...skipping 61 matching lines...)
 static_assert(
     kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex,
     "Too many vector buffer chunks");
 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;

 // Can store results for 30 seconds with 1 ms sampling interval.
 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
 const size_t kEchoToConsoleTraceEventBufferChunks = 256;

-// The overhead of TraceEvent above this threshold will be reported in the
-// trace.
-const int kOverheadReportThresholdInMicroseconds = 50;
 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
 const int kThreadFlushTimeoutMs = 3000;

 #if !defined(OS_NACL)
 // These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
 #endif

 #define MAX_CATEGORY_GROUPS 100

 // Parallel arrays g_category_groups and g_category_group_enabled are separate
 // so that a pointer to a member of g_category_group_enabled can be easily
 // converted to an index into g_category_groups. This allows macros to deal
 // only with char enabled pointers from g_category_group_enabled, and we can
 // convert internally to determine the category name from the char enabled
 // pointer.
 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
     "toplevel",
     "tracing already shutdown",
     "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
-    "__metadata",
-    // For reporting trace_event overhead. For thread local event buffers only.
-    "trace_event_overhead"};
+    "__metadata"};

 // The enabled flag is char instead of bool so that the API can be used from C.
 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
 // Indexes here have to match the g_category_groups array indexes above.
 const int g_category_already_shutdown = 1;
 const int g_category_categories_exhausted = 2;
 const int g_category_metadata = 3;
-const int g_category_trace_event_overhead = 4;
-const int g_num_builtin_categories = 5;
+const int g_num_builtin_categories = 4;
 // Skip default categories.
 base::subtle::AtomicWord g_category_index = g_num_builtin_categories;

 // The name of the current thread. This is used to decide if the current
 // thread name has changed. We combine all the seen thread names into the
 // output name for the thread.
 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name =
     LAZY_INSTANCE_INITIALIZER;

 ThreadTicks ThreadNow() {
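
Note: the parallel-array comment above describes the trick TraceLog relies on to map an enabled-flag pointer back to its category name. A minimal, self-contained sketch of that pointer-to-index conversion, with simplified stand-in arrays and a hypothetical GetCategoryGroupName helper (the real TraceLog member differs in details):

#include <cstdio>

#define MAX_CATEGORY_GROUPS 100

const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
    "toplevel", "tracing already shutdown",
    "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
    "__metadata"};
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};

const char* GetCategoryGroupName(const unsigned char* category_group_enabled) {
  // Because the two arrays are parallel, the offset of the enabled flag
  // within g_category_group_enabled is also the index of the name.
  return g_category_groups[category_group_enabled - g_category_group_enabled];
}

int main() {
  const unsigned char* enabled = &g_category_group_enabled[3];
  printf("%s\n", GetCategoryGroupName(enabled));  // prints "__metadata"
  return 0;
}

Since the arrays share indices, a single pointer subtraction recovers the category; no lookup table or search is needed.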
(...skipping 87 matching lines...)

 class TraceLog::ThreadLocalEventBuffer
     : public MessageLoop::DestructionObserver,
       public MemoryDumpProvider {
  public:
   ThreadLocalEventBuffer(TraceLog* trace_log);
   ~ThreadLocalEventBuffer() override;

   TraceEvent* AddTraceEvent(TraceEventHandle* handle);

-  void ReportOverhead(const TraceTicks& event_timestamp,
-                      const ThreadTicks& event_thread_timestamp);
-
   TraceEvent* GetEventByHandle(TraceEventHandle handle) {
     if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
         handle.chunk_index != chunk_index_)
       return NULL;

     return chunk_->GetEventAt(handle.event_index);
   }

   int generation() const { return generation_; }

  private:
   // MessageLoop::DestructionObserver
   void WillDestroyCurrentMessageLoop() override;

   // MemoryDumpProvider implementation.
   bool OnMemoryDump(const MemoryDumpArgs& args,
                     ProcessMemoryDump* pmd) override;

   void FlushWhileLocked();

   void CheckThisIsCurrentBuffer() const {
     DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
   }

   // Since TraceLog is a leaky singleton, trace_log_ will always be valid
   // as long as the thread exists.
   TraceLog* trace_log_;
   scoped_ptr<TraceBufferChunk> chunk_;
   size_t chunk_index_;
-  int event_count_;
-  TimeDelta overhead_;
   int generation_;

   DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
 };

 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
     : trace_log_(trace_log),
       chunk_index_(0),
-      event_count_(0),
       generation_(trace_log->generation()) {
   // ThreadLocalEventBuffer is created only if the thread has a message loop, so
   // the following message_loop won't be NULL.
   MessageLoop* message_loop = MessageLoop::current();
   message_loop->AddDestructionObserver(this);

   // This is to report the local memory usage when memory-infra is enabled.
   MemoryDumpManager::GetInstance()->RegisterDumpProvider(
       this, ThreadTaskRunnerHandle::Get());

   AutoLock lock(trace_log->lock_);
   trace_log->thread_message_loops_.insert(message_loop);
 }

 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
   CheckThisIsCurrentBuffer();
   MessageLoop::current()->RemoveDestructionObserver(this);
   MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);

-  // Zero event_count_ happens in either of the following cases:
-  // - no event generated for the thread;
-  // - the thread has no message loop;
-  // - trace_event_overhead is disabled.
-  if (event_count_) {
-    InitializeMetadataEvent(AddTraceEvent(NULL),
-                            static_cast<int>(base::PlatformThread::CurrentId()),
-                            "overhead", "average_overhead",
-                            overhead_.InMillisecondsF() / event_count_);
-  }
-
   {
     AutoLock lock(trace_log_->lock_);
     FlushWhileLocked();
     trace_log_->thread_message_loops_.erase(MessageLoop::current());
   }
   trace_log_->thread_local_event_buffer_.Set(NULL);
 }

 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
     TraceEventHandle* handle) {
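
Note: the block removed from the destructor above emitted a per-thread "average_overhead" metadata event at thread exit, computed as the accumulated overhead divided by the event count. A minimal sketch of that bookkeeping, using plain doubles instead of base::TimeDelta (OverheadTally is a hypothetical stand-in, not Chromium API):

#include <cstdio>

struct OverheadTally {
  double overhead_ms = 0;  // accumulated tracing overhead on this thread
  int event_count = 0;     // events recorded on this thread

  void Record(double per_event_overhead_ms) {
    overhead_ms += per_event_overhead_ms;
    event_count++;
  }

  // At thread exit, the average was emitted as an "average_overhead"
  // metadata event (only when at least one event was counted).
  double AverageOrZero() const {
    return event_count ? overhead_ms / event_count : 0;
  }
};

int main() {
  OverheadTally tally;
  tally.Record(0.02);
  tally.Record(0.06);
  printf("average_overhead = %.3f ms\n", tally.AverageOrZero());  // 0.040
  return 0;
}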
(...skipping 13 matching lines...)
     return NULL;

   size_t event_index;
   TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
   if (trace_event && handle)
     MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

   return trace_event;
 }

-void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
-    const TraceTicks& event_timestamp,
-    const ThreadTicks& event_thread_timestamp) {
-  if (!g_category_group_enabled[g_category_trace_event_overhead])
-    return;
-
-  CheckThisIsCurrentBuffer();
-
-  event_count_++;
-  ThreadTicks thread_now = ThreadNow();
-  TraceTicks now = trace_log_->OffsetNow();
-  TimeDelta overhead = now - event_timestamp;
-  if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
-    TraceEvent* trace_event = AddTraceEvent(NULL);
-    if (trace_event) {
-      trace_event->Initialize(
-          static_cast<int>(PlatformThread::CurrentId()),
-          event_timestamp,
-          event_thread_timestamp,
-          TRACE_EVENT_PHASE_COMPLETE,
-          &g_category_group_enabled[g_category_trace_event_overhead],
-          "overhead",
-          trace_event_internal::kNoId,  // id
-          trace_event_internal::kNoId,  // context_id
-          trace_event_internal::kNoId,  // bind_id
-          ::trace_event_internal::kZeroNumArgs,
-          nullptr,
-          nullptr,
-          nullptr,
-          nullptr,
-          TRACE_EVENT_FLAG_NONE);
-      trace_event->UpdateDuration(now, thread_now);
-    }
-  }
-  overhead_ += overhead;
-}
-
 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
   delete this;
 }

 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
                                                     ProcessMemoryDump* pmd) {
   if (!chunk_)
     return true;
   std::string dump_base_name = StringPrintf(
       "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
(...skipping 250 matching lines...)
   } else {
     category_group_enabled =
         &g_category_group_enabled[g_category_categories_exhausted];
   }
   return category_group_enabled;
 }

 void TraceLog::GetKnownCategoryGroups(
     std::vector<std::string>* category_groups) {
   AutoLock lock(lock_);
-  category_groups->push_back(
-      g_category_groups[g_category_trace_event_overhead]);
   size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
   for (size_t i = g_num_builtin_categories; i < category_index; i++)
     category_groups->push_back(g_category_groups[i]);
 }

 void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
   std::vector<EnabledStateObserver*> observer_list;
   {
     AutoLock lock(lock_);

(...skipping 601 matching lines...)
   DCHECK(!timestamp.is_null());

   if (flags & TRACE_EVENT_FLAG_MANGLE_ID) {
     if ((flags & TRACE_EVENT_FLAG_FLOW_IN) ||
         (flags & TRACE_EVENT_FLAG_FLOW_OUT))
       bind_id = MangleEventId(bind_id);
     id = MangleEventId(id);
   }

   TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
-  TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP
-                       ? OffsetNow()
-                       : offset_event_timestamp;
   ThreadTicks thread_now = ThreadNow();

   // |thread_local_event_buffer_| can be null if the current thread doesn't have
   // a message loop or the message loop is blocked.
   InitializeThreadLocalEventBufferIfSupported();
   auto thread_local_event_buffer = thread_local_event_buffer_.Get();

   // Check and update the current thread name only if the event is for the
   // current thread to avoid locks in most cases.
   if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
(...skipping 105 matching lines...)
         subtle::NoBarrier_Load(&event_callback_));
     if (event_callback) {
       event_callback(
           offset_event_timestamp,
           phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
           category_group_enabled, name, id, num_args, arg_names, arg_types,
           arg_values, flags);
     }
   }

-  if (thread_local_event_buffer)
-    thread_local_event_buffer->ReportOverhead(now, thread_now);
-
   return handle;
 }

 // May be called when a COMPLETE event ends and the unfinished event has been
 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
                                             const TraceTicks& timestamp,
                                             TraceEvent* trace_event) {
   AutoLock thread_info_lock(thread_info_lock_);

(...skipping 371 matching lines...)
 }

 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
   if (*category_group_enabled_) {
     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                 event_handle_);
   }
 }

 }  // namespace trace_event_internal
