OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <cmath> | 8 #include <cmath> |
9 #include <memory> | 9 #include <memory> |
10 #include <utility> | 10 #include <utility> |
11 | 11 |
12 #include "base/base_switches.h" | 12 #include "base/base_switches.h" |
13 #include "base/bind.h" | 13 #include "base/bind.h" |
14 #include "base/command_line.h" | 14 #include "base/command_line.h" |
15 #include "base/debug/leak_annotations.h" | 15 #include "base/debug/leak_annotations.h" |
16 #include "base/lazy_instance.h" | 16 #include "base/lazy_instance.h" |
17 #include "base/location.h" | 17 #include "base/location.h" |
18 #include "base/macros.h" | 18 #include "base/macros.h" |
19 #include "base/memory/ptr_util.h" | |
19 #include "base/memory/ref_counted_memory.h" | 20 #include "base/memory/ref_counted_memory.h" |
20 #include "base/memory/singleton.h" | 21 #include "base/memory/singleton.h" |
21 #include "base/process/process_metrics.h" | 22 #include "base/process/process_metrics.h" |
22 #include "base/stl_util.h" | 23 #include "base/stl_util.h" |
23 #include "base/strings/string_split.h" | 24 #include "base/strings/string_split.h" |
24 #include "base/strings/string_tokenizer.h" | 25 #include "base/strings/string_tokenizer.h" |
25 #include "base/strings/stringprintf.h" | 26 #include "base/strings/stringprintf.h" |
26 #include "base/sys_info.h" | 27 #include "base/sys_info.h" |
27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | 28 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" |
28 #include "base/thread_task_runner_handle.h" | 29 #include "base/thread_task_runner_handle.h" |
(...skipping 66 matching lines...)
95 // convert internally to determine the category name from the char enabled | 96 // convert internally to determine the category name from the char enabled |
96 // pointer. | 97 // pointer. |
97 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { | 98 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { |
98 "toplevel", | 99 "toplevel", |
99 "tracing already shutdown", | 100 "tracing already shutdown", |
100 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", | 101 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", |
101 "__metadata"}; | 102 "__metadata"}; |
102 | 103 |
103 // The enabled flag is char instead of bool so that the API can be used from C. | 104 // The enabled flag is char instead of bool so that the API can be used from C. |
104 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; | 105 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; |
106 | |
107 class TraceEventFilter { | |
108 public: | |
109 TraceEventFilter(const base::DictionaryValue* filter_args) {} | |
110 virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0; | |

shatch 2016/05/04 19:57:38:
Did we come to a decision on whether to allow the
111 | |
112 private: | |
113 DISALLOW_COPY_AND_ASSIGN(TraceEventFilter); | |
114 }; | |
115 | |
116 class EventNameFilter : public TraceEventFilter { | |
117 public: | |
118 EventNameFilter(const base::DictionaryValue* filter_args) | |
119 : TraceEventFilter(filter_args) { | |
120 const base::ListValue* whitelist = nullptr; | |
121 if (filter_args->GetList("event_name_whitelist", &whitelist)) { | |
122 for (size_t i = 0; i < whitelist->GetSize(); ++i) { | |
123 std::string event_name; | |
124 if (!whitelist->GetString(i, &event_name)) | |
125 continue; | |
126 | |
127 whitelist_.push_back(event_name); | |
128 } | |
129 } | |
130 } | |
131 | |
132 bool FilterTraceEvent(const TraceEvent& trace_event) const override { | |
133 for (const auto& entry : whitelist_) { | |
134 if (trace_event.name() == entry) | |
135 return true; | |
136 } | |
137 | |
138 return false; | |
139 } | |
140 | |
141 private: | |
142 std::vector<std::string> whitelist_; | |
143 }; | |
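
A note for readers of the new filter classes above: the sketch below is not part of this CL; it only illustrates how the `filter_args` dictionary that `EventNameFilter` reads could be assembled and queried. The `"event_name_whitelist"` key comes from the patch itself, while the `base::Value` setter calls and the event name used are assumptions about the Value API of this era of Chromium.

```cpp
// Illustration only -- not part of the patch under review.
// Builds the dictionary EventNameFilter consumes in its constructor.
base::DictionaryValue filter_args;
std::unique_ptr<base::ListValue> whitelist(new base::ListValue());
whitelist->AppendString("WhitelistedEvent");                    // names to keep
filter_args.Set("event_name_whitelist", std::move(whitelist));  // assumed Set() overload

EventNameFilter filter(&filter_args);
// filter.FilterTraceEvent(event) now returns true only for trace events
// whose name() is exactly "WhitelistedEvent"; every other event is rejected.
```
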
144 | |
145 base::LazyInstance<std::list<std::unique_ptr<TraceEventFilter>>>::Leaky | |
146 g_category_group_filter[MAX_CATEGORY_GROUPS] = {LAZY_INSTANCE_INITIALIZER}; | |
147 | |
105 // Indexes here have to match the g_category_groups array indexes above. | 148 // Indexes here have to match the g_category_groups array indexes above. |
106 const int g_category_already_shutdown = 1; | 149 const int g_category_already_shutdown = 1; |
107 const int g_category_categories_exhausted = 2; | 150 const int g_category_categories_exhausted = 2; |
108 const int g_category_metadata = 3; | 151 const int g_category_metadata = 3; |
109 const int g_num_builtin_categories = 4; | 152 const int g_num_builtin_categories = 4; |
110 // Skip default categories. | 153 // Skip default categories. |
111 base::subtle::AtomicWord g_category_index = g_num_builtin_categories; | 154 base::subtle::AtomicWord g_category_index = g_num_builtin_categories; |
112 | 155 |
113 // The name of the current thread. This is used to decide if the current | 156 // The name of the current thread. This is used to decide if the current |
114 // thread name has changed. We combine all the seen thread names into the | 157 // thread name has changed. We combine all the seen thread names into the |
(...skipping 312 matching lines...)
427 const unsigned char* TraceLog::GetCategoryGroupEnabled( | 470 const unsigned char* TraceLog::GetCategoryGroupEnabled( |
428 const char* category_group) { | 471 const char* category_group) { |
429 TraceLog* tracelog = GetInstance(); | 472 TraceLog* tracelog = GetInstance(); |
430 if (!tracelog) { | 473 if (!tracelog) { |
431 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); | 474 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); |
432 return &g_category_group_enabled[g_category_already_shutdown]; | 475 return &g_category_group_enabled[g_category_already_shutdown]; |
433 } | 476 } |
434 return tracelog->GetCategoryGroupEnabledInternal(category_group); | 477 return tracelog->GetCategoryGroupEnabledInternal(category_group); |
435 } | 478 } |
436 | 479 |
437 const char* TraceLog::GetCategoryGroupName( | 480 namespace { |
438 const unsigned char* category_group_enabled) { | 481 uintptr_t GetCategoryIndex(const unsigned char* category_group_enabled) { |
439 // Calculate the index of the category group by finding | 482 // Calculate the index of the category group by finding |
440 // category_group_enabled in g_category_group_enabled array. | 483 // category_group_enabled in g_category_group_enabled array. |
441 uintptr_t category_begin = | 484 uintptr_t category_begin = |
442 reinterpret_cast<uintptr_t>(g_category_group_enabled); | 485 reinterpret_cast<uintptr_t>(g_category_group_enabled); |
443 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); | 486 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); |
444 DCHECK(category_ptr >= category_begin && | 487 DCHECK(category_ptr >= category_begin && |
445 category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + | 488 category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + |
446 MAX_CATEGORY_GROUPS)) | 489 MAX_CATEGORY_GROUPS)) |
447 << "out of bounds category pointer"; | 490 << "out of bounds category pointer"; |
448 uintptr_t category_index = | 491 uintptr_t category_index = |
449 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); | 492 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); |
450 return g_category_groups[category_index]; | 493 |
494 return category_index; | |
495 } | |
496 }  // namespace | |
497 | |
498 const char* TraceLog::GetCategoryGroupName( | |
499 const unsigned char* category_group_enabled) { | |
500 return g_category_groups[GetCategoryIndex(category_group_enabled)]; | |
501 } | |
502 | |
503 std::list<std::unique_ptr<TraceEventFilter>>* GetCategoryGroupFilter( | |
504 const unsigned char* category_group_enabled) { | |
505 return g_category_group_filter[GetCategoryIndex(category_group_enabled)] | |
506 .Pointer(); | |
451 } | 507 } |
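
The two lookups above share `GetCategoryIndex()`, which is plain pointer arithmetic over `g_category_group_enabled`. A minimal sketch of that arithmetic, using only the globals already declared in this file:

```cpp
// Illustration only: recovering a category index from an enabled-flag pointer.
const unsigned char* enabled = &g_category_group_enabled[3];
uintptr_t index =
    (reinterpret_cast<uintptr_t>(enabled) -
     reinterpret_cast<uintptr_t>(g_category_group_enabled)) /
    sizeof(g_category_group_enabled[0]);  // index == 3
// The same index keys g_category_groups (the category name) and, with this
// patch, g_category_group_filter (the per-category filter list).
```
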
452 | 508 |
453 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { | 509 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { |
454 unsigned char enabled_flag = 0; | 510 unsigned char enabled_flag = 0; |
455 const char* category_group = g_category_groups[category_index]; | 511 const char* category_group = g_category_groups[category_index]; |
456 if (mode_ == RECORDING_MODE && | 512 if (mode_ == RECORDING_MODE && |
457 trace_config_.IsCategoryGroupEnabled(category_group)) { | 513 trace_config_.IsCategoryGroupEnabled(category_group)) { |
458 enabled_flag |= ENABLED_FOR_RECORDING; | 514 enabled_flag |= ENABLED_FOR_RECORDING; |
459 } | 515 } |
460 | 516 |
461 if (event_callback_ && | 517 if (event_callback_ && |
462 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { | 518 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { |
463 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; | 519 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; |
464 } | 520 } |
465 | 521 |
466 #if defined(OS_WIN) | 522 #if defined(OS_WIN) |
467 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( | 523 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
468 category_group)) { | 524 category_group)) { |
469 enabled_flag |= ENABLED_FOR_ETW_EXPORT; | 525 enabled_flag |= ENABLED_FOR_ETW_EXPORT; |
470 } | 526 } |
471 #endif | 527 #endif |
472 | 528 |
529 if (!(g_category_group_filter[category_index] == nullptr)) | |
530 g_category_group_filter[category_index].Get().clear(); | |
531 | |
532 for (const auto& category_event_filter : | |
533 trace_config_.GetCategoryEventFilters()) { | |
534 if (category_event_filter.IsCategoryGroupEnabled(category_group)) { | |
535 std::unique_ptr<TraceEventFilter> new_filter; | |
536 | |
537 if (category_event_filter.predicate_name == "event_whitelist_predicate") { | |
538 new_filter = | |
539 WrapUnique(new EventNameFilter(category_event_filter.args.get())); | |
540 } | |
541 | |
542 if (new_filter) { | |
543 g_category_group_filter[category_index].Get().push_back( | |
544 std::move(new_filter)); | |
545 enabled_flag |= ENABLED_FOR_FILTERING; | |
546 } | |
547 } | |
548 } | |
549 | |
473 g_category_group_enabled[category_index] = enabled_flag; | 550 g_category_group_enabled[category_index] = enabled_flag; |
474 } | 551 } |
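
To see when the filter-installation loop above fires, the sketch below (not part of this CL) enables tracing with a config whose event filter names the `"event_whitelist_predicate"` added in this patch. Only the predicate name and whitelist key are taken from the diff; the surrounding JSON keys (`"event_filters"`, `"filter_predicate"`, `"filter_args"`) are assumptions about how `TraceConfig::GetCategoryEventFilters()` is populated and may not match the final field names.

```cpp
// Illustration only -- assumed trace-config JSON shape; only the quoted
// predicate and whitelist strings come from this CL.
const char kConfig[] = R"({
  "included_categories": ["foo"],
  "event_filters": [{
    "filter_predicate": "event_whitelist_predicate",
    "included_categories": ["foo"],
    "filter_args": { "event_name_whitelist": ["WhitelistedEvent"] }
  }]
})";

base::trace_event::TraceLog::GetInstance()->SetEnabled(
    base::trace_event::TraceConfig(kConfig),
    base::trace_event::TraceLog::RECORDING_MODE);
// UpdateCategoryGroupEnabledFlag() then installs an EventNameFilter for the
// "foo" category and sets ENABLED_FOR_FILTERING on its enabled flag.
```
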
475 | 552 |
476 void TraceLog::UpdateCategoryGroupEnabledFlags() { | 553 void TraceLog::UpdateCategoryGroupEnabledFlags() { |
477 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | 554 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); |
478 for (size_t i = 0; i < category_index; i++) | 555 for (size_t i = 0; i < category_index; i++) |
479 UpdateCategoryGroupEnabledFlag(i); | 556 UpdateCategoryGroupEnabledFlag(i); |
480 } | 557 } |
481 | 558 |
482 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { | 559 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { |
(...skipping 757 matching lines...)
1240 #if defined(OS_WIN) | 1317 #if defined(OS_WIN) |
1241 // This is done sooner rather than later, to avoid creating the event and | 1318 // This is done sooner rather than later, to avoid creating the event and |
1242 // acquiring the lock, which is not needed for ETW as it's already threadsafe. | 1319 // acquiring the lock, which is not needed for ETW as it's already threadsafe. |
1243 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) | 1320 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) |
1244 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, | 1321 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, |
1245 num_args, arg_names, arg_types, arg_values, | 1322 num_args, arg_names, arg_types, arg_values, |
1246 convertable_values); | 1323 convertable_values); |
1247 #endif // OS_WIN | 1324 #endif // OS_WIN |
1248 | 1325 |
1249 std::string console_message; | 1326 std::string console_message; |
1250 if (*category_group_enabled & ENABLED_FOR_RECORDING) { | 1327 if (*category_group_enabled & ENABLED_FOR_FILTERING) { |
1328 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); | |
1329 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, | |
1330 phase, category_group_enabled, name, scope, id, | |
1331 bind_id, num_args, arg_names, arg_types, | |
1332 arg_values, convertable_values, flags); | |
1333 | |
1334 std::list<std::unique_ptr<TraceEventFilter>>* filter_list = | |
1335 GetCategoryGroupFilter(category_group_enabled); | |
1336 DCHECK(filter_list); | |
1337 DCHECK(!filter_list->empty()); | |
1338 | |
1339 bool should_add_event = false; | |
1340 for (const auto& trace_event_filter : *filter_list) { | |
1341 DCHECK(trace_event_filter); | |
1342 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) | |
1343 should_add_event = true; | |
1344 } | |
1345 | |
1346 if (should_add_event) { | |
1347 OptionalAutoLock lock(&lock_); | |
1348 | |
1349 TraceEvent* trace_event = NULL; | |
1350 if (thread_local_event_buffer) { | |
1351 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); | |
1352 } else { | |
1353 lock.EnsureAcquired(); | |
1354 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); | |
1355 } | |
1356 | |
1357 trace_event->MoveFrom(std::move(new_trace_event)); | |
1358 } | |
1359 } else if (*category_group_enabled & ENABLED_FOR_RECORDING) { | |
1251 OptionalAutoLock lock(&lock_); | 1360 OptionalAutoLock lock(&lock_); |
1252 | 1361 |
1253 TraceEvent* trace_event = NULL; | 1362 TraceEvent* trace_event = NULL; |
1254 if (thread_local_event_buffer) { | 1363 if (thread_local_event_buffer) { |
1255 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); | 1364 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
1256 } else { | 1365 } else { |
1257 lock.EnsureAcquired(); | 1366 lock.EnsureAcquired(); |
1258 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); | 1367 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); |
1259 } | 1368 } |
1260 | 1369 |
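
A usage-level view of the new ENABLED_FOR_FILTERING branch above, assuming the whitelist configuration sketched earlier is active for category "foo":

```cpp
// Illustration only: both macros reach AddTraceEvent(); the filter list for
// "foo" accepts the first event and rejects the second before it is ever
// written to the trace buffer.
TRACE_EVENT0("foo", "WhitelistedEvent");  // passes EventNameFilter
TRACE_EVENT0("foo", "SomeOtherEvent");    // no filter returns true; dropped
```
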
(...skipping 481 matching lines...)
1742 } | 1851 } |
1743 | 1852 |
1744 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1853 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
1745 if (*category_group_enabled_) { | 1854 if (*category_group_enabled_) { |
1746 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1855 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
1747 event_handle_); | 1856 event_handle_); |
1748 } | 1857 } |
1749 } | 1858 } |
1750 | 1859 |
1751 } // namespace trace_event_internal | 1860 } // namespace trace_event_internal |