OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <cmath> | 8 #include <cmath> |
9 #include <memory> | 9 #include <memory> |
10 #include <utility> | 10 #include <utility> |
11 | 11 |
12 #include "base/base_switches.h" | 12 #include "base/base_switches.h" |
13 #include "base/bind.h" | 13 #include "base/bind.h" |
14 #include "base/command_line.h" | 14 #include "base/command_line.h" |
15 #include "base/debug/leak_annotations.h" | 15 #include "base/debug/leak_annotations.h" |
16 #include "base/lazy_instance.h" | 16 #include "base/lazy_instance.h" |
17 #include "base/location.h" | 17 #include "base/location.h" |
18 #include "base/macros.h" | 18 #include "base/macros.h" |
19 #include "base/memory/ptr_util.h" | |
19 #include "base/memory/ref_counted_memory.h" | 20 #include "base/memory/ref_counted_memory.h" |
20 #include "base/memory/singleton.h" | 21 #include "base/memory/singleton.h" |
21 #include "base/process/process_metrics.h" | 22 #include "base/process/process_metrics.h" |
22 #include "base/stl_util.h" | 23 #include "base/stl_util.h" |
23 #include "base/strings/string_split.h" | 24 #include "base/strings/string_split.h" |
24 #include "base/strings/string_tokenizer.h" | 25 #include "base/strings/string_tokenizer.h" |
25 #include "base/strings/stringprintf.h" | 26 #include "base/strings/stringprintf.h" |
26 #include "base/sys_info.h" | 27 #include "base/sys_info.h" |
27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | 28 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" |
28 #include "base/threading/platform_thread.h" | 29 #include "base/threading/platform_thread.h" |
(...skipping 66 matching lines...) | |
95 // convert internally to determine the category name from the char enabled | 96 // convert internally to determine the category name from the char enabled |
96 // pointer. | 97 // pointer. |
97 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { | 98 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { |
98 "toplevel", | 99 "toplevel", |
99 "tracing already shutdown", | 100 "tracing already shutdown", |
100 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", | 101 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", |
101 "__metadata"}; | 102 "__metadata"}; |
102 | 103 |
103 // The enabled flag is char instead of bool so that the API can be used from C. | 104 // The enabled flag is char instead of bool so that the API can be used from C. |
104 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; | 105 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; |
106 | |
107 class TraceEventFilter { | |
108 public: | |
109 TraceEventFilter() {} | |
110 virtual ~TraceEventFilter() {} | |
111 virtual bool FilterTraceEvent(const TraceEvent& trace_event) const = 0; | |
112 | |
113 private: | |
114 DISALLOW_COPY_AND_ASSIGN(TraceEventFilter); | |
115 }; | |
116 | |
117 class EventNameFilter : public TraceEventFilter { | |
118 public: | |
119 EventNameFilter(const base::DictionaryValue* filter_args) { | |
120 const base::ListValue* whitelist = nullptr; | |
121 if (filter_args->GetList("event_name_whitelist", &whitelist)) { | |
122 for (size_t i = 0; i < whitelist->GetSize(); ++i) { | |
123 std::string event_name; | |
124 if (!whitelist->GetString(i, &event_name)) | |
125 continue; | |
126 | |
127 whitelist_.insert(event_name); | |
128 } | |
129 } | |
130 } | |
131 | |
132 bool FilterTraceEvent(const TraceEvent& trace_event) const override { | |
133 return ContainsKey(whitelist_, trace_event.name()); | |
134 } | |
135 | |
136 private: | |
137 std::unordered_set<std::string> whitelist_; | |
138 }; | |
139 | |
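A note on the interface above: a subclass only needs to implement FilterTraceEvent(). As a minimal sketch (hypothetical, not part of this CL), a filter that keeps events by name prefix could look like the following, assuming base::StartsWith from base/strings/string_util.h:

```cpp
// Hypothetical filter sketch: keeps only events whose names start with
// a fixed prefix. PrefixNameFilter is illustrative, not part of this CL.
class PrefixNameFilter : public TraceEventFilter {
 public:
  explicit PrefixNameFilter(const std::string& prefix) : prefix_(prefix) {}

  bool FilterTraceEvent(const TraceEvent& trace_event) const override {
    // trace_event.name() is a const char*; it converts to StringPiece.
    return base::StartsWith(trace_event.name(), prefix_,
                            base::CompareCase::SENSITIVE);
  }

 private:
  const std::string prefix_;
};
```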
140 base::LazyInstance<std::list<std::unique_ptr<TraceEventFilter>>>::Leaky | |
141 g_category_group_filter[MAX_CATEGORY_GROUPS] = {LAZY_INSTANCE_INITIALIZER}; | |
142 | |
105 // Indexes here have to match the g_category_groups array indexes above. | 143 // Indexes here have to match the g_category_groups array indexes above. |
106 const int g_category_already_shutdown = 1; | 144 const int g_category_already_shutdown = 1; |
107 const int g_category_categories_exhausted = 2; | 145 const int g_category_categories_exhausted = 2; |
108 const int g_category_metadata = 3; | 146 const int g_category_metadata = 3; |
109 const int g_num_builtin_categories = 4; | 147 const int g_num_builtin_categories = 4; |
110 // Skip default categories. | 148 // Skip default categories. |
111 base::subtle::AtomicWord g_category_index = g_num_builtin_categories; | 149 base::subtle::AtomicWord g_category_index = g_num_builtin_categories; |
112 | 150 |
113 // The name of the current thread. This is used to decide if the current | 151 // The name of the current thread. This is used to decide if the current |
114 // thread name has changed. We combine all the seen thread names into the | 152 // thread name has changed. We combine all the seen thread names into the |
(...skipping 57 matching lines...) | |
172 size_t event_index, | 210 size_t event_index, |
173 TraceEventHandle* handle) { | 211 TraceEventHandle* handle) { |
174 DCHECK(chunk_seq); | 212 DCHECK(chunk_seq); |
175 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); | 213 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); |
176 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); | 214 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); |
177 handle->chunk_seq = chunk_seq; | 215 handle->chunk_seq = chunk_seq; |
178 handle->chunk_index = static_cast<uint16_t>(chunk_index); | 216 handle->chunk_index = static_cast<uint16_t>(chunk_index); |
179 handle->event_index = static_cast<uint16_t>(event_index); | 217 handle->event_index = static_cast<uint16_t>(event_index); |
180 } | 218 } |
181 | 219 |
220 uintptr_t GetCategoryIndex(const unsigned char* category_group_enabled) { | |
221 // Calculate the index of the category group by finding | |
222 // category_group_enabled in g_category_group_enabled array. | |
223 uintptr_t category_begin = | |
224 reinterpret_cast<uintptr_t>(g_category_group_enabled); | |
225 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); | |
226 DCHECK(category_ptr >= category_begin && | |
227 category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + | |
228 MAX_CATEGORY_GROUPS)) | |
229 << "out of bounds category pointer"; | |
230 uintptr_t category_index = | |
231 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); | |
232 | |
233 return category_index; | |
234 } | |
235 | |
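The pointer arithmetic in GetCategoryIndex() simply inverts &g_category_group_enabled[i]; a sanity check of that round trip (illustrative only):

```cpp
// Every valid enabled-flag pointer maps back to its own index.
for (size_t i = 0; i < MAX_CATEGORY_GROUPS; ++i)
  DCHECK_EQ(i, GetCategoryIndex(&g_category_group_enabled[i]));
```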
182 } // namespace | 236 } // namespace |
183 | 237 |
184 // A helper class that allows the lock to be acquired in the middle of the scope | 238 // A helper class that allows the lock to be acquired in the middle of the scope |
185 // and unlocks at the end of scope if locked. | 239 // and unlocks at the end of scope if locked. |
186 class TraceLog::OptionalAutoLock { | 240 class TraceLog::OptionalAutoLock { |
187 public: | 241 public: |
188 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} | 242 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} |
189 | 243 |
190 ~OptionalAutoLock() { | 244 ~OptionalAutoLock() { |
191 if (locked_) | 245 if (locked_) |
(...skipping 246 matching lines...) | |
438 TraceLog* tracelog = GetInstance(); | 492 TraceLog* tracelog = GetInstance(); |
439 if (!tracelog) { | 493 if (!tracelog) { |
440 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); | 494 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); |
441 return &g_category_group_enabled[g_category_already_shutdown]; | 495 return &g_category_group_enabled[g_category_already_shutdown]; |
442 } | 496 } |
443 return tracelog->GetCategoryGroupEnabledInternal(category_group); | 497 return tracelog->GetCategoryGroupEnabledInternal(category_group); |
444 } | 498 } |
445 | 499 |
446 const char* TraceLog::GetCategoryGroupName( | 500 const char* TraceLog::GetCategoryGroupName( |
447 const unsigned char* category_group_enabled) { | 501 const unsigned char* category_group_enabled) { |
448 // Calculate the index of the category group by finding | 502 return g_category_groups[GetCategoryIndex(category_group_enabled)]; |
449 // category_group_enabled in g_category_group_enabled array. | 503 } |
450 uintptr_t category_begin = | 504 |
451 reinterpret_cast<uintptr_t>(g_category_group_enabled); | 505 std::list<std::unique_ptr<TraceEventFilter>>* GetCategoryGroupFilter( |
452 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); | 506 const unsigned char* category_group_enabled) { |
453 DCHECK(category_ptr >= category_begin && | 507 return g_category_group_filter[GetCategoryIndex(category_group_enabled)] |
454 category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + | 508 .Pointer(); |
455 MAX_CATEGORY_GROUPS)) | |
456 << "out of bounds category pointer"; | |
457 uintptr_t category_index = | |
458 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); | |
459 return g_category_groups[category_index]; | |
460 } | 509 } |
461 | 510 |
462 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { | 511 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { |
463 unsigned char enabled_flag = 0; | 512 unsigned char enabled_flag = 0; |
464 const char* category_group = g_category_groups[category_index]; | 513 const char* category_group = g_category_groups[category_index]; |
465 if (mode_ == RECORDING_MODE && | 514 if (mode_ == RECORDING_MODE && |
466 trace_config_.IsCategoryGroupEnabled(category_group)) { | 515 trace_config_.IsCategoryGroupEnabled(category_group)) { |
467 enabled_flag |= ENABLED_FOR_RECORDING; | 516 enabled_flag |= ENABLED_FOR_RECORDING; |
468 } | 517 } |
469 | 518 |
470 if (event_callback_ && | 519 if (event_callback_ && |
471 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { | 520 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { |
472 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; | 521 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; |
473 } | 522 } |
474 | 523 |
475 #if defined(OS_WIN) | 524 #if defined(OS_WIN) |
476 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( | 525 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
477 category_group)) { | 526 category_group)) { |
478 enabled_flag |= ENABLED_FOR_ETW_EXPORT; | 527 enabled_flag |= ENABLED_FOR_ETW_EXPORT; |
479 } | 528 } |
480 #endif | 529 #endif |
481 | 530 |
531 // Having a filter is an exceptional case, so we avoid | |
532 // the LazyInstance creation in the common case. | |
533 if (!(g_category_group_filter[category_index] == nullptr)) | |
534 g_category_group_filter[category_index].Get().clear(); | |
535 | |
536 for (const auto& event_filter : trace_config_.event_filters()) { | |
537 if (event_filter.IsCategoryGroupEnabled(category_group)) { | |
538 std::unique_ptr<TraceEventFilter> new_filter; | |
539 | |
540 if (event_filter.predicate_name() == "event_whitelist_predicate") { | |
541 new_filter = | |
542 WrapUnique(new EventNameFilter(event_filter.filter_args())); | |
543 } | |
544 | |
545 if (new_filter) { | |
546 g_category_group_filter[category_index].Get().push_back( | |
547 std::move(new_filter)); | |
548 enabled_flag |= ENABLED_FOR_FILTERING; | |
549 } | |
550 } | |
551 } | |
552 | |
482 g_category_group_enabled[category_index] = enabled_flag; | 553 g_category_group_enabled[category_index] = enabled_flag; |
483 } | 554 } |
484 | 555 |
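For context, a sketch of how the filter path above could be activated from a TraceConfig. "event_whitelist_predicate", "event_name_whitelist", and event_filters() appear in this CL; the JSON key names "event_filters", "filter_predicate", and "included_categories", as well as the event name, are assumptions about the config schema:

```cpp
// Hypothetical usage sketch; schema key names and event name are assumed.
base::trace_event::TraceConfig config(R"({
  "included_categories": ["toplevel"],
  "event_filters": [{
    "filter_predicate": "event_whitelist_predicate",
    "included_categories": ["toplevel"],
    "filter_args": { "event_name_whitelist": ["MessageLoop::RunTask"] }
  }]
})");
base::trace_event::TraceLog::GetInstance()->SetEnabled(
    config, base::trace_event::TraceLog::RECORDING_MODE);
```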
485 void TraceLog::UpdateCategoryGroupEnabledFlags() { | 556 void TraceLog::UpdateCategoryGroupEnabledFlags() { |
486 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | 557 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); |
487 for (size_t i = 0; i < category_index; i++) | 558 for (size_t i = 0; i < category_index; i++) |
488 UpdateCategoryGroupEnabledFlag(i); | 559 UpdateCategoryGroupEnabledFlag(i); |
489 } | 560 } |
490 | 561 |
491 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { | 562 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { |
(...skipping 771 matching lines...) | |
1263 #if defined(OS_WIN) | 1334 #if defined(OS_WIN) |
1264 // This is done sooner rather than later, to avoid creating the event and | 1335 // This is done sooner rather than later, to avoid creating the event and |
1265 // acquiring the lock, which is not needed for ETW as it's already threadsafe. | 1336 // acquiring the lock, which is not needed for ETW as it's already threadsafe. |
1266 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) | 1337 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) |
1267 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, | 1338 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, |
1268 num_args, arg_names, arg_types, arg_values, | 1339 num_args, arg_names, arg_types, arg_values, |
1269 convertable_values); | 1340 convertable_values); |
1270 #endif // OS_WIN | 1341 #endif // OS_WIN |
1271 | 1342 |
1272 std::string console_message; | 1343 std::string console_message; |
1273 if (*category_group_enabled & ENABLED_FOR_RECORDING) { | 1344 std::unique_ptr<TraceEvent> filtered_trace_event; |
1345 if (*category_group_enabled & ENABLED_FOR_FILTERING) { | |
1346 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); | |
1347 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, | |
1348 phase, category_group_enabled, name, scope, id, | |
1349 bind_id, num_args, arg_names, arg_types, | |
1350 arg_values, convertable_values, flags); | |
1351 | |
1352 auto filter_list = GetCategoryGroupFilter(category_group_enabled); | |
1353 DCHECK(!filter_list->empty()); | |
1354 | |
1355 bool should_add_event = false; | |
1356 for (const auto& trace_event_filter : *filter_list) { | |
1357 if (trace_event_filter.get()->FilterTraceEvent(*new_trace_event)) | |
ssid 2016/05/20 21:58:42: I don't think ".get()" is needed here. Just trace_
oystein (OOO til 10th of July) 2016/05/24 01:05:13: Done.
1358 should_add_event = true; | |
1359 } | |
1360 | |
1361 if (should_add_event) | |
1362 filtered_trace_event = std::move(new_trace_event); | |
1363 } | |
1364 | |
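Factored out, the admission check that follows reduces to this predicate (hypothetical helper, for illustration only):

```cpp
// An event is kept if recording is on and no filter applies to its
// category, or if at least one installed filter accepted it.
bool ShouldAddEvent(unsigned char enabled, bool passed_a_filter) {
  const bool recording = (enabled & ENABLED_FOR_RECORDING) != 0;
  const bool filtering = (enabled & ENABLED_FOR_FILTERING) != 0;
  return (recording && !filtering) || (filtering && passed_a_filter);
}
```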
1365 // Add the trace event if we're either *just* recording (and not filtering) | |
1366 // or if one of our filters indicates the event should be added. | |
1367 if (((*category_group_enabled & ENABLED_FOR_RECORDING) && | |
1368 (*category_group_enabled & ENABLED_FOR_FILTERING) == 0) || | |
1369 filtered_trace_event) { | |
1274 OptionalAutoLock lock(&lock_); | 1370 OptionalAutoLock lock(&lock_); |
1275 | 1371 |
1276 TraceEvent* trace_event = NULL; | 1372 TraceEvent* trace_event = NULL; |
1277 if (thread_local_event_buffer) { | 1373 if (thread_local_event_buffer) { |
1278 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); | 1374 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
1279 } else { | 1375 } else { |
1280 lock.EnsureAcquired(); | 1376 lock.EnsureAcquired(); |
1281 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); | 1377 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); |
1282 } | 1378 } |
1283 | 1379 |
1284 if (trace_event) { | 1380 if (trace_event) { |
1285 trace_event->Initialize(thread_id, | 1381 if (filtered_trace_event) { |
1286 offset_event_timestamp, | 1382 trace_event->MoveFrom(std::move(filtered_trace_event)); |
1287 thread_now, | 1383 } else { |
1288 phase, | 1384 trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
1289 category_group_enabled, | 1385 phase, category_group_enabled, name, scope, id, |
1290 name, | 1386 bind_id, num_args, arg_names, arg_types, |
1291 scope, | 1387 arg_values, convertable_values, flags); |
1292 id, | 1388 } |
1293 bind_id, | |
1294 num_args, | |
1295 arg_names, | |
1296 arg_types, | |
1297 arg_values, | |
1298 convertable_values, | |
1299 flags); | |
1300 | 1389 |
1301 #if defined(OS_ANDROID) | 1390 #if defined(OS_ANDROID) |
1302 trace_event->SendToATrace(); | 1391 trace_event->SendToATrace(); |
1303 #endif | 1392 #endif |
1304 } | 1393 } |
1305 | 1394 |
1306 if (trace_options() & kInternalEchoToConsole) { | 1395 if (trace_options() & kInternalEchoToConsole) { |
1307 console_message = EventToConsoleMessage( | 1396 console_message = EventToConsoleMessage( |
1308 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, | 1397 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, |
1309 timestamp, trace_event); | 1398 timestamp, trace_event); |
(...skipping 476 matching lines...) | |
1786 } | 1875 } |
1787 | 1876 |
1788 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1877 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
1789 if (*category_group_enabled_) { | 1878 if (*category_group_enabled_) { |
1790 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1879 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
1791 event_handle_); | 1880 event_handle_); |
1792 } | 1881 } |
1793 } | 1882 } |
1794 | 1883 |
1795 } // namespace trace_event_internal | 1884 } // namespace trace_event_internal |