Chromium Code Reviews

Unified Diff: base/trace_event/trace_log.cc

Issue 1923533004: Tracing pre-filtering (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Added test Created 4 years, 7 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/trace_log.h"

 #include <algorithm>
 #include <cmath>
 #include <memory>
 #include <utility>

 #include "base/base_switches.h"
 #include "base/bind.h"
 #include "base/command_line.h"
 #include "base/debug/leak_annotations.h"
 #include "base/lazy_instance.h"
 #include "base/location.h"
 #include "base/macros.h"
+#include "base/memory/ptr_util.h"
 #include "base/memory/ref_counted_memory.h"
 #include "base/memory/singleton.h"
 #include "base/process/process_metrics.h"
 #include "base/stl_util.h"
 #include "base/strings/string_split.h"
 #include "base/strings/string_tokenizer.h"
 #include "base/strings/stringprintf.h"
 #include "base/sys_info.h"
 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
 #include "base/threading/platform_thread.h"
(...skipping 66 matching lines...)
 // convert internally to determine the category name from the char enabled
 // pointer.
 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
     "toplevel",
     "tracing already shutdown",
     "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
     "__metadata"};

 // The enabled flag is char instead of bool so that the API can be used from C.
 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0};
+
+class EventNameFilter : public TraceLog::TraceEventFilter {
+ public:
+  EventNameFilter(const base::DictionaryValue* filter_args) {
+    const base::ListValue* whitelist = nullptr;
+    if (filter_args->GetList("event_name_whitelist", &whitelist)) {
+      for (size_t i = 0; i < whitelist->GetSize(); ++i) {
+        std::string event_name;
+        if (!whitelist->GetString(i, &event_name))
+          continue;
+
+        whitelist_.insert(event_name);
+      }
+    }
+  }
+
+  bool FilterTraceEvent(const TraceEvent& trace_event) const override {
+    return ContainsKey(whitelist_, trace_event.name());
+  }
+
+ private:
+  std::unordered_set<std::string> whitelist_;
+};
+
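Note on usage: this whitelist filter is instantiated from the trace config in UpdateCategoryGroupEnabledFlag below, whenever an event filter names the "event_whitelist_predicate". A minimal sketch of how it might be enabled, assuming TraceConfig accepts its JSON string form and that the "event_filters"/"filter_predicate" keys match what this CL adds to TraceConfig; the "latency" category and "MyEvent" name are made-up examples, not taken from this CL:

  // Hypothetical sketch: enable tracing with an event-name whitelist filter.
  // The JSON keys under "event_filters" are assumed from this CL's TraceConfig
  // changes; category and event names are illustrative only.
  base::trace_event::TraceConfig config(
      "{"
      "  \"included_categories\": [\"latency\"],"
      "  \"event_filters\": [{"
      "    \"filter_predicate\": \"event_whitelist_predicate\","
      "    \"included_categories\": [\"latency\"],"
      "    \"filter_args\": {\"event_name_whitelist\": [\"MyEvent\"]}"
      "  }]"
      "}");
  base::trace_event::TraceLog::GetInstance()->SetEnabled(
      config, base::trace_event::TraceLog::RECORDING_MODE);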
+base::LazyInstance<
+    std::list<std::unique_ptr<TraceLog::TraceEventFilter>>>::Leaky
+    g_category_group_filter[MAX_CATEGORY_GROUPS] = {LAZY_INSTANCE_INITIALIZER};
+
+TraceLog::TraceEventFilterConstructorForTesting
+    g_trace_event_filter_constructor_for_testing = nullptr;
Primiano Tucci (use gerrit) 2016/05/31 15:41:24 I'd move this to be a static field of SetTraceEven
+
 // Indexes here have to match the g_category_groups array indexes above.
 const int g_category_already_shutdown = 1;
 const int g_category_categories_exhausted = 2;
 const int g_category_metadata = 3;
 const int g_num_builtin_categories = 4;
 // Skip default categories.
 base::subtle::AtomicWord g_category_index = g_num_builtin_categories;

 // The name of the current thread. This is used to decide if the current
 // thread name has changed. We combine all the seen thread names into the
(...skipping 57 matching lines...)
                 size_t event_index,
                 TraceEventHandle* handle) {
   DCHECK(chunk_seq);
   DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
   DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
   handle->chunk_seq = chunk_seq;
   handle->chunk_index = static_cast<uint16_t>(chunk_index);
   handle->event_index = static_cast<uint16_t>(event_index);
 }

+uintptr_t GetCategoryIndex(const unsigned char* category_group_enabled) {
+  // Calculate the index of the category group by finding
+  // category_group_enabled in g_category_group_enabled array.
+  uintptr_t category_begin =
+      reinterpret_cast<uintptr_t>(g_category_group_enabled);
+  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
+  DCHECK(category_ptr >= category_begin &&
+         category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
+                                                    MAX_CATEGORY_GROUPS))
+      << "out of bounds category pointer";
+  uintptr_t category_index =
+      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
+
+  return category_index;
+}
+
 }  // namespace

 // A helper class that allows the lock to be acquired in the middle of the scope
 // and unlocks at the end of scope if locked.
 class TraceLog::OptionalAutoLock {
  public:
   explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {}

   ~OptionalAutoLock() {
     if (locked_)
(...skipping 246 matching lines...)
   TraceLog* tracelog = GetInstance();
   if (!tracelog) {
     DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
     return &g_category_group_enabled[g_category_already_shutdown];
   }
   return tracelog->GetCategoryGroupEnabledInternal(category_group);
 }

 const char* TraceLog::GetCategoryGroupName(
     const unsigned char* category_group_enabled) {
-  // Calculate the index of the category group by finding
-  // category_group_enabled in g_category_group_enabled array.
-  uintptr_t category_begin =
-      reinterpret_cast<uintptr_t>(g_category_group_enabled);
-  uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled);
-  DCHECK(category_ptr >= category_begin &&
-         category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled +
-                                                    MAX_CATEGORY_GROUPS))
-      << "out of bounds category pointer";
-  uintptr_t category_index =
-      (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
-  return g_category_groups[category_index];
+  return g_category_groups[GetCategoryIndex(category_group_enabled)];
+}
+
+std::list<std::unique_ptr<TraceLog::TraceEventFilter>>* GetCategoryGroupFilter(
+    const unsigned char* category_group_enabled) {
+  return g_category_group_filter[GetCategoryIndex(category_group_enabled)]
+      .Pointer();
 }

 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
   unsigned char enabled_flag = 0;
   const char* category_group = g_category_groups[category_index];
   if (mode_ == RECORDING_MODE &&
       trace_config_.IsCategoryGroupEnabled(category_group)) {
     enabled_flag |= ENABLED_FOR_RECORDING;
   }

   if (event_callback_ &&
       event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) {
     enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
   }

 #if defined(OS_WIN)
   if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled(
           category_group)) {
     enabled_flag |= ENABLED_FOR_ETW_EXPORT;
   }
 #endif

+  // Having a filter is an exceptional case, so we avoid
+  // the LazyInstance creation in the common case.
+  if (!(g_category_group_filter[category_index] == nullptr))
+    g_category_group_filter[category_index].Get().clear();
+
+  for (const auto& event_filter : trace_config_.event_filters()) {
+    if (event_filter.IsCategoryGroupEnabled(category_group)) {
+      std::unique_ptr<TraceEventFilter> new_filter;
+
+      if (event_filter.predicate_name() == "event_whitelist_predicate") {
+        new_filter =
+            WrapUnique(new EventNameFilter(event_filter.filter_args()));
+      } else if (event_filter.predicate_name() == "testing_predicate") {
+        CHECK(g_trace_event_filter_constructor_for_testing);
+        new_filter = g_trace_event_filter_constructor_for_testing();
+      }
+
+      if (new_filter) {
+        g_category_group_filter[category_index].Get().push_back(
+            std::move(new_filter));
+        enabled_flag |= ENABLED_FOR_FILTERING;
+      }
+    }
+  }
+
   g_category_group_enabled[category_index] = enabled_flag;
 }

 void TraceLog::UpdateCategoryGroupEnabledFlags() {
   size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
   for (size_t i = 0; i < category_index; i++)
     UpdateCategoryGroupEnabledFlag(i);
 }

 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
(...skipping 771 matching lines...)
 #if defined(OS_WIN)
   // This is done sooner rather than later, to avoid creating the event and
   // acquiring the lock, which is not needed for ETW as it's already threadsafe.
   if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
     TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                   num_args, arg_names, arg_types, arg_values,
                                   convertable_values);
 #endif  // OS_WIN

   std::string console_message;
-  if (*category_group_enabled & ENABLED_FOR_RECORDING) {
+  std::unique_ptr<TraceEvent> filtered_trace_event;
+  if (*category_group_enabled & ENABLED_FOR_FILTERING) {
+    std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
Primiano Tucci (use gerrit) 2016/05/31 15:41:24 Can we avoid this to be heap allocated? What I fou
+    new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+                                phase, category_group_enabled, name, scope, id,
+                                bind_id, num_args, arg_names, arg_types,
+                                arg_values, convertable_values, flags);
+
+    auto filter_list = GetCategoryGroupFilter(category_group_enabled);
+    DCHECK(!filter_list->empty());
+
+    bool should_add_event = false;
+    for (const auto& trace_event_filter : *filter_list) {
+      if (trace_event_filter->FilterTraceEvent(*new_trace_event))
+        should_add_event = true;
+    }
+
+    if (should_add_event)
+      filtered_trace_event = std::move(new_trace_event);
+  }
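On the heap allocation flagged in the review comment above: a minimal sketch of the stack-based alternative, assuming a default-constructed TraceEvent is cheap enough to build on the stack and can later be moved into the buffer (the truncated comment appears to question exactly this). It reuses only calls already present in this patch (Initialize, FilterTraceEvent, GetCategoryGroupFilter); it is not the CL's code:

  // Hypothetical alternative: build the candidate event on the stack and only
  // move it into the trace buffer if some filter accepts it.
  TraceEvent candidate;
  candidate.Initialize(thread_id, offset_event_timestamp, thread_now, phase,
                       category_group_enabled, name, scope, id, bind_id,
                       num_args, arg_names, arg_types, arg_values,
                       convertable_values, flags);
  bool should_add_event = false;
  for (const auto& filter : *GetCategoryGroupFilter(category_group_enabled)) {
    if (filter->FilterTraceEvent(candidate))
      should_add_event = true;
  }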
+
+  // Add the trace event if we're either *just* recording (and not filtering)
+  // or if one of our filters indicates the event should be added.
+  if (((*category_group_enabled & ENABLED_FOR_RECORDING) &&
+       (*category_group_enabled & ENABLED_FOR_FILTERING) == 0) ||
+      filtered_trace_event) {
     OptionalAutoLock lock(&lock_);

     TraceEvent* trace_event = NULL;
     if (thread_local_event_buffer) {
       trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
     } else {
       lock.EnsureAcquired();
       trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
     }

     if (trace_event) {
-      trace_event->Initialize(thread_id,
-                              offset_event_timestamp,
-                              thread_now,
-                              phase,
-                              category_group_enabled,
-                              name,
-                              scope,
-                              id,
-                              bind_id,
-                              num_args,
-                              arg_names,
-                              arg_types,
-                              arg_values,
-                              convertable_values,
-                              flags);
+      if (filtered_trace_event) {
+        trace_event->MoveFrom(std::move(filtered_trace_event));
+      } else {
+        trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
+                                phase, category_group_enabled, name, scope, id,
+                                bind_id, num_args, arg_names, arg_types,
+                                arg_values, convertable_values, flags);
+      }

 #if defined(OS_ANDROID)
       trace_event->SendToATrace();
 #endif
     }

     if (trace_options() & kInternalEchoToConsole) {
       console_message = EventToConsoleMessage(
           phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
           timestamp, trace_event);
(...skipping 116 matching lines...)
     log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());

   log << "\x1b[0;m";

   if (phase == TRACE_EVENT_PHASE_BEGIN)
     thread_event_start_times_[thread_id].push(timestamp);

   return log.str();
 }

+void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled,
+                                const char* name,
+                                TraceEventHandle handle) {
+  auto filter_list = GetCategoryGroupFilter(category_group_enabled);
+  DCHECK(!filter_list->empty());
+
+  for (const auto& trace_event_filter : *filter_list) {
+    trace_event_filter->EndEvent(name,
+                                 GetCategoryGroupName(category_group_enabled));
+  }
+}
+
 void TraceLog::UpdateTraceEventDuration(
     const unsigned char* category_group_enabled,
     const char* name,
     TraceEventHandle handle) {
   char category_group_enabled_local = *category_group_enabled;
   if (!category_group_enabled_local)
     return;

   // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
(...skipping 149 matching lines...)
 void TraceLog::WaitSamplingEventForTesting() {
   if (!sampling_thread_)
     return;
   sampling_thread_->WaitSamplingEventForTesting();
 }

 void TraceLog::DeleteForTesting() {
   internal::DeleteTraceLogForTesting::Delete();
 }

+void TraceLog::SetTraceEventFilterConstructorForTesting(
Primiano Tucci (use gerrit) 2016/05/31 15:41:24 Drop this and jusd to TraceEventFilter::factory_fo
+    TraceEventFilterConstructorForTesting predicate) {
+  g_trace_event_filter_constructor_for_testing = predicate;
+}
+
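Both truncated review comments (here and at the declaration of g_trace_event_filter_constructor_for_testing) point toward replacing this setter and the file-scope global with a static field on TraceEventFilter. A hypothetical sketch of that shape; the member name is inferred from the visible fragment "TraceEventFilter::factory_fo..." and is not part of this CL:

  // Hypothetical: let tests inject the factory directly, dropping the setter
  // and the file-scope global.
  class TraceLog::TraceEventFilter {
   public:
    // Consulted by UpdateCategoryGroupEnabledFlag when the config names the
    // "testing_predicate"; set only from tests.
    static TraceEventFilterConstructorForTesting factory_for_testing;
    // ... existing virtual interface (FilterTraceEvent, EndEvent) unchanged.
  };
  // Call sites would then read:
  //   CHECK(TraceEventFilter::factory_for_testing);
  //   new_filter = TraceEventFilter::factory_for_testing();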
 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) {
   return GetEventByHandleInternal(handle, NULL);
 }

 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle,
                                                OptionalAutoLock* lock) {
   if (!handle.chunk_seq)
     return NULL;

   if (thread_local_event_buffer_.Get()) {
(...skipping 171 matching lines...)
 }

 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
   if (*category_group_enabled_) {
     TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                 event_handle_);
   }
 }

 }  // namespace trace_event_internal