Chromium Code Reviews

Unified Diff: base/trace_event/trace_log.cc

Issue 2259493003: [tracing] Add trace events filtering predicate for heap profiler (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@heap_prof_filter
Patch Set: Primiano's comments. Created 4 years, 4 months ago
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/trace_log.h"

#include <algorithm>
#include <cmath>
#include <memory>
#include <utility>
(...skipping 115 matching lines...)
  }

 private:
  std::unordered_set<std::string> whitelist_;
};

base::LazyInstance<
    std::list<std::unique_ptr<TraceLog::TraceEventFilter>>>::Leaky
    g_category_group_filter[MAX_CATEGORY_GROUPS] = {LAZY_INSTANCE_INITIALIZER};

+// This filter is used to record trace events as a pseudo stack for the heap
+// profiler. It does not filter out any events from the trace, i.e. the
+// behavior of trace events being added to TraceLog remains the same: the
+// events are added iff they are enabled for recording and not filtered out by
+// any other filter.
+class HeapProfilerFilter : public TraceLog::TraceEventFilter {
+ public:
+  HeapProfilerFilter() {}
+
+  bool FilterTraceEvent(const TraceEvent& trace_event) const override {
+    if (AllocationContextTracker::capture_mode() !=
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
+      return true;
+    }
+
+    // TODO(primiano): Add support for events with copied name crbug.com/581079.
+    if (trace_event.flags() & TRACE_EVENT_FLAG_COPY)
+      return true;
+
+    if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN ||
+        trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PushPseudoStackFrame(trace_event.name());
+    } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) {
+      // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|.
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopPseudoStackFrame(trace_event.name());
+    }
+    // Do not filter out any events; always return true. TraceLog adds the
+    // event only if it is enabled for recording.
+    return true;
+  }
+
+  void EndEvent(const char* name, const char* category_group) override {
+    if (AllocationContextTracker::capture_mode() ==
+        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
+      AllocationContextTracker::GetInstanceForCurrentThread()
+          ->PopPseudoStackFrame(name);
+    }
+  }
+};
+
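For context, the TraceEventFilter interface used above is small: FilterTraceEvent() returns true to keep an event (an event is dropped only when every filter registered for its category returns false), and EndEvent() is notified when a COMPLETE event finishes. The sketch below is illustrative only and is not part of this patch; the class name and the filtered prefix are hypothetical, and it assumes the declarations from trace_log.h plus a default no-op EndEvent().

// Hypothetical example of another TraceEventFilter, shown only to illustrate
// the interface contract; it is not code from this change.
class DropInternalEventsFilter : public TraceLog::TraceEventFilter {
 public:
  bool FilterTraceEvent(const TraceEvent& trace_event) const override {
    // Returning false votes to drop the event. Unlike HeapProfilerFilter,
    // which always returns true because it only observes events, this filter
    // can cause an event to be skipped if no other filter keeps it.
    return strncmp(trace_event.name(), "internal_", 9) != 0;  // <cstring>
  }
};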
TraceLog::TraceEventFilterConstructorForTesting
    g_trace_event_filter_constructor_for_testing = nullptr;

// Indexes here have to match the g_category_groups array indexes above.
const int kCategoryAlreadyShutdown = 1;
const int kCategoryCategoriesExhausted = 2;
const int kCategoryMetadata = 3;
const int kNumBuiltinCategories = 4;
// Skip default categories.
base::subtle::AtomicWord g_category_index = kNumBuiltinCategories;
(...skipping 386 matching lines...)

  // Having a filter is an exceptional case, so we avoid
  // the LazyInstance creation in the common case.
  if (!(g_category_group_filter[category_index] == nullptr))
    g_category_group_filter[category_index].Get().clear();

  for (const auto& event_filter : trace_config_.event_filters()) {
    if (event_filter.IsCategoryGroupEnabled(category_group)) {
      std::unique_ptr<TraceEventFilter> new_filter;

-      if (event_filter.predicate_name() == "event_whitelist_predicate") {
-        new_filter =
-            WrapUnique(new EventNameFilter(event_filter.filter_args()));
+      if (event_filter.predicate_name() ==
+          TraceEventFilter::kEventWhitelistPredicate) {
+        new_filter = MakeUnique<EventNameFilter>(event_filter.filter_args());
+      } else if (event_filter.predicate_name() ==
+                 TraceEventFilter::kHeapProfilerPredicate) {
+        new_filter = MakeUnique<HeapProfilerFilter>();
      } else if (event_filter.predicate_name() == "testing_predicate") {
        CHECK(g_trace_event_filter_constructor_for_testing);
        new_filter = g_trace_event_filter_constructor_for_testing();
      }

      if (new_filter) {
        g_category_group_filter[category_index].Get().push_back(
            std::move(new_filter));
        enabled_flag |= ENABLED_FOR_FILTERING;
      }
(...skipping 780 matching lines...)
  // This is done sooner rather than later, to avoid creating the event and
  // acquiring the lock, which is not needed for ETW as it's already threadsafe.
  if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT)
    TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id,
                                  num_args, arg_names, arg_types, arg_values,
                                  convertable_values);
#endif  // OS_WIN

  std::string console_message;
  std::unique_ptr<TraceEvent> filtered_trace_event;
+  bool disabled_by_filters = false;
  if (*category_group_enabled & ENABLED_FOR_FILTERING) {
    std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent);
    new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now,
                                phase, category_group_enabled, name, scope, id,
                                bind_id, num_args, arg_names, arg_types,
                                arg_values, convertable_values, flags);

    auto filter_list = GetCategoryGroupFilter(category_group_enabled);
    DCHECK(!filter_list->empty());

-    bool should_add_event = false;
+    disabled_by_filters = true;
    for (const auto& trace_event_filter : *filter_list) {
      if (trace_event_filter->FilterTraceEvent(*new_trace_event))
-        should_add_event = true;
+        disabled_by_filters = false;
    }

-    if (should_add_event)
+    if (!disabled_by_filters)
      filtered_trace_event = std::move(new_trace_event);
  }

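The loop above replaces the old should_add_event flag with its negation, disabled_by_filters, so the flag can also be consulted when the category is not enabled for filtering at all (it then stays false). Restated as a free function, illustrative only and with a made-up helper name, the decision is:

// Sketch of the decision made by the loop above: an event is disabled only if
// every filter registered for its category returns false.
bool DisabledByFilters(
    const std::list<std::unique_ptr<TraceLog::TraceEventFilter>>& filters,
    const TraceEvent& event) {
  bool disabled = true;
  for (const auto& filter : filters) {
    // Deliberately no early break: filters with side effects, such as
    // HeapProfilerFilter's pseudo stack push/pop, must see every event.
    if (filter->FilterTraceEvent(event))
      disabled = false;
  }
  return disabled;
}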
-  // Add the trace event if we're either *just* recording (and not filtering)
-  // or if we one of our filters indicates the event should be added.
-  if (((*category_group_enabled & ENABLED_FOR_RECORDING) &&
-       (*category_group_enabled & ENABLED_FOR_FILTERING) == 0) ||
-      filtered_trace_event) {
+  // If enabled for recording, the event is added only if at least one filter
+  // indicates it should be, or if the category is not enabled for filtering.
+  if ((*category_group_enabled & ENABLED_FOR_RECORDING) &&
+      !disabled_by_filters) {
    OptionalAutoLock lock(&lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

(...skipping 42 matching lines...)
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(
          offset_event_timestamp,
          phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase,
          category_group_enabled, name, scope, id, num_args, arg_names,
          arg_types, arg_values, flags);
    }
  }

-  // TODO(primiano): Add support for events with copied name crbug.com/581078
-  if (!(flags & TRACE_EVENT_FLAG_COPY)) {
-    if (AllocationContextTracker::capture_mode() ==
-        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
-      if (phase == TRACE_EVENT_PHASE_BEGIN ||
-          phase == TRACE_EVENT_PHASE_COMPLETE) {
-        AllocationContextTracker::GetInstanceForCurrentThread()
-            ->PushPseudoStackFrame(name);
-      } else if (phase == TRACE_EVENT_PHASE_END) {
-        // The pop for |TRACE_EVENT_PHASE_COMPLETE| events
-        // is in |TraceLog::UpdateTraceEventDuration|.
-        AllocationContextTracker::GetInstanceForCurrentThread()
-            ->PopPseudoStackFrame(name);
-      }
-    }
-  }
-
  return handle;
}

void TraceLog::AddMetadataEvent(
    const unsigned char* category_group_enabled,
    const char* name,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
(...skipping 111 matching lines...)
      trace_event->UpdateDuration(now, thread_now);
#if defined(OS_ANDROID)
      trace_event->SendToATrace();
#endif
    }

    if (trace_options() & kInternalEchoToConsole) {
      console_message =
          EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event);
    }
-
-    if (AllocationContextTracker::capture_mode() ==
-        AllocationContextTracker::CaptureMode::PSEUDO_STACK) {
-      // The corresponding push is in |AddTraceEventWithThreadIdAndTimestamp|.
-      AllocationContextTracker::GetInstanceForCurrentThread()
-          ->PopPseudoStackFrame(name);
-    }
  }

  if (!console_message.empty())
    LOG(ERROR) << console_message;

  if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) {
    EventCallback event_callback = reinterpret_cast<EventCallback>(
        subtle::NoBarrier_Load(&event_callback_));
    if (event_callback) {
      event_callback(
(...skipping 287 matching lines...)
}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}

}  // namespace trace_event_internal