Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 #include <memory> | 9 #include <memory> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 88 matching lines...) | |
| 99 "toplevel", | 99 "toplevel", |
| 100 "tracing already shutdown", | 100 "tracing already shutdown", |
| 101 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", | 101 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", |
| 102 "__metadata"}; | 102 "__metadata"}; |
| 103 | 103 |
| 104 // The enabled flag is char instead of bool so that the API can be used from C. | 104 // The enabled flag is char instead of bool so that the API can be used from C. |
| 105 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; | 105 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; |
| 106 | 106 |
| 107 const char kEventNameWhitelist[] = "event_name_whitelist"; | 107 const char kEventNameWhitelist[] = "event_name_whitelist"; |
| 108 | 108 |
| 109 #define MAX_TRACE_EVENT_FILTERS 32 | |
| 110 | |
| 111 // List of TraceEventFilter objects from the most recent tracing session. | |
| 112 base::LazyInstance<std::vector<std::unique_ptr<TraceLog::TraceEventFilter>>>:: | |
| 113 Leaky g_category_group_filters = LAZY_INSTANCE_INITIALIZER; | |
| 114 | |
| 115 // Stores a bitmap of filters enabled for each category group. | |
| 116 uint32_t g_category_group_filters_enabled[MAX_CATEGORY_GROUPS] = {0}; | |
| 117 | |
| 109 class EventNameFilter : public TraceLog::TraceEventFilter { | 118 class EventNameFilter : public TraceLog::TraceEventFilter { |
| 110 public: | 119 public: |
| 111 EventNameFilter(const base::DictionaryValue* filter_args) { | 120 EventNameFilter(const base::DictionaryValue* filter_args) { |
| 112 const base::ListValue* whitelist = nullptr; | 121 const base::ListValue* whitelist = nullptr; |
| 113 if (filter_args->GetList(kEventNameWhitelist, &whitelist)) { | 122 if (filter_args->GetList(kEventNameWhitelist, &whitelist)) { |
| 114 for (size_t i = 0; i < whitelist->GetSize(); ++i) { | 123 for (size_t i = 0; i < whitelist->GetSize(); ++i) { |
| 115 std::string event_name; | 124 std::string event_name; |
| 116 if (!whitelist->GetString(i, &event_name)) | 125 if (!whitelist->GetString(i, &event_name)) |
| 117 continue; | 126 continue; |
| 118 | 127 |
| 119 whitelist_.insert(event_name); | 128 whitelist_.insert(event_name); |
| 120 } | 129 } |
| 121 } | 130 } |
| 122 } | 131 } |
| 123 | 132 |
| 124 bool FilterTraceEvent(const TraceEvent& trace_event) const override { | 133 bool FilterTraceEvent(const TraceEvent& trace_event) const override { |
| 125 return ContainsKey(whitelist_, trace_event.name()); | 134 return ContainsKey(whitelist_, trace_event.name()); |
| 126 } | 135 } |
| 127 | 136 |
| 128 private: | 137 private: |
| 129 std::unordered_set<std::string> whitelist_; | 138 std::unordered_set<std::string> whitelist_; |
| 130 }; | 139 }; |
| 131 | 140 |
| 132 base::LazyInstance< | |
| 133 std::list<std::unique_ptr<TraceLog::TraceEventFilter>>>::Leaky | |
| 134 g_category_group_filter[MAX_CATEGORY_GROUPS] = {LAZY_INSTANCE_INITIALIZER}; | |
| 135 | |
| 136 // This filter is used to record trace events as pseudo stack for the heap | 141 // This filter is used to record trace events as pseudo stack for the heap |
| 137 // profiler. It does not filter out any events from the trace, i.e. the behavior | 142 // profiler. It does not filter out any events from the trace, i.e. the behavior |
| 138 // of trace events being added to TraceLog remains the same: the events are added | 143 // of trace events being added to TraceLog remains the same: the events are added |
| 139 // iff enabled for recording and not filtered out by any other filter. | 144 // iff enabled for recording and not filtered out by any other filter. |
| 140 class HeapProfilerFilter : public TraceLog::TraceEventFilter { | 145 class HeapProfilerFilter : public TraceLog::TraceEventFilter { |
| 141 public: | 146 public: |
| 142 HeapProfilerFilter() {} | 147 HeapProfilerFilter() {} |
| 143 | 148 |
| 144 bool FilterTraceEvent(const TraceEvent& trace_event) const override { | 149 bool FilterTraceEvent(const TraceEvent& trace_event) const override { |
| 145 if (AllocationContextTracker::capture_mode() != | 150 if (AllocationContextTracker::capture_mode() != |
| (...skipping 120 matching lines...) | |
| 266 DCHECK(category_ptr >= category_begin); | 271 DCHECK(category_ptr >= category_begin); |
| 267 DCHECK(category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + | 272 DCHECK(category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + |
| 268 MAX_CATEGORY_GROUPS)) | 273 MAX_CATEGORY_GROUPS)) |
| 269 << "out of bounds category pointer"; | 274 << "out of bounds category pointer"; |
| 270 uintptr_t category_index = | 275 uintptr_t category_index = |
| 271 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); | 276 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); |
| 272 | 277 |
| 273 return category_index; | 278 return category_index; |
| 274 } | 279 } |
| 275 | 280 |
| 281 template <typename Function> | |
| 282 void ForEachCategoryGroupFilter(const unsigned char* category_group_enabled, | |
| 283 Function filter_fn) { | |
| 284 uint32_t filter_bitmap = g_category_group_filters_enabled[GetCategoryIndex( | |
| 285 category_group_enabled)]; | |
| 286 int index = 0; | |
| 287 while (filter_bitmap) { | |
| 288 if (filter_bitmap & 1 && g_category_group_filters.Get()[index]) | |
| 289 filter_fn(g_category_group_filters.Get()[index].get()); | |
| 290 filter_bitmap = filter_bitmap >> 1; | |
| 291 index++; | |
| 292 } | |
| 293 } | |
| 294 | |
| 276 } // namespace | 295 } // namespace |
| 277 | 296 |
| 278 // A helper class that allows the lock to be acquired in the middle of the scope | 297 // A helper class that allows the lock to be acquired in the middle of the scope |
| 279 // and unlocks at the end of scope if locked. | 298 // and unlocks at the end of scope if locked. |
| 280 class TraceLog::OptionalAutoLock { | 299 class TraceLog::OptionalAutoLock { |
| 281 public: | 300 public: |
| 282 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} | 301 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} |
| 283 | 302 |
| 284 ~OptionalAutoLock() { | 303 ~OptionalAutoLock() { |
| 285 if (locked_) | 304 if (locked_) |
| (...skipping 249 matching lines...) | |
| 535 return &g_category_group_enabled[kCategoryAlreadyShutdown]; | 554 return &g_category_group_enabled[kCategoryAlreadyShutdown]; |
| 536 } | 555 } |
| 537 return tracelog->GetCategoryGroupEnabledInternal(category_group); | 556 return tracelog->GetCategoryGroupEnabledInternal(category_group); |
| 538 } | 557 } |
| 539 | 558 |
| 540 const char* TraceLog::GetCategoryGroupName( | 559 const char* TraceLog::GetCategoryGroupName( |
| 541 const unsigned char* category_group_enabled) { | 560 const unsigned char* category_group_enabled) { |
| 542 return g_category_groups[GetCategoryIndex(category_group_enabled)]; | 561 return g_category_groups[GetCategoryIndex(category_group_enabled)]; |
| 543 } | 562 } |
| 544 | 563 |
| 545 std::list<std::unique_ptr<TraceLog::TraceEventFilter>>* GetCategoryGroupFilter( | |
| 546 const unsigned char* category_group_enabled) { | |
| 547 return g_category_group_filter[GetCategoryIndex(category_group_enabled)] | |
| 548 .Pointer(); | |
| 549 } | |
| 550 | |
| 551 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { | 564 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { |
| 552 unsigned char enabled_flag = 0; | 565 unsigned char enabled_flag = 0; |
| 553 const char* category_group = g_category_groups[category_index]; | 566 const char* category_group = g_category_groups[category_index]; |
| 554 if (mode_ == RECORDING_MODE && | 567 if (mode_ == RECORDING_MODE && |
| 555 trace_config_.IsCategoryGroupEnabled(category_group)) { | 568 trace_config_.IsCategoryGroupEnabled(category_group)) { |
| 556 enabled_flag |= ENABLED_FOR_RECORDING; | 569 enabled_flag |= ENABLED_FOR_RECORDING; |
| 557 } | 570 } |
| 558 | 571 |
| 559 if (event_callback_ && | 572 if (event_callback_ && |
| 560 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { | 573 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { |
| 561 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; | 574 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; |
| 562 } | 575 } |
| 563 | 576 |
| 564 #if defined(OS_WIN) | 577 #if defined(OS_WIN) |
| 565 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( | 578 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
| 566 category_group)) { | 579 category_group)) { |
| 567 enabled_flag |= ENABLED_FOR_ETW_EXPORT; | 580 enabled_flag |= ENABLED_FOR_ETW_EXPORT; |
| 568 } | 581 } |
| 569 #endif | 582 #endif |
| 570 | 583 |
| 571 // TODO(primiano): this is a temporary workaround for catapult:#2341, | 584 // TODO(primiano): this is a temporary workaround for catapult:#2341, |
| 572 // to guarantee that metadata events are always added even if the category | 585 // to guarantee that metadata events are always added even if the category |
| 573 // filter is "-*". See crbug.com/618054 for more details and long-term fix. | 586 // filter is "-*". See crbug.com/618054 for more details and long-term fix. |
| 574 if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata")) | 587 if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata")) |
| 575 enabled_flag |= ENABLED_FOR_RECORDING; | 588 enabled_flag |= ENABLED_FOR_RECORDING; |
| 576 | 589 |
| 577 // Having a filter is an exceptional case, so we avoid | 590 uint32_t enabled_filters_bitmap = 0; |
| 578 // the LazyInstance creation in the common case. | 591 unsigned index = 0; |

oystein (OOO til 10th of July), 2016/09/16 19:09:00: nit: We don't use 'unsigned' as a bare type outsid

ssid, 2016/09/21 00:20:26: Done.

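For readers outside the Chromium codebase: the nit refers to the style rule against bare `unsigned`; a sized or standard index type is preferred. A minimal sketch of one plausible resolution follows; the exact type chosen in the landed patch set is not shown in this diff.

```cpp
// Illustrative only: one common way to address the nit, replacing the bare
// 'unsigned' with a standard sized type for the filter index.
size_t index = 0;  // instead of: unsigned index = 0;
```
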
| 579 if (!(g_category_group_filter[category_index] == nullptr)) | |
| 580 g_category_group_filter[category_index].Get().clear(); | |
| 581 | |
| 582 for (const auto& event_filter : trace_config_.event_filters()) { | 592 for (const auto& event_filter : trace_config_.event_filters()) { |
| 583 if (event_filter.IsCategoryGroupEnabled(category_group)) { | 593 if (event_filter.IsCategoryGroupEnabled(category_group)) { |
| 584 std::unique_ptr<TraceEventFilter> new_filter; | 594 enabled_flag |= ENABLED_FOR_FILTERING; |
| 585 | 595 DCHECK(g_category_group_filters.Get()[index]); |
| 586 if (event_filter.predicate_name() == | 596 enabled_filters_bitmap |= 1 << index; |
| 587 TraceEventFilter::kEventWhitelistPredicate) { | 597 } |
| 588 new_filter = MakeUnique<EventNameFilter>(event_filter.filter_args()); | 598 if (index++ >= MAX_TRACE_EVENT_FILTERS) { |
| 589 } else if (event_filter.predicate_name() == | 599 NOTREACHED(); |
| 590 TraceEventFilter::kHeapProfilerPredicate) { | 600 break; |
| 591 new_filter = MakeUnique<HeapProfilerFilter>(); | |
| 592 } else if (event_filter.predicate_name() == "testing_predicate") { | |
| 593 CHECK(g_trace_event_filter_constructor_for_testing); | |
| 594 new_filter = g_trace_event_filter_constructor_for_testing(); | |
| 595 } | |
| 596 | |
| 597 if (new_filter) { | |
| 598 g_category_group_filter[category_index].Get().push_back( | |
| 599 std::move(new_filter)); | |
| 600 enabled_flag |= ENABLED_FOR_FILTERING; | |
| 601 } | |
| 602 } | 601 } |
| 603 } | 602 } |
| 603 g_category_group_filters_enabled[category_index] = enabled_filters_bitmap; | |
| 604 | 604 |
| 605 g_category_group_enabled[category_index] = enabled_flag; | 605 g_category_group_enabled[category_index] = enabled_flag; |
| 606 } | 606 } |
| 607 | 607 |
| 608 void TraceLog::UpdateCategoryGroupEnabledFlags() { | 608 void TraceLog::UpdateCategoryGroupEnabledFlags() { |
| 609 CreateFiltersForTraceConfig(); | |
| 609 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | 610 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); |
| 610 for (size_t i = 0; i < category_index; i++) | 611 for (size_t i = 0; i < category_index; i++) |
| 611 UpdateCategoryGroupEnabledFlag(i); | 612 UpdateCategoryGroupEnabledFlag(i); |
| 612 } | 613 } |
| 613 | 614 |
| 615 void TraceLog::CreateFiltersForTraceConfig() { | |
| 616 // If filters were already added, tracing may already be enabled. The filter | |
| 617 // list cannot be changed while trace events are using it. | |
| 618 if (g_category_group_filters.Get().size()) | |
| 619 return; | |
| 620 | |
| 621 for (auto& event_filter : trace_config_.event_filters()) { | |
| 622 if (g_category_group_filters.Get().size() >= MAX_TRACE_EVENT_FILTERS) { | |
| 623 LOG(ERROR) | |
| 624 << "Too many trace event filters installed in the current session"; | |
| 625 NOTREACHED(); | |

oystein (OOO til 10th of July), 2016/09/16 19:09:00: Just stream the error message into NOTREACHED() in

ssid, 2016/09/21 00:20:26: Done.

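The suggestion above relies on the fact that NOTREACHED(), like the other macros in Chromium's base/logging.h, accepts ostream-style arguments. A hedged sketch of the suggested shape, reusing the names from this patch (the exact wording of the landed change is not shown here):

```cpp
// Sketch of the reviewer's suggestion: fold the LOG(ERROR) message into
// NOTREACHED() instead of emitting the two separately.
if (g_category_group_filters.Get().size() >= MAX_TRACE_EVENT_FILTERS) {
  NOTREACHED()
      << "Too many trace event filters installed in the current session";
  break;
}
```
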
| 626 break; | |
| 627 } | |
| 628 | |
| 629 std::unique_ptr<TraceEventFilter> new_filter; | |
| 630 if (event_filter.predicate_name() == | |
| 631 TraceEventFilter::kEventWhitelistPredicate) { | |
| 632 new_filter = MakeUnique<EventNameFilter>(event_filter.filter_args()); | |
| 633 } else if (event_filter.predicate_name() == | |
| 634 TraceEventFilter::kHeapProfilerPredicate) { | |
| 635 new_filter = MakeUnique<HeapProfilerFilter>(); | |
| 636 } else if (event_filter.predicate_name() == "testing_predicate") { | |
| 637 CHECK(g_trace_event_filter_constructor_for_testing); | |
| 638 new_filter = g_trace_event_filter_constructor_for_testing(); | |
| 639 } else { | |
| 640 NOTREACHED(); | |
| 641 } | |
| 642 g_category_group_filters.Get().push_back(std::move(new_filter)); | |
| 643 } | |
| 644 } | |
| 645 | |
| 614 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { | 646 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { |
| 615 ResetTraceEventSyntheticDelays(); | 647 ResetTraceEventSyntheticDelays(); |
| 616 const TraceConfig::StringList& delays = | 648 const TraceConfig::StringList& delays = |
| 617 trace_config_.GetSyntheticDelayValues(); | 649 trace_config_.GetSyntheticDelayValues(); |
| 618 TraceConfig::StringList::const_iterator ci; | 650 TraceConfig::StringList::const_iterator ci; |
| 619 for (ci = delays.begin(); ci != delays.end(); ++ci) { | 651 for (ci = delays.begin(); ci != delays.end(); ++ci) { |
| 620 StringTokenizer tokens(*ci, ";"); | 652 StringTokenizer tokens(*ci, ";"); |
| 621 if (!tokens.GetNext()) | 653 if (!tokens.GetNext()) |
| 622 continue; | 654 continue; |
| 623 TraceEventSyntheticDelay* delay = | 655 TraceEventSyntheticDelay* delay = |
| (...skipping 93 matching lines...) | |
| 717 if (IsEnabled()) { | 749 if (IsEnabled()) { |
| 718 if (new_options != old_options) { | 750 if (new_options != old_options) { |
| 719 DLOG(ERROR) << "Attempting to re-enable tracing with a different " | 751 DLOG(ERROR) << "Attempting to re-enable tracing with a different " |
| 720 << "set of options."; | 752 << "set of options."; |
| 721 } | 753 } |
| 722 | 754 |
| 723 if (mode != mode_) { | 755 if (mode != mode_) { |
| 724 DLOG(ERROR) << "Attempting to re-enable tracing with a different mode."; | 756 DLOG(ERROR) << "Attempting to re-enable tracing with a different mode."; |
| 725 } | 757 } |
| 726 | 758 |
| 759 if (trace_config.event_filters().size()) { | |
| 760 DLOG(ERROR) << "Adding new filters while tracing was already enabled " | |

oystein (OOO til 10th of July), 2016/09/16 19:09:00: I think DLOG() is discouraged these days because i

ssid, 2016/09/21 00:20:26: Done.

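The comment above is attached to the DLOG(ERROR) statement that continues in the next diff row; DLOG compiles away in release builds, so it can hide the misuse. As an illustration only (the replacement actually landed is not visible in this diff), one common alternative is to assert the invariant with a streamed DCHECK:

```cpp
// Illustrative alternative to the DLOG(ERROR) above; not necessarily the
// exact change made in the final patch set.
DCHECK(trace_config.event_filters().empty())
    << "Adding new filters while tracing was already enabled is not supported.";
```
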
| 761 "is not supported."; | |
| 762 } | |
| 763 | |
| 727 trace_config_.Merge(trace_config); | 764 trace_config_.Merge(trace_config); |
| 728 UpdateCategoryGroupEnabledFlags(); | 765 UpdateCategoryGroupEnabledFlags(); |
| 729 return; | 766 return; |
| 730 } | 767 } |
| 731 | 768 |
| 732 if (dispatching_to_observer_list_) { | 769 if (dispatching_to_observer_list_) { |
| 733 DLOG(ERROR) | 770 DLOG(ERROR) |
| 734 << "Cannot manipulate TraceLog::Enabled state from an observer."; | 771 << "Cannot manipulate TraceLog::Enabled state from an observer."; |
| 735 return; | 772 return; |
| 736 } | 773 } |
| 737 | 774 |
| 738 mode_ = mode; | 775 mode_ = mode; |
| 739 | 776 |
| 740 if (new_options != old_options) { | 777 if (new_options != old_options) { |
| 741 subtle::NoBarrier_Store(&trace_options_, new_options); | 778 subtle::NoBarrier_Store(&trace_options_, new_options); |
| 742 UseNextTraceBuffer(); | 779 UseNextTraceBuffer(); |
| 743 } | 780 } |
| 744 | 781 |
| 745 num_traces_recorded_++; | 782 num_traces_recorded_++; |
| 746 | 783 |
| 784 // Clear all filters from the previous tracing session. These filters are not | |
| 785 // cleared at the end of tracing because threads that hit a trace event while | |
| 786 // tracing is being disabled could still try to use the filters. | |
| 787 g_category_group_filters.Get().clear(); | |
| 788 | |
| 747 trace_config_ = TraceConfig(trace_config); | 789 trace_config_ = TraceConfig(trace_config); |
| 748 UpdateCategoryGroupEnabledFlags(); | 790 UpdateCategoryGroupEnabledFlags(); |
| 749 UpdateSyntheticDelaysFromTraceConfig(); | 791 UpdateSyntheticDelaysFromTraceConfig(); |
| 750 | 792 |
| 751 if (new_options & kInternalEnableSampling) { | 793 if (new_options & kInternalEnableSampling) { |
| 752 sampling_thread_.reset(new TraceSamplingThread); | 794 sampling_thread_.reset(new TraceSamplingThread); |
| 753 sampling_thread_->RegisterSampleBucket( | 795 sampling_thread_->RegisterSampleBucket( |
| 754 &g_trace_state[0], "bucket0", | 796 &g_trace_state[0], "bucket0", |
| 755 Bind(&TraceSamplingThread::DefaultSamplingCallback)); | 797 Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
| 756 sampling_thread_->RegisterSampleBucket( | 798 sampling_thread_->RegisterSampleBucket( |
| (...skipping 633 matching lines...) | |
| 1390 std::string console_message; | 1432 std::string console_message; |
| 1391 std::unique_ptr<TraceEvent> filtered_trace_event; | 1433 std::unique_ptr<TraceEvent> filtered_trace_event; |
| 1392 bool disabled_by_filters = false; | 1434 bool disabled_by_filters = false; |
| 1393 if (*category_group_enabled & ENABLED_FOR_FILTERING) { | 1435 if (*category_group_enabled & ENABLED_FOR_FILTERING) { |
| 1394 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); | 1436 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); |
| 1395 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, | 1437 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
| 1396 phase, category_group_enabled, name, scope, id, | 1438 phase, category_group_enabled, name, scope, id, |
| 1397 bind_id, num_args, arg_names, arg_types, | 1439 bind_id, num_args, arg_names, arg_types, |
| 1398 arg_values, convertable_values, flags); | 1440 arg_values, convertable_values, flags); |
| 1399 | 1441 |
| 1400 auto filter_list = GetCategoryGroupFilter(category_group_enabled); | |
| 1401 DCHECK(!filter_list->empty()); | |
| 1402 | |
| 1403 disabled_by_filters = true; | 1442 disabled_by_filters = true; |
| 1404 for (const auto& trace_event_filter : *filter_list) { | 1443 ForEachCategoryGroupFilter( |
| 1405 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) | 1444 category_group_enabled, [&new_trace_event, &disabled_by_filters]( |
| 1406 disabled_by_filters = false; | 1445 TraceEventFilter* trace_event_filter) { |
| 1407 } | 1446 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) |
| 1408 | 1447 disabled_by_filters = false; |
| 1448 }); | |
| 1409 if (!disabled_by_filters) | 1449 if (!disabled_by_filters) |
| 1410 filtered_trace_event = std::move(new_trace_event); | 1450 filtered_trace_event = std::move(new_trace_event); |
| 1411 } | 1451 } |
| 1412 | 1452 |
| 1413 // If enabled for recording, the event should be added only if one of the | 1453 // If enabled for recording, the event should be added only if one of the |
| 1414 // filters allows it, or the category is not enabled for filtering. | 1454 // filters allows it, or the category is not enabled for filtering. |
| 1415 if ((*category_group_enabled & ENABLED_FOR_RECORDING) && | 1455 if ((*category_group_enabled & ENABLED_FOR_RECORDING) && |
| 1416 !disabled_by_filters) { | 1456 !disabled_by_filters) { |
| 1417 OptionalAutoLock lock(&lock_); | 1457 OptionalAutoLock lock(&lock_); |
| 1418 | 1458 |
| (...skipping 130 matching lines...) | |
| 1549 | 1589 |
| 1550 if (phase == TRACE_EVENT_PHASE_BEGIN) | 1590 if (phase == TRACE_EVENT_PHASE_BEGIN) |
| 1551 thread_event_start_times_[thread_id].push(timestamp); | 1591 thread_event_start_times_[thread_id].push(timestamp); |
| 1552 | 1592 |
| 1553 return log.str(); | 1593 return log.str(); |
| 1554 } | 1594 } |
| 1555 | 1595 |
| 1556 void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled, | 1596 void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled, |
| 1557 const char* name, | 1597 const char* name, |
| 1558 TraceEventHandle handle) { | 1598 TraceEventHandle handle) { |
| 1559 auto filter_list = GetCategoryGroupFilter(category_group_enabled); | 1599 const char* category_name = GetCategoryGroupName(category_group_enabled); |
| 1560 DCHECK(!filter_list->empty()); | 1600 ForEachCategoryGroupFilter( |
| 1561 | 1601 category_group_enabled, |
| 1562 for (const auto& trace_event_filter : *filter_list) { | 1602 [name, category_name](TraceEventFilter* trace_event_filter) { |
| 1563 trace_event_filter->EndEvent(name, | 1603 trace_event_filter->EndEvent(name, category_name); |
| 1564 GetCategoryGroupName(category_group_enabled)); | 1604 }); |
| 1565 } | |
| 1566 } | 1605 } |
| 1567 | 1606 |
| 1568 void TraceLog::UpdateTraceEventDuration( | 1607 void TraceLog::UpdateTraceEventDuration( |
| 1569 const unsigned char* category_group_enabled, | 1608 const unsigned char* category_group_enabled, |
| 1570 const char* name, | 1609 const char* name, |
| 1571 TraceEventHandle handle) { | 1610 TraceEventHandle handle) { |
| 1572 char category_group_enabled_local = *category_group_enabled; | 1611 char category_group_enabled_local = *category_group_enabled; |
| 1573 if (!category_group_enabled_local) | 1612 if (!category_group_enabled_local) |
| 1574 return; | 1613 return; |
| 1575 | 1614 |
| (...skipping 51 matching lines...) | |
| 1627 if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) { | 1666 if (category_group_enabled_local & ENABLED_FOR_EVENT_CALLBACK) { |
| 1628 EventCallback event_callback = reinterpret_cast<EventCallback>( | 1667 EventCallback event_callback = reinterpret_cast<EventCallback>( |
| 1629 subtle::NoBarrier_Load(&event_callback_)); | 1668 subtle::NoBarrier_Load(&event_callback_)); |
| 1630 if (event_callback) { | 1669 if (event_callback) { |
| 1631 event_callback( | 1670 event_callback( |
| 1632 now, TRACE_EVENT_PHASE_END, category_group_enabled, name, | 1671 now, TRACE_EVENT_PHASE_END, category_group_enabled, name, |
| 1633 trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0, | 1672 trace_event_internal::kGlobalScope, trace_event_internal::kNoId, 0, |
| 1634 nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE); | 1673 nullptr, nullptr, nullptr, TRACE_EVENT_FLAG_NONE); |
| 1635 } | 1674 } |
| 1636 } | 1675 } |
| 1676 | |
| 1677 if (category_group_enabled_local & ENABLED_FOR_FILTERING) | |
| 1678 EndFilteredEvent(category_group_enabled, name, handle); | |
| 1637 } | 1679 } |
| 1638 | 1680 |
| 1639 void TraceLog::SetWatchEvent(const std::string& category_name, | 1681 void TraceLog::SetWatchEvent(const std::string& category_name, |
| 1640 const std::string& event_name, | 1682 const std::string& event_name, |
| 1641 const WatchEventCallback& callback) { | 1683 const WatchEventCallback& callback) { |
| 1642 const unsigned char* category = | 1684 const unsigned char* category = |
| 1643 GetCategoryGroupEnabled(category_name.c_str()); | 1685 GetCategoryGroupEnabled(category_name.c_str()); |
| 1644 AutoLock lock(lock_); | 1686 AutoLock lock(lock_); |
| 1645 subtle::NoBarrier_Store(&watch_category_, | 1687 subtle::NoBarrier_Store(&watch_category_, |
| 1646 reinterpret_cast<subtle::AtomicWord>(category)); | 1688 reinterpret_cast<subtle::AtomicWord>(category)); |
| (...skipping 276 matching lines...) | |
| 1923 } | 1965 } |
| 1924 | 1966 |
| 1925 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1967 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
| 1926 if (*category_group_enabled_) { | 1968 if (*category_group_enabled_) { |
| 1927 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1969 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
| 1928 event_handle_); | 1970 event_handle_); |
| 1929 } | 1971 } |
| 1930 } | 1972 } |
| 1931 | 1973 |
| 1932 } // namespace trace_event_internal | 1974 } // namespace trace_event_internal |
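
For reference, the filtering mechanism this patch introduces (a single session-wide filter list in g_category_group_filters plus a per-category bitmap in g_category_group_filters_enabled, walked by ForEachCategoryGroupFilter) can be sketched in isolation with standard C++ only. Everything below is a minimal illustrative model, not Chromium code; the type and function names are made up for the sketch.

```cpp
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <vector>

// Minimal stand-ins for the real TraceLog types; illustrative only.
struct TraceEvent {
  std::string name;
};

class TraceEventFilter {
 public:
  virtual ~TraceEventFilter() = default;
  // Returns true if the event should be kept.
  virtual bool FilterTraceEvent(const TraceEvent& event) const = 0;
};

constexpr size_t kMaxCategories = 100;  // Plays the role of MAX_CATEGORY_GROUPS.
constexpr size_t kMaxFilters = 32;      // One bit per filter in a uint32_t bitmap.

// One shared list of filters for the tracing session...
std::vector<std::unique_ptr<TraceEventFilter>> g_filters;
// ...and, per category, a bitmap saying which of those filters apply to it.
uint32_t g_enabled_filter_bitmap[kMaxCategories] = {0};

// Visits every filter whose bit is set for |category_index|, mirroring
// ForEachCategoryGroupFilter in the patch.
template <typename Function>
void ForEachEnabledFilter(size_t category_index, Function fn) {
  uint32_t bitmap = g_enabled_filter_bitmap[category_index];
  for (size_t i = 0; bitmap; bitmap >>= 1, ++i) {
    if ((bitmap & 1) && i < g_filters.size() && g_filters[i])
      fn(g_filters[i].get());
  }
}

// Mirrors the AddTraceEventWithThreadIdAndTimestamp logic: when at least one
// filter applies to the category, the event is dropped only if every
// applicable filter rejects it.
bool PassesFilters(size_t category_index, const TraceEvent& event) {
  if (g_enabled_filter_bitmap[category_index] == 0)
    return true;  // Corresponds to ENABLED_FOR_FILTERING not being set.
  bool disabled_by_filters = true;
  ForEachEnabledFilter(category_index, [&](TraceEventFilter* filter) {
    if (filter->FilterTraceEvent(event))
      disabled_by_filters = false;
  });
  return !disabled_by_filters;
}
```

The design choice the review converges on is that the filter list is built once per session and only cleared at the start of the next session, so the hot event path can index it through the per-category bitmap without touching the list's structure.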