OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <cmath> | 8 #include <cmath> |
9 #include <memory> | 9 #include <memory> |
10 #include <utility> | 10 #include <utility> |
(...skipping 14 matching lines...) | |
25 #include "base/strings/string_split.h" | 25 #include "base/strings/string_split.h" |
26 #include "base/strings/string_tokenizer.h" | 26 #include "base/strings/string_tokenizer.h" |
27 #include "base/strings/stringprintf.h" | 27 #include "base/strings/stringprintf.h" |
28 #include "base/sys_info.h" | 28 #include "base/sys_info.h" |
29 #include "base/threading/platform_thread.h" | 29 #include "base/threading/platform_thread.h" |
30 #include "base/threading/thread_id_name_manager.h" | 30 #include "base/threading/thread_id_name_manager.h" |
31 #include "base/threading/thread_task_runner_handle.h" | 31 #include "base/threading/thread_task_runner_handle.h" |
32 #include "base/threading/worker_pool.h" | 32 #include "base/threading/worker_pool.h" |
33 #include "base/time/time.h" | 33 #include "base/time/time.h" |
34 #include "base/trace_event/category_registry.h" | 34 #include "base/trace_event/category_registry.h" |
35 #include "base/trace_event/event_name_filter.h" | |
35 #include "base/trace_event/heap_profiler.h" | 36 #include "base/trace_event/heap_profiler.h" |
36 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 37 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
38 #include "base/trace_event/heap_profiler_event_filter.h" | |
37 #include "base/trace_event/memory_dump_manager.h" | 39 #include "base/trace_event/memory_dump_manager.h" |
38 #include "base/trace_event/memory_dump_provider.h" | 40 #include "base/trace_event/memory_dump_provider.h" |
39 #include "base/trace_event/process_memory_dump.h" | 41 #include "base/trace_event/process_memory_dump.h" |
40 #include "base/trace_event/trace_buffer.h" | 42 #include "base/trace_event/trace_buffer.h" |
41 #include "base/trace_event/trace_event.h" | 43 #include "base/trace_event/trace_event.h" |
42 #include "base/trace_event/trace_event_synthetic_delay.h" | 44 #include "base/trace_event/trace_event_synthetic_delay.h" |
43 #include "build/build_config.h" | 45 #include "build/build_config.h" |
44 | 46 |
45 #if defined(OS_WIN) | 47 #if defined(OS_WIN) |
46 #include "base/trace_event/trace_event_etw_export_win.h" | 48 #include "base/trace_event/trace_event_etw_export_win.h" |
(...skipping 30 matching lines...) | |
77 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex, | 79 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex, |
78 "Too many vector buffer chunks"); | 80 "Too many vector buffer chunks"); |
79 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; | 81 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; |
80 | 82 |
81 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. | 83 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. |
82 const size_t kEchoToConsoleTraceEventBufferChunks = 256; | 84 const size_t kEchoToConsoleTraceEventBufferChunks = 256; |
83 | 85 |
84 const size_t kTraceEventBufferSizeInBytes = 100 * 1024; | 86 const size_t kTraceEventBufferSizeInBytes = 100 * 1024; |
85 const int kThreadFlushTimeoutMs = 3000; | 87 const int kThreadFlushTimeoutMs = 3000; |
86 | 88 |
87 const char kEventNameWhitelist[] = "event_name_whitelist"; | |
88 | |
89 #define MAX_TRACE_EVENT_FILTERS 32 | 89 #define MAX_TRACE_EVENT_FILTERS 32 |
90 | 90 |
91 // List of TraceEventFilter objects from the most recent tracing session. | 91 // List of TraceEventFilter objects from the most recent tracing session. |
92 base::LazyInstance<std::vector<std::unique_ptr<TraceLog::TraceEventFilter>>>:: | 92 base::LazyInstance<std::vector<std::unique_ptr<TraceEventFilter>>>::Leaky |
93 Leaky g_category_group_filters = LAZY_INSTANCE_INITIALIZER; | 93 g_category_group_filters = LAZY_INSTANCE_INITIALIZER; |
94 | |
95 class EventNameFilter : public TraceLog::TraceEventFilter { | |
96 public: | |
97 EventNameFilter(const base::DictionaryValue* filter_args) { | |
98 const base::ListValue* whitelist = nullptr; | |
99 if (filter_args->GetList(kEventNameWhitelist, &whitelist)) { | |
100 for (size_t i = 0; i < whitelist->GetSize(); ++i) { | |
101 std::string event_name; | |
102 if (!whitelist->GetString(i, &event_name)) | |
103 continue; | |
104 | |
105 whitelist_.insert(event_name); | |
106 } | |
107 } | |
108 } | |
109 | |
110 bool FilterTraceEvent(const TraceEvent& trace_event) const override { | |
111 return ContainsKey(whitelist_, trace_event.name()); | |
112 } | |
113 | |
114 private: | |
115 std::unordered_set<std::string> whitelist_; | |
116 }; | |
117 | |
118 // This filter is used to record trace events as pseudo stack for the heap | |
119 // profiler. It does not filter-out any events from the trace, ie. the behavior | |
120 // of trace events being added to TraceLog remains same: the events are added | |
121 // iff enabled for recording and not filtered-out by any other filter. | |
122 class HeapProfilerFilter : public TraceLog::TraceEventFilter { | |
123 public: | |
124 HeapProfilerFilter() {} | |
125 | |
126 bool FilterTraceEvent(const TraceEvent& trace_event) const override { | |
127 if (AllocationContextTracker::capture_mode() != | |
128 AllocationContextTracker::CaptureMode::PSEUDO_STACK) { | |
129 return true; | |
130 } | |
131 | |
132 // TODO(primiano): Add support for events with copied name crbug.com/581079. | |
133 if (trace_event.flags() & TRACE_EVENT_FLAG_COPY) | |
134 return true; | |
135 | |
136 const char* category_name = | |
137 TraceLog::GetCategoryGroupName(trace_event.category_group_enabled()); | |
138 if (trace_event.phase() == TRACE_EVENT_PHASE_BEGIN || | |
139 trace_event.phase() == TRACE_EVENT_PHASE_COMPLETE) { | |
140 AllocationContextTracker::GetInstanceForCurrentThread() | |
141 ->PushPseudoStackFrame({category_name, trace_event.name()}); | |
142 } else if (trace_event.phase() == TRACE_EVENT_PHASE_END) { | |
143 // The pop for |TRACE_EVENT_PHASE_COMPLETE| events is in |EndEvent|. | |
144 AllocationContextTracker::GetInstanceForCurrentThread() | |
145 ->PopPseudoStackFrame({category_name, trace_event.name()}); | |
146 } | |
147 // Do not filter-out any events and always return true. TraceLog adds the | |
148 // event only if it is enabled for recording. | |
149 return true; | |
150 } | |
151 | |
152 void EndEvent(const char* name, const char* category_group) override { | |
153 if (AllocationContextTracker::capture_mode() == | |
154 AllocationContextTracker::CaptureMode::PSEUDO_STACK) { | |
155 AllocationContextTracker::GetInstanceForCurrentThread() | |
156 ->PopPseudoStackFrame({category_group, name}); | |
157 } | |
158 } | |
159 }; | |
160 | |
161 TraceLog::TraceEventFilterConstructorForTesting | |
162 g_trace_event_filter_constructor_for_testing = nullptr; | |
163 | 94 |
164 // The name of the current thread. This is used to decide if the current | 95 // The name of the current thread. This is used to decide if the current |
165 // thread name has changed. We combine all the seen thread names into the | 96 // thread name has changed. We combine all the seen thread names into the |
166 // output name for the thread. | 97 // output name for the thread. |
167 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name = | 98 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name = |
168 LAZY_INSTANCE_INITIALIZER; | 99 LAZY_INSTANCE_INITIALIZER; |
169 | 100 |
170 ThreadTicks ThreadNow() { | 101 ThreadTicks ThreadNow() { |
171 return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks(); | 102 return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks(); |
172 } | 103 } |
(...skipping 52 matching lines...) | |
225 DCHECK(chunk_seq); | 156 DCHECK(chunk_seq); |
226 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); | 157 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); |
227 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); | 158 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); |
228 DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max()); | 159 DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max()); |
229 handle->chunk_seq = chunk_seq; | 160 handle->chunk_seq = chunk_seq; |
230 handle->chunk_index = static_cast<uint16_t>(chunk_index); | 161 handle->chunk_index = static_cast<uint16_t>(chunk_index); |
231 handle->event_index = static_cast<uint16_t>(event_index); | 162 handle->event_index = static_cast<uint16_t>(event_index); |
232 } | 163 } |
233 | 164 |
234 template <typename Function> | 165 template <typename Function> |
235 void ForEachCategoryGroupFilter(const unsigned char* category_group_enabled, | 166 void ForEachCategoryFilter(const unsigned char* category_group_enabled, |
236 Function filter_fn) { | 167 Function filter_fn) { |
237 const TraceCategory* category = | 168 auto* cat = CategoryRegistry::GetCategoryByStatePtr(category_group_enabled); |
238 CategoryRegistry::GetCategoryByStatePtr(category_group_enabled); | 169 uint32_t filter_bitmap = cat->enabled_filters(); |
oystein (OOO til 10th of July)
2016/12/08 20:40:20
nit: Not convinced auto and the abbreviation makes
Primiano Tucci (use gerrit)
2016/12/09 11:50:09
alright undid the auto+cat
| |
239 uint32_t filter_bitmap = category->enabled_filters(); | 170 for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) { |
240 int index = 0; | |
241 while (filter_bitmap) { | |
242 if (filter_bitmap & 1 && g_category_group_filters.Get()[index]) | 171 if (filter_bitmap & 1 && g_category_group_filters.Get()[index]) |
243 filter_fn(g_category_group_filters.Get()[index].get()); | 172 filter_fn(g_category_group_filters.Get()[index].get()); |
244 filter_bitmap = filter_bitmap >> 1; | |
245 index++; | |
246 } | 173 } |
247 } | 174 } |
248 | 175 |
249 } // namespace | 176 } // namespace |
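For readers skimming the ForEachCategoryFilter rewrite above: the loop relies on bit N of a category's enabled_filters() bitmap selecting entry N of g_category_group_filters. A minimal standalone sketch of that walk, using an illustrative bitmap value rather than anything from this CL:

    #include <cstdint>
    #include <cstdio>

    int main() {
      uint32_t filter_bitmap = 0x5;  // Pretend filters 0 and 2 are enabled for a category.
      for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
        if (filter_bitmap & 1)
          std::printf("would invoke g_category_group_filters[%d]\n", index);
      }
      return 0;
    }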
250 | 177 |
251 // A helper class that allows the lock to be acquired in the middle of the scope | 178 // A helper class that allows the lock to be acquired in the middle of the scope |
252 // and unlocks at the end of scope if locked. | 179 // and unlocks at the end of scope if locked. |
253 class TraceLog::OptionalAutoLock { | 180 class TraceLog::OptionalAutoLock { |
254 public: | 181 public: |
255 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} | 182 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} |
(...skipping 166 matching lines...) | |
422 : enabled_modes_(0), | 349 : enabled_modes_(0), |
423 num_traces_recorded_(0), | 350 num_traces_recorded_(0), |
424 dispatching_to_observer_list_(false), | 351 dispatching_to_observer_list_(false), |
425 process_sort_index_(0), | 352 process_sort_index_(0), |
426 process_id_hash_(0), | 353 process_id_hash_(0), |
427 process_id_(0), | 354 process_id_(0), |
428 trace_options_(kInternalRecordUntilFull), | 355 trace_options_(kInternalRecordUntilFull), |
429 trace_config_(TraceConfig()), | 356 trace_config_(TraceConfig()), |
430 thread_shared_chunk_index_(0), | 357 thread_shared_chunk_index_(0), |
431 generation_(0), | 358 generation_(0), |
432 use_worker_thread_(false) { | 359 use_worker_thread_(false), |
360 filter_factory_for_testing_(nullptr) { | |
433 CategoryRegistry::Initialize(); | 361 CategoryRegistry::Initialize(); |
434 | 362 |
435 #if defined(OS_NACL) // NaCl shouldn't expose the process id. | 363 #if defined(OS_NACL) // NaCl shouldn't expose the process id. |
436 SetProcessID(0); | 364 SetProcessID(0); |
437 #else | 365 #else |
438 SetProcessID(static_cast<int>(GetCurrentProcId())); | 366 SetProcessID(static_cast<int>(GetCurrentProcId())); |
439 #endif | 367 #endif |
440 | 368 |
441 logged_events_.reset(CreateTraceBuffer()); | 369 logged_events_.reset(CreateTraceBuffer()); |
442 | 370 |
(...skipping 66 matching lines...) | |
509 return category->state_ptr(); | 437 return category->state_ptr(); |
510 } | 438 } |
511 | 439 |
512 const char* TraceLog::GetCategoryGroupName( | 440 const char* TraceLog::GetCategoryGroupName( |
513 const unsigned char* category_group_enabled) { | 441 const unsigned char* category_group_enabled) { |
514 return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled) | 442 return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled) |
515 ->name(); | 443 ->name(); |
516 } | 444 } |
517 | 445 |
518 void TraceLog::UpdateCategoryState(TraceCategory* category) { | 446 void TraceLog::UpdateCategoryState(TraceCategory* category) { |
447 lock_.AssertAcquired(); | |
519 DCHECK(category->is_valid()); | 448 DCHECK(category->is_valid()); |
520 unsigned char state_flags = 0; | 449 unsigned char state_flags = 0; |
521 if (enabled_modes_ & RECORDING_MODE && | 450 if (enabled_modes_ & RECORDING_MODE && |
522 trace_config_.IsCategoryGroupEnabled(category->name())) { | 451 trace_config_.IsCategoryGroupEnabled(category->name())) { |
523 state_flags |= TraceCategory::ENABLED_FOR_RECORDING; | 452 state_flags |= TraceCategory::ENABLED_FOR_RECORDING; |
524 } | 453 } |
525 | 454 |
526 // TODO(primiano): this is a temporary workaround for catapult:#2341, | 455 // TODO(primiano): this is a temporary workaround for catapult:#2341, |
527 // to guarantee that metadata events are always added even if the category | 456 // to guarantee that metadata events are always added even if the category |
528 // filter is "-*". See crbug.com/618054 for more details and long-term fix. | 457 // filter is "-*". See crbug.com/618054 for more details and long-term fix. |
(...skipping 20 matching lines...) | |
549 if (index++ >= MAX_TRACE_EVENT_FILTERS) { | 478 if (index++ >= MAX_TRACE_EVENT_FILTERS) { |
550 NOTREACHED(); | 479 NOTREACHED(); |
551 break; | 480 break; |
552 } | 481 } |
553 } | 482 } |
554 category->set_enabled_filters(enabled_filters_bitmap); | 483 category->set_enabled_filters(enabled_filters_bitmap); |
555 category->set_state(state_flags); | 484 category->set_state(state_flags); |
556 } | 485 } |
557 | 486 |
558 void TraceLog::UpdateCategoryRegistry() { | 487 void TraceLog::UpdateCategoryRegistry() { |
488 lock_.AssertAcquired(); | |
559 CreateFiltersForTraceConfig(); | 489 CreateFiltersForTraceConfig(); |
560 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) { | 490 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) { |
561 UpdateCategoryState(&category); | 491 UpdateCategoryState(&category); |
562 } | 492 } |
563 } | 493 } |
564 | 494 |
565 void TraceLog::CreateFiltersForTraceConfig() { | 495 void TraceLog::CreateFiltersForTraceConfig() { |
566 if (!(enabled_modes_ & FILTERING_MODE)) | 496 if (!(enabled_modes_ & FILTERING_MODE)) |
567 return; | 497 return; |
568 | 498 |
569 // Filters were already added and tracing could be enabled. Filters list | 499 // Filters were already added and tracing could be enabled. Filters list |
570 // cannot be changed when trace events are using them. | 500 // cannot be changed when trace events are using them. |
571 if (g_category_group_filters.Get().size()) | 501 if (g_category_group_filters.Get().size()) |
572 return; | 502 return; |
573 | 503 |
574 for (auto& event_filter : enabled_event_filters_) { | 504 for (auto& filter_config : enabled_event_filters_) { |
575 if (g_category_group_filters.Get().size() >= MAX_TRACE_EVENT_FILTERS) { | 505 if (g_category_group_filters.Get().size() >= MAX_TRACE_EVENT_FILTERS) { |
576 NOTREACHED() | 506 NOTREACHED() |
577 << "Too many trace event filters installed in the current session"; | 507 << "Too many trace event filters installed in the current session"; |
578 break; | 508 break; |
579 } | 509 } |
580 | 510 |
581 std::unique_ptr<TraceEventFilter> new_filter; | 511 std::unique_ptr<TraceEventFilter> new_filter; |
582 if (event_filter.predicate_name() == | 512 const std::string predicate_name = filter_config.predicate_name(); |
oystein (OOO til 10th of July)
2016/12/08 20:40:20
reference
Primiano Tucci (use gerrit)
2016/12/09 11:50:10
Oops. well spotted, thanks.
| |
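The "reference" nit above points at the std::string copy taken a few lines up; the suggested fix is presumably to bind a const reference instead, assuming predicate_name() returns a const std::string&:

    const std::string& predicate_name = filter_config.predicate_name();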
583 TraceEventFilter::kEventWhitelistPredicate) { | 513 if (predicate_name == EventNameFilter::kName) { |
584 new_filter = MakeUnique<EventNameFilter>(event_filter.filter_args()); | 514 auto whitelist = MakeUnique<std::unordered_set<std::string>>(); |
585 } else if (event_filter.predicate_name() == | 515 const ListValue* list = nullptr; |
586 TraceEventFilter::kHeapProfilerPredicate) { | 516 if (filter_config.filter_args()->GetList("event_name_whitelist", &list)) { |
oystein (OOO til 10th of July)
2016/12/08 20:40:20
Hrm. I get why you're doing this, but maybe it'd b
Primiano Tucci (use gerrit)
2016/12/09 11:50:10
I don't like too much coupling all that into Trace
| |
587 new_filter = MakeUnique<HeapProfilerFilter>(); | 517 for (size_t i = 0; i < list->GetSize(); ++i) { |
588 } else if (event_filter.predicate_name() == "testing_predicate") { | 518 std::string event_name; |
589 CHECK(g_trace_event_filter_constructor_for_testing); | 519 if (list->GetString(i, &event_name)) |
590 new_filter = g_trace_event_filter_constructor_for_testing(); | 520 whitelist->insert(event_name); |
521 } | |
522 } | |
523 new_filter = MakeUnique<EventNameFilter>(std::move(whitelist)); | |
524 } else if (predicate_name == HeapProfilerEventFilter::kName) { | |
525 new_filter = MakeUnique<HeapProfilerEventFilter>(); | |
591 } else { | 526 } else { |
592 NOTREACHED(); | 527 if (filter_factory_for_testing_) |
528 new_filter = filter_factory_for_testing_(predicate_name); | |
529 CHECK(new_filter) << "Unknown trace filter " << predicate_name; | |
593 } | 530 } |
594 g_category_group_filters.Get().push_back(std::move(new_filter)); | 531 g_category_group_filters.Get().push_back(std::move(new_filter)); |
595 } | 532 } |
596 } | 533 } |
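To make the EventNameFilter branch above concrete, here is a minimal sketch of the object it builds, written as a standalone helper. EventNameFilter, its header, and its whitelist-set constructor come from this CL; the helper name, the sample event name, and the ptr_util.h include for MakeUnique are illustrative assumptions:

    #include <memory>
    #include <string>
    #include <unordered_set>
    #include <utility>

    #include "base/memory/ptr_util.h"
    #include "base/trace_event/event_name_filter.h"

    // Builds a filter that lets through only events whose name is whitelisted.
    std::unique_ptr<base::trace_event::EventNameFilter> MakeWhitelistFilter() {
      auto whitelist = base::MakeUnique<std::unordered_set<std::string>>();
      whitelist->insert("MyEvent");  // Illustrative event name.
      return base::MakeUnique<base::trace_event::EventNameFilter>(std::move(whitelist));
    }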
597 | 534 |
598 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { | 535 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { |
599 ResetTraceEventSyntheticDelays(); | 536 ResetTraceEventSyntheticDelays(); |
600 const TraceConfig::StringList& delays = | 537 const TraceConfig::StringList& delays = |
601 trace_config_.GetSyntheticDelayValues(); | 538 trace_config_.GetSyntheticDelayValues(); |
602 TraceConfig::StringList::const_iterator ci; | 539 TraceConfig::StringList::const_iterator ci; |
(...skipping 181 matching lines...) | |
784 return; | 721 return; |
785 } | 722 } |
786 | 723 |
787 bool is_recording_mode_disabled = | 724 bool is_recording_mode_disabled = |
788 (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE); | 725 (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE); |
789 enabled_modes_ &= ~modes_to_disable; | 726 enabled_modes_ &= ~modes_to_disable; |
790 | 727 |
791 if (modes_to_disable & FILTERING_MODE) | 728 if (modes_to_disable & FILTERING_MODE) |
792 enabled_event_filters_.clear(); | 729 enabled_event_filters_.clear(); |
793 | 730 |
794 if (modes_to_disable & RECORDING_MODE) { | 731 if (modes_to_disable & RECORDING_MODE) |
795 trace_config_.Clear(); | 732 trace_config_.Clear(); |
796 } | |
797 | 733 |
798 UpdateCategoryRegistry(); | 734 UpdateCategoryRegistry(); |
799 | 735 |
800 // Add metadata events and notify observers only if recording mode was | 736 // Add metadata events and notify observers only if recording mode was |
801 // disabled now. | 737 // disabled now. |
802 if (!is_recording_mode_disabled) | 738 if (!is_recording_mode_disabled) |
803 return; | 739 return; |
804 | 740 |
805 AddMetadataEventsWhileLocked(); | 741 AddMetadataEventsWhileLocked(); |
806 | 742 |
(...skipping 524 matching lines...) | |
1331 std::unique_ptr<TraceEvent> filtered_trace_event; | 1267 std::unique_ptr<TraceEvent> filtered_trace_event; |
1332 bool disabled_by_filters = false; | 1268 bool disabled_by_filters = false; |
1333 if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) { | 1269 if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) { |
1334 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); | 1270 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); |
1335 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, | 1271 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
1336 phase, category_group_enabled, name, scope, id, | 1272 phase, category_group_enabled, name, scope, id, |
1337 bind_id, num_args, arg_names, arg_types, | 1273 bind_id, num_args, arg_names, arg_types, |
1338 arg_values, convertable_values, flags); | 1274 arg_values, convertable_values, flags); |
1339 | 1275 |
1340 disabled_by_filters = true; | 1276 disabled_by_filters = true; |
1341 ForEachCategoryGroupFilter( | 1277 ForEachCategoryFilter( |
1342 category_group_enabled, [&new_trace_event, &disabled_by_filters]( | 1278 category_group_enabled, [&new_trace_event, &disabled_by_filters]( |
1343 TraceEventFilter* trace_event_filter) { | 1279 TraceEventFilter* trace_event_filter) { |
1344 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) | 1280 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) |
1345 disabled_by_filters = false; | 1281 disabled_by_filters = false; |
1346 }); | 1282 }); |
1347 if (!disabled_by_filters) | 1283 if (!disabled_by_filters) |
1348 filtered_trace_event = std::move(new_trace_event); | 1284 filtered_trace_event = std::move(new_trace_event); |
1349 } | 1285 } |
1350 | 1286 |
1351 // If enabled for recording, the event should be added only if one of the | 1287 // If enabled for recording, the event should be added only if one of the |
(...skipping 109 matching lines...) | |
1461 if (phase == TRACE_EVENT_PHASE_BEGIN) | 1397 if (phase == TRACE_EVENT_PHASE_BEGIN) |
1462 thread_event_start_times_[thread_id].push(timestamp); | 1398 thread_event_start_times_[thread_id].push(timestamp); |
1463 | 1399 |
1464 return log.str(); | 1400 return log.str(); |
1465 } | 1401 } |
1466 | 1402 |
1467 void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled, | 1403 void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled, |
1468 const char* name, | 1404 const char* name, |
1469 TraceEventHandle handle) { | 1405 TraceEventHandle handle) { |
1470 const char* category_name = GetCategoryGroupName(category_group_enabled); | 1406 const char* category_name = GetCategoryGroupName(category_group_enabled); |
1471 ForEachCategoryGroupFilter( | 1407 ForEachCategoryFilter( |
1472 category_group_enabled, | 1408 category_group_enabled, |
1473 [name, category_name](TraceEventFilter* trace_event_filter) { | 1409 [name, category_name](TraceEventFilter* trace_event_filter) { |
1474 trace_event_filter->EndEvent(name, category_name); | 1410 trace_event_filter->EndEvent(category_name, name); |
1475 }); | 1411 }); |
1476 } | 1412 } |
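Note that the rewritten call above passes (category_name, name) where the old code passed (name, category_name). A sketch of the interface shape this call order implies; the real declaration lives outside this diff, so the exact signature is an assumption:

    // Hypothetical sketch only; mirrors the argument order at the call site.
    class TraceEventFilterSketch {
     public:
      virtual ~TraceEventFilterSketch() {}
      virtual void EndEvent(const char* category_name, const char* event_name) {}
    };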
1477 | 1413 |
1478 void TraceLog::UpdateTraceEventDuration( | 1414 void TraceLog::UpdateTraceEventDuration( |
1479 const unsigned char* category_group_enabled, | 1415 const unsigned char* category_group_enabled, |
1480 const char* name, | 1416 const char* name, |
1481 TraceEventHandle handle) { | 1417 TraceEventHandle handle) { |
1482 char category_group_enabled_local = *category_group_enabled; | 1418 char category_group_enabled_local = *category_group_enabled; |
1483 if (!category_group_enabled_local) | 1419 if (!category_group_enabled_local) |
1484 return; | 1420 return; |
(...skipping 120 matching lines...) | |
1605 "overflowed_at_ts", | 1541 "overflowed_at_ts", |
1606 buffer_limit_reached_timestamp_); | 1542 buffer_limit_reached_timestamp_); |
1607 } | 1543 } |
1608 } | 1544 } |
1609 | 1545 |
1610 void TraceLog::DeleteForTesting() { | 1546 void TraceLog::DeleteForTesting() { |
1611 internal::DeleteTraceLogForTesting::Delete(); | 1547 internal::DeleteTraceLogForTesting::Delete(); |
1612 CategoryRegistry::ResetForTesting(); | 1548 CategoryRegistry::ResetForTesting(); |
1613 } | 1549 } |
1614 | 1550 |
1615 void TraceLog::SetTraceEventFilterConstructorForTesting( | |
1616 TraceEventFilterConstructorForTesting predicate) { | |
1617 g_trace_event_filter_constructor_for_testing = predicate; | |
1618 } | |
1619 | |
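The static testing hook removed here is superseded by the filter_factory_for_testing_ member consulted in CreateFiltersForTraceConfig above. A rough sketch of a test-side factory matching that call; the setter name, the MyTestFilter class, and the header paths are assumptions rather than part of this CL:

    #include <memory>
    #include <string>

    #include "base/memory/ptr_util.h"
    #include "base/trace_event/trace_event_filter.h"

    // Returns a filter for the given predicate name, or null if it is not
    // recognized (in which case the CHECK in CreateFiltersForTraceConfig fires).
    std::unique_ptr<base::trace_event::TraceEventFilter> MakeTestFilter(
        const std::string& predicate_name) {
      if (predicate_name == "testing_predicate")
        return base::MakeUnique<MyTestFilter>();  // MyTestFilter: hypothetical subclass.
      return nullptr;
    }

    // Installed from a test via a setter assumed to exist on TraceLog, e.g.:
    //   TraceLog::GetInstance()->SetFilterFactoryForTesting(&MakeTestFilter);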
1620 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { | 1551 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { |
1621 return GetEventByHandleInternal(handle, NULL); | 1552 return GetEventByHandleInternal(handle, NULL); |
1622 } | 1553 } |
1623 | 1554 |
1624 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle, | 1555 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle, |
1625 OptionalAutoLock* lock) { | 1556 OptionalAutoLock* lock) { |
1626 if (!handle.chunk_seq) | 1557 if (!handle.chunk_seq) |
1627 return NULL; | 1558 return NULL; |
1628 | 1559 |
1629 DCHECK(handle.chunk_seq); | 1560 DCHECK(handle.chunk_seq); |
(...skipping 167 matching lines...) | |
1797 } | 1728 } |
1798 | 1729 |
1799 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1730 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
1800 if (*category_group_enabled_) { | 1731 if (*category_group_enabled_) { |
1801 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1732 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
1802 event_handle_); | 1733 event_handle_); |
1803 } | 1734 } |
1804 } | 1735 } |
1805 | 1736 |
1806 } // namespace trace_event_internal | 1737 } // namespace trace_event_internal |