| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 #include <memory> | 9 #include <memory> |
| 10 #include <utility> | 10 #include <utility> |
| 11 | 11 |
| 12 #include "base/base_switches.h" | 12 #include "base/base_switches.h" |
| 13 #include "base/bind.h" | 13 #include "base/bind.h" |
| 14 #include "base/command_line.h" | 14 #include "base/command_line.h" |
| 15 #include "base/debug/leak_annotations.h" | 15 #include "base/debug/leak_annotations.h" |
| 16 #include "base/lazy_instance.h" | 16 #include "base/lazy_instance.h" |
| 17 #include "base/location.h" | 17 #include "base/location.h" |
| 18 #include "base/macros.h" | 18 #include "base/macros.h" |
| 19 #include "base/memory/ptr_util.h" | 19 #include "base/memory/ptr_util.h" |
| 20 #include "base/memory/ref_counted_memory.h" | 20 #include "base/memory/ref_counted_memory.h" |
| 21 #include "base/memory/singleton.h" | 21 #include "base/memory/singleton.h" |
| 22 #include "base/message_loop/message_loop.h" | 22 #include "base/message_loop/message_loop.h" |
| 23 #include "base/process/process_metrics.h" | 23 #include "base/process/process_metrics.h" |
| 24 #include "base/stl_util.h" | 24 #include "base/stl_util.h" |
| 25 #include "base/strings/string_split.h" | 25 #include "base/strings/string_split.h" |
| 26 #include "base/strings/string_tokenizer.h" | 26 #include "base/strings/string_tokenizer.h" |
| 27 #include "base/strings/stringprintf.h" | 27 #include "base/strings/stringprintf.h" |
| 28 #include "base/sys_info.h" | 28 #include "base/sys_info.h" |
| 29 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | |
| 30 #include "base/threading/platform_thread.h" | 29 #include "base/threading/platform_thread.h" |
| 31 #include "base/threading/thread_id_name_manager.h" | 30 #include "base/threading/thread_id_name_manager.h" |
| 32 #include "base/threading/thread_task_runner_handle.h" | 31 #include "base/threading/thread_task_runner_handle.h" |
| 33 #include "base/threading/worker_pool.h" | 32 #include "base/threading/worker_pool.h" |
| 34 #include "base/time/time.h" | 33 #include "base/time/time.h" |
| | 34 #include "base/trace_event/category_registry.h" |
| 35 #include "base/trace_event/heap_profiler.h" | 35 #include "base/trace_event/heap_profiler.h" |
| 36 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 36 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
| 37 #include "base/trace_event/memory_dump_manager.h" | 37 #include "base/trace_event/memory_dump_manager.h" |
| 38 #include "base/trace_event/memory_dump_provider.h" | 38 #include "base/trace_event/memory_dump_provider.h" |
| 39 #include "base/trace_event/process_memory_dump.h" | 39 #include "base/trace_event/process_memory_dump.h" |
| 40 #include "base/trace_event/trace_buffer.h" | 40 #include "base/trace_event/trace_buffer.h" |
| 41 #include "base/trace_event/trace_event.h" | 41 #include "base/trace_event/trace_event.h" |
| 42 #include "base/trace_event/trace_event_synthetic_delay.h" | 42 #include "base/trace_event/trace_event_synthetic_delay.h" |
| 43 #include "build/build_config.h" | 43 #include "build/build_config.h" |
| 44 | 44 |
| (...skipping 32 matching lines...) |
| 77 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex, | 77 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex, |
| 78 "Too many vector buffer chunks"); | 78 "Too many vector buffer chunks"); |
| 79 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; | 79 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; |
| 80 | 80 |
| 81 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. | 81 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. |
| 82 const size_t kEchoToConsoleTraceEventBufferChunks = 256; | 82 const size_t kEchoToConsoleTraceEventBufferChunks = 256; |
| 83 | 83 |
| 84 const size_t kTraceEventBufferSizeInBytes = 100 * 1024; | 84 const size_t kTraceEventBufferSizeInBytes = 100 * 1024; |
| 85 const int kThreadFlushTimeoutMs = 3000; | 85 const int kThreadFlushTimeoutMs = 3000; |
| 86 | 86 |
| 87 #define MAX_CATEGORY_GROUPS 200 | |
| 88 | |
| 89 // Parallel arrays g_category_groups and g_category_group_enabled are separate | |
| 90 // so that a pointer to a member of g_category_group_enabled can be easily | |
| 91 // converted to an index into g_category_groups. This allows macros to deal | |
| 92 // only with char enabled pointers from g_category_group_enabled, and we can | |
| 93 // convert internally to determine the category name from the char enabled | |
| 94 // pointer. | |
| 95 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { | |
| 96 "toplevel", | |
| 97 "tracing already shutdown", | |
| 98 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", | |
| 99 "__metadata"}; | |
| 100 | |
| 101 // The enabled flag is char instead of bool so that the API can be used from C. | |
| 102 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; | |
| 103 | |
| 104 const char kEventNameWhitelist[] = "event_name_whitelist"; | 87 const char kEventNameWhitelist[] = "event_name_whitelist"; |
| 105 | 88 |
| 106 #define MAX_TRACE_EVENT_FILTERS 32 | 89 #define MAX_TRACE_EVENT_FILTERS 32 |
| 107 | 90 |
| 108 // List of TraceEventFilter objects from the most recent tracing session. | 91 // List of TraceEventFilter objects from the most recent tracing session. |
| 109 base::LazyInstance<std::vector<std::unique_ptr<TraceLog::TraceEventFilter>>>:: | 92 base::LazyInstance<std::vector<std::unique_ptr<TraceLog::TraceEventFilter>>>:: |
| 110 Leaky g_category_group_filters = LAZY_INSTANCE_INITIALIZER; | 93 Leaky g_category_group_filters = LAZY_INSTANCE_INITIALIZER; |
| 111 | 94 |
| 112 // Stores a bitmap of filters enabled for each category group. | |
| 113 uint32_t g_category_group_filters_enabled[MAX_CATEGORY_GROUPS] = {0}; | |
| 114 | |
| 115 class EventNameFilter : public TraceLog::TraceEventFilter { | 95 class EventNameFilter : public TraceLog::TraceEventFilter { |
| 116 public: | 96 public: |
| 117 EventNameFilter(const base::DictionaryValue* filter_args) { | 97 EventNameFilter(const base::DictionaryValue* filter_args) { |
| 118 const base::ListValue* whitelist = nullptr; | 98 const base::ListValue* whitelist = nullptr; |
| 119 if (filter_args->GetList(kEventNameWhitelist, &whitelist)) { | 99 if (filter_args->GetList(kEventNameWhitelist, &whitelist)) { |
| 120 for (size_t i = 0; i < whitelist->GetSize(); ++i) { | 100 for (size_t i = 0; i < whitelist->GetSize(); ++i) { |
| 121 std::string event_name; | 101 std::string event_name; |
| 122 if (!whitelist->GetString(i, &event_name)) | 102 if (!whitelist->GetString(i, &event_name)) |
| 123 continue; | 103 continue; |
| 124 | 104 |
| (...skipping 49 matching lines...) |
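Note: EventNameFilter above reads its whitelist out of the filter's `filter_args` dictionary. A minimal sketch of turning it on, assuming the surrounding `event_filters`/`filter_predicate` keys of the TraceConfig JSON; only `event_name_whitelist` is defined in this file:

```cpp
// Hedged sketch: enable event-name filtering. The JSON keys other than
// "event_name_whitelist" are assumptions about the TraceConfig format.
#include "base/trace_event/trace_config.h"
#include "base/trace_event/trace_log.h"

void EnableEventNameFiltering() {
  const char kConfig[] =
      "{\"event_filters\": ["
      "  {\"filter_predicate\": \"event_whitelist_predicate\","
      "   \"included_categories\": [\"toplevel\"],"
      "   \"filter_args\": {\"event_name_whitelist\": [\"MyEvent\"]}}"
      "]}";
  base::trace_event::TraceLog::GetInstance()->SetEnabled(
      base::trace_event::TraceConfig(kConfig),
      base::trace_event::TraceLog::FILTERING_MODE);
}
```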
| 174 AllocationContextTracker::CaptureMode::PSEUDO_STACK) { | 154 AllocationContextTracker::CaptureMode::PSEUDO_STACK) { |
| 175 AllocationContextTracker::GetInstanceForCurrentThread() | 155 AllocationContextTracker::GetInstanceForCurrentThread() |
| 176 ->PopPseudoStackFrame({category_group, name}); | 156 ->PopPseudoStackFrame({category_group, name}); |
| 177 } | 157 } |
| 178 } | 158 } |
| 179 }; | 159 }; |
| 180 | 160 |
| 181 TraceLog::TraceEventFilterConstructorForTesting | 161 TraceLog::TraceEventFilterConstructorForTesting |
| 182 g_trace_event_filter_constructor_for_testing = nullptr; | 162 g_trace_event_filter_constructor_for_testing = nullptr; |
| 183 | 163 |
| 184 // Indexes here have to match the g_category_groups array indexes above. | |
| 185 const int kCategoryAlreadyShutdown = 1; | |
| 186 const int kCategoryCategoriesExhausted = 2; | |
| 187 const int kCategoryMetadata = 3; | |
| 188 const int kNumBuiltinCategories = 4; | |
| 189 // Skip default categories. | |
| 190 base::subtle::AtomicWord g_category_index = kNumBuiltinCategories; | |
| 191 | |
| 192 // The name of the current thread. This is used to decide if the current | 164 // The name of the current thread. This is used to decide if the current |
| 193 // thread name has changed. We combine all the seen thread names into the | 165 // thread name has changed. We combine all the seen thread names into the |
| 194 // output name for the thread. | 166 // output name for the thread. |
| 195 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name = | 167 LazyInstance<ThreadLocalPointer<const char>>::Leaky g_current_thread_name = |
| 196 LAZY_INSTANCE_INITIALIZER; | 168 LAZY_INSTANCE_INITIALIZER; |
| 197 | 169 |
| 198 ThreadTicks ThreadNow() { | 170 ThreadTicks ThreadNow() { |
| 199 return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks(); | 171 return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks(); |
| 200 } | 172 } |
| 201 | 173 |
| 202 template <typename T> | 174 template <typename T> |
| 203 void InitializeMetadataEvent(TraceEvent* trace_event, | 175 void InitializeMetadataEvent(TraceEvent* trace_event, |
| 204 int thread_id, | 176 int thread_id, |
| 205 const char* metadata_name, | 177 const char* metadata_name, |
| 206 const char* arg_name, | 178 const char* arg_name, |
| 207 const T& value) { | 179 const T& value) { |
| 208 if (!trace_event) | 180 if (!trace_event) |
| 209 return; | 181 return; |
| 210 | 182 |
| 211 int num_args = 1; | 183 int num_args = 1; |
| 212 unsigned char arg_type; | 184 unsigned char arg_type; |
| 213 unsigned long long arg_value; | 185 unsigned long long arg_value; |
| 214 ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value); | 186 ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value); |
| 215 trace_event->Initialize( | 187 trace_event->Initialize( |
| 216 thread_id, | 188 thread_id, |
| 217 TimeTicks(), | 189 TimeTicks(), |
| 218 ThreadTicks(), | 190 ThreadTicks(), |
| 219 TRACE_EVENT_PHASE_METADATA, | 191 TRACE_EVENT_PHASE_METADATA, |
| 220 &g_category_group_enabled[kCategoryMetadata], | 192 CategoryRegistry::kCategoryMetadata->state_ptr(), |
| 221 metadata_name, | 193 metadata_name, |
| 222 trace_event_internal::kGlobalScope, // scope | 194 trace_event_internal::kGlobalScope, // scope |
| 223 trace_event_internal::kNoId, // id | 195 trace_event_internal::kNoId, // id |
| 224 trace_event_internal::kNoId, // bind_id | 196 trace_event_internal::kNoId, // bind_id |
| 225 num_args, | 197 num_args, |
| 226 &arg_name, | 198 &arg_name, |
| 227 &arg_type, | 199 &arg_type, |
| 228 &arg_value, | 200 &arg_value, |
| 229 nullptr, | 201 nullptr, |
| 230 TRACE_EVENT_FLAG_NONE); | 202 TRACE_EVENT_FLAG_NONE); |
| (...skipping 21 matching lines...) |
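Note: InitializeMetadataEvent above serializes a single name/value pair through SetTraceValue and stamps the event with the built-in `__metadata` category state. A hedged usage sketch, mirroring AddMetadataEventsWhileLocked later in this file; the |event| slot is illustrative, the real caller obtains it from the thread-shared chunk:

```cpp
// Illustrative only: |event| stands in for the slot returned by
// AddEventToThreadSharedChunkWhileLocked() in the real caller.
void EmitProcessNameMetadata(TraceEvent* event,
                             int process_id,
                             const std::string& process_name) {
  InitializeMetadataEvent(event, process_id, "process_name", "name",
                          process_name);
}
```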
| 252 TraceEventHandle* handle) { | 224 TraceEventHandle* handle) { |
| 253 DCHECK(chunk_seq); | 225 DCHECK(chunk_seq); |
| 254 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); | 226 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); |
| 255 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); | 227 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); |
| 256 DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max()); | 228 DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max()); |
| 257 handle->chunk_seq = chunk_seq; | 229 handle->chunk_seq = chunk_seq; |
| 258 handle->chunk_index = static_cast<uint16_t>(chunk_index); | 230 handle->chunk_index = static_cast<uint16_t>(chunk_index); |
| 259 handle->event_index = static_cast<uint16_t>(event_index); | 231 handle->event_index = static_cast<uint16_t>(event_index); |
| 260 } | 232 } |
| 261 | 233 |
| 262 uintptr_t GetCategoryIndex(const unsigned char* category_group_enabled) { | |
| 263 // Calculate the index of the category group by finding | |
| 264 // category_group_enabled in g_category_group_enabled array. | |
| 265 uintptr_t category_begin = | |
| 266 reinterpret_cast<uintptr_t>(g_category_group_enabled); | |
| 267 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); | |
| 268 DCHECK(category_ptr >= category_begin); | |
| 269 DCHECK(category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + | |
| 270 MAX_CATEGORY_GROUPS)) | |
| 271 << "out of bounds category pointer"; | |
| 272 uintptr_t category_index = | |
| 273 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); | |
| 274 | |
| 275 return category_index; | |
| 276 } | |
| 277 | |
| 278 template <typename Function> | 234 template <typename Function> |
| 279 void ForEachCategoryGroupFilter(const unsigned char* category_group_enabled, | 235 void ForEachCategoryGroupFilter(const unsigned char* category_group_enabled, |
| 280 Function filter_fn) { | 236 Function filter_fn) { |
| 281 uint32_t filter_bitmap = g_category_group_filters_enabled[GetCategoryIndex( | 237 const TraceCategory* category = |
| 282 category_group_enabled)]; | 238 CategoryRegistry::GetCategoryByStatePtr(category_group_enabled); |
| | 239 uint32_t filter_bitmap = category->enabled_filters(); |
| 283 int index = 0; | 240 int index = 0; |
| 284 while (filter_bitmap) { | 241 while (filter_bitmap) { |
| 285 if (filter_bitmap & 1 && g_category_group_filters.Get()[index]) | 242 if (filter_bitmap & 1 && g_category_group_filters.Get()[index]) |
| 286 filter_fn(g_category_group_filters.Get()[index].get()); | 243 filter_fn(g_category_group_filters.Get()[index].get()); |
| 287 filter_bitmap = filter_bitmap >> 1; | 244 filter_bitmap = filter_bitmap >> 1; |
| 288 index++; | 245 index++; |
| 289 } | 246 } |
| 290 } | 247 } |
| 291 | 248 |
| 292 } // namespace | 249 } // namespace |
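Note: ForEachCategoryGroupFilter walks the per-category filter bitmap one bit at a time; bit N selects entry N of g_category_group_filters. The same walk in isolation (names here are illustrative, not from this file):

```cpp
#include <cstdint>
#include <functional>
#include <vector>

// Generic restatement of the bitmap walk: each set bit in |bitmap| selects
// the callback at the same index in |callbacks|.
void ForEachSetBit(uint32_t bitmap,
                   const std::vector<std::function<void()>>& callbacks) {
  for (size_t index = 0; bitmap; bitmap >>= 1, ++index) {
    if ((bitmap & 1) && index < callbacks.size() && callbacks[index])
      callbacks[index]();
  }
}
```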
| (...skipping 173 matching lines...) |
| 466 num_traces_recorded_(0), | 423 num_traces_recorded_(0), |
| 467 dispatching_to_observer_list_(false), | 424 dispatching_to_observer_list_(false), |
| 468 process_sort_index_(0), | 425 process_sort_index_(0), |
| 469 process_id_hash_(0), | 426 process_id_hash_(0), |
| 470 process_id_(0), | 427 process_id_(0), |
| 471 trace_options_(kInternalRecordUntilFull), | 428 trace_options_(kInternalRecordUntilFull), |
| 472 trace_config_(TraceConfig()), | 429 trace_config_(TraceConfig()), |
| 473 thread_shared_chunk_index_(0), | 430 thread_shared_chunk_index_(0), |
| 474 generation_(0), | 431 generation_(0), |
| 475 use_worker_thread_(false) { | 432 use_worker_thread_(false) { |
| 476 // Trace is enabled or disabled on one thread while other threads are | 433 CategoryRegistry::Initialize(); |
| 477 // accessing the enabled flag. We don't care whether edge-case events are | 434 |
| 478 // traced or not, so we allow races on the enabled flag to keep the trace | |
| 479 // macros fast. | |
| 480 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: | |
| 481 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, | |
| 482 // sizeof(g_category_group_enabled), | |
| 483 // "trace_event category enabled"); | |
| 484 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) { | |
| 485 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i], | |
| 486 "trace_event category enabled"); | |
| 487 } | |
| 488 #if defined(OS_NACL) // NaCl shouldn't expose the process id. | 435 #if defined(OS_NACL) // NaCl shouldn't expose the process id. |
| 489 SetProcessID(0); | 436 SetProcessID(0); |
| 490 #else | 437 #else |
| 491 SetProcessID(static_cast<int>(GetCurrentProcId())); | 438 SetProcessID(static_cast<int>(GetCurrentProcId())); |
| 492 #endif | 439 #endif |
| 493 | 440 |
| 494 logged_events_.reset(CreateTraceBuffer()); | 441 logged_events_.reset(CreateTraceBuffer()); |
| 495 | 442 |
| 496 MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog", | 443 MemoryDumpManager::GetInstance()->RegisterDumpProvider(this, "TraceLog", |
| 497 nullptr); | 444 nullptr); |
| (...skipping 38 matching lines...) |
| 536 } | 483 } |
| 537 overhead.AddSelf(); | 484 overhead.AddSelf(); |
| 538 overhead.DumpInto("tracing/main_trace_log", pmd); | 485 overhead.DumpInto("tracing/main_trace_log", pmd); |
| 539 return true; | 486 return true; |
| 540 } | 487 } |
| 541 | 488 |
| 542 const unsigned char* TraceLog::GetCategoryGroupEnabled( | 489 const unsigned char* TraceLog::GetCategoryGroupEnabled( |
| 543 const char* category_group) { | 490 const char* category_group) { |
| 544 TraceLog* tracelog = GetInstance(); | 491 TraceLog* tracelog = GetInstance(); |
| 545 if (!tracelog) { | 492 if (!tracelog) { |
| 546 DCHECK(!g_category_group_enabled[kCategoryAlreadyShutdown]); | 493 DCHECK(!CategoryRegistry::kCategoryAlreadyShutdown->is_enabled()); |
| 547 return &g_category_group_enabled[kCategoryAlreadyShutdown]; | 494 return CategoryRegistry::kCategoryAlreadyShutdown->state_ptr(); |
| 548 } | 495 } |
| 549 return tracelog->GetCategoryGroupEnabledInternal(category_group); | 496 TraceCategory* category = nullptr; |
| 497 bool is_new_category = |
| 498 CategoryRegistry::GetOrCreateCategoryByName(category_group, &category); |
| 499 if (is_new_category) |
| 500 tracelog->UpdateCategoryState(category); |
| 501 DCHECK(category->state_ptr()); |
| 502 return category->state_ptr(); |
| 550 } | 503 } |
| 551 | 504 |
| 552 const char* TraceLog::GetCategoryGroupName( | 505 const char* TraceLog::GetCategoryGroupName( |
| 553 const unsigned char* category_group_enabled) { | 506 const unsigned char* category_group_enabled) { |
| 554 return g_category_groups[GetCategoryIndex(category_group_enabled)]; | 507 return CategoryRegistry::GetCategoryByStatePtr(category_group_enabled) |
| | 508 ->name(); |
| 555 } | 509 } |
| 556 | 510 |
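Note: the two lookups above are the contract the trace macros build on. A call site fetches the `const unsigned char*` state pointer once, tests the byte it points at on every event, and TraceLog can map the pointer back to its TraceCategory. A sketch of the caching pattern; the real macro machinery lives in trace_event.h, and the function and category names below are illustrative:

```cpp
// Hedged sketch of the per-call-site caching the TRACE_EVENT macros perform.
void MaybeTraceSomething() {
  static const unsigned char* category_enabled =
      base::trace_event::TraceLog::GetCategoryGroupEnabled("my_category");
  if (*category_enabled &
      base::trace_event::TraceCategory::ENABLED_FOR_RECORDING) {
    // Emit the event. The category name can be recovered with
    // TraceLog::GetCategoryGroupName(category_enabled).
  }
}
```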
| 557 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { | 511 void TraceLog::UpdateCategoryState(TraceCategory* category) { |
| 558 unsigned char enabled_flag = 0; | 512 DCHECK(category->is_valid()); |
| 559 const char* category_group = g_category_groups[category_index]; | 513 unsigned char state_flags = 0; |
| 560 if (enabled_modes_ & RECORDING_MODE && | 514 if (enabled_modes_ & RECORDING_MODE && |
| 561 trace_config_.IsCategoryGroupEnabled(category_group)) { | 515 trace_config_.IsCategoryGroupEnabled(category->name())) { |
| 562 enabled_flag |= ENABLED_FOR_RECORDING; | 516 state_flags |= TraceCategory::ENABLED_FOR_RECORDING; |
| | 517 } |
| | 518 |
| | 519 // TODO(primiano): this is a temporary workaround for catapult:#2341, |
| | 520 // to guarantee that metadata events are always added even if the category |
| | 521 // filter is "-*". See crbug.com/618054 for more details and long-term fix. |
| | 522 if (enabled_modes_ & RECORDING_MODE && |
| | 523 category == CategoryRegistry::kCategoryMetadata) { |
| | 524 state_flags |= TraceCategory::ENABLED_FOR_RECORDING; |
| 563 } | 525 } |
| 564 | 526 |
| 565 #if defined(OS_WIN) | 527 #if defined(OS_WIN) |
| 566 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( | 528 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
| 567 category_group)) { | 529 category->name())) { |
| 568 enabled_flag |= ENABLED_FOR_ETW_EXPORT; | 530 state_flags |= TraceCategory::ENABLED_FOR_ETW_EXPORT; |
| 569 } | 531 } |
| 570 #endif | 532 #endif |
| 571 | 533 |
| 572 // TODO(primiano): this is a temporary workaround for catapult:#2341, | |
| 573 // to guarantee that metadata events are always added even if the category | |
| 574 // filter is "-*". See crbug.com/618054 for more details and long-term fix. | |
| 575 if (enabled_modes_ & RECORDING_MODE && !strcmp(category_group, "__metadata")) | |
| 576 enabled_flag |= ENABLED_FOR_RECORDING; | |
| 577 | |
| 578 uint32_t enabled_filters_bitmap = 0; | 534 uint32_t enabled_filters_bitmap = 0; |
| 579 int index = 0; | 535 int index = 0; |
| 580 for (const auto& event_filter : enabled_event_filters_) { | 536 for (const auto& event_filter : enabled_event_filters_) { |
| 581 if (event_filter.IsCategoryGroupEnabled(category_group)) { | 537 if (event_filter.IsCategoryGroupEnabled(category->name())) { |
| 582 enabled_flag |= ENABLED_FOR_FILTERING; | 538 state_flags |= TraceCategory::ENABLED_FOR_FILTERING; |
| 583 DCHECK(g_category_group_filters.Get()[index]); | 539 DCHECK(g_category_group_filters.Get()[index]); |
| 584 enabled_filters_bitmap |= 1 << index; | 540 enabled_filters_bitmap |= 1 << index; |
| 585 } | 541 } |
| 586 if (index++ >= MAX_TRACE_EVENT_FILTERS) { | 542 if (index++ >= MAX_TRACE_EVENT_FILTERS) { |
| 587 NOTREACHED(); | 543 NOTREACHED(); |
| 588 break; | 544 break; |
| 589 } | 545 } |
| 590 } | 546 } |
| 591 g_category_group_filters_enabled[category_index] = enabled_filters_bitmap; | 547 category->set_enabled_filters(enabled_filters_bitmap); |
| 592 | 548 category->set_state(state_flags); |
| 593 g_category_group_enabled[category_index] = enabled_flag; | |
| 594 } | 549 } |
| 595 | 550 |
| 596 void TraceLog::UpdateCategoryGroupEnabledFlags() { | 551 void TraceLog::UpdateCategoryRegistry() { |
| 597 CreateFiltersForTraceConfig(); | 552 CreateFiltersForTraceConfig(); |
| 598 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | 553 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) { |
| 599 for (size_t i = 0; i < category_index; i++) | 554 UpdateCategoryState(&category); |
| 600 UpdateCategoryGroupEnabledFlag(i); | 555 } |
| 601 } | 556 } |
| 602 | 557 |
| 603 void TraceLog::CreateFiltersForTraceConfig() { | 558 void TraceLog::CreateFiltersForTraceConfig() { |
| 604 if (!(enabled_modes_ & FILTERING_MODE)) | 559 if (!(enabled_modes_ & FILTERING_MODE)) |
| 605 return; | 560 return; |
| 606 | 561 |
| 607 // Filters were already added and tracing could be enabled. Filters list | 562 // Filters were already added and tracing could be enabled. Filters list |
| 608 // cannot be changed when trace events are using them. | 563 // cannot be changed when trace events are using them. |
| 609 if (g_category_group_filters.Get().size()) | 564 if (g_category_group_filters.Get().size()) |
| 610 return; | 565 return; |
| (...skipping 44 matching lines...) |
| 655 delay->SetMode(TraceEventSyntheticDelay::STATIC); | 610 delay->SetMode(TraceEventSyntheticDelay::STATIC); |
| 656 } else if (token == "oneshot") { | 611 } else if (token == "oneshot") { |
| 657 delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT); | 612 delay->SetMode(TraceEventSyntheticDelay::ONE_SHOT); |
| 658 } else if (token == "alternating") { | 613 } else if (token == "alternating") { |
| 659 delay->SetMode(TraceEventSyntheticDelay::ALTERNATING); | 614 delay->SetMode(TraceEventSyntheticDelay::ALTERNATING); |
| 660 } | 615 } |
| 661 } | 616 } |
| 662 } | 617 } |
| 663 } | 618 } |
| 664 | 619 |
| 665 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal( | |
| 666 const char* category_group) { | |
| 667 DCHECK(!strchr(category_group, '"')) | |
| 668 << "Category groups may not contain double quote"; | |
| 669 // g_category_groups is append-only; avoid using a lock on the fast path. | |
| 670 size_t current_category_index = base::subtle::Acquire_Load(&g_category_index); | |
| 671 | |
| 672 // Search for pre-existing category group. | |
| 673 for (size_t i = 0; i < current_category_index; ++i) { | |
| 674 if (strcmp(g_category_groups[i], category_group) == 0) { | |
| 675 return &g_category_group_enabled[i]; | |
| 676 } | |
| 677 } | |
| 678 | |
| 679 // This is the slow path: the lock is not held in the case above, so more | |
| 680 // than one thread could have reached here trying to add the same category. | |
| 681 // Only hold the lock when actually appending a new category, and | |
| 682 // check the categories groups again. | |
| 683 AutoLock lock(lock_); | |
| 684 size_t category_index = base::subtle::Acquire_Load(&g_category_index); | |
| 685 for (size_t i = 0; i < category_index; ++i) { | |
| 686 if (strcmp(g_category_groups[i], category_group) == 0) { | |
| 687 return &g_category_group_enabled[i]; | |
| 688 } | |
| 689 } | |
| 690 | |
| 691 // Create a new category group. | |
| 692 DCHECK(category_index < MAX_CATEGORY_GROUPS) | |
| 693 << "must increase MAX_CATEGORY_GROUPS"; | |
| 694 unsigned char* category_group_enabled = nullptr; | |
| 695 if (category_index < MAX_CATEGORY_GROUPS) { | |
| 696 // Don't hold on to the category_group pointer, so that we can create | |
| 697 // category groups with strings not known at compile time (this is | |
| 698 // required by SetWatchEvent). | |
| 699 const char* new_group = strdup(category_group); | |
| 700 ANNOTATE_LEAKING_OBJECT_PTR(new_group); | |
| 701 g_category_groups[category_index] = new_group; | |
| 702 DCHECK(!g_category_group_enabled[category_index]); | |
| 703 // Note that if both included and excluded patterns in the | |
| 704 // TraceConfig are empty, we exclude nothing, | |
| 705 // thereby enabling this category group. | |
| 706 UpdateCategoryGroupEnabledFlag(category_index); | |
| 707 category_group_enabled = &g_category_group_enabled[category_index]; | |
| 708 // Update the max index now. | |
| 709 base::subtle::Release_Store(&g_category_index, category_index + 1); | |
| 710 } else { | |
| 711 category_group_enabled = | |
| 712 &g_category_group_enabled[kCategoryCategoriesExhausted]; | |
| 713 } | |
| 714 return category_group_enabled; | |
| 715 } | |
| 716 | |
| 717 void TraceLog::GetKnownCategoryGroups( | 620 void TraceLog::GetKnownCategoryGroups( |
| 718 std::vector<std::string>* category_groups) { | 621 std::vector<std::string>* category_groups) { |
| 719 AutoLock lock(lock_); | 622 for (const auto& category : CategoryRegistry::GetAllCategories()) { |
| 720 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | 623 if (!CategoryRegistry::IsBuiltinCategory(&category)) |
| 721 for (size_t i = kNumBuiltinCategories; i < category_index; i++) | 624 category_groups->push_back(category.name()); |
| 722 category_groups->push_back(g_category_groups[i]); | 625 } |
| 723 } | 626 } |
| 724 | 627 |
| 725 void TraceLog::SetEnabled(const TraceConfig& trace_config, | 628 void TraceLog::SetEnabled(const TraceConfig& trace_config, |
| 726 uint8_t modes_to_enable) { | 629 uint8_t modes_to_enable) { |
| 727 std::vector<EnabledStateObserver*> observer_list; | 630 std::vector<EnabledStateObserver*> observer_list; |
| 728 std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map; | 631 std::map<AsyncEnabledStateObserver*, RegisteredAsyncObserver> observer_map; |
| 729 { | 632 { |
| 730 AutoLock lock(lock_); | 633 AutoLock lock(lock_); |
| 731 | 634 |
| 732 // Can't enable tracing when Flush() is in progress. | 635 // Can't enable tracing when Flush() is in progress. |
| (...skipping 43 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 776 // Use the given event filters only if filtering was not enabled. | 679 // Use the given event filters only if filtering was not enabled. |
| 777 if (enabled_event_filters_.empty()) | 680 if (enabled_event_filters_.empty()) |
| 778 enabled_event_filters_ = trace_config.event_filters(); | 681 enabled_event_filters_ = trace_config.event_filters(); |
| 779 } | 682 } |
| 780 // Keep the |trace_config_| updated with only enabled filters in case anyone | 683 // Keep the |trace_config_| updated with only enabled filters in case anyone |
| 781 // tries to read it using |GetCurrentTraceConfig| (even if filters are | 684 // tries to read it using |GetCurrentTraceConfig| (even if filters are |
| 782 // empty). | 685 // empty). |
| 783 trace_config_.SetEventFilters(enabled_event_filters_); | 686 trace_config_.SetEventFilters(enabled_event_filters_); |
| 784 | 687 |
| 785 enabled_modes_ |= modes_to_enable; | 688 enabled_modes_ |= modes_to_enable; |
| 786 UpdateCategoryGroupEnabledFlags(); | 689 UpdateCategoryRegistry(); |
| 787 | 690 |
| 788 // Do not notify observers or create trace buffer if only enabled for | 691 // Do not notify observers or create trace buffer if only enabled for |
| 789 // filtering or if recording was already enabled. | 692 // filtering or if recording was already enabled. |
| 790 if (!(modes_to_enable & RECORDING_MODE) || already_recording) | 693 if (!(modes_to_enable & RECORDING_MODE) || already_recording) |
| 791 return; | 694 return; |
| 792 | 695 |
| 793 if (new_options != old_options) { | 696 if (new_options != old_options) { |
| 794 subtle::NoBarrier_Store(&trace_options_, new_options); | 697 subtle::NoBarrier_Store(&trace_options_, new_options); |
| 795 UseNextTraceBuffer(); | 698 UseNextTraceBuffer(); |
| 796 } | 699 } |
| 797 | 700 |
| 798 num_traces_recorded_++; | 701 num_traces_recorded_++; |
| 799 | 702 |
| 800 UpdateCategoryGroupEnabledFlags(); | 703 UpdateCategoryRegistry(); |
| 801 UpdateSyntheticDelaysFromTraceConfig(); | 704 UpdateSyntheticDelaysFromTraceConfig(); |
| 802 | 705 |
| 803 dispatching_to_observer_list_ = true; | 706 dispatching_to_observer_list_ = true; |
| 804 observer_list = enabled_state_observer_list_; | 707 observer_list = enabled_state_observer_list_; |
| 805 observer_map = async_observers_; | 708 observer_map = async_observers_; |
| 806 } | 709 } |
| 807 // Notify observers outside the lock in case they trigger trace events. | 710 // Notify observers outside the lock in case they trigger trace events. |
| 808 for (EnabledStateObserver* observer : observer_list) | 711 for (EnabledStateObserver* observer : observer_list) |
| 809 observer->OnTraceLogEnabled(); | 712 observer->OnTraceLogEnabled(); |
| 810 for (const auto& it : observer_map) { | 713 for (const auto& it : observer_map) { |
| (...skipping 67 matching lines...) |
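Note: SetEnabled snapshots the observer containers under the lock and invokes the callbacks only after releasing it, so an observer that itself emits trace events (and so re-enters TraceLog) cannot deadlock. A generic sketch of the pattern, using std::mutex purely for illustration:

```cpp
#include <mutex>
#include <vector>

class Observer {
 public:
  virtual ~Observer() = default;
  virtual void OnTraceLogEnabled() = 0;
};

// Copy-then-notify: snapshot the list under the lock, call out without it.
class Notifier {
 public:
  void AddObserver(Observer* observer) {
    std::lock_guard<std::mutex> hold(lock_);
    observers_.push_back(observer);
  }

  void EnableAndNotify() {
    std::vector<Observer*> snapshot;
    {
      std::lock_guard<std::mutex> hold(lock_);
      snapshot = observers_;  // Copied while state changes are still locked.
    }
    for (Observer* observer : snapshot)
      observer->OnTraceLogEnabled();  // Lock released: re-entry is safe.
  }

 private:
  std::mutex lock_;
  std::vector<Observer*> observers_;
};
```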
| 878 (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE); | 781 (enabled_modes_ & RECORDING_MODE) && (modes_to_disable & RECORDING_MODE); |
| 879 enabled_modes_ &= ~modes_to_disable; | 782 enabled_modes_ &= ~modes_to_disable; |
| 880 | 783 |
| 881 if (modes_to_disable & FILTERING_MODE) | 784 if (modes_to_disable & FILTERING_MODE) |
| 882 enabled_event_filters_.clear(); | 785 enabled_event_filters_.clear(); |
| 883 | 786 |
| 884 if (modes_to_disable & RECORDING_MODE) { | 787 if (modes_to_disable & RECORDING_MODE) { |
| 885 trace_config_.Clear(); | 788 trace_config_.Clear(); |
| 886 } | 789 } |
| 887 | 790 |
| 888 UpdateCategoryGroupEnabledFlags(); | 791 UpdateCategoryRegistry(); |
| 889 | 792 |
| 890 // Add metadata events and notify observers only if recording mode was | 793 // Add metadata events and notify observers only if recording mode was |
| 891 // disabled now. | 794 // disabled now. |
| 892 if (!is_recording_mode_disabled) | 795 if (!is_recording_mode_disabled) |
| 893 return; | 796 return; |
| 894 | 797 |
| 895 AddMetadataEventsWhileLocked(); | 798 AddMetadataEventsWhileLocked(); |
| 896 | 799 |
| 897 // Remove metadata events so they will not get added to a subsequent trace. | 800 // Remove metadata events so they will not get added to a subsequent trace. |
| 898 metadata_events_.clear(); | 801 metadata_events_.clear(); |
| (...skipping 505 matching lines...) |
| 1404 existing_name->second.push_back(','); | 1307 existing_name->second.push_back(','); |
| 1405 existing_name->second.append(new_name); | 1308 existing_name->second.append(new_name); |
| 1406 } | 1309 } |
| 1407 } | 1310 } |
| 1408 } | 1311 } |
| 1409 } | 1312 } |
| 1410 | 1313 |
| 1411 #if defined(OS_WIN) | 1314 #if defined(OS_WIN) |
| 1412 // This is done sooner rather than later, to avoid creating the event and | 1315 // This is done sooner rather than later, to avoid creating the event and |
| 1413 // acquiring the lock, which is not needed for ETW as it's already threadsafe. | 1316 // acquiring the lock, which is not needed for ETW as it's already threadsafe. |
| 1414 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) | 1317 if (*category_group_enabled & TraceCategory::ENABLED_FOR_ETW_EXPORT) |
| 1415 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, | 1318 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, |
| 1416 num_args, arg_names, arg_types, arg_values, | 1319 num_args, arg_names, arg_types, arg_values, |
| 1417 convertable_values); | 1320 convertable_values); |
| 1418 #endif // OS_WIN | 1321 #endif // OS_WIN |
| 1419 | 1322 |
| 1420 std::string console_message; | 1323 std::string console_message; |
| 1421 std::unique_ptr<TraceEvent> filtered_trace_event; | 1324 std::unique_ptr<TraceEvent> filtered_trace_event; |
| 1422 bool disabled_by_filters = false; | 1325 bool disabled_by_filters = false; |
| 1423 if (*category_group_enabled & ENABLED_FOR_FILTERING) { | 1326 if (*category_group_enabled & TraceCategory::ENABLED_FOR_FILTERING) { |
| 1424 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); | 1327 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); |
| 1425 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, | 1328 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
| 1426 phase, category_group_enabled, name, scope, id, | 1329 phase, category_group_enabled, name, scope, id, |
| 1427 bind_id, num_args, arg_names, arg_types, | 1330 bind_id, num_args, arg_names, arg_types, |
| 1428 arg_values, convertable_values, flags); | 1331 arg_values, convertable_values, flags); |
| 1429 | 1332 |
| 1430 disabled_by_filters = true; | 1333 disabled_by_filters = true; |
| 1431 ForEachCategoryGroupFilter( | 1334 ForEachCategoryGroupFilter( |
| 1432 category_group_enabled, [&new_trace_event, &disabled_by_filters]( | 1335 category_group_enabled, [&new_trace_event, &disabled_by_filters]( |
| 1433 TraceEventFilter* trace_event_filter) { | 1336 TraceEventFilter* trace_event_filter) { |
| 1434 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) | 1337 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) |
| 1435 disabled_by_filters = false; | 1338 disabled_by_filters = false; |
| 1436 }); | 1339 }); |
| 1437 if (!disabled_by_filters) | 1340 if (!disabled_by_filters) |
| 1438 filtered_trace_event = std::move(new_trace_event); | 1341 filtered_trace_event = std::move(new_trace_event); |
| 1439 } | 1342 } |
| 1440 | 1343 |
| 1441 // If enabled for recording, the event should be added only if one of the | 1344 // If enabled for recording, the event should be added only if one of the |
| 1442 // filters accepts it, or the category is not enabled for filtering. | 1345 // filters accepts it, or the category is not enabled for filtering. |
| 1443 if ((*category_group_enabled & ENABLED_FOR_RECORDING) && | 1346 if ((*category_group_enabled & TraceCategory::ENABLED_FOR_RECORDING) && |
| 1444 !disabled_by_filters) { | 1347 !disabled_by_filters) { |
| 1445 OptionalAutoLock lock(&lock_); | 1348 OptionalAutoLock lock(&lock_); |
| 1446 | 1349 |
| 1447 TraceEvent* trace_event = NULL; | 1350 TraceEvent* trace_event = NULL; |
| 1448 if (thread_local_event_buffer) { | 1351 if (thread_local_event_buffer) { |
| 1449 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); | 1352 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
| 1450 } else { | 1353 } else { |
| 1451 lock.EnsureAcquired(); | 1354 lock.EnsureAcquired(); |
| 1452 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); | 1355 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); |
| 1453 } | 1356 } |
| (...skipping 125 matching lines...) |
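Note: the recording path above takes the global lock only when the thread-local event buffer is unavailable, via OptionalAutoLock::EnsureAcquired. A generic sketch of such a lazily-acquired lock; this is illustrative, not the base:: implementation:

```cpp
#include <mutex>

// Fast paths never touch the mutex; slow paths call EnsureAcquired() before
// mutating shared state. The destructor releases the lock if it was taken.
class OptionalLock {
 public:
  explicit OptionalLock(std::mutex* mutex) : mutex_(mutex) {}
  ~OptionalLock() {
    if (acquired_)
      mutex_->unlock();
  }
  void EnsureAcquired() {
    if (!acquired_) {
      mutex_->lock();
      acquired_ = true;
    }
  }

 private:
  std::mutex* const mutex_;
  bool acquired_ = false;
};
```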
| 1579 if (thread_is_in_trace_event_.Get()) | 1482 if (thread_is_in_trace_event_.Get()) |
| 1580 return; | 1483 return; |
| 1581 | 1484 |
| 1582 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); | 1485 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); |
| 1583 | 1486 |
| 1584 ThreadTicks thread_now = ThreadNow(); | 1487 ThreadTicks thread_now = ThreadNow(); |
| 1585 TimeTicks now = OffsetNow(); | 1488 TimeTicks now = OffsetNow(); |
| 1586 | 1489 |
| 1587 #if defined(OS_WIN) | 1490 #if defined(OS_WIN) |
| 1588 // Generate an ETW event that marks the end of a complete event. | 1491 // Generate an ETW event that marks the end of a complete event. |
| 1589 if (category_group_enabled_local & ENABLED_FOR_ETW_EXPORT) | 1492 if (category_group_enabled_local & TraceCategory::ENABLED_FOR_ETW_EXPORT) |
| 1590 TraceEventETWExport::AddCompleteEndEvent(name); | 1493 TraceEventETWExport::AddCompleteEndEvent(name); |
| 1591 #endif // OS_WIN | 1494 #endif // OS_WIN |
| 1592 | 1495 |
| 1593 std::string console_message; | 1496 std::string console_message; |
| 1594 if (category_group_enabled_local & ENABLED_FOR_RECORDING) { | 1497 if (category_group_enabled_local & TraceCategory::ENABLED_FOR_RECORDING) { |
| 1595 OptionalAutoLock lock(&lock_); | 1498 OptionalAutoLock lock(&lock_); |
| 1596 | 1499 |
| 1597 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock); | 1500 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock); |
| 1598 if (trace_event) { | 1501 if (trace_event) { |
| 1599 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE); | 1502 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE); |
| 1600 // TEMP(oysteine) to debug crbug.com/638744 | 1503 // TEMP(oysteine) to debug crbug.com/638744 |
| 1601 if (trace_event->duration().ToInternalValue() != -1) { | 1504 if (trace_event->duration().ToInternalValue() != -1) { |
| 1602 DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq | 1505 DVLOG(1) << "TraceHandle: chunk_seq " << handle.chunk_seq |
| 1603 << ", chunk_index " << handle.chunk_index << ", event_index " | 1506 << ", chunk_index " << handle.chunk_index << ", event_index " |
| 1604 << handle.event_index; | 1507 << handle.event_index; |
| (...skipping 12 matching lines...) |
| 1617 | 1520 |
| 1618 if (trace_options() & kInternalEchoToConsole) { | 1521 if (trace_options() & kInternalEchoToConsole) { |
| 1619 console_message = | 1522 console_message = |
| 1620 EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event); | 1523 EventToConsoleMessage(TRACE_EVENT_PHASE_END, now, trace_event); |
| 1621 } | 1524 } |
| 1622 } | 1525 } |
| 1623 | 1526 |
| 1624 if (!console_message.empty()) | 1527 if (!console_message.empty()) |
| 1625 LOG(ERROR) << console_message; | 1528 LOG(ERROR) << console_message; |
| 1626 | 1529 |
| 1627 if (category_group_enabled_local & ENABLED_FOR_FILTERING) | 1530 if (category_group_enabled_local & TraceCategory::ENABLED_FOR_FILTERING) |
| 1628 EndFilteredEvent(category_group_enabled, name, handle); | 1531 EndFilteredEvent(category_group_enabled, name, handle); |
| 1629 } | 1532 } |
| 1630 | 1533 |
| 1631 uint64_t TraceLog::MangleEventId(uint64_t id) { | 1534 uint64_t TraceLog::MangleEventId(uint64_t id) { |
| 1632 return id ^ process_id_hash_; | 1535 return id ^ process_id_hash_; |
| 1633 } | 1536 } |
| 1634 | 1537 |
| 1635 void TraceLog::AddMetadataEventsWhileLocked() { | 1538 void TraceLog::AddMetadataEventsWhileLocked() { |
| 1636 lock_.AssertAcquired(); | 1539 lock_.AssertAcquired(); |
| 1637 | 1540 |
| (...skipping 54 matching lines...) |
| 1692 if (!buffer_limit_reached_timestamp_.is_null()) { | 1595 if (!buffer_limit_reached_timestamp_.is_null()) { |
| 1693 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), | 1596 InitializeMetadataEvent(AddEventToThreadSharedChunkWhileLocked(NULL, false), |
| 1694 current_thread_id, "trace_buffer_overflowed", | 1597 current_thread_id, "trace_buffer_overflowed", |
| 1695 "overflowed_at_ts", | 1598 "overflowed_at_ts", |
| 1696 buffer_limit_reached_timestamp_); | 1599 buffer_limit_reached_timestamp_); |
| 1697 } | 1600 } |
| 1698 } | 1601 } |
| 1699 | 1602 |
| 1700 void TraceLog::DeleteForTesting() { | 1603 void TraceLog::DeleteForTesting() { |
| 1701 internal::DeleteTraceLogForTesting::Delete(); | 1604 internal::DeleteTraceLogForTesting::Delete(); |
| | 1605 CategoryRegistry::ResetForTesting(); |
| 1702 } | 1606 } |
| 1703 | 1607 |
| 1704 void TraceLog::SetTraceEventFilterConstructorForTesting( | 1608 void TraceLog::SetTraceEventFilterConstructorForTesting( |
| 1705 TraceEventFilterConstructorForTesting predicate) { | 1609 TraceEventFilterConstructorForTesting predicate) { |
| 1706 g_trace_event_filter_constructor_for_testing = predicate; | 1610 g_trace_event_filter_constructor_for_testing = predicate; |
| 1707 } | 1611 } |
| 1708 | 1612 |
| 1709 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { | 1613 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { |
| 1710 return GetEventByHandleInternal(handle, NULL); | 1614 return GetEventByHandleInternal(handle, NULL); |
| 1711 } | 1615 } |
| (...skipping 96 matching lines...) |
| 1808 if (options & kInternalRecordAsMuchAsPossible) { | 1712 if (options & kInternalRecordAsMuchAsPossible) { |
| 1809 return TraceBuffer::CreateTraceBufferVectorOfSize( | 1713 return TraceBuffer::CreateTraceBufferVectorOfSize( |
| 1810 kTraceEventVectorBigBufferChunks); | 1714 kTraceEventVectorBigBufferChunks); |
| 1811 } | 1715 } |
| 1812 return TraceBuffer::CreateTraceBufferVectorOfSize( | 1716 return TraceBuffer::CreateTraceBufferVectorOfSize( |
| 1813 kTraceEventVectorBufferChunks); | 1717 kTraceEventVectorBufferChunks); |
| 1814 } | 1718 } |
| 1815 | 1719 |
| 1816 #if defined(OS_WIN) | 1720 #if defined(OS_WIN) |
| 1817 void TraceLog::UpdateETWCategoryGroupEnabledFlags() { | 1721 void TraceLog::UpdateETWCategoryGroupEnabledFlags() { |
| 1818 AutoLock lock(lock_); | |
| 1819 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | |
| 1820 // Go through each category and set/clear the ETW bit depending on whether the | 1722 // Go through each category and set/clear the ETW bit depending on whether the |
| 1821 // category is enabled. | 1723 // category is enabled. |
| 1822 for (size_t i = 0; i < category_index; i++) { | 1724 for (TraceCategory& category : CategoryRegistry::GetAllCategories()) { |
| 1823 const char* category_group = g_category_groups[i]; | |
| 1824 DCHECK(category_group); | |
| 1825 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( | 1725 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
| 1826 category_group)) { | 1726 category.name())) { |
| 1827 g_category_group_enabled[i] |= ENABLED_FOR_ETW_EXPORT; | 1727 category.set_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT); |
| 1828 } else { | 1728 } else { |
| 1829 g_category_group_enabled[i] &= ~ENABLED_FOR_ETW_EXPORT; | 1729 category.clear_state_flag(TraceCategory::ENABLED_FOR_ETW_EXPORT); |
| 1830 } | 1730 } |
| 1831 } | 1731 } |
| 1832 } | 1732 } |
| 1833 #endif // defined(OS_WIN) | 1733 #endif // defined(OS_WIN) |
| 1834 | 1734 |
| 1835 void ConvertableToTraceFormat::EstimateTraceMemoryOverhead( | 1735 void ConvertableToTraceFormat::EstimateTraceMemoryOverhead( |
| 1836 TraceEventMemoryOverhead* overhead) { | 1736 TraceEventMemoryOverhead* overhead) { |
| 1837 overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this)); | 1737 overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this)); |
| 1838 } | 1738 } |
| 1839 | 1739 |
| (...skipping 50 matching lines...) |
| 1890 } | 1790 } |
| 1891 | 1791 |
| 1892 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1792 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
| 1893 if (*category_group_enabled_) { | 1793 if (*category_group_enabled_) { |
| 1894 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1794 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
| 1895 event_handle_); | 1795 event_handle_); |
| 1896 } | 1796 } |
| 1897 } | 1797 } |
| 1898 | 1798 |
| 1899 } // namespace trace_event_internal | 1799 } // namespace trace_event_internal |