OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <cmath> | 8 #include <cmath> |
9 #include <memory> | 9 #include <memory> |
10 #include <utility> | 10 #include <utility> |
11 | 11 |
12 #include "base/base_switches.h" | 12 #include "base/base_switches.h" |
13 #include "base/bind.h" | 13 #include "base/bind.h" |
14 #include "base/command_line.h" | 14 #include "base/command_line.h" |
15 #include "base/debug/leak_annotations.h" | 15 #include "base/debug/leak_annotations.h" |
16 #include "base/lazy_instance.h" | 16 #include "base/lazy_instance.h" |
17 #include "base/location.h" | 17 #include "base/location.h" |
18 #include "base/macros.h" | 18 #include "base/macros.h" |
| 19 #include "base/memory/ptr_util.h" |
19 #include "base/memory/ref_counted_memory.h" | 20 #include "base/memory/ref_counted_memory.h" |
20 #include "base/memory/singleton.h" | 21 #include "base/memory/singleton.h" |
21 #include "base/process/process_metrics.h" | 22 #include "base/process/process_metrics.h" |
22 #include "base/stl_util.h" | 23 #include "base/stl_util.h" |
23 #include "base/strings/string_split.h" | 24 #include "base/strings/string_split.h" |
24 #include "base/strings/string_tokenizer.h" | 25 #include "base/strings/string_tokenizer.h" |
25 #include "base/strings/stringprintf.h" | 26 #include "base/strings/stringprintf.h" |
26 #include "base/sys_info.h" | 27 #include "base/sys_info.h" |
27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | 28 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" |
28 #include "base/threading/platform_thread.h" | 29 #include "base/threading/platform_thread.h" |
(...skipping 66 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
95 // convert internally to determine the category name from the char enabled | 96 // convert internally to determine the category name from the char enabled |
96 // pointer. | 97 // pointer. |
97 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { | 98 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { |
98 "toplevel", | 99 "toplevel", |
99 "tracing already shutdown", | 100 "tracing already shutdown", |
100 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", | 101 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", |
101 "__metadata"}; | 102 "__metadata"}; |
102 | 103 |
103 // The enabled flag is char instead of bool so that the API can be used from C. | 104 // The enabled flag is char instead of bool so that the API can be used from C. |
104 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; | 105 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = {0}; |
| 106 |
| 107 const char kEventNameWhitelist[] = "event_name_whitelist"; |
| 108 |
| 109 class EventNameFilter : public TraceLog::TraceEventFilter { |
| 110 public: |
| 111 EventNameFilter(const base::DictionaryValue* filter_args) { |
| 112 const base::ListValue* whitelist = nullptr; |
| 113 if (filter_args->GetList(kEventNameWhitelist, &whitelist)) { |
| 114 for (size_t i = 0; i < whitelist->GetSize(); ++i) { |
| 115 std::string event_name; |
| 116 if (!whitelist->GetString(i, &event_name)) |
| 117 continue; |
| 118 |
| 119 whitelist_.insert(event_name); |
| 120 } |
| 121 } |
| 122 } |
| 123 |
| 124 bool FilterTraceEvent(const TraceEvent& trace_event) const override { |
| 125 return ContainsKey(whitelist_, trace_event.name()); |
| 126 } |
| 127 |
| 128 private: |
| 129 std::unordered_set<std::string> whitelist_; |
| 130 }; |
| 131 |
| 132 base::LazyInstance< |
| 133 std::list<std::unique_ptr<TraceLog::TraceEventFilter>>>::Leaky |
| 134 g_category_group_filter[MAX_CATEGORY_GROUPS] = {LAZY_INSTANCE_INITIALIZER}; |
| 135 |
| 136 TraceLog::TraceEventFilterConstructorForTesting |
| 137 g_trace_event_filter_constructor_for_testing = nullptr; |
| 138 |
105 // Indexes here have to match the g_category_groups array indexes above. | 139 // Indexes here have to match the g_category_groups array indexes above. |
106 const int kCategoryAlreadyShutdown = 1; | 140 const int kCategoryAlreadyShutdown = 1; |
107 const int kCategoryCategoriesExhausted = 2; | 141 const int kCategoryCategoriesExhausted = 2; |
108 const int kCategoryMetadata = 3; | 142 const int kCategoryMetadata = 3; |
109 const int kNumBuiltinCategories = 4; | 143 const int kNumBuiltinCategories = 4; |
110 // Skip default categories. | 144 // Skip default categories. |
111 base::subtle::AtomicWord g_category_index = kNumBuiltinCategories; | 145 base::subtle::AtomicWord g_category_index = kNumBuiltinCategories; |
112 | 146 |
113 // The name of the current thread. This is used to decide if the current | 147 // The name of the current thread. This is used to decide if the current |
114 // thread name has changed. We combine all the seen thread names into the | 148 // thread name has changed. We combine all the seen thread names into the |
(...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
172 size_t event_index, | 206 size_t event_index, |
173 TraceEventHandle* handle) { | 207 TraceEventHandle* handle) { |
174 DCHECK(chunk_seq); | 208 DCHECK(chunk_seq); |
175 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); | 209 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex); |
176 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); | 210 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize); |
177 handle->chunk_seq = chunk_seq; | 211 handle->chunk_seq = chunk_seq; |
178 handle->chunk_index = static_cast<uint16_t>(chunk_index); | 212 handle->chunk_index = static_cast<uint16_t>(chunk_index); |
179 handle->event_index = static_cast<uint16_t>(event_index); | 213 handle->event_index = static_cast<uint16_t>(event_index); |
180 } | 214 } |
181 | 215 |
| 216 uintptr_t GetCategoryIndex(const unsigned char* category_group_enabled) { |
| 217 // Calculate the index of the category group by finding |
| 218 // category_group_enabled in g_category_group_enabled array. |
| 219 uintptr_t category_begin = |
| 220 reinterpret_cast<uintptr_t>(g_category_group_enabled); |
| 221 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); |
| 222 DCHECK(category_ptr >= category_begin); |
| 223 DCHECK(category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + |
| 224 MAX_CATEGORY_GROUPS)) |
| 225 << "out of bounds category pointer"; |
| 226 uintptr_t category_index = |
| 227 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); |
| 228 |
| 229 return category_index; |
| 230 } |
| 231 |
182 } // namespace | 232 } // namespace |
183 | 233 |
184 // A helper class that allows the lock to be acquired in the middle of the scope | 234 // A helper class that allows the lock to be acquired in the middle of the scope |
185 // and unlocks at the end of scope if locked. | 235 // and unlocks at the end of scope if locked. |
186 class TraceLog::OptionalAutoLock { | 236 class TraceLog::OptionalAutoLock { |
187 public: | 237 public: |
188 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} | 238 explicit OptionalAutoLock(Lock* lock) : lock_(lock), locked_(false) {} |
189 | 239 |
190 ~OptionalAutoLock() { | 240 ~OptionalAutoLock() { |
191 if (locked_) | 241 if (locked_) |
(...skipping 246 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
438 TraceLog* tracelog = GetInstance(); | 488 TraceLog* tracelog = GetInstance(); |
439 if (!tracelog) { | 489 if (!tracelog) { |
440 DCHECK(!g_category_group_enabled[kCategoryAlreadyShutdown]); | 490 DCHECK(!g_category_group_enabled[kCategoryAlreadyShutdown]); |
441 return &g_category_group_enabled[kCategoryAlreadyShutdown]; | 491 return &g_category_group_enabled[kCategoryAlreadyShutdown]; |
442 } | 492 } |
443 return tracelog->GetCategoryGroupEnabledInternal(category_group); | 493 return tracelog->GetCategoryGroupEnabledInternal(category_group); |
444 } | 494 } |
445 | 495 |
446 const char* TraceLog::GetCategoryGroupName( | 496 const char* TraceLog::GetCategoryGroupName( |
447 const unsigned char* category_group_enabled) { | 497 const unsigned char* category_group_enabled) { |
448 // Calculate the index of the category group by finding | 498 return g_category_groups[GetCategoryIndex(category_group_enabled)]; |
449 // category_group_enabled in g_category_group_enabled array. | 499 } |
450 uintptr_t category_begin = | 500 |
451 reinterpret_cast<uintptr_t>(g_category_group_enabled); | 501 std::list<std::unique_ptr<TraceLog::TraceEventFilter>>* GetCategoryGroupFilter( |
452 uintptr_t category_ptr = reinterpret_cast<uintptr_t>(category_group_enabled); | 502 const unsigned char* category_group_enabled) { |
453 DCHECK(category_ptr >= category_begin); | 503 return g_category_group_filter[GetCategoryIndex(category_group_enabled)] |
454 DCHECK(category_ptr < reinterpret_cast<uintptr_t>(g_category_group_enabled + | 504 .Pointer(); |
455 MAX_CATEGORY_GROUPS)) | |
456 << "out of bounds category pointer"; | |
457 uintptr_t category_index = | |
458 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); | |
459 return g_category_groups[category_index]; | |
460 } | 505 } |
461 | 506 |
462 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { | 507 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { |
463 unsigned char enabled_flag = 0; | 508 unsigned char enabled_flag = 0; |
464 const char* category_group = g_category_groups[category_index]; | 509 const char* category_group = g_category_groups[category_index]; |
465 if (mode_ == RECORDING_MODE && | 510 if (mode_ == RECORDING_MODE && |
466 trace_config_.IsCategoryGroupEnabled(category_group)) { | 511 trace_config_.IsCategoryGroupEnabled(category_group)) { |
467 enabled_flag |= ENABLED_FOR_RECORDING; | 512 enabled_flag |= ENABLED_FOR_RECORDING; |
468 } | 513 } |
469 | 514 |
470 if (event_callback_ && | 515 if (event_callback_ && |
471 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { | 516 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { |
472 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; | 517 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; |
473 } | 518 } |
474 | 519 |
475 #if defined(OS_WIN) | 520 #if defined(OS_WIN) |
476 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( | 521 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
477 category_group)) { | 522 category_group)) { |
478 enabled_flag |= ENABLED_FOR_ETW_EXPORT; | 523 enabled_flag |= ENABLED_FOR_ETW_EXPORT; |
479 } | 524 } |
480 #endif | 525 #endif |
481 | 526 |
482 // TODO(primiano): this is a temporary workaround for catapult:#2341, | 527 // TODO(primiano): this is a temporary workaround for catapult:#2341, |
483 // to guarantee that metadata events are always added even if the category | 528 // to guarantee that metadata events are always added even if the category |
484 // filter is "-*". See crbug.com/618054 for more details and long-term fix. | 529 // filter is "-*". See crbug.com/618054 for more details and long-term fix. |
485 if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata")) | 530 if (mode_ == RECORDING_MODE && !strcmp(category_group, "__metadata")) |
486 enabled_flag |= ENABLED_FOR_RECORDING; | 531 enabled_flag |= ENABLED_FOR_RECORDING; |
487 | 532 |
| 533 // Having a filter is an exceptional case, so we avoid |
| 534 // the LazyInstance creation in the common case. |
| 535 if (!(g_category_group_filter[category_index] == nullptr)) |
| 536 g_category_group_filter[category_index].Get().clear(); |
| 537 |
| 538 for (const auto& event_filter : trace_config_.event_filters()) { |
| 539 if (event_filter.IsCategoryGroupEnabled(category_group)) { |
| 540 std::unique_ptr<TraceEventFilter> new_filter; |
| 541 |
| 542 if (event_filter.predicate_name() == "event_whitelist_predicate") { |
| 543 new_filter = |
| 544 WrapUnique(new EventNameFilter(event_filter.filter_args())); |
| 545 } else if (event_filter.predicate_name() == "testing_predicate") { |
| 546 CHECK(g_trace_event_filter_constructor_for_testing); |
| 547 new_filter = g_trace_event_filter_constructor_for_testing(); |
| 548 } |
| 549 |
| 550 if (new_filter) { |
| 551 g_category_group_filter[category_index].Get().push_back( |
| 552 std::move(new_filter)); |
| 553 enabled_flag |= ENABLED_FOR_FILTERING; |
| 554 } |
| 555 } |
| 556 } |
| 557 |
488 g_category_group_enabled[category_index] = enabled_flag; | 558 g_category_group_enabled[category_index] = enabled_flag; |
489 } | 559 } |
490 | 560 |
491 void TraceLog::UpdateCategoryGroupEnabledFlags() { | 561 void TraceLog::UpdateCategoryGroupEnabledFlags() { |
492 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); | 562 size_t category_index = base::subtle::NoBarrier_Load(&g_category_index); |
493 for (size_t i = 0; i < category_index; i++) | 563 for (size_t i = 0; i < category_index; i++) |
494 UpdateCategoryGroupEnabledFlag(i); | 564 UpdateCategoryGroupEnabledFlag(i); |
495 } | 565 } |
496 | 566 |
497 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { | 567 void TraceLog::UpdateSyntheticDelaysFromTraceConfig() { |
(...skipping 766 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1264 #if defined(OS_WIN) | 1334 #if defined(OS_WIN) |
1265 // This is done sooner rather than later, to avoid creating the event and | 1335 // This is done sooner rather than later, to avoid creating the event and |
1266 // acquiring the lock, which is not needed for ETW as it's already threadsafe. | 1336 // acquiring the lock, which is not needed for ETW as it's already threadsafe. |
1267 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) | 1337 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) |
1268 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, | 1338 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, |
1269 num_args, arg_names, arg_types, arg_values, | 1339 num_args, arg_names, arg_types, arg_values, |
1270 convertable_values); | 1340 convertable_values); |
1271 #endif // OS_WIN | 1341 #endif // OS_WIN |
1272 | 1342 |
1273 std::string console_message; | 1343 std::string console_message; |
1274 if (*category_group_enabled & ENABLED_FOR_RECORDING) { | 1344 std::unique_ptr<TraceEvent> filtered_trace_event; |
| 1345 if (*category_group_enabled & ENABLED_FOR_FILTERING) { |
| 1346 std::unique_ptr<TraceEvent> new_trace_event(new TraceEvent); |
| 1347 new_trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
| 1348 phase, category_group_enabled, name, scope, id, |
| 1349 bind_id, num_args, arg_names, arg_types, |
| 1350 arg_values, convertable_values, flags); |
| 1351 |
| 1352 auto filter_list = GetCategoryGroupFilter(category_group_enabled); |
| 1353 DCHECK(!filter_list->empty()); |
| 1354 |
| 1355 bool should_add_event = false; |
| 1356 for (const auto& trace_event_filter : *filter_list) { |
| 1357 if (trace_event_filter->FilterTraceEvent(*new_trace_event)) |
| 1358 should_add_event = true; |
| 1359 } |
| 1360 |
| 1361 if (should_add_event) |
| 1362 filtered_trace_event = std::move(new_trace_event); |
| 1363 } |
| 1364 |
| 1365 // Add the trace event if we're either *just* recording (and not filtering) |
 | 1366 // or if one of our filters indicates the event should be added. |
| 1367 if (((*category_group_enabled & ENABLED_FOR_RECORDING) && |
| 1368 (*category_group_enabled & ENABLED_FOR_FILTERING) == 0) || |
| 1369 filtered_trace_event) { |
1275 OptionalAutoLock lock(&lock_); | 1370 OptionalAutoLock lock(&lock_); |
1276 | 1371 |
1277 TraceEvent* trace_event = NULL; | 1372 TraceEvent* trace_event = NULL; |
1278 if (thread_local_event_buffer) { | 1373 if (thread_local_event_buffer) { |
1279 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); | 1374 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
1280 } else { | 1375 } else { |
1281 lock.EnsureAcquired(); | 1376 lock.EnsureAcquired(); |
1282 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); | 1377 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); |
1283 } | 1378 } |
1284 | 1379 |
1285 if (trace_event) { | 1380 if (trace_event) { |
1286 trace_event->Initialize(thread_id, | 1381 if (filtered_trace_event) { |
1287 offset_event_timestamp, | 1382 trace_event->MoveFrom(std::move(filtered_trace_event)); |
1288 thread_now, | 1383 } else { |
1289 phase, | 1384 trace_event->Initialize(thread_id, offset_event_timestamp, thread_now, |
1290 category_group_enabled, | 1385 phase, category_group_enabled, name, scope, id, |
1291 name, | 1386 bind_id, num_args, arg_names, arg_types, |
1292 scope, | 1387 arg_values, convertable_values, flags); |
1293 id, | 1388 } |
1294 bind_id, | |
1295 num_args, | |
1296 arg_names, | |
1297 arg_types, | |
1298 arg_values, | |
1299 convertable_values, | |
1300 flags); | |
1301 | 1389 |
1302 #if defined(OS_ANDROID) | 1390 #if defined(OS_ANDROID) |
1303 trace_event->SendToATrace(); | 1391 trace_event->SendToATrace(); |
1304 #endif | 1392 #endif |
1305 } | 1393 } |
1306 | 1394 |
1307 if (trace_options() & kInternalEchoToConsole) { | 1395 if (trace_options() & kInternalEchoToConsole) { |
1308 console_message = EventToConsoleMessage( | 1396 console_message = EventToConsoleMessage( |
1309 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, | 1397 phase == TRACE_EVENT_PHASE_COMPLETE ? TRACE_EVENT_PHASE_BEGIN : phase, |
1310 timestamp, trace_event); | 1398 timestamp, trace_event); |
(...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1428 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); | 1516 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); |
1429 | 1517 |
1430 log << "\x1b[0;m"; | 1518 log << "\x1b[0;m"; |
1431 | 1519 |
1432 if (phase == TRACE_EVENT_PHASE_BEGIN) | 1520 if (phase == TRACE_EVENT_PHASE_BEGIN) |
1433 thread_event_start_times_[thread_id].push(timestamp); | 1521 thread_event_start_times_[thread_id].push(timestamp); |
1434 | 1522 |
1435 return log.str(); | 1523 return log.str(); |
1436 } | 1524 } |
1437 | 1525 |
| 1526 void TraceLog::EndFilteredEvent(const unsigned char* category_group_enabled, |
| 1527 const char* name, |
| 1528 TraceEventHandle handle) { |
| 1529 auto filter_list = GetCategoryGroupFilter(category_group_enabled); |
| 1530 DCHECK(!filter_list->empty()); |
| 1531 |
| 1532 for (const auto& trace_event_filter : *filter_list) { |
| 1533 trace_event_filter->EndEvent(name, |
| 1534 GetCategoryGroupName(category_group_enabled)); |
| 1535 } |
| 1536 } |
| 1537 |
1438 void TraceLog::UpdateTraceEventDuration( | 1538 void TraceLog::UpdateTraceEventDuration( |
1439 const unsigned char* category_group_enabled, | 1539 const unsigned char* category_group_enabled, |
1440 const char* name, | 1540 const char* name, |
1441 TraceEventHandle handle) { | 1541 TraceEventHandle handle) { |
1442 char category_group_enabled_local = *category_group_enabled; | 1542 char category_group_enabled_local = *category_group_enabled; |
1443 if (!category_group_enabled_local) | 1543 if (!category_group_enabled_local) |
1444 return; | 1544 return; |
1445 | 1545 |
1446 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when | 1546 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when |
1447 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> | 1547 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> |
(...skipping 144 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1592 void TraceLog::WaitSamplingEventForTesting() { | 1692 void TraceLog::WaitSamplingEventForTesting() { |
1593 if (!sampling_thread_) | 1693 if (!sampling_thread_) |
1594 return; | 1694 return; |
1595 sampling_thread_->WaitSamplingEventForTesting(); | 1695 sampling_thread_->WaitSamplingEventForTesting(); |
1596 } | 1696 } |
1597 | 1697 |
1598 void TraceLog::DeleteForTesting() { | 1698 void TraceLog::DeleteForTesting() { |
1599 internal::DeleteTraceLogForTesting::Delete(); | 1699 internal::DeleteTraceLogForTesting::Delete(); |
1600 } | 1700 } |
1601 | 1701 |
| 1702 void TraceLog::SetTraceEventFilterConstructorForTesting( |
| 1703 TraceEventFilterConstructorForTesting predicate) { |
| 1704 g_trace_event_filter_constructor_for_testing = predicate; |
| 1705 } |
| 1706 |
1602 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { | 1707 TraceEvent* TraceLog::GetEventByHandle(TraceEventHandle handle) { |
1603 return GetEventByHandleInternal(handle, NULL); | 1708 return GetEventByHandleInternal(handle, NULL); |
1604 } | 1709 } |
1605 | 1710 |
1606 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle, | 1711 TraceEvent* TraceLog::GetEventByHandleInternal(TraceEventHandle handle, |
1607 OptionalAutoLock* lock) { | 1712 OptionalAutoLock* lock) { |
1608 if (!handle.chunk_seq) | 1713 if (!handle.chunk_seq) |
1609 return NULL; | 1714 return NULL; |
1610 | 1715 |
1611 if (thread_local_event_buffer_.Get()) { | 1716 if (thread_local_event_buffer_.Get()) { |
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
1779 } | 1884 } |
1780 | 1885 |
1781 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1886 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
1782 if (*category_group_enabled_) { | 1887 if (*category_group_enabled_) { |
1783 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1888 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
1784 event_handle_); | 1889 event_handle_); |
1785 } | 1890 } |
1786 } | 1891 } |
1787 | 1892 |
1788 } // namespace trace_event_internal | 1893 } // namespace trace_event_internal |
OLD | NEW |