OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <cmath> | 8 #include <cmath> |
9 #include <utility> | 9 #include <utility> |
10 | 10 |
(...skipping 60 matching lines...)
71 512000000 / kTraceBufferChunkSize; | 71 512000000 / kTraceBufferChunkSize; |
72 static_assert( | 72 static_assert( |
73 kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex, | 73 kTraceEventVectorBigBufferChunks <= TraceBufferChunk::kMaxChunkIndex, |
74 "Too many big buffer chunks"); | 74 "Too many big buffer chunks"); |
75 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize; | 75 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize; |
76 static_assert( | 76 static_assert( |
77 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex, | 77 kTraceEventVectorBufferChunks <= TraceBufferChunk::kMaxChunkIndex, |
78 "Too many vector buffer chunks"); | 78 "Too many vector buffer chunks"); |
79 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; | 79 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; |
80 | 80 |
81 // Can store results for 30 seconds with 1 ms sampling interval. | |
82 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize; | |
83 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. | 81 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. |
84 const size_t kEchoToConsoleTraceEventBufferChunks = 256; | 82 const size_t kEchoToConsoleTraceEventBufferChunks = 256; |
85 | 83 |
86 const size_t kTraceEventBufferSizeInBytes = 100 * 1024; | 84 const size_t kTraceEventBufferSizeInBytes = 100 * 1024; |
87 const int kThreadFlushTimeoutMs = 3000; | 85 const int kThreadFlushTimeoutMs = 3000; |
88 | 86 |
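For a sense of scale, the chunk counts above convert directly into event capacities. A worked sketch of the arithmetic, assuming kTraceBufferChunkSize is 64 events per chunk (treat that number as an assumption; verify it against TraceBufferChunk in trace_buffer.h):

    #include <cstddef>

    // Capacity math for the buffer constants above, with an assumed
    // chunk size of 64 events per chunk.
    constexpr size_t kChunkSize = 64;
    constexpr size_t kBigChunks = 512000000 / kChunkSize;  // 8,000,000 chunks
    constexpr size_t kVectorChunks = 256000 / kChunkSize;  // 4,000 chunks
    constexpr size_t kRingChunks = kVectorChunks / 4;      // 1,000 chunks
    // Total capacity = chunks * events per chunk.
    static_assert(kBigChunks * kChunkSize == 512000000, "512M events");
    static_assert(kRingChunks * kChunkSize == 64000, "64K events in ring mode");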
89 #define MAX_CATEGORY_GROUPS 100 | 87 #define MAX_CATEGORY_GROUPS 100 |
90 | 88 |
91 // Parallel arrays g_category_groups and g_category_group_enabled are separate | 89 // Parallel arrays g_category_groups and g_category_group_enabled are separate |
92 // so that a pointer to a member of g_category_group_enabled can be easily | 90 // so that a pointer to a member of g_category_group_enabled can be easily |
(...skipping 353 matching lines...)
446 << "out of bounds category pointer"; | 444 << "out of bounds category pointer"; |
447 uintptr_t category_index = | 445 uintptr_t category_index = |
448 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); | 446 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); |
449 return g_category_groups[category_index]; | 447 return g_category_groups[category_index]; |
450 } | 448 } |
451 | 449 |
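The pointer arithmetic above is the payoff of the parallel-array layout: TRACE_EVENT macros cache only a const unsigned char* into g_category_group_enabled, and GetCategoryGroupName() recovers the name by turning that pointer back into an index. A self-contained sketch of the same idiom (arrays and contents here are illustrative, not the real globals):

    #include <cstdint>
    #include <cstdio>

    // Illustrative parallel arrays mirroring g_category_groups and
    // g_category_group_enabled; sizes and names are made up.
    const char* g_names[3] = {"toplevel", "ipc", "gpu"};
    unsigned char g_enabled[3] = {0, 0, 0};

    const char* NameFromEnabledPtr(const unsigned char* ptr) {
      // Same arithmetic as the function above: the offset within the
      // enabled array indexes the names array.
      uintptr_t index = static_cast<uintptr_t>(ptr - g_enabled);
      return g_names[index];
    }

    int main() {
      printf("%s\n", NameFromEnabledPtr(&g_enabled[1]));  // prints "ipc"
      return 0;
    }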
452 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { | 450 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) { |
453 unsigned char enabled_flag = 0; | 451 unsigned char enabled_flag = 0; |
454 const char* category_group = g_category_groups[category_index]; | 452 const char* category_group = g_category_groups[category_index]; |
455 if (mode_ == RECORDING_MODE && | 453 if (mode_ == RECORDING_MODE && |
456 trace_config_.IsCategoryGroupEnabled(category_group)) | 454 trace_config_.IsCategoryGroupEnabled(category_group)) { |
457 enabled_flag |= ENABLED_FOR_RECORDING; | 455 enabled_flag |= ENABLED_FOR_RECORDING; |
458 else if (mode_ == MONITORING_MODE && | 456 } |
459 trace_config_.IsCategoryGroupEnabled(category_group)) | 457 |
460 enabled_flag |= ENABLED_FOR_MONITORING; | |
461 if (event_callback_ && | 458 if (event_callback_ && |
462 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) | 459 event_callback_trace_config_.IsCategoryGroupEnabled(category_group)) { |
463 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; | 460 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK; |
| 461 } |
| 462 |
464 #if defined(OS_WIN) | 463 #if defined(OS_WIN) |
465 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( | 464 if (base::trace_event::TraceEventETWExport::IsCategoryGroupEnabled( |
466 category_group)) { | 465 category_group)) { |
467 enabled_flag |= ENABLED_FOR_ETW_EXPORT; | 466 enabled_flag |= ENABLED_FOR_ETW_EXPORT; |
468 } | 467 } |
469 #endif | 468 #endif |
470 | 469 |
471 g_category_group_enabled[category_index] = enabled_flag; | 470 g_category_group_enabled[category_index] = enabled_flag; |
472 } | 471 } |
473 | 472 |
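Call sites consume g_category_group_enabled[i] as a bitmask, which is what lets a single byte per category answer the recording, event-callback, and ETW questions in one load. A minimal sketch of that pattern (the bit positions are assumed placeholders; the real flag values are defined with TraceLog):

    // Sketch of the bitmask pattern; bit positions are assumptions.
    enum : unsigned char {
      ENABLED_FOR_RECORDING = 1 << 0,
      ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
      ENABLED_FOR_ETW_EXPORT = 1 << 3,
    };

    void OnEvent(const unsigned char* category_group_enabled) {
      // One load answers every "is anyone listening?" question.
      const unsigned char flags = *category_group_enabled;
      if (flags & ENABLED_FOR_RECORDING) { /* write to the trace buffer */ }
      if (flags & ENABLED_FOR_EVENT_CALLBACK) { /* invoke the callback */ }
      if (flags & ENABLED_FOR_ETW_EXPORT) { /* forward to ETW */ }
    }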
(...skipping 537 matching lines...)
1011 "the thread to avoid its trace events from being lost."; | 1010 "the thread to avoid its trace events from being lost."; |
1012 for (hash_set<MessageLoop*>::const_iterator it = | 1011 for (hash_set<MessageLoop*>::const_iterator it = |
1013 thread_message_loops_.begin(); | 1012 thread_message_loops_.begin(); |
1014 it != thread_message_loops_.end(); ++it) { | 1013 it != thread_message_loops_.end(); ++it) { |
1015 LOG(WARNING) << "Thread: " << (*it)->thread_name(); | 1014 LOG(WARNING) << "Thread: " << (*it)->thread_name(); |
1016 } | 1015 } |
1017 } | 1016 } |
1018 FinishFlush(generation, discard_events); | 1017 FinishFlush(generation, discard_events); |
1019 } | 1018 } |
1020 | 1019 |
1021 void TraceLog::FlushButLeaveBufferIntact( | |
1022 const TraceLog::OutputCallback& flush_output_callback) { | |
1023 scoped_ptr<TraceBuffer> previous_logged_events; | |
1024 ArgumentFilterPredicate argument_filter_predicate; | |
1025 { | |
1026 AutoLock lock(lock_); | |
1027 AddMetadataEventsWhileLocked(); | |
1028 if (thread_shared_chunk_) { | |
1029 // Return the chunk to the main buffer to flush the sampling data. | |
1030 logged_events_->ReturnChunk(thread_shared_chunk_index_, | |
1031 std::move(thread_shared_chunk_)); | |
1032 } | |
1033 previous_logged_events = logged_events_->CloneForIteration(); | |
1034 | |
1035 if (trace_options() & kInternalEnableArgumentFilter) { | |
1036 CHECK(!argument_filter_predicate_.is_null()); | |
1037 argument_filter_predicate = argument_filter_predicate_; | |
1038 } | |
1039 } // release lock | |
1040 | |
1041 ConvertTraceEventsToTraceFormat(std::move(previous_logged_events), | |
1042 flush_output_callback, | |
1043 argument_filter_predicate); | |
1044 } | |
1045 | |
1046 void TraceLog::UseNextTraceBuffer() { | 1020 void TraceLog::UseNextTraceBuffer() { |
1047 logged_events_.reset(CreateTraceBuffer()); | 1021 logged_events_.reset(CreateTraceBuffer()); |
1048 subtle::NoBarrier_AtomicIncrement(&generation_, 1); | 1022 subtle::NoBarrier_AtomicIncrement(&generation_, 1); |
1049 thread_shared_chunk_.reset(); | 1023 thread_shared_chunk_.reset(); |
1050 thread_shared_chunk_index_ = 0; | 1024 thread_shared_chunk_index_ = 0; |
1051 } | 1025 } |
1052 | 1026 |
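The atomic bump of generation_ is what lets an in-flight flush detect that the buffer it was draining has been replaced. A self-contained sketch of the guard, using std::atomic in place of the real subtle::NoBarrier accessors (function names here are illustrative; TraceLog performs this check against its own generation counter):

    #include <atomic>

    std::atomic<int> generation{0};

    void UseNextBuffer() {
      generation.fetch_add(1);  // as in UseNextTraceBuffer()
    }

    bool FlushStillValid(int captured_generation) {
      // A flush records the generation when it starts; if the log has
      // since moved to a fresh buffer, its pending events are stale.
      return captured_generation == generation.load();
    }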
1053 TraceEventHandle TraceLog::AddTraceEvent( | 1027 TraceEventHandle TraceLog::AddTraceEvent( |
1054 char phase, | 1028 char phase, |
1055 const unsigned char* category_group_enabled, | 1029 const unsigned char* category_group_enabled, |
(...skipping 206 matching lines...)
1262 #if defined(OS_WIN) | 1236 #if defined(OS_WIN) |
1263 // This is done sooner rather than later, to avoid creating the event and | 1237 // This is done sooner rather than later, to avoid creating the event and |
1264 // acquiring the lock, which is not needed for ETW as it's already thread-safe. | 1238 // acquiring the lock, which is not needed for ETW as it's already thread-safe. |
1265 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) | 1239 if (*category_group_enabled & ENABLED_FOR_ETW_EXPORT) |
1266 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, | 1240 TraceEventETWExport::AddEvent(phase, category_group_enabled, name, id, |
1267 num_args, arg_names, arg_types, arg_values, | 1241 num_args, arg_names, arg_types, arg_values, |
1268 convertable_values); | 1242 convertable_values); |
1269 #endif // OS_WIN | 1243 #endif // OS_WIN |
1270 | 1244 |
1271 std::string console_message; | 1245 std::string console_message; |
1272 if (*category_group_enabled & | 1246 if (*category_group_enabled & ENABLED_FOR_RECORDING) { |
1273 (ENABLED_FOR_RECORDING | ENABLED_FOR_MONITORING)) { | |
1274 OptionalAutoLock lock(&lock_); | 1247 OptionalAutoLock lock(&lock_); |
1275 | 1248 |
1276 TraceEvent* trace_event = NULL; | 1249 TraceEvent* trace_event = NULL; |
1277 if (thread_local_event_buffer) { | 1250 if (thread_local_event_buffer) { |
1278 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); | 1251 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
1279 } else { | 1252 } else { |
1280 lock.EnsureAcquired(); | 1253 lock.EnsureAcquired(); |
1281 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); | 1254 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); |
1282 } | 1255 } |
1283 | 1256 |
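OptionalAutoLock is the reason the thread-local-buffer branch above is a true fast path: the lock is acquired only when EnsureAcquired() runs, and released at scope exit only if it was ever taken. A minimal sketch of the helper, using std::mutex in place of base::Lock (the real class lives with TraceLog's internals):

    #include <mutex>

    // Sketch of lazy lock acquisition: pay for the lock only on the
    // slow path that actually needs it.
    class OptionalAutoLock {
     public:
      explicit OptionalAutoLock(std::mutex* lock) : lock_(lock) {}
      ~OptionalAutoLock() {
        if (locked_) lock_->unlock();
      }
      void EnsureAcquired() {
        if (!locked_) {
          lock_->lock();
          locked_ = true;
        }
      }
     private:
      std::mutex* const lock_;
      bool locked_ = false;
    };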
(...skipping 394 matching lines...)
1678 // This will flush the thread local buffer. | 1651 // This will flush the thread local buffer. |
1679 delete thread_local_event_buffer_.Get(); | 1652 delete thread_local_event_buffer_.Get(); |
1680 } | 1653 } |
1681 } | 1654 } |
1682 | 1655 |
1683 TraceBuffer* TraceLog::CreateTraceBuffer() { | 1656 TraceBuffer* TraceLog::CreateTraceBuffer() { |
1684 InternalTraceOptions options = trace_options(); | 1657 InternalTraceOptions options = trace_options(); |
1685 if (options & kInternalRecordContinuously) | 1658 if (options & kInternalRecordContinuously) |
1686 return TraceBuffer::CreateTraceBufferRingBuffer( | 1659 return TraceBuffer::CreateTraceBufferRingBuffer( |
1687 kTraceEventRingBufferChunks); | 1660 kTraceEventRingBufferChunks); |
1688 else if ((options & kInternalEnableSampling) && mode_ == MONITORING_MODE) | |
1689 return TraceBuffer::CreateTraceBufferRingBuffer( | |
1690 kMonitorTraceEventBufferChunks); | |
1691 else if (options & kInternalEchoToConsole) | 1661 else if (options & kInternalEchoToConsole) |
1692 return TraceBuffer::CreateTraceBufferRingBuffer( | 1662 return TraceBuffer::CreateTraceBufferRingBuffer( |
1693 kEchoToConsoleTraceEventBufferChunks); | 1663 kEchoToConsoleTraceEventBufferChunks); |
1694 else if (options & kInternalRecordAsMuchAsPossible) | 1664 else if (options & kInternalRecordAsMuchAsPossible) |
1695 return TraceBuffer::CreateTraceBufferVectorOfSize( | 1665 return TraceBuffer::CreateTraceBufferVectorOfSize( |
1696 kTraceEventVectorBigBufferChunks); | 1666 kTraceEventVectorBigBufferChunks); |
1697 return TraceBuffer::CreateTraceBufferVectorOfSize( | 1667 return TraceBuffer::CreateTraceBufferVectorOfSize( |
1698 kTraceEventVectorBufferChunks); | 1668 kTraceEventVectorBufferChunks); |
1699 } | 1669 } |
1700 | 1670 |
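Which branch CreateTraceBuffer() takes is decided by the record mode of the TraceConfig passed to SetEnabled(). A hedged usage sketch for the ring-buffer path, against the TraceLog API as it stands at this revision ("record-continuously" is the mode string that maps to kInternalRecordContinuously):

    #include "base/trace_event/trace_config.h"
    #include "base/trace_event/trace_log.h"

    void StartContinuousTracing() {
      // Record all categories into the ring buffer until disabled.
      base::trace_event::TraceConfig config("*", "record-continuously");
      base::trace_event::TraceLog::GetInstance()->SetEnabled(
          config, base::trace_event::TraceLog::RECORDING_MODE);
    }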
(...skipping 55 matching lines...)
1756 } | 1726 } |
1757 | 1727 |
1758 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1728 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
1759 if (*category_group_enabled_) { | 1729 if (*category_group_enabled_) { |
1760 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1730 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
1761 event_handle_); | 1731 event_handle_); |
1762 } | 1732 } |
1763 } | 1733 } |
1764 | 1734 |
1765 } // namespace trace_event_internal | 1735 } // namespace trace_event_internal |
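ScopedTraceBinaryEfficient pairs an AddTraceEvent in its constructor with the duration update in the destructor above; it is normally reached through the TRACE_EVENT_BINARY_EFFICIENT0 macro rather than constructed directly. A usage sketch (macro name per this revision's trace_event.h; verify before relying on it):

    #include "base/trace_event/trace_event.h"

    void DoExpensiveWork() {
      // Begin event at construction; duration recorded at scope exit.
      TRACE_EVENT_BINARY_EFFICIENT0("gpu", "DoExpensiveWork");
      // ... the work being measured ...
    }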