Chromium Code Reviews

Side by Side Diff: base/debug/trace_event_impl.cc

Issue 22962004: Thread-local trace-event buffers (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 7 years, 4 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/trace_event_impl.h" 5 #include "base/debug/trace_event_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/base_switches.h" 9 #include "base/base_switches.h"
10 #include "base/bind.h" 10 #include "base/bind.h"
11 #include "base/command_line.h" 11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h" 12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h" 13 #include "base/debug/trace_event.h"
14 #include "base/format_macros.h" 14 #include "base/format_macros.h"
15 #include "base/lazy_instance.h" 15 #include "base/lazy_instance.h"
16 #include "base/memory/singleton.h" 16 #include "base/memory/singleton.h"
17 #include "base/message_loop/message_loop.h"
17 #include "base/process/process_metrics.h" 18 #include "base/process/process_metrics.h"
18 #include "base/stl_util.h" 19 #include "base/stl_util.h"
19 #include "base/strings/string_split.h" 20 #include "base/strings/string_split.h"
20 #include "base/strings/string_tokenizer.h" 21 #include "base/strings/string_tokenizer.h"
21 #include "base/strings/string_util.h" 22 #include "base/strings/string_util.h"
22 #include "base/strings/stringprintf.h" 23 #include "base/strings/stringprintf.h"
23 #include "base/strings/utf_string_conversions.h" 24 #include "base/strings/utf_string_conversions.h"
24 #include "base/synchronization/cancellation_flag.h" 25 #include "base/synchronization/cancellation_flag.h"
25 #include "base/synchronization/waitable_event.h" 26 #include "base/synchronization/waitable_event.h"
26 #include "base/sys_info.h" 27 #include "base/sys_info.h"
27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" 28 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
28 #include "base/threading/platform_thread.h" 29 #include "base/threading/platform_thread.h"
29 #include "base/threading/thread_id_name_manager.h" 30 #include "base/threading/thread_id_name_manager.h"
30 #include "base/threading/thread_local.h"
31 #include "base/time/time.h" 31 #include "base/time/time.h"
32 32
33 #if defined(OS_WIN) 33 #if defined(OS_WIN)
34 #include "base/debug/trace_event_win.h" 34 #include "base/debug/trace_event_win.h"
35 #endif 35 #endif
36 36
37 class DeleteTraceLogForTesting { 37 class DeleteTraceLogForTesting {
38 public: 38 public:
39 static void Delete() { 39 static void Delete() {
40 Singleton<base::debug::TraceLog, 40 Singleton<base::debug::TraceLog,
41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0); 41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
42 } 42 }
43 }; 43 };
44 44
45 // The thread buckets for the sampling profiler. 45 // The thread buckets for the sampling profiler.
46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; 46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
47 47
48 namespace base { 48 namespace base {
49 namespace debug { 49 namespace debug {
50 50
51 namespace {
52
53 // The overhead of TraceEvent above this threshold will be reported in the
54 // trace.
55 const int kOverheadReportThresholdInMicroseconds = 5;
56
51 // Controls the number of trace events we will buffer in-memory 57 // Controls the number of trace events we will buffer in-memory
52 // before throwing them away. 58 // before throwing them away.
53 const size_t kTraceEventBufferSize = 500000; 59 const size_t kTraceEventBufferSize = 500000;
60 const size_t kTraceEventThreadLocalBufferSize = 1024;
54 const size_t kTraceEventBatchSize = 1000; 61 const size_t kTraceEventBatchSize = 1000;
55 const size_t kTraceEventInitialBufferSize = 1024; 62 const size_t kTraceEventInitialBufferSize = 1024;
56 63
57 #define MAX_CATEGORY_GROUPS 100 64 #define MAX_CATEGORY_GROUPS 100
58 65
59 namespace {
60
61 // Parallel arrays g_category_groups and g_category_group_enabled are separate 66 // Parallel arrays g_category_groups and g_category_group_enabled are separate
62 // so that a pointer to a member of g_category_group_enabled can be easily 67 // so that a pointer to a member of g_category_group_enabled can be easily
63 // converted to an index into g_category_groups. This allows macros to deal 68 // converted to an index into g_category_groups. This allows macros to deal
64 // only with char enabled pointers from g_category_group_enabled, and we can 69 // only with char enabled pointers from g_category_group_enabled, and we can
65 // convert internally to determine the category name from the char enabled 70 // convert internally to determine the category name from the char enabled
66 // pointer. 71 // pointer.
67 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { 72 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
68 "tracing already shutdown", 73 "tracing already shutdown",
69 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", 74 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
70 "__metadata", 75 "__metadata",
76 "trace-event-overhead"
dsinclair 2013/08/16 14:38:32 This should probably be disabled-by-default-trace-
Xianzhu 2013/08/16 19:41:03 I'd like to leave it enabled by default to prevent the
71 }; 77 };
72 78
73 // The enabled flag is char instead of bool so that the API can be used from C. 79 // The enabled flag is char instead of bool so that the API can be used from C.
74 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 }; 80 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
75 const int g_category_already_shutdown = 0; 81 const int g_category_already_shutdown = 0;
76 const int g_category_categories_exhausted = 1; 82 const int g_category_categories_exhausted = 1;
77 const int g_category_metadata = 2; 83 const int g_category_metadata = 2;
78 const int g_num_builtin_categories = 3; 84 const int g_category_trace_event_overhead = 3;
85 const int g_num_builtin_categories = 4;
79 int g_category_index = g_num_builtin_categories; // Skip default categories. 86 int g_category_index = g_num_builtin_categories; // Skip default categories.
80 87
81 // The name of the current thread. This is used to decide if the current 88 // The name of the current thread. This is used to decide if the current
82 // thread name has changed. We combine all the seen thread names into the 89 // thread name has changed. We combine all the seen thread names into the
83 // output name for the thread. 90 // output name for the thread.
84 LazyInstance<ThreadLocalPointer<const char> >::Leaky 91 LazyInstance<ThreadLocalPointer<const char> >::Leaky
85 g_current_thread_name = LAZY_INSTANCE_INITIALIZER; 92 g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
86 93
87 const char kRecordUntilFull[] = "record-until-full"; 94 const char kRecordUntilFull[] = "record-until-full";
88 const char kRecordContinuously[] = "record-continuously"; 95 const char kRecordContinuously[] = "record-continuously";
89 const char kEnableSampling[] = "enable-sampling"; 96 const char kEnableSampling[] = "enable-sampling";
90 97
91 size_t NextIndex(size_t index) { 98 size_t NextIndex(size_t index) {
92 index++; 99 index++;
93 if (index >= kTraceEventBufferSize) 100 if (index >= kTraceEventBufferSize)
94 index = 0; 101 index = 0;
95 return index; 102 return index;
96 } 103 }
97 104
98 } // namespace
99
100 class TraceBufferRingBuffer : public TraceBuffer { 105 class TraceBufferRingBuffer : public TraceBuffer {
101 public: 106 public:
102 TraceBufferRingBuffer() 107 TraceBufferRingBuffer()
103 : unused_event_index_(0), 108 : unused_event_index_(0),
104 oldest_event_index_(0) { 109 oldest_event_index_(0) {
105 logged_events_.reserve(kTraceEventInitialBufferSize); 110 logged_events_.reserve(kTraceEventInitialBufferSize);
106 } 111 }
107 112
108 virtual ~TraceBufferRingBuffer() {} 113 virtual ~TraceBufferRingBuffer() {}
109 114
(...skipping 61 matching lines...)
171 class TraceBufferVector : public TraceBuffer { 176 class TraceBufferVector : public TraceBuffer {
172 public: 177 public:
173 TraceBufferVector() : current_iteration_index_(0) { 178 TraceBufferVector() : current_iteration_index_(0) {
174 logged_events_.reserve(kTraceEventInitialBufferSize); 179 logged_events_.reserve(kTraceEventInitialBufferSize);
175 } 180 }
176 181
177 virtual ~TraceBufferVector() { 182 virtual ~TraceBufferVector() {
178 } 183 }
179 184
180 virtual void AddEvent(const TraceEvent& event) OVERRIDE { 185 virtual void AddEvent(const TraceEvent& event) OVERRIDE {
181 // Note, we have two callers which need to be handled. The first is 186 // Note, we have two callers which need to be handled:
182 // AddTraceEventWithThreadIdAndTimestamp() which checks Size() and does an 187 // - AddEventToMainBufferWhileLocked() which has two cases:
183 // early exit if full. The second is AddThreadNameMetadataEvents(). 188 // - called directly from AddTraceEventWithThreadIdAndTimeStamp()
189 // which checks if buffer is full and does an early exit if full;
190 // - called from ThreadLocalEventBuffer::FlushWhileLocked();
191 // - AddThreadNameMetadataEvents().
 184 // We cannot DCHECK(!IsFull()) because we have to add the metadata 192 // We cannot DCHECK(!IsFull()) because we have to add the metadata
185 // events even if the buffer is full. 193 // events and flush thread-local buffers even if the buffer is full.
186 logged_events_.push_back(event); 194 logged_events_.push_back(event);
187 } 195 }
188 196
189 virtual bool HasMoreEvents() const OVERRIDE { 197 virtual bool HasMoreEvents() const OVERRIDE {
190 return current_iteration_index_ < Size(); 198 return current_iteration_index_ < Size();
191 } 199 }
192 200
193 virtual const TraceEvent& NextEvent() OVERRIDE { 201 virtual const TraceEvent& NextEvent() OVERRIDE {
194 DCHECK(HasMoreEvents()); 202 DCHECK(HasMoreEvents());
195 return GetEventAt(current_iteration_index_++); 203 return GetEventAt(current_iteration_index_++);
(...skipping 54 matching lines...)
250 } 258 }
251 259
252 virtual size_t Size() const OVERRIDE { return 0; } 260 virtual size_t Size() const OVERRIDE { return 0; }
253 261
254 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE { 262 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
255 NOTREACHED(); 263 NOTREACHED();
256 return *static_cast<TraceEvent*>(NULL); 264 return *static_cast<TraceEvent*>(NULL);
257 } 265 }
258 }; 266 };
259 267
 268 // Stores the data used when ECHO_TO_CONSOLE is enabled.
269 struct EchoToConsoleContext {
270 hash_map<int, std::stack<TimeTicks> > thread_event_start_times;
271 hash_map<std::string, int> thread_colors;
272 };
273
274 } // namespace
275
260 //////////////////////////////////////////////////////////////////////////////// 276 ////////////////////////////////////////////////////////////////////////////////
261 // 277 //
262 // TraceEvent 278 // TraceEvent
263 // 279 //
264 //////////////////////////////////////////////////////////////////////////////// 280 ////////////////////////////////////////////////////////////////////////////////
265 281
266 namespace { 282 namespace {
267 283
268 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; } 284 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
269 285
(...skipping 464 matching lines...)
734 750
735 TraceBucketData::~TraceBucketData() { 751 TraceBucketData::~TraceBucketData() {
736 } 752 }
737 753
738 //////////////////////////////////////////////////////////////////////////////// 754 ////////////////////////////////////////////////////////////////////////////////
739 // 755 //
740 // TraceLog 756 // TraceLog
741 // 757 //
742 //////////////////////////////////////////////////////////////////////////////// 758 ////////////////////////////////////////////////////////////////////////////////
743 759
760 class TraceLog::ThreadLocalEventBuffer
761 : public MessageLoop::DestructionObserver {
762 public:
763 ThreadLocalEventBuffer(TraceLog* trace_log);
764 virtual ~ThreadLocalEventBuffer();
765
766 void AddEvent(const TraceEvent& event, NotificationHelper* notifier);
767 void ReportOverhead(const TimeTicks& event_timestamp);
768
769 private:
770 // MessageLoop::DestructionObserver
771 virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
772
773 void FlushWhileLocked(NotificationHelper* notifier);
774
775 void CheckThisIsCurrentBuffer() {
776 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
777 }
778
779 // Since TraceLog is a leaky singleton, trace_log_ will always be valid
780 // as long as the thread exists.
781 TraceLog* trace_log_;
782 std::vector<TraceEvent> logged_events_;
783 int event_count_;
784 TimeDelta overhead_;
785
786 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
787 };
788
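For readers following the review, here is a minimal standalone sketch of the general per-thread buffering idea behind ThreadLocalEventBuffer (simplified; not the Chromium API): each thread appends events to its own buffer and only takes the shared lock when that buffer fills up or the thread exits. The sketch uses C++11 thread_local, std::mutex and plain structs rather than Chromium's ThreadLocalPointer, Lock and MessageLoop destruction observer; all names in it are hypothetical.

  // Sketch only (hypothetical names): per-thread buffering with a locked
  // shared sink.
  #include <cstddef>
  #include <mutex>
  #include <string>
  #include <thread>
  #include <vector>

  struct Event { std::string name; };

  std::mutex g_lock;
  std::vector<Event> g_main_buffer;  // shared sink, guarded by g_lock

  struct LocalBuffer {
    static constexpr std::size_t kCapacity = 1024;
    std::vector<Event> events;

    void Add(const Event& e) {
      events.push_back(e);  // fast path: no lock taken
      if (events.size() >= kCapacity)
        Flush();
    }
    void Flush() {
      std::lock_guard<std::mutex> lock(g_lock);
      g_main_buffer.insert(g_main_buffer.end(), events.begin(), events.end());
      events.clear();
    }
    ~LocalBuffer() { Flush(); }  // drain whatever is left on thread exit
  };

  thread_local LocalBuffer t_buffer;

  int main() {
    std::thread worker([] { t_buffer.Add({"worker-event"}); });
    t_buffer.Add({"main-event"});
    worker.join();
    return 0;
  }

The real class additionally registers the thread's MessageLoop in thread_message_loops_ so that TraceLog::Flush() can drain the buffer on demand instead of waiting for thread exit.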
789 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
790 : trace_log_(trace_log),
791 event_count_(0) {
792 logged_events_.reserve(kTraceEventThreadLocalBufferSize);
793
794 if (g_category_group_enabled[g_category_trace_event_overhead]) {
795 logged_events_.push_back(TraceEvent(
796 static_cast<int>(PlatformThread::CurrentId()),
797 TimeTicks::NowFromSystemTraceTime(),
798 TRACE_EVENT_PHASE_ASYNC_BEGIN,
799 &g_category_group_enabled[g_category_trace_event_overhead],
800 "local-event-buffer",
801 0, 0, NULL, NULL, NULL, NULL, 0));
802 }
803
 804 // ThreadLocalEventBuffer is created only if the thread has a message loop, so
805 // the following message_loop won't be NULL.
806 MessageLoop* message_loop = MessageLoop::current();
807 message_loop->AddDestructionObserver(this);
808
809 AutoLock lock(trace_log->lock_);
810 trace_log->thread_message_loops_.insert(message_loop);
811 }
812
813 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
814 CheckThisIsCurrentBuffer();
815
816 if (g_category_group_enabled[g_category_trace_event_overhead]) {
817 const char* arg_names[2] = { "event_count", "average_overhead_sec" };
818 unsigned char arg_types[2];
819 unsigned long long arg_values[2];
820 trace_event_internal::SetTraceValue(event_count_,
821 &arg_types[0], &arg_values[0]);
822 trace_event_internal::SetTraceValue(overhead_.InSecondsF() / event_count_,
dsinclair 2013/08/16 14:38:32 Why seconds? Do we really expect this to be outsid
Xianzhu 2013/08/16 19:41:03 Done.
823 &arg_types[1], &arg_values[1]);
824 logged_events_.push_back(TraceEvent(
825 static_cast<int>(PlatformThread::CurrentId()),
826 TimeTicks::NowFromSystemTraceTime(),
827 TRACE_EVENT_PHASE_ASYNC_END,
828 &g_category_group_enabled[g_category_trace_event_overhead],
829 "local-event-buffer",
830 0,
831 2, arg_names, arg_types, arg_values,
832 NULL, 0));
833 }
834
835 NotificationHelper notifier(trace_log_);
836 {
837 AutoLock lock(trace_log_->lock_);
838 FlushWhileLocked(&notifier);
839 trace_log_->thread_message_loops_.erase(MessageLoop::current());
840 }
841 notifier.SendNotificationIfAny();
842 }
843
844 void TraceLog::ThreadLocalEventBuffer::AddEvent(const TraceEvent& event,
845 NotificationHelper* notifier) {
846 CheckThisIsCurrentBuffer();
847 logged_events_.push_back(event);
848 if (logged_events_.size() >= kTraceEventThreadLocalBufferSize) {
849 AutoLock lock(trace_log_->lock_);
850 FlushWhileLocked(notifier);
851 }
852 }
853
854 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
855 const TimeTicks& event_timestamp) {
856 if (g_category_group_enabled[g_category_trace_event_overhead]) {
857 event_count_++;
858 TimeTicks now = TimeTicks::NowFromSystemTraceTime();
859 TimeDelta overhead = now - event_timestamp;
860 if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
861 int thread_id = static_cast<int>(PlatformThread::CurrentId());
862 // TODO(wangxianzhu): Use X event when it's ready.
863 logged_events_.push_back(TraceEvent(
864 thread_id,
865 event_timestamp,
866 TRACE_EVENT_PHASE_BEGIN,
867 &g_category_group_enabled[g_category_trace_event_overhead],
868 "overhead",
869 0, 0, NULL, NULL, NULL, NULL, 0));
870 logged_events_.push_back(TraceEvent(
871 thread_id,
872 now,
873 TRACE_EVENT_PHASE_END,
874 &g_category_group_enabled[g_category_trace_event_overhead],
875 "overhead",
876 0, 0, NULL, NULL, NULL, NULL, 0));
877 }
878 overhead_ += overhead;
879 }
880 }
881
882 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
883 delete this;
884 }
885
886 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked(
887 NotificationHelper* notifier) {
888 trace_log_->lock_.AssertAcquired();
889 for (size_t i = 0; i < logged_events_.size(); ++i) {
890 trace_log_->AddEventToMainBufferWhileLocked(logged_events_[i],
891 notifier);
892 }
893 logged_events_.resize(0);
894 }
895
744 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log) 896 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
745 : trace_log_(trace_log), 897 : trace_log_(trace_log),
746 notification_(0) { 898 notification_(0) {
747 } 899 }
748 900
749 TraceLog::NotificationHelper::~NotificationHelper() { 901 TraceLog::NotificationHelper::~NotificationHelper() {
750 } 902 }
751 903
752 void TraceLog::NotificationHelper::AddNotificationWhileLocked( 904 void TraceLog::NotificationHelper::AddNotificationWhileLocked(
753 int notification) { 905 int notification) {
906 trace_log_->lock_.AssertAcquired();
754 if (trace_log_->notification_callback_.is_null()) 907 if (trace_log_->notification_callback_.is_null())
755 return; 908 return;
756 if (notification_ == 0) 909 if (notification_ == 0)
757 callback_copy_ = trace_log_->notification_callback_; 910 callback_copy_ = trace_log_->notification_callback_;
758 notification_ |= notification; 911 notification_ |= notification;
759 } 912 }
760 913
761 void TraceLog::NotificationHelper::SendNotificationIfAny() { 914 void TraceLog::NotificationHelper::SendNotificationIfAny() {
762 if (notification_) 915 if (notification_)
763 callback_copy_.Run(notification_); 916 callback_copy_.Run(notification_);
(...skipping 26 matching lines...)
790 } 943 }
791 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) 944 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
792 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. 945 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
793 946
794 return static_cast<Options>(ret); 947 return static_cast<Options>(ret);
795 } 948 }
796 949
797 TraceLog::TraceLog() 950 TraceLog::TraceLog()
798 : enable_count_(0), 951 : enable_count_(0),
799 num_traces_recorded_(0), 952 num_traces_recorded_(0),
800 event_callback_(NULL), 953 buffer_is_full_(0),
954 event_callback_(0),
801 dispatching_to_observer_list_(false), 955 dispatching_to_observer_list_(false),
802 process_sort_index_(0), 956 process_sort_index_(0),
803 watch_category_(NULL), 957 echo_to_console_context_(0),
958 process_id_hash_(0),
959 process_id_(0),
960 watch_category_(0),
804 trace_options_(RECORD_UNTIL_FULL), 961 trace_options_(RECORD_UNTIL_FULL),
805 sampling_thread_handle_(0), 962 sampling_thread_handle_(0),
806 category_filter_(CategoryFilter::kDefaultCategoryFilterString) { 963 category_filter_(CategoryFilter::kDefaultCategoryFilterString),
964 flush_message_loop_(NULL) {
807 // Trace is enabled or disabled on one thread while other threads are 965 // Trace is enabled or disabled on one thread while other threads are
808 // accessing the enabled flag. We don't care whether edge-case events are 966 // accessing the enabled flag. We don't care whether edge-case events are
809 // traced or not, so we allow races on the enabled flag to keep the trace 967 // traced or not, so we allow races on the enabled flag to keep the trace
810 // macros fast. 968 // macros fast.
811 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: 969 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
812 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, 970 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
813 // sizeof(g_category_group_enabled), 971 // sizeof(g_category_group_enabled),
814 // "trace_event category enabled"); 972 // "trace_event category enabled");
815 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) { 973 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) {
816 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i], 974 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i],
(...skipping 15 matching lines...)
832 category_string = "*"; 990 category_string = "*";
833 991
834 SetEnabled(CategoryFilter(category_string), ECHO_TO_CONSOLE); 992 SetEnabled(CategoryFilter(category_string), ECHO_TO_CONSOLE);
835 } 993 }
836 #endif 994 #endif
837 995
838 logged_events_.reset(GetTraceBuffer()); 996 logged_events_.reset(GetTraceBuffer());
839 } 997 }
840 998
841 TraceLog::~TraceLog() { 999 TraceLog::~TraceLog() {
1000 delete reinterpret_cast<EchoToConsoleContext*>(echo_to_console_context_);
842 } 1001 }
843 1002
844 const unsigned char* TraceLog::GetCategoryGroupEnabled( 1003 const unsigned char* TraceLog::GetCategoryGroupEnabled(
845 const char* category_group) { 1004 const char* category_group) {
846 TraceLog* tracelog = GetInstance(); 1005 TraceLog* tracelog = GetInstance();
847 if (!tracelog) { 1006 if (!tracelog) {
848 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); 1007 DCHECK(!g_category_group_enabled[g_category_already_shutdown]);
849 return &g_category_group_enabled[g_category_already_shutdown]; 1008 return &g_category_group_enabled[g_category_already_shutdown];
850 } 1009 }
851 return tracelog->GetCategoryGroupEnabledInternal(category_group); 1010 return tracelog->GetCategoryGroupEnabledInternal(category_group);
(...skipping 79 matching lines...)
931 category_group_enabled = 1090 category_group_enabled =
932 &g_category_group_enabled[g_category_categories_exhausted]; 1091 &g_category_group_enabled[g_category_categories_exhausted];
933 } 1092 }
934 } 1093 }
935 return category_group_enabled; 1094 return category_group_enabled;
936 } 1095 }
937 1096
938 void TraceLog::GetKnownCategoryGroups( 1097 void TraceLog::GetKnownCategoryGroups(
939 std::vector<std::string>* category_groups) { 1098 std::vector<std::string>* category_groups) {
940 AutoLock lock(lock_); 1099 AutoLock lock(lock_);
1100 category_groups->push_back(
1101 g_category_groups[g_category_trace_event_overhead]);
941 for (int i = g_num_builtin_categories; i < g_category_index; i++) 1102 for (int i = g_num_builtin_categories; i < g_category_index; i++)
942 category_groups->push_back(g_category_groups[i]); 1103 category_groups->push_back(g_category_groups[i]);
943 } 1104 }
944 1105
945 void TraceLog::SetEnabled(const CategoryFilter& category_filter, 1106 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
946 Options options) { 1107 Options options) {
947 std::vector<EnabledStateObserver*> observer_list; 1108 std::vector<EnabledStateObserver*> observer_list;
948 { 1109 {
949 AutoLock lock(lock_); 1110 AutoLock lock(lock_);
950 1111
951 if (enable_count_++ > 0) { 1112 if (enable_count_++ > 0) {
952 if (options != trace_options_) { 1113 if (options != trace_options_) {
 953 DLOG(ERROR) << "Attempting to re-enable tracing with a different " 1114 DLOG(ERROR) << "Attempting to re-enable tracing with a different "
954 << "set of options."; 1115 << "set of options.";
955 } 1116 }
956 1117
957 category_filter_.Merge(category_filter); 1118 category_filter_.Merge(category_filter);
958 UpdateCategoryGroupEnabledFlags(); 1119 UpdateCategoryGroupEnabledFlags();
959 return; 1120 return;
960 } 1121 }
961 1122
962 if (options != trace_options_) { 1123 if (options != trace_options_) {
963 trace_options_ = options; 1124 trace_options_ = options;
1125 EnableEchoToConsole(options & ECHO_TO_CONSOLE);
964 logged_events_.reset(GetTraceBuffer()); 1126 logged_events_.reset(GetTraceBuffer());
1127 subtle::NoBarrier_Store(&buffer_is_full_, 0);
965 } 1128 }
966 1129
967 if (dispatching_to_observer_list_) { 1130 if (dispatching_to_observer_list_) {
968 DLOG(ERROR) << 1131 DLOG(ERROR) <<
969 "Cannot manipulate TraceLog::Enabled state from an observer."; 1132 "Cannot manipulate TraceLog::Enabled state from an observer.";
970 return; 1133 return;
971 } 1134 }
972 1135
973 num_traces_recorded_++; 1136 num_traces_recorded_++;
974 1137
(...skipping 57 matching lines...)
1032 // Stop the sampling thread. 1195 // Stop the sampling thread.
1033 sampling_thread_->Stop(); 1196 sampling_thread_->Stop();
1034 lock_.Release(); 1197 lock_.Release();
1035 PlatformThread::Join(sampling_thread_handle_); 1198 PlatformThread::Join(sampling_thread_handle_);
1036 lock_.Acquire(); 1199 lock_.Acquire();
1037 sampling_thread_handle_ = PlatformThreadHandle(); 1200 sampling_thread_handle_ = PlatformThreadHandle();
1038 sampling_thread_.reset(); 1201 sampling_thread_.reset();
1039 } 1202 }
1040 1203
1041 category_filter_.Clear(); 1204 category_filter_.Clear();
1042 watch_category_ = NULL; 1205 subtle::NoBarrier_Store(&watch_category_, 0);
1043 watch_event_name_ = ""; 1206 watch_event_name_ = "";
1044 UpdateCategoryGroupEnabledFlags(); 1207 UpdateCategoryGroupEnabledFlags();
1045 AddMetadataEvents(); 1208 AddMetadataEvents();
1046 1209
1047 dispatching_to_observer_list_ = true; 1210 dispatching_to_observer_list_ = true;
1048 observer_list = enabled_state_observer_list_; 1211 observer_list = enabled_state_observer_list_;
1049 } 1212 }
1050 1213
1051 // Dispatch to observers outside the lock in case the observer triggers a 1214 // Dispatch to observers outside the lock in case the observer triggers a
1052 // trace event. 1215 // trace event.
(...skipping 45 matching lines...)
1098 } 1261 }
1099 1262
1100 TraceBuffer* TraceLog::GetTraceBuffer() { 1263 TraceBuffer* TraceLog::GetTraceBuffer() {
1101 if (trace_options_ & RECORD_CONTINUOUSLY) 1264 if (trace_options_ & RECORD_CONTINUOUSLY)
1102 return new TraceBufferRingBuffer(); 1265 return new TraceBufferRingBuffer();
1103 else if (trace_options_ & ECHO_TO_CONSOLE) 1266 else if (trace_options_ & ECHO_TO_CONSOLE)
1104 return new TraceBufferDiscardsEvents(); 1267 return new TraceBufferDiscardsEvents();
1105 return new TraceBufferVector(); 1268 return new TraceBufferVector();
1106 } 1269 }
1107 1270
1271 bool TraceLog::AddEventToMainBufferWhileLocked(const TraceEvent& trace_event,
1272 NotificationHelper* notifier) {
1273 // Don't check buffer_is_full_ because we want the remaining thread-local
 1274 // events to be flushed into the main buffer with this method; otherwise
1275 // we may lose some early events of a thread that generates events sparsely.
1276 lock_.AssertAcquired();
1277 logged_events_->AddEvent(trace_event);
1278 if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) {
1279 subtle::NoBarrier_Store(&buffer_is_full_,
1280 static_cast<subtle::AtomicWord>(1));
1281 notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL);
1282 return false;
1283 }
1284 return true;
1285 }
1286
1108 void TraceLog::SetEventCallback(EventCallback cb) { 1287 void TraceLog::SetEventCallback(EventCallback cb) {
1109 AutoLock lock(lock_); 1288 subtle::NoBarrier_Store(&event_callback_,
1110 event_callback_ = cb; 1289 reinterpret_cast<subtle::AtomicWord>(cb));
1111 }; 1290 };
1112 1291
 1292 // Flush works as follows:
 1293 // 1. Flush() is called on thread A, whose message loop is saved in
 1294 // flush_message_loop_;
 1295 // 2. On thread A, FlushNextThreadOrFinish() gets the first message loop from
 1296 // thread_message_loops_. If one exists, it posts a task to run
 1297 // FlushCurrentThreadAndContinue() on that thread's message loop; otherwise
 1298 // it finishes the flush (step 4);
 1299 // 3. FlushCurrentThreadAndContinue() deletes the thread-local event buffer:
 1300 // - the thread's last batch of events is flushed into the main
 1301 // buffer;
 1302 // - the message loop is removed from thread_message_loops_;
 1303 // then it posts FlushNextThreadOrFinish() to flush_message_loop_ to continue the
 1304 // flush procedure (step 2);
 1305 // 4. When all thread-local buffers have been flushed (and deleted), finish
 1306 // the flush by calling the OutputCallback with all events converted into
 1307 // JSON format.
1113 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { 1308 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1114 // Ignore memory allocations from here down. 1309 // Ignore memory allocations from here down.
1115 INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"), 1310 INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"),
1116 TRACE_MEMORY_IGNORE); 1311 TRACE_MEMORY_IGNORE);
1312 {
1313 AutoLock lock(lock_);
1314 DCHECK(!flush_message_loop_);
1315 flush_message_loop_ = MessageLoop::current();
1316 DCHECK(flush_message_loop_);
1317 flush_output_callback_ = cb;
1318 }
1319 FlushNextThreadOrFinish();
1320 }
1321
1322 void TraceLog::FlushNextThreadOrFinish() {
1117 scoped_ptr<TraceBuffer> previous_logged_events; 1323 scoped_ptr<TraceBuffer> previous_logged_events;
1118 { 1324 {
1119 AutoLock lock(lock_); 1325 AutoLock lock(lock_);
1326 hash_set<MessageLoop*>::const_iterator next_message_loop =
1327 thread_message_loops_.begin();
1328 if (next_message_loop != thread_message_loops_.end()) {
1329 // Destroy the next thread local buffer. The buffer will be flushed into
1330 // the main event buffer and the message loop will be removed from
1331 // thread_message_loops.
1332 (*next_message_loop)->PostTask(
1333 FROM_HERE,
1334 Bind(&TraceLog::FlushCurrentThreadAndContinue,
1335 Unretained(this)));
1336 return;
1337 }
1338
1339 // All thread local buffers have been flushed (and destroyed).
1340 // From here to the end of the function finishes the whole flush procedure.
1120 previous_logged_events.swap(logged_events_); 1341 previous_logged_events.swap(logged_events_);
1121 logged_events_.reset(GetTraceBuffer()); 1342 logged_events_.reset(GetTraceBuffer());
1343 subtle::NoBarrier_Store(&buffer_is_full_, 0);
1122 } // release lock 1344 } // release lock
1123 1345
1346 flush_message_loop_ = NULL;
1347
1124 while (previous_logged_events->HasMoreEvents()) { 1348 while (previous_logged_events->HasMoreEvents()) {
1125 scoped_refptr<RefCountedString> json_events_str_ptr = 1349 scoped_refptr<RefCountedString> json_events_str_ptr =
1126 new RefCountedString(); 1350 new RefCountedString();
1127 1351
1128 for (size_t i = 0; i < kTraceEventBatchSize; ++i) { 1352 for (size_t i = 0; i < kTraceEventBatchSize; ++i) {
1129 if (i > 0) 1353 if (i > 0)
1130 *(&(json_events_str_ptr->data())) += ","; 1354 *(&(json_events_str_ptr->data())) += ",";
1131 1355
1132 previous_logged_events->NextEvent().AppendAsJSON( 1356 previous_logged_events->NextEvent().AppendAsJSON(
1133 &(json_events_str_ptr->data())); 1357 &(json_events_str_ptr->data()));
1134 1358
1135 if (!previous_logged_events->HasMoreEvents()) 1359 if (!previous_logged_events->HasMoreEvents())
1136 break; 1360 break;
1137 } 1361 }
1138 1362
1139 cb.Run(json_events_str_ptr); 1363 flush_output_callback_.Run(json_events_str_ptr);
1140 } 1364 }
1141 } 1365 }
1142 1366
1367 // Run in each of the thread holding a local event buffer.
dsinclair 2013/08/16 14:38:32 s/thread/threads
Xianzhu 2013/08/16 19:41:03 Done.
1368 void TraceLog::FlushCurrentThreadAndContinue() {
1369 delete thread_local_event_buffer_.Get();
1370 thread_local_event_buffer_.Set(NULL);
1371
1372 // Continue the flush procedure.
1373 flush_message_loop_->PostTask(FROM_HERE,
dsinclair 2013/08/16 14:38:32 Having this here makes the flow a lot simpler to f
1374 Bind(&TraceLog::FlushNextThreadOrFinish,
1375 Unretained(this)));
1376 }
1377
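To make the four-step flush procedure described in the comment above TraceLog::Flush() easier to follow, here is a standalone sketch of the same handshake: the coordinating thread asks each registered thread to drain its local buffer on that thread, and each drained thread posts the continuation back to the coordinator. A tiny std::function queue stands in for MessageLoop; every name below is hypothetical and this is not the Chromium API.

  // Sketch only (hypothetical names): the flush ping-pong between the
  // coordinating thread and the worker threads.
  #include <condition_variable>
  #include <deque>
  #include <functional>
  #include <iostream>
  #include <mutex>
  #include <set>
  #include <string>
  #include <thread>
  #include <vector>

  class TaskQueue {  // minimal stand-in for a thread's MessageLoop
   public:
    void PostTask(std::function<void()> task) {
      { std::lock_guard<std::mutex> lock(m_); q_.push_back(std::move(task)); }
      cv_.notify_one();
    }
    void Run() {  // runs posted tasks until a null task is posted
      for (;;) {
        std::unique_lock<std::mutex> lock(m_);
        cv_.wait(lock, [this] { return !q_.empty(); });
        std::function<void()> task = std::move(q_.front());
        q_.pop_front();
        lock.unlock();
        if (!task) return;
        task();
      }
    }
   private:
    std::mutex m_;
    std::condition_variable cv_;
    std::deque<std::function<void()>> q_;
  };

  std::mutex g_lock;
  std::set<TaskQueue*> g_thread_queues;           // like thread_message_loops_
  std::vector<std::string> g_main_buffer;         // like logged_events_
  thread_local std::vector<std::string> t_local;  // like ThreadLocalEventBuffer
  TaskQueue* g_flush_queue = nullptr;             // like flush_message_loop_

  void FlushNextThreadOrFinish();

  // Step 3: runs on a worker thread; drain its buffer, unregister, continue.
  void FlushCurrentThreadAndContinue(TaskQueue* self) {
    {
      std::lock_guard<std::mutex> lock(g_lock);
      g_main_buffer.insert(g_main_buffer.end(), t_local.begin(), t_local.end());
      t_local.clear();
      g_thread_queues.erase(self);
    }
    g_flush_queue->PostTask(&FlushNextThreadOrFinish);
  }

  // Steps 2 and 4: pick the next registered thread, or finish the flush.
  void FlushNextThreadOrFinish() {
    TaskQueue* next = nullptr;
    {
      std::lock_guard<std::mutex> lock(g_lock);
      if (!g_thread_queues.empty())
        next = *g_thread_queues.begin();
    }
    if (next) {
      next->PostTask([next] { FlushCurrentThreadAndContinue(next); });
      return;
    }
    std::lock_guard<std::mutex> lock(g_lock);
    for (const std::string& e : g_main_buffer)  // stands in for OutputCallback
      std::cout << e << "\n";
    g_flush_queue->PostTask(nullptr);           // quit the sketch's driver loop
  }

  int main() {
    TaskQueue main_queue, worker_queue;
    g_flush_queue = &main_queue;
    // Registered up front to keep the sketch deterministic; the real code
    // registers each loop from its own thread.
    g_thread_queues.insert(&worker_queue);

    std::thread worker([&] {
      t_local.push_back("worker-event");  // buffered without taking g_lock
      worker_queue.Run();                 // services the posted flush task
    });

    FlushNextThreadOrFinish();  // step 1: start the flush on this thread
    main_queue.Run();           // runs the continuation posted by the worker

    worker_queue.PostTask(nullptr);  // let the worker's loop exit
    worker.join();
    return 0;
  }

The sketch omits buffer_is_full_ handling and JSON batching, but the control flow mirrors FlushNextThreadOrFinish() and FlushCurrentThreadAndContinue() above.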
1143 void TraceLog::AddTraceEvent( 1378 void TraceLog::AddTraceEvent(
1144 char phase, 1379 char phase,
1145 const unsigned char* category_group_enabled, 1380 const unsigned char* category_group_enabled,
1146 const char* name, 1381 const char* name,
1147 unsigned long long id, 1382 unsigned long long id,
1148 int num_args, 1383 int num_args,
1149 const char** arg_names, 1384 const char** arg_names,
1150 const unsigned char* arg_types, 1385 const unsigned char* arg_types,
1151 const unsigned long long* arg_values, 1386 const unsigned long long* arg_values,
1152 scoped_ptr<ConvertableToTraceFormat> convertable_values[], 1387 scoped_ptr<ConvertableToTraceFormat> convertable_values[],
(...skipping 27 matching lines...)
1180 #if defined(OS_ANDROID) 1415 #if defined(OS_ANDROID)
1181 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id, 1416 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
1182 num_args, arg_names, arg_types, arg_values, convertable_values, 1417 num_args, arg_names, arg_types, arg_values, convertable_values,
1183 flags); 1418 flags);
1184 #endif 1419 #endif
1185 1420
1186 if (!IsCategoryGroupEnabled(category_group_enabled)) 1421 if (!IsCategoryGroupEnabled(category_group_enabled))
1187 return; 1422 return;
1188 1423
1189 TimeTicks now = timestamp - time_offset_; 1424 TimeTicks now = timestamp - time_offset_;
1190 EventCallback event_callback_copy;
1191 1425
1192 NotificationHelper notifier(this); 1426 NotificationHelper notifier(this);
1193 1427
1428 ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
1429 // A ThreadLocalEventBuffer needs the message loop
1430 // - to know when the thread exits;
1431 // - to handle the final flush.
1432 // For a thread without a message loop, the trace events will be added into
1433 // the main buffer directly.
1434 if (MessageLoop::current()) {
1435 thread_local_event_buffer = thread_local_event_buffer_.Get();
1436 if (!thread_local_event_buffer) {
1437 thread_local_event_buffer = new ThreadLocalEventBuffer(this);
1438 thread_local_event_buffer_.Set(thread_local_event_buffer);
1439 }
1440 }
1441
1194 // Check and update the current thread name only if the event is for the 1442 // Check and update the current thread name only if the event is for the
1195 // current thread to avoid locks in most cases. 1443 // current thread to avoid locks in most cases.
1196 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { 1444 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1197 const char* new_name = ThreadIdNameManager::GetInstance()-> 1445 const char* new_name = ThreadIdNameManager::GetInstance()->
1198 GetName(thread_id); 1446 GetName(thread_id);
1199 // Check if the thread name has been set or changed since the previous 1447 // Check if the thread name has been set or changed since the previous
1200 // call (if any), but don't bother if the new name is empty. Note this will 1448 // call (if any), but don't bother if the new name is empty. Note this will
1201 // not detect a thread name change within the same char* buffer address: we 1449 // not detect a thread name change within the same char* buffer address: we
1202 // favor common case performance over corner case correctness. 1450 // favor common case performance over corner case correctness.
1203 if (new_name != g_current_thread_name.Get().Get() && 1451 if (new_name != g_current_thread_name.Get().Get() &&
(...skipping 15 matching lines...)
1219 existing_names.end(), 1467 existing_names.end(),
1220 new_name) != existing_names.end(); 1468 new_name) != existing_names.end();
1221 if (!found) { 1469 if (!found) {
1222 existing_name->second.push_back(','); 1470 existing_name->second.push_back(',');
1223 existing_name->second.append(new_name); 1471 existing_name->second.append(new_name);
1224 } 1472 }
1225 } 1473 }
1226 } 1474 }
1227 } 1475 }
1228 1476
1229 TraceEvent trace_event(thread_id, 1477 if (!subtle::NoBarrier_Load(&buffer_is_full_)) {
1230 now, phase, category_group_enabled, name, id, 1478 TraceEvent trace_event(thread_id,
1231 num_args, arg_names, arg_types, arg_values, 1479 now, phase, category_group_enabled, name, id,
1232 convertable_values, flags); 1480 num_args, arg_names, arg_types, arg_values,
1481 convertable_values, flags);
1233 1482
1234 do { 1483 if (thread_local_event_buffer) {
1235 AutoLock lock(lock_); 1484 thread_local_event_buffer->AddEvent(trace_event, &notifier);
1485 } else {
1486 AutoLock lock(lock_);
1487 AddEventToMainBufferWhileLocked(trace_event, &notifier);
1488 }
1236 1489
1237 event_callback_copy = event_callback_; 1490 EchoToConsoleContext* echo_to_console_context =
1238 if (logged_events_->IsFull()) 1491 reinterpret_cast<EchoToConsoleContext*>(subtle::NoBarrier_Load(
1239 break; 1492 &echo_to_console_context_));
1493 if (echo_to_console_context) {
1494 // ECHO_TO_CONSOLE is enabled.
1495 AutoLock lock(lock_);
1240 1496
1241 logged_events_->AddEvent(trace_event);
1242
1243 if (trace_options_ & ECHO_TO_CONSOLE) {
1244 TimeDelta duration; 1497 TimeDelta duration;
1245 if (phase == TRACE_EVENT_PHASE_END) { 1498 if (phase == TRACE_EVENT_PHASE_END) {
1246 duration = timestamp - thread_event_start_times_[thread_id].top(); 1499 duration = timestamp -
1247 thread_event_start_times_[thread_id].pop(); 1500 echo_to_console_context->thread_event_start_times[thread_id].top();
1501 echo_to_console_context->thread_event_start_times[thread_id].pop();
1248 } 1502 }
1249 1503
1250 std::string thread_name = thread_names_[thread_id]; 1504 std::string thread_name = thread_names_[thread_id];
1251 if (thread_colors_.find(thread_name) == thread_colors_.end()) 1505 if (echo_to_console_context->thread_colors.find(thread_name) ==
1252 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1; 1506 echo_to_console_context->thread_colors.end())
1507 echo_to_console_context->thread_colors[thread_name] =
1508 (echo_to_console_context->thread_colors.size() % 6) + 1;
1253 1509
1254 std::ostringstream log; 1510 std::ostringstream log;
1255 log << base::StringPrintf("%s: \x1b[0;3%dm", 1511 log << base::StringPrintf(
1256 thread_name.c_str(), 1512 "%s: \x1b[0;3%dm",
1257 thread_colors_[thread_name]); 1513 thread_name.c_str(),
1514 echo_to_console_context->thread_colors[thread_name]);
1258 1515
1259 size_t depth = 0; 1516 size_t depth = 0;
1260 if (thread_event_start_times_.find(thread_id) != 1517 if (echo_to_console_context->thread_event_start_times.find(thread_id) !=
1261 thread_event_start_times_.end()) 1518 echo_to_console_context->thread_event_start_times.end()) {
1262 depth = thread_event_start_times_[thread_id].size(); 1519 depth = echo_to_console_context->thread_event_start_times[thread_id]
1520 .size();
1521 }
1263 1522
1264 for (size_t i = 0; i < depth; ++i) 1523 for (size_t i = 0; i < depth; ++i)
1265 log << "| "; 1524 log << "| ";
1266 1525
1267 trace_event.AppendPrettyPrinted(&log); 1526 trace_event.AppendPrettyPrinted(&log);
1268 if (phase == TRACE_EVENT_PHASE_END) 1527 if (phase == TRACE_EVENT_PHASE_END)
1269 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); 1528 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1270 1529
1271 LOG(ERROR) << log.str() << "\x1b[0;m"; 1530 LOG(ERROR) << log.str() << "\x1b[0;m";
1272 1531
1273 if (phase == TRACE_EVENT_PHASE_BEGIN) 1532 if (phase == TRACE_EVENT_PHASE_BEGIN) {
1274 thread_event_start_times_[thread_id].push(timestamp); 1533 echo_to_console_context->thread_event_start_times[thread_id].push(
1534 timestamp);
1535 }
1275 } 1536 }
1537 }
1276 1538
1277 if (logged_events_->IsFull()) 1539 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1278 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL); 1540 &watch_category_)) == category_group_enabled) {
1279 1541 AutoLock lock(lock_);
1280 if (watch_category_ == category_group_enabled && watch_event_name_ == name) 1542 if (watch_event_name_ == name)
1281 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1543 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1282 } while (0); // release lock 1544 }
1283 1545
1284 notifier.SendNotificationIfAny(); 1546 notifier.SendNotificationIfAny();
1285 if (event_callback_copy != NULL) { 1547 EventCallback event_callback = reinterpret_cast<EventCallback>(
1286 event_callback_copy(phase, category_group_enabled, name, id, 1548 subtle::NoBarrier_Load(&event_callback_));
1287 num_args, arg_names, arg_types, arg_values, 1549 if (event_callback) {
1288 flags); 1550 event_callback(phase, category_group_enabled, name, id,
1551 num_args, arg_names, arg_types, arg_values,
1552 flags);
1553 }
1554
1555 if (thread_local_event_buffer)
1556 thread_local_event_buffer->ReportOverhead(timestamp);
1557 }
1558
1559 void TraceLog::EnableEchoToConsole(bool enable) {
1560 if (enable) {
1561 EchoToConsoleContext* new_context = new EchoToConsoleContext();
1562 if (subtle::NoBarrier_CompareAndSwap(
1563 &echo_to_console_context_, 0,
1564 reinterpret_cast<subtle::AtomicWord>(new_context))) {
 1565 // There is an existing context, so new_context is not used.
1566 delete new_context;
1567 }
1568 } else {
1569 delete reinterpret_cast<EchoToConsoleContext*>(
1570 subtle::NoBarrier_AtomicExchange(&echo_to_console_context_, 0));
1289 } 1571 }
1290 } 1572 }
1291 1573
1292 void TraceLog::AddTraceEventEtw(char phase, 1574 void TraceLog::AddTraceEventEtw(char phase,
1293 const char* name, 1575 const char* name,
1294 const void* id, 1576 const void* id,
1295 const char* extra) { 1577 const char* extra) {
1296 #if defined(OS_WIN) 1578 #if defined(OS_WIN)
1297 TraceEventETWProvider::Trace(name, phase, id, extra); 1579 TraceEventETWProvider::Trace(name, phase, id, extra);
1298 #endif 1580 #endif
(...skipping 13 matching lines...)
1312 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); 1594 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1313 } 1595 }
1314 1596
1315 void TraceLog::SetWatchEvent(const std::string& category_name, 1597 void TraceLog::SetWatchEvent(const std::string& category_name,
1316 const std::string& event_name) { 1598 const std::string& event_name) {
1317 const unsigned char* category = GetCategoryGroupEnabled( 1599 const unsigned char* category = GetCategoryGroupEnabled(
1318 category_name.c_str()); 1600 category_name.c_str());
1319 size_t notify_count = 0; 1601 size_t notify_count = 0;
1320 { 1602 {
1321 AutoLock lock(lock_); 1603 AutoLock lock(lock_);
1322 watch_category_ = category; 1604 subtle::NoBarrier_Store(&watch_category_,
1605 reinterpret_cast<subtle::AtomicWord>(category));
1323 watch_event_name_ = event_name; 1606 watch_event_name_ = event_name;
1324 1607
1325 // First, search existing events for watch event because we want to catch 1608 // First, search existing events for watch event because we want to catch
1326 // it even if it has already occurred. 1609 // it even if it has already occurred.
1327 notify_count = logged_events_->CountEnabledByName(category, event_name); 1610 notify_count = logged_events_->CountEnabledByName(category, event_name);
1328 } // release lock 1611 } // release lock
1329 1612
1330 // Send notification for each event found. 1613 // Send notification for each event found.
1331 for (size_t i = 0; i < notify_count; ++i) { 1614 for (size_t i = 0; i < notify_count; ++i) {
1332 NotificationHelper notifier(this); 1615 NotificationHelper notifier(this);
1333 lock_.Acquire(); 1616 lock_.Acquire();
1334 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1617 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1335 lock_.Release(); 1618 lock_.Release();
1336 notifier.SendNotificationIfAny(); 1619 notifier.SendNotificationIfAny();
1337 } 1620 }
1338 } 1621 }
1339 1622
1340 void TraceLog::CancelWatchEvent() { 1623 void TraceLog::CancelWatchEvent() {
1341 AutoLock lock(lock_); 1624 AutoLock lock(lock_);
1342 watch_category_ = NULL; 1625 subtle::NoBarrier_Store(&watch_category_, 0);
1343 watch_event_name_ = ""; 1626 watch_event_name_ = "";
1344 } 1627 }
1345 1628
1346 namespace { 1629 namespace {
1347 1630
1348 template <typename T> 1631 template <typename T>
1349 void AddMetadataEventToBuffer( 1632 void AddMetadataEventToBuffer(
1350 TraceBuffer* logged_events, 1633 TraceBuffer* logged_events,
1351 int thread_id, 1634 int thread_id,
1352 const char* metadata_name, const char* arg_name, 1635 const char* metadata_name, const char* arg_name,
(...skipping 330 matching lines...)
1683 0, // num_args 1966 0, // num_args
1684 NULL, // arg_names 1967 NULL, // arg_names
1685 NULL, // arg_types 1968 NULL, // arg_types
1686 NULL, // arg_values 1969 NULL, // arg_values
1687 NULL, // convertable values 1970 NULL, // convertable values
1688 TRACE_EVENT_FLAG_NONE); // flags 1971 TRACE_EVENT_FLAG_NONE); // flags
1689 } 1972 }
1690 } 1973 }
1691 1974
1692 } // namespace trace_event_internal 1975 } // namespace trace_event_internal
