Chromium Code Reviews

Side by Side Diff: base/debug/trace_event_impl.cc

Issue 22962004: Thread-local trace-event buffers (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 7 years, 3 months ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/trace_event_impl.h" 5 #include "base/debug/trace_event_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/base_switches.h" 9 #include "base/base_switches.h"
10 #include "base/bind.h" 10 #include "base/bind.h"
11 #include "base/command_line.h" 11 #include "base/command_line.h"
12 #include "base/debug/leak_annotations.h" 12 #include "base/debug/leak_annotations.h"
13 #include "base/debug/trace_event.h" 13 #include "base/debug/trace_event.h"
14 #include "base/format_macros.h" 14 #include "base/format_macros.h"
15 #include "base/lazy_instance.h" 15 #include "base/lazy_instance.h"
16 #include "base/memory/singleton.h" 16 #include "base/memory/singleton.h"
17 #include "base/message_loop/message_loop.h"
17 #include "base/process/process_metrics.h" 18 #include "base/process/process_metrics.h"
18 #include "base/stl_util.h" 19 #include "base/stl_util.h"
19 #include "base/strings/string_split.h" 20 #include "base/strings/string_split.h"
20 #include "base/strings/string_tokenizer.h" 21 #include "base/strings/string_tokenizer.h"
21 #include "base/strings/string_util.h" 22 #include "base/strings/string_util.h"
22 #include "base/strings/stringprintf.h" 23 #include "base/strings/stringprintf.h"
23 #include "base/strings/utf_string_conversions.h" 24 #include "base/strings/utf_string_conversions.h"
24 #include "base/synchronization/cancellation_flag.h" 25 #include "base/synchronization/cancellation_flag.h"
25 #include "base/synchronization/waitable_event.h" 26 #include "base/synchronization/waitable_event.h"
26 #include "base/sys_info.h" 27 #include "base/sys_info.h"
27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" 28 #include "base/third_party/dynamic_annotations/dynamic_annotations.h"
28 #include "base/threading/platform_thread.h" 29 #include "base/threading/platform_thread.h"
29 #include "base/threading/thread_id_name_manager.h" 30 #include "base/threading/thread_id_name_manager.h"
30 #include "base/threading/thread_local.h"
31 #include "base/time/time.h" 31 #include "base/time/time.h"
32 32
33 #if defined(OS_WIN) 33 #if defined(OS_WIN)
34 #include "base/debug/trace_event_win.h" 34 #include "base/debug/trace_event_win.h"
35 #endif 35 #endif
36 36
37 class DeleteTraceLogForTesting { 37 class DeleteTraceLogForTesting {
38 public: 38 public:
39 static void Delete() { 39 static void Delete() {
40 Singleton<base::debug::TraceLog, 40 Singleton<base::debug::TraceLog,
41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0); 41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0);
42 } 42 }
43 }; 43 };
44 44
45 // The thread buckets for the sampling profiler. 45 // The thread buckets for the sampling profiler.
46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; 46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
47 47
48 namespace base { 48 namespace base {
49 namespace debug { 49 namespace debug {
50 50
51 namespace {
52
53 // The overhead of TraceEvent above this threshold will be reported in the
54 // trace.
55 const int kOverheadReportThresholdInMicroseconds = 50;
56
51 // Controls the number of trace events we will buffer in-memory 57 // Controls the number of trace events we will buffer in-memory
52 // before throwing them away. 58 // before throwing them away.
53 const size_t kTraceEventVectorBufferSize = 250000; 59 const size_t kTraceEventVectorBufferSize = 250000;
54 const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4; 60 const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4;
61 const size_t kTraceEventThreadLocalBufferSize = 1024;
55 const size_t kTraceEventBatchSize = 1000; 62 const size_t kTraceEventBatchSize = 1000;
56 const size_t kTraceEventInitialBufferSize = 1024; 63 const size_t kTraceEventInitialBufferSize = 1024;
57 64
58 #define MAX_CATEGORY_GROUPS 100 65 #define MAX_CATEGORY_GROUPS 100
59 66
60 namespace {
61
62 // Parallel arrays g_category_groups and g_category_group_enabled are separate 67 // Parallel arrays g_category_groups and g_category_group_enabled are separate
63 // so that a pointer to a member of g_category_group_enabled can be easily 68 // so that a pointer to a member of g_category_group_enabled can be easily
64 // converted to an index into g_category_groups. This allows macros to deal 69 // converted to an index into g_category_groups. This allows macros to deal
65 // only with char enabled pointers from g_category_group_enabled, and we can 70 // only with char enabled pointers from g_category_group_enabled, and we can
66 // convert internally to determine the category name from the char enabled 71 // convert internally to determine the category name from the char enabled
67 // pointer. 72 // pointer.
68 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { 73 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
69 "tracing already shutdown", 74 "tracing already shutdown",
70 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", 75 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
71 "__metadata", 76 "__metadata",
72 }; 77 // For reporting trace_event overhead. For thread local event buffers only.
dsinclair 2013/08/29 14:24:32 After this patch, will we have non-thread-local ev
Xianzhu 2013/08/29 17:42:12 Yes, trace events from a thread without a message
78 "trace_event_overhead"};
73 79
74 // The enabled flag is char instead of bool so that the API can be used from C. 80 // The enabled flag is char instead of bool so that the API can be used from C.
75 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 }; 81 unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 };
76 const int g_category_already_shutdown = 0; 82 const int g_category_already_shutdown = 0;
77 const int g_category_categories_exhausted = 1; 83 const int g_category_categories_exhausted = 1;
78 const int g_category_metadata = 2; 84 const int g_category_metadata = 2;
79 const int g_num_builtin_categories = 3; 85 const int g_category_trace_event_overhead = 3;
86 const int g_num_builtin_categories = 4;
80 int g_category_index = g_num_builtin_categories; // Skip default categories. 87 int g_category_index = g_num_builtin_categories; // Skip default categories.
81 88
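
To make the parallel-array comment above concrete, here is a minimal sketch (not part of the patch; the helper name is hypothetical) of recovering a category name from an enabled-flag pointer:

  const char* CategoryGroupNameFromEnabledPtr(
      const unsigned char* category_group_enabled) {
    // Pointer arithmetic within g_category_group_enabled yields the index
    // of the matching entry in the parallel g_category_groups array.
    size_t index = static_cast<size_t>(
        category_group_enabled - g_category_group_enabled);
    DCHECK(index < MAX_CATEGORY_GROUPS);
    return g_category_groups[index];
  }
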
82 // The name of the current thread. This is used to decide if the current 89 // The name of the current thread. This is used to decide if the current
83 // thread name has changed. We combine all the seen thread names into the 90 // thread name has changed. We combine all the seen thread names into the
84 // output name for the thread. 91 // output name for the thread.
85 LazyInstance<ThreadLocalPointer<const char> >::Leaky 92 LazyInstance<ThreadLocalPointer<const char> >::Leaky
86 g_current_thread_name = LAZY_INSTANCE_INITIALIZER; 93 g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
87 94
88 const char kRecordUntilFull[] = "record-until-full"; 95 const char kRecordUntilFull[] = "record-until-full";
89 const char kRecordContinuously[] = "record-continuously"; 96 const char kRecordContinuously[] = "record-continuously";
90 const char kEnableSampling[] = "enable-sampling"; 97 const char kEnableSampling[] = "enable-sampling";
91 98
92 } // namespace 99 TimeTicks ThreadNow() {
100 return TimeTicks::IsThreadNowSupported() ?
101 TimeTicks::ThreadNow() : TimeTicks();
dsinclair 2013/08/29 14:24:32 nit: indented too far.
Xianzhu 2013/08/29 17:42:12 Done.
102 }
93 103
94 class TraceBufferRingBuffer : public TraceBuffer { 104 class TraceBufferRingBuffer : public TraceBuffer {
95 public: 105 public:
96 TraceBufferRingBuffer() 106 TraceBufferRingBuffer()
97 : unused_event_index_(0), 107 : unused_event_index_(0),
98 oldest_event_index_(0) { 108 oldest_event_index_(0) {
99 logged_events_.reserve(kTraceEventInitialBufferSize); 109 logged_events_.reserve(kTraceEventInitialBufferSize);
100 } 110 }
101 111
102 virtual ~TraceBufferRingBuffer() {} 112 virtual ~TraceBufferRingBuffer() {}
(...skipping 73 matching lines...)
176 class TraceBufferVector : public TraceBuffer { 186 class TraceBufferVector : public TraceBuffer {
177 public: 187 public:
178 TraceBufferVector() : current_iteration_index_(0) { 188 TraceBufferVector() : current_iteration_index_(0) {
179 logged_events_.reserve(kTraceEventInitialBufferSize); 189 logged_events_.reserve(kTraceEventInitialBufferSize);
180 } 190 }
181 191
182 virtual ~TraceBufferVector() { 192 virtual ~TraceBufferVector() {
183 } 193 }
184 194
185 virtual void AddEvent(const TraceEvent& event) OVERRIDE { 195 virtual void AddEvent(const TraceEvent& event) OVERRIDE {
186 // Note, we have two callers which need to be handled. The first is 196 // Note, we have two callers which need to be handled:
187 // AddTraceEventWithThreadIdAndTimestamp() which checks Size() and does an 197 // - AddEventToMainBufferWhileLocked() which has two cases:
188 // early exit if full. The second is AddThreadNameMetadataEvents(). 198 // - called directly from AddTraceEventWithThreadIdAndTimestamp()
199 // which checks if buffer is full and does an early exit if full;
200 // - called from ThreadLocalEventBuffer::FlushWhileLocked();
201 // - AddThreadNameMetadataEvents().
189 // We cannot DCHECK(!IsFull()) because we have to add the metadata 202 // We cannot DCHECK(!IsFull()) because we have to add the metadata
190 // events even if the buffer is full. 203 // events and flush thread-local buffers even if the buffer is full.
191 logged_events_.push_back(event); 204 logged_events_.push_back(event);
192 } 205 }
193 206
194 virtual bool HasMoreEvents() const OVERRIDE { 207 virtual bool HasMoreEvents() const OVERRIDE {
195 return current_iteration_index_ < Size(); 208 return current_iteration_index_ < Size();
196 } 209 }
197 210
198 virtual const TraceEvent& NextEvent() OVERRIDE { 211 virtual const TraceEvent& NextEvent() OVERRIDE {
199 DCHECK(HasMoreEvents()); 212 DCHECK(HasMoreEvents());
200 return GetEventAt(current_iteration_index_++); 213 return GetEventAt(current_iteration_index_++);
(...skipping 61 matching lines...)
262 275
263 // As this buffer is never full, we can return any positive number. 276 // As this buffer is never full, we can return any positive number.
264 virtual size_t Capacity() const OVERRIDE { return 1; } 277 virtual size_t Capacity() const OVERRIDE { return 1; }
265 278
266 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE { 279 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
267 NOTREACHED(); 280 NOTREACHED();
268 return *static_cast<TraceEvent*>(NULL); 281 return *static_cast<TraceEvent*>(NULL);
269 } 282 }
270 }; 283 };
271 284
285 } // namespace
286
272 //////////////////////////////////////////////////////////////////////////////// 287 ////////////////////////////////////////////////////////////////////////////////
273 // 288 //
274 // TraceEvent 289 // TraceEvent
275 // 290 //
276 //////////////////////////////////////////////////////////////////////////////// 291 ////////////////////////////////////////////////////////////////////////////////
277 292
278 namespace { 293 namespace {
279 294
280 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; } 295 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
281 296
(...skipping 475 matching lines...)
757 772
758 TraceBucketData::~TraceBucketData() { 773 TraceBucketData::~TraceBucketData() {
759 } 774 }
760 775
761 //////////////////////////////////////////////////////////////////////////////// 776 ////////////////////////////////////////////////////////////////////////////////
762 // 777 //
763 // TraceLog 778 // TraceLog
764 // 779 //
765 //////////////////////////////////////////////////////////////////////////////// 780 ////////////////////////////////////////////////////////////////////////////////
766 781
782 class TraceLog::ThreadLocalEventBuffer
783 : public MessageLoop::DestructionObserver {
784 public:
785 ThreadLocalEventBuffer(TraceLog* trace_log);
786 virtual ~ThreadLocalEventBuffer();
787
788 void AddEvent(const TraceEvent& event, NotificationHelper* notifier);
789 void ReportOverhead(const TimeTicks& event_timestamp,
790 const TimeTicks& event_thread_timestamp);
791
792 private:
793 // MessageLoop::DestructionObserver
794 virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
795
796 void FlushWhileLocked(NotificationHelper* notifier);
797
798 void CheckThisIsCurrentBuffer() {
799 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
800 }
801
802 // Since TraceLog is a leaky singleton, trace_log_ will always be valid
803 // as long as the thread exists.
804 TraceLog* trace_log_;
805 std::vector<TraceEvent> logged_events_;
806 int event_count_;
807 TimeDelta overhead_;
808
809 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
810 };
811
812 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
813 : trace_log_(trace_log),
814 event_count_(0) {
815 logged_events_.reserve(kTraceEventThreadLocalBufferSize);
816
817 if (g_category_group_enabled[g_category_trace_event_overhead]) {
818 int thread_id = static_cast<int>(PlatformThread::CurrentId());
819 logged_events_.push_back(TraceEvent(
820 thread_id,
821 TimeTicks::NowFromSystemTraceTime() - trace_log->time_offset_,
822 ThreadNow(),
823 TRACE_EVENT_PHASE_ASYNC_BEGIN,
824 &g_category_group_enabled[g_category_trace_event_overhead],
825 "thread_trace_event",
826 thread_id,
827 0, NULL, NULL, NULL, NULL,
828 TRACE_EVENT_FLAG_HAS_ID));
829 }
830
831 // ThreadLocalEventBuffer is created only if the thread has message loop, so
dsinclair 2013/08/29 14:24:32 nit: if the thread has a message loop
Xianzhu 2013/08/29 17:42:12 Done.
832 // the following message_loop won't be NULL.
833 MessageLoop* message_loop = MessageLoop::current();
834 message_loop->AddDestructionObserver(this);
835
836 AutoLock lock(trace_log->lock_);
837 trace_log->thread_message_loops_.insert(message_loop);
838 }
839
840 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
841 CheckThisIsCurrentBuffer();
842 MessageLoop::current()->RemoveDestructionObserver(this);
843
844 if (event_count_) {
845 const char* arg_names[2] = { "event_count", "average_overhead" };
846 unsigned char arg_types[2];
847 unsigned long long arg_values[2];
848 trace_event_internal::SetTraceValue(
849 event_count_, &arg_types[0], &arg_values[0]);
850 trace_event_internal::SetTraceValue(
851 overhead_.InMillisecondsF() / event_count_,
dsinclair 2013/08/29 14:24:32 Does overhead_ get an initial value of 0 by defaul
Xianzhu 2013/08/29 17:42:12 Yes, set by the default ctor of TimeDelta.
852 &arg_types[1], &arg_values[1]);
853 int thread_id = static_cast<int>(PlatformThread::CurrentId());
854 logged_events_.push_back(TraceEvent(
855 thread_id,
856 TimeTicks::NowFromSystemTraceTime() - trace_log_->time_offset_,
857 ThreadNow(),
858 TRACE_EVENT_PHASE_ASYNC_END,
859 &g_category_group_enabled[g_category_trace_event_overhead],
860 "thread_trace_event",
861 thread_id,
862 2, arg_names, arg_types, arg_values, NULL,
863 TRACE_EVENT_FLAG_HAS_ID));
864 }
865
866 NotificationHelper notifier(trace_log_);
867 {
868 AutoLock lock(trace_log_->lock_);
869 FlushWhileLocked(&notifier);
870 trace_log_->thread_message_loops_.erase(MessageLoop::current());
871 }
872 notifier.SendNotificationIfAny();
873 }
874
875 void TraceLog::ThreadLocalEventBuffer::AddEvent(const TraceEvent& event,
876 NotificationHelper* notifier) {
877 CheckThisIsCurrentBuffer();
878 logged_events_.push_back(event);
879 if (logged_events_.size() >= kTraceEventThreadLocalBufferSize) {
880 AutoLock lock(trace_log_->lock_);
881 FlushWhileLocked(notifier);
882 }
883 }
884
885 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
886 const TimeTicks& event_timestamp, const TimeTicks& event_thread_timestamp) {
887 if (g_category_group_enabled[g_category_trace_event_overhead]) {
dsinclair 2013/08/29 14:24:32 if (!g_category_group_enabled[g_category_trace_eve
Xianzhu 2013/08/29 17:42:12 Done.
888 event_count_++;
889 TimeTicks now =
890 TimeTicks::NowFromSystemTraceTime() - trace_log_->time_offset_;
891 TimeDelta overhead = now - event_timestamp;
892 if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
893 int thread_id = static_cast<int>(PlatformThread::CurrentId());
894 // TODO(wangxianzhu): Use X event when it's ready.
895 logged_events_.push_back(TraceEvent(
896 thread_id, event_timestamp, event_thread_timestamp,
897 TRACE_EVENT_PHASE_BEGIN,
898 &g_category_group_enabled[g_category_trace_event_overhead],
899 "overhead",
900 0, 0, NULL, NULL, NULL, NULL, 0));
901 logged_events_.push_back(TraceEvent(
902 thread_id, now, ThreadNow(),
903 TRACE_EVENT_PHASE_END,
904 &g_category_group_enabled[g_category_trace_event_overhead],
905 "overhead",
906 0, 0, NULL, NULL, NULL, NULL, 0));
907 }
908 overhead_ += overhead;
909 }
910 }
911
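
For concreteness, a worked example of the threshold logic in ReportOverhead() above, using invented timestamps:

  // Hypothetical numbers: recording an event began at t = 1000us and
  // ReportOverhead() runs at t = 1060us, so the measured overhead is 60us.
  // 60us >= kOverheadReportThresholdInMicroseconds (50), so a synthetic
  // "overhead" BEGIN/END pair brackets the cost in the trace; the 60us
  // also accumulates into overhead_ for the average reported at thread exit.
  TimeDelta overhead = TimeDelta::FromMicroseconds(1060 - 1000);
  bool reported =
      overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds;
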
912 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
913 delete this;
914 }
915
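
The delete this above is the standard MessageLoop::DestructionObserver self-cleanup pattern; a minimal, self-contained sketch (the class name is illustrative, not from the patch):

  class ExampleSelfDeletingObserver : public MessageLoop::DestructionObserver {
   public:
    ExampleSelfDeletingObserver() {
      // Requires a thread with a message loop, like ThreadLocalEventBuffer.
      MessageLoop::current()->AddDestructionObserver(this);
    }

    // Called on this thread just before its message loop is destroyed,
    // i.e. at thread exit; the last chance to flush thread-affine state.
    virtual void WillDestroyCurrentMessageLoop() OVERRIDE {
      delete this;
    }

   private:
    virtual ~ExampleSelfDeletingObserver() {
      MessageLoop::current()->RemoveDestructionObserver(this);
    }
  };
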
916 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked(
917 NotificationHelper* notifier) {
918 trace_log_->lock_.AssertAcquired();
919 for (size_t i = 0; i < logged_events_.size(); ++i) {
920 trace_log_->AddEventToMainBufferWhileLocked(logged_events_[i]);
921 }
922 logged_events_.resize(0);
923 trace_log_->CheckIfBufferIsFullWhileLocked(notifier);
924 }
925
767 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log) 926 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
768 : trace_log_(trace_log), 927 : trace_log_(trace_log),
769 notification_(0) { 928 notification_(0) {
770 } 929 }
771 930
772 TraceLog::NotificationHelper::~NotificationHelper() { 931 TraceLog::NotificationHelper::~NotificationHelper() {
773 } 932 }
774 933
775 void TraceLog::NotificationHelper::AddNotificationWhileLocked( 934 void TraceLog::NotificationHelper::AddNotificationWhileLocked(
776 int notification) { 935 int notification) {
936 trace_log_->lock_.AssertAcquired();
777 if (trace_log_->notification_callback_.is_null()) 937 if (trace_log_->notification_callback_.is_null())
778 return; 938 return;
779 if (notification_ == 0) 939 if (notification_ == 0)
780 callback_copy_ = trace_log_->notification_callback_; 940 callback_copy_ = trace_log_->notification_callback_;
781 notification_ |= notification; 941 notification_ |= notification;
782 } 942 }
783 943
784 void TraceLog::NotificationHelper::SendNotificationIfAny() { 944 void TraceLog::NotificationHelper::SendNotificationIfAny() {
785 if (notification_) 945 if (notification_)
786 callback_copy_.Run(notification_); 946 callback_copy_.Run(notification_);
(...skipping 26 matching lines...)
813 } 973 }
814 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) 974 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
815 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. 975 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
816 976
817 return static_cast<Options>(ret); 977 return static_cast<Options>(ret);
818 } 978 }
819 979
820 TraceLog::TraceLog() 980 TraceLog::TraceLog()
821 : enable_count_(0), 981 : enable_count_(0),
822 num_traces_recorded_(0), 982 num_traces_recorded_(0),
823 event_callback_(NULL), 983 buffer_is_full_(0),
984 event_callback_(0),
824 dispatching_to_observer_list_(false), 985 dispatching_to_observer_list_(false),
825 process_sort_index_(0), 986 process_sort_index_(0),
826 watch_category_(NULL), 987 process_id_hash_(0),
988 process_id_(0),
989 watch_category_(0),
827 trace_options_(RECORD_UNTIL_FULL), 990 trace_options_(RECORD_UNTIL_FULL),
828 sampling_thread_handle_(0), 991 sampling_thread_handle_(0),
829 category_filter_(CategoryFilter::kDefaultCategoryFilterString) { 992 category_filter_(CategoryFilter::kDefaultCategoryFilterString) {
830 // Trace is enabled or disabled on one thread while other threads are 993 // Trace is enabled or disabled on one thread while other threads are
831 // accessing the enabled flag. We don't care whether edge-case events are 994 // accessing the enabled flag. We don't care whether edge-case events are
832 // traced or not, so we allow races on the enabled flag to keep the trace 995 // traced or not, so we allow races on the enabled flag to keep the trace
833 // macros fast. 996 // macros fast.
834 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: 997 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
835 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, 998 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
836 // sizeof(g_category_group_enabled), 999 // sizeof(g_category_group_enabled),
(...skipping 117 matching lines...)
954 category_group_enabled = 1117 category_group_enabled =
955 &g_category_group_enabled[g_category_categories_exhausted]; 1118 &g_category_group_enabled[g_category_categories_exhausted];
956 } 1119 }
957 } 1120 }
958 return category_group_enabled; 1121 return category_group_enabled;
959 } 1122 }
960 1123
961 void TraceLog::GetKnownCategoryGroups( 1124 void TraceLog::GetKnownCategoryGroups(
962 std::vector<std::string>* category_groups) { 1125 std::vector<std::string>* category_groups) {
963 AutoLock lock(lock_); 1126 AutoLock lock(lock_);
1127 category_groups->push_back(
1128 g_category_groups[g_category_trace_event_overhead]);
964 for (int i = g_num_builtin_categories; i < g_category_index; i++) 1129 for (int i = g_num_builtin_categories; i < g_category_index; i++)
965 category_groups->push_back(g_category_groups[i]); 1130 category_groups->push_back(g_category_groups[i]);
966 } 1131 }
967 1132
968 void TraceLog::SetEnabled(const CategoryFilter& category_filter, 1133 void TraceLog::SetEnabled(const CategoryFilter& category_filter,
969 Options options) { 1134 Options options) {
970 std::vector<EnabledStateObserver*> observer_list; 1135 std::vector<EnabledStateObserver*> observer_list;
971 { 1136 {
972 AutoLock lock(lock_); 1137 AutoLock lock(lock_);
973 1138
1139 Options old_options = trace_options();
1140
974 if (enable_count_++ > 0) { 1141 if (enable_count_++ > 0) {
975 if (options != trace_options_) { 1142 if (options != old_options) {
976 DLOG(ERROR) << "Attempting to re-enable tracing with a different " 1143 DLOG(ERROR) << "Attempting to re-enable tracing with a different "
977 << "set of options."; 1144 << "set of options.";
978 } 1145 }
979 1146
980 category_filter_.Merge(category_filter); 1147 category_filter_.Merge(category_filter);
981 UpdateCategoryGroupEnabledFlags(); 1148 UpdateCategoryGroupEnabledFlags();
982 return; 1149 return;
983 } 1150 }
984 1151
985 if (options != trace_options_) { 1152 if (options != old_options) {
986 trace_options_ = options; 1153 subtle::NoBarrier_Store(&trace_options_, options);
987 logged_events_.reset(GetTraceBuffer()); 1154 logged_events_.reset(GetTraceBuffer());
1155 subtle::NoBarrier_Store(&buffer_is_full_, 0);
988 } 1156 }
989 1157
990 if (dispatching_to_observer_list_) { 1158 if (dispatching_to_observer_list_) {
991 DLOG(ERROR) << 1159 DLOG(ERROR) <<
992 "Cannot manipulate TraceLog::Enabled state from an observer."; 1160 "Cannot manipulate TraceLog::Enabled state from an observer.";
993 return; 1161 return;
994 } 1162 }
995 1163
996 num_traces_recorded_++; 1164 num_traces_recorded_++;
997 1165
(...skipping 37 matching lines...)
1035 AutoLock lock(lock_); 1203 AutoLock lock(lock_);
1036 DCHECK(enable_count_ > 0); 1204 DCHECK(enable_count_ > 0);
1037 return category_filter_; 1205 return category_filter_;
1038 } 1206 }
1039 1207
1040 void TraceLog::SetDisabled() { 1208 void TraceLog::SetDisabled() {
1041 std::vector<EnabledStateObserver*> observer_list; 1209 std::vector<EnabledStateObserver*> observer_list;
1042 { 1210 {
1043 AutoLock lock(lock_); 1211 AutoLock lock(lock_);
1044 DCHECK(enable_count_ > 0); 1212 DCHECK(enable_count_ > 0);
1213
1045 if (--enable_count_ != 0) 1214 if (--enable_count_ != 0)
1046 return; 1215 return;
1047 1216
1048 if (dispatching_to_observer_list_) { 1217 if (dispatching_to_observer_list_) {
1049 DLOG(ERROR) 1218 DLOG(ERROR)
1050 << "Cannot manipulate TraceLog::Enabled state from an observer."; 1219 << "Cannot manipulate TraceLog::Enabled state from an observer.";
1051 return; 1220 return;
1052 } 1221 }
1053 1222
1054 if (sampling_thread_.get()) { 1223 if (sampling_thread_.get()) {
1055 // Stop the sampling thread. 1224 // Stop the sampling thread.
1056 sampling_thread_->Stop(); 1225 sampling_thread_->Stop();
1057 lock_.Release(); 1226 lock_.Release();
1058 PlatformThread::Join(sampling_thread_handle_); 1227 PlatformThread::Join(sampling_thread_handle_);
1059 lock_.Acquire(); 1228 lock_.Acquire();
1060 sampling_thread_handle_ = PlatformThreadHandle(); 1229 sampling_thread_handle_ = PlatformThreadHandle();
1061 sampling_thread_.reset(); 1230 sampling_thread_.reset();
1062 } 1231 }
1063 1232
1064 category_filter_.Clear(); 1233 category_filter_.Clear();
1065 watch_category_ = NULL; 1234 subtle::NoBarrier_Store(&watch_category_, 0);
1066 watch_event_name_ = ""; 1235 watch_event_name_ = "";
1067 UpdateCategoryGroupEnabledFlags(); 1236 UpdateCategoryGroupEnabledFlags();
1068 AddMetadataEvents(); 1237 AddMetadataEvents();
1069 1238
1070 dispatching_to_observer_list_ = true; 1239 dispatching_to_observer_list_ = true;
1071 observer_list = enabled_state_observer_list_; 1240 observer_list = enabled_state_observer_list_;
1072 } 1241 }
1073 1242
1074 // Dispatch to observers outside the lock in case the observer triggers a 1243 // Dispatch to observers outside the lock in case the observer triggers a
1075 // trace event. 1244 // trace event.
(...skipping 39 matching lines...)
1115 logged_events_->Capacity()); 1284 logged_events_->Capacity());
1116 } 1285 }
1117 1286
1118 void TraceLog::SetNotificationCallback( 1287 void TraceLog::SetNotificationCallback(
1119 const TraceLog::NotificationCallback& cb) { 1288 const TraceLog::NotificationCallback& cb) {
1120 AutoLock lock(lock_); 1289 AutoLock lock(lock_);
1121 notification_callback_ = cb; 1290 notification_callback_ = cb;
1122 } 1291 }
1123 1292
1124 TraceBuffer* TraceLog::GetTraceBuffer() { 1293 TraceBuffer* TraceLog::GetTraceBuffer() {
1125 if (trace_options_ & RECORD_CONTINUOUSLY) 1294 Options options = trace_options();
1295 if (options & RECORD_CONTINUOUSLY)
1126 return new TraceBufferRingBuffer(); 1296 return new TraceBufferRingBuffer();
1127 else if (trace_options_ & ECHO_TO_CONSOLE) 1297 else if (options & ECHO_TO_CONSOLE)
1128 return new TraceBufferDiscardsEvents(); 1298 return new TraceBufferDiscardsEvents();
1129 return new TraceBufferVector(); 1299 return new TraceBufferVector();
1130 } 1300 }
1131 1301
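
A hypothetical caller, showing how the recording mode passed to SetEnabled() selects a buffer implementation through GetTraceBuffer() above (the filter string is invented for the example):

  // RECORD_UNTIL_FULL   -> TraceBufferVector (stops recording when full)
  // RECORD_CONTINUOUSLY -> TraceBufferRingBuffer (overwrites oldest events)
  // ECHO_TO_CONSOLE     -> TraceBufferDiscardsEvents (nothing retained)
  TraceLog::GetInstance()->SetEnabled(
      CategoryFilter("test_*"),  // hypothetical category filter
      TraceLog::RECORD_CONTINUOUSLY);
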
1302 void TraceLog::AddEventToMainBufferWhileLocked(const TraceEvent& trace_event) {
1303 // Don't check buffer_is_full_ because we want the remaining thread-local
1304 // events to be flushed into the main buffer with this method; otherwise
1305 // we may lose some early events of a thread that generates events sparsely.
1306 lock_.AssertAcquired();
1307 logged_events_->AddEvent(trace_event);
1308 }
1309
1310 void TraceLog::CheckIfBufferIsFullWhileLocked(NotificationHelper* notifier) {
dsinclair 2013/08/29 14:24:32 lock_.AssertAcquired();
Xianzhu 2013/08/29 17:42:12 Done.
1311 if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) {
1312 subtle::NoBarrier_Store(&buffer_is_full_,
1313 static_cast<subtle::AtomicWord>(1));
1314 notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL);
1315 }
1316 }
1317
1132 void TraceLog::SetEventCallback(EventCallback cb) { 1318 void TraceLog::SetEventCallback(EventCallback cb) {
1133 AutoLock lock(lock_); 1319 subtle::NoBarrier_Store(&event_callback_,
1134 event_callback_ = cb; 1320 reinterpret_cast<subtle::AtomicWord>(cb));
1135 }; 1321 };
1136 1322
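
SetEventCallback() now publishes the callback through a word-sized atomic instead of taking lock_; a minimal sketch of the publish/read pattern (the names here are illustrative):

  subtle::AtomicWord g_example_callback = 0;

  void PublishCallback(TraceLog::EventCallback cb) {
    subtle::NoBarrier_Store(&g_example_callback,
                            reinterpret_cast<subtle::AtomicWord>(cb));
  }

  TraceLog::EventCallback ReadCallback() {
    // Hot-path readers load the word without holding lock_; a briefly
    // stale value is a benign race that tracing tolerates for speed.
    return reinterpret_cast<TraceLog::EventCallback>(
        subtle::NoBarrier_Load(&g_example_callback));
  }
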
1323 // Flush() works as follows:
1324 // 1. Flush() is called on thread A, whose message loop is saved in
1325 // flush_message_loop_proxy_;
1326 // 2. On that thread, FlushNextThreadOrFinish() gets the first message loop
1327 // from thread_message_loops_. If one exists, it posts a task to run
1328 // FlushCurrentThreadAndContinue() in that message loop; otherwise it
1329 // finishes the flush (step 4);
1330 // 3. FlushCurrentThreadAndContinue() deletes the thread-local event buffer:
1331 // - The last batch of the thread's events is flushed into the main
1332 // buffer;
1333 // - The message loop is removed from thread_message_loops_;
1334 // and posts FlushNextThreadOrFinish() to flush_message_loop_proxy_ to
1335 // continue the flush procedure (step 2);
1336 // 4. When all thread-local buffers have been flushed (and deleted), the flush
1337 // finishes by calling the OutputCallback with all events converted into
1338 // JSON format.
1137 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { 1339 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
1138 // Ignore memory allocations from here down. 1340 if (IsEnabled()) {
dsinclair 2013/08/29 14:24:32 Is this, basically, to stop us from doing any post
Xianzhu 2013/08/29 17:42:12 Yes. In addition, as posting tasks will generate
1139 INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"), 1341 scoped_refptr<RefCountedString> empty_result = new RefCountedString;
1140 TRACE_MEMORY_IGNORE); 1342 cb.Run(empty_result, false);
dsinclair 2013/08/29 14:24:32 Should we check if cb.isNull() here before we try
Xianzhu 2013/08/29 17:42:12 Done.
1343 return;
1344 }
1345
1346 {
1347 AutoLock lock(lock_);
1348 DCHECK(!flush_message_loop_proxy_.get());
1349 flush_message_loop_proxy_ = MessageLoopProxy::current();
1350 DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
1351 flush_output_callback_ = cb;
1352 }
1353 FlushNextThreadOrFinish();
1354 }
1355
1356 void TraceLog::FlushNextThreadOrFinish() {
1141 scoped_ptr<TraceBuffer> previous_logged_events; 1357 scoped_ptr<TraceBuffer> previous_logged_events;
1142 { 1358 {
1143 AutoLock lock(lock_); 1359 AutoLock lock(lock_);
1360 hash_set<MessageLoop*>::const_iterator next_message_loop =
1361 thread_message_loops_.begin();
1362 if (next_message_loop != thread_message_loops_.end()) {
1363 // Destroy the next thread local buffer. The buffer will be flushed into
1364 // the main event buffer and the message loop will be removed from
1365 // thread_message_loops.
1366 (*next_message_loop)->PostTask(
1367 FROM_HERE,
1368 Bind(&TraceLog::FlushCurrentThreadAndContinue,
1369 Unretained(this)));
1370 return;
1371 }
1372
1373 // All thread local buffers have been flushed (and destroyed).
1374 // From here to the end of the function finishes the whole flush procedure.
1144 previous_logged_events.swap(logged_events_); 1375 previous_logged_events.swap(logged_events_);
1145 logged_events_.reset(GetTraceBuffer()); 1376 logged_events_.reset(GetTraceBuffer());
1377 subtle::NoBarrier_Store(&buffer_is_full_, 0);
1146 } // release lock 1378 } // release lock
1147 1379
1148 while (previous_logged_events->HasMoreEvents()) { 1380 flush_message_loop_proxy_ = NULL;
1381
1382 bool has_more_events = previous_logged_events->HasMoreEvents();
1383 // The callback needs to be called at least once even if there are no events,
1384 // to let the caller know that the flush has completed.
1385 do {
1149 scoped_refptr<RefCountedString> json_events_str_ptr = 1386 scoped_refptr<RefCountedString> json_events_str_ptr =
1150 new RefCountedString(); 1387 new RefCountedString();
1151 1388
1152 for (size_t i = 0; i < kTraceEventBatchSize; ++i) { 1389 for (size_t i = 0; has_more_events && i < kTraceEventBatchSize; ++i) {
1153 if (i > 0) 1390 if (i > 0)
1154 *(&(json_events_str_ptr->data())) += ","; 1391 *(&(json_events_str_ptr->data())) += ",";
1155 1392
1156 previous_logged_events->NextEvent().AppendAsJSON( 1393 previous_logged_events->NextEvent().AppendAsJSON(
1157 &(json_events_str_ptr->data())); 1394 &(json_events_str_ptr->data()));
1158 1395
1159 if (!previous_logged_events->HasMoreEvents()) 1396 has_more_events = previous_logged_events->HasMoreEvents();
1160 break;
1161 } 1397 }
1162 1398
1163 cb.Run(json_events_str_ptr); 1399 flush_output_callback_.Run(json_events_str_ptr, has_more_events);
dsinclair 2013/08/29 14:24:32 Should we check if flush_output_callback_ exists?
Xianzhu 2013/08/29 17:42:12 Done.
1164 } 1400 } while (has_more_events);
1401 }
1402
1403 // Run in each thread holding a local event buffer.
1404 void TraceLog::FlushCurrentThreadAndContinue() {
1405 delete thread_local_event_buffer_.Get();
1406 thread_local_event_buffer_.Set(NULL);
1407
1408 // Continue the flush procedure.
1409 flush_message_loop_proxy_->PostTask(
1410 FROM_HERE,
1411 Bind(&TraceLog::FlushNextThreadOrFinish, Unretained(this)));
1165 } 1412 }
1166 1413
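
Under the new contract, the flush output callback can run multiple times; a hypothetical caller, assuming OutputCallback takes the JSON chunk plus a has_more_events flag as the Run() calls above suggest:

  void OnTraceDataCollected(const scoped_refptr<RefCountedString>& json_chunk,
                            bool has_more_events) {
    // Append json_chunk->data() to the accumulated trace; the flush is
    // complete once has_more_events is false.
  }

  void CollectTrace() {
    TraceLog::GetInstance()->Flush(Bind(&OnTraceDataCollected));
  }
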
1167 void TraceLog::AddTraceEvent( 1414 void TraceLog::AddTraceEvent(
1168 char phase, 1415 char phase,
1169 const unsigned char* category_group_enabled, 1416 const unsigned char* category_group_enabled,
1170 const char* name, 1417 const char* name,
1171 unsigned long long id, 1418 unsigned long long id,
1172 int num_args, 1419 int num_args,
1173 const char** arg_names, 1420 const char** arg_names,
1174 const unsigned char* arg_types, 1421 const unsigned char* arg_types,
(...skipping 29 matching lines...)
1204 #if defined(OS_ANDROID) 1451 #if defined(OS_ANDROID)
1205 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id, 1452 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
1206 num_args, arg_names, arg_types, arg_values, convertable_values, 1453 num_args, arg_names, arg_types, arg_values, convertable_values,
1207 flags); 1454 flags);
1208 #endif 1455 #endif
1209 1456
1210 if (!IsCategoryGroupEnabled(category_group_enabled)) 1457 if (!IsCategoryGroupEnabled(category_group_enabled))
1211 return; 1458 return;
1212 1459
1213 TimeTicks now = timestamp - time_offset_; 1460 TimeTicks now = timestamp - time_offset_;
1214 base::TimeTicks thread_now; 1461 TimeTicks thread_now = ThreadNow();
1215 if (base::TimeTicks::IsThreadNowSupported())
1216 thread_now = base::TimeTicks::ThreadNow();
1217 EventCallback event_callback_copy;
1218 1462
1219 NotificationHelper notifier(this); 1463 NotificationHelper notifier(this);
1220 1464
1465 ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
1466 // A ThreadLocalEventBuffer needs the message loop
1467 // - to know when the thread exits;
1468 // - to handle the final flush.
1469 // For a thread without a message loop, the trace events will be added into
1470 // the main buffer directly.
1471 if (MessageLoop::current()) {
1472 thread_local_event_buffer = thread_local_event_buffer_.Get();
1473 if (!thread_local_event_buffer) {
1474 thread_local_event_buffer = new ThreadLocalEventBuffer(this);
1475 thread_local_event_buffer_.Set(thread_local_event_buffer);
1476 }
1477 }
1478
1221 // Check and update the current thread name only if the event is for the 1479 // Check and update the current thread name only if the event is for the
1222 // current thread to avoid locks in most cases. 1480 // current thread to avoid locks in most cases.
1223 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { 1481 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
1224 const char* new_name = ThreadIdNameManager::GetInstance()-> 1482 const char* new_name = ThreadIdNameManager::GetInstance()->
1225 GetName(thread_id); 1483 GetName(thread_id);
1226 // Check if the thread name has been set or changed since the previous 1484 // Check if the thread name has been set or changed since the previous
1227 // call (if any), but don't bother if the new name is empty. Note this will 1485 // call (if any), but don't bother if the new name is empty. Note this will
1228 // not detect a thread name change within the same char* buffer address: we 1486 // not detect a thread name change within the same char* buffer address: we
1229 // favor common case performance over corner case correctness. 1487 // favor common case performance over corner case correctness.
1230 if (new_name != g_current_thread_name.Get().Get() && 1488 if (new_name != g_current_thread_name.Get().Get() &&
(...skipping 15 matching lines...)
1246 existing_names.end(), 1504 existing_names.end(),
1247 new_name) != existing_names.end(); 1505 new_name) != existing_names.end();
1248 if (!found) { 1506 if (!found) {
1249 existing_name->second.push_back(','); 1507 existing_name->second.push_back(',');
1250 existing_name->second.append(new_name); 1508 existing_name->second.append(new_name);
1251 } 1509 }
1252 } 1510 }
1253 } 1511 }
1254 } 1512 }
1255 1513
1256 TraceEvent trace_event(thread_id, 1514 if (!subtle::NoBarrier_Load(&buffer_is_full_)) {
1257 now, thread_now, phase, category_group_enabled, name, id, 1515 TraceEvent trace_event(thread_id, now, thread_now, phase,
1258 num_args, arg_names, arg_types, arg_values, 1516 category_group_enabled, name, id,
1259 convertable_values, flags); 1517 num_args, arg_names, arg_types, arg_values,
1518 convertable_values, flags);
1260 1519
1261 do { 1520 if (thread_local_event_buffer) {
1262 AutoLock lock(lock_); 1521 thread_local_event_buffer->AddEvent(trace_event, &notifier);
1522 } else {
1523 AutoLock lock(lock_);
1524 AddEventToMainBufferWhileLocked(trace_event);
1525 CheckIfBufferIsFullWhileLocked(&notifier);
1526 }
1263 1527
1264 event_callback_copy = event_callback_; 1528 if (trace_options() & ECHO_TO_CONSOLE) {
1265 if (logged_events_->IsFull()) 1529 AutoLock lock(lock_);
1266 break;
1267 1530
1268 logged_events_->AddEvent(trace_event);
1269
1270 if (trace_options_ & ECHO_TO_CONSOLE) {
1271 TimeDelta duration; 1531 TimeDelta duration;
1272 if (phase == TRACE_EVENT_PHASE_END) { 1532 if (phase == TRACE_EVENT_PHASE_END) {
1273 duration = timestamp - thread_event_start_times_[thread_id].top(); 1533 duration = timestamp - thread_event_start_times_[thread_id].top();
1274 thread_event_start_times_[thread_id].pop(); 1534 thread_event_start_times_[thread_id].pop();
1275 } 1535 }
1276 1536
1277 std::string thread_name = thread_names_[thread_id]; 1537 std::string thread_name = thread_names_[thread_id];
1278 if (thread_colors_.find(thread_name) == thread_colors_.end()) 1538 if (thread_colors_.find(thread_name) == thread_colors_.end())
1279 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1; 1539 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1;
1280 1540
(...skipping 12 matching lines...)
1293 1553
1294 trace_event.AppendPrettyPrinted(&log); 1554 trace_event.AppendPrettyPrinted(&log);
1295 if (phase == TRACE_EVENT_PHASE_END) 1555 if (phase == TRACE_EVENT_PHASE_END)
1296 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); 1556 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF());
1297 1557
1298 LOG(ERROR) << log.str() << "\x1b[0;m"; 1558 LOG(ERROR) << log.str() << "\x1b[0;m";
1299 1559
1300 if (phase == TRACE_EVENT_PHASE_BEGIN) 1560 if (phase == TRACE_EVENT_PHASE_BEGIN)
1301 thread_event_start_times_[thread_id].push(timestamp); 1561 thread_event_start_times_[thread_id].push(timestamp);
1302 } 1562 }
1563 }
1303 1564
1304 if (logged_events_->IsFull()) 1565 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1305 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL); 1566 &watch_category_)) == category_group_enabled) {
1306 1567 AutoLock lock(lock_);
1307 if (watch_category_ == category_group_enabled && watch_event_name_ == name) 1568 if (watch_event_name_ == name)
1308 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1569 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1309 } while (0); // release lock 1570 }
1310 1571
1311 notifier.SendNotificationIfAny(); 1572 notifier.SendNotificationIfAny();
1312 if (event_callback_copy != NULL) { 1573 EventCallback event_callback = reinterpret_cast<EventCallback>(
1313 event_callback_copy(phase, category_group_enabled, name, id, 1574 subtle::NoBarrier_Load(&event_callback_));
1314 num_args, arg_names, arg_types, arg_values, 1575 if (event_callback) {
1315 flags); 1576 event_callback(phase, category_group_enabled, name, id,
1577 num_args, arg_names, arg_types, arg_values,
1578 flags);
1316 } 1579 }
1580
1581 if (thread_local_event_buffer)
1582 thread_local_event_buffer->ReportOverhead(now, thread_now);
1317 } 1583 }
1318 1584
1319 void TraceLog::AddTraceEventEtw(char phase, 1585 void TraceLog::AddTraceEventEtw(char phase,
1320 const char* name, 1586 const char* name,
1321 const void* id, 1587 const void* id,
1322 const char* extra) { 1588 const char* extra) {
1323 #if defined(OS_WIN) 1589 #if defined(OS_WIN)
1324 TraceEventETWProvider::Trace(name, phase, id, extra); 1590 TraceEventETWProvider::Trace(name, phase, id, extra);
1325 #endif 1591 #endif
1326 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, 1592 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
(...skipping 12 matching lines...)
1339 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); 1605 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1340 } 1606 }
1341 1607
1342 void TraceLog::SetWatchEvent(const std::string& category_name, 1608 void TraceLog::SetWatchEvent(const std::string& category_name,
1343 const std::string& event_name) { 1609 const std::string& event_name) {
1344 const unsigned char* category = GetCategoryGroupEnabled( 1610 const unsigned char* category = GetCategoryGroupEnabled(
1345 category_name.c_str()); 1611 category_name.c_str());
1346 size_t notify_count = 0; 1612 size_t notify_count = 0;
1347 { 1613 {
1348 AutoLock lock(lock_); 1614 AutoLock lock(lock_);
1349 watch_category_ = category; 1615 subtle::NoBarrier_Store(&watch_category_,
1616 reinterpret_cast<subtle::AtomicWord>(category));
1350 watch_event_name_ = event_name; 1617 watch_event_name_ = event_name;
1351 1618
1352 // First, search existing events for watch event because we want to catch 1619 // First, search existing events for watch event because we want to catch
1353 // it even if it has already occurred. 1620 // it even if it has already occurred.
1354 notify_count = logged_events_->CountEnabledByName(category, event_name); 1621 notify_count = logged_events_->CountEnabledByName(category, event_name);
1355 } // release lock 1622 } // release lock
1356 1623
1357 // Send notification for each event found. 1624 // Send notification for each event found.
1358 for (size_t i = 0; i < notify_count; ++i) { 1625 for (size_t i = 0; i < notify_count; ++i) {
1359 NotificationHelper notifier(this); 1626 NotificationHelper notifier(this);
1360 lock_.Acquire(); 1627 lock_.Acquire();
1361 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1628 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1362 lock_.Release(); 1629 lock_.Release();
1363 notifier.SendNotificationIfAny(); 1630 notifier.SendNotificationIfAny();
1364 } 1631 }
1365 } 1632 }
1366 1633
1367 void TraceLog::CancelWatchEvent() { 1634 void TraceLog::CancelWatchEvent() {
1368 AutoLock lock(lock_); 1635 AutoLock lock(lock_);
1369 watch_category_ = NULL; 1636 subtle::NoBarrier_Store(&watch_category_, 0);
1370 watch_event_name_ = ""; 1637 watch_event_name_ = "";
1371 } 1638 }
1372 1639
1373 namespace { 1640 namespace {
1374 1641
1375 template <typename T> 1642 template <typename T>
1376 void AddMetadataEventToBuffer( 1643 void AddMetadataEventToBuffer(
1377 TraceBuffer* logged_events, 1644 TraceBuffer* logged_events,
1378 int thread_id, 1645 int thread_id,
1379 const char* metadata_name, const char* arg_name, 1646 const char* metadata_name, const char* arg_name,
(...skipping 330 matching lines...)
1710 0, // num_args 1977 0, // num_args
1711 NULL, // arg_names 1978 NULL, // arg_names
1712 NULL, // arg_types 1979 NULL, // arg_types
1713 NULL, // arg_values 1980 NULL, // arg_values
1714 NULL, // convertable values 1981 NULL, // convertable values
1715 TRACE_EVENT_FLAG_NONE); // flags 1982 TRACE_EVENT_FLAG_NONE); // flags
1716 } 1983 }
1717 } 1984 }
1718 1985
1719 } // namespace trace_event_internal 1986 } // namespace trace_event_internal