Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/trace_event_impl.h" | 5 #include "base/debug/trace_event_impl.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/base_switches.h" | 9 #include "base/base_switches.h" |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| 11 #include "base/command_line.h" | 11 #include "base/command_line.h" |
| 12 #include "base/debug/leak_annotations.h" | 12 #include "base/debug/leak_annotations.h" |
| 13 #include "base/debug/trace_event.h" | 13 #include "base/debug/trace_event.h" |
| 14 #include "base/format_macros.h" | 14 #include "base/format_macros.h" |
| 15 #include "base/lazy_instance.h" | 15 #include "base/lazy_instance.h" |
| 16 #include "base/memory/singleton.h" | 16 #include "base/memory/singleton.h" |
| 17 #include "base/message_loop/message_loop.h" | |
| 17 #include "base/process/process_metrics.h" | 18 #include "base/process/process_metrics.h" |
| 18 #include "base/stl_util.h" | 19 #include "base/stl_util.h" |
| 19 #include "base/strings/string_split.h" | 20 #include "base/strings/string_split.h" |
| 20 #include "base/strings/string_tokenizer.h" | 21 #include "base/strings/string_tokenizer.h" |
| 21 #include "base/strings/string_util.h" | 22 #include "base/strings/string_util.h" |
| 22 #include "base/strings/stringprintf.h" | 23 #include "base/strings/stringprintf.h" |
| 23 #include "base/strings/utf_string_conversions.h" | 24 #include "base/strings/utf_string_conversions.h" |
| 24 #include "base/synchronization/cancellation_flag.h" | 25 #include "base/synchronization/cancellation_flag.h" |
| 25 #include "base/synchronization/waitable_event.h" | 26 #include "base/synchronization/waitable_event.h" |
| 26 #include "base/sys_info.h" | 27 #include "base/sys_info.h" |
| 27 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" | 28 #include "base/third_party/dynamic_annotations/dynamic_annotations.h" |
| 28 #include "base/threading/platform_thread.h" | 29 #include "base/threading/platform_thread.h" |
| 29 #include "base/threading/thread_id_name_manager.h" | 30 #include "base/threading/thread_id_name_manager.h" |
| 30 #include "base/threading/thread_local.h" | |
| 31 #include "base/time/time.h" | 31 #include "base/time/time.h" |
| 32 | 32 |
| 33 #if defined(OS_WIN) | 33 #if defined(OS_WIN) |
| 34 #include "base/debug/trace_event_win.h" | 34 #include "base/debug/trace_event_win.h" |
| 35 #endif | 35 #endif |
| 36 | 36 |
| 37 class DeleteTraceLogForTesting { | 37 class DeleteTraceLogForTesting { |
| 38 public: | 38 public: |
| 39 static void Delete() { | 39 static void Delete() { |
| 40 Singleton<base::debug::TraceLog, | 40 Singleton<base::debug::TraceLog, |
| 41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0); | 41 LeakySingletonTraits<base::debug::TraceLog> >::OnExit(0); |
| 42 } | 42 } |
| 43 }; | 43 }; |
| 44 | 44 |
| 45 // The thread buckets for the sampling profiler. | 45 // The thread buckets for the sampling profiler. |
| 46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; | 46 BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; |
| 47 | 47 |
| 48 namespace base { | 48 namespace base { |
| 49 namespace debug { | 49 namespace debug { |
| 50 | 50 |
| 51 namespace { | |
| 52 | |
| 51 // Controls the number of trace events we will buffer in-memory | 53 // Controls the number of trace events we will buffer in-memory |
| 52 // before throwing them away. | 54 // before throwing them away. |
| 53 const size_t kTraceEventBufferSize = 500000; | 55 const size_t kTraceEventBufferSize = 500000; |
| 56 const size_t kTraceEventThreadLocalBufferSize = 1024; | |
| 54 const size_t kTraceEventBatchSize = 1000; | 57 const size_t kTraceEventBatchSize = 1000; |
| 55 const size_t kTraceEventInitialBufferSize = 1024; | 58 const size_t kTraceEventInitialBufferSize = 1024; |
| 56 | 59 |
| 57 #define MAX_CATEGORY_GROUPS 100 | 60 #define MAX_CATEGORY_GROUPS 100 |
| 58 | 61 |
| 59 namespace { | |
| 60 | |
| 61 // Parallel arrays g_category_groups and g_category_group_enabled are separate | 62 // Parallel arrays g_category_groups and g_category_group_enabled are separate |
| 62 // so that a pointer to a member of g_category_group_enabled can be easily | 63 // so that a pointer to a member of g_category_group_enabled can be easily |
| 63 // converted to an index into g_category_groups. This allows macros to deal | 64 // converted to an index into g_category_groups. This allows macros to deal |
| 64 // only with char enabled pointers from g_category_group_enabled, and we can | 65 // only with char enabled pointers from g_category_group_enabled, and we can |
| 65 // convert internally to determine the category name from the char enabled | 66 // convert internally to determine the category name from the char enabled |
| 66 // pointer. | 67 // pointer. |
| 67 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { | 68 const char* g_category_groups[MAX_CATEGORY_GROUPS] = { |
| 68 "tracing already shutdown", | 69 "tracing already shutdown", |
| 69 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", | 70 "tracing categories exhausted; must increase MAX_CATEGORY_GROUPS", |
| 70 "__metadata", | 71 "__metadata", |
| (...skipping 17 matching lines...) | |
| 88 const char kRecordContinuously[] = "record-continuously"; | 89 const char kRecordContinuously[] = "record-continuously"; |
| 89 const char kEnableSampling[] = "enable-sampling"; | 90 const char kEnableSampling[] = "enable-sampling"; |
| 90 | 91 |
| 91 size_t NextIndex(size_t index) { | 92 size_t NextIndex(size_t index) { |
| 92 index++; | 93 index++; |
| 93 if (index >= kTraceEventBufferSize) | 94 if (index >= kTraceEventBufferSize) |
| 94 index = 0; | 95 index = 0; |
| 95 return index; | 96 return index; |
| 96 } | 97 } |
| 97 | 98 |
| 98 } // namespace | |
| 99 | |
| 100 class TraceBufferRingBuffer : public TraceBuffer { | 99 class TraceBufferRingBuffer : public TraceBuffer { |
| 101 public: | 100 public: |
| 102 TraceBufferRingBuffer() | 101 TraceBufferRingBuffer() |
| 103 : unused_event_index_(0), | 102 : unused_event_index_(0), |
| 104 oldest_event_index_(0) { | 103 oldest_event_index_(0) { |
| 105 logged_events_.reserve(kTraceEventInitialBufferSize); | 104 logged_events_.reserve(kTraceEventInitialBufferSize); |
| 106 } | 105 } |
| 107 | 106 |
| 108 virtual ~TraceBufferRingBuffer() {} | 107 virtual ~TraceBufferRingBuffer() {} |
| 109 | 108 |
| (...skipping 61 matching lines...) | |
| 171 class TraceBufferVector : public TraceBuffer { | 170 class TraceBufferVector : public TraceBuffer { |
| 172 public: | 171 public: |
| 173 TraceBufferVector() : current_iteration_index_(0) { | 172 TraceBufferVector() : current_iteration_index_(0) { |
| 174 logged_events_.reserve(kTraceEventInitialBufferSize); | 173 logged_events_.reserve(kTraceEventInitialBufferSize); |
| 175 } | 174 } |
| 176 | 175 |
| 177 virtual ~TraceBufferVector() { | 176 virtual ~TraceBufferVector() { |
| 178 } | 177 } |
| 179 | 178 |
| 180 virtual void AddEvent(const TraceEvent& event) OVERRIDE { | 179 virtual void AddEvent(const TraceEvent& event) OVERRIDE { |
| 181 // Note, we have two callers which need to be handled. The first is | 180 // Note, we have two callers which need to be handled: |
| 182 // AddTraceEventWithThreadIdAndTimestamp() which checks Size() and does an | 181 // - AddEventToMainBufferWhileLocked() which has two cases: |
| 183 // early exit if full. The second is AddThreadNameMetadataEvents(). | 182 // - called directly from AddTraceEventWithThreadIdAndTimeStamp() |
| 183 // which checks if buffer is full and does an early exit if full; | |
| 184 // - called from ThreadLocalEventBuffer::FlushWhileLocked(); | |
| 185 // - AddThreadNameMetadataEvents(). | |
| 184 // We can not DECHECK(!IsFull()) because we have to add the metadata | 186 // We can not DECHECK(!IsFull()) because we have to add the metadata |
| 185 // events even if the buffer is full. | 187 // events and flush thread-local buffers even if the buffer is full. |
| 186 logged_events_.push_back(event); | 188 logged_events_.push_back(event); |
| 187 } | 189 } |
| 188 | 190 |
| 189 virtual bool HasMoreEvents() const OVERRIDE { | 191 virtual bool HasMoreEvents() const OVERRIDE { |
| 190 return current_iteration_index_ < Size(); | 192 return current_iteration_index_ < Size(); |
| 191 } | 193 } |
| 192 | 194 |
| 193 virtual const TraceEvent& NextEvent() OVERRIDE { | 195 virtual const TraceEvent& NextEvent() OVERRIDE { |
| 194 DCHECK(HasMoreEvents()); | 196 DCHECK(HasMoreEvents()); |
| 195 return GetEventAt(current_iteration_index_++); | 197 return GetEventAt(current_iteration_index_++); |
| (...skipping 54 matching lines...) | |
| 250 } | 252 } |
| 251 | 253 |
| 252 virtual size_t Size() const OVERRIDE { return 0; } | 254 virtual size_t Size() const OVERRIDE { return 0; } |
| 253 | 255 |
| 254 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE { | 256 virtual const TraceEvent& GetEventAt(size_t index) const OVERRIDE { |
| 255 NOTREACHED(); | 257 NOTREACHED(); |
| 256 return *static_cast<TraceEvent*>(NULL); | 258 return *static_cast<TraceEvent*>(NULL); |
| 257 } | 259 } |
| 258 }; | 260 }; |
| 259 | 261 |
| 262 } // namespace | |
| 263 | |
| 260 //////////////////////////////////////////////////////////////////////////////// | 264 //////////////////////////////////////////////////////////////////////////////// |
| 261 // | 265 // |
| 262 // TraceEvent | 266 // TraceEvent |
| 263 // | 267 // |
| 264 //////////////////////////////////////////////////////////////////////////////// | 268 //////////////////////////////////////////////////////////////////////////////// |
| 265 | 269 |
| 266 namespace { | 270 namespace { |
| 267 | 271 |
| 268 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; } | 272 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; } |
| 269 | 273 |
| (...skipping 464 matching lines...) | |
| 734 | 738 |
| 735 TraceBucketData::~TraceBucketData() { | 739 TraceBucketData::~TraceBucketData() { |
| 736 } | 740 } |
| 737 | 741 |
| 738 //////////////////////////////////////////////////////////////////////////////// | 742 //////////////////////////////////////////////////////////////////////////////// |
| 739 // | 743 // |
| 740 // TraceLog | 744 // TraceLog |
| 741 // | 745 // |
| 742 //////////////////////////////////////////////////////////////////////////////// | 746 //////////////////////////////////////////////////////////////////////////////// |
| 743 | 747 |
| 748 class TraceLog::ThreadLocalEventBuffer | |
| 749 : public MessageLoop::DestructionObserver { | |
| 750 public: | |
| 751 ThreadLocalEventBuffer(TraceLog* trace_log); | |
| 752 virtual ~ThreadLocalEventBuffer(); | |
| 753 | |
| 754 void AddEvent(const TraceEvent& event, NotificationHelper* notifier); | |
| 755 | |
| 756 private: | |
| 757 | |
| 758 // MessageLoop::DestructionObserver | |
| 759 virtual void WillDestroyCurrentMessageLoop() OVERRIDE; | |
| 760 | |
| 761 void FlushWhileLocked(NotificationHelper* notifier); | |
| 762 | |
| 763 void CheckThread() { | |
| | dsinclair (2013/08/14 15:15:59): Name is a bit generic, what about something like: |
| | Xianzhu (2013/08/14 21:28:34): Done. |
| 764 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this); | |
| 765 } | |
| 766 | |
| 767 TraceLog* trace_log_; | |
| | dsinclair (2013/08/14 15:15:59): Are we making the assumption that, since TraceLog |
| | Xianzhu (2013/08/14 21:28:34): Yes. Done. |
| 768 std::vector<TraceEvent> logged_events_; | |
| 769 | |
| 770 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer); | |
| 771 }; | |
| 772 | |
| 773 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log) | |
| 774 : trace_log_(trace_log) { | |
| 775 logged_events_.reserve(kTraceEventThreadLocalBufferSize); | |
| 776 | |
| 777 MessageLoop* message_loop = MessageLoop::current(); | |
| 778 DCHECK(message_loop); | |
| | dsinclair (2013/08/14 15:15:59): What does it mean if this fails? Is it possible in |
| | Xianzhu (2013/08/14 21:28:34): It never fails because ThreadLocalEventBuffer is c |
| 779 message_loop->AddDestructionObserver(this); | |
| 780 | |
| 781 AutoLock lock(trace_log->lock_); | |
| 782 trace_log->thread_message_loops_.insert(message_loop); | |
| 783 } | |
| 784 | |
| 785 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() { | |
| 786 CheckThread(); | |
| 787 NotificationHelper notifier(trace_log_); | |
| 788 { | |
| 789 AutoLock lock(trace_log_->lock_); | |
| 790 FlushWhileLocked(¬ifier); | |
| 791 trace_log_->thread_message_loops_.erase(MessageLoop::current()); | |
| 792 if (trace_log_->flush_message_loop_) { | |
| 793 trace_log_->flush_message_loop_->PostTask( | |
| 794 FROM_HERE, | |
| 795 Bind(&TraceLog::FlushNextThreadOrFinish, Unretained(trace_log_))); | |
| 796 } | |
| 797 } | |
| 798 notifier.SendNotificationIfAny(); | |
| 799 trace_log_->thread_local_event_buffer_.Set(NULL); | |
| 800 } | |
| 801 | |
| 802 void TraceLog::ThreadLocalEventBuffer::AddEvent(const TraceEvent& event, | |
| 803 NotificationHelper* notifier) { | |
| 804 CheckThread(); | |
| 805 logged_events_.push_back(event); | |
| 806 if (logged_events_.size() >= kTraceEventThreadLocalBufferSize) { | |
| 807 AutoLock lock(trace_log_->lock_); | |
| 808 FlushWhileLocked(notifier); | |
| 809 } | |
| 810 } | |
| 811 | |
| 812 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() { | |
| 813 delete this; | |
| 814 } | |
| 815 | |
| 816 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked( | |
| 817 NotificationHelper* notifier) { | |
| | dsinclair (2013/08/14 15:15:59): lock_.AssertAcquired()? |
| | Xianzhu (2013/08/14 21:28:34): Done. |
| 818 for (size_t i = 0; i < logged_events_.size(); ++i) { | |
| 819 trace_log_->AddEventToMainBufferWhileLocked(logged_events_[i], | |
| 820 notifier); | |
| 821 } | |
| 822 logged_events_.resize(0); | |
| 823 } | |
| 824 | |
| 744 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log) | 825 TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log) |
| 745 : trace_log_(trace_log), | 826 : trace_log_(trace_log), |
| 746 notification_(0) { | 827 notification_(0) { |
| 747 } | 828 } |
| 748 | 829 |
| 749 TraceLog::NotificationHelper::~NotificationHelper() { | 830 TraceLog::NotificationHelper::~NotificationHelper() { |
| 750 } | 831 } |
| 751 | 832 |
| 752 void TraceLog::NotificationHelper::AddNotificationWhileLocked( | 833 void TraceLog::NotificationHelper::AddNotificationWhileLocked( |
| 753 int notification) { | 834 int notification) { |
| 835 trace_log_->lock_.AssertAcquired(); | |
| 754 if (trace_log_->notification_callback_.is_null()) | 836 if (trace_log_->notification_callback_.is_null()) |
| 755 return; | 837 return; |
| 756 if (notification_ == 0) | 838 if (notification_ == 0) |
| 757 callback_copy_ = trace_log_->notification_callback_; | 839 callback_copy_ = trace_log_->notification_callback_; |
| 758 notification_ |= notification; | 840 notification_ |= notification; |
| 759 } | 841 } |
| 760 | 842 |
| 761 void TraceLog::NotificationHelper::SendNotificationIfAny() { | 843 void TraceLog::NotificationHelper::SendNotificationIfAny() { |
| 762 if (notification_) | 844 if (notification_) |
| 763 callback_copy_.Run(notification_); | 845 callback_copy_.Run(notification_); |
| (...skipping 23 matching lines...) | |
| 787 } else { | 869 } else { |
| 788 NOTREACHED(); // Unknown option provided. | 870 NOTREACHED(); // Unknown option provided. |
| 789 } | 871 } |
| 790 } | 872 } |
| 791 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) | 873 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) |
| 792 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. | 874 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. |
| 793 | 875 |
| 794 return static_cast<Options>(ret); | 876 return static_cast<Options>(ret); |
| 795 } | 877 } |
| 796 | 878 |
| 879 namespace { | |
| 880 | |
| 881 struct EchoToConsoleContext { | |
| | dsinclair (2013/08/14 15:15:59): Can we move this up into the other anonymous names |
| | Xianzhu (2013/08/14 21:28:34): Done. |
| 882 hash_map<int, std::stack<TimeTicks> > thread_event_start_times; | |
| 883 hash_map<std::string, int> thread_colors; | |
| 884 }; | |
| 885 | |
| 886 } // namespace | |
| 887 | |
| 797 TraceLog::TraceLog() | 888 TraceLog::TraceLog() |
| 798 : enable_count_(0), | 889 : enable_count_(0), |
| 799 num_traces_recorded_(0), | 890 num_traces_recorded_(0), |
| 800 event_callback_(NULL), | 891 buffer_is_full_(0), |
| 892 event_callback_(0), | |
| 801 dispatching_to_observer_list_(false), | 893 dispatching_to_observer_list_(false), |
| 802 process_sort_index_(0), | 894 process_sort_index_(0), |
| 803 watch_category_(NULL), | 895 echo_to_console_context_(0), |
| 896 process_id_hash_(0), | |
| 897 process_id_(0), | |
| 898 watch_category_(0), | |
| 804 trace_options_(RECORD_UNTIL_FULL), | 899 trace_options_(RECORD_UNTIL_FULL), |
| 805 sampling_thread_handle_(0), | 900 sampling_thread_handle_(0), |
| 806 category_filter_(CategoryFilter::kDefaultCategoryFilterString) { | 901 category_filter_(CategoryFilter::kDefaultCategoryFilterString), |
| 902 flush_message_loop_(NULL) { | |
| 807 // Trace is enabled or disabled on one thread while other threads are | 903 // Trace is enabled or disabled on one thread while other threads are |
| 808 // accessing the enabled flag. We don't care whether edge-case events are | 904 // accessing the enabled flag. We don't care whether edge-case events are |
| 809 // traced or not, so we allow races on the enabled flag to keep the trace | 905 // traced or not, so we allow races on the enabled flag to keep the trace |
| 810 // macros fast. | 906 // macros fast. |
| 811 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: | 907 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: |
| 812 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, | 908 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, |
| 813 // sizeof(g_category_group_enabled), | 909 // sizeof(g_category_group_enabled), |
| 814 // "trace_event category enabled"); | 910 // "trace_event category enabled"); |
| 815 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) { | 911 for (int i = 0; i < MAX_CATEGORY_GROUPS; ++i) { |
| 816 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i], | 912 ANNOTATE_BENIGN_RACE(&g_category_group_enabled[i], |
| (...skipping 15 matching lines...) | |
| 832 category_string = "*"; | 928 category_string = "*"; |
| 833 | 929 |
| 834 SetEnabled(CategoryFilter(category_string), ECHO_TO_CONSOLE); | 930 SetEnabled(CategoryFilter(category_string), ECHO_TO_CONSOLE); |
| 835 } | 931 } |
| 836 #endif | 932 #endif |
| 837 | 933 |
| 838 logged_events_.reset(GetTraceBuffer()); | 934 logged_events_.reset(GetTraceBuffer()); |
| 839 } | 935 } |
| 840 | 936 |
| 841 TraceLog::~TraceLog() { | 937 TraceLog::~TraceLog() { |
| 938 delete reinterpret_cast<EchoToConsoleContext*>(echo_to_console_context_); | |
| 842 } | 939 } |
| 843 | 940 |
| 844 const unsigned char* TraceLog::GetCategoryGroupEnabled( | 941 const unsigned char* TraceLog::GetCategoryGroupEnabled( |
| 845 const char* category_group) { | 942 const char* category_group) { |
| 846 TraceLog* tracelog = GetInstance(); | 943 TraceLog* tracelog = GetInstance(); |
| 847 if (!tracelog) { | 944 if (!tracelog) { |
| 848 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); | 945 DCHECK(!g_category_group_enabled[g_category_already_shutdown]); |
| 849 return &g_category_group_enabled[g_category_already_shutdown]; | 946 return &g_category_group_enabled[g_category_already_shutdown]; |
| 850 } | 947 } |
| 851 return tracelog->GetCategoryGroupEnabledInternal(category_group); | 948 return tracelog->GetCategoryGroupEnabledInternal(category_group); |
| (...skipping 102 matching lines...) | |
| 954 << "set of options."; | 1051 << "set of options."; |
| 955 } | 1052 } |
| 956 | 1053 |
| 957 category_filter_.Merge(category_filter); | 1054 category_filter_.Merge(category_filter); |
| 958 UpdateCategoryGroupEnabledFlags(); | 1055 UpdateCategoryGroupEnabledFlags(); |
| 959 return; | 1056 return; |
| 960 } | 1057 } |
| 961 | 1058 |
| 962 if (options != trace_options_) { | 1059 if (options != trace_options_) { |
| 963 trace_options_ = options; | 1060 trace_options_ = options; |
| 1061 EnableEchoToConsole(options & ECHO_TO_CONSOLE); | |
| 964 logged_events_.reset(GetTraceBuffer()); | 1062 logged_events_.reset(GetTraceBuffer()); |
| 1063 subtle::NoBarrier_Store(&buffer_is_full_, 0); | |
| 965 } | 1064 } |
| 966 | 1065 |
| 967 if (dispatching_to_observer_list_) { | 1066 if (dispatching_to_observer_list_) { |
| 968 DLOG(ERROR) << | 1067 DLOG(ERROR) << |
| 969 "Cannot manipulate TraceLog::Enabled state from an observer."; | 1068 "Cannot manipulate TraceLog::Enabled state from an observer."; |
| 970 return; | 1069 return; |
| 971 } | 1070 } |
| 972 | 1071 |
| 973 num_traces_recorded_++; | 1072 num_traces_recorded_++; |
| 974 | 1073 |
| (...skipping 57 matching lines...) | |
| 1032 // Stop the sampling thread. | 1131 // Stop the sampling thread. |
| 1033 sampling_thread_->Stop(); | 1132 sampling_thread_->Stop(); |
| 1034 lock_.Release(); | 1133 lock_.Release(); |
| 1035 PlatformThread::Join(sampling_thread_handle_); | 1134 PlatformThread::Join(sampling_thread_handle_); |
| 1036 lock_.Acquire(); | 1135 lock_.Acquire(); |
| 1037 sampling_thread_handle_ = PlatformThreadHandle(); | 1136 sampling_thread_handle_ = PlatformThreadHandle(); |
| 1038 sampling_thread_.reset(); | 1137 sampling_thread_.reset(); |
| 1039 } | 1138 } |
| 1040 | 1139 |
| 1041 category_filter_.Clear(); | 1140 category_filter_.Clear(); |
| 1042 watch_category_ = NULL; | 1141 subtle::NoBarrier_Store(&watch_category_, 0); |
| 1043 watch_event_name_ = ""; | 1142 watch_event_name_ = ""; |
| 1044 UpdateCategoryGroupEnabledFlags(); | 1143 UpdateCategoryGroupEnabledFlags(); |
| 1045 AddMetadataEvents(); | 1144 AddMetadataEvents(); |
| 1046 | 1145 |
| 1047 dispatching_to_observer_list_ = true; | 1146 dispatching_to_observer_list_ = true; |
| 1048 observer_list = enabled_state_observer_list_; | 1147 observer_list = enabled_state_observer_list_; |
| 1049 } | 1148 } |
| 1050 | 1149 |
| 1051 // Dispatch to observers outside the lock in case the observer triggers a | 1150 // Dispatch to observers outside the lock in case the observer triggers a |
| 1052 // trace event. | 1151 // trace event. |
| (...skipping 45 matching lines...) | |
| 1098 } | 1197 } |
| 1099 | 1198 |
| 1100 TraceBuffer* TraceLog::GetTraceBuffer() { | 1199 TraceBuffer* TraceLog::GetTraceBuffer() { |
| 1101 if (trace_options_ & RECORD_CONTINUOUSLY) | 1200 if (trace_options_ & RECORD_CONTINUOUSLY) |
| 1102 return new TraceBufferRingBuffer(); | 1201 return new TraceBufferRingBuffer(); |
| 1103 else if (trace_options_ & ECHO_TO_CONSOLE) | 1202 else if (trace_options_ & ECHO_TO_CONSOLE) |
| 1104 return new TraceBufferDiscardsEvents(); | 1203 return new TraceBufferDiscardsEvents(); |
| 1105 return new TraceBufferVector(); | 1204 return new TraceBufferVector(); |
| 1106 } | 1205 } |
| 1107 | 1206 |
| 1207 bool TraceLog::AddEventToMainBufferWhileLocked(const TraceEvent& trace_event, | |
| 1208 NotificationHelper* notifier) { | |
| 1209 // Don't check buffer_is_full_ because we want the remaining thread-local | |
| 1210 // events to be flushed into the main buffer with this method, otherwise | |
| 1211 // we may lose some early events of a thread that generates events sparsely. | |
| 1212 lock_.AssertAcquired(); | |
| 1213 logged_events_->AddEvent(trace_event); | |
| 1214 if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) { | |
| 1215 subtle::NoBarrier_Store(&buffer_is_full_, | |
| 1216 static_cast<subtle::AtomicWord>(1)); | |
| 1217 notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL); | |
| 1218 return false; | |
| 1219 } | |
| 1220 return true; | |
| 1221 } | |
| 1222 | |
| 1108 void TraceLog::SetEventCallback(EventCallback cb) { | 1223 void TraceLog::SetEventCallback(EventCallback cb) { |
| 1109 AutoLock lock(lock_); | 1224 subtle::NoBarrier_Store(&event_callback_, |
| 1110 event_callback_ = cb; | 1225 reinterpret_cast<subtle::AtomicWord>(cb)); |
| 1111 }; | 1226 }; |
| 1112 | 1227 |
| 1113 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { | 1228 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { |
| 1114 // Ignore memory allocations from here down. | 1229 // Ignore memory allocations from here down. |
| 1115 INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"), | 1230 INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"), |
| 1116 TRACE_MEMORY_IGNORE); | 1231 TRACE_MEMORY_IGNORE); |
| 1232 { | |
| 1233 AutoLock lock(lock_); | |
| 1234 DCHECK(!flush_message_loop_); | |
| 1235 flush_message_loop_ = MessageLoop::current(); | |
| 1236 DCHECK(flush_message_loop_); | |
| 1237 flush_output_callback_ = cb; | |
| 1238 } | |
| 1239 FlushNextThreadOrFinish(); | |
| 1240 } | |
| 1241 | |
| 1242 void TraceLog::FlushNextThreadOrFinish() { | |
| 1117 scoped_ptr<TraceBuffer> previous_logged_events; | 1243 scoped_ptr<TraceBuffer> previous_logged_events; |
| 1118 { | 1244 { |
| 1119 AutoLock lock(lock_); | 1245 AutoLock lock(lock_); |
| 1246 hash_set<MessageLoop*>::const_iterator next_message_loop = | |
| 1247 thread_message_loops_.begin(); | |
| 1248 if (next_message_loop != thread_message_loops_.end()) { | |
| | dsinclair (2013/08/14 15:15:59): The flush logic is, I think, the biggest confusion |
| | Xianzhu (2013/08/14 21:28:34): Exactly :) Added comments. |
| 1249 (*next_message_loop)->PostTask( | |
| 1250 FROM_HERE, | |
| 1251 Bind(&TraceLog::DestroyThreadLocalEventBuffer, Unretained(this))); | |
| 1252 return; | |
| 1253 } | |
| 1254 | |
| 1120 previous_logged_events.swap(logged_events_); | 1255 previous_logged_events.swap(logged_events_); |
| 1121 logged_events_.reset(GetTraceBuffer()); | 1256 logged_events_.reset(GetTraceBuffer()); |
| 1257 subtle::NoBarrier_Store(&buffer_is_full_, 0); | |
| 1122 } // release lock | 1258 } // release lock |
| 1123 | 1259 |
| 1260 flush_message_loop_ = NULL; | |
| 1261 | |
| 1124 while (previous_logged_events->HasMoreEvents()) { | 1262 while (previous_logged_events->HasMoreEvents()) { |
| 1125 scoped_refptr<RefCountedString> json_events_str_ptr = | 1263 scoped_refptr<RefCountedString> json_events_str_ptr = |
| 1126 new RefCountedString(); | 1264 new RefCountedString(); |
| 1127 | 1265 |
| 1128 for (size_t i = 0; i < kTraceEventBatchSize; ++i) { | 1266 for (size_t i = 0; i < kTraceEventBatchSize; ++i) { |
| 1129 if (i > 0) | 1267 if (i > 0) |
| 1130 *(&(json_events_str_ptr->data())) += ","; | 1268 *(&(json_events_str_ptr->data())) += ","; |
| 1131 | 1269 |
| 1132 previous_logged_events->NextEvent().AppendAsJSON( | 1270 previous_logged_events->NextEvent().AppendAsJSON( |
| 1133 &(json_events_str_ptr->data())); | 1271 &(json_events_str_ptr->data())); |
| 1134 | 1272 |
| 1135 if (!previous_logged_events->HasMoreEvents()) | 1273 if (!previous_logged_events->HasMoreEvents()) |
| 1136 break; | 1274 break; |
| 1137 } | 1275 } |
| 1138 | 1276 |
| 1139 cb.Run(json_events_str_ptr); | 1277 flush_output_callback_.Run(json_events_str_ptr); |
| 1140 } | 1278 } |
| 1141 } | 1279 } |
| 1142 | 1280 |
| 1281 void TraceLog::DestroyThreadLocalEventBuffer() { | |
| 1282 delete thread_local_event_buffer_.Get(); | |
| 1283 thread_local_event_buffer_.Set(NULL); | |
| 1284 } | |
| 1285 | |
| 1143 void TraceLog::AddTraceEvent( | 1286 void TraceLog::AddTraceEvent( |
| 1144 char phase, | 1287 char phase, |
| 1145 const unsigned char* category_group_enabled, | 1288 const unsigned char* category_group_enabled, |
| 1146 const char* name, | 1289 const char* name, |
| 1147 unsigned long long id, | 1290 unsigned long long id, |
| 1148 int num_args, | 1291 int num_args, |
| 1149 const char** arg_names, | 1292 const char** arg_names, |
| 1150 const unsigned char* arg_types, | 1293 const unsigned char* arg_types, |
| 1151 const unsigned long long* arg_values, | 1294 const unsigned long long* arg_values, |
| 1152 scoped_ptr<ConvertableToTraceFormat> convertable_values[], | 1295 scoped_ptr<ConvertableToTraceFormat> convertable_values[], |
| (...skipping 27 matching lines...) | |
| 1180 #if defined(OS_ANDROID) | 1323 #if defined(OS_ANDROID) |
| 1181 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id, | 1324 SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id, |
| 1182 num_args, arg_names, arg_types, arg_values, convertable_values, | 1325 num_args, arg_names, arg_types, arg_values, convertable_values, |
| 1183 flags); | 1326 flags); |
| 1184 #endif | 1327 #endif |
| 1185 | 1328 |
| 1186 if (!IsCategoryGroupEnabled(category_group_enabled)) | 1329 if (!IsCategoryGroupEnabled(category_group_enabled)) |
| 1187 return; | 1330 return; |
| 1188 | 1331 |
| 1189 TimeTicks now = timestamp - time_offset_; | 1332 TimeTicks now = timestamp - time_offset_; |
| 1190 EventCallback event_callback_copy; | |
| 1191 | 1333 |
| 1192 NotificationHelper notifier(this); | 1334 NotificationHelper notifier(this); |
| 1193 | 1335 |
| 1194 // Check and update the current thread name only if the event is for the | 1336 ThreadLocalEventBuffer* thread_local_event_buffer = NULL; |
| 1195 // current thread to avoid locks in most cases. | 1337 |
| 1196 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { | 1338 if (thread_id == static_cast<int>(PlatformThread::CurrentId())) { |
| 1339 if (MessageLoop::current()) { | |
| | dsinclair (2013/08/14 15:15:59): Why is this inside the if (thread_id ...) check? I |
| | Xianzhu (2013/08/14 21:28:34): Done. |
| 1340 thread_local_event_buffer = thread_local_event_buffer_.Get(); | |
| 1341 if (!thread_local_event_buffer) { | |
| 1342 thread_local_event_buffer = new ThreadLocalEventBuffer(this); | |
| 1343 thread_local_event_buffer_.Set(thread_local_event_buffer); | |
| 1344 } | |
| 1345 } | |
| 1346 | |
| 1347 // Check and update the current thread name only if the event is for the | |
| 1348 // current thread to avoid locks in most cases. | |
| 1197 const char* new_name = ThreadIdNameManager::GetInstance()-> | 1349 const char* new_name = ThreadIdNameManager::GetInstance()-> |
| 1198 GetName(thread_id); | 1350 GetName(thread_id); |
| 1199 // Check if the thread name has been set or changed since the previous | 1351 // Check if the thread name has been set or changed since the previous |
| 1200 // call (if any), but don't bother if the new name is empty. Note this will | 1352 // call (if any), but don't bother if the new name is empty. Note this will |
| 1201 // not detect a thread name change within the same char* buffer address: we | 1353 // not detect a thread name change within the same char* buffer address: we |
| 1202 // favor common case performance over corner case correctness. | 1354 // favor common case performance over corner case correctness. |
| 1203 if (new_name != g_current_thread_name.Get().Get() && | 1355 if (new_name != g_current_thread_name.Get().Get() && |
| 1204 new_name && *new_name) { | 1356 new_name && *new_name) { |
| 1205 g_current_thread_name.Get().Set(new_name); | 1357 g_current_thread_name.Get().Set(new_name); |
| 1206 | |
| 1207 AutoLock lock(lock_); | |
| 1208 hash_map<int, std::string>::iterator existing_name = | 1358 hash_map<int, std::string>::iterator existing_name = |
| | dsinclair (2013/08/14 15:15:59): Why is this safe to do without holding the lock? I |
| | Xianzhu (2013/08/14 21:28:34): This is a bug. Added lock. |
| 1209 thread_names_.find(thread_id); | 1359 thread_names_.find(thread_id); |
| 1210 if (existing_name == thread_names_.end()) { | 1360 if (existing_name == thread_names_.end()) { |
| 1211 // This is a new thread id, and a new name. | 1361 // This is a new thread id, and a new name. |
| 1212 thread_names_[thread_id] = new_name; | 1362 thread_names_[thread_id] = new_name; |
| 1213 } else { | 1363 } else { |
| 1214 // This is a thread id that we've seen before, but potentially with a | 1364 // This is a thread id that we've seen before, but potentially with a |
| 1215 // new name. | 1365 // new name. |
| 1216 std::vector<StringPiece> existing_names; | 1366 std::vector<StringPiece> existing_names; |
| 1217 Tokenize(existing_name->second, ",", &existing_names); | 1367 Tokenize(existing_name->second, ",", &existing_names); |
| 1218 bool found = std::find(existing_names.begin(), | 1368 bool found = std::find(existing_names.begin(), |
| 1219 existing_names.end(), | 1369 existing_names.end(), |
| 1220 new_name) != existing_names.end(); | 1370 new_name) != existing_names.end(); |
| 1221 if (!found) { | 1371 if (!found) { |
| 1222 existing_name->second.push_back(','); | 1372 existing_name->second.push_back(','); |
| 1223 existing_name->second.append(new_name); | 1373 existing_name->second.append(new_name); |
| 1224 } | 1374 } |
| 1225 } | 1375 } |
| 1226 } | 1376 } |
| 1227 } | 1377 } |
| 1228 | 1378 |
| 1229 TraceEvent trace_event(thread_id, | 1379 if (!subtle::NoBarrier_Load(&buffer_is_full_)) { |
| 1230 now, phase, category_group_enabled, name, id, | 1380 TraceEvent trace_event(thread_id, |
| 1231 num_args, arg_names, arg_types, arg_values, | 1381 now, phase, category_group_enabled, name, id, |
| 1232 convertable_values, flags); | 1382 num_args, arg_names, arg_types, arg_values, |
| 1383 convertable_values, flags); | |
| 1233 | 1384 |
| 1234 do { | 1385 if (thread_local_event_buffer) { |
| 1235 AutoLock lock(lock_); | 1386 thread_local_event_buffer->AddEvent(trace_event, ¬ifier); |
| 1387 } else { | |
| 1388 AutoLock lock(lock_); | |
| 1389 AddEventToMainBufferWhileLocked(trace_event, ¬ifier); | |
| 1390 } | |
| 1236 | 1391 |
| 1237 event_callback_copy = event_callback_; | 1392 EchoToConsoleContext* echo_to_console_context = |
| 1238 if (logged_events_->IsFull()) | 1393 reinterpret_cast<EchoToConsoleContext*>(subtle::NoBarrier_Load( |
| 1239 break; | 1394 &echo_to_console_context_)); |
| 1395 if (echo_to_console_context) { | |
| 1396 // ECHO_TO_CONSOLE is enabled. | |
| 1397 AutoLock lock(lock_); | |
| 1240 | 1398 |
| 1241 logged_events_->AddEvent(trace_event); | |
| 1242 | |
| 1243 if (trace_options_ & ECHO_TO_CONSOLE) { | |
| 1244 TimeDelta duration; | 1399 TimeDelta duration; |
| 1245 if (phase == TRACE_EVENT_PHASE_END) { | 1400 if (phase == TRACE_EVENT_PHASE_END) { |
| 1246 duration = timestamp - thread_event_start_times_[thread_id].top(); | 1401 duration = timestamp - |
| 1247 thread_event_start_times_[thread_id].pop(); | 1402 echo_to_console_context->thread_event_start_times[thread_id].top(); |
| 1403 echo_to_console_context->thread_event_start_times[thread_id].pop(); | |
| 1248 } | 1404 } |
| 1249 | 1405 |
| 1250 std::string thread_name = thread_names_[thread_id]; | 1406 std::string thread_name = thread_names_[thread_id]; |
| 1251 if (thread_colors_.find(thread_name) == thread_colors_.end()) | 1407 if (echo_to_console_context->thread_colors.find(thread_name) == |
| 1252 thread_colors_[thread_name] = (thread_colors_.size() % 6) + 1; | 1408 echo_to_console_context->thread_colors.end()) |
| 1409 echo_to_console_context->thread_colors[thread_name] = | |
| 1410 (echo_to_console_context->thread_colors.size() % 6) + 1; | |
| 1253 | 1411 |
| 1254 std::ostringstream log; | 1412 std::ostringstream log; |
| 1255 log << base::StringPrintf("%s: \x1b[0;3%dm", | 1413 log << base::StringPrintf( |
| 1256 thread_name.c_str(), | 1414 "%s: \x1b[0;3%dm", |
| 1257 thread_colors_[thread_name]); | 1415 thread_name.c_str(), |
| 1416 echo_to_console_context->thread_colors[thread_name]); | |
| 1258 | 1417 |
| 1259 size_t depth = 0; | 1418 size_t depth = 0; |
| 1260 if (thread_event_start_times_.find(thread_id) != | 1419 if (echo_to_console_context->thread_event_start_times.find(thread_id) != |
| 1261 thread_event_start_times_.end()) | 1420 echo_to_console_context->thread_event_start_times.end()) { |
| 1262 depth = thread_event_start_times_[thread_id].size(); | 1421 depth = echo_to_console_context->thread_event_start_times[thread_id] |
| 1422 .size(); | |
| 1423 } | |
| 1263 | 1424 |
| 1264 for (size_t i = 0; i < depth; ++i) | 1425 for (size_t i = 0; i < depth; ++i) |
| 1265 log << "| "; | 1426 log << "| "; |
| 1266 | 1427 |
| 1267 trace_event.AppendPrettyPrinted(&log); | 1428 trace_event.AppendPrettyPrinted(&log); |
| 1268 if (phase == TRACE_EVENT_PHASE_END) | 1429 if (phase == TRACE_EVENT_PHASE_END) |
| 1269 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); | 1430 log << base::StringPrintf(" (%.3f ms)", duration.InMillisecondsF()); |
| 1270 | 1431 |
| 1271 LOG(ERROR) << log.str() << "\x1b[0;m"; | 1432 LOG(ERROR) << log.str() << "\x1b[0;m"; |
| 1272 | 1433 |
| 1273 if (phase == TRACE_EVENT_PHASE_BEGIN) | 1434 if (phase == TRACE_EVENT_PHASE_BEGIN) { |
| 1274 thread_event_start_times_[thread_id].push(timestamp); | 1435 echo_to_console_context->thread_event_start_times[thread_id].push( |
| 1436 timestamp); | |
| 1437 } | |
| 1275 } | 1438 } |
| 1439 } | |
| 1276 | 1440 |
| 1277 if (logged_events_->IsFull()) | 1441 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load( |
| 1278 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL); | 1442 &watch_category_)) == category_group_enabled) { |
| 1279 | 1443 AutoLock lock(lock_); |
| 1280 if (watch_category_ == category_group_enabled && watch_event_name_ == name) | 1444 if (watch_event_name_ == name) |
| 1281 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); | 1445 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); |
| 1282 } while (0); // release lock | 1446 } |
| 1283 | 1447 |
| 1284 notifier.SendNotificationIfAny(); | 1448 notifier.SendNotificationIfAny(); |
| 1285 if (event_callback_copy != NULL) { | 1449 EventCallback event_callback = reinterpret_cast<EventCallback>( |
| 1286 event_callback_copy(phase, category_group_enabled, name, id, | 1450 subtle::NoBarrier_Load(&event_callback_)); |
| 1287 num_args, arg_names, arg_types, arg_values, | 1451 if (event_callback) { |
| 1288 flags); | 1452 event_callback(phase, category_group_enabled, name, id, |
| 1453 num_args, arg_names, arg_types, arg_values, | |
| 1454 flags); | |
| 1289 } | 1455 } |
| 1290 } | 1456 } |
| 1291 | 1457 |
| 1458 void TraceLog::EnableEchoToConsole(bool enable) { | |
| 1459 if (enable) { | |
| 1460 EchoToConsoleContext* new_context = new EchoToConsoleContext(); | |
| 1461 if (subtle::NoBarrier_CompareAndSwap( | |
| 1462 &echo_to_console_context_, 0, | |
| 1463 reinterpret_cast<subtle::AtomicWord>(new_context))) { | |
| 1464 // There is existing context, the new_context is not used. | |
| 1465 delete new_context; | |
| 1466 } | |
| 1467 } else { | |
| 1468 delete reinterpret_cast<EchoToConsoleContext*>( | |
| 1469 subtle::NoBarrier_AtomicExchange(&echo_to_console_context_, 0)); | |
| 1470 } | |
| 1471 } | |
| 1472 | |
| 1292 void TraceLog::AddTraceEventEtw(char phase, | 1473 void TraceLog::AddTraceEventEtw(char phase, |
| 1293 const char* name, | 1474 const char* name, |
| 1294 const void* id, | 1475 const void* id, |
| 1295 const char* extra) { | 1476 const char* extra) { |
| 1296 #if defined(OS_WIN) | 1477 #if defined(OS_WIN) |
| 1297 TraceEventETWProvider::Trace(name, phase, id, extra); | 1478 TraceEventETWProvider::Trace(name, phase, id, extra); |
| 1298 #endif | 1479 #endif |
| 1299 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, | 1480 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, |
| 1300 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); | 1481 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); |
| 1301 } | 1482 } |
| (...skipping 10 matching lines...) | |
| 1312 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); | 1493 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); |
| 1313 } | 1494 } |
| 1314 | 1495 |
| 1315 void TraceLog::SetWatchEvent(const std::string& category_name, | 1496 void TraceLog::SetWatchEvent(const std::string& category_name, |
| 1316 const std::string& event_name) { | 1497 const std::string& event_name) { |
| 1317 const unsigned char* category = GetCategoryGroupEnabled( | 1498 const unsigned char* category = GetCategoryGroupEnabled( |
| 1318 category_name.c_str()); | 1499 category_name.c_str()); |
| 1319 size_t notify_count = 0; | 1500 size_t notify_count = 0; |
| 1320 { | 1501 { |
| 1321 AutoLock lock(lock_); | 1502 AutoLock lock(lock_); |
| 1322 watch_category_ = category; | 1503 subtle::NoBarrier_Store(&watch_category_, |
| 1504 reinterpret_cast<subtle::AtomicWord>(category)); | |
| 1323 watch_event_name_ = event_name; | 1505 watch_event_name_ = event_name; |
| 1324 | 1506 |
| 1325 // First, search existing events for watch event because we want to catch | 1507 // First, search existing events for watch event because we want to catch |
| 1326 // it even if it has already occurred. | 1508 // it even if it has already occurred. |
| 1327 notify_count = logged_events_->CountEnabledByName(category, event_name); | 1509 notify_count = logged_events_->CountEnabledByName(category, event_name); |
| 1328 } // release lock | 1510 } // release lock |
| 1329 | 1511 |
| 1330 // Send notification for each event found. | 1512 // Send notification for each event found. |
| 1331 for (size_t i = 0; i < notify_count; ++i) { | 1513 for (size_t i = 0; i < notify_count; ++i) { |
| 1332 NotificationHelper notifier(this); | 1514 NotificationHelper notifier(this); |
| 1333 lock_.Acquire(); | 1515 lock_.Acquire(); |
| 1334 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); | 1516 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); |
| 1335 lock_.Release(); | 1517 lock_.Release(); |
| 1336 notifier.SendNotificationIfAny(); | 1518 notifier.SendNotificationIfAny(); |
| 1337 } | 1519 } |
| 1338 } | 1520 } |
| 1339 | 1521 |
| 1340 void TraceLog::CancelWatchEvent() { | 1522 void TraceLog::CancelWatchEvent() { |
| 1341 AutoLock lock(lock_); | 1523 AutoLock lock(lock_); |
| 1342 watch_category_ = NULL; | 1524 subtle::NoBarrier_Store(&watch_category_, 0); |
| 1343 watch_event_name_ = ""; | 1525 watch_event_name_ = ""; |
| 1344 } | 1526 } |
| 1345 | 1527 |
| 1346 namespace { | 1528 namespace { |
| 1347 | 1529 |
| 1348 template <typename T> | 1530 template <typename T> |
| 1349 void AddMetadataEventToBuffer( | 1531 void AddMetadataEventToBuffer( |
| 1350 TraceBuffer* logged_events, | 1532 TraceBuffer* logged_events, |
| 1351 int thread_id, | 1533 int thread_id, |
| 1352 const char* metadata_name, const char* arg_name, | 1534 const char* metadata_name, const char* arg_name, |
| (...skipping 330 matching lines...) | |
| 1683 0, // num_args | 1865 0, // num_args |
| 1684 NULL, // arg_names | 1866 NULL, // arg_names |
| 1685 NULL, // arg_types | 1867 NULL, // arg_types |
| 1686 NULL, // arg_values | 1868 NULL, // arg_values |
| 1687 NULL, // convertable values | 1869 NULL, // convertable values |
| 1688 TRACE_EVENT_FLAG_NONE); // flags | 1870 TRACE_EVENT_FLAG_NONE); // flags |
| 1689 } | 1871 } |
| 1690 } | 1872 } |
| 1691 | 1873 |
| 1692 } // namespace trace_event_internal | 1874 } // namespace trace_event_internal |
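
For readers following the review thread above, here is a minimal standalone sketch of the per-thread buffering pattern this patch introduces: each thread accumulates events in its own ThreadLocalEventBuffer and only takes the main lock when a full buffer is flushed or the thread shuts down. The sketch substitutes std::mutex and thread_local for base::Lock, base::ThreadLocalPointer and MessageLoop::DestructionObserver, so the class names, flush threshold, and the leak-at-thread-exit shortcut are illustrative assumptions rather than the actual trace_event_impl code.

```cpp
#include <cstddef>
#include <mutex>
#include <string>
#include <vector>

struct Event { std::string name; };  // stand-in for TraceEvent

class MainBuffer {
 public:
  // Mirrors AddEventToMainBufferWhileLocked(): the caller must hold |lock_|.
  void AddEventWhileLocked(const Event& e) { events_.push_back(e); }
  std::mutex lock_;
 private:
  std::vector<Event> events_;
};

const size_t kThreadLocalFlushThreshold = 1024;  // cf. kTraceEventThreadLocalBufferSize

class ThreadLocalEventBuffer {
 public:
  explicit ThreadLocalEventBuffer(MainBuffer* main) : main_(main) {}

  ~ThreadLocalEventBuffer() {
    // Flush leftovers when the owning thread goes away, as the real class
    // does from WillDestroyCurrentMessageLoop().
    std::lock_guard<std::mutex> lock(main_->lock_);
    FlushWhileLocked();
  }

  void AddEvent(const Event& e) {
    events_.push_back(e);
    if (events_.size() >= kThreadLocalFlushThreshold) {
      std::lock_guard<std::mutex> lock(main_->lock_);
      FlushWhileLocked();
    }
  }

 private:
  void FlushWhileLocked() {
    for (size_t i = 0; i < events_.size(); ++i)
      main_->AddEventWhileLocked(events_[i]);
    events_.clear();
  }

  MainBuffer* main_;
  std::vector<Event> events_;
};

// Each thread lazily creates its own buffer, so the common AddEvent() path
// takes no lock at all; contention only happens on flush.
thread_local ThreadLocalEventBuffer* g_thread_buffer = nullptr;

void AddTraceEvent(MainBuffer* main, const Event& e) {
  if (!g_thread_buffer) {
    // Leaked in this sketch; the real code deletes the buffer when the
    // thread's MessageLoop is destroyed.
    g_thread_buffer = new ThreadLocalEventBuffer(main);
  }
  g_thread_buffer->AddEvent(e);
}
```

The other recurring change in the patch is replacing locked reads of fields such as event_callback_, watch_category_ and buffer_is_full_ with base::subtle::NoBarrier_Load/Store. A rough standard-library analogue, assuming relaxed std::atomic operations are an acceptable substitute for Chromium's subtle atomic ops, looks like this:

```cpp
#include <atomic>

typedef void (*EventCallback)(const char* name);

std::atomic<EventCallback> g_event_callback(nullptr);

void SetEventCallback(EventCallback cb) {
  // Publish the callback without a lock, as SetEventCallback() now does with
  // subtle::NoBarrier_Store().
  g_event_callback.store(cb, std::memory_order_relaxed);
}

void MaybeRunCallback(const char* name) {
  // Hot path: no lock in AddTraceEvent(); a stale read around the transition
  // just means one event may miss (or still hit) the callback.
  EventCallback cb = g_event_callback.load(std::memory_order_relaxed);
  if (cb)
    cb(name);
}
```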