Chromium Code Reviews

Side by Side Diff: base/debug/trace_event_impl.cc

Issue 12096115: Update tracing framework to optionally use a ringbuffer. (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: With less friends. Created 7 years, 9 months ago
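
Overview note: the patch replaces TraceLog's raw std::vector<TraceEvent> log with a TraceBuffer abstraction and adds a ring-buffer implementation selected by the new "record-continuously" option. Below is a minimal sketch of the interface being implemented, inferred from the OVERRIDE methods in this diff; the actual declaration lives in base/debug/trace_event_impl.h, which is not shown here.

  class TraceBuffer {
   public:
    virtual ~TraceBuffer() {}
    virtual void AddEvent(const TraceEvent& event) = 0;
    virtual bool HasMoreEvents() const = 0;
    virtual const TraceEvent& NextEvent() = 0;
    virtual bool IsFull() const = 0;
    virtual size_t CountEnabledByName(const unsigned char* category,
                                      const std::string& event_name) const = 0;
    virtual const TraceEvent& GetEventAt(size_t index) const = 0;
    virtual size_t Size() const = 0;
  };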
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/trace_event_impl.h" 5 #include "base/debug/trace_event_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/bind.h" 9 #include "base/bind.h"
10 #include "base/debug/leak_annotations.h" 10 #include "base/debug/leak_annotations.h"
(...skipping 49 matching lines...)
60 // Parallel arrays g_categories and g_category_enabled are separate so that 60 // Parallel arrays g_categories and g_category_enabled are separate so that
61 // a pointer to a member of g_category_enabled can be easily converted to an 61 // a pointer to a member of g_category_enabled can be easily converted to an
62 // index into g_categories. This allows macros to deal only with char enabled 62 // index into g_categories. This allows macros to deal only with char enabled
63 // pointers from g_category_enabled, and we can convert internally to determine 63 // pointers from g_category_enabled, and we can convert internally to determine
64 // the category name from the char enabled pointer. 64 // the category name from the char enabled pointer.
65 const char* g_categories[TRACE_EVENT_MAX_CATEGORIES] = { 65 const char* g_categories[TRACE_EVENT_MAX_CATEGORIES] = {
66 "tracing already shutdown", 66 "tracing already shutdown",
67 "tracing categories exhausted; must increase TRACE_EVENT_MAX_CATEGORIES", 67 "tracing categories exhausted; must increase TRACE_EVENT_MAX_CATEGORIES",
68 "__metadata", 68 "__metadata",
69 }; 69 };
70
70 // The enabled flag is char instead of bool so that the API can be used from C. 71 // The enabled flag is char instead of bool so that the API can be used from C.
71 unsigned char g_category_enabled[TRACE_EVENT_MAX_CATEGORIES] = { 0 }; 72 unsigned char g_category_enabled[TRACE_EVENT_MAX_CATEGORIES] = { 0 };
72 const int g_category_already_shutdown = 0; 73 const int g_category_already_shutdown = 0;
73 const int g_category_categories_exhausted = 1; 74 const int g_category_categories_exhausted = 1;
74 const int g_category_metadata = 2; 75 const int g_category_metadata = 2;
75 int g_category_index = 3; // skip initial 3 categories 76 int g_category_index = 3; // skip initial 3 categories
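
The comment above describes the parallel-array layout; the following is a minimal illustration of the pointer-to-index conversion it enables, using the names defined above (a sketch mirroring what TraceLog::GetCategoryName is expected to do internally, not code from the patch):

  const unsigned char* enabled_ptr = &g_category_enabled[g_category_metadata];
  size_t index = enabled_ptr - g_category_enabled;  // pointer arithmetic yields the index
  const char* category_name = g_categories[index];  // "__metadata"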
76 77
77 // The name of the current thread. This is used to decide if the current 78 // The name of the current thread. This is used to decide if the current
78 // thread name has changed. We combine all the seen thread names into the 79 // thread name has changed. We combine all the seen thread names into the
79 // output name for the thread. 80 // output name for the thread.
80 LazyInstance<ThreadLocalPointer<const char> >::Leaky 81 LazyInstance<ThreadLocalPointer<const char> >::Leaky
81 g_current_thread_name = LAZY_INSTANCE_INITIALIZER; 82 g_current_thread_name = LAZY_INSTANCE_INITIALIZER;
82 83
83 const char kRecordUntilFull[] = "record-until-full"; 84 const char kRecordUntilFull[] = "record-until-full";
85 const char kRecordContinuously[] = "record-continuously";
84 86
85 } // namespace 87 } // namespace
86 88
89 class TraceBufferRingBuffer : public TraceBuffer {
90 public:
91 TraceBufferRingBuffer()
92 : unused_event_index_(0),
93 oldest_event_index_(0) {
94 logged_events_.reserve(1024);
95 }
96
97 ~TraceBufferRingBuffer() {}
98
99 void AddEvent(const TraceEvent& event) OVERRIDE {
100 if (unused_event_index_ < Size())
101 logged_events_[unused_event_index_] = event;
102 else
103 logged_events_.push_back(event);
104
105 unused_event_index_++;
106 if (unused_event_index_ >= kTraceEventBufferSize)
107 unused_event_index_ = 0;
108 if (unused_event_index_ == oldest_event_index_) {
109 oldest_event_index_++;
110 if (oldest_event_index_ >= kTraceEventBufferSize) {
111 oldest_event_index_ = 0;
112 }
113 }
114 }
115
116 bool HasMoreEvents() const OVERRIDE {
117 return oldest_event_index_ != unused_event_index_;
118 }
119
120 const TraceEvent& NextEvent() OVERRIDE {
121 DCHECK(HasMoreEvents());
122
123 int next = oldest_event_index_;
124 oldest_event_index_++;
125 if (oldest_event_index_ >= kTraceEventBufferSize)
126 oldest_event_index_ = 0;
jar (doing other things) 2013/03/15 19:13:30 nit: This three line snippet could be pulled into a small helper method.
dsinclair 2013/03/15 19:39:55 Done.
127 return GetEventAt(next);
128 }
129
130 bool IsFull() const OVERRIDE {
131 return false;
132 }
133
134 size_t CountEnabledByName(const unsigned char* category,
135 const std::string& event_name) const OVERRIDE {
136 size_t notify_count = 0;
137 size_t index = oldest_event_index_;
138 while (index != unused_event_index_) {
139 const TraceEvent& event = GetEventAt(index);
140 if (category == event.category_enabled() &&
141 strcmp(event_name.c_str(), event.name()) == 0) {
142 ++notify_count;
143 }
144
145 index++;
146 if (index >= kTraceEventBufferSize)
147 index = 0;
148 }
149 return notify_count;
150 }
151
152 const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
153 DCHECK(index < logged_events_.size());
154 return logged_events_[index];
155 }
156
157 size_t Size() const OVERRIDE {
158 return logged_events_.size();
159 }
160
161 private:
162 uint32 unused_event_index_;
163 uint32 oldest_event_index_;
164 std::vector<TraceEvent> logged_events_;
165
166 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
167 };
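
A hypothetical helper for the wrap-around increment that the reviewer's nit above suggests factoring out of AddEvent()/NextEvent(); it is not part of this patch set and the name is invented:

  // Advances a ring-buffer index, wrapping back to 0 at kTraceEventBufferSize.
  static size_t NextRingBufferIndex(size_t index) {
    ++index;
    if (index >= kTraceEventBufferSize)
      index = 0;
    return index;
  }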
168
169 class TraceBufferVector : public TraceBuffer {
170 public:
171 TraceBufferVector()
172 : current_iteration_index_(0) {
jar (doing other things) 2013/03/15 19:13:30 nit: no need to wrap; this will fit on the previous line.
dsinclair 2013/03/15 19:39:55 Done.
173 logged_events_.reserve(1024);
jar (doing other things) 2013/03/15 19:13:30 nit: Please use a named constant here, and in the matching reserve() call above.
dsinclair 2013/03/15 19:39:55 Done. It's a performance optimization, as we know the buffer will typically hold at least this many events.
174 }
175
176 ~TraceBufferVector() {
177 }
178
179 void AddEvent(const TraceEvent& event) OVERRIDE {
180 // Note, this code assumes the caller is doing a !IsFull check before
181 // calling AddEvent. We don't DCHECK(!IsFull()) here as the code to add
182 // the metadata does an AddEvent, if we had the DCHECK we would fail
jar (doing other things) 2013/03/15 19:13:30 nit: this is a run-on sentence. Please change the comment's wording.
dsinclair 2013/03/15 19:39:55 Done. Hopefully with more clarity this time.
183 // to add the metadata to the trace log when the buffer is full.
184 logged_events_.push_back(event);
185 }
186
187 bool HasMoreEvents() const OVERRIDE {
188 return current_iteration_index_ < Size();
189 }
190
191 const TraceEvent& NextEvent() OVERRIDE {
192 DCHECK(HasMoreEvents());
193 return GetEventAt(current_iteration_index_++);
194 }
195
196 bool IsFull() const OVERRIDE {
197 return Size() >= kTraceEventBufferSize;
198 }
199
200 size_t CountEnabledByName(const unsigned char* category,
201 const std::string& event_name) const OVERRIDE {
202 size_t notify_count = 0;
203 for (size_t i = 0; i < Size(); i++) {
204 const TraceEvent& event = GetEventAt(i);
205 if (category == event.category_enabled() &&
206 strcmp(event_name.c_str(), event.name()) == 0) {
207 ++notify_count;
208 }
209 }
210 return notify_count;
211 }
212
213 const TraceEvent& GetEventAt(size_t index) const OVERRIDE {
214 DCHECK(index < logged_events_.size());
215 return logged_events_[index];
216 }
217
218 size_t Size() const OVERRIDE {
219 return logged_events_.size();
220 }
221
222 private:
223 size_t current_iteration_index_;
224 std::vector<TraceEvent> logged_events_;
225
226 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector);
227 };
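
Sketch of the caller-side contract described in the AddEvent() comment above (hypothetical helper; the real check lives in TraceLog::AddTraceEvent further down in this file):

  void AddEventIfRoom(TraceBuffer* buffer, const TraceEvent& event) {
    if (buffer->IsFull())
      return;                 // ordinary events are dropped once the vector buffer fills
    buffer->AddEvent(event);
  }
  // Metadata events intentionally bypass the IsFull() check (see
  // AddThreadNameMetadataEvents()), which is why AddEvent() itself has no DCHECK.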
228
87 //////////////////////////////////////////////////////////////////////////////// 229 ////////////////////////////////////////////////////////////////////////////////
88 // 230 //
89 // TraceEvent 231 // TraceEvent
90 // 232 //
91 //////////////////////////////////////////////////////////////////////////////// 233 ////////////////////////////////////////////////////////////////////////////////
92 234
93 namespace { 235 namespace {
94 236
95 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; } 237 size_t GetAllocLength(const char* str) { return str ? strlen(str) + 1 : 0; }
96 238
(...skipping 134 matching lines...)
231 start_pos += 2; 373 start_pos += 2;
232 } 374 }
233 *out += "\""; 375 *out += "\"";
234 break; 376 break;
235 default: 377 default:
236 NOTREACHED() << "Don't know how to print this value"; 378 NOTREACHED() << "Don't know how to print this value";
237 break; 379 break;
238 } 380 }
239 } 381 }
240 382
241 void TraceEvent::AppendEventsAsJSON(const std::vector<TraceEvent>& events,
242 size_t start,
243 size_t count,
244 std::string* out) {
245 for (size_t i = 0; i < count && start + i < events.size(); ++i) {
246 if (i > 0)
247 *out += ",";
248 events[i + start].AppendAsJSON(out);
249 }
250 }
251
252 void TraceEvent::AppendAsJSON(std::string* out) const { 383 void TraceEvent::AppendAsJSON(std::string* out) const {
253 int64 time_int64 = timestamp_.ToInternalValue(); 384 int64 time_int64 = timestamp_.ToInternalValue();
254 int process_id = TraceLog::GetInstance()->process_id(); 385 int process_id = TraceLog::GetInstance()->process_id();
255 // Category name checked at category creation time. 386 // Category name checked at category creation time.
256 DCHECK(!strchr(name_, '"')); 387 DCHECK(!strchr(name_, '"'));
257 StringAppendF(out, 388 StringAppendF(out,
258 "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 "," 389 "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
259 "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{", 390 "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{",
260 TraceLog::GetCategoryName(category_enabled_), 391 TraceLog::GetCategoryName(category_enabled_),
261 process_id, 392 process_id,
(...skipping 242 matching lines...)
504 // content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString 635 // content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString
505 TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) { 636 TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) {
506 std::vector<std::string> split; 637 std::vector<std::string> split;
507 base::SplitString(options, ',', &split); 638 base::SplitString(options, ',', &split);
508 int ret = 0; 639 int ret = 0;
509 for (std::vector<std::string>::iterator iter = split.begin(); 640 for (std::vector<std::string>::iterator iter = split.begin();
510 iter != split.end(); 641 iter != split.end();
511 ++iter) { 642 ++iter) {
512 if (*iter == kRecordUntilFull) { 643 if (*iter == kRecordUntilFull) {
513 ret |= RECORD_UNTIL_FULL; 644 ret |= RECORD_UNTIL_FULL;
645 } else if (*iter == kRecordContinuously) {
646 ret |= RECORD_CONTINUOUSLY;
514 } else { 647 } else {
515 NOTREACHED(); // Unknown option provided. 648 NOTREACHED(); // Unknown option provided.
516 } 649 }
517 } 650 }
518 // Check to see if any RECORD_* options are set, and if none, then provide 651 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
519 // a default.
520 // TODO(dsinclair): Remove this comment when we have more then one RECORD_*
521 // flag and the code's structure is then sensible.
522 if (!(ret & RECORD_UNTIL_FULL))
523 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. 652 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
524 653
525 return static_cast<Options>(ret); 654 return static_cast<Options>(ret);
526 } 655 }
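
Usage sketch for the option parsing above, assuming the function is callable as written here and using the string constants defined near the top of the file:

  // "record-continuously" selects the ring buffer; an unrecognized token would
  // hit the NOTREACHED() above, and when no RECORD_* flag ends up set the
  // default RECORD_UNTIL_FULL is applied.
  TraceLog::Options opts =
      TraceLog::GetInstance()->TraceOptionsFromString("record-continuously");
  DCHECK(opts & TraceLog::RECORD_CONTINUOUSLY);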
527 656
528 TraceLog::TraceLog() 657 TraceLog::TraceLog()
529 : enable_count_(0), 658 : enable_count_(0),
659 logged_events_(NULL),
530 dispatching_to_observer_list_(false), 660 dispatching_to_observer_list_(false),
531 watch_category_(NULL), 661 watch_category_(NULL),
532 trace_options_(RECORD_UNTIL_FULL), 662 trace_options_(RECORD_UNTIL_FULL),
533 sampling_thread_handle_(0) { 663 sampling_thread_handle_(0) {
534 // Trace is enabled or disabled on one thread while other threads are 664 // Trace is enabled or disabled on one thread while other threads are
535 // accessing the enabled flag. We don't care whether edge-case events are 665 // accessing the enabled flag. We don't care whether edge-case events are
536 // traced or not, so we allow races on the enabled flag to keep the trace 666 // traced or not, so we allow races on the enabled flag to keep the trace
537 // macros fast. 667 // macros fast.
538 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: 668 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
539 // ANNOTATE_BENIGN_RACE_SIZED(g_category_enabled, sizeof(g_category_enabled), 669 // ANNOTATE_BENIGN_RACE_SIZED(g_category_enabled, sizeof(g_category_enabled),
540 // "trace_event category enabled"); 670 // "trace_event category enabled");
541 for (int i = 0; i < TRACE_EVENT_MAX_CATEGORIES; ++i) { 671 for (int i = 0; i < TRACE_EVENT_MAX_CATEGORIES; ++i) {
542 ANNOTATE_BENIGN_RACE(&g_category_enabled[i], 672 ANNOTATE_BENIGN_RACE(&g_category_enabled[i],
543 "trace_event category enabled"); 673 "trace_event category enabled");
544 } 674 }
545 #if defined(OS_NACL) // NaCl shouldn't expose the process id. 675 #if defined(OS_NACL) // NaCl shouldn't expose the process id.
546 SetProcessID(0); 676 SetProcessID(0);
547 #else 677 #else
548 SetProcessID(static_cast<int>(GetCurrentProcId())); 678 SetProcessID(static_cast<int>(GetCurrentProcId()));
549 #endif 679 #endif
680
681 logged_events_.reset(GetTraceBuffer());
550 } 682 }
551 683
552 TraceLog::~TraceLog() { 684 TraceLog::~TraceLog() {
553 } 685 }
554 686
555 const unsigned char* TraceLog::GetCategoryEnabled(const char* name) { 687 const unsigned char* TraceLog::GetCategoryEnabled(const char* name) {
556 TraceLog* tracelog = GetInstance(); 688 TraceLog* tracelog = GetInstance();
557 if (!tracelog) { 689 if (!tracelog) {
558 DCHECK(!g_category_enabled[g_category_already_shutdown]); 690 DCHECK(!g_category_enabled[g_category_already_shutdown]);
559 return &g_category_enabled[g_category_already_shutdown]; 691 return &g_category_enabled[g_category_already_shutdown];
(...skipping 115 matching lines...)
675 included_categories.end()); 807 included_categories.end());
676 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0); 808 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0);
677 } else { 809 } else {
678 // If either old or new included categories are empty, allow all events. 810 // If either old or new included categories are empty, allow all events.
679 included_categories_.clear(); 811 included_categories_.clear();
680 excluded_categories_.clear(); 812 excluded_categories_.clear();
681 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED); 813 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED);
682 } 814 }
683 return; 815 return;
684 } 816 }
685 trace_options_ = options; 817
818 if (options != trace_options_) {
819 trace_options_ = options;
820 logged_events_.reset(GetTraceBuffer());
821 }
686 822
687 if (dispatching_to_observer_list_) { 823 if (dispatching_to_observer_list_) {
688 DLOG(ERROR) << 824 DLOG(ERROR) <<
689 "Cannot manipulate TraceLog::Enabled state from an observer."; 825 "Cannot manipulate TraceLog::Enabled state from an observer.";
690 return; 826 return;
691 } 827 }
692 828
693 dispatching_to_observer_list_ = true; 829 dispatching_to_observer_list_ = true;
694 FOR_EACH_OBSERVER(EnabledStateChangedObserver, enabled_state_observer_list_, 830 FOR_EACH_OBSERVER(EnabledStateChangedObserver, enabled_state_observer_list_,
695 OnTraceLogWillEnable()); 831 OnTraceLogWillEnable());
696 dispatching_to_observer_list_ = false; 832 dispatching_to_observer_list_ = false;
697 833
698 logged_events_.reserve(1024);
699 included_categories_ = included_categories; 834 included_categories_ = included_categories;
700 excluded_categories_ = excluded_categories; 835 excluded_categories_ = excluded_categories;
701 // Note that if both included and excluded_categories are empty, the else 836 // Note that if both included and excluded_categories are empty, the else
702 // clause below excludes nothing, thereby enabling all categories. 837 // clause below excludes nothing, thereby enabling all categories.
703 if (!included_categories_.empty()) 838 if (!included_categories_.empty())
704 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0); 839 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0);
705 else 840 else
706 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED); 841 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED);
707 842
708 if (options & ENABLE_SAMPLING) { 843 if (options & ENABLE_SAMPLING) {
(...skipping 95 matching lines...)
804 void TraceLog::AddEnabledStateObserver(EnabledStateChangedObserver* listener) { 939 void TraceLog::AddEnabledStateObserver(EnabledStateChangedObserver* listener) {
805 enabled_state_observer_list_.AddObserver(listener); 940 enabled_state_observer_list_.AddObserver(listener);
806 } 941 }
807 942
808 void TraceLog::RemoveEnabledStateObserver( 943 void TraceLog::RemoveEnabledStateObserver(
809 EnabledStateChangedObserver* listener) { 944 EnabledStateChangedObserver* listener) {
810 enabled_state_observer_list_.RemoveObserver(listener); 945 enabled_state_observer_list_.RemoveObserver(listener);
811 } 946 }
812 947
813 float TraceLog::GetBufferPercentFull() const { 948 float TraceLog::GetBufferPercentFull() const {
814 return (float)((double)logged_events_.size()/(double)kTraceEventBufferSize); 949 return (float)((double)logged_events_->Size()/(double)kTraceEventBufferSize);
815 } 950 }
816 951
817 void TraceLog::SetNotificationCallback( 952 void TraceLog::SetNotificationCallback(
818 const TraceLog::NotificationCallback& cb) { 953 const TraceLog::NotificationCallback& cb) {
819 AutoLock lock(lock_); 954 AutoLock lock(lock_);
820 notification_callback_ = cb; 955 notification_callback_ = cb;
821 } 956 }
822 957
958 TraceBuffer* TraceLog::GetTraceBuffer() {
959 if (trace_options_ & RECORD_CONTINUOUSLY)
960 return new TraceBufferRingBuffer();
961 return new TraceBufferVector();
962 }
963
823 void TraceLog::SetEventCallback(EventCallback cb) { 964 void TraceLog::SetEventCallback(EventCallback cb) {
824 AutoLock lock(lock_); 965 AutoLock lock(lock_);
825 event_callback_ = cb; 966 event_callback_ = cb;
826 }; 967 };
827 968
828 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { 969 void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
829 std::vector<TraceEvent> previous_logged_events; 970 scoped_ptr<TraceBuffer> previous_logged_events;
830 { 971 {
831 AutoLock lock(lock_); 972 AutoLock lock(lock_);
832 previous_logged_events.swap(logged_events_); 973 previous_logged_events.swap(logged_events_);
974 logged_events_.reset(GetTraceBuffer());
833 } // release lock 975 } // release lock
834 976
835 for (size_t i = 0; 977 while (previous_logged_events->HasMoreEvents()) {
836 i < previous_logged_events.size();
837 i += kTraceEventBatchSize) {
838 scoped_refptr<RefCountedString> json_events_str_ptr = 978 scoped_refptr<RefCountedString> json_events_str_ptr =
839 new RefCountedString(); 979 new RefCountedString();
840 TraceEvent::AppendEventsAsJSON(previous_logged_events, 980
841 i, 981 for (size_t i = 0; i < kTraceEventBatchSize; ++i) {
842 kTraceEventBatchSize, 982 if (i > 0)
843 &(json_events_str_ptr->data())); 983 *(&(json_events_str_ptr->data())) += ",";
984
985 previous_logged_events->NextEvent().AppendAsJSON(
986 &(json_events_str_ptr->data()));
987
988 if (!previous_logged_events->HasMoreEvents())
989 break;
990 }
991
844 cb.Run(json_events_str_ptr); 992 cb.Run(json_events_str_ptr);
845 } 993 }
846 } 994 }
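
Flush() now drains the TraceBuffer in batches of kTraceEventBatchSize, invoking the output callback once per batch. A sketch of a consumer that stitches the batches back together (hypothetical helper name; assumes the usual base::Bind / RefCountedString headers):

  // Each batch is a comma-separated run of event JSON objects; joining batches
  // with "," and wrapping the result in "[" and "]" yields the trace JSON.
  void AppendBatch(std::string* out,
                   const scoped_refptr<base::RefCountedString>& batch) {
    if (!out->empty())
      *out += ",";
    *out += batch->data();
  }

  // std::string json;
  // TraceLog::GetInstance()->Flush(base::Bind(&AppendBatch, &json));
  // json is then framed as "[" + json + "]".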
847 995
848 void TraceLog::AddTraceEvent(char phase, 996 void TraceLog::AddTraceEvent(char phase,
849 const unsigned char* category_enabled, 997 const unsigned char* category_enabled,
850 const char* name, 998 const char* name,
851 unsigned long long id, 999 unsigned long long id,
852 int num_args, 1000 int num_args,
853 const char** arg_names, 1001 const char** arg_names,
(...skipping 31 matching lines...)
885 1033
886 TimeTicks now = timestamp - time_offset_; 1034 TimeTicks now = timestamp - time_offset_;
887 EventCallback event_callback_copy; 1035 EventCallback event_callback_copy;
888 1036
889 NotificationHelper notifier(this); 1037 NotificationHelper notifier(this);
890 1038
891 { 1039 {
892 AutoLock lock(lock_); 1040 AutoLock lock(lock_);
893 if (*category_enabled != CATEGORY_ENABLED) 1041 if (*category_enabled != CATEGORY_ENABLED)
894 return; 1042 return;
895 if (logged_events_.size() >= kTraceEventBufferSize) 1043 if (logged_events_->IsFull())
896 return; 1044 return;
897 1045
898 const char* new_name = ThreadIdNameManager::GetInstance()-> 1046 const char* new_name = ThreadIdNameManager::GetInstance()->
899 GetName(thread_id); 1047 GetName(thread_id);
900 // Check if the thread name has been set or changed since the previous 1048 // Check if the thread name has been set or changed since the previous
901 // call (if any), but don't bother if the new name is empty. Note this will 1049 // call (if any), but don't bother if the new name is empty. Note this will
902 // not detect a thread name change within the same char* buffer address: we 1050 // not detect a thread name change within the same char* buffer address: we
903 // favor common case performance over corner case correctness. 1051 // favor common case performance over corner case correctness.
904 if (new_name != g_current_thread_name.Get().Get() && 1052 if (new_name != g_current_thread_name.Get().Get() &&
905 new_name && *new_name) { 1053 new_name && *new_name) {
(...skipping 12 matching lines...)
918 bool found = std::find(existing_names.begin(), 1066 bool found = std::find(existing_names.begin(),
919 existing_names.end(), 1067 existing_names.end(),
920 new_name) != existing_names.end(); 1068 new_name) != existing_names.end();
921 if (!found) { 1069 if (!found) {
922 existing_name->second.push_back(','); 1070 existing_name->second.push_back(',');
923 existing_name->second.append(new_name); 1071 existing_name->second.append(new_name);
924 } 1072 }
925 } 1073 }
926 } 1074 }
927 1075
928 logged_events_.push_back( 1076 logged_events_->AddEvent(TraceEvent(thread_id,
929 TraceEvent(thread_id, 1077 now, phase, category_enabled, name, id,
930 now, phase, category_enabled, name, id, 1078 num_args, arg_names, arg_types, arg_values,
931 num_args, arg_names, arg_types, arg_values, 1079 flags));
932 flags));
933 1080
934 if (logged_events_.size() == kTraceEventBufferSize) 1081 if (logged_events_->IsFull())
935 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL); 1082 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL);
936 1083
937 if (watch_category_ == category_enabled && watch_event_name_ == name) 1084 if (watch_category_ == category_enabled && watch_event_name_ == name)
938 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1085 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
939 1086
940 event_callback_copy = event_callback_; 1087 event_callback_copy = event_callback_;
941 } // release lock 1088 } // release lock
942 1089
943 notifier.SendNotificationIfAny(); 1090 notifier.SendNotificationIfAny();
944 if (event_callback_copy != NULL) { 1091 if (event_callback_copy != NULL) {
(...skipping 22 matching lines...)
967 #if defined(OS_WIN) 1114 #if defined(OS_WIN)
968 TraceEventETWProvider::Trace(name, phase, id, extra); 1115 TraceEventETWProvider::Trace(name, phase, id, extra);
969 #endif 1116 #endif
970 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, 1117 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
971 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); 1118 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
972 } 1119 }
973 1120
974 void TraceLog::SetWatchEvent(const std::string& category_name, 1121 void TraceLog::SetWatchEvent(const std::string& category_name,
975 const std::string& event_name) { 1122 const std::string& event_name) {
976 const unsigned char* category = GetCategoryEnabled(category_name.c_str()); 1123 const unsigned char* category = GetCategoryEnabled(category_name.c_str());
977 int notify_count = 0; 1124 size_t notify_count = 0;
978 { 1125 {
979 AutoLock lock(lock_); 1126 AutoLock lock(lock_);
980 watch_category_ = category; 1127 watch_category_ = category;
981 watch_event_name_ = event_name; 1128 watch_event_name_ = event_name;
982 1129
983 // First, search existing events for watch event because we want to catch it 1130 // First, search existing events for watch event because we want to catch
984 // even if it has already occurred. 1131 // it even if it has already occurred.
985 for (size_t i = 0u; i < logged_events_.size(); ++i) { 1132 notify_count = logged_events_->CountEnabledByName(category, event_name);
986 if (category == logged_events_[i].category_enabled() &&
987 strcmp(event_name.c_str(), logged_events_[i].name()) == 0) {
988 ++notify_count;
989 }
990 }
991 } // release lock 1133 } // release lock
992 1134
993 // Send notification for each event found. 1135 // Send notification for each event found.
994 for (int i = 0; i < notify_count; ++i) { 1136 for (size_t i = 0; i < notify_count; ++i) {
995 NotificationHelper notifier(this); 1137 NotificationHelper notifier(this);
996 lock_.Acquire(); 1138 lock_.Acquire();
997 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1139 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
998 lock_.Release(); 1140 lock_.Release();
999 notifier.SendNotificationIfAny(); 1141 notifier.SendNotificationIfAny();
1000 } 1142 }
1001 } 1143 }
1002 1144
1003 void TraceLog::CancelWatchEvent() { 1145 void TraceLog::CancelWatchEvent() {
1004 AutoLock lock(lock_); 1146 AutoLock lock(lock_);
1005 watch_category_ = NULL; 1147 watch_category_ = NULL;
1006 watch_event_name_ = ""; 1148 watch_event_name_ = "";
1007 } 1149 }
1008 1150
1009 void TraceLog::AddThreadNameMetadataEvents() { 1151 void TraceLog::AddThreadNameMetadataEvents() {
1010 lock_.AssertAcquired(); 1152 lock_.AssertAcquired();
1011 for(hash_map<int, std::string>::iterator it = thread_names_.begin(); 1153 for(hash_map<int, std::string>::iterator it = thread_names_.begin();
1012 it != thread_names_.end(); 1154 it != thread_names_.end();
1013 it++) { 1155 it++) {
1014 if (!it->second.empty()) { 1156 if (!it->second.empty()) {
1015 int num_args = 1; 1157 int num_args = 1;
1016 const char* arg_name = "name"; 1158 const char* arg_name = "name";
1017 unsigned char arg_type; 1159 unsigned char arg_type;
1018 unsigned long long arg_value; 1160 unsigned long long arg_value;
1019 trace_event_internal::SetTraceValue(it->second, &arg_type, &arg_value); 1161 trace_event_internal::SetTraceValue(it->second, &arg_type, &arg_value);
1020 logged_events_.push_back( 1162 logged_events_->AddEvent(TraceEvent(it->first,
1021 TraceEvent(it->first, 1163 TimeTicks(), TRACE_EVENT_PHASE_METADATA,
1022 TimeTicks(), TRACE_EVENT_PHASE_METADATA, 1164 &g_category_enabled[g_category_metadata],
1023 &g_category_enabled[g_category_metadata], 1165 "thread_name", trace_event_internal::kNoEventId,
1024 "thread_name", trace_event_internal::kNoEventId, 1166 num_args, &arg_name, &arg_type, &arg_value,
1025 num_args, &arg_name, &arg_type, &arg_value, 1167 TRACE_EVENT_FLAG_NONE));
1026 TRACE_EVENT_FLAG_NONE));
1027 } 1168 }
1028 } 1169 }
1029 } 1170 }
1030 1171
1031 void TraceLog::InstallWaitableEventForSamplingTesting( 1172 void TraceLog::InstallWaitableEventForSamplingTesting(
1032 WaitableEvent* waitable_event) { 1173 WaitableEvent* waitable_event) {
1033 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event); 1174 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
1034 } 1175 }
1035 1176
1036 void TraceLog::DeleteForTesting() { 1177 void TraceLog::DeleteForTesting() {
(...skipping 61 matching lines...)
1098 0, // num_args 1239 0, // num_args
1099 NULL, // arg_names 1240 NULL, // arg_names
1100 NULL, // arg_types 1241 NULL, // arg_types
1101 NULL, // arg_values 1242 NULL, // arg_values
1102 TRACE_EVENT_FLAG_NONE); // flags 1243 TRACE_EVENT_FLAG_NONE); // flags
1103 } 1244 }
1104 } 1245 }
1105 1246
1106 } // namespace trace_event_internal 1247 } // namespace trace_event_internal
1107 1248