Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/trace_event_impl.h" | 5 #include "base/debug/trace_event_impl.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/bind.h" | 9 #include "base/bind.h" |
| 10 #include "base/debug/leak_annotations.h" | 10 #include "base/debug/leak_annotations.h" |
| (...skipping 49 matching lines...) | |
| 60 // Parallel arrays g_categories and g_category_enabled are separate so that | 60 // Parallel arrays g_categories and g_category_enabled are separate so that |
| 61 // a pointer to a member of g_category_enabled can be easily converted to an | 61 // a pointer to a member of g_category_enabled can be easily converted to an |
| 62 // index into g_categories. This allows macros to deal only with char enabled | 62 // index into g_categories. This allows macros to deal only with char enabled |
| 63 // pointers from g_category_enabled, and we can convert internally to determine | 63 // pointers from g_category_enabled, and we can convert internally to determine |
| 64 // the category name from the char enabled pointer. | 64 // the category name from the char enabled pointer. |
| 65 const char* g_categories[TRACE_EVENT_MAX_CATEGORIES] = { | 65 const char* g_categories[TRACE_EVENT_MAX_CATEGORIES] = { |
| 66 "tracing already shutdown", | 66 "tracing already shutdown", |
| 67 "tracing categories exhausted; must increase TRACE_EVENT_MAX_CATEGORIES", | 67 "tracing categories exhausted; must increase TRACE_EVENT_MAX_CATEGORIES", |
| 68 "__metadata", | 68 "__metadata", |
| 69 }; | 69 }; |
| 70 | |
| 70 // The enabled flag is char instead of bool so that the API can be used from C. | 71 // The enabled flag is char instead of bool so that the API can be used from C. |
| 71 unsigned char g_category_enabled[TRACE_EVENT_MAX_CATEGORIES] = { 0 }; | 72 unsigned char g_category_enabled[TRACE_EVENT_MAX_CATEGORIES] = { 0 }; |
| 72 const int g_category_already_shutdown = 0; | 73 const int g_category_already_shutdown = 0; |
| 73 const int g_category_categories_exhausted = 1; | 74 const int g_category_categories_exhausted = 1; |
| 74 const int g_category_metadata = 2; | 75 const int g_category_metadata = 2; |
| 75 int g_category_index = 3; // skip initial 3 categories | 76 int g_category_index = 3; // skip initial 3 categories |
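As an aside (not part of this CL), the pointer-to-index conversion described in the comment above amounts to simple pointer arithmetic. A minimal sketch, assuming the declarations above are in scope and the pointer was handed out from g_category_enabled; the helper name is invented for illustration, and the real lookup is exposed as TraceLog::GetCategoryName():

```cpp
// Recover the category name that corresponds to an enabled-flag pointer.
// The offset into g_category_enabled is, by construction of the parallel
// arrays, also the index into g_categories.
const char* CategoryNameFromEnabledFlag(const unsigned char* category_enabled) {
  size_t index = static_cast<size_t>(category_enabled - g_category_enabled);
  DCHECK(index < TRACE_EVENT_MAX_CATEGORIES);
  return g_categories[index];
}
```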
| 76 | 77 |
| 77 // The name of the current thread. This is used to decide if the current | 78 // The name of the current thread. This is used to decide if the current |
| 78 // thread name has changed. We combine all the seen thread names into the | 79 // thread name has changed. We combine all the seen thread names into the |
| 79 // output name for the thread. | 80 // output name for the thread. |
| 80 LazyInstance<ThreadLocalPointer<const char> >::Leaky | 81 LazyInstance<ThreadLocalPointer<const char> >::Leaky |
| 81 g_current_thread_name = LAZY_INSTANCE_INITIALIZER; | 82 g_current_thread_name = LAZY_INSTANCE_INITIALIZER; |
| 82 | 83 |
| 83 const char kRecordUntilFull[] = "record-until-full"; | 84 const char kRecordUntilFull[] = "record-until-full"; |
| 85 const char kRecordContinuously[] = "record-continuously"; | |
| 86 | |
| 87 class TraceBufferRingBuffer : public TraceBuffer { | |
| 88 public: | |
| 89 TraceBufferRingBuffer() | |
| 90 : logged_events_newest_(0), | |
| 91 logged_events_oldest_(0) { | |
| 92 } | |
| 93 | |
| 94 ~TraceBufferRingBuffer() {} | |
| 95 | |
| 96 void AddEvent(const TraceEvent& event) OVERRIDE { | |
| 97 if (logged_events_newest_ < logged_events_.size()) | |
| 98 logged_events_[logged_events_newest_] = event; | |
| 99 else | |
| 100 logged_events_.push_back(event); | |
| 101 | |
| 102 logged_events_newest_++; | |
| 103 if (logged_events_newest_ >= kTraceEventBufferSize) | |
| 104 logged_events_newest_ = 0; | |
| 105 if (logged_events_newest_ == logged_events_oldest_) { | |
| 106 logged_events_oldest_++; | |
| 107 if (logged_events_oldest_ >= kTraceEventBufferSize) { | |
| 108 logged_events_oldest_ = 0; | |
| 109 } | |
| 110 } | |
| 111 } | |
| 112 | |
| 113 bool HasMoreEvents() const OVERRIDE { | |
| 114 return logged_events_oldest_ != logged_events_newest_; | |
| 115 } | |
| 116 | |
| 117 const TraceEvent& NextEvent() OVERRIDE { | |
| 118 DCHECK(HasMoreEvents()); | |
| 119 | |
| 120 int cur = logged_events_oldest_; | |
jar (doing other things), 2013/03/13 18:42:30: nit: avoid abbreviations such as |cur| which I'm gu…
dsinclair, 2013/03/13 19:27:27: Done.
| 121 logged_events_oldest_++; | |
| 122 if (logged_events_oldest_ >= kTraceEventBufferSize) | |
| 123 logged_events_oldest_ = 0; | |
| 124 return logged_events_[cur]; | |
| 125 } | |
| 126 | |
| 127 bool IsFull() const OVERRIDE { | |
| 128 return false; | |
| 129 } | |
| 130 | |
| 131 size_t CountEnabledByName(const unsigned char* category, | |
| 132 const std::string& event_name) const OVERRIDE { | |
| 133 size_t notify_count = 0; | |
| 134 size_t idx = logged_events_oldest_; | |
| 135 while (true) { | |
| 136 if (idx == logged_events_newest_) | |
| 137 break; | |
jar (doing other things), 2013/03/13 18:42:30: nit: How about replacing last three lines with: wh…
dsinclair, 2013/03/13 19:27:27: Done.
| 138 | |
| 139 if (category == logged_events_[idx].category_enabled() && | |
| 140 strcmp(event_name.c_str(), logged_events_[idx].name()) == 0) { | |
| 141 ++notify_count; | |
| 142 } | |
| 143 | |
| 144 idx++; | |
| 145 if (idx >= kTraceEventBufferSize) | |
| 146 idx = 0; | |
| 147 } | |
| 148 return notify_count; | |
| 149 } | |
| 150 | |
| 151 private: | |
| 152 uint32 logged_events_newest_; | |
jar (doing other things), 2013/03/13 18:42:30: nit: This is not really the index of the newest, b…
dsinclair, 2013/03/13 19:27:27: Done.
| 153 uint32 logged_events_oldest_; | |
jar (doing other things), 2013/03/13 18:42:30: nit: Since this points to a singular event, better…
dsinclair, 2013/03/13 19:27:27: Done.
| 154 | |
| 155 DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer); | |
| 156 }; | |
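The wrap-around bookkeeping in TraceBufferRingBuffer is easiest to follow with a tiny capacity. Below is a standalone sketch (invented names, plain ints standing in for TraceEvents, not part of this CL) that mirrors the AddEvent()/NextEvent() arithmetic above:

```cpp
#include <cstdio>
#include <vector>

int main() {
  const size_t kCapacity = 4;  // stands in for kTraceEventBufferSize
  std::vector<int> events;     // stands in for logged_events_
  size_t next_write = 0;       // "newest": the slot the next event goes into
  size_t oldest = 0;           // oldest event not yet read

  for (int value = 1; value <= 6; ++value) {  // add six events
    if (next_write < events.size())
      events[next_write] = value;             // overwrite an old slot
    else
      events.push_back(value);                // still growing toward capacity
    if (++next_write >= kCapacity)
      next_write = 0;
    if (next_write == oldest) {               // writer caught up: drop oldest
      if (++oldest >= kCapacity)
        oldest = 0;
    }
  }

  // Drain the buffer the way HasMoreEvents()/NextEvent() do; prints "4 5 6",
  // i.e. once the buffer has wrapped, at most kCapacity - 1 of the most
  // recent events remain readable.
  while (oldest != next_write) {
    size_t current = oldest;
    if (++oldest >= kCapacity)
      oldest = 0;
    printf("%d ", events[current]);
  }
  printf("\n");
  return 0;
}
```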
| 157 | |
| 158 class TraceBufferVector : public TraceBuffer { | |
| 159 public: | |
| 160 TraceBufferVector() : current_iteration_index_(0) { | |
| 161 } | |
| 162 | |
| 163 ~TraceBufferVector() { | |
| 164 } | |
| 165 | |
| 166 void AddEvent(const TraceEvent& event) OVERRIDE { | |
| 167 // Note, we don't check IsFull here as the code to add the metadata | |
| 168 // will do an AddEvent, if the buffer is full we'd lose the metadata. | |
jar (doing other things), 2013/03/13 18:42:30: nit: "AddEvent, if the" --> "AddEvent. If the" I…
dsinclair, 2013/03/13 19:27:27: Clarified the comment.
| 169 logged_events_.push_back(event); | |
| 170 } | |
| 171 | |
| 172 bool HasMoreEvents() const OVERRIDE { | |
| 173 return current_iteration_index_ < Size(); | |
| 174 } | |
| 175 | |
| 176 const TraceEvent& NextEvent() OVERRIDE { | |
| 177 DCHECK(HasMoreEvents()); | |
| 178 return logged_events_[current_iteration_index_++]; | |
| 179 } | |
| 180 | |
| 181 bool IsFull() const OVERRIDE { | |
| 182 return Size() == kTraceEventBufferSize; | |
jar (doing other things), 2013/03/13 18:42:30: I'm not clear on the meaning, given that you push…
dsinclair, 2013/03/13 19:27:27: Done.
| 183 } | |
| 184 | |
| 185 size_t CountEnabledByName(const unsigned char* category, | |
| 186 const std::string& event_name) const OVERRIDE { | |
| 187 size_t notify_count = 0; | |
| 188 for (size_t idx = 0; idx < logged_events_.size(); idx++) { | |
jar (doing other things), 2013/03/13 18:42:30: nit: Rather than abbreviate |idx|, use |index|, or…
dsinclair, 2013/03/13 19:27:27: Done.
| 189 if (category == logged_events_[idx].category_enabled() && | |
| 190 strcmp(event_name.c_str(), logged_events_[idx].name()) == 0) { | |
| 191 ++notify_count; | |
| 192 } | |
| 193 } | |
| 194 return notify_count; | |
| 195 } | |
| 196 | |
| 197 private: | |
| 198 size_t current_iteration_index_; | |
| 199 | |
| 200 DISALLOW_COPY_AND_ASSIGN(TraceBufferVector); | |
| 201 }; | |
| 84 | 202 |
| 85 } // namespace | 203 } // namespace |
| 86 | 204 |
| 87 //////////////////////////////////////////////////////////////////////////////// | 205 //////////////////////////////////////////////////////////////////////////////// |
| 88 // | 206 // |
| 89 // TraceEvent | 207 // TraceEvent |
| 90 // | 208 // |
| 91 //////////////////////////////////////////////////////////////////////////////// | 209 //////////////////////////////////////////////////////////////////////////////// |
| 92 | 210 |
| 93 namespace { | 211 namespace { |
| (...skipping 137 matching lines...) | |
| 231 start_pos += 2; | 349 start_pos += 2; |
| 232 } | 350 } |
| 233 *out += "\""; | 351 *out += "\""; |
| 234 break; | 352 break; |
| 235 default: | 353 default: |
| 236 NOTREACHED() << "Don't know how to print this value"; | 354 NOTREACHED() << "Don't know how to print this value"; |
| 237 break; | 355 break; |
| 238 } | 356 } |
| 239 } | 357 } |
| 240 | 358 |
| 241 void TraceEvent::AppendEventsAsJSON(const std::vector<TraceEvent>& events, | |
| 242 size_t start, | |
| 243 size_t count, | |
| 244 std::string* out) { | |
| 245 for (size_t i = 0; i < count && start + i < events.size(); ++i) { | |
| 246 if (i > 0) | |
| 247 *out += ","; | |
| 248 events[i + start].AppendAsJSON(out); | |
| 249 } | |
| 250 } | |
| 251 | |
| 252 void TraceEvent::AppendAsJSON(std::string* out) const { | 359 void TraceEvent::AppendAsJSON(std::string* out) const { |
| 253 int64 time_int64 = timestamp_.ToInternalValue(); | 360 int64 time_int64 = timestamp_.ToInternalValue(); |
| 254 int process_id = TraceLog::GetInstance()->process_id(); | 361 int process_id = TraceLog::GetInstance()->process_id(); |
| 255 // Category name checked at category creation time. | 362 // Category name checked at category creation time. |
| 256 DCHECK(!strchr(name_, '"')); | 363 DCHECK(!strchr(name_, '"')); |
| 257 StringAppendF(out, | 364 StringAppendF(out, |
| 258 "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 "," | 365 "{\"cat\":\"%s\",\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 "," |
| 259 "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{", | 366 "\"ph\":\"%c\",\"name\":\"%s\",\"args\":{", |
| 260 TraceLog::GetCategoryName(category_enabled_), | 367 TraceLog::GetCategoryName(category_enabled_), |
| 261 process_id, | 368 process_id, |
| (...skipping 15 matching lines...) | |
| 277 | 384 |
| 278 // If id_ is set, print it out as a hex string so we don't loose any | 385 // If id_ is set, print it out as a hex string so we don't loose any |
| 279 // bits (it might be a 64-bit pointer). | 386 // bits (it might be a 64-bit pointer). |
| 280 if (flags_ & TRACE_EVENT_FLAG_HAS_ID) | 387 if (flags_ & TRACE_EVENT_FLAG_HAS_ID) |
| 281 StringAppendF(out, ",\"id\":\"%" PRIx64 "\"", static_cast<uint64>(id_)); | 388 StringAppendF(out, ",\"id\":\"%" PRIx64 "\"", static_cast<uint64>(id_)); |
| 282 *out += "}"; | 389 *out += "}"; |
| 283 } | 390 } |
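For reference, AppendAsJSON() emits one JSON object per event in the shape dictated by the format string above; with invented values (and the "id" field present only when TRACE_EVENT_FLAG_HAS_ID is set), a serialized event looks roughly like:

```
{"cat":"cc","pid":1234,"tid":5678,"ts":1629584,"ph":"B","name":"Frame","args":{},"id":"0x7fab12345678"}
```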
| 284 | 391 |
| 285 //////////////////////////////////////////////////////////////////////////////// | 392 //////////////////////////////////////////////////////////////////////////////// |
| 286 // | 393 // |
| 394 // TraceBuffer | |
| 395 // | |
| 396 //////////////////////////////////////////////////////////////////////////////// | |
| 397 | |
| 398 TraceBuffer::TraceBuffer() { | |
| 399 logged_events_.reserve(1024); | |
| 400 } | |
| 401 | |
| 402 TraceBuffer::~TraceBuffer() { | |
| 403 } | |
| 404 | |
| 405 //////////////////////////////////////////////////////////////////////////////// | |
| 406 // | |
| 287 // TraceResultBuffer | 407 // TraceResultBuffer |
| 288 // | 408 // |
| 289 //////////////////////////////////////////////////////////////////////////////// | 409 //////////////////////////////////////////////////////////////////////////////// |
| 290 | 410 |
| 291 TraceResultBuffer::OutputCallback | 411 TraceResultBuffer::OutputCallback |
| 292 TraceResultBuffer::SimpleOutput::GetCallback() { | 412 TraceResultBuffer::SimpleOutput::GetCallback() { |
| 293 return Bind(&SimpleOutput::Append, Unretained(this)); | 413 return Bind(&SimpleOutput::Append, Unretained(this)); |
| 294 } | 414 } |
| 295 | 415 |
| 296 void TraceResultBuffer::SimpleOutput::Append( | 416 void TraceResultBuffer::SimpleOutput::Append( |
| (...skipping 207 matching lines...) | |
| 504 // content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString | 624 // content/browser/devtools/devtools_tracing_handler:TraceOptionsFromString |
| 505 TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) { | 625 TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) { |
| 506 std::vector<std::string> split; | 626 std::vector<std::string> split; |
| 507 base::SplitString(options, ',', &split); | 627 base::SplitString(options, ',', &split); |
| 508 int ret = 0; | 628 int ret = 0; |
| 509 for (std::vector<std::string>::iterator iter = split.begin(); | 629 for (std::vector<std::string>::iterator iter = split.begin(); |
| 510 iter != split.end(); | 630 iter != split.end(); |
| 511 ++iter) { | 631 ++iter) { |
| 512 if (*iter == kRecordUntilFull) { | 632 if (*iter == kRecordUntilFull) { |
| 513 ret |= RECORD_UNTIL_FULL; | 633 ret |= RECORD_UNTIL_FULL; |
| 634 } else if (*iter == kRecordContinuously) { | |
| 635 ret |= RECORD_CONTINUOUSLY; | |
| 514 } else { | 636 } else { |
| 515 NOTREACHED(); // Unknown option provided. | 637 NOTREACHED(); // Unknown option provided. |
| 516 } | 638 } |
| 517 } | 639 } |
| 518 // Check to see if any RECORD_* options are set, and if none, then provide | 640 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) |
| 519 // a default. | |
| 520 // TODO(dsinclair): Remove this comment when we have more then one RECORD_* | |
| 521 // flag and the code's structure is then sensible. | |
| 522 if (!(ret & RECORD_UNTIL_FULL)) | |
| 523 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. | 641 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. |
| 524 | 642 |
| 525 return static_cast<Options>(ret); | 643 return static_cast<Options>(ret); |
| 526 } | 644 } |
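A hypothetical caller (not part of this CL) showing the effect of the new option string; the returned flags later decide which buffer GetTraceBuffer() constructs:

```cpp
// "record-continuously" now parses to RECORD_CONTINUOUSLY, so a subsequent
// GetTraceBuffer() call builds a TraceBufferRingBuffer rather than a
// TraceBufferVector. Unrecognized record modes still fall back to the
// RECORD_UNTIL_FULL default at the end of the function.
TraceLog::Options options =
    TraceLog::TraceOptionsFromString("record-continuously");
```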
| 527 | 645 |
| 528 TraceLog::TraceLog() | 646 TraceLog::TraceLog() |
| 529 : enable_count_(0), | 647 : enable_count_(0), |
| 648 logged_events_(NULL), | |
| 530 dispatching_to_observer_list_(false), | 649 dispatching_to_observer_list_(false), |
| 531 watch_category_(NULL), | 650 watch_category_(NULL), |
| 532 trace_options_(RECORD_UNTIL_FULL), | 651 trace_options_(RECORD_UNTIL_FULL), |
| 533 sampling_thread_handle_(0) { | 652 sampling_thread_handle_(0) { |
| 534 // Trace is enabled or disabled on one thread while other threads are | 653 // Trace is enabled or disabled on one thread while other threads are |
| 535 // accessing the enabled flag. We don't care whether edge-case events are | 654 // accessing the enabled flag. We don't care whether edge-case events are |
| 536 // traced or not, so we allow races on the enabled flag to keep the trace | 655 // traced or not, so we allow races on the enabled flag to keep the trace |
| 537 // macros fast. | 656 // macros fast. |
| 538 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: | 657 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: |
| 539 // ANNOTATE_BENIGN_RACE_SIZED(g_category_enabled, sizeof(g_category_enabled), | 658 // ANNOTATE_BENIGN_RACE_SIZED(g_category_enabled, sizeof(g_category_enabled), |
| 540 // "trace_event category enabled"); | 659 // "trace_event category enabled"); |
| 541 for (int i = 0; i < TRACE_EVENT_MAX_CATEGORIES; ++i) { | 660 for (int i = 0; i < TRACE_EVENT_MAX_CATEGORIES; ++i) { |
| 542 ANNOTATE_BENIGN_RACE(&g_category_enabled[i], | 661 ANNOTATE_BENIGN_RACE(&g_category_enabled[i], |
| 543 "trace_event category enabled"); | 662 "trace_event category enabled"); |
| 544 } | 663 } |
| 545 #if defined(OS_NACL) // NaCl shouldn't expose the process id. | 664 #if defined(OS_NACL) // NaCl shouldn't expose the process id. |
| 546 SetProcessID(0); | 665 SetProcessID(0); |
| 547 #else | 666 #else |
| 548 SetProcessID(static_cast<int>(GetCurrentProcId())); | 667 SetProcessID(static_cast<int>(GetCurrentProcId())); |
| 549 #endif | 668 #endif |
| 669 | |
| 670 logged_events_.reset(GetTraceBuffer()); | |
| 550 } | 671 } |
| 551 | 672 |
| 552 TraceLog::~TraceLog() { | 673 TraceLog::~TraceLog() { |
| 553 } | 674 } |
| 554 | 675 |
| 555 const unsigned char* TraceLog::GetCategoryEnabled(const char* name) { | 676 const unsigned char* TraceLog::GetCategoryEnabled(const char* name) { |
| 556 TraceLog* tracelog = GetInstance(); | 677 TraceLog* tracelog = GetInstance(); |
| 557 if (!tracelog) { | 678 if (!tracelog) { |
| 558 DCHECK(!g_category_enabled[g_category_already_shutdown]); | 679 DCHECK(!g_category_enabled[g_category_already_shutdown]); |
| 559 return &g_category_enabled[g_category_already_shutdown]; | 680 return &g_category_enabled[g_category_already_shutdown]; |
| (...skipping 115 matching lines...) | |
| 675 included_categories.end()); | 796 included_categories.end()); |
| 676 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0); | 797 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0); |
| 677 } else { | 798 } else { |
| 678 // If either old or new included categories are empty, allow all events. | 799 // If either old or new included categories are empty, allow all events. |
| 679 included_categories_.clear(); | 800 included_categories_.clear(); |
| 680 excluded_categories_.clear(); | 801 excluded_categories_.clear(); |
| 681 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED); | 802 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED); |
| 682 } | 803 } |
| 683 return; | 804 return; |
| 684 } | 805 } |
| 685 trace_options_ = options; | 806 |
| 807 if (options != trace_options_) { | |
| 808 trace_options_ = options; | |
| 809 logged_events_.reset(GetTraceBuffer()); | |
| 810 } | |
| 686 | 811 |
| 687 if (dispatching_to_observer_list_) { | 812 if (dispatching_to_observer_list_) { |
| 688 DLOG(ERROR) << | 813 DLOG(ERROR) << |
| 689 "Cannot manipulate TraceLog::Enabled state from an observer."; | 814 "Cannot manipulate TraceLog::Enabled state from an observer."; |
| 690 return; | 815 return; |
| 691 } | 816 } |
| 692 | 817 |
| 693 dispatching_to_observer_list_ = true; | 818 dispatching_to_observer_list_ = true; |
| 694 FOR_EACH_OBSERVER(EnabledStateChangedObserver, enabled_state_observer_list_, | 819 FOR_EACH_OBSERVER(EnabledStateChangedObserver, enabled_state_observer_list_, |
| 695 OnTraceLogWillEnable()); | 820 OnTraceLogWillEnable()); |
| 696 dispatching_to_observer_list_ = false; | 821 dispatching_to_observer_list_ = false; |
| 697 | 822 |
| 698 logged_events_.reserve(1024); | |
| 699 included_categories_ = included_categories; | 823 included_categories_ = included_categories; |
| 700 excluded_categories_ = excluded_categories; | 824 excluded_categories_ = excluded_categories; |
| 701 // Note that if both included and excluded_categories are empty, the else | 825 // Note that if both included and excluded_categories are empty, the else |
| 702 // clause below excludes nothing, thereby enabling all categories. | 826 // clause below excludes nothing, thereby enabling all categories. |
| 703 if (!included_categories_.empty()) | 827 if (!included_categories_.empty()) |
| 704 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0); | 828 EnableMatchingCategories(included_categories_, CATEGORY_ENABLED, 0); |
| 705 else | 829 else |
| 706 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED); | 830 EnableMatchingCategories(excluded_categories_, 0, CATEGORY_ENABLED); |
| 707 | 831 |
| 708 if (options & ENABLE_SAMPLING) { | 832 if (options & ENABLE_SAMPLING) { |
| (...skipping 95 matching lines...) | |
| 804 void TraceLog::AddEnabledStateObserver(EnabledStateChangedObserver* listener) { | 928 void TraceLog::AddEnabledStateObserver(EnabledStateChangedObserver* listener) { |
| 805 enabled_state_observer_list_.AddObserver(listener); | 929 enabled_state_observer_list_.AddObserver(listener); |
| 806 } | 930 } |
| 807 | 931 |
| 808 void TraceLog::RemoveEnabledStateObserver( | 932 void TraceLog::RemoveEnabledStateObserver( |
| 809 EnabledStateChangedObserver* listener) { | 933 EnabledStateChangedObserver* listener) { |
| 810 enabled_state_observer_list_.RemoveObserver(listener); | 934 enabled_state_observer_list_.RemoveObserver(listener); |
| 811 } | 935 } |
| 812 | 936 |
| 813 float TraceLog::GetBufferPercentFull() const { | 937 float TraceLog::GetBufferPercentFull() const { |
| 814 return (float)((double)logged_events_.size()/(double)kTraceEventBufferSize); | 938 return (float)((double)logged_events_->Size()/(double)kTraceEventBufferSize); |
| 815 } | 939 } |
| 816 | 940 |
| 817 void TraceLog::SetNotificationCallback( | 941 void TraceLog::SetNotificationCallback( |
| 818 const TraceLog::NotificationCallback& cb) { | 942 const TraceLog::NotificationCallback& cb) { |
| 819 AutoLock lock(lock_); | 943 AutoLock lock(lock_); |
| 820 notification_callback_ = cb; | 944 notification_callback_ = cb; |
| 821 } | 945 } |
| 822 | 946 |
| 947 TraceBuffer* TraceLog::GetTraceBuffer() { | |
| 948 if (trace_options_ & RECORD_CONTINUOUSLY) | |
| 949 return new TraceBufferRingBuffer(); | |
| 950 return new TraceBufferVector(); | |
| 951 } | |
| 952 | |
| 823 void TraceLog::SetEventCallback(EventCallback cb) { | 953 void TraceLog::SetEventCallback(EventCallback cb) { |
| 824 AutoLock lock(lock_); | 954 AutoLock lock(lock_); |
| 825 event_callback_ = cb; | 955 event_callback_ = cb; |
| 826 }; | 956 }; |
| 827 | 957 |
| 828 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { | 958 void TraceLog::Flush(const TraceLog::OutputCallback& cb) { |
| 829 std::vector<TraceEvent> previous_logged_events; | 959 scoped_ptr<TraceBuffer> previous_logged_events; |
| 830 { | 960 { |
| 831 AutoLock lock(lock_); | 961 AutoLock lock(lock_); |
| 832 previous_logged_events.swap(logged_events_); | 962 previous_logged_events.swap(logged_events_); |
| 963 logged_events_.reset(GetTraceBuffer()); | |
| 833 } // release lock | 964 } // release lock |
| 834 | 965 |
| 835 for (size_t i = 0; | 966 while (true) { |
| 836 i < previous_logged_events.size(); | 967 if (!previous_logged_events->HasMoreEvents()) |
jar (doing other things), 2013/03/13 18:42:30: nit: again, why not put test into while()
dsinclair, 2013/03/13 19:27:27: Done.
| 837 i += kTraceEventBatchSize) { | 968 break; |
| 969 | |
| 838 scoped_refptr<RefCountedString> json_events_str_ptr = | 970 scoped_refptr<RefCountedString> json_events_str_ptr = |
| 839 new RefCountedString(); | 971 new RefCountedString(); |
| 840 TraceEvent::AppendEventsAsJSON(previous_logged_events, | 972 |
| 841 i, | 973 for (size_t i = 0; i < kTraceEventBatchSize; ++i) { |
| 842 kTraceEventBatchSize, | 974 if (i > 0) |
| 843 &(json_events_str_ptr->data())); | 975 *(&(json_events_str_ptr->data())) += ","; |
| 976 | |
| 977 previous_logged_events->NextEvent().AppendAsJSON( | |
| 978 &(json_events_str_ptr->data())); | |
| 979 | |
| 980 if (!previous_logged_events->HasMoreEvents()) | |
| 981 break; | |
| 982 } | |
| 983 | |
| 844 cb.Run(json_events_str_ptr); | 984 cb.Run(json_events_str_ptr); |
| 845 } | 985 } |
| 846 } | 986 } |
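As a usage sketch (hypothetical helper names; assumes OutputCallback is a base::Callback taking the scoped_refptr<RefCountedString> that cb.Run() receives above, and that Flush() invokes it synchronously as in this implementation), a caller can stitch the per-batch JSON fragments back together:

```cpp
// Each callback carries at most kTraceEventBatchSize comma-separated JSON
// events, so the chunks themselves must also be joined with commas before
// wrapping the whole thing in "[" and "]" (TraceResultBuffer does similar
// stitching).
void AppendTraceChunk(std::string* collected,
                      const scoped_refptr<base::RefCountedString>& chunk) {
  if (!collected->empty())
    *collected += ",";
  *collected += chunk->data();
}

void CollectTraceAsJsonArray(std::string* json) {
  std::string collected;
  // base::Unretained on a local is safe only because the callback runs
  // before Flush() returns.
  TraceLog::GetInstance()->Flush(
      base::Bind(&AppendTraceChunk, base::Unretained(&collected)));
  *json = "[" + collected + "]";
}
```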
| 847 | 987 |
| 848 void TraceLog::AddTraceEvent(char phase, | 988 void TraceLog::AddTraceEvent(char phase, |
| 849 const unsigned char* category_enabled, | 989 const unsigned char* category_enabled, |
| 850 const char* name, | 990 const char* name, |
| 851 unsigned long long id, | 991 unsigned long long id, |
| 852 int num_args, | 992 int num_args, |
| 853 const char** arg_names, | 993 const char** arg_names, |
| (...skipping 28 matching lines...) | |
| 882 | 1022 |
| 883 TimeTicks now = timestamp - time_offset_; | 1023 TimeTicks now = timestamp - time_offset_; |
| 884 EventCallback event_callback_copy; | 1024 EventCallback event_callback_copy; |
| 885 | 1025 |
| 886 NotificationHelper notifier(this); | 1026 NotificationHelper notifier(this); |
| 887 | 1027 |
| 888 { | 1028 { |
| 889 AutoLock lock(lock_); | 1029 AutoLock lock(lock_); |
| 890 if (*category_enabled != CATEGORY_ENABLED) | 1030 if (*category_enabled != CATEGORY_ENABLED) |
| 891 return; | 1031 return; |
| 892 if (logged_events_.size() >= kTraceEventBufferSize) | 1032 if (logged_events_->IsFull()) |
| 893 return; | 1033 return; |
| 894 | 1034 |
| 895 const char* new_name = ThreadIdNameManager::GetInstance()-> | 1035 const char* new_name = ThreadIdNameManager::GetInstance()-> |
| 896 GetName(thread_id); | 1036 GetName(thread_id); |
| 897 // Check if the thread name has been set or changed since the previous | 1037 // Check if the thread name has been set or changed since the previous |
| 898 // call (if any), but don't bother if the new name is empty. Note this will | 1038 // call (if any), but don't bother if the new name is empty. Note this will |
| 899 // not detect a thread name change within the same char* buffer address: we | 1039 // not detect a thread name change within the same char* buffer address: we |
| 900 // favor common case performance over corner case correctness. | 1040 // favor common case performance over corner case correctness. |
| 901 if (new_name != g_current_thread_name.Get().Get() && | 1041 if (new_name != g_current_thread_name.Get().Get() && |
| 902 new_name && *new_name) { | 1042 new_name && *new_name) { |
| (...skipping 15 matching lines...) | |
| 918 if (!found) { | 1058 if (!found) { |
| 919 existing_name->second.push_back(','); | 1059 existing_name->second.push_back(','); |
| 920 existing_name->second.append(new_name); | 1060 existing_name->second.append(new_name); |
| 921 } | 1061 } |
| 922 } | 1062 } |
| 923 } | 1063 } |
| 924 | 1064 |
| 925 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) | 1065 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) |
| 926 id ^= process_id_hash_; | 1066 id ^= process_id_hash_; |
| 927 | 1067 |
| 928 logged_events_.push_back( | 1068 logged_events_->AddEvent(TraceEvent(thread_id, |
| 929 TraceEvent(thread_id, | 1069 now, phase, category_enabled, name, id, |
| 930 now, phase, category_enabled, name, id, | 1070 num_args, arg_names, arg_types, arg_values, |
| 931 num_args, arg_names, arg_types, arg_values, | 1071 flags)); |
| 932 flags)); | |
| 933 | 1072 |
| 934 if (logged_events_.size() == kTraceEventBufferSize) | 1073 if (logged_events_->IsFull()) |
| 935 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL); | 1074 notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL); |
| 936 | 1075 |
| 937 if (watch_category_ == category_enabled && watch_event_name_ == name) | 1076 if (watch_category_ == category_enabled && watch_event_name_ == name) |
| 938 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); | 1077 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); |
| 939 | 1078 |
| 940 event_callback_copy = event_callback_; | 1079 event_callback_copy = event_callback_; |
| 941 } // release lock | 1080 } // release lock |
| 942 | 1081 |
| 943 notifier.SendNotificationIfAny(); | 1082 notifier.SendNotificationIfAny(); |
| 944 if (event_callback_copy != NULL) { | 1083 if (event_callback_copy != NULL) { |
| (...skipping 28 matching lines...) | |
| 973 | 1112 |
| 974 void TraceLog::SetWatchEvent(const std::string& category_name, | 1113 void TraceLog::SetWatchEvent(const std::string& category_name, |
| 975 const std::string& event_name) { | 1114 const std::string& event_name) { |
| 976 const unsigned char* category = GetCategoryEnabled(category_name.c_str()); | 1115 const unsigned char* category = GetCategoryEnabled(category_name.c_str()); |
| 977 int notify_count = 0; | 1116 int notify_count = 0; |
| 978 { | 1117 { |
| 979 AutoLock lock(lock_); | 1118 AutoLock lock(lock_); |
| 980 watch_category_ = category; | 1119 watch_category_ = category; |
| 981 watch_event_name_ = event_name; | 1120 watch_event_name_ = event_name; |
| 982 | 1121 |
| 983 // First, search existing events for watch event because we want to catch it | 1122 // First, search existing events for watch event because we want to catch |
| 984 // even if it has already occurred. | 1123 // it even if it has already occurred. |
| 985 for (size_t i = 0u; i < logged_events_.size(); ++i) { | 1124 notify_count = logged_events_->CountEnabledByName(category, event_name); |
| 986 if (category == logged_events_[i].category_enabled() && | |
| 987 strcmp(event_name.c_str(), logged_events_[i].name()) == 0) { | |
| 988 ++notify_count; | |
| 989 } | |
| 990 } | |
| 991 } // release lock | 1125 } // release lock |
| 992 | 1126 |
| 993 // Send notification for each event found. | 1127 // Send notification for each event found. |
| 994 for (int i = 0; i < notify_count; ++i) { | 1128 for (int i = 0; i < notify_count; ++i) { |
| 995 NotificationHelper notifier(this); | 1129 NotificationHelper notifier(this); |
| 996 lock_.Acquire(); | 1130 lock_.Acquire(); |
| 997 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); | 1131 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); |
| 998 lock_.Release(); | 1132 lock_.Release(); |
| 999 notifier.SendNotificationIfAny(); | 1133 notifier.SendNotificationIfAny(); |
| 1000 } | 1134 } |
| 1001 } | 1135 } |
| 1002 | 1136 |
| 1003 void TraceLog::CancelWatchEvent() { | 1137 void TraceLog::CancelWatchEvent() { |
| 1004 AutoLock lock(lock_); | 1138 AutoLock lock(lock_); |
| 1005 watch_category_ = NULL; | 1139 watch_category_ = NULL; |
| 1006 watch_event_name_ = ""; | 1140 watch_event_name_ = ""; |
| 1007 } | 1141 } |
| 1008 | 1142 |
| 1009 void TraceLog::AddThreadNameMetadataEvents() { | 1143 void TraceLog::AddThreadNameMetadataEvents() { |
| 1010 lock_.AssertAcquired(); | 1144 lock_.AssertAcquired(); |
| 1011 for(hash_map<int, std::string>::iterator it = thread_names_.begin(); | 1145 for(hash_map<int, std::string>::iterator it = thread_names_.begin(); |
| 1012 it != thread_names_.end(); | 1146 it != thread_names_.end(); |
| 1013 it++) { | 1147 it++) { |
| 1014 if (!it->second.empty()) { | 1148 if (!it->second.empty()) { |
| 1015 int num_args = 1; | 1149 int num_args = 1; |
| 1016 const char* arg_name = "name"; | 1150 const char* arg_name = "name"; |
| 1017 unsigned char arg_type; | 1151 unsigned char arg_type; |
| 1018 unsigned long long arg_value; | 1152 unsigned long long arg_value; |
| 1019 trace_event_internal::SetTraceValue(it->second, &arg_type, &arg_value); | 1153 trace_event_internal::SetTraceValue(it->second, &arg_type, &arg_value); |
| 1020 logged_events_.push_back( | 1154 logged_events_->AddEvent(TraceEvent(it->first, |
nduca, 2013/03/13 17:04:40: a nice followup would maybe be to initialize inpla…
dsinclair, 2013/03/13 19:27:27: Ack.
| 1021 TraceEvent(it->first, | 1155 TimeTicks(), TRACE_EVENT_PHASE_METADATA, |
| 1022 TimeTicks(), TRACE_EVENT_PHASE_METADATA, | 1156 &g_category_enabled[g_category_metadata], |
| 1023 &g_category_enabled[g_category_metadata], | 1157 "thread_name", trace_event_internal::kNoEventId, |
| 1024 "thread_name", trace_event_internal::kNoEventId, | 1158 num_args, &arg_name, &arg_type, &arg_value, |
| 1025 num_args, &arg_name, &arg_type, &arg_value, | 1159 TRACE_EVENT_FLAG_NONE)); |
| 1026 TRACE_EVENT_FLAG_NONE)); | |
| 1027 } | 1160 } |
| 1028 } | 1161 } |
| 1029 } | 1162 } |
| 1030 | 1163 |
| 1031 void TraceLog::InstallWaitableEventForSamplingTesting( | 1164 void TraceLog::InstallWaitableEventForSamplingTesting( |
| 1032 WaitableEvent* waitable_event) { | 1165 WaitableEvent* waitable_event) { |
| 1033 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event); | 1166 sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event); |
| 1034 } | 1167 } |
| 1035 | 1168 |
| 1036 void TraceLog::DeleteForTesting() { | 1169 void TraceLog::DeleteForTesting() { |
| (...skipping 61 matching lines...) | |
| 1098 0, // num_args | 1231 0, // num_args |
| 1099 NULL, // arg_names | 1232 NULL, // arg_names |
| 1100 NULL, // arg_types | 1233 NULL, // arg_types |
| 1101 NULL, // arg_values | 1234 NULL, // arg_values |
| 1102 TRACE_EVENT_FLAG_NONE); // flags | 1235 TRACE_EVENT_FLAG_NONE); // flags |
| 1103 } | 1236 } |
| 1104 } | 1237 } |
| 1105 | 1238 |
| 1106 } // namespace trace_event_internal | 1239 } // namespace trace_event_internal |
| 1107 | 1240 |