Unified Diff: base/trace_event/trace_event_impl.cc

Issue 1180693002: Update from https://crrev.com/333737 (Closed) Base URL: https://github.com/domokit/mojo.git@master
Patch Set: rebased Created 5 years, 6 months ago
 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/trace_event_impl.h"

 #include <algorithm>
 #include <cmath>

 #include "base/base_switches.h"
(...skipping 43 matching lines...)
 namespace base {
 namespace trace_event {

 namespace {

 // The overhead of TraceEvent above this threshold will be reported in the
 // trace.
 const int kOverheadReportThresholdInMicroseconds = 50;

-// String options that can be used to initialize TraceOptions.
-const char kRecordUntilFull[] = "record-until-full";
-const char kRecordContinuously[] = "record-continuously";
-const char kRecordAsMuchAsPossible[] = "record-as-much-as-possible";
-const char kTraceToConsole[] = "trace-to-console";
-const char kEnableSampling[] = "enable-sampling";
-const char kEnableSystrace[] = "enable-systrace";
-
 // Controls the number of trace events we will buffer in-memory
 // before throwing them away.
 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize;
 const size_t kTraceEventVectorBigBufferChunks =
     512000000 / kTraceBufferChunkSize;
 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize;
 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4;
 const size_t kTraceEventBufferSizeInBytes = 100 * 1024;
 // Can store results for 30 seconds with 1 ms sampling interval.
 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize;
 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events.
 const size_t kEchoToConsoleTraceEventBufferChunks = 256;

 const int kThreadFlushTimeoutMs = 3000;

 #if !defined(OS_NACL)
 // These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575.
 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task";
 #endif

-const char kSyntheticDelayCategoryFilterPrefix[] = "DELAY(";
-
 #define MAX_CATEGORY_GROUPS 100

 // Parallel arrays g_category_groups and g_category_group_enabled are separate
 // so that a pointer to a member of g_category_group_enabled can be easily
 // converted to an index into g_category_groups. This allows macros to deal
 // only with char enabled pointers from g_category_group_enabled, and we can
 // convert internally to determine the category name from the char enabled
 // pointer.
 const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
   "toplevel",
(...skipping 13 matching lines...)
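
The pointer-to-index trick described in the comment above reduces to pointer arithmetic over the two parallel arrays. A minimal sketch (helper name hypothetical; the real conversion appears in TraceLog::GetCategoryGroupName further down):

    const char* CategoryNameFromEnabledFlag(const unsigned char* category_ptr) {
      // The element offset within g_category_group_enabled doubles as the
      // index into g_category_groups.
      uintptr_t index = category_ptr - g_category_group_enabled;
      return g_category_groups[index];
    }
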
 const int g_num_builtin_categories = 5;
 // Skip default categories.
 base::subtle::AtomicWord g_category_index = g_num_builtin_categories;

 // The name of the current thread. This is used to decide if the current
 // thread name has changed. We combine all the seen thread names into the
 // output name for the thread.
 LazyInstance<ThreadLocalPointer<const char> >::Leaky
     g_current_thread_name = LAZY_INSTANCE_INITIALIZER;

-TimeTicks ThreadNow() {
-  return TimeTicks::IsThreadNowSupported() ?
-      TimeTicks::ThreadNow() : TimeTicks();
-}
+ThreadTicks ThreadNow() {
+  return ThreadTicks::IsSupported() ? ThreadTicks::Now() : ThreadTicks();
+}

 class TraceBufferRingBuffer : public TraceBuffer {
  public:
   TraceBufferRingBuffer(size_t max_chunks)
       : max_chunks_(max_chunks),
         recyclable_chunks_queue_(new size_t[queue_capacity()]),
         queue_head_(0),
         queue_tail_(max_chunks),
         current_iteration_index_(0),
(...skipping 236 matching lines...)
     const char* metadata_name, const char* arg_name,
     const T& value) {
   if (!trace_event)
     return;

   int num_args = 1;
   unsigned char arg_type;
   unsigned long long arg_value;
   ::trace_event_internal::SetTraceValue(value, &arg_type, &arg_value);
   trace_event->Initialize(thread_id,
-                          TimeTicks(), TimeTicks(), TRACE_EVENT_PHASE_METADATA,
+                          TraceTicks(), ThreadTicks(),
+                          TRACE_EVENT_PHASE_METADATA,
                           &g_category_group_enabled[g_category_metadata],
                           metadata_name, ::trace_event_internal::kNoEventId,
                           num_args, &arg_name, &arg_type, &arg_value, NULL,
                           TRACE_EVENT_FLAG_NONE);
 }

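For orientation: SetTraceValue, used just above, packs a typed value into an (arg_type, 64-bit payload) pair. A sketch of what it produces for a plain integer, assuming the standard TRACE_VALUE_TYPE_INT tag:

    unsigned char arg_type;
    unsigned long long arg_value;
    // Packs 42 as (TRACE_VALUE_TYPE_INT, 42); pointers and strings are
    // stored as the pointer value with a matching type tag.
    ::trace_event_internal::SetTraceValue(42, &arg_type, &arg_value);
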
 class AutoThreadLocalBoolean {
  public:
   explicit AutoThreadLocalBoolean(ThreadLocalBoolean* thread_local_boolean)
       : thread_local_boolean_(thread_local_boolean) {
(...skipping 124 matching lines...)
   for (int i = 0; i < kTraceMaxNumArgs; ++i) {
     arg_names_[i] = other.arg_names_[i];
     arg_types_[i] = other.arg_types_[i];
     arg_values_[i] = other.arg_values_[i];
     convertable_values_[i] = other.convertable_values_[i];
   }
 }

 void TraceEvent::Initialize(
     int thread_id,
-    TimeTicks timestamp,
-    TimeTicks thread_timestamp,
+    TraceTicks timestamp,
+    ThreadTicks thread_timestamp,
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
     unsigned long long id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
     unsigned char flags) {
(...skipping 72 matching lines...)

 void TraceEvent::Reset() {
   // Only reset fields that won't be initialized in Initialize(), or that may
   // hold references to other objects.
   duration_ = TimeDelta::FromInternalValue(-1);
   parameter_copy_storage_ = NULL;
   for (int i = 0; i < kTraceMaxNumArgs; ++i)
     convertable_values_[i] = NULL;
 }

-void TraceEvent::UpdateDuration(const TimeTicks& now,
-                                const TimeTicks& thread_now) {
+void TraceEvent::UpdateDuration(const TraceTicks& now,
+                                const ThreadTicks& thread_now) {
   DCHECK_EQ(duration_.ToInternalValue(), -1);
   duration_ = now - timestamp_;
   thread_duration_ = thread_now - thread_timestamp_;
 }

 // static
 void TraceEvent::AppendValueAsJSON(unsigned char type,
                                    TraceEvent::TraceValue value,
                                    std::string* out) {
   switch (type) {
(...skipping 51 matching lines...)
     case TRACE_VALUE_TYPE_STRING:
     case TRACE_VALUE_TYPE_COPY_STRING:
       EscapeJSONString(value.as_string ? value.as_string : "NULL", true, out);
       break;
     default:
       NOTREACHED() << "Don't know how to print this value";
       break;
   }
 }

-void TraceEvent::AppendAsJSON(std::string* out) const {
+void TraceEvent::AppendAsJSON(
+    std::string* out,
+    const ArgumentFilterPredicate& argument_filter_predicate) const {
   int64 time_int64 = timestamp_.ToInternalValue();
   int process_id = TraceLog::GetInstance()->process_id();
+  const char* category_group_name =
+      TraceLog::GetCategoryGroupName(category_group_enabled_);
+
   // Category group checked at category creation time.
   DCHECK(!strchr(name_, '"'));
-  StringAppendF(out,
-      "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64 ","
-      "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":{",
-      process_id,
-      thread_id_,
-      time_int64,
-      phase_,
-      TraceLog::GetCategoryGroupName(category_group_enabled_),
-      name_);
+  StringAppendF(out, "{\"pid\":%i,\"tid\":%i,\"ts\":%" PRId64
+                     ","
+                     "\"ph\":\"%c\",\"cat\":\"%s\",\"name\":\"%s\",\"args\":{",
+                process_id, thread_id_, time_int64, phase_, category_group_name,
+                name_);

   // Output argument names and values, stop at first NULL argument name.
-  for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
-    if (i > 0)
-      *out += ",";
-    *out += "\"";
-    *out += arg_names_[i];
-    *out += "\":";
-
-    if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
-      convertable_values_[i]->AppendAsTraceFormat(out);
-    else
-      AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
+  if (arg_names_[0]) {
+    bool allow_args = argument_filter_predicate.is_null() ||
+                      argument_filter_predicate.Run(category_group_name, name_);
+
+    if (allow_args) {
+      for (int i = 0; i < kTraceMaxNumArgs && arg_names_[i]; ++i) {
+        if (i > 0)
+          *out += ",";
+        *out += "\"";
+        *out += arg_names_[i];
+        *out += "\":";
+
+        if (arg_types_[i] == TRACE_VALUE_TYPE_CONVERTABLE)
+          convertable_values_[i]->AppendAsTraceFormat(out);
+        else
+          AppendValueAsJSON(arg_types_[i], arg_values_[i], out);
+      }
+    } else {
+      *out += "\"stripped\":1";
+    }
   }
+
   *out += "}";

   if (phase_ == TRACE_EVENT_PHASE_COMPLETE) {
     int64 duration = duration_.ToInternalValue();
     if (duration != -1)
       StringAppendF(out, ",\"dur\":%" PRId64, duration);
     if (!thread_timestamp_.is_null()) {
       int64 thread_duration = thread_duration_.ToInternalValue();
       if (thread_duration != -1)
         StringAppendF(out, ",\"tdur\":%" PRId64, thread_duration);
(...skipping 235 matching lines...)
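
The new argument_filter_predicate parameter lets an embedder strip event arguments from the serialized JSON; filtered events come out as "args":{"stripped":1}. A minimal sketch of wiring one up, assuming ArgumentFilterPredicate is a base::Callback taking the category group and event name (per the Run() call above); the whitelisting policy here is hypothetical:

    bool OnlyToplevelArgs(const char* category_group_name,
                          const char* event_name) {
      // Keep arguments only for the builtin "toplevel" category.
      return strcmp(category_group_name, "toplevel") == 0;
    }

    TraceLog::GetInstance()->SetArgumentFilterPredicate(
        Bind(&OnlyToplevelArgs));
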
     : bucket(bucket),
       bucket_name(name),
       callback(callback) {
 }

 TraceBucketData::~TraceBucketData() {
 }

 ////////////////////////////////////////////////////////////////////////////////
 //
-// TraceOptions
-//
-////////////////////////////////////////////////////////////////////////////////
-
-bool TraceOptions::SetFromString(const std::string& options_string) {
-  record_mode = RECORD_UNTIL_FULL;
-  enable_sampling = false;
-  enable_systrace = false;
-
-  std::vector<std::string> split;
-  std::vector<std::string>::iterator iter;
-  base::SplitString(options_string, ',', &split);
-  for (iter = split.begin(); iter != split.end(); ++iter) {
-    if (*iter == kRecordUntilFull) {
-      record_mode = RECORD_UNTIL_FULL;
-    } else if (*iter == kRecordContinuously) {
-      record_mode = RECORD_CONTINUOUSLY;
-    } else if (*iter == kTraceToConsole) {
-      record_mode = ECHO_TO_CONSOLE;
-    } else if (*iter == kRecordAsMuchAsPossible) {
-      record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
-    } else if (*iter == kEnableSampling) {
-      enable_sampling = true;
-    } else if (*iter == kEnableSystrace) {
-      enable_systrace = true;
-    } else {
-      return false;
-    }
-  }
-  return true;
-}
-
-std::string TraceOptions::ToString() const {
-  std::string ret;
-  switch (record_mode) {
-    case RECORD_UNTIL_FULL:
-      ret = kRecordUntilFull;
-      break;
-    case RECORD_CONTINUOUSLY:
-      ret = kRecordContinuously;
-      break;
-    case ECHO_TO_CONSOLE:
-      ret = kTraceToConsole;
-      break;
-    case RECORD_AS_MUCH_AS_POSSIBLE:
-      ret = kRecordAsMuchAsPossible;
-      break;
-    default:
-      NOTREACHED();
-  }
-  if (enable_sampling)
-    ret = ret + "," + kEnableSampling;
-  if (enable_systrace)
-    ret = ret + "," + kEnableSystrace;
-  return ret;
-}
-
-////////////////////////////////////////////////////////////////////////////////
-//
 // TraceLog
 //
 ////////////////////////////////////////////////////////////////////////////////

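For reference, the removed TraceOptions string round-trip (whose behavior moves into TraceConfig in this CL) worked like this, per the deleted code above:

    TraceOptions options;
    // Parses to record_mode == RECORD_CONTINUOUSLY, enable_sampling == true;
    // an unrecognized token makes SetFromString() return false.
    bool ok = options.SetFromString("record-continuously,enable-sampling");
    DCHECK(ok);
    DCHECK_EQ("record-continuously,enable-sampling", options.ToString());
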
 class TraceLog::ThreadLocalEventBuffer
     : public MessageLoop::DestructionObserver {
  public:
   ThreadLocalEventBuffer(TraceLog* trace_log);
   ~ThreadLocalEventBuffer() override;

   TraceEvent* AddTraceEvent(TraceEventHandle* handle);

-  void ReportOverhead(const TimeTicks& event_timestamp,
-                      const TimeTicks& event_thread_timestamp);
+  void ReportOverhead(const TraceTicks& event_timestamp,
+                      const ThreadTicks& event_thread_timestamp);

   TraceEvent* GetEventByHandle(TraceEventHandle handle) {
     if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
         handle.chunk_index != chunk_index_)
       return NULL;

     return chunk_->GetEventAt(handle.event_index);
   }

   int generation() const { return generation_; }
(...skipping 76 matching lines...)

   size_t event_index;
   TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
   if (trace_event && handle)
     MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);

   return trace_event;
 }

 void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
-    const TimeTicks& event_timestamp,
-    const TimeTicks& event_thread_timestamp) {
+    const TraceTicks& event_timestamp,
+    const ThreadTicks& event_thread_timestamp) {
   if (!g_category_group_enabled[g_category_trace_event_overhead])
     return;

   CheckThisIsCurrentBuffer();

   event_count_++;
-  TimeTicks thread_now = ThreadNow();
-  TimeTicks now = trace_log_->OffsetNow();
+  ThreadTicks thread_now = ThreadNow();
+  TraceTicks now = trace_log_->OffsetNow();
   TimeDelta overhead = now - event_timestamp;
   if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
     TraceEvent* trace_event = AddTraceEvent(NULL);
     if (trace_event) {
       trace_event->Initialize(
           static_cast<int>(PlatformThread::CurrentId()),
           event_timestamp, event_thread_timestamp,
           TRACE_EVENT_PHASE_COMPLETE,
           &g_category_group_enabled[g_category_trace_event_overhead],
           "overhead", 0, 0, NULL, NULL, NULL, NULL, 0);
(...skipping 35 matching lines...)
     : mode_(DISABLED),
       num_traces_recorded_(0),
       event_callback_(0),
       dispatching_to_observer_list_(false),
       process_sort_index_(0),
       process_id_hash_(0),
       process_id_(0),
       watch_category_(0),
       trace_options_(kInternalRecordUntilFull),
       sampling_thread_handle_(0),
-      category_filter_(CategoryFilter::kDefaultCategoryFilterString),
-      event_callback_category_filter_(
-          CategoryFilter::kDefaultCategoryFilterString),
+      trace_config_(TraceConfig()),
+      event_callback_trace_config_(TraceConfig()),
       thread_shared_chunk_index_(0),
       generation_(0),
       use_worker_thread_(false) {
   // Trace is enabled or disabled on one thread while other threads are
   // accessing the enabled flag. We don't care whether edge-case events are
   // traced or not, so we allow races on the enabled flag to keep the trace
   // macros fast.
   // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
   // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
   //                            sizeof(g_category_group_enabled),
(...skipping 14 matching lines...)
         switches::kTraceToConsole);
     if (filter.empty()) {
       filter = kEchoToConsoleCategoryFilter;
     } else {
       filter.append(",");
       filter.append(kEchoToConsoleCategoryFilter);
     }

     LOG(ERROR) << "Start " << switches::kTraceToConsole
                << " with CategoryFilter '" << filter << "'.";
-    SetEnabled(CategoryFilter(filter),
-               RECORDING_MODE,
-               TraceOptions(ECHO_TO_CONSOLE));
+    SetEnabled(TraceConfig(filter, ECHO_TO_CONSOLE), RECORDING_MODE);
   }
 #endif

   logged_events_.reset(CreateTraceBuffer());
 }

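The SetEnabled() change above is the shape of the whole CL: the category filter and the options collapse into a single TraceConfig argument. A sketch of the new call style at an arbitrary call site (filter string illustrative):

    // Old: SetEnabled(CategoryFilter("-ipc,-task"), RECORDING_MODE,
    //                 TraceOptions(ECHO_TO_CONSOLE));
    TraceLog::GetInstance()->SetEnabled(
        TraceConfig("-ipc,-task", ECHO_TO_CONSOLE), TraceLog::RECORDING_MODE);
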
 TraceLog::~TraceLog() {
 }

 const unsigned char* TraceLog::GetCategoryGroupEnabled(
(...skipping 19 matching lines...)
                                       "out of bounds category pointer";
   uintptr_t category_index =
       (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
   return g_category_groups[category_index];
 }

 void TraceLog::UpdateCategoryGroupEnabledFlag(size_t category_index) {
   unsigned char enabled_flag = 0;
   const char* category_group = g_category_groups[category_index];
   if (mode_ == RECORDING_MODE &&
-      category_filter_.IsCategoryGroupEnabled(category_group))
+      trace_config_.IsCategoryGroupEnabled(category_group))
     enabled_flag |= ENABLED_FOR_RECORDING;
   else if (mode_ == MONITORING_MODE &&
-           category_filter_.IsCategoryGroupEnabled(category_group))
+           trace_config_.IsCategoryGroupEnabled(category_group))
     enabled_flag |= ENABLED_FOR_MONITORING;
   if (event_callback_ &&
-      event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
+      event_callback_trace_config_.IsCategoryGroupEnabled(category_group))
     enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
 #if defined(OS_WIN)
   if (base::trace_event::TraceEventETWExport::isETWExportEnabled())
     enabled_flag |= ENABLED_FOR_ETW_EXPORT;
 #endif

   g_category_group_enabled[category_index] = enabled_flag;
 }

 void TraceLog::UpdateCategoryGroupEnabledFlags() {
   size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
   for (size_t i = 0; i < category_index; i++)
     UpdateCategoryGroupEnabledFlag(i);
 }

-void TraceLog::UpdateSyntheticDelaysFromCategoryFilter() {
+void TraceLog::UpdateSyntheticDelaysFromTraceConfig() {
   ResetTraceEventSyntheticDelays();
-  const CategoryFilter::StringList& delays =
-      category_filter_.GetSyntheticDelayValues();
-  CategoryFilter::StringList::const_iterator ci;
+  const TraceConfig::StringList& delays =
+      trace_config_.GetSyntheticDelayValues();
+  TraceConfig::StringList::const_iterator ci;
   for (ci = delays.begin(); ci != delays.end(); ++ci) {
     StringTokenizer tokens(*ci, ";");
     if (!tokens.GetNext())
       continue;
     TraceEventSyntheticDelay* delay =
         TraceEventSyntheticDelay::Lookup(tokens.token());
     while (tokens.GetNext()) {
       std::string token = tokens.token();
       char* duration_end;
       double target_duration = strtod(token.c_str(), &duration_end);
(...skipping 43 matching lines...)
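
The synthetic-delay values tokenized here originate from DELAY(...) entries in the category filter (see the CategoryFilter::Initialize code at the end of this file). A concrete entry and how the loop above consumes it (delay name illustrative):

    // Filter entry: "DELAY(gpu.SwapBuffers;16)"
    // GetSyntheticDelayValues() yields "gpu.SwapBuffers;16"; splitting on ';':
    //   "gpu.SwapBuffers" -> TraceEventSyntheticDelay::Lookup()
    //   "16"              -> target_duration via strtod() (unit handling is
    //                        in the elided lines)
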
                                      "must increase MAX_CATEGORY_GROUPS";
   if (category_index < MAX_CATEGORY_GROUPS) {
     // Don't hold on to the category_group pointer, so that we can create
     // category groups with strings not known at compile time (this is
     // required by SetWatchEvent).
     const char* new_group = strdup(category_group);
     ANNOTATE_LEAKING_OBJECT_PTR(new_group);
     g_category_groups[category_index] = new_group;
     DCHECK(!g_category_group_enabled[category_index]);
     // Note that if both included and excluded patterns in the
-    // CategoryFilter are empty, we exclude nothing,
+    // TraceConfig are empty, we exclude nothing,
     // thereby enabling this category group.
     UpdateCategoryGroupEnabledFlag(category_index);
     category_group_enabled = &g_category_group_enabled[category_index];
     // Update the max index now.
     base::subtle::Release_Store(&g_category_index, category_index + 1);
   } else {
     category_group_enabled =
         &g_category_group_enabled[g_category_categories_exhausted];
   }
   return category_group_enabled;
 }

 void TraceLog::GetKnownCategoryGroups(
     std::vector<std::string>* category_groups) {
   AutoLock lock(lock_);
   category_groups->push_back(
       g_category_groups[g_category_trace_event_overhead]);
   size_t category_index = base::subtle::NoBarrier_Load(&g_category_index);
   for (size_t i = g_num_builtin_categories; i < category_index; i++)
     category_groups->push_back(g_category_groups[i]);
 }

-void TraceLog::SetEnabled(const CategoryFilter& category_filter,
-                          Mode mode,
-                          const TraceOptions& options) {
+void TraceLog::SetEnabled(const TraceConfig& trace_config, Mode mode) {
   std::vector<EnabledStateObserver*> observer_list;
   {
     AutoLock lock(lock_);

     // Can't enable tracing when Flush() is in progress.
     DCHECK(!flush_task_runner_);

     InternalTraceOptions new_options =
-        GetInternalOptionsFromTraceOptions(options);
+        GetInternalOptionsFromTraceConfig(trace_config);

     InternalTraceOptions old_options = trace_options();

     if (IsEnabled()) {
       if (new_options != old_options) {
         DLOG(ERROR) << "Attempting to re-enable tracing with a different "
                     << "set of options.";
       }

       if (mode != mode_) {
         DLOG(ERROR) << "Attempting to re-enable tracing with a different mode.";
       }

-      category_filter_.Merge(category_filter);
+      trace_config_.Merge(trace_config);
       UpdateCategoryGroupEnabledFlags();
       return;
     }

     if (dispatching_to_observer_list_) {
       DLOG(ERROR) <<
           "Cannot manipulate TraceLog::Enabled state from an observer.";
       return;
     }

     mode_ = mode;

     if (new_options != old_options) {
       subtle::NoBarrier_Store(&trace_options_, new_options);
       UseNextTraceBuffer();
     }

     num_traces_recorded_++;

-    category_filter_ = CategoryFilter(category_filter);
+    trace_config_ = TraceConfig(trace_config);
     UpdateCategoryGroupEnabledFlags();
-    UpdateSyntheticDelaysFromCategoryFilter();
+    UpdateSyntheticDelaysFromTraceConfig();

     if (new_options & kInternalEnableSampling) {
       sampling_thread_.reset(new TraceSamplingThread);
       sampling_thread_->RegisterSampleBucket(
           &g_trace_state[0],
           "bucket0",
           Bind(&TraceSamplingThread::DefaultSamplingCallback));
       sampling_thread_->RegisterSampleBucket(
           &g_trace_state[1],
           "bucket1",
(...skipping 14 matching lines...)
   // Notify observers outside the lock in case they trigger trace events.
   for (size_t i = 0; i < observer_list.size(); ++i)
     observer_list[i]->OnTraceLogEnabled();

   {
     AutoLock lock(lock_);
     dispatching_to_observer_list_ = false;
   }
 }

-TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceOptions(
-    const TraceOptions& options) {
+void TraceLog::SetArgumentFilterPredicate(
+    const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
+  AutoLock lock(lock_);
+  DCHECK(!argument_filter_predicate.is_null());
+  DCHECK(argument_filter_predicate_.is_null());
+  argument_filter_predicate_ = argument_filter_predicate;
+}
+
+TraceLog::InternalTraceOptions TraceLog::GetInternalOptionsFromTraceConfig(
+    const TraceConfig& config) {
   InternalTraceOptions ret =
-      options.enable_sampling ? kInternalEnableSampling : kInternalNone;
-  switch (options.record_mode) {
+      config.IsSamplingEnabled() ? kInternalEnableSampling : kInternalNone;
+  if (config.IsArgumentFilterEnabled())
+    ret |= kInternalEnableArgumentFilter;
+  switch (config.GetTraceRecordMode()) {
     case RECORD_UNTIL_FULL:
       return ret | kInternalRecordUntilFull;
     case RECORD_CONTINUOUSLY:
       return ret | kInternalRecordContinuously;
     case ECHO_TO_CONSOLE:
       return ret | kInternalEchoToConsole;
     case RECORD_AS_MUCH_AS_POSSIBLE:
       return ret | kInternalRecordAsMuchAsPossible;
   }
   NOTREACHED();
   return kInternalNone;
 }

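A worked example of the mapping above, assuming TraceConfig's string constructor and its option tokens ("record-continuously", "enable-sampling", "enable-argument-filter"):

    // TraceConfig("*", "record-continuously,enable-sampling,enable-argument-filter")
    // -> kInternalRecordContinuously | kInternalEnableSampling |
    //    kInternalEnableArgumentFilter
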
-CategoryFilter TraceLog::GetCurrentCategoryFilter() {
+TraceConfig TraceLog::GetCurrentTraceConfig() const {
   AutoLock lock(lock_);
-  return category_filter_;
-}
-
-TraceOptions TraceLog::GetCurrentTraceOptions() const {
-  TraceOptions ret;
-  InternalTraceOptions option = trace_options();
-  ret.enable_sampling = (option & kInternalEnableSampling) != 0;
-  if (option & kInternalRecordUntilFull)
-    ret.record_mode = RECORD_UNTIL_FULL;
-  else if (option & kInternalRecordContinuously)
-    ret.record_mode = RECORD_CONTINUOUSLY;
-  else if (option & kInternalEchoToConsole)
-    ret.record_mode = ECHO_TO_CONSOLE;
-  else if (option & kInternalRecordAsMuchAsPossible)
-    ret.record_mode = RECORD_AS_MUCH_AS_POSSIBLE;
-  else
-    NOTREACHED();
-  return ret;
+  return trace_config_;
 }

 void TraceLog::SetDisabled() {
   AutoLock lock(lock_);
   SetDisabledWhileLocked();
 }

 void TraceLog::SetDisabledWhileLocked() {
   lock_.AssertAcquired();

(...skipping 11 matching lines...)
   if (sampling_thread_.get()) {
     // Stop the sampling thread.
     sampling_thread_->Stop();
     lock_.Release();
     PlatformThread::Join(sampling_thread_handle_);
     lock_.Acquire();
     sampling_thread_handle_ = PlatformThreadHandle();
     sampling_thread_.reset();
   }

-  category_filter_.Clear();
+  trace_config_.Clear();
   subtle::NoBarrier_Store(&watch_category_, 0);
   watch_event_name_ = "";
   UpdateCategoryGroupEnabledFlags();
   AddMetadataEventsWhileLocked();

   dispatching_to_observer_list_ = true;
   std::vector<EnabledStateObserver*> observer_list =
       enabled_state_observer_list_;

   {
(...skipping 94 matching lines...)
 void TraceLog::CheckIfBufferIsFullWhileLocked() {
   lock_.AssertAcquired();
   if (logged_events_->IsFull()) {
     if (buffer_limit_reached_timestamp_.is_null()) {
       buffer_limit_reached_timestamp_ = OffsetNow();
     }
     SetDisabledWhileLocked();
   }
 }

-void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
+void TraceLog::SetEventCallbackEnabled(const TraceConfig& trace_config,
                                        EventCallback cb) {
   AutoLock lock(lock_);
   subtle::NoBarrier_Store(&event_callback_,
                           reinterpret_cast<subtle::AtomicWord>(cb));
-  event_callback_category_filter_ = category_filter;
+  event_callback_trace_config_ = trace_config;
   UpdateCategoryGroupEnabledFlags();
 };

 void TraceLog::SetEventCallbackDisabled() {
   AutoLock lock(lock_);
   subtle::NoBarrier_Store(&event_callback_, 0);
   UpdateCategoryGroupEnabledFlags();
 }

 // Flush() works as follows:
(...skipping 60 matching lines...)
         TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs));
     return;
   }

   FinishFlush(generation);
 }

 // Usually it runs on a different thread.
 void TraceLog::ConvertTraceEventsToTraceFormat(
     scoped_ptr<TraceBuffer> logged_events,
-    const TraceLog::OutputCallback& flush_output_callback) {
-
+    const OutputCallback& flush_output_callback,
+    const TraceEvent::ArgumentFilterPredicate& argument_filter_predicate) {
   if (flush_output_callback.is_null())
     return;

   // The callback needs to be called at least once even if there are no
   // events, to let the caller know the completion of flush.
   bool has_more_events = true;
   do {
     scoped_refptr<RefCountedString> json_events_str_ptr =
         new RefCountedString();

     while (json_events_str_ptr->size() < kTraceEventBufferSizeInBytes) {
       const TraceBufferChunk* chunk = logged_events->NextChunk();
       has_more_events = chunk != NULL;
       if (!chunk)
         break;
       for (size_t j = 0; j < chunk->size(); ++j) {
         if (json_events_str_ptr->size())
           json_events_str_ptr->data().append(",\n");
-        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()));
+        chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data()),
+                                           argument_filter_predicate);
       }
     }
     flush_output_callback.Run(json_events_str_ptr, has_more_events);
   } while (has_more_events);
 }

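A sketch of the consuming side: the OutputCallback runs once per accumulated fragment of roughly kTraceEventBufferSizeInBytes of comma-joined event JSON, with has_more_events false on the final call (callback shape assumed from the Run() invocation above; the completion hook is hypothetical):

    void OnTraceChunk(const scoped_refptr<RefCountedString>& events_str,
                      bool has_more_events) {
      trace_json->append(events_str->data());  // one ~100KB fragment
      if (!has_more_events)
        OnFlushComplete();  // hypothetical completion hook
    }
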
 void TraceLog::FinishFlush(int generation) {
   scoped_ptr<TraceBuffer> previous_logged_events;
   OutputCallback flush_output_callback;
+  TraceEvent::ArgumentFilterPredicate argument_filter_predicate;

   if (!CheckGeneration(generation))
     return;

   {
     AutoLock lock(lock_);

     previous_logged_events.swap(logged_events_);
     UseNextTraceBuffer();
     thread_message_loops_.clear();

     flush_task_runner_ = NULL;
     flush_output_callback = flush_output_callback_;
     flush_output_callback_.Reset();
+
+    if (trace_options() & kInternalEnableArgumentFilter) {
+      CHECK(!argument_filter_predicate_.is_null());
+      argument_filter_predicate = argument_filter_predicate_;
+    }
   }

   if (use_worker_thread_ &&
       WorkerPool::PostTask(
-          FROM_HERE,
-          Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
-               Passed(&previous_logged_events),
-               flush_output_callback),
+          FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat,
+                          Passed(&previous_logged_events),
+                          flush_output_callback, argument_filter_predicate),
           true)) {
     return;
   }

   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
-                                  flush_output_callback);
+                                  flush_output_callback,
+                                  argument_filter_predicate);
 }

 // Run in each thread holding a local event buffer.
 void TraceLog::FlushCurrentThread(int generation) {
   {
     AutoLock lock(lock_);
     if (!CheckGeneration(generation) || !flush_task_runner_) {
       // This is late. The corresponding flush has finished.
       return;
     }
(...skipping 29 matching lines...)
          it != thread_message_loops_.end(); ++it) {
       LOG(WARNING) << "Thread: " << (*it)->thread_name();
     }
   }
   FinishFlush(generation);
 }

 void TraceLog::FlushButLeaveBufferIntact(
     const TraceLog::OutputCallback& flush_output_callback) {
   scoped_ptr<TraceBuffer> previous_logged_events;
+  TraceEvent::ArgumentFilterPredicate argument_filter_predicate;
   {
     AutoLock lock(lock_);
     AddMetadataEventsWhileLocked();
     if (thread_shared_chunk_) {
       // Return the chunk to the main buffer to flush the sampling data.
       logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                   thread_shared_chunk_.Pass());
     }
     previous_logged_events = logged_events_->CloneForIteration().Pass();
+
+    if (trace_options() & kInternalEnableArgumentFilter) {
+      CHECK(!argument_filter_predicate_.is_null());
+      argument_filter_predicate = argument_filter_predicate_;
+    }
   }  // release lock

   ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
-                                  flush_output_callback);
+                                  flush_output_callback,
+                                  argument_filter_predicate);
 }

 void TraceLog::UseNextTraceBuffer() {
   logged_events_.reset(CreateTraceBuffer());
   subtle::NoBarrier_AtomicIncrement(&generation_, 1);
   thread_shared_chunk_.reset();
   thread_shared_chunk_index_ = 0;
 }

 TraceEventHandle TraceLog::AddTraceEvent(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
     unsigned long long id,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
     unsigned char flags) {
   int thread_id = static_cast<int>(base::PlatformThread::CurrentId());
-  base::TimeTicks now = base::TimeTicks::NowFromSystemTraceTime();
+  base::TraceTicks now = base::TraceTicks::Now();
   return AddTraceEventWithThreadIdAndTimestamp(phase, category_group_enabled,
                                                name, id, thread_id, now,
                                                num_args, arg_names,
                                                arg_types, arg_values,
                                                convertable_values, flags);
 }

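The TimeTicks -> TraceTicks/ThreadTicks renames threaded through this file split one clock type into role-specific ones; every call site follows the same pattern (a sketch):

    // Old: base::TimeTicks::NowFromSystemTraceTime() and
    //      TimeTicks::ThreadNow().
    base::TraceTicks now = base::TraceTicks::Now();    // global trace timeline
    base::ThreadTicks thread_now = base::ThreadTicks::Now();  // per-thread CPU
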
 TraceEventHandle TraceLog::AddTraceEventWithThreadIdAndTimestamp(
     char phase,
     const unsigned char* category_group_enabled,
     const char* name,
     unsigned long long id,
     int thread_id,
-    const TimeTicks& timestamp,
+    const TraceTicks& timestamp,
     int num_args,
     const char** arg_names,
     const unsigned char* arg_types,
     const unsigned long long* arg_values,
     const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
     unsigned char flags) {
   TraceEventHandle handle = { 0, 0, 0 };
   if (!*category_group_enabled)
     return handle;

   // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
   // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
   // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
   if (thread_is_in_trace_event_.Get())
     return handle;

   AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

   DCHECK(name);
   DCHECK(!timestamp.is_null());

   if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
     id = MangleEventId(id);

-  TimeTicks offset_event_timestamp = OffsetTimestamp(timestamp);
-  TimeTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ?
+  TraceTicks offset_event_timestamp = OffsetTimestamp(timestamp);
+  TraceTicks now = flags & TRACE_EVENT_FLAG_EXPLICIT_TIMESTAMP ?
       OffsetNow() : offset_event_timestamp;
-  TimeTicks thread_now = ThreadNow();
+  ThreadTicks thread_now = ThreadNow();

   ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
   // A ThreadLocalEventBuffer needs the message loop
   // - to know when the thread exits;
   // - to handle the final flush.
   // For a thread without a message loop, or one whose message loop may be
   // blocked, the trace events are added to the main buffer directly.
   if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) {
     thread_local_event_buffer = thread_local_event_buffer_.Get();
     if (thread_local_event_buffer &&
(...skipping 117 matching lines...)

   if (thread_local_event_buffer)
     thread_local_event_buffer->ReportOverhead(now, thread_now);

   return handle;
 }

 // May be called when a COMPLETE event ends and the unfinished event has been
 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
-                                            const TimeTicks& timestamp,
+                                            const TraceTicks& timestamp,
                                             TraceEvent* trace_event) {
   AutoLock thread_info_lock(thread_info_lock_);

   // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
   // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_PHASE_END.
   DCHECK(phase != TRACE_EVENT_PHASE_COMPLETE);

   TimeDelta duration;
   int thread_id = trace_event ?
       trace_event->thread_id() : PlatformThread::CurrentId();
(...skipping 59 matching lines...)
2150 const char* name, 2094 const char* name,
2151 TraceEventHandle handle) { 2095 TraceEventHandle handle) {
2152 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when 2096 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
2153 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> 2097 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
2154 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... 2098 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
2155 if (thread_is_in_trace_event_.Get()) 2099 if (thread_is_in_trace_event_.Get())
2156 return; 2100 return;
2157 2101
2158 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); 2102 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);
2159 2103
2160 TimeTicks thread_now = ThreadNow(); 2104 ThreadTicks thread_now = ThreadNow();
2161 TimeTicks now = OffsetNow(); 2105 TraceTicks now = OffsetNow();
2162 2106
2163 std::string console_message; 2107 std::string console_message;
2164 if (*category_group_enabled & ENABLED_FOR_RECORDING) { 2108 if (*category_group_enabled & ENABLED_FOR_RECORDING) {
2165 OptionalAutoLock lock(&lock_); 2109 OptionalAutoLock lock(&lock_);
2166 2110
2167 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock); 2111 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock);
2168 if (trace_event) { 2112 if (trace_event) {
2169 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE); 2113 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
2170 trace_event->UpdateDuration(now, thread_now); 2114 trace_event->UpdateDuration(now, thread_now);
2171 #if defined(OS_ANDROID) 2115 #if defined(OS_ANDROID)
(...skipping 211 matching lines...)
2383 } 2327 }
2384 2328
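The thread_is_in_trace_event_ guard used in UpdateTraceEventDuration above is worth a note: code reached from inside the trace machinery can itself emit trace events (the comment spells out the ECHO_TO_CONSOLE cycle AddTraceEvent -> LOG(ERROR) -> ... -> TRACE_EVENT), so a thread-local boolean short-circuits the nested call. Here is a standalone sketch of the same idiom, using C++11 thread_local in place of base's ThreadLocalBoolean (names are illustrative):

#include <cassert>

thread_local bool t_in_trace_event = false;

// Stand-in for AutoThreadLocalBoolean: sets the flag on construction and
// clears it on destruction, so it resets even on early returns.
class AutoFlag {
 public:
  explicit AutoFlag(bool* flag) : flag_(flag) {
    assert(!*flag_);
    *flag_ = true;
  }
  ~AutoFlag() { *flag_ = false; }
 private:
  bool* flag_;
};

void EmitTraceEvent() {
  if (t_in_trace_event)
    return;  // a nested call bails out instead of recursing forever
  AutoFlag guard(&t_in_trace_event);
  // ... record the event; any logging from here that re-enters
  // EmitTraceEvent hits the early return above.
}

int main() {
  EmitTraceEvent();
  return 0;
}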
2385 void TraceLog::SetCurrentThreadBlocksMessageLoop() { 2329 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
2386 thread_blocks_message_loop_.Set(true); 2330 thread_blocks_message_loop_.Set(true);
2387 if (thread_local_event_buffer_.Get()) { 2331 if (thread_local_event_buffer_.Get()) {
2388 // This will flush the thread local buffer. 2332 // This will flush the thread local buffer.
2389 delete thread_local_event_buffer_.Get(); 2333 delete thread_local_event_buffer_.Get();
2390 } 2334 }
2391 } 2335 }
2392 2336
2393 bool CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2394 const std::string& str) {
2395 return str.empty() ||
2396 str.at(0) == ' ' ||
2397 str.at(str.length() - 1) == ' ';
2398 }
2399
2400 CategoryFilter::CategoryFilter(const std::string& filter_string) {
2401 if (!filter_string.empty())
2402 Initialize(filter_string);
2403 else
2404 Initialize(CategoryFilter::kDefaultCategoryFilterString);
2405 }
2406
2407 CategoryFilter::CategoryFilter() {
2408 Initialize(CategoryFilter::kDefaultCategoryFilterString);
2409 }
2410
2411 CategoryFilter::CategoryFilter(const CategoryFilter& cf)
2412 : included_(cf.included_),
2413 disabled_(cf.disabled_),
2414 excluded_(cf.excluded_),
2415 delays_(cf.delays_) {
2416 }
2417
2418 CategoryFilter::~CategoryFilter() {
2419 }
2420
2421 CategoryFilter& CategoryFilter::operator=(const CategoryFilter& rhs) {
2422 if (this == &rhs)
2423 return *this;
2424
2425 included_ = rhs.included_;
2426 disabled_ = rhs.disabled_;
2427 excluded_ = rhs.excluded_;
2428 delays_ = rhs.delays_;
2429 return *this;
2430 }
2431
2432 void CategoryFilter::Initialize(const std::string& filter_string) {
2433 // Tokenize list of categories, delimited by ','.
2434 StringTokenizer tokens(filter_string, ",");
2435 // Add each token to the appropriate list (included_,excluded_).
2436 while (tokens.GetNext()) {
2437 std::string category = tokens.token();
2438 // Ignore empty categories.
2439 if (category.empty())
2440 continue;
2441 // Synthetic delays are of the form 'DELAY(delay;option;option;...)'.
2442 if (category.find(kSyntheticDelayCategoryFilterPrefix) == 0 &&
2443 category.at(category.size() - 1) == ')') {
2444 category = category.substr(
2445 strlen(kSyntheticDelayCategoryFilterPrefix),
2446 category.size() - strlen(kSyntheticDelayCategoryFilterPrefix) - 1);
2447 size_t name_length = category.find(';');
2448 if (name_length != std::string::npos && name_length > 0 &&
2449 name_length != category.size() - 1) {
2450 delays_.push_back(category);
2451 }
2452 } else if (category.at(0) == '-') {
2453 // Excluded categories start with '-'.
2454 // Remove '-' from category string.
2455 category = category.substr(1);
2456 excluded_.push_back(category);
2457 } else if (category.compare(0, strlen(TRACE_DISABLED_BY_DEFAULT("")),
2458 TRACE_DISABLED_BY_DEFAULT("")) == 0) {
2459 disabled_.push_back(category);
2460 } else {
2461 included_.push_back(category);
2462 }
2463 }
2464 }
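To make the tokenization above concrete, here is a hypothetical walk-through (the category names are invented for illustration; the private members are shown only to describe the resulting state):

// CategoryFilter f("cat1,-cat2,disabled-by-default-gpu,DELAY(gpu.Present;16)");
//
// After Initialize():
//   included_ == { "cat1" }
//   excluded_ == { "cat2" }                     // leading '-' stripped
//   disabled_ == { "disabled-by-default-gpu" }  // matched by prefix
//   delays_   == { "gpu.Present;16" }           // "DELAY(" and ")" stripped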
2465
2466 void CategoryFilter::WriteString(const StringList& values,
2467 std::string* out,
2468 bool included) const {
2469 bool prepend_comma = !out->empty();
2470 int token_cnt = 0;
2471 for (StringList::const_iterator ci = values.begin();
2472 ci != values.end(); ++ci) {
2473 if (token_cnt > 0 || prepend_comma)
2474 StringAppendF(out, ",");
2475 StringAppendF(out, "%s%s", (included ? "" : "-"), ci->c_str());
2476 ++token_cnt;
2477 }
2478 }
2479
2480 void CategoryFilter::WriteString(const StringList& delays,
2481 std::string* out) const {
2482 bool prepend_comma = !out->empty();
2483 int token_cnt = 0;
2484 for (StringList::const_iterator ci = delays.begin();
2485 ci != delays.end(); ++ci) {
2486 if (token_cnt > 0 || prepend_comma)
2487 StringAppendF(out, ",");
2488 StringAppendF(out, "%s%s)", kSyntheticDelayCategoryFilterPrefix,
2489 ci->c_str());
2490 ++token_cnt;
2491 }
2492 }
2493
2494 std::string CategoryFilter::ToString() const {
2495 std::string filter_string;
2496 WriteString(included_, &filter_string, true);
2497 WriteString(disabled_, &filter_string, true);
2498 WriteString(excluded_, &filter_string, false);
2499 WriteString(delays_, &filter_string);
2500 return filter_string;
2501 }
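ToString() above serializes in a fixed order: included patterns, then disabled-by-default ones, then exclusions re-prefixed with '-', then delays re-wrapped in DELAY(...). Continuing the hypothetical filter from the previous walk-through:

// f.ToString() == "cat1,disabled-by-default-gpu,-cat2,DELAY(gpu.Present;16)"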
2502
2503 bool CategoryFilter::IsCategoryGroupEnabled(
2504 const char* category_group_name) const {
2505 // TraceLog should call this method only as part of enabling/disabling
2506 // categories.
2507
2508 bool had_enabled_by_default = false;
2509 DCHECK(category_group_name);
2510 CStringTokenizer category_group_tokens(
2511 category_group_name, category_group_name + strlen(category_group_name),
2512 ",");
2513 while (category_group_tokens.GetNext()) {
2514 std::string category_group_token = category_group_tokens.token();
2515 // Don't allow empty tokens, nor tokens with leading or trailing space.
2516 DCHECK(!CategoryFilter::IsEmptyOrContainsLeadingOrTrailingWhitespace(
2517 category_group_token))
2518 << "Disallowed category string";
2519 if (IsCategoryEnabled(category_group_token.c_str())) {
2520 return true;
2521 }
2522 if (!MatchPattern(category_group_token.c_str(),
2523 TRACE_DISABLED_BY_DEFAULT("*")))
2524 had_enabled_by_default = true;
2525 }
2526 // Do a second pass to check for explicitly disabled categories
2527 // (those explicitly enabled have priority due to first pass).
2528 category_group_tokens.Reset();
2529 bool category_group_disabled = false;
2530 while (category_group_tokens.GetNext()) {
2531 std::string category_group_token = category_group_tokens.token();
2532 for (StringList::const_iterator ci = excluded_.begin();
2533 ci != excluded_.end(); ++ci) {
2534 if (MatchPattern(category_group_token.c_str(), ci->c_str())) {
2535 // The current token of category_group_name is present in the excluded_
2536 // list. Flag the exclusion and proceed further to check whether any of
2537 // the remaining categories of category_group_name are absent from the
2538 // excluded_ list.
2539 category_group_disabled = true;
2540 break;
2541 }
2542 // The current category of category_group_name is not present in the
2543 // excluded_ list, so it must be in the included_ list. Enable the
2544 // category_group_name for recording.
2545 category_group_disabled = false;
2546 }
2547 // One of the categories present in category_group_name is not in the
2548 // excluded_ list, which implies this category_group_name group can be
2549 // enabled for recording, since one of its categories is not excluded.
2550 if (!category_group_disabled)
2551 break;
2552 }
2553 // If the category group is not excluded, and there are no included patterns
2554 // we consider this category group enabled, as long as it had categories
2555 // other than disabled-by-default.
2556 return !category_group_disabled &&
2557 included_.empty() && had_enabled_by_default;
2558 }
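A hypothetical walk-through of the two passes above, for a filter of just "-cat1" applied to the category group "cat1,cat2":

// CategoryFilter f("-cat1");
//
// f.IsCategoryGroupEnabled("cat1,cat2");
//   pass 1: neither token is explicitly enabled; had_enabled_by_default
//           becomes true (neither token matches disabled-by-default-*)
//   pass 2: "cat1" matches excluded_, but "cat2" does not, so the group is
//           not considered disabled
//   result: true -- one non-excluded category is enough to keep the group
//
// f.IsCategoryGroupEnabled("cat1");  // false: every token is excluded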
2559
2560 bool CategoryFilter::IsCategoryEnabled(const char* category_name) const {
2561 StringList::const_iterator ci;
2562
2563 // Check the disabled- filters and the disabled-* wildcard first so that a
2564 // "*" filter does not include the disabled.
2565 for (ci = disabled_.begin(); ci != disabled_.end(); ++ci) {
2566 if (MatchPattern(category_name, ci->c_str()))
2567 return true;
2568 }
2569
2570 if (MatchPattern(category_name, TRACE_DISABLED_BY_DEFAULT("*")))
2571 return false;
2572
2573 for (ci = included_.begin(); ci != included_.end(); ++ci) {
2574 if (MatchPattern(category_name, ci->c_str()))
2575 return true;
2576 }
2577
2578 return false;
2579 }
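The ordering in IsCategoryEnabled() above is what gives disabled-by-default categories their special treatment: explicit disabled- patterns win first, and the disabled-by-default-* check keeps a bare "*" from dragging those categories in. A hypothetical example:

// CategoryFilter f("*,disabled-by-default-memory");
//
// f.IsCategoryEnabled("cc");                          // true: matches "*"
// f.IsCategoryEnabled("disabled-by-default-memory");  // true: explicit
//                                                     // match in disabled_
// f.IsCategoryEnabled("disabled-by-default-gpu");     // false: "*" never
//                                                     // matches these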
2580
2581 bool CategoryFilter::HasIncludedPatterns() const {
2582 return !included_.empty();
2583 }
2584
2585 void CategoryFilter::Merge(const CategoryFilter& nested_filter) {
2586 // Keep included patterns only if both filters have an included entry.
2587 // Otherwise, one of the filters was specifying "*" and we want to honour the
2588 // broadest filter.
2589 if (HasIncludedPatterns() && nested_filter.HasIncludedPatterns()) {
2590 included_.insert(included_.end(),
2591 nested_filter.included_.begin(),
2592 nested_filter.included_.end());
2593 } else {
2594 included_.clear();
2595 }
2596
2597 disabled_.insert(disabled_.end(),
2598 nested_filter.disabled_.begin(),
2599 nested_filter.disabled_.end());
2600 excluded_.insert(excluded_.end(),
2601 nested_filter.excluded_.begin(),
2602 nested_filter.excluded_.end());
2603 delays_.insert(delays_.end(),
2604 nested_filter.delays_.begin(),
2605 nested_filter.delays_.end());
2606 }
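Two hypothetical examples of the merge rule above: when both filters name included categories the union is kept, but a filter with no included patterns is effectively "*", so the merged result drops the included list to honour the broader side.

// CategoryFilter a("cat1"), b("cat2");
// a.Merge(b);  // a's included_ == { "cat1", "cat2" }
//
// CategoryFilter c("cat1"), d("-cat3");  // d has no included patterns
// c.Merge(d);  // c's included_ is cleared: c now records everything
//              // except cat3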
2607
2608 void CategoryFilter::Clear() {
2609 included_.clear();
2610 disabled_.clear();
2611 excluded_.clear();
2612 }
2613
2614 const CategoryFilter::StringList&
2615 CategoryFilter::GetSyntheticDelayValues() const {
2616 return delays_;
2617 }
2618
2619 } // namespace trace_event 2337 } // namespace trace_event
2620 } // namespace base 2338 } // namespace base
2621 2339
2622 namespace trace_event_internal { 2340 namespace trace_event_internal {
2623 2341
2624 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( 2342 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
2625 const char* category_group, const char* name) { 2343 const char* category_group, const char* name) {
2626 // The single atom works because for now the category_group can only be "gpu". 2344 // The single atom works because for now the category_group can only be "gpu".
2627 DCHECK_EQ(strcmp(category_group, "gpu"), 0); 2345 DCHECK_EQ(strcmp(category_group, "gpu"), 0);
2628 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0; 2346 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
2629 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( 2347 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
2630 category_group, atomic, category_group_enabled_); 2348 category_group, atomic, category_group_enabled_);
2631 name_ = name; 2349 name_ = name;
2632 if (*category_group_enabled_) { 2350 if (*category_group_enabled_) {
2633 event_handle_ = 2351 event_handle_ =
2634 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP( 2352 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
2635 TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name, 2353 TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
2636 trace_event_internal::kNoEventId, 2354 trace_event_internal::kNoEventId,
2637 static_cast<int>(base::PlatformThread::CurrentId()), 2355 static_cast<int>(base::PlatformThread::CurrentId()),
2638 base::TimeTicks::NowFromSystemTraceTime(), 2356 base::TraceTicks::Now(), 0, NULL, NULL, NULL, NULL,
2639 0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE); 2357 TRACE_EVENT_FLAG_NONE);
2640 } 2358 }
2641 } 2359 }
2642 2360
2643 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { 2361 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
2644 if (*category_group_enabled_) { 2362 if (*category_group_enabled_) {
2645 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, 2363 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
2646 name_, event_handle_); 2364 name_, event_handle_);
2647 } 2365 }
2648 } 2366 }
2649 2367
2650 } // namespace trace_event_internal 2368 } // namespace trace_event_internal
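ScopedTraceBinaryEfficient above is the RAII shape this file uses for COMPLETE events: the constructor writes a TRACE_EVENT_PHASE_COMPLETE record stamped with the start time, and the destructor patches in the duration. Below is a self-contained sketch of the same begin-then-patch-on-destruction idiom, with a plain timer standing in for the trace log (not the Chromium API):

#include <chrono>
#include <cstdio>

// Hypothetical scoped event: records the start time on construction and
// "patches" the duration on destruction, mirroring AddTraceEvent /
// TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION.
class ScopedCompleteEvent {
 public:
  explicit ScopedCompleteEvent(const char* name)
      : name_(name), start_(std::chrono::steady_clock::now()) {}
  ~ScopedCompleteEvent() {
    auto us = std::chrono::duration_cast<std::chrono::microseconds>(
                  std::chrono::steady_clock::now() - start_).count();
    std::printf("%s took %lld us\n", name_, static_cast<long long>(us));
  }
 private:
  const char* name_;
  std::chrono::steady_clock::time_point start_;
};

int main() {
  ScopedCompleteEvent trace("DoGpuWork");  // one COMPLETE event per scope
  // ... traced work happens here ...
  return 0;
}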