Index: base/debug/trace_event_impl.cc
diff --git a/base/debug/trace_event_impl.cc b/base/debug/trace_event_impl.cc
index fa23defac0db45426066dcffce7e518169205114..9933d899c654a7b73694bad48f8916c1397fc7a3 100644
--- a/base/debug/trace_event_impl.cc
+++ b/base/debug/trace_event_impl.cc
@@ -348,6 +348,7 @@ TraceLog* TraceLog::GetInstance() {
 TraceLog::TraceLog()
     : enabled_(false),
       dispatching_to_observer_list_(false),
+      next_fake_thread_id_(-1),
       watch_category_(NULL) {
   // Trace is enabled or disabled on one thread while other threads are
   // accessing the enabled flag. We don't care whether edge-case events are
@@ -594,6 +595,56 @@ void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
   }
 }
 
+int TraceLog::AddTraceRawEvent(char phase,
+                               const unsigned char* category_enabled,
+                               const char* thread_name,
+                               const char* name,
+                               int64 timestamp,
+                               unsigned long long id,
+                               int num_args,
+                               const char** arg_names,
+                               const unsigned char* arg_types,
+                               const unsigned long long* arg_values,
+                               int threshold_begin_id,
+                               long long threshold,
+                               unsigned char flags) {
+  DCHECK(thread_name);
+  DCHECK(name);
+  TimeTicks now = TimeTicks::FromInternalValue(timestamp);
+  NotificationHelper notifier(this);
+  int ret_begin_id = -1;
+  {
+    AutoLock lock(lock_);
+    if (!*category_enabled)
+      return -1;
+    if (logged_events_.size() >= kTraceEventBufferSize)
+      return -1;
+
+    int thread_id;
+    base::hash_map<std::string, int>::iterator existing_id =
+        fake_thread_names_.find(thread_name);
+    if (existing_id == fake_thread_names_.end()) {
+      // This is a new thread name, add a new id.
+      thread_id = next_fake_thread_id_;
+      next_fake_thread_id_--;
+
+      fake_thread_names_[thread_name] = thread_id;
+      thread_names_[thread_id] = thread_name;
+    } else {
+      thread_id = existing_id->second;
+    }
+
+    ret_begin_id = AddTraceEventInternal(notifier, now, phase, category_enabled,
+                                         thread_id, name, id, num_args,
+                                         arg_names, arg_types, arg_values,
+                                         threshold_begin_id, threshold, flags);
+  } // release lock
+
+  notifier.SendNotificationIfAny();
+
+  return ret_begin_id;
+}
+
 int TraceLog::AddTraceEvent(char phase,
                             const unsigned char* category_enabled,
                             const char* name,
@@ -652,41 +703,63 @@ int TraceLog::AddTraceEvent(char phase,
       }
     }
 
-    if (threshold_begin_id > -1) {
-      DCHECK(phase == TRACE_EVENT_PHASE_END);
-      size_t begin_i = static_cast<size_t>(threshold_begin_id);
-      // Return now if there has been a flush since the begin event was posted.
-      if (begin_i >= logged_events_.size())
-        return -1;
-      // Determine whether to drop the begin/end pair.
-      TimeDelta elapsed = now - logged_events_[begin_i].timestamp();
-      if (elapsed < TimeDelta::FromMicroseconds(threshold)) {
-        // Remove begin event and do not add end event.
-        // This will be expensive if there have been other events in the
-        // mean time (should be rare).
-        logged_events_.erase(logged_events_.begin() + begin_i);
-        return -1;
-      }
-    }
+    ret_begin_id = AddTraceEventInternal(notifier, now, phase, category_enabled,
+                                         thread_id, name, id, num_args,
+                                         arg_names, arg_types, arg_values,
+                                         threshold_begin_id, threshold, flags);
+  } // release lock
+
+  notifier.SendNotificationIfAny();
+
+  return ret_begin_id;
+}
 
-    if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
-      id ^= process_id_hash_;
+int TraceLog::AddTraceEventInternal(NotificationHelper& notifier,
+                                    TimeTicks timestamp,
+                                    char phase,
+                                    const unsigned char* category_enabled,
+                                    int thread_id,
+                                    const char* name,
+                                    unsigned long long id,
+                                    int num_args,
+                                    const char** arg_names,
+                                    const unsigned char* arg_types,
+                                    const unsigned long long* arg_values,
+                                    int threshold_begin_id,
+                                    long long threshold,
+                                    unsigned char flags) {
+  if (threshold_begin_id > -1) {
+    DCHECK(phase == TRACE_EVENT_PHASE_END);
+    size_t begin_i = static_cast<size_t>(threshold_begin_id);
+    // Return now if there has been a flush since the begin event was posted.
+    if (begin_i >= logged_events_.size())
+      return -1;
+    // Determine whether to drop the begin/end pair.
+    TimeDelta elapsed = timestamp - logged_events_[begin_i].timestamp();
+    if (elapsed < TimeDelta::FromMicroseconds(threshold)) {
+      // Remove begin event and do not add end event.
+      // This will be expensive if there have been other events in the
+      // mean time (should be rare).
+      logged_events_.erase(logged_events_.begin() + begin_i);
+      return -1;
+    }
+  }
 
-    ret_begin_id = static_cast<int>(logged_events_.size());
-    logged_events_.push_back(
-        TraceEvent(thread_id,
-                   now, phase, category_enabled, name, id,
-                   num_args, arg_names, arg_types, arg_values,
-                   flags));
+  if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
+    id ^= process_id_hash_;
 
-    if (logged_events_.size() == kTraceEventBufferSize)
-      notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL);
+  int ret_begin_id = static_cast<int>(logged_events_.size());
+  logged_events_.push_back(
+      TraceEvent(thread_id,
+                 timestamp, phase, category_enabled, name, id,
+                 num_args, arg_names, arg_types, arg_values,
+                 flags));
 
-    if (watch_category_ == category_enabled && watch_event_name_ == name)
-      notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
-  } // release lock
+  if (logged_events_.size() == kTraceEventBufferSize)
+    notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL);
 
-  notifier.SendNotificationIfAny();
+  if (watch_category_ == category_enabled && watch_event_name_ == name)
+    notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
 
   return ret_begin_id;
 }
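
The patch above only adds the plumbing; for orientation, here is a minimal, hypothetical call-site sketch (not part of the change) showing how TraceLog::AddTraceRawEvent() could be used to inject an externally captured sample under a synthetic thread name. It assumes the existing TraceLog::GetCategoryEnabled() helper and the TRACE_EVENT_PHASE_INSTANT / TRACE_EVENT_FLAG_NONE constants from base/debug/trace_event.h; the category, thread, and event names and the external_sample_ticks variable are made up for illustration.

// Illustration only -- not part of the patch.
using base::debug::TraceLog;

// Look up the category state once; the event is dropped while it is disabled.
const unsigned char* category_enabled =
    TraceLog::GetCategoryEnabled("gpu");         // example category name

// base::TimeTicks external_sample_ticks = ...;  // timestamp from the source
TraceLog::GetInstance()->AddTraceRawEvent(
    TRACE_EVENT_PHASE_INSTANT,                   // phase
    category_enabled,
    "ExternalSamplerThread",                     // fake thread name
    "SampleCaptured",                            // event name
    external_sample_ticks.ToInternalValue(),     // int64 timestamp
    0,                                           // id (unused here)
    0, NULL, NULL, NULL,                         // no arguments
    -1, 0,                                       // no threshold begin id / threshold
    TRACE_EVENT_FLAG_NONE);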