Index: base/debug/trace_event_impl.cc
diff --git a/base/debug/trace_event_impl.cc b/base/debug/trace_event_impl.cc
index 2f63f91caa553e7819d1e59f27c0da3ca5e6c707..ff88b261a0296cf0dc608d289f94a986e6e1e268 100644
--- a/base/debug/trace_event_impl.cc
+++ b/base/debug/trace_event_impl.cc
@@ -54,6 +54,8 @@ const size_t kTraceEventVectorBufferSize = 250000;
 const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4;
 const size_t kTraceEventBatchSize = 1000;
 const size_t kTraceEventInitialBufferSize = 1024;
+// Stores up to 30 seconds of samples at a 1 ms sampling interval.
+const size_t kSamplingTraceEventBufferSize = 30000;
 #define MAX_CATEGORY_GROUPS 100
@@ -93,9 +95,10 @@ const char kEnableSampling[] = "enable-sampling";
 class TraceBufferRingBuffer : public TraceBuffer {
  public:
-  TraceBufferRingBuffer()
+  explicit TraceBufferRingBuffer(size_t buffer_size)
       : unused_event_index_(0),
-        oldest_event_index_(0) {
+        oldest_event_index_(0),
+        buffer_size_(buffer_size) {
     logged_events_.reserve(kTraceEventInitialBufferSize);
   }
@@ -107,9 +110,10 @@ class TraceBufferRingBuffer : public TraceBuffer {
     else
       logged_events_.push_back(event);
-    unused_event_index_ = NextIndex(unused_event_index_);
+    unused_event_index_ = NextIndex(unused_event_index_, buffer_size_);
     if (unused_event_index_ == oldest_event_index_) {
-      oldest_event_index_ = NextIndex(oldest_event_index_);
+      oldest_event_index_ = NextIndex(oldest_event_index_, buffer_size_);
     }
   }
@@ -121,7 +125,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
     DCHECK(HasMoreEvents());
     size_t next = oldest_event_index_;
-    oldest_event_index_ = NextIndex(oldest_event_index_);
+    oldest_event_index_ = NextIndex(oldest_event_index_, buffer_size_);
     return GetEventAt(next);
   }
@@ -140,7 +144,7 @@ class TraceBufferRingBuffer : public TraceBuffer {
           strcmp(event_name.c_str(), event.name()) == 0) {
         ++notify_count;
       }
-      index = NextIndex(index);
+      index = NextIndex(index, buffer_size_);
     }
     return notify_count;
   }
@@ -158,16 +162,29 @@ class TraceBufferRingBuffer : public TraceBuffer {
-    return kTraceEventRingBufferSize;
+    return buffer_size_;
   }
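+  // Returns a heap-allocated copy of the buffered events in oldest-to-newest
+  // order. The caller takes ownership of the returned buffer.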
+  virtual TraceBuffer* Clone() const OVERRIDE {
+    TraceBufferRingBuffer* cloned_buffer =
+        new TraceBufferRingBuffer(buffer_size_);
+    size_t index = oldest_event_index_;
+    while (index != unused_event_index_) {
+      const TraceEvent& event = GetEventAt(index);
+      cloned_buffer->AddEvent(event);
+      index = NextIndex(index, buffer_size_);
+    }
+    return cloned_buffer;
+  }
+
  private:
-  static size_t NextIndex(size_t index) {
+  static size_t NextIndex(size_t index, size_t buffer_size) {
     index++;
-    if (index >= kTraceEventRingBufferSize)
+    if (index >= buffer_size)
       index = 0;
     return index;
   }
   size_t unused_event_index_;
   size_t oldest_event_index_;
+  size_t buffer_size_;
   std::vector<TraceEvent> logged_events_;
   DISALLOW_COPY_AND_ASSIGN(TraceBufferRingBuffer);
@@ -231,6 +248,11 @@ class TraceBufferVector : public TraceBuffer {
     return kTraceEventVectorBufferSize;
   }
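+  // Cloning is only needed for continuous sampling, which always uses
+  // TraceBufferRingBuffer, so the recording buffers do not implement it.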
+  virtual TraceBuffer* Clone() const OVERRIDE {
+    NOTIMPLEMENTED();
+    return NULL;
+  }
+
  private:
   size_t current_iteration_index_;
   std::vector<TraceEvent> logged_events_;
@@ -267,6 +289,11 @@ class TraceBufferDiscardsEvents : public TraceBuffer {
     NOTREACHED();
     return *static_cast<TraceEvent*>(NULL);
   }
+
+  virtual TraceBuffer* Clone() const OVERRIDE {
+    NOTIMPLEMENTED();
+    return NULL;
+  }
 };
 ////////////////////////////////////////////////////////////////////////////////
@@ -652,7 +679,8 @@ class TraceSamplingThread : public PlatformThread::Delegate {
   // Implementation of PlatformThread::Delegate:
   virtual void ThreadMain() OVERRIDE;
-  static void DefaultSampleCallback(TraceBucketData* bucekt_data);
+  static void DefaultSamplingCallback(TraceBucketData* bucket_data);
+  static void ContinuousSamplingCallback(TraceBucketData* bucket_data);
   void Stop();
   void InstallWaitableEventForSamplingTesting(WaitableEvent* waitable_event);
@@ -699,7 +727,8 @@ void TraceSamplingThread::ThreadMain() {
 }
 // static
-void TraceSamplingThread::DefaultSampleCallback(TraceBucketData* bucket_data) {
+void TraceSamplingThread::DefaultSamplingCallback(
+    TraceBucketData* bucket_data) {
   TRACE_EVENT_API_ATOMIC_WORD category_and_name =
       TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
   if (!category_and_name)
@@ -714,6 +743,24 @@ void TraceSamplingThread::DefaultSampleCallback(TraceBucketData* bucket_data) {
       name, 0, 0, NULL, NULL, NULL, NULL, 0);
 }
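+// Like DefaultSamplingCallback, but marks each sample with
+// TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING so that it is recorded in the
+// continuous sampling buffer rather than the regular trace buffer.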
+// static
+void TraceSamplingThread::ContinuousSamplingCallback(
+    TraceBucketData* bucket_data) {
+  TRACE_EVENT_API_ATOMIC_WORD category_and_name =
+      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
+  if (!category_and_name)
+    return;
+  const char* const combined =
+      reinterpret_cast<const char* const>(category_and_name);
+  const char* category_group;
+  const char* name;
+  ExtractCategoryAndName(combined, &category_group, &name);
+  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
+      TraceLog::GetCategoryGroupEnabled(category_group),
+      name, 0, 0, NULL, NULL, NULL, NULL,
+      TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING);
+}
+
 void TraceSamplingThread::GetSamples() {
   for (size_t i = 0; i < sample_buckets_.size(); ++i) {
     TraceBucketData* bucket_data = &sample_buckets_[i];
@@ -746,7 +793,6 @@ void TraceSamplingThread::InstallWaitableEventForSamplingTesting(
   waitable_event_for_testing_.reset(waitable_event);
 }
-
 TraceBucketData::TraceBucketData(base::subtle::AtomicWord* bucket,
                                  const char* name,
                                  TraceSampleCallback callback)
@@ -819,6 +865,7 @@ TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) {
 TraceLog::TraceLog()
     : enable_count_(0),
+      continuous_sampling_enable_count_(0),
       num_traces_recorded_(0),
       event_callback_(NULL),
       dispatching_to_observer_list_(false),
@@ -1003,15 +1050,15 @@ void TraceLog::SetEnabled(const CategoryFilter& category_filter,
     sampling_thread_->RegisterSampleBucket(
         &g_trace_state[0],
         "bucket0",
-        Bind(&TraceSamplingThread::DefaultSampleCallback));
+        Bind(&TraceSamplingThread::DefaultSamplingCallback));
     sampling_thread_->RegisterSampleBucket(
         &g_trace_state[1],
         "bucket1",
-        Bind(&TraceSamplingThread::DefaultSampleCallback));
+        Bind(&TraceSamplingThread::DefaultSamplingCallback));
     sampling_thread_->RegisterSampleBucket(
         &g_trace_state[2],
         "bucket2",
-        Bind(&TraceSamplingThread::DefaultSampleCallback));
+        Bind(&TraceSamplingThread::DefaultSamplingCallback));
     if (!PlatformThread::Create(
             0, sampling_thread_.get(), &sampling_thread_handle_)) {
       DCHECK(false) << "failed to create thread";
@@ -1065,7 +1112,7 @@ void TraceLog::SetDisabled() {
   watch_category_ = NULL;
   watch_event_name_ = "";
   UpdateCategoryGroupEnabledFlags();
-  AddMetadataEvents();
+  AddMetadataEvents(0);
   dispatching_to_observer_list_ = true;
   observer_list = enabled_state_observer_list_;
@@ -1082,6 +1129,51 @@ void TraceLog::SetDisabled() {
   }
 }
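+// Starts the continuous sampling thread and its ring buffer. Calls are
+// refcounted: sampling keeps running until SetContinuousSamplingDisabled()
+// has been called once for every call to SetContinuousSamplingEnabled().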
+void TraceLog::SetContinuousSamplingEnabled() {
+  AutoLock lock(lock_);
+  if (continuous_sampling_enable_count_++ > 0)
+    return;
+
+  continuous_sampling_logged_events_.reset(
+      new TraceBufferRingBuffer(kSamplingTraceEventBufferSize));
+
+  continuous_sampling_thread_.reset(new TraceSamplingThread);
+  continuous_sampling_thread_->RegisterSampleBucket(
+      &g_trace_state[0],
+      "bucket0",
+      Bind(&TraceSamplingThread::ContinuousSamplingCallback));
+  continuous_sampling_thread_->RegisterSampleBucket(
+      &g_trace_state[1],
+      "bucket1",
+      Bind(&TraceSamplingThread::ContinuousSamplingCallback));
+  continuous_sampling_thread_->RegisterSampleBucket(
+      &g_trace_state[2],
+      "bucket2",
+      Bind(&TraceSamplingThread::ContinuousSamplingCallback));
+  if (!PlatformThread::Create(
+          0,
+          continuous_sampling_thread_.get(),
+          &continuous_sampling_thread_handle_)) {
+    DCHECK(false) << "failed to create thread";
+  }
+}
+
+void TraceLog::SetContinuousSamplingDisabled() {
+  AutoLock lock(lock_);
+  DCHECK(continuous_sampling_enable_count_ > 0);
+  if (--continuous_sampling_enable_count_ != 0)
+    return;
+
+  if (continuous_sampling_thread_.get()) {
+    continuous_sampling_thread_->Stop();
+    lock_.Release();
+    PlatformThread::Join(continuous_sampling_thread_handle_);
+    lock_.Acquire();
+    continuous_sampling_thread_handle_ = PlatformThreadHandle();
+    continuous_sampling_thread_.reset();
+  }
+}
+
 int TraceLog::GetNumTracesRecorded() {
   AutoLock lock(lock_);
   if (enable_count_ == 0)
@@ -1123,7 +1215,7 @@ void TraceLog::SetNotificationCallback(
 TraceBuffer* TraceLog::GetTraceBuffer() {
   if (trace_options_ & RECORD_CONTINUOUSLY)
-    return new TraceBufferRingBuffer();
+    return new TraceBufferRingBuffer(kTraceEventRingBufferSize);
   else if (trace_options_ & ECHO_TO_CONSOLE)
     return new TraceBufferDiscardsEvents();
   return new TraceBufferVector();
@@ -1164,6 +1256,37 @@ void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
   }
 }
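+// Serializes the continuously sampled events as JSON and hands them to |cb|
+// in batches of kTraceEventBatchSize. The buffer is cloned while holding the
+// lock so that sampling can continue while the copy is serialized.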
+void TraceLog::FlushContinuousSamplingTracing(
+    const TraceLog::OutputCallback& cb) {
+  if (!continuous_sampling_enable_count_)
+    return;
+
+  scoped_ptr<TraceBuffer> previous_logged_events;
+  {
+    AutoLock lock(lock_);
+    AddMetadataEvents(TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING);
+    previous_logged_events.reset(continuous_sampling_logged_events_->Clone());
+  }  // release lock
+
+  while (previous_logged_events->HasMoreEvents()) {
+    scoped_refptr<RefCountedString> json_events_str_ptr =
+        new RefCountedString();
+
+    for (size_t i = 0; i < kTraceEventBatchSize; ++i) {
+      if (i > 0)
+        *(&(json_events_str_ptr->data())) += ",";
+
+      previous_logged_events->NextEvent().AppendAsJSON(
+          &(json_events_str_ptr->data()));
+
+      if (!previous_logged_events->HasMoreEvents())
+        break;
+    }
+
+    cb.Run(json_events_str_ptr);
+  }
+}
+
 void TraceLog::AddTraceEvent(
     char phase,
     const unsigned char* category_group_enabled,
@@ -1201,14 +1324,20 @@ void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
   if (flags & TRACE_EVENT_FLAG_MANGLE_ID)
     id ^= process_id_hash_;
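+  // Events emitted by the continuous sampling thread carry
+  // TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING and are routed to the dedicated
+  // continuous sampling buffer; all other events go to the regular buffer.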
+  TraceBuffer* logged_events = (flags & TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING)
+      ? continuous_sampling_logged_events_.get() : logged_events_.get();
+
 #if defined(OS_ANDROID)
   SendToATrace(phase, GetCategoryGroupName(category_group_enabled), name, id,
                num_args, arg_names, arg_types, arg_values, convertable_values,
                flags);
 #endif
-  if (!IsCategoryGroupEnabled(category_group_enabled))
-    return;
+  // Continuous sampling records all categories; filtering by category does
+  // not make sense for sampling-based tracing.
+  if (!IsCategoryGroupEnabled(category_group_enabled) &&
+      !(flags & TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING))
+    return;
   TimeTicks now = timestamp - time_offset_;
   base::TimeTicks thread_now;
@@ -1265,7 +1394,7 @@ void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
-    if (logged_events_->IsFull())
+    if (logged_events->IsFull())
       break;
-    logged_events_->AddEvent(trace_event);
+    logged_events->AddEvent(trace_event);
     if (trace_options_ & ECHO_TO_CONSOLE) {
       TimeDelta duration;
@@ -1393,19 +1522,22 @@ void AddMetadataEventToBuffer(
 }
-void TraceLog::AddMetadataEvents() {
+void TraceLog::AddMetadataEvents(unsigned char flags) {
   lock_.AssertAcquired();
+  TraceBuffer* logged_events = (flags & TRACE_EVENT_FLAG_CONTINUOUS_SAMPLING)
+      ? continuous_sampling_logged_events_.get() : logged_events_.get();
+
   int current_thread_id = static_cast<int>(base::PlatformThread::CurrentId());
   if (process_sort_index_ != 0) {
-    AddMetadataEventToBuffer(logged_events_.get(),
+    AddMetadataEventToBuffer(logged_events,
                              current_thread_id,
                              "process_sort_index", "sort_index",
                              process_sort_index_);
   }
   if (process_name_.size()) {
-    AddMetadataEventToBuffer(logged_events_.get(),
+    AddMetadataEventToBuffer(logged_events,
                              current_thread_id,
                              "process_name", "name",
                              process_name_);
@@ -1418,7 +1550,7 @@ void TraceLog::AddMetadataEvents() {
          it++) {
       labels.push_back(it->second);
     }
-    AddMetadataEventToBuffer(logged_events_.get(),
+    AddMetadataEventToBuffer(logged_events,
                              current_thread_id,
                              "process_labels", "labels",
                              JoinString(labels, ','));
@@ -1430,7 +1562,7 @@ void TraceLog::AddMetadataEvents() {
        it++) {
     if (it->second == 0)
       continue;
-    AddMetadataEventToBuffer(logged_events_.get(),
+    AddMetadataEventToBuffer(logged_events,
                              it->first,
                              "thread_sort_index", "sort_index",
                              it->second);
@@ -1442,7 +1574,8 @@ void TraceLog::AddMetadataEvents() {
        it++) {
     if (it->second.empty())
       continue;
-    AddMetadataEventToBuffer(logged_events_.get(),
+
+    AddMetadataEventToBuffer(logged_events,
                              it->first,
                              "thread_name", "name",
                              it->second);
@@ -1451,7 +1584,16 @@ void TraceLog::AddMetadataEvents() {
 void TraceLog::InstallWaitableEventForSamplingTesting(
     WaitableEvent* waitable_event) {
-  sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
+  if (sampling_thread_)
+    sampling_thread_->InstallWaitableEventForSamplingTesting(waitable_event);
+}
+
+void TraceLog::InstallWaitableEventForContinuousSamplingTesting(
+    WaitableEvent* waitable_event) {
+  if (continuous_sampling_thread_) {
+    continuous_sampling_thread_->
+        InstallWaitableEventForSamplingTesting(waitable_event);
+  }
 }
 void TraceLog::DeleteForTesting() {
@@ -1490,7 +1632,7 @@ void TraceLog::UpdateProcessLabel(
 void TraceLog::RemoveProcessLabel(int label_id) {
   AutoLock lock(lock_);
   base::hash_map<int, std::string>::iterator it = process_labels_.find(
-    label_id);
+      label_id);
   if (it == process_labels_.end())
     return;
@@ -1686,7 +1828,7 @@ ScopedTrace::ScopedTrace(
     name_ = name;
     TRACE_EVENT_API_ADD_TRACE_EVENT(
         TRACE_EVENT_PHASE_BEGIN,    // phase
-        category_group_enabled_,          // category enabled
+        category_group_enabled_,    // category enabled
        name,                       // name
        0,                          // id
        0,                          // num_args