Index: base/debug/trace_event_impl.cc
diff --git a/base/debug/trace_event_impl.cc b/base/debug/trace_event_impl.cc
index 2f63f91caa553e7819d1e59f27c0da3ca5e6c707..013abc4f5c7d0fb957a8978db3e914b607f40800 100644
--- a/base/debug/trace_event_impl.cc
+++ b/base/debug/trace_event_impl.cc
@@ -14,6 +14,7 @@
#include "base/format_macros.h"
#include "base/lazy_instance.h"
#include "base/memory/singleton.h"
+#include "base/message_loop/message_loop.h"
#include "base/process/process_metrics.h"
#include "base/stl_util.h"
#include "base/strings/string_split.h"
@@ -27,7 +28,6 @@
#include "base/third_party/dynamic_annotations/dynamic_annotations.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_id_name_manager.h"
-#include "base/threading/thread_local.h"
#include "base/time/time.h"
#if defined(OS_WIN)
@@ -48,17 +48,22 @@ BASE_EXPORT TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3];
namespace base {
namespace debug {
+namespace {
+
+// The overhead of TraceEvent above this threshold will be reported in the
+// trace.
+const int kOverheadReportThresholdInMicroseconds = 50;
+
// Controls the number of trace events we will buffer in-memory
// before throwing them away.
const size_t kTraceEventVectorBufferSize = 250000;
const size_t kTraceEventRingBufferSize = kTraceEventVectorBufferSize / 4;
+const size_t kTraceEventThreadLocalBufferSize = 1024;
const size_t kTraceEventBatchSize = 1000;
const size_t kTraceEventInitialBufferSize = 1024;
#define MAX_CATEGORY_GROUPS 100
-namespace {
-
// Parallel arrays g_category_groups and g_category_group_enabled are separate
// so that a pointer to a member of g_category_group_enabled can be easily
// converted to an index into g_category_groups. This allows macros to deal
@@ -69,14 +74,16 @@ const char* g_category_groups[MAX_CATEGORY_GROUPS] = {
"tracing already shutdown",
"tracing categories exhausted; must increase MAX_CATEGORY_GROUPS",
"__metadata",
-};
+ // For reporting trace_event overhead. For thread local event buffers only.
dsinclair
2013/08/29 14:24:32
After this patch, will we have non-thread-local events?
Xianzhu
2013/08/29 17:42:12
Yes, trace events from a thread without a message loop will be added into the
main buffer directly.
+ "trace_event_overhead"}; |
// The enabled flag is char instead of bool so that the API can be used from C. |
unsigned char g_category_group_enabled[MAX_CATEGORY_GROUPS] = { 0 }; |
const int g_category_already_shutdown = 0; |
const int g_category_categories_exhausted = 1; |
const int g_category_metadata = 2; |
-const int g_num_builtin_categories = 3; |
+const int g_category_trace_event_overhead = 3; |
+const int g_num_builtin_categories = 4; |
int g_category_index = g_num_builtin_categories; // Skip default categories. |
// The name of the current thread. This is used to decide if the current |
@@ -89,7 +96,10 @@ const char kRecordUntilFull[] = "record-until-full"; |
const char kRecordContinuously[] = "record-continuously"; |
const char kEnableSampling[] = "enable-sampling"; |
-} // namespace |
+TimeTicks ThreadNow() { |
+ return TimeTicks::IsThreadNowSupported() ? |
+ TimeTicks::ThreadNow() : TimeTicks(); |
dsinclair
2013/08/29 14:24:32
nit: indented too far.
Xianzhu
2013/08/29 17:42:12
Done.
+}
class TraceBufferRingBuffer : public TraceBuffer {
public:
@@ -183,11 +193,14 @@ class TraceBufferVector : public TraceBuffer {
}
virtual void AddEvent(const TraceEvent& event) OVERRIDE {
- // Note, we have two callers which need to be handled. The first is
- // AddTraceEventWithThreadIdAndTimestamp() which checks Size() and does an
- // early exit if full. The second is AddThreadNameMetadataEvents().
+ // Note, we have two callers which need to be handled:
+ // - AddEventToMainBufferWhileLocked() which has two cases:
+ // - called directly from AddTraceEventWithThreadIdAndTimestamp(),
+ // which checks if the buffer is full and does an early exit if full;
+ // - called from ThreadLocalEventBuffer::FlushWhileLocked();
+ // - AddThreadNameMetadataEvents().
// We can not DECHECK(!IsFull()) because we have to add the metadata
- // events even if the buffer is full.
+ // events and flush thread-local buffers even if the buffer is full.
logged_events_.push_back(event);
}
@@ -269,6 +282,8 @@ class TraceBufferDiscardsEvents : public TraceBuffer {
}
};
+} // namespace
+
////////////////////////////////////////////////////////////////////////////////
//
// TraceEvent
@@ -764,6 +779,150 @@ TraceBucketData::~TraceBucketData() {
//
////////////////////////////////////////////////////////////////////////////////
+class TraceLog::ThreadLocalEventBuffer
+ : public MessageLoop::DestructionObserver {
+ public:
+ ThreadLocalEventBuffer(TraceLog* trace_log);
+ virtual ~ThreadLocalEventBuffer();
+
+ void AddEvent(const TraceEvent& event, NotificationHelper* notifier);
+ void ReportOverhead(const TimeTicks& event_timestamp,
+ const TimeTicks& event_thread_timestamp);
+
+ private:
+ // MessageLoop::DestructionObserver
+ virtual void WillDestroyCurrentMessageLoop() OVERRIDE;
+
+ void FlushWhileLocked(NotificationHelper* notifier);
+
+ void CheckThisIsCurrentBuffer() {
+ DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
+ }
+
+ // Since TraceLog is a leaky singleton, trace_log_ will always be valid
+ // as long as the thread exists.
+ TraceLog* trace_log_;
+ std::vector<TraceEvent> logged_events_;
+ int event_count_;
+ TimeDelta overhead_;
+
+ DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
+};
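
The class above is the core of this patch: each thread with a message loop accumulates events in an unsynchronized local vector and only takes the global lock when the vector fills up or the thread goes away. A minimal standalone sketch of that scheme, using a thread_local destructor where the patch uses MessageLoop::DestructionObserver; all names below are illustrative, not from the patch:

    #include <cstddef>
    #include <mutex>
    #include <string>
    #include <vector>

    struct Event { std::string name; };

    class ThreadLocalBuffer {
     public:
      void AddEvent(const Event& e) {
        buffer_.push_back(e);
        if (buffer_.size() >= kFlushThreshold)
          Flush();  // Amortizes one lock acquisition over many events.
      }
      ~ThreadLocalBuffer() { Flush(); }  // Thread exit: drain the last batch.

     private:
      void Flush() {
        std::lock_guard<std::mutex> lock(main_lock_);
        main_buffer_.insert(main_buffer_.end(), buffer_.begin(), buffer_.end());
        buffer_.clear();
      }
      static constexpr std::size_t kFlushThreshold = 1024;
      std::vector<Event> buffer_;    // Owned by one thread; no locking needed.
      static std::mutex main_lock_;  // Guards the shared buffer.
      static std::vector<Event> main_buffer_;
    };

    std::mutex ThreadLocalBuffer::main_lock_;
    std::vector<Event> ThreadLocalBuffer::main_buffer_;

    // One buffer per thread, playing the role of thread_local_event_buffer_.
    thread_local ThreadLocalBuffer g_buffer;
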
+
+TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
+ : trace_log_(trace_log),
+ event_count_(0) {
+ logged_events_.reserve(kTraceEventThreadLocalBufferSize);
+
+ if (g_category_group_enabled[g_category_trace_event_overhead]) {
+ int thread_id = static_cast<int>(PlatformThread::CurrentId());
+ logged_events_.push_back(TraceEvent(
+ thread_id,
+ TimeTicks::NowFromSystemTraceTime() - trace_log->time_offset_,
+ ThreadNow(),
+ TRACE_EVENT_PHASE_ASYNC_BEGIN,
+ &g_category_group_enabled[g_category_trace_event_overhead],
+ "thread_trace_event",
+ thread_id,
+ 0, NULL, NULL, NULL, NULL,
+ TRACE_EVENT_FLAG_HAS_ID));
+ }
+
+ // ThreadLocalEventBuffer is created only if the thread has message loop, so
dsinclair
2013/08/29 14:24:32
nit: if the thread has a message loop
Xianzhu
2013/08/29 17:42:12
Done.
+ // the following message_loop won't be NULL.
+ MessageLoop* message_loop = MessageLoop::current();
+ message_loop->AddDestructionObserver(this);
+
+ AutoLock lock(trace_log->lock_);
+ trace_log->thread_message_loops_.insert(message_loop);
+}
+
+TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
+ CheckThisIsCurrentBuffer();
+ MessageLoop::current()->RemoveDestructionObserver(this);
+
+ if (event_count_) {
+ const char* arg_names[2] = { "event_count", "average_overhead" };
+ unsigned char arg_types[2];
+ unsigned long long arg_values[2];
+ trace_event_internal::SetTraceValue(
+ event_count_, &arg_types[0], &arg_values[0]);
+ trace_event_internal::SetTraceValue(
+ overhead_.InMillisecondsF() / event_count_,
dsinclair
2013/08/29 14:24:32
Does overhead_ get an initial value of 0 by default?
Xianzhu
2013/08/29 17:42:12
Yes, set by the default ctor of TimeDelta.
+ &arg_types[1], &arg_values[1]);
+ int thread_id = static_cast<int>(PlatformThread::CurrentId());
+ logged_events_.push_back(TraceEvent(
+ thread_id,
+ TimeTicks::NowFromSystemTraceTime() - trace_log_->time_offset_,
+ ThreadNow(),
+ TRACE_EVENT_PHASE_ASYNC_END,
+ &g_category_group_enabled[g_category_trace_event_overhead],
+ "thread_trace_event",
+ thread_id,
+ 2, arg_names, arg_types, arg_values, NULL,
+ TRACE_EVENT_FLAG_HAS_ID));
+ }
+
+ NotificationHelper notifier(trace_log_);
+ {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked(&notifier);
+ trace_log_->thread_message_loops_.erase(MessageLoop::current());
+ }
+ notifier.SendNotificationIfAny();
+}
+
+void TraceLog::ThreadLocalEventBuffer::AddEvent(const TraceEvent& event,
+ NotificationHelper* notifier) {
+ CheckThisIsCurrentBuffer();
+ logged_events_.push_back(event);
+ if (logged_events_.size() >= kTraceEventThreadLocalBufferSize) {
+ AutoLock lock(trace_log_->lock_);
+ FlushWhileLocked(notifier);
+ }
+}
+
+void TraceLog::ThreadLocalEventBuffer::ReportOverhead(
+ const TimeTicks& event_timestamp, const TimeTicks& event_thread_timestamp) {
+ if (g_category_group_enabled[g_category_trace_event_overhead]) {
dsinclair
2013/08/29 14:24:32
if (!g_category_group_enabled[g_category_trace_event_overhead])
Xianzhu
2013/08/29 17:42:12
Done.
+ event_count_++;
+ TimeTicks now =
+ TimeTicks::NowFromSystemTraceTime() - trace_log_->time_offset_;
+ TimeDelta overhead = now - event_timestamp;
+ if (overhead.InMicroseconds() >= kOverheadReportThresholdInMicroseconds) {
+ int thread_id = static_cast<int>(PlatformThread::CurrentId());
+ // TODO(wangxianzhu): Use X event when it's ready.
+ logged_events_.push_back(TraceEvent(
+ thread_id, event_timestamp, event_thread_timestamp,
+ TRACE_EVENT_PHASE_BEGIN,
+ &g_category_group_enabled[g_category_trace_event_overhead],
+ "overhead",
+ 0, 0, NULL, NULL, NULL, NULL, 0));
+ logged_events_.push_back(TraceEvent(
+ thread_id, now, ThreadNow(),
+ TRACE_EVENT_PHASE_END,
+ &g_category_group_enabled[g_category_trace_event_overhead],
+ "overhead",
+ 0, 0, NULL, NULL, NULL, NULL, 0));
+ }
+ overhead_ += overhead;
+ }
+}
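
ReportOverhead() above does two things: it accumulates the total overhead that feeds the "average_overhead" argument emitted at buffer destruction, and it records an explicit begin/end pair for any single event whose bookkeeping crossed the 50-microsecond threshold. A sketch of the same accounting with std::chrono standing in for TimeTicks/TimeDelta; the names are illustrative:

    #include <chrono>
    #include <cstdio>

    using Clock = std::chrono::steady_clock;

    // Mirrors kOverheadReportThresholdInMicroseconds.
    constexpr std::chrono::microseconds kReportThreshold(50);

    class OverheadTracker {
     public:
      // Called after an event has been recorded; event_start is the timestamp
      // captured when recording began.
      void Report(Clock::time_point event_start) {
        const auto overhead = Clock::now() - event_start;
        ++event_count_;
        total_ += overhead;
        if (overhead >= kReportThreshold) {
          // The real code pushes a PHASE_BEGIN/PHASE_END TraceEvent pair here.
          std::printf("slow trace event: %lld us\n",
                      static_cast<long long>(
                          std::chrono::duration_cast<std::chrono::microseconds>(
                              overhead).count()));
        }
      }

      // The per-thread average reported at thread exit ("average_overhead").
      double AverageOverheadMs() const {
        using MsF = std::chrono::duration<double, std::milli>;
        return event_count_ ? MsF(total_).count() / event_count_ : 0.0;
      }

     private:
      int event_count_ = 0;
      Clock::duration total_{};  // Zero by default, like TimeDelta's default ctor.
    };
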
+
+void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
+ delete this;
+}
+
+void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked(
+ NotificationHelper* notifier) {
+ trace_log_->lock_.AssertAcquired();
+ for (size_t i = 0; i < logged_events_.size(); ++i) {
+ trace_log_->AddEventToMainBufferWhileLocked(logged_events_[i]);
+ }
+ logged_events_.resize(0);
+ trace_log_->CheckIfBufferIsFullWhileLocked(notifier);
+}
+
TraceLog::NotificationHelper::NotificationHelper(TraceLog* trace_log)
: trace_log_(trace_log),
notification_(0) {
@@ -774,6 +933,7 @@ TraceLog::NotificationHelper::~NotificationHelper() {
void TraceLog::NotificationHelper::AddNotificationWhileLocked(
int notification) {
+ trace_log_->lock_.AssertAcquired();
if (trace_log_->notification_callback_.is_null())
return;
if (notification_ == 0)
@@ -820,10 +980,13 @@ TraceLog::Options TraceLog::TraceOptionsFromString(const std::string& options) {
TraceLog::TraceLog()
: enable_count_(0),
num_traces_recorded_(0),
- event_callback_(NULL),
+ buffer_is_full_(0),
+ event_callback_(0),
dispatching_to_observer_list_(false),
process_sort_index_(0),
- watch_category_(NULL),
+ process_id_hash_(0),
+ process_id_(0),
+ watch_category_(0),
trace_options_(RECORD_UNTIL_FULL),
sampling_thread_handle_(0),
category_filter_(CategoryFilter::kDefaultCategoryFilterString) {
@@ -961,6 +1124,8 @@ const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
void TraceLog::GetKnownCategoryGroups(
std::vector<std::string>* category_groups) {
AutoLock lock(lock_);
+ category_groups->push_back(
+ g_category_groups[g_category_trace_event_overhead]);
for (int i = g_num_builtin_categories; i < g_category_index; i++)
category_groups->push_back(g_category_groups[i]);
}
@@ -971,8 +1136,10 @@ void TraceLog::SetEnabled(const CategoryFilter& category_filter,
{
AutoLock lock(lock_);
+ Options old_options = trace_options();
+
if (enable_count_++ > 0) {
- if (options != trace_options_) {
+ if (options != old_options) {
DLOG(ERROR) << "Attemting to re-enable tracing with a different "
<< "set of options.";
}
@@ -982,9 +1149,10 @@
return;
}
- if (options != trace_options_) {
- trace_options_ = options;
+ if (options != old_options) {
+ subtle::NoBarrier_Store(&trace_options_, options);
logged_events_.reset(GetTraceBuffer());
+ subtle::NoBarrier_Store(&buffer_is_full_, 0);
}
if (dispatching_to_observer_list_) {
@@ -1042,6 +1210,7 @@ void TraceLog::SetDisabled() {
{
AutoLock lock(lock_);
DCHECK(enable_count_ > 0);
+
if (--enable_count_ != 0)
return;
@@ -1062,7 +1231,7 @@ void TraceLog::SetDisabled() {
}
category_filter_.Clear();
- watch_category_ = NULL;
+ subtle::NoBarrier_Store(&watch_category_, 0);
watch_event_name_ = "";
UpdateCategoryGroupEnabledFlags();
AddMetadataEvents();
@@ -1122,46 +1291,124 @@ void TraceLog::SetNotificationCallback(
}
TraceBuffer* TraceLog::GetTraceBuffer() {
- if (trace_options_ & RECORD_CONTINUOUSLY)
+ Options options = trace_options();
+ if (options & RECORD_CONTINUOUSLY)
return new TraceBufferRingBuffer();
- else if (trace_options_ & ECHO_TO_CONSOLE)
+ else if (options & ECHO_TO_CONSOLE)
return new TraceBufferDiscardsEvents();
return new TraceBufferVector();
}
+void TraceLog::AddEventToMainBufferWhileLocked(const TraceEvent& trace_event) {
+ // Don't check buffer_is_full_ because we want the remaining thread-local
+ // events to be flushed into the main buffer with this method, otherwise
+ // we may lose some early events of a thread that generates events sparsely.
+ lock_.AssertAcquired();
+ logged_events_->AddEvent(trace_event);
+}
+
+void TraceLog::CheckIfBufferIsFullWhileLocked(NotificationHelper* notifier) {
dsinclair
2013/08/29 14:24:32
lock_.AssertAcquired();
Xianzhu
2013/08/29 17:42:12
Done.
+ if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) {
+ subtle::NoBarrier_Store(&buffer_is_full_,
+ static_cast<subtle::AtomicWord>(1));
+ notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL);
+ }
+}
+
void TraceLog::SetEventCallback(EventCallback cb) {
- AutoLock lock(lock_);
+ subtle::NoBarrier_Store(&event_callback_,
+ reinterpret_cast<subtle::AtomicWord>(cb));
};
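
SetEventCallback() is one of several places where the patch turns lock-protected members (event_callback_, watch_category_, buffer_is_full_, trace_options_) into atomic words so the hot path can read them without taking lock_. A sketch of the resulting pattern, with std::atomic and relaxed ordering standing in for base::subtle's NoBarrier operations; the watch check mirrors the lock-free filter followed by a locked name comparison, and all names are illustrative:

    #include <atomic>
    #include <mutex>
    #include <string>

    class WatchList {
     public:
      void SetWatch(const unsigned char* category, const std::string& name) {
        std::lock_guard<std::mutex> lock(lock_);
        watch_event_name_ = name;
        watch_category_.store(category, std::memory_order_relaxed);
      }
      bool Matches(const unsigned char* category, const char* name) {
        // Cheap, lock-free filter first; nearly all events fail it.
        if (watch_category_.load(std::memory_order_relaxed) != category)
          return false;
        // Rare slow path: confirm the event name under the lock.
        std::lock_guard<std::mutex> lock(lock_);
        return watch_event_name_ == name;
      }
     private:
      std::mutex lock_;
      std::atomic<const unsigned char*> watch_category_{nullptr};
      std::string watch_event_name_;
    };
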
+// Flush() works as follows:
+// 1. Flush() is called on thread A, whose message loop is saved in
+// flush_message_loop_proxy_;
+// 2. On that thread, FlushNextThreadOrFinish() gets the first message loop
+// from thread_message_loops_. If any, it posts a task to run
+// FlushCurrentThreadAndContinue() in that message loop; otherwise it
+// finishes the flush (step 4);
+// 3. FlushCurrentThreadAndContinue() deletes the thread-local event buffer:
+// - The last batch of events of the thread is flushed into the main
+// buffer;
+// - The message loop is removed from thread_message_loops_;
+// and posts FlushNextThreadOrFinish() to flush_message_loop_proxy_ to
+// continue the flush procedure (step 2);
+// 4. When all thread-local buffers have been flushed (and deleted), finish
+// the flush by calling the OutputCallback with all events converted into
+// JSON format.
void TraceLog::Flush(const TraceLog::OutputCallback& cb) {
- // Ignore memory allocations from here down.
- INTERNAL_TRACE_MEMORY(TRACE_DISABLED_BY_DEFAULT("memory"),
- TRACE_MEMORY_IGNORE);
+ if (IsEnabled()) {
dsinclair
2013/08/29 14:24:32
Is this, basically, to stop us from doing any posting of tasks while tracing
is enabled?
Xianzhu
2013/08/29 17:42:12
Yes.
In addition, posting tasks will generate trace events themselves.
+ scoped_refptr<RefCountedString> empty_result = new RefCountedString; |
+ cb.Run(empty_result, false); |
dsinclair
2013/08/29 14:24:32
Should we check if cb.isNull() here before we try to run it?
Xianzhu
2013/08/29 17:42:12
Done.
+ return;
+ }
+
+ {
+ AutoLock lock(lock_);
+ DCHECK(!flush_message_loop_proxy_.get());
+ flush_message_loop_proxy_ = MessageLoopProxy::current();
+ DCHECK(!thread_message_loops_.size() || flush_message_loop_proxy_.get());
+ flush_output_callback_ = cb;
+ }
+ FlushNextThreadOrFinish();
+}
+
+void TraceLog::FlushNextThreadOrFinish() {
scoped_ptr<TraceBuffer> previous_logged_events;
{
AutoLock lock(lock_);
+ hash_set<MessageLoop*>::const_iterator next_message_loop =
+ thread_message_loops_.begin();
+ if (next_message_loop != thread_message_loops_.end()) {
+ // Destroy the next thread local buffer. The buffer will be flushed into
+ // the main event buffer and the message loop will be removed from
+ // thread_message_loops.
+ (*next_message_loop)->PostTask(
+ FROM_HERE,
+ Bind(&TraceLog::FlushCurrentThreadAndContinue,
+ Unretained(this)));
+ return;
+ }
+
+ // All thread local buffers have been flushed (and destroyed).
+ // From here to the end of the function finishes the whole flush procedure.
previous_logged_events.swap(logged_events_);
logged_events_.reset(GetTraceBuffer());
+ subtle::NoBarrier_Store(&buffer_is_full_, 0);
} // release lock
- while (previous_logged_events->HasMoreEvents()) {
+ flush_message_loop_proxy_ = NULL;
+
+ bool has_more_events = previous_logged_events->HasMoreEvents();
+ // The callback needs to be called at least once even if there are no events
+ // to let the caller know the completion of the flush.
+ do {
scoped_refptr<RefCountedString> json_events_str_ptr =
new RefCountedString();
- for (size_t i = 0; i < kTraceEventBatchSize; ++i) {
+ for (size_t i = 0; has_more_events && i < kTraceEventBatchSize; ++i) {
if (i > 0)
*(&(json_events_str_ptr->data())) += ",";
previous_logged_events->NextEvent().AppendAsJSON(
&(json_events_str_ptr->data()));
- if (!previous_logged_events->HasMoreEvents())
- break;
+ has_more_events = previous_logged_events->HasMoreEvents();
}
- cb.Run(json_events_str_ptr);
- }
+ flush_output_callback_.Run(json_events_str_ptr, has_more_events);
dsinclair
2013/08/29 14:24:32
Should we check if flush_output_callback_ exists?
Xianzhu
2013/08/29 17:42:12
Done.
+ } while (has_more_events);
+}
+
+// Run in each thread holding a local event buffer.
+void TraceLog::FlushCurrentThreadAndContinue() {
+ delete thread_local_event_buffer_.Get();
+ thread_local_event_buffer_.Set(NULL);
+
+ // Continue the flush procedure.
+ flush_message_loop_proxy_->PostTask(
+ FROM_HERE,
+ Bind(&TraceLog::FlushNextThreadOrFinish, Unretained(this)));
+}
void TraceLog::AddTraceEvent(
@@ -1211,13 +1458,24 @@ void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
return;
TimeTicks now = timestamp - time_offset_;
- base::TimeTicks thread_now;
- if (base::TimeTicks::IsThreadNowSupported())
- thread_now = base::TimeTicks::ThreadNow();
- EventCallback event_callback_copy;
+ TimeTicks thread_now = ThreadNow();
NotificationHelper notifier(this);
+ ThreadLocalEventBuffer* thread_local_event_buffer = NULL;
+ // A ThreadLocalEventBuffer needs the message loop
+ // - to know when the thread exits;
+ // - to handle the final flush.
+ // For a thread without a message loop, the trace events will be added into
+ // the main buffer directly.
+ if (MessageLoop::current()) {
+ thread_local_event_buffer = thread_local_event_buffer_.Get();
+ if (!thread_local_event_buffer) {
+ thread_local_event_buffer = new ThreadLocalEventBuffer(this);
+ thread_local_event_buffer_.Set(thread_local_event_buffer);
+ }
+ }
+
// Check and update the current thread name only if the event is for the
// current thread to avoid locks in most cases.
if (thread_id == static_cast<int>(PlatformThread::CurrentId())) {
@@ -1253,21 +1511,23 @@ void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
}
}
- TraceEvent trace_event(thread_id,
- now, thread_now, phase, category_group_enabled, name, id,
- num_args, arg_names, arg_types, arg_values,
- convertable_values, flags);
-
- do {
- AutoLock lock(lock_);
+ if (!subtle::NoBarrier_Load(&buffer_is_full_)) {
+ TraceEvent trace_event(thread_id, now, thread_now, phase,
+ category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values,
+ convertable_values, flags);
- event_callback_copy = event_callback_;
- if (logged_events_->IsFull())
- break;
+ if (thread_local_event_buffer) {
+ thread_local_event_buffer->AddEvent(trace_event, &notifier);
+ } else {
+ AutoLock lock(lock_);
+ AddEventToMainBufferWhileLocked(trace_event);
+ CheckIfBufferIsFullWhileLocked(&notifier);
+ }
- logged_events_->AddEvent(trace_event);
+ if (trace_options() & ECHO_TO_CONSOLE) {
+ AutoLock lock(lock_);
- if (trace_options_ & ECHO_TO_CONSOLE) {
TimeDelta duration;
if (phase == TRACE_EVENT_PHASE_END) {
duration = timestamp - thread_event_start_times_[thread_id].top();
@@ -1300,20 +1560,26 @@ void TraceLog::AddTraceEventWithThreadIdAndTimestamp(
if (phase == TRACE_EVENT_PHASE_BEGIN)
thread_event_start_times_[thread_id].push(timestamp);
}
+ }
- if (logged_events_->IsFull())
- notifier.AddNotificationWhileLocked(TRACE_BUFFER_FULL);
-
- if (watch_category_ == category_group_enabled && watch_event_name_ == name)
+ if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
+ &watch_category_)) == category_group_enabled) {
+ AutoLock lock(lock_);
+ if (watch_event_name_ == name)
notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
- } while (0); // release lock
+ }
notifier.SendNotificationIfAny();
- if (event_callback_copy != NULL) {
- event_callback_copy(phase, category_group_enabled, name, id,
- num_args, arg_names, arg_types, arg_values,
- flags);
+ EventCallback event_callback = reinterpret_cast<EventCallback>(
+ subtle::NoBarrier_Load(&event_callback_));
+ if (event_callback) {
+ event_callback(phase, category_group_enabled, name, id,
+ num_args, arg_names, arg_types, arg_values,
+ flags);
}
+
+ if (thread_local_event_buffer)
+ thread_local_event_buffer->ReportOverhead(now, thread_now);
}
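
Throughout AddTraceEventWithThreadIdAndTimestamp() above, notifications are collected while lock_ is held but only delivered via SendNotificationIfAny() after it is released, so the notification callback cannot deadlock by re-entering TraceLog. A condensed sketch of that NotificationHelper pattern, with illustrative names:

    #include <functional>
    #include <mutex>

    class Notifier {
     public:
      explicit Notifier(std::function<void(int)> callback)
          : callback_(std::move(callback)) {}
      void AddWhileLocked(int bit) { pending_ |= bit; }  // lock must be held
      void SendIfAny() {                                 // call after unlocking
        if (pending_ && callback_)
          callback_(pending_);
      }
     private:
      std::function<void(int)> callback_;
      int pending_ = 0;
    };

    // Usage, mirroring the structure of the function above:
    //   Notifier notifier(callback);
    //   {
    //     std::lock_guard<std::mutex> lock(mutex);
    //     if (buffer_full) notifier.AddWhileLocked(1 /* TRACE_BUFFER_FULL */);
    //   }
    //   notifier.SendIfAny();  // outside the lock
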
void TraceLog::AddTraceEventEtw(char phase,
@@ -1346,7 +1612,8 @@ void TraceLog::SetWatchEvent(const std::string& category_name,
size_t notify_count = 0;
{
AutoLock lock(lock_);
- watch_category_ = category;
+ subtle::NoBarrier_Store(&watch_category_,
+ reinterpret_cast<subtle::AtomicWord>(category));
watch_event_name_ = event_name;
// First, search existing events for watch event because we want to catch
@@ -1366,7 +1633,7 @@ void TraceLog::SetWatchEvent(const std::string& category_name,
void TraceLog::CancelWatchEvent() {
AutoLock lock(lock_);
- watch_category_ = NULL;
+ subtle::NoBarrier_Store(&watch_category_, 0);
watch_event_name_ = "";
}