Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(31)

Side by Side Diff: base/trace_event/trace_log.cc

Issue 2503473002: tracing: split out ThreadLocalEventBuffer
Patch Set: Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/trace_event/trace_log.h ('k') | tools/gn/bootstrap/bootstrap.py » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/trace_log.h" 5 #include "base/trace_event/trace_log.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <cmath> 8 #include <cmath>
9 #include <memory> 9 #include <memory>
10 #include <utility> 10 #include <utility>
(...skipping 22 matching lines...) Expand all
33 #include "base/time/time.h" 33 #include "base/time/time.h"
34 #include "base/trace_event/category_registry.h" 34 #include "base/trace_event/category_registry.h"
35 #include "base/trace_event/event_filter_registry.h" 35 #include "base/trace_event/event_filter_registry.h"
36 #include "base/trace_event/event_name_filter.h" 36 #include "base/trace_event/event_name_filter.h"
37 #include "base/trace_event/heap_profiler.h" 37 #include "base/trace_event/heap_profiler.h"
38 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 38 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
39 #include "base/trace_event/heap_profiler_event_filter.h" 39 #include "base/trace_event/heap_profiler_event_filter.h"
40 #include "base/trace_event/memory_dump_manager.h" 40 #include "base/trace_event/memory_dump_manager.h"
41 #include "base/trace_event/memory_dump_provider.h" 41 #include "base/trace_event/memory_dump_provider.h"
42 #include "base/trace_event/process_memory_dump.h" 42 #include "base/trace_event/process_memory_dump.h"
43 #include "base/trace_event/thread_local_event_buffer.h"
43 #include "base/trace_event/trace_buffer.h" 44 #include "base/trace_event/trace_buffer.h"
44 #include "base/trace_event/trace_event.h" 45 #include "base/trace_event/trace_event.h"
45 #include "base/trace_event/trace_event_synthetic_delay.h" 46 #include "base/trace_event/trace_event_synthetic_delay.h"
46 #include "build/build_config.h" 47 #include "build/build_config.h"
47 48
48 #if defined(OS_WIN) 49 #if defined(OS_WIN)
49 #include "base/trace_event/trace_event_etw_export_win.h" 50 #include "base/trace_event/trace_event_etw_export_win.h"
50 #endif 51 #endif
51 52
52 namespace base { 53 namespace base {
(...skipping 82 matching lines...) Expand 10 before | Expand all | Expand 10 after
135 DCHECK(!thread_local_boolean_->Get()); 136 DCHECK(!thread_local_boolean_->Get());
136 thread_local_boolean_->Set(true); 137 thread_local_boolean_->Set(true);
137 } 138 }
138 ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); } 139 ~AutoThreadLocalBoolean() { thread_local_boolean_->Set(false); }
139 140
140 private: 141 private:
141 ThreadLocalBoolean* thread_local_boolean_; 142 ThreadLocalBoolean* thread_local_boolean_;
142 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean); 143 DISALLOW_COPY_AND_ASSIGN(AutoThreadLocalBoolean);
143 }; 144 };
144 145
145 // Use this function instead of TraceEventHandle constructor to keep the
146 // overhead of ScopedTracer (trace_event.h) constructor minimum.
147 void MakeHandle(uint32_t chunk_seq,
148 size_t chunk_index,
149 size_t event_index,
150 TraceEventHandle* handle) {
151 DCHECK(chunk_seq);
152 DCHECK(chunk_index <= TraceBufferChunk::kMaxChunkIndex);
153 DCHECK(event_index < TraceBufferChunk::kTraceBufferChunkSize);
154 DCHECK(chunk_index <= std::numeric_limits<uint16_t>::max());
155 handle->chunk_seq = chunk_seq;
156 handle->chunk_index = static_cast<uint16_t>(chunk_index);
157 handle->event_index = static_cast<uint16_t>(event_index);
158 }
159
160 template <typename Function> 146 template <typename Function>
161 void ForEachCategoryFilter(const unsigned char* category_group_enabled, 147 void ForEachCategoryFilter(const unsigned char* category_group_enabled,
162 Function filter_fn) { 148 Function filter_fn) {
163 auto* cat = CategoryRegistry::GetCategoryByStatePtr(category_group_enabled); 149 auto* cat = CategoryRegistry::GetCategoryByStatePtr(category_group_enabled);
164 uint32_t filter_bitmap = cat->enabled_filters(); 150 uint32_t filter_bitmap = cat->enabled_filters();
165 for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) { 151 for (int index = 0; filter_bitmap != 0; filter_bitmap >>= 1, index++) {
166 if (filter_bitmap & 1) 152 if (filter_bitmap & 1)
167 filter_fn(EventFilterRegistry::Get(index)); 153 filter_fn(EventFilterRegistry::Get(index));
168 } 154 }
169 } 155 }
(...skipping 17 matching lines...) Expand all
187 locked_ = true; 173 locked_ = true;
188 } 174 }
189 } 175 }
190 176
191 private: 177 private:
192 Lock* lock_; 178 Lock* lock_;
193 bool locked_; 179 bool locked_;
194 DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock); 180 DISALLOW_COPY_AND_ASSIGN(OptionalAutoLock);
195 }; 181 };
196 182
197 class TraceLog::ThreadLocalEventBuffer
198 : public MessageLoop::DestructionObserver,
199 public MemoryDumpProvider {
200 public:
201 explicit ThreadLocalEventBuffer(TraceLog* trace_log);
202 ~ThreadLocalEventBuffer() override;
203
204 TraceEvent* AddTraceEvent(TraceEventHandle* handle);
205
206 TraceEvent* GetEventByHandle(TraceEventHandle handle) {
207 if (!chunk_ || handle.chunk_seq != chunk_->seq() ||
208 handle.chunk_index != chunk_index_) {
209 return nullptr;
210 }
211
212 return chunk_->GetEventAt(handle.event_index);
213 }
214
215 int generation() const { return generation_; }
216
217 private:
218 // MessageLoop::DestructionObserver
219 void WillDestroyCurrentMessageLoop() override;
220
221 // MemoryDumpProvider implementation.
222 bool OnMemoryDump(const MemoryDumpArgs& args,
223 ProcessMemoryDump* pmd) override;
224
225 void FlushWhileLocked();
226
227 void CheckThisIsCurrentBuffer() const {
228 DCHECK(trace_log_->thread_local_event_buffer_.Get() == this);
229 }
230
231 // Since TraceLog is a leaky singleton, trace_log_ will always be valid
232 // as long as the thread exists.
233 TraceLog* trace_log_;
234 std::unique_ptr<TraceBufferChunk> chunk_;
235 size_t chunk_index_;
236 int generation_;
237
238 DISALLOW_COPY_AND_ASSIGN(ThreadLocalEventBuffer);
239 };
240
241 TraceLog::ThreadLocalEventBuffer::ThreadLocalEventBuffer(TraceLog* trace_log)
242 : trace_log_(trace_log),
243 chunk_index_(0),
244 generation_(trace_log->generation()) {
245 // ThreadLocalEventBuffer is created only if the thread has a message loop, so
246 // the following message_loop won't be NULL.
247 MessageLoop* message_loop = MessageLoop::current();
248 message_loop->AddDestructionObserver(this);
249
250 // This is to report the local memory usage when memory-infra is enabled.
251 MemoryDumpManager::GetInstance()->RegisterDumpProvider(
252 this, "ThreadLocalEventBuffer", ThreadTaskRunnerHandle::Get());
253
254 AutoLock lock(trace_log->lock_);
255 trace_log->thread_message_loops_.insert(message_loop);
256 }
257
258 TraceLog::ThreadLocalEventBuffer::~ThreadLocalEventBuffer() {
259 CheckThisIsCurrentBuffer();
260 MessageLoop::current()->RemoveDestructionObserver(this);
261 MemoryDumpManager::GetInstance()->UnregisterDumpProvider(this);
262
263 {
264 AutoLock lock(trace_log_->lock_);
265 FlushWhileLocked();
266 trace_log_->thread_message_loops_.erase(MessageLoop::current());
267 }
268 trace_log_->thread_local_event_buffer_.Set(NULL);
269 }
270
271 TraceEvent* TraceLog::ThreadLocalEventBuffer::AddTraceEvent(
272 TraceEventHandle* handle) {
273 CheckThisIsCurrentBuffer();
274
275 if (chunk_ && chunk_->IsFull()) {
276 AutoLock lock(trace_log_->lock_);
277 FlushWhileLocked();
278 chunk_.reset();
279 }
280 if (!chunk_) {
281 AutoLock lock(trace_log_->lock_);
282 chunk_ = trace_log_->logged_events_->GetChunk(&chunk_index_);
283 trace_log_->CheckIfBufferIsFullWhileLocked();
284 }
285 if (!chunk_)
286 return NULL;
287
288 size_t event_index;
289 TraceEvent* trace_event = chunk_->AddTraceEvent(&event_index);
290 if (trace_event && handle)
291 MakeHandle(chunk_->seq(), chunk_index_, event_index, handle);
292
293 return trace_event;
294 }
295
296 void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
297 delete this;
298 }
299
300 bool TraceLog::ThreadLocalEventBuffer::OnMemoryDump(const MemoryDumpArgs& args,
301 ProcessMemoryDump* pmd) {
302 if (!chunk_)
303 return true;
304 std::string dump_base_name = StringPrintf(
305 "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
306 TraceEventMemoryOverhead overhead;
307 chunk_->EstimateTraceMemoryOverhead(&overhead);
308 overhead.DumpInto(dump_base_name.c_str(), pmd);
309 return true;
310 }
311
312 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
313 if (!chunk_)
314 return;
315
316 trace_log_->lock_.AssertAcquired();
317 if (trace_log_->CheckGeneration(generation_)) {
318 // Return the chunk to the buffer only if the generation matches.
319 trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_));
320 }
321 // Otherwise this method may be called from the destructor, or TraceLog will
322 // find the generation mismatch and delete this buffer soon.
323 }
324
325 struct TraceLog::RegisteredAsyncObserver { 183 struct TraceLog::RegisteredAsyncObserver {
326 explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer) 184 explicit RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer)
327 : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {} 185 : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {}
328 ~RegisteredAsyncObserver() {} 186 ~RegisteredAsyncObserver() {}
329 187
330 WeakPtr<AsyncEnabledStateObserver> observer; 188 WeakPtr<AsyncEnabledStateObserver> observer;
331 scoped_refptr<SequencedTaskRunner> task_runner; 189 scoped_refptr<SequencedTaskRunner> task_runner;
332 }; 190 };
333 191
192 TraceLog::MemoryDumpProviderForThreadLocalBuffer::
193 MemoryDumpProviderForThreadLocalBuffer(
194 ThreadLocalEventBuffer* thread_local_event_buffer)
195 : thread_local_event_buffer_(thread_local_event_buffer) {}
196
197 TraceLog::MemoryDumpProviderForThreadLocalBuffer::
198 ~MemoryDumpProviderForThreadLocalBuffer() {}
199
200 bool TraceLog::MemoryDumpProviderForThreadLocalBuffer::OnMemoryDump(
201 const MemoryDumpArgs& args,
202 ProcessMemoryDump* pmd) {
203 std::string dump_base_name = StringPrintf(
204 "tracing/thread_%d", static_cast<int>(PlatformThread::CurrentId()));
205 TraceEventMemoryOverhead overhead;
206 TraceBufferChunk* const chunk = thread_local_event_buffer_->chunk();
207 chunk->EstimateTraceMemoryOverhead(&overhead);
208 overhead.DumpInto(dump_base_name.c_str(), pmd);
209 return true;
210 }
211
334 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {} 212 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}
335 213
336 TraceLogStatus::~TraceLogStatus() {} 214 TraceLogStatus::~TraceLogStatus() {}
337 215
338 // static 216 // static
339 TraceLog* TraceLog::GetInstance() { 217 TraceLog* TraceLog::GetInstance() {
340 return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get(); 218 return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
341 } 219 }
342 220
343 TraceLog::TraceLog() 221 TraceLog::TraceLog()
(...skipping 23 matching lines...) Expand all
367 } 245 }
368 246
369 TraceLog::~TraceLog() {} 247 TraceLog::~TraceLog() {}
370 248
371 void TraceLog::InitializeThreadLocalEventBufferIfSupported() { 249 void TraceLog::InitializeThreadLocalEventBufferIfSupported() {
372 // A ThreadLocalEventBuffer needs the message loop 250 // A ThreadLocalEventBuffer needs the message loop
373 // - to know when the thread exits; 251 // - to know when the thread exits;
374 // - to handle the final flush. 252 // - to handle the final flush.
375 // For a thread without a message loop or the message loop may be blocked, the 253 // For a thread without a message loop or the message loop may be blocked, the
376 // trace events will be added into the main buffer directly. 254 // trace events will be added into the main buffer directly.
377 if (thread_blocks_message_loop_.Get() || !MessageLoop::current()) 255 MessageLoop* const message_loop = MessageLoop::current();
256 if (thread_blocks_message_loop_.Get() || !message_loop)
378 return; 257 return;
379 HEAP_PROFILER_SCOPED_IGNORE; 258
259 // If the thread-local instance was created during a prior tracing session,
260 // tear it down and re-create it.
380 auto* thread_local_event_buffer = thread_local_event_buffer_.Get(); 261 auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
381 if (thread_local_event_buffer && 262 if (thread_local_event_buffer &&
382 !CheckGeneration(thread_local_event_buffer->generation())) { 263 !CheckGeneration(thread_local_event_buffer->generation())) {
383 delete thread_local_event_buffer; 264 DestroyThreadLocalEventBufferIfSupported();
384 thread_local_event_buffer = NULL; 265 thread_local_event_buffer = nullptr;
385 } 266 }
386 if (!thread_local_event_buffer) { 267 if (!thread_local_event_buffer) {
387 thread_local_event_buffer = new ThreadLocalEventBuffer(this); 268 HEAP_PROFILER_SCOPED_IGNORE;
269 thread_local_event_buffer = new ThreadLocalEventBuffer(generation());
388 thread_local_event_buffer_.Set(thread_local_event_buffer); 270 thread_local_event_buffer_.Set(thread_local_event_buffer);
271 message_loop->AddDestructionObserver(this);
272 // This is to report the local memory usage when memory-infra is enabled.
273 auto memory_dump_provider =
274 MakeUnique<MemoryDumpProviderForThreadLocalBuffer>(
275 thread_local_event_buffer);
276 MemoryDumpManager::GetInstance()->RegisterDumpProvider(
277 memory_dump_provider.get(), "ThreadLocalEventBuffer",
278 message_loop->task_runner());
279 AutoLock lock(lock_);
280 thread_message_loops_.insert(message_loop);
281 thread_local_mdps_[message_loop] = std::move(memory_dump_provider);
389 } 282 }
390 } 283 }
391 284
285 void TraceLog::DestroyThreadLocalEventBufferIfSupported() {
286 auto* thread_local_event_buffer = thread_local_event_buffer_.Get();
287 if (!thread_local_event_buffer)
288 return;
289 HEAP_PROFILER_SCOPED_IGNORE;
290 thread_local_event_buffer->Flush();
291 thread_local_event_buffer_.Set(nullptr);
292 MessageLoop* const message_loop = MessageLoop::current();
293 delete thread_local_event_buffer;
294 message_loop->RemoveDestructionObserver(this);
295
296 AutoLock lock(lock_);
297 thread_message_loops_.erase(message_loop);
298 auto memory_dump_provider_it = thread_local_mdps_.find(message_loop);
299 DCHECK(memory_dump_provider_it != thread_local_mdps_.end());
300 MemoryDumpManager::GetInstance()->UnregisterDumpProvider(
301 memory_dump_provider_it->second.get());
302 memory_dump_provider_it->second.reset();
303 thread_local_mdps_.erase(memory_dump_provider_it);
304 // TODO order of this !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
305 }
306
307 void TraceLog::WillDestroyCurrentMessageLoop() {
308 DestroyThreadLocalEventBufferIfSupported();
309 }
310
392 bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args, 311 bool TraceLog::OnMemoryDump(const MemoryDumpArgs& args,
393 ProcessMemoryDump* pmd) { 312 ProcessMemoryDump* pmd) {
394 // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested 313 // TODO(ssid): Use MemoryDumpArgs to create light dumps when requested
395 // (crbug.com/499731). 314 // (crbug.com/499731).
396 TraceEventMemoryOverhead overhead; 315 TraceEventMemoryOverhead overhead;
397 overhead.Add("TraceLog", sizeof(*this)); 316 overhead.Add("TraceLog", sizeof(*this));
398 { 317 {
399 AutoLock lock(lock_); 318 AutoLock lock(lock_);
400 if (logged_events_) 319 if (logged_events_)
401 logged_events_->EstimateTraceMemoryOverhead(&overhead); 320 logged_events_->EstimateTraceMemoryOverhead(&overhead);
(...skipping 395 matching lines...) Expand 10 before | Expand all | Expand 10 after
797 thread_shared_chunk_ = 716 thread_shared_chunk_ =
798 logged_events_->GetChunk(&thread_shared_chunk_index_); 717 logged_events_->GetChunk(&thread_shared_chunk_index_);
799 if (check_buffer_is_full) 718 if (check_buffer_is_full)
800 CheckIfBufferIsFullWhileLocked(); 719 CheckIfBufferIsFullWhileLocked();
801 } 720 }
802 if (!thread_shared_chunk_) 721 if (!thread_shared_chunk_)
803 return NULL; 722 return NULL;
804 723
805 size_t event_index; 724 size_t event_index;
806 TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index); 725 TraceEvent* trace_event = thread_shared_chunk_->AddTraceEvent(&event_index);
807 if (trace_event && handle) { 726 DCHECK(trace_event);
808 MakeHandle(thread_shared_chunk_->seq(), thread_shared_chunk_index_, 727 if (handle) {
809 event_index, handle); 728 thread_shared_chunk_->MakeHandle(thread_shared_chunk_index_, event_index,
729 handle);
810 } 730 }
811 return trace_event; 731 return trace_event;
812 } 732 }
813 733
814 void TraceLog::CheckIfBufferIsFullWhileLocked() { 734 void TraceLog::CheckIfBufferIsFullWhileLocked() {
815 lock_.AssertAcquired(); 735 lock_.AssertAcquired();
816 if (logged_events_->IsFull()) { 736 if (logged_events_->IsFull()) {
817 if (buffer_limit_reached_timestamp_.is_null()) { 737 if (buffer_limit_reached_timestamp_.is_null()) {
818 buffer_limit_reached_timestamp_ = OffsetNow(); 738 buffer_limit_reached_timestamp_ = OffsetNow();
819 } 739 }
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
973 // Run in each thread holding a local event buffer. 893 // Run in each thread holding a local event buffer.
974 void TraceLog::FlushCurrentThread(int generation, bool discard_events) { 894 void TraceLog::FlushCurrentThread(int generation, bool discard_events) {
975 { 895 {
976 AutoLock lock(lock_); 896 AutoLock lock(lock_);
977 if (!CheckGeneration(generation) || !flush_task_runner_) { 897 if (!CheckGeneration(generation) || !flush_task_runner_) {
978 // This is late. The corresponding flush has finished. 898 // This is late. The corresponding flush has finished.
979 return; 899 return;
980 } 900 }
981 } 901 }
982 902
983 // This will flush the thread local buffer. 903 DestroyThreadLocalEventBufferIfSupported();
984 delete thread_local_event_buffer_.Get();
985 904
986 AutoLock lock(lock_); 905 AutoLock lock(lock_);
987 if (!CheckGeneration(generation) || !flush_task_runner_ || 906 if (!CheckGeneration(generation) || !flush_task_runner_ ||
988 !thread_message_loops_.empty()) 907 !thread_message_loops_.empty())
989 return; 908 return;
990 909
991 flush_task_runner_->PostTask( 910 flush_task_runner_->PostTask(
992 FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation, 911 FROM_HERE, Bind(&TraceLog::FinishFlush, Unretained(this), generation,
993 discard_events)); 912 discard_events));
994 } 913 }
(...skipping 329 matching lines...) Expand 10 before | Expand all | Expand 10 after
1324 trace_event->Initialize( 1243 trace_event->Initialize(
1325 thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA, 1244 thread_id, now, thread_now, TRACE_EVENT_PHASE_METADATA,
1326 category_group_enabled, name, 1245 category_group_enabled, name,
1327 trace_event_internal::kGlobalScope, // scope 1246 trace_event_internal::kGlobalScope, // scope
1328 trace_event_internal::kNoId, // id 1247 trace_event_internal::kNoId, // id
1329 trace_event_internal::kNoId, // bind_id 1248 trace_event_internal::kNoId, // bind_id
1330 num_args, arg_names, arg_types, arg_values, convertable_values, flags); 1249 num_args, arg_names, arg_types, arg_values, convertable_values, flags);
1331 metadata_events_.push_back(std::move(trace_event)); 1250 metadata_events_.push_back(std::move(trace_event));
1332 } 1251 }
1333 1252
1253 std::unique_ptr<TraceBufferChunk> TraceLog::TakeChunk(size_t* chunk_index) {
1254 AutoLock lock(lock_);
1255 auto chunk = logged_events_->GetChunk(chunk_index);
1256 CheckIfBufferIsFullWhileLocked();
1257 return chunk;
1258 }
1259
1260 void TraceLog::ReturnChunk(std::unique_ptr<TraceBufferChunk> chunk,
1261 int generation,
1262 size_t chunk_index) {
1263 AutoLock lock(lock_);
1264 // Return the chunk to the buffer only if the generation matches.
1265 if (CheckGeneration(generation))
1266 logged_events_->ReturnChunk(chunk_index, std::move(chunk));
1267 }
1268
1334 // May be called when a COMPLETE event ends and the unfinished event has been 1269 // May be called when a COMPLETE event ends and the unfinished event has been
1335 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). 1270 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL).
1336 std::string TraceLog::EventToConsoleMessage(unsigned char phase, 1271 std::string TraceLog::EventToConsoleMessage(unsigned char phase,
1337 const TimeTicks& timestamp, 1272 const TimeTicks& timestamp,
1338 TraceEvent* trace_event) { 1273 TraceEvent* trace_event) {
1339 HEAP_PROFILER_SCOPED_IGNORE; 1274 HEAP_PROFILER_SCOPED_IGNORE;
1340 AutoLock thread_info_lock(thread_info_lock_); 1275 AutoLock thread_info_lock(thread_info_lock_);
1341 1276
1342 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to 1277 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to
1343 // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_END. 1278 // TRACE_EVENT_PHASE_BEGIN or TRACE_EVENT_END.
(...skipping 261 matching lines...) Expand 10 before | Expand all | Expand 10 after
1605 void TraceLog::SetTimeOffset(TimeDelta offset) { 1540 void TraceLog::SetTimeOffset(TimeDelta offset) {
1606 time_offset_ = offset; 1541 time_offset_ = offset;
1607 } 1542 }
1608 1543
1609 size_t TraceLog::GetObserverCountForTest() const { 1544 size_t TraceLog::GetObserverCountForTest() const {
1610 return enabled_state_observer_list_.size(); 1545 return enabled_state_observer_list_.size();
1611 } 1546 }
1612 1547
1613 void TraceLog::SetCurrentThreadBlocksMessageLoop() { 1548 void TraceLog::SetCurrentThreadBlocksMessageLoop() {
1614 thread_blocks_message_loop_.Set(true); 1549 thread_blocks_message_loop_.Set(true);
1615 // This will flush the thread local buffer. 1550 DestroyThreadLocalEventBufferIfSupported();
1616 delete thread_local_event_buffer_.Get();
1617 } 1551 }
1618 1552
1619 TraceBuffer* TraceLog::CreateTraceBuffer() { 1553 TraceBuffer* TraceLog::CreateTraceBuffer() {
1620 HEAP_PROFILER_SCOPED_IGNORE; 1554 HEAP_PROFILER_SCOPED_IGNORE;
1621 InternalTraceOptions options = trace_options(); 1555 InternalTraceOptions options = trace_options();
1622 if (options & kInternalRecordContinuously) { 1556 if (options & kInternalRecordContinuously) {
1623 return TraceBuffer::CreateTraceBufferRingBuffer( 1557 return TraceBuffer::CreateTraceBufferRingBuffer(
1624 kTraceEventRingBufferChunks); 1558 kTraceEventRingBufferChunks);
1625 } 1559 }
1626 if (options & kInternalEchoToConsole) { 1560 if (options & kInternalEchoToConsole) {
(...skipping 81 matching lines...) Expand 10 before | Expand all | Expand 10 after
1708 } 1642 }
1709 1643
1710 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { 1644 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
1711 if (*category_group_enabled_) { 1645 if (*category_group_enabled_) {
1712 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, 1646 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
1713 event_handle_); 1647 event_handle_);
1714 } 1648 }
1715 } 1649 }
1716 1650
1717 } // namespace trace_event_internal 1651 } // namespace trace_event_internal
OLDNEW
« no previous file with comments | « base/trace_event/trace_log.h ('k') | tools/gn/bootstrap/bootstrap.py » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698