| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 #include <utility> |
| 9 | 10 |
| 10 #include "base/base_switches.h" | 11 #include "base/base_switches.h" |
| 11 #include "base/bind.h" | 12 #include "base/bind.h" |
| 12 #include "base/command_line.h" | 13 #include "base/command_line.h" |
| 13 #include "base/debug/leak_annotations.h" | 14 #include "base/debug/leak_annotations.h" |
| 14 #include "base/lazy_instance.h" | 15 #include "base/lazy_instance.h" |
| 15 #include "base/location.h" | 16 #include "base/location.h" |
| 16 #include "base/memory/scoped_ptr.h" | 17 #include "base/memory/scoped_ptr.h" |
| 17 #include "base/memory/singleton.h" | 18 #include "base/memory/singleton.h" |
| 18 #include "base/process/process_metrics.h" | 19 #include "base/process/process_metrics.h" |
| (...skipping 302 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 321 return true; | 322 return true; |
| 322 } | 323 } |
| 323 | 324 |
| 324 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() { | 325 void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() { |
| 325 if (!chunk_) | 326 if (!chunk_) |
| 326 return; | 327 return; |
| 327 | 328 |
| 328 trace_log_->lock_.AssertAcquired(); | 329 trace_log_->lock_.AssertAcquired(); |
| 329 if (trace_log_->CheckGeneration(generation_)) { | 330 if (trace_log_->CheckGeneration(generation_)) { |
| 330 // Return the chunk to the buffer only if the generation matches. | 331 // Return the chunk to the buffer only if the generation matches. |
| 331 trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass()); | 332 trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_)); |
| 332 } | 333 } |
| 333 // Otherwise this method may be called from the destructor, or TraceLog will | 334 // Otherwise this method may be called from the destructor, or TraceLog will |
| 334 // find the generation mismatch and delete this buffer soon. | 335 // find the generation mismatch and delete this buffer soon. |
| 335 } | 336 } |
| 336 | 337 |
| 337 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {} | 338 TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {} |
| 338 | 339 |
| 339 TraceLogStatus::~TraceLogStatus() {} | 340 TraceLogStatus::~TraceLogStatus() {} |
| 340 | 341 |
| 341 // static | 342 // static |
| (...skipping 446 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 788 return logged_events_->IsFull(); | 789 return logged_events_->IsFull(); |
| 789 } | 790 } |
| 790 | 791 |
| 791 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked( | 792 TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked( |
| 792 TraceEventHandle* handle, | 793 TraceEventHandle* handle, |
| 793 bool check_buffer_is_full) { | 794 bool check_buffer_is_full) { |
| 794 lock_.AssertAcquired(); | 795 lock_.AssertAcquired(); |
| 795 | 796 |
| 796 if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) { | 797 if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) { |
| 797 logged_events_->ReturnChunk(thread_shared_chunk_index_, | 798 logged_events_->ReturnChunk(thread_shared_chunk_index_, |
| 798 thread_shared_chunk_.Pass()); | 799 std::move(thread_shared_chunk_)); |
| 799 } | 800 } |
| 800 | 801 |
| 801 if (!thread_shared_chunk_) { | 802 if (!thread_shared_chunk_) { |
| 802 thread_shared_chunk_ = | 803 thread_shared_chunk_ = |
| 803 logged_events_->GetChunk(&thread_shared_chunk_index_); | 804 logged_events_->GetChunk(&thread_shared_chunk_index_); |
| 804 if (check_buffer_is_full) | 805 if (check_buffer_is_full) |
| 805 CheckIfBufferIsFullWhileLocked(); | 806 CheckIfBufferIsFullWhileLocked(); |
| 806 } | 807 } |
| 807 if (!thread_shared_chunk_) | 808 if (!thread_shared_chunk_) |
| 808 return NULL; | 809 return NULL; |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 885 AutoLock lock(lock_); | 886 AutoLock lock(lock_); |
| 886 DCHECK(!flush_task_runner_); | 887 DCHECK(!flush_task_runner_); |
| 887 flush_task_runner_ = ThreadTaskRunnerHandle::IsSet() | 888 flush_task_runner_ = ThreadTaskRunnerHandle::IsSet() |
| 888 ? ThreadTaskRunnerHandle::Get() | 889 ? ThreadTaskRunnerHandle::Get() |
| 889 : nullptr; | 890 : nullptr; |
| 890 DCHECK(!thread_message_loops_.size() || flush_task_runner_); | 891 DCHECK(!thread_message_loops_.size() || flush_task_runner_); |
| 891 flush_output_callback_ = cb; | 892 flush_output_callback_ = cb; |
| 892 | 893 |
| 893 if (thread_shared_chunk_) { | 894 if (thread_shared_chunk_) { |
| 894 logged_events_->ReturnChunk(thread_shared_chunk_index_, | 895 logged_events_->ReturnChunk(thread_shared_chunk_index_, |
| 895 thread_shared_chunk_.Pass()); | 896 std::move(thread_shared_chunk_)); |
| 896 } | 897 } |
| 897 | 898 |
| 898 if (thread_message_loops_.size()) { | 899 if (thread_message_loops_.size()) { |
| 899 for (hash_set<MessageLoop*>::const_iterator it = | 900 for (hash_set<MessageLoop*>::const_iterator it = |
| 900 thread_message_loops_.begin(); | 901 thread_message_loops_.begin(); |
| 901 it != thread_message_loops_.end(); ++it) { | 902 it != thread_message_loops_.end(); ++it) { |
| 902 thread_message_loop_task_runners.push_back((*it)->task_runner()); | 903 thread_message_loop_task_runners.push_back((*it)->task_runner()); |
| 903 } | 904 } |
| 904 } | 905 } |
| 905 } | 906 } |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 982 | 983 |
| 983 if (use_worker_thread_ && | 984 if (use_worker_thread_ && |
| 984 WorkerPool::PostTask( | 985 WorkerPool::PostTask( |
| 985 FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat, | 986 FROM_HERE, Bind(&TraceLog::ConvertTraceEventsToTraceFormat, |
| 986 Passed(&previous_logged_events), | 987 Passed(&previous_logged_events), |
| 987 flush_output_callback, argument_filter_predicate), | 988 flush_output_callback, argument_filter_predicate), |
| 988 true)) { | 989 true)) { |
| 989 return; | 990 return; |
| 990 } | 991 } |
| 991 | 992 |
| 992 ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(), | 993 ConvertTraceEventsToTraceFormat(std::move(previous_logged_events), |
| 993 flush_output_callback, | 994 flush_output_callback, |
| 994 argument_filter_predicate); | 995 argument_filter_predicate); |
| 995 } | 996 } |
| 996 | 997 |
| 997 // Run in each thread holding a local event buffer. | 998 // Run in each thread holding a local event buffer. |
| 998 void TraceLog::FlushCurrentThread(int generation, bool discard_events) { | 999 void TraceLog::FlushCurrentThread(int generation, bool discard_events) { |
| 999 { | 1000 { |
| 1000 AutoLock lock(lock_); | 1001 AutoLock lock(lock_); |
| 1001 if (!CheckGeneration(generation) || !flush_task_runner_) { | 1002 if (!CheckGeneration(generation) || !flush_task_runner_) { |
| 1002 // This is late. The corresponding flush has finished. | 1003 // This is late. The corresponding flush has finished. |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1042 void TraceLog::FlushButLeaveBufferIntact( | 1043 void TraceLog::FlushButLeaveBufferIntact( |
| 1043 const TraceLog::OutputCallback& flush_output_callback) { | 1044 const TraceLog::OutputCallback& flush_output_callback) { |
| 1044 scoped_ptr<TraceBuffer> previous_logged_events; | 1045 scoped_ptr<TraceBuffer> previous_logged_events; |
| 1045 ArgumentFilterPredicate argument_filter_predicate; | 1046 ArgumentFilterPredicate argument_filter_predicate; |
| 1046 { | 1047 { |
| 1047 AutoLock lock(lock_); | 1048 AutoLock lock(lock_); |
| 1048 AddMetadataEventsWhileLocked(); | 1049 AddMetadataEventsWhileLocked(); |
| 1049 if (thread_shared_chunk_) { | 1050 if (thread_shared_chunk_) { |
| 1050 // Return the chunk to the main buffer to flush the sampling data. | 1051 // Return the chunk to the main buffer to flush the sampling data. |
| 1051 logged_events_->ReturnChunk(thread_shared_chunk_index_, | 1052 logged_events_->ReturnChunk(thread_shared_chunk_index_, |
| 1052 thread_shared_chunk_.Pass()); | 1053 std::move(thread_shared_chunk_)); |
| 1053 } | 1054 } |
| 1054 previous_logged_events = logged_events_->CloneForIteration().Pass(); | 1055 previous_logged_events = logged_events_->CloneForIteration(); |
| 1055 | 1056 |
| 1056 if (trace_options() & kInternalEnableArgumentFilter) { | 1057 if (trace_options() & kInternalEnableArgumentFilter) { |
| 1057 CHECK(!argument_filter_predicate_.is_null()); | 1058 CHECK(!argument_filter_predicate_.is_null()); |
| 1058 argument_filter_predicate = argument_filter_predicate_; | 1059 argument_filter_predicate = argument_filter_predicate_; |
| 1059 } | 1060 } |
| 1060 } // release lock | 1061 } // release lock |
| 1061 | 1062 |
| 1062 ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(), | 1063 ConvertTraceEventsToTraceFormat(std::move(previous_logged_events), |
| 1063 flush_output_callback, | 1064 flush_output_callback, |
| 1064 argument_filter_predicate); | 1065 argument_filter_predicate); |
| 1065 } | 1066 } |
| 1066 | 1067 |
| 1067 void TraceLog::UseNextTraceBuffer() { | 1068 void TraceLog::UseNextTraceBuffer() { |
| 1068 logged_events_.reset(CreateTraceBuffer()); | 1069 logged_events_.reset(CreateTraceBuffer()); |
| 1069 subtle::NoBarrier_AtomicIncrement(&generation_, 1); | 1070 subtle::NoBarrier_AtomicIncrement(&generation_, 1); |
| 1070 thread_shared_chunk_.reset(); | 1071 thread_shared_chunk_.reset(); |
| 1071 thread_shared_chunk_index_ = 0; | 1072 thread_shared_chunk_index_ = 0; |
| 1072 } | 1073 } |
| (...skipping 308 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1381 scoped_ptr<TraceEvent> trace_event(new TraceEvent); | 1382 scoped_ptr<TraceEvent> trace_event(new TraceEvent); |
| 1382 trace_event->Initialize( | 1383 trace_event->Initialize( |
| 1383 0, // thread_id | 1384 0, // thread_id |
| 1384 TimeTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA, | 1385 TimeTicks(), ThreadTicks(), TRACE_EVENT_PHASE_METADATA, |
| 1385 &g_category_group_enabled[g_category_metadata], name, | 1386 &g_category_group_enabled[g_category_metadata], name, |
| 1386 trace_event_internal::kNoId, // id | 1387 trace_event_internal::kNoId, // id |
| 1387 trace_event_internal::kNoId, // context_id | 1388 trace_event_internal::kNoId, // context_id |
| 1388 trace_event_internal::kNoId, // bind_id | 1389 trace_event_internal::kNoId, // bind_id |
| 1389 num_args, arg_names, arg_types, arg_values, convertable_values, flags); | 1390 num_args, arg_names, arg_types, arg_values, convertable_values, flags); |
| 1390 AutoLock lock(lock_); | 1391 AutoLock lock(lock_); |
| 1391 metadata_events_.push_back(trace_event.Pass()); | 1392 metadata_events_.push_back(std::move(trace_event)); |
| 1392 } | 1393 } |
| 1393 | 1394 |
| 1394 // May be called when a COMPLETE event ends and the unfinished event has been | 1395 // May be called when a COMPLETE event ends and the unfinished event has been |
| 1395 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). | 1396 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). |
| 1396 std::string TraceLog::EventToConsoleMessage(unsigned char phase, | 1397 std::string TraceLog::EventToConsoleMessage(unsigned char phase, |
| 1397 const TimeTicks& timestamp, | 1398 const TimeTicks& timestamp, |
| 1398 TraceEvent* trace_event) { | 1399 TraceEvent* trace_event) { |
| 1399 AutoLock thread_info_lock(thread_info_lock_); | 1400 AutoLock thread_info_lock(thread_info_lock_); |
| 1400 | 1401 |
| 1401 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to | 1402 // The caller should translate TRACE_EVENT_PHASE_COMPLETE to |
| (...skipping 367 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1769 } | 1770 } |
| 1770 | 1771 |
| 1771 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 1772 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
| 1772 if (*category_group_enabled_) { | 1773 if (*category_group_enabled_) { |
| 1773 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, | 1774 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_, |
| 1774 event_handle_); | 1775 event_handle_); |
| 1775 } | 1776 } |
| 1776 } | 1777 } |
| 1777 | 1778 |
| 1778 } // namespace trace_event_internal | 1779 } // namespace trace_event_internal |
| OLD | NEW |