Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/trace_log.h" | 5 #include "base/trace_event/trace_log.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <cmath> | 8 #include <cmath> |
| 9 #include <memory> | 9 #include <memory> |
| 10 #include <utility> | 10 #include <utility> |
| (...skipping 315 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 326 | 326 |
| 327 trace_log_->lock_.AssertAcquired(); | 327 trace_log_->lock_.AssertAcquired(); |
| 328 if (trace_log_->CheckGeneration(generation_)) { | 328 if (trace_log_->CheckGeneration(generation_)) { |
| 329 // Return the chunk to the buffer only if the generation matches. | 329 // Return the chunk to the buffer only if the generation matches. |
| 330 trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_)); | 330 trace_log_->logged_events_->ReturnChunk(chunk_index_, std::move(chunk_)); |
| 331 } | 331 } |
| 332 // Otherwise this method may be called from the destructor, or TraceLog will | 332 // Otherwise this method may be called from the destructor, or TraceLog will |
| 333 // find the generation mismatch and delete this buffer soon. | 333 // find the generation mismatch and delete this buffer soon. |
| 334 } | 334 } |
| 335 | 335 |
| 336 struct TraceLog::RegisteredAsyncObserver { | |
| 337 RegisteredAsyncObserver(WeakPtr<AsyncEnabledStateObserver> observer) | |
| 338 : observer(observer), task_runner(ThreadTaskRunnerHandle::Get()) {} | |
| 339 ~RegisteredAsyncObserver() {} | |
| 340 | |
| 341 WeakPtr<AsyncEnabledStateObserver> observer; | |
| 342 scoped_refptr<SequencedTaskRunner> task_runner; | |
| 343 }; | |
| 344 | |
// Zero-initializes the status snapshot; the capacity/count fields are filled
// in later by TraceLog when a status is requested.
TraceLogStatus::TraceLogStatus() : event_capacity(0), event_count(0) {}

TraceLogStatus::~TraceLogStatus() {}
| 339 | 348 |
// static
// Returns the process-wide TraceLog. Uses LeakySingletonTraits so the
// instance is intentionally never destroyed: tracing may be used during
// shutdown, after normal static destruction has begun.
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog>>::get();
}
| 344 | 353 |
| 345 TraceLog::TraceLog() | 354 TraceLog::TraceLog() |
| (...skipping 279 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 625 Bind(&TraceSamplingThread::DefaultSamplingCallback)); | 634 Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
| 626 sampling_thread_->RegisterSampleBucket( | 635 sampling_thread_->RegisterSampleBucket( |
| 627 &g_trace_state[2], "bucket2", | 636 &g_trace_state[2], "bucket2", |
| 628 Bind(&TraceSamplingThread::DefaultSamplingCallback)); | 637 Bind(&TraceSamplingThread::DefaultSamplingCallback)); |
| 629 if (!PlatformThread::Create(0, sampling_thread_.get(), | 638 if (!PlatformThread::Create(0, sampling_thread_.get(), |
| 630 &sampling_thread_handle_)) { | 639 &sampling_thread_handle_)) { |
| 631 DCHECK(false) << "failed to create thread"; | 640 DCHECK(false) << "failed to create thread"; |
| 632 } | 641 } |
| 633 } | 642 } |
| 634 | 643 |
| 644 for (auto& it : async_observers_) | |
|
Primiano Tucci (use gerrit)
2016/05/10 14:20:26
I think this could even be +const, i.e. `const auto&`.
Xiaocheng
2016/05/11 05:19:20
Done.
| |
| 645 it.second.task_runner->PostTask( | |
|
Sami
2016/05/10 14:42:16
nit: Please add braces around this loop.
Xiaocheng
2016/05/11 05:19:20
Done.
| |
| 646 FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogEnabled, | |
| 647 it.second.observer)); | |
| 648 | |
| 635 dispatching_to_observer_list_ = true; | 649 dispatching_to_observer_list_ = true; |
| 636 observer_list = enabled_state_observer_list_; | 650 observer_list = enabled_state_observer_list_; |
| 637 } | 651 } |
| 638 // Notify observers outside the lock in case they trigger trace events. | 652 // Notify observers outside the lock in case they trigger trace events. |
| 639 for (size_t i = 0; i < observer_list.size(); ++i) | 653 for (size_t i = 0; i < observer_list.size(); ++i) |
| 640 observer_list[i]->OnTraceLogEnabled(); | 654 observer_list[i]->OnTraceLogEnabled(); |
| 641 | 655 |
| 642 { | 656 { |
| 643 AutoLock lock(lock_); | 657 AutoLock lock(lock_); |
| 644 dispatching_to_observer_list_ = false; | 658 dispatching_to_observer_list_ = false; |
| (...skipping 64 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 709 | 723 |
| 710 trace_config_.Clear(); | 724 trace_config_.Clear(); |
| 711 subtle::NoBarrier_Store(&watch_category_, 0); | 725 subtle::NoBarrier_Store(&watch_category_, 0); |
| 712 watch_event_name_ = ""; | 726 watch_event_name_ = ""; |
| 713 UpdateCategoryGroupEnabledFlags(); | 727 UpdateCategoryGroupEnabledFlags(); |
| 714 AddMetadataEventsWhileLocked(); | 728 AddMetadataEventsWhileLocked(); |
| 715 | 729 |
| 716 // Remove metadata events so they will not get added to a subsequent trace. | 730 // Remove metadata events so they will not get added to a subsequent trace. |
| 717 metadata_events_.clear(); | 731 metadata_events_.clear(); |
| 718 | 732 |
| 733 for (auto& it : async_observers_) | |
| 734 it.second.task_runner->PostTask( | |
|
Sami
2016/05/10 14:42:16
nit: Please add braces around this loop.
Xiaocheng
2016/05/11 05:19:20
Done.
| |
| 735 FROM_HERE, Bind(&AsyncEnabledStateObserver::OnTraceLogDisabled, | |
| 736 it.second.observer)); | |
| 737 | |
| 719 dispatching_to_observer_list_ = true; | 738 dispatching_to_observer_list_ = true; |
| 720 std::vector<EnabledStateObserver*> observer_list = | 739 std::vector<EnabledStateObserver*> observer_list = |
| 721 enabled_state_observer_list_; | 740 enabled_state_observer_list_; |
| 722 | 741 |
| 723 { | 742 { |
| 724 // Dispatch to observers outside the lock in case the observer triggers a | 743 // Dispatch to observers outside the lock in case the observer triggers a |
| 725 // trace event. | 744 // trace event. |
| 726 AutoUnlock unlock(lock_); | 745 AutoUnlock unlock(lock_); |
| 727 for (size_t i = 0; i < observer_list.size(); ++i) | 746 for (size_t i = 0; i < observer_list.size(); ++i) |
| 728 observer_list[i]->OnTraceLogDisabled(); | 747 observer_list[i]->OnTraceLogDisabled(); |
| (...skipping 974 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1703 } | 1722 } |
| 1704 } | 1723 } |
| 1705 } | 1724 } |
| 1706 #endif // defined(OS_WIN) | 1725 #endif // defined(OS_WIN) |
| 1707 | 1726 |
// Default overhead estimate for ConvertableToTraceFormat subclasses that do
// not override this hook: attribute only the object's own size, filed under
// an "Unknown" bucket since the dynamic type is not known here.
void ConvertableToTraceFormat::EstimateTraceMemoryOverhead(
    TraceEventMemoryOverhead* overhead) {
  overhead->Add("ConvertableToTraceFormat(Unknown)", sizeof(*this));
}
| 1712 | 1731 |
| 1732 void TraceLog::AddAsyncEnabledStateObserver( | |
| 1733 WeakPtr<AsyncEnabledStateObserver> listener) { | |
| 1734 AutoLock lock(lock_); | |
| 1735 async_observers_.insert( | |
| 1736 std::make_pair(listener.get(), RegisteredAsyncObserver(listener))); | |
| 1737 } | |
| 1738 | |
// Unregisters |listener|; a no-op if it was never registered. Any
// already-posted notification is still safe because the observer is held via
// WeakPtr. Takes |lock_| to guard |async_observers_|.
void TraceLog::RemoveAsyncEnabledStateObserver(
    AsyncEnabledStateObserver* listener) {
  AutoLock lock(lock_);
  async_observers_.erase(listener);
}
| 1744 | |
| 1745 bool TraceLog::HasAsyncEnabledStateObserver( | |
| 1746 AsyncEnabledStateObserver* listener) const { | |
| 1747 AutoLock lock(lock_); | |
| 1748 return async_observers_.find(listener) != async_observers_.end(); | |
|
Primiano Tucci (use gerrit)
2016/05/10 14:20:26
I think that .count(listener) != 0 is, in some STL
Xiaocheng
2016/05/11 05:19:20
Let's use ContainsKey() which looks less hacky (an
| |
| 1749 } | |
| 1750 | |
| 1713 } // namespace trace_event | 1751 } // namespace trace_event |
| 1714 } // namespace base | 1752 } // namespace base |
| 1715 | 1753 |
| 1716 namespace trace_event_internal { | 1754 namespace trace_event_internal { |
| 1717 | 1755 |
| 1718 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( | 1756 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( |
| 1719 const char* category_group, | 1757 const char* category_group, |
| 1720 const char* name) { | 1758 const char* name) { |
| 1721 // The single atom works because for now the category_group can only be "gpu". | 1759 // The single atom works because for now the category_group can only be "gpu". |
| 1722 DCHECK_EQ(strcmp(category_group, "gpu"), 0); | 1760 DCHECK_EQ(strcmp(category_group, "gpu"), 0); |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 1744 } | 1782 } |
| 1745 | 1783 |
// Ends the scoped trace event: if the category is still enabled, updates the
// duration of the event recorded by the constructor via |event_handle_|.
ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, name_,
                                                event_handle_);
  }
}
| 1752 | 1790 |
| 1753 } // namespace trace_event_internal | 1791 } // namespace trace_event_internal |
| OLD | NEW |