Chromium Code Reviews

Side by Side Diff: base/debug/trace_event_impl.cc

Issue 109933006: Implement sampling profiler (chromium side change) (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: first CL (created 7 years ago)
// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/trace_event_impl.h"

#include <algorithm>

#include "base/base_switches.h"
#include "base/bind.h"

(...skipping 880 matching lines...)

      TRACE_EVENT_API_ATOMIC_LOAD(*bucket_data->bucket);
  if (!category_and_name)
    return;
  const char* const combined =
      reinterpret_cast<const char* const>(category_and_name);
  const char* category_group;
  const char* name;
  ExtractCategoryAndName(combined, &category_group, &name);
  TRACE_EVENT_API_ADD_TRACE_EVENT(TRACE_EVENT_PHASE_SAMPLE,
      TraceLog::GetCategoryGroupEnabled(category_group),
-     name, 0, 0, NULL, NULL, NULL, NULL, 0);
+     name, 0, 0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_SAMPLING);
nduca 2013/12/12 07:02:41 i'm a little confused on this side. what is the ca
haraken 2013/12/12 07:34:28 (1) The sampling thread gets the current category
dsinclair 2013/12/12 15:19:31 If we really want everything then why not set hte
haraken 2013/12/12 15:32:17 Then there arises another problem: If we use "*",
dsinclair 2013/12/13 15:55:57 Isn't adding the TRACE_EVENT_FLAG_SAMPLING the sam
}

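The sampling callback above decodes one atomic word into a category group and an event name. Below is a minimal, self-contained sketch of that decoding, assuming the bucket's word points at a string of the form "category\0name" (which is what the reinterpret_cast followed by ExtractCategoryAndName() implies); the helper, category, and event name are invented for illustration and are not part of this CL.

#include <cstdio>
#include <cstring>

// Illustrative stand-in for the ExtractCategoryAndName() call above: the
// category group runs up to the first '\0', the event name follows it.
static void SplitCategoryAndName(const char* combined,
                                 const char** category_group,
                                 const char** name) {
  *category_group = combined;
  *name = combined + std::strlen(combined) + 1;
}

int main() {
  // A literal with an embedded '\0', similar to what a sampled thread might
  // publish into the bucket (the category and name here are made up).
  const char* combined = "toplevel\0MessageLoop::RunTask";
  const char* category_group;
  const char* name;
  SplitCategoryAndName(combined, &category_group, &name);
  std::printf("category=%s name=%s\n", category_group, name);
  return 0;  // prints: category=toplevel name=MessageLoop::RunTask
}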
void TraceSamplingThread::GetSamples() {
  for (size_t i = 0; i < sample_buckets_.size(); ++i) {
    TraceBucketData* bucket_data = &sample_buckets_[i];
    bucket_data->callback.Run(bucket_data);
  }
}

void TraceSamplingThread::RegisterSampleBucket(

(...skipping 175 matching lines...)
void TraceLog::ThreadLocalEventBuffer::WillDestroyCurrentMessageLoop() {
  delete this;
}

void TraceLog::ThreadLocalEventBuffer::FlushWhileLocked() {
  if (!chunk_)
    return;

  trace_log_->lock_.AssertAcquired();
  if (trace_log_->CheckGeneration(generation_)) {
-   // Return the chunk to the buffer only if the generation matches,
+   // Return the chunk to the buffer only if the generation matches.
    trace_log_->logged_events_->ReturnChunk(chunk_index_, chunk_.Pass());
  }
  // Otherwise this method may be called from the destructor, or TraceLog will
  // find the generation mismatch and delete this buffer soon.
}

// static
TraceLog* TraceLog::GetInstance() {
  return Singleton<TraceLog, LeakySingletonTraits<TraceLog> >::get();
}

(...skipping 234 matching lines...)

  if (dispatching_to_observer_list_) {
    DLOG(ERROR)
        << "Cannot manipulate TraceLog::Enabled state from an observer.";
    return;
  }

  enabled_ = false;

  if (sampling_thread_.get()) {
+   // TODO(haraken): Don't call Platform::Join from the UI thread.
dsinclair 2013/12/11 15:55:04 Is this todo to be done before commit? Or for late
haraken 2013/12/12 00:48:47 Sure, I'll fix before commit.
haraken 2013/12/12 01:54:52 Question: Would the right fix for this be to chang
dsinclair 2013/12/13 15:55:57 I believe this was handled in a separate CL correc
+   base::ThreadRestrictions::SetIOAllowed(true);
+
    // Stop the sampling thread.
    sampling_thread_->Stop();
    lock_.Release();
    PlatformThread::Join(sampling_thread_handle_);
    lock_.Acquire();
    sampling_thread_handle_ = PlatformThreadHandle();
    sampling_thread_.reset();
+
+   base::ThreadRestrictions::SetIOAllowed(false);
  }
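One detail worth calling out in the shutdown sequence above: lock_ is released around PlatformThread::Join(), presumably because the sampling thread may itself need that lock (its callback adds trace events), so joining while still holding it could deadlock. Here is a standalone sketch of the same release-join-reacquire pattern using only standard-library types; every name in it is invented for the example and none of it is Chromium API.

#include <atomic>
#include <cstdio>
#include <mutex>
#include <thread>

// Invented names; only the shape of the sequence mirrors the CL: flip state
// under the lock, release it, join, then reacquire to clean up.
std::mutex g_lock;
std::atomic<bool> g_stop(false);

void SamplingLoop() {
  while (!g_stop.load()) {
    std::lock_guard<std::mutex> hold(g_lock);  // same lock the owner holds
    // ... read sample buckets, append trace events ...
  }
}

int main() {
  std::thread sampler(SamplingLoop);

  g_lock.lock();
  g_stop = true;      // "Stop the sampling thread."
  g_lock.unlock();    // release before joining so the sampler can finish
  sampler.join();     // analogous to PlatformThread::Join()
  g_lock.lock();
  // ... clear the thread handle and pointer while locked again ...
  g_lock.unlock();

  std::printf("sampling thread joined\n");
  return 0;
}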
  category_filter_.Clear();
  subtle::NoBarrier_Store(&watch_category_, 0);
  watch_event_name_ = "";
  UpdateCategoryGroupEnabledFlags();
  AddMetadataEventsWhileLocked();

  dispatching_to_observer_list_ = true;
  std::vector<EnabledStateObserver*> observer_list =

(...skipping 57 matching lines...)
  else if (options & ECHO_TO_CONSOLE)
    return new TraceBufferRingBuffer(kEchoToConsoleTraceEventBufferChunks);
  return new TraceBufferVector();
}

TraceEvent* TraceLog::AddEventToThreadSharedChunkWhileLocked(
    TraceEventHandle* handle, bool check_buffer_is_full) {
  lock_.AssertAcquired();

  if (thread_shared_chunk_ && thread_shared_chunk_->IsFull()) {
+   // Return the chunk to the buffer only if the generation matches.
    logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                thread_shared_chunk_.Pass());
  }

  if (!thread_shared_chunk_) {
    thread_shared_chunk_ = logged_events_->GetChunk(
        &thread_shared_chunk_index_);
    if (check_buffer_is_full)
      CheckIfBufferIsFullWhileLocked();
  }

(...skipping 196 matching lines...)
      logged_events_->ReturnChunk(thread_shared_chunk_index_,
                                  thread_shared_chunk_.Pass());
    }
    previous_logged_events = logged_events_->CloneForIteration().Pass();
  }  // release lock

  ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(),
                                  flush_output_callback);
}

+ int TraceLog::NextGeneration()
+ {
+   thread_shared_chunk_.reset();
+   thread_shared_chunk_index_ = 0;
+   return static_cast<int>(subtle::NoBarrier_AtomicIncrement(&generation_, 1));
Xianzhu 2013/12/11 17:53:42 Thanks for fixing this! Would it be even better t
haraken 2013/12/12 00:48:47 Sure, will do!
+ }
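NextGeneration() above is the counterpart of the CheckGeneration() call in FlushWhileLocked() earlier in this file: a chunk handed out under an older generation is dropped rather than returned. A simplified, self-contained model of that bookkeeping follows; MiniLog and its methods are invented for illustration.

#include <atomic>
#include <cstdio>

// Invented, simplified model of the generation counter: bumping it
// invalidates chunks handed out under an earlier generation.
class MiniLog {
 public:
  int generation() const { return generation_.load(); }
  int NextGeneration() { return generation_.fetch_add(1) + 1; }
  bool CheckGeneration(int gen) const { return gen == generation(); }

 private:
  std::atomic<int> generation_{0};
};

int main() {
  MiniLog log;
  int chunk_generation = log.generation();  // remembered when a chunk is taken
  log.NextGeneration();                     // tracing restarted elsewhere
  if (log.CheckGeneration(chunk_generation))
    std::printf("generation matches: return the chunk to the buffer\n");
  else
    std::printf("stale generation: drop the chunk instead\n");  // taken here
  return 0;
}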
TraceEventHandle TraceLog::AddTraceEvent(
    char phase,
    const unsigned char* category_group_enabled,
    const char* name,
    unsigned long long id,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,

(...skipping 14 matching lines...)
    unsigned long long id,
    int thread_id,
    const TimeTicks& timestamp,
    int num_args,
    const char** arg_names,
    const unsigned char* arg_types,
    const unsigned long long* arg_values,
    const scoped_refptr<ConvertableToTraceFormat>* convertable_values,
    unsigned char flags) {
  TraceEventHandle handle = { 0, 0, 0 };
- if (!*category_group_enabled)
+
+ // We enable all categories in sampling tracing, since it doesn't make sense
dsinclair 2013/12/11 15:55:04 Does this include disabled-by-default- categories?
haraken 2013/12/12 00:48:47 I think the answer is yes. Given that the sampling
dsinclair 2013/12/12 15:19:31 The problem with having disabled-by-default- is th
haraken 2013/12/12 15:32:17 Now I agree that we shouldn't enable all categorie
dsinclair 2013/12/13 15:55:57 I think this was my own confusion with how samplin
+ // to filter sample events by categories.
+ if (!(flags & TRACE_EVENT_FLAG_SAMPLING) && !*category_group_enabled)
dsinclair 2013/12/11 15:55:04 If we always enable all categories then why does t
haraken 2013/12/12 00:48:47 I think that's because the monitoring mode is goin
dsinclair 2013/12/12 15:19:31 Why can't I use monitoring mode to say, tell me th
haraken 2013/12/12 15:32:17 Sounds reasonable. We want to have category filter
dsinclair 2013/12/13 15:55:57 I think I'm getting confused on how this works. Pl
    return handle;
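The new early-return above only filters out events that are neither sample events nor in an enabled category group. A tiny self-contained model of that guard follows; the constant and values below are stand-ins, not the real TRACE_EVENT_FLAG_SAMPLING or category-enabled bits.

#include <cstdio>

// Stand-in value for illustration only; the real flag lives in trace_event.h.
const unsigned char kFlagSampling = 1 << 3;  // pretend TRACE_EVENT_FLAG_SAMPLING

// Mirrors the early-return added above: an event is dropped only when it is
// not a sample event and its category group is not enabled.
bool PassesCategoryGuard(unsigned char flags,
                         unsigned char category_group_enabled) {
  return (flags & kFlagSampling) || category_group_enabled;
}

int main() {
  std::printf("sample event, category off: %d\n",
              PassesCategoryGuard(kFlagSampling, 0));
  std::printf("normal event, category off: %d\n", PassesCategoryGuard(0, 0));
  std::printf("normal event, category on:  %d\n", PassesCategoryGuard(0, 1));
  return 0;  // prints 1, 0, 1
}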
  // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when
  // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) ->
  // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ...
  if (thread_is_in_trace_event_.Get())
    return handle;

  AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_);

(...skipping 55 matching lines...)
        if (!found) {
          if (existing_names.size())
            existing_name->second.push_back(',');
          existing_name->second.append(new_name);
        }
      }
    }
  }

  std::string console_message;
- if ((*category_group_enabled & ENABLED_FOR_RECORDING)) {
+ if ((flags & TRACE_EVENT_FLAG_SAMPLING) ||
+     (*category_group_enabled & ENABLED_FOR_RECORDING)) {
    OptionalAutoLock lock(lock_);

    TraceEvent* trace_event = NULL;
    if (thread_local_event_buffer) {
      trace_event = thread_local_event_buffer->AddTraceEvent(&handle);
    } else {
      lock.EnsureAcquired();
      trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true);
    }

(...skipping 529 matching lines...)

}

ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
  if (*category_group_enabled_) {
    TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                name_, event_handle_);
  }
}

} // namespace trace_event_internal
