Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/debug/trace_event_impl.h" | 5 #include "base/debug/trace_event_impl.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 | 8 |
| 9 #include "base/base_switches.h" | 9 #include "base/base_switches.h" |
| 10 #include "base/bind.h" | 10 #include "base/bind.h" |
| (...skipping 55 matching lines...) | |
| 66 const char kEnableSampling[] = "enable-sampling"; | 66 const char kEnableSampling[] = "enable-sampling"; |
| 67 const char kEnableSystrace[] = "enable-systrace"; | 67 const char kEnableSystrace[] = "enable-systrace"; |
| 68 | 68 |
| 69 // Controls the number of trace events we will buffer in-memory | 69 // Controls the number of trace events we will buffer in-memory |
| 70 // before throwing them away. | 70 // before throwing them away. |
| 71 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize; | 71 const size_t kTraceBufferChunkSize = TraceBufferChunk::kTraceBufferChunkSize; |
| 72 const size_t kTraceEventVectorBigBufferChunks = | 72 const size_t kTraceEventVectorBigBufferChunks = |
| 73 512000000 / kTraceBufferChunkSize; | 73 512000000 / kTraceBufferChunkSize; |
| 74 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize; | 74 const size_t kTraceEventVectorBufferChunks = 256000 / kTraceBufferChunkSize; |
| 75 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; | 75 const size_t kTraceEventRingBufferChunks = kTraceEventVectorBufferChunks / 4; |
| 76 const size_t kTraceEventBatchChunks = 1000 / kTraceBufferChunkSize; | 76 const size_t kTraceEventBatch = 1000; |
| 77 // Can store results for 30 seconds with 1 ms sampling interval. | 77 // Can store results for 30 seconds with 1 ms sampling interval. |
| 78 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize; | 78 const size_t kMonitorTraceEventBufferChunks = 30000 / kTraceBufferChunkSize; |
| 79 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. | 79 // ECHO_TO_CONSOLE needs a small buffer to hold the unfinished COMPLETE events. |
| 80 const size_t kEchoToConsoleTraceEventBufferChunks = 256; | 80 const size_t kEchoToConsoleTraceEventBufferChunks = 256; |
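To put the chunk counts above in concrete terms, here is a small standalone sketch of the same arithmetic. It assumes a chunk size of 64 events (our reading of TraceBufferChunk::kTraceBufferChunkSize, which is defined in the header rather than in this file); the snippet is illustrative only and not part of the patch.

```cpp
#include <cstddef>
#include <cstdio>

// Assumed value of TraceBufferChunk::kTraceBufferChunkSize: 64 events/chunk.
constexpr size_t kChunkSize = 64;

// Same arithmetic as the constants above: an event budget divided by the
// chunk size yields a chunk count.
constexpr size_t kBigBufferChunks = 512000000 / kChunkSize;    // ~512M events.
constexpr size_t kVectorBufferChunks = 256000 / kChunkSize;    // ~256K events.
constexpr size_t kRingBufferChunks = kVectorBufferChunks / 4;  // ~64K events.
constexpr size_t kMonitorBufferChunks = 30000 / kChunkSize;    // 30 s at 1 ms sampling.

int main() {
  std::printf("ring buffer: ~%zu events, monitor buffer: ~%zu events\n",
              kRingBufferChunks * kChunkSize,
              kMonitorBufferChunks * kChunkSize);
  return 0;
}
```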
| 81 | 81 |
| 82 const int kThreadFlushTimeoutMs = 3000; | 82 const int kThreadFlushTimeoutMs = 3000; |
| 83 | 83 |
| 84 #if !defined(OS_NACL) | 84 #if !defined(OS_NACL) |
| 85 // These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575. | 85 // These categories will cause deadlock when ECHO_TO_CONSOLE. crbug.com/325575. |
| 86 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task"; | 86 const char kEchoToConsoleCategoryFilter[] = "-ipc,-task"; |
| (...skipping 1644 matching lines...) | |
| 1731 flush_message_loop_proxy_->PostDelayedTask( | 1731 flush_message_loop_proxy_->PostDelayedTask( |
| 1732 FROM_HERE, | 1732 FROM_HERE, |
| 1733 Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation), | 1733 Bind(&TraceLog::OnFlushTimeout, Unretained(this), generation), |
| 1734 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs)); | 1734 TimeDelta::FromMilliseconds(kThreadFlushTimeoutMs)); |
| 1735 return; | 1735 return; |
| 1736 } | 1736 } |
| 1737 | 1737 |
| 1738 FinishFlush(generation); | 1738 FinishFlush(generation); |
| 1739 } | 1739 } |
| 1740 | 1740 |
| 1741 struct TraceEventTimestampComparator { | |
| 1742 bool operator()(const TraceEvent* event1, const TraceEvent* event2) const { | |
| 1743 return event1->timestamp() < event2->timestamp(); | |
| 1744 } | |
| 1745 }; | |
| 1746 | |
| 1741 void TraceLog::ConvertTraceEventsToTraceFormat( | 1747 void TraceLog::ConvertTraceEventsToTraceFormat( |
| 1742 scoped_ptr<TraceBuffer> logged_events, | 1748 scoped_ptr<TraceBuffer> logged_events, |
| 1743 const TraceLog::OutputCallback& flush_output_callback) { | 1749 const TraceLog::OutputCallback& flush_output_callback) { |
| 1744 | 1750 |
| 1745 if (flush_output_callback.is_null()) | 1751 if (flush_output_callback.is_null()) |
| 1746 return; | 1752 return; |
| 1747 | 1753 |
| 1748 // The callback need to be called at least once even if there is no events | 1754 // Note that the TraceEvent frontend expects the trace events to be sorted. |
|
| dsinclair (2015/01/27 14:43:30): This seems like it's adding a lot of extra work. W | |
| Benoit L (2015/02/09 16:34:00): Done. | |
| 1749 // to let the caller know the completion of flush. | 1755 // An extra sort is needed here due to clients that might break the order by |
| 1756 // manually pushing additional trace events at random times (see uses of | |
| 1757 // AddCustomTraceEvent()). | |
| 1758 std::vector<const TraceEvent*> trace_events; | |
| 1759 | |
| 1760 for (size_t buffer_index = 0;; ++buffer_index) { | |
| 1761 const TraceBufferChunk* chunk = logged_events->NextChunk(); | |
| 1762 if (!chunk) | |
| 1763 break; | |
| 1764 | |
| 1765 for (size_t chunk_index = 0; chunk_index < chunk->size(); ++chunk_index) | |
| 1766 trace_events.push_back(chunk->GetEventAt(chunk_index)); | |
| 1767 } | |
| 1768 | |
| 1769 std::stable_sort(trace_events.begin(), trace_events.end(), | |
|
| dsinclair (2015/01/27 14:43:30): This seems like it could be expensive if we have t | |
| Benoit L (2015/02/09 16:34:00): Done. | |
| 1770 TraceEventTimestampComparator()); | |
| 1771 | |
| 1772 // The callback needs to be called at least once even if there are no events, | |
| 1773 // to let the caller know that the flush has completed. | |
| 1750 bool has_more_events = true; | 1774 bool has_more_events = true; |
| 1775 std::vector<const TraceEvent*>::const_iterator it = trace_events.begin(); | |
|
| dsinclair (2015/01/27 14:43:30): The change to use an iterator seems unrelated to t | |
| Benoit L (2015/02/09 16:34:01): Done. | |
| 1776 | |
| 1751 do { | 1777 do { |
| 1752 scoped_refptr<RefCountedString> json_events_str_ptr = | 1778 const scoped_refptr<RefCountedString> json_events_str_ptr = |
| 1753 new RefCountedString(); | 1779 new RefCountedString(); |
| 1754 | 1780 |
| 1755 for (size_t i = 0; i < kTraceEventBatchChunks; ++i) { | 1781 for (size_t i = 0; i < kTraceEventBatch; ++i) { |
| 1756 const TraceBufferChunk* chunk = logged_events->NextChunk(); | 1782 if (it == trace_events.end()) { |
| 1757 if (!chunk) { | |
| 1758 has_more_events = false; | 1783 has_more_events = false; |
| 1759 break; | 1784 break; |
| 1760 } | 1785 } |
| 1761 for (size_t j = 0; j < chunk->size(); ++j) { | 1786 |
| 1762 if (i > 0 || j > 0) | 1787 (*it)->AppendAsJSON(&json_events_str_ptr->data()); |
| 1763 json_events_str_ptr->data().append(",\n"); | 1788 ++it; |
| 1764 chunk->GetEventAt(j)->AppendAsJSON(&(json_events_str_ptr->data())); | 1789 |
| 1765 } | 1790 if (it != trace_events.end() && i + 1 < kTraceEventBatch) |
| 1791 json_events_str_ptr->data().append(",\n"); | |
| 1766 } | 1792 } |
| 1767 | 1793 |
| 1768 flush_output_callback.Run(json_events_str_ptr, has_more_events); | 1794 flush_output_callback.Run(json_events_str_ptr, has_more_events); |
| 1769 } while (has_more_events); | 1795 } while (has_more_events); |
| 1770 } | 1796 } |
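To make the new flow easier to follow outside the diff, here is a minimal standalone sketch (not Chromium code; the type and function names are invented for illustration) of the same collect, stable-sort, and batch pattern. std::stable_sort is used so events with equal timestamps keep the order in which the buffer produced them, and the output callback still runs at least once even when there are no events.

```cpp
#include <algorithm>
#include <cstddef>
#include <functional>
#include <string>
#include <vector>

struct Event {
  long long timestamp;
  std::string json;
};

// Serializes |events| oldest-first, invoking |emit| once per batch and at
// least once overall, mirroring ConvertTraceEventsToTraceFormat() above.
void FlushSorted(std::vector<const Event*> events,
                 const std::function<void(const std::string&, bool)>& emit,
                 size_t batch_size) {
  std::stable_sort(events.begin(), events.end(),
                   [](const Event* a, const Event* b) {
                     return a->timestamp < b->timestamp;
                   });
  bool has_more = true;
  auto it = events.begin();
  do {
    std::string batch;
    for (size_t i = 0; i < batch_size; ++i) {
      if (it == events.end()) {
        has_more = false;
        break;
      }
      batch += (*it)->json;
      ++it;
      if (it != events.end() && i + 1 < batch_size)
        batch += ",\n";
    }
    emit(batch, has_more);  // Called even when |events| is empty.
  } while (has_more);
}
```

As in the old code, no separator is emitted at a batch boundary, so joining the per-callback strings presumably falls to the receiver of the callback.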
| 1771 | 1797 |
| 1772 void TraceLog::FinishFlush(int generation) { | 1798 void TraceLog::FinishFlush(int generation) { |
| 1773 scoped_ptr<TraceBuffer> previous_logged_events; | 1799 scoped_ptr<TraceBuffer> previous_logged_events; |
| 1774 OutputCallback flush_output_callback; | 1800 OutputCallback flush_output_callback; |
| 1775 | 1801 |
| (...skipping 72 matching lines...) | |
| 1848 logged_events_->ReturnChunk(thread_shared_chunk_index_, | 1874 logged_events_->ReturnChunk(thread_shared_chunk_index_, |
| 1849 thread_shared_chunk_.Pass()); | 1875 thread_shared_chunk_.Pass()); |
| 1850 } | 1876 } |
| 1851 previous_logged_events = logged_events_->CloneForIteration().Pass(); | 1877 previous_logged_events = logged_events_->CloneForIteration().Pass(); |
| 1852 } // release lock | 1878 } // release lock |
| 1853 | 1879 |
| 1854 ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(), | 1880 ConvertTraceEventsToTraceFormat(previous_logged_events.Pass(), |
| 1855 flush_output_callback); | 1881 flush_output_callback); |
| 1856 } | 1882 } |
| 1857 | 1883 |
| 1884 TraceEventHandle TraceLog::AddCustomTraceEvent( | |
| 1885 char phase, | |
| 1886 const unsigned char* category_group_enabled, | |
| 1887 const char* name, | |
| 1888 int thread_id, | |
| 1889 const TimeTicks& timestamp, | |
| 1890 const TimeTicks& thread_time) { | |
| 1891 return AddTraceEventInternal( | |
| 1892 phase, category_group_enabled, name, trace_event_internal::kNoEventId, | |
| 1893 thread_id, timestamp, thread_time, 0, NULL, NULL, NULL, NULL, | |
| 1894 TRACE_EVENT_FLAG_COPY | TRACE_EVENT_FLAG_DONT_REPORT_OVERHEAD); | |
| 1895 } | |
| 1896 | |
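A hedged usage sketch for the new entry point. Only AddCustomTraceEvent() itself comes from this patch; the caller, the "deferred" category, and the helper name below are made up for illustration.

```cpp
#include "base/debug/trace_event.h"
#include "base/time/time.h"

// Hypothetical caller: inject an event that was captured earlier, possibly on
// another thread, with its original wall-clock and thread timestamps.
void EmitDeferredEvent(const char* name,
                       int thread_id,
                       const base::TimeTicks& timestamp,
                       const base::TimeTicks& thread_time) {
  const unsigned char* enabled =
      TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("deferred");
  if (*enabled) {
    base::debug::TraceLog::GetInstance()->AddCustomTraceEvent(
        TRACE_EVENT_PHASE_INSTANT, enabled, name, thread_id, timestamp,
        thread_time);
  }
}
```

Because the implementation forces TRACE_EVENT_FLAG_COPY, the name passed in does not have to outlive the call, and TRACE_EVENT_FLAG_DONT_REPORT_OVERHEAD keeps these injected events out of the per-thread overhead accounting (see the ReportOverhead() change further down).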
| 1858 void TraceLog::UseNextTraceBuffer() { | 1897 void TraceLog::UseNextTraceBuffer() { |
| 1859 logged_events_.reset(CreateTraceBuffer()); | 1898 logged_events_.reset(CreateTraceBuffer()); |
| 1860 subtle::NoBarrier_AtomicIncrement(&generation_, 1); | 1899 subtle::NoBarrier_AtomicIncrement(&generation_, 1); |
| 1861 thread_shared_chunk_.reset(); | 1900 thread_shared_chunk_.reset(); |
| 1862 thread_shared_chunk_index_ = 0; | 1901 thread_shared_chunk_index_ = 0; |
| 1863 } | 1902 } |
| 1864 | 1903 |
| 1865 TraceEventHandle TraceLog::AddTraceEvent( | 1904 TraceEventHandle TraceLog::AddTraceEvent( |
| 1866 char phase, | 1905 char phase, |
| 1867 const unsigned char* category_group_enabled, | 1906 const unsigned char* category_group_enabled, |
| (...skipping 20 matching lines...) | |
| 1888 const char* name, | 1927 const char* name, |
| 1889 unsigned long long id, | 1928 unsigned long long id, |
| 1890 int thread_id, | 1929 int thread_id, |
| 1891 const TimeTicks& timestamp, | 1930 const TimeTicks& timestamp, |
| 1892 int num_args, | 1931 int num_args, |
| 1893 const char** arg_names, | 1932 const char** arg_names, |
| 1894 const unsigned char* arg_types, | 1933 const unsigned char* arg_types, |
| 1895 const unsigned long long* arg_values, | 1934 const unsigned long long* arg_values, |
| 1896 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, | 1935 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, |
| 1897 unsigned char flags) { | 1936 unsigned char flags) { |
| 1937 return AddTraceEventInternal( | |
| 1938 phase, category_group_enabled, name, id, thread_id, timestamp, | |
| 1939 ThreadNow(), num_args, arg_names, arg_types, arg_values, | |
| 1940 convertable_values, flags); | |
| 1941 } | |
| 1942 | |
| 1943 TraceEventHandle TraceLog::AddTraceEventInternal( | |
| 1944 char phase, | |
| 1945 const unsigned char* category_group_enabled, | |
| 1946 const char* name, | |
| 1947 unsigned long long id, | |
| 1948 int thread_id, | |
| 1949 const TimeTicks& timestamp, | |
| 1950 const TimeTicks& thread_time, | |
| 1951 int num_args, | |
| 1952 const char** arg_names, | |
| 1953 const unsigned char* arg_types, | |
| 1954 const unsigned long long* arg_values, | |
| 1955 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, | |
| 1956 unsigned char flags) { | |
| 1898 TraceEventHandle handle = { 0, 0, 0 }; | 1957 TraceEventHandle handle = { 0, 0, 0 }; |
| 1899 if (!*category_group_enabled) | 1958 if (!*category_group_enabled) |
| 1900 return handle; | 1959 return handle; |
| 1901 | 1960 |
| 1902 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when | 1961 // Avoid re-entrance of AddTraceEvent. This may happen in GPU process when |
| 1903 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> | 1962 // ECHO_TO_CONSOLE is enabled: AddTraceEvent -> LOG(ERROR) -> |
| 1904 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... | 1963 // GpuProcessLogMessageHandler -> PostPendingTask -> TRACE_EVENT ... |
| 1905 if (thread_is_in_trace_event_.Get()) | 1964 if (thread_is_in_trace_event_.Get()) |
| 1906 return handle; | 1965 return handle; |
| 1907 | 1966 |
| 1908 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); | 1967 AutoThreadLocalBoolean thread_is_in_trace_event(&thread_is_in_trace_event_); |
| 1909 | 1968 |
| 1910 DCHECK(name); | 1969 DCHECK(name); |
| 1911 | 1970 |
| 1912 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) | 1971 if (flags & TRACE_EVENT_FLAG_MANGLE_ID) |
| 1913 id ^= process_id_hash_; | 1972 id ^= process_id_hash_; |
| 1914 | 1973 |
| 1915 TimeTicks now = OffsetTimestamp(timestamp); | 1974 TimeTicks now = OffsetTimestamp(timestamp); |
| 1916 TimeTicks thread_now = ThreadNow(); | |
| 1917 | 1975 |
| 1918 ThreadLocalEventBuffer* thread_local_event_buffer = NULL; | 1976 ThreadLocalEventBuffer* thread_local_event_buffer = NULL; |
| 1919 // A ThreadLocalEventBuffer needs the message loop | 1977 // A ThreadLocalEventBuffer needs the message loop |
| 1920 // - to know when the thread exits; | 1978 // - to know when the thread exits; |
| 1921 // - to handle the final flush. | 1979 // - to handle the final flush. |
| 1922 // For a thread without a message loop, or one whose message loop may be | 1980 // For a thread without a message loop, or one whose message loop may be |
| 1923 // blocked, the trace events are added into the main buffer directly. | 1981 // blocked, the trace events are added into the main buffer directly. |
| 1924 if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) { | 1982 if (!thread_blocks_message_loop_.Get() && MessageLoop::current()) { |
| 1925 thread_local_event_buffer = thread_local_event_buffer_.Get(); | 1983 thread_local_event_buffer = thread_local_event_buffer_.Get(); |
| 1926 if (thread_local_event_buffer && | 1984 if (thread_local_event_buffer && |
| (...skipping 51 matching lines...) | |
| 1978 | 2036 |
| 1979 TraceEvent* trace_event = NULL; | 2037 TraceEvent* trace_event = NULL; |
| 1980 if (thread_local_event_buffer) { | 2038 if (thread_local_event_buffer) { |
| 1981 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); | 2039 trace_event = thread_local_event_buffer->AddTraceEvent(&handle); |
| 1982 } else { | 2040 } else { |
| 1983 lock.EnsureAcquired(); | 2041 lock.EnsureAcquired(); |
| 1984 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); | 2042 trace_event = AddEventToThreadSharedChunkWhileLocked(&handle, true); |
| 1985 } | 2043 } |
| 1986 | 2044 |
| 1987 if (trace_event) { | 2045 if (trace_event) { |
| 1988 trace_event->Initialize(thread_id, now, thread_now, phase, | 2046 trace_event->Initialize(thread_id, now, thread_time, phase, |
| 1989 category_group_enabled, name, id, | 2047 category_group_enabled, name, id, |
| 1990 num_args, arg_names, arg_types, arg_values, | 2048 num_args, arg_names, arg_types, arg_values, |
| 1991 convertable_values, flags); | 2049 convertable_values, flags); |
| 1992 | 2050 |
| 1993 #if defined(OS_ANDROID) | 2051 #if defined(OS_ANDROID) |
| 1994 trace_event->SendToATrace(); | 2052 trace_event->SendToATrace(); |
| 1995 #endif | 2053 #endif |
| 1996 } | 2054 } |
| 1997 | 2055 |
| 1998 if (trace_options() & kInternalEchoToConsole) { | 2056 if (trace_options() & kInternalEchoToConsole) { |
| (...skipping 28 matching lines...) | |
| 2027 event_callback(now, | 2085 event_callback(now, |
| 2028 phase == TRACE_EVENT_PHASE_COMPLETE ? | 2086 phase == TRACE_EVENT_PHASE_COMPLETE ? |
| 2029 TRACE_EVENT_PHASE_BEGIN : phase, | 2087 TRACE_EVENT_PHASE_BEGIN : phase, |
| 2030 category_group_enabled, name, id, | 2088 category_group_enabled, name, id, |
| 2031 num_args, arg_names, arg_types, arg_values, | 2089 num_args, arg_names, arg_types, arg_values, |
| 2032 flags); | 2090 flags); |
| 2033 } | 2091 } |
| 2034 } | 2092 } |
| 2035 | 2093 |
| 2036 if (thread_local_event_buffer) | 2094 if (thread_local_event_buffer) |
| 2037 thread_local_event_buffer->ReportOverhead(now, thread_now); | 2095 if (!(flags & TRACE_EVENT_FLAG_DONT_REPORT_OVERHEAD)) |
|
| dsinclair (2015/01/27 14:43:30): Why would we want to skip reporting the overhead? | |
| Benoit L (2015/02/09 16:34:01): Done. | |
| 2096 thread_local_event_buffer->ReportOverhead(now, thread_time); | |
| 2038 | 2097 |
| 2039 return handle; | 2098 return handle; |
| 2040 } | 2099 } |
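For readers unfamiliar with the re-entrancy guard near the top of AddTraceEventInternal(): thread_is_in_trace_event_ is a thread-local boolean set for the duration of the call via a scoped helper, so a nested AddTraceEvent (e.g. triggered through LOG(ERROR) under ECHO_TO_CONSOLE) sees the flag and returns early. A simplified sketch of that pattern, under the assumption that the helper behaves like a standard scoped flag (the real AutoThreadLocalBoolean is defined elsewhere in this file and is not shown in this hunk):

```cpp
#include "base/logging.h"
#include "base/threading/thread_local.h"

// Simplified sketch of a scoped thread-local flag: set on entry, cleared on
// scope exit, so re-entrant calls on the same thread can detect the recursion.
class ScopedThreadLocalFlag {
 public:
  explicit ScopedThreadLocalFlag(base::ThreadLocalBoolean* flag) : flag_(flag) {
    DCHECK(!flag_->Get());
    flag_->Set(true);
  }
  ~ScopedThreadLocalFlag() { flag_->Set(false); }

 private:
  base::ThreadLocalBoolean* flag_;
};
```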
| 2041 | 2100 |
| 2042 // May be called when a COMPLETE event ends and the unfinished event has been | 2101 // May be called when a COMPLETE event ends and the unfinished event has been |
| 2043 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). | 2102 // recycled (phase == TRACE_EVENT_PHASE_END and trace_event == NULL). |
| 2044 std::string TraceLog::EventToConsoleMessage(unsigned char phase, | 2103 std::string TraceLog::EventToConsoleMessage(unsigned char phase, |
| 2045 const TimeTicks& timestamp, | 2104 const TimeTicks& timestamp, |
| 2046 TraceEvent* trace_event) { | 2105 TraceEvent* trace_event) { |
| 2047 AutoLock thread_info_lock(thread_info_lock_); | 2106 AutoLock thread_info_lock(thread_info_lock_); |
| (...skipping 537 matching lines...) | |
| 2585 } | 2644 } |
| 2586 | 2645 |
| 2587 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { | 2646 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { |
| 2588 if (*category_group_enabled_) { | 2647 if (*category_group_enabled_) { |
| 2589 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, | 2648 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_, |
| 2590 name_, event_handle_); | 2649 name_, event_handle_); |
| 2591 } | 2650 } |
| 2592 } | 2651 } |
| 2593 | 2652 |
| 2594 } // namespace trace_event_internal | 2653 } // namespace trace_event_internal |