Chromium Code Reviews

Side by Side Diff: base/debug/trace_event_impl.cc

Issue 66193005: Independently enable recording and event callback (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src
Patch Set: Created 7 years, 1 month ago
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/debug/trace_event_impl.h" 5 #include "base/debug/trace_event_impl.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 8
9 #include "base/base_switches.h" 9 #include "base/base_switches.h"
10 #include "base/bind.h" 10 #include "base/bind.h"
(...skipping 1131 matching lines...)
1142 NOTREACHED(); // Unknown option provided. 1142 NOTREACHED(); // Unknown option provided.
1143 } 1143 }
1144 } 1144 }
1145 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY)) 1145 if (!(ret & RECORD_UNTIL_FULL) && !(ret & RECORD_CONTINUOUSLY))
1146 ret |= RECORD_UNTIL_FULL; // Default when no options are specified. 1146 ret |= RECORD_UNTIL_FULL; // Default when no options are specified.
1147 1147
1148 return static_cast<Options>(ret); 1148 return static_cast<Options>(ret);
1149 } 1149 }
1150 1150
1151 TraceLog::TraceLog() 1151 TraceLog::TraceLog()
1152 : enable_count_(0), 1152 : enabled_(false),
1153 num_traces_recorded_(0), 1153 num_traces_recorded_(0),
1154 buffer_is_full_(0), 1154 buffer_is_full_(0),
1155 event_callback_(0), 1155 event_callback_(0),
1156 dispatching_to_observer_list_(false), 1156 dispatching_to_observer_list_(false),
1157 process_sort_index_(0), 1157 process_sort_index_(0),
1158 process_id_hash_(0), 1158 process_id_hash_(0),
1159 process_id_(0), 1159 process_id_(0),
1160 watch_category_(0), 1160 watch_category_(0),
1161 trace_options_(RECORD_UNTIL_FULL), 1161 trace_options_(RECORD_UNTIL_FULL),
1162 sampling_thread_handle_(0), 1162 sampling_thread_handle_(0),
1163 category_filter_(CategoryFilter::kDefaultCategoryFilterString), 1163 category_filter_(CategoryFilter::kDefaultCategoryFilterString),
1164 event_callback_category_filter_(
1165 CategoryFilter::kDefaultCategoryFilterString),
1164 thread_shared_chunk_index_(0), 1166 thread_shared_chunk_index_(0),
1165 generation_(0) { 1167 generation_(0) {
1166 // Trace is enabled or disabled on one thread while other threads are 1168 // Trace is enabled or disabled on one thread while other threads are
1167 // accessing the enabled flag. We don't care whether edge-case events are 1169 // accessing the enabled flag. We don't care whether edge-case events are
1168 // traced or not, so we allow races on the enabled flag to keep the trace 1170 // traced or not, so we allow races on the enabled flag to keep the trace
1169 // macros fast. 1171 // macros fast.
1170 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots: 1172 // TODO(jbates): ANNOTATE_BENIGN_RACE_SIZED crashes windows TSAN bots:
1171 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled, 1173 // ANNOTATE_BENIGN_RACE_SIZED(g_category_group_enabled,
1172 // sizeof(g_category_group_enabled), 1174 // sizeof(g_category_group_enabled),
1173 // "trace_event category enabled"); 1175 // "trace_event category enabled");
(...skipping 46 matching lines...)
1220 DCHECK(category_ptr >= category_begin && 1222 DCHECK(category_ptr >= category_begin &&
1221 category_ptr < reinterpret_cast<uintptr_t>( 1223 category_ptr < reinterpret_cast<uintptr_t>(
1222 g_category_group_enabled + MAX_CATEGORY_GROUPS)) << 1224 g_category_group_enabled + MAX_CATEGORY_GROUPS)) <<
1223 "out of bounds category pointer"; 1225 "out of bounds category pointer";
1224 uintptr_t category_index = 1226 uintptr_t category_index =
1225 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]); 1227 (category_ptr - category_begin) / sizeof(g_category_group_enabled[0]);
1226 return g_category_groups[category_index]; 1228 return g_category_groups[category_index];
1227 } 1229 }
1228 1230
1229 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) { 1231 void TraceLog::UpdateCategoryGroupEnabledFlag(int category_index) {
1230 g_category_group_enabled[category_index] = 1232 unsigned char enabled_flag = 0;
1231 enable_count_ && 1233 const char* category_group = g_category_groups[category_index];
1232 category_filter_.IsCategoryGroupEnabled( 1234 if (enabled_ && category_filter_.IsCategoryGroupEnabled(category_group))
1233 g_category_groups[category_index]); 1235 enabled_flag |= ENABLED_FOR_RECORDING;
1236 if (event_callback_ &&
1237 event_callback_category_filter_.IsCategoryGroupEnabled(category_group))
1238 enabled_flag |= ENABLED_FOR_EVENT_CALLBACK;
1239 g_category_group_enabled[category_index] = enabled_flag;
1234 } 1240 }
1235 1241
1236 void TraceLog::UpdateCategoryGroupEnabledFlags() { 1242 void TraceLog::UpdateCategoryGroupEnabledFlags() {
1237 for (int i = 0; i < g_category_index; i++) 1243 for (int i = 0; i < g_category_index; i++)
1238 UpdateCategoryGroupEnabledFlag(i); 1244 UpdateCategoryGroupEnabledFlag(i);
1239 } 1245 }
1240 1246
1241 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal( 1247 const unsigned char* TraceLog::GetCategoryGroupEnabledInternal(
1242 const char* category_group) { 1248 const char* category_group) {
1243 DCHECK(!strchr(category_group, '"')) << 1249 DCHECK(!strchr(category_group, '"')) <<
(...skipping 48 matching lines...)
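
Note on the chunk above: each entry of g_category_group_enabled changes from a plain boolean (driven by the old enable_count_) to a small bitmask, so buffer recording and the event callback can be enabled independently per category group. A minimal sketch of how a caller could test those bits; ENABLED_FOR_RECORDING and ENABLED_FOR_EVENT_CALLBACK are assumed to be bit flags declared in base/debug/trace_event_impl.h, and the literal values below are illustrative, not taken from this CL:

    // Illustrative only: the real flag values are defined in trace_event_impl.h.
    enum CategoryGroupEnabledFlags {
      ENABLED_FOR_RECORDING = 1 << 0,       // event should go to the trace buffer
      ENABLED_FOR_EVENT_CALLBACK = 1 << 1,  // event should go to the registered callback
    };

    // True if either consumer wants events from this category group.
    inline bool AnyTracingActive(const unsigned char* category_group_enabled) {
      return (*category_group_enabled &
              (ENABLED_FOR_RECORDING | ENABLED_FOR_EVENT_CALLBACK)) != 0;
    }
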
1292 Options options) { 1298 Options options) {
1293 std::vector<EnabledStateObserver*> observer_list; 1299 std::vector<EnabledStateObserver*> observer_list;
1294 { 1300 {
1295 AutoLock lock(lock_); 1301 AutoLock lock(lock_);
1296 1302
1297 // Can't enable tracing when Flush() is in progress. 1303 // Can't enable tracing when Flush() is in progress.
1298 DCHECK(!flush_message_loop_proxy_.get()); 1304 DCHECK(!flush_message_loop_proxy_.get());
1299 1305
1300 Options old_options = trace_options(); 1306 Options old_options = trace_options();
1301 1307
1302 if (enable_count_++ > 0) { 1308 if (enabled_) {
1303 if (options != old_options) { 1309 if (options != old_options) {
1304 DLOG(ERROR) << "Attemting to re-enable tracing with a different " 1310 DLOG(ERROR) << "Attemting to re-enable tracing with a different "
1305 << "set of options."; 1311 << "set of options.";
1306 } 1312 }
1307 1313
1308 category_filter_.Merge(category_filter); 1314 category_filter_.Merge(category_filter);
1309 UpdateCategoryGroupEnabledFlags(); 1315 UpdateCategoryGroupEnabledFlags();
1310 return; 1316 return;
1311 } 1317 }
1312 1318
1319 if (dispatching_to_observer_list_) {
1320 DLOG(ERROR) <<
1321 "Cannot manipulate TraceLog::Enabled state from an observer.";
1322 return;
1323 }
1324
1325 enabled_ = true;
1326
1313 if (options != old_options) { 1327 if (options != old_options) {
1314 subtle::NoBarrier_Store(&trace_options_, options); 1328 subtle::NoBarrier_Store(&trace_options_, options);
1315 logged_events_.reset(CreateTraceBuffer()); 1329 logged_events_.reset(CreateTraceBuffer());
1316 NextGeneration(); 1330 NextGeneration();
1317 subtle::NoBarrier_Store(&buffer_is_full_, 0); 1331 subtle::NoBarrier_Store(&buffer_is_full_, 0);
1318 } 1332 }
1319 1333
1320 if (dispatching_to_observer_list_) {
1321 DLOG(ERROR) <<
1322 "Cannot manipulate TraceLog::Enabled state from an observer.";
1323 return;
1324 }
1325
1326 num_traces_recorded_++; 1334 num_traces_recorded_++;
1327 1335
1328 category_filter_ = CategoryFilter(category_filter); 1336 category_filter_ = CategoryFilter(category_filter);
1329 UpdateCategoryGroupEnabledFlags(); 1337 UpdateCategoryGroupEnabledFlags();
1330 1338
1331 if ((options & ENABLE_SAMPLING) || (options & MONITOR_SAMPLING)) { 1339 if ((options & ENABLE_SAMPLING) || (options & MONITOR_SAMPLING)) {
1332 sampling_thread_.reset(new TraceSamplingThread); 1340 sampling_thread_.reset(new TraceSamplingThread);
1333 sampling_thread_->RegisterSampleBucket( 1341 sampling_thread_->RegisterSampleBucket(
1334 &g_trace_state[0], 1342 &g_trace_state[0],
1335 "bucket0", 1343 "bucket0",
(...skipping 18 matching lines...)
1354 // Notify observers outside the lock in case they trigger trace events. 1362 // Notify observers outside the lock in case they trigger trace events.
1355 for (size_t i = 0; i < observer_list.size(); ++i) 1363 for (size_t i = 0; i < observer_list.size(); ++i)
1356 observer_list[i]->OnTraceLogEnabled(); 1364 observer_list[i]->OnTraceLogEnabled();
1357 1365
1358 { 1366 {
1359 AutoLock lock(lock_); 1367 AutoLock lock(lock_);
1360 dispatching_to_observer_list_ = false; 1368 dispatching_to_observer_list_ = false;
1361 } 1369 }
1362 } 1370 }
1363 1371
1364 const CategoryFilter& TraceLog::GetCurrentCategoryFilter() { 1372 CategoryFilter TraceLog::GetCurrentCategoryFilter() {
1365 AutoLock lock(lock_); 1373 AutoLock lock(lock_);
1366 DCHECK(enable_count_ > 0);
1367 return category_filter_; 1374 return category_filter_;
1368 } 1375 }
1369 1376
1370 void TraceLog::SetDisabled() { 1377 void TraceLog::SetDisabled() {
1371 std::vector<EnabledStateObserver*> observer_list; 1378 std::vector<EnabledStateObserver*> observer_list;
1372 { 1379 {
1373 AutoLock lock(lock_); 1380 AutoLock lock(lock_);
1374 DCHECK(enable_count_ > 0); 1381 if (!enabled_)
1375
1376 if (--enable_count_ != 0)
1377 return; 1382 return;
1378 1383
1379 if (dispatching_to_observer_list_) { 1384 if (dispatching_to_observer_list_) {
1380 DLOG(ERROR) 1385 DLOG(ERROR)
1381 << "Cannot manipulate TraceLog::Enabled state from an observer."; 1386 << "Cannot manipulate TraceLog::Enabled state from an observer.";
1382 return; 1387 return;
1383 } 1388 }
1384 1389
1390 enabled_ = false;
1391
1385 if (sampling_thread_.get()) { 1392 if (sampling_thread_.get()) {
1386 // Stop the sampling thread. 1393 // Stop the sampling thread.
1387 sampling_thread_->Stop(); 1394 sampling_thread_->Stop();
1388 lock_.Release(); 1395 lock_.Release();
1389 PlatformThread::Join(sampling_thread_handle_); 1396 PlatformThread::Join(sampling_thread_handle_);
1390 lock_.Acquire(); 1397 lock_.Acquire();
1391 sampling_thread_handle_ = PlatformThreadHandle(); 1398 sampling_thread_handle_ = PlatformThreadHandle();
1392 sampling_thread_.reset(); 1399 sampling_thread_.reset();
1393 } 1400 }
1394 1401
(...skipping 13 matching lines...)
1408 observer_list[i]->OnTraceLogDisabled(); 1415 observer_list[i]->OnTraceLogDisabled();
1409 1416
1410 { 1417 {
1411 AutoLock lock(lock_); 1418 AutoLock lock(lock_);
1412 dispatching_to_observer_list_ = false; 1419 dispatching_to_observer_list_ = false;
1413 } 1420 }
1414 } 1421 }
1415 1422
1416 int TraceLog::GetNumTracesRecorded() { 1423 int TraceLog::GetNumTracesRecorded() {
1417 AutoLock lock(lock_); 1424 AutoLock lock(lock_);
1418 if (enable_count_ == 0) 1425 if (!enabled_)
1419 return -1; 1426 return -1;
1420 return num_traces_recorded_; 1427 return num_traces_recorded_;
1421 } 1428 }
1422 1429
1423 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) { 1430 void TraceLog::AddEnabledStateObserver(EnabledStateObserver* listener) {
1424 enabled_state_observer_list_.push_back(listener); 1431 enabled_state_observer_list_.push_back(listener);
1425 } 1432 }
1426 1433
1427 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) { 1434 void TraceLog::RemoveEnabledStateObserver(EnabledStateObserver* listener) {
1428 std::vector<EnabledStateObserver*>::iterator it = 1435 std::vector<EnabledStateObserver*>::iterator it =
(...skipping 63 matching lines...)
1492 1499
1493 void TraceLog::CheckIfBufferIsFullWhileLocked(NotificationHelper* notifier) { 1500 void TraceLog::CheckIfBufferIsFullWhileLocked(NotificationHelper* notifier) {
1494 lock_.AssertAcquired(); 1501 lock_.AssertAcquired();
1495 if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) { 1502 if (!subtle::NoBarrier_Load(&buffer_is_full_) && logged_events_->IsFull()) {
1496 subtle::NoBarrier_Store(&buffer_is_full_, 1503 subtle::NoBarrier_Store(&buffer_is_full_,
1497 static_cast<subtle::AtomicWord>(1)); 1504 static_cast<subtle::AtomicWord>(1));
1498 notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL); 1505 notifier->AddNotificationWhileLocked(TRACE_BUFFER_FULL);
1499 } 1506 }
1500 } 1507 }
1501 1508
1502 void TraceLog::SetEventCallback(EventCallback cb) { 1509 void TraceLog::SetEventCallbackEnabled(const CategoryFilter& category_filter,
1510 EventCallback cb) {
1511 AutoLock lock(lock_);
1503 subtle::NoBarrier_Store(&event_callback_, 1512 subtle::NoBarrier_Store(&event_callback_,
1504 reinterpret_cast<subtle::AtomicWord>(cb)); 1513 reinterpret_cast<subtle::AtomicWord>(cb));
1514 event_callback_category_filter_ = category_filter;
1515 UpdateCategoryGroupEnabledFlags();
1505 }; 1516 };
1506 1517
1518 void TraceLog::SetEventCallbackDisabled() {
1519 AutoLock lock(lock_);
1520 subtle::NoBarrier_Store(&event_callback_, 0);
1521 UpdateCategoryGroupEnabledFlags();
1522 }
1523
1507 // Flush() works as the following: 1524 // Flush() works as the following:
1508 // 1. Flush() is called in threadA whose message loop is saved in 1525 // 1. Flush() is called in threadA whose message loop is saved in
1509 // flush_message_loop_proxy_; 1526 // flush_message_loop_proxy_;
1510 // 2. If thread_message_loops_ is not empty, threadA posts task to each message 1527 // 2. If thread_message_loops_ is not empty, threadA posts task to each message
1511 // loop to flush the thread local buffers; otherwise finish the flush; 1528 // loop to flush the thread local buffers; otherwise finish the flush;
1512 // 3. FlushCurrentThread() deletes the thread local event buffer: 1529 // 3. FlushCurrentThread() deletes the thread local event buffer:
1513 // - The last batch of events of the thread are flushed into the main buffer; 1530 // - The last batch of events of the thread are flushed into the main buffer;
1514 // - The message loop will be removed from thread_message_loops_; 1531 // - The message loop will be removed from thread_message_loops_;
1515 // If this is the last message loop, finish the flush; 1532 // If this is the last message loop, finish the flush;
1516 // 4. If any thread hasn't finish its flush in time, finish the flush. 1533 // 4. If any thread hasn't finish its flush in time, finish the flush.
(...skipping 260 matching lines...)
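
The chunk above replaces SetEventCallback(cb) with SetEventCallbackEnabled(category_filter, cb) and SetEventCallbackDisabled(), so a callback can be installed with its own category filter without turning buffer recording on. A hedged usage sketch; the callback parameter list is inferred from the call sites later in this diff (the exact EventCallback typedef lives in trace_event_impl.h), and the category string is purely illustrative:

    // Sketch only: parameter types mirror the event_callback(...) call sites below.
    static void OnTraceEvent(base::TimeTicks timestamp,
                             char phase,
                             const unsigned char* category_group_enabled,
                             const char* name,
                             unsigned long long id,
                             int num_args,
                             const char* const arg_names[],
                             const unsigned char arg_types[],
                             const unsigned long long arg_values[],
                             unsigned char flags) {
      // Forward the event to a lightweight consumer (e.g. a latency monitor).
    }

    void EnableCallbackOnlyTracing() {
      base::debug::TraceLog::GetInstance()->SetEventCallbackEnabled(
          base::debug::CategoryFilter("benchmark"),  // illustrative filter string
          &OnTraceEvent);
    }

    void DisableCallbackOnlyTracing() {
      base::debug::TraceLog::GetInstance()->SetEventCallbackDisabled();
    }

With this change the callback has its own lifetime, controlled only by these two calls and independent of SetEnabled()/SetDisabled(), which is the point of the CL.
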
1777 new_name) != existing_names.end(); 1794 new_name) != existing_names.end();
1778 if (!found) { 1795 if (!found) {
1779 existing_name->second.push_back(','); 1796 existing_name->second.push_back(',');
1780 existing_name->second.append(new_name); 1797 existing_name->second.append(new_name);
1781 } 1798 }
1782 } 1799 }
1783 } 1800 }
1784 } 1801 }
1785 1802
1786 TraceEvent* trace_event = NULL; 1803 TraceEvent* trace_event = NULL;
1787 if (!subtle::NoBarrier_Load(&buffer_is_full_)) { 1804 if ((*category_group_enabled & ENABLED_FOR_RECORDING) &&
1805 !subtle::NoBarrier_Load(&buffer_is_full_)) {
1788 if (thread_local_event_buffer) { 1806 if (thread_local_event_buffer) {
1789 lock.EnsureReleased(); 1807 lock.EnsureReleased();
1790 trace_event = thread_local_event_buffer->AddTraceEvent(&notifier, 1808 trace_event = thread_local_event_buffer->AddTraceEvent(&notifier,
1791 &handle); 1809 &handle);
1792 } else { 1810 } else {
1793 lock.EnsureAcquired(); 1811 lock.EnsureAcquired();
1794 trace_event = AddEventToThreadSharedChunkWhileLocked(&notifier, &handle); 1812 trace_event = AddEventToThreadSharedChunkWhileLocked(&notifier, &handle);
1795 } 1813 }
1796 1814
1797 if (trace_event) { 1815 if (trace_event) {
(...skipping 16 matching lines...)
1814 } 1832 }
1815 1833
1816 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load( 1834 if (reinterpret_cast<const unsigned char*>(subtle::NoBarrier_Load(
1817 &watch_category_)) == category_group_enabled) { 1835 &watch_category_)) == category_group_enabled) {
1818 lock.EnsureAcquired(); 1836 lock.EnsureAcquired();
1819 if (watch_event_name_ == name) 1837 if (watch_event_name_ == name)
1820 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION); 1838 notifier.AddNotificationWhileLocked(EVENT_WATCH_NOTIFICATION);
1821 } 1839 }
1822 1840
1823 lock.EnsureReleased(); 1841 lock.EnsureReleased();
1824 EventCallback event_callback = reinterpret_cast<EventCallback>( 1842 if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
1825 subtle::NoBarrier_Load(&event_callback_)); 1843 EventCallback event_callback = reinterpret_cast<EventCallback>(
1826 if (event_callback) { 1844 subtle::NoBarrier_Load(&event_callback_));
1827 // TODO(wangxianzhu): Should send TRACE_EVENT_PHASE_COMPLETE directly to 1845 if (event_callback) {
1828 // clients if it is beneficial and feasible. 1846 event_callback(now,
1829 event_callback(now, 1847 phase == TRACE_EVENT_PHASE_COMPLETE ?
1830 phase == TRACE_EVENT_PHASE_COMPLETE ? 1848 TRACE_EVENT_PHASE_BEGIN : phase,
1831 TRACE_EVENT_PHASE_BEGIN : phase, 1849 category_group_enabled, name, id,
1832 category_group_enabled, name, id, 1850 num_args, arg_names, arg_types, arg_values,
1833 num_args, arg_names, arg_types, arg_values, 1851 flags);
1834 flags); 1852 }
1835 } 1853 }
1836 1854
1837 if (thread_local_event_buffer) 1855 if (thread_local_event_buffer)
1838 thread_local_event_buffer->ReportOverhead(now, thread_now, &notifier); 1856 thread_local_event_buffer->ReportOverhead(now, thread_now, &notifier);
1839 1857
1840 notifier.SendNotificationIfAny(); 1858 notifier.SendNotificationIfAny();
1841 1859
1842 return handle; 1860 return handle;
1843 } 1861 }
1844 1862
(...skipping 59 matching lines...)
1904 const void* id, 1922 const void* id,
1905 const std::string& extra) 1923 const std::string& extra)
1906 { 1924 {
1907 #if defined(OS_WIN) 1925 #if defined(OS_WIN)
1908 TraceEventETWProvider::Trace(name, phase, id, extra); 1926 TraceEventETWProvider::Trace(name, phase, id, extra);
1909 #endif 1927 #endif
1910 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name, 1928 INTERNAL_TRACE_EVENT_ADD(phase, "ETW Trace Event", name,
1911 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra); 1929 TRACE_EVENT_FLAG_COPY, "id", id, "extra", extra);
1912 } 1930 }
1913 1931
1914 void TraceLog::UpdateTraceEventDuration(TraceEventHandle handle) { 1932 void TraceLog::UpdateTraceEventDuration(
1933 const unsigned char* category_group_enabled,
1934 const char* name,
1935 TraceEventHandle handle) {
1915 OptionalAutoLock lock(lock_); 1936 OptionalAutoLock lock(lock_);
1916 1937
1917 TimeTicks now = OffsetNow(); 1938 TimeTicks now = OffsetNow();
1918 TraceEvent* trace_event = GetEventByHandleInternal(handle, &lock); 1939 TraceEvent* trace_event = NULL;
1919 if (trace_event) { 1940 // TODO(wangxianzhu): Remove the !category_group_enabled condition after
1920 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE); 1941 // all clients migrate to the new UpdateTraceEventDuration API.
1921 trace_event->UpdateDuration(now); 1942 if (!category_group_enabled ||
1943 (*category_group_enabled & ENABLED_FOR_RECORDING)) {
1944 trace_event = GetEventByHandleInternal(handle, &lock);
1945 if (trace_event) {
1946 DCHECK(trace_event->phase() == TRACE_EVENT_PHASE_COMPLETE);
1947 trace_event->UpdateDuration(now);
1922 #if defined(OS_ANDROID) 1948 #if defined(OS_ANDROID)
1923 trace_event->SendToATrace(); 1949 trace_event->SendToATrace();
1924 #endif 1950 #endif
1951 }
1952
1953 if (trace_options() & ECHO_TO_CONSOLE) {
1954 lock.EnsureAcquired();
1955 OutputEventToConsoleWhileLocked(TRACE_EVENT_PHASE_END, now, trace_event);
1956 }
1925 } 1957 }
1926 1958
1927 if (trace_options() & ECHO_TO_CONSOLE) { 1959 // TODO(wangxianzhu): Remove this block after all clients migrate to the
1928 lock.EnsureAcquired(); 1960 // new UpdateTraceEventDuration API.
1929 OutputEventToConsoleWhileLocked(TRACE_EVENT_PHASE_END, now, trace_event); 1961 if (!category_group_enabled || !name) {
1962 if (!trace_event)
1963 return;
1964 category_group_enabled = trace_event->category_group_enabled();
1965 name = trace_event->name();
1930 } 1966 }
1931 1967
1932 EventCallback event_callback = reinterpret_cast<EventCallback>( 1968 lock.EnsureReleased();
1933 subtle::NoBarrier_Load(&event_callback_)); 1969 if (*category_group_enabled & ENABLED_FOR_EVENT_CALLBACK) {
1934 if (event_callback && trace_event) { 1970 EventCallback event_callback = reinterpret_cast<EventCallback>(
1935 // The copy is needed when trace_event is from the main buffer in which case 1971 subtle::NoBarrier_Load(&event_callback_));
1936 // the lock has been locked. 1972 event_callback(now, TRACE_EVENT_PHASE_END, category_group_enabled, name,
1937 TraceEvent event_copy; 1973 trace_event_internal::kNoEventId, 0, NULL, NULL, NULL,
1938 event_copy.CopyFrom(*trace_event); 1974 TRACE_EVENT_FLAG_NONE);
1939 lock.EnsureReleased();
1940 // TODO(wangxianzhu): Should send TRACE_EVENT_PHASE_COMPLETE directly to
1941 // clients if it is beneficial and feasible.
1942 event_callback(now, TRACE_EVENT_PHASE_END,
1943 event_copy.category_group_enabled(),
1944 event_copy.name(), event_copy.id(),
1945 0, NULL, NULL, NULL, event_copy.flags());
1946 } 1975 }
1947 } 1976 }
1948 1977
1949 void TraceLog::SetWatchEvent(const std::string& category_name, 1978 void TraceLog::SetWatchEvent(const std::string& category_name,
1950 const std::string& event_name) { 1979 const std::string& event_name) {
1951 const unsigned char* category = GetCategoryGroupEnabled( 1980 const unsigned char* category = GetCategoryGroupEnabled(
1952 category_name.c_str()); 1981 category_name.c_str());
1953 AutoLock lock(lock_); 1982 AutoLock lock(lock_);
1954 subtle::NoBarrier_Store(&watch_category_, 1983 subtle::NoBarrier_Store(&watch_category_,
1955 reinterpret_cast<subtle::AtomicWord>(category)); 1984 reinterpret_cast<subtle::AtomicWord>(category));
(...skipping 323 matching lines...)
2279 2308
2280 namespace trace_event_internal { 2309 namespace trace_event_internal {
2281 2310
2282 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient( 2311 ScopedTraceBinaryEfficient::ScopedTraceBinaryEfficient(
2283 const char* category_group, const char* name) { 2312 const char* category_group, const char* name) {
2284 // The single atom works because for now the category_group can only be "gpu". 2313 // The single atom works because for now the category_group can only be "gpu".
2285 DCHECK(strcmp(category_group, "gpu") == 0); 2314 DCHECK(strcmp(category_group, "gpu") == 0);
2286 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0; 2315 static TRACE_EVENT_API_ATOMIC_WORD atomic = 0;
2287 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES( 2316 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO_CUSTOM_VARIABLES(
2288 category_group, atomic, category_group_enabled_); 2317 category_group, atomic, category_group_enabled_);
2318 name_ = name;
2289 if (*category_group_enabled_) { 2319 if (*category_group_enabled_) {
2290 event_handle_ = 2320 event_handle_ =
2291 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP( 2321 TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
2292 TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name, 2322 TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name,
2293 trace_event_internal::kNoEventId, 2323 trace_event_internal::kNoEventId,
2294 static_cast<int>(base::PlatformThread::CurrentId()), 2324 static_cast<int>(base::PlatformThread::CurrentId()),
2295 base::TimeTicks::NowFromSystemTraceTime(), 2325 base::TimeTicks::NowFromSystemTraceTime(),
2296 0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE); 2326 0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
2297 } 2327 }
2298 } 2328 }
2299 2329
2300 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() { 2330 ScopedTraceBinaryEfficient::~ScopedTraceBinaryEfficient() {
2301 if (*category_group_enabled_) 2331 if (*category_group_enabled_) {
2302 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(event_handle_); 2332 TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
2333 name_, event_handle_);
2334 }
2303 } 2335 }
2304 2336
2305 } // namespace trace_event_internal 2337 } // namespace trace_event_internal
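
The last chunk shows why UpdateTraceEventDuration now takes the category flag and name in addition to the handle: when an event was delivered only to the callback and never stored in the buffer, there is no TraceEvent to read them back from. A simplified sketch of the scoped-event pattern built on the updated API, mirroring the calls visible in ScopedTraceBinaryEfficient above rather than the real Chromium macro expansion:

    // Simplified RAII sketch (not the actual macro expansion used by Chromium).
    class ScopedTraceSketch {
     public:
      ScopedTraceSketch(const unsigned char* category_group_enabled,
                        const char* name)
          : category_group_enabled_(category_group_enabled), name_(name) {
        if (*category_group_enabled_) {
          handle_ = TRACE_EVENT_API_ADD_TRACE_EVENT_WITH_THREAD_ID_AND_TIMESTAMP(
              TRACE_EVENT_PHASE_COMPLETE, category_group_enabled_, name_,
              trace_event_internal::kNoEventId,
              static_cast<int>(base::PlatformThread::CurrentId()),
              base::TimeTicks::NowFromSystemTraceTime(),
              0, NULL, NULL, NULL, NULL, TRACE_EVENT_FLAG_NONE);
        }
      }
      ~ScopedTraceSketch() {
        if (*category_group_enabled_) {
          // Both the recording path and the callback path receive the END/duration.
          TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION(category_group_enabled_,
                                                      name_, handle_);
        }
      }

     private:
      const unsigned char* category_group_enabled_;
      const char* name_;
      base::debug::TraceEventHandle handle_;
    };
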