| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // In the process of moving the trace event files. Right now the headers |
| 6 // are being forwarded. In next CLs the change will get completed |
| 7 // TODO(ssid): https://code.google.com/p/chromium/issues/detail?id=451032 |
| 5 | 8 |
| 6 #ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 9 #ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
| 7 #define BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 10 #define BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
| 8 | 11 |
| 9 #include <stack> | 12 #include "base/trace_event/trace_event_impl.h" |
| 10 #include <string> | |
| 11 #include <vector> | |
| 12 | |
| 13 #include "base/atomicops.h" | |
| 14 #include "base/base_export.h" | |
| 15 #include "base/callback.h" | |
| 16 #include "base/containers/hash_tables.h" | |
| 17 #include "base/gtest_prod_util.h" | |
| 18 #include "base/memory/ref_counted_memory.h" | |
| 19 #include "base/memory/scoped_vector.h" | |
| 20 #include "base/observer_list.h" | |
| 21 #include "base/strings/string_util.h" | |
| 22 #include "base/synchronization/condition_variable.h" | |
| 23 #include "base/synchronization/lock.h" | |
| 24 #include "base/threading/thread.h" | |
| 25 #include "base/threading/thread_local.h" | |
| 26 | |
| 27 // Older style trace macros with explicit id and extra data | |
| 28 // Only these macros result in publishing data to ETW as currently implemented. | |
| 29 #define TRACE_EVENT_BEGIN_ETW(name, id, extra) \ | |
| 30 base::debug::TraceLog::AddTraceEventEtw( \ | |
| 31 TRACE_EVENT_PHASE_BEGIN, \ | |
| 32 name, reinterpret_cast<const void*>(id), extra) | |
| 33 | |
| 34 #define TRACE_EVENT_END_ETW(name, id, extra) \ | |
| 35 base::debug::TraceLog::AddTraceEventEtw( \ | |
| 36 TRACE_EVENT_PHASE_END, \ | |
| 37 name, reinterpret_cast<const void*>(id), extra) | |
| 38 | |
| 39 #define TRACE_EVENT_INSTANT_ETW(name, id, extra) \ | |
| 40 base::debug::TraceLog::AddTraceEventEtw( \ | |
| 41 TRACE_EVENT_PHASE_INSTANT, \ | |
| 42 name, reinterpret_cast<const void*>(id), extra) | |
| 43 | |
| 44 template <typename Type> | |
| 45 struct DefaultSingletonTraits; | |
| 46 | |
| 47 namespace base { | |
| 48 | |
| 49 class WaitableEvent; | |
| 50 class MessageLoop; | |
| 51 | |
| 52 namespace debug { | |
| 53 | |
| 54 // For any argument of type TRACE_VALUE_TYPE_CONVERTABLE the provided | |
| 55 // class must implement this interface. | |
| 56 class BASE_EXPORT ConvertableToTraceFormat | |
| 57 : public RefCounted<ConvertableToTraceFormat> { | |
| 58 public: | |
| 59 // Append the class info to the provided |out| string. The appended | |
| 60 // data must be a valid JSON object. Strings must be properly quoted, and | |
| 61 // escaped. There is no processing applied to the content after it is | |
| 62 // appended. | |
| 63 virtual void AppendAsTraceFormat(std::string* out) const = 0; | |
| 64 | |
| 65 std::string ToString() const { | |
| 66 std::string result; | |
| 67 AppendAsTraceFormat(&result); | |
| 68 return result; | |
| 69 } | |
| 70 | |
| 71 protected: | |
| 72 virtual ~ConvertableToTraceFormat() {} | |
| 73 | |
| 74 private: | |
| 75 friend class RefCounted<ConvertableToTraceFormat>; | |
| 76 }; | |
| 77 | |
| 78 struct TraceEventHandle { | |
| 79 uint32 chunk_seq; | |
| 80 uint16 chunk_index; | |
| 81 uint16 event_index; | |
| 82 }; | |
| 83 | |
| 84 const int kTraceMaxNumArgs = 2; | |
| 85 | |
| 86 class BASE_EXPORT TraceEvent { | |
| 87 public: | |
| 88 union TraceValue { | |
| 89 bool as_bool; | |
| 90 unsigned long long as_uint; | |
| 91 long long as_int; | |
| 92 double as_double; | |
| 93 const void* as_pointer; | |
| 94 const char* as_string; | |
| 95 }; | |
| 96 | |
| 97 TraceEvent(); | |
| 98 ~TraceEvent(); | |
| 99 | |
| 100 // We don't need to copy TraceEvent except when TraceEventBuffer is cloned. | |
| 101 // Use explicit copy method to avoid accidental misuse of copy. | |
| 102 void CopyFrom(const TraceEvent& other); | |
| 103 | |
| 104 void Initialize( | |
| 105 int thread_id, | |
| 106 TimeTicks timestamp, | |
| 107 TimeTicks thread_timestamp, | |
| 108 char phase, | |
| 109 const unsigned char* category_group_enabled, | |
| 110 const char* name, | |
| 111 unsigned long long id, | |
| 112 int num_args, | |
| 113 const char** arg_names, | |
| 114 const unsigned char* arg_types, | |
| 115 const unsigned long long* arg_values, | |
| 116 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, | |
| 117 unsigned char flags); | |
| 118 | |
| 119 void Reset(); | |
| 120 | |
| 121 void UpdateDuration(const TimeTicks& now, const TimeTicks& thread_now); | |
| 122 | |
| 123 // Serialize event data to JSON | |
| 124 void AppendAsJSON(std::string* out) const; | |
| 125 void AppendPrettyPrinted(std::ostringstream* out) const; | |
| 126 | |
| 127 static void AppendValueAsJSON(unsigned char type, | |
| 128 TraceValue value, | |
| 129 std::string* out); | |
| 130 | |
| 131 TimeTicks timestamp() const { return timestamp_; } | |
| 132 TimeTicks thread_timestamp() const { return thread_timestamp_; } | |
| 133 char phase() const { return phase_; } | |
| 134 int thread_id() const { return thread_id_; } | |
| 135 TimeDelta duration() const { return duration_; } | |
| 136 TimeDelta thread_duration() const { return thread_duration_; } | |
| 137 unsigned long long id() const { return id_; } | |
| 138 unsigned char flags() const { return flags_; } | |
| 139 | |
| 140 // Exposed for unittesting: | |
| 141 | |
| 142 const base::RefCountedString* parameter_copy_storage() const { | |
| 143 return parameter_copy_storage_.get(); | |
| 144 } | |
| 145 | |
| 146 const unsigned char* category_group_enabled() const { | |
| 147 return category_group_enabled_; | |
| 148 } | |
| 149 | |
| 150 const char* name() const { return name_; } | |
| 151 | |
| 152 #if defined(OS_ANDROID) | |
| 153 void SendToATrace(); | |
| 154 #endif | |
| 155 | |
| 156 private: | |
| 157 // Note: these are ordered by size (largest first) for optimal packing. | |
| 158 TimeTicks timestamp_; | |
| 159 TimeTicks thread_timestamp_; | |
| 160 TimeDelta duration_; | |
| 161 TimeDelta thread_duration_; | |
| 162 // id_ can be used to store phase-specific data. | |
| 163 unsigned long long id_; | |
| 164 TraceValue arg_values_[kTraceMaxNumArgs]; | |
| 165 const char* arg_names_[kTraceMaxNumArgs]; | |
| 166 scoped_refptr<ConvertableToTraceFormat> convertable_values_[kTraceMaxNumArgs]; | |
| 167 const unsigned char* category_group_enabled_; | |
| 168 const char* name_; | |
| 169 scoped_refptr<base::RefCountedString> parameter_copy_storage_; | |
| 170 int thread_id_; | |
| 171 char phase_; | |
| 172 unsigned char flags_; | |
| 173 unsigned char arg_types_[kTraceMaxNumArgs]; | |
| 174 | |
| 175 DISALLOW_COPY_AND_ASSIGN(TraceEvent); | |
| 176 }; | |
| 177 | |
| 178 // TraceBufferChunk is the basic unit of TraceBuffer. | |
| 179 class BASE_EXPORT TraceBufferChunk { | |
| 180 public: | |
| 181 explicit TraceBufferChunk(uint32 seq) | |
| 182 : next_free_(0), | |
| 183 seq_(seq) { | |
| 184 } | |
| 185 | |
| 186 void Reset(uint32 new_seq); | |
| 187 TraceEvent* AddTraceEvent(size_t* event_index); | |
| 188 bool IsFull() const { return next_free_ == kTraceBufferChunkSize; } | |
| 189 | |
| 190 uint32 seq() const { return seq_; } | |
| 191 size_t capacity() const { return kTraceBufferChunkSize; } | |
| 192 size_t size() const { return next_free_; } | |
| 193 | |
| 194 TraceEvent* GetEventAt(size_t index) { | |
| 195 DCHECK(index < size()); | |
| 196 return &chunk_[index]; | |
| 197 } | |
| 198 const TraceEvent* GetEventAt(size_t index) const { | |
| 199 DCHECK(index < size()); | |
| 200 return &chunk_[index]; | |
| 201 } | |
| 202 | |
| 203 scoped_ptr<TraceBufferChunk> Clone() const; | |
| 204 | |
| 205 static const size_t kTraceBufferChunkSize = 64; | |
| 206 | |
| 207 private: | |
| 208 size_t next_free_; | |
| 209 TraceEvent chunk_[kTraceBufferChunkSize]; | |
| 210 uint32 seq_; | |
| 211 }; | |
| 212 | |
| 213 // TraceBuffer holds the events as they are collected. | |
| 214 class BASE_EXPORT TraceBuffer { | |
| 215 public: | |
| 216 virtual ~TraceBuffer() {} | |
| 217 | |
| 218 virtual scoped_ptr<TraceBufferChunk> GetChunk(size_t *index) = 0; | |
| 219 virtual void ReturnChunk(size_t index, | |
| 220 scoped_ptr<TraceBufferChunk> chunk) = 0; | |
| 221 | |
| 222 virtual bool IsFull() const = 0; | |
| 223 virtual size_t Size() const = 0; | |
| 224 virtual size_t Capacity() const = 0; | |
| 225 virtual TraceEvent* GetEventByHandle(TraceEventHandle handle) = 0; | |
| 226 | |
| 227 // For iteration. Each TraceBuffer can only be iterated once. | |
| 228 virtual const TraceBufferChunk* NextChunk() = 0; | |
| 229 | |
| 230 virtual scoped_ptr<TraceBuffer> CloneForIteration() const = 0; | |
| 231 }; | |
| 232 | |
| 233 // TraceResultBuffer collects and converts trace fragments returned by TraceLog | |
| 234 // to JSON output. | |
| 235 class BASE_EXPORT TraceResultBuffer { | |
| 236 public: | |
| 237 typedef base::Callback<void(const std::string&)> OutputCallback; | |
| 238 | |
| 239 // If you don't need to stream JSON chunks out efficiently, and just want to | |
| 240 // get a complete JSON string after calling Finish, use this struct to collect | |
| 241 // JSON trace output. | |
| 242 struct BASE_EXPORT SimpleOutput { | |
| 243 OutputCallback GetCallback(); | |
| 244 void Append(const std::string& json_string); | |
| 245 | |
| 246 // Do what you want with the json_output_ string after calling | |
| 247 // TraceResultBuffer::Finish. | |
| 248 std::string json_output; | |
| 249 }; | |
| 250 | |
| 251 TraceResultBuffer(); | |
| 252 ~TraceResultBuffer(); | |
| 253 | |
| 254 // Set callback. The callback will be called during Start with the initial | |
| 255 // JSON output and during AddFragment and Finish with following JSON output | |
| 256 // chunks. The callback target must live past the last calls to | |
| 257 // TraceResultBuffer::Start/AddFragment/Finish. | |
| 258 void SetOutputCallback(const OutputCallback& json_chunk_callback); | |
| 259 | |
| 260 // Start JSON output. This resets all internal state, so you can reuse | |
| 261 // the TraceResultBuffer by calling Start. | |
| 262 void Start(); | |
| 263 | |
| 264 // Call AddFragment 0 or more times to add trace fragments from TraceLog. | |
| 265 void AddFragment(const std::string& trace_fragment); | |
| 266 | |
| 267 // When all fragments have been added, call Finish to complete the JSON | |
| 268 // formatted output. | |
| 269 void Finish(); | |
| 270 | |
| 271 private: | |
| 272 OutputCallback output_callback_; | |
| 273 bool append_comma_; | |
| 274 }; | |
| 275 | |
| 276 class BASE_EXPORT CategoryFilter { | |
| 277 public: | |
| 278 typedef std::vector<std::string> StringList; | |
| 279 | |
| 280 // The default category filter, used when none is provided. | |
| 281 // Allows all categories through, except if they end in the suffix 'Debug' or | |
| 282 // 'Test'. | |
| 283 static const char kDefaultCategoryFilterString[]; | |
| 284 | |
| 285 // |filter_string| is a comma-delimited list of category wildcards. | |
| 286 // A category can have an optional '-' prefix to make it an excluded category. | |
| 287 // All the same rules apply above, so for example, having both included and | |
| 288 // excluded categories in the same list would not be supported. | |
| 289 // | |
| 290 // Example: CategoryFilter("test_MyTest*"); | |
| 291 // Example: CategoryFilter("test_MyTest*,test_OtherStuff"); | |
| 292 // Example: CategoryFilter("-excluded_category1,-excluded_category2"); | |
| 293 // Example: CategoryFilter("-*,webkit"); would disable everything but webkit. | |
| 294 // Example: CategoryFilter("-webkit"); would enable everything but webkit. | |
| 295 // | |
| 296 // Category filters can also be used to configure synthetic delays. | |
| 297 // | |
| 298 // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16)"); would make swap | |
| 299 // buffers always take at least 16 ms. | |
| 300 // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;oneshot)"); would | |
| 301 // make swap buffers take at least 16 ms the first time it is | |
| 302 // called. | |
| 303 // Example: CategoryFilter("DELAY(gpu.PresentingFrame;16;alternating)"); | |
| 304 // would make swap buffers take at least 16 ms every other time it | |
| 305 // is called. | |
| 306 explicit CategoryFilter(const std::string& filter_string); | |
| 307 | |
| 308 CategoryFilter(); | |
| 309 | |
| 310 CategoryFilter(const CategoryFilter& cf); | |
| 311 | |
| 312 ~CategoryFilter(); | |
| 313 | |
| 314 CategoryFilter& operator=(const CategoryFilter& rhs); | |
| 315 | |
| 316 // Writes the string representation of the CategoryFilter. This is a comma | |
| 317 // separated string, similar in nature to the one used to determine | |
| 318 // enabled/disabled category patterns, except here there is an arbitrary | |
| 319 // order, included categories go first, then excluded categories. Excluded | |
| 320 // categories are distinguished from included categories by the prefix '-'. | |
| 321 std::string ToString() const; | |
| 322 | |
| 323 // Returns true if at least one category in the list is enabled by this | |
| 324 // category filter. | |
| 325 bool IsCategoryGroupEnabled(const char* category_group) const; | |
| 326 | |
| 327 // Return a list of the synthetic delays specified in this category filter. | |
| 328 const StringList& GetSyntheticDelayValues() const; | |
| 329 | |
| 330 // Merges nested_filter with the current CategoryFilter | |
| 331 void Merge(const CategoryFilter& nested_filter); | |
| 332 | |
| 333 // Clears both included/excluded pattern lists. This would be equivalent to | |
| 334 // creating a CategoryFilter with an empty string, through the constructor. | |
| 335 // i.e: CategoryFilter(). | |
| 336 // | |
| 337 // When using an empty filter, all categories are considered included as we | |
| 338 // are not excluding anything. | |
| 339 void Clear(); | |
| 340 | |
| 341 private: | |
| 342 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, CategoryFilter); | |
| 343 | |
| 344 // Returns true if category is enabled according to this filter. | |
| 345 bool IsCategoryEnabled(const char* category_name) const; | |
| 346 | |
| 347 static bool IsEmptyOrContainsLeadingOrTrailingWhitespace( | |
| 348 const std::string& str); | |
| 349 | |
| 350 void Initialize(const std::string& filter_string); | |
| 351 void WriteString(const StringList& values, | |
| 352 std::string* out, | |
| 353 bool included) const; | |
| 354 void WriteString(const StringList& delays, std::string* out) const; | |
| 355 bool HasIncludedPatterns() const; | |
| 356 | |
| 357 StringList included_; | |
| 358 StringList disabled_; | |
| 359 StringList excluded_; | |
| 360 StringList delays_; | |
| 361 }; | |
| 362 | |
| 363 class TraceSamplingThread; | |
| 364 | |
| 365 // Options determines how the trace buffer stores data. | |
| 366 enum TraceRecordMode { | |
| 367 // Record until the trace buffer is full. | |
| 368 RECORD_UNTIL_FULL, | |
| 369 | |
| 370 // Record until the user ends the trace. The trace buffer is a fixed size | |
| 371 // and we use it as a ring buffer during recording. | |
| 372 RECORD_CONTINUOUSLY, | |
| 373 | |
| 374 // Echo to console. Events are discarded. | |
| 375 ECHO_TO_CONSOLE, | |
| 376 | |
| 377 // Record until the trace buffer is full, but with a huge buffer size. | |
| 378 RECORD_AS_MUCH_AS_POSSIBLE | |
| 379 }; | |
| 380 | |
| 381 struct BASE_EXPORT TraceOptions { | |
| 382 TraceOptions() | |
| 383 : record_mode(RECORD_UNTIL_FULL), | |
| 384 enable_sampling(false), | |
| 385 enable_systrace(false) {} | |
| 386 | |
| 387 explicit TraceOptions(TraceRecordMode record_mode) | |
| 388 : record_mode(record_mode), | |
| 389 enable_sampling(false), | |
| 390 enable_systrace(false) {} | |
| 391 | |
| 392 // |options_string| is a comma-delimited list of trace options. | |
| 393 // Possible options are: "record-until-full", "record-continuously", | |
| 394 // "trace-to-console", "enable-sampling" and "enable-systrace". | |
| 395 // The first 3 options are trace recording modes and hence | |
| 396 // mutually exclusive. If more than one trace recording modes appear in the | |
| 397 // options_string, the last one takes precedence. If none of the trace | |
| 398 // recording mode is specified, recording mode is RECORD_UNTIL_FULL. | |
| 399 // | |
| 400 // The trace option will first be reset to the default option | |
| 401 // (record_mode set to RECORD_UNTIL_FULL, enable_sampling and enable_systrace | |
| 402 // set to false) before options parsed from |options_string| are applied on | |
| 403 // it. | |
| 404 // If |options_string| is invalid, the final state of trace_options is | |
| 405 // undefined. | |
| 406 // | |
| 407 // Example: trace_options.SetFromString("record-until-full") | |
| 408 // Example: trace_options.SetFromString( | |
| 409 // "record-continuously, enable-sampling") | |
| 410 // Example: trace_options.SetFromString("record-until-full, trace-to-console") | |
| 411 // will set ECHO_TO_CONSOLE as the recording mode. | |
| 412 // | |
| 413 // Returns true on success. | |
| 414 bool SetFromString(const std::string& options_string); | |
| 415 | |
| 416 std::string ToString() const; | |
| 417 | |
| 418 TraceRecordMode record_mode; | |
| 419 bool enable_sampling; | |
| 420 bool enable_systrace; | |
| 421 }; | |
| 422 | |
| 423 struct BASE_EXPORT TraceLogStatus { | |
| 424 TraceLogStatus(); | |
| 425 ~TraceLogStatus(); | |
| 426 size_t event_capacity; | |
| 427 size_t event_count; | |
| 428 }; | |
| 429 | |
| 430 class BASE_EXPORT TraceLog { | |
| 431 public: | |
| 432 enum Mode { | |
| 433 DISABLED = 0, | |
| 434 RECORDING_MODE, | |
| 435 MONITORING_MODE, | |
| 436 }; | |
| 437 | |
| 438 // The pointer returned from GetCategoryGroupEnabledInternal() points to a | |
| 439 // value with zero or more of the following bits. Used in this class only. | |
| 440 // The TRACE_EVENT macros should only use the value as a bool. | |
| 441 // These values must be in sync with macro values in TraceEvent.h in Blink. | |
| 442 enum CategoryGroupEnabledFlags { | |
| 443 // Category group enabled for the recording mode. | |
| 444 ENABLED_FOR_RECORDING = 1 << 0, | |
| 445 // Category group enabled for the monitoring mode. | |
| 446 ENABLED_FOR_MONITORING = 1 << 1, | |
| 447 // Category group enabled by SetEventCallbackEnabled(). | |
| 448 ENABLED_FOR_EVENT_CALLBACK = 1 << 2, | |
| 449 }; | |
| 450 | |
| 451 static TraceLog* GetInstance(); | |
| 452 | |
| 453 // Get set of known category groups. This can change as new code paths are | |
| 454 // reached. The known category groups are inserted into |category_groups|. | |
| 455 void GetKnownCategoryGroups(std::vector<std::string>* category_groups); | |
| 456 | |
| 457 // Retrieves a copy (for thread-safety) of the current CategoryFilter. | |
| 458 CategoryFilter GetCurrentCategoryFilter(); | |
| 459 | |
| 460 // Retrieves a copy (for thread-safety) of the current TraceOptions. | |
| 461 TraceOptions GetCurrentTraceOptions() const; | |
| 462 | |
| 463 // Enables normal tracing (recording trace events in the trace buffer). | |
| 464 // See CategoryFilter comments for details on how to control what categories | |
| 465 // will be traced. If tracing has already been enabled, |category_filter| will | |
| 466 // be merged into the current category filter. | |
| 467 void SetEnabled(const CategoryFilter& category_filter, | |
| 468 Mode mode, const TraceOptions& options); | |
| 469 | |
| 470 // Disables normal tracing for all categories. | |
| 471 void SetDisabled(); | |
| 472 | |
| 473 bool IsEnabled() { return mode_ != DISABLED; } | |
| 474 | |
| 475 // The number of times we have begun recording traces. If tracing is off, | |
| 476 // returns -1. If tracing is on, then it returns the number of times we have | |
| 477 // recorded a trace. By watching for this number to increment, you can | |
| 478 // passively discover when a new trace has begun. This is then used to | |
| 479 // implement the TRACE_EVENT_IS_NEW_TRACE() primitive. | |
| 480 int GetNumTracesRecorded(); | |
| 481 | |
| 482 #if defined(OS_ANDROID) | |
| 483 void StartATrace(); | |
| 484 void StopATrace(); | |
| 485 void AddClockSyncMetadataEvent(); | |
| 486 #endif | |
| 487 | |
| 488 // Enabled state listeners give a callback when tracing is enabled or | |
| 489 // disabled. This can be used to tie into other library's tracing systems | |
| 490 // on-demand. | |
| 491 class BASE_EXPORT EnabledStateObserver { | |
| 492 public: | |
| 493 // Called just after the tracing system becomes enabled, outside of the | |
| 494 // |lock_|. TraceLog::IsEnabled() is true at this point. | |
| 495 virtual void OnTraceLogEnabled() = 0; | |
| 496 | |
| 497 // Called just after the tracing system disables, outside of the |lock_|. | |
| 498 // TraceLog::IsEnabled() is false at this point. | |
| 499 virtual void OnTraceLogDisabled() = 0; | |
| 500 }; | |
| 501 void AddEnabledStateObserver(EnabledStateObserver* listener); | |
| 502 void RemoveEnabledStateObserver(EnabledStateObserver* listener); | |
| 503 bool HasEnabledStateObserver(EnabledStateObserver* listener) const; | |
| 504 | |
| 505 TraceLogStatus GetStatus() const; | |
| 506 bool BufferIsFull() const; | |
| 507 | |
| 508 // Not using base::Callback because it is limited to 7 parameters. | |
| 509 // Also, using primitive type allows directly passing callback from WebCore. | |
| 510 // WARNING: It is possible for the previously set callback to be called | |
| 511 // after a call to SetEventCallbackEnabled() that replaces or a call to | |
| 512 // SetEventCallbackDisabled() that disables the callback. | |
| 513 // This callback may be invoked on any thread. | |
| 514 // For TRACE_EVENT_PHASE_COMPLETE events, the client will still receive pairs | |
| 515 // of TRACE_EVENT_PHASE_BEGIN and TRACE_EVENT_PHASE_END events to keep the | |
| 516 // interface simple. | |
| 517 typedef void (*EventCallback)(TimeTicks timestamp, | |
| 518 char phase, | |
| 519 const unsigned char* category_group_enabled, | |
| 520 const char* name, | |
| 521 unsigned long long id, | |
| 522 int num_args, | |
| 523 const char* const arg_names[], | |
| 524 const unsigned char arg_types[], | |
| 525 const unsigned long long arg_values[], | |
| 526 unsigned char flags); | |
| 527 | |
| 528 // Enable tracing for EventCallback. | |
| 529 void SetEventCallbackEnabled(const CategoryFilter& category_filter, | |
| 530 EventCallback cb); | |
| 531 void SetEventCallbackDisabled(); | |
| 532 | |
| 533 // Flush all collected events to the given output callback. The callback will | |
| 534 // be called one or more times either synchronously or asynchronously from | |
| 535 // the current thread with IPC-bite-size chunks. The string format is | |
| 536 // undefined. Use TraceResultBuffer to convert one or more trace strings to | |
| 537 // JSON. The callback can be null if the caller doesn't want any data. | |
| 538 // Due to the implementation of thread-local buffers, flush can't be | |
| 539 // done when tracing is enabled. If called when tracing is enabled, the | |
| 540 // callback will be called directly with (empty_string, false) to indicate | |
| 541 // the end of this unsuccessful flush. | |
| 542 typedef base::Callback<void(const scoped_refptr<base::RefCountedString>&, | |
| 543 bool has_more_events)> OutputCallback; | |
| 544 void Flush(const OutputCallback& cb); | |
| 545 void FlushButLeaveBufferIntact(const OutputCallback& flush_output_callback); | |
| 546 | |
| 547 // Called by TRACE_EVENT* macros, don't call this directly. | |
| 548 // The name parameter is a category group for example: | |
| 549 // TRACE_EVENT0("renderer,webkit", "WebViewImpl::HandleInputEvent") | |
| 550 static const unsigned char* GetCategoryGroupEnabled(const char* name); | |
| 551 static const char* GetCategoryGroupName( | |
| 552 const unsigned char* category_group_enabled); | |
| 553 | |
| 554 // Called by TRACE_EVENT* macros, don't call this directly. | |
| 555 // If |copy| is set, |name|, |arg_name1| and |arg_name2| will be deep copied | |
| 556 // into the event; see "Memory scoping note" and TRACE_EVENT_COPY_XXX above. | |
| 557 TraceEventHandle AddTraceEvent( | |
| 558 char phase, | |
| 559 const unsigned char* category_group_enabled, | |
| 560 const char* name, | |
| 561 unsigned long long id, | |
| 562 int num_args, | |
| 563 const char** arg_names, | |
| 564 const unsigned char* arg_types, | |
| 565 const unsigned long long* arg_values, | |
| 566 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, | |
| 567 unsigned char flags); | |
| 568 TraceEventHandle AddTraceEventWithThreadIdAndTimestamp( | |
| 569 char phase, | |
| 570 const unsigned char* category_group_enabled, | |
| 571 const char* name, | |
| 572 unsigned long long id, | |
| 573 int thread_id, | |
| 574 const TimeTicks& timestamp, | |
| 575 int num_args, | |
| 576 const char** arg_names, | |
| 577 const unsigned char* arg_types, | |
| 578 const unsigned long long* arg_values, | |
| 579 const scoped_refptr<ConvertableToTraceFormat>* convertable_values, | |
| 580 unsigned char flags); | |
| 581 static void AddTraceEventEtw(char phase, | |
| 582 const char* category_group, | |
| 583 const void* id, | |
| 584 const char* extra); | |
| 585 static void AddTraceEventEtw(char phase, | |
| 586 const char* category_group, | |
| 587 const void* id, | |
| 588 const std::string& extra); | |
| 589 | |
| 590 void UpdateTraceEventDuration(const unsigned char* category_group_enabled, | |
| 591 const char* name, | |
| 592 TraceEventHandle handle); | |
| 593 | |
| 594 // For every matching event, the callback will be called. | |
| 595 typedef base::Callback<void()> WatchEventCallback; | |
| 596 void SetWatchEvent(const std::string& category_name, | |
| 597 const std::string& event_name, | |
| 598 const WatchEventCallback& callback); | |
| 599 // Cancel the watch event. If tracing is enabled, this may race with the | |
| 600 // watch event notification firing. | |
| 601 void CancelWatchEvent(); | |
| 602 | |
| 603 int process_id() const { return process_id_; } | |
| 604 | |
| 605 // Exposed for unittesting: | |
| 606 | |
| 607 void WaitSamplingEventForTesting(); | |
| 608 | |
| 609 // Allows deleting our singleton instance. | |
| 610 static void DeleteForTesting(); | |
| 611 | |
| 612 // Allow tests to inspect TraceEvents. | |
| 613 TraceEvent* GetEventByHandle(TraceEventHandle handle); | |
| 614 | |
| 615 void SetProcessID(int process_id); | |
| 616 | |
| 617 // Process sort indices, if set, override the order in which a process will | |
| 618 // appear relative to other processes in the trace viewer. Processes are sorted | |
| 619 // on their sort index, ascending, then by their name, and then tid. | |
| 620 void SetProcessSortIndex(int sort_index); | |
| 621 | |
| 622 // Sets the name of the process. | |
| 623 void SetProcessName(const std::string& process_name); | |
| 624 | |
| 625 // Processes can have labels in addition to their names. Use labels, for | |
| 626 // instance, to list out the web page titles that a process is handling. | |
| 627 void UpdateProcessLabel(int label_id, const std::string& current_label); | |
| 628 void RemoveProcessLabel(int label_id); | |
| 629 | |
| 630 // Thread sort indices, if set, override the order in which a thread will | |
| 631 // appear within its process in the trace viewer. Threads are sorted first on | |
| 632 // sort index, ascending, then by their name, and then tid. | |
| 633 void SetThreadSortIndex(PlatformThreadId , int sort_index); | |
| 634 | |
| 635 // Allow setting an offset between the current TimeTicks time and the time | |
| 636 // that should be reported. | |
| 637 void SetTimeOffset(TimeDelta offset); | |
| 638 | |
| 639 size_t GetObserverCountForTest() const; | |
| 640 | |
| 641 // Call this method if the current thread may block the message loop to | |
| 642 // prevent the thread from using the thread-local buffer because the thread | |
| 643 // may not handle the flush request in time causing loss of unflushed events. | |
| 644 void SetCurrentThreadBlocksMessageLoop(); | |
| 645 | |
| 646 private: | |
| 647 typedef unsigned int InternalTraceOptions; | |
| 648 | |
| 649 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | |
| 650 TraceBufferRingBufferGetReturnChunk); | |
| 651 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | |
| 652 TraceBufferRingBufferHalfIteration); | |
| 653 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | |
| 654 TraceBufferRingBufferFullIteration); | |
| 655 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | |
| 656 TraceBufferVectorReportFull); | |
| 657 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | |
| 658 ConvertTraceOptionsToInternalOptions); | |
| 659 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | |
| 660 TraceRecordAsMuchAsPossibleMode); | |
| 661 | |
| 662 // This allows constructor and destructor to be private and usable only | |
| 663 // by the Singleton class. | |
| 664 friend struct DefaultSingletonTraits<TraceLog>; | |
| 665 | |
| 666 // Enable/disable each category group based on the current mode_, | |
| 667 // category_filter_, event_callback_ and event_callback_category_filter_. | |
| 668 // Enable the category group in the enabled mode if category_filter_ matches | |
| 669 // the category group, or event_callback_ is not null and | |
| 670 // event_callback_category_filter_ matches the category group. | |
| 671 void UpdateCategoryGroupEnabledFlags(); | |
| 672 void UpdateCategoryGroupEnabledFlag(size_t category_index); | |
| 673 | |
| 674 // Configure synthetic delays based on the values set in the current | |
| 675 // category filter. | |
| 676 void UpdateSyntheticDelaysFromCategoryFilter(); | |
| 677 | |
| 678 InternalTraceOptions GetInternalOptionsFromTraceOptions( | |
| 679 const TraceOptions& options); | |
| 680 | |
| 681 class ThreadLocalEventBuffer; | |
| 682 class OptionalAutoLock; | |
| 683 | |
| 684 TraceLog(); | |
| 685 ~TraceLog(); | |
| 686 const unsigned char* GetCategoryGroupEnabledInternal(const char* name); | |
| 687 void AddMetadataEventsWhileLocked(); | |
| 688 | |
| 689 InternalTraceOptions trace_options() const { | |
| 690 return static_cast<InternalTraceOptions>( | |
| 691 subtle::NoBarrier_Load(&trace_options_)); | |
| 692 } | |
| 693 | |
| 694 TraceBuffer* trace_buffer() const { return logged_events_.get(); } | |
| 695 TraceBuffer* CreateTraceBuffer(); | |
| 696 TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks); | |
| 697 | |
| 698 std::string EventToConsoleMessage(unsigned char phase, | |
| 699 const TimeTicks& timestamp, | |
| 700 TraceEvent* trace_event); | |
| 701 | |
| 702 TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle, | |
| 703 bool check_buffer_is_full); | |
| 704 void CheckIfBufferIsFullWhileLocked(); | |
| 705 void SetDisabledWhileLocked(); | |
| 706 | |
| 707 TraceEvent* GetEventByHandleInternal(TraceEventHandle handle, | |
| 708 OptionalAutoLock* lock); | |
| 709 | |
| 710 // |generation| is used in the following callbacks to check if the callback | |
| 711 // is called for the flush of the current |logged_events_|. | |
| 712 void FlushCurrentThread(int generation); | |
| 713 void ConvertTraceEventsToTraceFormat(scoped_ptr<TraceBuffer> logged_events, | |
| 714 const TraceLog::OutputCallback& flush_output_callback); | |
| 715 void FinishFlush(int generation); | |
| 716 void OnFlushTimeout(int generation); | |
| 717 | |
| 718 int generation() const { | |
| 719 return static_cast<int>(subtle::NoBarrier_Load(&generation_)); | |
| 720 } | |
| 721 bool CheckGeneration(int generation) const { | |
| 722 return generation == this->generation(); | |
| 723 } | |
| 724 void UseNextTraceBuffer(); | |
| 725 | |
| 726 TimeTicks OffsetNow() const { | |
| 727 return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime()); | |
| 728 } | |
| 729 TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const { | |
| 730 return timestamp - time_offset_; | |
| 731 } | |
| 732 | |
| 733 // Internal representation of trace options since we store the currently used | |
| 734 // trace option as an AtomicWord. | |
| 735 static const InternalTraceOptions kInternalNone; | |
| 736 static const InternalTraceOptions kInternalRecordUntilFull; | |
| 737 static const InternalTraceOptions kInternalRecordContinuously; | |
| 738 static const InternalTraceOptions kInternalEchoToConsole; | |
| 739 static const InternalTraceOptions kInternalEnableSampling; | |
| 740 static const InternalTraceOptions kInternalRecordAsMuchAsPossible; | |
| 741 | |
| 742 // This lock protects TraceLog member accesses (except for members protected | |
| 743 // by thread_info_lock_) from arbitrary threads. | |
| 744 mutable Lock lock_; | |
| 745 // This lock protects accesses to thread_names_, thread_event_start_times_ | |
| 746 // and thread_colors_. | |
| 747 Lock thread_info_lock_; | |
| 748 Mode mode_; | |
| 749 int num_traces_recorded_; | |
| 750 scoped_ptr<TraceBuffer> logged_events_; | |
| 751 subtle::AtomicWord /* EventCallback */ event_callback_; | |
| 752 bool dispatching_to_observer_list_; | |
| 753 std::vector<EnabledStateObserver*> enabled_state_observer_list_; | |
| 754 | |
| 755 std::string process_name_; | |
| 756 base::hash_map<int, std::string> process_labels_; | |
| 757 int process_sort_index_; | |
| 758 base::hash_map<int, int> thread_sort_indices_; | |
| 759 base::hash_map<int, std::string> thread_names_; | |
| 760 | |
| 761 // The following two maps are used only when ECHO_TO_CONSOLE. | |
| 762 base::hash_map<int, std::stack<TimeTicks> > thread_event_start_times_; | |
| 763 base::hash_map<std::string, int> thread_colors_; | |
| 764 | |
| 765 TimeTicks buffer_limit_reached_timestamp_; | |
| 766 | |
| 767 // XORed with TraceID to make it unlikely to collide with other processes. | |
| 768 unsigned long long process_id_hash_; | |
| 769 | |
| 770 int process_id_; | |
| 771 | |
| 772 TimeDelta time_offset_; | |
| 773 | |
| 774 // Allow tests to wake up when certain events occur. | |
| 775 WatchEventCallback watch_event_callback_; | |
| 776 subtle::AtomicWord /* const unsigned char* */ watch_category_; | |
| 777 std::string watch_event_name_; | |
| 778 | |
| 779 subtle::AtomicWord /* Options */ trace_options_; | |
| 780 | |
| 781 // Sampling thread handles. | |
| 782 scoped_ptr<TraceSamplingThread> sampling_thread_; | |
| 783 PlatformThreadHandle sampling_thread_handle_; | |
| 784 | |
| 785 CategoryFilter category_filter_; | |
| 786 CategoryFilter event_callback_category_filter_; | |
| 787 | |
| 788 ThreadLocalPointer<ThreadLocalEventBuffer> thread_local_event_buffer_; | |
| 789 ThreadLocalBoolean thread_blocks_message_loop_; | |
| 790 ThreadLocalBoolean thread_is_in_trace_event_; | |
| 791 | |
| 792 // Contains the message loops of threads that have had at least one event | |
| 793 // added into the local event buffer. Not using MessageLoopProxy because we | |
| 794 // need to know the life time of the message loops. | |
| 795 hash_set<MessageLoop*> thread_message_loops_; | |
| 796 | |
| 797 // For events which can't be added into the thread local buffer, e.g. events | |
| 798 // from threads without a message loop. | |
| 799 scoped_ptr<TraceBufferChunk> thread_shared_chunk_; | |
| 800 size_t thread_shared_chunk_index_; | |
| 801 | |
| 802 // Set when asynchronous Flush is in progress. | |
| 803 OutputCallback flush_output_callback_; | |
| 804 scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_; | |
| 805 subtle::AtomicWord generation_; | |
| 806 | |
| 807 DISALLOW_COPY_AND_ASSIGN(TraceLog); | |
| 808 }; | |
| 809 | |
| 810 } // namespace debug | |
| 811 } // namespace base | |
| 812 | 13 |
| 813 #endif // BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 14 #endif // BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
| OLD | NEW |