OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 | 5 |
6 #ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 6 #ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
7 #define BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 7 #define BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
8 | 8 |
9 #include <stack> | 9 #include <stack> |
10 #include <string> | 10 #include <string> |
(...skipping 346 matching lines...)
357 const char* category) const; | 357 const char* category) const; |
358 | 358 |
359 StringList included_; | 359 StringList included_; |
360 StringList disabled_; | 360 StringList disabled_; |
361 StringList excluded_; | 361 StringList excluded_; |
362 StringList delays_; | 362 StringList delays_; |
363 }; | 363 }; |
364 | 364 |
365 class TraceSamplingThread; | 365 class TraceSamplingThread; |
366 | 366 |
| 367 // TraceRecordMode determines how the trace buffer stores data. |
| 368 enum TraceRecordMode { |
| 369 // Record until the trace buffer is full. |
| 370 RECORD_UNTIL_FULL, |
| 371 |
| 372 // Record until the user ends the trace. The trace buffer is a fixed size |
| 373 // and we use it as a ring buffer during recording. |
| 374 RECORD_CONTINUOUSLY, |
| 375 |
| 376 // Echo to console. Events are discarded. |
| 377 ECHO_TO_CONSOLE, |
| 378 }; |
| 379 |
| 380 struct BASE_EXPORT TraceOptions { |
| 381 |
| 382 TraceOptions() |
| 383 : record_mode(RECORD_UNTIL_FULL), |
| 384 enable_sampling(false), |
| 385 enable_systrace(false) {} |
| 386 |
| 387 explicit TraceOptions(TraceRecordMode record_mode) |
| 388 : record_mode(record_mode), |
| 389 enable_sampling(false), |
| 390 enable_systrace(false) {} |
| 391 |
| 392 // |options_string| is a comma-delimited list of trace options. |
| 393 // Possible options are: "record-until-full", "record-continuously", |
| 394 // "trace-to-console", "enable-sampling" and "enable-systrace". |
| 395 // The first three options are trace recording modes and hence |
| 396 // mutually exclusive. If more than one trace recording mode appears in the |
| 397 // options_string, the last one takes precedence. If no trace recording |
| 398 // mode is specified, the recording mode defaults to RECORD_UNTIL_FULL. |
| 399 // |
| 400 // Example: TraceOptions("record-until-full") |
| 401 // Example: TraceOptions("record-continuously, enable-sampling") |
| 402 // Example: TraceOptions("record-until-full, trace-to-console") would have |
| 403 // ECHO_TO_CONSOLE as the recording mode. |
| 404 explicit TraceOptions(const std::string& options_string); |
| 405 |
| 406 std::string ToString() const; |
| 407 |
| 408 TraceRecordMode record_mode; |
| 409 bool enable_sampling; |
| 410 bool enable_systrace; |
| 411 }; |
| 412 |
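The comma-delimited parsing documented for the explicit constructor can be illustrated with a small, self-contained sketch. This is not the actual Chromium implementation (that lives in the .cc file and is not part of this diff); the standalone types and the helper name ParseTraceOptions are invented here purely for illustration. It follows the rule stated in the comment: the recording modes are mutually exclusive and the last one listed wins.

    // Illustrative sketch only -- not the actual Chromium code.
    #include <sstream>
    #include <string>

    enum TraceRecordMode {
      RECORD_UNTIL_FULL,
      RECORD_CONTINUOUSLY,
      ECHO_TO_CONSOLE,
    };

    struct TraceOptionsSketch {
      TraceRecordMode record_mode = RECORD_UNTIL_FULL;
      bool enable_sampling = false;
      bool enable_systrace = false;
    };

    TraceOptionsSketch ParseTraceOptions(const std::string& options_string) {
      TraceOptionsSketch options;
      std::istringstream stream(options_string);
      std::string token;
      while (std::getline(stream, token, ',')) {
        // Strip surrounding spaces so "a, b" parses the same as "a,b".
        size_t begin = token.find_first_not_of(' ');
        size_t end = token.find_last_not_of(' ');
        if (begin == std::string::npos)
          continue;
        token = token.substr(begin, end - begin + 1);
        if (token == "record-until-full")
          options.record_mode = RECORD_UNTIL_FULL;       // last mode wins
        else if (token == "record-continuously")
          options.record_mode = RECORD_CONTINUOUSLY;
        else if (token == "trace-to-console")
          options.record_mode = ECHO_TO_CONSOLE;
        else if (token == "enable-sampling")
          options.enable_sampling = true;
        else if (token == "enable-systrace")
          options.enable_systrace = true;
      }
      return options;
    }

With these rules, ParseTraceOptions("record-until-full, trace-to-console") yields ECHO_TO_CONSOLE, matching the precedence example in the header comment.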
367 class BASE_EXPORT TraceLog { | 413 class BASE_EXPORT TraceLog { |
368 public: | 414 public: |
369 enum Mode { | 415 enum Mode { |
370 DISABLED = 0, | 416 DISABLED = 0, |
371 RECORDING_MODE, | 417 RECORDING_MODE, |
372 MONITORING_MODE, | 418 MONITORING_MODE, |
373 }; | 419 }; |
374 | 420 |
375 // Options determines how the trace buffer stores data. | |
376 enum Options { | |
377 // Record until the trace buffer is full. | |
378 RECORD_UNTIL_FULL = 1 << 0, | |
379 | |
380 // Record until the user ends the trace. The trace buffer is a fixed size | |
381 // and we use it as a ring buffer during recording. | |
382 RECORD_CONTINUOUSLY = 1 << 1, | |
383 | |
384 // Enable the sampling profiler in the recording mode. | |
385 ENABLE_SAMPLING = 1 << 2, | |
386 | |
387 // Echo to console. Events are discarded. | |
388 ECHO_TO_CONSOLE = 1 << 3, | |
389 }; | |
390 | |
391 // The pointer returned from GetCategoryGroupEnabledInternal() points to a | 421 // The pointer returned from GetCategoryGroupEnabledInternal() points to a |
392 // value with zero or more of the following bits. Used in this class only. | 422 // value with zero or more of the following bits. Used in this class only. |
393 // The TRACE_EVENT macros should only use the value as a bool. | 423 // The TRACE_EVENT macros should only use the value as a bool. |
394 // These values must be in sync with macro values in TraceEvent.h in Blink. | 424 // These values must be in sync with macro values in TraceEvent.h in Blink. |
395 enum CategoryGroupEnabledFlags { | 425 enum CategoryGroupEnabledFlags { |
396 // Category group enabled for the recording mode. | 426 // Category group enabled for the recording mode. |
397 ENABLED_FOR_RECORDING = 1 << 0, | 427 ENABLED_FOR_RECORDING = 1 << 0, |
398 // Category group enabled for the monitoring mode. | 428 // Category group enabled for the monitoring mode. |
399 ENABLED_FOR_MONITORING = 1 << 1, | 429 ENABLED_FOR_MONITORING = 1 << 1, |
400 // Category group enabled by SetEventCallbackEnabled(). | 430 // Category group enabled by SetEventCallbackEnabled(). |
401 ENABLED_FOR_EVENT_CALLBACK = 1 << 2, | 431 ENABLED_FOR_EVENT_CALLBACK = 1 << 2, |
402 }; | 432 }; |
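The enabled-pointer contract above (the TRACE_EVENT macros treat the byte as a bool, while TraceLog inspects individual bits) is the standard bit-flag pattern. A minimal, self-contained illustration follows; it is not TraceLog code itself, just a demonstration of how such a flag byte behaves.

    // Minimal illustration of the flag byte described above; not TraceLog code.
    #include <cstdio>

    enum CategoryGroupEnabledFlags {
      ENABLED_FOR_RECORDING = 1 << 0,
      ENABLED_FOR_MONITORING = 1 << 1,
      ENABLED_FOR_EVENT_CALLBACK = 1 << 2,
    };

    int main() {
      unsigned char enabled = 0;
      enabled |= ENABLED_FOR_RECORDING;       // recording mode is on
      enabled |= ENABLED_FOR_EVENT_CALLBACK;  // an event callback is registered

      // The TRACE_EVENT macros only care whether any bit is set.
      if (enabled)
        std::printf("category group enabled\n");

      // TraceLog itself tests individual bits to route the event.
      if (enabled & ENABLED_FOR_MONITORING)
        std::printf("also enabled for monitoring\n");
      return 0;
    }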
403 | 433 |
404 static TraceLog* GetInstance(); | 434 static TraceLog* GetInstance(); |
405 | 435 |
406 // Get set of known category groups. This can change as new code paths are | 436 // Get set of known category groups. This can change as new code paths are |
407 // reached. The known category groups are inserted into |category_groups|. | 437 // reached. The known category groups are inserted into |category_groups|. |
408 void GetKnownCategoryGroups(std::vector<std::string>* category_groups); | 438 void GetKnownCategoryGroups(std::vector<std::string>* category_groups); |
409 | 439 |
410 // Retrieves a copy (for thread-safety) of the current CategoryFilter. | 440 // Retrieves a copy (for thread-safety) of the current CategoryFilter. |
411 CategoryFilter GetCurrentCategoryFilter(); | 441 CategoryFilter GetCurrentCategoryFilter(); |
412 | 442 |
413 Options trace_options() const { | 443 // Retrieves a copy (for thread-safety) of the current TraceOptions. |
414 return static_cast<Options>(subtle::NoBarrier_Load(&trace_options_)); | 444 TraceOptions GetCurrentTraceOptions() const; |
415 } | |
416 | 445 |
417 // Enables normal tracing (recording trace events in the trace buffer). | 446 // Enables normal tracing (recording trace events in the trace buffer). |
418 // See CategoryFilter comments for details on how to control what categories | 447 // See CategoryFilter comments for details on how to control what categories |
419 // will be traced. If tracing has already been enabled, |category_filter| will | 448 // will be traced. If tracing has already been enabled, |category_filter| will |
420 // be merged into the current category filter. | 449 // be merged into the current category filter. |
421 void SetEnabled(const CategoryFilter& category_filter, | 450 void SetEnabled(const CategoryFilter& category_filter, |
422 Mode mode, Options options); | 451 Mode mode, const TraceOptions& options); |
423 | 452 |
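For reference, a hypothetical caller-side sketch of the new SetEnabled() signature. It assumes this header is included and that CategoryFilter has a string constructor declared elsewhere in the header; the category string "cc,gpu" and the function name are examples only, not part of the patch.

    // Hypothetical usage of the declarations above; not from the patch itself.
    void StartTracingSketch() {
      base::debug::TraceOptions options("record-continuously,enable-systrace");
      base::debug::TraceLog::GetInstance()->SetEnabled(
          base::debug::CategoryFilter("cc,gpu"),   // categories to trace
          base::debug::TraceLog::RECORDING_MODE,   // normal recording
          options);                                // ring buffer + systrace
    }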
424 // Disables normal tracing for all categories. | 453 // Disables normal tracing for all categories. |
425 void SetDisabled(); | 454 void SetDisabled(); |
426 | 455 |
427 bool IsEnabled() { return mode_ != DISABLED; } | 456 bool IsEnabled() { return mode_ != DISABLED; } |
428 | 457 |
429 // The number of times we have begun recording traces. If tracing is off, | 458 // The number of times we have begun recording traces. If tracing is off, |
430 // returns -1. If tracing is on, then it returns the number of times we have | 459 // returns -1. If tracing is on, then it returns the number of times we have |
431 // recorded a trace. By watching for this number to increment, you can | 460 // recorded a trace. By watching for this number to increment, you can |
432 // passively discover when a new trace has begun. This is then used to | 461 // passively discover when a new trace has begun. This is then used to |
(...skipping 159 matching lines...)
592 void SetTimeOffset(TimeDelta offset); | 621 void SetTimeOffset(TimeDelta offset); |
593 | 622 |
594 size_t GetObserverCountForTest() const; | 623 size_t GetObserverCountForTest() const; |
595 | 624 |
596 // Call this method if the current thread may block the message loop to | 625 // Call this method if the current thread may block the message loop to |
597 // prevent the thread from using the thread-local buffer because the thread | 626 // prevent the thread from using the thread-local buffer because the thread |
598 // may not handle the flush request in time causing loss of unflushed events. | 627 // may not handle the flush request in time causing loss of unflushed events. |
599 void SetCurrentThreadBlocksMessageLoop(); | 628 void SetCurrentThreadBlocksMessageLoop(); |
600 | 629 |
601 private: | 630 private: |
| 631 typedef unsigned int InternalTraceOptions; |
| 632 |
602 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 633 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
603 TraceBufferRingBufferGetReturnChunk); | 634 TraceBufferRingBufferGetReturnChunk); |
604 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 635 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
605 TraceBufferRingBufferHalfIteration); | 636 TraceBufferRingBufferHalfIteration); |
606 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 637 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
607 TraceBufferRingBufferFullIteration); | 638 TraceBufferRingBufferFullIteration); |
608 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 639 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
609 TraceBufferVectorReportFull); | 640 TraceBufferVectorReportFull); |
| 641 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
| 642 ConvertTraceOptionsToInternalOptions); |
| 643 |
610 | 644 |
611 // This allows constructor and destructor to be private and usable only | 645 // This allows constructor and destructor to be private and usable only |
612 // by the Singleton class. | 646 // by the Singleton class. |
613 friend struct DefaultSingletonTraits<TraceLog>; | 647 friend struct DefaultSingletonTraits<TraceLog>; |
614 | 648 |
615 // Enable/disable each category group based on the current mode_, | 649 // Enable/disable each category group based on the current mode_, |
616 // category_filter_, event_callback_ and event_callback_category_filter_. | 650 // category_filter_, event_callback_ and event_callback_category_filter_. |
617 // Enable the category group in the enabled mode if category_filter_ matches | 651 // Enable the category group in the enabled mode if category_filter_ matches |
618 // the category group, or event_callback_ is not null and | 652 // the category group, or event_callback_ is not null and |
619 // event_callback_category_filter_ matches the category group. | 653 // event_callback_category_filter_ matches the category group. |
620 void UpdateCategoryGroupEnabledFlags(); | 654 void UpdateCategoryGroupEnabledFlags(); |
621 void UpdateCategoryGroupEnabledFlag(size_t category_index); | 655 void UpdateCategoryGroupEnabledFlag(size_t category_index); |
622 | 656 |
623 // Configure synthetic delays based on the values set in the current | 657 // Configure synthetic delays based on the values set in the current |
624 // category filter. | 658 // category filter. |
625 void UpdateSyntheticDelaysFromCategoryFilter(); | 659 void UpdateSyntheticDelaysFromCategoryFilter(); |
626 | 660 |
| 661 InternalTraceOptions GetInternalOptionsFromTraceOptions( |
| 662 const TraceOptions& options); |
| 663 |
627 class ThreadLocalEventBuffer; | 664 class ThreadLocalEventBuffer; |
628 class OptionalAutoLock; | 665 class OptionalAutoLock; |
629 | 666 |
630 TraceLog(); | 667 TraceLog(); |
631 ~TraceLog(); | 668 ~TraceLog(); |
632 const unsigned char* GetCategoryGroupEnabledInternal(const char* name); | 669 const unsigned char* GetCategoryGroupEnabledInternal(const char* name); |
633 void AddMetadataEventsWhileLocked(); | 670 void AddMetadataEventsWhileLocked(); |
634 | 671 |
| 672 InternalTraceOptions trace_options() const { |
| 673 return static_cast<InternalTraceOptions>( |
| 674 subtle::NoBarrier_Load(&trace_options_)); |
| 675 } |
| 676 |
635 TraceBuffer* trace_buffer() const { return logged_events_.get(); } | 677 TraceBuffer* trace_buffer() const { return logged_events_.get(); } |
636 TraceBuffer* CreateTraceBuffer(); | 678 TraceBuffer* CreateTraceBuffer(); |
637 TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks); | 679 TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks); |
638 | 680 |
639 std::string EventToConsoleMessage(unsigned char phase, | 681 std::string EventToConsoleMessage(unsigned char phase, |
640 const TimeTicks& timestamp, | 682 const TimeTicks& timestamp, |
641 TraceEvent* trace_event); | 683 TraceEvent* trace_event); |
642 | 684 |
643 TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle, | 685 TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle, |
644 bool check_buffer_is_full); | 686 bool check_buffer_is_full); |
(...skipping 19 matching lines...)
664 } | 706 } |
665 void UseNextTraceBuffer(); | 707 void UseNextTraceBuffer(); |
666 | 708 |
667 TimeTicks OffsetNow() const { | 709 TimeTicks OffsetNow() const { |
668 return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime()); | 710 return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime()); |
669 } | 711 } |
670 TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const { | 712 TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const { |
671 return timestamp - time_offset_; | 713 return timestamp - time_offset_; |
672 } | 714 } |
673 | 715 |
| 716 // Internal representation of trace options, stored as bit flags because the |
| 717 // currently active options are kept in an AtomicWord. |
| 718 static const InternalTraceOptions kInternalNone; |
| 719 static const InternalTraceOptions kInternalRecordUntilFull; |
| 720 static const InternalTraceOptions kInternalRecordContinuously; |
| 721 static const InternalTraceOptions kInternalEchoToConsole; |
| 722 static const InternalTraceOptions kInternalEnableSampling; |
| 723 |
674 // This lock protects TraceLog member accesses (except for members protected | 724 // This lock protects TraceLog member accesses (except for members protected |
675 // by thread_info_lock_) from arbitrary threads. | 725 // by thread_info_lock_) from arbitrary threads. |
676 mutable Lock lock_; | 726 mutable Lock lock_; |
677 // This lock protects accesses to thread_names_, thread_event_start_times_ | 727 // This lock protects accesses to thread_names_, thread_event_start_times_ |
678 // and thread_colors_. | 728 // and thread_colors_. |
679 Lock thread_info_lock_; | 729 Lock thread_info_lock_; |
680 int locked_line_; | 730 int locked_line_; |
681 Mode mode_; | 731 Mode mode_; |
682 int num_traces_recorded_; | 732 int num_traces_recorded_; |
683 scoped_ptr<TraceBuffer> logged_events_; | 733 scoped_ptr<TraceBuffer> logged_events_; |
(...skipping 53 matching lines...)
737 scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_; | 787 scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_; |
738 subtle::AtomicWord generation_; | 788 subtle::AtomicWord generation_; |
739 | 789 |
740 DISALLOW_COPY_AND_ASSIGN(TraceLog); | 790 DISALLOW_COPY_AND_ASSIGN(TraceLog); |
741 }; | 791 }; |
742 | 792 |
743 } // namespace debug | 793 } // namespace debug |
744 } // namespace base | 794 } // namespace base |
745 | 795 |
746 #endif // BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 796 #endif // BASE_DEBUG_TRACE_EVENT_IMPL_H_ |