OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 | 5 |
6 #ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 6 #ifndef BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
7 #define BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 7 #define BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
8 | 8 |
9 #include <stack> | 9 #include <stack> |
10 #include <string> | 10 #include <string> |
(...skipping 346 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
357 const char* category) const; | 357 const char* category) const; |
358 | 358 |
359 StringList included_; | 359 StringList included_; |
360 StringList disabled_; | 360 StringList disabled_; |
361 StringList excluded_; | 361 StringList excluded_; |
362 StringList delays_; | 362 StringList delays_; |
363 }; | 363 }; |
364 | 364 |
365 class TraceSamplingThread; | 365 class TraceSamplingThread; |
366 | 366 |
367 struct BASE_EXPORT TraceOptions { | |
368 | |
369 // Options determines how the trace buffer stores data. | |
370 enum RecordMode { | |
371 // Record until the trace buffer is full. | |
372 RECORD_UNTIL_FULL, | |
373 | |
374 // Record until the user ends the trace. The trace buffer is a fixed size | |
375 // and we use it as a ring buffer during recording. | |
376 RECORD_CONTINUOUSLY, | |
377 | |
378 // Echo to console. Events are discarded. | |
379 ECHO_TO_CONSOLE, | |
380 }; | |
381 | |
382 static const char* kRecordUntilFull; | |
383 static const char* kRecordContinuously; | |
384 static const char* kTraceToConsole; | |
385 static const char* kEnableSampling; | |
386 | |
387 TraceOptions() | |
388 : record_mode(RECORD_UNTIL_FULL), enable_sampling(false) {} | |
389 | |
390 TraceOptions(RecordMode record_mode, bool enable_sampling) | |
391 : record_mode(record_mode), enable_sampling(enable_sampling) {} | |
Xianzhu
2014/07/29 21:59:05
Indentation of the above lines seems incorrect.
Xianzhu
2014/07/29 22:00:27
Sorry, this is an old comment. Please ignore it.
| |
392 | |
393 explicit TraceOptions(StringPiece options); | |
394 | |
395 std::string ToString() const; | |
396 | |
397 RecordMode record_mode; | |
398 bool enable_sampling; | |
399 }; | |
400 | |
367 class BASE_EXPORT TraceLog { | 401 class BASE_EXPORT TraceLog { |
368 public: | 402 public: |
369 enum Mode { | 403 enum Mode { |
370 DISABLED = 0, | 404 DISABLED = 0, |
371 RECORDING_MODE, | 405 RECORDING_MODE, |
372 MONITORING_MODE, | 406 MONITORING_MODE, |
373 }; | 407 }; |
374 | 408 |
375 // Options determines how the trace buffer stores data. | |
376 enum Options { | |
377 // Record until the trace buffer is full. | |
378 RECORD_UNTIL_FULL = 1 << 0, | |
379 | |
380 // Record until the user ends the trace. The trace buffer is a fixed size | |
381 // and we use it as a ring buffer during recording. | |
382 RECORD_CONTINUOUSLY = 1 << 1, | |
383 | |
384 // Enable the sampling profiler in the recording mode. | |
385 ENABLE_SAMPLING = 1 << 2, | |
386 | |
387 // Echo to console. Events are discarded. | |
388 ECHO_TO_CONSOLE = 1 << 3, | |
389 }; | |
390 | |
391 // The pointer returned from GetCategoryGroupEnabledInternal() points to a | 409 // The pointer returned from GetCategoryGroupEnabledInternal() points to a |
392 // value with zero or more of the following bits. Used in this class only. | 410 // value with zero or more of the following bits. Used in this class only. |
393 // The TRACE_EVENT macros should only use the value as a bool. | 411 // The TRACE_EVENT macros should only use the value as a bool. |
394 // These values must be in sync with macro values in TraceEvent.h in Blink. | 412 // These values must be in sync with macro values in TraceEvent.h in Blink. |
395 enum CategoryGroupEnabledFlags { | 413 enum CategoryGroupEnabledFlags { |
396 // Category group enabled for the recording mode. | 414 // Category group enabled for the recording mode. |
397 ENABLED_FOR_RECORDING = 1 << 0, | 415 ENABLED_FOR_RECORDING = 1 << 0, |
398 // Category group enabled for the monitoring mode. | 416 // Category group enabled for the monitoring mode. |
399 ENABLED_FOR_MONITORING = 1 << 1, | 417 ENABLED_FOR_MONITORING = 1 << 1, |
400 // Category group enabled by SetEventCallbackEnabled(). | 418 // Category group enabled by SetEventCallbackEnabled(). |
401 ENABLED_FOR_EVENT_CALLBACK = 1 << 2, | 419 ENABLED_FOR_EVENT_CALLBACK = 1 << 2, |
402 }; | 420 }; |
403 | 421 |
404 static TraceLog* GetInstance(); | 422 static TraceLog* GetInstance(); |
405 | 423 |
406 // Get set of known category groups. This can change as new code paths are | 424 // Get set of known category groups. This can change as new code paths are |
407 // reached. The known category groups are inserted into |category_groups|. | 425 // reached. The known category groups are inserted into |category_groups|. |
408 void GetKnownCategoryGroups(std::vector<std::string>* category_groups); | 426 void GetKnownCategoryGroups(std::vector<std::string>* category_groups); |
409 | 427 |
410 // Retrieves a copy (for thread-safety) of the current CategoryFilter. | 428 // Retrieves a copy (for thread-safety) of the current CategoryFilter. |
411 CategoryFilter GetCurrentCategoryFilter(); | 429 CategoryFilter GetCurrentCategoryFilter(); |
412 | 430 |
413 Options trace_options() const { | 431 // Retrieves a copy (for thread-safety) of the current TraceOptions. |
414 return static_cast<Options>(subtle::NoBarrier_Load(&trace_options_)); | 432 TraceOptions GetCurrentTraceOptions() const; |
415 } | |
416 | 433 |
417 // Enables normal tracing (recording trace events in the trace buffer). | 434 // Enables normal tracing (recording trace events in the trace buffer). |
418 // See CategoryFilter comments for details on how to control what categories | 435 // See CategoryFilter comments for details on how to control what categories |
419 // will be traced. If tracing has already been enabled, |category_filter| will | 436 // will be traced. If tracing has already been enabled, |category_filter| will |
420 // be merged into the current category filter. | 437 // be merged into the current category filter. |
421 void SetEnabled(const CategoryFilter& category_filter, | 438 void SetEnabled(const CategoryFilter& category_filter, |
422 Mode mode, Options options); | 439 Mode mode, TraceOptions options); |
423 | 440 |
424 // Disables normal tracing for all categories. | 441 // Disables normal tracing for all categories. |
425 void SetDisabled(); | 442 void SetDisabled(); |
426 | 443 |
427 bool IsEnabled() { return mode_ != DISABLED; } | 444 bool IsEnabled() { return mode_ != DISABLED; } |
428 | 445 |
429 // The number of times we have begun recording traces. If tracing is off, | 446 // The number of times we have begun recording traces. If tracing is off, |
430 // returns -1. If tracing is on, then it returns the number of times we have | 447 // returns -1. If tracing is on, then it returns the number of times we have |
431 // recorded a trace. By watching for this number to increment, you can | 448 // recorded a trace. By watching for this number to increment, you can |
432 // passively discover when a new trace has begun. This is then used to | 449 // passively discover when a new trace has begun. This is then used to |
(...skipping 159 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
592 void SetTimeOffset(TimeDelta offset); | 609 void SetTimeOffset(TimeDelta offset); |
593 | 610 |
594 size_t GetObserverCountForTest() const; | 611 size_t GetObserverCountForTest() const; |
595 | 612 |
596 // Call this method if the current thread may block the message loop to | 613 // Call this method if the current thread may block the message loop to |
597 // prevent the thread from using the thread-local buffer because the thread | 614 // prevent the thread from using the thread-local buffer because the thread |
598 // may not handle the flush request in time causing loss of unflushed events. | 615 // may not handle the flush request in time causing loss of unflushed events. |
599 void SetCurrentThreadBlocksMessageLoop(); | 616 void SetCurrentThreadBlocksMessageLoop(); |
600 | 617 |
601 private: | 618 private: |
619 typedef unsigned int InternalTraceOptions; | |
620 | |
602 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 621 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
603 TraceBufferRingBufferGetReturnChunk); | 622 TraceBufferRingBufferGetReturnChunk); |
604 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 623 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
605 TraceBufferRingBufferHalfIteration); | 624 TraceBufferRingBufferHalfIteration); |
606 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 625 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
607 TraceBufferRingBufferFullIteration); | 626 TraceBufferRingBufferFullIteration); |
608 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | 627 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, |
609 TraceBufferVectorReportFull); | 628 TraceBufferVectorReportFull); |
629 FRIEND_TEST_ALL_PREFIXES(TraceEventTestFixture, | |
630 ConvertTraceOptionsToInternalOptions); | |
631 | |
610 | 632 |
611 // This allows constructor and destructor to be private and usable only | 633 // This allows constructor and destructor to be private and usable only |
612 // by the Singleton class. | 634 // by the Singleton class. |
613 friend struct DefaultSingletonTraits<TraceLog>; | 635 friend struct DefaultSingletonTraits<TraceLog>; |
614 | 636 |
615 // Enable/disable each category group based on the current mode_, | 637 // Enable/disable each category group based on the current mode_, |
616 // category_filter_, event_callback_ and event_callback_category_filter_. | 638 // category_filter_, event_callback_ and event_callback_category_filter_. |
617 // Enable the category group in the enabled mode if category_filter_ matches | 639 // Enable the category group in the enabled mode if category_filter_ matches |
618 // the category group, or event_callback_ is not null and | 640 // the category group, or event_callback_ is not null and |
619 // event_callback_category_filter_ matches the category group. | 641 // event_callback_category_filter_ matches the category group. |
620 void UpdateCategoryGroupEnabledFlags(); | 642 void UpdateCategoryGroupEnabledFlags(); |
621 void UpdateCategoryGroupEnabledFlag(size_t category_index); | 643 void UpdateCategoryGroupEnabledFlag(size_t category_index); |
622 | 644 |
623 // Configure synthetic delays based on the values set in the current | 645 // Configure synthetic delays based on the values set in the current |
624 // category filter. | 646 // category filter. |
625 void UpdateSyntheticDelaysFromCategoryFilter(); | 647 void UpdateSyntheticDelaysFromCategoryFilter(); |
626 | 648 |
649 InternalTraceOptions GetInternalOptionsFromTraceOptions( | |
650 const TraceOptions& options); | |
651 | |
627 class ThreadLocalEventBuffer; | 652 class ThreadLocalEventBuffer; |
628 class OptionalAutoLock; | 653 class OptionalAutoLock; |
629 | 654 |
630 TraceLog(); | 655 TraceLog(); |
631 ~TraceLog(); | 656 ~TraceLog(); |
632 const unsigned char* GetCategoryGroupEnabledInternal(const char* name); | 657 const unsigned char* GetCategoryGroupEnabledInternal(const char* name); |
633 void AddMetadataEventsWhileLocked(); | 658 void AddMetadataEventsWhileLocked(); |
634 | 659 |
660 InternalTraceOptions trace_options() const { | |
661 return static_cast<InternalTraceOptions>( | |
662 subtle::NoBarrier_Load(&trace_options_)); | |
663 } | |
664 | |
635 TraceBuffer* trace_buffer() const { return logged_events_.get(); } | 665 TraceBuffer* trace_buffer() const { return logged_events_.get(); } |
636 TraceBuffer* CreateTraceBuffer(); | 666 TraceBuffer* CreateTraceBuffer(); |
637 TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks); | 667 TraceBuffer* CreateTraceBufferVectorOfSize(size_t max_chunks); |
638 | 668 |
639 std::string EventToConsoleMessage(unsigned char phase, | 669 std::string EventToConsoleMessage(unsigned char phase, |
640 const TimeTicks& timestamp, | 670 const TimeTicks& timestamp, |
641 TraceEvent* trace_event); | 671 TraceEvent* trace_event); |
642 | 672 |
643 TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle, | 673 TraceEvent* AddEventToThreadSharedChunkWhileLocked(TraceEventHandle* handle, |
644 bool check_buffer_is_full); | 674 bool check_buffer_is_full); |
(...skipping 19 matching lines...) Expand all Loading... | |
664 } | 694 } |
665 void UseNextTraceBuffer(); | 695 void UseNextTraceBuffer(); |
666 | 696 |
667 TimeTicks OffsetNow() const { | 697 TimeTicks OffsetNow() const { |
668 return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime()); | 698 return OffsetTimestamp(TimeTicks::NowFromSystemTraceTime()); |
669 } | 699 } |
670 TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const { | 700 TimeTicks OffsetTimestamp(const TimeTicks& timestamp) const { |
671 return timestamp - time_offset_; | 701 return timestamp - time_offset_; |
672 } | 702 } |
673 | 703 |
704 // Internal representation of trace options since we store the currently used | |
705 // trace option as an AtomicWord. | |
706 static const InternalTraceOptions NONE; | |
dsinclair
2014/07/28 18:52:36
Not a big fan of this duplication. We now have to
| |
707 static const InternalTraceOptions RECORD_UNTIL_FULL; | |
708 static const InternalTraceOptions RECORD_CONTINUOUSLY; | |
709 static const InternalTraceOptions ENABLE_SAMPLING; | |
710 static const InternalTraceOptions ECHO_TO_CONSOLE; | |
711 | |
674 // This lock protects TraceLog member accesses (except for members protected | 712 // This lock protects TraceLog member accesses (except for members protected |
675 // by thread_info_lock_) from arbitrary threads. | 713 // by thread_info_lock_) from arbitrary threads. |
676 mutable Lock lock_; | 714 mutable Lock lock_; |
677 // This lock protects accesses to thread_names_, thread_event_start_times_ | 715 // This lock protects accesses to thread_names_, thread_event_start_times_ |
678 // and thread_colors_. | 716 // and thread_colors_. |
679 Lock thread_info_lock_; | 717 Lock thread_info_lock_; |
680 int locked_line_; | 718 int locked_line_; |
681 Mode mode_; | 719 Mode mode_; |
682 int num_traces_recorded_; | 720 int num_traces_recorded_; |
683 scoped_ptr<TraceBuffer> logged_events_; | 721 scoped_ptr<TraceBuffer> logged_events_; |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
737 scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_; | 775 scoped_refptr<MessageLoopProxy> flush_message_loop_proxy_; |
738 subtle::AtomicWord generation_; | 776 subtle::AtomicWord generation_; |
739 | 777 |
740 DISALLOW_COPY_AND_ASSIGN(TraceLog); | 778 DISALLOW_COPY_AND_ASSIGN(TraceLog); |
741 }; | 779 }; |
742 | 780 |
743 } // namespace debug | 781 } // namespace debug |
744 } // namespace base | 782 } // namespace base |
745 | 783 |
746 #endif // BASE_DEBUG_TRACE_EVENT_IMPL_H_ | 784 #endif // BASE_DEBUG_TRACE_EVENT_IMPL_H_ |
OLD | NEW |