| OLD | NEW |
| 1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef SRC_TRACING_TRACE_EVENT_H_ | 5 #ifndef SRC_TRACING_TRACE_EVENT_H_ |
| 6 #define SRC_TRACING_TRACE_EVENT_H_ | 6 #define SRC_TRACING_TRACE_EVENT_H_ |
| 7 | 7 |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 #include <memory> | 9 #include <memory> |
| 10 | 10 |
| (...skipping 31 matching lines...) |
| 42 | 42 |
| 43 // By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC | 43 // By default, pointers are mangled with the Process ID in TRACE_EVENT_ASYNC |
| 44 // macros. Use this macro to prevent Process ID mangling. | 44 // macros. Use this macro to prevent Process ID mangling. |
| 45 #define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id) | 45 #define TRACE_ID_DONT_MANGLE(id) v8::internal::tracing::TraceID::DontMangle(id) |
| 46 | 46 |
| 47 // By default, trace IDs are eventually converted to a single 64-bit number. Use | 47 // By default, trace IDs are eventually converted to a single 64-bit number. Use |
| 48 // this macro to add a scope string. | 48 // this macro to add a scope string. |
| 49 #define TRACE_ID_WITH_SCOPE(scope, id) \ | 49 #define TRACE_ID_WITH_SCOPE(scope, id) \ |
| 50 v8::internal::tracing::TraceID::WithScope(scope, id) | 50 v8::internal::tracing::TraceID::WithScope(scope, id) |
| 51 | 51 |
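For orientation, a hedged sketch of how these two helpers are passed as the id argument of the async trace macros defined later in this header (the event names and id here are illustrative, and TRACE_EVENT_ASYNC_BEGIN0 is assumed from the skipped portion of the file):

    uint64_t request_id = 42;
    // Opt this id out of process-ID mangling:
    TRACE_EVENT_ASYNC_BEGIN0("v8", "Request", TRACE_ID_DONT_MANGLE(request_id));
    // Qualify the 64-bit id with a scope string so ids from unrelated
    // subsystems cannot collide:
    TRACE_EVENT_ASYNC_BEGIN0("v8", "Request",
                             TRACE_ID_WITH_SCOPE("RequestScope", request_id));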
| 52 // Sets the current sampling state to the given category and name (both must be | |
| 53 // constant strings). These states are intended for a sampling profiler. | |
| 54 // Implementation note: we store category and name together because we don't | |
| 55 // want the inconsistency/expense of storing two pointers. | |
| 56 // |thread_bucket| is [0..2] and is used to statically isolate samples in one | |
| 57 // thread from others. | |
| 58 #define TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ | |
| 59 name) \ | |
| 60 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Set( \ | |
| 61 category "\0" name) | |
| 62 | |
| 63 // Returns the current sampling state of the given bucket. | |
| 64 #define TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(bucket_number) \ | |
| 65 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number>::Current() | |
| 66 | |
| 67 // Creates a scope that sets the sampling state of the given bucket. | |
| 68 // | |
| 69 // { // The sampling state is set within this scope. | |
| 70 //   TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(0, "category", "name"); | |
| 71 // ...; | |
| 72 // } | |
| 73 #define TRACE_EVENT_SCOPED_SAMPLING_STATE_FOR_BUCKET(bucket_number, category, \ | |
| 74 name) \ | |
| 75 v8::internal::tracing::TraceEventSamplingStateScope<bucket_number> \ | |
| 76 traceEventSamplingScope(category "\0" name); | |
| 77 | |
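A minimal usage sketch of the set/get pair above (the bucket number and strings are illustrative). Because each bucket's state lives behind a single pointer, the category and name travel as one string with an embedded NUL:

    TRACE_EVENT_SET_SAMPLING_STATE_FOR_BUCKET(0, "v8", "Parsing");
    const char* state = TRACE_EVENT_GET_SAMPLING_STATE_FOR_BUCKET(0);
    // state points at "v8\0Parsing": the category string, then, after the
    // embedded NUL, the name string.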
| 78 | |
| 79 #define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \ | 52 #define INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() \ |
| 80 *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \ | 53 *INTERNAL_TRACE_EVENT_UID(category_group_enabled) & \ |
| 81 (kEnabledForRecording_CategoryGroupEnabledFlags | \ | 54 (kEnabledForRecording_CategoryGroupEnabledFlags | \ |
| 82 kEnabledForEventCallback_CategoryGroupEnabledFlags) | 55 kEnabledForEventCallback_CategoryGroupEnabledFlags) |
| 83 | 56 |
| 84 // The following macro has no implementation, but it needs to exist since | 57 // The following macro has no implementation, but it needs to exist since |
| 85 // it gets called from scoped trace events. It cannot call UNIMPLEMENTED() | 58 // it gets called from scoped trace events. It cannot call UNIMPLEMENTED() |
| 86 // since an empty implementation is a valid one. | 59 // since an empty implementation is a valid one. |
| 87 #define INTERNAL_TRACE_MEMORY(category, name) | 60 #define INTERNAL_TRACE_MEMORY(category, name) |
| 88 | 61 |
| (...skipping 42 matching lines...) |
| 131 #define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \ | 104 #define TRACE_EVENT_API_UPDATE_TRACE_EVENT_DURATION \ |
| 132 v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \ | 105 v8::internal::tracing::TraceEventHelper::GetCurrentPlatform() \ |
| 133 ->UpdateTraceEventDuration | 106 ->UpdateTraceEventDuration |
| 134 | 107 |
| 135 // Defines atomic operations used internally by the tracing system. | 108 // Defines atomic operations used internally by the tracing system. |
| 136 #define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord | 109 #define TRACE_EVENT_API_ATOMIC_WORD v8::base::AtomicWord |
| 137 #define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var)) | 110 #define TRACE_EVENT_API_ATOMIC_LOAD(var) v8::base::NoBarrier_Load(&(var)) |
| 138 #define TRACE_EVENT_API_ATOMIC_STORE(var, value) \ | 111 #define TRACE_EVENT_API_ATOMIC_STORE(var, value) \ |
| 139 v8::base::NoBarrier_Store(&(var), (value)) | 112 v8::base::NoBarrier_Store(&(var), (value)) |
| 140 | 113 |
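These atomics exist so each trace macro call site can cache its category-group lookup in a zero-initialized static word and run the lookup only once. A sketch of that pattern, assuming the TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED macro from the section skipped above:

    static const uint8_t* GetV8CategoryEnabled() {
      // Zero-initialized at startup; non-zero means the lookup already ran.
      static TRACE_EVENT_API_ATOMIC_WORD cached;
      TRACE_EVENT_API_ATOMIC_WORD word = TRACE_EVENT_API_ATOMIC_LOAD(cached);
      if (!word) {
        word = reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>(
            TRACE_EVENT_API_GET_CATEGORY_GROUP_ENABLED("v8"));
        TRACE_EVENT_API_ATOMIC_STORE(cached, word);
      }
      return reinterpret_cast<const uint8_t*>(word);
    }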
| 141 // The thread buckets for the sampling profiler. | |
| 142 extern TRACE_EVENT_API_ATOMIC_WORD g_trace_state[3]; | |
| 143 | |
| 144 #define TRACE_EVENT_API_THREAD_BUCKET(thread_bucket) \ | |
| 145 g_trace_state[thread_bucket] | |
| 146 | |
| 147 //////////////////////////////////////////////////////////////////////////////// | 114 //////////////////////////////////////////////////////////////////////////////// |
| 148 | 115 |
| 149 // Implementation detail: trace event macros create temporary variables | 116 // Implementation detail: trace event macros create temporary variables |
| 150 // to keep instrumentation overhead low. These macros give each temporary | 117 // to keep instrumentation overhead low. These macros give each temporary |
| 151 // variable a unique name based on the line number to prevent name collisions. | 118 // variable a unique name based on the line number to prevent name collisions. |
| 152 #define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b | 119 #define INTERNAL_TRACE_EVENT_UID3(a, b) trace_event_unique_##a##b |
| 153 #define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b) | 120 #define INTERNAL_TRACE_EVENT_UID2(a, b) INTERNAL_TRACE_EVENT_UID3(a, b) |
| 154 #define INTERNAL_TRACE_EVENT_UID(name_prefix) \ | 121 #define INTERNAL_TRACE_EVENT_UID(name_prefix) \ |
| 155 INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__) | 122 INTERNAL_TRACE_EVENT_UID2(name_prefix, __LINE__) |
| 156 | 123 |
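A worked expansion, for concreteness: the extra INTERNAL_TRACE_EVENT_UID2 indirection exists so that __LINE__ is replaced by its numeric value before token pasting. Used at line 42 of a file,

    INTERNAL_TRACE_EVENT_UID(category_group_enabled)

first becomes INTERNAL_TRACE_EVENT_UID2(category_group_enabled, 42) and finally

    trace_event_unique_category_group_enabled42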
| (...skipping 449 matching lines...) |
| 606 // uninitialized accesses. | 573 // uninitialized accesses. |
| 607 struct Data { | 574 struct Data { |
| 608 const uint8_t* category_group_enabled; | 575 const uint8_t* category_group_enabled; |
| 609 const char* name; | 576 const char* name; |
| 610 uint64_t event_handle; | 577 uint64_t event_handle; |
| 611 }; | 578 }; |
| 612 Data* p_data_; | 579 Data* p_data_; |
| 613 Data data_; | 580 Data data_; |
| 614 }; | 581 }; |
| 615 | 582 |
| 616 // Used by TRACE_EVENT_BINARY_EFFICIENTx macro. Do not use directly. | |
| 617 class ScopedTraceBinaryEfficient { | |
| 618 public: | |
| 619 ScopedTraceBinaryEfficient(const char* category_group, const char* name); | |
| 620 ~ScopedTraceBinaryEfficient(); | |
| 621 | |
| 622 private: | |
| 623 const uint8_t* category_group_enabled_; | |
| 624 const char* name_; | |
| 625 uint64_t event_handle_; | |
| 626 }; | |
| 627 | |
| 628 // TraceEventSamplingStateScope records the current sampling state | |
| 629 // and sets a new sampling state. When the scope exits, it restores | |
| 630 // the previously recorded sampling state. | |
| 631 template <size_t BucketNumber> | |
| 632 class TraceEventSamplingStateScope { | |
| 633 public: | |
| 634 explicit TraceEventSamplingStateScope(const char* category_and_name) { | |
| 635 previous_state_ = TraceEventSamplingStateScope<BucketNumber>::Current(); | |
| 636 TraceEventSamplingStateScope<BucketNumber>::Set(category_and_name); | |
| 637 } | |
| 638 | |
| 639 ~TraceEventSamplingStateScope() { | |
| 640 TraceEventSamplingStateScope<BucketNumber>::Set(previous_state_); | |
| 641 } | |
| 642 | |
| 643 static V8_INLINE const char* Current() { | |
| 644 return reinterpret_cast<const char*>( | |
| 645 TRACE_EVENT_API_ATOMIC_LOAD(g_trace_state[BucketNumber])); | |
| 646 } | |
| 647 | |
| 648 static V8_INLINE void Set(const char* category_and_name) { | |
| 649 TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber], | |
| 650 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( | |
| 651 const_cast<char*>(category_and_name))); | |
| 652 } | |
| 653 | |
| 654 private: | |
| 655 const char* previous_state_; | |
| 656 }; | |
| 657 | |
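A hedged sketch of the save/restore behavior implemented above (the bucket number and state strings are illustrative):

    {
      // Constructor records the old bucket-0 state, then installs "v8\0Outer".
      v8::internal::tracing::TraceEventSamplingStateScope<0> outer("v8\0Outer");
      {
        v8::internal::tracing::TraceEventSamplingStateScope<0> inner("v8\0Inner");
        // A sampling profiler reading bucket 0 now sees "v8\0Inner".
      }  // inner's destructor restores "v8\0Outer".
    }    // outer's destructor restores whatever state preceded the block.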
| 658 // Do not use directly. | 583 // Do not use directly. |
| 659 class CallStatsScopedTracer { | 584 class CallStatsScopedTracer { |
| 660 public: | 585 public: |
| 661 CallStatsScopedTracer() : p_data_(nullptr) {} | 586 CallStatsScopedTracer() : p_data_(nullptr) {} |
| 662 ~CallStatsScopedTracer() { | 587 ~CallStatsScopedTracer() { |
| 663 if (V8_UNLIKELY(p_data_ && *data_.category_group_enabled)) { | 588 if (V8_UNLIKELY(p_data_ && *data_.category_group_enabled)) { |
| 664 AddEndTraceEvent(); | 589 AddEndTraceEvent(); |
| 665 } | 590 } |
| 666 } | 591 } |
| 667 | 592 |
| (...skipping 10 matching lines...) |
| 678 bool has_parent_scope_; | 603 bool has_parent_scope_; |
| 679 Data* p_data_; | 604 Data* p_data_; |
| 680 Data data_; | 605 Data data_; |
| 681 }; | 606 }; |
| 682 | 607 |
| 683 } // namespace tracing | 608 } // namespace tracing |
| 684 } // namespace internal | 609 } // namespace internal |
| 685 } // namespace v8 | 610 } // namespace v8 |
| 686 | 611 |
| 687 #endif // SRC_TRACING_TRACE_EVENT_H_ | 612 #endif // SRC_TRACING_TRACE_EVENT_H_ |