OLD | NEW |
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef SRC_TRACING_TRACE_EVENT_H_ | 5 #ifndef SRC_TRACING_TRACE_EVENT_H_ |
6 #define SRC_TRACING_TRACE_EVENT_H_ | 6 #define SRC_TRACING_TRACE_EVENT_H_ |
7 | 7 |
8 #include <stddef.h> | 8 #include <stddef.h> |
9 | 9 |
10 #include "base/trace_event/common/trace_event_common.h" | 10 #include "base/trace_event/common/trace_event_common.h" |
11 #include "include/v8-platform.h" | 11 #include "include/v8-platform.h" |
12 #include "src/base/atomicops.h" | 12 #include "src/base/atomicops.h" |
13 #include "src/base/macros.h" | 13 #include "src/base/macros.h" |
| 14 #include "src/counters.h" |
14 | 15 |
15 // This header file defines implementation details of how the trace macros in | 16 // This header file defines implementation details of how the trace macros in |
16 // trace_event_common.h collect and store trace events. Anything not | 17 // trace_event_common.h collect and store trace events. Anything not |
17 // implementation-specific should go in trace_macros_common.h instead of here. | 18 // implementation-specific should go in trace_macros_common.h instead of here. |
18 | 19 |
19 | 20 |
20 // The pointer returned from GetCategoryGroupEnabled() points to a | 21 // The pointer returned from GetCategoryGroupEnabled() points to a |
21 // value with zero or more of the following bits. Used in this class only. | 22 // value with zero or more of the following bits. Used in this class only. |
22 // The TRACE_EVENT macros should only use the value as a bool. | 23 // The TRACE_EVENT macros should only use the value as a bool. |
23 // These values must be in sync with macro values in trace_log.h in | 24 // These values must be in sync with macro values in trace_log.h in |
(...skipping 251 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
275 private: \ | 276 private: \ |
276 /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \ | 277 /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \ |
277 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ | 278 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ |
278 (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ | 279 (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ |
279 void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ | 280 void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ |
280 uint64_t cid_; \ | 281 uint64_t cid_; \ |
281 }; \ | 282 }; \ |
282 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ | 283 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ |
283 INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id()); | 284 INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id()); |
284 | 285 |
// Reads the cached enabled-state of the "v8.runtime_stats" tracing category
// with a lock-free atomic load. The cache is refreshed each time
// INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED (below) is expanded.
#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() \
  base::NoBarrier_Load(&v8::internal::tracing::kRuntimeCallStatsTracingEnabled)

// Public entry point: emits a scoped call-stats trace event for the given
// category/name. Thin wrapper over the INTERNAL_ macro below.
#define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
  INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name)

// Public entry point: times the enclosing scope against the runtime call
// counter identified by |counter_id| (see CounterScope below).
#define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id) \
  INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id)

// Implementation of TRACE_EVENT_CALL_STATS_SCOPED. The first (braced) part
// refreshes kRuntimeCallStatsTracingEnabled from the disabled-by-default
// "v8.runtime_stats" category; the braces scope the category_group_enabled
// variable declared by INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO so it can be
// re-declared for |category_group| right after. The tracer is only
// Initialize()d when the requested category is currently recording.
#define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \
  {                                                                           \
    INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(                                   \
        TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats"));                      \
    base::NoBarrier_Store(                                                    \
        &v8::internal::tracing::kRuntimeCallStatsTracingEnabled,              \
        INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE());    \
  }                                                                           \
  INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group);                     \
  v8::internal::tracing::CallStatsScopedTracer INTERNAL_TRACE_EVENT_UID(      \
      tracer);                                                                \
  if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) {     \
    INTERNAL_TRACE_EVENT_UID(tracer)                                          \
        .Initialize(isolate, INTERNAL_TRACE_EVENT_UID(category_group_enabled),\
                    name);                                                    \
  }

// Implementation of TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED: declares a
// uniquely named CounterScope whose constructor/destructor bracket the scope.
#define INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, \
                                                               counter_id) \
  v8::internal::tracing::CounterScope INTERNAL_TRACE_EVENT_UID(scope)(     \
      isolate, counter_id);
| 316 |
285 namespace v8 { | 317 namespace v8 { |
286 namespace internal { | 318 namespace internal { |
287 namespace tracing { | 319 namespace tracing { |
288 | 320 |
// Specify these values when the corresponding argument of AddTraceEvent is not
// used.
const int kZeroNumArgs = 0;
const decltype(nullptr) kGlobalScope = nullptr;
const uint64_t kNoId = 0;

// Cached enabled-state of the "v8.runtime_stats" category. Written (with a
// NoBarrier store) by INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED and read by
// TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED(). Defined out of line.
extern base::Atomic32 kRuntimeCallStatsTracingEnabled;
// Indirection used by the trace macros to reach the active v8::Platform
// (which presumably supplies the tracing backend — defined out of line).
class TraceEventHelper {
 public:
  static v8::Platform* GetCurrentPlatform();
};
299 | 333 |
300 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers | 334 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers |
301 // are by default mangled with the Process ID so that they are unlikely to | 335 // are by default mangled with the Process ID so that they are unlikely to |
302 // collide when the same pointer is used on different processes. | 336 // collide when the same pointer is used on different processes. |
303 class TraceID { | 337 class TraceID { |
304 public: | 338 public: |
(...skipping 278 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
583 static V8_INLINE void Set(const char* category_and_name) { | 617 static V8_INLINE void Set(const char* category_and_name) { |
584 TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber], | 618 TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber], |
585 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( | 619 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( |
586 const_cast<char*>(category_and_name))); | 620 const_cast<char*>(category_and_name))); |
587 } | 621 } |
588 | 622 |
589 private: | 623 private: |
590 const char* previous_state_; | 624 const char* previous_state_; |
591 }; | 625 }; |
592 | 626 |
// Do not use directly. Scoped tracer behind
// INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED: default-constructed disarmed
// (p_data_ == nullptr) and armed via the out-of-line Initialize(). The
// destructor emits the matching end event only when the tracer was armed and
// the category is still enabled.
class CallStatsScopedTracer {
 public:
  CallStatsScopedTracer() : p_data_(nullptr) {}
  ~CallStatsScopedTracer() {
    // p_data_ is presumably set by Initialize(); re-checking the category
    // flag avoids writing an end event after tracing has been turned off.
    if (V8_UNLIKELY(p_data_ && *data_.category_group_enabled)) {
      AddEndTraceEvent();
    }
  }

  void Initialize(Isolate* isolate, const uint8_t* category_group_enabled,
                  const char* name);

 private:
  void AddEndTraceEvent();
  // Snapshot of the trace-event parameters captured by Initialize().
  struct Data {
    const uint8_t* category_group_enabled;
    const char* name;
    Isolate* isolate;
  };
  // Managed by the out-of-line Initialize()/AddEndTraceEvent(); not touched
  // in this header.
  bool has_parent_scope_;
  Data* p_data_;
  Data data_;
};
| 651 |
// TraceEventCallStatsTimer is used to keep track of the stack of currently
// active timers used for properly measuring the own time of a
// RuntimeCallCounter.
class TraceEventCallStatsTimer {
 public:
  TraceEventCallStatsTimer() : counter_(nullptr), parent_(nullptr) {}
  // The counter this timer is currently accumulating into.
  RuntimeCallCounter* counter() { return counter_; }
  // Note: returns the timer by value (a copy).
  base::ElapsedTimer timer() { return timer_; }

 private:
  // Only TraceEventStatsTable may start/stop timers, keeping the timer
  // stack consistent.
  friend class TraceEventStatsTable;

  // Starts timing |counter|, linking to the currently running |parent|
  // timer so own-time can be computed on Stop().
  V8_INLINE void Start(RuntimeCallCounter* counter,
                       TraceEventCallStatsTimer* parent) {
    counter_ = counter;
    parent_ = parent;
    timer_.Start();
  }

  // Stops timing: books count and elapsed time into the counter, removes
  // this timer's time from the parent (so the parent records own time
  // only), and returns the parent as the new active timer.
  V8_INLINE TraceEventCallStatsTimer* Stop() {
    base::TimeDelta delta = timer_.Elapsed();
    timer_.Stop();
    counter_->count++;
    counter_->time += delta;
    if (parent_ != nullptr) {
      // Adjust parent timer so that it does not include sub timer's time.
      parent_->counter_->time -= delta;
    }
    return parent_;
  }

  RuntimeCallCounter* counter_;
  TraceEventCallStatsTimer* parent_;
  base::ElapsedTimer timer_;
};
| 687 |
// Table of RuntimeCallCounters for tracing, one member per counted runtime
// function / builtin / API call / handler (fields generated from the
// project's counter list macros). Counters are addressed via pointer-to-
// member CounterIds so callers need not know the field names.
class TraceEventStatsTable {
 public:
  // Identifies a counter as a pointer-to-member of this table.
  typedef RuntimeCallCounter TraceEventStatsTable::*CounterId;

#define CALL_RUNTIME_COUNTER(name) \
  RuntimeCallCounter name = RuntimeCallCounter(#name);
  FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
#define CALL_RUNTIME_COUNTER(name, nargs, ressize) \
  RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name);
  FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER)
#undef CALL_RUNTIME_COUNTER
#define CALL_BUILTIN_COUNTER(name) \
  RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name);
  BUILTIN_LIST_C(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
#define CALL_BUILTIN_COUNTER(name) \
  RuntimeCallCounter API_##name = RuntimeCallCounter("API_" #name);
  FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER
#define CALL_BUILTIN_COUNTER(name) \
  RuntimeCallCounter Handler_##name = RuntimeCallCounter(#name);
  FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER)
#undef CALL_BUILTIN_COUNTER

  // Starting measuring the time for a function. This will establish the
  // connection to the parent counter for properly calculating the own times.
  static void Enter(Isolate* isolate, TraceEventCallStatsTimer* timer,
                    CounterId counter_id);

  // Leave a scope for a measured runtime function. This will properly add
  // the time delta to the current_counter and subtract the delta from its
  // parent.
  static void Leave(Isolate* isolate, TraceEventCallStatsTimer* timer);

  void Reset();
  const char* Dump();

  TraceEventStatsTable() {
    Reset();
    in_use_ = false;
  }

  // The innermost timer currently running, or nullptr when idle.
  TraceEventCallStatsTimer* current_timer() { return current_timer_; }
  bool InUse() { return in_use_; }

 private:
  // Scratch buffer for Dump(); buffer_c_str_/len_ cache its C-string form.
  std::stringstream buffer_;
  std::unique_ptr<char[]> buffer_c_str_;
  size_t len_ = 0;
  // Counter to track recursive time events.
  TraceEventCallStatsTimer* current_timer_ = nullptr;
  bool in_use_;
};
| 742 |
| 743 class CounterScope { |
| 744 public: |
| 745 CounterScope(Isolate* isolate, TraceEventStatsTable::CounterId counter_id) |
| 746 : isolate_(nullptr) { |
| 747 if (V8_UNLIKELY(TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED())) { |
| 748 isolate_ = isolate; |
| 749 TraceEventStatsTable::Enter(isolate_, &timer_, counter_id); |
| 750 } |
| 751 } |
| 752 ~CounterScope() { |
| 753 // A non-nullptr isolate_ means the stats table already entered the scope |
| 754 // and started the timer, we need to leave the scope and reset the timer |
| 755 // even when we stop tracing, otherwise we have the risk to have a dangling |
| 756 // pointer. |
| 757 if (V8_UNLIKELY(isolate_ != nullptr)) { |
| 758 TraceEventStatsTable::Leave(isolate_, &timer_); |
| 759 } |
| 760 } |
| 761 |
| 762 private: |
| 763 Isolate* isolate_; |
| 764 TraceEventCallStatsTimer timer_; |
| 765 }; |
| 766 |
593 } // namespace tracing | 767 } // namespace tracing |
594 } // namespace internal | 768 } // namespace internal |
595 } // namespace v8 | 769 } // namespace v8 |
596 | 770 |
597 #endif // SRC_TRACING_TRACE_EVENT_H_ | 771 #endif // SRC_TRACING_TRACE_EVENT_H_ |
OLD | NEW |