OLD | NEW |
---|---|
1 // Copyright 2015 the V8 project authors. All rights reserved. | 1 // Copyright 2015 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef SRC_TRACING_TRACE_EVENT_H_ | 5 #ifndef SRC_TRACING_TRACE_EVENT_H_ |
6 #define SRC_TRACING_TRACE_EVENT_H_ | 6 #define SRC_TRACING_TRACE_EVENT_H_ |
7 | 7 |
8 #include <stddef.h> | 8 #include <stddef.h> |
9 | 9 |
10 #include "base/trace_event/common/trace_event_common.h" | 10 #include "base/trace_event/common/trace_event_common.h" |
11 #include "include/v8-platform.h" | 11 #include "include/v8-platform.h" |
12 #include "src/base/atomicops.h" | 12 #include "src/base/atomicops.h" |
13 #include "src/base/macros.h" | 13 #include "src/base/macros.h" |
14 #include "src/counters.h" | |
14 | 15 |
15 // This header file defines implementation details of how the trace macros in | 16 // This header file defines implementation details of how the trace macros in |
16 // trace_event_common.h collect and store trace events. Anything not | 17 // trace_event_common.h collect and store trace events. Anything not |
17 // implementation-specific should go in trace_macros_common.h instead of here. | 18 // implementation-specific should go in trace_macros_common.h instead of here. |
18 | 19 |
19 | 20 |
20 // The pointer returned from GetCategoryGroupEnabled() points to a | 21 // The pointer returned from GetCategoryGroupEnabled() points to a |
21 // value with zero or more of the following bits. Used in this class only. | 22 // value with zero or more of the following bits. Used in this class only. |
22 // The TRACE_EVENT macros should only use the value as a bool. | 23 // The TRACE_EVENT macros should only use the value as a bool. |
23 // These values must be in sync with macro values in trace_log.h in | 24 // These values must be in sync with macro values in trace_log.h in |
(...skipping 251 matching lines...) | |
275 private: \ | 276 private: \ |
276 /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \ | 277 /* Local class friendly DISALLOW_COPY_AND_ASSIGN */ \ |
277 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ | 278 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ |
278 (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ | 279 (const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ |
279 void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ | 280 void operator=(const INTERNAL_TRACE_EVENT_UID(ScopedContext)&) {} \ |
280 uint64_t cid_; \ | 281 uint64_t cid_; \ |
281 }; \ | 282 }; \ |
282 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ | 283 INTERNAL_TRACE_EVENT_UID(ScopedContext) \ |
283 INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id()); | 284 INTERNAL_TRACE_EVENT_UID(scoped_context)(context.raw_id()); |
284 | 285 |
286 #define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED() \ | |
287 v8::internal::tracing::kRuntimeCallsTracingEnabled == 1 | |
288 | |
289 #define TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \ | |
290 INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) | |
291 | |
292 #define TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id) \ | |
293 INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, counter_id) | |
294 | |
295 #define INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED(isolate, category_group, name) \ | |
296 { \ | |
297 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO( \ | |
298 TRACE_DISABLED_BY_DEFAULT("v8.runtime_stats")); \ | |
299 v8::internal::tracing::kRuntimeCallsTracingEnabled = \ | |
300 INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE() ? 1 \ | |
301 : 0; \ | |
302 } \ | |
303 INTERNAL_TRACE_EVENT_GET_CATEGORY_INFO(category_group); \ | |
304 v8::internal::tracing::CallStatsScopedTracer INTERNAL_TRACE_EVENT_UID( \ | |
305 tracer); \ | |
306 if (INTERNAL_TRACE_EVENT_CATEGORY_GROUP_ENABLED_FOR_RECORDING_MODE()) { \ | |
307 INTERNAL_TRACE_EVENT_UID(tracer) \ | |
308 .Initialize(isolate, INTERNAL_TRACE_EVENT_UID(category_group_enabled), \ | |
309 name); \ | |
310 } | |
311 | |
312 #define INTERNAL_TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(isolate, \ | |
313 counter_id) \ | |
314 v8::internal::tracing::CounterScope INTERNAL_TRACE_EVENT_UID(scope)( \ | |
315 isolate, counter_id); | |
316 | |
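Usage sketch (illustrative, not part of this CL): the two public macros added above would be used roughly as follows. The function names and the API_Object_Get counter id are placeholders, `isolate` is assumed to be a `v8::internal::Isolate*`, and the end-of-scope behaviour of CallStatsScopedTracer lives in trace-event.cc, which is not shown in this diff.

```cpp
#include "src/tracing/trace-event.h"

void RunScript(v8::internal::Isolate* isolate) {
  // Emits a scoped "v8" trace event; the tracer is only Initialize()d when
  // the category group is enabled, so the scope is cheap otherwise.
  TRACE_EVENT_CALL_STATS_SCOPED(isolate, "v8", "V8.RunScript");
  // ... run the script ...
}

void HandleApiCall(v8::internal::Isolate* isolate) {
  // Opens a CounterScope that attributes the time spent in this scope to a
  // single RuntimeCallCounter (the counter id below is hypothetical).
  TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_SCOPED(
      isolate, &v8::internal::tracing::TraceEventStatsTable::API_Object_Get);
  // ... perform the API call ...
}
```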
285 namespace v8 { | 317 namespace v8 { |
286 namespace internal { | 318 namespace internal { |
287 namespace tracing { | 319 namespace tracing { |
288 | 320 |
289 // Specify these values when the corresponding argument of AddTraceEvent is not | 321 // Specify these values when the corresponding argument of AddTraceEvent is not |
290 // used. | 322 // used. |
291 const int kZeroNumArgs = 0; | 323 const int kZeroNumArgs = 0; |
292 const decltype(nullptr) kGlobalScope = nullptr; | 324 const decltype(nullptr) kGlobalScope = nullptr; |
293 const uint64_t kNoId = 0; | 325 const uint64_t kNoId = 0; |
294 | 326 |
327 extern int kRuntimeCallsTracingEnabled; | |
Camillo Bruni 2016/07/27 08:29:07: change to bool.
lpy 2016/07/27 19:58:37: Done.
| |
328 | |
295 class TraceEventHelper { | 329 class TraceEventHelper { |
296 public: | 330 public: |
297 static v8::Platform* GetCurrentPlatform(); | 331 static v8::Platform* GetCurrentPlatform(); |
298 }; | 332 }; |
299 | 333 |
300 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers | 334 // TraceID encapsulates an ID that can either be an integer or pointer. Pointers |
301 // are by default mangled with the Process ID so that they are unlikely to | 335 // are by default mangled with the Process ID so that they are unlikely to |
302 // collide when the same pointer is used on different processes. | 336 // collide when the same pointer is used on different processes. |
303 class TraceID { | 337 class TraceID { |
304 public: | 338 public: |
(...skipping 278 matching lines...) | |
583 static V8_INLINE void Set(const char* category_and_name) { | 617 static V8_INLINE void Set(const char* category_and_name) { |
584 TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber], | 618 TRACE_EVENT_API_ATOMIC_STORE(g_trace_state[BucketNumber], |
585 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( | 619 reinterpret_cast<TRACE_EVENT_API_ATOMIC_WORD>( |
586 const_cast<char*>(category_and_name))); | 620 const_cast<char*>(category_and_name))); |
587 } | 621 } |
588 | 622 |
589 private: | 623 private: |
590 const char* previous_state_; | 624 const char* previous_state_; |
591 }; | 625 }; |
592 | 626 |
627 // Do not use directly. | |
628 class CallStatsScopedTracer { | |
629 public: | |
630 CallStatsScopedTracer() : p_data_(nullptr) {} | |
631 ~CallStatsScopedTracer(); | |
632 | |
633 void Initialize(Isolate* isolate, const uint8_t* category_group_enabled, | |
634 const char* name); | |
635 | |
636 private: | |
637 struct Data { | |
638 const uint8_t* category_group_enabled; | |
639 const char* name; | |
640 Isolate* isolate; | |
641 }; | |
642 bool has_parent_scope_; | |
643 Data* p_data_; | |
644 Data data_; | |
645 }; | |
646 | |
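Not part of the CL, just a generic sketch of the conditional-initialization RAII pattern that CallStatsScopedTracer and the INTERNAL_TRACE_EVENT_CALL_STATS_SCOPED macro above rely on: the object is always constructed so a destructor runs at scope exit, but it only does real work if Initialize() was called. Names here are made up, and what the real destructor does is defined in trace-event.cc, which this diff does not include.

```cpp
class ConditionalScopedTracer {
 public:
  ConditionalScopedTracer() : active_(false) {}
  ~ConditionalScopedTracer() {
    if (active_) {
      // Assumed: the real tracer would emit/flush its collected data here.
    }
  }
  void Initialize() { active_ = true; }

 private:
  bool active_;
};

void TracedWork(bool category_enabled) {
  ConditionalScopedTracer tracer;             // cheap when tracing is off
  if (category_enabled) tracer.Initialize();  // arm it only when enabled
  // ... traced work ...
}  // destructor runs here; it is a no-op unless Initialize() was called
```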
647 // TraceEventCallStatsTimer is used to keep track of the stack of currently | |
648 // active timers used for properly measuring the own time of a | |
649 // RuntimeCallCounter. | |
650 class TraceEventCallStatsTimer { | |
651 public: | |
652 TraceEventCallStatsTimer() : counter_(nullptr), parent_(nullptr) {} | |
653 RuntimeCallCounter* counter() { return counter_; } | |
654 base::ElapsedTimer timer() { return timer_; } | |
655 | |
656 private: | |
657 friend class TraceEventStatsTable; | |
658 | |
659 V8_INLINE void Start(RuntimeCallCounter* counter, | |
660 TraceEventCallStatsTimer* parent) { | |
661 counter_ = counter; | |
662 parent_ = parent; | |
663 timer_.Start(); | |
664 } | |
665 | |
666 V8_INLINE TraceEventCallStatsTimer* Stop() { | |
667 base::TimeDelta delta = timer_.Elapsed(); | |
668 timer_.Stop(); | |
669 counter_->count++; | |
670 counter_->time += delta; | |
671 if (parent_ != nullptr) { | |
672 // Adjust parent timer so that it does not include sub timer's time. | |
673 parent_->counter_->time -= delta; | |
674 } | |
675 return parent_; | |
676 } | |
677 | |
678 RuntimeCallCounter* counter_; | |
679 TraceEventCallStatsTimer* parent_; | |
680 base::ElapsedTimer timer_; | |
681 }; | |
682 | |
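Not part of the CL: a self-contained sketch of the own-time bookkeeping described above, with plain structs standing in for RuntimeCallCounter and TraceEventCallStatsTimer and a fixed elapsed value injected instead of a real clock. It shows how a nested timer's elapsed time is subtracted from its parent's counter so the parent ends up with its own time only.

```cpp
#include <cassert>

struct Counter {
  int count;
  long time_us;
};

struct Timer {
  Counter* counter;
  Timer* parent;
  long elapsed_us;  // injected for the example instead of timer_.Elapsed()

  Timer* Stop() {
    counter->count++;
    counter->time_us += elapsed_us;
    if (parent != nullptr) {
      // Same rule as TraceEventCallStatsTimer::Stop(): the parent must not
      // also account for the child's time.
      parent->counter->time_us -= elapsed_us;
    }
    return parent;
  }
};

int main() {
  Counter outer_c = {0, 0};
  Counter inner_c = {0, 0};
  Timer outer = {&outer_c, nullptr, 10000};  // outer scope ran 10 ms in total
  Timer inner = {&inner_c, &outer, 4000};    // 4 ms of that was a nested scope

  inner.Stop();  // books 4 ms on the child, removes 4 ms from the parent
  outer.Stop();  // books the full 10 ms on the parent

  assert(inner_c.time_us == 4000);  // child keeps its 4 ms
  assert(outer_c.time_us == 6000);  // parent is left with 6 ms of own time
  return 0;
}
```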
683 class TraceEventStatsTable { | |
684 public: | |
685 typedef RuntimeCallCounter TraceEventStatsTable::*CounterId; | |
686 | |
687 #define CALL_RUNTIME_COUNTER(name) \ | |
688 RuntimeCallCounter name = RuntimeCallCounter(#name); | |
689 FOR_EACH_MANUAL_COUNTER(CALL_RUNTIME_COUNTER) | |
690 #undef CALL_RUNTIME_COUNTER | |
691 #define CALL_RUNTIME_COUNTER(name, nargs, ressize) \ | |
692 RuntimeCallCounter Runtime_##name = RuntimeCallCounter(#name); | |
693 FOR_EACH_INTRINSIC(CALL_RUNTIME_COUNTER) | |
694 #undef CALL_RUNTIME_COUNTER | |
695 #define CALL_BUILTIN_COUNTER(name) \ | |
696 RuntimeCallCounter Builtin_##name = RuntimeCallCounter(#name); | |
697 BUILTIN_LIST_C(CALL_BUILTIN_COUNTER) | |
698 #undef CALL_BUILTIN_COUNTER | |
699 #define CALL_BUILTIN_COUNTER(name) \ | |
700 RuntimeCallCounter API_##name = RuntimeCallCounter("API_" #name); | |
701 FOR_EACH_API_COUNTER(CALL_BUILTIN_COUNTER) | |
702 #undef CALL_BUILTIN_COUNTER | |
703 #define CALL_BUILTIN_COUNTER(name) \ | |
704 RuntimeCallCounter Handler_##name = RuntimeCallCounter(#name); | |
705 FOR_EACH_HANDLER_COUNTER(CALL_BUILTIN_COUNTER) | |
706 #undef CALL_BUILTIN_COUNTER | |
707 | |
708 // Start measuring the time for a function. This will establish the | |
709 // connection to the parent counter for properly calculating the own times. | |
710 static void Enter(Isolate* isolate, TraceEventCallStatsTimer* timer, | |
711 CounterId counter_id); | |
712 | |
713 // Leave a scope for a measured runtime function. This will properly add | |
714 // the time delta to the current_counter and subtract the delta from its | |
715 // parent. | |
716 static void Leave(Isolate* isolate, TraceEventCallStatsTimer* timer); | |
717 | |
718 void Reset(); | |
719 const char* Dump(); | |
720 | |
721 TraceEventStatsTable() { | |
722 Reset(); | |
723 in_use_ = false; | |
724 } | |
725 TraceEventCallStatsTimer* current_timer() { return current_timer_; } | |
726 bool InUse() { return in_use_; } | |
727 | |
728 private: | |
729 std::stringstream buffer_; | |
730 // Counter to track recursive time events. | |
731 TraceEventCallStatsTimer* current_timer_ = nullptr; | |
732 bool in_use_; | |
733 }; | |
734 | |
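Not part of the CL: a minimal, standalone illustration of the X-macro pattern used above to stamp out one RuntimeCallCounter member per list entry, and of the pointer-to-member CounterId type. FOR_EACH_DEMO_COUNTER and the counter names are made up; V8's real lists (FOR_EACH_MANUAL_COUNTER, FOR_EACH_INTRINSIC, BUILTIN_LIST_C, ...) come from src/counters.h and friends.

```cpp
#include <cstdio>

struct DemoCounter {
  explicit DemoCounter(const char* n) : name(n), count(0) {}
  const char* name;
  int count;
};

// Stand-in for V8's counter list macros.
#define FOR_EACH_DEMO_COUNTER(V) \
  V(Parse)                       \
  V(Compile)                     \
  V(Execute)

struct DemoStatsTable {
  // Each expansion declares one named counter member, as in the table above.
#define DEMO_COUNTER(name) DemoCounter name = DemoCounter(#name);
  FOR_EACH_DEMO_COUNTER(DEMO_COUNTER)
#undef DEMO_COUNTER

  // Pointer-to-member "counter id", mirroring TraceEventStatsTable::CounterId.
  typedef DemoCounter DemoStatsTable::*CounterId;
};

int main() {
  DemoStatsTable table;
  DemoStatsTable::CounterId id = &DemoStatsTable::Compile;
  (table.*id).count++;  // resolve the id against a concrete table instance
  std::printf("%s: %d\n", (table.*id).name, (table.*id).count);
  return 0;
}
```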
735 class CounterScope { | |
736 public: | |
737 CounterScope(Isolate* isolate, TraceEventStatsTable::CounterId counter_id); | |
738 ~CounterScope(); | |
739 | |
740 private: | |
741 Isolate* isolate_; | |
742 TraceEventCallStatsTimer timer_; | |
743 }; | |
744 | |
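The constructor and destructor of CounterScope are defined in trace-event.cc, which is not part of this diff; based on the Enter/Leave comments above, the wiring is presumably along these lines (assumed sketch, not the actual CL code):

```cpp
// Assumed sketch only.
CounterScope::CounterScope(Isolate* isolate,
                           TraceEventStatsTable::CounterId counter_id)
    : isolate_(isolate) {
  // Likely guarded by TRACE_EVENT_RUNTIME_CALL_STATS_TRACING_ENABLED().
  TraceEventStatsTable::Enter(isolate_, &timer_, counter_id);
}

CounterScope::~CounterScope() {
  TraceEventStatsTable::Leave(isolate_, &timer_);
}
```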
593 } // namespace tracing | 745 } // namespace tracing |
594 } // namespace internal | 746 } // namespace internal |
595 } // namespace v8 | 747 } // namespace v8 |
596 | 748 |
597 #endif // SRC_TRACING_TRACE_EVENT_H_ | 749 #endif // SRC_TRACING_TRACE_EVENT_H_ |