OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ | 5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ |
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ | 6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ |
7 | 7 |
8 #include <vector> | 8 #include <vector> |
9 | 9 |
10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
11 #include "base/base_export.h" | 11 #include "base/base_export.h" |
12 #include "base/debug/debugging_flags.h" | |
13 #include "base/debug/stack_trace.h" | |
12 #include "base/logging.h" | 14 #include "base/logging.h" |
13 #include "base/macros.h" | 15 #include "base/macros.h" |
14 #include "base/trace_event/heap_profiler_allocation_context.h" | 16 #include "base/trace_event/heap_profiler_allocation_context.h" |
15 | 17 |
18 #if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL) && \ | |
19 (BUILDFLAG(ENABLE_PROFILING) || defined(DEBUG)) | |
Primiano Tucci (use gerrit)
2016/04/19 20:15:19
Why do you need the (BUILDFLAG(ENABLE_PROFILING) |
Dmitry Skiba
2016/04/20 19:19:25
Actually, you suggested these defines earlier :) B
Primiano Tucci (use gerrit)
2016/04/21 20:08:21
oh right lolz :)
| |
20 #define ENABLE_NATIVE_ALLOCATION_TRACES 1 | |
21 #else | |
22 #define ENABLE_NATIVE_ALLOCATION_TRACES 0 | |
23 #endif | |
24 | |
16 namespace base { | 25 namespace base { |
17 namespace trace_event { | 26 namespace trace_event { |
18 | 27 |
19 // The allocation context tracker keeps track of thread-local context for heap | 28 // The allocation context tracker keeps track of thread-local context for heap |
20 // profiling. It includes a pseudo stack of trace events. On every allocation | 29 // profiling. It includes a pseudo stack of trace events. On every allocation |
21 // the tracker provides a snapshot of its context in the form of an | 30 // the tracker provides a snapshot of its context in the form of an |
22 // |AllocationContext| that is to be stored together with the allocation | 31 // |AllocationContext| that is to be stored together with the allocation |
23 // details. | 32 // details. |
24 class BASE_EXPORT AllocationContextTracker { | 33 class BASE_EXPORT AllocationContextTracker { |
25 public: | 34 public: |
26 // Globally enables capturing allocation context. | 35 enum class CaptureMode: int { |
Primiano Tucci (use gerrit)
2016/04/19 20:15:19
s/int/int32_t/
Dmitry Skiba
2016/04/20 19:19:24
Done.
| |
27 // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future? | 36 DISABLED, // Don't capture anything |
28 // Or at least have something that guards against enable -> disable -> enable? | 37 PSEUDO_STACK, // GetContextSnapshot() returns pseudo stack trace |
29 static void SetCaptureEnabled(bool enabled); | 38 #if ENABLE_NATIVE_ALLOCATION_TRACES |
39 NATIVE_STACK // GetContextSnapshot() returns native (real) stack trace | |
40 #endif | |
41 }; | |
42 | |
43 // Globally sets capturing mode. | |
44 // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK? | |
45 static void SetCaptureMode(CaptureMode mode); | |
30 | 46 |
31 // Returns whether capturing allocation context is enabled globally. | 47 // Returns whether capturing allocation context is enabled globally. |
32 inline static bool capture_enabled() { | 48 inline static bool capture_enabled() { |
Primiano Tucci (use gerrit)
2016/04/19 20:15:19
I think you want to return a CaptureMode here and
Dmitry Skiba
2016/04/20 19:19:24
Done.
| |
33 // A little lag after heap profiling is enabled or disabled is fine, it is | 49 // A little lag after heap profiling is enabled or disabled is fine, it is |
34 // more important that the check is as cheap as possible when capturing is | 50 // more important that the check is as cheap as possible when capturing is |
35 // not enabled, so do not issue a memory barrier in the fast path. | 51 // not enabled, so do not issue a memory barrier in the fast path. |
36 if (subtle::NoBarrier_Load(&capture_enabled_) == 0) | 52 if (subtle::NoBarrier_Load(&capture_mode_) == |
53 static_cast<int>(CaptureMode::DISABLED)) | |
37 return false; | 54 return false; |
38 | 55 |
39 // In the slow path, an acquire load is required to pair with the release | 56 // In the slow path, an acquire load is required to pair with the release |
40 // store in |SetCaptureEnabled|. This is to ensure that the TLS slot for | 57 // store in |SetCaptureMode|. This is to ensure that the TLS slot for |
41 // the thread-local allocation context tracker has been initialized if | 58 // the thread-local allocation context tracker has been initialized if |
42 // |capture_enabled| returns true. | 59 // |capture_enabled| returns true. |
43 return subtle::Acquire_Load(&capture_enabled_) != 0; | 60 return subtle::Acquire_Load(&capture_mode_) != |
61 static_cast<int>(CaptureMode::DISABLED); | |
44 } | 62 } |
45 | 63 |
46 // Returns the thread-local instance, creating one if necessary. Returns | 64 // Returns the thread-local instance, creating one if necessary. Returns |
47 // always a valid instance, unless it is called re-entrantly, in which case | 65 // always a valid instance, unless it is called re-entrantly, in which case |
48 // returns nullptr in the nested calls. | 66 // returns nullptr in the nested calls. |
49 static AllocationContextTracker* GetInstanceForCurrentThread(); | 67 static AllocationContextTracker* GetInstanceForCurrentThread(); |
50 | 68 |
51 // Set the thread name in the AllocationContextTracker of the current thread | 69 // Set the thread name in the AllocationContextTracker of the current thread |
52 // if capture is enabled. | 70 // if capture is enabled. |
53 static void SetCurrentThreadName(const char* name); | 71 static void SetCurrentThreadName(const char* name); |
(...skipping 10 matching lines...) Expand all Loading... | |
64 void PopCurrentTaskContext(const char* context); | 82 void PopCurrentTaskContext(const char* context); |
65 | 83 |
66 // Returns a snapshot of the current thread-local context. | 84 // Returns a snapshot of the current thread-local context. |
67 AllocationContext GetContextSnapshot(); | 85 AllocationContext GetContextSnapshot(); |
68 | 86 |
69 ~AllocationContextTracker(); | 87 ~AllocationContextTracker(); |
70 | 88 |
71 private: | 89 private: |
72 AllocationContextTracker(); | 90 AllocationContextTracker(); |
73 | 91 |
74 static subtle::Atomic32 capture_enabled_; | 92 static subtle::Atomic32 capture_mode_; |
75 | 93 |
76 // The pseudo stack where frames are |TRACE_EVENT| names. | 94 // The pseudo stack where frames are |TRACE_EVENT| names. |
77 std::vector<const char*> pseudo_stack_; | 95 std::vector<const char*> pseudo_stack_; |
78 | 96 |
79 // The thread name is used as the first entry in the pseudo stack. | 97 // The thread name is used as the first entry in the pseudo stack. |
80 const char* thread_name_; | 98 const char* thread_name_; |
81 | 99 |
82 // Stack of tasks' contexts. Context serves as a different dimension than | 100 // Stack of tasks' contexts. Context serves as a different dimension than |
83 // pseudo stack to cluster allocations. | 101 // pseudo stack to cluster allocations. |
84 std::vector<const char*> task_contexts_; | 102 std::vector<const char*> task_contexts_; |
85 | 103 |
86 DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker); | 104 DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker); |
87 }; | 105 }; |
88 | 106 |
89 } // namespace trace_event | 107 } // namespace trace_event |
90 } // namespace base | 108 } // namespace base |
91 | 109 |
92 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ | 110 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_ |
OLD | NEW |