Chromium Code Reviews

Unified Diff: base/trace_event/heap_profiler_allocation_context_tracker.h

Issue 1839503002: [tracing] Add native allocation tracing mode. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Increase Backtrace frames, request even more frames Created 4 years, 8 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
 
 #include <vector>
 
 #include "base/atomicops.h"
 #include "base/base_export.h"
+#include "base/debug/debugging_flags.h"
+#include "base/debug/stack_trace.h"
 #include "base/logging.h"
 #include "base/macros.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 
+#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL) && \
+    (BUILDFLAG(ENABLE_PROFILING) || !defined(NDEBUG))
+#define ENABLE_NATIVE_ALLOCATION_TRACES 1
+#else
+#define ENABLE_NATIVE_ALLOCATION_TRACES 0
+#endif
+
 namespace base {
 namespace trace_event {
 
 // The allocation context tracker keeps track of thread-local context for heap
 // profiling. It includes a pseudo stack of trace events. On every allocation
 // the tracker provides a snapshot of its context in the form of an
 // |AllocationContext| that is to be stored together with the allocation
 // details.
 class BASE_EXPORT AllocationContextTracker {
  public:
-  // Globally enables capturing allocation context.
-  // TODO(ruuda): Should this be replaced by |EnableCapturing| in the future?
-  // Or at least have something that guards agains enable -> disable -> enable?
-  static void SetCaptureEnabled(bool enabled);
+  enum class CaptureMode: int32_t {
+    DISABLED,      // Don't capture anything
+    PSEUDO_STACK,  // GetContextSnapshot() returns pseudo stack trace
+#if ENABLE_NATIVE_ALLOCATION_TRACES
+    NATIVE_STACK   // GetContextSnapshot() returns native (real) stack trace
+#endif
+  };
 
-  // Returns whether capturing allocation context is enabled globally.
-  inline static bool capture_enabled() {
+  // Globally sets capturing mode.
+  // TODO(primiano): How to guard against *_STACK -> DISABLED -> *_STACK?
+  static void SetCaptureMode(CaptureMode mode);
+
+  // Returns global capturing mode.
+  inline static CaptureMode capture_mode() {
     // A little lag after heap profiling is enabled or disabled is fine, it is
     // more important that the check is as cheap as possible when capturing is
     // not enabled, so do not issue a memory barrier in the fast path.
-    if (subtle::NoBarrier_Load(&capture_enabled_) == 0)
-      return false;
+    if (subtle::NoBarrier_Load(&capture_mode_) ==
+        static_cast<int32_t>(CaptureMode::DISABLED))
+      return CaptureMode::DISABLED;
 
     // In the slow path, an acquire load is required to pair with the release
-    // store in |SetCaptureEnabled|. This is to ensure that the TLS slot for
+    // store in |SetCaptureMode|. This is to ensure that the TLS slot for
     // the thread-local allocation context tracker has been initialized if
-    // |capture_enabled| returns true.
-    return subtle::Acquire_Load(&capture_enabled_) != 0;
+    // |capture_mode| returns something other than DISABLED.
+    return static_cast<CaptureMode>(subtle::Acquire_Load(&capture_mode_));
   }
 
   // Returns the thread-local instance, creating one if necessary. Returns
   // always a valid instance, unless it is called re-entrantly, in which case
   // returns nullptr in the nested calls.
   static AllocationContextTracker* GetInstanceForCurrentThread();
 
   // Set the thread name in the AllocationContextTracker of the current thread
   // if capture is enabled.
   static void SetCurrentThreadName(const char* name);
 
 (...skipping 19 matching lines...)
 
   void PopCurrentTaskContext(const char* context);
 
   // Returns a snapshot of the current thread-local context.
   AllocationContext GetContextSnapshot();
 
   ~AllocationContextTracker();
 
  private:
   AllocationContextTracker();
 
-  static subtle::Atomic32 capture_enabled_;
+  static subtle::Atomic32 capture_mode_;
 
   // The pseudo stack where frames are |TRACE_EVENT| names.
   std::vector<const char*> pseudo_stack_;
 
   // The thread name is used as the first entry in the pseudo stack.
   const char* thread_name_;
 
   // Stack of tasks' contexts. Context serves as a different dimension than
   // pseudo stack to cluster allocations.
   std::vector<const char*> task_contexts_;
 
   uint32_t ignore_scope_depth_;
 
   DISALLOW_COPY_AND_ASSIGN(AllocationContextTracker);
 };
 
 }  // namespace trace_event
 }  // namespace base
 
 #endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_CONTEXT_TRACKER_H_
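Note for readers of this change: the acquire load in capture_mode() only does its job if SetCaptureMode() publishes the mode with a matching release store. The snippet below is a minimal sketch of that pairing, not the actual heap_profiler_allocation_context_tracker.cc from this patch; EnsureTlsSlotInitialized() is a hypothetical helper used only to make the ordering requirement concrete.

// Sketch only: pairs a release store with the Acquire_Load in capture_mode().
// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // Hypothetical helper (not in this patch): make sure the TLS slot for the
  // per-thread tracker exists before the new mode becomes visible, so that a
  // thread observing a non-DISABLED mode via the acquire load can safely call
  // GetInstanceForCurrentThread().
  if (mode != CaptureMode::DISABLED)
    EnsureTlsSlotInitialized();

  // Release_Store pairs with the Acquire_Load in the slow path of
  // capture_mode(); threads that take the NoBarrier_Load fast path only ever
  // observe DISABLED and never touch the TLS slot.
  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
}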
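For context on how the two-level check is meant to be used on the allocation hot path, here is a hedged usage sketch. OnAllocation() and RecordAllocation() are illustrative names only and are not part of this change.

// Hypothetical allocation hook; not part of this patch.
void OnAllocation(void* address, size_t size) {
  using base::trace_event::AllocationContext;
  using base::trace_event::AllocationContextTracker;

  // Fast path: a single no-barrier load when heap profiling is off.
  if (AllocationContextTracker::capture_mode() ==
      AllocationContextTracker::CaptureMode::DISABLED)
    return;

  // May return nullptr on re-entrant calls (e.g. allocations made while the
  // snapshot itself is being built).
  AllocationContextTracker* tracker =
      AllocationContextTracker::GetInstanceForCurrentThread();
  if (!tracker)
    return;

  AllocationContext context = tracker->GetContextSnapshot();
  RecordAllocation(address, size, context);  // Illustrative sink.
}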
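The newly included base/debug/stack_trace.h hints at what NATIVE_STACK capture involves. The sketch below only illustrates the idea using the generic base::debug::StackTrace walker; the actual patch may capture frames differently (for example via frame pointers, as HAVE_TRACE_STACK_FRAME_POINTERS suggests), and CaptureNativeStack() is a hypothetical helper.

#include <vector>

#include "base/debug/stack_trace.h"

#if ENABLE_NATIVE_ALLOCATION_TRACES
// Hypothetical helper, for illustration only: collects the real return
// addresses of the current call stack, as opposed to the pseudo stack of
// TRACE_EVENT names used by PSEUDO_STACK mode.
std::vector<const void*> CaptureNativeStack() {
  base::debug::StackTrace trace;
  size_t count = 0;
  const void* const* frames = trace.Addresses(&count);
  return std::vector<const void*>(frames, frames + count);
}
#endif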
