Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(6)

Side by Side Diff: base/trace_event/heap_profiler_allocation_context_tracker.cc

Issue 1839503002: [tracing] Add native allocation tracing mode. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Increase Backtrace frames, request even more frames Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <iterator> 8 #include <iterator>
9 9
10 #include "base/atomicops.h" 10 #include "base/atomicops.h"
11 #include "base/threading/thread_local_storage.h" 11 #include "base/threading/thread_local_storage.h"
12 #include "base/trace_event/heap_profiler_allocation_context.h" 12 #include "base/trace_event/heap_profiler_allocation_context.h"
13 13
14 namespace base { 14 namespace base {
15 namespace trace_event { 15 namespace trace_event {
16 16
17 subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0; 17 subtle::Atomic32 AllocationContextTracker::capture_mode_ =
18 static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
18 19
19 namespace { 20 namespace {
20 21
21 const size_t kMaxStackDepth = 128u; 22 const size_t kMaxStackDepth = 128u;
22 const size_t kMaxTaskDepth = 16u; 23 const size_t kMaxTaskDepth = 16u;
23 AllocationContextTracker* const kInitializingSentinel = 24 AllocationContextTracker* const kInitializingSentinel =
24 reinterpret_cast<AllocationContextTracker*>(-1); 25 reinterpret_cast<AllocationContextTracker*>(-1);
25 const char kTracingOverhead[] = "tracing_overhead"; 26 const char kTracingOverhead[] = "tracing_overhead";
26 27
27 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER; 28 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
(...skipping 25 matching lines...) Expand all
53 54
54 AllocationContextTracker::AllocationContextTracker() 55 AllocationContextTracker::AllocationContextTracker()
55 : thread_name_(nullptr), ignore_scope_depth_(0) { 56 : thread_name_(nullptr), ignore_scope_depth_(0) {
56 pseudo_stack_.reserve(kMaxStackDepth); 57 pseudo_stack_.reserve(kMaxStackDepth);
57 task_contexts_.reserve(kMaxTaskDepth); 58 task_contexts_.reserve(kMaxTaskDepth);
58 } 59 }
59 AllocationContextTracker::~AllocationContextTracker() {} 60 AllocationContextTracker::~AllocationContextTracker() {}
60 61
61 // static 62 // static
62 void AllocationContextTracker::SetCurrentThreadName(const char* name) { 63 void AllocationContextTracker::SetCurrentThreadName(const char* name) {
63 if (name && capture_enabled()) { 64 if (name && capture_mode() != CaptureMode::DISABLED) {
64 GetInstanceForCurrentThread()->thread_name_ = name; 65 GetInstanceForCurrentThread()->thread_name_ = name;
65 } 66 }
66 } 67 }
67 68
68 // static 69 // static
69 void AllocationContextTracker::SetCaptureEnabled(bool enabled) { 70 void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
70 // When enabling capturing, also initialize the TLS slot. This does not create 71 // When enabling capturing, also initialize the TLS slot. This does not create
71 // a TLS instance yet. 72 // a TLS instance yet.
72 if (enabled && !g_tls_alloc_ctx_tracker.initialized()) 73 if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
73 g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker); 74 g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
74 75
 75 // Release ordering ensures that when a thread observes |capture_enabled_| to 76 // Release ordering ensures that when a thread observes |capture_mode_| to
 76 // be true through an acquire load, the TLS slot has been initialized. 77 // be a non-DISABLED mode through an acquire load, the TLS slot has been initialized.
77 subtle::Release_Store(&capture_enabled_, enabled); 78 subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
78 } 79 }
79 80
80 void AllocationContextTracker::PushPseudoStackFrame( 81 void AllocationContextTracker::PushPseudoStackFrame(
81 const char* trace_event_name) { 82 const char* trace_event_name) {
82 // Impose a limit on the height to verify that every push is popped, because 83 // Impose a limit on the height to verify that every push is popped, because
83 // in practice the pseudo stack never grows higher than ~20 frames. 84 // in practice the pseudo stack never grows higher than ~20 frames.
84 if (pseudo_stack_.size() < kMaxStackDepth) 85 if (pseudo_stack_.size() < kMaxStackDepth)
85 pseudo_stack_.push_back(trace_event_name); 86 pseudo_stack_.push_back(trace_event_name);
86 else 87 else
87 NOTREACHED(); 88 NOTREACHED();
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
122 AllocationContext AllocationContextTracker::GetContextSnapshot() { 123 AllocationContext AllocationContextTracker::GetContextSnapshot() {
123 AllocationContext ctx; 124 AllocationContext ctx;
124 125
125 if (ignore_scope_depth_) { 126 if (ignore_scope_depth_) {
126 ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead); 127 ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
127 ctx.type_name = kTracingOverhead; 128 ctx.type_name = kTracingOverhead;
128 ctx.backtrace.frame_count = 1; 129 ctx.backtrace.frame_count = 1;
129 return ctx; 130 return ctx;
130 } 131 }
131 132
132 // Fill the backtrace. 133 CaptureMode mode = static_cast<CaptureMode>(
133 { 134 subtle::NoBarrier_Load(&capture_mode_));
134 auto backtrace = std::begin(ctx.backtrace.frames);
135 auto backtrace_end = std::end(ctx.backtrace.frames);
136 135
137 // Add the thread name as the first entry 136 auto backtrace = std::begin(ctx.backtrace.frames);
138 if (thread_name_) { 137 auto backtrace_end = std::end(ctx.backtrace.frames);
139 *backtrace++ = StackFrame::FromThreadName(thread_name_);
140 }
141 138
142 for (const char* event_name: pseudo_stack_) { 139 // Add the thread name as the first entry
143 if (backtrace == backtrace_end) { 140 if (thread_name_) {
141 *backtrace++ = StackFrame::FromThreadName(thread_name_);
142 }
143
144 switch (mode) {
145 case CaptureMode::DISABLED:
146 {
144 break; 147 break;
145 } 148 }
146 *backtrace++ = StackFrame::FromTraceEventName(event_name); 149 case CaptureMode::PSEUDO_STACK:
147 } 150 {
151 for (const auto& event_name: pseudo_stack_) {
152 if (backtrace == backtrace_end) {
153 break;
154 }
155 *backtrace++ = StackFrame::FromTraceEventName(event_name);
156 }
157 break;
158 }
159 #if ENABLE_NATIVE_ALLOCATION_TRACES
160 case CaptureMode::NATIVE_STACK:
161 {
162 // base::trace_event::AllocationContextTracker::GetContextSnapshot()
163 const size_t kKnownFrameCount = 1;
148 164
149 ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames); 165 // Backtrace contract requires us to return bottom frames, i.e.
166 // from main() and up. Stack unwinding produces top frames, i.e.
167 // from this point and up until main(). We request many frames to
168 // make sure we reach main(), and then copy bottom portion of them.
169 const void* frames[50];
170 static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
171 "not requesting enough frames to fill Backtrace");
172 size_t frame_count = debug::TraceStackFramePointers(
173 frames,
174 arraysize(frames),
175 kKnownFrameCount);
176
177 // Copy frames backwards
178 size_t backtrace_capacity = backtrace_end - backtrace;
179 ssize_t top_frame_index = (backtrace_capacity >= frame_count) ?
180 0 :
181 frame_count - backtrace_capacity;
182 for (ssize_t i = frame_count - 1; i >= top_frame_index; --i) {
183 const void* frame = frames[i];
184 *backtrace++ = StackFrame::FromProgramCounter(frame);
185 }
186 break;
187 }
188 #endif // ENABLE_NATIVE_ALLOCATION_TRACES
150 } 189 }
151 190
191 ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
192
152 // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension 193 // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
153 // (component name) in the heap profiler and not piggy back on the type name. 194 // (component name) in the heap profiler and not piggy back on the type name.
154 ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back(); 195 ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
155 196
156 return ctx; 197 return ctx;
157 } 198 }
158 199
159 } // namespace trace_event 200 } // namespace trace_event
160 } // namespace base 201 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698