Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(187)

Side by Side Diff: base/trace_event/heap_profiler_allocation_context_tracker.cc

Issue 1921773003: Reland of [tracing] Add native allocation tracing mode. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Make NATIVE_STACK mode always available Created 4 years, 7 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" 5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
6 6
7 #include <algorithm> 7 #include <algorithm>
8 #include <iterator> 8 #include <iterator>
9 9
10 #include "base/atomicops.h" 10 #include "base/atomicops.h"
11 #include "base/threading/thread_local_storage.h" 11 #include "base/threading/thread_local_storage.h"
12 #include "base/trace_event/heap_profiler_allocation_context.h" 12 #include "base/trace_event/heap_profiler_allocation_context.h"
13 13
14 namespace base { 14 namespace base {
15 namespace trace_event { 15 namespace trace_event {
16 16
// Global capture mode for all threads, stored as an Atomic32 holding the
// numeric value of CaptureMode. It is written with release semantics in
// SetCaptureMode() and read without a barrier in GetContextSnapshot().
subtle::Atomic32 AllocationContextTracker::capture_mode_ =
    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
18 19
19 namespace { 20 namespace {
20 21
21 const size_t kMaxStackDepth = 128u; 22 const size_t kMaxStackDepth = 128u;
22 const size_t kMaxTaskDepth = 16u; 23 const size_t kMaxTaskDepth = 16u;
23 AllocationContextTracker* const kInitializingSentinel = 24 AllocationContextTracker* const kInitializingSentinel =
24 reinterpret_cast<AllocationContextTracker*>(-1); 25 reinterpret_cast<AllocationContextTracker*>(-1);
25 const char kTracingOverhead[] = "tracing_overhead"; 26 const char kTracingOverhead[] = "tracing_overhead";
26 27
27 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER; 28 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
(...skipping 25 matching lines...) Expand all
53 54
54 AllocationContextTracker::AllocationContextTracker() 55 AllocationContextTracker::AllocationContextTracker()
55 : thread_name_(nullptr), ignore_scope_depth_(0) { 56 : thread_name_(nullptr), ignore_scope_depth_(0) {
56 pseudo_stack_.reserve(kMaxStackDepth); 57 pseudo_stack_.reserve(kMaxStackDepth);
57 task_contexts_.reserve(kMaxTaskDepth); 58 task_contexts_.reserve(kMaxTaskDepth);
58 } 59 }
59 AllocationContextTracker::~AllocationContextTracker() {} 60 AllocationContextTracker::~AllocationContextTracker() {}
60 61
61 // static 62 // static
62 void AllocationContextTracker::SetCurrentThreadName(const char* name) { 63 void AllocationContextTracker::SetCurrentThreadName(const char* name) {
63 if (name && capture_enabled()) { 64 if (name && capture_mode() != CaptureMode::DISABLED) {
64 GetInstanceForCurrentThread()->thread_name_ = name; 65 GetInstanceForCurrentThread()->thread_name_ = name;
65 } 66 }
66 } 67 }
67 68
// static
void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
  // When enabling capturing, also initialize the TLS slot. This does not create
  // a TLS instance yet.
  if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
    g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);

  // Release ordering ensures that when a thread observes |capture_mode_| to
  // be non-DISABLED through an acquire load, the TLS slot has been
  // initialized. NOTE(review): GetContextSnapshot() currently reads the mode
  // with NoBarrier_Load, not an acquire load — confirm TLS-slot visibility is
  // guaranteed by other means on that path.
  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
}
79 80
80 void AllocationContextTracker::PushPseudoStackFrame( 81 void AllocationContextTracker::PushPseudoStackFrame(
81 const char* trace_event_name) { 82 const char* trace_event_name) {
82 // Impose a limit on the height to verify that every push is popped, because 83 // Impose a limit on the height to verify that every push is popped, because
83 // in practice the pseudo stack never grows higher than ~20 frames. 84 // in practice the pseudo stack never grows higher than ~20 frames.
84 if (pseudo_stack_.size() < kMaxStackDepth) 85 if (pseudo_stack_.size() < kMaxStackDepth)
85 pseudo_stack_.push_back(trace_event_name); 86 pseudo_stack_.push_back(trace_event_name);
86 else 87 else
87 NOTREACHED(); 88 NOTREACHED();
(...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after
// Builds an AllocationContext for the calling thread: a backtrace whose shape
// depends on the current capture mode (empty, pseudo stack of trace-event
// names, or native stack from frame-pointer unwinding), plus the innermost
// task context piggy-backed as the type name.
AllocationContext AllocationContextTracker::GetContextSnapshot() {
  AllocationContext ctx;

  // Inside an ignore scope, attribute the allocation to a sentinel
  // "tracing_overhead" frame instead of capturing a real backtrace.
  if (ignore_scope_depth_) {
    ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
    ctx.type_name = kTracingOverhead;
    ctx.backtrace.frame_count = 1;
    return ctx;
  }

  // NOTE(review): the mode is read without a barrier; this does not pair with
  // the Release_Store in SetCaptureMode() — confirm the TLS slot is already
  // visible to this thread by the time this path runs.
  CaptureMode mode = static_cast<CaptureMode>(
      subtle::NoBarrier_Load(&capture_mode_));

  // |backtrace| walks forward through the fixed-size frames array; writes
  // must never pass |backtrace_end|.
  auto backtrace = std::begin(ctx.backtrace.frames);
  auto backtrace_end = std::end(ctx.backtrace.frames);

  // Add the thread name as the first entry
  if (thread_name_) {
    *backtrace++ = StackFrame::FromThreadName(thread_name_);
  }

  switch (mode) {
    case CaptureMode::DISABLED:
      {
        // Leave the backtrace with at most the thread-name frame.
        break;
      }
    case CaptureMode::PSEUDO_STACK:
      {
        // Copy trace-event names bottom-up, truncating at the array capacity.
        for (const auto& event_name: pseudo_stack_) {
          if (backtrace == backtrace_end) {
            break;
          }
          *backtrace++ = StackFrame::FromTraceEventName(event_name);
        }
        break;
      }
    case CaptureMode::NATIVE_STACK:
      {
        // Backtrace contract requires us to return bottom frames, i.e.
        // from main() and up. Stack unwinding produces top frames, i.e.
        // from this point and up until main(). We request many frames to
        // make sure we reach main(), and then copy bottom portion of them.
        const void* frames[50];
        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
                      "not requesting enough frames to fill Backtrace");
#if HAVE_TRACE_STACK_FRAME_POINTERS && !defined(OS_NACL)
        size_t frame_count = debug::TraceStackFramePointers(
            frames,
            arraysize(frames),
            1 /* exclude this function from the trace */ );
#else
        // Unwinding is unsupported on this configuration; NATIVE_STACK mode
        // should not have been selected here.
        size_t frame_count = 0;
        NOTREACHED();
#endif

        // Copy frames backwards: skip the topmost frames that do not fit and
        // emit the remaining ones in bottom-to-top order.
        size_t backtrace_capacity = backtrace_end - backtrace;
        size_t top_frame_index = (backtrace_capacity >= frame_count) ?
            0 :
            frame_count - backtrace_capacity;
        for (size_t i = frame_count; i > top_frame_index;) {
          const void* frame = frames[--i];
          *backtrace++ = StackFrame::FromProgramCounter(frame);
        }
        break;
      }
  }

  // Number of frames actually written above.
  ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);

  // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
  // (component name) in the heap profiler and not piggy back on the type name.
  ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();

  return ctx;
}
158 199
159 } // namespace trace_event 200 } // namespace trace_event
160 } // namespace base 201 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698