Chromium Code Reviews

Unified diff: base/trace_event/heap_profiler_allocation_context_tracker.cc

Issue 1916033002: Revert of [tracing] Add native allocation tracing mode. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 8 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 
 #include <algorithm>
 #include <iterator>
 
 #include "base/atomicops.h"
 #include "base/threading/thread_local_storage.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 
 namespace base {
 namespace trace_event {
 
-subtle::Atomic32 AllocationContextTracker::capture_mode_ =
-    static_cast<int32_t>(AllocationContextTracker::CaptureMode::DISABLED);
+subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0;
 
 namespace {
 
 const size_t kMaxStackDepth = 128u;
 const size_t kMaxTaskDepth = 16u;
 AllocationContextTracker* const kInitializingSentinel =
     reinterpret_cast<AllocationContextTracker*>(-1);
 const char kTracingOverhead[] = "tracing_overhead";
 
 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
(...skipping 25 matching lines...)
 
 AllocationContextTracker::AllocationContextTracker()
     : thread_name_(nullptr), ignore_scope_depth_(0) {
   pseudo_stack_.reserve(kMaxStackDepth);
   task_contexts_.reserve(kMaxTaskDepth);
 }
 AllocationContextTracker::~AllocationContextTracker() {}
 
 // static
 void AllocationContextTracker::SetCurrentThreadName(const char* name) {
-  if (name && capture_mode() != CaptureMode::DISABLED) {
+  if (name && capture_enabled()) {
     GetInstanceForCurrentThread()->thread_name_ = name;
   }
 }
 
 // static
-void AllocationContextTracker::SetCaptureMode(CaptureMode mode) {
+void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
   // When enabling capturing, also initialize the TLS slot. This does not create
   // a TLS instance yet.
-  if (mode != CaptureMode::DISABLED && !g_tls_alloc_ctx_tracker.initialized())
+  if (enabled && !g_tls_alloc_ctx_tracker.initialized())
     g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
 
-  // Release ordering ensures that when a thread observes |capture_mode_| to
+  // Release ordering ensures that when a thread observes |capture_enabled_| to
   // be true through an acquire load, the TLS slot has been initialized.
-  subtle::Release_Store(&capture_mode_, static_cast<int32_t>(mode));
+  subtle::Release_Store(&capture_enabled_, enabled);
 }
 
 void AllocationContextTracker::PushPseudoStackFrame(
     const char* trace_event_name) {
   // Impose a limit on the height to verify that every push is popped, because
   // in practice the pseudo stack never grows higher than ~20 frames.
   if (pseudo_stack_.size() < kMaxStackDepth)
     pseudo_stack_.push_back(trace_event_name);
   else
     NOTREACHED();
(...skipping 34 matching lines...)
 
 AllocationContext AllocationContextTracker::GetContextSnapshot() {
   AllocationContext ctx;
 
   if (ignore_scope_depth_) {
     ctx.backtrace.frames[0] = StackFrame::FromTraceEventName(kTracingOverhead);
     ctx.type_name = kTracingOverhead;
     ctx.backtrace.frame_count = 1;
     return ctx;
   }
 
-  CaptureMode mode = static_cast<CaptureMode>(
-      subtle::NoBarrier_Load(&capture_mode_));
-
-  auto backtrace = std::begin(ctx.backtrace.frames);
-  auto backtrace_end = std::end(ctx.backtrace.frames);
-
-  // Add the thread name as the first entry
-  if (thread_name_) {
-    *backtrace++ = StackFrame::FromThreadName(thread_name_);
-  }
-
-  switch (mode) {
-    case CaptureMode::DISABLED:
-      {
-        break;
-      }
-    case CaptureMode::PSEUDO_STACK:
-      {
-        for (const auto& event_name: pseudo_stack_) {
-          if (backtrace == backtrace_end) {
-            break;
-          }
-          *backtrace++ = StackFrame::FromTraceEventName(event_name);
-        }
-        break;
-      }
-#if ENABLE_NATIVE_ALLOCATION_TRACES
-    case CaptureMode::NATIVE_STACK:
-      {
-        // base::trace_event::AllocationContextTracker::GetContextSnapshot()
-        const size_t kKnownFrameCount = 1;
-
-        // Backtrace contract requires us to return bottom frames, i.e.
-        // from main() and up. Stack unwinding produces top frames, i.e.
-        // from this point and up until main(). We request many frames to
-        // make sure we reach main(), and then copy bottom portion of them.
-        const void* frames[50];
-        static_assert(arraysize(frames) >= Backtrace::kMaxFrameCount,
-                      "not requesting enough frames to fill Backtrace");
-        size_t frame_count = debug::TraceStackFramePointers(
-            frames,
-            arraysize(frames),
-            kKnownFrameCount);
-
-        // Copy frames backwards
-        size_t backtrace_capacity = backtrace_end - backtrace;
-        ssize_t top_frame_index = (backtrace_capacity >= frame_count) ?
-            0 :
-            frame_count - backtrace_capacity;
-        for (ssize_t i = frame_count - 1; i >= top_frame_index; --i) {
-          const void* frame = frames[i];
-          *backtrace++ = StackFrame::FromProgramCounter(frame);
-        }
-        break;
-      }
-#endif  // ENABLE_NATIVE_ALLOCATION_TRACES
-  }
-
-  ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+  // Fill the backtrace.
+  {
+    auto backtrace = std::begin(ctx.backtrace.frames);
+    auto backtrace_end = std::end(ctx.backtrace.frames);
+
+    // Add the thread name as the first entry
+    if (thread_name_) {
+      *backtrace++ = StackFrame::FromThreadName(thread_name_);
+    }
+
+    for (const char* event_name: pseudo_stack_) {
+      if (backtrace == backtrace_end) {
+        break;
+      }
+      *backtrace++ = StackFrame::FromTraceEventName(event_name);
+    }
+
+    ctx.backtrace.frame_count = backtrace - std::begin(ctx.backtrace.frames);
+  }
 
   // TODO(ssid): Fix crbug.com/594803 to add file name as 3rd dimension
   // (component name) in the heap profiler and not piggy back on the type name.
   ctx.type_name = task_contexts_.empty() ? nullptr : task_contexts_.back();
 
   return ctx;
 }
 
 }  // namespace trace_event
 }  // namespace base