Chromium Code Reviews

Unified Diff: base/trace_event/heap_profiler_allocation_context_tracker.cc

Issue 1822013002: Revert of tracing: add dump provider for malloc heap profiler (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@shim_layer_linux
Patch Set: Created 4 years, 9 months ago
--- a/base/trace_event/heap_profiler_allocation_context_tracker.cc
+++ b/base/trace_event/heap_profiler_allocation_context_tracker.cc
@@ -1,121 +1,115 @@
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 
 #include <algorithm>
 #include <iterator>
 
 #include "base/atomicops.h"
 #include "base/threading/thread_local_storage.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 
 namespace base {
 namespace trace_event {
 
 subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0;
 
 namespace {
 
-const size_t kMaxStackDepth = 128u;
-AllocationContextTracker* const kInitializingSentinel =
-    reinterpret_cast<AllocationContextTracker*>(-1);
-
 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;
 
 // This function is added to the TLS slot to clean up the instance when the
 // thread exits.
 void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
   delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
 }
 
 }  // namespace
 
+AllocationContextTracker::AllocationContextTracker() {}
+AllocationContextTracker::~AllocationContextTracker() {}
+
 // static
-AllocationContextTracker*
-AllocationContextTracker::GetInstanceForCurrentThread() {
-  AllocationContextTracker* tracker =
+AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() {
+  auto tracker =
       static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get());
-  if (tracker == kInitializingSentinel)
-    return nullptr;  // Re-entrancy case.
 
   if (!tracker) {
-    g_tls_alloc_ctx_tracker.Set(kInitializingSentinel);
     tracker = new AllocationContextTracker();
     g_tls_alloc_ctx_tracker.Set(tracker);
   }
 
   return tracker;
 }
 
-AllocationContextTracker::AllocationContextTracker() {
-  pseudo_stack_.reserve(kMaxStackDepth);
-}
-AllocationContextTracker::~AllocationContextTracker() {}
-
 // static
 void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
   // When enabling capturing, also initialize the TLS slot. This does not create
   // a TLS instance yet.
   if (enabled && !g_tls_alloc_ctx_tracker.initialized())
     g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);
 
   // Release ordering ensures that when a thread observes |capture_enabled_| to
   // be true through an acquire load, the TLS slot has been initialized.
   subtle::Release_Store(&capture_enabled_, enabled);
 }
 
+// static
 void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) {
+  auto tracker = AllocationContextTracker::GetThreadLocalTracker();
+
   // Impose a limit on the height to verify that every push is popped, because
   // in practice the pseudo stack never grows higher than ~20 frames.
-  if (pseudo_stack_.size() < kMaxStackDepth)
-    pseudo_stack_.push_back(frame);
-  else
-    NOTREACHED();
+  DCHECK_LT(tracker->pseudo_stack_.size(), 128u);
+  tracker->pseudo_stack_.push_back(frame);
 }
 
 // static
 void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) {
+  auto tracker = AllocationContextTracker::GetThreadLocalTracker();
+
   // Guard for stack underflow. If tracing was started with a TRACE_EVENT in
   // scope, the frame was never pushed, so it is possible that pop is called
   // on an empty stack.
-  if (pseudo_stack_.empty())
+  if (tracker->pseudo_stack_.empty())
     return;
 
   // Assert that pushes and pops are nested correctly. This DCHECK can be
   // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call
   // without a corresponding TRACE_EVENT_BEGIN).
-  DCHECK_EQ(frame, pseudo_stack_.back())
+  DCHECK_EQ(frame, tracker->pseudo_stack_.back())
       << "Encountered an unmatched TRACE_EVENT_END";
 
-  pseudo_stack_.pop_back();
+  tracker->pseudo_stack_.pop_back();
 }
 
 // static
 AllocationContext AllocationContextTracker::GetContextSnapshot() {
+  AllocationContextTracker* tracker = GetThreadLocalTracker();
   AllocationContext ctx;
 
   // Fill the backtrace.
   {
-    auto src = pseudo_stack_.begin();
+    auto src = tracker->pseudo_stack_.begin();
     auto dst = std::begin(ctx.backtrace.frames);
-    auto src_end = pseudo_stack_.end();
+    auto src_end = tracker->pseudo_stack_.end();
     auto dst_end = std::end(ctx.backtrace.frames);
 
     // Copy as much of the bottom of the pseudo stack into the backtrace as
    // possible.
     for (; src != src_end && dst != dst_end; src++, dst++)
       *dst = *src;
 
     // If there is room for more, fill the remaining slots with empty frames.
     std::fill(dst, dst_end, nullptr);
   }
 
   ctx.type_name = nullptr;
 
   return ctx;
 }
 
 }  // namespace trace_event
 }  // namespace base
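
For context on what this revert removes: on the left-hand (pre-revert) side, GetInstanceForCurrentThread() marks the TLS slot with kInitializingSentinel before allocating the tracker and returns nullptr when it finds that sentinel ("Re-entrancy case"). Presumably this matters because creating the tracker with |new| itself goes through the allocator hooks of the malloc heap profiler, so the nested lookup must not try to allocate again. Below is a minimal standalone sketch of that pattern, not Chromium code: plain pthread TLS stands in for base::ThreadLocalStorage, and Tracker, GetTrackerForCurrentThread(), and main() are invented names for illustration.

// Sketch of the lazily created per-thread tracker with a re-entrancy
// sentinel, as on the left-hand side of this diff (assumptions noted above).
#include <pthread.h>
#include <cstdio>
#include <vector>

namespace {

struct Tracker {
  std::vector<const char*> pseudo_stack;
};

// Sentinel value that marks "initialization in progress" in the TLS slot.
Tracker* const kInitializingSentinel = reinterpret_cast<Tracker*>(-1);
pthread_key_t g_tls_key;
pthread_once_t g_tls_once = PTHREAD_ONCE_INIT;

void DestroyTracker(void* tracker) {
  if (tracker != kInitializingSentinel)
    delete static_cast<Tracker*>(tracker);
}

void InitKey() { pthread_key_create(&g_tls_key, DestroyTracker); }

Tracker* GetTrackerForCurrentThread() {
  pthread_once(&g_tls_once, InitKey);
  Tracker* tracker = static_cast<Tracker*>(pthread_getspecific(g_tls_key));
  if (tracker == kInitializingSentinel)
    return nullptr;  // Re-entrant call while |new Tracker| below is allocating.
  if (!tracker) {
    // Mark the slot before allocating so a nested lookup sees the sentinel
    // rather than nullptr and does not try to allocate again.
    pthread_setspecific(g_tls_key, kInitializingSentinel);
    tracker = new Tracker();
    pthread_setspecific(g_tls_key, tracker);
  }
  return tracker;
}

}  // namespace

int main() {
  // In the real profiler the allocator hook would call this while |new|
  // runs; here main() only exercises the normal path. Callers must tolerate
  // a null return, exactly as the pre-revert contract requires.
  if (Tracker* tracker = GetTrackerForCurrentThread())
    tracker->pseudo_stack.push_back("main");
  std::printf("stack depth: %zu\n",
              GetTrackerForCurrentThread()->pseudo_stack.size());
  return 0;
}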
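
The unchanged SetCaptureEnabled() also carries a comment worth unpacking: the release store of |capture_enabled_| pairs with acquire loads elsewhere, so a thread that observes the flag as true is guaranteed to also see the initialized TLS slot. A small self-contained illustration of that release/acquire pairing using std::atomic in place of base::subtle (g_slot_state and g_capture_enabled are names invented for the example):

// Writer initializes state, then publishes a flag with release semantics;
// a reader that sees the flag via an acquire load also sees the state.
#include <atomic>
#include <cassert>
#include <thread>

namespace {

int g_slot_state = 0;                        // Stands in for the TLS slot init.
std::atomic<bool> g_capture_enabled{false};  // Stands in for |capture_enabled_|.

void EnableCapture() {
  g_slot_state = 42;  // "Initialize the TLS slot" before publishing the flag.
  g_capture_enabled.store(true, std::memory_order_release);
}

void MaybeRecordAllocation() {
  // If the acquire load returns true, the write to |g_slot_state| above is
  // guaranteed to be visible here too. Without the release/acquire pair this
  // could fail on weakly ordered hardware.
  if (g_capture_enabled.load(std::memory_order_acquire))
    assert(g_slot_state == 42);
}

}  // namespace

int main() {
  std::thread writer(EnableCapture);
  std::thread reader(MaybeRecordAllocation);
  writer.join();
  reader.join();
  return 0;
}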