OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" | 5 #include "base/trace_event/heap_profiler_allocation_context_tracker.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <iterator> | 8 #include <iterator> |
9 | 9 |
10 #include "base/atomicops.h" | 10 #include "base/atomicops.h" |
11 #include "base/threading/thread_local_storage.h" | 11 #include "base/threading/thread_local_storage.h" |
12 #include "base/trace_event/heap_profiler_allocation_context.h" | 12 #include "base/trace_event/heap_profiler_allocation_context.h" |
13 | 13 |
14 namespace base { | 14 namespace base { |
15 namespace trace_event { | 15 namespace trace_event { |
16 | 16 |
// Whether capturing is enabled (0 = disabled). Written with release semantics
// in SetCaptureEnabled(); readers are expected to pair this with an acquire
// load so that observing "enabled" implies the TLS slot is initialized.
subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0;
18 | 18 |
namespace {

// Maximum number of frames |pseudo_stack_| will hold. In practice the pseudo
// stack never grows higher than ~20 frames (see PushPseudoStackFrame), so this
// mainly bounds damage from unbalanced TRACE_EVENT push/pop.
const size_t kMaxStackDepth = 128u;

// Sentinel stored in the TLS slot while a tracker is being constructed, used
// by GetInstanceForCurrentThread to detect re-entrant calls (the constructor
// may itself allocate, which would recurse into the profiler).
AllocationContextTracker* const kInitializingSentinel =
    reinterpret_cast<AllocationContextTracker*>(-1);

// TLS slot holding the per-thread AllocationContextTracker instance.
ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER;

// This function is added to the TLS slot to clean up the instance when the
// thread exits.
void DestructAllocationContextTracker(void* alloc_ctx_tracker) {
  delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker);
}

}  // namespace
30 | 34 |
31 AllocationContextTracker::AllocationContextTracker() {} | |
32 AllocationContextTracker::~AllocationContextTracker() {} | |
33 | |
34 // static | 35 // static |
35 AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() { | 36 AllocationContextTracker* |
36 auto tracker = | 37 AllocationContextTracker::GetInstanceForCurrentThread() { |
38 AllocationContextTracker* tracker = | |
37 static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get()); | 39 static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get()); |
40 if (tracker == kInitializingSentinel) | |
41 return nullptr; // Re-entrancy case. | |
38 | 42 |
39 if (!tracker) { | 43 if (!tracker) { |
44 g_tls_alloc_ctx_tracker.Set(kInitializingSentinel); | |
40 tracker = new AllocationContextTracker(); | 45 tracker = new AllocationContextTracker(); |
41 g_tls_alloc_ctx_tracker.Set(tracker); | 46 g_tls_alloc_ctx_tracker.Set(tracker); |
42 } | 47 } |
43 | 48 |
44 return tracker; | 49 return tracker; |
45 } | 50 } |
46 | 51 |
52 AllocationContextTracker::AllocationContextTracker() { | |
53 pseudo_stack_.reserve(kMaxStackDepth); | |
54 } | |
55 AllocationContextTracker::~AllocationContextTracker() {} | |
56 | |
// static
void AllocationContextTracker::SetCaptureEnabled(bool enabled) {
  // When enabling capturing, also initialize the TLS slot. This does not create
  // a TLS instance yet; instances are created lazily per thread in
  // GetInstanceForCurrentThread().
  if (enabled && !g_tls_alloc_ctx_tracker.initialized())
    g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker);

  // Release ordering ensures that when a thread observes |capture_enabled_| to
  // be true through an acquire load, the TLS slot has been initialized.
  // (The store must therefore come after the Initialize call above.)
  subtle::Release_Store(&capture_enabled_, enabled);
}
58 | 68 |
59 // static | |
60 void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) { | 69 void AllocationContextTracker::PushPseudoStackFrame(StackFrame frame) { |
61 auto tracker = AllocationContextTracker::GetThreadLocalTracker(); | |
62 | |
63 // Impose a limit on the height to verify that every push is popped, because | 70 // Impose a limit on the height to verify that every push is popped, because |
64 // in practice the pseudo stack never grows higher than ~20 frames. | 71 // in practice the pseudo stack never grows higher than ~20 frames. |
65 DCHECK_LT(tracker->pseudo_stack_.size(), 128u); | 72 if (pseudo_stack_.size() < kMaxStackDepth) |
66 tracker->pseudo_stack_.push_back(frame); | 73 pseudo_stack_.push_back(frame); |
74 else | |
75 NOTREACHED(); | |
67 } | 76 } |
68 | 77 |
69 // static | 78 // static |
Dmitry Skiba
2016/03/22 19:23:40
This method (and others below) is not static anymo
| |
70 void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) { | 79 void AllocationContextTracker::PopPseudoStackFrame(StackFrame frame) { |
71 auto tracker = AllocationContextTracker::GetThreadLocalTracker(); | |
72 | |
73 // Guard for stack underflow. If tracing was started with a TRACE_EVENT in | 80 // Guard for stack underflow. If tracing was started with a TRACE_EVENT in |
74 // scope, the frame was never pushed, so it is possible that pop is called | 81 // scope, the frame was never pushed, so it is possible that pop is called |
75 // on an empty stack. | 82 // on an empty stack. |
76 if (tracker->pseudo_stack_.empty()) | 83 if (pseudo_stack_.empty()) |
77 return; | 84 return; |
78 | 85 |
79 // Assert that pushes and pops are nested correctly. This DCHECK can be | 86 // Assert that pushes and pops are nested correctly. This DCHECK can be |
80 // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call | 87 // hit if some TRACE_EVENT macro is unbalanced (a TRACE_EVENT_END* call |
81 // without a corresponding TRACE_EVENT_BEGIN). | 88 // without a corresponding TRACE_EVENT_BEGIN). |
82 DCHECK_EQ(frame, tracker->pseudo_stack_.back()) | 89 DCHECK_EQ(frame, pseudo_stack_.back()) |
83 << "Encountered an unmatched TRACE_EVENT_END"; | 90 << "Encountered an unmatched TRACE_EVENT_END"; |
84 | 91 |
85 tracker->pseudo_stack_.pop_back(); | 92 pseudo_stack_.pop_back(); |
86 } | 93 } |
87 | 94 |
88 // static | 95 // static |
89 AllocationContext AllocationContextTracker::GetContextSnapshot() { | 96 AllocationContext AllocationContextTracker::GetContextSnapshot() { |
90 AllocationContextTracker* tracker = GetThreadLocalTracker(); | |
91 AllocationContext ctx; | 97 AllocationContext ctx; |
92 | 98 |
93 // Fill the backtrace. | 99 // Fill the backtrace. |
94 { | 100 { |
95 auto src = tracker->pseudo_stack_.begin(); | 101 auto src = pseudo_stack_.begin(); |
96 auto dst = std::begin(ctx.backtrace.frames); | 102 auto dst = std::begin(ctx.backtrace.frames); |
97 auto src_end = tracker->pseudo_stack_.end(); | 103 auto src_end = pseudo_stack_.end(); |
98 auto dst_end = std::end(ctx.backtrace.frames); | 104 auto dst_end = std::end(ctx.backtrace.frames); |
99 | 105 |
100 // Copy as much of the bottom of the pseudo stack into the backtrace as | 106 // Copy as much of the bottom of the pseudo stack into the backtrace as |
101 // possible. | 107 // possible. |
102 for (; src != src_end && dst != dst_end; src++, dst++) | 108 for (; src != src_end && dst != dst_end; src++, dst++) |
103 *dst = *src; | 109 *dst = *src; |
104 | 110 |
105 // If there is room for more, fill the remaining slots with empty frames. | 111 // If there is room for more, fill the remaining slots with empty frames. |
106 std::fill(dst, dst_end, nullptr); | 112 std::fill(dst, dst_end, nullptr); |
107 } | 113 } |
108 | 114 |
109 ctx.type_name = nullptr; | 115 ctx.type_name = nullptr; |
110 | 116 |
111 return ctx; | 117 return ctx; |
112 } | 118 } |
113 | 119 |
114 } // namespace trace_event | 120 } // namespace trace_event |
115 } // namespace base | 121 } // namespace base |
OLD | NEW |