OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/memory_profiler_allocation_context.h" | 5 #include "base/trace_event/memory_profiler_allocation_context.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <cstring> | |
8 | 9 |
10 #include "base/hash.h" | |
9 #include "base/threading/thread_local_storage.h" | 11 #include "base/threading/thread_local_storage.h" |
10 | 12 |
11 namespace base { | 13 namespace base { |
12 namespace trace_event { | 14 namespace trace_event { |
13 | 15 |
14 subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0; | 16 subtle::Atomic32 AllocationContextTracker::capture_enabled_ = 0; |
15 | 17 |
16 namespace { | 18 namespace { |
17 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER; | 19 ThreadLocalStorage::StaticSlot g_tls_alloc_ctx_tracker = TLS_INITIALIZER; |
18 } | 20 } |
19 | 21 |
20 AllocationStack::AllocationStack() {} | 22 AllocationStack::AllocationStack() {} |
21 AllocationStack::~AllocationStack() {} | 23 AllocationStack::~AllocationStack() {} |
22 | 24 |
23 // This function is added to the TLS slot to clean up the instance when the | 25 bool operator==(const Backtrace& lhs, const Backtrace& rhs) { |
24 // thread exits. | 26 // Pointer equality of the stack frames is assumed, so instead of doing a deep |
25 void DestructAllocationContextTracker(void* alloc_ctx_tracker) { | 27 // string comparison on all of the frames, a |memcmp| suffices. |
26 delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker); | 28 return std::memcmp(lhs.frames, rhs.frames, sizeof(lhs.frames)) == 0; |
27 } | |
28 | |
29 AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() { | |
30 auto tracker = | |
31 static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get()); | |
32 | |
33 if (!tracker) { | |
34 tracker = new AllocationContextTracker(); | |
35 g_tls_alloc_ctx_tracker.Set(tracker); | |
36 } | |
37 | |
38 return tracker; | |
39 } | 29 } |
40 | 30 |
41 StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame, | 31 StackFrameDeduplicator::FrameNode::FrameNode(StackFrame frame, |
42 int parent_frame_index) | 32 int parent_frame_index) |
43 : frame(frame), parent_frame_index(parent_frame_index) {} | 33 : frame(frame), parent_frame_index(parent_frame_index) {} |
44 StackFrameDeduplicator::FrameNode::~FrameNode() {} | 34 StackFrameDeduplicator::FrameNode::~FrameNode() {} |
45 | 35 |
46 StackFrameDeduplicator::StackFrameDeduplicator() {} | 36 StackFrameDeduplicator::StackFrameDeduplicator() {} |
47 StackFrameDeduplicator::~StackFrameDeduplicator() {} | 37 StackFrameDeduplicator::~StackFrameDeduplicator() {} |
48 | 38 |
49 int StackFrameDeduplicator::Insert(const AllocationContext::Backtrace& bt) { | 39 int StackFrameDeduplicator::Insert(const Backtrace& bt) { |
50 int frame_index = -1; | 40 int frame_index = -1; |
51 std::map<StackFrame, int>* nodes = &roots_; | 41 std::map<StackFrame, int>* nodes = &roots_; |
52 | 42 |
53 for (size_t i = 0; i < arraysize(bt.frames); i++) { | 43 for (size_t i = 0; i < arraysize(bt.frames); i++) { |
54 if (!bt.frames[i]) | 44 if (!bt.frames[i]) |
55 break; | 45 break; |
56 | 46 |
57 auto node = nodes->find(bt.frames[i]); | 47 auto node = nodes->find(bt.frames[i]); |
58 if (node == nodes->end()) { | 48 if (node == nodes->end()) { |
59 // There is no tree node for this frame yet, create it. The parent node | 49 // There is no tree node for this frame yet, create it. The parent node |
(...skipping 14 matching lines...) Expand all Loading... | |
74 // A tree node for this frame exists. Look for the next one. | 64 // A tree node for this frame exists. Look for the next one. |
75 frame_index = node->second; | 65 frame_index = node->second; |
76 } | 66 } |
77 | 67 |
78 nodes = &frames_[frame_index].children; | 68 nodes = &frames_[frame_index].children; |
79 } | 69 } |
80 | 70 |
81 return frame_index; | 71 return frame_index; |
82 } | 72 } |
83 | 73 |
74 // This function is added to the TLS slot to clean up the instance when the | |
75 // thread exits. | |
76 void DestructAllocationContextTracker(void* alloc_ctx_tracker) { | |
Primiano Tucci (use gerrit)
2015/10/27 11:56:20
This should be moved perhaps in the anonymous namespace
Ruud van Asseldonk
2015/10/27 11:59:31
Can do when refactoring, other CLs touch this piece
| |
77 delete static_cast<AllocationContextTracker*>(alloc_ctx_tracker); | |
78 } | |
79 | |
80 AllocationContextTracker* AllocationContextTracker::GetThreadLocalTracker() { | |
81 auto tracker = | |
82 static_cast<AllocationContextTracker*>(g_tls_alloc_ctx_tracker.Get()); | |
83 | |
84 if (!tracker) { | |
85 tracker = new AllocationContextTracker(); | |
86 g_tls_alloc_ctx_tracker.Set(tracker); | |
87 } | |
88 | |
89 return tracker; | |
90 } | |
91 | |
84 AllocationContextTracker::AllocationContextTracker() {} | 92 AllocationContextTracker::AllocationContextTracker() {} |
85 AllocationContextTracker::~AllocationContextTracker() {} | 93 AllocationContextTracker::~AllocationContextTracker() {} |
86 | 94 |
87 // static | 95 // static |
88 void AllocationContextTracker::SetCaptureEnabled(bool enabled) { | 96 void AllocationContextTracker::SetCaptureEnabled(bool enabled) { |
89 // When enabling capturing, also initialize the TLS slot. This does not create | 97 // When enabling capturing, also initialize the TLS slot. This does not create |
90 // a TLS instance yet. | 98 // a TLS instance yet. |
91 if (enabled && !g_tls_alloc_ctx_tracker.initialized()) | 99 if (enabled && !g_tls_alloc_ctx_tracker.initialized()) |
92 g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker); | 100 g_tls_alloc_ctx_tracker.Initialize(DestructAllocationContextTracker); |
93 | 101 |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
166 // If there is room for more, fill the remaining slots with nullptr keys. | 174 // If there is room for more, fill the remaining slots with nullptr keys. |
167 for (; dst != dst_end; dst++) | 175 for (; dst != dst_end; dst++) |
168 dst->first = nullptr; | 176 dst->first = nullptr; |
169 } | 177 } |
170 | 178 |
171 return ctx; | 179 return ctx; |
172 } | 180 } |
173 | 181 |
174 } // namespace trace_event | 182 } // namespace trace_event |
175 } // namespace base | 183 } // namespace base |
184 | |
185 namespace BASE_HASH_NAMESPACE { | |
186 using base::trace_event::Backtrace; | |
187 | |
188 uint32_t hash<Backtrace>::operator()(const Backtrace& backtrace) const { | |
189 return base::SuperFastHash(reinterpret_cast<const char*>(backtrace.frames), | |
190 sizeof(backtrace.frames)); | |
191 } | |
192 | |
193 } // BASE_HASH_NAMESPACE | |
OLD | NEW |