OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/debug/thread_heap_usage_tracker.h" | 5 #include "base/debug/thread_heap_usage_tracker.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 #include <algorithm> | 8 #include <algorithm> |
| 9 #include <limits> |
9 #include <new> | 10 #include <new> |
10 #include <type_traits> | 11 #include <type_traits> |
11 | 12 |
12 #include "base/allocator/allocator_shim.h" | 13 #include "base/allocator/allocator_shim.h" |
13 #include "base/allocator/features.h" | 14 #include "base/allocator/features.h" |
14 #include "base/logging.h" | 15 #include "base/logging.h" |
15 #include "base/threading/thread_local_storage.h" | 16 #include "base/threading/thread_local_storage.h" |
16 #include "build/build_config.h" | 17 #include "build/build_config.h" |
17 | 18 |
18 #if defined(OS_MACOSX) || defined(OS_IOS) | 19 #if defined(OS_MACOSX) || defined(OS_IOS) |
19 #include <malloc/malloc.h> | 20 #include <malloc/malloc.h> |
20 #else | 21 #else |
21 #include <malloc.h> | 22 #include <malloc.h> |
22 #endif | 23 #endif |
23 | 24 |
24 namespace base { | 25 namespace base { |
25 namespace debug { | 26 namespace debug { |
26 | 27 |
27 namespace { | 28 namespace { |
28 | 29 |
29 using base::allocator::AllocatorDispatch; | 30 using base::allocator::AllocatorDispatch; |
30 | 31 |
31 ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; | 32 ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; |
32 | 33 |
33 ThreadHeapUsage* const kInitializingSentinel = | 34 const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1; |
34 reinterpret_cast<ThreadHeapUsage*>(-1); | 35 ThreadHeapUsage* const kInitializationSentinel = |
| 36 reinterpret_cast<ThreadHeapUsage*>(kSentinelMask); |
| 37 ThreadHeapUsage* const kTeardownSentinel = |
| 38 reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1); |
35 | 39 |
36 bool g_heap_tracking_enabled = false; | 40 bool g_heap_tracking_enabled = false; |
37 | 41 |
38 // Forward declared as it needs to delegate memory allocation to the next | 42 // Forward declared as it needs to delegate memory allocation to the next |
39 // lower shim. | 43 // lower shim. |
40 ThreadHeapUsage* GetOrCreateThreadUsage(); | 44 ThreadHeapUsage* GetOrCreateThreadUsage(); |
41 | 45 |
42 size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { | 46 size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { |
43 if (ptr == nullptr) | 47 if (ptr == nullptr) |
44 return 0U; | 48 return 0U; |
(...skipping 84 matching lines...)
129 size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) { | 133 size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) { |
130 return self->next->get_size_estimate_function(self->next, address); | 134 return self->next->get_size_estimate_function(self->next, address); |
131 } | 135 } |
132 | 136 |
133 // The allocator dispatch used to intercept heap operations. | 137 // The allocator dispatch used to intercept heap operations. |
134 AllocatorDispatch allocator_dispatch = { | 138 AllocatorDispatch allocator_dispatch = { |
135 &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, | 139 &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, |
136 &FreeFn, &GetSizeEstimateFn, nullptr}; | 140 &FreeFn, &GetSizeEstimateFn, nullptr}; |
137 | 141 |
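The hunk elided above contains the interposing functions this table points at (AllocFn, FreeFn, and the rest); each one delegates to self->next and then books the operation against the calling thread. The snippet below is a self-contained sketch of that record-then-delegate shape, not the elided code itself: the dispatch struct is cut down to the two members used here, and a global counter stands in for the per-thread ThreadHeapUsage.

    // Illustrative sketch only: a reduced AllocatorDispatch, not the real
    // base::allocator interface, showing how a shim layers over the next one.
    #include <stddef.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct AllocatorDispatch {
      void* (*alloc_function)(const AllocatorDispatch* self, size_t size);
      const AllocatorDispatch* next;
    };

    // Bottom of the chain: the system allocator.
    void* SystemAlloc(const AllocatorDispatch* /* self */, size_t size) {
      return malloc(size);
    }
    const AllocatorDispatch system_dispatch = {&SystemAlloc, nullptr};

    size_t g_alloc_ops = 0;  // stand-in for ThreadHeapUsage::alloc_ops

    // Interposing layer: delegate first, then record the operation.
    void* TrackingAlloc(const AllocatorDispatch* self, size_t size) {
      void* ret = self->next->alloc_function(self->next, size);
      if (ret != nullptr)
        g_alloc_ops++;
      return ret;
    }
    const AllocatorDispatch tracking_dispatch = {&TrackingAlloc,
                                                 &system_dispatch};

    int main() {
      void* ptr = tracking_dispatch.alloc_function(&tracking_dispatch, 16);
      printf("alloc_ops = %zu\n", g_alloc_ops);  // prints: alloc_ops = 1
      free(ptr);
      return 0;
    }

Keeping the chain explicit through |next| is what lets shims stack: each layer only sees the layer below it, which is also why GetSizeEstimateFn above simply forwards to self->next.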
138 ThreadHeapUsage* GetOrCreateThreadUsage() { | 142 ThreadHeapUsage* GetOrCreateThreadUsage() { |
139 ThreadHeapUsage* allocator_usage = | 143 auto tls_ptr = reinterpret_cast<uintptr_t>(g_thread_allocator_usage.Get()); |
140 static_cast<ThreadHeapUsage*>(g_thread_allocator_usage.Get()); | 144 if ((tls_ptr & kSentinelMask) == kSentinelMask) |
141 if (allocator_usage == kInitializingSentinel) | |
142 return nullptr; // Re-entrancy case. | 145 return nullptr; // Re-entrancy case. |
143 | 146 |
| 147 auto* allocator_usage = reinterpret_cast<ThreadHeapUsage*>(tls_ptr); |
144 if (allocator_usage == nullptr) { | 148 if (allocator_usage == nullptr) { |
145 // Prevent reentrancy due to the allocation below. | 149 // Prevent reentrancy due to the allocation below. |
146 g_thread_allocator_usage.Set(kInitializingSentinel); | 150 g_thread_allocator_usage.Set(kInitializationSentinel); |
147 | 151 |
148 // Delegate the allocation of the per-thread structure to the underlying | 152 allocator_usage = new ThreadHeapUsage(); |
149 // heap shim, for symmetry with the deallocation. Otherwise interposing | |
150 // shims may mis-attribute or mis-direct this allocation. | |
151 const AllocatorDispatch* next = allocator_dispatch.next; | |
152 allocator_usage = new (next->alloc_function(next, sizeof(ThreadHeapUsage))) | |
153 ThreadHeapUsage(); | |
154 static_assert(std::is_pod<ThreadHeapUsage>::value, | 153 static_assert(std::is_pod<ThreadHeapUsage>::value, |
155 "AllocatorDispatch must be POD"); | 154 "AllocatorDispatch must be POD"); |
156 memset(allocator_usage, 0, sizeof(*allocator_usage)); | 155 memset(allocator_usage, 0, sizeof(*allocator_usage)); |
157 g_thread_allocator_usage.Set(allocator_usage); | 156 g_thread_allocator_usage.Set(allocator_usage); |
158 } | 157 } |
159 | 158 |
160 return allocator_usage; | 159 return allocator_usage; |
161 } | 160 } |
162 | 161 |
163 } // namespace | 162 } // namespace |
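A note on the sentinel encoding this CL introduces: kSentinelMask sets every bit except bit 0, so kInitializationSentinel (the mask itself) and kTeardownSentinel (mask | 1) occupy the top two values of the address space, which no allocator ever returns, and the single mask-and-compare in GetOrCreateThreadUsage() recognizes both. A minimal standalone demonstration follows; the heap pointer value is made up, and a 64-bit build is assumed.

    // Demonstrates the sentinel test from GetOrCreateThreadUsage(). The
    // |kHeapPointer| value is hypothetical, chosen only for illustration.
    #include <limits>
    #include <stdint.h>
    #include <stdio.h>

    int main() {
      const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1;
      const uintptr_t kInitialization = kSentinelMask;  // 0xFFFF...FFFE
      const uintptr_t kTeardown = kSentinelMask | 1;    // 0xFFFF...FFFF
      const uintptr_t kHeapPointer = 0x7f00c0ffee00;    // hypothetical

      const uintptr_t values[] = {kInitialization, kTeardown, kHeapPointer};
      for (uintptr_t value : values) {
        // True only when every mask bit is set in |value|, i.e. for the two
        // sentinels; a real pointer always has some high bit clear.
        bool is_sentinel = (value & kSentinelMask) == kSentinelMask;
        printf("0x%016llx -> %s\n", (unsigned long long)value,
               is_sentinel ? "sentinel" : "pointer");
      }
      return 0;
    }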
(...skipping 90 matching lines...)
254 g_heap_tracking_enabled = false; | 253 g_heap_tracking_enabled = false; |
255 } | 254 } |
256 | 255 |
257 base::allocator::AllocatorDispatch* | 256 base::allocator::AllocatorDispatch* |
258 ThreadHeapUsageTracker::GetDispatchForTesting() { | 257 ThreadHeapUsageTracker::GetDispatchForTesting() { |
259 return &allocator_dispatch; | 258 return &allocator_dispatch; |
260 } | 259 } |
261 | 260 |
262 void ThreadHeapUsageTracker::EnsureTLSInitialized() { | 261 void ThreadHeapUsageTracker::EnsureTLSInitialized() { |
263 if (!g_thread_allocator_usage.initialized()) { | 262 if (!g_thread_allocator_usage.initialized()) { |
264 g_thread_allocator_usage.Initialize([](void* allocator_usage) { | 263 g_thread_allocator_usage.Initialize([](void* thread_heap_usage) { |
265 // Delegate the freeing of the per-thread structure to the next-lower | 264 // This destructor will be called twice. Once to destroy the actual |
266 // heap shim. Otherwise this free will re-initialize the TLS on thread | 265 // ThreadHeapUsage instance and a second time, immediately after, for the |
267 // exit. | 266 // sentinel. Re-setting the TLS slot (below) does re-initialize the TLS |
268 allocator_dispatch.next->free_function(allocator_dispatch.next, | 267 // slot. The ThreadLocalStorage code is designed to deal with this use |
269 allocator_usage); | 268 // case (see comments in ThreadHeapUsageTracker::EnsureTLSInitialized) and |
| 269 // will re-call the destructor with the kTeardownSentinel as arg. |
| 270 if (thread_heap_usage == kTeardownSentinel) |
| 271 return; |
| 272 DCHECK(thread_heap_usage != kInitializationSentinel); |
dcheng  2017/02/03 06:57:16
Minor nit: DCHECK_NE?

Primiano Tucci (use gerrit)  2017/02/03 14:50:24
= i tried that initially but _ne is too picky and
| |
| 273 |
| 274 // Deleting the ThreadHeapUsage TLS object will re-enter the shim and hit |
| 275 // RecordFree() above. The sentinel prevents RecordFree() from re-creating |
| 276 // another ThreadHeapUsage object. |
| 277 g_thread_allocator_usage.Set(kTeardownSentinel); |
| 278 delete static_cast<ThreadHeapUsage*>(thread_heap_usage); |
270 }); | 279 }); |
271 } | 280 } |
272 } | 281 } |
273 | 282 |
274 } // namespace debug | 283 } // namespace debug |
275 } // namespace base | 284 } // namespace base |
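To close, the teardown comments in this CL describe a two-pass destructor: the first pass parks kTeardownSentinel in the slot before deleting the tracker, so the delete's trip through the shim's free path cannot mint a fresh tracker, and when the TLS machinery re-runs the destructor for the still-set slot, the second pass sees the sentinel and returns. Below is a toy model of that handshake; the global is a mock of the TLS slot, and none of it is Chromium API.

    // Toy model of the two-pass TLS destructor described above. |g_slot|
    // mocks the per-thread slot; the real code uses base::ThreadLocalStorage.
    #include <limits>
    #include <stdint.h>
    #include <stdio.h>

    struct ThreadHeapUsage {};  // stand-in for the real per-thread counters

    const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1;
    ThreadHeapUsage* const kTeardownSentinel =
        reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1);

    void* g_slot = nullptr;  // mock TLS slot

    void Destructor(void* thread_heap_usage) {
      // Second pass: the slot held only the sentinel; nothing left to free.
      if (thread_heap_usage == kTeardownSentinel)
        return;
      // First pass: park the sentinel so that, if the delete re-enters the
      // shim's free path, no replacement tracker gets created.
      g_slot = kTeardownSentinel;
      delete static_cast<ThreadHeapUsage*>(thread_heap_usage);
    }

    int main() {
      g_slot = new ThreadHeapUsage();
      // Thread exit: TLS keeps running destructors while slots are non-null.
      while (g_slot != nullptr) {
        void* value = g_slot;
        g_slot = nullptr;  // TLS clears the slot before each destructor call.
        Destructor(value);
      }
      printf("destructor ran twice; teardown complete\n");
      return 0;
    }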