| OLD | NEW | 
|---|---|
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #include "base/debug/thread_heap_usage_tracker.h" | 5 #include "base/debug/thread_heap_usage_tracker.h" | 
| 6 | 6 | 
| 7 #include <stdint.h> | 7 #include <stdint.h> | 
| 8 #include <algorithm> | 8 #include <algorithm> | 
|  | 9 #include <limits> | 
| 9 #include <new> | 10 #include <new> | 
| 10 #include <type_traits> | 11 #include <type_traits> | 
| 11 | 12 | 
| 12 #include "base/allocator/allocator_shim.h" | 13 #include "base/allocator/allocator_shim.h" | 
| 13 #include "base/allocator/features.h" | 14 #include "base/allocator/features.h" | 
| 14 #include "base/logging.h" | 15 #include "base/logging.h" | 
| 15 #include "base/threading/thread_local_storage.h" | 16 #include "base/threading/thread_local_storage.h" | 
| 16 #include "build/build_config.h" | 17 #include "build/build_config.h" | 
| 17 | 18 | 
| 18 #if defined(OS_MACOSX) || defined(OS_IOS) | 19 #if defined(OS_MACOSX) || defined(OS_IOS) | 
| 19 #include <malloc/malloc.h> | 20 #include <malloc/malloc.h> | 
| 20 #else | 21 #else | 
| 21 #include <malloc.h> | 22 #include <malloc.h> | 
| 22 #endif | 23 #endif | 
| 23 | 24 | 
| 24 namespace base { | 25 namespace base { | 
| 25 namespace debug { | 26 namespace debug { | 
| 26 | 27 | 
| 27 namespace { | 28 namespace { | 
| 28 | 29 | 
| 29 using base::allocator::AllocatorDispatch; | 30 using base::allocator::AllocatorDispatch; | 
| 30 | 31 | 
| 31 ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; | 32 ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; | 
| 32 | 33 | 
| 33 ThreadHeapUsage* const kInitializingSentinel = | 34 const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1; | 
| 34     reinterpret_cast<ThreadHeapUsage*>(-1); | 35 ThreadHeapUsage* const kInitializationSentinel = | 
|  | 36     reinterpret_cast<ThreadHeapUsage*>(kSentinelMask); | 
|  | 37 ThreadHeapUsage* const kTeardownSentinel = | 
|  | 38     reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1); | 
| 35 | 39 | 
| 36 bool g_heap_tracking_enabled = false; | 40 bool g_heap_tracking_enabled = false; | 
| 37 | 41 | 
| 38 // Forward declared as it needs to delegate memory allocation to the next | 42 // Forward declared as it needs to delegate memory allocation to the next | 
| 39 // lower shim. | 43 // lower shim. | 
| 40 ThreadHeapUsage* GetOrCreateThreadUsage(); | 44 ThreadHeapUsage* GetOrCreateThreadUsage(); | 
| 41 | 45 | 
| 42 size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { | 46 size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { | 
| 43   if (ptr == nullptr) | 47   if (ptr == nullptr) | 
| 44     return 0U; | 48     return 0U; | 
| (...skipping 120 matching lines...) |  |
| 165                                         &AllocAlignedFn, | 169                                         &AllocAlignedFn, | 
| 166                                         &ReallocFn, | 170                                         &ReallocFn, | 
| 167                                         &FreeFn, | 171                                         &FreeFn, | 
| 168                                         &GetSizeEstimateFn, | 172                                         &GetSizeEstimateFn, | 
| 169                                         &BatchMallocFn, | 173                                         &BatchMallocFn, | 
| 170                                         &BatchFreeFn, | 174                                         &BatchFreeFn, | 
| 171                                         &FreeDefiniteSizeFn, | 175                                         &FreeDefiniteSizeFn, | 
| 172                                         nullptr}; | 176                                         nullptr}; | 
| 173 | 177 | 
| 174 ThreadHeapUsage* GetOrCreateThreadUsage() { | 178 ThreadHeapUsage* GetOrCreateThreadUsage() { | 
| 175   ThreadHeapUsage* allocator_usage = | 179   auto tls_ptr = reinterpret_cast<uintptr_t>(g_thread_allocator_usage.Get()); | 
| 176       static_cast<ThreadHeapUsage*>(g_thread_allocator_usage.Get()); | 180   if ((tls_ptr & kSentinelMask) == kSentinelMask) | 
| 177   if (allocator_usage == kInitializingSentinel) |  | 
| 178     return nullptr;  // Re-entrancy case. | 181     return nullptr;  // Re-entrancy case. | 
| 179 | 182 | 
|  | 183   auto* allocator_usage = reinterpret_cast<ThreadHeapUsage*>(tls_ptr); | 
| 180   if (allocator_usage == nullptr) { | 184   if (allocator_usage == nullptr) { | 
| 181     // Prevent reentrancy due to the allocation below. | 185     // Prevent reentrancy due to the allocation below. | 
| 182     g_thread_allocator_usage.Set(kInitializingSentinel); | 186     g_thread_allocator_usage.Set(kInitializationSentinel); | 
| 183 | 187 | 
| 184     // Delegate the allocation of the per-thread structure to the underlying | 188     allocator_usage = new ThreadHeapUsage(); | 
| 185     // heap shim, for symmetry with the deallocation. Otherwise interposing |  | 
| 186     // shims may mis-attribute or mis-direct this allocation. |  | 
| 187     const AllocatorDispatch* next = allocator_dispatch.next; |  | 
| 188     allocator_usage = new (next->alloc_function(next, sizeof(ThreadHeapUsage))) |  | 
| 189         ThreadHeapUsage(); |  | 
| 190     static_assert(std::is_pod<ThreadHeapUsage>::value, | 189     static_assert(std::is_pod<ThreadHeapUsage>::value, | 
| 191                   "AllocatorDispatch must be POD"); | 190                   "ThreadHeapUsage must be POD"); |
| 192     memset(allocator_usage, 0, sizeof(*allocator_usage)); | 191     memset(allocator_usage, 0, sizeof(*allocator_usage)); | 
| 193     g_thread_allocator_usage.Set(allocator_usage); | 192     g_thread_allocator_usage.Set(allocator_usage); | 
| 194   } | 193   } | 
| 195 | 194 | 
| 196   return allocator_usage; | 195   return allocator_usage; | 
| 197 } | 196 } | 
| 198 | 197 | 
| 199 }  // namespace | 198 }  // namespace | 
| (...skipping 90 matching lines...) |  |
| 290   g_heap_tracking_enabled = false; | 289   g_heap_tracking_enabled = false; | 
| 291 } | 290 } | 
| 292 | 291 | 
| 293 base::allocator::AllocatorDispatch* | 292 base::allocator::AllocatorDispatch* | 
| 294 ThreadHeapUsageTracker::GetDispatchForTesting() { | 293 ThreadHeapUsageTracker::GetDispatchForTesting() { | 
| 295   return &allocator_dispatch; | 294   return &allocator_dispatch; | 
| 296 } | 295 } | 
| 297 | 296 | 
| 298 void ThreadHeapUsageTracker::EnsureTLSInitialized() { | 297 void ThreadHeapUsageTracker::EnsureTLSInitialized() { | 
| 299   if (!g_thread_allocator_usage.initialized()) { | 298   if (!g_thread_allocator_usage.initialized()) { | 
| 300     g_thread_allocator_usage.Initialize([](void* allocator_usage) { | 299     g_thread_allocator_usage.Initialize([](void* thread_heap_usage) { | 
| 301       // Delegate the freeing of the per-thread structure to the next-lower | 300       // This destructor will be called twice. Once to destroy the actual | 
| 302       // heap shim. Otherwise this free will re-initialize the TLS on thread | 301       // ThreadHeapUsage instance and a second time, immediately after, for the | 
| 303       // exit. | 302       // sentinel. Re-setting the TLS slot (below) does re-initialize the TLS |
| 304       allocator_dispatch.next->free_function(allocator_dispatch.next, | 303       // slot. The ThreadLocalStorage code is designed to deal with this use | 
| 305                                              allocator_usage); | 304       // case (see comments in base/threading/thread_local_storage.h) and |
|  | 305       // will re-call the destructor with the kTeardownSentinel as arg. | 
|  | 306       if (thread_heap_usage == kTeardownSentinel) | 
|  | 307         return; | 
|  | 308       DCHECK(thread_heap_usage != kInitializationSentinel); | 
|  | 309 | 
|  | 310       // Deleting the ThreadHeapUsage TLS object will re-enter the shim and hit | 
|  | 311       // RecordFree() above. The sentinel prevents RecordFree() from re-creating | 
|  | 312       // another ThreadHeapUsage object. | 
|  | 313       g_thread_allocator_usage.Set(kTeardownSentinel); | 
|  | 314       delete static_cast<ThreadHeapUsage*>(thread_heap_usage); | 
| 306     }); | 315     }); | 
| 307   } | 316   } | 
| 308 } | 317 } | 
| 309 | 318 | 
| 310 }  // namespace debug | 319 }  // namespace debug | 
| 311 }  // namespace base | 320 }  // namespace base | 
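
A note on the new sentinel scheme (new lines 34-38, consumed at new lines 179-181): `kSentinelMask` is `0xFF...FE`, so the initialization sentinel (`kSentinelMask`) and the teardown sentinel (`kSentinelMask | 1`) occupy the top two addresses of the address space and differ only in the low bit. The standalone sketch below, with a hypothetical `IsSentinel()` helper added purely for illustration, shows why the single masked compare in `GetOrCreateThreadUsage()` recognizes both:

```cpp
#include <stdint.h>
#include <limits>

struct ThreadHeapUsage;  // Opaque here; the real definition is in the header.

// max() is 0xFF...FF, so the mask is 0xFF...FE. Both sentinels sit at the
// top two addresses of the address space and differ only in bit 0.
const uintptr_t kSentinelMask = std::numeric_limits<uintptr_t>::max() - 1;
ThreadHeapUsage* const kInitializationSentinel =
    reinterpret_cast<ThreadHeapUsage*>(kSentinelMask);      // 0xFF...FE
ThreadHeapUsage* const kTeardownSentinel =
    reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1);  // 0xFF...FF

// Hypothetical helper (not part of the CL): both sentinel values satisfy
// (value & kSentinelMask) == kSentinelMask, and no heap allocation can land
// at either address, so one compare covers both re-entrancy cases.
bool IsSentinel(const void* tls_value) {
  return (reinterpret_cast<uintptr_t>(tls_value) & kSentinelMask) ==
         kSentinelMask;
}
```

This is what lets the old `allocator_usage == kInitializingSentinel` equality test become the mask test at new line 180 while also catching the new teardown case.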
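The two-pass destructor contract that the new TLS destructor (new lines 299-315) relies on can be demonstrated outside Chromium. The demo below is a minimal sketch using POSIX TLS in place of `base::ThreadLocalStorage`; every name in it is invented for the demo, and `(void*)-1` stands in for `kTeardownSentinel`:

```cpp
#include <pthread.h>
#include <cstdio>

pthread_key_t g_key;

void SlotDestructor(void* value) {
  // Second pass: the slot was left holding the sentinel; stop here, exactly
  // like the kTeardownSentinel early-return in the CL.
  if (value == reinterpret_cast<void*>(-1)) {
    std::printf("second pass: sentinel seen, done\n");
    return;
  }
  // First pass: free the real per-thread object, then park the sentinel in
  // the slot. POSIX re-runs destructors for slots left non-null, so this
  // destructor fires once more, next time with the sentinel as its argument.
  std::printf("first pass: deleting %p\n", value);
  delete static_cast<int*>(value);
  pthread_setspecific(g_key, reinterpret_cast<void*>(-1));
}

void* ThreadMain(void*) {
  pthread_setspecific(g_key, new int(42));  // Stands in for ThreadHeapUsage.
  return nullptr;
}

int main() {
  pthread_key_create(&g_key, &SlotDestructor);
  pthread_t thread;
  pthread_create(&thread, nullptr, &ThreadMain, nullptr);
  pthread_join(thread, nullptr);
  pthread_key_delete(g_key);
  return 0;
}
```

In the CL itself, the reason for parking the sentinel before `delete` is spelled out at new lines 310-312: the frees issued by `delete` re-enter the allocator shim and reach `GetOrCreateThreadUsage()`, and the sentinel makes that call return `nullptr` instead of allocating a replacement `ThreadHeapUsage` for a thread that is already exiting.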