Chromium Code Reviews
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/scoped_thread_heap_usage.h"

#include <stdint.h>
#include <algorithm>
#include <type_traits>

#include "base/allocator/allocator_shim.h"
#include "base/allocator/features.h"
#include "base/logging.h"
#include "base/threading/thread_local_storage.h"
#include "build/build_config.h"

#if defined(OS_MACOSX) || defined(OS_IOS)
#include <malloc/malloc.h>

Primiano Tucci (use gerrit), 2016/09/07 17:53:38:
the order of includes seems a bit weird here. but ...

Sigurður Ásgeirsson, 2016/09/07 18:35:35:
The OS defines come from build/build_config.h - hence ...

#else
#include <malloc.h>
#endif
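
The reply above, made concrete: OS_MACOSX and OS_IOS are not compiler-provided macros, they are defined by build/build_config.h, so that header has to be included before the conditional above is evaluated. A sketch of the failure mode with the order reversed (illustrative only, not part of this file):

// Hypothetical mis-ordering, shown only to illustrate the reply above.
#if defined(OS_MACOSX) || defined(OS_IOS)  // Both macros still undefined here,
#include <malloc/malloc.h>                 // so this branch is never taken,
#else                                      // even in a Mac build.
#include <malloc.h>
#endif
#include "build/build_config.h"  // Defines OS_MACOSX/OS_IOS, but too late.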

namespace base {
namespace debug {

namespace {

using base::allocator::AllocatorDispatch;

ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER;

ScopedThreadHeapUsage::ThreadAllocatorUsage* const kInitializingSentinel =
    reinterpret_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(-1);

// Forward declared as it needs to delegate memory allocation to the next
// lower shim.
ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage();

size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) {
  if (ptr == nullptr || !next->get_size_estimate_function)

Primiano Tucci (use gerrit), 2016/09/07 17:53:38:
ditto about get_size_estimate_function should never ...

Sigurður Ásgeirsson, 2016/09/07 18:35:35:
Done.

    return 0U;

  return next->get_size_estimate_function(next, ptr);
}
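
Sigurður's "Done" above most likely refers to dropping the null test on get_size_estimate_function. Taking the reviewer's point at face value (every dispatch in the chain supplies a size-estimate function), the helper would shrink to something like the sketch below; the function name is illustrative, not taken from the CL:

// Sketch only: assumes get_size_estimate_function is never null on any
// dispatch in the chain, so only the pointer being estimated needs a check.
size_t GetAllocSizeEstimateSketch(const AllocatorDispatch* next, void* ptr) {
  if (ptr == nullptr)
    return 0U;
  return next->get_size_estimate_function(next, ptr);
}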

void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) {
  ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  if (usage == nullptr)
    return;

  usage->alloc_ops++;
  size_t estimate = GetAllocSizeEstimate(next, ptr);
  if (size && estimate) {
    usage->alloc_bytes += estimate;
    usage->alloc_overhead_bytes += estimate - size;

    // Only keep track of the net number of bytes allocated in the scope if the
    // size estimate function returns sane values, e.g. non-zero.
    uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
    if (allocated_bytes > usage->max_allocated_bytes)
      usage->max_allocated_bytes = allocated_bytes;
  } else {
    usage->alloc_bytes += size;
  }
}

void RecordFree(const AllocatorDispatch* next, void* ptr) {
  ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  if (usage == nullptr)
    return;

  size_t estimate = GetAllocSizeEstimate(next, ptr);
  usage->free_ops++;
  usage->free_bytes += estimate;
}

void* AllocFn(const AllocatorDispatch* self, size_t size) {
  void* ret = self->next->alloc_function(self->next, size);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void* AllocZeroInitializedFn(const AllocatorDispatch* self,
                             size_t n,
                             size_t size) {
  void* ret = self->next->alloc_zero_initialized_function(self->next, n, size);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void* AllocAlignedFn(const AllocatorDispatch* self,
                     size_t alignment,
                     size_t size) {
  void* ret = self->next->alloc_aligned_function(self->next, alignment, size);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void* ReallocFn(const AllocatorDispatch* self, void* address, size_t size) {
  if (address != nullptr)
    RecordFree(self->next, address);

  void* ret = self->next->realloc_function(self->next, address, size);
  if (ret != nullptr && size != 0)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void FreeFn(const AllocatorDispatch* self, void* address) {
  if (address != nullptr)
    RecordFree(self->next, address);
  self->next->free_function(self->next, address);
}

size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) {
  return self->next->get_size_estimate_function(self->next, address);
}

// The dispatch for the intercept used.
AllocatorDispatch allocator_dispatch = {
    &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn,
    &FreeFn, &GetSizeEstimateFn, nullptr};

ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() {
  ScopedThreadHeapUsage::ThreadAllocatorUsage* allocator_usage =
      static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
          g_thread_allocator_usage.Get());
  if (allocator_usage == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

  if (allocator_usage == nullptr) {
    // Prevent reentrancy due to the allocation below.
    g_thread_allocator_usage.Set(kInitializingSentinel);

    allocator_usage = new ScopedThreadHeapUsage::ThreadAllocatorUsage;
    memset(allocator_usage, 0, sizeof(*allocator_usage));
    g_thread_allocator_usage.Set(allocator_usage);
  }

  return allocator_usage;
}

}  // namespace

ScopedThreadHeapUsage::ScopedThreadHeapUsage() {
  // Initialize must be called before creating instances of this class.
  CHECK(g_thread_allocator_usage.initialized());

  ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  usage_at_creation_ = *usage;

  // Reset the stats for our current scope.
  // The per-thread usage instance now tracks this scope's usage, while this
  // instance persists the outer scope's usage stats. On destruction, this
  // instance will restore the outer scope's usage stats with this scope's
  // usage added.
  memset(usage, 0, sizeof(*usage));

  static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD.");
}

ScopedThreadHeapUsage::~ScopedThreadHeapUsage() {
  DCHECK(thread_checker_.CalledOnValidThread());

  ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();

  // Update the outer max.
  if (usage->max_allocated_bytes) {
    uint64_t outer_net_alloc_bytes =
        usage_at_creation_.alloc_bytes - usage_at_creation_.free_bytes;

    usage->max_allocated_bytes =
        std::max(usage_at_creation_.max_allocated_bytes,
                 outer_net_alloc_bytes + usage->max_allocated_bytes);
  }

  usage->alloc_ops += usage_at_creation_.alloc_ops;
  usage->alloc_bytes += usage_at_creation_.alloc_bytes;
  usage->alloc_overhead_bytes += usage_at_creation_.alloc_overhead_bytes;
  usage->free_ops += usage_at_creation_.free_ops;
  usage->free_bytes += usage_at_creation_.free_bytes;
}

ScopedThreadHeapUsage::ThreadAllocatorUsage ScopedThreadHeapUsage::Now() {
  ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  return *usage;
}
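
Pulling the class together, a minimal usage sketch as this file defines it: Initialize() and EnableHeapTracking() once for the process, then one ScopedThreadHeapUsage per measured scope. The helper names, the workload and the logging are illustrative, and namespace qualifiers are omitted:

// Illustrative only: process-wide setup, then one scope per measurement.
void SetUpHeapTracking() {
  ScopedThreadHeapUsage::Initialize();          // Creates the TLS slot.
  ScopedThreadHeapUsage::EnableHeapTracking();  // Inserts the shim dispatch.
}

void MeasureSomeWork() {
  ScopedThreadHeapUsage scope;  // Saves the outer stats, zeroes this thread's counters.
  DoSomeWork();                 // Hypothetical workload.

  ScopedThreadHeapUsage::ThreadAllocatorUsage usage = ScopedThreadHeapUsage::Now();
  VLOG(1) << "alloc ops: " << usage.alloc_ops
          << ", net bytes: " << (usage.alloc_bytes - usage.free_bytes)
          << ", peak net bytes: " << usage.max_allocated_bytes;
}  // ~ScopedThreadHeapUsage folds this scope's stats back into the outer scope.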

void ScopedThreadHeapUsage::Initialize() {
  if (!g_thread_allocator_usage.initialized()) {
    g_thread_allocator_usage.Initialize([](void* allocator_usage) {
      delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
          allocator_usage);
    });
  }

Primiano Tucci (use gerrit), 2016/09/07 17:53:38:
i'd probably add an else NOTREACHED() (or just make ...

Sigurður Ásgeirsson, 2016/09/07 18:35:35:
This'll happen in tests - I don't think it's possible ...

Primiano Tucci (use gerrit), 2016/09/07 18:57:33:
ah yes you are right. fine as it is then.

}
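
For reference, the variant Primiano floated above and then withdrew (tests do call Initialize() more than once) would have looked roughly like the sketch below; illustrative only:

// Rejected variant, sketched for reference: the NOTREACHED() would fire in
// tests that initialize repeatedly.
void ScopedThreadHeapUsage::Initialize() {
  if (!g_thread_allocator_usage.initialized()) {
    g_thread_allocator_usage.Initialize([](void* allocator_usage) {
      delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
          allocator_usage);
    });
  } else {
    NOTREACHED();
  }
}
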
void ScopedThreadHeapUsage::EnableHeapTracking() {
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  base::allocator::InsertAllocatorDispatch(&allocator_dispatch);

Primiano Tucci (use gerrit), 2016/09/07 17:53:38:
this one also I'd guard with a (D)CHECK against double ...

Sigurður Ásgeirsson, 2016/09/07 18:35:35:
Done.

#else
  CHECK(false) << "Can't enable heap tracking without the shim.";
#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
}
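
The "Done" above appears to refer to guarding against enabling heap tracking twice. One shape such a guard could take is a file-local flag checked before inserting the dispatch, along these lines; the flag name and the exact CHECK wording are assumptions, not taken from the CL:

// Sketch of a double-enable guard; g_heap_tracking_enabled is illustrative.
namespace {
bool g_heap_tracking_enabled = false;
}  // namespace

void ScopedThreadHeapUsage::EnableHeapTracking() {
  CHECK(!g_heap_tracking_enabled) << "EnableHeapTracking called twice.";
  g_heap_tracking_enabled = true;
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  base::allocator::InsertAllocatorDispatch(&allocator_dispatch);
#else
  CHECK(false) << "Can't enable heap tracking without the shim.";
#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
}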

void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() {
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch);
#else
  CHECK(false) << "Can't disable heap tracking without the shim.";
#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
}

base::allocator::AllocatorDispatch*
ScopedThreadHeapUsage::GetDispatchForTesting() {
  return &allocator_dispatch;
}

}  // namespace debug
}  // namespace base