OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/debug/scoped_thread_heap_usage.h" | 5 #include "base/debug/thread_heap_usage_tracker.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 #include <algorithm> | 8 #include <algorithm> |
9 #include <type_traits> | 9 #include <type_traits> |
10 | 10 |
11 #include "base/allocator/allocator_shim.h" | 11 #include "base/allocator/allocator_shim.h" |
12 #include "base/allocator/features.h" | 12 #include "base/allocator/features.h" |
13 #include "base/logging.h" | 13 #include "base/logging.h" |
14 #include "base/threading/thread_local_storage.h" | 14 #include "base/threading/thread_local_storage.h" |
15 #include "build/build_config.h" | 15 #include "build/build_config.h" |
16 | 16 |
17 #if defined(OS_MACOSX) || defined(OS_IOS) | 17 #if defined(OS_MACOSX) || defined(OS_IOS) |
18 #include <malloc/malloc.h> | 18 #include <malloc/malloc.h> |
19 #else | 19 #else |
20 #include <malloc.h> | 20 #include <malloc.h> |
21 #endif | 21 #endif |
22 | 22 |
23 namespace base { | 23 namespace base { |
24 namespace debug { | 24 namespace debug { |
25 | 25 |
26 namespace { | 26 namespace { |
27 | 27 |
28 using base::allocator::AllocatorDispatch; | 28 using base::allocator::AllocatorDispatch; |
29 | 29 |
30 ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; | 30 ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; |
31 | 31 |
32 ScopedThreadHeapUsage::ThreadAllocatorUsage* const kInitializingSentinel = | 32 ThreadHeapUsage* const kInitializingSentinel = |
33 reinterpret_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(-1); | 33 reinterpret_cast<ThreadHeapUsage*>(-1); |
34 | 34 |
35 bool g_heap_tracking_enabled = false; | 35 bool g_heap_tracking_enabled = false; |
36 | 36 |
37 // Forward declared as it needs to delegate memory allocation to the next | 37 // Forward declared as it needs to delegate memory allocation to the next |
38 // lower shim. | 38 // lower shim. |
39 ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage(); | 39 ThreadHeapUsage* GetOrCreateThreadUsage(); |
40 | 40 |
41 size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { | 41 size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { |
42 if (ptr == nullptr) | 42 if (ptr == nullptr) |
43 return 0U; | 43 return 0U; |
44 | 44 |
45 return next->get_size_estimate_function(next, ptr); | 45 return next->get_size_estimate_function(next, ptr); |
46 } | 46 } |
47 | 47 |
48 void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) { | 48 void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) { |
49 ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); | 49 ThreadHeapUsage* usage = GetOrCreateThreadUsage(); |
50 if (usage == nullptr) | 50 if (usage == nullptr) |
51 return; | 51 return; |
52 | 52 |
53 usage->alloc_ops++; | 53 usage->alloc_ops++; |
54 size_t estimate = GetAllocSizeEstimate(next, ptr); | 54 size_t estimate = GetAllocSizeEstimate(next, ptr); |
55 if (size && estimate) { | 55 if (size && estimate) { |
| 56 // Only keep track of the net number of bytes allocated in the scope if the |
| 57 // size estimate function returns sane values, e.g. non-zero. |
56 usage->alloc_bytes += estimate; | 58 usage->alloc_bytes += estimate; |
57 usage->alloc_overhead_bytes += estimate - size; | 59 usage->alloc_overhead_bytes += estimate - size; |
58 | 60 |
59 // Only keep track of the net number of bytes allocated in the scope if the | 61 // Record the max outstanding number of bytes, but only if the difference |
60 // size estimate function returns sane values, e.g. non-zero. | 62 // is net positive (e.g. more bytes allocated than freed in the scope). |
61 uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes; | 63 if (usage->alloc_bytes > usage->free_bytes) { |
62 if (allocated_bytes > usage->max_allocated_bytes) | 64 uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes; |
63 usage->max_allocated_bytes = allocated_bytes; | 65 if (allocated_bytes > usage->max_allocated_bytes) |
| 66 usage->max_allocated_bytes = allocated_bytes; |
| 67 } |
64 } else { | 68 } else { |
65 usage->alloc_bytes += size; | 69 usage->alloc_bytes += size; |
66 } | 70 } |
67 } | 71 } |
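The net-positive check introduced in the new RecordAlloc matters because the counters are unsigned: a scope that frees memory allocated before the scope began can have free_bytes exceed alloc_bytes, and computing the difference unconditionally would wrap around and poison max_allocated_bytes. A minimal, self-contained sketch of that bookkeeping (hypothetical names, not part of this CL):

#include <algorithm>
#include <cstdint>
#include <iostream>

struct Usage {
  uint64_t alloc_bytes = 0;
  uint64_t free_bytes = 0;
  uint64_t max_allocated_bytes = 0;
};

void OnAlloc(Usage& u, uint64_t bytes) {
  u.alloc_bytes += bytes;
  // Only advance the high-water mark when the scope is net positive;
  // otherwise the unsigned subtraction below would underflow.
  if (u.alloc_bytes > u.free_bytes) {
    u.max_allocated_bytes =
        std::max(u.max_allocated_bytes, u.alloc_bytes - u.free_bytes);
  }
}

void OnFree(Usage& u, uint64_t bytes) {
  u.free_bytes += bytes;
}

int main() {
  Usage u;
  OnFree(u, 128);  // Frees memory that was allocated before the scope began.
  OnAlloc(u, 64);  // Without the guard, 64 - 128 would wrap to ~2^64.
  std::cout << u.max_allocated_bytes << "\n";  // Prints 0.
}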
68 | 72 |
69 void RecordFree(const AllocatorDispatch* next, void* ptr) { | 73 void RecordFree(const AllocatorDispatch* next, void* ptr) { |
70 ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); | 74 ThreadHeapUsage* usage = GetOrCreateThreadUsage(); |
71 if (usage == nullptr) | 75 if (usage == nullptr) |
72 return; | 76 return; |
73 | 77 |
74 size_t estimate = GetAllocSizeEstimate(next, ptr); | 78 size_t estimate = GetAllocSizeEstimate(next, ptr); |
75 usage->free_ops++; | 79 usage->free_ops++; |
76 usage->free_bytes += estimate; | 80 usage->free_bytes += estimate; |
77 } | 81 } |
78 | 82 |
79 void* AllocFn(const AllocatorDispatch* self, size_t size) { | 83 void* AllocFn(const AllocatorDispatch* self, size_t size) { |
80 void* ret = self->next->alloc_function(self->next, size); | 84 void* ret = self->next->alloc_function(self->next, size); |
(...skipping 42 matching lines...) |
123 | 127 |
124 size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) { | 128 size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) { |
125 return self->next->get_size_estimate_function(self->next, address); | 129 return self->next->get_size_estimate_function(self->next, address); |
126 } | 130 } |
127 | 131 |
128 // The allocator dispatch used to intercept heap operations. | 132 // The allocator dispatch used to intercept heap operations. |
129 AllocatorDispatch allocator_dispatch = { | 133 AllocatorDispatch allocator_dispatch = { |
130 &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, | 134 &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, |
131 &FreeFn, &GetSizeEstimateFn, nullptr}; | 135 &FreeFn, &GetSizeEstimateFn, nullptr}; |
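The dispatch table above plugs into the allocator shim's chain of responsibility: each hook observes the call and then forwards it to self->next, which ultimately reaches the system allocator. A simplified sketch of that pattern (reduced member set and hypothetical names; the real AllocatorDispatch in base/allocator/allocator_shim.h has more entry points):

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct Dispatch {
  void* (*alloc_function)(const Dispatch* self, size_t size);
  void (*free_function)(const Dispatch* self, void* ptr);
  const Dispatch* next;
};

// Bottom of the chain: delegate straight to the system allocator.
void* SystemAlloc(const Dispatch*, size_t size) { return malloc(size); }
void SystemFree(const Dispatch*, void* ptr) { free(ptr); }
const Dispatch system_dispatch = {&SystemAlloc, &SystemFree, nullptr};

// An interposing layer: observe the operation, then delegate downward.
void* CountingAlloc(const Dispatch* self, size_t size) {
  printf("alloc of %zu bytes\n", size);
  return self->next->alloc_function(self->next, size);
}
void CountingFree(const Dispatch* self, void* ptr) {
  printf("free\n");
  self->next->free_function(self->next, ptr);
}
const Dispatch counting_dispatch = {&CountingAlloc, &CountingFree,
                                    &system_dispatch};

int main() {
  void* p = counting_dispatch.alloc_function(&counting_dispatch, 32);
  counting_dispatch.free_function(&counting_dispatch, p);
}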
132 | 136 |
133 ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() { | 137 ThreadHeapUsage* GetOrCreateThreadUsage() { |
134 ScopedThreadHeapUsage::ThreadAllocatorUsage* allocator_usage = | 138 ThreadHeapUsage* allocator_usage = |
135 static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>( | 139 static_cast<ThreadHeapUsage*>(g_thread_allocator_usage.Get()); |
136 g_thread_allocator_usage.Get()); | |
137 if (allocator_usage == kInitializingSentinel) | 140 if (allocator_usage == kInitializingSentinel) |
138 return nullptr; // Re-entrancy case. | 141 return nullptr; // Re-entrancy case. |
139 | 142 |
140 if (allocator_usage == nullptr) { | 143 if (allocator_usage == nullptr) { |
141 // Prevent reentrancy due to the allocation below. | 144 // Prevent reentrancy due to the allocation below. |
142 g_thread_allocator_usage.Set(kInitializingSentinel); | 145 g_thread_allocator_usage.Set(kInitializingSentinel); |
143 | 146 |
144 allocator_usage = new ScopedThreadHeapUsage::ThreadAllocatorUsage; | 147 allocator_usage = new ThreadHeapUsage; |
145 memset(allocator_usage, 0, sizeof(*allocator_usage)); | 148 memset(allocator_usage, 0, sizeof(*allocator_usage)); |
146 g_thread_allocator_usage.Set(allocator_usage); | 149 g_thread_allocator_usage.Set(allocator_usage); |
147 } | 150 } |
148 | 151 |
149 return allocator_usage; | 152 return allocator_usage; |
150 } | 153 } |
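GetOrCreateThreadUsage guards against re-entrancy: the lazy `new ThreadHeapUsage` itself goes through the allocator hooks, which would call back into this function before the TLS slot is populated. The sentinel breaks that cycle. A standalone sketch of the pattern (using thread_local for brevity instead of ThreadLocalStorage::StaticSlot; names are illustrative):

#include <cstdint>

struct ThreadStats {
  uint64_t alloc_ops = 0;
};

thread_local ThreadStats* tls_stats = nullptr;
ThreadStats* const kInitializing = reinterpret_cast<ThreadStats*>(-1);

ThreadStats* GetOrCreateStats() {
  if (tls_stats == kInitializing)
    return nullptr;  // Re-entered while initializing: skip accounting.
  if (tls_stats == nullptr) {
    // Mark initialization in progress before allocating; in the real shim the
    // `new` below re-enters the hooks and hits the sentinel check above.
    tls_stats = kInitializing;
    tls_stats = new ThreadStats();
  }
  return tls_stats;
}

// The allocation hook simply bails out when GetOrCreateStats() returns null.
void OnAllocHook() {
  if (ThreadStats* stats = GetOrCreateStats())
    stats->alloc_ops++;
}

int main() {
  OnAllocHook();
  OnAllocHook();
  return 0;
}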
151 | 154 |
152 } // namespace | 155 } // namespace |
153 | 156 |
154 ScopedThreadHeapUsage::ScopedThreadHeapUsage() { | 157 ThreadHeapUsageTracker::ThreadHeapUsageTracker() : thread_usage_(nullptr) { |
155 // Initialize must be called before creating instances of this class. | 158 static_assert(std::is_pod<ThreadHeapUsage>::value, "Must be POD."); |
156 CHECK(g_thread_allocator_usage.initialized()); | 159 } |
157 | 160 |
158 ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); | 161 ThreadHeapUsageTracker::~ThreadHeapUsageTracker() { |
159 usage_at_creation_ = *usage; | 162 DCHECK(thread_checker_.CalledOnValidThread()); |
| 163 |
| 164 if (thread_usage_ != nullptr) { |
| 165 // If this tracker wasn't stopped, make it inclusive so that the |
| 166 // usage isn't lost. |
| 167 Stop(false); |
| 168 } |
| 169 } |
| 170 |
| 171 void ThreadHeapUsageTracker::Start() { |
| 172 DCHECK(g_thread_allocator_usage.initialized()); |
| 173 |
| 174 thread_usage_ = GetOrCreateThreadUsage(); |
| 175 usage_ = *thread_usage_; |
160 | 176 |
161 // Reset the stats for our current scope. | 177 // Reset the stats for our current scope. |
162 // The per-thread usage instance now tracks this scope's usage, while this | 178 // The per-thread usage instance now tracks this scope's usage, while this |
163 // instance persists the outer scope's usage stats. On destruction, this | 179 // instance persists the outer scope's usage stats. On destruction, this |
164 // instance will restore the outer scope's usage stats with this scope's usage | 180 // instance will restore the outer scope's usage stats with this scope's |
165 // added. | 181 // usage added. |
166 memset(usage, 0, sizeof(*usage)); | 182 memset(thread_usage_, 0, sizeof(*thread_usage_)); |
167 | |
168 static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD."); | |
169 } | 183 } |
170 | 184 |
171 ScopedThreadHeapUsage::~ScopedThreadHeapUsage() { | 185 void ThreadHeapUsageTracker::Stop(bool usage_is_exclusive) { |
172 DCHECK(thread_checker_.CalledOnValidThread()); | 186 ThreadHeapUsage current = *thread_usage_; |
| 187 if (usage_is_exclusive) { |
| 188 // Restore the outer scope. |
| 189 *thread_usage_ = usage_; |
| 190 } else { |
| 191 // Update the outer scope with the accrued inner usage. |
| 192 if (thread_usage_->max_allocated_bytes) { |
| 193 uint64_t outer_net_alloc_bytes = usage_.alloc_bytes - usage_.free_bytes; |
173 | 194 |
174 ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); | 195 thread_usage_->max_allocated_bytes = |
| 196 std::max(usage_.max_allocated_bytes, |
| 197 outer_net_alloc_bytes + thread_usage_->max_allocated_bytes); |
| 198 } |
175 | 199 |
176 // Update the outer max. | 200 thread_usage_->alloc_ops += usage_.alloc_ops; |
177 if (usage->max_allocated_bytes) { | 201 thread_usage_->alloc_bytes += usage_.alloc_bytes; |
178 uint64_t outer_net_alloc_bytes = | 202 thread_usage_->alloc_overhead_bytes += usage_.alloc_overhead_bytes; |
179 usage_at_creation_.alloc_bytes - usage_at_creation_.free_bytes; | 203 thread_usage_->free_ops += usage_.free_ops; |
180 | 204 thread_usage_->free_bytes += usage_.free_bytes; |
181 usage->max_allocated_bytes = | |
182 std::max(usage_at_creation_.max_allocated_bytes, | |
183 outer_net_alloc_bytes + usage->max_allocated_bytes); | |
184 } | 205 } |
185 | 206 |
186 usage->alloc_ops += usage_at_creation_.alloc_ops; | 207 thread_usage_ = nullptr; |
187 usage->alloc_bytes += usage_at_creation_.alloc_bytes; | 208 usage_ = current; |
188 usage->alloc_overhead_bytes += usage_at_creation_.alloc_overhead_bytes; | |
189 usage->free_ops += usage_at_creation_.free_ops; | |
190 usage->free_bytes += usage_at_creation_.free_bytes; | |
191 } | 209 } |
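For reference, a hypothetical caller of the new Start()/Stop() interface, assuming EnableHeapTracking() has already run for the process and that the measured snapshot stored in usage_ is exposed through an accessor (sketch only, not part of this CL):

#include <memory>

#include "base/debug/thread_heap_usage_tracker.h"
#include "base/logging.h"

void MeasureWorkload() {
  base::debug::ThreadHeapUsageTracker tracker;
  tracker.Start();

  // Workload under measurement.
  std::unique_ptr<char[]> buffer(new char[64 * 1024]);

  // Pass true to keep the measurement out of the enclosing scope; the
  // destructor instead calls Stop(false) so an unfinished measurement is
  // folded back into any outer tracker.
  tracker.Stop(true /* usage_is_exclusive */);

  const base::debug::ThreadHeapUsage usage = tracker.usage();
  LOG(INFO) << "alloc ops: " << usage.alloc_ops
            << " net bytes: " << (usage.alloc_bytes - usage.free_bytes);
}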
192 | 210 |
193 ScopedThreadHeapUsage::ThreadAllocatorUsage | 211 ThreadHeapUsage ThreadHeapUsageTracker::CurrentUsage() { |
194 ScopedThreadHeapUsage::CurrentUsage() { | 212 DCHECK(g_thread_allocator_usage.initialized()); |
195 ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); | 213 |
| 214 ThreadHeapUsage* usage = GetOrCreateThreadUsage(); |
| 215 DCHECK_NE(nullptr, usage); |
196 return *usage; | 216 return *usage; |
197 } | 217 } |
198 | 218 |
199 void ScopedThreadHeapUsage::Initialize() { | 219 void ThreadHeapUsageTracker::EnableHeapTracking() { |
200 if (!g_thread_allocator_usage.initialized()) { | 220 EnsureTLSInitialized(); |
201 g_thread_allocator_usage.Initialize([](void* allocator_usage) { | |
202 delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>( | |
203 allocator_usage); | |
204 }); | |
205 } | |
206 } | |
207 | 221 |
208 void ScopedThreadHeapUsage::EnableHeapTracking() { | |
209 CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling."; | 222 CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling."; |
210 g_heap_tracking_enabled = true; | 223 g_heap_tracking_enabled = true; |
211 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | 224 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
212 base::allocator::InsertAllocatorDispatch(&allocator_dispatch); | 225 base::allocator::InsertAllocatorDispatch(&allocator_dispatch); |
213 #else | 226 #else |
214 CHECK(false) << "Can't enable heap tracking without the shim."; | 227 CHECK(false) << "Can't enable heap tracking without the shim."; |
215 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | 228 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
216 } | 229 } |
217 | 230 |
218 void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() { | 231 bool ThreadHeapUsageTracker::IsHeapTrackingEnabled() { |
| 232 return g_heap_tracking_enabled; |
| 233 } |
| 234 |
| 235 void ThreadHeapUsageTracker::DisableHeapTrackingForTesting() { |
219 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | 236 #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
220 base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch); | 237 base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch); |
221 #else | 238 #else |
222 CHECK(false) << "Can't disable heap tracking without the shim."; | 239 CHECK(false) << "Can't disable heap tracking without the shim."; |
223 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) | 240 #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
224 DCHECK_EQ(true, g_heap_tracking_enabled) << "Heap tracking not enabled."; | 241 DCHECK_EQ(true, g_heap_tracking_enabled) << "Heap tracking not enabled."; |
225 g_heap_tracking_enabled = false; | 242 g_heap_tracking_enabled = false; |
226 } | 243 } |
227 | 244 |
| 245 void ThreadHeapUsageTracker::EnsureTLSInitializedForTesting() { |
| 246 EnsureTLSInitialized(); |
| 247 } |
| 248 |
228 base::allocator::AllocatorDispatch* | 249 base::allocator::AllocatorDispatch* |
229 ScopedThreadHeapUsage::GetDispatchForTesting() { | 250 ThreadHeapUsageTracker::GetDispatchForTesting() { |
230 return &allocator_dispatch; | 251 return &allocator_dispatch; |
231 } | 252 } |
232 | 253 |
| 254 void ThreadHeapUsageTracker::EnsureTLSInitialized() { |
| 255 if (!g_thread_allocator_usage.initialized()) { |
| 256 g_thread_allocator_usage.Initialize([](void* allocator_usage) { |
| 257 delete static_cast<ThreadHeapUsage*>(allocator_usage); |
| 258 }); |
| 259 } |
| 260 } |
| 261 |
233 } // namespace debug | 262 } // namespace debug |
234 } // namespace base | 263 } // namespace base |