Chromium Code Reviews

Unified Diff: base/debug/scoped_thread_heap_usage.cc

Issue 2386123003: Add heap allocator usage to task profiler. (Closed)
Patch Set: "Figure out where the @#$%! corruption is coming from. Move heap tracking to TaskStopwatch." Created 4 years, 2 months ago
  // Copyright 2016 The Chromium Authors. All rights reserved.
  // Use of this source code is governed by a BSD-style license that can be
  // found in the LICENSE file.

  #include "base/debug/scoped_thread_heap_usage.h"

+ #include <windows.h>
+
Primiano Tucci (use gerrit) 2016/10/13 20:47:44 if defined(OS_WINDOWS) ?? (but then you need build
Sigurður Ásgeirsson 2016/10/14 13:23:47 Thanks, this is debugging remnants - <windows.h> h
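
For context on the thread above: Chromium guards platform-specific includes with the macros from build/build_config.h, and the Windows macro is OS_WIN rather than OS_WINDOWS. Had the include stayed, the guarded form would look roughly like this sketch:

    #include "build/build_config.h"

    #if defined(OS_WIN)
    #include <windows.h>
    #endif  // defined(OS_WIN)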
  #include <stdint.h>
  #include <algorithm>
  #include <type_traits>

  #include "base/allocator/allocator_shim.h"
  #include "base/allocator/features.h"
  #include "base/logging.h"
  #include "base/threading/thread_local_storage.h"
  #include "build/build_config.h"

  #if defined(OS_MACOSX) || defined(OS_IOS)
  #include <malloc/malloc.h>
  #else
  #include <malloc.h>
  #endif

  namespace base {
  namespace debug {

  namespace {

  using base::allocator::AllocatorDispatch;

  ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER;

- ScopedThreadHeapUsage::ThreadAllocatorUsage* const kInitializingSentinel =
-     reinterpret_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(-1);
+ ThreadAllocatorUsage* const kInitializingSentinel =
+     reinterpret_cast<ThreadAllocatorUsage*>(-1);

  bool g_heap_tracking_enabled = false;

  // Forward declared as it needs to delegate memory allocation to the next
  // lower shim.
- ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage();
+ ThreadAllocatorUsage* GetOrCreateThreadUsage();

  size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) {
    if (ptr == nullptr)
      return 0U;

    return next->get_size_estimate_function(next, ptr);
  }

  void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) {
-   ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
+   ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
    if (usage == nullptr)
      return;

    usage->alloc_ops++;
    size_t estimate = GetAllocSizeEstimate(next, ptr);
    if (size && estimate) {
+     // Only keep track of the net number of bytes allocated in the scope if the
+     // size estimate function returns sane values, e.g. non-zero.
      usage->alloc_bytes += estimate;
      usage->alloc_overhead_bytes += estimate - size;

-     // Only keep track of the net number of bytes allocated in the scope if the
-     // size estimate function returns sane values, e.g. non-zero.
-     uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
-     if (allocated_bytes > usage->max_allocated_bytes)
-       usage->max_allocated_bytes = allocated_bytes;
+     // Record the max outstanding number of bytes, but only if the difference
+     // is net positive (e.g. more bytes allocated than freed in the scope).
+     if (usage->alloc_bytes > usage->free_bytes) {
+       uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
+       if (allocated_bytes > usage->max_allocated_bytes)
+         usage->max_allocated_bytes = allocated_bytes;
+     }
    } else {
      usage->alloc_bytes += size;
    }
  }

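The new branch above keeps a high-water mark of net outstanding bytes, guarded so the unsigned subtraction cannot wrap when a scope frees more than it allocated. A minimal standalone sketch of that bookkeeping (illustrative names, not part of this CL):

    #include <cstdint>

    struct Counters {
      uint64_t alloc_bytes = 0;
      uint64_t free_bytes = 0;
      uint64_t max_allocated_bytes = 0;
    };

    // Called after each recorded allocation. free_bytes can exceed
    // alloc_bytes when a scope frees memory allocated before it started,
    // hence the guard before the unsigned subtraction.
    void UpdateWatermark(Counters* c) {
      if (c->alloc_bytes > c->free_bytes) {
        uint64_t outstanding = c->alloc_bytes - c->free_bytes;
        if (outstanding > c->max_allocated_bytes)
          c->max_allocated_bytes = outstanding;
      }
    }
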
  void RecordFree(const AllocatorDispatch* next, void* ptr) {
-   ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
+   ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
    if (usage == nullptr)
      return;

    size_t estimate = GetAllocSizeEstimate(next, ptr);
    usage->free_ops++;
    usage->free_bytes += estimate;
  }

  void* AllocFn(const AllocatorDispatch* self, size_t size) {
    void* ret = self->next->alloc_function(self->next, size);
  (...skipping 42 matching lines...)

  size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) {
    return self->next->get_size_estimate_function(self->next, address);
  }

  // The allocator dispatch used to intercept heap operations.
  AllocatorDispatch allocator_dispatch = {
      &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn,
      &FreeFn,  &GetSizeEstimateFn,      nullptr};

- ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() {
-   ScopedThreadHeapUsage::ThreadAllocatorUsage* allocator_usage =
-       static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
-           g_thread_allocator_usage.Get());
+ ThreadAllocatorUsage* GetOrCreateThreadUsage() {
+   // DO NOT SUBMIT!
+   if (!g_thread_allocator_usage.initialized())
+     return nullptr;

Primiano Tucci (use gerrit) 2016/10/13 20:47:44  uh? Shouldn't just you initialize this in some mai
Sigurður Ásgeirsson 2016/10/14 20:11:35  Sorry, more debugging remnants.

+
+   ThreadAllocatorUsage* allocator_usage =
+       static_cast<ThreadAllocatorUsage*>(g_thread_allocator_usage.Get());
    if (allocator_usage == kInitializingSentinel)
      return nullptr;  // Re-entrancy case.

    if (allocator_usage == nullptr) {
      // Prevent reentrancy due to the allocation below.
      g_thread_allocator_usage.Set(kInitializingSentinel);

-     allocator_usage = new ScopedThreadHeapUsage::ThreadAllocatorUsage;
+     allocator_usage = new ThreadAllocatorUsage;
      memset(allocator_usage, 0, sizeof(*allocator_usage));
      g_thread_allocator_usage.Set(allocator_usage);
    }

    return allocator_usage;
  }

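Allocating the per-thread usage struct would itself re-enter the shim, so the sentinel breaks the cycle: a re-entrant call sees kInitializingSentinel and bails out unrecorded. The pattern in isolation (hypothetical names, plain C++ thread_local instead of the TLS slot, for brevity):

    struct State { int dummy; };

    thread_local State* tls_state = nullptr;
    State* const kInitializing = reinterpret_cast<State*>(-1);

    State* GetOrCreateState() {
      if (tls_state == kInitializing)
        return nullptr;  // Re-entered from the allocation below; bail out.
      if (tls_state == nullptr) {
        tls_state = kInitializing;  // Guard before allocating.
        tls_state = new State();    // May re-enter; the guard absorbs it.
      }
      return tls_state;
    }
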
  }  // namespace

- ScopedThreadHeapUsage::ScopedThreadHeapUsage() {
-   // Initialize must be called before creating instances of this class.
-   CHECK(g_thread_allocator_usage.initialized());
-
-   ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
-   usage_at_creation_ = *usage;
+ HeapUsageTracker::HeapUsageTracker() : thread_usage_(nullptr) {
+   static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD.");
+ }
+
+ HeapUsageTracker::~HeapUsageTracker() {
+   // If this is called, weirdness happens in Chrome's state at large.
+   // CHECK(thread_checker_.CalledOnValidThread());

Primiano Tucci (use gerrit) 2016/10/13 20:47:44  Please tell me you did never hit this CHECK :)
Sigurður Ásgeirsson 2016/10/14 20:11:35  No, it just wrecked Chrome's state to the point wh

+ }
+
+ void HeapUsageTracker::Start() {
+   // TODO(siggi): Grrrr - more usable this way.
+   if (!g_thread_allocator_usage.initialized())

Primiano Tucci (use gerrit) 2016/10/13 20:47:44  hmm the TLS slot itself should be initialzied only
Sigurður Ásgeirsson 2016/10/14 20:11:35  Yeah, I'm not sure what's the right way to do this

+     return;
+
+   DCHECK(g_thread_allocator_usage.initialized());
+
+   thread_usage_ = GetOrCreateThreadUsage();
+   DCHECK_NE(nullptr, thread_usage_);

Primiano Tucci (use gerrit) 2016/10/13 20:47:44  well, even without this dcheck, if this happens to
Sigurður Ásgeirsson 2016/10/14 20:11:35  Done.

+
+   usage_ = *thread_usage_;

    // Reset the stats for our current scope.
    // The per-thread usage instance now tracks this scope's usage, while this
    // instance persists the outer scope's usage stats. On destruction, this
-   // instance will restore the outer scope's usage stats with this scope's usage
-   // added.
-   memset(usage, 0, sizeof(*usage));
-
-   static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD.");
+   // instance will restore the outer scope's usage stats with this scope's
+   // usage added.
+   memset(thread_usage_, 0, sizeof(*thread_usage_));
  }

- ScopedThreadHeapUsage::~ScopedThreadHeapUsage() {
-   DCHECK(thread_checker_.CalledOnValidThread());
-
-   ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
-
-   // Update the outer max.
-   if (usage->max_allocated_bytes) {
-     uint64_t outer_net_alloc_bytes =
-         usage_at_creation_.alloc_bytes - usage_at_creation_.free_bytes;
-
-     usage->max_allocated_bytes =
-         std::max(usage_at_creation_.max_allocated_bytes,
-                  outer_net_alloc_bytes + usage->max_allocated_bytes);
-   }
-
-   usage->alloc_ops += usage_at_creation_.alloc_ops;
-   usage->alloc_bytes += usage_at_creation_.alloc_bytes;
-   usage->alloc_overhead_bytes += usage_at_creation_.alloc_overhead_bytes;
-   usage->free_ops += usage_at_creation_.free_ops;
-   usage->free_bytes += usage_at_creation_.free_bytes;
- }
+ void HeapUsageTracker::Stop(bool usage_is_exclusive) {
+   // TODO(siggi): Grrrr - more usable this way.
+   if (thread_usage_ == nullptr) {
+     memset(&usage_, 0, sizeof(usage_));
+     return;
+   }
+
+   DCHECK_NE(nullptr, thread_usage_);
+
+   ThreadAllocatorUsage current = CurrentUsage();
+   if (usage_is_exclusive) {
+     *thread_usage_ = usage_;
+   } else {
+     // Update the outer max.
+     if (thread_usage_->max_allocated_bytes) {
+       uint64_t outer_net_alloc_bytes = usage_.alloc_bytes - usage_.free_bytes;
+
+       thread_usage_->max_allocated_bytes =
+           std::max(usage_.max_allocated_bytes,
+                    outer_net_alloc_bytes + thread_usage_->max_allocated_bytes);
+     }
+
+     thread_usage_->alloc_ops += usage_.alloc_ops;
+     thread_usage_->alloc_bytes += usage_.alloc_bytes;
+     thread_usage_->alloc_overhead_bytes += usage_.alloc_overhead_bytes;
+     thread_usage_->free_ops += usage_.free_ops;
+     thread_usage_->free_bytes += usage_.free_bytes;
+   }
+
+   usage_ = current;
+ }

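With the Start()/Stop(bool) split, trackers nest the way the old scoped class did: unless usage_is_exclusive is set, an inner tracker's stats are folded back into the enclosing scope, and the outer watermark is recomputed against the outer net allocation. A hedged usage sketch, assuming only the API visible in this diff:

    base::debug::HeapUsageTracker outer;
    outer.Start();
    {
      base::debug::HeapUsageTracker inner;
      inner.Start();
      // Allocations here are attributed to the inner scope.
      inner.Stop(false);  // false: fold inner stats into the outer scope.
    }
    outer.Stop(false);  // Outer stats now include the inner scope's usage.
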
- ScopedThreadHeapUsage::ThreadAllocatorUsage
- ScopedThreadHeapUsage::CurrentUsage() {
+ ThreadAllocatorUsage HeapUsageTracker::CurrentUsage() {
+   DCHECK(g_thread_allocator_usage.initialized());
+
    ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
+   DCHECK_NE(nullptr, usage);
    return *usage;
  }

- void ScopedThreadHeapUsage::Initialize() {
-   if (!g_thread_allocator_usage.initialized()) {
-     g_thread_allocator_usage.Initialize([](void* allocator_usage) {
-       delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
-           allocator_usage);
-     });
-   }
- }
-
- void ScopedThreadHeapUsage::EnableHeapTracking() {
+ void HeapUsageTracker::EnableHeapTracking() {
+   EnsureTLSInitialized();
+
    CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling.";
    g_heap_tracking_enabled = true;
  #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
    base::allocator::InsertAllocatorDispatch(&allocator_dispatch);
  #else
    CHECK(false) << "Can't enable heap tracking without the shim.";
  #endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  }

+ bool HeapUsageTracker::IsHeapTrackingEnabled() {
+   return g_heap_tracking_enabled;
+ }
+
- void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() {
+ void HeapUsageTracker::DisableHeapTrackingForTesting() {
  #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
    base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch);
  #else
    CHECK(false) << "Can't disable heap tracking without the shim.";
  #endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
    DCHECK_EQ(true, g_heap_tracking_enabled) << "Heap tracking not enabled.";
    g_heap_tracking_enabled = false;
  }

+ void HeapUsageTracker::EnsureTLSInitializedForTesting() {
+   EnsureTLSInitialized();
+ }
+
- base::allocator::AllocatorDispatch*
- ScopedThreadHeapUsage::GetDispatchForTesting() {
+ base::allocator::AllocatorDispatch* HeapUsageTracker::GetDispatchForTesting() {
    return &allocator_dispatch;
  }

+ void HeapUsageTracker::EnsureTLSInitialized() {
+   if (!g_thread_allocator_usage.initialized()) {
+     g_thread_allocator_usage.Initialize([](void* allocator_usage) {

fdoray 2016/10/14 13:03:32  How can you be sure that this isn't initialized co
Sigurður Ásgeirsson 2016/10/14 13:23:47  By contract, EnableHeapTracking can only be called

+       delete static_cast<ThreadAllocatorUsage*>(allocator_usage);
+     });
+   }
+ }

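On fdoray's concurrency question: the CL leans on the stated contract that EnableHeapTracking() is called once, early, before other threads can race on the slot. Without such a contract, the lazy initialization would typically be serialized, e.g. with std::call_once; a sketch of that alternative (not what this CL does):

    #include <mutex>

    std::once_flag g_tls_init_once;

    void EnsureTLSInitializedThreadSafe() {
      // call_once runs the body exactly once even under races; late
      // arrivals block until initialization completes.
      std::call_once(g_tls_init_once, [] {
        // Initialize the TLS slot here, as EnsureTLSInitialized() does.
      });
    }
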
  }  // namespace debug
  }  // namespace base