Chromium Code Reviews

Side by Side Diff: base/debug/scoped_thread_heap_usage.cc

Issue 2163783003: Implement a ScopedThreadHeapUsage class to allow profiling per-thread heap usage. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@shim-default
Patch Set: Moar speling [sic]. Created 4 years, 3 months ago
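
For context, a minimal sketch of how the class introduced by this patch is meant to be used, inferred from the API visible in the diff below. DoWorkThatAllocates() is a hypothetical placeholder, and the snippet assumes Initialize(), EnableHeapTracking() and Now() are static members, as their definitions suggest:

    #include "base/debug/scoped_thread_heap_usage.h"

    void ProfileAThread() {
      // One-time setup; EnableHeapTracking() requires the experimental
      // allocator shim (see the #if BUILDFLAG checks in the diff).
      base::debug::ScopedThreadHeapUsage::Initialize();
      base::debug::ScopedThreadHeapUsage::EnableHeapTracking();

      base::debug::ScopedThreadHeapUsage scoped_usage;
      DoWorkThatAllocates();  // Hypothetical workload.

      // Snapshot this thread's stats accumulated since |scoped_usage| was
      // created: alloc_ops, alloc_bytes, alloc_overhead_bytes, free_ops,
      // free_bytes and max_allocated_bytes.
      base::debug::ScopedThreadHeapUsage::ThreadAllocatorUsage usage =
          base::debug::ScopedThreadHeapUsage::Now();
    }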
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/debug/scoped_thread_heap_usage.h"

#include <stdint.h>
#include <string.h>
#include <algorithm>
#include <type_traits>

#include "base/allocator/allocator_shim.h"
#include "base/allocator/features.h"
#include "base/logging.h"
#include "base/threading/thread_local_storage.h"
#include "build/build_config.h"

#if defined(OS_MACOSX) || defined(OS_IOS)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif

namespace base {
namespace debug {

namespace {

using base::allocator::AllocatorDispatch;

ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER;

ScopedThreadHeapUsage::ThreadAllocatorUsage* const kInitializingSentinel =
    reinterpret_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(-1);

bool g_heap_tracking_enabled = false;

// Forward declared as it needs to delegate memory allocation to the next
// lower shim.
ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage();

size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) {
  if (ptr == nullptr)
    return 0U;

  return next->get_size_estimate_function(next, ptr);
}

void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) {
  ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  if (usage == nullptr)
    return;

  usage->alloc_ops++;
  size_t estimate = GetAllocSizeEstimate(next, ptr);
  if (size && estimate) {
    usage->alloc_bytes += estimate;
    usage->alloc_overhead_bytes += estimate - size;

    // Only keep track of the net number of bytes allocated in the scope if the
    // size estimate function returns sane values, i.e. non-zero.
    uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
    if (allocated_bytes > usage->max_allocated_bytes)
      usage->max_allocated_bytes = allocated_bytes;
  } else {
    usage->alloc_bytes += size;
  }
}

void RecordFree(const AllocatorDispatch* next, void* ptr) {
  ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  if (usage == nullptr)
    return;

  size_t estimate = GetAllocSizeEstimate(next, ptr);
  usage->free_ops++;
  usage->free_bytes += estimate;
}

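The estimate-versus-requested-size distinction above is what alloc_overhead_bytes captures: allocators typically round a request up to a bucket size. A standalone illustration, assuming a glibc-style malloc_usable_size() (the same kind of facility a shim's get_size_estimate_function typically wraps):

    #include <malloc.h>  // malloc_usable_size() on glibc; an assumption here.
    #include <stdio.h>
    #include <stdlib.h>

    int main() {
      void* p = malloc(10);
      // The heap may serve the 10-byte request from, say, a 16-byte bucket;
      // the difference is the per-allocation overhead RecordAlloc() tallies.
      size_t usable = malloc_usable_size(p);
      printf("requested=10 usable=%zu overhead=%zu\n", usable, usable - 10u);
      free(p);
      return 0;
    }
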
void* AllocFn(const AllocatorDispatch* self, size_t size) {
  void* ret = self->next->alloc_function(self->next, size);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void* AllocZeroInitializedFn(const AllocatorDispatch* self,
                             size_t n,
                             size_t size) {
  void* ret = self->next->alloc_zero_initialized_function(self->next, n, size);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void* AllocAlignedFn(const AllocatorDispatch* self,
                     size_t alignment,
                     size_t size) {
  void* ret = self->next->alloc_aligned_function(self->next, alignment, size);
  if (ret != nullptr)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void* ReallocFn(const AllocatorDispatch* self, void* address, size_t size) {
  if (address != nullptr)
    RecordFree(self->next, address);

  void* ret = self->next->realloc_function(self->next, address, size);
  if (ret != nullptr && size != 0)
    RecordAlloc(self->next, ret, size);

  return ret;
}

void FreeFn(const AllocatorDispatch* self, void* address) {
  if (address != nullptr)
    RecordFree(self->next, address);
  self->next->free_function(self->next, address);
}

size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) {
  return self->next->get_size_estimate_function(self->next, address);
}

// The allocator dispatch used to intercept heap operations.
AllocatorDispatch allocator_dispatch = {
    &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn,
    &FreeFn, &GetSizeEstimateFn, nullptr};

ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() {
  ScopedThreadHeapUsage::ThreadAllocatorUsage* allocator_usage =
      static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
          g_thread_allocator_usage.Get());
  if (allocator_usage == kInitializingSentinel)
    return nullptr;  // Re-entrancy case.

  if (allocator_usage == nullptr) {
    // Prevent reentrancy due to the allocation below.
    g_thread_allocator_usage.Set(kInitializingSentinel);

    allocator_usage = new ScopedThreadHeapUsage::ThreadAllocatorUsage;
    memset(allocator_usage, 0, sizeof(*allocator_usage));
    g_thread_allocator_usage.Set(allocator_usage);
  }

  return allocator_usage;
}

}  // namespace

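The sentinel dance in GetOrCreateThreadUsage() deserves a note: the `new` that creates the per-thread struct itself goes through the hooked allocator, so AllocFn() re-enters GetOrCreateThreadUsage(); the sentinel makes that inner call return nullptr, and RecordAlloc() bails out, so the bootstrap allocation is never recorded. A distilled sketch of the same pattern, using C++ thread_local in place of the TLS slot for brevity:

    #include <cstdint>

    struct Stats { uint64_t allocs = 0; };

    thread_local Stats* tls_stats = nullptr;
    Stats* const kBusy = reinterpret_cast<Stats*>(-1);

    Stats* GetOrCreateStats() {
      if (tls_stats == kBusy)
        return nullptr;  // Re-entered from the `new` below; caller must bail.
      if (tls_stats == nullptr) {
        tls_stats = kBusy;        // Block re-entrant hook invocations...
        tls_stats = new Stats();  // ...while this allocation runs the hooks.
      }
      return tls_stats;
    }
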
ScopedThreadHeapUsage::ScopedThreadHeapUsage() {
  // Initialize must be called before creating instances of this class.
  CHECK(g_thread_allocator_usage.initialized());

  ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  usage_at_creation_ = *usage;

  // Reset the stats for our current scope.
  // The per-thread usage instance now tracks this scope's usage, while this
  // instance persists the outer scope's usage stats. On destruction, this
  // instance will restore the outer scope's usage stats with this scope's
  // usage added.
  memset(usage, 0, sizeof(*usage));

  static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD.");
}

ScopedThreadHeapUsage::~ScopedThreadHeapUsage() {
  DCHECK(thread_checker_.CalledOnValidThread());

  ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();

  // Update the outer scope's max by rebasing this scope's peak onto the
  // outer scope's net allocation at the time this scope was created.
  if (usage->max_allocated_bytes) {
    uint64_t outer_net_alloc_bytes =
        usage_at_creation_.alloc_bytes - usage_at_creation_.free_bytes;

    usage->max_allocated_bytes =
        std::max(usage_at_creation_.max_allocated_bytes,
                 outer_net_alloc_bytes + usage->max_allocated_bytes);
  }

  usage->alloc_ops += usage_at_creation_.alloc_ops;
  usage->alloc_bytes += usage_at_creation_.alloc_bytes;
  usage->alloc_overhead_bytes += usage_at_creation_.alloc_overhead_bytes;
  usage->free_ops += usage_at_creation_.free_ops;
  usage->free_bytes += usage_at_creation_.free_bytes;
}

ScopedThreadHeapUsage::ThreadAllocatorUsage ScopedThreadHeapUsage::Now() {
  ThreadAllocatorUsage* usage = GetOrCreateThreadUsage();
  return *usage;
}

void ScopedThreadHeapUsage::Initialize() {
  if (!g_thread_allocator_usage.initialized()) {
    g_thread_allocator_usage.Initialize([](void* allocator_usage) {
      delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(
          allocator_usage);
    });
  }
}

void ScopedThreadHeapUsage::EnableHeapTracking() {
  CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling.";
Nico 2016/09/09 14:40:33 check isn't thread-safe (doubt it matters much tho
Primiano Tucci (use gerrit) 2016/09/09 14:56:18 InsertAllocatorDispatch has a DCHECK(CalledOnValid
Sigurður Ásgeirsson 2016/09/09 14:58:20 Yeah, this is best effort.
Sigurður Ásgeirsson 2016/09/09 14:58:20 Ah, nice. I figure this is a dev-time buzz only, s
  g_heap_tracking_enabled = true;
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  base::allocator::InsertAllocatorDispatch(&allocator_dispatch);
#else
  CHECK(false) << "Can't enable heap tracking without the shim.";
#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
}

void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() {
#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch);
#else
  CHECK(false) << "Can't disable heap tracking without the shim.";
#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
  DCHECK_EQ(true, g_heap_tracking_enabled) << "Heap tracking not enabled.";
  g_heap_tracking_enabled = false;
}

base::allocator::AllocatorDispatch*
ScopedThreadHeapUsage::GetDispatchForTesting() {
  return &allocator_dispatch;
}

}  // namespace debug
}  // namespace base
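
To make the destructor's bookkeeping concrete, here is a small standalone program that replays the max-propagation arithmetic with invented numbers (not taken from the patch):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Outer scope at the moment an inner ScopedThreadHeapUsage was created
      // (the values saved in usage_at_creation_); numbers invented.
      uint64_t outer_alloc_bytes = 1000, outer_free_bytes = 200;
      uint64_t outer_max = 900;

      // Peak net allocation observed inside the inner scope.
      uint64_t inner_max = 450;

      // Mirrors ~ScopedThreadHeapUsage(): rebase the inner peak onto the
      // outer scope's net allocation, then keep whichever peak is larger.
      uint64_t outer_net = outer_alloc_bytes - outer_free_bytes;     // 800
      uint64_t merged = std::max(outer_max, outer_net + inner_max);  // 1250
      printf("merged max_allocated_bytes = %llu\n",
             static_cast<unsigned long long>(merged));
      return 0;
    }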
