
Unified Diff: base/debug/thread_heap_usage_tracker.cc

Issue 2697123007: base: Add support for malloc zones to the allocator shim (Closed)
Patch Set: Windows compile error. Created 3 years, 10 months ago
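
The substance of the change: every hook in the allocator shim's AllocatorDispatch chain gains a trailing void* context parameter, which each layer forwards untouched to the next. Per the issue title, this is the plumbing that lets a malloc zone travel through the shim on macOS. As a reading aid, here is a sketch of the updated dispatch signatures, inferred purely from the call sites in the diff below; the authoritative declaration lives in base/allocator/allocator_shim.h, and the exact spelling there may differ:

#include <cstddef>

// Sketch of the context-aware AllocatorDispatch, reconstructed from the
// call sites in this diff. Typedef style and member order are assumptions;
// the real definition is in base/allocator/allocator_shim.h.
struct AllocatorDispatch {
  using AllocFn = void*(const AllocatorDispatch* self, size_t size,
                        void* context);
  using AllocZeroInitializedFn = void*(const AllocatorDispatch* self, size_t n,
                                       size_t size, void* context);
  using AllocAlignedFn = void*(const AllocatorDispatch* self, size_t alignment,
                               size_t size, void* context);
  using ReallocFn = void*(const AllocatorDispatch* self, void* address,
                          size_t size, void* context);
  using FreeFn = void(const AllocatorDispatch* self, void* address,
                      void* context);
  using GetSizeEstimateFn = size_t(const AllocatorDispatch* self,
                                   void* address, void* context);
  using BatchMallocFn = unsigned(const AllocatorDispatch* self, size_t size,
                                 void** results, unsigned num_requested,
                                 void* context);
  using BatchFreeFn = void(const AllocatorDispatch* self, void** to_be_freed,
                           unsigned num_to_be_freed, void* context);
  using FreeDefiniteSizeFn = void(const AllocatorDispatch* self, void* ptr,
                                  size_t size, void* context);

  AllocFn* const alloc_function;
  AllocZeroInitializedFn* const alloc_zero_initialized_function;
  AllocAlignedFn* const alloc_aligned_function;
  ReallocFn* const realloc_function;
  FreeFn* const free_function;
  GetSizeEstimateFn* const get_size_estimate_function;
  BatchMallocFn* const batch_malloc_function;
  BatchFreeFn* const batch_free_function;
  FreeDefiniteSizeFn* const free_definite_size_function;

  const AllocatorDispatch* next;  // The next shim layer in the chain.
};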
 // Copyright 2016 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/debug/thread_heap_usage_tracker.h"

 #include <stdint.h>
 #include <algorithm>
 #include <limits>
 #include <new>
(...skipping 25 matching lines...)
     reinterpret_cast<ThreadHeapUsage*>(kSentinelMask);
 ThreadHeapUsage* const kTeardownSentinel =
     reinterpret_cast<ThreadHeapUsage*>(kSentinelMask | 1);

 bool g_heap_tracking_enabled = false;

 // Forward declared as it needs to delegate memory allocation to the next
 // lower shim.
 ThreadHeapUsage* GetOrCreateThreadUsage();

-size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) {
+size_t GetAllocSizeEstimate(const AllocatorDispatch* next,
+                            void* ptr,
+                            void* context) {
   if (ptr == nullptr)
     return 0U;

-  return next->get_size_estimate_function(next, ptr);
+  return next->get_size_estimate_function(next, ptr, context);
 }

-void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) {
+void RecordAlloc(const AllocatorDispatch* next,
+                 void* ptr,
+                 size_t size,
+                 void* context) {
   ThreadHeapUsage* usage = GetOrCreateThreadUsage();
   if (usage == nullptr)
     return;

   usage->alloc_ops++;
-  size_t estimate = GetAllocSizeEstimate(next, ptr);
+  size_t estimate = GetAllocSizeEstimate(next, ptr, context);
   if (size && estimate) {
     // Only keep track of the net number of bytes allocated in the scope if the
     // size estimate function returns sane values, e.g. non-zero.
     usage->alloc_bytes += estimate;
     usage->alloc_overhead_bytes += estimate - size;

     // Record the max outstanding number of bytes, but only if the difference
     // is net positive (e.g. more bytes allocated than freed in the scope).
     if (usage->alloc_bytes > usage->free_bytes) {
       uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
       if (allocated_bytes > usage->max_allocated_bytes)
         usage->max_allocated_bytes = allocated_bytes;
     }
   } else {
     usage->alloc_bytes += size;
   }
 }

-void RecordFree(const AllocatorDispatch* next, void* ptr) {
+void RecordFree(const AllocatorDispatch* next, void* ptr, void* context) {
   ThreadHeapUsage* usage = GetOrCreateThreadUsage();
   if (usage == nullptr)
     return;

-  size_t estimate = GetAllocSizeEstimate(next, ptr);
+  size_t estimate = GetAllocSizeEstimate(next, ptr, context);
   usage->free_ops++;
   usage->free_bytes += estimate;
 }

-void* AllocFn(const AllocatorDispatch* self, size_t size) {
-  void* ret = self->next->alloc_function(self->next, size);
+void* AllocFn(const AllocatorDispatch* self, size_t size, void* context) {
+  void* ret = self->next->alloc_function(self->next, size, context);
   if (ret != nullptr)
-    RecordAlloc(self->next, ret, size);
+    RecordAlloc(self->next, ret, size, context);

   return ret;
 }

 void* AllocZeroInitializedFn(const AllocatorDispatch* self,
                              size_t n,
-                             size_t size) {
-  void* ret = self->next->alloc_zero_initialized_function(self->next, n, size);
+                             size_t size,
+                             void* context) {
+  void* ret =
+      self->next->alloc_zero_initialized_function(self->next, n, size, context);
   if (ret != nullptr)
-    RecordAlloc(self->next, ret, size);
+    RecordAlloc(self->next, ret, size, context);

   return ret;
 }

 void* AllocAlignedFn(const AllocatorDispatch* self,
                      size_t alignment,
-                     size_t size) {
-  void* ret = self->next->alloc_aligned_function(self->next, alignment, size);
+                     size_t size,
+                     void* context) {
+  void* ret =
+      self->next->alloc_aligned_function(self->next, alignment, size, context);
   if (ret != nullptr)
-    RecordAlloc(self->next, ret, size);
+    RecordAlloc(self->next, ret, size, context);

   return ret;
 }

-void* ReallocFn(const AllocatorDispatch* self, void* address, size_t size) {
+void* ReallocFn(const AllocatorDispatch* self,
+                void* address,
+                size_t size,
+                void* context) {
   if (address != nullptr)
-    RecordFree(self->next, address);
+    RecordFree(self->next, address, context);

-  void* ret = self->next->realloc_function(self->next, address, size);
+  void* ret = self->next->realloc_function(self->next, address, size, context);
   if (ret != nullptr && size != 0)
-    RecordAlloc(self->next, ret, size);
+    RecordAlloc(self->next, ret, size, context);

   return ret;
 }

-void FreeFn(const AllocatorDispatch* self, void* address) {
+void FreeFn(const AllocatorDispatch* self, void* address, void* context) {
   if (address != nullptr)
-    RecordFree(self->next, address);
-  self->next->free_function(self->next, address);
+    RecordFree(self->next, address, context);
+  self->next->free_function(self->next, address, context);
 }

-size_t GetSizeEstimateFn(const AllocatorDispatch* self, void* address) {
-  return self->next->get_size_estimate_function(self->next, address);
+size_t GetSizeEstimateFn(const AllocatorDispatch* self,
+                         void* address,
+                         void* context) {
+  return self->next->get_size_estimate_function(self->next, address, context);
 }

 unsigned BatchMallocFn(const AllocatorDispatch* self,
                        size_t size,
                        void** results,
-                       unsigned num_requested) {
+                       unsigned num_requested,
+                       void* context) {
   unsigned count = self->next->batch_malloc_function(self->next, size, results,
-                                                     num_requested);
+                                                     num_requested, context);
   for (unsigned i = 0; i < count; ++i) {
-    RecordAlloc(self->next, results[i], size);
+    RecordAlloc(self->next, results[i], size, context);
   }
   return count;
 }

 void BatchFreeFn(const AllocatorDispatch* self,
                  void** to_be_freed,
-                 unsigned num_to_be_freed) {
+                 unsigned num_to_be_freed,
+                 void* context) {
   for (unsigned i = 0; i < num_to_be_freed; ++i) {
     if (to_be_freed[i] != nullptr) {
-      RecordFree(self->next, to_be_freed[i]);
+      RecordFree(self->next, to_be_freed[i], context);
     }
   }
-  self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed);
+  self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed,
+                                  context);
 }

-void FreeDefiniteSizeFn(const AllocatorDispatch* self, void* ptr, size_t size) {
+void FreeDefiniteSizeFn(const AllocatorDispatch* self,
+                        void* ptr,
+                        size_t size,
+                        void* context) {
   if (ptr != nullptr)
-    RecordFree(self->next, ptr);
-  self->next->free_definite_size_function(self->next, ptr, size);
+    RecordFree(self->next, ptr, context);
+  self->next->free_definite_size_function(self->next, ptr, size, context);
 }

 // The allocator dispatch used to intercept heap operations.
 AllocatorDispatch allocator_dispatch = {&AllocFn,
                                         &AllocZeroInitializedFn,
                                         &AllocAlignedFn,
                                         &ReallocFn,
                                         &FreeFn,
                                         &GetSizeEstimateFn,
                                         &BatchMallocFn,
(...skipping 137 matching lines...)
       // RecordFree() above. The sentinel prevents RecordFree() from re-creating
       // another ThreadHeapUsage object.
       g_thread_allocator_usage.Set(kTeardownSentinel);
       delete static_cast<ThreadHeapUsage*>(thread_heap_usage);
     });
   }
 }

 }  // namespace debug
 }  // namespace base
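To see how a layer built on the new interface composes, here is a minimal, hypothetical pass-through layer in the same style as the tracker above. The counter and function names are illustrative only, and the install/remove calls (base::allocator::InsertAllocatorDispatch, RemoveAllocatorDispatchForTesting) are assumed from how base's shim layers are typically wired, not something this CL shows:

// Hypothetical example layer: counts allocations and forwards everything,
// including |context|, to the next shim in the chain. Not part of this CL.
#include <atomic>
#include <cstddef>

#include "base/allocator/allocator_shim.h"

namespace {

using base::allocator::AllocatorDispatch;

std::atomic<size_t> g_alloc_count{0};  // Illustrative counter.

void* CountingAllocFn(const AllocatorDispatch* self,
                      size_t size,
                      void* context) {
  // Count the allocation, then delegate. |context| (presumably the malloc
  // zone on macOS, per the issue title; may be null on other platforms)
  // must be forwarded unchanged so the terminal shim can route the call
  // to the right zone.
  g_alloc_count.fetch_add(1, std::memory_order_relaxed);
  return self->next->alloc_function(self->next, size, context);
}

void CountingFreeFn(const AllocatorDispatch* self,
                    void* address,
                    void* context) {
  self->next->free_function(self->next, address, context);
}

// The remaining dispatch members would forward to |self->next| the same
// way. A completed table would be installed at the head of the chain with
// base::allocator::InsertAllocatorDispatch(&counting_dispatch) and removed
// with base::allocator::RemoveAllocatorDispatchForTesting().

}  // namespace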
