Index: base/debug/scoped_thread_heap_usage.cc |
diff --git a/base/debug/scoped_thread_heap_usage.cc b/base/debug/scoped_thread_heap_usage.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..65fc3d7ee2c5b7cac36abb5f10a957949feb1f39 |
--- /dev/null |
+++ b/base/debug/scoped_thread_heap_usage.cc |
@@ -0,0 +1,218 @@ |
+// Copyright 2016 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "base/debug/scoped_thread_heap_usage.h" |
+ |
+#include <algorithm> |
+ |
+#include "base/allocator/features.h" |
+#include "build/build_config.h" |
+ |
+#if defined(OS_MACOSX) || defined(OS_IOS) |
+#include <malloc/malloc.h> |
+#else |
+#include <malloc.h> |
+#endif |
+#include <stdint.h> |
+ |
+#include "base/allocator/allocator_shim.h" |
+#include "base/threading/thread_local_storage.h" |
chrisha
2016/09/01 20:29:17
Wonky include order? Is this necessary for some re
Sigurður Ásgeirsson
2016/09/06 14:58:53
The OS defines require build_config, but the rest
|
+ |
+namespace base { |
+namespace debug { |
+ |
+namespace { |
+ |
+using base::allocator::AllocatorDispatch; |
+ |
+ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; |
+ |
+ScopedThreadHeapUsage::ThreadAllocatorUsage* const kInitializingSentinel = |
+ reinterpret_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(-1); |
+ |
+// Forward declared as it needs to delegate memory allocation to the next |
+// lower shim. |
+ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage(); |
+ |
+size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { |
+ if (ptr == nullptr || !next->get_size_estimate_function) |
+ return 0U; |
+ |
+ return next->get_size_estimate_function(next, ptr); |
+} |
+ |
+void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) { |
+ ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ if (usage == nullptr) |
+ return; |
+ |
+ usage->alloc_ops++; |
+ size_t estimate = GetAllocSizeEstimate(next, ptr); |
+ if (size && estimate) { |
+ usage->alloc_bytes += estimate; |
+    usage->alloc_overhead_bytes += estimate - size; |
chrisha
2016/09/01 20:29:17
estimate includes overhead, and size doesn't, no?
Sigurður Ásgeirsson
2016/09/06 14:58:53
Too right, adding a test. I don't think DCHECK wil
|
+ |
+ // Only keep track of the net number of bytes allocated in the scope if the |
+    // size estimate function returns sane values, i.e. non-zero. |
+ uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes; |
+ if (allocated_bytes > usage->max_allocated_bytes) |
+ usage->max_allocated_bytes = allocated_bytes; |
+ } else { |
+ usage->alloc_bytes += size; |
+ } |
+} |
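For illustration only (not part of this patch): a minimal, self-contained sketch of the net-bytes watermark bookkeeping that RecordAlloc above and RecordFree below implement, using hypothetical stand-in names (Usage, OnAlloc, OnFree):

  #include <algorithm>
  #include <cstdint>

  struct Usage {
    uint64_t alloc_bytes = 0;
    uint64_t free_bytes = 0;
    uint64_t max_allocated_bytes = 0;
  };

  void OnAlloc(Usage& u, uint64_t estimate) {
    u.alloc_bytes += estimate;
    // Track the high-water mark of net (allocated - freed) bytes.
    u.max_allocated_bytes =
        std::max(u.max_allocated_bytes, u.alloc_bytes - u.free_bytes);
  }

  void OnFree(Usage& u, uint64_t estimate) { u.free_bytes += estimate; }

  // E.g. OnAlloc(u, 100); OnFree(u, 100); OnAlloc(u, 60); leaves
  // alloc_bytes == 160, free_bytes == 100 and max_allocated_bytes == 100.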
+ |
+void RecordFree(const AllocatorDispatch* next, void* ptr) { |
+ ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ if (usage == nullptr) |
+ return; |
+ |
+ size_t estimate = GetAllocSizeEstimate(next, ptr); |
chrisha
2016/09/01 20:29:17
I'd find a comment about GetAllocSizeEstimate retu
Sigurður Ásgeirsson
2016/09/06 14:58:53
Added a class comment 'splaining this. Maybe more
chrisha
2016/09/07 19:05:45
sgtm
|
+ usage->free_ops++; |
+ usage->free_bytes += estimate; |
+} |
+ |
+void* AllocFn(const AllocatorDispatch* self, size_t size) { |
+ void* ret = self->next->alloc_function(self, size); |
+ if (ret != nullptr) |
+ RecordAlloc(self->next, ret, size); |
+ |
+ return ret; |
+} |
+ |
+void* AllocZeroInitializedFn(const AllocatorDispatch* self, |
+ size_t n, |
+ size_t size) { |
+ void* ret = self->next->alloc_zero_initialized_function(self, n, size); |
+ if (ret != nullptr) |
+ RecordAlloc(self->next, ret, size); |
+ |
+ return ret; |
+} |
+ |
+void* AllocAlignedFn(const AllocatorDispatch* self, |
+ size_t alignment, |
+ size_t size) { |
+ void* ret = self->next->alloc_aligned_function(self, alignment, size); |
+ if (ret != nullptr) |
+ RecordAlloc(self->next, ret, size); |
+ |
+ return ret; |
+} |
+ |
+void* ReallocFn(const AllocatorDispatch* self, void* address, size_t size) { |
+ if (address != nullptr) |
+ RecordFree(self->next, address); |
+ |
+ void* ret = self->next->realloc_function(self, address, size); |
+ if (ret != nullptr && size != 0) |
+ RecordAlloc(self->next, ret, size); |
+ |
+ return ret; |
+} |
+ |
+void FreeFn(const AllocatorDispatch* self, void* address) { |
+ if (address != nullptr) |
+ RecordFree(self->next, address); |
+ self->next->free_function(self, address); |
+} |
+ |
+// The allocator dispatch table used for this intercept. |
+AllocatorDispatch allocator_dispatch = { |
+ &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, &FreeFn, |
+ nullptr}; |
chrisha
2016/09/01 20:29:17
(Unfamiliar with the impl details.)
You don't nee
Primiano Tucci (use gerrit)
2016/09/02 17:31:20
Nope, he actually has to do that.
Doing what you s
Sigurður Ásgeirsson
2016/09/06 14:58:53
Nope, you don't slow the critical common path with
Sigurður Ásgeirsson
2016/09/06 14:58:54
Acknowledged.
chrisha
2016/09/06 15:51:43
Maybe I should have been more clear. You appear to
Sigurður Ásgeirsson
2016/09/06 16:25:23
Holy Cannoli batman, you're too right. I'm upside
|
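As background for the thread above, a rough sketch (not part of this CL) of the general shim-chaining idea, using a simplified stand-in for the real base::allocator::AllocatorDispatch struct:

  #include <cstddef>
  #include <cstdlib>

  // Simplified stand-in; the real struct has more entry points.
  struct Dispatch {
    void* (*alloc)(const Dispatch* self, size_t size);
    void (*free)(const Dispatch* self, void* ptr);
    const Dispatch* next;  // The layer below this one in the chain.
  };

  // Terminal layer: forwards straight to the system allocator.
  void* SystemAlloc(const Dispatch*, size_t size) { return std::malloc(size); }
  void SystemFree(const Dispatch*, void* ptr) { std::free(ptr); }
  const Dispatch g_system = {&SystemAlloc, &SystemFree, nullptr};

  // Instrumenting layer: observes the call, then delegates to the next layer.
  void* CountingAlloc(const Dispatch* self, size_t size) {
    void* ret = self->next->alloc(self->next, size);
    // ... record the allocation here ...
    return ret;
  }
  void CountingFree(const Dispatch* self, void* ptr) {
    // ... record the free here ...
    self->next->free(self->next, ptr);
  }
  const Dispatch g_counting = {&CountingAlloc, &CountingFree, &g_system};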
+ |
+ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() { |
+ ScopedThreadHeapUsage::ThreadAllocatorUsage* allocator_usage = |
+ static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>( |
+ g_thread_allocator_usage.Get()); |
+ if (allocator_usage == kInitializingSentinel) |
+ return nullptr; // Re-entrancy case. |
+ |
+ if (!allocator_usage) { |
+ // Prevent reentrancy due to the allocation below. |
+ g_thread_allocator_usage.Set(kInitializingSentinel); |
+ |
+ allocator_usage = new ScopedThreadHeapUsage::ThreadAllocatorUsage; |
chrisha
2016/09/01 20:29:17
Do we need this dance? Or could you simply pass in
Primiano Tucci (use gerrit)
2016/09/02 17:31:20
I thinkthe problem is that you never know if in tu
Sigurður Ásgeirsson
2016/09/06 14:58:53
Acknowledged.
Sigurður Ásgeirsson
2016/09/06 14:58:53
Yeah, as Primiano pointed out, if this ever breaks
|
+ memset(allocator_usage, 0, sizeof(*allocator_usage)); |
+ g_thread_allocator_usage.Set(allocator_usage); |
+ } |
+ |
+ return allocator_usage; |
+} |
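The sentinel above exists because the `new` it guards is itself routed through the shim. A hypothetical trace of the first recorded allocation on a thread, for illustration:

  // 1. AllocFn -> RecordAlloc -> GetOrCreateThreadUsage: the TLS slot is null.
  // 2. The slot is set to kInitializingSentinel and `new ThreadAllocatorUsage`
  //    re-enters the shim: AllocFn -> RecordAlloc -> GetOrCreateThreadUsage.
  // 3. The nested call sees the sentinel and returns nullptr, so the
  //    bookkeeping allocation goes unrecorded instead of recursing forever.
  // 4. The outer call stores the new struct in the slot and returns it.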
+ |
+} // namespace |
+ |
+ScopedThreadHeapUsage::ScopedThreadHeapUsage() { |
+ // TODO(siggi): I don't like this much, as this is only really needed |
+ // for testing. |
chrisha
2016/09/01 20:29:17
Expose a static EnsureTLSInitializedForTesting, an
Primiano Tucci (use gerrit)
2016/09/02 17:31:20
+1
Sigurður Ásgeirsson
2016/09/06 14:58:53
Acknowledged.
Sigurður Ásgeirsson
2016/09/06 14:58:54
I'm actually of two minds here. The TLS initializa
|
+ EnsureTLSInitalized(); |
+ |
+ ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ usage_at_creation_ = *usage; |
+ |
+ // Reset the stats for our current scope. |
chrisha
2016/09/01 20:29:17
A clarifying comment would help here:
The TLS inf
Sigurður Ásgeirsson
2016/09/06 14:58:53
Done.
|
+ memset(usage, 0, sizeof(*usage)); |
+} |
+ |
+ScopedThreadHeapUsage::~ScopedThreadHeapUsage() { |
+ DCHECK(thread_checker_.CalledOnValidThread()); |
+ |
+ ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ |
+ // Update the outer max. |
+ if (usage->max_allocated_bytes) { |
+ uint64_t outer_net_alloc_bytes = |
+ usage_at_creation_.alloc_bytes - usage_at_creation_.free_bytes; |
+ |
+ usage->max_allocated_bytes = |
+ std::max(usage_at_creation_.max_allocated_bytes, |
+ outer_net_alloc_bytes + usage->max_allocated_bytes); |
+ } |
+ |
+ usage->alloc_ops += usage_at_creation_.alloc_ops; |
+ usage->alloc_bytes += usage_at_creation_.alloc_bytes; |
+ usage->alloc_overhead_bytes += usage_at_creation_.alloc_overhead_bytes; |
+ usage->free_ops += usage_at_creation_.free_ops; |
+ usage->free_bytes += usage_at_creation_.free_bytes; |
+} |
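To make the merge above concrete, a hypothetical nested-scope trace (numbers invented for illustration):

  // Outer scope: 100 bytes are allocated (net 100, max 100), then an inner
  // ScopedThreadHeapUsage is constructed; its ctor snapshots the outer stats
  // into usage_at_creation_ and zeroes the TLS struct.
  // Inner scope: 50 bytes are allocated and then freed (inner max 50).
  // Inner dtor: outer_net_alloc_bytes = 100 - 0 = 100, so the merged
  // max_allocated_bytes becomes std::max(100, 100 + 50) = 150, and the inner
  // op and byte counters are added back onto the outer ones.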
+ |
+ScopedThreadHeapUsage::ThreadAllocatorUsage ScopedThreadHeapUsage::Now() { |
+ ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ return *usage; |
+} |
+ |
+void ScopedThreadHeapUsage::Initialize() { |
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
+ EnsureTLSInitalized(); |
+ InsertAllocatorDispatch(&allocator_dispatch); |
+#else |
+ CHECK(false) << "The heap shim is not enabled."; |
+#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
+} |
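A hedged usage sketch (assuming Initialize() and Now() are static members, as the definitions here suggest; MeasureSomeWork is a made-up caller and Initialize() is assumed to have been called once at startup):

  #include <memory>

  #include "base/debug/scoped_thread_heap_usage.h"

  void MeasureSomeWork() {
    base::debug::ScopedThreadHeapUsage scope;
    std::unique_ptr<char[]> buffer(new char[4096]);  // Work that allocates.
    base::debug::ScopedThreadHeapUsage::ThreadAllocatorUsage stats =
        base::debug::ScopedThreadHeapUsage::Now();
    // The stats cover this thread since `scope` was constructed; here
    // alloc_ops should be at least 1 and alloc_bytes at least 4096.
  }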
+ |
+void ScopedThreadHeapUsage::EnsureTLSInitalized() { |
chrisha
2016/09/01 20:29:17
Initialized* (here and elsewhere)
Sigurður Ásgeirsson
2016/09/06 14:58:53
Whoops - thanks, fixed.
|
+ if (!g_thread_allocator_usage.initialized()) { |
+ g_thread_allocator_usage.Initialize([](void* allocator_usage) { |
+ delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>( |
+ allocator_usage); |
+ }); |
+ } |
+} |
+ |
+void ScopedThreadHeapUsage::TearDownForTesting() { |
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
+ RemoveAllocatorDispatchForTesting(&allocator_dispatch); |
+#else |
+ CHECK(false) << "The heap shim is not enabled."; |
+#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
+} |
+ |
+base::allocator::AllocatorDispatch* |
+ScopedThreadHeapUsage::GetDispatchForTesting() { |
+ return &allocator_dispatch; |
+} |
+ |
+} // namespace debug |
+} // namespace base |