Index: base/debug/scoped_heap_usage.cc |
diff --git a/base/debug/scoped_heap_usage.cc b/base/debug/scoped_heap_usage.cc |
new file mode 100644 |
index 0000000000000000000000000000000000000000..37e11136243ae1a0275edef5807441f5f09ece76 |
--- /dev/null |
+++ b/base/debug/scoped_heap_usage.cc |
@@ -0,0 +1,183 @@ |
+// Copyright 2016 The Chromium Authors. All rights reserved. |
+// Use of this source code is governed by a BSD-style license that can be |
+// found in the LICENSE file. |
+ |
+#include "base/debug/scoped_heap_usage.h" |
+ |
+#include "base/allocator/features.h" |
+#include "build/build_config.h" |
+ |
+#if defined(OS_MACOSX) || defined(OS_IOS) |
+#include <malloc/malloc.h> |
+#else |
+#include <malloc.h> |
+#endif |
+#include <stdint.h> |
+ |
+#include "base/allocator/allocator_shim.h" |
+#include "base/threading/thread_local_storage.h" |
+ |
+namespace base { |
+namespace debug { |
+ |
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
I think you can just restrict this #if to the Init
Sigurður Ásgeirsson
2016/09/01 15:18:18
Done.
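A minimal sketch of the suggestion, assuming the bookkeeping below compiles cleanly without the shim: gate only the hook-up inside Initialize() rather than the whole file.

    void ScopedHeapUsage::Initialize() {
    #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
      if (!g_thread_allocator_usage.initialized())
        g_thread_allocator_usage.Initialize(FreeAllocatorUsage);
      InsertAllocatorDispatch(&allocator_dispatch);
    #endif
    }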
|
+namespace { |
+ |
+using base::allocator::AllocatorDispatch; |
+ |
+ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; |
+ |
+ScopedHeapUsage::AllocatorUsage* const kInitializingSentinel = |
+ reinterpret_cast<ScopedHeapUsage::AllocatorUsage*>(-1); |
+ |
+// Forward declared as it needs to delegate memory allocation to the next |
+// lower shim. |
+ScopedHeapUsage::AllocatorUsage* GetOrCreateThreadUsage(); |
+ |
+size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { |
Primiano Tucci (use gerrit)
2016/08/24 15:28:47
So at this point, if we shim GetAllocSize, there s
Sigurður Ásgeirsson
2016/09/01 15:18:18
I can add this if you like, but I don't agree with
|
+ if (ptr == nullptr || !next->get_size_estimate_function) |
+ return 0U; |
+ |
+ return next->get_size_estimate_function(next, ptr); |
+} |
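For context, the estimate a terminal shim layer returns typically comes from the platform's usable-size query. A sketch of such a bottom-of-chain implementation (the function name is illustrative, not part of this patch):

    size_t PlatformSizeEstimate(void* ptr) {
    #if defined(OS_MACOSX) || defined(OS_IOS)
      return malloc_size(ptr);         // Returns 0 for non-heap pointers.
    #else
      return malloc_usable_size(ptr);  // May exceed the requested size.
    #endif
    }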
+ |
+void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) { |
+ ScopedHeapUsage::AllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ if (usage == nullptr) |
+ return; |
+ |
+ usage->alloc_ops++; |
+ size_t estimate = GetAllocSizeEstimate(next, ptr); |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
yeah here I'd just call GetAllocSizeEstimate(ptr),
Sigurður Ásgeirsson
2016/09/01 15:18:18
As-is, I can test the functionality of this shim i
|
+ if (estimate) { |
+ usage->alloc_bytes += estimate; |
+    usage->alloc_overhead_bytes += estimate - size; |
+ } else { |
+ usage->alloc_bytes += size; |
+ } |
+ |
+ uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes; |
+ if (allocated_bytes > usage->max_allocated_bytes) |
+ usage->max_allocated_bytes = allocated_bytes; |
+} |
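A worked example of the accounting above, with illustrative sizes:

    // malloc(10) lands in a 16-byte bucket, so size = 10, estimate = 16:
    //   alloc_bytes          += 16
    //   alloc_overhead_bytes += 16 - 10 = 6    // Bucketing waste.
    // Without an estimator (estimate == 0), only the requested 10 bytes
    // are attributed, and overhead goes untracked.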
+ |
+void RecordFree(const AllocatorDispatch* next, void* ptr) { |
+ ScopedHeapUsage::AllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ if (usage == nullptr) |
+ return; |
+ |
+ size_t estimate = GetAllocSizeEstimate(next, ptr); |
+ usage->free_ops++; |
+ usage->free_bytes += estimate; |
+} |
+ |
+void* AllocFn(const AllocatorDispatch* self, size_t size) { |
+  void* ret = self->next->alloc_function(self->next, size); |
+ if (ret != nullptr) |
+ RecordAlloc(self->next, ret, size); |
+ |
+ return ret; |
+} |
+ |
+void* AllocZeroInitializedFn(const AllocatorDispatch* self, |
+ size_t n, |
+ size_t size) { |
+  void* ret = self->next->alloc_zero_initialized_function(self->next, n, size); |
+  if (ret != nullptr) |
+    RecordAlloc(self->next, ret, n * size); |
+ |
+ return ret; |
+} |
+ |
+void* AllocAlignedFn(const AllocatorDispatch* self, |
+ size_t alignment, |
+ size_t size) { |
+  void* ret = self->next->alloc_aligned_function(self->next, alignment, size); |
+ if (ret != nullptr) |
+ RecordAlloc(self->next, ret, size); |
+ |
+ return ret; |
+} |
+ |
+void* ReallocFn(const AllocatorDispatch* self, void* address, size_t size) { |
+ if (address != nullptr) |
+ RecordFree(self->next, address); |
+ |
+  void* ret = self->next->realloc_function(self->next, address, size); |
+ if (ret != nullptr) |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
I'd probably be a bit more conservative here and d
Sigurður Ásgeirsson
2016/09/01 15:18:18
Done.
|
+ RecordAlloc(self->next, ret, size); |
+ |
+ return ret; |
+} |
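Illustrative accounting for a realloc that grows a block whose old estimate is 16 bytes to a requested 32 bytes:

    //   RecordFree  : free_ops++,  free_bytes  += 16 (old estimate)
    //   RecordAlloc : alloc_ops++, alloc_bytes += 32 (or the new estimate)
    // A shrinking realloc is tallied the same way: one free, one alloc.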
+ |
+void FreeFn(const AllocatorDispatch* self, void* address) { |
+  if (address != nullptr) |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
small nit: you seem to mix if (address) with if (a
Sigurður Ásgeirsson
2016/09/01 15:18:18
Done.
|
+ RecordFree(self->next, address); |
+  self->next->free_function(self->next, address); |
+} |
+ |
+// The dispatch used for the heap intercept. |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
typo: s/interept/intercept/
Sigurður Ásgeirsson
2016/09/01 15:18:18
Done.
|
+AllocatorDispatch allocator_dispatch = { |
+ &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, &FreeFn, |
+ nullptr}; |
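Field-by-field, the initializer above maps as follows, assuming the AllocatorDispatch layout of the day; the size-estimate hook is left null so queries fall through to the next layer, and |next| is populated by InsertAllocatorDispatch:

    //   alloc_function                  = &AllocFn
    //   alloc_zero_initialized_function = &AllocZeroInitializedFn
    //   alloc_aligned_function          = &AllocAlignedFn
    //   realloc_function                = &ReallocFn
    //   free_function                   = &FreeFn
    //   get_size_estimate_function      = nullptr
    //   next                            = nullptr (set on insertion)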
+ |
+ScopedHeapUsage::AllocatorUsage* GetOrCreateThreadUsage() { |
+ ScopedHeapUsage::AllocatorUsage* allocator_usage = |
+ static_cast<ScopedHeapUsage::AllocatorUsage*>( |
+ g_thread_allocator_usage.Get()); |
+ if (allocator_usage == kInitializingSentinel) |
+ return nullptr; // Re-entrancy case. |
+ |
+ if (!allocator_usage) { |
+ g_thread_allocator_usage.Set(kInitializingSentinel); |
+ |
+ const AllocatorDispatch* next = allocator_dispatch.next; |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
given the fact that you have a sentinel I think yo
Sigurður Ásgeirsson
2016/09/01 15:18:18
Done.
|
+ allocator_usage = reinterpret_cast<ScopedHeapUsage::AllocatorUsage*>( |
+ next->alloc_zero_initialized_function(next, 1, |
+ sizeof(*allocator_usage))); |
+ g_thread_allocator_usage.Set(allocator_usage); |
+ } |
+ |
+ return allocator_usage; |
+} |
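The sentinel exists because creating the per-thread struct can itself allocate (both the delegated allocation and the TLS machinery may call into malloc), re-entering RecordAlloc on the same thread. An illustrative sequence:

    //   malloc() on thread T, first allocation the shim sees on T:
    //     AllocFn -> RecordAlloc
    //       GetOrCreateThreadUsage()          // Slot is empty.
    //         Set(kInitializingSentinel)
    //         allocate the usage struct       // May re-enter malloc...
    //           AllocFn -> RecordAlloc
    //             GetOrCreateThreadUsage()    // Sees the sentinel.
    //             returns nullptr             // Inner call bails out.
    //         Set(allocator_usage)            // Real struct installed.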
+ |
+void FreeAllocatorUsage(void* allocator_usage) { |
+ const AllocatorDispatch* next = allocator_dispatch.next; |
+ next->free_function(next, allocator_usage); |
+} |
+ |
+} // namespace |
+ |
+ScopedHeapUsage::ScopedHeapUsage() : thread_usage_(GetOrCreateThreadUsage()) { |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
given that you initialize the other fields in the
Sigurður Ásgeirsson
2016/09/01 15:18:18
Done.
|
+ usage_at_creation_ = *thread_usage_; |
+ // Reset the max allocation tally for this scope. |
+ thread_usage_->max_allocated_bytes = 0U; |
+} |
+ |
+ScopedHeapUsage::~ScopedHeapUsage() { |
+ if (usage_at_creation_.max_allocated_bytes > |
+ thread_usage_->max_allocated_bytes) { |
Primiano Tucci (use gerrit)
2016/08/24 14:11:46
out of curiosity are you caching thread_usage_ her
Sigurður Ásgeirsson
2016/09/01 15:18:18
Done. This does require two TLS lookups per instan
|
+ // Restore the outer scope's max allocation tally, as it's larger than |
+ // our scope's max. |
+ thread_usage_->max_allocated_bytes = usage_at_creation_.max_allocated_bytes; |
+ } |
+} |
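A usage sketch of the nesting semantics above; the allocation helpers are hypothetical:

    {
      ScopedHeapUsage outer;
      AllocateLots();            // Say the running peak reaches 1 MiB.
      {
        ScopedHeapUsage inner;   // Resets max_allocated_bytes to 0.
        AllocateALittle();       // Peak inside this scope: 4 KiB.
      }                          // ~inner restores the larger 1 MiB tally.
      uint64_t peak = ScopedHeapUsage::Now().max_allocated_bytes;  // 1 MiB.
    }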
+ |
+ScopedHeapUsage::AllocatorUsage ScopedHeapUsage::Now() { |
+ AllocatorUsage* usage = GetOrCreateThreadUsage(); |
+ return *usage; |
+} |
+ |
+void ScopedHeapUsage::Initialize() { |
+ if (!g_thread_allocator_usage.initialized()) |
+ g_thread_allocator_usage.Initialize(FreeAllocatorUsage); |
+ |
+ InsertAllocatorDispatch(&allocator_dispatch); |
+} |
+ |
+void ScopedHeapUsage::TearDownForTesting() { |
+ RemoveAllocatorDispatchForTesting(&allocator_dispatch); |
+} |
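A hedged sketch of the intended test lifecycle, pieced together from the functions in this file (DoAllocations is hypothetical):

    ScopedHeapUsage::Initialize();            // Hook the shim once.
    {
      ScopedHeapUsage scope;
      DoAllocations();
      ScopedHeapUsage::AllocatorUsage usage = ScopedHeapUsage::Now();
      // usage.alloc_ops and usage.alloc_bytes now reflect DoAllocations().
    }
    ScopedHeapUsage::TearDownForTesting();    // Unhook between tests.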
+#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
+ |
+} // namespace debug |
+} // namespace base |