Index: base/debug/scoped_heap_usage.cc
diff --git a/base/debug/scoped_heap_usage.cc b/base/debug/scoped_heap_usage.cc
new file mode 100644
index 0000000000000000000000000000000000000000..50eadbd71fa391f34e0942bd0464e19a619aebb8
--- /dev/null
+++ b/base/debug/scoped_heap_usage.cc
@@ -0,0 +1,194 @@
+// Copyright 2016 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/debug/scoped_heap_usage.h"
+
+#include <malloc.h>
+#include <stdint.h>
+// TODO(siggi): DO NOT SUBMIT
+#include <windows.h>
+
+#include "base/allocator/allocator_shim.h"
+#include "base/lazy_instance.h"
+#include "base/threading/thread_local.h"
+
+namespace base {
+namespace debug {
+
+namespace {
+
+using base::allocator::AllocatorDispatch;
+
+base::LazyInstance<base::ThreadLocalPointer<ScopedHeapUsage::AllocatorUsage>>::
+    Leaky g_thread_allocator_usage = LAZY_INSTANCE_INITIALIZER;
+
+// Forward declared as it needs to delegate memory allocation to the next
+// lower shim.
+ScopedHeapUsage::AllocatorUsage* GetOrCreateThreadUsage();
+
+// DO NOT SUBMIT.
+// This functionality needs to be exposed on the heap shims. Note that under
+// the Linux libc, it appears the heap doesn't keep track of the user size,
+// but instead allows querying the actual size of the allocation with

chrisha 2016/07/21 14:23:30:
Did you mean: s/actual user size/actual size/

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Done.

+// malloc_usable_size. I'm not sure how best to deal with this.

chrisha 2016/07/21 14:23:31:
Maybe by making allocated and freed track actual b…

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
So the problem here is that on other platforms mal…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Yeah - I think this'll be platform dependent enoug…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Side-tracking of individual allocs is a no-go for…

+size_t GetAllocSize(void* ptr) {
+  if (ptr == nullptr)
+    return 0U;
+
+  HANDLE heap_handle = reinterpret_cast<HANDLE>(_get_heap_handle());
+  return ::HeapSize(heap_handle, 0, ptr);
+}
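
As the thread above suggests, this query is platform dependent. A minimal
cross-platform sketch, assuming malloc_usable_size() from <malloc.h> on
Linux, malloc_size() from <malloc/malloc.h> on Mac, and Chromium's OS_*
macros from build/build_config.h (the function name is illustrative):

  size_t GetAllocSizeSketch(void* ptr) {
    if (ptr == nullptr)
      return 0U;
  #if defined(OS_WIN)
    // HeapSize reports the size the caller requested at allocation time.
    HANDLE heap_handle = reinterpret_cast<HANDLE>(_get_heap_handle());
    return ::HeapSize(heap_handle, 0, ptr);
  #elif defined(OS_MACOSX)
    // malloc_size reports the usable size of the block.
    return malloc_size(ptr);
  #else
    // malloc_usable_size reports the usable (rounded-up) size.
    return malloc_usable_size(ptr);
  #endif
  }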
+
+size_t EstimateOverhead(size_t size) {

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
this is going to be very tricky on tcmalloc (Linux…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Yeah, it doesn't have to be very accurate though.

+  // TODO(siggi): Less Windows-specific!
+  const size_t kHeaderSize = 8;
+#if defined(ARCH_CPU_64_BITS)
+  const size_t kAllocationGranularity = 16;
+#else
+  const size_t kAllocationGranularity = 8;
+#endif
+  size_t overhead = 0;
+  if (size % kAllocationGranularity != 0)
+    overhead = kAllocationGranularity - (size % kAllocationGranularity);
+
+  // This is a lower-bound estimate on the Windows heap overhead.
+  return kHeaderSize + overhead;
+}
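
To make the estimate concrete: with the 64-bit constants above, a 13-byte
allocation leaves 13 % 16 == 13, so it is padded by 16 - 13 = 3 bytes and
the estimate is 8 + 3 = 11 bytes of overhead; a 32-byte allocation is
already granularity-aligned and is charged only the 8-byte header.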
+
+void RecordAlloc(size_t size) {
+  ScopedHeapUsage::AllocatorUsage* usage = GetOrCreateThreadUsage();
+  if (usage == nullptr)
+    return;
+
+  usage->alloc_ops++;
+  usage->alloc_bytes += size;
+  usage->alloc_overhead_bytes += EstimateOverhead(size);
+
+  uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes;
+  if (allocated_bytes > usage->max_allocated_bytes)
+    usage->max_allocated_bytes = allocated_bytes;
+}
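
As a concrete trace of the high-water mark: after alloc(100), alloc(50),
free(100), alloc_bytes is 150 and free_bytes is 100. The outstanding count
peaked at 150 when the second allocation landed, and max_allocated_bytes
retains that peak even though only 50 bytes remain live afterwards.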
+
+void RecordFree(size_t size) {
+  ScopedHeapUsage::AllocatorUsage* usage = GetOrCreateThreadUsage();
+  if (usage == nullptr)
+    return;
+
+  usage->free_ops++;
+  usage->free_bytes += size;
+}
+
+void* AllocFn(const AllocatorDispatch* self, size_t size) {
+  void* ret = self->next->alloc_function(self->next, size);
+  if (ret != nullptr)
+    RecordAlloc(size);
+
+  return ret;
+}
+
+void* AllocZeroInitializedFn(const AllocatorDispatch* self,
+                             size_t n,
+                             size_t size) {
+  void* ret = self->next->alloc_zero_initialized_function(self->next, n, size);
+  if (ret != nullptr)
+    RecordAlloc(n * size);
+
+  return ret;
+}
+
+void* AllocAlignedFn(const AllocatorDispatch* self,
+                     size_t alignment,
+                     size_t size) {
+  void* ret = self->next->alloc_aligned_function(self->next, alignment, size);
+  if (ret != nullptr)
+    RecordAlloc(size);
+
+  return ret;
+}
+
+void* ReallocFn(const AllocatorDispatch* self, void* address, size_t size) {
+  if (address != nullptr)
+    RecordFree(GetAllocSize(address));
+
+  void* ret = self->next->realloc_function(self->next, address, size);
+  if (ret != nullptr)
+    RecordAlloc(size);
+
+  return ret;
+}
+
+void FreeFn(const AllocatorDispatch* self, void* address) {
+  size_t alloc_size = GetAllocSize(address);
+  self->next->free_function(self->next, address);
+  RecordFree(alloc_size);
+}
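
One subtlety worth noting: allocations are recorded at their requested size,
while frees are recorded at whatever GetAllocSize reports. On Windows,
HeapSize returns the user-requested size, so the two sides agree; on a
platform where only the usable size is queryable (the malloc_usable_size
case discussed above), a free could be recorded as larger than its matching
alloc, letting free_bytes drift ahead of alloc_bytes over many pairs.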
+
+// The dispatch table for the heap intercept used.
+AllocatorDispatch allocator_dispatch = {
+    &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, &FreeFn,
+    nullptr};
+
+ScopedHeapUsage::AllocatorUsage* GetOrCreateThreadUsage() {
+  using AllocatorUsage = ScopedHeapUsage::AllocatorUsage;
+
+  base::ThreadLocalPointer<AllocatorUsage>& usage_tls =
+      g_thread_allocator_usage.Get();

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
Isn't this going to re-enter (either the lazy inst…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
I haven't seen it reenter, but obviously I haven't…

+
+  AllocatorUsage* usage = usage_tls.Get();
+  if (usage == nullptr) {
+    CHECK(allocator_dispatch.next != nullptr);

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
no need for this check, if this is null will crash…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Acknowledged.

+
+    // Delegate the memory allocation to the next lower heap shim to avoid
+    // infinite recursion.
+    const AllocatorDispatch* next = allocator_dispatch.next;
+    usage = reinterpret_cast<AllocatorUsage*>(
+        next->alloc_function(next, sizeof(AllocatorUsage)));

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
you could just use alloc_zero_initialized_function…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Done.

+    memset(usage, 0, sizeof(AllocatorUsage));
+    usage_tls.Set(usage);

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
so the problem here is that you are leaking one "u…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Done.

+  }
+
+  return usage;
+}
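
A sketch of how the per-thread leak discussed above might be closed,
assuming ThreadLocalStorage::Slot from base/threading/thread_local_storage.h,
whose constructor accepts a destructor function to run at thread exit:

  void FreeThreadUsage(void* usage) {
    // Return the block through the same shim level that allocated it.
    const AllocatorDispatch* next = allocator_dispatch.next;
    next->free_function(next, usage);
  }

  // A slot constructed with the destructor reclaims each thread's usage
  // struct when the thread exits:
  //   base::ThreadLocalStorage::Slot usage_slot(&FreeThreadUsage);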
+
+}  // namespace
+
+ScopedHeapUsage::ScopedHeapUsage() : thread_usage_(GetOrCreateThreadUsage()) {
+  if (thread_usage_ != nullptr) {
+    usage_at_creation_ = *thread_usage_;
+    // Reset the max allocation tally for this scope.
+    thread_usage_->max_allocated_bytes = 0U;
+  } else {

chrisha 2016/07/21 14:23:31:
Shouldn't thread_usage_ always be non-null at this…

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
+1

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Done.

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Done.

+    // TODO(siggi): Is this even a good idea?
+    usage_at_creation_ = AllocatorUsage();
+  }
+}
+
+ScopedHeapUsage::~ScopedHeapUsage() {
+  if (thread_usage_ != nullptr &&
+      usage_at_creation_.max_allocated_bytes >
+          thread_usage_->max_allocated_bytes) {
+    // Restore the outer scope's max allocation tally, as it's larger than
+    // our scope's max.

chrisha 2016/07/21 14:23:30:
This mechanism requires that scopes be properly ne…

Primiano Tucci (use gerrit) 2016/07/21 15:59:20:
maybe you can have a global Atomic32 g_scoped_heap…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
I'd prefer to just document this problem away, but…

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Acknowledged.

+    thread_usage_->max_allocated_bytes = usage_at_creation_.max_allocated_bytes;
+  }
+}
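
A concrete nesting trace (assuming scopes are created and destroyed in
strict LIFO order, as the thread above requires): suppose an outer scope
has observed a 1000-byte peak when an inner scope is created; the inner
scope resets the tally and observes a 200-byte peak. At the inner scope's
destruction 1000 > 200, so the outer peak is written back. Had the inner
peak been 1500 instead, it would be left in place, and the outer scope
would correctly report 1500 as its own peak.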
+
+ScopedHeapUsage::AllocatorUsage ScopedHeapUsage::Now() {
+  AllocatorUsage* usage = GetOrCreateThreadUsage();
+  if (usage == nullptr)

chrisha 2016/07/21 14:23:30:
Can this even be null?

Sigurður Ásgeirsson 2016/08/19 18:21:49:
Done.

+    return AllocatorUsage();
+
+  return *usage;
+}
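
How a caller might combine a scope with Now() to measure a region; a minimal
sketch in which DoExpensiveWork() is a hypothetical stand-in:

  ScopedHeapUsage::AllocatorUsage before = ScopedHeapUsage::Now();
  {
    ScopedHeapUsage scope;  // Resets this thread's max tally.
    DoExpensiveWork();
    ScopedHeapUsage::AllocatorUsage now = ScopedHeapUsage::Now();
    uint64_t churned_bytes = now.alloc_bytes - before.alloc_bytes;
    uint64_t peak_bytes = now.max_allocated_bytes;  // Peak since scope start.
    // ... report churned_bytes and peak_bytes ...
  }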
+
+void ScopedHeapUsage::Initialize() {
+  InsertAllocatorDispatch(&allocator_dispatch);
+}
+
+void ScopedHeapUsage::TearDownForTesting() {
+  RemoveAllocatorDispatchForTesting(&allocator_dispatch);
+}
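
A plausible unit-test shape for the shim lifecycle, sketched assuming gtest
and that operator new is routed through the allocator shim (the test name is
hypothetical):

  TEST(ScopedHeapUsageTest, RecordsAllocationsInScope) {
    ScopedHeapUsage::Initialize();  // Inserts allocator_dispatch.
    {
      ScopedHeapUsage scope;
      std::unique_ptr<char[]> bytes(new char[1000]);
      EXPECT_LE(1000u, ScopedHeapUsage::Now().max_allocated_bytes);
    }
    ScopedHeapUsage::TearDownForTesting();  // Removes the dispatch again.
  }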
+
+}  // namespace debug
+}  // namespace base