Chromium Code Reviews| Index: base/debug/scoped_thread_heap_usage.cc |
| diff --git a/base/debug/scoped_thread_heap_usage.cc b/base/debug/scoped_thread_heap_usage.cc |
| index 2f5ed8c267e0a59a30d1735f9116bee115fbd831..a75776ccde652ba1807322194b8bb6c9641bdc35 100644 |
| --- a/base/debug/scoped_thread_heap_usage.cc |
| +++ b/base/debug/scoped_thread_heap_usage.cc |
| @@ -4,6 +4,8 @@ |
| #include "base/debug/scoped_thread_heap_usage.h" |
| +#include <windows.h> |
| + |
|
Primiano Tucci (use gerrit)
2016/10/13 20:47:44
if defined(OS_WINDOWS) ?? (but then you need build
Sigurður Ásgeirsson
2016/10/14 13:23:47
Thanks, this is debugging remnants - <windows.h> h
|
| #include <stdint.h> |
| #include <algorithm> |
| #include <type_traits> |
| @@ -29,14 +31,14 @@ using base::allocator::AllocatorDispatch; |
| ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER; |
| -ScopedThreadHeapUsage::ThreadAllocatorUsage* const kInitializingSentinel = |
| - reinterpret_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>(-1); |
| +ThreadAllocatorUsage* const kInitializingSentinel = |
| + reinterpret_cast<ThreadAllocatorUsage*>(-1); |
| bool g_heap_tracking_enabled = false; |
| // Forward declared as it needs to delegate memory allocation to the next |
| // lower shim. |
| -ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage(); |
| +ThreadAllocatorUsage* GetOrCreateThreadUsage(); |
| size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { |
| if (ptr == nullptr) |
| @@ -46,28 +48,32 @@ size_t GetAllocSizeEstimate(const AllocatorDispatch* next, void* ptr) { |
| } |
| void RecordAlloc(const AllocatorDispatch* next, void* ptr, size_t size) { |
| - ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
| + ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
| if (usage == nullptr) |
| return; |
| usage->alloc_ops++; |
| size_t estimate = GetAllocSizeEstimate(next, ptr); |
| if (size && estimate) { |
| + // Only keep track of the net number of bytes allocated in the scope if the |
| + // size estimate function returns sane values, e.g. non-zero. |
| usage->alloc_bytes += estimate; |
| usage->alloc_overhead_bytes += estimate - size; |
| - // Only keep track of the net number of bytes allocated in the scope if the |
| - // size estimate function returns sane values, e.g. non-zero. |
| - uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes; |
| - if (allocated_bytes > usage->max_allocated_bytes) |
| - usage->max_allocated_bytes = allocated_bytes; |
| + // Record the max outstanding number of bytes, but only if the difference |
| + // is net positive (e.g. more bytes allocated than freed in the scope). |
| + if (usage->alloc_bytes > usage->free_bytes) { |
| + uint64_t allocated_bytes = usage->alloc_bytes - usage->free_bytes; |
| + if (allocated_bytes > usage->max_allocated_bytes) |
| + usage->max_allocated_bytes = allocated_bytes; |
| + } |
| } else { |
| usage->alloc_bytes += size; |
| } |
| } |
| void RecordFree(const AllocatorDispatch* next, void* ptr) { |
| - ScopedThreadHeapUsage::ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
| + ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
| if (usage == nullptr) |
| return; |
| @@ -130,10 +136,13 @@ AllocatorDispatch allocator_dispatch = { |
| &AllocFn, &AllocZeroInitializedFn, &AllocAlignedFn, &ReallocFn, |
| &FreeFn, &GetSizeEstimateFn, nullptr}; |
| -ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() { |
| - ScopedThreadHeapUsage::ThreadAllocatorUsage* allocator_usage = |
| - static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>( |
| - g_thread_allocator_usage.Get()); |
| +ThreadAllocatorUsage* GetOrCreateThreadUsage() { |
| + // DO NOT SUBMIT! |
| + if (!g_thread_allocator_usage.initialized()) |
|
Primiano Tucci (use gerrit)
2016/10/13 20:47:44
uh? Shouldn't just you initialize this in some mai
Sigurður Ásgeirsson
2016/10/14 20:11:35
Sorry, more debugging remnants.
|
| + return nullptr; |
| + |
| + ThreadAllocatorUsage* allocator_usage = |
| + static_cast<ThreadAllocatorUsage*>(g_thread_allocator_usage.Get()); |
| if (allocator_usage == kInitializingSentinel) |
| return nullptr; // Re-entrancy case. |
| @@ -141,7 +150,7 @@ ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() { |
| // Prevent reentrancy due to the allocation below. |
| g_thread_allocator_usage.Set(kInitializingSentinel); |
| - allocator_usage = new ScopedThreadHeapUsage::ThreadAllocatorUsage; |
| + allocator_usage = new ThreadAllocatorUsage; |
| memset(allocator_usage, 0, sizeof(*allocator_usage)); |
| g_thread_allocator_usage.Set(allocator_usage); |
| } |
| @@ -151,61 +160,78 @@ ScopedThreadHeapUsage::ThreadAllocatorUsage* GetOrCreateThreadUsage() { |
| } // namespace |
| -ScopedThreadHeapUsage::ScopedThreadHeapUsage() { |
| - // Initialize must be called before creating instances of this class. |
| - CHECK(g_thread_allocator_usage.initialized()); |
| +HeapUsageTracker::HeapUsageTracker() : thread_usage_(nullptr) { |
| + static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD."); |
| +} |
| + |
| +HeapUsageTracker::~HeapUsageTracker() { |
| + // If this is called, weirdness happens in Chrome's state at large. |
| + // CHECK(thread_checker_.CalledOnValidThread()); |
|
Primiano Tucci (use gerrit)
2016/10/13 20:47:44
Please tell me you did never hit this CHECK :)
Sigurður Ásgeirsson
2016/10/14 20:11:35
No, it just wrecked Chrome's state to the point wh
|
| +} |
| - ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
| - usage_at_creation_ = *usage; |
| +void HeapUsageTracker::Start() { |
| + // TODO(siggi): Grrrr - more usable this way. |
| + if (!g_thread_allocator_usage.initialized()) |
|
Primiano Tucci (use gerrit)
2016/10/13 20:47:44
hmm the TLS slot itself should be initialized only
Sigurður Ásgeirsson
2016/10/14 20:11:35
Yeah, I'm not sure what's the right way to do this
|
| + return; |
| + |
| + DCHECK(g_thread_allocator_usage.initialized()); |
| + |
| + thread_usage_ = GetOrCreateThreadUsage(); |
| + DCHECK_NE(nullptr, thread_usage_); |
|
Primiano Tucci (use gerrit)
2016/10/13 20:47:44
well, even without this dcheck, if this happens to
Sigurður Ásgeirsson
2016/10/14 20:11:35
Done.
|
| + |
| + usage_ = *thread_usage_; |
| // Reset the stats for our current scope. |
| // The per-thread usage instance now tracks this scope's usage, while this |
| // instance persists the outer scope's usage stats. On destruction, this |
| - // instance will restore the outer scope's usage stats with this scope's usage |
| - // added. |
| - memset(usage, 0, sizeof(*usage)); |
| - |
| - static_assert(std::is_pod<ThreadAllocatorUsage>::value, "Must be POD."); |
| + // instance will restore the outer scope's usage stats with this scope's |
| + // usage added. |
| + memset(thread_usage_, 0, sizeof(*thread_usage_)); |
| } |
| -ScopedThreadHeapUsage::~ScopedThreadHeapUsage() { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| - |
| - ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
| +void HeapUsageTracker::Stop(bool usage_is_exclusive) { |
| + // TODO(siggi): Grrrr - more usable this way. |
| + if (thread_usage_ == nullptr) { |
| + memset(&usage_, 0, sizeof(usage_)); |
| + return; |
| + } |
| - // Update the outer max. |
| - if (usage->max_allocated_bytes) { |
| - uint64_t outer_net_alloc_bytes = |
| - usage_at_creation_.alloc_bytes - usage_at_creation_.free_bytes; |
| + DCHECK_NE(nullptr, thread_usage_); |
| - usage->max_allocated_bytes = |
| - std::max(usage_at_creation_.max_allocated_bytes, |
| - outer_net_alloc_bytes + usage->max_allocated_bytes); |
| + ThreadAllocatorUsage current = CurrentUsage(); |
| + if (usage_is_exclusive) { |
| + *thread_usage_ = usage_; |
| + } else { |
| + // Update the outer max. |
| + if (thread_usage_->max_allocated_bytes) { |
| + uint64_t outer_net_alloc_bytes = usage_.alloc_bytes - usage_.free_bytes; |
| + |
| + thread_usage_->max_allocated_bytes = |
| + std::max(usage_.max_allocated_bytes, |
| + outer_net_alloc_bytes + thread_usage_->max_allocated_bytes); |
| + } |
| + |
| + thread_usage_->alloc_ops += usage_.alloc_ops; |
| + thread_usage_->alloc_bytes += usage_.alloc_bytes; |
| + thread_usage_->alloc_overhead_bytes += usage_.alloc_overhead_bytes; |
| + thread_usage_->free_ops += usage_.free_ops; |
| + thread_usage_->free_bytes += usage_.free_bytes; |
| } |
| - usage->alloc_ops += usage_at_creation_.alloc_ops; |
| - usage->alloc_bytes += usage_at_creation_.alloc_bytes; |
| - usage->alloc_overhead_bytes += usage_at_creation_.alloc_overhead_bytes; |
| - usage->free_ops += usage_at_creation_.free_ops; |
| - usage->free_bytes += usage_at_creation_.free_bytes; |
| + usage_ = current; |
| } |
| -ScopedThreadHeapUsage::ThreadAllocatorUsage |
| -ScopedThreadHeapUsage::CurrentUsage() { |
| +ThreadAllocatorUsage HeapUsageTracker::CurrentUsage() { |
| + DCHECK(g_thread_allocator_usage.initialized()); |
| + |
| ThreadAllocatorUsage* usage = GetOrCreateThreadUsage(); |
| + DCHECK_NE(nullptr, usage); |
| return *usage; |
| } |
| -void ScopedThreadHeapUsage::Initialize() { |
| - if (!g_thread_allocator_usage.initialized()) { |
| - g_thread_allocator_usage.Initialize([](void* allocator_usage) { |
| - delete static_cast<ScopedThreadHeapUsage::ThreadAllocatorUsage*>( |
| - allocator_usage); |
| - }); |
| - } |
| -} |
| +void HeapUsageTracker::EnableHeapTracking() { |
| + EnsureTLSInitialized(); |
| -void ScopedThreadHeapUsage::EnableHeapTracking() { |
| CHECK_EQ(false, g_heap_tracking_enabled) << "No double-enabling."; |
| g_heap_tracking_enabled = true; |
| #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
| @@ -215,7 +241,11 @@ void ScopedThreadHeapUsage::EnableHeapTracking() { |
| #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
| } |
| -void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() { |
| +bool HeapUsageTracker::IsHeapTrackingEnabled() { |
| + return g_heap_tracking_enabled; |
| +} |
| + |
| +void HeapUsageTracker::DisableHeapTrackingForTesting() { |
| #if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) |
| base::allocator::RemoveAllocatorDispatchForTesting(&allocator_dispatch); |
| #else |
| @@ -225,10 +255,21 @@ void ScopedThreadHeapUsage::DisableHeapTrackingForTesting() { |
| g_heap_tracking_enabled = false; |
| } |
| -base::allocator::AllocatorDispatch* |
| -ScopedThreadHeapUsage::GetDispatchForTesting() { |
| +void HeapUsageTracker::EnsureTLSInitializedForTesting() { |
| + EnsureTLSInitialized(); |
| +} |
| + |
| +base::allocator::AllocatorDispatch* HeapUsageTracker::GetDispatchForTesting() { |
| return &allocator_dispatch; |
| } |
| +void HeapUsageTracker::EnsureTLSInitialized() { |
| + if (!g_thread_allocator_usage.initialized()) { |
| + g_thread_allocator_usage.Initialize([](void* allocator_usage) { |
|
fdoray
2016/10/14 13:03:32
How can you be sure that this isn't initialized co
Sigurður Ásgeirsson
2016/10/14 13:23:47
By contract, EnableHeapTracking can only be called
|
| + delete static_cast<ThreadAllocatorUsage*>(allocator_usage); |
| + }); |
| + } |
| +} |
| + |
| } // namespace debug |
| } // namespace base |