Index: base/debug/thread_heap_usage_tracker.cc
diff --git a/base/debug/thread_heap_usage_tracker.cc b/base/debug/thread_heap_usage_tracker.cc
index 3761cf9ab7fc1f43c814ffa6cebc78994cc89f1a..f3bfda860458978a730f391ec3ea835a412aaa17 100644
--- a/base/debug/thread_heap_usage_tracker.cc
+++ b/base/debug/thread_heap_usage_tracker.cc
@@ -30,7 +30,7 @@ using base::allocator::AllocatorDispatch;
 ThreadLocalStorage::StaticSlot g_thread_allocator_usage = TLS_INITIALIZER;
-ThreadHeapUsage* const kInitializingSentinel =
+ThreadHeapUsage* const kInitializationOrTeardownSentinel =
Sigurður Ásgeirsson
2017/02/01 22:21:27
I'd be tempted to use two sentinels here, as you h…
Primiano Tucci (use gerrit)
2017/02/01 22:39:05
Yeah I thought to this. Honestly the only reason I…

     reinterpret_cast<ThreadHeapUsage*>(-1);
 bool g_heap_tracking_enabled = false;
@@ -174,19 +174,14 @@ AllocatorDispatch allocator_dispatch = {&AllocFn,
 ThreadHeapUsage* GetOrCreateThreadUsage() {
   ThreadHeapUsage* allocator_usage =
       static_cast<ThreadHeapUsage*>(g_thread_allocator_usage.Get());
-  if (allocator_usage == kInitializingSentinel)
+  if (allocator_usage == kInitializationOrTeardownSentinel)
     return nullptr;  // Re-entrancy case.
   if (allocator_usage == nullptr) {
     // Prevent reentrancy due to the allocation below.
-    g_thread_allocator_usage.Set(kInitializingSentinel);
-
-    // Delegate the allocation of the per-thread structure to the underlying
-    // heap shim, for symmetry with the deallocation. Otherwise interposing
-    // shims may mis-attribute or mis-direct this allocation.
-    const AllocatorDispatch* next = allocator_dispatch.next;
-    allocator_usage = new (next->alloc_function(next, sizeof(ThreadHeapUsage)))
-        ThreadHeapUsage();
+    g_thread_allocator_usage.Set(kInitializationOrTeardownSentinel);
+
+    allocator_usage = new ThreadHeapUsage();
Sigurður Ásgeirsson
2017/02/01 22:21:27
comically this'll lead to the shim accounting its…
Primiano Tucci (use gerrit)
2017/02/01 22:39:05
Hmm how? This shouldn't happen because we set the…
Sigurður Ásgeirsson
2017/02/01 22:46:16
Ah, true :).

     static_assert(std::is_pod<ThreadHeapUsage>::value,
                   "AllocatorDispatch must be POD");
     memset(allocator_usage, 0, sizeof(*allocator_usage));
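
The re-entrancy guard this hunk relies on is easier to see in isolation. The following is a minimal sketch, not the Chromium code: PerThreadStats, GetOrCreateStats and RecordAllocHook are made-up names, and a replaced global operator new stands in for the allocator shim. It shows how a sentinel parked in the thread-local slot stops the hook from recursing when the per-thread bookkeeping object is itself allocated through the hooked allocator, which is the situation "allocator_usage = new ThreadHeapUsage()" creates now that the allocation no longer bypasses the shim through next->alloc_function.

// sentinel_reentrancy_sketch.cc: minimal sketch, not the Chromium code.
// PerThreadStats, GetOrCreateStats and RecordAllocHook are illustrative names;
// the replaced global operator new stands in for the shim's AllocFn.
#include <cstdio>
#include <cstdlib>
#include <new>

struct PerThreadStats {
  unsigned long long alloc_count;
};

// Stand-in for the ThreadLocalStorage slot; a plain thread_local suffices here.
thread_local PerThreadStats* tls_stats = nullptr;

PerThreadStats* Sentinel() {
  return reinterpret_cast<PerThreadStats*>(-1);
}

PerThreadStats* GetOrCreateStats() {
  if (tls_stats == Sentinel())
    return nullptr;  // Re-entrant call while the slot is being initialized.
  if (tls_stats == nullptr) {
    tls_stats = Sentinel();  // Blocks the recursion triggered by the new below.
    // This allocation re-enters operator new and the hook, which sees the
    // sentinel and bails out instead of recursing or counting it.
    PerThreadStats* stats = new PerThreadStats();
    stats->alloc_count = 0;
    tls_stats = stats;
  }
  return tls_stats;
}

void RecordAllocHook(std::size_t /* size */) {
  if (PerThreadStats* stats = GetOrCreateStats())
    ++stats->alloc_count;
}

// Stand-in for the allocator shim: every operator new runs the hook first.
void* operator new(std::size_t size) {
  RecordAllocHook(size);
  if (void* ptr = std::malloc(size))
    return ptr;
  throw std::bad_alloc();
}

void operator delete(void* ptr) noexcept {
  std::free(ptr);
}

int main() {
  int* p = new int(42);  // Counted: the hook creates the stats object, then counts this.
  delete p;
  std::printf("allocations recorded on this thread: %llu\n",
              GetOrCreateStats()->alloc_count);
  return 0;
}

Per the exchange above, the stats object's own allocation is not accounted: by the time it re-enters the hook, the slot already holds the sentinel, so GetOrCreateStats returns null and the count is skipped. The teardown side (the TLS destructor) is the subject of the next hunk and is omitted from this sketch.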
@@ -297,12 +292,14 @@ ThreadHeapUsageTracker::GetDispatchForTesting() {
 void ThreadHeapUsageTracker::EnsureTLSInitialized() {
   if (!g_thread_allocator_usage.initialized()) {
-    g_thread_allocator_usage.Initialize([](void* allocator_usage) {
-      // Delegate the freeing of the per-thread structure to the next-lower
-      // heap shim. Otherwise this free will re-initialize the TLS on thread
-      // exit.
-      allocator_dispatch.next->free_function(allocator_dispatch.next,
-                                             allocator_usage);
+    g_thread_allocator_usage.Initialize([](void* thread_heap_usage) {
+      // Deleting the ThreadHeapUsage TLS object will re-enter the shim and hit
+      // RecordFree() above. The sentinel prevents RecordFree() from re-creating
+      // another ThreadHeapUsage object.
+      if (thread_heap_usage == kInitializationOrTeardownSentinel)
Sigurður Ásgeirsson
2017/02/01 22:21:27
How would this ever happen?
Primiano Tucci (use gerrit)
2017/02/01 22:39:05
Ha! this is tricky but will happen all the times.
Sigurður Ásgeirsson
2017/02/01 22:46:16
Ah - perhaps this warrants a quick comment, as it'…
Primiano Tucci (use gerrit)
2017/02/02 00:38:27
Done.

+        return;
+      g_thread_allocator_usage.Set(kInitializationOrTeardownSentinel);
+      delete static_cast<ThreadHeapUsage*>(thread_heap_usage);
     });
   }
 }
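
On the question above of how the destructor could ever observe the sentinel, a plausible reading of the exchange: the lambda stores the sentinel back into the slot before deleting the object and never clears it, and TLS implementations (POSIX pthreads, and base::ThreadLocalStorage follows the same model) re-run destructors for slots that are still non-null after a pass, so the lambda runs a second time with the sentinel as its argument, and the early return keeps -1 from being treated as a real pointer. The sketch below, with illustrative names and plain pthreads rather than Chromium's ThreadLocalStorage, reproduces the two passes.

// tls_destructor_rerun_sketch.cc: minimal sketch, not Chromium's
// ThreadLocalStorage. It demonstrates the TLS rule the teardown path relies
// on: a destructor that leaves a non-null value in its slot is invoked again
// on a later pass, this time receiving that value.
// Build with: g++ tls_destructor_rerun_sketch.cc -pthread
#include <cstdio>
#include <pthread.h>

pthread_key_t g_key;
void* const kSentinel = reinterpret_cast<void*>(-1);

void SlotDestructor(void* value) {
  if (value == kSentinel) {
    std::printf("second pass: sentinel seen, nothing to free\n");
    return;  // The slot stays null, so the teardown loop stops here.
  }
  std::printf("first pass: deleting the real per-thread object\n");
  // Re-arm the slot. Because it is non-null after this pass, the TLS
  // machinery calls SlotDestructor once more, handing it kSentinel.
  pthread_setspecific(g_key, kSentinel);
  delete static_cast<int*>(value);
}

void* ThreadMain(void*) {
  pthread_setspecific(g_key, new int(7));  // Stand-in for the per-thread stats.
  return nullptr;  // Thread exit triggers the destructor passes.
}

int main() {
  pthread_key_create(&g_key, &SlotDestructor);
  pthread_t thread;
  pthread_create(&thread, nullptr, &ThreadMain, nullptr);
  pthread_join(thread, nullptr);
  pthread_key_delete(g_key);
  return 0;
}

The first pass frees the real object and re-arms the slot; the second pass sees the sentinel and stops, which is what the early return in the new lambda accomplishes.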