Chromium Code Reviews

Index: base/trace_event/malloc_dump_provider.cc
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 6f9aa9602f7334840489e17c1ca15c7c13198c74..d8f82edfed52090195560dc1aa1dde2b656160f8 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -7,7 +7,14 @@
 #include <stddef.h>
 
 #include "base/allocator/allocator_extension.h"
+#include "base/allocator/allocator_shim.h"
+#include "base/allocator/features.h"
+#include "base/trace_event/heap_profiler_allocation_context.h"
+#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
+#include "base/trace_event/heap_profiler_allocation_register.h"
+#include "base/trace_event/heap_profiler_heap_dump_writer.h"
 #include "base/trace_event/process_memory_dump.h"
+#include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
 
 #if defined(OS_MACOSX)
@@ -19,6 +26,65 @@
 namespace base {
 namespace trace_event {
 
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+namespace {
+
+using allocator::AllocatorDispatch;
+
+void* HookAlloc(const AllocatorDispatch* self, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_function(next, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void* HookZeroInitAlloc(const AllocatorDispatch* self, size_t n, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_zero_initialized_function(next, n, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
+  return ptr;
+}
+
+void* HookAllocAligned(const AllocatorDispatch* self,
+                       size_t alignment,
+                       size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->alloc_aligned_function(next, alignment, size);
+  if (ptr)
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void* HookRealloc(const AllocatorDispatch* self, void* address, size_t size) {
+  const AllocatorDispatch* const next = self->next;
+  void* ptr = next->realloc_function(next, address, size);
+  MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+  if (size > 0)  // realloc(size == 0) means free().
+    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
+  return ptr;
+}
+
+void HookFree(const AllocatorDispatch* self, void* address) {
+  if (address)
+    MallocDumpProvider::GetInstance()->RemoveAllocation(address);
+  const AllocatorDispatch* const next = self->next;
+  next->free_function(next, address);
+}
+
+AllocatorDispatch g_allocator_hooks = {
+    &HookAlloc,         /* alloc_function */
+    &HookZeroInitAlloc, /* alloc_zero_initialized_function */
+    &HookAllocAligned,  /* alloc_aligned_function */
+    &HookRealloc,       /* realloc_function */
+    &HookFree,          /* free_function */
+    nullptr,            /* next */
+};
+
+}  // namespace
+#endif  // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
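For readers unfamiliar with the shim: each hook above forwards to self->next and observes the result, so installed dispatch tables form a chain that terminates at the real allocator. A minimal, self-contained sketch of that chain-of-responsibility pattern (hypothetical Dispatch/LoggingAlloc names, not the real AllocatorDispatch API):

    #include <cstdio>
    #include <cstdlib>

    // Toy stand-in for AllocatorDispatch: one function pointer plus |next|.
    struct Dispatch {
      void* (*alloc_function)(const Dispatch* self, size_t size);
      const Dispatch* next;
    };

    // Terminal node: performs the real allocation.
    void* RealAlloc(const Dispatch* /*self*/, size_t size) {
      return malloc(size);
    }

    // Interposed node: forwards to |next| first, then observes the result,
    // just like HookAlloc above.
    void* LoggingAlloc(const Dispatch* self, size_t size) {
      const Dispatch* const next = self->next;
      void* ptr = next->alloc_function(next, size);
      if (ptr)
        printf("observed alloc of %zu bytes at %p\n", size, ptr);
      return ptr;
    }

    const Dispatch g_real = {&RealAlloc, nullptr};
    // InsertAllocatorDispatch effectively performs this chaining at runtime.
    const Dispatch g_hook = {&LoggingAlloc, &g_real};

    int main() {
      void* p = g_hook.alloc_function(&g_hook, 16);
      free(p);
      return 0;
    }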
+
 // static
 const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
 
@@ -28,7 +94,8 @@ MallocDumpProvider* MallocDumpProvider::GetInstance() {
   return Singleton<MallocDumpProvider,
                    LeakySingletonTraits<MallocDumpProvider>>::get();
 }
 
-MallocDumpProvider::MallocDumpProvider() {}
+MallocDumpProvider::MallocDumpProvider()
+    : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {}
 
 MallocDumpProvider::~MallocDumpProvider() {}
 
@@ -96,8 +163,99 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                 resident_size - allocated_objects_size);
   }
 
+  // Heap profiler dumps.
+  if (!heap_profiler_enabled_)
+    return true;
+
+  // The heap profiler dumps are created only when heap profiling is enabled
+  // (--enable-heap-profiling) AND a DETAILED dump is requested. When enabled,
+  // however, the overhead of the heap profiler should always be reported, to
+  // avoid oscillations of the malloc total in LIGHT dumps.
+
+  tid_dumping_heap_ = PlatformThread::CurrentId();
+  // At this point the Insert/RemoveAllocation hooks will ignore this thread.
+  // All the temporary data structures are enclosed in a scope, so that the
+  // heap profiler does not see unbalanced malloc/free calls from these
+  // containers.
+  {
+    TraceEventMemoryOverhead overhead;
+    hash_map<AllocationContext, size_t> bytes_by_context;
+    {
+      AutoLock lock(allocation_register_lock_);
+      if (allocation_register_) {
+        if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+          for (const auto& alloc_size : *allocation_register_)
+            bytes_by_context[alloc_size.context] += alloc_size.size;
+        }
+        allocation_register_->EstimateTraceMemoryOverhead(&overhead);
+      }
+    }  // lock(allocation_register_lock_)
+
+    if (!bytes_by_context.empty()) {
+      scoped_ptr<TracedValue> heap_dump = ExportHeapDump(
+          bytes_by_context, pmd->session_state()->stack_frame_deduplicator(),
+          pmd->session_state()->type_name_deduplicator());
+      pmd->AddHeapDump("malloc", std::move(heap_dump));
+    }
+    overhead.DumpInto("tracing/heap_profiler_malloc", pmd);
+  }
+  tid_dumping_heap_ = kInvalidThreadId;
+
   return true;
 }
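The tid_dumping_heap_ bookkeeping above is a self-exclusion idiom: the dumping thread publishes its own id so that the hooks ignore the dump's own mallocs and frees, does all its temporary-allocating work inside a scope, and only then clears the id. A standalone sketch of the idiom, using std::atomic and std::thread in place of Chromium's PlatformThread (all names hypothetical):

    #include <atomic>
    #include <thread>

    // Id of the thread currently producing a dump; a default-constructed
    // std::thread::id means "no thread is dumping".
    std::atomic<std::thread::id> g_dumping_tid{};

    // Called from the allocation hooks: skip activity on the dumping thread.
    bool ShouldSkipHook() {
      return g_dumping_tid.load(std::memory_order_relaxed) ==
             std::this_thread::get_id();
    }

    void DumpHeap() {
      g_dumping_tid.store(std::this_thread::get_id(),
                          std::memory_order_relaxed);
      {
        // Temporaries that malloc/free (like bytes_by_context above) live and
        // die inside this scope, while the hooks still ignore this thread, so
        // the allocation register never sees unbalanced insert/remove pairs.
      }
      g_dumping_tid.store(std::thread::id(), std::memory_order_relaxed);
    }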
 
+void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
+#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
+  if (enabled) {
+    {
+      AutoLock lock(allocation_register_lock_);
+      allocation_register_.reset(new AllocationRegister());
+    }
+    allocator::InsertAllocatorDispatch(&g_allocator_hooks);
+  } else {
+    AutoLock lock(allocation_register_lock_);
+    allocation_register_.reset();
+    // Insert/RemoveAllocation below will no-op if the register is torn down.
+    // Once disabled, heap profiling will not be re-enabled for the lifetime
+    // of the process.
+  }
+#endif
+  heap_profiler_enabled_ = enabled;
+}
+
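Note the ordering on the enable path above: the register is created under the lock before InsertAllocatorDispatch() installs the hooks, so a hook can never fire while the register is still null during enabling; the null checks in Insert/RemoveAllocation below only matter after disabling, when the hooks remain installed but the register is gone. A condensed sketch of that ordering, with hypothetical names standing in for the Chromium types:

    #include <memory>
    #include <mutex>

    struct Register {};  // Stand-in for AllocationRegister.

    std::mutex g_lock;
    std::unique_ptr<Register> g_register;

    // Stand-in for allocator::InsertAllocatorDispatch(); in the real shim the
    // hooks cannot be uninstalled once inserted.
    void InstallHooks() {}

    void SetEnabled(bool enabled) {
      if (enabled) {
        {
          std::lock_guard<std::mutex> lock(g_lock);
          g_register.reset(new Register());  // Register exists first...
        }
        InstallHooks();  // ...so a freshly installed hook never sees null.
      } else {
        std::lock_guard<std::mutex> lock(g_lock);
        // Hooks stay installed; their null check turns them into no-ops.
        g_register.reset();
      }
    }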
+void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
+  // CurrentId() can be a slow operation (crbug.com/497226). This apparently
+  // redundant condition short-circuits the CurrentId() call whenever no dump
+  // is in progress.
+  if (tid_dumping_heap_ != kInvalidThreadId &&
+      tid_dumping_heap_ == PlatformThread::CurrentId())
+    return;
+
+  // AllocationContextTracker will return nullptr when called re-entrantly.
+  // This happens when GetInstanceForCurrentThread() is called for the first
+  // time: the new() inside the tracker re-enters the heap profiler, in which
+  // case we just want to bail out early.
+  auto tracker = AllocationContextTracker::GetInstanceForCurrentThread();
+  if (!tracker)
+    return;
+  AllocationContext context = tracker->GetContextSnapshot();
Dmitry Skiba
2016/03/24 22:58:06
BTW, you might want to move this after if (!allocation_register_).

Primiano Tucci (use gerrit)
2016/03/25 02:05:03
Hmm I think what is missing here is a speculative …
+
+  AutoLock lock(allocation_register_lock_);
+  if (!allocation_register_)
+    return;
+
+  allocation_register_->Insert(address, size, context);
+}
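The nullptr check on the tracker guards against a subtle recursion: the very first GetInstanceForCurrentThread() call allocates its per-thread instance, and that allocation itself re-enters the hook. A toy illustration of the early-out (hypothetical Tracker type; the re-entrant allocation is simulated with a direct call from the constructor):

    #include <cstdio>

    void OnAllocHook();  // Defined below.

    struct Tracker {
      Tracker() {
        // In the real shim, the new() below goes through the allocator and
        // re-enters the hook; simulate that re-entrancy with a direct call.
        OnAllocHook();
      }

      static Tracker* GetForCurrentThread() {
        static thread_local Tracker* instance = nullptr;
        static thread_local bool constructing = false;
        if (instance)
          return instance;
        if (constructing)
          return nullptr;  // Re-entrant call while the instance is built.
        constructing = true;
        instance = new Tracker();
        constructing = false;
        return instance;
      }
    };

    void OnAllocHook() {
      Tracker* tracker = Tracker::GetForCurrentThread();
      if (!tracker) {
        printf("re-entrant call: early out\n");
        return;
      }
      printf("allocation recorded\n");
    }

    int main() {
      OnAllocHook();  // Constructs the tracker; recurses once and bails out.
      OnAllocHook();  // Finds the existing instance directly.
      return 0;
    }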
+
+void MallocDumpProvider::RemoveAllocation(void* address) {
+  // No re-entrancy is expected here, as none of the calls below should cause
+  // a free() (|allocation_register_| does its own heap management).
+  if (tid_dumping_heap_ != kInvalidThreadId &&
+      tid_dumping_heap_ == PlatformThread::CurrentId())
+    return;
+  AutoLock lock(allocation_register_lock_);
+  if (!allocation_register_)
+    return;
+  allocation_register_->Remove(address);
+}
+
 }  // namespace trace_event
 }  // namespace base