| Index: base/trace_event/malloc_dump_provider.cc
|
| diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
|
| index 3565b8b95be29c3b254f45e4b38a2af5b998768e..a13c874848f6cbc2aab0ccaf4528196884a8031b 100644
|
| --- a/base/trace_event/malloc_dump_provider.cc
|
| +++ b/base/trace_event/malloc_dump_provider.cc
|
| @@ -6,6 +6,8 @@
|
|
|
| #include <stddef.h>
|
|
|
| +#include <memory>
|
| +
|
| #include "base/allocator/allocator_extension.h"
|
| #include "base/allocator/allocator_shim.h"
|
| #include "base/allocator/features.h"
|
| @@ -126,6 +128,16 @@ void HookFreeDefiniteSize(const AllocatorDispatch* self,
|
| next->free_definite_size_function(next, ptr, size, context);
|
| }
|
|
|
| +void* HookTaggedAlloc(const AllocatorDispatch* self, size_t size, const char* tag) {
|
| + const AllocatorDispatch* const next = self->next;
|
| + void* ptr = next->tagged_alloc_function ?
|
| + next->tagged_alloc_function(next, size, tag) :
|
| + next->alloc_function(next, size, nullptr);
|
| + if (ptr)
|
| + MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size, tag);
|
| + return ptr;
|
| +}
|
| +
|
| AllocatorDispatch g_allocator_hooks = {
|
| &HookAlloc, /* alloc_function */
|
| &HookZeroInitAlloc, /* alloc_zero_initialized_function */
|
| @@ -136,6 +148,7 @@ AllocatorDispatch g_allocator_hooks = {
|
| &HookBatchMalloc, /* batch_malloc_function */
|
| &HookBatchFree, /* batch_free_function */
|
| &HookFreeDefiniteSize, /* free_definite_size_function */
|
| + &HookTaggedAlloc, /* tagged_alloc_function */
|
| nullptr, /* next */
|
| };
|
| #endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
|
| @@ -281,6 +294,61 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
|
| resident_size - allocated_objects_size);
|
| }
|
|
|
| +#if defined(_LIBCPP_COUNTING_ALLOCATOR)
|
| + {
|
| + std::string dump_name = "stl";
|
| +    auto* dump = pmd->CreateAllocatorDump(dump_name);
|
| +
|
| + size_t total_size = 0;
|
| + for (size_t i = 0; i != std::allocation_group_count; ++i) {
|
| + auto group = static_cast<std::allocation_group>(i);
|
| + auto stats = std::allocation_counter::get(group);
|
| +
|
| + #define DUMP_ALLOCATION_COUNTERS(name) { \
|
| + std::string group_dump_name = \
|
| + dump_name + "/" #name "/" + \
|
| + std::get_allocation_group_name(group); \
|
| +      auto* group_dump = pmd->CreateAllocatorDump(group_dump_name);         \
|
| + group_dump->AddScalar(MemoryAllocatorDump::kNameSize, \
|
| + MemoryAllocatorDump::kUnitsBytes, \
|
| + stats.name.size); \
|
| + group_dump->AddScalar("usable_size", \
|
| + MemoryAllocatorDump::kUnitsBytes, \
|
| + stats.name.usable_size); \
|
| + group_dump->AddScalar("constructed_size", \
|
| + MemoryAllocatorDump::kUnitsBytes, \
|
| + stats.name.constructed_size); \
|
| + group_dump->AddScalar("payload_size", \
|
| + MemoryAllocatorDump::kUnitsBytes, \
|
| + stats.name.payload_size); \
|
| + group_dump->AddScalar("count", \
|
| + MemoryAllocatorDump::kUnitsObjects, \
|
| + stats.name.count); \
|
| + group_dump->AddScalar("wasted_size", \
|
| + MemoryAllocatorDump::kUnitsBytes, \
|
| + stats.name.wasted_size()); \
|
| + group_dump->AddScalar("utilization", \
|
| + MemoryAllocatorDump::kUnitsObjects, \
|
| + stats.name.utilization_pct()); \
|
| + group_dump->AddScalar("overhead", \
|
| + MemoryAllocatorDump::kUnitsObjects, \
|
| + stats.name.overhead_pct()); \
|
| + }
|
| +
|
| + DUMP_ALLOCATION_COUNTERS(live)
|
| + DUMP_ALLOCATION_COUNTERS(max_size)
|
| +
|
| + #undef DUMP_ALLOCATION_COUNTERS
|
| +
|
| + total_size += stats.live.size;
|
| + }
|
| +
|
| + dump->AddScalar(MemoryAllocatorDump::kNameSize,
|
| + MemoryAllocatorDump::kUnitsBytes,
|
| + total_size);
|
| + }
|
| +#endif // _LIBCPP_COUNTING_ALLOCATOR
|
| +
|
| // Heap profiler dumps.
|
| if (!heap_profiler_enabled_)
|
| return true;
|
| @@ -336,7 +404,9 @@ void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
|
| heap_profiler_enabled_ = enabled;
|
| }
|
|
|
| -void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
|
| +void MallocDumpProvider::InsertAllocation(void* address, size_t size, const char* tag) {
|
| +  // |tag| is optional: untagged allocations (HookAlloc et al.) must still be recorded.
|
| +
|
| // CurrentId() can be a slow operation (crbug.com/497226). This apparently
|
| // redundant condition short circuits the CurrentID() calls when unnecessary.
|
| if (tid_dumping_heap_ != kInvalidThreadId &&
|
| @@ -354,6 +424,7 @@ void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
|
| AllocationContext context;
|
| if (!tracker->GetContextSnapshot(&context))
|
| return;
|
| +  if (tag) context.type_name = tag;
|
|
|
| AutoLock lock(allocation_register_lock_);
|
| if (!allocation_register_)
|
|
|