Index: base/trace_event/malloc_dump_provider.cc
diff --git a/base/trace_event/malloc_dump_provider.cc b/base/trace_event/malloc_dump_provider.cc
index 047f2c336d5c6295086316b12f883811ab9f2b0c..e6fb555a3211c5531d66ba65c09aba46e09bd121 100644
--- a/base/trace_event/malloc_dump_provider.cc
+++ b/base/trace_event/malloc_dump_provider.cc
@@ -15,7 +15,7 @@
 #include "base/trace_event/heap_profiler_allocation_context.h"
 #include "base/trace_event/heap_profiler_allocation_context_tracker.h"
 #include "base/trace_event/heap_profiler_allocation_register.h"
-#include "base/trace_event/heap_profiler_heap_dump_writer.h"
+#include "base/trace_event/heap_profiler_event_writer.h"
 #include "base/trace_event/process_memory_dump.h"
 #include "base/trace_event/trace_event_argument.h"
 #include "build/build_config.h"
@@ -297,35 +297,25 @@ bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
   // Enclosing all the temporary data structures in a scope, so that the heap
   // profiler does not see unbalanced malloc/free calls from these containers.
   {
-    size_t shim_allocated_objects_size = 0;
-    size_t shim_allocated_objects_count = 0;
-    TraceEventMemoryOverhead overhead;
-    std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
-    {
-      AutoLock lock(allocation_register_lock_);
-      if (allocation_register_) {
-        if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
-          for (const auto& alloc_size : *allocation_register_) {
-            AllocationMetrics& metrics = metrics_by_context[alloc_size.context];
-            metrics.size += alloc_size.size;
-            metrics.count++;
-
-            // Aggregate data for objects allocated through the shim.
-            shim_allocated_objects_size += alloc_size.size;
-            shim_allocated_objects_count++;
-          }
+    AutoLock lock(allocation_register_lock_);
+    if (allocation_register_) {
+      if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
+        size_t shim_allocated_objects_size = 0;
+        size_t shim_allocated_objects_count = 0;
+        for (const auto& alloc_size : *allocation_register_) {
+          // Aggregate data for objects allocated through the shim.
+          shim_allocated_objects_size += alloc_size.size;
+          shim_allocated_objects_count++;
         }
-        allocation_register_->EstimateTraceMemoryOverhead(&overhead);
+        inner_dump->AddScalar("shim_allocated_objects_size",
+                              MemoryAllocatorDump::kUnitsBytes,
+                              shim_allocated_objects_size);
+        inner_dump->AddScalar("shim_allocator_object_count",
+                              MemoryAllocatorDump::kUnitsObjects,
+                              shim_allocated_objects_count);
       }
-
-      inner_dump->AddScalar("shim_allocated_objects_size",
-                            MemoryAllocatorDump::kUnitsBytes,
-                            shim_allocated_objects_size);
-      inner_dump->AddScalar("shim_allocator_object_count",
-                            MemoryAllocatorDump::kUnitsObjects,
-                            shim_allocated_objects_count);
-    }  // lock(allocation_register_lock_)
-    pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
+      pmd->DumpHeapUsage(*allocation_register_, "malloc");
+    }
   }
   tid_dumping_heap_ = kInvalidThreadId;