| Index: base/trace_event/heap_profiler_heap_dump_writer.cc
|
| diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
|
| index 7ad401bb3f2af4ae08572dcaeb2714347a4318a4..9a3112eacf3ea508c4b6da1adc54d279280c0ae5 100644
|
| --- a/base/trace_event/heap_profiler_heap_dump_writer.cc
|
| +++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
|
| @@ -41,13 +41,21 @@ enum class BreakDownMode { kByBacktrace, kByTypeName };
|
|
|
| // A group of bytes for which the context shares a prefix.
|
| struct Bucket {
|
| - Bucket() : size(0), backtrace_cursor(0), is_broken_down_by_type_name(false) {}
|
| + Bucket()
|
| + : size(0),
|
| + count(0),
|
| + backtrace_cursor(0),
|
| + is_broken_down_by_type_name(false) {}
|
|
|
| - std::vector<std::pair<const AllocationContext*, size_t>> bytes_by_context;
|
| + std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
|
| + metrics_by_context;
|
|
|
| - // The sum of the sizes of |bytes_by_context|.
|
| + // The sum of the sizes of |metrics_by_context|.
|
| size_t size;
|
|
|
| + // The sum of the number of allocations of |metrics_by_context|.
|
| + size_t count;
|
| +
|
| // The index of the stack frame that has not yet been broken down by. For all
|
| // elements in this bucket, the stack frames 0 up to (but not including) the
|
| // cursor, must be equal.
|
| @@ -69,8 +77,8 @@ std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
|
| base::hash_map<const char*, Bucket> breakdown;
|
|
|
| if (breakBy == BreakDownMode::kByBacktrace) {
|
| - for (const auto& context_and_size : bucket.bytes_by_context) {
|
| - const Backtrace& backtrace = context_and_size.first->backtrace;
|
| + for (const auto& context_and_metrics : bucket.metrics_by_context) {
|
| + const Backtrace& backtrace = context_and_metrics.first->backtrace;
|
| const char* const* begin = std::begin(backtrace.frames);
|
| const char* const* end = std::end(backtrace.frames);
|
| const char* const* cursor = begin + bucket.backtrace_cursor;
|
| @@ -85,24 +93,28 @@ std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
|
|
|
| if (cursor != end) {
|
| Bucket& subbucket = breakdown[*cursor];
|
| - subbucket.size += context_and_size.second;
|
| - subbucket.bytes_by_context.push_back(context_and_size);
|
| + subbucket.size += context_and_metrics.second.size;
|
| + subbucket.count += context_and_metrics.second.count;
|
| + subbucket.metrics_by_context.push_back(context_and_metrics);
|
| subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
|
| subbucket.is_broken_down_by_type_name =
|
| bucket.is_broken_down_by_type_name;
|
| DCHECK_GT(subbucket.size, 0u);
|
| + DCHECK_GT(subbucket.count, 0u);
|
| }
|
| }
|
| } else if (breakBy == BreakDownMode::kByTypeName) {
|
| if (!bucket.is_broken_down_by_type_name) {
|
| - for (const auto& context_and_size : bucket.bytes_by_context) {
|
| - const AllocationContext* context = context_and_size.first;
|
| + for (const auto& context_and_metrics : bucket.metrics_by_context) {
|
| + const AllocationContext* context = context_and_metrics.first;
|
| Bucket& subbucket = breakdown[context->type_name];
|
| - subbucket.size += context_and_size.second;
|
| - subbucket.bytes_by_context.push_back(context_and_size);
|
| + subbucket.size += context_and_metrics.second.size;
|
| + subbucket.count += context_and_metrics.second.count;
|
| + subbucket.metrics_by_context.push_back(context_and_metrics);
|
| subbucket.backtrace_cursor = bucket.backtrace_cursor;
|
| subbucket.is_broken_down_by_type_name = true;
|
| DCHECK_GT(subbucket.size, 0u);
|
| + DCHECK_GT(subbucket.count, 0u);
|
| }
|
| }
|
| }
|
| @@ -179,9 +191,9 @@ bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
|
| // The contexts in the bucket are all different, but the [begin, cursor) range
|
| // is equal for all contexts in the bucket, and the type names are the same if
|
| // |is_broken_down_by_type_name| is set.
|
| - DCHECK(!bucket.bytes_by_context.empty());
|
| + DCHECK(!bucket.metrics_by_context.empty());
|
|
|
| - const AllocationContext* context = bucket.bytes_by_context.front().first;
|
| + const AllocationContext* context = bucket.metrics_by_context.front().first;
|
|
|
| const char* const* backtrace_begin = std::begin(context->backtrace.frames);
|
| const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
|
| @@ -197,6 +209,7 @@ bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
|
| : -1;
|
|
|
| entry.size = bucket.size;
|
| + entry.count = bucket.count;
|
|
|
| auto position_and_inserted = entries_.insert(entry);
|
| return position_and_inserted.second;
|
| @@ -222,17 +235,19 @@ void HeapDumpWriter::BreakDown(const Bucket& bucket) {
|
| }
|
|
|
| const std::set<Entry>& HeapDumpWriter::Summarize(
|
| - const hash_map<AllocationContext, size_t>& bytes_by_context) {
|
| + const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context) {
|
| // Start with one bucket that represents the entire heap. Iterate by
|
| // reference, because the allocation contexts are going to point to allocation
|
| - // contexts stored in |bytes_by_context|.
|
| + // contexts stored in |metrics_by_context|.
|
| Bucket root_bucket;
|
| - for (const auto& context_and_size : bytes_by_context) {
|
| - DCHECK_GT(context_and_size.second, 0u);
|
| - const AllocationContext* context = &context_and_size.first;
|
| - const size_t size = context_and_size.second;
|
| - root_bucket.bytes_by_context.push_back(std::make_pair(context, size));
|
| - root_bucket.size += size;
|
| + for (const auto& context_and_metrics : metrics_by_context) {
|
| + DCHECK_GT(context_and_metrics.second.size, 0u);
|
| + DCHECK_GT(context_and_metrics.second.count, 0u);
|
| + const AllocationContext* context = &context_and_metrics.first;
|
| + root_bucket.metrics_by_context.push_back(
|
| + std::make_pair(context, context_and_metrics.second));
|
| + root_bucket.size += context_and_metrics.second.size;
|
| + root_bucket.count += context_and_metrics.second.count;
|
| }
|
|
|
| AddEntryForBucket(root_bucket);
|
| @@ -256,6 +271,9 @@ std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
|
| SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
|
| traced_value->SetString("size", buffer);
|
|
|
| + SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
|
| + traced_value->SetString("count", buffer);
|
| +
|
| if (entry.stack_frame_id == -1) {
|
| // An empty backtrace (which will have ID -1) is represented by the empty
|
| // string, because there is no leaf frame to reference in |stackFrames|.
|
| @@ -285,12 +303,12 @@ std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
|
| } // namespace internal
|
|
|
| std::unique_ptr<TracedValue> ExportHeapDump(
|
| - const hash_map<AllocationContext, size_t>& bytes_by_size,
|
| + const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
|
| StackFrameDeduplicator* stack_frame_deduplicator,
|
| TypeNameDeduplicator* type_name_deduplicator) {
|
| internal::HeapDumpWriter writer(stack_frame_deduplicator,
|
| type_name_deduplicator);
|
| - return Serialize(writer.Summarize(bytes_by_size));
|
| + return Serialize(writer.Summarize(metrics_by_context));
|
| }
|
|
|
| } // namespace trace_event
|
|
|