Index: base/trace_event/heap_profiler_heap_dump_writer.cc
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
index 7ad401bb3f2af4ae08572dcaeb2714347a4318a4..bb64a74e721572c08a78ce61559a85ea8f5e39df 100644
--- a/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -66,25 +66,19 @@ bool operator<(const Bucket& lhs, const Bucket& rhs) {
 // returned list will have |backtrace_cursor| advanced or
 // |is_broken_down_by_type_name| set depending on the property to group by.
 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
-  base::hash_map<const char*, Bucket> breakdown;
+  base::hash_map<const void*, Bucket> breakdown;
 
   if (breakBy == BreakDownMode::kByBacktrace) {
     for (const auto& context_and_size : bucket.bytes_by_context) {
       const Backtrace& backtrace = context_and_size.first->backtrace;
-      const char* const* begin = std::begin(backtrace.frames);
-      const char* const* end = std::end(backtrace.frames);
-      const char* const* cursor = begin + bucket.backtrace_cursor;
-
-      // The backtrace in the context is padded with null pointers, but these
-      // should not be considered for breakdown. Adjust end to point past the
-      // last non-null frame.
-      while (begin != end && *(end - 1) == nullptr)
-        end--;
+      const StackFrame* begin = std::begin(backtrace.frames);
+      const StackFrame* end = begin + backtrace.frame_count;
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;
 
       DCHECK_LE(cursor, end);
 
       if (cursor != end) {
-        Bucket& subbucket = breakdown[*cursor];
+        Bucket& subbucket = breakdown[cursor->value];
         subbucket.size += context_and_size.second;
         subbucket.bytes_by_context.push_back(context_and_size);
         subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
@@ -183,13 +177,13 @@ bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
 
   const AllocationContext* context = bucket.bytes_by_context.front().first;
 
-  const char* const* backtrace_begin = std::begin(context->backtrace.frames);
-  const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+  const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+  const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
   DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
 
   Entry entry;
-  entry.stack_frame_id =
-      stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
+  entry.stack_frame_id = stack_frame_deduplicator_->Insert(
+      backtrace_begin, backtrace_end);
 
   // Deduplicate the type name, or use ID -1 if type name is not set.
   entry.type_id = bucket.is_broken_down_by_type_name
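For context, below is a minimal self-contained sketch (not the actual Chromium headers) of the data shapes this patch appears to rely on: a StackFrame whose |value| member is an opaque pointer usable as a hash key, and a Backtrace that carries an explicit |frame_count| so trailing null frames no longer need to be scanned for. The SizeByFrame helper, the kMaxFrames constant, and the example values are illustrative only; they mimic the kByBacktrace branch of GetSubbuckets() but are not part of the patch.

// Hedged sketch only: stand-ins for the real types, kept just close enough
// to show why the breakdown map can be keyed on |cursor->value| and why the
// nullptr-padding scan is no longer needed.
#include <cstddef>
#include <iterator>
#include <unordered_map>
#include <utility>
#include <vector>

constexpr std::size_t kMaxFrames = 12;  // Illustrative fixed capacity.

struct StackFrame {
  const void* value = nullptr;  // Opaque per-frame key (e.g. interned name).
};

struct Backtrace {
  StackFrame frames[kMaxFrames];
  std::size_t frame_count = 0;  // Valid prefix length; replaces null padding.
};

// Groups allocation sizes by the frame found |depth| levels into each
// backtrace, analogous to the kByBacktrace branch of GetSubbuckets().
std::unordered_map<const void*, std::size_t> SizeByFrame(
    const std::vector<std::pair<Backtrace, std::size_t>>& allocations,
    std::size_t depth) {
  std::unordered_map<const void*, std::size_t> breakdown;
  for (const auto& backtrace_and_size : allocations) {
    const Backtrace& backtrace = backtrace_and_size.first;
    const StackFrame* begin = std::begin(backtrace.frames);
    const StackFrame* end = begin + backtrace.frame_count;  // No nullptr scan.
    const StackFrame* cursor = begin + depth;
    if (cursor < end)
      breakdown[cursor->value] += backtrace_and_size.second;
  }
  return breakdown;
}

int main() {
  // Usage example: two allocations sharing the same top frame are merged.
  const char* kMalloc = "malloc";
  Backtrace trace;
  trace.frames[0].value = kMalloc;
  trace.frame_count = 1;
  auto breakdown = SizeByFrame({{trace, 32}, {trace, 16}}, 0);
  return breakdown[kMalloc] == 48 ? 0 : 1;
}

Keying the map on an opaque const void* rather than dereferencing a const char* presumably lets the same grouping logic work whether a frame stores an interned name or a raw program counter; that is an inference from the hash_map key change, not something stated in the patch.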