Chromium Code Reviews

Unified Diff: base/trace_event/heap_profiler_heap_dump_writer.cc

Issue 1891543003: [tracing] Turn StackFrame into struct. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Add "the ones closer to main()" Created 4 years, 8 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "base/trace_event/heap_profiler_heap_dump_writer.h"
 
 #include <stdint.h>
 
 #include <algorithm>
 #include <iterator>
(...skipping 56 matching lines...)
 
 // Comparison operator to order buckets by their size.
 bool operator<(const Bucket& lhs, const Bucket& rhs) {
   return lhs.size < rhs.size;
 }
 
 // Groups the allocations in the bucket by |breakBy|. The buckets in the
 // returned list will have |backtrace_cursor| advanced or
 // |is_broken_down_by_type_name| set depending on the property to group by.
 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
-  base::hash_map<const char*, Bucket> breakdown;
+  base::hash_map<const void*, Bucket> breakdown;
 
   if (breakBy == BreakDownMode::kByBacktrace) {
     for (const auto& context_and_metrics : bucket.metrics_by_context) {
       const Backtrace& backtrace = context_and_metrics.first->backtrace;
-      const char* const* begin = std::begin(backtrace.frames);
-      const char* const* end = std::end(backtrace.frames);
-      const char* const* cursor = begin + bucket.backtrace_cursor;
-
-      // The backtrace in the context is padded with null pointers, but these
-      // should not be considered for breakdown. Adjust end to point past the
-      // last non-null frame.
-      while (begin != end && *(end - 1) == nullptr)
-        end--;
+      const StackFrame* begin = std::begin(backtrace.frames);
+      const StackFrame* end = begin + backtrace.frame_count;
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;
 
       DCHECK_LE(cursor, end);
 
       if (cursor != end) {
-        Bucket& subbucket = breakdown[*cursor];
+        Bucket& subbucket = breakdown[cursor->value];
         subbucket.size += context_and_metrics.second.size;
         subbucket.count += context_and_metrics.second.count;
         subbucket.metrics_by_context.push_back(context_and_metrics);
         subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
         subbucket.is_broken_down_by_type_name =
             bucket.is_broken_down_by_type_name;
         DCHECK_GT(subbucket.size, 0u);
         DCHECK_GT(subbucket.count, 0u);
       }
     }
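The null-trimming loop could be deleted because a Backtrace now records how many of its frames are valid, and each frame is a StackFrame struct whose value member is what the breakdown map keys on. A minimal sketch of the shapes this implies, assuming only what the diff shows (the capacity constant and member layout are guesses; the real definitions live in heap_profiler_allocation_context.h):

// Minimal sketch, not the actual Chromium definitions. GetSubbuckets() above
// keys a hash_map<const void*, Bucket> on cursor->value and computes the end
// of the valid range as begin + backtrace.frame_count, which is all this
// sketch tries to capture.
#include <cstddef>

struct StackFrame {
  // Used as the breakdown key, so it must convert to const void* (for example
  // an interned function name or a raw program counter).
  const void* value;
};

struct Backtrace {
  static const size_t kCapacity = 12;  // Assumed capacity; not in this diff.
  StackFrame frames[kCapacity];  // Only the first |frame_count| entries are
  size_t frame_count;            // meaningful; no null-pointer terminator.
};

With an explicit frame_count, end is simply begin + backtrace.frame_count, so the old scan for trailing null pointers is no longer needed.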
(...skipping 82 matching lines...)
 HeapDumpWriter::~HeapDumpWriter() {}
 
 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
   // The contexts in the bucket are all different, but the [begin, cursor) range
   // is equal for all contexts in the bucket, and the type names are the same if
   // |is_broken_down_by_type_name| is set.
   DCHECK(!bucket.metrics_by_context.empty());
 
   const AllocationContext* context = bucket.metrics_by_context.front().first;
 
-  const char* const* backtrace_begin = std::begin(context->backtrace.frames);
-  const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+  const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+  const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
   DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
 
   Entry entry;
-  entry.stack_frame_id =
-      stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
+  entry.stack_frame_id = stack_frame_deduplicator_->Insert(
+      backtrace_begin, backtrace_end);
 
   // Deduplicate the type name, or use ID -1 if type name is not set.
   entry.type_id = bucket.is_broken_down_by_type_name
                       ? type_name_deduplicator_->Insert(context->type_name)
                       : -1;
 
   entry.size = bucket.size;
   entry.count = bucket.count;
 
   auto position_and_inserted = entries_.insert(entry);
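The assignments above imply an Entry that aggregates one row of the heap dump, keyed by its deduplicated backtrace prefix and type name. A rough sketch inferred from those assignments (field types are assumptions; the actual struct, and the hash and equality used by entries_, are declared in heap_profiler_heap_dump_writer.h and are untouched by this patch):

// Rough sketch inferred from AddEntryForBucket(); not the actual definition.
#include <cstddef>

struct Entry {
  size_t size;         // Total bytes attributed to this bucket.
  size_t count;        // Number of allocations attributed to this bucket.
  int type_id;         // Deduplicated type name ID, or -1 when the bucket is
                       // not broken down by type name.
  int stack_frame_id;  // ID returned by StackFrameDeduplicator::Insert() for
                       // the [backtrace_begin, backtrace_end) prefix.
};

Two buckets that reduce to the same (stack_frame_id, type_id) pair would collide in entries_, which is presumably why AddEntryForBucket() returns whether entries_.insert() actually inserted a new element.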
(...skipping 91 matching lines...)
     const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
     StackFrameDeduplicator* stack_frame_deduplicator,
     TypeNameDeduplicator* type_name_deduplicator) {
   internal::HeapDumpWriter writer(stack_frame_deduplicator,
                                   type_name_deduplicator);
   return Serialize(writer.Summarize(metrics_by_context));
 }
 
 }  // namespace trace_event
 }  // namespace base
