Chromium Code Reviews

Unified Diff: base/trace_event/heap_profiler_heap_dump_writer.cc

Issue 1839503002: [tracing] Add native allocation tracing mode. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: WalkStackFrames (wants frame pointers) Created 4 years, 8 months ago
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/heap_profiler_heap_dump_writer.h"

 #include <stdint.h>

 #include <algorithm>
 #include <iterator>
(...skipping 48 matching lines...)

 // Comparison operator to order buckets by their size.
 bool operator<(const Bucket& lhs, const Bucket& rhs) {
   return lhs.size < rhs.size;
 }

 // Groups the allocations in the bucket by |breakBy|. The buckets in the
 // returned list will have |backtrace_cursor| advanced or
 // |is_broken_down_by_type_name| set depending on the property to group by.
 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
-  base::hash_map<const char*, Bucket> breakdown;
+  base::hash_map<StackFrame, Bucket> breakdown;

   if (breakBy == BreakDownMode::kByBacktrace) {
     for (const auto& context_and_size : bucket.bytes_by_context) {
       const Backtrace& backtrace = context_and_size.first->backtrace;
-      const char* const* begin = std::begin(backtrace.frames);
+      const StackFrame* begin = std::begin(backtrace.frames);
Primiano Tucci (use gerrit) 2016/04/01 15:56:28: if StackFrame is "const char*", I think const char…
Dmitry Skiba 2016/04/04 22:38:34: Actually, 'const StackFrame*' and 'StackFrame cons…
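
Aside: if StackFrame is indeed an alias for const char*, as the thread above suggests, then 'const StackFrame*' and 'const char* const*' name the same type, and the new spelling is purely cosmetic. A minimal sketch (the alias here is a hypothetical stand-in for the real typedef):

    #include <type_traits>

    // Hypothetical alias mirroring what the comment thread describes.
    using StackFrame = const char*;

    // const applied through the alias lands on the pointer itself, so the
    // two spellings are interchangeable.
    static_assert(std::is_same<const StackFrame*, const char* const*>::value,
                  "'const StackFrame*' is 'const char* const*'");
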
-      const char* const* end = std::end(backtrace.frames);
-      const char* const* cursor = begin + bucket.backtrace_cursor;
+      const StackFrame* end = std::end(backtrace.frames);
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;

       // The backtrace in the context is padded with null pointers, but these
       // should not be considered for breakdown. Adjust end to point past the
       // last non-null frame.
       while (begin != end && *(end - 1) == nullptr)
         end--;

       DCHECK_LE(cursor, end);

       if (cursor != end) {
(...skipping 95 matching lines...)
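
Aside: the null-padding trim in the loop above is easy to misread, so here is the same idea as a standalone sketch with hypothetical frame data:

    #include <cstddef>
    #include <iterator>

    // Fixed-size backtrace storage, padded with null frames at the end.
    const char* const kFrames[4] = {"main", "MessageLoop::Run", nullptr,
                                    nullptr};

    // Backs |end| up past the trailing null padding, like the loop above.
    size_t CountRealFrames(const char* const* begin, const char* const* end) {
      while (begin != end && *(end - 1) == nullptr)
        end--;
      return static_cast<size_t>(end - begin);
    }

    // CountRealFrames(std::begin(kFrames), std::end(kFrames)) returns 2.
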
 HeapDumpWriter::~HeapDumpWriter() {}

 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
   // The contexts in the bucket are all different, but the [begin, cursor) range
   // is equal for all contexts in the bucket, and the type names are the same if
   // |is_broken_down_by_type_name| is set.
   DCHECK(!bucket.bytes_by_context.empty());

   const AllocationContext* context = bucket.bytes_by_context.front().first;

-  const char* const* backtrace_begin = std::begin(context->backtrace.frames);
-  const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+  const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+  const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
   DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));

   Entry entry;
-  entry.stack_frame_id =
-      stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
+  entry.stack_frame_id = stack_frame_deduplicator_->Insert(
+      context->backtrace.frame_type, backtrace_begin, backtrace_end);

   // Deduplicate the type name, or use ID -1 if type name is not set.
   entry.type_id = bucket.is_broken_down_by_type_name
                       ? type_name_deduplicator_->Insert(context->type_name)
                       : -1;

   entry.size = bucket.size;

   auto position_and_inserted = entries_.insert(entry);
   return position_and_inserted.second;
(...skipping 20 matching lines...)
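
Aside: both deduplicators above exist to intern repeated values so the serialized dump can refer to backtraces and type names by small integer ids. A minimal sketch of that interning pattern (not Chromium's actual deduplicator API):

    #include <map>
    #include <string>

    // Hands out a stable integer id per distinct key; inserting the same
    // key twice returns the id assigned the first time.
    class Deduplicator {
     public:
      int Insert(const std::string& key) {
        auto it_and_inserted = ids_.insert({key, next_id_});
        if (it_and_inserted.second)
          next_id_++;  // The key was new and consumed the next id.
        return it_and_inserted.first->second;
      }

     private:
      std::map<std::string, int> ids_;
      int next_id_ = 0;
    };
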

 const std::set<Entry>& HeapDumpWriter::Summarize(
     const hash_map<AllocationContext, size_t>& bytes_by_context) {
   // Start with one bucket that represents the entire heap. Iterate by
   // reference, because the allocation contexts are going to point to allocation
   // contexts stored in |bytes_by_context|.
   Bucket root_bucket;
   for (const auto& context_and_size : bytes_by_context) {
     const AllocationContext* context = &context_and_size.first;
     const size_t size = context_and_size.second;
-    root_bucket.bytes_by_context.push_back(std::make_pair(context, size));
+    if (size != 0) {
Primiano Tucci (use gerrit) 2016/04/01 15:56:28: uh, do we see any zero allocations?
Dmitry Skiba 2016/04/04 22:38:34: Yup. Maybe we need to ignore them earlier, in Mall…
-    root_bucket.size += size;
+      // GetSubbuckets() expects non-zero allocations
+      root_bucket.bytes_by_context.push_back(std::make_pair(context, size));
+      root_bucket.size += size;
+    }
   }
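
Aside: with the new guard, the root bucket only aggregates contexts whose net size is non-zero, which is what the in-code comment about GetSubbuckets() requires. The same filtering pattern as a standalone sketch, with hypothetical stand-in types:

    #include <cstddef>
    #include <unordered_map>
    #include <utility>
    #include <vector>

    struct Context {};  // Stand-in for AllocationContext.
    struct Bucket {
      std::vector<std::pair<const Context*, size_t>> bytes_by_context;
      size_t size = 0;
    };

    // Builds the root bucket, dropping zero-byte entries so that later
    // breakdown passes can assume every entry contributes some size.
    Bucket BuildRootBucket(
        const std::unordered_map<const Context*, size_t>& bytes_by_context) {
      Bucket root;
      for (const auto& entry : bytes_by_context) {
        if (entry.second == 0)
          continue;  // Net-zero contexts add nothing to the dump.
        root.bytes_by_context.push_back(
            std::make_pair(entry.first, entry.second));
        root.size += entry.second;
      }
      return root;
    }
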

   AddEntryForBucket(root_bucket);

   // Recursively break down the heap and fill |entries_| with entries to dump.
   BreakDown(root_bucket);

   return entries_;
 }

(...skipping 42 matching lines...)
     const hash_map<AllocationContext, size_t>& bytes_by_size,
     StackFrameDeduplicator* stack_frame_deduplicator,
     TypeNameDeduplicator* type_name_deduplicator) {
   internal::HeapDumpWriter writer(stack_frame_deduplicator,
                                   type_name_deduplicator);
   return Serialize(writer.Summarize(bytes_by_size));
 }

 }  // namespace trace_event
 }  // namespace base
