Chromium Code Reviews

Diff: base/trace_event/heap_profiler_heap_dump_writer.cc

Issue 1839503002: [tracing] Add native allocation tracing mode. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Add type to StackFrame; format thread name (created 4 years, 8 months ago)
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/heap_profiler_heap_dump_writer.h"

 #include <stdint.h>

 #include <algorithm>
 #include <iterator>
(...skipping 48 matching lines...)

 // Comparison operator to order buckets by their size.
 bool operator<(const Bucket& lhs, const Bucket& rhs) {
   return lhs.size < rhs.size;
 }

 // Groups the allocations in the bucket by |breakBy|. The buckets in the
 // returned list will have |backtrace_cursor| advanced or
 // |is_broken_down_by_type_name| set depending on the property to group by.
 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
-  base::hash_map<const char*, Bucket> breakdown;
+  base::hash_map<const void*, Bucket> breakdown;

   if (breakBy == BreakDownMode::kByBacktrace) {
     for (const auto& context_and_size : bucket.bytes_by_context) {
       const Backtrace& backtrace = context_and_size.first->backtrace;
-      const char* const* begin = std::begin(backtrace.frames);
-      const char* const* end = std::end(backtrace.frames);
-      const char* const* cursor = begin + bucket.backtrace_cursor;
+      const StackFrame* begin = std::begin(backtrace.frames);
+      const StackFrame* end = std::end(backtrace.frames);
+      const StackFrame* cursor = begin + bucket.backtrace_cursor;

       // The backtrace in the context is padded with null pointers, but these
       // should not be considered for breakdown. Adjust end to point past the
       // last non-null frame.
-      while (begin != end && *(end - 1) == nullptr)
+      while (begin != end && (end - 1)->empty())
         end--;

       DCHECK_LE(cursor, end);

       if (cursor != end) {
-        Bucket& subbucket = breakdown[*cursor];
+        Bucket& subbucket = breakdown[cursor->value];
         subbucket.size += context_and_size.second;
         subbucket.bytes_by_context.push_back(context_and_size);
         subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
         subbucket.is_broken_down_by_type_name =
             bucket.is_broken_down_by_type_name;
         DCHECK_GT(subbucket.size, 0u);
       }
     }
   } else if (breakBy == BreakDownMode::kByTypeName) {
     if (!bucket.is_broken_down_by_type_name) {
(...skipping 78 matching lines...)
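
For readers without the rest of the CL: the hunk above switches backtrace frames from bare const char* to the new StackFrame type. Below is a minimal sketch of the shape that usage implies, inferred from (end - 1)->empty(), breakdown[cursor->value], and the patch set title "Add type to StackFrame"; the actual definition lives elsewhere in this patch and may differ.

// Sketch only, not the real definition from
// heap_profiler_allocation_context.h.
struct StackFrame {
  enum class Type {
    THREAD_NAME,      // |value| points to a thread name string.
    PROGRAM_COUNTER,  // |value| is a return address.
  };

  Type type;
  const void* value;  // Opaque payload; also used as the breakdown map key.

  // Backtrace::frames is padded with empty frames at the tail; this is what
  // the trimming loop above tests for.
  bool empty() const { return value == nullptr; }
};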
 HeapDumpWriter::~HeapDumpWriter() {}

 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
   // The contexts in the bucket are all different, but the [begin, cursor) range
   // is equal for all contexts in the bucket, and the type names are the same if
   // |is_broken_down_by_type_name| is set.
   DCHECK(!bucket.bytes_by_context.empty());

   const AllocationContext* context = bucket.bytes_by_context.front().first;

-  const char* const* backtrace_begin = std::begin(context->backtrace.frames);
-  const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
+  const StackFrame* backtrace_begin = std::begin(context->backtrace.frames);
+  const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
   DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));

   Entry entry;
-  entry.stack_frame_id =
-      stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
+  entry.stack_frame_id = stack_frame_deduplicator_->Insert(
+      backtrace_begin, backtrace_end);

   // Deduplicate the type name, or use ID -1 if type name is not set.
   entry.type_id = bucket.is_broken_down_by_type_name
                       ? type_name_deduplicator_->Insert(context->type_name)
                       : -1;

   entry.size = bucket.size;

   auto position_and_inserted = entries_.insert(entry);
   return position_and_inserted.second;
(...skipping 21 matching lines...)
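
AddEntryForBucket leans on the two deduplicators to turn repeated backtraces and type names into small integer IDs that the dump can reference compactly. A toy illustration of that contract follows; it is not the real StackFrameDeduplicator (whose Insert takes a frame range and interns frames into a tree), just the interning idea.

#include <map>
#include <string>
#include <utility>

// Toy deduplicator: inserting the same key twice returns the same ID, so a
// dump stores one table of strings and refers to them by index.
class ToyDeduplicator {
 public:
  int Insert(const std::string& key) {
    auto it_and_inserted = ids_.insert(std::make_pair(key, next_id_));
    if (it_and_inserted.second)
      next_id_++;  // Key was new; it now owns the ID we just handed out.
    return it_and_inserted.first->second;
  }

 private:
  std::map<std::string, int> ids_;
  int next_id_ = 0;
};

// Usage: Insert("Widget") == Insert("Widget"); distinct keys get distinct IDs.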
 const std::set<Entry>& HeapDumpWriter::Summarize(
     const hash_map<AllocationContext, size_t>& bytes_by_context) {
   // Start with one bucket that represents the entire heap. Iterate by
   // reference, because the allocation contexts are going to point to allocation
   // contexts stored in |bytes_by_context|.
   Bucket root_bucket;
   for (const auto& context_and_size : bytes_by_context) {
     DCHECK_GT(context_and_size.second, 0u);
     const AllocationContext* context = &context_and_size.first;
     const size_t size = context_and_size.second;
-    root_bucket.bytes_by_context.push_back(std::make_pair(context, size));
-    root_bucket.size += size;
+    if (size != 0) {
+      // GetSubbuckets() expects non-zero allocations
+      root_bucket.bytes_by_context.push_back(std::make_pair(context, size));
+      root_bucket.size += size;
+    }

Primiano Tucci (use gerrit) 2016/04/07 15:51:57: I think this has been fixed by kraynov@ in https:/
Dmitry Skiba 2016/04/12 18:22:11: Done.

   }

   AddEntryForBucket(root_bucket);

   // Recursively break down the heap and fill |entries_| with entries to dump.
   BreakDown(root_bucket);

   return entries_;
 }

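Two details worth noting in Summarize(): DCHECK_GT compiles to nothing in release builds, so the explicit if (size != 0) guard is what actually keeps zero-byte contexts out of the root bucket there; and BreakDown() then splits the root bucket recursively, first frame by frame along the backtrace, then by type name. A hypothetical example of the resulting bucket tree, with names and sizes invented for illustration:

    root bucket                   1024 bytes   (whole heap)
    └ frame "main"                1024 bytes   backtrace_cursor = 1
       ├ frame "CreateWidget"      768 bytes   backtrace_cursor = 2
       │  └ type "Widget"          768 bytes   is_broken_down_by_type_name
       └ frame "LoadImage"         256 bytes

Each bucket that survives the breakdown is handed to AddEntryForBucket(), which records one Entry {stack_frame_id, type_id, size}.
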
(...skipping 42 matching lines...)
     const hash_map<AllocationContext, size_t>& bytes_by_size,
     StackFrameDeduplicator* stack_frame_deduplicator,
     TypeNameDeduplicator* type_name_deduplicator) {
   internal::HeapDumpWriter writer(stack_frame_deduplicator,
                                   type_name_deduplicator);
   return Serialize(writer.Summarize(bytes_by_size));
 }

 }  // namespace trace_event
 }  // namespace base
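
The chunk above is the tail of ExportHeapDump(), the public entry point that ties the pieces together. A hypothetical call-site sketch, with the surrounding names invented and the return type assumed from the header, which this diff does not show:

// Hypothetical call site (illustration only):
//
//   scoped_refptr<TracedValue> heap_dump =
//       ExportHeapDump(bytes_by_context,
//                      stack_frame_deduplicator,
//                      type_name_deduplicator);
//
// The two deduplicators must be the same instances whose interned tables are
// later serialized into the trace, so that |stack_frame_id| and |type_id| in
// each entry resolve correctly.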