Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(607)

Side by Side Diff: base/trace_event/heap_profiler_heap_dump_writer.cc

Issue 1877313003: [tracing] Track number of allocations in heap profiler (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix name and formats. Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" 5 #include "base/trace_event/heap_profiler_heap_dump_writer.h"
6 6
7 #include <stdint.h> 7 #include <stdint.h>
8 8
9 #include <algorithm> 9 #include <algorithm>
10 #include <iterator> 10 #include <iterator>
(...skipping 23 matching lines...) Expand all
34 namespace base { 34 namespace base {
35 namespace trace_event { 35 namespace trace_event {
36 namespace internal { 36 namespace internal {
37 namespace { 37 namespace {
38 38
39 // Denotes a property of |AllocationContext| to break down by. 39 // Denotes a property of |AllocationContext| to break down by.
40 enum class BreakDownMode { kByBacktrace, kByTypeName }; 40 enum class BreakDownMode { kByBacktrace, kByTypeName };
41 41
42 // A group of bytes for which the context shares a prefix. 42 // A group of bytes for which the context shares a prefix.
43 struct Bucket { 43 struct Bucket {
44 Bucket() : size(0), backtrace_cursor(0), is_broken_down_by_type_name(false) {} 44 Bucket()
45 : size(0),
46 count(0),
47 backtrace_cursor(0),
48 is_broken_down_by_type_name(false) {}
45 49
46 std::vector<std::pair<const AllocationContext*, size_t>> bytes_by_context; 50 std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
51 metrics_by_context;
47 52
48 // The sum of the sizes of |bytes_by_context|. 53 // The sum of the sizes of |metrics_by_context|.
49 size_t size; 54 size_t size;
50 55
 56 // The sum of the number of allocations in |metrics_by_context|.
57 size_t count;
58
51 // The index of the stack frame that has not yet been broken down by. For all 59 // The index of the stack frame that has not yet been broken down by. For all
52 // elements in this bucket, the stack frames 0 up to (but not including) the 60 // elements in this bucket, the stack frames 0 up to (but not including) the
53 // cursor, must be equal. 61 // cursor, must be equal.
54 size_t backtrace_cursor; 62 size_t backtrace_cursor;
55 63
56 // When true, the type name for all elements in this bucket must be equal. 64 // When true, the type name for all elements in this bucket must be equal.
57 bool is_broken_down_by_type_name; 65 bool is_broken_down_by_type_name;
58 }; 66 };
59 67
60 // Comparison operator to order buckets by their size. 68 // Comparison operator to order buckets by their size.
61 bool operator<(const Bucket& lhs, const Bucket& rhs) { 69 bool operator<(const Bucket& lhs, const Bucket& rhs) {
62 return lhs.size < rhs.size; 70 return lhs.size < rhs.size;
63 } 71 }
64 72
65 // Groups the allocations in the bucket by |breakBy|. The buckets in the 73 // Groups the allocations in the bucket by |breakBy|. The buckets in the
66 // returned list will have |backtrace_cursor| advanced or 74 // returned list will have |backtrace_cursor| advanced or
67 // |is_broken_down_by_type_name| set depending on the property to group by. 75 // |is_broken_down_by_type_name| set depending on the property to group by.
68 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { 76 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
69 base::hash_map<const char*, Bucket> breakdown; 77 base::hash_map<const char*, Bucket> breakdown;
70 78
71 if (breakBy == BreakDownMode::kByBacktrace) { 79 if (breakBy == BreakDownMode::kByBacktrace) {
72 for (const auto& context_and_size : bucket.bytes_by_context) { 80 for (const auto& context_and_metrics : bucket.metrics_by_context) {
73 const Backtrace& backtrace = context_and_size.first->backtrace; 81 const Backtrace& backtrace = context_and_metrics.first->backtrace;
74 const char* const* begin = std::begin(backtrace.frames); 82 const char* const* begin = std::begin(backtrace.frames);
75 const char* const* end = std::end(backtrace.frames); 83 const char* const* end = std::end(backtrace.frames);
76 const char* const* cursor = begin + bucket.backtrace_cursor; 84 const char* const* cursor = begin + bucket.backtrace_cursor;
77 85
78 // The backtrace in the context is padded with null pointers, but these 86 // The backtrace in the context is padded with null pointers, but these
79 // should not be considered for breakdown. Adjust end to point past the 87 // should not be considered for breakdown. Adjust end to point past the
80 // last non-null frame. 88 // last non-null frame.
81 while (begin != end && *(end - 1) == nullptr) 89 while (begin != end && *(end - 1) == nullptr)
82 end--; 90 end--;
83 91
84 DCHECK_LE(cursor, end); 92 DCHECK_LE(cursor, end);
85 93
86 if (cursor != end) { 94 if (cursor != end) {
87 Bucket& subbucket = breakdown[*cursor]; 95 Bucket& subbucket = breakdown[*cursor];
88 subbucket.size += context_and_size.second; 96 subbucket.size += context_and_metrics.second.size;
89 subbucket.bytes_by_context.push_back(context_and_size); 97 subbucket.count += context_and_metrics.second.count;
98 subbucket.metrics_by_context.push_back(context_and_metrics);
90 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; 99 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
91 subbucket.is_broken_down_by_type_name = 100 subbucket.is_broken_down_by_type_name =
92 bucket.is_broken_down_by_type_name; 101 bucket.is_broken_down_by_type_name;
93 DCHECK_GT(subbucket.size, 0u); 102 DCHECK_GT(subbucket.size, 0u);
103 DCHECK_GT(subbucket.count, 0u);
94 } 104 }
95 } 105 }
96 } else if (breakBy == BreakDownMode::kByTypeName) { 106 } else if (breakBy == BreakDownMode::kByTypeName) {
97 if (!bucket.is_broken_down_by_type_name) { 107 if (!bucket.is_broken_down_by_type_name) {
98 for (const auto& context_and_size : bucket.bytes_by_context) { 108 for (const auto& context_and_metrics : bucket.metrics_by_context) {
99 const AllocationContext* context = context_and_size.first; 109 const AllocationContext* context = context_and_metrics.first;
100 Bucket& subbucket = breakdown[context->type_name]; 110 Bucket& subbucket = breakdown[context->type_name];
101 subbucket.size += context_and_size.second; 111 subbucket.size += context_and_metrics.second.size;
102 subbucket.bytes_by_context.push_back(context_and_size); 112 subbucket.count += context_and_metrics.second.count;
113 subbucket.metrics_by_context.push_back(context_and_metrics);
103 subbucket.backtrace_cursor = bucket.backtrace_cursor; 114 subbucket.backtrace_cursor = bucket.backtrace_cursor;
104 subbucket.is_broken_down_by_type_name = true; 115 subbucket.is_broken_down_by_type_name = true;
105 DCHECK_GT(subbucket.size, 0u); 116 DCHECK_GT(subbucket.size, 0u);
117 DCHECK_GT(subbucket.count, 0u);
106 } 118 }
107 } 119 }
108 } 120 }
109 121
110 std::vector<Bucket> buckets; 122 std::vector<Bucket> buckets;
111 buckets.reserve(breakdown.size()); 123 buckets.reserve(breakdown.size());
112 for (auto key_bucket : breakdown) 124 for (auto key_bucket : breakdown)
113 buckets.push_back(key_bucket.second); 125 buckets.push_back(key_bucket.second);
114 126
115 return buckets; 127 return buckets;
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after
178 TypeNameDeduplicator* type_name_deduplicator) 190 TypeNameDeduplicator* type_name_deduplicator)
179 : stack_frame_deduplicator_(stack_frame_deduplicator), 191 : stack_frame_deduplicator_(stack_frame_deduplicator),
180 type_name_deduplicator_(type_name_deduplicator) {} 192 type_name_deduplicator_(type_name_deduplicator) {}
181 193
182 HeapDumpWriter::~HeapDumpWriter() {} 194 HeapDumpWriter::~HeapDumpWriter() {}
183 195
184 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) { 196 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
185 // The contexts in the bucket are all different, but the [begin, cursor) range 197 // The contexts in the bucket are all different, but the [begin, cursor) range
186 // is equal for all contexts in the bucket, and the type names are the same if 198 // is equal for all contexts in the bucket, and the type names are the same if
187 // |is_broken_down_by_type_name| is set. 199 // |is_broken_down_by_type_name| is set.
188 DCHECK(!bucket.bytes_by_context.empty()); 200 DCHECK(!bucket.metrics_by_context.empty());
189 201
190 const AllocationContext* context = bucket.bytes_by_context.front().first; 202 const AllocationContext* context = bucket.metrics_by_context.front().first;
191 203
192 const char* const* backtrace_begin = std::begin(context->backtrace.frames); 204 const char* const* backtrace_begin = std::begin(context->backtrace.frames);
193 const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor; 205 const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
194 DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames)); 206 DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
195 207
196 Entry entry; 208 Entry entry;
197 entry.stack_frame_id = 209 entry.stack_frame_id =
198 stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end); 210 stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
199 211
200 // Deduplicate the type name, or use ID -1 if type name is not set. 212 // Deduplicate the type name, or use ID -1 if type name is not set.
201 entry.type_id = bucket.is_broken_down_by_type_name 213 entry.type_id = bucket.is_broken_down_by_type_name
202 ? type_name_deduplicator_->Insert(context->type_name) 214 ? type_name_deduplicator_->Insert(context->type_name)
203 : -1; 215 : -1;
204 216
205 entry.size = bucket.size; 217 entry.size = bucket.size;
218 entry.count = bucket.count;
206 219
207 auto position_and_inserted = entries_.insert(entry); 220 auto position_and_inserted = entries_.insert(entry);
208 return position_and_inserted.second; 221 return position_and_inserted.second;
209 } 222 }
210 223
211 void HeapDumpWriter::BreakDown(const Bucket& bucket) { 224 void HeapDumpWriter::BreakDown(const Bucket& bucket) {
212 auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace); 225 auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace);
213 auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName); 226 auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName);
214 227
215 // Insert entries for the buckets. If a bucket was not present before, it has 228 // Insert entries for the buckets. If a bucket was not present before, it has
216 // not been broken down before, so recursively continue breaking down in that 229 // not been broken down before, so recursively continue breaking down in that
217 // case. There might be multiple routes to the same entry (first break down 230 // case. There might be multiple routes to the same entry (first break down
218 // by type name, then by backtrace, or first by backtrace and then by type), 231 // by type name, then by backtrace, or first by backtrace and then by type),
219 // so a set is used to avoid dumping and breaking down entries more than once. 232 // so a set is used to avoid dumping and breaking down entries more than once.
220 233
221 for (const Bucket& subbucket : by_backtrace) 234 for (const Bucket& subbucket : by_backtrace)
222 if (AddEntryForBucket(subbucket)) 235 if (AddEntryForBucket(subbucket))
223 BreakDown(subbucket); 236 BreakDown(subbucket);
224 237
225 for (const Bucket& subbucket : by_type_name) 238 for (const Bucket& subbucket : by_type_name)
226 if (AddEntryForBucket(subbucket)) 239 if (AddEntryForBucket(subbucket))
227 BreakDown(subbucket); 240 BreakDown(subbucket);
228 } 241 }
229 242
230 const std::set<Entry>& HeapDumpWriter::Summarize( 243 const std::set<Entry>& HeapDumpWriter::Summarize(
231 const hash_map<AllocationContext, size_t>& bytes_by_context) { 244 const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context) {
232 // Start with one bucket that represents the entire heap. Iterate by 245 // Start with one bucket that represents the entire heap. Iterate by
233 // reference, because the allocation contexts are going to point to allocation 246 // reference, because the allocation contexts are going to point to allocation
234 // contexts stored in |bytes_by_context|. 247 // contexts stored in |metrics_by_context|.
235 Bucket root_bucket; 248 Bucket root_bucket;
236 for (const auto& context_and_size : bytes_by_context) { 249 for (const auto& context_and_metrics : metrics_by_context) {
237 const AllocationContext* context = &context_and_size.first; 250 const AllocationContext* context = &context_and_metrics.first;
238 const size_t size = context_and_size.second; 251 root_bucket.metrics_by_context.push_back(
239 root_bucket.bytes_by_context.push_back(std::make_pair(context, size)); 252 std::make_pair(context, context_and_metrics.second));
240 root_bucket.size += size; 253 root_bucket.size += context_and_metrics.second.size;
254 root_bucket.count += context_and_metrics.second.count;
241 } 255 }
242 256
243 AddEntryForBucket(root_bucket); 257 AddEntryForBucket(root_bucket);
244 258
245 // Recursively break down the heap and fill |entries_| with entries to dump. 259 // Recursively break down the heap and fill |entries_| with entries to dump.
246 BreakDown(root_bucket); 260 BreakDown(root_bucket);
247 261
248 return entries_; 262 return entries_;
249 } 263 }
250 264
251 std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) { 265 std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
252 std::string buffer; 266 std::string buffer;
253 std::unique_ptr<TracedValue> traced_value(new TracedValue); 267 std::unique_ptr<TracedValue> traced_value(new TracedValue);
254 268
255 traced_value->BeginArray("entries"); 269 traced_value->BeginArray("entries");
256 270
257 for (const Entry& entry : entries) { 271 for (const Entry& entry : entries) {
258 traced_value->BeginDictionary(); 272 traced_value->BeginDictionary();
259 273
260 // Format size as hexadecimal string into |buffer|. 274 // Format size as hexadecimal string into |buffer|.
261 SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size)); 275 SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
262 traced_value->SetString("size", buffer); 276 traced_value->SetString("size", buffer);
263 277
278 SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
279 traced_value->SetString("count", buffer);
280
264 if (entry.stack_frame_id == -1) { 281 if (entry.stack_frame_id == -1) {
265 // An empty backtrace (which will have ID -1) is represented by the empty 282 // An empty backtrace (which will have ID -1) is represented by the empty
266 // string, because there is no leaf frame to reference in |stackFrames|. 283 // string, because there is no leaf frame to reference in |stackFrames|.
267 traced_value->SetString("bt", ""); 284 traced_value->SetString("bt", "");
268 } else { 285 } else {
269 // Format index of the leaf frame as a string, because |stackFrames| is a 286 // Format index of the leaf frame as a string, because |stackFrames| is a
270 // dictionary, not an array. 287 // dictionary, not an array.
271 SStringPrintf(&buffer, "%i", entry.stack_frame_id); 288 SStringPrintf(&buffer, "%i", entry.stack_frame_id);
272 traced_value->SetString("bt", buffer); 289 traced_value->SetString("bt", buffer);
273 } 290 }
274 291
275 // Type ID -1 (cumulative size for all types) is represented by the absence 292 // Type ID -1 (cumulative size for all types) is represented by the absence
276 // of the "type" key in the dictionary. 293 // of the "type" key in the dictionary.
277 if (entry.type_id != -1) { 294 if (entry.type_id != -1) {
278 // Format the type ID as a string. 295 // Format the type ID as a string.
279 SStringPrintf(&buffer, "%i", entry.type_id); 296 SStringPrintf(&buffer, "%i", entry.type_id);
280 traced_value->SetString("type", buffer); 297 traced_value->SetString("type", buffer);
281 } 298 }
282 299
283 traced_value->EndDictionary(); 300 traced_value->EndDictionary();
284 } 301 }
285 302
286 traced_value->EndArray(); // "entries" 303 traced_value->EndArray(); // "entries"
287 return traced_value; 304 return traced_value;
288 } 305 }
289 306
290 } // namespace internal 307 } // namespace internal
291 308
292 std::unique_ptr<TracedValue> ExportHeapDump( 309 std::unique_ptr<TracedValue> ExportHeapDump(
293 const hash_map<AllocationContext, size_t>& bytes_by_size, 310 const hash_map<AllocationContext, AllocationMetrics>& bytes_by_size,
Maria 2016/04/14 20:11:34 metrics_by_size?
ssid 2016/04/14 20:17:11 Done.
294 StackFrameDeduplicator* stack_frame_deduplicator, 311 StackFrameDeduplicator* stack_frame_deduplicator,
295 TypeNameDeduplicator* type_name_deduplicator) { 312 TypeNameDeduplicator* type_name_deduplicator) {
296 internal::HeapDumpWriter writer(stack_frame_deduplicator, 313 internal::HeapDumpWriter writer(stack_frame_deduplicator,
297 type_name_deduplicator); 314 type_name_deduplicator);
298 return Serialize(writer.Summarize(bytes_by_size)); 315 return Serialize(writer.Summarize(bytes_by_size));
299 } 316 }
300 317
301 } // namespace trace_event 318 } // namespace trace_event
302 } // namespace base 319 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698