Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(326)

Side by Side Diff: base/trace_event/heap_profiler_heap_dump_writer.cc

Issue 1877313003: [tracing] Track number of allocations in heap profiler (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: fix indentation. Created 4 years, 8 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" 5 #include "base/trace_event/heap_profiler_heap_dump_writer.h"
6 6
7 #include <stdint.h> 7 #include <stdint.h>
8 8
9 #include <algorithm> 9 #include <algorithm>
10 #include <iterator> 10 #include <iterator>
(...skipping 23 matching lines...) Expand all
34 namespace base { 34 namespace base {
35 namespace trace_event { 35 namespace trace_event {
36 namespace internal { 36 namespace internal {
37 namespace { 37 namespace {
38 38
39 // Denotes a property of |AllocationContext| to break down by. 39 // Denotes a property of |AllocationContext| to break down by.
40 enum class BreakDownMode { kByBacktrace, kByTypeName }; 40 enum class BreakDownMode { kByBacktrace, kByTypeName };
41 41
42 // A group of bytes for which the context shares a prefix. 42 // A group of bytes for which the context shares a prefix.
43 struct Bucket { 43 struct Bucket {
44 Bucket() : size(0), backtrace_cursor(0), is_broken_down_by_type_name(false) {} 44 Bucket()
45 : size(0),
46 count(0),
47 backtrace_cursor(0),
48 is_broken_down_by_type_name(false) {}
45 49
46 std::vector<std::pair<const AllocationContext*, size_t>> bytes_by_context; 50 std::vector<std::pair<const AllocationContext*, AllocationMetrics>>
51 metrics_by_context;
47 52
48 // The sum of the sizes of |bytes_by_context|. 53 // The sum of the sizes of |metrics_by_context|.
49 size_t size; 54 size_t size;
50 55
 56 // The sum of the allocation counts of |metrics_by_context|.
57 size_t count;
58
51 // The index of the stack frame that has not yet been broken down by. For all 59 // The index of the stack frame that has not yet been broken down by. For all
52 // elements in this bucket, the stack frames 0 up to (but not including) the 60 // elements in this bucket, the stack frames 0 up to (but not including) the
53 // cursor, must be equal. 61 // cursor, must be equal.
54 size_t backtrace_cursor; 62 size_t backtrace_cursor;
55 63
56 // When true, the type name for all elements in this bucket must be equal. 64 // When true, the type name for all elements in this bucket must be equal.
57 bool is_broken_down_by_type_name; 65 bool is_broken_down_by_type_name;
58 }; 66 };
59 67
60 // Comparison operator to order buckets by their size. 68 // Comparison operator to order buckets by their size.
61 bool operator<(const Bucket& lhs, const Bucket& rhs) { 69 bool operator<(const Bucket& lhs, const Bucket& rhs) {
62 return lhs.size < rhs.size; 70 return lhs.size < rhs.size;
63 } 71 }
64 72
65 // Groups the allocations in the bucket by |breakBy|. The buckets in the 73 // Groups the allocations in the bucket by |breakBy|. The buckets in the
66 // returned list will have |backtrace_cursor| advanced or 74 // returned list will have |backtrace_cursor| advanced or
67 // |is_broken_down_by_type_name| set depending on the property to group by. 75 // |is_broken_down_by_type_name| set depending on the property to group by.
68 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { 76 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
69 base::hash_map<const char*, Bucket> breakdown; 77 base::hash_map<const char*, Bucket> breakdown;
70 78
71 if (breakBy == BreakDownMode::kByBacktrace) { 79 if (breakBy == BreakDownMode::kByBacktrace) {
72 for (const auto& context_and_size : bucket.bytes_by_context) { 80 for (const auto& context_and_metrics : bucket.metrics_by_context) {
73 const Backtrace& backtrace = context_and_size.first->backtrace; 81 const Backtrace& backtrace = context_and_metrics.first->backtrace;
74 const char* const* begin = std::begin(backtrace.frames); 82 const char* const* begin = std::begin(backtrace.frames);
75 const char* const* end = std::end(backtrace.frames); 83 const char* const* end = std::end(backtrace.frames);
76 const char* const* cursor = begin + bucket.backtrace_cursor; 84 const char* const* cursor = begin + bucket.backtrace_cursor;
77 85
78 // The backtrace in the context is padded with null pointers, but these 86 // The backtrace in the context is padded with null pointers, but these
79 // should not be considered for breakdown. Adjust end to point past the 87 // should not be considered for breakdown. Adjust end to point past the
80 // last non-null frame. 88 // last non-null frame.
81 while (begin != end && *(end - 1) == nullptr) 89 while (begin != end && *(end - 1) == nullptr)
82 end--; 90 end--;
83 91
84 DCHECK_LE(cursor, end); 92 DCHECK_LE(cursor, end);
85 93
86 if (cursor != end) { 94 if (cursor != end) {
87 Bucket& subbucket = breakdown[*cursor]; 95 Bucket& subbucket = breakdown[*cursor];
88 subbucket.size += context_and_size.second; 96 subbucket.size += context_and_metrics.second.size;
89 subbucket.bytes_by_context.push_back(context_and_size); 97 subbucket.count += context_and_metrics.second.count;
98 subbucket.metrics_by_context.push_back(context_and_metrics);
90 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; 99 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1;
91 subbucket.is_broken_down_by_type_name = 100 subbucket.is_broken_down_by_type_name =
92 bucket.is_broken_down_by_type_name; 101 bucket.is_broken_down_by_type_name;
93 DCHECK_GT(subbucket.size, 0u); 102 DCHECK_GT(subbucket.size, 0u);
103 DCHECK_GT(subbucket.count, 0u);
94 } 104 }
95 } 105 }
96 } else if (breakBy == BreakDownMode::kByTypeName) { 106 } else if (breakBy == BreakDownMode::kByTypeName) {
97 if (!bucket.is_broken_down_by_type_name) { 107 if (!bucket.is_broken_down_by_type_name) {
98 for (const auto& context_and_size : bucket.bytes_by_context) { 108 for (const auto& context_and_metrics : bucket.metrics_by_context) {
99 const AllocationContext* context = context_and_size.first; 109 const AllocationContext* context = context_and_metrics.first;
100 Bucket& subbucket = breakdown[context->type_name]; 110 Bucket& subbucket = breakdown[context->type_name];
101 subbucket.size += context_and_size.second; 111 subbucket.size += context_and_metrics.second.size;
102 subbucket.bytes_by_context.push_back(context_and_size); 112 subbucket.count += context_and_metrics.second.count;
113 subbucket.metrics_by_context.push_back(context_and_metrics);
103 subbucket.backtrace_cursor = bucket.backtrace_cursor; 114 subbucket.backtrace_cursor = bucket.backtrace_cursor;
104 subbucket.is_broken_down_by_type_name = true; 115 subbucket.is_broken_down_by_type_name = true;
105 DCHECK_GT(subbucket.size, 0u); 116 DCHECK_GT(subbucket.size, 0u);
117 DCHECK_GT(subbucket.count, 0u);
106 } 118 }
107 } 119 }
108 } 120 }
109 121
110 std::vector<Bucket> buckets; 122 std::vector<Bucket> buckets;
111 buckets.reserve(breakdown.size()); 123 buckets.reserve(breakdown.size());
112 for (auto key_bucket : breakdown) 124 for (auto key_bucket : breakdown)
113 buckets.push_back(key_bucket.second); 125 buckets.push_back(key_bucket.second);
114 126
115 return buckets; 127 return buckets;
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after
172 TypeNameDeduplicator* type_name_deduplicator) 184 TypeNameDeduplicator* type_name_deduplicator)
173 : stack_frame_deduplicator_(stack_frame_deduplicator), 185 : stack_frame_deduplicator_(stack_frame_deduplicator),
174 type_name_deduplicator_(type_name_deduplicator) {} 186 type_name_deduplicator_(type_name_deduplicator) {}
175 187
176 HeapDumpWriter::~HeapDumpWriter() {} 188 HeapDumpWriter::~HeapDumpWriter() {}
177 189
178 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) { 190 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
179 // The contexts in the bucket are all different, but the [begin, cursor) range 191 // The contexts in the bucket are all different, but the [begin, cursor) range
180 // is equal for all contexts in the bucket, and the type names are the same if 192 // is equal for all contexts in the bucket, and the type names are the same if
181 // |is_broken_down_by_type_name| is set. 193 // |is_broken_down_by_type_name| is set.
182 DCHECK(!bucket.bytes_by_context.empty()); 194 DCHECK(!bucket.metrics_by_context.empty());
183 195
184 const AllocationContext* context = bucket.bytes_by_context.front().first; 196 const AllocationContext* context = bucket.metrics_by_context.front().first;
185 197
186 const char* const* backtrace_begin = std::begin(context->backtrace.frames); 198 const char* const* backtrace_begin = std::begin(context->backtrace.frames);
187 const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor; 199 const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor;
188 DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames)); 200 DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames));
189 201
190 Entry entry; 202 Entry entry;
191 entry.stack_frame_id = 203 entry.stack_frame_id =
192 stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end); 204 stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end);
193 205
194 // Deduplicate the type name, or use ID -1 if type name is not set. 206 // Deduplicate the type name, or use ID -1 if type name is not set.
195 entry.type_id = bucket.is_broken_down_by_type_name 207 entry.type_id = bucket.is_broken_down_by_type_name
196 ? type_name_deduplicator_->Insert(context->type_name) 208 ? type_name_deduplicator_->Insert(context->type_name)
197 : -1; 209 : -1;
198 210
199 entry.size = bucket.size; 211 entry.size = bucket.size;
212 entry.count = bucket.count;
200 213
201 auto position_and_inserted = entries_.insert(entry); 214 auto position_and_inserted = entries_.insert(entry);
202 return position_and_inserted.second; 215 return position_and_inserted.second;
203 } 216 }
204 217
205 void HeapDumpWriter::BreakDown(const Bucket& bucket) { 218 void HeapDumpWriter::BreakDown(const Bucket& bucket) {
206 auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace); 219 auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace);
207 auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName); 220 auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName);
208 221
209 // Insert entries for the buckets. If a bucket was not present before, it has 222 // Insert entries for the buckets. If a bucket was not present before, it has
210 // not been broken down before, so recursively continue breaking down in that 223 // not been broken down before, so recursively continue breaking down in that
211 // case. There might be multiple routes to the same entry (first break down 224 // case. There might be multiple routes to the same entry (first break down
212 // by type name, then by backtrace, or first by backtrace and then by type), 225 // by type name, then by backtrace, or first by backtrace and then by type),
213 // so a set is used to avoid dumping and breaking down entries more than once. 226 // so a set is used to avoid dumping and breaking down entries more than once.
214 227
215 for (const Bucket& subbucket : by_backtrace) 228 for (const Bucket& subbucket : by_backtrace)
216 if (AddEntryForBucket(subbucket)) 229 if (AddEntryForBucket(subbucket))
217 BreakDown(subbucket); 230 BreakDown(subbucket);
218 231
219 for (const Bucket& subbucket : by_type_name) 232 for (const Bucket& subbucket : by_type_name)
220 if (AddEntryForBucket(subbucket)) 233 if (AddEntryForBucket(subbucket))
221 BreakDown(subbucket); 234 BreakDown(subbucket);
222 } 235 }
223 236
224 const std::set<Entry>& HeapDumpWriter::Summarize( 237 const std::set<Entry>& HeapDumpWriter::Summarize(
225 const hash_map<AllocationContext, size_t>& bytes_by_context) { 238 const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context) {
226 // Start with one bucket that represents the entire heap. Iterate by 239 // Start with one bucket that represents the entire heap. Iterate by
227 // reference, because the allocation contexts are going to point to allocation 240 // reference, because the allocation contexts are going to point to allocation
228 // contexts stored in |bytes_by_context|. 241 // contexts stored in |metrics_by_context|.
229 Bucket root_bucket; 242 Bucket root_bucket;
230 for (const auto& context_and_size : bytes_by_context) { 243 for (const auto& context_and_metrics : metrics_by_context) {
231 DCHECK_GT(context_and_size.second, 0u); 244 DCHECK_GT(context_and_metrics.second.size, 0u);
232 const AllocationContext* context = &context_and_size.first; 245 DCHECK_GT(context_and_metrics.second.count, 0u);
233 const size_t size = context_and_size.second; 246 const AllocationContext* context = &context_and_metrics.first;
234 root_bucket.bytes_by_context.push_back(std::make_pair(context, size)); 247 root_bucket.metrics_by_context.push_back(
235 root_bucket.size += size; 248 std::make_pair(context, context_and_metrics.second));
249 root_bucket.size += context_and_metrics.second.size;
250 root_bucket.count += context_and_metrics.second.count;
236 } 251 }
237 252
238 AddEntryForBucket(root_bucket); 253 AddEntryForBucket(root_bucket);
239 254
240 // Recursively break down the heap and fill |entries_| with entries to dump. 255 // Recursively break down the heap and fill |entries_| with entries to dump.
241 BreakDown(root_bucket); 256 BreakDown(root_bucket);
242 257
243 return entries_; 258 return entries_;
244 } 259 }
245 260
246 std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) { 261 std::unique_ptr<TracedValue> Serialize(const std::set<Entry>& entries) {
247 std::string buffer; 262 std::string buffer;
248 std::unique_ptr<TracedValue> traced_value(new TracedValue); 263 std::unique_ptr<TracedValue> traced_value(new TracedValue);
249 264
250 traced_value->BeginArray("entries"); 265 traced_value->BeginArray("entries");
251 266
252 for (const Entry& entry : entries) { 267 for (const Entry& entry : entries) {
253 traced_value->BeginDictionary(); 268 traced_value->BeginDictionary();
254 269
255 // Format size as hexadecimal string into |buffer|. 270 // Format size as hexadecimal string into |buffer|.
256 SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size)); 271 SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.size));
257 traced_value->SetString("size", buffer); 272 traced_value->SetString("size", buffer);
258 273
274 SStringPrintf(&buffer, "%" PRIx64, static_cast<uint64_t>(entry.count));
275 traced_value->SetString("count", buffer);
276
259 if (entry.stack_frame_id == -1) { 277 if (entry.stack_frame_id == -1) {
260 // An empty backtrace (which will have ID -1) is represented by the empty 278 // An empty backtrace (which will have ID -1) is represented by the empty
261 // string, because there is no leaf frame to reference in |stackFrames|. 279 // string, because there is no leaf frame to reference in |stackFrames|.
262 traced_value->SetString("bt", ""); 280 traced_value->SetString("bt", "");
263 } else { 281 } else {
264 // Format index of the leaf frame as a string, because |stackFrames| is a 282 // Format index of the leaf frame as a string, because |stackFrames| is a
265 // dictionary, not an array. 283 // dictionary, not an array.
266 SStringPrintf(&buffer, "%i", entry.stack_frame_id); 284 SStringPrintf(&buffer, "%i", entry.stack_frame_id);
267 traced_value->SetString("bt", buffer); 285 traced_value->SetString("bt", buffer);
268 } 286 }
269 287
270 // Type ID -1 (cumulative size for all types) is represented by the absence 288 // Type ID -1 (cumulative size for all types) is represented by the absence
271 // of the "type" key in the dictionary. 289 // of the "type" key in the dictionary.
272 if (entry.type_id != -1) { 290 if (entry.type_id != -1) {
273 // Format the type ID as a string. 291 // Format the type ID as a string.
274 SStringPrintf(&buffer, "%i", entry.type_id); 292 SStringPrintf(&buffer, "%i", entry.type_id);
275 traced_value->SetString("type", buffer); 293 traced_value->SetString("type", buffer);
276 } 294 }
277 295
278 traced_value->EndDictionary(); 296 traced_value->EndDictionary();
279 } 297 }
280 298
281 traced_value->EndArray(); // "entries" 299 traced_value->EndArray(); // "entries"
282 return traced_value; 300 return traced_value;
283 } 301 }
284 302
285 } // namespace internal 303 } // namespace internal
286 304
287 std::unique_ptr<TracedValue> ExportHeapDump( 305 std::unique_ptr<TracedValue> ExportHeapDump(
288 const hash_map<AllocationContext, size_t>& bytes_by_size, 306 const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context,
289 StackFrameDeduplicator* stack_frame_deduplicator, 307 StackFrameDeduplicator* stack_frame_deduplicator,
290 TypeNameDeduplicator* type_name_deduplicator) { 308 TypeNameDeduplicator* type_name_deduplicator) {
291 internal::HeapDumpWriter writer(stack_frame_deduplicator, 309 internal::HeapDumpWriter writer(stack_frame_deduplicator,
292 type_name_deduplicator); 310 type_name_deduplicator);
293 return Serialize(writer.Summarize(bytes_by_size)); 311 return Serialize(writer.Summarize(metrics_by_context));
294 } 312 }
295 313
296 } // namespace trace_event 314 } // namespace trace_event
297 } // namespace base 315 } // namespace base
OLDNEW
« no previous file with comments | « base/trace_event/heap_profiler_heap_dump_writer.h ('k') | base/trace_event/heap_profiler_heap_dump_writer_unittest.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698