OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" | 5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 #include <iterator> | 10 #include <iterator> |
11 #include <tuple> | 11 #include <tuple> |
12 #include <utility> | 12 #include <utility> |
13 #include <vector> | 13 #include <vector> |
14 | 14 |
15 #include "base/format_macros.h" | 15 #include "base/format_macros.h" |
16 #include "base/logging.h" | 16 #include "base/logging.h" |
17 #include "base/macros.h" | 17 #include "base/macros.h" |
18 #include "base/strings/stringprintf.h" | 18 #include "base/strings/stringprintf.h" |
19 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" | 19 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" |
20 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" | 20 #include "base/trace_event/heap_profiler_type_name_deduplicator.h" |
| 21 #include "base/trace_event/memory_dump_session_state.h" |
| 22 #include "base/trace_event/trace_config.h" |
21 #include "base/trace_event/trace_event_argument.h" | 23 #include "base/trace_event/trace_event_argument.h" |
| 24 #include "base/trace_event/trace_log.h" |
22 | 25 |
23 // Most of what the |HeapDumpWriter| does is aggregating detailed information | 26 // Most of what the |HeapDumpWriter| does is aggregating detailed information |
24 // about the heap and deciding what to dump. The input to this process is a list | 27 // about the heap and deciding what to dump. The input to this process is a list |
25 // of |AllocationContext|s and size pairs. | 28 // of |AllocationContext|s and size pairs. |
26 // | 29 // |
27 // The pairs are grouped into |Bucket|s. A bucket is a group of (context, size) | 30 // The pairs are grouped into |Bucket|s. A bucket is a group of (context, size) |
28 // pairs where the properties of the contexts share a prefix. (Type name is | 31 // pairs where the properties of the contexts share a prefix. (Type name is |
29 // considered a list of length one here.) First, all pairs are put into one | 32 // considered a list of length one here.) First, all pairs are put into one |
30 // bucket that represents the entire heap. Then this bucket is recursively | 33 // bucket that represents the entire heap. Then this bucket is recursively |
31 // broken down into smaller buckets. Each bucket keeps track of whether further | 34 // broken down into smaller buckets. Each bucket keeps track of whether further |
(...skipping 31 matching lines...) |
63 | 66 |
64 // When true, the type name for all elements in this bucket must be equal. | 67 // When true, the type name for all elements in this bucket must be equal. |
65 bool is_broken_down_by_type_name; | 68 bool is_broken_down_by_type_name; |
66 }; | 69 }; |
67 | 70 |
68 // Comparison operator to order buckets by their size. | 71 // Comparison operator to order buckets by their size. |
69 bool operator<(const Bucket& lhs, const Bucket& rhs) { | 72 bool operator<(const Bucket& lhs, const Bucket& rhs) { |
70 return lhs.size < rhs.size; | 73 return lhs.size < rhs.size; |
71 } | 74 } |
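For reference, the std::make_heap/std::pop_heap calls in BreakDownBy below consult exactly this operator, so the heap is a max-heap keyed on bucket size. A minimal standalone sketch, with a hypothetical MiniBucket standing in for the real Bucket:

#include <algorithm>
#include <cassert>
#include <cstddef>
#include <vector>

struct MiniBucket {
  size_t size;
};

bool operator<(const MiniBucket& lhs, const MiniBucket& rhs) {
  return lhs.size < rhs.size;
}

int main() {
  std::vector<MiniBucket> buckets = {{8}, {512}, {32}};
  std::make_heap(buckets.begin(), buckets.end());  // Heapify using operator<.
  assert(buckets.front().size == 512);  // The largest bucket is at the front.
  return 0;
}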
72 | 75 |
73 // Groups the allocations in the bucket by |breakBy|. The buckets in the | 76 // Groups the allocations in the bucket by |break_by|. The buckets in the |
74 // returned list will have |backtrace_cursor| advanced or | 77 // returned list will have |backtrace_cursor| advanced or |
75 // |is_broken_down_by_type_name| set depending on the property to group by. | 78 // |is_broken_down_by_type_name| set depending on the property to group by. |
76 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { | 79 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, |
| 80 BreakDownMode break_by) { |
77 base::hash_map<const void*, Bucket> breakdown; | 81 base::hash_map<const void*, Bucket> breakdown; |
78 | 82 |
79 if (breakBy == BreakDownMode::kByBacktrace) { | 83 |
| 84 if (break_by == BreakDownMode::kByBacktrace) { |
80 for (const auto& context_and_metrics : bucket.metrics_by_context) { | 85 for (const auto& context_and_metrics : bucket.metrics_by_context) { |
81 const Backtrace& backtrace = context_and_metrics.first->backtrace; | 86 const Backtrace& backtrace = context_and_metrics.first->backtrace; |
82 const StackFrame* begin = std::begin(backtrace.frames); | 87 const StackFrame* begin = std::begin(backtrace.frames); |
83 const StackFrame* end = begin + backtrace.frame_count; | 88 const StackFrame* end = begin + backtrace.frame_count; |
84 const StackFrame* cursor = begin + bucket.backtrace_cursor; | 89 const StackFrame* cursor = begin + bucket.backtrace_cursor; |
85 | 90 |
86 DCHECK_LE(cursor, end); | 91 DCHECK_LE(cursor, end); |
87 | 92 |
88 if (cursor != end) { | 93 if (cursor != end) { |
89 Bucket& subbucket = breakdown[cursor->value]; | 94 Bucket& subbucket = breakdown[cursor->value]; |
90 subbucket.size += context_and_metrics.second.size; | 95 subbucket.size += context_and_metrics.second.size; |
91 subbucket.count += context_and_metrics.second.count; | 96 subbucket.count += context_and_metrics.second.count; |
92 subbucket.metrics_by_context.push_back(context_and_metrics); | 97 subbucket.metrics_by_context.push_back(context_and_metrics); |
93 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; | 98 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; |
94 subbucket.is_broken_down_by_type_name = | 99 subbucket.is_broken_down_by_type_name = |
95 bucket.is_broken_down_by_type_name; | 100 bucket.is_broken_down_by_type_name; |
96 DCHECK_GT(subbucket.size, 0u); | 101 DCHECK_GT(subbucket.size, 0u); |
97 DCHECK_GT(subbucket.count, 0u); | 102 DCHECK_GT(subbucket.count, 0u); |
98 } | 103 } |
99 } | 104 } |
100 } else if (breakBy == BreakDownMode::kByTypeName) { | 105 } else if (break_by == BreakDownMode::kByTypeName) { |
101 if (!bucket.is_broken_down_by_type_name) { | 106 if (!bucket.is_broken_down_by_type_name) { |
102 for (const auto& context_and_metrics : bucket.metrics_by_context) { | 107 for (const auto& context_and_metrics : bucket.metrics_by_context) { |
103 const AllocationContext* context = context_and_metrics.first; | 108 const AllocationContext* context = context_and_metrics.first; |
104 Bucket& subbucket = breakdown[context->type_name]; | 109 Bucket& subbucket = breakdown[context->type_name]; |
105 subbucket.size += context_and_metrics.second.size; | 110 subbucket.size += context_and_metrics.second.size; |
106 subbucket.count += context_and_metrics.second.count; | 111 subbucket.count += context_and_metrics.second.count; |
107 subbucket.metrics_by_context.push_back(context_and_metrics); | 112 subbucket.metrics_by_context.push_back(context_and_metrics); |
108 subbucket.backtrace_cursor = bucket.backtrace_cursor; | 113 subbucket.backtrace_cursor = bucket.backtrace_cursor; |
109 subbucket.is_broken_down_by_type_name = true; | 114 subbucket.is_broken_down_by_type_name = true; |
110 DCHECK_GT(subbucket.size, 0u); | 115 DCHECK_GT(subbucket.size, 0u); |
111 DCHECK_GT(subbucket.count, 0u); | 116 DCHECK_GT(subbucket.count, 0u); |
112 } | 117 } |
113 } | 118 } |
114 } | 119 } |
115 | 120 |
116 std::vector<Bucket> buckets; | 121 std::vector<Bucket> buckets; |
117 buckets.reserve(breakdown.size()); | 122 buckets.reserve(breakdown.size()); |
118 for (auto key_bucket : breakdown) | 123 for (auto key_bucket : breakdown) |
119 buckets.push_back(key_bucket.second); | 124 buckets.push_back(key_bucket.second); |
120 | 125 |
121 return buckets; | 126 return buckets; |
122 } | 127 } |
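The grouping above can be hard to follow through the Chromium types; a simplified, self-contained sketch of the same idea (std::map and std::string are illustrative stand-ins for base::hash_map and StackFrame, not the real types):

#include <cstddef>
#include <map>
#include <string>
#include <vector>

struct Allocation {
  std::vector<std::string> frames;  // Backtrace, outermost frame first.
  size_t size = 0;
};

// Sums sizes by the frame at |cursor|. Allocations whose backtraces share a
// prefix of length |cursor| + 1 land in the same group, mirroring how
// GetSubbuckets advances |backtrace_cursor| one frame at a time.
std::map<std::string, size_t> GroupByFrame(
    const std::vector<Allocation>& allocations,
    size_t cursor) {
  std::map<std::string, size_t> size_by_frame;
  for (const Allocation& a : allocations) {
    if (cursor < a.frames.size())
      size_by_frame[a.frames[cursor]] += a.size;
  }
  return size_by_frame;
}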
123 | 128 |
124 // Breaks down the bucket by |breakBy|. Returns only buckets that contribute | 129 // Breaks down the bucket by |break_by|. Returns only buckets that contribute |
125 // significantly to the total size. The long tail is omitted. | 130 // at least |min_size_bytes| to the total size. The long tail is omitted. |
126 std::vector<Bucket> BreakDownBy(const Bucket& bucket, BreakDownMode breakBy) { | 131 std::vector<Bucket> BreakDownBy(const Bucket& bucket, |
127 std::vector<Bucket> buckets = GetSubbuckets(bucket, breakBy); | 132 BreakDownMode break_by, |
| 133 size_t min_size_bytes) { |
| 134 std::vector<Bucket> buckets = GetSubbuckets(bucket, break_by); |
128 | 135 |
129 // Ensure that |buckets| is a max-heap (the data structure, not memory heap), | 136 // Ensure that |buckets| is a max-heap (the data structure, not memory heap), |
130 // so its front contains the largest bucket. Buckets should be iterated | 137 // so its front contains the largest bucket. Buckets should be iterated |
131 // ordered by size, but sorting the vector is overkill because the long tail | 138 // ordered by size, but sorting the vector is overkill because the long tail |
132 // of small buckets will be discarded. By using a max-heap, the optimal case | 139 // of small buckets will be discarded. By using a max-heap, the optimal case |
133 // where all but the first bucket are discarded is O(n). The worst case where | 140 // where all but the first bucket are discarded is O(n). The worst case where |
134 // no bucket is discarded is doing a heap sort, which is O(n log n). | 141 // no bucket is discarded is doing a heap sort, which is O(n log n). |
135 std::make_heap(buckets.begin(), buckets.end()); | 142 std::make_heap(buckets.begin(), buckets.end()); |
136 | 143 |
137 // Keep including buckets until adding one would increase the number of | 144 // Keep including buckets until adding one would increase the number of |
138 // bytes accounted for by less than 0.8% (1/125). This simple heuristic works | 145 // bytes accounted for by less than |min_size_bytes|. The large buckets end up in |
139 // quite well. The large buckets end up in [it, end()), [begin(), it) is the | 146 // [it, end()), [begin(), it) is the part that contains the max-heap |
140 // part that contains the max-heap of small buckets. | 147 // of small buckets. |
141 size_t accounted_for = 0; | |
142 std::vector<Bucket>::iterator it; | 148 std::vector<Bucket>::iterator it; |
143 for (it = buckets.end(); it != buckets.begin(); --it) { | 149 for (it = buckets.end(); it != buckets.begin(); --it) { |
144 accounted_for += buckets.front().size; | 150 if (buckets.front().size < min_size_bytes) |
145 if (buckets.front().size < (accounted_for / 125)) | |
146 break; | 151 break; |
147 | 152 |
148 // Put the largest bucket in [begin, it) at |it - 1| and max-heapify | 153 // Put the largest bucket in [begin, it) at |it - 1| and max-heapify |
149 // [begin, it - 1). This puts the next largest bucket at |buckets.front()|. | 154 // [begin, it - 1). This puts the next largest bucket at |buckets.front()|. |
150 std::pop_heap(buckets.begin(), it); | 155 std::pop_heap(buckets.begin(), it); |
151 } | 156 } |
152 | 157 |
153 // At this point, |buckets| looks like this (numbers are bucket sizes): | 158 // At this point, |buckets| looks like this (numbers are bucket sizes): |
154 // | 159 // |
155 // <-- max-heap of small buckets ---> | 160 // <-- max-heap of small buckets ---> |
(...skipping 12 matching lines...) |
168 } // namespace | 173 } // namespace |
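The selection loop in BreakDownBy, reduced to a self-contained sketch over plain sizes (hypothetical helper; threshold semantics as in the new code, where a bucket is kept only if its size is at least |min_size_bytes|):

#include <algorithm>
#include <cstddef>
#include <vector>

std::vector<size_t> KeepLargeBuckets(std::vector<size_t> sizes,
                                     size_t min_size_bytes) {
  std::make_heap(sizes.begin(), sizes.end());
  auto it = sizes.end();
  while (it != sizes.begin() && sizes.front() >= min_size_bytes) {
    // Move the current maximum to |it - 1| and re-heapify [begin, it - 1).
    std::pop_heap(sizes.begin(), it);
    --it;
  }
  // [it, end()) now holds the kept buckets in ascending order; [begin(), it)
  // is the discarded long tail.
  return std::vector<size_t>(it, sizes.end());
}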
169 | 174 |
170 bool operator<(Entry lhs, Entry rhs) { | 175 bool operator<(Entry lhs, Entry rhs) { |
171 // There is no need to compare |size|. If the backtrace and type name are | 176 // There is no need to compare |size|. If the backtrace and type name are |
172 // equal then the sizes must be equal as well. | 177 // equal then the sizes must be equal as well. |
173 return std::tie(lhs.stack_frame_id, lhs.type_id) < | 178 return std::tie(lhs.stack_frame_id, lhs.type_id) < |
174 std::tie(rhs.stack_frame_id, rhs.type_id); | 179 std::tie(rhs.stack_frame_id, rhs.type_id); |
175 } | 180 } |
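std::tie builds tuples of references that compare lexicographically, so entries order primarily by |stack_frame_id| and fall back to |type_id| on ties:

#include <cassert>
#include <tuple>

int main() {
  int frame_a = 1, type_a = 9;
  int frame_b = 1, type_b = 2;
  // Equal stack frame ids, so the comparison falls through to the type ids.
  assert(std::tie(frame_b, type_b) < std::tie(frame_a, type_a));
  return 0;
}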
176 | 181 |
177 HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator, | 182 HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator, |
178 TypeNameDeduplicator* type_name_deduplicator) | 183 TypeNameDeduplicator* type_name_deduplicator, |
| 184 uint32_t breakdown_threshold_bytes) |
179 : stack_frame_deduplicator_(stack_frame_deduplicator), | 185 : stack_frame_deduplicator_(stack_frame_deduplicator), |
180 type_name_deduplicator_(type_name_deduplicator) {} | 186 type_name_deduplicator_(type_name_deduplicator), |
| 187 breakdown_threshold_bytes_(breakdown_threshold_bytes) { |
| 188 } |
181 | 189 |
182 HeapDumpWriter::~HeapDumpWriter() {} | 190 HeapDumpWriter::~HeapDumpWriter() {} |
183 | 191 |
184 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) { | 192 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) { |
185 // The contexts in the bucket are all different, but the [begin, cursor) range | 193 // The contexts in the bucket are all different, but the [begin, cursor) range |
186 // is equal for all contexts in the bucket, and the type names are the same if | 194 // is equal for all contexts in the bucket, and the type names are the same if |
187 // |is_broken_down_by_type_name| is set. | 195 // |is_broken_down_by_type_name| is set. |
188 DCHECK(!bucket.metrics_by_context.empty()); | 196 DCHECK(!bucket.metrics_by_context.empty()); |
189 | 197 |
190 const AllocationContext* context = bucket.metrics_by_context.front().first; | 198 const AllocationContext* context = bucket.metrics_by_context.front().first; |
(...skipping 12 matching lines...) |
203 : -1; | 211 : -1; |
204 | 212 |
205 entry.size = bucket.size; | 213 entry.size = bucket.size; |
206 entry.count = bucket.count; | 214 entry.count = bucket.count; |
207 | 215 |
208 auto position_and_inserted = entries_.insert(entry); | 216 auto position_and_inserted = entries_.insert(entry); |
209 return position_and_inserted.second; | 217 return position_and_inserted.second; |
210 } | 218 } |
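|entries_| is a set (its declaration is outside this diff; std::set semantics assumed here), and set insert returns an {iterator, bool} pair whose bool is true only when the element was newly inserted. That bool is what lets BreakDown recurse into each bucket exactly once:

#include <cassert>
#include <set>

int main() {
  std::set<int> entries;
  assert(entries.insert(42).second);   // First insertion: true.
  assert(!entries.insert(42).second);  // Duplicate: false, no recursion.
  return 0;
}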
211 | 219 |
212 void HeapDumpWriter::BreakDown(const Bucket& bucket) { | 220 void HeapDumpWriter::BreakDown(const Bucket& bucket) { |
213 auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace); | 221 auto by_backtrace = BreakDownBy(bucket, |
214 auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName); | 222 BreakDownMode::kByBacktrace, |
| 223 breakdown_threshold_bytes_); |
| 224 auto by_type_name = BreakDownBy(bucket, |
| 225 BreakDownMode::kByTypeName, |
| 226 breakdown_threshold_bytes_); |
215 | 227 |
216 // Insert entries for the buckets. If a bucket was not present before, it has | 228 // Insert entries for the buckets. If a bucket was not present before, it has |
217 // not been broken down before, so recursively continue breaking down in that | 229 // not been broken down before, so recursively continue breaking down in that |
218 // case. There might be multiple routes to the same entry (first break down | 230 // case. There might be multiple routes to the same entry (first break down |
219 // by type name, then by backtrace, or first by backtrace and then by type), | 231 // by type name, then by backtrace, or first by backtrace and then by type), |
220 // so a set is used to avoid dumping and breaking down entries more than once. | 232 // so a set is used to avoid dumping and breaking down entries more than once. |
221 | 233 |
222 for (const Bucket& subbucket : by_backtrace) | 234 for (const Bucket& subbucket : by_backtrace) |
223 if (AddEntryForBucket(subbucket)) | 235 if (AddEntryForBucket(subbucket)) |
224 BreakDown(subbucket); | 236 BreakDown(subbucket); |
(...skipping 66 matching lines...) |
291 } | 303 } |
292 | 304 |
293 traced_value->EndArray(); // "entries" | 305 traced_value->EndArray(); // "entries" |
294 return traced_value; | 306 return traced_value; |
295 } | 307 } |
296 | 308 |
297 } // namespace internal | 309 } // namespace internal |
298 | 310 |
299 std::unique_ptr<TracedValue> ExportHeapDump( | 311 std::unique_ptr<TracedValue> ExportHeapDump( |
300 const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context, | 312 const hash_map<AllocationContext, AllocationMetrics>& metrics_by_context, |
301 StackFrameDeduplicator* stack_frame_deduplicator, | 313 const MemoryDumpSessionState& session_state) { |
302 TypeNameDeduplicator* type_name_deduplicator) { | 314 internal::HeapDumpWriter writer( |
303 internal::HeapDumpWriter writer(stack_frame_deduplicator, | 315 session_state.stack_frame_deduplicator(), |
304 type_name_deduplicator); | 316 session_state.type_name_deduplicator(), |
| 317 session_state.memory_dump_config().heap_profiler_options |
| 318 .breakdown_threshold_bytes); |
305 return Serialize(writer.Summarize(metrics_by_context)); | 319 return Serialize(writer.Summarize(metrics_by_context)); |
306 } | 320 } |
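With the new signature, callers pass only the session state; both deduplicators and the breakdown threshold are pulled from it. A hypothetical call site, assuming |metrics| and |session_state| are populated elsewhere:

// Sketch only; the session state is typically held by scoped_refptr at call
// sites, hence the dereference.
std::unique_ptr<TracedValue> dump =
    ExportHeapDump(metrics, *session_state);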
307 | 321 |
308 } // namespace trace_event | 322 } // namespace trace_event |
309 } // namespace base | 323 } // namespace base |