OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" | 5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" |
6 | 6 |
7 #include <stdint.h> | 7 #include <stdint.h> |
8 | 8 |
9 #include <algorithm> | 9 #include <algorithm> |
10 #include <iterator> | 10 #include <iterator> |
(...skipping 52 matching lines...)
63 } | 63 } |
64 | 64 |
65 // Groups the allocations in the bucket by |breakBy|. The buckets in the | 65 // Groups the allocations in the bucket by |breakBy|. The buckets in the |
66 // returned list will have |backtrace_cursor| advanced or | 66 // returned list will have |backtrace_cursor| advanced or |
67 // |is_broken_down_by_type_name| set depending on the property to group by. | 67 // |is_broken_down_by_type_name| set depending on the property to group by. |
68 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { | 68 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { |
69 base::hash_map<const char*, Bucket> breakdown; | 69 base::hash_map<const char*, Bucket> breakdown; |
70 | 70 |
71 if (breakBy == BreakDownMode::kByBacktrace) { | 71 if (breakBy == BreakDownMode::kByBacktrace) { |
72 for (const auto& context_and_size : bucket.bytes_by_context) { | 72 for (const auto& context_and_size : bucket.bytes_by_context) { |
73 DCHECK(context_and_size.second > 0); | |
Primiano Tucci (use gerrit) 2016/04/05 18:38:48:
I think you don't need these ones. These are inter[...]
73 const Backtrace& backtrace = context_and_size.first->backtrace; | 74 const Backtrace& backtrace = context_and_size.first->backtrace; |
74 const char* const* begin = std::begin(backtrace.frames); | 75 const char* const* begin = std::begin(backtrace.frames); |
75 const char* const* end = std::end(backtrace.frames); | 76 const char* const* end = std::end(backtrace.frames); |
76 const char* const* cursor = begin + bucket.backtrace_cursor; | 77 const char* const* cursor = begin + bucket.backtrace_cursor; |
77 | 78 |
78 // The backtrace in the context is padded with null pointers, but these | 79 // The backtrace in the context is padded with null pointers, but these |
79 // should not be considered for breakdown. Adjust end to point past the | 80 // should not be considered for breakdown. Adjust end to point past the |
80 // last non-null frame. | 81 // last non-null frame. |
81 while (begin != end && *(end - 1) == nullptr) | 82 while (begin != end && *(end - 1) == nullptr) |
82 end--; | 83 end--; |
83 | 84 |
84 DCHECK_LE(cursor, end); | 85 DCHECK_LE(cursor, end); |
85 | 86 |
86 if (cursor != end) { | 87 if (cursor != end) { |
87 Bucket& subbucket = breakdown[*cursor]; | 88 Bucket& subbucket = breakdown[*cursor]; |
88 subbucket.size += context_and_size.second; | 89 subbucket.size += context_and_size.second; |
89 subbucket.bytes_by_context.push_back(context_and_size); | 90 subbucket.bytes_by_context.push_back(context_and_size); |
90 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; | 91 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; |
91 subbucket.is_broken_down_by_type_name = | 92 subbucket.is_broken_down_by_type_name = |
92 bucket.is_broken_down_by_type_name; | 93 bucket.is_broken_down_by_type_name; |
93 DCHECK_GT(subbucket.size, 0u); | 94 DCHECK_GT(subbucket.size, 0u); |
94 } | 95 } |
95 } | 96 } |
96 } else if (breakBy == BreakDownMode::kByTypeName) { | 97 } else if (breakBy == BreakDownMode::kByTypeName) { |
97 if (!bucket.is_broken_down_by_type_name) { | 98 if (!bucket.is_broken_down_by_type_name) { |
98 for (const auto& context_and_size : bucket.bytes_by_context) { | 99 for (const auto& context_and_size : bucket.bytes_by_context) { |
100 DCHECK(context_and_size.second > 0); | |
Primiano Tucci (use gerrit) 2016/04/05 18:38:48:
ditto
99 const AllocationContext* context = context_and_size.first; | 101 const AllocationContext* context = context_and_size.first; |
100 Bucket& subbucket = breakdown[context->type_name]; | 102 Bucket& subbucket = breakdown[context->type_name]; |
101 subbucket.size += context_and_size.second; | 103 subbucket.size += context_and_size.second; |
102 subbucket.bytes_by_context.push_back(context_and_size); | 104 subbucket.bytes_by_context.push_back(context_and_size); |
103 subbucket.backtrace_cursor = bucket.backtrace_cursor; | 105 subbucket.backtrace_cursor = bucket.backtrace_cursor; |
104 subbucket.is_broken_down_by_type_name = true; | 106 subbucket.is_broken_down_by_type_name = true; |
105 DCHECK_GT(subbucket.size, 0u); | 107 DCHECK_GT(subbucket.size, 0u); |
106 } | 108 } |
107 } | 109 } |
108 } | 110 } |
(...skipping 25 matching lines...)
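
Annotation: the grouping in GetSubbuckets above is a plain hash-map accumulation. The key is either the frame at |backtrace_cursor| or the type name, and each subbucket accumulates bytes under that key. A minimal sketch of the kByBacktrace path, using simplified stand-in types rather than the real Bucket and AllocationContext:

#include <cstddef>
#include <iterator>
#include <map>
#include <vector>

// Simplified stand-ins for the real types; for illustration only.
struct MiniAlloc {
  const char* frames[4];  // Null-padded, like Backtrace::frames.
  size_t bytes;
};

// Mirrors the kByBacktrace branch above: trim the trailing null padding,
// then accumulate bytes under the frame at |cursor|. Keys compare by pointer
// identity, matching the real code's reliance on interned frame strings.
std::map<const char*, size_t> GroupByFrame(
    const std::vector<MiniAlloc>& allocs, size_t cursor) {
  std::map<const char*, size_t> bytes_by_frame;
  for (const MiniAlloc& alloc : allocs) {
    const char* const* begin = std::begin(alloc.frames);
    const char* const* end = std::end(alloc.frames);
    while (begin != end && *(end - 1) == nullptr)
      end--;  // Drop the padding, as the hunk above does.
    if (begin + cursor < end)
      bytes_by_frame[begin[cursor]] += alloc.bytes;
  }
  return bytes_by_frame;
}

With two allocations whose trimmed frames are {main, A} and {main, B}, cursor 0 produces a single group keyed by main; at cursor 1 they split into A and B. Each recursion level advances the cursor by one, which is what subbucket.backtrace_cursor = bucket.backtrace_cursor + 1 records.
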
134 // part that contains the max-heap of small buckets. | 136 // part that contains the max-heap of small buckets. |
135 size_t accounted_for = 0; | 137 size_t accounted_for = 0; |
136 std::vector<Bucket>::iterator it; | 138 std::vector<Bucket>::iterator it; |
137 for (it = buckets.end(); it != buckets.begin(); --it) { | 139 for (it = buckets.end(); it != buckets.begin(); --it) { |
138 // Compute the contribution to the number of bytes accounted for as a | 140 // Compute the contribution to the number of bytes accounted for as a |
139 // fraction of 125 (in increments of 0.8 percent). Anything less than 1/125 | 141 // fraction of 125 (in increments of 0.8 percent). Anything less than 1/125 |
140 // is rounded down to 0 due to integer division. Buckets are iterated by | 142 // is rounded down to 0 due to integer division. Buckets are iterated by |
141 // descending size, so later buckets cannot have a larger contribution than | 143 // descending size, so later buckets cannot have a larger contribution than |
142 // this one. | 144 // this one. |
143 accounted_for += buckets.front().size; | 145 accounted_for += buckets.front().size; |
144 size_t contribution = buckets.front().size * 125 / accounted_for; | 146 if (buckets.front().size < (accounted_for / 125)) |
Dmitry Skiba 2016/04/05 18:54:13:
With new checks, do we still need this change? Ver[...]
145 if (contribution == 0) | |
146 break; | 147 break; |
147 | 148 |
148 // Put the largest bucket in [begin, it) at |it - 1| and max-heapify | 149 // Put the largest bucket in [begin, it) at |it - 1| and max-heapify |
149 // [begin, it - 1). This puts the next largest bucket at |buckets.front()|. | 150 // [begin, it - 1). This puts the next largest bucket at |buckets.front()|. |
150 std::pop_heap(buckets.begin(), it); | 151 std::pop_heap(buckets.begin(), it); |
151 } | 152 } |
152 | 153 |
153 // At this point, |buckets| looks like this (numbers are bucket sizes): | 154 // At this point, |buckets| looks like this (numbers are bucket sizes): |
154 // | 155 // |
155 // <-- max-heap of small buckets ---> | 156 // <-- max-heap of small buckets ---> |
(...skipping 71 matching lines...)
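
Annotation: the rewritten cut-off above keeps the same roughly 0.8 percent (1/125) threshold but drops the |size * 125| multiplication, presumably to avoid size_t overflow for very large buckets; Dmitry's truncated question appears to probe whether the rewrite is still needed. A worked example with hypothetical sizes, not taken from the CL:

#include <cstddef>
#include <cstdio>

int main() {
  const size_t accounted_for = 100000;  // Hypothetical running total.
  const size_t sizes[] = {500, 1000};   // 0.5% and 1.0% of that total.
  for (size_t size : sizes) {
    // OLD form: contribution in 0.8% increments, rounded down to 0 below 1/125.
    bool old_drops = (size * 125 / accounted_for) == 0;
    // NEW form: same threshold, no multiplication.
    bool new_drops = size < (accounted_for / 125);
    printf("size=%zu old_drops=%d new_drops=%d\n", size, old_drops, new_drops);
  }
  // Both forms cut 500 and keep 1000. They can disagree exactly at the
  // boundary because of integer division: accounted_for = 126, size = 1 is
  // dropped by the OLD form (125 / 126 == 0) but kept by the NEW one
  // (1 < 126 / 125 == 1 is false).
  return 0;
}
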
227 BreakDown(subbucket); | 228 BreakDown(subbucket); |
228 } | 229 } |
229 | 230 |
230 const std::set<Entry>& HeapDumpWriter::Summarize( | 231 const std::set<Entry>& HeapDumpWriter::Summarize( |
231 const hash_map<AllocationContext, size_t>& bytes_by_context) { | 232 const hash_map<AllocationContext, size_t>& bytes_by_context) { |
232 // Start with one bucket that represents the entire heap. Iterate by | 233 // Start with one bucket that represents the entire heap. Iterate by |
233 // reference, because the allocation contexts are going to point to allocation | 234 // reference, because the allocation contexts are going to point to allocation |
234 // contexts stored in |bytes_by_context|. | 235 // contexts stored in |bytes_by_context|. |
235 Bucket root_bucket; | 236 Bucket root_bucket; |
236 for (const auto& context_and_size : bytes_by_context) { | 237 for (const auto& context_and_size : bytes_by_context) { |
238 DCHECK(context_and_size.second > 0); | |
Primiano Tucci (use gerrit) 2016/04/05 18:38:48:
Right, you just need this one.
Nit: DCHECK_GT(conte[...]
237 const AllocationContext* context = &context_and_size.first; | 239 const AllocationContext* context = &context_and_size.first; |
238 const size_t size = context_and_size.second; | 240 const size_t size = context_and_size.second; |
239 root_bucket.bytes_by_context.push_back(std::make_pair(context, size)); | 241 root_bucket.bytes_by_context.push_back(std::make_pair(context, size)); |
240 root_bucket.size += size; | 242 root_bucket.size += size; |
241 } | 243 } |
242 | 244 |
243 AddEntryForBucket(root_bucket); | 245 AddEntryForBucket(root_bucket); |
244 | 246 |
245 // Recursively break down the heap and fill |entries_| with entries to dump. | 247 // Recursively break down the heap and fill |entries_| with entries to dump. |
246 BreakDown(root_bucket); | 248 BreakDown(root_bucket); |
(...skipping 46 matching lines...)
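
Annotation: Primiano's nit above is cut off by the review tool, but given the line it is attached to, a plausible completion is the two-argument macro form, which logs both operands on failure. Assuming that reading, the one check that survives in Summarize would become:

// Assumed completion of the truncated nit; DCHECK_GT prints both values
// when the check fails, unlike DCHECK with an inline comparison.
DCHECK_GT(context_and_size.second, 0u);
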
293 const hash_map<AllocationContext, size_t>& bytes_by_size, | 295 const hash_map<AllocationContext, size_t>& bytes_by_size, |
294 StackFrameDeduplicator* stack_frame_deduplicator, | 296 StackFrameDeduplicator* stack_frame_deduplicator, |
295 TypeNameDeduplicator* type_name_deduplicator) { | 297 TypeNameDeduplicator* type_name_deduplicator) { |
296 internal::HeapDumpWriter writer(stack_frame_deduplicator, | 298 internal::HeapDumpWriter writer(stack_frame_deduplicator, |
297 type_name_deduplicator); | 299 type_name_deduplicator); |
298 return Serialize(writer.Summarize(bytes_by_size)); | 300 return Serialize(writer.Summarize(bytes_by_size)); |
299 } | 301 } |
300 | 302 |
301 } // namespace trace_event | 303 } // namespace trace_event |
302 } // namespace base | 304 } // namespace base |