| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" | 5 #include "base/trace_event/heap_profiler_heap_dump_writer.h" |
| 6 | 6 |
| 7 #include <stdint.h> | 7 #include <stdint.h> |
| 8 | 8 |
| 9 #include <algorithm> | 9 #include <algorithm> |
| 10 #include <iterator> | 10 #include <iterator> |
| (...skipping 48 matching lines...) |
| 59 | 59 |
| 60 // Comparison operator to order buckets by their size. | 60 // Comparison operator to order buckets by their size. |
| 61 bool operator<(const Bucket& lhs, const Bucket& rhs) { | 61 bool operator<(const Bucket& lhs, const Bucket& rhs) { |
| 62 return lhs.size < rhs.size; | 62 return lhs.size < rhs.size; |
| 63 } | 63 } |
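Note: operator< compares buckets by |size| alone, so standard algorithms can order or select allocation groups by how many bytes they account for. A minimal call-site sketch (illustrative only, not part of this CL; ProcessLargestBucket is a hypothetical helper):

    // Find the subbucket accounting for the most bytes, using the
    // size-based operator< defined above. <algorithm> is already
    // included by this file.
    std::vector<Bucket> subbuckets =
        GetSubbuckets(bucket, BreakDownMode::kByBacktrace);
    auto largest = std::max_element(subbuckets.begin(), subbuckets.end());
    if (largest != subbuckets.end())
      ProcessLargestBucket(*largest);  // Hypothetical helper.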
| 64 | 64 |
| 65 // Groups the allocations in the bucket by |breakBy|. The buckets in the | 65 // Groups the allocations in the bucket by |breakBy|. The buckets in the |
| 66 // returned list will have |backtrace_cursor| advanced or | 66 // returned list will have |backtrace_cursor| advanced or |
| 67 // |is_broken_down_by_type_name| set depending on the property to group by. | 67 // |is_broken_down_by_type_name| set depending on the property to group by. |
| 68 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { | 68 std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { |
| 69 base::hash_map<const char*, Bucket> breakdown; | 69 base::hash_map<const void*, Bucket> breakdown; |
| 70 | 70 |
| 71 if (breakBy == BreakDownMode::kByBacktrace) { | 71 if (breakBy == BreakDownMode::kByBacktrace) { |
| 72 for (const auto& context_and_size : bucket.bytes_by_context) { | 72 for (const auto& context_and_size : bucket.bytes_by_context) { |
| 73 const Backtrace& backtrace = context_and_size.first->backtrace; | 73 const Backtrace& backtrace = context_and_size.first->backtrace; |
| 74 const char* const* begin = std::begin(backtrace.frames); | 74 const StackFrame* begin = std::begin(backtrace.frames); |
| 75 const char* const* end = std::end(backtrace.frames); | 75 const StackFrame* end = begin + backtrace.frame_count; |
| 76 const char* const* cursor = begin + bucket.backtrace_cursor; | 76 const StackFrame* cursor = begin + bucket.backtrace_cursor; |
| 77 | |
| 78 // The backtrace in the context is padded with null pointers, but these | |
| 79 // should not be considered for breakdown. Adjust end to point past the | |
| 80 // last non-null frame. | |
| 81 while (begin != end && *(end - 1) == nullptr) | |
| 82 end--; | |
| 83 | 77 |
| 84 DCHECK_LE(cursor, end); | 78 DCHECK_LE(cursor, end); |
| 85 | 79 |
| 86 if (cursor != end) { | 80 if (cursor != end) { |
| 87 Bucket& subbucket = breakdown[*cursor]; | 81 Bucket& subbucket = breakdown[cursor->value]; |
| 88 subbucket.size += context_and_size.second; | 82 subbucket.size += context_and_size.second; |
| 89 subbucket.bytes_by_context.push_back(context_and_size); | 83 subbucket.bytes_by_context.push_back(context_and_size); |
| 90 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; | 84 subbucket.backtrace_cursor = bucket.backtrace_cursor + 1; |
| 91 subbucket.is_broken_down_by_type_name = | 85 subbucket.is_broken_down_by_type_name = |
| 92 bucket.is_broken_down_by_type_name; | 86 bucket.is_broken_down_by_type_name; |
| 93 DCHECK_GT(subbucket.size, 0u); | 87 DCHECK_GT(subbucket.size, 0u); |
| 94 } | 88 } |
| 95 } | 89 } |
| 96 } else if (breakBy == BreakDownMode::kByTypeName) { | 90 } else if (breakBy == BreakDownMode::kByTypeName) { |
| 97 if (!bucket.is_broken_down_by_type_name) { | 91 if (!bucket.is_broken_down_by_type_name) { |
| (...skipping 78 matching lines...) |
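Note on the type change in this hunk: frames go from raw const char* pointers to StackFrame entries, the breakdown map is keyed on the frame's opaque |value| (hence hash_map<const void*, Bucket>), and the scan for trailing null padding is replaced by an explicit |frame_count|. A sketch of the shapes this diff appears to assume (any field beyond |value| and |frame_count| is a guess, not taken from this CL; the real definitions live in base/trace_event/heap_profiler_allocation_context.h):

    // Sketch only, under the assumptions stated above.
    struct StackFrame {
      enum class Type { TRACE_EVENT_NAME, THREAD_NAME, PROGRAM_COUNTER };
      Type type;
      const void* value;  // Interned name or raw program counter.
    };

    struct Backtrace {
      StackFrame frames[12];  // Fixed capacity; 12 is an assumed bound.
      size_t frame_count;     // Valid leading entries; no null padding.
    };

Keying on const void* works because |value| identifies the frame regardless of its type, which is what the lookup on the new line uses.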
| 176 HeapDumpWriter::~HeapDumpWriter() {} | 170 HeapDumpWriter::~HeapDumpWriter() {} |
| 177 | 171 |
| 178 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) { | 172 bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) { |
| 179 // The contexts in the bucket are all different, but the [begin, cursor) range | 173 // The contexts in the bucket are all different, but the [begin, cursor) range |
| 180 // is equal for all contexts in the bucket, and the type names are the same if | 174 // is equal for all contexts in the bucket, and the type names are the same if |
| 181 // |is_broken_down_by_type_name| is set. | 175 // |is_broken_down_by_type_name| is set. |
| 182 DCHECK(!bucket.bytes_by_context.empty()); | 176 DCHECK(!bucket.bytes_by_context.empty()); |
| 183 | 177 |
| 184 const AllocationContext* context = bucket.bytes_by_context.front().first; | 178 const AllocationContext* context = bucket.bytes_by_context.front().first; |
| 185 | 179 |
| 186 const char* const* backtrace_begin = std::begin(context->backtrace.frames); | 180 const StackFrame* backtrace_begin = std::begin(context->backtrace.frames); |
| 187 const char* const* backtrace_end = backtrace_begin + bucket.backtrace_cursor; | 181 const StackFrame* backtrace_end = backtrace_begin + bucket.backtrace_cursor; |
| 188 DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames)); | 182 DCHECK_LE(bucket.backtrace_cursor, arraysize(context->backtrace.frames)); |
| 189 | 183 |
| 190 Entry entry; | 184 Entry entry; |
| 191 entry.stack_frame_id = | 185 entry.stack_frame_id = stack_frame_deduplicator_->Insert( |
| 192 stack_frame_deduplicator_->Insert(backtrace_begin, backtrace_end); | 186 backtrace_begin, backtrace_end); |
| 193 | 187 |
| 194 // Deduplicate the type name, or use ID -1 if type name is not set. | 188 // Deduplicate the type name, or use ID -1 if type name is not set. |
| 195 entry.type_id = bucket.is_broken_down_by_type_name | 189 entry.type_id = bucket.is_broken_down_by_type_name |
| 196 ? type_name_deduplicator_->Insert(context->type_name) | 190 ? type_name_deduplicator_->Insert(context->type_name) |
| 197 : -1; | 191 : -1; |
| 198 | 192 |
| 199 entry.size = bucket.size; | 193 entry.size = bucket.size; |
| 200 | 194 |
| 201 auto position_and_inserted = entries_.insert(entry); | 195 auto position_and_inserted = entries_.insert(entry); |
| 202 return position_and_inserted.second; | 196 return position_and_inserted.second; |
| (...skipping 85 matching lines...) |
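Note: the boolean AddEntryForBucket returns comes straight from the set insertion, i.e. it is true only when the bucket produced an Entry not seen before. A self-contained sketch of that contract, assuming |entries_| is a std::set over a simplified Entry keyed by (stack_frame_id, type_id); the real Entry is defined elsewhere in this file:

    // Needs <set> and <tuple>. Simplified stand-in for the real Entry.
    struct Entry {
      int stack_frame_id;
      int type_id;
      size_t size;
      bool operator<(const Entry& other) const {
        return std::tie(stack_frame_id, type_id) <
               std::tie(other.stack_frame_id, other.type_id);
      }
    };

    std::set<Entry> entries;
    Entry e = {1, -1, 64};
    bool first = entries.insert(e).second;   // true: newly inserted.
    bool again = entries.insert(e).second;   // false: already present.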
| 288 const hash_map<AllocationContext, size_t>& bytes_by_size, | 282 const hash_map<AllocationContext, size_t>& bytes_by_size, |
| 289 StackFrameDeduplicator* stack_frame_deduplicator, | 283 StackFrameDeduplicator* stack_frame_deduplicator, |
| 290 TypeNameDeduplicator* type_name_deduplicator) { | 284 TypeNameDeduplicator* type_name_deduplicator) { |
| 291 internal::HeapDumpWriter writer(stack_frame_deduplicator, | 285 internal::HeapDumpWriter writer(stack_frame_deduplicator, |
| 292 type_name_deduplicator); | 286 type_name_deduplicator); |
| 293 return Serialize(writer.Summarize(bytes_by_size)); | 287 return Serialize(writer.Summarize(bytes_by_size)); |
| 294 } | 288 } |
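Note: this wrapper constructs a HeapDumpWriter over the two shared deduplicators, summarizes the per-context byte counts, and serializes the result in one expression. The deduplicators are passed as raw pointers, so callers must keep them alive for the call (and typically for the whole trace, so the emitted IDs stay meaningful). A hedged call-site sketch; the wrapper's name and return type are elided by the skipped lines above, so both are placeholders:

    // "WriteHeapDump" stands in for the elided wrapper name, and auto
    // for its elided return type.
    auto heap_dump = WriteHeapDump(bytes_by_size, stack_frame_deduplicator,
                                   type_name_deduplicator);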
| 295 | 289 |
| 296 } // namespace trace_event | 290 } // namespace trace_event |
| 297 } // namespace base | 291 } // namespace base |