Chromium Code Reviews

Index: base/trace_event/heap_profiler_heap_dump_writer.cc
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc
index 9a3112eacf3ea508c4b6da1adc54d279280c0ae5..ab5b12f382c3f3c6a804bdc42e87730fd7e73051 100644
--- a/base/trace_event/heap_profiler_heap_dump_writer.cc
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc
@@ -18,7 +18,9 @@
 #include "base/strings/stringprintf.h"
 #include "base/trace_event/heap_profiler_stack_frame_deduplicator.h"
 #include "base/trace_event/heap_profiler_type_name_deduplicator.h"
+#include "base/trace_event/trace_config.h"
 #include "base/trace_event/trace_event_argument.h"
+#include "base/trace_event/trace_log.h"

 // Most of what the |HeapDumpWriter| does is aggregating detailed information
 // about the heap and deciding what to dump. The input to this process is a list
@@ -128,8 +130,10 @@ std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) {
 }
 // Breaks down the bucket by |breakBy|. Returns only buckets that contribute
-// significantly to the total size. The long tail is omitted.
-std::vector<Bucket> BreakDownBy(const Bucket& bucket, BreakDownMode breakBy) {
+// more than |minSizeBytes| to the total size. The long tail is omitted.
+std::vector<Bucket> BreakDownBy(const Bucket& bucket,
+                                BreakDownMode breakBy,
+                                size_t minSizeBytes) {
ssid  2016/04/20 23:00:30
Wait, isn't this supposed to be min_size_in_bytes?

Maria  2016/04/21 00:03:56
breakBy got me confused :) fixed. I might do a sep
   std::vector<Bucket> buckets = GetSubbuckets(bucket, breakBy);

   // Ensure that |buckets| is a max-heap (the data structure, not memory heap),
@@ -141,14 +145,12 @@ std::vector<Bucket> BreakDownBy(const Bucket& bucket, BreakDownMode breakBy) {
   std::make_heap(buckets.begin(), buckets.end());
   // Keep including buckets until adding one would increase the number of
-  // bytes accounted for by less than 0.8% (1/125). This simple heuristic works
-  // quite well. The large buckets end up in [it, end()), [begin(), it) is the
-  // part that contains the max-heap of small buckets.
-  size_t accounted_for = 0;
+  // bytes accounted for by less than |minSizeBytes|. The large buckets end up
+  // in [it, end()), [begin(), it) is the part that contains the max-heap
+  // of small buckets.
   std::vector<Bucket>::iterator it;
   for (it = buckets.end(); it != buckets.begin(); --it) {
-    accounted_for += buckets.front().size;
-    if (buckets.front().size < (accounted_for / 125))
+    if (buckets.front().size < minSizeBytes)
       break;

     // Put the largest bucket in [begin, it) at |it - 1| and max-heapify
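
The selection loop above is easier to reason about in isolation. Below is a minimal, self-contained sketch of the same heap-based pruning, using plain size_t values in place of |Bucket|; the function name KeepSignificant is made up for illustration and is not part of this CL:

#include <algorithm>
#include <cstddef>
#include <vector>

// Returns the values of |sizes| that are at least |min_size_bytes|, in
// ascending order. Mirrors the loop in BreakDownBy: |sizes| is turned into a
// max-heap, and the largest element is repeatedly moved to the back until the
// heap's front drops below the threshold.
std::vector<size_t> KeepSignificant(std::vector<size_t> sizes,
                                    size_t min_size_bytes) {
  std::make_heap(sizes.begin(), sizes.end());
  std::vector<size_t>::iterator it;
  for (it = sizes.end(); it != sizes.begin(); --it) {
    if (sizes.front() < min_size_bytes)
      break;
    // Moves the heap's maximum to |it - 1| and re-heapifies [begin(), it - 1).
    std::pop_heap(sizes.begin(), it);
  }
  // [it, end()) holds the significant values; [begin(), it) is the long tail.
  return std::vector<size_t>(it, sizes.end());
}

Compared to fully sorting the subbuckets, this does O(n) work to build the heap plus O(log n) per surviving bucket, which matters because most buckets fall into the omitted long tail.
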
@@ -183,7 +185,11 @@ bool operator<(Entry lhs, Entry rhs) {
 HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
                                TypeNameDeduplicator* type_name_deduplicator)
     : stack_frame_deduplicator_(stack_frame_deduplicator),
-      type_name_deduplicator_(type_name_deduplicator) {}
+      type_name_deduplicator_(type_name_deduplicator) {
+  const TraceConfig trace_config =
ssid  2016/04/20 23:00:30
TraceConfig& will reduce a copy?

Maria  2016/04/21 00:03:56
I'll just inline this one
+      TraceLog::GetInstance()->GetCurrentTraceConfig();
+  min_allocation_size_bytes_ = trace_config.GetMinAllocationSizeBytes();
+}
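
For reference, the inlining Maria mentions in the thread above would plausibly look like the sketch below; the final form is not part of this snippet, so take it as an assumption rather than the landed code:

HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator,
                               TypeNameDeduplicator* type_name_deduplicator)
    : stack_frame_deduplicator_(stack_frame_deduplicator),
      type_name_deduplicator_(type_name_deduplicator) {
  // Chaining the calls on the returned TraceConfig avoids the named local
  // that prompted ssid's question about an extra copy.
  min_allocation_size_bytes_ = TraceLog::GetInstance()
                                   ->GetCurrentTraceConfig()
                                   .GetMinAllocationSizeBytes();
}
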
 HeapDumpWriter::~HeapDumpWriter() {}
@@ -216,8 +222,12 @@ bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) {
 }

 void HeapDumpWriter::BreakDown(const Bucket& bucket) {
-  auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace);
-  auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName);
+  auto by_backtrace = BreakDownBy(bucket,
+                                  BreakDownMode::kByBacktrace,
+                                  min_allocation_size_bytes_);
+  auto by_type_name = BreakDownBy(bucket,
+                                  BreakDownMode::kByTypeName,
+                                  min_allocation_size_bytes_);

   // Insert entries for the buckets. If a bucket was not present before, it has
   // not been broken down before, so recursively continue breaking down in that