Index: base/trace_event/heap_profiler_heap_dump_writer.cc |
diff --git a/base/trace_event/heap_profiler_heap_dump_writer.cc b/base/trace_event/heap_profiler_heap_dump_writer.cc |
index 9a3112eacf3ea508c4b6da1adc54d279280c0ae5..ae9ec7be90e24f20f2ad04da5ca98145de27c584 100644 |
--- a/base/trace_event/heap_profiler_heap_dump_writer.cc |
+++ b/base/trace_event/heap_profiler_heap_dump_writer.cc |
@@ -18,7 +18,9 @@ |
#include "base/strings/stringprintf.h" |
#include "base/trace_event/heap_profiler_stack_frame_deduplicator.h" |
#include "base/trace_event/heap_profiler_type_name_deduplicator.h" |
+#include "base/trace_event/trace_config.h" |
#include "base/trace_event/trace_event_argument.h" |
+#include "base/trace_event/trace_log.h" |
// Most of what the |HeapDumpWriter| does is aggregating detailed information |
// about the heap and deciding what to dump. The Input to this process is a list |
@@ -128,8 +130,10 @@ std::vector<Bucket> GetSubbuckets(const Bucket& bucket, BreakDownMode breakBy) { |
} |
// Breaks down the bucket by |breakBy|. Returns only buckets that contribute |
-// significantly to the total size. The long tail is omitted. |
-std::vector<Bucket> BreakDownBy(const Bucket& bucket, BreakDownMode breakBy) { |
+// more than |min_size_bytes| to the total size. The long tail is omitted. |
+std::vector<Bucket> BreakDownBy(const Bucket& bucket, |
+ BreakDownMode breakBy, |
+ size_t min_size_bytes) { |
std::vector<Bucket> buckets = GetSubbuckets(bucket, breakBy); |
// Ensure that |buckets| is a max-heap (the data structure, not memory heap), |
@@ -141,14 +145,12 @@ std::vector<Bucket> BreakDownBy(const Bucket& bucket, BreakDownMode breakBy) { |
std::make_heap(buckets.begin(), buckets.end()); |
// Keep including buckets until adding one would increase the number of |
- // bytes accounted for by less than 0.8% (1/125). This simple heuristic works |
- // quite well. The large buckets end up in [it, end()), [begin(), it) is the |
- // part that contains the max-heap of small buckets. |
- size_t accounted_for = 0; |
+  // bytes accounted for by less than |min_size_bytes|. The large buckets |
+  // end up in [it, end()), [begin(), it) is the part that contains the |
+  // max-heap of small buckets. |
std::vector<Bucket>::iterator it; |
for (it = buckets.end(); it != buckets.begin(); --it) { |
- accounted_for += buckets.front().size; |
- if (buckets.front().size < (accounted_for / 125)) |
+ if (buckets.front().size < min_size_bytes) |
break; |
// Put the largest bucket in [begin, it) at |it - 1| and max-heapify |
@@ -183,7 +185,10 @@ bool operator<(Entry lhs, Entry rhs) { |
HeapDumpWriter::HeapDumpWriter(StackFrameDeduplicator* stack_frame_deduplicator, |
TypeNameDeduplicator* type_name_deduplicator) |
: stack_frame_deduplicator_(stack_frame_deduplicator), |
- type_name_deduplicator_(type_name_deduplicator) {} |
+ type_name_deduplicator_(type_name_deduplicator) { |
+ min_allocation_size_bytes_ = TraceLog::GetInstance()->GetCurrentTraceConfig() |
Primiano Tucci (use gerrit)
2016/04/21 16:53:21
Hmm this is now introducing the assumption that He
|
+ .GetMinAllocationSizeBytes(); |
+} |
HeapDumpWriter::~HeapDumpWriter() {} |
@@ -216,8 +221,12 @@ bool HeapDumpWriter::AddEntryForBucket(const Bucket& bucket) { |
} |
void HeapDumpWriter::BreakDown(const Bucket& bucket) { |
- auto by_backtrace = BreakDownBy(bucket, BreakDownMode::kByBacktrace); |
- auto by_type_name = BreakDownBy(bucket, BreakDownMode::kByTypeName); |
+ auto by_backtrace = BreakDownBy(bucket, |
+ BreakDownMode::kByBacktrace, |
+ min_allocation_size_bytes_); |
+ auto by_type_name = BreakDownBy(bucket, |
+ BreakDownMode::kByTypeName, |
+ min_allocation_size_bytes_); |
// Insert entries for the buckets. If a bucket was not present before, it has |
// not been broken down before, so recursively continue breaking down in that |