Chromium Code Reviews| Index: base/trace_event/heap_profiler_allocation_register.cc |
| diff --git a/base/trace_event/heap_profiler_allocation_register.cc b/base/trace_event/heap_profiler_allocation_register.cc |
| index 1cfb983852fae5fe66c559fc4788b2d445325d68..370166033094b7937ae2a860294ae5fee721490b 100644 |
| --- a/base/trace_event/heap_profiler_allocation_register.cc |
| +++ b/base/trace_event/heap_profiler_allocation_register.cc |
| @@ -76,22 +76,9 @@ AllocationRegister::AllocationRegister() |
AllocationRegister::AllocationRegister(size_t allocation_capacity,
                                       size_t backtrace_capacity)
    // NOTE(review): CreateBacktraceSentinel() inserts into |backtraces_|, so
    // this mem-initializer relies on |backtraces_| being declared before
    // |out_of_storage_backtrace_index_| in the class definition (members are
    // initialized in declaration order, not initializer-list order) — TODO
    // confirm the member order in the header.
    : allocations_(allocation_capacity),
      backtraces_(backtrace_capacity),
      out_of_storage_backtrace_index_(CreateBacktraceSentinel()) {}
|
Primiano Tucci (use gerrit)
2017/04/04 09:50:23
awesome. I wanted to suggest this yesterday during
awong
2017/04/04 18:28:30
Done.
|
| AllocationRegister::~AllocationRegister() {} |
| @@ -161,6 +148,25 @@ void AllocationRegister::EstimateTraceMemoryOverhead( |
| overhead->Add("AllocationRegister", allocated, resident); |
| } |
// Inserts a synthetic "[out of heap profiler mem]" backtrace into
// |backtraces_| and returns its index. Per its label, this entry presumably
// stands in for real backtraces once backtrace storage is exhausted — the
// caller stores the index in |out_of_storage_backtrace_index_|.
AllocationRegister::BacktraceMap::KVIndex
AllocationRegister::CreateBacktraceSentinel() {
  // One synthetic frame carrying the sentinel label as a thread-name frame.
  Backtrace sentinel = {};
  sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]");
  sentinel.frame_count = 1;

  // Rationale for max / 2: in theory we could just start the sentinel with a
  // refcount == 0. However, using max / 2 allows short circuiting of the
  // conditional in RemoveBacktrace() keeping the sentinel logic out of the fast
  // path. From a functional viewpoint, the sentinel is safe even if we wrap
  // over the refcount.
  BacktraceMap::KVPair::second_type sentinel_refcount =
      std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2;
  auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount);
  // |second| is true only for a fresh insertion; the sentinel must be the
  // first entry placed in the (just-constructed) map.
  DCHECK(index_and_flag.second);

  return index_and_flag.first;
}
| + |
| AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace( |
| const Backtrace& backtrace) { |
| auto index = backtraces_.Insert(backtrace, 0).first; |