OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "base/trace_event/heap_profiler_allocation_register.h" | 5 #include "base/trace_event/heap_profiler_allocation_register.h" |
6 | 6 |
7 #include <algorithm> | 7 #include <algorithm> |
8 #include <limits> | 8 #include <limits> |
9 | 9 |
10 #include "base/trace_event/trace_event_memory_overhead.h" | 10 #include "base/trace_event/trace_event_memory_overhead.h" |
(...skipping 58 matching lines...) | |
69 const uintptr_t shift = 15; | 69 const uintptr_t shift = 15; |
70 const uintptr_t h = (key * a) >> shift; | 70 const uintptr_t h = (key * a) >> shift; |
71 return h; | 71 return h; |
72 } | 72 } |
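For readers skimming this hunk: the function above is a multiply-shift hash. Multiplying the key by a constant `a` (defined in the lines elided from this diff) pushes the best-mixed bits toward the high end of the product, and the right shift keeps them. A minimal self-contained sketch of the same scheme, with a stand-in multiplier since the real `a` is not visible here:

#include <cstdint>

// Multiply-shift hashing: an odd multiplier mixes key bits upward, and the
// shift keeps the well-mixed high bits. The multiplier below is a stand-in;
// the real `a` is defined in the lines elided from this hunk.
uintptr_t MultiplyShiftHash(uintptr_t key) {
  const uintptr_t a = 131101;  // Hypothetical odd multiplier.
  const uintptr_t shift = 15;  // Same shift as the code above.
  return (key * a) >> shift;
}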
73 | 73 |
74 AllocationRegister::AllocationRegister() | 74 AllocationRegister::AllocationRegister() |
75 : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {} | 75 : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {} |
76 | 76 |
77 AllocationRegister::AllocationRegister(size_t allocation_capacity, | 77 AllocationRegister::AllocationRegister(size_t allocation_capacity, |
78 size_t backtrace_capacity) | 78 size_t backtrace_capacity) |
79 : allocations_(allocation_capacity), backtraces_(backtrace_capacity) { | 79 : allocations_(allocation_capacity), |
80 Backtrace sentinel = {}; | 80 backtraces_(backtrace_capacity), |
81 sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]"); | 81 out_of_storage_backtrace_index_(CreateBacktraceSentinel()) {} |
Primiano Tucci (use gerrit) 2017/04/04 09:50:23: awesome. I wanted to suggest this yesterday during
awong 2017/04/04 18:28:30: Done.
| |
82 sentinel.frame_count = 1; | |
83 | |
84 // Rationale for max / 2: in theory we could just start the sentinel with a | |
85 // refcount == 0. However, this optimization avoids to hit the 2nd condition | |
86 // of the "if" in RemoveBacktrace, hence reducing the chances of hurting the | |
87 // fastpath. From a functional viewpoint, the sentinel is safe even if we wrap | |
88 // over the refcount. | |
89 BacktraceMap::KVPair::second_type sentinel_refcount = | |
90 std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2; | |
91 auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount); | |
92 DCHECK(index_and_flag.second); | |
93 out_of_storage_backtrace_index_ = index_and_flag.first; | |
94 } | |
95 | 82 |
96 AllocationRegister::~AllocationRegister() {} | 83 AllocationRegister::~AllocationRegister() {} |
97 | 84 |
98 bool AllocationRegister::Insert(const void* address, | 85 bool AllocationRegister::Insert(const void* address, |
99 size_t size, | 86 size_t size, |
100 const AllocationContext& context) { | 87 const AllocationContext& context) { |
101 DCHECK(address != nullptr); | 88 DCHECK(address != nullptr); |
102 if (size == 0) { | 89 if (size == 0) { |
103 return false; | 90 return false; |
104 } | 91 } |
(...skipping 49 matching lines...) | |
154 | 141 |
155 void AllocationRegister::EstimateTraceMemoryOverhead( | 142 void AllocationRegister::EstimateTraceMemoryOverhead( |
156 TraceEventMemoryOverhead* overhead) const { | 143 TraceEventMemoryOverhead* overhead) const { |
157 size_t allocated = sizeof(AllocationRegister); | 144 size_t allocated = sizeof(AllocationRegister); |
158 size_t resident = sizeof(AllocationRegister) + | 145 size_t resident = sizeof(AllocationRegister) + |
159 allocations_.EstimateUsedMemory() + | 146 allocations_.EstimateUsedMemory() + |
160 backtraces_.EstimateUsedMemory(); | 147 backtraces_.EstimateUsedMemory(); |
161 overhead->Add("AllocationRegister", allocated, resident); | 148 overhead->Add("AllocationRegister", allocated, resident); |
162 } | 149 } |
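A note on the split above, under assumed semantics (this hunk does not show the map internals): `allocated` counts only the register object itself, while `resident` adds EstimateUsedMemory() for both tables, i.e. the portion of their storage actually in use. A toy sketch of that accounting with made-up numbers:

#include <cstddef>
#include <cstdio>

// Assumed accounting, for illustration only: the register object is the
// only direct allocation attributed, and table storage counts as resident
// in proportion to what was actually used.
int main() {
  const size_t allocated = 256;                     // Stand-in for sizeof(AllocationRegister).
  const size_t resident = allocated + 4096 + 8192;  // + used memory of both tables (made up).
  printf("allocated=%zu resident=%zu\n", allocated, resident);
  return 0;
}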
163 | 150 |
151 AllocationRegister::BacktraceMap::KVIndex | |
152 AllocationRegister::CreateBacktraceSentinel() { | |
153 Backtrace sentinel = {}; | |
154 sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]"); | |
155 sentinel.frame_count = 1; | |
156 | |
157 // Rationale for max / 2: in theory we could just start the sentinel with a | |
158 // refcount == 0. However, using max / 2 allows short circuiting of the | |
159 // conditional in RemoveBacktrace() keeping the sentinel logic out of the fast | |
160 // path. From a functional viewpoint, the sentinel is safe even if we wrap | |
161 // over the refcount. | |
162 BacktraceMap::KVPair::second_type sentinel_refcount = | |
163 std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2; | |
164 auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount); | |
165 DCHECK(index_and_flag.second); | |
166 | |
167 return index_and_flag.first; | |
168 } | |
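The max / 2 trick is easiest to see against the removal path. The sketch below is an assumed simplification; the real RemoveBacktrace() is not in this hunk, but per the comment its "if" tests the refcount first and the sentinel second. Seeding the sentinel at max / 2 makes the decrement-to-zero test false for it on every realistic call, so the second comparison is short-circuited out of the fast path, and even unsigned wraparound would only move the count to another large value:

#include <cstddef>
#include <cstdint>

using RefCount = uint32_t;  // Stand-in for BacktraceMap::KVPair::second_type.
using Index = size_t;

void EraseBacktrace(Index) {}  // Hypothetical erase helper, stubbed out.

// Assumed shape of the removal path, not the real RemoveBacktrace(): erase
// only when the refcount reaches zero AND the entry is not the sentinel.
// With the sentinel seeded at max / 2, the first test fails for it, so the
// second comparison is rarely evaluated at all.
void RemoveBacktraceSketch(RefCount& count, Index index, Index sentinel_index) {
  if (--count == 0 && index != sentinel_index)
    EraseBacktrace(index);
}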
169 | |
164 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace( | 170 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace( |
165 const Backtrace& backtrace) { | 171 const Backtrace& backtrace) { |
166 auto index = backtraces_.Insert(backtrace, 0).first; | 172 auto index = backtraces_.Insert(backtrace, 0).first; |
167 if (index == BacktraceMap::kInvalidKVIndex) | 173 if (index == BacktraceMap::kInvalidKVIndex) |
168 return out_of_storage_backtrace_index_; | 174 return out_of_storage_backtrace_index_; |
169 auto& backtrace_and_count = backtraces_.Get(index); | 175 auto& backtrace_and_count = backtraces_.Get(index); |
170 backtrace_and_count.second++; | 176 backtrace_and_count.second++; |
171 return index; | 177 return index; |
172 } | 178 } |
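From a caller's point of view, InsertBacktrace() above never fails: on table exhaustion it substitutes the pre-seeded sentinel index instead of propagating an error. A self-contained sketch of that "intern or fall back to a sentinel" pattern, using a hypothetical fixed-capacity table in place of BacktraceMap:

#include <cstddef>
#include <string>
#include <utility>
#include <vector>

using Index = size_t;
constexpr Index kInvalidIndex = static_cast<Index>(-1);

// Hypothetical stand-in for BacktraceMap: fixed capacity, refcounted values.
class InternTable {
 public:
  explicit InternTable(size_t capacity) : capacity_(capacity) {
    // Pre-seed the sentinel with a huge refcount, mirroring the max / 2
    // trick above, so removals never free it.
    sentinel_index_ = InsertNew("[out of memory]", size_t{1} << 30);
  }

  // Always returns a usable index: an existing entry (refcount bumped), a
  // fresh entry, or the sentinel when the table is full.
  Index Intern(const std::string& value) {
    for (Index i = 0; i < entries_.size(); ++i) {
      if (entries_[i].first == value) {
        entries_[i].second++;
        return i;
      }
    }
    Index index = InsertNew(value, 1);
    return index == kInvalidIndex ? sentinel_index_ : index;
  }

 private:
  Index InsertNew(std::string value, size_t refcount) {
    if (entries_.size() >= capacity_)
      return kInvalidIndex;
    entries_.emplace_back(std::move(value), refcount);
    return entries_.size() - 1;
  }

  size_t capacity_;
  Index sentinel_index_ = kInvalidIndex;
  std::vector<std::pair<std::string, size_t>> entries_;
};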
173 | 179 |
(...skipping 11 matching lines...) | |
185 const auto& address_and_info = allocations_.Get(index); | 191 const auto& address_and_info = allocations_.Get(index); |
186 const auto& backtrace_and_count = | 192 const auto& backtrace_and_count = |
187 backtraces_.Get(address_and_info.second.backtrace_index); | 193 backtraces_.Get(address_and_info.second.backtrace_index); |
188 return {address_and_info.first, address_and_info.second.size, | 194 return {address_and_info.first, address_and_info.second.size, |
189 AllocationContext(backtrace_and_count.first, | 195 AllocationContext(backtrace_and_count.first, |
190 address_and_info.second.type_name)}; | 196 address_and_info.second.type_name)}; |
191 } | 197 } |
192 | 198 |
193 } // namespace trace_event | 199 } // namespace trace_event |
194 } // namespace base | 200 } // namespace base |