| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/heap_profiler_allocation_register.h" | 5 #include "base/trace_event/heap_profiler_allocation_register.h" |
| 6 | 6 |
| 7 #include <algorithm> | 7 #include <algorithm> |
| 8 #include <limits> | 8 #include <limits> |
| 9 | 9 |
| 10 #include "base/trace_event/trace_event_memory_overhead.h" | 10 #include "base/trace_event/trace_event_memory_overhead.h" |
| (...skipping 64 matching lines...) |
| 75 : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {} | 75 : AllocationRegister(kAllocationCapacity, kBacktraceCapacity) {} |
| 76 | 76 |
| 77 AllocationRegister::AllocationRegister(size_t allocation_capacity, | 77 AllocationRegister::AllocationRegister(size_t allocation_capacity, |
| 78 size_t backtrace_capacity) | 78 size_t backtrace_capacity) |
| 79 : allocations_(allocation_capacity), backtraces_(backtrace_capacity) { | 79 : allocations_(allocation_capacity), backtraces_(backtrace_capacity) { |
| 80 Backtrace sentinel = {}; | 80 Backtrace sentinel = {}; |
| 81 sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]"); | 81 sentinel.frames[0] = StackFrame::FromThreadName("[out of heap profiler mem]"); |
| 82 sentinel.frame_count = 1; | 82 sentinel.frame_count = 1; |
| 83 | 83 |
| 84 // Rationale for max / 2: in theory we could just start the sentinel with a | 84 // Rationale for max / 2: in theory we could just start the sentinel with a |
| 85 // refcount == 0. However, this optimization avoids to hit the 2nd condition | 85 // refcount == 0. However, using max / 2 allows short-circuiting of the |
| 86 // of the "if" in RemoveBacktrace, hence reducing the chances of hurting the | 86 // conditional in RemoveBacktrace(), keeping the sentinel logic out of the |
| 87 // fastpath. From a functional viewpoint, the sentinel is safe even if we wrap | 87 // fast path. From a functional viewpoint, the sentinel is safe even if we |
| 88 // over the refcount. | 88 // wrap over the refcount. |
| 89 BacktraceMap::KVPair::second_type sentinel_refcount = | 89 BacktraceMap::KVPair::second_type sentinel_refcount = |
| 90 std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2; | 90 std::numeric_limits<BacktraceMap::KVPair::second_type>::max() / 2; |
| 91 auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount); | 91 auto index_and_flag = backtraces_.Insert(sentinel, sentinel_refcount); |
| 92 DCHECK(index_and_flag.second); | 92 DCHECK(index_and_flag.second); |
| 93 out_of_storage_backtrace_index_ = index_and_flag.first; | 93 DCHECK_EQ(index_and_flag.first, kOutOfStorageBacktraceIndex); |
| 94 } | 94 } |
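The max / 2 sentinel refcount deserves a closer look: RemoveBacktrace() below only evaluates `index != kOutOfStorageBacktraceIndex` after a refcount actually drops to zero, and since the sentinel starts with an effectively inexhaustible count, that second test is never reached for it under a balanced insert/remove stream. Below is a minimal standalone sketch of the same trick; InternTable, Ref and Unref are hypothetical names, not the real BacktraceMap API.

// Sketch only (not Chromium code): reference-counted table whose sentinel
// entry is seeded at max / 2, mirroring the patch above.
#include <cstdint>
#include <limits>
#include <unordered_map>

class InternTable {
 public:
  static constexpr int kSentinelKey = -1;

  InternTable() {
    // Seed the sentinel with max / 2 instead of 0: a balanced Ref/Unref
    // stream can never decrement it to zero, so it is never erased.
    refcounts_[kSentinelKey] = std::numeric_limits<uint32_t>::max() / 2;
  }

  void Ref(int key) { ++refcounts_[key]; }

  void Unref(int key) {
    // The right-hand comparison only runs when a count hits zero, which
    // the sentinel's count never does; the fast path stays one test long.
    if (--refcounts_[key] == 0 && key != kSentinelKey)
      refcounts_.erase(key);
  }

 private:
  std::unordered_map<int, uint32_t> refcounts_;
};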
| 95 | 95 |
| 96 AllocationRegister::~AllocationRegister() {} | 96 AllocationRegister::~AllocationRegister() {} |
| 97 | 97 |
| 98 bool AllocationRegister::Insert(const void* address, | 98 bool AllocationRegister::Insert(const void* address, |
| 99 size_t size, | 99 size_t size, |
| 100 const AllocationContext& context) { | 100 const AllocationContext& context) { |
| 101 DCHECK(address != nullptr); | 101 DCHECK(address != nullptr); |
| 102 if (size == 0) { | 102 if (size == 0) { |
| 103 return false; | 103 return false; |
| (...skipping 54 matching lines...) |
| 158 size_t resident = sizeof(AllocationRegister) + | 158 size_t resident = sizeof(AllocationRegister) + |
| 159 allocations_.EstimateUsedMemory() + | 159 allocations_.EstimateUsedMemory() + |
| 160 backtraces_.EstimateUsedMemory(); | 160 backtraces_.EstimateUsedMemory(); |
| 161 overhead->Add("AllocationRegister", allocated, resident); | 161 overhead->Add("AllocationRegister", allocated, resident); |
| 162 } | 162 } |
| 163 | 163 |
| 164 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace( | 164 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace( |
| 165 const Backtrace& backtrace) { | 165 const Backtrace& backtrace) { |
| 166 auto index = backtraces_.Insert(backtrace, 0).first; | 166 auto index = backtraces_.Insert(backtrace, 0).first; |
| 167 if (index == BacktraceMap::kInvalidKVIndex) | 167 if (index == BacktraceMap::kInvalidKVIndex) |
| 168 return out_of_storage_backtrace_index_; | 168 return kOutOfStorageBacktraceIndex; |
| 169 auto& backtrace_and_count = backtraces_.Get(index); | 169 auto& backtrace_and_count = backtraces_.Get(index); |
| 170 backtrace_and_count.second++; | 170 backtrace_and_count.second++; |
| 171 return index; | 171 return index; |
| 172 } | 172 } |
| 173 | 173 |
| 174 void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) { | 174 void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) { |
| 175 auto& backtrace_and_count = backtraces_.Get(index); | 175 auto& backtrace_and_count = backtraces_.Get(index); |
| 176 if (--backtrace_and_count.second == 0 && | 176 if (--backtrace_and_count.second == 0 && |
| 177 index != out_of_storage_backtrace_index_) { | 177 index != kOutOfStorageBacktraceIndex) { |
| 178 // Backtrace is not referenced anymore - remove it. | 178 // Backtrace is not referenced anymore - remove it. |
| 179 backtraces_.Remove(index); | 179 backtraces_.Remove(index); |
| 180 } | 180 } |
| 181 } | 181 } |
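Taken together, InsertBacktrace() and RemoveBacktrace() implement reference-counted interning with a graceful out-of-storage fallback: a repeated backtrace reuses its existing slot with a bumped refcount, and when the fixed-capacity map is full the caller gets the sentinel index instead of a failure, so the allocation is still recorded under the "[out of heap profiler mem]" placeholder. A hedged sketch of that contract follows; BoundedInternTable, Intern and Entry are made-up names, and the real BacktraceMap hashing and storage are elided.

// Sketch only: fixed-capacity intern table with a permanent sentinel slot.
#include <cstddef>
#include <cstdint>
#include <limits>
#include <string>
#include <vector>

class BoundedInternTable {
 public:
  static constexpr size_t kSentinelIndex = 0;

  explicit BoundedInternTable(size_t capacity) : capacity_(capacity) {
    // Slot 0 is the permanent placeholder, seeded as in the constructor.
    entries_.push_back({"[out of heap profiler mem]",
                        std::numeric_limits<uint32_t>::max() / 2});
  }

  // Returns the index interning |value|: an existing slot is reused and
  // its refcount bumped; a full table yields the sentinel index.
  size_t Intern(const std::string& value) {
    for (size_t i = 0; i < entries_.size(); ++i) {
      if (entries_[i].value == value) {
        ++entries_[i].refcount;
        return i;
      }
    }
    if (entries_.size() == capacity_)
      return kSentinelIndex;  // Out of storage: record with placeholder.
    entries_.push_back({value, 1});
    return entries_.size() - 1;
  }

 private:
  struct Entry {
    std::string value;
    uint32_t refcount;
  };
  const size_t capacity_;
  std::vector<Entry> entries_;
};

In this sketch, calling Intern("x") twice returns the same index with a refcount of 2, and once the table holds capacity_ entries every new value maps to kSentinelIndex, matching how allocations keep being tracked after the backtrace map fills up.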
| 182 | 182 |
| 183 AllocationRegister::Allocation AllocationRegister::GetAllocation( | 183 AllocationRegister::Allocation AllocationRegister::GetAllocation( |
| 184 AllocationMap::KVIndex index) const { | 184 AllocationMap::KVIndex index) const { |
| 185 const auto& address_and_info = allocations_.Get(index); | 185 const auto& address_and_info = allocations_.Get(index); |
| 186 const auto& backtrace_and_count = | 186 const auto& backtrace_and_count = |
| 187 backtraces_.Get(address_and_info.second.backtrace_index); | 187 backtraces_.Get(address_and_info.second.backtrace_index); |
| 188 return {address_and_info.first, address_and_info.second.size, | 188 return {address_and_info.first, address_and_info.second.size, |
| 189 AllocationContext(backtrace_and_count.first, | 189 AllocationContext(backtrace_and_count.first, |
| 190 address_and_info.second.type_name)}; | 190 address_and_info.second.type_name)}; |
| 191 } | 191 } |
| 192 | 192 |
| 193 } // namespace trace_event | 193 } // namespace trace_event |
| 194 } // namespace base | 194 } // namespace base |