| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "base/trace_event/heap_profiler_allocation_register.h" | 5 #include "base/trace_event/heap_profiler_allocation_register.h" |
| 6 | 6 |
| 7 #include "base/trace_event/trace_event_memory_overhead.h" | 7 #include "base/trace_event/trace_event_memory_overhead.h" |
| 8 | 8 |
| 9 namespace base { | 9 namespace base { |
| 10 namespace trace_event { | 10 namespace trace_event { |
| 11 | 11 |
| 12 AllocationRegister::AllocationRegister() | |
| 13 : AllocationRegister(kNumBuckets * kNumCellsPerBucket) {} | |
| 14 | |
| 15 AllocationRegister::AllocationRegister(uint32_t num_cells) | |
| 16 // Reserve enough address space to store |num_cells_| entries if necessary, | |
| 17 // with a guard page after it to crash the program when attempting to store | |
| 18 // more entries. | |
| 19 : num_cells_(num_cells), | |
| 20 cells_(static_cast<Cell*>(AllocateVirtualMemory(num_cells_ * | |
| 21 sizeof(Cell)))), | |
| 22 buckets_(static_cast<CellIndex*>( | |
| 23 AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))), | |
| 24 | |
| 25 // The free list is empty. The first unused cell is cell 1, because index | |
| 26 // 0 is used as list terminator. | |
| 27 free_list_(0), | |
| 28 next_unused_cell_(1) {} | |
| 29 | |
| 30 AllocationRegister::~AllocationRegister() { | |
| 31 FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex)); | |
| 32 FreeVirtualMemory(cells_, num_cells_ * sizeof(Cell)); | |
| 33 } | |
| 34 | |
| 35 void AllocationRegister::Insert(void* address, | |
| 36 size_t size, | |
| 37 AllocationContext context) { | |
| 38 DCHECK(address != nullptr); | |
| 39 if (size == 0) | |
| 40 return; | |
| 41 | |
| 42 CellIndex* idx_ptr = Lookup(address); | |
| 43 | |
| 44 // If the index is 0, the address is not yet present, so insert it. | |
| 45 if (*idx_ptr == 0) { | |
| 46 *idx_ptr = GetFreeCell(); | |
| 47 | |
| 48 // The address stored in a cell is const as long as it is exposed (via the | |
| 49 // iterators or |Get|), but because cells are re-used, a const cast is | |
| 50 // required to set it on insert and remove. | |
| 51 void* const& allocation_address = cells_[*idx_ptr].allocation.address; | |
| 52 const_cast<void*&>(allocation_address) = address; | |
| 53 cells_[*idx_ptr].next = 0; | |
| 54 } | |
| 55 | |
| 56 cells_[*idx_ptr].allocation.size = size; | |
| 57 cells_[*idx_ptr].allocation.context = context; | |
| 58 } | |
| 59 | |
| 60 void AllocationRegister::Remove(void* address) { | |
| 61 // Get a pointer to the index of the cell that stores |address|. The index can | |
| 62 // be an element of |buckets_| or the |next| member of a cell. | |
| 63 CellIndex* idx_ptr = Lookup(address); | |
| 64 CellIndex freed_idx = *idx_ptr; | |
| 65 | |
| 66 // If the index is 0, the address was not there in the first place. | |
| 67 if (freed_idx == 0) | |
| 68 return; | |
| 69 | |
| 70 // The cell at the index is now free, remove it from the linked list for | |
| 71 // |Hash(address)|. | |
| 72 Cell* freed_cell = &cells_[freed_idx]; | |
| 73 *idx_ptr = freed_cell->next; | |
| 74 | |
| 75 // Put the free cell at the front of the free list. | |
| 76 freed_cell->next = free_list_; | |
| 77 free_list_ = freed_idx; | |
| 78 | |
| 79 // Reset the address, so that on iteration the free cell is ignored. | |
| 80 const_cast<void*&>(freed_cell->allocation.address) = nullptr; | |
| 81 } | |
| 82 | |
| 83 AllocationRegister::Allocation* AllocationRegister::Get(void* address) { | |
| 84 CellIndex* idx_ptr = Lookup(address); | |
| 85 | |
| 86 // If the index is 0, the address is not present in the table. | |
| 87 return *idx_ptr == 0 ? nullptr : &cells_[*idx_ptr].allocation; | |
| 88 } | |
| 89 | |
| 90 AllocationRegister::ConstIterator AllocationRegister::begin() const { | |
| 91 // Initialize the iterator's index to 0. Cell 0 never stores an entry. | |
| 92 ConstIterator iterator(*this, 0); | |
| 93 // Incrementing will advance the iterator to the first used cell. | |
| 94 ++iterator; | |
| 95 return iterator; | |
| 96 } | |
| 97 | |
| 98 AllocationRegister::ConstIterator AllocationRegister::end() const { | |
| 99 // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry, | |
| 100 // so index |next_unused_cell_| is an iterator past the last element, in line | |
| 101 // with the STL iterator conventions. | |
| 102 return ConstIterator(*this, next_unused_cell_); | |
| 103 } | |
| 104 | |
| 105 AllocationRegister::ConstIterator::ConstIterator( | 12 AllocationRegister::ConstIterator::ConstIterator( |
| 106 const AllocationRegister& alloc_register, | 13 const AllocationRegister& alloc_register, AllocationMap::KVIndex index) |
| 107 CellIndex index) | 14 : register_(alloc_register), |
| 108 : register_(alloc_register), index_(index) {} | 15 index_(index) {} |
| 109 | 16 |
| 110 void AllocationRegister::ConstIterator::operator++() { | 17 void AllocationRegister::ConstIterator::operator++() { |
| 111 // Find the next cell with a non-null address until all cells that could | 18 index_ = register_.allocations_.FindValidIndex(index_ + 1); |
| 112 // possibly be used have been iterated. A null address indicates a free cell. | |
| 113 do { | |
| 114 index_++; | |
| 115 } while (index_ < register_.next_unused_cell_ && | |
| 116 register_.cells_[index_].allocation.address == nullptr); | |
| 117 } | 19 } |
| 118 | 20 |
| 119 bool AllocationRegister::ConstIterator::operator!=( | 21 bool AllocationRegister::ConstIterator::operator!=( |
| 120 const ConstIterator& other) const { | 22 const ConstIterator& other) const { |
| 121 return index_ != other.index_; | 23 return index_ != other.index_; |
| 122 } | 24 } |
| 123 | 25 |
| 124 const AllocationRegister::Allocation& AllocationRegister::ConstIterator:: | 26 AllocationRegister::Allocation |
| 125 operator*() const { | 27 AllocationRegister::ConstIterator::operator*() const { |
| 126 return register_.cells_[index_].allocation; | 28 return register_.GetAllocation(index_); |
| 127 } | 29 } |
| 128 | 30 |
| 129 AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) { | 31 size_t AllocationRegister::AddressHasher::operator()( |
| 130 // The list head is in |buckets_| at the hash offset. | 32 const void* address) const { |
| 131 CellIndex* idx_ptr = &buckets_[Hash(address)]; | |
| 132 | |
| 133 // Chase down the list until the cell that holds |address| is found, | |
| 134 // or until the list ends. | |
| 135 while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address) | |
| 136 idx_ptr = &cells_[*idx_ptr].next; | |
| 137 | |
| 138 return idx_ptr; | |
| 139 } | |
| 140 | |
| 141 AllocationRegister::CellIndex AllocationRegister::GetFreeCell() { | |
| 142 // First try to re-use a cell from the freelist. | |
| 143 if (free_list_) { | |
| 144 CellIndex idx = free_list_; | |
| 145 free_list_ = cells_[idx].next; | |
| 146 return idx; | |
| 147 } | |
| 148 | |
| 149 // Otherwise pick the next cell that has not been touched before. | |
| 150 CellIndex idx = next_unused_cell_; | |
| 151 next_unused_cell_++; | |
| 152 | |
| 153 // If the hash table has too little capacity (when too little address space | |
| 154 // was reserved for |cells_|), |next_unused_cell_| can be an index outside of | |
| 155 // the allocated storage. A guard page is allocated there to crash the | |
| 156 // program in that case. There are alternative solutions: | |
| 157 // - Deal with it, increase capacity by reallocating |cells_|. | |
| 158 // - Refuse to insert and let the caller deal with it. | |
| 159 // Because free cells are re-used before accessing fresh cells with a higher | |
| 160 // index, and because reserving address space without touching it is cheap, | |
| 161 // the simplest solution is to just allocate a humongous chunk of address | |
| 162 // space. | |
| 163 | |
| 164 DCHECK_LT(next_unused_cell_, num_cells_ + 1); | |
| 165 | |
| 166 return idx; | |
| 167 } | |
| 168 | |
| 169 // static | |
| 170 uint32_t AllocationRegister::Hash(void* address) { | |
| 171 // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has | 33 // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has |
| 172 // been chosen carefully based on measurements with real-world data (addresses | 34 // been chosen carefully based on measurements with real-world data (addresses |
| 173 // recorded from a Chrome trace run). It is the first prime after 2^17. For | 35 // recorded from a Chrome trace run). It is the first prime after 2^17. For |
| 174 // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18 | 36 // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18 |
| 175 // buckets. Microbenchmarks show that this simple scheme outperforms fancy | 37 // buckets. Microbenchmarks show that this simple scheme outperforms fancy |
| 176 // hashes like Murmur3 by 20 to 40 percent. | 38 // hashes like Murmur3 by 20 to 40 percent. |
| 177 const uintptr_t key = reinterpret_cast<uintptr_t>(address); | 39 const uintptr_t key = reinterpret_cast<uintptr_t>(address); |
| 178 const uintptr_t a = 131101; | 40 const uintptr_t a = 131101; |
| 179 const uintptr_t shift = 14; | 41 const uintptr_t shift = 14; |
| 180 const uintptr_t h = (key * a) >> shift; | 42 const uintptr_t h = (key * a) >> shift; |
| 181 return static_cast<uint32_t>(h) & kNumBucketsMask; | 43 return h; |
| | 44 } |
| | 45 |
| | 46 AllocationRegister::AllocationRegister() |
| | 47 : AllocationRegister(kNumAllocationCells, kNumBacktraceCells) {} |
| | 48 |
| | 49 AllocationRegister::AllocationRegister(size_t num_allocation_cells, |
| | 50 size_t num_backtrace_cells) |
| | 51 : allocations_(kNumAllocationBuckets, num_allocation_cells), |
| | 52 backtraces_(kNumBacktraceBuckets, num_backtrace_cells) {} |
| | 53 |
| | 54 AllocationRegister::~AllocationRegister() { |
| | 55 } |
| | 56 |
| | 57 void AllocationRegister::Insert(const void* address, |
| | 58 size_t size, |
| | 59 const AllocationContext& context) { |
| | 60 DCHECK(address != nullptr); |
| | 61 if (size == 0) { |
| | 62 return; |
| | 63 } |
| | 64 |
| | 65 AllocationInfo info = { |
| | 66 size, |
| | 67 context.type_name, |
| | 68 InsertBacktrace(context.backtrace) |
| | 69 }; |
| | 70 |
| | 71 // Try to insert the allocation. |
| | 72 auto index_and_flag = allocations_.Insert(address, info); |
| | 73 if (!index_and_flag.second) { |
| | 74 // |address| is already there - overwrite the allocation info. |
| | 75 auto& old_info = allocations_.Get(index_and_flag.first).second; |
| | 76 RemoveBacktrace(old_info.backtrace_index); |
| | 77 old_info = info; |
| | 78 } |
| | 79 } |
| | 80 |
| | 81 void AllocationRegister::Remove(const void* address) { |
| | 82 auto index = allocations_.Find(address); |
| | 83 if (index == AllocationMap::kInvalidKVIndex) { |
| | 84 return; |
| | 85 } |
| | 86 |
| | 87 const AllocationInfo& info = allocations_.Get(index).second; |
| | 88 RemoveBacktrace(info.backtrace_index); |
| | 89 allocations_.Remove(index); |
| | 90 } |
| | 91 |
| | 92 bool AllocationRegister::Get(const void* address, |
| | 93 Allocation* out_allocation) const { |
| | 94 auto index = allocations_.Find(address); |
| | 95 if (index == AllocationMap::kInvalidKVIndex) { |
| | 96 return false; |
| | 97 } |
| | 98 |
| | 99 if (out_allocation) { |
| | 100 *out_allocation = GetAllocation(index); |
| | 101 } |
| | 102 return true; |
| | 103 } |
| | 104 |
| | 105 AllocationRegister::ConstIterator AllocationRegister::begin() const { |
| | 106 return ConstIterator(*this, allocations_.FindValidIndex(0)); |
| | 107 } |
| | 108 |
| | 109 AllocationRegister::ConstIterator AllocationRegister::end() const { |
| | 110 return ConstIterator(*this, AllocationMap::kInvalidKVIndex); |
| 182 } | 111 } |
| 183 | 112 |
| 184 void AllocationRegister::EstimateTraceMemoryOverhead( | 113 void AllocationRegister::EstimateTraceMemoryOverhead( |
| 185 TraceEventMemoryOverhead* overhead) const { | 114 TraceEventMemoryOverhead* overhead) const { |
| 186 // Estimate memory overhead by counting all of the cells that have ever been | |
| 187 // touched. Don't report mmapped memory as allocated, because it has not been | |
| 188 // allocated by malloc. | |
| 189 size_t allocated = sizeof(AllocationRegister); | 115 size_t allocated = sizeof(AllocationRegister); |
| 190 size_t resident = sizeof(AllocationRegister) | 116 size_t resident = sizeof(AllocationRegister) |
| 191 // Include size of touched cells (size of |*cells_|). | 117 + allocations_.EstimateAllocatedDirty() |
| 192 + sizeof(Cell) * next_unused_cell_ | 118 + backtraces_.EstimateAllocatedDirty(); |
| 193 // Size of |*buckets_|. | |
| 194 + sizeof(CellIndex) * kNumBuckets; | |
| 195 overhead->Add("AllocationRegister", allocated, resident); | 119 overhead->Add("AllocationRegister", allocated, resident); |
| 196 } | 120 } |
| 197 | 121 |
| | 122 AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace( |
| | 123 const Backtrace& backtrace) { |
| | 124 auto index = backtraces_.Insert(backtrace, 0).first; |
| | 125 auto& backtrace_and_count = backtraces_.Get(index); |
| | 126 backtrace_and_count.second++; |
| | 127 return index; |
| | 128 } |
| | 129 |
| | 130 void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) { |
| | 131 auto& backtrace_and_count = backtraces_.Get(index); |
| | 132 if (--backtrace_and_count.second == 0) { |
| | 133 // Backtrace is not referenced anymore - remove it. |
| | 134 backtraces_.Remove(index); |
| | 135 } |
| | 136 } |
| | 137 |
| | 138 AllocationRegister::Allocation AllocationRegister::GetAllocation( |
| | 139 AllocationMap::KVIndex index) const { |
| | 140 const auto& address_and_info = allocations_.Get(index); |
| | 141 const auto& backtrace_and_count = backtraces_.Get( |
| | 142 address_and_info.second.backtrace_index); |
| | 143 return { |
| | 144 address_and_info.first, |
| | 145 address_and_info.second.size, |
| | 146 AllocationContext( |
| | 147 backtrace_and_count.first, |
| | 148 address_and_info.second.type_name) |
| | 149 }; |
| | 150 } |
| | 151 |
| 198 } // namespace trace_event | 152 } // namespace trace_event |
| 199 } // namespace base | 153 } // namespace base |
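Both the old `Hash()` and the new `AddressHasher::operator()` implement the multiplicative scheme the comment describes. The following is a minimal standalone sketch of that scheme, assuming the 2^18-bucket setup the comment mentions; `HashAddress`, `kNumBuckets` and `kNumBucketsMask` are illustrative names here, not the register's actual constants, whose definitions live in the header rather than in this file:

```cpp
#include <stdint.h>
#include <stdio.h>

namespace {

// Assumed from the comment: the constants were tuned for 2^18 buckets.
constexpr uint32_t kNumBuckets = 1 << 18;
constexpr uint32_t kNumBucketsMask = kNumBuckets - 1;

uint32_t HashAddress(const void* address) {
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;  // The first prime after 2^17.
  const uintptr_t shift = 14;  // Per the comment, 13 and 15 also work well.
  // Multiplying mixes the key's bits upward; the shift then discards the low
  // bits, which vary the least for aligned allocation addresses.
  const uintptr_t h = (key * a) >> shift;
  // The old Hash() masked down to a bucket index here; the new AddressHasher
  // returns the full hash and leaves bucket selection to the hash map.
  return static_cast<uint32_t>(h) & kNumBucketsMask;
}

}  // namespace

int main() {
  int x = 0;
  int y = 0;
  // Nearby stack addresses should still spread over distinct buckets.
  printf("%u %u\n", HashAddress(&x), HashAddress(&y));
  return 0;
}
```

The new `InsertBacktrace()`/`RemoveBacktrace()` pair interns each distinct backtrace once and reference-counts it, so a backtrace is released only when the last allocation referring to it is removed. A hypothetical sketch of that pattern, using `std::string` and `std::unordered_map` as stand-ins for the register's `Backtrace` and fixed-size `BacktraceMap` types:

```cpp
#include <cstddef>
#include <string>
#include <unordered_map>

// Interns values and tracks how many users each one has. Insert() bumps the
// count; Remove() drops it and erases the entry once nothing references it.
class Interner {
 public:
  void Insert(const std::string& value) { ++counts_[value]; }

  void Remove(const std::string& value) {
    auto it = counts_.find(value);
    if (it != counts_.end() && --it->second == 0)
      counts_.erase(it);  // Last reference gone; reclaim the entry.
  }

 private:
  std::unordered_map<std::string, size_t> counts_;
};
```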