// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/memory_profiler_allocation_register.h"

#include "base/logging.h"

namespace base {
namespace trace_event {

AllocationRegister::AllocationRegister()
    // Reserve enough address space to store |kNumCells| entries if necessary,
    // with a guard page after it to crash the program when attempting to store
    // more entries.
    : cells_(static_cast<Cell*>(AllocateVirtualMemory(kNumCells *
                                                      sizeof(Cell)))),
      buckets_(static_cast<CellIndex*>(
          AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),

      // The free list is empty. The first unused cell is cell 1, because index
      // 0 is used as the list terminator.
      free_list_(0),
      next_unused_cell_(1) {}
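
// Note on cost (the numbers below are invented for illustration): the calls
// above only reserve address space; physical pages are committed lazily when
// cells are first touched. If kNumCells were 2^20 and sizeof(Cell) 40 bytes,
// |cells_| would span 40 MiB of address space, while a process that records
// few allocations would dirty only the first pages of that range.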

AllocationRegister::~AllocationRegister() {
  FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex));
  FreeVirtualMemory(cells_, kNumCells * sizeof(Cell));
}

void AllocationRegister::Insert(void* address,
                                size_t size,
                                AllocationContext context) {
  DCHECK(address != nullptr);

  CellIndex* idx_ptr = Lookup(address);

  // If the index is 0, the address is not yet present, so insert it.
  if (*idx_ptr == 0) {
    *idx_ptr = GetFreeCell();

    cells_[*idx_ptr].allocation.address = address;
    cells_[*idx_ptr].next = 0;
  }

  cells_[*idx_ptr].allocation.size = size;
  cells_[*idx_ptr].allocation.context = context;
}

void AllocationRegister::Remove(void* address) {
  // Get a pointer to the index of the cell that stores |address|. The index
  // can be an element of |buckets_| or the |next| member of a cell.
  CellIndex* idx_ptr = Lookup(address);
  CellIndex freed_idx = *idx_ptr;

  // If the index is 0, the address was not there in the first place.
  if (freed_idx == 0)
    return;

  // The cell at the index is now free; remove it from the linked list for
  // |Hash(address)|.
  Cell* freed_cell = &cells_[freed_idx];
  *idx_ptr = freed_cell->next;

  // Put the freed cell at the front of the free list.
  freed_cell->next = free_list_;
  free_list_ = freed_idx;

  // Reset the address, so that on iteration the freed cell is ignored.
  freed_cell->allocation.address = nullptr;
}
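
// Usage sketch (the hook names below are hypothetical, not part of this
// file): a heap profiler would typically call |Insert| from its allocation
// hook and |Remove| from the matching free hook:
//
//   void OnAllocation(void* ptr, size_t size, AllocationContext context) {
//     allocation_register->Insert(ptr, size, context);
//   }
//
//   void OnFree(void* ptr) { allocation_register->Remove(ptr); }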

AllocationRegister::ConstIterator AllocationRegister::begin() const {
  // Initialize the iterator's index to 0. Cell 0 never stores an entry.
  ConstIterator iterator(*this, 0);
  // Incrementing will advance the iterator to the first used cell.
  ++iterator;
  return iterator;
}

AllocationRegister::ConstIterator AllocationRegister::end() const {
  // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry,
  // so index |next_unused_cell_| is an iterator past the last element, in line
  // with the STL iterator conventions.
  return ConstIterator(*this, next_unused_cell_);
}
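
// Iteration sketch (hypothetical caller code): the begin()/end() pair above
// makes the register usable in a range-based for loop, e.g. to sum the bytes
// of all live allocations:
//
//   size_t live_bytes = 0;
//   for (const auto& allocation : allocation_register)
//     live_bytes += allocation.size;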

AllocationRegister::ConstIterator::ConstIterator(
    const AllocationRegister& alloc_register,
    CellIndex index)
    : register_(alloc_register), index_(index) {}

void AllocationRegister::ConstIterator::operator++() {
  // Advance to the next cell with a non-null address, stopping once all cells
  // that could possibly be in use have been visited. A null address indicates
  // a free cell, which iteration skips.
  do {
    index_++;
  } while (index_ < register_.next_unused_cell_ &&
           register_.cells_[index_].allocation.address == nullptr);
}

bool AllocationRegister::ConstIterator::operator!=(
    const ConstIterator& other) const {
  return index_ != other.index_;
}

const AllocationRegister::Allocation& AllocationRegister::ConstIterator::
operator*() const {
  return register_.cells_[index_].allocation;
}

AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
  // The list head is in |buckets_| at the hash offset.
  CellIndex* idx_ptr = &buckets_[Hash(address)];

  // Chase down the list until the cell that holds |address| is found, or until
  // the list ends.
  while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
    idx_ptr = &cells_[*idx_ptr].next;

  return idx_ptr;
}
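
// Example (cell indices invented for illustration): if addresses A and B both
// hash to bucket 3 and were inserted in that order into cells 1 and 2, the
// chain is
//
//   buckets_[3] == 1, cells_[1].next == 2, cells_[2].next == 0.
//
// Lookup(A) then returns &buckets_[3], Lookup(B) returns &cells_[1].next, and
// for an absent address C with the same hash it returns &cells_[2].next,
// which holds the terminator 0. This lets Insert append and Remove splice
// without special-casing the list head.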

AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
  // First try to re-use a cell from the free list.
  if (free_list_) {
    CellIndex idx = free_list_;
    free_list_ = cells_[idx].next;
    return idx;
  }

  // Otherwise pick the next cell that has not been touched before.
  CellIndex idx = next_unused_cell_;
  next_unused_cell_++;

  // If the hash table has too little capacity (when too little address space
  // was reserved for |cells_|), |next_unused_cell_| can be an index outside of
  // the allocated storage. A guard page is allocated there to crash the
  // program in that case. There are alternative solutions:
  // - Deal with it: increase capacity by reallocating |cells_|.
  // - Refuse to insert and let the caller deal with it.
  // Because free cells are re-used before fresh cells with a higher index are
  // touched, and because reserving address space without touching it is cheap,
  // the simplest solution is to just allocate a humongous chunk of address
  // space.

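  // Valid cell indices run from 0 to |kNumCells - 1|, so after the last cell
  // has been handed out |next_unused_cell_| equals |kNumCells| and the check
  // below still passes. It fails only once |idx| itself has run past the
  // reserved cells, which release builds catch via the guard page instead.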
  DCHECK_LT(next_unused_cell_, kNumCells + 1);

  return idx;
}

// static
uint32_t AllocationRegister::Hash(void* address) {
  // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has
  // been chosen carefully based on measurements with real-world data
  // (addresses recorded from a Chrome trace run). It is the first prime after
  // 2^17. For |shift|, 13, 14 and 15 yield good results. These values are
  // tuned to 2^18 buckets. Microbenchmarks show that this simple scheme
  // outperforms fancy hashes like Murmur3 by 20 to 40 percent.
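  //
  // Worked example (assuming kNumBucketsMask == (1 << 18) - 1, in line with
  // the 2^18 buckets mentioned above): for a key of 0x1000 (4096),
  // 4096 * 131101 == 536989696, and 536989696 >> 14 == 32775. That is below
  // 2^18, so the masked bucket index is 32775.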
  const uintptr_t key = reinterpret_cast<uintptr_t>(address);
  const uintptr_t a = 131101;
  const uintptr_t shift = 14;
  const uintptr_t h = (key * a) >> shift;
  return static_cast<uint32_t>(h) & kNumBucketsMask;
}

}  // namespace trace_event
}  // namespace base