1 // Copyright 2015 The Chromium Authors. All rights reserved. | |
2 // Use of this source code is governed by a BSD-style license that can be | |
3 // found in the LICENSE file. | |
4 | |
5 #include "base/trace_event/memory_profiler_allocation_register.h" | |
6 | |
7 namespace base { | |
8 namespace trace_event { | |
9 | |
10 AllocationRegister::AllocationRegister() | |
11 // Reserve enough address space to store |kNumCells| entries if necessary, | |
12 // with a guard page after it to crash the program when attempting to store | |
13 // more entries. | |
14 : cells_(static_cast<Cell*>(AllocateVirtualMemory(kNumCells * | |
15 sizeof(Cell)))), | |
16 buckets_(static_cast<CellIndex*>( | |
17 AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))) { | |
18 // The free list is empty. The first unused cell is cell 1, because index 0 is | |
 19 // used as the list terminator. | |
20 free_list_ = 0; | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
why don't you initialize also these in the ctor?
I
Ruud van Asseldonk
2015/10/06 15:44:34
Done. The checker did not complain, |free_list_| w
| |
21 next_unused_cell_ = 1; | |
22 } | |
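The "Done" above suggests the two assignments were folded into the constructor's initializer list. A minimal sketch of that revision, using only names already present in this file (the actual later patch set is not shown here):

// Sketch only; every member initialized in the constructor's
// initializer list instead of being assigned in the body.
AllocationRegister::AllocationRegister()
    : cells_(static_cast<Cell*>(
          AllocateVirtualMemory(kNumCells * sizeof(Cell)))),
      buckets_(static_cast<CellIndex*>(
          AllocateVirtualMemory(kNumBuckets * sizeof(CellIndex)))),
      // The free list is empty; index 0 doubles as the list terminator.
      free_list_(0),
      // Cell 0 is never used, so the first unused cell is cell 1.
      next_unused_cell_(1) {}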
23 | |
24 AllocationRegister::~AllocationRegister() { | |
25 FreeVirtualMemory(buckets_, kNumBuckets * sizeof(CellIndex)); | |
26 FreeVirtualMemory(cells_, kNumCells * sizeof(Cell)); | |
27 } | |
28 | |
29 void AllocationRegister::Insert(void* address, | |
30 size_t size, | |
31 AllocationContext context) { | |
32 DCHECK(address != nullptr); | |
33 | |
34 CellIndex* idx_ptr = Lookup(address); | |
35 | |
36 // If the index is 0, the address is not yet present, so insert it. | |
37 if (*idx_ptr == 0) { | |
38 *idx_ptr = GetFreeCell(); | |
39 | |
40 cells_[*idx_ptr].allocation.address = address; | |
41 cells_[*idx_ptr].next = 0; | |
42 } | |
43 | |
44 cells_[*idx_ptr].allocation.size = size; | |
45 cells_[*idx_ptr].allocation.context = context; | |
46 } | |
47 | |
48 void AllocationRegister::Remove(void* address) { | |
49 CellIndex* idx_ptr = Lookup(address); | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
I'd probably move the comment that says that idx_p
Ruud van Asseldonk
2015/10/06 15:44:35
Done, though it is not actually relevant where the
| |
50 | |
51 // If the index is 0, the address was not there in the first place. | |
52 if (*idx_ptr == 0) | |
53 return; | |
54 | |
55 // The cell at the index is now free, remove it from the linked list for | |
56 // |Hash(address)|. | |
57 CellIndex free_idx = *idx_ptr; | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
move this up before the if, and use if(free_idx ==
Ruud van Asseldonk
2015/10/06 15:44:34
As you wish.
| |
58 *idx_ptr = cells_[free_idx].next; | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
I think this code below would be more readable if y
Ruud van Asseldonk
2015/10/06 15:44:34
Done. Yeah freeing is not atomic so I think it is
| |
59 | |
60 // Put the free cell at the front of the free list. | |
61 cells_[free_idx].next = free_list_; | |
62 free_list_ = free_idx; | |
63 | |
64 // Reset the address, so that on iteration the free cell is ignored. | |
65 cells_[free_idx].allocation.address = nullptr; | |
66 } | |
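Putting the two suggestions above together (hoist the index before the early return, and spell out the unlink step), the revised Remove() could read roughly as follows; this is an illustration, not the actual later patch set:

void AllocationRegister::Remove(void* address) {
  CellIndex* idx_ptr = Lookup(address);
  CellIndex freed_idx = *idx_ptr;

  // Index 0 means the address was not in the table in the first place.
  if (freed_idx == 0)
    return;

  // Unlink the cell from the linked list for |Hash(address)| ...
  *idx_ptr = cells_[freed_idx].next;

  // ... push it onto the front of the free list ...
  cells_[freed_idx].next = free_list_;
  free_list_ = freed_idx;

  // ... and reset the address so iteration skips the freed cell.
  cells_[freed_idx].allocation.address = nullptr;
}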
67 | |
68 AllocationRegister::ConstIterator AllocationRegister::begin() const { | |
69 // Initialize the iterator's index to 0. Cell 0 never stores an entry. | |
70 ConstIterator iterator(*this, 0); | |
71 // Incrementing will advance the iterator to the first used cell. | |
72 ++iterator; | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
Why don't you just construct the iterator passing
Ruud van Asseldonk
2015/10/06 15:44:35
Because cell 1 might be free.
| |
73 return iterator; | |
74 } | |
75 | |
76 AllocationRegister::ConstIterator AllocationRegister::end() const { | |
77 // Cell |next_unused_cell_ - 1| is the last cell that could contain an entry, | |
78 // so index |next_unused_cell_| is an iterator past the last element, in line | |
79 // with the STL iterator conventions. | |
80 return ConstIterator(*this, next_unused_cell_); | |
81 } | |
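With begin() and end() defined this way, the register can be walked with a range-based for loop. A usage sketch, where |reg| is a hypothetical instance (not part of this CL):

// Walk all live allocations of |reg| and add up their sizes. Freed
// cells are skipped because ConstIterator::operator++ ignores cells
// whose address is null.
size_t TotalLiveBytes(const AllocationRegister& reg) {
  size_t total = 0;
  for (const AllocationRegister::Allocation& alloc : reg)
    total += alloc.size;
  return total;
}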
82 | |
83 AllocationRegister::ConstIterator::ConstIterator( | |
84 const AllocationRegister& alloc_register, | |
85 CellIndex index) | |
86 : register_(alloc_register), index_(index) {} | |
87 | |
88 void AllocationRegister::ConstIterator::operator++() { | |
89 // Find the next cell with a non-null address until all cells that could | |
90 // possibly be used have been iterated. A null address indicates a free cell. | |
91 do | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
I think you still need {braces} as this is a multi
Ruud van Asseldonk
2015/10/06 15:44:35
As you wish.
| |
92 index_++; | |
93 while (index_ < register_.next_unused_cell_ && | |
94 register_.cells_[index_].allocation.address == nullptr); | |
95 } | |
96 | |
97 bool AllocationRegister::ConstIterator::operator!=( | |
98 const ConstIterator& other) const { | |
99 return index_ != other.index_; | |
100 } | |
101 | |
102 const AllocationRegister::Allocation& AllocationRegister::ConstIterator:: | |
103 operator*() const { | |
104 return register_.cells_[index_].allocation; | |
105 } | |
106 | |
107 uint32_t AllocationRegister::Hash(void* address) { | |
108 // The multiplicative hashing scheme from [Knuth 1998]. The value of |a| has | |
 109 // been chosen carefully based on measurements with real-world data (addresses | |
110 // recorded from a Chrome trace run). It is the first prime after 2^17. For | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
This is worth the entire CL
| |
111 // |shift|, 13, 14 and 15 yield good results. These values are tuned to 2^18 | |
112 // buckets. Microbenchmarks show that this simple scheme outperforms fancy | |
113 // hashes like Murmur3 by 20 to 40 percent. | |
114 const uintptr_t key = reinterpret_cast<uintptr_t>(address); | |
115 const uintptr_t a = 131101; | |
116 const uintptr_t shift = 14; | |
117 const uintptr_t h = (key * a) >> shift; | |
118 return static_cast<uint32_t>(h) & kNumBucketsMask; | |
119 } | |
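A standalone sketch of the same scheme, useful for eyeballing how nearby, aligned pointers spread over buckets. It assumes 2^18 buckets, so the mask is (1 << 18) - 1, matching the comment above; constants mirror the ones in Hash():

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uintptr_t a = 131101;  // First prime after 2^17.
  const uintptr_t shift = 14;
  const uint32_t mask = (1u << 18) - 1;  // Assumed kNumBucketsMask.

  // A few nearby, 16-byte aligned addresses, as a heap might hand out.
  const uintptr_t keys[] = {0x5a2b5000, 0x5a2b5010, 0x5a2b5020};
  for (uintptr_t key : keys) {
    uint32_t bucket = static_cast<uint32_t>((key * a) >> shift) & mask;
    printf("0x%" PRIxPTR " -> bucket %" PRIu32 "\n", key, bucket);
  }
  return 0;
}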
120 | |
121 AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) { | |
122 // The list head is in |buckets_| at the hash offset. | |
123 CellIndex* idx_ptr = buckets_ + Hash(address); | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
IMHO probably slightly more readable if you:
CellI
Ruud van Asseldonk
2015/10/06 15:44:35
As you wish. Fun fact: as |&buckets_[Hash(address)
| |
124 | |
125 // Chase down the list until the cell that holds |key| is found, | |
126 // or until the list ends. | |
127 while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address) | |
128 idx_ptr = &cells_[*idx_ptr].next; | |
129 | |
130 return idx_ptr; | |
131 } | |
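The spelling suggested in the review is equivalent, since &buckets_[Hash(address)] and buckets_ + Hash(address) denote the same pointer. A sketch of that variant (illustrative, not the actual later patch set):

AllocationRegister::CellIndex* AllocationRegister::Lookup(void* address) {
  // The head of the list for this address lives in the bucket array.
  CellIndex* idx_ptr = &buckets_[Hash(address)];

  // Chase the list until the cell holding |address| is found, or until
  // the list ends (index 0 terminates it).
  while (*idx_ptr != 0 && cells_[*idx_ptr].allocation.address != address)
    idx_ptr = &cells_[*idx_ptr].next;

  return idx_ptr;
}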
132 | |
133 AllocationRegister::CellIndex AllocationRegister::GetFreeCell() { | |
134 // First try to re-use a cell from the freelist. | |
135 if (free_list_) { | |
136 CellIndex idx = free_list_; | |
137 free_list_ = cells_[idx].next; | |
138 return idx; | |
139 } | |
140 | |
141 // Otherwise pick the next cell that has not been touched before. | |
142 CellIndex idx = next_unused_cell_; | |
143 next_unused_cell_++; | |
144 | |
145 // If the hash table has too little capacity (when too little address space | |
146 // was reserved for |cells_|), |next_unused_cell_| can be an index outside of | |
147 // the allocated storage. A guard page is allocated there to crash the | |
148 // program in that case. There are alternative solutions: | |
149 // - Deal with it, increase capacity by reallocating |cells_|. | |
150 // - Refuse to insert and let the caller deal with it. | |
151 // Because free cells are re-used before accessing fresh cells with a higher | |
152 // index, and because reserving address space without touching it is cheap, | |
153 // the simplest solution is to just allocate a humongous chunk of address | |
154 // space. | |
155 | |
156 return idx; | |
Primiano Tucci (use gerrit)
2015/10/06 13:34:44
I'd probably add a DCHECK here, so in debug builds
Ruud van Asseldonk
2015/10/06 15:44:35
Done.
| |
157 } | |
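With the DCHECK added ("Done" above), debug builds fail on the index check before a caller's write ever reaches the guard page. A sketch of how GetFreeCell() could look afterwards; DCHECK_LT is Chromium's debug-only assert, and the exact check in the later patch set may differ:

AllocationRegister::CellIndex AllocationRegister::GetFreeCell() {
  // Re-use a cell from the free list when possible.
  if (free_list_) {
    CellIndex idx = free_list_;
    free_list_ = cells_[idx].next;
    return idx;
  }

  // Otherwise pick the next untouched cell, asserting in debug builds
  // that it still lies inside the reserved |cells_| range.
  CellIndex idx = next_unused_cell_;
  next_unused_cell_++;
  DCHECK_LT(idx, kNumCells);
  return idx;
}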
158 | |
159 } // namespace trace_event | |
160 } // namespace base | |