Chromium Code Reviews
Side by Side Diff: base/trace_event/heap_profiler_allocation_register.h

Issue 2089253002: [tracing] Optimize AllocationRegister and increase max backtrace depth. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Cleanup tests Created 4 years, 6 months ago
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ 5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ 6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
7 7
8 #include <stddef.h> 8 #include <stddef.h>
9 #include <stdint.h> 9 #include <stdint.h>
10 10
11 #include "base/bits.h"
11 #include "base/logging.h" 12 #include "base/logging.h"
12 #include "base/macros.h" 13 #include "base/macros.h"
14 #include "base/process/process_metrics.h"
13 #include "base/trace_event/heap_profiler_allocation_context.h" 15 #include "base/trace_event/heap_profiler_allocation_context.h"
14 16
15 namespace base { 17 namespace base {
16 namespace trace_event { 18 namespace trace_event {
17 19
20 class AllocationRegisterTest;
21
22 namespace internal {
23
24 // Allocates a region of virtual address space of |size| rounded up to the
25 // system page size. The memory is zeroed by the system. A guard page is
26 // added after the end.
27 void* AllocateGuardedVirtualMemory(size_t size);
28
29 // Frees a region of virtual address space allocated by a call to
 30 // |AllocateGuardedVirtualMemory|.
31 void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
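The actual implementation of these two helpers lives in the .cc file. As a rough, hypothetical sketch of the technique (POSIX-only; the function name here is invented for illustration), guarded allocation amounts to reserving one extra page and revoking its permissions:

    #include <sys/mman.h>   // mmap, mprotect (POSIX)
    #include <unistd.h>     // sysconf

    // Sketch only, not the Chromium implementation: reserves |size| rounded up
    // to the page size plus one inaccessible guard page, so an out-of-bounds
    // access past the end faults instead of corrupting adjacent memory.
    void* AllocateGuardedVirtualMemorySketch(size_t size) {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      size = (size + page_size - 1) & ~(page_size - 1);  // Round up to pages.
      // Anonymous mappings are zero-filled by the kernel.
      void* addr = mmap(nullptr, size + page_size, PROT_READ | PROT_WRITE,
                        MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
      CHECK_NE(MAP_FAILED, addr);
      // Revoke all access on the trailing page so it acts as a guard page.
      CHECK_EQ(0, mprotect(static_cast<char*>(addr) + size, page_size, PROT_NONE));
      return addr;
    }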
32
33 // Hash map that mmaps memory only once in the constructor. Its API is
 34 // similar to std::unordered_map, only index (KVIndex) is used to address entries.
35 template <class Key, class Value, class KeyHasher>
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 Do you really need the KeyHasher arg here? I think
Dmitry Skiba 2016/06/28 10:55:00 This is for AddressHasher, which implements fast hashing for addresses.
Primiano Tucci (use gerrit) 2016/06/28 14:23:07 Ahh Ok I see the point now. Address is a simple type.
36 class FixedHashMap {
37 public:
38 using KVPair = std::pair<const Key, Value>;
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 Include What You Use: add #include <utility> up top.
Dmitry Skiba 2016/06/28 10:54:58 Done.
39
40 // For implementation simplicity API uses integer index instead
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 /me loves simplicity! \o/
41 // of iterators. Most operations (except FindValidIndex) on KVIndex
42 // are O(1).
43 using KVIndex = size_t;
Primiano Tucci (use gerrit) 2016/06/23 20:46:26 should we just use size_t everywhere here? I feel
Dmitry Skiba 2016/06/28 10:54:59 Yes, functionally KVIndex is useless, but it helps readability.
Primiano Tucci (use gerrit) 2016/06/28 14:23:07 Ok I see, makes sense.
44 static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
45
46 // Number of cells control how many items this hash map can hold. Since
47 // cell includes both key and value, this value also largely affects
48 // memory footprint.
49 // Number of buckets control how many collisions there will be. Bucket
50 // is just a pointer, so it should be large(ish). It's also a good idea
51 // to make it a prime number.
52 FixedHashMap(size_t num_buckets, size_t num_cells)
53 : num_buckets_(num_buckets),
54 num_cells_(num_cells),
55 cells_(static_cast<Cell*>(
56 AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
57 buckets_(static_cast<Cell**>(
58 AllocateGuardedVirtualMemory(num_buckets_ * sizeof(Bucket)))),
59 free_list_(nullptr),
60 next_unused_cell_(0) {}
61
62 ~FixedHashMap() {
63 RemoveAll();
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 do you really need to call RemoveAll here? Once yo
Dmitry Skiba 2016/06/28 10:54:59 Done.
64 FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
65 FreeGuardedVirtualMemory(buckets_, num_buckets_ * sizeof(Bucket));
66 }
67
68 std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
69 Cell** pcell = Lookup(key);
70 Cell* cell = *pcell;
71 if (cell) {
72 return {KVIndex(cell - cells_), false}; // not inserted
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 shouldn't this be a static_cast<KVIndex>(cell - cells_)?
Dmitry Skiba 2016/06/28 10:54:58 Done.
73 }
74
75 // Get a free cell and link it.
76 *pcell = cell = GetFreeCell();
77 cell->pself = pcell;
78 cell->next = nullptr;
79
80 // Construct cell's value.
81 new (&cell->kv) KVPair(key, value);
Primiano Tucci (use gerrit) 2016/06/23 20:46:26 why is this not just: cell->kv.first = key; cell->kv.second = value;?
Dmitry Skiba 2016/06/28 10:54:59 Actually, we can't do kv.first = key, because first is const.
82
83 return {KVIndex(cell - cells_), true}; // inserted
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 ditto here about static_cast
Dmitry Skiba 2016/06/28 10:54:59 Done.
84 }
85
86 void Remove(KVIndex index) {
87 if (index == kInvalidKVIndex) {
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 should this be if (index >= num_cells_)?
Dmitry Skiba 2016/06/28 10:54:59 Right, and I'll also convert that to DCHECK, becau
88 return;
89 }
90
91 Cell* cell = &cells_[index];
92
93 // Unlink the cell.
94 *cell->pself = cell->next;
95 if (cell->next) {
96 cell->next->pself = cell->pself;
97 }
98 cell->pself = nullptr; // mark as free
99
100 // Add it to the free list.
101 cell->next = free_list_;
102 free_list_ = cell;
103
104 // Destruct cell's value.
105 cell->kv.~KVPair();
106 }
107
108 void RemoveAll() {
Primiano Tucci (use gerrit) 2016/06/23 20:46:24 Looks like we never end up calling this. Should we just remove it?
Dmitry Skiba 2016/06/28 10:54:59 Done.
109 KVIndex index = FindValidIndex(0);
110 while (index != kInvalidKVIndex) {
111 Remove(index);
112 index = FindValidIndex(index + 1);
113 }
114 }
115
116 KVIndex Find(const Key& key) const {
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 Not sure why you have this returning an index at all.
Dmitry Skiba 2016/06/28 10:54:59 I need index for backtrace_index, i.e. I need some
117 Cell* cell = *Lookup(key);
118 return cell ? KVIndex(cell - cells_) : kInvalidKVIndex;
119 }
120
121 KVPair& Get(KVIndex index) {
122 return cells_[index].kv;
123 }
124
125 const KVPair& Get(KVIndex index) const {
126 return cells_[index].kv;
127 }
128
129 // Finds next index that has a KVPair associated with it. Search starts
130 // with the specified index. Returns kInvalidKVIndex if nothing was found.
131 // To find the first valid index, call this function with 0. Continue
132 // calling with the last_index + 1 until kInvalidKVIndex is returned.
133 KVIndex FindValidIndex(KVIndex index) const {
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 s/FindValidIndex/FindNextValidCellIndex/ (or just
Dmitry Skiba 2016/06/28 10:55:00 The thing with 'Cell' is that it doesn't appear anywhere in the API.
Primiano Tucci (use gerrit) 2016/06/28 14:23:07 Oh right realized only now. I thought this was a p
Dmitry Skiba 2016/06/29 16:12:26 Done.
134 for (;index < next_unused_cell_; ++index) {
135 if (cells_[index].pself) {
136 return index;
137 }
138 }
139 return kInvalidKVIndex;
140 }
141
142 // Estimates number of dirty bytes in allocated memory regions.
143 size_t EstimateAllocatedDirty() const {
Primiano Tucci (use gerrit) 2016/06/23 20:46:24 maybe EstimateResidentMemory or EstimateDirtyMemory?
Dmitry Skiba 2016/06/28 10:54:59 Done.
144 size_t page_size = base::GetPageSize();
145 // |next_unused_cell_| is the first cell that wasn't touched, i.e.
146 // it's the number of touched cells.
147 return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
148 bits::Align(sizeof(Bucket) * num_buckets_, page_size);
149 }
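For a feel of the numbers (all values below are assumed purely for illustration, not measured): with a 4096-byte page, a 48-byte Cell, 1000 touched cells and 30011 eight-byte buckets, the estimate would be

      bits::Align(48 * 1000,  4096) =  49152   // 12 pages of cells
    + bits::Align( 8 * 30011, 4096) = 241664   // 59 pages of buckets
                                    = 290816 bytes reported as dirty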
150
151 private:
152 friend base::trace_event::AllocationRegisterTest;
153
154 struct Cell {
155 KVPair kv;
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 I think if you put kv at the end you might get som
Dmitry Skiba 2016/06/28 10:54:59 Hmm, but both 'next' and 'p_prev' are pointers, so
Primiano Tucci (use gerrit) 2016/06/28 14:23:07 Right, silly comment from my side, needs to be aligned anyway.
156 Cell* next;
157
158 // Conceptually this is |prev| in a double linked list. However, buckets
Primiano Tucci (use gerrit) 2016/06/23 20:46:26 s/double/doubly/
Dmitry Skiba 2016/06/28 10:54:58 Done.
159 // also participate in the bucket's cell list - they point to the list's
160 // head and also need to be linked / unlinked properly. To treat these two
161 // cases uniformly, instead of |prev| we're storing "pointer to a Cell*
162 // that points to this Cell" kind of thing. So |pself| points to a bucket
163 // for the first cell in a list, and points to |next| of the previous cell
164 // for any other cell. With that Lookup() is the only function that handles
165 // buckets / cells differently.
166 // If |pself| is nullptr, the cell is in the free list.
167 Cell** pself;
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 I agree with all the reasoning here. Just from a n
Dmitry Skiba 2016/06/28 10:54:59 Done.
168 };
169
170 using Bucket = Cell*;
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 It seems you use this only in one place below, I'd
Dmitry Skiba 2016/06/28 10:54:59 Actually I added it for ctor/dtor - without it st
171
172 // Returns a pointer to the cell that contains or should contain the entry
173 // for |key|. The pointer may point at an element of |buckets_| or at the
174 // |next| member of an element of |cells_|.
175 Cell** Lookup(const Key& key) const {
176 // The list head is in |buckets_| at the hash offset.
177 Cell** pcell = &buckets_[Hash(key)];
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 I think that p_cell is more readable (here and elsewhere).
Dmitry Skiba 2016/06/28 10:55:00 Done.
178
179 // Chase down the list until the cell that holds |key| is found,
180 // or until the list ends.
181 while (*pcell) {
182 Cell* cell = *pcell;
Primiano Tucci (use gerrit) 2016/06/23 20:46:24 This line IMHO just makes the loop more complicated.
Dmitry Skiba 2016/06/28 10:54:59 Done.
183 if (cell->kv.first == key) {
184 break;
185 }
186 pcell = &cell->next;
187 }
188
189 return pcell;
190 }
191
192 // Returns a cell that is not being used to store an entry (either by
193 // recycling from the free list or by taking a fresh cell).
194 Cell* GetFreeCell() {
195 // First try to re-use a cell from the free list.
196 if (free_list_) {
197 Cell* cell = free_list_;
198 free_list_ = cell->next;
199 return cell;
200 }
201
202 // Otherwise pick the next cell that has not been touched before.
203 size_t idx = next_unused_cell_;
204 next_unused_cell_++;
205
206 // If the hash table has too little capacity (when too little address space
207 // was reserved for |cells_|), |next_unused_cell_| can be an index outside
208 // of the allocated storage. A guard page is allocated there to crash the
209 // program in that case. There are alternative solutions:
210 // - Deal with it, increase capacity by reallocating |cells_|.
211 // - Refuse to insert and let the caller deal with it.
212 // Because free cells are re-used before accessing fresh cells with a higher
213 // index, and because reserving address space without touching it is cheap,
214 // the simplest solution is to just allocate a humongous chunk of address
215 // space.
216
217 DCHECK_LT(next_unused_cell_, num_cells_ + 1);
218
219 return &cells_[idx];
220 }
221
222 // Returns a value in the range [0, NumBuckets - 1] (inclusive).
223 size_t Hash(const Key& key) const {
224 return KeyHasher()(key) % num_buckets_;
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 tip: if we enforced that num_buckets was a power of two, this modulo could become a bitwise AND.
Dmitry Skiba 2016/06/28 10:54:59 Done. However, it required a change to the backtrace
225 }
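If |num_buckets_| were restricted to a power of two, as the tip above suggests and as the allocation map below ends up doing (2^18 buckets), the modulo could be replaced by a mask. A sketch of that variant, under that assumption:

    // Sketch: valid only when num_buckets_ is a power of two (e.g. 0x40000).
    size_t Hash(const Key& key) const {
      DCHECK_EQ(0u, num_buckets_ & (num_buckets_ - 1));  // Power-of-two check.
      return KeyHasher()(key) & (num_buckets_ - 1);
    }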
226
227 // Number of buckets.
228 size_t const num_buckets_;
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 I wonder if this (and num_cells_) should instead b
Dmitry Skiba 2016/06/28 10:55:00 Hmm, unlike number of buckets number of cells is n
229
230 // Number of cells.
231 size_t const num_cells_;
232
233 // The array of cells. This array is backed by mmapped memory. Lower indices
234 // are accessed first, higher indices are only accessed when required. In
Primiano Tucci (use gerrit) 2016/06/23 20:46:24 s/when required/only when the |free_list_| is empty/
Dmitry Skiba 2016/06/28 10:55:00 Done.
235 // this way, even if a huge amount of address space has been mmapped, only
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 I'd just say, instead of "In this way..." -> "This
Dmitry Skiba 2016/06/28 10:54:59 Done.
236 // the cells that are actually used will be backed by physical memory.
237 Cell* const cells_;
238
239 // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
240 // contain the index of the head of the linked list for |Hash(key)|.
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 does it really contain the index? This seems to be a pointer now.
Dmitry Skiba 2016/06/28 10:54:59 Right, this is a leftover from the old implementation.
241 // This array is backed by mmapped memory.
242 mutable Bucket* buckets_;
Primiano Tucci (use gerrit) 2016/06/23 20:46:24 I think this is more readable as: Cell*[] buckets_
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 why is this mutable?
Dmitry Skiba 2016/06/28 10:54:59 Find() is const, and it uses Lookup(), which returns a non-const Cell**.
Primiano Tucci (use gerrit) 2016/06/28 14:23:07 I see, isn't the right thing to do having Lookup r
Dmitry Skiba 2016/06/29 16:12:26 Hmm, but mutable was created exactly to avoid doin
Primiano Tucci (use gerrit) 2016/06/29 16:55:03 Well depends on the cast. mutable is more for thin
243
244 // The head of the free list.
245 Cell* free_list_;
246
247 // The index of the first element of |cells_| that has not been used before.
248 // If the free list is empty and a new cell is needed, the cell at this index
249 // is used. This is the high water mark for the number of entries stored.
250 size_t next_unused_cell_;
251
252 DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
Primiano Tucci (use gerrit) 2016/06/23 20:46:26 I think you need the template arguments here?
Dmitry Skiba 2016/06/28 10:54:59 Yeah, you can just use the class name in templates, and it refers to the current instantiation.
253 };
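To make the index-based API concrete, here is a hypothetical usage sketch; the key/value types, the hasher and the sizes are made up for illustration and are not part of this CL:

    struct PointerHasher {  // Hypothetical hasher, for the example only.
      size_t operator()(const void* ptr) const {
        return reinterpret_cast<uintptr_t>(ptr) >> 4;
      }
    };

    using ExampleMap = internal::FixedHashMap<const void*, size_t, PointerHasher>;

    void Example(const void* address) {
      ExampleMap map(/* num_buckets */ 1021, /* num_cells */ 4096);

      // Insert() returns {index, inserted}; the index stays valid until Remove().
      std::pair<ExampleMap::KVIndex, bool> result = map.Insert(address, 42);
      ExampleMap::KVIndex index = result.first;

      // Look up by key and mutate the stored value through the index.
      if (map.Find(address) != ExampleMap::kInvalidKVIndex)
        map.Get(index).second += 1;

      // Walk all live entries; FindValidIndex skips free cells.
      for (ExampleMap::KVIndex i = map.FindValidIndex(0);
           i != ExampleMap::kInvalidKVIndex; i = map.FindValidIndex(i + 1)) {
        // map.Get(i).first is the key, map.Get(i).second is the value.
      }

      map.Remove(index);
    }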
254
255 } // namespace internal
256
18 class TraceEventMemoryOverhead; 257 class TraceEventMemoryOverhead;
19 258
20 // The allocation register keeps track of all allocations that have not been 259 // The allocation register keeps track of all allocations that have not been
21 // freed. It is a memory map-backed hash table that stores size and context 260 // freed. Internally it has two hashtables: one for Backtraces and one for
22 // indexed by address. The hash table is tailored specifically for this use 261 // actual allocations. Sizes of both hashtables are fixed, and this class
23 // case. The common case is that an entry is inserted and removed after a 262 // allocates (mmaps) only in its constructor.
24 // while, lookup without modifying the table is not an intended use case. The
25 // hash table is implemented as an array of linked lists. The size of this
26 // array is fixed, but it does not limit the amount of entries that can be
27 // stored.
28 //
29 // Replaying a recording of Chrome's allocations and frees against this hash
30 // table takes about 15% of the time that it takes to replay them against
31 // |std::map|.
32 class BASE_EXPORT AllocationRegister { 263 class BASE_EXPORT AllocationRegister {
33 public: 264 public:
34 // The data stored in the hash table; 265 // Details about an allocation.
35 // contains the details about an allocation.
36 struct Allocation { 266 struct Allocation {
37 void* const address; 267 const void* address;
38 size_t size; 268 size_t size;
39 AllocationContext context; 269 AllocationContext context;
40 }; 270 };
41 271
42 // An iterator that iterates entries in the hash table efficiently, but in no 272 // An iterator that iterates entries in no particular order.
43 // particular order. It can do this by iterating the cells and ignoring the
44 // linked lists altogether. Instead of checking whether a cell is in the free
45 // list to see if it should be skipped, a null address is used to indicate
46 // that a cell is free.
47 class BASE_EXPORT ConstIterator { 273 class BASE_EXPORT ConstIterator {
48 public: 274 public:
49 void operator++(); 275 void operator++();
50 bool operator!=(const ConstIterator& other) const; 276 bool operator!=(const ConstIterator& other) const;
51 const Allocation& operator*() const; 277 Allocation operator*() const;
52 278
53 private: 279 private:
54 friend class AllocationRegister; 280 friend class AllocationRegister;
55 using CellIndex = uint32_t; 281 using AllocationKVIndex = size_t;
56 282
57 ConstIterator(const AllocationRegister& alloc_register, CellIndex index); 283 ConstIterator(const AllocationRegister& alloc_register,
284 AllocationKVIndex index);
58 285
59 const AllocationRegister& register_; 286 const AllocationRegister& register_;
60 CellIndex index_; 287 AllocationKVIndex index_;
61 }; 288 };
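A hypothetical sketch of how a caller could walk the register with this iterator (function and variable names below are illustrative only):

    // Sketch: sums the sizes of all live allocations in |reg|.
    size_t TotalLiveBytes(const AllocationRegister& reg) {
      size_t total = 0;
      // Entries come back in no particular order; operator* now returns an
      // Allocation by value because it is assembled from the two internal maps.
      for (AllocationRegister::ConstIterator it = reg.begin(); it != reg.end(); ++it)
        total += (*it).size;
      return total;
    }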
62 289
63 AllocationRegister(); 290 AllocationRegister();
64 explicit AllocationRegister(uint32_t num_cells); 291 explicit AllocationRegister(size_t num_allocation_cells,
Primiano Tucci (use gerrit) 2016/06/23 20:46:26 no need for explicit if you have two args here
Dmitry Skiba 2016/06/28 10:54:59 Done.
292 size_t num_backtrace_cells);
65 293
66 ~AllocationRegister(); 294 ~AllocationRegister();
67 295
68 // Inserts allocation details into the table. If the address was present 296 // Inserts allocation details into the table. If the address was present
69 // already, its details are updated. |address| must not be null. (This is 297 // already, its details are updated. |address| must not be null.
70 // because null is used to mark free cells, to allow efficient iteration of 298 void Insert(const void* address,
71 // the hash table.) 299 size_t size,
72 void Insert(void* address, size_t size, AllocationContext context); 300 const AllocationContext& context);
73 301
74 // Removes the address from the table if it is present. It is ok to call this 302 // Removes the address from the table if it is present. It is ok to call this
75 // with a null pointer. 303 // with a null pointer.
76 void Remove(void* address); 304 void Remove(const void* address);
77 305
78 // Returns a pointer to the allocation at the address, or null if there is no 306 // Finds allocation for the address and fills |out_allocation|.
79 // allocation at that address. This can be used to change the allocation 307 bool Get(const void* address, Allocation* out_allocation) const;
Primiano Tucci (use gerrit) 2016/06/23 20:46:25 out of curiosity what was the issue with returning a pointer here?
Dmitry Skiba 2016/06/28 10:55:00 I simply can't :) The issue is that previously 'Allocation' was stored in the table directly, but now it is assembled from the two maps.
80 // context after insertion, for example to change the type name.
81 Allocation* Get(void* address);
82 308
83 ConstIterator begin() const; 309 ConstIterator begin() const;
84 ConstIterator end() const; 310 ConstIterator end() const;
85 311
86 // Estimates memory overhead including |sizeof(AllocationRegister)|. 312 // Estimates memory overhead including |sizeof(AllocationRegister)|.
87 void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const; 313 void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;
88 314
89 private: 315 private:
90 friend class AllocationRegisterTest; 316 friend AllocationRegisterTest;
91 using CellIndex = uint32_t;
92 317
93 // A cell can store allocation details (size and context) by address. Cells 318 // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal
94 // are part of a linked list via the |next| member. This list is either the 319 // hashing and should be changed together with AddressHasher.
95 // list for a particular hash, or the free list. All cells are contiguous in 320 static const size_t kNumAllocationBuckets = 0x40000;
96 // memory in one big array. Therefore, on 64-bit systems, space can be saved 321 static const size_t kNumAllocationCells = 1500000;
97 // by storing 32-bit indices instead of pointers as links. Index 0 is used as 322
98 // the list terminator. 323 // Expect max 30K unique backtraces.
99 struct Cell { 324 static const size_t kNumBacktraceBuckets = 30011; // prime
Primiano Tucci (use gerrit) 2016/06/23 20:46:26 nit: add extra space before comment Maybe the pow2
Dmitry Skiba 2016/06/28 10:54:59 Done.
100 CellIndex next; 325 static const size_t kNumBacktraceCells = kNumBacktraceBuckets;
101 Allocation allocation; 326
327 using BacktraceMap = internal::FixedHashMap<
328 Backtrace,
329 size_t,
Primiano Tucci (use gerrit) 2016/06/23 20:46:26 add a comment explaining that this size_t is an in
Dmitry Skiba 2016/06/28 10:55:00 Yeah, it desperately needs a comment, because actu
330 BASE_HASH_NAMESPACE::hash<Backtrace>>;
331
332 struct AllocationInfo {
333 size_t size;
334 const char* type_name;
335 BacktraceMap::KVIndex backtrace_index;
102 }; 336 };
103 337
104 // The number of buckets, 2^17, approximately 130 000, has been tuned for 338 struct AddressHasher {
105 // Chrome's typical number of outstanding allocations. (This number varies 339 size_t operator () (const void* address) const;
106 // between processes. Most processes have a sustained load of ~30k unfreed 340 };
107 // allocations, but some processes have peeks around 100k-400k allocations.)
108 // Because of the size of the table, it is likely that every |buckets_|
109 // access and every |cells_| access will incur a cache miss. Microbenchmarks
110 // suggest that it is worthwile to use more memory for the table to avoid
111 // chasing down the linked list, until the size is 2^18. The number of buckets
112 // is a power of two so modular indexing can be done with bitwise and.
113 static const uint32_t kNumBuckets = 0x20000;
114 static const uint32_t kNumBucketsMask = kNumBuckets - 1;
115 341
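AddressHasher is only declared here; its definition is in the .cc file. As a rough illustration of the kind of cheap pointer hash such a functor typically implements (this is a guess, not the actual implementation), a multiplicative mix works well because heap addresses share their low alignment bits:

    // Sketch only, NOT the real AddressHasher.
    size_t AddressHasherSketch(const void* address) {
      uintptr_t key = reinterpret_cast<uintptr_t>(address) >> 4;  // Drop alignment bits.
      // Knuth-style multiplicative hashing with a large odd constant.
      return static_cast<size_t>(key * 2654435761u);
    }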
116 // Reserve address space to store at most this number of entries. High 342 using AllocationMap = internal::FixedHashMap<
117 // capacity does not imply high memory usage due to the access pattern. The 343 const void*,
118 // only constraint on the number of cells is that on 32-bit systems address 344 AllocationInfo,
119 // space is scarce (i.e. reserving 2GiB of address space for the entries is 345 AddressHasher>;
120 // not an option). A value of ~3M entries is large enough to handle spikes in
121 // the number of allocations, and modest enough to require no more than a few
122 // dozens of MiB of address space.
123 static const uint32_t kNumCellsPerBucket = 10;
124 346
125 // Returns a value in the range [0, kNumBuckets - 1] (inclusive). 347 BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
126 static uint32_t Hash(void* address); 348 void RemoveBacktrace(BacktraceMap::KVIndex index);
127 349
128 // Allocates a region of virtual address space of |size| rounded up to the 350 Allocation GetAllocation(AllocationMap::KVIndex) const;
129 // system page size. The memory is zeroed by the system. A guard page is
130 // added after the end.
131 static void* AllocateVirtualMemory(size_t size);
132 351
133 // Frees a region of virtual address space allocated by a call to 352 AllocationMap allocations_;
134 // |AllocateVirtualMemory|. 353 BacktraceMap backtraces_;
135 static void FreeVirtualMemory(void* address, size_t allocated_size);
136
137 // Returns a pointer to the variable that contains or should contain the
138 // index of the cell that stores the entry for |address|. The pointer may
139 // point at an element of |buckets_| or at the |next| member of an element of
140 // |cells_|. If the value pointed at is 0, |address| is not in the table.
141 CellIndex* Lookup(void* address);
142
143 // Takes a cell that is not being used to store an entry (either by recycling
144 // from the free list or by taking a fresh cell) and returns its index.
145 CellIndex GetFreeCell();
146
147 // The maximum number of cells which can be allocated.
148 uint32_t const num_cells_;
149
150 // The array of cells. This array is backed by mmapped memory. Lower indices
151 // are accessed first, higher indices are only accessed when required. In
152 // this way, even if a huge amount of address space has been mmapped, only
153 // the cells that are actually used will be backed by physical memory.
154 Cell* const cells_;
155
156 // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain
157 // the index of the head of the linked list for |Hash(address)|. A value of 0
158 // indicates an empty list. This array is backed by mmapped memory.
159 CellIndex* const buckets_;
160
161 // The head of the free list. This is the index of the cell. A value of 0
162 // means that the free list is empty.
163 CellIndex free_list_;
164
165 // The index of the first element of |cells_| that has not been used before.
166 // If the free list is empty and a new cell is needed, the cell at this index
167 // is used. This is the high water mark for the number of entries stored.
168 CellIndex next_unused_cell_;
169 354
170 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); 355 DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
171 }; 356 };
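The private helpers (InsertBacktrace / RemoveBacktrace / GetAllocation) hint at how the two maps cooperate. A hypothetical sketch of the flow, assuming the size_t value in BacktraceMap is a use count; the real .cc logic (e.g. updating an already-present address) may differ:

    // Sketch only; error handling and the update-existing-entry case omitted.
    void AllocationRegister::Insert(const void* address,
                                    size_t size,
                                    const AllocationContext& context) {
      DCHECK(address);
      AllocationInfo info = {
          size,
          context.type_name,
          // Deduplicates the backtrace and bumps its use count.
          InsertBacktrace(context.backtrace),
      };
      allocations_.Insert(address, info);
    }

    void AllocationRegister::Remove(const void* address) {
      AllocationMap::KVIndex index = allocations_.Find(address);
      if (index == AllocationMap::kInvalidKVIndex)
        return;
      // Drop the reference on the shared backtrace before freeing the entry.
      RemoveBacktrace(allocations_.Get(index).second.backtrace_index);
      allocations_.Remove(index);
    }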
172 357
173 } // namespace trace_event 358 } // namespace trace_event
174 } // namespace base 359 } // namespace base
175 360
176 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ 361 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_