Chromium Code Reviews

Side by Side Diff: base/trace_event/heap_profiler_allocation_register.h

Issue 2089253002: [tracing] Optimize AllocationRegister and increase max backtrace depth. (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix Windows Created 4 years, 5 months ago
Unified diff (lines prefixed with '-' are removed by this patch, lines prefixed with '+' are added, and other lines are unchanged context):
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_

 #include <stddef.h>
 #include <stdint.h>

+#include <utility>
+
+#include "base/bits.h"
 #include "base/logging.h"
 #include "base/macros.h"
+#include "base/process/process_metrics.h"
+#include "base/template_util.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"

 namespace base {
 namespace trace_event {

+class AllocationRegisterTest;
+
+namespace internal {
+
+// Allocates a region of virtual address space of |size| rounded up to the
+// system page size. The memory is zeroed by the system. A guard page is
+// added after the end.
+void* AllocateGuardedVirtualMemory(size_t size);
+
+// Frees a region of virtual address space allocated by a call to
+// |AllocateGuardedVirtualMemory|.
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size);
+
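The definitions of these two helpers live in heap_profiler_allocation_register.cc, which is not part of this file's diff. As a rough sketch only, a POSIX-style guarded allocation could look like the following; the function name and error handling here are illustrative, not the reviewed implementation, which also has a Windows variant:

    // Sketch: reserve zero-filled pages plus one inaccessible guard page.
    #include <sys/mman.h>
    #include <unistd.h>

    void* AllocateGuardedVirtualMemorySketch(size_t size) {
      size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      size_t rounded = (size + page_size - 1) & ~(page_size - 1);
      // mmap returns zeroed, lazily committed pages; reserve one page extra.
      void* addr = mmap(nullptr, rounded + page_size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED)
        return nullptr;
      // Make the trailing page inaccessible so an overrun faults immediately.
      mprotect(static_cast<char*>(addr) + rounded, page_size, PROT_NONE);
      return addr;
    }

Any write past the rounded-up size then hits the PROT_NONE page and crashes the process, which is exactly the failure mode GetFreeCell() below relies on.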
+// Hash map that mmaps memory only once, in the constructor. Its API is
+// similar to std::unordered_map, except that an index (KVIndex) is used to
+// address entries.
+template <size_t NumBuckets, class Key, class Value, class KeyHasher>
+class FixedHashMap {
+  // To keep things simple we don't call destructors.
+  static_assert(is_trivially_destructible<Key>::value &&
+                    is_trivially_destructible<Value>::value,
+                "Key and Value shouldn't have destructors");
+
+ public:
+  using KVPair = std::pair<const Key, Value>;
+
+  // For implementation simplicity the API uses an integer index instead
+  // of iterators. Most operations (except Find and Next) on KVIndex
+  // are O(1).
+  using KVIndex = size_t;
+  static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+
+  // Capacity controls how many items this hash map can hold, and largely
+  // affects memory footprint.
+  FixedHashMap(size_t capacity)
+      : num_cells_(capacity),
+        cells_(static_cast<Cell*>(
+            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+        buckets_(static_cast<Bucket*>(
+            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+        free_list_(nullptr),
+        next_unused_cell_(0) {}
+
+  ~FixedHashMap() {
+    FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
+    FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
+  }
+
+  std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
+    Cell** p_cell = Lookup(key);
+    Cell* cell = *p_cell;
+    if (cell) {
+      return {static_cast<KVIndex>(cell - cells_), false};  // not inserted
+    }
+
+    // Get a free cell and link it.
+    *p_cell = cell = GetFreeCell();
+    cell->p_prev = p_cell;
+    cell->next = nullptr;
+
+    // Initialize the key/value pair. Since the key is 'const Key', placement
+    // new is the only way to initialize it.
+    new (&cell->kv) KVPair(key, value);
+
+    return {static_cast<KVIndex>(cell - cells_), true};  // inserted
+  }
+
+  void Remove(KVIndex index) {
+    DCHECK_LT(index, next_unused_cell_);
+
+    Cell* cell = &cells_[index];
+
+    // Unlink the cell.
+    *cell->p_prev = cell->next;
+    if (cell->next) {
+      cell->next->p_prev = cell->p_prev;
+    }
+    cell->p_prev = nullptr;  // mark as free
+
+    // Add it to the free list.
+    cell->next = free_list_;
+    free_list_ = cell;
+  }
+
+  KVIndex Find(const Key& key) const {
+    Cell* cell = *Lookup(key);
+    return cell ? static_cast<KVIndex>(cell - cells_) : kInvalidKVIndex;
+  }
+
+  KVPair& Get(KVIndex index) {
+    return cells_[index].kv;
+  }
+
+  const KVPair& Get(KVIndex index) const {
+    return cells_[index].kv;
+  }
+
+  // Finds the next index that has a KVPair associated with it. The search
+  // starts at the specified index. Returns kInvalidKVIndex if nothing was
+  // found. To find the first valid index, call this function with 0. Continue
+  // calling with last_index + 1 until kInvalidKVIndex is returned.
+  KVIndex Next(KVIndex index) const {
+    for (; index < next_unused_cell_; ++index) {
+      if (cells_[index].p_prev) {
+        return index;
+      }
+    }
+    return kInvalidKVIndex;
+  }
+
+  // Estimates the number of bytes used in allocated memory regions.
+  size_t EstimateUsedMemory() const {
+    size_t page_size = base::GetPageSize();
+    // |next_unused_cell_| is the first cell that wasn't touched, i.e.
+    // it's the number of touched cells.
+    return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
+           bits::Align(sizeof(Bucket) * NumBuckets, page_size);
+  }
+
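To make the index-based API concrete, here is a small usage sketch with a toy key type; SimpleHasher, the bucket count, and the capacity are made up for illustration (and internal:: types are not meant for use outside this file):

    struct SimpleHasher {
      size_t operator()(int key) const { return static_cast<size_t>(key); }
    };

    void FixedHashMapUsageSketch() {
      using Map = base::trace_event::internal::FixedHashMap<
          16 /* NumBuckets */, int, int, SimpleHasher>;
      Map map(100 /* capacity */);

      // Insert() returns {index, inserted}; inserting an existing key
      // returns the existing index with inserted == false.
      std::pair<Map::KVIndex, bool> result = map.Insert(42, 1);
      Map::KVIndex index = result.first;

      map.Get(index).second = 2;       // Entries are addressed by index.
      DCHECK_EQ(map.Find(42), index);  // Find() walks the bucket's list.

      // Iterate live entries: start at 0, continue with last_index + 1.
      for (Map::KVIndex i = map.Next(0); i != Map::kInvalidKVIndex;
           i = map.Next(i + 1)) {
        // map.Get(i).first is the key; map.Get(i).second is the value.
      }

      map.Remove(index);  // The cell is recycled via the free list.
    }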
+ private:
+  friend base::trace_event::AllocationRegisterTest;
+
+  struct Cell {
+    KVPair kv;
+    Cell* next;
+
+    // Conceptually this is |prev| in a doubly linked list. However, buckets
+    // also participate in the bucket's cell list - they point to the list's
+    // head and also need to be linked / unlinked properly. To treat these two
+    // cases uniformly, instead of |prev| we store a "pointer to the Cell*
+    // that points to this Cell". So |p_prev| points to a bucket for the first
+    // cell in a list, and to the |next| of the previous cell for any other
+    // cell. With that, Lookup() is the only function that has to handle
+    // buckets and cells differently.
+    // If |p_prev| is nullptr, the cell is in the free list.
+    Cell** p_prev;
+  };
+
+  using Bucket = Cell*;
+
+  // Returns a pointer to the cell that contains or should contain the entry
+  // for |key|. The pointer may point at an element of |buckets_| or at the
+  // |next| member of an element of |cells_|.
+  Cell** Lookup(const Key& key) const {
+    // The list head is in |buckets_| at the hash offset.
+    Cell** p_cell = &buckets_[Hash(key)];
+
+    // Chase down the list until the cell that holds |key| is found,
+    // or until the list ends.
+    while (*p_cell && (*p_cell)->kv.first != key) {
+      p_cell = &(*p_cell)->next;
+    }
+
+    return p_cell;
+  }
+
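Because |p_prev| points at whatever pointer currently points to the cell (a bucket slot or a previous cell's |next|), removal needs no special case for the head of a list. The same pattern in isolation, with a hypothetical Node type that is not part of the patch:

    struct Node {
      int key;
      Node* next;
      Node** p_prev;  // Points at the bucket slot or at the previous |next|.
    };

    // Identical code whether |node| is the bucket head or in the middle.
    void Unlink(Node* node) {
      *node->p_prev = node->next;  // Re-route the pointer that pointed at us.
      if (node->next)
        node->next->p_prev = node->p_prev;  // Fix the next node's back link.
      node->p_prev = nullptr;  // Free marker, as in FixedHashMap::Remove().
    }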
+  // Returns a cell that is not being used to store an entry (either by
+  // recycling from the free list or by taking a fresh cell).
+  Cell* GetFreeCell() {
+    // First try to re-use a cell from the free list.
+    if (free_list_) {
+      Cell* cell = free_list_;
+      free_list_ = cell->next;
+      return cell;
+    }
+
+    // Otherwise pick the next cell that has not been touched before.
+    size_t idx = next_unused_cell_;
+    next_unused_cell_++;
+
+    // If the hash table has too little capacity (when too little address
+    // space was reserved for |cells_|), |next_unused_cell_| can be an index
+    // outside of the allocated storage. A guard page is allocated there to
+    // crash the program in that case. There are alternative solutions:
+    // - Deal with it, increase capacity by reallocating |cells_|.
+    // - Refuse to insert and let the caller deal with it.
+    // Because free cells are re-used before accessing fresh cells with a
+    // higher index, and because reserving address space without touching it
+    // is cheap, the simplest solution is to just allocate a humongous chunk
+    // of address space.
+
+    DCHECK_LT(next_unused_cell_, num_cells_ + 1);
+
+    return &cells_[idx];
+  }
+
+  // Returns a value in the range [0, NumBuckets - 1] (inclusive).
+  size_t Hash(const Key& key) const {
+    if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
+      // NumBuckets is a power of 2.
+      return KeyHasher()(key) & (NumBuckets - 1);
+    } else {
+      return KeyHasher()(key) % NumBuckets;
+    }
+  }
+
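The expression NumBuckets & ~(NumBuckets - 1) isolates the lowest set bit, so the test is true exactly when NumBuckets is a power of two, in which case the cheap bitwise AND can replace the modulo. Since NumBuckets is a template parameter, the compiler folds the branch away. For example:

    // 16 is 10000 in binary and 16 - 1 is 01111, so hash & 15 == hash % 16.
    static_assert((16 & ~(16 - 1)) == 16, "16 is a power of two");
    static_assert((18 & ~(18 - 1)) != 18, "18 is not a power of two");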
+  // Number of cells.
+  size_t const num_cells_;
+
+  // The array of cells. This array is backed by mmapped memory. Lower indices
+  // are accessed first, higher indices are accessed only when the
+  // |free_list_| is empty. This is to minimize the amount of resident memory
+  // used.
+  Cell* const cells_;
+
+  // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
+  // contain the pointer to the linked list of cells for |Hash(key)|.
+  // This array is backed by mmapped memory.
+  mutable Bucket* buckets_;
+
+  // The head of the free list.
+  Cell* free_list_;
+
+  // The index of the first element of |cells_| that has not been used before.
+  // If the free list is empty and a new cell is needed, the cell at this
+  // index is used. This is the high water mark for the number of entries
+  // stored.
+  size_t next_unused_cell_;
+
+  DISALLOW_COPY_AND_ASSIGN(FixedHashMap);
+};
+
+}  // namespace internal
+
 class TraceEventMemoryOverhead;

 // The allocation register keeps track of all allocations that have not been
-// freed. It is a memory map-backed hash table that stores size and context
-// indexed by address. The hash table is tailored specifically for this use
-// case. The common case is that an entry is inserted and removed after a
-// while, lookup without modifying the table is not an intended use case. The
-// hash table is implemented as an array of linked lists. The size of this
-// array is fixed, but it does not limit the amount of entries that can be
-// stored.
-//
-// Replaying a recording of Chrome's allocations and frees against this hash
-// table takes about 15% of the time that it takes to replay them against
-// |std::map|.
+// freed. Internally it has two hash tables: one for Backtraces and one for
+// the actual allocations. The sizes of both hash tables are fixed, and this
+// class allocates (mmaps) only in its constructor.
 class BASE_EXPORT AllocationRegister {
  public:
-  // The data stored in the hash table;
-  // contains the details about an allocation.
+  // Details about an allocation.
   struct Allocation {
-    void* const address;
+    const void* address;
     size_t size;
     AllocationContext context;
   };

-  // An iterator that iterates entries in the hash table efficiently, but in
-  // no particular order. It can do this by iterating the cells and ignoring
-  // the linked lists altogether. Instead of checking whether a cell is in the
-  // free list to see if it should be skipped, a null address is used to
-  // indicate that a cell is free.
+  // An iterator that iterates entries in no particular order.
   class BASE_EXPORT ConstIterator {
    public:
     void operator++();
     bool operator!=(const ConstIterator& other) const;
-    const Allocation& operator*() const;
+    Allocation operator*() const;

    private:
     friend class AllocationRegister;
-    using CellIndex = uint32_t;
+    using AllocationIndex = size_t;

-    ConstIterator(const AllocationRegister& alloc_register, CellIndex index);
+    ConstIterator(const AllocationRegister& alloc_register,
+                  AllocationIndex index);

     const AllocationRegister& register_;
-    CellIndex index_;
+    AllocationIndex index_;
   };

   AllocationRegister();
-  explicit AllocationRegister(uint32_t num_cells);
+  AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);

   ~AllocationRegister();

   // Inserts allocation details into the table. If the address was present
-  // already, its details are updated. |address| must not be null. (This is
-  // because null is used to mark free cells, to allow efficient iteration of
-  // the hash table.)
-  void Insert(void* address, size_t size, AllocationContext context);
+  // already, its details are updated. |address| must not be null.
+  void Insert(const void* address,
+              size_t size,
+              const AllocationContext& context);

   // Removes the address from the table if it is present. It is ok to call
   // this with a null pointer.
-  void Remove(void* address);
+  void Remove(const void* address);

-  // Returns a pointer to the allocation at the address, or null if there is
-  // no allocation at that address. This can be used to change the allocation
-  // context after insertion, for example to change the type name.
-  Allocation* Get(void* address);
+  // Finds the allocation for the address and fills |out_allocation|. Returns
+  // false if the address is not in the register.
+  bool Get(const void* address, Allocation* out_allocation) const;

   ConstIterator begin() const;
   ConstIterator end() const;

   // Estimates memory overhead including |sizeof(AllocationRegister)|.
   void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const;

  private:
-  friend class AllocationRegisterTest;
-  using CellIndex = uint32_t;
+  friend AllocationRegisterTest;

-  // A cell can store allocation details (size and context) by address. Cells
-  // are part of a linked list via the |next| member. This list is either the
-  // list for a particular hash, or the free list. All cells are contiguous in
-  // memory in one big array. Therefore, on 64-bit systems, space can be saved
-  // by storing 32-bit indices instead of pointers as links. Index 0 is used
-  // as the list terminator.
-  struct Cell {
-    CellIndex next;
-    Allocation allocation;
-  };
-
-  // The number of buckets, 2^17, approximately 130 000, has been tuned for
-  // Chrome's typical number of outstanding allocations. (This number varies
-  // between processes. Most processes have a sustained load of ~30k unfreed
-  // allocations, but some processes have peaks around 100k-400k allocations.)
-  // Because of the size of the table, it is likely that every |buckets_|
-  // access and every |cells_| access will incur a cache miss. Microbenchmarks
-  // suggest that it is worthwhile to use more memory for the table to avoid
-  // chasing down the linked list, until the size is 2^18. The number of
-  // buckets is a power of two so modular indexing can be done with bitwise
-  // and.
-  static const uint32_t kNumBuckets = 0x20000;
-  static const uint32_t kNumBucketsMask = kNumBuckets - 1;
-
-  // Reserve address space to store at most this number of entries. High
-  // capacity does not imply high memory usage due to the access pattern. The
-  // only constraint on the number of cells is that on 32-bit systems address
-  // space is scarce (i.e. reserving 2 GiB of address space for the entries is
-  // not an option). A value of ~3M entries is large enough to handle spikes
-  // in the number of allocations, and modest enough to require no more than a
-  // few dozen MiB of address space.
-  static const uint32_t kNumCellsPerBucket = 10;
-
-  // Returns a value in the range [0, kNumBuckets - 1] (inclusive).
-  static uint32_t Hash(void* address);
-
-  // Allocates a region of virtual address space of |size| rounded up to the
-  // system page size. The memory is zeroed by the system. A guard page is
-  // added after the end.
-  static void* AllocateVirtualMemory(size_t size);
-
-  // Frees a region of virtual address space allocated by a call to
-  // |AllocateVirtualMemory|.
-  static void FreeVirtualMemory(void* address, size_t allocated_size);
-
-  // Returns a pointer to the variable that contains or should contain the
-  // index of the cell that stores the entry for |address|. The pointer may
-  // point at an element of |buckets_| or at the |next| member of an element
-  // of |cells_|. If the value pointed at is 0, |address| is not in the table.
-  CellIndex* Lookup(void* address);
-
-  // Takes a cell that is not being used to store an entry (either by
-  // recycling from the free list or by taking a fresh cell) and returns its
-  // index.
-  CellIndex GetFreeCell();
-
-  // The maximum number of cells which can be allocated.
-  uint32_t const num_cells_;
-
-  // The array of cells. This array is backed by mmapped memory. Lower indices
-  // are accessed first, higher indices are only accessed when required. In
-  // this way, even if a huge amount of address space has been mmapped, only
-  // the cells that are actually used will be backed by physical memory.
-  Cell* const cells_;
-
-  // The array of indices into |cells_|. |buckets_[Hash(address)]| will
-  // contain the index of the head of the linked list for |Hash(address)|. A
-  // value of 0 indicates an empty list. This array is backed by mmapped
-  // memory.
-  CellIndex* const buckets_;
-
-  // The head of the free list. This is the index of the cell. A value of 0
-  // means that the free list is empty.
-  CellIndex free_list_;
-
-  // The index of the first element of |cells_| that has not been used before.
-  // If the free list is empty and a new cell is needed, the cell at this
-  // index is used. This is the high water mark for the number of entries
-  // stored.
-  CellIndex next_unused_cell_;
+  // Expect a maximum of 1.5M allocations. The number of buckets is 2^18 for
+  // optimal hashing and should be changed together with AddressHasher.
+  static const size_t kAllocationBuckets = 1 << 18;
+  static const size_t kAllocationCapacity = 1500000;
+
+  // Expect a maximum of 2^15 unique backtraces. Can be changed to 2^16
+  // without needing to tweak the BacktraceHasher implementation.
+  static const size_t kBacktraceBuckets = 1 << 15;
+  static const size_t kBacktraceCapacity = kBacktraceBuckets;
+
+  struct BacktraceHasher {
+    size_t operator()(const Backtrace& backtrace) const;
+  };
+
+  using BacktraceMap = internal::FixedHashMap<
+      kBacktraceBuckets,
+      Backtrace,
+      size_t,  // Number of references to the backtrace (the key). Incremented
+               // when an allocation that references the backtrace is
+               // inserted, and decremented when the allocation is removed.
+               // When the number drops to zero, the backtrace is removed from
+               // the map.
+      BacktraceHasher>;
+
+  struct AllocationInfo {
+    size_t size;
+    const char* type_name;
+    BacktraceMap::KVIndex backtrace_index;
+  };
+
+  struct AddressHasher {
+    size_t operator()(const void* address) const;
+  };
+
+  using AllocationMap = internal::FixedHashMap<
+      kAllocationBuckets,
+      const void*,
+      AllocationInfo,
+      AddressHasher>;
+
+  BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace);
+  void RemoveBacktrace(BacktraceMap::KVIndex index);
+
+  Allocation GetAllocation(AllocationMap::KVIndex index) const;
+
+  AllocationMap allocations_;
+  BacktraceMap backtraces_;

   DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
 };
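heap_profiler_allocation_register.cc is not shown in this file's diff, but the contract described in the BacktraceMap comment above implies reference counting along these lines. This is a sketch consistent with that comment, not the reviewed implementation:

    AllocationRegister::BacktraceMap::KVIndex
    AllocationRegister::InsertBacktrace(const Backtrace& backtrace) {
      // Insert with a zero count if the backtrace is new, then add one
      // reference either way.
      BacktraceMap::KVIndex index = backtraces_.Insert(backtrace, 0).first;
      backtraces_.Get(index).second++;
      return index;
    }

    void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
      // Drop one reference; erase the backtrace once nothing references it.
      if (--backtraces_.Get(index).second == 0)
        backtraces_.Remove(index);
    }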

 }  // namespace trace_event
 }  // namespace base

 #endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
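Putting the pieces together, a hedged sketch of how allocator hooks might drive this class; g_register, OnAllocation, and OnFree are illustrative names, and the real hooks live elsewhere in base/trace_event:

    base::trace_event::AllocationRegister g_register;

    void OnAllocation(const void* address, size_t size,
                      const base::trace_event::AllocationContext& context) {
      g_register.Insert(address, size, context);  // Re-insert updates details.
    }

    void OnFree(const void* address) {
      g_register.Remove(address);  // Null or unknown addresses are a no-op.
    }

    size_t EstimateLiveBytes() {
      size_t total = 0;
      // ConstIterator visits live allocations in no particular order.
      for (const auto& allocation : g_register) {
        total += allocation.size;
      }
      return total;
    }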
