Chromium Code Reviews

Side by Side Diff: base/trace_event/heap_profiler_allocation_register.h

Issue 2784783003: On heap tracking datastructure overflow, degrade instead of CHECK() (Closed)
Patch Set: Address comments. Created 3 years, 8 months ago
// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_
#define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_

#include <stddef.h>
#include <stdint.h>

(...skipping 30 matching lines...)
  // To keep things simple we don't call destructors.
  static_assert(is_trivially_destructible<Key>::value &&
                    is_trivially_destructible<Value>::value,
                "Key and Value shouldn't have destructors");

 public:
  using KVPair = std::pair<const Key, Value>;

  // For implementation simplicity API uses integer index instead
  // of iterators. Most operations (except Find) on KVIndex are O(1).
  using KVIndex = size_t;
- static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+ enum { kInvalidKVIndex = static_cast<KVIndex>(-1) };

Primiano Tucci (use gerrit) 2017/03/31 19:05:47: unfortunately now we are one bug out and one bug i

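A note on the static const to enum change above: the new "return {kInvalidKVIndex, false};" in Insert() binds the constant to std::pair's const-reference constructor, which ODR-uses an in-class static const member and would then require an out-of-line definition to link. An enumerator has nothing to define, so the enum form sidesteps that. This is the usual motivation for the pattern, though the truncated reviewer comment above may be raising a different concern. A standalone sketch, not Chromium code:

#include <cstddef>
#include <utility>

struct StaticConstTable {
  using KVIndex = size_t;
  static const KVIndex kInvalid = static_cast<KVIndex>(-1);
  // Binding kInvalid to pair's const-reference constructor ODR-uses it, so
  // without a namespace-scope definition
  //   const StaticConstTable::KVIndex StaticConstTable::kInvalid;
  // this can fail at link time.
  std::pair<KVIndex, bool> Fail() { return {kInvalid, false}; }
};

struct EnumTable {
  using KVIndex = size_t;
  enum : KVIndex { kInvalid = static_cast<KVIndex>(-1) };
  // The enumerator is a pure value with no storage of its own; a temporary is
  // materialized for the reference, so no out-of-line definition is needed.
  std::pair<KVIndex, bool> Fail() { return {kInvalid, false}; }
};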
  // Capacity controls how many items this hash map can hold, and largely
  // affects memory footprint.
  FixedHashMap(size_t capacity)
      : num_cells_(capacity),
+       num_inserts_dropped_(0),
        cells_(static_cast<Cell*>(
            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
        buckets_(static_cast<Bucket*>(
            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
        free_list_(nullptr),
        next_unused_cell_(0) {}

  ~FixedHashMap() {
    FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
    FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
  }

+ // Returns {kInvalidKVIndex, false} if the table is full.
  std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
    Cell** p_cell = Lookup(key);
    Cell* cell = *p_cell;
    if (cell) {
      return {static_cast<KVIndex>(cell - cells_), false};  // not inserted
    }

    // Get a free cell and link it.
-   *p_cell = cell = GetFreeCell();
+   cell = GetFreeCell();
+   if (!cell) {
+     if (num_inserts_dropped_ <
+         std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
+       ++num_inserts_dropped_;
+     }
+     return {kInvalidKVIndex, false};
+   }
+   *p_cell = cell;
    cell->p_prev = p_cell;
    cell->next = nullptr;

    // Initialize key/value pair. Since key is 'const Key' this is the
    // only way to initialize it.
    new (&cell->kv) KVPair(key, value);

    return {static_cast<KVIndex>(cell - cells_), true};  // inserted
  }

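With this change Insert() reports a full table instead of relying on the guard page. Below is a small self-contained analogue of the new contract, with made-up names (BoundedMap is not Chromium code); it shows the three outcomes a caller has to distinguish: inserted, already present, and dropped because the table is full.

#include <cstddef>
#include <cstdint>
#include <iostream>
#include <unordered_map>
#include <utility>

class BoundedMap {
 public:
  using Index = size_t;
  enum : Index { kInvalidIndex = static_cast<Index>(-1) };

  explicit BoundedMap(size_t capacity) : capacity_(capacity) {}

  // Returns {index, true} on insert, {index, false} if the key already
  // exists, and {kInvalidIndex, false} when the map is full.
  std::pair<Index, bool> Insert(uintptr_t key, size_t value) {
    auto it = map_.find(key);
    if (it != map_.end())
      return {it->second.first, false};  // already present
    if (map_.size() >= capacity_) {
      ++inserts_dropped_;                // degrade instead of CHECK()
      return {kInvalidIndex, false};
    }
    Index index = next_index_++;
    map_.emplace(key, std::make_pair(index, value));
    return {index, true};
  }

  size_t inserts_dropped() const { return inserts_dropped_; }

 private:
  const size_t capacity_;
  size_t inserts_dropped_ = 0;
  Index next_index_ = 0;
  std::unordered_map<uintptr_t, std::pair<Index, size_t>> map_;
};

int main() {
  BoundedMap map(2);
  map.Insert(0x1000, 16);                      // inserted
  map.Insert(0x1000, 16);                      // duplicate: {index, false}
  map.Insert(0x2000, 32);                      // inserted
  auto full = map.Insert(0x3000, 64);          // full: {kInvalidIndex, false}
  std::cout << (full.first == BoundedMap::kInvalidIndex) << " "
            << map.inserts_dropped() << "\n";  // prints "1 1"
  return 0;
}

The real FixedHashMap signals the same three cases through its KVIndex/bool pair rather than through CHECK() failures.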
(...skipping 42 matching lines...)

  // Estimates number of bytes used in allocated memory regions.
  size_t EstimateUsedMemory() const {
    size_t page_size = base::GetPageSize();
    // |next_unused_cell_| is the first cell that wasn't touched, i.e.
    // it's the number of touched cells.
    return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) +
           bits::Align(sizeof(Bucket) * NumBuckets, page_size);
  }

+ size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
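EstimateUsedMemory() charges each region by whole pages, since only the touched pages of the guarded reservations become resident. A worked example with assumed numbers (4 KiB pages, a hypothetical 64-byte Cell and 8-byte Bucket); the AlignUp helper mirrors what base::bits::Align computes, rounding up to a multiple of a power-of-two alignment:

#include <cstddef>
#include <iostream>

// Round |size| up to a multiple of |alignment| (alignment must be a power of
// two), the same rounding that base::bits::Align performs.
size_t AlignUp(size_t size, size_t alignment) {
  return (size + alignment - 1) & ~(alignment - 1);
}

int main() {
  const size_t kPageSize = 4096;      // assumed page size
  const size_t kCellSize = 64;        // hypothetical sizeof(Cell)
  const size_t kBucketSize = 8;       // hypothetical sizeof(Bucket)
  const size_t kNumBuckets = 1 << 18;
  const size_t touched_cells = 1000;  // plays the role of next_unused_cell_

  size_t estimate = AlignUp(kCellSize * touched_cells, kPageSize) +
                    AlignUp(kBucketSize * kNumBuckets, kPageSize);
  // 64 * 1000 = 64000 -> 65536 bytes; 8 * 262144 = 2097152 (already aligned).
  std::cout << estimate << "\n";  // 2162688
  return 0;
}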
 private:
  friend base::trace_event::AllocationRegisterTest;

  struct Cell {
    KVPair kv;
    Cell* next;

    // Conceptually this is |prev| in a doubly linked list. However, buckets
    // also participate in the bucket's cell list - they point to the list's
    // head and also need to be linked / unlinked properly. To treat these two
(...skipping 18 matching lines...)

    // Chase down the list until the cell that holds |key| is found,
    // or until the list ends.
    while (*p_cell && (*p_cell)->kv.first != key) {
      p_cell = &(*p_cell)->next;
    }

    return p_cell;
  }

  // Returns a cell that is not being used to store an entry (either by
- // recycling from the free list or by taking a fresh cell).
+ // recycling from the free list or by taking a fresh cell). May return
+ // nullptr if the hash table has run out of memory.
  Cell* GetFreeCell() {
    // First try to re-use a cell from the free list.
    if (free_list_) {
      Cell* cell = free_list_;
      free_list_ = cell->next;
      return cell;
    }

+   // If the hash table has too little capacity (when too little address space
+   // was reserved for |cells_|), return nullptr.
+   if (next_unused_cell_ >= num_cells_) {
+     return nullptr;
+   }
+
    // Otherwise pick the next cell that has not been touched before.
-   size_t idx = next_unused_cell_;
-   next_unused_cell_++;
-
-   // If the hash table has too little capacity (when too little address space
-   // was reserved for |cells_|), |next_unused_cell_| can be an index outside
-   // of the allocated storage. A guard page is allocated there to crash the
-   // program in that case. There are alternative solutions:
-   // - Deal with it, increase capacity by reallocating |cells_|.
-   // - Refuse to insert and let the caller deal with it.
-   // Because free cells are re-used before accessing fresh cells with a higher
-   // index, and because reserving address space without touching it is cheap,
-   // the simplest solution is to just allocate a humongous chunk of address
-   // space.
-
-   CHECK_LT(next_unused_cell_, num_cells_ + 1)
-       << "Allocation Register hash table has too little capacity. Increase "
-          "the capacity to run heap profiler in large sessions.";
-
-   return &cells_[idx];
+   return &cells_[next_unused_cell_++];
  }

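GetFreeCell() follows a common fixed-pool pattern: recycle from a free list threaded through the cells first, otherwise bump-allocate the next untouched cell, and now return nullptr on exhaustion rather than running into the guard page. A self-contained sketch of that pattern with hypothetical names (SlotPool is not the Chromium class):

#include <cstddef>
#include <vector>

// Hypothetical fixed-capacity pool: slots are bump-allocated in order and
// recycled through a singly linked free list, mirroring the cells_/free_list_/
// next_unused_cell_ scheme above.
struct Slot {
  Slot* next = nullptr;  // free-list link while the slot is not in use
  int payload = 0;
};

class SlotPool {
 public:
  explicit SlotPool(size_t capacity) : slots_(capacity) {}

  // Returns nullptr when every slot has been handed out and none were freed,
  // leaving the "pool full" decision to the caller.
  Slot* Acquire() {
    if (free_list_) {                   // 1) reuse a previously released slot
      Slot* slot = free_list_;
      free_list_ = slot->next;
      return slot;
    }
    if (next_unused_ >= slots_.size())  // 2) out of fresh slots: degrade
      return nullptr;
    return &slots_[next_unused_++];     // 3) hand out the next untouched slot
  }

  void Release(Slot* slot) {
    slot->next = free_list_;
    free_list_ = slot;
  }

 private:
  std::vector<Slot> slots_;
  Slot* free_list_ = nullptr;
  size_t next_unused_ = 0;
};

The capacity check in step 2 is what this patch adds; previously that case fell through to the guard page and crashed.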
  // Returns a value in the range [0, NumBuckets - 1] (inclusive).
  size_t Hash(const Key& key) const {
    if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) {
      // NumBuckets is a power of 2.
      return KeyHasher()(key) & (NumBuckets - 1);
    } else {
      return KeyHasher()(key) % NumBuckets;
    }
  }

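In the Hash() above, NumBuckets & ~(NumBuckets - 1) isolates the lowest set bit, so the comparison holds exactly when NumBuckets equals its own lowest set bit, i.e. when it is a power of two (or zero). In that case the bucket index can be taken with a cheap mask instead of a modulo. A small standalone check of the equivalence, with an assumed bucket count:

#include <cassert>
#include <cstddef>
#include <cstdint>

// True when exactly one bit of |n| is set, i.e. |n| is a power of two. The
// extra n != 0 guard excludes zero, which the expression alone would accept.
bool IsPowerOfTwo(size_t n) {
  return n == (n & ~(n - 1)) && n != 0;
}

int main() {
  const size_t kNumBuckets = 1 << 16;  // hypothetical power-of-two bucket count
  assert(IsPowerOfTwo(kNumBuckets));

  for (uint64_t hash : {0x0ull, 0x12345678ull, 0xffffffffffffffffull}) {
    // For power-of-two bucket counts, masking and modulo pick the same bucket.
    assert((hash & (kNumBuckets - 1)) == (hash % kNumBuckets));
  }
  return 0;
}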
  // Number of cells.
  size_t const num_cells_;

+ // Number of calls to Insert() that were lost because the hashtable was full.
+ size_t num_inserts_dropped_;
+
  // The array of cells. This array is backed by mmapped memory. Lower indices
  // are accessed first, higher indices are accessed only when the |free_list_|
  // is empty. This is to minimize the amount of resident memory used.
  Cell* const cells_;

  // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will
  // contain the pointer to the linked list of cells for |Hash(key)|.
  // This array is backed by mmapped memory.
  mutable Bucket* buckets_;

(...skipping 43 matching lines...)

    AllocationIndex index_;
  };

  AllocationRegister();
  AllocationRegister(size_t allocation_capacity, size_t backtrace_capacity);

  ~AllocationRegister();

  // Inserts allocation details into the table. If the address was present
  // already, its details are updated. |address| must not be null.
- void Insert(const void* address,
+ //
+ // Returns true if an insert occurred. Inserts may fail because the table
+ // is full.
+ bool Insert(const void* address,
              size_t size,
              const AllocationContext& context);

  // Removes the address from the table if it is present. It is ok to call this
  // with a null pointer.
  void Remove(const void* address);

  // Finds allocation for the address and fills |out_allocation|.
  bool Get(const void* address, Allocation* out_allocation) const;

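Because AllocationRegister::Insert() now returns bool, call sites can degrade gracefully when the register is full. A hedged sketch of what a caller might do; the hook name and the drop counter are invented for illustration, and the real call sites are the heap profiler's allocator hooks:

// Illustrative only (not part of this patch): a hypothetical hook that
// records an allocation and tallies drops when the register is full.
#include <atomic>
#include <cstddef>

#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_register.h"

namespace {
std::atomic<size_t> g_dropped_allocations{0};  // hypothetical counter
}  // namespace

void OnAllocation(base::trace_event::AllocationRegister& allocations,
                  const void* address,
                  size_t size,
                  const base::trace_event::AllocationContext& context) {
  if (!address)
    return;
  // Insert() now returns false when its fixed-size tables are full; degrade
  // by counting the miss instead of crashing on a guard page.
  if (!allocations.Insert(address, size, context))
    g_dropped_allocations.fetch_add(1, std::memory_order_relaxed);
}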
(...skipping 63 matching lines...)

  AllocationMap allocations_;
  BacktraceMap backtraces_;

  DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
};

}  // namespace trace_event
}  // namespace base

#endif  // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_