OLD | NEW |
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
7 | 7 |
8 #include <stddef.h> | 8 #include <stddef.h> |
9 #include <stdint.h> | 9 #include <stdint.h> |
10 | 10 |
(...skipping 34 matching lines...)
45 public: | 45 public: |
46 using KVPair = std::pair<const Key, Value>; | 46 using KVPair = std::pair<const Key, Value>; |
47 | 47 |
48 // For implementation simplicity, the API uses an integer index instead | 48 // For implementation simplicity, the API uses an integer index instead |
49 // of iterators. Most operations (except Find) on KVIndex are O(1). | 49 // of iterators. Most operations (except Find) on KVIndex are O(1). |
50 using KVIndex = size_t; | 50 using KVIndex = size_t; |
51 enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) }; | 51 enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) }; |
52 | 52 |
53 // Capacity controls how many items this hash map can hold, and largely | 53 // Capacity controls how many items this hash map can hold, and largely |
54 // affects memory footprint. | 54 // affects memory footprint. |
55 FixedHashMap(size_t capacity) | 55 explicit FixedHashMap(size_t capacity) |
56 : num_cells_(capacity), | 56 : num_cells_(capacity), |
57 num_inserts_dropped_(0), | 57 num_inserts_dropped_(0), |
58 cells_(static_cast<Cell*>( | 58 cells_(static_cast<Cell*>( |
59 AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), | 59 AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), |
60 buckets_(static_cast<Bucket*>( | 60 buckets_(static_cast<Bucket*>( |
61 AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))), | 61 AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))), |
62 free_list_(nullptr), | 62 free_list_(nullptr), |
63 next_unused_cell_(0) {} | 63 next_unused_cell_(0) {} |
64 | 64 |
65 ~FixedHashMap() { | 65 ~FixedHashMap() { |
(...skipping 179 matching lines...)
245 }; | 245 }; |
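
To make the index-based design concrete: below is a minimal, self-contained sketch of a fixed-capacity map that hands out integer indices instead of iterators. It is an illustration only, not the FixedHashMap above; the real class hashes keys into buckets backed by guarded virtual memory and recycles cells through a free list, while this toy keeps a flat vector and does a linear Find.

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Toy fixed-capacity map: capacity is chosen once, the storage never grows,
// and callers address entries by integer index (KVIndex) rather than by
// iterator. Inserts beyond the capacity are dropped.
template <typename Key, typename Value>
class IndexedMap {
 public:
  using KVPair = std::pair<const Key, Value>;
  using KVIndex = size_t;
  static constexpr KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);

  explicit IndexedMap(size_t capacity) : capacity_(capacity) {
    cells_.reserve(capacity_);
  }

  // Returns the index of the new pair, or kInvalidKVIndex if the map is full.
  KVIndex Insert(const Key& key, const Value& value) {
    if (cells_.size() >= capacity_)
      return kInvalidKVIndex;  // Fixed capacity: the insert is dropped.
    cells_.emplace_back(key, value);
    return cells_.size() - 1;
  }

  // O(1): once a caller holds a KVIndex, lookup is a plain array access.
  const KVPair& Get(KVIndex index) const {
    assert(index < cells_.size());
    return cells_[index];
  }

  // Finding by key is the one operation that is not O(1); here it is a
  // linear scan, in the real class it is a hash lookup over buckets.
  KVIndex Find(const Key& key) const {
    for (KVIndex i = 0; i < cells_.size(); ++i) {
      if (cells_[i].first == key)
        return i;
    }
    return kInvalidKVIndex;
  }

 private:
  const size_t capacity_;
  std::vector<KVPair> cells_;
};
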
246 | 246 |
247 } // namespace internal | 247 } // namespace internal |
248 | 248 |
249 class TraceEventMemoryOverhead; | 249 class TraceEventMemoryOverhead; |
250 | 250 |
251 // The allocation register keeps track of all allocations that have not been | 251 // The allocation register keeps track of all allocations that have not been |
252 // freed. Internally it has two hashtables: one for Backtraces and one for | 252 // freed. Internally it has two hashtables: one for Backtraces and one for |
253 // actual allocations. Sizes of both hashtables are fixed, and this class | 253 // actual allocations. Sizes of both hashtables are fixed, and this class |
254 // allocates (mmaps) only in its constructor. | 254 // allocates (mmaps) only in its constructor. |
| 255 // |
| 256 // When either hash table hits max size, new inserts are dropped. |
255 class BASE_EXPORT AllocationRegister { | 257 class BASE_EXPORT AllocationRegister { |
256 public: | 258 public: |
257 // Details about an allocation. | 259 // Details about an allocation. |
258 struct Allocation { | 260 struct Allocation { |
259 const void* address; | 261 const void* address; |
260 size_t size; | 262 size_t size; |
261 AllocationContext context; | 263 AllocationContext context; |
262 }; | 264 }; |
263 | 265 |
264 // An iterator that iterates entries in no particular order. | 266 // An iterator that iterates entries in no particular order. |
(...skipping 94 matching lines...)
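
The new class comment spells out the overflow policy: neither table ever grows past its fixed size, so an allocation that arrives while a table is full is simply not recorded. A rough caller-side sketch of that policy follows; BoundedRegister and its Insert/Remove signatures are invented for illustration (the real register mmaps fixed storage up front, whereas this toy uses std::unordered_map purely for brevity). The drop counter mirrors the num_inserts_dropped_ field initialized in FixedHashMap's constructor.

#include <cstddef>
#include <iostream>
#include <unordered_map>

// Toy register with a hard capacity: Insert() reports, rather than hides,
// the fact that an entry was dropped, and keeps a count of the drops.
class BoundedRegister {
 public:
  explicit BoundedRegister(size_t capacity) : capacity_(capacity) {}

  // Returns false when the table is already at capacity; the entry is
  // dropped instead of growing the table.
  bool Insert(const void* address, size_t size) {
    if (entries_.size() >= capacity_) {
      ++num_inserts_dropped_;
      return false;
    }
    entries_[address] = size;
    return true;
  }

  void Remove(const void* address) { entries_.erase(address); }

  size_t num_inserts_dropped() const { return num_inserts_dropped_; }

 private:
  const size_t capacity_;
  size_t num_inserts_dropped_ = 0;
  std::unordered_map<const void*, size_t> entries_;
};

int main() {
  BoundedRegister reg(/*capacity=*/2);
  int a = 0, b = 0, c = 0;
  reg.Insert(&a, sizeof(a));
  reg.Insert(&b, sizeof(b));
  reg.Insert(&c, sizeof(c));  // Dropped: the register is full.
  std::cout << "inserts dropped: " << reg.num_inserts_dropped() << "\n";
  return 0;
}
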
359 AddressHasher>; | 361 AddressHasher>; |
360 | 362 |
361 BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace); | 363 BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace); |
362 void RemoveBacktrace(BacktraceMap::KVIndex index); | 364 void RemoveBacktrace(BacktraceMap::KVIndex index); |
363 | 365 |
364 Allocation GetAllocation(AllocationMap::KVIndex) const; | 366 Allocation GetAllocation(AllocationMap::KVIndex) const; |
365 | 367 |
366 AllocationMap allocations_; | 368 AllocationMap allocations_; |
367 BacktraceMap backtraces_; | 369 BacktraceMap backtraces_; |
368 | 370 |
369 // Sentinel used when we run out of backtraces_ storage. | 371 // Sentinel used when the |backtraces_| table is full. |
370 BacktraceMap::KVIndex out_of_storage_backtrace_index_; | 372 // |
| 373 // This is a slight abstraction leak to allow for constant propagation. It |
| 374 // knows that the sentinel will be the first item inserted into the table |
| 375 // and that the first index returned will be 0. The constructor DCHECKs |
| 376 // this assumption. |
| 377 enum : BacktraceMap::KVIndex { kOutOfStorageBacktraceIndex = 0 }; |
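
The enum replaces the out_of_storage_backtrace_index_ member: since the sentinel is the first backtrace inserted, it always lands at index 0, so the index can be a compile-time constant the compiler can propagate rather than a value loaded from the object. A simplified sketch of the constructor-time check described above follows; it uses assert in place of DCHECK, and the table's Insert shape is assumed for illustration.

#include <cassert>
#include <cstddef>
#include <vector>

using KVIndex = size_t;

// The sentinel is a compile-time constant: every "table is full" comparison
// can be folded by the compiler, unlike a comparison against a member.
enum : KVIndex { kOutOfStorageBacktraceIndex = 0 };

// Stand-in for the backtrace table; indices are handed out in insert order.
struct BacktraceTable {
  KVIndex Insert(int backtrace_id) {
    entries_.push_back(backtrace_id);
    return entries_.size() - 1;
  }
  std::vector<int> entries_;
};

struct Register {
  Register() {
    // The sentinel must be the very first insert, so it gets index 0 and
    // matches the constant above; this is the assumption the real
    // constructor DCHECKs.
    KVIndex sentinel = backtraces_.Insert(/*backtrace_id=*/-1);
    assert(sentinel == kOutOfStorageBacktraceIndex);
    (void)sentinel;  // Silence unused-variable warnings in release builds.
  }
  BacktraceTable backtraces_;
};

int main() {
  Register r;
  (void)r;
  return 0;
}
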
371 | 378 |
372 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); | 379 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); |
373 }; | 380 }; |
374 | 381 |
375 } // namespace trace_event | 382 } // namespace trace_event |
376 } // namespace base | 383 } // namespace base |
377 | 384 |
378 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 385 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |