| Index: base/trace_event/heap_profiler_allocation_register.h |
| diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h |
| index d6a02faeaeaacca487e3ff13d7f71dac34768da1..74f376c317eaafe73bb9695986bd28169ef24187 100644 |
| --- a/base/trace_event/heap_profiler_allocation_register.h |
| +++ b/base/trace_event/heap_profiler_allocation_register.h |
| @@ -48,24 +48,26 @@ class FixedHashMap { |
| // For implementation simplicity API uses integer index instead |
| // of iterators. Most operations (except Find) on KVIndex are O(1). |
| using KVIndex = size_t; |
| - static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1); |
|
Primiano Tucci (use gerrit)
2017/03/30 13:32:40
uh why? initializing a static const int-type in a
awong
2017/03/31 04:28:31
This is a common C++ mistake that only often doesn
Primiano Tucci (use gerrit)
2017/03/31 19:05:47
The defense rests, your honor
|
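For context on the thread above, a minimal sketch (hypothetical names, not part of the patch) of why an in-class initializer alone can bite: it only declares the member, and if the constant is odr-used, for example bound to a const reference, the program still needs an out-of-line definition; some toolchains (MSVC in particular) reject the build without one.

    #include <cstddef>

    struct Table {
      // In-class initializer: this is a declaration, not a definition.
      static const std::size_t kInvalid = static_cast<std::size_t>(-1);
    };

    const std::size_t& Ref() {
      return Table::kInvalid;  // odr-use: binding to a reference needs a definition
    }

    // Required in exactly one .cc file (unless the member is constexpr,
    // which is implicitly inline since C++17):
    // const std::size_t Table::kInvalid;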
| + static const KVIndex kInvalidKVIndex; |
| // Capacity controls how many items this hash map can hold, and largely |
| // affects memory footprint. |
| FixedHashMap(size_t capacity) |
| - : num_cells_(capacity), |
| - cells_(static_cast<Cell*>( |
| - AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), |
| - buckets_(static_cast<Bucket*>( |
| - AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))), |
| - free_list_(nullptr), |
| - next_unused_cell_(0) {} |
| + : num_cells_(capacity), |
| + num_inserts_dropped_(0), |
| + cells_(static_cast<Cell*>( |
| + AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), |
| + buckets_(static_cast<Bucket*>( |
| + AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))), |
| + free_list_(nullptr), |
| + next_unused_cell_(0) {} |
| ~FixedHashMap() { |
| FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell)); |
| FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket)); |
| } |
| + // Returns {kInvalidKVIndex, false} if the table is full. |
| std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) { |
| Cell** p_cell = Lookup(key); |
| Cell* cell = *p_cell; |
| @@ -74,7 +76,15 @@ class FixedHashMap { |
| } |
| // Get a free cell and link it. |
| - *p_cell = cell = GetFreeCell(); |
| + cell = GetFreeCell(); |
| + if (!cell) { |
| + if (num_inserts_dropped_ < |
| + std::numeric_limits<decltype(num_inserts_dropped_)>::max()) { |
|
Primiano Tucci (use gerrit)
2017/03/30 13:32:41
I'd make num_inserts_dropped either a bool or a in
awong
2017/03/31 04:28:30
Is that really necessary? The branch prediction o
Primiano Tucci (use gerrit)
2017/03/31 19:05:47
Another battle I don't care enough to respin a dis
|
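The saturation guard in the hunk above, restated in isolation (a sketch; the real counter is the num_inserts_dropped_ member): the counter stops at its maximum rather than overflowing, since overflowing a signed integer with ++ is undefined behavior. The patch spells the limit as std::numeric_limits<decltype(num_inserts_dropped_)>::max() so it tracks the field's declared type automatically.

    #include <cstdint>
    #include <limits>

    // Saturating increment: never step past INT32_MAX.
    void RecordDrop(int32_t* dropped) {
      if (*dropped < std::numeric_limits<int32_t>::max())
        ++*dropped;
    }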
| + ++num_inserts_dropped_; |
| + } |
| + return {kInvalidKVIndex, false}; |
| + } |
| + *p_cell = cell; |
| cell->p_prev = p_cell; |
| cell->next = nullptr; |
| @@ -137,6 +147,8 @@ class FixedHashMap { |
| bits::Align(sizeof(Bucket) * NumBuckets, page_size); |
| } |
| + int32_t NumInsertsDropped() const { return num_inserts_dropped_; } |
|
Primiano Tucci (use gerrit)
2017/03/30 13:32:40
nit: lower_with_under() for trivial accessors.
awong
2017/03/31 04:28:30
Done.
|
| + |
| private: |
| friend base::trace_event::AllocationRegisterTest; |
| @@ -175,7 +187,8 @@ class FixedHashMap { |
| } |
| // Returns a cell that is not being used to store an entry (either by |
| - // recycling from the free list or by taking a fresh cell). |
| + // recycling from the free list or by taking a fresh cell). May return |
| + // NULL if the hash table has run out of memory. |
|
Primiano Tucci (use gerrit)
2017/03/30 13:32:40
s/NULL/nullptr. It's C++11 year :)
awong
2017/03/31 04:28:31
Done.
|
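The style point above, illustrated with a minimal sketch (hypothetical overloads): NULL is an integer-flavored null pointer constant, so it can steer overload resolution the wrong way, while nullptr has its own type and cannot.

    #include <cstddef>

    void F(int) {}
    void F(char*) {}

    void Demo() {
      F(NULL);     // can resolve to F(int), or be ambiguous, depending on
                   // how the implementation defines NULL
      F(nullptr);  // always selects F(char*)
    }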
| Cell* GetFreeCell() { |
| // First try to re-use a cell from the free list. |
| if (free_list_) { |
| @@ -184,26 +197,14 @@ class FixedHashMap { |
| return cell; |
| } |
| - // Otherwise pick the next cell that has not been touched before. |
| - size_t idx = next_unused_cell_; |
| - next_unused_cell_++; |
| - |
| // If the hash table has too little capacity (when too little address space |
| - // was reserved for |cells_|), |next_unused_cell_| can be an index outside |
| - // of the allocated storage. A guard page is allocated there to crash the |
| - // program in that case. There are alternative solutions: |
| - // - Deal with it, increase capacity by reallocating |cells_|. |
| - // - Refuse to insert and let the caller deal with it. |
| - // Because free cells are re-used before accessing fresh cells with a higher |
| - // index, and because reserving address space without touching it is cheap, |
| - // the simplest solution is to just allocate a humongous chunk of address |
| - // space. |
| - |
| - CHECK_LT(next_unused_cell_, num_cells_ + 1) |
| - << "Allocation Register hash table has too little capacity. Increase " |
| - "the capacity to run heap profiler in large sessions."; |
| - |
| - return &cells_[idx]; |
| + // was reserved for |cells_|), return nullptr. |
| + if (next_unused_cell_ >= num_cells_) { |
| + return nullptr; |
| + } |
| + |
| + // Otherwise pick the next cell that has not been touched before. |
| + return &cells_[next_unused_cell_++]; |
| } |
| // Returns a value in the range [0, NumBuckets - 1] (inclusive). |
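The new GetFreeCell() control flow, restated as a standalone sketch (types and parameters simplified from the member function above): reuse a cell from the free list first, then bump-allocate from the mmapped array, and report exhaustion with nullptr instead of running into the guard page.

    #include <cstddef>

    struct Cell { Cell* next; };  // simplified; the real Cell holds key/value too

    Cell* GetFreeCell(Cell** free_list, Cell* cells, size_t* next_unused,
                      size_t num_cells) {
      // First try to re-use a cell from the free list.
      if (*free_list) {
        Cell* cell = *free_list;
        *free_list = cell->next;
        return cell;
      }
      // Every fresh cell is already in use: signal exhaustion to the caller.
      if (*next_unused >= num_cells)
        return nullptr;
      // Hand out the next untouched cell; lower indices go first to keep
      // resident memory low.
      return &cells[(*next_unused)++];
    }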
| @@ -219,6 +220,9 @@ class FixedHashMap { |
| // Number of cells. |
| size_t const num_cells_; |
| + // Number of calls to Insert() that were lost because the hashtable was full. |
| + int32_t num_inserts_dropped_; |
| + |
| // The array of cells. This array is backed by mmapped memory. Lower indices |
| // are accessed first, higher indices are accessed only when the |free_list_| |
| // is empty. This is to minimize the amount of resident memory used. |
| @@ -240,6 +244,10 @@ class FixedHashMap { |
| DISALLOW_COPY_AND_ASSIGN(FixedHashMap); |
| }; |
| +template <size_t NumBuckets, class Key, class Value, class KeyHasher> |
| +const typename FixedHashMap<NumBuckets, Key, Value, KeyHasher>::KVIndex |
| + FixedHashMap<NumBuckets, Key, Value, KeyHasher>::kInvalidKVIndex(-1); |
|
Primiano Tucci (use gerrit)
2017/03/30 13:32:41
I've never seen a -1 more strongly typed than this.
awong
2017/03/31 04:28:30
Yeah.... bleh to templates.
As noted in chat, we
Primiano Tucci (use gerrit)
2017/03/31 19:05:47
Acknowledged. But MSVC didn't like it unfortunately
|
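For reference, the general shape of the definition above (a generic sketch, not the patch's exact code): a static data member of a class template may be defined in the header, because the ODR allows repeated identical definitions of templated entities, and the typename keyword is required because the member's type is dependent.

    #include <cstddef>

    template <class T>
    struct Holder {
      using Index = std::size_t;
      static const Index kInvalid;
    };

    // Legal in a header: definitions of templated entities may appear in
    // every translation unit, as long as they are identical.
    template <class T>
    const typename Holder<T>::Index Holder<T>::kInvalid(-1);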
| + |
| } // namespace internal |
| class TraceEventMemoryOverhead; |
| @@ -282,7 +290,10 @@ class BASE_EXPORT AllocationRegister { |
| // Inserts allocation details into the table. If the address was present |
| // already, its details are updated. |address| must not be null. |
| - void Insert(const void* address, |
| + // |
| + // Returns true if an insert occurred. Inserts may fail because the table |
| + // is full. |
| + bool Insert(const void* address, |
| size_t size, |
| const AllocationContext& context); |
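A hedged usage sketch for the new return value (RecordAlloc is hypothetical; AllocationRegister and AllocationContext are the real types, assumed here to live in base::trace_event): callers can now detect a full table instead of crashing on the guard page.

    #include "base/trace_event/heap_profiler_allocation_register.h"

    // Hypothetical caller. Insert() now reports whether the allocation was
    // actually recorded.
    bool RecordAlloc(base::trace_event::AllocationRegister* reg,
                     const void* address,
                     size_t size,
                     const base::trace_event::AllocationContext& context) {
      // false means the fixed-size table was full; the table counts the
      // drop internally, so a caller may simply skip this allocation.
      return reg->Insert(address, size, context);
    }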