Index: base/trace_event/heap_profiler_allocation_register.h
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h
index d6a02faeaeaacca487e3ff13d7f71dac34768da1..6d63923f4c164e7a51a2eaf6686d94a87de1f4a9 100644
--- a/base/trace_event/heap_profiler_allocation_register.h
+++ b/base/trace_event/heap_profiler_allocation_register.h
@@ -48,24 +48,26 @@ class FixedHashMap {
   // For implementation simplicity API uses integer index instead
   // of iterators. Most operations (except Find) on KVIndex are O(1).
   using KVIndex = size_t;
-  static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1);
+  enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) };
 
   // Capacity controls how many items this hash map can hold, and largely
   // affects memory footprint.
   FixedHashMap(size_t capacity)
-    : num_cells_(capacity),
-      cells_(static_cast<Cell*>(
-          AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
-      buckets_(static_cast<Bucket*>(
-          AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
-      free_list_(nullptr),
-      next_unused_cell_(0) {}
+      : num_cells_(capacity),
+        num_inserts_dropped_(0),
+        cells_(static_cast<Cell*>(
+            AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))),
+        buckets_(static_cast<Bucket*>(
+            AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))),
+        free_list_(nullptr),
+        next_unused_cell_(0) {}
 
   ~FixedHashMap() {
     FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell));
     FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket));
   }
 
+  // Returns {kInvalidKVIndex, false} if the table is full.
   std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) {
     Cell** p_cell = Lookup(key);
     Cell* cell = *p_cell;
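
The constant is presumably switched to an enumerator because the new return {kInvalidKVIndex, false} path ODR-uses it: constructing the std::pair binds a const reference to the constant, and an ODR-used static const data member needs an out-of-class definition before C++17, while an enumerator never does. A minimal standalone sketch of the distinction; WithStaticConst and WithEnum are illustrative types, not code from this patch:

```cpp
#include <cstddef>
#include <utility>

// Illustrative only: contrasts the two ways of declaring the constant.
struct WithStaticConst {
  static const size_t kInvalid = static_cast<size_t>(-1);
  std::pair<size_t, bool> Full() {
    // Constructing the pair ODR-uses kInvalid; before C++17 this needs
    // "const size_t WithStaticConst::kInvalid;" in some .cc file, or the
    // build can fail at link time.
    return {kInvalid, false};
  }
};

struct WithEnum {
  enum : size_t { kInvalid = static_cast<size_t>(-1) };
  std::pair<size_t, bool> Full() {
    // An enumerator has no storage, so no out-of-class definition is needed.
    return {kInvalid, false};
  }
};
```
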
@@ -74,7 +76,15 @@ class FixedHashMap {
     }
 
     // Get a free cell and link it.
-    *p_cell = cell = GetFreeCell();
+    cell = GetFreeCell();
+    if (!cell) {
+      if (num_inserts_dropped_ <
+          std::numeric_limits<decltype(num_inserts_dropped_)>::max()) {
+        ++num_inserts_dropped_;
+      }
+      return {kInvalidKVIndex, false};
+    }
+    *p_cell = cell;
     cell->p_prev = p_cell;
     cell->next = nullptr;
 
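
The drop counter saturates instead of wrapping, so a long session cannot roll over to zero and under-report how much data was lost; the guard relies on std::numeric_limits and therefore assumes <limits> is reachable from this header (whether this patch adds that include is not visible in this section). A self-contained sketch of the same pattern with illustrative names:

```cpp
#include <cstddef>
#include <cstdio>
#include <limits>

// Illustrative saturating counter mirroring the increment guard above.
class DropCounter {
 public:
  void RecordDrop() {
    // Stick at the maximum value rather than wrapping back to zero.
    if (count_ < std::numeric_limits<size_t>::max())
      ++count_;
  }
  size_t count() const { return count_; }

 private:
  size_t count_ = 0;
};

int main() {
  DropCounter drops;
  for (int i = 0; i < 3; ++i)
    drops.RecordDrop();
  std::printf("dropped %zu insert(s)\n", drops.count());  // prints: dropped 3 insert(s)
}
```
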
@@ -137,6 +147,8 @@ class FixedHashMap {
            bits::Align(sizeof(Bucket) * NumBuckets, page_size);
   }
 
+  size_t num_inserts_dropped() const { return num_inserts_dropped_; }
+
  private:
   friend base::trace_event::AllocationRegisterTest;
 
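
A plausible way the new accessor would be consumed, for example when emitting heap-dump statistics; LogDroppedInserts and its template parameter are assumptions for illustration, not code from this patch:

```cpp
#include <cstddef>
#include <cstdio>

// Hypothetical reporting helper: works with anything exposing the new
// num_inserts_dropped() accessor, such as the FixedHashMap instantiations
// owned by AllocationRegister.
template <typename Map>
void LogDroppedInserts(const Map& map, const char* name) {
  size_t dropped = map.num_inserts_dropped();
  if (dropped > 0)
    std::fprintf(stderr, "%s dropped %zu insert(s)\n", name, dropped);
}
```
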
@@ -175,7 +187,8 @@ class FixedHashMap {
   }
 
   // Returns a cell that is not being used to store an entry (either by
-  // recycling from the free list or by taking a fresh cell).
+  // recycling from the free list or by taking a fresh cell). May return
+  // nullptr if the hash table has run out of memory.
   Cell* GetFreeCell() {
     // First try to re-use a cell from the free list.
     if (free_list_) {
@@ -184,26 +197,14 @@ class FixedHashMap {
       return cell;
     }
 
-    // Otherwise pick the next cell that has not been touched before.
-    size_t idx = next_unused_cell_;
-    next_unused_cell_++;
-
     // If the hash table has too little capacity (when too little address space
-    // was reserved for |cells_|), |next_unused_cell_| can be an index outside
-    // of the allocated storage. A guard page is allocated there to crash the
-    // program in that case. There are alternative solutions:
-    // - Deal with it, increase capacity by reallocating |cells_|.
-    // - Refuse to insert and let the caller deal with it.
-    // Because free cells are re-used before accessing fresh cells with a higher
-    // index, and because reserving address space without touching it is cheap,
-    // the simplest solution is to just allocate a humongous chunk of address
-    // space.
-
-    CHECK_LT(next_unused_cell_, num_cells_ + 1)
-        << "Allocation Register hash table has too little capacity. Increase "
-           "the capacity to run heap profiler in large sessions.";
-
-    return &cells_[idx];
+    // was reserved for |cells_|), return nullptr.
+    if (next_unused_cell_ >= num_cells_) {
+      return nullptr;
+    }
+
+    // Otherwise pick the next cell that has not been touched before.
+    return &cells_[next_unused_cell_++];
   }
 
   // Returns a value in the range [0, NumBuckets - 1] (inclusive).
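
Together with the Insert() change above, GetFreeCell() now reports exhaustion to the caller instead of relying on a guard page to crash the process. A self-contained sketch of the resulting allocation policy, using a plain array where the real code uses guarded virtual memory:

```cpp
#include <cstddef>

// Illustrative cell pool following the same policy as GetFreeCell() above:
// 1. recycle from the free list, 2. otherwise hand out the next untouched
// cell, 3. return nullptr once the fixed capacity is exhausted so the caller
// can drop the insert instead of crashing.
struct Cell {
  Cell* next = nullptr;
};

class CellPool {
 public:
  CellPool(Cell* cells, size_t num_cells)
      : cells_(cells), num_cells_(num_cells) {}

  Cell* GetFreeCell() {
    if (free_list_) {
      Cell* cell = free_list_;
      free_list_ = cell->next;
      return cell;
    }
    if (next_unused_cell_ >= num_cells_)
      return nullptr;
    return &cells_[next_unused_cell_++];
  }

  void Release(Cell* cell) {
    cell->next = free_list_;
    free_list_ = cell;
  }

 private:
  Cell* const cells_;
  const size_t num_cells_;
  Cell* free_list_ = nullptr;
  size_t next_unused_cell_ = 0;
};
```
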
@@ -219,6 +220,9 @@ class FixedHashMap {
   // Number of cells.
   size_t const num_cells_;
 
+  // Number of calls to Insert() that were lost because the hashtable was full.
+  size_t num_inserts_dropped_;
+
   // The array of cells. This array is backed by mmapped memory. Lower indices
   // are accessed first, higher indices are accessed only when the |free_list_|
   // is empty. This is to minimize the amount of resident memory used.
@@ -282,7 +286,10 @@ class BASE_EXPORT AllocationRegister {
 
   // Inserts allocation details into the table. If the address was present
   // already, its details are updated. |address| must not be null.
-  void Insert(const void* address,
+  //
+  // Returns true if an insert occurred. Inserts may fail because the table
+  // is full.
+  bool Insert(const void* address,
               size_t size,
               const AllocationContext& context);
 
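
Since Insert() now returns bool, call sites can notice when an allocation was silently skipped. A hedged sketch of such a call site; RecordAlloc, the template parameters, and the dropped-allocation counter are illustrative assumptions, not code from this patch:

```cpp
#include <cstddef>

// Hypothetical call-site wrapper: forward the event to the register and
// count the allocations it could not record because the table was full.
template <typename Register, typename Context>
void RecordAlloc(Register& reg,
                 const void* address,
                 size_t size,
                 const Context& context,
                 size_t* dropped_allocations) {
  if (!reg.Insert(address, size, context))
    ++*dropped_allocations;  // this allocation will be missing from the dump
}
```
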
@@ -359,6 +366,9 @@ class BASE_EXPORT AllocationRegister {
   AllocationMap allocations_;
   BacktraceMap backtraces_;
 
+  // Sentinel used when we run out of backtraces_ storage.
+  BacktraceMap::KVIndex out_of_storage_backtrace_index_;
+
   DISALLOW_COPY_AND_ASSIGN(AllocationRegister);
 };
 
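
The header alone does not show how out_of_storage_backtrace_index_ is consumed. One plausible scheme for the companion .cc (an assumption, not part of this diff) is to pre-insert a placeholder backtrace while the table still has room and fall back to its index whenever backtraces_ refuses a new entry:

```cpp
// Hypothetical sketch only: InsertBacktrace and the placeholder setup are
// assumptions about the companion .cc, which is not shown in this header diff.
AllocationRegister::BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
    const Backtrace& backtrace) {
  auto index = backtraces_.Insert(backtrace, 0).first;
  if (index == BacktraceMap::kInvalidKVIndex) {
    // Out of backtrace storage: reuse the sentinel entry that was inserted
    // while the table was known to have room (e.g. in the constructor), so
    // the allocation itself can still be recorded under a placeholder.
    index = out_of_storage_backtrace_index_;
  }
  return index;
}
```
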