Index: base/trace_event/heap_profiler_allocation_register.h |
diff --git a/base/trace_event/heap_profiler_allocation_register.h b/base/trace_event/heap_profiler_allocation_register.h |
index 976f2f50a9c24ff1656a104c46ed8d289d6d1a6a..3b151e5ae355aa2d26022835648e664546da06ed 100644 |
--- a/base/trace_event/heap_profiler_allocation_register.h |
+++ b/base/trace_event/heap_profiler_allocation_register.h |
@@ -8,77 +8,303 @@ |
#include <stddef.h> |
#include <stdint.h> |
+#include "base/bits.h" |
#include "base/logging.h" |
#include "base/macros.h" |
+#include "base/process/process_metrics.h" |
#include "base/trace_event/heap_profiler_allocation_context.h" |
namespace base { |
namespace trace_event { |
+class AllocationRegisterTest; |
+ |
+namespace internal { |
+ |
+// Allocates a region of virtual address space of |size| rounded up to the |
+// system page size. The memory is zeroed by the system. A guard page is |
+// added after the end. |
+void* AllocateGuardedVirtualMemory(size_t size); |
+ |
+// Frees a region of virtual address space allocated by a call to |
+// |AllocateGuardedVirtualMemory|. |
+void FreeGuardedVirtualMemory(void* address, size_t allocated_size); |
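The definitions of these two functions live in the .cc file and are not part of this hunk. A minimal POSIX-only sketch of the guard-page idea follows; the function name, flags, and omitted error handling are illustrative assumptions, not the actual Chromium implementation.

    #include <sys/mman.h>
    #include <unistd.h>

    // Sketch only: reserve |size| rounded up to the page size, plus one extra
    // page that is made inaccessible so that writes past the end fault.
    // (Error handling omitted for brevity.)
    void* AllocateGuardedVirtualMemorySketch(size_t size) {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      size = (size + page_size - 1) & ~(page_size - 1);
      void* addr = mmap(nullptr, size + page_size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);  // zeroed by the OS
      mprotect(static_cast<char*>(addr) + size, page_size, PROT_NONE);  // guard page
      return addr;
    }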
+ |
+// Hash map that mmaps memory only once in the constructor. Its API is |
+// similar to std::unordered_map, only an index (KVIndex) is used to address |
+// entries. |
+template <class Key, class Value, class KeyHasher> |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: Do you really need the KeyHasher arg here? I think
Dmitry Skiba, 2016/06/28 10:55:00: This is for AddressHasher, which implements fast h
Primiano Tucci (use gerrit), 2016/06/28 14:23:07: Ahh Ok I see the point now. Address is a simple ty
+class FixedHashMap { |
+ public: |
+ using KVPair = std::pair<const Key, Value>; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: Include What You Use: add #include to <utility> up
Dmitry Skiba, 2016/06/28 10:54:58: Done.
+ |
+  // For implementation simplicity, the API uses an integer index instead |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: /me loves simplicity! \o/
+ // of iterators. Most operations (except FindValidIndex) on KVIndex |
+ // are O(1). |
+ using KVIndex = size_t; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:26: should we just use size_t everywhere here? I feel
Dmitry Skiba, 2016/06/28 10:54:59: Yes, functionally KVIndex is useless, but it helps
Primiano Tucci (use gerrit), 2016/06/28 14:23:07: Ok I see, makes sense.
+ static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1); |
+ |
+  // The number of cells controls how many items this hash map can hold. Since |
+  // a cell includes both key and value, this number also largely affects the |
+  // memory footprint. |
+  // The number of buckets controls how many collisions there will be. A bucket |
+  // is just a pointer, so the bucket count can be large(ish); it's also a good |
+  // idea to make it a prime number. |
+ FixedHashMap(size_t num_buckets, size_t num_cells) |
+ : num_buckets_(num_buckets), |
+ num_cells_(num_cells), |
+ cells_(static_cast<Cell*>( |
+ AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), |
+ buckets_(static_cast<Cell**>( |
+ AllocateGuardedVirtualMemory(num_buckets_ * sizeof(Bucket)))), |
+ free_list_(nullptr), |
+ next_unused_cell_(0) {} |
+ |
+ ~FixedHashMap() { |
+ RemoveAll(); |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: do you really need to call RemoveAll here? Once yo
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell)); |
+ FreeGuardedVirtualMemory(buckets_, num_buckets_ * sizeof(Bucket)); |
+ } |
+ |
+ std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) { |
+ Cell** pcell = Lookup(key); |
+ Cell* cell = *pcell; |
+ if (cell) { |
+ return {KVIndex(cell - cells_), false}; // not inserted |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: shouldn't this be a static_cast<KVIndex>(cell - ce
Dmitry Skiba, 2016/06/28 10:54:58: Done.
+ } |
+ |
+ // Get a free cell and link it. |
+ *pcell = cell = GetFreeCell(); |
+ cell->pself = pcell; |
+ cell->next = nullptr; |
+ |
+ // Construct cell's value. |
+ new (&cell->kv) KVPair(key, value); |
Primiano Tucci (use gerrit), 2016/06/23 20:46:26: why this is not just: cell->kv.first = key cell->k
Dmitry Skiba, 2016/06/28 10:54:59: Actually, we can't do kv.first = key, because firs
+ |
+ return {KVIndex(cell - cells_), true}; // inserted |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: ditto here about static_cast
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ } |
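To make the pair-returning Insert() concrete, a hypothetical caller, assuming a map declared as FixedHashMap<const void*, size_t, SomeHasher> (names invented for illustration), might look like this:

    auto inserted = map.Insert(address, size);   // std::pair<KVIndex, bool>
    if (!inserted.second) {
      // The key was already present; Insert() leaves the stored value
      // untouched, so update it through the returned index instead.
      map.Get(inserted.first).second = size;
    }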
+ |
+ void Remove(KVIndex index) { |
+ if (index == kInvalidKVIndex) { |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: should this be if (index >= num_cells_)?
Dmitry Skiba, 2016/06/28 10:54:59: Right, and I'll also convert that to DCHECK, becau
+ return; |
+ } |
+ |
+ Cell* cell = &cells_[index]; |
+ |
+ // Unlink the cell. |
+ *cell->pself = cell->next; |
+ if (cell->next) { |
+ cell->next->pself = cell->pself; |
+ } |
+ cell->pself = nullptr; // mark as free |
+ |
+ // Add it to the free list. |
+ cell->next = free_list_; |
+ free_list_ = cell; |
+ |
+ // Destruct cell's value. |
+ cell->kv.~KVPair(); |
+ } |
+ |
+ void RemoveAll() { |
Primiano Tucci (use gerrit), 2016/06/23 20:46:24: Looks like we never end up calling this. Should we
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ KVIndex index = FindValidIndex(0); |
+ while (index != kInvalidKVIndex) { |
+ Remove(index); |
+ index = FindValidIndex(index + 1); |
+ } |
+ } |
+ |
+ KVIndex Find(const Key& key) const { |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: Not sure why you have this returning an index at a
Dmitry Skiba, 2016/06/28 10:54:59: I need index for backtrace_index, i.e. I need some
+ Cell* cell = *Lookup(key); |
+ return cell ? KVIndex(cell - cells_) : kInvalidKVIndex; |
+ } |
+ |
+ KVPair& Get(KVIndex index) { |
+ return cells_[index].kv; |
+ } |
+ |
+ const KVPair& Get(KVIndex index) const { |
+ return cells_[index].kv; |
+ } |
+ |
+ // Finds next index that has a KVPair associated with it. Search starts |
+ // with the specified index. Returns kInvalidKVIndex if nothing was found. |
+ // To find the first valid index, call this function with 0. Continue |
+ // calling with the last_index + 1 until kInvalidKVIndex is returned. |
+ KVIndex FindValidIndex(KVIndex index) const { |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: s/FindValidIndex/FindNextValidCellIndex/ (or just
Dmitry Skiba, 2016/06/28 10:55:00: The thing with 'Cell' is that it doesn't appear an
Primiano Tucci (use gerrit), 2016/06/28 14:23:07: Oh right realized only now. I thought this was a p
Dmitry Skiba, 2016/06/29 16:12:26: Done.
+ for (;index < next_unused_cell_; ++index) { |
+ if (cells_[index].pself) { |
+ return index; |
+ } |
+ } |
+ return kInvalidKVIndex; |
+ } |
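Following the protocol described in the comment above, a full scan might look roughly like this, assuming `using Map = FixedHashMap<const void*, size_t, SomeHasher>;` and an instance `map` (illustrative names, not from this patch):

    for (Map::KVIndex i = map.FindValidIndex(0); i != Map::kInvalidKVIndex;
         i = map.FindValidIndex(i + 1)) {
      const Map::KVPair& kv = map.Get(i);
      // kv.first is the key, kv.second is the value.
    }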
+ |
+ // Estimates number of dirty bytes in allocated memory regions. |
+ size_t EstimateAllocatedDirty() const { |
Primiano Tucci (use gerrit), 2016/06/23 20:46:24: maybe EstimateResidentMemory or EstimateDirtyMemor
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ size_t page_size = base::GetPageSize(); |
+    // |next_unused_cell_| is the index of the first cell that wasn't touched, |
+    // i.e. it equals the number of touched cells. |
+ return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) + |
+ bits::Align(sizeof(Bucket) * num_buckets_, page_size); |
+ } |
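As a rough worked example (the page size and cell size here are assumptions, not values from this patch): with 4 KiB pages and a 40-byte Cell, 1000 touched cells cost bits::Align(40 * 1000, 4096) = 40960 bytes, i.e. 10 resident pages, regardless of how much larger the reserved |cells_| region is.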
+ |
+ private: |
+ friend base::trace_event::AllocationRegisterTest; |
+ |
+ struct Cell { |
+ KVPair kv; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: I think if you put kv at the end you might get som
Dmitry Skiba, 2016/06/28 10:54:59: Hmm, but both 'next' and 'p_prev' are pointers, so
Primiano Tucci (use gerrit), 2016/06/28 14:23:07: Right silly comment from my side, needs to be alig
+ Cell* next; |
+ |
+ // Conceptually this is |prev| in a double linked list. However, buckets |
Primiano Tucci (use gerrit), 2016/06/23 20:46:26: s/double/doubly/
Dmitry Skiba, 2016/06/28 10:54:58: Done.
+ // also participate in the bucket's cell list - they point to the list's |
+ // head and also need to be linked / unlinked properly. To treat these two |
+ // cases uniformly, instead of |prev| we're storing "pointer to a Cell* |
+ // that points to this Cell" kind of thing. So |pself| points to a bucket |
+ // for the first cell in a list, and points to |next| of the previous cell |
+ // for any other cell. With that Lookup() is the only function that handles |
+ // buckets / cells differently. |
+ // If |pself| is nullptr, the cell is in the free list. |
+ Cell** pself; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: I agree with all the reasoning here. Just from a n
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ }; |
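To make the |pself| scheme above concrete, here is how a bucket holding two cells is wired (illustration only, not part of the patch):

    //   buckets_[h] --> cellA --> cellB --> nullptr
    //
    //   cellA.pself == &buckets_[h]   (first cell: pself points at the bucket)
    //   cellB.pself == &cellA.next    (later cells: pself points at prev's next)
    //
    // Unlinking either cell is then the same single write used in Remove():
    //   *cell->pself = cell->next;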
+ |
+ using Bucket = Cell*; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: It seems you use this only in one place below, I'd
Dmitry Skiba, 2016/06/28 10:54:59: Actually I added it for ctor/dtor - without it st
+ |
+ // Returns a pointer to the cell that contains or should contain the entry |
+ // for |key|. The pointer may point at an element of |buckets_| or at the |
+ // |next| member of an element of |cells_|. |
+ Cell** Lookup(const Key& key) const { |
+ // The list head is in |buckets_| at the hash offset. |
+ Cell** pcell = &buckets_[Hash(key)]; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: I think that p_cell is more readable (here and els
Dmitry Skiba, 2016/06/28 10:55:00: Done.
+ |
+ // Chase down the list until the cell that holds |key| is found, |
+ // or until the list ends. |
+ while (*pcell) { |
+ Cell* cell = *pcell; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:24: This line IMHO just makes the loop more complicate
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ if (cell->kv.first == key) { |
+ break; |
+ } |
+ pcell = &cell->next; |
+ } |
+ |
+ return pcell; |
+ } |
+ |
+ // Returns a cell that is not being used to store an entry (either by |
+ // recycling from the free list or by taking a fresh cell). |
+ Cell* GetFreeCell() { |
+ // First try to re-use a cell from the free list. |
+ if (free_list_) { |
+ Cell* cell = free_list_; |
+ free_list_ = cell->next; |
+ return cell; |
+ } |
+ |
+ // Otherwise pick the next cell that has not been touched before. |
+ size_t idx = next_unused_cell_; |
+ next_unused_cell_++; |
+ |
+ // If the hash table has too little capacity (when too little address space |
+ // was reserved for |cells_|), |next_unused_cell_| can be an index outside |
+ // of the allocated storage. A guard page is allocated there to crash the |
+ // program in that case. There are alternative solutions: |
+ // - Deal with it, increase capacity by reallocating |cells_|. |
+ // - Refuse to insert and let the caller deal with it. |
+ // Because free cells are re-used before accessing fresh cells with a higher |
+ // index, and because reserving address space without touching it is cheap, |
+ // the simplest solution is to just allocate a humongous chunk of address |
+ // space. |
+ |
+ DCHECK_LT(next_unused_cell_, num_cells_ + 1); |
+ |
+ return &cells_[idx]; |
+ } |
+ |
+  // Returns a value in the range [0, num_buckets_ - 1] (inclusive). |
+ size_t Hash(const Key& key) const { |
+ return KeyHasher()(key) % num_buckets_; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: tip: if we enforced that num_buckets was a power o
Dmitry Skiba, 2016/06/28 10:54:59: Done. However, it required change to the backtrace
+ } |
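The power-of-two alternative raised in the review thread would replace the modulo with a mask. A sketch of that variant (not the code in this patch):

    size_t Hash(const Key& key) const {
      // Valid only if num_buckets_ is a power of two.
      return KeyHasher()(key) & (num_buckets_ - 1);
    }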
+ |
+ // Number of buckets. |
+ size_t const num_buckets_; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: I wonder if this (and num_cells_) should instead b
Dmitry Skiba, 2016/06/28 10:55:00: Hmm, unlike number of buckets number of cells is n
+ |
+ // Number of cells. |
+ size_t const num_cells_; |
+ |
+ // The array of cells. This array is backed by mmapped memory. Lower indices |
+ // are accessed first, higher indices are only accessed when required. In |
Primiano Tucci (use gerrit), 2016/06/23 20:46:24: s/when required/only when the |free_list_| is empt
Dmitry Skiba, 2016/06/28 10:55:00: Done.
+ // this way, even if a huge amount of address space has been mmapped, only |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: I'd just say, instead of "In this way..." -> "This
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ // the cells that are actually used will be backed by physical memory. |
+ Cell* const cells_; |
+ |
+ // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will |
+ // contain the index of the head of the linked list for |Hash(key)|. |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: does it really contain the index? This seems to be
Dmitry Skiba, 2016/06/28 10:54:59: Right, this is a leftover from the old implementat
+ // This array is backed by mmapped memory. |
+ mutable Bucket* buckets_; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:24: I think this is more readable as: Cell*[] buckets_
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: why is this mutable?
Dmitry Skiba, 2016/06/28 10:54:59: Find() is const, and it uses Lookup() which return
Primiano Tucci (use gerrit), 2016/06/28 14:23:07: I see, isn't the right thing to do having Lookup r
Dmitry Skiba, 2016/06/29 16:12:26: Hmm, but mutable was created exactly to avoid doin
Primiano Tucci (use gerrit), 2016/06/29 16:55:03: Well depends on the cast. mutable is more for thin
+ |
+ // The head of the free list. |
+ Cell* free_list_; |
+ |
+ // The index of the first element of |cells_| that has not been used before. |
+ // If the free list is empty and a new cell is needed, the cell at this index |
+ // is used. This is the high water mark for the number of entries stored. |
+ size_t next_unused_cell_; |
+ |
+ DISALLOW_COPY_AND_ASSIGN(FixedHashMap); |
Primiano Tucci (use gerrit), 2016/06/23 20:46:26: I think you need the template arguments here?
Dmitry Skiba, 2016/06/28 10:54:59: Yeah, you can just use class name in templates, an
+}; |
+ |
+} // namespace internal |
+ |
class TraceEventMemoryOverhead; |
// The allocation register keeps track of all allocations that have not been |
-// freed. It is a memory map-backed hash table that stores size and context |
-// indexed by address. The hash table is tailored specifically for this use |
-// case. The common case is that an entry is inserted and removed after a |
-// while, lookup without modifying the table is not an intended use case. The |
-// hash table is implemented as an array of linked lists. The size of this |
-// array is fixed, but it does not limit the amount of entries that can be |
-// stored. |
-// |
-// Replaying a recording of Chrome's allocations and frees against this hash |
-// table takes about 15% of the time that it takes to replay them against |
-// |std::map|. |
+// freed. Internally it has two hash tables: one for Backtraces and one for |
+// actual allocations. The sizes of both hash tables are fixed, and this class |
+// allocates (mmaps) only in its constructor. |
class BASE_EXPORT AllocationRegister { |
public: |
- // The data stored in the hash table; |
- // contains the details about an allocation. |
+ // Details about an allocation. |
struct Allocation { |
- void* const address; |
+ const void* address; |
size_t size; |
AllocationContext context; |
}; |
- // An iterator that iterates entries in the hash table efficiently, but in no |
- // particular order. It can do this by iterating the cells and ignoring the |
- // linked lists altogether. Instead of checking whether a cell is in the free |
- // list to see if it should be skipped, a null address is used to indicate |
- // that a cell is free. |
+ // An iterator that iterates entries in no particular order. |
class BASE_EXPORT ConstIterator { |
public: |
void operator++(); |
bool operator!=(const ConstIterator& other) const; |
- const Allocation& operator*() const; |
+ Allocation operator*() const; |
private: |
friend class AllocationRegister; |
- using CellIndex = uint32_t; |
+ using AllocationKVIndex = size_t; |
- ConstIterator(const AllocationRegister& alloc_register, CellIndex index); |
+ ConstIterator(const AllocationRegister& alloc_register, |
+ AllocationKVIndex index); |
const AllocationRegister& register_; |
- CellIndex index_; |
+ AllocationKVIndex index_; |
}; |
AllocationRegister(); |
- explicit AllocationRegister(uint32_t num_cells); |
+ explicit AllocationRegister(size_t num_allocation_cells, |
Primiano Tucci (use gerrit), 2016/06/23 20:46:26: no need for explicit if you have two args here
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ size_t num_backtrace_cells); |
~AllocationRegister(); |
// Inserts allocation details into the table. If the address was present |
- // already, its details are updated. |address| must not be null. (This is |
- // because null is used to mark free cells, to allow efficient iteration of |
- // the hash table.) |
- void Insert(void* address, size_t size, AllocationContext context); |
+ // already, its details are updated. |address| must not be null. |
+ void Insert(const void* address, |
+ size_t size, |
+ const AllocationContext& context); |
// Removes the address from the table if it is present. It is ok to call this |
// with a null pointer. |
- void Remove(void* address); |
+ void Remove(const void* address); |
- // Returns a pointer to the allocation at the address, or null if there is no |
- // allocation at that address. This can be used to change the allocation |
- // context after insertion, for example to change the type name. |
- Allocation* Get(void* address); |
+  // Finds the allocation for |address| and fills |out_allocation|. Returns |
+  // whether an allocation was found. |
+ bool Get(const void* address, Allocation* out_allocation) const; |
Primiano Tucci (use gerrit), 2016/06/23 20:46:25: out of curiosity what was the issue with returning
Dmitry Skiba, 2016/06/28 10:55:00: I simply can't :) The issue is that previously 'Al
ConstIterator begin() const; |
ConstIterator end() const; |
@@ -87,85 +313,44 @@ class BASE_EXPORT AllocationRegister { |
void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const; |
private: |
- friend class AllocationRegisterTest; |
- using CellIndex = uint32_t; |
- |
- // A cell can store allocation details (size and context) by address. Cells |
- // are part of a linked list via the |next| member. This list is either the |
- // list for a particular hash, or the free list. All cells are contiguous in |
- // memory in one big array. Therefore, on 64-bit systems, space can be saved |
- // by storing 32-bit indices instead of pointers as links. Index 0 is used as |
- // the list terminator. |
- struct Cell { |
- CellIndex next; |
- Allocation allocation; |
+ friend AllocationRegisterTest; |
+ |
+ // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal |
+ // hashing and should be changed together with AddressHasher. |
+ static const size_t kNumAllocationBuckets = 0x40000; |
+ static const size_t kNumAllocationCells = 1500000; |
+ |
+ // Expect max 30K unique backtraces. |
+ static const size_t kNumBacktraceBuckets = 30011; // prime |
Primiano Tucci (use gerrit), 2016/06/23 20:46:26: nit: add extra space before comment Maybe the pow2
Dmitry Skiba, 2016/06/28 10:54:59: Done.
+ static const size_t kNumBacktraceCells = kNumBacktraceBuckets; |
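For a rough sense of scale on a 64-bit build (the cell size here is an estimate, not a number from this patch): if an allocation cell is on the order of 48 bytes, 1,500,000 cells reserve roughly 70 MB of address space, but because FixedHashMap only touches cells on demand, the resident cost tracks the number of live allocations rather than this reserved maximum.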
+ |
+ using BacktraceMap = internal::FixedHashMap< |
+ Backtrace, |
+ size_t, |
Primiano Tucci (use gerrit), 2016/06/23 20:46:26: add a comment explaining that this size_t is an in
Dmitry Skiba, 2016/06/28 10:55:00: Yeah, it desperately needs a comment, because actu
+ BASE_HASH_NAMESPACE::hash<Backtrace>>; |
+ |
+ struct AllocationInfo { |
+ size_t size; |
+ const char* type_name; |
+ BacktraceMap::KVIndex backtrace_index; |
}; |
- // The number of buckets, 2^17, approximately 130 000, has been tuned for |
- // Chrome's typical number of outstanding allocations. (This number varies |
- // between processes. Most processes have a sustained load of ~30k unfreed |
- // allocations, but some processes have peeks around 100k-400k allocations.) |
- // Because of the size of the table, it is likely that every |buckets_| |
- // access and every |cells_| access will incur a cache miss. Microbenchmarks |
- // suggest that it is worthwile to use more memory for the table to avoid |
- // chasing down the linked list, until the size is 2^18. The number of buckets |
- // is a power of two so modular indexing can be done with bitwise and. |
- static const uint32_t kNumBuckets = 0x20000; |
- static const uint32_t kNumBucketsMask = kNumBuckets - 1; |
- |
- // Reserve address space to store at most this number of entries. High |
- // capacity does not imply high memory usage due to the access pattern. The |
- // only constraint on the number of cells is that on 32-bit systems address |
- // space is scarce (i.e. reserving 2GiB of address space for the entries is |
- // not an option). A value of ~3M entries is large enough to handle spikes in |
- // the number of allocations, and modest enough to require no more than a few |
- // dozens of MiB of address space. |
- static const uint32_t kNumCellsPerBucket = 10; |
- |
- // Returns a value in the range [0, kNumBuckets - 1] (inclusive). |
- static uint32_t Hash(void* address); |
- |
- // Allocates a region of virtual address space of |size| rounded up to the |
- // system page size. The memory is zeroed by the system. A guard page is |
- // added after the end. |
- static void* AllocateVirtualMemory(size_t size); |
- |
- // Frees a region of virtual address space allocated by a call to |
- // |AllocateVirtualMemory|. |
- static void FreeVirtualMemory(void* address, size_t allocated_size); |
- |
- // Returns a pointer to the variable that contains or should contain the |
- // index of the cell that stores the entry for |address|. The pointer may |
- // point at an element of |buckets_| or at the |next| member of an element of |
- // |cells_|. If the value pointed at is 0, |address| is not in the table. |
- CellIndex* Lookup(void* address); |
- |
- // Takes a cell that is not being used to store an entry (either by recycling |
- // from the free list or by taking a fresh cell) and returns its index. |
- CellIndex GetFreeCell(); |
- |
- // The maximum number of cells which can be allocated. |
- uint32_t const num_cells_; |
+ struct AddressHasher { |
+ size_t operator () (const void* address) const; |
+ }; |
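The actual AddressHasher is defined in the .cc file, which is not part of this hunk. A typical pointer hash of this kind (name and constants invented for illustration) multiplies by a large odd constant and discards the low bits, which are mostly zero because allocations are aligned:

    // Illustrative only; requires <cstddef> and <cstdint>.
    struct ExampleAddressHasher {
      size_t operator()(const void* address) const {
        const uintptr_t key = reinterpret_cast<uintptr_t>(address);
        return static_cast<size_t>((key * 131101) >> 4);  // arbitrary constants
      }
    };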
- // The array of cells. This array is backed by mmapped memory. Lower indices |
- // are accessed first, higher indices are only accessed when required. In |
- // this way, even if a huge amount of address space has been mmapped, only |
- // the cells that are actually used will be backed by physical memory. |
- Cell* const cells_; |
+ using AllocationMap = internal::FixedHashMap< |
+ const void*, |
+ AllocationInfo, |
+ AddressHasher>; |
- // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain |
- // the index of the head of the linked list for |Hash(address)|. A value of 0 |
- // indicates an empty list. This array is backed by mmapped memory. |
- CellIndex* const buckets_; |
+ BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace); |
+ void RemoveBacktrace(BacktraceMap::KVIndex index); |
- // The head of the free list. This is the index of the cell. A value of 0 |
- // means that the free list is empty. |
- CellIndex free_list_; |
+ Allocation GetAllocation(AllocationMap::KVIndex) const; |
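GetAllocation() is defined in the .cc file (not in this hunk); presumably it stitches an Allocation back together from the two maps, roughly along these lines (a sketch under the assumption that AllocationContext exposes backtrace and type_name members):

    Allocation AllocationRegister::GetAllocation(
        AllocationMap::KVIndex index) const {
      const AllocationMap::KVPair& kv = allocations_.Get(index);
      Allocation allocation;
      allocation.address = kv.first;
      allocation.size = kv.second.size;
      allocation.context.backtrace =
          backtraces_.Get(kv.second.backtrace_index).first;
      allocation.context.type_name = kv.second.type_name;
      return allocation;
    }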
- // The index of the first element of |cells_| that has not been used before. |
- // If the free list is empty and a new cell is needed, the cell at this index |
- // is used. This is the high water mark for the number of entries stored. |
- CellIndex next_unused_cell_; |
+ AllocationMap allocations_; |
+ BacktraceMap backtraces_; |
DISALLOW_COPY_AND_ASSIGN(AllocationRegister); |
}; |