OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
7 | 7 |
8 #include <stddef.h> | 8 #include <stddef.h> |
9 #include <stdint.h> | 9 #include <stdint.h> |
10 | 10 |
11 #include <utility> | |
12 | |
13 #include "base/bits.h" | |
11 #include "base/logging.h" | 14 #include "base/logging.h" |
12 #include "base/macros.h" | 15 #include "base/macros.h" |
16 #include "base/process/process_metrics.h" | |
17 #include "base/template_util.h" | |
13 #include "base/trace_event/heap_profiler_allocation_context.h" | 18 #include "base/trace_event/heap_profiler_allocation_context.h" |
14 | 19 |
15 namespace base { | 20 namespace base { |
16 namespace trace_event { | 21 namespace trace_event { |
17 | 22 |
23 class AllocationRegisterTest; | |
24 | |
25 namespace internal { | |
26 | |
27 // Allocates a region of virtual address space of |size| rounded up to the | |
28 // system page size. The memory is zeroed by the system. A guard page is | |
29 // added after the end. | |
30 void* AllocateGuardedVirtualMemory(size_t size); | |
31 | |
32 // Frees a region of virtual address space allocated by a call to | |
33 // |AllocateGuardedVirtualMemory|. | |
34 void FreeGuardedVirtualMemory(void* address, size_t allocated_size); | |
35 | |
36 // Hash map that mmaps memory only once in the constructor. Its API is | |
37 // similar to std::unordered_map, except an index (KVIndex) addresses entries. | |
38 template <size_t NumBuckets, class Key, class Value, class KeyHasher> | |
39 class FixedHashMap { | |
40 // To keep things simple we don't call destructors. | |
41 static_assert(is_trivially_destructible<Key>::value && | |
42 is_trivially_destructible<Value>::value, | |
43 "Key and Value shouldn't have destructors"); | |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
Excellent, TIL std::is_trivially_destructible. I t
Dmitry Skiba
2016/06/29 16:12:26
Yeah, the only problem is that std::is_trivially_d
| |
44 public: | |
45 using KVPair = std::pair<const Key, Value>; | |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
to be honest I think that this "const" here is cau
Dmitry Skiba
2016/06/29 16:12:26
This is how std::map/set/etc does it, and it makes
Primiano Tucci (use gerrit)
2016/06/29 16:55:03
ok fine. As we don't have destructors anymore agre
| |
46 | |
47 // For implementation simplicity the API uses an integer index instead | |
48 // of iterators. Most operations (except FindNextIndex) on KVIndex | |
49 // are O(1). | |
50 using KVIndex = size_t; | |
51 static const KVIndex kInvalidKVIndex = static_cast<KVIndex>(-1); | |
52 | |
53 // The number of cells controls how many items this hash map can hold. Since | |
54 // a cell includes both the key and the value, this number also largely | |
55 // affects the memory footprint. | |
56 // The number of buckets controls how many collisions there will be. A bucket | |
57 // is just a pointer, so the count should be large(ish). It's also a good idea | |
58 // to make it a prime number. | |
59 FixedHashMap(size_t num_cells) | |
60 : num_cells_(num_cells), | |
61 cells_(static_cast<Cell*>( | |
62 AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), | |
63 buckets_(static_cast<Bucket*>( | |
64 AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))), | |
65 free_list_(nullptr), | |
66 next_unused_cell_(0) {} | |
67 | |
68 ~FixedHashMap() { | |
69 FreeGuardedVirtualMemory(cells_, num_cells_ * sizeof(Cell)); | |
70 FreeGuardedVirtualMemory(buckets_, NumBuckets * sizeof(Bucket)); | |
71 } | |
72 | |
73 std::pair<KVIndex, bool> Insert(const Key& key, const Value& value) { | |
74 Cell** p_cell = Lookup(key); | |
75 Cell* cell = *p_cell; | |
76 if (cell) { | |
77 return {static_cast<KVIndex>(cell - cells_), false}; // not inserted | |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
nit: add extra space before //
Dmitry Skiba
2016/06/29 16:12:26
Done.
| |
78 } | |
79 | |
80 // Get a free cell and link it. | |
81 *p_cell = cell = GetFreeCell(); | |
82 cell->p_prev = p_cell; | |
83 cell->next = nullptr; | |
84 | |
85 // Initialize key/value pair. Since key is 'const Key' this is the | |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
right your comment here is a hint to my comment ab
| |
86 // only way to initialize it. | |
87 new (&cell->kv) KVPair(key, value); | |
88 | |
89 return {static_cast<KVIndex>(cell - cells_), true}; // inserted | |
90 } | |
91 | |
92 void Remove(KVIndex index) { | |
93 DCHECK_LT(index, next_unused_cell_); | |
94 | |
95 Cell* cell = &cells_[index]; | |
96 | |
97 // Unlink the cell. | |
98 *cell->p_prev = cell->next; | |
99 if (cell->next) { | |
100 cell->next->p_prev = cell->p_prev; | |
101 } | |
102 cell->p_prev = nullptr; // mark as free | |
103 | |
104 // Add it to the free list. | |
105 cell->next = free_list_; | |
106 free_list_ = cell; | |
107 } | |
108 | |
109 KVIndex Find(const Key& key) const { | |
110 Cell* cell = *Lookup(key); | |
111 return cell ? KVIndex(cell - cells_) : kInvalidKVIndex; | |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
KVIndex -> static_cast
Dmitry Skiba
2016/06/29 16:12:26
Done.
| |
112 } | |
113 | |
114 KVPair& Get(KVIndex index) { | |
115 return cells_[index].kv; | |
116 } | |
117 | |
118 const KVPair& Get(KVIndex index) const { | |
119 return cells_[index].kv; | |
120 } | |
121 | |
122 // Finds next index that has a KVPair associated with it. Search starts | |
123 // with the specified index. Returns kInvalidKVIndex if nothing was found. | |
124 // To find the first valid index, call this function with 0. Continue | |
125 // calling with the last_index + 1 until kInvalidKVIndex is returned. | |
126 KVIndex FindNextIndex(KVIndex index) const { | |
127 for (; index < next_unused_cell_; ++index) { | |
128 if (cells_[index].p_prev) { | |
129 return index; | |
130 } | |
131 } | |
132 return kInvalidKVIndex; | |
133 } | |
134 | |
135 // Estimates number of bytes used in allocated memory regions. | |
136 size_t EstimateUsedMemory() const { | |
137 size_t page_size = base::GetPageSize(); | |
138 // |next_unused_cell_| is the index of the first cell that wasn't touched, | |
139 // i.e. it equals the number of touched cells. | |
140 return bits::Align(sizeof(Cell) * next_unused_cell_, page_size) + | |
141 bits::Align(sizeof(Bucket) * NumBuckets, page_size); | |
142 } | |
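bits::Align(size, alignment) rounds |size| up to the next multiple of |alignment|, so the estimate counts whole pages that were actually faulted in; reserved-but-untouched cells past |next_unused_cell_| cost no physical memory. A worked example with hypothetical numbers:

```cpp
// Hypothetical: 4 KiB pages, 40-byte cells, 1000 cells touched.
//   sizeof(Cell) * next_unused_cell_ = 40 * 1000 = 40000 bytes
//   bits::Align(40000, 4096)         = 40960 bytes (10 whole pages)
// Cells past |next_unused_cell_| were never written, so the kernel never
// backed them with physical pages; only address space was reserved.
```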
143 | |
144 private: | |
145 friend base::trace_event::AllocationRegisterTest; | |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
out of curiosity why did you drop "class" here?
Dmitry Skiba
2016/06/29 16:12:26
Yeah, so FixedHashMap is inside 'internal' namespa
Primiano Tucci (use gerrit)
2016/06/29 16:55:03
Ok the namespace thing makes sense, but my point i
| |
146 | |
147 struct Cell { | |
148 KVPair kv; | |
149 Cell* next; | |
150 | |
151 // Conceptually this is |prev| in a doubly linked list. However, buckets | |
152 // also participate in their cell lists - a bucket points to the list's | |
153 // head and also needs to be linked / unlinked properly. To treat these two | |
154 // cases uniformly, instead of |prev| we store a "pointer to the Cell* | |
155 // that points to this Cell". So |p_prev| points to a bucket for the first | |
156 // cell in a list, and to the |next| of the previous cell for any other | |
157 // cell. With that, Lookup() is the only function that handles buckets and | |
158 // cells differently. | |
159 // If |p_prev| is nullptr, the cell is in the free list. | |
160 Cell** p_prev; | |
161 }; | |
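In other words, |p_prev| is an intrusive version of the textbook "pointer to the incoming pointer" trick. A standalone sketch (hypothetical type names) of why unlinking then needs no special head case:

```cpp
// Hypothetical illustration of the |p_prev| unlink; compare with Remove().
struct Node {
  Node* next;
  Node** p_prev;  // address of whatever pointer currently points at us:
                  // a bucket slot for the head, |&prev->next| otherwise
};

void Unlink(Node* node) {
  *node->p_prev = node->next;  // same statement for head and interior nodes
  if (node->next)
    node->next->p_prev = node->p_prev;  // fix the successor's back pointer
}
```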
162 | |
163 using Bucket = Cell*; | |
164 | |
165 // Returns a pointer to the cell that contains or should contain the entry | |
166 // for |key|. The pointer may point at an element of |buckets_| or at the | |
167 // |next| member of an element of |cells_|. | |
168 Cell** Lookup(const Key& key) const { | |
169 // The list head is in |buckets_| at the hash offset. | |
170 Cell** p_cell = &buckets_[Hash(key)]; | |
171 | |
172 // Chase down the list until the cell that holds |key| is found, | |
173 // or until the list ends. | |
174 while (*p_cell && !((*p_cell)->kv.first == key)) { | |
175 p_cell = &(*p_cell)->next; | |
176 } | |
177 | |
178 return p_cell; | |
179 } | |
180 | |
181 // Returns a cell that is not being used to store an entry (either by | |
182 // recycling from the free list or by taking a fresh cell). | |
183 Cell* GetFreeCell() { | |
184 // First try to re-use a cell from the free list. | |
185 if (free_list_) { | |
186 Cell* cell = free_list_; | |
187 free_list_ = cell->next; | |
188 return cell; | |
189 } | |
190 | |
191 // Otherwise pick the next cell that has not been touched before. | |
192 size_t idx = next_unused_cell_; | |
193 next_unused_cell_++; | |
194 | |
195 // If the hash table has too little capacity (when too little address space | |
196 // was reserved for |cells_|), |next_unused_cell_| can be an index outside | |
197 // of the allocated storage. A guard page is allocated there to crash the | |
198 // program in that case. There are alternative solutions: | |
199 // - Deal with it, increase capacity by reallocating |cells_|. | |
200 // - Refuse to insert and let the caller deal with it. | |
201 // Because free cells are re-used before accessing fresh cells with a higher | |
202 // index, and because reserving address space without touching it is cheap, | |
203 // the simplest solution is to just allocate a humongous chunk of address | |
204 // space. | |
205 | |
206 DCHECK_LT(next_unused_cell_, num_cells_ + 1); | |
207 | |
208 return &cells_[idx]; | |
209 } | |
210 | |
211 // Returns a value in the range [0, NumBuckets - 1] (inclusive). | |
212 size_t Hash(const Key& key) const { | |
213 if (NumBuckets == (NumBuckets & ~(NumBuckets - 1))) { | |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
hmm this is going to hurt your fastpath. Sorry pro
Dmitry Skiba
2016/06/29 16:12:26
I don't think that's the case - NumBuckets is a te
Primiano Tucci (use gerrit)
2016/06/29 16:55:03
Ah k didn't realize that is a constexpr sorry.
| |
214 // NumBuckets is a power of 2. | |
215 return KeyHasher()(key) & (NumBuckets - 1); | |
216 } else { | |
217 return KeyHasher()(key) % NumBuckets; | |
218 } | |
219 } | |
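The condition on the first line is the single-set-bit test: N & ~(N - 1) isolates the lowest set bit of N and equals N only when N is a power of two. Since NumBuckets is a template parameter, the compiler resolves the branch at compile time, so the cheap mask path costs nothing when it applies. Restated standalone:

```cpp
// n & ~(n - 1) keeps only the lowest set bit of n; it equals n exactly when
// n has a single set bit. (n == 0 would also pass, but NumBuckets is never 0.)
constexpr bool IsPowerOfTwo(size_t n) {
  return (n & ~(n - 1)) == n;
}
static_assert(IsPowerOfTwo(1 << 18), "single set bit -> cheap mask");
static_assert(!IsPowerOfTwo(12), "0b1100 has two set bits -> use modulo");
```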
220 | |
221 // Number of cells. | |
222 size_t const num_cells_; | |
223 | |
224 // The array of cells. This array is backed by mmapped memory. Lower indices | |
225 // are accessed first, higher indices are accessed only when the |free_list_| | |
226 // is empty. This is to minimize the amount of resident memory used. | |
227 Cell* const cells_; | |
228 | |
229 // The array of buckets (pointers into |cells_|). |buckets_[Hash(key)]| will | |
230 // contain the pointer to the linked list of cells for |Hash(key)|. | |
231 // This array is backed by mmapped memory. | |
232 mutable Bucket* buckets_; | |
233 | |
234 // The head of the free list. | |
235 Cell* free_list_; | |
236 | |
237 // The index of the first element of |cells_| that has not been used before. | |
238 // If the free list is empty and a new cell is needed, the cell at this index | |
239 // is used. This is the high water mark for the number of entries stored. | |
240 size_t next_unused_cell_; | |
241 | |
242 DISALLOW_COPY_AND_ASSIGN(FixedHashMap); | |
243 }; | |
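To make the index-based API concrete, a hypothetical instantiation and round trip (types and numbers invented for illustration) could look like:

```cpp
#include <functional>

// 257 buckets (prime) holding int -> int; purely illustrative parameters.
using IntMap = internal::FixedHashMap<257, int, int, std::hash<int>>;

void Example() {
  IntMap map(1000);                   // reserve room for 1000 cells
  auto inserted = map.Insert(42, 7);  // {index, true} on first insert
  IntMap::KVIndex index = inserted.first;
  map.Get(index).second = 8;          // the value half of KVPair is mutable

  // Visit every live entry; indices of removed cells are skipped.
  for (IntMap::KVIndex i = map.FindNextIndex(0);
       i != IntMap::kInvalidKVIndex;
       i = map.FindNextIndex(i + 1)) {
    // map.Get(i).first is the key, map.Get(i).second the value.
  }

  map.Remove(index);  // the cell is recycled through the free list
}
```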
244 | |
245 } // namespace internal | |
246 | |
18 class TraceEventMemoryOverhead; | 247 class TraceEventMemoryOverhead; |
19 | 248 |
20 // The allocation register keeps track of all allocations that have not been | 249 // The allocation register keeps track of all allocations that have not been |
21 // freed. It is a memory map-backed hash table that stores size and context | 250 // freed. Internally it has two hash tables: one for Backtraces and one for |
22 // indexed by address. The hash table is tailored specifically for this use | 251 // the allocations themselves. Both tables have fixed sizes, and this class |
23 // case. The common case is that an entry is inserted and removed after a | 252 // allocates (mmaps) only in its constructor. |
24 // while, lookup without modifying the table is not an intended use case. The | |
25 // hash table is implemented as an array of linked lists. The size of this | |
26 // array is fixed, but it does not limit the amount of entries that can be | |
27 // stored. | |
28 // | |
29 // Replaying a recording of Chrome's allocations and frees against this hash | |
30 // table takes about 15% of the time that it takes to replay them against | |
31 // |std::map|. | |
32 class BASE_EXPORT AllocationRegister { | 253 class BASE_EXPORT AllocationRegister { |
33 public: | 254 public: |
34 // The data stored in the hash table; | 255 // Details about an allocation. |
35 // contains the details about an allocation. | |
36 struct Allocation { | 256 struct Allocation { |
37 void* const address; | 257 const void* address; |
38 size_t size; | 258 size_t size; |
39 AllocationContext context; | 259 AllocationContext context; |
40 }; | 260 }; |
41 | 261 |
42 // An iterator that iterates entries in the hash table efficiently, but in no | 262 // An iterator that iterates entries in no particular order. |
43 // particular order. It can do this by iterating the cells and ignoring the | |
44 // linked lists altogether. Instead of checking whether a cell is in the free | |
45 // list to see if it should be skipped, a null address is used to indicate | |
46 // that a cell is free. | |
47 class BASE_EXPORT ConstIterator { | 263 class BASE_EXPORT ConstIterator { |
48 public: | 264 public: |
49 void operator++(); | 265 void operator++(); |
50 bool operator!=(const ConstIterator& other) const; | 266 bool operator!=(const ConstIterator& other) const; |
51 const Allocation& operator*() const; | 267 Allocation operator*() const; |
52 | 268 |
53 private: | 269 private: |
54 friend class AllocationRegister; | 270 friend class AllocationRegister; |
55 using CellIndex = uint32_t; | 271 using AllocationKVIndex = size_t; |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
shouldn't this be = FixedHashMap::Index ?
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
nit: not sure what the "KV" part adds to the name,
Dmitry Skiba
2016/06/29 16:12:26
Removed KV. Yes, it should be AllocationMap::KVInd
| |
56 | 272 |
57 ConstIterator(const AllocationRegister& alloc_register, CellIndex index); | 273 ConstIterator(const AllocationRegister& alloc_register, |
274 AllocationKVIndex index); | |
58 | 275 |
59 const AllocationRegister& register_; | 276 const AllocationRegister& register_; |
60 CellIndex index_; | 277 AllocationKVIndex index_; |
61 }; | 278 }; |
62 | 279 |
63 AllocationRegister(); | 280 AllocationRegister(); |
64 explicit AllocationRegister(uint32_t num_cells); | 281 AllocationRegister(size_t num_allocation_cells, size_t num_backtrace_cells); |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
since you carefully avoided exposing the "cells" m
Dmitry Skiba
2016/06/29 16:12:26
Done.
| |
65 | 282 |
66 ~AllocationRegister(); | 283 ~AllocationRegister(); |
67 | 284 |
68 // Inserts allocation details into the table. If the address was present | 285 // Inserts allocation details into the table. If the address was present |
69 // already, its details are updated. |address| must not be null. (This is | 286 // already, its details are updated. |address| must not be null. |
70 // because null is used to mark free cells, to allow efficient iteration of | 287 void Insert(const void* address, |
71 // the hash table.) | 288 size_t size, |
72 void Insert(void* address, size_t size, AllocationContext context); | 289 const AllocationContext& context); |
73 | 290 |
74 // Removes the address from the table if it is present. It is ok to call this | 291 // Removes the address from the table if it is present. It is ok to call this |
75 // with a null pointer. | 292 // with a null pointer. |
76 void Remove(void* address); | 293 void Remove(const void* address); |
77 | 294 |
78 // Returns a pointer to the allocation at the address, or null if there is no | 295 // Finds allocation for the address and fills |out_allocation|. |
79 // allocation at that address. This can be used to change the allocation | 296 bool Get(const void* address, Allocation* out_allocation) const; |
80 // context after insertion, for example to change the type name. | |
81 Allocation* Get(void* address); | |
82 | 297 |
83 ConstIterator begin() const; | 298 ConstIterator begin() const; |
84 ConstIterator end() const; | 299 ConstIterator end() const; |
85 | 300 |
86 // Estimates memory overhead including |sizeof(AllocationRegister)|. | 301 // Estimates memory overhead including |sizeof(AllocationRegister)|. |
87 void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const; | 302 void EstimateTraceMemoryOverhead(TraceEventMemoryOverhead* overhead) const; |
88 | 303 |
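For reference, begin()/end() make the register usable with a range-based for loop; a hypothetical caller:

```cpp
// Hypothetical: walk all live allocations (order is unspecified).
void VisitAllocations(const AllocationRegister& reg) {
  for (AllocationRegister::Allocation allocation : reg) {
    // allocation.address, allocation.size, allocation.context ...
  }
}
```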
89 private: | 304 private: |
90 friend class AllocationRegisterTest; | 305 friend AllocationRegisterTest; |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
y u not like class? :)
Dmitry Skiba
2016/06/29 16:12:26
It's not needed, since I forward-declared it on to
Primiano Tucci (use gerrit)
2016/06/29 16:55:03
Yup I know but this looks like a friend function n
| |
91 using CellIndex = uint32_t; | |
92 | 306 |
93 // A cell can store allocation details (size and context) by address. Cells | 307 // Expect max 1.5M allocations. Number of buckets is 2^18 for optimal |
94 // are part of a linked list via the |next| member. This list is either the | 308 // hashing and should be changed together with AddressHasher. |
95 // list for a particular hash, or the free list. All cells are contiguous in | 309 static const size_t kNumAllocationBuckets = 1 << 18; |
96 // memory in one big array. Therefore, on 64-bit systems, space can be saved | 310 static const size_t kNumAllocationCells = 1500000; |
97 // by storing 32-bit indices instead of pointers as links. Index 0 is used as | 311 |
98 // the list terminator. | 312 // Expect max 2^15 unique backtraces. Can be changed to 2^16 without |
99 struct Cell { | 313 // needing to tweak BacktraceHasher implementation. |
100 CellIndex next; | 314 static const size_t kNumBacktraceBuckets = 1 << 15; |
101 Allocation allocation; | 315 static const size_t kNumBacktraceCells = kNumBacktraceBuckets; |
316 | |
317 struct BacktraceHasher { | |
318 size_t operator()(const Backtrace& backtrace) const; | |
102 }; | 319 }; |
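The hasher body is out of line in allocation_register.cc. As a sketch of the general shape only (an assumption, not the shipped code), a backtrace hash can fold the frame values together:

```cpp
// Illustrative only: combine |count| frame addresses into one hash value.
// The real BacktraceHasher is tuned to the bucket counts declared above.
size_t HashFramesSketch(const uintptr_t* frames, size_t count) {
  size_t hash = 0;
  for (size_t i = 0; i != count; ++i)
    hash = hash * 131 + frames[i];  // simple polynomial rolling hash
  return hash;
}
```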
103 | 320 |
104 // The number of buckets, 2^17, approximately 130 000, has been tuned for | 321 using BacktraceMap = internal::FixedHashMap< |
105 // Chrome's typical number of outstanding allocations. (This number varies | 322 kNumBacktraceBuckets, |
106 // between processes. Most processes have a sustained load of ~30k unfreed | 323 Backtrace, |
107 // allocations, but some processes have peaks around 100k-400k allocations.) | 326 // and decremented when the allocation is removed. When the number |
108 // Because of the size of the table, it is likely that every |buckets_| | 325 // when an allocation that references the backtrace is inserted, |
109 // access and every |cells_| access will incur a cache miss. Microbenchmarks | 326 // and decremented when the allocation removed. When the number |
110 // suggest that it is worthwile to use more memory for the table to avoid | 327 // drops to zero, the backtrace is removed from the map. |
Primiano Tucci (use gerrit)
2016/06/28 14:23:07
ahhh now makes way more sense.
| |
111 // chasing down the linked list, until the size is 2^18. The number of buckets | 328 BacktraceHasher>; |
112 // is a power of two so modular indexing can be done with bitwise and. | |
113 static const uint32_t kNumBuckets = 0x20000; | |
114 static const uint32_t kNumBucketsMask = kNumBuckets - 1; | |
115 | 329 |
116 // Reserve address space to store at most this number of entries. High | 330 struct AllocationInfo { |
117 // capacity does not imply high memory usage due to the access pattern. The | 331 size_t size; |
118 // only constraint on the number of cells is that on 32-bit systems address | 332 const char* type_name; |
119 // space is scarce (i.e. reserving 2GiB of address space for the entries is | 333 BacktraceMap::KVIndex backtrace_index; |
120 // not an option). A value of ~3M entries is large enough to handle spikes in | 334 }; |
121 // the number of allocations, and modest enough to require no more than a few | |
122 // dozens of MiB of address space. | |
123 static const uint32_t kNumCellsPerBucket = 10; | |
124 | 335 |
125 // Returns a value in the range [0, kNumBuckets - 1] (inclusive). | 336 struct AddressHasher { |
126 static uint32_t Hash(void* address); | 337 size_t operator () (const void* address) const; |
338 }; | |
127 | 339 |
128 // Allocates a region of virtual address space of |size| rounded up to the | 340 using AllocationMap = internal::FixedHashMap< |
129 // system page size. The memory is zeroed by the system. A guard page is | 341 kNumAllocationBuckets, |
130 // added after the end. | 342 const void*, |
131 static void* AllocateVirtualMemory(size_t size); | 343 AllocationInfo, |
344 AddressHasher>; | |
132 | 345 |
133 // Frees a region of virtual address space allocated by a call to | 346 BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace); |
134 // |AllocateVirtualMemory|. | 347 void RemoveBacktrace(BacktraceMap::KVIndex index); |
135 static void FreeVirtualMemory(void* address, size_t allocated_size); | |
136 | 348 |
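Given the reference-count comment on BacktraceMap above, the two private helpers plausibly pair up as follows (a sketch under that assumption; the real bodies are in allocation_register.cc):

```cpp
BacktraceMap::KVIndex AllocationRegister::InsertBacktrace(
    const Backtrace& backtrace) {
  // Insert with count 0 if the backtrace is new, then count this reference.
  BacktraceMap::KVIndex index = backtraces_.Insert(backtrace, 0).first;
  backtraces_.Get(index).second++;
  return index;
}

void AllocationRegister::RemoveBacktrace(BacktraceMap::KVIndex index) {
  // Drop the backtrace entirely once its last referencing allocation goes.
  if (--backtraces_.Get(index).second == 0)
    backtraces_.Remove(index);
}
```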
137 // Returns a pointer to the variable that contains or should contain the | 349 Allocation GetAllocation(AllocationMap::KVIndex index) const; |
138 // index of the cell that stores the entry for |address|. The pointer may | |
139 // point at an element of |buckets_| or at the |next| member of an element of | |
140 // |cells_|. If the value pointed at is 0, |address| is not in the table. | |
141 CellIndex* Lookup(void* address); | |
142 | 350 |
143 // Takes a cell that is not being used to store an entry (either by recycling | 351 AllocationMap allocations_; |
144 // from the free list or by taking a fresh cell) and returns its index. | 352 BacktraceMap backtraces_; |
145 CellIndex GetFreeCell(); | |
146 | |
147 // The maximum number of cells which can be allocated. | |
148 uint32_t const num_cells_; | |
149 | |
150 // The array of cells. This array is backed by mmapped memory. Lower indices | |
151 // are accessed first, higher indices are only accessed when required. In | |
152 // this way, even if a huge amount of address space has been mmapped, only | |
153 // the cells that are actually used will be backed by physical memory. | |
154 Cell* const cells_; | |
155 | |
156 // The array of indices into |cells_|. |buckets_[Hash(address)]| will contain | |
157 // the index of the head of the linked list for |Hash(address)|. A value of 0 | |
158 // indicates an empty list. This array is backed by mmapped memory. | |
159 CellIndex* const buckets_; | |
160 | |
161 // The head of the free list. This is the index of the cell. A value of 0 | |
162 // means that the free list is empty. | |
163 CellIndex free_list_; | |
164 | |
165 // The index of the first element of |cells_| that has not been used before. | |
166 // If the free list is empty and a new cell is needed, the cell at this index | |
167 // is used. This is the high water mark for the number of entries stored. | |
168 CellIndex next_unused_cell_; | |
169 | 353 |
170 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); | 354 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); |
171 }; | 355 }; |
172 | 356 |
173 } // namespace trace_event | 357 } // namespace trace_event |
174 } // namespace base | 358 } // namespace base |
175 | 359 |
176 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 360 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |