OLD | NEW |
---|---|
1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 5 #ifndef BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 6 #define BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |
7 | 7 |
8 #include <stddef.h> | 8 #include <stddef.h> |
9 #include <stdint.h> | 9 #include <stdint.h> |
10 | 10 |
(...skipping 10 matching lines...) | |
21 namespace base { | 21 namespace base { |
22 namespace trace_event { | 22 namespace trace_event { |
23 | 23 |
24 class AllocationRegisterTest; | 24 class AllocationRegisterTest; |
25 | 25 |
26 namespace internal { | 26 namespace internal { |
27 | 27 |
28 // Allocates a region of virtual address space of |size| rounded up to the | 28 // Allocates a region of virtual address space of |size| rounded up to the |
29 // system page size. The memory is zeroed by the system. A guard page is | 29 // system page size. The memory is zeroed by the system. A guard page is |
30 // added after the end. | 30 // added after the end. |
31 // | |
32 // TODO(awong): Remove the guarded memory. This isn't currently being used and | |
Primiano Tucci (use gerrit) 2017/04/04 09:50:23
You say "this isn't currently used" but that is wh
awong 2017/04/04 18:28:30
I'll remove it. The code just has too many concep
| |
33 // this code is complex looking enough that this costs more than its bug-bashing | |
34 // weight. | |
31 void* AllocateGuardedVirtualMemory(size_t size); | 35 void* AllocateGuardedVirtualMemory(size_t size); |
32 | 36 |
33 // Frees a region of virtual address space allocated by a call to | 37 // Frees a region of virtual address space allocated by a call to |
34 // |AllocateGuardedVirtualMemory|. | 38 // |AllocateGuardedVirtualMemory|. |
35 void FreeGuardedVirtualMemory(void* address, size_t allocated_size); | 39 void FreeGuardedVirtualMemory(void* address, size_t allocated_size); |
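Since the guard-page mechanics are easy to misread, here is a minimal sketch of what these two helpers might do on a POSIX system. It is illustrative only (note the "Sketch" suffixes); the shipped implementation lives in the corresponding .cc file and is platform-specific.

    // A minimal sketch, assuming POSIX mmap/mprotect are available.
    #include <stddef.h>
    #include <sys/mman.h>
    #include <unistd.h>

    void* AllocateGuardedVirtualMemorySketch(size_t size) {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      size = (size + page_size - 1) & ~(page_size - 1);  // Round up to page size.
      // Reserve one extra page past the end. MAP_ANONYMOUS memory is zeroed.
      void* addr = mmap(nullptr, size + page_size, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      if (addr == MAP_FAILED)
        return nullptr;
      // Make the trailing page inaccessible so out-of-bounds writes fault
      // immediately instead of silently corrupting adjacent memory.
      mprotect(static_cast<char*>(addr) + size, page_size, PROT_NONE);
      return addr;
    }

    void FreeGuardedVirtualMemorySketch(void* address, size_t allocated_size) {
      const size_t page_size = static_cast<size_t>(sysconf(_SC_PAGESIZE));
      allocated_size = (allocated_size + page_size - 1) & ~(page_size - 1);
      munmap(address, allocated_size + page_size);  // Include the guard page.
    }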
36 | 40 |
37 // Hash map that mmaps memory only once in the constructor. Its API is | 41 // Hash map that mmaps memory only once in the constructor. Its API is |
38 // similar to std::unordered_map, except an index (KVIndex) addresses entries. | 42 // similar to std::unordered_map, except an index (KVIndex) addresses entries. |
39 template <size_t NumBuckets, class Key, class Value, class KeyHasher> | 43 template <size_t NumBuckets, class Key, class Value, class KeyHasher> |
40 class FixedHashMap { | 44 class FixedHashMap { |
41 // To keep things simple, we don't call destructors. | 45 // To keep things simple, we don't call destructors. |
42 static_assert(is_trivially_destructible<Key>::value && | 46 static_assert(is_trivially_destructible<Key>::value && |
43 is_trivially_destructible<Value>::value, | 47 is_trivially_destructible<Value>::value, |
44 "Key and Value shouldn't have destructors"); | 48 "Key and Value shouldn't have destructors"); |
45 public: | 49 public: |
46 using KVPair = std::pair<const Key, Value>; | 50 using KVPair = std::pair<const Key, Value>; |
47 | 51 |
48 // For implementation simplicity, the API uses an integer index instead | 52 // For implementation simplicity, the API uses an integer index instead |
49 // of iterators. Most operations (except Find) on KVIndex are O(1). | 53 // of iterators. Most operations (except Find) on KVIndex are O(1). |
50 using KVIndex = size_t; | 54 using KVIndex = size_t; |
51 enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) }; | 55 enum : KVIndex { kInvalidKVIndex = static_cast<KVIndex>(-1) }; |
52 | 56 |
53 // Capacity controls how many items this hash map can hold, and largely | 57 // Capacity controls how many items this hash map can hold, and largely |
54 // affects memory footprint. | 58 // affects memory footprint. |
55 FixedHashMap(size_t capacity) | 59 explicit FixedHashMap(size_t capacity) |
56 : num_cells_(capacity), | 60 : num_cells_(capacity), |
57 num_inserts_dropped_(0), | 61 num_inserts_dropped_(0), |
58 cells_(static_cast<Cell*>( | 62 cells_(static_cast<Cell*>( |
59 AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), | 63 AllocateGuardedVirtualMemory(num_cells_ * sizeof(Cell)))), |
60 buckets_(static_cast<Bucket*>( | 64 buckets_(static_cast<Bucket*>( |
61 AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))), | 65 AllocateGuardedVirtualMemory(NumBuckets * sizeof(Bucket)))), |
62 free_list_(nullptr), | 66 free_list_(nullptr), |
63 next_unused_cell_(0) {} | 67 next_unused_cell_(0) {} |
64 | 68 |
65 ~FixedHashMap() { | 69 ~FixedHashMap() { |
(...skipping 179 matching lines...) | |
245 }; | 249 }; |
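A hypothetical caller's view of the index-based API described above. Insert(), Find(), and Get() are declared in the elided portion of the class, so their exact signatures here are assumptions based on the std::unordered_map comparison:

    #include <functional>

    using IntMap = base::trace_event::internal::FixedHashMap<
        /*NumBuckets=*/64, /*Key=*/int, /*Value=*/int, std::hash<int>>;

    void FixedHashMapExample() {
      IntMap map(/*capacity=*/128);  // All storage is mmapped here, up front.

      map.Insert(42, 1);                     // Assumed: returns {KVIndex, bool}.
      IntMap::KVIndex index = map.Find(42);  // Find walks a bucket; not O(1).
      if (index != IntMap::kInvalidKVIndex)
        map.Get(index).second++;             // Assumed: Get returns a KVPair&.
    }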
246 | 250 |
247 } // namespace internal | 251 } // namespace internal |
248 | 252 |
249 class TraceEventMemoryOverhead; | 253 class TraceEventMemoryOverhead; |
250 | 254 |
251 // The allocation register keeps track of all allocations that have not been | 255 // The allocation register keeps track of all allocations that have not been |
252 // freed. Internally it has two hashtables: one for Backtraces and one for | 256 // freed. Internally it has two hashtables: one for Backtraces and one for |
253 // actual allocations. Sizes of both hashtables are fixed, and this class | 257 // actual allocations. Sizes of both hashtables are fixed, and this class |
254 // allocates (mmaps) only in its constructor. | 258 // allocates (mmaps) only in its constructor. |
259 // | |
260 // When either hash table reaches its maximum size, new inserts are dropped. | |
255 class BASE_EXPORT AllocationRegister { | 261 class BASE_EXPORT AllocationRegister { |
256 public: | 262 public: |
257 // Details about an allocation. | 263 // Details about an allocation. |
258 struct Allocation { | 264 struct Allocation { |
259 const void* address; | 265 const void* address; |
260 size_t size; | 266 size_t size; |
261 AllocationContext context; | 267 AllocationContext context; |
262 }; | 268 }; |
263 | 269 |
264 // An iterator that iterates entries in no particular order. | 270 // An iterator that iterates entries in no particular order. |
(...skipping 86 matching lines...) | |
351 struct AddressHasher { | 357 struct AddressHasher { |
352 size_t operator () (const void* address) const; | 358 size_t operator () (const void* address) const; |
353 }; | 359 }; |
354 | 360 |
355 using AllocationMap = internal::FixedHashMap< | 361 using AllocationMap = internal::FixedHashMap< |
356 kAllocationBuckets, | 362 kAllocationBuckets, |
357 const void*, | 363 const void*, |
358 AllocationInfo, | 364 AllocationInfo, |
359 AddressHasher>; | 365 AddressHasher>; |
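The hasher's body is defined out of line, so the following is only a plausible sketch, not the shipped code. The idea: heap addresses are at least 8-byte aligned, so the low bits carry no entropy; discard them and apply a multiplicative mix.

    // Plausible sketch of an address hash (assumption, not the real body).
    size_t AddressHasherSketch(const void* address) {
      // Shift out the alignment bits, then mix with a Knuth-style
      // multiplicative hash; 2654435761 is roughly 2^32 / golden ratio.
      const uintptr_t key = reinterpret_cast<uintptr_t>(address) >> 3;
      return static_cast<size_t>(key * 2654435761u);
    }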
360 | 366 |
367 // This method must be called exactly once to initialize | |
368 // |out_of_storage_backtrace_index_|. It creates a unique sentinel entry; | |
369 // the return value of any subsequent call is undefined. | |
370 BacktraceMap::KVIndex CreateBacktraceSentinel(); | |
371 | |
361 BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace); | 372 BacktraceMap::KVIndex InsertBacktrace(const Backtrace& backtrace); |
362 void RemoveBacktrace(BacktraceMap::KVIndex index); | 373 void RemoveBacktrace(BacktraceMap::KVIndex index); |
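To see how the sentinel gets used, here is a sketch of InsertBacktrace under two assumptions: that BacktraceMap maps a Backtrace to a reference count, and that FixedHashMap::Insert reports kInvalidKVIndex once storage is exhausted. The real body is in the .cc file.

    AllocationRegister::BacktraceMap::KVIndex
    AllocationRegister::InsertBacktrace(const Backtrace& backtrace) {
      // Assumed: Insert returns {index, inserted} and yields kInvalidKVIndex
      // when the fixed storage is full.
      auto index = backtraces_.Insert(backtrace, 0).first;
      if (index == BacktraceMap::kInvalidKVIndex) {
        // Table is full: attribute the allocation to the shared sentinel
        // entry instead of dropping it, so allocation totals still add up.
        return out_of_storage_backtrace_index_;
      }
      backtraces_.Get(index).second++;  // Assumed: the value is a refcount.
      return index;
    }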
363 | 374 |
364 Allocation GetAllocation(AllocationMap::KVIndex) const; | 375 Allocation GetAllocation(AllocationMap::KVIndex) const; |
365 | 376 |
366 AllocationMap allocations_; | 377 AllocationMap allocations_; |
367 BacktraceMap backtraces_; | 378 BacktraceMap backtraces_; |
368 | 379 |
369 // Sentinel used when we run out of backtraces_ storage. | 380 // Sentinel used when the |backtraces_| table is full. |
370 BacktraceMap::KVIndex out_of_storage_backtrace_index_; | 381 const BacktraceMap::KVIndex out_of_storage_backtrace_index_; |
371 | 382 |
372 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); | 383 DISALLOW_COPY_AND_ASSIGN(AllocationRegister); |
373 }; | 384 }; |
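Finally, a hypothetical caller's-eye usage of the register. Insert(), Remove(), and the iterator support are all in the elided public section, so the calls below are assumptions based on the class comment:

    #include <stdlib.h>

    void TrackOneAllocation(base::trace_event::AllocationRegister* reg) {
      void* ptr = malloc(1024);
      base::trace_event::AllocationContext ctx;  // Normally captured by the
                                                 // heap profiler at alloc time.
      reg->Insert(ptr, 1024, ctx);  // Assumed: (address, size, context).

      // Iterate live allocations in no particular order (see Allocation).
      for (const auto& allocation : *reg) {
        // allocation.address, allocation.size, allocation.context.
        (void)allocation;
      }

      reg->Remove(ptr);  // Assumed: called when the allocation is freed.
      free(ptr);
    }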
374 | 385 |
375 } // namespace trace_event | 386 } // namespace trace_event |
376 } // namespace base | 387 } // namespace base |
377 | 388 |
378 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ | 389 #endif // BASE_TRACE_EVENT_HEAP_PROFILER_ALLOCATION_REGISTER_H_ |