OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. |
| 4 |
| 5 #ifndef _LEAK_DETECTOR_IMPL_H_ |
| 6 #define _LEAK_DETECTOR_IMPL_H_ |
| 7 |
#include <stddef.h>

#include <algorithm>
#include <unordered_set>

#include "addressmap-inl.h"
#include "base/basictypes.h"
#include "base/custom_allocator.h"
#include "heap-profile-stats.h"
#include "leak_analyzer.h"
| 15 |
| 16 namespace leak_detector { |
| 17 |
| 18 class CallStackTable; |
| 19 |
| 20 //---------------------------------------------------------------------- |
| 21 // Class that contains the actual leak detection mechanism. |
| 22 //---------------------------------------------------------------------- |
| 23 class LeakDetectorImpl { |
| 24 public: |
| 25 // Used for tracking allocation stats. |
| 26 using Stats = HeapProfileStats; |
| 27 |
| 28 LeakDetectorImpl(uint64 mapping_addr, uint64 mapping_size); |
| 29 ~LeakDetectorImpl(); |
| 30 |
| 31 // Indicates whether the given allocation size has an associated call stack |
| 32 // table, and thus requires a stack unwind. |
| 33 bool ShouldGetStackTraceForSize(size_t size) const; |
| 34 |
| 35 // Record allocs and frees. |
| 36 void RecordAlloc(const void* ptr, size_t size, |
| 37 int stack_depth, const void* const call_stack[]); |
| 38 void RecordFree(const void* ptr); |
| 39 |
| 40 const Stats& stats() const { |
| 41 return stats_; |
| 42 } |
| 43 |
| 44 // Run check for possible leaks based on the current profiling data. |
| 45 void TestForLeaks(); |
| 46 |
| 47 // Dump current profiling statistics to log. |
| 48 void DumpStats() const; |
| 49 |
| 50 private: |
| 51 // Used for tracking unique call stacks. |
| 52 using CallStack = HeapProfileBucket; |
| 53 |
| 54 // A record of allocations for a particular size. |
| 55 struct AllocSizeEntry { |
| 56 // Number of allocations and frees for this size. |
| 57 uint32 num_allocs; |
| 58 uint32 num_frees; |
| 59 |
| 60 // A stack table, if this size is being profiled for stack as well. |
| 61 CallStackTable* stack_table; |
| 62 }; |
| 63 |
| 64 // Info stored in the address map |
| 65 struct AllocInfo { |
| 66 AllocInfo() : call_stack(NULL) {} |
| 67 |
| 68 // Number of bytes in this allocation. |
| 69 size_t bytes; |
| 70 |
| 71 // Points to a unique call stack. |
| 72 CallStack* call_stack; |
| 73 }; |
| 74 |
| 75 // Used for recording size and call stack info for each allocation. |
| 76 using AllocationMap = AddressMap<AllocInfo>; |
| 77 |
| 78 // Hash table for tracking unique call stacks. |
| 79 // TODO: Using std::unordered_set makes |CallStack::next| redundant. Consider |
| 80 // creating a new struct or removing |next|. |
| 81 using TableEntryAllocator = STL_Allocator<const CallStack*, CustomAllocator>; |
| 82 |
| 83 struct CallStackCompare { |
| 84 bool operator() (const CallStack* c1, const CallStack* c2) const { |
| 85 return c1->depth == c2->depth && |
| 86 std::equal(c1->stack, c1->stack + c1->depth, c2->stack); |
| 87 } |
| 88 }; |
| 89 |
| 90 struct CallStackHash { |
| 91 size_t operator() (const CallStack* call_stack) const { |
| 92 return CallStackToHash(call_stack); |
| 93 } |
| 94 }; |
| 95 |
| 96 // Number of entries in the alloc size table. As sizes are aligned to 32-bits |
| 97 // the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger |
| 98 // sizes are ignored. This value is chosen high enough that such large sizes |
| 99 // are rare if not nonexistent. |
| 100 static const int kNumSizeEntries = 2048; |
| 101 |
| 102 // Converts an allocation size to/from the array index used for |entries_|. |
| 103 static int SizeToIndex(size_t size); |
| 104 static size_t IndexToSize(int index); |
| 105 |
| 106 // Accessor for the entry table. |
| 107 inline AllocSizeEntry* GetEntryForSize(size_t size) { |
| 108 return &entries_[SizeToIndex(size)]; |
| 109 } |
| 110 inline const AllocSizeEntry& GetConstEntryForSize(size_t size) const { |
| 111 return entries_[SizeToIndex(size)]; |
| 112 } |
| 113 |
| 114 // Returns a CallStack object for a given call stack. Each unique call stack |
| 115 // has its own CallStack object. If the given call stack has already been |
| 116 // created by a previous call to this function, return a pointer to that same |
| 117 // call stack object. |
| 118 CallStack* GetCallStack(int depth, const void* const stack[]); |
| 119 |
| 120 // Returns the offset of |ptr| within the current binary. If it is not in the |
| 121 // current binary, just return |ptr| as an integer. |
| 122 uint64 GetOffset(const void *ptr) const; |
| 123 |
| 124 // Hash function for stack tables. |
| 125 static size_t CallStackToHash(const CallStack* call_stack); |
| 126 |
| 127 std::unordered_set<CallStack*, |
| 128 CallStackHash, |
| 129 CallStackCompare, |
| 130 TableEntryAllocator> call_stacks_; |
| 131 |
| 132 // For tracking allocation stats. |
| 133 Stats stats_; |
| 134 Stats call_stack_stats_; |
| 135 int num_stack_tables_; |
| 136 |
| 137 // Stores all individual recorded allocations. |
| 138 AllocationMap address_map_; |
| 139 |
| 140 // Used to analyze potential leak patterns in the allocation sizes. |
| 141 LeakAnalyzer size_leak_analyzer_; |
| 142 |
| 143 // Allocation stats for each size. |
| 144 AllocSizeEntry entries_[kNumSizeEntries]; |
| 145 |
| 146 // Address mapping info of the current binary. |
| 147 uint64 mapping_addr_; |
| 148 uint64 mapping_size_; |
| 149 |
| 150 DISALLOW_COPY_AND_ASSIGN(LeakDetectorImpl); |
| 151 }; |
| 152 |
| 153 } // namespace leak_detector |
| 154 |
| 155 #endif // _LEAK_DETECTOR_IMPL_H_ |
OLD | NEW |