| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ | 5 #ifndef COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ |
| 6 #define COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ | 6 #define COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ |
| 7 | 7 |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 #include <stdint.h> | 9 #include <stdint.h> |
| 10 | 10 |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 60 ~LeakReport(); | 60 ~LeakReport(); |
| 61 | 61 |
| 62 size_t alloc_size_bytes() const { return alloc_size_bytes_; } | 62 size_t alloc_size_bytes() const { return alloc_size_bytes_; } |
| 63 | 63 |
| 64 const InternalVector<uintptr_t>& call_stack() const { return call_stack_; } | 64 const InternalVector<uintptr_t>& call_stack() const { return call_stack_; } |
| 65 | 65 |
| 66 const InternalVector<AllocationBreakdown>& alloc_breakdown_history() const { | 66 const InternalVector<AllocationBreakdown>& alloc_breakdown_history() const { |
| 67 return alloc_breakdown_history_; | 67 return alloc_breakdown_history_; |
| 68 } | 68 } |
| 69 | 69 |
| 70 size_t num_rising_intervals() const { return num_rising_intervals_; } | |
| 71 | |
| 72 uint32_t num_allocs_increase() const { return num_allocs_increase_; } | |
| 73 | |
| 74 void set_num_rising_intervals(size_t num_rising_intervals) { | |
| 75 num_rising_intervals_ = num_rising_intervals; | |
| 76 } | |
| 77 | |
| 78 // Used to compare the contents of two leak reports. | 70 // Used to compare the contents of two leak reports. |
| 79 bool operator<(const LeakReport& other) const; | 71 bool operator<(const LeakReport& other) const; |
| 80 | 72 |
| 81 private: | 73 private: |
| 82 // LeakDetectorImpl needs access to class members when creating a new leak | 74 // LeakDetectorImpl needs access to class members when creating a new leak |
| 83 // report. | 75 // report. |
| 84 friend class LeakDetectorImpl; | 76 friend class LeakDetectorImpl; |
| 85 | 77 |
| 86 // Number of bytes allocated by the leak site during each allocation. | 78 // Number of bytes allocated by the leak site during each allocation. |
| 87 size_t alloc_size_bytes_; | 79 size_t alloc_size_bytes_; |
| 88 | 80 |
| 89 // Number of intervals in the last uptrend. | |
| 90 size_t num_rising_intervals_; | |
| 91 | |
| 92 // Net number of allocations in the last uptrend. | |
| 93 uint32_t num_allocs_increase_; | |
| 94 | |
| 95 // Unlike the CallStack struct, which consists of addresses, this call stack | 81 // Unlike the CallStack struct, which consists of addresses, this call stack |
| 96 // will contain offsets in the executable binary. | 82 // will contain offsets in the executable binary. |
| 97 InternalVector<uintptr_t> call_stack_; | 83 InternalVector<uintptr_t> call_stack_; |
| 98 | 84 |
| 99 // Records of allocation bookkeeping over time. The first element is the | 85 // Records of allocation bookkeeping over time. The first element is the |
| 100 // oldest entry and the last element is the newest. | 86 // oldest entry and the last element is the newest. |
| 101 InternalVector<AllocationBreakdown> alloc_breakdown_history_; | 87 InternalVector<AllocationBreakdown> alloc_breakdown_history_; |
| 102 }; | 88 }; |
| 103 | 89 |
| 104 LeakDetectorImpl(uintptr_t mapping_addr, | 90 LeakDetectorImpl(uintptr_t mapping_addr, |
| 105 size_t mapping_size, | 91 size_t mapping_size, |
| 106 int size_suspicion_threshold, | 92 int size_suspicion_threshold, |
| 107 int call_stack_suspicion_threshold); | 93 int call_stack_suspicion_threshold); |
| 108 ~LeakDetectorImpl(); | 94 ~LeakDetectorImpl(); |
| 109 | 95 |
| 110 // Indicates whether the given allocation size has an associated call stack | 96 // Indicates whether the given allocation size has an associated call stack |
| 111 // table, and thus requires a stack unwind. | 97 // table, and thus requires a stack unwind. |
| 112 bool ShouldGetStackTraceForSize(size_t size) const; | 98 bool ShouldGetStackTraceForSize(size_t size) const; |
| 113 | 99 |
| 114 // Record allocs and frees. | 100 // Record allocs and frees. |
| 115 void RecordAlloc(const void* ptr, | 101 void RecordAlloc(const void* ptr, |
| 116 size_t size, | 102 size_t size, |
| 117 int stack_depth, | 103 int stack_depth, |
| 118 const void* const call_stack[]); | 104 const void* const call_stack[]); |
| 119 void RecordFree(const void* ptr); | 105 void RecordFree(const void* ptr); |
| 120 | 106 |
| 121 // Run check for possible leaks based on the current profiling data. | 107 // Run check for possible leaks based on the current profiling data. |
| 122 void TestForLeaks(InternalVector<LeakReport>* reports, size_t timestamp); | 108 void TestForLeaks(InternalVector<LeakReport>* reports); |
| 123 | 109 |
| 124 private: | 110 private: |
| 125 // A record of allocations for a particular size. | 111 // A record of allocations for a particular size. |
| 126 struct AllocSizeEntry { | 112 struct AllocSizeEntry { |
| 127 // Number of allocations and frees for this size. | 113 // Number of allocations and frees for this size. |
| 128 uint32_t num_allocs; | 114 uint32_t num_allocs; |
| 129 uint32_t num_frees; | 115 uint32_t num_frees; |
| 130 | 116 |
| 131 // A stack table, if this size is being profiled for stack as well. | 117 // A stack table, if this size is being profiled for stack as well. |
| 132 CallStackTable* stack_table; | 118 CallStackTable* stack_table; |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 167 }; | 153 }; |
| 168 | 154 |
| 169 // Returns the offset of |ptr| within the current binary. If it is not in the | 155 // Returns the offset of |ptr| within the current binary. If it is not in the |
| 170 // current binary, return |UINTPTR_MAX|. | 156 // current binary, return |UINTPTR_MAX|. |
| 171 uintptr_t GetOffset(const void* ptr) const; | 157 uintptr_t GetOffset(const void* ptr) const; |
| 172 | 158 |
| 173 // Record some of the current allocation bookkeeping. The net number of allocs | 159 // Record some of the current allocation bookkeeping. The net number of allocs |
| 174 // per size is recorded in |size_breakdown_history_|. The net number of allocs | 160 // per size is recorded in |size_breakdown_history_|. The net number of allocs |
| 175 // per call site for each size is recorded in | 161 // per call site for each size is recorded in |
| 176 // |AllocSizeEntry::call_site_breakdown_history|. | 162 // |AllocSizeEntry::call_site_breakdown_history|. |
| 177 // Argument |timestamp| is used to update information about drops in | |
| 178 // allocation number for each stored call stack. | |
| 179 // | 163 // |
| 180 // Not all the net alloc counts are recorded. And the number of historical | 164 // Not all the net alloc counts are recorded. And the number of historical |
| 181 // records kept is capped. If adding a new record exceeds that limit, the | 165 // records kept is capped. If adding a new record exceeds that limit, the |
| 182 // oldest record is discarded. See the function definition for more details. | 166 // oldest record is discarded. See the function definition for more details. |
| 183 void RecordCurrentAllocationDataInHistory(size_t timestamp); | 167 void RecordCurrentAllocationDataInHistory(); |
| 184 | 168 |
| 185 // Store the data collected by RecordCurrentAllocationDataInHistory() in | 169 // Store the data collected by RecordCurrentAllocationDataInHistory() in |
| 186 // |*report|. Not all net alloc counts per call site will be stored, only the | 170 // |*report|. Not all net alloc counts per call site will be stored, only the |
| 187 // count for size=|size| and made from |call_site|. Also information | 171 // count for size=|size| and made from |call_site|. |
| 188 // about the last uptrend in net allocations for |size| and |call_site| | |
| 189 // is recorded with help of |timestamp|. | |
| 190 void StoreHistoricalDataInReport(size_t size, const CallStack* call_site, | 172 void StoreHistoricalDataInReport(size_t size, const CallStack* call_site, |
| 191 LeakReport* report, size_t timestamp); | 173 LeakReport* report); |
| 192 | 174 |
| 193 // Decrements the cooldown counter (value) for each entry in | 175 // Decrements the cooldown counter (value) for each entry in |
| 194 // |cooldowns_per_leak_|. If the cooldown counter reaches 0, the entry is | 176 // |cooldowns_per_leak_|. If the cooldown counter reaches 0, the entry is |
| 195 // removed. Thus, all extant entries in |cooldowns_per_leak_| maintain a | 177 // removed. Thus, all extant entries in |cooldowns_per_leak_| maintain a |
| 196 // positive count. | 178 // positive count. |
| 197 void UpdateLeakCooldowns(); | 179 void UpdateLeakCooldowns(); |
| 198 | 180 |
| 199 // Returns true if a particular leak signature (alloc size + call site) does | 181 // Returns true if a particular leak signature (alloc size + call site) does |
| 200 // not have an active cooldown counter (i.e. does not have an entry in | 182 // not have an active cooldown counter (i.e. does not have an entry in |
| 201 // |cooldowns_per_leak_|. | 183 // |cooldowns_per_leak_|. |
| (...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 253 // considered a leak suspect. | 235 // considered a leak suspect. |
| 254 int call_stack_suspicion_threshold_; | 236 int call_stack_suspicion_threshold_; |
| 255 | 237 |
| 256 DISALLOW_COPY_AND_ASSIGN(LeakDetectorImpl); | 238 DISALLOW_COPY_AND_ASSIGN(LeakDetectorImpl); |
| 257 }; | 239 }; |
| 258 | 240 |
| 259 } // namespace leak_detector | 241 } // namespace leak_detector |
| 260 } // namespace metrics | 242 } // namespace metrics |
| 261 | 243 |
| 262 #endif // COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ | 244 #endif // COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ |
| OLD | NEW |