| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #ifndef COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ | 5 #ifndef COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ |
| 6 #define COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ | 6 #define COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ |
| 7 | 7 |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 #include <stdint.h> | 9 #include <stdint.h> |
| 10 | 10 |
| (...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 60 ~LeakReport(); | 60 ~LeakReport(); |
| 61 | 61 |
| 62 size_t alloc_size_bytes() const { return alloc_size_bytes_; } | 62 size_t alloc_size_bytes() const { return alloc_size_bytes_; } |
| 63 | 63 |
| 64 const InternalVector<uintptr_t>& call_stack() const { return call_stack_; } | 64 const InternalVector<uintptr_t>& call_stack() const { return call_stack_; } |
| 65 | 65 |
| 66 const InternalVector<AllocationBreakdown>& alloc_breakdown_history() const { | 66 const InternalVector<AllocationBreakdown>& alloc_breakdown_history() const { |
| 67 return alloc_breakdown_history_; | 67 return alloc_breakdown_history_; |
| 68 } | 68 } |
| 69 | 69 |
| 70 size_t num_rising_intervals() const { return num_rising_intervals_; } |
| 71 |
| 72 uint32_t num_allocs_increase() const { return num_allocs_increase_; } |
| 73 |
| 74 void set_num_rising_intervals(size_t num_rising_intervals) { |
| 75 num_rising_intervals_ = num_rising_intervals; |
| 76 } |
| 77 |
| 70 // Used to compare the contents of two leak reports. | 78 // Used to compare the contents of two leak reports. |
| 71 bool operator<(const LeakReport& other) const; | 79 bool operator<(const LeakReport& other) const; |
| 72 | 80 |
| 73 private: | 81 private: |
| 74 // LeakDetectorImpl needs access to class members when creating a new leak | 82 // LeakDetectorImpl needs access to class members when creating a new leak |
| 75 // report. | 83 // report. |
| 76 friend class LeakDetectorImpl; | 84 friend class LeakDetectorImpl; |
| 77 | 85 |
| 78 // Number of bytes allocated by the leak site during each allocation. | 86 // Number of bytes allocated by the leak site during each allocation. |
| 79 size_t alloc_size_bytes_; | 87 size_t alloc_size_bytes_; |
| 80 | 88 |
| 89 // Number of intervals in the last uptrend. |
| 90 size_t num_rising_intervals_; |
| 91 |
| 92 // Net number of allocations in the last uptrend. |
| 93 uint32_t num_allocs_increase_; |
| 94 |
| 81 // Unlike the CallStack struct, which consists of addresses, this call stack | 95 // Unlike the CallStack struct, which consists of addresses, this call stack |
| 82 // will contain offsets in the executable binary. | 96 // will contain offsets in the executable binary. |
| 83 InternalVector<uintptr_t> call_stack_; | 97 InternalVector<uintptr_t> call_stack_; |
| 84 | 98 |
| 85 // Records of allocation bookkeeping over time. The first element is the | 99 // Records of allocation bookkeeping over time. The first element is the |
| 86 // oldest entry and the last element is the newest. | 100 // oldest entry and the last element is the newest. |
| 87 InternalVector<AllocationBreakdown> alloc_breakdown_history_; | 101 InternalVector<AllocationBreakdown> alloc_breakdown_history_; |
| 88 }; | 102 }; |
| 89 | 103 |
| 90 LeakDetectorImpl(uintptr_t mapping_addr, | 104 LeakDetectorImpl(uintptr_t mapping_addr, |
| 91 size_t mapping_size, | 105 size_t mapping_size, |
| 92 int size_suspicion_threshold, | 106 int size_suspicion_threshold, |
| 93 int call_stack_suspicion_threshold); | 107 int call_stack_suspicion_threshold); |
| 94 ~LeakDetectorImpl(); | 108 ~LeakDetectorImpl(); |
| 95 | 109 |
| 96 // Indicates whether the given allocation size has an associated call stack | 110 // Indicates whether the given allocation size has an associated call stack |
| 97 // table, and thus requires a stack unwind. | 111 // table, and thus requires a stack unwind. |
| 98 bool ShouldGetStackTraceForSize(size_t size) const; | 112 bool ShouldGetStackTraceForSize(size_t size) const; |
| 99 | 113 |
| 100 // Record allocs and frees. | 114 // Record allocs and frees. |
| 101 void RecordAlloc(const void* ptr, | 115 void RecordAlloc(const void* ptr, |
| 102 size_t size, | 116 size_t size, |
| 103 int stack_depth, | 117 int stack_depth, |
| 104 const void* const call_stack[]); | 118 const void* const call_stack[]); |
| 105 void RecordFree(const void* ptr); | 119 void RecordFree(const void* ptr); |
| 106 | 120 |
| 107 // Run check for possible leaks based on the current profiling data. | 121 // Run check for possible leaks based on the current profiling data. |
| 108 void TestForLeaks(InternalVector<LeakReport>* reports); | 122 void TestForLeaks(InternalVector<LeakReport>* reports, size_t timestamp); |
| 109 | 123 |
| 110 private: | 124 private: |
| 111 // A record of allocations for a particular size. | 125 // A record of allocations for a particular size. |
| 112 struct AllocSizeEntry { | 126 struct AllocSizeEntry { |
| 113 // Number of allocations and frees for this size. | 127 // Number of allocations and frees for this size. |
| 114 uint32_t num_allocs; | 128 uint32_t num_allocs; |
| 115 uint32_t num_frees; | 129 uint32_t num_frees; |
| 116 | 130 |
| 117 // A stack table, if this size is being profiled for stack as well. | 131 // A stack table, if this size is being profiled for stack as well. |
| 118 CallStackTable* stack_table; | 132 CallStackTable* stack_table; |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 153 }; | 167 }; |
| 154 | 168 |
| 155 // Returns the offset of |ptr| within the current binary. If it is not in the | 169 // Returns the offset of |ptr| within the current binary. If it is not in the |
| 156 // current binary, return |UINTPTR_MAX|. | 170 // current binary, return |UINTPTR_MAX|. |
| 157 uintptr_t GetOffset(const void* ptr) const; | 171 uintptr_t GetOffset(const void* ptr) const; |
| 158 | 172 |
| 159 // Record some of the current allocation bookkeeping. The net number of allocs | 173 // Record some of the current allocation bookkeeping. The net number of allocs |
| 160 // per size is recorded in |size_breakdown_history_|. The net number of allocs | 174 // per size is recorded in |size_breakdown_history_|. The net number of allocs |
| 161 // per call site for each size is recorded in | 175 // per call site for each size is recorded in |
| 162 // |AllocSizeEntry::call_site_breakdown_history|. | 176 // |AllocSizeEntry::call_site_breakdown_history|. |
| 177 // Argument |timestamp| is used to update information about drops in |
| 178 // allocation number for each stored call stack. |
| 163 // | 179 // |
| 164 // Not all the net alloc counts are recorded. And the number of historical | 180 // Not all the net alloc counts are recorded. And the number of historical |
| 165 // records kept is capped. If adding a new record exceeds that limit, the | 181 // records kept is capped. If adding a new record exceeds that limit, the |
| 166 // oldest record is discarded. See the function definition for more details. | 182 // oldest record is discarded. See the function definition for more details. |
| 167 void RecordCurrentAllocationDataInHistory(); | 183 void RecordCurrentAllocationDataInHistory(size_t timestamp); |
| 168 | 184 |
| 169 // Store the data collected by RecordCurrentAllocationDataInHistory() in | 185 // Store the data collected by RecordCurrentAllocationDataInHistory() in |
| 170 // |*report|. Not all net alloc counts per call site will be stored, only the | 186 // |*report|. Not all net alloc counts per call site will be stored, only the |
| 171 // count for size=|size| and made from |call_site|. | 187 // count for size=|size| and made from |call_site|. Also information |
| 188 // about the last uptrend in net allocations for |size| and |call_site| |
| 189 // is recorded with the help of |timestamp|. |
| 172 void StoreHistoricalDataInReport(size_t size, const CallStack* call_site, | 190 void StoreHistoricalDataInReport(size_t size, const CallStack* call_site, |
| 173 LeakReport* report); | 191 LeakReport* report, size_t timestamp); |
| 174 | 192 |
| 175 // Decrements the cooldown counter (value) for each entry in | 193 // Decrements the cooldown counter (value) for each entry in |
| 176 // |cooldowns_per_leak_|. If the cooldown counter reaches 0, the entry is | 194 // |cooldowns_per_leak_|. If the cooldown counter reaches 0, the entry is |
| 177 // removed. Thus, all extant entries in |cooldowns_per_leak_| maintain a | 195 // removed. Thus, all extant entries in |cooldowns_per_leak_| maintain a |
| 178 // positive count. | 196 // positive count. |
| 179 void UpdateLeakCooldowns(); | 197 void UpdateLeakCooldowns(); |
| 180 | 198 |
| 181 // Returns true if a particular leak signature (alloc size + call site) does | 199 // Returns true if a particular leak signature (alloc size + call site) does |
| 182 // not have an active cooldown counter (i.e. does not have an entry in | 200 // not have an active cooldown counter (i.e. does not have an entry in |
| 183 // |cooldowns_per_leak_|). | 201 // |cooldowns_per_leak_|). |
| (...skipping 51 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 235 // considered a leak suspect. | 253 // considered a leak suspect. |
| 236 int call_stack_suspicion_threshold_; | 254 int call_stack_suspicion_threshold_; |
| 237 | 255 |
| 238 DISALLOW_COPY_AND_ASSIGN(LeakDetectorImpl); | 256 DISALLOW_COPY_AND_ASSIGN(LeakDetectorImpl); |
| 239 }; | 257 }; |
| 240 | 258 |
| 241 } // namespace leak_detector | 259 } // namespace leak_detector |
| 242 } // namespace metrics | 260 } // namespace metrics |
| 243 | 261 |
| 244 #endif // COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ | 262 #endif // COMPONENTS_METRICS_LEAK_DETECTOR_LEAK_DETECTOR_IMPL_H_ |
| OLD | NEW |