// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "leak_detector_impl.h"

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>   // for snprintf()
#include <string.h>  // for strchr()
#include <unistd.h>  // for getpid()

#include <algorithm>
#include <new>
#include <utility>

#include "base/hash.h"
#include "base/logging.h"  // for RAW_LOG()
#include "components/metrics/leak_detector/call_stack_table.h"
#include "components/metrics/leak_detector/custom_allocator.h"
#include "components/metrics/leak_detector/ranked_list.h"

namespace metrics {
namespace leak_detector {

namespace {

// Look for leaks in the top N entries in each tier, where N is this value.
const int kRankedListSize = 16;

// Initial hash table size for |LeakDetectorImpl::address_map_|.
const int kAddressMapNumBuckets = 100003;

// Number of entries in the alloc size table. As sizes are aligned to 32 bits,
// the max supported allocation size is (kNumSizeEntries * 4 - 1) = 8191
// bytes. Any larger sizes are ignored. This value is chosen high enough that
// such large sizes are rare if not nonexistent.
const int kNumSizeEntries = 2048;

using ValueType = LeakDetectorValueType;

// Print the contents of |str| prefixed with the current pid.
void PrintWithPid(const char* str) {
  char line[1024];
  snprintf(line, sizeof(line), "%d: %s\n", getpid(), str);
Will Harris, 2015/11/11 17:33:01:
  use base::GetCurrentProcId()

Simon Que, 2015/11/12 17:29:24:
  Done.
  RAW_LOG(ERROR, line);
}

// Prints the input string buffer using RAW_LOG, prefixing each line with the
// process id. Will modify |str| temporarily but restore it at the end.
void PrintWithPidOnEachLine(char* str) {
  char* current_line = str;
  // Find each newline, which marks the end of the current line and the start
  // of the next one.
  while (char* newline_ptr = strchr(current_line, '\n')) {
    // Terminate the current line so it can be printed as a separate string.
    // Restore the original string when done.
    *newline_ptr = '\0';
    PrintWithPid(current_line);
    *newline_ptr = '\n';

    // Point |current_line| to the next line.
    current_line = newline_ptr + 1;
  }
  // There may be an extra line at the end of the input string that is not
  // newline-terminated, e.g. if the input was only one line, or the last line
  // did not end with a newline.
  if (current_line[0] != '\0')
    PrintWithPid(current_line);
}
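
To illustrate the function above: with a pid of 1234 (a made-up value) and an
input buffer of "foo\nbar\n", the two RAW_LOG calls would emit:

  1234: foo
  1234: bar

An input whose last line lacks a trailing newline, e.g. "foo\nbar", prints the
same output via the check after the loop.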

// Functions to convert an allocation size to/from the array index used for
// |LeakDetectorImpl::size_entries_|.
int SizeToIndex(const size_t size) {
  int result = static_cast<int>(size / sizeof(uint32_t));
Will Harris, 2015/11/11 17:33:01:
  Max 64-bit unsigned int divided by 4 is larger tha…

Simon Que, 2015/11/12 17:29:24:
  Done.
  if (result < kNumSizeEntries)
    return result;
  return 0;
}

size_t IndexToSize(int index) {
  return sizeof(uint32_t) * index;
}
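
A quick sketch of the bucketing math above, using kNumSizeEntries == 2048 as
defined in this file:

  SizeToIndex(4);     // == 1: sizes are divided by sizeof(uint32_t).
  SizeToIndex(7);     // == 1: 7 / 4 truncates, so sizes 4..7 share bucket 1.
  SizeToIndex(8191);  // == 2047: the largest supported size.
  SizeToIndex(8192);  // == 0: sizes >= kNumSizeEntries * 4 fall back to 0.
  IndexToSize(1);     // == 4: returns the smallest size in bucket 1.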

}  // namespace

bool InternalLeakReport::operator<(const InternalLeakReport& other) const {
  if (alloc_size_bytes != other.alloc_size_bytes)
    return alloc_size_bytes < other.alloc_size_bytes;
  for (size_t i = 0; i < call_stack.size() && i < other.call_stack.size();
       ++i) {
    if (call_stack[i] != other.call_stack[i])
      return call_stack[i] < other.call_stack[i];
  }
  return call_stack.size() < other.call_stack.size();
}

LeakDetectorImpl::LeakDetectorImpl(uintptr_t mapping_addr,
                                   size_t mapping_size,
                                   int size_suspicion_threshold,
                                   int call_stack_suspicion_threshold,
                                   bool verbose)
    : num_allocs_(0),
      num_frees_(0),
      alloc_size_(0),
      free_size_(0),
      num_allocs_with_call_stack_(0),
      num_stack_tables_(0),
      address_map_(kAddressMapNumBuckets),
      size_leak_analyzer_(kRankedListSize, size_suspicion_threshold),
      size_entries_(kNumSizeEntries, {0}),
      mapping_addr_(mapping_addr),
      mapping_size_(mapping_size),
      call_stack_suspicion_threshold_(call_stack_suspicion_threshold),
      verbose_(verbose) {
}

LeakDetectorImpl::~LeakDetectorImpl() {
  // Free any call stack tables.
  for (AllocSizeEntry& entry : size_entries_) {
    CallStackTable* table = entry.stack_table;
    if (!table)
      continue;
    table->~CallStackTable();
    CustomAllocator::Free(table, sizeof(CallStackTable));
  }
  size_entries_.clear();
}
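
The explicit destructor call followed by CustomAllocator::Free() above mirrors
the placement-new construction in TestForLeaks() below. A generic helper pair
for this pattern might look like the following (a hypothetical sketch, not
part of this CL):

  template <typename T, typename... Args>
  T* CustomNew(Args&&... args) {
    return new (CustomAllocator::Allocate(sizeof(T)))
        T(std::forward<Args>(args)...);
  }

  template <typename T>
  void CustomDelete(T* object) {
    object->~T();
    CustomAllocator::Free(object, sizeof(T));
  }

(Whether variadic templates and std::forward are permitted here is subject to
the same C++11 restrictions raised elsewhere in this review.)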

bool LeakDetectorImpl::ShouldGetStackTraceForSize(size_t size) const {
  return size_entries_[SizeToIndex(size)].stack_table != nullptr;
}

void LeakDetectorImpl::RecordAlloc(const void* ptr,
                                   size_t size,
                                   int stack_depth,
                                   const void* const stack[]) {
  AllocInfo alloc_info;
  alloc_info.size = size;

  alloc_size_ += alloc_info.size;
  ++num_allocs_;

  AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
  ++entry->num_allocs;

  if (entry->stack_table && stack_depth > 0) {
    alloc_info.call_stack =
        call_stack_manager_.GetCallStack(stack_depth, stack);
    entry->stack_table->Add(alloc_info.call_stack);

    ++num_allocs_with_call_stack_;
  }

  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  address_map_.insert(std::pair<uintptr_t, AllocInfo>(addr, alloc_info));
}

void LeakDetectorImpl::RecordFree(const void* ptr) {
  // Look up address.
  uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
  auto iter = address_map_.find(addr);
  if (iter == address_map_.end())
Will Harris, 2015/11/11 17:33:01:
  might it be good to catch these double frees?

Simon Que, 2015/11/12 17:29:24:
  Good idea. I'll leave a TODO here for a future rev…
    return;

  const AllocInfo& alloc_info = iter->second;

  AllocSizeEntry* entry = &size_entries_[SizeToIndex(alloc_info.size)];
  ++entry->num_frees;

  const CallStack* call_stack = alloc_info.call_stack;
  if (call_stack) {
    if (entry->stack_table)
      entry->stack_table->Remove(call_stack);
  }
  ++num_frees_;
  free_size_ += alloc_info.size;

  address_map_.erase(iter);
}
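
Per the review comment above, a minimal double-free check could hang off the
failed lookup. A hypothetical sketch, reusing the logging helpers in this
file:

  if (iter == address_map_.end()) {
    // |ptr| was never recorded or was already freed: likely a double free,
    // or a free of memory allocated before the hooks were installed.
    char buf[128];
    snprintf(buf, sizeof(buf), "Possible double free of %p", ptr);
    PrintWithPid(buf);
    return;
  }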

void LeakDetectorImpl::TestForLeaks(
    bool do_logging,
    InternalVector<InternalLeakReport>* reports) {
  if (do_logging)
    DumpStats();

  // Add net alloc counts for each size to a ranked list.
  RankedList size_ranked_list(kRankedListSize);
  for (size_t i = 0; i < size_entries_.size(); ++i) {
    const AllocSizeEntry& entry = size_entries_[i];
    ValueType size_value(IndexToSize(i));
    size_ranked_list.Add(size_value, entry.num_allocs - entry.num_frees);
  }
  size_leak_analyzer_.AddSample(std::move(size_ranked_list));
Will Harris, 2015/11/11 17:33:01:
  I don't think C++11 move semantics are allowed in…

Simon Que, 2015/11/12 17:29:24:
  Done.

  // Dump out the top entries.
  char buf[0x4000];
  if (do_logging && verbose_) {
    if (size_leak_analyzer_.Dump(sizeof(buf), buf) < sizeof(buf))
      PrintWithPidOnEachLine(buf);
  }

  // Get suspected leaks by size.
  for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) {
    uint32_t size = size_value.size();
    AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
    if (entry->stack_table)
      continue;
    if (do_logging) {
      snprintf(buf, sizeof(buf), "Adding stack table for size %u\n", size);
      PrintWithPidOnEachLine(buf);
    }
    entry->stack_table =
        new (CustomAllocator::Allocate(sizeof(CallStackTable)))
            CallStackTable(call_stack_suspicion_threshold_);
    ++num_stack_tables_;
  }

  // Check for leaks in each CallStackTable. It would make sense to do this
  // before checking the size allocations above, since that step can create
  // new CallStackTables. However, the overhead of checking a newly created
  // (empty) table is small, and this function runs very rarely, so handle
  // the Tier 2 leak checks here.
  reports->clear();
  for (size_t i = 0; i < size_entries_.size(); ++i) {
    const AllocSizeEntry& entry = size_entries_[i];
    CallStackTable* stack_table = entry.stack_table;
    if (!stack_table || stack_table->empty())
      continue;

    size_t size = IndexToSize(i);
    if (do_logging && verbose_) {
      // Dump table info.
      snprintf(buf, sizeof(buf), "Stack table for size %zu:\n", size);
      PrintWithPidOnEachLine(buf);

      if (stack_table->Dump(sizeof(buf), buf) < sizeof(buf))
        PrintWithPidOnEachLine(buf);
    }

    // Get suspected leaks by call stack.
    stack_table->TestForLeaks();
    const LeakAnalyzer& leak_analyzer = stack_table->leak_analyzer();
    for (const ValueType& call_stack_value : leak_analyzer.suspected_leaks()) {
      const CallStack* call_stack = call_stack_value.call_stack();

      // Return reports by storing them in |*reports|.
      reports->resize(reports->size() + 1);
      InternalLeakReport* report = &reports->back();
      report->alloc_size_bytes = size;
      report->call_stack.resize(call_stack->depth);
      for (size_t j = 0; j < call_stack->depth; ++j) {
        report->call_stack[j] = GetOffset(call_stack->stack[j]);
      }

      if (do_logging) {
        int offset = snprintf(buf, sizeof(buf),
                              "Suspected call stack for size %zu, %p:\n",
                              size, call_stack);
        for (size_t j = 0; j < call_stack->depth; ++j) {
          offset += snprintf(buf + offset, sizeof(buf) - offset,
                             "\t%" PRIxPTR "\n",
                             GetOffset(call_stack->stack[j]));
        }
        PrintWithPidOnEachLine(buf);
      }
    }
  }
}
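
Taken together, a caller would drive this class roughly as follows. This is a
hypothetical sketch: the threshold values are made up, and the allocator hook
plumbing that supplies |ptr|, |size|, and the stack is outside this file.

  LeakDetectorImpl detector(mapping_addr, mapping_size,
                            /* size_suspicion_threshold */ 4,
                            /* call_stack_suspicion_threshold */ 4,
                            /* verbose */ true);

  // Called from the allocator hooks:
  detector.RecordAlloc(ptr, size, stack_depth, stack);
  detector.RecordFree(ptr);

  // Called periodically, e.g. every N allocation events:
  InternalVector<InternalLeakReport> reports;
  detector.TestForLeaks(/* do_logging */ true, &reports);
  // |reports| now holds suspected leaks, each an alloc size plus a call
  // stack of offsets relative to the binary's mapping address.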

size_t LeakDetectorImpl::AddressHash::operator()(uintptr_t addr) const {
  return base::Hash(reinterpret_cast<const char*>(&addr), sizeof(addr));
}

uintptr_t LeakDetectorImpl::GetOffset(const void* ptr) const {
  uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr);
  if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_)
Will Harris, 2015/11/11 17:33:01:
  can you explain how this works with allocations th…

Simon Que, 2015/11/12 17:29:24:
  I'm not sure what those special cases are, as I on…
    return ptr_value - mapping_addr_;
  return ptr_value;
}
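
A worked example of the normalization above, with made-up addresses: if the
binary is mapped at mapping_addr_ == 0x7f1200000000 with mapping_size_ ==
0x200000, a caller address of 0x7f1200001234 falls inside the mapping and is
reported as the offset 0x1234, while an address outside the mapping (e.g. one
in a shared library) is reported unchanged. Reporting offsets keeps in-binary
stack frames comparable across runs despite address space layout
randomization.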

void LeakDetectorImpl::DumpStats() const {
  char buf[1024];
  snprintf(buf, sizeof(buf),
           "Alloc size: %" PRIu64 "\n"
           "Free size: %" PRIu64 "\n"
           "Net alloc size: %" PRIu64 "\n"
           "Number of stack tables: %u\n"
           "Percentage of allocs with stack traces: %.2f%%\n"
           "Number of call stack buckets: %zu\n",
           alloc_size_, free_size_, alloc_size_ - free_size_,
           num_stack_tables_,
           num_allocs_ ? 100.0f * num_allocs_with_call_stack_ / num_allocs_ : 0,
           call_stack_manager_.size());
  PrintWithPidOnEachLine(buf);
}

}  // namespace leak_detector
}  // namespace metrics