Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(134)

Side by Side Diff: components/metrics/leak_detector/leak_detector_impl.cc

Issue 986503002: components/metrics: Add runtime memory leak detector (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Add OWNERS file Created 5 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "leak_detector_impl.h"
6
7 #include <inttypes.h>
8 #include <stddef.h>
9
10 #include <algorithm>
11 #include <new>
12
13 #include "base/hash.h"
14 #include "base/process/process_handle.h"
15 #include "components/metrics/leak_detector/call_stack_table.h"
16 #include "components/metrics/leak_detector/custom_allocator.h"
17 #include "components/metrics/leak_detector/ranked_list.h"
18
19 namespace metrics {
20 namespace leak_detector {
21
namespace {

// Look for leaks in the top N entries in each tier, where N is this value.
const int kRankedListSize = 16;

// Initial hash table size for |LeakDetectorImpl::address_map_|.
const int kAddressMapNumBuckets = 100003;

// Number of entries in the alloc size table. As sizes are aligned to 32-bits
// the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger
// sizes are ignored. This value is chosen high enough that such large sizes
// are rare if not nonexistent.
const int kNumSizeEntries = 2048;

// Shorthand for the value type stored in ranked lists and leak analyzers.
using ValueType = LeakDetectorValueType;
37
38 // Functions to convert an allocation size to/from the array index used for
39 // |LeakDetectorImpl::size_entries_|.
40 size_t SizeToIndex(const size_t size) {
41 int result = static_cast<int>(size / sizeof(uint32_t));
42 if (result < kNumSizeEntries)
43 return result;
44 return 0;
45 }
46
// Inverse of SizeToIndex(): returns the smallest allocation size that maps
// to |index|.
size_t IndexToSize(size_t index) {
  return index * sizeof(uint32_t);
}
50
51 } // namespace
52
53 bool InternalLeakReport::operator< (const InternalLeakReport& other) const {
54 if (alloc_size_bytes != other.alloc_size_bytes)
55 return alloc_size_bytes < other.alloc_size_bytes;
56 for (size_t i = 0;
57 i < call_stack.size() && i < other.call_stack.size();
58 ++i) {
59 if (call_stack[i] != other.call_stack[i])
60 return call_stack[i] < other.call_stack[i];
61 }
62 return call_stack.size() < other.call_stack.size();
63 }
64
// Constructs the leak detector for a binary mapped at |mapping_addr| with
// size |mapping_size|; GetOffset() uses the mapping to normalize call stack
// addresses. NOTE(review): the exact semantics of the two suspicion
// thresholds are defined by LeakAnalyzer / CallStackTable — presumably the
// number of consecutive suspicious samples before reporting; confirm there.
LeakDetectorImpl::LeakDetectorImpl(uintptr_t mapping_addr,
                                   size_t mapping_size,
                                   int size_suspicion_threshold,
                                   int call_stack_suspicion_threshold)
    : num_allocs_(0),
      num_frees_(0),
      alloc_size_(0),
      free_size_(0),
      num_allocs_with_call_stack_(0),
      num_stack_tables_(0),
      // Large initial bucket count to limit rehashing of the address map.
      address_map_(kAddressMapNumBuckets),
      size_leak_analyzer_(kRankedListSize, size_suspicion_threshold),
      // One zero-initialized entry per supported allocation size bucket.
      size_entries_(kNumSizeEntries, {0}),
      mapping_addr_(mapping_addr),
      mapping_size_(mapping_size),
      call_stack_suspicion_threshold_(call_stack_suspicion_threshold) {}
81
82 LeakDetectorImpl::~LeakDetectorImpl() {
83 // Free any call stack tables.
84 for (AllocSizeEntry& entry : size_entries_) {
85 CallStackTable* table = entry.stack_table;
86 if (!table)
87 continue;
88 table->~CallStackTable();
89 CustomAllocator::Free(table, sizeof(CallStackTable));
90 }
91 size_entries_.clear();
92 }
93
94 bool LeakDetectorImpl::ShouldGetStackTraceForSize(size_t size) const {
95 return size_entries_[SizeToIndex(size)].stack_table != nullptr;
96 }
97
98 void LeakDetectorImpl::RecordAlloc(
99 const void* ptr, size_t size,
100 int stack_depth, const void* const stack[]) {
101 AllocInfo alloc_info;
102 alloc_info.size = size;
103
104 alloc_size_ += alloc_info.size;
105 ++num_allocs_;
106
107 AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
108 ++entry->num_allocs;
109
110 if (entry->stack_table && stack_depth > 0) {
111 alloc_info.call_stack =
112 call_stack_manager_.GetCallStack(stack_depth, stack);
113 entry->stack_table->Add(alloc_info.call_stack);
114
115 ++num_allocs_with_call_stack_;
116 }
117
118 uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
119 address_map_.insert(std::pair<uintptr_t, AllocInfo>(addr, alloc_info));
120 }
121
122 void LeakDetectorImpl::RecordFree(const void* ptr) {
123 // Look up address.
124 uintptr_t addr = reinterpret_cast<uintptr_t>(ptr);
125 auto iter = address_map_.find(addr);
126 // TODO(sque): Catch and report double frees.
127 if (iter == address_map_.end())
128 return;
129
130 const AllocInfo& alloc_info = iter->second;
131
132 AllocSizeEntry* entry = &size_entries_[SizeToIndex(alloc_info.size)];
133 ++entry->num_frees;
134
135 const CallStack* call_stack = alloc_info.call_stack;
136 if (call_stack) {
137 if (entry->stack_table)
138 entry->stack_table->Remove(call_stack);
139 }
140 ++num_frees_;
141 free_size_ += alloc_info.size;
142
143 address_map_.erase(iter);
144 }
145
// Runs the two-tier leak analysis and fills |*reports| with suspected leaks.
// Tier 1: net allocation counts per size bucket are ranked and fed to
// |size_leak_analyzer_|; each size it flags gets a CallStackTable so that
// future allocations of that size record call stacks.
// Tier 2: every existing CallStackTable is analyzed, and each suspected call
// stack becomes an InternalLeakReport entry.
void LeakDetectorImpl::TestForLeaks(
    InternalVector<InternalLeakReport>* reports) {
  // Add net alloc counts for each size to a ranked list.
  RankedList size_ranked_list(kRankedListSize);
  for (size_t i = 0; i < size_entries_.size(); ++i) {
    const AllocSizeEntry& entry = size_entries_[i];
    ValueType size_value(IndexToSize(i));
    size_ranked_list.Add(size_value, entry.num_allocs - entry.num_frees);
  }
  size_leak_analyzer_.AddSample(size_ranked_list.Pass());

  // Get suspected leaks by size. Each flagged size that does not already
  // have a call stack table gets one, placement-constructed in memory from
  // CustomAllocator (destroyed manually in ~LeakDetectorImpl()).
  for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) {
    uint32_t size = size_value.size();
    AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
    if (entry->stack_table)
      continue;
    entry->stack_table = new(CustomAllocator::Allocate(sizeof(CallStackTable)))
        CallStackTable(call_stack_suspicion_threshold_);
    ++num_stack_tables_;
  }

  // Check for leaks in each CallStackTable (Tier 2). This runs after the
  // size pass above, which may have just created new tables; those are still
  // empty and get skipped below, so the extra overhead is negligible given
  // how rarely this function runs.
  reports->clear();
  for (size_t i = 0; i < size_entries_.size(); ++i) {
    const AllocSizeEntry& entry = size_entries_[i];
    CallStackTable* stack_table = entry.stack_table;
    if (!stack_table || stack_table->empty())
      continue;

    size_t size = IndexToSize(i);

    // Get suspected leaks by call stack.
    stack_table->TestForLeaks();
    const LeakAnalyzer& leak_analyzer = stack_table->leak_analyzer();
    for (const ValueType& call_stack_value : leak_analyzer.suspected_leaks()) {
      const CallStack* call_stack = call_stack_value.call_stack();

      // Return reports by storing in |*reports|. Raw stack addresses are
      // normalized to mapping offsets via GetOffset().
      reports->resize(reports->size() + 1);
      InternalLeakReport* report = &reports->back();
      report->alloc_size_bytes = size;
      report->call_stack.resize(call_stack->depth);
      for (size_t j = 0; j < call_stack->depth; ++j) {
        report->call_stack[j] = GetOffset(call_stack->stack[j]);
      }
    }
  }
}
199
200 size_t LeakDetectorImpl::AddressHash::operator() (uintptr_t addr) const {
201 return base::Hash(reinterpret_cast<const char*>(&addr), sizeof(addr));
202 }
203
204 uintptr_t LeakDetectorImpl::GetOffset(const void *ptr) const {
205 uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr);
206 if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_)
207 return ptr_value - mapping_addr_;
208 return ptr_value;
209 }
210
211 } // namespace leak_detector
212 } // namespace metrics
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698