OLD | NEW |
---|---|
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/metrics/leak_detector/leak_detector.h" | 5 #include "components/metrics/leak_detector/leak_detector.h" |
6 | 6 |
7 #include <stdint.h> | |
8 #include <unistd.h> | |
9 | |
10 #include <algorithm> // for std::min | |
11 | |
12 #include "base/allocator/allocator_extension.h" | |
13 #include "base/logging.h" | |
14 #include "base/synchronization/lock.h" | |
15 #include "components/metrics/leak_detector/custom_allocator.h" | |
7 #include "content/public/browser/browser_thread.h" | 16 #include "content/public/browser/browser_thread.h" |
8 | 17 |
18 #if defined(OS_CHROMEOS) | |
19 #include <link.h> // for dl_iterate_phdr | |
20 #else | |
21 #error "Getting binary mapping info is not supported on this platform." | |
22 #endif // defined(OS_CHROMEOS) | |
23 | |
9 namespace metrics { | 24 namespace metrics { |
10 | 25 |
26 using LeakReport = LeakDetector::LeakReport; | |
27 using leak_detector::CustomAllocator; | |
28 using leak_detector::LeakDetectorImpl; | |
29 using InternalLeakReport = LeakDetectorImpl::LeakReport; | |
30 template <typename T> | |
31 using InternalVector = LeakDetectorImpl::InternalVector<T>; | |
32 | |
33 namespace { | |
34 | |
35 // Create an object of this class to store the current new/delete hooks and | |
36 // then remove them. When this object goes out of scope, it will automatically | |
37 // restore the original hooks if they existed. | |
38 // | |
39 // If multiple instances of this class are created and there are hooks | |
40 // registered, only the first object will save and restore the hook functions. | |
41 // The others will have no effect. However, all concurrent instances MUST be | |
42 // destroyed in reverse relative to their instantiation. | |
43 // | |
44 // This is useful in situations such as: | |
45 // - Calling alloc or free from within a hook function, which would otherwise | |
46 // result in recursive hook calls. | |
47 // - Calling LOG() when |g_lock| is being held, as LOG will call malloc, which | |
48 // calls NewHook(), which then attempts to acquire the lock, resulting in it | |
49 // being blocked. | |
50 // | |
51 // This class is not thread-safe and should be called under lock. | |
52 class AllocatorHookDisabler { | |
53 public: | |
54 AllocatorHookDisabler() { | |
55 base::allocator::SetHooks(nullptr, nullptr); | |
56 } | |
57 | |
58 ~AllocatorHookDisabler() { | |
59 base::allocator::SetHooks(alloc_hook_, free_hook_); | |
60 } | |
61 | |
62 static base::allocator::AllocHookFunc alloc_hook_; | |
Primiano Tucci (use gerrit)
2016/03/03 16:23:44
It seems that this variable is only ever set to be
Simon Que
2016/03/04 01:59:18
That would require AllocHook/FreeHook to be public
Simon Que
2016/03/04 02:55:19
One argument in favor of the current design: what
Primiano Tucci (use gerrit)
2016/03/04 06:57:35
That's why I was suggesting to have a boolean stat
Primiano Tucci (use gerrit)
2016/03/04 06:57:35
I am not sure I follow. AllocHook and FreeHook her
| |
63 static base::allocator::FreeHookFunc free_hook_; | |
64 | |
65 private: | |
66 DISALLOW_COPY_AND_ASSIGN(AllocatorHookDisabler); | |
67 }; | |
68 | |
69 base::allocator::AllocHookFunc AllocatorHookDisabler::alloc_hook_ = nullptr; | |
70 base::allocator::FreeHookFunc AllocatorHookDisabler::free_hook_ = nullptr; | |
71 | |
72 // Disables hooks before calling new. | |
73 void* InternalAlloc(size_t size) { | |
74 AllocatorHookDisabler disabler; | |
75 return new char[size]; | |
76 } | |
77 | |
78 // Disables hooks before calling delete. | |
79 void InternalFree(void* ptr, size_t /* size */) { | |
80 AllocatorHookDisabler disabler; | |
81 delete[] reinterpret_cast<char*>(ptr); | |
82 } | |
83 | |
#if defined(OS_CHROMEOS)
// For storing the address range of the Chrome binary in memory.
struct MappingInfo {
  uintptr_t addr;
  size_t size;
};

// Callback for dl_iterate_phdr() to find the Chrome binary mapping.
// Fills in |data| (a MappingInfo*) and returns nonzero to stop the iteration
// once the binary's first loadable segment has been found.
int IterateLoadedObjects(struct dl_phdr_info* shared_object,
                         size_t /* size */,
                         void* data) {
  for (int i = 0; i < shared_object->dlpi_phnum; i++) {
    // Find the ELF segment that contains the actual code of the Chrome
    // binary: the loadable segment starting at file offset 0.
    // NOTE: |p_type| holds *program* header types (PT_*). The previous
    // comparison against SHT_PROGBITS (a *section* header constant) only
    // worked because both happen to equal 1.
    const ElfW(Phdr)& segment_header = shared_object->dlpi_phdr[i];
    if (segment_header.p_type == PT_LOAD && segment_header.p_offset == 0 &&
        data) {
      MappingInfo* mapping = static_cast<MappingInfo*>(data);

      // Make sure the fields in the ELF header and MappingInfo have the
      // same size. The second assert checks |p_memsz|, which is what is
      // actually assigned to |mapping->size| below.
      static_assert(sizeof(mapping->addr) == sizeof(shared_object->dlpi_addr),
                    "Integer size mismatch between MappingInfo::addr and "
                    "dl_phdr_info::dlpi_addr.");
      static_assert(sizeof(mapping->size) == sizeof(segment_header.p_memsz),
                    "Integer size mismatch between MappingInfo::size and "
                    "ElfW(Phdr)::p_memsz.");

      mapping->addr = shared_object->dlpi_addr + segment_header.p_offset;
      mapping->size = segment_header.p_memsz;
      return 1;  // Nonzero: stop iterating.
    }
  }
  return 0;  // Zero: continue with the next loaded object.
}
#endif  // defined(OS_CHROMEOS)
120 | |
// Converts a pointer value to a 64-bit hash. The input data is the pointer
// address itself, not the memory it points to. The full 64-bit product is
// returned (the previous comment claiming "only the upper eight bits" was
// inaccurate); callers compare it against a 64-bit threshold, see
// LeakDetector::ShouldSample().
inline uint64_t PointerToHash(const void* ptr) {
  // The multiplier is taken from Farmhash code:
  // https://github.com/google/farmhash/blob/master/src/farmhash.cc
  const uint64_t kMultiplier = 0x9ddfea08eb382d69ULL;
  // Go through uintptr_t so this also compiles on 32-bit targets, where a
  // direct reinterpret_cast from a pointer to uint64_t is ill-formed.
  return static_cast<uint64_t>(reinterpret_cast<uintptr_t>(ptr)) * kMultiplier;
}
130 | |
131 // A singleton instance of LeakDetector, used by the hook functions, which are | |
132 // static members of LeakDetector. | |
133 LeakDetector* g_instance = nullptr; | |
134 | |
135 // For atomic access to |g_instance| -- the pointer, not the pointed-to object. | |
Primiano Tucci (use gerrit)
2016/03/03 17:51:30
You say "not the pointed-to object" but in reality
| |
136 base::Lock* g_instance_ptr_lock = nullptr; | |
Primiano Tucci (use gerrit)
2016/03/03 16:23:44
This lock as is is not really solving any problem.
Simon Que
2016/03/04 01:59:18
What we really need here is a one-shot rwlock. I'm
Primiano Tucci (use gerrit)
2016/03/04 06:57:35
Is not that I am adverse. As usually in chrome the
| |
137 | |
138 } // namespace | |
139 | |
11 LeakDetector::LeakReport::LeakReport() {} | 140 LeakDetector::LeakReport::LeakReport() {} |
12 | 141 |
13 LeakDetector::LeakReport::~LeakReport() {} | 142 LeakDetector::LeakReport::~LeakReport() {} |
14 | 143 |
15 LeakDetector::LeakDetector(float sampling_rate, | 144 LeakDetector::LeakDetector(float sampling_rate, |
16 size_t max_call_stack_unwind_depth, | 145 size_t max_call_stack_unwind_depth, |
17 uint64_t analysis_interval_bytes, | 146 uint64_t analysis_interval_bytes, |
18 uint32_t size_suspicion_threshold, | 147 uint32_t size_suspicion_threshold, |
19 uint32_t call_stack_suspicion_threshold) | 148 uint32_t call_stack_suspicion_threshold) |
20 : weak_factory_(this) { | 149 : total_alloc_size_(0), |
Primiano Tucci (use gerrit)
2016/03/03 16:23:44
Can you reset binary_mapping_addr_ and binary_ma
Simon Que
2016/03/04 01:59:18
Done.
| |
21 // TODO(sque): Connect this class to LeakDetectorImpl and base::allocator. | 150 last_analysis_alloc_size_(0), |
151 analysis_interval_bytes_(analysis_interval_bytes), | |
152 max_call_stack_unwind_depth_(max_call_stack_unwind_depth), | |
153 sampling_factor_(std::min(sampling_rate, 1.0f) * UINT64_MAX), | |
154 weak_factory_(this) { | |
155 DCHECK(thread_checker_.CalledOnValidThread()); | |
156 CHECK(!g_instance) << "Cannot instantiate multiple instances of this class."; | |
Primiano Tucci (use gerrit)
2016/03/03 16:23:44
don't you need the g_instance_ptr_lock to make thi
Simon Que
2016/03/04 01:59:18
The DCHECK above should ensure that the ctor is al
Primiano Tucci (use gerrit)
2016/03/04 06:57:35
Acknowledged.
| |
157 | |
158 #if defined(OS_CHROMEOS) | |
159 // Locate the Chrome binary mapping info. | |
160 MappingInfo mapping; | |
161 dl_iterate_phdr(IterateLoadedObjects, &mapping); | |
162 binary_mapping_addr_ = mapping.addr; | |
163 binary_mapping_size_ = mapping.size; | |
164 #endif // defined(OS_CHROMEOS) | |
165 | |
166 // InternalAlloc() and InternalFree() are not thread-safe, but neither is | |
167 // CustomAllocator. CustomAllocator should always be accessed under lock. | |
168 // There is no need to further lock InternalAlloc()/-Free(). | |
169 CustomAllocator::Initialize(&InternalAlloc, &InternalFree); | |
170 impl_.reset(new LeakDetectorImpl(mapping.addr, mapping.size, | |
171 size_suspicion_threshold, | |
172 call_stack_suspicion_threshold)); | |
173 | |
174 g_instance_ptr_lock = new base::Lock; | |
175 g_instance = this; | |
176 | |
177 AllocatorHookDisabler::alloc_hook_ = &AllocHook; | |
178 AllocatorHookDisabler::free_hook_ = &FreeHook; | |
179 // Register allocator hook functions. | |
180 base::allocator::SetHooks(&AllocHook, &FreeHook); | |
22 } | 181 } |
23 | 182 |
24 LeakDetector::~LeakDetector() {} | 183 LeakDetector::~LeakDetector() { |
184 DCHECK(thread_checker_.CalledOnValidThread()); | |
185 | |
186 // Unregister allocator hook functions. | |
187 base::allocator::SetHooks(nullptr, nullptr); | |
188 AllocatorHookDisabler::alloc_hook_ = nullptr; | |
189 AllocatorHookDisabler::free_hook_ = nullptr; | |
190 | |
191 delete g_instance_ptr_lock; | |
192 g_instance_ptr_lock = nullptr; | |
193 | |
194 impl_.reset(); | |
195 if (!CustomAllocator::Shutdown()) { | |
196 LOG(ERROR) << "Memory leak in leak detector, allocated objects remain."; | |
197 } | |
198 } | |
25 | 199 |
26 void LeakDetector::AddObserver(Observer* observer) { | 200 void LeakDetector::AddObserver(Observer* observer) { |
27 DCHECK(thread_checker_.CalledOnValidThread()); | 201 DCHECK(thread_checker_.CalledOnValidThread()); |
28 observers_.AddObserver(observer); | 202 observers_.AddObserver(observer); |
29 } | 203 } |
30 | 204 |
31 void LeakDetector::RemoveObserver(Observer* observer) { | 205 void LeakDetector::RemoveObserver(Observer* observer) { |
32 DCHECK(thread_checker_.CalledOnValidThread()); | 206 DCHECK(thread_checker_.CalledOnValidThread()); |
33 observers_.RemoveObserver(observer); | 207 observers_.RemoveObserver(observer); |
34 } | 208 } |
35 | 209 |
210 // static | |
211 void LeakDetector::AllocHook(const void* ptr, size_t size) { | |
212 if (!ptr) | |
213 return; | |
214 | |
215 base::AutoLock lock(*g_instance_ptr_lock); | |
Primiano Tucci (use gerrit)
2016/03/03 16:23:44
At this point you already have to take a lock ever
| |
216 | |
217 if (!g_instance || !g_instance->ShouldSample(ptr)) | |
218 return; | |
219 | |
220 g_instance->total_alloc_size_ += size; | |
221 | |
222 // Must be modified under lock as it uses the shared resources of | |
223 // CustomAllocator. | |
224 InternalVector<void*> stack; | |
225 int depth = 0; | |
226 if (g_instance->impl_->ShouldGetStackTraceForSize(size)) { | |
227 stack.resize(g_instance->max_call_stack_unwind_depth_); | |
228 depth = base::allocator::GetCallStack(stack.data(), stack.size()); | |
229 } | |
230 | |
231 g_instance->RecordAlloc(ptr, size, depth, stack.data()); | |
232 } | |
233 | |
234 // static | |
235 void LeakDetector::FreeHook(const void* ptr) { | |
236 if (!ptr) | |
237 return; | |
238 | |
239 base::AutoLock lock(*g_instance_ptr_lock); | |
240 | |
241 if (!g_instance || !g_instance->ShouldSample(ptr)) | |
242 return; | |
243 | |
244 g_instance->impl_->RecordFree(ptr); | |
245 } | |
246 | |
247 inline bool LeakDetector::ShouldSample(const void* ptr) const { | |
248 return PointerToHash(ptr) < sampling_factor_; | |
249 } | |
250 | |
251 void LeakDetector::RecordAlloc(const void* ptr, | |
252 size_t size, | |
253 int depth, | |
254 void** call_stack) { | |
255 // This should be modified only with a lock because it uses the shared | |
Primiano Tucci (use gerrit)
2016/03/03 17:51:30
You can make this comment more explicit by adding
| |
256 // resources in CustomAllocator. | |
257 InternalVector<InternalLeakReport> leak_reports; | |
258 | |
259 impl_->RecordAlloc(ptr, size, depth, call_stack); | |
260 | |
261 // Check for leaks after |analysis_interval_bytes_| bytes have been | |
262 // allocated since the last time that was done. Should be called with a lock | |
263 // since it: | |
264 // - Modifies the global variable |g_last_analysis_alloc_size|. | |
265 // - Updates internals of |*impl|. | |
266 // - Possibly generates a vector of LeakReports using CustomAllocator. | |
Primiano Tucci (use gerrit)
2016/03/03 17:51:30
Suggestion (maybe for a future CL): probably would
Simon Que
2016/03/04 01:59:18
Added a TODO.
| |
267 if (total_alloc_size_ > | |
268 last_analysis_alloc_size_ + analysis_interval_bytes_) { | |
269 // Try to maintain regular intervals of size |analysis_interval_bytes_|. | |
270 last_analysis_alloc_size_ = | |
271 total_alloc_size_ - total_alloc_size_ % analysis_interval_bytes_; | |
272 impl_->TestForLeaks(&leak_reports); | |
273 } | |
274 | |
275 std::vector<LeakReport> leak_reports_for_observers; | |
Primiano Tucci (use gerrit)
2016/03/03 17:51:30
isn't std::vector cause mallocs and going to re-en
Simon Que
2016/03/04 01:59:18
Yes. Recursive calls to the allocator are actually
Primiano Tucci (use gerrit)
2016/03/04 06:57:35
How can you say that re-entrant calls to RecordAll
| |
276 leak_reports_for_observers.reserve(leak_reports.size()); | |
277 for (const InternalLeakReport& report : leak_reports) { | |
278 leak_reports_for_observers.push_back(LeakReport()); | |
279 | |
280 LeakReport* new_report = &leak_reports_for_observers.back(); | |
281 new_report->alloc_size_bytes = report.alloc_size_bytes(); | |
282 if (!report.call_stack().empty()) { | |
283 new_report->call_stack.resize(report.call_stack().size()); | |
284 memcpy(new_report->call_stack.data(), report.call_stack().data(), | |
285 report.call_stack().size() * sizeof(report.call_stack()[0])); | |
286 } | |
287 } | |
288 | |
289 // Pass leak reports to observers. The observers must be called outside of the | |
290 // locked area to avoid slowdown. | |
291 NotifyObservers(leak_reports_for_observers); | |
Primiano Tucci (use gerrit)
2016/03/03 17:51:30
NotifyObserver does all sort of thigs (PosTask, in
| |
292 } | |
293 | |
36 void LeakDetector::NotifyObservers(const std::vector<LeakReport>& reports) { | 294 void LeakDetector::NotifyObservers(const std::vector<LeakReport>& reports) { |
295 if (reports.empty()) | |
296 return; | |
297 | |
37 if (!content::BrowserThread::CurrentlyOn(content::BrowserThread::UI)) { | 298 if (!content::BrowserThread::CurrentlyOn(content::BrowserThread::UI)) { |
38 content::BrowserThread::PostTask( | 299 content::BrowserThread::PostTask( |
39 content::BrowserThread::UI, FROM_HERE, | 300 content::BrowserThread::UI, FROM_HERE, |
40 base::Bind(&LeakDetector::NotifyObservers, weak_factory_.GetWeakPtr(), | 301 base::Bind(&LeakDetector::NotifyObservers, weak_factory_.GetWeakPtr(), |
41 reports)); | 302 reports)); |
42 return; | 303 return; |
43 } | 304 } |
44 | 305 |
45 for (const LeakReport& report : reports) { | 306 for (const LeakReport& report : reports) { |
46 FOR_EACH_OBSERVER(Observer, observers_, OnLeakFound(report)); | 307 FOR_EACH_OBSERVER(Observer, observers_, OnLeakFound(report)); |
47 } | 308 } |
48 } | 309 } |
49 | 310 |
50 } // namespace metrics | 311 } // namespace metrics |
OLD | NEW |