OLD | NEW |
1 // Copyright 2016 The Chromium Authors. All rights reserved. | 1 // Copyright 2016 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/metrics/leak_detector/leak_detector.h" | 5 #include "components/metrics/leak_detector/leak_detector.h" |
6 | 6 |
| 7 #include <link.h> |
| 8 #include <stdint.h> |
| 9 #include <unistd.h> |
| 10 |
| 11 #include "base/allocator/allocator_extension.h" |
| 12 #include "base/logging.h" |
7 #include "content/public/browser/browser_thread.h" | 13 #include "content/public/browser/browser_thread.h" |
8 | 14 |
9 namespace metrics { | 15 namespace metrics { |
10 | 16 |
| 17 using LeakReport = LeakDetector::LeakReport; |
| 18 using leak_detector::CustomAllocator; |
| 19 using leak_detector::LeakDetectorImpl; |
| 20 using InternalLeakReport = LeakDetectorImpl::LeakReport; |
| 21 template <typename T> |
| 22 using InternalVector = LeakDetectorImpl::InternalVector<T>; |
| 23 |
| 24 namespace { |
| 25 |
// Sampling of allocs and frees is decided with integer arithmetic rather
// than floating point. This constant is the integer value corresponding to
// a 100% sampling rate; |sampling_factor_| is scaled against it.
const int kMaxSamplingFactor = 256;
| 30 |
| 31 // Create an object of this class to store the current new/delete hooks and |
| 32 // then remove them. When this object goes out of scope, it will automatically |
| 33 // restore the original hooks if they existed. |
| 34 // |
| 35 // If multiple instances of this class are created and there are hooks |
| 36 // registered, only the first object will save and restore the hook functions. |
| 37 // The others will have no effect. However, all concurrent instances MUST be |
| 38 // destroyed in reverse relative to their instantiation. |
| 39 // |
| 40 // This is useful in situations such as: |
| 41 // - Calling alloc or free from within a hook function, which would otherwise |
| 42 // result in recursive hook calls. |
| 43 // - Calling LOG() when |g_lock| is being held, as LOG will call malloc, which |
| 44 // calls NewHook(), which then attempts to acquire the lock, resulting in it |
| 45 // being blocked. |
| 46 class MallocHookDisabler { |
| 47 public: |
| 48 MallocHookDisabler() |
| 49 : new_hook_(base::allocator::SetSingleAllocHook(nullptr)), |
| 50 delete_hook_(base::allocator::SetSingleFreeHook(nullptr)) {} |
| 51 |
| 52 ~MallocHookDisabler() { |
| 53 if (new_hook_) |
| 54 base::allocator::SetSingleAllocHook(new_hook_); |
| 55 if (delete_hook_) |
| 56 base::allocator::SetSingleFreeHook(delete_hook_); |
| 57 } |
| 58 |
| 59 private: |
| 60 base::allocator::AllocHookFunc new_hook_; |
| 61 base::allocator::FreeHookFunc delete_hook_; |
| 62 |
| 63 DISALLOW_COPY_AND_ASSIGN(MallocHookDisabler); |
| 64 }; |
| 65 |
// Describes where the Chrome binary is mapped in this process's address
// space.
struct MappingInfo {
  uintptr_t addr;  // Start address of the mapping.
  size_t size;     // Size of the mapping in bytes.
};
| 71 |
| 72 // Disables hooks before calling new. |
| 73 void* InternalAlloc(size_t size) { |
| 74 MallocHookDisabler disabler; |
| 75 return new char[size]; |
| 76 } |
| 77 |
| 78 // Disables hooks before calling delete. |
| 79 void InternalFree(void* ptr, size_t /* size */) { |
| 80 MallocHookDisabler disabler; |
| 81 delete[] reinterpret_cast<char*>(ptr); |
| 82 } |
| 83 |
| 84 // Callback for dl_iterate_phdr() to find the Chrome binary mapping. |
| 85 int IterateLoadedObjects(struct dl_phdr_info* shared_object, |
| 86 size_t /* size */, |
| 87 void* data) { |
| 88 for (int i = 0; i < shared_object->dlpi_phnum; i++) { |
| 89 // Find the ELF segment header that contains the actual code of the Chrome |
| 90 // binary. |
| 91 const ElfW(Phdr)& segment_header = shared_object->dlpi_phdr[i]; |
| 92 if (segment_header.p_type == SHT_PROGBITS && segment_header.p_offset == 0 && |
| 93 data) { |
| 94 MappingInfo* mapping = static_cast<MappingInfo*>(data); |
| 95 |
| 96 // Make sure the fields in the ELF header and MappingInfo have the |
| 97 // same size. |
| 98 static_assert(sizeof(mapping->addr) == sizeof(shared_object->dlpi_addr), |
| 99 "Integer size mismatch between MappingInfo::addr and " |
| 100 "dl_phdr_info::dlpi_addr."); |
| 101 static_assert(sizeof(mapping->size) == sizeof(segment_header.p_offset), |
| 102 "Integer size mismatch between MappingInfo::size and " |
| 103 "ElfW(Phdr)::p_memsz."); |
| 104 |
| 105 mapping->addr = shared_object->dlpi_addr + segment_header.p_offset; |
| 106 mapping->size = segment_header.p_memsz; |
| 107 return 1; |
| 108 } |
| 109 } |
| 110 return 0; |
| 111 } |
| 112 |
// Hashes a pointer and returns only the upper eight bits of the 64-bit
// product, i.e. a value in [0, 256). The input is the pointer's address
// itself, not the memory it points to.
inline uint64_t PointerToHash(const void* ptr) {
  // Multiplier borrowed from the Farmhash code base:
  // https://github.com/google/farmhash/blob/master/src/farmhash.cc
  const uint64_t kMultiplier = 0x9ddfea08eb382d69ULL;
  const uint64_t product = reinterpret_cast<uint64_t>(ptr) * kMultiplier;
  return product >> 56;
}
| 123 |
| 124 LeakDetector* g_instance = nullptr; |
| 125 |
| 126 } // namespace |
| 127 |
11 LeakDetector::LeakReport::LeakReport() {} | 128 LeakDetector::LeakReport::LeakReport() {} |
12 | 129 |
13 LeakDetector::LeakReport::~LeakReport() {} | 130 LeakDetector::LeakReport::~LeakReport() {} |
14 | 131 |
15 LeakDetector::LeakDetector(float sampling_rate, | 132 LeakDetector::LeakDetector(float sampling_rate, |
16 size_t max_call_stack_unwind_depth, | 133 size_t max_call_stack_unwind_depth, |
17 uint64_t analysis_interval_bytes, | 134 uint64_t analysis_interval_bytes, |
18 uint32_t size_suspicion_threshold, | 135 uint32_t size_suspicion_threshold, |
19 uint32_t call_stack_suspicion_threshold) | 136 uint32_t call_stack_suspicion_threshold) |
20 : weak_factory_(this) { | 137 : total_alloc_size_(0), |
21 // TODO(sque): Connect this class to LeakDetectorImpl and base::allocator. | 138 last_analysis_alloc_size_(0), |
| 139 analysis_interval_bytes_(analysis_interval_bytes), |
| 140 max_call_stack_unwind_depth_(max_call_stack_unwind_depth), |
| 141 sampling_factor_(sampling_rate * kMaxSamplingFactor), |
| 142 weak_factory_(this) { |
| 143 DCHECK(thread_checker_.CalledOnValidThread()); |
| 144 CHECK(!g_instance) << "Cannot instantiate multiple instances of this class."; |
| 145 |
| 146 // Locate the Chrome binary mapping info. |
| 147 MappingInfo mapping; |
| 148 dl_iterate_phdr(IterateLoadedObjects, &mapping); |
| 149 binary_mapping_addr_ = mapping.addr; |
| 150 binary_mapping_size_ = mapping.size; |
| 151 |
| 152 CustomAllocator::Initialize(&InternalAlloc, &InternalFree); |
| 153 impl_.reset(new LeakDetectorImpl(mapping.addr, mapping.size, |
| 154 size_suspicion_threshold, |
| 155 call_stack_suspicion_threshold)); |
| 156 |
| 157 // Register allocator hook functions. |
| 158 if (base::allocator::SetSingleAllocHook(&AllocHook) != nullptr || |
| 159 base::allocator::SetSingleFreeHook(&FreeHook) != nullptr) { |
| 160 MallocHookDisabler disabler; |
| 161 LOG(FATAL) << "Overwrote existing callback."; |
| 162 } else if (base::allocator::GetSingleAllocHook() != &AllocHook || |
| 163 base::allocator::GetSingleFreeHook() != &FreeHook) { |
| 164 MallocHookDisabler disabler; |
| 165 LOG(FATAL) << "Failed to register free callback."; |
| 166 return; |
| 167 } |
| 168 |
| 169 g_instance = this; |
22 } | 170 } |
23 | 171 |
24 LeakDetector::~LeakDetector() {} | 172 LeakDetector::~LeakDetector() { |
| 173 DCHECK(thread_checker_.CalledOnValidThread()); |
| 174 g_instance = nullptr; |
| 175 |
| 176 // Unregister allocator hook functions. |
| 177 if (base::allocator::GetSingleAllocHook() == &AllocHook) |
| 178 base::allocator::SetSingleAllocHook(nullptr); |
| 179 if (base::allocator::GetSingleFreeHook() == &FreeHook) |
| 180 base::allocator::SetSingleFreeHook(nullptr); |
| 181 |
| 182 impl_.reset(); |
| 183 if (!CustomAllocator::Shutdown()) { |
| 184 LOG(ERROR) << "Memory leak in leak detector, allocated objects remain."; |
| 185 } |
| 186 } |
25 | 187 |
26 void LeakDetector::AddObserver(Observer* observer) { | 188 void LeakDetector::AddObserver(Observer* observer) { |
27 DCHECK(thread_checker_.CalledOnValidThread()); | 189 DCHECK(thread_checker_.CalledOnValidThread()); |
28 observers_.AddObserver(observer); | 190 observers_.AddObserver(observer); |
29 } | 191 } |
30 | 192 |
31 void LeakDetector::RemoveObserver(Observer* observer) { | 193 void LeakDetector::RemoveObserver(Observer* observer) { |
32 DCHECK(thread_checker_.CalledOnValidThread()); | 194 DCHECK(thread_checker_.CalledOnValidThread()); |
33 observers_.RemoveObserver(observer); | 195 observers_.RemoveObserver(observer); |
34 } | 196 } |
35 | 197 |
// static
// Allocator hook invoked on every allocation. Accumulates the running
// total allocation size, and for sampled non-null pointers records the
// allocation (with a call stack when the size qualifies) via RecordAlloc().
// Lock discipline: |lock_| guards |total_alloc_size_| and every operation
// that touches CustomAllocator's shared state (InternalVector resize/swap).
void LeakDetector::AllocHook(const void* ptr, size_t size) {
  CHECK(g_instance);

  {
    base::AutoLock lock(g_instance->lock_);
    g_instance->total_alloc_size_ += size;

    // Skip null pointers and pointers outside the sampled fraction.
    if (!ptr || !g_instance->ShouldSample(ptr))
      return;
  }

  auto& impl = g_instance->impl_;

  // Must be modified under lock as it uses the shared resources of
  // CustomAllocator.
  InternalVector<void*> stack;

  // Take the stack trace outside the critical section.
  // |LeakDetectorImpl::ShouldGetStackTraceForSize()| is const; there is no
  // need for a lock.
  int depth = 0;
  if (impl->ShouldGetStackTraceForSize(size)) {
    {
      // resize() allocates through CustomAllocator, hence the lock.
      base::AutoLock lock(g_instance->lock_);
      stack.resize(g_instance->max_call_stack_unwind_depth_);
    }
    depth = base::allocator::GetCallStack(stack.data(), stack.size(), 0);
  }

  // RecordAlloc() takes |lock_| internally; it must not be held here.
  g_instance->RecordAlloc(ptr, size, depth, stack.data());

  {
    base::AutoLock lock(g_instance->lock_);

    // InternalVectors must be cleaned up under lock, so we can't wait for them
    // to go out of scope.
    // std::vector::clear() still leaves reserved memory inside that will be
    // cleaned up by the destructor when it goes out of scope. And
    // vector::shrink_to_fit() is not allowed to be used yet. Instead swap
    // out the contents to a local container that is cleaned up when it goes
    // out of scope.
    InternalVector<void*> dummy_stack;
    stack.swap(dummy_stack);
  }
}
| 244 |
| 245 // static |
| 246 void LeakDetector::FreeHook(const void* ptr) { |
| 247 if (!ptr || !g_instance->ShouldSample(ptr)) |
| 248 return; |
| 249 |
| 250 base::AutoLock lock(g_instance->lock_); |
| 251 g_instance->impl_->RecordFree(ptr); |
| 252 } |
| 253 |
| 254 inline bool LeakDetector::ShouldSample(const void* ptr) const { |
| 255 return PointerToHash(ptr) < static_cast<uint64_t>(sampling_factor_); |
| 256 } |
| 257 |
// Records a sampled allocation in the implementation and, once
// |analysis_interval_bytes_| further bytes have been allocated since the
// last analysis, runs leak analysis and forwards any reports to observers.
// |call_stack| may be null when |depth| is 0. Takes |lock_| internally --
// callers must NOT hold it.
void LeakDetector::RecordAlloc(const void* ptr, size_t size, int depth,
                               void** call_stack) {
  // This should be modified only with a lock because it uses the shared
  // resources in CustomAllocator.
  InternalVector<InternalLeakReport> leak_reports;

  {
    base::AutoLock lock(lock_);
    impl_->RecordAlloc(ptr, size, depth, call_stack);

    // Check for leaks after |analysis_interval_bytes_| bytes have been
    // allocated since the last time that was done. Should be called with a lock
    // since it:
    // - Modifies the global variable |g_last_analysis_alloc_size|.
    // - Updates internals of |*impl|.
    // - Possibly generates a vector of LeakReports using CustomAllocator.
    if (total_alloc_size_ >
        last_analysis_alloc_size_ + analysis_interval_bytes_) {
      // Try to maintain regular intervals of size |analysis_interval_bytes_|.
      last_analysis_alloc_size_ =
          total_alloc_size_ - total_alloc_size_ % analysis_interval_bytes_;
      impl_->TestForLeaks(&leak_reports);
    }
  }

  // Convert internal reports (CustomAllocator-backed) into public LeakReport
  // objects outside the lock.
  std::vector<LeakReport> leak_reports_for_observers;
  leak_reports_for_observers.reserve(leak_reports.size());
  for (const InternalLeakReport& report : leak_reports) {
    leak_reports_for_observers.push_back(LeakReport());

    LeakReport* new_report = &leak_reports_for_observers.back();
    new_report->alloc_size_bytes = report.alloc_size_bytes();
    if (!report.call_stack().empty()) {
      new_report->call_stack.resize(report.call_stack().size());
      memcpy(new_report->call_stack.data(), report.call_stack().data(),
             report.call_stack().size() * sizeof(report.call_stack()[0]));
    }
  }

  {
    // Like the stack vector in AllocHook(): release the InternalVector's
    // CustomAllocator-backed storage under lock via swap with a local.
    base::AutoLock lock(lock_);
    InternalVector<InternalLeakReport> dummy_leak_reports;
    leak_reports.swap(dummy_leak_reports);
  }

  // Pass leak reports to observers. The observers must be called outside of the
  // locked area to avoid slowdown.
  NotifyObservers(leak_reports_for_observers);
}
| 307 |
// Delivers |reports| to all registered observers on the UI thread. If
// called on any other thread, re-posts itself (copying |reports| into the
// bound closure) to the UI thread and returns.
// NOTE(review): |weak_factory_| is dereferenced here potentially from a
// non-UI thread via GetWeakPtr(); base::WeakPtr is thread-affine --
// confirm this is safe for the threads that can reach this path.
void LeakDetector::NotifyObservers(const std::vector<LeakReport>& reports) {
  if (reports.empty())
    return;

  if (!content::BrowserThread::CurrentlyOn(content::BrowserThread::UI)) {
    // base::Bind copies |reports| by value so the vector outlives this call.
    content::BrowserThread::PostTask(
        content::BrowserThread::UI, FROM_HERE,
        base::Bind(&LeakDetector::NotifyObservers, weak_factory_.GetWeakPtr(),
                   reports));
    return;
  }

  for (const LeakReport& report : reports) {
    FOR_EACH_OBSERVER(Observer, observers_, OnLeakFound(report));
  }
}
49 | 324 |
50 } // namespace metrics | 325 } // namespace metrics |
OLD | NEW |