| Index: components/metrics/leak_detector/leak_detector.cc
|
| diff --git a/components/metrics/leak_detector/leak_detector.cc b/components/metrics/leak_detector/leak_detector.cc
|
| index 0fbd868ccfa2df810e08ea523bd7f82b093f07c6..1d440e9cb2df43b868b6f31c30ee8f52bcc91a6c 100644
|
| --- a/components/metrics/leak_detector/leak_detector.cc
|
| +++ b/components/metrics/leak_detector/leak_detector.cc
|
| @@ -4,10 +4,139 @@
|
|
|
| #include "components/metrics/leak_detector/leak_detector.h"
|
|
|
| +#include <stdint.h>
|
| +#include <string.h>  // for memcpy
|
| +#include <unistd.h>
|
| +
|
| +#include <algorithm> // for std::min
|
| +
|
| +#include "base/allocator/allocator_extension.h"
|
| +#include "base/logging.h"
|
| +#include "base/synchronization/lock.h"
|
| +#include "components/metrics/leak_detector/custom_allocator.h"
|
| #include "content/public/browser/browser_thread.h"
|
|
|
| +#if defined(OS_CHROMEOS)
|
| +#include <link.h> // for dl_iterate_phdr
|
| +#else
|
| +#error "Getting binary mapping info is not supported on this platform."
|
| +#endif // defined(OS_CHROMEOS)
|
| +
|
| namespace metrics {
|
|
|
| +using LeakReport = LeakDetector::LeakReport;
|
| +using leak_detector::CustomAllocator;
|
| +using leak_detector::LeakDetectorImpl;
|
| +using InternalLeakReport = LeakDetectorImpl::LeakReport;
|
| +template <typename T>
|
| +using InternalVector = LeakDetectorImpl::InternalVector<T>;
|
| +
|
| +namespace {
|
| +
|
| +// Create an object of this class to temporarily unregister the allocator
|
| +// hooks. When the object goes out of scope, it re-registers the hook
|
| +// functions stored in |alloc_hook_| and |free_hook_|, which LeakDetector
|
| +// fills in when it installs its hooks.
|
| +//
|
| +// Because the destructor unconditionally re-registers the stored hooks,
|
| +// instances of this class must not be nested: destroying an inner instance
|
| +// would re-enable the hooks while an outer instance still expects them to be
|
| +// disabled.
|
| +//
|
| +// This is useful in situations such as:
|
| +// - Calling malloc or free from within a hook function, which would otherwise
|
| +// result in recursive hook calls.
|
| +// - Calling LOG() while |g_instance_ptr_lock| is held: LOG() may call malloc,
|
| +// which calls AllocHook(), which then deadlocks trying to reacquire the
|
| +// lock.
|
| +//
|
| +// This class is not thread-safe and must only be used while the lock is held.
|
| +class AllocatorHookDisabler {
|
| + public:
|
| + AllocatorHookDisabler() {
|
| + base::allocator::SetHooks(nullptr, nullptr);
|
| + }
|
| +
|
| + ~AllocatorHookDisabler() {
|
| + base::allocator::SetHooks(alloc_hook_, free_hook_);
|
| + }
|
| +
|
| + static base::allocator::AllocHookFunc alloc_hook_;
|
| + static base::allocator::FreeHookFunc free_hook_;
|
| +
|
| + private:
|
| + DISALLOW_COPY_AND_ASSIGN(AllocatorHookDisabler);
|
| +};
|
| +
|
| +base::allocator::AllocHookFunc AllocatorHookDisabler::alloc_hook_ = nullptr;
|
| +base::allocator::FreeHookFunc AllocatorHookDisabler::free_hook_ = nullptr;
|
| +
|
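| +// InternalAlloc() and InternalFree() below are installed as CustomAllocator's
|
| +// low-level allocation functions (see the LeakDetector constructor). Without
|
| +// the disabler, the new/delete calls they make would re-enter
|
| +// AllocHook()/FreeHook() whenever the hooks are registered, and a hook that
|
| +// already holds |g_instance_ptr_lock| would then deadlock trying to acquire
|
| +// it again.
|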
| +// Disables hooks before calling new.
|
| +void* InternalAlloc(size_t size) {
|
| + AllocatorHookDisabler disabler;
|
| + return new char[size];
|
| +}
|
| +
|
| +// Disables hooks before calling delete.
|
| +void InternalFree(void* ptr, size_t /* size */) {
|
| + AllocatorHookDisabler disabler;
|
| + delete[] reinterpret_cast<char*>(ptr);
|
| +}
|
| +
|
| +#if defined(OS_CHROMEOS)
|
| +// For storing the address range of the Chrome binary in memory.
|
| +struct MappingInfo {
|
| + uintptr_t addr;
|
| + size_t size;
|
| +};
|
| +
|
| +// Callback for dl_iterate_phdr() to find the Chrome binary mapping.
|
| +int IterateLoadedObjects(struct dl_phdr_info* shared_object,
|
| + size_t /* size */,
|
| + void* data) {
|
| + for (int i = 0; i < shared_object->dlpi_phnum; i++) {
|
| + // Find the ELF segment header that contains the actual code of the Chrome
|
| + // binary.
|
| + const ElfW(Phdr)& segment_header = shared_object->dlpi_phdr[i];
|
| + if (segment_header.p_type == PT_LOAD && segment_header.p_offset == 0 &&
|
| + data) {
|
| + MappingInfo* mapping = static_cast<MappingInfo*>(data);
|
| +
|
| + // Make sure the fields in the ELF header and MappingInfo have the
|
| + // same size.
|
| + static_assert(sizeof(mapping->addr) == sizeof(shared_object->dlpi_addr),
|
| + "Integer size mismatch between MappingInfo::addr and "
|
| + "dl_phdr_info::dlpi_addr.");
|
| + static_assert(sizeof(mapping->size) == sizeof(segment_header.p_memsz),
|
| + "Integer size mismatch between MappingInfo::size and "
|
| + "ElfW(Phdr)::p_memsz.");
|
| +
|
| + mapping->addr = shared_object->dlpi_addr + segment_header.p_offset;
|
| + mapping->size = segment_header.p_memsz;
|
| + return 1;
|
| + }
|
| + }
|
| + return 0;
|
| +}
|
| +#endif // defined(OS_CHROMEOS)
|
| +
|
| +// Convert a pointer to a 64-bit hash value.
|
| +inline uint64_t PointerToHash(const void* ptr) {
|
| + // The input data is the pointer address, not the location in memory pointed
|
| + // to by the pointer.
|
| + // The multiplier is taken from Farmhash code:
|
| + // https://github.com/google/farmhash/blob/master/src/farmhash.cc
|
| + const uint64_t kMultiplier = 0x9ddfea08eb382d69ULL;
|
| + return reinterpret_cast<uint64_t>(ptr) * kMultiplier;
|
| +}
|
| +
|
| +// Points to the single LeakDetector instance, if any. Used by the hook
|
| +// functions, which are static members of LeakDetector.
|
| +
|
| +// For atomic access to |g_instance| -- the pointer, not the pointed-to object.
|
| +base::Lock* g_instance_ptr_lock = nullptr;
|
| +
|
| +} // namespace
|
| +
|
| LeakDetector::LeakReport::LeakReport() {}
|
|
|
| LeakDetector::LeakReport::~LeakReport() {}
|
| @@ -17,11 +146,56 @@ LeakDetector::LeakDetector(float sampling_rate,
|
| size_t max_call_stack_unwind_depth,
|
| uint64_t analysis_interval_bytes,
|
| uint32_t size_suspicion_threshold,
|
| uint32_t call_stack_suspicion_threshold)
|
| - : weak_factory_(this) {
|
| - // TODO(sque): Connect this class to LeakDetectorImpl and base::allocator.
|
| + : total_alloc_size_(0),
|
| + last_analysis_alloc_size_(0),
|
| + analysis_interval_bytes_(analysis_interval_bytes),
|
| + max_call_stack_unwind_depth_(max_call_stack_unwind_depth),
|
| + sampling_factor_(std::min(sampling_rate, 1.0f) * UINT64_MAX),
|
| + weak_factory_(this) {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| + CHECK(!g_instance) << "Cannot instantiate multiple instances of this class.";
|
| +
|
| +#if defined(OS_CHROMEOS)
|
| + // Locate the Chrome binary mapping info.
|
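| + // dl_iterate_phdr() calls IterateLoadedObjects() for each loaded object and
|
| + // stops as soon as the callback returns a nonzero value, so |mapping| ends
|
| + // up describing the first segment that matches the check in
|
| + // IterateLoadedObjects(). If nothing matches, |mapping| keeps its
|
| + // zero-initialized values.
|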
| + MappingInfo mapping = {};
|
| + dl_iterate_phdr(IterateLoadedObjects, &mapping);
|
| + binary_mapping_addr_ = mapping.addr;
|
| + binary_mapping_size_ = mapping.size;
|
| +#endif // defined(OS_CHROMEOS)
|
| +
|
| + // InternalAlloc() and InternalFree() are not thread-safe, but neither is
|
| + // CustomAllocator. CustomAllocator should always be accessed under lock.
|
| + // There is no need to further lock InternalAlloc() or InternalFree().
|
| + CustomAllocator::Initialize(&InternalAlloc, &InternalFree);
|
| + impl_.reset(new LeakDetectorImpl(mapping.addr, mapping.size,
|
| + size_suspicion_threshold,
|
| + call_stack_suspicion_threshold));
|
| +
|
| + g_instance_ptr_lock = new base::Lock;
|
| + g_instance = this;
|
| +
|
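| + // The hook functions may only be installed after |g_instance| and
|
| + // |g_instance_ptr_lock| have been set up above, because AllocHook() and
|
| + // FreeHook() dereference both as soon as they are registered.
|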
| + AllocatorHookDisabler::alloc_hook_ = &AllocHook;
|
| + AllocatorHookDisabler::free_hook_ = &FreeHook;
|
| + // Register allocator hook functions.
|
| + base::allocator::SetHooks(&AllocHook, &FreeHook);
|
| }
|
|
|
| -LeakDetector::~LeakDetector() {}
|
| +LeakDetector::~LeakDetector() {
|
| + DCHECK(thread_checker_.CalledOnValidThread());
|
| +
|
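| + // The hooks are unregistered first so that no new calls enter AllocHook()
|
| + // or FreeHook() while the rest of the detector state is torn down.
|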
| + // Unregister allocator hook functions.
|
| + base::allocator::SetHooks(nullptr, nullptr);
|
| + AllocatorHookDisabler::alloc_hook_ = nullptr;
|
| + AllocatorHookDisabler::free_hook_ = nullptr;
|
| +
|
| + // Clear |g_instance| so no stale pointer remains and a new LeakDetector
|
| + // can be created later (see the CHECK in the constructor).
|
| + g_instance = nullptr;
|
| + delete g_instance_ptr_lock;
|
| + g_instance_ptr_lock = nullptr;
|
| +
|
| + impl_.reset();
|
| + if (!CustomAllocator::Shutdown()) {
|
| + LOG(ERROR) << "Memory leak in leak detector, allocated objects remain.";
|
| + }
|
| +}
|
|
|
| void LeakDetector::AddObserver(Observer* observer) {
|
| DCHECK(thread_checker_.CalledOnValidThread());
|
| @@ -33,7 +207,94 @@ void LeakDetector::RemoveObserver(Observer* observer) {
|
| observers_.RemoveObserver(observer);
|
| }
|
|
|
| +// static
|
| +void LeakDetector::AllocHook(const void* ptr, size_t size) {
|
| + if (!ptr)
|
| + return;
|
| +
|
| + base::AutoLock lock(*g_instance_ptr_lock);
|
| +
|
| + if (!g_instance || !g_instance->ShouldSample(ptr))
|
| + return;
|
| +
|
| + g_instance->total_alloc_size_ += size;
|
| +
|
| + // |stack| allocates from CustomAllocator, whose shared state may only be
|
| + // used while |g_instance_ptr_lock| is held, as it is here.
|
| + InternalVector<void*> stack;
|
| + int depth = 0;
|
| + if (g_instance->impl_->ShouldGetStackTraceForSize(size)) {
|
| + stack.resize(g_instance->max_call_stack_unwind_depth_);
|
| + depth = base::allocator::GetCallStack(stack.data(), stack.size(), 0);
|
| + }
|
| +
|
| + g_instance->RecordAlloc(ptr, size, depth, stack.data());
|
| +}
|
| +
|
| +// static
|
| +void LeakDetector::FreeHook(const void* ptr) {
|
| + if (!ptr)
|
| + return;
|
| +
|
| + base::AutoLock lock(*g_instance_ptr_lock);
|
| +
|
| + if (!g_instance || !g_instance->ShouldSample(ptr))
|
| + return;
|
| +
|
| + g_instance->impl_->RecordFree(ptr);
|
| +}
|
| +
|
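| +// Returns true if the allocation at |ptr| should be sampled.
|
| +// |sampling_factor_| is |sampling_rate| (clamped to 1.0) scaled up to the
|
| +// 64-bit range, so with a sampling rate of, e.g., 0.25 it is roughly
|
| +// UINT64_MAX / 4 and about a quarter of pointer hashes fall below it.
|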
| +inline bool LeakDetector::ShouldSample(const void* ptr) const {
|
| + return PointerToHash(ptr) < sampling_factor_;
|
| +}
|
| +
|
| +void LeakDetector::RecordAlloc(const void* ptr,
|
| + size_t size,
|
| + int depth,
|
| + void** call_stack) {
|
| + // |leak_reports| allocates from CustomAllocator, so it must only be used
|
| + // while the caller holds |g_instance_ptr_lock|.
|
| + InternalVector<InternalLeakReport> leak_reports;
|
| +
|
| + impl_->RecordAlloc(ptr, size, depth, call_stack);
|
| +
|
| + // Check for leaks after |analysis_interval_bytes_| bytes have been
|
| + // allocated since the last analysis. This must run under the lock since it:
|
| + // - Modifies |last_analysis_alloc_size_|.
|
| + // - Updates the internals of |*impl_|.
|
| + // - Possibly generates a vector of LeakReports using CustomAllocator.
|
| + if (total_alloc_size_ >
|
| + last_analysis_alloc_size_ + analysis_interval_bytes_) {
|
| + // Try to maintain regular intervals of size |analysis_interval_bytes_|.
|
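| + // For example, with a 32 MiB interval, if |total_alloc_size_| has reached
|
| + // 97 MiB when this check fires, |last_analysis_alloc_size_| becomes 96 MiB
|
| + // and the next analysis triggers once the total exceeds 128 MiB.
|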
| + last_analysis_alloc_size_ =
|
| + total_alloc_size_ - total_alloc_size_ % analysis_interval_bytes_;
|
| + impl_->TestForLeaks(&leak_reports);
|
| + }
|
| +
|
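| + // Copy the reports out of the CustomAllocator-backed containers into plain
|
| + // LeakReport objects before handing them to observers.
|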
| + std::vector<LeakReport> leak_reports_for_observers;
|
| + leak_reports_for_observers.reserve(leak_reports.size());
|
| + for (const InternalLeakReport& report : leak_reports) {
|
| + leak_reports_for_observers.push_back(LeakReport());
|
| +
|
| + LeakReport* new_report = &leak_reports_for_observers.back();
|
| + new_report->alloc_size_bytes = report.alloc_size_bytes();
|
| + if (!report.call_stack().empty()) {
|
| + new_report->call_stack.resize(report.call_stack().size());
|
| + memcpy(new_report->call_stack.data(), report.call_stack().data(),
|
| + report.call_stack().size() * sizeof(report.call_stack()[0]));
|
| + }
|
| + }
|
| +
|
| + // Pass leak reports to observers. The observers must be called outside of the
|
| + // locked area to avoid slowdown.
|
| + NotifyObservers(leak_reports_for_observers);
|
| +}
|
| +
|
| void LeakDetector::NotifyObservers(const std::vector<LeakReport>& reports) {
|
| + if (reports.empty())
|
| + return;
|
| +
|
| if (!content::BrowserThread::CurrentlyOn(content::BrowserThread::UI)) {
|
| content::BrowserThread::PostTask(
|
| content::BrowserThread::UI, FROM_HERE,
|
|
|