| Index: third_party/tcmalloc/chromium/src/leak_detector_impl.cc
|
| diff --git a/third_party/tcmalloc/chromium/src/leak_detector_impl.cc b/third_party/tcmalloc/chromium/src/leak_detector_impl.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..21d0d126426778b5e446d376924fbcb8468cd104
|
| --- /dev/null
|
| +++ b/third_party/tcmalloc/chromium/src/leak_detector_impl.cc
|
| @@ -0,0 +1,301 @@
|
| +// Copyright (c) 2015 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +// ---
|
| +// Author: Simon Que
|
| +
|
| +#include "leak_detector_impl.h"
|
| +
|
| +#include <cstddef>
|
| +#include <cstring>
|
| +
|
| +#include <algorithm>
|
| +
|
| +#include "base/basictypes.h"
|
| +#include "base/commandlineflags.h"
|
| +#include "base/logging.h"
|
| +#include "call_stack_table.h"
|
| +#include "farmhash.h"
|
| +#include "ranked_list.h"
|
| +
|
// A size must be suspected this many times to be reported as a leak suspect.
// (Flag defined elsewhere; declared here for use by LeakDetectorImpl.)
DECLARE_int32(size_suspicion_threshold);

// If set, dumps all leak analysis data, not just suspected leak reports.
DECLARE_bool(dump_leak_analysis);
|
| +
|
| +namespace leak_detector {
|
| +
|
| +namespace {
|
| +
|
// Look for leaks in the top N entries in each tier, where N is this value.
const int kRankedListSize = 16;
|
| +
|
| +// Used for printing size values in LeakAnalyzer.
|
| +class SizeStringPrint : public LeakAnalyzer<uint32>::StringPrint {
|
| + public:
|
| + // Gets the string representation of a value.
|
| + virtual const char* ValueToString(const uint32& value, bool spacing_on) {
|
| + snprintf(buffer_, sizeof(buffer_), spacing_on ? "%10u" : "%u", value);
|
| + return buffer_;
|
| + }
|
| +
|
| + // Gets the word that describes the value type.
|
| + virtual const char* ValueTypeName(bool is_plural) {
|
| + return is_plural ? "sizes" : "size";
|
| + }
|
| +} size_string_print;
|
| +
|
| +// Prints the input string buffer using RAW_LOG, pre-fixing each line with the
|
| +// process id.
|
| +void PrintWithPidOnEachLine(char* buf) {
|
| + char *ptr = strchr(buf, '\n');
|
| + do {
|
| + // Break up the string.
|
| + if (ptr)
|
| + *ptr = '\0';
|
| + // Print out the former part.
|
| + RAW_LOG(0, "%d: %s", getpid(), buf);
|
| + // Re-point |buf| to the latter part.
|
| + if (ptr)
|
| + buf = ptr + 1;
|
| + } while (ptr = strchr(buf, '\n'));
|
| +}
|
| +
|
// Hashes a call stack (an array of |depth| return addresses) by running
// util::Hash from farmhash over the raw pointer bytes.  (Despite the original
// comment, this is FarmHash, not CRC-32.)
inline uint64 CallStackToHash(int depth, const void* const stack[]) {
  return util::Hash(reinterpret_cast<const char*>(stack),
                    sizeof(*stack) * depth);
}
|
| +
|
| +} // namespace
|
| +
|
// Initializes the leak detector with custom allocation hooks and the address
// range of the mapped binary (used by GetOffset() to normalize stack
// addresses into offsets).  All statistics start at zero; no call stack
// tables exist until TestForLeaks() suspects a size.
LeakDetectorImpl::LeakDetectorImpl(Allocator alloc, DeAllocator dealloc,
                                   uint64 mapping_addr, uint64 mapping_size)
    : alloc_(alloc),
      dealloc_(dealloc),
      num_stack_tables_(0),
      address_map_(alloc_, dealloc_),
      size_leak_analyzer_(kRankedListSize, FLAGS_size_suspicion_threshold,
                          alloc_, dealloc_, &size_string_print),
      mapping_addr_(mapping_addr),
      mapping_size_(mapping_size) {
  // Clear the hash table for buckets (all chains start empty).
  memset(bucket_table_, 0, sizeof(bucket_table_));

  // Zero-fill remaining state.  NOTE(review): assumes entries_, stats_ and
  // call_stack_stats_ are plain-old-data structs — confirm in the header.
  num_buckets_ = 0;
  memset(entries_, 0, sizeof(entries_));
  memset(&stats_, 0, sizeof(stats_));
  memset(&call_stack_stats_, 0, sizeof(call_stack_stats_));
}
|
| +
|
| +LeakDetectorImpl::~LeakDetectorImpl() {
|
| + // Free the hash table.
|
| + for (int i = 0; i < kHashTableSize; i++) {
|
| + for (Bucket* curr = bucket_table_[i]; curr != 0; /**/) {
|
| + Bucket* bucket = curr;
|
| + curr = curr->next;
|
| + dealloc_(bucket->stack);
|
| + dealloc_(bucket);
|
| + }
|
| + }
|
| +
|
| + // Free any call stack tables.
|
| + for (int i = 0; i < kNumSizeEntries; ++i) {
|
| + CallStackTable* table = entries_[i].stack_table;
|
| + if (!table)
|
| + continue;
|
| + table->~CallStackTable();
|
| + dealloc_(table);
|
| + }
|
| +}
|
| +
|
| +bool LeakDetectorImpl::ShouldGetStackTraceForSize(size_t size) const {
|
| + return entries_[SizeToIndex(size)].stack_table != NULL;
|
| +}
|
| +
|
| +void LeakDetectorImpl::RecordAlloc(
|
| + const void* ptr, size_t size,
|
| + int stack_depth, const void* const call_stack[]) {
|
| + AllocInfo alloc_info;
|
| + alloc_info.bytes = size;
|
| +
|
| + stats_.alloc_size += alloc_info.bytes;
|
| + stats_.allocs++;
|
| +
|
| + AllocSizeEntry* entry = GetEntryForSize(size);
|
| + ++entry->num_allocs;
|
| +
|
| + if (stack_depth > 0) {
|
| + Bucket* bucket = GetBucket(stack_depth, call_stack);
|
| + bucket->allocs++;
|
| + bucket->alloc_size += size;
|
| + alloc_info.bucket = bucket;
|
| +
|
| + call_stack_stats_.alloc_size += alloc_info.bytes;
|
| + call_stack_stats_.allocs++;
|
| +
|
| + if (entry->stack_table)
|
| + entry->stack_table->Add(bucket);
|
| + }
|
| +
|
| + address_map_.Insert(ptr, alloc_info);
|
| +}
|
| +
|
| +void LeakDetectorImpl::RecordFree(const void* ptr) {
|
| + // Look up entry.
|
| + AllocInfo alloc_info;
|
| + if (!address_map_.FindAndRemove(ptr, &alloc_info))
|
| + return;
|
| +
|
| + AllocSizeEntry* entry = GetEntryForSize(alloc_info.bytes);
|
| + ++entry->num_frees;
|
| +
|
| + Bucket* bucket = alloc_info.bucket;
|
| + if (bucket) {
|
| + bucket->frees++;
|
| + bucket->free_size += alloc_info.bytes;
|
| +
|
| + call_stack_stats_.frees++;
|
| + call_stack_stats_.free_size += alloc_info.bytes;
|
| +
|
| + if (entry->stack_table)
|
| + entry->stack_table->Remove(bucket);
|
| + }
|
| + stats_.frees++;
|
| + stats_.free_size += alloc_info.bytes;
|
| +}
|
| +
|
| +void LeakDetectorImpl::TestForLeaks() {
|
| + // Add net alloc counts for each size to a ranked list.
|
| + RankedList<uint32> size_ranked_list(kRankedListSize, alloc_, dealloc_);
|
| + for (int i = 0; i < kNumSizeEntries; ++i) {
|
| + const AllocSizeEntry& entry = entries_[i];
|
| + size_ranked_list.Add(IndexToSize(i), entry.num_allocs - entry.num_frees);
|
| + }
|
| + size_leak_analyzer_.AddSample(size_ranked_list);
|
| +
|
| + // Dump out the top entries.
|
| + if (FLAGS_dump_leak_analysis) {
|
| + char buf[0x4000];
|
| + buf[0] = '\0';
|
| + size_leak_analyzer_.Dump(buf, sizeof(buf));
|
| + PrintWithPidOnEachLine(buf);
|
| + }
|
| +
|
| + // Get suspected leaks by size.
|
| + const uint32* suspected_leaks_by_size =
|
| + size_leak_analyzer_.suspected_leaks();
|
| + for (int j = 0; j < size_leak_analyzer_.num_suspected_leaks(); ++j) {
|
| + uint32 size = suspected_leaks_by_size[j];
|
| + AllocSizeEntry* entry = GetEntryForSize(size);
|
| + if (entry->stack_table)
|
| + continue;
|
| + RAW_VLOG(0, "%d: Adding stack table for size %u", getpid(), size);
|
| + entry->stack_table =
|
| + new(alloc_(sizeof(CallStackTable))) CallStackTable(alloc_, dealloc_);
|
| + ++num_stack_tables_;
|
| + }
|
| +
|
| + // Check for leaks in each CallStackTable. It makes sense to this before
|
| + // checking the size allocations, because that could potentially create new
|
| + // CallStackTable. However, the overhead to check a new CallStackTable is
|
| + // small since this function is run very rarely. So handle the leak checks of
|
| + // Tier 2 here.
|
| + for (int i = 0; i < arraysize(entries_); ++i) {
|
| + const AllocSizeEntry& entry = entries_[i];
|
| + CallStackTable* stack_table = entry.stack_table;
|
| + if (!stack_table)
|
| + continue;
|
| +
|
| + if (FLAGS_dump_leak_analysis) {
|
| + // Dump table info.
|
| + char buf[0x4000];
|
| + RAW_VLOG(0, "%d: Stack table for size %d", getpid(), IndexToSize(i));
|
| + buf[0] = '\0';
|
| + stack_table->Dump(buf, sizeof(buf));
|
| + PrintWithPidOnEachLine(buf);
|
| + }
|
| +
|
| + // Get suspected leaks by call stack.
|
| + stack_table->TestForLeaks();
|
| + const LeakAnalyzer<const Bucket*>& leak_analyzer =
|
| + stack_table->leak_analyzer();
|
| + for (int j = 0; j < leak_analyzer.num_suspected_leaks(); ++j) {
|
| + const Bucket& bucket = *leak_analyzer.suspected_leaks()[j];
|
| + RAW_VLOG(0, "%d: Suspected call stack for size %u: %p",
|
| + getpid(), IndexToSize(i), &bucket);
|
| + for (int k = 0; k < bucket.depth; ++k) {
|
| + RAW_VLOG(0, "%d: %p", getpid(), GetOffset(bucket.stack[k]));
|
| + }
|
| + }
|
| + }
|
| +}
|
| +
|
// Logs cumulative allocation/free statistics via RAW_VLOG, one line per
// metric, each prefixed with the process id.
// NOTE(review): "%lu" assumes the stats fields are unsigned-long-sized; if
// they are uint64 on an ILP32 target this is a format mismatch — confirm the
// field types in the header before relying on these lines.
void LeakDetectorImpl::DumpStats() const {
  RAW_VLOG(0, "%d: Alloc size: %lu\n", getpid(), stats_.alloc_size);
  RAW_VLOG(0, "%d: Free size: %lu\n", getpid(), stats_.free_size);
  RAW_VLOG(0, "%d: Net alloc size: %lu\n", getpid(),
           stats_.alloc_size - stats_.free_size);
  RAW_VLOG(0, "%d: Number of stack tables: %d\n", getpid(), num_stack_tables_);
  if (stats_.alloc_size) {
    // Fraction of allocated bytes whose allocations carried a call stack.
    RAW_VLOG(0, "%d: %% of calls with stack trace: %.2f%%\n", getpid(),
             static_cast<double>(call_stack_stats_.alloc_size * 100) /
             stats_.alloc_size);
  }
  RAW_VLOG(0, "%d: Number of call stack buckets: %d\n", getpid(), num_buckets_);
}
|
| +
|
| +// static
|
| +inline int LeakDetectorImpl::SizeToIndex(size_t size) {
|
| + int result = static_cast<int>(size /= sizeof(uint32));
|
| + if (result < kNumSizeEntries)
|
| + return result;
|
| + return 0;
|
| +}
|
| +
|
| +// static
|
| +inline size_t LeakDetectorImpl::IndexToSize(int index) {
|
| + return sizeof(uint32) * index;
|
| +}
|
| +
|
// Returns the unique Bucket for the call stack |key| of length |depth|,
// creating and registering a new one on first sight.  The returned bucket is
// owned by the detector and freed in the destructor.
LeakDetectorImpl::Bucket* LeakDetectorImpl::GetBucket(int depth,
    const void* const key[]) {
  // Lookup stack trace in table.  Buckets sharing a table slot are chained
  // via |next|.
  uint64_t hash = CallStackToHash(depth, key);
  unsigned int index = hash % kHashTableSize;
  for (Bucket* bucket = bucket_table_[index]; bucket; bucket = bucket->next) {
    // Compare the full hash, depth, and raw stack contents so that hash
    // collisions cannot merge distinct call stacks.
    if (bucket->hash == hash &&
        bucket->depth == depth &&
        std::equal(key, key + depth, bucket->stack)) {
      return bucket;
    }
  }

  // Create new bucket, copying the caller's stack into detector-owned
  // memory since |key| is only valid for the duration of this call.
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  std::copy(key, key + depth, kcopy);
  Bucket* bucket = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(bucket, 0, sizeof(*bucket));
  bucket->hash = hash;
  bucket->depth = depth;
  bucket->stack = kcopy;

  // Insert the bucket at the head of its hash chain.
  bucket->next = bucket_table_[index];
  bucket_table_[index] = bucket;
  num_buckets_++;
  return bucket;
}
|
| +
|
| +uint64 LeakDetectorImpl::GetOffset(const void *ptr) const {
|
| + uint64 ptr_value = reinterpret_cast<uint64>(ptr);
|
| + if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_)
|
| + return ptr_value - mapping_addr_;
|
| + return ptr_value;
|
| +}
|
| +
|
| +} // namespace leak_detector
|
|
|