Index: components/metrics/leak_detector/leak_detector_impl.cc
diff --git a/components/metrics/leak_detector/leak_detector_impl.cc b/components/metrics/leak_detector/leak_detector_impl.cc
new file mode 100644
index 0000000000000000000000000000000000000000..4e6ec0698f1e54c83666098901a582a4535393d5
--- /dev/null
+++ b/components/metrics/leak_detector/leak_detector_impl.cc
@@ -0,0 +1,280 @@
+// Copyright 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "components/metrics/leak_detector/leak_detector_impl.h"
+
+#include <inttypes.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
wtc
2015/08/21 20:02:12
Nit: <stdint.h> can be omitted since we already include <inttypes.h>.
Simon Que
2015/08/22 22:41:51
Done.
+
+#include <algorithm>
+#include <new>
+
+#include "base/hash.h"
+#include "components/metrics/leak_detector/call_stack_table.h"
+#include "components/metrics/leak_detector/custom_allocator.h"
+#include "components/metrics/leak_detector/leak_analyzer.h"
+#include "components/metrics/leak_detector/ranked_list.h"
+
+namespace leak_detector {
+
+namespace {
+
+// Look for leaks in the top N entries in each tier, where N is this value.
+const int kRankedListSize = 16;
+
+// Initial hash table size for |LeakDetectorImpl::address_map_|.
+const int kAddressMapNumBuckets = 100003;
+
+// Number of entries in the alloc size table. As sizes are aligned to 32 bits,
+// the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger
+// sizes are ignored. This value is chosen high enough that such large sizes
+// are rare if not nonexistent.
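+// With kNumSizeEntries = 2048, that cutoff works out to 2048 * 4 - 1 = 8191
+// bytes.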
+const int kNumSizeEntries = 2048;
+
+using ValueType = LeakDetectorValueType;
+
+// Prints the input string buffer using RAW_LOG, prefixing each line with the
+// process id.
+void PrintWithPidOnEachLine(char* buf) {
+  while (buf && *buf) {
+    // Break up the string at the next newline, if there is one.
+    char* newline = strchr(buf, '\n');
+    if (newline)
+      *newline = '\0';
+
+    // Print the current line, prefixed with the process id.
+    char line[1024];
+    snprintf(line, sizeof(line), "%d: %s\n", getpid(), buf);
+    RAW_LOG(ERROR, line);
+
+    // Advance |buf| past the newline to the remainder of the string; stop
+    // once the remainder is empty.
+    buf = newline ? newline + 1 : nullptr;
+  }
+}
+
+// Functions to convert an allocation size to/from the array index used for
+// |LeakDetectorImpl::size_entries_|.
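+// For example, SizeToIndex(64) == 16 and IndexToSize(16) == 64. Sizes of
+// kNumSizeEntries * 4 bytes or more map to index 0, sharing that bucket with
+// sizes smaller than sizeof(uint32_t).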
+int SizeToIndex(size_t size) {
+  int result = static_cast<int>(size / sizeof(uint32_t));
+  if (result < kNumSizeEntries)
+    return result;
+  return 0;
+}
+
+size_t IndexToSize(int index) {
+  return sizeof(uint32_t) * index;
+}
+
+}  // namespace
+
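+// Typical usage, as a sketch: something outside this class (presumably the
+// allocator hooks) constructs one LeakDetectorImpl per process, calls
+// RecordAlloc() / RecordFree() on every allocation and free, and calls
+// TestForLeaks() periodically to run the analysis.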
+LeakDetectorImpl::LeakDetectorImpl(uintptr_t mapping_addr,
+                                   size_t mapping_size,
+                                   int size_suspicion_threshold,
+                                   int call_stack_suspicion_threshold,
+                                   bool verbose)
+    : num_allocs_(0),
+      num_allocs_with_call_stack_(0),
+      num_frees_(0),
+      alloc_size_(0),
+      free_size_(0),
+      num_stack_tables_(0),
+      address_map_(kAddressMapNumBuckets),
+      size_leak_analyzer_(kRankedListSize, size_suspicion_threshold),
+      size_entries_(kNumSizeEntries, {0}),
+      mapping_addr_(mapping_addr),
+      mapping_size_(mapping_size),
+      call_stack_suspicion_threshold_(call_stack_suspicion_threshold),
+      verbose_(verbose) {
+}
+
+LeakDetectorImpl::~LeakDetectorImpl() {
+  // Free both the stack array allocated in GetCallStack() and the CallStack
+  // object itself.
+  for (CallStack* call_stack : call_stacks_) {
+    CustomAllocator::Free(call_stack->stack,
+                          sizeof(*call_stack->stack) * call_stack->depth);
+    CustomAllocator::Free(call_stack, sizeof(CallStack));
+  }
+
+  // Free any call stack tables.
+  for (AllocSizeEntry& entry : size_entries_) {
+    CallStackTable* table = entry.stack_table;
+    if (!table)
+      continue;
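+    // The table was constructed with placement new on CustomAllocator memory,
+    // so destroy it explicitly and return the raw block to the allocator.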
+    table->~CallStackTable();
+    CustomAllocator::Free(table, sizeof(CallStackTable));
+  }
+}
+
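+// True when a call stack table exists for |size|; lets the caller skip
+// capturing a stack trace for sizes that are not under suspicion.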
+bool LeakDetectorImpl::ShouldGetStackTraceForSize(size_t size) const {
+  return size_entries_[SizeToIndex(size)].stack_table != nullptr;
+}
+
+void LeakDetectorImpl::RecordAlloc(const void* ptr,
+                                   size_t size,
+                                   int stack_depth,
+                                   const void* const stack[]) {
+  AllocInfo alloc_info;
+  alloc_info.size = size;
+
+  alloc_size_ += alloc_info.size;
+  ++num_allocs_;
+
+  AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
+  ++entry->num_allocs;
+
+  if (stack_depth > 0) {
+    CallStack* call_stack = GetCallStack(stack_depth, stack);
+    alloc_info.call_stack = call_stack;
+
+    ++num_allocs_with_call_stack_;
+
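+    // Tier 2: if this size has been flagged as suspicious, the entry has a
+    // call stack table; record the stack there as well.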
+    if (entry->stack_table)
+      entry->stack_table->Add(call_stack);
+  }
+
+  address_map_.insert(std::make_pair(ptr, alloc_info));
+}
+
+void LeakDetectorImpl::RecordFree(const void* ptr) {
+  // Look up entry.
+  auto iter = address_map_.find(ptr);
+  if (iter == address_map_.end())
+    return;
+
+  const AllocInfo& alloc_info = iter->second;
+
+  AllocSizeEntry* entry = &size_entries_[SizeToIndex(alloc_info.size)];
+  ++entry->num_frees;
+
+  const CallStack* call_stack = alloc_info.call_stack;
+  if (call_stack && entry->stack_table)
+    entry->stack_table->Remove(call_stack);
+
+  ++num_frees_;
+  free_size_ += alloc_info.size;
+
+  address_map_.erase(iter);
+}
+
+void LeakDetectorImpl::TestForLeaks() {
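+  // Tier 1: analyze net allocation counts bucketed by size.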
+  // Add net alloc counts for each size to a ranked list.
+  RankedList size_ranked_list(kRankedListSize);
+  for (size_t i = 0; i < size_entries_.size(); ++i) {
+    const AllocSizeEntry& entry = size_entries_[i];
+    ValueType size_value(IndexToSize(i));
+    size_ranked_list.Add(size_value, entry.num_allocs - entry.num_frees);
+  }
+  size_leak_analyzer_.AddSample(size_ranked_list);
+
+  // Dump out the top entries.
+  char buf[0x4000];
+  if (verbose_) {
+    buf[0] = '\0';
+    size_leak_analyzer_.Dump(buf, sizeof(buf));
+    PrintWithPidOnEachLine(buf);
+  }
+
+  // Get suspected leaks by size.
+  for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) {
+    uint32_t size = size_value.size();
+    AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)];
+    if (entry->stack_table)
+      continue;
+    snprintf(buf, sizeof(buf), "Adding stack table for size %u\n", size);
+    PrintWithPidOnEachLine(buf);
+    entry->stack_table =
+        new (CustomAllocator::Allocate(sizeof(CallStackTable)))
+            CallStackTable(call_stack_suspicion_threshold_);
+    ++num_stack_tables_;
+  }
+
+  // Check for leaks in each CallStackTable. It would make sense to do this
+  // before the size-based check above, because that check can create new
+  // (empty) CallStackTables that there is no point in checking yet. However,
+  // the overhead of checking a newly created table is small, and this
+  // function runs very rarely, so handle the Tier 2 leak checks here.
+  for (size_t i = 0; i < size_entries_.size(); ++i) {
+    const AllocSizeEntry& entry = size_entries_[i];
+    CallStackTable* stack_table = entry.stack_table;
+    if (!stack_table || stack_table->empty())
+      continue;
+
+    if (verbose_) {
+      // Dump table info.
+      snprintf(buf, sizeof(buf), "Stack table for size %zu:\n",
+               IndexToSize(i));
+      PrintWithPidOnEachLine(buf);
+
+      buf[0] = '\0';
+      stack_table->Dump(buf, sizeof(buf));
+      PrintWithPidOnEachLine(buf);
+    }
+
+    // Get suspected leaks by call stack.
+    stack_table->TestForLeaks();
+    const LeakAnalyzer& leak_analyzer = stack_table->leak_analyzer();
+    for (const ValueType& call_stack_value : leak_analyzer.suspected_leaks()) {
+      const CallStack* call_stack = call_stack_value.call_stack();
+      size_t offset = snprintf(buf, sizeof(buf),
+                               "Suspected call stack for size %zu, %p:\n",
+                               IndexToSize(i), call_stack);
+      // Stop once |offset| reaches the buffer size: snprintf() returns the
+      // untruncated length, so an unchecked |sizeof(buf) - offset| could
+      // underflow.
+      for (size_t j = 0; j < call_stack->depth && offset < sizeof(buf); ++j) {
+        offset += snprintf(buf + offset, sizeof(buf) - offset,
+                           "\t%" PRIxPTR "\n",
+                           GetOffset(call_stack->stack[j]));
+      }
+      PrintWithPidOnEachLine(buf);
+    }
+  }
+}
+
+void LeakDetectorImpl::DumpStats() const {
+  char buf[1024];
+  snprintf(buf, sizeof(buf),
+           "Alloc size: %" PRIu64 "\n"
+           "Free size: %" PRIu64 "\n"
+           "Net alloc size: %" PRIu64 "\n"
+           "Number of stack tables: %u\n"
+           "Percentage of allocs with stack traces: %.2f%%\n"
+           "Number of call stack buckets: %zu\n",
+           alloc_size_, free_size_, alloc_size_ - free_size_,
+           num_stack_tables_,
+           num_allocs_ ? 100.0f * num_allocs_with_call_stack_ / num_allocs_
+                       : 0,
+           call_stacks_.bucket_count());
+  PrintWithPidOnEachLine(buf);
+}
+
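+// Hashes the pointer value itself (the bytes of the address), since
+// allocations are identified by their address in |address_map_|.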
+size_t LeakDetectorImpl::AddressHash::operator()(const void* ptr) const {
+  return base::Hash(reinterpret_cast<const char*>(&ptr), sizeof(ptr));
+}
+
+size_t LeakDetectorImpl::CallStackHash::operator()(
+    const CallStack* call_stack) const {
+  // Generate hash from call stack.
+  return base::Hash(reinterpret_cast<const char*>(call_stack->stack),
+                    sizeof(*(call_stack->stack)) * call_stack->depth);
+}
+
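+// Returns the canonical CallStack object for |stack|, interning a copy on
+// first sight so that identical stacks share a single object and a
+// precomputed hash.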
+CallStack* LeakDetectorImpl::GetCallStack(int depth,
+                                          const void* const stack[]) {
+  // Temporarily create a call stack object for lookup in |call_stacks_|.
+  CallStack temp;
+  temp.depth = depth;
+  temp.stack = const_cast<const void**>(stack);
+
+  auto iter = call_stacks_.find(&temp);
+  if (iter != call_stacks_.end())
+    return *iter;
+
+  // Since |call_stacks_| stores CallStack pointers rather than actual
+  // objects, create new CallStack objects manually here.
+  CallStack* new_call_stack =
+      new (CustomAllocator::Allocate(sizeof(CallStack))) CallStack;
+  memset(new_call_stack, 0, sizeof(*new_call_stack));
+  new_call_stack->depth = depth;
+  new_call_stack->hash = call_stacks_.hash_function()(&temp);
+  new_call_stack->stack = reinterpret_cast<const void**>(
+      CustomAllocator::Allocate(sizeof(*stack) * depth));
+  std::copy(stack, stack + depth, new_call_stack->stack);
+
+  call_stacks_.insert(new_call_stack);
+  return new_call_stack;
+}
+
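+// Maps a pointer that falls inside the binary's mapping to its offset within
+// that mapping; unlike absolute addresses, such offsets are stable across
+// address space randomization. Pointers outside the mapping are returned
+// unchanged.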
+uintptr_t LeakDetectorImpl::GetOffset(const void* ptr) const {
+  uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr);
+  if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_)
+    return ptr_value - mapping_addr_;
+  return ptr_value;
+}
+
+}  // namespace leak_detector