Chromium Code Reviews

Unified Diff: third_party/tcmalloc/chromium/src/leak_detector_impl.h

Issue 986503002: components/metrics: Add runtime memory leak detector (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Remove from gn build; keep it simple; can add it in later (created 5 years, 5 months ago)
Index: third_party/tcmalloc/chromium/src/leak_detector_impl.h
diff --git a/third_party/tcmalloc/chromium/src/leak_detector_impl.h b/third_party/tcmalloc/chromium/src/leak_detector_impl.h
new file mode 100644
index 0000000000000000000000000000000000000000..40e50b6efd48ae3528306ab59d4155019f4f66cc
--- /dev/null
+++ b/third_party/tcmalloc/chromium/src/leak_detector_impl.h
@@ -0,0 +1,158 @@
+// Copyright (c) 2015 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+// ---
+// Author: Simon Que
+
+#ifndef LEAK_DETECTOR_IMPL_H_
+#define LEAK_DETECTOR_IMPL_H_
+
+#include <algorithm>
+#include <unordered_set>
+
+#include "addressmap-inl.h"
+#include "base/basictypes.h"
+#include "base/custom_allocator.h"
+#include "heap-profile-stats.h"
+#include "leak_analyzer.h"
+
+namespace leak_detector {
+
+class CallStackTable;
+
+//----------------------------------------------------------------------
+// Class that contains the actual leak detection mechanism.
+//----------------------------------------------------------------------
+class LeakDetectorImpl {
+ public:
+ // Used for tracking allocation stats.
+ using Stats = HeapProfileStats;
+
+ LeakDetectorImpl(uint64 mapping_addr, uint64 mapping_size);
+ ~LeakDetectorImpl();
+
+ // Indicates whether the given allocation size has an associated call stack
+ // table, and thus requires a stack unwind.
+ bool ShouldGetStackTraceForSize(size_t size) const;
+
+ // Record allocs and frees.
+ void RecordAlloc(const void* ptr, size_t size,
+ int stack_depth, const void* const call_stack[]);
+ void RecordFree(const void* ptr);
+
+ const Stats& stats() const {
+ return stats_;
+ }
+
+ // Runs a check for possible leaks based on the current profiling data.
+ void TestForLeaks();
+
+ // Dumps current profiling statistics to the log.
+ void DumpStats() const;
+
+ private:
+ // Used for tracking unique call stacks.
+ using CallStack = HeapProfileBucket;
+
+ // A record of allocations for a particular size.
+ struct AllocSizeEntry {
+ // Number of allocations and frees for this size.
+ uint32 num_allocs;
+ uint32 num_frees;
+
+ // A stack table, if this size is being profiled for stack as well.
+ CallStackTable* stack_table;
+ };
+
+ // Info stored in the address map for each recorded allocation.
+ struct AllocInfo {
+ AllocInfo() : bytes(0), call_stack(NULL) {}
+
+ // Number of bytes in this allocation.
+ size_t bytes;
+
+ // Points to a unique call stack.
+ CallStack* call_stack;
+ };
+
+ // Used for recording size and call stack info for each allocation.
+ using AllocationMap = AddressMap<AllocInfo>;
+
+ // Allocator for the hash table of unique call stacks (see |call_stacks_| below).
+ // TODO: Using std::unordered_set makes |CallStack::next| redundant. Consider
+ // creating a new struct or removing |next|.
+ using TableEntryAllocator = STL_Allocator<CallStack*, CustomAllocator>;
+
+ // Equality comparator for call stacks, used by |call_stacks_|.
+ struct CallStackCompare {
+ bool operator() (const CallStack* c1, const CallStack* c2) const {
+ return c1->depth == c2->depth &&
+ std::equal(c1->stack, c1->stack + c1->depth, c2->stack);
+ }
+ };
+
+ // Hash functor for call stacks, used by |call_stacks_|.
+ struct CallStackHash {
+ size_t operator() (const CallStack* call_stack) const {
+ return CallStackToHash(call_stack);
+ }
+ };
+
+ // Number of entries in the alloc size table. Since sizes are aligned to
+ // 32 bits, the maximum supported allocation size is (kNumSizeEntries * 4 - 1)
+ // bytes. Any larger sizes are ignored. This value is chosen to be high enough
+ // that such large sizes are rare, if not nonexistent.
+ static const int kNumSizeEntries = 2048;
+
+ // Converts an allocation size to/from the array index used for |entries_|.
+ static int SizeToIndex(size_t size);
+ static size_t IndexToSize(int index);
+
+ // Accessors for the entry table.
+ inline AllocSizeEntry* GetEntryForSize(size_t size) {
+ return &entries_[SizeToIndex(size)];
+ }
+ inline const AllocSizeEntry& GetConstEntryForSize(size_t size) const {
+ return entries_[SizeToIndex(size)];
+ }
+
+ // Returns a CallStack object for the given raw call stack. Each unique call
+ // stack has its own CallStack object. If the given call stack has already
+ // been seen by a previous call to this function, a pointer to the same
+ // CallStack object is returned.
+ CallStack* GetCallStack(int depth, const void* const stack[]);
+
+ // Returns the offset of |ptr| within the current binary. If it is not in the
+ // current binary, just return |ptr| as an integer.
+ uint64 GetOffset(const void* ptr) const;
+
+ // Computes a hash value for the given call stack.
+ static size_t CallStackToHash(const CallStack* call_stack);
+
+ // Hash table containing all unique call stacks seen so far.
+ std::unordered_set<CallStack*,
+ CallStackHash,
+ CallStackCompare,
+ TableEntryAllocator> call_stacks_;
+
+ // For tracking allocation stats.
+ Stats stats_;
+ Stats call_stack_stats_;
+ int num_stack_tables_;
+
+ // Stores all individual recorded allocations.
+ AllocationMap address_map_;
+
+ // Used to analyze potential leak patterns in the allocation sizes.
+ LeakAnalyzer<uint32> size_leak_analyzer_;
+
+ // Allocation stats for each size.
+ AllocSizeEntry entries_[kNumSizeEntries];
+
+ // Address mapping info of the current binary.
+ uint64 mapping_addr_;
+ uint64 mapping_size_;
+
+ DISALLOW_COPY_AND_ASSIGN(LeakDetectorImpl);
+};
+
+} // namespace leak_detector
+
+#endif // LEAK_DETECTOR_IMPL_H_
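For context, here is a minimal sketch of how an allocator hook layer might drive this interface. The hook names, the |g_leak_detector| global, and the placeholder unwinder are illustrative assumptions and are not part of this patch.

// Illustrative sketch only; the hook names, |g_leak_detector|, and
// UnwindStack() are assumptions, not part of this patch.
#include <stddef.h>

#include "leak_detector_impl.h"

namespace leak_detector {

// Assumed to be created during initialization by the embedding allocator shim.
LeakDetectorImpl* g_leak_detector = NULL;

void OnAllocHook(const void* ptr, size_t size) {
  const int kMaxDepth = 32;
  const void* stack[kMaxDepth] = {};
  int depth = 0;
  // Only unwind the stack when this allocation size has a call stack table,
  // since unwinding is expensive.
  if (g_leak_detector->ShouldGetStackTraceForSize(size)) {
    // depth = UnwindStack(stack, kMaxDepth);  // placeholder for a real unwinder
  }
  g_leak_detector->RecordAlloc(ptr, size, depth, stack);
}

void OnFreeHook(const void* ptr) {
  g_leak_detector->RecordFree(ptr);
}

// Called periodically by the embedder, e.g. after every N allocations.
void CheckForLeaks() {
  g_leak_detector->TestForLeaks();
  g_leak_detector->DumpStats();
}

}  // namespace leak_detector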

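The bucketing described by kNumSizeEntries suggests size-to-index helpers along the following lines. The actual definitions belong in leak_detector_impl.cc, which is not part of this diff, so treat this as a hypothetical sketch rather than the real implementation.

// Hypothetical sketch of the size bucketing; not the actual implementation.
#include <stddef.h>

#include "leak_detector_impl.h"

namespace leak_detector {

int LeakDetectorImpl::SizeToIndex(size_t size) {
  // Each bucket covers a 4-byte range of sizes: 0-3 -> 0, 4-7 -> 1, etc.
  // Callers are expected to skip sizes of kNumSizeEntries * 4 or more.
  return static_cast<int>(size / sizeof(uint32));
}

size_t LeakDetectorImpl::IndexToSize(int index) {
  // Returns the smallest allocation size that maps to |index|.
  return static_cast<size_t>(index) * sizeof(uint32);
}

}  // namespace leak_detector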