Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 #include "call_stack_table.h" | |
| 2 | |
#include <cstdio>
#include <cstring>

#include "base/commandlineflags.h"
#include "heap-profile-stats.h"
| 7 | |
| 8 // A call stack must be suspected this many times to be reported as a leak | |
| 9 // suspect. | |
| 10 DECLARE_int32(call_stack_size_suspicion_threshold); | |
| 11 | |
| 12 namespace leak_detector { | |
| 13 | |
namespace {

// Shorthand for the call-stack bucket type used as the hash table key.
using Bucket = CallStackTable::Bucket;

// Number of top entries kept by the leak analyzer's ranked list.
const int kRankedListSize = 16;

// Initial number of hash table buckets.
const int kInitialHashTableSize = 1999;

// Used for printing call stacks in LeakAnalyzer: renders a Bucket pointer
// as its hex address.
class StringPrint : public LeakAnalyzer<const Bucket*>::StringPrint {
 public:
  // Gets the string representation of a value.  When |spacing_on| is set,
  // the pointer is padded to a fixed 16-character field for aligned output.
  // NOTE(review): |buffer_| is presumably declared by the base class —
  // confirm it is large enough for a formatted pointer.
  virtual const char* ValueToString(const Bucket* const& ptr, bool spacing_on) {
    snprintf(buffer_, sizeof(buffer_), spacing_on ? "%16p" : "%p", ptr);
    return buffer_;
  }

  // Gets the word that describes the value type.
  virtual const char* ValueTypeName(bool is_plural) {
    return is_plural ? "buckets" : "bucket";
  }
} string_print;

}  // namespace
| 40 | |
// Constructs an empty table.  |alloc|/|dealloc| are stored for use by
// TestForLeaks() and handed to the leak analyzer, together with the
// ranked-list size and the flag-controlled suspicion threshold.  The hash
// table is pre-sized to kInitialHashTableSize buckets.
CallStackTable::CallStackTable(Allocator alloc, DeAllocator dealloc)
    : num_allocs_(0),
      num_frees_(0),
      alloc_(alloc),
      dealloc_(dealloc),
      entry_map_(kInitialHashTableSize),
      leak_analyzer_(kRankedListSize, FLAGS_call_stack_size_suspicion_threshold,
                     alloc, dealloc, &string_print) {
}
| 50 | |
| 51 CallStackTable::~CallStackTable() {} | |
| 52 | |
| 53 void CallStackTable::Add(const Bucket* ptr) { | |
| 54 auto iter = entry_map_.find(ptr); | |
| 55 Entry* entry = NULL; | |
| 56 if (iter == entry_map_.end()) { | |
| 57 entry = &entry_map_[ptr]; | |
| 58 } else { | |
| 59 entry = &iter->second; | |
| 60 } | |
| 61 | |
| 62 ++entry->num_allocs; | |
| 63 ++num_allocs_; | |
| 64 } | |
| 65 | |
| 66 void CallStackTable::Remove(const Bucket* ptr) { | |
| 67 auto iter = entry_map_.find(ptr); | |
| 68 if (iter == entry_map_.end()) | |
| 69 return; | |
| 70 Entry* entry = &iter->second; | |
| 71 ++entry->num_frees; | |
| 72 ++num_frees_; | |
| 73 | |
| 74 // If there are no net allocs, delete the entry. | |
| 75 if (entry->num_allocs == entry->num_frees) | |
| 76 entry_map_.erase(iter); | |
| 77 } | |
| 78 | |
| 79 int CallStackTable::Dump(char* buffer, const int buffer_size) const { | |
| 80 int size_left = buffer_size; | |
| 81 | |
| 82 if (entry_map_.empty()) | |
| 83 return size_left; | |
| 84 | |
| 85 int attempted_size = | |
| 86 snprintf(buffer, size_left, | |
| 87 "Total number of allocations: %u\n" | |
| 88 "Total number of frees: %u\n" | |
| 89 "Net number of allocations: %u\n" | |
| 90 "Total number of distinct stack traces: %u\n", | |
| 91 num_allocs_, num_frees_, num_allocs_ - num_frees_, | |
| 92 entry_map_.size()); | |
| 93 size_left -= attempted_size; | |
| 94 buffer += attempted_size; | |
| 95 | |
| 96 if (size_left > 0) { | |
| 97 int attempted_size = leak_analyzer_.Dump(buffer, size_left); | |
| 98 size_left -= attempted_size; | |
| 99 buffer += attempted_size; | |
| 100 } | |
| 101 | |
| 102 if (size_left > 0) | |
| 103 return buffer_size - size_left; | |
| 104 | |
| 105 return buffer_size; | |
| 106 } | |
| 107 | |
| 108 void CallStackTable::TestForLeaks() { | |
| 109 // Add all entries to the ranked list. | |
| 110 RankedList<const Bucket*> ranked_list(kRankedListSize, alloc_, dealloc_); | |
| 111 | |
| 112 for (const auto& entry_pair : entry_map_) { | |
| 113 const Entry& entry = entry_pair.second; | |
| 114 if (entry.num_allocs != entry.num_frees) | |
| 115 ranked_list.Add(entry_pair.first, entry.num_allocs - entry.num_frees); | |
| 116 } | |
| 117 leak_analyzer_.AddSample(ranked_list); | |
| 118 } | |
| 119 | |
// static
// Registers the hooks used by Allocate()/Free().  Plain non-atomic
// writes — NOTE(review): assumed to be called before any concurrent use
// of Alloc; confirm against callers.
void CallStackTable::Alloc::Init(Allocator alloc, DeAllocator dealloc) {
  alloc_ = alloc;
  dealloc_ = dealloc;
}
| 125 | |
| 126 // static | |
| 127 void* CallStackTable::Alloc::Allocate(size_t size) { | |
| 128 if (alloc_) | |
| 129 return alloc_(size); | |
| 130 return NULL; | |
| 131 } | |
| 132 | |
| 133 // static | |
| 134 void CallStackTable::Alloc::Free(void* ptr, size_t size) { | |
| 135 if (free) | |
|
gpike
2015/07/01 21:38:21
why free?
Simon Que
2015/07/10 00:09:06
Meant dealloc_.
| |
| 136 dealloc_(ptr); | |
| 137 } | |
| 138 | |
| 139 // static | |
| 140 CallStackTable::Allocator CallStackTable::Alloc::alloc_ = NULL; | |
| 141 // static | |
| 142 CallStackTable::DeAllocator CallStackTable::Alloc::dealloc_ = NULL; | |
| 143 | |
| 144 | |
| 145 } // namespace leak_detector | |
| OLD | NEW |