// Copyright (c) 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// ---
// Author: Simon Que

#include "leak_detector_impl.h"

#include <cstddef>
#include <cstring>

#include <algorithm>

#include "base/basictypes.h"
#include "base/commandlineflags.h"
#include "base/logging.h"
#include "call_stack_table.h"
#include "farmhash.h"
#include "ranked_list.h"

// A size must be suspected this many times to be reported as a leak suspect.
DECLARE_int32(size_suspicion_threshold);

// If set, dumps all leak analysis data, not just suspected leak reports.
DECLARE_bool(dump_leak_analysis);

namespace leak_detector {

namespace {

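// Leak analysis is tiered: Tier 1 ranks net allocation counts by allocation
// size, and Tier 2 ranks the call stacks that allocate any size Tier 1 has
// flagged as suspicious (see TestForLeaks()).
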
// Look for leaks in the top N entries in each tier, where N is this value.
const int kRankedListSize = 16;

// Used for printing size values in LeakAnalyzer.
class SizeStringPrint : public LeakAnalyzer<uint32>::StringPrint {
 public:
  // Gets the string representation of a value.
  virtual const char* ValueToString(const uint32& value, bool spacing_on) {
    snprintf(buffer_, sizeof(buffer_), spacing_on ? "%10u" : "%u", value);
    return buffer_;
  }

  // Gets the word that describes the value type.
  virtual const char* ValueTypeName(bool is_plural) {
    return is_plural ? "sizes" : "size";
  }
} size_string_print;

// Prints the input string buffer using RAW_LOG, prefixing each line with the
// process id. Assumes the buffer's contents end with a newline.
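// For example, a buffer containing "foo\nbar\n" is logged as two lines:
// "<pid>: foo" and "<pid>: bar".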
void PrintWithPidOnEachLine(char* buf) {
  char* ptr = strchr(buf, '\n');
  do {
    // Break up the string.
    if (ptr)
      *ptr = '\0';
    // Print out the former part.
    RAW_LOG(0, "%d: %s", getpid(), buf);
    // Re-point |buf| to the latter part.
    if (ptr)
      buf = ptr + 1;
  } while ((ptr = strchr(buf, '\n')) != NULL);
}

// Use FarmHash to hash a call stack, treating it as a raw buffer of |depth|
// pointer values.
inline uint64 CallStackToHash(int depth, const void* const stack[]) {
  return util::Hash(reinterpret_cast<const char*>(stack),
                    sizeof(*stack) * depth);
}

}  // namespace

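// |mapping_addr| and |mapping_size| are assumed to describe the in-memory
// mapping of the program binary; GetOffset() uses them to convert call stack
// addresses to binary-relative offsets.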
LeakDetectorImpl::LeakDetectorImpl(Allocator alloc, DeAllocator dealloc,
                                   uint64 mapping_addr, uint64 mapping_size)
    : alloc_(alloc),
      dealloc_(dealloc),
      num_stack_tables_(0),
      address_map_(alloc_, dealloc_),
      size_leak_analyzer_(kRankedListSize, FLAGS_size_suspicion_threshold,
                          alloc_, dealloc_, &size_string_print),
      mapping_addr_(mapping_addr),
      mapping_size_(mapping_size) {
  // Clear the hash table for buckets.
  memset(bucket_table_, 0, sizeof(bucket_table_));

  // Initialize the counters, size entry table, and stats.
  num_buckets_ = 0;
  memset(entries_, 0, sizeof(entries_));
  memset(&stats_, 0, sizeof(stats_));
  memset(&call_stack_stats_, 0, sizeof(call_stack_stats_));
}

LeakDetectorImpl::~LeakDetectorImpl() {
  // Free the hash table of buckets.
  for (int i = 0; i < kHashTableSize; i++) {
    for (Bucket* curr = bucket_table_[i]; curr != NULL; /**/) {
      Bucket* bucket = curr;
      curr = curr->next;
      dealloc_(bucket->stack);
      dealloc_(bucket);
    }
  }

  // Free any call stack tables. They were constructed with placement new in
  // TestForLeaks(), so destroy them explicitly before deallocating.
  for (int i = 0; i < kNumSizeEntries; ++i) {
    CallStackTable* table = entries_[i].stack_table;
    if (!table)
      continue;
    table->~CallStackTable();
    dealloc_(table);
  }
}

bool LeakDetectorImpl::ShouldGetStackTraceForSize(size_t size) const {
  return entries_[SizeToIndex(size)].stack_table != NULL;
}

void LeakDetectorImpl::RecordAlloc(
    const void* ptr, size_t size,
    int stack_depth, const void* const call_stack[]) {
  AllocInfo alloc_info;
  alloc_info.bytes = size;

  stats_.alloc_size += alloc_info.bytes;
  stats_.allocs++;

  AllocSizeEntry* entry = GetEntryForSize(size);
  ++entry->num_allocs;

  if (stack_depth > 0) {
    Bucket* bucket = GetBucket(stack_depth, call_stack);
    bucket->allocs++;
    bucket->alloc_size += size;
    alloc_info.bucket = bucket;

    call_stack_stats_.alloc_size += alloc_info.bytes;
    call_stack_stats_.allocs++;

    if (entry->stack_table)
      entry->stack_table->Add(bucket);
  }

  address_map_.Insert(ptr, alloc_info);
}

void LeakDetectorImpl::RecordFree(const void* ptr) {
  // Look up the address in the allocation map and remove it if present.
  AllocInfo alloc_info;
  if (!address_map_.FindAndRemove(ptr, &alloc_info))
    return;

  AllocSizeEntry* entry = GetEntryForSize(alloc_info.bytes);
  ++entry->num_frees;

  Bucket* bucket = alloc_info.bucket;
  if (bucket) {
    bucket->frees++;
    bucket->free_size += alloc_info.bytes;

    call_stack_stats_.frees++;
    call_stack_stats_.free_size += alloc_info.bytes;

    if (entry->stack_table)
      entry->stack_table->Remove(bucket);
  }
  stats_.frees++;
  stats_.free_size += alloc_info.bytes;
}
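
// Usage sketch (hypothetical caller, not part of this file): allocation
// hooks would consult ShouldGetStackTraceForSize() to decide whether to
// unwind the stack before calling RecordAlloc(ptr, size, depth, stack),
// passing depth == 0 to skip call stack accounting; free hooks would call
// RecordFree(ptr).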

void LeakDetectorImpl::TestForLeaks() {
  // Add net alloc counts for each size to a ranked list.
  RankedList<uint32> size_ranked_list(kRankedListSize, alloc_, dealloc_);
  for (int i = 0; i < kNumSizeEntries; ++i) {
    const AllocSizeEntry& entry = entries_[i];
    size_ranked_list.Add(IndexToSize(i), entry.num_allocs - entry.num_frees);
  }
  size_leak_analyzer_.AddSample(size_ranked_list);
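  // A size is reported as a leak suspect only after it has remained
  // suspicious for FLAGS_size_suspicion_threshold consecutive samples (see
  // the flag comment at the top of this file).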

  // Dump out the top entries.
  if (FLAGS_dump_leak_analysis) {
    char buf[0x4000];
    buf[0] = '\0';
    size_leak_analyzer_.Dump(buf, sizeof(buf));
    PrintWithPidOnEachLine(buf);
  }

  // Get suspected leaks by size.
  const uint32* suspected_leaks_by_size =
      size_leak_analyzer_.suspected_leaks();
  for (int j = 0; j < size_leak_analyzer_.num_suspected_leaks(); ++j) {
    uint32 size = suspected_leaks_by_size[j];
    AllocSizeEntry* entry = GetEntryForSize(size);
    if (entry->stack_table)
      continue;
    RAW_VLOG(0, "%d: Adding stack table for size %u", getpid(), size);
    // Construct the table with placement new, using memory from |alloc_|;
    // it is destroyed manually in ~LeakDetectorImpl().
    entry->stack_table =
        new(alloc_(sizeof(CallStackTable))) CallStackTable(alloc_, dealloc_);
    ++num_stack_tables_;
  }

  // Check for leaks in each CallStackTable. Ideally this would happen before
  // the size-based analysis above, since that analysis can create new, still
  // empty CallStackTables that get scanned here for no benefit. However, the
  // overhead of scanning a new table is small and this function runs very
  // rarely, so the Tier 2 leak checks are simply handled here.
  for (int i = 0; i < kNumSizeEntries; ++i) {
    const AllocSizeEntry& entry = entries_[i];
    CallStackTable* stack_table = entry.stack_table;
    if (!stack_table)
      continue;

    if (FLAGS_dump_leak_analysis) {
      // Dump table info.
      char buf[0x4000];
      RAW_VLOG(0, "%d: Stack table for size %zu", getpid(), IndexToSize(i));
      buf[0] = '\0';
      stack_table->Dump(buf, sizeof(buf));
      PrintWithPidOnEachLine(buf);
    }

    // Get suspected leaks by call stack.
    stack_table->TestForLeaks();
    const LeakAnalyzer<const Bucket*>& leak_analyzer =
        stack_table->leak_analyzer();
    for (int j = 0; j < leak_analyzer.num_suspected_leaks(); ++j) {
      const Bucket& bucket = *leak_analyzer.suspected_leaks()[j];
      RAW_VLOG(0, "%d: Suspected call stack for size %zu: %p",
               getpid(), IndexToSize(i), &bucket);
      for (int k = 0; k < bucket.depth; ++k) {
        RAW_VLOG(0, "%d: 0x%llx", getpid(),
                 static_cast<unsigned long long>(GetOffset(bucket.stack[k])));
      }
    }
  }
}

void LeakDetectorImpl::DumpStats() const {
  RAW_VLOG(0, "%d: Alloc size: %llu\n", getpid(),
           static_cast<unsigned long long>(stats_.alloc_size));
  RAW_VLOG(0, "%d: Free size: %llu\n", getpid(),
           static_cast<unsigned long long>(stats_.free_size));
  RAW_VLOG(0, "%d: Net alloc size: %llu\n", getpid(),
           static_cast<unsigned long long>(stats_.alloc_size -
                                           stats_.free_size));
  RAW_VLOG(0, "%d: Number of stack tables: %d\n", getpid(), num_stack_tables_);
  if (stats_.alloc_size) {
    RAW_VLOG(0, "%d: %% of allocated bytes with stack traces: %.2f%%\n",
             getpid(),
             static_cast<double>(call_stack_stats_.alloc_size * 100) /
                 stats_.alloc_size);
  }
  RAW_VLOG(0, "%d: Number of call stack buckets: %d\n", getpid(), num_buckets_);
}

// static
inline int LeakDetectorImpl::SizeToIndex(size_t size) {
  // Size entries have uint32 granularity; sizes too large for the entry
  // table map to index 0.
  int result = static_cast<int>(size / sizeof(uint32));
  if (result < kNumSizeEntries)
    return result;
  return 0;
}

// static
inline size_t LeakDetectorImpl::IndexToSize(int index) {
  return sizeof(uint32) * index;
}
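
// Example round trip: SizeToIndex(17) == 4 and IndexToSize(4) == 16, so each
// entry in |entries_| covers a 4-byte range of allocation sizes.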

LeakDetectorImpl::Bucket* LeakDetectorImpl::GetBucket(int depth,
                                                      const void* const key[]) {
  // Look up the stack trace in the bucket hash table.
  uint64 hash = CallStackToHash(depth, key);
  unsigned int index = hash % kHashTableSize;
  for (Bucket* bucket = bucket_table_[index]; bucket; bucket = bucket->next) {
    if (bucket->hash == hash &&
        bucket->depth == depth &&
        std::equal(key, key + depth, bucket->stack)) {
      return bucket;
    }
  }

  // Not found; create a new bucket with a copy of the call stack.
  const size_t key_size = sizeof(key[0]) * depth;
  const void** kcopy = reinterpret_cast<const void**>(alloc_(key_size));
  std::copy(key, key + depth, kcopy);
  Bucket* bucket = reinterpret_cast<Bucket*>(alloc_(sizeof(Bucket)));
  memset(bucket, 0, sizeof(*bucket));
  bucket->hash = hash;
  bucket->depth = depth;
  bucket->stack = kcopy;

  // Insert the bucket at the head of its hash chain.
  bucket->next = bucket_table_[index];
  bucket_table_[index] = bucket;
  num_buckets_++;
  return bucket;
}

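// Returns |ptr| as an offset into the binary's mapping if it lies within
// [mapping_addr_, mapping_addr_ + mapping_size_); otherwise returns the raw
// address value.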
uint64 LeakDetectorImpl::GetOffset(const void* ptr) const {
  uint64 ptr_value = reinterpret_cast<uintptr_t>(ptr);
  if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_)
    return ptr_value - mapping_addr_;
  return ptr_value;
}

}  // namespace leak_detector