| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "leak_detector_impl.h" | 5 #include "leak_detector_impl.h" |
| 6 | 6 |
| 7 #include <inttypes.h> | 7 #include <inttypes.h> |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 | 9 |
| 10 #include <algorithm> // For std::move | 10 #include <algorithm> |
| 11 #include <iterator> // For std::advance | |
| 12 #include <new> | 11 #include <new> |
| 13 | 12 |
| 14 #include "base/hash.h" | 13 #include "base/hash.h" |
| 15 #include "base/process/process_handle.h" | 14 #include "base/process/process_handle.h" |
| 16 #include "components/metrics/leak_detector/call_stack_table.h" | 15 #include "components/metrics/leak_detector/call_stack_table.h" |
| 17 #include "components/metrics/leak_detector/custom_allocator.h" | 16 #include "components/metrics/leak_detector/custom_allocator.h" |
| 18 #include "components/metrics/leak_detector/ranked_set.h" | 17 #include "components/metrics/leak_detector/ranked_set.h" |
| 19 | 18 |
| 20 namespace metrics { | 19 namespace metrics { |
| 21 namespace leak_detector { | 20 namespace leak_detector { |
| 22 | 21 |
| 23 namespace { | 22 namespace { |
| 24 | 23 |
| 25 // Look for leaks in the top N entries in each tier, where N is this value. | 24 // Look for leaks in the top N entries in each tier, where N is this value. |
| 26 const int kRankedSetSize = 16; | 25 const int kRankedSetSize = 16; |
| 27 | 26 |
| 28 // Initial hash table size for |LeakDetectorImpl::address_map_|. | 27 // Initial hash table size for |LeakDetectorImpl::address_map_|. |
| 29 const int kAddressMapNumBuckets = 100003; | 28 const int kAddressMapNumBuckets = 100003; |
| 30 | 29 |
| 31 // Number of entries in the alloc size table. As sizes are aligned to 32-bits | 30 // Number of entries in the alloc size table. As sizes are aligned to 32-bits |
| 32 // the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger | 31 // the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger |
| 33 // sizes are ignored. This value is chosen high enough that such large sizes | 32 // sizes are ignored. This value is chosen high enough that such large sizes |
| 34 // are rare if not nonexistent. | 33 // are rare if not nonexistent. |
| 35 const int kNumSizeEntries = 2048; | 34 const int kNumSizeEntries = 2048; |
| 36 | 35 |
| 37 // Record only the first |kNumSizeEntriesInHistory| size classes in | 36 // Record only the first |kNumSizeEntriesInHistory| size classes in |
| 38 // |LeakDetectorImpl::size_breakdown_history_|. | 37 // |LeakDetectorImpl::size_breakdown_history_|. |
| 39 const int kNumSizeEntriesInHistory = 32; | 38 const int kNumSizeEntriesInHistory = 32; |
| 40 | 39 |
| 41 // Record only the top |kNumTopCallStacksInHistory| call sites, ordered by | 40 // |LeakDetectorImpl::size_breakdown_history_| can have up to this many entries. |
| 42 // number of allocations at each site, in | 41 // Any older entries must be discarded to make way for new ones. |
| 43 // |AllocSizeEntry::call_site_breakdown_history|. | |
| 44 const int kNumTopCallStacksInHistory = 32; | |
| 45 | |
| 46 // |LeakDetectorImpl::size_breakdown_history_| and | |
| 47 // |AllocSizeEntry::call_site_breakdown_history| can have up to this many | |
| 48 // entries. Any older entries must be discarded to make way for new ones. | |
| 49 const int kMaxNumHistoryEntries = 32; | 42 const int kMaxNumHistoryEntries = 32; |
| 50 | 43 |
| 51 using ValueType = LeakDetectorValueType; | 44 using ValueType = LeakDetectorValueType; |
| 52 | 45 |
| 53 // Functions to convert an allocation size to/from the array index used for | 46 // Functions to convert an allocation size to/from the array index used for |
| 54 // |LeakDetectorImpl::size_entries_|. | 47 // |LeakDetectorImpl::size_entries_|. |
| 55 size_t SizeToIndex(const size_t size) { | 48 size_t SizeToIndex(const size_t size) { |
| 56 int result = static_cast<int>(size / sizeof(uint32_t)); | 49 int result = static_cast<int>(size / sizeof(uint32_t)); |
| 57 if (result < kNumSizeEntries) | 50 if (result < kNumSizeEntries) |
| 58 return result; | 51 return result; |
| 59 return 0; | 52 return 0; |
| 60 } | 53 } |
| 61 | 54 |
| 62 size_t IndexToSize(size_t index) { | 55 size_t IndexToSize(size_t index) { |
| 63 return sizeof(uint32_t) * index; | 56 return sizeof(uint32_t) * index; |
| 64 } | 57 } |
| 65 | 58 |
| 66 } // namespace | 59 } // namespace |
| 67 | 60 |
| 68 LeakDetectorImpl::LeakReport::AllocationBreakdown::AllocationBreakdown() | |
| 69 : count_for_call_stack(0) {} | |
| 70 | |
| 71 LeakDetectorImpl::LeakReport::AllocationBreakdown::~AllocationBreakdown() {} | |
| 72 | |
| 73 LeakDetectorImpl::LeakReport::LeakReport() : alloc_size_bytes_(0) {} | 61 LeakDetectorImpl::LeakReport::LeakReport() : alloc_size_bytes_(0) {} |
| 74 | 62 |
| 75 LeakDetectorImpl::LeakReport::~LeakReport() {} | 63 LeakDetectorImpl::LeakReport::~LeakReport() {} |
| 76 | 64 |
| 77 bool LeakDetectorImpl::LeakReport::operator<(const LeakReport& other) const { | 65 bool LeakDetectorImpl::LeakReport::operator<(const LeakReport& other) const { |
| 78 if (alloc_size_bytes_ != other.alloc_size_bytes_) | 66 if (alloc_size_bytes_ != other.alloc_size_bytes_) |
| 79 return alloc_size_bytes_ < other.alloc_size_bytes_; | 67 return alloc_size_bytes_ < other.alloc_size_bytes_; |
| 80 for (size_t i = 0; i < call_stack_.size() && i < other.call_stack_.size(); | 68 for (size_t i = 0; i < call_stack_.size() && i < other.call_stack_.size(); |
| 81 ++i) { | 69 ++i) { |
| 82 if (call_stack_[i] != other.call_stack_[i]) | 70 if (call_stack_[i] != other.call_stack_[i]) |
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 170 void LeakDetectorImpl::TestForLeaks(InternalVector<LeakReport>* reports) { | 158 void LeakDetectorImpl::TestForLeaks(InternalVector<LeakReport>* reports) { |
| 171 // Add net alloc counts for each size to a ranked list. | 159 // Add net alloc counts for each size to a ranked list. |
| 172 RankedSet size_ranked_set(kRankedSetSize); | 160 RankedSet size_ranked_set(kRankedSetSize); |
| 173 for (size_t i = 0; i < size_entries_.size(); ++i) { | 161 for (size_t i = 0; i < size_entries_.size(); ++i) { |
| 174 const AllocSizeEntry& entry = size_entries_[i]; | 162 const AllocSizeEntry& entry = size_entries_[i]; |
| 175 ValueType size_value(IndexToSize(i)); | 163 ValueType size_value(IndexToSize(i)); |
| 176 size_ranked_set.Add(size_value, entry.GetNetAllocs()); | 164 size_ranked_set.Add(size_value, entry.GetNetAllocs()); |
| 177 } | 165 } |
| 178 size_leak_analyzer_.AddSample(std::move(size_ranked_set)); | 166 size_leak_analyzer_.AddSample(std::move(size_ranked_set)); |
| 179 | 167 |
| 180 RecordCurrentAllocationDataInHistory(); | 168 // Record a snapshot of the current size table. |
| 169 InternalVector<uint32_t> current_size_table_record; |
| 170 current_size_table_record.reserve(kNumSizeEntriesInHistory); |
| 171 for (const AllocSizeEntry& entry : size_entries_) { |
| 172 if (current_size_table_record.size() == kNumSizeEntriesInHistory) |
| 173 break; |
| 174 current_size_table_record.push_back(entry.GetNetAllocs()); |
| 175 } |
| 176 size_breakdown_history_.emplace_back(std::move(current_size_table_record)); |
| 177 if (size_breakdown_history_.size() > kMaxNumHistoryEntries) |
| 178 size_breakdown_history_.pop_front(); |
| 181 | 179 |
| 182 // Get suspected leaks by size. | 180 // Get suspected leaks by size. |
| 183 for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) { | 181 for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) { |
| 184 uint32_t size = size_value.size(); | 182 uint32_t size = size_value.size(); |
| 185 AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)]; | 183 AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)]; |
| 186 if (entry->stack_table) | 184 if (entry->stack_table) |
| 187 continue; | 185 continue; |
| 188 entry->stack_table = new (CustomAllocator::Allocate(sizeof(CallStackTable))) | 186 entry->stack_table = new (CustomAllocator::Allocate(sizeof(CallStackTable))) |
| 189 CallStackTable(call_stack_suspicion_threshold_); | 187 CallStackTable(call_stack_suspicion_threshold_); |
| 190 ++num_stack_tables_; | 188 ++num_stack_tables_; |
| (...skipping 20 matching lines...) Expand all Loading... |
| 211 const CallStack* call_stack = call_stack_value.call_stack(); | 209 const CallStack* call_stack = call_stack_value.call_stack(); |
| 212 | 210 |
| 213 // Return reports by storing in |*reports|. | 211 // Return reports by storing in |*reports|. |
| 214 reports->resize(reports->size() + 1); | 212 reports->resize(reports->size() + 1); |
| 215 LeakReport* report = &reports->back(); | 213 LeakReport* report = &reports->back(); |
| 216 report->alloc_size_bytes_ = size; | 214 report->alloc_size_bytes_ = size; |
| 217 report->call_stack_.resize(call_stack->depth); | 215 report->call_stack_.resize(call_stack->depth); |
| 218 for (size_t j = 0; j < call_stack->depth; ++j) { | 216 for (size_t j = 0; j < call_stack->depth; ++j) { |
| 219 report->call_stack_[j] = GetOffset(call_stack->stack[j]); | 217 report->call_stack_[j] = GetOffset(call_stack->stack[j]); |
| 220 } | 218 } |
| 221 | 219 // Copy over the historical size data. |
| 222 StoreHistoricalDataInReport(size, call_stack, report); | 220 report->size_breakdown_history_.reserve(size_breakdown_history_.size()); |
| 221 report->size_breakdown_history_.assign(size_breakdown_history_.begin(), |
| 222 size_breakdown_history_.end()); |
| 223 } | 223 } |
| 224 } | 224 } |
| 225 } | 225 } |
| 226 | 226 |
| 227 LeakDetectorImpl::AllocSizeEntry::AllocSizeEntry() {} | |
| 228 LeakDetectorImpl::AllocSizeEntry::~AllocSizeEntry() {} | |
| 229 | |
| 230 size_t LeakDetectorImpl::AddressHash::operator()(uintptr_t addr) const { | 227 size_t LeakDetectorImpl::AddressHash::operator()(uintptr_t addr) const { |
| 231 return base::Hash(reinterpret_cast<const char*>(&addr), sizeof(addr)); | 228 return base::Hash(reinterpret_cast<const char*>(&addr), sizeof(addr)); |
| 232 } | 229 } |
| 233 | 230 |
| 234 uintptr_t LeakDetectorImpl::GetOffset(const void* ptr) const { | 231 uintptr_t LeakDetectorImpl::GetOffset(const void* ptr) const { |
| 235 uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr); | 232 uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr); |
| 236 if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_) | 233 if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_) |
| 237 return ptr_value - mapping_addr_; | 234 return ptr_value - mapping_addr_; |
| 238 return ptr_value; | 235 return ptr_value; |
| 239 } | 236 } |
| 240 | 237 |
| 241 void LeakDetectorImpl::RecordCurrentAllocationDataInHistory() { | |
| 242 // Record a snapshot of the current size table. | |
| 243 InternalVector<uint32_t> current_size_table_record; | |
| 244 current_size_table_record.reserve(kNumSizeEntriesInHistory); | |
| 245 for (const AllocSizeEntry& entry : size_entries_) { | |
| 246 if (current_size_table_record.size() == kNumSizeEntriesInHistory) | |
| 247 break; | |
| 248 current_size_table_record.push_back(entry.GetNetAllocs()); | |
| 249 } | |
| 250 size_breakdown_history_.emplace_back(std::move(current_size_table_record)); | |
| 251 if (size_breakdown_history_.size() > kMaxNumHistoryEntries) | |
| 252 size_breakdown_history_.pop_front(); | |
| 253 | |
| 254 // For each allocation size that has started profiling by call site, record a | |
| 255 // snapshot of the top call sites by number of allocations. | |
| 256 for (AllocSizeEntry& entry : size_entries_) { | |
| 257 if (!entry.stack_table) | |
| 258 continue; | |
| 259 RankedSet top_call_stacks(kNumTopCallStacksInHistory); | |
| 260 entry.stack_table->GetTopCallStacks(&top_call_stacks); | |
| 261 entry.call_site_breakdown_history.push_back(std::move(top_call_stacks)); | |
| 262 if (entry.call_site_breakdown_history.size() > kMaxNumHistoryEntries) | |
| 263 entry.call_site_breakdown_history.pop_front(); | |
| 264 } | |
| 265 } | |
| 266 | |
| 267 void LeakDetectorImpl::StoreHistoricalDataInReport(size_t size, | |
| 268 const CallStack* call_site, | |
| 269 LeakReport* report) { | |
| 270 using AllocationBreakdown = LeakReport::AllocationBreakdown; | |
| 271 // Copy historical allocation data into the report. | |
| 272 InternalVector<AllocationBreakdown>* dest = &report->alloc_breakdown_history_; | |
| 273 dest->reserve(size_breakdown_history_.size()); | |
| 274 | |
| 275 // Store each frame of the breakdown by size. | |
| 276 for (const InternalVector<uint32_t>& breakdown : size_breakdown_history_) { | |
| 277 dest->push_back(AllocationBreakdown()); | |
| 278 dest->back().counts_by_size = breakdown; | |
| 279 } | |
| 280 | |
| 281 // Store the count of all allocations with size=|size| and made from call site | |
| 282 // |call_site|. | |
| 283 const InternalList<RankedSet>& src = | |
| 284 size_entries_[SizeToIndex(size)].call_site_breakdown_history; | |
| 285 | |
| 286 auto src_iter = src.begin(); | |
| 287 auto dest_iter = dest->begin(); | |
| 288 // The call site history and the destination container may be of different | |
| 289 // sizes. Adjust their iterators so they are the same distance from the last | |
| 290 // element of each container, i.e. they will point to the frames corresponding | |
| 291 // to the same time. | |
| 292 if (src.size() > dest->size()) | |
| 293 std::advance(src_iter, src.size() - dest->size()); | |
| 294 else if (dest->size() > src.size()) | |
| 295 std::advance(dest_iter, dest->size() - src.size()); | |
| 296 | |
| 297 while (src_iter != src.end() && dest_iter != dest->end()) { | |
| 298 const RankedSet& ranked_call_sites = *src_iter; | |
| 299 auto find_call_site_iter = ranked_call_sites.FindCallStack(call_site); | |
| 300 if (find_call_site_iter != ranked_call_sites.end()) | |
| 301 dest_iter->count_for_call_stack = find_call_site_iter->count; | |
| 302 ++src_iter; | |
| 303 ++dest_iter; | |
| 304 } | |
| 305 } | |
| 306 | |
| 307 } // namespace leak_detector | 238 } // namespace leak_detector |
| 308 } // namespace metrics | 239 } // namespace metrics |
| OLD | NEW |