| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "leak_detector_impl.h" | 5 #include "leak_detector_impl.h" |
| 6 | 6 |
| 7 #include <inttypes.h> | 7 #include <inttypes.h> |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 | 9 |
| 10 #include <algorithm> | 10 #include <algorithm> // For std::move |
| 11 #include <iterator> // For std::advance |
| 11 #include <new> | 12 #include <new> |
| 12 | 13 |
| 13 #include "base/hash.h" | 14 #include "base/hash.h" |
| 14 #include "base/process/process_handle.h" | 15 #include "base/process/process_handle.h" |
| 15 #include "components/metrics/leak_detector/call_stack_table.h" | 16 #include "components/metrics/leak_detector/call_stack_table.h" |
| 16 #include "components/metrics/leak_detector/custom_allocator.h" | 17 #include "components/metrics/leak_detector/custom_allocator.h" |
| 17 #include "components/metrics/leak_detector/ranked_set.h" | 18 #include "components/metrics/leak_detector/ranked_set.h" |
| 18 | 19 |
| 19 namespace metrics { | 20 namespace metrics { |
| 20 namespace leak_detector { | 21 namespace leak_detector { |
| 21 | 22 |
| 22 namespace { | 23 namespace { |
| 23 | 24 |
| 24 // Look for leaks in the top N entries in each tier, where N is this value. | 25 // Look for leaks in the top N entries in each tier, where N is this value. |
| 25 const int kRankedSetSize = 16; | 26 const int kRankedSetSize = 16; |
| 26 | 27 |
| 27 // Initial hash table size for |LeakDetectorImpl::address_map_|. | 28 // Initial hash table size for |LeakDetectorImpl::address_map_|. |
| 28 const int kAddressMapNumBuckets = 100003; | 29 const int kAddressMapNumBuckets = 100003; |
| 29 | 30 |
| 30 // Number of entries in the alloc size table. As sizes are aligned to 32-bits | 31 // Number of entries in the alloc size table. As sizes are aligned to 32-bits |
| 31 // the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger | 32 // the max supported allocation size is (kNumSizeEntries * 4 - 1). Any larger |
| 32 // sizes are ignored. This value is chosen high enough that such large sizes | 33 // sizes are ignored. This value is chosen high enough that such large sizes |
| 33 // are rare if not nonexistent. | 34 // are rare if not nonexistent. |
| 34 const int kNumSizeEntries = 2048; | 35 const int kNumSizeEntries = 2048; |
| 35 | 36 |
| 36 // Record only the first |kNumSizeEntriesInHistory| size classes in | 37 // Record only the first |kNumSizeEntriesInHistory| size classes in |
| 37 // |LeakDetectorImpl::size_breakdown_history_|. | 38 // |LeakDetectorImpl::size_breakdown_history_|. |
| 38 const int kNumSizeEntriesInHistory = 32; | 39 const int kNumSizeEntriesInHistory = 32; |
| 39 | 40 |
| 40 // |LeakDetectorImpl::size_breakdown_history_| can have up to this many entries. | 41 // Record only the top |kNumTopCallStacksInHistory| call sites, ordered by |
| 41 // Any older entries must be discarded to make way for new ones. | 42 // number of allocations at each site, in |
| 43 // |AllocSizeEntry::call_site_breakdown_history|. |
| 44 const int kNumTopCallStacksInHistory = 32; |
| 45 |
| 46 // |LeakDetectorImpl::size_breakdown_history_| and |
| 47 // |AllocSizeEntry::call_site_breakdown_history| can have up to this many |
| 48 // entries. Any older entries must be discarded to make way for new ones. |
| 42 const int kMaxNumHistoryEntries = 32; | 49 const int kMaxNumHistoryEntries = 32; |
| 43 | 50 |
| 44 using ValueType = LeakDetectorValueType; | 51 using ValueType = LeakDetectorValueType; |
| 45 | 52 |
| 46 // Functions to convert an allocation size to/from the array index used for | 53 // Functions to convert an allocation size to/from the array index used for |
| 47 // |LeakDetectorImpl::size_entries_|. | 54 // |LeakDetectorImpl::size_entries_|. |
| 48 size_t SizeToIndex(const size_t size) { | 55 size_t SizeToIndex(const size_t size) { |
| 49 int result = static_cast<int>(size / sizeof(uint32_t)); | 56 int result = static_cast<int>(size / sizeof(uint32_t)); |
| 50 if (result < kNumSizeEntries) | 57 if (result < kNumSizeEntries) |
| 51 return result; | 58 return result; |
| 52 return 0; | 59 return 0; |
| 53 } | 60 } |
| 54 | 61 |
| 55 size_t IndexToSize(size_t index) { | 62 size_t IndexToSize(size_t index) { |
| 56 return sizeof(uint32_t) * index; | 63 return sizeof(uint32_t) * index; |
| 57 } | 64 } |
| 58 | 65 |
| 59 } // namespace | 66 } // namespace |
| 60 | 67 |
| 68 LeakDetectorImpl::LeakReport::AllocationBreakdown::AllocationBreakdown() |
| 69 : count_for_call_stack(0) {} |
| 70 |
| 71 LeakDetectorImpl::LeakReport::AllocationBreakdown::~AllocationBreakdown() {} |
| 72 |
| 61 LeakDetectorImpl::LeakReport::LeakReport() : alloc_size_bytes_(0) {} | 73 LeakDetectorImpl::LeakReport::LeakReport() : alloc_size_bytes_(0) {} |
| 62 | 74 |
| 63 LeakDetectorImpl::LeakReport::~LeakReport() {} | 75 LeakDetectorImpl::LeakReport::~LeakReport() {} |
| 64 | 76 |
| 65 bool LeakDetectorImpl::LeakReport::operator<(const LeakReport& other) const { | 77 bool LeakDetectorImpl::LeakReport::operator<(const LeakReport& other) const { |
| 66 if (alloc_size_bytes_ != other.alloc_size_bytes_) | 78 if (alloc_size_bytes_ != other.alloc_size_bytes_) |
| 67 return alloc_size_bytes_ < other.alloc_size_bytes_; | 79 return alloc_size_bytes_ < other.alloc_size_bytes_; |
| 68 for (size_t i = 0; i < call_stack_.size() && i < other.call_stack_.size(); | 80 for (size_t i = 0; i < call_stack_.size() && i < other.call_stack_.size(); |
| 69 ++i) { | 81 ++i) { |
| 70 if (call_stack_[i] != other.call_stack_[i]) | 82 if (call_stack_[i] != other.call_stack_[i]) |
| (...skipping 87 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 158 void LeakDetectorImpl::TestForLeaks(InternalVector<LeakReport>* reports) { | 170 void LeakDetectorImpl::TestForLeaks(InternalVector<LeakReport>* reports) { |
| 159 // Add net alloc counts for each size to a ranked list. | 171 // Add net alloc counts for each size to a ranked list. |
| 160 RankedSet size_ranked_set(kRankedSetSize); | 172 RankedSet size_ranked_set(kRankedSetSize); |
| 161 for (size_t i = 0; i < size_entries_.size(); ++i) { | 173 for (size_t i = 0; i < size_entries_.size(); ++i) { |
| 162 const AllocSizeEntry& entry = size_entries_[i]; | 174 const AllocSizeEntry& entry = size_entries_[i]; |
| 163 ValueType size_value(IndexToSize(i)); | 175 ValueType size_value(IndexToSize(i)); |
| 164 size_ranked_set.Add(size_value, entry.GetNetAllocs()); | 176 size_ranked_set.Add(size_value, entry.GetNetAllocs()); |
| 165 } | 177 } |
| 166 size_leak_analyzer_.AddSample(std::move(size_ranked_set)); | 178 size_leak_analyzer_.AddSample(std::move(size_ranked_set)); |
| 167 | 179 |
| 168 // Record a snapshot of the current size table. | 180 RecordCurrentAllocationDataInHistory(); |
| 169 InternalVector<uint32_t> current_size_table_record; | |
| 170 current_size_table_record.reserve(kNumSizeEntriesInHistory); | |
| 171 for (const AllocSizeEntry& entry : size_entries_) { | |
| 172 if (current_size_table_record.size() == kNumSizeEntriesInHistory) | |
| 173 break; | |
| 174 current_size_table_record.push_back(entry.GetNetAllocs()); | |
| 175 } | |
| 176 size_breakdown_history_.emplace_back(std::move(current_size_table_record)); | |
| 177 if (size_breakdown_history_.size() > kMaxNumHistoryEntries) | |
| 178 size_breakdown_history_.pop_front(); | |
| 179 | 181 |
| 180 // Get suspected leaks by size. | 182 // Get suspected leaks by size. |
| 181 for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) { | 183 for (const ValueType& size_value : size_leak_analyzer_.suspected_leaks()) { |
| 182 uint32_t size = size_value.size(); | 184 uint32_t size = size_value.size(); |
| 183 AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)]; | 185 AllocSizeEntry* entry = &size_entries_[SizeToIndex(size)]; |
| 184 if (entry->stack_table) | 186 if (entry->stack_table) |
| 185 continue; | 187 continue; |
| 186 entry->stack_table = new (CustomAllocator::Allocate(sizeof(CallStackTable))) | 188 entry->stack_table = new (CustomAllocator::Allocate(sizeof(CallStackTable))) |
| 187 CallStackTable(call_stack_suspicion_threshold_); | 189 CallStackTable(call_stack_suspicion_threshold_); |
| 188 ++num_stack_tables_; | 190 ++num_stack_tables_; |
| (...skipping 20 matching lines...) Expand all Loading... |
| 209 const CallStack* call_stack = call_stack_value.call_stack(); | 211 const CallStack* call_stack = call_stack_value.call_stack(); |
| 210 | 212 |
| 211 // Return reports by storing in |*reports|. | 213 // Return reports by storing in |*reports|. |
| 212 reports->resize(reports->size() + 1); | 214 reports->resize(reports->size() + 1); |
| 213 LeakReport* report = &reports->back(); | 215 LeakReport* report = &reports->back(); |
| 214 report->alloc_size_bytes_ = size; | 216 report->alloc_size_bytes_ = size; |
| 215 report->call_stack_.resize(call_stack->depth); | 217 report->call_stack_.resize(call_stack->depth); |
| 216 for (size_t j = 0; j < call_stack->depth; ++j) { | 218 for (size_t j = 0; j < call_stack->depth; ++j) { |
| 217 report->call_stack_[j] = GetOffset(call_stack->stack[j]); | 219 report->call_stack_[j] = GetOffset(call_stack->stack[j]); |
| 218 } | 220 } |
| 219 // Copy over the historical size data. | 221 |
| 220 report->size_breakdown_history_.reserve(size_breakdown_history_.size()); | 222 StoreHistoricalDataInReport(size, call_stack, report); |
| 221 report->size_breakdown_history_.assign(size_breakdown_history_.begin(), | |
| 222 size_breakdown_history_.end()); | |
| 223 } | 223 } |
| 224 } | 224 } |
| 225 } | 225 } |
| 226 | 226 |
| 227 LeakDetectorImpl::AllocSizeEntry::AllocSizeEntry() {} |
| 228 LeakDetectorImpl::AllocSizeEntry::~AllocSizeEntry() {} |
| 229 |
| 227 size_t LeakDetectorImpl::AddressHash::operator()(uintptr_t addr) const { | 230 size_t LeakDetectorImpl::AddressHash::operator()(uintptr_t addr) const { |
| 228 return base::Hash(reinterpret_cast<const char*>(&addr), sizeof(addr)); | 231 return base::Hash(reinterpret_cast<const char*>(&addr), sizeof(addr)); |
| 229 } | 232 } |
| 230 | 233 |
| 231 uintptr_t LeakDetectorImpl::GetOffset(const void* ptr) const { | 234 uintptr_t LeakDetectorImpl::GetOffset(const void* ptr) const { |
| 232 uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr); | 235 uintptr_t ptr_value = reinterpret_cast<uintptr_t>(ptr); |
| 233 if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_) | 236 if (ptr_value >= mapping_addr_ && ptr_value < mapping_addr_ + mapping_size_) |
| 234 return ptr_value - mapping_addr_; | 237 return ptr_value - mapping_addr_; |
| 235 return ptr_value; | 238 return ptr_value; |
| 236 } | 239 } |
| 237 | 240 |
| 241 void LeakDetectorImpl::RecordCurrentAllocationDataInHistory() { |
| 242 // Record a snapshot of the current size table. |
| 243 InternalVector<uint32_t> current_size_table_record; |
| 244 current_size_table_record.reserve(kNumSizeEntriesInHistory); |
| 245 for (const AllocSizeEntry& entry : size_entries_) { |
| 246 if (current_size_table_record.size() == kNumSizeEntriesInHistory) |
| 247 break; |
| 248 current_size_table_record.push_back(entry.GetNetAllocs()); |
| 249 } |
| 250 size_breakdown_history_.emplace_back(std::move(current_size_table_record)); |
| 251 if (size_breakdown_history_.size() > kMaxNumHistoryEntries) |
| 252 size_breakdown_history_.pop_front(); |
| 253 |
| 254 // For each allocation size that has started profiling by call site, record a |
| 255 // snapshot of the top call sites by number of allocations. |
| 256 for (AllocSizeEntry& entry : size_entries_) { |
| 257 if (!entry.stack_table) |
| 258 continue; |
| 259 RankedSet top_call_stacks(kNumTopCallStacksInHistory); |
| 260 entry.stack_table->GetTopCallStacks(&top_call_stacks); |
| 261 entry.call_site_breakdown_history.push_back(std::move(top_call_stacks)); |
| 262 if (entry.call_site_breakdown_history.size() > kMaxNumHistoryEntries) |
| 263 entry.call_site_breakdown_history.pop_front(); |
| 264 } |
| 265 } |
| 266 |
| 267 void LeakDetectorImpl::StoreHistoricalDataInReport(size_t size, |
| 268 const CallStack* call_site, |
| 269 LeakReport* report) { |
| 270 using AllocationBreakdown = LeakReport::AllocationBreakdown; |
| 271 // Copy historical allocation data into the report. |
| 272 InternalVector<AllocationBreakdown>* dest = &report->alloc_breakdown_history_; |
| 273 dest->reserve(size_breakdown_history_.size()); |
| 274 |
| 275 // Store each frame of the breakdown by size. |
| 276 for (const InternalVector<uint32_t>& breakdown : size_breakdown_history_) { |
| 277 dest->push_back(AllocationBreakdown()); |
| 278 dest->back().counts_by_size = breakdown; |
| 279 } |
| 280 |
| 281 // Store the count of all allocations with size=|size| and made from call site |
| 282 // |call_site|. |
| 283 const InternalList<RankedSet>& src = |
| 284 size_entries_[SizeToIndex(size)].call_site_breakdown_history; |
| 285 |
| 286 auto src_iter = src.begin(); |
| 287 auto dest_iter = dest->begin(); |
| 288 // The call site history and the destination container may be of different |
| 289 // sizes. Adjust their iterators so they are the same distance from the last |
| 290 // element of each container, i.e. they will point to the frames corresponding |
| 291 // to the same time. |
| 292 if (src.size() > dest->size()) |
| 293 std::advance(src_iter, src.size() - dest->size()); |
| 294 else if (dest->size() > src.size()) |
| 295 std::advance(dest_iter, dest->size() - src.size()); |
| 296 |
| 297 while (src_iter != src.end() && dest_iter != dest->end()) { |
| 298 const RankedSet& ranked_call_sites = *src_iter; |
| 299 auto find_call_site_iter = ranked_call_sites.FindCallStack(call_site); |
| 300 if (find_call_site_iter != ranked_call_sites.end()) |
| 301 dest_iter->count_for_call_stack = find_call_site_iter->count; |
| 302 ++src_iter; |
| 303 ++dest_iter; |
| 304 } |
| 305 } |
| 306 |
| 238 } // namespace leak_detector | 307 } // namespace leak_detector |
| 239 } // namespace metrics | 308 } // namespace metrics |
| OLD | NEW |