Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "components/metrics/leak_detector/leak_detector_impl.h" | 5 #include "components/metrics/leak_detector/leak_detector_impl.h" |
| 6 | 6 |
| 7 #include <math.h> | 7 #include <math.h> |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 #include <stdint.h> | 9 #include <stdint.h> |
| 10 | 10 |
| 11 #include <complex> | 11 #include <complex> |
| 12 #include <new> | 12 #include <new> |
| 13 #include <set> | 13 #include <set> |
| 14 #include <vector> | 14 #include <vector> |
| 15 | 15 |
| 16 #include "base/macros.h" | 16 #include "base/macros.h" |
| 17 #include "base/memory/scoped_ptr.h" | 17 #include "base/memory/scoped_ptr.h" |
| 18 #include "components/metrics/leak_detector/custom_allocator.h" | 18 #include "components/metrics/leak_detector/custom_allocator.h" |
| 19 #include "testing/gtest/include/gtest/gtest.h" | 19 #include "testing/gtest/include/gtest/gtest.h" |
| 20 | 20 |
| 21 namespace metrics { | 21 namespace metrics { |
| 22 namespace leak_detector { | 22 namespace leak_detector { |
| 23 | 23 |
| 24 namespace { | 24 namespace { |
| 25 | 25 |
| 26 // Makes working with complex numbers easier. | 26 // Makes working with complex numbers easier. |
| 27 using Complex = std::complex<double>; | 27 using Complex = std::complex<double>; |
| 28 | 28 |
| 29 using InternalLeakReport = LeakDetectorImpl::LeakReport; | |
| 30 using AllocationBreakdown = LeakDetectorImpl::LeakReport::AllocationBreakdown; | |
| 31 | |
| 29 // The mapping location in memory for a fictional executable. | 32 // The mapping location in memory for a fictional executable. |
| 30 const uintptr_t kMappingAddr = 0x800000; | 33 const uintptr_t kMappingAddr = 0x800000; |
| 31 const size_t kMappingSize = 0x200000; | 34 const size_t kMappingSize = 0x200000; |
| 32 | 35 |
| 33 // Some call stacks within the fictional executable. | 36 // Some call stacks within the fictional executable. |
| 34 // * - outside the mapping range, e.g. JIT code. | 37 // * - outside the mapping range, e.g. JIT code. |
| 35 const uintptr_t kRawStack0[] = { | 38 const uintptr_t kRawStack0[] = { |
| 36 0x800100, 0x900000, 0x880080, 0x810000, | 39 0x800100, 0x900000, 0x880080, 0x810000, |
| 37 }; | 40 }; |
| 38 const uintptr_t kRawStack1[] = { | 41 const uintptr_t kRawStack1[] = { |
| (...skipping 36 matching lines...) | |
| 75 | 78 |
| 76 // The interval between consecutive analyses (LeakDetectorImpl::TestForLeaks), | 79 // The interval between consecutive analyses (LeakDetectorImpl::TestForLeaks), |
| 77 // in number of bytes allocated. e.g. if |kAllocedSizeAnalysisInterval| = 1024 | 80 // in number of bytes allocated. e.g. if |kAllocedSizeAnalysisInterval| = 1024 |
| 78 // then call TestForLeaks() every 1024 bytes of allocation that occur. | 81 // then call TestForLeaks() every 1024 bytes of allocation that occur. |
| 79 const size_t kAllocedSizeAnalysisInterval = 8192; | 82 const size_t kAllocedSizeAnalysisInterval = 8192; |
| 80 | 83 |
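
The bookkeeping that implements this interval appears in the test's Alloc() helper further down. As a minimal standalone sketch of the idea (the OnAlloc() hook and globals are illustrative names, not the real hook API):

```cpp
// Byte-interval gating, mirroring the bookkeeping in the test's Alloc().
#include <cstddef>

const size_t kAllocedSizeAnalysisInterval = 8192;
size_t g_total_alloced_size = 0;
size_t g_next_analysis_at = kAllocedSizeAnalysisInterval;

void OnAlloc(size_t size) {
  g_total_alloced_size += size;
  if (g_total_alloced_size >= g_next_analysis_at) {
    // detector->TestForLeaks(...) would run one analysis pass here.
    // Skip past any intervals covered by a single large allocation.
    while (g_total_alloced_size >= g_next_analysis_at)
      g_next_analysis_at += kAllocedSizeAnalysisInterval;
  }
}
```
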
| 81 // Suspicion thresholds used by LeakDetectorImpl for size and call stacks. | 84 // Suspicion thresholds used by LeakDetectorImpl for size and call stacks. |
| 82 const uint32_t kSizeSuspicionThreshold = 4; | 85 const uint32_t kSizeSuspicionThreshold = 4; |
| 83 const uint32_t kCallStackSuspicionThreshold = 4; | 86 const uint32_t kCallStackSuspicionThreshold = 4; |
| 84 | 87 |
| 88 // Because it takes N+1 analyses to reach a suspicion threshold of N (the | |
| 89 // suspicion score is only calculated based on deltas from the previous | |
| 90 // analysis), the actual number of analyses it takes to generate a report for | |
| 91 // the first time is: | |
| 92 const uint32_t kMinNumAnalysesToGenerateReport = | |
| 93 kSizeSuspicionThreshold + 1 + kCallStackSuspicionThreshold + 1; | |
| 94 | |
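
A standalone sketch of the N + 1 arithmetic in the comment above (illustrative only; LeakDetectorImpl's actual scoring is more involved): the first analysis can only record a baseline, so a delta-based score needs threshold + 1 passes.

```cpp
// Why a suspicion threshold of N takes N + 1 analyses to reach: the first
// analysis has no previous snapshot to diff against, so it scores nothing.
#include <cstdint>

uint32_t AnalysesToReachThreshold(uint32_t threshold) {
  uint32_t score = 0;
  uint32_t analyses = 0;
  bool have_baseline = false;
  while (score < threshold) {
    ++analyses;
    if (!have_baseline)
      have_baseline = true;  // First pass: baseline only.
    else
      ++score;  // Later passes score the rising delta.
  }
  return analyses;  // Always threshold + 1 for a steadily rising count.
}
```
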
| 85 // Returns the offset within [kMappingAddr, kMappingAddr + kMappingSize) if | 95 // Returns the offset within [kMappingAddr, kMappingAddr + kMappingSize) if |
| 86 // |addr| falls in that range. Otherwise, returns |addr|. | 96 // |addr| falls in that range. Otherwise, returns |addr|. |
| 87 uintptr_t GetOffsetInMapping(uintptr_t addr) { | 97 uintptr_t GetOffsetInMapping(uintptr_t addr) { |
| 88 if (addr >= kMappingAddr && addr < kMappingAddr + kMappingSize) | 98 if (addr >= kMappingAddr && addr < kMappingAddr + kMappingSize) |
| 89 return addr - kMappingAddr; | 99 return addr - kMappingAddr; |
| 90 return addr; | 100 return addr; |
| 91 } | 101 } |
| 92 | 102 |
| 93 // Copied from leak_detector_impl.cc. Converts a size to a size class index. | 103 // Copied from leak_detector_impl.cc. Converts a size to a size class index. |
| 94 // Any size in the range [index * 4, index * 4 + 3] falls into that size class. | 104 // Any size in the range [index * 4, index * 4 + 3] falls into that size class. |
| 95 uint32_t SizeToIndex(size_t size) { | 105 uint32_t SizeToIndex(size_t size) { |
| 96 return size / sizeof(uint32_t); | 106 return size / sizeof(uint32_t); |
| 97 } | 107 } |
| 98 | 108 |
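
A quick worked example of the mapping, assuming SizeToIndex() above is in scope:

```cpp
// Sizes 32..35 share size class 8; 36 starts class 9. The leaky sizes
// exercised below, 32 and 48 bytes, land in classes 8 and 12.
#include <cassert>

void SizeClassExamples() {
  assert(SizeToIndex(32) == 8);   // 32 / 4
  assert(SizeToIndex(35) == 8);   // Still class 8.
  assert(SizeToIndex(36) == 9);   // Next class.
  assert(SizeToIndex(48) == 12);  // The other leaky size checked below.
}
```
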
| 109 // Returns true if the |alloc_breakdown_history_| field of the two LeakReports | |
| 110 // |a| and |b| are the same. | |
| 111 bool CompareReportAllocHistory(const InternalLeakReport& a, | |
| 112 const InternalLeakReport& b) { | |
| 113 auto alloc_breakdown_compare_func = [](AllocationBreakdown a, | |
| 114 AllocationBreakdown b) -> bool { | |
| 115 return std::equal(a.counts_by_size.begin(), a.counts_by_size.end(), | |
| 116 b.counts_by_size.begin()) && | |
| 117 a.count_for_call_stack == b.count_for_call_stack; | |
| 118 }; | |
| 119 return std::equal( | |
| 120 a.alloc_breakdown_history().begin(), a.alloc_breakdown_history().end(), | |
| 121 b.alloc_breakdown_history().begin(), alloc_breakdown_compare_func); | |
| 122 } | |
| 123 | |
| 99 } // namespace | 124 } // namespace |
| 100 | 125 |
| 101 // This test suite will test the ability of LeakDetectorImpl to catch leaks in | 126 // This test suite will test the ability of LeakDetectorImpl to catch leaks in |
| 102 // a program. Individual tests can run leaky code locally. | 127 // a program. Individual tests can run leaky code locally. |
| 103 // | 128 // |
| 104 // The leaky code must call Alloc() and Free() for heap memory management. It | 129 // The leaky code must call Alloc() and Free() for heap memory management. It |
| 105 // should not call new and delete directly. See comments on those | 130 // should not call new and delete directly. See comments on those |
| 106 // functions for more details. | 131 // functions for more details. |
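
For orientation, a leak in this harness looks like the following hypothetical fragment, written as it would appear inside a test body of this fixture (Alloc() and Free() are the harness methods defined below; kStack1 is one of the nominal call stacks):

```cpp
// Hypothetical leaky fragment in the style this harness expects.
void* p = Alloc(32, kStack1);  // Registered with |detector_|.
void* q = Alloc(32, kStack1);
Free(p);                       // Balanced alloc/free: not a leak.
// Free(q) deliberately omitted: |q| stays in |alloced_ptrs_| and keeps
// raising the per-size and per-call-stack deltas until a report fires.
```
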
| 107 class LeakDetectorImplTest : public ::testing::Test { | 132 class LeakDetectorImplTest : public ::testing::Test { |
| 108 public: | 133 public: |
| 109 LeakDetectorImplTest() | 134 LeakDetectorImplTest() |
| 110 : total_num_allocs_(0), | 135 : total_num_allocs_(0), |
| 111 total_num_frees_(0), | 136 total_num_frees_(0), |
| 112 total_alloced_size_(0), | 137 total_alloced_size_(0), |
| 113 next_analysis_total_alloced_size_(kAllocedSizeAnalysisInterval) {} | 138 next_analysis_total_alloced_size_(kAllocedSizeAnalysisInterval), |
| 139 num_reports_generated_(0) {} | |
| 114 | 140 |
| 115 void SetUp() override { | 141 void SetUp() override { |
| 116 CustomAllocator::Initialize(); | 142 CustomAllocator::Initialize(); |
| 117 | 143 |
| 118 detector_.reset(new LeakDetectorImpl(kMappingAddr, kMappingSize, | 144 detector_.reset(new LeakDetectorImpl(kMappingAddr, kMappingSize, |
| 119 kSizeSuspicionThreshold, | 145 kSizeSuspicionThreshold, |
| 120 kCallStackSuspicionThreshold)); | 146 kCallStackSuspicionThreshold)); |
| 121 } | 147 } |
| 122 | 148 |
| 123 void TearDown() override { | 149 void TearDown() override { |
| 124 // Free any memory that was leaked by test cases. Do not use Free() because | 150 // Free any memory that was leaked by test cases. Do not use Free() because |
| 125 // that will try to modify |alloced_ptrs_|. | 151 // that will try to modify |alloced_ptrs_|. |
| 126 for (void* ptr : alloced_ptrs_) | 152 for (void* ptr : alloced_ptrs_) |
| 127 delete[] reinterpret_cast<char*>(ptr); | 153 delete[] reinterpret_cast<char*>(ptr); |
| 128 alloced_ptrs_.clear(); | 154 alloced_ptrs_.clear(); |
| 129 | 155 |
| 130 // Must destroy all objects that use CustomAllocator before shutting down. | 156 // Must destroy all objects that use CustomAllocator before shutting down. |
| 131 detector_.reset(); | 157 detector_.reset(); |
| 132 stored_reports_.clear(); | 158 stored_reports_.clear(); |
| 133 | 159 |
| 134 EXPECT_TRUE(CustomAllocator::Shutdown()); | 160 EXPECT_TRUE(CustomAllocator::Shutdown()); |
| 135 } | 161 } |
| 136 | 162 |
| 137 protected: | 163 protected: |
| 138 using InternalLeakReport = LeakDetectorImpl::LeakReport; | |
| 139 template <typename T> | 164 template <typename T> |
| 140 using InternalVector = LeakDetectorImpl::InternalVector<T>; | 165 using InternalVector = LeakDetectorImpl::InternalVector<T>; |
| 141 using AllocationBreakdown = LeakDetectorImpl::LeakReport::AllocationBreakdown; | |
| 142 | 166 |
| 143 // Alloc and free functions that allocate and free heap memory and | 167 // Alloc and free functions that allocate and free heap memory and |
| 144 // automatically pass alloc/free info to |detector_|. They emulate the | 168 // automatically pass alloc/free info to |detector_|. They emulate the |
| 145 // alloc/free hook functions that would call into LeakDetectorImpl in | 169 // alloc/free hook functions that would call into LeakDetectorImpl in |
| 146 // real-life usage. They also keep track of individual allocations locally, so | 170 // real-life usage. They also keep track of individual allocations locally, so |
| 147 // any leaked memory can be cleaned up. | 171 // any leaked memory can be cleaned up. |
| 148 // | 172 // |
| 149 // |stack| is just a nominal call stack object to identify the call site. It | 173 // |stack| is just a nominal call stack object to identify the call site. It |
| 150 // doesn't have to contain the stack trace of the actual call stack. | 174 // doesn't have to contain the stack trace of the actual call stack. |
| 151 void* Alloc(size_t size, const TestCallStack& stack) { | 175 void* Alloc(size_t size, const TestCallStack& stack) { |
| (...skipping 15 matching lines...) | |
| 167 stored_reports_.insert(report); | 191 stored_reports_.insert(report); |
| 168 } else { | 192 } else { |
| 169 // InternalLeakReports are uniquely identified by |alloc_size_bytes_| | 193 // InternalLeakReports are uniquely identified by |alloc_size_bytes_| |
| 170 // and |call_stack_|. See InternalLeakReport::operator<(). | 194 // and |call_stack_|. See InternalLeakReport::operator<(). |
| 171 // If a report with the same size and call stack already exists, | 195 // If a report with the same size and call stack already exists, |
| 172 // overwrite it with the new report, which has a newer history. | 196 // overwrite it with the new report, which has a newer history. |
| 173 stored_reports_.erase(iter); | 197 stored_reports_.erase(iter); |
| 174 stored_reports_.insert(report); | 198 stored_reports_.insert(report); |
| 175 } | 199 } |
| 176 } | 200 } |
| 201 num_reports_generated_ += reports.size(); | |
| 177 | 202 |
| 178 // Determine when the next leak analysis should occur. | 203 // Determine when the next leak analysis should occur. |
| 179 while (total_alloced_size_ >= next_analysis_total_alloced_size_) | 204 while (total_alloced_size_ >= next_analysis_total_alloced_size_) |
| 180 next_analysis_total_alloced_size_ += kAllocedSizeAnalysisInterval; | 205 next_analysis_total_alloced_size_ += kAllocedSizeAnalysisInterval; |
| 181 } | 206 } |
| 182 return ptr; | 207 return ptr; |
| 183 } | 208 } |
| 184 | 209 |
| 185 // See comment for Alloc(). | 210 // See comment for Alloc(). |
| 186 void Free(void* ptr) { | 211 void Free(void* ptr) { |
| (...skipping 32 matching lines...) | |
| 219 | 244 |
| 220 // Stores all pointers to memory allocated by Alloc() so we can manually | 245 // Stores all pointers to memory allocated by Alloc() so we can manually |
| 221 // free the leaked pointers at the end. This also serves as redundant | 246 // free the leaked pointers at the end. This also serves as redundant |
| 222 // bookkeeping: it stores all pointers that have been allocated but not yet | 247 // bookkeeping: it stores all pointers that have been allocated but not yet |
| 223 // freed. | 248 // freed. |
| 224 std::set<void*> alloced_ptrs_; | 249 std::set<void*> alloced_ptrs_; |
| 225 | 250 |
| 226 // Store leak reports here. Use a set so duplicate reports are not stored. | 251 // Store leak reports here. Use a set so duplicate reports are not stored. |
| 227 std::set<InternalLeakReport> stored_reports_; | 252 std::set<InternalLeakReport> stored_reports_; |
| 228 | 253 |
| 254 // Keeps track of the actual number of reports (duplicate or not) that were | |
| 255 // generated by |detector_|. | |
| 256 size_t num_reports_generated_; | |
| 257 | |
| 229 private: | 258 private: |
| 230 DISALLOW_COPY_AND_ASSIGN(LeakDetectorImplTest); | 259 DISALLOW_COPY_AND_ASSIGN(LeakDetectorImplTest); |
| 231 }; | 260 }; |
| 232 | 261 |
| 233 void LeakDetectorImplTest::SimpleLeakyFunction(bool enable_leaks) { | 262 void LeakDetectorImplTest::SimpleLeakyFunction(bool enable_leaks) { |
| 234 std::vector<uint32_t*> ptrs(7); | 263 std::vector<uint32_t*> ptrs(7); |
| 235 | 264 |
| 236 const int kNumOuterIterations = 20; | 265 const int kNumOuterIterations = 32; |
| 237 for (int j = 0; j < kNumOuterIterations; ++j) { | 266 for (int j = 0; j < kNumOuterIterations; ++j) { |
| 238 // The inner loop allocates 256 bytes. Run it 32 times so that 8192 bytes | 267 // The inner loop allocates 256 bytes. Run it 32 times so that 8192 bytes |
| 239 // (|kAllocedSizeAnalysisInterval|) are allocated for each iteration of the | 268 // (|kAllocedSizeAnalysisInterval|) are allocated for each iteration of the |
| 240 // outer loop. | 269 // outer loop. |
| 241 const int kNumInnerIterations = 32; | 270 const int kNumInnerIterations = 32; |
| 242 static_assert(kNumInnerIterations * 256 == kAllocedSizeAnalysisInterval, | 271 static_assert(kNumInnerIterations * 256 == kAllocedSizeAnalysisInterval, |
| 243 "Inner loop iterations do not allocate the correct number of " | 272 "Inner loop iterations do not allocate the correct number of " |
| 244 "bytes."); | 273 "bytes."); |
| 245 for (int i = 0; i < kNumInnerIterations; ++i) { | 274 for (int i = 0; i < kNumInnerIterations; ++i) { |
| 246 size_t alloc_size_at_beginning = total_alloced_size_; | 275 size_t alloc_size_at_beginning = total_alloced_size_; |
| (...skipping 237 matching lines...) | |
| 484 EXPECT_EQ(7U, total_num_frees_); | 513 EXPECT_EQ(7U, total_num_frees_); |
| 485 EXPECT_EQ(0U, alloced_ptrs_.size()); | 514 EXPECT_EQ(0U, alloced_ptrs_.size()); |
| 486 } | 515 } |
| 487 | 516 |
| 488 TEST_F(LeakDetectorImplTest, SimpleLeakyFunctionNoLeak) { | 517 TEST_F(LeakDetectorImplTest, SimpleLeakyFunctionNoLeak) { |
| 489 SimpleLeakyFunction(false /* enable_leaks */); | 518 SimpleLeakyFunction(false /* enable_leaks */); |
| 490 | 519 |
| 491 // SimpleLeakyFunction() should have run cleanly without leaking. | 520 // SimpleLeakyFunction() should have run cleanly without leaking. |
| 492 EXPECT_EQ(total_num_allocs_, total_num_frees_); | 521 EXPECT_EQ(total_num_allocs_, total_num_frees_); |
| 493 EXPECT_EQ(0U, alloced_ptrs_.size()); | 522 EXPECT_EQ(0U, alloced_ptrs_.size()); |
| 494 ASSERT_EQ(0U, stored_reports_.size()); | 523 EXPECT_EQ(0U, num_reports_generated_); |
| 524 EXPECT_EQ(0U, stored_reports_.size()); | |
| 495 } | 525 } |
| 496 | 526 |
| 497 TEST_F(LeakDetectorImplTest, SimpleLeakyFunctionWithLeak) { | 527 TEST_F(LeakDetectorImplTest, SimpleLeakyFunctionWithLeak) { |
| 498 SimpleLeakyFunction(true /* enable_leaks */); | 528 SimpleLeakyFunction(true /* enable_leaks */); |
| 499 | 529 |
| 500 // SimpleLeakyFunction() should have generated some leak reports. | 530 // SimpleLeakyFunction() should have generated some leak reports. |
| 501 EXPECT_GT(total_num_allocs_, total_num_frees_); | 531 EXPECT_GT(total_num_allocs_, total_num_frees_); |
| 502 EXPECT_GT(alloced_ptrs_.size(), 0U); | 532 EXPECT_GT(alloced_ptrs_.size(), 0U); |
| 533 EXPECT_EQ(2U, num_reports_generated_); | |
| 503 ASSERT_EQ(2U, stored_reports_.size()); | 534 ASSERT_EQ(2U, stored_reports_.size()); |
| 504 | 535 |
| 505 // The reports should be stored in order of size. | 536 // The reports should be stored in order of size. |
| 506 | 537 |
| 507 // |report1| comes from the call site marked with kStack1, with size=32. | 538 // |report1| comes from the call site marked with kStack1, with size=32. |
| 508 const InternalLeakReport& report1 = *stored_reports_.begin(); | 539 const InternalLeakReport& report1 = *stored_reports_.begin(); |
| 509 EXPECT_EQ(32U, report1.alloc_size_bytes()); | 540 EXPECT_EQ(32U, report1.alloc_size_bytes()); |
| 510 ASSERT_EQ(kStack1.depth, report1.call_stack().size()); | 541 ASSERT_EQ(kStack1.depth, report1.call_stack().size()); |
| 511 for (size_t i = 0; i < kStack1.depth; ++i) { | 542 for (size_t i = 0; i < kStack1.depth; ++i) { |
| 512 EXPECT_EQ(GetOffsetInMapping(kStack1.stack[i]), | 543 EXPECT_EQ(GetOffsetInMapping(kStack1.stack[i]), |
| (...skipping 10 matching lines...) | |
| 523 } | 554 } |
| 524 | 555 |
| 525 // Check historical data recorded in the reports. | 556 // Check historical data recorded in the reports. |
| 526 // - Each inner loop iteration allocates a net of 1x 32 bytes and 1x 48 bytes. | 557 // - Each inner loop iteration allocates a net of 1x 32 bytes and 1x 48 bytes. |
| 527 // - Each outer loop iteration allocates a net of 32x 32 bytes and 32x 48 | 558 // - Each outer loop iteration allocates a net of 32x 32 bytes and 32x 48 |
| 528 // bytes. | 559 // bytes. |
| 529 // - However, the leak analysis happens after the allocs but before the frees | 560 // - However, the leak analysis happens after the allocs but before the frees |
| 530 // that come right after. So it should count the two extra allocs made at | 561 // that come right after. So it should count the two extra allocs made at |
| 531 // call sites |kStack3| and |kStack4|. The formula is |(i + 1) * 32 + 2|, | 562 // call sites |kStack3| and |kStack4|. The formula is |(i + 1) * 32 + 2|, |
| 532 // where |i| is the iteration index. | 563 // where |i| is the iteration index. |
| 533 // There should have been one leak analysis per outer loop iteration, for a | 564 // - It takes |kMinNumAnalysesToGenerateReport| analyses for the first report |
| 534 // total of 20 history records (|kNumOuterIterations|) per report. | 565 // to be generated. Subsequent analyses do not generate reports due to the |
| 566 // cooldown mechanism. | |
| 535 | 567 |
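
Making the formula concrete with a tiny illustrative helper (not part of the patch):

```cpp
// Expected outstanding allocations of one leaky size at analysis |i|:
// 32 net leaks per outer-loop iteration, plus the two allocations made at
// |kStack3| and |kStack4| that are still live when the analysis runs.
#include <cstdint>
#include <cstddef>

uint32_t ExpectedLeakyCount(size_t i) {
  return (i + 1) * 32 + 2;  // i == 0 -> 34, i == 1 -> 66, i == 2 -> 98, ...
}
```
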
| 536 const auto& report1_history = report1.alloc_breakdown_history(); | 568 const auto& report1_history = report1.alloc_breakdown_history(); |
| 537 EXPECT_EQ(20U, report1_history.size()); | 569 EXPECT_EQ(kMinNumAnalysesToGenerateReport, report1_history.size()); |
| 538 | 570 |
| 539 for (size_t i = 0; i < report1_history.size(); ++i) { | 571 for (size_t i = 0; i < report1_history.size(); ++i) { |
| 540 const AllocationBreakdown& entry = report1_history[i]; | 572 const AllocationBreakdown& entry = report1_history[i]; |
| 541 | 573 |
| 542 const InternalVector<uint32_t>& counts_by_size = entry.counts_by_size; | 574 const InternalVector<uint32_t>& counts_by_size = entry.counts_by_size; |
| 543 ASSERT_GT(counts_by_size.size(), SizeToIndex(48)); | 575 ASSERT_GT(counts_by_size.size(), SizeToIndex(48)); |
| 544 | 576 |
| 545 // Check the two leaky sizes, 32 and 48. | 577 // Check the two leaky sizes, 32 and 48. |
| 546 uint32_t expected_leaky_count = (i + 1) * 32 + 2; | 578 uint32_t expected_leaky_count = (i + 1) * 32 + 2; |
| 547 EXPECT_EQ(expected_leaky_count, counts_by_size[SizeToIndex(32)]); | 579 EXPECT_EQ(expected_leaky_count, counts_by_size[SizeToIndex(32)]); |
| (...skipping 15 matching lines...) | |
| 563 // increase by 32 each frame. See comments above. | 595 // increase by 32 each frame. See comments above. |
| 564 uint32_t expected_call_stack_count = 0; | 596 uint32_t expected_call_stack_count = 0; |
| 565 for (size_t i = kSizeSuspicionThreshold; i < report1_history.size(); ++i) { | 597 for (size_t i = kSizeSuspicionThreshold; i < report1_history.size(); ++i) { |
| 566 EXPECT_EQ(expected_call_stack_count, | 598 EXPECT_EQ(expected_call_stack_count, |
| 567 report1_history[i].count_for_call_stack); | 599 report1_history[i].count_for_call_stack); |
| 568 expected_call_stack_count += 32; | 600 expected_call_stack_count += 32; |
| 569 } | 601 } |
| 570 | 602 |
| 571 // |report2| should have the same size history and call stack history as | 603 // |report2| should have the same size history and call stack history as |
| 572 // |report1|. | 604 // |report1|. |
| 573 const auto& report2_history = report2.alloc_breakdown_history(); | 605 EXPECT_TRUE(CompareReportAllocHistory(report1, report2)); |
| 574 auto compare_history_func = | 606 } |
| 575 [](AllocationBreakdown a, AllocationBreakdown b) -> bool { | 607 |
| 576 return std::equal(a.counts_by_size.begin(), a.counts_by_size.end(), | 608 TEST_F(LeakDetectorImplTest, SimpleLeakyFunctionWithLeakThreeTimes) { |
| 577 b.counts_by_size.begin()) && | 609 // Run three iterations of the leaky function. |
| 578 a.count_for_call_stack == b.count_for_call_stack; | 610 SimpleLeakyFunction(true /* enable_leaks */); |
| 579 }; | 611 SimpleLeakyFunction(true /* enable_leaks */); |
| 580 EXPECT_TRUE(std::equal(report1_history.begin(), report1_history.end(), | 612 SimpleLeakyFunction(true /* enable_leaks */); |
| 581 report2_history.begin(), compare_history_func)); | 613 |
| 614 // SimpleLeakyFunction() should have generated three times as many leak | |
| 615 // reports, because the number of iterations is the same as the cooldown of | |
| 616 // LeakDetectorImpl. But the number of unique reports stored is still two. | |
| 617 EXPECT_EQ(6U, num_reports_generated_); | |
| 618 ASSERT_EQ(2U, stored_reports_.size()); | |
| 619 | |
| 620 // The reports should be stored in order of size. | |
| 621 | |
| 622 // |report1| comes from the call site marked with kStack1, with size=32. | |
| 623 const InternalLeakReport& report1 = *stored_reports_.begin(); | |
| 624 EXPECT_EQ(32U, report1.alloc_size_bytes()); | |
| 625 ASSERT_EQ(kStack1.depth, report1.call_stack().size()); | |
| 626 for (size_t i = 0; i < kStack1.depth; ++i) { | |
| 627 EXPECT_EQ(GetOffsetInMapping(kStack1.stack[i]), report1.call_stack()[i]) | |
| 628 << i; | |
| 629 } | |
| 630 | |
| 631 // |report2| comes from the call site marked with kStack2, with size=48. | |
| 632 const InternalLeakReport& report2 = *(++stored_reports_.begin()); | |
| 633 EXPECT_EQ(48U, report2.alloc_size_bytes()); | |
| 634 ASSERT_EQ(kStack2.depth, report2.call_stack().size()); | |
| 635 for (size_t i = 0; i < kStack2.depth; ++i) { | |
| 636 EXPECT_EQ(GetOffsetInMapping(kStack2.stack[i]), report2.call_stack()[i]) | |
| 637 << i; | |
| 638 } | |
| 639 | |
| 640 const auto& report1_history = report1.alloc_breakdown_history(); | |
| 641 EXPECT_EQ(32U, report1_history.size()); | |
| 642 | |
| 643 for (size_t i = 1; i < report1_history.size(); ++i) { | |
| 644 const InternalVector<uint32_t>& counts_by_size = | |
| 645 report1_history[i].counts_by_size; | |
| 646 const InternalVector<uint32_t>& prev_counts_by_size = | |
| 647 report1_history[i - 1].counts_by_size; | |
| 648 ASSERT_GT(counts_by_size.size(), SizeToIndex(48)); | |
| 649 | |
| 650 // Check the two leaky sizes, 32 and 48. At this point, the exact counts | |
| 651 // could be computed but the computations are too complex for a unit test. | |
| 652 // Instead, check that the counts increase by 32 from the previous count. | |
| 653 // Same goes for checking call site counts later. | |
| 654 EXPECT_GT(counts_by_size[SizeToIndex(32)], 0U); | |
| 655 EXPECT_GT(counts_by_size[SizeToIndex(48)], 0U); | |
| 656 EXPECT_EQ(prev_counts_by_size[SizeToIndex(32)] + 32, | |
| 657 counts_by_size[SizeToIndex(32)]); | |
| 658 EXPECT_EQ(prev_counts_by_size[SizeToIndex(48)] + 32, | |
| 659 counts_by_size[SizeToIndex(48)]); | |
| 660 | |
| 661 // Not related to the leaks, but there should be a dangling 16-byte | |
| 662 // allocation during each leak analysis, because it hasn't yet been freed. | |
| 663 EXPECT_EQ(1U, counts_by_size[SizeToIndex(16)]); | |
| 664 } | |
| 665 | |
| 666 // Check call site count over time. | |
| 667 ASSERT_LT(kSizeSuspicionThreshold, report1_history.size()); | |
| 668 // Sufficient time has passed since the first report was generated. The entire | |
| 669 // alloc history should contain call site counts. | |
| 670 for (size_t i = 1; i < report1_history.size(); ++i) { | |
| 671 EXPECT_GT(report1_history[i].count_for_call_stack, 0U); | |
| 672 EXPECT_EQ(report1_history[i - 1].count_for_call_stack + 32, | |
| 673 report1_history[i].count_for_call_stack); | |
| 674 } | |
| 675 | |
| 676 // |report2| should have the same size history and call stack history as | |
| 677 // |report1|. | |
| 678 EXPECT_TRUE(CompareReportAllocHistory(report1, report2)); | |
| 582 } | 679 } |
| 583 | 680 |
| 584 TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) { | 681 TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) { |
| 585 JuliaSet(false /* enable_leaks */); | 682 JuliaSet(false /* enable_leaks */); |
| 586 | 683 |
| 587 // JuliaSet() should have run cleanly without leaking. | 684 // JuliaSet() should have run cleanly without leaking. |
| 588 EXPECT_EQ(total_num_allocs_, total_num_frees_); | 685 EXPECT_EQ(total_num_allocs_, total_num_frees_); |
| 589 EXPECT_EQ(0U, alloced_ptrs_.size()); | 686 EXPECT_EQ(0U, alloced_ptrs_.size()); |
| 687 EXPECT_EQ(0U, num_reports_generated_); | |
| 590 ASSERT_EQ(0U, stored_reports_.size()); | 688 ASSERT_EQ(0U, stored_reports_.size()); |
| 591 } | 689 } |
| 592 | 690 |
| 593 TEST_F(LeakDetectorImplTest, JuliaSetWithLeak) { | 691 TEST_F(LeakDetectorImplTest, JuliaSetWithLeak) { |
| 594 JuliaSet(true /* enable_leaks */); | 692 JuliaSet(true /* enable_leaks */); |
| 595 | 693 |
| 596 // JuliaSet() should have leaked some memory from two call sites. | 694 // JuliaSet() should have leaked some memory from two call sites. |
| 597 EXPECT_GT(total_num_allocs_, total_num_frees_); | 695 EXPECT_GT(total_num_allocs_, total_num_frees_); |
| 598 EXPECT_GT(alloced_ptrs_.size(), 0U); | 696 EXPECT_GT(alloced_ptrs_.size(), 0U); |
| 697 EXPECT_GT(num_reports_generated_, 0U); | |
| 599 | 698 |
| 600 // There should be one unique leak report generated for each leaky call site. | 699 // There should be one unique leak report generated for each leaky call site. |
| 601 ASSERT_EQ(2U, stored_reports_.size()); | 700 ASSERT_EQ(2U, stored_reports_.size()); |
| 602 | 701 |
| 603 // The reports should be stored in order of size. | 702 // The reports should be stored in order of size. |
| 604 | 703 |
| 605 // |report1| comes from the call site in JuliaSet() corresponding to | 704 // |report1| comes from the call site in JuliaSet() corresponding to |
| 606 // |kStack3|. | 705 // |kStack3|. |
| 607 const InternalLeakReport& report1 = *stored_reports_.begin(); | 706 const InternalLeakReport& report1 = *stored_reports_.begin(); |
| 608 EXPECT_EQ(sizeof(Complex) + 40, report1.alloc_size_bytes()); | 707 EXPECT_EQ(sizeof(Complex) + 40, report1.alloc_size_bytes()); |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 637 uint32_t size_0_index = SizeToIndex(sizeof(Complex) + 24); | 736 uint32_t size_0_index = SizeToIndex(sizeof(Complex) + 24); |
| 638 uint32_t size_1_index = SizeToIndex(sizeof(Complex) + 40); | 737 uint32_t size_1_index = SizeToIndex(sizeof(Complex) + 40); |
| 639 uint32_t size_2_index = SizeToIndex(sizeof(Complex) + 52); | 738 uint32_t size_2_index = SizeToIndex(sizeof(Complex) + 52); |
| 640 ASSERT_LT(size_0_index, counts_by_size.size()); | 739 ASSERT_LT(size_0_index, counts_by_size.size()); |
| 641 ASSERT_LT(size_1_index, counts_by_size.size()); | 740 ASSERT_LT(size_1_index, counts_by_size.size()); |
| 642 ASSERT_LT(size_2_index, counts_by_size.size()); | 741 ASSERT_LT(size_2_index, counts_by_size.size()); |
| 643 | 742 |
| 644 EXPECT_GT(counts_by_size[size_1_index], counts_by_size[size_0_index] * 10); | 743 EXPECT_GT(counts_by_size[size_1_index], counts_by_size[size_0_index] * 10); |
| 645 EXPECT_GT(counts_by_size[size_2_index], counts_by_size[size_0_index] * 10); | 744 EXPECT_GT(counts_by_size[size_2_index], counts_by_size[size_0_index] * 10); |
| 646 | 745 |
| 647 // |report2| should have the same size history as |report1|, but not the same | 746 // |report1| and |report2| do not necessarily have the same allocation history |
| 648 // call stack history. | 747 // due to the different rates at which they were generated. |
| **Simon Que** (2016/04/21 00:33:08): I forgot to remove this in the previous patch set. | |
| 649 const auto& report2_history = report2.alloc_breakdown_history(); | |
| 650 auto compare_size_history_func = | |
| 651 [](AllocationBreakdown a, AllocationBreakdown b) -> bool { | |
| 652 return std::equal(a.counts_by_size.begin(), a.counts_by_size.end(), | |
| 653 b.counts_by_size.begin()); | |
| 654 }; | |
| 655 EXPECT_TRUE(std::equal(report1_history.begin(), report1_history.end(), | |
| 656 report2_history.begin(), compare_size_history_func)); | |
| 657 } | 748 } |
| 658 | 749 |
| 659 } // namespace leak_detector | 750 } // namespace leak_detector |
| 660 } // namespace metrics | 751 } // namespace metrics |
| OLD | NEW |