| OLD | NEW |
| 1 // Copyright 2015 The Chromium Authors. All rights reserved. | 1 // Copyright 2015 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "components/metrics/leak_detector/leak_detector_impl.h" | 5 #include "components/metrics/leak_detector/leak_detector_impl.h" |
| 6 | 6 |
| 7 #include <math.h> | 7 #include <math.h> |
| 8 #include <stddef.h> | 8 #include <stddef.h> |
| 9 #include <stdint.h> | 9 #include <stdint.h> |
| 10 | 10 |
| 11 #include <complex> | 11 #include <complex> |
| 12 #include <new> | 12 #include <new> |
| 13 #include <set> | 13 #include <set> |
| 14 #include <vector> | 14 #include <vector> |
| 15 | 15 |
| 16 #include "base/macros.h" | 16 #include "base/macros.h" |
| 17 #include "base/memory/scoped_ptr.h" | 17 #include "base/memory/scoped_ptr.h" |
| 18 #include "components/metrics/leak_detector/custom_allocator.h" | 18 #include "components/metrics/leak_detector/custom_allocator.h" |
| 19 #include "testing/gtest/include/gtest/gtest.h" | 19 #include "testing/gtest/include/gtest/gtest.h" |
| 20 | 20 |
| 21 namespace metrics { | 21 namespace metrics { |
| 22 namespace leak_detector { | 22 namespace leak_detector { |
| 23 | 23 |
| 24 using InternalLeakReport = LeakDetectorImpl::LeakReport; | |
| 25 template <typename T> | |
| 26 using InternalVector = LeakDetectorImpl::InternalVector<T>; | |
| 27 | |
| 28 namespace { | 24 namespace { |
| 29 | 25 |
| 30 // Makes working with complex numbers easier. | 26 // Makes working with complex numbers easier. |
| 31 using Complex = std::complex<double>; | 27 using Complex = std::complex<double>; |
| 32 | 28 |
| 33 // The mapping location in memory for a fictional executable. | 29 // The mapping location in memory for a fictional executable. |
| 34 const uintptr_t kMappingAddr = 0x800000; | 30 const uintptr_t kMappingAddr = 0x800000; |
| 35 const size_t kMappingSize = 0x200000; | 31 const size_t kMappingSize = 0x200000; |
| 36 | 32 |
| 37 // Some call stacks within the fictional executable. | 33 // Some call stacks within the fictional executable. |
| (...skipping 94 matching lines...) |
| 132 alloced_ptrs_.clear(); | 128 alloced_ptrs_.clear(); |
| 133 | 129 |
| 134 // Must destroy all objects that use CustomAllocator before shutting down. | 130 // Must destroy all objects that use CustomAllocator before shutting down. |
| 135 detector_.reset(); | 131 detector_.reset(); |
| 136 stored_reports_.clear(); | 132 stored_reports_.clear(); |
| 137 | 133 |
| 138 EXPECT_TRUE(CustomAllocator::Shutdown()); | 134 EXPECT_TRUE(CustomAllocator::Shutdown()); |
| 139 } | 135 } |
| 140 | 136 |
| 141 protected: | 137 protected: |
| | 138 using InternalLeakReport = LeakDetectorImpl::LeakReport; |
| | 139 template <typename T> |
| | 140 using InternalVector = LeakDetectorImpl::InternalVector<T>; |
| | 141 using AllocationBreakdown = LeakDetectorImpl::LeakReport::AllocationBreakdown; |
| | 142 |
| 142 // Alloc and free functions that allocate and free heap memory and | 143 // Alloc and free functions that allocate and free heap memory and |
| 143 // automatically pass alloc/free info to |detector_|. They emulate the | 144 // automatically pass alloc/free info to |detector_|. They emulate the |
| 144 // alloc/free hook functions that would call into LeakDetectorImpl in | 145 // alloc/free hook functions that would call into LeakDetectorImpl in |
| 145 // real-life usage. They also keep track of individual allocations locally, so | 146 // real-life usage. They also keep track of individual allocations locally, so |
| 146 // any leaked memory could be cleaned up. | 147 // any leaked memory could be cleaned up. |
| 147 // | 148 // |
| 148 // |stack| is just a nominal call stack object to identify the call site. It | 149 // |stack| is just a nominal call stack object to identify the call site. It |
| 149 // doesn't have to contain the stack trace of the actual call stack. | 150 // doesn't have to contain the stack trace of the actual call stack. |
| 150 void* Alloc(size_t size, const TestCallStack& stack) { | 151 void* Alloc(size_t size, const TestCallStack& stack) { |
| 151 void* ptr = new char[size]; | 152 void* ptr = new char[size]; |
| (...skipping 373 matching lines...) |
| 525 // - Each inner loop iteration allocates a net of 1x 32 bytes and 1x 48 bytes. | 526 // - Each inner loop iteration allocates a net of 1x 32 bytes and 1x 48 bytes. |
| 526 // - Each outer loop iteration allocates a net of 32x 32 bytes and 32x 48 | 527 // - Each outer loop iteration allocates a net of 32x 32 bytes and 32x 48 |
| 527 // bytes. | 528 // bytes. |
| 528 // - However, the leak analysis happens after the allocs but before the frees | 529 // - However, the leak analysis happens after the allocs but before the frees |
| 529 // that come right after. So it should count the two extra allocs made at | 530 // that come right after. So it should count the two extra allocs made at |
| 530 // call sites |kStack3| and |kStack4|. The formula is |(i + 1) * 32 + 2|, | 531 // call sites |kStack3| and |kStack4|. The formula is |(i + 1) * 32 + 2|, |
| 531 // where |i| is the iteration index. | 532 // where |i| is the iteration index. |
| 532 // There should have been one leak analysis per outer loop iteration, for a | 533 // There should have been one leak analysis per outer loop iteration, for a |
| 533 // total of 20 history records (|kNumOuterIterations|) per report. | 534 // total of 20 history records (|kNumOuterIterations|) per report. |
| 534 | 535 |
| 535 const auto& report1_size_history = report1.size_breakdown_history(); | 536 const auto& report1_history = report1.alloc_breakdown_history(); |
| 536 EXPECT_EQ(20U, report1_size_history.size()); | 537 EXPECT_EQ(20U, report1_history.size()); |
| 537 | 538 |
| 538 size_t index = 0; | 539 for (size_t i = 0; i < report1_history.size(); ++i) { |
| 539 for (const InternalVector<uint32_t>& entry : report1_size_history) { | 540 const AllocationBreakdown& entry = report1_history[i]; |
| 540 ASSERT_GT(entry.size(), SizeToIndex(48)); | 541 |
| | 542 const InternalVector<uint32_t>& counts_by_size = entry.counts_by_size; |
| | 543 ASSERT_GT(counts_by_size.size(), SizeToIndex(48)); |
| 541 | 544 |
| 542 // Check the two leaky sizes, 32 and 48. | 545 // Check the two leaky sizes, 32 and 48. |
| 543 EXPECT_EQ((index + 1) * 32 + 2, entry[SizeToIndex(32)]); | 546 uint32_t expected_leaky_count = (i + 1) * 32 + 2; |
| 544 EXPECT_EQ((index + 1) * 32 + 2, entry[SizeToIndex(48)]); | 547 EXPECT_EQ(expected_leaky_count, counts_by_size[SizeToIndex(32)]); |
| | 548 EXPECT_EQ(expected_leaky_count, counts_by_size[SizeToIndex(48)]); |
| 545 | 549 |
| 546 // Not related to the leaks, but there should be a dangling 16-byte | 550 // Not related to the leaks, but there should be a dangling 16-byte |
| 547 // allocation during each leak analysis, because it hasn't yet been freed. | 551 // allocation during each leak analysis, because it hasn't yet been freed. |
| 548 EXPECT_EQ(1U, entry[SizeToIndex(16)]); | 552 EXPECT_EQ(1U, counts_by_size[SizeToIndex(16)]); |
| 549 ++index; | |
| 550 } | 553 } |
| 551 | 554 |
| 552 // |report2| should have the same size history as |report1|. | 555 // Check call site count over time. |
| 553 const auto& report2_size_history = report2.size_breakdown_history(); | 556 ASSERT_LT(kSizeSuspicionThreshold, report1_history.size()); |
| 554 EXPECT_TRUE(std::equal(report1_size_history.begin(), | 557 // Initially, there has been no call site tracking. |
| 555 report1_size_history.end(), | 558 for (size_t i = 0; i < kSizeSuspicionThreshold; ++i) |
| 556 report2_size_history.begin())); | 559 EXPECT_EQ(0U, report1_history[i].count_for_call_stack); |
| | 560 |
| | 561 // Once |kSizeSuspicionThreshold| has been reached and call site tracking has |
| | 562 // begun, the number of allocations for the suspected call site should |
| | 563 // increase by 32 each frame. See comments above. |
| | 564 uint32_t expected_call_stack_count = 0; |
| | 565 for (size_t i = kSizeSuspicionThreshold; i < report1_history.size(); ++i) { |
| | 566 EXPECT_EQ(expected_call_stack_count, |
| | 567 report1_history[i].count_for_call_stack); |
| | 568 expected_call_stack_count += 32; |
| | 569 } |
| | 570 |
| | 571 // |report2| should have the same size history and call stack history as |
| | 572 // |report1|. |
| | 573 const auto& report2_history = report2.alloc_breakdown_history(); |
| | 574 auto compare_history_func = |
| | 575 [](AllocationBreakdown a, AllocationBreakdown b) -> bool { |
| | 576 return std::equal(a.counts_by_size.begin(), a.counts_by_size.end(), |
| | 577 b.counts_by_size.begin()) && |
| | 578 a.count_for_call_stack == b.count_for_call_stack; |
| | 579 }; |
| | 580 EXPECT_TRUE(std::equal(report1_history.begin(), report1_history.end(), |
| | 581 report2_history.begin(), compare_history_func)); |
| 557 } | 582 } |
| 558 | 583 |
| 559 TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) { | 584 TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) { |
| 560 JuliaSet(false /* enable_leaks */); | 585 JuliaSet(false /* enable_leaks */); |
| 561 | 586 |
| 562 // JuliaSet() should have run cleanly without leaking. | 587 // JuliaSet() should have run cleanly without leaking. |
| 563 EXPECT_EQ(total_num_allocs_, total_num_frees_); | 588 EXPECT_EQ(total_num_allocs_, total_num_frees_); |
| 564 EXPECT_EQ(0U, alloced_ptrs_.size()); | 589 EXPECT_EQ(0U, alloced_ptrs_.size()); |
| 565 ASSERT_EQ(0U, stored_reports_.size()); | 590 ASSERT_EQ(0U, stored_reports_.size()); |
| 566 } | 591 } |
| (...skipping 24 matching lines...) |
| 591 // |kStack4|. | 616 // |kStack4|. |
| 592 const InternalLeakReport& report2 = *(++stored_reports_.begin()); | 617 const InternalLeakReport& report2 = *(++stored_reports_.begin()); |
| 593 EXPECT_EQ(sizeof(Complex) + 52, report2.alloc_size_bytes()); | 618 EXPECT_EQ(sizeof(Complex) + 52, report2.alloc_size_bytes()); |
| 594 ASSERT_EQ(kStack4.depth, report2.call_stack().size()); | 619 ASSERT_EQ(kStack4.depth, report2.call_stack().size()); |
| 595 for (size_t i = 0; i < kStack4.depth; ++i) { | 620 for (size_t i = 0; i < kStack4.depth; ++i) { |
| 596 EXPECT_EQ(GetOffsetInMapping(kStack4.stack[i]), | 621 EXPECT_EQ(GetOffsetInMapping(kStack4.stack[i]), |
| 597 report2.call_stack()[i]) << i; | 622 report2.call_stack()[i]) << i; |
| 598 } | 623 } |
| 599 | 624 |
| 600 // Check |report1|'s historical data. | 625 // Check |report1|'s historical data. |
| 601 const auto& report1_size_history = report1.size_breakdown_history(); | 626 const auto& report1_history = report1.alloc_breakdown_history(); |
| 602 // Computing the exact number of leak analyses is not trivial, but we know it | 627 // Computing the exact number of leak analyses is not trivial, but we know it |
| 603 // must be at least |kSizeSuspicionThreshold + kCallStackSuspicionThreshold| | 628 // must be at least |kSizeSuspicionThreshold + kCallStackSuspicionThreshold| |
| 604 // in order to have generated a report. | 629 // in order to have generated a report. |
| 605 EXPECT_GT(report1_size_history.size(), | 630 EXPECT_GT(report1_history.size(), |
| 606 kSizeSuspicionThreshold + kCallStackSuspicionThreshold); | 631 kSizeSuspicionThreshold + kCallStackSuspicionThreshold); |
| 607 | 632 |
| 608 // Make sure that the final allocation counts for the leaky sizes are larger | 633 // Make sure that the final allocation counts for the leaky sizes are larger |
| 609 // than that of the non-leaky size by at least an order of magnitude. | 634 // than that of the non-leaky size by at least an order of magnitude. |
| 610 const InternalVector<uint32_t>& final_entry = *report1_size_history.rbegin(); | 635 const AllocationBreakdown& final_entry = *report1_history.rbegin(); |
| | 636 const InternalVector<uint32_t>& counts_by_size = final_entry.counts_by_size; |
| 611 uint32_t size_0_index = SizeToIndex(sizeof(Complex) + 24); | 637 uint32_t size_0_index = SizeToIndex(sizeof(Complex) + 24); |
| 612 uint32_t size_1_index = SizeToIndex(sizeof(Complex) + 40); | 638 uint32_t size_1_index = SizeToIndex(sizeof(Complex) + 40); |
| 613 uint32_t size_2_index = SizeToIndex(sizeof(Complex) + 52); | 639 uint32_t size_2_index = SizeToIndex(sizeof(Complex) + 52); |
| 614 ASSERT_LT(size_0_index, final_entry.size()); | 640 ASSERT_LT(size_0_index, counts_by_size.size()); |
| 615 ASSERT_LT(size_1_index, final_entry.size()); | 641 ASSERT_LT(size_1_index, counts_by_size.size()); |
| 616 ASSERT_LT(size_2_index, final_entry.size()); | 642 ASSERT_LT(size_2_index, counts_by_size.size()); |
| 617 | 643 |
| 618 EXPECT_GT(final_entry[size_1_index], final_entry[size_0_index] * 10); | 644 EXPECT_GT(counts_by_size[size_1_index], counts_by_size[size_0_index] * 10); |
| 619 EXPECT_GT(final_entry[size_2_index], final_entry[size_0_index] * 10); | 645 EXPECT_GT(counts_by_size[size_2_index], counts_by_size[size_0_index] * 10); |
| 620 | 646 |
| 621 // |report2| should have the same size history as |report1|. | 647 // |report2| should have the same size history as |report1|, but not the same |
| 622 const auto& report2_size_history = report2.size_breakdown_history(); | 648 // call stack history. |
| 623 EXPECT_TRUE(std::equal(report1_size_history.begin(), | 649 const auto& report2_history = report2.alloc_breakdown_history(); |
| 624 report1_size_history.end(), | 650 auto compare_size_history_func = |
| 625 report2_size_history.begin())); | 651 [](AllocationBreakdown a, AllocationBreakdown b) -> bool { |
| | 652 return std::equal(a.counts_by_size.begin(), a.counts_by_size.end(), |
| | 653 b.counts_by_size.begin()); |
| | 654 }; |
| | 655 EXPECT_TRUE(std::equal(report1_history.begin(), report1_history.end(), |
| | 656 report2_history.begin(), compare_size_history_func)); |
| 626 } | 657 } |
| 627 | 658 |
| 628 } // namespace leak_detector | 659 } // namespace leak_detector |
| 629 } // namespace metrics | 660 } // namespace metrics |
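
Note on the new comparison code: the `compare_history_func` and `compare_size_history_func` lambdas rely on the three-iterator-plus-predicate overload of `std::equal`, which walks both histories in lockstep and applies the predicate to each pair of elements. Below is a minimal standalone sketch of that pattern; `Breakdown` is a hypothetical stand-in for `LeakDetectorImpl::LeakReport::AllocationBreakdown`, and the sample values are illustrative only.

    #include <algorithm>
    #include <cassert>
    #include <cstdint>
    #include <vector>

    // Hypothetical stand-in for AllocationBreakdown: a per-analysis snapshot
    // of allocation counts by size plus a count for the suspected call stack.
    struct Breakdown {
      std::vector<uint32_t> counts_by_size;
      uint32_t count_for_call_stack;
    };

    int main() {
      // Two identical histories, as the test expects for report1 and report2.
      std::vector<Breakdown> history1 = {{{1, 2, 3}, 0}, {{4, 5, 6}, 32}};
      std::vector<Breakdown> history2 = history1;

      // Element-wise comparison analogous to compare_history_func: equal size
      // breakdowns and equal call stack counts.
      auto compare = [](const Breakdown& a, const Breakdown& b) {
        return a.counts_by_size == b.counts_by_size &&
               a.count_for_call_stack == b.count_for_call_stack;
      };

      // std::equal(first1, last1, first2, pred) returns true only if pred
      // holds for every corresponding pair of elements.
      assert(std::equal(history1.begin(), history1.end(), history2.begin(),
                        compare));
      return 0;
    }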