// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/metrics/leak_detector/leak_detector_impl.h"

#include <math.h>
#include <stdint.h>

#include <complex>
#include <new>
#include <set>
#include <vector>

#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "components/metrics/leak_detector/custom_allocator.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace metrics {
namespace leak_detector {

using InternalLeakReport = LeakDetectorImpl::LeakReport;

namespace {

// Makes working with complex numbers easier.
using Complex = std::complex<double>;

// The mapping location in memory for a fictional executable.
const uintptr_t kMappingAddr = 0x800000;
const size_t kMappingSize = 0x200000;
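// i.e. the executable's code occupies the address range [0x800000, 0xa00000).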

// Some call stacks within the fictional executable.
// * - outside the mapping range, e.g. JIT code.
const uintptr_t kRawStack0[] = {
    0x800100, 0x900000, 0x880080, 0x810000,
};
const uintptr_t kRawStack1[] = {
    0x940000, 0x980000,
    0xdeadbeef,  // *
    0x9a0000,
};
const uintptr_t kRawStack2[] = {
    0x8f0d00, 0x803abc, 0x9100a0,
};
const uintptr_t kRawStack3[] = {
    0x90fcde,
    0x900df00d,  // *
    0x801000, 0x880088,
    0xdeadcafe,  // *
    0x9f0000, 0x8700a0, 0x96037c,
};
const uintptr_t kRawStack4[] = {
    0x8c0000, 0x85d00d, 0x921337,
    0x780000,  // *
};
const uintptr_t kRawStack5[] = {
    0x990000, 0x888888, 0x830ac0, 0x8e0000,
    0xc00000,  // *
};

// This struct makes it easier to pass call stack info to
// LeakDetectorImplTest::Alloc().
struct TestCallStack {
  const uintptr_t* stack;  // Points to the original stack data.
  size_t depth;
};

const TestCallStack kStack0 = {kRawStack0, arraysize(kRawStack0)};
const TestCallStack kStack1 = {kRawStack1, arraysize(kRawStack1)};
const TestCallStack kStack2 = {kRawStack2, arraysize(kRawStack2)};
const TestCallStack kStack3 = {kRawStack3, arraysize(kRawStack3)};
const TestCallStack kStack4 = {kRawStack4, arraysize(kRawStack4)};
const TestCallStack kStack5 = {kRawStack5, arraysize(kRawStack5)};

// The interval between consecutive leak analyses
// (LeakDetectorImpl::TestForLeaks), measured in bytes allocated. e.g. if
// |kAllocedSizeAnalysisInterval| = 1024, TestForLeaks() is called after each
// 1024 bytes of allocations.
static const size_t kAllocedSizeAnalysisInterval = 8192;

}  // namespace

// This test suite will test the ability of LeakDetectorImpl to catch leaks in
// a program. Individual tests can run leaky code locally.
//
// The leaky code must call Alloc() and Free() for heap memory management. It
// should not allocate or free heap memory through any other means. See
// comments on those functions for more details.
class LeakDetectorImplTest : public ::testing::Test {
 public:
  LeakDetectorImplTest()
      : total_num_allocs_(0),
        total_num_frees_(0),
        total_alloced_size_(0),
        next_analysis_total_alloced_size_(kAllocedSizeAnalysisInterval) {}

  void SetUp() override {
    CustomAllocator::Initialize();

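    // Per the suspicion-threshold constructor parameters below, these appear
    // to be the number of consecutive analyses in which an allocation size
    // (and then a call stack) must look suspicious before a leak is reported.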
    const int kSizeSuspicionThreshold = 4;
    const int kCallStackSuspicionThreshold = 4;
    detector_.reset(new LeakDetectorImpl(kMappingAddr, kMappingSize,
                                         kSizeSuspicionThreshold,
                                         kCallStackSuspicionThreshold));
  }

  void TearDown() override {
    // Free any memory that was leaked by test cases. Do not use Free() because
    // that will try to modify |alloced_ptrs_|.
    for (void* ptr : alloced_ptrs_)
      delete[] reinterpret_cast<char*>(ptr);
    alloced_ptrs_.clear();

    // Must destroy all objects that use CustomAllocator before shutting down.
    detector_.reset();
    stored_reports_.clear();

    EXPECT_TRUE(CustomAllocator::Shutdown());
  }

 protected:
  // Alloc and free functions that allocate and free heap memory and
  // automatically pass alloc/free info to |detector_|. They emulate the
  // alloc/free hook functions that would call into LeakDetectorImpl in
  // real-life usage. They also keep track of individual allocations locally,
  // so any leaked memory can be cleaned up in TearDown().
  //
  // |stack| is just a nominal call stack object to identify the call site. It
  // doesn't have to contain the stack trace of the actual call stack.
  void* Alloc(size_t size, const TestCallStack& stack) {
    void* ptr = new char[size];
    detector_->RecordAlloc(ptr, size, stack.depth,
                           reinterpret_cast<const void* const*>(stack.stack));

    EXPECT_TRUE(alloced_ptrs_.find(ptr) == alloced_ptrs_.end());
    alloced_ptrs_.insert(ptr);

    ++total_num_allocs_;
    total_alloced_size_ += size;
    if (total_alloced_size_ >= next_analysis_total_alloced_size_) {
      LeakDetectorImpl::InternalVector<InternalLeakReport> reports;
      detector_->TestForLeaks(&reports);
      for (const InternalLeakReport& report : reports)
        stored_reports_.insert(report);

      // Determine when the next leak analysis should occur.
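      // A single allocation may be large enough to span multiple analysis
      // intervals, so advance the threshold in a loop until it exceeds the
      // current total.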
      while (total_alloced_size_ >= next_analysis_total_alloced_size_)
        next_analysis_total_alloced_size_ += kAllocedSizeAnalysisInterval;
    }
    return ptr;
  }

  // See comment for Alloc().
  void Free(void* ptr) {
    auto find_ptr_iter = alloced_ptrs_.find(ptr);
    EXPECT_FALSE(find_ptr_iter == alloced_ptrs_.end());
    if (find_ptr_iter == alloced_ptrs_.end())
      return;
    alloced_ptrs_.erase(find_ptr_iter);
    ++total_num_frees_;

    detector_->RecordFree(ptr);

    delete[] reinterpret_cast<char*>(ptr);
  }

  // TEST CASE: Julia set fractal computation. Pass in enable_leaks=true to
  // trigger some memory leaks.
  void JuliaSet(bool enable_leaks);

  // Instance of the class being tested.
  scoped_ptr<LeakDetectorImpl> detector_;

  // Number of pointers allocated and freed so far.
  size_t total_num_allocs_;
  size_t total_num_frees_;

  // Keeps count of total size allocated by Alloc().
  size_t total_alloced_size_;

  // The cumulative allocation size at which to trigger the TestForLeaks()
  // call.
  size_t next_analysis_total_alloced_size_;

  // Stores all pointers to memory allocated by Alloc() so we can manually
  // free the leaked pointers at the end. This also serves as redundant
  // bookkeeping: it stores all pointers that have been allocated but not yet
  // freed.
  std::set<void*> alloced_ptrs_;

  // Store leak reports here. Use a set so duplicate reports are not stored.
  std::set<InternalLeakReport> stored_reports_;

 private:
  DISALLOW_COPY_AND_ASSIGN(LeakDetectorImplTest);
};

void LeakDetectorImplTest::JuliaSet(bool enable_leaks) {
  // The center region of the complex plane that is the basis for our Julia
  // set computations is a circle of radius kRadius.
  constexpr double kRadius = 2;

  // To track points in the complex plane, we will use a rectangular grid in
  // the range defined by [-kRadius, kRadius] along both axes.
  constexpr double kRangeMin = -kRadius;
  constexpr double kRangeMax = kRadius;

  // Divide each axis into intervals, each of which is associated with a point
  // on that axis at its center.
  constexpr double kIntervalInverse = 64;
  constexpr double kInterval = 1.0 / kIntervalInverse;
  constexpr int kNumPoints = (kRangeMax - kRangeMin) / kInterval + 1;
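  // With kRadius = 2 and kInterval = 1/64, that is (2 - (-2)) * 64 + 1 = 257
  // points along each axis.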

  // Contains some useful functions for converting between points on the
  // complex plane and in a gridlike data structure.
  struct ComplexPlane {
    static int GetXGridIndex(const Complex& value) {
      return (value.real() + kInterval / 2 - kRangeMin) / kInterval;
    }
    static int GetYGridIndex(const Complex& value) {
      return (value.imag() + kInterval / 2 - kRangeMin) / kInterval;
    }
    static int GetArrayIndex(const Complex& value) {
      return GetXGridIndex(value) + GetYGridIndex(value) * kNumPoints;
    }
    static Complex GetComplexForGridPoint(size_t x, size_t y) {
      return Complex(kRangeMin + x * kInterval, kRangeMin + y * kInterval);
    }
  };
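  // For example, GetComplexForGridPoint(0, 0) is (-2, -2), and
  // GetArrayIndex() maps that value back to array index 0; the half-interval
  // offset in the index functions rounds each value to its nearest grid
  // point.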

  // Make sure the choice of interval doesn't result in any loss of precision.
  ASSERT_EQ(1.0, kInterval * kIntervalInverse);

  // Create a grid for part of the complex plane, with each axis within the
  // range [kRangeMin, kRangeMax].
  constexpr size_t width = kNumPoints;
  constexpr size_t height = kNumPoints;
  std::vector<Complex*> grid(width * height);

  // Initialize an object for each grid point within the inner circle
  // |z| <= kRadius.
  for (size_t i = 0; i < width; ++i) {
    for (size_t j = 0; j < height; ++j) {
      Complex point = ComplexPlane::GetComplexForGridPoint(i, j);
      // Do not store any values outside the inner circle.
      if (abs(point) <= kRadius) {
        grid[i + j * width] =
            new (Alloc(sizeof(Complex), kStack0)) Complex(point);
      }
    }
  }
  EXPECT_LE(alloced_ptrs_.size(), width * height);

  // Create a new grid for the result of the transformation.
  std::vector<Complex*> next_grid(width * height, nullptr);

  const int kNumIterations = 20;
  for (int n = 0; n < kNumIterations; ++n) {
    for (int i = 0; i < kNumPoints; ++i) {
      for (int j = 0; j < kNumPoints; ++j) {
        if (!grid[i + j * width])
          continue;

        // NOTE: The below code is NOT an efficient way to compute a Julia set.
        // This is only to test the leak detector with some nontrivial code.

        // A simple polynomial function for generating Julia sets is:
        //   f(z) = z^n + c

        // But in this algorithm, we need the inverse:
        //   fInv(z) = (z - c)^(1/n)

        // Here, let's use n=5 and c=0.544.
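        // Writing z - c in polar form as r * e^(i*theta), its five fifth
        // roots are r^(1/5) * e^(i*(theta + 2*pi*k)/5) for k = 0..4. pow()
        // below yields the k = 0 (principal) root; the others are obtained by
        // rotating it in increments of 2*pi/5.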
        const Complex c(0.544, 0);
        const Complex& z = *grid[i + j * width];

        // This is the principal root.
        Complex root = pow(z - c, 0.2);

        // Discard the result if it is too far from the center of the plane.
        if (abs(root) > kRadius)
          continue;

        // The below code only allocates Complex objects of the same size. The
        // leak detector expects various sizes, so increase the allocation
        // size by a different amount at each call site.

        // Nth root produces N results.
        // Place all root results on |next_grid|.

        // First, place the principal root.
        if (!next_grid[ComplexPlane::GetArrayIndex(root)]) {
          next_grid[ComplexPlane::GetArrayIndex(root)] =
              new (Alloc(sizeof(Complex) + 24, kStack1)) Complex(root);
        }

        double magnitude = abs(root);
        double angle = arg(root);
        // To generate other roots, rotate the principal root by increments of
        // 1/N of a full circle.
        const double kAngleIncrement = M_PI * 2 / 5;

        // Second root.
        root = std::polar(magnitude, angle + kAngleIncrement);
        if (!next_grid[ComplexPlane::GetArrayIndex(root)]) {
          next_grid[ComplexPlane::GetArrayIndex(root)] =
              new (Alloc(sizeof(Complex) + 40, kStack2)) Complex(root);
        }

        // In some of the sections below, setting |enable_leaks| to true
        // triggers a memory leak by overwriting the old Complex pointer value
        // without freeing it. Because the N complex roots are confined to
        // equal sectors of the complex plane, each new pointer displaces an
        // old pointer that was allocated from the same line of code.

        // Third root.
        root = std::polar(magnitude, angle + kAngleIncrement * 2);
        // *** LEAK ***
        if (enable_leaks || !next_grid[ComplexPlane::GetArrayIndex(root)]) {
          next_grid[ComplexPlane::GetArrayIndex(root)] =
              new (Alloc(sizeof(Complex) + 40, kStack3)) Complex(root);
        }

        // Fourth root.
        root = std::polar(magnitude, angle + kAngleIncrement * 3);
        // *** LEAK ***
        if (enable_leaks || !next_grid[ComplexPlane::GetArrayIndex(root)]) {
          next_grid[ComplexPlane::GetArrayIndex(root)] =
              new (Alloc(sizeof(Complex) + 52, kStack4)) Complex(root);
        }

        // Fifth root.
        root = std::polar(magnitude, angle + kAngleIncrement * 4);
        if (!next_grid[ComplexPlane::GetArrayIndex(root)]) {
          next_grid[ComplexPlane::GetArrayIndex(root)] =
              new (Alloc(sizeof(Complex) + 52, kStack5)) Complex(root);
        }
      }
    }

    // Clear the previously allocated points.
    for (Complex*& point : grid) {
      if (point) {
        Free(point);
        point = nullptr;
      }
    }

    // Now swap the two grids for the next iteration.
    grid.swap(next_grid);
  }

  // Clear the previously allocated points.
  for (Complex*& point : grid) {
    if (point) {
      Free(point);
      point = nullptr;
    }
  }
}

TEST_F(LeakDetectorImplTest, CheckTestFramework) {
  EXPECT_EQ(0U, total_num_allocs_);
  EXPECT_EQ(0U, total_num_frees_);
  EXPECT_EQ(0U, alloced_ptrs_.size());

  // Allocate some memory.
  void* ptr0 = Alloc(12, kStack0);
  void* ptr1 = Alloc(16, kStack0);
  void* ptr2 = Alloc(24, kStack0);
  EXPECT_EQ(3U, total_num_allocs_);
  EXPECT_EQ(0U, total_num_frees_);
  EXPECT_EQ(3U, alloced_ptrs_.size());

  // Free one of the pointers.
  Free(ptr1);
  EXPECT_EQ(3U, total_num_allocs_);
  EXPECT_EQ(1U, total_num_frees_);
  EXPECT_EQ(2U, alloced_ptrs_.size());

  // Allocate some more memory.
  void* ptr3 = Alloc(72, kStack1);
  void* ptr4 = Alloc(104, kStack1);
  void* ptr5 = Alloc(96, kStack1);
  void* ptr6 = Alloc(24, kStack1);
  EXPECT_EQ(7U, total_num_allocs_);
  EXPECT_EQ(1U, total_num_frees_);
  EXPECT_EQ(6U, alloced_ptrs_.size());

  // Free more pointers.
  Free(ptr2);
  Free(ptr4);
  Free(ptr6);
  EXPECT_EQ(7U, total_num_allocs_);
  EXPECT_EQ(4U, total_num_frees_);
  EXPECT_EQ(3U, alloced_ptrs_.size());

  // Free remaining memory.
  Free(ptr0);
  Free(ptr3);
  Free(ptr5);
  EXPECT_EQ(7U, total_num_allocs_);
  EXPECT_EQ(7U, total_num_frees_);
  EXPECT_EQ(0U, alloced_ptrs_.size());
}

TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) {
  JuliaSet(false /* enable_leaks */);

  // JuliaSet() should have run cleanly without leaking.
  EXPECT_EQ(total_num_allocs_, total_num_frees_);
  EXPECT_EQ(0U, alloced_ptrs_.size());
  ASSERT_EQ(0U, stored_reports_.size());
}

TEST_F(LeakDetectorImplTest, JuliaSetWithLeak) {
  JuliaSet(true /* enable_leaks */);

  // JuliaSet() should have leaked some memory from two call sites.
  EXPECT_GT(total_num_allocs_, total_num_frees_);
  EXPECT_GT(alloced_ptrs_.size(), 0U);

  // There should be one unique leak report generated for each leaky call
  // site.
  ASSERT_EQ(2U, stored_reports_.size());

  // The reports should be stored in increasing order of allocation size.

  // |report1| comes from the call site in JuliaSet() corresponding to
  // |kStack3|.
  const InternalLeakReport& report1 = *stored_reports_.begin();
  EXPECT_EQ(sizeof(Complex) + 40, report1.alloc_size_bytes());
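  // (sizeof(Complex) + 40 matches the Alloc() size at the kStack3 call site.)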
  EXPECT_EQ(kStack3.depth, report1.call_stack().size());
  for (size_t i = 0; i < kStack3.depth && i < report1.call_stack().size();
       ++i) {
    // The call stack's addresses may not all fall within the mapping range.
    // Those that don't are not converted to mapping offsets.
    if (kStack3.stack[i] >= kMappingAddr &&
        kStack3.stack[i] < kMappingAddr + kMappingSize) {
      EXPECT_EQ(kStack3.stack[i] - kMappingAddr, report1.call_stack()[i]);
    } else {
      EXPECT_EQ(kStack3.stack[i], report1.call_stack()[i]);
    }
  }

  // |report2| comes from the call site in JuliaSet() corresponding to
  // |kStack4|.
  const InternalLeakReport& report2 = *(++stored_reports_.begin());
  EXPECT_EQ(sizeof(Complex) + 52, report2.alloc_size_bytes());
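  // (sizeof(Complex) + 52 matches the Alloc() size at the kStack4 call site.)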
  EXPECT_EQ(kStack4.depth, report2.call_stack().size());
  for (size_t i = 0; i < kStack4.depth && i < report2.call_stack().size();
       ++i) {
    // The call stack's addresses may not all fall within the mapping range.
    // Those that don't are not converted to mapping offsets.
    if (kStack4.stack[i] >= kMappingAddr &&
        kStack4.stack[i] < kMappingAddr + kMappingSize) {
      EXPECT_EQ(kStack4.stack[i] - kMappingAddr, report2.call_stack()[i]);
    } else {
      EXPECT_EQ(kStack4.stack[i], report2.call_stack()[i]);
    }
  }
}

}  // namespace leak_detector
}  // namespace metrics