Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(585)

Side by Side Diff: components/metrics/leak_detector/leak_detector_impl_unittest.cc

Issue 986503002: components/metrics: Add runtime memory leak detector (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: use namespace metrics::leak_detector; remove GYP flag (not needed for this CL) Created 5 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
(Empty)
1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include "components/metrics/leak_detector/leak_detector_impl.h"
6
7 #include <math.h>
8 #include <stdint.h>
9
10 #include <complex>
11 #include <new>
12 #include <set>
13 #include <vector>
14
15 #include "base/macros.h"
16 #include "base/memory/scoped_ptr.h"
17 #include "components/metrics/leak_detector/custom_allocator.h"
18 #include "testing/gtest/include/gtest/gtest.h"
19
20 namespace metrics {
21 namespace leak_detector {
22
23 namespace {
24
25 // Makes working with complex numbers easier.
26 using Complex = std::complex<double>;
27
28 // The mapping location in memory for a fictional executable.
29 const uintptr_t kMappingAddr = 0x800000;
30 const size_t kMappingSize = 0x200000;
31
32 // Some call stacks within the fictional executable.
33 // * - outside the mapping range, e.g. JIT code.
34 const uintptr_t kRawStack0[] = {
35 0x800100,
36 0x900000,
37 0x880080,
38 0x810000,
39 };
40 const uintptr_t kRawStack1[] = {
41 0x940000,
42 0x980000,
43 0xdeadbeef, // *
44 0x9a0000,
45 };
46 const uintptr_t kRawStack2[] = {
47 0x8f0d00,
48 0x803abc,
49 0x9100a0,
50 };
51 const uintptr_t kRawStack3[] = {
52 0x90fcde,
53 0x900df00d, // *
54 0x801000,
55 0x880088,
56 0xdeadcafe, // *
57 0x9f0000,
58 0x8700a0,
59 0x96037c,
60 };
61 const uintptr_t kRawStack4[] = {
62 0x8c0000,
63 0x85d00d,
64 0x921337,
65 0x780000, // *
66 };
67 const uintptr_t kRawStack5[] = {
68 0x990000,
69 0x888888,
70 0x830ac0,
71 0x8e0000,
72 0xc00000, // *
73 };
74
75 // This struct makes it easier to pass call stack info to
76 // LeakDetectorImplTest::Alloc().
77 struct TestCallStack {
78 const uintptr_t* stack; // A reference to the original stack data.
79 size_t depth;
80 };
81
82 const TestCallStack kStack0 = { kRawStack0, arraysize(kRawStack0) };
83 const TestCallStack kStack1 = { kRawStack1, arraysize(kRawStack1) };
84 const TestCallStack kStack2 = { kRawStack2, arraysize(kRawStack2) };
85 const TestCallStack kStack3 = { kRawStack3, arraysize(kRawStack3) };
86 const TestCallStack kStack4 = { kRawStack4, arraysize(kRawStack4) };
87 const TestCallStack kStack5 = { kRawStack5, arraysize(kRawStack5) };
88
89 // The interval between consecutive analyses (LeakDetectorImpl::TestForLeaks),
90 // in number of bytes allocated. e.g. if |kAllocedSizeAnalysisInterval| = 1024
91 // then call TestForLeaks() every 1024 bytes of allocation that occur.
92 static const size_t kAllocedSizeAnalysisInterval = 8192;
93
94 } // namespace
95
96 // This test suite will test the ability of LeakDetectorImpl to catch leaks in
97 // a program. Individual tests can run leaky code locally.
98 //
99 // The leaky code must call Alloc() and Free() for heap memory management. It
100 // should not call new and delete directly. See comments on those
101 // functions for more details.
class LeakDetectorImplTest : public ::testing::Test {
 public:
  LeakDetectorImplTest()
      : total_num_allocs_(0),
        total_num_frees_(0),
        total_alloced_size_(0),
        next_analysis_total_alloced_size_(kAllocedSizeAnalysisInterval) {}

  void SetUp() override {
    // LeakDetectorImpl allocates its internal data through CustomAllocator,
    // so the allocator must be initialized first.
    CustomAllocator::Initialize();

    // Thresholds passed to the detector. NOTE(review): presumably the number
    // of consecutive suspicious analyses before a size / call site is
    // reported — confirm against LeakDetectorImpl's documentation.
    const int kSizeSuspicionThreshold = 4;
    const int kCallStackSuspicionThreshold = 4;
    detector_.reset(new LeakDetectorImpl(kMappingAddr,
                                         kMappingSize,
                                         kSizeSuspicionThreshold,
                                         kCallStackSuspicionThreshold,
                                         true /* verbose */));
  }

  void TearDown() override {
    // Free any memory that was leaked by test cases. Do not use Free() because
    // that will try to modify |alloced_ptrs_| while it is being iterated.
    for (void* ptr : alloced_ptrs_)
      delete [] reinterpret_cast<char*>(ptr);
    alloced_ptrs_.clear();

    // Must destroy all objects that use CustomAllocator before shutting it
    // down.
    detector_.reset();
    stored_reports_.clear();

    EXPECT_TRUE(CustomAllocator::Shutdown());
  }

 protected:
  // Alloc and free functions that allocate and free heap memory and
  // automatically pass alloc/free info to |detector_|. They emulate the
  // alloc/free hook functions that would call into LeakDetectorImpl in
  // real-life usage. They also keep track of individual allocations locally, so
  // any leaked memory could be cleaned up.
  //
  // |stack| is just a nominal call stack object to identify the call site. It
  // doesn't have to contain the stack trace of the actual call stack.
  //
  // Every |kAllocedSizeAnalysisInterval| bytes of cumulative allocation,
  // Alloc() also runs a leak analysis (TestForLeaks) and accumulates any
  // resulting reports in |stored_reports_|.
  void* Alloc(size_t size, const TestCallStack& stack) {
    void* ptr = new char[size];
    detector_->RecordAlloc(ptr, size, stack.depth,
                           reinterpret_cast<const void* const*>(stack.stack));

    // The same pointer must never be handed out twice without being freed.
    EXPECT_TRUE(alloced_ptrs_.find(ptr) == alloced_ptrs_.end());
    alloced_ptrs_.insert(ptr);

    ++total_num_allocs_;
    total_alloced_size_ += size;
    if (total_alloced_size_ >= next_analysis_total_alloced_size_) {
      InternalVector<InternalLeakReport> reports;
      detector_->TestForLeaks(false /* do_logging */, &reports);
      // |stored_reports_| is a set, so duplicate reports from successive
      // analyses collapse into a single entry.
      for (const InternalLeakReport& report : reports)
        stored_reports_.insert(report);

      // Determine when the next leak analysis should occur. The loop handles
      // a single allocation that spans multiple analysis intervals.
      while (total_alloced_size_ >= next_analysis_total_alloced_size_)
        next_analysis_total_alloced_size_ += kAllocedSizeAnalysisInterval;
    }
    return ptr;
  }

  // See comment for Alloc(). Reports the free to |detector_| and releases the
  // memory. Freeing a pointer that was not returned by Alloc() (or was
  // already freed) is flagged as a test failure and otherwise ignored.
  void Free(void* ptr) {
    auto find_ptr_iter = alloced_ptrs_.find(ptr);
    EXPECT_FALSE(find_ptr_iter == alloced_ptrs_.end());
    if (find_ptr_iter == alloced_ptrs_.end())
      return;
    alloced_ptrs_.erase(find_ptr_iter);
    ++total_num_frees_;

    detector_->RecordFree(ptr);

    delete [] reinterpret_cast<char*>(ptr);
  }

  // TEST CASE: Julia set fractal computation. Pass in enable_leaks=true to
  // trigger some memory leaks.
  void JuliaSet(bool enable_leaks);

  // Instance of the class being tested.
  scoped_ptr<LeakDetectorImpl> detector_;

  // Number of pointers allocated and freed so far.
  size_t total_num_allocs_;
  size_t total_num_frees_;

  // Keeps count of total size allocated by Alloc().
  size_t total_alloced_size_;

  // The cumulative allocation size at which to trigger the TestForLeaks() call.
  size_t next_analysis_total_alloced_size_;

  // Stores all pointers to memory allocated by Alloc() so we can manually
  // free the leaked pointers at the end. This also serves as redundant
  // bookkeeping: it stores all pointers that have been allocated but not yet
  // freed.
  std::set<void*> alloced_ptrs_;

  // Stores leak reports here. Use a set so duplicate reports are not stored.
  std::set<InternalLeakReport> stored_reports_;

 private:
  DISALLOW_COPY_AND_ASSIGN(LeakDetectorImplTest);
};
211
// TEST CASE. Runs an (intentionally inefficient) inverse-iteration Julia set
// computation. All Complex objects live in memory obtained from Alloc() via
// placement new, so |detector_| observes every allocation. When
// |enable_leaks| is true, the two call sites marked *** LEAK *** overwrite
// live pointers without calling Free(), leaking allocations attributed to
// |kStack3| and |kStack4|.
void LeakDetectorImplTest::JuliaSet(bool enable_leaks) {
  // The center region of the complex plane that is the basis for our Julia set
  // computations is a circle of radius kRadius.
  constexpr double kRadius = 2;

  // To track points in the complex plane, we will use a rectangular grid in the
  // range defined by [-kRadius, kRadius] along both axes.
  constexpr double kRangeMin = -kRadius;
  constexpr double kRangeMax = kRadius;

  // Divide each axis into intervals, each of which is associated with a point
  // on that axis at its center.
  constexpr double kIntervalInverse = 64;
  constexpr double kInterval = 1.0 / kIntervalInverse;
  constexpr int kNumPoints = (kRangeMax - kRangeMin) / kInterval + 1;

  // Contains some useful functions for converting between points on the complex
  // plane and indices into the gridlike data structures below.
  struct ComplexPlane {
    static int GetXGridIndex(const Complex& value) {
      return (value.real() + kInterval / 2 - kRangeMin) / kInterval;
    }
    static int GetYGridIndex(const Complex& value) {
      return (value.imag() + kInterval / 2 - kRangeMin) / kInterval;
    }
    static Complex GetComplexForGridPoint(size_t x, size_t y) {
      return Complex(kRangeMin + x * kInterval, kRangeMin + y * kInterval);
    }
  };

  // Make sure the choice of interval doesn't result in any loss of precision.
  ASSERT_EQ(1.0, kInterval * kIntervalInverse);

  // Create a grid for part of the complex plane, with each axis within the
  // range [kRangeMin, kRangeMax]. Cells are stored in row-major order; a null
  // entry means no point is stored at that location.
  constexpr size_t width = kNumPoints;
  constexpr size_t height = kNumPoints;
  std::vector<Complex*> grid(width * height);

  // Initialize an object for each point within the inner circle |z| < kRadius.
  for (size_t i = 0; i < width; ++i) {
    for (size_t j = 0; j < height; ++j) {
      Complex point = ComplexPlane::GetComplexForGridPoint(i, j);
      // Do not store any values outside the inner circle.
      if (abs(point) <= kRadius) {
        // Placement new on Alloc()'d storage, so |detector_| records the
        // allocation against |kStack0|.
        grid[i + j * width] =
            new(Alloc(sizeof(Complex), kStack0)) Complex(point);
      }
    }
  }
  EXPECT_LE(alloced_ptrs_.size(), width * height);

  // Create a new grid for the result of the transformation.
  std::vector<Complex*> next_grid(width * height, nullptr);

  const int kNumIterations = 20;
  for (int n = 0; n < kNumIterations; ++n) {
    for (int i = 0; i < kNumPoints; ++i) {
      for (int j = 0; j < kNumPoints; ++j) {
        if (!grid[i + j * width])
          continue;

        // NOTE: The below code is NOT an efficient way to compute a Julia set.
        // This is only to test the leak detector with some nontrivial code.

        // A simple polynomial function for generating Julia sets is:
        //   f(z) = z^n + c

        // But in this algorithm, we need the inverse:
        //   fInv(z) = (z - c)^(1/n)

        // Here, let's use n=5 and c=0.544.
        const Complex c(0.544, 0);
        const Complex& z = *grid[i + j * width];

        // This is the principal root.
        Complex root = pow(z - c, 0.2);

        // Discard the result if it is too far out from the center of the
        // plane. (This also keeps the grid indices computed below in bounds.)
        if (abs(root) > kRadius)
          continue;

        // The below code only allocates Complex objects of the same size. The
        // leak detector expects various sizes, so increase the allocation size
        // by a different amount at each call site.

        // Nth root produces N results.
        // Place all root results on |next_grid|.

        // First, place the principal root.
        int next_i = ComplexPlane::GetXGridIndex(root);
        int next_j = ComplexPlane::GetYGridIndex(root);
        if (!next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 24, kStack1)) Complex(root);
        }

        double magnitude = abs(root);
        double angle = arg(root);
        // To generate other roots, rotate the principal root by increments of
        // 1/N of a full circle.
        const double kAngleIncrement = M_PI * 2 / 5;

        // Second root.
        root = std::polar(magnitude, angle + kAngleIncrement);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        if (!next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 40, kStack2)) Complex(root);
        }

        // In some of the sections below, setting |enable_leaks| to true will
        // trigger a memory leak by overwriting the old Complex pointer value
        // without freeing it. Due to the nature of complex roots being confined
        // to equal sections of the complex plane, each new pointer will
        // displace an old pointer that was allocated from the same line of
        // code.

        // Third root.
        root = std::polar(magnitude, angle + kAngleIncrement * 2);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        // *** LEAK *** (with |enable_leaks|, an occupied cell is overwritten
        // without freeing the old pointer).
        if (enable_leaks || !next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 40, kStack3)) Complex(root);
        }

        // Fourth root.
        root = std::polar(magnitude, angle + kAngleIncrement * 3);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        // *** LEAK *** (same mechanism as above, at a distinct call site).
        if (enable_leaks || !next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 52, kStack4)) Complex(root);
        }

        // Fifth root.
        root = std::polar(magnitude, angle + kAngleIncrement * 4);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        if (!next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 52, kStack5)) Complex(root);
        }
      }
    }

    // Clear the previously allocated points. The Complex objects are never
    // explicitly destroyed; Free() just releases the raw storage.
    for (Complex*& point : grid) {
      if (point) {
        Free(point);
        point = nullptr;
      }
    }

    // Now swap the two grids for the next iteration. After the swap, |grid|
    // holds this iteration's results and |next_grid| is all null.
    grid.swap(next_grid);
  }

  // Clear the previously allocated points.
  for (Complex*& point : grid) {
    if (point) {
      Free(point);
      point = nullptr;
    }
  }
}
382
383 TEST_F(LeakDetectorImplTest, CheckTestFramework) {
384 EXPECT_EQ(0U, total_num_allocs_);
385 EXPECT_EQ(0U, total_num_frees_);
386 EXPECT_EQ(0U, alloced_ptrs_.size());
387
388 // Allocate some memory.
389 void* ptr0 = Alloc(12, kStack0);
390 void* ptr1 = Alloc(16, kStack0);
391 void* ptr2 = Alloc(24, kStack0);
392 EXPECT_EQ(3U, total_num_allocs_);
393 EXPECT_EQ(0U, total_num_frees_);
394 EXPECT_EQ(3U, alloced_ptrs_.size());
395
396 // Free one of the pointers.
397 Free(ptr1);
398 EXPECT_EQ(3U, total_num_allocs_);
399 EXPECT_EQ(1U, total_num_frees_);
400 EXPECT_EQ(2U, alloced_ptrs_.size());
401
402 // Allocate some more memory.
403 void* ptr3 = Alloc(72, kStack1);
404 void* ptr4 = Alloc(104, kStack1);
405 void* ptr5 = Alloc(96, kStack1);
406 void* ptr6 = Alloc(24, kStack1);
407 EXPECT_EQ(7U, total_num_allocs_);
408 EXPECT_EQ(1U, total_num_frees_);
409 EXPECT_EQ(6U, alloced_ptrs_.size());
410
411 // Free more pointers.
412 Free(ptr2);
413 Free(ptr4);
414 Free(ptr6);
415 EXPECT_EQ(7U, total_num_allocs_);
416 EXPECT_EQ(4U, total_num_frees_);
417 EXPECT_EQ(3U, alloced_ptrs_.size());
418
419 // Free remaining memory.
420 Free(ptr0);
421 Free(ptr3);
422 Free(ptr5);
423 EXPECT_EQ(7U, total_num_allocs_);
424 EXPECT_EQ(7U, total_num_frees_);
425 EXPECT_EQ(0U, alloced_ptrs_.size());
426 }
427
428 TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) {
429 JuliaSet(false /* enable_leaks */);
430
431 // JuliaSet() should have run cleanly without leaking.
432 EXPECT_EQ(total_num_allocs_, total_num_frees_);
433 EXPECT_EQ(0U, alloced_ptrs_.size());
434 ASSERT_EQ(0U, stored_reports_.size());
435 }
436
437 TEST_F(LeakDetectorImplTest, JuliaSetWithLeak) {
438 JuliaSet(true /* enable_leaks */);
439
440 // JuliaSet() should have leaked some memory from two call sites.
441 EXPECT_GT(total_num_allocs_, total_num_frees_);
442 EXPECT_GT(alloced_ptrs_.size(), 0U);
443
444 // There should be one unique leak report generated for each leaky call site.
445 ASSERT_EQ(2U, stored_reports_.size());
446
447 // The reports should be stored in order of size.
448
449 // |report1| comes from the call site in JuliaSet() corresponding to
450 // |kStack3|.
451 const InternalLeakReport& report1 = *stored_reports_.begin();
452 EXPECT_EQ(sizeof(Complex) + 40, report1.alloc_size_bytes);
453 EXPECT_EQ(kStack3.depth, report1.call_stack.size());
454 for (size_t i = 0; i < kStack3.depth && i < report1.call_stack.size(); ++i) {
455 // The call stack's addresses may not fall within the mapping address.
456 // Those that don't will not be converted to mapping offsets.
457 if (kStack3.stack[i] >= kMappingAddr &&
458 kStack3.stack[i] <= kMappingAddr + kMappingSize) {
459 EXPECT_EQ(kStack3.stack[i] - kMappingAddr, report1.call_stack[i]);
460 } else {
461 EXPECT_EQ(kStack3.stack[i], report1.call_stack[i]);
462 }
463 }
464
465 // |report2| comes from the call site in JuliaSet() corresponding to
466 // |kStack4|.
467 const InternalLeakReport& report2 = *(++stored_reports_.begin());
468 EXPECT_EQ(sizeof(Complex) + 52, report2.alloc_size_bytes);
469 EXPECT_EQ(kStack4.depth, report2.call_stack.size());
470 for (size_t i = 0; i < kStack4.depth && i < report2.call_stack.size(); ++i) {
471 // The call stack's addresses may not fall within the mapping address.
472 // Those that don't will not be converted to mapping offsets.
473 if (kStack4.stack[i] >= kMappingAddr &&
474 kStack4.stack[i] <= kMappingAddr + kMappingSize) {
475 EXPECT_EQ(kStack4.stack[i] - kMappingAddr, report2.call_stack[i]);
476 } else {
477 EXPECT_EQ(kStack4.stack[i], report2.call_stack[i]);
478 }
479 }
480 }
481
482 } // namespace leak_detector
483 } // namespace metrics
OLDNEW
« no previous file with comments | « components/metrics/leak_detector/leak_detector_impl.cc ('k') | components/metrics/leak_detector/leak_detector_value_type.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698