// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/metrics/leak_detector/leak_detector_impl.h"

#include <math.h>
#include <stdint.h>

#include <complex>
#include <new>
#include <set>
#include <vector>

#include "base/macros.h"
#include "base/memory/scoped_ptr.h"
#include "components/metrics/leak_detector/custom_allocator.h"
#include "testing/gtest/include/gtest/gtest.h"
namespace metrics {
namespace leak_detector {

namespace {

// Makes working with complex numbers easier.
using Complex = std::complex<double>;

// The mapping location in memory for a fictional executable.
const uintptr_t kMappingAddr = 0x800000;
const size_t kMappingSize = 0x200000;

// Some call stacks within the fictional executable.
// * - outside the mapping range, e.g. JIT code.
const uintptr_t kRawStack0[] = {
    0x800100,
    0x900000,
    0x880080,
    0x810000,
};
const uintptr_t kRawStack1[] = {
    0x940000,
    0x980000,
    0xdeadbeef,  // *
    0x9a0000,
};
const uintptr_t kRawStack2[] = {
    0x8f0d00,
    0x803abc,
    0x9100a0,
};
const uintptr_t kRawStack3[] = {
    0x90fcde,
    0x900df00d,  // *
    0x801000,
    0x880088,
    0xdeadcafe,  // *
    0x9f0000,
    0x8700a0,
    0x96037c,
};
const uintptr_t kRawStack4[] = {
    0x8c0000,
    0x85d00d,
    0x921337,
    0x780000,  // *
};
const uintptr_t kRawStack5[] = {
    0x990000,
    0x888888,
    0x830ac0,
    0x8e0000,
    0xc00000,  // *
};

// This struct makes it easier to pass call stack info to
// LeakDetectorImplTest::Alloc().
struct TestCallStack {
  const uintptr_t* stack;  // A pointer to the original stack data.
  size_t depth;
};

const TestCallStack kStack0 = { kRawStack0, arraysize(kRawStack0) };
const TestCallStack kStack1 = { kRawStack1, arraysize(kRawStack1) };
const TestCallStack kStack2 = { kRawStack2, arraysize(kRawStack2) };
const TestCallStack kStack3 = { kRawStack3, arraysize(kRawStack3) };
const TestCallStack kStack4 = { kRawStack4, arraysize(kRawStack4) };
const TestCallStack kStack5 = { kRawStack5, arraysize(kRawStack5) };

// The interval between consecutive leak analyses
// (LeakDetectorImpl::TestForLeaks), in number of bytes allocated. For example,
// if |kAllocedSizeAnalysisInterval| = 1024, TestForLeaks() is called after
// each additional 1024 bytes of allocation.
static const size_t kAllocedSizeAnalysisInterval = 8192;

}  // namespace

// This test suite will test the ability of LeakDetectorImpl to catch leaks in
// a program. Individual tests can run leaky code locally.
//
// The leaky code must call Alloc() and Free() for heap memory management; it
// should not allocate or free heap memory by other means. See comments on
// those functions for more details.
class LeakDetectorImplTest : public ::testing::Test {
 public:
  LeakDetectorImplTest()
      : total_num_allocs_(0),
        total_num_frees_(0),
        total_alloced_size_(0),
        next_analysis_total_alloced_size_(kAllocedSizeAnalysisInterval) {}

  void SetUp() override {
    CustomAllocator::Initialize();

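    // A size or call stack must be flagged as suspicious in this many
    // consecutive leak analyses before it is reported as a leak (see
    // LeakDetectorImpl for the exact semantics of these thresholds).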
    const int kSizeSuspicionThreshold = 4;
    const int kCallStackSuspicionThreshold = 4;
    detector_.reset(new LeakDetectorImpl(kMappingAddr,
                                         kMappingSize,
                                         kSizeSuspicionThreshold,
                                         kCallStackSuspicionThreshold));
  }

  void TearDown() override {
    // Free any memory that was leaked by test cases. Do not use Free() because
    // that will try to modify |alloced_ptrs_|.
    for (void* ptr : alloced_ptrs_)
      delete[] reinterpret_cast<char*>(ptr);
    alloced_ptrs_.clear();

    // Must destroy all objects that use CustomAllocator before shutting down.
    detector_.reset();
    stored_reports_.clear();

    EXPECT_TRUE(CustomAllocator::Shutdown());
  }

 protected:
  // Alloc and free functions that allocate and free heap memory and
  // automatically pass alloc/free info to |detector_|. They emulate the
  // alloc/free hook functions that would call into LeakDetectorImpl in
  // real-life usage. They also keep track of individual allocations locally,
  // so any leaked memory can be cleaned up.
  //
  // |stack| is just a nominal call stack object to identify the call site. It
  // doesn't have to contain the stack trace of the actual call stack.
  void* Alloc(size_t size, const TestCallStack& stack) {
    void* ptr = new char[size];
    detector_->RecordAlloc(ptr, size, stack.depth,
                           reinterpret_cast<const void* const*>(stack.stack));

    EXPECT_TRUE(alloced_ptrs_.find(ptr) == alloced_ptrs_.end());
    alloced_ptrs_.insert(ptr);

    ++total_num_allocs_;
    total_alloced_size_ += size;
    if (total_alloced_size_ >= next_analysis_total_alloced_size_) {
      InternalVector<InternalLeakReport> reports;
      detector_->TestForLeaks(&reports);
      for (const InternalLeakReport& report : reports)
        stored_reports_.insert(report);

      // Determine when the next leak analysis should occur.
      while (total_alloced_size_ >= next_analysis_total_alloced_size_)
        next_analysis_total_alloced_size_ += kAllocedSizeAnalysisInterval;
    }
    return ptr;
  }

  // See comment for Alloc().
  void Free(void* ptr) {
    auto find_ptr_iter = alloced_ptrs_.find(ptr);
    EXPECT_FALSE(find_ptr_iter == alloced_ptrs_.end());
    if (find_ptr_iter == alloced_ptrs_.end())
      return;
    alloced_ptrs_.erase(find_ptr_iter);
    ++total_num_frees_;

    detector_->RecordFree(ptr);

    delete[] reinterpret_cast<char*>(ptr);
  }

  // TEST CASE: Julia set fractal computation. Pass in enable_leaks=true to
  // trigger some memory leaks.
  void JuliaSet(bool enable_leaks);

  // Instance of the class being tested.
  scoped_ptr<LeakDetectorImpl> detector_;

  // Number of pointers allocated and freed so far.
  size_t total_num_allocs_;
  size_t total_num_frees_;

  // Keeps count of total size allocated by Alloc().
  size_t total_alloced_size_;

  // The cumulative allocation size at which to trigger the TestForLeaks()
  // call.
  size_t next_analysis_total_alloced_size_;

  // Stores all pointers to memory allocated by Alloc() so we can manually
  // free the leaked pointers at the end. This also serves as redundant
  // bookkeeping: it stores all pointers that have been allocated but not yet
  // freed.
  std::set<void*> alloced_ptrs_;

  // Store leak reports here. Use a set so duplicate reports are not stored.
  std::set<InternalLeakReport> stored_reports_;

 private:
  DISALLOW_COPY_AND_ASSIGN(LeakDetectorImplTest);
};

void LeakDetectorImplTest::JuliaSet(bool enable_leaks) {
  // The center region of the complex plane that is the basis for our Julia set
  // computations is a circle of radius kRadius.
  constexpr double kRadius = 2;

  // To track points in the complex plane, we will use a rectangular grid in
  // the range defined by [-kRadius, kRadius] along both axes.
  constexpr double kRangeMin = -kRadius;
  constexpr double kRangeMax = kRadius;

  // Divide each axis into intervals, each of which is associated with a point
  // on that axis at its center.
  constexpr double kIntervalInverse = 64;
  constexpr double kInterval = 1.0 / kIntervalInverse;
  constexpr int kNumPoints = (kRangeMax - kRangeMin) / kInterval + 1;
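  // With kRadius = 2 and kInterval = 1/64, this yields
  // kNumPoints = 4 * 64 + 1 = 257 grid points along each axis.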

  // Contains some useful functions for converting between points on the
  // complex plane and indices in a grid-like data structure.
  struct ComplexPlane {
    static int GetXGridIndex(const Complex& value) {
      return (value.real() + kInterval / 2 - kRangeMin) / kInterval;
    }
    static int GetYGridIndex(const Complex& value) {
      return (value.imag() + kInterval / 2 - kRangeMin) / kInterval;
    }
    static Complex GetComplexForGridPoint(size_t x, size_t y) {
      return Complex(kRangeMin + x * kInterval, kRangeMin + y * kInterval);
    }
  };

  // Make sure the choice of interval doesn't result in any loss of precision.
  ASSERT_EQ(1.0, kInterval * kIntervalInverse);
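  // (kIntervalInverse = 64 is a power of two, so kInterval = 1/64 is exactly
  // representable in binary floating point and the product above is exactly
  // 1.0.)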

  // Create a grid for part of the complex plane, with each axis within the
  // range [kRangeMin, kRangeMax].
  constexpr size_t width = kNumPoints;
  constexpr size_t height = kNumPoints;
  std::vector<Complex*> grid(width * height);

  // Initialize an object for each point within the inner circle
  // |z| <= kRadius.
  for (size_t i = 0; i < width; ++i) {
    for (size_t j = 0; j < height; ++j) {
      Complex point = ComplexPlane::GetComplexForGridPoint(i, j);
      // Do not store any values outside the inner circle.
      if (abs(point) <= kRadius) {
        grid[i + j * width] =
            new(Alloc(sizeof(Complex), kStack0)) Complex(point);
      }
    }
  }
  EXPECT_LE(alloced_ptrs_.size(), width * height);

  // Create a new grid for the result of the transformation.
  std::vector<Complex*> next_grid(width * height, nullptr);

  const int kNumIterations = 20;
  for (int n = 0; n < kNumIterations; ++n) {
    for (int i = 0; i < kNumPoints; ++i) {
      for (int j = 0; j < kNumPoints; ++j) {
        if (!grid[i + j * width])
          continue;

        // NOTE: The below code is NOT an efficient way to compute a Julia set.
        // This is only to test the leak detector with some nontrivial code.

        // A simple polynomial function for generating Julia sets is:
        //   f(z) = z^n + c

        // But in this algorithm, we need the inverse:
        //   fInv(z) = (z - c)^(1/n)

        // Here, let's use n=5 and c=0.544.
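        // (Iterating the inverse map is the standard inverse-iteration method
        // for drawing Julia sets: repeatedly applying fInv pulls points
        // toward the Julia set, which is the attractor of the inverse map.)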
        const Complex c(0.544, 0);
        const Complex& z = *grid[i + j * width];

        // This is the principal root.
        Complex root = pow(z - c, 0.2);

        // Discard the result if it is too far out from the center of the plane.
        if (abs(root) > kRadius)
          continue;

        // The code below would otherwise allocate Complex objects of the same
        // size at every call site. The leak detector expects a variety of
        // sizes, so pad the allocation size by a different amount at each
        // call site.
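        // (The pads below are +24, +40, +40, +52, and +52 bytes. A leaky and
        // a non-leaky call site share each of the +40 and +52 sizes, so the
        // detector must also tell them apart by call stack.)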

        // Nth root produces N results.
        // Place all root results on |next_grid|.

        // First, place the principal root.
        int next_i = ComplexPlane::GetXGridIndex(root);
        int next_j = ComplexPlane::GetYGridIndex(root);
        if (!next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 24, kStack1)) Complex(root);
        }

        double magnitude = abs(root);
        double angle = arg(root);
        // To generate other roots, rotate the principal root by increments of
        // 1/N of a full circle.
        const double kAngleIncrement = M_PI * 2 / 5;
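        // (All five fifth roots of (z - c) share the same magnitude and are
        // evenly spaced in angle, so each rotation by 2*pi/5 produces another
        // valid root.)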

        // Second root.
        root = std::polar(magnitude, angle + kAngleIncrement);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        if (!next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 40, kStack2)) Complex(root);
        }

        // In some of the sections below, setting |enable_leaks| to true will
        // trigger a memory leak by overwriting the old Complex pointer value
        // without freeing it. Because each of the five roots is confined to
        // its own sector of the complex plane, each new pointer displaces an
        // old pointer that was allocated from the same line of code.
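        // Over kNumIterations passes of the grid, the overwritten (and never
        // freed) allocations accumulate at the two leaky call sites below,
        // which is what the leak detector is expected to report.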

        // Third root.
        root = std::polar(magnitude, angle + kAngleIncrement * 2);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        // *** LEAK ***
        if (enable_leaks || !next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 40, kStack3)) Complex(root);
        }

        // Fourth root.
        root = std::polar(magnitude, angle + kAngleIncrement * 3);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        // *** LEAK ***
        if (enable_leaks || !next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 52, kStack4)) Complex(root);
        }

        // Fifth root.
        root = std::polar(magnitude, angle + kAngleIncrement * 4);
        next_i = ComplexPlane::GetXGridIndex(root);
        next_j = ComplexPlane::GetYGridIndex(root);
        if (!next_grid[next_i + next_j * width]) {
          next_grid[next_i + next_j * width] =
              new(Alloc(sizeof(Complex) + 52, kStack5)) Complex(root);
        }
      }
    }

    // Clear the previously allocated points.
    for (Complex*& point : grid) {
      if (point) {
        Free(point);
        point = nullptr;
      }
    }

    // Now swap the two grids for the next iteration.
    grid.swap(next_grid);
  }

  // Clear the previously allocated points.
  for (Complex*& point : grid) {
    if (point) {
      Free(point);
      point = nullptr;
    }
  }
}

TEST_F(LeakDetectorImplTest, CheckTestFramework) {
  EXPECT_EQ(0U, total_num_allocs_);
  EXPECT_EQ(0U, total_num_frees_);
  EXPECT_EQ(0U, alloced_ptrs_.size());

  // Allocate some memory.
  void* ptr0 = Alloc(12, kStack0);
  void* ptr1 = Alloc(16, kStack0);
  void* ptr2 = Alloc(24, kStack0);
  EXPECT_EQ(3U, total_num_allocs_);
  EXPECT_EQ(0U, total_num_frees_);
  EXPECT_EQ(3U, alloced_ptrs_.size());

  // Free one of the pointers.
  Free(ptr1);
  EXPECT_EQ(3U, total_num_allocs_);
  EXPECT_EQ(1U, total_num_frees_);
  EXPECT_EQ(2U, alloced_ptrs_.size());

  // Allocate some more memory.
  void* ptr3 = Alloc(72, kStack1);
  void* ptr4 = Alloc(104, kStack1);
  void* ptr5 = Alloc(96, kStack1);
  void* ptr6 = Alloc(24, kStack1);
  EXPECT_EQ(7U, total_num_allocs_);
  EXPECT_EQ(1U, total_num_frees_);
  EXPECT_EQ(6U, alloced_ptrs_.size());

  // Free more pointers.
  Free(ptr2);
  Free(ptr4);
  Free(ptr6);
  EXPECT_EQ(7U, total_num_allocs_);
  EXPECT_EQ(4U, total_num_frees_);
  EXPECT_EQ(3U, alloced_ptrs_.size());

  // Free remaining memory.
  Free(ptr0);
  Free(ptr3);
  Free(ptr5);
  EXPECT_EQ(7U, total_num_allocs_);
  EXPECT_EQ(7U, total_num_frees_);
  EXPECT_EQ(0U, alloced_ptrs_.size());
}

TEST_F(LeakDetectorImplTest, JuliaSetNoLeak) {
  JuliaSet(false /* enable_leaks */);

  // JuliaSet() should have run cleanly without leaking.
  EXPECT_EQ(total_num_allocs_, total_num_frees_);
  EXPECT_EQ(0U, alloced_ptrs_.size());
  ASSERT_EQ(0U, stored_reports_.size());
}

TEST_F(LeakDetectorImplTest, JuliaSetWithLeak) {
  JuliaSet(true /* enable_leaks */);

  // JuliaSet() should have leaked some memory from two call sites.
  EXPECT_GT(total_num_allocs_, total_num_frees_);
  EXPECT_GT(alloced_ptrs_.size(), 0U);

  // There should be one unique leak report generated for each leaky call site.
  ASSERT_EQ(2U, stored_reports_.size());

  // The reports should be stored in order of size.
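  // (This assumes InternalLeakReport's ordering compares allocation size
  // first, so the sizeof(Complex) + 40 report precedes the
  // sizeof(Complex) + 52 report.)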

  // |report1| comes from the call site in JuliaSet() corresponding to
  // |kStack3|.
  const InternalLeakReport& report1 = *stored_reports_.begin();
  EXPECT_EQ(sizeof(Complex) + 40, report1.alloc_size_bytes);
  EXPECT_EQ(kStack3.depth, report1.call_stack.size());
  for (size_t i = 0; i < kStack3.depth && i < report1.call_stack.size(); ++i) {
    // The call stack's addresses may not all fall within the mapping range.
    // Those that don't will not be converted to mapping offsets.
    if (kStack3.stack[i] >= kMappingAddr &&
        kStack3.stack[i] < kMappingAddr + kMappingSize) {
      EXPECT_EQ(kStack3.stack[i] - kMappingAddr, report1.call_stack[i]);
    } else {
      EXPECT_EQ(kStack3.stack[i], report1.call_stack[i]);
    }
  }

  // |report2| comes from the call site in JuliaSet() corresponding to
  // |kStack4|.
  const InternalLeakReport& report2 = *(++stored_reports_.begin());
  EXPECT_EQ(sizeof(Complex) + 52, report2.alloc_size_bytes);
  EXPECT_EQ(kStack4.depth, report2.call_stack.size());
  for (size_t i = 0; i < kStack4.depth && i < report2.call_stack.size(); ++i) {
    // The call stack's addresses may not all fall within the mapping range.
    // Those that don't will not be converted to mapping offsets.
    if (kStack4.stack[i] >= kMappingAddr &&
        kStack4.stack[i] < kMappingAddr + kMappingSize) {
      EXPECT_EQ(kStack4.stack[i] - kMappingAddr, report2.call_stack[i]);
    } else {
      EXPECT_EQ(kStack4.stack[i], report2.call_stack[i]);
    }
  }
}

}  // namespace leak_detector
}  // namespace metrics