Chromium Code Reviews

Unified diff: base/trace_event/heap_profiler_allocation_register_unittest.cc

Issue 2089253002: [tracing] Optimize AllocationRegister and increase max backtrace depth. (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Fix Windows (created 4 years, 5 months ago)
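
This patch set reworks the tests around the new AllocationRegister interface: the constructor now takes separate allocation and backtrace capacities, and Get() returns a bool and copies the entry into an out-parameter instead of returning a mutable pointer into the register. A minimal before/after sketch of a call site, assuming only the signatures visible in the diff below (|address| is a placeholder variable, not part of the patch):

    // Before: single capacity, lookup returns a mutable pointer (or nullptr).
    //   AllocationRegister reg(kNumCellsForTesting);
    //   Allocation* a = reg.Get(address);
    // After: split capacities, lookup copies into an out-parameter.
    AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
    AllocationRegister::Allocation a;
    if (reg.Get(address, &a)) {
      // |a| is a snapshot; mutating it does not change the register, which is
      // why the mutation checks in ChangeContextAfterInsertion were removed.
    }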
 // Copyright 2015 The Chromium Authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #include "base/trace_event/heap_profiler_allocation_register.h"

 #include <stddef.h>
 #include <stdint.h>

 #include "base/process/process_metrics.h"
 #include "base/trace_event/heap_profiler_allocation_context.h"
 #include "testing/gtest/include/gtest/gtest.h"

 namespace base {
 namespace trace_event {

 class AllocationRegisterTest : public testing::Test {
  public:
-  // Use a lower number of cells for unittests to avoid reserving a virtual
-  // region which is too big.
-  static const uint32_t kNumCellsForTesting =
-      AllocationRegister::kNumBuckets + 100;
+  // Use a lower number of backtrace cells for unittests to avoid reserving
+  // a virtual region which is too big.
+  static const size_t kAllocationBuckets =
+      AllocationRegister::kAllocationBuckets + 100;
+  static const size_t kAllocationCapacity = kAllocationBuckets;
+  static const size_t kBacktraceCapacity = 10;

   // Returns the number of cells that the |AllocationRegister| can store per
   // system page.
-  size_t GetNumCellsPerPage() {
-    return GetPageSize() / sizeof(AllocationRegister::Cell);
+  size_t GetAllocationCapacityPerPage() {
+    return GetPageSize() / sizeof(AllocationRegister::AllocationMap::Cell);
   }

-  uint32_t GetHighWaterMark(const AllocationRegister& reg) {
-    return reg.next_unused_cell_;
-  }
-
-  uint32_t GetNumCells(const AllocationRegister& reg) {
-    return reg.num_cells_;
+  size_t GetHighWaterMark(const AllocationRegister& reg) {
+    return reg.allocations_.next_unused_cell_;
   }
 };

 // Iterates over all entries in the allocation register and returns the bitwise
 // or of all addresses stored in it.
 uintptr_t OrAllAddresses(const AllocationRegister& reg) {
   uintptr_t acc = 0;

   for (auto i : reg)
     acc |= reinterpret_cast<uintptr_t>(i.address);

   return acc;
 }

 // Iterates over all entries in the allocation register and returns the sum of
 // the sizes of the entries.
 size_t SumAllSizes(const AllocationRegister& reg) {
   size_t sum = 0;

   for (auto i : reg)
     sum += i.size;

   return sum;
 }

 TEST_F(AllocationRegisterTest, InsertRemove) {
-  AllocationRegister reg(kNumCellsForTesting);
+  AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
   AllocationContext ctx;

   // Zero-sized allocations should be discarded.
   reg.Insert(reinterpret_cast<void*>(1), 0, ctx);

   EXPECT_EQ(0u, OrAllAddresses(reg));

   reg.Insert(reinterpret_cast<void*>(1), 1, ctx);

   EXPECT_EQ(1u, OrAllAddresses(reg));
@@ (13 unchanged lines omitted) @@
   reg.Remove(reinterpret_cast<void*>(4));

   EXPECT_EQ(1u, OrAllAddresses(reg));

   reg.Remove(reinterpret_cast<void*>(1));

   EXPECT_EQ(0u, OrAllAddresses(reg));
 }

 TEST_F(AllocationRegisterTest, DoubleFreeIsAllowed) {
-  AllocationRegister reg(kNumCellsForTesting);
+  AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
   AllocationContext ctx;

   reg.Insert(reinterpret_cast<void*>(1), 1, ctx);
   reg.Insert(reinterpret_cast<void*>(2), 1, ctx);
   reg.Remove(reinterpret_cast<void*>(1));
   reg.Remove(reinterpret_cast<void*>(1));  // Remove for the second time.
   reg.Remove(reinterpret_cast<void*>(4));  // Remove never inserted address.

   EXPECT_EQ(2u, OrAllAddresses(reg));
 }

 TEST_F(AllocationRegisterTest, DoubleInsertOverwrites) {
-  AllocationRegister reg(kNumCellsForTesting);
+  AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
   AllocationContext ctx;
   StackFrame frame1 = StackFrame::FromTraceEventName("Foo");
   StackFrame frame2 = StackFrame::FromTraceEventName("Bar");

   ctx.backtrace.frame_count = 1;

   ctx.backtrace.frames[0] = frame1;
   reg.Insert(reinterpret_cast<void*>(1), 11, ctx);

   {
@@ (13 unchanged lines omitted) @@
     EXPECT_EQ(frame2, elem.context.backtrace.frames[0]);
     EXPECT_EQ(13u, elem.size);
     EXPECT_EQ(reinterpret_cast<void*>(1), elem.address);
   }
 }

 // Check that even if more entries than the number of buckets are inserted, the
 // register still behaves correctly.
 TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
   size_t expected_sum = 0;
-  AllocationRegister reg(kNumCellsForTesting);
+  AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
   AllocationContext ctx;

   // By inserting 100 more entries than the number of buckets, there will be at
-  // least 100 collisions (100 = kNumCellsForTesting - kNumBuckets).
-  for (uintptr_t i = 1; i <= kNumCellsForTesting; i++) {
+  // least 100 collisions (100 = kAllocationCapacity - kAllocationBuckets).
+  for (uintptr_t i = 1; i <= kAllocationCapacity; i++) {
     size_t size = i % 31;
     expected_sum += size;
     reg.Insert(reinterpret_cast<void*>(i), size, ctx);

     // Don't check the sum on every iteration to keep the test fast.
     if (i % (1 << 14) == 0)
       EXPECT_EQ(expected_sum, SumAllSizes(reg));
   }

   EXPECT_EQ(expected_sum, SumAllSizes(reg));

-  for (uintptr_t i = 1; i <= kNumCellsForTesting; i++) {
+  for (uintptr_t i = 1; i <= kAllocationCapacity; i++) {
     size_t size = i % 31;
     expected_sum -= size;
     reg.Remove(reinterpret_cast<void*>(i));

     if (i % (1 << 14) == 0)
       EXPECT_EQ(expected_sum, SumAllSizes(reg));
   }

   EXPECT_EQ(expected_sum, SumAllSizes(reg));
 }

 // The previous tests are not particularly good for testing iterators, because
 // elements are removed and inserted in the same order, meaning that the cells
 // fill up from low to high index, and are then freed from low to high index.
 // This test removes entries in a different order, to ensure that the iterator
 // skips over the freed cells properly. Then insert again to ensure that the
 // free list is utilised properly.
 TEST_F(AllocationRegisterTest, InsertRemoveRandomOrder) {
   size_t expected_sum = 0;
-  AllocationRegister reg(kNumCellsForTesting);
+  AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
   AllocationContext ctx;

   uintptr_t generator = 3;
   uintptr_t prime = 1013;
   uint32_t initial_water_mark = GetHighWaterMark(reg);

   for (uintptr_t i = 2; i < prime; i++) {
     size_t size = i % 31 + 1;
     expected_sum += size;
     reg.Insert(reinterpret_cast<void*>(i), size, ctx);
@@ (19 unchanged lines omitted) @@

   ASSERT_EQ(prime - 2, GetHighWaterMark(reg) - initial_water_mark);

   // Inserting one more entry should use a fresh cell again.
   reg.Insert(reinterpret_cast<void*>(prime), 1, ctx);
   ASSERT_EQ(prime - 1, GetHighWaterMark(reg) - initial_water_mark);
 }

 TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
   using Allocation = AllocationRegister::Allocation;
-  const char kStdString[] = "std::string";
-  AllocationRegister reg(kNumCellsForTesting);
+  AllocationRegister reg(kAllocationCapacity, kBacktraceCapacity);
   AllocationContext ctx;

   reg.Insert(reinterpret_cast<void*>(17), 1, ctx);
   reg.Insert(reinterpret_cast<void*>(19), 2, ctx);
   reg.Insert(reinterpret_cast<void*>(23), 3, ctx);

+  Allocation a;
+
   // Looking up addresses that were not inserted should return null.
   // A null pointer lookup is a valid thing to do.
-  EXPECT_EQ(nullptr, reg.Get(nullptr));
-  EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(13)));
+  EXPECT_FALSE(reg.Get(nullptr, &a));
+  EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(13), &a));

-  Allocation* a17 = reg.Get(reinterpret_cast<void*>(17));
-  Allocation* a19 = reg.Get(reinterpret_cast<void*>(19));
-  Allocation* a23 = reg.Get(reinterpret_cast<void*>(23));
-
-  EXPECT_NE(nullptr, a17);
-  EXPECT_NE(nullptr, a19);
-  EXPECT_NE(nullptr, a23);
-
-  a17->size = 100;
-  a19->context.type_name = kStdString;
+  EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(17), &a));
+  EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(19), &a));
+  EXPECT_TRUE(reg.Get(reinterpret_cast<void*>(23), &a));

   reg.Remove(reinterpret_cast<void*>(23));

   // Lookup should not find any garbage after removal.
-  EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(23)));
-
-  // Mutating allocations should have modified the allocations in the register.
-  for (const Allocation& allocation : reg) {
-    if (allocation.address == reinterpret_cast<void*>(17))
-      EXPECT_EQ(100u, allocation.size);
-    if (allocation.address == reinterpret_cast<void*>(19))
-      EXPECT_EQ(kStdString, allocation.context.type_name);
-  }
+  EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(23), &a));

   reg.Remove(reinterpret_cast<void*>(17));
   reg.Remove(reinterpret_cast<void*>(19));

-  EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(17)));
-  EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(19)));
+  EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(17), &a));
+  EXPECT_FALSE(reg.Get(reinterpret_cast<void*>(19), &a));
 }

 // Check that the process aborts due to hitting the guard page when inserting
 // too many elements.
 #if GTEST_HAS_DEATH_TEST
 TEST_F(AllocationRegisterTest, OverflowDeathTest) {
-  // Use a smaller register to prevent OOM errors on low-end devices.
-  AllocationRegister reg(static_cast<uint32_t>(GetNumCellsPerPage()));
+  const size_t allocation_capacity = GetAllocationCapacityPerPage();
+  AllocationRegister reg(allocation_capacity, kBacktraceCapacity);
   AllocationContext ctx;
-  uintptr_t i;
+  size_t i;

-  // Fill up all of the memory allocated for the register. |GetNumCells(reg)|
-  // minus 1 elements are inserted, because cell 0 is unused, so this should
-  // fill up the available cells exactly.
-  for (i = 1; i < GetNumCells(reg); i++) {
-    reg.Insert(reinterpret_cast<void*>(i), 1, ctx);
+  // Fill up all of the memory allocated for the register's allocation map.
+  for (i = 0; i < allocation_capacity; i++) {
+    reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx);
   }

-  // Adding just one extra element might still work because the allocated memory
-  // is rounded up to the page size. Adding a page full of elements should cause
-  // overflow.
-  const size_t cells_per_page = GetNumCellsPerPage();
-
-  ASSERT_DEATH(for (size_t j = 0; j < cells_per_page; j++) {
-    reg.Insert(reinterpret_cast<void*>(i + j), 1, ctx);
-  }, "");
+  // Adding just one extra element should cause overflow.
+  ASSERT_DEATH(reg.Insert(reinterpret_cast<void*>(i + 1), 1, ctx), "");
 }
 #endif

 }  // namespace trace_event
 }  // namespace base
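
For readers skimming the InsertRemoveRandomOrder assertions above: the register hands out cells from a contiguous array, tracks a high-water mark (next_unused_cell_), and recycles removed cells through a free list before ever advancing that mark. A self-contained sketch of that allocation strategy, assuming a simplified stand-in (CellPool is hypothetical, not the real AllocationRegister code):

    #include <cstddef>
    #include <vector>

    // Simplified model of cell reuse: freed cells go on a free list and are
    // handed out again before the high-water mark advances.
    class CellPool {
     public:
      explicit CellPool(size_t capacity)
          : next_free_(0), next_unused_cell_(0), cells_(capacity, 0) {}

      // Returns the index of a usable cell: a recycled one if available,
      // otherwise a fresh cell past the high-water mark.
      size_t Acquire() {
        if (next_free_ != 0) {
          size_t index = next_free_ - 1;  // Pop the free-list head.
          next_free_ = cells_[index];
          return index;
        }
        return next_unused_cell_++;  // Fresh cell; the mark advances.
      }

      // Pushes |index| onto the free list; the high-water mark never shrinks.
      void Release(size_t index) {
        cells_[index] = next_free_;
        next_free_ = index + 1;  // Stored as index + 1 so that 0 means "empty".
      }

      size_t high_water_mark() const { return next_unused_cell_; }

     private:
      size_t next_free_;         // 1 + index of the free-list head; 0 if empty.
      size_t next_unused_cell_;  // High-water mark, as asserted in the test.
      std::vector<size_t> cells_;
    };

Under this model, removing and re-inserting entries in any order reuses freed cells first, so the high-water mark stays at prime - 2 until every freed cell is consumed, and exactly one additional insert then bumps it to prime - 1 — the invariants the ASSERT_EQs in the test check.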