Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(146)

Side by Side Diff: base/trace_event/heap_profiler_allocation_register_unittest.cc

Issue 1574493002: [Tracing] Add lookup support to AllocationRegister (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « base/trace_event/heap_profiler_allocation_register.cc ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
OLDNEW
1 // Copyright 2015 The Chromium Authors. All rights reserved. 1 // Copyright 2015 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/trace_event/heap_profiler_allocation_register.h" 5 #include "base/trace_event/heap_profiler_allocation_register.h"
6 6
7 #include <stddef.h> 7 #include <stddef.h>
8 #include <stdint.h> 8 #include <stdint.h>
9 9
10 #include "base/process/process_metrics.h" 10 #include "base/process/process_metrics.h"
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
103 // TODO(ruuda): Although double insert happens in practice, it should not. 103 // TODO(ruuda): Although double insert happens in practice, it should not.
104 // Find out the cause and ban double insert if possible. 104 // Find out the cause and ban double insert if possible.
105 AllocationRegister reg; 105 AllocationRegister reg;
106 AllocationContext ctx = AllocationContext::Empty(); 106 AllocationContext ctx = AllocationContext::Empty();
107 StackFrame frame1 = "Foo"; 107 StackFrame frame1 = "Foo";
108 StackFrame frame2 = "Bar"; 108 StackFrame frame2 = "Bar";
109 109
110 ctx.backtrace.frames[0] = frame1; 110 ctx.backtrace.frames[0] = frame1;
111 reg.Insert(reinterpret_cast<void*>(1), 11, ctx); 111 reg.Insert(reinterpret_cast<void*>(1), 11, ctx);
112 112
113 auto elem = *reg.begin(); 113 {
114 AllocationRegister::Allocation elem = *reg.begin();
114 115
115 EXPECT_EQ(frame1, elem.context.backtrace.frames[0]); 116 EXPECT_EQ(frame1, elem.context.backtrace.frames[0]);
116 EXPECT_EQ(11u, elem.size); 117 EXPECT_EQ(11u, elem.size);
117 EXPECT_EQ(reinterpret_cast<void*>(1), elem.address); 118 EXPECT_EQ(reinterpret_cast<void*>(1), elem.address);
119 }
118 120
119 ctx.backtrace.frames[0] = frame2; 121 ctx.backtrace.frames[0] = frame2;
120 reg.Insert(reinterpret_cast<void*>(1), 13, ctx); 122 reg.Insert(reinterpret_cast<void*>(1), 13, ctx);
121 123
122 elem = *reg.begin(); 124 {
125 AllocationRegister::Allocation elem = *reg.begin();
123 126
124 EXPECT_EQ(frame2, elem.context.backtrace.frames[0]); 127 EXPECT_EQ(frame2, elem.context.backtrace.frames[0]);
125 EXPECT_EQ(13u, elem.size); 128 EXPECT_EQ(13u, elem.size);
126 EXPECT_EQ(reinterpret_cast<void*>(1), elem.address); 129 EXPECT_EQ(reinterpret_cast<void*>(1), elem.address);
130 }
127 } 131 }
128 132
129 // Check that even if more entries than the number of buckets are inserted, the 133 // Check that even if more entries than the number of buckets are inserted, the
130 // register still behaves correctly. 134 // register still behaves correctly.
131 TEST_F(AllocationRegisterTest, InsertRemoveCollisions) { 135 TEST_F(AllocationRegisterTest, InsertRemoveCollisions) {
132 size_t expected_sum = 0; 136 size_t expected_sum = 0;
133 AllocationRegister reg; 137 AllocationRegister reg;
134 AllocationContext ctx = AllocationContext::Empty(); 138 AllocationContext ctx = AllocationContext::Empty();
135 139
136 // By inserting 100 more entries than the number of buckets, there will be at 140 // By inserting 100 more entries than the number of buckets, there will be at
(...skipping 61 matching lines...) Expand 10 before | Expand all | Expand 10 after
198 for (uintptr_t i = 2; i < prime; i++) 202 for (uintptr_t i = 2; i < prime; i++)
199 reg.Insert(reinterpret_cast<void*>(i), 0, ctx); 203 reg.Insert(reinterpret_cast<void*>(i), 0, ctx);
200 204
201 ASSERT_EQ(prime - 2, GetHighWaterMark(reg) - initial_water_mark); 205 ASSERT_EQ(prime - 2, GetHighWaterMark(reg) - initial_water_mark);
202 206
203 // Inserting one more entry should use a fresh cell again. 207 // Inserting one more entry should use a fresh cell again.
204 reg.Insert(reinterpret_cast<void*>(prime), 0, ctx); 208 reg.Insert(reinterpret_cast<void*>(prime), 0, ctx);
205 ASSERT_EQ(prime - 1, GetHighWaterMark(reg) - initial_water_mark); 209 ASSERT_EQ(prime - 1, GetHighWaterMark(reg) - initial_water_mark);
206 } 210 }
207 211
212 TEST_F(AllocationRegisterTest, ChangeContextAfterInsertion) {
213 using Allocation = AllocationRegister::Allocation;
214 const char kStdString[] = "std::string";
215 AllocationRegister reg;
216 AllocationContext ctx = AllocationContext::Empty();
217
218 reg.Insert(reinterpret_cast<void*>(17), 1, ctx);
219 reg.Insert(reinterpret_cast<void*>(19), 2, ctx);
220 reg.Insert(reinterpret_cast<void*>(23), 3, ctx);
221
222 // Looking up addresses that were not inserted should return null.
223 // A null pointer lookup is a valid thing to do.
224 EXPECT_EQ(nullptr, reg.Get(nullptr));
225 EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(13)));
226
227 Allocation* a17 = reg.Get(reinterpret_cast<void*>(17));
228 Allocation* a19 = reg.Get(reinterpret_cast<void*>(19));
229 Allocation* a23 = reg.Get(reinterpret_cast<void*>(23));
230
231 EXPECT_NE(nullptr, a17);
232 EXPECT_NE(nullptr, a19);
233 EXPECT_NE(nullptr, a23);
234
235 a17->size = 100;
236 a19->context.type_name = kStdString;
237
238 reg.Remove(reinterpret_cast<void*>(23));
239
240 // Lookup should not find any garbage after removal.
241 EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(23)));
242
243 // Mutating allocations should have modified the allocations in the register.
244 for (const Allocation& allocation : reg) {
245 if (allocation.address == reinterpret_cast<void*>(17))
246 EXPECT_EQ(100u, allocation.size);
247 if (allocation.address == reinterpret_cast<void*>(19))
248 EXPECT_EQ(kStdString, allocation.context.type_name);
249 }
250
251 reg.Remove(reinterpret_cast<void*>(17));
252 reg.Remove(reinterpret_cast<void*>(19));
253
254 EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(17)));
255 EXPECT_EQ(nullptr, reg.Get(reinterpret_cast<void*>(19)));
256 }
257
// The register must abort (by hitting its guard page) rather than corrupt
// memory when more elements are inserted than it has capacity for.
#if GTEST_HAS_DEATH_TEST
TEST_F(AllocationRegisterTest, OverflowDeathTest) {
  // Use a smaller register to prevent OOM errors on low-end devices.
  AllocationRegister reg(static_cast<uint32_t>(GetNumCellsPerPage()));
  AllocationContext ctx = AllocationContext::Empty();

  // Fill up all of the memory allocated for the register. Cell 0 is unused,
  // so |GetNumCells(reg)| - 1 insertions occupy the available cells exactly.
  const uintptr_t num_cells = GetNumCells(reg);
  for (uintptr_t cell = 1; cell < num_cells; cell++)
    reg.Insert(reinterpret_cast<void*>(cell), 0, ctx);

  // One extra insertion might still succeed because the allocated memory is
  // rounded up to the page size; inserting a full page of extra elements
  // must overflow and die.
  const size_t cells_per_page = GetNumCellsPerPage();

  ASSERT_DEATH(for (size_t extra = 0; extra < cells_per_page; extra++) {
    reg.Insert(reinterpret_cast<void*>(num_cells + extra), 0, ctx);
  }, "");
}
#endif
234 284
235 } // namespace trace_event 285 } // namespace trace_event
236 } // namespace base 286 } // namespace base
OLDNEW
« no previous file with comments | « base/trace_event/heap_profiler_allocation_register.cc ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698