Chromium Code Reviews

Side by Side Diff: base/allocator/allocator_shim_unittest.cc

Issue 2658723007: Hook up allocator shim on mac. (Closed)
Patch Set: Remove CallUnshimmed. Created 3 years, 10 months ago
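Background for reviewers new to the shim: everything below revolves around a chain of AllocatorDispatch structs. Each entry holds one function pointer per allocation routine plus a next pointer, and an interposed layer records what it wants to observe and then forwards to self->next. The following is a minimal, self-contained sketch of that chaining pattern under simplified assumptions (two hooks instead of the nine in this patch, and no thread-safe insertion); it is illustrative only, not the real base/allocator API.

#include <cstddef>
#include <cstdio>
#include <cstdlib>

struct AllocatorDispatch {
  void* (*alloc_function)(const AllocatorDispatch* self, size_t size);
  void (*free_function)(const AllocatorDispatch* self, void* address);
  const AllocatorDispatch* next;  // The chain terminates at the system allocator.
};

// Terminal layer: call the system allocator directly.
void* SystemAlloc(const AllocatorDispatch*, size_t size) {
  return malloc(size);
}
void SystemFree(const AllocatorDispatch*, void* address) {
  free(address);
}
const AllocatorDispatch g_system_dispatch = {&SystemAlloc, &SystemFree,
                                             nullptr};

// Mock layer, in the spirit of g_mock_dispatch below: observe, then forward.
size_t g_allocs_seen = 0;
void* MockAlloc(const AllocatorDispatch* self, size_t size) {
  ++g_allocs_seen;  // Record the interception, like the test's counters.
  return self->next->alloc_function(self->next, size);
}
void MockFree(const AllocatorDispatch* self, void* address) {
  self->next->free_function(self->next, address);
}
const AllocatorDispatch g_chain_head = {&MockAlloc, &MockFree,
                                        &g_system_dispatch};

int main() {
  // "Inserting" a dispatch simply means routing calls through the new head.
  void* p = g_chain_head.alloc_function(&g_chain_head, 19);
  g_chain_head.free_function(&g_chain_head, p);
  printf("allocs intercepted: %zu\n", g_allocs_seen);  // Prints 1.
}

In the real shim, InsertAllocatorDispatch splices a new head onto this chain and RemoveAllocatorDispatchForTesting undoes it, which is the setup/teardown pattern every test below follows.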
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/allocator/allocator_shim.h" 5 #include "base/allocator/allocator_shim.h"
6 6
7 #include <malloc.h>
8 #include <stdlib.h> 7 #include <stdlib.h>
9 #include <string.h> 8 #include <string.h>
10 9
11 #include <memory> 10 #include <memory>
12 #include <new> 11 #include <new>
13 #include <vector> 12 #include <vector>
14 13
15 #include "base/allocator/features.h" 14 #include "base/allocator/features.h"
16 #include "base/atomicops.h" 15 #include "base/atomicops.h"
17 #include "base/process/process_metrics.h" 16 #include "base/process/process_metrics.h"
18 #include "base/synchronization/waitable_event.h" 17 #include "base/synchronization/waitable_event.h"
19 #include "base/threading/platform_thread.h" 18 #include "base/threading/platform_thread.h"
20 #include "base/threading/thread_local.h" 19 #include "base/threading/thread_local.h"
20 #include "build/build_config.h"
21 #include "testing/gmock/include/gmock/gmock.h" 21 #include "testing/gmock/include/gmock/gmock.h"
22 #include "testing/gtest/include/gtest/gtest.h" 22 #include "testing/gtest/include/gtest/gtest.h"
23 23
24 #if defined(OS_WIN) 24 #if defined(OS_WIN)
25 #include <windows.h> 25 #include <windows.h>
26 #elif defined(OS_MACOSX)
27 #include <malloc/malloc.h>
28 #include "third_party/apple_apsl/malloc.h"
26 #else 29 #else
30 #include <malloc.h>
31 #endif
32
33 #if !defined(OS_WIN)
27 #include <unistd.h> 34 #include <unistd.h>
28 #endif 35 #endif
29 36
30 // Some new Android NDKs (64 bit) do not expose (p)valloc anymore. These 37 // Some new Android NDKs (64 bit) do not expose (p)valloc anymore. These
31 // functions are implemented at the shim-layer level. 38 // functions are implemented at the shim-layer level.
32 #if defined(OS_ANDROID) 39 #if defined(OS_ANDROID)
33 extern "C" { 40 extern "C" {
34 void* valloc(size_t size); 41 void* valloc(size_t size);
35 void* pvalloc(size_t size); 42 void* pvalloc(size_t size);
36 } 43 }
(...skipping 39 matching lines...)
76 if (alignment < kMaxSizeTracked) 83 if (alignment < kMaxSizeTracked)
77 ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]); 84 ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
78 } 85 }
79 return self->next->alloc_aligned_function(self->next, alignment, size); 86 return self->next->alloc_aligned_function(self->next, alignment, size);
80 } 87 }
81 88
82 static void* MockRealloc(const AllocatorDispatch* self, 89 static void* MockRealloc(const AllocatorDispatch* self,
83 void* address, 90 void* address,
84 size_t size) { 91 size_t size) {
85 if (instance_) { 92 if (instance_) {
86 // Address 0x420 is a special sentinel for the NewHandlerConcurrency test. 93 // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test.
87 // The first time (but only the first one) it is hit it fails, causing the 94 // Hitting it for the first time makes the realloc fail, triggering the
88 // invocation of the std::new_handler. 95 // invocation of the std::new_handler.
89 if (address == reinterpret_cast<void*>(0x420)) { 96 if (size == 0xFEED) {
90 if (!instance_->did_fail_realloc_0x420_once->Get()) { 97 if (!instance_->did_fail_realloc_0xfeed_once->Get()) {
91 instance_->did_fail_realloc_0x420_once->Set(true); 98 instance_->did_fail_realloc_0xfeed_once->Set(true);
92 return nullptr; 99 return nullptr;
93 } else { 100 } else {
94 return reinterpret_cast<void*>(0x420ul); 101 return address;
95 } 102 }
96 } 103 }
97 104
98 if (size < kMaxSizeTracked) 105 if (size < kMaxSizeTracked)
99 ++(instance_->reallocs_intercepted_by_size[size]); 106 ++(instance_->reallocs_intercepted_by_size[size]);
100 ++instance_->reallocs_intercepted_by_addr[Hash(address)]; 107 ++instance_->reallocs_intercepted_by_addr[Hash(address)];
101 } 108 }
102 return self->next->realloc_function(self->next, address, size); 109 return self->next->realloc_function(self->next, address, size);
103 } 110 }
104 111
105 static void MockFree(const AllocatorDispatch* self, void* address) { 112 static void MockFree(const AllocatorDispatch* self, void* address) {
106 if (instance_) { 113 if (instance_) {
107 ++instance_->frees_intercepted_by_addr[Hash(address)]; 114 ++instance_->frees_intercepted_by_addr[Hash(address)];
108 } 115 }
109 self->next->free_function(self->next, address); 116 self->next->free_function(self->next, address);
110 } 117 }
111 118
119 static size_t MockGetSizeEstimate(const AllocatorDispatch* self,
120 void* address) {
121 return self->next->get_size_estimate_function(self->next, address);
122 }
123
124 static unsigned MockBatchMalloc(const AllocatorDispatch* self,
125 size_t size,
126 void** results,
127 unsigned num_requested) {
128 if (instance_) {
129 instance_->batch_mallocs_intercepted_by_size[size] =
130 instance_->batch_mallocs_intercepted_by_size[size] + num_requested;
131 }
132 return self->next->batch_malloc_function(self->next, size, results,
133 num_requested);
134 }
135
136 static void MockBatchFree(const AllocatorDispatch* self,
137 void** to_be_freed,
138 unsigned num_to_be_freed) {
139 if (instance_) {
140 for (unsigned i = 0; i < num_to_be_freed; ++i) {
141 ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])];
142 }
143 }
144 self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed);
145 }
146
147 static void MockFreeDefiniteSize(const AllocatorDispatch* self,
148 void* ptr,
149 size_t size) {
150 if (instance_) {
151 ++instance_->frees_intercepted_by_addr[Hash(ptr)];
152 ++instance_->free_definite_sizes_intercepted_by_size[size];
153 }
154 self->next->free_definite_size_function(self->next, ptr, size);
155 }
156
112 static void NewHandler() { 157 static void NewHandler() {
113 if (!instance_) 158 if (!instance_)
114 return; 159 return;
115 subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1); 160 subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1);
116 } 161 }
117 162
118 int32_t GetNumberOfNewHandlerCalls() { 163 int32_t GetNumberOfNewHandlerCalls() {
119 return subtle::Acquire_Load(&instance_->num_new_handler_calls); 164 return subtle::Acquire_Load(&instance_->num_new_handler_calls);
120 } 165 }
121 166
122 void SetUp() override { 167 void SetUp() override {
123 const size_t array_size = kMaxSizeTracked * sizeof(size_t); 168 const size_t array_size = kMaxSizeTracked * sizeof(size_t);
124 memset(&allocs_intercepted_by_size, 0, array_size); 169 memset(&allocs_intercepted_by_size, 0, array_size);
125 memset(&zero_allocs_intercepted_by_size, 0, array_size); 170 memset(&zero_allocs_intercepted_by_size, 0, array_size);
126 memset(&aligned_allocs_intercepted_by_size, 0, array_size); 171 memset(&aligned_allocs_intercepted_by_size, 0, array_size);
127 memset(&aligned_allocs_intercepted_by_alignment, 0, array_size); 172 memset(&aligned_allocs_intercepted_by_alignment, 0, array_size);
128 memset(&reallocs_intercepted_by_size, 0, array_size); 173 memset(&reallocs_intercepted_by_size, 0, array_size);
129 memset(&frees_intercepted_by_addr, 0, array_size); 174 memset(&frees_intercepted_by_addr, 0, array_size);
130 did_fail_realloc_0x420_once.reset(new ThreadLocalBoolean()); 175 memset(&batch_mallocs_intercepted_by_size, 0, array_size);
176 memset(&batch_frees_intercepted_by_addr, 0, array_size);
177 memset(&free_definite_sizes_intercepted_by_size, 0, array_size);
178 did_fail_realloc_0xfeed_once.reset(new ThreadLocalBoolean());
131 subtle::Release_Store(&num_new_handler_calls, 0); 179 subtle::Release_Store(&num_new_handler_calls, 0);
132 instance_ = this; 180 instance_ = this;
181
133 } 182 }
134 183
184 #if defined(OS_MACOSX)
185 static void SetUpTestCase() {
186 InitializeAllocatorShim();
187 }
188 #endif
189
135 void TearDown() override { instance_ = nullptr; } 190 void TearDown() override { instance_ = nullptr; }
136 191
137 protected: 192 protected:
138 size_t allocs_intercepted_by_size[kMaxSizeTracked]; 193 size_t allocs_intercepted_by_size[kMaxSizeTracked];
139 size_t zero_allocs_intercepted_by_size[kMaxSizeTracked]; 194 size_t zero_allocs_intercepted_by_size[kMaxSizeTracked];
140 size_t aligned_allocs_intercepted_by_size[kMaxSizeTracked]; 195 size_t aligned_allocs_intercepted_by_size[kMaxSizeTracked];
141 size_t aligned_allocs_intercepted_by_alignment[kMaxSizeTracked]; 196 size_t aligned_allocs_intercepted_by_alignment[kMaxSizeTracked];
142 size_t reallocs_intercepted_by_size[kMaxSizeTracked]; 197 size_t reallocs_intercepted_by_size[kMaxSizeTracked];
143 size_t reallocs_intercepted_by_addr[kMaxSizeTracked]; 198 size_t reallocs_intercepted_by_addr[kMaxSizeTracked];
144 size_t frees_intercepted_by_addr[kMaxSizeTracked]; 199 size_t frees_intercepted_by_addr[kMaxSizeTracked];
145 std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0x420_once; 200 size_t batch_mallocs_intercepted_by_size[kMaxSizeTracked];
201 size_t batch_frees_intercepted_by_addr[kMaxSizeTracked];
202 size_t free_definite_sizes_intercepted_by_size[kMaxSizeTracked];
203 std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0xfeed_once;
146 subtle::Atomic32 num_new_handler_calls; 204 subtle::Atomic32 num_new_handler_calls;
147 205
148 private: 206 private:
149 static AllocatorShimTest* instance_; 207 static AllocatorShimTest* instance_;
150 }; 208 };
151 209
152 struct TestStruct1 { 210 struct TestStruct1 {
153 uint32_t ignored; 211 uint32_t ignored;
154 uint8_t ignored_2; 212 uint8_t ignored_2;
155 }; 213 };
156 214
157 struct TestStruct2 { 215 struct TestStruct2 {
158 uint64_t ignored; 216 uint64_t ignored;
159 uint8_t ignored_3; 217 uint8_t ignored_3;
160 }; 218 };
161 219
162 class ThreadDelegateForNewHandlerTest : public PlatformThread::Delegate { 220 class ThreadDelegateForNewHandlerTest : public PlatformThread::Delegate {
163 public: 221 public:
164 ThreadDelegateForNewHandlerTest(WaitableEvent* event) : event_(event) {} 222 ThreadDelegateForNewHandlerTest(WaitableEvent* event) : event_(event) {}
165 223
166 void ThreadMain() override { 224 void ThreadMain() override {
167 event_->Wait(); 225 event_->Wait();
168 void* res = realloc(reinterpret_cast<void*>(0x420ul), 1); 226 void* temp = malloc(1);
169 EXPECT_EQ(reinterpret_cast<void*>(0x420ul), res); 227 void* res = realloc(temp, 0xFEED);
228 EXPECT_EQ(temp, res);
170 } 229 }
171 230
172 private: 231 private:
173 WaitableEvent* event_; 232 WaitableEvent* event_;
174 }; 233 };
175 234
176 AllocatorShimTest* AllocatorShimTest::instance_ = nullptr; 235 AllocatorShimTest* AllocatorShimTest::instance_ = nullptr;
177 236
178 AllocatorDispatch g_mock_dispatch = { 237 AllocatorDispatch g_mock_dispatch = {
179 &AllocatorShimTest::MockAlloc, /* alloc_function */ 238 &AllocatorShimTest::MockAlloc, /* alloc_function */
180 &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */ 239 &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */
181 &AllocatorShimTest::MockAllocAligned, /* alloc_aligned_function */ 240 &AllocatorShimTest::MockAllocAligned, /* alloc_aligned_function */
182 &AllocatorShimTest::MockRealloc, /* realloc_function */ 241 &AllocatorShimTest::MockRealloc, /* realloc_function */
183 &AllocatorShimTest::MockFree, /* free_function */ 242 &AllocatorShimTest::MockFree, /* free_function */
184 nullptr, /* next */ 243 &AllocatorShimTest::MockGetSizeEstimate, /* get_size_estimate_function */
244 &AllocatorShimTest::MockBatchMalloc, /* batch_malloc_function */
245 &AllocatorShimTest::MockBatchFree, /* batch_free_function */
246 &AllocatorShimTest::MockFreeDefiniteSize, /* free_definite_size_function */
247 nullptr, /* next */
185 }; 248 };
186 249
250 TEST_F(AllocatorShimTest, asdf) {
Primiano Tucci (use gerrit) 2017/02/01 22:07:10 I might be a naming freak, but this seems quite ea
erikchen 2017/02/01 23:10:03 Done.
251 }
187 TEST_F(AllocatorShimTest, InterceptLibcSymbols) { 252 TEST_F(AllocatorShimTest, InterceptLibcSymbols) {
188 InsertAllocatorDispatch(&g_mock_dispatch); 253 InsertAllocatorDispatch(&g_mock_dispatch);
189 254
190 void* alloc_ptr = malloc(19); 255 void* alloc_ptr = malloc(19);
191 ASSERT_NE(nullptr, alloc_ptr); 256 ASSERT_NE(nullptr, alloc_ptr);
192 ASSERT_GE(allocs_intercepted_by_size[19], 1u); 257 ASSERT_GE(allocs_intercepted_by_size[19], 1u);
193 258
194 void* zero_alloc_ptr = calloc(2, 23); 259 void* zero_alloc_ptr = calloc(2, 23);
195 ASSERT_NE(nullptr, zero_alloc_ptr); 260 ASSERT_NE(nullptr, zero_alloc_ptr);
196 ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u); 261 ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u);
197 262
198 #if !defined(OS_WIN) 263 #if !defined(OS_WIN)
199 void* memalign_ptr = memalign(128, 53); 264 const size_t kPageSize = base::GetPageSize();
200 ASSERT_NE(nullptr, memalign_ptr);
201 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
202 ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
203 ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
204
205 void* posix_memalign_ptr = nullptr; 265 void* posix_memalign_ptr = nullptr;
206 int res = posix_memalign(&posix_memalign_ptr, 256, 59); 266 int res = posix_memalign(&posix_memalign_ptr, 256, 59);
207 ASSERT_EQ(0, res); 267 ASSERT_EQ(0, res);
208 ASSERT_NE(nullptr, posix_memalign_ptr); 268 ASSERT_NE(nullptr, posix_memalign_ptr);
209 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256); 269 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256);
210 ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u); 270 ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u);
211 ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u); 271 ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u);
212 272
213 void* valloc_ptr = valloc(61); 273 void* valloc_ptr = valloc(61);
214 ASSERT_NE(nullptr, valloc_ptr); 274 ASSERT_NE(nullptr, valloc_ptr);
215 const size_t kPageSize = base::GetPageSize();
216 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize); 275 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize);
217 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u); 276 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
218 ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u); 277 ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u);
278 #endif // !OS_WIN
279
280 #if !defined(OS_WIN) && !defined(OS_MACOSX)
281 void* memalign_ptr = memalign(128, 53);
282 ASSERT_NE(nullptr, memalign_ptr);
283 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
284 ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
285 ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
219 286
220 void* pvalloc_ptr = pvalloc(67); 287 void* pvalloc_ptr = pvalloc(67);
221 ASSERT_NE(nullptr, pvalloc_ptr); 288 ASSERT_NE(nullptr, pvalloc_ptr);
222 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize); 289 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
223 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u); 290 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
224 // pvalloc rounds the size up to the next page. 291 // pvalloc rounds the size up to the next page.
225 ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u); 292 ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u);
226 #endif // OS_WIN 293 #endif // !OS_WIN && !OS_MACOSX
227 294
228 char* realloc_ptr = static_cast<char*>(realloc(nullptr, 71)); 295 char* realloc_ptr = static_cast<char*>(malloc(10));
229 ASSERT_NE(nullptr, realloc_ptr);
230 ASSERT_GE(reallocs_intercepted_by_size[71], 1u);
231 ASSERT_GE(reallocs_intercepted_by_addr[Hash(nullptr)], 1u);
232 strcpy(realloc_ptr, "foobar"); 296 strcpy(realloc_ptr, "foobar");
233 void* old_realloc_ptr = realloc_ptr; 297 void* old_realloc_ptr = realloc_ptr;
234 realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73)); 298 realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73));
235 ASSERT_GE(reallocs_intercepted_by_size[73], 1u); 299 ASSERT_GE(reallocs_intercepted_by_size[73], 1u);
236 ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u); 300 ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u);
237 ASSERT_EQ(0, strcmp(realloc_ptr, "foobar")); 301 ASSERT_EQ(0, strcmp(realloc_ptr, "foobar"));
238 302
239 free(alloc_ptr); 303 free(alloc_ptr);
240 ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u); 304 ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u);
241 305
242 free(zero_alloc_ptr); 306 free(zero_alloc_ptr);
243 ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u); 307 ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u);
244 308
245 #if !defined(OS_WIN) 309 #if !defined(OS_WIN) && !defined(OS_MACOSX)
246 free(memalign_ptr); 310 free(memalign_ptr);
247 ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u); 311 ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);
248 312
313 free(pvalloc_ptr);
314 ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
315 #endif // !OS_WIN && !OS_MACOSX
316
317 #if !defined(OS_WIN)
249 free(posix_memalign_ptr); 318 free(posix_memalign_ptr);
250 ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u); 319 ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u);
251 320
252 free(valloc_ptr); 321 free(valloc_ptr);
253 ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u); 322 ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u);
254 323 #endif // !OS_WIN
255 free(pvalloc_ptr);
256 ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
257 #endif // OS_WIN
258 324
259 free(realloc_ptr); 325 free(realloc_ptr);
260 ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u); 326 ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u);
261 327
262 RemoveAllocatorDispatchForTesting(&g_mock_dispatch); 328 RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
263 329
264 void* non_hooked_ptr = malloc(4095); 330 void* non_hooked_ptr = malloc(4095);
265 ASSERT_NE(nullptr, non_hooked_ptr); 331 ASSERT_NE(nullptr, non_hooked_ptr);
266 ASSERT_EQ(0u, allocs_intercepted_by_size[4095]); 332 ASSERT_EQ(0u, allocs_intercepted_by_size[4095]);
267 free(non_hooked_ptr); 333 free(non_hooked_ptr);
268 } 334 }
269 335
336 #if defined(OS_MACOSX)
337 TEST_F(AllocatorShimTest, InterceptLibcSymbolsBatchMallocFree) {
338 InsertAllocatorDispatch(&g_mock_dispatch);
339
340 unsigned count = 13;
341 std::vector<void*> results;
342 results.resize(count);
343 unsigned result_count = malloc_zone_batch_malloc(malloc_default_zone(), 99,
344 results.data(), count);
345 ASSERT_EQ(count, result_count);
346 ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);
347
348 std::vector<void*> results_copy(results);
349 malloc_zone_batch_free(malloc_default_zone(), results.data(), count);
350 for (void* result : results_copy) {
351 ASSERT_GE(batch_frees_intercepted_by_addr[Hash(result)], 1u);
352 }
353 RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
354 }
355
356 TEST_F(AllocatorShimTest, InterceptLibcSymbolsFreeDefiniteSize) {
357 InsertAllocatorDispatch(&g_mock_dispatch);
358
359 void* alloc_ptr = malloc(19);
360 ASSERT_NE(nullptr, alloc_ptr);
361 ASSERT_GE(allocs_intercepted_by_size[19], 1u);
362
363 ChromeMallocZone* default_zone =
364 reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
365 default_zone->free_definite_size(malloc_default_zone(), alloc_ptr, 19);
366 ASSERT_GE(free_definite_sizes_intercepted_by_size[19], 1u);
367 RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
368 }
369 #endif // defined(OS_MACOSX)
370
270 TEST_F(AllocatorShimTest, InterceptCppSymbols) { 371 TEST_F(AllocatorShimTest, InterceptCppSymbols) {
271 InsertAllocatorDispatch(&g_mock_dispatch); 372 InsertAllocatorDispatch(&g_mock_dispatch);
272 373
273 TestStruct1* new_ptr = new TestStruct1; 374 TestStruct1* new_ptr = new TestStruct1;
274 ASSERT_NE(nullptr, new_ptr); 375 ASSERT_NE(nullptr, new_ptr);
275 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u); 376 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u);
276 377
277 TestStruct1* new_array_ptr = new TestStruct1[3]; 378 TestStruct1* new_array_ptr = new TestStruct1[3];
278 ASSERT_NE(nullptr, new_array_ptr); 379 ASSERT_NE(nullptr, new_array_ptr);
279 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u); 380 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u);
(...skipping 17 matching lines...)
297 398
298 delete[] new_array_nt_ptr; 399 delete[] new_array_nt_ptr;
299 ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u); 400 ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u);
300 401
301 RemoveAllocatorDispatchForTesting(&g_mock_dispatch); 402 RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
302 } 403 }
303 404
304 // This test exercises the case of concurrent OOM failure, which would end up 405 // This test exercises the case of concurrent OOM failure, which would end up
305 // invoking std::new_handler concurrently. This is to cover the CallNewHandler() 406 // invoking std::new_handler concurrently. This is to cover the CallNewHandler()
306 // paths of allocator_shim.cc and smoke-test its thread safety. 407 // paths of allocator_shim.cc and smoke-test its thread safety.
307 // The test creates kNumThreads threads. Each of them does just a 408 // The test creates kNumThreads threads. Each of them mallocs some memory, and
308 // realloc(0x420). 409 // then does a realloc(<new memory>, 0xFEED).
309 // The shim intercepts such realloc and makes it fail only once on each thread. 410 // The shim intercepts such realloc and makes it fail only once on each thread.
310 // We expect to see exactly kNumThreads invocations of the new_handler. 411 // We expect to see exactly kNumThreads invocations of the new_handler.
311 TEST_F(AllocatorShimTest, NewHandlerConcurrency) { 412 TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
312 const int kNumThreads = 32; 413 const int kNumThreads = 32;
313 PlatformThreadHandle threads[kNumThreads]; 414 PlatformThreadHandle threads[kNumThreads];
314 415
315 // The WaitableEvent here is used to attempt to trigger all the threads at 416 // The WaitableEvent here is used to attempt to trigger all the threads at
316 // the same time, after they have been initialized. 417 // the same time, after they have been initialized.
317 WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL, 418 WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
318 WaitableEvent::InitialState::NOT_SIGNALED); 419 WaitableEvent::InitialState::NOT_SIGNALED);
(...skipping 15 matching lines...)
334 435
335 #if defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) 436 #if defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
336 TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) { 437 TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
337 ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle())); 438 ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
338 } 439 }
339 #endif // defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) 440 #endif // defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
340 441
341 } // namespace 442 } // namespace
342 } // namespace allocator 443 } // namespace allocator
343 } // namespace base 444 } // namespace base
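One closing note on the NewHandlerConcurrency test above: its contract is that a single forced allocation failure per thread produces exactly one std::new_handler invocation per thread, because the shim's failure path calls the installed handler and retries. Below is a standalone sketch of that fail-once/handler/retry contract using only standard <new> machinery; the real retry loop lives in allocator_shim.cc, so treat this as an illustration under stated assumptions, not that code.

#include <atomic>
#include <cstddef>
#include <cstdio>
#include <new>

std::atomic<int> g_handler_calls{0};
bool g_fail_next = true;  // Mirrors did_fail_realloc_0xfeed_once: fail once.

void CountingHandler() {
  ++g_handler_calls;  // The test's NewHandler() does the same, atomically.
}

// Mimics the shim's pattern: on failure, invoke the handler and retry.
void* AllocWithRetry(std::size_t size) {
  for (;;) {
    if (!g_fail_next)
      return ::operator new(size, std::nothrow);
    g_fail_next = false;  // Simulate the one-time sentinel failure.
    std::new_handler handler = std::get_new_handler();
    if (!handler)
      return nullptr;  // No handler installed: report the failure.
    handler();  // Let the handler react, then loop and retry.
  }
}

int main() {
  std::set_new_handler(&CountingHandler);
  void* p = AllocWithRetry(1);
  std::printf("handler calls: %d\n", g_handler_calls.load());  // Prints 1.
  ::operator delete(p);
}

With 32 threads each failing once, the same reasoning yields the kNumThreads handler invocations the test asserts.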