Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(156)

Side by Side Diff: base/allocator/allocator_shim_unittest.cc

Issue 2658723007: Hook up allocator shim on mac. (Closed)
Patch Set: Rebase. Created 3 years, 10 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
OLDNEW
1 // Copyright 2016 The Chromium Authors. All rights reserved. 1 // Copyright 2016 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "base/allocator/allocator_shim.h" 5 #include "base/allocator/allocator_shim.h"
6 6
7 #include <malloc.h>
8 #include <stdlib.h> 7 #include <stdlib.h>
9 #include <string.h> 8 #include <string.h>
10 9
11 #include <memory> 10 #include <memory>
12 #include <new> 11 #include <new>
13 #include <vector> 12 #include <vector>
14 13
15 #include "base/allocator/features.h" 14 #include "base/allocator/features.h"
16 #include "base/atomicops.h" 15 #include "base/atomicops.h"
17 #include "base/process/process_metrics.h" 16 #include "base/process/process_metrics.h"
18 #include "base/synchronization/waitable_event.h" 17 #include "base/synchronization/waitable_event.h"
19 #include "base/threading/platform_thread.h" 18 #include "base/threading/platform_thread.h"
20 #include "base/threading/thread_local.h" 19 #include "base/threading/thread_local.h"
20 #include "build/build_config.h"
21 #include "testing/gmock/include/gmock/gmock.h" 21 #include "testing/gmock/include/gmock/gmock.h"
22 #include "testing/gtest/include/gtest/gtest.h" 22 #include "testing/gtest/include/gtest/gtest.h"
23 23
24 #if defined(OS_WIN) 24 #if defined(OS_WIN)
25 #include <windows.h> 25 #include <windows.h>
26 #elif defined(OS_MACOSX)
27 #include <malloc/malloc.h>
26 #else 28 #else
29 #include <malloc.h>
30 #endif
31
32 #if !defined(OS_WIN)
27 #include <unistd.h> 33 #include <unistd.h>
28 #endif 34 #endif
29 35
30 // Some new Android NDKs (64 bit) do not expose (p)valloc anymore. These 36 // Some new Android NDKs (64 bit) do not expose (p)valloc anymore. These
31 // functions are implemented at the shim-layer level. 37 // functions are implemented at the shim-layer level.
32 #if defined(OS_ANDROID) 38 #if defined(OS_ANDROID)
33 extern "C" { 39 extern "C" {
34 void* valloc(size_t size); 40 void* valloc(size_t size);
35 void* pvalloc(size_t size); 41 void* pvalloc(size_t size);
36 } 42 }
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
76 if (alignment < kMaxSizeTracked) 82 if (alignment < kMaxSizeTracked)
77 ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]); 83 ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
78 } 84 }
79 return self->next->alloc_aligned_function(self->next, alignment, size); 85 return self->next->alloc_aligned_function(self->next, alignment, size);
80 } 86 }
81 87
82 static void* MockRealloc(const AllocatorDispatch* self, 88 static void* MockRealloc(const AllocatorDispatch* self,
83 void* address, 89 void* address,
84 size_t size) { 90 size_t size) {
85 if (instance_) { 91 if (instance_) {
86 // Address 0x420 is a special sentinel for the NewHandlerConcurrency test. 92 // Size 0xFEED is a special sentinel for the NewHandlerConcurrency test.
87 // The first time (but only the first one) it is hit it fails, causing the 93 // Hitting it for the first time will cause a failure, causing the
Primiano Tucci (use gerrit) 2017/01/28 05:10:02 I'd remove the "always", as it is confusing. The
erikchen 2017/01/31 02:21:22 Done.
88 // invocation of the std::new_handler. 94 // invocation of the std::new_handler.
89 if (address == reinterpret_cast<void*>(0x420)) { 95 if (size == 0xFEED) {
90 if (!instance_->did_fail_realloc_0x420_once->Get()) { 96 if (!instance_->did_fail_realloc_0xfeed_once->Get()) {
91 instance_->did_fail_realloc_0x420_once->Set(true); 97 instance_->did_fail_realloc_0xfeed_once->Set(true);
92 return nullptr; 98 return nullptr;
93 } else { 99 } else {
94 return reinterpret_cast<void*>(0x420ul); 100 return address;
95 } 101 }
96 } 102 }
97 103
98 if (size < kMaxSizeTracked) 104 if (size < kMaxSizeTracked)
99 ++(instance_->reallocs_intercepted_by_size[size]); 105 ++(instance_->reallocs_intercepted_by_size[size]);
100 ++instance_->reallocs_intercepted_by_addr[Hash(address)]; 106 ++instance_->reallocs_intercepted_by_addr[Hash(address)];
101 } 107 }
102 return self->next->realloc_function(self->next, address, size); 108 return self->next->realloc_function(self->next, address, size);
103 } 109 }
104 110
105 static void MockFree(const AllocatorDispatch* self, void* address) { 111 static void MockFree(const AllocatorDispatch* self, void* address) {
106 if (instance_) { 112 if (instance_) {
107 ++instance_->frees_intercepted_by_addr[Hash(address)]; 113 ++instance_->frees_intercepted_by_addr[Hash(address)];
108 } 114 }
109 self->next->free_function(self->next, address); 115 self->next->free_function(self->next, address);
110 } 116 }
111 117
118 static size_t MockGetSizeEstimate(const AllocatorDispatch* self,
119 void* address) {
120 return self->next->get_size_estimate_function(self->next, address);
121 }
122
123 static unsigned MockBatchMalloc(const AllocatorDispatch* self,
124 size_t size,
125 void** results,
126 unsigned num_requested) {
127 if (instance_) {
128 instance_->batch_mallocs_intercepted_by_size[size] =
129 instance_->batch_mallocs_intercepted_by_size[size] + num_requested;
130 }
131 return self->next->batch_malloc_function(self->next, size, results,
132 num_requested);
133 }
134
135 static void MockBatchFree(const AllocatorDispatch* self,
136 void** to_be_freed,
137 unsigned num_to_be_freed) {
138 if (instance_) {
139 for (unsigned i = 0; i < num_to_be_freed; ++i) {
140 ++instance_->batch_frees_intercepted_by_addr[Hash(to_be_freed[i])];
141 }
142 }
143 self->next->batch_free_function(self->next, to_be_freed, num_to_be_freed);
144 }
145
112 static void NewHandler() { 146 static void NewHandler() {
113 if (!instance_) 147 if (!instance_)
114 return; 148 return;
115 subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1); 149 subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1);
116 } 150 }
117 151
118 int32_t GetNumberOfNewHandlerCalls() { 152 int32_t GetNumberOfNewHandlerCalls() {
119 return subtle::Acquire_Load(&instance_->num_new_handler_calls); 153 return subtle::Acquire_Load(&instance_->num_new_handler_calls);
120 } 154 }
121 155
122 void SetUp() override { 156 void SetUp() override {
123 const size_t array_size = kMaxSizeTracked * sizeof(size_t); 157 const size_t array_size = kMaxSizeTracked * sizeof(size_t);
124 memset(&allocs_intercepted_by_size, 0, array_size); 158 memset(&allocs_intercepted_by_size, 0, array_size);
125 memset(&zero_allocs_intercepted_by_size, 0, array_size); 159 memset(&zero_allocs_intercepted_by_size, 0, array_size);
126 memset(&aligned_allocs_intercepted_by_size, 0, array_size); 160 memset(&aligned_allocs_intercepted_by_size, 0, array_size);
127 memset(&aligned_allocs_intercepted_by_alignment, 0, array_size); 161 memset(&aligned_allocs_intercepted_by_alignment, 0, array_size);
128 memset(&reallocs_intercepted_by_size, 0, array_size); 162 memset(&reallocs_intercepted_by_size, 0, array_size);
129 memset(&frees_intercepted_by_addr, 0, array_size); 163 memset(&frees_intercepted_by_addr, 0, array_size);
130 did_fail_realloc_0x420_once.reset(new ThreadLocalBoolean()); 164 memset(&batch_mallocs_intercepted_by_size, 0, array_size);
165 memset(&batch_frees_intercepted_by_addr, 0, array_size);
166 did_fail_realloc_0xfeed_once.reset(new ThreadLocalBoolean());
131 subtle::Release_Store(&num_new_handler_calls, 0); 167 subtle::Release_Store(&num_new_handler_calls, 0);
132 instance_ = this; 168 instance_ = this;
133 } 169 }
134 170
135 void TearDown() override { instance_ = nullptr; } 171 void TearDown() override { instance_ = nullptr; }
136 172
137 protected: 173 protected:
138 size_t allocs_intercepted_by_size[kMaxSizeTracked]; 174 size_t allocs_intercepted_by_size[kMaxSizeTracked];
139 size_t zero_allocs_intercepted_by_size[kMaxSizeTracked]; 175 size_t zero_allocs_intercepted_by_size[kMaxSizeTracked];
140 size_t aligned_allocs_intercepted_by_size[kMaxSizeTracked]; 176 size_t aligned_allocs_intercepted_by_size[kMaxSizeTracked];
141 size_t aligned_allocs_intercepted_by_alignment[kMaxSizeTracked]; 177 size_t aligned_allocs_intercepted_by_alignment[kMaxSizeTracked];
142 size_t reallocs_intercepted_by_size[kMaxSizeTracked]; 178 size_t reallocs_intercepted_by_size[kMaxSizeTracked];
143 size_t reallocs_intercepted_by_addr[kMaxSizeTracked]; 179 size_t reallocs_intercepted_by_addr[kMaxSizeTracked];
144 size_t frees_intercepted_by_addr[kMaxSizeTracked]; 180 size_t frees_intercepted_by_addr[kMaxSizeTracked];
145 std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0x420_once; 181 size_t batch_mallocs_intercepted_by_size[kMaxSizeTracked];
182 size_t batch_frees_intercepted_by_addr[kMaxSizeTracked];
183 std::unique_ptr<ThreadLocalBoolean> did_fail_realloc_0xfeed_once;
146 subtle::Atomic32 num_new_handler_calls; 184 subtle::Atomic32 num_new_handler_calls;
147 185
148 private: 186 private:
149 static AllocatorShimTest* instance_; 187 static AllocatorShimTest* instance_;
150 }; 188 };
151 189
152 struct TestStruct1 { 190 struct TestStruct1 {
153 uint32_t ignored; 191 uint32_t ignored;
154 uint8_t ignored_2; 192 uint8_t ignored_2;
155 }; 193 };
156 194
157 struct TestStruct2 { 195 struct TestStruct2 {
158 uint64_t ignored; 196 uint64_t ignored;
159 uint8_t ignored_3; 197 uint8_t ignored_3;
160 }; 198 };
161 199
162 class ThreadDelegateForNewHandlerTest : public PlatformThread::Delegate { 200 class ThreadDelegateForNewHandlerTest : public PlatformThread::Delegate {
163 public: 201 public:
164 ThreadDelegateForNewHandlerTest(WaitableEvent* event) : event_(event) {} 202 ThreadDelegateForNewHandlerTest(WaitableEvent* event) : event_(event) {}
165 203
166 void ThreadMain() override { 204 void ThreadMain() override {
167 event_->Wait(); 205 event_->Wait();
168 void* res = realloc(reinterpret_cast<void*>(0x420ul), 1); 206 void* temp = malloc(1);
169 EXPECT_EQ(reinterpret_cast<void*>(0x420ul), res); 207 void* res = realloc(temp, 0xFEED);
208 EXPECT_EQ(temp, res);
170 } 209 }
171 210
172 private: 211 private:
173 WaitableEvent* event_; 212 WaitableEvent* event_;
174 }; 213 };
175 214
176 AllocatorShimTest* AllocatorShimTest::instance_ = nullptr; 215 AllocatorShimTest* AllocatorShimTest::instance_ = nullptr;
177 216
178 AllocatorDispatch g_mock_dispatch = { 217 AllocatorDispatch g_mock_dispatch = {
179 &AllocatorShimTest::MockAlloc, /* alloc_function */ 218 &AllocatorShimTest::MockAlloc, /* alloc_function */
180 &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */ 219 &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */
181 &AllocatorShimTest::MockAllocAligned, /* alloc_aligned_function */ 220 &AllocatorShimTest::MockAllocAligned, /* alloc_aligned_function */
182 &AllocatorShimTest::MockRealloc, /* realloc_function */ 221 &AllocatorShimTest::MockRealloc, /* realloc_function */
183 &AllocatorShimTest::MockFree, /* free_function */ 222 &AllocatorShimTest::MockFree, /* free_function */
184 nullptr, /* next */ 223 &AllocatorShimTest::MockGetSizeEstimate, /* get_size_estimate_function */
224 &AllocatorShimTest::MockBatchMalloc, /* batch_malloc_function */
225 &AllocatorShimTest::MockBatchFree, /* batch_free_function */
226 nullptr, /* next */
185 }; 227 };
186 228
187 TEST_F(AllocatorShimTest, InterceptLibcSymbols) { 229 TEST_F(AllocatorShimTest, InterceptLibcSymbols) {
188 InsertAllocatorDispatch(&g_mock_dispatch); 230 InsertAllocatorDispatch(&g_mock_dispatch);
189 231
190 void* alloc_ptr = malloc(19); 232 void* alloc_ptr = malloc(19);
191 ASSERT_NE(nullptr, alloc_ptr); 233 ASSERT_NE(nullptr, alloc_ptr);
192 ASSERT_GE(allocs_intercepted_by_size[19], 1u); 234 ASSERT_GE(allocs_intercepted_by_size[19], 1u);
193 235
194 void* zero_alloc_ptr = calloc(2, 23); 236 void* zero_alloc_ptr = calloc(2, 23);
195 ASSERT_NE(nullptr, zero_alloc_ptr); 237 ASSERT_NE(nullptr, zero_alloc_ptr);
196 ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u); 238 ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u);
197 239
198 #if !defined(OS_WIN) 240 #if !defined(OS_WIN)
199 void* memalign_ptr = memalign(128, 53); 241 const size_t kPageSize = base::GetPageSize();
200 ASSERT_NE(nullptr, memalign_ptr);
201 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
202 ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
203 ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
204
205 void* posix_memalign_ptr = nullptr; 242 void* posix_memalign_ptr = nullptr;
206 int res = posix_memalign(&posix_memalign_ptr, 256, 59); 243 int res = posix_memalign(&posix_memalign_ptr, 256, 59);
207 ASSERT_EQ(0, res); 244 ASSERT_EQ(0, res);
208 ASSERT_NE(nullptr, posix_memalign_ptr); 245 ASSERT_NE(nullptr, posix_memalign_ptr);
209 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256); 246 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256);
210 ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u); 247 ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u);
211 ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u); 248 ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u);
212 249
213 void* valloc_ptr = valloc(61); 250 void* valloc_ptr = valloc(61);
214 ASSERT_NE(nullptr, valloc_ptr); 251 ASSERT_NE(nullptr, valloc_ptr);
215 const size_t kPageSize = base::GetPageSize();
216 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize); 252 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize);
217 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u); 253 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
218 ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u); 254 ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u);
255 #endif // !OS_WIN
256
257 #if !defined(OS_WIN) && !defined(OS_MACOSX)
258 void* memalign_ptr = memalign(128, 53);
259 ASSERT_NE(nullptr, memalign_ptr);
260 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
261 ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
262 ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);
219 263
220 void* pvalloc_ptr = pvalloc(67); 264 void* pvalloc_ptr = pvalloc(67);
221 ASSERT_NE(nullptr, pvalloc_ptr); 265 ASSERT_NE(nullptr, pvalloc_ptr);
222 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize); 266 ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
223 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u); 267 ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
224 // pvalloc rounds the size up to the next page. 268 // pvalloc rounds the size up to the next page.
225 ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u); 269 ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u);
226 #endif // OS_WIN 270 #endif // !OS_WIN && !OS_MACOSX
227 271
228 char* realloc_ptr = static_cast<char*>(realloc(nullptr, 71)); 272 char* realloc_ptr = static_cast<char*>(malloc(10));
229 ASSERT_NE(nullptr, realloc_ptr);
230 ASSERT_GE(reallocs_intercepted_by_size[71], 1u);
231 ASSERT_GE(reallocs_intercepted_by_addr[Hash(nullptr)], 1u);
232 strcpy(realloc_ptr, "foobar"); 273 strcpy(realloc_ptr, "foobar");
233 void* old_realloc_ptr = realloc_ptr; 274 void* old_realloc_ptr = realloc_ptr;
234 realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73)); 275 realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73));
235 ASSERT_GE(reallocs_intercepted_by_size[73], 1u); 276 ASSERT_GE(reallocs_intercepted_by_size[73], 1u);
236 ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u); 277 ASSERT_GE(reallocs_intercepted_by_addr[Hash(old_realloc_ptr)], 1u);
237 ASSERT_EQ(0, strcmp(realloc_ptr, "foobar")); 278 ASSERT_EQ(0, strcmp(realloc_ptr, "foobar"));
238 279
239 free(alloc_ptr); 280 free(alloc_ptr);
240 ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u); 281 ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u);
241 282
242 free(zero_alloc_ptr); 283 free(zero_alloc_ptr);
243 ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u); 284 ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u);
244 285
245 #if !defined(OS_WIN) 286 #if !defined(OS_WIN) && !defined(OS_MACOSX)
246 free(memalign_ptr); 287 free(memalign_ptr);
247 ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u); 288 ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);
248 289
290 free(pvalloc_ptr);
291 ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
292 #endif // !OS_WIN && !OS_MACOSX
293
294 #if !defined(OS_WIN)
249 free(posix_memalign_ptr); 295 free(posix_memalign_ptr);
250 ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u); 296 ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u);
251 297
252 free(valloc_ptr); 298 free(valloc_ptr);
253 ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u); 299 ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u);
254 300 #endif // !OS_WIN
255 free(pvalloc_ptr);
256 ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);
257 #endif // OS_WIN
258 301
259 free(realloc_ptr); 302 free(realloc_ptr);
260 ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u); 303 ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u);
261 304
262 RemoveAllocatorDispatchForTesting(&g_mock_dispatch); 305 RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
263 306
264 void* non_hooked_ptr = malloc(4095); 307 void* non_hooked_ptr = malloc(4095);
265 ASSERT_NE(nullptr, non_hooked_ptr); 308 ASSERT_NE(nullptr, non_hooked_ptr);
266 ASSERT_EQ(0u, allocs_intercepted_by_size[4095]); 309 ASSERT_EQ(0u, allocs_intercepted_by_size[4095]);
267 free(non_hooked_ptr); 310 free(non_hooked_ptr);
268 } 311 }
269 312
313 #if defined(OS_MACOSX)
314 TEST_F(AllocatorShimTest, InterceptLibcSymbolsBatchMallocFree) {
315 InsertAllocatorDispatch(&g_mock_dispatch);
316
317 unsigned count = 13;
318 std::vector<void*> results;
319 results.resize(count);
320 unsigned result_count = malloc_zone_batch_malloc(malloc_default_zone(), 99,
321 results.data(), count);
322 ASSERT_EQ(count, result_count);
323 ASSERT_EQ(count, batch_mallocs_intercepted_by_size[99]);
324
325 std::vector<void*> results_copy(results);
326 malloc_zone_batch_free(malloc_default_zone(), results.data(), count);
327 for (void* result : results_copy) {
328 ASSERT_GE(batch_frees_intercepted_by_addr[Hash(result)], 1u);
329 }
330 RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
331 }
332 #endif // defined(OS_MACOSX)
333
270 TEST_F(AllocatorShimTest, InterceptCppSymbols) { 334 TEST_F(AllocatorShimTest, InterceptCppSymbols) {
271 InsertAllocatorDispatch(&g_mock_dispatch); 335 InsertAllocatorDispatch(&g_mock_dispatch);
272 336
273 TestStruct1* new_ptr = new TestStruct1; 337 TestStruct1* new_ptr = new TestStruct1;
274 ASSERT_NE(nullptr, new_ptr); 338 ASSERT_NE(nullptr, new_ptr);
275 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u); 339 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u);
276 340
277 TestStruct1* new_array_ptr = new TestStruct1[3]; 341 TestStruct1* new_array_ptr = new TestStruct1[3];
278 ASSERT_NE(nullptr, new_array_ptr); 342 ASSERT_NE(nullptr, new_array_ptr);
279 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u); 343 ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u);
(...skipping 17 matching lines...) Expand all
297 361
298 delete[] new_array_nt_ptr; 362 delete[] new_array_nt_ptr;
299 ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u); 363 ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u);
300 364
301 RemoveAllocatorDispatchForTesting(&g_mock_dispatch); 365 RemoveAllocatorDispatchForTesting(&g_mock_dispatch);
302 } 366 }
303 367
304 // This test exercises the case of concurrent OOM failure, which would end up 368 // This test exercises the case of concurrent OOM failure, which would end up
305 // invoking std::new_handler concurrently. This is to cover the CallNewHandler() 369 // invoking std::new_handler concurrently. This is to cover the CallNewHandler()
306 // paths of allocator_shim.cc and smoke-test its thread safety. 370 // paths of allocator_shim.cc and smoke-test its thread safety.
307 // The test creates kNumThreads threads. Each of them does just a 371 // The test creates kNumThreads threads. Each of them mallocs some memory, and
308 // realloc(0x420). 372 // then does a realloc(<new memory>, 0xFEED).
309 // The shim intercepts such realloc and makes it fail only once on each thread. 373 // The shim intercepts such realloc and makes it fail only once on each thread.
310 // We expect to see exactly kNumThreads invocations of the new_handler. 374 // We expect to see exactly kNumThreads invocations of the new_handler.
311 TEST_F(AllocatorShimTest, NewHandlerConcurrency) { 375 TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
312 const int kNumThreads = 32; 376 const int kNumThreads = 32;
313 PlatformThreadHandle threads[kNumThreads]; 377 PlatformThreadHandle threads[kNumThreads];
314 378
315 // The WaitableEvent here is used to attempt to trigger all the threads at 379 // The WaitableEvent here is used to attempt to trigger all the threads at
316 // the same time, after they have been initialized. 380 // the same time, after they have been initialized.
317 WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL, 381 WaitableEvent event(WaitableEvent::ResetPolicy::MANUAL,
318 WaitableEvent::InitialState::NOT_SIGNALED); 382 WaitableEvent::InitialState::NOT_SIGNALED);
(...skipping 15 matching lines...) Expand all
334 398
335 #if defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) 399 #if defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
336 TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) { 400 TEST_F(AllocatorShimTest, ShimReplacesCRTHeapWhenEnabled) {
337 ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle())); 401 ASSERT_NE(::GetProcessHeap(), reinterpret_cast<HANDLE>(_get_heap_handle()));
338 } 402 }
339 #endif // defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM) 403 #endif // defined(OS_WIN) && BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
340 404
341 } // namespace 405 } // namespace
342 } // namespace allocator 406 } // namespace allocator
343 } // namespace base 407 } // namespace base
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698