Chromium Code Reviews
// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/allocator/allocator_shim.h"

#include <malloc.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

#include <new>
#include <vector>

#include "base/atomicops.h"
#include "base/memory/scoped_ptr.h"
#include "base/synchronization/waitable_event.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local.h"
#include "testing/gmock/include/gmock/gmock.h"
#include "testing/gtest/include/gtest/gtest.h"

namespace base {
namespace allocator {
namespace {

using testing::MockFunction;
using testing::_;

class AllocatorShimTest : public testing::Test {
 public:
  static const size_t kMaxSizeTracked = 8192;
  AllocatorShimTest() : testing::Test() {}

  static size_t Hash(const void* ptr) {
    return reinterpret_cast<uintptr_t>(ptr) % kMaxSizeTracked;
  }

  static void* MockAlloc(size_t size, const AllocatorDispatch* self) {
    if (instance_ && size < kMaxSizeTracked)
      ++(instance_->allocs_intercepted_by_size[size]);
    return self->next->alloc_function(size, self->next);
  }

  static void* MockAllocZeroInit(size_t n,
                                 size_t size,
                                 const AllocatorDispatch* self) {
    const size_t real_size = n * size;
    if (instance_ && real_size < kMaxSizeTracked)
      ++(instance_->zero_allocs_intercepted_by_size[real_size]);
    return self->next->alloc_zero_initialized_function(n, size, self->next);
  }

  static void* MockAllocAligned(size_t alignment,
                                size_t size,
                                const AllocatorDispatch* self) {
    if (instance_) {
      if (size < kMaxSizeTracked)
        ++(instance_->aligned_allocs_intercepted_by_size[size]);
      if (alignment < kMaxSizeTracked)
        ++(instance_->aligned_allocs_intercepted_by_alignment[alignment]);
    }
    return self->next->alloc_aligned_function(alignment, size, self->next);
  }

  static void* MockRealloc(void* address,
                           size_t size,
                           const AllocatorDispatch* self) {
    if (instance_) {
      // Address 0x42 is a special sentinel for the NewHandlerConcurrency test.
      // The first time it is hit (and only the first time), it fails, causing
      // the invocation of the std::new_handler.
      if (address == reinterpret_cast<void*>(0x42)) {
        if (!instance_->did_fail_realloc_0x42_once->Get()) {
          instance_->did_fail_realloc_0x42_once->Set(true);
          return nullptr;
        } else {
          return reinterpret_cast<void*>(0x42ul);
        }
      }

      if (size < kMaxSizeTracked)
        ++(instance_->reallocs_intercepted_by_size[size]);
      ++instance_->reallocs_intercepted_by_addr[Hash(address)];
    }
    return self->next->realloc_function(address, size, self->next);
  }

  static void MockFree(void* address, const AllocatorDispatch* self) {
    if (instance_) {
      ++instance_->frees_intercepted_by_addr[Hash(address)];
    }
    self->next->free_function(address, self->next);
  }

  static void NewHandler() {
    if (!instance_)
      return;
    subtle::Barrier_AtomicIncrement(&instance_->num_new_handler_calls, 1);
  }

  int32_t GetNumberOfNewHandlerCalls() {
    return subtle::Acquire_Load(&instance_->num_new_handler_calls);
  }

  void SetUp() override {
    const size_t array_size = kMaxSizeTracked * sizeof(size_t);
    memset(&allocs_intercepted_by_size, 0, array_size);
    memset(&zero_allocs_intercepted_by_size, 0, array_size);
    memset(&aligned_allocs_intercepted_by_size, 0, array_size);
    memset(&aligned_allocs_intercepted_by_alignment, 0, array_size);
    memset(&reallocs_intercepted_by_size, 0, array_size);
    memset(&frees_intercepted_by_addr, 0, array_size);
    did_fail_realloc_0x42_once.reset(new ThreadLocalBoolean());
    subtle::Release_Store(&num_new_handler_calls, 0);
    instance_ = this;
  }

  void TearDown() override { instance_ = nullptr; }

 protected:
  size_t allocs_intercepted_by_size[kMaxSizeTracked];
  size_t zero_allocs_intercepted_by_size[kMaxSizeTracked];
  size_t aligned_allocs_intercepted_by_size[kMaxSizeTracked];
  size_t aligned_allocs_intercepted_by_alignment[kMaxSizeTracked];
  size_t reallocs_intercepted_by_size[kMaxSizeTracked];
  size_t reallocs_intercepted_by_addr[kMaxSizeTracked];
  size_t frees_intercepted_by_addr[kMaxSizeTracked];
  scoped_ptr<ThreadLocalBoolean> did_fail_realloc_0x42_once;
  subtle::Atomic32 num_new_handler_calls;

 private:
  static AllocatorShimTest* instance_;
};

struct TestStruct1 {
  uint32_t ignored;
  uint8_t ignored_2;
};

struct TestStruct2 {
  uint64_t ignored;
  uint8_t ignored_3;
};

class ThreadDelegateForNewHandlerTest : public PlatformThread::Delegate {
 public:
  ThreadDelegateForNewHandlerTest(WaitableEvent* event) : event_(event) {}

  void ThreadMain() override {
    event_->Wait();
    void* res = realloc(reinterpret_cast<void*>(0x42ul), 1);
    EXPECT_EQ(0x42u, reinterpret_cast<uintptr_t>(res));
  }

 private:
  WaitableEvent* event_;
};

AllocatorShimTest* AllocatorShimTest::instance_ = nullptr;

AllocatorDispatch g_mock_dispatch = {
    &AllocatorShimTest::MockAlloc,         /* alloc_function */
    &AllocatorShimTest::MockAllocZeroInit, /* alloc_zero_initialized_function */
    &AllocatorShimTest::MockAllocAligned,  /* alloc_aligned_function */
    &AllocatorShimTest::MockRealloc,       /* realloc_function */
    &AllocatorShimTest::MockFree,          /* free_function */
    nullptr,                               /* next */
};
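
Each mock above observes the call and then forwards it through self->next, so installed dispatches form a singly linked chain ending at the default allocator. Below is a minimal sketch of the insertion step that makes that forwarding work; g_chain_head and g_default_dispatch are hypothetical stand-ins, and the real bookkeeping lives in allocator_shim.cc (which performs the swap with atomic operations):

    // Hypothetical sketch, not the actual allocator_shim.cc implementation.
    AllocatorDispatch* g_chain_head = &g_default_dispatch;

    void InsertAllocatorDispatchSketch(AllocatorDispatch* dispatch) {
      dispatch->next = g_chain_head;  // New hook forwards to the old head...
      g_chain_head = dispatch;        // ...and becomes the new entry point.
    }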

TEST_F(AllocatorShimTest, InterceptLibcSymbols) {
  const size_t kPageSize = sysconf(_SC_PAGESIZE);
  InsertAllocatorDispatch(&g_mock_dispatch);

  void* alloc_ptr = malloc(19);
  ASSERT_NE(nullptr, alloc_ptr);
  ASSERT_GE(allocs_intercepted_by_size[19], 1u);

  void* zero_alloc_ptr = calloc(2, 23);
  ASSERT_NE(nullptr, zero_alloc_ptr);
  ASSERT_GE(zero_allocs_intercepted_by_size[2 * 23], 1u);

  void* memalign_ptr = memalign(128, 53);
  ASSERT_NE(nullptr, memalign_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(memalign_ptr) % 128);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[128], 1u);
  ASSERT_GE(aligned_allocs_intercepted_by_size[53], 1u);

  void* posix_memalign_ptr = nullptr;
  int res = posix_memalign(&posix_memalign_ptr, 256, 59);
  ASSERT_EQ(0, res);
  ASSERT_NE(nullptr, posix_memalign_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(posix_memalign_ptr) % 256);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[256], 1u);
  ASSERT_GE(aligned_allocs_intercepted_by_size[59], 1u);

  void* valloc_ptr = valloc(61);
  ASSERT_NE(nullptr, valloc_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(valloc_ptr) % kPageSize);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
  ASSERT_GE(aligned_allocs_intercepted_by_size[61], 1u);

  void* pvalloc_ptr = pvalloc(67);
  ASSERT_NE(nullptr, pvalloc_ptr);
  ASSERT_EQ(0u, reinterpret_cast<uintptr_t>(pvalloc_ptr) % kPageSize);
  ASSERT_GE(aligned_allocs_intercepted_by_alignment[kPageSize], 1u);
  // pvalloc rounds the size up to the next page.
  ASSERT_GE(aligned_allocs_intercepted_by_size[kPageSize], 1u);

  char* realloc_ptr = static_cast<char*>(realloc(nullptr, 71));
  ASSERT_NE(nullptr, realloc_ptr);
  ASSERT_GE(reallocs_intercepted_by_size[71], 1u);
  ASSERT_GE(reallocs_intercepted_by_addr[Hash(nullptr)], 1u);
  strcpy(realloc_ptr, "foobar");
  realloc_ptr = static_cast<char*>(realloc(realloc_ptr, 73));
  ASSERT_GE(reallocs_intercepted_by_size[73], 1u);
  ASSERT_GE(reallocs_intercepted_by_addr[Hash(realloc_ptr)], 1u);
  ASSERT_EQ(0, strcmp(realloc_ptr, "foobar"));

  free(alloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(alloc_ptr)], 1u);

  free(zero_alloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(zero_alloc_ptr)], 1u);

  free(memalign_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(memalign_ptr)], 1u);

  free(posix_memalign_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(posix_memalign_ptr)], 1u);

  free(valloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(valloc_ptr)], 1u);

  free(pvalloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(pvalloc_ptr)], 1u);

  free(realloc_ptr);
  ASSERT_GE(frees_intercepted_by_addr[Hash(realloc_ptr)], 1u);

  RemoveAllocatorDispatch(&g_mock_dispatch);

  void* non_hooked_ptr = malloc(4095);
  ASSERT_NE(nullptr, non_hooked_ptr);
  ASSERT_EQ(0u, allocs_intercepted_by_size[4095]);
  free(non_hooked_ptr);
}

TEST_F(AllocatorShimTest, InterceptCppSymbols) {
  InsertAllocatorDispatch(&g_mock_dispatch);

  TestStruct1* new_ptr = new TestStruct1;
  ASSERT_NE(nullptr, new_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1)], 1u);

  TestStruct1* new_array_ptr = new TestStruct1[3];
  ASSERT_NE(nullptr, new_array_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct1) * 3], 1u);

  TestStruct2* new_nt_ptr = new (std::nothrow) TestStruct2;
  ASSERT_NE(nullptr, new_nt_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2)], 1u);

  TestStruct2* new_array_nt_ptr = new TestStruct2[3];
  ASSERT_NE(nullptr, new_array_nt_ptr);
  ASSERT_GE(allocs_intercepted_by_size[sizeof(TestStruct2) * 3], 1u);

  delete new_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_ptr)], 1u);

  delete[] new_array_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_ptr)], 1u);

  delete new_nt_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_nt_ptr)], 1u);

  delete[] new_array_nt_ptr;
  ASSERT_GE(frees_intercepted_by_addr[Hash(new_array_nt_ptr)], 1u);

  RemoveAllocatorDispatch(&g_mock_dispatch);
}

// This test exercises the case of concurrent OOM failure, which would end up
// invoking std::new_handler concurrently. This is to cover the CallNewHandler()
// paths of allocator_shim.cc and smoke-test its thread safety.
Nico
2016/03/08 03:08:42
nice!

Primiano Tucci (use gerrit)
2016/03/08 20:54:05
;-)
// The test creates kNumThreads threads. Each of them just does a realloc(0x42).
// The shim intercepts that realloc and makes it fail only once on each thread.
// We expect to see exactly kNumThreads invocations of the new_handler.
TEST_F(AllocatorShimTest, NewHandlerConcurrency) {
  const int kNumThreads = 32;
  PlatformThreadHandle threads[kNumThreads];

  // The WaitableEvent here is used to attempt to trigger all the threads at
  // the same time, after they have been initialized.
  WaitableEvent event(true /* manual_reset */, false /* initially_signaled */);
Nico
2016/03/08 03:08:42
nit: use /*manual_reset=*/true, /*initially_signaled=*/false

Primiano Tucci (use gerrit)
2016/03/08 20:54:05
Ahhh. Suddenly all that code that previously looke
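For reference, the style suggested in the nit, applied to the constructor call above, would read as follows (a sketch of the reviewer's suggestion, not the code as landed):

    WaitableEvent event(/*manual_reset=*/true, /*initially_signaled=*/false);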

  ThreadDelegateForNewHandlerTest mock_thread_main(&event);

  for (int i = 0; i < kNumThreads; ++i)
    PlatformThread::Create(0, &mock_thread_main, &threads[i]);

  std::set_new_handler(&AllocatorShimTest::NewHandler);
  SetCallNewHandlerOnMallocFailure(true);  // It's going to fail on realloc().
  InsertAllocatorDispatch(&g_mock_dispatch);
  event.Signal();
  for (int i = 0; i < kNumThreads; ++i)
    PlatformThread::Join(threads[i]);
  RemoveAllocatorDispatch(&g_mock_dispatch);
  ASSERT_EQ(kNumThreads, GetNumberOfNewHandlerCalls());
}

}  // namespace
}  // namespace allocator
}  // namespace base
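
Taken together, the tests exercise the intended client pattern: build an AllocatorDispatch whose hooks forward through self->next, install it with InsertAllocatorDispatch(), exercise the allocator, and remove it when done. A condensed sketch of that pattern, assuming the base::allocator namespace and the dispatch layout used above (the Forward* hooks and ObserveOneAllocation() are illustrative names, not part of this CL):

    #include <stdlib.h>

    #include "base/allocator/allocator_shim.h"

    namespace {

    using base::allocator::AllocatorDispatch;

    // Pure pass-through hooks: do nothing of their own, just forward each
    // call to the next dispatch so the real allocator services it.
    void* ForwardAlloc(size_t size, const AllocatorDispatch* self) {
      return self->next->alloc_function(size, self->next);
    }
    void* ForwardAllocZeroInit(size_t n,
                               size_t size,
                               const AllocatorDispatch* self) {
      return self->next->alloc_zero_initialized_function(n, size, self->next);
    }
    void* ForwardAllocAligned(size_t alignment,
                              size_t size,
                              const AllocatorDispatch* self) {
      return self->next->alloc_aligned_function(alignment, size, self->next);
    }
    void* ForwardRealloc(void* address,
                         size_t size,
                         const AllocatorDispatch* self) {
      return self->next->realloc_function(address, size, self->next);
    }
    void ForwardFree(void* address, const AllocatorDispatch* self) {
      self->next->free_function(address, self->next);
    }

    AllocatorDispatch g_observer = {
        &ForwardAlloc,         /* alloc_function */
        &ForwardAllocZeroInit, /* alloc_zero_initialized_function */
        &ForwardAllocAligned,  /* alloc_aligned_function */
        &ForwardRealloc,       /* realloc_function */
        &ForwardFree,          /* free_function */
        nullptr,               /* next: wired up on insertion */
    };

    }  // namespace

    // Illustrative driver showing the install/exercise/remove lifecycle.
    void ObserveOneAllocation() {
      base::allocator::InsertAllocatorDispatch(&g_observer);
      void* ptr = malloc(19);  // Routed through g_observer, then onward.
      free(ptr);
      base::allocator::RemoveAllocatorDispatch(&g_observer);
    }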