// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include <assert.h>    // for assert()
#include <stdio.h>
#include <stdlib.h>
#include <string.h>    // for memset()
#include <algorithm>   // for min()
#include <new>         // for std::set_new_handler(), std::bad_alloc
#include "base/atomicops.h"
#include "testing/gtest/include/gtest/gtest.h"

// Number of bits in a size_t.
static const int kSizeBits = 8 * sizeof(size_t);
// The maximum size of a size_t.
static const size_t kMaxSize = ~static_cast<size_t>(0);
// Maximum positive size of a size_t if it were signed.
static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits-1)) - 1);
// An allocation size which is not too big to be reasonable.
static const size_t kNotTooBig = 100000;
// An allocation size which is just too big.
static const size_t kTooBig = ~static_cast<size_t>(0);

namespace {

using std::min;

// Fill a buffer of the specified size with a predetermined pattern
static void Fill(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    buffer[i] = (i & 0xff);
  }
}

// Check that the specified buffer has the predetermined pattern
// generated by Fill()
static bool Valid(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != (i & 0xff)) {
      return false;
    }
  }
  return true;
}

// Check that a buffer is completely zeroed.
static bool IsZeroed(unsigned char* buffer, int n) {
  for (int i = 0; i < n; i++) {
    if (buffer[i] != 0) {
      return false;
    }
  }
  return true;
}

// Check alignment
static void CheckAlignment(void* p, int align) {
  EXPECT_EQ(0, reinterpret_cast<uintptr_t>(p) & (align-1));
}

// Return the next interesting size/delta to check. Returns -1 if no more.
static int NextSize(int size) {
  if (size < 100)
    return size+1;

  if (size < 100000) {
    // Find next power of two
    int power = 1;
    while (power < size)
      power <<= 1;

    // Yield (power-1, power, power+1)
    if (size < power-1)
      return power-1;

    if (size == power-1)
      return power;

    assert(size == power);
    return power+1;
  } else {
    return -1;
  }
}

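// Helper for building constants that need 64-bit arithmetic, e.g. shifting
// a 1 into the upper half of a 64-bit atomic type.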
#define GG_ULONGLONG(x) static_cast<uint64>(x)

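// Check that NoBarrier_AtomicIncrement returns the updated value and never
// writes outside the word it was given.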
template <class AtomicType>
static void TestAtomicIncrement() {
  // For now, we just test single threaded execution

  // use a guard value to make sure the NoBarrier_AtomicIncrement doesn't go
  // outside the expected address bounds. This is in particular to
  // test that some future change to the asm code doesn't cause the
  // 32-bit NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines.
  struct {
    AtomicType prev_word;
    AtomicType count;
    AtomicType next_word;
  } s;

  AtomicType prev_word_value, next_word_value;
  memset(&prev_word_value, 0xFF, sizeof(AtomicType));
  memset(&next_word_value, 0xEE, sizeof(AtomicType));

  s.prev_word = prev_word_value;
  s.count = 0;
  s.next_word = next_word_value;

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6);
  EXPECT_EQ(s.count, 6);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3);
  EXPECT_EQ(s.count, 3);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1);
  EXPECT_EQ(s.count, 1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1);
  EXPECT_EQ(s.count, -1);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5);
  EXPECT_EQ(s.count, -5);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);

  EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0);
  EXPECT_EQ(s.count, 0);
  EXPECT_EQ(s.prev_word, prev_word_value);
  EXPECT_EQ(s.next_word, next_word_value);
}


#define NUM_BITS(T) (sizeof(T) * 8)

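// Check that NoBarrier_CompareAndSwap stores the new value only when the
// old value matches, and always returns the previous value.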
template <class AtomicType>
static void TestCompareAndSwap() {
  AtomicType value = 0;
  AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, prev);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (GG_ULONGLONG(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, prev);

  value = k_test_val;
  prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, prev);
}

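// Check that NoBarrier_AtomicExchange stores the new value unconditionally
// and returns the previous value.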
template <class AtomicType>
static void TestAtomicExchange() {
  AtomicType value = 0;
  AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1);
  EXPECT_EQ(1, value);
  EXPECT_EQ(0, new_value);

  // Use test value that has non-zero bits in both halves, more for testing
  // 64-bit implementation on 32-bit platforms.
  const AtomicType k_test_val = (GG_ULONGLONG(1) <<
                                 (NUM_BITS(AtomicType) - 2)) + 11;
  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val);
  EXPECT_EQ(k_test_val, value);
  EXPECT_EQ(k_test_val, new_value);

  value = k_test_val;
  new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5);
  EXPECT_EQ(5, value);
  EXPECT_EQ(k_test_val, new_value);
}


template <class AtomicType>
static void TestAtomicIncrementBounds() {
  // Test increment at the half-width boundary of the atomic type.
  // It is primarily for testing at the 32-bit boundary for 64-bit atomic type.
  AtomicType test_val = GG_ULONGLONG(1) << (NUM_BITS(AtomicType) / 2);
  AtomicType value = test_val - 1;
  AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1);
  EXPECT_EQ(test_val, value);
  EXPECT_EQ(value, new_value);

  base::subtle::NoBarrier_AtomicIncrement(&value, -1);
  EXPECT_EQ(test_val - 1, value);
}

// This is a simple sanity check that values are correct. Not testing
// atomicity
template <class AtomicType>
static void TestStore() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  base::subtle::NoBarrier_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::NoBarrier_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Acquire_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Acquire_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);

  base::subtle::Release_Store(&value, kVal1);
  EXPECT_EQ(kVal1, value);
  base::subtle::Release_Store(&value, kVal2);
  EXPECT_EQ(kVal2, value);
}

// This is a simple sanity check that values are correct. Not testing
// atomicity
template <class AtomicType>
static void TestLoad() {
  const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL);
  const AtomicType kVal2 = static_cast<AtomicType>(-1);

  AtomicType value;

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value));

  value = kVal1;
  EXPECT_EQ(kVal1, base::subtle::Release_Load(&value));
  value = kVal2;
  EXPECT_EQ(kVal2, base::subtle::Release_Load(&value));
}

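// Run the whole single-threaded atomic-op suite for one atomic type.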
template <class AtomicType>
static void TestAtomicOps() {
  TestCompareAndSwap<AtomicType>();
  TestAtomicExchange<AtomicType>();
  TestAtomicIncrementBounds<AtomicType>();
  TestStore<AtomicType>();
  TestLoad<AtomicType>();
}

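// calloc(n, s) is expected to succeed exactly when |ok| is true; a
// successful allocation must be zero-filled.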
static void TestCalloc(size_t n, size_t s, bool ok) {
  char* p = reinterpret_cast<char*>(calloc(n, s));
  if (!ok) {
    EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed";
  } else {
    EXPECT_NE(reinterpret_cast<void*>(NULL), p) <<
        "calloc(n, s) should succeed";
    for (int i = 0; i < n*s; i++) {
      EXPECT_EQ('\0', p[i]);
    }
    free(p);
  }
}


// A global test counter for number of times the NewHandler is called.
static int news_handled = 0;
static void TestNewHandler() {
  ++news_handled;
  throw std::bad_alloc();
}

// Because we compile without exceptions, we expect these will not throw.
static void TestOneNewWithoutExceptions(void* (*func)(size_t),
                                        bool should_throw) {
  // success test
  try {
    void* ptr = (*func)(kNotTooBig);
    EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) <<
        "allocation should not have failed.";
  } catch(...) {
    EXPECT_EQ(0, 1) << "allocation threw unexpected exception.";
  }

  // failure test
  try {
    void* rv = (*func)(kTooBig);
    EXPECT_EQ(NULL, rv);
    EXPECT_FALSE(should_throw) << "allocation should have thrown.";
  } catch(...) {
    EXPECT_TRUE(should_throw) << "allocation threw unexpected exception.";
  }
}

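// Exercise a nothrow allocation function both with and without a
// new_handler installed; the handler should be invoked exactly once.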
static void TestNothrowNew(void* (*func)(size_t)) {
  news_handled = 0;

  // test without new_handler:
  std::new_handler saved_handler = std::set_new_handler(0);
  TestOneNewWithoutExceptions(func, false);

  // test with new_handler:
  std::set_new_handler(TestNewHandler);
  TestOneNewWithoutExceptions(func, true);
  EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called.";
  std::set_new_handler(saved_handler);
}

}  // namespace

//-----------------------------------------------------------------------------

TEST(Atomics, AtomicIncrementWord) {
  TestAtomicIncrement<AtomicWord>();
}

TEST(Atomics, AtomicIncrement32) {
  TestAtomicIncrement<Atomic32>();
}

TEST(Atomics, AtomicOpsWord) {
  TestAtomicOps<AtomicWord>();
}

TEST(Atomics, AtomicOps32) {
  TestAtomicOps<Atomic32>();
}

TEST(Allocators, Malloc) {
  // Try allocating data with a bunch of alignments and sizes
  for (int size = 1; size < 1048576; size *= 2) {
    unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size));
    CheckAlignment(ptr, 2);  // Should be 2 byte aligned
    Fill(ptr, size);
    EXPECT_TRUE(Valid(ptr, size));
    free(ptr);
  }
}

TEST(Allocators, Calloc) {
  TestCalloc(0, 0, true);
  TestCalloc(0, 1, true);
  TestCalloc(1, 1, true);
  TestCalloc(1<<10, 0, true);
  TestCalloc(1<<20, 0, true);
  TestCalloc(0, 1<<10, true);
  TestCalloc(0, 1<<20, true);
  TestCalloc(1<<20, 2, true);
  TestCalloc(2, 1<<20, true);
  TestCalloc(1000, 1000, true);

  TestCalloc(kMaxSize, 2, false);
  TestCalloc(2, kMaxSize, false);
  TestCalloc(kMaxSize, kMaxSize, false);

  TestCalloc(kMaxSignedSize, 3, false);
  TestCalloc(3, kMaxSignedSize, false);
  TestCalloc(kMaxSignedSize, kMaxSignedSize, false);
}

TEST(Allocators, New) {
  TestNothrowNew(&::operator new);
  TestNothrowNew(&::operator new[]);
}

// This makes sure that reallocing a small number of bytes in either
// direction doesn't cause us to allocate new memory.
TEST(Allocators, Realloc1) {
  int start_sizes[] = { 100, 1000, 10000, 100000 };
  int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 };

  for (int s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) {
    void* p = malloc(start_sizes[s]);
    ASSERT_TRUE(p);
    // The larger the start-size, the larger the non-reallocing delta.
    for (int d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] + deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    // Test again, but this time reallocing smaller first.
    for (int d = 0; d < s*2; ++d) {
      void* new_p = realloc(p, start_sizes[s] - deltas[d]);
      ASSERT_EQ(p, new_p);  // realloc should not allocate new memory
    }
    free(p);
  }
}

TEST(Allocators, Realloc2) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src = reinterpret_cast<unsigned char*>(malloc(src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(realloc(src, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL) free(dst);
    }
  }

  // Now make sure realloc works correctly even when we overflow the
  // packed cache, so some entries are evicted from the cache.
  // The cache has 2^12 entries, keyed by page number.
  const int kNumEntries = 1 << 14;
  int** p = reinterpret_cast<int**>(malloc(sizeof(*p) * kNumEntries));
  int sum = 0;
  for (int i = 0; i < kNumEntries; i++) {
    // No page size is likely to be bigger than 8192 bytes.
    p[i] = reinterpret_cast<int*>(malloc(8192));
    p[i][1000] = i;  // use memory deep in the heart of p
  }
  for (int i = 0; i < kNumEntries; i++) {
    p[i] = reinterpret_cast<int*>(realloc(p[i], 9000));
  }
  for (int i = 0; i < kNumEntries; i++) {
    sum += p[i][1000];
    free(p[i]);
  }
  EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum);  // assume kNE is even
  free(p);
}

TEST(Allocators, ReallocZero) {
  // Test that realloc to zero does not return NULL.
  for (int size = 0; size >= 0; size = NextSize(size)) {
    char* ptr = reinterpret_cast<char*>(malloc(size));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    ptr = reinterpret_cast<char*>(realloc(ptr, 0));
    EXPECT_NE(static_cast<char*>(NULL), ptr);
    if (ptr)
      free(ptr);
  }
}

#ifdef WIN32
// Test recalloc
TEST(Allocators, Recalloc) {
  for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) {
    for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) {
      unsigned char* src =
          reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size));
      EXPECT_TRUE(IsZeroed(src, src_size));
      Fill(src, src_size);
      unsigned char* dst =
          reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size));
      EXPECT_TRUE(Valid(dst, min(src_size, dst_size)));
      Fill(dst, dst_size);
      EXPECT_TRUE(Valid(dst, dst_size));
      if (dst != NULL)
        free(dst);
    }
  }
}

// Test windows specific _aligned_malloc() and _aligned_free() methods.
TEST(Allocators, AlignedMalloc) {
  // Try allocating data with a bunch of alignments and sizes
  static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384};
  for (int size = 1; size > 0; size = NextSize(size)) {
    for (int i = 0; i < ARRAYSIZE(kTestAlignments); ++i) {
      unsigned char* ptr = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr, kTestAlignments[i]);
      Fill(ptr, size);
      EXPECT_TRUE(Valid(ptr, size));

      // Make a second allocation of the same size and alignment to prevent
      // allocators from passing this test by accident. Per jar, tcmalloc
      // provides allocations for new (never before seen) sizes out of a thread
      // local heap of a given "size class." Each time the test requests a new
      // size, it will usually get the first element of a span, which is a
      // 4K aligned allocation.
      unsigned char* ptr2 = static_cast<unsigned char*>(
          _aligned_malloc(size, kTestAlignments[i]));
      CheckAlignment(ptr2, kTestAlignments[i]);
      Fill(ptr2, size);
      EXPECT_TRUE(Valid(ptr2, size));

      // Should never happen, but sanity check just in case.
      ASSERT_NE(ptr, ptr2);
      _aligned_free(ptr);
      _aligned_free(ptr2);
    }
  }
}

#endif


int main(int argc, char** argv) {
  testing::InitGoogleTest(&argc, argv);
  return RUN_ALL_TESTS();
}