| OLD | NEW |
| 1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include <stdio.h> | 5 #include <stdio.h> |
| 6 #include <stdlib.h> | 6 #include <stdlib.h> |
| 7 #include <algorithm> // for min() | 7 #include <algorithm> // for min() |
| 8 | 8 |
| 9 #include "base/atomicops.h" | 9 #include "base/macros.h" |
| 10 #include "testing/gtest/include/gtest/gtest.h" | 10 #include "testing/gtest/include/gtest/gtest.h" |
| 11 | 11 |
| 12 // Number of bits in a size_t. | 12 // Number of bits in a size_t. |
| 13 static const int kSizeBits = 8 * sizeof(size_t); | 13 static const int kSizeBits = 8 * sizeof(size_t); |
| 14 // The maximum size of a size_t. | 14 // The maximum size of a size_t. |
| 15 static const size_t kMaxSize = ~static_cast<size_t>(0); | 15 static const size_t kMaxSize = ~static_cast<size_t>(0); |
| 16 // Maximum positive size of a size_t if it were signed. | 16 // Maximum positive size of a size_t if it were signed. |
| 17 static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits-1)) - 1); | 17 static const size_t kMaxSignedSize = ((size_t(1) << (kSizeBits-1)) - 1); |
| 18 // An allocation size which is not too big to be reasonable. | |
| 19 static const size_t kNotTooBig = 100000; | |
| 20 // An allocation size which is just too big. | |
| 21 static const size_t kTooBig = ~static_cast<size_t>(0); | |
| 22 | 18 |
| 23 namespace { | 19 namespace { |
| 24 | 20 |
| 25 using std::min; | 21 using std::min; |
| 26 | 22 |
| 27 // Fill a buffer of the specified size with a predetermined pattern | 23 // Fill a buffer of the specified size with a predetermined pattern |
| 28 static void Fill(unsigned char* buffer, int n) { | 24 static void Fill(unsigned char* buffer, int n) { |
| 29 for (int i = 0; i < n; i++) { | 25 for (int i = 0; i < n; i++) { |
| 30 buffer[i] = (i & 0xff); | 26 buffer[i] = (i & 0xff); |
| 31 } | 27 } |
| (...skipping 43 matching lines...) |
| 75 if (size == power-1) | 71 if (size == power-1) |
| 76 return power; | 72 return power; |
| 77 | 73 |
| 78 assert(size == power); | 74 assert(size == power); |
| 79 return power+1; | 75 return power+1; |
| 80 } else { | 76 } else { |
| 81 return -1; | 77 return -1; |
| 82 } | 78 } |
| 83 } | 79 } |
| 84 | 80 |
| 85 template <class AtomicType> | |
| 86 static void TestAtomicIncrement() { | |
| 87 // For now, we just test single-threaded execution. | |
| 88 | |
| 89 // Use a guard value to make sure NoBarrier_AtomicIncrement doesn't write | |
| 90 // outside the expected address bounds. In particular, this tests that | |
| 91 // some future change to the asm code doesn't cause the 32-bit | |
| 92 // NoBarrier_AtomicIncrement to do the wrong thing on 64-bit machines. | |
| 93 struct { | |
| 94 AtomicType prev_word; | |
| 95 AtomicType count; | |
| 96 AtomicType next_word; | |
| 97 } s; | |
| 98 | |
| 99 AtomicType prev_word_value, next_word_value; | |
| 100 memset(&prev_word_value, 0xFF, sizeof(AtomicType)); | |
| 101 memset(&next_word_value, 0xEE, sizeof(AtomicType)); | |
| 102 | |
| 103 s.prev_word = prev_word_value; | |
| 104 s.count = 0; | |
| 105 s.next_word = next_word_value; | |
| 106 | |
| 107 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 1), 1); | |
| 108 EXPECT_EQ(s.count, 1); | |
| 109 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 110 EXPECT_EQ(s.next_word, next_word_value); | |
| 111 | |
| 112 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 2), 3); | |
| 113 EXPECT_EQ(s.count, 3); | |
| 114 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 115 EXPECT_EQ(s.next_word, next_word_value); | |
| 116 | |
| 117 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 3), 6); | |
| 118 EXPECT_EQ(s.count, 6); | |
| 119 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 120 EXPECT_EQ(s.next_word, next_word_value); | |
| 121 | |
| 122 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -3), 3); | |
| 123 EXPECT_EQ(s.count, 3); | |
| 124 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 125 EXPECT_EQ(s.next_word, next_word_value); | |
| 126 | |
| 127 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -2), 1); | |
| 128 EXPECT_EQ(s.count, 1); | |
| 129 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 130 EXPECT_EQ(s.next_word, next_word_value); | |
| 131 | |
| 132 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), 0); | |
| 133 EXPECT_EQ(s.count, 0); | |
| 134 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 135 EXPECT_EQ(s.next_word, next_word_value); | |
| 136 | |
| 137 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -1), -1); | |
| 138 EXPECT_EQ(s.count, -1); | |
| 139 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 140 EXPECT_EQ(s.next_word, next_word_value); | |
| 141 | |
| 142 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, -4), -5); | |
| 143 EXPECT_EQ(s.count, -5); | |
| 144 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 145 EXPECT_EQ(s.next_word, next_word_value); | |
| 146 | |
| 147 EXPECT_EQ(base::subtle::NoBarrier_AtomicIncrement(&s.count, 5), 0); | |
| 148 EXPECT_EQ(s.count, 0); | |
| 149 EXPECT_EQ(s.prev_word, prev_word_value); | |
| 150 EXPECT_EQ(s.next_word, next_word_value); | |
| 151 } | |
| 152 | |
| 153 | |
| 154 #define NUM_BITS(T) (sizeof(T) * 8) | |
| 155 | |
| 156 | |
| 157 template <class AtomicType> | |
| 158 static void TestCompareAndSwap() { | |
| 159 AtomicType value = 0; | |
| 160 AtomicType prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 1); | |
| 161 EXPECT_EQ(1, value); | |
| 162 EXPECT_EQ(0, prev); | |
| 163 | |
| 164 // Use a test value that has non-zero bits in both halves, mainly to | |
| 165 // exercise the 64-bit implementation on 32-bit platforms. | |
| 166 const AtomicType k_test_val = (static_cast<uint64_t>(1) << | |
| 167 (NUM_BITS(AtomicType) - 2)) + 11; | |
| 168 value = k_test_val; | |
| 169 prev = base::subtle::NoBarrier_CompareAndSwap(&value, 0, 5); | |
| 170 EXPECT_EQ(k_test_val, value); | |
| 171 EXPECT_EQ(k_test_val, prev); | |
| 172 | |
| 173 value = k_test_val; | |
| 174 prev = base::subtle::NoBarrier_CompareAndSwap(&value, k_test_val, 5); | |
| 175 EXPECT_EQ(5, value); | |
| 176 EXPECT_EQ(k_test_val, prev); | |
| 177 } | |
| 178 | |
| 179 | |
| 180 template <class AtomicType> | |
| 181 static void TestAtomicExchange() { | |
| 182 AtomicType value = 0; | |
| 183 AtomicType new_value = base::subtle::NoBarrier_AtomicExchange(&value, 1); | |
| 184 EXPECT_EQ(1, value); | |
| 185 EXPECT_EQ(0, new_value); | |
| 186 | |
| 187 // Use a test value that has non-zero bits in both halves, mainly to | |
| 188 // exercise the 64-bit implementation on 32-bit platforms. | |
| 189 const AtomicType k_test_val = (static_cast<uint64_t>(1) << | |
| 190 (NUM_BITS(AtomicType) - 2)) + 11; | |
| 191 value = k_test_val; | |
| 192 new_value = base::subtle::NoBarrier_AtomicExchange(&value, k_test_val); | |
| 193 EXPECT_EQ(k_test_val, value); | |
| 194 EXPECT_EQ(k_test_val, new_value); | |
| 195 | |
| 196 value = k_test_val; | |
| 197 new_value = base::subtle::NoBarrier_AtomicExchange(&value, 5); | |
| 198 EXPECT_EQ(5, value); | |
| 199 EXPECT_EQ(k_test_val, new_value); | |
| 200 } | |
| 201 | |
| 202 | |
| 203 template <class AtomicType> | |
| 204 static void TestAtomicIncrementBounds() { | |
| 205 // Test increment at the half-width boundary of the atomic type. | |
| 206 // It primarily tests the 32-bit boundary for the 64-bit atomic type. | |
| 207 AtomicType test_val = static_cast<uint64_t>(1) << (NUM_BITS(AtomicType) / 2); | |
| 208 AtomicType value = test_val - 1; | |
| 209 AtomicType new_value = base::subtle::NoBarrier_AtomicIncrement(&value, 1); | |
| 210 EXPECT_EQ(test_val, value); | |
| 211 EXPECT_EQ(value, new_value); | |
| 212 | |
| 213 base::subtle::NoBarrier_AtomicIncrement(&value, -1); | |
| 214 EXPECT_EQ(test_val - 1, value); | |
| 215 } | |
| 216 | |
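The half-width boundary matters because a broken 64-bit increment assembled from two independent 32-bit operations loses the carry out of the low word. A minimal sketch of the failure mode this guards against (buggy_increment64 is a hypothetical name, not code from this CL):

```cpp
#include <stdint.h>

// Deliberately wrong: the two 32-bit halves are updated independently,
// so the carry out of the low word never reaches the high word.
static uint64_t buggy_increment64(uint64_t v, int32_t delta) {
  uint32_t lo = static_cast<uint32_t>(v) + delta;  // carry lost here
  uint32_t hi = static_cast<uint32_t>(v >> 32);    // never adjusted
  return (static_cast<uint64_t>(hi) << 32) | lo;
}

// buggy_increment64((1ULL << 32) - 1, 1) yields 0 instead of 1ULL << 32,
// which is exactly the case TestAtomicIncrementBounds would catch.
```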
| 217 // This is a simple sanity check that values are correct; it does not | |
| 218 // test atomicity. | |
| 219 template <class AtomicType> | |
| 220 static void TestStore() { | |
| 221 const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL); | |
| 222 const AtomicType kVal2 = static_cast<AtomicType>(-1); | |
| 223 | |
| 224 AtomicType value; | |
| 225 | |
| 226 base::subtle::NoBarrier_Store(&value, kVal1); | |
| 227 EXPECT_EQ(kVal1, value); | |
| 228 base::subtle::NoBarrier_Store(&value, kVal2); | |
| 229 EXPECT_EQ(kVal2, value); | |
| 230 | |
| 231 base::subtle::Acquire_Store(&value, kVal1); | |
| 232 EXPECT_EQ(kVal1, value); | |
| 233 base::subtle::Acquire_Store(&value, kVal2); | |
| 234 EXPECT_EQ(kVal2, value); | |
| 235 | |
| 236 base::subtle::Release_Store(&value, kVal1); | |
| 237 EXPECT_EQ(kVal1, value); | |
| 238 base::subtle::Release_Store(&value, kVal2); | |
| 239 EXPECT_EQ(kVal2, value); | |
| 240 } | |
| 241 | |
| 242 // This is a simple sanity check that values are correct; it does not | |
| 243 // test atomicity. | |
| 244 template <class AtomicType> | |
| 245 static void TestLoad() { | |
| 246 const AtomicType kVal1 = static_cast<AtomicType>(0xa5a5a5a5a5a5a5a5LL); | |
| 247 const AtomicType kVal2 = static_cast<AtomicType>(-1); | |
| 248 | |
| 249 AtomicType value; | |
| 250 | |
| 251 value = kVal1; | |
| 252 EXPECT_EQ(kVal1, base::subtle::NoBarrier_Load(&value)); | |
| 253 value = kVal2; | |
| 254 EXPECT_EQ(kVal2, base::subtle::NoBarrier_Load(&value)); | |
| 255 | |
| 256 value = kVal1; | |
| 257 EXPECT_EQ(kVal1, base::subtle::Acquire_Load(&value)); | |
| 258 value = kVal2; | |
| 259 EXPECT_EQ(kVal2, base::subtle::Acquire_Load(&value)); | |
| 260 | |
| 261 value = kVal1; | |
| 262 EXPECT_EQ(kVal1, base::subtle::Release_Load(&value)); | |
| 263 value = kVal2; | |
| 264 EXPECT_EQ(kVal2, base::subtle::Release_Load(&value)); | |
| 265 } | |
| 266 | |
| 267 template <class AtomicType> | |
| 268 static void TestAtomicOps() { | |
| 269 TestCompareAndSwap<AtomicType>(); | |
| 270 TestAtomicExchange<AtomicType>(); | |
| 271 TestAtomicIncrementBounds<AtomicType>(); | |
| 272 TestStore<AtomicType>(); | |
| 273 TestLoad<AtomicType>(); | |
| 274 } | |
| 275 | |
| 276 static void TestCalloc(size_t n, size_t s, bool ok) { | 81 static void TestCalloc(size_t n, size_t s, bool ok) { |
| 277 char* p = reinterpret_cast<char*>(calloc(n, s)); | 82 char* p = reinterpret_cast<char*>(calloc(n, s)); |
| 278 if (!ok) { | 83 if (!ok) { |
| 279 EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed"; | 84 EXPECT_EQ(NULL, p) << "calloc(n, s) should not succeed"; |
| 280 } else { | 85 } else { |
| 281 EXPECT_NE(reinterpret_cast<void*>(NULL), p) << | 86 EXPECT_NE(reinterpret_cast<void*>(NULL), p) << |
| 282 "calloc(n, s) should succeed"; | 87 "calloc(n, s) should succeed"; |
| 283 for (size_t i = 0; i < n*s; i++) { | 88 for (size_t i = 0; i < n*s; i++) { |
| 284 EXPECT_EQ('\0', p[i]); | 89 EXPECT_EQ('\0', p[i]); |
| 285 } | 90 } |
| 286 free(p); | 91 free(p); |
| 287 } | 92 } |
| 288 } | 93 } |
| 289 | 94 |
| 290 // MSVC C4530 complains about exception handler usage when exceptions are | |
| 291 // disabled. Temporarily disable that warning so we can test that they are, in | |
| 292 // fact, disabled. | |
| 293 #if defined(OS_WIN) | |
| 294 #pragma warning(push) | |
| 295 #pragma warning(disable: 4530) | |
| 296 #endif | |
| 297 | |
| 299 // A global test counter for the number of times the NewHandler is called. | |
| 299 static int news_handled = 0; | |
| 300 static void TestNewHandler() { | |
| 301 ++news_handled; | |
| 302 throw std::bad_alloc(); | |
| 303 } | |
| 304 | |
| 305 // Because we compile without exceptions, we expect these will not throw. | |
| 306 static void TestOneNewWithoutExceptions(void* (*func)(size_t), | |
| 307 bool should_throw) { | |
| 308 // success test | |
| 309 try { | |
| 310 void* ptr = (*func)(kNotTooBig); | |
| 311 EXPECT_NE(reinterpret_cast<void*>(NULL), ptr) << | |
| 312 "allocation should not have failed."; | |
| 313 } catch(...) { | |
| 314 EXPECT_EQ(0, 1) << "allocation threw unexpected exception."; | |
| 315 } | |
| 316 | |
| 317 // failure test | |
| 318 try { | |
| 319 void* rv = (*func)(kTooBig); | |
| 320 EXPECT_EQ(NULL, rv); | |
| 321 EXPECT_FALSE(should_throw) << "allocation should have thrown."; | |
| 322 } catch(...) { | |
| 323 EXPECT_TRUE(should_throw) << "allocation threw unexpected exception."; | |
| 324 } | |
| 325 } | |
| 326 | |
| 327 static void TestNothrowNew(void* (*func)(size_t)) { | |
| 328 news_handled = 0; | |
| 329 | |
| 330 // test without new_handler: | |
| 331 std::new_handler saved_handler = std::set_new_handler(0); | |
| 332 TestOneNewWithoutExceptions(func, false); | |
| 333 | |
| 334 // test with new_handler: | |
| 335 std::set_new_handler(TestNewHandler); | |
| 336 TestOneNewWithoutExceptions(func, true); | |
| 337 EXPECT_EQ(news_handled, 1) << "nothrow new_handler was not called."; | |
| 338 std::set_new_handler(saved_handler); | |
| 339 } | |
| 340 | |
| 341 #if defined(OS_WIN) | |
| 342 #pragma warning(pop) | |
| 343 #endif | |
| 344 | |
| 345 } // namespace | 95 } // namespace |
| 346 | 96 |
| 347 //----------------------------------------------------------------------------- | 97 //----------------------------------------------------------------------------- |
| 348 | 98 |
| 349 TEST(Atomics, AtomicIncrementWord) { | |
| 350 TestAtomicIncrement<base::subtle::AtomicWord>(); | |
| 351 } | |
| 352 | |
| 353 TEST(Atomics, AtomicIncrement32) { | |
| 354 TestAtomicIncrement<base::subtle::Atomic32>(); | |
| 355 } | |
| 356 | |
| 357 TEST(Atomics, AtomicOpsWord) { | |
| 358 TestAtomicOps<base::subtle::AtomicWord>(); | |
| 359 } | |
| 360 | |
| 361 TEST(Atomics, AtomicOps32) { | |
| 362 TestAtomicOps<base::subtle::Atomic32>(); | |
| 363 } | |
| 364 | 99 |
| 365 TEST(Allocators, Malloc) { | 100 TEST(Allocators, Malloc) { |
| 366 // Try allocating data with a bunch of alignments and sizes | 101 // Try allocating data with a bunch of alignments and sizes |
| 367 for (int size = 1; size < 1048576; size *= 2) { | 102 for (int size = 1; size < 1048576; size *= 2) { |
| 368 unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size)); | 103 unsigned char* ptr = reinterpret_cast<unsigned char*>(malloc(size)); |
| 369 CheckAlignment(ptr, 2); // Should be 2 byte aligned | 104 CheckAlignment(ptr, 2); // Should be 2 byte aligned |
| 370 Fill(ptr, size); | 105 Fill(ptr, size); |
| 371 EXPECT_TRUE(Valid(ptr, size)); | 106 EXPECT_TRUE(Valid(ptr, size)); |
| 372 free(ptr); | 107 free(ptr); |
| 373 } | 108 } |
| (...skipping 13 matching lines...) |
| 387 | 122 |
| 388 TestCalloc(kMaxSize, 2, false); | 123 TestCalloc(kMaxSize, 2, false); |
| 389 TestCalloc(2, kMaxSize, false); | 124 TestCalloc(2, kMaxSize, false); |
| 390 TestCalloc(kMaxSize, kMaxSize, false); | 125 TestCalloc(kMaxSize, kMaxSize, false); |
| 391 | 126 |
| 392 TestCalloc(kMaxSignedSize, 3, false); | 127 TestCalloc(kMaxSignedSize, 3, false); |
| 393 TestCalloc(3, kMaxSignedSize, false); | 128 TestCalloc(3, kMaxSignedSize, false); |
| 394 TestCalloc(kMaxSignedSize, kMaxSignedSize, false); | 129 TestCalloc(kMaxSignedSize, kMaxSignedSize, false); |
| 395 } | 130 } |
| 396 | 131 |
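The failure cases above succeed only if calloc detects that n * s overflows size_t: kMaxSize * 2 and the kMaxSignedSize products all wrap around. A minimal sketch of the standard overflow check, assuming nothing about the allocator's internals (checked_calloc is a hypothetical name):

```cpp
#include <stdlib.h>
#include <string.h>

// If n * s would wrap around size_t, fail instead of allocating a
// too-small block and zeroing past its end.
static void* checked_calloc(size_t n, size_t s) {
  if (s != 0 && n > static_cast<size_t>(-1) / s)
    return NULL;  // n * s overflows size_t
  void* p = malloc(n * s);
  if (p != NULL)
    memset(p, 0, n * s);
  return p;
}
```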
| 397 TEST(Allocators, New) { | |
| 398 TestNothrowNew(&::operator new); | |
| 399 TestNothrowNew(&::operator new[]); | |
| 400 } | |
| 401 | |
| 402 // This makes sure that reallocing a small number of bytes in either | 132 // This makes sure that reallocing a small number of bytes in either |
| 403 // direction doesn't cause us to allocate new memory. | 133 // direction doesn't cause us to allocate new memory. |
| 404 TEST(Allocators, Realloc1) { | 134 TEST(Allocators, Realloc1) { |
| 405 int start_sizes[] = { 100, 1000, 10000, 100000 }; | 135 int start_sizes[] = { 100, 1000, 10000, 100000 }; |
| 406 int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 }; | 136 int deltas[] = { 1, -2, 4, -8, 16, -32, 64, -128 }; |
| 407 | 137 |
| 408 for (int s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) { | 138 for (int s = 0; s < sizeof(start_sizes)/sizeof(*start_sizes); ++s) { |
| 409 void* p = malloc(start_sizes[s]); | 139 void* p = malloc(start_sizes[s]); |
| 410 ASSERT_TRUE(p); | 140 ASSERT_TRUE(p); |
| 411 // The larger the start-size, the larger the non-reallocing delta. | 141 // The larger the start-size, the larger the non-reallocing delta. |
| (...skipping 39 matching lines...) |
| 451 p[i] = reinterpret_cast<int*>(realloc(p[i], 9000)); | 181 p[i] = reinterpret_cast<int*>(realloc(p[i], 9000)); |
| 452 } | 182 } |
| 453 for (int i = 0; i < kNumEntries; i++) { | 183 for (int i = 0; i < kNumEntries; i++) { |
| 454 sum += p[i][1000]; | 184 sum += p[i][1000]; |
| 455 free(p[i]); | 185 free(p[i]); |
| 456 } | 186 } |
| 457 EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum); // assume kNE is even | 187 EXPECT_EQ(kNumEntries/2 * (kNumEntries - 1), sum); // assume kNE is even |
| 458 free(p); | 188 free(p); |
| 459 } | 189 } |
| 460 | 190 |
| 461 // tcmalloc uses these semantics, but system allocators can return NULL for | |
| 462 // realloc(ptr, 0). | |
| 463 #if defined(USE_TCMALLOC) | |
| 464 TEST(Allocators, ReallocZero) { | |
| 465 // Test that realloc to zero does not return NULL. | |
| 466 for (int size = 0; size >= 0; size = NextSize(size)) { | |
| 467 char* ptr = reinterpret_cast<char*>(malloc(size)); | |
| 468 EXPECT_NE(static_cast<char*>(NULL), ptr); | |
| 469 ptr = reinterpret_cast<char*>(realloc(ptr, 0)); | |
| 470 EXPECT_NE(static_cast<char*>(NULL), ptr); | |
| 471 if (ptr) | |
| 472 free(ptr); | |
| 473 } | |
| 474 } | |
| 475 #endif | |
| 476 | |
| 477 #ifdef WIN32 | |
| 478 // Test recalloc | 191 // Test recalloc |
| 479 TEST(Allocators, Recalloc) { | 192 TEST(Allocators, Recalloc) { |
| 480 for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) { | 193 for (int src_size = 0; src_size >= 0; src_size = NextSize(src_size)) { |
| 481 for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) { | 194 for (int dst_size = 0; dst_size >= 0; dst_size = NextSize(dst_size)) { |
| 482 unsigned char* src = | 195 unsigned char* src = |
| 483 reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size)); | 196 reinterpret_cast<unsigned char*>(_recalloc(NULL, 1, src_size)); |
| 484 EXPECT_TRUE(IsZeroed(src, src_size)); | 197 EXPECT_TRUE(IsZeroed(src, src_size)); |
| 485 Fill(src, src_size); | 198 Fill(src, src_size); |
| 486 unsigned char* dst = | 199 unsigned char* dst = |
| 487 reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size)); | 200 reinterpret_cast<unsigned char*>(_recalloc(src, 1, dst_size)); |
| 488 EXPECT_TRUE(Valid(dst, min(src_size, dst_size))); | 201 EXPECT_TRUE(Valid(dst, min(src_size, dst_size))); |
| 489 Fill(dst, dst_size); | 202 Fill(dst, dst_size); |
| 490 EXPECT_TRUE(Valid(dst, dst_size)); | 203 EXPECT_TRUE(Valid(dst, dst_size)); |
| 491 if (dst != NULL) | 204 if (dst != NULL) |
| 492 free(dst); | 205 free(dst); |
| 493 } | 206 } |
| 494 } | 207 } |
| 495 } | 208 } |
| 496 | 209 |
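The test above relies on two properties of _recalloc: old contents survive (Valid() over the common prefix) and any newly extended tail is zero-filled (IsZeroed() on a fresh allocation). A minimal sketch of those semantics, assuming the caller tracks the old block size; the real CRT reads it from heap metadata, and sketch_recalloc/old_size are hypothetical names:

```cpp
#include <stdlib.h>
#include <string.h>

// Grow or shrink like realloc, then zero any bytes beyond the old size.
static void* sketch_recalloc(void* p, size_t n, size_t s, size_t old_size) {
  if (s != 0 && n > static_cast<size_t>(-1) / s)
    return NULL;  // n * s overflows size_t
  size_t new_size = n * s;
  void* q = realloc(p, new_size);
  if (q != NULL && new_size > old_size)
    memset(static_cast<char*>(q) + old_size, 0, new_size - old_size);
  return q;
}
```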
| 497 // Test the Windows-specific _aligned_malloc() and _aligned_free() functions. | 210 // Test the Windows-specific _aligned_malloc() and _aligned_free() functions. |
| 498 TEST(Allocators, AlignedMalloc) { | 211 TEST(Allocators, AlignedMalloc) { |
| 499 // Try allocating data with a bunch of alignments and sizes | 212 // Try allocating data with a bunch of alignments and sizes |
| 500 static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384}; | 213 static const int kTestAlignments[] = {8, 16, 256, 4096, 8192, 16384}; |
| 501 for (int size = 1; size > 0; size = NextSize(size)) { | 214 for (int size = 1; size > 0; size = NextSize(size)) { |
| 502 for (int i = 0; i < ARRAYSIZE(kTestAlignments); ++i) { | 215 for (int i = 0; i < arraysize(kTestAlignments); ++i) { |
| 503 unsigned char* ptr = static_cast<unsigned char*>( | 216 unsigned char* ptr = static_cast<unsigned char*>( |
| 504 _aligned_malloc(size, kTestAlignments[i])); | 217 _aligned_malloc(size, kTestAlignments[i])); |
| 505 CheckAlignment(ptr, kTestAlignments[i]); | 218 CheckAlignment(ptr, kTestAlignments[i]); |
| 506 Fill(ptr, size); | 219 Fill(ptr, size); |
| 507 EXPECT_TRUE(Valid(ptr, size)); | 220 EXPECT_TRUE(Valid(ptr, size)); |
| 508 | 221 |
| 509 // Make a second allocation of the same size and alignment to prevent | 222 // Make a second allocation of the same size and alignment to prevent |
| 510 // allocators from passing this test by accident. Per jar, tcmalloc | 223 // allocators from passing this test by accident. Per jar, tcmalloc |
| 511 // provides allocations for new (never before seen) sizes out of a thread | 224 // provides allocations for new (never before seen) sizes out of a thread |
| 512 // local heap of a given "size class." Each time the test requests a new | 225 // local heap of a given "size class." Each time the test requests a new |
| 513 // size, it will usually get the first element of a span, which is a | 226 // size, it will usually get the first element of a span, which is a |
| 514 // 4K aligned allocation. | 227 // 4K aligned allocation. |
| 515 unsigned char* ptr2 = static_cast<unsigned char*>( | 228 unsigned char* ptr2 = static_cast<unsigned char*>( |
| 516 _aligned_malloc(size, kTestAlignments[i])); | 229 _aligned_malloc(size, kTestAlignments[i])); |
| 517 CheckAlignment(ptr2, kTestAlignments[i]); | 230 CheckAlignment(ptr2, kTestAlignments[i]); |
| 518 Fill(ptr2, size); | 231 Fill(ptr2, size); |
| 519 EXPECT_TRUE(Valid(ptr2, size)); | 232 EXPECT_TRUE(Valid(ptr2, size)); |
| 520 | 233 |
| 521 // Should never happen, but sanity check just in case. | 234 // Should never happen, but sanity check just in case. |
| 522 ASSERT_NE(ptr, ptr2); | 235 ASSERT_NE(ptr, ptr2); |
| 523 _aligned_free(ptr); | 236 _aligned_free(ptr); |
| 524 _aligned_free(ptr2); | 237 _aligned_free(ptr2); |
| 525 } | 238 } |
| 526 } | 239 } |
| 527 } | 240 } |
| 528 | 241 |
| 529 #endif | |
| 530 | |
| 531 | |
| 532 int main(int argc, char** argv) { | 242 int main(int argc, char** argv) { |
| 533 testing::InitGoogleTest(&argc, argv); | 243 testing::InitGoogleTest(&argc, argv); |
| 534 return RUN_ALL_TESTS(); | 244 return RUN_ALL_TESTS(); |
| 535 } | 245 } |