| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 91 matching lines...) |
| 102 #include "wtf/CPU.h" | 102 #include "wtf/CPU.h" |
| 103 #include "wtf/PageAllocator.h" | 103 #include "wtf/PageAllocator.h" |
| 104 #include "wtf/SpinLock.h" | 104 #include "wtf/SpinLock.h" |
| 105 | 105 |
| 106 #include <limits.h> | 106 #include <limits.h> |
| 107 | 107 |
| 108 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 108 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 109 #include <stdlib.h> | 109 #include <stdlib.h> |
| 110 #endif | 110 #endif |
| 111 | 111 |
| 112 #ifndef NDEBUG | 112 #if ENABLE(ASSERT) |
| 113 #include <string.h> | 113 #include <string.h> |
| 114 #endif | 114 #endif |
| 115 | 115 |
| 116 namespace WTF { | 116 namespace WTF { |
| 117 | 117 |
| 118 // Maximum size of a partition's mappings. 2046MB. Note that the total amount of | 118 // Maximum size of a partition's mappings. 2046MB. Note that the total amount of |
| 119 // bytes allocatable at the API will be smaller. This is because things like | 119 // bytes allocatable at the API will be smaller. This is because things like |
| 120 // guard pages, metadata, page headers and wasted space come out of the total. | 120 // guard pages, metadata, page headers and wasted space come out of the total. |
| 121 // The 2GB is not necessarily contiguous in virtual address space. | 121 // The 2GB is not necessarily contiguous in virtual address space. |
| 122 static const size_t kMaxPartitionSize = 2046u * 1024u * 1024u; | 122 static const size_t kMaxPartitionSize = 2046u * 1024u * 1024u; |
| (...skipping 60 matching lines...) |
| 183 static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1); | 183 static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1); |
| 184 static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits); | 184 static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits); |
| 185 static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing); | 185 static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing); |
| 186 static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1; // Limit when downsizing a direct mapping using realloc(). | 186 static const size_t kGenericMinDirectMappedDownsize = kGenericMaxBucketed + 1; // Limit when downsizing a direct mapping using realloc(). |
| 187 static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize; | 187 static const size_t kGenericMaxDirectMapped = INT_MAX - kSystemPageSize; |
| 188 static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT; | 188 static const size_t kBitsPerSizet = sizeof(void*) * CHAR_BIT; |
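Note: as a sanity check on the kGenericSmallestBucket / kGenericMaxBucketSpacing / kGenericMaxBucketed arithmetic above, here is a standalone sketch that evaluates the same expressions. The order constants they depend on are defined in the hunk skipped above, so the values 4, 20 and 3 used here are assumptions chosen only to make the arithmetic concrete.

    #include <cstddef>

    // Assumed values; the real constants live in the skipped hunk of this header.
    static const size_t kGenericMinBucketedOrder = 4;
    static const size_t kGenericMaxBucketedOrder = 20;
    static const size_t kGenericNumBucketsPerOrderBits = 3;
    static const size_t kGenericNumBucketsPerOrder = 1 << kGenericNumBucketsPerOrderBits;

    // Same expressions as in the header.
    static const size_t kGenericSmallestBucket = 1 << (kGenericMinBucketedOrder - 1);
    static const size_t kGenericMaxBucketSpacing = 1 << ((kGenericMaxBucketedOrder - 1) - kGenericNumBucketsPerOrderBits);
    static const size_t kGenericMaxBucketed = (1 << (kGenericMaxBucketedOrder - 1)) + ((kGenericNumBucketsPerOrder - 1) * kGenericMaxBucketSpacing);

    static_assert(kGenericSmallestBucket == 8, "smallest bucketed allocation");
    static_assert(kGenericMaxBucketSpacing == 64 * 1024, "spacing between the largest buckets");
    static_assert(kGenericMaxBucketed == 983040, "largest bucketed size (~960KB)");

With these assumed values, anything larger than kGenericMaxBucketed falls outside the bucket table, which is consistent with kGenericMinDirectMappedDownsize being defined as kGenericMaxBucketed + 1 just above.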
| 189 | 189 |
| 190 // Constants for the memory reclaim logic. | 190 // Constants for the memory reclaim logic. |
| 191 static const size_t kMaxFreeableSpans = 16; | 191 static const size_t kMaxFreeableSpans = 16; |
| 192 | 192 |
| 193 #ifndef NDEBUG | 193 #if ENABLE(ASSERT) |
| 194 // These two byte values match tcmalloc. | 194 // These two byte values match tcmalloc. |
| 195 static const unsigned char kUninitializedByte = 0xAB; | 195 static const unsigned char kUninitializedByte = 0xAB; |
| 196 static const unsigned char kFreedByte = 0xCD; | 196 static const unsigned char kFreedByte = 0xCD; |
| 197 static const uint32_t kCookieValue = 0xDEADBEEFu; | 197 static const uint32_t kCookieValue = 0xDEADBEEFu; |
| 198 static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel. | 198 static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel. |
| 199 #endif | 199 #endif |
| 200 | 200 |
| 201 struct PartitionBucket; | 201 struct PartitionBucket; |
| 202 struct PartitionRootBase; | 202 struct PartitionRootBase; |
| 203 | 203 |
| (...skipping 122 matching lines...) |
| 326 #if CPU(BIG_ENDIAN) | 326 #if CPU(BIG_ENDIAN) |
| 327 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); | 327 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); |
| 328 #else | 328 #else |
| 329 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr)); | 329 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr)); |
| 330 #endif | 330 #endif |
| 331 return reinterpret_cast<PartitionFreelistEntry*>(masked); | 331 return reinterpret_cast<PartitionFreelistEntry*>(masked); |
| 332 } | 332 } |
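Note: the transform in partitionFreelistMask (negation on big endian, byte swap otherwise) is its own inverse, so the same helper both encodes a freelist "next" pointer before it is stored in a free slot and decodes it again on the pop path, and the stored value does not look like a valid pointer. A minimal sketch of that round-trip property, using a hand-rolled byte swap and an assumed ARCH_BIG_ENDIAN macro as a stand-in for the header's CPU(BIG_ENDIAN):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    static uintptr_t maskFreelistPointer(uintptr_t p)
    {
    #if defined(ARCH_BIG_ENDIAN) // assumed stand-in for CPU(BIG_ENDIAN)
        return ~p; // negation: the stored value is not a dereferenceable address
    #else
        // Byte swap: scrambles the high/low bytes of the stored value, so a
        // stale read is likely to fault and a partial low-byte overwrite by a
        // linear overflow does not yield a usable pointer.
        uintptr_t swapped = 0;
        for (size_t i = 0; i < sizeof(uintptr_t); ++i)
            swapped = (swapped << 8) | ((p >> (8 * i)) & 0xff);
        return swapped;
    #endif
    }

    int main()
    {
        uintptr_t fakeSlotAddress = 0x00007f1234567890u;
        // Masking twice round-trips, which is why the freelist push and pop
        // paths can share the single partitionFreelistMask helper.
        assert(maskFreelistPointer(maskFreelistPointer(fakeSlotAddress)) == fakeSlotAddress);
        return 0;
    }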
| 333 | 333 |
| 334 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) | 334 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) |
| 335 { | 335 { |
| 336 #ifndef NDEBUG | 336 #if ENABLE(ASSERT) |
| 337 // Add space for cookies, checking for integer overflow. | 337 // Add space for cookies, checking for integer overflow. |
| 338 ASSERT(size + (2 * kCookieSize) > size); | 338 ASSERT(size + (2 * kCookieSize) > size); |
| 339 size += 2 * kCookieSize; | 339 size += 2 * kCookieSize; |
| 340 #endif | 340 #endif |
| 341 return size; | 341 return size; |
| 342 } | 342 } |
| 343 | 343 |
| 344 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) | 344 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) |
| 345 { | 345 { |
| 346 #ifndef NDEBUG | 346 #if ENABLE(ASSERT) |
| 347 // Remove space for cookies. | 347 // Remove space for cookies. |
| 348 ASSERT(size >= 2 * kCookieSize); | 348 ASSERT(size >= 2 * kCookieSize); |
| 349 size -= 2 * kCookieSize; | 349 size -= 2 * kCookieSize; |
| 350 #endif | 350 #endif |
| 351 return size; | 351 return size; |
| 352 } | 352 } |
| 353 | 353 |
| 354 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) | 354 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) |
| 355 { | 355 { |
| 356 #ifndef NDEBUG | 356 #if ENABLE(ASSERT) |
| 357 // The value given to the application is actually just after the cookie. | 357 // The value given to the application is actually just after the cookie. |
| 358 ptr = static_cast<char*>(ptr) - kCookieSize; | 358 ptr = static_cast<char*>(ptr) - kCookieSize; |
| 359 #endif | 359 #endif |
| 360 return ptr; | 360 return ptr; |
| 361 } | 361 } |
| 362 | 362 |
| 363 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) | 363 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) |
| 364 { | 364 { |
| 365 #ifndef NDEBUG | 365 #if ENABLE(ASSERT) |
| 366 uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr); | 366 uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr); |
| 367 for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr) | 367 for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr) |
| 368 *cookiePtr = kCookieValue; | 368 *cookiePtr = kCookieValue; |
| 369 #endif | 369 #endif |
| 370 } | 370 } |
| 371 | 371 |
| 372 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) | 372 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) |
| 373 { | 373 { |
| 374 #ifndef NDEBUG | 374 #if ENABLE(ASSERT) |
| 375 uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr); | 375 uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr); |
| 376 for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr) | 376 for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr) |
| 377 ASSERT(*cookiePtr == kCookieValue); | 377 ASSERT(*cookiePtr == kCookieValue); |
| 378 #endif | 378 #endif |
| 379 } | 379 } |
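Note: taken together, the five cookie helpers above imply a simple slot layout in ENABLE(ASSERT) builds: a kCookieSize-byte cookie on each side of the caller-visible region, written on allocation and verified on free. The following self-contained sketch models only that layout; it is not the allocator's own code path, and it just copies the kCookieValue/kCookieSize constants shown earlier.

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    static const uint32_t kCookieValue = 0xDEADBEEFu; // same value as above
    static const size_t kCookieSize = 16;             // same value as above

    static void writeCookie(void* p)
    {
        uint32_t* cookie = static_cast<uint32_t*>(p);
        for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i)
            cookie[i] = kCookieValue;
    }

    static bool checkCookie(const void* p)
    {
        const uint32_t* cookie = static_cast<const uint32_t*>(p);
        for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i) {
            if (cookie[i] != kCookieValue)
                return false;
        }
        return true;
    }

    int main()
    {
        const size_t requestedSize = 32;
        // partitionCookieSizeAdjustAdd: the slot holds the request plus two cookies.
        alignas(uint32_t) unsigned char slot[requestedSize + 2 * kCookieSize];
        writeCookie(slot);                               // leading cookie
        writeCookie(slot + kCookieSize + requestedSize); // trailing cookie
        // The caller is handed the address just past the leading cookie
        // (the inverse of partitionCookieFreePointerAdjust).
        unsigned char* userPtr = slot + kCookieSize;
        std::memset(userPtr, 0, requestedSize); // caller touches only this region
        // On free, both cookies must still be intact.
        assert(checkCookie(userPtr - kCookieSize));
        assert(checkCookie(userPtr + requestedSize));
        return 0;
    }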
| 380 | 380 |
| 381 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) | 381 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) |
| 382 { | 382 { |
| 383 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); | 383 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); |
| 384 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); | 384 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); |
| (...skipping 61 matching lines...) |
| 446 if (LIKELY(ret != 0)) { | 446 if (LIKELY(ret != 0)) { |
| 447 // If these asserts fire, you probably corrupted memory. | 447 // If these asserts fire, you probably corrupted memory. |
| 448 ASSERT(partitionPointerIsValid(ret)); | 448 ASSERT(partitionPointerIsValid(ret)); |
| 449 PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); | 449 PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); |
| 450 page->freelistHead = newHead; | 450 page->freelistHead = newHead; |
| 451 ASSERT(!ret || partitionPointerIsValid(ret)); | 451 ASSERT(!ret || partitionPointerIsValid(ret)); |
| 452 page->numAllocatedSlots++; | 452 page->numAllocatedSlots++; |
| 453 } else { | 453 } else { |
| 454 ret = partitionAllocSlowPath(root, flags, size, bucket); | 454 ret = partitionAllocSlowPath(root, flags, size, bucket); |
| 455 } | 455 } |
| 456 #ifndef NDEBUG | 456 #if ENABLE(ASSERT) |
| 457 if (!ret) | 457 if (!ret) |
| 458 return 0; | 458 return 0; |
| 459 // Fill the uninitialized pattern, and write the cookies. | 459 // Fill the uninitialized pattern, and write the cookies. |
| 460 page = partitionPointerToPage(ret); | 460 page = partitionPointerToPage(ret); |
| 461 size_t bucketSize = page->bucket->slotSize; | 461 size_t bucketSize = page->bucket->slotSize; |
| 462 memset(ret, kUninitializedByte, bucketSize); | 462 memset(ret, kUninitializedByte, bucketSize); |
| 463 partitionCookieWriteValue(ret); | 463 partitionCookieWriteValue(ret); |
| 464 partitionCookieWriteValue(reinterpret_cast<char*>(ret) + bucketSize - kCookieSize); | 464 partitionCookieWriteValue(reinterpret_cast<char*>(ret) + bucketSize - kCookieSize); |
| 465 // The value given to the application is actually just after the cookie. | 465 // The value given to the application is actually just after the cookie. |
| 466 ret = static_cast<char*>(ret) + kCookieSize; | 466 ret = static_cast<char*>(ret) + kCookieSize; |
| (...skipping 14 matching lines...) |
| 481 ASSERT(index < root->numBuckets); | 481 ASSERT(index < root->numBuckets); |
| 482 ASSERT(size == index << kBucketShift); | 482 ASSERT(size == index << kBucketShift); |
| 483 PartitionBucket* bucket = &root->buckets()[index]; | 483 PartitionBucket* bucket = &root->buckets()[index]; |
| 484 return partitionBucketAlloc(root, 0, size, bucket); | 484 return partitionBucketAlloc(root, 0, size, bucket); |
| 485 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 485 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 486 } | 486 } |
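Note: the two ASSERTs above pin down how this non-generic fast path picks its bucket: the size reaching this point must already be a multiple of the allocation granularity, and the index is simply size >> kBucketShift. A minimal sketch of that lookup, assuming kBucketShift == 3 (8-byte granularity on a 64-bit build; the real constant is defined in a skipped hunk):

    #include <cassert>
    #include <cstddef>

    static const size_t kBucketShift = 3; // assumed: log2(sizeof(void*)) on 64-bit

    static size_t bucketIndexForSize(size_t size)
    {
        size_t index = size >> kBucketShift;
        assert(size == index << kBucketShift); // size must be granularity-aligned
        return index;
    }

    int main()
    {
        assert(bucketIndexForSize(8) == 1);
        assert(bucketIndexForSize(64) == 8);
        return 0;
    }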
| 487 | 487 |
| 488 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) | 488 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) |
| 489 { | 489 { |
| 490 // If these asserts fire, you probably corrupted memory. | 490 // If these asserts fire, you probably corrupted memory. |
| 491 #ifndef NDEBUG | 491 #if ENABLE(ASSERT) |
| 492 size_t bucketSize = page->bucket->slotSize; | 492 size_t bucketSize = page->bucket->slotSize; |
| 493 partitionCookieCheckValue(ptr); | 493 partitionCookieCheckValue(ptr); |
| 494 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + bucketSize - kCookieSize); | 494 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + bucketSize - kCookieSize); |
| 495 memset(ptr, kFreedByte, bucketSize); | 495 memset(ptr, kFreedByte, bucketSize); |
| 496 #endif | 496 #endif |
| 497 ASSERT(page->numAllocatedSlots); | 497 ASSERT(page->numAllocatedSlots); |
| 498 PartitionFreelistEntry* freelistHead = page->freelistHead; | 498 PartitionFreelistEntry* freelistHead = page->freelistHead; |
| 499 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); | 499 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); |
| 500 RELEASE_ASSERT(ptr != freelistHead); // Catches an immediate double free. | 500 RELEASE_ASSERT(ptr != freelistHead); // Catches an immediate double free. |
| 501 ASSERT(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug. | 501 ASSERT(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug. |
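Note: the last two checks above are the double-free defenses: the RELEASE_ASSERT fires when the pointer being freed is already the freelist head, and the debug-only ASSERT also peeks one entry deeper. A toy model of just those two checks (it ignores the pointer masking that the real freelist applies):

    #include <cassert>

    struct FreeSlot { FreeSlot* next; };

    struct TinyFreeList {
        FreeSlot* head = nullptr;

        // Mirrors the order of checks in partitionFreeWithPage before the
        // freed slot is pushed onto the list.
        void push(FreeSlot* slot)
        {
            assert(slot != head);                // "immediate double free" (RELEASE_ASSERT above)
            assert(!head || slot != head->next); // "one level deeper" (debug-only ASSERT above)
            slot->next = head;
            head = slot;
        }
    };

    int main()
    {
        FreeSlot a, b;
        TinyFreeList list;
        list.push(&a);
        list.push(&b);
        // list.push(&b); // would trip the "immediate double free" check
        // list.push(&a); // would trip the "one level deeper" check
        return 0;
    }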
| (...skipping 161 matching lines...) |
| 663 using WTF::partitionAlloc; | 663 using WTF::partitionAlloc; |
| 664 using WTF::partitionFree; | 664 using WTF::partitionFree; |
| 665 using WTF::partitionAllocGeneric; | 665 using WTF::partitionAllocGeneric; |
| 666 using WTF::partitionFreeGeneric; | 666 using WTF::partitionFreeGeneric; |
| 667 using WTF::partitionReallocGeneric; | 667 using WTF::partitionReallocGeneric; |
| 668 using WTF::partitionAllocActualSize; | 668 using WTF::partitionAllocActualSize; |
| 669 using WTF::partitionAllocSupportsGetSize; | 669 using WTF::partitionAllocSupportsGetSize; |
| 670 using WTF::partitionAllocGetSize; | 670 using WTF::partitionAllocGetSize; |
| 671 | 671 |
| 672 #endif // WTF_PartitionAlloc_h | 672 #endif // WTF_PartitionAlloc_h |