Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 * | 3 // found in the LICENSE file. |
| 4 * Redistribution and use in source and binary forms, with or without | |
| 5 * modification, are permitted provided that the following conditions are | |
| 6 * met: | |
| 7 * | |
| 8 * * Redistributions of source code must retain the above copyright | |
| 9 * notice, this list of conditions and the following disclaimer. | |
| 10 * * Redistributions in binary form must reproduce the above | |
| 11 * copyright notice, this list of conditions and the following disclaimer | |
| 12 * in the documentation and/or other materials provided with the | |
| 13 * distribution. | |
| 14 * * Neither the name of Google Inc. nor the names of its | |
| 15 * contributors may be used to endorse or promote products derived from | |
| 16 * this software without specific prior written permission. | |
| 17 * | |
| 18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
| 19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
| 20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
| 21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
| 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
| 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
| 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
| 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
| 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
| 29 */ | |
| 30 | 4 |
| 31 #ifndef WTF_PartitionAlloc_h | 5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |
| 32 #define WTF_PartitionAlloc_h | 6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |
| 33 | 7 |
| 34 // DESCRIPTION | 8 // DESCRIPTION |
| 35 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / | 9 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / |
| 36 // partitionFreeGeneric() are approximately analogous to malloc() and free(). | 10 // partitionFreeGeneric() are approximately analogous to malloc() and free(). |
| 37 // | 11 // |
| 38 // The main difference is that a PartitionRoot / PartitionRootGeneric object | 12 // The main difference is that a PartitionRoot / PartitionRootGeneric object |
| 39 // must be supplied to these functions, representing a specific "heap partition" | 13 // must be supplied to these functions, representing a specific "heap partition" |
| 40 // that will be used to satisfy the allocation. Different partitions are | 14 // that will be used to satisfy the allocation. Different partitions are |
| 41 // guaranteed to exist in separate address spaces, including being separate from | 15 // guaranteed to exist in separate address spaces, including being separate from |
| 42 // the main system heap. If the contained objects are all freed, physical memory | 16 // the main system heap. If the contained objects are all freed, physical memory |
| (...skipping 35 matching lines...) | |
| 78 // pages, enabling various simple tricks to try and minimize fragmentation. | 52 // pages, enabling various simple tricks to try and minimize fragmentation. |
| 79 // - Fine-grained bucket sizes leading to less waste and better packing. | 53 // - Fine-grained bucket sizes leading to less waste and better packing. |
| 80 // | 54 // |
| 81 // The following security properties could be investigated in the future: | 55 // The following security properties could be investigated in the future: |
| 82 // - Per-object bucketing (instead of per-size) is mostly available at the API, | 56 // - Per-object bucketing (instead of per-size) is mostly available at the API, |
| 83 // but not used yet. | 57 // but not used yet. |
| 84 // - No randomness of freelist entries or bucket position. | 58 // - No randomness of freelist entries or bucket position. |
| 85 // - Better checking for wild pointers in free(). | 59 // - Better checking for wild pointers in free(). |
| 86 // - Better freelist masking function to guarantee fault on 32-bit. | 60 // - Better freelist masking function to guarantee fault on 32-bit. |
| 87 | 61 |
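
To make the description above concrete, here is a minimal usage sketch against the interfaces this header declares; the global allocator object, the 128-byte request, and the `char` type tag are illustrative, not part of the CL:

```cpp
#include "base/allocator/partition_allocator/partition_alloc.h"

// Never instantiate PartitionRootGeneric directly; go through the
// PartitionAllocatorGeneric wrapper declared near the bottom of this header.
static base::PartitionAllocatorGeneric g_partition;

void Example() {
  g_partition.init();
  void* p = base::partitionAllocGeneric(
      g_partition.root(), 128, PARTITION_HEAP_PROFILER_TYPE_NAME(char));
  base::partitionFreeGeneric(g_partition.root(), p);
  g_partition.shutdown();
}
```
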
| 88 #include "wtf/Assertions.h" | 62 #include <limits.h> |
| 89 #include "wtf/BitwiseOperations.h" | |
| 90 #include "wtf/ByteSwap.h" | |
| 91 #include "wtf/CPU.h" | |
| 92 #include "wtf/SpinLock.h" | |
| 93 #include "wtf/TypeTraits.h" | |
| 94 #include "wtf/allocator/PageAllocator.h" | |
| 95 | 63 |
| 96 #include <limits.h> | 64 #include "base/allocator/partition_allocator/page_allocator.h" |
| 65 #include "base/bits.h" | |
| 66 #include "base/compiler_specific.h" | |
| 67 #include "base/synchronization/spin_lock.h" | |

> Primiano Tucci (use gerrit), 2016/11/22 14:28:33:
> IWYU: +base/logging.h for DCHECK_IS_ON
> +build/buil
>
> palmer, 2016/11/24 01:05:56:
> Done.

| 97 | 68 |
| 98 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 69 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 99 #include <stdlib.h> | 70 #include <stdlib.h> |
| 100 #endif | 71 #endif |
| 101 | 72 |
| 102 #if ENABLE(ASSERT) | 73 namespace base { |
| 103 #include <string.h> | |
| 104 #endif | |
| 105 | |
| 106 namespace WTF { | |
| 107 | 74 |
| 108 // Allocation granularity of sizeof(void*) bytes. | 75 // Allocation granularity of sizeof(void*) bytes. |
| 109 static const size_t kAllocationGranularity = sizeof(void*); | 76 static const size_t kAllocationGranularity = sizeof(void*); |

> Primiano Tucci (use gerrit), 2016/11/22 14:28:33:
> it feels like a lot of this stuff should be in bas
>
> palmer, 2016/11/24 01:05:56:
> I am thinking of putting the public interfaces in

| 110 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1; | 77 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1; |
| 111 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; | 78 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; |
| 112 | 79 |
| 113 // Underlying partition storage pages are a power-of-two size. It is typical | 80 // Underlying partition storage pages are a power-of-two size. It is typical |
| 114 // for a partition page to be based on multiple system pages. Most references to | 81 // for a partition page to be based on multiple system pages. Most references to |
| 115 // "page" refer to partition pages. | 82 // "page" refer to partition pages. |
| 116 // We also have the concept of "super pages" -- these are the underlying system | 83 // We also have the concept of "super pages" -- these are the underlying system |
| 117 // allocations we make. Super pages contain multiple partition pages inside them | 84 // allocations we make. Super pages contain multiple partition pages inside them |
| 118 // and include space for a small amount of metadata per partition page. | 85 // and include space for a small amount of metadata per partition page. |
| 119 // Inside super pages, we store "slot spans". A slot span is a contiguous range | 86 // Inside super pages, we store "slot spans". A slot span is a contiguous range |
| (...skipping 114 matching lines...) | |
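
For orientation, the super-page layout implied by the paragraph above (and by the pointer arithmetic in partitionSuperPageToMetadataArea() and partitionPointerToPageNoAlignmentCheck() further down) is roughly the following; this is a reconstruction, not a figure from the CL:

```cpp
// One super page (a single system allocation):
//
//   +------------------+----------------+-----+----------------+-------+
//   | guard + metadata | partition page | ... | partition page | guard |
//   | (metadata starts |       1        |     |     N - 2      | page  |
//   | one system page  |                |     |                |       |
//   | into the super   |                |     |                |       |
//   | page)            |                |     |                |       |
//   +------------------+----------------+-----+----------------+-------+
//
// Partition page index 0 (metadata/guard) and the last index (guard) are
// never handed out, which is what the DCHECKs on partitionPageIndex in the
// accessors below enforce.
```
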
| 234 // Constants for the memory reclaim logic. | 201 // Constants for the memory reclaim logic. |
| 235 static const size_t kMaxFreeableSpans = 16; | 202 static const size_t kMaxFreeableSpans = 16; |
| 236 | 203 |
| 237 // If the total size in bytes of allocated but not committed pages exceeds this | 204 // If the total size in bytes of allocated but not committed pages exceeds this |
| 238 // value (probably it is an "out of virtual address space" crash), | 205 // value (probably it is an "out of virtual address space" crash), |
| 239 // a special crash stack trace is generated at |partitionOutOfMemory|. | 206 // a special crash stack trace is generated at |partitionOutOfMemory|. |
| 240 // This is to distinguish "out of virtual address space" from | 207 // This is to distinguish "out of virtual address space" from |
| 241 // "out of physical memory" in crash reports. | 208 // "out of physical memory" in crash reports. |
| 242 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB | 209 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB |
| 243 | 210 |
| 244 #if ENABLE(ASSERT) | 211 #if DCHECK_IS_ON() |
| 245 // These two byte values match tcmalloc. | 212 // These two byte values match tcmalloc. |
| 246 static const unsigned char kUninitializedByte = 0xAB; | 213 static const unsigned char kUninitializedByte = 0xAB; |
| 247 static const unsigned char kFreedByte = 0xCD; | 214 static const unsigned char kFreedByte = 0xCD; |
| 248 static const size_t kCookieSize = | 215 static const size_t kCookieSize = |
| 249 16; // Handles alignment up to XMM instructions on Intel. | 216 16; // Handles alignment up to XMM instructions on Intel. |
| 250 static const unsigned char kCookieValue[kCookieSize] = { | 217 static const unsigned char kCookieValue[kCookieSize] = { |
| 251 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, | 218 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, |
| 252 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; | 219 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; |
| 253 #endif | 220 #endif |
| 254 | 221 |
| (...skipping 59 matching lines...) | |
| 314 PartitionSuperPageExtentEntry* next; | 281 PartitionSuperPageExtentEntry* next; |
| 315 }; | 282 }; |
| 316 | 283 |
| 317 struct PartitionDirectMapExtent { | 284 struct PartitionDirectMapExtent { |
| 318 PartitionDirectMapExtent* nextExtent; | 285 PartitionDirectMapExtent* nextExtent; |
| 319 PartitionDirectMapExtent* prevExtent; | 286 PartitionDirectMapExtent* prevExtent; |
| 320 PartitionBucket* bucket; | 287 PartitionBucket* bucket; |
| 321 size_t mapSize; // Mapped size, not including guard pages and meta-data. | 288 size_t mapSize; // Mapped size, not including guard pages and meta-data. |
| 322 }; | 289 }; |
| 323 | 290 |
| 324 struct WTF_EXPORT PartitionRootBase { | 291 struct BASE_EXPORT PartitionRootBase { |
| 325 size_t totalSizeOfCommittedPages; | 292 size_t totalSizeOfCommittedPages; |
| 326 size_t totalSizeOfSuperPages; | 293 size_t totalSizeOfSuperPages; |
| 327 size_t totalSizeOfDirectMappedPages; | 294 size_t totalSizeOfDirectMappedPages; |
| 328 // Invariant: totalSizeOfCommittedPages <= | 295 // Invariant: totalSizeOfCommittedPages <= |
| 329 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. | 296 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. |
| 330 unsigned numBuckets; | 297 unsigned numBuckets; |
| 331 unsigned maxAllocation; | 298 unsigned maxAllocation; |
| 332 bool initialized; | 299 bool initialized; |
| 333 char* nextSuperPage; | 300 char* nextSuperPage; |
| 334 char* nextPartitionPage; | 301 char* nextPartitionPage; |
| 335 char* nextPartitionPageEnd; | 302 char* nextPartitionPageEnd; |
| 336 PartitionSuperPageExtentEntry* currentExtent; | 303 PartitionSuperPageExtentEntry* currentExtent; |
| 337 PartitionSuperPageExtentEntry* firstExtent; | 304 PartitionSuperPageExtentEntry* firstExtent; |
| 338 PartitionDirectMapExtent* directMapList; | 305 PartitionDirectMapExtent* directMapList; |
| 339 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; | 306 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; |
| 340 int16_t globalEmptyPageRingIndex; | 307 int16_t globalEmptyPageRingIndex; |
| 341 uintptr_t invertedSelf; | 308 uintptr_t invertedSelf; |
| 342 | 309 |
| 343 static SpinLock gInitializedLock; | 310 static subtle::SpinLock gInitializedLock; |
| 344 static bool gInitialized; | 311 static bool gInitialized; |
| 345 // gSeedPage is used as a sentinel to indicate that there is no page | 312 // gSeedPage is used as a sentinel to indicate that there is no page |
| 346 // in the active page list. We can use nullptr, but in that case we need | 313 // in the active page list. We can use nullptr, but in that case we need |
| 347 // to add a null-check branch to the hot allocation path. We want to avoid | 314 // to add a null-check branch to the hot allocation path. We want to avoid |
| 348 // that. | 315 // that. |
| 349 static PartitionPage gSeedPage; | 316 static PartitionPage gSeedPage; |
| 350 static PartitionBucket gPagedBucket; | 317 static PartitionBucket gPagedBucket; |
| 351 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. | 318 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. |
| 352 static void (*gOomHandlingFunction)(); | 319 static void (*gOomHandlingFunction)(); |
| 353 }; | 320 }; |
| 354 | 321 |
| 355 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. | 322 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. |
| 356 struct PartitionRoot : public PartitionRootBase { | 323 struct PartitionRoot : public PartitionRootBase { |
| 357 // The PartitionAlloc templated class ensures the following is correct. | 324 // The PartitionAlloc templated class ensures the following is correct. |
| 358 ALWAYS_INLINE PartitionBucket* buckets() { | 325 ALWAYS_INLINE PartitionBucket* buckets() { |
| 359 return reinterpret_cast<PartitionBucket*>(this + 1); | 326 return reinterpret_cast<PartitionBucket*>(this + 1); |
| 360 } | 327 } |
| 361 ALWAYS_INLINE const PartitionBucket* buckets() const { | 328 ALWAYS_INLINE const PartitionBucket* buckets() const { |
| 362 return reinterpret_cast<const PartitionBucket*>(this + 1); | 329 return reinterpret_cast<const PartitionBucket*>(this + 1); |
| 363 } | 330 } |
| 364 }; | 331 }; |
| 365 | 332 |
| 366 // Never instantiate a PartitionRootGeneric directly, instead use | 333 // Never instantiate a PartitionRootGeneric directly, instead use |
| 367 // PartitionAllocatorGeneric. | 334 // PartitionAllocatorGeneric. |
| 368 struct PartitionRootGeneric : public PartitionRootBase { | 335 struct PartitionRootGeneric : public PartitionRootBase { |
| 369 SpinLock lock; | 336 subtle::SpinLock lock; |
| 370 // Some pre-computed constants. | 337 // Some pre-computed constants. |
| 371 size_t orderIndexShifts[kBitsPerSizet + 1]; | 338 size_t orderIndexShifts[kBitsPerSizet + 1]; |
| 372 size_t orderSubIndexMasks[kBitsPerSizet + 1]; | 339 size_t orderSubIndexMasks[kBitsPerSizet + 1]; |
| 373 // The bucket lookup table lets us map a size_t to a bucket quickly. | 340 // The bucket lookup table lets us map a size_t to a bucket quickly. |
| 374 // The trailing +1 caters for the overflow case for very large allocation | 341 // The trailing +1 caters for the overflow case for very large allocation |
| 375 // sizes. It is one flat array instead of a 2D array because in the 2D | 342 // sizes. It is one flat array instead of a 2D array because in the 2D |
| 376 // world, we'd need to index array[blah][max+1] which risks undefined | 343 // world, we'd need to index array[blah][max+1] which risks undefined |
| 377 // behavior. | 344 // behavior. |
| 378 PartitionBucket* | 345 PartitionBucket* |
| 379 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; | 346 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; |
| (...skipping 33 matching lines...) | |
| 413 uint32_t numActivePages; // Number of pages that have at least one | 380 uint32_t numActivePages; // Number of pages that have at least one |
| 414 // provisioned slot. | 381 // provisioned slot. |
| 415 uint32_t numEmptyPages; // Number of pages that are empty | 382 uint32_t numEmptyPages; // Number of pages that are empty |
| 416 // but not decommitted. | 383 // but not decommitted. |
| 417 uint32_t numDecommittedPages; // Number of pages that are empty | 384 uint32_t numDecommittedPages; // Number of pages that are empty |
| 418 // and decommitted. | 385 // and decommitted. |
| 419 }; | 386 }; |
| 420 | 387 |
| 421 // Interface that is passed to partitionDumpStats and | 388 // Interface that is passed to partitionDumpStats and |
| 422 // partitionDumpStatsGeneric for using the memory statistics. | 389 // partitionDumpStatsGeneric for using the memory statistics. |
| 423 class WTF_EXPORT PartitionStatsDumper { | 390 class BASE_EXPORT PartitionStatsDumper { |
| 424 public: | 391 public: |
| 425 // Called to dump total memory used by partition, once per partition. | 392 // Called to dump total memory used by partition, once per partition. |
| 426 virtual void partitionDumpTotals(const char* partitionName, | 393 virtual void partitionDumpTotals(const char* partitionName, |
| 427 const PartitionMemoryStats*) = 0; | 394 const PartitionMemoryStats*) = 0; |
| 428 | 395 |
| 429 // Called to dump stats about buckets, for each bucket. | 396 // Called to dump stats about buckets, for each bucket. |
| 430 virtual void partitionsDumpBucketStats(const char* partitionName, | 397 virtual void partitionsDumpBucketStats(const char* partitionName, |
| 431 const PartitionBucketMemoryStats*) = 0; | 398 const PartitionBucketMemoryStats*) = 0; |
| 432 }; | 399 }; |
| 433 | 400 |
| 434 WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); | 401 BASE_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); |
| 435 WTF_EXPORT void partitionAllocInit(PartitionRoot*, | 402 BASE_EXPORT void partitionAllocInit(PartitionRoot*, |
| 436 size_t numBuckets, | 403 size_t numBuckets, |
| 437 size_t maxAllocation); | 404 size_t maxAllocation); |
| 438 WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*); | 405 BASE_EXPORT bool partitionAllocShutdown(PartitionRoot*); |
| 439 WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); | 406 BASE_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); |
| 440 WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); | 407 BASE_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); |
| 441 | 408 |
| 442 enum PartitionPurgeFlags { | 409 enum PartitionPurgeFlags { |
| 443 // Decommitting the ring list of empty pages is reasonably fast. | 410 // Decommitting the ring list of empty pages is reasonably fast. |
| 444 PartitionPurgeDecommitEmptyPages = 1 << 0, | 411 PartitionPurgeDecommitEmptyPages = 1 << 0, |
| 445 // Discarding unused system pages is slower, because it involves walking all | 412 // Discarding unused system pages is slower, because it involves walking all |
| 446 // freelists in all active partition pages of all buckets >= system page | 413 // freelists in all active partition pages of all buckets >= system page |
| 447 // size. It often frees a similar amount of memory to decommitting the empty | 414 // size. It often frees a similar amount of memory to decommitting the empty |
| 448 // pages, though. | 415 // pages, though. |
| 449 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, | 416 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, |
| 450 }; | 417 }; |
| 451 | 418 |
| 452 WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int); | 419 BASE_EXPORT void partitionPurgeMemory(PartitionRoot*, int); |
| 453 WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); | 420 BASE_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); |
| 454 | 421 |
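
For example, a full purge of a generic partition combines both flags (a sketch; the function takes the flags as a plain int):

```cpp
void PurgeEverything(base::PartitionRootGeneric* root) {
  base::partitionPurgeMemoryGeneric(
      root, base::PartitionPurgeDecommitEmptyPages |
                base::PartitionPurgeDiscardUnusedSystemPages);
}
```
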
| 455 WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, | 422 BASE_EXPORT NOINLINE void* partitionAllocSlowPath(PartitionRootBase*, |
| 456 int, | 423 int, |
| 457 size_t, | 424 size_t, |
| 458 PartitionBucket*); | 425 PartitionBucket*); |
| 459 WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*); | 426 BASE_EXPORT NOINLINE void partitionFreeSlowPath(PartitionPage*); |
| 460 WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, | 427 BASE_EXPORT NOINLINE void* partitionReallocGeneric(PartitionRootGeneric*, |
| 461 void*, | 428 void*, |
| 462 size_t, | 429 size_t, |
| 463 const char* typeName); | 430 const char* typeName); |
| 464 | 431 |
| 465 WTF_EXPORT void partitionDumpStats(PartitionRoot*, | 432 BASE_EXPORT void partitionDumpStats(PartitionRoot*, |
| 466 const char* partitionName, | 433 const char* partitionName, |
| 467 bool isLightDump, | 434 bool isLightDump, |
| 468 PartitionStatsDumper*); | 435 PartitionStatsDumper*); |
| 469 WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, | 436 BASE_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, |
| 470 const char* partitionName, | 437 const char* partitionName, |
| 471 bool isLightDump, | 438 bool isLightDump, |
| 472 PartitionStatsDumper*); | 439 PartitionStatsDumper*); |
| 473 | 440 |
| 474 class WTF_EXPORT PartitionAllocHooks { | 441 class BASE_EXPORT PartitionAllocHooks { |
| 475 public: | 442 public: |
| 476 typedef void AllocationHook(void* address, size_t, const char* typeName); | 443 typedef void AllocationHook(void* address, size_t, const char* typeName); |
| 477 typedef void FreeHook(void* address); | 444 typedef void FreeHook(void* address); |
| 478 | 445 |
| 479 static void setAllocationHook(AllocationHook* hook) { | 446 static void setAllocationHook(AllocationHook* hook) { |
| 480 m_allocationHook = hook; | 447 m_allocationHook = hook; |
| 481 } | 448 } |
| 482 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } | 449 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } |
| 483 | 450 |
| 484 static void allocationHookIfEnabled(void* address, | 451 static void allocationHookIfEnabled(void* address, |
| (...skipping 26 matching lines...) | |
| 511 private: | 478 private: |
| 512 // Pointers to hook functions that PartitionAlloc will call on allocation and | 479 // Pointers to hook functions that PartitionAlloc will call on allocation and |
| 513 // free if the pointers are non-null. | 480 // free if the pointers are non-null. |
| 514 static AllocationHook* m_allocationHook; | 481 static AllocationHook* m_allocationHook; |
| 515 static FreeHook* m_freeHook; | 482 static FreeHook* m_freeHook; |
| 516 }; | 483 }; |
| 517 | 484 |
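
Registering hooks is a pair of static calls; a hypothetical profiler hookup (the hook functions themselves are illustrative):

```cpp
void MyAllocationHook(void* address, size_t size, const char* typeName) {
  // Record the allocation, e.g. for a heap profiler.
}

void MyFreeHook(void* address) {
  // Record the free.
}

void InstallPartitionAllocHooks() {
  base::PartitionAllocHooks::setAllocationHook(&MyAllocationHook);
  base::PartitionAllocHooks::setFreeHook(&MyFreeHook);
}
```
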
| 518 // In official builds, do not include type info string literals to avoid | 485 // In official builds, do not include type info string literals to avoid |
| 519 // bloating the binary. | 486 // bloating the binary. |
| 520 #if defined(OFFICIAL_BUILD) | 487 #if defined(OFFICIAL_BUILD) |
| 521 #define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr | 488 #define PARTITION_HEAP_PROFILER_TYPE_NAME(T) nullptr |
| 522 #else | 489 #else |
| 523 #define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>() | 490 #define PARTITION_HEAP_PROFILER_TYPE_NAME(T) GetStringWithTypeName<T>() |
| 524 #endif | 491 #endif |
| 525 | 492 |
| 526 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( | 493 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( |
| 527 PartitionFreelistEntry* ptr) { | 494 PartitionFreelistEntry* ptr) { |
| 528 // We use bswap on little endian as a fast mask for two reasons: | 495 // We use bswap on little endian as a fast mask for two reasons: |
| 529 // 1) If an object is freed and its vtable used where the attacker doesn't | 496 // 1) If an object is freed and its vtable used where the attacker doesn't |
| 530 // get the chance to run allocations between the free and use, the vtable | 497 // get the chance to run allocations between the free and use, the vtable |
| 531 // dereference is likely to fault. | 498 // dereference is likely to fault. |
| 532 // 2) If the attacker has a linear buffer overflow and elects to try and | 499 // 2) If the attacker has a linear buffer overflow and elects to try and |
| 533 // corrupt a freelist pointer, partial pointer overwrite attacks are | 500 // corrupt a freelist pointer, partial pointer overwrite attacks are |
| 534 // thwarted. | 501 // thwarted. |
| 535 // For big endian, similar guarantees are arrived at with a negation. | 502 // For big endian, similar guarantees are arrived at with a negation. |
| 536 #if CPU(BIG_ENDIAN) | 503 #if CPU(BIG_ENDIAN) |
| 537 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); | 504 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); |
| 538 #else | 505 #else |
| 539 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr)); | 506 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr)); |
| 540 #endif | 507 #endif |
| 541 return reinterpret_cast<PartitionFreelistEntry*>(masked); | 508 return reinterpret_cast<PartitionFreelistEntry*>(masked); |
| 542 } | 509 } |
| 543 | 510 |
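
To see why a byte swap works as a mask on little endian, consider what it does to a plausible 64-bit user-space address. A standalone illustration, assuming a 64-bit x86 target and a GCC/Clang toolchain for `__builtin_bswap64` (not part of the CL):

```cpp
#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t ptr = 0x00007f1234567890;         // plausible user-space address
  uintptr_t masked = __builtin_bswap64(ptr);  // 0x90785634127f0000
  // The masked value is non-canonical on x86-64, so dereferencing a stale,
  // still-masked freelist pointer faults instead of being silently usable.
  printf("%" PRIxPTR " -> %" PRIxPTR "\n", ptr, masked);
  return 0;
}
```
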
| 544 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { | 511 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { |
| 545 #if ENABLE(ASSERT) | 512 #if DCHECK_IS_ON() |
| 546 // Add space for cookies, checking for integer overflow. | 513 // Add space for cookies, checking for integer overflow. |
| 547 ASSERT(size + (2 * kCookieSize) > size); | 514 DCHECK(size + (2 * kCookieSize) > size); |
| 548 size += 2 * kCookieSize; | 515 size += 2 * kCookieSize; |
| 549 #endif | 516 #endif |
| 550 return size; | 517 return size; |
| 551 } | 518 } |
| 552 | 519 |
| 553 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { | 520 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { |
| 554 #if ENABLE(ASSERT) | 521 #if DCHECK_IS_ON() |
| 555 // Remove space for cookies. | 522 // Remove space for cookies. |
| 556 ASSERT(size >= 2 * kCookieSize); | 523 DCHECK(size >= 2 * kCookieSize); |
| 557 size -= 2 * kCookieSize; | 524 size -= 2 * kCookieSize; |
| 558 #endif | 525 #endif |
| 559 return size; | 526 return size; |
| 560 } | 527 } |
| 561 | 528 |
| 562 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { | 529 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { |
| 563 #if ENABLE(ASSERT) | 530 #if DCHECK_IS_ON() |
| 564 // The value given to the application is actually just after the cookie. | 531 // The value given to the application is actually just after the cookie. |
| 565 ptr = static_cast<char*>(ptr) - kCookieSize; | 532 ptr = static_cast<char*>(ptr) - kCookieSize; |
| 566 #endif | 533 #endif |
| 567 return ptr; | 534 return ptr; |
| 568 } | 535 } |
| 569 | 536 |
| 570 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { | 537 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { |
| 571 #if ENABLE(ASSERT) | 538 #if DCHECK_IS_ON() |
| 572 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); | 539 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); |
| 573 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) | 540 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) |
| 574 *cookiePtr = kCookieValue[i]; | 541 *cookiePtr = kCookieValue[i]; |
| 575 #endif | 542 #endif |
| 576 } | 543 } |
| 577 | 544 |
| 578 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { | 545 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { |
| 579 #if ENABLE(ASSERT) | 546 #if DCHECK_IS_ON() |
| 580 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); | 547 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); |
| 581 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) | 548 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) |
| 582 ASSERT(*cookiePtr == kCookieValue[i]); | 549 DCHECK(*cookiePtr == kCookieValue[i]); |
| 583 #endif | 550 #endif |
| 584 } | 551 } |
| 585 | 552 |
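
Taken together, the four helpers above mean that in DCHECK builds each allocation is laid out as follows; this is a sketch of what the code implies, not a diagram from the CL:

```cpp
//  |<- kCookieSize ->|<-- requested size -->|<- kCookieSize ->|
//  +-----------------+----------------------+-----------------+
//  | leading cookie  |      user data       | trailing cookie |
//  +-----------------+----------------------+-----------------+
//                    ^ pointer returned to the application
//
// partitionCookieSizeAdjustAdd() grows the request by 2 * kCookieSize, the
// write/check helpers fill and verify both cookies, and
// partitionCookieFreePointerAdjust() steps back over the leading cookie on
// free. In non-DCHECK builds all four compile to no-ops.
```
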
| 586 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { | 553 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { |
| 587 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); | 554 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); |
| 588 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); | 555 DCHECK(!(pointerAsUint & kSuperPageOffsetMask)); |
| 589 // The metadata area is exactly one system page (the guard page) into the | 556 // The metadata area is exactly one system page (the guard page) into the |
| 590 // super page. | 557 // super page. |
| 591 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); | 558 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); |
| 592 } | 559 } |
| 593 | 560 |
| 594 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { | 561 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { |
| 595 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); | 562 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); |
| 596 char* superPagePtr = | 563 char* superPagePtr = |
| 597 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); | 564 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); |
| 598 uintptr_t partitionPageIndex = | 565 uintptr_t partitionPageIndex = |
| 599 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; | 566 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; |
| 600 // Index 0 is invalid because it is the metadata and guard area and | 567 // Index 0 is invalid because it is the metadata and guard area and |
| 601 // the last index is invalid because it is a guard page. | 568 // the last index is invalid because it is a guard page. |
| 602 ASSERT(partitionPageIndex); | 569 DCHECK(partitionPageIndex); |
| 603 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); | 570 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); |
| 604 PartitionPage* page = reinterpret_cast<PartitionPage*>( | 571 PartitionPage* page = reinterpret_cast<PartitionPage*>( |
| 605 partitionSuperPageToMetadataArea(superPagePtr) + | 572 partitionSuperPageToMetadataArea(superPagePtr) + |
| 606 (partitionPageIndex << kPageMetadataShift)); | 573 (partitionPageIndex << kPageMetadataShift)); |
| 607 // Partition pages in the same slot span can share the same page object. | 574 // Partition pages in the same slot span can share the same page object. |
| 608 // Adjust for that. | 575 // Adjust for that. |
| 609 size_t delta = page->pageOffset << kPageMetadataShift; | 576 size_t delta = page->pageOffset << kPageMetadataShift; |
| 610 page = | 577 page = |
| 611 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); | 578 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); |
| 612 return page; | 579 return page; |
| 613 } | 580 } |
| 614 | 581 |
| 615 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { | 582 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { |
| 616 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); | 583 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); |
| 617 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); | 584 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); |
| 618 ASSERT(superPageOffset > kSystemPageSize); | 585 DCHECK(superPageOffset > kSystemPageSize); |
| 619 ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * | 586 DCHECK(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * |
| 620 kPageMetadataSize)); | 587 kPageMetadataSize)); |
| 621 uintptr_t partitionPageIndex = | 588 uintptr_t partitionPageIndex = |
| 622 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; | 589 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; |
| 623 // Index 0 is invalid because it is the metadata area and the last index is | 590 // Index 0 is invalid because it is the metadata area and the last index is |
| 624 // invalid because it is a guard page. | 591 // invalid because it is a guard page. |
| 625 ASSERT(partitionPageIndex); | 592 DCHECK(partitionPageIndex); |
| 626 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); | 593 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); |
| 627 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); | 594 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); |
| 628 void* ret = reinterpret_cast<void*>( | 595 void* ret = reinterpret_cast<void*>( |
| 629 superPageBase + (partitionPageIndex << kPartitionPageShift)); | 596 superPageBase + (partitionPageIndex << kPartitionPageShift)); |
| 630 return ret; | 597 return ret; |
| 631 } | 598 } |
| 632 | 599 |
| 633 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { | 600 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { |
| 634 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); | 601 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); |
| 635 // Checks that the pointer is a multiple of bucket size. | 602 // Checks that the pointer is a multiple of bucket size. |
| 636 ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - | 603 DCHECK(!((reinterpret_cast<uintptr_t>(ptr) - |
| 637 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % | 604 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % |
| 638 page->bucket->slotSize)); | 605 page->bucket->slotSize)); |
| 639 return page; | 606 return page; |
| 640 } | 607 } |
| 641 | 608 |
| 642 ALWAYS_INLINE bool partitionBucketIsDirectMapped( | 609 ALWAYS_INLINE bool partitionBucketIsDirectMapped( |
| 643 const PartitionBucket* bucket) { | 610 const PartitionBucket* bucket) { |
| 644 return !bucket->numSystemPagesPerSlotSpan; | 611 return !bucket->numSystemPagesPerSlotSpan; |
| 645 } | 612 } |
| 646 | 613 |
| 647 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { | 614 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { |
| 648 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; | 615 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; |
| 649 } | 616 } |
| 650 | 617 |
| 651 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { | 618 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { |
| 652 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); | 619 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); |
| 653 } | 620 } |
| 654 | 621 |
| 655 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { | 622 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { |
| 656 // For single-slot buckets which span more than one partition page, we | 623 // For single-slot buckets which span more than one partition page, we |
| 657 // have some spare metadata space to store the raw allocation size. We | 624 // have some spare metadata space to store the raw allocation size. We |
| 658 // can use this to report better statistics. | 625 // can use this to report better statistics. |
| 659 PartitionBucket* bucket = page->bucket; | 626 PartitionBucket* bucket = page->bucket; |
| 660 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) | 627 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) |
| 661 return nullptr; | 628 return nullptr; |
| 662 | 629 |
| 663 ASSERT((bucket->slotSize % kSystemPageSize) == 0); | 630 DCHECK((bucket->slotSize % kSystemPageSize) == 0); |
| 664 ASSERT(partitionBucketIsDirectMapped(bucket) || | 631 DCHECK(partitionBucketIsDirectMapped(bucket) || |
| 665 partitionBucketSlots(bucket) == 1); | 632 partitionBucketSlots(bucket) == 1); |
| 666 page++; | 633 page++; |
| 667 return reinterpret_cast<size_t*>(&page->freelistHead); | 634 return reinterpret_cast<size_t*>(&page->freelistHead); |
| 668 } | 635 } |
| 669 | 636 |
| 670 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { | 637 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { |
| 671 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | 638 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); |
| 672 if (UNLIKELY(rawSizePtr != nullptr)) | 639 if (UNLIKELY(rawSizePtr != nullptr)) |
| 673 return *rawSizePtr; | 640 return *rawSizePtr; |
| 674 return 0; | 641 return 0; |
| (...skipping 11 matching lines...) | |
| 686 PartitionRootBase* root = partitionPageToRoot(page); | 653 PartitionRootBase* root = partitionPageToRoot(page); |
| 687 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); | 654 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); |
| 688 } | 655 } |
| 689 | 656 |
| 690 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, | 657 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, |
| 691 int flags, | 658 int flags, |
| 692 size_t size, | 659 size_t size, |
| 693 PartitionBucket* bucket) { | 660 PartitionBucket* bucket) { |
| 694 PartitionPage* page = bucket->activePagesHead; | 661 PartitionPage* page = bucket->activePagesHead; |
| 695 // Check that this page is neither full nor freed. | 662 // Check that this page is neither full nor freed. |
| 696 ASSERT(page->numAllocatedSlots >= 0); | 663 DCHECK(page->numAllocatedSlots >= 0); |
| 697 void* ret = page->freelistHead; | 664 void* ret = page->freelistHead; |
| 698 if (LIKELY(ret != 0)) { | 665 if (LIKELY(ret != 0)) { |
| 699 // If these asserts fire, you probably corrupted memory. | 666 // If these asserts fire, you probably corrupted memory. |
| 700 ASSERT(partitionPointerIsValid(ret)); | 667 DCHECK(partitionPointerIsValid(ret)); |
| 701 // All large allocations must go through the slow path to correctly | 668 // All large allocations must go through the slow path to correctly |
| 702 // update the size metadata. | 669 // update the size metadata. |
| 703 ASSERT(partitionPageGetRawSize(page) == 0); | 670 DCHECK(partitionPageGetRawSize(page) == 0); |
| 704 PartitionFreelistEntry* newHead = | 671 PartitionFreelistEntry* newHead = |
| 705 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); | 672 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); |
| 706 page->freelistHead = newHead; | 673 page->freelistHead = newHead; |
| 707 page->numAllocatedSlots++; | 674 page->numAllocatedSlots++; |
| 708 } else { | 675 } else { |
| 709 ret = partitionAllocSlowPath(root, flags, size, bucket); | 676 ret = partitionAllocSlowPath(root, flags, size, bucket); |
| 710 ASSERT(!ret || partitionPointerIsValid(ret)); | 677 DCHECK(!ret || partitionPointerIsValid(ret)); |
| 711 } | 678 } |
| 712 #if ENABLE(ASSERT) | 679 #if DCHECK_IS_ON() |
| 713 if (!ret) | 680 if (!ret) |
| 714 return 0; | 681 return 0; |
| 715 // Fill the uninitialized pattern, and write the cookies. | 682 // Fill the uninitialized pattern, and write the cookies. |
| 716 page = partitionPointerToPage(ret); | 683 page = partitionPointerToPage(ret); |
| 717 size_t slotSize = page->bucket->slotSize; | 684 size_t slotSize = page->bucket->slotSize; |
| 718 size_t rawSize = partitionPageGetRawSize(page); | 685 size_t rawSize = partitionPageGetRawSize(page); |
| 719 if (rawSize) { | 686 if (rawSize) { |
| 720 ASSERT(rawSize == size); | 687 DCHECK(rawSize == size); |
| 721 slotSize = rawSize; | 688 slotSize = rawSize; |
| 722 } | 689 } |
| 723 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); | 690 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); |
| 724 char* charRet = static_cast<char*>(ret); | 691 char* charRet = static_cast<char*>(ret); |
| 725 // The value given to the application is actually just after the cookie. | 692 // The value given to the application is actually just after the cookie. |
| 726 ret = charRet + kCookieSize; | 693 ret = charRet + kCookieSize; |
| 727 memset(ret, kUninitializedByte, noCookieSize); | 694 memset(ret, kUninitializedByte, noCookieSize); |
| 728 partitionCookieWriteValue(charRet); | 695 partitionCookieWriteValue(charRet); |
| 729 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); | 696 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); |
| 730 #endif | 697 #endif |
| 731 return ret; | 698 return ret; |
| 732 } | 699 } |
| 733 | 700 |
| 734 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, | 701 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, |
| 735 size_t size, | 702 size_t size, |
| 736 const char* typeName) { | 703 const char* typeName) { |
| 737 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 704 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 738 void* result = malloc(size); | 705 void* result = malloc(size); |
| 739 RELEASE_ASSERT(result); | 706 CHECK(result); |
| 740 return result; | 707 return result; |
| 741 #else | 708 #else |
| 742 size_t requestedSize = size; | 709 size_t requestedSize = size; |
| 743 size = partitionCookieSizeAdjustAdd(size); | 710 size = partitionCookieSizeAdjustAdd(size); |
| 744 ASSERT(root->initialized); | 711 DCHECK(root->initialized); |
| 745 size_t index = size >> kBucketShift; | 712 size_t index = size >> kBucketShift; |
| 746 ASSERT(index < root->numBuckets); | 713 DCHECK(index < root->numBuckets); |
| 747 ASSERT(size == index << kBucketShift); | 714 DCHECK(size == index << kBucketShift); |
| 748 PartitionBucket* bucket = &root->buckets()[index]; | 715 PartitionBucket* bucket = &root->buckets()[index]; |
| 749 void* result = partitionBucketAlloc(root, 0, size, bucket); | 716 void* result = partitionBucketAlloc(root, 0, size, bucket); |
| 750 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); | 717 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); |
| 751 return result; | 718 return result; |
| 752 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 719 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 753 } | 720 } |
| 754 | 721 |
| 755 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { | 722 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { |
| 756 // If these asserts fire, you probably corrupted memory. | 723 // If these asserts fire, you probably corrupted memory. |
| 757 #if ENABLE(ASSERT) | 724 #if DCHECK_IS_ON() |
| 758 size_t slotSize = page->bucket->slotSize; | 725 size_t slotSize = page->bucket->slotSize; |
| 759 size_t rawSize = partitionPageGetRawSize(page); | 726 size_t rawSize = partitionPageGetRawSize(page); |
| 760 if (rawSize) | 727 if (rawSize) |
| 761 slotSize = rawSize; | 728 slotSize = rawSize; |
| 762 partitionCookieCheckValue(ptr); | 729 partitionCookieCheckValue(ptr); |
| 763 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - | 730 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - |
| 764 kCookieSize); | 731 kCookieSize); |
| 765 memset(ptr, kFreedByte, slotSize); | 732 memset(ptr, kFreedByte, slotSize); |
| 766 #endif | 733 #endif |
| 767 ASSERT(page->numAllocatedSlots); | 734 DCHECK(page->numAllocatedSlots); |
| 768 PartitionFreelistEntry* freelistHead = page->freelistHead; | 735 PartitionFreelistEntry* freelistHead = page->freelistHead; |
| 769 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); | 736 DCHECK(!freelistHead || partitionPointerIsValid(freelistHead)); |
| 770 SECURITY_CHECK(ptr != freelistHead); // Catches an immediate double free. | 737 SECURITY_CHECK(ptr != freelistHead); // Catches an immediate double free. |
| 771 // Look for double free one level deeper in debug. | 738 // Look for double free one level deeper in debug. |
| 772 SECURITY_DCHECK(!freelistHead || | 739 SECURITY_DCHECK(!freelistHead || |
| 773 ptr != partitionFreelistMask(freelistHead->next)); | 740 ptr != partitionFreelistMask(freelistHead->next)); |
| 774 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); | 741 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); |
| 775 entry->next = partitionFreelistMask(freelistHead); | 742 entry->next = partitionFreelistMask(freelistHead); |
| 776 page->freelistHead = entry; | 743 page->freelistHead = entry; |
| 777 --page->numAllocatedSlots; | 744 --page->numAllocatedSlots; |
| 778 if (UNLIKELY(page->numAllocatedSlots <= 0)) { | 745 if (UNLIKELY(page->numAllocatedSlots <= 0)) { |
| 779 partitionFreeSlowPath(page); | 746 partitionFreeSlowPath(page); |
| 780 } else { | 747 } else { |
| 781 // All single-slot allocations must go through the slow path to | 748 // All single-slot allocations must go through the slow path to |
| 782 // correctly update the size metadata. | 749 // correctly update the size metadata. |
| 783 ASSERT(partitionPageGetRawSize(page) == 0); | 750 DCHECK(partitionPageGetRawSize(page) == 0); |
| 784 } | 751 } |
| 785 } | 752 } |
| 786 | 753 |
| 787 ALWAYS_INLINE void partitionFree(void* ptr) { | 754 ALWAYS_INLINE void partitionFree(void* ptr) { |
| 788 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 755 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 789 free(ptr); | 756 free(ptr); |
| 790 #else | 757 #else |
| 791 PartitionAllocHooks::freeHookIfEnabled(ptr); | 758 PartitionAllocHooks::freeHookIfEnabled(ptr); |
| 792 ptr = partitionCookieFreePointerAdjust(ptr); | 759 ptr = partitionCookieFreePointerAdjust(ptr); |
| 793 ASSERT(partitionPointerIsValid(ptr)); | 760 DCHECK(partitionPointerIsValid(ptr)); |
| 794 PartitionPage* page = partitionPointerToPage(ptr); | 761 PartitionPage* page = partitionPointerToPage(ptr); |
| 795 partitionFreeWithPage(ptr, page); | 762 partitionFreeWithPage(ptr, page); |
| 796 #endif | 763 #endif |
| 797 } | 764 } |
| 798 | 765 |
| 799 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( | 766 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( |
| 800 PartitionRootGeneric* root, | 767 PartitionRootGeneric* root, |
| 801 size_t size) { | 768 size_t size) { |
| 802 size_t order = kBitsPerSizet - CountLeadingZeroBitsSizeT(size); | 769 size_t order = kBitsPerSizet - bits::CountLeadingZeroBitsSizeT(size); |
| 803 // The order index is simply the next few bits after the most significant bit. | 770 // The order index is simply the next few bits after the most significant bit. |
| 804 size_t orderIndex = (size >> root->orderIndexShifts[order]) & | 771 size_t orderIndex = (size >> root->orderIndexShifts[order]) & |
| 805 (kGenericNumBucketsPerOrder - 1); | 772 (kGenericNumBucketsPerOrder - 1); |
| 806 // And if the remaining bits are non-zero we must bump the bucket up. | 773 // And if the remaining bits are non-zero we must bump the bucket up. |
| 807 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; | 774 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; |
| 808 PartitionBucket* bucket = | 775 PartitionBucket* bucket = |
| 809 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + | 776 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + |
| 810 orderIndex + !!subOrderIndex]; | 777 orderIndex + !!subOrderIndex]; |
| 811 ASSERT(!bucket->slotSize || bucket->slotSize >= size); | 778 DCHECK(!bucket->slotSize || bucket->slotSize >= size); |
| 812 ASSERT(!(bucket->slotSize % kGenericSmallestBucket)); | 779 DCHECK(!(bucket->slotSize % kGenericSmallestBucket)); |
| 813 return bucket; | 780 return bucket; |
| 814 } | 781 } |
| 815 | 782 |
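
A worked example of the lookup, assuming the usual configuration of 8 buckets per order (kGenericNumBucketsPerOrderBits == 3) on a 64-bit target; the concrete size is illustrative:

```cpp
// size = 4416:
//   order         = 64 - clz(4416) = 13, i.e. the range [4096, 8192)
//   orderIndex    = the 3 bits just below the MSB = 0 (4416 < 4608)
//   subOrderIndex = 4416 & 0x1ff = 320, non-zero, so the
//                   "+ !!subOrderIndex" term bumps the lookup by one
//   => bucket slotSize = 4608 (buckets in this order step by 512 bytes)
```
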
| 816 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, | 783 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, |
| 817 int flags, | 784 int flags, |
| 818 size_t size, | 785 size_t size, |
| 819 const char* typeName) { | 786 const char* typeName) { |
| 820 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 787 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 821 void* result = malloc(size); | 788 void* result = malloc(size); |
| 822 RELEASE_ASSERT(result || flags & PartitionAllocReturnNull); | 789 CHECK(result || flags & PartitionAllocReturnNull); |
| 823 return result; | 790 return result; |
| 824 #else | 791 #else |
| 825 ASSERT(root->initialized); | 792 DCHECK(root->initialized); |
| 826 size_t requestedSize = size; | 793 size_t requestedSize = size; |
| 827 size = partitionCookieSizeAdjustAdd(size); | 794 size = partitionCookieSizeAdjustAdd(size); |
| 828 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); | 795 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); |
| 829 void* ret = nullptr; | 796 void* ret = nullptr; |
| 830 { | 797 { |
| 831 SpinLock::Guard guard(root->lock); | 798 subtle::SpinLock::Guard guard(root->lock); |
| 832 ret = partitionBucketAlloc(root, flags, size, bucket); | 799 ret = partitionBucketAlloc(root, flags, size, bucket); |
| 833 } | 800 } |
| 834 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); | 801 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); |
| 835 return ret; | 802 return ret; |
| 836 #endif | 803 #endif |
| 837 } | 804 } |
| 838 | 805 |
| 839 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, | 806 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, |
| 840 size_t size, | 807 size_t size, |
| 841 const char* typeName) { | 808 const char* typeName) { |
| 842 return partitionAllocGenericFlags(root, 0, size, typeName); | 809 return partitionAllocGenericFlags(root, 0, size, typeName); |
| 843 } | 810 } |
| 844 | 811 |
| 845 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { | 812 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { |
| 846 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 813 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 847 free(ptr); | 814 free(ptr); |
| 848 #else | 815 #else |
| 849 ASSERT(root->initialized); | 816 DCHECK(root->initialized); |
| 850 | 817 |
| 851 if (UNLIKELY(!ptr)) | 818 if (UNLIKELY(!ptr)) |
| 852 return; | 819 return; |
| 853 | 820 |
| 854 PartitionAllocHooks::freeHookIfEnabled(ptr); | 821 PartitionAllocHooks::freeHookIfEnabled(ptr); |
| 855 ptr = partitionCookieFreePointerAdjust(ptr); | 822 ptr = partitionCookieFreePointerAdjust(ptr); |
| 856 ASSERT(partitionPointerIsValid(ptr)); | 823 DCHECK(partitionPointerIsValid(ptr)); |
| 857 PartitionPage* page = partitionPointerToPage(ptr); | 824 PartitionPage* page = partitionPointerToPage(ptr); |
| 858 { | 825 { |
| 859 SpinLock::Guard guard(root->lock); | 826 subtle::SpinLock::Guard guard(root->lock); |
| 860 partitionFreeWithPage(ptr, page); | 827 partitionFreeWithPage(ptr, page); |
| 861 } | 828 } |
| 862 #endif | 829 #endif |
| 863 } | 830 } |
| 864 | 831 |
| 865 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { | 832 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { |
| 866 // Caller must check that the size is not above the kGenericMaxDirectMapped | 833 // Caller must check that the size is not above the kGenericMaxDirectMapped |
| 867 // limit before calling. This also guards against integer overflow in the | 834 // limit before calling. This also guards against integer overflow in the |
| 868 // calculation here. | 835 // calculation here. |
| 869 ASSERT(size <= kGenericMaxDirectMapped); | 836 DCHECK(size <= kGenericMaxDirectMapped); |
| 870 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | 837 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; |
| 871 } | 838 } |
| 872 | 839 |
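
The mask arithmetic rounds the request up to a whole number of system pages; a worked example assuming the common 4 KiB system page, so kSystemPageOffsetMask == 0xfff:

```cpp
// size = 100000:
//   (100000 + 0xfff) & ~0xfff  ==  104095 & ~0xfff  ==  102400
//   102400 == 25 * 4096, the smallest whole-page size >= 100000
```
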
| 873 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, | 840 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, |
| 874 size_t size) { | 841 size_t size) { |
| 875 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 842 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 876 return size; | 843 return size; |
| 877 #else | 844 #else |
| 878 ASSERT(root->initialized); | 845 DCHECK(root->initialized); |
| 879 size = partitionCookieSizeAdjustAdd(size); | 846 size = partitionCookieSizeAdjustAdd(size); |
| 880 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); | 847 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); |
| 881 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { | 848 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { |
| 882 size = bucket->slotSize; | 849 size = bucket->slotSize; |
| 883 } else if (size > kGenericMaxDirectMapped) { | 850 } else if (size > kGenericMaxDirectMapped) { |
| 884 // Too large to allocate => return the size unchanged. | 851 // Too large to allocate => return the size unchanged. |
| 885 } else { | 852 } else { |
| 886 ASSERT(bucket == &PartitionRootBase::gPagedBucket); | 853 DCHECK(bucket == &PartitionRootBase::gPagedBucket); |
| 887 size = partitionDirectMapSize(size); | 854 size = partitionDirectMapSize(size); |
| 888 } | 855 } |
| 889 return partitionCookieSizeAdjustSubtract(size); | 856 return partitionCookieSizeAdjustSubtract(size); |
| 890 #endif | 857 #endif |
| 891 } | 858 } |
| 892 | 859 |
| 893 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { | 860 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { |
| 894 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 861 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
| 895 return false; | 862 return false; |
| 896 #else | 863 #else |
| 897 return true; | 864 return true; |
| 898 #endif | 865 #endif |
| 899 } | 866 } |
| 900 | 867 |
| 901 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { | 868 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { |
| 902 // No need to lock here. Only 'ptr' being freed by another thread could | 869 // No need to lock here. Only 'ptr' being freed by another thread could |
| 903 // cause trouble, and the caller is responsible for that not happening. | 870 // cause trouble, and the caller is responsible for that not happening. |
| 904 ASSERT(partitionAllocSupportsGetSize()); | 871 DCHECK(partitionAllocSupportsGetSize()); |
| 905 ptr = partitionCookieFreePointerAdjust(ptr); | 872 ptr = partitionCookieFreePointerAdjust(ptr); |
| 906 ASSERT(partitionPointerIsValid(ptr)); | 873 DCHECK(partitionPointerIsValid(ptr)); |
| 907 PartitionPage* page = partitionPointerToPage(ptr); | 874 PartitionPage* page = partitionPointerToPage(ptr); |
| 908 size_t size = page->bucket->slotSize; | 875 size_t size = page->bucket->slotSize; |
| 909 return partitionCookieSizeAdjustSubtract(size); | 876 return partitionCookieSizeAdjustSubtract(size); |
| 910 } | 877 } |
| 911 | 878 |
| 912 // N (or more accurately, N - sizeof(void*)) represents the largest size in | 879 // N (or more accurately, N - sizeof(void*)) represents the largest size in |
| 913 // bytes that will be handled by a SizeSpecificPartitionAllocator. | 880 // bytes that will be handled by a SizeSpecificPartitionAllocator. |
| 914 // Attempts to partitionAlloc() more than this amount will fail. | 881 // Attempts to partitionAlloc() more than this amount will fail. |
| 915 template <size_t N> | 882 template <size_t N> |
| 916 class SizeSpecificPartitionAllocator { | 883 class SizeSpecificPartitionAllocator { |
| (...skipping 14 matching lines...) | |
| 931 class PartitionAllocatorGeneric { | 898 class PartitionAllocatorGeneric { |
| 932 public: | 899 public: |
| 933 void init() { partitionAllocGenericInit(&m_partitionRoot); } | 900 void init() { partitionAllocGenericInit(&m_partitionRoot); } |
| 934 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } | 901 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } |
| 935 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } | 902 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } |
| 936 | 903 |
| 937 private: | 904 private: |
| 938 PartitionRootGeneric m_partitionRoot; | 905 PartitionRootGeneric m_partitionRoot; |
| 939 }; | 906 }; |
| 940 | 907 |
| 941 } // namespace WTF | 908 } // namespace base |
| 942 | 909 |
| 943 using WTF::SizeSpecificPartitionAllocator; | 910 using base::SizeSpecificPartitionAllocator; |
| 944 using WTF::PartitionAllocatorGeneric; | 911 using base::PartitionAllocatorGeneric; |

> Primiano Tucci (use gerrit), 2016/11/22 14:28:33:
> Can't figure out what these are for. The .cc file
>
> palmer, 2016/11/24 01:05:56:
> Done.

| 945 using WTF::PartitionRoot; | 912 using base::PartitionRoot; |
| 946 using WTF::partitionAllocInit; | 913 using base::partitionAllocInit; |
| 947 using WTF::partitionAllocShutdown; | 914 using base::partitionAllocShutdown; |
| 948 using WTF::partitionAlloc; | 915 using base::partitionAlloc; |
| 949 using WTF::partitionFree; | 916 using base::partitionFree; |
| 950 using WTF::partitionAllocGeneric; | 917 using base::partitionAllocGeneric; |
| 951 using WTF::partitionFreeGeneric; | 918 using base::partitionFreeGeneric; |
| 952 using WTF::partitionReallocGeneric; | 919 using base::partitionReallocGeneric; |
| 953 using WTF::partitionAllocActualSize; | 920 using base::partitionAllocActualSize; |
| 954 using WTF::partitionAllocSupportsGetSize; | 921 using base::partitionAllocSupportsGetSize; |
| 955 using WTF::partitionAllocGetSize; | 922 using base::partitionAllocGetSize; |
| 956 | 923 |
| 957 #endif // WTF_PartitionAlloc_h | 924 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |