OLD | NEW |
1 /* | 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved. |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 // Use of this source code is governed by a BSD-style license that can be |
3 * | 3 // found in the LICENSE file. |
4 * Redistribution and use in source and binary forms, with or without | |
5 * modification, are permitted provided that the following conditions are | |
6 * met: | |
7 * | |
8 * * Redistributions of source code must retain the above copyright | |
9 * notice, this list of conditions and the following disclaimer. | |
10 * * Redistributions in binary form must reproduce the above | |
11 * copyright notice, this list of conditions and the following disclaimer | |
12 * in the documentation and/or other materials provided with the | |
13 * distribution. | |
14 * * Neither the name of Google Inc. nor the names of its | |
15 * contributors may be used to endorse or promote products derived from | |
16 * this software without specific prior written permission. | |
17 * | |
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
29 */ | |
30 | 4 |
31 #ifndef WTF_PartitionAlloc_h | 5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |
32 #define WTF_PartitionAlloc_h | 6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |
33 | 7 |
34 // DESCRIPTION | 8 // DESCRIPTION |
35 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / | 9 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / |
36 // partitionFreeGeneric() are approximately analogous to malloc() and free(). | 10 // partitionFreeGeneric() are approximately analogous to malloc() and free().
37 // | 11 // |
38 // The main difference is that a PartitionRoot / PartitionRootGeneric object | 12 // The main difference is that a PartitionRoot / PartitionRootGeneric object |
39 // must be supplied to these functions, representing a specific "heap partition" | 13 // must be supplied to these functions, representing a specific "heap partition" |
40 // that will be used to satisfy the allocation. Different partitions are | 14 // that will be used to satisfy the allocation. Different partitions are |
41 // guaranteed to exist in separate address spaces, including being separate from | 15 // guaranteed to exist in separate address spaces, including being separate from |
42 // the main system heap. If the contained objects are all freed, physical memory | 16 // the main system heap. If the contained objects are all freed, physical memory |
(...skipping 35 matching lines...)
78 // pages, enabling various simple tricks to try to minimize fragmentation. | 52 // pages, enabling various simple tricks to try to minimize fragmentation.
79 // - Fine-grained bucket sizes leading to less waste and better packing. | 53 // - Fine-grained bucket sizes leading to less waste and better packing. |
80 // | 54 // |
81 // The following security properties could be investigated in the future: | 55 // The following security properties could be investigated in the future: |
82 // - Per-object bucketing (instead of per-size) is mostly available at the API, | 56 // - Per-object bucketing (instead of per-size) is mostly available at the API, |
83 // but not used yet. | 57 // but not used yet. |
84 // - No randomness of freelist entries or bucket position. | 58 // - No randomness of freelist entries or bucket position. |
85 // - Better checking for wild pointers in free(). | 59 // - Better checking for wild pointers in free(). |
86 // - Better freelist masking function to guarantee fault on 32-bit. | 60 // - Better freelist masking function to guarantee fault on 32-bit. |
87 | 61 |
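To make the description above concrete, here is a minimal round-trip against the generic partition, using only names declared in this header. The handler body, the function names, and the "ExampleType" string are placeholders; real call sites obtain the type-name argument via PARTITION_HEAP_PROFILER_TYPE_NAME, so official builds pass nullptr.

  #include <stdlib.h>

  #include "base/allocator/partition_allocator/partition_alloc.h"

  // Placeholder OOM handler; a real one would record crash metadata first.
  static void HandlePartitionOom() {
    abort();
  }

  void ExampleRoundTrip() {
    // Installs PartitionRootBase::gOomHandlingFunction.
    base::partitionAllocGlobalInit(&HandlePartitionOom);
    base::PartitionAllocatorGeneric allocator;
    allocator.init();
    void* p = base::partitionAllocGeneric(allocator.root(), 128, "ExampleType");
    base::partitionFreeGeneric(allocator.root(), p);
    allocator.shutdown();
  }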
88 #include "wtf/Assertions.h" | 62 #include <limits.h> |
89 #include "wtf/BitwiseOperations.h" | |
90 #include "wtf/ByteSwap.h" | |
91 #include "wtf/CPU.h" | |
92 #include "wtf/SpinLock.h" | |
93 #include "wtf/TypeTraits.h" | |
94 #include "wtf/allocator/PageAllocator.h" | |
95 | 63 |
96 #include <limits.h> | 64 #include "base/allocator/partition_allocator/page_allocator.h" |
| 65 #include "base/bits.h" |
| 66 #include "base/compiler_specific.h" |
| 67 #include "base/logging.h" |
| 68 #include "base/synchronization/spin_lock.h" |
| 69 #include "base/sys_byteorder.h" |
| 70 #include "build/build_config.h" |
97 | 71 |
98 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 72 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
99 #include <stdlib.h> | 73 #include <stdlib.h> |
100 #endif | 74 #endif |
101 | 75 |
102 #if ENABLE(ASSERT) | 76 namespace base { |
103 #include <string.h> | |
104 #endif | |
105 | |
106 namespace WTF { | |
107 | 77 |
108 // Allocation granularity of sizeof(void*) bytes. | 78 // Allocation granularity of sizeof(void*) bytes. |
109 static const size_t kAllocationGranularity = sizeof(void*); | 79 static const size_t kAllocationGranularity = sizeof(void*); |
110 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1; | 80 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1; |
111 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; | 81 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; |
112 | 82 |
113 // Underlying partition storage pages are a power-of-two size. It is typical | 83 // Underlying partition storage pages are a power-of-two size. It is typical |
114 // for a partition page to be based on multiple system pages. Most references to | 84 // for a partition page to be based on multiple system pages. Most references to |
115 // "page" refer to partition pages. | 85 // "page" refer to partition pages. |
116 // We also have the concept of "super pages" -- these are the underlying system | 86 // We also have the concept of "super pages" -- these are the underlying system |
(...skipping 117 matching lines...)
234 // Constants for the memory reclaim logic. | 204 // Constants for the memory reclaim logic. |
235 static const size_t kMaxFreeableSpans = 16; | 205 static const size_t kMaxFreeableSpans = 16; |
236 | 206 |
237 // If the total size in bytes of allocated but not committed pages exceeds this | 207 // If the total size in bytes of allocated but not committed pages exceeds this |
238 // value (probably it is an "out of virtual address space" crash), | 208 // value (probably it is an "out of virtual address space" crash),
239 // a special crash stack trace is generated at |partitionOutOfMemory|. | 209 // a special crash stack trace is generated at |partitionOutOfMemory|. |
240 // This is to distinguish "out of virtual address space" from | 210 // This is to distinguish "out of virtual address space" from |
241 // "out of physical memory" in crash reports. | 211 // "out of physical memory" in crash reports. |
242 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB | 212 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB |
243 | 213 |
244 #if ENABLE(ASSERT) | 214 #if DCHECK_IS_ON() |
245 // These two byte values match tcmalloc. | 215 // These two byte values match tcmalloc. |
246 static const unsigned char kUninitializedByte = 0xAB; | 216 static const unsigned char kUninitializedByte = 0xAB; |
247 static const unsigned char kFreedByte = 0xCD; | 217 static const unsigned char kFreedByte = 0xCD; |
248 static const size_t kCookieSize = | 218 static const size_t kCookieSize = |
249 16; // Handles alignment up to XMM instructions on Intel. | 219 16; // Handles alignment up to XMM instructions on Intel. |
250 static const unsigned char kCookieValue[kCookieSize] = { | 220 static const unsigned char kCookieValue[kCookieSize] = { |
251 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, | 221 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, |
252 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; | 222 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; |
253 #endif | 223 #endif |
254 | 224 |
(...skipping 59 matching lines...)
314 PartitionSuperPageExtentEntry* next; | 284 PartitionSuperPageExtentEntry* next; |
315 }; | 285 }; |
316 | 286 |
317 struct PartitionDirectMapExtent { | 287 struct PartitionDirectMapExtent { |
318 PartitionDirectMapExtent* nextExtent; | 288 PartitionDirectMapExtent* nextExtent; |
319 PartitionDirectMapExtent* prevExtent; | 289 PartitionDirectMapExtent* prevExtent; |
320 PartitionBucket* bucket; | 290 PartitionBucket* bucket; |
321 size_t mapSize; // Mapped size, not including guard pages and meta-data. | 291 size_t mapSize; // Mapped size, not including guard pages and meta-data. |
322 }; | 292 }; |
323 | 293 |
324 struct WTF_EXPORT PartitionRootBase { | 294 struct BASE_EXPORT PartitionRootBase { |
325 size_t totalSizeOfCommittedPages; | 295 size_t totalSizeOfCommittedPages; |
326 size_t totalSizeOfSuperPages; | 296 size_t totalSizeOfSuperPages; |
327 size_t totalSizeOfDirectMappedPages; | 297 size_t totalSizeOfDirectMappedPages; |
328 // Invariant: totalSizeOfCommittedPages <= | 298 // Invariant: totalSizeOfCommittedPages <= |
329 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. | 299 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. |
330 unsigned numBuckets; | 300 unsigned numBuckets; |
331 unsigned maxAllocation; | 301 unsigned maxAllocation; |
332 bool initialized; | 302 bool initialized; |
333 char* nextSuperPage; | 303 char* nextSuperPage; |
334 char* nextPartitionPage; | 304 char* nextPartitionPage; |
335 char* nextPartitionPageEnd; | 305 char* nextPartitionPageEnd; |
336 PartitionSuperPageExtentEntry* currentExtent; | 306 PartitionSuperPageExtentEntry* currentExtent; |
337 PartitionSuperPageExtentEntry* firstExtent; | 307 PartitionSuperPageExtentEntry* firstExtent; |
338 PartitionDirectMapExtent* directMapList; | 308 PartitionDirectMapExtent* directMapList; |
339 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; | 309 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; |
340 int16_t globalEmptyPageRingIndex; | 310 int16_t globalEmptyPageRingIndex; |
341 uintptr_t invertedSelf; | 311 uintptr_t invertedSelf; |
342 | 312 |
343 static SpinLock gInitializedLock; | 313 static subtle::SpinLock gInitializedLock; |
344 static bool gInitialized; | 314 static bool gInitialized; |
345 // gSeedPage is used as a sentinel to indicate that there is no page | 315 // gSeedPage is used as a sentinel to indicate that there is no page |
346 // in the active page list. We can use nullptr, but in that case we need | 316 // in the active page list. We can use nullptr, but in that case we need |
347 // to add a null-check branch to the hot allocation path. We want to avoid | 317 // to add a null-check branch to the hot allocation path. We want to avoid |
348 // that. | 318 // that. |
349 static PartitionPage gSeedPage; | 319 static PartitionPage gSeedPage; |
350 static PartitionBucket gPagedBucket; | 320 static PartitionBucket gPagedBucket; |
351 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. | 321 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
352 static void (*gOomHandlingFunction)(); | 322 static void (*gOomHandlingFunction)(); |
353 }; | 323 }; |
354 | 324 |
355 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. | 325 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. |
356 struct PartitionRoot : public PartitionRootBase { | 326 struct PartitionRoot : public PartitionRootBase { |
357 // The PartitionAlloc templated class ensures the following is correct. | 327 // The PartitionAlloc templated class ensures the following is correct. |
358 ALWAYS_INLINE PartitionBucket* buckets() { | 328 ALWAYS_INLINE PartitionBucket* buckets() { |
359 return reinterpret_cast<PartitionBucket*>(this + 1); | 329 return reinterpret_cast<PartitionBucket*>(this + 1); |
360 } | 330 } |
361 ALWAYS_INLINE const PartitionBucket* buckets() const { | 331 ALWAYS_INLINE const PartitionBucket* buckets() const { |
362 return reinterpret_cast<const PartitionBucket*>(this + 1); | 332 return reinterpret_cast<const PartitionBucket*>(this + 1); |
363 } | 333 } |
364 }; | 334 }; |
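The buckets() accessors depend on the bucket array being laid out immediately after the PartitionRoot object; the templated allocator classes at the bottom of this header are what guarantee that. A layout sketch, with an illustrative struct name and an arbitrary bucket count:

  // Illustrative only; the real storage comes from SizeSpecificPartitionAllocator.
  struct IllustrativeRootStorage {
    base::PartitionRoot root;
    base::PartitionBucket buckets[4];  // arbitrary count for the sketch
  };
  // Assuming no padding between the members, root.buckets() ==
  // reinterpret_cast<base::PartitionBucket*>(&root + 1) == &buckets[0].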
365 | 335 |
366 // Never instantiate a PartitionRootGeneric directly, instead use | 336 // Never instantiate a PartitionRootGeneric directly, instead use |
367 // PartitionAllocatorGeneric. | 337 // PartitionAllocatorGeneric. |
368 struct PartitionRootGeneric : public PartitionRootBase { | 338 struct PartitionRootGeneric : public PartitionRootBase { |
369 SpinLock lock; | 339 subtle::SpinLock lock; |
370 // Some pre-computed constants. | 340 // Some pre-computed constants. |
371 size_t orderIndexShifts[kBitsPerSizet + 1]; | 341 size_t orderIndexShifts[kBitsPerSizet + 1]; |
372 size_t orderSubIndexMasks[kBitsPerSizet + 1]; | 342 size_t orderSubIndexMasks[kBitsPerSizet + 1]; |
373 // The bucket lookup table lets us map a size_t to a bucket quickly. | 343 // The bucket lookup table lets us map a size_t to a bucket quickly. |
374 // The trailing +1 caters for the overflow case for very large allocation | 344 // The trailing +1 caters for the overflow case for very large allocation |
375 // sizes. It is one flat array instead of a 2D array because in the 2D | 345 // sizes. It is one flat array instead of a 2D array because in the 2D |
376 // world, we'd need to index array[blah][max+1] which risks undefined | 346 // world, we'd need to index array[blah][max+1] which risks undefined |
377 // behavior. | 347 // behavior. |
378 PartitionBucket* | 348 PartitionBucket* |
379 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; | 349 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; |
(...skipping 33 matching lines...)
413 uint32_t numActivePages; // Number of pages that have at least one | 383 uint32_t numActivePages; // Number of pages that have at least one |
414 // provisioned slot. | 384 // provisioned slot. |
415 uint32_t numEmptyPages; // Number of pages that are empty | 385 uint32_t numEmptyPages; // Number of pages that are empty |
416 // but not decommitted. | 386 // but not decommitted. |
417 uint32_t numDecommittedPages; // Number of pages that are empty | 387 uint32_t numDecommittedPages; // Number of pages that are empty |
418 // and decommitted. | 388 // and decommitted. |
419 }; | 389 }; |
420 | 390 |
421 // Interface that is passed to partitionDumpStats and | 391 // Interface that is passed to partitionDumpStats and |
422 // partitionDumpStatsGeneric for using the memory statistics. | 392 // partitionDumpStatsGeneric for using the memory statistics. |
423 class WTF_EXPORT PartitionStatsDumper { | 393 class BASE_EXPORT PartitionStatsDumper { |
424 public: | 394 public: |
425 // Called to dump total memory used by partition, once per partition. | 395 // Called to dump total memory used by partition, once per partition. |
426 virtual void partitionDumpTotals(const char* partitionName, | 396 virtual void partitionDumpTotals(const char* partitionName, |
427 const PartitionMemoryStats*) = 0; | 397 const PartitionMemoryStats*) = 0; |
428 | 398 |
429 // Called to dump stats about buckets, for each bucket. | 399 // Called to dump stats about buckets, for each bucket. |
430 virtual void partitionsDumpBucketStats(const char* partitionName, | 400 virtual void partitionsDumpBucketStats(const char* partitionName, |
431 const PartitionBucketMemoryStats*) = 0; | 401 const PartitionBucketMemoryStats*) = 0; |
432 }; | 402 }; |
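A sketch of an embedder-side implementation; the class name and log text are placeholders, and only the partition name is printed because the fields of PartitionMemoryStats / PartitionBucketMemoryStats are declared in an elided section of this header:

  #include "base/allocator/partition_allocator/partition_alloc.h"
  #include "base/logging.h"

  class LoggingStatsDumper : public base::PartitionStatsDumper {
   public:
    void partitionDumpTotals(const char* partitionName,
                             const base::PartitionMemoryStats*) override {
      LOG(INFO) << "totals for partition " << partitionName;
    }
    void partitionsDumpBucketStats(
        const char* partitionName,
        const base::PartitionBucketMemoryStats*) override {
      LOG(INFO) << "bucket stats for partition " << partitionName;
    }
  };

A dumper is then handed to partitionDumpStats() / partitionDumpStatsGeneric(), declared further down; partitionsDumpBucketStats is invoked once per bucket, so isLightDump presumably suppresses those per-bucket calls.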
433 | 403 |
434 WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); | 404 BASE_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); |
435 WTF_EXPORT void partitionAllocInit(PartitionRoot*, | 405 BASE_EXPORT void partitionAllocInit(PartitionRoot*, |
436 size_t numBuckets, | 406 size_t numBuckets, |
437 size_t maxAllocation); | 407 size_t maxAllocation); |
438 WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*); | 408 BASE_EXPORT bool partitionAllocShutdown(PartitionRoot*); |
439 WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); | 409 BASE_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); |
440 WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); | 410 BASE_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); |
441 | 411 |
442 enum PartitionPurgeFlags { | 412 enum PartitionPurgeFlags { |
443 // Decommitting the ring list of empty pages is reasonably fast. | 413 // Decommitting the ring list of empty pages is reasonably fast. |
444 PartitionPurgeDecommitEmptyPages = 1 << 0, | 414 PartitionPurgeDecommitEmptyPages = 1 << 0, |
445 // Discarding unused system pages is slower, because it involves walking all | 415 // Discarding unused system pages is slower, because it involves walking all |
446 // freelists in all active partition pages of all buckets >= system page | 416 // freelists in all active partition pages of all buckets >= system page |
447 // size. It often frees a similar amount of memory to decommitting the empty | 417 // size. It often frees a similar amount of memory to decommitting the empty |
448 // pages, though. | 418 // pages, though. |
449 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, | 419 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, |
450 }; | 420 }; |
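The two values form a bitmask, which is why the purge entry points just below accept a plain int. A full purge of a generic partition would look like this (allocator as in the earlier round-trip sketch):

  base::partitionPurgeMemoryGeneric(
      allocator.root(), base::PartitionPurgeDecommitEmptyPages |
                            base::PartitionPurgeDiscardUnusedSystemPages);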
451 | 421 |
452 WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int); | 422 BASE_EXPORT void partitionPurgeMemory(PartitionRoot*, int); |
453 WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); | 423 BASE_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); |
454 | 424 |
455 WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, | 425 BASE_EXPORT NOINLINE void* partitionAllocSlowPath(PartitionRootBase*, |
456 int, | 426 int, |
457 size_t, | 427 size_t, |
458 PartitionBucket*); | 428 PartitionBucket*); |
459 WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*); | 429 BASE_EXPORT NOINLINE void partitionFreeSlowPath(PartitionPage*); |
460 WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, | 430 BASE_EXPORT NOINLINE void* partitionReallocGeneric(PartitionRootGeneric*, |
461 void*, | 431 void*, |
462 size_t, | 432 size_t, |
463 const char* typeName); | 433 const char* typeName); |
464 | 434 |
465 WTF_EXPORT void partitionDumpStats(PartitionRoot*, | 435 BASE_EXPORT void partitionDumpStats(PartitionRoot*, |
466 const char* partitionName, | 436 const char* partitionName, |
467 bool isLightDump, | 437 bool isLightDump, |
468 PartitionStatsDumper*); | 438 PartitionStatsDumper*); |
469 WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, | 439 BASE_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, |
470 const char* partitionName, | 440 const char* partitionName, |
471 bool isLightDump, | 441 bool isLightDump, |
472 PartitionStatsDumper*); | 442 PartitionStatsDumper*); |
473 | 443 |
474 class WTF_EXPORT PartitionAllocHooks { | 444 class BASE_EXPORT PartitionAllocHooks { |
475 public: | 445 public: |
476 typedef void AllocationHook(void* address, size_t, const char* typeName); | 446 typedef void AllocationHook(void* address, size_t, const char* typeName); |
477 typedef void FreeHook(void* address); | 447 typedef void FreeHook(void* address); |
478 | 448 |
479 static void setAllocationHook(AllocationHook* hook) { | 449 static void setAllocationHook(AllocationHook* hook) { |
480 m_allocationHook = hook; | 450 m_allocationHook = hook; |
481 } | 451 } |
482 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } | 452 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } |
483 | 453 |
484 static void allocationHookIfEnabled(void* address, | 454 static void allocationHookIfEnabled(void* address, |
(...skipping 26 matching lines...)
511 private: | 481 private: |
512 // Pointers to hook functions that PartitionAlloc will call on allocation and | 482 // Pointers to hook functions that PartitionAlloc will call on allocation and |
513 // free if the pointers are non-null. | 483 // free if the pointers are non-null. |
514 static AllocationHook* m_allocationHook; | 484 static AllocationHook* m_allocationHook; |
515 static FreeHook* m_freeHook; | 485 static FreeHook* m_freeHook; |
516 }; | 486 }; |
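A sketch of installing the hooks; the function names and log text are placeholders. The hooks are bare static pointers with no synchronization of their own, so installing them before the partitions see traffic is the conservative pattern, and a real hook body should avoid re-entering the allocator it is observing:

  #include <stddef.h>

  #include "base/allocator/partition_allocator/partition_alloc.h"
  #include "base/logging.h"

  void LogAllocation(void* address, size_t size, const char* typeName) {
    // typeName is nullptr in official builds (see the macro below).
    LOG(INFO) << "alloc " << address << " size=" << size;
  }

  void LogFree(void* address) {
    LOG(INFO) << "free " << address;
  }

  void InstallPartitionAllocHooks() {
    base::PartitionAllocHooks::setAllocationHook(&LogAllocation);
    base::PartitionAllocHooks::setFreeHook(&LogFree);
  }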
517 | 487 |
518 // In official builds, do not include type info string literals to avoid | 488 // In official builds, do not include type info string literals to avoid |
519 // bloating the binary. | 489 // bloating the binary. |
520 #if defined(OFFICIAL_BUILD) | 490 #if defined(OFFICIAL_BUILD) |
521 #define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr | 491 #define PARTITION_HEAP_PROFILER_TYPE_NAME(T) nullptr |
522 #else | 492 #else |
523 #define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>() | 493 #define PARTITION_HEAP_PROFILER_TYPE_NAME(T) GetStringWithTypeName<T>() |
524 #endif | 494 #endif |
525 | 495 |
526 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( | 496 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( |
527 PartitionFreelistEntry* ptr) { | 497 PartitionFreelistEntry* ptr) { |
528 // We use bswap on little endian as a fast mask for two reasons: | 498 // We use bswap on little endian as a fast mask for two reasons: |
529 // 1) If an object is freed and its vtable used where the attacker doesn't | 499 // 1) If an object is freed and its vtable used where the attacker doesn't |
530 // get the chance to run allocations between the free and use, the vtable | 500 // get the chance to run allocations between the free and use, the vtable |
531 // dereference is likely to fault. | 501 // dereference is likely to fault. |
532 // 2) If the attacker has a linear buffer overflow and elects to try and | 502 // 2) If the attacker has a linear buffer overflow and elects to try and |
533 // corrupt a freelist pointer, partial pointer overwrite attacks are | 503 // corrupt a freelist pointer, partial pointer overwrite attacks are |
534 // thwarted. | 504 // thwarted. |
535 // For big endian, similar guarantees are arrived at with a negation. | 505 // For big endian, similar guarantees are arrived at with a negation. |
536 #if CPU(BIG_ENDIAN) | 506 #if defined(ARCH_CPU_BIG_ENDIAN) |
537 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); | 507 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); |
538 #else | 508 #else |
539 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr)); | 509 uintptr_t masked = ByteSwap(reinterpret_cast<uintptr_t>(ptr)); |
540 #endif | 510 #endif |
541 return reinterpret_cast<PartitionFreelistEntry*>(masked); | 511 return reinterpret_cast<PartitionFreelistEntry*>(masked); |
542 } | 512 } |
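A worked example on a 64-bit little-endian build: a freelist pointer 0x00007fff12345678 is stored byte-swapped as 0x78563412ff7f0000. That is a non-canonical address on x86-64, so a stale dereference of the stored value (a reused vtable slot, say) is likely to fault instead of following an attacker-groomed pointer. And since bswap, like the big-endian negation, is its own inverse, the same partitionFreelistMask() call serves for both masking and unmasking.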
543 | 513 |
544 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { | 514 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { |
545 #if ENABLE(ASSERT) | 515 #if DCHECK_IS_ON() |
546 // Add space for cookies, checking for integer overflow. | 516 // Add space for cookies, checking for integer overflow. |
547 ASSERT(size + (2 * kCookieSize) > size); | 517 DCHECK(size + (2 * kCookieSize) > size); |
548 size += 2 * kCookieSize; | 518 size += 2 * kCookieSize; |
549 #endif | 519 #endif |
550 return size; | 520 return size; |
551 } | 521 } |
552 | 522 |
553 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { | 523 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { |
554 #if ENABLE(ASSERT) | 524 #if DCHECK_IS_ON() |
555 // Remove space for cookies. | 525 // Remove space for cookies. |
556 ASSERT(size >= 2 * kCookieSize); | 526 DCHECK(size >= 2 * kCookieSize); |
557 size -= 2 * kCookieSize; | 527 size -= 2 * kCookieSize; |
558 #endif | 528 #endif |
559 return size; | 529 return size; |
560 } | 530 } |
561 | 531 |
562 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { | 532 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { |
563 #if ENABLE(ASSERT) | 533 #if DCHECK_IS_ON() |
564 // The value given to the application is actually just after the cookie. | 534 // The value given to the application is actually just after the cookie. |
565 ptr = static_cast<char*>(ptr) - kCookieSize; | 535 ptr = static_cast<char*>(ptr) - kCookieSize; |
566 #endif | 536 #endif |
567 return ptr; | 537 return ptr; |
568 } | 538 } |
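Taken together with partitionBucketAlloc() below, these helpers imply a slot layout of [cookie | application bytes | cookie] in DCHECK builds, with kCookieSize (16) bytes on each side. Worked through for a 64-byte request: partitionCookieSizeAdjustAdd() grows it to 64 + 2 * 16 = 96 bytes, the pointer handed to the application is slotStart + 16, and partitionCookieFreePointerAdjust() subtracts the same 16 on the way back in. With DCHECKs off, all of these compile to no-ops.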
569 | 539 |
570 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { | 540 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { |
571 #if ENABLE(ASSERT) | 541 #if DCHECK_IS_ON() |
572 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); | 542 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); |
573 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) | 543 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) |
574 *cookiePtr = kCookieValue[i]; | 544 *cookiePtr = kCookieValue[i]; |
575 #endif | 545 #endif |
576 } | 546 } |
577 | 547 |
578 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { | 548 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { |
579 #if ENABLE(ASSERT) | 549 #if DCHECK_IS_ON() |
580 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); | 550 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); |
581 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) | 551 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) |
582 ASSERT(*cookiePtr == kCookieValue[i]); | 552 DCHECK(*cookiePtr == kCookieValue[i]); |
583 #endif | 553 #endif |
584 } | 554 } |
585 | 555 |
586 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { | 556 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { |
587 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); | 557 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); |
588 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); | 558 DCHECK(!(pointerAsUint & kSuperPageOffsetMask)); |
589 // The metadata area is exactly one system page (the guard page) into the | 559 // The metadata area is exactly one system page (the guard page) into the |
590 // super page. | 560 // super page. |
591 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); | 561 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); |
592 } | 562 } |
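Combining this with the index checks in the next two functions gives the super-page layout: partition page 0 begins with a guard system page followed by this metadata area (one PartitionPage entry of kPageMetadataSize bytes per partition page), partition pages 1 through kNumPartitionPagesPerSuperPage - 2 hold slot spans, and the final partition page is another guard region; that is why indices 0 and N - 1 are rejected below.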
593 | 563 |
594 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { | 564 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { |
595 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); | 565 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); |
596 char* superPagePtr = | 566 char* superPagePtr = |
597 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); | 567 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); |
598 uintptr_t partitionPageIndex = | 568 uintptr_t partitionPageIndex = |
599 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; | 569 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; |
600 // Index 0 is invalid because it is the metadata and guard area and | 570 // Index 0 is invalid because it is the metadata and guard area and |
601 // the last index is invalid because it is a guard page. | 571 // the last index is invalid because it is a guard page. |
602 ASSERT(partitionPageIndex); | 572 DCHECK(partitionPageIndex); |
603 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); | 573 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); |
604 PartitionPage* page = reinterpret_cast<PartitionPage*>( | 574 PartitionPage* page = reinterpret_cast<PartitionPage*>( |
605 partitionSuperPageToMetadataArea(superPagePtr) + | 575 partitionSuperPageToMetadataArea(superPagePtr) + |
606 (partitionPageIndex << kPageMetadataShift)); | 576 (partitionPageIndex << kPageMetadataShift)); |
607 // Partition pages in the same slot span can share the same page object. | 577 // Partition pages in the same slot span can share the same page object. |
608 // Adjust for that. | 578 // Adjust for that. |
609 size_t delta = page->pageOffset << kPageMetadataShift; | 579 size_t delta = page->pageOffset << kPageMetadataShift; |
610 page = | 580 page = |
611 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); | 581 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); |
612 return page; | 582 return page; |
613 } | 583 } |
614 | 584 |
615 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { | 585 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { |
616 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); | 586 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); |
617 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); | 587 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); |
618 ASSERT(superPageOffset > kSystemPageSize); | 588 DCHECK(superPageOffset > kSystemPageSize); |
619 ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * | 589 DCHECK(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * |
620 kPageMetadataSize)); | 590 kPageMetadataSize)); |
621 uintptr_t partitionPageIndex = | 591 uintptr_t partitionPageIndex = |
622 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; | 592 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; |
623 // Index 0 is invalid because it is the metadata area and the last index is | 593 // Index 0 is invalid because it is the metadata area and the last index is |
624 // invalid because it is a guard page. | 594 // invalid because it is a guard page. |
625 ASSERT(partitionPageIndex); | 595 DCHECK(partitionPageIndex); |
626 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); | 596 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); |
627 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); | 597 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); |
628 void* ret = reinterpret_cast<void*>( | 598 void* ret = reinterpret_cast<void*>( |
629 superPageBase + (partitionPageIndex << kPartitionPageShift)); | 599 superPageBase + (partitionPageIndex << kPartitionPageShift)); |
630 return ret; | 600 return ret; |
631 } | 601 } |
632 | 602 |
633 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { | 603 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { |
634 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); | 604 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); |
635 // Checks that the pointer is a multiple of bucket size. | 605 // Checks that the pointer is a multiple of bucket size. |
636 ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - | 606 DCHECK(!((reinterpret_cast<uintptr_t>(ptr) - |
637 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % | 607 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % |
638 page->bucket->slotSize)); | 608 page->bucket->slotSize)); |
639 return page; | 609 return page; |
640 } | 610 } |
641 | 611 |
642 ALWAYS_INLINE bool partitionBucketIsDirectMapped( | 612 ALWAYS_INLINE bool partitionBucketIsDirectMapped( |
643 const PartitionBucket* bucket) { | 613 const PartitionBucket* bucket) { |
644 return !bucket->numSystemPagesPerSlotSpan; | 614 return !bucket->numSystemPagesPerSlotSpan; |
645 } | 615 } |
646 | 616 |
647 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { | 617 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { |
648 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; | 618 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; |
649 } | 619 } |
650 | 620 |
651 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { | 621 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { |
652 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); | 622 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); |
653 } | 623 } |
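A worked example of these two helpers, assuming kSystemPageSize is 4096: a bucket with numSystemPagesPerSlotSpan == 3 and slotSize == 384 spans 3 * 4096 = 12288 bytes and holds 12288 / 384 = 32 slots; slot-span sizes are chosen so whole slots pack into whole system pages. A direct-mapped bucket reports numSystemPagesPerSlotSpan == 0, which is exactly the test partitionBucketIsDirectMapped() applies above.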
654 | 624 |
655 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { | 625 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { |
656 // For single-slot buckets which span more than one partition page, we | 626 // For single-slot buckets which span more than one partition page, we |
657 // have some spare metadata space to store the raw allocation size. We | 627 // have some spare metadata space to store the raw allocation size. We |
658 // can use this to report better statistics. | 628 // can use this to report better statistics. |
659 PartitionBucket* bucket = page->bucket; | 629 PartitionBucket* bucket = page->bucket; |
660 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) | 630 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) |
661 return nullptr; | 631 return nullptr; |
662 | 632 |
663 ASSERT((bucket->slotSize % kSystemPageSize) == 0); | 633 DCHECK((bucket->slotSize % kSystemPageSize) == 0); |
664 ASSERT(partitionBucketIsDirectMapped(bucket) || | 634 DCHECK(partitionBucketIsDirectMapped(bucket) || |
665 partitionBucketSlots(bucket) == 1); | 635 partitionBucketSlots(bucket) == 1); |
666 page++; | 636 page++; |
667 return reinterpret_cast<size_t*>(&page->freelistHead); | 637 return reinterpret_cast<size_t*>(&page->freelistHead); |
668 } | 638 } |
669 | 639 |
670 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { | 640 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { |
671 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); | 641 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); |
672 if (UNLIKELY(rawSizePtr != nullptr)) | 642 if (UNLIKELY(rawSizePtr != nullptr)) |
673 return *rawSizePtr; | 643 return *rawSizePtr; |
674 return 0; | 644 return 0; |
(...skipping 11 matching lines...)
686 PartitionRootBase* root = partitionPageToRoot(page); | 656 PartitionRootBase* root = partitionPageToRoot(page); |
687 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); | 657 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); |
688 } | 658 } |
689 | 659 |
690 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, | 660 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, |
691 int flags, | 661 int flags, |
692 size_t size, | 662 size_t size, |
693 PartitionBucket* bucket) { | 663 PartitionBucket* bucket) { |
694 PartitionPage* page = bucket->activePagesHead; | 664 PartitionPage* page = bucket->activePagesHead; |
695 // Check that this page is neither full nor freed. | 665 // Check that this page is neither full nor freed. |
696 ASSERT(page->numAllocatedSlots >= 0); | 666 DCHECK(page->numAllocatedSlots >= 0); |
697 void* ret = page->freelistHead; | 667 void* ret = page->freelistHead; |
698 if (LIKELY(ret != 0)) { | 668 if (LIKELY(ret != 0)) { |
699 // If these asserts fire, you probably corrupted memory. | 669 // If these asserts fire, you probably corrupted memory. |
700 ASSERT(partitionPointerIsValid(ret)); | 670 DCHECK(partitionPointerIsValid(ret)); |
701 // All large allocations must go through the slow path to correctly | 671 // All large allocations must go through the slow path to correctly |
702 // update the size metadata. | 672 // update the size metadata. |
703 ASSERT(partitionPageGetRawSize(page) == 0); | 673 DCHECK(partitionPageGetRawSize(page) == 0); |
704 PartitionFreelistEntry* newHead = | 674 PartitionFreelistEntry* newHead = |
705 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); | 675 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); |
706 page->freelistHead = newHead; | 676 page->freelistHead = newHead; |
707 page->numAllocatedSlots++; | 677 page->numAllocatedSlots++; |
708 } else { | 678 } else { |
709 ret = partitionAllocSlowPath(root, flags, size, bucket); | 679 ret = partitionAllocSlowPath(root, flags, size, bucket); |
710 ASSERT(!ret || partitionPointerIsValid(ret)); | 680 DCHECK(!ret || partitionPointerIsValid(ret)); |
711 } | 681 } |
712 #if ENABLE(ASSERT) | 682 #if DCHECK_IS_ON() |
713 if (!ret) | 683 if (!ret) |
714 return 0; | 684 return 0; |
715 // Fill the uninitialized pattern, and write the cookies. | 685 // Fill the uninitialized pattern, and write the cookies. |
716 page = partitionPointerToPage(ret); | 686 page = partitionPointerToPage(ret); |
717 size_t slotSize = page->bucket->slotSize; | 687 size_t slotSize = page->bucket->slotSize; |
718 size_t rawSize = partitionPageGetRawSize(page); | 688 size_t rawSize = partitionPageGetRawSize(page); |
719 if (rawSize) { | 689 if (rawSize) { |
720 ASSERT(rawSize == size); | 690 DCHECK(rawSize == size); |
721 slotSize = rawSize; | 691 slotSize = rawSize; |
722 } | 692 } |
723 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); | 693 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); |
724 char* charRet = static_cast<char*>(ret); | 694 char* charRet = static_cast<char*>(ret); |
725 // The value given to the application is actually just after the cookie. | 695 // The value given to the application is actually just after the cookie. |
726 ret = charRet + kCookieSize; | 696 ret = charRet + kCookieSize; |
727 memset(ret, kUninitializedByte, noCookieSize); | 697 memset(ret, kUninitializedByte, noCookieSize); |
728 partitionCookieWriteValue(charRet); | 698 partitionCookieWriteValue(charRet); |
729 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); | 699 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); |
730 #endif | 700 #endif |
731 return ret; | 701 return ret; |
732 } | 702 } |
733 | 703 |
734 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, | 704 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, |
735 size_t size, | 705 size_t size, |
736 const char* typeName) { | 706 const char* typeName) { |
737 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 707 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
738 void* result = malloc(size); | 708 void* result = malloc(size); |
739 RELEASE_ASSERT(result); | 709 CHECK(result); |
740 return result; | 710 return result; |
741 #else | 711 #else |
742 size_t requestedSize = size; | 712 size_t requestedSize = size; |
743 size = partitionCookieSizeAdjustAdd(size); | 713 size = partitionCookieSizeAdjustAdd(size); |
744 ASSERT(root->initialized); | 714 DCHECK(root->initialized); |
745 size_t index = size >> kBucketShift; | 715 size_t index = size >> kBucketShift; |
746 ASSERT(index < root->numBuckets); | 716 DCHECK(index < root->numBuckets); |
747 ASSERT(size == index << kBucketShift); | 717 DCHECK(size == index << kBucketShift); |
748 PartitionBucket* bucket = &root->buckets()[index]; | 718 PartitionBucket* bucket = &root->buckets()[index]; |
749 void* result = partitionBucketAlloc(root, 0, size, bucket); | 719 void* result = partitionBucketAlloc(root, 0, size, bucket); |
750 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); | 720 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); |
751 return result; | 721 return result; |
752 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 722 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
753 } | 723 } |
754 | 724 |
755 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { | 725 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { |
756 // If these asserts fire, you probably corrupted memory. | 726 // If these asserts fire, you probably corrupted memory. |
757 #if ENABLE(ASSERT) | 727 #if DCHECK_IS_ON() |
758 size_t slotSize = page->bucket->slotSize; | 728 size_t slotSize = page->bucket->slotSize; |
759 size_t rawSize = partitionPageGetRawSize(page); | 729 size_t rawSize = partitionPageGetRawSize(page); |
760 if (rawSize) | 730 if (rawSize) |
761 slotSize = rawSize; | 731 slotSize = rawSize; |
762 partitionCookieCheckValue(ptr); | 732 partitionCookieCheckValue(ptr); |
763 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - | 733 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - |
764 kCookieSize); | 734 kCookieSize); |
765 memset(ptr, kFreedByte, slotSize); | 735 memset(ptr, kFreedByte, slotSize); |
766 #endif | 736 #endif |
767 ASSERT(page->numAllocatedSlots); | 737 DCHECK(page->numAllocatedSlots); |
768 PartitionFreelistEntry* freelistHead = page->freelistHead; | 738 PartitionFreelistEntry* freelistHead = page->freelistHead; |
769 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); | 739 DCHECK(!freelistHead || partitionPointerIsValid(freelistHead)); |
770 SECURITY_CHECK(ptr != freelistHead); // Catches an immediate double free. | 740 CHECK(ptr != freelistHead); // Catches an immediate double free. |
771 // Look for double free one level deeper in debug. | 741 // Look for double free one level deeper in debug. |
772 SECURITY_DCHECK(!freelistHead || | 742 DCHECK(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); |
773 ptr != partitionFreelistMask(freelistHead->next)); | |
774 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); | 743 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); |
775 entry->next = partitionFreelistMask(freelistHead); | 744 entry->next = partitionFreelistMask(freelistHead); |
776 page->freelistHead = entry; | 745 page->freelistHead = entry; |
777 --page->numAllocatedSlots; | 746 --page->numAllocatedSlots; |
778 if (UNLIKELY(page->numAllocatedSlots <= 0)) { | 747 if (UNLIKELY(page->numAllocatedSlots <= 0)) { |
779 partitionFreeSlowPath(page); | 748 partitionFreeSlowPath(page); |
780 } else { | 749 } else { |
781 // All single-slot allocations must go through the slow path to | 750 // All single-slot allocations must go through the slow path to |
782 // correctly update the size metadata. | 751 // correctly update the size metadata. |
783 ASSERT(partitionPageGetRawSize(page) == 0); | 752 DCHECK(partitionPageGetRawSize(page) == 0); |
784 } | 753 } |
785 } | 754 } |
786 | 755 |
787 ALWAYS_INLINE void partitionFree(void* ptr) { | 756 ALWAYS_INLINE void partitionFree(void* ptr) { |
788 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 757 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
789 free(ptr); | 758 free(ptr); |
790 #else | 759 #else |
791 PartitionAllocHooks::freeHookIfEnabled(ptr); | 760 PartitionAllocHooks::freeHookIfEnabled(ptr); |
792 ptr = partitionCookieFreePointerAdjust(ptr); | 761 ptr = partitionCookieFreePointerAdjust(ptr); |
793 ASSERT(partitionPointerIsValid(ptr)); | 762 DCHECK(partitionPointerIsValid(ptr)); |
794 PartitionPage* page = partitionPointerToPage(ptr); | 763 PartitionPage* page = partitionPointerToPage(ptr); |
795 partitionFreeWithPage(ptr, page); | 764 partitionFreeWithPage(ptr, page); |
796 #endif | 765 #endif |
797 } | 766 } |
798 | 767 |
799 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( | 768 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( |
800 PartitionRootGeneric* root, | 769 PartitionRootGeneric* root, |
801 size_t size) { | 770 size_t size) { |
802 size_t order = kBitsPerSizet - CountLeadingZeroBitsSizeT(size); | 771 size_t order = kBitsPerSizet - bits::CountLeadingZeroBitsSizeT(size); |
803 // The order index is simply the next few bits after the most significant bit. | 772 // The order index is simply the next few bits after the most significant bit. |
804 size_t orderIndex = (size >> root->orderIndexShifts[order]) & | 773 size_t orderIndex = (size >> root->orderIndexShifts[order]) & |
805 (kGenericNumBucketsPerOrder - 1); | 774 (kGenericNumBucketsPerOrder - 1); |
806 // And if the remaining bits are non-zero we must bump the bucket up. | 775 // And if the remaining bits are non-zero we must bump the bucket up. |
807 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; | 776 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; |
808 PartitionBucket* bucket = | 777 PartitionBucket* bucket = |
809 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + | 778 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + |
810 orderIndex + !!subOrderIndex]; | 779 orderIndex + !!subOrderIndex]; |
811 ASSERT(!bucket->slotSize || bucket->slotSize >= size); | 780 DCHECK(!bucket->slotSize || bucket->slotSize >= size); |
812 ASSERT(!(bucket->slotSize % kGenericSmallestBucket)); | 781 DCHECK(!(bucket->slotSize % kGenericSmallestBucket)); |
813 return bucket; | 782 return bucket; |
814 } | 783 } |
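A worked example of the lookup, assuming kGenericNumBucketsPerOrderBits == 3 (eight buckets per order; the actual constants live in an elided section). For size == 4416: the most significant set bit is bit 12, so order == 13; shifting by orderIndexShifts[13] == 9 gives 4416 >> 9 == 8, and masking with kGenericNumBucketsPerOrder - 1 == 7 leaves orderIndex == 0; the remaining low bits 4416 & orderSubIndexMasks[13] == 320 are non-zero, so !!subOrderIndex bumps the lookup one bucket up, rounding the request to the next bucket size (4608 under these constants) rather than under-allocating.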
815 | 784 |
816 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, | 785 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, |
817 int flags, | 786 int flags, |
818 size_t size, | 787 size_t size, |
819 const char* typeName) { | 788 const char* typeName) { |
820 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 789 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
821 void* result = malloc(size); | 790 void* result = malloc(size); |
822 RELEASE_ASSERT(result || flags & PartitionAllocReturnNull); | 791 CHECK(result || flags & PartitionAllocReturnNull); |
823 return result; | 792 return result; |
824 #else | 793 #else |
825 ASSERT(root->initialized); | 794 DCHECK(root->initialized); |
826 size_t requestedSize = size; | 795 size_t requestedSize = size; |
827 size = partitionCookieSizeAdjustAdd(size); | 796 size = partitionCookieSizeAdjustAdd(size); |
828 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); | 797 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); |
829 void* ret = nullptr; | 798 void* ret = nullptr; |
830 { | 799 { |
831 SpinLock::Guard guard(root->lock); | 800 subtle::SpinLock::Guard guard(root->lock); |
832 ret = partitionBucketAlloc(root, flags, size, bucket); | 801 ret = partitionBucketAlloc(root, flags, size, bucket); |
833 } | 802 } |
834 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); | 803 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); |
835 return ret; | 804 return ret; |
836 #endif | 805 #endif |
837 } | 806 } |
838 | 807 |
839 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, | 808 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, |
840 size_t size, | 809 size_t size, |
841 const char* typeName) { | 810 const char* typeName) { |
842 return partitionAllocGenericFlags(root, 0, size, typeName); | 811 return partitionAllocGenericFlags(root, 0, size, typeName); |
843 } | 812 } |
844 | 813 |
845 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { | 814 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { |
846 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 815 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
847 free(ptr); | 816 free(ptr); |
848 #else | 817 #else |
849 ASSERT(root->initialized); | 818 DCHECK(root->initialized); |
850 | 819 |
851 if (UNLIKELY(!ptr)) | 820 if (UNLIKELY(!ptr)) |
852 return; | 821 return; |
853 | 822 |
854 PartitionAllocHooks::freeHookIfEnabled(ptr); | 823 PartitionAllocHooks::freeHookIfEnabled(ptr); |
855 ptr = partitionCookieFreePointerAdjust(ptr); | 824 ptr = partitionCookieFreePointerAdjust(ptr); |
856 ASSERT(partitionPointerIsValid(ptr)); | 825 DCHECK(partitionPointerIsValid(ptr)); |
857 PartitionPage* page = partitionPointerToPage(ptr); | 826 PartitionPage* page = partitionPointerToPage(ptr); |
858 { | 827 { |
859 SpinLock::Guard guard(root->lock); | 828 subtle::SpinLock::Guard guard(root->lock); |
860 partitionFreeWithPage(ptr, page); | 829 partitionFreeWithPage(ptr, page); |
861 } | 830 } |
862 #endif | 831 #endif |
863 } | 832 } |
864 | 833 |
865 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { | 834 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { |
866 // Caller must check that the size is not above the kGenericMaxDirectMapped | 835 // Caller must check that the size is not above the kGenericMaxDirectMapped |
867 // limit before calling. This also guards against integer overflow in the | 836 // limit before calling. This also guards against integer overflow in the |
868 // calculation here. | 837 // calculation here. |
869 ASSERT(size <= kGenericMaxDirectMapped); | 838 DCHECK(size <= kGenericMaxDirectMapped); |
870 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; | 839 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; |
871 } | 840 } |
872 | 841 |
873 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, | 842 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, |
874 size_t size) { | 843 size_t size) { |
875 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 844 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
876 return size; | 845 return size; |
877 #else | 846 #else |
878 ASSERT(root->initialized); | 847 DCHECK(root->initialized); |
879 size = partitionCookieSizeAdjustAdd(size); | 848 size = partitionCookieSizeAdjustAdd(size); |
880 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); | 849 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); |
881 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { | 850 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { |
882 size = bucket->slotSize; | 851 size = bucket->slotSize; |
883 } else if (size > kGenericMaxDirectMapped) { | 852 } else if (size > kGenericMaxDirectMapped) { |
884 // Too large to allocate => return the size unchanged. | 853 // Too large to allocate => return the size unchanged. |
885 } else { | 854 } else { |
886 ASSERT(bucket == &PartitionRootBase::gPagedBucket); | 855 DCHECK(bucket == &PartitionRootBase::gPagedBucket); |
887 size = partitionDirectMapSize(size); | 856 size = partitionDirectMapSize(size); |
888 } | 857 } |
889 return partitionCookieSizeAdjustSubtract(size); | 858 return partitionCookieSizeAdjustSubtract(size); |
890 #endif | 859 #endif |
891 } | 860 } |
892 | 861 |
893 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { | 862 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { |
894 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) | 863 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) |
895 return false; | 864 return false; |
896 #else | 865 #else |
897 return true; | 866 return true; |
898 #endif | 867 #endif |
899 } | 868 } |
900 | 869 |
901 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { | 870 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { |
902 // No need to lock here. Only 'ptr' being freed by another thread could | 871 // No need to lock here. Only 'ptr' being freed by another thread could |
903 // cause trouble, and the caller is responsible for that not happening. | 872 // cause trouble, and the caller is responsible for that not happening. |
904 ASSERT(partitionAllocSupportsGetSize()); | 873 DCHECK(partitionAllocSupportsGetSize()); |
905 ptr = partitionCookieFreePointerAdjust(ptr); | 874 ptr = partitionCookieFreePointerAdjust(ptr); |
906 ASSERT(partitionPointerIsValid(ptr)); | 875 DCHECK(partitionPointerIsValid(ptr)); |
907 PartitionPage* page = partitionPointerToPage(ptr); | 876 PartitionPage* page = partitionPointerToPage(ptr); |
908 size_t size = page->bucket->slotSize; | 877 size_t size = page->bucket->slotSize; |
909 return partitionCookieSizeAdjustSubtract(size); | 878 return partitionCookieSizeAdjustSubtract(size); |
910 } | 879 } |
911 | 880 |
912 // N (or more accurately, N - sizeof(void*)) represents the largest size in | 881 // N (or more accurately, N - sizeof(void*)) represents the largest size in |
913 // bytes that will be handled by a SizeSpecificPartitionAllocator. | 882 // bytes that will be handled by a SizeSpecificPartitionAllocator. |
914 // Attempts to partitionAlloc() more than this amount will fail. | 883 // Attempts to partitionAlloc() more than this amount will fail. |
915 template <size_t N> | 884 template <size_t N> |
916 class SizeSpecificPartitionAllocator { | 885 class SizeSpecificPartitionAllocator { |
(...skipping 14 matching lines...)
931 class PartitionAllocatorGeneric { | 900 class PartitionAllocatorGeneric { |
932 public: | 901 public: |
933 void init() { partitionAllocGenericInit(&m_partitionRoot); } | 902 void init() { partitionAllocGenericInit(&m_partitionRoot); } |
934 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } | 903 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } |
935 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } | 904 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } |
936 | 905 |
937 private: | 906 private: |
938 PartitionRootGeneric m_partitionRoot; | 907 PartitionRootGeneric m_partitionRoot; |
939 }; | 908 }; |
940 | 909 |
941 } // namespace WTF | 910 } // namespace base |
942 | 911 |
943 using WTF::SizeSpecificPartitionAllocator; | 912 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H |
944 using WTF::PartitionAllocatorGeneric; | |
945 using WTF::PartitionRoot; | |
946 using WTF::partitionAllocInit; | |
947 using WTF::partitionAllocShutdown; | |
948 using WTF::partitionAlloc; | |
949 using WTF::partitionFree; | |
950 using WTF::partitionAllocGeneric; | |
951 using WTF::partitionFreeGeneric; | |
952 using WTF::partitionReallocGeneric; | |
953 using WTF::partitionAllocActualSize; | |
954 using WTF::partitionAllocSupportsGetSize; | |
955 using WTF::partitionAllocGetSize; | |
956 | |
957 #endif // WTF_PartitionAlloc_h | |