Chromium Code Reviews

Side by Side Diff: base/allocator/partition_allocator/partition_alloc.h

Issue 2518253002: Move Partition Allocator into Chromium base. (Closed)
Patch Set: Respond to comments. Created 4 years ago
1 /* 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be
3 * 3 // found in the LICENSE file.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30 4
31 #ifndef WTF_PartitionAlloc_h 5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
32 #define WTF_PartitionAlloc_h 6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
33 7
34 // DESCRIPTION 8 // DESCRIPTION
35 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / 9 // partitionAlloc() / partitionAllocGeneric() and partitionFree() /
36 // partitionFreeGeneric() are approximately analogous to malloc() and free(). 10 // partitionFreeGeneric() are approximately analogous to malloc() and free().
37 // 11 //
38 // The main difference is that a PartitionRoot / PartitionRootGeneric object 12 // The main difference is that a PartitionRoot / PartitionRootGeneric object
39 // must be supplied to these functions, representing a specific "heap partition" 13 // must be supplied to these functions, representing a specific "heap partition"
40 // that will be used to satisfy the allocation. Different partitions are 14 // that will be used to satisfy the allocation. Different partitions are
41 // guaranteed to exist in separate address spaces, including being separate from 15 // guaranteed to exist in separate address spaces, including being separate from
42 // the main system heap. If the contained objects are all freed, physical memory 16 // the main system heap. If the contained objects are all freed, physical memory
(...skipping 35 matching lines...)
78 // pages, enabling various simple tricks to try to minimize fragmentation. 52 // pages, enabling various simple tricks to try to minimize fragmentation.
79 // - Fine-grained bucket sizes leading to less waste and better packing. 53 // - Fine-grained bucket sizes leading to less waste and better packing.
80 // 54 //
81 // The following security properties could be investigated in the future: 55 // The following security properties could be investigated in the future:
82 // - Per-object bucketing (instead of per-size) is mostly available in the API, 56 // - Per-object bucketing (instead of per-size) is mostly available in the API,
83 // but not used yet. 57 // but not used yet.
84 // - No randomness of freelist entries or bucket position. 58 // - No randomness of freelist entries or bucket position.
85 // - Better checking for wild pointers in free(). 59 // - Better checking for wild pointers in free().
86 // - Better freelist masking function to guarantee fault on 32-bit. 60 // - Better freelist masking function to guarantee fault on 32-bit.
87 61
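The description above boils down to a short client-side pattern. A minimal usage sketch against the base:: API introduced by this patch (the partition object and type-name string below are hypothetical, not part of this change):

static base::PartitionAllocatorGeneric gExamplePartition;  // hypothetical partition

void Example() {
  gExamplePartition.init();
  // Allocate 64 bytes from this partition; the type name feeds the heap
  // profiler hooks declared later in this header.
  void* p = base::partitionAllocGeneric(gExamplePartition.root(), 64,
                                        "ExampleType");
  base::partitionFreeGeneric(gExamplePartition.root(), p);
  gExamplePartition.shutdown();
}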
88 #include "wtf/Assertions.h" 62 #include <limits.h>
89 #include "wtf/BitwiseOperations.h"
90 #include "wtf/ByteSwap.h"
91 #include "wtf/CPU.h"
92 #include "wtf/SpinLock.h"
93 #include "wtf/TypeTraits.h"
94 #include "wtf/allocator/PageAllocator.h"
95 63
96 #include <limits.h> 64 #include "base/allocator/partition_allocator/page_allocator.h"
65 #include "base/bits.h"
66 #include "base/compiler_specific.h"
67 #include "base/logging.h"
68 #include "base/synchronization/spin_lock.h"
69 #include "base/sys_byteorder.h"
70 #include "build/build_config.h"
97 71
98 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 72 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
99 #include <stdlib.h> 73 #include <stdlib.h>
100 #endif 74 #endif
101 75
102 #if ENABLE(ASSERT) 76 #define RELEASE_ASSERT(assertion) \
103 #include <string.h> 77 (UNLIKELY(!(assertion)) ? (IMMEDIATE_CRASH()) : (void)0)
104 #endif
105 78
106 namespace WTF { 79 namespace base {
107 80
108 // Allocation granularity of sizeof(void*) bytes. 81 // Allocation granularity of sizeof(void*) bytes.
109 static const size_t kAllocationGranularity = sizeof(void*); 82 static const size_t kAllocationGranularity = sizeof(void*);
110 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1; 83 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
111 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; 84 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
112 85
113 // Underlying partition storage pages are a power-of-two size. It is typical 86 // Underlying partition storage pages are a power-of-two size. It is typical
114 // for a partition page to be based on multiple system pages. Most references to 87 // for a partition page to be based on multiple system pages. Most references to
115 // "page" refer to partition pages. 88 // "page" refer to partition pages.
116 // We also have the concept of "super pages" -- these are the underlying system 89 // We also have the concept of "super pages" -- these are the underlying system
(...skipping 117 matching lines...)
234 // Constants for the memory reclaim logic. 207 // Constants for the memory reclaim logic.
235 static const size_t kMaxFreeableSpans = 16; 208 static const size_t kMaxFreeableSpans = 16;
236 209
237 // If the total size in bytes of allocated but not committed pages exceeds this 210 // If the total size in bytes of allocated but not committed pages exceeds this
238 // value (probably an "out of virtual address space" crash), 211 // value (probably an "out of virtual address space" crash),
239 // a special crash stack trace is generated at |partitionOutOfMemory|. 212 // a special crash stack trace is generated at |partitionOutOfMemory|.
240 // This is to distinguish "out of virtual address space" from 213 // This is to distinguish "out of virtual address space" from
241 // "out of physical memory" in crash reports. 214 // "out of physical memory" in crash reports.
242 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB 215 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
243 216
244 #if ENABLE(ASSERT) 217 #if DCHECK_IS_ON()
245 // These two byte values match tcmalloc. 218 // These two byte values match tcmalloc.
246 static const unsigned char kUninitializedByte = 0xAB; 219 static const unsigned char kUninitializedByte = 0xAB;
247 static const unsigned char kFreedByte = 0xCD; 220 static const unsigned char kFreedByte = 0xCD;
248 static const size_t kCookieSize = 221 static const size_t kCookieSize =
249 16; // Handles alignment up to XMM instructions on Intel. 222 16; // Handles alignment up to XMM instructions on Intel.
250 static const unsigned char kCookieValue[kCookieSize] = { 223 static const unsigned char kCookieValue[kCookieSize] = {
251 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, 224 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
252 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; 225 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
253 #endif 226 #endif
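These constants pair with the cookie helpers defined further down; a sketch of the slot layout they imply in DCHECK builds:

// [ kCookieSize cookie ][ user data, 0xAB-filled on alloc ][ kCookieSize cookie ]
//                        ^-- pointer returned to the caller
// partitionCookieSizeAdjustAdd() grows each request by 2 * kCookieSize, and
// partitionFreeWithPage() overwrites freed user data with kFreedByte (0xCD).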
254 227
(...skipping 59 matching lines...)
314 PartitionSuperPageExtentEntry* next; 287 PartitionSuperPageExtentEntry* next;
315 }; 288 };
316 289
317 struct PartitionDirectMapExtent { 290 struct PartitionDirectMapExtent {
318 PartitionDirectMapExtent* nextExtent; 291 PartitionDirectMapExtent* nextExtent;
319 PartitionDirectMapExtent* prevExtent; 292 PartitionDirectMapExtent* prevExtent;
320 PartitionBucket* bucket; 293 PartitionBucket* bucket;
321 size_t mapSize; // Mapped size, not including guard pages and meta-data. 294 size_t mapSize; // Mapped size, not including guard pages and meta-data.
322 }; 295 };
323 296
324 struct WTF_EXPORT PartitionRootBase { 297 struct BASE_EXPORT PartitionRootBase {
325 size_t totalSizeOfCommittedPages; 298 size_t totalSizeOfCommittedPages;
326 size_t totalSizeOfSuperPages; 299 size_t totalSizeOfSuperPages;
327 size_t totalSizeOfDirectMappedPages; 300 size_t totalSizeOfDirectMappedPages;
328 // Invariant: totalSizeOfCommittedPages <= 301 // Invariant: totalSizeOfCommittedPages <=
329 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. 302 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages.
330 unsigned numBuckets; 303 unsigned numBuckets;
331 unsigned maxAllocation; 304 unsigned maxAllocation;
332 bool initialized; 305 bool initialized;
333 char* nextSuperPage; 306 char* nextSuperPage;
334 char* nextPartitionPage; 307 char* nextPartitionPage;
335 char* nextPartitionPageEnd; 308 char* nextPartitionPageEnd;
336 PartitionSuperPageExtentEntry* currentExtent; 309 PartitionSuperPageExtentEntry* currentExtent;
337 PartitionSuperPageExtentEntry* firstExtent; 310 PartitionSuperPageExtentEntry* firstExtent;
338 PartitionDirectMapExtent* directMapList; 311 PartitionDirectMapExtent* directMapList;
339 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; 312 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
340 int16_t globalEmptyPageRingIndex; 313 int16_t globalEmptyPageRingIndex;
341 uintptr_t invertedSelf; 314 uintptr_t invertedSelf;
342 315
343 static SpinLock gInitializedLock; 316 static subtle::SpinLock gInitializedLock;
344 static bool gInitialized; 317 static bool gInitialized;
345 // gSeedPage is used as a sentinel to indicate that there is no page 318 // gSeedPage is used as a sentinel to indicate that there is no page
346 // in the active page list. We could use nullptr, but in that case we need 319 // in the active page list. We could use nullptr, but in that case we need
347 // to add a null-check branch to the hot allocation path. We want to avoid 320 // to add a null-check branch to the hot allocation path. We want to avoid
348 // that. 321 // that.
349 static PartitionPage gSeedPage; 322 static PartitionPage gSeedPage;
350 static PartitionBucket gPagedBucket; 323 static PartitionBucket gPagedBucket;
351 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. 324 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
352 static void (*gOomHandlingFunction)(); 325 static void (*gOomHandlingFunction)();
353 }; 326 };
354 327
355 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. 328 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
356 struct PartitionRoot : public PartitionRootBase { 329 struct PartitionRoot : public PartitionRootBase {
357 // The PartitionAlloc templated class ensures the following is correct. 330 // The PartitionAlloc templated class ensures the following is correct.
358 ALWAYS_INLINE PartitionBucket* buckets() { 331 ALWAYS_INLINE PartitionBucket* buckets() {
359 return reinterpret_cast<PartitionBucket*>(this + 1); 332 return reinterpret_cast<PartitionBucket*>(this + 1);
360 } 333 }
361 ALWAYS_INLINE const PartitionBucket* buckets() const { 334 ALWAYS_INLINE const PartitionBucket* buckets() const {
362 return reinterpret_cast<const PartitionBucket*>(this + 1); 335 return reinterpret_cast<const PartitionBucket*>(this + 1);
363 } 336 }
364 }; 337 };
365 338
366 // Never instantiate a PartitionRootGeneric directly, instead use 339 // Never instantiate a PartitionRootGeneric directly, instead use
367 // PartitionAllocatorGeneric. 340 // PartitionAllocatorGeneric.
368 struct PartitionRootGeneric : public PartitionRootBase { 341 struct PartitionRootGeneric : public PartitionRootBase {
369 SpinLock lock; 342 subtle::SpinLock lock;
370 // Some pre-computed constants. 343 // Some pre-computed constants.
371 size_t orderIndexShifts[kBitsPerSizet + 1]; 344 size_t orderIndexShifts[kBitsPerSizet + 1];
372 size_t orderSubIndexMasks[kBitsPerSizet + 1]; 345 size_t orderSubIndexMasks[kBitsPerSizet + 1];
373 // The bucket lookup table lets us map a size_t to a bucket quickly. 346 // The bucket lookup table lets us map a size_t to a bucket quickly.
374 // The trailing +1 caters for the overflow case for very large allocation 347 // The trailing +1 caters for the overflow case for very large allocation
375 // sizes. It is one flat array instead of a 2D array because in the 2D 348 // sizes. It is one flat array instead of a 2D array because in the 2D
376 // world, we'd need to index array[blah][max+1] which risks undefined 349 // world, we'd need to index array[blah][max+1] which risks undefined
377 // behavior. 350 // behavior.
378 PartitionBucket* 351 PartitionBucket*
379 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; 352 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
(...skipping 33 matching lines...)
413 uint32_t numActivePages; // Number of pages that have at least one 386 uint32_t numActivePages; // Number of pages that have at least one
414 // provisioned slot. 387 // provisioned slot.
415 uint32_t numEmptyPages; // Number of pages that are empty 388 uint32_t numEmptyPages; // Number of pages that are empty
416 // but not decommitted. 389 // but not decommitted.
417 uint32_t numDecommittedPages; // Number of pages that are empty 390 uint32_t numDecommittedPages; // Number of pages that are empty
418 // and decommitted. 391 // and decommitted.
419 }; 392 };
420 393
421 // Interface that is passed to partitionDumpStats and 394 // Interface that is passed to partitionDumpStats and
422 // partitionDumpStatsGeneric for using the memory statistics. 395 // partitionDumpStatsGeneric for using the memory statistics.
423 class WTF_EXPORT PartitionStatsDumper { 396 class BASE_EXPORT PartitionStatsDumper {
424 public: 397 public:
425 // Called to dump total memory used by partition, once per partition. 398 // Called to dump total memory used by partition, once per partition.
426 virtual void partitionDumpTotals(const char* partitionName, 399 virtual void partitionDumpTotals(const char* partitionName,
427 const PartitionMemoryStats*) = 0; 400 const PartitionMemoryStats*) = 0;
428 401
429 // Called to dump stats about buckets, for each bucket. 402 // Called to dump stats about buckets, for each bucket.
430 virtual void partitionsDumpBucketStats(const char* partitionName, 403 virtual void partitionsDumpBucketStats(const char* partitionName,
431 const PartitionBucketMemoryStats*) = 0; 404 const PartitionBucketMemoryStats*) = 0;
432 }; 405 };
433 406
434 WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); 407 BASE_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)());
435 WTF_EXPORT void partitionAllocInit(PartitionRoot*, 408 BASE_EXPORT void partitionAllocInit(PartitionRoot*,
436 size_t numBuckets, 409 size_t numBuckets,
437 size_t maxAllocation); 410 size_t maxAllocation);
438 WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*); 411 BASE_EXPORT bool partitionAllocShutdown(PartitionRoot*);
439 WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); 412 BASE_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
440 WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); 413 BASE_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);
441 414
442 enum PartitionPurgeFlags { 415 enum PartitionPurgeFlags {
443 // Decommitting the ring list of empty pages is reasonably fast. 416 // Decommitting the ring list of empty pages is reasonably fast.
444 PartitionPurgeDecommitEmptyPages = 1 << 0, 417 PartitionPurgeDecommitEmptyPages = 1 << 0,
445 // Discarding unused system pages is slower, because it involves walking all 418 // Discarding unused system pages is slower, because it involves walking all
446 // freelists in all active partition pages of all buckets >= system page 419 // freelists in all active partition pages of all buckets >= system page
447 // size. It often frees a similar amount of memory to decommitting the empty 420 // size. It often frees a similar amount of memory to decommitting the empty
448 // pages, though. 421 // pages, though.
449 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, 422 PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
450 }; 423 };
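The purge flags form a bitmask, so both behaviors can be requested in one call. A hedged sketch using the generic entry point declared just below (gExamplePartition as in the earlier sketch):

base::partitionPurgeMemoryGeneric(
    gExamplePartition.root(),
    base::PartitionPurgeDecommitEmptyPages |
        base::PartitionPurgeDiscardUnusedSystemPages);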
451 424
452 WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int); 425 BASE_EXPORT void partitionPurgeMemory(PartitionRoot*, int);
453 WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); 426 BASE_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
454 427
455 WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, 428 BASE_EXPORT NOINLINE void* partitionAllocSlowPath(PartitionRootBase*,
456 int, 429 int,
457 size_t, 430 size_t,
458 PartitionBucket*); 431 PartitionBucket*);
459 WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*); 432 BASE_EXPORT NOINLINE void partitionFreeSlowPath(PartitionPage*);
460 WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, 433 BASE_EXPORT NOINLINE void* partitionReallocGeneric(PartitionRootGeneric*,
461 void*, 434 void*,
462 size_t, 435 size_t,
463 const char* typeName); 436 const char* typeName);
464 437
465 WTF_EXPORT void partitionDumpStats(PartitionRoot*, 438 BASE_EXPORT void partitionDumpStats(PartitionRoot*,
466 const char* partitionName, 439 const char* partitionName,
467 bool isLightDump, 440 bool isLightDump,
468 PartitionStatsDumper*); 441 PartitionStatsDumper*);
469 WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, 442 BASE_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*,
470 const char* partitionName, 443 const char* partitionName,
471 bool isLightDump, 444 bool isLightDump,
472 PartitionStatsDumper*); 445 PartitionStatsDumper*);
473 446
474 class WTF_EXPORT PartitionAllocHooks { 447 class BASE_EXPORT PartitionAllocHooks {
475 public: 448 public:
476 typedef void AllocationHook(void* address, size_t, const char* typeName); 449 typedef void AllocationHook(void* address, size_t, const char* typeName);
477 typedef void FreeHook(void* address); 450 typedef void FreeHook(void* address);
478 451
479 static void setAllocationHook(AllocationHook* hook) { 452 static void setAllocationHook(AllocationHook* hook) {
480 m_allocationHook = hook; 453 m_allocationHook = hook;
481 } 454 }
482 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } 455 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; }
483 456
484 static void allocationHookIfEnabled(void* address, 457 static void allocationHookIfEnabled(void* address,
(...skipping 23 matching lines...)
508 } 481 }
509 } 482 }
510 483
511 private: 484 private:
512 // Pointers to hook functions that PartitionAlloc will call on allocation and 485 // Pointers to hook functions that PartitionAlloc will call on allocation and
513 // free if the pointers are non-null. 486 // free if the pointers are non-null.
514 static AllocationHook* m_allocationHook; 487 static AllocationHook* m_allocationHook;
515 static FreeHook* m_freeHook; 488 static FreeHook* m_freeHook;
516 }; 489 };
517 490
518 // In official builds, do not include type info string literals to avoid
519 // bloating the binary.
520 #if defined(OFFICIAL_BUILD)
521 #define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr
522 #else
523 #define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>()
524 #endif
525
526 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( 491 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(
527 PartitionFreelistEntry* ptr) { 492 PartitionFreelistEntry* ptr) {
528 // We use bswap on little endian as a fast mask for two reasons: 493 // We use bswap on little endian as a fast mask for two reasons:
529 // 1) If an object is freed and its vtable is used before the attacker 494 // 1) If an object is freed and its vtable is used before the attacker
530 // gets a chance to run allocations between the free and the use, the vtable 495 // gets a chance to run allocations between the free and the use, the vtable
531 // dereference is likely to fault. 496 // dereference is likely to fault.
532 // 2) If the attacker has a linear buffer overflow and tries to 497 // 2) If the attacker has a linear buffer overflow and tries to
533 // corrupt a freelist pointer, partial pointer overwrite attacks are 498 // corrupt a freelist pointer, partial pointer overwrite attacks are
534 // thwarted. 499 // thwarted.
535 // For big endian, similar guarantees are arrived at with a negation. 500 // For big endian, similar guarantees are arrived at with a negation.
536 #if CPU(BIG_ENDIAN) 501 #if defined(ARCH_CPU_BIG_ENDIAN)
537 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); 502 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
538 #else 503 #else
539 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr)); 504 uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
540 #endif 505 #endif
541 return reinterpret_cast<PartitionFreelistEntry*>(masked); 506 return reinterpret_cast<PartitionFreelistEntry*>(masked);
542 } 507 }
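A worked example of the little-endian path (the address is illustrative): the mask is its own inverse, and a masked freelist word no longer looks like a valid heap pointer.

//   ptr             = 0x00007f3a12345678   (a plausible user-space address)
//   mask(ptr)       = ByteSwapUintPtrT(ptr)
//                   = 0x785634123a7f0000   (faults if dereferenced as-is)
//   mask(mask(ptr)) == ptr                 (applying the mask twice restores it)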
543 508
544 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { 509 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) {
545 #if ENABLE(ASSERT) 510 #if DCHECK_IS_ON()
546 // Add space for cookies, checking for integer overflow. 511 // Add space for cookies, checking for integer overflow.
547 ASSERT(size + (2 * kCookieSize) > size); 512 DCHECK(size + (2 * kCookieSize) > size);
548 size += 2 * kCookieSize; 513 size += 2 * kCookieSize;
549 #endif 514 #endif
550 return size; 515 return size;
551 } 516 }
552 517
553 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { 518 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) {
554 #if ENABLE(ASSERT) 519 #if DCHECK_IS_ON()
555 // Remove space for cookies. 520 // Remove space for cookies.
556 ASSERT(size >= 2 * kCookieSize); 521 DCHECK(size >= 2 * kCookieSize);
557 size -= 2 * kCookieSize; 522 size -= 2 * kCookieSize;
558 #endif 523 #endif
559 return size; 524 return size;
560 } 525 }
561 526
562 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { 527 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) {
563 #if ENABLE(ASSERT) 528 #if DCHECK_IS_ON()
564 // The value given to the application is actually just after the cookie. 529 // The value given to the application is actually just after the cookie.
565 ptr = static_cast<char*>(ptr) - kCookieSize; 530 ptr = static_cast<char*>(ptr) - kCookieSize;
566 #endif 531 #endif
567 return ptr; 532 return ptr;
568 } 533 }
569 534
570 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { 535 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) {
571 #if ENABLE(ASSERT) 536 #if DCHECK_IS_ON()
572 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); 537 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
573 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) 538 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
574 *cookiePtr = kCookieValue[i]; 539 *cookiePtr = kCookieValue[i];
575 #endif 540 #endif
576 } 541 }
577 542
578 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { 543 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) {
579 #if ENABLE(ASSERT) 544 #if DCHECK_IS_ON()
580 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); 545 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
581 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) 546 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
582 ASSERT(*cookiePtr == kCookieValue[i]); 547 DCHECK(*cookiePtr == kCookieValue[i]);
583 #endif 548 #endif
584 } 549 }
585 550
586 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { 551 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) {
587 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); 552 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
588 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); 553 DCHECK(!(pointerAsUint & kSuperPageOffsetMask));
589 // The metadata area is exactly one system page (the guard page) into the 554 // The metadata area is exactly one system page (the guard page) into the
590 // super page. 555 // super page.
591 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); 556 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
592 } 557 }
593 558
594 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { 559 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) {
595 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); 560 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
596 char* superPagePtr = 561 char* superPagePtr =
597 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); 562 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
598 uintptr_t partitionPageIndex = 563 uintptr_t partitionPageIndex =
599 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; 564 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
600 // Index 0 is invalid because it is the metadata and guard area and 565 // Index 0 is invalid because it is the metadata and guard area and
601 // the last index is invalid because it is a guard page. 566 // the last index is invalid because it is a guard page.
602 ASSERT(partitionPageIndex); 567 DCHECK(partitionPageIndex);
603 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 568 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
604 PartitionPage* page = reinterpret_cast<PartitionPage*>( 569 PartitionPage* page = reinterpret_cast<PartitionPage*>(
605 partitionSuperPageToMetadataArea(superPagePtr) + 570 partitionSuperPageToMetadataArea(superPagePtr) +
606 (partitionPageIndex << kPageMetadataShift)); 571 (partitionPageIndex << kPageMetadataShift));
607 // Partition pages in the same slot span can share the same page object. 572 // Partition pages in the same slot span can share the same page object.
608 // Adjust for that. 573 // Adjust for that.
609 size_t delta = page->pageOffset << kPageMetadataShift; 574 size_t delta = page->pageOffset << kPageMetadataShift;
610 page = 575 page =
611 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); 576 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
612 return page; 577 return page;
613 } 578 }
614 579
615 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { 580 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) {
616 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); 581 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
617 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); 582 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
618 ASSERT(superPageOffset > kSystemPageSize); 583 DCHECK(superPageOffset > kSystemPageSize);
619 ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * 584 DCHECK(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
620 kPageMetadataSize)); 585 kPageMetadataSize));
621 uintptr_t partitionPageIndex = 586 uintptr_t partitionPageIndex =
622 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; 587 (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
623 // Index 0 is invalid because it is the metadata area and the last index is 588 // Index 0 is invalid because it is the metadata area and the last index is
624 // invalid because it is a guard page. 589 // invalid because it is a guard page.
625 ASSERT(partitionPageIndex); 590 DCHECK(partitionPageIndex);
626 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 591 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
627 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); 592 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
628 void* ret = reinterpret_cast<void*>( 593 void* ret = reinterpret_cast<void*>(
629 superPageBase + (partitionPageIndex << kPartitionPageShift)); 594 superPageBase + (partitionPageIndex << kPartitionPageShift));
630 return ret; 595 return ret;
631 } 596 }
632 597
633 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { 598 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) {
634 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); 599 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
635 // Checks that the pointer is a multiple of bucket size. 600 // Checks that the pointer is a multiple of bucket size.
636 ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - 601 DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
637 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % 602 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) %
638 page->bucket->slotSize)); 603 page->bucket->slotSize));
639 return page; 604 return page;
640 } 605 }
641 606
642 ALWAYS_INLINE bool partitionBucketIsDirectMapped( 607 ALWAYS_INLINE bool partitionBucketIsDirectMapped(
643 const PartitionBucket* bucket) { 608 const PartitionBucket* bucket) {
644 return !bucket->numSystemPagesPerSlotSpan; 609 return !bucket->numSystemPagesPerSlotSpan;
645 } 610 }
646 611
647 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { 612 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) {
648 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; 613 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
649 } 614 }
650 615
651 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { 616 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) {
652 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); 617 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
653 } 618 }
654 619
655 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { 620 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) {
656 // For single-slot buckets which span more than one partition page, we 621 // For single-slot buckets which span more than one partition page, we
657 // have some spare metadata space to store the raw allocation size. We 622 // have some spare metadata space to store the raw allocation size. We
658 // can use this to report better statistics. 623 // can use this to report better statistics.
659 PartitionBucket* bucket = page->bucket; 624 PartitionBucket* bucket = page->bucket;
660 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) 625 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
661 return nullptr; 626 return nullptr;
662 627
663 ASSERT((bucket->slotSize % kSystemPageSize) == 0); 628 DCHECK((bucket->slotSize % kSystemPageSize) == 0);
664 ASSERT(partitionBucketIsDirectMapped(bucket) || 629 DCHECK(partitionBucketIsDirectMapped(bucket) ||
665 partitionBucketSlots(bucket) == 1); 630 partitionBucketSlots(bucket) == 1);
666 page++; 631 page++;
667 return reinterpret_cast<size_t*>(&page->freelistHead); 632 return reinterpret_cast<size_t*>(&page->freelistHead);
668 } 633 }
669 634
670 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { 635 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) {
671 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); 636 size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
672 if (UNLIKELY(rawSizePtr != nullptr)) 637 if (UNLIKELY(rawSizePtr != nullptr))
673 return *rawSizePtr; 638 return *rawSizePtr;
674 return 0; 639 return 0;
(...skipping 11 matching lines...)
686 PartitionRootBase* root = partitionPageToRoot(page); 651 PartitionRootBase* root = partitionPageToRoot(page);
687 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); 652 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
688 } 653 }
689 654
690 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, 655 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root,
691 int flags, 656 int flags,
692 size_t size, 657 size_t size,
693 PartitionBucket* bucket) { 658 PartitionBucket* bucket) {
694 PartitionPage* page = bucket->activePagesHead; 659 PartitionPage* page = bucket->activePagesHead;
695 // Check that this page is neither full nor freed. 660 // Check that this page is neither full nor freed.
696 ASSERT(page->numAllocatedSlots >= 0); 661 DCHECK(page->numAllocatedSlots >= 0);
697 void* ret = page->freelistHead; 662 void* ret = page->freelistHead;
698 if (LIKELY(ret != 0)) { 663 if (LIKELY(ret != 0)) {
699 // If these asserts fire, you probably corrupted memory. 664 // If these asserts fire, you probably corrupted memory.
700 ASSERT(partitionPointerIsValid(ret)); 665 DCHECK(partitionPointerIsValid(ret));
701 // All large allocations must go through the slow path to correctly 666 // All large allocations must go through the slow path to correctly
702 // update the size metadata. 667 // update the size metadata.
703 ASSERT(partitionPageGetRawSize(page) == 0); 668 DCHECK(partitionPageGetRawSize(page) == 0);
704 PartitionFreelistEntry* newHead = 669 PartitionFreelistEntry* newHead =
705 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); 670 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
706 page->freelistHead = newHead; 671 page->freelistHead = newHead;
707 page->numAllocatedSlots++; 672 page->numAllocatedSlots++;
708 } else { 673 } else {
709 ret = partitionAllocSlowPath(root, flags, size, bucket); 674 ret = partitionAllocSlowPath(root, flags, size, bucket);
710 ASSERT(!ret || partitionPointerIsValid(ret)); 675 DCHECK(!ret || partitionPointerIsValid(ret));
711 } 676 }
712 #if ENABLE(ASSERT) 677 #if DCHECK_IS_ON()
713 if (!ret) 678 if (!ret)
714 return 0; 679 return 0;
715 // Fill the uninitialized pattern, and write the cookies. 680 // Fill the uninitialized pattern, and write the cookies.
716 page = partitionPointerToPage(ret); 681 page = partitionPointerToPage(ret);
717 size_t slotSize = page->bucket->slotSize; 682 size_t slotSize = page->bucket->slotSize;
718 size_t rawSize = partitionPageGetRawSize(page); 683 size_t rawSize = partitionPageGetRawSize(page);
719 if (rawSize) { 684 if (rawSize) {
720 ASSERT(rawSize == size); 685 DCHECK(rawSize == size);
721 slotSize = rawSize; 686 slotSize = rawSize;
722 } 687 }
723 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); 688 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
724 char* charRet = static_cast<char*>(ret); 689 char* charRet = static_cast<char*>(ret);
725 // The value given to the application is actually just after the cookie. 690 // The value given to the application is actually just after the cookie.
726 ret = charRet + kCookieSize; 691 ret = charRet + kCookieSize;
727 memset(ret, kUninitializedByte, noCookieSize); 692 memset(ret, kUninitializedByte, noCookieSize);
728 partitionCookieWriteValue(charRet); 693 partitionCookieWriteValue(charRet);
729 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); 694 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize);
730 #endif 695 #endif
731 return ret; 696 return ret;
732 } 697 }
733 698
734 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, 699 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root,
735 size_t size, 700 size_t size,
736 const char* typeName) { 701 const char* typeName) {
737 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 702 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
738 void* result = malloc(size); 703 void* result = malloc(size);
739 RELEASE_ASSERT(result); 704 RELEASE_ASSERT(result);
740 return result; 705 return result;
741 #else 706 #else
742 size_t requestedSize = size; 707 size_t requestedSize = size;
743 size = partitionCookieSizeAdjustAdd(size); 708 size = partitionCookieSizeAdjustAdd(size);
744 ASSERT(root->initialized); 709 DCHECK(root->initialized);
745 size_t index = size >> kBucketShift; 710 size_t index = size >> kBucketShift;
746 ASSERT(index < root->numBuckets); 711 DCHECK(index < root->numBuckets);
747 ASSERT(size == index << kBucketShift); 712 DCHECK(size == index << kBucketShift);
748 PartitionBucket* bucket = &root->buckets()[index]; 713 PartitionBucket* bucket = &root->buckets()[index];
749 void* result = partitionBucketAlloc(root, 0, size, bucket); 714 void* result = partitionBucketAlloc(root, 0, size, bucket);
750 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); 715 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName);
751 return result; 716 return result;
752 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 717 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
753 } 718 }
754 719
755 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { 720 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) {
756 // If these asserts fire, you probably corrupted memory. 721 // If these asserts fire, you probably corrupted memory.
757 #if ENABLE(ASSERT) 722 #if DCHECK_IS_ON()
758 size_t slotSize = page->bucket->slotSize; 723 size_t slotSize = page->bucket->slotSize;
759 size_t rawSize = partitionPageGetRawSize(page); 724 size_t rawSize = partitionPageGetRawSize(page);
760 if (rawSize) 725 if (rawSize)
761 slotSize = rawSize; 726 slotSize = rawSize;
762 partitionCookieCheckValue(ptr); 727 partitionCookieCheckValue(ptr);
763 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - 728 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize -
764 kCookieSize); 729 kCookieSize);
765 memset(ptr, kFreedByte, slotSize); 730 memset(ptr, kFreedByte, slotSize);
766 #endif 731 #endif
767 ASSERT(page->numAllocatedSlots); 732 DCHECK(page->numAllocatedSlots);
768 PartitionFreelistEntry* freelistHead = page->freelistHead; 733 PartitionFreelistEntry* freelistHead = page->freelistHead;
769 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); 734 DCHECK(!freelistHead || partitionPointerIsValid(freelistHead));
770 SECURITY_CHECK(ptr != freelistHead); // Catches an immediate double free. 735 RELEASE_ASSERT(ptr != freelistHead); // Catches an immediate double free.
771 // Look for double free one level deeper in debug. 736 // Look for double free one level deeper in debug.
772 SECURITY_DCHECK(!freelistHead || 737 DCHECK(!freelistHead || ptr != partitionFreelistMask(freelistHead->next));
773 ptr != partitionFreelistMask(freelistHead->next));
774 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); 738 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
775 entry->next = partitionFreelistMask(freelistHead); 739 entry->next = partitionFreelistMask(freelistHead);
776 page->freelistHead = entry; 740 page->freelistHead = entry;
777 --page->numAllocatedSlots; 741 --page->numAllocatedSlots;
778 if (UNLIKELY(page->numAllocatedSlots <= 0)) { 742 if (UNLIKELY(page->numAllocatedSlots <= 0)) {
779 partitionFreeSlowPath(page); 743 partitionFreeSlowPath(page);
780 } else { 744 } else {
781 // All single-slot allocations must go through the slow path to 745 // All single-slot allocations must go through the slow path to
782 // correctly update the size metadata. 746 // correctly update the size metadata.
783 ASSERT(partitionPageGetRawSize(page) == 0); 747 DCHECK(partitionPageGetRawSize(page) == 0);
784 } 748 }
785 } 749 }
786 750
787 ALWAYS_INLINE void partitionFree(void* ptr) { 751 ALWAYS_INLINE void partitionFree(void* ptr) {
788 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 752 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
789 free(ptr); 753 free(ptr);
790 #else 754 #else
791 PartitionAllocHooks::freeHookIfEnabled(ptr); 755 PartitionAllocHooks::freeHookIfEnabled(ptr);
792 ptr = partitionCookieFreePointerAdjust(ptr); 756 ptr = partitionCookieFreePointerAdjust(ptr);
793 ASSERT(partitionPointerIsValid(ptr)); 757 DCHECK(partitionPointerIsValid(ptr));
794 PartitionPage* page = partitionPointerToPage(ptr); 758 PartitionPage* page = partitionPointerToPage(ptr);
795 partitionFreeWithPage(ptr, page); 759 partitionFreeWithPage(ptr, page);
796 #endif 760 #endif
797 } 761 }
798 762
799 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( 763 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(
800 PartitionRootGeneric* root, 764 PartitionRootGeneric* root,
801 size_t size) { 765 size_t size) {
802 size_t order = kBitsPerSizet - CountLeadingZeroBitsSizeT(size); 766 size_t order = kBitsPerSizet - bits::CountLeadingZeroBitsSizeT(size);
803 // The order index is simply the next few bits after the most significant bit. 767 // The order index is simply the next few bits after the most significant bit.
804 size_t orderIndex = (size >> root->orderIndexShifts[order]) & 768 size_t orderIndex = (size >> root->orderIndexShifts[order]) &
805 (kGenericNumBucketsPerOrder - 1); 769 (kGenericNumBucketsPerOrder - 1);
806 // And if the remaining bits are non-zero we must bump the bucket up. 770 // And if the remaining bits are non-zero we must bump the bucket up.
807 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; 771 size_t subOrderIndex = size & root->orderSubIndexMasks[order];
808 PartitionBucket* bucket = 772 PartitionBucket* bucket =
809 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + 773 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) +
810 orderIndex + !!subOrderIndex]; 774 orderIndex + !!subOrderIndex];
811 ASSERT(!bucket->slotSize || bucket->slotSize >= size); 775 DCHECK(!bucket->slotSize || bucket->slotSize >= size);
812 ASSERT(!(bucket->slotSize % kGenericSmallestBucket)); 776 DCHECK(!(bucket->slotSize % kGenericSmallestBucket));
813 return bucket; 777 return bucket;
814 } 778 }
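A worked example of the lookup, assuming kGenericNumBucketsPerOrder == 8 (so kGenericNumBucketsPerOrderBits == 3); those constants are defined in a hunk elided above.

//   size = 96 = 0b1100000
//   order         = kBitsPerSizet - clz(96) = 7    (MSB is bit 6)
//   orderIndex    = next 3 bits below the MSB = 0b100 = 4
//   subOrderIndex = remaining low bits = 0         (no bump to the next bucket)
//   bucket        = bucketLookups[(7 << 3) + 4 + 0]  -> the 96-byte bucket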
815 779
816 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, 780 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root,
817 int flags, 781 int flags,
818 size_t size, 782 size_t size,
819 const char* typeName) { 783 const char* typeName) {
820 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 784 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
821 void* result = malloc(size); 785 void* result = malloc(size);
822 RELEASE_ASSERT(result || flags & PartitionAllocReturnNull); 786 RELEASE_ASSERT(result || flags & PartitionAllocReturnNull);
823 return result; 787 return result;
824 #else 788 #else
825 ASSERT(root->initialized); 789 DCHECK(root->initialized);
826 size_t requestedSize = size; 790 size_t requestedSize = size;
827 size = partitionCookieSizeAdjustAdd(size); 791 size = partitionCookieSizeAdjustAdd(size);
828 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 792 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
829 void* ret = nullptr; 793 void* ret = nullptr;
830 { 794 {
831 SpinLock::Guard guard(root->lock); 795 subtle::SpinLock::Guard guard(root->lock);
832 ret = partitionBucketAlloc(root, flags, size, bucket); 796 ret = partitionBucketAlloc(root, flags, size, bucket);
833 } 797 }
834 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); 798 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName);
835 return ret; 799 return ret;
836 #endif 800 #endif
837 } 801 }
838 802
839 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, 803 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root,
840 size_t size, 804 size_t size,
841 const char* typeName) { 805 const char* typeName) {
842 return partitionAllocGenericFlags(root, 0, size, typeName); 806 return partitionAllocGenericFlags(root, 0, size, typeName);
843 } 807 }
844 808
845 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { 809 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
846 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 810 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
847 free(ptr); 811 free(ptr);
848 #else 812 #else
849 ASSERT(root->initialized); 813 DCHECK(root->initialized);
850 814
851 if (UNLIKELY(!ptr)) 815 if (UNLIKELY(!ptr))
852 return; 816 return;
853 817
854 PartitionAllocHooks::freeHookIfEnabled(ptr); 818 PartitionAllocHooks::freeHookIfEnabled(ptr);
855 ptr = partitionCookieFreePointerAdjust(ptr); 819 ptr = partitionCookieFreePointerAdjust(ptr);
856 ASSERT(partitionPointerIsValid(ptr)); 820 DCHECK(partitionPointerIsValid(ptr));
857 PartitionPage* page = partitionPointerToPage(ptr); 821 PartitionPage* page = partitionPointerToPage(ptr);
858 { 822 {
859 SpinLock::Guard guard(root->lock); 823 subtle::SpinLock::Guard guard(root->lock);
860 partitionFreeWithPage(ptr, page); 824 partitionFreeWithPage(ptr, page);
861 } 825 }
862 #endif 826 #endif
863 } 827 }
864 828
865 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { 829 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) {
866 // Caller must check that the size is not above the kGenericMaxDirectMapped 830 // Caller must check that the size is not above the kGenericMaxDirectMapped
867 // limit before calling. This also guards against integer overflow in the 831 // limit before calling. This also guards against integer overflow in the
868 // calculation here. 832 // calculation here.
869 ASSERT(size <= kGenericMaxDirectMapped); 833 DCHECK(size <= kGenericMaxDirectMapped);
870 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; 834 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
871 } 835 }
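For example, assuming 4 KiB system pages (kSystemPageSize == 4096; the actual constant is defined in an elided hunk):

//   partitionDirectMapSize(70000) = (70000 + 4095) & ~4095
//                                 = 73728           (18 system pages)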
872 836
873 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, 837 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root,
874 size_t size) { 838 size_t size) {
875 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 839 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
876 return size; 840 return size;
877 #else 841 #else
878 ASSERT(root->initialized); 842 DCHECK(root->initialized);
879 size = partitionCookieSizeAdjustAdd(size); 843 size = partitionCookieSizeAdjustAdd(size);
880 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 844 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
881 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { 845 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
882 size = bucket->slotSize; 846 size = bucket->slotSize;
883 } else if (size > kGenericMaxDirectMapped) { 847 } else if (size > kGenericMaxDirectMapped) {
884 // Too large to allocate => return the size unchanged. 848 // Too large to allocate => return the size unchanged.
885 } else { 849 } else {
886 ASSERT(bucket == &PartitionRootBase::gPagedBucket); 850 DCHECK(bucket == &PartitionRootBase::gPagedBucket);
887 size = partitionDirectMapSize(size); 851 size = partitionDirectMapSize(size);
888 } 852 }
889 return partitionCookieSizeAdjustSubtract(size); 853 return partitionCookieSizeAdjustSubtract(size);
890 #endif 854 #endif
891 } 855 }
892 856
893 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { 857 ALWAYS_INLINE bool partitionAllocSupportsGetSize() {
894 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 858 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
895 return false; 859 return false;
896 #else 860 #else
897 return true; 861 return true;
898 #endif 862 #endif
899 } 863 }
900 864
901 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { 865 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) {
902 // No need to lock here. Only 'ptr' being freed by another thread could 866 // No need to lock here. Only 'ptr' being freed by another thread could
903 // cause trouble, and the caller is responsible for that not happening. 867 // cause trouble, and the caller is responsible for that not happening.
904 ASSERT(partitionAllocSupportsGetSize()); 868 DCHECK(partitionAllocSupportsGetSize());
905 ptr = partitionCookieFreePointerAdjust(ptr); 869 ptr = partitionCookieFreePointerAdjust(ptr);
906 ASSERT(partitionPointerIsValid(ptr)); 870 DCHECK(partitionPointerIsValid(ptr));
907 PartitionPage* page = partitionPointerToPage(ptr); 871 PartitionPage* page = partitionPointerToPage(ptr);
908 size_t size = page->bucket->slotSize; 872 size_t size = page->bucket->slotSize;
909 return partitionCookieSizeAdjustSubtract(size); 873 return partitionCookieSizeAdjustSubtract(size);
910 } 874 }
911 875
912 // N (or more accurately, N - sizeof(void*)) represents the largest size in 876 // N (or more accurately, N - sizeof(void*)) represents the largest size in
913 // bytes that will be handled by a SizeSpecificPartitionAllocator. 877 // bytes that will be handled by a SizeSpecificPartitionAllocator.
914 // Attempts to partitionAlloc() more than this amount will fail. 878 // Attempts to partitionAlloc() more than this amount will fail.
915 template <size_t N> 879 template <size_t N>
916 class SizeSpecificPartitionAllocator { 880 class SizeSpecificPartitionAllocator {
(...skipping 14 matching lines...)
931 class PartitionAllocatorGeneric { 895 class PartitionAllocatorGeneric {
932 public: 896 public:
933 void init() { partitionAllocGenericInit(&m_partitionRoot); } 897 void init() { partitionAllocGenericInit(&m_partitionRoot); }
934 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } 898 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
935 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } 899 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }
936 900
937 private: 901 private:
938 PartitionRootGeneric m_partitionRoot; 902 PartitionRootGeneric m_partitionRoot;
939 }; 903 };
940 904
941 } // namespace WTF 905 } // namespace base
942 906
943 using WTF::SizeSpecificPartitionAllocator; 907 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
944 using WTF::PartitionAllocatorGeneric;
945 using WTF::PartitionRoot;
946 using WTF::partitionAllocInit;
947 using WTF::partitionAllocShutdown;
948 using WTF::partitionAlloc;
949 using WTF::partitionFree;
950 using WTF::partitionAllocGeneric;
951 using WTF::partitionFreeGeneric;
952 using WTF::partitionReallocGeneric;
953 using WTF::partitionAllocActualSize;
954 using WTF::partitionAllocSupportsGetSize;
955 using WTF::partitionAllocGetSize;
956
957 #endif // WTF_PartitionAlloc_h
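For completeness, a hedged sketch of the size-specific allocator whose class body is elided above; init() and root() are assumed to mirror PartitionAllocatorGeneric, and the partition and type name are hypothetical:

static base::SizeSpecificPartitionAllocator<1024> gNodePartition;  // hypothetical

void* AllocNode(size_t size) {
  // Per the comment above, requests larger than N - sizeof(void*)
  // (1024 - 8 on 64-bit here) will fail.
  return base::partitionAlloc(gNodePartition.root(), size, "Node");
}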
