Chromium Code Reviews

Side by Side Diff: base/allocator/partition_allocator/partition_alloc.h

Issue 2518253002: Move Partition Allocator into Chromium base. (Closed)
Patch Set: Rebase and resolve conflict. Created 4 years ago
OLD | NEW
1 /* 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be
3 * 3 // found in the LICENSE file.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30 4
31 #ifndef WTF_PartitionAlloc_h 5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
32 #define WTF_PartitionAlloc_h 6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
33 7
34 // DESCRIPTION 8 // DESCRIPTION
35 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / 9 // partitionAlloc() / partitionAllocGeneric() and partitionFree() /
36 // partitionFreeGeneric() are approximately analogous to malloc() and free(). 10 // partitionFreeGeneric() are approximately analogous to malloc() and free().
37 // 11 //
38 // The main difference is that a PartitionRoot / PartitionRootGeneric object 12 // The main difference is that a PartitionRoot / PartitionRootGeneric object
39 // must be supplied to these functions, representing a specific "heap partition" 13 // must be supplied to these functions, representing a specific "heap partition"
40 // that will be used to satisfy the allocation. Different partitions are 14 // that will be used to satisfy the allocation. Different partitions are
41 // guaranteed to exist in separate address spaces, including being separate from 15 // guaranteed to exist in separate address spaces, including being separate from
42 // the main system heap. If the contained objects are all freed, physical memory 16 // the main system heap. If the contained objects are all freed, physical memory
(...skipping 35 matching lines...)
78 // pages, enabling various simple tricks to try and minimize fragmentation. 52 // pages, enabling various simple tricks to try and minimize fragmentation.
79 // - Fine-grained bucket sizes leading to less waste and better packing. 53 // - Fine-grained bucket sizes leading to less waste and better packing.
80 // 54 //
81 // The following security properties could be investigated in the future: 55 // The following security properties could be investigated in the future:
82 // - Per-object bucketing (instead of per-size) is mostly available at the API, 56 // - Per-object bucketing (instead of per-size) is mostly available at the API,
83 // but not used yet. 57 // but not used yet.
84 // - No randomness of freelist entries or bucket position. 58 // - No randomness of freelist entries or bucket position.
85 // - Better checking for wild pointers in free(). 59 // - Better checking for wild pointers in free().
86 // - Better freelist masking function to guarantee fault on 32-bit. 60 // - Better freelist masking function to guarantee fault on 32-bit.
87 61
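
For orientation, here is a minimal usage sketch of the generic API described above, assuming the PartitionAllocatorGeneric wrapper declared near the end of this header (error handling and the partitionAllocGlobalInit() OOM hook are omitted):

  // A dedicated partition, guaranteed to live apart from the system heap.
  base::PartitionAllocatorGeneric allocator;
  allocator.init();  // Must be called before any allocation.

  void* p = base::partitionAllocGeneric(allocator.root(), 128, "MyBuffer");
  // ... use the 128 bytes at p ...
  base::partitionFreeGeneric(allocator.root(), p);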
88 #include "wtf/Assertions.h" 62 #include <limits.h>
89 #include "wtf/BitwiseOperations.h"
90 #include "wtf/ByteSwap.h"
91 #include "wtf/CPU.h"
92 #include "wtf/SpinLock.h"
93 #include "wtf/TypeTraits.h"
94 #include "wtf/allocator/PageAllocator.h"
95 63
96 #include <limits.h> 64 #include "base/allocator/partition_allocator/page_allocator.h"
65 #include "base/bits.h"
66 #include "base/compiler_specific.h"
67 #include "base/logging.h"
68 #include "base/synchronization/spin_lock.h"
69 #include "base/sys_byteorder.h"
70 #include "build/build_config.h"
97 71
98 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 72 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
99 #include <stdlib.h> 73 #include <stdlib.h>
100 #endif 74 #endif
101 75
102 #if ENABLE(ASSERT) 76 namespace base {
103 #include <string.h>
104 #endif
105
106 namespace WTF {
107 77
108 // Allocation granularity of sizeof(void*) bytes. 78 // Allocation granularity of sizeof(void*) bytes.
109 static const size_t kAllocationGranularity = sizeof(void*); 79 static const size_t kAllocationGranularity = sizeof(void*);
110 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1; 80 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
111 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; 81 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
112 82
113 // Underlying partition storage pages are a power-of-two size. It is typical 83 // Underlying partition storage pages are a power-of-two size. It is typical
114 // for a partition page to be based on multiple system pages. Most references to 84 // for a partition page to be based on multiple system pages. Most references to
115 // "page" refer to partition pages. 85 // "page" refer to partition pages.
116 // We also have the concept of "super pages" -- these are the underlying system 86 // We also have the concept of "super pages" -- these are the underlying system
(...skipping 117 matching lines...)
234 // Constants for the memory reclaim logic. 204 // Constants for the memory reclaim logic.
235 static const size_t kMaxFreeableSpans = 16; 205 static const size_t kMaxFreeableSpans = 16;
236 206
237 // If the total size in bytes of allocated but not committed pages exceeds this 207 // If the total size in bytes of allocated but not committed pages exceeds this
238 // value (probably it is an "out of virtual address space" crash), 208 // value (probably it is an "out of virtual address space" crash),
239 // a special crash stack trace is generated at |partitionOutOfMemory|. 209 // a special crash stack trace is generated at |partitionOutOfMemory|.
240 // This is to distinguish "out of virtual address space" from 210 // This is to distinguish "out of virtual address space" from
241 // "out of physical memory" in crash reports. 211 // "out of physical memory" in crash reports.
242 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB 212 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
243 213
244 #if ENABLE(ASSERT) 214 #if DCHECK_IS_ON()
245 // These two byte values match tcmalloc. 215 // These two byte values match tcmalloc.
246 static const unsigned char kUninitializedByte = 0xAB; 216 static const unsigned char kUninitializedByte = 0xAB;
247 static const unsigned char kFreedByte = 0xCD; 217 static const unsigned char kFreedByte = 0xCD;
248 static const size_t kCookieSize = 218 static const size_t kCookieSize =
249 16; // Handles alignment up to XMM instructions on Intel. 219 16; // Handles alignment up to XMM instructions on Intel.
250 static const unsigned char kCookieValue[kCookieSize] = { 220 static const unsigned char kCookieValue[kCookieSize] = {
251 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, 221 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
252 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; 222 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
253 #endif 223 #endif
254 224
(...skipping 59 matching lines...)
314 PartitionSuperPageExtentEntry* next; 284 PartitionSuperPageExtentEntry* next;
315 }; 285 };
316 286
317 struct PartitionDirectMapExtent { 287 struct PartitionDirectMapExtent {
318 PartitionDirectMapExtent* nextExtent; 288 PartitionDirectMapExtent* nextExtent;
319 PartitionDirectMapExtent* prevExtent; 289 PartitionDirectMapExtent* prevExtent;
320 PartitionBucket* bucket; 290 PartitionBucket* bucket;
321 size_t mapSize; // Mapped size, not including guard pages and meta-data. 291 size_t mapSize; // Mapped size, not including guard pages and meta-data.
322 }; 292 };
323 293
324 struct WTF_EXPORT PartitionRootBase { 294 struct BASE_EXPORT PartitionRootBase {
325 size_t totalSizeOfCommittedPages; 295 size_t totalSizeOfCommittedPages;
326 size_t totalSizeOfSuperPages; 296 size_t totalSizeOfSuperPages;
327 size_t totalSizeOfDirectMappedPages; 297 size_t totalSizeOfDirectMappedPages;
328 // Invariant: totalSizeOfCommittedPages <= 298 // Invariant: totalSizeOfCommittedPages <=
329 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. 299 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages.
330 unsigned numBuckets; 300 unsigned numBuckets;
331 unsigned maxAllocation; 301 unsigned maxAllocation;
332 bool initialized; 302 bool initialized;
333 char* nextSuperPage; 303 char* nextSuperPage;
334 char* nextPartitionPage; 304 char* nextPartitionPage;
335 char* nextPartitionPageEnd; 305 char* nextPartitionPageEnd;
336 PartitionSuperPageExtentEntry* currentExtent; 306 PartitionSuperPageExtentEntry* currentExtent;
337 PartitionSuperPageExtentEntry* firstExtent; 307 PartitionSuperPageExtentEntry* firstExtent;
338 PartitionDirectMapExtent* directMapList; 308 PartitionDirectMapExtent* directMapList;
339 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; 309 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
340 int16_t globalEmptyPageRingIndex; 310 int16_t globalEmptyPageRingIndex;
341 uintptr_t invertedSelf; 311 uintptr_t invertedSelf;
342 312
343 static SpinLock gInitializedLock; 313 static subtle::SpinLock gInitializedLock;
344 static bool gInitialized; 314 static bool gInitialized;
345 // gSeedPage is used as a sentinel to indicate that there is no page 315 // gSeedPage is used as a sentinel to indicate that there is no page
346 // in the active page list. We can use nullptr, but in that case we need 316 // in the active page list. We can use nullptr, but in that case we need
347 // to add a null-check branch to the hot allocation path. We want to avoid 317 // to add a null-check branch to the hot allocation path. We want to avoid
348 // that. 318 // that.
349 static PartitionPage gSeedPage; 319 static PartitionPage gSeedPage;
350 static PartitionBucket gPagedBucket; 320 static PartitionBucket gPagedBucket;
351 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. 321 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
352 static void (*gOomHandlingFunction)(); 322 static void (*gOomHandlingFunction)();
353 }; 323 };
354 324
355 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. 325 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
356 struct PartitionRoot : public PartitionRootBase { 326 struct PartitionRoot : public PartitionRootBase {
357 // The PartitionAlloc templated class ensures the following is correct. 327 // The PartitionAlloc templated class ensures the following is correct.
358 ALWAYS_INLINE PartitionBucket* buckets() { 328 ALWAYS_INLINE PartitionBucket* buckets() {
359 return reinterpret_cast<PartitionBucket*>(this + 1); 329 return reinterpret_cast<PartitionBucket*>(this + 1);
360 } 330 }
361 ALWAYS_INLINE const PartitionBucket* buckets() const { 331 ALWAYS_INLINE const PartitionBucket* buckets() const {
362 return reinterpret_cast<const PartitionBucket*>(this + 1); 332 return reinterpret_cast<const PartitionBucket*>(this + 1);
363 } 333 }
364 }; 334 };
365 335
366 // Never instantiate a PartitionRootGeneric directly, instead use 336 // Never instantiate a PartitionRootGeneric directly, instead use
367 // PartitionAllocatorGeneric. 337 // PartitionAllocatorGeneric.
368 struct PartitionRootGeneric : public PartitionRootBase { 338 struct PartitionRootGeneric : public PartitionRootBase {
369 SpinLock lock; 339 subtle::SpinLock lock;
370 // Some pre-computed constants. 340 // Some pre-computed constants.
371 size_t orderIndexShifts[kBitsPerSizet + 1]; 341 size_t orderIndexShifts[kBitsPerSizet + 1];
372 size_t orderSubIndexMasks[kBitsPerSizet + 1]; 342 size_t orderSubIndexMasks[kBitsPerSizet + 1];
373 // The bucket lookup table lets us map a size_t to a bucket quickly. 343 // The bucket lookup table lets us map a size_t to a bucket quickly.
374 // The trailing +1 caters for the overflow case for very large allocation 344 // The trailing +1 caters for the overflow case for very large allocation
375 // sizes. It is one flat array instead of a 2D array because in the 2D 345 // sizes. It is one flat array instead of a 2D array because in the 2D
376 // world, we'd need to index array[blah][max+1] which risks undefined 346 // world, we'd need to index array[blah][max+1] which risks undefined
377 // behavior. 347 // behavior.
378 PartitionBucket* 348 PartitionBucket*
379 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; 349 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
(...skipping 33 matching lines...)
413 uint32_t numActivePages; // Number of pages that have at least one 383 uint32_t numActivePages; // Number of pages that have at least one
414 // provisioned slot. 384 // provisioned slot.
415 uint32_t numEmptyPages; // Number of pages that are empty 385 uint32_t numEmptyPages; // Number of pages that are empty
416 // but not decommitted. 386 // but not decommitted.
417 uint32_t numDecommittedPages; // Number of pages that are empty 387 uint32_t numDecommittedPages; // Number of pages that are empty
418 // and decommitted. 388 // and decommitted.
419 }; 389 };
420 390
421 // Interface that is passed to partitionDumpStats and 391 // Interface that is passed to partitionDumpStats and
422 // partitionDumpStatsGeneric for using the memory statistics. 392 // partitionDumpStatsGeneric for using the memory statistics.
423 class WTF_EXPORT PartitionStatsDumper { 393 class BASE_EXPORT PartitionStatsDumper {
424 public: 394 public:
425 // Called to dump total memory used by partition, once per partition. 395 // Called to dump total memory used by partition, once per partition.
426 virtual void partitionDumpTotals(const char* partitionName, 396 virtual void partitionDumpTotals(const char* partitionName,
427 const PartitionMemoryStats*) = 0; 397 const PartitionMemoryStats*) = 0;
428 398
429 // Called to dump stats about buckets, for each bucket. 399 // Called to dump stats about buckets, for each bucket.
430 virtual void partitionsDumpBucketStats(const char* partitionName, 400 virtual void partitionsDumpBucketStats(const char* partitionName,
431 const PartitionBucketMemoryStats*) = 0; 401 const PartitionBucketMemoryStats*) = 0;
432 }; 402 };
433 403
434 WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); 404 BASE_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)());
435 WTF_EXPORT void partitionAllocInit(PartitionRoot*, 405 BASE_EXPORT void partitionAllocInit(PartitionRoot*,
436 size_t numBuckets, 406 size_t numBuckets,
437 size_t maxAllocation); 407 size_t maxAllocation);
438 WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*); 408 BASE_EXPORT bool partitionAllocShutdown(PartitionRoot*);
439 WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); 409 BASE_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
440 WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); 410 BASE_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);
441 411
442 enum PartitionPurgeFlags { 412 enum PartitionPurgeFlags {
443 // Decommitting the ring list of empty pages is reasonably fast. 413 // Decommitting the ring list of empty pages is reasonably fast.
444 PartitionPurgeDecommitEmptyPages = 1 << 0, 414 PartitionPurgeDecommitEmptyPages = 1 << 0,
445 // Discarding unused system pages is slower, because it involves walking all 415 // Discarding unused system pages is slower, because it involves walking all
446 // freelists in all active partition pages of all buckets >= system page 416 // freelists in all active partition pages of all buckets >= system page
447 // size. It often frees a similar amount of memory to decommitting the empty 417 // size. It often frees a similar amount of memory to decommitting the empty
448 // pages, though. 418 // pages, though.
449 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, 419 PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
450 }; 420 };
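
As a sketch, a caller wanting maximum reclamation might combine both flags; the purge entry points above take the flags as a plain int bitmask:

  // Decommit empty pages (cheap) and discard unused system pages (slower).
  // Assumes |allocator| is an initialized PartitionAllocatorGeneric.
  partitionPurgeMemoryGeneric(
      allocator.root(),
      PartitionPurgeDecommitEmptyPages | PartitionPurgeDiscardUnusedSystemPages);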
451 421
452 WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int); 422 BASE_EXPORT void partitionPurgeMemory(PartitionRoot*, int);
453 WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); 423 BASE_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
454 424
455 WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, 425 BASE_EXPORT NOINLINE void* partitionAllocSlowPath(PartitionRootBase*,
456 int, 426 int,
457 size_t, 427 size_t,
458 PartitionBucket*); 428 PartitionBucket*);
459 WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*); 429 BASE_EXPORT NOINLINE void partitionFreeSlowPath(PartitionPage*);
460 WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, 430 BASE_EXPORT NOINLINE void* partitionReallocGeneric(PartitionRootGeneric*,
461 void*, 431 void*,
462 size_t, 432 size_t,
463 const char* typeName); 433 const char* typeName);
464 434
465 WTF_EXPORT void partitionDumpStats(PartitionRoot*, 435 BASE_EXPORT void partitionDumpStats(PartitionRoot*,
466 const char* partitionName, 436 const char* partitionName,
467 bool isLightDump, 437 bool isLightDump,
468 PartitionStatsDumper*); 438 PartitionStatsDumper*);
469 WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, 439 BASE_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*,
470 const char* partitionName, 440 const char* partitionName,
471 bool isLightDump, 441 bool isLightDump,
472 PartitionStatsDumper*); 442 PartitionStatsDumper*);
473 443
474 class WTF_EXPORT PartitionAllocHooks { 444 class BASE_EXPORT PartitionAllocHooks {
475 public: 445 public:
476 typedef void AllocationHook(void* address, size_t, const char* typeName); 446 typedef void AllocationHook(void* address, size_t, const char* typeName);
477 typedef void FreeHook(void* address); 447 typedef void FreeHook(void* address);
478 448
479 static void setAllocationHook(AllocationHook* hook) { 449 static void setAllocationHook(AllocationHook* hook) {
480 m_allocationHook = hook; 450 m_allocationHook = hook;
481 } 451 }
482 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } 452 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; }
483 453
484 static void allocationHookIfEnabled(void* address, 454 static void allocationHookIfEnabled(void* address,
(...skipping 23 matching lines...)
508 } 478 }
509 } 479 }
510 480
511 private: 481 private:
512 // Pointers to hook functions that PartitionAlloc will call on allocation and 482 // Pointers to hook functions that PartitionAlloc will call on allocation and
513 // free if the pointers are non-null. 483 // free if the pointers are non-null.
514 static AllocationHook* m_allocationHook; 484 static AllocationHook* m_allocationHook;
515 static FreeHook* m_freeHook; 485 static FreeHook* m_freeHook;
516 }; 486 };
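
A hedged sketch of registering hooks, e.g. for a heap profiler; the hook functions below are hypothetical, but their signatures follow the AllocationHook/FreeHook typedefs above:

  // Hypothetical observers; PartitionAlloc calls them on every allocation and
  // free once registered (a null hook disables the callback).
  static void MyAllocationHook(void* address, size_t size, const char* typeName) {
    // e.g. record (address, size, typeName) for heap profiling.
  }
  static void MyFreeHook(void* address) {
    // e.g. drop the record for |address|.
  }

  PartitionAllocHooks::setAllocationHook(&MyAllocationHook);
  PartitionAllocHooks::setFreeHook(&MyFreeHook);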
517 487
518 // In official builds, do not include type info string literals to avoid
519 // bloating the binary.
520 #if defined(OFFICIAL_BUILD)
521 #define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr
522 #else
523 #define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>()
524 #endif
525
526 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( 488 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(
527 PartitionFreelistEntry* ptr) { 489 PartitionFreelistEntry* ptr) {
528 // We use bswap on little endian as a fast mask for two reasons: 490 // We use bswap on little endian as a fast mask for two reasons:
529 // 1) If an object is freed and its vtable used where the attacker doesn't 491 // 1) If an object is freed and its vtable used where the attacker doesn't
530 // get the chance to run allocations between the free and use, the vtable 492 // get the chance to run allocations between the free and use, the vtable
531 // dereference is likely to fault. 493 // dereference is likely to fault.
532 // 2) If the attacker has a linear buffer overflow and elects to try and 494 // 2) If the attacker has a linear buffer overflow and elects to try and
533 // corrupt a freelist pointer, partial pointer overwrite attacks are 495 // corrupt a freelist pointer, partial pointer overwrite attacks are
534 // thwarted. 496 // thwarted.
535 // For big endian, similar guarantees are arrived at with a negation. 497 // For big endian, similar guarantees are arrived at with a negation.
536 #if CPU(BIG_ENDIAN) 498 #if defined(ARCH_CPU_BIG_ENDIAN)
537 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); 499 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
538 #else 500 #else
539 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr)); 501 uintptr_t masked = ByteSwapUintPtrT(reinterpret_cast<uintptr_t>(ptr));
540 #endif 502 #endif
541 return reinterpret_cast<PartitionFreelistEntry*>(masked); 503 return reinterpret_cast<PartitionFreelistEntry*>(masked);
542 } 504 }
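
The same transform works as both encode and decode because byte-swapping (like negation) is an involution. A standalone sketch of the little-endian case, using a compiler builtin as a stand-in for ByteSwapUintPtrT and assuming a 64-bit GCC/Clang target:

  #include <cassert>
  #include <cstdint>

  // Stand-in for base::ByteSwapUintPtrT on a 64-bit GCC/Clang toolchain.
  uintptr_t SwapBytes(uintptr_t x) { return __builtin_bswap64(x); }

  int main() {
    uintptr_t ptr = 0x00007f1234567890;  // Typical user-space x86-64 pointer.
    uintptr_t masked = SwapBytes(ptr);   // 0x90785634127f0000: non-canonical,
                                         // so dereferencing it faults.
    assert(SwapBytes(masked) == ptr);    // mask(mask(p)) == p.
    return 0;
  }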
543 505
544 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { 506 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) {
545 #if ENABLE(ASSERT) 507 #if DCHECK_IS_ON()
546 // Add space for cookies, checking for integer overflow. 508 // Add space for cookies, checking for integer overflow.
547 ASSERT(size + (2 * kCookieSize) > size); 509 DCHECK(size + (2 * kCookieSize) > size);
548 size += 2 * kCookieSize; 510 size += 2 * kCookieSize;
549 #endif 511 #endif
550 return size; 512 return size;
551 } 513 }
552 514
553 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { 515 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) {
554 #if ENABLE(ASSERT) 516 #if DCHECK_IS_ON()
555 // Remove space for cookies. 517 // Remove space for cookies.
556 ASSERT(size >= 2 * kCookieSize); 518 DCHECK(size >= 2 * kCookieSize);
557 size -= 2 * kCookieSize; 519 size -= 2 * kCookieSize;
558 #endif 520 #endif
559 return size; 521 return size;
560 } 522 }
561 523
562 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { 524 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) {
563 #if ENABLE(ASSERT) 525 #if DCHECK_IS_ON()
564 // The value given to the application is actually just after the cookie. 526 // The value given to the application is actually just after the cookie.
565 ptr = static_cast<char*>(ptr) - kCookieSize; 527 ptr = static_cast<char*>(ptr) - kCookieSize;
566 #endif 528 #endif
567 return ptr; 529 return ptr;
568 } 530 }
569 531
570 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { 532 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) {
571 #if ENABLE(ASSERT) 533 #if DCHECK_IS_ON()
572 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); 534 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
573 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) 535 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
574 *cookiePtr = kCookieValue[i]; 536 *cookiePtr = kCookieValue[i];
575 #endif 537 #endif
576 } 538 }
577 539
578 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { 540 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) {
579 #if ENABLE(ASSERT) 541 #if DCHECK_IS_ON()
580 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); 542 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
581 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) 543 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
582 ASSERT(*cookiePtr == kCookieValue[i]); 544 DCHECK(*cookiePtr == kCookieValue[i]);
583 #endif 545 #endif
584 } 546 }
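
Taken together, the helpers above give DCHECK builds this slot layout (a sketch; kCookieSize is 16 per the constants earlier in this header):

  // |<- kCookieSize ->|<----- requested size ----->|<- kCookieSize ->|
  // [  kCookieValue   ][ user data, memset to 0xAB ][  kCookieValue  ]
  //  ^ slot start       ^ pointer returned to the caller
  //
  // partitionCookieSizeAdjustAdd() grows each request by 2 * kCookieSize,
  // partitionCookieFreePointerAdjust() steps back over the leading cookie on
  // free, and partitionCookieCheckValue() verifies each cookie is intact.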
585 547
586 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { 548 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) {
587 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); 549 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
588 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); 550 DCHECK(!(pointerAsUint & kSuperPageOffsetMask));
589 // The metadata area is exactly one system page (the guard page) into the 551 // The metadata area is exactly one system page (the guard page) into the
590 // super page. 552 // super page.
591 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); 553 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
592 } 554 }
593 555
594 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { 556 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) {
595 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); 557 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
596 char* superPagePtr = 558 char* superPagePtr =
597 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); 559 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
598 uintptr_t partitionPageIndex = 560 uintptr_t partitionPageIndex =
599 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; 561 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
600 // Index 0 is invalid because it is the metadata and guard area and 562 // Index 0 is invalid because it is the metadata and guard area and
601 // the last index is invalid because it is a guard page. 563 // the last index is invalid because it is a guard page.
602 ASSERT(partitionPageIndex); 564 DCHECK(partitionPageIndex);
603 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 565 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
604 PartitionPage* page = reinterpret_cast<PartitionPage*>( 566 PartitionPage* page = reinterpret_cast<PartitionPage*>(
605 partitionSuperPageToMetadataArea(superPagePtr) + 567 partitionSuperPageToMetadataArea(superPagePtr) +
606 (partitionPageIndex << kPageMetadataShift)); 568 (partitionPageIndex << kPageMetadataShift));
607 // Partition pages in the same slot span can share the same page object. 569 // Partition pages in the same slot span can share the same page object.
608 // Adjust for that. 570 // Adjust for that.
609 size_t delta = page->pageOffset << kPageMetadataShift; 571 size_t delta = page->pageOffset << kPageMetadataShift;
610 page = 572 page =
611 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); 573 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
612 return page; 574 return page;
613 } 575 }
614 576
615 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { 577 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) {
616 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); 578 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
617 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); 579 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
618 ASSERT(superPageOffset > kSystemPageSize); 580 DCHECK(superPageOffset > kSystemPageSize);
619 ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * 581 DCHECK(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
620 kPageMetadataSize)); 582 kPageMetadataSize));
621 uintptr_t partitionPageIndex = 583 uintptr_t partitionPageIndex =
622 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; 584 (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
623 // Index 0 is invalid because it is the metadata area and the last index is 585 // Index 0 is invalid because it is the metadata area and the last index is
624 // invalid because it is a guard page. 586 // invalid because it is a guard page.
625 ASSERT(partitionPageIndex); 587 DCHECK(partitionPageIndex);
626 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 588 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
627 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); 589 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
628 void* ret = reinterpret_cast<void*>( 590 void* ret = reinterpret_cast<void*>(
629 superPageBase + (partitionPageIndex << kPartitionPageShift)); 591 superPageBase + (partitionPageIndex << kPartitionPageShift));
630 return ret; 592 return ret;
631 } 593 }
632 594
633 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { 595 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) {
634 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); 596 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
635 // Checks that the pointer is a multiple of bucket size. 597 // Checks that the pointer is a multiple of bucket size.
636 ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - 598 DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
637 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % 599 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) %
638 page->bucket->slotSize)); 600 page->bucket->slotSize));
639 return page; 601 return page;
640 } 602 }
641 603
642 ALWAYS_INLINE bool partitionBucketIsDirectMapped( 604 ALWAYS_INLINE bool partitionBucketIsDirectMapped(
643 const PartitionBucket* bucket) { 605 const PartitionBucket* bucket) {
644 return !bucket->numSystemPagesPerSlotSpan; 606 return !bucket->numSystemPagesPerSlotSpan;
645 } 607 }
646 608
647 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { 609 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) {
648 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; 610 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
649 } 611 }
650 612
651 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { 613 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) {
652 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); 614 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
653 } 615 }
654 616
655 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { 617 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) {
656 // For single-slot buckets which span more than one partition page, we 618 // For single-slot buckets which span more than one partition page, we
657 // have some spare metadata space to store the raw allocation size. We 619 // have some spare metadata space to store the raw allocation size. We
658 // can use this to report better statistics. 620 // can use this to report better statistics.
659 PartitionBucket* bucket = page->bucket; 621 PartitionBucket* bucket = page->bucket;
660 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) 622 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
661 return nullptr; 623 return nullptr;
662 624
663 ASSERT((bucket->slotSize % kSystemPageSize) == 0); 625 DCHECK((bucket->slotSize % kSystemPageSize) == 0);
664 ASSERT(partitionBucketIsDirectMapped(bucket) || 626 DCHECK(partitionBucketIsDirectMapped(bucket) ||
665 partitionBucketSlots(bucket) == 1); 627 partitionBucketSlots(bucket) == 1);
666 page++; 628 page++;
667 return reinterpret_cast<size_t*>(&page->freelistHead); 629 return reinterpret_cast<size_t*>(&page->freelistHead);
668 } 630 }
669 631
670 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { 632 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) {
671 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); 633 size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
672 if (UNLIKELY(rawSizePtr != nullptr)) 634 if (UNLIKELY(rawSizePtr != nullptr))
673 return *rawSizePtr; 635 return *rawSizePtr;
674 return 0; 636 return 0;
(...skipping 11 matching lines...)
686 PartitionRootBase* root = partitionPageToRoot(page); 648 PartitionRootBase* root = partitionPageToRoot(page);
687 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); 649 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
688 } 650 }
689 651
690 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, 652 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root,
691 int flags, 653 int flags,
692 size_t size, 654 size_t size,
693 PartitionBucket* bucket) { 655 PartitionBucket* bucket) {
694 PartitionPage* page = bucket->activePagesHead; 656 PartitionPage* page = bucket->activePagesHead;
695 // Check that this page is neither full nor freed. 657 // Check that this page is neither full nor freed.
696 ASSERT(page->numAllocatedSlots >= 0); 658 DCHECK(page->numAllocatedSlots >= 0);
697 void* ret = page->freelistHead; 659 void* ret = page->freelistHead;
698 if (LIKELY(ret != 0)) { 660 if (LIKELY(ret != 0)) {
699 // If these asserts fire, you probably corrupted memory. 661 // If these asserts fire, you probably corrupted memory.
700 ASSERT(partitionPointerIsValid(ret)); 662 DCHECK(partitionPointerIsValid(ret));
701 // All large allocations must go through the slow path to correctly 663 // All large allocations must go through the slow path to correctly
702 // update the size metadata. 664 // update the size metadata.
703 ASSERT(partitionPageGetRawSize(page) == 0); 665 DCHECK(partitionPageGetRawSize(page) == 0);
704 PartitionFreelistEntry* newHead = 666 PartitionFreelistEntry* newHead =
705 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); 667 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
706 page->freelistHead = newHead; 668 page->freelistHead = newHead;
707 page->numAllocatedSlots++; 669 page->numAllocatedSlots++;
708 } else { 670 } else {
709 ret = partitionAllocSlowPath(root, flags, size, bucket); 671 ret = partitionAllocSlowPath(root, flags, size, bucket);
710 ASSERT(!ret || partitionPointerIsValid(ret)); 672 DCHECK(!ret || partitionPointerIsValid(ret));
711 } 673 }
712 #if ENABLE(ASSERT) 674 #if DCHECK_IS_ON()
713 if (!ret) 675 if (!ret)
714 return 0; 676 return 0;
715 // Fill the uninitialized pattern, and write the cookies. 677 // Fill the uninitialized pattern, and write the cookies.
716 page = partitionPointerToPage(ret); 678 page = partitionPointerToPage(ret);
717 size_t slotSize = page->bucket->slotSize; 679 size_t slotSize = page->bucket->slotSize;
718 size_t rawSize = partitionPageGetRawSize(page); 680 size_t rawSize = partitionPageGetRawSize(page);
719 if (rawSize) { 681 if (rawSize) {
720 ASSERT(rawSize == size); 682 DCHECK(rawSize == size);
721 slotSize = rawSize; 683 slotSize = rawSize;
722 } 684 }
723 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); 685 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
724 char* charRet = static_cast<char*>(ret); 686 char* charRet = static_cast<char*>(ret);
725 // The value given to the application is actually just after the cookie. 687 // The value given to the application is actually just after the cookie.
726 ret = charRet + kCookieSize; 688 ret = charRet + kCookieSize;
727 memset(ret, kUninitializedByte, noCookieSize); 689 memset(ret, kUninitializedByte, noCookieSize);
728 partitionCookieWriteValue(charRet); 690 partitionCookieWriteValue(charRet);
729 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); 691 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize);
730 #endif 692 #endif
731 return ret; 693 return ret;
732 } 694 }
733 695
734 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, 696 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root,
735 size_t size, 697 size_t size,
736 const char* typeName) { 698 const char* typeName) {
737 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 699 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
738 void* result = malloc(size); 700 void* result = malloc(size);
739 RELEASE_ASSERT(result); 701 CHECK(result);
haraken 2016/12/08 08:43:55 This is a super performance-critical method. I'm c
Primiano Tucci (use gerrit) 2016/12/08 17:14:05 I think this is only for *san (asan,lsan) builds,
palmer 2016/12/08 22:29:58 I thought that CHECK and RELEASE_ASSERT both boil
danakj 2016/12/09 00:20:15 It only affects weird cases with destructors I tho
740 return result; 702 return result;
741 #else 703 #else
742 size_t requestedSize = size; 704 size_t requestedSize = size;
743 size = partitionCookieSizeAdjustAdd(size); 705 size = partitionCookieSizeAdjustAdd(size);
744 ASSERT(root->initialized); 706 DCHECK(root->initialized);
745 size_t index = size >> kBucketShift; 707 size_t index = size >> kBucketShift;
746 ASSERT(index < root->numBuckets); 708 DCHECK(index < root->numBuckets);
747 ASSERT(size == index << kBucketShift); 709 DCHECK(size == index << kBucketShift);
748 PartitionBucket* bucket = &root->buckets()[index]; 710 PartitionBucket* bucket = &root->buckets()[index];
749 void* result = partitionBucketAlloc(root, 0, size, bucket); 711 void* result = partitionBucketAlloc(root, 0, size, bucket);
750 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); 712 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName);
751 return result; 713 return result;
752 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 714 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
753 } 715 }
754 716
755 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { 717 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) {
756 // If these asserts fire, you probably corrupted memory. 718 // If these asserts fire, you probably corrupted memory.
757 #if ENABLE(ASSERT) 719 #if DCHECK_IS_ON()
758 size_t slotSize = page->bucket->slotSize; 720 size_t slotSize = page->bucket->slotSize;
759 size_t rawSize = partitionPageGetRawSize(page); 721 size_t rawSize = partitionPageGetRawSize(page);
760 if (rawSize) 722 if (rawSize)
761 slotSize = rawSize; 723 slotSize = rawSize;
762 partitionCookieCheckValue(ptr); 724 partitionCookieCheckValue(ptr);
763 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - 725 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize -
764 kCookieSize); 726 kCookieSize);
765 memset(ptr, kFreedByte, slotSize); 727 memset(ptr, kFreedByte, slotSize);
766 #endif 728 #endif
767 ASSERT(page->numAllocatedSlots); 729 DCHECK(page->numAllocatedSlots);
768 PartitionFreelistEntry* freelistHead = page->freelistHead; 730 PartitionFreelistEntry* freelistHead = page->freelistHead;
769 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); 731 DCHECK(!freelistHead || partitionPointerIsValid(freelistHead));
770 SECURITY_CHECK(ptr != freelistHead); // Catches an immediate double free. 732 CHECK(ptr != freelistHead); // Catches an immediate double free.
771 // Look for double free one level deeper in debug. 733 // Look for double free one level deeper in debug.
772 SECURITY_DCHECK(!freelistHead || 734 DCHECK(!freelistHead || ptr != partitionFreelistMask(freelistHead->next));
773 ptr != partitionFreelistMask(freelistHead->next));
774 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); 735 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
775 entry->next = partitionFreelistMask(freelistHead); 736 entry->next = partitionFreelistMask(freelistHead);
776 page->freelistHead = entry; 737 page->freelistHead = entry;
777 --page->numAllocatedSlots; 738 --page->numAllocatedSlots;
778 if (UNLIKELY(page->numAllocatedSlots <= 0)) { 739 if (UNLIKELY(page->numAllocatedSlots <= 0)) {
779 partitionFreeSlowPath(page); 740 partitionFreeSlowPath(page);
780 } else { 741 } else {
781 // All single-slot allocations must go through the slow path to 742 // All single-slot allocations must go through the slow path to
782 // correctly update the size metadata. 743 // correctly update the size metadata.
783 ASSERT(partitionPageGetRawSize(page) == 0); 744 DCHECK(partitionPageGetRawSize(page) == 0);
784 } 745 }
785 } 746 }
786 747
787 ALWAYS_INLINE void partitionFree(void* ptr) { 748 ALWAYS_INLINE void partitionFree(void* ptr) {
788 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 749 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
789 free(ptr); 750 free(ptr);
790 #else 751 #else
791 PartitionAllocHooks::freeHookIfEnabled(ptr); 752 PartitionAllocHooks::freeHookIfEnabled(ptr);
792 ptr = partitionCookieFreePointerAdjust(ptr); 753 ptr = partitionCookieFreePointerAdjust(ptr);
793 ASSERT(partitionPointerIsValid(ptr)); 754 DCHECK(partitionPointerIsValid(ptr));
794 PartitionPage* page = partitionPointerToPage(ptr); 755 PartitionPage* page = partitionPointerToPage(ptr);
795 partitionFreeWithPage(ptr, page); 756 partitionFreeWithPage(ptr, page);
796 #endif 757 #endif
797 } 758 }
798 759
799 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( 760 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(
800 PartitionRootGeneric* root, 761 PartitionRootGeneric* root,
801 size_t size) { 762 size_t size) {
802 size_t order = kBitsPerSizet - CountLeadingZeroBitsSizeT(size); 763 size_t order = kBitsPerSizet - bits::CountLeadingZeroBitsSizeT(size);
803 // The order index is simply the next few bits after the most significant bit. 764 // The order index is simply the next few bits after the most significant bit.
804 size_t orderIndex = (size >> root->orderIndexShifts[order]) & 765 size_t orderIndex = (size >> root->orderIndexShifts[order]) &
805 (kGenericNumBucketsPerOrder - 1); 766 (kGenericNumBucketsPerOrder - 1);
806 // And if the remaining bits are non-zero we must bump the bucket up. 767 // And if the remaining bits are non-zero we must bump the bucket up.
807 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; 768 size_t subOrderIndex = size & root->orderSubIndexMasks[order];
808 PartitionBucket* bucket = 769 PartitionBucket* bucket =
809 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + 770 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) +
810 orderIndex + !!subOrderIndex]; 771 orderIndex + !!subOrderIndex];
811 ASSERT(!bucket->slotSize || bucket->slotSize >= size); 772 DCHECK(!bucket->slotSize || bucket->slotSize >= size);
812 ASSERT(!(bucket->slotSize % kGenericSmallestBucket)); 773 DCHECK(!(bucket->slotSize % kGenericSmallestBucket));
813 return bucket; 774 return bucket;
814 } 775 }
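
A worked example of the lookup math, assuming the elided constants take their usual values (8 buckets per order, i.e. kGenericNumBucketsPerOrderBits == 3, on a 64-bit size_t):

  // size = 4416 = 0b1000101000000 (13 significant bits)
  //   order         = 64 - 51 leading zeros     = 13
  //   orderIndex    = next 3 bits after the MSB = 0b000 = 0
  //   subOrderIndex = remaining low bits        = 0b101000000, non-zero -> bump
  //   lookup index  = (13 << 3) + 0 + 1         = 105
  // Buckets in order 13 step by 4096 / 8 = 512 bytes (4096, 4608, 5120, ...),
  // so a 4416-byte request lands in the 4608-byte bucket.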
815 776
816 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, 777 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root,
817 int flags, 778 int flags,
818 size_t size, 779 size_t size,
819 const char* typeName) { 780 const char* typeName) {
820 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 781 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
821 void* result = malloc(size); 782 void* result = malloc(size);
822 RELEASE_ASSERT(result || flags & PartitionAllocReturnNull); 783 CHECK(result || flags & PartitionAllocReturnNull);
823 return result; 784 return result;
824 #else 785 #else
825 ASSERT(root->initialized); 786 DCHECK(root->initialized);
826 size_t requestedSize = size; 787 size_t requestedSize = size;
827 size = partitionCookieSizeAdjustAdd(size); 788 size = partitionCookieSizeAdjustAdd(size);
828 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 789 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
829 void* ret = nullptr; 790 void* ret = nullptr;
830 { 791 {
831 SpinLock::Guard guard(root->lock); 792 subtle::SpinLock::Guard guard(root->lock);
832 ret = partitionBucketAlloc(root, flags, size, bucket); 793 ret = partitionBucketAlloc(root, flags, size, bucket);
833 } 794 }
834 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); 795 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName);
835 return ret; 796 return ret;
836 #endif 797 #endif
837 } 798 }
838 799
839 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, 800 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root,
840 size_t size, 801 size_t size,
841 const char* typeName) { 802 const char* typeName) {
842 return partitionAllocGenericFlags(root, 0, size, typeName); 803 return partitionAllocGenericFlags(root, 0, size, typeName);
843 } 804 }
844 805
845 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { 806 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
846 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 807 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
847 free(ptr); 808 free(ptr);
848 #else 809 #else
849 ASSERT(root->initialized); 810 DCHECK(root->initialized);
850 811
851 if (UNLIKELY(!ptr)) 812 if (UNLIKELY(!ptr))
852 return; 813 return;
853 814
854 PartitionAllocHooks::freeHookIfEnabled(ptr); 815 PartitionAllocHooks::freeHookIfEnabled(ptr);
855 ptr = partitionCookieFreePointerAdjust(ptr); 816 ptr = partitionCookieFreePointerAdjust(ptr);
856 ASSERT(partitionPointerIsValid(ptr)); 817 DCHECK(partitionPointerIsValid(ptr));
857 PartitionPage* page = partitionPointerToPage(ptr); 818 PartitionPage* page = partitionPointerToPage(ptr);
858 { 819 {
859 SpinLock::Guard guard(root->lock); 820 subtle::SpinLock::Guard guard(root->lock);
860 partitionFreeWithPage(ptr, page); 821 partitionFreeWithPage(ptr, page);
861 } 822 }
862 #endif 823 #endif
863 } 824 }
864 825
865 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { 826 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) {
866 // Caller must check that the size is not above the kGenericMaxDirectMapped 827 // Caller must check that the size is not above the kGenericMaxDirectMapped
867 // limit before calling. This also guards against integer overflow in the 828 // limit before calling. This also guards against integer overflow in the
868 // calculation here. 829 // calculation here.
869 ASSERT(size <= kGenericMaxDirectMapped); 830 DCHECK(size <= kGenericMaxDirectMapped);
870 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; 831 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
871 } 832 }
872 833
873 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, 834 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root,
874 size_t size) { 835 size_t size) {
875 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 836 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
876 return size; 837 return size;
877 #else 838 #else
878 ASSERT(root->initialized); 839 DCHECK(root->initialized);
879 size = partitionCookieSizeAdjustAdd(size); 840 size = partitionCookieSizeAdjustAdd(size);
880 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 841 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
881 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { 842 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
882 size = bucket->slotSize; 843 size = bucket->slotSize;
883 } else if (size > kGenericMaxDirectMapped) { 844 } else if (size > kGenericMaxDirectMapped) {
884 // Too large to allocate => return the size unchanged. 845 // Too large to allocate => return the size unchanged.
885 } else { 846 } else {
886 ASSERT(bucket == &PartitionRootBase::gPagedBucket); 847 DCHECK(bucket == &PartitionRootBase::gPagedBucket);
887 size = partitionDirectMapSize(size); 848 size = partitionDirectMapSize(size);
888 } 849 }
889 return partitionCookieSizeAdjustSubtract(size); 850 return partitionCookieSizeAdjustSubtract(size);
890 #endif 851 #endif
891 } 852 }
892 853
893 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { 854 ALWAYS_INLINE bool partitionAllocSupportsGetSize() {
894 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 855 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
895 return false; 856 return false;
896 #else 857 #else
897 return true; 858 return true;
898 #endif 859 #endif
899 } 860 }
900 861
901 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { 862 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) {
902 // No need to lock here. Only 'ptr' being freed by another thread could 863 // No need to lock here. Only 'ptr' being freed by another thread could
903 // cause trouble, and the caller is responsible for that not happening. 864 // cause trouble, and the caller is responsible for that not happening.
904 ASSERT(partitionAllocSupportsGetSize()); 865 DCHECK(partitionAllocSupportsGetSize());
905 ptr = partitionCookieFreePointerAdjust(ptr); 866 ptr = partitionCookieFreePointerAdjust(ptr);
906 ASSERT(partitionPointerIsValid(ptr)); 867 DCHECK(partitionPointerIsValid(ptr));
907 PartitionPage* page = partitionPointerToPage(ptr); 868 PartitionPage* page = partitionPointerToPage(ptr);
908 size_t size = page->bucket->slotSize; 869 size_t size = page->bucket->slotSize;
909 return partitionCookieSizeAdjustSubtract(size); 870 return partitionCookieSizeAdjustSubtract(size);
910 } 871 }
911 872
912 // N (or more accurately, N - sizeof(void*)) represents the largest size in 873 // N (or more accurately, N - sizeof(void*)) represents the largest size in
913 // bytes that will be handled by a SizeSpecificPartitionAllocator. 874 // bytes that will be handled by a SizeSpecificPartitionAllocator.
914 // Attempts to partitionAlloc() more than this amount will fail. 875 // Attempts to partitionAlloc() more than this amount will fail.
915 template <size_t N> 876 template <size_t N>
916 class SizeSpecificPartitionAllocator { 877 class SizeSpecificPartitionAllocator {
(...skipping 14 matching lines...)
931 class PartitionAllocatorGeneric { 892 class PartitionAllocatorGeneric {
932 public: 893 public:
933 void init() { partitionAllocGenericInit(&m_partitionRoot); } 894 void init() { partitionAllocGenericInit(&m_partitionRoot); }
934 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } 895 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
935 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } 896 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }
936 897
937 private: 898 private:
938 PartitionRootGeneric m_partitionRoot; 899 PartitionRootGeneric m_partitionRoot;
939 }; 900 };
940 901
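
As a sketch of how the two wrappers differ in use (the SizeSpecificPartitionAllocator members are elided above; this assumes its interface mirrors PartitionAllocatorGeneric):

  // Size-capped partition: partitionAlloc() beyond ~N bytes will fail.
  SizeSpecificPartitionAllocator<1024> smallAllocator;
  smallAllocator.init();
  void* buf = partitionAlloc(smallAllocator.root(), 64, "SmallBuffer");
  partitionFree(buf);

  // General-purpose partition: any size, routed through the bucket lookup.
  PartitionAllocatorGeneric genericAllocator;
  genericAllocator.init();
  void* blob = partitionAllocGeneric(genericAllocator.root(), 1 << 20, "Blob");
  partitionFreeGeneric(genericAllocator.root(), blob);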
941 } // namespace WTF 902 } // namespace base
942 903
943 using WTF::SizeSpecificPartitionAllocator; 904 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
944 using WTF::PartitionAllocatorGeneric;
945 using WTF::PartitionRoot;
946 using WTF::partitionAllocInit;
947 using WTF::partitionAllocShutdown;
948 using WTF::partitionAlloc;
949 using WTF::partitionFree;
950 using WTF::partitionAllocGeneric;
951 using WTF::partitionFreeGeneric;
952 using WTF::partitionReallocGeneric;
953 using WTF::partitionAllocActualSize;
954 using WTF::partitionAllocSupportsGetSize;
955 using WTF::partitionAllocGetSize;
956
957 #endif // WTF_PartitionAlloc_h