Chromium Code Reviews

Side by Side Diff: base/allocator/partition_allocator/partition_alloc.h

Issue 2518253002: Move Partition Allocator into Chromium base. (Closed)
Patch Set: Respond to comments. Created 4 years ago
OLD | NEW
1 /* 1 // Copyright (c) 2013 The Chromium Authors. All rights reserved.
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 // Use of this source code is governed by a BSD-style license that can be
3 * 3 // found in the LICENSE file.
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are
6 * met:
7 *
8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above
11 * copyright notice, this list of conditions and the following disclaimer
12 * in the documentation and/or other materials provided with the
13 * distribution.
14 * * Neither the name of Google Inc. nor the names of its
15 * contributors may be used to endorse or promote products derived from
16 * this software without specific prior written permission.
17 *
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
29 */
30 4
31 #ifndef WTF_PartitionAlloc_h 5 #ifndef BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
32 #define WTF_PartitionAlloc_h 6 #define BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
33 7
34 // DESCRIPTION 8 // DESCRIPTION
35 // partitionAlloc() / partitionAllocGeneric() and partitionFree() / 9 // partitionAlloc() / partitionAllocGeneric() and partitionFree() /
36 // partitionFreeGeneric() are approximately analogous to malloc() and free(). 10 // partitionFreeGeneric() are approximately analogous to malloc() and free().
37 // 11 //
38 // The main difference is that a PartitionRoot / PartitionRootGeneric object 12 // The main difference is that a PartitionRoot / PartitionRootGeneric object
39 // must be supplied to these functions, representing a specific "heap partition" 13 // must be supplied to these functions, representing a specific "heap partition"
40 // that will be used to satisfy the allocation. Different partitions are 14 // that will be used to satisfy the allocation. Different partitions are
41 // guaranteed to exist in separate address spaces, including being separate from 15 // guaranteed to exist in separate address spaces, including being separate from
42 // the main system heap. If the contained objects are all freed, physical memory 16 // the main system heap. If the contained objects are all freed, physical memory
(...skipping 35 matching lines...)
78 // pages, enabling various simple tricks to try and minimize fragmentation. 52 // pages, enabling various simple tricks to try and minimize fragmentation.
79 // - Fine-grained bucket sizes leading to less waste and better packing. 53 // - Fine-grained bucket sizes leading to less waste and better packing.
80 // 54 //
81 // The following security properties could be investigated in the future: 55 // The following security properties could be investigated in the future:
82 // - Per-object bucketing (instead of per-size) is mostly available at the API, 56 // - Per-object bucketing (instead of per-size) is mostly available at the API,
83 // but not used yet. 57 // but not used yet.
84 // - No randomness of freelist entries or bucket position. 58 // - No randomness of freelist entries or bucket position.
85 // - Better checking for wild pointers in free(). 59 // - Better checking for wild pointers in free().
86 // - Better freelist masking function to guarantee fault on 32-bit. 60 // - Better freelist masking function to guarantee fault on 32-bit.
87 61
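To make the malloc()/free() analogy above concrete, here is a minimal usage sketch of the generic API under the new base/ naming, assuming the PartitionAllocatorGeneric wrapper declared near the end of this header; the OOM handler below is a hypothetical stand-in:

    #include <stdlib.h>

    #include "base/allocator/partition_allocator/partition_alloc.h"

    namespace {

    // Hypothetical process-wide OOM handler; a real caller would install
    // something that produces a crash report.
    void HandleOutOfMemory() {
      abort();
    }

    // A long-lived partition for variably sized allocations.
    base::PartitionAllocatorGeneric g_partition;

    }  // namespace

    void Example() {
      base::partitionAllocGlobalInit(&HandleOutOfMemory);  // Once per process.
      g_partition.init();

      // The type name string is only used for heap profiling (see
      // PARTITION_HEAP_PROFILER_TYPE_NAME below).
      void* p = base::partitionAllocGeneric(g_partition.root(), 128, "Example");
      base::partitionFreeGeneric(g_partition.root(), p);

      g_partition.shutdown();
    }
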
88 #include "wtf/Assertions.h" 62 #include <limits.h>
89 #include "wtf/BitwiseOperations.h"
90 #include "wtf/ByteSwap.h"
91 #include "wtf/CPU.h"
92 #include "wtf/SpinLock.h"
93 #include "wtf/TypeTraits.h"
94 #include "wtf/allocator/PageAllocator.h"
95 63
96 #include <limits.h> 64 #include "base/allocator/partition_allocator/page_allocator.h"
65 #include "base/bits.h"
66 #include "base/compiler_specific.h"
67 #include "base/logging.h"
68 #include "base/synchronization/spin_lock.h"
69 #include "build/build_config.h"
97 70
98 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 71 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
99 #include <stdlib.h> 72 #include <stdlib.h>
100 #endif 73 #endif
101 74
102 #if ENABLE(ASSERT) 75 namespace base {
103 #include <string.h>
104 #endif
105
106 namespace WTF {
107 76
108 // Allocation granularity of sizeof(void*) bytes. 77 // Allocation granularity of sizeof(void*) bytes.
109 static const size_t kAllocationGranularity = sizeof(void*); 78 static const size_t kAllocationGranularity = sizeof(void*);
110 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1; 79 static const size_t kAllocationGranularityMask = kAllocationGranularity - 1;
111 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2; 80 static const size_t kBucketShift = (kAllocationGranularity == 8) ? 3 : 2;
112 81
113 // Underlying partition storage pages are a power-of-two size. It is typical 82 // Underlying partition storage pages are a power-of-two size. It is typical
114 // for a partition page to be based on multiple system pages. Most references to 83 // for a partition page to be based on multiple system pages. Most references to
115 // "page" refer to partition pages. 84 // "page" refer to partition pages.
116 // We also have the concept of "super pages" -- these are the underlying system 85 // We also have the concept of "super pages" -- these are the underlying system
(...skipping 117 matching lines...)
234 // Constants for the memory reclaim logic. 203 // Constants for the memory reclaim logic.
235 static const size_t kMaxFreeableSpans = 16; 204 static const size_t kMaxFreeableSpans = 16;
236 205
237 // If the total size in bytes of allocated but not committed pages exceeds this 206 // If the total size in bytes of allocated but not committed pages exceeds this
238 // value (probably an "out of virtual address space" crash), 207 // value (probably an "out of virtual address space" crash),
239 // a special crash stack trace is generated at |partitionOutOfMemory|. 208 // a special crash stack trace is generated at |partitionOutOfMemory|.
240 // This is to distinguish "out of virtual address space" from 209 // This is to distinguish "out of virtual address space" from
241 // "out of physical memory" in crash reports. 210 // "out of physical memory" in crash reports.
242 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB 211 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
243 212
244 #if ENABLE(ASSERT) 213 #if DCHECK_IS_ON()
245 // These two byte values match tcmalloc. 214 // These two byte values match tcmalloc.
246 static const unsigned char kUninitializedByte = 0xAB; 215 static const unsigned char kUninitializedByte = 0xAB;
247 static const unsigned char kFreedByte = 0xCD; 216 static const unsigned char kFreedByte = 0xCD;
248 static const size_t kCookieSize = 217 static const size_t kCookieSize =
249 16; // Handles alignment up to XMM instructions on Intel. 218 16; // Handles alignment up to XMM instructions on Intel.
250 static const unsigned char kCookieValue[kCookieSize] = { 219 static const unsigned char kCookieValue[kCookieSize] = {
251 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, 220 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D,
252 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E}; 221 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E};
253 #endif 222 #endif
254 223
(...skipping 59 matching lines...)
314 PartitionSuperPageExtentEntry* next; 283 PartitionSuperPageExtentEntry* next;
315 }; 284 };
316 285
317 struct PartitionDirectMapExtent { 286 struct PartitionDirectMapExtent {
318 PartitionDirectMapExtent* nextExtent; 287 PartitionDirectMapExtent* nextExtent;
319 PartitionDirectMapExtent* prevExtent; 288 PartitionDirectMapExtent* prevExtent;
320 PartitionBucket* bucket; 289 PartitionBucket* bucket;
321 size_t mapSize; // Mapped size, not including guard pages and meta-data. 290 size_t mapSize; // Mapped size, not including guard pages and meta-data.
322 }; 291 };
323 292
324 struct WTF_EXPORT PartitionRootBase { 293 struct BASE_EXPORT PartitionRootBase {
325 size_t totalSizeOfCommittedPages; 294 size_t totalSizeOfCommittedPages;
326 size_t totalSizeOfSuperPages; 295 size_t totalSizeOfSuperPages;
327 size_t totalSizeOfDirectMappedPages; 296 size_t totalSizeOfDirectMappedPages;
328 // Invariant: totalSizeOfCommittedPages <= 297 // Invariant: totalSizeOfCommittedPages <=
329 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages. 298 // totalSizeOfSuperPages + totalSizeOfDirectMappedPages.
330 unsigned numBuckets; 299 unsigned numBuckets;
331 unsigned maxAllocation; 300 unsigned maxAllocation;
332 bool initialized; 301 bool initialized;
333 char* nextSuperPage; 302 char* nextSuperPage;
334 char* nextPartitionPage; 303 char* nextPartitionPage;
335 char* nextPartitionPageEnd; 304 char* nextPartitionPageEnd;
336 PartitionSuperPageExtentEntry* currentExtent; 305 PartitionSuperPageExtentEntry* currentExtent;
337 PartitionSuperPageExtentEntry* firstExtent; 306 PartitionSuperPageExtentEntry* firstExtent;
338 PartitionDirectMapExtent* directMapList; 307 PartitionDirectMapExtent* directMapList;
339 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans]; 308 PartitionPage* globalEmptyPageRing[kMaxFreeableSpans];
340 int16_t globalEmptyPageRingIndex; 309 int16_t globalEmptyPageRingIndex;
341 uintptr_t invertedSelf; 310 uintptr_t invertedSelf;
342 311
343 static SpinLock gInitializedLock; 312 static subtle::SpinLock gInitializedLock;
344 static bool gInitialized; 313 static bool gInitialized;
345 // gSeedPage is used as a sentinel to indicate that there is no page 314 // gSeedPage is used as a sentinel to indicate that there is no page
346 // in the active page list. We can use nullptr, but in that case we need 315 // in the active page list. We can use nullptr, but in that case we need
347 // to add a null-check branch to the hot allocation path. We want to avoid 316 // to add a null-check branch to the hot allocation path. We want to avoid
348 // that. 317 // that.
349 static PartitionPage gSeedPage; 318 static PartitionPage gSeedPage;
350 static PartitionBucket gPagedBucket; 319 static PartitionBucket gPagedBucket;
351 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory. 320 // gOomHandlingFunction is invoked when PartitionAlloc hits OutOfMemory.
352 static void (*gOomHandlingFunction)(); 321 static void (*gOomHandlingFunction)();
353 }; 322 };
354 323
355 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc. 324 // Never instantiate a PartitionRoot directly, instead use PartitionAlloc.
356 struct PartitionRoot : public PartitionRootBase { 325 struct PartitionRoot : public PartitionRootBase {
357 // The PartitionAlloc templated class ensures the following is correct. 326 // The PartitionAlloc templated class ensures the following is correct.
358 ALWAYS_INLINE PartitionBucket* buckets() { 327 ALWAYS_INLINE PartitionBucket* buckets() {
359 return reinterpret_cast<PartitionBucket*>(this + 1); 328 return reinterpret_cast<PartitionBucket*>(this + 1);
360 } 329 }
361 ALWAYS_INLINE const PartitionBucket* buckets() const { 330 ALWAYS_INLINE const PartitionBucket* buckets() const {
362 return reinterpret_cast<const PartitionBucket*>(this + 1); 331 return reinterpret_cast<const PartitionBucket*>(this + 1);
363 } 332 }
364 }; 333 };
365 334
366 // Never instantiate a PartitionRootGeneric directly, instead use 335 // Never instantiate a PartitionRootGeneric directly, instead use
367 // PartitionAllocatorGeneric. 336 // PartitionAllocatorGeneric.
368 struct PartitionRootGeneric : public PartitionRootBase { 337 struct PartitionRootGeneric : public PartitionRootBase {
369 SpinLock lock; 338 subtle::SpinLock lock;
370 // Some pre-computed constants. 339 // Some pre-computed constants.
371 size_t orderIndexShifts[kBitsPerSizet + 1]; 340 size_t orderIndexShifts[kBitsPerSizet + 1];
372 size_t orderSubIndexMasks[kBitsPerSizet + 1]; 341 size_t orderSubIndexMasks[kBitsPerSizet + 1];
373 // The bucket lookup table lets us map a size_t to a bucket quickly. 342 // The bucket lookup table lets us map a size_t to a bucket quickly.
374 // The trailing +1 caters for the overflow case for very large allocation 343 // The trailing +1 caters for the overflow case for very large allocation
375 // sizes. It is one flat array instead of a 2D array because in the 2D 344 // sizes. It is one flat array instead of a 2D array because in the 2D
376 // world, we'd need to index array[blah][max+1] which risks undefined 345 // world, we'd need to index array[blah][max+1] which risks undefined
377 // behavior. 346 // behavior.
378 PartitionBucket* 347 PartitionBucket*
379 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1]; 348 bucketLookups[((kBitsPerSizet + 1) * kGenericNumBucketsPerOrder) + 1];
(...skipping 33 matching lines...)
413 uint32_t numActivePages; // Number of pages that have at least one 382 uint32_t numActivePages; // Number of pages that have at least one
414 // provisioned slot. 383 // provisioned slot.
415 uint32_t numEmptyPages; // Number of pages that are empty 384 uint32_t numEmptyPages; // Number of pages that are empty
416 // but not decommitted. 385 // but not decommitted.
417 uint32_t numDecommittedPages; // Number of pages that are empty 386 uint32_t numDecommittedPages; // Number of pages that are empty
418 // and decommitted. 387 // and decommitted.
419 }; 388 };
420 389
421 // Interface that is passed to partitionDumpStats and 390 // Interface that is passed to partitionDumpStats and
422 // partitionDumpStatsGeneric for using the memory statistics. 391 // partitionDumpStatsGeneric for using the memory statistics.
423 class WTF_EXPORT PartitionStatsDumper { 392 class BASE_EXPORT PartitionStatsDumper {
424 public: 393 public:
425 // Called to dump total memory used by partition, once per partition. 394 // Called to dump total memory used by partition, once per partition.
426 virtual void partitionDumpTotals(const char* partitionName, 395 virtual void partitionDumpTotals(const char* partitionName,
427 const PartitionMemoryStats*) = 0; 396 const PartitionMemoryStats*) = 0;
428 397
429 // Called to dump stats about buckets, for each bucket. 398 // Called to dump stats about buckets, for each bucket.
430 virtual void partitionsDumpBucketStats(const char* partitionName, 399 virtual void partitionsDumpBucketStats(const char* partitionName,
431 const PartitionBucketMemoryStats*) = 0; 400 const PartitionBucketMemoryStats*) = 0;
432 }; 401 };
433 402
434 WTF_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)()); 403 BASE_EXPORT void partitionAllocGlobalInit(void (*oomHandlingFunction)());
435 WTF_EXPORT void partitionAllocInit(PartitionRoot*, 404 BASE_EXPORT void partitionAllocInit(PartitionRoot*,
436 size_t numBuckets, 405 size_t numBuckets,
437 size_t maxAllocation); 406 size_t maxAllocation);
438 WTF_EXPORT bool partitionAllocShutdown(PartitionRoot*); 407 BASE_EXPORT bool partitionAllocShutdown(PartitionRoot*);
439 WTF_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*); 408 BASE_EXPORT void partitionAllocGenericInit(PartitionRootGeneric*);
440 WTF_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*); 409 BASE_EXPORT bool partitionAllocGenericShutdown(PartitionRootGeneric*);
441 410
442 enum PartitionPurgeFlags { 411 enum PartitionPurgeFlags {
443 // Decommitting the ring list of empty pages is reasonably fast. 412 // Decommitting the ring list of empty pages is reasonably fast.
444 PartitionPurgeDecommitEmptyPages = 1 << 0, 413 PartitionPurgeDecommitEmptyPages = 1 << 0,
445 // Discarding unused system pages is slower, because it involves walking all 414 // Discarding unused system pages is slower, because it involves walking all
446 // freelists in all active partition pages of all buckets >= system page 415 // freelists in all active partition pages of all buckets >= system page
447 // size. It often frees a similar amount of memory to decommitting the empty 416 // size. It often frees a similar amount of memory to decommitting the empty
448 // pages, though. 417 // pages, though.
449 PartitionPurgeDiscardUnusedSystemPages = 1 << 1, 418 PartitionPurgeDiscardUnusedSystemPages = 1 << 1,
450 }; 419 };
451 420
452 WTF_EXPORT void partitionPurgeMemory(PartitionRoot*, int); 421 BASE_EXPORT void partitionPurgeMemory(PartitionRoot*, int);
453 WTF_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int); 422 BASE_EXPORT void partitionPurgeMemoryGeneric(PartitionRootGeneric*, int);
454 423
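As a sketch of how these flags combine: a memory-pressure handler might first do the cheap purge, then the aggressive one, OR'ing the two flag values (the partition object here is assumed to exist elsewhere):

    // Cheap: decommit only the ring list of empty pages.
    base::partitionPurgeMemoryGeneric(partition.root(),
                                      base::PartitionPurgeDecommitEmptyPages);

    // Aggressive: additionally walk all freelists and discard unused system
    // pages. Slower, but reclaims memory the first pass cannot.
    base::partitionPurgeMemoryGeneric(
        partition.root(),
        base::PartitionPurgeDecommitEmptyPages |
            base::PartitionPurgeDiscardUnusedSystemPages);
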
455 WTF_EXPORT NEVER_INLINE void* partitionAllocSlowPath(PartitionRootBase*, 424 BASE_EXPORT NOINLINE void* partitionAllocSlowPath(PartitionRootBase*,
456 int, 425 int,
457 size_t, 426 size_t,
458 PartitionBucket*); 427 PartitionBucket*);
459 WTF_EXPORT NEVER_INLINE void partitionFreeSlowPath(PartitionPage*); 428 BASE_EXPORT NOINLINE void partitionFreeSlowPath(PartitionPage*);
460 WTF_EXPORT NEVER_INLINE void* partitionReallocGeneric(PartitionRootGeneric*, 429 BASE_EXPORT NOINLINE void* partitionReallocGeneric(PartitionRootGeneric*,
461 void*, 430 void*,
462 size_t, 431 size_t,
463 const char* typeName); 432 const char* typeName);
464 433
465 WTF_EXPORT void partitionDumpStats(PartitionRoot*, 434 BASE_EXPORT void partitionDumpStats(PartitionRoot*,
466 const char* partitionName, 435 const char* partitionName,
467 bool isLightDump, 436 bool isLightDump,
468 PartitionStatsDumper*); 437 PartitionStatsDumper*);
469 WTF_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*, 438 BASE_EXPORT void partitionDumpStatsGeneric(PartitionRootGeneric*,
470 const char* partitionName, 439 const char* partitionName,
471 bool isLightDump, 440 bool isLightDump,
472 PartitionStatsDumper*); 441 PartitionStatsDumper*);
473 442
474 class WTF_EXPORT PartitionAllocHooks { 443 class BASE_EXPORT PartitionAllocHooks {
475 public: 444 public:
476 typedef void AllocationHook(void* address, size_t, const char* typeName); 445 typedef void AllocationHook(void* address, size_t, const char* typeName);
477 typedef void FreeHook(void* address); 446 typedef void FreeHook(void* address);
478 447
479 static void setAllocationHook(AllocationHook* hook) { 448 static void setAllocationHook(AllocationHook* hook) {
480 m_allocationHook = hook; 449 m_allocationHook = hook;
481 } 450 }
482 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; } 451 static void setFreeHook(FreeHook* hook) { m_freeHook = hook; }
483 452
484 static void allocationHookIfEnabled(void* address, 453 static void allocationHookIfEnabled(void* address,
(...skipping 26 matching lines...)
511 private: 480 private:
512 // Pointers to hook functions that PartitionAlloc will call on allocation and 481 // Pointers to hook functions that PartitionAlloc will call on allocation and
513 // free if the pointers are non-null. 482 // free if the pointers are non-null.
514 static AllocationHook* m_allocationHook; 483 static AllocationHook* m_allocationHook;
515 static FreeHook* m_freeHook; 484 static FreeHook* m_freeHook;
516 }; 485 };
517 486
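A minimal sketch of installing the hooks, e.g. for a heap profiler; the hook bodies are placeholders, and a real implementation would avoid re-entering the hooked partition:

    void MyAllocationHook(void* address, size_t size, const char* type_name) {
      // Record (address, size, type_name) somewhere allocation-free.
    }

    void MyFreeHook(void* address) {
      // Record the free of |address|.
    }

    void InstallPartitionAllocHooks() {
      base::PartitionAllocHooks::setAllocationHook(&MyAllocationHook);
      base::PartitionAllocHooks::setFreeHook(&MyFreeHook);
    }
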
518 // In official builds, do not include type info string literals to avoid 487 // In official builds, do not include type info string literals to avoid
519 // bloating the binary. 488 // bloating the binary.
520 #if defined(OFFICIAL_BUILD) 489 #if defined(OFFICIAL_BUILD)
521 #define WTF_HEAP_PROFILER_TYPE_NAME(T) nullptr 490 #define PARTITION_HEAP_PROFILER_TYPE_NAME(T) nullptr
522 #else 491 #else
523 #define WTF_HEAP_PROFILER_TYPE_NAME(T) ::WTF::getStringWithTypeName<T>() 492 #define PARTITION_HEAP_PROFILER_TYPE_NAME(T) GetStringWithTypeName<T>()
524 #endif 493 #endif
525 494
526 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask( 495 ALWAYS_INLINE PartitionFreelistEntry* partitionFreelistMask(
527 PartitionFreelistEntry* ptr) { 496 PartitionFreelistEntry* ptr) {
528 // We use bswap on little endian as a fast mask for two reasons: 497 // We use bswap on little endian as a fast mask for two reasons:
529 // 1) If an object is freed and its vtable used where the attacker doesn't 498 // 1) If an object is freed and its vtable used where the attacker doesn't
530 // get the chance to run allocations between the free and use, the vtable 499 // get the chance to run allocations between the free and use, the vtable
531 // dereference is likely to fault. 500 // dereference is likely to fault.
532 // 2) If the attacker has a linear buffer overflow and elects to try and 501 // 2) If the attacker has a linear buffer overflow and elects to try and
533 // corrupt a freelist pointer, partial pointer overwrite attacks are 502 // corrupt a freelist pointer, partial pointer overwrite attacks are
534 // thwarted. 503 // thwarted.
535 // For big endian, similar guarantees are arrived at with a negation. 504 // For big endian, similar guarantees are arrived at with a negation.
536 #if CPU(BIG_ENDIAN) 505 #if !defined(ARCH_CPU_LITTLE_ENDIAN)
Primiano Tucci (use gerrit) 2016/11/28 12:06:50 why not just defined(ARCH_CPU_BIG_ENDIAN) ? Also r
palmer 2016/12/01 00:48:24 Oops! Fixed.
506 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
507 #else
537 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr); 508 uintptr_t masked = ~reinterpret_cast<uintptr_t>(ptr);
538 #else
539 uintptr_t masked = bswapuintptrt(reinterpret_cast<uintptr_t>(ptr));
540 #endif 509 #endif
541 return reinterpret_cast<PartitionFreelistEntry*>(masked); 510 return reinterpret_cast<PartitionFreelistEntry*>(masked);
542 } 511 }
543 512
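The mask is its own inverse, so the same function both encodes a pointer when storing it into a freelist entry and decodes it when loading (see partitionBucketAlloc and partitionFreeWithPage below). A little-endian sketch of the round trip:

    // Assume a typical user-space pointer on x86-64:
    //   entry           = 0x00007f2a13374ab0
    //   mask(entry)     = 0xb04a37132a7f0000   (bytes reversed by bswap)
    //   mask(mask(...)) = 0x00007f2a13374ab0   (original pointer back)
    //
    // The stored form puts the pointer's zero high bytes at the bottom and
    // its low bytes at the top, yielding a non-canonical/kernel-range value
    // that faults if dereferenced directly.
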
544 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) { 513 ALWAYS_INLINE size_t partitionCookieSizeAdjustAdd(size_t size) {
545 #if ENABLE(ASSERT) 514 #if DCHECK_IS_ON()
546 // Add space for cookies, checking for integer overflow. 515 // Add space for cookies, checking for integer overflow.
547 ASSERT(size + (2 * kCookieSize) > size); 516 DCHECK(size + (2 * kCookieSize) > size);
548 size += 2 * kCookieSize; 517 size += 2 * kCookieSize;
549 #endif 518 #endif
550 return size; 519 return size;
551 } 520 }
552 521
553 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) { 522 ALWAYS_INLINE size_t partitionCookieSizeAdjustSubtract(size_t size) {
554 #if ENABLE(ASSERT) 523 #if DCHECK_IS_ON()
555 // Remove space for cookies. 524 // Remove space for cookies.
556 ASSERT(size >= 2 * kCookieSize); 525 DCHECK(size >= 2 * kCookieSize);
557 size -= 2 * kCookieSize; 526 size -= 2 * kCookieSize;
558 #endif 527 #endif
559 return size; 528 return size;
560 } 529 }
561 530
562 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) { 531 ALWAYS_INLINE void* partitionCookieFreePointerAdjust(void* ptr) {
563 #if ENABLE(ASSERT) 532 #if DCHECK_IS_ON()
564 // The value given to the application is actually just after the cookie. 533 // The value given to the application is actually just after the cookie.
565 ptr = static_cast<char*>(ptr) - kCookieSize; 534 ptr = static_cast<char*>(ptr) - kCookieSize;
566 #endif 535 #endif
567 return ptr; 536 return ptr;
568 } 537 }
569 538
570 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) { 539 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) {
571 #if ENABLE(ASSERT) 540 #if DCHECK_IS_ON()
572 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); 541 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
573 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) 542 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
574 *cookiePtr = kCookieValue[i]; 543 *cookiePtr = kCookieValue[i];
575 #endif 544 #endif
576 } 545 }
577 546
578 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) { 547 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) {
579 #if ENABLE(ASSERT) 548 #if DCHECK_IS_ON()
580 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr); 549 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
581 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr) 550 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
582 ASSERT(*cookiePtr == kCookieValue[i]); 551 DCHECK(*cookiePtr == kCookieValue[i]);
583 #endif 552 #endif
584 } 553 }
585 554
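Taken together, the four cookie helpers above imply the following debug-build (DCHECK_IS_ON()) slot layout and adjustments:

    //  |<--------------------- slotSize --------------------->|
    //  +--------------+--------------------------+------------+
    //  | kCookieSize  |  caller-visible data     | kCookieSize|
    //  +--------------+--------------------------+------------+
    //  ^ slot start   ^ pointer returned to the caller
    //
    // alloc: partitionCookieSizeAdjustAdd() grows the request by two
    //        cookies; both cookies are written and the caller gets
    //        slot + kCookieSize.
    // free:  partitionCookieFreePointerAdjust() steps back to the slot
    //        start, and both cookies are verified with
    //        partitionCookieCheckValue().
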
586 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) { 555 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) {
587 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); 556 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
588 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); 557 DCHECK(!(pointerAsUint & kSuperPageOffsetMask));
589 // The metadata area is exactly one system page (the guard page) into the 558 // The metadata area is exactly one system page (the guard page) into the
590 // super page. 559 // super page.
591 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); 560 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
592 } 561 }
593 562
594 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) { 563 ALWAYS_INLINE PartitionPage* partitionPointerToPageNoAlignmentCheck(void* ptr) {
595 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); 564 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
596 char* superPagePtr = 565 char* superPagePtr =
597 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask); 566 reinterpret_cast<char*>(pointerAsUint & kSuperPageBaseMask);
598 uintptr_t partitionPageIndex = 567 uintptr_t partitionPageIndex =
599 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift; 568 (pointerAsUint & kSuperPageOffsetMask) >> kPartitionPageShift;
600 // Index 0 is invalid because it is the metadata and guard area and 569 // Index 0 is invalid because it is the metadata and guard area and
601 // the last index is invalid because it is a guard page. 570 // the last index is invalid because it is a guard page.
602 ASSERT(partitionPageIndex); 571 DCHECK(partitionPageIndex);
603 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 572 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
604 PartitionPage* page = reinterpret_cast<PartitionPage*>( 573 PartitionPage* page = reinterpret_cast<PartitionPage*>(
605 partitionSuperPageToMetadataArea(superPagePtr) + 574 partitionSuperPageToMetadataArea(superPagePtr) +
606 (partitionPageIndex << kPageMetadataShift)); 575 (partitionPageIndex << kPageMetadataShift));
607 // Partition pages in the same slot span can share the same page object. 576 // Partition pages in the same slot span can share the same page object.
608 // Adjust for that. 577 // Adjust for that.
609 size_t delta = page->pageOffset << kPageMetadataShift; 578 size_t delta = page->pageOffset << kPageMetadataShift;
610 page = 579 page =
611 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta); 580 reinterpret_cast<PartitionPage*>(reinterpret_cast<char*>(page) - delta);
612 return page; 581 return page;
613 } 582 }
614 583
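The layout those helpers assume (and that partitionPageToPointer below inverts) is, roughly:

    // superPageBase
    //   partition page 0:  guard system page, then the metadata area
    //                      (one PartitionPage entry of kPageMetadataSize
    //                      bytes per partition page)
    //   partition pages 1..N-2:  slot spans handed out to callers
    //   partition page N-1:      guard page
    //
    // Mapping a user pointer to its metadata is therefore pure arithmetic:
    // mask with kSuperPageBaseMask to find the super page, shift the offset
    // right by kPartitionPageShift for the index, then index into the
    // metadata area. No global table lookup is needed.
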
615 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) { 584 ALWAYS_INLINE void* partitionPageToPointer(const PartitionPage* page) {
616 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page); 585 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(page);
617 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask); 586 uintptr_t superPageOffset = (pointerAsUint & kSuperPageOffsetMask);
618 ASSERT(superPageOffset > kSystemPageSize); 587 DCHECK(superPageOffset > kSystemPageSize);
619 ASSERT(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage * 588 DCHECK(superPageOffset < kSystemPageSize + (kNumPartitionPagesPerSuperPage *
620 kPageMetadataSize)); 589 kPageMetadataSize));
621 uintptr_t partitionPageIndex = 590 uintptr_t partitionPageIndex =
622 (superPageOffset - kSystemPageSize) >> kPageMetadataShift; 591 (superPageOffset - kSystemPageSize) >> kPageMetadataShift;
623 // Index 0 is invalid because it is the metadata area and the last index is 592 // Index 0 is invalid because it is the metadata area and the last index is
624 // invalid because it is a guard page. 593 // invalid because it is a guard page.
625 ASSERT(partitionPageIndex); 594 DCHECK(partitionPageIndex);
626 ASSERT(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1); 595 DCHECK(partitionPageIndex < kNumPartitionPagesPerSuperPage - 1);
627 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask); 596 uintptr_t superPageBase = (pointerAsUint & kSuperPageBaseMask);
628 void* ret = reinterpret_cast<void*>( 597 void* ret = reinterpret_cast<void*>(
629 superPageBase + (partitionPageIndex << kPartitionPageShift)); 598 superPageBase + (partitionPageIndex << kPartitionPageShift));
630 return ret; 599 return ret;
631 } 600 }
632 601
633 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) { 602 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) {
634 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); 603 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
635 // Checks that the pointer is a multiple of bucket size. 604 // Checks that the pointer is a multiple of bucket size.
636 ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - 605 DCHECK(!((reinterpret_cast<uintptr_t>(ptr) -
637 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % 606 reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) %
638 page->bucket->slotSize)); 607 page->bucket->slotSize));
639 return page; 608 return page;
640 } 609 }
641 610
642 ALWAYS_INLINE bool partitionBucketIsDirectMapped( 611 ALWAYS_INLINE bool partitionBucketIsDirectMapped(
643 const PartitionBucket* bucket) { 612 const PartitionBucket* bucket) {
644 return !bucket->numSystemPagesPerSlotSpan; 613 return !bucket->numSystemPagesPerSlotSpan;
645 } 614 }
646 615
647 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) { 616 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket) {
648 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize; 617 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
649 } 618 }
650 619
651 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) { 620 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket) {
652 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize); 621 return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
653 } 622 }
654 623
655 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) { 624 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page) {
656 // For single-slot buckets which span more than one partition page, we 625 // For single-slot buckets which span more than one partition page, we
657 // have some spare metadata space to store the raw allocation size. We 626 // have some spare metadata space to store the raw allocation size. We
658 // can use this to report better statistics. 627 // can use this to report better statistics.
659 PartitionBucket* bucket = page->bucket; 628 PartitionBucket* bucket = page->bucket;
660 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize) 629 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
661 return nullptr; 630 return nullptr;
662 631
663 ASSERT((bucket->slotSize % kSystemPageSize) == 0); 632 DCHECK((bucket->slotSize % kSystemPageSize) == 0);
664 ASSERT(partitionBucketIsDirectMapped(bucket) || 633 DCHECK(partitionBucketIsDirectMapped(bucket) ||
665 partitionBucketSlots(bucket) == 1); 634 partitionBucketSlots(bucket) == 1);
666 page++; 635 page++;
667 return reinterpret_cast<size_t*>(&page->freelistHead); 636 return reinterpret_cast<size_t*>(&page->freelistHead);
668 } 637 }
669 638
670 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) { 639 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page) {
671 size_t* rawSizePtr = partitionPageGetRawSizePtr(page); 640 size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
672 if (UNLIKELY(rawSizePtr != nullptr)) 641 if (UNLIKELY(rawSizePtr != nullptr))
673 return *rawSizePtr; 642 return *rawSizePtr;
674 return 0; 643 return 0;
(...skipping 11 matching lines...)
686 PartitionRootBase* root = partitionPageToRoot(page); 655 PartitionRootBase* root = partitionPageToRoot(page);
687 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); 656 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
688 } 657 }
689 658
690 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, 659 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root,
691 int flags, 660 int flags,
692 size_t size, 661 size_t size,
693 PartitionBucket* bucket) { 662 PartitionBucket* bucket) {
694 PartitionPage* page = bucket->activePagesHead; 663 PartitionPage* page = bucket->activePagesHead;
695 // Check that this page is neither full nor freed. 664 // Check that this page is neither full nor freed.
696 ASSERT(page->numAllocatedSlots >= 0); 665 DCHECK(page->numAllocatedSlots >= 0);
697 void* ret = page->freelistHead; 666 void* ret = page->freelistHead;
698 if (LIKELY(ret != 0)) { 667 if (LIKELY(ret != 0)) {
699 // If these asserts fire, you probably corrupted memory. 668 // If these asserts fire, you probably corrupted memory.
700 ASSERT(partitionPointerIsValid(ret)); 669 DCHECK(partitionPointerIsValid(ret));
701 // All large allocations must go through the slow path to correctly 670 // All large allocations must go through the slow path to correctly
702 // update the size metadata. 671 // update the size metadata.
703 ASSERT(partitionPageGetRawSize(page) == 0); 672 DCHECK(partitionPageGetRawSize(page) == 0);
704 PartitionFreelistEntry* newHead = 673 PartitionFreelistEntry* newHead =
705 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); 674 partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
706 page->freelistHead = newHead; 675 page->freelistHead = newHead;
707 page->numAllocatedSlots++; 676 page->numAllocatedSlots++;
708 } else { 677 } else {
709 ret = partitionAllocSlowPath(root, flags, size, bucket); 678 ret = partitionAllocSlowPath(root, flags, size, bucket);
710 ASSERT(!ret || partitionPointerIsValid(ret)); 679 DCHECK(!ret || partitionPointerIsValid(ret));
711 } 680 }
712 #if ENABLE(ASSERT) 681 #if DCHECK_IS_ON()
713 if (!ret) 682 if (!ret)
714 return 0; 683 return 0;
715 // Fill the uninitialized pattern, and write the cookies. 684 // Fill the uninitialized pattern, and write the cookies.
716 page = partitionPointerToPage(ret); 685 page = partitionPointerToPage(ret);
717 size_t slotSize = page->bucket->slotSize; 686 size_t slotSize = page->bucket->slotSize;
718 size_t rawSize = partitionPageGetRawSize(page); 687 size_t rawSize = partitionPageGetRawSize(page);
719 if (rawSize) { 688 if (rawSize) {
720 ASSERT(rawSize == size); 689 DCHECK(rawSize == size);
721 slotSize = rawSize; 690 slotSize = rawSize;
722 } 691 }
723 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize); 692 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
724 char* charRet = static_cast<char*>(ret); 693 char* charRet = static_cast<char*>(ret);
725 // The value given to the application is actually just after the cookie. 694 // The value given to the application is actually just after the cookie.
726 ret = charRet + kCookieSize; 695 ret = charRet + kCookieSize;
727 memset(ret, kUninitializedByte, noCookieSize); 696 memset(ret, kUninitializedByte, noCookieSize);
728 partitionCookieWriteValue(charRet); 697 partitionCookieWriteValue(charRet);
729 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize); 698 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize);
730 #endif 699 #endif
731 return ret; 700 return ret;
732 } 701 }
733 702
734 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, 703 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root,
735 size_t size, 704 size_t size,
736 const char* typeName) { 705 const char* typeName) {
737 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 706 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
738 void* result = malloc(size); 707 void* result = malloc(size);
739 RELEASE_ASSERT(result); 708 CHECK(result);
740 return result; 709 return result;
741 #else 710 #else
742 size_t requestedSize = size; 711 size_t requestedSize = size;
743 size = partitionCookieSizeAdjustAdd(size); 712 size = partitionCookieSizeAdjustAdd(size);
744 ASSERT(root->initialized); 713 DCHECK(root->initialized);
745 size_t index = size >> kBucketShift; 714 size_t index = size >> kBucketShift;
746 ASSERT(index < root->numBuckets); 715 DCHECK(index < root->numBuckets);
747 ASSERT(size == index << kBucketShift); 716 DCHECK(size == index << kBucketShift);
748 PartitionBucket* bucket = &root->buckets()[index]; 717 PartitionBucket* bucket = &root->buckets()[index];
749 void* result = partitionBucketAlloc(root, 0, size, bucket); 718 void* result = partitionBucketAlloc(root, 0, size, bucket);
750 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName); 719 PartitionAllocHooks::allocationHookIfEnabled(result, requestedSize, typeName);
751 return result; 720 return result;
752 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 721 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
753 } 722 }
754 723
755 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) { 724 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) {
756 // If these asserts fire, you probably corrupted memory. 725 // If these asserts fire, you probably corrupted memory.
757 #if ENABLE(ASSERT) 726 #if DCHECK_IS_ON()
758 size_t slotSize = page->bucket->slotSize; 727 size_t slotSize = page->bucket->slotSize;
759 size_t rawSize = partitionPageGetRawSize(page); 728 size_t rawSize = partitionPageGetRawSize(page);
760 if (rawSize) 729 if (rawSize)
761 slotSize = rawSize; 730 slotSize = rawSize;
762 partitionCookieCheckValue(ptr); 731 partitionCookieCheckValue(ptr);
763 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - 732 partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize -
764 kCookieSize); 733 kCookieSize);
765 memset(ptr, kFreedByte, slotSize); 734 memset(ptr, kFreedByte, slotSize);
766 #endif 735 #endif
767 ASSERT(page->numAllocatedSlots); 736 DCHECK(page->numAllocatedSlots);
768 PartitionFreelistEntry* freelistHead = page->freelistHead; 737 PartitionFreelistEntry* freelistHead = page->freelistHead;
769 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); 738 DCHECK(!freelistHead || partitionPointerIsValid(freelistHead));
770 SECURITY_CHECK(ptr != freelistHead); // Catches an immediate double free. 739 CHECK(ptr != freelistHead); // Catches an immediate double free.
771 // Look for double free one level deeper in debug. 740 // Look for double free one level deeper in debug.
772 SECURITY_DCHECK(!freelistHead || 741 DCHECK(!freelistHead || ptr != partitionFreelistMask(freelistHead->next));
773 ptr != partitionFreelistMask(freelistHead->next));
774 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); 742 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
775 entry->next = partitionFreelistMask(freelistHead); 743 entry->next = partitionFreelistMask(freelistHead);
776 page->freelistHead = entry; 744 page->freelistHead = entry;
777 --page->numAllocatedSlots; 745 --page->numAllocatedSlots;
778 if (UNLIKELY(page->numAllocatedSlots <= 0)) { 746 if (UNLIKELY(page->numAllocatedSlots <= 0)) {
779 partitionFreeSlowPath(page); 747 partitionFreeSlowPath(page);
780 } else { 748 } else {
781 // All single-slot allocations must go through the slow path to 749 // All single-slot allocations must go through the slow path to
782 // correctly update the size metadata. 750 // correctly update the size metadata.
783 ASSERT(partitionPageGetRawSize(page) == 0); 751 DCHECK(partitionPageGetRawSize(page) == 0);
784 } 752 }
785 } 753 }
786 754
787 ALWAYS_INLINE void partitionFree(void* ptr) { 755 ALWAYS_INLINE void partitionFree(void* ptr) {
788 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 756 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
789 free(ptr); 757 free(ptr);
790 #else 758 #else
791 PartitionAllocHooks::freeHookIfEnabled(ptr); 759 PartitionAllocHooks::freeHookIfEnabled(ptr);
792 ptr = partitionCookieFreePointerAdjust(ptr); 760 ptr = partitionCookieFreePointerAdjust(ptr);
793 ASSERT(partitionPointerIsValid(ptr)); 761 DCHECK(partitionPointerIsValid(ptr));
794 PartitionPage* page = partitionPointerToPage(ptr); 762 PartitionPage* page = partitionPointerToPage(ptr);
795 partitionFreeWithPage(ptr, page); 763 partitionFreeWithPage(ptr, page);
796 #endif 764 #endif
797 } 765 }
798 766
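One consequence of the checks above: an immediate double free is caught even in release builds, because after the first free the pointer is the freelist head. A sketch (assuming a non-MEMORY_TOOL build and an initialized PartitionRoot):

    void* p = base::partitionAlloc(root, 64, "Example");
    base::partitionFree(p);
    base::partitionFree(p);  // CHECK(ptr != freelistHead) fires here.
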
799 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket( 767 ALWAYS_INLINE PartitionBucket* partitionGenericSizeToBucket(
800 PartitionRootGeneric* root, 768 PartitionRootGeneric* root,
801 size_t size) { 769 size_t size) {
802 size_t order = kBitsPerSizet - CountLeadingZeroBitsSizeT(size); 770 size_t order = kBitsPerSizet - bits::CountLeadingZeroBitsSizeT(size);
803 // The order index is simply the next few bits after the most significant bit. 771 // The order index is simply the next few bits after the most significant bit.
804 size_t orderIndex = (size >> root->orderIndexShifts[order]) & 772 size_t orderIndex = (size >> root->orderIndexShifts[order]) &
805 (kGenericNumBucketsPerOrder - 1); 773 (kGenericNumBucketsPerOrder - 1);
806 // And if the remaining bits are non-zero we must bump the bucket up. 774 // And if the remaining bits are non-zero we must bump the bucket up.
807 size_t subOrderIndex = size & root->orderSubIndexMasks[order]; 775 size_t subOrderIndex = size & root->orderSubIndexMasks[order];
808 PartitionBucket* bucket = 776 PartitionBucket* bucket =
809 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) + 777 root->bucketLookups[(order << kGenericNumBucketsPerOrderBits) +
810 orderIndex + !!subOrderIndex]; 778 orderIndex + !!subOrderIndex];
811 ASSERT(!bucket->slotSize || bucket->slotSize >= size); 779 DCHECK(!bucket->slotSize || bucket->slotSize >= size);
812 ASSERT(!(bucket->slotSize % kGenericSmallestBucket)); 780 DCHECK(!(bucket->slotSize % kGenericSmallestBucket));
813 return bucket; 781 return bucket;
814 } 782 }
815 783
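A worked example of the lookup, assuming 64-bit size_t and eight buckets per order (kGenericNumBucketsPerOrderBits == 3):

    //   size = 96 = 0b110'0000
    //   order         = 64 - clz(96) = 7            (covers sizes 65..128)
    //   orderIndex    = the 3 bits below the top bit = 0b100 = 4
    //   subOrderIndex = remaining low bits = 0      -> no bump
    //   bucket        = bucketLookups[(7 << 3) + 4] -> 96-byte bucket
    //
    //   size = 97 leaves subOrderIndex != 0, so !!subOrderIndex bumps the
    //   lookup to the next bucket (104 bytes), keeping slotSize >= size.
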
816 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root, 784 ALWAYS_INLINE void* partitionAllocGenericFlags(PartitionRootGeneric* root,
817 int flags, 785 int flags,
818 size_t size, 786 size_t size,
819 const char* typeName) { 787 const char* typeName) {
820 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 788 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
821 void* result = malloc(size); 789 void* result = malloc(size);
822 RELEASE_ASSERT(result || flags & PartitionAllocReturnNull); 790 CHECK(result || flags & PartitionAllocReturnNull);
823 return result; 791 return result;
824 #else 792 #else
825 ASSERT(root->initialized); 793 DCHECK(root->initialized);
826 size_t requestedSize = size; 794 size_t requestedSize = size;
827 size = partitionCookieSizeAdjustAdd(size); 795 size = partitionCookieSizeAdjustAdd(size);
828 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 796 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
829 void* ret = nullptr; 797 void* ret = nullptr;
830 { 798 {
831 SpinLock::Guard guard(root->lock); 799 subtle::SpinLock::Guard guard(root->lock);
832 ret = partitionBucketAlloc(root, flags, size, bucket); 800 ret = partitionBucketAlloc(root, flags, size, bucket);
833 } 801 }
834 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName); 802 PartitionAllocHooks::allocationHookIfEnabled(ret, requestedSize, typeName);
835 return ret; 803 return ret;
836 #endif 804 #endif
837 } 805 }
838 806
839 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root, 807 ALWAYS_INLINE void* partitionAllocGeneric(PartitionRootGeneric* root,
840 size_t size, 808 size_t size,
841 const char* typeName) { 809 const char* typeName) {
842 return partitionAllocGenericFlags(root, 0, size, typeName); 810 return partitionAllocGenericFlags(root, 0, size, typeName);
843 } 811 }
844 812
845 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) { 813 ALWAYS_INLINE void partitionFreeGeneric(PartitionRootGeneric* root, void* ptr) {
846 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 814 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
847 free(ptr); 815 free(ptr);
848 #else 816 #else
849 ASSERT(root->initialized); 817 DCHECK(root->initialized);
850 818
851 if (UNLIKELY(!ptr)) 819 if (UNLIKELY(!ptr))
852 return; 820 return;
853 821
854 PartitionAllocHooks::freeHookIfEnabled(ptr); 822 PartitionAllocHooks::freeHookIfEnabled(ptr);
855 ptr = partitionCookieFreePointerAdjust(ptr); 823 ptr = partitionCookieFreePointerAdjust(ptr);
856 ASSERT(partitionPointerIsValid(ptr)); 824 DCHECK(partitionPointerIsValid(ptr));
857 PartitionPage* page = partitionPointerToPage(ptr); 825 PartitionPage* page = partitionPointerToPage(ptr);
858 { 826 {
859 SpinLock::Guard guard(root->lock); 827 subtle::SpinLock::Guard guard(root->lock);
860 partitionFreeWithPage(ptr, page); 828 partitionFreeWithPage(ptr, page);
861 } 829 }
862 #endif 830 #endif
863 } 831 }
864 832
865 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) { 833 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) {
866 // Caller must check that the size is not above the kGenericMaxDirectMapped 834 // Caller must check that the size is not above the kGenericMaxDirectMapped
867 // limit before calling. This also guards against integer overflow in the 835 // limit before calling. This also guards against integer overflow in the
868 // calculation here. 836 // calculation here.
869 ASSERT(size <= kGenericMaxDirectMapped); 837 DCHECK(size <= kGenericMaxDirectMapped);
870 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; 838 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
871 } 839 }
872 840
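The expression rounds the request up to the next system page boundary. For example, with 4 KiB system pages (kSystemPageOffsetMask == 0xFFF and kSystemPageBaseMask == ~0xFFF):

    //   partitionDirectMapSize(10000)
    //     = (10000 + 0xFFF) & ~0xFFF
    //     = 14095 & ~0xFFF
    //     = 12288                       // three 4 KiB pages
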
873 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, 841 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root,
874 size_t size) { 842 size_t size) {
875 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 843 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
876 return size; 844 return size;
877 #else 845 #else
878 ASSERT(root->initialized); 846 DCHECK(root->initialized);
879 size = partitionCookieSizeAdjustAdd(size); 847 size = partitionCookieSizeAdjustAdd(size);
880 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size); 848 PartitionBucket* bucket = partitionGenericSizeToBucket(root, size);
881 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) { 849 if (LIKELY(!partitionBucketIsDirectMapped(bucket))) {
882 size = bucket->slotSize; 850 size = bucket->slotSize;
883 } else if (size > kGenericMaxDirectMapped) { 851 } else if (size > kGenericMaxDirectMapped) {
884 // Too large to allocate => return the size unchanged. 852 // Too large to allocate => return the size unchanged.
885 } else { 853 } else {
886 ASSERT(bucket == &PartitionRootBase::gPagedBucket); 854 DCHECK(bucket == &PartitionRootBase::gPagedBucket);
887 size = partitionDirectMapSize(size); 855 size = partitionDirectMapSize(size);
888 } 856 }
889 return partitionCookieSizeAdjustSubtract(size); 857 return partitionCookieSizeAdjustSubtract(size);
890 #endif 858 #endif
891 } 859 }
892 860
893 ALWAYS_INLINE bool partitionAllocSupportsGetSize() { 861 ALWAYS_INLINE bool partitionAllocSupportsGetSize() {
894 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 862 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
895 return false; 863 return false;
896 #else 864 #else
897 return true; 865 return true;
898 #endif 866 #endif
899 } 867 }
900 868
901 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) { 869 ALWAYS_INLINE size_t partitionAllocGetSize(void* ptr) {
902 // No need to lock here. Only 'ptr' being freed by another thread could 870 // No need to lock here. Only 'ptr' being freed by another thread could
903 // cause trouble, and the caller is responsible for that not happening. 871 // cause trouble, and the caller is responsible for that not happening.
904 ASSERT(partitionAllocSupportsGetSize()); 872 DCHECK(partitionAllocSupportsGetSize());
905 ptr = partitionCookieFreePointerAdjust(ptr); 873 ptr = partitionCookieFreePointerAdjust(ptr);
906 ASSERT(partitionPointerIsValid(ptr)); 874 DCHECK(partitionPointerIsValid(ptr));
907 PartitionPage* page = partitionPointerToPage(ptr); 875 PartitionPage* page = partitionPointerToPage(ptr);
908 size_t size = page->bucket->slotSize; 876 size_t size = page->bucket->slotSize;
909 return partitionCookieSizeAdjustSubtract(size); 877 return partitionCookieSizeAdjustSubtract(size);
910 } 878 }
911 879
912 // N (or more accurately, N - sizeof(void*)) represents the largest size in 880 // N (or more accurately, N - sizeof(void*)) represents the largest size in
913 // bytes that will be handled by a SizeSpecificPartitionAllocator. 881 // bytes that will be handled by a SizeSpecificPartitionAllocator.
914 // Attempts to partitionAlloc() more than this amount will fail. 882 // Attempts to partitionAlloc() more than this amount will fail.
915 template <size_t N> 883 template <size_t N>
916 class SizeSpecificPartitionAllocator { 884 class SizeSpecificPartitionAllocator {
(...skipping 14 matching lines...)
931 class PartitionAllocatorGeneric { 899 class PartitionAllocatorGeneric {
932 public: 900 public:
933 void init() { partitionAllocGenericInit(&m_partitionRoot); } 901 void init() { partitionAllocGenericInit(&m_partitionRoot); }
934 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); } 902 bool shutdown() { return partitionAllocGenericShutdown(&m_partitionRoot); }
935 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; } 903 ALWAYS_INLINE PartitionRootGeneric* root() { return &m_partitionRoot; }
936 904
937 private: 905 private:
938 PartitionRootGeneric m_partitionRoot; 906 PartitionRootGeneric m_partitionRoot;
939 }; 907 };
940 908
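A usage sketch of the size-specific wrapper, assuming its elided body mirrors PartitionAllocatorGeneric's init()/shutdown()/root() interface (with root() returning a PartitionRoot*):

    // Handles allocations of at most (1024 - sizeof(void*)) bytes; larger
    // partitionAlloc() requests against this root fail.
    base::SizeSpecificPartitionAllocator<1024> g_node_partition;

    void Example() {
      g_node_partition.init();
      void* node = base::partitionAlloc(g_node_partition.root(), 64, "Node");
      base::partitionFree(node);
      g_node_partition.shutdown();
    }
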
941 } // namespace WTF 909 } // namespace base
942 910
943 using WTF::SizeSpecificPartitionAllocator; 911 #endif // BASE_ALLOCATOR_PARTITION_ALLOCATOR_PARTITION_ALLOC_H
944 using WTF::PartitionAllocatorGeneric;
945 using WTF::PartitionRoot;
946 using WTF::partitionAllocInit;
947 using WTF::partitionAllocShutdown;
948 using WTF::partitionAlloc;
949 using WTF::partitionFree;
950 using WTF::partitionAllocGeneric;
951 using WTF::partitionFreeGeneric;
952 using WTF::partitionReallocGeneric;
953 using WTF::partitionAllocActualSize;
954 using WTF::partitionAllocSupportsGetSize;
955 using WTF::partitionAllocGetSize;
956
957 #endif // WTF_PartitionAlloc_h