Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(513)

Side by Side Diff: Source/wtf/PartitionAlloc.h

Issue 1195543005: PartitionAlloc: fixes and improvements to large-chunk size tracking. (Closed) Base URL: https://chromium.googlesource.com/chromium/blink.git@master
Patch Set: Review feedback. Created 5 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « no previous file | Source/wtf/PartitionAlloc.cpp » ('j') | Source/wtf/PartitionAlloc.cpp » ('J')
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 200 matching lines...) Expand 10 before | Expand all | Expand 10 after
211 // value (probably it is a "out of virtual address space" crash), 211 // value (probably it is a "out of virtual address space" crash),
212 // a special crash stack trace is generated at |partitionOutOfMemory|. 212 // a special crash stack trace is generated at |partitionOutOfMemory|.
213 // This is to distinguish "out of virtual address space" from 213 // This is to distinguish "out of virtual address space" from
214 // "out of physical memory" in crash reports. 214 // "out of physical memory" in crash reports.
215 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB 215 static const size_t kReasonableSizeOfUnusedPages = 1024 * 1024 * 1024; // 1GiB
216 216
217 #if ENABLE(ASSERT) 217 #if ENABLE(ASSERT)
218 // These two byte values match tcmalloc. 218 // These two byte values match tcmalloc.
219 static const unsigned char kUninitializedByte = 0xAB; 219 static const unsigned char kUninitializedByte = 0xAB;
220 static const unsigned char kFreedByte = 0xCD; 220 static const unsigned char kFreedByte = 0xCD;
221 static const uint32_t kCookieValue = 0xDEADBEEFu;
222 static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel. 221 static const size_t kCookieSize = 16; // Handles alignment up to XMM instructions on Intel.
222 static const unsigned char kCookieValue[kCookieSize] = { 0xDE, 0xAD, 0xBE, 0xEF, 0xCA, 0xFE, 0xD0, 0x0D, 0x13, 0x37, 0xF0, 0x05, 0xBA, 0x11, 0xAB, 0x1E };
223 #endif 223 #endif
224 224
225 struct PartitionBucket; 225 struct PartitionBucket;
226 struct PartitionRootBase; 226 struct PartitionRootBase;
227 227
228 struct PartitionFreelistEntry { 228 struct PartitionFreelistEntry {
229 PartitionFreelistEntry* next; 229 PartitionFreelistEntry* next;
230 }; 230 };
231 231
232 // Some notes on page states. A page can be in one of four major states: 232 // Some notes on page states. A page can be in one of four major states:
(...skipping 197 matching lines...) Expand 10 before | Expand all | Expand 10 after
430 #if ENABLE(ASSERT) 430 #if ENABLE(ASSERT)
431 // The value given to the application is actually just after the cookie. 431 // The value given to the application is actually just after the cookie.
432 ptr = static_cast<char*>(ptr) - kCookieSize; 432 ptr = static_cast<char*>(ptr) - kCookieSize;
433 #endif 433 #endif
434 return ptr; 434 return ptr;
435 } 435 }
436 436
437 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr) 437 ALWAYS_INLINE void partitionCookieWriteValue(void* ptr)
438 { 438 {
439 #if ENABLE(ASSERT) 439 #if ENABLE(ASSERT)
440 uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr); 440 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
441 for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr) 441 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
442 *cookiePtr = kCookieValue; 442 *cookiePtr = kCookieValue[i];
443 #endif 443 #endif
444 } 444 }
445 445
446 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr) 446 ALWAYS_INLINE void partitionCookieCheckValue(void* ptr)
447 { 447 {
448 #if ENABLE(ASSERT) 448 #if ENABLE(ASSERT)
449 uint32_t* cookiePtr = reinterpret_cast<uint32_t*>(ptr); 449 unsigned char* cookiePtr = reinterpret_cast<unsigned char*>(ptr);
450 for (size_t i = 0; i < kCookieSize / sizeof(kCookieValue); ++i, ++cookiePtr) 450 for (size_t i = 0; i < kCookieSize; ++i, ++cookiePtr)
451 ASSERT(*cookiePtr == kCookieValue); 451 ASSERT(*cookiePtr == kCookieValue[i]);
452 #endif 452 #endif
453 } 453 }
454 454
455 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr) 455 ALWAYS_INLINE char* partitionSuperPageToMetadataArea(char* ptr)
456 { 456 {
457 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr); 457 uintptr_t pointerAsUint = reinterpret_cast<uintptr_t>(ptr);
458 ASSERT(!(pointerAsUint & kSuperPageOffsetMask)); 458 ASSERT(!(pointerAsUint & kSuperPageOffsetMask));
459 // The metadata area is exactly one system page (the guard page) into the 459 // The metadata area is exactly one system page (the guard page) into the
460 // super page. 460 // super page.
461 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize); 461 return reinterpret_cast<char*>(pointerAsUint + kSystemPageSize);
(...skipping 31 matching lines...) Expand 10 before | Expand all | Expand 10 after
493 } 493 }
494 494
495 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr) 495 ALWAYS_INLINE PartitionPage* partitionPointerToPage(void* ptr)
496 { 496 {
497 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr); 497 PartitionPage* page = partitionPointerToPageNoAlignmentCheck(ptr);
498 // Checks that the pointer is a multiple of bucket size. 498 // Checks that the pointer is a multiple of bucket size.
499     ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % page->bucket->slotSize)); 499     ASSERT(!((reinterpret_cast<uintptr_t>(ptr) - reinterpret_cast<uintptr_t>(partitionPageToPointer(page))) % page->bucket->slotSize));
500 return page; 500 return page;
501 } 501 }
502 502
503 ALWAYS_INLINE bool partitionBucketIsDirectMapped(const PartitionBucket* bucket)
504 {
505 return !bucket->numSystemPagesPerSlotSpan;
506 }
507
508 ALWAYS_INLINE size_t partitionBucketBytes(const PartitionBucket* bucket)
509 {
510 return bucket->numSystemPagesPerSlotSpan * kSystemPageSize;
511 }
512
513 ALWAYS_INLINE uint16_t partitionBucketSlots(const PartitionBucket* bucket)
514 {
515     return static_cast<uint16_t>(partitionBucketBytes(bucket) / bucket->slotSize);
516 }
517
518 ALWAYS_INLINE size_t* partitionPageGetRawSizePtr(PartitionPage* page)
519 {
520 // For single-slot buckets which span more than one partition page, we
521 // have some spare metadata space to store the raw allocation size. We
522 // can use this to report better statistics.
523 PartitionBucket* bucket = page->bucket;
524 if (bucket->slotSize <= kMaxSystemPagesPerSlotSpan * kSystemPageSize)
525 return nullptr;
526
527 ASSERT(partitionBucketIsDirectMapped(bucket) || partitionBucketSlots(bucket) == 1);
528 page++;
529 return reinterpret_cast<size_t*>(&page->freelistHead);
530 }
531
532 ALWAYS_INLINE size_t partitionPageGetRawSize(PartitionPage* page)
533 {
534 size_t* rawSizePtr = partitionPageGetRawSizePtr(page);
535 if (UNLIKELY(rawSizePtr != nullptr))
536 return *rawSizePtr;
537 return 0;
538 }
539
503 ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page) 540 ALWAYS_INLINE PartitionRootBase* partitionPageToRoot(PartitionPage* page)
504 { 541 {
505     PartitionSuperPageExtentEntry* extentEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask); 542     PartitionSuperPageExtentEntry* extentEntry = reinterpret_cast<PartitionSuperPageExtentEntry*>(reinterpret_cast<uintptr_t>(page) & kSystemPageBaseMask);
506 return extentEntry->root; 543 return extentEntry->root;
507 } 544 }
508 545
509 ALWAYS_INLINE bool partitionPointerIsValid(void* ptr) 546 ALWAYS_INLINE bool partitionPointerIsValid(void* ptr)
510 { 547 {
511 PartitionPage* page = partitionPointerToPage(ptr); 548 PartitionPage* page = partitionPointerToPage(ptr);
512 PartitionRootBase* root = partitionPageToRoot(page); 549 PartitionRootBase* root = partitionPageToRoot(page);
513 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root); 550 return root->invertedSelf == ~reinterpret_cast<uintptr_t>(root);
514 } 551 }
515 552
516 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket) 553 ALWAYS_INLINE void* partitionBucketAlloc(PartitionRootBase* root, int flags, size_t size, PartitionBucket* bucket)
517 { 554 {
518 PartitionPage* page = bucket->activePagesHead; 555 PartitionPage* page = bucket->activePagesHead;
519 // Check that this page is neither full nor freed. 556 // Check that this page is neither full nor freed.
520 ASSERT(page->numAllocatedSlots >= 0); 557 ASSERT(page->numAllocatedSlots >= 0);
521 void* ret = page->freelistHead; 558 void* ret = page->freelistHead;
522 if (LIKELY(ret != 0)) { 559 if (LIKELY(ret != 0)) {
523 // If these asserts fire, you probably corrupted memory. 560 // If these asserts fire, you probably corrupted memory.
524 ASSERT(partitionPointerIsValid(ret)); 561 ASSERT(partitionPointerIsValid(ret));
562 // All large allocations must go through the slow path to correctly
563 // update the size metadata.
564 ASSERT(partitionPageGetRawSize(page) == 0);
525         PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next); 565         PartitionFreelistEntry* newHead = partitionFreelistMask(static_cast<PartitionFreelistEntry*>(ret)->next);
526 page->freelistHead = newHead; 566 page->freelistHead = newHead;
527 page->numAllocatedSlots++; 567 page->numAllocatedSlots++;
528 } else { 568 } else {
529 ret = partitionAllocSlowPath(root, flags, size, bucket); 569 ret = partitionAllocSlowPath(root, flags, size, bucket);
530 ASSERT(!ret || partitionPointerIsValid(ret)); 570 ASSERT(!ret || partitionPointerIsValid(ret));
531 } 571 }
532 #if ENABLE(ASSERT) 572 #if ENABLE(ASSERT)
533 if (!ret) 573 if (!ret)
534 return 0; 574 return 0;
535 // Fill the uninitialized pattern. and write the cookies. 575 // Fill the uninitialized pattern, and write the cookies.
536 page = partitionPointerToPage(ret); 576 page = partitionPointerToPage(ret);
537 size_t bucketSize = page->bucket->slotSize; 577 size_t slotSize = page->bucket->slotSize;
538 memset(ret, kUninitializedByte, bucketSize); 578 size_t rawSize = partitionPageGetRawSize(page);
539 partitionCookieWriteValue(ret); 579 if (rawSize) {
540     partitionCookieWriteValue(reinterpret_cast<char*>(ret) + bucketSize - kCookieSize); 580         ASSERT(rawSize == size);
581 slotSize = rawSize;
582 }
583 size_t noCookieSize = partitionCookieSizeAdjustSubtract(slotSize);
584 char* charRet = static_cast<char*>(ret);
541 // The value given to the application is actually just after the cookie. 585 // The value given to the application is actually just after the cookie.
542 ret = static_cast<char*>(ret) + kCookieSize; 586 ret = charRet + kCookieSize;
587 memset(ret, kUninitializedByte, noCookieSize);
588 partitionCookieWriteValue(charRet);
589 partitionCookieWriteValue(charRet + kCookieSize + noCookieSize);
543 #endif 590 #endif
544 return ret; 591 return ret;
545 } 592 }
546 593
547 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size) 594 ALWAYS_INLINE void* partitionAlloc(PartitionRoot* root, size_t size)
548 { 595 {
549 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 596 #if defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
550 void* result = malloc(size); 597 void* result = malloc(size);
551 RELEASE_ASSERT(result); 598 RELEASE_ASSERT(result);
552 return result; 599 return result;
553 #else 600 #else
554 size = partitionCookieSizeAdjustAdd(size); 601 size = partitionCookieSizeAdjustAdd(size);
555 ASSERT(root->initialized); 602 ASSERT(root->initialized);
556 size_t index = size >> kBucketShift; 603 size_t index = size >> kBucketShift;
557 ASSERT(index < root->numBuckets); 604 ASSERT(index < root->numBuckets);
558 ASSERT(size == index << kBucketShift); 605 ASSERT(size == index << kBucketShift);
559 PartitionBucket* bucket = &root->buckets()[index]; 606 PartitionBucket* bucket = &root->buckets()[index];
560 return partitionBucketAlloc(root, 0, size, bucket); 607 return partitionBucketAlloc(root, 0, size, bucket);
561 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR) 608 #endif // defined(MEMORY_TOOL_REPLACES_ALLOCATOR)
562 } 609 }
563 610
564 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page) 611 ALWAYS_INLINE void partitionFreeWithPage(void* ptr, PartitionPage* page)
565 { 612 {
566 // If these asserts fire, you probably corrupted memory. 613 // If these asserts fire, you probably corrupted memory.
567 #if ENABLE(ASSERT) 614 #if ENABLE(ASSERT)
568 size_t bucketSize = page->bucket->slotSize; 615 size_t slotSize = page->bucket->slotSize;
616 size_t rawSize = partitionPageGetRawSize(page);
617 if (rawSize)
618 slotSize = rawSize;
569 partitionCookieCheckValue(ptr); 619 partitionCookieCheckValue(ptr);
570     partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + bucketSize - kCookieSize); 620     partitionCookieCheckValue(reinterpret_cast<char*>(ptr) + slotSize - kCookieSize);
571 memset(ptr, kFreedByte, bucketSize); 621 memset(ptr, kFreedByte, slotSize);
572 #endif 622 #endif
573 ASSERT(page->numAllocatedSlots); 623 ASSERT(page->numAllocatedSlots);
574 PartitionFreelistEntry* freelistHead = page->freelistHead; 624 PartitionFreelistEntry* freelistHead = page->freelistHead;
575 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead)); 625 ASSERT(!freelistHead || partitionPointerIsValid(freelistHead));
576 RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(ptr != freelistHead); // Catches an immediate double free. 626 RELEASE_ASSERT_WITH_SECURITY_IMPLICATION(ptr != freelistHead); // Catches an immediate double free.
577     ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug. 627     ASSERT_WITH_SECURITY_IMPLICATION(!freelistHead || ptr != partitionFreelistMask(freelistHead->next)); // Look for double free one level deeper in debug.
578 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr); 628 PartitionFreelistEntry* entry = static_cast<PartitionFreelistEntry*>(ptr);
579 entry->next = partitionFreelistMask(freelistHead); 629 entry->next = partitionFreelistMask(freelistHead);
580 page->freelistHead = entry; 630 page->freelistHead = entry;
581 --page->numAllocatedSlots; 631 --page->numAllocatedSlots;
(...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after
642 692
643 ptr = partitionCookieFreePointerAdjust(ptr); 693 ptr = partitionCookieFreePointerAdjust(ptr);
644 ASSERT(partitionPointerIsValid(ptr)); 694 ASSERT(partitionPointerIsValid(ptr));
645 PartitionPage* page = partitionPointerToPage(ptr); 695 PartitionPage* page = partitionPointerToPage(ptr);
646 spinLockLock(&root->lock); 696 spinLockLock(&root->lock);
647 partitionFreeWithPage(ptr, page); 697 partitionFreeWithPage(ptr, page);
648 spinLockUnlock(&root->lock); 698 spinLockUnlock(&root->lock);
649 #endif 699 #endif
650 } 700 }
651 701
652 ALWAYS_INLINE bool partitionBucketIsDirectMapped(const PartitionBucket* bucket)
653 {
654 return !bucket->numSystemPagesPerSlotSpan;
655 }
656
657 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size) 702 ALWAYS_INLINE size_t partitionDirectMapSize(size_t size)
658 { 703 {
659 // Caller must check that the size is not above the kGenericMaxDirectMapped 704 // Caller must check that the size is not above the kGenericMaxDirectMapped
660 // limit before calling. This also guards against integer overflow in the 705 // limit before calling. This also guards against integer overflow in the
661 // calculation here. 706 // calculation here.
662 ASSERT(size <= kGenericMaxDirectMapped); 707 ASSERT(size <= kGenericMaxDirectMapped);
663 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask; 708 return (size + kSystemPageOffsetMask) & kSystemPageBaseMask;
664 } 709 }
665 710
666 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, size_t size) 711 ALWAYS_INLINE size_t partitionAllocActualSize(PartitionRootGeneric* root, size_t size)
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after
739 using WTF::partitionAlloc; 784 using WTF::partitionAlloc;
740 using WTF::partitionFree; 785 using WTF::partitionFree;
741 using WTF::partitionAllocGeneric; 786 using WTF::partitionAllocGeneric;
742 using WTF::partitionFreeGeneric; 787 using WTF::partitionFreeGeneric;
743 using WTF::partitionReallocGeneric; 788 using WTF::partitionReallocGeneric;
744 using WTF::partitionAllocActualSize; 789 using WTF::partitionAllocActualSize;
745 using WTF::partitionAllocSupportsGetSize; 790 using WTF::partitionAllocSupportsGetSize;
746 using WTF::partitionAllocGetSize; 791 using WTF::partitionAllocGetSize;
747 792
748 #endif // WTF_PartitionAlloc_h 793 #endif // WTF_PartitionAlloc_h
OLDNEW
« no previous file with comments | « no previous file | Source/wtf/PartitionAlloc.cpp » ('j') | Source/wtf/PartitionAlloc.cpp » ('J')

Powered by Google App Engine
This is Rietveld 408576698