OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 660 matching lines...)
671 void ThreadHeap<Header>::updateRemainingAllocationSize() | 671 void ThreadHeap<Header>::updateRemainingAllocationSize() |
672 { | 672 { |
673 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 673 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
674 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 674 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
675 m_lastRemainingAllocationSize = remainingAllocationSize(); | 675 m_lastRemainingAllocationSize = remainingAllocationSize(); |
676 } | 676 } |
677 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 677 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
678 } | 678 } |
679 | 679 |
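[Reviewer note: the lazy accounting above only ever moves the global allocated-object-size counter forward by the delta consumed from the bump area since the last snapshot. A minimal compilable sketch of that idea, using hypothetical toy names rather than the Blink API:]

    #include <cassert>
    #include <cstddef>

    struct ToyHeap {
        size_t remaining = 0;           // bytes left in the current bump area
        size_t lastRemaining = 0;       // snapshot taken at the previous update
        size_t allocatedObjectSize = 0; // global counter, updated lazily

        void bumpAllocate(size_t size) { remaining -= size; } // fast path: no accounting

        void updateRemainingAllocationSize()
        {
            // Whatever was bump-allocated since the last snapshot is the delta.
            if (lastRemaining > remaining) {
                allocatedObjectSize += lastRemaining - remaining;
                lastRemaining = remaining;
            }
            assert(lastRemaining == remaining);
        }
    };

    int main()
    {
        ToyHeap heap;
        heap.remaining = heap.lastRemaining = 4096;
        heap.bumpAllocate(64);
        heap.bumpAllocate(128);
        heap.updateRemainingAllocationSize();
        assert(heap.allocatedObjectSize == 192);
    }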
680 template<typename Header> | 680 template<typename Header> |
681 Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo) | 681 Address ThreadHeap<Header>::outOfLineAllocate(size_t allocationSize, const GCInfo* gcInfo) |
682 { | 682 { |
683 ASSERT(allocationSize > remainingAllocationSize()); | 683 ASSERT(allocationSize > remainingAllocationSize()); |
684 if (allocationSize > blinkPageSize / 2) | 684 if (allocationSize > blinkPageSize / 2) |
685 return allocateLargeObject(allocationSize, gcInfo); | 685 return allocateLargeObject(allocationSize, gcInfo); |
686 | 686 |
687 updateRemainingAllocationSize(); | 687 updateRemainingAllocationSize(); |
688 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); | 688 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); |
689 | 689 |
| 690 ASSERT(allocationSize >= allocationGranularity); |
| 691 Address result = allocateFromFreeList(allocationSize, gcInfo); |
| 692 if (result) |
| 693 return result; |
690 setAllocationPoint(nullptr, 0); | 694 setAllocationPoint(nullptr, 0); |
691 ASSERT(allocationSize >= allocationGranularity); | 695 if (coalesce(allocationSize)) { |
692 if (allocateFromFreeList(allocationSize)) | 696 result = allocateFromFreeList(allocationSize, gcInfo); |
693 return allocate(payloadSize, gcInfo); | 697 if (result) |
694 if (coalesce(allocationSize) && allocateFromFreeList(allocationSize)) | 698 return result; |
695 return allocate(payloadSize, gcInfo); | 699 } |
696 | 700 |
697 addPageToHeap(gcInfo); | 701 addPageToHeap(gcInfo); |
698 bool success = allocateFromFreeList(allocationSize); | 702 result = allocateFromFreeList(allocationSize, gcInfo); |
699 RELEASE_ASSERT(success); | 703 RELEASE_ASSERT(result); |
700 return allocate(payloadSize, gcInfo); | 704 return result; |
| 705 } |
| 706 |
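[Reviewer note: the restructured slow path tries strategies in increasing cost order: free list first, then coalesce-and-retry, then grow the heap and retry once more. A compilable toy sketch of that ordering; all names are hypothetical and the free list is reduced to a byte count:]

    #include <cassert>
    #include <cstddef>

    using Address = char*;

    // Toy heap: one fixed page; the "free list" is just a count of free bytes.
    struct ToySlowPath {
        static const size_t kPage = 4096;
        char storage[kPage];
        size_t freeBytes = 0; // bytes the free list can currently hand out
        size_t used = 0;

        Address allocateFromFreeList(size_t size)
        {
            if (size > freeBytes)
                return nullptr;
            freeBytes -= size;
            Address result = storage + used;
            used += size;
            return result;
        }

        bool coalesce(size_t) { return false; } // nothing to merge in this toy

        void addPageToHeap() { freeBytes = kPage - used; } // "grow" the heap

        Address outOfLineAllocate(size_t size)
        {
            // 1. Reuse freed memory without disturbing the current bump area.
            if (Address result = allocateFromFreeList(size))
                return result;
            // 2. (The real code retires the bump area here.) Merge adjacent
            //    free blocks and retry the free list.
            if (coalesce(size)) {
                if (Address result = allocateFromFreeList(size))
                    return result;
            }
            // 3. Ask for a fresh page; this final retry must succeed.
            addPageToHeap();
            Address result = allocateFromFreeList(size);
            assert(result);
            return result;
        }
    };

    int main()
    {
        ToySlowPath heap;
        Address a = heap.outOfLineAllocate(64); // falls through to addPageToHeap()
        assert(a == heap.storage);
    }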
| 707 static bool shouldUseFirstFitForHeap(int heapIndex) |
| 708 { |
| 709 // For an allocation of size N, should a heap perform a first-fit |
| 710 // matching within the sized bin that N belongs to? |
| 711 // |
| 712 // Theory: quickly reusing a previously freed backing store block stands |
| 713 // a chance of maintaining cached presence of that block (maintains |
| 714 // "locality".) This is preferable to starting to bump allocate from a |
| 715 // new and bigger block, which is what allocateFromFreeList() does by |
| 716 // default. Hence, the backing store heaps are considered for binned |
| 717 // first-fit matching. |
| 718 // |
| 719 // This appears to hold true through performance experimentation; at |
| 720 // least no significant performance regressions have been observed. |
| 721 // |
| 722 // This theory of improved performance does not hold true for other |
| 723 // heap types. We are currently seeking an understanding of why; |
| 724 // larger amounts of small block fragmentation might be one reason |
| 725 // for it. TBC. |
| 726 // |
| 727 switch (heapIndex) { |
| 728 case VectorBackingHeap: |
| 729 case InlineVectorBackingHeap: |
| 730 case HashTableBackingHeap: |
| 731 case VectorBackingHeapNonFinalized: |
| 732 case InlineVectorBackingHeapNonFinalized: |
| 733 case HashTableBackingHeapNonFinalized: |
| 734 return true; |
| 735 default: |
| 736 return false; |
| 737 } |
701 } | 738 } |
702 | 739 |
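[Reviewer note: "sized bin" here means the power-of-two bucket a size falls into. A hypothetical sketch of such a mapping follows; it is not the actual FreeList<Header>::bucketIndexForSize, but presumably behaves like it, which would also explain the "+ 1" in the code below: every entry in bucket index + 1 is at least 2^(index+1) bytes, so its first entry is guaranteed to fit an allocation from bucket index:]

    #include <cassert>
    #include <cstddef>

    // floor(log2(size)): sizes in [2^i, 2^(i+1)) share bucket i, so every
    // entry in bucket i + 1 is large enough for any size in bucket i.
    static int bucketIndexForSize(size_t size)
    {
        assert(size > 0);
        int index = -1;
        while (size) {
            ++index;
            size >>= 1;
        }
        return index;
    }

    int main()
    {
        assert(bucketIndexForSize(1) == 0);
        assert(bucketIndexForSize(96) == 6);  // 64 <= 96 < 128
        assert(bucketIndexForSize(128) == 7);
    }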
703 template<typename Header> | 740 template<typename Header> |
704 FreeListEntry* FreeList<Header>::takeEntry(size_t allocationSize) | 741 Address ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize, const GCInfo* gcInfo) |
705 { | 742 { |
706 size_t bucketSize = 1 << m_biggestFreeListIndex; | 743 // The freelist allocation scheme is currently as follows: |
707 int i = m_biggestFreeListIndex; | 744 // |
708 for (; i > 0; i--, bucketSize >>= 1) { | 745 // - If the heap is of an appropriate type, try to pick the first |
709 if (bucketSize < allocationSize) { | 746 // entry from the sized bin corresponding to |allocationSize|. |
710 // A FreeListEntry for bucketSize might be larger than allocationSize. | 747 // [See shouldUseFirstFitForHeap() comment for motivation on why.] |
711 // FIXME: We check only the first FreeListEntry because searching | 748 // |
712 // the entire list is costly. | 749 // - If that didn't satisfy the allocation, try reusing a block |
713 if (!m_freeLists[i] || m_freeLists[i]->size() < allocationSize) | 750 // from the largest bin. The underlying reasoning being that |
| 751 // we want to amortize this slow allocation call by carving |
| 752 // off as large a free block as possible in one go; a block |
| 753 // that will service this request and let following allocations |
| 754 // be serviced quickly by bump allocation. |
| 755 // |
| 756 // - Fail; allocation cannot be serviced by the freelist. |
| 757 // The allocator will handle that failure by requesting more |
| 758 // heap pages from the OS and re-initiating the allocation request. |
| 759 // |
| 760 int index = FreeList<Header>::bucketIndexForSize(allocationSize) + 1; |
| 761 if (index <= m_freeList.m_biggestFreeListIndex && shouldUseFirstFitForHeap(m_index)) {
| 762 if (FreeListEntry* entry = m_freeList.m_freeLists[index]) { |
| 763 entry->unlink(&m_freeList.m_freeLists[index]); |
| 764 if (!m_freeList.m_freeLists[index] && index == m_freeList.m_biggestFreeListIndex) {
| 765 // Biggest bucket drained, adjust biggest index downwards. |
| 766 int maxIndex = m_freeList.m_biggestFreeListIndex - 1; |
| 767 for (; maxIndex >= 0 && !m_freeList.m_freeLists[maxIndex]; --maxIndex) { }
| 768 m_freeList.m_biggestFreeListIndex = maxIndex < 0 ? 0 : maxIndex; |
| 769 } |
| 770 // Allocate into the freelist block without disturbing the current allocation area. |
| 771 ASSERT(entry->size() >= allocationSize); |
| 772 if (entry->size() > allocationSize) |
| 773 addToFreeList(entry->address() + allocationSize, entry->size() - allocationSize); |
| 774 Heap::increaseAllocatedObjectSize(allocationSize); |
| 775 return allocateAtAddress(entry->address(), allocationSize, gcInfo); |
| 776 } |
| 777 // Failed to find a first-fit freelist entry; fall into the standard case of |
| 778 // chopping off the largest free block and bump allocate from it. |
| 779 } |
| 780 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; |
| 781 index = m_freeList.m_biggestFreeListIndex; |
| 782 for (; index > 0; --index, bucketSize >>= 1) { |
| 783 FreeListEntry* entry = m_freeList.m_freeLists[index]; |
| 784 if (allocationSize > bucketSize) { |
| 785 // Final bucket candidate; check whether its initial entry is |
| 786 // able to service this allocation. Do not perform a linear scan, |
| 787 // as it is considered too costly. |
| 788 if (!entry || entry->size() < allocationSize) |
714 break; | 789 break; |
715 } | 790 } |
716 if (FreeListEntry* entry = m_freeLists[i]) { | 791 if (entry) { |
717 m_biggestFreeListIndex = i; | 792 entry->unlink(&m_freeList.m_freeLists[index]); |
718 entry->unlink(&m_freeLists[i]); | 793 setAllocationPoint(entry->address(), entry->size()); |
719 return entry; | 794 ASSERT(hasCurrentAllocationArea()); |
| 795 ASSERT(remainingAllocationSize() >= allocationSize); |
| 796 m_freeList.m_biggestFreeListIndex = index; |
| 797 return allocateSize(allocationSize, gcInfo); |
720 } | 798 } |
721 } | 799 } |
722 m_biggestFreeListIndex = i; | 800 m_freeList.m_biggestFreeListIndex = index; |
723 return nullptr; | 801 return nullptr; |
724 } | 802 } |
725 | 803 |
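[Reviewer note: the "carve off the largest block" branch exists to amortize one slow free-list hit across many subsequent fast allocations. A toy bump area showing that payoff; names are hypothetical, not the Blink API:]

    #include <cassert>
    #include <cstddef>

    using Address = char*;

    // Toy bump area, fed once from a (pretend) largest-bin free-list block.
    struct ToyBumpArea {
        Address current = nullptr;
        size_t remaining = 0;

        void setAllocationPoint(Address start, size_t size)
        {
            current = start;
            remaining = size;
        }

        Address allocate(size_t size)
        {
            if (size > remaining)
                return nullptr; // would re-enter the slow path
            Address result = current;
            current += size;
            remaining -= size;
            return result;
        }
    };

    int main()
    {
        static char block[512]; // stand-in for the block chopped off the biggest bin
        ToyBumpArea area;
        area.setAllocationPoint(block, sizeof(block));
        for (int i = 0; i < 8; ++i)
            assert(area.allocate(64)); // one slow hit now services eight fast ones
        assert(!area.allocate(1));     // exhausted; the next request goes slow again
    }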
726 template<typename Header> | |
727 bool ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize) | |
728 { | |
729 ASSERT(!hasCurrentAllocationArea()); | |
730 if (FreeListEntry* entry = m_freeList.takeEntry(allocationSize)) { | |
731 setAllocationPoint(entry->address(), entry->size()); | |
732 ASSERT(hasCurrentAllocationArea()); | |
733 ASSERT(remainingAllocationSize() >= allocationSize); | |
734 return true; | |
735 } | |
736 return false; | |
737 } | |
738 | |
739 #if ENABLE(ASSERT) | 804 #if ENABLE(ASSERT) |
740 template<typename Header> | 805 template<typename Header> |
741 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) | 806 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) |
742 { | 807 { |
743 // Check that a large object is blinkPageSize aligned (modulo the osPageSize | 808 // Check that a large object is blinkPageSize aligned (modulo the osPageSize |
744 // for the guard page). | 809 // for the guard page). |
745 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); | 810 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); |
746 } | 811 } |
747 #endif | 812 #endif |
748 | 813 |
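[Reviewer note: the assertion helper checks that a large object sits exactly one guard page past a blink-page boundary. The same arithmetic as a standalone check; the page-size constants are illustrative assumptions, not the authoritative Blink values:]

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uintptr_t kBlinkPageSize = 1 << 17;  // illustrative value only
        const uintptr_t kSystemPageSize = 1 << 12; // illustrative value only
        uintptr_t pageStart = 5 * kBlinkPageSize;            // some blink-page boundary
        uintptr_t largeObject = pageStart + kSystemPageSize; // object is past the guard page
        uintptr_t roundedToBlinkPageStart = largeObject & ~(kBlinkPageSize - 1);
        assert(largeObject - kSystemPageSize == roundedToBlinkPageStart);
    }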
(...skipping 2176 matching lines...)
2925 bool Heap::s_shutdownCalled = false; | 2990 bool Heap::s_shutdownCalled = false; |
2926 bool Heap::s_lastGCWasConservative = false; | 2991 bool Heap::s_lastGCWasConservative = false; |
2927 FreePagePool* Heap::s_freePagePool; | 2992 FreePagePool* Heap::s_freePagePool; |
2928 OrphanedPagePool* Heap::s_orphanedPagePool; | 2993 OrphanedPagePool* Heap::s_orphanedPagePool; |
2929 Heap::RegionTree* Heap::s_regionTree = nullptr; | 2994 Heap::RegionTree* Heap::s_regionTree = nullptr; |
2930 size_t Heap::s_allocatedObjectSize = 0; | 2995 size_t Heap::s_allocatedObjectSize = 0; |
2931 size_t Heap::s_allocatedSpace = 0; | 2996 size_t Heap::s_allocatedSpace = 0; |
2932 size_t Heap::s_markedObjectSize = 0; | 2997 size_t Heap::s_markedObjectSize = 0; |
2933 | 2998 |
2934 } // namespace blink | 2999 } // namespace blink |