OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 660 matching lines...) | |
671 void ThreadHeap<Header>::updateRemainingAllocationSize() | 671 void ThreadHeap<Header>::updateRemainingAllocationSize() |
672 { | 672 { |
673 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 673 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
674 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 674 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
675 m_lastRemainingAllocationSize = remainingAllocationSize(); | 675 m_lastRemainingAllocationSize = remainingAllocationSize(); |
676 } | 676 } |
677 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 677 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
678 } | 678 } |
679 | 679 |
680 template<typename Header> | 680 template<typename Header> |
681 Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo) | 681 Address ThreadHeap<Header>::outOfLineAllocate(size_t allocationSize, const GCInfo* gcInfo) |
682 { | 682 { |
683 ASSERT(allocationSize > remainingAllocationSize()); | 683 ASSERT(allocationSize > remainingAllocationSize()); |
684 if (allocationSize > blinkPageSize / 2) | 684 if (allocationSize > blinkPageSize / 2) |
685 return allocateLargeObject(allocationSize, gcInfo); | 685 return allocateLargeObject(allocationSize, gcInfo); |
686 | 686 |
687 updateRemainingAllocationSize(); | 687 updateRemainingAllocationSize(); |
688 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); | 688 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); |
689 | 689 |
690 ASSERT(allocationSize >= allocationGranularity); | |
691 Address result = allocateFromFreeList(allocationSize, gcInfo); | |
692 if (result) | |
693 return result; | |
690 setAllocationPoint(nullptr, 0); | 694 setAllocationPoint(nullptr, 0); |
691 ASSERT(allocationSize >= allocationGranularity); | 695 if (coalesce(allocationSize)) { |
692 if (allocateFromFreeList(allocationSize)) | 696 result = allocateFromFreeList(allocationSize, gcInfo); |
693 return allocate(payloadSize, gcInfo); | 697 if (result) |
694 if (coalesce(allocationSize) && allocateFromFreeList(allocationSize)) | 698 return result; |
695 return allocate(payloadSize, gcInfo); | 699 } |
696 | 700 |
697 addPageToHeap(gcInfo); | 701 addPageToHeap(gcInfo); |
698 bool success = allocateFromFreeList(allocationSize); | 702 result = allocateFromFreeList(allocationSize, gcInfo); |
699 RELEASE_ASSERT(success); | 703 RELEASE_ASSERT(result); |
700 return allocate(payloadSize, gcInfo); | 704 return result; |
705 } | |
706 | |
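The reordered slow path above now consults the free list before giving up the current bump-allocation area. A minimal standalone sketch of the fallback chain, with a hypothetical Allocator type standing in for ThreadHeap<Header> (the real code also updates allocation accounting and may schedule a GC between steps):

```cpp
#include <cstddef>
#include <cstdlib>

// Hypothetical stand-in for ThreadHeap<Header>; malloc-backed stubs keep the
// sketch self-contained. Only the ordering of the fallback steps mirrors the
// patch.
struct Allocator {
    static const size_t kPageSize = 1 << 17; // stand-in for blinkPageSize

    void* outOfLineAllocate(size_t allocationSize)
    {
        // Objects over half a page get a dedicated large-object allocation.
        if (allocationSize > kPageSize / 2)
            return allocateLargeObject(allocationSize);

        // 1. Try the free list first, keeping the current bump-allocation
        //    area intact; this is the reordering the patch introduces.
        if (void* result = allocateFromFreeList(allocationSize))
            return result;

        // 2. Retire the bump area, coalesce adjacent free blocks, retry.
        setAllocationPoint(nullptr, 0);
        if (coalesce(allocationSize)) {
            if (void* result = allocateFromFreeList(allocationSize))
                return result;
        }

        // 3. Last resort: grow the heap by a page; this retry must succeed
        //    (RELEASE_ASSERT in the real code).
        addPageToHeap();
        return allocateFromFreeList(allocationSize);
    }

    // Trivial stubs so the sketch compiles and runs; the real versions
    // manage Blink heap pages and segregated free lists.
    void* allocateLargeObject(size_t size) { return std::malloc(size); }
    void* allocateFromFreeList(size_t size) { return m_hasFreshPage ? std::malloc(size) : nullptr; }
    bool coalesce(size_t) { return false; }
    void setAllocationPoint(void*, size_t) {}
    void addPageToHeap() { m_hasFreshPage = true; }

    bool m_hasFreshPage = false;
};
```

The ordering matters: reusing a freed block keeps any partially consumed bump area alive, while coalescing and page growth are progressively more expensive fallbacks.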
707 static bool shouldUseFirstFitForHeap(int heapIndex) | |
haraken
2014/12/16 02:26:33
Isn't this "best fit", not "first fit"?
sof
2014/12/16 07:31:54
Not as I know the terms -- best fit would search t
haraken
2014/12/16 08:26:14
(I'm not intending to bike-shed terms but) as far
sof
2014/12/16 08:32:15
It performs first fit within segregated bins.
haraken
2014/12/16 08:51:19
Then our worst-fit is also working as a first-fit
sof
2014/12/16 08:54:22
It's performing first-fit within the worst bin in
haraken
2014/12/16 09:06:37
That's why I think shouldUseFirstFit doesn't captu
| |
708 { | |
709 // For an allocation of size N, should a heap perform a first-fit | |
710 // matching within the sized bin that N belongs to? | |
711 // | |
712 // Theory: quickly reusing a previously freed backing store block stands | |
713 // a chance of maintaining cached presence of that block (maintains | |
714 // "locality".) This is preferable to starting to bump allocate from a | |
715 // new and bigger block, which is what allocateFromFreeList() does by | |
716 // default. Hence, the backing store heaps are considered for binned | |
717 // first-fit matching. | |
haraken
2014/12/16 02:26:33
Shall we also mention that this optimization makes
sof
2014/12/16 07:31:54
Coalescing is not the key -- as I pointed out yest
haraken
2014/12/16 08:26:14
Hmm. Then probably the situation is like this?
Fa
sof
2014/12/16 08:36:18
As I said earlier, doing a decision based on size
haraken
2014/12/16 08:51:19
I'm sorry about it... Then the situation is that w
| |
718 // | |
719 // This appears to hold true through performance experimentation; at | |
720 // least no significant performance regressions have been observed. | |
721 // | |
722 // This theory of improved performance does not hold true for other | |
723 // heap types. We are currently seeking an understanding of why; | |
haraken
2014/12/16 02:26:33
I guess the reason this optimization doesn't help
sof
2014/12/16 07:31:54
They do coalesce when sweeping though.
| |
724 // larger amounts of small block fragmentation might be one reason | |
725 // for it. TBC. | |
726 // | |
727 if (heapIndex >= VectorBackingHeap && heapIndex <= HashTableBackingHeap) | |
haraken
2014/12/16 02:26:33
For safety, I'd prefer:
heapIndex == VectorBack
sof
2014/12/16 07:31:54
And also the inline heap case -- this seems verbos
| |
728 return true; | |
729 if (heapIndex >= VectorBackingHeapNonFinalized && heapIndex <= HashTableBackingHeapNonFinalized) | |
haraken
2014/12/16 02:26:33
Ditto.
| |
730 return true; | |
731 | |
732 return false; | |
701 } | 733 } |
702 | 734 |
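The range checks in shouldUseFirstFitForHeap() rely on the backing-store heaps being declared contiguously in Blink's heap-index enum; the review thread above debates whether enumerating each index explicitly would be safer. A hedged sketch with hypothetical enum values showing the layout the checks depend on:

```cpp
// Hypothetical heap indices; the real enum lives elsewhere in the Blink
// sources, and the range checks below stay valid only while the
// backing-store entries remain contiguous and in this relative order.
enum HeapIndex {
    GeneralHeap,
    VectorBackingHeap,                  // finalized backing-store heaps...
    InlineVectorBackingHeap,
    HashTableBackingHeap,               // ...through here
    VectorBackingHeapNonFinalized,      // non-finalized variants...
    InlineVectorBackingHeapNonFinalized,
    HashTableBackingHeapNonFinalized,   // ...through here
    NumberOfHeaps,
};

static bool shouldUseFirstFitForHeap(int heapIndex)
{
    return (heapIndex >= VectorBackingHeap && heapIndex <= HashTableBackingHeap)
        || (heapIndex >= VectorBackingHeapNonFinalized
            && heapIndex <= HashTableBackingHeapNonFinalized);
}
```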
703 template<typename Header> | 735 template<typename Header> |
704 FreeListEntry* FreeList<Header>::takeEntry(size_t allocationSize) | 736 Address ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize, const GCInfo* gcInfo) |
705 { | 737 { |
706 size_t bucketSize = 1 << m_biggestFreeListIndex; | 738 // The freelist allocation scheme is currently as follows: |
707 int i = m_biggestFreeListIndex; | 739 // |
708 for (; i > 0; i--, bucketSize >>= 1) { | 740 // - If the heap is of an appropriate type, try to pick the first |
709 if (bucketSize < allocationSize) { | 741 // entry from the sized bin corresponding to |allocationSize|. |
710 // A FreeListEntry for bucketSize might be larger than allocationSize. | 742 // [See shouldUseFirstFitForHeap() comment for motivation on why.] |
711 // FIXME: We check only the first FreeListEntry because searching | 743 // |
712 // the entire list is costly. | 744 // - If that didn't satisfy the allocation, try reusing a block |
713 if (!m_freeLists[i] || m_freeLists[i]->size() < allocationSize) | 745 // from the largest bin. The underlying reasoning is that |
746 // we want to amortize this slow allocation call by carving | |
747 // off as large a free block as possible in one go; a block | |
748 // that will service this allocation and let subsequent ones | |
749 // be serviced quickly by bump allocation. | |
750 // | |
751 // - Fail; allocation cannot be serviced by the freelist. | |
752 // The allocator will handle that failure by requesting more | |
753 // heap pages from the OS and re-initiating the allocation request. | |
754 // | |
755 int bucket = FreeList<Header>::bucketIndexForSize(allocationSize) + 1; | |
haraken
2014/12/16 02:26:33
bucket => index
sof
2014/12/16 07:31:54
No, sorry - this is a bucket, not an index. Or do
| |
756 if (bucket <= m_freeList.m_biggestFreeListIndex && shouldUseFirstFitForHeap(m_index)) { | |
757 if (FreeListEntry* entry = m_freeList.m_freeLists[bucket]) { | |
haraken
2014/12/16 02:26:33
Don't we want to look up the entry in m_freeLists[
sof
2014/12/16 07:31:54
That's been checked as regressing performance earl
| |
758 entry->unlink(&m_freeList.m_freeLists[bucket]); | |
759 if (!m_freeList.m_freeLists[bucket] && bucket == m_freeList.m_biggestFreeListIndex) { | |
760 // Recalculate the biggest index of non-empty buckets. | |
761 int maxIndex = m_freeList.m_biggestFreeListIndex - 1; | |
762 for (; maxIndex >= 0 && !m_freeList.m_freeLists[maxIndex]; maxIndex--) { } | |
763 m_freeList.m_biggestFreeListIndex = maxIndex < 0 ? 0 : maxIndex; | |
haraken
2014/12/16 02:26:33
The calculation of m_biggestFreeListIndex is getti
| |
764 } | |
765 // Allocate into the freelist block without disturbing the current allocation area. | |
766 ASSERT(entry->size() >= allocationSize); | |
767 if (entry->size() > allocationSize) | |
768 addToFreeList(entry->address() + allocationSize, entry->size() - allocationSize); | |
769 Heap::increaseAllocatedObjectSize(allocationSize); | |
770 return allocateAtAddress(entry->address(), allocationSize, gcInfo); | |
771 } | |
772 // Failed to find a first-fit freelist entry; fall into the standard case of | |
773 // chopping off the largest free block and bump allocating from it. | |
774 } | |
775 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; | |
776 int index = m_freeList.m_biggestFreeListIndex; | |
777 for (; index > 0; index--, bucketSize >>= 1) { | |
haraken
2014/12/16 02:26:33
--index
| |
778 FreeListEntry* entry = m_freeList.m_freeLists[index]; | |
779 if (allocationSize > bucketSize) { | |
780 // Final bucket candidate; check whether the initial entry is | |
781 // able to service this allocation. Do not perform a linear scan, | |
782 // as it is considered too costly. | |
783 if (!entry || entry->size() < allocationSize) | |
714 break; | 784 break; |
715 } | 785 } |
716 if (FreeListEntry* entry = m_freeLists[i]) { | 786 if (entry) { |
717 m_biggestFreeListIndex = i; | 787 entry->unlink(&m_freeList.m_freeLists[index]); |
718 entry->unlink(&m_freeLists[i]); | 788 setAllocationPoint(entry->address(), entry->size()); |
719 return entry; | 789 ASSERT(hasCurrentAllocationArea()); |
790 ASSERT(remainingAllocationSize() >= allocationSize); | |
791 m_freeList.m_biggestFreeListIndex = index; | |
792 return allocateSize(allocationSize, gcInfo); | |
720 } | 793 } |
721 } | 794 } |
722 m_biggestFreeListIndex = i; | 795 m_freeList.m_biggestFreeListIndex = index; |
723 return nullptr; | 796 return nullptr; |
724 } | 797 } |
725 | 798 |
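The two-phase scheme described in the comment at the top of allocateFromFreeList() can be illustrated with a standalone toy. FreeBlock, ToyFreeList, and their fields are hypothetical simplifications: real entries are FreeListEntry headers carved out of heap pages, and the real code splits remainders back onto the list or bump-allocates from the taken block.

```cpp
#include <cstddef>

// Hypothetical toy types; sizes in [2^i, 2^(i+1)) map to bucket i.
struct FreeBlock {
    size_t size;
    FreeBlock* next;
};

struct ToyFreeList {
    static const int kBuckets = 32;
    FreeBlock* m_freeLists[kBuckets] = {};
    int m_biggestFreeListIndex = 0;

    // Index of the highest set bit; every entry in bucket
    // bucketIndexForSize(n) + 1 is therefore guaranteed to hold
    // at least n bytes, which is why the "+ 1" above needs no size check.
    static int bucketIndexForSize(size_t size)
    {
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }

    // Phase 1: binned first fit. Pop the head of the first bin guaranteed
    // to fit, leaving the current bump-allocation area undisturbed.
    FreeBlock* takeFirstFit(size_t allocationSize)
    {
        int bucket = bucketIndexForSize(allocationSize) + 1;
        if (bucket <= m_biggestFreeListIndex && m_freeLists[bucket]) {
            FreeBlock* entry = m_freeLists[bucket];
            m_freeLists[bucket] = entry->next;
            return entry; // the real code splits any remainder back onto the list
        }
        return nullptr;
    }

    // Phase 2: take the largest available block and bump allocate from it,
    // amortizing this slow call across subsequent fast allocations.
    FreeBlock* takeBiggest(size_t allocationSize)
    {
        size_t bucketSize = size_t(1) << m_biggestFreeListIndex;
        int index = m_biggestFreeListIndex;
        for (; index > 0; index--, bucketSize >>= 1) {
            FreeBlock* entry = m_freeLists[index];
            if (allocationSize > bucketSize) {
                // Final candidate bucket: only the head entry is checked;
                // scanning the whole chain is considered too costly.
                if (!entry || entry->size < allocationSize)
                    break;
            }
            if (entry) {
                m_freeLists[index] = entry->next;
                m_biggestFreeListIndex = index;
                return entry;
            }
        }
        m_biggestFreeListIndex = index;
        return nullptr;
    }
};
```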
726 template<typename Header> | |
727 bool ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize) | |
728 { | |
729 ASSERT(!hasCurrentAllocationArea()); | |
730 if (FreeListEntry* entry = m_freeList.takeEntry(allocationSize)) { | |
731 setAllocationPoint(entry->address(), entry->size()); | |
732 ASSERT(hasCurrentAllocationArea()); | |
733 ASSERT(remainingAllocationSize() >= allocationSize); | |
734 return true; | |
735 } | |
736 return false; | |
737 } | |
738 | |
739 #if ENABLE(ASSERT) | 799 #if ENABLE(ASSERT) |
740 template<typename Header> | 800 template<typename Header> |
741 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) | 801 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) |
742 { | 802 { |
743 // Check that a large object is blinkPageSize aligned (modulo the osPageSize | 803 // Check that a large object is blinkPageSize aligned (modulo the osPageSize |
744 // for the guard page). | 804 // for the guard page). |
745 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); | 805 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); |
746 } | 806 } |
747 #endif | 807 #endif |
748 | 808 |
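The invariant isLargeObjectAligned() asserts can be spelled out with plain pointer arithmetic. A hedged sketch with illustrative constants; the real blinkPageSize, guard-page layout, and roundToBlinkPageStart are defined elsewhere in the Blink sources:

```cpp
#include <cstdint>

// Illustrative constants; both are powers of two in the real layout.
static const uintptr_t kBlinkPageSize = uintptr_t(1) << 17;
static const uintptr_t kSystemPageSize = 4096; // OS guard page

// Rounding down to a Blink page boundary is just masking off the low bits,
// which is what makes the assertion cheap.
static uintptr_t roundToBlinkPageStart(uintptr_t address)
{
    return address & ~(kBlinkPageSize - 1);
}

// A large object is expected to begin exactly one guard page past the
// start of its Blink page.
static bool isLargeObjectAligned(uintptr_t largeObject)
{
    return largeObject - kSystemPageSize == roundToBlinkPageStart(largeObject);
}
```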
(...skipping 2193 matching lines...) | |
2942 bool Heap::s_shutdownCalled = false; | 3002 bool Heap::s_shutdownCalled = false; |
2943 bool Heap::s_lastGCWasConservative = false; | 3003 bool Heap::s_lastGCWasConservative = false; |
2944 FreePagePool* Heap::s_freePagePool; | 3004 FreePagePool* Heap::s_freePagePool; |
2945 OrphanedPagePool* Heap::s_orphanedPagePool; | 3005 OrphanedPagePool* Heap::s_orphanedPagePool; |
2946 Heap::RegionTree* Heap::s_regionTree = nullptr; | 3006 Heap::RegionTree* Heap::s_regionTree = nullptr; |
2947 size_t Heap::s_allocatedObjectSize = 0; | 3007 size_t Heap::s_allocatedObjectSize = 0; |
2948 size_t Heap::s_allocatedSpace = 0; | 3008 size_t Heap::s_allocatedSpace = 0; |
2949 size_t Heap::s_markedObjectSize = 0; | 3009 size_t Heap::s_markedObjectSize = 0; |
2950 | 3010 |
2951 } // namespace blink | 3011 } // namespace blink |