OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 38 matching lines...)
49 #include <stdio.h> | 49 #include <stdio.h> |
50 #include <utility> | 50 #include <utility> |
51 #endif | 51 #endif |
52 #if ENABLE(GC_PROFILE_HEAP) | 52 #if ENABLE(GC_PROFILE_HEAP) |
53 #include "platform/TracedValue.h" | 53 #include "platform/TracedValue.h" |
54 #endif | 54 #endif |
55 | 55 |
56 #if OS(POSIX) | 56 #if OS(POSIX) |
57 #include <sys/mman.h> | 57 #include <sys/mman.h> |
58 #include <unistd.h> | 58 #include <unistd.h> |
59 #if COMPILER(GCC) | |
60 #define PREFETCH(ptr) __builtin_prefetch(static_cast<void*>(ptr)) | |
61 #else | |
62 #define PREFETCH(ptr) | |
63 #endif | |
59 #elif OS(WIN) | 64 #elif OS(WIN) |
60 #include <windows.h> | 65 #include <windows.h> |
66 #include <xmmintrin.h> | |
67 #define PREFETCH(ptr) _mm_prefetch((char*)(ptr), _MM_HINT_T0) | |
haraken
2014/12/15 15:42:55
I think we should add these macros to wtf/, or at
61 #endif | 68 #endif |
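haraken's note above suggests hoisting these prefetch macros into wtf/ so both platform branches share a single definition. A minimal sketch of what such a shared header could look like, assuming a hypothetical wtf/Prefetch.h and the existing COMPILER() guards from wtf/Compiler.h; the header name, macro name, and hint choice are not part of this patch:

    // Hypothetical wtf/Prefetch.h sketched per the review suggestion; not part of this patch.
    #ifndef WTF_Prefetch_h
    #define WTF_Prefetch_h

    #include "wtf/Compiler.h"

    #if COMPILER(GCC)
    // GCC/Clang provide a portable prefetch builtin.
    #define WTF_PREFETCH(ptr) __builtin_prefetch(static_cast<void*>(ptr))
    #elif COMPILER(MSVC)
    #include <xmmintrin.h>
    // MSVC: prefetch the cache line into all cache levels (T0 hint).
    #define WTF_PREFETCH(ptr) _mm_prefetch(reinterpret_cast<char*>(ptr), _MM_HINT_T0)
    #else
    // No-op fallback where no prefetch intrinsic is available.
    #define WTF_PREFETCH(ptr)
    #endif

    #endif // WTF_Prefetch_h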
62 | 69 |
63 namespace blink { | 70 namespace blink { |
64 | 71 |
65 #if ENABLE(GC_PROFILE_MARKING) | 72 #if ENABLE(GC_PROFILE_MARKING) |
66 static String classOf(const void* object) | 73 static String classOf(const void* object) |
67 { | 74 { |
68 if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) | 75 if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) |
69 return gcInfo->m_className; | 76 return gcInfo->m_className; |
70 return "unknown"; | 77 return "unknown"; |
(...skipping 600 matching lines...)
671 void ThreadHeap<Header>::updateRemainingAllocationSize() | 678 void ThreadHeap<Header>::updateRemainingAllocationSize() |
672 { | 679 { |
673 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 680 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
674 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 681 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
675 m_lastRemainingAllocationSize = remainingAllocationSize(); | 682 m_lastRemainingAllocationSize = remainingAllocationSize(); |
676 } | 683 } |
677 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 684 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
678 } | 685 } |
679 | 686 |
680 template<typename Header> | 687 template<typename Header> |
681 Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo) | 688 Address ThreadHeap<Header>::outOfLineAllocate(size_t allocationSize, const GCInfo* gcInfo) |
682 { | 689 { |
683 ASSERT(allocationSize > remainingAllocationSize()); | 690 ASSERT(allocationSize > remainingAllocationSize()); |
684 if (allocationSize > blinkPageSize / 2) | 691 if (allocationSize > blinkPageSize / 2) |
685 return allocateLargeObject(allocationSize, gcInfo); | 692 return allocateLargeObject(allocationSize, gcInfo); |
686 | 693 |
687 updateRemainingAllocationSize(); | 694 updateRemainingAllocationSize(); |
688 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); | 695 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); |
689 | 696 |
697 ASSERT(allocationSize >= allocationGranularity); | |
698 Address result = allocateFromFreeList(allocationSize, gcInfo); | |
699 if (result) | |
700 return result; | |
690 setAllocationPoint(nullptr, 0); | 701 setAllocationPoint(nullptr, 0); |
691 ASSERT(allocationSize >= allocationGranularity); | 702 if (coalesce(allocationSize)) { |
692 if (allocateFromFreeList(allocationSize)) | 703 result = allocateFromFreeList(allocationSize, gcInfo); |
693 return allocate(payloadSize, gcInfo); | 704 if (result) |
694 if (coalesce(allocationSize) && allocateFromFreeList(allocationSize)) | 705 return result; |
695 return allocate(payloadSize, gcInfo); | 706 } |
696 | 707 |
697 addPageToHeap(gcInfo); | 708 addPageToHeap(gcInfo); |
698 bool success = allocateFromFreeList(allocationSize); | 709 result = allocateFromFreeList(allocationSize, gcInfo); |
699 RELEASE_ASSERT(success); | 710 RELEASE_ASSERT(result); |
700 return allocate(payloadSize, gcInfo); | 711 return result; |
701 } | 712 } |
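To make the two-column diff above easier to follow: the reworked slow path now consults the free list before abandoning the current bump-allocation area. A condensed restatement of the new control flow using the same names as the patch (illustrative only; the GC-scheduling and bookkeeping calls are elided):

    // Condensed sketch of the new outOfLineAllocate() ordering shown above.
    Address outOfLineAllocateSketch(size_t allocationSize, const GCInfo* gcInfo)
    {
        // 1. Very large requests bypass normal heap pages entirely.
        if (allocationSize > blinkPageSize / 2)
            return allocateLargeObject(allocationSize, gcInfo);
        // 2. Try the free list while keeping the current allocation area intact.
        if (Address result = allocateFromFreeList(allocationSize, gcInfo))
            return result;
        // 3. Give up the allocation area, coalesce free entries, and retry.
        setAllocationPoint(nullptr, 0);
        if (coalesce(allocationSize)) {
            if (Address result = allocateFromFreeList(allocationSize, gcInfo))
                return result;
        }
        // 4. Last resort: grow the heap by a page and allocate from it.
        addPageToHeap(gcInfo);
        Address result = allocateFromFreeList(allocationSize, gcInfo);
        RELEASE_ASSERT(result);
        return result;
    }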
702 | 713 |
703 template<typename Header> | 714 template<typename Header> |
704 FreeListEntry* FreeList<Header>::takeEntry(size_t allocationSize) | 715 Address ThreadHeap<Header>::allocateFromFreeListEntry(FreeListEntry* entry, size_t allocationSize, const GCInfo* gcInfo) |
haraken
2014/12/15 15:42:55
I'd prefer inline this method in the (only one) ca
sof
2014/12/15 16:25:11
I'd hope to reuse it for quicklists, but why not f
sof
2014/12/15 21:36:40
Done.
705 { | 716 { |
706 size_t bucketSize = 1 << m_biggestFreeListIndex; | 717 ASSERT(entry->size() >= allocationSize); |
707 int i = m_biggestFreeListIndex; | 718 if (entry->size() > allocationSize) |
709 for (; i > 0; i--, bucketSize >>= 1) { | 719 addToFreeList(entry->address() + allocationSize, entry->size() - allocationSize); |
709 if (bucketSize < allocationSize) { | 720 Heap::increaseAllocatedObjectSize(allocationSize); |
710 // A FreeListEntry for bucketSize might be larger than allocationSize. | 720 Heap::increaseAllocatedObjectSize(allocationSize); |
711 // FIXME: We check only the first FreeListEntry because searching | 722 } |
712 // the entire list is costly. | 723 |
713 if (!m_freeLists[i] || m_freeLists[i]->size() < allocationSize) | 724 static bool useFirstFitForHeap(int heapIndex) |
haraken
2014/12/15 15:42:55
useFirstFitForHeap => shouldUseBestFit ?
sof
2014/12/15 21:36:40
shouldUseFirstFitForHeap()
714 break; | 725 { |
715 } | 726 if (heapIndex >= VectorBackingHeap && heapIndex <= HashTableBackingHeap) |
haraken
2014/12/15 15:42:55
Add a comment about why we should use a best-fit s
sof
2014/12/15 21:36:40
Done.
716 if (FreeListEntry* entry = m_freeLists[i]) { | 727 return true; |
717 m_biggestFreeListIndex = i; | 728 if (heapIndex >= VectorBackingHeapNonFinalized && heapIndex <= HashTableBackingHeapNonFinalized) |
718 entry->unlink(&m_freeLists[i]); | 729 return true; |
719 return entry; | 730 |
720 } | 731 return false; |
721 } | |
722 m_biggestFreeListIndex = i; | |
723 return nullptr; | |
724 } | 732 } |
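In the thread above, sof renames the predicate to shouldUseFirstFitForHeap() and adds the rationale comment haraken asked for. The exact wording that landed is not shown in this excerpt; a plausible sketch of that follow-up, with the rationale as suggested by the discussion:

    // Sketch only; the final name and comment wording in the landed patch may differ.
    static bool shouldUseFirstFitForHeap(int heapIndex)
    {
        // Vector and hash table backing stores are repeatedly expanded and
        // released, so the free lists for these heaps tend to hold entries of
        // roughly the sizes being requested. Taking the first entry from a
        // bucket guaranteed to be large enough (first fit) reuses those
        // backings directly instead of carving up the bump-allocation area.
        return (heapIndex >= VectorBackingHeap && heapIndex <= HashTableBackingHeap)
            || (heapIndex >= VectorBackingHeapNonFinalized && heapIndex <= HashTableBackingHeapNonFinalized);
    }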
725 | 733 |
726 template<typename Header> | 734 template<typename Header> |
727 bool ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize) | 735 Address ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize, const GC Info* gcInfo) |
haraken
2014/12/15 15:42:56
Let's add a comment about the allocation scheme.
sof
2014/12/15 21:36:40
Done.
728 { | 736 { |
729 ASSERT(!hasCurrentAllocationArea()); | 737 int bucket = FreeList<Header>::bucketIndexForSize(allocationSize) + 1; |
haraken
2014/12/15 15:42:55
Nit: I'm not sure if this matters for performance,
sof
2014/12/15 16:25:11
Thought you said last week that outOfLineAllocate(
haraken
2014/12/15 16:38:18
Right, so it wouldn't be worth doing. Ignore my co
730 if (FreeListEntry* entry = m_freeList.takeEntry(allocationSize)) { | 738 if (bucket <= m_freeList.m_biggestFreeListIndex && useFirstFitForHeap(m_index)) { |
731 setAllocationPoint(entry->address(), entry->size()); | 739 if (FreeListEntry* entry = m_freeList.m_freeLists[bucket]) { |
732 ASSERT(hasCurrentAllocationArea()); | 740 PREFETCH(entry->address()); |
haraken
2014/12/15 15:42:55
Does this matter for performance?
- If no, let's
sof
2014/12/15 21:36:40
It improves perf by 0.5-1% on a handful of shadow_
haraken
2014/12/16 02:26:33
Ah, it wouldn't be trivial to utilize prefetching
733 ASSERT(remainingAllocationSize() >= allocationSize); | 741 entry->unlink(&m_freeList.m_freeLists[bucket]); |
734 return true; | 742 if (!m_freeList.m_freeLists[bucket] && bucket == m_freeList.m_biggestFreeListIndex) { |
743 // Recalculate the biggest index of non-empty buckets. | |
744 int maxIndex = m_freeList.m_biggestFreeListIndex; | |
haraken
2014/12/15 15:42:56
int maxIndex = m_freeList.m_biggestFreeListIndex -
sof
2014/12/15 21:36:40
Done.
745 for (; maxIndex >= 0 && !m_freeList.m_freeLists[maxIndex]; maxIndex--) { } | |
746 m_freeList.m_biggestFreeListIndex = maxIndex < 0 ? 0 : maxIndex; | |
747 } | |
748 // Allocate into the freelist block without disturbing the current allocation area. | |
haraken
2014/12/15 15:42:56
I do agree that the best fit scheme works fine for
sof
2014/12/15 16:25:11
What if you have a fairly big slab left, but you s
haraken
2014/12/15 16:38:18
Hmm, I'm getting a bit confused.
You're at this p
sof
2014/12/15 16:52:52
It does matter in this case. What if the backing s
haraken
2014/12/15 17:03:32
Thanks, I'm 80% convinced :) Either way let's land
749 return allocateFromFreeListEntry(entry, allocationSize, gcInfo); | |
750 } | |
751 // Failed to find a first-fit freelist entry; fall into the standard case of | |
752 // chopping off the largest free block and bump allocate from it. | |
735 } | 753 } |
736 return false; | 754 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; |
755 int i = m_freeList.m_biggestFreeListIndex; | |
haraken
2014/12/15 15:42:56
i => index
sof
2014/12/15 21:36:40
Done.
756 for (; i > 0; i--, bucketSize >>= 1) { | |
757 FreeListEntry* entry = m_freeList.m_freeLists[i]; | |
758 if (allocationSize > bucketSize) { | |
759 // Final bucket candidate; check initial entry if it is able | |
760 // to service this allocation. Do not perform a linear scan, | |
761 // as it is considered too costly. | |
762 if (!entry || entry->size() < allocationSize) | |
763 break; | |
764 } | |
765 if (entry) { | |
766 PREFETCH(entry->address()); | |
767 entry->unlink(&m_freeList.m_freeLists[i]); | |
768 setAllocationPoint(entry->address(), entry->size()); | |
769 ASSERT(hasCurrentAllocationArea()); | |
770 ASSERT(remainingAllocationSize() >= allocationSize); | |
771 m_freeList.m_biggestFreeListIndex = i; | |
772 return allocateSize(allocationSize, gcInfo); | |
773 } | |
774 } | |
775 m_freeList.m_biggestFreeListIndex = i; | |
776 return nullptr; | |
737 } | 777 } |
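haraken's request for a comment on the allocation scheme (thread above) comes down to the bucket math the new code relies on. Assuming bucketIndexForSize() is the usual floor-log2 bucketing, which is consistent with the bucketSize = 1 << index walk above, the invariant is roughly as sketched below; this is an illustration of the assumption, not code from the patch:

    // Illustrative sketch of the assumed bucket invariant.
    // Bucket i holds free entries whose size is in [2^i, 2^(i+1)), so every
    // entry in bucket bucketIndexForSize(size) + 1 is large enough for `size`
    // without scanning the entry list.
    #include <cstddef>

    static int bucketIndexForSizeSketch(size_t size)
    {
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index; // floor(log2(size)) for size > 0
    }

    // Example: a 48-byte request maps to bucket 5 (sizes 32..63); bucket 6
    // holds entries of 64..127 bytes, any of which can serve the request.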
738 | 778 |
739 #if ENABLE(ASSERT) | 779 #if ENABLE(ASSERT) |
740 template<typename Header> | 780 template<typename Header> |
741 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) | 781 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) |
742 { | 782 { |
743 // Check that a large object is blinkPageSize aligned (modulo the osPageSize | 783 // Check that a large object is blinkPageSize aligned (modulo the osPageSize |
744 // for the guard page). | 784 // for the guard page). |
745 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); | 785 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); |
746 } | 786 } |
(...skipping 2195 matching lines...)
2942 bool Heap::s_shutdownCalled = false; | 2982 bool Heap::s_shutdownCalled = false; |
2943 bool Heap::s_lastGCWasConservative = false; | 2983 bool Heap::s_lastGCWasConservative = false; |
2944 FreePagePool* Heap::s_freePagePool; | 2984 FreePagePool* Heap::s_freePagePool; |
2945 OrphanedPagePool* Heap::s_orphanedPagePool; | 2985 OrphanedPagePool* Heap::s_orphanedPagePool; |
2946 Heap::RegionTree* Heap::s_regionTree = nullptr; | 2986 Heap::RegionTree* Heap::s_regionTree = nullptr; |
2947 size_t Heap::s_allocatedObjectSize = 0; | 2987 size_t Heap::s_allocatedObjectSize = 0; |
2948 size_t Heap::s_allocatedSpace = 0; | 2988 size_t Heap::s_allocatedSpace = 0; |
2949 size_t Heap::s_markedObjectSize = 0; | 2989 size_t Heap::s_markedObjectSize = 0; |
2950 | 2990 |
2951 } // namespace blink | 2991 } // namespace blink |