Index: Source/platform/heap/Heap.h |
diff --git a/Source/platform/heap/Heap.h b/Source/platform/heap/Heap.h |
index 4d10b2fc376de5b42420c025b53c9f188e0abcc6..59d97971102df2600770f2c769e24e1efbc0af4f 100644 |
--- a/Source/platform/heap/Heap.h |
+++ b/Source/platform/heap/Heap.h |
@@ -840,14 +840,21 @@ private: |
static size_t allocationSizeFromSize(size_t); |
PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
- size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
+ size_t remainingAllocationSize() const |
+ { |
+ RELEASE_ASSERT(m_allocationLimit >= m_currentAllocationPoint); |
+ return static_cast<size_t>(m_allocationLimit - m_currentAllocationPoint); |
+ } |
bool ownsNonEmptyAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
void setAllocationPoint(Address point, size_t size) |
{ |
ASSERT(!point || heapPageFromAddress(point)); |
ASSERT(size <= HeapPage<Header>::payloadSize()); |
m_currentAllocationPoint = point; |
- m_remainingAllocationSize = size; |
+ m_allocationLimit = point + size; |
+ if (m_lastRemainingAllocationSize != remainingAllocationSize()) |
+ stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize()); |
+ m_lastRemainingAllocationSize = remainingAllocationSize(); |
} |
void ensureCurrentAllocation(size_t, const GCInfo*); |
bool allocateFromFreeList(size_t); |
@@ -865,7 +872,8 @@ private: |
bool coalesce(size_t); |
Address m_currentAllocationPoint; |
- size_t m_remainingAllocationSize; |
+ Address m_allocationLimit; |
+ size_t m_lastRemainingAllocationSize; |
HeapPage<Header>* m_firstPage; |
LargeHeapObject<Header>* m_firstLargeHeapObject; |
@@ -1399,14 +1407,14 @@ size_t FinalizedHeapObjectHeader::payloadSize() |
template<typename Header> |
size_t ThreadHeap<Header>::allocationSizeFromSize(size_t size) |
{ |
+ size_t allocationSize = size + sizeof(Header); |
Erik Corry
2014/10/13 13:20:32
You can't move this above the release assert, because the addition can overflow before the size has been checked. [comment truncated in extraction — reconstructed from context; the follow-up patch makes the RELEASE_ASSERT check allocationSize itself]
haraken
2014/10/14 10:43:07
Thanks for catching this, done.
|
+ |
// Check the size before computing the actual allocation size. The |
// allocation size calculation can overflow for large sizes and |
// the check therefore has to happen before any calculation on the |
// size. |
- RELEASE_ASSERT(size < maxHeapObjectSize); |
+ RELEASE_ASSERT(allocationSize < maxHeapObjectSize); |
- // Add space for header. |
- size_t allocationSize = size + sizeof(Header); |
// Align size with allocation granularity. |
allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
return allocationSize; |
@@ -1416,26 +1424,24 @@ template<typename Header> |
Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
{ |
size_t allocationSize = allocationSizeFromSize(size); |
- bool isLargeObject = allocationSize > blinkPageSize / 2; |
- if (isLargeObject) |
- return allocateLargeObject(allocationSize, gcInfo); |
- if (m_remainingAllocationSize < allocationSize) |
- return outOfLineAllocate(size, gcInfo); |
- Address headerAddress = m_currentAllocationPoint; |
- m_currentAllocationPoint += allocationSize; |
- m_remainingAllocationSize -= allocationSize; |
- Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
- size_t payloadSize = allocationSize - sizeof(Header); |
- stats().increaseObjectSpace(payloadSize); |
- Address result = headerAddress + sizeof(*header); |
- ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
- // Unpoison the memory used for the object (payload). |
- ASAN_UNPOISON_MEMORY_REGION(result, payloadSize); |
+ Address nextAllocationPoint = m_currentAllocationPoint + allocationSize; |
+ if (LIKELY(nextAllocationPoint <= m_allocationLimit)) { |
+ Address headerAddress = m_currentAllocationPoint; |
+ m_currentAllocationPoint = nextAllocationPoint; |
+ Header* header = new (NotNull, headerAddress) Header(allocationSize, gcInfo); |
+ Address result = headerAddress + sizeof(*header); |
+ ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
+ |
+ // Unpoison the memory used for the object (payload). |
+ ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); |
#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
- memset(result, 0, payloadSize); |
+ memset(result, 0, allocationSize - sizeof(Header)); |
#endif |
- ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); |
- return result; |
+ ASSERT(heapPageFromAddress(headerAddress + allocationSize - 1)); |
+ return result; |
+ } |
+ ASSERT(allocationSize > remainingAllocationSize()); |
+ return outOfLineAllocate(size, gcInfo); |
} |
template<typename T, typename HeapTraits> |
@@ -1444,8 +1450,7 @@ Address Heap::allocate(size_t size) |
ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
ASSERT(state->isAllocationAllowed()); |
const GCInfo* gcInfo = GCInfoTrait<T>::get(); |
- int heapIndex = HeapTraits::index(gcInfo->hasFinalizer()); |
- BaseHeap* heap = state->heap(heapIndex); |
+ BaseHeap* heap = state->heap(gcInfo->heapIndex()); |
Erik Corry
2014/10/13 13:20:32
Why is this faster?
The HeapTraits should be known statically, so the compiler should already be able to resolve the heap index at compile time. [comment truncated in extraction — reconstructed from context]
haraken
2014/10/14 10:43:07
You're right. I confirmed that the assembly is the same with either form. [comment truncated in extraction — reconstructed from context]
|
return static_cast<typename HeapTraits::HeapType*>(heap)->allocate(size, gcInfo); |
} |