Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index fb73381446f1fa96c9e0ffac812c603b15e9e05c..b525712cecbedb03c0cd70cfd4f1b1396dd03f9e 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -641,7 +641,8 @@ FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* pa
 template<typename Header>
 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
     : m_currentAllocationPoint(0)
-    , m_remainingAllocationSize(0)
+    , m_allocationLimit(0)
+    , m_lastRemainingAllocationSize(0)
     , m_firstPage(0)
     , m_firstLargeHeapObject(0)
     , m_firstPageAllocatedDuringSweeping(0)
@@ -682,6 +683,14 @@ template<typename Header>
 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
 {
     size_t allocationSize = allocationSizeFromSize(size);
+    ASSERT(allocationSize > remainingAllocationSize());
+    if (allocationSize > HeapPage<Header>::payloadSize() / 2)
+        return allocateLargeObject(allocationSize, gcInfo);
+
+    if (remainingAllocationSize() > 0)
+        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
+    // This also updates the stats.
+    setAllocationPoint(0, 0);
     if (threadState()->shouldGC()) {
         if (threadState()->shouldForceConservativeGC())
             Heap::collectGarbage(ThreadState::HeapPointersOnStack);
@@ -717,13 +726,6 @@ template<typename Header>
 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
 {
     ASSERT(minSize >= allocationGranularity);
-    if (remainingAllocationSize() >= minSize)
-        return;
-
-    if (remainingAllocationSize() > 0) {
-        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
-        setAllocationPoint(0, 0);
-    }
     if (allocateFromFreeList(minSize))
         return;
     if (coalesce(minSize) && allocateFromFreeList(minSize))
@@ -913,7 +915,7 @@ bool ThreadHeap<Header>::coalesce(size_t minSize)
             ASSERT(basicHeader->size() < blinkPagePayloadSize());
             if (basicHeader->isPromptlyFreed()) {
-                stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->payloadSize());
+                stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
                 size_t size = basicHeader->size();
                 ASSERT(size >= sizeof(Header));
 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
@@ -978,6 +980,12 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf
 #if defined(ADDRESS_SANITIZER)
     allocationSize += allocationGranularity;
 #endif
+
+    // Update stats before checking if we should GC.
+    if (m_lastRemainingAllocationSize != remainingAllocationSize()) {
+        stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
+        m_lastRemainingAllocationSize = remainingAllocationSize();
+    }
     if (m_threadState->shouldGC())
         m_threadState->setGCRequested();
     m_threadState->shouldFlushHeapDoesNotContainCache();
@@ -996,7 +1004,7 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf
     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
     largeObject->link(&m_firstLargeHeapObject);
     stats().increaseAllocatedSpace(largeObject->size());
-    stats().increaseObjectSpace(largeObject->payloadSize());
+    stats().increaseObjectSpace(largeObject->size());
     return result;
 }
@@ -1358,7 +1366,7 @@ void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
         if (current->isMarked()) {
             stats->increaseAllocatedSpace(current->size());
-            stats->increaseObjectSpace(current->payloadSize());
+            stats->increaseObjectSpace(current->size());
             current->unmark();
             previousNext = &current->m_next;
             current = current->next();
@@ -1509,7 +1517,7 @@ void HeapPage<Header>::getStats(HeapStats& stats)
     do {
         Header* header = reinterpret_cast<Header*>(headerAddress);
         if (!header->isFree())
-            stats.increaseObjectSpace(header->payloadSize());
+            stats.increaseObjectSpace(header->size());
         ASSERT(header->size() < blinkPagePayloadSize());
         headerAddress += header->size();
         ASSERT(headerAddress <= end());
@@ -1573,7 +1581,7 @@ void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
             heap->addToFreeList(startOfGap, headerAddress - startOfGap);
             header->unmark();
             headerAddress += header->size();
-            stats->increaseObjectSpace(header->payloadSize());
+            stats->increaseObjectSpace(header->size());
             startOfGap = headerAddress;
         }
         if (startOfGap != end())
@@ -1802,7 +1810,7 @@ template<typename Header>
 void LargeHeapObject<Header>::getStats(HeapStats& stats)
 {
     stats.increaseAllocatedSpace(size());
-    stats.increaseObjectSpace(payloadSize());
+    stats.increaseObjectSpace(size());
 }
 #if ENABLE(GC_PROFILE_HEAP)