Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index 9dddb4a39bcc2d0e7c440d37ba35ea3befd19fbe..4ca81cd8e8265a45822fbcf2bd395784859506ad 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -584,6 +584,7 @@ template<typename Header>
 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
     : m_currentAllocationPoint(0)
     , m_remainingAllocationSize(0)
+    , m_lastRemainingAllocationSize(0)
     , m_firstPage(0)
     , m_firstLargeHeapObject(0)
     , m_firstPageAllocatedDuringSweeping(0)
@@ -625,6 +626,14 @@ template<typename Header>
 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
 {
     size_t allocationSize = allocationSizeFromSize(size);
+    ASSERT(allocationSize > remainingAllocationSize());
+    if (allocationSize > HeapPage<Header>::payloadSize() / 2)
+        return allocateLargeObject(allocationSize, gcInfo);
+
+    if (remainingAllocationSize() > 0)
+        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
+    // This also updates the stats.
+    setAllocationPoint(0, 0);
     if (threadState()->shouldGC()) {
         if (threadState()->shouldForceConservativeGC())
             Heap::collectGarbage(ThreadState::HeapPointersOnStack);
@@ -660,13 +669,6 @@ template<typename Header>
 void ThreadHeap<Header>::ensureCurrentAllocation(size_t minSize, const GCInfo* gcInfo)
 {
     ASSERT(minSize >= allocationGranularity);
-    if (remainingAllocationSize() >= minSize)
-        return;
-
-    if (remainingAllocationSize() > 0) {
-        addToFreeList(currentAllocationPoint(), remainingAllocationSize());
-        setAllocationPoint(0, 0);
-    }
     if (allocateFromFreeList(minSize))
         return;
     if (coalesce(minSize) && allocateFromFreeList(minSize))
@@ -856,7 +858,7 @@ bool ThreadHeap<Header>::coalesce(size_t minSize)
             ASSERT(basicHeader->size() < blinkPagePayloadSize());
             if (basicHeader->isPromptlyFreed()) {
-                stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->payloadSize());
+                stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
                 size_t size = basicHeader->size();
                 ASSERT(size >= sizeof(Header));
 #if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
@@ -921,6 +923,12 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf
 #if defined(ADDRESS_SANITIZER)
     allocationSize += allocationGranularity;
 #endif
+
+    // Update stats before checking if we should GC.
+    if (m_lastRemainingAllocationSize != m_remainingAllocationSize) {
+        stats().increaseObjectSpace(m_lastRemainingAllocationSize - m_remainingAllocationSize);
+        m_lastRemainingAllocationSize = m_remainingAllocationSize;
+    }
     if (threadState()->shouldGC())
         threadState()->setGCRequested();
     Heap::flushHeapDoesNotContainCache();
@@ -938,7 +946,7 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf
     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
     largeObject->link(&m_firstLargeHeapObject);
     stats().increaseAllocatedSpace(largeObject->size());
-    stats().increaseObjectSpace(largeObject->payloadSize());
+    stats().increaseObjectSpace(largeObject->size());
     return result;
 }
@@ -1272,7 +1280,7 @@ void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
     for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
         if (current->isMarked()) {
             stats->increaseAllocatedSpace(current->size());
-            stats->increaseObjectSpace(current->payloadSize());
+            stats->increaseObjectSpace(current->size());
             current->unmark();
             previousNext = &current->m_next;
             current = current->next();
@@ -1423,7 +1431,7 @@ void HeapPage<Header>::getStats(HeapStats& stats)
     do {
         Header* header = reinterpret_cast<Header*>(headerAddress);
         if (!header->isFree())
-            stats.increaseObjectSpace(header->payloadSize());
+            stats.increaseObjectSpace(header->size());
         ASSERT(header->size() < blinkPagePayloadSize());
         headerAddress += header->size();
         ASSERT(headerAddress <= end());
@@ -1487,7 +1495,7 @@ void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
             heap->addToFreeList(startOfGap, headerAddress - startOfGap);
         header->unmark();
         headerAddress += header->size();
-        stats->increaseObjectSpace(header->payloadSize());
+        stats->increaseObjectSpace(header->size());
         startOfGap = headerAddress;
     }
     if (startOfGap != end())
@@ -1716,7 +1724,7 @@ template<typename Header>
 void LargeHeapObject<Header>::getStats(HeapStats& stats)
 {
     stats.increaseAllocatedSpace(size());
-    stats.increaseObjectSpace(payloadSize());
+    stats.increaseObjectSpace(size());
 }
 #if ENABLE(GC_PROFILE_HEAP)