Index: Source/platform/heap/Heap.cpp |
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp |
index 3b877a5d5ad7cbfa1969bc62d6ecf1033f3f44b0..6a05c30ac78c5391796b1d1fd391f9e41b75f83e 100644 |
--- a/Source/platform/heap/Heap.cpp |
+++ b/Source/platform/heap/Heap.cpp |
@@ -534,37 +534,43 @@ void LargeObject::markUnmarkedObjectsDead() |
header->markDead(); |
} |
-void LargeObject::removeFromHeap(ThreadHeap* heap) |
+void LargeObject::removeFromHeap() |
+{ |
+ static_cast<ThreadHeapForLargeObject*>(heap())->freeLargeObject(this); |
+} |
+ |
+FreeList::FreeList() |
+ : m_biggestFreeListIndex(0) |
{ |
- heap->freeLargeObject(this); |
} |
ThreadHeap::ThreadHeap(ThreadState* state, int index) |
- : m_currentAllocationPoint(nullptr) |
- , m_remainingAllocationSize(0) |
- , m_lastRemainingAllocationSize(0) |
- , m_firstPage(nullptr) |
- , m_firstLargeObject(nullptr) |
+ : m_firstPage(nullptr) |
, m_firstUnsweptPage(nullptr) |
- , m_firstUnsweptLargeObject(nullptr) |
, m_threadState(state) |
, m_index(index) |
- , m_promptlyFreedSize(0) |
{ |
clearFreeLists(); |
} |
-FreeList::FreeList() |
- : m_biggestFreeListIndex(0) |
+ThreadHeapForHeapPage::ThreadHeapForHeapPage(ThreadState* state, int index) |
+ : ThreadHeap(state, index) |
+ , m_currentAllocationPoint(nullptr) |
+ , m_remainingAllocationSize(0) |
+ , m_lastRemainingAllocationSize(0) |
+ , m_promptlyFreedSize(0) |
+{ |
+} |
+ |
+ThreadHeapForLargeObject::ThreadHeapForLargeObject(ThreadState* state, int index) |
+ : ThreadHeap(state, index) |
{ |
} |
ThreadHeap::~ThreadHeap() |
{ |
ASSERT(!m_firstPage); |
- ASSERT(!m_firstLargeObject); |
ASSERT(!m_firstUnsweptPage); |
- ASSERT(!m_firstUnsweptLargeObject); |
} |
void ThreadHeap::cleanupPages() |
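Note: the hunk above replaces ThreadHeap's flat member list with two subclasses. Below is a rough sketch of the hierarchy the rest of this file now assumes, reconstructed from how the .cpp uses it; the real declarations live in Heap.h, which this diff does not show, so the accessor names, virtual specifiers, and stand-in typedefs are assumptions.

    // Sketch only; reconstructed from the hunks in this file, not taken from Heap.h.
    #include <cstddef>
    #include <cstdint>
    using std::size_t;
    using Address = std::uint8_t*;
    class BaseHeapPage;
    class ThreadState;
    struct FreeList { void clear(); };           // stand-in for the real FreeList

    class ThreadHeap {
    public:
        ThreadHeap(ThreadState*, int index);
        virtual ~ThreadHeap();

        Address lazySweep(size_t allocationSize, size_t gcInfoIndex);
        virtual Address lazySweepPages(size_t allocationSize, size_t gcInfoIndex) = 0;
        virtual void clearFreeLists() { }        // overridden by ThreadHeapForHeapPage

        ThreadState* threadState() { return m_threadState; }
        int heapIndex() const { return m_index; }

    protected:
        BaseHeapPage* m_firstPage;               // now links HeapPages or LargeObjects
        BaseHeapPage* m_firstUnsweptPage;

    private:
        ThreadState* m_threadState;
        int m_index;
    };

    class ThreadHeapForHeapPage : public ThreadHeap {
    public:
        ThreadHeapForHeapPage(ThreadState*, int index);
        Address lazySweepPages(size_t, size_t) override;
        void clearFreeLists() override;
    private:
        // Bump allocation, the free list and promptly-freed bookkeeping move down here.
        Address m_currentAllocationPoint;
        size_t m_remainingAllocationSize;
        size_t m_lastRemainingAllocationSize;
        size_t m_promptlyFreedSize;
        FreeList m_freeList;
    };

    class ThreadHeapForLargeObject : public ThreadHeap {
    public:
        ThreadHeapForLargeObject(ThreadState*, int index);
        Address lazySweepPages(size_t, size_t) override;
        // No bump allocation; large objects are linked onto m_firstPage directly.
    };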
@@ -572,22 +578,15 @@ void ThreadHeap::cleanupPages() |
clearFreeLists(); |
ASSERT(!m_firstUnsweptPage); |
- ASSERT(!m_firstUnsweptLargeObject); |
// Add the ThreadHeap's pages to the orphanedPagePool. |
- for (HeapPage* page = m_firstPage; page; page = page->m_next) { |
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { |
Heap::decreaseAllocatedSpace(blinkPageSize); |
- Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
+ Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); |
} |
m_firstPage = nullptr; |
- |
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->m_next) { |
- Heap::decreaseAllocatedSpace(largeObject->size()); |
- Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
- } |
- m_firstLargeObject = nullptr; |
} |
-void ThreadHeap::updateRemainingAllocationSize() |
+void ThreadHeapForHeapPage::updateRemainingAllocationSize() |
{ |
if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
@@ -596,7 +595,7 @@ void ThreadHeap::updateRemainingAllocationSize() |
ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
} |
-void ThreadHeap::setAllocationPoint(Address point, size_t size) |
+void ThreadHeapForHeapPage::setAllocationPoint(Address point, size_t size) |
{ |
#if ENABLE(ASSERT) |
if (point) { |
@@ -606,21 +605,22 @@ void ThreadHeap::setAllocationPoint(Address point, size_t size) |
ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize()); |
} |
#endif |
- if (hasCurrentAllocationArea()) |
+ if (hasCurrentAllocationArea()) { |
addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
+ } |
updateRemainingAllocationSize(); |
m_currentAllocationPoint = point; |
m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
} |
-Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
+Address ThreadHeapForHeapPage::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
{ |
ASSERT(allocationSize > remainingAllocationSize()); |
ASSERT(allocationSize >= allocationGranularity); |
// 1. If this allocation is big enough, allocate a large object. |
if (allocationSize >= largeObjectSizeThreshold) |
- return allocateLargeObject(allocationSize, gcInfoIndex); |
+ return static_cast<ThreadHeapForLargeObject*>(threadState()->heap(LargeObjectHeap))->allocateLargeObject(allocationSize, gcInfoIndex); |
// 2. Check if we should trigger a GC. |
updateRemainingAllocationSize(); |
@@ -636,7 +636,7 @@ Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
// 5. Lazily sweep pages of this heap until we find a freed area for |
// this allocation or we finish sweeping all pages of this heap. |
- result = lazySweepPages(allocationSize, gcInfoIndex); |
+ result = lazySweep(allocationSize, gcInfoIndex); |
if (result) |
return result; |
@@ -660,7 +660,7 @@ Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
return result; |
} |
-Address ThreadHeap::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) |
+Address ThreadHeapForHeapPage::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) |
{ |
// Try reusing a block from the largest bin. The underlying reasoning |
// being that we want to amortize this slow allocation call by carving |
@@ -695,20 +695,14 @@ void ThreadHeap::prepareForSweep() |
{ |
ASSERT(!threadState()->isInGC()); |
ASSERT(!m_firstUnsweptPage); |
- ASSERT(!m_firstUnsweptLargeObject); |
// Move all pages to a list of unswept pages. |
m_firstUnsweptPage = m_firstPage; |
- m_firstUnsweptLargeObject = m_firstLargeObject; |
m_firstPage = nullptr; |
- m_firstLargeObject = nullptr; |
} |
-Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
+Address ThreadHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex) |
{ |
- ASSERT(!hasCurrentAllocationArea()); |
- ASSERT(allocationSize < largeObjectSizeThreshold); |
- |
// If there are no pages to be swept, return immediately. |
if (!m_firstUnsweptPage) |
return nullptr; |
@@ -723,17 +717,27 @@ Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
return nullptr; |
TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages"); |
- ThreadState::SweepForbiddenScope scope(m_threadState); |
+ ThreadState::SweepForbiddenScope scope(threadState()); |
if (threadState()->isMainThread()) |
ScriptForbiddenScope::enter(); |
+ Address result = lazySweepPages(allocationSize, gcInfoIndex); |
+ |
+ if (threadState()->isMainThread()) |
+ ScriptForbiddenScope::exit(); |
+ return result; |
+} |
+ |
+Address ThreadHeapForHeapPage::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
+{ |
+ ASSERT(!hasCurrentAllocationArea()); |
Address result = nullptr; |
while (m_firstUnsweptPage) { |
- HeapPage* page = m_firstUnsweptPage; |
+ BaseHeapPage* page = m_firstUnsweptPage; |
if (page->isEmpty()) { |
page->unlink(&m_firstUnsweptPage); |
- page->removeFromHeap(this); |
+ page->removeFromHeap(); |
} else { |
// Sweep a page and move the page from m_firstUnsweptPages to |
// m_firstPages. |
@@ -742,67 +746,43 @@ Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
page->link(&m_firstPage); |
page->markAsSwept(); |
+ // For HeapPage, stop lazy sweeping once we find a slot to |
+ // allocate a new object. |
result = allocateFromFreeList(allocationSize, gcInfoIndex); |
- if (result) |
+ if (result) { |
break; |
+ } |
} |
} |
- |
- if (threadState()->isMainThread()) |
- ScriptForbiddenScope::exit(); |
return result; |
} |
-bool ThreadHeap::lazySweepLargeObjects(size_t allocationSize) |
+Address ThreadHeapForLargeObject::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
{ |
- ASSERT(allocationSize >= largeObjectSizeThreshold); |
- |
- // If there are no large objects to be swept, return immediately. |
- if (!m_firstUnsweptLargeObject) |
- return false; |
- |
- RELEASE_ASSERT(threadState()->isSweepingInProgress()); |
- |
- // lazySweepLargeObjects() can be called recursively if finalizers invoked |
- // in page->sweep() allocate memory and the allocation triggers |
- // lazySweepLargeObjects(). This check prevents the sweeping from being |
- // executed recursively. |
- if (threadState()->sweepForbidden()) |
- return false; |
- |
- TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepLargeObjects"); |
- ThreadState::SweepForbiddenScope scope(m_threadState); |
- |
- if (threadState()->isMainThread()) |
- ScriptForbiddenScope::enter(); |
- |
- bool result = false; |
+ Address result = nullptr; |
size_t sweptSize = 0; |
- while (m_firstUnsweptLargeObject) { |
- LargeObject* largeObject = m_firstUnsweptLargeObject; |
- if (largeObject->isEmpty()) { |
- sweptSize += largeObject->size(); |
- largeObject->unlink(&m_firstUnsweptLargeObject); |
- largeObject->removeFromHeap(this); |
- |
- // If we have swept large objects more than allocationSize, |
- // we stop the lazy sweeping. |
+ while (m_firstUnsweptPage) { |
+ BaseHeapPage* page = m_firstUnsweptPage; |
+ if (page->isEmpty()) { |
+ sweptSize += static_cast<LargeObject*>(page)->payloadSize() + sizeof(HeapObjectHeader); |
+ page->unlink(&m_firstUnsweptPage); |
+ page->removeFromHeap(); |
+ // For LargeObject, stop lazy sweeping once we have swept |
+ // more than allocationSize bytes. |
if (sweptSize >= allocationSize) { |
- result = true; |
+ result = doAllocateLargeObject(allocationSize, gcInfoIndex); |
+ ASSERT(result); |
break; |
} |
} else { |
- // Sweep a large object and move the large object from |
- // m_firstUnsweptLargeObjects to m_firstLargeObjects. |
- largeObject->sweep(); |
- largeObject->unlink(&m_firstUnsweptLargeObject); |
- largeObject->link(&m_firstLargeObject); |
- largeObject->markAsSwept(); |
+ // Sweep a page and move the page from m_firstUnsweptPages to |
+ // m_firstPages. |
+ page->sweep(); |
+ page->unlink(&m_firstUnsweptPage); |
+ page->link(&m_firstPage); |
+ page->markAsSwept(); |
} |
} |
- |
- if (threadState()->isMainThread()) |
- ScriptForbiddenScope::exit(); |
return result; |
} |
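The two lazy-sweeping loops above now share one guarded entry point: ThreadHeap::lazySweep() performs the sweep-forbidden and ScriptForbiddenScope handling once and then calls the heap-specific lazySweepPages(), which stops as soon as allocateFromFreeList() finds room (normal pages) or once allocationSize bytes have been reclaimed and doAllocateLargeObject() can run (large objects). A self-contained toy of that template-method shape, purely illustrative and not Blink code:

    #include <cstdio>
    #include <cstddef>

    // Toy model of the split above: the base class owns the shared guard
    // logic, each heap kind supplies its own sweeping loop.
    class ToyHeap {
    public:
        virtual ~ToyHeap() {}
        void* lazySweep(std::size_t allocationSize)
        {
            // Mirrors the sweepForbidden() check: finalizers run during
            // sweeping may allocate, and that allocation must not start
            // sweeping again recursively.
            if (m_sweepForbidden)
                return nullptr;
            m_sweepForbidden = true;
            void* result = lazySweepPages(allocationSize);
            m_sweepForbidden = false;
            return result;
        }
    protected:
        virtual void* lazySweepPages(std::size_t allocationSize) = 0;
    private:
        bool m_sweepForbidden = false;
    };

    class ToyNormalPageHeap : public ToyHeap {
        void* lazySweepPages(std::size_t allocationSize) override
        {
            // Real code: sweep pages until allocateFromFreeList() succeeds.
            std::printf("sweep normal pages until %zu bytes fit\n", allocationSize);
            return nullptr;
        }
    };

    class ToyLargeObjectHeap : public ToyHeap {
        void* lazySweepPages(std::size_t allocationSize) override
        {
            // Real code: sweep large objects until >= allocationSize bytes are
            // reclaimed, then allocate with doAllocateLargeObject().
            std::printf("sweep large objects until %zu bytes are reclaimed\n", allocationSize);
            return nullptr;
        }
    };

    int main()
    {
        ToyNormalPageHeap normal;
        ToyLargeObjectHeap large;
        normal.lazySweep(128);
        large.lazySweep(1u << 16);
    }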
@@ -814,12 +794,11 @@ void ThreadHeap::completeSweep() |
if (threadState()->isMainThread()) |
ScriptForbiddenScope::enter(); |
- // Sweep normal pages. |
while (m_firstUnsweptPage) { |
- HeapPage* page = m_firstUnsweptPage; |
+ BaseHeapPage* page = m_firstUnsweptPage; |
if (page->isEmpty()) { |
page->unlink(&m_firstUnsweptPage); |
- page->removeFromHeap(this); |
+ page->removeFromHeap(); |
} else { |
// Sweep a page and move the page from m_firstUnsweptPages to |
// m_firstPages. |
@@ -830,56 +809,21 @@ void ThreadHeap::completeSweep() |
} |
} |
- // Sweep large objects. |
- while (m_firstUnsweptLargeObject) { |
- LargeObject* largeObject = m_firstUnsweptLargeObject; |
- if (largeObject->isEmpty()) { |
- largeObject->unlink(&m_firstUnsweptLargeObject); |
- largeObject->removeFromHeap(this); |
- } else { |
- // Sweep a large object and move the large object from |
- // m_firstUnsweptLargeObjects to m_firstLargeObjects. |
- largeObject->sweep(); |
- largeObject->unlink(&m_firstUnsweptLargeObject); |
- largeObject->link(&m_firstLargeObject); |
- largeObject->markAsSwept(); |
- } |
- } |
- |
if (threadState()->isMainThread()) |
ScriptForbiddenScope::exit(); |
} |
#if ENABLE(ASSERT) |
-static bool isLargeObjectAligned(LargeObject* largeObject, Address address) |
-{ |
- // Check that a large object is blinkPageSize aligned (modulo the osPageSize |
- // for the guard page). |
- return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); |
-} |
-#endif |
- |
-#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING) |
BaseHeapPage* ThreadHeap::findPageFromAddress(Address address) |
{ |
- for (HeapPage* page = m_firstPage; page; page = page->next()) { |
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { |
if (page->contains(address)) |
return page; |
} |
- for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
+ for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
if (page->contains(address)) |
return page; |
} |
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
- ASSERT(isLargeObjectAligned(largeObject, address)); |
- if (largeObject->contains(address)) |
- return largeObject; |
- } |
- for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; largeObject = largeObject->next()) { |
- ASSERT(isLargeObjectAligned(largeObject, address)); |
- if (largeObject->contains(address)) |
- return largeObject; |
- } |
return nullptr; |
} |
#endif |
@@ -892,7 +836,7 @@ void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
size_t previousPageCount = info->pageCount; |
json->beginArray("pages"); |
- for (HeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { |
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { |
// FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. |
if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { |
json->beginArray(); |
@@ -905,14 +849,6 @@ void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
} |
json->endArray(); |
- json->beginArray("largeObjects"); |
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
- json->beginDictionary(); |
- largeObject->snapshot(json, info); |
- json->endDictionary(); |
- } |
- json->endArray(); |
- |
json->setInteger("pageCount", info->pageCount - previousPageCount); |
} |
#endif |
@@ -951,7 +887,7 @@ void FreeList::addToFreeList(Address address, size_t size) |
m_biggestFreeListIndex = index; |
} |
-bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize) |
+bool ThreadHeapForHeapPage::expandObject(HeapObjectHeader* header, size_t newSize) |
{ |
// It's possible that Vector requests a smaller expanded size because |
// Vector::shrinkCapacity can set a capacity smaller than the actual payload |
@@ -975,7 +911,7 @@ bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize) |
return false; |
} |
-void ThreadHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) |
+void ThreadHeapForHeapPage::shrinkObject(HeapObjectHeader* header, size_t newSize) |
{ |
ASSERT(header->payloadSize() > newSize); |
size_t allocationSize = allocationSizeFromSize(newSize); |
@@ -998,9 +934,9 @@ void ThreadHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) |
} |
} |
-void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header) |
+void ThreadHeapForHeapPage::promptlyFreeObject(HeapObjectHeader* header) |
{ |
- ASSERT(!m_threadState->sweepForbidden()); |
+ ASSERT(!threadState()->sweepForbidden()); |
header->checkHeader(); |
Address address = reinterpret_cast<Address>(header); |
Address payload = header->payload(); |
@@ -1010,7 +946,7 @@ void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header) |
ASSERT(pageFromObject(address) == findPageFromAddress(address)); |
{ |
- ThreadState::SweepForbiddenScope forbiddenScope(m_threadState); |
+ ThreadState::SweepForbiddenScope forbiddenScope(threadState()); |
header->finalize(payload, payloadSize); |
if (address + size == m_currentAllocationPoint) { |
m_currentAllocationPoint = address; |
@@ -1030,7 +966,7 @@ void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header) |
m_promptlyFreedSize += size; |
} |
-bool ThreadHeap::coalesce() |
+bool ThreadHeapForHeapPage::coalesce() |
{ |
// Don't coalesce heaps if there are not enough promptly freed entries |
// to be coalesced. |
@@ -1041,7 +977,7 @@ bool ThreadHeap::coalesce() |
if (m_promptlyFreedSize < 1024 * 1024) |
return false; |
- if (m_threadState->sweepForbidden()) |
+ if (threadState()->sweepForbidden()) |
return false; |
ASSERT(!hasCurrentAllocationArea()); |
@@ -1050,7 +986,7 @@ bool ThreadHeap::coalesce() |
// Rebuild free lists. |
m_freeList.clear(); |
size_t freedSize = 0; |
- for (HeapPage* page = m_firstPage; page; page = page->next()) { |
+ for (HeapPage* page = static_cast<HeapPage*>(m_firstPage); page; page = static_cast<HeapPage*>(page->next())) { |
page->clearObjectStartBitMap(); |
Address startOfGap = page->payload(); |
for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) { |
@@ -1091,66 +1027,65 @@ bool ThreadHeap::coalesce() |
return true; |
} |
-Address ThreadHeap::allocateLargeObject(size_t size, size_t gcInfoIndex) |
+Address ThreadHeapForLargeObject::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex) |
{ |
// Caller already added space for object header and rounded up to allocation |
// alignment |
- ASSERT(!(size & allocationMask)); |
+ ASSERT(!(allocationSize & allocationMask)); |
- size_t allocationSize = sizeof(LargeObject) + size; |
+ // 1. Check if we should trigger a GC. |
+ threadState()->scheduleGCOrForceConservativeGCIfNeeded(); |
- // Ensure that there is enough space for alignment. If the header |
- // is not a multiple of 8 bytes we will allocate an extra |
- // headerPadding bytes to ensure it 8 byte aligned. |
- allocationSize += headerPadding(); |
+ // 2. Try to sweep large objects more than allocationSize bytes |
+ // before allocating a new large object. |
+ Address result = lazySweep(allocationSize, gcInfoIndex); |
+ if (result) |
+ return result; |
+ // 3. If we have failed in sweeping allocationSize bytes, |
+ // we complete sweeping before allocating this large object. |
+ threadState()->completeSweep(); |
+ return doAllocateLargeObject(allocationSize, gcInfoIndex); |
+} |
+ |
+Address ThreadHeapForLargeObject::doAllocateLargeObject(size_t allocationSize, size_t gcInfoIndex) |
+{ |
+ size_t largeObjectSize = sizeof(LargeObject) + headerPadding() + allocationSize; |
// If ASan is supported we add allocationGranularity bytes to the allocated |
// space and poison that to detect overflows |
#if defined(ADDRESS_SANITIZER) |
- allocationSize += allocationGranularity; |
+ largeObjectSize += allocationGranularity; |
#endif |
- // 1. Check if we should trigger a GC. |
- updateRemainingAllocationSize(); |
- m_threadState->scheduleGCOrForceConservativeGCIfNeeded(); |
- |
- // 2. Try to sweep large objects more than allocationSize bytes |
- // before allocating a new large object. |
- if (!lazySweepLargeObjects(allocationSize)) { |
- // 3. If we have failed in sweeping allocationSize bytes, |
- // we complete sweeping before allocating this large object. |
- m_threadState->completeSweep(); |
- } |
- |
- m_threadState->shouldFlushHeapDoesNotContainCache(); |
- PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
- m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
+ threadState()->shouldFlushHeapDoesNotContainCache(); |
+ PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); |
+ threadState()->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
Address largeObjectAddress = pageMemory->writableStart(); |
Address headerAddress = largeObjectAddress + sizeof(LargeObject) + headerPadding(); |
#if ENABLE(ASSERT) |
// Verify that the allocated PageMemory is expectedly zeroed. |
- for (size_t i = 0; i < size; ++i) |
+ for (size_t i = 0; i < largeObjectSize; ++i) |
ASSERT(!headerAddress[i]); |
#endif |
ASSERT(gcInfoIndex > 0); |
HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); |
Address result = headerAddress + sizeof(*header); |
ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
- LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, size); |
+ LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, allocationSize); |
header->checkHeader(); |
// Poison the object header and allocationGranularity bytes after the object |
ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
- largeObject->link(&m_firstLargeObject); |
+ largeObject->link(&m_firstPage); |
Heap::increaseAllocatedSpace(largeObject->size()); |
Heap::increaseAllocatedObjectSize(largeObject->size()); |
return result; |
} |
-void ThreadHeap::freeLargeObject(LargeObject* object) |
+void ThreadHeapForLargeObject::freeLargeObject(LargeObject* object) |
{ |
object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); |
Heap::decreaseAllocatedSpace(object->size()); |
@@ -1170,7 +1105,7 @@ void ThreadHeap::freeLargeObject(LargeObject* object) |
// ensures that tracing the dangling pointer in the next global GC just |
// crashes instead of causing use-after-frees. After the next global |
// GC, the orphaned pages are removed. |
- Heap::orphanedPagePool()->addOrphanedPage(m_index, object); |
+ Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); |
} else { |
ASSERT(!ThreadState::current()->isTerminating()); |
PageMemory* memory = object->storage(); |
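For reference, the address arithmetic in doAllocateLargeObject() above gives the layout below; this is a sketch derived from the hunk, not text from the patch:

    // largeObjectAddress = pageMemory->writableStart()
    // headerAddress      = largeObjectAddress + sizeof(LargeObject) + headerPadding()
    // result             = headerAddress + sizeof(HeapObjectHeader)
    //
    //   [ LargeObject | headerPadding() | HeapObjectHeader | payload ][ ASan redzone ]
    //   ^ largeObjectAddress             ^ headerAddress    ^ result
    //
    // largeObjectSize = sizeof(LargeObject) + headerPadding() + allocationSize,
    // where allocationSize already includes the HeapObjectHeader; under
    // ADDRESS_SANITIZER an extra allocationGranularity redzone is added and poisoned.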
@@ -1231,6 +1166,7 @@ PageMemory* FreePagePool::takeFreePage(int index) |
BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap) |
: m_storage(storage) |
, m_heap(heap) |
+ , m_next(nullptr) |
, m_terminating(false) |
, m_swept(true) |
{ |
@@ -1336,7 +1272,7 @@ bool OrphanedPagePool::contains(void* object) |
} |
#endif |
-void ThreadHeap::freePage(HeapPage* page) |
+void ThreadHeapForHeapPage::freePage(HeapPage* page) |
{ |
Heap::decreaseAllocatedSpace(blinkPageSize); |
@@ -1349,18 +1285,18 @@ void ThreadHeap::freePage(HeapPage* page) |
// ensures that tracing the dangling pointer in the next global GC just |
// crashes instead of causing use-after-frees. After the next global |
// GC, the orphaned pages are removed. |
- Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
+ Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); |
} else { |
PageMemory* memory = page->storage(); |
page->~HeapPage(); |
- Heap::freePagePool()->addFreePage(m_index, memory); |
+ Heap::freePagePool()->addFreePage(heapIndex(), memory); |
} |
} |
-void ThreadHeap::allocatePage() |
+void ThreadHeapForHeapPage::allocatePage() |
{ |
- m_threadState->shouldFlushHeapDoesNotContainCache(); |
- PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
+ threadState()->shouldFlushHeapDoesNotContainCache(); |
+ PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); |
// We continue allocating page memory until we succeed in committing one. |
while (!pageMemory) { |
// Allocate a memory region for blinkPagesPerRegion pages that |
@@ -1369,7 +1305,7 @@ void ThreadHeap::allocatePage() |
// [ guard os page | ... payload ... | guard os page ] |
// ^---{ aligned to blink page size } |
PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); |
- m_threadState->allocatedRegionsSinceLastGC().append(region); |
+ threadState()->allocatedRegionsSinceLastGC().append(region); |
// Setup the PageMemory object for each of the pages in the region. |
size_t offset = 0; |
@@ -1383,13 +1319,12 @@ void ThreadHeap::allocatePage() |
else |
delete memory; |
} else { |
- Heap::freePagePool()->addFreePage(m_index, memory); |
+ Heap::freePagePool()->addFreePage(heapIndex(), memory); |
} |
offset += blinkPageSize; |
} |
} |
HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this); |
- |
page->link(&m_firstPage); |
Heap::increaseAllocatedSpace(blinkPageSize); |
@@ -1397,9 +1332,9 @@ void ThreadHeap::allocatePage() |
} |
#if ENABLE(ASSERT) |
-bool ThreadHeap::pagesToBeSweptContains(Address address) |
+bool ThreadHeapForHeapPage::pagesToBeSweptContains(Address address) |
{ |
- for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
+ for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
if (page->contains(address)) |
return true; |
} |
@@ -1411,18 +1346,15 @@ size_t ThreadHeap::objectPayloadSizeForTesting() |
{ |
ASSERT(isConsistentForSweeping()); |
ASSERT(!m_firstUnsweptPage); |
- ASSERT(!m_firstUnsweptLargeObject); |
size_t objectPayloadSize = 0; |
- for (HeapPage* page = m_firstPage; page; page = page->next()) |
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) |
objectPayloadSize += page->objectPayloadSizeForTesting(); |
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
- objectPayloadSize += largeObject->objectPayloadSizeForTesting(); |
return objectPayloadSize; |
} |
#if ENABLE(ASSERT) |
-bool ThreadHeap::isConsistentForSweeping() |
+bool ThreadHeapForHeapPage::isConsistentForSweeping() |
{ |
// A thread heap is consistent for sweeping if none of the pages to be swept |
// contain a freelist block or the current allocation point. |
@@ -1442,15 +1374,9 @@ bool ThreadHeap::isConsistentForSweeping() |
void ThreadHeap::makeConsistentForSweeping() |
{ |
- preparePagesForSweeping(); |
- setAllocationPoint(nullptr, 0); |
clearFreeLists(); |
-} |
- |
-void ThreadHeap::preparePagesForSweeping() |
-{ |
ASSERT(isConsistentForSweeping()); |
- for (HeapPage* page = m_firstPage; page; page = page->next()) |
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) |
page->markAsUnswept(); |
// If a new GC is requested before this thread got around to sweep, |
@@ -1460,8 +1386,8 @@ void ThreadHeap::preparePagesForSweeping() |
// objects. If we trace a dead object we could end up tracing into |
// garbage or the middle of another object via the newly conservatively |
// found object. |
- HeapPage* previousPage = nullptr; |
- for (HeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { |
+ BaseHeapPage* previousPage = nullptr; |
+ for (BaseHeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { |
page->markUnmarkedObjectsDead(); |
ASSERT(!page->hasBeenSwept()); |
} |
@@ -1472,26 +1398,11 @@ void ThreadHeap::preparePagesForSweeping() |
m_firstUnsweptPage = nullptr; |
} |
ASSERT(!m_firstUnsweptPage); |
- |
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
- largeObject->markAsUnswept(); |
- |
- LargeObject* previousLargeObject = nullptr; |
- for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; previousLargeObject = largeObject, largeObject = largeObject->next()) { |
- largeObject->markUnmarkedObjectsDead(); |
- ASSERT(!largeObject->hasBeenSwept()); |
- } |
- if (previousLargeObject) { |
- ASSERT(m_firstUnsweptLargeObject); |
- previousLargeObject->m_next = m_firstLargeObject; |
- m_firstLargeObject = m_firstUnsweptLargeObject; |
- m_firstUnsweptLargeObject = nullptr; |
- } |
- ASSERT(!m_firstUnsweptLargeObject); |
} |
-void ThreadHeap::clearFreeLists() |
+void ThreadHeapForHeapPage::clearFreeLists() |
{ |
+ setAllocationPoint(nullptr, 0); |
sof 2015/02/06 09:46:52: Why is this here?
haraken 2015/02/06 09:59:28: Since setAllocationPoint is a method of ThreadHeap
m_freeList.clear(); |
} |
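On the inline question above: the earlier makeConsistentForSweeping() hunk dropped its explicit setAllocationPoint(nullptr, 0) call but still calls clearFreeLists(), and the bump-allocation state now belongs to ThreadHeapForHeapPage, so its clearFreeLists() override is where the reset lands. A minimal sketch of the resulting call path, assuming clearFreeLists() is virtual as the patch suggests:

    void ThreadHeap::makeConsistentForSweeping()
    {
        clearFreeLists();   // virtual; for normal-page heaps this now also resets
                            // the allocation point, replacing the explicit
                            // setAllocationPoint(nullptr, 0) removed above
        // ... mark swept pages unswept, mark unmarked objects dead ...
    }

    void ThreadHeapForHeapPage::clearFreeLists()
    {
        setAllocationPoint(nullptr, 0);  // returns any open bump-allocation area to
                                         // the free list and updates the accounting
        m_freeList.clear();
    }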
@@ -1515,7 +1426,6 @@ int FreeList::bucketIndexForSize(size_t size) |
HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap) |
: BaseHeapPage(storage, heap) |
- , m_next(nullptr) |
{ |
m_objectStartBitMapComputed = false; |
ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
@@ -1558,7 +1468,7 @@ void HeapPage::sweep() |
ASSERT(header->size() < blinkPagePayloadSize()); |
if (header->isPromptlyFreed()) |
- heap()->decreasePromptlyFreedSize(header->size()); |
+ heapForHeapPage()->decreasePromptlyFreedSize(header->size()); |
if (header->isFree()) { |
size_t size = header->size(); |
// Zero the memory in the free list header to maintain the |
@@ -1591,14 +1501,14 @@ void HeapPage::sweep() |
} |
if (startOfGap != headerAddress) |
- heap()->addToFreeList(startOfGap, headerAddress - startOfGap); |
+ heapForHeapPage()->addToFreeList(startOfGap, headerAddress - startOfGap); |
header->unmark(); |
headerAddress += header->size(); |
markedObjectSize += header->size(); |
startOfGap = headerAddress; |
} |
if (startOfGap != payloadEnd()) |
- heap()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
+ heapForHeapPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
if (markedObjectSize) |
Heap::increaseMarkedObjectSize(markedObjectSize); |
@@ -1624,9 +1534,14 @@ void HeapPage::markUnmarkedObjectsDead() |
} |
} |
-void HeapPage::removeFromHeap(ThreadHeap* heap) |
+void HeapPage::removeFromHeap() |
+{ |
+ heapForHeapPage()->freePage(this); |
+} |
+ |
+ThreadHeapForHeapPage* HeapPage::heapForHeapPage() |
{ |
- heap->freePage(this); |
+ return static_cast<ThreadHeapForHeapPage*>(heap()); |
} |
void HeapPage::populateObjectStartBitMap() |
@@ -2484,13 +2399,9 @@ void Heap::collectAllGarbage() |
void ThreadHeap::prepareHeapForTermination() |
{ |
ASSERT(!m_firstUnsweptPage); |
- ASSERT(!m_firstUnsweptLargeObject); |
- for (HeapPage* page = m_firstPage; page; page = page->next()) { |
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { |
page->setTerminating(); |
} |
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
- largeObject->setTerminating(); |
- } |
} |
size_t Heap::objectPayloadSizeForTesting() |
@@ -2524,7 +2435,7 @@ void HeapAllocator::backingFree(void* address) |
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
header->checkHeader(); |
- static_cast<HeapPage*>(page)->heap()->promptlyFreeObject(header); |
+ static_cast<HeapPage*>(page)->heapForHeapPage()->promptlyFreeObject(header); |
} |
void HeapAllocator::freeVectorBacking(void* address) |
@@ -2558,7 +2469,7 @@ bool HeapAllocator::backingExpand(void* address, size_t newSize) |
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
header->checkHeader(); |
- return static_cast<HeapPage*>(page)->heap()->expandObject(header, newSize); |
+ return static_cast<HeapPage*>(page)->heapForHeapPage()->expandObject(header, newSize); |
} |
bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) |
@@ -2604,7 +2515,7 @@ void HeapAllocator::backingShrink(void* address, size_t quantizedCurrentSize, si |
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
header->checkHeader(); |
- static_cast<HeapPage*>(page)->heap()->shrinkObject(header, quantizedShrunkSize); |
+ static_cast<HeapPage*>(page)->heapForHeapPage()->shrinkObject(header, quantizedShrunkSize); |
} |
void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |