Chromium Code Reviews
Unified Diff: Source/platform/heap/Heap.cpp

Issue 840223002: Oilpan: Remove duplicated code between HeapPage and LargeObject (Closed)
Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 10 months ago
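
Summary of the structure this patch introduces (based only on the diff below): the duplicated HeapPage/LargeObject bookkeeping is folded into a single ThreadHeap base class that owns the m_firstPage and m_firstUnsweptPage lists, while ThreadHeapForHeapPage and ThreadHeapForLargeObject keep only their kind-specific state and override the kind-specific sweeping step. The following standalone sketch is a simplified model, not the real Blink code; all method bodies are stubs and names merely mirror the diff.

// Minimal sketch of the class split after this patch (simplified, not Blink code).
#include <cstddef>
#include <cstdint>

using Address = uint8_t*;

class BaseHeapPage;  // Common base for HeapPage and LargeObject in the diff.

class ThreadHeap {
public:
    virtual ~ThreadHeap() {}

    // Shared driver: bails out early, then delegates the page-kind-specific
    // sweeping to lazySweepPages() (template-method style, as in the diff).
    Address lazySweep(size_t allocationSize, size_t gcInfoIndex)
    {
        if (!m_firstUnsweptPage)
            return nullptr;
        // The real code additionally checks sweepForbidden() and enters
        // SweepForbiddenScope / ScriptForbiddenScope around this call.
        return lazySweepPages(allocationSize, gcInfoIndex);
    }

protected:
    virtual Address lazySweepPages(size_t allocationSize, size_t gcInfoIndex) = 0;

    // After the patch, both normal pages and large objects hang off the
    // same two lists in the base class.
    BaseHeapPage* m_firstPage = nullptr;
    BaseHeapPage* m_firstUnsweptPage = nullptr;
};

class ThreadHeapForHeapPage : public ThreadHeap {
protected:
    // In the patch this sweeps pages until allocateFromFreeList() finds a
    // slot for the pending allocation; stubbed out here.
    Address lazySweepPages(size_t, size_t) override { return nullptr; }

    // Bump-allocation state moved down from ThreadHeap by this patch.
    Address m_currentAllocationPoint = nullptr;
    size_t m_remainingAllocationSize = 0;
    size_t m_promptlyFreedSize = 0;
};

class ThreadHeapForLargeObject : public ThreadHeap {
protected:
    // In the patch this sweeps until at least allocationSize bytes of large
    // objects were freed, then calls doAllocateLargeObject(); stubbed here.
    Address lazySweepPages(size_t, size_t) override { return nullptr; }
};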
Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index ee78353fee1f2e20aa0e6d4c70db925e4b5823d1..dc391406101c38aa94b5a8a1d73ab1639cfa8367 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -532,42 +532,48 @@ void LargeObject::markUnmarkedObjectsDead()
header->markDead();
}
-void LargeObject::removeFromHeap(ThreadHeap* heap)
+void LargeObject::removeFromHeap()
+{
+ static_cast<ThreadHeapForLargeObject*>(heap())->freeLargeObject(this);
+}
+
+FreeList::FreeList()
+ : m_biggestFreeListIndex(0)
{
- heap->freeLargeObject(this);
}
ThreadHeap::ThreadHeap(ThreadState* state, int index)
- : m_currentAllocationPoint(nullptr)
- , m_remainingAllocationSize(0)
- , m_lastRemainingAllocationSize(0)
- , m_firstPage(nullptr)
- , m_firstLargeObject(nullptr)
+ : m_firstPage(nullptr)
, m_firstUnsweptPage(nullptr)
- , m_firstUnsweptLargeObject(nullptr)
, m_threadState(state)
, m_index(index)
- , m_promptlyFreedSize(0)
#if ENABLE(GC_PROFILING)
, m_cumulativeAllocationSize(0)
, m_allocationCount(0)
, m_inlineAllocationCount(0)
#endif
{
+}
+
+ThreadHeapForHeapPage::ThreadHeapForHeapPage(ThreadState* state, int index)
+ : ThreadHeap(state, index)
+ , m_currentAllocationPoint(nullptr)
+ , m_remainingAllocationSize(0)
+ , m_lastRemainingAllocationSize(0)
+ , m_promptlyFreedSize(0)
+{
clearFreeLists();
}
-FreeList::FreeList()
- : m_biggestFreeListIndex(0)
+ThreadHeapForLargeObject::ThreadHeapForLargeObject(ThreadState* state, int index)
+ : ThreadHeap(state, index)
{
}
ThreadHeap::~ThreadHeap()
{
ASSERT(!m_firstPage);
- ASSERT(!m_firstLargeObject);
ASSERT(!m_firstUnsweptPage);
- ASSERT(!m_firstUnsweptLargeObject);
}
void ThreadHeap::cleanupPages()
@@ -575,22 +581,15 @@ void ThreadHeap::cleanupPages()
clearFreeLists();
ASSERT(!m_firstUnsweptPage);
- ASSERT(!m_firstUnsweptLargeObject);
// Add the ThreadHeap's pages to the orphanedPagePool.
- for (HeapPage* page = m_firstPage; page; page = page->m_next) {
- Heap::decreaseAllocatedSpace(blinkPageSize);
- Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
+ Heap::decreaseAllocatedSpace(page->size());
+ Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
}
m_firstPage = nullptr;
-
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->m_next) {
- Heap::decreaseAllocatedSpace(largeObject->size());
- Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
- }
- m_firstLargeObject = nullptr;
}
-void ThreadHeap::updateRemainingAllocationSize()
+void ThreadHeapForHeapPage::updateRemainingAllocationSize()
{
if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
@@ -599,7 +598,7 @@ void ThreadHeap::updateRemainingAllocationSize()
ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
}
-void ThreadHeap::setAllocationPoint(Address point, size_t size)
+void ThreadHeapForHeapPage::setAllocationPoint(Address point, size_t size)
{
#if ENABLE(ASSERT)
if (point) {
@@ -609,14 +608,15 @@ void ThreadHeap::setAllocationPoint(Address point, size_t size)
ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize());
}
#endif
- if (hasCurrentAllocationArea())
+ if (hasCurrentAllocationArea()) {
addToFreeList(currentAllocationPoint(), remainingAllocationSize());
+ }
updateRemainingAllocationSize();
m_currentAllocationPoint = point;
m_lastRemainingAllocationSize = m_remainingAllocationSize = size;
}
-Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
{
ASSERT(allocationSize > remainingAllocationSize());
ASSERT(allocationSize >= allocationGranularity);
@@ -627,7 +627,7 @@ Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
// 1. If this allocation is big enough, allocate a large object.
if (allocationSize >= largeObjectSizeThreshold)
- return allocateLargeObject(allocationSize, gcInfoIndex);
+ return static_cast<ThreadHeapForLargeObject*>(threadState()->heap(LargeObjectHeap))->allocateLargeObject(allocationSize, gcInfoIndex);
// 2. Check if we should trigger a GC.
updateRemainingAllocationSize();
@@ -643,7 +643,7 @@ Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
// 5. Lazily sweep pages of this heap until we find a freed area for
// this allocation or we finish sweeping all pages of this heap.
- result = lazySweepPages(allocationSize, gcInfoIndex);
+ result = lazySweep(allocationSize, gcInfoIndex);
if (result)
return result;
@@ -667,7 +667,7 @@ Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
return result;
}
-Address ThreadHeap::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex)
{
// Try reusing a block from the largest bin. The underlying reasoning
// being that we want to amortize this slow allocation call by carving
@@ -702,20 +702,14 @@ void ThreadHeap::prepareForSweep()
{
ASSERT(!threadState()->isInGC());
ASSERT(!m_firstUnsweptPage);
- ASSERT(!m_firstUnsweptLargeObject);
// Move all pages to a list of unswept pages.
m_firstUnsweptPage = m_firstPage;
- m_firstUnsweptLargeObject = m_firstLargeObject;
m_firstPage = nullptr;
- m_firstLargeObject = nullptr;
}
-Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex)
{
- ASSERT(!hasCurrentAllocationArea());
- ASSERT(allocationSize < largeObjectSizeThreshold);
-
// If there are no pages to be swept, return immediately.
if (!m_firstUnsweptPage)
return nullptr;
@@ -730,17 +724,27 @@ Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
return nullptr;
TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages");
- ThreadState::SweepForbiddenScope scope(m_threadState);
+ ThreadState::SweepForbiddenScope scope(threadState());
if (threadState()->isMainThread())
ScriptForbiddenScope::enter();
+ Address result = lazySweepPages(allocationSize, gcInfoIndex);
+
+ if (threadState()->isMainThread())
+ ScriptForbiddenScope::exit();
+ return result;
+}
+
+Address ThreadHeapForHeapPage::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
+{
+ ASSERT(!hasCurrentAllocationArea());
Address result = nullptr;
while (m_firstUnsweptPage) {
- HeapPage* page = m_firstUnsweptPage;
+ BaseHeapPage* page = m_firstUnsweptPage;
if (page->isEmpty()) {
page->unlink(&m_firstUnsweptPage);
- page->removeFromHeap(this);
+ page->removeFromHeap();
} else {
// Sweep a page and move the page from m_firstUnsweptPages to
// m_firstPages.
@@ -749,67 +753,42 @@ Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
page->link(&m_firstPage);
page->markAsSwept();
+ // For HeapPage, stop lazy sweeping once we find a slot to
+ // allocate a new object.
result = allocateFromFreeList(allocationSize, gcInfoIndex);
if (result)
break;
}
}
-
- if (threadState()->isMainThread())
- ScriptForbiddenScope::exit();
return result;
}
-bool ThreadHeap::lazySweepLargeObjects(size_t allocationSize)
+Address ThreadHeapForLargeObject::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
{
- ASSERT(allocationSize >= largeObjectSizeThreshold);
-
- // If there are no large objects to be swept, return immediately.
- if (!m_firstUnsweptLargeObject)
- return false;
-
- RELEASE_ASSERT(threadState()->isSweepingInProgress());
-
- // lazySweepLargeObjects() can be called recursively if finalizers invoked
- // in page->sweep() allocate memory and the allocation triggers
- // lazySweepLargeObjects(). This check prevents the sweeping from being
- // executed recursively.
- if (threadState()->sweepForbidden())
- return false;
-
- TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepLargeObjects");
- ThreadState::SweepForbiddenScope scope(m_threadState);
-
- if (threadState()->isMainThread())
- ScriptForbiddenScope::enter();
-
- bool result = false;
+ Address result = nullptr;
size_t sweptSize = 0;
- while (m_firstUnsweptLargeObject) {
- LargeObject* largeObject = m_firstUnsweptLargeObject;
- if (largeObject->isEmpty()) {
- sweptSize += largeObject->size();
- largeObject->unlink(&m_firstUnsweptLargeObject);
- largeObject->removeFromHeap(this);
-
- // If we have swept large objects more than allocationSize,
- // we stop the lazy sweeping.
+ while (m_firstUnsweptPage) {
+ BaseHeapPage* page = m_firstUnsweptPage;
+ if (page->isEmpty()) {
+ sweptSize += static_cast<LargeObject*>(page)->payloadSize() + sizeof(HeapObjectHeader);
+ page->unlink(&m_firstUnsweptPage);
+ page->removeFromHeap();
+ // For LargeObject, stop lazy sweeping once we have swept
+ // more than allocationSize bytes.
if (sweptSize >= allocationSize) {
- result = true;
+ result = doAllocateLargeObject(allocationSize, gcInfoIndex);
+ ASSERT(result);
break;
}
} else {
- // Sweep a large object and move the large object from
- // m_firstUnsweptLargeObjects to m_firstLargeObjects.
- largeObject->sweep();
- largeObject->unlink(&m_firstUnsweptLargeObject);
- largeObject->link(&m_firstLargeObject);
- largeObject->markAsSwept();
+ // Sweep a page and move the page from m_firstUnsweptPages to
+ // m_firstPages.
+ page->sweep();
+ page->unlink(&m_firstUnsweptPage);
+ page->link(&m_firstPage);
+ page->markAsSwept();
}
}
-
- if (threadState()->isMainThread())
- ScriptForbiddenScope::exit();
return result;
}
@@ -821,12 +800,11 @@ void ThreadHeap::completeSweep()
if (threadState()->isMainThread())
ScriptForbiddenScope::enter();
- // Sweep normal pages.
while (m_firstUnsweptPage) {
- HeapPage* page = m_firstUnsweptPage;
+ BaseHeapPage* page = m_firstUnsweptPage;
if (page->isEmpty()) {
page->unlink(&m_firstUnsweptPage);
- page->removeFromHeap(this);
+ page->removeFromHeap();
} else {
// Sweep a page and move the page from m_firstUnsweptPages to
// m_firstPages.
@@ -837,56 +815,21 @@ void ThreadHeap::completeSweep()
}
}
- // Sweep large objects.
- while (m_firstUnsweptLargeObject) {
- LargeObject* largeObject = m_firstUnsweptLargeObject;
- if (largeObject->isEmpty()) {
- largeObject->unlink(&m_firstUnsweptLargeObject);
- largeObject->removeFromHeap(this);
- } else {
- // Sweep a large object and move the large object from
- // m_firstUnsweptLargeObjects to m_firstLargeObjects.
- largeObject->sweep();
- largeObject->unlink(&m_firstUnsweptLargeObject);
- largeObject->link(&m_firstLargeObject);
- largeObject->markAsSwept();
- }
- }
-
if (threadState()->isMainThread())
ScriptForbiddenScope::exit();
}
#if ENABLE(ASSERT)
-static bool isLargeObjectAligned(LargeObject* largeObject, Address address)
-{
- // Check that a large object is blinkPageSize aligned (modulo the osPageSize
- // for the guard page).
- return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject));
-}
-#endif
-
-#if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
BaseHeapPage* ThreadHeap::findPageFromAddress(Address address)
{
- for (HeapPage* page = m_firstPage; page; page = page->next()) {
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
if (page->contains(address))
return page;
}
- for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
+ for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
if (page->contains(address))
return page;
}
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
- ASSERT(isLargeObjectAligned(largeObject, address));
- if (largeObject->contains(address))
- return largeObject;
- }
- for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; largeObject = largeObject->next()) {
- ASSERT(isLargeObjectAligned(largeObject, address));
- if (largeObject->contains(address))
- return largeObject;
- }
return nullptr;
}
#endif
@@ -899,7 +842,7 @@ void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
size_t previousPageCount = info->pageCount;
json->beginArray("pages");
- for (HeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
// FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) {
json->beginArray();
@@ -912,14 +855,6 @@ void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
}
json->endArray();
- json->beginArray("largeObjects");
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
- json->beginDictionary();
- largeObject->snapshot(json, info);
- json->endDictionary();
- }
- json->endArray();
-
json->setInteger("pageCount", info->pageCount - previousPageCount);
}
@@ -966,7 +901,7 @@ void FreeList::addToFreeList(Address address, size_t size)
m_biggestFreeListIndex = index;
}
-bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize)
+bool ThreadHeapForHeapPage::expandObject(HeapObjectHeader* header, size_t newSize)
{
// It's possible that Vector requests a smaller expanded size because
// Vector::shrinkCapacity can set a capacity smaller than the actual payload
@@ -990,7 +925,7 @@ bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize)
return false;
}
-void ThreadHeap::shrinkObject(HeapObjectHeader* header, size_t newSize)
+void ThreadHeapForHeapPage::shrinkObject(HeapObjectHeader* header, size_t newSize)
{
ASSERT(header->payloadSize() > newSize);
size_t allocationSize = allocationSizeFromSize(newSize);
@@ -1013,9 +948,9 @@ void ThreadHeap::shrinkObject(HeapObjectHeader* header, size_t newSize)
}
}
-void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header)
+void ThreadHeapForHeapPage::promptlyFreeObject(HeapObjectHeader* header)
{
- ASSERT(!m_threadState->sweepForbidden());
+ ASSERT(!threadState()->sweepForbidden());
header->checkHeader();
Address address = reinterpret_cast<Address>(header);
Address payload = header->payload();
@@ -1025,7 +960,7 @@ void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header)
ASSERT(pageFromObject(address) == findPageFromAddress(address));
{
- ThreadState::SweepForbiddenScope forbiddenScope(m_threadState);
+ ThreadState::SweepForbiddenScope forbiddenScope(threadState());
header->finalize(payload, payloadSize);
if (address + size == m_currentAllocationPoint) {
m_currentAllocationPoint = address;
@@ -1045,7 +980,7 @@ void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header)
m_promptlyFreedSize += size;
}
-bool ThreadHeap::coalesce()
+bool ThreadHeapForHeapPage::coalesce()
{
// Don't coalesce heaps if there are not enough promptly freed entries
// to be coalesced.
@@ -1056,7 +991,7 @@ bool ThreadHeap::coalesce()
if (m_promptlyFreedSize < 1024 * 1024)
return false;
- if (m_threadState->sweepForbidden())
+ if (threadState()->sweepForbidden())
return false;
ASSERT(!hasCurrentAllocationArea());
@@ -1065,7 +1000,7 @@ bool ThreadHeap::coalesce()
// Rebuild free lists.
m_freeList.clear();
size_t freedSize = 0;
- for (HeapPage* page = m_firstPage; page; page = page->next()) {
+ for (HeapPage* page = static_cast<HeapPage*>(m_firstPage); page; page = static_cast<HeapPage*>(page->next())) {
page->clearObjectStartBitMap();
Address startOfGap = page->payload();
for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) {
@@ -1106,66 +1041,65 @@ bool ThreadHeap::coalesce()
return true;
}
-Address ThreadHeap::allocateLargeObject(size_t size, size_t gcInfoIndex)
+Address ThreadHeapForLargeObject::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex)
{
// Caller already added space for object header and rounded up to allocation
// alignment
- ASSERT(!(size & allocationMask));
+ ASSERT(!(allocationSize & allocationMask));
+
+ // 1. Check if we should trigger a GC.
+ threadState()->scheduleGCIfNeeded();
- size_t allocationSize = sizeof(LargeObject) + size;
+ // 2. Try to sweep large objects more than allocationSize bytes
+ // before allocating a new large object.
+ Address result = lazySweep(allocationSize, gcInfoIndex);
+ if (result)
+ return result;
- // Ensure that there is enough space for alignment. If the header
- // is not a multiple of 8 bytes we will allocate an extra
- // headerPadding bytes to ensure it 8 byte aligned.
- allocationSize += headerPadding();
+ // 3. If we have failed in sweeping allocationSize bytes,
+ // we complete sweeping before allocating this large object.
+ threadState()->completeSweep();
+ return doAllocateLargeObject(allocationSize, gcInfoIndex);
+}
+Address ThreadHeapForLargeObject::doAllocateLargeObject(size_t allocationSize, size_t gcInfoIndex)
+{
+ size_t largeObjectSize = sizeof(LargeObject) + LargeObject::headerPadding() + allocationSize;
// If ASan is supported we add allocationGranularity bytes to the allocated
// space and poison that to detect overflows
#if defined(ADDRESS_SANITIZER)
- allocationSize += allocationGranularity;
+ largeObjectSize += allocationGranularity;
#endif
- // 1. Check if we should trigger a GC.
- updateRemainingAllocationSize();
- m_threadState->scheduleGCIfNeeded();
-
- // 2. Try to sweep large objects more than allocationSize bytes
- // before allocating a new large object.
- if (!lazySweepLargeObjects(allocationSize)) {
- // 3. If we have failed in sweeping allocationSize bytes,
- // we complete sweeping before allocating this large object.
- m_threadState->completeSweep();
- }
-
- m_threadState->shouldFlushHeapDoesNotContainCache();
- PageMemory* pageMemory = PageMemory::allocate(allocationSize);
- m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
+ threadState()->shouldFlushHeapDoesNotContainCache();
+ PageMemory* pageMemory = PageMemory::allocate(largeObjectSize);
+ threadState()->allocatedRegionsSinceLastGC().append(pageMemory->region());
Address largeObjectAddress = pageMemory->writableStart();
- Address headerAddress = largeObjectAddress + sizeof(LargeObject) + headerPadding();
+ Address headerAddress = largeObjectAddress + sizeof(LargeObject) + LargeObject::headerPadding();
#if ENABLE(ASSERT)
// Verify that the allocated PageMemory is expectedly zeroed.
- for (size_t i = 0; i < size; ++i)
+ for (size_t i = 0; i < largeObjectSize; ++i)
ASSERT(!headerAddress[i]);
#endif
ASSERT(gcInfoIndex > 0);
HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex);
Address result = headerAddress + sizeof(*header);
ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
- LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, size);
+ LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, allocationSize);
header->checkHeader();
// Poison the object header and allocationGranularity bytes after the object
ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
- largeObject->link(&m_firstLargeObject);
+ largeObject->link(&m_firstPage);
Heap::increaseAllocatedSpace(largeObject->size());
Heap::increaseAllocatedObjectSize(largeObject->size());
return result;
}
-void ThreadHeap::freeLargeObject(LargeObject* object)
+void ThreadHeapForLargeObject::freeLargeObject(LargeObject* object)
{
object->heapObjectHeader()->finalize(object->payload(), object->payloadSize());
Heap::decreaseAllocatedSpace(object->size());
@@ -1185,7 +1119,7 @@ void ThreadHeap::freeLargeObject(LargeObject* object)
// ensures that tracing the dangling pointer in the next global GC just
// crashes instead of causing use-after-frees. After the next global
// GC, the orphaned pages are removed.
- Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
+ Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object);
} else {
ASSERT(!ThreadState::current()->isTerminating());
PageMemory* memory = object->storage();
@@ -1246,6 +1180,7 @@ PageMemory* FreePagePool::takeFreePage(int index)
BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap)
: m_storage(storage)
, m_heap(heap)
+ , m_next(nullptr)
, m_terminating(false)
, m_swept(true)
{
@@ -1351,9 +1286,9 @@ bool OrphanedPagePool::contains(void* object)
}
#endif
-void ThreadHeap::freePage(HeapPage* page)
+void ThreadHeapForHeapPage::freePage(HeapPage* page)
{
- Heap::decreaseAllocatedSpace(blinkPageSize);
+ Heap::decreaseAllocatedSpace(page->size());
if (page->terminating()) {
// The thread is shutting down and this page is being removed as a part
@@ -1364,18 +1299,18 @@ void ThreadHeap::freePage(HeapPage* page)
// ensures that tracing the dangling pointer in the next global GC just
// crashes instead of causing use-after-frees. After the next global
// GC, the orphaned pages are removed.
- Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
+ Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
} else {
PageMemory* memory = page->storage();
page->~HeapPage();
- Heap::freePagePool()->addFreePage(m_index, memory);
+ Heap::freePagePool()->addFreePage(heapIndex(), memory);
}
}
-void ThreadHeap::allocatePage()
+void ThreadHeapForHeapPage::allocatePage()
{
- m_threadState->shouldFlushHeapDoesNotContainCache();
- PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
+ threadState()->shouldFlushHeapDoesNotContainCache();
+ PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex());
// We continue allocating page memory until we succeed in committing one.
while (!pageMemory) {
// Allocate a memory region for blinkPagesPerRegion pages that
@@ -1384,7 +1319,7 @@ void ThreadHeap::allocatePage()
// [ guard os page | ... payload ... | guard os page ]
// ^---{ aligned to blink page size }
PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages();
- m_threadState->allocatedRegionsSinceLastGC().append(region);
+ threadState()->allocatedRegionsSinceLastGC().append(region);
// Setup the PageMemory object for each of the pages in the region.
size_t offset = 0;
@@ -1398,23 +1333,22 @@ void ThreadHeap::allocatePage()
else
delete memory;
} else {
- Heap::freePagePool()->addFreePage(m_index, memory);
+ Heap::freePagePool()->addFreePage(heapIndex(), memory);
}
offset += blinkPageSize;
}
}
HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this);
-
page->link(&m_firstPage);
- Heap::increaseAllocatedSpace(blinkPageSize);
+ Heap::increaseAllocatedSpace(page->size());
addToFreeList(page->payload(), page->payloadSize());
}
#if ENABLE(ASSERT)
-bool ThreadHeap::pagesToBeSweptContains(Address address)
+bool ThreadHeapForHeapPage::pagesToBeSweptContains(Address address)
{
- for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
+ for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
if (page->contains(address))
return true;
}
@@ -1426,18 +1360,15 @@ size_t ThreadHeap::objectPayloadSizeForTesting()
{
ASSERT(isConsistentForSweeping());
ASSERT(!m_firstUnsweptPage);
- ASSERT(!m_firstUnsweptLargeObject);
size_t objectPayloadSize = 0;
- for (HeapPage* page = m_firstPage; page; page = page->next())
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next())
objectPayloadSize += page->objectPayloadSizeForTesting();
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next())
- objectPayloadSize += largeObject->objectPayloadSizeForTesting();
return objectPayloadSize;
}
#if ENABLE(ASSERT)
-bool ThreadHeap::isConsistentForSweeping()
+bool ThreadHeapForHeapPage::isConsistentForSweeping()
{
// A thread heap is consistent for sweeping if none of the pages to be swept
// contain a freelist block or the current allocation point.
@@ -1457,15 +1388,9 @@ bool ThreadHeap::isConsistentForSweeping()
void ThreadHeap::makeConsistentForSweeping()
{
- preparePagesForSweeping();
- setAllocationPoint(nullptr, 0);
clearFreeLists();
-}
-
-void ThreadHeap::preparePagesForSweeping()
-{
ASSERT(isConsistentForSweeping());
- for (HeapPage* page = m_firstPage; page; page = page->next())
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next())
page->markAsUnswept();
// If a new GC is requested before this thread got around to sweep,
@@ -1475,8 +1400,8 @@ void ThreadHeap::preparePagesForSweeping()
// objects. If we trace a dead object we could end up tracing into
// garbage or the middle of another object via the newly conservatively
// found object.
- HeapPage* previousPage = nullptr;
- for (HeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
+ BaseHeapPage* previousPage = nullptr;
+ for (BaseHeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
page->markUnmarkedObjectsDead();
ASSERT(!page->hasBeenSwept());
}
@@ -1487,26 +1412,11 @@ void ThreadHeap::preparePagesForSweeping()
m_firstUnsweptPage = nullptr;
}
ASSERT(!m_firstUnsweptPage);
-
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next())
- largeObject->markAsUnswept();
-
- LargeObject* previousLargeObject = nullptr;
- for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; previousLargeObject = largeObject, largeObject = largeObject->next()) {
- largeObject->markUnmarkedObjectsDead();
- ASSERT(!largeObject->hasBeenSwept());
- }
- if (previousLargeObject) {
- ASSERT(m_firstUnsweptLargeObject);
- previousLargeObject->m_next = m_firstLargeObject;
- m_firstLargeObject = m_firstUnsweptLargeObject;
- m_firstUnsweptLargeObject = nullptr;
- }
- ASSERT(!m_firstUnsweptLargeObject);
}
-void ThreadHeap::clearFreeLists()
+void ThreadHeapForHeapPage::clearFreeLists()
{
+ setAllocationPoint(nullptr, 0);
m_freeList.clear();
}
@@ -1595,7 +1505,6 @@ void FreeList::getFreeSizeStats(PerBucketFreeListStats bucketStats[], size_t& to
HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap)
: BaseHeapPage(storage, heap)
- , m_next(nullptr)
{
m_objectStartBitMapComputed = false;
ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
@@ -1638,7 +1547,7 @@ void HeapPage::sweep()
ASSERT(header->size() < blinkPagePayloadSize());
if (header->isPromptlyFreed())
- heap()->decreasePromptlyFreedSize(header->size());
+ heapForHeapPage()->decreasePromptlyFreedSize(header->size());
if (header->isFree()) {
size_t size = header->size();
// Zero the memory in the free list header to maintain the
@@ -1671,14 +1580,14 @@ void HeapPage::sweep()
}
if (startOfGap != headerAddress)
- heap()->addToFreeList(startOfGap, headerAddress - startOfGap);
+ heapForHeapPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
header->unmark();
headerAddress += header->size();
markedObjectSize += header->size();
startOfGap = headerAddress;
}
if (startOfGap != payloadEnd())
- heap()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
+ heapForHeapPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
if (markedObjectSize)
Heap::increaseMarkedObjectSize(markedObjectSize);
@@ -1704,9 +1613,14 @@ void HeapPage::markUnmarkedObjectsDead()
}
}
-void HeapPage::removeFromHeap(ThreadHeap* heap)
+void HeapPage::removeFromHeap()
{
- heap->freePage(this);
+ heapForHeapPage()->freePage(this);
+}
+
+ThreadHeapForHeapPage* HeapPage::heapForHeapPage()
+{
+ return static_cast<ThreadHeapForHeapPage*>(heap());
}
void HeapPage::populateObjectStartBitMap()
@@ -2625,13 +2539,9 @@ double Heap::estimatedMarkingTime()
void ThreadHeap::prepareHeapForTermination()
{
ASSERT(!m_firstUnsweptPage);
- ASSERT(!m_firstUnsweptLargeObject);
- for (HeapPage* page = m_firstPage; page; page = page->next()) {
+ for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
page->setTerminating();
}
- for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
- largeObject->setTerminating();
- }
}
size_t Heap::objectPayloadSizeForTesting()
@@ -2666,7 +2576,7 @@ void HeapAllocator::backingFree(void* address)
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
header->checkHeader();
- static_cast<HeapPage*>(page)->heap()->promptlyFreeObject(header);
+ static_cast<HeapPage*>(page)->heapForHeapPage()->promptlyFreeObject(header);
}
void HeapAllocator::freeVectorBacking(void* address)
@@ -2703,7 +2613,7 @@ bool HeapAllocator::backingExpand(void* address, size_t newSize)
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
header->checkHeader();
- return static_cast<HeapPage*>(page)->heap()->expandObject(header, newSize);
+ return static_cast<HeapPage*>(page)->heapForHeapPage()->expandObject(header, newSize);
}
bool HeapAllocator::expandVectorBacking(void* address, size_t newSize)
@@ -2746,7 +2656,7 @@ void HeapAllocator::backingShrink(void* address, size_t quantizedCurrentSize, si
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
header->checkHeader();
- static_cast<HeapPage*>(page)->heap()->shrinkObject(header, quantizedShrunkSize);
+ static_cast<HeapPage*>(page)->heapForHeapPage()->shrinkObject(header, quantizedShrunkSize);
}
void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
