Index: Source/platform/heap/Heap.cpp |
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp |
index 82bf5004634adfe7d318875ee824dccbe5100611..1fd7ec83a49193c54a9ee81dad84907b96cd74ba 100644 |
--- a/Source/platform/heap/Heap.cpp |
+++ b/Source/platform/heap/Heap.cpp |
@@ -543,6 +543,7 @@ void LargeObject::markUnmarkedObjectsDead() |
void LargeObject::removeFromHeap(ThreadHeap* heap) |
{ |
+ markAsUnswept(); |
haraken
2015/01/16 17:29:49
Isn't it guaranteed that the page is already marked as unswept at this point?
sof
2015/01/16 19:30:17
Removed.
|
heap->freeLargeObject(this); |
} |
@@ -747,11 +748,11 @@ Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
page->sweep(); |
page->unlink(&m_firstUnsweptPage); |
page->link(&m_firstPage); |
+ page->markAsSwept(); |
result = allocateFromFreeList(allocationSize, gcInfoIndex); |
- if (result) { |
+ if (result) |
break; |
- } |
} |
} |
@@ -804,6 +805,7 @@ bool ThreadHeap::lazySweepLargeObjects(size_t allocationSize) |
largeObject->sweep(); |
largeObject->unlink(&m_firstUnsweptLargeObject); |
largeObject->link(&m_firstLargeObject); |
+ largeObject->markAsSwept(); |
} |
} |
@@ -832,6 +834,7 @@ void ThreadHeap::completeSweep() |
page->sweep(); |
page->unlink(&m_firstUnsweptPage); |
page->link(&m_firstPage); |
+ page->markAsSwept(); |
} |
} |
@@ -847,6 +850,7 @@ void ThreadHeap::completeSweep() |
largeObject->sweep(); |
largeObject->unlink(&m_firstUnsweptLargeObject); |
largeObject->link(&m_firstLargeObject); |
+ largeObject->markAsSwept(); |
} |
} |
@@ -1146,6 +1150,8 @@ Address ThreadHeap::allocateLargeObject(size_t size, size_t gcInfoIndex) |
ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
largeObject->link(&m_firstLargeObject); |
+ if (threadState()->isSweepingInProgress()) |
+ largeObject->markAsSwept(); |
haraken
2015/01/16 17:29:49
It's a bit inconsistent that a page allocated during sweeping has to be explicitly marked as swept here; how about flipping the default so pages start out swept instead?
sof
2015/01/16 19:30:17
We can flip defaults that way.
|
Heap::increaseAllocatedSpace(largeObject->size()); |
Heap::increaseAllocatedObjectSize(largeObject->size()); |
@@ -1234,6 +1240,7 @@ BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap) |
: m_storage(storage) |
, m_heap(heap) |
, m_terminating(false) |
+ , m_swept(false) |
haraken
2015/01/16 17:29:49
As commented above, m_swept(true) will make the code simpler.
sof
2015/01/16 19:30:17
Done.
|
{ |
ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
} |
@@ -1392,6 +1399,8 @@ void ThreadHeap::allocatePage() |
HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this); |
page->link(&m_firstPage); |
+ if (threadState()->isSweepingInProgress()) |
+ page->markAsSwept(); |
haraken
2015/01/16 17:29:49
Ditto. I think we can remove this code.
sof
2015/01/16 19:30:17
Done.
|
Heap::increaseAllocatedSpace(blinkPageSize); |
addToFreeList(page->payload(), page->payloadSize()); |
@@ -1443,14 +1452,17 @@ bool ThreadHeap::isConsistentForSweeping() |
void ThreadHeap::makeConsistentForSweeping() |
{ |
- markUnmarkedObjectsDead(); |
+ preparePagesForSweeping(); |
setAllocationPoint(nullptr, 0); |
clearFreeLists(); |
} |
-void ThreadHeap::markUnmarkedObjectsDead() |
+void ThreadHeap::preparePagesForSweeping() |
{ |
ASSERT(isConsistentForSweeping()); |
+ for (HeapPage* page = m_firstPage; page; page = page->next()) |
haraken
2015/01/16 17:29:49
You can move this loop to after line 1484.
sof
2015/01/16 19:30:17
It's preferable to have this loop here & over these pages.
|
+ page->markAsUnswept(); |
+ |
// If a new GC is requested before this thread got around to sweep, |
// ie. due to the thread doing a long running operation, we clear |
// the mark bits and mark any of the dead objects as dead. The latter |
@@ -1461,6 +1473,7 @@ void ThreadHeap::markUnmarkedObjectsDead() |
HeapPage* previousPage = nullptr; |
for (HeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { |
page->markUnmarkedObjectsDead(); |
+ page->markAsUnswept(); |
haraken
2015/01/16 17:29:49
Then you won't need this.
|
} |
if (previousPage) { |
ASSERT(m_firstUnsweptPage); |
@@ -1470,9 +1483,13 @@ void ThreadHeap::markUnmarkedObjectsDead() |
} |
ASSERT(!m_firstUnsweptPage); |
+ for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
haraken
2015/01/16 17:29:49
Ditto.
sof
2015/01/16 19:30:17
See above.
|
+ largeObject->markAsUnswept(); |
+ |
LargeObject* previousLargeObject = nullptr; |
for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; previousLargeObject = largeObject, largeObject = largeObject->next()) { |
largeObject->markUnmarkedObjectsDead(); |
+ largeObject->markAsUnswept(); |
} |
if (previousLargeObject) { |
ASSERT(m_firstUnsweptLargeObject); |
@@ -1618,6 +1635,7 @@ void HeapPage::markUnmarkedObjectsDead() |
void HeapPage::removeFromHeap(ThreadHeap* heap) |
{ |
+ markAsUnswept(); |
haraken
2015/01/16 17:29:49
I think the page is already marked as unswept. ASSERT that instead?
sof
2015/01/16 19:30:17
Removed.
|
heap->freePage(this); |
} |