Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1936)

Unified Diff: Source/platform/heap/Heap.cpp

Issue 371623002: [oilpan]: Make thread shutdown more robust. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: review feedback Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index 625b35b32c0fd34f061d027fd04370f5c71d2f61..e62ea8386fb8dd3273c88ae32c46294c79a94ad6 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -422,24 +422,25 @@ void HeapObjectHeader::unmark()
}
NO_SANITIZE_ADDRESS
-bool HeapObjectHeader::hasDebugMark() const
+bool HeapObjectHeader::hasDeadMark() const
{
checkHeader();
- return m_size & debugBitMask;
+ return m_size & deadBitMask;
}
NO_SANITIZE_ADDRESS
-void HeapObjectHeader::clearDebugMark()
+void HeapObjectHeader::clearDeadMark()
{
checkHeader();
- m_size &= ~debugBitMask;
+ m_size &= ~deadBitMask;
}
NO_SANITIZE_ADDRESS
-void HeapObjectHeader::setDebugMark()
+void HeapObjectHeader::setDeadMark()
{
+ ASSERT(!isMarked());
checkHeader();
- m_size |= debugBitMask;
+ m_size |= deadBitMask;
}
#ifndef NDEBUG
@@ -499,10 +500,16 @@ bool LargeHeapObject<Header>::isMarked()
}
template<typename Header>
+void LargeHeapObject<Header>::setDeadMark()
+{
+ heapObjectHeader()->setDeadMark();
+}
+
+template<typename Header>
void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
{
ASSERT(contains(address));
- if (!objectContains(address))
+ if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
return;
#if ENABLE(GC_TRACING)
visitor->setHostInfo(&address, "stack");
@@ -551,14 +558,14 @@ FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* pa
}
template<typename Header>
-ThreadHeap<Header>::ThreadHeap(ThreadState* state)
+ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
: m_currentAllocationPoint(0)
, m_remainingAllocationSize(0)
, m_firstPage(0)
, m_firstLargeHeapObject(0)
, m_biggestFreeListIndex(0)
, m_threadState(state)
- , m_pagePool(0)
+ , m_index(index)
{
clearFreeLists();
}
@@ -567,9 +574,18 @@ template<typename Header>
ThreadHeap<Header>::~ThreadHeap()
{
clearFreeLists();
- if (!ThreadState::current()->isMainThread())
- assertEmpty();
- deletePages();
+ flushHeapContainsCache();
haraken 2014/07/08 05:44:51 Don't you need to flush HeapDoesNotContainCache as
wibling-chromium 2014/07/08 13:39:45 No, we only need to flush the HeapDoesNotContainCa
+
+ // Add the ThreadHeap's pages to the orphanedPagePool.
+ Vector<BaseHeapPage*> pages;
+ for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
+ pages.append(page);
+ m_firstPage = 0;
+
+ for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
+ pages.append(largeObject);
+ m_firstLargeHeapObject = 0;
+ Heap::orphanedPagePool()->addOrphanedPages(m_index, pages);
}
template<typename Header>
@@ -739,73 +755,204 @@ void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH
// object before freeing.
ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
- delete object->storage();
+
+ if (object->shuttingDown()) {
+ // The thread is shutting down so this object is being removed as part
+ // of a thread local GC. In that case the object could be revived in the
Mads Ager (chromium) 2014/07/08 08:24:56 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
+ // next global GC either due to a dead object being revived via a
Mads Ager (chromium) 2014/07/08 08:24:55 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
+ // conservative pointer or due to a programming error where an object
+ // in another thread heap keeps a dangling pointer to this object.
haraken 2014/07/08 05:44:50 the this object => this object
wibling-chromium 2014/07/08 13:39:45 Done.
+ // To guard against this we put the large object memory in the
haraken 2014/07/08 05:44:50 against
wibling-chromium 2014/07/08 13:39:46 Done.
+ // orphanedPagePool to ensure it is still reachable. After the next full
+ // GC it can be released assuming no rogue/dangling pointers refer to
haraken 2014/07/08 05:44:51 full GC => global GC
wibling-chromium 2014/07/08 13:39:46 Done.
+ // it.
+ // NOTE: large objects are not moved to the memory pool as it is unlikely
+ // they can be reused due to their individual sizes.
+ Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
+ } else {
+ PageMemory* memory = object->storage();
+ object->~LargeHeapObject<Header>();
+ delete memory;
+ }
}
-template<>
-void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
+template<typename DataType>
+HeapPool<DataType>::HeapPool()
{
- // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
- // the heap should be unused (ie. 0).
- allocatePage(0);
+ for (int i = 0; i < NumberOfHeaps; ++i) {
+ m_pool[i] = 0;
+ }
}
-template<>
-void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
+HeapMemoryPool::~HeapMemoryPool()
{
- // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
- // since it is the same for all objects
- ASSERT(gcInfo);
- allocatePage(gcInfo);
+ for (int index = 0; index < NumberOfHeaps; ++index) {
+ while (PoolEntry* entry = m_pool[index]) {
+ m_pool[index] = entry->next;
+ PageMemory* memory = entry->data;
+ ASSERT(memory);
+ delete memory;
+ delete entry;
+ }
+ }
}
-template<typename Header>
-void ThreadHeap<Header>::clearPagePool()
+void HeapMemoryPool::addMemory(int index, PageMemory* memory)
{
- while (takePageFromPool()) { }
+ // When adding memory to the pool we decommit it to ensure it is unused
+ // while in the pool. This also allows the physical memory backing the
+ // page to be given back to the OS.
+ memory->decommit();
+ MutexLocker locker(m_mutex[index]);
haraken 2014/07/08 05:44:51 Just help me understand: Why do we need a mutex he
wibling-chromium 2014/07/08 13:39:46 I have changed the page pool to be global. The rea
+ PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
+ m_pool[index] = entry;
}
-template<typename Header>
-PageMemory* ThreadHeap<Header>::takePageFromPool()
+PageMemory* HeapMemoryPool::takeMemory(int index)
{
- Heap::flushHeapDoesNotContainCache();
- while (PagePoolEntry* entry = m_pagePool) {
- m_pagePool = entry->next();
- PageMemory* storage = entry->storage();
+ MutexLocker locker(m_mutex[index]);
+ while (PoolEntry* entry = m_pool[index]) {
+ m_pool[index] = entry->next;
+ PageMemory* memory = entry->data;
+ ASSERT(memory);
delete entry;
+ if (memory->commit())
+ return memory;
- if (storage->commit())
- return storage;
+ // We got some memory, but failed to commit it, try again.
+ delete memory;
+ }
+ return 0;
+}
- // Failed to commit pooled storage. Release it.
- delete storage;
+HeapOrphanedPagePool::~HeapOrphanedPagePool()
+{
+ for (int index = 0; index < NumberOfHeaps; ++index) {
+ while (PoolEntry* entry = m_pool[index]) {
+ m_pool[index] = entry->next;
+ BaseHeapPage* page = entry->data;
+ delete entry;
+ PageMemory* memory = page->storage();
+ ASSERT(memory);
+ page->~BaseHeapPage();
+ delete memory;
+ }
}
+}
- return 0;
+void HeapOrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
Mads Ager (chromium) 2014/07/08 08:24:56 For these methods we know that some mutex is alrea
wibling-chromium 2014/07/08 13:39:46 Yes, basically we only add pages to the orphaned p
+{
+ page->markOrphaned();
+ PoolEntry* entry = new PoolEntry(page, m_pool[index]);
+ m_pool[index] = entry;
}
-template<typename Header>
-void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage)
+void HeapOrphanedPagePool::addOrphanedPages(int index, Vector<BaseHeapPage*>& pages)
{
- flushHeapContainsCache();
- PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool);
- m_pagePool = entry;
+ for (Vector<BaseHeapPage*>::const_iterator it = pages.begin(); it != pages.end(); ++it) {
+ addOrphanedPage(index, *it);
+ }
+}
+
+void HeapOrphanedPagePool::decommitOrphanedPages()
+{
+ // No locking needed as all threads are at safepoints at this point in time.
haraken 2014/07/08 05:44:51 Can we add an ASSERT about this?
wibling-chromium 2014/07/08 13:39:46 Done.
+ for (int index = 0; index < NumberOfHeaps; ++index) {
+ PoolEntry* entry = m_pool[index];
+ PoolEntry** prevNext = &m_pool[index];
+ while (entry) {
+ BaseHeapPage* page = entry->data;
+ if (page->traced()) {
+ // If the page was traced in the last GC it is not decommitted.
+ // We only decommit a page, ie. put it in the memory pool,
+ // when the page has no objects pointing to it.
+ // We mark the page as orphaned. This clears the traced flag
+ // and any object trace bits that were set during tracing.
+ page->markOrphaned();
+ prevNext = &entry->next;
+ entry = entry->next;
+ continue;
+ }
+
+ // Page was not traced. Check if we should reuse the memory or just
+ // free it. Large object memory is not reused, but freed, normal
+ // blink heap pages are reused.
+ PageMemory* memory = page->storage();
+
+ // Call the destructor before freeing or adding to the memory pool.
haraken 2014/07/08 05:44:50 Just help me understand: Why does this order matte
wibling-chromium 2014/07/08 13:39:46 We cannot call the destructor after adding the mem
+ if (page->reuseMemory()) {
haraken 2014/07/08 05:44:50 reuseMemory => shouldReuseMemory ?
Mads Ager (chromium) 2014/07/08 08:24:56 !page->isLargeObject()
wibling-chromium 2014/07/08 13:39:45 Done.
+ page->~BaseHeapPage();
+ Heap::memoryPool()->addMemory(index, memory);
Mads Ager (chromium) 2014/07/08 08:24:55 So this is where we use that the page pool is now
wibling-chromium 2014/07/08 13:39:45 Yes, I would like to keep it global for now and in
+ } else {
+ page->~BaseHeapPage();
+ delete memory;
+ }
+
+ PoolEntry* deadEntry = entry;
+ entry = entry->next;
+ *prevNext = entry;
+ delete deadEntry;
+ }
+ }
+}
+
+bool HeapOrphanedPagePool::contains(void* object)
haraken 2014/07/08 05:44:50 Just to confirm: HeapOrphanedPagePool::contains()
wibling-chromium 2014/07/08 13:39:46 No, it is only called inside an ASSERT. I will wra
+{
+ for (int index = 0; index < NumberOfHeaps; ++index) {
+ for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
+ BaseHeapPage* page = entry->data;
+ if (page->contains(reinterpret_cast<Address>(object)))
+ return true;
+ }
+ }
+ return false;
+}
+
+template<>
+void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
+{
+ // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
+ // the heap should be unused (ie. 0).
+ allocatePage(0);
+}
+
+template<>
+void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
+{
+ // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
+ // since it is the same for all objects
+ ASSERT(gcInfo);
+ allocatePage(gcInfo);
}
template <typename Header>
-void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page)
+void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
{
- PageMemory* storage = page->storage();
- storage->decommit();
- addPageMemoryToPool(storage);
+ flushHeapContainsCache();
+ if (page->shuttingDown()) {
+ // The thread is shutting down so this page is being removed as part
+ // of a thread local GC. In that case the page could be revived in the
Mads Ager (chromium) 2014/07/08 08:24:55 revived -> accessed
wibling-chromium 2014/07/08 13:39:45 Done.
+ // next global GC either due to a dead object being revived via a
Mads Ager (chromium) 2014/07/08 08:24:56 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
+ // conservative pointer or due to a programming error where an object
+ // in another thread heap keeps a dangling pointer to this object.
haraken 2014/07/08 05:44:51 the this object => this object
wibling-chromium 2014/07/08 13:39:46 Done.
+ // To guard against this we put the page in the orphanedPagePool to
haraken 2014/07/08 05:44:50 against
wibling-chromium 2014/07/08 13:39:45 Done.
+ // ensure it is still reachable. After the next full GC it can be
haraken 2014/07/08 05:44:51 full GC => global GC
wibling-chromium 2014/07/08 13:39:45 Done.
+ // decommitted and moved to the memory pool assuming no rogue/dangling
+ // pointers refer to it.
+ Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
+ } else {
+ PageMemory* memory = page->storage();
+ page->~HeapPage<Header>();
+ Heap::memoryPool()->addMemory(m_index, memory);
+ }
}
template<typename Header>
void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
{
Heap::flushHeapDoesNotContainCache();
- PageMemory* pageMemory = takePageFromPool();
- if (!pageMemory) {
+ PageMemory* pageMemory = Heap::memoryPool()->takeMemory(m_index);
+ while (!pageMemory) {
haraken 2014/07/08 05:44:50 I'm curious why we need change 'if' to 'while'?
Mads Ager (chromium) 2014/07/08 08:24:55 Because the page pool is now global and other thre
// Allocate a memory region for blinkPagesPerRegion pages that
// will each have the following layout.
//
@@ -816,11 +963,10 @@ void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
// region.
size_t offset = 0;
for (size_t i = 0; i < blinkPagesPerRegion; i++) {
- addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()));
+ Heap::memoryPool()->addMemory(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()));
offset += blinkPageSize;
}
- pageMemory = takePageFromPool();
- RELEASE_ASSERT(pageMemory);
+ pageMemory = Heap::memoryPool()->takeMemory(m_index);
}
HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
// FIXME: Oilpan: Linking new pages into the front of the list is
@@ -864,22 +1010,17 @@ void ThreadHeap<Header>::sweep()
#endif
HeapPage<Header>* page = m_firstPage;
HeapPage<Header>** previous = &m_firstPage;
- bool pagesRemoved = false;
while (page) {
if (page->isEmpty()) {
- flushHeapContainsCache();
haraken 2014/07/08 05:44:50 Just help me understand: Why can we drop flushHeap
Mads Ager (chromium) 2014/07/08 08:24:56 Because unlink now calls removePageFromHeap which
wibling-chromium 2014/07/08 13:39:45 Yes, it seemed like we did this a bit too many tim
HeapPage<Header>* unused = page;
page = page->next();
HeapPage<Header>::unlink(unused, previous);
- pagesRemoved = true;
} else {
page->sweep();
previous = &page->m_next;
page = page->next();
}
}
- if (pagesRemoved)
- flushHeapContainsCache();
LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
@@ -954,39 +1095,17 @@ void ThreadHeap<Header>::makeConsistentForGC()
}
template<typename Header>
-void ThreadHeap<Header>::clearMarks()
+void ThreadHeap<Header>::clearLiveAndMarkDead()
{
ASSERT(isConsistentForGC());
for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
- page->clearMarks();
- for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
- current->unmark();
-}
-
-template<typename Header>
-void ThreadHeap<Header>::deletePages()
-{
- flushHeapContainsCache();
- // Add all pages in the pool to the heap's list of pages before deleting
- clearPagePool();
-
- for (HeapPage<Header>* page = m_firstPage; page; ) {
- HeapPage<Header>* dead = page;
- page = page->next();
- PageMemory* storage = dead->storage();
- dead->~HeapPage();
- delete storage;
- }
- m_firstPage = 0;
-
- for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
- LargeHeapObject<Header>* dead = current;
- current = current->next();
- PageMemory* storage = dead->storage();
- dead->~LargeHeapObject();
- delete storage;
+ page->clearLiveAndMarkDead();
+ for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
+ if (current->isMarked())
+ current->unmark();
+ else
+ current->setDeadMark();
}
- m_firstLargeHeapObject = 0;
}
template<typename Header>
@@ -1030,7 +1149,7 @@ template<typename Header>
void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
{
*prevNext = unused->m_next;
- unused->heap()->addPageToPool(unused);
+ unused->heap()->removePageFromHeap(unused);
}
template<typename Header>
@@ -1096,13 +1215,20 @@ void HeapPage<Header>::sweep()
}
template<typename Header>
-void HeapPage<Header>::clearMarks()
+void HeapPage<Header>::clearLiveAndMarkDead()
{
for (Address headerAddress = payload(); headerAddress < end();) {
Header* header = reinterpret_cast<Header*>(headerAddress);
ASSERT(header->size() < blinkPagePayloadSize());
- if (!header->isFree())
+ // Skip freelist entries.
+ if (header->isFree()) {
+ headerAddress += header->size();
+ continue;
+ }
+ if (header->isMarked())
header->unmark();
+ else
+ header->setDeadMark();
haraken 2014/07/08 05:44:50 Slightly better: if (header->isMarked()) header
wibling-chromium 2014/07/08 13:39:45 Done.
headerAddress += header->size();
}
}
@@ -1182,7 +1308,7 @@ void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
{
ASSERT(contains(address));
Header* header = findHeaderFromAddress(address);
- if (!header)
+ if (!header || header->hasDeadMark())
return;
#if ENABLE(GC_TRACING)
@@ -1370,32 +1496,7 @@ bool CallbackStack::isEmpty()
return m_current == &(m_buffer[0]) && !m_next;
}
-bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor)
-{
- if (m_current == &(m_buffer[0])) {
- if (!m_next) {
-#ifndef NDEBUG
- clearUnused();
-#endif
- return false;
- }
- CallbackStack* nextStack = m_next;
- *first = nextStack;
- delete this;
- return nextStack->popAndInvokeCallback(first, visitor);
- }
- Item* item = --m_current;
-
- VisitorCallback callback = item->callback();
-#if ENABLE(GC_TRACING)
- if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback
- visitor->setHostInfo(item->object(), classOf(item->object()));
-#endif
- callback(visitor, item->object());
-
- return true;
-}
-
+template<bool ThreadLocal>
void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor)
{
CallbackStack* stack = 0;
@@ -1408,21 +1509,30 @@ void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor)
// a second time.
while (stack != *first) {
stack = *first;
- stack->invokeOldestCallbacks(visitor);
+ stack->invokeOldestCallbacks<ThreadLocal>(visitor);
}
}
+template<bool ThreadLocal>
void CallbackStack::invokeOldestCallbacks(Visitor* visitor)
{
// Recurse first (bufferSize at a time) so we get to the newly added entries
// last.
if (m_next)
- m_next->invokeOldestCallbacks(visitor);
+ m_next->invokeOldestCallbacks<ThreadLocal>(visitor);
// This loop can tolerate entries being added by the callbacks after
// iteration starts.
for (unsigned i = 0; m_buffer + i < m_current; i++) {
Item& item = m_buffer[i];
+
+ BaseHeapPage* heapPage = pageHeaderFromObject(item.object());
+ if (ThreadLocal ? (heapPage->orphaned() || !heapPage->shuttingDown()) : heapPage->orphaned()) {
+ // If tracing this from a global GC set the traced bit.
+ if (!ThreadLocal)
+ heapPage->setTraced();
+ continue;
+ }
item.callback()(visitor, item.object());
}
}
@@ -1675,6 +1785,8 @@ void Heap::init()
CallbackStack::init(&s_ephemeronStack);
s_heapDoesNotContainCache = new HeapDoesNotContainCache();
s_markingVisitor = new MarkingVisitor();
+ s_memoryPool = new HeapMemoryPool();
+ s_orphanedPagePool = new HeapOrphanedPagePool();
}
void Heap::shutdown()
@@ -1695,6 +1807,10 @@ void Heap::doShutdown()
s_markingVisitor = 0;
delete s_heapDoesNotContainCache;
s_heapDoesNotContainCache = 0;
+ delete s_memoryPool;
+ s_memoryPool = 0;
+ delete s_orphanedPagePool;
+ s_orphanedPagePool = 0;
CallbackStack::shutdown(&s_weakCallbackStack);
CallbackStack::shutdown(&s_markingStack);
CallbackStack::shutdown(&s_ephemeronStack);
@@ -1713,6 +1829,13 @@ BaseHeapPage* Heap::contains(Address address)
return 0;
}
+#ifndef NDEBUG
+bool Heap::containedInHeapOrOrphanedPage(void* object)
+{
+ return contains(object) || orphanedPagePool()->contains(object);
+}
+#endif
+
Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
{
ASSERT(ThreadState::isAnyThreadInGC());
@@ -1791,14 +1914,15 @@ String Heap::createBacktraceString()
void Heap::pushTraceCallback(void* object, TraceCallback callback)
{
- ASSERT(Heap::contains(object));
+ ASSERT(Heap::containedInHeapOrOrphanedPage(object));
CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
*slot = CallbackStack::Item(object, callback);
}
+template<bool ThreadLocal>
bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
- return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor);
+ return s_markingStack->popAndInvokeCallback<ThreadLocal>(&s_markingStack, visitor);
}
void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback)
@@ -1811,7 +1935,7 @@ void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback
void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback)
{
ASSERT(Heap::contains(object));
- BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object)));
+ BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
ASSERT(Heap::contains(object) == heapPageForObject);
ThreadState* state = heapPageForObject->threadState();
state->pushWeakObjectPointerCallback(closure, callback);
@@ -1819,7 +1943,7 @@ void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe
bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
{
- return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor);
+ return s_weakCallbackStack->popAndInvokeCallback<false>(&s_weakCallbackStack, visitor);
haraken 2014/07/08 05:44:51 We prefer enum than true/false.
wibling-chromium 2014/07/08 13:39:45 Done.
}
void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
@@ -1877,16 +2001,66 @@ void Heap::collectGarbage(ThreadState::StackState stackState)
prepareForGC();
- ThreadState::visitRoots(s_markingVisitor);
+ tracingAndGlobalWeakProcessing<false>();
+
+ // After a global marking we know that any orphaned page that was not reached
+ // cannot be revived in a subsequent GC. This is due to a thread either having
Mads Ager (chromium) 2014/07/08 08:24:56 revived -> reached
wibling-chromium 2014/07/08 13:39:46 Done.
+ // swept its heap or having done a "poor man's sweep" in prepareForGC which marks
+ // objects that are dead, but not swept in the previous GC as dead. In this GC's
+ // marking we check that any object marked as dead is not revived. E.g. via a
Mads Ager (chromium) 2014/07/08 08:24:55 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
+ // conservatively found pointer or a programming error with an object containing
+ // a dangling pointer.
haraken 2014/07/08 05:44:50 In my understanding, if we're performing a precise
wibling-chromium 2014/07/08 13:39:45 That is correct. I will try to add a RELEASE_ASSER
+ orphanedPagePool()->decommitOrphanedPages();
+
+#if ENABLE(GC_TRACING)
+ static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
+#endif
+
+ if (blink::Platform::current()) {
+ uint64_t objectSpaceSize;
+ uint64_t allocatedSpaceSize;
+ getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
+ blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
+ blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
+ blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
+ }
+}
+
+void Heap::collectGarbageForThread(ThreadState* state, bool sweepOnly)
+{
+ // We explicitly do not enter a safepoint while doing thread specific
+ // garbage collection since we don't want to allow a global GC at the
+ // same time as a thread local GC.
+
+ NoAllocationScope<AnyThread> noAllocationScope;
Mads Ager (chromium) 2014/07/08 08:24:55 This no allocation scope covers the sweep as well.
wibling-chromium 2014/07/08 13:39:45 Good point. Fixed.
+
+ state->enterGC();
+ state->prepareForGC();
+
+ if (!sweepOnly)
+ tracingAndGlobalWeakProcessing<true>();
+
+ state->leaveGC();
+ state->performPendingSweep();
+}
+
+template<bool ThreadLocal>
+void Heap::tracingAndGlobalWeakProcessing()
+{
+ if (ThreadLocal)
+ ThreadState::current()->visitLocalRoots(s_markingVisitor);
+ else
+ ThreadState::visitRoots(s_markingVisitor);
// Ephemeron fixed point loop.
do {
- // Recursively mark all objects that are reachable from the roots.
- while (popAndInvokeTraceCallback(s_markingVisitor)) { }
+ // Recursively mark all objects that are reachable from the roots for this thread.
+ // Also don't continue tracing if the trace hits an object on another thread's heap.
+ while (popAndInvokeTraceCallback<ThreadLocal>(s_markingVisitor)) { }
// Mark any strong pointers that have now become reachable in ephemeron
// maps.
- CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor);
+ CallbackStack::invokeCallbacks<ThreadLocal>(&s_ephemeronStack, s_markingVisitor);
// Rerun loop if ephemeron processing queued more objects for tracing.
} while (!s_markingStack->isEmpty());
@@ -1902,19 +2076,6 @@ void Heap::collectGarbage(ThreadState::StackState stackState)
// It is not permitted to trace pointers of live objects in the weak
// callback phase, so the marking stack should still be empty here.
ASSERT(s_markingStack->isEmpty());
-
-#if ENABLE(GC_TRACING)
- static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
-#endif
-
- if (blink::Platform::current()) {
- uint64_t objectSpaceSize;
- uint64_t allocatedSpaceSize;
- getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
- blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
- blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
- blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
- }
}
void Heap::collectAllGarbage()
@@ -1933,6 +2094,17 @@ void Heap::setForcePreciseGCForTesting()
ThreadState::current()->setForcePreciseGCForTesting(true);
}
+template<typename Header>
+void ThreadHeap<Header>::setShutdown()
+{
+ for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
+ page->setShutdown();
+ }
+ for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
+ current->setShutdown();
+ }
+}
+
void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
{
*objectSpaceSize = 0;
@@ -1991,4 +2163,6 @@ CallbackStack* Heap::s_ephemeronStack;
HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
bool Heap::s_shutdownCalled = false;
bool Heap::s_lastGCWasConservative = false;
+HeapMemoryPool* Heap::s_memoryPool;
+HeapOrphanedPagePool* Heap::s_orphanedPagePool;
}

Powered by Google App Engine
This is Rietveld 408576698