Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index 21c61e3b32f650c6314909d91b13fbdcc12040de..22168e8af8a4c0ffc4892a48da1fec2633ccf2a7 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -427,11 +427,8 @@ public:
         // FIXME: in an unlikely coincidence that two threads decide
         // to collect garbage at the same time, avoid doing two GCs in
         // a row.
-        RELEASE_ASSERT(!m_state->isInGC());
-        RELEASE_ASSERT(!m_state->isSweepInProgress());
         if (LIKELY(ThreadState::stopThreads())) {
             m_parkedAllThreads = true;
-            m_state->enterGC();
         }
         if (m_state->isMainThread())
             TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
@@ -444,8 +441,6 @@ public:
         // Only cleanup if we parked all threads in which case the GC happened
         // and we need to resume the other threads.
         if (LIKELY(m_parkedAllThreads)) {
-            m_state->leaveGC();
-            ASSERT(!m_state->isInGC());
             ThreadState::resumeThreads();
         }
     }
@@ -693,7 +688,7 @@ Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocat
         if (threadState()->shouldForceConservativeGC())
             Heap::collectGarbage(ThreadState::HeapPointersOnStack);
         else
-            threadState()->setGCRequested();
+            threadState()->setGCState(ThreadState::GCScheduled);
     }
     if (remainingAllocationSize() > 0) {
         m_freeList.addToFreeList(currentAllocationPoint(), remainingAllocationSize());
@@ -1028,7 +1023,7 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf
 
     updateRemainingAllocationSize();
     if (m_threadState->shouldGC())
-        m_threadState->setGCRequested();
+        m_threadState->setGCState(ThreadState::GCScheduled);
     m_threadState->shouldFlushHeapDoesNotContainCache();
     PageMemory* pageMemory = PageMemory::allocate(allocationSize);
     m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
@@ -1892,7 +1887,7 @@ void LargeObject<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo*
 
 void HeapDoesNotContainCache::flush()
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
 
     if (m_hasEntries) {
         for (int i = 0; i < numberOfEntries; i++)
@@ -1912,7 +1907,7 @@ size_t HeapDoesNotContainCache::hash(Address address)
 
 bool HeapDoesNotContainCache::lookup(Address address)
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
 
     size_t index = hash(address);
     ASSERT(!(index & 1));
@@ -1926,7 +1921,7 @@ bool HeapDoesNotContainCache::lookup(Address address)
 
 void HeapDoesNotContainCache::addEntry(Address address)
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
 
     m_hasEntries = true;
     size_t index = hash(address);
@@ -1968,7 +1963,7 @@ public:
             // call Heap::contains when outside a GC and we call mark
             // when doing weakness for ephemerons. Hence we only check
             // when called within.
-            ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header));
+            ASSERT(!Heap::isInGC() || Heap::containedInHeapOrOrphanedPage(header));
         }
 #endif
         ASSERT(objectPointer);
@@ -2191,7 +2186,7 @@ void Heap::doShutdown()
     if (!s_markingVisitor)
         return;
 
-    ASSERT(!ThreadState::isAnyThreadInGC());
+    ASSERT(!Heap::isInGC());
     ASSERT(!ThreadState::attachedThreads().size());
     delete s_markingVisitor;
     s_markingVisitor = 0;
@@ -2217,7 +2212,7 @@ void Heap::doShutdown()
 
 BaseHeapPage* Heap::contains(Address address)
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
         BaseHeapPage* page = (*it)->contains(address);
@@ -2236,7 +2231,7 @@ bool Heap::containedInHeapOrOrphanedPage(void* object)
 
 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
 
 #if !ENABLE(ASSERT)
     if (s_heapDoesNotContainCache->lookup(address))
@@ -2427,23 +2422,30 @@ bool Heap::weakTableRegistered(const void* table)
 }
 #endif
 
-void Heap::prepareForGC()
+void Heap::preGC()
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
     for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
-        (*it)->prepareForGC();
+        (*it)->preGC();
+}
+
+void Heap::postGC()
+{
+    ASSERT(Heap::isInGC());
+    ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
+    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
+        (*it)->postGC();
 }
 
 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::CauseOfGC cause)
 {
     ThreadState* state = ThreadState::current();
-    state->clearGCRequested();
+    state->setGCState(ThreadState::StoppingOtherThreads);
 
     GCScope gcScope(stackState);
     // Check if we successfully parked the other threads. If not we bail out of the GC.
     if (!gcScope.allThreadsParked()) {
-        ThreadState::current()->setGCRequested();
+        state->setGCState(ThreadState::GCScheduled);
         return;
     }
 
@@ -2452,9 +2454,6 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::Cause
 
     s_lastGCWasConservative = false;
 
-    Heap::resetMarkedObjectSize();
-    Heap::resetAllocatedObjectSize();
-
     TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
         "precise", stackState == ThreadState::NoHeapPointersOnStack,
         "forced", cause == ThreadState::ForcedGC);
@@ -2469,7 +2468,11 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::Cause
     // torn down).
     NoAllocationScope<AnyThread> noAllocationScope;
 
-    prepareForGC();
+    enterGC();
+    preGC();
+
+    Heap::resetMarkedObjectSize();
+    Heap::resetAllocatedObjectSize();
 
     // 1. trace persistent roots.
     ThreadState::visitPersistentRoots(s_markingVisitor);
@@ -2496,6 +2499,9 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::Cause
     // we should have crashed during marking before getting here.)
     orphanedPagePool()->decommitOrphanedPages();
 
+    postGC();
+    leaveGC();
+
 #if ENABLE(GC_PROFILE_MARKING)
     static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
 #endif
@@ -2519,8 +2525,8 @@ void Heap::collectGarbageForTerminatingThread(ThreadState* state)
     {
         NoAllocationScope<AnyThread> noAllocationScope;
 
-        state->enterGC();
-        state->prepareForGC();
+        enterGC();
+        state->preGC();
 
         // 1. trace the thread local persistent roots. For thread local GCs we
         // don't trace the stack (ie. no conservative scanning) since this is
@@ -2541,7 +2547,8 @@ void Heap::collectGarbageForTerminatingThread(ThreadState* state)
         postMarkingProcessing();
         globalWeakProcessing();
 
-        state->leaveGC();
+        state->postGC();
+        leaveGC();
     }
     state->performPendingSweep();
 }
@@ -2630,7 +2637,7 @@ void ThreadHeap<Header>::prepareHeapForTermination()
 size_t Heap::objectPayloadSizeForTesting()
 {
     size_t objectPayloadSize = 0;
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
     ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
     typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
     for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
@@ -2643,7 +2650,7 @@ size_t Heap::objectPayloadSizeForTesting()
 template<typename HeapTraits, typename HeapType, typename HeaderType>
 void HeapAllocator::backingFree(void* address)
 {
-    if (!address || ThreadState::isAnyThreadInGC())
+    if (!address || Heap::isInGC())
         return;
 
     ThreadState* state = ThreadState::current();
@@ -2684,7 +2691,7 @@ void HeapAllocator::hashTableBackingFree(void* address)
 template<typename HeapTraits, typename HeapType, typename HeaderType>
 bool HeapAllocator::backingExpand(void* address, size_t newSize)
 {
-    if (!address || ThreadState::isAnyThreadInGC())
+    if (!address || Heap::isInGC())
         return false;
 
     ThreadState* state = ThreadState::current();
@@ -2715,7 +2722,7 @@ bool HeapAllocator::vectorBackingExpand(void* address, size_t newSize)
 
 BaseHeapPage* Heap::lookup(Address address)
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
     if (!s_regionTree)
         return 0;
     if (PageMemoryRegion* region = s_regionTree->lookup(address)) {
@@ -2742,7 +2749,7 @@ void Heap::removePageMemoryRegion(PageMemoryRegion* region)
 
 void Heap::addPageMemoryRegion(PageMemoryRegion* region)
 {
-    ASSERT(ThreadState::isAnyThreadInGC());
+    ASSERT(Heap::isInGC());
     RegionTree::add(new RegionTree(region), &s_regionTree);
 }
 
@@ -2818,6 +2825,7 @@ CallbackStack* Heap::s_ephemeronStack;
 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
 bool Heap::s_shutdownCalled = false;
 bool Heap::s_lastGCWasConservative = false;
+bool Heap::s_inGC = false;
 FreePagePool* Heap::s_freePagePool;
 OrphanedPagePool* Heap::s_orphanedPagePool;
 Heap::RegionTree* Heap::s_regionTree = 0;
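Note: the declarations this patch relies on (Heap::enterGC(), Heap::leaveGC(), Heap::isInGC(), Heap::s_inGC, and ThreadState::setGCState() with the GCScheduled and StoppingOtherThreads states) live in Heap.h and ThreadState.h, which are not part of the hunks above. A minimal sketch of what those declarations are assumed to look like, inferred only from the call sites and from the "bool Heap::s_inGC = false;" definition added at the end of this file, is given below; the exact signatures and the full GCState enumerator list are assumptions, not taken from the real headers.

// Hypothetical sketch (assumed, not part of this patch): process-wide GC flag in Heap.h.
class Heap {
public:
    // enterGC()/leaveGC() are only reached while the other threads are parked
    // at a safepoint, so a plain bool without atomics is assumed to suffice.
    static void enterGC() { s_inGC = true; }
    static void leaveGC() { s_inGC = false; }
    static bool isInGC() { return s_inGC; }

private:
    static bool s_inGC; // defined in Heap.cpp as "bool Heap::s_inGC = false;"
};

// Hypothetical sketch (assumed): per-thread GC progress in ThreadState.h,
// replacing the old setGCRequested()/clearGCRequested() pair.
class ThreadState {
public:
    enum GCState {
        NoGCScheduled,
        GCScheduled,          // set by the allocation slow paths above
        StoppingOtherThreads, // set by Heap::collectGarbage before parking threads
        // further states (e.g. marking/sweeping) are assumed but not visible in this diff
    };
    void setGCState(GCState state) { m_gcState = state; }
    GCState gcState() const { return m_gcState; }

private:
    GCState m_gcState;
};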