Chromium Code Reviews| Index: third_party/WebKit/Source/platform/heap/Heap.cpp |
| diff --git a/third_party/WebKit/Source/platform/heap/Heap.cpp b/third_party/WebKit/Source/platform/heap/Heap.cpp |
| index 62e4f91db267be1281a3fb97dca1db2cade1a3ff..c94c185fc139ec10a94577a71ea94ea0c60b88a0 100644 |
| --- a/third_party/WebKit/Source/platform/heap/Heap.cpp |
| +++ b/third_party/WebKit/Source/platform/heap/Heap.cpp |
| @@ -59,16 +59,17 @@ HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr; |
| class ParkThreadsScope final { |
| STACK_ALLOCATED(); |
| public: |
| - ParkThreadsScope() |
| - : m_shouldResumeThreads(false) |
| + ParkThreadsScope(ThreadState* state) |
| + : m_state(state) |
| + , m_shouldResumeThreads(false) |
| { |
| } |
| - bool parkThreads(ThreadState* state) |
| + bool parkThreads() |
| { |
| TRACE_EVENT0("blink_gc", "Heap::ParkThreadsScope"); |
| const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
| - if (state->isMainThread()) |
| + if (m_state->isMainThread()) |
| TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); |
| // TODO(haraken): In an unlikely coincidence that two threads decide |
| @@ -76,13 +77,13 @@ public: |
| // a row and return false. |
| double startTime = WTF::currentTimeMS(); |
| - m_shouldResumeThreads = ThreadState::stopThreads(); |
| + m_shouldResumeThreads = m_state->heap().park(); |
| double timeForStoppingThreads = WTF::currentTimeMS() - startTime; |
| DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsHistogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50)); |
| timeToStopThreadsHistogram.count(timeForStoppingThreads); |
| - if (state->isMainThread()) |
| + if (m_state->isMainThread()) |
| TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
| return m_shouldResumeThreads; |
| } |
| @@ -92,97 +93,155 @@ public: |
| // Only cleanup if we parked all threads in which case the GC happened |
| // and we need to resume the other threads. |
| if (m_shouldResumeThreads) |
| - ThreadState::resumeThreads(); |
| + m_state->heap().resume(); |
| } |
| private: |
| + ThreadState* m_state; |
| bool m_shouldResumeThreads; |
| }; |
| -void Heap::flushHeapDoesNotContainCache() |
| +GCHeapStats::GCHeapStats() |
| + : m_allocatedSpace(0) |
| + , m_allocatedObjectSize(0) |
| + , m_objectSizeAtLastGC(0) |
| + , m_markedObjectSize(0) |
| + , m_markedObjectSizeAtLastCompleteSweep(0) |
| + , m_wrapperCount(0) |
| + , m_wrapperCountAtLastGC(0) |
| + , m_collectedWrapperCount(0) |
| + , m_partitionAllocSizeAtLastGC(WTF::Partitions::totalSizeOfCommittedPages()) |
| + , m_estimatedMarkingTimePerByte(0.0) |
| { |
| - s_heapDoesNotContainCache->flush(); |
| } |
| -void Heap::init() |
| +double GCHeapStats::estimatedMarkingTime() |
| { |
| - ThreadState::init(); |
| - s_markingStack = new CallbackStack(); |
| - s_postMarkingCallbackStack = new CallbackStack(); |
| - s_globalWeakCallbackStack = new CallbackStack(); |
| - // Use smallest supported block size for ephemerons. |
| - s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize); |
| - s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
| - s_freePagePool = new FreePagePool(); |
| - s_orphanedPagePool = new OrphanedPagePool(); |
| - s_allocatedSpace = 0; |
| - s_allocatedObjectSize = 0; |
| - s_objectSizeAtLastGC = 0; |
| - s_markedObjectSize = 0; |
| - s_markedObjectSizeAtLastCompleteSweep = 0; |
| - s_wrapperCount = 0; |
| - s_wrapperCountAtLastGC = 0; |
| - s_collectedWrapperCount = 0; |
| - s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); |
| - s_estimatedMarkingTimePerByte = 0.0; |
| - s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); |
| -#if ENABLE(ASSERT) |
| - s_gcGeneration = 1; |
| -#endif |
| + // Use 8 ms as initial estimated marking time. |
| + // 8 ms is long enough for low-end mobile devices to mark common |
| + // real-world object graphs. |
| + if (m_estimatedMarkingTimePerByte == 0) |
| + return 0.008; |
| - GCInfoTable::init(); |
| + // Assuming that the collection rate of this GC will be mostly equal to |
| + // the collection rate of the last GC, estimate the marking time of this GC. |
| + return m_estimatedMarkingTimePerByte * (allocatedObjectSize() + markedObjectSize()); |
| +} |
| - if (Platform::current() && Platform::current()->currentThread()) |
| - Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); |
| +void GCHeapStats::reset() |
| +{ |
| + m_objectSizeAtLastGC = m_allocatedObjectSize + m_markedObjectSize; |
| + m_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); |
| + m_allocatedObjectSize = 0; |
| + m_markedObjectSize = 0; |
| + m_wrapperCountAtLastGC = m_wrapperCount; |
| + m_collectedWrapperCount = 0; |
| } |
| -void Heap::shutdown() |
| +void GCHeapStats::increaseAllocatedObjectSize(size_t delta) |
| { |
| - if (Platform::current() && Platform::current()->currentThread()) |
| - Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); |
| - s_shutdownCalled = true; |
| - ThreadState::shutdownHeapIfNecessary(); |
| + atomicAdd(&m_allocatedObjectSize, static_cast<long>(delta)); |
| + Heap::increaseTotalAllocatedObjectSize(delta); |
| } |
| -void Heap::doShutdown() |
| +void GCHeapStats::decreaseAllocatedObjectSize(size_t delta) |
| { |
| - // We don't want to call doShutdown() twice. |
| - if (!s_markingStack) |
| - return; |
| + atomicSubtract(&m_allocatedObjectSize, static_cast<long>(delta)); |
| + Heap::decreaseTotalAllocatedObjectSize(delta); |
| +} |
| - ASSERT(!ThreadState::attachedThreads().size()); |
| - delete s_heapDoesNotContainCache; |
| - s_heapDoesNotContainCache = nullptr; |
| - delete s_freePagePool; |
| - s_freePagePool = nullptr; |
| - delete s_orphanedPagePool; |
| - s_orphanedPagePool = nullptr; |
| - delete s_globalWeakCallbackStack; |
| - s_globalWeakCallbackStack = nullptr; |
| - delete s_postMarkingCallbackStack; |
| - s_postMarkingCallbackStack = nullptr; |
| - delete s_markingStack; |
| - s_markingStack = nullptr; |
| - delete s_ephemeronStack; |
| - s_ephemeronStack = nullptr; |
| - delete s_regionTree; |
| - s_regionTree = nullptr; |
| - GCInfoTable::shutdown(); |
| - ThreadState::shutdown(); |
| - ASSERT(Heap::allocatedSpace() == 0); |
| +void GCHeapStats::increaseMarkedObjectSize(size_t delta) |
| +{ |
| + atomicAdd(&m_markedObjectSize, static_cast<long>(delta)); |
| + Heap::increaseTotalMarkedObjectSize(delta); |
| } |
| -CrossThreadPersistentRegion& Heap::crossThreadPersistentRegion() |
| +void GCHeapStats::increaseAllocatedSpace(size_t delta) |
| { |
| - DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); |
| - return persistentRegion; |
| + atomicAdd(&m_allocatedSpace, static_cast<long>(delta)); |
| + Heap::increaseTotalAllocatedSpace(delta); |
| +} |
| + |
| +void GCHeapStats::decreaseAllocatedSpace(size_t delta) |
| +{ |
| + atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); |
| + Heap::decreaseTotalAllocatedSpace(delta); |
| +} |
| + |
| +Heap::Heap() |
| + : m_regionTree(nullptr) |
| + , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache)) |
| + , m_safePointBarrier(adoptPtr(new SafePointBarrier(this))) |
| + , m_freePagePool(adoptPtr(new FreePagePool)) |
| + , m_orphanedPagePool(adoptPtr(new OrphanedPagePool)) |
| + , m_markingStack(adoptPtr(new CallbackStack())) |
| + , m_postMarkingCallbackStack(adoptPtr(new CallbackStack())) |
| + , m_globalWeakCallbackStack(adoptPtr(new CallbackStack())) |
| + , m_ephemeronStack(adoptPtr(new CallbackStack(CallbackStack::kMinimalBlockSize))) |
| +{ |
| + MutexLocker locker(Heap::heapAttachMutex()); |
| + all().add(this); |
| +} |
| + |
| +Heap::~Heap() |
| +{ |
| + MutexLocker locker(Heap::heapAttachMutex()); |
| + all().remove(this); |
| +} |
| + |
| +void Heap::attach(ThreadState* thread) |
| +{ |
| + MutexLocker locker(m_threadAttachMutex); |
| + m_threads.add(thread); |
| +} |
| + |
| +void Heap::detach(ThreadState* thread) |
| +{ |
| + { |
| + SafePointAwareMutexLocker locker(m_threadAttachMutex, BlinkGC::NoHeapPointersOnStack); |
| + thread->cleanup(); |
| + ASSERT(m_threads.contains(thread)); |
| + m_threads.remove(thread); |
| + } |
| + if (m_threads.isEmpty()) { |
| + delete this; |
| + Heap::doShutdownIfNecessary(); |
| + } |
| +} |
| + |
| +void Heap::lockThreadAttachMutex() |
| +{ |
| + m_threadAttachMutex.lock(); |
| +} |
| + |
| +void Heap::unlockThreadAttachMutex() |
| +{ |
| + m_threadAttachMutex.unlock(); |
| +} |
| + |
| +bool Heap::park() |
| +{ |
| + return m_safePointBarrier->parkOthers(); |
| +} |
| + |
| +void Heap::resume() |
| +{ |
| + m_safePointBarrier->resumeOthers(); |
| } |
| #if ENABLE(ASSERT) |
| +bool Heap::isAtSafePoint() const |
| +{ |
| + for (ThreadState* state : m_threads) { |
| + if (!state->isAtSafePoint()) |
| + return false; |
| + } |
| + return true; |
| +} |
| + |
| BasePage* Heap::findPageFromAddress(Address address) |
| { |
| - MutexLocker lock(ThreadState::threadAttachMutex()); |
| - for (ThreadState* state : ThreadState::attachedThreads()) { |
| + for (ThreadState* state : m_threads) { |
| if (BasePage* page = state->findPageFromAddress(address)) |
| return page; |
| } |
| @@ -190,141 +249,343 @@ BasePage* Heap::findPageFromAddress(Address address) |
| } |
| #endif |
| -Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| +void Heap::preGC() |
| { |
| - ASSERT(ThreadState::current()->isInGC()); |
| + for (ThreadState* state : m_threads) { |
| + state->preGC(); |
| + } |
| +} |
| -#if !ENABLE(ASSERT) |
| - if (s_heapDoesNotContainCache->lookup(address)) |
| - return nullptr; |
| -#endif |
| +void Heap::postGC(BlinkGC::GCType gcType) |
| +{ |
| + for (ThreadState* state : m_threads) { |
| + state->postGC(gcType); |
| + } |
| +} |
| - if (BasePage* page = lookup(address)) { |
| - ASSERT(page->contains(address)); |
| - ASSERT(!page->orphaned()); |
| - ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| - page->checkAndMarkPointer(visitor, address); |
| - return address; |
| +size_t Heap::objectPayloadSizeForTesting() |
| +{ |
| + size_t objectPayloadSize = 0; |
| + for (ThreadState* state : m_threads) { |
| + state->setGCState(ThreadState::GCRunning); |
| + state->makeConsistentForGC(); |
| + objectPayloadSize += state->objectPayloadSizeForTesting(); |
| + state->setGCState(ThreadState::EagerSweepScheduled); |
| + state->setGCState(ThreadState::Sweeping); |
| + state->setGCState(ThreadState::NoGCScheduled); |
| } |
| + return objectPayloadSize; |
| +} |
| -#if !ENABLE(ASSERT) |
| - s_heapDoesNotContainCache->addEntry(address); |
| -#else |
| - if (!s_heapDoesNotContainCache->lookup(address)) |
| - s_heapDoesNotContainCache->addEntry(address); |
| -#endif |
| +void Heap::visitPersistentRoots(Visitor* visitor) |
| +{ |
| + TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots"); |
| + Heap::crossThreadPersistentRegion().tracePersistentNodes(visitor); |
| + |
| + for (ThreadState* state : m_threads) { |
| + state->visitPersistents(visitor); |
| + } |
| +} |
| + |
| +void Heap::visitStackRoots(Visitor* visitor) |
| +{ |
| + TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots"); |
| + for (ThreadState* state : m_threads) { |
| + state->visitStack(visitor); |
| + } |
| +} |
| + |
| +void Heap::checkAndPark(ThreadState* threadState, SafePointAwareMutexLocker* locker) |
| +{ |
| + m_safePointBarrier->checkAndPark(threadState, locker); |
| +} |
| + |
| +void Heap::enterSafePoint(ThreadState* threadState) |
| +{ |
| + m_safePointBarrier->enterSafePoint(threadState); |
| +} |
| + |
| +void Heap::leaveSafePoint(ThreadState* threadState, SafePointAwareMutexLocker* locker) |
| +{ |
| + m_safePointBarrier->leaveSafePoint(threadState, locker); |
| +} |
| + |
| +void Heap::flushHeapDoesNotContainCache() |
| +{ |
| + m_heapDoesNotContainCache->flush(); |
| +} |
| + |
| +BasePage* Heap::lookupPageForAddress(Address address) |
| +{ |
| + ASSERT(ThreadState::current()->isInGC()); |
| + if (!m_regionTree) |
| + return nullptr; |
| + if (PageMemoryRegion* region = m_regionTree->lookup(address)) { |
| + BasePage* page = region->pageFromAddress(address); |
| + return page && !page->orphaned() ? page : nullptr; |
| + } |
| return nullptr; |
| } |
| -void Heap::pushTraceCallback(void* object, TraceCallback callback) |
| +void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| +{ |
| + MutexLocker locker(m_regionTreeMutex); |
| + RegionTree::add(new RegionTree(region), &m_regionTree); |
| +} |
| + |
| +void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| +{ |
| + // Deletion of large objects (and thus their regions) can happen |
| + // concurrently on sweeper threads. Removal can also happen during thread |
| + // shutdown, but that case is safe. Regardless, we make all removals |
| + // mutually exclusive. |
| + MutexLocker locker(m_regionTreeMutex); |
| + RegionTree::remove(region, &m_regionTree); |
| +} |
| + |
| +void Heap::pushTraceCallback(void* containerObject, TraceCallback callback) |
| { |
| ASSERT(ThreadState::current()->isInGC()); |
| // Trace should never reach an orphaned page. |
| - ASSERT(!Heap::orphanedPagePool()->contains(object)); |
| - CallbackStack::Item* slot = s_markingStack->allocateEntry(); |
| - *slot = CallbackStack::Item(object, callback); |
| + ASSERT(!orphanedPagePool()->contains(containerObject)); |
| + CallbackStack::Item* slot = m_markingStack->allocateEntry(); |
| + *slot = CallbackStack::Item(containerObject, callback); |
| } |
| bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
| { |
| - CallbackStack::Item* item = s_markingStack->pop(); |
| + CallbackStack::Item* item = m_markingStack->pop(); |
| if (!item) |
| return false; |
| item->call(visitor); |
| return true; |
| } |
| -void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) |
| +void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) |
| { |
| ASSERT(ThreadState::current()->isInGC()); |
| // Trace should never reach an orphaned page. |
| - ASSERT(!Heap::orphanedPagePool()->contains(object)); |
| - CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); |
| - *slot = CallbackStack::Item(object, callback); |
| + ASSERT(!orphanedPagePool()->contains(cell)); |
| + CallbackStack::Item* slot = m_globalWeakCallbackStack->allocateEntry(); |
| + *slot = CallbackStack::Item(cell, callback); |
| } |
| -bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) |
| +bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
| { |
| - if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { |
| + if (CallbackStack::Item* item = m_globalWeakCallbackStack->pop()) { |
| item->call(visitor); |
| return true; |
| } |
| return false; |
| } |
| -void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) |
| +void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
| { |
| ASSERT(ThreadState::current()->isInGC()); |
| // Trace should never reach an orphaned page. |
| - ASSERT(!Heap::orphanedPagePool()->contains(cell)); |
| - CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); |
| - *slot = CallbackStack::Item(cell, callback); |
| + ASSERT(!orphanedPagePool()->contains(table)); |
| + CallbackStack::Item* slot = m_ephemeronStack->allocateEntry(); |
| + *slot = CallbackStack::Item(table, iterationCallback); |
| + |
| + // Register a post-marking callback to tell the tables that |
| + // ephemeron iteration is complete. |
| + pushPostMarkingCallback(table, iterationDoneCallback); |
| } |
| -void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) |
| +#if ENABLE(ASSERT) |
| +bool Heap::weakTableRegistered(const void* table) |
| +{ |
| + ASSERT(m_ephemeronStack); |
| + return m_ephemeronStack->hasCallbackForObject(table); |
| +} |
| +#endif |
| + |
| +void Heap::decommitCallbackStacks() |
| +{ |
| + m_markingStack->decommit(); |
| + m_postMarkingCallbackStack->decommit(); |
| + m_globalWeakCallbackStack->decommit(); |
| + m_ephemeronStack->decommit(); |
| +} |
| + |
| +Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| +{ |
| + ASSERT(ThreadState::current()->isInGC()); |
| +#if !ENABLE(ASSERT) |
| + if (m_heapDoesNotContainCache->lookup(address)) |
| + return nullptr; |
| +#endif |
| + |
| + if (BasePage* page = lookupPageForAddress(address)) { |
| + ASSERT(page->contains(address)); |
| + ASSERT(!page->orphaned()); |
| + ASSERT(!m_heapDoesNotContainCache->lookup(address)); |
| + page->checkAndMarkPointer(visitor, address); |
| + return address; |
| + } |
| + |
| +#if !ENABLE(ASSERT) |
| + m_heapDoesNotContainCache->addEntry(address); |
| +#else |
| + if (!m_heapDoesNotContainCache->lookup(address)) |
| + m_heapDoesNotContainCache->addEntry(address); |
| +#endif |
| + return nullptr; |
| +} |
| + |
| +void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) |
| { |
| ASSERT(ThreadState::current()->isInGC()); |
|
haraken
2016/02/29 11:17:45
ThreadState::current() => state() throughout this… [reviewer comment truncated in this capture]
keishi
2016/03/02 06:01:03
? Heap doesn't have state()
|
| // Trace should never reach an orphaned page. |
| - ASSERT(!Heap::orphanedPagePool()->contains(object)); |
| - ThreadState* state = pageFromObject(object)->heap()->threadState(); |
| - state->pushThreadLocalWeakCallback(closure, callback); |
| + ASSERT(!orphanedPagePool()->contains(object)); |
| + CallbackStack::Item* slot = m_postMarkingCallbackStack->allocateEntry(); |
| + *slot = CallbackStack::Item(object, callback); |
| } |
| -bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
| +bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) |
| { |
| - if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { |
| + if (CallbackStack::Item* item = m_postMarkingCallbackStack->pop()) { |
| item->call(visitor); |
| return true; |
| } |
| return false; |
| } |
| -void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
| +void Heap::processMarkingStack(Visitor* visitor) |
| +{ |
| + // Ephemeron fixed point loop. |
| + do { |
| + { |
| + // Iteratively mark all objects that are reachable from the objects |
| + // currently pushed onto the marking stack. |
| + TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); |
| + while (popAndInvokeTraceCallback(visitor)) { } |
| + } |
| + |
| + { |
| + // Mark any strong pointers that have now become reachable in |
| + // ephemeron maps. |
| + TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); |
| + m_ephemeronStack->invokeEphemeronCallbacks(visitor); |
| + } |
| + |
| + // Rerun loop if ephemeron processing queued more objects for tracing. |
| + } while (!m_markingStack->isEmpty()); |
| +} |
| + |
| +void Heap::postMarkingProcessing(Visitor* visitor) |
| +{ |
| + TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); |
| + // Call post-marking callbacks including: |
| + // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
| + // (specifically to clear the queued bits for weak hash tables), and |
| + // 2. the markNoTracing callbacks on collection backings to mark them |
| + // if they are only reachable from their front objects. |
| + while (popAndInvokePostMarkingCallback(visitor)) { } |
| + |
| + // Post-marking callbacks should not trace any objects and |
| + // therefore the marking stack should be empty after the |
| + // post-marking callbacks. |
| + ASSERT(m_markingStack->isEmpty()); |
| +} |
| + |
| +void Heap::globalWeakProcessing(Visitor* visitor) |
| +{ |
| + TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); |
| + double startTime = WTF::currentTimeMS(); |
| + |
| + // Call weak callbacks on objects that may now be pointing to dead objects. |
| + while (popAndInvokeGlobalWeakCallback(visitor)) { } |
| + |
| + // It is not permitted to trace pointers of live objects in the weak |
| + // callback phase, so the marking stack should still be empty here. |
| + ASSERT(m_markingStack->isEmpty()); |
| + |
| + double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
| + DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakPrcessing", 1, 10 * 1000, 50)); |
| + globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); |
| +} |
| + |
| +void Heap::resetHeapCounters() |
| +{ |
| + ASSERT(ThreadState::current()->isInGC()); |
| + |
| + Heap::reportMemoryUsageForTracing(); |
| + |
| + m_stats.reset(); |
| + { |
| + MutexLocker locker(m_threadAttachMutex); |
| + for (ThreadState* state : m_threads) |
| + state->resetHeapCounters(); |
| + } |
| +} |
| + |
| +void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) |
| { |
| ASSERT(ThreadState::current()->isInGC()); |
| // Trace should never reach an orphaned page. |
| - ASSERT(!Heap::orphanedPagePool()->contains(table)); |
| - CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); |
| - *slot = CallbackStack::Item(table, iterationCallback); |
| + ASSERT(!orphanedPagePool()->contains(object)); |
| + ThreadState* state = pageFromObject(object)->arena()->threadState(); |
| + state->pushThreadLocalWeakCallback(closure, callback); |
| +} |
| - // Register a post-marking callback to tell the tables that |
| - // ephemeron iteration is complete. |
| - pushPostMarkingCallback(table, iterationDoneCallback); |
| +void Heap::init() |
| +{ |
| + ThreadState::init(); |
| + s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); |
| + |
| + GCInfoTable::init(); |
| + |
| + if (Platform::current() && Platform::current()->currentThread()) |
| + Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); |
| } |
| -#if ENABLE(ASSERT) |
| -bool Heap::weakTableRegistered(const void* table) |
| +void Heap::shutdown() |
| { |
| - ASSERT(s_ephemeronStack); |
| - return s_ephemeronStack->hasCallbackForObject(table); |
| + if (Platform::current() && Platform::current()->currentThread()) |
| + Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); |
| + |
| + s_shutdownCalled = true; |
| + doShutdownIfNecessary(); |
| } |
| -#endif |
| -void Heap::decommitCallbackStacks() |
| +void Heap::doShutdownIfNecessary() |
|
haraken
2016/02/29 11:17:45
BTW, I'm wondering if we still need the complicated… [reviewer comment truncated in this capture]
|
| +{ |
| + MutexLocker locker(Heap::heapAttachMutex()); |
| + if (!s_shutdownCalled || !all().isEmpty()) |
|
haraken
2016/02/29 11:17:45
Checking all().isEmpty() here isn't quite correct.
|
| + return; |
| + |
| + // We don't want to shutdown twice. |
| + if (s_shutdownComplete) |
| + return; |
| + |
| + GCInfoTable::shutdown(); |
|
haraken
2016/02/29 11:17:45
This must be executed once per process.
|
| + ThreadState::shutdown(); |
|
haraken
2016/02/29 11:17:45
This must be executed once per ThreadState.
|
| + ASSERT(Heap::totalAllocatedSpace() == 0); |
|
haraken
2016/02/29 11:17:45
This must be executed once per process.
Also mayb[e]… [reviewer comment truncated in this capture]
|
| + s_shutdownComplete = true; |
| +} |
| + |
| +HashSet<Heap*>& Heap::all() |
| { |
| - s_markingStack->decommit(); |
| - s_postMarkingCallbackStack->decommit(); |
| - s_globalWeakCallbackStack->decommit(); |
| - s_ephemeronStack->decommit(); |
| + DEFINE_STATIC_LOCAL(HashSet<Heap*>, heaps, ()); |
| + return heaps; |
| } |
| -void Heap::preGC() |
| +RecursiveMutex& Heap::heapAttachMutex() |
| { |
| - ASSERT(!ThreadState::current()->isInGC()); |
| - for (ThreadState* state : ThreadState::attachedThreads()) |
| - state->preGC(); |
| + DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)); |
| + return mutex; |
| } |
| -void Heap::postGC(BlinkGC::GCType gcType) |
| +CrossThreadPersistentRegion& Heap::crossThreadPersistentRegion() |
|
haraken
2016/02/29 11:17:45
This is also per-process stuff.
|
| { |
| - ASSERT(ThreadState::current()->isInGC()); |
| - for (ThreadState* state : ThreadState::attachedThreads()) |
| - state->postGC(gcType); |
| + DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); |
| + return persistentRegion; |
| } |
| const char* Heap::gcReasonString(BlinkGC::GCReason reason) |
| @@ -362,10 +623,10 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType |
| SafePointScope safePointScope(stackState, state); |
| // Resume all parked threads upon leaving this scope. |
| - ParkThreadsScope parkThreadsScope; |
| + ParkThreadsScope parkThreadsScope(state); |
| // Try to park the other threads. If we're unable to, bail out of the GC. |
| - if (!parkThreadsScope.parkThreads(state)) |
| + if (!parkThreadsScope.parkThreads()) |
| return; |
| ScriptForbiddenIfMainThreadScope scriptForbidden; |
| @@ -383,34 +644,34 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType |
| // finalization that happens when the visitorScope is torn down). |
| ThreadState::NoAllocationScope noAllocationScope(state); |
| - preGC(); |
| + state->heap().preGC(); |
| StackFrameDepthScope stackDepthScope; |
| - size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); |
| + size_t totalObjectSize = state->heap().heapStats().allocatedObjectSize() + state->heap().heapStats().markedObjectSize(); |
| if (gcType != BlinkGC::TakeSnapshot) |
| - Heap::resetHeapCounters(); |
| + state->heap().resetHeapCounters(); |
| // 1. Trace persistent roots. |
| - ThreadState::visitPersistentRoots(visitorScope.visitor()); |
| + state->heap().visitPersistentRoots(visitorScope.visitor()); |
| // 2. Trace objects reachable from the stack. We do this independent of the |
| // given stackState since other threads might have a different stack state. |
| - ThreadState::visitStackRoots(visitorScope.visitor()); |
| + state->heap().visitStackRoots(visitorScope.visitor()); |
| // 3. Transitive closure to trace objects including ephemerons. |
| - processMarkingStack(visitorScope.visitor()); |
| + state->heap().processMarkingStack(visitorScope.visitor()); |
| - postMarkingProcessing(visitorScope.visitor()); |
| - globalWeakProcessing(visitorScope.visitor()); |
| + state->heap().postMarkingProcessing(visitorScope.visitor()); |
| + state->heap().globalWeakProcessing(visitorScope.visitor()); |
| // Now we can delete all orphaned pages because there are no dangling |
| // pointers to the orphaned pages. (If we have such dangling pointers, |
| // we should have crashed during marking before getting here.) |
| - orphanedPagePool()->decommitOrphanedPages(); |
| + state->heap().orphanedPagePool()->decommitOrphanedPages(); |
| double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
| - s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; |
| + state->heap().heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
| #if PRINT_HEAP_STATS |
| dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); |
| @@ -419,24 +680,17 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType |
| DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); |
| markingTimeHistogram.count(markingTimeInMilliseconds); |
| DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); |
| - totalObjectSpaceHistogram.count(Heap::allocatedObjectSize() / 1024); |
| + totalObjectSpaceHistogram.count(Heap::totalAllocatedObjectSize() / 1024); |
| DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); |
| - totalAllocatedSpaceHistogram.count(Heap::allocatedSpace() / 1024); |
| + totalAllocatedSpaceHistogram.count(Heap::totalAllocatedSpace() / 1024); |
| DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); |
| gcReasonHistogram.count(reason); |
| Heap::reportMemoryUsageHistogram(); |
| WTF::Partitions::reportMemoryUsageHistogram(); |
| - postGC(gcType); |
| - Heap::decommitCallbackStacks(); |
| - |
| -#if ENABLE(ASSERT) |
| - // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneration. |
| - if (++s_gcGeneration == 0) { |
| - s_gcGeneration = 1; |
| - } |
| -#endif |
| + state->heap().postGC(gcType); |
| + state->heap().decommitCallbackStacks(); |
| } |
| void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| @@ -466,100 +720,30 @@ void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| // 2. Trace objects reachable from the thread's persistent roots |
| // including ephemerons. |
| - processMarkingStack(visitorScope.visitor()); |
| + state->heap().processMarkingStack(visitorScope.visitor()); |
| - postMarkingProcessing(visitorScope.visitor()); |
| - globalWeakProcessing(visitorScope.visitor()); |
| + state->heap().postMarkingProcessing(visitorScope.visitor()); |
| + state->heap().globalWeakProcessing(visitorScope.visitor()); |
| state->postGC(BlinkGC::GCWithSweep); |
| - Heap::decommitCallbackStacks(); |
| + state->heap().decommitCallbackStacks(); |
| } |
| state->preSweep(); |
| } |
| -void Heap::processMarkingStack(Visitor* visitor) |
| -{ |
| - // Ephemeron fixed point loop. |
| - do { |
| - { |
| - // Iteratively mark all objects that are reachable from the objects |
| - // currently pushed onto the marking stack. |
| - TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); |
| - while (popAndInvokeTraceCallback(visitor)) { } |
| - } |
| - |
| - { |
| - // Mark any strong pointers that have now become reachable in |
| - // ephemeron maps. |
| - TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); |
| - s_ephemeronStack->invokeEphemeronCallbacks(visitor); |
| - } |
| - |
| - // Rerun loop if ephemeron processing queued more objects for tracing. |
| - } while (!s_markingStack->isEmpty()); |
| -} |
| - |
| -void Heap::postMarkingProcessing(Visitor* visitor) |
| -{ |
| - TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); |
| - // Call post-marking callbacks including: |
| - // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
| - // (specifically to clear the queued bits for weak hash tables), and |
| - // 2. the markNoTracing callbacks on collection backings to mark them |
| - // if they are only reachable from their front objects. |
| - while (popAndInvokePostMarkingCallback(visitor)) { } |
| - |
| - // Post-marking callbacks should not trace any objects and |
| - // therefore the marking stack should be empty after the |
| - // post-marking callbacks. |
| - ASSERT(s_markingStack->isEmpty()); |
| -} |
| - |
| -void Heap::globalWeakProcessing(Visitor* visitor) |
| -{ |
| - TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); |
| - double startTime = WTF::currentTimeMS(); |
| - |
| - // Call weak callbacks on objects that may now be pointing to dead objects. |
| - while (popAndInvokeGlobalWeakCallback(visitor)) { } |
| - |
| - // It is not permitted to trace pointers of live objects in the weak |
| - // callback phase, so the marking stack should still be empty here. |
| - ASSERT(s_markingStack->isEmpty()); |
| - |
| - double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
| - DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakPrcessing", 1, 10 * 1000, 50)); |
| - globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); |
| -} |
| - |
| void Heap::collectAllGarbage() |
| { |
| // We need to run multiple GCs to collect a chain of persistent handles. |
| size_t previousLiveObjects = 0; |
| for (int i = 0; i < 5; ++i) { |
| collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); |
| - size_t liveObjects = Heap::markedObjectSize(); |
| + size_t liveObjects = ThreadState::current()->heap().heapStats().markedObjectSize(); |
| if (liveObjects == previousLiveObjects) |
| break; |
| previousLiveObjects = liveObjects; |
| } |
| } |
| -double Heap::estimatedMarkingTime() |
| -{ |
| - ASSERT(ThreadState::current()->isMainThread()); |
| - |
| - // Use 8 ms as initial estimated marking time. |
| - // 8 ms is long enough for low-end mobile devices to mark common |
| - // real-world object graphs. |
| - if (s_estimatedMarkingTimePerByte == 0) |
| - return 0.008; |
| - |
| - // Assuming that the collection rate of this GC will be mostly equal to |
| - // the collection rate of the last GC, estimate the marking time of this GC. |
| - return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap::markedObjectSize()); |
| -} |
| - |
| void Heap::reportMemoryUsageHistogram() |
| { |
| static size_t supportedMaxSizeInMB = 4 * 1024; |
| @@ -569,7 +753,7 @@ void Heap::reportMemoryUsageHistogram() |
| if (!isMainThread()) |
| return; |
| // +1 is for rounding up the sizeInMB. |
| - size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1; |
| + size_t sizeInMB = ThreadState::current()->heap().heapStats().allocatedSpace() / 1024 / 1024 + 1; |
| if (sizeInMB >= supportedMaxSizeInMB) |
| sizeInMB = supportedMaxSizeInMB - 1; |
| if (sizeInMB > observedMaxSizeInMB) { |
| @@ -592,106 +776,26 @@ void Heap::reportMemoryUsageForTracing() |
| if (!gcTracingEnabled) |
| return; |
| + Heap& heap = ThreadState::current()->heap(); |
| // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). |
| // They are capped to INT_MAX just in case. |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtLastGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrapperCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX))); |
| - TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedObjectSizeKB", std::min(heap.heapStats().allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeKB", std::min(heap.heapStats().markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(heap.heapStats().markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceKB", std::min(heap.heapStats().allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLastGCKB", std::min(heap.heapStats().objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(heap.heapStats().wrapperCount(), static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtLastGC", std::min(heap.heapStats().wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrapperCount", std::min(heap.heapStats().collectedWrapperCount(), static_cast<size_t>(INT_MAX))); |
| + TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocSizeAtLastGCKB", std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); |
| TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); |
| } |
| -size_t Heap::objectPayloadSizeForTesting() |
| -{ |
| - size_t objectPayloadSize = 0; |
| - for (ThreadState* state : ThreadState::attachedThreads()) { |
| - state->setGCState(ThreadState::GCRunning); |
| - state->makeConsistentForGC(); |
| - objectPayloadSize += state->objectPayloadSizeForTesting(); |
| - state->setGCState(ThreadState::EagerSweepScheduled); |
| - state->setGCState(ThreadState::Sweeping); |
| - state->setGCState(ThreadState::NoGCScheduled); |
| - } |
| - return objectPayloadSize; |
| -} |
| - |
| -BasePage* Heap::lookup(Address address) |
| -{ |
| - ASSERT(ThreadState::current()->isInGC()); |
| - if (!s_regionTree) |
| - return nullptr; |
| - if (PageMemoryRegion* region = s_regionTree->lookup(address)) { |
| - BasePage* page = region->pageFromAddress(address); |
| - return page && !page->orphaned() ? page : nullptr; |
| - } |
| - return nullptr; |
| -} |
| - |
| -static Mutex& regionTreeMutex() |
| -{ |
| - DEFINE_THREAD_SAFE_STATIC_LOCAL(Mutex, mutex, new Mutex); |
| - return mutex; |
| -} |
| - |
| -void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| -{ |
| - // Deletion of large objects (and thus their regions) can happen |
| - // concurrently on sweeper threads. Removal can also happen during thread |
| - // shutdown, but that case is safe. Regardless, we make all removals |
| - // mutually exclusive. |
| - MutexLocker locker(regionTreeMutex()); |
| - RegionTree::remove(region, &s_regionTree); |
| -} |
| - |
| -void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| -{ |
| - MutexLocker locker(regionTreeMutex()); |
| - RegionTree::add(new RegionTree(region), &s_regionTree); |
| -} |
| - |
| -void Heap::resetHeapCounters() |
| -{ |
| - ASSERT(ThreadState::current()->isInGC()); |
| - |
| - Heap::reportMemoryUsageForTracing(); |
| - |
| - s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize; |
| - s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); |
| - s_allocatedObjectSize = 0; |
| - s_markedObjectSize = 0; |
| - s_wrapperCountAtLastGC = s_wrapperCount; |
| - s_collectedWrapperCount = 0; |
| - for (ThreadState* state : ThreadState::attachedThreads()) |
| - state->resetHeapCounters(); |
| -} |
| - |
| -CallbackStack* Heap::s_markingStack; |
| -CallbackStack* Heap::s_postMarkingCallbackStack; |
| -CallbackStack* Heap::s_globalWeakCallbackStack; |
| -CallbackStack* Heap::s_ephemeronStack; |
| -HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| bool Heap::s_shutdownCalled = false; |
| -FreePagePool* Heap::s_freePagePool; |
| -OrphanedPagePool* Heap::s_orphanedPagePool; |
| -RegionTree* Heap::s_regionTree = nullptr; |
| -size_t Heap::s_allocatedSpace = 0; |
| -size_t Heap::s_allocatedObjectSize = 0; |
| -size_t Heap::s_objectSizeAtLastGC = 0; |
| -size_t Heap::s_markedObjectSize = 0; |
| -size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; |
| -size_t Heap::s_wrapperCount = 0; |
| -size_t Heap::s_wrapperCountAtLastGC = 0; |
| -size_t Heap::s_collectedWrapperCount = 0; |
| -size_t Heap::s_partitionAllocSizeAtLastGC = 0; |
| -double Heap::s_estimatedMarkingTimePerByte = 0.0; |
| +bool Heap::s_shutdownComplete = false; |
| bool Heap::s_isLowEndDevice = false; |
| -#if ENABLE(ASSERT) |
| -uint16_t Heap::s_gcGeneration = 0; |
| -#endif |
| +size_t Heap::s_totalAllocatedSpace = 0; |
| +size_t Heap::s_totalAllocatedObjectSize = 0; |
| +size_t Heap::s_totalMarkedObjectSize = 0; |
| } // namespace blink |