Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(830)

Unified Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: third_party/WebKit/Source/platform/heap/Heap.cpp
diff --git a/third_party/WebKit/Source/platform/heap/Heap.cpp b/third_party/WebKit/Source/platform/heap/Heap.cpp
index fd2b765e5fb3dccf9ab3f7b998422668810d292d..ded9d5889b1cb21c90b45fd92a77c99c3f53077f 100644
--- a/third_party/WebKit/Source/platform/heap/Heap.cpp
+++ b/third_party/WebKit/Source/platform/heap/Heap.cpp
@@ -146,34 +146,9 @@ private:
bool m_resumeThreads;
};
-void Heap::flushHeapDoesNotContainCache()
-{
- s_heapDoesNotContainCache->flush();
-}
-
void Heap::init()
{
ThreadState::init();
- s_markingStack = new CallbackStack();
- s_postMarkingCallbackStack = new CallbackStack();
- s_globalWeakCallbackStack = new CallbackStack();
- s_ephemeronStack = new CallbackStack();
- s_heapDoesNotContainCache = new HeapDoesNotContainCache();
- s_freePagePool = new FreePagePool();
- s_orphanedPagePool = new OrphanedPagePool();
- s_allocatedSpace = 0;
- s_allocatedObjectSize = 0;
- s_objectSizeAtLastGC = 0;
- s_markedObjectSize = 0;
- s_markedObjectSizeAtLastCompleteSweep = 0;
- s_wrapperCount = 0;
- s_wrapperCountAtLastGC = 0;
- s_collectedWrapperCount = 0;
- s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
- s_estimatedMarkingTimePerByte = 0.0;
-#if ENABLE(ASSERT)
- s_gcGeneration = 1;
-#endif
GCInfoTable::init();
@@ -185,36 +160,16 @@ void Heap::shutdown()
{
if (Platform::current() && Platform::current()->currentThread())
Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance());
- s_shutdownCalled = true;
ThreadState::shutdownHeapIfNecessary();
}
void Heap::doShutdown()
{
- // We don't want to call doShutdown() twice.
- if (!s_markingStack)
- return;
-
ASSERT(!ThreadState::attachedThreads().size());
- delete s_heapDoesNotContainCache;
- s_heapDoesNotContainCache = nullptr;
- delete s_freePagePool;
- s_freePagePool = nullptr;
- delete s_orphanedPagePool;
- s_orphanedPagePool = nullptr;
- delete s_globalWeakCallbackStack;
- s_globalWeakCallbackStack = nullptr;
- delete s_postMarkingCallbackStack;
- s_postMarkingCallbackStack = nullptr;
- delete s_markingStack;
- s_markingStack = nullptr;
- delete s_ephemeronStack;
- s_ephemeronStack = nullptr;
- delete s_regionTree;
- s_regionTree = nullptr;
GCInfoTable::shutdown();
ThreadState::shutdown();
- ASSERT(Heap::allocatedSpace() == 0);
+ // FIXME: should be zero
+ //ASSERT(ThreadState::terminating()->allocatedSpace() == 0);
}
#if ENABLE(ASSERT)
@@ -234,23 +189,23 @@ Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
ASSERT(ThreadState::current()->isInGC());
#if !ENABLE(ASSERT)
- if (s_heapDoesNotContainCache->lookup(address))
+ if (ThreadState::current()->heapDoesNotContainCache()->lookup(address))
return nullptr;
#endif
if (BasePage* page = lookup(address)) {
ASSERT(page->contains(address));
ASSERT(!page->orphaned());
- ASSERT(!s_heapDoesNotContainCache->lookup(address));
+ ASSERT(!ThreadState::current()->heapDoesNotContainCache()->lookup(address));
page->checkAndMarkPointer(visitor, address);
return address;
}
#if !ENABLE(ASSERT)
- s_heapDoesNotContainCache->addEntry(address);
+ ThreadState::current()->heapDoesNotContainCache()->addEntry(address);
#else
- if (!s_heapDoesNotContainCache->lookup(address))
- s_heapDoesNotContainCache->addEntry(address);
+ if (!ThreadState::current()->heapDoesNotContainCache()->lookup(address))
+ ThreadState::current()->heapDoesNotContainCache()->addEntry(address);
#endif
return nullptr;
}
@@ -260,14 +215,14 @@ void Heap::pushTraceCallback(void* object, TraceCallback callback)
ASSERT(ThreadState::current()->isInGC());
// Trace should never reach an orphaned page.
- ASSERT(!Heap::orphanedPagePool()->contains(object));
- CallbackStack::Item* slot = s_markingStack->allocateEntry();
+ ASSERT(!ThreadState::current()->orphanedPagePool()->contains(object));
+ CallbackStack::Item* slot = ThreadState::current()->markingStack()->allocateEntry();
*slot = CallbackStack::Item(object, callback);
}
bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
- CallbackStack::Item* item = s_markingStack->pop();
+ CallbackStack::Item* item = ThreadState::current()->markingStack()->pop();
haraken 2015/11/30 02:54:41 At the moment this is okay, but ThreadState::curre
keishi 2016/01/06 05:35:33 Done.
if (!item)
return false;
item->call(visitor);
@@ -279,14 +234,14 @@ void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
ASSERT(ThreadState::current()->isInGC());
// Trace should never reach an orphaned page.
- ASSERT(!Heap::orphanedPagePool()->contains(object));
- CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
+ ASSERT(!ThreadState::current()->orphanedPagePool()->contains(object));
+ CallbackStack::Item* slot = ThreadState::current()->postMarkingCallbackStack()->allocateEntry();
*slot = CallbackStack::Item(object, callback);
}
bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
{
- if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) {
+ if (CallbackStack::Item* item = ThreadState::current()->postMarkingCallbackStack()->pop()) {
item->call(visitor);
return true;
}
@@ -298,8 +253,8 @@ void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
ASSERT(ThreadState::current()->isInGC());
// Trace should never reach an orphaned page.
- ASSERT(!Heap::orphanedPagePool()->contains(cell));
- CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry();
+ ASSERT(!ThreadState::current()->orphanedPagePool()->contains(cell));
+ CallbackStack::Item* slot = ThreadState::current()->globalWeakCallbackStack()->allocateEntry();
*slot = CallbackStack::Item(cell, callback);
}
@@ -308,14 +263,14 @@ void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback
ASSERT(ThreadState::current()->isInGC());
// Trace should never reach an orphaned page.
- ASSERT(!Heap::orphanedPagePool()->contains(object));
+ ASSERT(!ThreadState::current()->orphanedPagePool()->contains(object));
ThreadState* state = pageFromObject(object)->heap()->threadState();
state->pushThreadLocalWeakCallback(closure, callback);
}
bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor)
{
- if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) {
+ if (CallbackStack::Item* item = ThreadState::current()->globalWeakCallbackStack()->pop()) {
item->call(visitor);
return true;
}
@@ -327,8 +282,8 @@ void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E
ASSERT(ThreadState::current()->isInGC());
// Trace should never reach an orphaned page.
- ASSERT(!Heap::orphanedPagePool()->contains(table));
- CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
+ ASSERT(!ThreadState::current()->orphanedPagePool()->contains(table));
+ CallbackStack::Item* slot = ThreadState::current()->ephemeronStack()->allocateEntry();
*slot = CallbackStack::Item(table, iterationCallback);
// Register a post-marking callback to tell the tables that
@@ -339,8 +294,8 @@ void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E
#if ENABLE(ASSERT)
bool Heap::weakTableRegistered(const void* table)
{
- ASSERT(s_ephemeronStack);
- return s_ephemeronStack->hasCallbackForObject(table);
+ ASSERT(ThreadState::current()->ephemeronStack());
+ return ThreadState::current()->ephemeronStack()->hasCallbackForObject(table);
}
#endif
@@ -418,9 +373,9 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType
StackFrameDepthScope stackDepthScope;
- size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSize();
+ size_t totalObjectSize = ThreadState::current()->allocatedObjectSize() + ThreadState::current()->markedObjectSize();
if (gcType != BlinkGC::TakeSnapshot)
- Heap::resetHeapCounters();
+ state->resetHeapCounters();
// 1. Trace persistent roots.
ThreadState::visitPersistentRoots(gcScope.visitor());
@@ -438,29 +393,27 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType
// Now we can delete all orphaned pages because there are no dangling
// pointers to the orphaned pages. (If we have such dangling pointers,
// we should have crashed during marking before getting here.)
- orphanedPagePool()->decommitOrphanedPages();
+ state->orphanedPagePool()->decommitOrphanedPages();
double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime;
- s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0;
+ ThreadState::current()->setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0);
#if PRINT_HEAP_STATS
dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds);
#endif
Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", markingTimeInMilliseconds, 0, 10 * 1000, 50);
- Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
- Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
+ Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", ThreadState::current()->allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
+ Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", ThreadState::current()->allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkGC::NumberOfGCReason);
- Heap::reportMemoryUsageHistogram();
+ ThreadState::current()->reportMemoryUsageHistogram();
WTF::Partitions::reportMemoryUsageHistogram();
postGC(gcType);
#if ENABLE(ASSERT)
// 0 is used to identify a non-assigned area, so avoid using 0 in s_gcGeneration.
- if (++s_gcGeneration == 0) {
- s_gcGeneration = 1;
- }
+ ThreadState::current()->incrementGcGeneration();
#endif
if (state->isMainThread())
@@ -504,6 +457,43 @@ void Heap::collectGarbageForTerminatingThread(ThreadState* state)
state->preSweep();
}
+void Heap::collectGarbageForIsolatedThread(ThreadState* state)
+{
+ {
+ // A thread-specific termination GC must not allow other global GCs to go
+ // ahead while it is running, hence the termination GC does not enter a
+ // safepoint. GCScope will also not enter a safepoint scope for
+ // ThreadTerminationGC.
+ GCScope gcScope(state, BlinkGC::NoHeapPointersOnStack, BlinkGC::ThreadTerminationGC);
haraken 2015/11/30 02:54:42 As commented in ThreadState.cpp, it is wrong to se
keishi 2016/01/06 05:35:33 Done.
+
+ ThreadState::NoAllocationScope noAllocationScope(state);
+
+ state->preGC();
+
+ // 1. Trace the thread local persistent roots. For thread local GCs we
+ // don't trace the stack (i.e. no conservative scanning) since this is
+ // only called during thread shutdown where there should be no objects
+ // on the stack.
+ // We also assume that orphaned pages have no objects reachable from
+ // persistent handles on other threads or CrossThreadPersistents. The
+ // only cases where this could happen is if a subsequent conservative
+ // global GC finds a "pointer" on the stack or due to a programming
+ // error where an object has a dangling cross-thread pointer to an
+ // object on this heap.
+ state->visitPersistents(gcScope.visitor());
+
+ // 2. Trace objects reachable from the thread's persistent roots
+ // including ephemerons.
+ processMarkingStack(gcScope.visitor());
+
+ postMarkingProcessing(gcScope.visitor());
+ globalWeakProcessing(gcScope.visitor());
+
+ state->postGC(BlinkGC::GCWithSweep);
+ }
+ state->preSweep();
+}
+
void Heap::processMarkingStack(Visitor* visitor)
{
// Ephemeron fixed point loop.
@@ -519,11 +509,11 @@ void Heap::processMarkingStack(Visitor* visitor)
// Mark any strong pointers that have now become reachable in
// ephemeron maps.
TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
- s_ephemeronStack->invokeEphemeronCallbacks(visitor);
+ ThreadState::current()->ephemeronStack()->invokeEphemeronCallbacks(visitor);
}
// Rerun loop if ephemeron processing queued more objects for tracing.
- } while (!s_markingStack->isEmpty());
+ } while (!ThreadState::current()->markingStack()->isEmpty());
}
void Heap::postMarkingProcessing(Visitor* visitor)
@@ -536,12 +526,12 @@ void Heap::postMarkingProcessing(Visitor* visitor)
// if they are only reachable from their front objects.
while (popAndInvokePostMarkingCallback(visitor)) { }
- s_ephemeronStack->clear();
+ ThreadState::current()->ephemeronStack()->clear();
// Post-marking callbacks should not trace any objects and
// therefore the marking stack should be empty after the
// post-marking callbacks.
- ASSERT(s_markingStack->isEmpty());
+ ASSERT(ThreadState::current()->markingStack()->isEmpty());
}
void Heap::globalWeakProcessing(Visitor* visitor)
@@ -554,7 +544,7 @@ void Heap::globalWeakProcessing(Visitor* visitor)
// It is not permitted to trace pointers of live objects in the weak
// callback phase, so the marking stack should still be empty here.
- ASSERT(s_markingStack->isEmpty());
+ ASSERT(ThreadState::current()->markingStack()->isEmpty());
double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime;
Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessing", timeForGlobalWeakProcessing, 1, 10 * 1000, 50);
@@ -566,73 +556,13 @@ void Heap::collectAllGarbage()
size_t previousLiveObjects = 0;
for (int i = 0; i < 5; ++i) {
collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC);
- size_t liveObjects = Heap::markedObjectSize();
+ size_t liveObjects = ThreadState::current()->markedObjectSize();
if (liveObjects == previousLiveObjects)
break;
previousLiveObjects = liveObjects;
}
}
-double Heap::estimatedMarkingTime()
-{
- ASSERT(ThreadState::current()->isMainThread());
-
- // Use 8 ms as initial estimated marking time.
- // 8 ms is long enough for low-end mobile devices to mark common
- // real-world object graphs.
- if (s_estimatedMarkingTimePerByte == 0)
- return 0.008;
-
- // Assuming that the collection rate of this GC will be mostly equal to
- // the collection rate of the last GC, estimate the marking time of this GC.
- return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap::markedObjectSize());
-}
-
-void Heap::reportMemoryUsageHistogram()
-{
- static size_t supportedMaxSizeInMB = 4 * 1024;
- static size_t observedMaxSizeInMB = 0;
-
- // We only report the memory in the main thread.
- if (!isMainThread())
- return;
- // +1 is for rounding up the sizeInMB.
- size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1;
- if (sizeInMB >= supportedMaxSizeInMB)
- sizeInMB = supportedMaxSizeInMB - 1;
- if (sizeInMB > observedMaxSizeInMB) {
- // Send a UseCounter only when we see the highest memory usage
- // we've ever seen.
- Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeInMB, supportedMaxSizeInMB);
- observedMaxSizeInMB = sizeInMB;
- }
-}
-
-void Heap::reportMemoryUsageForTracing()
-{
-#if PRINT_HEAP_STATS
- // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount());
-#endif
-
- bool gcTracingEnabled;
- TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
- if (!gcTracingEnabled)
- return;
-
- // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints).
- // They are capped to INT_MAX just in case.
- TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtLastGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrapperCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX)));
-}
-
size_t Heap::objectPayloadSizeForTesting()
{
size_t objectPayloadSize = 0;
@@ -650,130 +580,37 @@ size_t Heap::objectPayloadSizeForTesting()
BasePage* Heap::lookup(Address address)
{
ASSERT(ThreadState::current()->isInGC());
- if (!s_regionTree)
+ if (!ThreadState::current()->regionTree())
return nullptr;
- if (PageMemoryRegion* region = s_regionTree->lookup(address)) {
+ if (PageMemoryRegion* region = ThreadState::current()->regionTree()->lookup(address)) {
BasePage* page = region->pageFromAddress(address);
return page && !page->orphaned() ? page : nullptr;
}
return nullptr;
}
-static Mutex& regionTreeMutex()
-{
- AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex);
- return mutex;
-}
-
void Heap::removePageMemoryRegion(PageMemoryRegion* region)
{
// Deletion of large objects (and thus their regions) can happen
// concurrently on sweeper threads. Removal can also happen during thread
// shutdown, but that case is safe. Regardless, we make all removals
// mutually exclusive.
- MutexLocker locker(regionTreeMutex());
- RegionTree::remove(region, &s_regionTree);
+ //MutexLocker locker(regionTreeMutex());
+ ThreadState* state = ThreadState::current();
+ if (!state)
+ state = ThreadState::terminating();
haraken 2015/11/30 02:54:42 I'm wondering why this is needed. We should make s
+ ThreadState::RegionTree* regionTree = state->regionTree();
+ ThreadState::RegionTree::remove(region, &regionTree);
+ state->setRegionTree(regionTree);
haraken 2015/11/30 02:54:42 Help me understand: What are you doing by setting
}
void Heap::addPageMemoryRegion(PageMemoryRegion* region)
{
- MutexLocker locker(regionTreeMutex());
- RegionTree::add(new RegionTree(region), &s_regionTree);
-}
-
-PageMemoryRegion* Heap::RegionTree::lookup(Address address)
-{
- RegionTree* current = s_regionTree;
- while (current) {
- Address base = current->m_region->base();
- if (address < base) {
- current = current->m_left;
- continue;
- }
- if (address >= base + current->m_region->size()) {
- current = current->m_right;
- continue;
- }
- ASSERT(current->m_region->contains(address));
- return current->m_region;
- }
- return nullptr;
-}
-
-void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context)
-{
- ASSERT(newTree);
- Address base = newTree->m_region->base();
- for (RegionTree* current = *context; current; current = *context) {
- ASSERT(!current->m_region->contains(base));
- context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
- }
- *context = newTree;
-}
-
-void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context)
-{
- ASSERT(region);
- ASSERT(context);
- Address base = region->base();
- RegionTree* current = *context;
- for (; current; current = *context) {
- if (region == current->m_region)
- break;
- context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
- }
-
- // Shutdown via detachMainThread might not have populated the region tree.
- if (!current)
- return;
-
- *context = nullptr;
- if (current->m_left) {
- add(current->m_left, context);
- current->m_left = nullptr;
- }
- if (current->m_right) {
- add(current->m_right, context);
- current->m_right = nullptr;
- }
- delete current;
-}
-
-void Heap::resetHeapCounters()
-{
- ASSERT(ThreadState::current()->isInGC());
-
- Heap::reportMemoryUsageForTracing();
-
- s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize;
- s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
- s_allocatedObjectSize = 0;
- s_markedObjectSize = 0;
- s_wrapperCountAtLastGC = s_wrapperCount;
- s_collectedWrapperCount = 0;
+ ASSERT(ThreadState::current() && !ThreadState::terminating());
+ //MutexLocker locker(regionTreeMutex());
+ ThreadState::RegionTree* regionTree = ThreadState::current()->regionTree();
+ ThreadState::RegionTree::add(new ThreadState::RegionTree(region), &regionTree);
+ ThreadState::current()->setRegionTree(regionTree);
}
-CallbackStack* Heap::s_markingStack;
-CallbackStack* Heap::s_postMarkingCallbackStack;
-CallbackStack* Heap::s_globalWeakCallbackStack;
-CallbackStack* Heap::s_ephemeronStack;
-HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
-bool Heap::s_shutdownCalled = false;
-FreePagePool* Heap::s_freePagePool;
-OrphanedPagePool* Heap::s_orphanedPagePool;
-Heap::RegionTree* Heap::s_regionTree = nullptr;
-size_t Heap::s_allocatedSpace = 0;
-size_t Heap::s_allocatedObjectSize = 0;
-size_t Heap::s_objectSizeAtLastGC = 0;
-size_t Heap::s_markedObjectSize = 0;
-size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0;
-size_t Heap::s_wrapperCount = 0;
-size_t Heap::s_wrapperCountAtLastGC = 0;
-size_t Heap::s_collectedWrapperCount = 0;
-size_t Heap::s_partitionAllocSizeAtLastGC = 0;
-double Heap::s_estimatedMarkingTimePerByte = 0.0;
-#if ENABLE(ASSERT)
-uint16_t Heap::s_gcGeneration = 0;
-#endif
-
} // namespace blink

Powered by Google App Engine
This is Rietveld 408576698