Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(17)

Unified Diff: third_party/WebKit/Source/platform/heap/Heap.cpp

Issue 1477023003: Refactor the Heap into ThreadHeap to prepare for per thread heaps Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 4 years, 11 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: third_party/WebKit/Source/platform/heap/Heap.cpp
diff --git a/third_party/WebKit/Source/platform/heap/Heap.cpp b/third_party/WebKit/Source/platform/heap/Heap.cpp
index 1b27d1e49dae6ef3a2f86fd70fb47e012d0b1799..00a203d118799ded7bb46112a9714c71710ffb99 100644
--- a/third_party/WebKit/Source/platform/heap/Heap.cpp
+++ b/third_party/WebKit/Source/platform/heap/Heap.cpp
@@ -144,34 +144,12 @@ private:
bool m_resumeThreads;
};
-void Heap::flushHeapDoesNotContainCache()
-{
- s_heapDoesNotContainCache->flush();
-}
-
void Heap::init()
{
ThreadState::init();
- s_markingStack = new CallbackStack();
- s_postMarkingCallbackStack = new CallbackStack();
- s_globalWeakCallbackStack = new CallbackStack();
- s_ephemeronStack = new CallbackStack();
- s_heapDoesNotContainCache = new HeapDoesNotContainCache();
+
s_freePagePool = new FreePagePool();
s_orphanedPagePool = new OrphanedPagePool();
- s_allocatedSpace = 0;
- s_allocatedObjectSize = 0;
- s_objectSizeAtLastGC = 0;
- s_markedObjectSize = 0;
- s_markedObjectSizeAtLastCompleteSweep = 0;
- s_wrapperCount = 0;
- s_wrapperCountAtLastGC = 0;
- s_collectedWrapperCount = 0;
- s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
- s_estimatedMarkingTimePerByte = 0.0;
-#if ENABLE(ASSERT)
- s_gcGeneration = 1;
-#endif
GCInfoTable::init();
@@ -190,29 +168,15 @@ void Heap::shutdown()
void Heap::doShutdown()
{
// We don't want to call doShutdown() twice.
- if (!s_markingStack)
+ if (!s_freePagePool)
return;
-
ASSERT(!ThreadState::attachedThreads().size());
- delete s_heapDoesNotContainCache;
- s_heapDoesNotContainCache = nullptr;
+ delete s_orphanedPagePool;
delete s_freePagePool;
s_freePagePool = nullptr;
- delete s_orphanedPagePool;
s_orphanedPagePool = nullptr;
- delete s_globalWeakCallbackStack;
- s_globalWeakCallbackStack = nullptr;
- delete s_postMarkingCallbackStack;
- s_postMarkingCallbackStack = nullptr;
- delete s_markingStack;
- s_markingStack = nullptr;
- delete s_ephemeronStack;
- s_ephemeronStack = nullptr;
- delete s_regionTree;
- s_regionTree = nullptr;
GCInfoTable::shutdown();
ThreadState::shutdown();
- ASSERT(Heap::allocatedSpace() == 0);
}
#if ENABLE(ASSERT)
@@ -229,43 +193,44 @@ BasePage* Heap::findPageFromAddress(Address address)
Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
{
- ASSERT(ThreadState::current()->isInGC());
+ ThreadState* threadState = visitor->threadState();
+ ASSERT(threadState->isInGC());
#if !ENABLE(ASSERT)
- if (s_heapDoesNotContainCache->lookup(address))
+ if (threadState->heapDoesNotContainCache()->lookup(address))
return nullptr;
#endif
- if (BasePage* page = lookup(address)) {
+ if (BasePage* page = lookup(address, threadState)) {
ASSERT(page->contains(address));
ASSERT(!page->orphaned());
- ASSERT(!s_heapDoesNotContainCache->lookup(address));
+ ASSERT(!threadState->heapDoesNotContainCache()->lookup(address));
page->checkAndMarkPointer(visitor, address);
return address;
}
#if !ENABLE(ASSERT)
- s_heapDoesNotContainCache->addEntry(address);
+ threadState->heapDoesNotContainCache()->addEntry(address);
#else
- if (!s_heapDoesNotContainCache->lookup(address))
- s_heapDoesNotContainCache->addEntry(address);
+ if (!threadState->heapDoesNotContainCache()->lookup(address))
+ threadState->heapDoesNotContainCache()->addEntry(address);
#endif
return nullptr;
}
-void Heap::pushTraceCallback(void* object, TraceCallback callback)
+void Heap::pushTraceCallback(void* object, TraceCallback callback, ThreadState* threadState)
{
- ASSERT(ThreadState::current()->isInGC());
+ ASSERT(threadState->isInGC());
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(object));
- CallbackStack::Item* slot = s_markingStack->allocateEntry();
+ CallbackStack::Item* slot = threadState->markingStack()->allocateEntry();
*slot = CallbackStack::Item(object, callback);
}
bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
{
- CallbackStack::Item* item = s_markingStack->pop();
+ CallbackStack::Item* item = visitor->threadState()->markingStack()->pop();
if (!item)
return false;
item->call(visitor);
@@ -278,26 +243,26 @@ void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(object));
- CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
+ CallbackStack::Item* slot = ThreadState::current()->postMarkingCallbackStack()->allocateEntry();
haraken 2016/01/07 08:06:22 Can we avoid calling ThreadState::current()?
*slot = CallbackStack::Item(object, callback);
}
bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
{
- if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) {
+ if (CallbackStack::Item* item = visitor->threadState()->postMarkingCallbackStack()->pop()) {
item->call(visitor);
return true;
}
return false;
}
-void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
+void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback, ThreadState* threadState)
{
- ASSERT(ThreadState::current()->isInGC());
+ ASSERT(threadState->isInGC());
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(cell));
- CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry();
+ CallbackStack::Item* slot = threadState->globalWeakCallbackStack()->allocateEntry();
*slot = CallbackStack::Item(cell, callback);
}
@@ -313,20 +278,20 @@ void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback
bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor)
{
- if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) {
+ if (CallbackStack::Item* item = visitor->threadState()->globalWeakCallbackStack()->pop()) {
item->call(visitor);
return true;
}
return false;
}
-void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
+void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback, ThreadState* threadState)
{
- ASSERT(ThreadState::current()->isInGC());
+ ASSERT(threadState->isInGC());
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(table));
- CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
+ CallbackStack::Item* slot = threadState->ephemeronStack()->allocateEntry();
*slot = CallbackStack::Item(table, iterationCallback);
// Register a post-marking callback to tell the tables that
@@ -335,10 +300,10 @@ void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E
}
#if ENABLE(ASSERT)
-bool Heap::weakTableRegistered(const void* table)
+bool Heap::weakTableRegistered(const void* table, ThreadState* threadState)
{
- ASSERT(s_ephemeronStack);
- return s_ephemeronStack->hasCallbackForObject(table);
+ ASSERT(threadState->ephemeronStack());
+ return threadState->ephemeronStack()->hasCallbackForObject(table);
}
#endif
@@ -414,9 +379,9 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType
StackFrameDepthScope stackDepthScope;
- size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSize();
+ size_t totalObjectSize = state->allocatedObjectSize() + state->markedObjectSize();
if (gcType != BlinkGC::TakeSnapshot)
- Heap::resetHeapCounters();
+ state->resetHeapCounters();
// 1. Trace persistent roots.
ThreadState::visitPersistentRoots(gcScope.visitor());
@@ -437,26 +402,23 @@ void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType
orphanedPagePool()->decommitOrphanedPages();
double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime;
- s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0;
+ state->setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0);
#if PRINT_HEAP_STATS
dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds);
#endif
Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", markingTimeInMilliseconds, 0, 10 * 1000, 50);
- Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
- Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
+ Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", ThreadState::totalAllocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
+ Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", ThreadState::totalAllocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkGC::NumberOfGCReason);
- Heap::reportMemoryUsageHistogram();
+ state->reportMemoryUsageHistogram();
WTF::Partitions::reportMemoryUsageHistogram();
postGC(gcType);
#if ENABLE(ASSERT)
- // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneration.
- if (++s_gcGeneration == 0) {
- s_gcGeneration = 1;
- }
+ state->incrementGcGeneration();
#endif
}
@@ -497,6 +459,45 @@ void Heap::collectGarbageForTerminatingThread(ThreadState* state)
state->preSweep();
}
+void Heap::collectGarbageForIsolatedThread(ThreadState* state)
+{
+ {
+ // A thread-specific termination GC must not allow other global GCs to go
+ // ahead while it is running, hence the termination GC does not enter a
+ // safepoint. GCScope will also not enter a safepoint scope for
+ // ThreadTerminationGC.
+ GCScope gcScope(state, BlinkGC::HeapPointersOnStack, BlinkGC::ThreadTerminationGC);
haraken 2016/01/07 08:06:22 It is not a good idea to unconditionally assume Bl
haraken 2016/01/07 08:06:22 It is not a good idea to "reuse" the concept of Th
+
+ ThreadState::NoAllocationScope noAllocationScope(state);
+
+ state->preGC();
+
+ // 1. Trace the thread local persistent roots. For thread local GCs we
+ // don't trace the stack (i.e. no conservative scanning) since this is
+ // only called during thread shutdown where there should be no objects
+ // on the stack.
+ // We also assume that orphaned pages have no objects reachable from
+ // persistent handles on other threads or CrossThreadPersistents. The
+ // only case where this could happen is if a subsequent conservative
+ // global GC finds a "pointer" on the stack or due to a programming
+ // error where an object has a dangling cross-thread pointer to an
+ // object on this heap.
+ state->visitPersistents(gcScope.visitor());
+
+ state->visitStack(gcScope.visitor());
+
+ // 2. Trace objects reachable from the thread's persistent roots
+ // including ephemerons.
+ processMarkingStack(gcScope.visitor());
+
+ postMarkingProcessing(gcScope.visitor());
+ globalWeakProcessing(gcScope.visitor());
+
+ state->postGC(BlinkGC::GCWithSweep);
haraken 2016/01/07 08:06:22 We don't want to force sweeping. I don't think we
+ }
+ state->preSweep();
+}
+
void Heap::processMarkingStack(Visitor* visitor)
{
// Ephemeron fixed point loop.
@@ -512,11 +513,11 @@ void Heap::processMarkingStack(Visitor* visitor)
// Mark any strong pointers that have now become reachable in
// ephemeron maps.
TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack");
- s_ephemeronStack->invokeEphemeronCallbacks(visitor);
+ visitor->threadState()->ephemeronStack()->invokeEphemeronCallbacks(visitor);
}
// Rerun loop if ephemeron processing queued more objects for tracing.
- } while (!s_markingStack->isEmpty());
+ } while (!visitor->threadState()->markingStack()->isEmpty());
}
void Heap::postMarkingProcessing(Visitor* visitor)
@@ -529,12 +530,12 @@ void Heap::postMarkingProcessing(Visitor* visitor)
// if they are only reachable from their front objects.
while (popAndInvokePostMarkingCallback(visitor)) { }
- s_ephemeronStack->clear();
+ visitor->threadState()->ephemeronStack()->clear();
// Post-marking callbacks should not trace any objects and
// therefore the marking stack should be empty after the
// post-marking callbacks.
- ASSERT(s_markingStack->isEmpty());
+ ASSERT(visitor->threadState()->markingStack()->isEmpty());
}
void Heap::globalWeakProcessing(Visitor* visitor)
@@ -547,7 +548,7 @@ void Heap::globalWeakProcessing(Visitor* visitor)
// It is not permitted to trace pointers of live objects in the weak
// callback phase, so the marking stack should still be empty here.
- ASSERT(s_markingStack->isEmpty());
+ ASSERT(visitor->threadState()->markingStack()->isEmpty());
double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime;
Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessing", timeForGlobalWeakProcessing, 1, 10 * 1000, 50);
@@ -559,73 +560,13 @@ void Heap::collectAllGarbage()
size_t previousLiveObjects = 0;
for (int i = 0; i < 5; ++i) {
collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC);
- size_t liveObjects = Heap::markedObjectSize();
+ size_t liveObjects = ThreadState::current()->markedObjectSize();
if (liveObjects == previousLiveObjects)
break;
previousLiveObjects = liveObjects;
}
}
-double Heap::estimatedMarkingTime()
-{
- ASSERT(ThreadState::current()->isMainThread());
-
- // Use 8 ms as initial estimated marking time.
- // 8 ms is long enough for low-end mobile devices to mark common
- // real-world object graphs.
- if (s_estimatedMarkingTimePerByte == 0)
- return 0.008;
-
- // Assuming that the collection rate of this GC will be mostly equal to
- // the collection rate of the last GC, estimate the marking time of this GC.
- return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap::markedObjectSize());
-}
-
-void Heap::reportMemoryUsageHistogram()
-{
- static size_t supportedMaxSizeInMB = 4 * 1024;
- static size_t observedMaxSizeInMB = 0;
-
- // We only report the memory in the main thread.
- if (!isMainThread())
- return;
- // +1 is for rounding up the sizeInMB.
- size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1;
- if (sizeInMB >= supportedMaxSizeInMB)
- sizeInMB = supportedMaxSizeInMB - 1;
- if (sizeInMB > observedMaxSizeInMB) {
- // Send a UseCounter only when we see the highest memory usage
- // we've ever seen.
- Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeInMB, supportedMaxSizeInMB);
- observedMaxSizeInMB = sizeInMB;
- }
-}
-
-void Heap::reportMemoryUsageForTracing()
-{
-#if PRINT_HEAP_STATS
- // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount());
-#endif
-
- bool gcTracingEnabled;
- TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
- if (!gcTracingEnabled)
- return;
-
- // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints).
- // They are capped to INT_MAX just in case.
- TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtLastGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrapperCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
- TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX)));
-}
-
size_t Heap::objectPayloadSizeForTesting()
{
size_t objectPayloadSize = 0;
@@ -640,133 +581,38 @@ size_t Heap::objectPayloadSizeForTesting()
return objectPayloadSize;
}
-BasePage* Heap::lookup(Address address)
+BasePage* Heap::lookup(Address address, ThreadState* threadState)
{
- ASSERT(ThreadState::current()->isInGC());
- if (!s_regionTree)
+ ASSERT(threadState->isInGC());
+ if (!threadState->regionTree())
return nullptr;
- if (PageMemoryRegion* region = s_regionTree->lookup(address)) {
+ if (PageMemoryRegion* region = threadState->regionTree()->lookup(address)) {
BasePage* page = region->pageFromAddress(address);
return page && !page->orphaned() ? page : nullptr;
}
return nullptr;
}
-static Mutex& regionTreeMutex()
-{
- DEFINE_THREAD_SAFE_STATIC_LOCAL(Mutex, mutex, new Mutex);
- return mutex;
-}
-
void Heap::removePageMemoryRegion(PageMemoryRegion* region)
{
- // Deletion of large objects (and thus their regions) can happen
- // concurrently on sweeper threads. Removal can also happen during thread
- // shutdown, but that case is safe. Regardless, we make all removals
- // mutually exclusive.
- MutexLocker locker(regionTreeMutex());
- RegionTree::remove(region, &s_regionTree);
-}
-
-void Heap::addPageMemoryRegion(PageMemoryRegion* region)
-{
- MutexLocker locker(regionTreeMutex());
- RegionTree::add(new RegionTree(region), &s_regionTree);
-}
-
-PageMemoryRegion* Heap::RegionTree::lookup(Address address)
-{
- RegionTree* current = s_regionTree;
- while (current) {
- Address base = current->m_region->base();
- if (address < base) {
- current = current->m_left;
- continue;
- }
- if (address >= base + current->m_region->size()) {
- current = current->m_right;
- continue;
- }
- ASSERT(current->m_region->contains(address));
- return current->m_region;
- }
- return nullptr;
-}
-
-void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context)
-{
- ASSERT(newTree);
- Address base = newTree->m_region->base();
- for (RegionTree* current = *context; current; current = *context) {
- ASSERT(!current->m_region->contains(base));
- context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
- }
- *context = newTree;
-}
-
-void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context)
-{
- ASSERT(region);
- ASSERT(context);
- Address base = region->base();
- RegionTree* current = *context;
- for (; current; current = *context) {
- if (region == current->m_region)
- break;
- context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
- }
-
- // Shutdown via detachMainThread might not have populated the region tree.
- if (!current)
+ ThreadState* threadState = ThreadState::current();
+ // When the render process shuts down, the main thread state may already be destroyed.
+ if (!threadState)
return;
-
- *context = nullptr;
- if (current->m_left) {
- add(current->m_left, context);
- current->m_left = nullptr;
- }
- if (current->m_right) {
- add(current->m_right, context);
- current->m_right = nullptr;
- }
- delete current;
+ threadState->removeFromRegionTree(region);
}
-void Heap::resetHeapCounters()
+void Heap::addPageMemoryRegion(PageMemoryRegion* region)
{
- ASSERT(ThreadState::current()->isInGC());
-
- Heap::reportMemoryUsageForTracing();
-
- s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize;
- s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
- s_allocatedObjectSize = 0;
- s_markedObjectSize = 0;
- s_wrapperCountAtLastGC = s_wrapperCount;
- s_collectedWrapperCount = 0;
+ ThreadState* threadState = ThreadState::current();
+ ASSERT(threadState);
+ ThreadState::RegionTree* regionTree = threadState->regionTree();
+ ThreadState::RegionTree::add(new ThreadState::RegionTree(region), &regionTree);
+ threadState->setRegionTree(regionTree);
}
-CallbackStack* Heap::s_markingStack;
-CallbackStack* Heap::s_postMarkingCallbackStack;
-CallbackStack* Heap::s_globalWeakCallbackStack;
-CallbackStack* Heap::s_ephemeronStack;
-HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
bool Heap::s_shutdownCalled = false;
FreePagePool* Heap::s_freePagePool;
OrphanedPagePool* Heap::s_orphanedPagePool;
-Heap::RegionTree* Heap::s_regionTree = nullptr;
-size_t Heap::s_allocatedSpace = 0;
-size_t Heap::s_allocatedObjectSize = 0;
-size_t Heap::s_objectSizeAtLastGC = 0;
-size_t Heap::s_markedObjectSize = 0;
-size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0;
-size_t Heap::s_wrapperCount = 0;
-size_t Heap::s_wrapperCountAtLastGC = 0;
-size_t Heap::s_collectedWrapperCount = 0;
-size_t Heap::s_partitionAllocSizeAtLastGC = 0;
-double Heap::s_estimatedMarkingTimePerByte = 0.0;
-#if ENABLE(ASSERT)
-uint16_t Heap::s_gcGeneration = 0;
-#endif
} // namespace blink

Powered by Google App Engine
This is Rietveld 408576698