Chromium Code Reviews

Unified Diff: Source/platform/heap/Heap.cpp

Issue 1159773004: Oilpan: Implement a GC to take a heap snapshot (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 7 months ago
Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index 5b22fc005fe02f671dabda31ac24c08980abb24e..1ac77326969df1a8acde065f31e13ce5af8ad282 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -111,8 +111,16 @@ static String classOf(const void* object)
class GCScope {
public:
- explicit GCScope(ThreadState::StackState stackState)
+ static GCScope* current()
+ {
+ ASSERT(ThreadState::current()->isInGC());
+ ASSERT(s_currentGCScope);
+ return s_currentGCScope;
+ }
+
+ GCScope(ThreadState::StackState stackState, ThreadState::GCType gcType)
: m_state(ThreadState::current())
+ , m_gcType(gcType)
, m_safePointScope(stackState)
, m_parkedAllThreads(false)
{
@@ -131,12 +139,17 @@ public:
}
if (m_state->isMainThread())
TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState);
+
+ ASSERT(!s_currentGCScope);
+ s_currentGCScope = this;
}
- bool allThreadsParked() { return m_parkedAllThreads; }
+ bool allThreadsParked() const { return m_parkedAllThreads; }
+ ThreadState::GCType gcType() const { return m_gcType; }
~GCScope()
{
+ s_currentGCScope = nullptr;
// Only cleanup if we parked all threads in which case the GC happened
// and we need to resume the other threads.
if (LIKELY(m_parkedAllThreads)) {
@@ -146,10 +159,15 @@ public:
private:
ThreadState* m_state;
+ ThreadState::GCType m_gcType;
SafePointScope m_safePointScope;
bool m_parkedAllThreads; // False if we fail to park all threads
+
+ static GCScope* s_currentGCScope;
};
+GCScope* GCScope::s_currentGCScope = nullptr;
+
#if ENABLE(ASSERT)
NO_SANITIZE_ADDRESS
void HeapObjectHeader::zapMagic()
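(Illustrative aside, not part of the patch.) The GCScope change above introduces a scoped "current instance" so that code deep inside the collection can ask for the active GC type without threading it through every call. A minimal standalone sketch of that pattern, using hypothetical names (DemoScope, DemoType) rather than the Blink API:

    #include <cassert>

    enum class DemoType { GCWithSweep, TakeSnapshot };

    class DemoScope {
    public:
        explicit DemoScope(DemoType type) : m_type(type)
        {
            assert(!s_current);  // Only one scope may be active at a time.
            s_current = this;
        }
        ~DemoScope() { s_current = nullptr; }

        DemoType type() const { return m_type; }

        // Callers anywhere inside the collection can query the active scope.
        static DemoScope* current()
        {
            assert(s_current);
            return s_current;
        }

    private:
        DemoType m_type;
        static DemoScope* s_current;
    };

    DemoScope* DemoScope::s_current = nullptr;

    int main()
    {
        DemoScope scope(DemoType::TakeSnapshot);
        bool skipWeakProcessing = DemoScope::current()->type() == DemoType::TakeSnapshot;
        return skipWeakProcessing ? 0 : 1;
    }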
@@ -299,6 +317,28 @@ void BaseHeap::makeConsistentForGC()
ASSERT(!m_firstUnsweptPage);
}
+void BaseHeap::makeConsistentForMutator()
+{
+ clearFreeLists();
+ ASSERT(isConsistentForGC());
+ ASSERT(!m_firstPage);
+
+ // Drop marks from marked objects and rebuild free lists in preparation for
+ // resuming the execution of mutators.
+ BasePage* previousPage = nullptr;
+ for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
+ page->makeConsistentForMutator();
+ page->markAsSwept();
+ }
+ if (previousPage) {
+ ASSERT(m_firstUnsweptPage);
+ previousPage->m_next = m_firstPage;
+ m_firstPage = m_firstUnsweptPage;
+ m_firstUnsweptPage = nullptr;
+ }
+ ASSERT(!m_firstUnsweptPage);
+}
+
size_t BaseHeap::objectPayloadSizeForTesting()
{
ASSERT(isConsistentForGC());
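(Illustrative aside, not part of the patch.) BaseHeap::makeConsistentForMutator above walks the unswept page list once, marking each page swept, and then splices the entire list in front of m_firstPage in constant time. A minimal sketch of that splice on a bare singly linked list, with hypothetical names (DemoPage, spliceUnsweptOntoSwept):

    #include <cassert>

    struct DemoPage {
        DemoPage* next = nullptr;
        bool swept = false;
    };

    // Walk the unswept list once, then splice the whole list in front of
    // the swept list in O(1) using the tail pointer found during the walk.
    void spliceUnsweptOntoSwept(DemoPage*& firstSwept, DemoPage*& firstUnswept)
    {
        DemoPage* previous = nullptr;
        for (DemoPage* page = firstUnswept; page; previous = page, page = page->next)
            page->swept = true;
        if (previous) {  // previous is the tail of the unswept list.
            previous->next = firstSwept;
            firstSwept = firstUnswept;
            firstUnswept = nullptr;
        }
        assert(!firstUnswept);
    }

    int main()
    {
        DemoPage a, b, c;
        b.next = &c;                   // Unswept list: b -> c; swept list: a.
        DemoPage* firstSwept = &a;
        DemoPage* firstUnswept = &b;
        spliceUnsweptOntoSwept(firstSwept, firstUnswept);
        // Resulting list: b -> c -> a, with b and c marked swept.
        return (firstSwept == &b && c.next == &a && c.swept) ? 0 : 1;
    }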
@@ -320,7 +360,7 @@ void BaseHeap::prepareHeapForTermination()
void BaseHeap::prepareForSweep()
{
- ASSERT(!threadState()->isInGC());
+ ASSERT(threadState()->isInGC());
ASSERT(!m_firstUnsweptPage);
// Move all pages to a list of unswept pages.
@@ -1179,6 +1219,37 @@ void NormalPage::makeConsistentForGC()
Heap::increaseMarkedObjectSize(markedObjectSize);
}
+void NormalPage::makeConsistentForMutator()
+{
+ size_t markedObjectSize = 0;
+ Address startOfGap = payload();
+ for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
+ HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
+ ASSERT(header->size() < blinkPagePayloadSize());
+ // Check for a free list entry first since we cannot call
+ // isMarked on a free list entry.
+ if (header->isFree()) {
+ headerAddress += header->size();
+ continue;
+ }
+ header->checkHeader();
+
+ if (startOfGap != headerAddress)
+ heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
+ if (header->isMarked()) {
+ header->unmark();
+ markedObjectSize += header->size();
+ }
+ headerAddress += header->size();
+ startOfGap = headerAddress;
+ }
+ if (startOfGap != payloadEnd())
+ heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
+
+ if (markedObjectSize)
+ Heap::increaseMarkedObjectSize(markedObjectSize);
+}
+
#if defined(ADDRESS_SANITIZER)
void NormalPage::poisonUnmarkedObjects()
{
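(Illustrative aside, not part of the patch.) The NormalPage::makeConsistentForMutator loop above does a single linear walk over the page: runs of free-list entries are coalesced into single gaps that get re-added to the free list, marked objects are unmarked, and unmarked objects are simply kept, since a snapshot GC does not sweep. A minimal sketch of the same gap-coalescing walk over a flat vector of objects, with hypothetical names (DemoObject, FreeRange):

    #include <cstddef>
    #include <vector>

    struct DemoObject {
        size_t size;
        bool isFree;
        bool isMarked;
    };

    struct FreeRange {
        size_t offset;
        size_t size;
    };

    // Coalesce runs of free entries into single free ranges and drop marks
    // from live objects; nothing is reclaimed.
    std::vector<FreeRange> makeConsistentForMutator(std::vector<DemoObject>& page)
    {
        std::vector<FreeRange> freeList;
        size_t offset = 0;
        size_t startOfGap = 0;
        for (DemoObject& object : page) {
            if (object.isFree) {         // Free entries extend the current gap.
                offset += object.size;
                continue;
            }
            if (startOfGap != offset)    // A live object closes the gap before it.
                freeList.push_back({startOfGap, offset - startOfGap});
            object.isMarked = false;     // Drop the mark; the object survives.
            offset += object.size;
            startOfGap = offset;
        }
        if (startOfGap != offset)        // Trailing gap at the end of the page.
            freeList.push_back({startOfGap, offset - startOfGap});
        return freeList;
    }

    int main()
    {
        std::vector<DemoObject> page = {
            {16, false, true},   // live, marked
            {8,  true,  false},  // free
            {8,  true,  false},  // free
            {32, false, false},  // live, unmarked (kept: nothing is swept)
        };
        std::vector<FreeRange> freeList = makeConsistentForMutator(page);
        // Expect one coalesced gap: offset 16, size 16.
        return (freeList.size() == 1 && freeList[0].offset == 16
                && freeList[0].size == 16 && !page[0].isMarked) ? 0 : 1;
    }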
@@ -1468,6 +1539,15 @@ void LargeObjectPage::makeConsistentForGC()
}
}
+void LargeObjectPage::makeConsistentForMutator()
+{
+ HeapObjectHeader* header = heapObjectHeader();
+ if (header->isMarked()) {
+ header->unmark();
+ Heap::increaseMarkedObjectSize(size());
+ }
+}
+
#if defined(ADDRESS_SANITIZER)
void LargeObjectPage::poisonUnmarkedObjects()
{
@@ -1760,6 +1840,8 @@ String Heap::createBacktraceString()
void Heap::pushTraceCallback(void* object, TraceCallback callback)
{
+ ASSERT(ThreadState::current()->isInGC());
+
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(object));
CallbackStack::Item* slot = s_markingStack->allocateEntry();
@@ -1781,6 +1863,8 @@ bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
void Heap::pushPostMarkingCallback(void* object, TraceCallback callback)
{
+ ASSERT(ThreadState::current()->isInGC());
+
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(object));
CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry();
@@ -1798,6 +1882,11 @@ bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor)
void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
{
+ ASSERT(ThreadState::current()->isInGC());
+ // We don't want to run weak processing when taking a snapshot.
+ if (GCScope::current()->gcType() == ThreadState::TakeSnapshot)
sof 2015/05/30 20:50:06 Did you explore providing MarkingVisitor<GlobalMar
+ return;
+
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(cell));
CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry();
@@ -1806,6 +1895,11 @@ void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback)
void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback)
{
+ ASSERT(ThreadState::current()->isInGC());
+ // We don't want to run weak processing when taking a snapshot.
+ if (GCScope::current()->gcType() == ThreadState::TakeSnapshot)
+ return;
+
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(object));
ThreadState* state = pageFromObject(object)->heap()->threadState();
@@ -1823,6 +1917,8 @@ bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor)
void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback)
{
+ ASSERT(ThreadState::current()->isInGC());
+
// Trace should never reach an orphaned page.
ASSERT(!Heap::orphanedPagePool()->contains(table));
CallbackStack::Item* slot = s_ephemeronStack->allocateEntry();
@@ -1877,7 +1973,7 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCTyp
ThreadState::GCState originalGCState = state->gcState();
state->setGCState(ThreadState::StoppingOtherThreads);
- GCScope gcScope(stackState);
+ GCScope gcScope(stackState, gcType);
// Check if we successfully parked the other threads. If not we bail out of
// the GC.
if (!gcScope.allThreadsParked()) {