Chromium Code Reviews

Unified Diff: Source/platform/heap/ThreadState.cpp

Issue 738773003: Revert of Oilpan: Refactor the way we calculate heap statistics (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 1 month ago
Index: Source/platform/heap/ThreadState.cpp
diff --git a/Source/platform/heap/ThreadState.cpp b/Source/platform/heap/ThreadState.cpp
index 04643aa4a33affe0e813b69822230af4f1ac6bfa..7ed2986a17c949a0a5a09d2c44f8d16593653a70 100644
--- a/Source/platform/heap/ThreadState.cpp
+++ b/Source/platform/heap/ThreadState.cpp
@@ -663,8 +663,8 @@
json->endArray();
#undef SNAPSHOT_HEAP
- json->setInteger("allocatedSpace", Heap::allocatedSpace());
- json->setInteger("objectSpace", Heap::allocatedObjectSize());
+ json->setInteger("allocatedSpace", m_stats.totalAllocatedSpace());
+ json->setInteger("objectSpace", m_stats.totalObjectSpace());
json->setInteger("pageCount", info.pageCount);
json->setInteger("freeSize", info.freeSize);
@@ -731,35 +731,47 @@
return mutex;
}
+// Trigger garbage collection on a 50% increase in size, but not for
+// less than 512kbytes.
+bool ThreadState::increasedEnoughToGC(size_t newSize, size_t oldSize)
+{
+ if (newSize < 1 << 19)
+ return false;
+ size_t limit = oldSize + (oldSize >> 1);
+ return newSize > limit;
+}
+
+// FIXME: The heuristics are local for a thread at this
+// point. Consider using heuristics that take memory for all threads
+// into account.
bool ThreadState::shouldGC()
{
- // Do not GC during sweeping. We allow allocation during finalization,
- // but those allocations are not allowed to lead to nested GCs.
- if (m_sweepInProgress)
+ // Do not GC during sweeping. We allow allocation during
+ // finalization, but those allocations are not allowed
+ // to lead to nested garbage collections.
+ return !m_sweepInProgress && increasedEnoughToGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
+}
+
+// Trigger conservative garbage collection on a 100% increase in size,
+// but not for less than 4Mbytes. If the system currently has a low
+// collection rate, then require a 300% increase in size.
+bool ThreadState::increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize)
+{
+ if (newSize < 1 << 22)
return false;
-
- // Trigger garbage collection on a 50% increase in size,
- // but not for less than 512 KB.
- if (Heap::allocatedObjectSize() < 1 << 19)
- return false;
- size_t limit = Heap::markedObjectSize() + Heap::markedObjectSize() / 2;
- return Heap::allocatedObjectSize() > limit;
-}
-
+ size_t limit = (m_lowCollectionRate ? 4 : 2) * oldSize;
+ return newSize > limit;
+}
+
+// FIXME: The heuristics are local for a thread at this
+// point. Consider using heuristics that take memory for all threads
+// into account.
bool ThreadState::shouldForceConservativeGC()
{
- // Do not GC during sweeping. We allow allocation during finalization,
- // but those allocations are not allowed to lead to nested GCs.
- if (m_sweepInProgress)
- return false;
-
- // Trigger conservative garbage collection on a 100% increase in size,
- // but not for less than 4Mbytes. If the system currently has a low
- // collection rate, then require a 300% increase in size.
- if (Heap::allocatedObjectSize() < 1 << 22)
- return false;
- size_t limit = (m_lowCollectionRate ? 4 : 2) * Heap::markedObjectSize();
- return Heap::allocatedObjectSize() > limit;
+ // Do not GC during sweeping. We allow allocation during
+ // finalization, but those allocations are not allowed
+ // to lead to nested garbage collections.
+ return !m_sweepInProgress && increasedEnoughToForceConservativeGC(m_stats.totalObjectSpace(), m_statsAfterLastGC.totalObjectSpace());
}
bool ThreadState::sweepRequested()
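The thresholds restored by this hunk are easiest to see with concrete numbers: a normal GC is considered only once the thread's object space is at least 512 KB (1 << 19) and has grown by more than 50% over the post-GC baseline, while a conservative GC needs at least 4 MB (1 << 22) and more than a 100% increase, or 300% when the last collection rate was low. A minimal standalone sketch of the same arithmetic (the main() driver and the sample sizes are illustrative, not Blink code):

    #include <cassert>
    #include <cstddef>

    // Sketch of the two thresholds; sizes are in bytes.
    static bool increasedEnoughToGC(size_t newSize, size_t oldSize)
    {
        if (newSize < (1 << 19)) // below 512 KB, never trigger
            return false;
        return newSize > oldSize + (oldSize >> 1); // more than 150% of the baseline
    }

    static bool increasedEnoughToForceConservativeGC(size_t newSize, size_t oldSize, bool lowCollectionRate)
    {
        if (newSize < (1 << 22)) // below 4 MB, never trigger
            return false;
        return newSize > (lowCollectionRate ? 4 : 2) * oldSize; // 100% or 300% growth
    }

    int main()
    {
        const size_t MB = 1 << 20;
        assert(!increasedEnoughToGC(MB + 400 * 1024, MB));                    // 40% growth: below threshold
        assert(increasedEnoughToGC(2 * MB, MB));                              // 100% growth over 1 MB
        assert(!increasedEnoughToForceConservativeGC(5 * MB, 4 * MB, false)); // needs > 8 MB
        assert(increasedEnoughToForceConservativeGC(9 * MB, 4 * MB, false));
        assert(!increasedEnoughToForceConservativeGC(9 * MB, 4 * MB, true));  // low rate: needs > 16 MB
        return 0;
    }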
@@ -892,13 +904,16 @@
return 0;
}
-size_t ThreadState::objectPayloadSizeForTesting()
+void ThreadState::getStats(HeapStats& stats)
+{
+ stats = m_stats;
+}
+
+void ThreadState::getStatsForTesting(HeapStats& stats)
{
ASSERT(isConsistentForSweeping());
- size_t objectPayloadSize = 0;
for (int i = 0; i < NumberOfHeaps; i++)
- objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting();
- return objectPayloadSize;
+ m_heaps[i]->getStatsForTesting(stats);
}
bool ThreadState::stopThreads()
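The getStats()/getStatsForTesting() pair goes back to an accumulator-style HeapStats that each heap adds its numbers into, instead of summing a single size_t. Roughly the interface the rest of this file relies on (a simplified stand-in, not the actual declaration, which carries more state):

    #include <cstddef>

    // Simplified stand-in for HeapStats, showing only what this file uses:
    // clear() before a sweep, add() to merge per-heap or per-thread results,
    // and the totals that shouldGC() and snapshot() read.
    class HeapStats {
    public:
        void clear() { m_totalObjectSpace = 0; m_totalAllocatedSpace = 0; }
        void add(const HeapStats* other)
        {
            m_totalObjectSpace += other->m_totalObjectSpace;
            m_totalAllocatedSpace += other->m_totalAllocatedSpace;
        }
        size_t totalObjectSpace() const { return m_totalObjectSpace; }
        size_t totalAllocatedSpace() const { return m_totalAllocatedSpace; }

    private:
        size_t m_totalObjectSpace = 0;
        size_t m_totalAllocatedSpace = 0;
    };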
@@ -1027,9 +1042,10 @@
class SweepNonFinalizedHeapTask final : public WebThread::Task {
public:
- SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap)
+ SweepNonFinalizedHeapTask(ThreadState* state, BaseHeap* heap, HeapStats* stats)
: m_threadState(state)
, m_heap(heap)
+ , m_stats(stats)
{
m_threadState->registerSweepingTask();
}
@@ -1042,12 +1058,13 @@
virtual void run()
{
TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps");
- m_heap->sweep();
+ m_heap->sweep(m_stats);
}
private:
ThreadState* m_threadState;
BaseHeap* m_heap;
+ HeapStats* m_stats;
};
void ThreadState::performPendingSweep()
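SweepNonFinalizedHeapTask now receives a HeapStats* pointing at a slot owned by performPendingSweepInParallel(); each sweeper task writes only to its own slot, and the main thread reads the slots only after waitUntilSweepersDone(), so the partial results can be merged without locking m_stats. A generic sketch of that per-worker-slot pattern (plain std::thread instead of WebThread, purely illustrative):

    #include <cstddef>
    #include <thread>
    #include <vector>

    struct Stats { size_t objectSpace = 0; };

    int main()
    {
        const int workerCount = 4;
        std::vector<Stats> perWorker(workerCount);      // one slot per task, like heapStatsVector
        std::vector<std::thread> workers;
        for (int i = 0; i < workerCount; ++i) {
            workers.emplace_back([&perWorker, i] {
                perWorker[i].objectSpace = 1000 * (i + 1); // each worker touches only its own slot
            });
        }
        for (auto& worker : workers)
            worker.join();                               // analogous to waitUntilSweepersDone()
        Stats total;
        for (const Stats& s : perWorker)
            total.objectSpace += s.objectSpace;          // analogous to m_stats.add(&heapStatsVector[i])
        return total.objectSpace == 10000 ? 0 : 1;
    }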
@@ -1061,7 +1078,7 @@
// going to be freed.
bool gcTracingEnabled;
TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
- if (gcTracingEnabled)
+ if (gcTracingEnabled && m_stats.totalObjectSpace() > 0)
snapshot();
#endif
@@ -1074,7 +1091,7 @@
TRACE_EVENT_SET_SAMPLING_STATE("blink", "BlinkGCSweeping");
}
- size_t allocatedObjectSizeBeforeSweeping = Heap::allocatedObjectSize();
+ size_t objectSpaceBeforeSweep = m_stats.totalObjectSpace();
{
NoSweepScope scope(this);
@@ -1097,14 +1114,10 @@
clearGCRequested();
clearSweepRequested();
-
- // If we collected less than 50% of objects, record that the collection rate
- // is low which we use to determine when to perform the next GC.
- // FIXME: We should make m_lowCollectionRate available in non-main threads.
- // FIXME: Heap::markedObjectSize() may not be accurate because other threads
- // may not have finished sweeping.
- if (isMainThread())
- m_lowCollectionRate = Heap::markedObjectSize() > (allocatedObjectSizeBeforeSweeping / 2);
+ // If we collected less than 50% of objects, record that the
+ // collection rate is low which we use to determine when to
+ // perform the next GC.
+ setLowCollectionRate(m_stats.totalObjectSpace() > (objectSpaceBeforeSweep / 2));
if (Platform::current()) {
Platform::current()->histogramCustomCounts("BlinkGC.PerformPendingSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
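Concretely: if a thread had 10 MB of object space before sweeping and more than 5 MB of it survives, the collection rate is recorded as low, and the next shouldForceConservativeGC() check will require roughly a 300% increase (the 4x limit) rather than 100% (the 2x limit) before forcing a conservative GC.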
@@ -1118,6 +1131,9 @@
void ThreadState::performPendingSweepInParallel()
{
+ // Sweeping will recalculate the stats
+ m_stats.clear();
+
// Sweep the non-finalized heap pages on multiple threads.
// Attempt to load-balance by having the sweeper thread sweep as
// close to half of the pages as possible.
@@ -1135,6 +1151,7 @@
// finalizers need to run and therefore the pages can be
// swept on other threads.
static const int minNumberOfPagesForParallelSweep = 10;
+ HeapStats heapStatsVector[NumberOfNonFinalizedHeaps];
OwnPtr<BaseHeap> splitOffHeaps[NumberOfNonFinalizedHeaps];
for (int i = 0; i < NumberOfNonFinalizedHeaps && pagesToSweepInParallel > 0; i++) {
BaseHeap* heap = m_heaps[FirstNonFinalizedHeap + i];
@@ -1148,7 +1165,8 @@
int pagesToSplitOff = std::min(pageCount, pagesToSweepInParallel);
pagesToSweepInParallel -= pagesToSplitOff;
splitOffHeaps[i] = heap->split(pagesToSplitOff);
- m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOffHeaps[i].get()));
+ HeapStats* stats = &heapStatsVector[i];
+ m_sweeperThread->postTask(new SweepNonFinalizedHeapTask(this, splitOffHeaps[i].get(), stats));
}
}
@@ -1157,7 +1175,9 @@
// if there is no sweeper thread).
TRACE_EVENT0("blink_gc", "ThreadState::sweepNonFinalizedHeaps");
for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
- m_heaps[FirstNonFinalizedHeap + i]->sweep();
+ HeapStats stats;
+ m_heaps[FirstNonFinalizedHeap + i]->sweep(&stats);
+ m_stats.add(&stats);
}
}
@@ -1165,18 +1185,25 @@
// Sweep the finalized pages.
TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps");
for (int i = 0; i < NumberOfFinalizedHeaps; i++) {
- m_heaps[FirstFinalizedHeap + i]->sweep();
- }
- }
-
+ HeapStats stats;
+ m_heaps[FirstFinalizedHeap + i]->sweep(&stats);
+ m_stats.add(&stats);
+ }
+ }
+
+ // Wait for the sweeper threads and update the heap stats with the
+ // stats for the heap portions swept by those threads.
waitUntilSweepersDone();
for (int i = 0; i < NumberOfNonFinalizedHeaps; i++) {
+ m_stats.add(&heapStatsVector[i]);
if (splitOffHeaps[i])
m_heaps[FirstNonFinalizedHeap + i]->merge(splitOffHeaps[i].release());
}
for (int i = 0; i < NumberOfHeaps; i++)
m_heaps[i]->postSweepProcessing();
+
+ getStats(m_statsAfterLastGC);
}
void ThreadState::addInterruptor(Interruptor* interruptor)
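Taken together, the stats bookkeeping in this revert flows as follows: performPendingSweepInParallel() clears m_stats, each heap's sweep(&stats) result is added back in, the sweeper-thread portions in heapStatsVector are merged only after waitUntilSweepersDone(), and getStats(m_statsAfterLastGC) then records the post-sweep totals as the baseline that shouldGC() and shouldForceConservativeGC() compare against.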
