Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(51)

Unified Diff: Source/platform/heap/Heap.cpp

Issue 723513002: Oilpan: Refactor the way we calculate heap statistics (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index d775343e9681faf8f23c2f66261482d130fad816..7e218ced1c5e629ab25fb5b534e9843ae6de9b4c 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -681,12 +681,16 @@ void ThreadHeap<Header>::cleanupPages()
clearFreeLists();
// Add the ThreadHeap's pages to the orphanedPagePool.
- for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
+ for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) {
+ Heap::decreaseAllocatedSpace(blinkPageSize);
Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
+ }
m_firstPage = 0;
- for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
+ for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) {
+ Heap::decreaseAllocatedSpace(largeObject->size());
Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
+ }
m_firstLargeHeapObject = 0;
}
@@ -694,7 +698,7 @@ template<typename Header>
void ThreadHeap<Header>::updateRemainingAllocationSize()
{
if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
- stats().increaseObjectSpace(m_lastRemainingAllocationSize - remainingAllocationSize());
+ Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
m_lastRemainingAllocationSize = remainingAllocationSize();
}
ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
@@ -940,7 +944,7 @@ bool ThreadHeap<Header>::coalesce(size_t minSize)
ASSERT(basicHeader->size() < blinkPagePayloadSize());
if (basicHeader->isPromptlyFreed()) {
- stats().decreaseObjectSpace(reinterpret_cast<Header*>(basicHeader)->size());
+ Heap::decreaseAllocatedObjectSize(reinterpret_cast<Header*>(basicHeader)->size());
size_t size = basicHeader->size();
ASSERT(size >= sizeof(Header));
#if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
@@ -1024,8 +1028,8 @@ Address ThreadHeap<Header>::allocateLargeObject(size_t size, const GCInfo* gcInf
ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
largeObject->link(&m_firstLargeHeapObject);
- stats().increaseAllocatedSpace(largeObject->size());
- stats().increaseObjectSpace(largeObject->size());
+ Heap::increaseAllocatedSpace(largeObject->size());
+ Heap::increaseAllocatedObjectSize(largeObject->size());
return result;
}
@@ -1034,6 +1038,7 @@ void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH
{
object->unlink(previousNext);
object->finalize();
+ Heap::decreaseAllocatedSpace(object->size());
// Unpoison the object header and allocationGranularity bytes after the
// object before freeing.
@@ -1255,6 +1260,8 @@ void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
template <typename Header>
void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
{
+ Heap::decreaseAllocatedSpace(blinkPageSize);
+
MutexLocker locker(m_threadState->sweepMutex());
if (page->terminating()) {
// The thread is shutting down so this page is being removed as part
@@ -1308,6 +1315,7 @@ void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
}
}
HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
+ Heap::increaseAllocatedSpace(blinkPageSize);
// Use a separate list for pages allocated during sweeping to make
// sure that we do not accidentally sweep objects that have been
// allocated during sweeping.
@@ -1345,17 +1353,19 @@ bool ThreadHeap<Header>::pagesAllocatedDuringSweepingContains(Address address)
#endif
template<typename Header>
-void ThreadHeap<Header>::getStatsForTesting(HeapStats& stats)
+size_t ThreadHeap<Header>::objectPayloadSizeForTesting()
{
ASSERT(!m_firstPageAllocatedDuringSweeping);
+ size_t objectPayloadSize = 0;
for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
- page->getStatsForTesting(stats);
+ objectPayloadSize += page->objectPayloadSizeForTesting();
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next())
- current->getStatsForTesting(stats);
+ objectPayloadSize += current->objectPayloadSizeForTesting();
+ return objectPayloadSize;
}
template<typename Header>
-void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
+void ThreadHeap<Header>::sweepNormalPages()
{
TRACE_EVENT0("blink_gc", "ThreadHeap::sweepNormalPages");
HeapPage<Header>* page = m_firstPage;
@@ -1371,7 +1381,7 @@ void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
HeapPage<Header>::unlink(this, unused, previousNext);
--m_numberOfNormalPages;
} else {
- page->sweep(stats, this);
+ page->sweep(this);
previousNext = &page->m_next;
previous = page;
page = page->next();
@@ -1380,14 +1390,13 @@ void ThreadHeap<Header>::sweepNormalPages(HeapStats* stats)
}
template<typename Header>
-void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
+void ThreadHeap<Header>::sweepLargePages()
{
TRACE_EVENT0("blink_gc", "ThreadHeap::sweepLargePages");
LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
if (current->isMarked()) {
- stats->increaseAllocatedSpace(current->size());
sof 2014/11/14 12:44:20 Could you explain why we don't need to increase the allocated space here for surviving large objects? [comment truncated in page capture]
haraken 2014/11/14 15:15:00 After this CL, s_allocatedSpace is never reset on GC; it is incremented when pages are allocated and decremented when they are freed, so sweeping no longer needs to rebuild it. [comment truncated in page capture]
sof 2014/11/14 16:37:21 Thanks for clarifying.
- stats->increaseObjectSpace(current->size());
+ Heap::increaseLiveObjectSize(current->size());
current->unmark();
previousNext = &current->m_next;
current = current->next();
@@ -1408,7 +1417,7 @@ void ThreadHeap<Header>::sweepLargePages(HeapStats* stats)
#define STRICT_ASAN_FINALIZATION_CHECKING 0
template<typename Header>
-void ThreadHeap<Header>::sweep(HeapStats* stats)
+void ThreadHeap<Header>::sweep()
{
ASSERT(isConsistentForSweeping());
#if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
@@ -1419,8 +1428,8 @@ void ThreadHeap<Header>::sweep(HeapStats* stats)
for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
page->poisonUnmarkedObjects();
#endif
- sweepNormalPages(stats);
- sweepLargePages(stats);
+ sweepNormalPages();
+ sweepLargePages();
}
template<typename Header>
@@ -1520,7 +1529,6 @@ HeapPage<Header>::HeapPage(PageMemory* storage, ThreadHeap<Header>* heap, const
COMPILE_ASSERT(!(sizeof(HeapPage<Header>) & allocationMask), page_header_incorrectly_aligned);
m_objectStartBitMapComputed = false;
ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
- heap->stats().increaseAllocatedSpace(blinkPageSize);
}
template<typename Header>
@@ -1538,20 +1546,21 @@ void HeapPage<Header>::unlink(ThreadHeap<Header>* heap, HeapPage* unused, HeapPa
}
template<typename Header>
-void HeapPage<Header>::getStatsForTesting(HeapStats& stats)
+size_t HeapPage<Header>::objectPayloadSizeForTesting()
{
- stats.increaseAllocatedSpace(blinkPageSize);
+ size_t objectPayloadSize = 0;
Address headerAddress = payload();
ASSERT(headerAddress != end());
do {
Header* header = reinterpret_cast<Header*>(headerAddress);
if (!header->isFree()) {
- stats.increaseObjectSpace(header->payloadSize());
+ objectPayloadSize += header->payloadSize();
}
ASSERT(header->size() < blinkPagePayloadSize());
headerAddress += header->size();
ASSERT(headerAddress <= end());
} while (headerAddress < end());
+ return objectPayloadSize;
}
template<typename Header>
@@ -1562,10 +1571,9 @@ bool HeapPage<Header>::isEmpty()
}
template<typename Header>
-void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
+void HeapPage<Header>::sweep(ThreadHeap<Header>* heap)
{
clearObjectStartBitMap();
- stats->increaseAllocatedSpace(blinkPageSize);
Address startOfGap = payload();
for (Address headerAddress = startOfGap; headerAddress < end(); ) {
BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
@@ -1611,7 +1619,7 @@ void HeapPage<Header>::sweep(HeapStats* stats, ThreadHeap<Header>* heap)
heap->addToFreeList(startOfGap, headerAddress - startOfGap);
header->unmark();
headerAddress += header->size();
- stats->increaseObjectSpace(header->size());
+ Heap::increaseLiveObjectSize(header->size());
startOfGap = headerAddress;
}
if (startOfGap != end())
@@ -1837,10 +1845,9 @@ inline bool HeapPage<FinalizedHeapObjectHeader>::hasVTable(FinalizedHeapObjectHe
}
template<typename Header>
-void LargeHeapObject<Header>::getStatsForTesting(HeapStats& stats)
+size_t LargeHeapObject<Header>::objectPayloadSizeForTesting()
{
- stats.increaseAllocatedSpace(size());
- stats.increaseObjectSpace(payloadSize());
+ return payloadSize();
}
#if ENABLE(GC_PROFILE_HEAP)
@@ -2180,6 +2187,9 @@ void Heap::init()
s_freePagePool = new FreePagePool();
s_orphanedPagePool = new OrphanedPagePool();
s_markingThreads = new Vector<OwnPtr<WebThread>>();
+ s_allocatedObjectSize = 0;
sof 2014/11/14 12:44:20 Use resetAllocatedObjectSize() ?
haraken 2014/11/14 15:15:00 Because I added ASSERT(ThreadState::isAnyThreadInGC()) to the reset helpers, which would not hold at init time. [comment truncated in page capture]
+ s_allocatedSpace = 0;
+ s_liveObjectSize = 0;
sof 2014/11/14 12:44:20 Use resetLiveObjectSize() ?
if (Platform::current()) {
int processors = Platform::current()->numberOfProcessors();
int numberOfMarkingThreads = std::min(processors, maxNumberOfMarkingThreads);
@@ -2223,6 +2233,7 @@ void Heap::doShutdown()
delete s_regionTree;
s_regionTree = 0;
ThreadState::shutdown();
+ ASSERT(Heap::allocatedSpace() == 0);
}
BaseHeapPage* Heap::contains(Address address)
@@ -2475,6 +2486,9 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::Cause
s_lastGCWasConservative = false;
+ Heap::resetLiveObjectSize();
+ Heap::resetAllocatedObjectSize();
+
TRACE_EVENT2("blink_gc", "Heap::collectGarbage",
"precise", stackState == ThreadState::NoHeapPointersOnStack,
"forced", cause == ThreadState::ForcedGC);
@@ -2533,12 +2547,9 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::Cause
#endif
if (Platform::current()) {
- uint64_t objectSpaceSize;
- uint64_t allocatedSpaceSize;
- getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
- Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
- Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
+ Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50);
+ Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50);
}
if (state->isMainThread())
@@ -2777,44 +2788,17 @@ void ThreadHeap<Header>::merge(PassOwnPtr<BaseHeap> splitOffBase)
}
}
-void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize)
-{
- *objectSpaceSize = 0;
- *allocatedSpaceSize = 0;
- ASSERT(ThreadState::isAnyThreadInGC());
- ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
- typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
- for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
- *objectSpaceSize += (*it)->stats().totalObjectSpace();
- *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
- }
-}
-
-void Heap::getStats(HeapStats* stats)
-{
- stats->clear();
- ASSERT(ThreadState::isAnyThreadInGC());
- ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
- typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
- for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
- HeapStats temp;
- (*it)->getStats(temp);
- stats->add(&temp);
- }
-}
-
-void Heap::getStatsForTesting(HeapStats* stats)
+size_t Heap::objectPayloadSizeForTesting()
{
- stats->clear();
+ size_t objectPayloadSize = 0;
ASSERT(ThreadState::isAnyThreadInGC());
makeConsistentForSweeping();
ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) {
- HeapStats temp;
- (*it)->getStatsForTesting(temp);
- stats->add(&temp);
+ objectPayloadSize += (*it)->objectPayloadSizeForTesting();
}
+ return objectPayloadSize;
}
#if ENABLE(ASSERT)
@@ -2973,5 +2957,8 @@ bool Heap::s_lastGCWasConservative = false;
FreePagePool* Heap::s_freePagePool;
OrphanedPagePool* Heap::s_orphanedPagePool;
Heap::RegionTree* Heap::s_regionTree = 0;
+size_t Heap::s_allocatedObjectSize = 0;
+size_t Heap::s_allocatedSpace = 0;
+size_t Heap::s_liveObjectSize = 0;
} // namespace blink

Powered by Google App Engine
This is Rietveld 408576698