Chromium Code Reviews

Index: Source/platform/heap/Heap.cpp
diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
index a519807be2a1398aa02b6b504861a0aa568bae51..2150588636253ab3d56acfa1ea751bd0c4f2bdd3 100644
--- a/Source/platform/heap/Heap.cpp
+++ b/Source/platform/heap/Heap.cpp
@@ -49,7 +49,7 @@
 #include <stdio.h>
 #include <utility>
 #endif
-#if ENABLE(GC_PROFILE_HEAP)
+#if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) || ENABLE(GC_PROFILE_MARKING)
 #include "platform/TracedValue.h"
 #endif
@@ -62,6 +62,36 @@
 namespace blink {
+struct AgeHistogram {
+    int data[8];
+};
+
+typedef HashMap<String, AgeHistogram> ObjectAgeMap;
+
+static ObjectAgeMap& uom()

keishi 2015/01/27 08:59:01
Unmarked object map.

+{
+    static ObjectAgeMap uomap;
+    return uomap;
+}
+
+static Mutex& uomMutex()
+{
+    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
+    return mutex;
+}
+
+static ObjectAgeMap& mom()

keishi 2015/01/27 08:59:00
Marked object map.

+{
+    static ObjectAgeMap momap;
+    return momap;
+}
+
+static Mutex& momMutex()
+{
+    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
+    return mutex;
+}
+
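For readers following along, uom() and mom() above instantiate the same small pattern twice: a lazily constructed, mutex-guarded map from class name to an eight-bucket age histogram. A minimal standalone sketch of the pattern, with std:: types standing in for WTF's HashMap, String, and Mutex (the "& 7" saturation mask is an assumption inferred from the eight buckets, not something the diff states):

#include <array>
#include <map>
#include <mutex>
#include <string>

struct AgeHistogramSketch {
    std::array<int, 8> data{};  // bucket i counts objects that survived i marking cycles
};

using ObjectAgeMapSketch = std::map<std::string, AgeHistogramSketch>;

static ObjectAgeMapSketch& markedObjectMap()  // stands in for mom()
{
    static ObjectAgeMapSketch map;  // lazily initialized, lives for the whole process
    return map;
}

static std::mutex& markedObjectMapMutex()  // stands in for momMutex()
{
    static std::mutex mutex;
    return mutex;
}

// Record one marked object of class |className| at the given age; the mask
// keeps the index inside the 8 buckets in case ages can exceed 7.
static void recordMarkedObject(const std::string& className, int age)
{
    std::lock_guard<std::mutex> locker(markedObjectMapMutex());
    markedObjectMap()[className].data[age & 7]++;
}

Routing access through accessor functions rather than file-scope globals sidesteps static initialization order issues, which is presumably why the patch uses function-local statics and AtomicallyInitializedStatic for the mutexes.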
 #if ENABLE(GC_PROFILE_MARKING)
 static String classOf(const void* object)
 {
@@ -622,6 +652,11 @@ ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
     : m_currentAllocationPoint(nullptr)
     , m_remainingAllocationSize(0)
     , m_lastRemainingAllocationSize(0)
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    , m_totalAllocationSize(0.0)
+    , m_allocationCount(0)
+    , m_inlineAllocationCount(0)
+#endif
     , m_firstPage(nullptr)
     , m_firstLargeObject(nullptr)
     , m_firstPageAllocatedDuringSweeping(nullptr)
@@ -678,8 +713,11 @@ void ThreadHeap<Header>::updateRemainingAllocationSize()
 }
 template<typename Header>
-Address ThreadHeap<Header>::outOfLineAllocate(size_t allocationSize, const GCInfo* gcInfo)
+Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo)
 {
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    m_threadState->snapshotFreeListIfNecessary();
+#endif
     ASSERT(allocationSize > remainingAllocationSize());
     if (allocationSize > blinkPageSize / 2)
         return allocateLargeObject(allocationSize, gcInfo);
@@ -687,120 +725,55 @@ Address ThreadHeap<Header>::outOfLineAllocate(size_t allocationSize, const GCInf
     updateRemainingAllocationSize();
     threadState()->scheduleGCOrForceConservativeGCIfNeeded();
-    ASSERT(allocationSize >= allocationGranularity);
-    Address result = allocateFromFreeList(allocationSize, gcInfo);
-    if (result)
-        return result;
     setAllocationPoint(nullptr, 0);

keishi 2015/01/27 08:59:00
The changes below to outOfLineAllocate and allocat

-    if (coalesce(allocationSize)) {
-        result = allocateFromFreeList(allocationSize, gcInfo);
-        if (result)
-            return result;
-    }
+    ASSERT(allocationSize >= allocationGranularity);
+    if (allocateFromFreeList(allocationSize))
+        return allocate(payloadSize, gcInfo);
+    if (coalesce(allocationSize) && allocateFromFreeList(allocationSize))
+        return allocate(payloadSize, gcInfo);
     addPageToHeap(gcInfo);
-    result = allocateFromFreeList(allocationSize, gcInfo);
-    RELEASE_ASSERT(result);
-    return result;
-}
-
-static bool shouldUseFirstFitForHeap(int heapIndex)
-{
-    // For an allocation of size N, should a heap perform a first-fit
-    // matching within the sized bin that N belongs to?
-    //
-    // Theory: quickly reusing a previously freed backing store block stands
-    // a chance of maintaining cached presence of that block (maintains
-    // "locality".) This is preferable to starting to bump allocate from a
-    // new and bigger block, which is what allocateFromFreeList() does by
-    // default. Hence, the backing store heaps are considered for binned
-    // first-fit matching.
-    //
-    // This appears to hold true through performance expermentation; at
-    // least no signficant performance regressions have been observed.
-    //
-    // This theory of improved performance does not hold true for other
-    // heap types. We are currently seeking an understanding of why;
-    // larger amounts of small block fragmentation might be one reason
-    // for it. TBC.
-    //
-    switch (heapIndex) {
-    case VectorBackingHeap:
-    case InlineVectorBackingHeap:
-    case HashTableBackingHeap:
-    case VectorBackingHeapNonFinalized:
-    case InlineVectorBackingHeapNonFinalized:
-    case HashTableBackingHeapNonFinalized:
-        return true;
-    default:
-        return false;
-    }
+    bool success = allocateFromFreeList(allocationSize);
+    RELEASE_ASSERT(success);
+    return allocate(payloadSize, gcInfo);
 }
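The review comment above is truncated in the capture; the visible restructuring is that allocateFromFreeList() no longer returns an address. It only tries to (re)establish a bump-allocation area, and outOfLineAllocate() then re-enters the regular allocate() fast path. A standalone sketch of that fallback chain; SketchHeap and all of its stub members are invented for illustration, and only the control flow mirrors the diff:

#include <cassert>
#include <cstddef>
#include <new>

struct SketchHeap {
    bool m_hasFreshPage = false;

    // Stand-ins for the real heap operations.
    bool allocateFromFreeList(size_t) { return m_hasFreshPage; }  // would set the allocation point
    bool coalesce(size_t) { return false; }                       // would merge adjacent free entries
    void addPageToHeap() { m_hasFreshPage = true; }               // would map a fresh page
    void* allocate(size_t payloadSize) { return ::operator new(payloadSize); }  // bump-allocating fast path

    void* outOfLineAllocate(size_t payloadSize, size_t allocationSize)
    {
        // 1. Try to carve a new allocation area out of the free list.
        if (allocateFromFreeList(allocationSize))
            return allocate(payloadSize);
        // 2. Coalesce neighboring free entries, then retry the free list.
        if (coalesce(allocationSize) && allocateFromFreeList(allocationSize))
            return allocate(payloadSize);
        // 3. Last resort: grow the heap; the fresh page must satisfy the request.
        addPageToHeap();
        bool success = allocateFromFreeList(allocationSize);
        (void)success;
        assert(success);
        return allocate(payloadSize);
    }
};

Funneling every success back through allocate() keeps the bookkeeping (including, with GC_PROFILE_FREE_LIST, the new allocation counters) on a single path, which looks like one motivation for the split.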
 template<typename Header>
-Address ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize, const GCInfo* gcInfo)
-{
-    // The freelist allocation scheme is currently as follows:
-    //
-    // - If the heap is of an appropriate type, try to pick the first
-    //   entry from the sized bin corresponding to |allocationSize|.
-    //   [See shouldUseFirstFitForHeap() comment for motivation on why.]
-    //
-    // - If that didn't satisfy the allocation, try reusing a block
-    //   from the largest bin. The underlying reasoning being that
-    //   we want to amortize this slow allocation call by carving
-    //   off as a large a free block as possible in one go; a block
-    //   that will service this block and let following allocations
-    //   be serviced quickly by bump allocation.
-    //
-    // - Fail; allocation cannot be serviced by the freelist.
-    //   The allocator will handle that failure by requesting more
-    //   heap pages from the OS and re-initiate the allocation request.
-    //
-    int index = FreeList<Header>::bucketIndexForSize(allocationSize) + 1;
-    if (index <= m_freeList.m_biggestFreeListIndex && shouldUseFirstFitForHeap(m_index)) {
-        if (FreeListEntry* entry = m_freeList.m_freeLists[index]) {
-            entry->unlink(&m_freeList.m_freeLists[index]);
-            if (!m_freeList.m_freeLists[index] && index == m_freeList.m_biggestFreeListIndex) {
-                // Biggest bucket drained, adjust biggest index downwards.
-                int maxIndex = m_freeList.m_biggestFreeListIndex - 1;
-                for (; maxIndex >= 0 && !m_freeList.m_freeLists[maxIndex]; --maxIndex) { }
-                m_freeList.m_biggestFreeListIndex = maxIndex < 0 ? 0 : maxIndex;
-            }
-            // Allocate into the freelist block without disturbing the current allocation area.
-            ASSERT(entry->size() >= allocationSize);
-            if (entry->size() > allocationSize)
-                addToFreeList(entry->address() + allocationSize, entry->size() - allocationSize);
-            Heap::increaseAllocatedObjectSize(allocationSize);
-            return allocateAtAddress(entry->address(), allocationSize, gcInfo);
-        }
-        // Failed to find a first-fit freelist entry; fall into the standard case of
-        // chopping off the largest free block and bump allocate from it.
-    }
-    size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex;
-    index = m_freeList.m_biggestFreeListIndex;
-    for (; index > 0; --index, bucketSize >>= 1) {
-        FreeListEntry* entry = m_freeList.m_freeLists[index];
-        if (allocationSize > bucketSize) {
-            // Final bucket candidate; check initial entry if it is able
-            // to service this allocation. Do not perform a linear scan,
-            // as it is considered too costly.
-            if (!entry || entry->size() < allocationSize)
+FreeListEntry* FreeList<Header>::takeEntry(size_t allocationSize)
+{
+    size_t bucketSize = 1 << m_biggestFreeListIndex;
+    int i = m_biggestFreeListIndex;
+    for (; i > 0; i--, bucketSize >>= 1) {
+        if (bucketSize < allocationSize) {
+            // A FreeListEntry for bucketSize might be larger than allocationSize.
+            // FIXME: We check only the first FreeListEntry because searching
+            // the entire list is costly.
+            if (!m_freeLists[i] || m_freeLists[i]->size() < allocationSize)
                 break;
         }
-        if (entry) {
-            entry->unlink(&m_freeList.m_freeLists[index]);
-            setAllocationPoint(entry->address(), entry->size());
-            ASSERT(hasCurrentAllocationArea());
-            ASSERT(remainingAllocationSize() >= allocationSize);
-            m_freeList.m_biggestFreeListIndex = index;
-            return allocateSize(allocationSize, gcInfo);
+        if (FreeListEntry* entry = m_freeLists[i]) {
+            m_biggestFreeListIndex = i;
+            entry->unlink(&m_freeLists[i]);
+            return entry;
         }
     }
-    m_freeList.m_biggestFreeListIndex = index;
+    m_biggestFreeListIndex = i;
     return nullptr;
 }
+template<typename Header>
+bool ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize)
+{
+    ASSERT(!hasCurrentAllocationArea());
+    if (FreeListEntry* entry = m_freeList.takeEntry(allocationSize)) {
+        setAllocationPoint(entry->address(), entry->size());
+        ASSERT(hasCurrentAllocationArea());
+        ASSERT(remainingAllocationSize() >= allocationSize);
+        return true;
+    }
+    return false;
+}
+
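A bucket invariant that the new takeEntry() relies on but the hunk only implies: bucket i holds entries of size at least 1 << i, so every entry in a bucket whose bucketSize is at least allocationSize fits by construction, and only the head of the final, smaller bucket is probed before giving up. A toy model of the scan under that assumed layout, with a list of entry sizes standing in for the linked FreeListEntry chain:

#include <cstddef>
#include <list>
#include <vector>

using Bucket = std::list<size_t>;  // sizes of the free entries in one bucket

// Returns the size of the entry taken, or 0 if the free list cannot serve
// the request. |biggestIndex| mirrors m_biggestFreeListIndex.
size_t takeEntrySketch(std::vector<Bucket>& buckets, int& biggestIndex, size_t allocationSize)
{
    size_t bucketSize = static_cast<size_t>(1) << biggestIndex;
    int i = biggestIndex;
    for (; i > 0; --i, bucketSize >>= 1) {
        if (bucketSize < allocationSize) {
            // Entries here may still fit, but only the head is checked;
            // scanning the whole list is considered too costly.
            if (buckets[i].empty() || buckets[i].front() < allocationSize)
                break;
        }
        if (!buckets[i].empty()) {
            biggestIndex = i;
            size_t entrySize = buckets[i].front();
            buckets[i].pop_front();
            return entrySize;
        }
    }
    biggestIndex = i;
    return 0;
}

For example, with allocationSize = 48 the loop can take the head of any bucket down to bucket 6 (entries of 64 bytes and up) without checking sizes, and probes only the first entry of bucket 5 (32 to 63 bytes) before bailing out.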
 #if ENABLE(ASSERT)
 template<typename Header>
 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address)
@@ -847,6 +820,45 @@ const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeObject(Address address)
 }
 #endif
+#if ENABLE(GC_PROFILE_FREE_LIST)
+template<typename Header>
+void ThreadHeap<Header>::snapshotFreeList(TracedValue* json)
+{
+    json->setDouble("totalAllocationSize", m_totalAllocationSize);
+    json->setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount);
+    json->setInteger("inlineAllocationCount", m_inlineAllocationCount);
+    json->setInteger("allocationCount", m_allocationCount);
+    if (m_setAllocationPointCount > 0) {
+        json->setDouble("averageAllocationPointSize", static_cast<double>(m_allocationPointSizeSum) / m_setAllocationPointCount);
+    }
+    m_allocationPointSizeSum = 0;
+    m_setAllocationPointCount = 0;
+    size_t pageCount = 0;
+    size_t totalPageSize = 0;
+    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
+        ++pageCount;
+        totalPageSize += page->payloadSize();
+    }
+    json->setInteger("pageCount", pageCount);
+    json->setInteger("totalPageSize", totalPageSize);
+    size_t bucketSizes[blinkPageSizeLog2];
+    size_t bucketTotalSizes[blinkPageSizeLog2];
+    size_t freeSize = 0;
+    m_freeList.countBucketSizes(bucketSizes, bucketTotalSizes, &freeSize);
+    json->setInteger("freeSize", freeSize);
+    json->beginArray("bucketSizes");
+    for (size_t i = 0; i < blinkPageSizeLog2; ++i) {
+        json->pushInteger(bucketSizes[i]);
+    }
+    json->endArray();
+    json->beginArray("bucketTotalSizes");
+    for (size_t i = 0; i < blinkPageSizeLog2; ++i) {
+        json->pushInteger(bucketTotalSizes[i]);
+    }
+    json->endArray();
+}
+#endif
+
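To make the derived fields concrete with invented numbers: a heap that served 1000 allocations, 900 of them inline on the bump-allocation fast path, snapshots allocationCount 1000, inlineAllocationCount 900, and inlineAllocationRate 0.9. In the two arrays, bucketSizes[i] is the number of free-list entries currently in bucket i and bucketTotalSizes[i] is their combined byte size; freeSize is the total free bytes across all buckets.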
 #if ENABLE(GC_PROFILE_HEAP)
 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0
 template<typename Header>
@@ -914,6 +926,25 @@ void FreeList<Header>::addToFreeList(Address address, size_t size)
     m_biggestFreeListIndex = index;
 }
+#if ENABLE(GC_PROFILE_FREE_LIST)
+template<typename Header>
+void FreeList<Header>::countBucketSizes(size_t sizes[], size_t totalSizes[], size_t* freeSize) const
+{
+    *freeSize = 0;
+    for (size_t i = 0; i < blinkPageSizeLog2; i++) {
+        sizes[i] = 0;
+        totalSizes[i] = 0;
+        FreeListEntry* entry = m_freeLists[i];
+        while (entry) {
+            ++sizes[i];
+            *freeSize += entry->size();
+            totalSizes[i] += entry->size();
+            entry = entry->next();
+        }
+    }
+}
+#endif
+
 template<typename Header>
 bool ThreadHeap<Header>::expandObject(Header* header, size_t newSize)
 {
@@ -1517,6 +1548,9 @@ void ThreadHeap<Header>::sweepLargePages()
 template<typename Header>
 void ThreadHeap<Header>::sweep()
+    for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
+        page->countUnmarkedObjects();
+    }
     ASSERT(isConsistentForSweeping());
 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
     // When using ASan do a pre-sweep where all unmarked objects are
@@ -1886,6 +1920,23 @@ void HeapPage<Header>::poisonUnmarkedObjects()
 }
 #endif
+template<typename Header>
+void HeapPage<Header>::countUnmarkedObjects()

keishi 2015/01/27 08:59:00
Scans heap for objects that haven't been marked, a

+{
+    MutexLocker locker(uomMutex());
+    for (Address headerAddress = payload(); headerAddress < end(); ) {
+        Header* header = reinterpret_cast<Header*>(headerAddress);
+        ASSERT(header->size() < blinkPagePayloadSize());
+
+        if (!header->isFree() && !header->isMarked()) {
+            String className(classOf(header->payload()));
+            ObjectAgeMap::AddResult result = uom().add(className, AgeHistogram());
+            result.storedValue->value.data[header->age()]++;
+        }
+        headerAddress += header->size();
+    }
+}
+
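The review comment above is cut off in the capture, but the body shows the mechanism: object headers lie back to back in a page's payload, so advancing by header->size() visits each object exactly once, and every live-but-unmarked object gets its class and age tallied into uom() just before sweeping reclaims it. A standalone sketch of that header walk; SketchHeader's bit packing is invented here and Blink's real header layout differs:

#include <cstddef>
#include <cstdint>

// Invented packing: low bits carry flags, the rest is the object size
// (always a nonzero multiple of 8, so the walk terminates).
struct SketchHeader {
    uint32_t sizeAndFlags;
    size_t size() const { return sizeAndFlags & ~7u; }
    bool isFree() const { return sizeAndFlags & 1; }
    bool isMarked() const { return sizeAndFlags & 2; }
};

// Visit every object header in [payload, end), mirroring the loop in
// countUnmarkedObjects() above.
template <typename Visitor>
void forEachHeader(uint8_t* payload, uint8_t* end, Visitor visit)
{
    for (uint8_t* address = payload; address < end; ) {
        SketchHeader* header = reinterpret_cast<SketchHeader*>(address);
        visit(header);
        address += header->size();  // the next header starts right after this object
    }
}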
 template<>
 inline void HeapPage<GeneralHeapObjectHeader>::finalize(GeneralHeapObjectHeader* header)
 {
@@ -2061,12 +2112,19 @@ public:
         header->mark();
 #if ENABLE(GC_PROFILE_MARKING)
+        header->incAge();
+
         MutexLocker locker(objectGraphMutex());
         String className(classOf(objectPointer));
         {
             LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet());
             result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer));
         }
+        {
+            MutexLocker locker(momMutex());
+            ObjectAgeMap::AddResult result = mom().add(className, AgeHistogram());

keishi 2015/01/27 08:59:01
Record that the object has been marked to mom()

+            result.storedValue->value.data[header->age()]++;
+        }
         ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName));
         ASSERT(result.isNewEntry);
         // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer);
@@ -2216,6 +2274,21 @@ public:
         }
     }
+    void reportMarkingStats()
+    {
+        MutexLocker locker(momMutex());
+        RefPtr<TracedValue> json = TracedValue::create();
+        for (ObjectAgeMap::iterator it = mom().begin(), end = mom().end(); it != end; ++it) {
+            json->beginArray(it->key.ascii().data());
+            for (size_t i = 0; i < 8; ++i) {
+                json->pushInteger(it->value.data[i]);
+            }
+            json->endArray();
+        }
+        TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "MarkingStats", (unsigned long long)0, json.release());
+        mom().clear();
+    }
+
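With invented numbers, the resulting MarkingStats snapshot carries one array per class name, indexed by age, e.g. "blink::Element": [420, 96, 31, 12, 5, 2, 1, 9], meaning 420 objects of that class were marked at age 0 this cycle and 9 at age 7, the last bucket. Heap::reportSweepingStats() further down emits the same shape for the unmarked objects that sweeping is about to reclaim.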
     static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous)
     {
         int count = 0;
@@ -2554,6 +2627,21 @@ void Heap::preGC()
         state->preGC();
 }
+void Heap::reportSweepingStats()
+{
+    MutexLocker locker(uomMutex());
+    RefPtr<TracedValue> json = TracedValue::create();
+    for (ObjectAgeMap::iterator it = uom().begin(), end = uom().end(); it != end; ++it) {
+        json->beginArray(it->key.ascii().data());
+        for (size_t i = 0; i < 8; ++i) {
+            json->pushInteger(it->value.data[i]);
+        }
+        json->endArray();
+    }
+    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "SweepingStats", (unsigned long long)0, json.release());
+    uom().clear();
+}
+
 void Heap::postGC()
 {
     ASSERT(ThreadState::current()->isInGC());
@@ -2566,6 +2654,10 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCTyp
     ThreadState* state = ThreadState::current();
     state->setGCState(ThreadState::StoppingOtherThreads);
+#if ENABLE(GC_PROFILE_FREE_LIST)
+    state->snapshotFreeListIfNecessary();
+#endif
+
     GCScope gcScope(stackState);
     // Check if we successfully parked the other threads. If not we bail out of
     // the GC.
@@ -2625,7 +2717,8 @@ void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCTyp
     postGC();
 #if ENABLE(GC_PROFILE_MARKING)
-    static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportStats();
+    //static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportStats();
+    static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportMarkingStats();
 #endif
     if (Platform::current()) {