| Index: Source/platform/heap/Heap.cpp
|
| diff --git a/Source/platform/heap/Heap.cpp b/Source/platform/heap/Heap.cpp
|
| index 92ab2bbe1497a2add57f62cabbed4200dc956eaf..ae4cdfa9322bad53064de1aed934906b3175bbb1 100644
|
| --- a/Source/platform/heap/Heap.cpp
|
| +++ b/Source/platform/heap/Heap.cpp
|
| @@ -141,6 +141,8 @@ public:
|
| , m_gcType(gcType)
|
| , m_parkedAllThreads(false)
|
| {
|
| + BufferAllocator::setIgnoreHitRateCount(true);
|
| + ASSERT(state->attached());
|
| TRACE_EVENT0("blink_gc", "Heap::GCScope");
|
| const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE();
|
| if (m_state->isMainThread())
|
| @@ -178,6 +180,7 @@ public:
|
|
|
| ~GCScope()
|
| {
|
| + BufferAllocator::setIgnoreHitRateCount(false);
|
| // Only cleanup if we parked all threads in which case the GC happened
|
| // and we need to resume the other threads.
|
| if (LIKELY(m_gcType != ThreadState::ThreadTerminationGC && m_parkedAllThreads))
|
| @@ -234,10 +237,12 @@ void BaseHeap::cleanupPages()
|
| clearFreeLists();
|
|
|
| ASSERT(!m_firstUnsweptPage);
|
| - // Add the BaseHeap's pages to the orphanedPagePool.
|
| - for (BasePage* page = m_firstPage; page; page = page->next()) {
|
| - Heap::decreaseAllocatedSpace(page->size());
|
| - Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
|
| + if (threadState()->attached()) {
|
| + // Add the BaseHeap's pages to the orphanedPagePool.
|
| + for (BasePage* page = m_firstPage; page; page = page->next()) {
|
| + Heap::decreaseAllocatedSpace(page->size());
|
| + Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
|
| + }
|
| }
|
| m_firstPage = nullptr;
|
| }
|
| @@ -477,6 +482,28 @@ void BaseHeap::sweepUnsweptPage()
|
| }
|
| }
|
|
|
| +void BaseHeap::freePage(BasePage* removedPage)
|
| +{
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + fprintf(stderr, "freePage: isLargeObjectPage=%d\n", removedPage->isLargeObjectPage());
|
| +#endif
|
| + ASSERT(!threadState()->attached());
|
| + ASSERT(!m_firstUnsweptPage);
|
| + BasePage* page = m_firstPage;
|
| + BasePage** prevNext = &m_firstPage;
|
| + while (page) {
|
| + if (page == removedPage) {
|
| + ASSERT(page->isEmpty());
|
| + page->unlink(prevNext);
|
| + page->removeFromHeap();
|
| + return;
|
| + }
|
| + prevNext = &page->m_next;
|
| + page = page->next();
|
| + }
|
| + ASSERT_NOT_REACHED();
|
| +}
|
| +
|
| bool BaseHeap::lazySweepWithDeadline(double deadlineSeconds)
|
| {
|
| // It might be heavy to call Platform::current()->monotonicallyIncreasingTime()
|
| @@ -528,6 +555,14 @@ NormalPageHeap::NormalPageHeap(ThreadState* state, int index)
|
| , m_allocationCount(0)
|
| , m_inlineAllocationCount(0)
|
| #endif
|
| + , m_promptlyFreeHit(0)
|
| + , m_promptlyFreeHitIgnored(0)
|
| + , m_promptlyFreeMiss(0)
|
| + , m_promptlyFreeMissIgnored(0)
|
| + , m_expandHit(0)
|
| + , m_expandHitIgnored(0)
|
| + , m_expandMiss(0)
|
| + , m_expandMissIgnored(0)
|
| {
|
| clearFreeLists();
|
| }
|
| @@ -638,10 +673,11 @@ void NormalPageHeap::allocatePage()
|
| offset += blinkPageSize;
|
| }
|
| }
|
| - NormalPage* page = new (pageMemory->writableStart()) NormalPage(pageMemory, this);
|
| + NormalPage* page = new (NotNull, pageMemory->writableStart()) NormalPage(pageMemory, this);
|
| page->link(&m_firstPage);
|
|
|
| - Heap::increaseAllocatedSpace(page->size());
|
| + if (threadState()->attached())
|
| + Heap::increaseAllocatedSpace(page->size());
|
| #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
|
| // Allow the following addToFreeList() to add the newly allocated memory
|
| // to the free list.
|
| @@ -656,7 +692,8 @@ void NormalPageHeap::allocatePage()
|
|
|
| void NormalPageHeap::freePage(NormalPage* page)
|
| {
|
| - Heap::decreaseAllocatedSpace(page->size());
|
| + if (threadState()->attached())
|
| + Heap::decreaseAllocatedSpace(page->size());
|
|
|
| if (page->terminating()) {
|
| // The thread is shutting down and this page is being removed as a part
|
| @@ -683,6 +720,10 @@ bool NormalPageHeap::coalesce()
|
| // FIXME: This threshold is determined just to optimize blink_perf
|
| // benchmarks. Coalescing is very sensitive to the threashold and
|
| // we need further investigations on the coalescing scheme.
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!threadState()->attached())
|
| + fprintf(stderr, "coalesce: heapIndex=%d m_promptlyFreedSize=%zu\n", heapIndex(), m_promptlyFreedSize);
|
| +#endif
|
| if (m_promptlyFreedSize < 1024 * 1024)
|
| return false;
|
|
|
| @@ -692,6 +733,11 @@ bool NormalPageHeap::coalesce()
|
| ASSERT(!hasCurrentAllocationArea());
|
| TRACE_EVENT0("blink_gc", "BaseHeap::coalesce");
|
|
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!threadState()->attached())
|
| + fprintf(stderr, "coalesce begin\n");
|
| +#endif
|
| + threadState()->clearHeapAges();
|
| // Rebuild free lists.
|
| m_freeList.clear();
|
| size_t freedSize = 0;
|
| @@ -735,12 +781,39 @@ bool NormalPageHeap::coalesce()
|
| if (startOfGap != page->payloadEnd())
|
| addToFreeList(startOfGap, page->payloadEnd() - startOfGap);
|
| }
|
| - Heap::decreaseAllocatedObjectSize(freedSize);
|
| + if (threadState()->attached())
|
| + Heap::decreaseAllocatedObjectSize(freedSize);
|
| ASSERT(m_promptlyFreedSize == freedSize);
|
| m_promptlyFreedSize = 0;
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!threadState()->attached())
|
| + fprintf(stderr, "coalesce end\n");
|
| +#endif
|
| return true;
|
| }
|
|
|
| +void NormalPageHeap::printStats()
|
| +{
|
| + fprintf(stderr, "heapIndex=%d\n", heapIndex());
|
| + fprintf(stderr, "expand hit/total = %d/%d (%.2lf%%)\n"
|
| + , m_expandHit, m_expandHit + m_expandMiss
|
| + , 100.0 * m_expandHit / (m_expandHit + m_expandMiss));
|
| + fprintf(stderr, "promptlyFree hit/total = %d/%d (%.2lf%%)\n"
|
| + , m_promptlyFreeHit, m_promptlyFreeHit + m_promptlyFreeMiss
|
| + , 100.0 * m_promptlyFreeHit / (m_promptlyFreeHit + m_promptlyFreeMiss));
|
| + fprintf(stderr, "expand hit ignored=%d, miss ignored=%d\n", m_expandHitIgnored, m_expandMissIgnored);
|
| + fprintf(stderr, "promptlyFree hit ignored=%d, miss ignored=%d\n", m_promptlyFreeHitIgnored, m_promptlyFreeMissIgnored);
|
| +
|
| + m_promptlyFreeHit = 0;
|
| + m_promptlyFreeHitIgnored = 0;
|
| + m_promptlyFreeMiss = 0;
|
| + m_promptlyFreeMissIgnored = 0;
|
| + m_expandHit = 0;
|
| + m_expandHitIgnored = 0;
|
| + m_expandMiss = 0;
|
| + m_expandMissIgnored = 0;
|
| +}
|
| +
|
| void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header)
|
| {
|
| ASSERT(!threadState()->sweepForbidden());
|
| @@ -752,23 +825,49 @@ void NormalPageHeap::promptlyFreeObject(HeapObjectHeader* header)
|
| ASSERT(size > 0);
|
| ASSERT(pageFromObject(address) == findPageFromAddress(address));
|
|
|
| - {
|
| + if (threadState()->attached()) {
|
| ThreadState::SweepForbiddenScope forbiddenScope(threadState());
|
| header->finalize(payload, payloadSize);
|
| - if (address + size == m_currentAllocationPoint) {
|
| - m_currentAllocationPoint = address;
|
| - if (m_lastRemainingAllocationSize == m_remainingAllocationSize) {
|
| + }
|
| + if (address + size == m_currentAllocationPoint) {
|
| + if (!threadState()->attached()) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + fprintf(stderr, "promptlyFree hit: address=%p size=%zu\n", address, size);
|
| +#endif
|
| + if (BufferAllocator::ignoreHitRateCount())
|
| + m_promptlyFreeHitIgnored++;
|
| + else
|
| + m_promptlyFreeHit++;
|
| + }
|
| + m_currentAllocationPoint = address;
|
| + if (m_lastRemainingAllocationSize == m_remainingAllocationSize) {
|
| + if (threadState()->attached())
|
| Heap::decreaseAllocatedObjectSize(size);
|
| - m_lastRemainingAllocationSize += size;
|
| - }
|
| - m_remainingAllocationSize += size;
|
| - SET_MEMORY_INACCESSIBLE(address, size);
|
| - return;
|
| + m_lastRemainingAllocationSize += size;
|
| }
|
| - SET_MEMORY_INACCESSIBLE(payload, payloadSize);
|
| - header->markPromptlyFreed();
|
| + m_remainingAllocationSize += size;
|
| + SET_MEMORY_INACCESSIBLE(address, size);
|
| + return;
|
| }
|
| -
|
| + if (!threadState()->attached()) {
|
| + if (BufferAllocator::ignoreHitRateCount()) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + fprintf(stderr, "promptlyFree miss ignored: address=%p size=%zu\n", address, size);
|
| +#endif
|
| + m_promptlyFreeMissIgnored++;
|
| + } else {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + static int count = 0;
|
| + count++;
|
| + if (count % 100 == 99)
|
| + fprintf(stderr, "count%%100\n");
|
| + fprintf(stderr, "promptlyFree miss: address=%p size=%zu\n", address, size);
|
| +#endif
|
| + m_promptlyFreeMiss++;
|
| + }
|
| + }
|
| + SET_MEMORY_INACCESSIBLE(payload, payloadSize);
|
| + header->markPromptlyFreed();
|
| m_promptlyFreedSize += size;
|
| }
|
|
|
| @@ -784,6 +883,15 @@ bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize)
|
| ASSERT(allocationSize > header->size());
|
| size_t expandSize = allocationSize - header->size();
|
| if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) {
|
| + if (!threadState()->attached()) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + fprintf(stderr, "expandObject hit: current=%p expandSize=%zu\n", m_currentAllocationPoint, expandSize);
|
| +#endif
|
| + if (BufferAllocator::ignoreHitRateCount())
|
| + m_expandHitIgnored++;
|
| + else
|
| + m_expandHit++;
|
| + }
|
| m_currentAllocationPoint += expandSize;
|
| m_remainingAllocationSize -= expandSize;
|
|
|
| @@ -793,6 +901,19 @@ bool NormalPageHeap::expandObject(HeapObjectHeader* header, size_t newSize)
|
| ASSERT(findPageFromAddress(header->payloadEnd() - 1));
|
| return true;
|
| }
|
| + if (!threadState()->attached()) {
|
| + if (BufferAllocator::ignoreHitRateCount()) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + fprintf(stderr, "expandObject miss ignored: current=%p expandSize=%zu remaining=%zu\n", m_currentAllocationPoint, expandSize, m_remainingAllocationSize);
|
| +#endif
|
| + m_expandMissIgnored++;
|
| + } else {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + fprintf(stderr, "expandObject miss: current=%p expandSize=%zu remaining=%zu\n", m_currentAllocationPoint, expandSize, m_remainingAllocationSize);
|
| +#endif
|
| + m_expandMiss++;
|
| + }
|
| + }
|
| return false;
|
| }
|
|
|
| @@ -804,6 +925,10 @@ bool NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize)
|
| ASSERT(header->size() > allocationSize);
|
| size_t shrinkSize = header->size() - allocationSize;
|
| if (header->payloadEnd() == m_currentAllocationPoint) {
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!threadState()->attached())
|
| + fprintf(stderr, "shrinkObject hit: current=%p shrinkSize=%zu\n", m_currentAllocationPoint, shrinkSize);
|
| +#endif
|
| m_currentAllocationPoint -= shrinkSize;
|
| m_remainingAllocationSize += shrinkSize;
|
| SET_MEMORY_INACCESSIBLE(m_currentAllocationPoint, shrinkSize);
|
| @@ -819,6 +944,10 @@ bool NormalPageHeap::shrinkObject(HeapObjectHeader* header, size_t newSize)
|
| m_promptlyFreedSize += shrinkSize;
|
| header->setSize(allocationSize);
|
| SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader), shrinkSize - sizeof(HeapObjectHeader));
|
| +#if BUFFER_ALLOCATOR_DEBUG
|
| + if (!threadState()->attached())
|
| + fprintf(stderr, "shrinkObject miss: shrinkAddress=%p shrinkSize=%zu\n", shrinkAddress, shrinkSize);
|
| +#endif
|
| return false;
|
| }
|
|
|
| @@ -852,7 +981,8 @@ Address NormalPageHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex
|
| void NormalPageHeap::updateRemainingAllocationSize()
|
| {
|
| if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
|
| - Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
|
| + if (threadState()->attached())
|
| + Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
|
| m_lastRemainingAllocationSize = remainingAllocationSize();
|
| }
|
| ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
|
| @@ -923,11 +1053,13 @@ Address NormalPageHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIn
|
| return result;
|
| }
|
|
|
| - // 6. Complete sweeping.
|
| - threadState()->completeSweep();
|
| + if (threadState()->attached()) {
|
| + // 6. Complete sweeping.
|
| + threadState()->completeSweep();
|
|
|
| - // 7. Check if we should trigger a GC.
|
| - threadState()->scheduleGCIfNeeded();
|
| + // 7. Check if we should trigger a GC.
|
| + threadState()->scheduleGCIfNeeded();
|
| + }
|
|
|
| // 8. Add a new page to this heap.
|
| allocatePage();
|
| @@ -986,12 +1118,14 @@ Address LargeObjectHeap::allocateLargeObjectPage(size_t allocationSize, size_t g
|
| if (result)
|
| return result;
|
|
|
| - // 2. If we have failed in sweeping allocationSize bytes,
|
| - // we complete sweeping before allocating this large object.
|
| - threadState()->completeSweep();
|
| + if (threadState()->attached()) {
|
| + // 2. If we have failed in sweeping allocationSize bytes,
|
| + // we complete sweeping before allocating this large object.
|
| + threadState()->completeSweep();
|
|
|
| - // 3. Check if we should trigger a GC.
|
| - threadState()->scheduleGCIfNeeded();
|
| + // 3. Check if we should trigger a GC.
|
| + threadState()->scheduleGCIfNeeded();
|
| + }
|
|
|
| return doAllocateLargeObjectPage(allocationSize, gcInfoIndex);
|
| }
|
| @@ -1018,7 +1152,7 @@ Address LargeObjectHeap::doAllocateLargeObjectPage(size_t allocationSize, size_t
|
| HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex);
|
| Address result = headerAddress + sizeof(*header);
|
| ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
|
| - LargeObjectPage* largeObject = new (largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize);
|
| + LargeObjectPage* largeObject = new (NotNull, largeObjectAddress) LargeObjectPage(pageMemory, this, allocationSize);
|
| ASSERT(header->checkHeader());
|
|
|
| // Poison the object header and allocationGranularity bytes after the object
|
| @@ -1027,16 +1161,20 @@ Address LargeObjectHeap::doAllocateLargeObjectPage(size_t allocationSize, size_t
|
|
|
| largeObject->link(&m_firstPage);
|
|
|
| - Heap::increaseAllocatedSpace(largeObject->size());
|
| - Heap::increaseAllocatedObjectSize(largeObject->size());
|
| + if (threadState()->attached()) {
|
| + Heap::increaseAllocatedSpace(largeObject->size());
|
| + Heap::increaseAllocatedObjectSize(largeObject->size());
|
| + }
|
| return result;
|
| }
|
|
|
| void LargeObjectHeap::freeLargeObjectPage(LargeObjectPage* object)
|
| {
|
| ASAN_UNPOISON_MEMORY_REGION(object->payload(), object->payloadSize());
|
| - object->heapObjectHeader()->finalize(object->payload(), object->payloadSize());
|
| - Heap::decreaseAllocatedSpace(object->size());
|
| + if (threadState()->attached()) {
|
| + object->heapObjectHeader()->finalize(object->payload(), object->payloadSize());
|
| + Heap::decreaseAllocatedSpace(object->size());
|
| + }
|
|
|
| // Unpoison the object header and allocationGranularity bytes after the
|
| // object before freeing.
|
| @@ -1293,6 +1431,7 @@ void NormalPage::removeFromHeap()
|
|
|
| void NormalPage::sweep()
|
| {
|
| + ASSERT(heap()->threadState()->attached());
|
| clearObjectStartBitMap();
|
|
|
| size_t markedObjectSize = 0;
|
| @@ -1375,6 +1514,7 @@ void NormalPage::makeConsistentForGC()
|
|
|
| void NormalPage::makeConsistentForMutator()
|
| {
|
| + ASSERT(heap()->threadState()->attached());
|
| Address startOfGap = payload();
|
| for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
|
| HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
|
| @@ -2417,7 +2557,9 @@ BasePage* Heap::lookup(Address address)
|
| return nullptr;
|
| if (PageMemoryRegion* region = s_regionTree->lookup(address)) {
|
| BasePage* page = region->pageFromAddress(address);
|
| - return page && !page->orphaned() ? page : nullptr;
|
| + if (page && !page->orphaned() && page->heap()->threadState()->attached())
|
| + return page;
|
| + return nullptr;
|
| }
|
| return nullptr;
|
| }
|
|
|