| Index: third_party/WebKit/Source/platform/heap/HeapPage.cpp
| diff --git a/third_party/WebKit/Source/platform/heap/HeapPage.cpp b/third_party/WebKit/Source/platform/heap/HeapPage.cpp
| index 686365454619cbc1cb638b324e2bc129aef302a3..e0e2cb6b51b7197be6da320ae710785cad4594d4 100644
| --- a/third_party/WebKit/Source/platform/heap/HeapPage.cpp
| +++ b/third_party/WebKit/Source/platform/heap/HeapPage.cpp
| @@ -62,7 +62,7 @@
| #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \
| do { \
| BasePage* page = pageFromObject(object); \
| - ASSERT(page); \
| + DCHECK(page); \
| bool isContainer = \
| ThreadState::isVectorArenaIndex(page->arena()->arenaIndex()); \
| if (!isContainer && page->isLargeObjectPage()) \
| @@ -78,7 +78,7 @@
| #define ASAN_MARK_LARGE_VECTOR_CONTAINER(arena, largeObject) \
| if (ThreadState::isVectorArenaIndex(arena->arenaIndex())) { \
| BasePage* largePage = pageFromObject(largeObject); \
| - ASSERT(largePage->isLargeObjectPage()); \
| + DCHECK(largePage->isLargeObjectPage()); \
| static_cast<LargeObjectPage*>(largePage)->setIsVectorBackingPage(); \
| }
| #else
| @@ -89,10 +89,10 @@
|
| namespace blink {
|
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| NO_SANITIZE_ADDRESS
| void HeapObjectHeader::zapMagic() {
| - ASSERT(checkHeader());
| + DCHECK(checkHeader());
| m_magic = zappedMagic;
| }
| #endif
| @@ -113,14 +113,14 @@ BaseArena::BaseArena(ThreadState* state, int index)
| m_index(index) {}
|
| BaseArena::~BaseArena() {
| - ASSERT(!m_firstPage);
| - ASSERT(!m_firstUnsweptPage);
| + DCHECK(!m_firstPage);
| + DCHECK(!m_firstUnsweptPage);
| }
|
| void BaseArena::cleanupPages() {
| clearFreeLists();
|
| - ASSERT(!m_firstUnsweptPage);
| + DCHECK(!m_firstUnsweptPage);
| // Add the BaseArena's pages to the orphanedPagePool.
| for (BasePage* page = m_firstPage; page; page = page->next()) {
| getThreadState()->heap().heapStats().decreaseAllocatedSpace(page->size());
| @@ -158,7 +158,7 @@ void BaseArena::takeSnapshot(const String& dumpBaseName,
| allocatorDump->AddScalar("free_count", "objects", heapInfo.freeCount);
| }
|
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| BasePage* BaseArena::findPageFromAddress(Address address) {
| for (BasePage* page = m_firstPage; page; page = page->next()) {
| if (page->contains(address))
| @@ -174,7 +174,7 @@ BasePage* BaseArena::findPageFromAddress(Address address) {
|
| void BaseArena::makeConsistentForGC() {
| clearFreeLists();
| - ASSERT(isConsistentForGC());
| + DCHECK(isConsistentForGC());
| for (BasePage* page = m_firstPage; page; page = page->next()) {
| page->markAsUnswept();
| page->invalidateObjectStartBitmap();
| @@ -191,16 +191,16 @@ void BaseArena::makeConsistentForGC() {
| for (BasePage *page = m_firstUnsweptPage; page;
| previousPage = page, page = page->next()) {
| page->makeConsistentForGC();
| - ASSERT(!page->hasBeenSwept());
| + DCHECK(!page->hasBeenSwept());
| page->invalidateObjectStartBitmap();
| }
| if (previousPage) {
| - ASSERT(m_firstUnsweptPage);
| + DCHECK(m_firstUnsweptPage);
| previousPage->m_next = m_firstPage;
| m_firstPage = m_firstUnsweptPage;
| m_firstUnsweptPage = nullptr;
| }
| - ASSERT(!m_firstUnsweptPage);
| + DCHECK(!m_firstUnsweptPage);
|
| HeapCompact* heapCompactor = getThreadState()->heap().compaction();
| if (!heapCompactor->isCompactingArena(arenaIndex()))
| @@ -216,8 +216,8 @@ void BaseArena::makeConsistentForGC() {
|
| void BaseArena::makeConsistentForMutator() {
| clearFreeLists();
| - ASSERT(isConsistentForGC());
| - ASSERT(!m_firstPage);
| + DCHECK(isConsistentForGC());
| + DCHECK(!m_firstPage);
|
| // Drop marks from marked objects and rebuild free lists in preparation for
| // resuming the executions of mutators.
| @@ -229,17 +229,17 @@ void BaseArena::makeConsistentForMutator() {
| page->invalidateObjectStartBitmap();
| }
| if (previousPage) {
| - ASSERT(m_firstUnsweptPage);
| + DCHECK(m_firstUnsweptPage);
| previousPage->m_next = m_firstPage;
| m_firstPage = m_firstUnsweptPage;
| m_firstUnsweptPage = nullptr;
| }
| - ASSERT(!m_firstUnsweptPage);
| + DCHECK(!m_firstUnsweptPage);
| }
|
| size_t BaseArena::objectPayloadSizeForTesting() {
| - ASSERT(isConsistentForGC());
| - ASSERT(!m_firstUnsweptPage);
| + DCHECK(isConsistentForGC());
| + DCHECK(!m_firstUnsweptPage);
|
| size_t objectPayloadSize = 0;
| for (BasePage* page = m_firstPage; page; page = page->next())
| @@ -248,15 +248,15 @@ size_t BaseArena::objectPayloadSizeForTesting() {
| }
|
| void BaseArena::prepareHeapForTermination() {
| - ASSERT(!m_firstUnsweptPage);
| + DCHECK(!m_firstUnsweptPage);
| for (BasePage* page = m_firstPage; page; page = page->next()) {
| page->setTerminating();
| }
| }
|
| void BaseArena::prepareForSweep() {
| - ASSERT(getThreadState()->isInGC());
| - ASSERT(!m_firstUnsweptPage);
| + DCHECK(getThreadState()->isInGC());
| + DCHECK(!m_firstUnsweptPage);
|
| // Move all pages to a list of unswept pages.
| m_firstUnsweptPage = m_firstPage;
| @@ -275,7 +275,7 @@ Address BaseArena::lazySweep(size_t allocationSize, size_t gcInfoIndex) {
| if (!m_firstUnsweptPage)
| return nullptr;
|
| - RELEASE_ASSERT(getThreadState()->isSweepingInProgress());
| + CHECK(getThreadState()->isSweepingInProgress());
|
| // lazySweepPages() can be called recursively if finalizers invoked in
| // page->sweep() allocate memory and the allocation triggers
| @@ -318,9 +318,9 @@ bool BaseArena::lazySweepWithDeadline(double deadlineSeconds) {
| // pages.
| static const int deadlineCheckInterval = 10;
|
| - RELEASE_ASSERT(getThreadState()->isSweepingInProgress());
| - ASSERT(getThreadState()->sweepForbidden());
| - ASSERT(!getThreadState()->isMainThread() ||
| + CHECK(getThreadState()->isSweepingInProgress());
| + DCHECK(getThreadState()->sweepForbidden());
| + DCHECK(!getThreadState()->isMainThread() ||
| ScriptForbiddenScope::isScriptForbidden());
|
| NormalPageArena* normalArena = nullptr;
| @@ -351,9 +351,9 @@ bool BaseArena::lazySweepWithDeadline(double deadlineSeconds) {
| }
|
| void BaseArena::completeSweep() {
| - RELEASE_ASSERT(getThreadState()->isSweepingInProgress());
| - ASSERT(getThreadState()->sweepForbidden());
| - ASSERT(!getThreadState()->isMainThread() ||
| + CHECK(getThreadState()->isSweepingInProgress());
| + DCHECK(getThreadState()->sweepForbidden());
| + DCHECK(!getThreadState()->isMainThread() ||
| ScriptForbiddenScope::isScriptForbidden());
|
| while (m_firstUnsweptPage) {
| @@ -427,7 +427,7 @@ bool BaseArena::willObjectBeLazilySwept(BasePage* page,
| if (!header->isFree() && header->isMarked()) {
| // There must be a marked object on this page and the one located must
| // have room after it for the unmarked |objectPointer| object.
| - DCHECK(headerAddress + size < pageEnd);
| + DCHECK_LT(headerAddress + size, pageEnd);
| return true;
| }
| headerAddress += size;
| @@ -562,7 +562,7 @@ void NormalPageArena::sweepAndCompact() {
| freedPageCount++;
| BasePage* nextPage;
| availablePages->unlink(&nextPage);
| -#if !(ENABLE(ASSERT) || defined(LEAK_SANITIZER) || \
| +#if !(DCHECK_IS_ON() || defined(LEAK_SANITIZER) || \
| defined(ADDRESS_SANITIZER) || defined(MEMORY_SANITIZER))
| // Clear out the page before adding it to the free page pool, which
| // decommits it. Recommitting the page must find a zeroed page later.
| @@ -583,7 +583,7 @@ void NormalPageArena::sweepAndCompact() {
| heap.compaction()->finishedArenaCompaction(this, freedPageCount, freedSize);
| }
|
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| bool NormalPageArena::isConsistentForGC() {
| // A thread heap is consistent for sweeping if none of the pages to be swept
| // contain a freelist block or the current allocation point.
| @@ -646,10 +646,10 @@ void NormalPageArena::allocatePage() {
| // gets a page and add the rest to the page pool.
| if (!pageMemory) {
| bool result = memory->commit();
| - // If you hit the ASSERT, it will mean that you're hitting
| + // If you hit the DCHECK, it will mean that you're hitting
| // the limit of the number of mmapped regions OS can support
| // (e.g., /proc/sys/vm/max_map_count in Linux).
| - RELEASE_ASSERT(result);
| + CHECK(result);
| pageMemory = memory;
| } else {
| getThreadState()->heap().getFreePagePool()->addFreePage(arenaIndex(),
| @@ -662,7 +662,7 @@ void NormalPageArena::allocatePage() {
| page->link(&m_firstPage);
|
| getThreadState()->heap().heapStats().increaseAllocatedSpace(page->size());
| -#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
| +#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
| // Allow the following addToFreeList() to add the newly allocated memory
| // to the free list.
| ASAN_UNPOISON_MEMORY_REGION(page->payload(), page->payloadSize());
| @@ -709,7 +709,7 @@ bool NormalPageArena::coalesce() {
| if (getThreadState()->sweepForbidden())
| return false;
|
| - ASSERT(!hasCurrentAllocationArea());
| + DCHECK(!hasCurrentAllocationArea());
| TRACE_EVENT0("blink_gc", "BaseArena::coalesce");
|
| // Rebuild free lists.
| @@ -723,11 +723,11 @@ bool NormalPageArena::coalesce() {
| HeapObjectHeader* header =
| reinterpret_cast<HeapObjectHeader*>(headerAddress);
| size_t size = header->size();
| - ASSERT(size > 0);
| - ASSERT(size < blinkPagePayloadSize());
| + DCHECK_GT(size, 0UL);
| + DCHECK_LT(size, blinkPagePayloadSize());
|
| if (header->isPromptlyFreed()) {
| - ASSERT(size >= sizeof(HeapObjectHeader));
| + DCHECK_GE(size, sizeof(HeapObjectHeader));
| // Zero the memory in the free list header to maintain the
| // invariant that memory on the free list is zero filled.
| // The rest of the memory is already on the free list and is
| @@ -750,7 +750,7 @@ bool NormalPageArena::coalesce() {
| headerAddress += size;
| continue;
| }
| - ASSERT(header->checkHeader());
| + DCHECK(header->checkHeader());
| if (startOfGap != headerAddress)
| addToFreeList(startOfGap, headerAddress - startOfGap);
|
| @@ -762,20 +762,20 @@ bool NormalPageArena::coalesce() {
| addToFreeList(startOfGap, page->payloadEnd() - startOfGap);
| }
| getThreadState()->decreaseAllocatedObjectSize(freedSize);
| - ASSERT(m_promptlyFreedSize == freedSize);
| + DCHECK_EQ(m_promptlyFreedSize, freedSize);
| m_promptlyFreedSize = 0;
| return true;
| }
|
| void NormalPageArena::promptlyFreeObject(HeapObjectHeader* header) {
| - ASSERT(!getThreadState()->sweepForbidden());
| - ASSERT(header->checkHeader());
| + DCHECK(!getThreadState()->sweepForbidden());
| + DCHECK(header->checkHeader());
| Address address = reinterpret_cast<Address>(header);
| Address payload = header->payload();
| size_t size = header->size();
| size_t payloadSize = header->payloadSize();
| - ASSERT(size > 0);
| - ASSERT(pageFromObject(address) == findPageFromAddress(address));
| + DCHECK_GT(size, 0UL);
| + DCHECK_EQ(pageFromObject(address), findPageFromAddress(address));
|
| {
| ThreadState::SweepForbiddenScope forbiddenScope(getThreadState());
| @@ -797,31 +797,31 @@ bool NormalPageArena::expandObject(HeapObjectHeader* header, size_t newSize) {
| // It's possible that Vector requests a smaller expanded size because
| // Vector::shrinkCapacity can set a capacity smaller than the actual payload
| // size.
| - ASSERT(header->checkHeader());
| + DCHECK(header->checkHeader());
| if (header->payloadSize() >= newSize)
| return true;
| size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize);
| - ASSERT(allocationSize > header->size());
| + DCHECK_GT(allocationSize, header->size());
| size_t expandSize = allocationSize - header->size();
| if (isObjectAllocatedAtAllocationPoint(header) &&
| expandSize <= m_remainingAllocationSize) {
| m_currentAllocationPoint += expandSize;
| - ASSERT(m_remainingAllocationSize >= expandSize);
| + DCHECK_GE(m_remainingAllocationSize, expandSize);
| setRemainingAllocationSize(m_remainingAllocationSize - expandSize);
| // Unpoison the memory used for the object (payload).
| SET_MEMORY_ACCESSIBLE(header->payloadEnd(), expandSize);
| header->setSize(allocationSize);
| - ASSERT(findPageFromAddress(header->payloadEnd() - 1));
| + DCHECK(findPageFromAddress(header->payloadEnd() - 1));
| return true;
| }
| return false;
| }
|
| bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) {
| - ASSERT(header->checkHeader());
| - ASSERT(header->payloadSize() > newSize);
| + DCHECK(header->checkHeader());
| + DCHECK_GT(header->payloadSize(), newSize);
| size_t allocationSize = ThreadHeap::allocationSizeFromSize(newSize);
| - ASSERT(header->size() > allocationSize);
| + DCHECK_GT(header->size(), allocationSize);
| size_t shrinkSize = header->size() - allocationSize;
| if (isObjectAllocatedAtAllocationPoint(header)) {
| m_currentAllocationPoint -= shrinkSize;
| @@ -830,14 +830,14 @@ bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) {
| header->setSize(allocationSize);
| return true;
| }
| - ASSERT(shrinkSize >= sizeof(HeapObjectHeader));
| - ASSERT(header->gcInfoIndex() > 0);
| + DCHECK_GE(shrinkSize, sizeof(HeapObjectHeader));
| + DCHECK_GT(header->gcInfoIndex(), 0UL);
| Address shrinkAddress = header->payloadEnd() - shrinkSize;
| HeapObjectHeader* freedHeader = new (NotNull, shrinkAddress)
| HeapObjectHeader(shrinkSize, header->gcInfoIndex());
| freedHeader->markPromptlyFreed();
| - ASSERT(pageFromObject(reinterpret_cast<Address>(header)) ==
| - findPageFromAddress(reinterpret_cast<Address>(header)));
| + DCHECK_EQ(pageFromObject(reinterpret_cast<Address>(header)),
| + findPageFromAddress(reinterpret_cast<Address>(header)));
| m_promptlyFreedSize += shrinkSize;
| header->setSize(allocationSize);
| SET_MEMORY_INACCESSIBLE(shrinkAddress + sizeof(HeapObjectHeader),
| @@ -847,7 +847,7 @@ bool NormalPageArena::shrinkObject(HeapObjectHeader* header, size_t newSize) {
|
| Address NormalPageArena::lazySweepPages(size_t allocationSize,
| size_t gcInfoIndex) {
| - ASSERT(!hasCurrentAllocationArea());
| + DCHECK(!hasCurrentAllocationArea());
| AutoReset<bool> isLazySweeping(&m_isLazySweeping, true);
| Address result = nullptr;
| while (m_firstUnsweptPage) {
| @@ -896,16 +896,16 @@ void NormalPageArena::updateRemainingAllocationSize() {
| m_lastRemainingAllocationSize - remainingAllocationSize());
| m_lastRemainingAllocationSize = remainingAllocationSize();
| }
| - ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
| + DCHECK_EQ(m_lastRemainingAllocationSize, remainingAllocationSize());
| }
|
| void NormalPageArena::setAllocationPoint(Address point, size_t size) {
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| if (point) {
| - ASSERT(size);
| + DCHECK(size);
| BasePage* page = pageFromObject(point);
| - ASSERT(!page->isLargeObjectPage());
| - ASSERT(size <= static_cast<NormalPage*>(page)->payloadSize());
| + DCHECK(!page->isLargeObjectPage());
| + DCHECK_LE(size, static_cast<NormalPage*>(page)->payloadSize());
| }
| #endif
| if (hasCurrentAllocationArea()) {
| @@ -918,8 +918,8 @@ void NormalPageArena::setAllocationPoint(Address point, size_t size) {
|
| Address NormalPageArena::outOfLineAllocate(size_t allocationSize,
| size_t gcInfoIndex) {
| - ASSERT(allocationSize > remainingAllocationSize());
| - ASSERT(allocationSize >= allocationGranularity);
| + DCHECK_GT(allocationSize, remainingAllocationSize());
| + DCHECK_GE(allocationSize, allocationGranularity);
|
| // 1. If this allocation is big enough, allocate a large object.
| if (allocationSize >= largeObjectSizeThreshold)
| @@ -959,7 +959,7 @@ Address NormalPageArena::outOfLineAllocate(size_t allocationSize,
|
| // 9. Try to allocate from a free list. This allocation must succeed.
| result = allocateFromFreeList(allocationSize, gcInfoIndex);
| - RELEASE_ASSERT(result);
| + CHECK(result);
| return result;
| }
|
| @@ -985,8 +985,8 @@ Address NormalPageArena::allocateFromFreeList(size_t allocationSize,
| if (entry) {
| entry->unlink(&m_freeList.m_freeLists[index]);
| setAllocationPoint(entry->getAddress(), entry->size());
| - ASSERT(hasCurrentAllocationArea());
| - ASSERT(remainingAllocationSize() >= allocationSize);
| + DCHECK(hasCurrentAllocationArea());
| + DCHECK_GE(remainingAllocationSize(), allocationSize);
| m_freeList.m_biggestFreeListIndex = index;
| return allocateObject(allocationSize, gcInfoIndex);
| }
| @@ -1002,7 +1002,7 @@ Address LargeObjectArena::allocateLargeObjectPage(size_t allocationSize,
| size_t gcInfoIndex) {
| // Caller already added space for object header and rounded up to allocation
| // alignment
| - ASSERT(!(allocationSize & allocationMask));
| + DCHECK(!(allocationSize & allocationMask));
|
| // 1. Try to sweep large objects more than allocationSize bytes
| // before allocating a new large object.
| @@ -1035,19 +1035,19 @@ Address LargeObjectArena::doAllocateLargeObjectPage(size_t allocationSize,
| Address largeObjectAddress = pageMemory->writableStart();
| Address headerAddress =
| largeObjectAddress + LargeObjectPage::pageHeaderSize();
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| // Verify that the allocated PageMemory is expectedly zeroed.
| for (size_t i = 0; i < largeObjectSize; ++i)
| - ASSERT(!largeObjectAddress[i]);
| + DCHECK(!largeObjectAddress[i]);
| #endif
| - ASSERT(gcInfoIndex > 0);
| + DCHECK_GT(gcInfoIndex, 0UL);
| HeapObjectHeader* header = new (NotNull, headerAddress)
| HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex);
| Address result = headerAddress + sizeof(*header);
| - ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
| + DCHECK(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
| LargeObjectPage* largeObject = new (largeObjectAddress)
| LargeObjectPage(pageMemory, this, allocationSize);
| - ASSERT(header->checkHeader());
| + DCHECK(header->checkHeader());
|
| // Poison the object header and allocationGranularity bytes after the object
| ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
| @@ -1076,7 +1076,7 @@ void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) {
| allocationGranularity);
|
| if (object->terminating()) {
| - ASSERT(ThreadState::current()->isTerminating());
| + DCHECK(ThreadState::current()->isTerminating());
| // The thread is shutting down and this page is being removed as a part
| // of the thread local GC. In that case the object could be traced in
| // the next global GC if there is a dangling pointer from a live thread
| @@ -1088,7 +1088,7 @@ void LargeObjectArena::freeLargeObjectPage(LargeObjectPage* object) {
| getThreadState()->heap().getOrphanedPagePool()->addOrphanedPage(
| arenaIndex(), object);
| } else {
| - ASSERT(!ThreadState::current()->isTerminating());
| + DCHECK(!ThreadState::current()->isTerminating());
| PageMemory* memory = object->storage();
| object->~LargeObjectPage();
| delete memory;
| @@ -1110,7 +1110,7 @@ Address LargeObjectArena::lazySweepPages(size_t allocationSize,
| // more than allocationSize bytes.
| if (sweptSize >= allocationSize) {
| result = doAllocateLargeObjectPage(allocationSize, gcInfoIndex);
| - ASSERT(result);
| + DCHECK(result);
| break;
| }
| } else {
| @@ -1128,17 +1128,17 @@ Address LargeObjectArena::lazySweepPages(size_t allocationSize,
| FreeList::FreeList() : m_biggestFreeListIndex(0) {}
|
| void FreeList::addToFreeList(Address address, size_t size) {
| - ASSERT(size < blinkPagePayloadSize());
| + DCHECK_LT(size, blinkPagePayloadSize());
| // The free list entries are only pointer aligned (but when we allocate
| // from them we are 8 byte aligned due to the header size).
| - ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) &
| + DCHECK(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) &
| allocationMask));
| - ASSERT(!(size & allocationMask));
| + DCHECK(!(size & allocationMask));
| ASAN_UNPOISON_MEMORY_REGION(address, size);
| FreeListEntry* entry;
| if (size < sizeof(*entry)) {
| // Create a dummy header with only a size and freelist bit set.
| - ASSERT(size >= sizeof(HeapObjectHeader));
| + DCHECK_GE(size, sizeof(HeapObjectHeader));
| // Free list encode the size to mark the lost memory as freelist memory.
| new (NotNull, address) HeapObjectHeader(size, gcInfoIndexForFreeListHeader);
|
| @@ -1148,7 +1148,7 @@ void FreeList::addToFreeList(Address address, size_t size) {
| }
| entry = new (NotNull, address) FreeListEntry(size);
|
| -#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
| +#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
| // The following logic delays reusing free lists for (at least) one GC
| // cycle or coalescing. This is helpful to detect use-after-free errors
| // that could be caused by lazy sweeping etc.
| @@ -1160,7 +1160,7 @@ void FreeList::addToFreeList(Address address, size_t size) {
| else if (address[i] == reuseForbiddenZapValue)
| forbiddenCount++;
| else
| - ASSERT_NOT_REACHED();
| + NOTREACHED();
| }
| size_t entryCount = size - sizeof(FreeListEntry);
| if (forbiddenCount == entryCount) {
| @@ -1198,7 +1198,7 @@ void FreeList::addToFreeList(Address address, size_t size) {
| m_biggestFreeListIndex = index;
| }
|
| -#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
| +#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
| defined(MEMORY_SANITIZER)
| NO_SANITIZE_ADDRESS
| NO_SANITIZE_MEMORY
| @@ -1213,7 +1213,7 @@ void NEVER_INLINE FreeList::zapFreedMemory(Address address, size_t size) {
| void NEVER_INLINE FreeList::checkFreedMemoryIsZapped(Address address,
| size_t size) {
| for (size_t i = 0; i < size; i++) {
| - ASSERT(address[i] == reuseAllowedZapValue ||
| + DCHECK(address[i] == reuseAllowedZapValue ||
| address[i] == reuseForbiddenZapValue);
| }
| }
| @@ -1257,7 +1257,7 @@ void FreeList::clear() {
| }
|
| int FreeList::bucketIndexForSize(size_t size) {
| - ASSERT(size > 0);
| + DCHECK_GT(size, 0UL);
| int index = -1;
| while (size) {
| size >>= 1;
| @@ -1295,7 +1295,7 @@ BasePage::BasePage(PageMemory* storage, BaseArena* arena)
| m_next(nullptr),
| m_terminating(false),
| m_swept(true) {
| - ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
| + DCHECK(isPageHeaderAddress(reinterpret_cast<Address>(this)));
| }
|
| void BasePage::markOrphaned() {
| @@ -1308,24 +1308,24 @@ void BasePage::markOrphaned() {
|
| NormalPage::NormalPage(PageMemory* storage, BaseArena* arena)
| : BasePage(storage, arena), m_objectStartBitMapComputed(false) {
| - ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
| + DCHECK(isPageHeaderAddress(reinterpret_cast<Address>(this)));
| }
|
| size_t NormalPage::objectPayloadSizeForTesting() {
| size_t objectPayloadSize = 0;
| Address headerAddress = payload();
| markAsSwept();
| - ASSERT(headerAddress != payloadEnd());
| + DCHECK_NE(headerAddress, payloadEnd());
| do {
| HeapObjectHeader* header =
| reinterpret_cast<HeapObjectHeader*>(headerAddress);
| if (!header->isFree()) {
| - ASSERT(header->checkHeader());
| + DCHECK(header->checkHeader());
| objectPayloadSize += header->payloadSize();
| }
| - ASSERT(header->size() < blinkPagePayloadSize());
| + DCHECK_LT(header->size(), blinkPagePayloadSize());
| headerAddress += header->size();
| - ASSERT(headerAddress <= payloadEnd());
| + DCHECK_LE(headerAddress, payloadEnd());
| } while (headerAddress < payloadEnd());
| return objectPayloadSize;
| }
| @@ -1339,7 +1339,7 @@ void NormalPage::removeFromHeap() {
| arenaForNormalPage()->freePage(this);
| }
|
| -#if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
| +#if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
| static void discardPages(Address begin, Address end) {
| uintptr_t beginAddress =
| WTF::roundUpToSystemPage(reinterpret_cast<uintptr_t>(begin));
| @@ -1359,8 +1359,8 @@ void NormalPage::sweep() {
| HeapObjectHeader* header =
| reinterpret_cast<HeapObjectHeader*>(headerAddress);
| size_t size = header->size();
| - ASSERT(size > 0);
| - ASSERT(size < blinkPagePayloadSize());
| + DCHECK_GT(size, 0UL);
| + DCHECK_LT(size, blinkPagePayloadSize());
|
| if (header->isPromptlyFreed())
| pageArena->decreasePromptlyFreedSize(size);
| @@ -1395,7 +1395,7 @@ void NormalPage::sweep() {
| }
| if (startOfGap != headerAddress) {
| pageArena->addToFreeList(startOfGap, headerAddress - startOfGap);
| -#if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
| +#if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
| // Discarding pages increases page faults and may regress performance.
| // So we enable this only on low-RAM devices.
| if (MemoryCoordinator::isLowEndDevice())
| @@ -1409,7 +1409,7 @@ void NormalPage::sweep() {
| }
| if (startOfGap != payloadEnd()) {
| pageArena->addToFreeList(startOfGap, payloadEnd() - startOfGap);
| -#if !ENABLE(ASSERT) && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
| +#if !DCHECK_IS_ON() && !defined(LEAK_SANITIZER) && !defined(ADDRESS_SANITIZER)
| if (MemoryCoordinator::isLowEndDevice())
| discardPages(startOfGap + sizeof(FreeListEntry), payloadEnd());
| #endif
| @@ -1459,7 +1459,7 @@ void NormalPage::sweepAndCompact(CompactionContext& context) {
| // As compaction is under way, leave the freed memory accessible
| // while compacting the rest of the page. We just zap the payload
| // to catch out other finalizers trying to access it.
| -#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
| +#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
| defined(MEMORY_SANITIZER)
| FreeList::zapFreedMemory(payload, payloadSize);
| #endif
| @@ -1509,12 +1509,12 @@ void NormalPage::sweepAndCompact(CompactionContext& context) {
| headerAddress += size;
| markedObjectSize += size;
| allocationPoint += size;
| - DCHECK(allocationPoint <= currentPage->payloadSize());
| + DCHECK_LE(allocationPoint, currentPage->payloadSize());
| }
| if (markedObjectSize)
| pageArena->getThreadState()->increaseMarkedObjectSize(markedObjectSize);
|
| -#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
| +#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
| defined(MEMORY_SANITIZER)
| // Zap the unused portion, until it is either compacted into or freed.
| if (currentPage != this) {
| @@ -1531,7 +1531,7 @@ void NormalPage::makeConsistentForGC() {
| for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
| HeapObjectHeader* header =
| reinterpret_cast<HeapObjectHeader*>(headerAddress);
| - ASSERT(header->size() < blinkPagePayloadSize());
| + DCHECK_LT(header->size(), blinkPagePayloadSize());
| // Check if a free list entry first since we cannot call
| // isMarked on a free list entry.
| if (header->isFree()) {
| @@ -1558,7 +1558,7 @@ void NormalPage::makeConsistentForMutator() {
| HeapObjectHeader* header =
| reinterpret_cast<HeapObjectHeader*>(headerAddress);
| size_t size = header->size();
| - ASSERT(size < blinkPagePayloadSize());
| + DCHECK_LT(size, blinkPagePayloadSize());
| if (header->isPromptlyFreed())
| arenaForNormalPage()->decreasePromptlyFreedSize(size);
| if (header->isFree()) {
| @@ -1579,7 +1579,7 @@ void NormalPage::makeConsistentForMutator() {
| header->unmark();
| headerAddress += size;
| startOfGap = headerAddress;
| - ASSERT(headerAddress <= payloadEnd());
| + DCHECK_LE(headerAddress, payloadEnd());
| }
| if (startOfGap != payloadEnd())
| normalArena->addToFreeList(startOfGap, payloadEnd() - startOfGap);
| @@ -1590,7 +1590,7 @@ void NormalPage::poisonUnmarkedObjects() {
| for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
| HeapObjectHeader* header =
| reinterpret_cast<HeapObjectHeader*>(headerAddress);
| - ASSERT(header->size() < blinkPagePayloadSize());
| + DCHECK_LT(header->size(), blinkPagePayloadSize());
| // Check if a free list entry first since we cannot call
| // isMarked on a free list entry.
| if (header->isFree()) {
| @@ -1611,13 +1611,13 @@ void NormalPage::populateObjectStartBitMap() {
| HeapObjectHeader* header =
| reinterpret_cast<HeapObjectHeader*>(headerAddress);
| size_t objectOffset = headerAddress - start;
| - ASSERT(!(objectOffset & allocationMask));
| + DCHECK(!(objectOffset & allocationMask));
| size_t objectStartNumber = objectOffset / allocationGranularity;
| size_t mapIndex = objectStartNumber / 8;
| - ASSERT(mapIndex < objectStartBitMapSize);
| + DCHECK_LT(mapIndex, objectStartBitMapSize);
| m_objectStartBitMap[mapIndex] |= (1 << (objectStartNumber & 7));
| headerAddress += header->size();
| - ASSERT(headerAddress <= payloadEnd());
| + DCHECK_LE(headerAddress, payloadEnd());
| }
| m_objectStartBitMapComputed = true;
| }
| @@ -1647,11 +1647,11 @@ HeapObjectHeader* NormalPage::findHeaderFromAddress(Address address) {
| size_t objectOffset = address - payload();
| size_t objectStartNumber = objectOffset / allocationGranularity;
| size_t mapIndex = objectStartNumber / 8;
| - ASSERT(mapIndex < objectStartBitMapSize);
| + DCHECK_LT(mapIndex, objectStartBitMapSize);
| size_t bit = objectStartNumber & 7;
| uint8_t byte = m_objectStartBitMap[mapIndex] & ((1 << (bit + 1)) - 1);
| while (!byte) {
| - ASSERT(mapIndex > 0);
| + DCHECK_GT(mapIndex, 0UL);
| byte = m_objectStartBitMap[--mapIndex];
| }
| int leadingZeroes = numberOfLeadingZeroes(byte);
| @@ -1661,11 +1661,11 @@ HeapObjectHeader* NormalPage::findHeaderFromAddress(Address address) {
| HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(objectAddress);
| if (header->isFree())
| return nullptr;
| - ASSERT(header->checkHeader());
| + DCHECK(header->checkHeader());
| return header;
| }
|
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| static bool isUninitializedMemory(void* objectPointer, size_t objectSize) {
| // Scan through the object's fields and check that they are all zero.
| Address* objectFields = reinterpret_cast<Address*>(objectPointer);
| @@ -1678,7 +1678,7 @@ static bool isUninitializedMemory(void* objectPointer, size_t objectSize) {
| #endif
|
| static void markPointer(Visitor* visitor, HeapObjectHeader* header) {
| - ASSERT(header->checkHeader());
| + DCHECK(header->checkHeader());
| const GCInfo* gcInfo = ThreadHeap::gcInfo(header->gcInfoIndex());
| if (gcInfo->hasVTable() && !vTableInitialized(header->payload())) {
| // We hit this branch when a GC strikes before GarbageCollected<>'s
| @@ -1693,14 +1693,14 @@ static void markPointer(Visitor* visitor, HeapObjectHeader* header) {
| // has not yet been initialized. In this case, we should mark the A
| // object without tracing any member of the A object.
| visitor->markHeaderNoTracing(header);
| - ASSERT(isUninitializedMemory(header->payload(), header->payloadSize()));
| + DCHECK(isUninitializedMemory(header->payload(), header->payloadSize()));
| } else {
| visitor->markHeader(header, gcInfo->m_trace);
| }
| }
|
| void NormalPage::checkAndMarkPointer(Visitor* visitor, Address address) {
| - ASSERT(contains(address));
| + DCHECK(contains(address));
| HeapObjectHeader* header = findHeaderFromAddress(address);
| if (!header || header->isDead())
| return;
| @@ -1763,11 +1763,11 @@ void NormalPage::takeSnapshot(base::trace_event::MemoryAllocatorDump* pageDump,
| heapInfo.freeCount += freeCount;
| }
|
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| bool NormalPage::contains(Address addr) {
| Address blinkPageStart = roundToBlinkPageStart(getAddress());
| // Page is at aligned address plus guard page size.
| - ASSERT(blinkPageStart == getAddress() - blinkGuardPageSize);
| + DCHECK_EQ(blinkPageStart, getAddress() - blinkGuardPageSize);
| return blinkPageStart <= addr && addr < blinkPageStart + blinkPageSize;
| }
| #endif
| @@ -1827,7 +1827,7 @@ void LargeObjectPage::poisonUnmarkedObjects() {
| #endif
|
| void LargeObjectPage::checkAndMarkPointer(Visitor* visitor, Address address) {
| - ASSERT(contains(address));
| + DCHECK(contains(address));
| if (!containedInObjectPayload(address) || heapObjectHeader()->isDead())
| return;
| markPointer(visitor, heapObjectHeader());
| @@ -1870,7 +1870,7 @@ void LargeObjectPage::takeSnapshot(
| pageDump->AddScalar("dead_size", "bytes", deadSize);
| }
|
| -#if ENABLE(ASSERT)
| +#if DCHECK_IS_ON()
| bool LargeObjectPage::contains(Address object) {
| return roundToBlinkPageStart(getAddress()) <= object &&
| object < roundToBlinkPageEnd(getAddress() + size());
| @@ -1894,10 +1894,10 @@ size_t HeapDoesNotContainCache::hash(Address address) {
| }
|
| bool HeapDoesNotContainCache::lookup(Address address) {
| - ASSERT(ThreadState::current()->isInGC());
| + DCHECK(ThreadState::current()->isInGC());
|
| size_t index = hash(address);
| - ASSERT(!(index & 1));
| + DCHECK(!(index & 1));
| Address cachePage = roundToBlinkPageStart(address);
| if (m_entries[index] == cachePage)
| return m_entries[index];
| @@ -1907,11 +1907,11 @@ bool HeapDoesNotContainCache::lookup(Address address) {
| }
|
| void HeapDoesNotContainCache::addEntry(Address address) {
| - ASSERT(ThreadState::current()->isInGC());
| + DCHECK(ThreadState::current()->isInGC());
|
| m_hasEntries = true;
| size_t index = hash(address);
| - ASSERT(!(index & 1));
| + DCHECK(!(index & 1));
| Address cachePage = roundToBlinkPageStart(address);
| m_entries[index + 1] = m_entries[index];
| m_entries[index] = cachePage;
|