Index: third_party/WebKit/Source/platform/heap/HeapPage.h
diff --git a/third_party/WebKit/Source/platform/heap/HeapPage.h b/third_party/WebKit/Source/platform/heap/HeapPage.h
index 69d369dc9ef587180b79c1c9ae17260cbd4b794c..43983dfde446ba6babd65c9a5a2cc754558bdacb 100644
--- a/third_party/WebKit/Source/platform/heap/HeapPage.h
+++ b/third_party/WebKit/Source/platform/heap/HeapPage.h
@@ -103,7 +103,7 @@ const uint8_t reuseForbiddenZapValue = 0x2c;
ASAN_UNPOISON_MEMORY_REGION(address, size); \
FreeList::checkFreedMemoryIsZapped(address, size); \
ASAN_POISON_MEMORY_REGION(address, size)
-#elif ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
+#elif DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
#define SET_MEMORY_INACCESSIBLE(address, size) \
FreeList::zapFreedMemory(address, size); \
ASAN_POISON_MEMORY_REGION(address, size)
@@ -124,7 +124,7 @@ const uint8_t reuseForbiddenZapValue = 0x2c;
} while (false)
#endif
-#if !ENABLE(ASSERT) && CPU(64BIT)
+#if !DCHECK_IS_ON() && CPU(64BIT)
#define USE_4BYTE_HEADER_PADDING 1
#else
#define USE_4BYTE_HEADER_PADDING 0
@@ -183,7 +183,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
// If gcInfoIndex is 0, this header is interpreted as a free list header.
NO_SANITIZE_ADDRESS
HeapObjectHeader(size_t size, size_t gcInfoIndex) {
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
m_magic = magic;
#endif
// sizeof(HeapObjectHeader) must be equal to or smaller than
@@ -198,9 +198,9 @@ class PLATFORM_EXPORT HeapObjectHeader {
"size of HeapObjectHeader must be 8 byte aligned");
#endif
- ASSERT(gcInfoIndex < GCInfoTable::maxIndex);
- ASSERT(size < nonLargeObjectPageSizeMax);
- ASSERT(!(size & allocationMask));
+ DCHECK_LT(gcInfoIndex, gcInfoMaxIndex);
+ DCHECK_LT(size, nonLargeObjectPageSizeMax);
+ DCHECK(!(size & allocationMask));
m_encoded = static_cast<uint32_t>(
(gcInfoIndex << headerGCInfoIndexShift) | size |
(gcInfoIndex == gcInfoIndexForFreeListHeader ? headerFreedBitMask : 0));
@@ -223,7 +223,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
}
NO_SANITIZE_ADDRESS
void setSize(size_t size) {
- ASSERT(size < nonLargeObjectPageSizeMax);
+ DCHECK_LT(size, nonLargeObjectPageSizeMax);
m_encoded = static_cast<uint32_t>(size) | (m_encoded & ~headerSizeMask);
}
bool isWrapperHeaderMarked() const;
@@ -239,7 +239,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
size_t payloadSize();
Address payloadEnd();
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
bool checkHeader() const;
// Zap magic number with a new magic number that means there was once an
// object allocated here, but it was freed because nobody marked it during
@@ -255,7 +255,7 @@ class PLATFORM_EXPORT HeapObjectHeader {
private:
uint32_t m_encoded;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
uint16_t m_magic;
#endif
@@ -284,8 +284,8 @@ class FreeListEntry final : public HeapObjectHeader {
NO_SANITIZE_ADDRESS
explicit FreeListEntry(size_t size)
: HeapObjectHeader(size, gcInfoIndexForFreeListHeader), m_next(nullptr) {
-#if ENABLE(ASSERT)
- ASSERT(size >= sizeof(HeapObjectHeader));
+#if DCHECK_IS_ON()
+ DCHECK_GE(size, sizeof(HeapObjectHeader));
zapMagic();
#endif
}
@@ -309,7 +309,7 @@ class FreeListEntry final : public HeapObjectHeader {
NO_SANITIZE_ADDRESS
void append(FreeListEntry* next) {
- ASSERT(!m_next);
+ DCHECK(!m_next);
m_next = next;
}
@@ -346,7 +346,7 @@ inline bool vTableInitialized(void* objectPointer) {
return !!(*reinterpret_cast<Address*>(objectPointer));
}
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
// Sanity check for a page header address: the address of the page
// header should be OS page size away from being Blink page size
// aligned.
@@ -421,7 +421,7 @@ class BasePage {
virtual void takeSnapshot(base::trace_event::MemoryAllocatorDump*,
ThreadState::GCSnapshotInfo&,
HeapSnapshotInfo&) = 0;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
virtual bool contains(Address) = 0;
#endif
virtual size_t size() = 0;
@@ -438,12 +438,12 @@ class BasePage {
bool hasBeenSwept() const { return m_swept; }
void markAsSwept() {
- ASSERT(!m_swept);
+ DCHECK(!m_swept);
m_swept = true;
}
void markAsUnswept() {
- ASSERT(m_swept);
+ DCHECK(m_swept);
m_swept = false;
}
@@ -494,7 +494,7 @@ class NormalPage final : public BasePage {
void takeSnapshot(base::trace_event::MemoryAllocatorDump*,
ThreadState::GCSnapshotInfo&,
HeapSnapshotInfo&) override;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
// Returns true for the whole blinkPageSize page that the page is on, even
// for the header, and the unmapped guard page at the start. That ensures
// the result can be used to populate the negative page cache.
@@ -572,7 +572,7 @@ class LargeObjectPage final : public BasePage {
void takeSnapshot(base::trace_event::MemoryAllocatorDump*,
ThreadState::GCSnapshotInfo&,
HeapSnapshotInfo&) override;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
// Returns true for any address that is on one of the pages that this
// large object uses. That ensures that we can use a negative result to
// populate the negative page cache.
@@ -673,7 +673,7 @@ class FreeList {
// Returns true if the freelist snapshot is captured.
bool takeSnapshot(const String& dumpBaseName);
-#if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
+#if DCHECK_IS_ON() || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) || \
defined(MEMORY_SANITIZER)
static void zapFreedMemory(Address, size_t);
static void checkFreedMemoryIsZapped(Address, size_t);
@@ -707,14 +707,14 @@ class PLATFORM_EXPORT BaseArena {
void cleanupPages();
void takeSnapshot(const String& dumpBaseName, ThreadState::GCSnapshotInfo&);
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
BasePage* findPageFromAddress(Address);
#endif
virtual void takeFreelistSnapshot(const String& dumpBaseName) {}
virtual void clearFreeLists() {}
void makeConsistentForGC();
void makeConsistentForMutator();
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
virtual bool isConsistentForGC() = 0;
#endif
size_t objectPayloadSizeForTesting();
@@ -755,12 +755,12 @@ class PLATFORM_EXPORT NormalPageArena final : public BaseArena {
public:
NormalPageArena(ThreadState*, int);
void addToFreeList(Address address, size_t size) {
- ASSERT(findPageFromAddress(address));
- ASSERT(findPageFromAddress(address + size - 1));
+ DCHECK(findPageFromAddress(address));
+ DCHECK(findPageFromAddress(address + size - 1));
m_freeList.addToFreeList(address, size);
}
void clearFreeLists() override;
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
bool isConsistentForGC() override;
bool pagesToBeSweptContains(Address);
#endif
@@ -822,7 +822,7 @@ class LargeObjectArena final : public BaseArena {
LargeObjectArena(ThreadState*, int);
Address allocateLargeObjectPage(size_t, size_t gcInfoIndex);
void freeLargeObjectPage(LargeObjectPage*);
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
bool isConsistentForGC() override { return true; }
#endif
private:
@@ -838,7 +838,7 @@ PLATFORM_EXPORT inline BasePage* pageFromObject(const void* object) {
Address address = reinterpret_cast<Address>(const_cast<void*>(object));
BasePage* page = reinterpret_cast<BasePage*>(blinkPageAddress(address) +
blinkGuardPageSize);
- ASSERT(page->contains(address));
+ DCHECK(page->contains(address));
return page;
}
@@ -847,12 +847,12 @@ NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::size() const {
// Large objects should not refer to header->size().
// The actual size of a large object is stored in
// LargeObjectPage::m_payloadSize.
- ASSERT(result != largeObjectSizeInHeader);
- ASSERT(!pageFromObject(this)->isLargeObjectPage());
+ DCHECK_NE(result, largeObjectSizeInHeader);
+ DCHECK(!pageFromObject(this)->isLargeObjectPage());
return result;
}
-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::checkHeader() const {
return !pageFromObject(this)->orphaned() && m_magic == magic;
}
@@ -869,10 +869,10 @@ inline Address HeapObjectHeader::payloadEnd() {
NO_SANITIZE_ADDRESS inline size_t HeapObjectHeader::payloadSize() {
size_t size = m_encoded & headerSizeMask;
if (UNLIKELY(size == largeObjectSizeInHeader)) {
- ASSERT(pageFromObject(this)->isLargeObjectPage());
+ DCHECK(pageFromObject(this)->isLargeObjectPage());
return static_cast<LargeObjectPage*>(pageFromObject(this))->payloadSize();
}
- ASSERT(!pageFromObject(this)->isLargeObjectPage());
+ DCHECK(!pageFromObject(this)->isLargeObjectPage());
return size - sizeof(HeapObjectHeader);
}
@@ -880,53 +880,53 @@ inline HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) {
Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
HeapObjectHeader* header =
reinterpret_cast<HeapObjectHeader*>(addr - sizeof(HeapObjectHeader));
- ASSERT(header->checkHeader());
+ DCHECK(header->checkHeader());
return header;
}
NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::isWrapperHeaderMarked()
const {
- ASSERT(checkHeader());
+ DCHECK(checkHeader());
return m_encoded & headerWrapperMarkBitMask;
}
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::markWrapperHeader() {
- ASSERT(checkHeader());
- ASSERT(!isWrapperHeaderMarked());
+ DCHECK(checkHeader());
+ DCHECK(!isWrapperHeaderMarked());
m_encoded |= headerWrapperMarkBitMask;
}
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::unmarkWrapperHeader() {
- ASSERT(checkHeader());
- ASSERT(isWrapperHeaderMarked());
+ DCHECK(checkHeader());
+ DCHECK(isWrapperHeaderMarked());
m_encoded &= ~headerWrapperMarkBitMask;
}
NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::isMarked() const {
- ASSERT(checkHeader());
+ DCHECK(checkHeader());
return m_encoded & headerMarkBitMask;
}
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::mark() {
- ASSERT(checkHeader());
- ASSERT(!isMarked());
+ DCHECK(checkHeader());
+ DCHECK(!isMarked());
m_encoded = m_encoded | headerMarkBitMask;
}
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::unmark() {
- ASSERT(checkHeader());
- ASSERT(isMarked());
+ DCHECK(checkHeader());
+ DCHECK(isMarked());
m_encoded &= ~headerMarkBitMask;
}
NO_SANITIZE_ADDRESS inline bool HeapObjectHeader::isDead() const {
- ASSERT(checkHeader());
+ DCHECK(checkHeader());
return m_encoded & headerDeadBitMask;
}
NO_SANITIZE_ADDRESS inline void HeapObjectHeader::markDead() {
- ASSERT(checkHeader());
- ASSERT(!isMarked());
+ DCHECK(checkHeader());
+ DCHECK(!isMarked());
m_encoded |= headerDeadBitMask;
}
@@ -936,13 +936,13 @@ inline Address NormalPageArena::allocateObject(size_t allocationSize,
Address headerAddress = m_currentAllocationPoint;
m_currentAllocationPoint += allocationSize;
m_remainingAllocationSize -= allocationSize;
- ASSERT(gcInfoIndex > 0);
+ DCHECK_GT(gcInfoIndex, 0UL);
new (NotNull, headerAddress) HeapObjectHeader(allocationSize, gcInfoIndex);
Address result = headerAddress + sizeof(HeapObjectHeader);
- ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
+ DCHECK(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
SET_MEMORY_ACCESSIBLE(result, allocationSize - sizeof(HeapObjectHeader));
- ASSERT(findPageFromAddress(headerAddress + allocationSize - 1));
+ DCHECK(findPageFromAddress(headerAddress + allocationSize - 1));
return result;
}
return outOfLineAllocate(allocationSize, gcInfoIndex);
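
Note (not part of the patch): the sketch below is a minimal, self-contained illustration of the DCHECK_IS_ON()/DCHECK_LT() pattern this change switches to. The macro definitions are simplified stand-ins, not Chromium's base/logging.h implementation; they only show why the comparison form DCHECK_LT(a, b) is generally preferred over DCHECK(a < b): on failure it can report both operand values, which the plain condition form cannot.

// Simplified sketch of debug-only check macros gated on a DCHECK_IS_ON()-style
// switch. Everything here is illustrative; real Chromium code uses base/logging.h.
#include <cstddef>
#include <cstdlib>
#include <iostream>

#ifndef NDEBUG
#define DCHECK_IS_ON() 1
#else
#define DCHECK_IS_ON() 0
#endif

#if DCHECK_IS_ON()
// Condition-only form: reports the failed expression text.
#define DCHECK(condition)                                        \
  do {                                                           \
    if (!(condition)) {                                          \
      std::cerr << "DCHECK failed: " #condition << std::endl;    \
      std::abort();                                              \
    }                                                            \
  } while (false)
// Comparison form: also reports the two operand values.
#define DCHECK_LT(a, b)                                                       \
  do {                                                                        \
    if (!((a) < (b))) {                                                       \
      std::cerr << "DCHECK failed: " #a " < " #b " (" << (a) << " vs " << (b) \
                << ")" << std::endl;                                          \
      std::abort();                                                           \
    }                                                                         \
  } while (false)
#else
// In non-debug builds the checks (and their argument evaluation) compile away.
#define DCHECK(condition) ((void)0)
#define DCHECK_LT(a, b) ((void)0)
#endif

int main() {
  size_t size = 32;
  size_t limit = size_t{1} << 17;  // stand-in for a constant like nonLargeObjectPageSizeMax
  DCHECK(!(size & 7));   // condition-only check, as used for mask tests above
  DCHECK_LT(size, limit); // comparison check, prints both values if it fails
  return 0;
}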