Index: third_party/WebKit/Source/platform/heap/HeapAllocator.cpp |
diff --git a/third_party/WebKit/Source/platform/heap/HeapAllocator.cpp b/third_party/WebKit/Source/platform/heap/HeapAllocator.cpp |
index 8b6ecc0edf25397a45713a3bf0f69cbeed58021a..2ae3e50b97ace4241def7a1af2cc6613c0eecf70 100644 |
--- a/third_party/WebKit/Source/platform/heap/HeapAllocator.cpp |
+++ b/third_party/WebKit/Source/platform/heap/HeapAllocator.cpp |
@@ -13,7 +13,7 @@ void HeapAllocator::backingFree(void* address) { |
ThreadState* state = ThreadState::current(); |
if (state->sweepForbidden()) |
return; |
- ASSERT(!state->isInGC()); |
+ DCHECK(!state->isInGC()); |
// Don't promptly free large objects because their page is never reused. |
// Don't free backings allocated on other threads. |
@@ -22,7 +22,7 @@ void HeapAllocator::backingFree(void* address) { |
return; |
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
- ASSERT(header->checkHeader()); |
+ DCHECK(header->checkHeader()); |
NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
state->promptlyFreed(header->gcInfoIndex()); |
arena->promptlyFreeObject(header); |
@@ -47,8 +47,8 @@ bool HeapAllocator::backingExpand(void* address, size_t newSize) { |
ThreadState* state = ThreadState::current(); |
if (state->sweepForbidden()) |
return false; |
- ASSERT(!state->isInGC()); |
- ASSERT(state->isAllocationAllowed()); |
+ DCHECK(!state->isInGC()); |
+ DCHECK(state->isAllocationAllowed()); |
DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap()); |
// FIXME: Support expand for large objects. |
@@ -58,7 +58,7 @@ bool HeapAllocator::backingExpand(void* address, size_t newSize) { |
return false; |
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
- ASSERT(header->checkHeader()); |
+ DCHECK(header->checkHeader()); |
NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
bool succeed = arena->expandObject(header, newSize); |
if (succeed) |
@@ -84,13 +84,13 @@ bool HeapAllocator::backingShrink(void* address, |
if (!address || quantizedShrunkSize == quantizedCurrentSize) |
return true; |
- ASSERT(quantizedShrunkSize < quantizedCurrentSize); |
+  DCHECK_LT(quantizedShrunkSize, quantizedCurrentSize); |
ThreadState* state = ThreadState::current(); |
if (state->sweepForbidden()) |
return false; |
- ASSERT(!state->isInGC()); |
- ASSERT(state->isAllocationAllowed()); |
+ DCHECK(!state->isInGC()); |
+ DCHECK(state->isAllocationAllowed()); |
DCHECK_EQ(&state->heap(), &ThreadState::fromObject(address)->heap()); |
// FIXME: Support shrink for large objects. |
@@ -100,7 +100,7 @@ bool HeapAllocator::backingShrink(void* address, |
return false; |
HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
- ASSERT(header->checkHeader()); |
+ DCHECK(header->checkHeader()); |
NormalPageArena* arena = static_cast<NormalPage*>(page)->arenaForNormalPage(); |
// We shrink the object only if the shrinking will make a non-small |
// prompt-free block. |