Index: third_party/WebKit/Source/platform/heap/ThreadState.cpp
diff --git a/third_party/WebKit/Source/platform/heap/ThreadState.cpp b/third_party/WebKit/Source/platform/heap/ThreadState.cpp
index 6ae1096dacf00a01271040cb4cd6b5fd044a5a1e..3636110dd37d812cbf12c4af86df45e864c40c0e 100644
--- a/third_party/WebKit/Source/platform/heap/ThreadState.cpp
+++ b/third_party/WebKit/Source/platform/heap/ThreadState.cpp
@@ -177,8 +177,8 @@ ThreadState::ThreadState(BlinkGC::ThreadHeapMode threadHeapMode)
       m_allocatedObjectSize(0),
       m_markedObjectSize(0),
       m_reportedMemoryToV8(0) {
-  ASSERT(checkThread());
-  ASSERT(!**s_threadSpecific);
+  DCHECK(checkThread());
+  DCHECK(!**s_threadSpecific);
   **s_threadSpecific = this;

   switch (m_threadHeapMode) {
@@ -200,7 +200,7 @@ ThreadState::ThreadState(BlinkGC::ThreadHeapMode threadHeapMode)
       m_heap = new ThreadHeap();
       break;
   }
-  ASSERT(m_heap);
+  DCHECK(m_heap);
   m_heap->attach(this);

   for (int arenaIndex = 0; arenaIndex < BlinkGC::LargeObjectArenaIndex;
@@ -215,7 +215,7 @@ ThreadState::ThreadState(BlinkGC::ThreadHeapMode threadHeapMode)
 }

 ThreadState::~ThreadState() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
     delete m_arenas[i];

@@ -243,7 +243,7 @@ size_t ThreadState::threadStackSize() {

   Address stackStart =
       reinterpret_cast<Address>(StackFrameDepth::getStackStart());
-  RELEASE_ASSERT(stackStart && stackStart > stackEnd);
+  CHECK(stackStart && stackStart > stackEnd);
   m_threadStackSize = static_cast<size_t>(stackStart - stackEnd);
   // When the third last page of the reserved stack is accessed as a
   // guard page, the second last page will be committed (along with removing
@@ -256,25 +256,25 @@ size_t ThreadState::threadStackSize() {
   //
   // http://blogs.msdn.com/b/satyem/archive/2012/08/13/thread-s-stack-memory-management.aspx
   // explains the details.
-  RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000);
+  CHECK(m_threadStackSize > 4 * 0x1000);
   m_threadStackSize -= 4 * 0x1000;
   return m_threadStackSize;
 }
 #endif

 void ThreadState::attachMainThread() {
-  RELEASE_ASSERT(!ProcessHeap::s_shutdownComplete);
+  CHECK(!ProcessHeap::s_shutdownComplete);
   s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>();
   new (s_mainThreadStateStorage) ThreadState(BlinkGC::MainThreadHeapMode);
 }

 void ThreadState::attachCurrentThread(BlinkGC::ThreadHeapMode threadHeapMode) {
-  RELEASE_ASSERT(!ProcessHeap::s_shutdownComplete);
+  CHECK(!ProcessHeap::s_shutdownComplete);
   new ThreadState(threadHeapMode);
 }

 void ThreadState::cleanupPages() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
     m_arenas[i]->cleanupPages();
 }
@@ -284,7 +284,7 @@ void ThreadState::runTerminationGC() {
     cleanupPages();
     return;
   }
-  ASSERT(checkThread());
+  DCHECK(checkThread());

   // Finish sweeping.
   completeSweep();
@@ -307,7 +307,7 @@ void ThreadState::runTerminationGC() {
   // changes and is above zero.
   int oldCount = -1;
   int currentCount = getPersistentRegion()->numberOfPersistents();
-  ASSERT(currentCount >= 0);
+  DCHECK_GE(currentCount, 0);
   while (currentCount != oldCount) {
     collectGarbageForTerminatingThread();
     // Release the thread-local static persistents that were
@@ -317,11 +317,11 @@ void ThreadState::runTerminationGC() {
     currentCount = getPersistentRegion()->numberOfPersistents();
   }
   // We should not have any persistents left when getting to this point,
-  // if we have it is probably a bug so adding a debug ASSERT to catch this.
-  ASSERT(!currentCount);
+  // if we have it is probably a bug so adding a debug DCHECK to catch this.
+  DCHECK(!currentCount);
   // All of pre-finalizers should be consumed.
-  ASSERT(m_orderedPreFinalizers.isEmpty());
-  RELEASE_ASSERT(gcState() == NoGCScheduled);
+  DCHECK(m_orderedPreFinalizers.isEmpty());
+  CHECK(gcState() == NoGCScheduled);

   // Add pages to the orphaned page pool to ensure any global GCs from this
   // point on will not trace objects on this thread's arenas.
@@ -329,7 +329,7 @@ void ThreadState::runTerminationGC() {
 }

 void ThreadState::cleanupMainThread() {
-  ASSERT(isMainThread());
+  DCHECK(isMainThread());

   releaseStaticPersistentNodes();

@@ -349,7 +349,7 @@ void ThreadState::detachMainThread() {
   // threadAttachMutex and waiting for other threads to pause or reach a
   // safepoint.
   ThreadState* state = mainThreadState();
-  ASSERT(!state->isSweepingInProgress());
+  DCHECK(!state->isSweepingInProgress());

   state->heap().detach(state);
   state->~ThreadState();
@@ -358,7 +358,7 @@ void ThreadState::detachMainThread() {
 void ThreadState::detachCurrentThread() {
   ThreadState* state = current();
   state->heap().detach(state);
-  RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled);
+  CHECK(state->gcState() == ThreadState::NoGCScheduled);
   delete state;
 }

@@ -456,7 +456,7 @@ void ThreadState::pushThreadLocalWeakCallback(void* object,
 }

 bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   // For weak processing we should never reach orphaned pages since orphaned
   // pages are not traced and thus objects on those pages are never be
   // registered as objects on orphaned pages. We cannot assert this here since
@@ -470,8 +470,8 @@ bool ThreadState::popAndInvokeThreadLocalWeakCallback(Visitor* visitor) {
 }

 void ThreadState::threadLocalWeakProcessing() {
-  ASSERT(checkThread());
-  ASSERT(!sweepForbidden());
+  DCHECK(checkThread());
+  DCHECK(!sweepForbidden());
   TRACE_EVENT0("blink_gc", "ThreadState::threadLocalWeakProcessing");
   double startTime = WTF::currentTimeMS();

@@ -621,7 +621,7 @@ bool ThreadState::shouldForceMemoryPressureGC() {
 }

 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   ThreadHeap::reportMemoryUsageForTracing();

 #if PRINT_HEAP_STATS
@@ -635,8 +635,8 @@ void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) {
   // This completeSweep() will do nothing in common cases since we've
   // called completeSweep() before V8 starts minor/major GCs.
   completeSweep();
-  ASSERT(!isSweepingInProgress());
-  ASSERT(!sweepForbidden());
+  DCHECK(!isSweepingInProgress());
+  DCHECK(!sweepForbidden());

   if ((gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) ||
       shouldScheduleV8FollowupGC()) {
@@ -669,7 +669,7 @@ void ThreadState::willStartV8GC(BlinkGC::V8GCType gcType) {

 void ThreadState::schedulePageNavigationGCIfNeeded(
     float estimatedRemovalRatio) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   ThreadHeap::reportMemoryUsageForTracing();

 #if PRINT_HEAP_STATS
@@ -686,8 +686,8 @@ void ThreadState::schedulePageNavigationGCIfNeeded(
   // TODO(haraken): It might not make sense to force completeSweep() for all
   // page navigations.
   completeSweep();
-  ASSERT(!isSweepingInProgress());
-  ASSERT(!sweepForbidden());
+  DCHECK(!isSweepingInProgress());
+  DCHECK(!sweepForbidden());

   if (shouldForceMemoryPressureGC()) {
 #if PRINT_HEAP_STATS
@@ -706,13 +706,13 @@ void ThreadState::schedulePageNavigationGCIfNeeded(
 }

 void ThreadState::schedulePageNavigationGC() {
-  ASSERT(checkThread());
-  ASSERT(!isSweepingInProgress());
+  DCHECK(checkThread());
+  DCHECK(!isSweepingInProgress());
   setGCState(PageNavigationGCScheduled);
 }

 void ThreadState::scheduleGCIfNeeded() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   ThreadHeap::reportMemoryUsageForTracing();

 #if PRINT_HEAP_STATS
@@ -726,7 +726,7 @@ void ThreadState::scheduleGCIfNeeded() {

   if (isSweepingInProgress())
     return;
-  ASSERT(!sweepForbidden());
+  DCHECK(!sweepForbidden());

   reportMemoryToV8();

@@ -763,17 +763,17 @@ void ThreadState::scheduleGCIfNeeded() {
 }

 ThreadState* ThreadState::fromObject(const void* object) {
-  ASSERT(object);
+  DCHECK(object);
   BasePage* page = pageFromObject(object);
-  ASSERT(page);
-  ASSERT(page->arena());
+  DCHECK(page);
+  DCHECK(page->arena());
   return page->arena()->getThreadState();
 }

 void ThreadState::performIdleGC(double deadlineSeconds) {
-  ASSERT(checkThread());
-  ASSERT(isMainThread());
-  ASSERT(Platform::current()->currentThread()->scheduler());
+  DCHECK(checkThread());
+  DCHECK(isMainThread());
+  DCHECK(Platform::current()->currentThread()->scheduler());

   if (gcState() != IdleGCScheduled)
     return;
@@ -804,8 +804,8 @@ void ThreadState::performIdleGC(double deadlineSeconds) {
 }

 void ThreadState::performIdleLazySweep(double deadlineSeconds) {
-  ASSERT(checkThread());
-  ASSERT(isMainThread());
+  DCHECK(checkThread());
+  DCHECK(isMainThread());

   // If we are not in a sweeping phase, there is nothing to do here.
   if (!isSweepingInProgress())
@@ -885,7 +885,7 @@ void ThreadState::scheduleIdleLazySweep() {
 }

 void ThreadState::schedulePreciseGC() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   if (isSweepingInProgress()) {
     setGCState(SweepingAndPreciseGCScheduled);
     return;
@@ -914,7 +914,7 @@ void unexpectedGCState(ThreadState::GCState gcState) {
     UNEXPECTED_GCSTATE(SweepingAndIdleGCScheduled);
     UNEXPECTED_GCSTATE(SweepingAndPreciseGCScheduled);
     default:
-      ASSERT_NOT_REACHED();
+      NOTREACHED();
       return;
   }
 }
@@ -924,13 +924,14 @@ void unexpectedGCState(ThreadState::GCState gcState) {
 }  // namespace

 #define VERIFY_STATE_TRANSITION(condition) \
-  if (UNLIKELY(!(condition)))              \
-  unexpectedGCState(m_gcState)
+  if (UNLIKELY(!(condition))) {            \
+    unexpectedGCState(m_gcState);          \
+  }

 void ThreadState::setGCState(GCState gcState) {
   switch (gcState) {
     case NoGCScheduled:
-      ASSERT(checkThread());
+      DCHECK(checkThread());
       VERIFY_STATE_TRANSITION(m_gcState == Sweeping ||
                               m_gcState == SweepingAndIdleGCScheduled);
       break;
@@ -938,7 +939,7 @@ void ThreadState::setGCState(GCState gcState) {
     case PreciseGCScheduled:
     case FullGCScheduled:
     case PageNavigationGCScheduled:
-      ASSERT(checkThread());
+      DCHECK(checkThread());
       VERIFY_STATE_TRANSITION(
           m_gcState == NoGCScheduled || m_gcState == IdleGCScheduled ||
           m_gcState == PreciseGCScheduled || m_gcState == FullGCScheduled ||
@@ -948,28 +949,28 @@ void ThreadState::setGCState(GCState gcState) {
       completeSweep();
       break;
     case GCRunning:
-      ASSERT(!isInGC());
+      DCHECK(!isInGC());
       VERIFY_STATE_TRANSITION(m_gcState != GCRunning);
       break;
     case EagerSweepScheduled:
     case LazySweepScheduled:
-      ASSERT(isInGC());
+      DCHECK(isInGC());
       VERIFY_STATE_TRANSITION(m_gcState == GCRunning);
       break;
     case Sweeping:
-      ASSERT(checkThread());
+      DCHECK(checkThread());
       VERIFY_STATE_TRANSITION(m_gcState == EagerSweepScheduled ||
                               m_gcState == LazySweepScheduled);
       break;
     case SweepingAndIdleGCScheduled:
     case SweepingAndPreciseGCScheduled:
-      ASSERT(checkThread());
+      DCHECK(checkThread());
       VERIFY_STATE_TRANSITION(m_gcState == Sweeping ||
                               m_gcState == SweepingAndIdleGCScheduled ||
                               m_gcState == SweepingAndPreciseGCScheduled);
       break;
     default:
-      ASSERT_NOT_REACHED();
+      NOTREACHED();
   }
   m_gcState = gcState;
 }
@@ -977,7 +978,7 @@ void ThreadState::setGCState(GCState gcState) {
 #undef VERIFY_STATE_TRANSITION

 void ThreadState::runScheduledGC(BlinkGC::StackState stackState) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   if (stackState != BlinkGC::NoHeapPointersOnStack)
     return;

@@ -1017,7 +1018,7 @@ void ThreadState::flushHeapDoesNotContainCacheIfNeeded() {
 }

 void ThreadState::makeConsistentForGC() {
-  ASSERT(isInGC());
+  DCHECK(isInGC());
   TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC");
   for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
     m_arenas[i]->makeConsistentForGC();
@@ -1052,7 +1053,7 @@ void ThreadState::compact() {
 }

 void ThreadState::makeConsistentForMutator() {
-  ASSERT(isInGC());
+  DCHECK(isInGC());
   for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
     m_arenas[i]->makeConsistentForMutator();
 }
@@ -1062,7 +1063,7 @@ void ThreadState::preGC() {
       m_performCleanup)
     m_performCleanup(m_isolate);

-  ASSERT(!isInGC());
+  DCHECK(!isInGC());
   setGCState(GCRunning);
   makeConsistentForGC();
   flushHeapDoesNotContainCacheIfNeeded();
@@ -1091,7 +1092,7 @@ void ThreadState::postGC(BlinkGC::GCType gcType) {
     m_invalidateDeadObjectsInWrappersMarkingDeque(m_isolate);
   }

-  ASSERT(isInGC());
+  DCHECK(isInGC());
   for (int i = 0; i < BlinkGC::NumberOfArenas; i++)
     m_arenas[i]->prepareForSweep();

@@ -1114,7 +1115,7 @@ void ThreadState::postGC(BlinkGC::GCType gcType) {
 }

 void ThreadState::preSweep() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   if (gcState() != EagerSweepScheduled && gcState() != LazySweepScheduled)
     return;

@@ -1184,11 +1185,11 @@ void ThreadState::eagerSweep() {
 #if defined(ADDRESS_SANITIZER)
   poisonEagerArena();
 #endif
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   // Some objects need to be finalized promptly and cannot be handled
   // by lazy sweeping. Keep those in a designated heap and sweep it
   // eagerly.
-  ASSERT(isSweepingInProgress());
+  DCHECK(isSweepingInProgress());

   // Mirroring the completeSweep() condition; see its comment.
   if (sweepForbidden())
@@ -1203,7 +1204,7 @@ void ThreadState::eagerSweep() {
 }

 void ThreadState::completeSweep() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   // If we are not in a sweeping phase, there is nothing to do here.
   if (!isSweepingInProgress())
     return;
@@ -1240,7 +1241,7 @@ void ThreadState::completeSweep() {
 }

 void ThreadState::postSweep() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   ThreadHeap::reportMemoryUsageForTracing();

   if (isMainThread()) {
@@ -1311,17 +1312,17 @@ void ThreadState::postSweep() {
       scheduleIdleGC();
       break;
     default:
-      ASSERT_NOT_REACHED();
+      NOTREACHED();
   }
 }

 void ThreadState::prepareForThreadStateTermination() {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   for (int i = 0; i < BlinkGC::NumberOfArenas; ++i)
     m_arenas[i]->prepareHeapForTermination();
 }

-#if ENABLE(ASSERT)
+#if DCHECK_IS_ON()
 BasePage* ThreadState::findPageFromAddress(Address address) {
   for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) {
     if (BasePage* page = m_arenas[i]->findPageFromAddress(address))
@@ -1339,11 +1340,11 @@ size_t ThreadState::objectPayloadSizeForTesting() {
 }

 void ThreadState::safePoint(BlinkGC::StackState stackState) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   ThreadHeap::reportMemoryUsageForTracing();

   runScheduledGC(stackState);
-  ASSERT(!m_atSafePoint);
+  DCHECK(!m_atSafePoint);
   m_stackState = stackState;
   m_atSafePoint = true;
   m_heap->checkAndPark(this, nullptr);
@@ -1363,7 +1364,7 @@ NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAdressSanitizer(
     void* scopeMarker) {
   Address start = reinterpret_cast<Address>(StackFrameDepth::getStackStart());
   Address end = reinterpret_cast<Address>(&start);
-  RELEASE_ASSERT(end < start);
+  CHECK(end < start);

   if (end <= scopeMarker && scopeMarker < start)
     return scopeMarker;
@@ -1379,14 +1380,14 @@ NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAdressSanitizer(

 void ThreadState::enterSafePoint(BlinkGC::StackState stackState,
                                  void* scopeMarker) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
 #ifdef ADDRESS_SANITIZER
   if (stackState == BlinkGC::HeapPointersOnStack)
     scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker);
 #endif
-  ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker);
+  DCHECK(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker);
   runScheduledGC(stackState);
-  ASSERT(!m_atSafePoint);
+  DCHECK(!m_atSafePoint);
   m_atSafePoint = true;
   m_stackState = stackState;
   m_safePointScopeMarker = scopeMarker;
@@ -1394,8 +1395,8 @@ void ThreadState::enterSafePoint(BlinkGC::StackState stackState,
 }

 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) {
-  ASSERT(checkThread());
-  ASSERT(m_atSafePoint);
+  DCHECK(checkThread());
+  DCHECK(m_atSafePoint);
   m_heap->leaveSafePoint(this, locker);
   m_atSafePoint = false;
   m_stackState = BlinkGC::HeapPointersOnStack;
@@ -1440,19 +1441,19 @@ void ThreadState::copyStackUntilSafePointScope() {

   Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker);
   Address* from = reinterpret_cast<Address*>(m_endOfStack);
-  RELEASE_ASSERT(from < to);
-  RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack));
+  CHECK(from < to);
+  CHECK(to <= reinterpret_cast<Address*>(m_startOfStack));
   size_t slotCount = static_cast<size_t>(to - from);
 // Catch potential performance issues.
 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER)
   // ASan/LSan use more space on the stack and we therefore
   // increase the allowed stack copying for those builds.
-  ASSERT(slotCount < 2048);
+  DCHECK_LT(slotCount, 2048UL);
 #else
-  ASSERT(slotCount < 1024);
+  DCHECK_LT(slotCount, 1024UL);
 #endif

-  ASSERT(!m_safePointStackCopy.size());
+  DCHECK(!m_safePointStackCopy.size());
   m_safePointStackCopy.resize(slotCount);
   for (size_t i = 0; i < slotCount; ++i) {
     m_safePointStackCopy[i] = from[i];
@@ -1461,7 +1462,7 @@ void ThreadState::copyStackUntilSafePointScope() {

 void ThreadState::addInterruptor(
     std::unique_ptr<BlinkGCInterruptor> interruptor) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   SafePointScope scope(BlinkGC::HeapPointersOnStack);
   {
     MutexLocker locker(m_heap->threadAttachMutex());
@@ -1477,7 +1478,7 @@ void ThreadState::registerStaticPersistentNode(
     return;
 #endif

-  ASSERT(!m_staticPersistents.contains(node));
+  DCHECK(!m_staticPersistents.contains(node));
   m_staticPersistents.add(node, callback);
 }

@@ -1499,7 +1500,7 @@ void ThreadState::freePersistentNode(PersistentNode* persistentNode) {
   //
   // There's no fundamental reason why this couldn't be supported,
   // but no known use for it.
-  ASSERT(!m_staticPersistents.contains(persistentNode));
+  DCHECK(!m_staticPersistents.contains(persistentNode));
 }

 #if defined(LEAK_SANITIZER)
@@ -1508,7 +1509,7 @@ void ThreadState::enterStaticReferenceRegistrationDisabledScope() {
 }

 void ThreadState::leaveStaticReferenceRegistrationDisabledScope() {
-  ASSERT(m_disabledStaticPersistentsRegistration);
+  DCHECK(m_disabledStaticPersistentsRegistration);
   m_disabledStaticPersistentsRegistration--;
 }
 #endif
@@ -1522,8 +1523,8 @@ void ThreadState::unlockThreadAttachMutex() {
 }

 void ThreadState::invokePreFinalizers() {
-  ASSERT(checkThread());
-  ASSERT(!sweepForbidden());
+  DCHECK(checkThread());
+  DCHECK(!sweepForbidden());
   TRACE_EVENT0("blink_gc", "ThreadState::invokePreFinalizers");

   double startTime = WTF::currentTimeMS();
@@ -1576,12 +1577,12 @@ int ThreadState::arenaIndexOfVectorArenaLeastRecentlyExpanded(
       arenaIndexWithMinArenaAge = arenaIndex;
     }
   }
-  ASSERT(isVectorArenaIndex(arenaIndexWithMinArenaAge));
+  DCHECK(isVectorArenaIndex(arenaIndexWithMinArenaAge));
   return arenaIndexWithMinArenaAge;
 }

 BaseArena* ThreadState::expandedVectorBackingArena(size_t gcInfoIndex) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
   --m_likelyToBePromptlyFreed[entryIndex];
   int arenaIndex = m_vectorBackingArenaIndex;
@@ -1599,14 +1600,14 @@ void ThreadState::allocationPointAdjusted(int arenaIndex) {
 }

 void ThreadState::promptlyFreed(size_t gcInfoIndex) {
-  ASSERT(checkThread());
+  DCHECK(checkThread());
   size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask;
   // See the comment in vectorBackingArena() for why this is +3.
   m_likelyToBePromptlyFreed[entryIndex] += 3;
 }

 void ThreadState::takeSnapshot(SnapshotType type) {
-  ASSERT(isInGC());
+  DCHECK(isInGC());

   // 0 is used as index for freelist entries. Objects are indexed 1 to
   // gcInfoIndex.
@@ -1630,7 +1631,7 @@ void ThreadState::takeSnapshot(SnapshotType type) {
             heapsDumpName + "/" #ArenaType);                              \
        break;                                                             \
      default:                                                             \
-       ASSERT_NOT_REACHED();                                              \
+       NOTREACHED();                                                      \
    }                                                                      \
  }

@@ -1648,7 +1649,7 @@ void ThreadState::takeSnapshot(SnapshotType type) {
   SNAPSHOT_HEAP(LargeObject);
   FOR_EACH_TYPED_ARENA(SNAPSHOT_HEAP);

-  ASSERT(numberOfHeapsReported == BlinkGC::NumberOfArenas);
+  DCHECK_EQ(numberOfHeapsReported, BlinkGC::NumberOfArenas);

 #undef SNAPSHOT_HEAP

@@ -1690,7 +1691,7 @@ void ThreadState::collectGarbage(BlinkGC::StackState stackState,
                                  BlinkGC::GCType gcType,
                                  BlinkGC::GCReason reason) {
   // Nested collectGarbage() invocations aren't supported.
-  RELEASE_ASSERT(!isGCForbidden());
+  CHECK(!isGCForbidden());
   completeSweep();

   GCForbiddenScope gcForbiddenScope(this);
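
Reviewer note, not part of the patch: the mechanical mapping applied
throughout is ASSERT -> DCHECK, ASSERT(a op b) -> DCHECK_op(a, b),
RELEASE_ASSERT -> CHECK, ASSERT_NOT_REACHED() -> NOTREACHED(), and
ENABLE(ASSERT) -> DCHECK_IS_ON(). A minimal sketch of the semantics behind
that mapping, assuming Chromium's base/logging.h; exampleChecks and its
parameter are hypothetical, for illustration only:

  #include "base/logging.h"

  void exampleChecks(int currentCount) {
    // DCHECK and its comparison variants compile to no-ops in release
    // builds unless DCHECKs are force-enabled, so they can guard
    // invariants too hot to verify in production. The comparison form
    // logs both operand values on failure, which is why
    // ASSERT(currentCount >= 0) becomes DCHECK_GE(currentCount, 0)
    // rather than DCHECK(currentCount >= 0).
    DCHECK_GE(currentCount, 0);

    // CHECK is compiled into all build types and terminates the process
    // on failure, matching the old RELEASE_ASSERT semantics.
    CHECK(currentCount < 1024);

  #if DCHECK_IS_ON()
    // Debug-only verification code, such as findPageFromAddress() above,
    // is now guarded by DCHECK_IS_ON() instead of ENABLE(ASSERT).
  #endif
  }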
|