| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 80 matching lines...) |
| 91 : m_thread(currentThread()) | 91 : m_thread(currentThread()) |
| 92 , m_persistentRegion(adoptPtr(new PersistentRegion())) | 92 , m_persistentRegion(adoptPtr(new PersistentRegion())) |
| 93 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 93 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 94 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 94 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 95 , m_safePointScopeMarker(nullptr) | 95 , m_safePointScopeMarker(nullptr) |
| 96 , m_atSafePoint(false) | 96 , m_atSafePoint(false) |
| 97 , m_interruptors() | 97 , m_interruptors() |
| 98 , m_sweepForbidden(false) | 98 , m_sweepForbidden(false) |
| 99 , m_noAllocationCount(0) | 99 , m_noAllocationCount(0) |
| 100 , m_gcForbiddenCount(0) | 100 , m_gcForbiddenCount(0) |
| 101 , m_vectorBackingHeapIndex(Vector1HeapIndex) | 101 , m_vectorBackingHeapIndex(BlinkGC::Vector1HeapIndex) |
| 102 , m_currentHeapAges(0) | 102 , m_currentHeapAges(0) |
| 103 , m_isTerminating(false) | 103 , m_isTerminating(false) |
| 104 , m_gcMixinMarker(nullptr) | 104 , m_gcMixinMarker(nullptr) |
| 105 , m_shouldFlushHeapDoesNotContainCache(false) | 105 , m_shouldFlushHeapDoesNotContainCache(false) |
| 106 , m_gcState(NoGCScheduled) | 106 , m_gcState(NoGCScheduled) |
| 107 , m_traceDOMWrappers(nullptr) | 107 , m_traceDOMWrappers(nullptr) |
| 108 #if defined(ADDRESS_SANITIZER) | 108 #if defined(ADDRESS_SANITIZER) |
| 109 , m_asanFakeStack(__asan_get_current_fake_stack()) | 109 , m_asanFakeStack(__asan_get_current_fake_stack()) |
| 110 #endif | 110 #endif |
| 111 #if ENABLE(GC_PROFILING) | 111 #if ENABLE(GC_PROFILING) |
| 112 , m_nextFreeListSnapshotTime(-std::numeric_limits<double>::infinity()) | 112 , m_nextFreeListSnapshotTime(-std::numeric_limits<double>::infinity()) |
| 113 #endif | 113 #endif |
| 114 { | 114 { |
| 115 ASSERT(checkThread()); | 115 ASSERT(checkThread()); |
| 116 ASSERT(!**s_threadSpecific); | 116 ASSERT(!**s_threadSpecific); |
| 117 **s_threadSpecific = this; | 117 **s_threadSpecific = this; |
| 118 | 118 |
| 119 if (isMainThread()) { | 119 if (isMainThread()) { |
| 120 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); | 120 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); |
| 121 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStackSize(); | 121 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStackSize(); |
| 122 if (underestimatedStackSize > sizeof(void*)) | 122 if (underestimatedStackSize > sizeof(void*)) |
| 123 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - sizeof(void*); | 123 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - sizeof(void*); |
| 124 } | 124 } |
| 125 | 125 |
| 126 for (int heapIndex = 0; heapIndex < LargeObjectHeapIndex; heapIndex++) | 126 for (int heapIndex = 0; heapIndex < BlinkGC::LargeObjectHeapIndex; heapIndex++) |
| 127 m_heaps[heapIndex] = new NormalPageHeap(this, heapIndex); | 127 m_heaps[heapIndex] = new NormalPageHeap(this, heapIndex); |
| 128 m_heaps[LargeObjectHeapIndex] = new LargeObjectHeap(this, LargeObjectHeapIndex); | 128 m_heaps[BlinkGC::LargeObjectHeapIndex] = new LargeObjectHeap(this, BlinkGC::LargeObjectHeapIndex); |
| 129 | 129 |
| 130 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArraySize]); | 130 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArraySize]); |
| 131 clearHeapAges(); | 131 clearHeapAges(); |
| 132 | 132 |
| 133 m_threadLocalWeakCallbackStack = new CallbackStack(); | 133 m_threadLocalWeakCallbackStack = new CallbackStack(); |
| 134 } | 134 } |
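The constructor's heap setup reads most easily against the index layout this CL moves into the `BlinkGC` scope. That declaration lives outside this file, so the sketch below is a reconstruction from what the diff itself constrains: `EagerSweepHeapIndex` must be 0 (see the `static_assert` in `completeSweep()`), every index below `LargeObjectHeapIndex` is a `NormalPageHeap`, the four vector heap indices are contiguous, and `NumberOfHeaps` closes the list. Members and ordering beyond those constraints are assumptions.

```cpp
// Sketch of the BlinkGC scope referenced throughout this CL (declared
// elsewhere; anything not pinned down by this file is an assumption).
class BlinkGC final {
public:
    enum HeapIndices {
        EagerSweepHeapIndex = 0, // completeSweep() static_asserts this is 0.
        NormalPage1HeapIndex,
        NormalPage2HeapIndex,
        NormalPage3HeapIndex,
        NormalPage4HeapIndex,
        Vector1HeapIndex,        // Vector1..Vector4 must be contiguous:
        Vector2HeapIndex,        // heapIndexOfVectorHeapLeastRecentlyExpanded()
        Vector3HeapIndex,        // scans the closed range
        Vector4HeapIndex,        // [Vector1HeapIndex, Vector4HeapIndex].
        LargeObjectHeapIndex,    // Everything below here is a NormalPageHeap.
        NumberOfHeaps,
    };
    enum StackState { NoHeapPointersOnStack, HeapPointersOnStack };
    enum GCType { GCWithSweep, GCWithoutSweep, TakeSnapshot }; // Third member assumed from postGC()'s snapshot branch.
    enum V8GCType { V8MinorGC, V8MajorGC };                    // Minor member name assumed.
    enum Poisoning { SetPoison, ClearPoison };
    enum ObjectsToPoison { UnmarkedOnly, MarkedAndUnmarked };
};
```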
| 135 | 135 |
| 136 ThreadState::~ThreadState() | 136 ThreadState::~ThreadState() |
| 137 { | 137 { |
| 138 ASSERT(checkThread()); | 138 ASSERT(checkThread()); |
| 139 delete m_threadLocalWeakCallbackStack; | 139 delete m_threadLocalWeakCallbackStack; |
| 140 m_threadLocalWeakCallbackStack = nullptr; | 140 m_threadLocalWeakCallbackStack = nullptr; |
| 141 for (int i = 0; i < NumberOfHeaps; ++i) | 141 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 142 delete m_heaps[i]; | 142 delete m_heaps[i]; |
| 143 | 143 |
| 144 **s_threadSpecific = nullptr; | 144 **s_threadSpecific = nullptr; |
| 145 if (isMainThread()) { | 145 if (isMainThread()) { |
| 146 s_mainThreadStackStart = 0; | 146 s_mainThreadStackStart = 0; |
| 147 s_mainThreadUnderestimatedStackSize = 0; | 147 s_mainThreadUnderestimatedStackSize = 0; |
| 148 } | 148 } |
| 149 } | 149 } |
| 150 | 150 |
| 151 void ThreadState::init() | 151 void ThreadState::init() |
| (...skipping 22 matching lines...) |
| 174 { | 174 { |
| 175 // Enter a safe point before trying to acquire threadAttachMutex | 175 // Enter a safe point before trying to acquire threadAttachMutex |
| 176 // to avoid dead lock if another thread is preparing for GC, has acquired | 176 // to avoid dead lock if another thread is preparing for GC, has acquired |
| 177 // threadAttachMutex and waiting for other threads to pause or reach a | 177 // threadAttachMutex and waiting for other threads to pause or reach a |
| 178 // safepoint. | 178 // safepoint. |
| 179 ThreadState* state = mainThreadState(); | 179 ThreadState* state = mainThreadState(); |
| 180 | 180 |
| 181 // 1. Finish sweeping. | 181 // 1. Finish sweeping. |
| 182 state->completeSweep(); | 182 state->completeSweep(); |
| 183 { | 183 { |
| 184 SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack); | 184 SafePointAwareMutexLocker locker(threadAttachMutex(), BlinkGC::NoHeapPointersOnStack); |
| 185 | 185 |
| 186 // 2. Add the main thread's heap pages to the orphaned pool. | 186 // 2. Add the main thread's heap pages to the orphaned pool. |
| 187 state->cleanupPages(); | 187 state->cleanupPages(); |
| 188 | 188 |
| 189 // 3. Detach the main thread. | 189 // 3. Detach the main thread. |
| 190 ASSERT(attachedThreads().contains(state)); | 190 ASSERT(attachedThreads().contains(state)); |
| 191 attachedThreads().remove(state); | 191 attachedThreads().remove(state); |
| 192 state->~ThreadState(); | 192 state->~ThreadState(); |
| 193 } | 193 } |
| 194 shutdownHeapIfNecessary(); | 194 shutdownHeapIfNecessary(); |
| (...skipping 15 matching lines...) |
| 210 { | 210 { |
| 211 RELEASE_ASSERT(!Heap::s_shutdownCalled); | 211 RELEASE_ASSERT(!Heap::s_shutdownCalled); |
| 212 MutexLocker locker(threadAttachMutex()); | 212 MutexLocker locker(threadAttachMutex()); |
| 213 ThreadState* state = new ThreadState(); | 213 ThreadState* state = new ThreadState(); |
| 214 attachedThreads().add(state); | 214 attachedThreads().add(state); |
| 215 } | 215 } |
| 216 | 216 |
| 217 void ThreadState::cleanupPages() | 217 void ThreadState::cleanupPages() |
| 218 { | 218 { |
| 219 ASSERT(checkThread()); | 219 ASSERT(checkThread()); |
| 220 for (int i = 0; i < NumberOfHeaps; ++i) | 220 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 221 m_heaps[i]->cleanupPages(); | 221 m_heaps[i]->cleanupPages(); |
| 222 } | 222 } |
| 223 | 223 |
| 224 void ThreadState::cleanup() | 224 void ThreadState::cleanup() |
| 225 { | 225 { |
| 226 ASSERT(checkThread()); | 226 ASSERT(checkThread()); |
| 227 { | 227 { |
| 228 // Grab the threadAttachMutex to ensure only one thread can shutdown at | 228 // Grab the threadAttachMutex to ensure only one thread can shutdown at |
| 229 // a time and that no other thread can do a global GC. It also allows | 229 // a time and that no other thread can do a global GC. It also allows |
| 230 // safe iteration of the attachedThreads set which happens as part of | 230 // safe iteration of the attachedThreads set which happens as part of |
| 231 // thread local GC asserts. We enter a safepoint while waiting for the | 231 // thread local GC asserts. We enter a safepoint while waiting for the |
| 232 // lock to avoid a dead-lock where another thread has already requested | 232 // lock to avoid a dead-lock where another thread has already requested |
| 233 // GC. | 233 // GC. |
| 234 SafePointAwareMutexLocker locker(threadAttachMutex(), NoHeapPointersOnStack); | 234 SafePointAwareMutexLocker locker(threadAttachMutex(), BlinkGC::NoHeapPointersOnStack); |
| 235 | 235 |
| 236 // Finish sweeping. | 236 // Finish sweeping. |
| 237 completeSweep(); | 237 completeSweep(); |
| 238 | 238 |
| 239 // From here on ignore all conservatively discovered | 239 // From here on ignore all conservatively discovered |
| 240 // pointers into the heap owned by this thread. | 240 // pointers into the heap owned by this thread. |
| 241 m_isTerminating = true; | 241 m_isTerminating = true; |
| 242 | 242 |
| 243 // Set the terminate flag on all heap pages of this thread. This is used to | 243 // Set the terminate flag on all heap pages of this thread. This is used to |
| 244 // ensure we don't trace pages on other threads that are not part of the | 244 // ensure we don't trace pages on other threads that are not part of the |
| (...skipping 77 matching lines...) |
| 322 for (Address* p = fakeFrameStart; p < fakeFrameEnd; ++p) | 322 for (Address* p = fakeFrameStart; p < fakeFrameEnd; ++p) |
| 323 Heap::checkAndMarkPointer(visitor, *p); | 323 Heap::checkAndMarkPointer(visitor, *p); |
| 324 } | 324 } |
| 325 } | 325 } |
| 326 #endif | 326 #endif |
| 327 } | 327 } |
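The fake-stack walk above bottoms out in the loop over `[fakeFrameStart, fakeFrameEnd)`. A minimal sketch of how those bounds are typically obtained under ASan via the public sanitizer interface follows; the helper name is illustrative, not this CL's, and `Visitor`/`Address`/`Heap` are the surrounding Blink types.

```cpp
#include <sanitizer/asan_interface.h>

// Illustrative only: resolve the ASan fake frame containing `address` and
// conservatively visit every slot in it. __asan_addr_is_in_fake_stack()
// returns the frame's base (or null) and reports its [beg, end) bounds.
static void visitFakeFrameForPointer(Visitor* visitor, void* fakeStack, Address address)
{
    void* frameStart = nullptr;
    void* frameEnd = nullptr;
    if (__asan_addr_is_in_fake_stack(fakeStack, address, &frameStart, &frameEnd)) {
        for (Address* p = static_cast<Address*>(frameStart); p < static_cast<Address*>(frameEnd); ++p)
            Heap::checkAndMarkPointer(visitor, *p); // Same visit as the real stack scan.
    }
}
```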
| 328 | 328 |
| 329 NO_SANITIZE_ADDRESS | 329 NO_SANITIZE_ADDRESS |
| 330 void ThreadState::visitStack(Visitor* visitor) | 330 void ThreadState::visitStack(Visitor* visitor) |
| 331 { | 331 { |
| 332 if (m_stackState == NoHeapPointersOnStack) | 332 if (m_stackState == BlinkGC::NoHeapPointersOnStack) |
| 333 return; | 333 return; |
| 334 | 334 |
| 335 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 335 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
| 336 // If there is a safepoint scope marker we should stop the stack | 336 // If there is a safepoint scope marker we should stop the stack |
| 337 // scanning there to not touch active parts of the stack. Anything | 337 // scanning there to not touch active parts of the stack. Anything |
| 338 // interesting beyond that point is in the safepoint stack copy. | 338 // interesting beyond that point is in the safepoint stack copy. |
| 339 // If there is no scope marker the thread is blocked and we should | 339 // If there is no scope marker the thread is blocked and we should |
| 340 // scan all the way to the recorded end stack pointer. | 340 // scan all the way to the recorded end stack pointer. |
| 341 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 341 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
| 342 Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker); | 342 Address* safePointScopeMarker = reinterpret_cast<Address*>(m_safePointScopeMarker); |
| (...skipping 68 matching lines...) |
| 411 | 411 |
| 412 void ThreadState::snapshot() | 412 void ThreadState::snapshot() |
| 413 { | 413 { |
| 414 SnapshotInfo info(this); | 414 SnapshotInfo info(this); |
| 415 RefPtr<TracedValue> json = TracedValue::create(); | 415 RefPtr<TracedValue> json = TracedValue::create(); |
| 416 | 416 |
| 417 #define SNAPSHOT_HEAP(HeapType) \ | 417 #define SNAPSHOT_HEAP(HeapType) \ |
| 418 { \ | 418 { \ |
| 419 json->beginDictionary(); \ | 419 json->beginDictionary(); \ |
| 420 json->setString("name", #HeapType); \ | 420 json->setString("name", #HeapType); \ |
| 421 m_heaps[HeapType##HeapIndex]->snapshot(json.get(), &info); \ | 421 m_heaps[BlinkGC::HeapType##HeapIndex]->snapshot(json.get(), &info); \ |
| 422 json->endDictionary(); \ | 422 json->endDictionary(); \ |
| 423 } | 423 } |
| 424 json->beginArray("heaps"); | 424 json->beginArray("heaps"); |
| 425 SNAPSHOT_HEAP(EagerSweep); | 425 SNAPSHOT_HEAP(EagerSweep); |
| 426 SNAPSHOT_HEAP(NormalPage1); | 426 SNAPSHOT_HEAP(NormalPage1); |
| 427 SNAPSHOT_HEAP(NormalPage2); | 427 SNAPSHOT_HEAP(NormalPage2); |
| 428 SNAPSHOT_HEAP(NormalPage3); | 428 SNAPSHOT_HEAP(NormalPage3); |
| 429 SNAPSHOT_HEAP(NormalPage4); | 429 SNAPSHOT_HEAP(NormalPage4); |
| 430 SNAPSHOT_HEAP(Vector1); | 430 SNAPSHOT_HEAP(Vector1); |
| 431 SNAPSHOT_HEAP(Vector2); | 431 SNAPSHOT_HEAP(Vector2); |
| (...skipping 36 matching lines...) |
| 468 } | 468 } |
| 469 json->endArray(); | 469 json->endArray(); |
| 470 json->setInteger("liveSize", liveSize); | 470 json->setInteger("liveSize", liveSize); |
| 471 json->setInteger("deadSize", deadSize); | 471 json->setInteger("deadSize", deadSize); |
| 472 | 472 |
| 473 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "ThreadState", this, json.release()); | 473 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "ThreadState", this, json.release()); |
| 474 } | 474 } |
| 475 | 475 |
| 476 void ThreadState::incrementMarkedObjectsAge() | 476 void ThreadState::incrementMarkedObjectsAge() |
| 477 { | 477 { |
| 478 for (int i = 0; i < NumberOfHeaps; ++i) | 478 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 479 m_heaps[i]->incrementMarkedObjectsAge(); | 479 m_heaps[i]->incrementMarkedObjectsAge(); |
| 480 } | 480 } |
| 481 #endif | 481 #endif |
| 482 | 482 |
| 483 void ThreadState::pushThreadLocalWeakCallback(void* object, WeakCallback callback) | 483 void ThreadState::pushThreadLocalWeakCallback(void* object, WeakCallback callback) |
| 484 { | 484 { |
| 485 CallbackStack::Item* slot = m_threadLocalWeakCallbackStack->allocateEntry(); | 485 CallbackStack::Item* slot = m_threadLocalWeakCallbackStack->allocateEntry(); |
| 486 *slot = CallbackStack::Item(object, callback); | 486 *slot = CallbackStack::Item(object, callback); |
| 487 } | 487 } |
| 488 | 488 |
| (...skipping 151 matching lines...) |
| 640 | 640 |
| 641 // If we're consuming too much memory, trigger a conservative GC | 641 // If we're consuming too much memory, trigger a conservative GC |
| 642 // aggressively. This is a safe guard to avoid OOM. | 642 // aggressively. This is a safe guard to avoid OOM. |
| 643 bool ThreadState::shouldForceMemoryPressureGC() | 643 bool ThreadState::shouldForceMemoryPressureGC() |
| 644 { | 644 { |
| 645 if (totalMemorySize() < 300 * 1024 * 1024) | 645 if (totalMemorySize() < 300 * 1024 * 1024) |
| 646 return false; | 646 return false; |
| 647 return judgeGCThreshold(0, 1.5); | 647 return judgeGCThreshold(0, 1.5); |
| 648 } | 648 } |
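On the numbers: with a 300MB floor and a zero-byte size threshold, the only real gate past the floor is the heap growth ratio. `judgeGCThreshold()` is defined outside this hunk, so the standalone model below assumes its two arguments are an allocated-size threshold and a heap-growing-rate threshold, which is how the call sites in this file read.

```cpp
#include <cstddef>

// Standalone model, assuming judgeGCThreshold(sizeThreshold, rateThreshold)
// means "allocated size >= sizeThreshold && growth ratio >= rateThreshold".
bool shouldForceMemoryPressureGCModel(size_t totalMemoryBytes,
                                      size_t currentObjectSize,
                                      size_t estimatedLiveObjectSize)
{
    if (totalMemoryBytes < 300 * 1024 * 1024) // Under 300MB: never force a GC.
        return false;
    if (!estimatedLiveObjectSize)
        return true; // Degenerate case: treat unbounded growth as pressure.
    double growingRate =
        static_cast<double>(currentObjectSize) / estimatedLiveObjectSize;
    return growingRate >= 1.5; // Size threshold of 0 makes growth the only gate.
}
```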
| 649 | 649 |
| 650 void ThreadState::scheduleV8FollowupGCIfNeeded(V8GCType gcType) | 650 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) |
| 651 { | 651 { |
| 652 ASSERT(checkThread()); | 652 ASSERT(checkThread()); |
| 653 Heap::reportMemoryUsageForTracing(); | 653 Heap::reportMemoryUsageForTracing(); |
| 654 | 654 |
| 655 #if PRINT_HEAP_STATS | 655 #if PRINT_HEAP_STATS |
| 656 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType == V8MajorGC ? "MajorGC" : "MinorGC"); | 656 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType == BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); |
| 657 #endif | 657 #endif |
| 658 | 658 |
| 659 if (isGCForbidden()) | 659 if (isGCForbidden()) |
| 660 return; | 660 return; |
| 661 | 661 |
| 662 // This completeSweep() will do nothing in common cases since we've | 662 // This completeSweep() will do nothing in common cases since we've |
| 663 // called completeSweep() before V8 starts minor/major GCs. | 663 // called completeSweep() before V8 starts minor/major GCs. |
| 664 completeSweep(); | 664 completeSweep(); |
| 665 ASSERT(!isSweepingInProgress()); | 665 ASSERT(!isSweepingInProgress()); |
| 666 ASSERT(!sweepForbidden()); | 666 ASSERT(!sweepForbidden()); |
| 667 | 667 |
| 668 // TODO(haraken): Consider if we should trigger a memory pressure GC | 668 // TODO(haraken): Consider if we should trigger a memory pressure GC |
| 669 // for V8 minor GCs as well. | 669 // for V8 minor GCs as well. |
| 670 if (gcType == V8MajorGC && shouldForceMemoryPressureGC()) { | 670 if (gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) { |
| 671 #if PRINT_HEAP_STATS | 671 #if PRINT_HEAP_STATS |
| 672 dataLogF("Scheduled MemoryPressureGC\n"); | 672 dataLogF("Scheduled MemoryPressureGC\n"); |
| 673 #endif | 673 #endif |
| 674 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::MemoryPressureGC); | 674 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, Heap::MemoryPressureGC); |
| 675 return; | 675 return; |
| 676 } | 676 } |
| 677 if (shouldScheduleV8FollowupGC()) { | 677 if (shouldScheduleV8FollowupGC()) { |
| 678 #if PRINT_HEAP_STATS | 678 #if PRINT_HEAP_STATS |
| 679 dataLogF("Scheduled PreciseGC\n"); | 679 dataLogF("Scheduled PreciseGC\n"); |
| 680 #endif | 680 #endif |
| 681 schedulePreciseGC(); | 681 schedulePreciseGC(); |
| 682 return; | 682 return; |
| 683 } | 683 } |
| 684 if (gcType == V8MajorGC) { | 684 if (gcType == BlinkGC::V8MajorGC) { |
| 685 #if PRINT_HEAP_STATS | 685 #if PRINT_HEAP_STATS |
| 686 dataLogF("Scheduled IdleGC\n"); | 686 dataLogF("Scheduled IdleGC\n"); |
| 687 #endif | 687 #endif |
| 688 scheduleIdleGC(); | 688 scheduleIdleGC(); |
| 689 return; | 689 return; |
| 690 } | 690 } |
| 691 } | 691 } |
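Condensing the ladder above into one place (a model, not code from the CL): for a V8 followup, memory pressure wins, then the precise-GC heuristic, and only a major GC falls back to scheduling an idle GC.

```cpp
// Decision order of scheduleV8FollowupGCIfNeeded(), as a pure function.
enum class FollowupAction { MemoryPressureGC, PreciseGC, IdleGC, None };

FollowupAction chooseV8Followup(bool gcForbidden, bool memoryPressure,
                                bool preciseGCWorthwhile, bool wasMajorGC)
{
    if (gcForbidden)
        return FollowupAction::None;
    if (wasMajorGC && memoryPressure)
        return FollowupAction::MemoryPressureGC; // Immediate conservative GC.
    if (preciseGCWorthwhile)
        return FollowupAction::PreciseGC;        // schedulePreciseGC().
    if (wasMajorGC)
        return FollowupAction::IdleGC;           // scheduleIdleGC().
    return FollowupAction::None;                 // Minor GC: nothing to do.
}
```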
| 692 | 692 |
| 693 void ThreadState::willStartV8GC() | 693 void ThreadState::willStartV8GC() |
| 694 { | 694 { |
| 695 // Finish Oilpan's complete sweeping before running a V8 GC. | 695 // Finish Oilpan's complete sweeping before running a V8 GC. |
| 696 // This will let the GC collect more V8 objects. | 696 // This will let the GC collect more V8 objects. |
| 697 // | 697 // |
| 698 // TODO(haraken): It's a bit too late for a major GC to schedule | 698 // TODO(haraken): It's a bit too late for a major GC to schedule |
| 699 // completeSweep() here, because gcPrologue for a major GC is called | 699 // completeSweep() here, because gcPrologue for a major GC is called |
| 700 // not at the point where the major GC started but at the point where | 700 // not at the point where the major GC started but at the point where |
| 701 // the major GC requests object grouping. | 701 // the major GC requests object grouping. |
| 702 completeSweep(); | 702 completeSweep(); |
| 703 | 703 |
| 704 // The fact that the PageNavigation GC is scheduled means that there is | 704 // The fact that the PageNavigation GC is scheduled means that there is |
| 705 // a dead frame. In common cases, a sequence of Oilpan's GC => V8 GC => | 705 // a dead frame. In common cases, a sequence of Oilpan's GC => V8 GC => |
| 706 // Oilpan's GC is needed to collect the dead frame. So we force the | 706 // Oilpan's GC is needed to collect the dead frame. So we force the |
| 707 // PageNavigation GC before running the V8 GC. | 707 // PageNavigation GC before running the V8 GC. |
| 708 if (gcState() == PageNavigationGCScheduled) { | 708 if (gcState() == PageNavigationGCScheduled) { |
| 709 #if PRINT_HEAP_STATS | 709 #if PRINT_HEAP_STATS |
| 710 dataLogF("Scheduled PageNavigationGC\n"); | 710 dataLogF("Scheduled PageNavigationGC\n"); |
| 711 #endif | 711 #endif |
| 712 Heap::collectGarbage(HeapPointersOnStack, GCWithSweep, Heap::PageNavigationGC); | 712 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithSweep, Heap::PageNavigationGC); |
| 713 } | 713 } |
| 714 } | 714 } |
| 715 | 715 |
| 716 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio) | 716 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio) |
| 717 { | 717 { |
| 718 ASSERT(checkThread()); | 718 ASSERT(checkThread()); |
| 719 Heap::reportMemoryUsageForTracing(); | 719 Heap::reportMemoryUsageForTracing(); |
| 720 | 720 |
| 721 #if PRINT_HEAP_STATS | 721 #if PRINT_HEAP_STATS |
| 722 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRatio=%.2lf)\n", estimatedRemovalRatio); | 722 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRatio=%.2lf)\n", estimatedRemovalRatio); |
| 723 #endif | 723 #endif |
| 724 | 724 |
| 725 if (isGCForbidden()) | 725 if (isGCForbidden()) |
| 726 return; | 726 return; |
| 727 | 727 |
| 728 // Finish on-going lazy sweeping. | 728 // Finish on-going lazy sweeping. |
| 729 // TODO(haraken): It might not make sense to force completeSweep() for all | 729 // TODO(haraken): It might not make sense to force completeSweep() for all |
| 730 // page navigations. | 730 // page navigations. |
| 731 completeSweep(); | 731 completeSweep(); |
| 732 ASSERT(!isSweepingInProgress()); | 732 ASSERT(!isSweepingInProgress()); |
| 733 ASSERT(!sweepForbidden()); | 733 ASSERT(!sweepForbidden()); |
| 734 | 734 |
| 735 if (shouldForceMemoryPressureGC()) { | 735 if (shouldForceMemoryPressureGC()) { |
| 736 #if PRINT_HEAP_STATS | 736 #if PRINT_HEAP_STATS |
| 737 dataLogF("Scheduled MemoryPressureGC\n"); | 737 dataLogF("Scheduled MemoryPressureGC\n"); |
| 738 #endif | 738 #endif |
| 739 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::MemoryPressureGC); | 739 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, Heap::MemoryPressureGC); |
| 740 return; | 740 return; |
| 741 } | 741 } |
| 742 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { | 742 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { |
| 743 #if PRINT_HEAP_STATS | 743 #if PRINT_HEAP_STATS |
| 744 dataLogF("Scheduled PageNavigationGC\n"); | 744 dataLogF("Scheduled PageNavigationGC\n"); |
| 745 #endif | 745 #endif |
| 746 schedulePageNavigationGC(); | 746 schedulePageNavigationGC(); |
| 747 return; | 747 return; |
| 748 } | 748 } |
| 749 } | 749 } |
| (...skipping 18 matching lines...) |
| 768 // trigger nested GCs. | 768 // trigger nested GCs. |
| 769 if (isGCForbidden()) | 769 if (isGCForbidden()) |
| 770 return; | 770 return; |
| 771 | 771 |
| 772 if (shouldForceMemoryPressureGC()) { | 772 if (shouldForceMemoryPressureGC()) { |
| 773 completeSweep(); | 773 completeSweep(); |
| 774 if (shouldForceMemoryPressureGC()) { | 774 if (shouldForceMemoryPressureGC()) { |
| 775 #if PRINT_HEAP_STATS | 775 #if PRINT_HEAP_STATS |
| 776 dataLogF("Scheduled MemoryPressureGC\n"); | 776 dataLogF("Scheduled MemoryPressureGC\n"); |
| 777 #endif | 777 #endif |
| 778 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::MemoryPressureGC); | 778 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, Heap::MemoryPressureGC); |
| 779 return; | 779 return; |
| 780 } | 780 } |
| 781 } | 781 } |
| 782 | 782 |
| 783 if (isSweepingInProgress()) | 783 if (isSweepingInProgress()) |
| 784 return; | 784 return; |
| 785 ASSERT(!sweepForbidden()); | 785 ASSERT(!sweepForbidden()); |
| 786 | 786 |
| 787 if (shouldForceConservativeGC()) { | 787 if (shouldForceConservativeGC()) { |
| 788 completeSweep(); | 788 completeSweep(); |
| 789 if (shouldForceConservativeGC()) { | 789 if (shouldForceConservativeGC()) { |
| 790 #if PRINT_HEAP_STATS | 790 #if PRINT_HEAP_STATS |
| 791 dataLogF("Scheduled ConservativeGC\n"); | 791 dataLogF("Scheduled ConservativeGC\n"); |
| 792 #endif | 792 #endif |
| 793 Heap::collectGarbage(HeapPointersOnStack, GCWithoutSweep, Heap::ConservativeGC); | 793 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, Heap::ConservativeGC); |
| 794 return; | 794 return; |
| 795 } | 795 } |
| 796 } | 796 } |
| 797 if (shouldScheduleIdleGC()) { | 797 if (shouldScheduleIdleGC()) { |
| 798 #if PRINT_HEAP_STATS | 798 #if PRINT_HEAP_STATS |
| 799 dataLogF("Scheduled IdleGC\n"); | 799 dataLogF("Scheduled IdleGC\n"); |
| 800 #endif | 800 #endif |
| 801 scheduleIdleGC(); | 801 scheduleIdleGC(); |
| 802 return; | 802 return; |
| 803 } | 803 } |
| 804 } | 804 } |
| 805 | 805 |
| 806 void ThreadState::performIdleGC(double deadlineSeconds) | 806 void ThreadState::performIdleGC(double deadlineSeconds) |
| 807 { | 807 { |
| 808 ASSERT(checkThread()); | 808 ASSERT(checkThread()); |
| 809 ASSERT(isMainThread()); | 809 ASSERT(isMainThread()); |
| 810 | 810 |
| 811 if (gcState() != IdleGCScheduled) | 811 if (gcState() != IdleGCScheduled) |
| 812 return; | 812 return; |
| 813 | 813 |
| 814 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonicallyIncreasingTime(); | 814 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonicallyIncreasingTime(); |
| 815 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", Heap::estimatedMarkingTime()); | 815 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", Heap::estimatedMarkingTime()); |
| 816 if (idleDeltaInSeconds <= Heap::estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { | 816 if (idleDeltaInSeconds <= Heap::estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { |
| 817 // If marking is estimated to take longer than the deadline and we can't | 817 // If marking is estimated to take longer than the deadline and we can't |
| 818 // exceed the deadline, then reschedule for the next idle period. | 818 // exceed the deadline, then reschedule for the next idle period. |
| 819 scheduleIdleGC(); | 819 scheduleIdleGC(); |
| 820 return; | 820 return; |
| 821 } | 821 } |
| 822 | 822 |
| 823 Heap::collectGarbage(NoHeapPointersOnStack, GCWithoutSweep, Heap::IdleGC); | 823 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, Heap::IdleGC); |
| 824 } | 824 } |
| 825 | 825 |
| 826 void ThreadState::performIdleLazySweep(double deadlineSeconds) | 826 void ThreadState::performIdleLazySweep(double deadlineSeconds) |
| 827 { | 827 { |
| 828 ASSERT(checkThread()); | 828 ASSERT(checkThread()); |
| 829 ASSERT(isMainThread()); | 829 ASSERT(isMainThread()); |
| 830 | 830 |
| 831 // If we are not in a sweeping phase, there is nothing to do here. | 831 // If we are not in a sweeping phase, there is nothing to do here. |
| 832 if (!isSweepingInProgress()) | 832 if (!isSweepingInProgress()) |
| 833 return; | 833 return; |
| 834 | 834 |
| 835 // This check is here to prevent performIdleLazySweep() from being called | 835 // This check is here to prevent performIdleLazySweep() from being called |
| 836 // recursively. I'm not sure if it can happen but it would be safer to have | 836 // recursively. I'm not sure if it can happen but it would be safer to have |
| 837 // the check just in case. | 837 // the check just in case. |
| 838 if (sweepForbidden()) | 838 if (sweepForbidden()) |
| 839 return; | 839 return; |
| 840 | 840 |
| 841 TRACE_EVENT1("blink_gc", "ThreadState::performIdleLazySweep", "idleDeltaInSeconds", deadlineSeconds - Platform::current()->monotonicallyIncreasingTime()); | 841 TRACE_EVENT1("blink_gc", "ThreadState::performIdleLazySweep", "idleDeltaInSeconds", deadlineSeconds - Platform::current()->monotonicallyIncreasingTime()); |
| 842 | 842 |
| 843 bool sweepCompleted = true; | 843 bool sweepCompleted = true; |
| 844 SweepForbiddenScope scope(this); | 844 SweepForbiddenScope scope(this); |
| 845 { | 845 { |
| 846 if (isMainThread()) | 846 if (isMainThread()) |
| 847 ScriptForbiddenScope::enter(); | 847 ScriptForbiddenScope::enter(); |
| 848 | 848 |
| 849 for (int i = 0; i < NumberOfHeaps; i++) { | 849 for (int i = 0; i < BlinkGC::NumberOfHeaps; i++) { |
| 850 // lazySweepWithDeadline() won't check the deadline until it sweeps | 850 // lazySweepWithDeadline() won't check the deadline until it sweeps |
| 851 // 10 pages. So we give a small slack for safety. | 851 // 10 pages. So we give a small slack for safety. |
| 852 double slack = 0.001; | 852 double slack = 0.001; |
| 853 double remainingBudget = deadlineSeconds - slack - Platform::current()->monotonicallyIncreasingTime(); | 853 double remainingBudget = deadlineSeconds - slack - Platform::current()->monotonicallyIncreasingTime(); |
| 854 if (remainingBudget <= 0 || !m_heaps[i]->lazySweepWithDeadline(deadlineSeconds)) { | 854 if (remainingBudget <= 0 || !m_heaps[i]->lazySweepWithDeadline(deadlineSeconds)) { |
| 855 // We couldn't finish the sweeping within the deadline. | 855 // We couldn't finish the sweeping within the deadline. |
| 856 // We request another idle task for the remaining sweeping. | 856 // We request another idle task for the remaining sweeping. |
| 857 scheduleIdleLazySweep(); | 857 scheduleIdleLazySweep(); |
| 858 sweepCompleted = false; | 858 sweepCompleted = false; |
| 859 break; | 859 break; |
| (...skipping 110 matching lines...) |
| 970 } | 970 } |
| 971 m_gcState = gcState; | 971 m_gcState = gcState; |
| 972 #if ENABLE(GC_PROFILING) | 972 #if ENABLE(GC_PROFILING) |
| 973 if (isMainThread()) | 973 if (isMainThread()) |
| 974 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::gcState", static_cast<int>(m_gcState)); | 974 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::gcState", static_cast<int>(m_gcState)); |
| 975 #endif | 975 #endif |
| 976 } | 976 } |
| 977 | 977 |
| 978 #undef VERIFY_STATE_TRANSITION | 978 #undef VERIFY_STATE_TRANSITION |
| 979 | 979 |
| 980 ThreadState::GCState ThreadState::gcState() const | 980 void ThreadState::runScheduledGC(BlinkGC::StackState stackState) |
| 981 { | |
| 982 return m_gcState; | |
| 983 } | |
| 984 | |
| 985 void ThreadState::runScheduledGC(StackState stackState) | |
| 986 { | 981 { |
| 987 ASSERT(checkThread()); | 982 ASSERT(checkThread()); |
| 988 if (stackState != NoHeapPointersOnStack) | 983 if (stackState != BlinkGC::NoHeapPointersOnStack) |
| 989 return; | 984 return; |
| 990 | 985 |
| 991 // If a safe point is entered while initiating a GC, we clearly do | 986 // If a safe point is entered while initiating a GC, we clearly do |
| 992 // not want to do another as part that -- the safe point is only | 987 // not want to do another as part that -- the safe point is only |
| 993 // entered after checking if a scheduled GC ought to run first. | 988 // entered after checking if a scheduled GC ought to run first. |
| 994 // Prevent that from happening by marking GCs as forbidden while | 989 // Prevent that from happening by marking GCs as forbidden while |
| 995 // one is initiated and later running. | 990 // one is initiated and later running. |
| 996 if (isGCForbidden()) | 991 if (isGCForbidden()) |
| 997 return; | 992 return; |
| 998 | 993 |
| 999 switch (gcState()) { | 994 switch (gcState()) { |
| 1000 case FullGCScheduled: | 995 case FullGCScheduled: |
| 1001 Heap::collectAllGarbage(); | 996 Heap::collectAllGarbage(); |
| 1002 break; | 997 break; |
| 1003 case PreciseGCScheduled: | 998 case PreciseGCScheduled: |
| 1004 Heap::collectGarbage(NoHeapPointersOnStack, GCWithoutSweep, Heap::PreciseGC); | 999 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, Heap::PreciseGC); |
| 1005 break; | 1000 break; |
| 1006 case PageNavigationGCScheduled: | 1001 case PageNavigationGCScheduled: |
| 1007 Heap::collectGarbage(NoHeapPointersOnStack, GCWithSweep, Heap::PageNavigationGC); | 1002 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Heap::PageNavigationGC); |
| 1008 break; | 1003 break; |
| 1009 case IdleGCScheduled: | 1004 case IdleGCScheduled: |
| 1010 // Idle time GC will be scheduled by Blink Scheduler. | 1005 // Idle time GC will be scheduled by Blink Scheduler. |
| 1011 break; | 1006 break; |
| 1012 default: | 1007 default: |
| 1013 break; | 1008 break; |
| 1014 } | 1009 } |
| 1015 } | 1010 } |
| 1016 | 1011 |
| 1017 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() | 1012 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() |
| 1018 { | 1013 { |
| 1019 if (m_shouldFlushHeapDoesNotContainCache) { | 1014 if (m_shouldFlushHeapDoesNotContainCache) { |
| 1020 Heap::flushHeapDoesNotContainCache(); | 1015 Heap::flushHeapDoesNotContainCache(); |
| 1021 m_shouldFlushHeapDoesNotContainCache = false; | 1016 m_shouldFlushHeapDoesNotContainCache = false; |
| 1022 } | 1017 } |
| 1023 } | 1018 } |
| 1024 | 1019 |
| 1025 void ThreadState::makeConsistentForGC() | 1020 void ThreadState::makeConsistentForGC() |
| 1026 { | 1021 { |
| 1027 ASSERT(isInGC()); | 1022 ASSERT(isInGC()); |
| 1028 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); | 1023 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); |
| 1029 for (int i = 0; i < NumberOfHeaps; ++i) | 1024 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 1030 m_heaps[i]->makeConsistentForGC(); | 1025 m_heaps[i]->makeConsistentForGC(); |
| 1031 } | 1026 } |
| 1032 | 1027 |
| 1033 void ThreadState::makeConsistentForMutator() | 1028 void ThreadState::makeConsistentForMutator() |
| 1034 { | 1029 { |
| 1035 ASSERT(isInGC()); | 1030 ASSERT(isInGC()); |
| 1036 for (int i = 0; i < NumberOfHeaps; ++i) | 1031 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 1037 m_heaps[i]->makeConsistentForMutator(); | 1032 m_heaps[i]->makeConsistentForMutator(); |
| 1038 } | 1033 } |
| 1039 | 1034 |
| 1040 void ThreadState::preGC() | 1035 void ThreadState::preGC() |
| 1041 { | 1036 { |
| 1042 ASSERT(!isInGC()); | 1037 ASSERT(!isInGC()); |
| 1043 setGCState(GCRunning); | 1038 setGCState(GCRunning); |
| 1044 makeConsistentForGC(); | 1039 makeConsistentForGC(); |
| 1045 flushHeapDoesNotContainCacheIfNeeded(); | 1040 flushHeapDoesNotContainCacheIfNeeded(); |
| 1046 clearHeapAges(); | 1041 clearHeapAges(); |
| 1047 } | 1042 } |
| 1048 | 1043 |
| 1049 void ThreadState::postGC(GCType gcType) | 1044 void ThreadState::postGC(BlinkGC::GCType gcType) |
| 1050 { | 1045 { |
| 1051 ASSERT(isInGC()); | 1046 ASSERT(isInGC()); |
| 1052 | 1047 |
| 1053 #if ENABLE(GC_PROFILING) | 1048 #if ENABLE(GC_PROFILING) |
| 1054 // We snapshot the heap prior to sweeping to get numbers for both resources | 1049 // We snapshot the heap prior to sweeping to get numbers for both resources |
| 1055 // that have been allocated since the last GC and for resources that are | 1050 // that have been allocated since the last GC and for resources that are |
| 1056 // going to be freed. | 1051 // going to be freed. |
| 1057 bool gcTracingEnabled; | 1052 bool gcTracingEnabled; |
| 1058 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 1053 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
| 1059 | 1054 |
| 1060 if (gcTracingEnabled) { | 1055 if (gcTracingEnabled) { |
| 1061 bool disabledByDefaultGCTracingEnabled; | 1056 bool disabledByDefaultGCTracingEnabled; |
| 1062 TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("blink_gc"), &disabledByDefaultGCTracingEnabled); | 1057 TRACE_EVENT_CATEGORY_GROUP_ENABLED(TRACE_DISABLED_BY_DEFAULT("blink_gc"), &disabledByDefaultGCTracingEnabled); |
| 1063 | 1058 |
| 1064 snapshot(); | 1059 snapshot(); |
| 1065 if (disabledByDefaultGCTracingEnabled) | 1060 if (disabledByDefaultGCTracingEnabled) |
| 1066 collectAndReportMarkSweepStats(); | 1061 collectAndReportMarkSweepStats(); |
| 1067 incrementMarkedObjectsAge(); | 1062 incrementMarkedObjectsAge(); |
| 1068 } | 1063 } |
| 1069 #endif | 1064 #endif |
| 1070 | 1065 |
| 1071 for (int i = 0; i < NumberOfHeaps; i++) | 1066 for (int i = 0; i < BlinkGC::NumberOfHeaps; i++) |
| 1072 m_heaps[i]->prepareForSweep(); | 1067 m_heaps[i]->prepareForSweep(); |
| 1073 | 1068 |
| 1074 if (gcType == GCWithSweep) { | 1069 if (gcType == BlinkGC::GCWithSweep) { |
| 1075 setGCState(EagerSweepScheduled); | 1070 setGCState(EagerSweepScheduled); |
| 1076 } else if (gcType == GCWithoutSweep) { | 1071 } else if (gcType == BlinkGC::GCWithoutSweep) { |
| 1077 setGCState(LazySweepScheduled); | 1072 setGCState(LazySweepScheduled); |
| 1078 } else { | 1073 } else { |
| 1079 takeSnapshot(SnapshotType::HeapSnapshot); | 1074 takeSnapshot(SnapshotType::HeapSnapshot); |
| 1080 | 1075 |
| 1081 // This unmarks all marked objects and marks all unmarked objects dead. | 1076 // This unmarks all marked objects and marks all unmarked objects dead. |
| 1082 makeConsistentForMutator(); | 1077 makeConsistentForMutator(); |
| 1083 | 1078 |
| 1084 takeSnapshot(SnapshotType::FreelistSnapshot); | 1079 takeSnapshot(SnapshotType::FreelistSnapshot); |
| 1085 | 1080 |
| 1086 // Force setting NoGCScheduled to circumvent checkThread() | 1081 // Force setting NoGCScheduled to circumvent checkThread() |
| (...skipping 14 matching lines...) |
| 1101 // We have to set the GCState to Sweeping before calling pre-finalizers | 1096 // We have to set the GCState to Sweeping before calling pre-finalizers |
| 1102 // to disallow a GC during the pre-finalizers. | 1097 // to disallow a GC during the pre-finalizers. |
| 1103 setGCState(Sweeping); | 1098 setGCState(Sweeping); |
| 1104 | 1099 |
| 1105 // Allocation is allowed during the pre-finalizers and destructors. | 1100 // Allocation is allowed during the pre-finalizers and destructors. |
| 1106 // However, they must not mutate an object graph in a way in which | 1101 // However, they must not mutate an object graph in a way in which |
| 1107 // a dead object gets resurrected. | 1102 // a dead object gets resurrected. |
| 1108 invokePreFinalizers(); | 1103 invokePreFinalizers(); |
| 1109 | 1104 |
| 1110 #if defined(ADDRESS_SANITIZER) | 1105 #if defined(ADDRESS_SANITIZER) |
| 1111 poisonEagerHeap(SetPoison); | 1106 poisonEagerHeap(BlinkGC::SetPoison); |
| 1112 #endif | 1107 #endif |
| 1113 | 1108 |
| 1114 eagerSweep(); | 1109 eagerSweep(); |
| 1115 #if defined(ADDRESS_SANITIZER) | 1110 #if defined(ADDRESS_SANITIZER) |
| 1116 poisonAllHeaps(); | 1111 poisonAllHeaps(); |
| 1117 #endif | 1112 #endif |
| 1118 if (previousGCState == EagerSweepScheduled) { | 1113 if (previousGCState == EagerSweepScheduled) { |
| 1119 // Eager sweeping should happen only in testing. | 1114 // Eager sweeping should happen only in testing. |
| 1120 completeSweep(); | 1115 completeSweep(); |
| 1121 } else { | 1116 } else { |
| 1122 // The default behavior is lazy sweeping. | 1117 // The default behavior is lazy sweeping. |
| 1123 scheduleIdleLazySweep(); | 1118 scheduleIdleLazySweep(); |
| 1124 } | 1119 } |
| 1125 | 1120 |
| 1126 #if ENABLE(GC_PROFILING) | 1121 #if ENABLE(GC_PROFILING) |
| 1127 snapshotFreeListIfNecessary(); | 1122 snapshotFreeListIfNecessary(); |
| 1128 #endif | 1123 #endif |
| 1129 } | 1124 } |
| 1130 | 1125 |
| 1131 #if defined(ADDRESS_SANITIZER) | 1126 #if defined(ADDRESS_SANITIZER) |
| 1132 void ThreadState::poisonAllHeaps() | 1127 void ThreadState::poisonAllHeaps() |
| 1133 { | 1128 { |
| 1134 // TODO(Oilpan): enable the poisoning always. | 1129 // TODO(Oilpan): enable the poisoning always. |
| 1135 #if ENABLE(OILPAN) | 1130 #if ENABLE(OILPAN) |
| 1136 // Unpoison the live objects remaining in the eager heaps.. | 1131 // Unpoison the live objects remaining in the eager heaps.. |
| 1137 poisonEagerHeap(ClearPoison); | 1132 poisonEagerHeap(BlinkGC::ClearPoison); |
| 1138 // ..along with poisoning all unmarked objects in the other heaps. | 1133 // ..along with poisoning all unmarked objects in the other heaps. |
| 1139 for (int i = 1; i < NumberOfHeaps; i++) | 1134 for (int i = 1; i < BlinkGC::NumberOfHeaps; i++) |
| 1140 m_heaps[i]->poisonHeap(UnmarkedOnly, SetPoison); | 1135 m_heaps[i]->poisonHeap(BlinkGC::UnmarkedOnly, BlinkGC::SetPoison); |
| 1141 #endif | 1136 #endif |
| 1142 } | 1137 } |
| 1143 | 1138 |
| 1144 void ThreadState::poisonEagerHeap(Poisoning poisoning) | 1139 void ThreadState::poisonEagerHeap(BlinkGC::Poisoning poisoning) |
| 1145 { | 1140 { |
| 1146 // TODO(Oilpan): enable the poisoning always. | 1141 // TODO(Oilpan): enable the poisoning always. |
| 1147 #if ENABLE(OILPAN) | 1142 #if ENABLE(OILPAN) |
| 1148 m_heaps[EagerSweepHeapIndex]->poisonHeap(MarkedAndUnmarked, poisoning); | 1143 m_heaps[BlinkGC::EagerSweepHeapIndex]->poisonHeap(BlinkGC::MarkedAndUnmarked, poisoning); |
| 1149 #endif | 1144 #endif |
| 1150 } | 1145 } |
| 1151 #endif | 1146 #endif |
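For readers new to the poisoning calls: at the ASan level, poisoning a byte range makes any load or store through it fault until the range is unpoisoned. The per-heap `poisonHeap()` page walk lives elsewhere; the sketch below is only the primitive it plausibly bottoms out in, using the public sanitizer macros (the helper name is illustrative).

```cpp
#include <cstddef>
#include <sanitizer/asan_interface.h>

// The primitive behind SetPoison/ClearPoison: mark an object payload as
// off-limits (or accessible again) to instrumented code.
inline void poisonPayload(void* payload, size_t size, bool setPoison)
{
    if (setPoison)
        ASAN_POISON_MEMORY_REGION(payload, size);   // Accesses now trap.
    else
        ASAN_UNPOISON_MEMORY_REGION(payload, size); // Accesses allowed again.
}
```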
| 1152 | 1147 |
| 1153 void ThreadState::eagerSweep() | 1148 void ThreadState::eagerSweep() |
| 1154 { | 1149 { |
| 1155 ASSERT(checkThread()); | 1150 ASSERT(checkThread()); |
| 1156 // Some objects need to be finalized promptly and cannot be handled | 1151 // Some objects need to be finalized promptly and cannot be handled |
| 1157 // by lazy sweeping. Keep those in a designated heap and sweep it | 1152 // by lazy sweeping. Keep those in a designated heap and sweep it |
| 1158 // eagerly. | 1153 // eagerly. |
| 1159 ASSERT(isSweepingInProgress()); | 1154 ASSERT(isSweepingInProgress()); |
| 1160 | 1155 |
| 1161 // Mirroring the completeSweep() condition; see its comment. | 1156 // Mirroring the completeSweep() condition; see its comment. |
| 1162 if (sweepForbidden()) | 1157 if (sweepForbidden()) |
| 1163 return; | 1158 return; |
| 1164 | 1159 |
| 1165 SweepForbiddenScope scope(this); | 1160 SweepForbiddenScope scope(this); |
| 1166 { | 1161 { |
| 1167 if (isMainThread()) | 1162 if (isMainThread()) |
| 1168 ScriptForbiddenScope::enter(); | 1163 ScriptForbiddenScope::enter(); |
| 1169 | 1164 |
| 1170 m_heaps[EagerSweepHeapIndex]->completeSweep(); | 1165 m_heaps[BlinkGC::EagerSweepHeapIndex]->completeSweep(); |
| 1171 | 1166 |
| 1172 if (isMainThread()) | 1167 if (isMainThread()) |
| 1173 ScriptForbiddenScope::exit(); | 1168 ScriptForbiddenScope::exit(); |
| 1174 } | 1169 } |
| 1175 } | 1170 } |
| 1176 | 1171 |
| 1177 void ThreadState::completeSweep() | 1172 void ThreadState::completeSweep() |
| 1178 { | 1173 { |
| 1179 ASSERT(checkThread()); | 1174 ASSERT(checkThread()); |
| 1180 // If we are not in a sweeping phase, there is nothing to do here. | 1175 // If we are not in a sweeping phase, there is nothing to do here. |
| 1181 if (!isSweepingInProgress()) | 1176 if (!isSweepingInProgress()) |
| 1182 return; | 1177 return; |
| 1183 | 1178 |
| 1184 // completeSweep() can be called recursively if finalizers can allocate | 1179 // completeSweep() can be called recursively if finalizers can allocate |
| 1185 // memory and the allocation triggers completeSweep(). This check prevents | 1180 // memory and the allocation triggers completeSweep(). This check prevents |
| 1186 // the sweeping from being executed recursively. | 1181 // the sweeping from being executed recursively. |
| 1187 if (sweepForbidden()) | 1182 if (sweepForbidden()) |
| 1188 return; | 1183 return; |
| 1189 | 1184 |
| 1190 SweepForbiddenScope scope(this); | 1185 SweepForbiddenScope scope(this); |
| 1191 { | 1186 { |
| 1192 if (isMainThread()) | 1187 if (isMainThread()) |
| 1193 ScriptForbiddenScope::enter(); | 1188 ScriptForbiddenScope::enter(); |
| 1194 | 1189 |
| 1195 TRACE_EVENT0("blink_gc", "ThreadState::completeSweep"); | 1190 TRACE_EVENT0("blink_gc", "ThreadState::completeSweep"); |
| 1196 double timeStamp = WTF::currentTimeMS(); | 1191 double timeStamp = WTF::currentTimeMS(); |
| 1197 | 1192 |
| 1198 static_assert(EagerSweepHeapIndex == 0, "Eagerly swept heaps must be processed first."); | 1193 static_assert(BlinkGC::EagerSweepHeapIndex == 0, "Eagerly swept heaps must be processed first."); |
| 1199 for (int i = 0; i < NumberOfHeaps; i++) | 1194 for (int i = 0; i < BlinkGC::NumberOfHeaps; i++) |
| 1200 m_heaps[i]->completeSweep(); | 1195 m_heaps[i]->completeSweep(); |
| 1201 | 1196 |
| 1202 Platform::current()->histogramCustomCounts("BlinkGC.CompleteSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 1197 Platform::current()->histogramCustomCounts("BlinkGC.CompleteSweep", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
| 1203 | 1198 |
| 1204 if (isMainThread()) | 1199 if (isMainThread()) |
| 1205 ScriptForbiddenScope::exit(); | 1200 ScriptForbiddenScope::exit(); |
| 1206 } | 1201 } |
| 1207 | 1202 |
| 1208 postSweep(); | 1203 postSweep(); |
| 1209 } | 1204 } |
| (...skipping 30 matching lines...) |
| 1240 scheduleIdleGC(); | 1235 scheduleIdleGC(); |
| 1241 break; | 1236 break; |
| 1242 default: | 1237 default: |
| 1243 ASSERT_NOT_REACHED(); | 1238 ASSERT_NOT_REACHED(); |
| 1244 } | 1239 } |
| 1245 } | 1240 } |
| 1246 | 1241 |
| 1247 void ThreadState::prepareForThreadStateTermination() | 1242 void ThreadState::prepareForThreadStateTermination() |
| 1248 { | 1243 { |
| 1249 ASSERT(checkThread()); | 1244 ASSERT(checkThread()); |
| 1250 for (int i = 0; i < NumberOfHeaps; ++i) | 1245 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 1251 m_heaps[i]->prepareHeapForTermination(); | 1246 m_heaps[i]->prepareHeapForTermination(); |
| 1252 } | 1247 } |
| 1253 | 1248 |
| 1254 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 1249 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 1255 BasePage* ThreadState::findPageFromAddress(Address address) | 1250 BasePage* ThreadState::findPageFromAddress(Address address) |
| 1256 { | 1251 { |
| 1257 for (int i = 0; i < NumberOfHeaps; ++i) { | 1252 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) { |
| 1258 if (BasePage* page = m_heaps[i]->findPageFromAddress(address)) | 1253 if (BasePage* page = m_heaps[i]->findPageFromAddress(address)) |
| 1259 return page; | 1254 return page; |
| 1260 } | 1255 } |
| 1261 return nullptr; | 1256 return nullptr; |
| 1262 } | 1257 } |
| 1263 #endif | 1258 #endif |
| 1264 | 1259 |
| 1265 size_t ThreadState::objectPayloadSizeForTesting() | 1260 size_t ThreadState::objectPayloadSizeForTesting() |
| 1266 { | 1261 { |
| 1267 size_t objectPayloadSize = 0; | 1262 size_t objectPayloadSize = 0; |
| 1268 for (int i = 0; i < NumberOfHeaps; ++i) | 1263 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 1269 objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting(); | 1264 objectPayloadSize += m_heaps[i]->objectPayloadSizeForTesting(); |
| 1270 return objectPayloadSize; | 1265 return objectPayloadSize; |
| 1271 } | 1266 } |
| 1272 | 1267 |
| 1273 bool ThreadState::stopThreads() | 1268 bool ThreadState::stopThreads() |
| 1274 { | 1269 { |
| 1275 return s_safePointBarrier->parkOthers(); | 1270 return s_safePointBarrier->parkOthers(); |
| 1276 } | 1271 } |
| 1277 | 1272 |
| 1278 void ThreadState::resumeThreads() | 1273 void ThreadState::resumeThreads() |
| 1279 { | 1274 { |
| 1280 s_safePointBarrier->resumeOthers(); | 1275 s_safePointBarrier->resumeOthers(); |
| 1281 } | 1276 } |
| 1282 | 1277 |
| 1283 void ThreadState::safePoint(StackState stackState) | 1278 void ThreadState::safePoint(BlinkGC::StackState stackState) |
| 1284 { | 1279 { |
| 1285 ASSERT(checkThread()); | 1280 ASSERT(checkThread()); |
| 1286 Heap::reportMemoryUsageForTracing(); | 1281 Heap::reportMemoryUsageForTracing(); |
| 1287 | 1282 |
| 1288 runScheduledGC(stackState); | 1283 runScheduledGC(stackState); |
| 1289 ASSERT(!m_atSafePoint); | 1284 ASSERT(!m_atSafePoint); |
| 1290 m_stackState = stackState; | 1285 m_stackState = stackState; |
| 1291 m_atSafePoint = true; | 1286 m_atSafePoint = true; |
| 1292 s_safePointBarrier->checkAndPark(this); | 1287 s_safePointBarrier->checkAndPark(this); |
| 1293 m_atSafePoint = false; | 1288 m_atSafePoint = false; |
| 1294 m_stackState = HeapPointersOnStack; | 1289 m_stackState = BlinkGC::HeapPointersOnStack; |
| 1295 preSweep(); | 1290 preSweep(); |
| 1296 } | 1291 } |
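As a usage note, the entry point above pairs with the scope-based form used later in this file (`addInterruptor()`/`removeInterruptor()`); the blocking call in the sketch is a hypothetical stand-in.

```cpp
// Scope-based parking, mirroring addInterruptor()/removeInterruptor() below:
// code that may block while other threads GC wraps the blocking region in a
// SafePointScope naming its stack state.
void waitOnSomethingSlow()
{
    SafePointScope scope(BlinkGC::HeapPointersOnStack);
    // somethingSlow();  // Hypothetical blocking call; a GC may run while
                         // this thread is parked inside the scope.
}
```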
| 1297 | 1292 |
| 1298 #ifdef ADDRESS_SANITIZER | 1293 #ifdef ADDRESS_SANITIZER |
| 1299 // When we are running under AddressSanitizer with detect_stack_use_after_return=1 | 1294 // When we are running under AddressSanitizer with detect_stack_use_after_return=1 |
| 1300 // then stack marker obtained from SafePointScope will point into a fake stack. | 1295 // then stack marker obtained from SafePointScope will point into a fake stack. |
| 1301 // Detect this case by checking if it falls in between current stack frame | 1296 // Detect this case by checking if it falls in between current stack frame |
| 1302 // and stack start and use an arbitrary high enough value for it. | 1297 // and stack start and use an arbitrary high enough value for it. |
| 1303 // Don't adjust stack marker in any other case to match behavior of code running | 1298 // Don't adjust stack marker in any other case to match behavior of code running |
| 1304 // without AddressSanitizer. | 1299 // without AddressSanitizer. |
| 1305 NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAdressSanitizer(void* scopeMarker) | 1300 NO_SANITIZE_ADDRESS static void* adjustScopeMarkerForAdressSanitizer(void* scopeMarker) |
| 1306 { | 1301 { |
| 1307 Address start = reinterpret_cast<Address>(StackFrameDepth::getStackStart()); | 1302 Address start = reinterpret_cast<Address>(StackFrameDepth::getStackStart()); |
| 1308 Address end = reinterpret_cast<Address>(&start); | 1303 Address end = reinterpret_cast<Address>(&start); |
| 1309 RELEASE_ASSERT(end < start); | 1304 RELEASE_ASSERT(end < start); |
| 1310 | 1305 |
| 1311 if (end <= scopeMarker && scopeMarker < start) | 1306 if (end <= scopeMarker && scopeMarker < start) |
| 1312 return scopeMarker; | 1307 return scopeMarker; |
| 1313 | 1308 |
| 1314 // 256 is as good an approximation as any else. | 1309 // 256 is as good an approximation as any else. |
| 1315 const size_t bytesToCopy = sizeof(Address) * 256; | 1310 const size_t bytesToCopy = sizeof(Address) * 256; |
| 1316 if (static_cast<size_t>(start - end) < bytesToCopy) | 1311 if (static_cast<size_t>(start - end) < bytesToCopy) |
| 1317 return start; | 1312 return start; |
| 1318 | 1313 |
| 1319 return end + bytesToCopy; | 1314 return end + bytesToCopy; |
| 1320 } | 1315 } |
| 1321 #endif | 1316 #endif |
| 1322 | 1317 |
| 1323 void ThreadState::enterSafePoint(StackState stackState, void* scopeMarker) | 1318 void ThreadState::enterSafePoint(BlinkGC::StackState stackState, void* scopeMarker) |
| 1324 { | 1319 { |
| 1325 ASSERT(checkThread()); | 1320 ASSERT(checkThread()); |
| 1326 #ifdef ADDRESS_SANITIZER | 1321 #ifdef ADDRESS_SANITIZER |
| 1327 if (stackState == HeapPointersOnStack) | 1322 if (stackState == BlinkGC::HeapPointersOnStack) |
| 1328 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker); | 1323 scopeMarker = adjustScopeMarkerForAdressSanitizer(scopeMarker); |
| 1329 #endif | 1324 #endif |
| 1330 ASSERT(stackState == NoHeapPointersOnStack || scopeMarker); | 1325 ASSERT(stackState == BlinkGC::NoHeapPointersOnStack || scopeMarker); |
| 1331 runScheduledGC(stackState); | 1326 runScheduledGC(stackState); |
| 1332 ASSERT(!m_atSafePoint); | 1327 ASSERT(!m_atSafePoint); |
| 1333 m_atSafePoint = true; | 1328 m_atSafePoint = true; |
| 1334 m_stackState = stackState; | 1329 m_stackState = stackState; |
| 1335 m_safePointScopeMarker = scopeMarker; | 1330 m_safePointScopeMarker = scopeMarker; |
| 1336 s_safePointBarrier->enterSafePoint(this); | 1331 s_safePointBarrier->enterSafePoint(this); |
| 1337 } | 1332 } |
| 1338 | 1333 |
| 1339 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) | 1334 void ThreadState::leaveSafePoint(SafePointAwareMutexLocker* locker) |
| 1340 { | 1335 { |
| 1341 ASSERT(checkThread()); | 1336 ASSERT(checkThread()); |
| 1342 ASSERT(m_atSafePoint); | 1337 ASSERT(m_atSafePoint); |
| 1343 s_safePointBarrier->leaveSafePoint(this, locker); | 1338 s_safePointBarrier->leaveSafePoint(this, locker); |
| 1344 m_atSafePoint = false; | 1339 m_atSafePoint = false; |
| 1345 m_stackState = HeapPointersOnStack; | 1340 m_stackState = BlinkGC::HeapPointersOnStack; |
| 1346 clearSafePointScopeMarker(); | 1341 clearSafePointScopeMarker(); |
| 1347 preSweep(); | 1342 preSweep(); |
| 1348 } | 1343 } |
| 1349 | 1344 |
| 1350 void ThreadState::copyStackUntilSafePointScope() | 1345 void ThreadState::copyStackUntilSafePointScope() |
| 1351 { | 1346 { |
| 1352 if (!m_safePointScopeMarker || m_stackState == NoHeapPointersOnStack) | 1347 if (!m_safePointScopeMarker || m_stackState == BlinkGC::NoHeapPointersOnStack) |
| 1353 return; | 1348 return; |
| 1354 | 1349 |
| 1355 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); | 1350 Address* to = reinterpret_cast<Address*>(m_safePointScopeMarker); |
| 1356 Address* from = reinterpret_cast<Address*>(m_endOfStack); | 1351 Address* from = reinterpret_cast<Address*>(m_endOfStack); |
| 1357 RELEASE_ASSERT(from < to); | 1352 RELEASE_ASSERT(from < to); |
| 1358 RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack)); | 1353 RELEASE_ASSERT(to <= reinterpret_cast<Address*>(m_startOfStack)); |
| 1359 size_t slotCount = static_cast<size_t>(to - from); | 1354 size_t slotCount = static_cast<size_t>(to - from); |
| 1360 // Catch potential performance issues. | 1355 // Catch potential performance issues. |
| 1361 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 1356 #if defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
| 1362 // ASan/LSan use more space on the stack and we therefore | 1357 // ASan/LSan use more space on the stack and we therefore |
| 1363 // increase the allowed stack copying for those builds. | 1358 // increase the allowed stack copying for those builds. |
| 1364 ASSERT(slotCount < 2048); | 1359 ASSERT(slotCount < 2048); |
| 1365 #else | 1360 #else |
| 1366 ASSERT(slotCount < 1024); | 1361 ASSERT(slotCount < 1024); |
| 1367 #endif | 1362 #endif |
| 1368 | 1363 |
| 1369 ASSERT(!m_safePointStackCopy.size()); | 1364 ASSERT(!m_safePointStackCopy.size()); |
| 1370 m_safePointStackCopy.resize(slotCount); | 1365 m_safePointStackCopy.resize(slotCount); |
| 1371 for (size_t i = 0; i < slotCount; ++i) { | 1366 for (size_t i = 0; i < slotCount; ++i) { |
| 1372 m_safePointStackCopy[i] = from[i]; | 1367 m_safePointStackCopy[i] = from[i]; |
| 1373 } | 1368 } |
| 1374 } | 1369 } |
| 1375 | 1370 |
| 1376 void ThreadState::addInterruptor(PassOwnPtr<Interruptor> interruptor) | 1371 void ThreadState::addInterruptor(PassOwnPtr<Interruptor> interruptor) |
| 1377 { | 1372 { |
| 1378 ASSERT(checkThread()); | 1373 ASSERT(checkThread()); |
| 1379 SafePointScope scope(HeapPointersOnStack); | 1374 SafePointScope scope(BlinkGC::HeapPointersOnStack); |
| 1380 { | 1375 { |
| 1381 MutexLocker locker(threadAttachMutex()); | 1376 MutexLocker locker(threadAttachMutex()); |
| 1382 m_interruptors.append(interruptor); | 1377 m_interruptors.append(interruptor); |
| 1383 } | 1378 } |
| 1384 } | 1379 } |
| 1385 | 1380 |
| 1386 void ThreadState::removeInterruptor(Interruptor* interruptor) | 1381 void ThreadState::removeInterruptor(Interruptor* interruptor) |
| 1387 { | 1382 { |
| 1388 ASSERT(checkThread()); | 1383 ASSERT(checkThread()); |
| 1389 SafePointScope scope(HeapPointersOnStack); | 1384 SafePointScope scope(BlinkGC::HeapPointersOnStack); |
| 1390 { | 1385 { |
| 1391 MutexLocker locker(threadAttachMutex()); | 1386 MutexLocker locker(threadAttachMutex()); |
| 1392 size_t index = m_interruptors.find(interruptor); | 1387 size_t index = m_interruptors.find(interruptor); |
| 1393 RELEASE_ASSERT(index != kNotFound); | 1388 RELEASE_ASSERT(index != kNotFound); |
| 1394 m_interruptors.remove(index); | 1389 m_interruptors.remove(index); |
| 1395 } | 1390 } |
| 1396 } | 1391 } |
| 1397 | 1392 |
| 1398 void ThreadState::Interruptor::onInterrupted() | 1393 void ThreadState::Interruptor::onInterrupted() |
| 1399 { | 1394 { |
| 1400 ThreadState* state = ThreadState::current(); | 1395 ThreadState* state = ThreadState::current(); |
| 1401 ASSERT(state); | 1396 ASSERT(state); |
| 1402 ASSERT(!state->isAtSafePoint()); | 1397 ASSERT(!state->isAtSafePoint()); |
| 1403 state->safePoint(HeapPointersOnStack); | 1398 state->safePoint(BlinkGC::HeapPointersOnStack); |
| 1404 } | 1399 } |
| 1405 | 1400 |
| 1406 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() | 1401 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() |
| 1407 { | 1402 { |
| 1408 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); | 1403 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); |
| 1409 return threads; | 1404 return threads; |
| 1410 } | 1405 } |
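DEFINE_STATIC_LOCAL lazily constructs a function-local static that is intentionally never destroyed, so the thread registry outlives any static-destruction ordering problems at shutdown. A rough plain-C++ equivalent of what the macro expands to (modulo WTF's extra debug checks):

    ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
    {
        // Leaked on purpose: no static destructor runs at process exit.
        static AttachedThreadStateSet* threads = new AttachedThreadStateSet();
        return *threads;
    }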
| 1411 | 1406 |
| 1412 void ThreadState::lockThreadAttachMutex() | 1407 void ThreadState::lockThreadAttachMutex() |
| 1413 { | 1408 { |
| (...skipping 25 matching lines...) |
| 1439 } | 1434 } |
| 1440 // FIXME: removeAll is inefficient. It can shrink repeatedly. | 1435 // FIXME: removeAll is inefficient. It can shrink repeatedly. |
| 1441 m_orderedPreFinalizers.removeAll(deadPreFinalizers); | 1436 m_orderedPreFinalizers.removeAll(deadPreFinalizers); |
| 1442 | 1437 |
| 1443 if (isMainThread()) | 1438 if (isMainThread()) |
| 1444 ScriptForbiddenScope::exit(); | 1439 ScriptForbiddenScope::exit(); |
| 1445 } | 1440 } |
| 1446 | 1441 |
| 1447 void ThreadState::clearHeapAges() | 1442 void ThreadState::clearHeapAges() |
| 1448 { | 1443 { |
| 1449 memset(m_heapAges, 0, sizeof(size_t) * NumberOfHeaps); | 1444 memset(m_heapAges, 0, sizeof(size_t) * BlinkGC::NumberOfHeaps); |
| 1450 memset(m_likelyToBePromptlyFreed.get(), 0, sizeof(int) * likelyToBePromptlyFreedArraySize); | 1445 memset(m_likelyToBePromptlyFreed.get(), 0, sizeof(int) * likelyToBePromptlyFreedArraySize); |
| 1451 m_currentHeapAges = 0; | 1446 m_currentHeapAges = 0; |
| 1452 } | 1447 } |
| 1453 | 1448 |
| 1454 int ThreadState::heapIndexOfVectorHeapLeastRecentlyExpanded(int beginHeapIndex, int endHeapIndex) | 1449 int ThreadState::heapIndexOfVectorHeapLeastRecentlyExpanded(int beginHeapIndex, int endHeapIndex) |
| 1455 { | 1450 { |
| 1456 size_t minHeapAge = m_heapAges[beginHeapIndex]; | 1451 size_t minHeapAge = m_heapAges[beginHeapIndex]; |
| 1457 int heapIndexWithMinHeapAge = beginHeapIndex; | 1452 int heapIndexWithMinHeapAge = beginHeapIndex; |
| 1458 for (int heapIndex = beginHeapIndex + 1; heapIndex <= endHeapIndex; heapIndex++) { | 1453 for (int heapIndex = beginHeapIndex + 1; heapIndex <= endHeapIndex; heapIndex++) { |
| 1459 if (m_heapAges[heapIndex] < minHeapAge) { | 1454 if (m_heapAges[heapIndex] < minHeapAge) { |
| 1460 minHeapAge = m_heapAges[heapIndex]; | 1455 minHeapAge = m_heapAges[heapIndex]; |
| 1461 heapIndexWithMinHeapAge = heapIndex; | 1456 heapIndexWithMinHeapAge = heapIndex; |
| 1462 } | 1457 } |
| 1463 } | 1458 } |
| 1464 ASSERT(isVectorHeapIndex(heapIndexWithMinHeapAge)); | 1459 ASSERT(isVectorHeapIndex(heapIndexWithMinHeapAge)); |
| 1465 return heapIndexWithMinHeapAge; | 1460 return heapIndexWithMinHeapAge; |
| 1466 } | 1461 } |
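In other words, the scan returns the vector heap whose allocation point was bumped longest ago. A self-contained model of the same logic, with the ThreadState members replaced by plain arrays (sizes and values illustrative):

    #include <cstddef>

    // heapAges[i] holds the "time" heap i was last expanded; because the age
    // counter only increases, the smallest entry is the least recently
    // expanded heap in the range.
    int leastRecentlyExpanded(const size_t* heapAges, int begin, int end)
    {
        size_t minAge = heapAges[begin];
        int minIndex = begin;
        for (int i = begin + 1; i <= end; ++i) {
            if (heapAges[i] < minAge) {
                minAge = heapAges[i];
                minIndex = i;
            }
        }
        return minIndex;
    }

    // Usage: four vector heaps; index 2 was expanded longest ago.
    //   size_t ages[] = { 7, 9, 3, 8 };
    //   int next = leastRecentlyExpanded(ages, 0, 3); // == 2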
| 1467 | 1462 |
| 1468 BaseHeap* ThreadState::expandedVectorBackingHeap(size_t gcInfoIndex) | 1463 BaseHeap* ThreadState::expandedVectorBackingHeap(size_t gcInfoIndex) |
| 1469 { | 1464 { |
| 1470 ASSERT(checkThread()); | 1465 ASSERT(checkThread()); |
| 1471 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; | 1466 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; |
| 1472 --m_likelyToBePromptlyFreed[entryIndex]; | 1467 --m_likelyToBePromptlyFreed[entryIndex]; |
| 1473 int heapIndex = m_vectorBackingHeapIndex; | 1468 int heapIndex = m_vectorBackingHeapIndex; |
| 1474 m_heapAges[heapIndex] = ++m_currentHeapAges; | 1469 m_heapAges[heapIndex] = ++m_currentHeapAges; |
| 1475 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex); | 1470 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(BlinkGC::Vector1HeapIndex, BlinkGC::Vector4HeapIndex); |
| 1476 return m_heaps[heapIndex]; | 1471 return m_heaps[heapIndex]; |
| 1477 } | 1472 } |
| 1478 | 1473 |
| 1479 void ThreadState::allocationPointAdjusted(int heapIndex) | 1474 void ThreadState::allocationPointAdjusted(int heapIndex) |
| 1480 { | 1475 { |
| 1481 m_heapAges[heapIndex] = ++m_currentHeapAges; | 1476 m_heapAges[heapIndex] = ++m_currentHeapAges; |
| 1482 if (m_vectorBackingHeapIndex == heapIndex) | 1477 if (m_vectorBackingHeapIndex == heapIndex) |
| 1483 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(Vector1HeapIndex, Vector4HeapIndex); | 1478 m_vectorBackingHeapIndex = heapIndexOfVectorHeapLeastRecentlyExpanded(BlinkGC::Vector1HeapIndex, BlinkGC::Vector4HeapIndex); |
| 1484 } | 1479 } |
| 1485 | 1480 |
| 1486 void ThreadState::promptlyFreed(size_t gcInfoIndex) | 1481 void ThreadState::promptlyFreed(size_t gcInfoIndex) |
| 1487 { | 1482 { |
| 1488 ASSERT(checkThread()); | 1483 ASSERT(checkThread()); |
| 1489 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; | 1484 size_t entryIndex = gcInfoIndex & likelyToBePromptlyFreedArrayMask; |
| 1490 // See the comment in vectorBackingHeap() for why this is +3. | 1485 // See the comment in vectorBackingHeap() for why this is +3. |
| 1491 m_likelyToBePromptlyFreed[entryIndex] += 3; | 1486 m_likelyToBePromptlyFreed[entryIndex] += 3; |
| 1492 } | 1487 } |
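Taken together with the decrement in expandedVectorBackingHeap() above, this maintains a small score per (masked) gcInfoIndex: +3 for each prompt free, -1 for each backing expansion. A standalone model of the counter; the positive-score predicate is inferred from the "+3" comment, since vectorBackingHeap() itself is outside this diff, and the array size here is illustrative:

    #include <cstddef>

    const size_t likelyToBePromptlyFreedArraySize = 1 << 8; // illustrative
    const size_t likelyToBePromptlyFreedArrayMask = likelyToBePromptlyFreedArraySize - 1;
    int likelyToBePromptlyFreed[likelyToBePromptlyFreedArraySize]; // zero-initialized

    // Entries are aliased by the mask, as in ThreadState, so unrelated
    // types may occasionally share (and perturb) a counter.
    void onPromptlyFreed(size_t gcInfoIndex)
    {
        likelyToBePromptlyFreed[gcInfoIndex & likelyToBePromptlyFreedArrayMask] += 3;
    }

    void onBackingExpanded(size_t gcInfoIndex)
    {
        likelyToBePromptlyFreed[gcInfoIndex & likelyToBePromptlyFreedArrayMask] -= 1;
    }

    // Inferred predicate: a positive score means backings of this type have
    // lately been freed promptly more often than they have been expanded.
    bool isLikelyPromptlyFreed(size_t gcInfoIndex)
    {
        return likelyToBePromptlyFreed[gcInfoIndex & likelyToBePromptlyFreedArrayMask] > 0;
    }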
| 1493 | 1488 |
| 1494 void ThreadState::takeSnapshot(SnapshotType type) | 1489 void ThreadState::takeSnapshot(SnapshotType type) |
| 1495 { | 1490 { |
| 1496 ASSERT(isInGC()); | 1491 ASSERT(isInGC()); |
| 1497 | 1492 |
| 1498 // 0 is used as index for freelist entries. Objects are indexed 1 to | 1493 // 0 is used as index for freelist entries. Objects are indexed 1 to |
| 1499 // gcInfoIndex. | 1494 // gcInfoIndex. |
| 1500 GCSnapshotInfo info(GCInfoTable::gcInfoIndex() + 1); | 1495 GCSnapshotInfo info(GCInfoTable::gcInfoIndex() + 1); |
| 1501 String threadDumpName = String::format("blink_gc/thread_%lu", static_cast<unsigned long>(m_thread)); | 1496 String threadDumpName = String::format("blink_gc/thread_%lu", static_cast<unsigned long>(m_thread)); |
| 1502 const String heapsDumpName = threadDumpName + "/heaps"; | 1497 const String heapsDumpName = threadDumpName + "/heaps"; |
| 1503 const String classesDumpName = threadDumpName + "/classes"; | 1498 const String classesDumpName = threadDumpName + "/classes"; |
| 1504 | 1499 |
| 1505 int numberOfHeapsReported = 0; | 1500 int numberOfHeapsReported = 0; |
| 1506 #define SNAPSHOT_HEAP(HeapType) \ | 1501 #define SNAPSHOT_HEAP(HeapType) \ |
| 1507 { \ | 1502 { \ |
| 1508 numberOfHeapsReported++; \ | 1503 numberOfHeapsReported++; \ |
| 1509 switch (type) { \ | 1504 switch (type) { \ |
| 1510 case SnapshotType::HeapSnapshot: \ | 1505 case SnapshotType::HeapSnapshot: \ |
| 1511 m_heaps[HeapType##HeapIndex]->takeSnapshot(heapsDumpName + "/" #HeapType, info); \ | 1506 m_heaps[BlinkGC::HeapType##HeapIndex]->takeSnapshot(heapsDumpName + "/" #HeapType, info); \ |
| 1512 break; \ | 1507 break; \ |
| 1513 case SnapshotType::FreelistSnapshot: \ | 1508 case SnapshotType::FreelistSnapshot: \ |
| 1514 m_heaps[HeapType##HeapIndex]->takeFreelistSnapshot(heapsDumpName + "/" #HeapType); \ | 1509 m_heaps[BlinkGC::HeapType##HeapIndex]->takeFreelistSnapshot(heapsDumpName + "/" #HeapType); \ |
| 1515 break; \ | 1510 break; \ |
| 1516 default: \ | 1511 default: \ |
| 1517 ASSERT_NOT_REACHED(); \ | 1512 ASSERT_NOT_REACHED(); \ |
| 1518 } \ | 1513 } \ |
| 1519 } | 1514 } |
| 1520 | 1515 |
| 1521 SNAPSHOT_HEAP(NormalPage1); | 1516 SNAPSHOT_HEAP(NormalPage1); |
| 1522 SNAPSHOT_HEAP(NormalPage2); | 1517 SNAPSHOT_HEAP(NormalPage2); |
| 1523 SNAPSHOT_HEAP(NormalPage3); | 1518 SNAPSHOT_HEAP(NormalPage3); |
| 1524 SNAPSHOT_HEAP(NormalPage4); | 1519 SNAPSHOT_HEAP(NormalPage4); |
| 1525 SNAPSHOT_HEAP(EagerSweep); | 1520 SNAPSHOT_HEAP(EagerSweep); |
| 1526 SNAPSHOT_HEAP(Vector1); | 1521 SNAPSHOT_HEAP(Vector1); |
| 1527 SNAPSHOT_HEAP(Vector2); | 1522 SNAPSHOT_HEAP(Vector2); |
| 1528 SNAPSHOT_HEAP(Vector3); | 1523 SNAPSHOT_HEAP(Vector3); |
| 1529 SNAPSHOT_HEAP(Vector4); | 1524 SNAPSHOT_HEAP(Vector4); |
| 1530 SNAPSHOT_HEAP(InlineVector); | 1525 SNAPSHOT_HEAP(InlineVector); |
| 1531 SNAPSHOT_HEAP(HashTable); | 1526 SNAPSHOT_HEAP(HashTable); |
| 1532 SNAPSHOT_HEAP(LargeObject); | 1527 SNAPSHOT_HEAP(LargeObject); |
| 1533 FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP); | 1528 FOR_EACH_TYPED_HEAP(SNAPSHOT_HEAP); |
| 1534 | 1529 |
| 1535 ASSERT(numberOfHeapsReported == NumberOfHeaps); | 1530 ASSERT(numberOfHeapsReported == BlinkGC::NumberOfHeaps); |
| 1536 | 1531 |
| 1537 #undef SNAPSHOT_HEAP | 1532 #undef SNAPSHOT_HEAP |
| 1538 | 1533 |
| 1539 if (type == SnapshotType::FreelistSnapshot) | 1534 if (type == SnapshotType::FreelistSnapshot) |
| 1540 return; | 1535 return; |
| 1541 | 1536 |
| 1542 size_t totalLiveCount = 0; | 1537 size_t totalLiveCount = 0; |
| 1543 size_t totalDeadCount = 0; | 1538 size_t totalDeadCount = 0; |
| 1544 size_t totalLiveSize = 0; | 1539 size_t totalLiveSize = 0; |
| 1545 size_t totalDeadSize = 0; | 1540 size_t totalDeadSize = 0; |
| (...skipping 60 matching lines...) |
| 1606 } | 1601 } |
| 1607 | 1602 |
| 1608 void ThreadState::snapshotFreeList() | 1603 void ThreadState::snapshotFreeList() |
| 1609 { | 1604 { |
| 1610 RefPtr<TracedValue> json = TracedValue::create(); | 1605 RefPtr<TracedValue> json = TracedValue::create(); |
| 1611 | 1606 |
| 1612 #define SNAPSHOT_FREE_LIST(HeapType) \ | 1607 #define SNAPSHOT_FREE_LIST(HeapType) \ |
| 1613 { \ | 1608 { \ |
| 1614 json->beginDictionary(); \ | 1609 json->beginDictionary(); \ |
| 1615 json->setString("name", #HeapType); \ | 1610 json->setString("name", #HeapType); \ |
| 1616 m_heaps[HeapType##HeapIndex]->snapshotFreeList(*json); \ | 1611 m_heaps[BlinkGC::HeapType##HeapIndex]->snapshotFreeList(*json); \ |
| 1617 json->endDictionary(); \ | 1612 json->endDictionary(); \ |
| 1618 } | 1613 } |
| 1619 | 1614 |
| 1620 json->beginArray("heaps"); | 1615 json->beginArray("heaps"); |
| 1621 SNAPSHOT_FREE_LIST(EagerSweep); | 1616 SNAPSHOT_FREE_LIST(EagerSweep); |
| 1622 SNAPSHOT_FREE_LIST(NormalPage1); | 1617 SNAPSHOT_FREE_LIST(NormalPage1); |
| 1623 SNAPSHOT_FREE_LIST(NormalPage2); | 1618 SNAPSHOT_FREE_LIST(NormalPage2); |
| 1624 SNAPSHOT_FREE_LIST(NormalPage3); | 1619 SNAPSHOT_FREE_LIST(NormalPage3); |
| 1625 SNAPSHOT_FREE_LIST(NormalPage4); | 1620 SNAPSHOT_FREE_LIST(NormalPage4); |
| 1626 SNAPSHOT_FREE_LIST(Vector1); | 1621 SNAPSHOT_FREE_LIST(Vector1); |
| (...skipping 10 matching lines...) |
| 1637 | 1632 |
| 1638 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "FreeList", this, json.release()); | 1633 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "FreeList", this, json.release()); |
| 1639 } | 1634 } |
| 1640 | 1635 |
| 1641 void ThreadState::collectAndReportMarkSweepStats() const | 1636 void ThreadState::collectAndReportMarkSweepStats() const |
| 1642 { | 1637 { |
| 1643 if (!isMainThread()) | 1638 if (!isMainThread()) |
| 1644 return; | 1639 return; |
| 1645 | 1640 |
| 1646 ClassAgeCountsMap markingClassAgeCounts; | 1641 ClassAgeCountsMap markingClassAgeCounts; |
| 1647 for (int i = 0; i < NumberOfHeaps; ++i) | 1642 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 1648 m_heaps[i]->countMarkedObjects(markingClassAgeCounts); | 1643 m_heaps[i]->countMarkedObjects(markingClassAgeCounts); |
| 1649 reportMarkSweepStats("MarkingStats", markingClassAgeCounts); | 1644 reportMarkSweepStats("MarkingStats", markingClassAgeCounts); |
| 1650 | 1645 |
| 1651 ClassAgeCountsMap sweepingClassAgeCounts; | 1646 ClassAgeCountsMap sweepingClassAgeCounts; |
| 1652 for (int i = 0; i < NumberOfHeaps; ++i) | 1647 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 1653 m_heaps[i]->countObjectsToSweep(sweepingClassAgeCounts); | 1648 m_heaps[i]->countObjectsToSweep(sweepingClassAgeCounts); |
| 1654 reportMarkSweepStats("SweepingStats", sweepingClassAgeCounts); | 1649 reportMarkSweepStats("SweepingStats", sweepingClassAgeCounts); |
| 1655 } | 1650 } |
| 1656 | 1651 |
| 1657 void ThreadState::reportMarkSweepStats(const char* statsName, const ClassAgeCountsMap& classAgeCounts) const | 1652 void ThreadState::reportMarkSweepStats(const char* statsName, const ClassAgeCountsMap& classAgeCounts) const |
| 1658 { | 1653 { |
| 1659 RefPtr<TracedValue> json = TracedValue::create(); | 1654 RefPtr<TracedValue> json = TracedValue::create(); |
| 1660 for (ClassAgeCountsMap::const_iterator it = classAgeCounts.begin(), end = classAgeCounts.end(); it != end; ++it) { | 1655 for (ClassAgeCountsMap::const_iterator it = classAgeCounts.begin(), end = classAgeCounts.end(); it != end; ++it) { |
| 1661 json->beginArray(it->key.ascii().data()); | 1656 json->beginArray(it->key.ascii().data()); |
| 1662 for (size_t age = 0; age <= maxHeapObjectAge; ++age) | 1657 for (size_t age = 0; age <= maxHeapObjectAge; ++age) |
| 1663 json->pushInteger(it->value.ages[age]); | 1658 json->pushInteger(it->value.ages[age]); |
| 1664 json->endArray(); | 1659 json->endArray(); |
| 1665 } | 1660 } |
| 1666 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); | 1661 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(TRACE_DISABLED_BY_DEFAULT("blink_gc"), statsName, this, json.release()); |
| 1667 } | 1662 } |
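For orientation, the trace value built above maps each class name to an array of per-age object counts. Shape only, with illustrative class names and values:

    // {
    //   "blink::Element": [12, 4, 1, ..., 0],  // indices are ages 0..maxHeapObjectAge
    //   "blink::Node":    [ 3, 9, 2, ..., 1]
    // }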
| 1668 #endif | 1663 #endif |
| 1669 | 1664 |
| 1670 } // namespace blink | 1665 } // namespace blink |