Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 30 | 30 |
| 31 #include "platform/heap/ThreadState.h" | 31 #include "platform/heap/ThreadState.h" |
| 32 | 32 |
| 33 #include "platform/ScriptForbiddenScope.h" | 33 #include "platform/ScriptForbiddenScope.h" |
| 34 #include "platform/TraceEvent.h" | 34 #include "platform/TraceEvent.h" |
| 35 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 35 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| 36 #include "platform/heap/CallbackStack.h" | 36 #include "platform/heap/CallbackStack.h" |
| 37 #include "platform/heap/Handle.h" | 37 #include "platform/heap/Handle.h" |
| 38 #include "platform/heap/Heap.h" | 38 #include "platform/heap/Heap.h" |
| 39 #include "platform/heap/MarkingVisitor.h" | 39 #include "platform/heap/MarkingVisitor.h" |
| 40 #include "platform/heap/PageMemory.h" | |
| 41 #include "platform/heap/PagePool.h" | |
| 40 #include "platform/heap/SafePoint.h" | 42 #include "platform/heap/SafePoint.h" |
| 41 #include "public/platform/Platform.h" | 43 #include "public/platform/Platform.h" |
| 42 #include "public/platform/WebMemoryAllocatorDump.h" | 44 #include "public/platform/WebMemoryAllocatorDump.h" |
| 43 #include "public/platform/WebProcessMemoryDump.h" | 45 #include "public/platform/WebProcessMemoryDump.h" |
| 44 #include "public/platform/WebScheduler.h" | 46 #include "public/platform/WebScheduler.h" |
| 45 #include "public/platform/WebThread.h" | 47 #include "public/platform/WebThread.h" |
| 46 #include "public/platform/WebTraceLocation.h" | 48 #include "public/platform/WebTraceLocation.h" |
| 47 #include "wtf/DataLog.h" | 49 #include "wtf/DataLog.h" |
| 48 #include "wtf/Partitions.h" | 50 #include "wtf/Partitions.h" |
| 49 #include "wtf/ThreadingPrimitives.h" | 51 #include "wtf/ThreadingPrimitives.h" |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 71 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; | 73 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; |
| 72 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 74 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
| 73 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; | 75 SafePointBarrier* ThreadState::s_safePointBarrier = nullptr; |
| 74 | 76 |
| 75 RecursiveMutex& ThreadState::threadAttachMutex() | 77 RecursiveMutex& ThreadState::threadAttachMutex() |
| 76 { | 78 { |
| 77 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)) ; | 79 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)) ; |
| 78 return mutex; | 80 return mutex; |
| 79 } | 81 } |
| 80 | 82 |
| 81 ThreadState::ThreadState() | 83 ThreadState::ThreadState(PerThreadHeapState perThreadHeap) |
| 82 : m_thread(currentThread()) | 84 : m_thread(currentThread()) |
| 83 , m_persistentRegion(adoptPtr(new PersistentRegion())) | 85 , m_persistentRegion(adoptPtr(new PersistentRegion())) |
| 86 , m_xThreadPersistentRegion(adoptPtr(new XThreadPersistentRegion())) | |
| 84 #if OS(WIN) && COMPILER(MSVC) | 87 #if OS(WIN) && COMPILER(MSVC) |
| 85 , m_threadStackSize(0) | 88 , m_threadStackSize(0) |
| 86 #endif | 89 #endif |
| 87 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( ))) | 90 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart( ))) |
| 88 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) ) | 91 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart()) ) |
| 89 , m_safePointScopeMarker(nullptr) | 92 , m_safePointScopeMarker(nullptr) |
| 90 , m_atSafePoint(false) | 93 , m_atSafePoint(false) |
| 91 , m_interruptors() | 94 , m_interruptors() |
| 92 , m_sweepForbidden(false) | 95 , m_sweepForbidden(false) |
| 93 , m_noAllocationCount(0) | 96 , m_noAllocationCount(0) |
| 94 , m_gcForbiddenCount(0) | 97 , m_gcForbiddenCount(0) |
| 95 , m_accumulatedSweepingTime(0) | 98 , m_accumulatedSweepingTime(0) |
| 96 , m_vectorBackingHeapIndex(BlinkGC::Vector1HeapIndex) | 99 , m_vectorBackingHeapIndex(BlinkGC::Vector1HeapIndex) |
| 97 , m_currentHeapAges(0) | 100 , m_currentHeapAges(0) |
| 98 , m_isTerminating(false) | 101 , m_isTerminating(false) |
| 99 , m_gcMixinMarker(nullptr) | 102 , m_gcMixinMarker(nullptr) |
| 100 , m_shouldFlushHeapDoesNotContainCache(false) | 103 , m_shouldFlushHeapDoesNotContainCache(false) |
| 101 , m_gcState(NoGCScheduled) | 104 , m_gcState(NoGCScheduled) |
| 102 , m_traceDOMWrappers(nullptr) | 105 , m_traceDOMWrappers(nullptr) |
| 103 #if defined(ADDRESS_SANITIZER) | 106 #if defined(ADDRESS_SANITIZER) |
| 104 , m_asanFakeStack(__asan_get_current_fake_stack()) | 107 , m_asanFakeStack(__asan_get_current_fake_stack()) |
| 105 #endif | 108 #endif |
| 106 #if defined(LEAK_SANITIZER) | 109 #if defined(LEAK_SANITIZER) |
| 107 , m_disabledStaticPersistentsRegistration(0) | 110 , m_disabledStaticPersistentsRegistration(0) |
| 108 #endif | 111 #endif |
| 112 , m_markingStack(adoptPtr(new CallbackStack())) | |
| 113 , m_postMarkingCallbackStack(adoptPtr(new CallbackStack())) | |
| 114 , m_globalWeakCallbackStack(adoptPtr(new CallbackStack())) | |
| 115 , m_ephemeronStack(adoptPtr(new CallbackStack())) | |
| 116 , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache())) | |
| 117 , m_regionTree(nullptr) | |
| 118 , m_perThreadHeapEnabled(perThreadHeap == PerThreadHeapEnabled) | |
| 109 { | 119 { |
| 110 ASSERT(checkThread()); | 120 ASSERT(checkThread()); |
| 111 ASSERT(!**s_threadSpecific); | 121 ASSERT(!**s_threadSpecific); |
| 112 **s_threadSpecific = this; | 122 **s_threadSpecific = this; |
| 113 | 123 |
| 114 if (isMainThread()) { | 124 if (isMainThread()) { |
| 115 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*); | 125 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - s izeof(void*); |
| 116 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size(); | 126 size_t underestimatedStackSize = StackFrameDepth::getUnderestimatedStack Size(); |
| 117 if (underestimatedStackSize > sizeof(void*)) | 127 if (underestimatedStackSize > sizeof(void*)) |
| 118 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*); | 128 s_mainThreadUnderestimatedStackSize = underestimatedStackSize - size of(void*); |
| 129 m_heapStats = new GCHeapStats(); | |
| 130 } else if (perThreadHeapEnabled()) { | |
| 131 m_heapStats = new GCHeapStats(); | |
| 132 } else { | |
| 133 m_heapStats = ThreadState::mainThreadState()->heapStats(); | |
| 119 } | 134 } |
| 135 ASSERT(m_heapStats); | |
| 120 | 136 |
| 121 for (int heapIndex = 0; heapIndex < BlinkGC::LargeObjectHeapIndex; heapIndex ++) | 137 for (int heapIndex = 0; heapIndex < BlinkGC::LargeObjectHeapIndex; heapIndex ++) |
| 122 m_heaps[heapIndex] = new NormalPageHeap(this, heapIndex); | 138 m_heaps[heapIndex] = new NormalPageHeap(this, heapIndex); |
| 123 m_heaps[BlinkGC::LargeObjectHeapIndex] = new LargeObjectHeap(this, BlinkGC:: LargeObjectHeapIndex); | 139 m_heaps[BlinkGC::LargeObjectHeapIndex] = new LargeObjectHeap(this, BlinkGC:: LargeObjectHeapIndex); |
| 124 | 140 |
| 125 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArr aySize]); | 141 m_likelyToBePromptlyFreed = adoptArrayPtr(new int[likelyToBePromptlyFreedArr aySize]); |
| 126 clearHeapAges(); | 142 clearHeapAges(); |
| 127 | 143 |
| 128 m_threadLocalWeakCallbackStack = new CallbackStack(); | 144 m_threadLocalWeakCallbackStack = new CallbackStack(); |
| 129 } | 145 } |
| 130 | 146 |
| 131 ThreadState::~ThreadState() | 147 ThreadState::~ThreadState() |
| 132 { | 148 { |
| 133 ASSERT(checkThread()); | 149 ASSERT(checkThread()); |
| 134 delete m_threadLocalWeakCallbackStack; | 150 delete m_threadLocalWeakCallbackStack; |
| 135 m_threadLocalWeakCallbackStack = nullptr; | 151 m_threadLocalWeakCallbackStack = nullptr; |
| 136 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) | 152 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 137 delete m_heaps[i]; | 153 delete m_heaps[i]; |
| 138 | 154 |
| 139 **s_threadSpecific = nullptr; | 155 **s_threadSpecific = nullptr; |
| 140 if (isMainThread()) { | 156 if (isMainThread()) { |
| 157 delete m_heapStats; | |
| 158 m_heapStats = nullptr; | |
| 141 s_mainThreadStackStart = 0; | 159 s_mainThreadStackStart = 0; |
| 142 s_mainThreadUnderestimatedStackSize = 0; | 160 s_mainThreadUnderestimatedStackSize = 0; |
| 143 } | 161 } |
| 162 if (perThreadHeapEnabled()) { | |
| 163 delete m_heapStats; | |
| 164 m_heapStats = nullptr; | |
| 165 } | |
| 166 delete m_regionTree; | |
| 167 m_regionTree = nullptr; | |
| 144 } | 168 } |
| 145 | 169 |
| 146 void ThreadState::init() | 170 void ThreadState::init() |
| 147 { | 171 { |
| 148 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); | 172 s_threadSpecific = new WTF::ThreadSpecific<ThreadState*>(); |
| 149 s_safePointBarrier = new SafePointBarrier; | 173 s_safePointBarrier = new SafePointBarrier; |
| 150 } | 174 } |
| 151 | 175 |
| 152 void ThreadState::shutdown() | 176 void ThreadState::shutdown() |
| 153 { | 177 { |
| (...skipping 35 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 189 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000); | 213 RELEASE_ASSERT(m_threadStackSize > 4 * 0x1000); |
| 190 m_threadStackSize -= 4 * 0x1000; | 214 m_threadStackSize -= 4 * 0x1000; |
| 191 return m_threadStackSize; | 215 return m_threadStackSize; |
| 192 } | 216 } |
| 193 #endif | 217 #endif |
| 194 | 218 |
| 195 void ThreadState::attachMainThread() | 219 void ThreadState::attachMainThread() |
| 196 { | 220 { |
| 197 RELEASE_ASSERT(!Heap::s_shutdownCalled); | 221 RELEASE_ASSERT(!Heap::s_shutdownCalled); |
| 198 MutexLocker locker(threadAttachMutex()); | 222 MutexLocker locker(threadAttachMutex()); |
| 199 ThreadState* state = new(s_mainThreadStateStorage) ThreadState(); | 223 ThreadState* state = new(s_mainThreadStateStorage) ThreadState(PerThreadHeap Disabled); |
| 200 attachedThreads().add(state); | 224 attachedThreads().add(state); |
| 201 } | 225 } |
| 202 | 226 |
| 203 void ThreadState::detachMainThread() | 227 void ThreadState::detachMainThread() |
| 204 { | 228 { |
| 205 // Enter a safe point before trying to acquire threadAttachMutex | 229 // Enter a safe point before trying to acquire threadAttachMutex |
| 206 // to avoid dead lock if another thread is preparing for GC, has acquired | 230 // to avoid dead lock if another thread is preparing for GC, has acquired |
| 207 // threadAttachMutex and waiting for other threads to pause or reach a | 231 // threadAttachMutex and waiting for other threads to pause or reach a |
| 208 // safepoint. | 232 // safepoint. |
| 209 ThreadState* state = mainThreadState(); | 233 ThreadState* state = mainThreadState(); |
| 210 | |
| 211 // 1. Finish sweeping. | 234 // 1. Finish sweeping. |
| 212 state->completeSweep(); | 235 state->completeSweep(); |
| 213 { | 236 { |
| 214 SafePointAwareMutexLocker locker(threadAttachMutex(), BlinkGC::NoHeapPoi ntersOnStack); | 237 SafePointAwareMutexLocker locker(threadAttachMutex(), BlinkGC::NoHeapPoi ntersOnStack); |
| 215 | 238 |
| 216 // 2. Add the main thread's heap pages to the orphaned pool. | 239 // 2. Add the main thread's heap pages to the orphaned pool. |
| 217 state->cleanupPages(); | 240 state->cleanupPages(); |
| 218 | 241 |
| 219 // 3. Detach the main thread. | 242 // 3. Detach the main thread. |
| 220 ASSERT(attachedThreads().contains(state)); | 243 ASSERT(attachedThreads().contains(state)); |
| 221 attachedThreads().remove(state); | 244 attachedThreads().remove(state); |
| 222 state->~ThreadState(); | 245 state->~ThreadState(); |
| 223 } | 246 } |
| 224 shutdownHeapIfNecessary(); | 247 shutdownHeapIfNecessary(); |
| 225 } | 248 } |
| 226 | 249 |
| 227 void ThreadState::shutdownHeapIfNecessary() | 250 void ThreadState::shutdownHeapIfNecessary() |
| 228 { | 251 { |
| 229 // We don't need to enter a safe point before acquiring threadAttachMutex | 252 // We don't need to enter a safe point before acquiring threadAttachMutex |
| 230 // because this thread is already detached. | 253 // because this thread is already detached. |
| 231 | 254 ASSERT(!ThreadState::current()); |
| 232 MutexLocker locker(threadAttachMutex()); | 255 MutexLocker locker(threadAttachMutex()); |
| 233 // We start shutting down the heap if there is no running thread | 256 // We start shutting down the heap if there is no running thread |
| 234 // and Heap::shutdown() is already called. | 257 // and Heap::shutdown() is already called. |
| 235 if (!attachedThreads().size() && Heap::s_shutdownCalled) | 258 if (!attachedThreads().size() && Heap::s_shutdownCalled) |
| 236 Heap::doShutdown(); | 259 Heap::doShutdown(); |
| 237 } | 260 } |
| 238 | 261 |
| 239 void ThreadState::attach() | 262 void ThreadState::attach(PerThreadHeapState perThreadHeap) |
| 240 { | 263 { |
| 241 RELEASE_ASSERT(!Heap::s_shutdownCalled); | 264 RELEASE_ASSERT(!Heap::s_shutdownCalled); |
| 242 MutexLocker locker(threadAttachMutex()); | 265 MutexLocker locker(threadAttachMutex()); |
| 243 ThreadState* state = new ThreadState(); | 266 ThreadState* state = new ThreadState(perThreadHeap); |
| 244 attachedThreads().add(state); | 267 if (perThreadHeap == PerThreadHeapDisabled) |
| 268 attachedThreads().add(state); | |
| 245 } | 269 } |
| 246 | 270 |
| 247 void ThreadState::cleanupPages() | 271 void ThreadState::cleanupPages() |
| 248 { | 272 { |
| 249 ASSERT(checkThread()); | 273 ASSERT(checkThread()); |
| 250 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) | 274 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 251 m_heaps[i]->cleanupPages(); | 275 m_heaps[i]->cleanupPages(); |
| 252 } | 276 } |
| 253 | 277 |
| 254 void ThreadState::cleanup() | 278 void ThreadState::cleanup() |
| (...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 291 // if we have it is probably a bug so adding a debug ASSERT to catch thi s. | 315 // if we have it is probably a bug so adding a debug ASSERT to catch thi s. |
| 292 ASSERT(!currentCount); | 316 ASSERT(!currentCount); |
| 293 // All of pre-finalizers should be consumed. | 317 // All of pre-finalizers should be consumed. |
| 294 ASSERT(m_orderedPreFinalizers.isEmpty()); | 318 ASSERT(m_orderedPreFinalizers.isEmpty()); |
| 295 RELEASE_ASSERT(gcState() == NoGCScheduled); | 319 RELEASE_ASSERT(gcState() == NoGCScheduled); |
| 296 | 320 |
| 297 // Add pages to the orphaned page pool to ensure any global GCs from thi s point | 321 // Add pages to the orphaned page pool to ensure any global GCs from thi s point |
| 298 // on will not trace objects on this thread's heaps. | 322 // on will not trace objects on this thread's heaps. |
| 299 cleanupPages(); | 323 cleanupPages(); |
| 300 | 324 |
| 301 ASSERT(attachedThreads().contains(this)); | 325 if (!perThreadHeapEnabled()) { |
| 302 attachedThreads().remove(this); | 326 ASSERT(attachedThreads().contains(this)); |
| 327 attachedThreads().remove(this); | |
| 328 } | |
| 303 } | 329 } |
| 304 } | 330 } |
| 305 | 331 |
| 306 void ThreadState::detach() | 332 void ThreadState::detach() |
| 307 { | 333 { |
| 308 ThreadState* state = current(); | 334 ThreadState* state = current(); |
| 309 state->cleanup(); | 335 state->cleanup(); |
| 310 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); | 336 RELEASE_ASSERT(state->gcState() == ThreadState::NoGCScheduled); |
| 311 delete state; | 337 delete state; |
| 312 shutdownHeapIfNecessary(); | 338 shutdownHeapIfNecessary(); |
| 313 } | 339 } |
| 314 | 340 |
| 315 void ThreadState::visitPersistentRoots(Visitor* visitor) | 341 void ThreadState::visitPersistentRoots(Visitor* visitor) |
| 316 { | 342 { |
| 317 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots"); | 343 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots"); |
| 318 crossThreadPersistentRegion().tracePersistentNodes(visitor); | 344 crossThreadPersistentRegion().tracePersistentNodes(visitor); |
| 319 | 345 |
| 320 for (ThreadState* state : attachedThreads()) | 346 for (ThreadState* state : attachedThreads()) { |
| 321 state->visitPersistents(visitor); | 347 state->visitPersistents(visitor); |
| 348 } | |
| 322 } | 349 } |
| 323 | 350 |
| 324 void ThreadState::visitStackRoots(Visitor* visitor) | 351 void ThreadState::visitStackRoots(Visitor* visitor) |
| 325 { | 352 { |
| 326 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots"); | 353 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots"); |
| 327 for (ThreadState* state : attachedThreads()) | 354 for (ThreadState* state : attachedThreads()) { |
| 328 state->visitStack(visitor); | 355 state->visitStack(visitor); |
| 356 } | |
| 329 } | 357 } |
| 330 | 358 |
| 331 NO_SANITIZE_ADDRESS | 359 NO_SANITIZE_ADDRESS |
| 332 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) | 360 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) |
| 333 { | 361 { |
| 334 #if defined(ADDRESS_SANITIZER) | 362 #if defined(ADDRESS_SANITIZER) |
| 335 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 363 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
| 336 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 364 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
| 337 Address* fakeFrameStart = nullptr; | 365 Address* fakeFrameStart = nullptr; |
| 338 Address* fakeFrameEnd = nullptr; | 366 Address* fakeFrameEnd = nullptr; |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 396 __msan_unpoison(&ptr, sizeof(ptr)); | 424 __msan_unpoison(&ptr, sizeof(ptr)); |
| 397 #endif | 425 #endif |
| 398 Heap::checkAndMarkPointer(visitor, ptr); | 426 Heap::checkAndMarkPointer(visitor, ptr); |
| 399 visitAsanFakeStackForPointer(visitor, ptr); | 427 visitAsanFakeStackForPointer(visitor, ptr); |
| 400 } | 428 } |
| 401 } | 429 } |
| 402 | 430 |
| 403 void ThreadState::visitPersistents(Visitor* visitor) | 431 void ThreadState::visitPersistents(Visitor* visitor) |
| 404 { | 432 { |
| 405 m_persistentRegion->tracePersistentNodes(visitor); | 433 m_persistentRegion->tracePersistentNodes(visitor); |
| 434 m_xThreadPersistentRegion->tracePersistentNodes(visitor); | |
| 406 if (m_traceDOMWrappers) { | 435 if (m_traceDOMWrappers) { |
| 407 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); | 436 TRACE_EVENT0("blink_gc", "V8GCController::traceDOMWrappers"); |
| 408 m_traceDOMWrappers(m_isolate, visitor); | 437 m_traceDOMWrappers(m_isolate, visitor); |
| 409 } | 438 } |
| 410 } | 439 } |
| 411 | 440 |
| 412 ThreadState::GCSnapshotInfo::GCSnapshotInfo(size_t numObjectTypes) | 441 ThreadState::GCSnapshotInfo::GCSnapshotInfo(size_t numObjectTypes) |
| 413 : liveCount(Vector<int>(numObjectTypes)) | 442 : liveCount(Vector<int>(numObjectTypes)) |
| 414 , deadCount(Vector<int>(numObjectTypes)) | 443 , deadCount(Vector<int>(numObjectTypes)) |
| 415 , liveSize(Vector<size_t>(numObjectTypes)) | 444 , liveSize(Vector<size_t>(numObjectTypes)) |
| (...skipping 76 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 492 } | 521 } |
| 493 | 522 |
| 494 CrossThreadPersistentRegion& ThreadState::crossThreadPersistentRegion() | 523 CrossThreadPersistentRegion& ThreadState::crossThreadPersistentRegion() |
| 495 { | 524 { |
| 496 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion()); | 525 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegio n, new CrossThreadPersistentRegion()); |
| 497 return persistentRegion; | 526 return persistentRegion; |
| 498 } | 527 } |
| 499 | 528 |
| 500 size_t ThreadState::totalMemorySize() | 529 size_t ThreadState::totalMemorySize() |
| 501 { | 530 { |
| 502 return Heap::allocatedObjectSize() + Heap::markedObjectSize() + WTF::Partiti ons::totalSizeOfCommittedPages(); | 531 return allocatedObjectSize() + markedObjectSize() + WTF::Partitions::totalSi zeOfCommittedPages(); |
| 503 } | 532 } |
| 504 | 533 |
| 505 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC) | 534 size_t ThreadState::estimatedLiveSize(size_t estimationBaseSize, size_t sizeAtLa stGC) |
| 506 { | 535 { |
| 507 if (Heap::wrapperCountAtLastGC() == 0) { | 536 if (wrapperCountAtLastGC() == 0) { |
| 508 // We'll reach here only before hitting the first GC. | 537 // We'll reach here only before hitting the first GC. |
| 509 return 0; | 538 return 0; |
| 510 } | 539 } |
| 511 | 540 |
| 512 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC); | 541 // (estimated size) = (estimation base size) - (heap size at the last GC) / (# of persistent handles at the last GC) * (# of persistent handles collected si nce the last GC); |
| 513 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAt LastGC / Heap::wrapperCountAtLastGC() * Heap::collectedWrapperCount()); | 542 size_t sizeRetainedByCollectedPersistents = static_cast<size_t>(1.0 * sizeAt LastGC / wrapperCountAtLastGC() * collectedWrapperCount()); |
| 514 if (estimationBaseSize < sizeRetainedByCollectedPersistents) | 543 if (estimationBaseSize < sizeRetainedByCollectedPersistents) |
| 515 return 0; | 544 return 0; |
| 516 return estimationBaseSize - sizeRetainedByCollectedPersistents; | 545 return estimationBaseSize - sizeRetainedByCollectedPersistents; |
| 517 } | 546 } |
| 518 | 547 |
| 519 double ThreadState::heapGrowingRate() | 548 double ThreadState::heapGrowingRate() |
| 520 { | 549 { |
| 521 size_t currentSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); | 550 size_t currentSize = allocatedObjectSize() + markedObjectSize(); |
| 522 size_t estimatedSize = estimatedLiveSize(Heap::markedObjectSizeAtLastComplet eSweep(), Heap::markedObjectSizeAtLastCompleteSweep()); | 551 size_t estimatedSize = estimatedLiveSize(markedObjectSizeAtLastCompleteSweep (), markedObjectSizeAtLastCompleteSweep()); |
| 523 | 552 |
| 524 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. | 553 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. |
| 525 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; | 554 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; |
| 526 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); | 555 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapEsti matedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_MAX))); |
| 527 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate)); | 556 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::heapGrow ingRate", static_cast<int>(100 * growingRate)); |
| 528 return growingRate; | 557 return growingRate; |
| 529 } | 558 } |
| 530 | 559 |
| 531 double ThreadState::partitionAllocGrowingRate() | 560 double ThreadState::partitionAllocGrowingRate() |
| 532 { | 561 { |
| 533 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); | 562 size_t currentSize = WTF::Partitions::totalSizeOfCommittedPages(); |
| 534 size_t estimatedSize = estimatedLiveSize(currentSize, Heap::partitionAllocSi zeAtLastGC()); | 563 size_t estimatedSize = estimatedLiveSize(currentSize, partitionAllocSizeAtLa stGC()); |
| 535 | 564 |
| 536 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. | 565 // If the estimatedSize is 0, we set a high growing rate to trigger a GC. |
| 537 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; | 566 double growingRate = estimatedSize > 0 ? 1.0 * currentSize / estimatedSize : 100; |
| 538 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX))); | 567 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocEstimatedSizeKB", std::min(estimatedSize / 1024, static_cast<size_t>(INT_M AX))); |
| 539 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate)); | 568 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::partitio nAllocGrowingRate", static_cast<int>(100 * growingRate)); |
| 540 return growingRate; | 569 return growingRate; |
| 541 } | 570 } |
| 542 | 571 |
| 543 // TODO(haraken): We should improve the GC heuristics. The heuristics affect | 572 // TODO(haraken): We should improve the GC heuristics. The heuristics affect |
| 544 // performance significantly. | 573 // performance significantly. |
| 545 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold) | 574 bool ThreadState::judgeGCThreshold(size_t totalMemorySizeThreshold, double heapG rowingRateThreshold) |
| 546 { | 575 { |
| 547 // If the allocated object size or the total memory size is small, don't tri gger a GC. | 576 // If the allocated object size or the total memory size is small, don't tri gger a GC. |
| 548 if (Heap::allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMem orySizeThreshold) | 577 if (allocatedObjectSize() < 100 * 1024 || totalMemorySize() < totalMemorySiz eThreshold) |
| 549 return false; | 578 return false; |
| 550 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough, | 579 // If the growing rate of Oilpan's heap or PartitionAlloc is high enough, |
| 551 // trigger a GC. | 580 // trigger a GC. |
| 552 #if PRINT_HEAP_STATS | 581 #if PRINT_HEAP_STATS |
| 553 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate()); | 582 dataLogF("heapGrowingRate=%.1lf, partitionAllocGrowingRate=%.1lf\n", heapGro wingRate(), partitionAllocGrowingRate()); |
| 554 #endif | 583 #endif |
| 555 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold; | 584 return heapGrowingRate() >= heapGrowingRateThreshold || partitionAllocGrowin gRate() >= heapGrowingRateThreshold; |
| 556 } | 585 } |
| 557 | 586 |
| 558 bool ThreadState::shouldScheduleIdleGC() | 587 bool ThreadState::shouldScheduleIdleGC() |
| (...skipping 24 matching lines...) Expand all Loading... | |
| 583 bool ThreadState::shouldForceMemoryPressureGC() | 612 bool ThreadState::shouldForceMemoryPressureGC() |
| 584 { | 613 { |
| 585 if (totalMemorySize() < 300 * 1024 * 1024) | 614 if (totalMemorySize() < 300 * 1024 * 1024) |
| 586 return false; | 615 return false; |
| 587 return judgeGCThreshold(0, 1.5); | 616 return judgeGCThreshold(0, 1.5); |
| 588 } | 617 } |
| 589 | 618 |
| 590 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) | 619 void ThreadState::scheduleV8FollowupGCIfNeeded(BlinkGC::V8GCType gcType) |
| 591 { | 620 { |
| 592 ASSERT(checkThread()); | 621 ASSERT(checkThread()); |
| 593 Heap::reportMemoryUsageForTracing(); | 622 reportMemoryUsageForTracing(); |
| 594 | 623 |
| 595 #if PRINT_HEAP_STATS | 624 #if PRINT_HEAP_STATS |
| 596 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType = = BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); | 625 dataLogF("ThreadState::scheduleV8FollowupGCIfNeeded (gcType=%s)\n", gcType = = BlinkGC::V8MajorGC ? "MajorGC" : "MinorGC"); |
| 597 #endif | 626 #endif |
| 598 | 627 |
| 599 if (isGCForbidden()) | 628 if (isGCForbidden()) |
| 600 return; | 629 return; |
| 601 | 630 |
| 602 // This completeSweep() will do nothing in common cases since we've | 631 // This completeSweep() will do nothing in common cases since we've |
| 603 // called completeSweep() before V8 starts minor/major GCs. | 632 // called completeSweep() before V8 starts minor/major GCs. |
| 604 completeSweep(); | 633 completeSweep(); |
| 605 ASSERT(!isSweepingInProgress()); | 634 ASSERT(!isSweepingInProgress()); |
| 606 ASSERT(!sweepForbidden()); | 635 ASSERT(!sweepForbidden()); |
| 607 | 636 |
| 608 // TODO(haraken): Consider if we should trigger a memory pressure GC | 637 // TODO(haraken): Consider if we should trigger a memory pressure GC |
| 609 // for V8 minor GCs as well. | 638 // for V8 minor GCs as well. |
| 610 if (gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) { | 639 if (gcType == BlinkGC::V8MajorGC && shouldForceMemoryPressureGC()) { |
| 611 #if PRINT_HEAP_STATS | 640 #if PRINT_HEAP_STATS |
| 612 dataLogF("Scheduled MemoryPressureGC\n"); | 641 dataLogF("Scheduled MemoryPressureGC\n"); |
| 613 #endif | 642 #endif |
| 614 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSwe ep, BlinkGC::MemoryPressureGC); | 643 if (perThreadHeapEnabled()) |
| 644 Heap::collectGarbageForIsolatedThread(this); | |
|
haraken
2016/01/07 08:06:22
It wouldn't be a good idea to introduce Heap::coll
| |
| 645 else | |
| 646 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithou tSweep, BlinkGC::MemoryPressureGC); | |
| 615 return; | 647 return; |
| 616 } | 648 } |
| 617 if (shouldScheduleV8FollowupGC()) { | 649 if (shouldScheduleV8FollowupGC()) { |
| 618 #if PRINT_HEAP_STATS | 650 #if PRINT_HEAP_STATS |
| 619 dataLogF("Scheduled PreciseGC\n"); | 651 dataLogF("Scheduled PreciseGC\n"); |
| 620 #endif | 652 #endif |
| 621 schedulePreciseGC(); | 653 schedulePreciseGC(); |
| 622 return; | 654 return; |
| 623 } | 655 } |
| 624 if (gcType == BlinkGC::V8MajorGC) { | 656 if (gcType == BlinkGC::V8MajorGC) { |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 642 completeSweep(); | 674 completeSweep(); |
| 643 | 675 |
| 644 // The fact that the PageNavigation GC is scheduled means that there is | 676 // The fact that the PageNavigation GC is scheduled means that there is |
| 645 // a dead frame. In common cases, a sequence of Oilpan's GC => V8 GC => | 677 // a dead frame. In common cases, a sequence of Oilpan's GC => V8 GC => |
| 646 // Oilpan's GC is needed to collect the dead frame. So we force the | 678 // Oilpan's GC is needed to collect the dead frame. So we force the |
| 647 // PageNavigation GC before running the V8 GC. | 679 // PageNavigation GC before running the V8 GC. |
| 648 if (gcState() == PageNavigationGCScheduled) { | 680 if (gcState() == PageNavigationGCScheduled) { |
| 649 #if PRINT_HEAP_STATS | 681 #if PRINT_HEAP_STATS |
| 650 dataLogF("Scheduled PageNavigationGC\n"); | 682 dataLogF("Scheduled PageNavigationGC\n"); |
| 651 #endif | 683 #endif |
| 652 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); | 684 if (perThreadHeapEnabled()) |
| 685 Heap::collectGarbageForIsolatedThread(this); | |
| 686 else | |
| 687 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithSw eep, BlinkGC::PageNavigationGC); | |
| 653 } | 688 } |
| 654 } | 689 } |
| 655 | 690 |
| 656 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio) | 691 void ThreadState::schedulePageNavigationGCIfNeeded(float estimatedRemovalRatio) |
| 657 { | 692 { |
| 658 ASSERT(checkThread()); | 693 ASSERT(checkThread()); |
| 659 Heap::reportMemoryUsageForTracing(); | 694 reportMemoryUsageForTracing(); |
| 660 | 695 |
| 661 #if PRINT_HEAP_STATS | 696 #if PRINT_HEAP_STATS |
| 662 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRatio=%.2lf)\n", estimatedRemovalRatio); | 697 dataLogF("ThreadState::schedulePageNavigationGCIfNeeded (estimatedRemovalRatio=%.2lf)\n", estimatedRemovalRatio); |
| 663 #endif | 698 #endif |
| 664 | 699 |
| 665 if (isGCForbidden()) | 700 if (isGCForbidden()) |
| 666 return; | 701 return; |
| 667 | 702 |
| 668 // Finish on-going lazy sweeping. | 703 // Finish on-going lazy sweeping. |
| 669 // TODO(haraken): It might not make sense to force completeSweep() for all | 704 // TODO(haraken): It might not make sense to force completeSweep() for all |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 691 void ThreadState::schedulePageNavigationGC() | 726 void ThreadState::schedulePageNavigationGC() |
| 692 { | 727 { |
| 693 ASSERT(checkThread()); | 728 ASSERT(checkThread()); |
| 694 ASSERT(!isSweepingInProgress()); | 729 ASSERT(!isSweepingInProgress()); |
| 695 setGCState(PageNavigationGCScheduled); | 730 setGCState(PageNavigationGCScheduled); |
| 696 } | 731 } |
| 697 | 732 |
| 698 void ThreadState::scheduleGCIfNeeded() | 733 void ThreadState::scheduleGCIfNeeded() |
| 699 { | 734 { |
| 700 ASSERT(checkThread()); | 735 ASSERT(checkThread()); |
| 701 Heap::reportMemoryUsageForTracing(); | 736 reportMemoryUsageForTracing(); |
| 702 | 737 |
| 703 #if PRINT_HEAP_STATS | 738 #if PRINT_HEAP_STATS |
| 704 dataLogF("ThreadState::scheduleGCIfNeeded\n"); | 739 dataLogF("ThreadState::scheduleGCIfNeeded\n"); |
| 705 #endif | 740 #endif |
| 706 | 741 |
| 707 // Allocation is allowed during sweeping, but those allocations should not | 742 // Allocation is allowed during sweeping, but those allocations should not |
| 708 // trigger nested GCs. | 743 // trigger nested GCs. |
| 709 if (isGCForbidden()) | 744 if (isGCForbidden()) |
| 710 return; | 745 return; |
| 711 | 746 |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 723 return; | 758 return; |
| 724 } | 759 } |
| 725 } | 760 } |
| 726 | 761 |
| 727 if (shouldForceConservativeGC()) { | 762 if (shouldForceConservativeGC()) { |
| 728 completeSweep(); | 763 completeSweep(); |
| 729 if (shouldForceConservativeGC()) { | 764 if (shouldForceConservativeGC()) { |
| 730 #if PRINT_HEAP_STATS | 765 #if PRINT_HEAP_STATS |
| 731 dataLogF("Scheduled ConservativeGC\n"); | 766 dataLogF("Scheduled ConservativeGC\n"); |
| 732 #endif | 767 #endif |
| 733 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); | 768 if (perThreadHeapEnabled()) |
| 769 Heap::collectGarbageForIsolatedThread(this); | |
| 770 else | |
| 771 Heap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); | |
| 734 return; | 772 return; |
| 735 } | 773 } |
| 736 } | 774 } |
| 737 if (shouldScheduleIdleGC()) { | 775 if (shouldScheduleIdleGC()) { |
| 738 #if PRINT_HEAP_STATS | 776 #if PRINT_HEAP_STATS |
| 739 dataLogF("Scheduled IdleGC\n"); | 777 dataLogF("Scheduled IdleGC\n"); |
| 740 #endif | 778 #endif |
| 741 scheduleIdleGC(); | 779 scheduleIdleGC(); |
| 742 return; | 780 return; |
| 743 } | 781 } |
| 744 } | 782 } |
| 745 | 783 |
| 746 void ThreadState::performIdleGC(double deadlineSeconds) | 784 void ThreadState::performIdleGC(double deadlineSeconds) |
| 747 { | 785 { |
| 748 ASSERT(checkThread()); | 786 ASSERT(checkThread()); |
| 749 ASSERT(isMainThread()); | 787 ASSERT(isMainThread()); |
| 750 ASSERT(Platform::current()->currentThread()->scheduler()); | 788 ASSERT(Platform::current()->currentThread()->scheduler()); |
| 751 | 789 |
| 752 if (gcState() != IdleGCScheduled) | 790 if (gcState() != IdleGCScheduled) |
| 753 return; | 791 return; |
| 754 | 792 |
| 755 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonic allyIncreasingTimeSeconds(); | 793 double idleDeltaInSeconds = deadlineSeconds - Platform::current()->monotonic allyIncreasingTimeSeconds(); |
| 756 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", Heap::estimatedMarkingTime()); | 794 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", m_heapStats->estimatedMarkingTime() ); |
| 757 if (idleDeltaInSeconds <= Heap::estimatedMarkingTime() && !Platform::current ()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { | 795 if (idleDeltaInSeconds <= m_heapStats->estimatedMarkingTime() && !Platform:: current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { |
| 758 // If marking is estimated to take longer than the deadline and we can't | 796 // If marking is estimated to take longer than the deadline and we can't |
| 759 // exceed the deadline, then reschedule for the next idle period. | 797 // exceed the deadline, then reschedule for the next idle period. |
| 760 scheduleIdleGC(); | 798 scheduleIdleGC(); |
| 761 return; | 799 return; |
| 762 } | 800 } |
| 763 | 801 |
| 764 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); | 802 if (perThreadHeapEnabled()) |
| 803 Heap::collectGarbageForIsolatedThread(this); | |
| 804 else | |
| 805 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); | |
| 765 } | 806 } |
| 766 | 807 |
| 767 void ThreadState::performIdleLazySweep(double deadlineSeconds) | 808 void ThreadState::performIdleLazySweep(double deadlineSeconds) |
| 768 { | 809 { |
| 769 ASSERT(checkThread()); | 810 ASSERT(checkThread()); |
| 770 ASSERT(isMainThread()); | 811 ASSERT(isMainThread()); |
| 771 | 812 |
| 772 // If we are not in a sweeping phase, there is nothing to do here. | 813 // If we are not in a sweeping phase, there is nothing to do here. |
| 773 if (!isSweepingInProgress()) | 814 if (!isSweepingInProgress()) |
| 774 return; | 815 return; |
| (...skipping 158 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 933 // Prevent that from happening by marking GCs as forbidden while | 974 // Prevent that from happening by marking GCs as forbidden while |
| 934 // one is initiated and later running. | 975 // one is initiated and later running. |
| 935 if (isGCForbidden()) | 976 if (isGCForbidden()) |
| 936 return; | 977 return; |
| 937 | 978 |
| 938 switch (gcState()) { | 979 switch (gcState()) { |
| 939 case FullGCScheduled: | 980 case FullGCScheduled: |
| 940 Heap::collectAllGarbage(); | 981 Heap::collectAllGarbage(); |
| 941 break; | 982 break; |
| 942 case PreciseGCScheduled: | 983 case PreciseGCScheduled: |
| 943 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); | 984 if (perThreadHeapEnabled()) |
| 985 Heap::collectGarbageForIsolatedThread(this); | |
| 986 else | |
| 987 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); | |
| 944 break; | 988 break; |
| 945 case PageNavigationGCScheduled: | 989 case PageNavigationGCScheduled: |
| 946 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); | 990 if (perThreadHeapEnabled()) |
| 991 Heap::collectGarbageForIsolatedThread(this); | |
| 992 else | |
| 993 Heap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); | |
| 947 break; | 994 break; |
| 948 case IdleGCScheduled: | 995 case IdleGCScheduled: |
| 949 // Idle time GC will be scheduled by Blink Scheduler. | 996 // Idle time GC will be scheduled by Blink Scheduler. |
| 950 break; | 997 break; |
| 951 default: | 998 default: |
| 952 break; | 999 break; |
| 953 } | 1000 } |
| 954 } | 1001 } |
| 955 | 1002 |
| 956 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() | 1003 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() |
| 957 { | 1004 { |
| 958 if (m_shouldFlushHeapDoesNotContainCache) { | 1005 if (m_shouldFlushHeapDoesNotContainCache) { |
| 959 Heap::flushHeapDoesNotContainCache(); | 1006 flushHeapDoesNotContainCache(); |
| 960 m_shouldFlushHeapDoesNotContainCache = false; | 1007 m_shouldFlushHeapDoesNotContainCache = false; |
| 961 } | 1008 } |
| 962 } | 1009 } |
| 963 | 1010 |
| 964 void ThreadState::makeConsistentForGC() | 1011 void ThreadState::makeConsistentForGC() |
| 965 { | 1012 { |
| 966 ASSERT(isInGC()); | 1013 ASSERT(isInGC()); |
| 967 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); | 1014 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); |
| 968 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) | 1015 for (int i = 0; i < BlinkGC::NumberOfHeaps; ++i) |
| 969 m_heaps[i]->makeConsistentForGC(); | 1016 m_heaps[i]->makeConsistentForGC(); |
| (...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1118 if (isMainThread()) | 1165 if (isMainThread()) |
| 1119 Platform::current()->histogramCustomCounts("BlinkGC.CompleteSweep", timeForCompleteSweep, 1, 10 * 1000, 50); | 1166 Platform::current()->histogramCustomCounts("BlinkGC.CompleteSweep", timeForCompleteSweep, 1, 10 * 1000, 50); |
| 1120 } | 1167 } |
| 1121 | 1168 |
| 1122 postSweep(); | 1169 postSweep(); |
| 1123 } | 1170 } |
| 1124 | 1171 |
| 1125 void ThreadState::postSweep() | 1172 void ThreadState::postSweep() |
| 1126 { | 1173 { |
| 1127 ASSERT(checkThread()); | 1174 ASSERT(checkThread()); |
| 1128 Heap::reportMemoryUsageForTracing(); | 1175 reportMemoryUsageForTracing(); |
| 1129 | 1176 |
| 1130 if (isMainThread()) { | 1177 if (isMainThread()) { |
| 1131 double collectionRate = 0; | 1178 double collectionRate = 0; |
| 1132 if (Heap::objectSizeAtLastGC() > 0) | 1179 if (objectSizeAtLastGC() > 0) |
| 1133 collectionRate = 1 - 1.0 * Heap::markedObjectSize() / Heap::objectSizeAtLastGC(); | 1180 collectionRate = 1 - 1.0 * markedObjectSize() / objectSizeAtLastGC(); |
| 1134 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate)); | 1181 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadState::coll ectionRate", static_cast<int>(100 * collectionRate)); |
| 1135 | 1182 |
| 1136 #if PRINT_HEAP_STATS | 1183 #if PRINT_HEAP_STATS |
| 1137 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate)); | 1184 dataLogF("ThreadState::postSweep (collectionRate=%d%%)\n", static_cast<i nt>(100 * collectionRate)); |
| 1138 #endif | 1185 #endif |
| 1139 | 1186 |
| 1140 // Heap::markedObjectSize() may be underestimated here if any other | 1187 // Heap::markedObjectSize() may be underestimated here if any other |
| 1141 // thread has not yet finished lazy sweeping. | 1188 // thread has not yet finished lazy sweeping. |
| 1142 Heap::setMarkedObjectSizeAtLastCompleteSweep(Heap::markedObjectSize()); | 1189 m_heapStats->setMarkedObjectSizeAtLastCompleteSweep(markedObjectSize()); |
| 1143 | 1190 |
| 1144 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeBeforeGC", Heap::objectSizeAtLastGC() / 1024, 1, 4 * 1024 * 1024, 50); | 1191 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeBeforeGC", objectSizeAtLastGC() / 1024, 1, 4 * 1024 * 1024, 50); |
| 1145 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeAfterGC", Heap::markedObjectSize() / 1024, 1, 4 * 1024 * 1024, 50); | 1192 Platform::current()->histogramCustomCounts("BlinkGC.ObjectSizeAfterGC", markedObjectSize() / 1024, 1, 4 * 1024 * 1024, 50); |
| 1146 Platform::current()->histogramCustomCounts("BlinkGC.CollectionRate", sta tic_cast<int>(100 * collectionRate), 1, 100, 20); | 1193 Platform::current()->histogramCustomCounts("BlinkGC.CollectionRate", sta tic_cast<int>(100 * collectionRate), 1, 100, 20); |
| 1147 Platform::current()->histogramCustomCounts("BlinkGC.TimeForSweepingAllOb jects", m_accumulatedSweepingTime, 1, 10 * 1000, 50); | 1194 Platform::current()->histogramCustomCounts("BlinkGC.TimeForSweepingAllOb jects", m_accumulatedSweepingTime, 1, 10 * 1000, 50); |
| 1148 } | 1195 } |
| 1149 | 1196 |
| 1150 switch (gcState()) { | 1197 switch (gcState()) { |
| 1151 case Sweeping: | 1198 case Sweeping: |
| 1152 setGCState(NoGCScheduled); | 1199 setGCState(NoGCScheduled); |
| 1153 break; | 1200 break; |
| 1154 case SweepingAndPreciseGCScheduled: | 1201 case SweepingAndPreciseGCScheduled: |
| 1155 setGCState(PreciseGCScheduled); | 1202 setGCState(PreciseGCScheduled); |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1195 } | 1242 } |
| 1196 | 1243 |
| 1197 void ThreadState::resumeThreads() | 1244 void ThreadState::resumeThreads() |
| 1198 { | 1245 { |
| 1199 s_safePointBarrier->resumeOthers(); | 1246 s_safePointBarrier->resumeOthers(); |
| 1200 } | 1247 } |
| 1201 | 1248 |
| 1202 void ThreadState::safePoint(BlinkGC::StackState stackState) | 1249 void ThreadState::safePoint(BlinkGC::StackState stackState) |
| 1203 { | 1250 { |
| 1204 ASSERT(checkThread()); | 1251 ASSERT(checkThread()); |
| 1205 Heap::reportMemoryUsageForTracing(); | 1252 reportMemoryUsageForTracing(); |
| 1206 | 1253 |
| 1207 runScheduledGC(stackState); | 1254 runScheduledGC(stackState); |
| 1208 ASSERT(!m_atSafePoint); | 1255 ASSERT(!m_atSafePoint); |
| 1209 m_stackState = stackState; | 1256 m_stackState = stackState; |
| 1210 m_atSafePoint = true; | 1257 m_atSafePoint = true; |
| 1211 s_safePointBarrier->checkAndPark(this); | 1258 s_safePointBarrier->checkAndPark(this); |
| 1212 m_atSafePoint = false; | 1259 m_atSafePoint = false; |
| 1213 m_stackState = BlinkGC::HeapPointersOnStack; | 1260 m_stackState = BlinkGC::HeapPointersOnStack; |
| 1214 preSweep(); | 1261 preSweep(); |
| 1215 } | 1262 } |
| (...skipping 298 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1514 threadDump->addScalar("live_count", "objects", totalLiveCount); | 1561 threadDump->addScalar("live_count", "objects", totalLiveCount); |
| 1515 threadDump->addScalar("dead_count", "objects", totalDeadCount); | 1562 threadDump->addScalar("dead_count", "objects", totalDeadCount); |
| 1516 threadDump->addScalar("live_size", "bytes", totalLiveSize); | 1563 threadDump->addScalar("live_size", "bytes", totalLiveSize); |
| 1517 threadDump->addScalar("dead_size", "bytes", totalDeadSize); | 1564 threadDump->addScalar("dead_size", "bytes", totalDeadSize); |
| 1518 | 1565 |
| 1519 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName); | 1566 WebMemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->c reateMemoryAllocatorDumpForCurrentGC(heapsDumpName); |
| 1520 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName); | 1567 WebMemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()- >createMemoryAllocatorDumpForCurrentGC(classesDumpName); |
| 1521 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid()); | 1568 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->addOwners hipEdge(classesDump->guid(), heapsDump->guid()); |
| 1522 } | 1569 } |
| 1523 | 1570 |
| 1571 void ThreadState::reportMemoryUsageHistogram() | |
| 1572 { | |
| 1573 static size_t supportedMaxSizeInMB = 4 * 1024; | |
| 1574 static size_t observedMaxSizeInMB = 0; | |
| 1575 | |
| 1576 // FIXME: Separate report per thread | |
| 1577 | |
| 1578 // +1 is for rounding up the sizeInMB. | |
| 1579 size_t sizeInMB = allocatedSpace() / 1024 / 1024 + 1; | |
| 1580 if (sizeInMB >= supportedMaxSizeInMB) | |
| 1581 sizeInMB = supportedMaxSizeInMB - 1; | |
| 1582 if (sizeInMB > observedMaxSizeInMB) { | |
| 1583 // Send a UseCounter only when we see the highest memory usage | |
| 1584 // we've ever seen. | |
| 1585 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeInMB, supportedMaxSizeInMB); | |
| 1586 observedMaxSizeInMB = sizeInMB; | |
| 1587 } | |
| 1588 } | |
| 1589 | |
| 1590 void ThreadState::reportMemoryUsageForTracing() | |
| 1591 { | |
| 1592 // FIXME: Separate report per thread | |
|
haraken
2016/01/07 08:06:22
In practice, we're just interested in the memory c
| |
| 1593 #if PRINT_HEAP_STATS | |
| 1594 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1 024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); | |
|
haraken
2016/01/07 08:06:22
Heap:: => ThreadState::
| |
| 1595 #endif | |
| 1596 | |
| 1597 bool gcTracingEnabled; | |
| 1598 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | |
| 1599 if (!gcTracingEnabled) | |
| 1600 return; | |
| 1601 | |
| 1602 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints). | |
| 1603 // They are capped to INT_MAX just in case. | |
| 1604 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(allocated ObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | |
|
haraken
2016/01/07 08:06:22
Heap:: => ThreadState::
| |
| 1605 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(markedObject Size() / 1024, static_cast<size_t>(INT_MAX))); | |
| 1606 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eAtLastCompleteSweepKB", std::min(markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); | |
| 1607 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(allocatedSpace () / 1024, static_cast<size_t>(INT_MAX))); | |
| 1608 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas tGCKB", std::min(objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | |
| 1609 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(wrapperCount(), static_cast<size_t>(INT_MAX))); | |
| 1610 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtL astGC", std::min(wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); | |
| 1611 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe rCount", std::min(collectedWrapperCount(), static_cast<size_t>(INT_MAX))); | |
| 1612 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS izeAtLastGCKB", std::min(partitionAllocSizeAtLastGC() / 1024, static_cast<size_t >(INT_MAX))); | |
| 1613 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::m in(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_ MAX))); | |
| 1614 } | |
| 1615 | |
| 1616 void ThreadState::resetHeapCounters() | |
| 1617 { | |
| 1618 ASSERT(isInGC()); | |
| 1619 | |
| 1620 reportMemoryUsageForTracing(); | |
| 1621 | |
| 1622 m_heapStats->reset(); | |
| 1623 } | |
| 1624 | |
| 1625 void ThreadState::flushHeapDoesNotContainCache() | |
| 1626 { | |
| 1627 m_heapDoesNotContainCache->flush(); | |
| 1628 } | |
| 1629 | |
| 1630 ThreadState::GCHeapStats::GCHeapStats() | |
| 1631 : m_allocatedSpace(0) | |
| 1632 , m_allocatedObjectSize(0) | |
| 1633 , m_objectSizeAtLastGC(0) | |
| 1634 , m_markedObjectSize(0) | |
| 1635 , m_markedObjectSizeAtLastCompleteSweep(0) | |
| 1636 , m_wrapperCount(0) | |
| 1637 , m_wrapperCountAtLastGC(0) | |
| 1638 , m_collectedWrapperCount(0) | |
| 1639 , m_partitionAllocSizeAtLastGC(WTF::Partitions::totalSizeOfCommittedPages()) | |
| 1640 , m_estimatedMarkingTimePerByte(0.0) | |
| 1641 #if ENABLE(ASSERT) | |
| 1642 // 0 is used to indicate an unassigned area, so avoid using 0 in s_gcGeneration. | |
| 1643 , m_gcGeneration(1) | |
| 1644 #endif | |
| 1645 { | |
| 1646 } | |
| 1647 | |
| 1648 double ThreadState::GCHeapStats::estimatedMarkingTime() | |
| 1649 { | |
| 1650 // Use 8 ms as initial estimated marking time. | |
| 1651 // 8 ms is long enough for low-end mobile devices to mark common | |
| 1652 // real-world object graphs. | |
| 1653 if (m_estimatedMarkingTimePerByte == 0) | |
| 1654 return 0.008; | |
| 1655 | |
| 1656 // Assuming that the collection rate of this GC will be mostly equal to | |
| 1657 // the collection rate of the last GC, estimate the marking time of this GC. | |
| 1658 return m_estimatedMarkingTimePerByte * (allocatedObjectSize() + markedObjectSize()); | |
| 1659 } | |
| 1660 | |
| 1661 void ThreadState::GCHeapStats::reset() | |
| 1662 { | |
| 1663 m_objectSizeAtLastGC = m_allocatedObjectSize + m_markedObjectSize; | |
| 1664 m_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 1665 m_allocatedObjectSize = 0; | |
| 1666 m_markedObjectSize = 0; | |
| 1667 m_wrapperCountAtLastGC = m_wrapperCount; | |
| 1668 m_collectedWrapperCount = 0; | |
| 1669 } | |
| 1670 | |
| 1671 PageMemoryRegion* ThreadState::RegionTree::lookup(Address address) | |
| 1672 { | |
| 1673 RegionTree* current = this; | |
| 1674 while (current) { | |
| 1675 Address base = current->m_region->base(); | |
| 1676 if (address < base) { | |
| 1677 current = current->m_left; | |
| 1678 continue; | |
| 1679 } | |
| 1680 if (address >= base + current->m_region->size()) { | |
| 1681 current = current->m_right; | |
| 1682 continue; | |
| 1683 } | |
| 1684 ASSERT(current->m_region->contains(address)); | |
| 1685 return current->m_region; | |
| 1686 } | |
| 1687 return nullptr; | |
| 1688 } | |
| 1689 | |
| 1690 void ThreadState::RegionTree::add(RegionTree* newTree, RegionTree** context) | |
| 1691 { | |
| 1692 ASSERT(newTree); | |
| 1693 Address base = newTree->m_region->base(); | |
| 1694 for (RegionTree* current = *context; current; current = *context) { | |
| 1695 ASSERT(!current->m_region->contains(base)); | |
| 1696 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
| 1697 } | |
| 1698 *context = newTree; | |
| 1699 } | |
| 1700 | |
| 1701 void ThreadState::RegionTree::remove(PageMemoryRegion* region, RegionTree** cont ext) | |
| 1702 { | |
| 1703 ASSERT(region); | |
| 1704 ASSERT(context); | |
| 1705 Address base = region->base(); | |
| 1706 RegionTree* current = *context; | |
| 1707 for (; current; current = *context) { | |
| 1708 if (region == current->m_region) | |
| 1709 break; | |
| 1710 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
| 1711 } | |
| 1712 | |
| 1713 // Shutdown via detachMainThread might not have populated the region tree. | |
| 1714 if (!current) | |
| 1715 return; | |
| 1716 | |
| 1717 *context = nullptr; | |
| 1718 if (current->m_left) { | |
| 1719 add(current->m_left, context); | |
| 1720 current->m_left = nullptr; | |
| 1721 } | |
| 1722 if (current->m_right) { | |
| 1723 add(current->m_right, context); | |
| 1724 current->m_right = nullptr; | |
| 1725 } | |
| 1726 delete current; | |
| 1727 } | |
| 1728 | |
| 1729 void ThreadState::removeFromRegionTree(PageMemoryRegion* region) | |
| 1730 { | |
| 1731 if (m_regionTree) | |
| 1732 RegionTree::remove(region, &m_regionTree); | |
| 1733 } | |
| 1734 | |
| 1735 ThreadState* ThreadState::forObject(const void* object) | |
| 1736 { | |
| 1737 if (!object) | |
| 1738 return nullptr; | |
| 1739 BasePage* page = pageFromObject(object); | |
| 1740 ASSERT(page); | |
| 1741 ASSERT(page->heap()); | |
| 1742 return page->heap()->threadState(); | |
| 1743 } | |
| 1744 | |
| 1745 size_t ThreadState::s_totalAllocatedSpace = 0; | |
| 1746 size_t ThreadState::s_totalAllocatedObjectSize = 0; | |
| 1747 size_t ThreadState::s_totalMarkedObjectSize = 0; | |
| 1748 | |
| 1524 } // namespace blink | 1749 } // namespace blink |
| OLD | NEW |