Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 41 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 52 #include "wtf/Partitions.h" | 52 #include "wtf/Partitions.h" |
| 53 | 53 |
| 54 namespace blink { | 54 namespace blink { |
| 55 | 55 |
| 56 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr; | 56 HeapAllocHooks::AllocationHook* HeapAllocHooks::m_allocationHook = nullptr; |
| 57 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr; | 57 HeapAllocHooks::FreeHook* HeapAllocHooks::m_freeHook = nullptr; |
| 58 | 58 |
| 59 class ParkThreadsScope final { | 59 class ParkThreadsScope final { |
| 60 STACK_ALLOCATED(); | 60 STACK_ALLOCATED(); |
| 61 public: | 61 public: |
| 62 ParkThreadsScope() | 62 ParkThreadsScope(ThreadState* state) |
| 63 : m_shouldResumeThreads(false) | 63 : m_state(state) |
| 64 , m_shouldResumeThreads(false) | |
| 64 { | 65 { |
| 65 } | 66 } |
| 66 | 67 |
| 67 bool parkThreads(ThreadState* state) | 68 bool parkThreads() |
| 68 { | 69 { |
| 69 TRACE_EVENT0("blink_gc", "Heap::ParkThreadsScope"); | 70 TRACE_EVENT0("blink_gc", "Heap::ParkThreadsScope"); |
| 70 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | 71 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
| 71 if (state->isMainThread()) | 72 if (m_state->isMainThread()) |
| 72 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | 73 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); |
| 73 | 74 |
| 74 // TODO(haraken): In an unlikely coincidence that two threads decide | 75 // TODO(haraken): In an unlikely coincidence that two threads decide |
| 75 // to collect garbage at the same time, avoid doing two GCs in | 76 // to collect garbage at the same time, avoid doing two GCs in |
| 76 // a row and return false. | 77 // a row and return false. |
| 77 double startTime = WTF::currentTimeMS(); | 78 double startTime = WTF::currentTimeMS(); |
| 78 | 79 |
| 79 m_shouldResumeThreads = ThreadState::stopThreads(); | 80 m_shouldResumeThreads = m_state->heap().park(); |
| 80 | 81 |
| 81 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; | 82 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; |
| 82 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsHistogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50)); | 83 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsHistogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50)); |
| 83 timeToStopThreadsHistogram.count(timeForStoppingThreads); | 84 timeToStopThreadsHistogram.count(timeForStoppingThreads); |
| 84 | 85 |
| 85 if (state->isMainThread()) | 86 if (m_state->isMainThread()) |
| 86 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | 87 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
| 87 return m_shouldResumeThreads; | 88 return m_shouldResumeThreads; |
| 88 } | 89 } |
| 89 | 90 |
| 90 ~ParkThreadsScope() | 91 ~ParkThreadsScope() |
| 91 { | 92 { |
| 92 // Only cleanup if we parked all threads in which case the GC happened | 93 // Only cleanup if we parked all threads in which case the GC happened |
| 93 // and we need to resume the other threads. | 94 // and we need to resume the other threads. |
| 94 if (m_shouldResumeThreads) | 95 if (m_shouldResumeThreads) |
| 95 ThreadState::resumeThreads(); | 96 m_state->heap().resume(); |
| 96 } | 97 } |
| 97 | 98 |
| 98 private: | 99 private: |
| 100 ThreadState* m_state; | |
| 99 bool m_shouldResumeThreads; | 101 bool m_shouldResumeThreads; |
| 100 }; | 102 }; |
| 101 | 103 |
| 102 void Heap::flushHeapDoesNotContainCache() | 104 GCHeapStats::GCHeapStats() |
| 105 : m_allocatedSpace(0) | |
| 106 , m_allocatedObjectSize(0) | |
| 107 , m_objectSizeAtLastGC(0) | |
| 108 , m_markedObjectSize(0) | |
| 109 , m_markedObjectSizeAtLastCompleteSweep(0) | |
| 110 , m_wrapperCount(0) | |
| 111 , m_wrapperCountAtLastGC(0) | |
| 112 , m_collectedWrapperCount(0) | |
| 113 , m_partitionAllocSizeAtLastGC(WTF::Partitions::totalSizeOfCommittedPages()) | |
| 114 , m_estimatedMarkingTimePerByte(0.0) | |
| 103 { | 115 { |
| 104 s_heapDoesNotContainCache->flush(); | |
| 105 } | 116 } |
| 106 | 117 |
| 107 void Heap::init() | 118 double GCHeapStats::estimatedMarkingTime() |
| 108 { | 119 { |
| 109 ThreadState::init(); | 120 // Use 8 ms as initial estimated marking time. |
| 110 s_markingStack = new CallbackStack(); | 121 // 8 ms is long enough for low-end mobile devices to mark common |
| 111 s_postMarkingCallbackStack = new CallbackStack(); | 122 // real-world object graphs. |
| 112 s_globalWeakCallbackStack = new CallbackStack(); | 123 if (m_estimatedMarkingTimePerByte == 0) |
| 113 // Use smallest supported block size for ephemerons. | 124 return 0.008; |
| 114 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize); | |
| 115 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | |
| 116 s_freePagePool = new FreePagePool(); | |
| 117 s_orphanedPagePool = new OrphanedPagePool(); | |
| 118 s_allocatedSpace = 0; | |
| 119 s_allocatedObjectSize = 0; | |
| 120 s_objectSizeAtLastGC = 0; | |
| 121 s_markedObjectSize = 0; | |
| 122 s_markedObjectSizeAtLastCompleteSweep = 0; | |
| 123 s_wrapperCount = 0; | |
| 124 s_wrapperCountAtLastGC = 0; | |
| 125 s_collectedWrapperCount = 0; | |
| 126 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 127 s_estimatedMarkingTimePerByte = 0.0; | |
| 128 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); | |
| 129 #if ENABLE(ASSERT) | |
| 130 s_gcGeneration = 1; | |
| 131 #endif | |
| 132 | 125 |
| 133 GCInfoTable::init(); | 126 // Assuming that the collection rate of this GC will be mostly equal to |
| 134 | 127 // the collection rate of the last GC, estimate the marking time of this GC. |
| 135 if (Platform::current() && Platform::current()->currentThread()) | 128 return m_estimatedMarkingTimePerByte * (allocatedObjectSize() + markedObjectSize()); |
| 136 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); | |
| 137 } | 129 } |
| 138 | 130 |
| 139 void Heap::shutdown() | 131 void GCHeapStats::reset() |
| 140 { | 132 { |
| 141 if (Platform::current() && Platform::current()->currentThread()) | 133 m_objectSizeAtLastGC = m_allocatedObjectSize + m_markedObjectSize; |
| 142 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | 135 m_allocatedObjectSize = 0; |
| 143 s_shutdownCalled = true; | 135 m_allocatedObjectSize = 0; |
| 144 ThreadState::shutdownHeapIfNecessary(); | 136 m_markedObjectSize = 0; |
| 137 m_wrapperCountAtLastGC = m_wrapperCount; | |
| 138 m_collectedWrapperCount = 0; | |
| 145 } | 139 } |
| 146 | 140 |
| 147 void Heap::doShutdown() | 141 void GCHeapStats::increaseAllocatedObjectSize(size_t delta) |
| 148 { | 142 { |
| 149 // We don't want to call doShutdown() twice. | 143 atomicAdd(&m_allocatedObjectSize, static_cast<long>(delta)); |
| 150 if (!s_markingStack) | 144 Heap::increaseTotalAllocatedObjectSize(delta); |
| 151 return; | |
| 152 | |
| 153 ASSERT(!ThreadState::attachedThreads().size()); | |
| 154 delete s_heapDoesNotContainCache; | |
| 155 s_heapDoesNotContainCache = nullptr; | |
| 156 delete s_freePagePool; | |
| 157 s_freePagePool = nullptr; | |
| 158 delete s_orphanedPagePool; | |
| 159 s_orphanedPagePool = nullptr; | |
| 160 delete s_globalWeakCallbackStack; | |
| 161 s_globalWeakCallbackStack = nullptr; | |
| 162 delete s_postMarkingCallbackStack; | |
| 163 s_postMarkingCallbackStack = nullptr; | |
| 164 delete s_markingStack; | |
| 165 s_markingStack = nullptr; | |
| 166 delete s_ephemeronStack; | |
| 167 s_ephemeronStack = nullptr; | |
| 168 delete s_regionTree; | |
| 169 s_regionTree = nullptr; | |
| 170 GCInfoTable::shutdown(); | |
| 171 ThreadState::shutdown(); | |
| 172 ASSERT(Heap::allocatedSpace() == 0); | |
| 173 } | 145 } |
| 174 | 146 |
| 175 CrossThreadPersistentRegion& Heap::crossThreadPersistentRegion() | 147 void GCHeapStats::decreaseAllocatedObjectSize(size_t delta) |
| 176 { | 148 { |
| 177 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); | 149 atomicSubtract(&m_allocatedObjectSize, static_cast<long>(delta)); |
| 178 return persistentRegion; | 150 Heap::decreaseTotalAllocatedObjectSize(delta); |
| 151 } | |
| 152 | |
| 153 void GCHeapStats::increaseMarkedObjectSize(size_t delta) | |
| 154 { | |
| 155 atomicAdd(&m_markedObjectSize, static_cast<long>(delta)); | |
| 156 Heap::increaseTotalMarkedObjectSize(delta); | |
| 157 } | |
| 158 | |
| 159 void GCHeapStats::increaseAllocatedSpace(size_t delta) | |
| 160 { | |
| 161 atomicAdd(&m_allocatedSpace, static_cast<long>(delta)); | |
| 162 Heap::increaseTotalAllocatedSpace(delta); | |
| 163 } | |
| 164 | |
| 165 void GCHeapStats::decreaseAllocatedSpace(size_t delta) | |
| 166 { | |
| 167 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); | |
| 168 Heap::decreaseTotalAllocatedSpace(delta); | |
| 169 } | |
| 170 | |
| 171 Heap::Heap() | |
| 172 : m_regionTree(nullptr) | |
| 173 , m_heapDoesNotContainCache(adoptPtr(new HeapDoesNotContainCache)) | |
| 174 , m_safePointBarrier(adoptPtr(new SafePointBarrier(this))) | |
| 175 , m_freePagePool(adoptPtr(new FreePagePool)) | |
| 176 , m_orphanedPagePool(adoptPtr(new OrphanedPagePool)) | |
| 177 , m_markingStack(adoptPtr(new CallbackStack())) | |
| 178 , m_postMarkingCallbackStack(adoptPtr(new CallbackStack())) | |
| 179 , m_globalWeakCallbackStack(adoptPtr(new CallbackStack())) | |
| 180 , m_ephemeronStack(adoptPtr(new CallbackStack(CallbackStack::kMinimalBlockSize))) | |
| 181 { | |
| 182 MutexLocker locker(Heap::heapAttachMutex()); | |
| 183 all().add(this); | |
| 184 } | |
| 185 | |
| 186 Heap::~Heap() | |
| 187 { | |
| 188 MutexLocker locker(Heap::heapAttachMutex()); | |
| 189 all().remove(this); | |
| 190 } | |
| 191 | |
| 192 void Heap::attach(ThreadState* thread) | |
| 193 { | |
| 194 MutexLocker locker(m_threadAttachMutex); | |
| 195 m_threads.add(thread); | |
| 196 } | |
| 197 | |
| 198 void Heap::detach(ThreadState* thread) | |
| 199 { | |
| 200 { | |
| 201 SafePointAwareMutexLocker locker(m_threadAttachMutex, BlinkGC::NoHeapPointersOnStack); | |
| 202 thread->cleanup(); | |
| 203 ASSERT(m_threads.contains(thread)); | |
| 204 m_threads.remove(thread); | |
| 205 } | |
| 206 if (m_threads.isEmpty()) { | |
| 207 delete this; | |
| 208 Heap::doShutdownIfNecessary(); | |
| 209 } | |
| 210 } | |
| 211 | |
| 212 void Heap::lockThreadAttachMutex() | |
| 213 { | |
| 214 m_threadAttachMutex.lock(); | |
| 215 } | |
| 216 | |
| 217 void Heap::unlockThreadAttachMutex() | |
| 218 { | |
| 219 m_threadAttachMutex.unlock(); | |
| 220 } | |
| 221 | |
| 222 bool Heap::park() | |
| 223 { | |
| 224 return m_safePointBarrier->parkOthers(); | |
| 225 } | |
| 226 | |
| 227 void Heap::resume() | |
| 228 { | |
| 229 m_safePointBarrier->resumeOthers(); | |
| 179 } | 230 } |
| 180 | 231 |
| 181 #if ENABLE(ASSERT) | 232 #if ENABLE(ASSERT) |
| 233 bool Heap::isAtSafePoint() const | |
| 234 { | |
| 235 for (ThreadState* state : m_threads) { | |
| 236 if (!state->isAtSafePoint()) | |
| 237 return false; | |
| 238 } | |
| 239 return true; | |
| 240 } | |
| 241 | |
| 182 BasePage* Heap::findPageFromAddress(Address address) | 242 BasePage* Heap::findPageFromAddress(Address address) |
| 183 { | 243 { |
| 184 MutexLocker lock(ThreadState::threadAttachMutex()); | 244 for (ThreadState* state : m_threads) { |
| 185 for (ThreadState* state : ThreadState::attachedThreads()) { | |
| 186 if (BasePage* page = state->findPageFromAddress(address)) | 245 if (BasePage* page = state->findPageFromAddress(address)) |
| 187 return page; | 246 return page; |
| 188 } | 247 } |
| 189 return nullptr; | 248 return nullptr; |
| 190 } | 249 } |
| 191 #endif | 250 #endif |
| 192 | 251 |
| 193 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 252 void Heap::preGC() |
| 194 { | 253 { |
| 195 ASSERT(ThreadState::current()->isInGC()); | 254 for (ThreadState* state : m_threads) { |
| 196 | 255 state->preGC(); |
| 197 #if !ENABLE(ASSERT) | 256 } |
| 198 if (s_heapDoesNotContainCache->lookup(address)) | 257 } |
| 258 | |
| 259 void Heap::postGC(BlinkGC::GCType gcType) | |
| 260 { | |
| 261 for (ThreadState* state : m_threads) { | |
| 262 state->postGC(gcType); | |
| 263 } | |
| 264 } | |
| 265 | |
| 266 size_t Heap::objectPayloadSizeForTesting() | |
| 267 { | |
| 268 size_t objectPayloadSize = 0; | |
| 269 for (ThreadState* state : m_threads) { | |
| 270 state->setGCState(ThreadState::GCRunning); | |
| 271 state->makeConsistentForGC(); | |
| 272 objectPayloadSize += state->objectPayloadSizeForTesting(); | |
| 273 state->setGCState(ThreadState::EagerSweepScheduled); | |
| 274 state->setGCState(ThreadState::Sweeping); | |
| 275 state->setGCState(ThreadState::NoGCScheduled); | |
| 276 } | |
| 277 return objectPayloadSize; | |
| 278 } | |
| 279 | |
| 280 void Heap::visitPersistentRoots(Visitor* visitor) | |
| 281 { | |
| 282 TRACE_EVENT0("blink_gc", "ThreadState::visitPersistentRoots"); | |
| 283 Heap::crossThreadPersistentRegion().tracePersistentNodes(visitor); | |
| 284 | |
| 285 for (ThreadState* state : m_threads) { | |
| 286 state->visitPersistents(visitor); | |
| 287 } | |
| 288 } | |
| 289 | |
| 290 void Heap::visitStackRoots(Visitor* visitor) | |
| 291 { | |
| 292 TRACE_EVENT0("blink_gc", "ThreadState::visitStackRoots"); | |
| 293 for (ThreadState* state : m_threads) { | |
| 294 state->visitStack(visitor); | |
| 295 } | |
| 296 } | |
| 297 | |
| 298 void Heap::checkAndPark(ThreadState* threadState, SafePointAwareMutexLocker* locker) | |
| 299 { | |
| 300 m_safePointBarrier->checkAndPark(threadState, locker); | |
| 301 } | |
| 302 | |
| 303 void Heap::enterSafePoint(ThreadState* threadState) | |
| 304 { | |
| 305 m_safePointBarrier->enterSafePoint(threadState); | |
| 306 } | |
| 307 | |
| 308 void Heap::leaveSafePoint(ThreadState* threadState, SafePointAwareMutexLocker* locker) | |
| 309 { | |
| 310 m_safePointBarrier->leaveSafePoint(threadState, locker); | |
| 311 } | |
| 312 | |
| 313 void Heap::flushHeapDoesNotContainCache() | |
| 314 { | |
| 315 m_heapDoesNotContainCache->flush(); | |
| 316 } | |
| 317 | |
| 318 BasePage* Heap::lookupPageForAddress(Address address) | |
| 319 { | |
| 320 ASSERT(ThreadState::current()->isInGC()); | |
| 321 if (!m_regionTree) | |
| 199 return nullptr; | 322 return nullptr; |
| 200 #endif | 323 if (PageMemoryRegion* region = m_regionTree->lookup(address)) { |
| 201 | 324 BasePage* page = region->pageFromAddress(address); |
| 202 if (BasePage* page = lookup(address)) { | 325 return page && !page->orphaned() ? page : nullptr; |
| 203 ASSERT(page->contains(address)); | 326 } |
| 204 ASSERT(!page->orphaned()); | |
| 205 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | |
| 206 page->checkAndMarkPointer(visitor, address); | |
| 207 return address; | |
| 208 } | |
| 209 | |
| 210 #if !ENABLE(ASSERT) | |
| 211 s_heapDoesNotContainCache->addEntry(address); | |
| 212 #else | |
| 213 if (!s_heapDoesNotContainCache->lookup(address)) | |
| 214 s_heapDoesNotContainCache->addEntry(address); | |
| 215 #endif | |
| 216 return nullptr; | 327 return nullptr; |
| 217 } | 328 } |
| 218 | 329 |
| 219 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 330 void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| 220 { | 331 { |
| 221 ASSERT(ThreadState::current()->isInGC()); | 332 MutexLocker locker(m_regionTreeMutex); |
| 222 | 333 RegionTree::add(new RegionTree(region), &m_regionTree); |
| 223 // Trace should never reach an orphaned page. | 334 } |
| 224 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 335 |
| 225 CallbackStack::Item* slot = s_markingStack->allocateEntry(); | 336 void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| 226 *slot = CallbackStack::Item(object, callback); | 337 { |
| 338 // Deletion of large objects (and thus their regions) can happen | |
| 339 // concurrently on sweeper threads. Removal can also happen during thread | |
| 340 // shutdown, but that case is safe. Regardless, we make all removals | |
| 341 // mutually exclusive. | |
| 342 MutexLocker locker(m_regionTreeMutex); | |
| 343 RegionTree::remove(region, &m_regionTree); | |
| 344 } | |
| 345 | |
| 346 void Heap::pushTraceCallback(void* containerObject, TraceCallback callback) | |
| 347 { | |
| 348 ASSERT(ThreadState::current()->isInGC()); | |
| 349 | |
| 350 // Trace should never reach an orphaned page. | |
| 351 ASSERT(!orphanedPagePool()->contains(containerObject)); | |
| 352 CallbackStack::Item* slot = m_markingStack->allocateEntry(); | |
| 353 *slot = CallbackStack::Item(containerObject, callback); | |
| 227 } | 354 } |
| 228 | 355 |
| 229 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 356 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
| 230 { | 357 { |
| 231 CallbackStack::Item* item = s_markingStack->pop(); | 358 CallbackStack::Item* item = m_markingStack->pop(); |
| 232 if (!item) | 359 if (!item) |
| 233 return false; | 360 return false; |
| 234 item->call(visitor); | 361 item->call(visitor); |
| 235 return true; | 362 return true; |
| 236 } | 363 } |
| 237 | 364 |
| 238 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) | 365 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) |
| 239 { | 366 { |
| 240 ASSERT(ThreadState::current()->isInGC()); | 367 ASSERT(ThreadState::current()->isInGC()); |
| 241 | 368 |
| 242 // Trace should never reach an orphaned page. | 369 // Trace should never reach an orphaned page. |
| 243 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 370 ASSERT(!orphanedPagePool()->contains(cell)); |
| 244 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); | 371 CallbackStack::Item* slot = m_globalWeakCallbackStack->allocateEntry(); |
| 245 *slot = CallbackStack::Item(object, callback); | 372 *slot = CallbackStack::Item(cell, callback); |
| 246 } | 373 } |
| 247 | 374 |
| 248 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) | 375 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
| 249 { | 376 { |
| 250 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { | 377 if (CallbackStack::Item* item = m_globalWeakCallbackStack->pop()) { |
| 251 item->call(visitor); | 378 item->call(visitor); |
| 252 return true; | 379 return true; |
| 253 } | 380 } |
| 254 return false; | 381 return false; |
| 255 } | 382 } |
| 256 | 383 |
| 257 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) | |
| 258 { | |
| 259 ASSERT(ThreadState::current()->isInGC()); | |
| 260 | |
| 261 // Trace should never reach an orphaned page. | |
| 262 ASSERT(!Heap::orphanedPagePool()->contains(cell)); | |
| 263 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); | |
| 264 *slot = CallbackStack::Item(cell, callback); | |
| 265 } | |
| 266 | |
| 267 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | |
| 268 { | |
| 269 ASSERT(ThreadState::current()->isInGC()); | |
| 270 | |
| 271 // Trace should never reach an orphaned page. | |
| 272 ASSERT(!Heap::orphanedPagePool()->contains(object)); | |
| 273 ThreadState* state = pageFromObject(object)->heap()->threadState(); | |
| 274 state->pushThreadLocalWeakCallback(closure, callback); | |
| 275 } | |
| 276 | |
| 277 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) | |
| 278 { | |
| 279 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { | |
| 280 item->call(visitor); | |
| 281 return true; | |
| 282 } | |
| 283 return false; | |
| 284 } | |
| 285 | |
| 286 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 384 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
| 287 { | 385 { |
| 288 ASSERT(ThreadState::current()->isInGC()); | 386 ASSERT(ThreadState::current()->isInGC()); |
| 289 | 387 |
| 290 // Trace should never reach an orphaned page. | 388 // Trace should never reach an orphaned page. |
| 291 ASSERT(!Heap::orphanedPagePool()->contains(table)); | 389 ASSERT(!orphanedPagePool()->contains(table)); |
| 292 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); | 390 CallbackStack::Item* slot = m_ephemeronStack->allocateEntry(); |
| 293 *slot = CallbackStack::Item(table, iterationCallback); | 391 *slot = CallbackStack::Item(table, iterationCallback); |
| 294 | 392 |
| 295 // Register a post-marking callback to tell the tables that | 393 // Register a post-marking callback to tell the tables that |
| 296 // ephemeron iteration is complete. | 394 // ephemeron iteration is complete. |
| 297 pushPostMarkingCallback(table, iterationDoneCallback); | 395 pushPostMarkingCallback(table, iterationDoneCallback); |
| 298 } | 396 } |
| 299 | 397 |
| 300 #if ENABLE(ASSERT) | 398 #if ENABLE(ASSERT) |
| 301 bool Heap::weakTableRegistered(const void* table) | 399 bool Heap::weakTableRegistered(const void* table) |
| 302 { | 400 { |
| 303 ASSERT(s_ephemeronStack); | 401 ASSERT(m_ephemeronStack); |
| 304 return s_ephemeronStack->hasCallbackForObject(table); | 402 return m_ephemeronStack->hasCallbackForObject(table); |
| 305 } | 403 } |
| 306 #endif | 404 #endif |
| 307 | 405 |
| 308 void Heap::decommitCallbackStacks() | 406 void Heap::decommitCallbackStacks() |
| 309 { | 407 { |
| 310 s_markingStack->decommit(); | 408 m_markingStack->decommit(); |
| 311 s_postMarkingCallbackStack->decommit(); | 409 m_postMarkingCallbackStack->decommit(); |
| 312 s_globalWeakCallbackStack->decommit(); | 410 m_globalWeakCallbackStack->decommit(); |
| 313 s_ephemeronStack->decommit(); | 411 m_ephemeronStack->decommit(); |
| 314 } | 412 } |
| 315 | 413 |
| 316 void Heap::preGC() | 414 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 317 { | 415 { |
| 318 ASSERT(!ThreadState::current()->isInGC()); | 416 ASSERT(ThreadState::current()->isInGC()); |
| 319 for (ThreadState* state : ThreadState::attachedThreads()) | 417 #if !ENABLE(ASSERT) |
| 320 state->preGC(); | 418 if (m_heapDoesNotContainCache->lookup(address)) |
| 321 } | 419 return nullptr; |
| 322 | 420 #endif |
| 323 void Heap::postGC(BlinkGC::GCType gcType) | 421 |
| 324 { | 422 if (BasePage* page = lookupPageForAddress(address)) { |
| 325 ASSERT(ThreadState::current()->isInGC()); | 423 ASSERT(page->contains(address)); |
| 326 for (ThreadState* state : ThreadState::attachedThreads()) | 424 ASSERT(!page->orphaned()); |
| 327 state->postGC(gcType); | 425 ASSERT(!m_heapDoesNotContainCache->lookup(address)); |
| 426 page->checkAndMarkPointer(visitor, address); | |
| 427 return address; | |
| 428 } | |
| 429 | |
| 430 #if !ENABLE(ASSERT) | |
| 431 m_heapDoesNotContainCache->addEntry(address); | |
| 432 #else | |
| 433 if (!m_heapDoesNotContainCache->lookup(address)) | |
| 434 m_heapDoesNotContainCache->addEntry(address); | |
| 435 #endif | |
| 436 return nullptr; | |
| 437 } | |
| 438 | |
| 439 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) | |
| 440 { | |
| 441 ASSERT(ThreadState::current()->isInGC()); | |
|
haraken
2016/02/29 11:17:45
ThreadState::current() => state() throughout this
keishi
2016/03/02 06:01:03
? Heap doesn't have state()
| |
| 442 | |
| 443 // Trace should never reach an orphaned page. | |
| 444 ASSERT(!orphanedPagePool()->contains(object)); | |
| 445 CallbackStack::Item* slot = m_postMarkingCallbackStack->allocateEntry(); | |
| 446 *slot = CallbackStack::Item(object, callback); | |
| 447 } | |
| 448 | |
| 449 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) | |
| 450 { | |
| 451 if (CallbackStack::Item* item = m_postMarkingCallbackStack->pop()) { | |
| 452 item->call(visitor); | |
| 453 return true; | |
| 454 } | |
| 455 return false; | |
| 456 } | |
| 457 | |
| 458 void Heap::processMarkingStack(Visitor* visitor) | |
| 459 { | |
| 460 // Ephemeron fixed point loop. | |
| 461 do { | |
| 462 { | |
| 463 // Iteratively mark all objects that are reachable from the objects | |
| 464 // currently pushed onto the marking stack. | |
| 465 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); | |
| 466 while (popAndInvokeTraceCallback(visitor)) { } | |
| 467 } | |
| 468 | |
| 469 { | |
| 470 // Mark any strong pointers that have now become reachable in | |
| 471 // ephemeron maps. | |
| 472 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); | |
| 473 m_ephemeronStack->invokeEphemeronCallbacks(visitor); | |
| 474 } | |
| 475 | |
| 476 // Rerun loop if ephemeron processing queued more objects for tracing. | |
| 477 } while (!m_markingStack->isEmpty()); | |
| 478 } | |
| 479 | |
| 480 void Heap::postMarkingProcessing(Visitor* visitor) | |
| 481 { | |
| 482 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); | |
| 483 // Call post-marking callbacks including: | |
| 484 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | |
| 485 // (specifically to clear the queued bits for weak hash tables), and | |
| 486 // 2. the markNoTracing callbacks on collection backings to mark them | |
| 487 // if they are only reachable from their front objects. | |
| 488 while (popAndInvokePostMarkingCallback(visitor)) { } | |
| 489 | |
| 490 // Post-marking callbacks should not trace any objects and | |
| 491 // therefore the marking stack should be empty after the | |
| 492 // post-marking callbacks. | |
| 493 ASSERT(m_markingStack->isEmpty()); | |
| 494 } | |
| 495 | |
| 496 void Heap::globalWeakProcessing(Visitor* visitor) | |
| 497 { | |
| 498 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); | |
| 499 double startTime = WTF::currentTimeMS(); | |
| 500 | |
| 501 // Call weak callbacks on objects that may now be pointing to dead objects. | |
| 502 while (popAndInvokeGlobalWeakCallback(visitor)) { } | |
| 503 | |
| 504 // It is not permitted to trace pointers of live objects in the weak | |
| 505 // callback phase, so the marking stack should still be empty here. | |
| 506 ASSERT(m_markingStack->isEmpty()); | |
| 507 | |
| 508 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | |
| 509 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakPrcessing", 1, 10 * 1000, 50)); | |
| 510 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); | |
| 511 } | |
| 512 | |
| 513 void Heap::resetHeapCounters() | |
| 514 { | |
| 515 ASSERT(ThreadState::current()->isInGC()); | |
| 516 | |
| 517 Heap::reportMemoryUsageForTracing(); | |
| 518 | |
| 519 m_stats.reset(); | |
| 520 { | |
| 521 MutexLocker locker(m_threadAttachMutex); | |
| 522 for (ThreadState* state : m_threads) | |
| 523 state->resetHeapCounters(); | |
| 524 } | |
| 525 } | |
| 526 | |
| 527 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | |
| 528 { | |
| 529 ASSERT(ThreadState::current()->isInGC()); | |
| 530 | |
| 531 // Trace should never reach an orphaned page. | |
| 532 ASSERT(!orphanedPagePool()->contains(object)); | |
| 533 ThreadState* state = pageFromObject(object)->arena()->threadState(); | |
| 534 state->pushThreadLocalWeakCallback(closure, callback); | |
| 535 } | |
| 536 | |
| 537 void Heap::init() | |
| 538 { | |
| 539 ThreadState::init(); | |
| 540 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); | |
| 541 | |
| 542 GCInfoTable::init(); | |
| 543 | |
| 544 if (Platform::current() && Platform::current()->currentThread()) | |
| 545 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); | |
| 546 } | |
| 547 | |
| 548 void Heap::shutdown() | |
| 549 { | |
| 550 if (Platform::current() && Platform::current()->currentThread()) | |
| 551 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | |
| 552 | |
| 553 s_shutdownCalled = true; | |
| 554 doShutdownIfNecessary(); | |
| 555 } | |
| 556 | |
| 557 void Heap::doShutdownIfNecessary() | |
|
haraken
2016/02/29 11:17:45
BTW, I'm wondering if we still need the complicate
| |
| 558 { | |
| 559 MutexLocker locker(Heap::heapAttachMutex()); | |
| 560 if (!s_shutdownCalled || !all().isEmpty()) | |
|
haraken
2016/02/29 11:17:45
Checking all().isEmpty() here isn't quite correct.
| |
| 561 return; | |
| 562 | |
| 563 // We don't want to shutdown twice. | |
| 564 if (s_shutdownComplete) | |
| 565 return; | |
| 566 | |
| 567 GCInfoTable::shutdown(); | |
|
haraken
2016/02/29 11:17:45
This must be executed once per process.
| |
| 568 ThreadState::shutdown(); | |
|
haraken
2016/02/29 11:17:45
This must be executed once per ThreadState.
| |
| 569 ASSERT(Heap::totalAllocatedSpace() == 0); | |
|
haraken
2016/02/29 11:17:45
This must be executed once per process.
Also mayb
| |
| 570 s_shutdownComplete = true; | |
| 571 } | |
| 572 | |
| 573 HashSet<Heap*>& Heap::all() | |
| 574 { | |
| 575 DEFINE_STATIC_LOCAL(HashSet<Heap*>, heaps, ()); | |
| 576 return heaps; | |
| 577 } | |
| 578 | |
| 579 RecursiveMutex& Heap::heapAttachMutex() | |
| 580 { | |
| 581 DEFINE_THREAD_SAFE_STATIC_LOCAL(RecursiveMutex, mutex, (new RecursiveMutex)); | |
| 582 return mutex; | |
| 583 } | |
| 584 | |
| 585 CrossThreadPersistentRegion& Heap::crossThreadPersistentRegion() | |
|
haraken
2016/02/29 11:17:45
This is also per-process stuff.
| |
| 586 { | |
| 587 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); | |
| 588 return persistentRegion; | |
| 328 } | 589 } |
| 329 | 590 |
| 330 const char* Heap::gcReasonString(BlinkGC::GCReason reason) | 591 const char* Heap::gcReasonString(BlinkGC::GCReason reason) |
| 331 { | 592 { |
| 332 switch (reason) { | 593 switch (reason) { |
| 333 case BlinkGC::IdleGC: | 594 case BlinkGC::IdleGC: |
| 334 return "IdleGC"; | 595 return "IdleGC"; |
| 335 case BlinkGC::PreciseGC: | 596 case BlinkGC::PreciseGC: |
| 336 return "PreciseGC"; | 597 return "PreciseGC"; |
| 337 case BlinkGC::ConservativeGC: | 598 case BlinkGC::ConservativeGC: |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 355 ThreadState* state = ThreadState::current(); | 616 ThreadState* state = ThreadState::current(); |
| 356 // Nested collectGarbage() invocations aren't supported. | 617 // Nested collectGarbage() invocations aren't supported. |
| 357 RELEASE_ASSERT(!state->isGCForbidden()); | 618 RELEASE_ASSERT(!state->isGCForbidden()); |
| 358 state->completeSweep(); | 619 state->completeSweep(); |
| 359 | 620 |
| 360 VisitorScope visitorScope(state, gcType); | 621 VisitorScope visitorScope(state, gcType); |
| 361 | 622 |
| 362 SafePointScope safePointScope(stackState, state); | 623 SafePointScope safePointScope(stackState, state); |
| 363 | 624 |
| 364 // Resume all parked threads upon leaving this scope. | 625 // Resume all parked threads upon leaving this scope. |
| 365 ParkThreadsScope parkThreadsScope; | 626 ParkThreadsScope parkThreadsScope(state); |
| 366 | 627 |
| 367 // Try to park the other threads. If we're unable to, bail out of the GC. | 628 // Try to park the other threads. If we're unable to, bail out of the GC. |
| 368 if (!parkThreadsScope.parkThreads(state)) | 629 if (!parkThreadsScope.parkThreads()) |
| 369 return; | 630 return; |
| 370 | 631 |
| 371 ScriptForbiddenIfMainThreadScope scriptForbidden; | 632 ScriptForbiddenIfMainThreadScope scriptForbidden; |
| 372 | 633 |
| 373 TRACE_EVENT2("blink_gc,devtools.timeline", "Heap::collectGarbage", | 634 TRACE_EVENT2("blink_gc,devtools.timeline", "Heap::collectGarbage", |
| 374 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, | 635 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, |
| 375 "gcReason", gcReasonString(reason)); | 636 "gcReason", gcReasonString(reason)); |
| 376 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 637 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
| 377 double startTime = WTF::currentTimeMS(); | 638 double startTime = WTF::currentTimeMS(); |
| 378 | 639 |
| 379 if (gcType == BlinkGC::TakeSnapshot) | 640 if (gcType == BlinkGC::TakeSnapshot) |
| 380 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 641 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 381 | 642 |
| 382 // Disallow allocation during garbage collection (but not during the | 643 // Disallow allocation during garbage collection (but not during the |
| 383 // finalization that happens when the visitorScope is torn down). | 644 // finalization that happens when the visitorScope is torn down). |
| 384 ThreadState::NoAllocationScope noAllocationScope(state); | 645 ThreadState::NoAllocationScope noAllocationScope(state); |
| 385 | 646 |
| 386 preGC(); | 647 state->heap().preGC(); |
| 387 | 648 |
| 388 StackFrameDepthScope stackDepthScope; | 649 StackFrameDepthScope stackDepthScope; |
| 389 | 650 |
| 390 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSiz e(); | 651 size_t totalObjectSize = state->heap().heapStats().allocatedObjectSize() + s tate->heap().heapStats().markedObjectSize(); |
| 391 if (gcType != BlinkGC::TakeSnapshot) | 652 if (gcType != BlinkGC::TakeSnapshot) |
| 392 Heap::resetHeapCounters(); | 653 state->heap().resetHeapCounters(); |
| 393 | 654 |
| 394 // 1. Trace persistent roots. | 655 // 1. Trace persistent roots. |
| 395 ThreadState::visitPersistentRoots(visitorScope.visitor()); | 656 state->heap().visitPersistentRoots(visitorScope.visitor()); |
| 396 | 657 |
| 397 // 2. Trace objects reachable from the stack. We do this independent of the | 658 // 2. Trace objects reachable from the stack. We do this independent of the |
| 398 // given stackState since other threads might have a different stack state. | 659 // given stackState since other threads might have a different stack state. |
| 399 ThreadState::visitStackRoots(visitorScope.visitor()); | 660 state->heap().visitStackRoots(visitorScope.visitor()); |
| 400 | 661 |
| 401 // 3. Transitive closure to trace objects including ephemerons. | 662 // 3. Transitive closure to trace objects including ephemerons. |
| 402 processMarkingStack(visitorScope.visitor()); | 663 state->heap().processMarkingStack(visitorScope.visitor()); |
| 403 | 664 |
| 404 postMarkingProcessing(visitorScope.visitor()); | 665 state->heap().postMarkingProcessing(visitorScope.visitor()); |
| 405 globalWeakProcessing(visitorScope.visitor()); | 666 state->heap().globalWeakProcessing(visitorScope.visitor()); |
| 406 | 667 |
| 407 // Now we can delete all orphaned pages because there are no dangling | 668 // Now we can delete all orphaned pages because there are no dangling |
| 408 // pointers to the orphaned pages. (If we have such dangling pointers, | 669 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 409 // we should have crashed during marking before getting here.) | 670 // we should have crashed during marking before getting here.) |
| 410 orphanedPagePool()->decommitOrphanedPages(); | 671 state->heap().orphanedPagePool()->decommitOrphanedPages(); |
| 411 | 672 |
| 412 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; | 673 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
| 413 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; | 674 state->heap().heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? ( markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
| 414 | 675 |
| 415 #if PRINT_HEAP_STATS | 676 #if PRINT_HEAP_STATS |
| 416 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\ n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill iseconds); | 677 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\ n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill iseconds); |
| 417 #endif | 678 #endif |
| 418 | 679 |
| 419 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); | 680 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); |
| 420 markingTimeHistogram.count(markingTimeInMilliseconds); | 681 markingTimeHistogram.count(markingTimeInMilliseconds); |
| 421 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistog ram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50 )); | 682 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistog ram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50 )); |
| 422 totalObjectSpaceHistogram.count(Heap::allocatedObjectSize() / 1024); | 683 totalObjectSpaceHistogram.count(Heap::totalAllocatedObjectSize() / 1024); |
| 423 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHis togram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 10 24, 50)); | 684 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHis togram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 10 24, 50)); |
| 424 totalAllocatedSpaceHistogram.count(Heap::allocatedSpace() / 1024); | 685 totalAllocatedSpaceHistogram.count(Heap::totalAllocatedSpace() / 1024); |
| 425 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); | 686 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); |
| 426 gcReasonHistogram.count(reason); | 687 gcReasonHistogram.count(reason); |
| 427 | 688 |
| 428 Heap::reportMemoryUsageHistogram(); | 689 Heap::reportMemoryUsageHistogram(); |
| 429 WTF::Partitions::reportMemoryUsageHistogram(); | 690 WTF::Partitions::reportMemoryUsageHistogram(); |
| 430 | 691 |
| 431 postGC(gcType); | 692 state->heap().postGC(gcType); |
| 432 Heap::decommitCallbackStacks(); | 693 state->heap().decommitCallbackStacks(); |
| 433 | |
| 434 #if ENABLE(ASSERT) | |
| 435 // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneratio n. | |
| 436 if (++s_gcGeneration == 0) { | |
| 437 s_gcGeneration = 1; | |
| 438 } | |
| 439 #endif | |
| 440 } | 694 } |
| 441 | 695 |
| 442 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 696 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 443 { | 697 { |
| 444 { | 698 { |
| 445 // A thread-specific termination GC must not allow other global GCs to g o | 699 // A thread-specific termination GC must not allow other global GCs to g o |
| 446 // ahead while it is running, hence the termination GC does not enter a | 700 // ahead while it is running, hence the termination GC does not enter a |
| 447 // safepoint. VisitorScope will not enter also a safepoint scope for | 701 // safepoint. VisitorScope will not enter also a safepoint scope for |
| 448 // ThreadTerminationGC. | 702 // ThreadTerminationGC. |
| 449 VisitorScope visitorScope(state, BlinkGC::ThreadTerminationGC); | 703 VisitorScope visitorScope(state, BlinkGC::ThreadTerminationGC); |
| 450 | 704 |
| 451 ThreadState::NoAllocationScope noAllocationScope(state); | 705 ThreadState::NoAllocationScope noAllocationScope(state); |
| 452 | 706 |
| 453 state->preGC(); | 707 state->preGC(); |
| 454 | 708 |
| 455 // 1. Trace the thread local persistent roots. For thread local GCs we | 709 // 1. Trace the thread local persistent roots. For thread local GCs we |
| 456 // don't trace the stack (ie. no conservative scanning) since this is | 710 // don't trace the stack (ie. no conservative scanning) since this is |
| 457 // only called during thread shutdown where there should be no objects | 711 // only called during thread shutdown where there should be no objects |
| 458 // on the stack. | 712 // on the stack. |
| 459 // We also assume that orphaned pages have no objects reachable from | 713 // We also assume that orphaned pages have no objects reachable from |
| 460 // persistent handles on other threads or CrossThreadPersistents. The | 714 // persistent handles on other threads or CrossThreadPersistents. The |
| 461 // only cases where this could happen is if a subsequent conservative | 715 // only cases where this could happen is if a subsequent conservative |
| 462 // global GC finds a "pointer" on the stack or due to a programming | 716 // global GC finds a "pointer" on the stack or due to a programming |
| 463 // error where an object has a dangling cross-thread pointer to an | 717 // error where an object has a dangling cross-thread pointer to an |
| 464 // object on this heap. | 718 // object on this heap. |
| 465 state->visitPersistents(visitorScope.visitor()); | 719 state->visitPersistents(visitorScope.visitor()); |
| 466 | 720 |
| 467 // 2. Trace objects reachable from the thread's persistent roots | 721 // 2. Trace objects reachable from the thread's persistent roots |
| 468 // including ephemerons. | 722 // including ephemerons. |
| 469 processMarkingStack(visitorScope.visitor()); | 723 state->heap().processMarkingStack(visitorScope.visitor()); |
| 470 | 724 |
| 471 postMarkingProcessing(visitorScope.visitor()); | 725 state->heap().postMarkingProcessing(visitorScope.visitor()); |
| 472 globalWeakProcessing(visitorScope.visitor()); | 726 state->heap().globalWeakProcessing(visitorScope.visitor()); |
| 473 | 727 |
| 474 state->postGC(BlinkGC::GCWithSweep); | 728 state->postGC(BlinkGC::GCWithSweep); |
| 475 Heap::decommitCallbackStacks(); | 729 state->heap().decommitCallbackStacks(); |
| 476 } | 730 } |
| 477 state->preSweep(); | 731 state->preSweep(); |
| 478 } | 732 } |
| 479 | 733 |
| 480 void Heap::processMarkingStack(Visitor* visitor) | |
| 481 { | |
| 482 // Ephemeron fixed point loop. | |
| 483 do { | |
| 484 { | |
| 485 // Iteratively mark all objects that are reachable from the objects | |
| 486 // currently pushed onto the marking stack. | |
| 487 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); | |
| 488 while (popAndInvokeTraceCallback(visitor)) { } | |
| 489 } | |
| 490 | |
| 491 { | |
| 492 // Mark any strong pointers that have now become reachable in | |
| 493 // ephemeron maps. | |
| 494 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); | |
| 495 s_ephemeronStack->invokeEphemeronCallbacks(visitor); | |
| 496 } | |
| 497 | |
| 498 // Rerun loop if ephemeron processing queued more objects for tracing. | |
| 499 } while (!s_markingStack->isEmpty()); | |
| 500 } | |
| 501 | |
| 502 void Heap::postMarkingProcessing(Visitor* visitor) | |
| 503 { | |
| 504 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); | |
| 505 // Call post-marking callbacks including: | |
| 506 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | |
| 507 // (specifically to clear the queued bits for weak hash tables), and | |
| 508 // 2. the markNoTracing callbacks on collection backings to mark them | |
| 509 // if they are only reachable from their front objects. | |
| 510 while (popAndInvokePostMarkingCallback(visitor)) { } | |
| 511 | |
| 512 // Post-marking callbacks should not trace any objects and | |
| 513 // therefore the marking stack should be empty after the | |
| 514 // post-marking callbacks. | |
| 515 ASSERT(s_markingStack->isEmpty()); | |
| 516 } | |
| 517 | |
| 518 void Heap::globalWeakProcessing(Visitor* visitor) | |
| 519 { | |
| 520 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); | |
| 521 double startTime = WTF::currentTimeMS(); | |
| 522 | |
| 523 // Call weak callbacks on objects that may now be pointing to dead objects. | |
| 524 while (popAndInvokeGlobalWeakCallback(visitor)) { } | |
| 525 | |
| 526 // It is not permitted to trace pointers of live objects in the weak | |
| 527 // callback phase, so the marking stack should still be empty here. | |
| 528 ASSERT(s_markingStack->isEmpty()); | |
| 529 | |
| 530 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | |
| 531 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogra m, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakPrcessing", 1, 10 * 1000, 50)); | |
| 532 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); | |
| 533 } | |
| 534 | |
| 535 void Heap::collectAllGarbage() | 734 void Heap::collectAllGarbage() |
| 536 { | 735 { |
| 537 // We need to run multiple GCs to collect a chain of persistent handles. | 736 // We need to run multiple GCs to collect a chain of persistent handles. |
| 538 size_t previousLiveObjects = 0; | 737 size_t previousLiveObjects = 0; |
| 539 for (int i = 0; i < 5; ++i) { | 738 for (int i = 0; i < 5; ++i) { |
| 540 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); | 739 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); |
| 541 size_t liveObjects = Heap::markedObjectSize(); | 740 size_t liveObjects = ThreadState::current()->heap().heapStats().markedOb jectSize(); |
| 542 if (liveObjects == previousLiveObjects) | 741 if (liveObjects == previousLiveObjects) |
| 543 break; | 742 break; |
| 544 previousLiveObjects = liveObjects; | 743 previousLiveObjects = liveObjects; |
| 545 } | 744 } |
| 546 } | 745 } |
| 547 | 746 |
| 548 double Heap::estimatedMarkingTime() | |
| 549 { | |
| 550 ASSERT(ThreadState::current()->isMainThread()); | |
| 551 | |
| 552 // Use 8 ms as initial estimated marking time. | |
| 553 // 8 ms is long enough for low-end mobile devices to mark common | |
| 554 // real-world object graphs. | |
| 555 if (s_estimatedMarkingTimePerByte == 0) | |
| 556 return 0.008; | |
| 557 | |
| 558 // Assuming that the collection rate of this GC will be mostly equal to | |
| 559 // the collection rate of the last GC, estimate the marking time of this GC. | |
| 560 return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap:: markedObjectSize()); | |
| 561 } | |
| 562 | |
| 563 void Heap::reportMemoryUsageHistogram() | 747 void Heap::reportMemoryUsageHistogram() |
| 564 { | 748 { |
| 565 static size_t supportedMaxSizeInMB = 4 * 1024; | 749 static size_t supportedMaxSizeInMB = 4 * 1024; |
| 566 static size_t observedMaxSizeInMB = 0; | 750 static size_t observedMaxSizeInMB = 0; |
| 567 | 751 |
| 568 // We only report the memory in the main thread. | 752 // We only report the memory in the main thread. |
| 569 if (!isMainThread()) | 753 if (!isMainThread()) |
| 570 return; | 754 return; |
| 571 // +1 is for rounding up the sizeInMB. | 755 // +1 is for rounding up the sizeInMB. |
| 572 size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1; | 756 size_t sizeInMB = ThreadState::current()->heap().heapStats().allocatedSpace( ) / 1024 / 1024 + 1; |
| 573 if (sizeInMB >= supportedMaxSizeInMB) | 757 if (sizeInMB >= supportedMaxSizeInMB) |
| 574 sizeInMB = supportedMaxSizeInMB - 1; | 758 sizeInMB = supportedMaxSizeInMB - 1; |
| 575 if (sizeInMB > observedMaxSizeInMB) { | 759 if (sizeInMB > observedMaxSizeInMB) { |
| 576 // Send a UseCounter only when we see the highest memory usage | 760 // Send a UseCounter only when we see the highest memory usage |
| 577 // we've ever seen. | 761 // we've ever seen. |
| 578 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistog ram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); | 762 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistog ram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); |
| 579 commitedSizeHistogram.count(sizeInMB); | 763 commitedSizeHistogram.count(sizeInMB); |
| 580 observedMaxSizeInMB = sizeInMB; | 764 observedMaxSizeInMB = sizeInMB; |
| 581 } | 765 } |
| 582 } | 766 } |
| 583 | 767 |
| 584 void Heap::reportMemoryUsageForTracing() | 768 void Heap::reportMemoryUsageForTracing() |
| 585 { | 769 { |
| 586 #if PRINT_HEAP_STATS | 770 #if PRINT_HEAP_STATS |
| 587 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1 024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); | 771 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\ n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1 024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); |
| 588 #endif | 772 #endif |
| 589 | 773 |
| 590 bool gcTracingEnabled; | 774 bool gcTracingEnabled; |
| 591 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 775 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
| 592 if (!gcTracingEnabled) | 776 if (!gcTracingEnabled) |
| 593 return; | 777 return; |
| 594 | 778 |
| 779 Heap& heap = ThreadState::current()->heap(); | |
| 595 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints). | 780 // These values are divided by 1024 to avoid overflow in practical cases (TR ACE_COUNTER values are 32-bit ints). |
| 596 // They are capped to INT_MAX just in case. | 781 // They are capped to INT_MAX just in case. |
| 597 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedObject SizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MA X))); | 782 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedObject SizeKB", std::min(heap.heapStats().allocatedObjectSize() / 1024, static_cast<siz e_t>(INT_MAX))); |
| 598 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | 783 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eKB", std::min(heap.heapStats().markedObjectSize() / 1024, static_cast<size_t>(I NT_MAX))); |
| 599 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); | 784 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz eAtLastCompleteSweepKB", std::min(heap.heapStats().markedObjectSizeAtLastComplet eSweep() / 1024, static_cast<size_t>(INT_MAX))); |
| 600 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceK B", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); | 785 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceK B", std::min(heap.heapStats().allocatedSpace() / 1024, static_cast<size_t>(INT_M AX))); |
| 601 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas tGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX) )); | 786 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas tGCKB", std::min(heap.heapStats().objectSizeAtLastGC() / 1024, static_cast<size_ t>(INT_MAX))); |
| 602 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX))); | 787 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(heap.heapStats().wrapperCount(), static_cast<size_t>(INT_MAX))); |
| 603 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtL astGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); | 788 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtL astGC", std::min(heap.heapStats().wrapperCountAtLastGC(), static_cast<size_t>(IN T_MAX))); |
| 604 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe rCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX))); | 789 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe rCount", std::min(heap.heapStats().collectedWrapperCount(), static_cast<size_t>( INT_MAX))); |
| 605 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS izeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast< size_t>(INT_MAX))); | 790 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS izeAtLastGCKB", std::min(heap.heapStats().partitionAllocSizeAtLastGC() / 1024, s tatic_cast<size_t>(INT_MAX))); |
| 606 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSize OfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 102 4, static_cast<size_t>(INT_MAX))); | 791 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSize OfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 102 4, static_cast<size_t>(INT_MAX))); |
| 607 } | 792 } |
| 608 | 793 |
| 609 size_t Heap::objectPayloadSizeForTesting() | |
| 610 { | |
| 611 size_t objectPayloadSize = 0; | |
| 612 for (ThreadState* state : ThreadState::attachedThreads()) { | |
| 613 state->setGCState(ThreadState::GCRunning); | |
| 614 state->makeConsistentForGC(); | |
| 615 objectPayloadSize += state->objectPayloadSizeForTesting(); | |
| 616 state->setGCState(ThreadState::EagerSweepScheduled); | |
| 617 state->setGCState(ThreadState::Sweeping); | |
| 618 state->setGCState(ThreadState::NoGCScheduled); | |
| 619 } | |
| 620 return objectPayloadSize; | |
| 621 } | |
| 622 | |
| 623 BasePage* Heap::lookup(Address address) | |
| 624 { | |
| 625 ASSERT(ThreadState::current()->isInGC()); | |
| 626 if (!s_regionTree) | |
| 627 return nullptr; | |
| 628 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { | |
| 629 BasePage* page = region->pageFromAddress(address); | |
| 630 return page && !page->orphaned() ? page : nullptr; | |
| 631 } | |
| 632 return nullptr; | |
| 633 } | |
| 634 | |
| 635 static Mutex& regionTreeMutex() | |
| 636 { | |
| 637 DEFINE_THREAD_SAFE_STATIC_LOCAL(Mutex, mutex, new Mutex); | |
| 638 return mutex; | |
| 639 } | |
| 640 | |
| 641 void Heap::removePageMemoryRegion(PageMemoryRegion* region) | |
| 642 { | |
| 643 // Deletion of large objects (and thus their regions) can happen | |
| 644 // concurrently on sweeper threads. Removal can also happen during thread | |
| 645 // shutdown, but that case is safe. Regardless, we make all removals | |
| 646 // mutually exclusive. | |
| 647 MutexLocker locker(regionTreeMutex()); | |
| 648 RegionTree::remove(region, &s_regionTree); | |
| 649 } | |
| 650 | |
| 651 void Heap::addPageMemoryRegion(PageMemoryRegion* region) | |
| 652 { | |
| 653 MutexLocker locker(regionTreeMutex()); | |
| 654 RegionTree::add(new RegionTree(region), &s_regionTree); | |
| 655 } | |
| 656 | |
| 657 void Heap::resetHeapCounters() | |
| 658 { | |
| 659 ASSERT(ThreadState::current()->isInGC()); | |
| 660 | |
| 661 Heap::reportMemoryUsageForTracing(); | |
| 662 | |
| 663 s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize; | |
| 664 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 665 s_allocatedObjectSize = 0; | |
| 666 s_markedObjectSize = 0; | |
| 667 s_wrapperCountAtLastGC = s_wrapperCount; | |
| 668 s_collectedWrapperCount = 0; | |
| 669 for (ThreadState* state : ThreadState::attachedThreads()) | |
| 670 state->resetHeapCounters(); | |
| 671 } | |
| 672 | |
| 673 CallbackStack* Heap::s_markingStack; | |
| 674 CallbackStack* Heap::s_postMarkingCallbackStack; | |
| 675 CallbackStack* Heap::s_globalWeakCallbackStack; | |
| 676 CallbackStack* Heap::s_ephemeronStack; | |
| 677 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | |
| 678 bool Heap::s_shutdownCalled = false; | 794 bool Heap::s_shutdownCalled = false; |
| 679 FreePagePool* Heap::s_freePagePool; | 795 bool Heap::s_shutdownComplete = false; |
| 680 OrphanedPagePool* Heap::s_orphanedPagePool; | |
| 681 RegionTree* Heap::s_regionTree = nullptr; | |
| 682 size_t Heap::s_allocatedSpace = 0; | |
| 683 size_t Heap::s_allocatedObjectSize = 0; | |
| 684 size_t Heap::s_objectSizeAtLastGC = 0; | |
| 685 size_t Heap::s_markedObjectSize = 0; | |
| 686 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; | |
| 687 size_t Heap::s_wrapperCount = 0; | |
| 688 size_t Heap::s_wrapperCountAtLastGC = 0; | |
| 689 size_t Heap::s_collectedWrapperCount = 0; | |
| 690 size_t Heap::s_partitionAllocSizeAtLastGC = 0; | |
| 691 double Heap::s_estimatedMarkingTimePerByte = 0.0; | |
| 692 bool Heap::s_isLowEndDevice = false; | 796 bool Heap::s_isLowEndDevice = false; |
| 693 #if ENABLE(ASSERT) | 797 size_t Heap::s_totalAllocatedSpace = 0; |
| 694 uint16_t Heap::s_gcGeneration = 0; | 798 size_t Heap::s_totalAllocatedObjectSize = 0; |
| 695 #endif | 799 size_t Heap::s_totalMarkedObjectSize = 0; |
| 696 | 800 |
| 697 } // namespace blink | 801 } // namespace blink |
| OLD | NEW |