| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 12 matching lines...) |
| 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 */ | 29 */ |
| 30 | 30 |
| 31 #include "platform/heap/Heap.h" | 31 #include "platform/heap/Heap.h" |
| 32 | 32 |
| 33 #include "base/debug/alias.h" |
| 33 #include "base/sys_info.h" | 34 #include "base/sys_info.h" |
| 34 #include "platform/Histogram.h" | 35 #include "platform/Histogram.h" |
| 35 #include "platform/ScriptForbiddenScope.h" | 36 #include "platform/ScriptForbiddenScope.h" |
| 36 #include "platform/TraceEvent.h" | 37 #include "platform/TraceEvent.h" |
| 37 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 38 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| 38 #include "platform/heap/CallbackStack.h" | 39 #include "platform/heap/CallbackStack.h" |
| 39 #include "platform/heap/MarkingVisitor.h" | 40 #include "platform/heap/MarkingVisitor.h" |
| 40 #include "platform/heap/PageMemory.h" | 41 #include "platform/heap/PageMemory.h" |
| 41 #include "platform/heap/PagePool.h" | 42 #include "platform/heap/PagePool.h" |
| 42 #include "platform/heap/SafePoint.h" | 43 #include "platform/heap/SafePoint.h" |
| (...skipping 15 matching lines...) |
| 58 class ParkThreadsScope final { | 59 class ParkThreadsScope final { |
| 59 STACK_ALLOCATED(); | 60 STACK_ALLOCATED(); |
| 60 public: | 61 public: |
| 61 ParkThreadsScope() | 62 ParkThreadsScope() |
| 62 : m_shouldResumeThreads(false) | 63 : m_shouldResumeThreads(false) |
| 63 { | 64 { |
| 64 } | 65 } |
| 65 | 66 |
| 66 bool parkThreads(ThreadState* state) | 67 bool parkThreads(ThreadState* state) |
| 67 { | 68 { |
| 68 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope"); | 69 TRACE_EVENT0("blink_gc", "Heap::ParkThreadsScope"); |
| 69 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | 70 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
| 70 if (state->isMainThread()) | 71 if (state->isMainThread()) |
| 71 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | 72 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); |
| 72 | 73 |
| 73 // TODO(haraken): In the unlikely event that two threads decide | 74 // TODO(haraken): In the unlikely event that two threads decide |
| 74 // to collect garbage at the same time, avoid doing two GCs in | 75 // to collect garbage at the same time, avoid doing two GCs in |
| 75 // a row and return false. | 76 // a row and return false. |
| 76 double startTime = WTF::currentTimeMS(); | 77 double startTime = WTF::currentTimeMS(); |
| 77 | 78 |
| 78 m_shouldResumeThreads = ThreadState::stopThreads(); | 79 m_shouldResumeThreads = ThreadState::stopThreads(); |
| (...skipping 12 matching lines...) |
| 91 // Only clean up if we parked all threads, in which case the GC happened | 92 // Only clean up if we parked all threads, in which case the GC happened |
| 92 // and we need to resume the other threads. | 93 // and we need to resume the other threads. |
| 93 if (m_shouldResumeThreads) | 94 if (m_shouldResumeThreads) |
| 94 ThreadState::resumeThreads(); | 95 ThreadState::resumeThreads(); |
| 95 } | 96 } |
| 96 | 97 |
| 97 private: | 98 private: |
| 98 bool m_shouldResumeThreads; | 99 bool m_shouldResumeThreads; |
| 99 }; | 100 }; |
| 100 | 101 |
| 101 void ThreadHeap::flushHeapDoesNotContainCache() | 102 void Heap::flushHeapDoesNotContainCache() |
| 102 { | 103 { |
| 103 s_heapDoesNotContainCache->flush(); | 104 s_heapDoesNotContainCache->flush(); |
| 104 } | 105 } |
| 105 | 106 |
| 106 void ProcessHeap::init() | 107 void ProcessHeap::init() |
| 107 { | 108 { |
| 108 s_totalAllocatedSpace = 0; | 109 s_totalAllocatedSpace = 0; |
| 109 s_totalAllocatedObjectSize = 0; | 110 s_totalAllocatedObjectSize = 0; |
| 110 s_totalMarkedObjectSize = 0; | 111 s_totalMarkedObjectSize = 0; |
| 111 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); | 112 s_isLowEndDevice = base::SysInfo::IsLowEndDevice(); |
| 112 } | 113 } |
| 113 | 114 |
| 114 void ProcessHeap::resetHeapCounters() | 115 void ProcessHeap::resetHeapCounters() |
| 115 { | 116 { |
| 116 s_totalAllocatedObjectSize = 0; | 117 s_totalAllocatedObjectSize = 0; |
| 117 s_totalMarkedObjectSize = 0; | 118 s_totalMarkedObjectSize = 0; |
| 118 } | 119 } |
| 119 | 120 |
| 120 void ThreadHeap::init() | 121 void Heap::init() |
| 121 { | 122 { |
| 122 ThreadState::init(); | 123 ThreadState::init(); |
| 123 ProcessHeap::init(); | 124 ProcessHeap::init(); |
| 124 s_markingStack = new CallbackStack(); | 125 s_markingStack = new CallbackStack(); |
| 125 s_postMarkingCallbackStack = new CallbackStack(); | 126 s_postMarkingCallbackStack = new CallbackStack(); |
| 126 s_globalWeakCallbackStack = new CallbackStack(); | 127 s_globalWeakCallbackStack = new CallbackStack(); |
| 127 // Use smallest supported block size for ephemerons. | 128 // Use smallest supported block size for ephemerons. |
| 128 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize); | 129 s_ephemeronStack = new CallbackStack(CallbackStack::kMinimalBlockSize); |
| 129 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | 130 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
| 130 s_freePagePool = new FreePagePool(); | 131 s_freePagePool = new FreePagePool(); |
| 131 s_orphanedPagePool = new OrphanedPagePool(); | 132 s_orphanedPagePool = new OrphanedPagePool(); |
| 132 s_lastGCReason = BlinkGC::NumberOfGCReason; | 133 s_lastGCReason = BlinkGC::NumberOfGCReason; |
| 133 | 134 |
| 134 GCInfoTable::init(); | 135 GCInfoTable::init(); |
| 135 | 136 |
| 136 if (Platform::current() && Platform::current()->currentThread()) | 137 if (Platform::current() && Platform::current()->currentThread()) |
| 137 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); | 138 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); |
| 138 } | 139 } |
| 139 | 140 |
| 140 void ThreadHeap::shutdown() | 141 void Heap::shutdown() |
| 141 { | 142 { |
| 142 ASSERT(s_markingStack); | 143 ASSERT(s_markingStack); |
| 143 | 144 |
| 144 if (Platform::current() && Platform::current()->currentThread()) | 145 if (Platform::current() && Platform::current()->currentThread()) |
| 145 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | 146 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); |
| 146 | 147 |
| 147 // The main thread must be the last thread that gets detached. | 148 // The main thread must be the last thread that gets detached. |
| 148 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 0); | 149 RELEASE_ASSERT(ThreadState::attachedThreads().size() == 0); |
| 149 | 150 |
| 150 delete s_heapDoesNotContainCache; | 151 delete s_heapDoesNotContainCache; |
| 151 s_heapDoesNotContainCache = nullptr; | 152 s_heapDoesNotContainCache = nullptr; |
| 152 delete s_freePagePool; | 153 delete s_freePagePool; |
| 153 s_freePagePool = nullptr; | 154 s_freePagePool = nullptr; |
| 154 delete s_orphanedPagePool; | 155 delete s_orphanedPagePool; |
| 155 s_orphanedPagePool = nullptr; | 156 s_orphanedPagePool = nullptr; |
| 156 delete s_globalWeakCallbackStack; | 157 delete s_globalWeakCallbackStack; |
| 157 s_globalWeakCallbackStack = nullptr; | 158 s_globalWeakCallbackStack = nullptr; |
| 158 delete s_postMarkingCallbackStack; | 159 delete s_postMarkingCallbackStack; |
| 159 s_postMarkingCallbackStack = nullptr; | 160 s_postMarkingCallbackStack = nullptr; |
| 160 delete s_markingStack; | 161 delete s_markingStack; |
| 161 s_markingStack = nullptr; | 162 s_markingStack = nullptr; |
| 162 delete s_ephemeronStack; | 163 delete s_ephemeronStack; |
| 163 s_ephemeronStack = nullptr; | 164 s_ephemeronStack = nullptr; |
| 164 GCInfoTable::shutdown(); | 165 GCInfoTable::shutdown(); |
| 165 ThreadState::shutdown(); | 166 ThreadState::shutdown(); |
| 166 ASSERT(ThreadHeap::heapStats().allocatedSpace() == 0); | 167 ASSERT(Heap::heapStats().allocatedSpace() == 0); |
| 167 } | 168 } |
| 168 | 169 |
| 169 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() | 170 CrossThreadPersistentRegion& ProcessHeap::crossThreadPersistentRegion() |
| 170 { | 171 { |
| 171 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); | 172 DEFINE_THREAD_SAFE_STATIC_LOCAL(CrossThreadPersistentRegion, persistentRegion, new CrossThreadPersistentRegion()); |
| 172 return persistentRegion; | 173 return persistentRegion; |
| 173 } | 174 } |
| 174 | 175 |
| 175 bool ProcessHeap::s_isLowEndDevice = false; | 176 bool ProcessHeap::s_isLowEndDevice = false; |
| 176 size_t ProcessHeap::s_totalAllocatedSpace = 0; | 177 size_t ProcessHeap::s_totalAllocatedSpace = 0; |
| (...skipping 61 matching lines...) |
| 238 ProcessHeap::increaseTotalAllocatedSpace(delta); | 239 ProcessHeap::increaseTotalAllocatedSpace(delta); |
| 239 } | 240 } |
| 240 | 241 |
| 241 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) | 242 void ThreadHeapStats::decreaseAllocatedSpace(size_t delta) |
| 242 { | 243 { |
| 243 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); | 244 atomicSubtract(&m_allocatedSpace, static_cast<long>(delta)); |
| 244 ProcessHeap::decreaseTotalAllocatedSpace(delta); | 245 ProcessHeap::decreaseTotalAllocatedSpace(delta); |
| 245 } | 246 } |
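A note on the two updaters above: heap statistics are shared across threads, so each delta is folded in with atomic arithmetic rather than under a lock. A minimal self-contained sketch of the same pattern, using std::atomic in place of WTF's atomicAdd/atomicSubtract (the names here are illustrative, not Blink's):

    #include <atomic>
    #include <cstddef>

    // Process-wide counter that any thread may adjust without locking.
    static std::atomic<size_t> g_allocatedSpace{0};

    void increaseAllocatedSpace(size_t delta)
    {
        g_allocatedSpace.fetch_add(delta, std::memory_order_relaxed);
    }

    void decreaseAllocatedSpace(size_t delta)
    {
        g_allocatedSpace.fetch_sub(delta, std::memory_order_relaxed);
    }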
| 246 | 247 |
| 247 #if ENABLE(ASSERT) | 248 #if ENABLE(ASSERT) |
| 248 BasePage* ThreadHeap::findPageFromAddress(Address address) | 249 BasePage* Heap::findPageFromAddress(Address address) |
| 249 { | 250 { |
| 250 MutexLocker lock(ThreadState::threadAttachMutex()); | 251 MutexLocker lock(ThreadState::threadAttachMutex()); |
| 251 for (ThreadState* state : ThreadState::attachedThreads()) { | 252 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 252 if (BasePage* page = state->findPageFromAddress(address)) | 253 if (BasePage* page = state->findPageFromAddress(address)) |
| 253 return page; | 254 return page; |
| 254 } | 255 } |
| 255 return nullptr; | 256 return nullptr; |
| 256 } | 257 } |
| 257 #endif | 258 #endif |
| 258 | 259 |
| 259 Address ThreadHeap::checkAndMarkPointer(Visitor* visitor, Address address) | 260 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 260 { | 261 { |
| 261 ASSERT(ThreadState::current()->isInGC()); | 262 ASSERT(ThreadState::current()->isInGC()); |
| 262 | 263 |
| 263 #if !ENABLE(ASSERT) | 264 #if !ENABLE(ASSERT) |
| 264 if (s_heapDoesNotContainCache->lookup(address)) | 265 if (s_heapDoesNotContainCache->lookup(address)) |
| 265 return nullptr; | 266 return nullptr; |
| 266 #endif | 267 #endif |
| 267 | 268 |
| 268 if (BasePage* page = lookup(address)) { | 269 if (BasePage* page = lookup(address)) { |
| 269 ASSERT(page->contains(address)); | 270 ASSERT(page->contains(address)); |
| 270 ASSERT(!page->orphaned()); | 271 ASSERT(!page->orphaned()); |
| 271 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 272 ASSERT(!s_heapDoesNotContainCache->lookup(address)); |
| 272 page->checkAndMarkPointer(visitor, address); | 273 page->checkAndMarkPointer(visitor, address); |
| 273 return address; | 274 return address; |
| 274 } | 275 } |
| 275 | 276 |
| 276 #if !ENABLE(ASSERT) | 277 #if !ENABLE(ASSERT) |
| 277 s_heapDoesNotContainCache->addEntry(address); | 278 s_heapDoesNotContainCache->addEntry(address); |
| 278 #else | 279 #else |
| 279 if (!s_heapDoesNotContainCache->lookup(address)) | 280 if (!s_heapDoesNotContainCache->lookup(address)) |
| 280 s_heapDoesNotContainCache->addEntry(address); | 281 s_heapDoesNotContainCache->addEntry(address); |
| 281 #endif | 282 #endif |
| 282 return nullptr; | 283 return nullptr; |
| 283 } | 284 } |
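For context, checkAndMarkPointer() is the filter behind conservative stack scanning: any word that might be a pointer is passed in, non-heap addresses are rejected (and remembered in the negative lookup cache), and genuine heap addresses have their page's mark logic invoked. A hypothetical driver loop, assuming a visitor and a word-aligned stack range (this is not Blink's actual stack walker):

    // Hypothetical conservative scan: treat every word in [from, to) as a
    // candidate pointer; checkAndMarkPointer() returns nullptr for words
    // that do not point into the managed heap.
    void scanRangeConservatively(Visitor* visitor, Address* from, Address* to)
    {
        for (Address* slot = from; slot < to; ++slot)
            Heap::checkAndMarkPointer(visitor, *slot);
    }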
| 284 | 285 |
| 285 void ThreadHeap::pushTraceCallback(void* object, TraceCallback callback) | 286 void Heap::pushTraceCallback(void* object, TraceCallback callback) |
| 286 { | 287 { |
| 287 ASSERT(ThreadState::current()->isInGC()); | 288 ASSERT(ThreadState::current()->isInGC()); |
| 288 | 289 |
| 289 // Trace should never reach an orphaned page. | 290 // Trace should never reach an orphaned page. |
| 290 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); | 291 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); |
| 291 CallbackStack::Item* slot = s_markingStack->allocateEntry(); | 292 CallbackStack::Item* slot = s_markingStack->allocateEntry(); |
| 292 *slot = CallbackStack::Item(object, callback); | 293 *slot = CallbackStack::Item(object, callback); |
| 293 } | 294 } |
| 294 | 295 |
| 295 bool ThreadHeap::popAndInvokeTraceCallback(Visitor* visitor) | 296 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
| 296 { | 297 { |
| 297 CallbackStack::Item* item = s_markingStack->pop(); | 298 CallbackStack::Item* item = s_markingStack->pop(); |
| 298 if (!item) | 299 if (!item) |
| 299 return false; | 300 return false; |
| 300 item->call(visitor); | 301 item->call(visitor); |
| 301 return true; | 302 return true; |
| 302 } | 303 } |
| 303 | 304 |
| 304 void ThreadHeap::pushPostMarkingCallback(void* object, TraceCallback callback) | 305 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) |
| 305 { | 306 { |
| 306 ASSERT(ThreadState::current()->isInGC()); | 307 ASSERT(ThreadState::current()->isInGC()); |
| 307 | 308 |
| 308 // Trace should never reach an orphaned page. | 309 // Trace should never reach an orphaned page. |
| 309 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); | 310 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); |
| 310 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); | 311 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); |
| 311 *slot = CallbackStack::Item(object, callback); | 312 *slot = CallbackStack::Item(object, callback); |
| 312 } | 313 } |
| 313 | 314 |
| 314 bool ThreadHeap::popAndInvokePostMarkingCallback(Visitor* visitor) | 315 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) |
| 315 { | 316 { |
| 316 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { | 317 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { |
| 317 item->call(visitor); | 318 item->call(visitor); |
| 318 return true; | 319 return true; |
| 319 } | 320 } |
| 320 return false; | 321 return false; |
| 321 } | 322 } |
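The push*/popAndInvoke* pairs in this file (marking, post-marking, and the global-weak pair below) all follow the same CallbackStack discipline; condensed into two helpers for clarity (the helper names are invented for illustration):

    // Producers append (object, callback) items; the GC drains the stack
    // by invoking each callback with the active visitor.
    static void pushCallback(CallbackStack* stack, void* object, TraceCallback callback)
    {
        CallbackStack::Item* slot = stack->allocateEntry();
        *slot = CallbackStack::Item(object, callback);
    }

    static bool popAndInvoke(CallbackStack* stack, Visitor* visitor)
    {
        if (CallbackStack::Item* item = stack->pop()) {
            item->call(visitor);
            return true;
        }
        return false;
    }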
| 322 | 323 |
| 323 void ThreadHeap::pushGlobalWeakCallback(void** cell, WeakCallback callback) | 324 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) |
| 324 { | 325 { |
| 325 ASSERT(ThreadState::current()->isInGC()); | 326 ASSERT(ThreadState::current()->isInGC()); |
| 326 | 327 |
| 327 // Trace should never reach an orphaned page. | 328 // Trace should never reach an orphaned page. |
| 328 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(cell)); | 329 ASSERT(!Heap::getOrphanedPagePool()->contains(cell)); |
| 329 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); | 330 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); |
| 330 *slot = CallbackStack::Item(cell, callback); | 331 *slot = CallbackStack::Item(cell, callback); |
| 331 } | 332 } |
| 332 | 333 |
| 333 void ThreadHeap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | 334 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) |
| 334 { | 335 { |
| 335 ASSERT(ThreadState::current()->isInGC()); | 336 ASSERT(ThreadState::current()->isInGC()); |
| 336 | 337 |
| 337 // Trace should never reach an orphaned page. | 338 // Trace should never reach an orphaned page. |
| 338 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(object)); | 339 ASSERT(!Heap::getOrphanedPagePool()->contains(object)); |
| 339 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); | 340 ThreadState* state = pageFromObject(object)->arena()->getThreadState(); |
| 340 state->pushThreadLocalWeakCallback(closure, callback); | 341 state->pushThreadLocalWeakCallback(closure, callback); |
| 341 } | 342 } |
| 342 | 343 |
| 343 bool ThreadHeap::popAndInvokeGlobalWeakCallback(Visitor* visitor) | 344 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
| 344 { | 345 { |
| 345 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { | 346 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { |
| 346 item->call(visitor); | 347 item->call(visitor); |
| 347 return true; | 348 return true; |
| 348 } | 349 } |
| 349 return false; | 350 return false; |
| 350 } | 351 } |
| 351 | 352 |
| 352 void ThreadHeap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 353 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
| 353 { | 354 { |
| 354 ASSERT(ThreadState::current()->isInGC()); | 355 ASSERT(ThreadState::current()->isInGC()); |
| 355 | 356 |
| 356 // Trace should never reach an orphaned page. | 357 // Trace should never reach an orphaned page. |
| 357 ASSERT(!ThreadHeap::getOrphanedPagePool()->contains(table)); | 358 ASSERT(!Heap::getOrphanedPagePool()->contains(table)); |
| 358 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); | 359 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); |
| 359 *slot = CallbackStack::Item(table, iterationCallback); | 360 *slot = CallbackStack::Item(table, iterationCallback); |
| 360 | 361 |
| 361 // Register a post-marking callback to tell the tables that | 362 // Register a post-marking callback to tell the tables that |
| 362 // ephemeron iteration is complete. | 363 // ephemeron iteration is complete. |
| 363 pushPostMarkingCallback(table, iterationDoneCallback); | 364 pushPostMarkingCallback(table, iterationDoneCallback); |
| 364 } | 365 } |
| 365 | 366 |
| 366 #if ENABLE(ASSERT) | 367 #if ENABLE(ASSERT) |
| 367 bool ThreadHeap::weakTableRegistered(const void* table) | 368 bool Heap::weakTableRegistered(const void* table) |
| 368 { | 369 { |
| 369 ASSERT(s_ephemeronStack); | 370 ASSERT(s_ephemeronStack); |
| 370 return s_ephemeronStack->hasCallbackForObject(table); | 371 return s_ephemeronStack->hasCallbackForObject(table); |
| 371 } | 372 } |
| 372 #endif | 373 #endif |
| 373 | 374 |
| 374 void ThreadHeap::decommitCallbackStacks() | 375 void Heap::decommitCallbackStacks() |
| 375 { | 376 { |
| 376 s_markingStack->decommit(); | 377 s_markingStack->decommit(); |
| 377 s_postMarkingCallbackStack->decommit(); | 378 s_postMarkingCallbackStack->decommit(); |
| 378 s_globalWeakCallbackStack->decommit(); | 379 s_globalWeakCallbackStack->decommit(); |
| 379 s_ephemeronStack->decommit(); | 380 s_ephemeronStack->decommit(); |
| 380 } | 381 } |
| 381 | 382 |
| 382 void ThreadHeap::preGC() | 383 void Heap::preGC() |
| 383 { | 384 { |
| 384 ASSERT(!ThreadState::current()->isInGC()); | 385 ASSERT(!ThreadState::current()->isInGC()); |
| 385 for (ThreadState* state : ThreadState::attachedThreads()) | 386 for (ThreadState* state : ThreadState::attachedThreads()) |
| 386 state->preGC(); | 387 state->preGC(); |
| 387 } | 388 } |
| 388 | 389 |
| 389 void ThreadHeap::postGC(BlinkGC::GCType gcType) | 390 void Heap::postGC(BlinkGC::GCType gcType) |
| 390 { | 391 { |
| 391 ASSERT(ThreadState::current()->isInGC()); | 392 ASSERT(ThreadState::current()->isInGC()); |
| 392 for (ThreadState* state : ThreadState::attachedThreads()) | 393 for (ThreadState* state : ThreadState::attachedThreads()) |
| 393 state->postGC(gcType); | 394 state->postGC(gcType); |
| 394 } | 395 } |
| 395 | 396 |
| 396 const char* ThreadHeap::gcReasonString(BlinkGC::GCReason reason) | 397 const char* Heap::gcReasonString(BlinkGC::GCReason reason) |
| 397 { | 398 { |
| 398 switch (reason) { | 399 switch (reason) { |
| 399 case BlinkGC::IdleGC: | 400 case BlinkGC::IdleGC: |
| 400 return "IdleGC"; | 401 return "IdleGC"; |
| 401 case BlinkGC::PreciseGC: | 402 case BlinkGC::PreciseGC: |
| 402 return "PreciseGC"; | 403 return "PreciseGC"; |
| 403 case BlinkGC::ConservativeGC: | 404 case BlinkGC::ConservativeGC: |
| 404 return "ConservativeGC"; | 405 return "ConservativeGC"; |
| 405 case BlinkGC::ForcedGC: | 406 case BlinkGC::ForcedGC: |
| 406 return "ForcedGC"; | 407 return "ForcedGC"; |
| 407 case BlinkGC::MemoryPressureGC: | 408 case BlinkGC::MemoryPressureGC: |
| 408 return "MemoryPressureGC"; | 409 return "MemoryPressureGC"; |
| 409 case BlinkGC::PageNavigationGC: | 410 case BlinkGC::PageNavigationGC: |
| 410 return "PageNavigationGC"; | 411 return "PageNavigationGC"; |
| 411 default: | 412 default: |
| 412 ASSERT_NOT_REACHED(); | 413 ASSERT_NOT_REACHED(); |
| 413 } | 414 } |
| 414 return "<Unknown>"; | 415 return "<Unknown>"; |
| 415 } | 416 } |
| 416 | 417 |
| 417 void ThreadHeap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType, BlinkGC::GCReason reason) | 418 void Heap::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType, BlinkGC::GCReason reason) |
| 418 { | 419 { |
| 419 ASSERT(gcType != BlinkGC::ThreadTerminationGC); | 420 ASSERT(gcType != BlinkGC::ThreadTerminationGC); |
| 420 | 421 |
| 421 ThreadState* state = ThreadState::current(); | 422 ThreadState* state = ThreadState::current(); |
| 422 // Nested collectGarbage() invocations aren't supported. | 423 // Nested collectGarbage() invocations aren't supported. |
| 423 RELEASE_ASSERT(!state->isGCForbidden()); | 424 RELEASE_ASSERT(!state->isGCForbidden()); |
| 424 state->completeSweep(); | 425 state->completeSweep(); |
| 425 | 426 |
| 427 size_t debugAllocatedObjectSize = Heap::heapStats().allocatedObjectSize(); |
| 428 base::debug::Alias(&debugAllocatedObjectSize); |
| 429 size_t debugWrapperCount = Heap::heapStats().wrapperCount(); |
| 430 base::debug::Alias(&debugWrapperCount); |
| 431 |
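The four lines added above are a crash-diagnostics idiom: base::debug::Alias(const void*) forces the compiler to keep a local alive, so the snapshotted statistics remain readable in a minidump if this GC entry point crashes. The pattern in isolation (the value source here is hypothetical):

    size_t valueOfInterest = computeInterestingStat();  // hypothetical source
    base::debug::Alias(&valueOfInterest);               // pin it for crash dumps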
| 426 OwnPtr<Visitor> visitor = Visitor::create(state, gcType); | 432 OwnPtr<Visitor> visitor = Visitor::create(state, gcType); |
| 427 | 433 |
| 428 SafePointScope safePointScope(stackState, state); | 434 SafePointScope safePointScope(stackState, state); |
| 429 | 435 |
| 430 // Resume all parked threads upon leaving this scope. | 436 // Resume all parked threads upon leaving this scope. |
| 431 ParkThreadsScope parkThreadsScope; | 437 ParkThreadsScope parkThreadsScope; |
| 432 | 438 |
| 433 // Try to park the other threads. If we're unable to, bail out of the GC. | 439 // Try to park the other threads. If we're unable to, bail out of the GC. |
| 434 if (!parkThreadsScope.parkThreads(state)) | 440 if (!parkThreadsScope.parkThreads(state)) |
| 435 return; | 441 return; |
| 436 | 442 |
| 437 ScriptForbiddenIfMainThreadScope scriptForbidden; | 443 ScriptForbiddenIfMainThreadScope scriptForbidden; |
| 438 | 444 |
| 439 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", | 445 TRACE_EVENT2("blink_gc,devtools.timeline", "Heap::collectGarbage", |
| 440 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, | 446 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, |
| 441 "gcReason", gcReasonString(reason)); | 447 "gcReason", gcReasonString(reason)); |
| 442 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 448 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
| 443 double startTime = WTF::currentTimeMS(); | 449 double startTime = WTF::currentTimeMS(); |
| 444 | 450 |
| 445 if (gcType == BlinkGC::TakeSnapshot) | 451 if (gcType == BlinkGC::TakeSnapshot) |
| 446 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 452 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 447 | 453 |
| 448 // Disallow allocation during garbage collection (but not during the | 454 // Disallow allocation during garbage collection (but not during the |
| 449 // finalization that happens when the visitorScope is torn down). | 455 // finalization that happens when the visitorScope is torn down). |
| 450 ThreadState::NoAllocationScope noAllocationScope(state); | 456 ThreadState::NoAllocationScope noAllocationScope(state); |
| 451 | 457 |
| 452 preGC(); | 458 preGC(); |
| 453 | 459 |
| 454 StackFrameDepthScope stackDepthScope; | 460 StackFrameDepthScope stackDepthScope; |
| 455 | 461 |
| 456 size_t totalObjectSize = ThreadHeap::heapStats().allocatedObjectSize() + ThreadHeap::heapStats().markedObjectSize(); | 462 size_t totalObjectSize = Heap::heapStats().allocatedObjectSize() + Heap::heapStats().markedObjectSize(); |
| 457 if (gcType != BlinkGC::TakeSnapshot) | 463 if (gcType != BlinkGC::TakeSnapshot) |
| 458 ThreadHeap::resetHeapCounters(); | 464 Heap::resetHeapCounters(); |
| 459 | 465 |
| 460 // 1. Trace persistent roots. | 466 // 1. Trace persistent roots. |
| 461 ThreadState::visitPersistentRoots(visitor.get()); | 467 ThreadState::visitPersistentRoots(visitor.get()); |
| 462 | 468 |
| 463 // 2. Trace objects reachable from the stack. We do this independent of the | 469 // 2. Trace objects reachable from the stack. We do this independent of the |
| 464 // given stackState since other threads might have a different stack state. | 470 // given stackState since other threads might have a different stack state. |
| 465 ThreadState::visitStackRoots(visitor.get()); | 471 ThreadState::visitStackRoots(visitor.get()); |
| 466 | 472 |
| 467 // 3. Transitive closure to trace objects including ephemerons. | 473 // 3. Transitive closure to trace objects including ephemerons. |
| 468 processMarkingStack(visitor.get()); | 474 processMarkingStack(visitor.get()); |
| 469 | 475 |
| 470 postMarkingProcessing(visitor.get()); | 476 postMarkingProcessing(visitor.get()); |
| 471 globalWeakProcessing(visitor.get()); | 477 globalWeakProcessing(visitor.get()); |
| 472 | 478 |
| 473 // Now we can delete all orphaned pages because there are no dangling | 479 // Now we can delete all orphaned pages because there are no dangling |
| 474 // pointers to the orphaned pages. (If we have such dangling pointers, | 480 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 475 // we should have crashed during marking before getting here.) | 481 // we should have crashed during marking before getting here.) |
| 476 getOrphanedPagePool()->decommitOrphanedPages(); | 482 getOrphanedPagePool()->decommitOrphanedPages(); |
| 477 | 483 |
| 478 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; | 484 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
| 479 ThreadHeap::heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); | 485 Heap::heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
| 480 | 486 |
| 481 #if PRINT_HEAP_STATS | 487 #if PRINT_HEAP_STATS |
| 482 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1
lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTime
InMilliseconds); | 488 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\
n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill
iseconds); |
| 483 #endif | 489 #endif |
| 484 | 490 |
| 485 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); | 491 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); |
| 486 markingTimeHistogram.count(markingTimeInMilliseconds); | 492 markingTimeHistogram.count(markingTimeInMilliseconds); |
| 487 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); | 493 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); |
| 488 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024); | 494 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024); |
| 489 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); | 495 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); |
| 490 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024); | 496 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024); |
| 491 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); | 497 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); |
| 492 gcReasonHistogram.count(reason); | 498 gcReasonHistogram.count(reason); |
| 493 | 499 |
| 494 s_lastGCReason = reason; | 500 s_lastGCReason = reason; |
| 495 | 501 |
| 496 ThreadHeap::reportMemoryUsageHistogram(); | 502 Heap::reportMemoryUsageHistogram(); |
| 497 WTF::Partitions::reportMemoryUsageHistogram(); | 503 WTF::Partitions::reportMemoryUsageHistogram(); |
| 498 | 504 |
| 499 postGC(gcType); | 505 postGC(gcType); |
| 500 ThreadHeap::decommitCallbackStacks(); | 506 Heap::decommitCallbackStacks(); |
| 501 } | 507 } |
| 502 | 508 |
| 503 void ThreadHeap::collectGarbageForTerminatingThread(ThreadState* state) | 509 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 504 { | 510 { |
| 505 { | 511 { |
| 506 // A thread-specific termination GC must not allow other global GCs to go | 512 // A thread-specific termination GC must not allow other global GCs to go |
| 507 // ahead while it is running, hence the termination GC does not enter a | 513 // ahead while it is running, hence the termination GC does not enter a |
| 508 // safepoint. VisitorScope will also not enter a safepoint scope for | 514 // safepoint. VisitorScope will also not enter a safepoint scope for |
| 509 // ThreadTerminationGC. | 515 // ThreadTerminationGC. |
| 510 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC); | 516 OwnPtr<Visitor> visitor = Visitor::create(state, BlinkGC::ThreadTerminationGC); |
| 511 | 517 |
| 512 ThreadState::NoAllocationScope noAllocationScope(state); | 518 ThreadState::NoAllocationScope noAllocationScope(state); |
| 513 | 519 |
| (...skipping 12 matching lines...) |
| 526 state->visitPersistents(visitor.get()); | 532 state->visitPersistents(visitor.get()); |
| 527 | 533 |
| 528 // 2. Trace objects reachable from the thread's persistent roots | 534 // 2. Trace objects reachable from the thread's persistent roots |
| 529 // including ephemerons. | 535 // including ephemerons. |
| 530 processMarkingStack(visitor.get()); | 536 processMarkingStack(visitor.get()); |
| 531 | 537 |
| 532 postMarkingProcessing(visitor.get()); | 538 postMarkingProcessing(visitor.get()); |
| 533 globalWeakProcessing(visitor.get()); | 539 globalWeakProcessing(visitor.get()); |
| 534 | 540 |
| 535 state->postGC(BlinkGC::GCWithSweep); | 541 state->postGC(BlinkGC::GCWithSweep); |
| 536 ThreadHeap::decommitCallbackStacks(); | 542 Heap::decommitCallbackStacks(); |
| 537 } | 543 } |
| 538 state->preSweep(); | 544 state->preSweep(); |
| 539 } | 545 } |
| 540 | 546 |
| 541 void ThreadHeap::processMarkingStack(Visitor* visitor) | 547 void Heap::processMarkingStack(Visitor* visitor) |
| 542 { | 548 { |
| 543 // Ephemeron fixed point loop. | 549 // Ephemeron fixed point loop. |
| 544 do { | 550 do { |
| 545 { | 551 { |
| 546 // Iteratively mark all objects that are reachable from the objects | 552 // Iteratively mark all objects that are reachable from the objects |
| 547 // currently pushed onto the marking stack. | 553 // currently pushed onto the marking stack. |
| 548 TRACE_EVENT0("blink_gc", "ThreadHeap::processMarkingStackSingleThrea
ded"); | 554 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); |
| 549 while (popAndInvokeTraceCallback(visitor)) { } | 555 while (popAndInvokeTraceCallback(visitor)) { } |
| 550 } | 556 } |
| 551 | 557 |
| 552 { | 558 { |
| 553 // Mark any strong pointers that have now become reachable in | 559 // Mark any strong pointers that have now become reachable in |
| 554 // ephemeron maps. | 560 // ephemeron maps. |
| 555 TRACE_EVENT0("blink_gc", "ThreadHeap::processEphemeronStack"); | 561 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); |
| 556 s_ephemeronStack->invokeEphemeronCallbacks(visitor); | 562 s_ephemeronStack->invokeEphemeronCallbacks(visitor); |
| 557 } | 563 } |
| 558 | 564 |
| 559 // Rerun loop if ephemeron processing queued more objects for tracing. | 565 // Rerun loop if ephemeron processing queued more objects for tracing. |
| 560 } while (!s_markingStack->isEmpty()); | 566 } while (!s_markingStack->isEmpty()); |
| 561 } | 567 } |
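processMarkingStack() computes a fixed point because ephemerons interact: a weak-map value is traced only once its key is proven live, and tracing that value may in turn make keys in other maps live. A self-contained toy model of why a single pass is not enough (standard C++ types, not Blink's):

    #include <map>
    #include <set>

    // Each (key, value) pair models an ephemeron: value becomes live only
    // after key is live. Entries can chain (A->B, B->C), so we must loop
    // until a pass marks nothing new -- exactly the do/while above.
    std::set<int> computeLiveSet(std::set<int> live, const std::multimap<int, int>& ephemerons)
    {
        bool changed = true;
        while (changed) {
            changed = false;
            for (const auto& entry : ephemerons) {
                if (live.count(entry.first) && !live.count(entry.second)) {
                    live.insert(entry.second);
                    changed = true;
                }
            }
        }
        return live;
    }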
| 562 | 568 |
| 563 void ThreadHeap::postMarkingProcessing(Visitor* visitor) | 569 void Heap::postMarkingProcessing(Visitor* visitor) |
| 564 { | 570 { |
| 565 TRACE_EVENT0("blink_gc", "ThreadHeap::postMarkingProcessing"); | 571 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); |
| 566 // Call post-marking callbacks including: | 572 // Call post-marking callbacks including: |
| 567 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | 573 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
| 568 // (specifically to clear the queued bits for weak hash tables), and | 574 // (specifically to clear the queued bits for weak hash tables), and |
| 569 // 2. the markNoTracing callbacks on collection backings to mark them | 575 // 2. the markNoTracing callbacks on collection backings to mark them |
| 570 // if they are only reachable from their front objects. | 576 // if they are only reachable from their front objects. |
| 571 while (popAndInvokePostMarkingCallback(visitor)) { } | 577 while (popAndInvokePostMarkingCallback(visitor)) { } |
| 572 | 578 |
| 573 // Post-marking callbacks should not trace any objects and | 579 // Post-marking callbacks should not trace any objects and |
| 574 // therefore the marking stack should be empty after the | 580 // therefore the marking stack should be empty after the |
| 575 // post-marking callbacks. | 581 // post-marking callbacks. |
| 576 ASSERT(s_markingStack->isEmpty()); | 582 ASSERT(s_markingStack->isEmpty()); |
| 577 } | 583 } |
| 578 | 584 |
| 579 void ThreadHeap::globalWeakProcessing(Visitor* visitor) | 585 void Heap::globalWeakProcessing(Visitor* visitor) |
| 580 { | 586 { |
| 581 TRACE_EVENT0("blink_gc", "ThreadHeap::globalWeakProcessing"); | 587 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); |
| 582 double startTime = WTF::currentTimeMS(); | 588 double startTime = WTF::currentTimeMS(); |
| 583 | 589 |
| 584 // Call weak callbacks on objects that may now be pointing to dead objects. | 590 // Call weak callbacks on objects that may now be pointing to dead objects. |
| 585 while (popAndInvokeGlobalWeakCallback(visitor)) { } | 591 while (popAndInvokeGlobalWeakCallback(visitor)) { } |
| 586 | 592 |
| 587 // It is not permitted to trace pointers of live objects in the weak | 593 // It is not permitted to trace pointers of live objects in the weak |
| 588 // callback phase, so the marking stack should still be empty here. | 594 // callback phase, so the marking stack should still be empty here. |
| 589 ASSERT(s_markingStack->isEmpty()); | 595 ASSERT(s_markingStack->isEmpty()); |
| 590 | 596 |
| 591 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | 597 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
| 592 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50)); | 598 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, globalWeakTimeHistogram, new CustomCountHistogram("BlinkGC.TimeForGlobalWeakProcessing", 1, 10 * 1000, 50)); |
| 593 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); | 599 globalWeakTimeHistogram.count(timeForGlobalWeakProcessing); |
| 594 } | 600 } |
| 595 | 601 |
| 596 void ThreadHeap::collectAllGarbage() | 602 void Heap::collectAllGarbage() |
| 597 { | 603 { |
| 598 // We need to run multiple GCs to collect a chain of persistent handles. | 604 // We need to run multiple GCs to collect a chain of persistent handles. |
| 599 size_t previousLiveObjects = 0; | 605 size_t previousLiveObjects = 0; |
| 600 for (int i = 0; i < 5; ++i) { | 606 for (int i = 0; i < 5; ++i) { |
| 601 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); | 607 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); |
| 602 size_t liveObjects = ThreadHeap::heapStats().markedObjectSize(); | 608 size_t liveObjects = Heap::heapStats().markedObjectSize(); |
| 603 if (liveObjects == previousLiveObjects) | 609 if (liveObjects == previousLiveObjects) |
| 604 break; | 610 break; |
| 605 previousLiveObjects = liveObjects; | 611 previousLiveObjects = liveObjects; |
| 606 } | 612 } |
| 607 } | 613 } |
| 608 | 614 |
| 609 void ThreadHeap::reportMemoryUsageHistogram() | 615 void Heap::reportMemoryUsageHistogram() |
| 610 { | 616 { |
| 611 static size_t supportedMaxSizeInMB = 4 * 1024; | 617 static size_t supportedMaxSizeInMB = 4 * 1024; |
| 612 static size_t observedMaxSizeInMB = 0; | 618 static size_t observedMaxSizeInMB = 0; |
| 613 | 619 |
| 614 // We only report the memory in the main thread. | 620 // We only report the memory in the main thread. |
| 615 if (!isMainThread()) | 621 if (!isMainThread()) |
| 616 return; | 622 return; |
| 617 // +1 is for rounding up the sizeInMB. | 623 // +1 is for rounding up the sizeInMB. |
| 618 size_t sizeInMB = ThreadHeap::heapStats().allocatedSpace() / 1024 / 1024 + 1; | 624 size_t sizeInMB = Heap::heapStats().allocatedSpace() / 1024 / 1024 + 1; |
| 619 if (sizeInMB >= supportedMaxSizeInMB) | 625 if (sizeInMB >= supportedMaxSizeInMB) |
| 620 sizeInMB = supportedMaxSizeInMB - 1; | 626 sizeInMB = supportedMaxSizeInMB - 1; |
| 621 if (sizeInMB > observedMaxSizeInMB) { | 627 if (sizeInMB > observedMaxSizeInMB) { |
| 622 // Send a UseCounter only when we see the highest memory usage | 628 // Send a UseCounter only when we see the highest memory usage |
| 623 // we've ever seen. | 629 // we've ever seen. |
| 624 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistogram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); | 630 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, commitedSizeHistogram, new EnumerationHistogram("BlinkGC.CommittedSize", supportedMaxSizeInMB)); |
| 625 commitedSizeHistogram.count(sizeInMB); | 631 commitedSizeHistogram.count(sizeInMB); |
| 626 observedMaxSizeInMB = sizeInMB; | 632 observedMaxSizeInMB = sizeInMB; |
| 627 } | 633 } |
| 628 } | 634 } |
| 629 | 635 |
| 630 void ThreadHeap::reportMemoryUsageForTracing() | 636 void Heap::reportMemoryUsageForTracing() |
| 631 { | 637 { |
| 632 #if PRINT_HEAP_STATS | 638 #if PRINT_HEAP_STATS |
| 633 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi
ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\
n", ThreadHeap::allocatedSpace() / 1024 / 1024, ThreadHeap::allocatedObjectSize(
) / 1024 / 1024, ThreadHeap::markedObjectSize() / 1024 / 1024, WTF::Partitions::
totalSizeOfCommittedPages() / 1024 / 1024, ThreadHeap::wrapperCount(), ThreadHea
p::collectedWrapperCount()); | 639 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSi
ze=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\
n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1
024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommitt
edPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); |
| 634 #endif | 640 #endif |
| 635 | 641 |
| 636 bool gcTracingEnabled; | 642 bool gcTracingEnabled; |
| 637 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 643 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
| 638 if (!gcTracingEnabled) | 644 if (!gcTracingEnabled) |
| 639 return; | 645 return; |
| 640 | 646 |
| 641 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). | 647 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). |
| 642 // They are capped to INT_MAX just in case. | 648 // They are capped to INT_MAX just in case. |
| 643 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated
ObjectSizeKB", std::min(ThreadHeap::heapStats().allocatedObjectSize() / 1024, st
atic_cast<size_t>(INT_MAX))); | 649 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedObject
SizeKB", std::min(Heap::heapStats().allocatedObjectSize() / 1024, static_cast<si
ze_t>(INT_MAX))); |
| 644 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj
ectSizeKB", std::min(ThreadHeap::heapStats().markedObjectSize() / 1024, static_c
ast<size_t>(INT_MAX))); | 650 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz
eKB", std::min(Heap::heapStats().markedObjectSize() / 1024, static_cast<size_t>(
INT_MAX))); |
| 645 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::markedObj
ectSizeAtLastCompleteSweepKB", std::min(ThreadHeap::heapStats().markedObjectSize
AtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); | 651 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSiz
eAtLastCompleteSweepKB", std::min(Heap::heapStats().markedObjectSizeAtLastComple
teSweep() / 1024, static_cast<size_t>(INT_MAX))); |
| 646 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::allocated
SpaceKB", std::min(ThreadHeap::heapStats().allocatedSpace() / 1024, static_cast<
size_t>(INT_MAX))); | 652 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceK
B", std::min(Heap::heapStats().allocatedSpace() / 1024, static_cast<size_t>(INT_
MAX))); |
| 647 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::objectSiz
eAtLastGCKB", std::min(ThreadHeap::heapStats().objectSizeAtLastGC() / 1024, stat
ic_cast<size_t>(INT_MAX))); | 653 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLas
tGCKB", std::min(Heap::heapStats().objectSizeAtLastGC() / 1024, static_cast<size
_t>(INT_MAX))); |
| 648 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::wrapperCo
unt", std::min(ThreadHeap::heapStats().wrapperCount(), static_cast<size_t>(INT_M
AX))); | 654 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount",
std::min(Heap::heapStats().wrapperCount(), static_cast<size_t>(INT_MAX))); |
| 649 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::heapStats
().wrapperCountAtLastGC", std::min(ThreadHeap::heapStats().wrapperCountAtLastGC(
), static_cast<size_t>(INT_MAX))); | 655 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::heapStats().wra
pperCountAtLastGC", std::min(Heap::heapStats().wrapperCountAtLastGC(), static_ca
st<size_t>(INT_MAX))); |
| 650 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::collected
WrapperCount", std::min(ThreadHeap::heapStats().collectedWrapperCount(), static_
cast<size_t>(INT_MAX))); | 656 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrappe
rCount", std::min(Heap::heapStats().collectedWrapperCount(), static_cast<size_t>
(INT_MAX))); |
| 651 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "ThreadHeap::partition
AllocSizeAtLastGCKB", std::min(ThreadHeap::heapStats().partitionAllocSizeAtLastG
C() / 1024, static_cast<size_t>(INT_MAX))); | 657 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocS
izeAtLastGCKB", std::min(Heap::heapStats().partitionAllocSizeAtLastGC() / 1024,
static_cast<size_t>(INT_MAX))); |
| 652 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSize
OfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 102
4, static_cast<size_t>(INT_MAX))); | 658 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Partitions::totalSize
OfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 102
4, static_cast<size_t>(INT_MAX))); |
| 653 } | 659 } |
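Each counter above repeats the same clamp: byte counts are reported in KB and capped because TRACE_COUNTER1 payloads are 32-bit ints. The shape of it, as a hypothetical helper (not present in the source):

    #include <algorithm>
    #include <climits>
    #include <cstddef>

    // Convert a byte count to KB, clamped so it fits a 32-bit counter.
    static size_t cappedKB(size_t bytes)
    {
        return std::min(bytes / 1024, static_cast<size_t>(INT_MAX));
    }

A call site would then read, e.g., TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::allocatedSpaceKB", cappedKB(Heap::heapStats().allocatedSpace()));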
| 654 | 660 |
| 655 size_t ThreadHeap::objectPayloadSizeForTesting() | 661 size_t Heap::objectPayloadSizeForTesting() |
| 656 { | 662 { |
| 657 size_t objectPayloadSize = 0; | 663 size_t objectPayloadSize = 0; |
| 658 for (ThreadState* state : ThreadState::attachedThreads()) { | 664 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 659 state->setGCState(ThreadState::GCRunning); | 665 state->setGCState(ThreadState::GCRunning); |
| 660 state->makeConsistentForGC(); | 666 state->makeConsistentForGC(); |
| 661 objectPayloadSize += state->objectPayloadSizeForTesting(); | 667 objectPayloadSize += state->objectPayloadSizeForTesting(); |
| 662 state->setGCState(ThreadState::EagerSweepScheduled); | 668 state->setGCState(ThreadState::EagerSweepScheduled); |
| 663 state->setGCState(ThreadState::Sweeping); | 669 state->setGCState(ThreadState::Sweeping); |
| 664 state->setGCState(ThreadState::NoGCScheduled); | 670 state->setGCState(ThreadState::NoGCScheduled); |
| 665 } | 671 } |
| 666 return objectPayloadSize; | 672 return objectPayloadSize; |
| 667 } | 673 } |
| 668 | 674 |
| 669 RegionTree* ThreadHeap::getRegionTree() | 675 RegionTree* Heap::getRegionTree() |
| 670 { | 676 { |
| 671 DEFINE_THREAD_SAFE_STATIC_LOCAL(RegionTree, tree, new RegionTree); | 677 DEFINE_THREAD_SAFE_STATIC_LOCAL(RegionTree, tree, new RegionTree); |
| 672 return &tree; | 678 return &tree; |
| 673 } | 679 } |
| 674 | 680 |
| 675 BasePage* ThreadHeap::lookup(Address address) | 681 BasePage* Heap::lookup(Address address) |
| 676 { | 682 { |
| 677 ASSERT(ThreadState::current()->isInGC()); | 683 ASSERT(ThreadState::current()->isInGC()); |
| 678 if (PageMemoryRegion* region = ThreadHeap::getRegionTree()->lookup(address)) { | 684 if (PageMemoryRegion* region = Heap::getRegionTree()->lookup(address)) { |
| 679 BasePage* page = region->pageFromAddress(address); | 685 BasePage* page = region->pageFromAddress(address); |
| 680 return page && !page->orphaned() ? page : nullptr; | 686 return page && !page->orphaned() ? page : nullptr; |
| 681 } | 687 } |
| 682 return nullptr; | 688 return nullptr; |
| 683 } | 689 } |
| 684 | 690 |
| 685 void ThreadHeap::resetHeapCounters() | 691 void Heap::resetHeapCounters() |
| 686 { | 692 { |
| 687 ASSERT(ThreadState::current()->isInGC()); | 693 ASSERT(ThreadState::current()->isInGC()); |
| 688 | 694 |
| 689 ThreadHeap::reportMemoryUsageForTracing(); | 695 Heap::reportMemoryUsageForTracing(); |
| 690 | 696 |
| 691 ProcessHeap::resetHeapCounters(); | 697 ProcessHeap::resetHeapCounters(); |
| 692 ThreadHeap::heapStats().reset(); | 698 Heap::heapStats().reset(); |
| 693 for (ThreadState* state : ThreadState::attachedThreads()) | 699 for (ThreadState* state : ThreadState::attachedThreads()) |
| 694 state->resetHeapCounters(); | 700 state->resetHeapCounters(); |
| 695 } | 701 } |
| 696 | 702 |
| 697 ThreadHeapStats& ThreadHeap::heapStats() | 703 // TODO(keishi): Make this a member of ThreadHeap. |
| 704 ThreadHeapStats& Heap::heapStats() |
| 698 { | 705 { |
| 699 DEFINE_THREAD_SAFE_STATIC_LOCAL(ThreadHeapStats, stats, new ThreadHeapStats()); | 706 DEFINE_THREAD_SAFE_STATIC_LOCAL(ThreadHeapStats, stats, new ThreadHeapStats()); |
| 700 return stats; | 707 return stats; |
| 701 } | 708 } |
| 702 | 709 |
| 703 CallbackStack* ThreadHeap::s_markingStack; | 710 CallbackStack* Heap::s_markingStack; |
| 704 CallbackStack* ThreadHeap::s_postMarkingCallbackStack; | 711 CallbackStack* Heap::s_postMarkingCallbackStack; |
| 705 CallbackStack* ThreadHeap::s_globalWeakCallbackStack; | 712 CallbackStack* Heap::s_globalWeakCallbackStack; |
| 706 CallbackStack* ThreadHeap::s_ephemeronStack; | 713 CallbackStack* Heap::s_ephemeronStack; |
| 707 HeapDoesNotContainCache* ThreadHeap::s_heapDoesNotContainCache; | 714 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 708 FreePagePool* ThreadHeap::s_freePagePool; | 715 FreePagePool* Heap::s_freePagePool; |
| 709 OrphanedPagePool* ThreadHeap::s_orphanedPagePool; | 716 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 710 | 717 |
| 711 BlinkGC::GCReason ThreadHeap::s_lastGCReason = BlinkGC::NumberOfGCReason; | 718 BlinkGC::GCReason Heap::s_lastGCReason = BlinkGC::NumberOfGCReason; |
| 712 | 719 |
| 713 } // namespace blink | 720 } // namespace blink |