Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 126 matching lines...) | |
| 137 { | 137 { |
| 138 // Only cleanup if we parked all threads in which case the GC happened | 138 // Only cleanup if we parked all threads in which case the GC happened |
| 139 // and we need to resume the other threads. | 139 // and we need to resume the other threads. |
| 140 if (m_resumeThreads) | 140 if (m_resumeThreads) |
| 141 ThreadState::resumeThreads(); | 141 ThreadState::resumeThreads(); |
| 142 } | 142 } |
| 143 private: | 143 private: |
| 144 bool m_resumeThreads; | 144 bool m_resumeThreads; |
| 145 }; | 145 }; |
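The destructor above is the tail of an RAII park/resume helper: the constructor tries to bring every other thread to a safepoint, and the destructor resumes them only if that parking succeeded. A minimal sketch of the idiom, with `parkAllThreads()`/`resumeAllThreads()` as assumed names for the safepoint entry points (only `resumeThreads()` is visible in this diff):

```cpp
// Assumed safepoint entry points; in Blink these live on ThreadState and
// are only partially visible in this diff (resumeThreads() is real).
bool parkAllThreads();    // true if every other thread reached a safepoint
void resumeAllThreads();

// RAII helper matching the destructor shown above: park on construction,
// resume on destruction, but only if parking actually succeeded.
class ParkThreadsScope {
public:
    ParkThreadsScope() : m_resumeThreads(parkAllThreads()) { }
    ~ParkThreadsScope()
    {
        if (m_resumeThreads)
            resumeAllThreads();
    }
    bool parkedAllThreads() const { return m_resumeThreads; }

private:
    const bool m_resumeThreads;
};
```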
| 146 | 146 |
| 147 void Heap::flushHeapDoesNotContainCache() | |
| 148 { | |
| 149 s_heapDoesNotContainCache->flush(); | |
| 150 } | |
| 151 | |
| 152 void Heap::init() | 147 void Heap::init() |
| 153 { | 148 { |
| 154 ThreadState::init(); | 149 ThreadState::init(); |
| 155 s_markingStack = new CallbackStack(); | 150 |
| 156 s_postMarkingCallbackStack = new CallbackStack(); | |
| 157 s_globalWeakCallbackStack = new CallbackStack(); | |
| 158 s_ephemeronStack = new CallbackStack(); | |
| 159 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | |
| 160 s_freePagePool = new FreePagePool(); | 151 s_freePagePool = new FreePagePool(); |
| 161 s_orphanedPagePool = new OrphanedPagePool(); | 152 s_orphanedPagePool = new OrphanedPagePool(); |
| 162 s_allocatedSpace = 0; | |
| 163 s_allocatedObjectSize = 0; | |
| 164 s_objectSizeAtLastGC = 0; | |
| 165 s_markedObjectSize = 0; | |
| 166 s_markedObjectSizeAtLastCompleteSweep = 0; | |
| 167 s_wrapperCount = 0; | |
| 168 s_wrapperCountAtLastGC = 0; | |
| 169 s_collectedWrapperCount = 0; | |
| 170 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 171 s_estimatedMarkingTimePerByte = 0.0; | |
| 172 #if ENABLE(ASSERT) | |
| 173 s_gcGeneration = 1; | |
| 174 #endif | |
| 175 | 153 |
| 176 GCInfoTable::init(); | 154 GCInfoTable::init(); |
| 177 | 155 |
| 178 if (Platform::current() && Platform::current()->currentThread()) | 156 if (Platform::current() && Platform::current()->currentThread()) |
| 179 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); | 157 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance(), "BlinkGC"); |
| 180 } | 158 } |
| 181 | 159 |
| 182 void Heap::shutdown() | 160 void Heap::shutdown() |
| 183 { | 161 { |
| 184 if (Platform::current() && Platform::current()->currentThread()) | 162 if (Platform::current() && Platform::current()->currentThread()) |
| 185 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | 163 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); |
| 186 s_shutdownCalled = true; | 164 s_shutdownCalled = true; |
| 187 ThreadState::shutdownHeapIfNecessary(); | 165 ThreadState::shutdownHeapIfNecessary(); |
| 188 } | 166 } |
| 189 | 167 |
| 190 void Heap::doShutdown() | 168 void Heap::doShutdown() |
| 191 { | 169 { |
| 192 // We don't want to call doShutdown() twice. | 170 // We don't want to call doShutdown() twice. |
| 193 if (!s_markingStack) | 171 if (!s_freePagePool) |
| 194 return; | 172 return; |
| 195 | |
| 196 ASSERT(!ThreadState::attachedThreads().size()); | 173 ASSERT(!ThreadState::attachedThreads().size()); |
| 197 delete s_heapDoesNotContainCache; | 174 delete s_orphanedPagePool; |
| 198 s_heapDoesNotContainCache = nullptr; | |
| 199 delete s_freePagePool; | 175 delete s_freePagePool; |
| 200 s_freePagePool = nullptr; | 176 s_freePagePool = nullptr; |
| 201 delete s_orphanedPagePool; | |
| 202 s_orphanedPagePool = nullptr; | 177 s_orphanedPagePool = nullptr; |
| 203 delete s_globalWeakCallbackStack; | |
| 204 s_globalWeakCallbackStack = nullptr; | |
| 205 delete s_postMarkingCallbackStack; | |
| 206 s_postMarkingCallbackStack = nullptr; | |
| 207 delete s_markingStack; | |
| 208 s_markingStack = nullptr; | |
| 209 delete s_ephemeronStack; | |
| 210 s_ephemeronStack = nullptr; | |
| 211 delete s_regionTree; | |
| 212 s_regionTree = nullptr; | |
| 213 GCInfoTable::shutdown(); | 178 GCInfoTable::shutdown(); |
| 214 ThreadState::shutdown(); | 179 ThreadState::shutdown(); |
| 215 ASSERT(Heap::allocatedSpace() == 0); | |
| 216 } | 180 } |
| 217 | 181 |
| 218 #if ENABLE(ASSERT) | 182 #if ENABLE(ASSERT) |
| 219 BasePage* Heap::findPageFromAddress(Address address) | 183 BasePage* Heap::findPageFromAddress(Address address) |
| 220 { | 184 { |
| 221 MutexLocker lock(ThreadState::threadAttachMutex()); | 185 MutexLocker lock(ThreadState::threadAttachMutex()); |
| 222 for (ThreadState* state : ThreadState::attachedThreads()) { | 186 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 223 if (BasePage* page = state->findPageFromAddress(address)) | 187 if (BasePage* page = state->findPageFromAddress(address)) |
| 224 return page; | 188 return page; |
| 225 } | 189 } |
| 226 return nullptr; | 190 return nullptr; |
| 227 } | 191 } |
| 228 #endif | 192 #endif |
| 229 | 193 |
| 230 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 194 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 231 { | 195 { |
| 232 ASSERT(ThreadState::current()->isInGC()); | 196 ThreadState* threadState = visitor->threadState(); |
| 197 ASSERT(threadState->isInGC()); | |
| 233 | 198 |
| 234 #if !ENABLE(ASSERT) | 199 #if !ENABLE(ASSERT) |
| 235 if (s_heapDoesNotContainCache->lookup(address)) | 200 if (threadState->heapDoesNotContainCache()->lookup(address)) |
| 236 return nullptr; | 201 return nullptr; |
| 237 #endif | 202 #endif |
| 238 | 203 |
| 239 if (BasePage* page = lookup(address)) { | 204 if (BasePage* page = lookup(address, threadState)) { |
| 240 ASSERT(page->contains(address)); | 205 ASSERT(page->contains(address)); |
| 241 ASSERT(!page->orphaned()); | 206 ASSERT(!page->orphaned()); |
| 242 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 207 ASSERT(!threadState->heapDoesNotContainCache()->lookup(address)); |
| 243 page->checkAndMarkPointer(visitor, address); | 208 page->checkAndMarkPointer(visitor, address); |
| 244 return address; | 209 return address; |
| 245 } | 210 } |
| 246 | 211 |
| 247 #if !ENABLE(ASSERT) | 212 #if !ENABLE(ASSERT) |
| 248 s_heapDoesNotContainCache->addEntry(address); | 213 threadState->heapDoesNotContainCache()->addEntry(address); |
| 249 #else | 214 #else |
| 250 if (!s_heapDoesNotContainCache->lookup(address)) | 215 if (!threadState->heapDoesNotContainCache()->lookup(address)) |
| 251 s_heapDoesNotContainCache->addEntry(address); | 216 threadState->heapDoesNotContainCache()->addEntry(address); |
| 252 #endif | 217 #endif |
| 253 return nullptr; | 218 return nullptr; |
| 254 } | 219 } |
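checkAndMarkPointer() consults a negative cache before the page lookup: the cache only records addresses known not to belong to the GC heap, so conservative stack scanning can reject most non-heap words cheaply, and in ASSERT builds the fast path is skipped so the slow lookup can cross-check the cache. A minimal sketch of such a cache, assuming a direct-mapped table and 4KB granularity; names and sizes are illustrative, not the internals of Blink's HeapDoesNotContainCache:

```cpp
#include <array>
#include <cstddef>
#include <cstdint>

// Direct-mapped negative cache: an entry only ever means "this address is
// known NOT to be in the GC heap"; a miss means "unknown", never "present",
// so stale entries are harmless and the whole table can be flushed cheaply.
class NotInHeapCache {
public:
    bool lookup(const void* address) const
    {
        uintptr_t k = key(address);
        return k && m_slots[k % kSize] == k; // 0 doubles as the empty sentinel
    }
    void addEntry(const void* address)
    {
        if (uintptr_t k = key(address))
            m_slots[k % kSize] = k;
    }
    void flush() { m_slots.fill(0); } // call whenever heap pages are added

private:
    // Cache at (assumed) 4KB granularity so nearby words share entries.
    static uintptr_t key(const void* address)
    {
        return reinterpret_cast<uintptr_t>(address) >> 12;
    }
    static constexpr size_t kSize = 1024;
    std::array<uintptr_t, kSize> m_slots{};
};
```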
| 255 | 220 |
| 256 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 221 void Heap::pushTraceCallback(void* object, TraceCallback callback, ThreadState* threadState) |
| 257 { | 222 { |
| 258 ASSERT(ThreadState::current()->isInGC()); | 223 ASSERT(threadState->isInGC()); |
| 259 | 224 |
| 260 // Trace should never reach an orphaned page. | 225 // Trace should never reach an orphaned page. |
| 261 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 226 ASSERT(!Heap::orphanedPagePool()->contains(object)); |
| 262 CallbackStack::Item* slot = s_markingStack->allocateEntry(); | 227 CallbackStack::Item* slot = threadState->markingStack()->allocateEntry(); |
| 263 *slot = CallbackStack::Item(object, callback); | 228 *slot = CallbackStack::Item(object, callback); |
| 264 } | 229 } |
| 265 | 230 |
| 266 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 231 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
| 267 { | 232 { |
| 268 CallbackStack::Item* item = s_markingStack->pop(); | 233 CallbackStack::Item* item = visitor->threadState()->markingStack()->pop(); |
| 269 if (!item) | 234 if (!item) |
| 270 return false; | 235 return false; |
| 271 item->call(visitor); | 236 item->call(visitor); |
| 272 return true; | 237 return true; |
| 273 } | 238 } |
| 274 | 239 |
| 275 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) | 240 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) |
| 276 { | 241 { |
| 277 ASSERT(ThreadState::current()->isInGC()); | 242 ASSERT(ThreadState::current()->isInGC()); |
| 278 | 243 |
| 279 // Trace should never reach an orphaned page. | 244 // Trace should never reach an orphaned page. |
| 280 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 245 ASSERT(!Heap::orphanedPagePool()->contains(object)); |
| 281 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); | 246 CallbackStack::Item* slot = ThreadState::current()->postMarkingCallbackStack()->allocateEntry(); |
|
haraken 2016/01/07 08:06:22: Can we avoid calling ThreadState::current()?
| 282 *slot = CallbackStack::Item(object, callback); | 247 *slot = CallbackStack::Item(object, callback); |
| 283 } | 248 } |
| 284 | 249 |
| 285 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) | 250 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) |
| 286 { | 251 { |
| 287 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { | 252 if (CallbackStack::Item* item = visitor->threadState()->postMarkingCallbackStack()->pop()) { |
| 288 item->call(visitor); | 253 item->call(visitor); |
| 289 return true; | 254 return true; |
| 290 } | 255 } |
| 291 return false; | 256 return false; |
| 292 } | 257 } |
| 293 | 258 |
| 294 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) | 259 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback, ThreadState* threadState) |
| 295 { | 260 { |
| 296 ASSERT(ThreadState::current()->isInGC()); | 261 ASSERT(threadState->isInGC()); |
| 297 | 262 |
| 298 // Trace should never reach an orphaned page. | 263 // Trace should never reach an orphaned page. |
| 299 ASSERT(!Heap::orphanedPagePool()->contains(cell)); | 264 ASSERT(!Heap::orphanedPagePool()->contains(cell)); |
| 300 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); | 265 CallbackStack::Item* slot = threadState->globalWeakCallbackStack()->allocateEntry(); |
| 301 *slot = CallbackStack::Item(cell, callback); | 266 *slot = CallbackStack::Item(cell, callback); |
| 302 } | 267 } |
| 303 | 268 |
| 304 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | 269 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) |
| 305 { | 270 { |
| 306 ASSERT(ThreadState::current()->isInGC()); | 271 ASSERT(ThreadState::current()->isInGC()); |
| 307 | 272 |
| 308 // Trace should never reach an orphaned page. | 273 // Trace should never reach an orphaned page. |
| 309 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 274 ASSERT(!Heap::orphanedPagePool()->contains(object)); |
| 310 ThreadState* state = pageFromObject(object)->heap()->threadState(); | 275 ThreadState* state = pageFromObject(object)->heap()->threadState(); |
| 311 state->pushThreadLocalWeakCallback(closure, callback); | 276 state->pushThreadLocalWeakCallback(closure, callback); |
| 312 } | 277 } |
| 313 | 278 |
| 314 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) | 279 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
| 315 { | 280 { |
| 316 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { | 281 if (CallbackStack::Item* item = visitor->threadState()->globalWeakCallbackStack()->pop()) { |
| 317 item->call(visitor); | 282 item->call(visitor); |
| 318 return true; | 283 return true; |
| 319 } | 284 } |
| 320 return false; | 285 return false; |
| 321 } | 286 } |
| 322 | 287 |
| 323 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 288 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback, ThreadState* threadState) |
| 324 { | 289 { |
| 325 ASSERT(ThreadState::current()->isInGC()); | 290 ASSERT(threadState->isInGC()); |
| 326 | 291 |
| 327 // Trace should never reach an orphaned page. | 292 // Trace should never reach an orphaned page. |
| 328 ASSERT(!Heap::orphanedPagePool()->contains(table)); | 293 ASSERT(!Heap::orphanedPagePool()->contains(table)); |
| 329 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); | 294 CallbackStack::Item* slot = threadState->ephemeronStack()->allocateEntry(); |
| 330 *slot = CallbackStack::Item(table, iterationCallback); | 295 *slot = CallbackStack::Item(table, iterationCallback); |
| 331 | 296 |
| 332 // Register a post-marking callback to tell the tables that | 297 // Register a post-marking callback to tell the tables that |
| 333 // ephemeron iteration is complete. | 298 // ephemeron iteration is complete. |
| 334 pushPostMarkingCallback(table, iterationDoneCallback); | 299 pushPostMarkingCallback(table, iterationDoneCallback); |
| 335 } | 300 } |
| 336 | 301 |
| 337 #if ENABLE(ASSERT) | 302 #if ENABLE(ASSERT) |
| 338 bool Heap::weakTableRegistered(const void* table) | 303 bool Heap::weakTableRegistered(const void* table, ThreadState* threadState) |
| 339 { | 304 { |
| 340 ASSERT(s_ephemeronStack); | 305 ASSERT(threadState->ephemeronStack()); |
| 341 return s_ephemeronStack->hasCallbackForObject(table); | 306 return threadState->ephemeronStack()->hasCallbackForObject(table); |
| 342 } | 307 } |
| 343 #endif | 308 #endif |
| 344 | 309 |
| 345 void Heap::preGC() | 310 void Heap::preGC() |
| 346 { | 311 { |
| 347 ASSERT(!ThreadState::current()->isInGC()); | 312 ASSERT(!ThreadState::current()->isInGC()); |
| 348 for (ThreadState* state : ThreadState::attachedThreads()) | 313 for (ThreadState* state : ThreadState::attachedThreads()) |
| 349 state->preGC(); | 314 state->preGC(); |
| 350 } | 315 } |
| 351 | 316 |
| (...skipping 55 matching lines...) | |
| 407 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 372 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 408 | 373 |
| 409 // Disallow allocation during garbage collection (but not during the | 374 // Disallow allocation during garbage collection (but not during the |
| 410 // finalization that happens when the gcScope is torn down). | 375 // finalization that happens when the gcScope is torn down). |
| 411 ThreadState::NoAllocationScope noAllocationScope(state); | 376 ThreadState::NoAllocationScope noAllocationScope(state); |
| 412 | 377 |
| 413 preGC(); | 378 preGC(); |
| 414 | 379 |
| 415 StackFrameDepthScope stackDepthScope; | 380 StackFrameDepthScope stackDepthScope; |
| 416 | 381 |
| 417 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); | 382 size_t totalObjectSize = state->allocatedObjectSize() + state->markedObjectSize(); |
| 418 if (gcType != BlinkGC::TakeSnapshot) | 383 if (gcType != BlinkGC::TakeSnapshot) |
| 419 Heap::resetHeapCounters(); | 384 state->resetHeapCounters(); |
| 420 | 385 |
| 421 // 1. Trace persistent roots. | 386 // 1. Trace persistent roots. |
| 422 ThreadState::visitPersistentRoots(gcScope.visitor()); | 387 ThreadState::visitPersistentRoots(gcScope.visitor()); |
| 423 | 388 |
| 424 // 2. Trace objects reachable from the stack. We do this independent of the | 389 // 2. Trace objects reachable from the stack. We do this independent of the |
| 425 // given stackState since other threads might have a different stack state. | 390 // given stackState since other threads might have a different stack state. |
| 426 ThreadState::visitStackRoots(gcScope.visitor()); | 391 ThreadState::visitStackRoots(gcScope.visitor()); |
| 427 | 392 |
| 428 // 3. Transitive closure to trace objects including ephemerons. | 393 // 3. Transitive closure to trace objects including ephemerons. |
| 429 processMarkingStack(gcScope.visitor()); | 394 processMarkingStack(gcScope.visitor()); |
| 430 | 395 |
| 431 postMarkingProcessing(gcScope.visitor()); | 396 postMarkingProcessing(gcScope.visitor()); |
| 432 globalWeakProcessing(gcScope.visitor()); | 397 globalWeakProcessing(gcScope.visitor()); |
| 433 | 398 |
| 434 // Now we can delete all orphaned pages because there are no dangling | 399 // Now we can delete all orphaned pages because there are no dangling |
| 435 // pointers to the orphaned pages. (If we have such dangling pointers, | 400 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 436 // we should have crashed during marking before getting here.) | 401 // we should have crashed during marking before getting here.) |
| 437 orphanedPagePool()->decommitOrphanedPages(); | 402 orphanedPagePool()->decommitOrphanedPages(); |
| 438 | 403 |
| 439 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; | 404 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
| 440 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; | 405 state->setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
| 441 | 406 |
| 442 #if PRINT_HEAP_STATS | 407 #if PRINT_HEAP_STATS |
| 443 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); | 408 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); |
| 444 #endif | 409 #endif |
| 445 | 410 |
| 446 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", markingTimeInMilliseconds, 0, 10 * 1000, 50); | 411 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", markingTimeInMilliseconds, 0, 10 * 1000, 50); |
| 447 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | 412 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", ThreadState::totalAllocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
| 448 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | 413 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", ThreadState::totalAllocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
| 449 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkGC::NumberOfGCReason); | 414 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkGC::NumberOfGCReason); |
| 450 Heap::reportMemoryUsageHistogram(); | 415 state->reportMemoryUsageHistogram(); |
| 451 WTF::Partitions::reportMemoryUsageHistogram(); | 416 WTF::Partitions::reportMemoryUsageHistogram(); |
| 452 | 417 |
| 453 postGC(gcType); | 418 postGC(gcType); |
| 454 | 419 |
| 455 #if ENABLE(ASSERT) | 420 #if ENABLE(ASSERT) |
| 456 // 0 is used to identify non-assigned areas, so avoid using 0 in s_gcGeneration. | 421 state->incrementGcGeneration(); |
| 457 if (++s_gcGeneration == 0) { | |
| 458 s_gcGeneration = 1; | |
| 459 } | |
| 460 #endif | 422 #endif |
| 461 } | 423 } |
| 462 | 424 |
| 463 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 425 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 464 { | 426 { |
| 465 { | 427 { |
| 466 // A thread-specific termination GC must not allow other global GCs to go | 428 // A thread-specific termination GC must not allow other global GCs to go |
| 467 // ahead while it is running, hence the termination GC does not enter a | 429 // ahead while it is running, hence the termination GC does not enter a |
| 468 // safepoint. GCScope will also not enter a safepoint scope for | 430 // safepoint. GCScope will also not enter a safepoint scope for |
| 469 // ThreadTerminationGC. | 431 // ThreadTerminationGC. |
| (...skipping 20 matching lines...) | |
| 490 processMarkingStack(gcScope.visitor()); | 452 processMarkingStack(gcScope.visitor()); |
| 491 | 453 |
| 492 postMarkingProcessing(gcScope.visitor()); | 454 postMarkingProcessing(gcScope.visitor()); |
| 493 globalWeakProcessing(gcScope.visitor()); | 455 globalWeakProcessing(gcScope.visitor()); |
| 494 | 456 |
| 495 state->postGC(BlinkGC::GCWithSweep); | 457 state->postGC(BlinkGC::GCWithSweep); |
| 496 } | 458 } |
| 497 state->preSweep(); | 459 state->preSweep(); |
| 498 } | 460 } |
| 499 | 461 |
| 462 void Heap::collectGarbageForIsolatedThread(ThreadState* state) | |
| 463 { | |
| 464 { | |
| 465 // A thread-specific termination GC must not allow other global GCs to go | |
| 466 // ahead while it is running, hence the termination GC does not enter a | |
| 467 // safepoint. GCScope will also not enter a safepoint scope for | |
| 468 // ThreadTerminationGC. | |
| 469 GCScope gcScope(state, BlinkGC::HeapPointersOnStack, BlinkGC::ThreadTerminationGC); | |
|
haraken 2016/01/07 08:06:22: It is not a good idea to unconditionally assume Bl…
haraken 2016/01/07 08:06:22: It is not a good idea to "reuse" the concept of Th…
| 470 | |
| 471 ThreadState::NoAllocationScope noAllocationScope(state); | |
| 472 | |
| 473 state->preGC(); | |
| 474 | |
| 475 // 1. Trace the thread local persistent roots. For thread local GCs we | |
| 476 // don't trace the stack (i.e. no conservative scanning) since this is | |
| 477 // only called during thread shutdown where there should be no objects | |
| 478 // on the stack. | |
| 479 // We also assume that orphaned pages have no objects reachable from | |
| 480 // persistent handles on other threads or CrossThreadPersistents. The | |
| 481 // only cases where this could happen is if a subsequent conservative | |
| 482 // global GC finds a "pointer" on the stack or due to a programming | |
| 483 // error where an object has a dangling cross-thread pointer to an | |
| 484 // object on this heap. | |
| 485 state->visitPersistents(gcScope.visitor()); | |
| 486 | |
| 487 state->visitStack(gcScope.visitor()); | |
| 488 | |
| 489 // 2. Trace objects reachable from the thread's persistent roots | |
| 490 // including ephemerons. | |
| 491 processMarkingStack(gcScope.visitor()); | |
| 492 | |
| 493 postMarkingProcessing(gcScope.visitor()); | |
| 494 globalWeakProcessing(gcScope.visitor()); | |
| 495 | |
| 496 state->postGC(BlinkGC::GCWithSweep); | |
|
haraken 2016/01/07 08:06:22: We don't want to force sweeping. I don't think we…
| 497 } | |
| 498 state->preSweep(); | |
| 499 } | |
| 500 | |
| 500 void Heap::processMarkingStack(Visitor* visitor) | 501 void Heap::processMarkingStack(Visitor* visitor) |
| 501 { | 502 { |
| 502 // Ephemeron fixed point loop. | 503 // Ephemeron fixed point loop. |
| 503 do { | 504 do { |
| 504 { | 505 { |
| 505 // Iteratively mark all objects that are reachable from the objects | 506 // Iteratively mark all objects that are reachable from the objects |
| 506 // currently pushed onto the marking stack. | 507 // currently pushed onto the marking stack. |
| 507 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); | 508 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); |
| 508 while (popAndInvokeTraceCallback(visitor)) { } | 509 while (popAndInvokeTraceCallback(visitor)) { } |
| 509 } | 510 } |
| 510 | 511 |
| 511 { | 512 { |
| 512 // Mark any strong pointers that have now become reachable in | 513 // Mark any strong pointers that have now become reachable in |
| 513 // ephemeron maps. | 514 // ephemeron maps. |
| 514 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); | 515 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); |
| 515 s_ephemeronStack->invokeEphemeronCallbacks(visitor); | 516 visitor->threadState()->ephemeronStack()->invokeEphemeronCallbacks(visitor); |
| 516 } | 517 } |
| 517 | 518 |
| 518 // Rerun loop if ephemeron processing queued more objects for tracing. | 519 // Rerun loop if ephemeron processing queued more objects for tracing. |
| 519 } while (!s_markingStack->isEmpty()); | 520 } while (!visitor->threadState()->markingStack()->isEmpty()); |
| 520 } | 521 } |
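processMarkingStack() is a fixed-point iteration: draining the marking stack can make ephemeron keys live, and invoking ephemeron callbacks can push fresh objects back onto the marking stack, so the two phases alternate until a whole round leaves the stack empty. The bare shape of that loop, with hypothetical worklist and table types standing in for CallbackStack:

```cpp
// Fixed-point shape of marking with ephemerons (types are hypothetical).
// An ephemeron entry key -> value may only mark `value` once `key` is
// known to be live, so marking and ephemeron processing must alternate.
template <typename Worklist, typename EphemeronTable, typename Visitor>
void processUntilFixedPoint(Worklist& markingStack, EphemeronTable& ephemerons, Visitor* visitor)
{
    do {
        // Phase 1: transitively mark everything currently reachable.
        while (!markingStack.isEmpty())
            markingStack.pop()->call(visitor);

        // Phase 2: mark values whose keys became live during phase 1;
        // this may push new entries onto the marking stack.
        ephemerons.invokeEphemeronCallbacks(visitor);
    } while (!markingStack.isEmpty()); // rerun if phase 2 queued more work
}
```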
| 521 | 522 |
| 522 void Heap::postMarkingProcessing(Visitor* visitor) | 523 void Heap::postMarkingProcessing(Visitor* visitor) |
| 523 { | 524 { |
| 524 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); | 525 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); |
| 525 // Call post-marking callbacks including: | 526 // Call post-marking callbacks including: |
| 526 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | 527 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
| 527 // (specifically to clear the queued bits for weak hash tables), and | 528 // (specifically to clear the queued bits for weak hash tables), and |
| 528 // 2. the markNoTracing callbacks on collection backings to mark them | 529 // 2. the markNoTracing callbacks on collection backings to mark them |
| 529 // if they are only reachable from their front objects. | 530 // if they are only reachable from their front objects. |
| 530 while (popAndInvokePostMarkingCallback(visitor)) { } | 531 while (popAndInvokePostMarkingCallback(visitor)) { } |
| 531 | 532 |
| 532 s_ephemeronStack->clear(); | 533 visitor->threadState()->ephemeronStack()->clear(); |
| 533 | 534 |
| 534 // Post-marking callbacks should not trace any objects and | 535 // Post-marking callbacks should not trace any objects and |
| 535 // therefore the marking stack should be empty after the | 536 // therefore the marking stack should be empty after the |
| 536 // post-marking callbacks. | 537 // post-marking callbacks. |
| 537 ASSERT(s_markingStack->isEmpty()); | 538 ASSERT(visitor->threadState()->markingStack()->isEmpty()); |
| 538 } | 539 } |
| 539 | 540 |
| 540 void Heap::globalWeakProcessing(Visitor* visitor) | 541 void Heap::globalWeakProcessing(Visitor* visitor) |
| 541 { | 542 { |
| 542 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); | 543 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); |
| 543 double startTime = WTF::currentTimeMS(); | 544 double startTime = WTF::currentTimeMS(); |
| 544 | 545 |
| 545 // Call weak callbacks on objects that may now be pointing to dead objects. | 546 // Call weak callbacks on objects that may now be pointing to dead objects. |
| 546 while (popAndInvokeGlobalWeakCallback(visitor)) { } | 547 while (popAndInvokeGlobalWeakCallback(visitor)) { } |
| 547 | 548 |
| 548 // It is not permitted to trace pointers of live objects in the weak | 549 // It is not permitted to trace pointers of live objects in the weak |
| 549 // callback phase, so the marking stack should still be empty here. | 550 // callback phase, so the marking stack should still be empty here. |
| 550 ASSERT(s_markingStack->isEmpty()); | 551 ASSERT(visitor->threadState()->markingStack()->isEmpty()); |
| 551 | 552 |
| 552 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | 553 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
| 553 Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessing", timeForGlobalWeakProcessing, 1, 10 * 1000, 50); | 554 Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessing", timeForGlobalWeakProcessing, 1, 10 * 1000, 50); |
| 554 } | 555 } |
| 555 | 556 |
| 556 void Heap::collectAllGarbage() | 557 void Heap::collectAllGarbage() |
|
haraken 2016/01/07 08:06:22: This is another reason I don't want to introduce c…
| 557 { | 558 { |
| 558 // We need to run multiple GCs to collect a chain of persistent handles. | 559 // We need to run multiple GCs to collect a chain of persistent handles. |
| 559 size_t previousLiveObjects = 0; | 560 size_t previousLiveObjects = 0; |
| 560 for (int i = 0; i < 5; ++i) { | 561 for (int i = 0; i < 5; ++i) { |
| 561 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); | 562 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); |
| 562 size_t liveObjects = Heap::markedObjectSize(); | 563 size_t liveObjects = ThreadState::current()->markedObjectSize(); |
| 563 if (liveObjects == previousLiveObjects) | 564 if (liveObjects == previousLiveObjects) |
| 564 break; | 565 break; |
| 565 previousLiveObjects = liveObjects; | 566 previousLiveObjects = liveObjects; |
| 566 } | 567 } |
| 567 } | 568 } |
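Why up to five passes: sweeping after one collection can run a finalizer that releases the only Persistent handle keeping another object alive, so each link of such a chain only becomes unreachable one GC later. The loop therefore stops once the marked object size no longer shrinks; the cap of five iterations is, presumably, a pragmatic bound rather than a guarantee that arbitrarily long chains are fully collapsed.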
| 568 | 569 |
| 569 double Heap::estimatedMarkingTime() | |
| 570 { | |
| 571 ASSERT(ThreadState::current()->isMainThread()); | |
| 572 | |
| 573 // Use 8 ms as initial estimated marking time. | |
| 574 // 8 ms is long enough for low-end mobile devices to mark common | |
| 575 // real-world object graphs. | |
| 576 if (s_estimatedMarkingTimePerByte == 0) | |
| 577 return 0.008; | |
| 578 | |
| 579 // Assuming that the collection rate of this GC will be mostly equal to | |
| 580 // the collection rate of the last GC, estimate the marking time of this GC. | |
| 581 return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap::markedObjectSize()); | |
| 582 } | |
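A worked example with assumed numbers: if the previous GC spent 20 ms marking when allocated plus marked objects totaled 50 MB, the stored rate is 0.020 s / (50 × 2²⁰ B) ≈ 3.8 × 10⁻¹⁰ s per byte; with 80 MB of objects on the heap now, the estimate is roughly 3.8 × 10⁻¹⁰ × 80 × 2²⁰ ≈ 0.032 s, i.e. about 32 ms. Before the first GC the rate is still 0, so the fixed 8 ms fallback is returned.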
| 583 | |
| 584 void Heap::reportMemoryUsageHistogram() | |
| 585 { | |
| 586 static size_t supportedMaxSizeInMB = 4 * 1024; | |
| 587 static size_t observedMaxSizeInMB = 0; | |
| 588 | |
| 589 // We only report the memory in the main thread. | |
| 590 if (!isMainThread()) | |
| 591 return; | |
| 592 // +1 is for rounding up the sizeInMB. | |
| 593 size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1; | |
| 594 if (sizeInMB >= supportedMaxSizeInMB) | |
| 595 sizeInMB = supportedMaxSizeInMB - 1; | |
| 596 if (sizeInMB > observedMaxSizeInMB) { | |
| 597 // Send a UseCounter only when we see the highest memory usage | |
| 598 // we've ever seen. | |
| 599 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeInMB, supportedMaxSizeInMB); | |
| 600 observedMaxSizeInMB = sizeInMB; | |
| 601 } | |
| 602 } | |
| 603 | |
| 604 void Heap::reportMemoryUsageForTracing() | |
| 605 { | |
| 606 #if PRINT_HEAP_STATS | |
| 607 // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount()); | |
| 608 #endif | |
| 609 | |
| 610 bool gcTracingEnabled; | |
| 611 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | |
| 612 if (!gcTracingEnabled) | |
| 613 return; | |
| 614 | |
| 615 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). | |
| 616 // They are capped to INT_MAX just in case. | |
| 617 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | |
| 618 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | |
| 619 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); | |
| 620 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); | |
| 621 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | |
| 622 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX))); | |
| 623 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtLastGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX))); | |
| 624 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrapperCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX))); | |
| 625 TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | |
| 626 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); | |
| 627 } | |
| 628 | |
| 629 size_t Heap::objectPayloadSizeForTesting() | 570 size_t Heap::objectPayloadSizeForTesting() |
| 630 { | 571 { |
| 631 size_t objectPayloadSize = 0; | 572 size_t objectPayloadSize = 0; |
| 632 for (ThreadState* state : ThreadState::attachedThreads()) { | 573 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 633 state->setGCState(ThreadState::GCRunning); | 574 state->setGCState(ThreadState::GCRunning); |
| 634 state->makeConsistentForGC(); | 575 state->makeConsistentForGC(); |
| 635 objectPayloadSize += state->objectPayloadSizeForTesting(); | 576 objectPayloadSize += state->objectPayloadSizeForTesting(); |
| 636 state->setGCState(ThreadState::EagerSweepScheduled); | 577 state->setGCState(ThreadState::EagerSweepScheduled); |
| 637 state->setGCState(ThreadState::Sweeping); | 578 state->setGCState(ThreadState::Sweeping); |
| 638 state->setGCState(ThreadState::NoGCScheduled); | 579 state->setGCState(ThreadState::NoGCScheduled); |
| 639 } | 580 } |
| 640 return objectPayloadSize; | 581 return objectPayloadSize; |
| 641 } | 582 } |
| 642 | 583 |
| 643 BasePage* Heap::lookup(Address address) | 584 BasePage* Heap::lookup(Address address, ThreadState* threadState) |
| 644 { | 585 { |
| 645 ASSERT(ThreadState::current()->isInGC()); | 586 ASSERT(threadState->isInGC()); |
| 646 if (!s_regionTree) | 587 if (!threadState->regionTree()) |
| 647 return nullptr; | 588 return nullptr; |
| 648 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { | 589 if (PageMemoryRegion* region = threadState->regionTree()->lookup(address)) { |
| 649 BasePage* page = region->pageFromAddress(address); | 590 BasePage* page = region->pageFromAddress(address); |
| 650 return page && !page->orphaned() ? page : nullptr; | 591 return page && !page->orphaned() ? page : nullptr; |
| 651 } | 592 } |
| 652 return nullptr; | 593 return nullptr; |
| 653 } | 594 } |
| 654 | 595 |
| 655 static Mutex& regionTreeMutex() | |
| 656 { | |
| 657 DEFINE_THREAD_SAFE_STATIC_LOCAL(Mutex, mutex, new Mutex); | |
| 658 return mutex; | |
| 659 } | |
| 660 | |
| 661 void Heap::removePageMemoryRegion(PageMemoryRegion* region) | 596 void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| 662 { | 597 { |
| 663 // Deletion of large objects (and thus their regions) can happen | 598 ThreadState* threadState = ThreadState::current(); |
| 664 // concurrently on sweeper threads. Removal can also happen during thread | 599 // When the render process shuts down, the main thread state may already be destroyed. |
| 665 // shutdown, but that case is safe. Regardless, we make all removals | 600 if (!threadState) |
| 666 // mutually exclusive. | 601 return; |
| 667 MutexLocker locker(regionTreeMutex()); | 602 threadState->removeFromRegionTree(region); |
| 668 RegionTree::remove(region, &s_regionTree); | |
| 669 } | 603 } |
| 670 | 604 |
| 671 void Heap::addPageMemoryRegion(PageMemoryRegion* region) | 605 void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| 672 { | 606 { |
| 673 MutexLocker locker(regionTreeMutex()); | 607 ThreadState* threadState = ThreadState::current(); |
| 674 RegionTree::add(new RegionTree(region), &s_regionTree); | 608 ASSERT(threadState); |
| 609 ThreadState::RegionTree* regionTree = threadState->regionTree(); | |
| 610 ThreadState::RegionTree::add(new ThreadState::RegionTree(region), ®ionTre e); | |
| 611 threadState->setRegionTree(regionTree); | |
| 675 } | 612 } |
| 676 | 613 |
| 677 PageMemoryRegion* Heap::RegionTree::lookup(Address address) | |
| 678 { | |
| 679 RegionTree* current = s_regionTree; | |
| 680 while (current) { | |
| 681 Address base = current->m_region->base(); | |
| 682 if (address < base) { | |
| 683 current = current->m_left; | |
| 684 continue; | |
| 685 } | |
| 686 if (address >= base + current->m_region->size()) { | |
| 687 current = current->m_right; | |
| 688 continue; | |
| 689 } | |
| 690 ASSERT(current->m_region->contains(address)); | |
| 691 return current->m_region; | |
| 692 } | |
| 693 return nullptr; | |
| 694 } | |
| 695 | |
| 696 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context) | |
| 697 { | |
| 698 ASSERT(newTree); | |
| 699 Address base = newTree->m_region->base(); | |
| 700 for (RegionTree* current = *context; current; current = *context) { | |
| 701 ASSERT(!current->m_region->contains(base)); | |
| 702 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
| 703 } | |
| 704 *context = newTree; | |
| 705 } | |
| 706 | |
| 707 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context) | |
| 708 { | |
| 709 ASSERT(region); | |
| 710 ASSERT(context); | |
| 711 Address base = region->base(); | |
| 712 RegionTree* current = *context; | |
| 713 for (; current; current = *context) { | |
| 714 if (region == current->m_region) | |
| 715 break; | |
| 716 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
| 717 } | |
| 718 | |
| 719 // Shutdown via detachMainThread might not have populated the region tree. | |
| 720 if (!current) | |
| 721 return; | |
| 722 | |
| 723 *context = nullptr; | |
| 724 if (current->m_left) { | |
| 725 add(current->m_left, context); | |
| 726 current->m_left = nullptr; | |
| 727 } | |
| 728 if (current->m_right) { | |
| 729 add(current->m_right, context); | |
| 730 current->m_right = nullptr; | |
| 731 } | |
| 732 delete current; | |
| 733 } | |
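A note on the deletion strategy above: rather than the textbook two-child BST removal (splicing in the in-order successor), remove() clears the vacated slot, re-adds the left subtree there, and then re-inserts the right subtree beneath it. This is correct because page regions are disjoint, so every base address in the right subtree compares greater than any in the left, and it keeps the code small at the cost of one extra downward walk, which is acceptable given the tree is unbalanced to begin with.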
| 734 | |
| 735 void Heap::resetHeapCounters() | |
| 736 { | |
| 737 ASSERT(ThreadState::current()->isInGC()); | |
| 738 | |
| 739 Heap::reportMemoryUsageForTracing(); | |
| 740 | |
| 741 s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize; | |
| 742 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 743 s_allocatedObjectSize = 0; | |
| 744 s_markedObjectSize = 0; | |
| 745 s_wrapperCountAtLastGC = s_wrapperCount; | |
| 746 s_collectedWrapperCount = 0; | |
| 747 } | |
| 748 | |
| 749 CallbackStack* Heap::s_markingStack; | |
| 750 CallbackStack* Heap::s_postMarkingCallbackStack; | |
| 751 CallbackStack* Heap::s_globalWeakCallbackStack; | |
| 752 CallbackStack* Heap::s_ephemeronStack; | |
| 753 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | |
| 754 bool Heap::s_shutdownCalled = false; | 614 bool Heap::s_shutdownCalled = false; |
| 755 FreePagePool* Heap::s_freePagePool; | 615 FreePagePool* Heap::s_freePagePool; |
| 756 OrphanedPagePool* Heap::s_orphanedPagePool; | 616 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 757 Heap::RegionTree* Heap::s_regionTree = nullptr; | |
| 758 size_t Heap::s_allocatedSpace = 0; | |
| 759 size_t Heap::s_allocatedObjectSize = 0; | |
| 760 size_t Heap::s_objectSizeAtLastGC = 0; | |
| 761 size_t Heap::s_markedObjectSize = 0; | |
| 762 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; | |
| 763 size_t Heap::s_wrapperCount = 0; | |
| 764 size_t Heap::s_wrapperCountAtLastGC = 0; | |
| 765 size_t Heap::s_collectedWrapperCount = 0; | |
| 766 size_t Heap::s_partitionAllocSizeAtLastGC = 0; | |
| 767 double Heap::s_estimatedMarkingTimePerByte = 0.0; | |
| 768 #if ENABLE(ASSERT) | |
| 769 uint16_t Heap::s_gcGeneration = 0; | |
| 770 #endif | |
| 771 | 617 |
| 772 } // namespace blink | 618 } // namespace blink |