Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 128 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 139 { | 139 { |
| 140 // Only cleanup if we parked all threads in which case the GC happened | 140 // Only cleanup if we parked all threads in which case the GC happened |
| 141 // and we need to resume the other threads. | 141 // and we need to resume the other threads. |
| 142 if (m_resumeThreads) | 142 if (m_resumeThreads) |
| 143 ThreadState::resumeThreads(); | 143 ThreadState::resumeThreads(); |
| 144 } | 144 } |
| 145 private: | 145 private: |
| 146 bool m_resumeThreads; | 146 bool m_resumeThreads; |
| 147 }; | 147 }; |
| 148 | 148 |
| 149 void Heap::flushHeapDoesNotContainCache() | |
| 150 { | |
| 151 s_heapDoesNotContainCache->flush(); | |
| 152 } | |
| 153 | |
| 154 void Heap::init() | 149 void Heap::init() |
| 155 { | 150 { |
| 156 ThreadState::init(); | 151 ThreadState::init(); |
| 157 s_markingStack = new CallbackStack(); | |
| 158 s_postMarkingCallbackStack = new CallbackStack(); | |
| 159 s_globalWeakCallbackStack = new CallbackStack(); | |
| 160 s_ephemeronStack = new CallbackStack(); | |
| 161 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | |
| 162 s_freePagePool = new FreePagePool(); | |
| 163 s_orphanedPagePool = new OrphanedPagePool(); | |
| 164 s_allocatedSpace = 0; | |
| 165 s_allocatedObjectSize = 0; | |
| 166 s_objectSizeAtLastGC = 0; | |
| 167 s_markedObjectSize = 0; | |
| 168 s_markedObjectSizeAtLastCompleteSweep = 0; | |
| 169 s_wrapperCount = 0; | |
| 170 s_wrapperCountAtLastGC = 0; | |
| 171 s_collectedWrapperCount = 0; | |
| 172 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
| 173 s_estimatedMarkingTimePerByte = 0.0; | |
| 174 #if ENABLE(ASSERT) | |
| 175 s_gcGeneration = 1; | |
| 176 #endif | |
| 177 | 152 |
| 178 GCInfoTable::init(); | 153 GCInfoTable::init(); |
| 179 | 154 |
| 180 if (Platform::current() && Platform::current()->currentThread()) | 155 if (Platform::current() && Platform::current()->currentThread()) |
| 181 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC"); | 156 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvide r::instance(), "BlinkGC"); |
| 182 } | 157 } |
| 183 | 158 |
| 184 void Heap::shutdown() | 159 void Heap::shutdown() |
| 185 { | 160 { |
| 186 if (Platform::current() && Platform::current()->currentThread()) | 161 if (Platform::current() && Platform::current()->currentThread()) |
| 187 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance()); | 162 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvi der::instance()); |
| 188 s_shutdownCalled = true; | |
| 189 ThreadState::shutdownHeapIfNecessary(); | 163 ThreadState::shutdownHeapIfNecessary(); |
| 190 } | 164 } |
| 191 | 165 |
| 192 void Heap::doShutdown() | 166 void Heap::doShutdown() |
| 193 { | 167 { |
| 194 // We don't want to call doShutdown() twice. | |
| 195 if (!s_markingStack) | |
| 196 return; | |
| 197 | |
| 198 ASSERT(!ThreadState::attachedThreads().size()); | 168 ASSERT(!ThreadState::attachedThreads().size()); |
| 199 delete s_heapDoesNotContainCache; | |
| 200 s_heapDoesNotContainCache = nullptr; | |
| 201 delete s_freePagePool; | |
| 202 s_freePagePool = nullptr; | |
| 203 delete s_orphanedPagePool; | |
| 204 s_orphanedPagePool = nullptr; | |
| 205 delete s_globalWeakCallbackStack; | |
| 206 s_globalWeakCallbackStack = nullptr; | |
| 207 delete s_postMarkingCallbackStack; | |
| 208 s_postMarkingCallbackStack = nullptr; | |
| 209 delete s_markingStack; | |
| 210 s_markingStack = nullptr; | |
| 211 delete s_ephemeronStack; | |
| 212 s_ephemeronStack = nullptr; | |
| 213 delete s_regionTree; | |
| 214 s_regionTree = nullptr; | |
| 215 GCInfoTable::shutdown(); | 169 GCInfoTable::shutdown(); |
| 216 ThreadState::shutdown(); | 170 ThreadState::shutdown(); |
| 217 ASSERT(Heap::allocatedSpace() == 0); | 171 // FIXME: should be zero |
| 172 //ASSERT(ThreadState::terminating()->allocatedSpace() == 0); | |
| 218 } | 173 } |
| 219 | 174 |
| 220 #if ENABLE(ASSERT) | 175 #if ENABLE(ASSERT) |
| 221 BasePage* Heap::findPageFromAddress(Address address) | 176 BasePage* Heap::findPageFromAddress(Address address) |
| 222 { | 177 { |
| 223 MutexLocker lock(ThreadState::threadAttachMutex()); | 178 MutexLocker lock(ThreadState::threadAttachMutex()); |
| 224 for (ThreadState* state : ThreadState::attachedThreads()) { | 179 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 225 if (BasePage* page = state->findPageFromAddress(address)) | 180 if (BasePage* page = state->findPageFromAddress(address)) |
| 226 return page; | 181 return page; |
| 227 } | 182 } |
| 228 return nullptr; | 183 return nullptr; |
| 229 } | 184 } |
| 230 #endif | 185 #endif |
| 231 | 186 |
| 232 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 187 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 233 { | 188 { |
| 234 ASSERT(ThreadState::current()->isInGC()); | 189 ASSERT(ThreadState::current()->isInGC()); |
| 235 | 190 |
| 236 #if !ENABLE(ASSERT) | 191 #if !ENABLE(ASSERT) |
| 237 if (s_heapDoesNotContainCache->lookup(address)) | 192 if (ThreadState::current()->heapDoesNotContainCache()->lookup(address)) |
| 238 return nullptr; | 193 return nullptr; |
| 239 #endif | 194 #endif |
| 240 | 195 |
| 241 if (BasePage* page = lookup(address)) { | 196 if (BasePage* page = lookup(address)) { |
| 242 ASSERT(page->contains(address)); | 197 ASSERT(page->contains(address)); |
| 243 ASSERT(!page->orphaned()); | 198 ASSERT(!page->orphaned()); |
| 244 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | 199 ASSERT(!ThreadState::current()->heapDoesNotContainCache()->lookup(addres s)); |
| 245 page->checkAndMarkPointer(visitor, address); | 200 page->checkAndMarkPointer(visitor, address); |
| 246 return address; | 201 return address; |
| 247 } | 202 } |
| 248 | 203 |
| 249 #if !ENABLE(ASSERT) | 204 #if !ENABLE(ASSERT) |
| 250 s_heapDoesNotContainCache->addEntry(address); | 205 ThreadState::current()->heapDoesNotContainCache()->addEntry(address); |
| 251 #else | 206 #else |
| 252 if (!s_heapDoesNotContainCache->lookup(address)) | 207 if (!ThreadState::current()->heapDoesNotContainCache()->lookup(address)) |
| 253 s_heapDoesNotContainCache->addEntry(address); | 208 ThreadState::current()->heapDoesNotContainCache()->addEntry(address); |
| 254 #endif | 209 #endif |
| 255 return nullptr; | 210 return nullptr; |
| 256 } | 211 } |
| 257 | 212 |
| 258 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 213 void Heap::pushTraceCallback(void* object, TraceCallback callback) |
| 259 { | 214 { |
| 260 ASSERT(ThreadState::current()->isInGC()); | 215 ASSERT(ThreadState::current()->isInGC()); |
| 261 | 216 |
| 262 // Trace should never reach an orphaned page. | 217 // Trace should never reach an orphaned page. |
| 263 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 218 ASSERT(!ThreadState::current()->orphanedPagePool()->contains(object)); |
| 264 CallbackStack::Item* slot = s_markingStack->allocateEntry(); | 219 CallbackStack::Item* slot = ThreadState::current()->markingStack()->allocate Entry(); |
| 265 *slot = CallbackStack::Item(object, callback); | 220 *slot = CallbackStack::Item(object, callback); |
| 266 } | 221 } |
| 267 | 222 |
| 268 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 223 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
| 269 { | 224 { |
| 270 CallbackStack::Item* item = s_markingStack->pop(); | 225 CallbackStack::Item* item = ThreadState::current()->markingStack()->pop(); |
|
haraken
2015/11/30 02:54:41
At the moment this is okay, but ThreadState::curre
keishi
2016/01/06 05:35:33
Done.
| |
| 271 if (!item) | 226 if (!item) |
| 272 return false; | 227 return false; |
| 273 item->call(visitor); | 228 item->call(visitor); |
| 274 return true; | 229 return true; |
| 275 } | 230 } |
| 276 | 231 |
| 277 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) | 232 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) |
| 278 { | 233 { |
| 279 ASSERT(ThreadState::current()->isInGC()); | 234 ASSERT(ThreadState::current()->isInGC()); |
| 280 | 235 |
| 281 // Trace should never reach an orphaned page. | 236 // Trace should never reach an orphaned page. |
| 282 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 237 ASSERT(!ThreadState::current()->orphanedPagePool()->contains(object)); |
| 283 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); | 238 CallbackStack::Item* slot = ThreadState::current()->postMarkingCallbackStack ()->allocateEntry(); |
| 284 *slot = CallbackStack::Item(object, callback); | 239 *slot = CallbackStack::Item(object, callback); |
| 285 } | 240 } |
| 286 | 241 |
| 287 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) | 242 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) |
| 288 { | 243 { |
| 289 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { | 244 if (CallbackStack::Item* item = ThreadState::current()->postMarkingCallbackS tack()->pop()) { |
| 290 item->call(visitor); | 245 item->call(visitor); |
| 291 return true; | 246 return true; |
| 292 } | 247 } |
| 293 return false; | 248 return false; |
| 294 } | 249 } |
| 295 | 250 |
| 296 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) | 251 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) |
| 297 { | 252 { |
| 298 ASSERT(ThreadState::current()->isInGC()); | 253 ASSERT(ThreadState::current()->isInGC()); |
| 299 | 254 |
| 300 // Trace should never reach an orphaned page. | 255 // Trace should never reach an orphaned page. |
| 301 ASSERT(!Heap::orphanedPagePool()->contains(cell)); | 256 ASSERT(!ThreadState::current()->orphanedPagePool()->contains(cell)); |
| 302 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); | 257 CallbackStack::Item* slot = ThreadState::current()->globalWeakCallbackStack( )->allocateEntry(); |
| 303 *slot = CallbackStack::Item(cell, callback); | 258 *slot = CallbackStack::Item(cell, callback); |
| 304 } | 259 } |
| 305 | 260 |
| 306 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | 261 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) |
| 307 { | 262 { |
| 308 ASSERT(ThreadState::current()->isInGC()); | 263 ASSERT(ThreadState::current()->isInGC()); |
| 309 | 264 |
| 310 // Trace should never reach an orphaned page. | 265 // Trace should never reach an orphaned page. |
| 311 ASSERT(!Heap::orphanedPagePool()->contains(object)); | 266 ASSERT(!ThreadState::current()->orphanedPagePool()->contains(object)); |
| 312 ThreadState* state = pageFromObject(object)->heap()->threadState(); | 267 ThreadState* state = pageFromObject(object)->heap()->threadState(); |
| 313 state->pushThreadLocalWeakCallback(closure, callback); | 268 state->pushThreadLocalWeakCallback(closure, callback); |
| 314 } | 269 } |
| 315 | 270 |
| 316 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) | 271 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) |
| 317 { | 272 { |
| 318 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { | 273 if (CallbackStack::Item* item = ThreadState::current()->globalWeakCallbackSt ack()->pop()) { |
| 319 item->call(visitor); | 274 item->call(visitor); |
| 320 return true; | 275 return true; |
| 321 } | 276 } |
| 322 return false; | 277 return false; |
| 323 } | 278 } |
| 324 | 279 |
| 325 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) | 280 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) |
| 326 { | 281 { |
| 327 ASSERT(ThreadState::current()->isInGC()); | 282 ASSERT(ThreadState::current()->isInGC()); |
| 328 | 283 |
| 329 // Trace should never reach an orphaned page. | 284 // Trace should never reach an orphaned page. |
| 330 ASSERT(!Heap::orphanedPagePool()->contains(table)); | 285 ASSERT(!ThreadState::current()->orphanedPagePool()->contains(table)); |
| 331 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); | 286 CallbackStack::Item* slot = ThreadState::current()->ephemeronStack()->alloca teEntry(); |
| 332 *slot = CallbackStack::Item(table, iterationCallback); | 287 *slot = CallbackStack::Item(table, iterationCallback); |
| 333 | 288 |
| 334 // Register a post-marking callback to tell the tables that | 289 // Register a post-marking callback to tell the tables that |
| 335 // ephemeron iteration is complete. | 290 // ephemeron iteration is complete. |
| 336 pushPostMarkingCallback(table, iterationDoneCallback); | 291 pushPostMarkingCallback(table, iterationDoneCallback); |
| 337 } | 292 } |
| 338 | 293 |
| 339 #if ENABLE(ASSERT) | 294 #if ENABLE(ASSERT) |
| 340 bool Heap::weakTableRegistered(const void* table) | 295 bool Heap::weakTableRegistered(const void* table) |
| 341 { | 296 { |
| 342 ASSERT(s_ephemeronStack); | 297 ASSERT(ThreadState::current()->ephemeronStack()); |
| 343 return s_ephemeronStack->hasCallbackForObject(table); | 298 return ThreadState::current()->ephemeronStack()->hasCallbackForObject(table) ; |
| 344 } | 299 } |
| 345 #endif | 300 #endif |
| 346 | 301 |
| 347 void Heap::preGC() | 302 void Heap::preGC() |
| 348 { | 303 { |
| 349 ASSERT(!ThreadState::current()->isInGC()); | 304 ASSERT(!ThreadState::current()->isInGC()); |
| 350 for (ThreadState* state : ThreadState::attachedThreads()) | 305 for (ThreadState* state : ThreadState::attachedThreads()) |
| 351 state->preGC(); | 306 state->preGC(); |
| 352 } | 307 } |
| 353 | 308 |
| (...skipping 57 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 411 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 366 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 412 | 367 |
| 413 // Disallow allocation during garbage collection (but not during the | 368 // Disallow allocation during garbage collection (but not during the |
| 414 // finalization that happens when the gcScope is torn down). | 369 // finalization that happens when the gcScope is torn down). |
| 415 ThreadState::NoAllocationScope noAllocationScope(state); | 370 ThreadState::NoAllocationScope noAllocationScope(state); |
| 416 | 371 |
| 417 preGC(); | 372 preGC(); |
| 418 | 373 |
| 419 StackFrameDepthScope stackDepthScope; | 374 StackFrameDepthScope stackDepthScope; |
| 420 | 375 |
| 421 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSiz e(); | 376 size_t totalObjectSize = ThreadState::current()->allocatedObjectSize() + Thr eadState::current()->markedObjectSize(); |
| 422 if (gcType != BlinkGC::TakeSnapshot) | 377 if (gcType != BlinkGC::TakeSnapshot) |
| 423 Heap::resetHeapCounters(); | 378 state->resetHeapCounters(); |
| 424 | 379 |
| 425 // 1. Trace persistent roots. | 380 // 1. Trace persistent roots. |
| 426 ThreadState::visitPersistentRoots(gcScope.visitor()); | 381 ThreadState::visitPersistentRoots(gcScope.visitor()); |
| 427 | 382 |
| 428 // 2. Trace objects reachable from the stack. We do this independent of the | 383 // 2. Trace objects reachable from the stack. We do this independent of the |
| 429 // given stackState since other threads might have a different stack state. | 384 // given stackState since other threads might have a different stack state. |
| 430 ThreadState::visitStackRoots(gcScope.visitor()); | 385 ThreadState::visitStackRoots(gcScope.visitor()); |
| 431 | 386 |
| 432 // 3. Transitive closure to trace objects including ephemerons. | 387 // 3. Transitive closure to trace objects including ephemerons. |
| 433 processMarkingStack(gcScope.visitor()); | 388 processMarkingStack(gcScope.visitor()); |
| 434 | 389 |
| 435 postMarkingProcessing(gcScope.visitor()); | 390 postMarkingProcessing(gcScope.visitor()); |
| 436 globalWeakProcessing(gcScope.visitor()); | 391 globalWeakProcessing(gcScope.visitor()); |
| 437 | 392 |
| 438 // Now we can delete all orphaned pages because there are no dangling | 393 // Now we can delete all orphaned pages because there are no dangling |
| 439 // pointers to the orphaned pages. (If we have such dangling pointers, | 394 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 440 // we should have crashed during marking before getting here.) | 395 // we should have crashed during marking before getting here.) |
| 441 orphanedPagePool()->decommitOrphanedPages(); | 396 state->orphanedPagePool()->decommitOrphanedPages(); |
| 442 | 397 |
| 443 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; | 398 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
| 444 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; | 399 ThreadState::current()->setEstimatedMarkingTimePerByte(totalObjectSize ? (ma rkingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
| 445 | 400 |
| 446 #if PRINT_HEAP_STATS | 401 #if PRINT_HEAP_STATS |
| 447 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\ n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill iseconds); | 402 dataLogF("Heap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\ n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMill iseconds); |
| 448 #endif | 403 #endif |
| 449 | 404 |
| 450 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", marking TimeInMilliseconds, 0, 10 * 1000, 50); | 405 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", marking TimeInMilliseconds, 0, 10 * 1000, 50); |
| 451 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap: :allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | 406 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Threa dState::current()->allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
| 452 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", He ap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | 407 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Th readState::current()->allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
| 453 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkG C::NumberOfGCReason); | 408 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, BlinkG C::NumberOfGCReason); |
| 454 Heap::reportMemoryUsageHistogram(); | 409 ThreadState::current()->reportMemoryUsageHistogram(); |
| 455 WTF::Partitions::reportMemoryUsageHistogram(); | 410 WTF::Partitions::reportMemoryUsageHistogram(); |
| 456 | 411 |
| 457 postGC(gcType); | 412 postGC(gcType); |
| 458 | 413 |
| 459 #if ENABLE(ASSERT) | 414 #if ENABLE(ASSERT) |
| 460 // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneratio n. | 415 // 0 is used to figure non-assigned area, so avoid to use 0 in s_gcGeneratio n. |
| 461 if (++s_gcGeneration == 0) { | 416 ThreadState::current()->incrementGcGeneration(); |
| 462 s_gcGeneration = 1; | |
| 463 } | |
| 464 #endif | 417 #endif |
| 465 | 418 |
| 466 if (state->isMainThread()) | 419 if (state->isMainThread()) |
| 467 ScriptForbiddenScope::exit(); | 420 ScriptForbiddenScope::exit(); |
| 468 } | 421 } |
| 469 | 422 |
| 470 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 423 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 471 { | 424 { |
| 472 { | 425 { |
| 473 // A thread-specific termination GC must not allow other global GCs to g o | 426 // A thread-specific termination GC must not allow other global GCs to g o |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 497 processMarkingStack(gcScope.visitor()); | 450 processMarkingStack(gcScope.visitor()); |
| 498 | 451 |
| 499 postMarkingProcessing(gcScope.visitor()); | 452 postMarkingProcessing(gcScope.visitor()); |
| 500 globalWeakProcessing(gcScope.visitor()); | 453 globalWeakProcessing(gcScope.visitor()); |
| 501 | 454 |
| 502 state->postGC(BlinkGC::GCWithSweep); | 455 state->postGC(BlinkGC::GCWithSweep); |
| 503 } | 456 } |
| 504 state->preSweep(); | 457 state->preSweep(); |
| 505 } | 458 } |
| 506 | 459 |
| 460 void Heap::collectGarbageForIsolatedThread(ThreadState* state) | |
| 461 { | |
| 462 { | |
| 463 // A thread-specific termination GC must not allow other global GCs to g o | |
| 464 // ahead while it is running, hence the termination GC does not enter a | |
| 465 // safepoint. GCScope will not enter also a safepoint scope for | |
| 466 // ThreadTerminationGC. | |
| 467 GCScope gcScope(state, BlinkGC::NoHeapPointersOnStack, BlinkGC::ThreadTe rminationGC); | |
|
haraken
2015/11/30 02:54:42
As commented in ThreadState.cpp, it is wrong to se
keishi
2016/01/06 05:35:33
Done.
| |
| 468 | |
| 469 ThreadState::NoAllocationScope noAllocationScope(state); | |
| 470 | |
| 471 state->preGC(); | |
| 472 | |
| 473 // 1. Trace the thread local persistent roots. For thread local GCs we | |
| 474 // don't trace the stack (ie. no conservative scanning) since this is | |
| 475 // only called during thread shutdown where there should be no objects | |
| 476 // on the stack. | |
| 477 // We also assume that orphaned pages have no objects reachable from | |
| 478 // persistent handles on other threads or CrossThreadPersistents. The | |
| 479 // only cases where this could happen is if a subsequent conservative | |
| 480 // global GC finds a "pointer" on the stack or due to a programming | |
| 481 // error where an object has a dangling cross-thread pointer to an | |
| 482 // object on this heap. | |
| 483 state->visitPersistents(gcScope.visitor()); | |
| 484 | |
| 485 // 2. Trace objects reachable from the thread's persistent roots | |
| 486 // including ephemerons. | |
| 487 processMarkingStack(gcScope.visitor()); | |
| 488 | |
| 489 postMarkingProcessing(gcScope.visitor()); | |
| 490 globalWeakProcessing(gcScope.visitor()); | |
| 491 | |
| 492 state->postGC(BlinkGC::GCWithSweep); | |
| 493 } | |
| 494 state->preSweep(); | |
| 495 } | |
| 496 | |
| 507 void Heap::processMarkingStack(Visitor* visitor) | 497 void Heap::processMarkingStack(Visitor* visitor) |
| 508 { | 498 { |
| 509 // Ephemeron fixed point loop. | 499 // Ephemeron fixed point loop. |
| 510 do { | 500 do { |
| 511 { | 501 { |
| 512 // Iteratively mark all objects that are reachable from the objects | 502 // Iteratively mark all objects that are reachable from the objects |
| 513 // currently pushed onto the marking stack. | 503 // currently pushed onto the marking stack. |
| 514 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); | 504 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); |
| 515 while (popAndInvokeTraceCallback(visitor)) { } | 505 while (popAndInvokeTraceCallback(visitor)) { } |
| 516 } | 506 } |
| 517 | 507 |
| 518 { | 508 { |
| 519 // Mark any strong pointers that have now become reachable in | 509 // Mark any strong pointers that have now become reachable in |
| 520 // ephemeron maps. | 510 // ephemeron maps. |
| 521 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); | 511 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); |
| 522 s_ephemeronStack->invokeEphemeronCallbacks(visitor); | 512 ThreadState::current()->ephemeronStack()->invokeEphemeronCallbacks(v isitor); |
| 523 } | 513 } |
| 524 | 514 |
| 525 // Rerun loop if ephemeron processing queued more objects for tracing. | 515 // Rerun loop if ephemeron processing queued more objects for tracing. |
| 526 } while (!s_markingStack->isEmpty()); | 516 } while (!ThreadState::current()->markingStack()->isEmpty()); |
| 527 } | 517 } |
| 528 | 518 |
| 529 void Heap::postMarkingProcessing(Visitor* visitor) | 519 void Heap::postMarkingProcessing(Visitor* visitor) |
| 530 { | 520 { |
| 531 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); | 521 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); |
| 532 // Call post-marking callbacks including: | 522 // Call post-marking callbacks including: |
| 533 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | 523 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup |
| 534 // (specifically to clear the queued bits for weak hash tables), and | 524 // (specifically to clear the queued bits for weak hash tables), and |
| 535 // 2. the markNoTracing callbacks on collection backings to mark them | 525 // 2. the markNoTracing callbacks on collection backings to mark them |
| 536 // if they are only reachable from their front objects. | 526 // if they are only reachable from their front objects. |
| 537 while (popAndInvokePostMarkingCallback(visitor)) { } | 527 while (popAndInvokePostMarkingCallback(visitor)) { } |
| 538 | 528 |
| 539 s_ephemeronStack->clear(); | 529 ThreadState::current()->ephemeronStack()->clear(); |
| 540 | 530 |
| 541 // Post-marking callbacks should not trace any objects and | 531 // Post-marking callbacks should not trace any objects and |
| 542 // therefore the marking stack should be empty after the | 532 // therefore the marking stack should be empty after the |
| 543 // post-marking callbacks. | 533 // post-marking callbacks. |
| 544 ASSERT(s_markingStack->isEmpty()); | 534 ASSERT(ThreadState::current()->markingStack()->isEmpty()); |
| 545 } | 535 } |
| 546 | 536 |
| 547 void Heap::globalWeakProcessing(Visitor* visitor) | 537 void Heap::globalWeakProcessing(Visitor* visitor) |
| 548 { | 538 { |
| 549 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); | 539 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); |
| 550 double startTime = WTF::currentTimeMS(); | 540 double startTime = WTF::currentTimeMS(); |
| 551 | 541 |
| 552 // Call weak callbacks on objects that may now be pointing to dead objects. | 542 // Call weak callbacks on objects that may now be pointing to dead objects. |
| 553 while (popAndInvokeGlobalWeakCallback(visitor)) { } | 543 while (popAndInvokeGlobalWeakCallback(visitor)) { } |
| 554 | 544 |
| 555 // It is not permitted to trace pointers of live objects in the weak | 545 // It is not permitted to trace pointers of live objects in the weak |
| 556 // callback phase, so the marking stack should still be empty here. | 546 // callback phase, so the marking stack should still be empty here. |
| 557 ASSERT(s_markingStack->isEmpty()); | 547 ASSERT(ThreadState::current()->markingStack()->isEmpty()); |
| 558 | 548 |
| 559 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; | 549 double timeForGlobalWeakProcessing = WTF::currentTimeMS() - startTime; |
| 560 Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessi ng", timeForGlobalWeakProcessing, 1, 10 * 1000, 50); | 550 Platform::current()->histogramCustomCounts("BlinkGC.TimeForGlobalWeakPrcessi ng", timeForGlobalWeakProcessing, 1, 10 * 1000, 50); |
| 561 } | 551 } |
| 562 | 552 |
| 563 void Heap::collectAllGarbage() | 553 void Heap::collectAllGarbage() |
| 564 { | 554 { |
| 565 // We need to run multiple GCs to collect a chain of persistent handles. | 555 // We need to run multiple GCs to collect a chain of persistent handles. |
| 566 size_t previousLiveObjects = 0; | 556 size_t previousLiveObjects = 0; |
| 567 for (int i = 0; i < 5; ++i) { | 557 for (int i = 0; i < 5; ++i) { |
| 568 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); | 558 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, Bli nkGC::ForcedGC); |
| 569 size_t liveObjects = Heap::markedObjectSize(); | 559 size_t liveObjects = ThreadState::current()->markedObjectSize(); |
| 570 if (liveObjects == previousLiveObjects) | 560 if (liveObjects == previousLiveObjects) |
| 571 break; | 561 break; |
| 572 previousLiveObjects = liveObjects; | 562 previousLiveObjects = liveObjects; |
| 573 } | 563 } |
| 574 } | 564 } |
| 575 | 565 |
| 576 double Heap::estimatedMarkingTime() | |
| 577 { | |
| 578 ASSERT(ThreadState::current()->isMainThread()); | |
| 579 | |
| 580 // Use 8 ms as initial estimated marking time. | |
| 581 // 8 ms is long enough for low-end mobile devices to mark common | |
| 582 // real-world object graphs. | |
| 583 if (s_estimatedMarkingTimePerByte == 0) | |
| 584 return 0.008; | |
| 585 | |
| 586 // Assuming that the collection rate of this GC will be mostly equal to | |
| 587 // the collection rate of the last GC, estimate the marking time of this GC. | |
| 588 return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap:: markedObjectSize()); | |
| 589 } | |
| 590 | |
| 591 void Heap::reportMemoryUsageHistogram() | |
| 592 { | |
| 593 static size_t supportedMaxSizeInMB = 4 * 1024; | |
| 594 static size_t observedMaxSizeInMB = 0; | |
| 595 | |
| 596 // We only report the memory in the main thread. | |
| 597 if (!isMainThread()) | |
| 598 return; | |
| 599 // +1 is for rounding up the sizeInMB. | |
| 600 size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1; | |
| 601 if (sizeInMB >= supportedMaxSizeInMB) | |
| 602 sizeInMB = supportedMaxSizeInMB - 1; | |
| 603 if (sizeInMB > observedMaxSizeInMB) { | |
| 604 // Send a UseCounter only when we see the highest memory usage | |
| 605 // we've ever seen. | |
| 606 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeI nMB, supportedMaxSizeInMB); | |
| 607 observedMaxSizeInMB = sizeInMB; | |
| 608 } | |
| 609 } | |
| 610 | |
// Emits the current heap statistics as trace counters on the "blink_gc"
// tracing category (plus a few counters on the disabled-by-default variant).
// Cheap no-op when tracing is off; called from GC entry points.
void Heap::reportMemoryUsageForTracing()
{
#if PRINT_HEAP_STATS
    // dataLogF("allocatedSpace=%ldMB, allocatedObjectSize=%ldMB, markedObjectSize=%ldMB, partitionAllocSize=%ldMB, wrapperCount=%ld, collectedWrapperCount=%ld\n", Heap::allocatedSpace() / 1024 / 1024, Heap::allocatedObjectSize() / 1024 / 1024, Heap::markedObjectSize() / 1024 / 1024, WTF::Partitions::totalSizeOfCommittedPages() / 1024 / 1024, Heap::wrapperCount(), Heap::collectedWrapperCount());
#endif

    // Bail out early when the "blink_gc" category is disabled so we do not
    // pay for the size queries below.
    bool gcTracingEnabled;
    TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
    if (!gcTracingEnabled)
        return;

    // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints).
    // They are capped to INT_MAX just in case.
    TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCount", std::min(Heap::wrapperCount(), static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::wrapperCountAtLastGC", std::min(Heap::wrapperCountAtLastGC(), static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::collectedWrapperCount", std::min(Heap::collectedWrapperCount(), static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1(TRACE_DISABLED_BY_DEFAULT("blink_gc"), "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX)));
    TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX)));
}
| 635 | |
| 636 size_t Heap::objectPayloadSizeForTesting() | 566 size_t Heap::objectPayloadSizeForTesting() |
| 637 { | 567 { |
| 638 size_t objectPayloadSize = 0; | 568 size_t objectPayloadSize = 0; |
| 639 for (ThreadState* state : ThreadState::attachedThreads()) { | 569 for (ThreadState* state : ThreadState::attachedThreads()) { |
| 640 state->setGCState(ThreadState::GCRunning); | 570 state->setGCState(ThreadState::GCRunning); |
| 641 state->makeConsistentForGC(); | 571 state->makeConsistentForGC(); |
| 642 objectPayloadSize += state->objectPayloadSizeForTesting(); | 572 objectPayloadSize += state->objectPayloadSizeForTesting(); |
| 643 state->setGCState(ThreadState::EagerSweepScheduled); | 573 state->setGCState(ThreadState::EagerSweepScheduled); |
| 644 state->setGCState(ThreadState::Sweeping); | 574 state->setGCState(ThreadState::Sweeping); |
| 645 state->setGCState(ThreadState::NoGCScheduled); | 575 state->setGCState(ThreadState::NoGCScheduled); |
| 646 } | 576 } |
| 647 return objectPayloadSize; | 577 return objectPayloadSize; |
| 648 } | 578 } |
| 649 | 579 |
| 650 BasePage* Heap::lookup(Address address) | 580 BasePage* Heap::lookup(Address address) |
| 651 { | 581 { |
| 652 ASSERT(ThreadState::current()->isInGC()); | 582 ASSERT(ThreadState::current()->isInGC()); |
| 653 if (!s_regionTree) | 583 if (!ThreadState::current()->regionTree()) |
| 654 return nullptr; | 584 return nullptr; |
| 655 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { | 585 if (PageMemoryRegion* region = ThreadState::current()->regionTree()->lookup( address)) { |
| 656 BasePage* page = region->pageFromAddress(address); | 586 BasePage* page = region->pageFromAddress(address); |
| 657 return page && !page->orphaned() ? page : nullptr; | 587 return page && !page->orphaned() ? page : nullptr; |
| 658 } | 588 } |
| 659 return nullptr; | 589 return nullptr; |
| 660 } | 590 } |
| 661 | 591 |
// Returns the process-wide mutex serializing mutations of the page-memory-
// region tree. The Mutex is heap-allocated once and never freed;
// AtomicallyInitializedStaticReference handles the one-time initialization.
static Mutex& regionTreeMutex()
{
    AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex);
    return mutex;
}
| 667 | |
| 668 void Heap::removePageMemoryRegion(PageMemoryRegion* region) | 592 void Heap::removePageMemoryRegion(PageMemoryRegion* region) |
| 669 { | 593 { |
| 670 // Deletion of large objects (and thus their regions) can happen | 594 // Deletion of large objects (and thus their regions) can happen |
| 671 // concurrently on sweeper threads. Removal can also happen during thread | 595 // concurrently on sweeper threads. Removal can also happen during thread |
| 672 // shutdown, but that case is safe. Regardless, we make all removals | 596 // shutdown, but that case is safe. Regardless, we make all removals |
| 673 // mutually exclusive. | 597 // mutually exclusive. |
| 674 MutexLocker locker(regionTreeMutex()); | 598 //MutexLocker locker(regionTreeMutex()); |
| 675 RegionTree::remove(region, &s_regionTree); | 599 ThreadState* state = ThreadState::current(); |
| 600 if (!state) | |
| 601 state = ThreadState::terminating(); | |
|
haraken
2015/11/30 02:54:42
I'm wondering why this is needed. We should make s
| |
| 602 ThreadState::RegionTree* regionTree = state->regionTree(); | |
| 603 ThreadState::RegionTree::remove(region, ®ionTree); | |
| 604 state->setRegionTree(regionTree); | |
|
haraken
2015/11/30 02:54:42
Help me understand: What are you doing by setting
| |
| 676 } | 605 } |
| 677 | 606 |
| 678 void Heap::addPageMemoryRegion(PageMemoryRegion* region) | 607 void Heap::addPageMemoryRegion(PageMemoryRegion* region) |
| 679 { | 608 { |
| 680 MutexLocker locker(regionTreeMutex()); | 609 ASSERT(ThreadState::current() && !ThreadState::terminating()); |
| 681 RegionTree::add(new RegionTree(region), &s_regionTree); | 610 //MutexLocker locker(regionTreeMutex()); |
| 611 ThreadState::RegionTree* regionTree = ThreadState::current()->regionTree(); | |
| 612 ThreadState::RegionTree::add(new ThreadState::RegionTree(region), ®ionTre e); | |
| 613 ThreadState::current()->setRegionTree(regionTree); | |
| 682 } | 614 } |
| 683 | 615 |
| 684 PageMemoryRegion* Heap::RegionTree::lookup(Address address) | |
| 685 { | |
| 686 RegionTree* current = s_regionTree; | |
| 687 while (current) { | |
| 688 Address base = current->m_region->base(); | |
| 689 if (address < base) { | |
| 690 current = current->m_left; | |
| 691 continue; | |
| 692 } | |
| 693 if (address >= base + current->m_region->size()) { | |
| 694 current = current->m_right; | |
| 695 continue; | |
| 696 } | |
| 697 ASSERT(current->m_region->contains(address)); | |
| 698 return current->m_region; | |
| 699 } | |
| 700 return nullptr; | |
| 701 } | |
| 702 | |
| 703 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context) | |
| 704 { | |
| 705 ASSERT(newTree); | |
| 706 Address base = newTree->m_region->base(); | |
| 707 for (RegionTree* current = *context; current; current = *context) { | |
| 708 ASSERT(!current->m_region->contains(base)); | |
| 709 context = (base < current->m_region->base()) ? ¤t->m_left : &curre nt->m_right; | |
| 710 } | |
| 711 *context = newTree; | |
| 712 } | |
| 713 | |
// Removes the node holding |region| from the tree rooted at *|context|.
// The removed node's subtrees are re-inserted at the vacated slot rather
// than doing a classic BST successor splice.
void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context)
{
    ASSERT(region);
    ASSERT(context);
    Address base = region->base();
    // Descend by base address until we reach the node owning |region|;
    // |context| ends up pointing at the parent's link to that node.
    RegionTree* current = *context;
    for (; current; current = *context) {
        if (region == current->m_region)
            break;
        context = (base < current->m_region->base()) ? &current->m_left : &current->m_right;
    }

    // Shutdown via detachMainThread might not have populated the region tree.
    if (!current)
        return;

    // Detach the node, then re-insert its children at the now-empty slot.
    // The children are nulled out so deleting |current| cannot touch them.
    *context = nullptr;
    if (current->m_left) {
        add(current->m_left, context);
        current->m_left = nullptr;
    }
    if (current->m_right) {
        add(current->m_right, context);
        current->m_right = nullptr;
    }
    delete current;
}
| 741 | |
// Snapshots per-GC statistics and resets the running counters. Must be
// called while a GC is in progress on the current thread.
void Heap::resetHeapCounters()
{
    ASSERT(ThreadState::current()->isInGC());

    Heap::reportMemoryUsageForTracing();

    // Order matters: the "at last GC" snapshots read the running counters
    // before those counters are zeroed below.
    s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize;
    s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages();
    s_allocatedObjectSize = 0;
    s_markedObjectSize = 0;
    s_wrapperCountAtLastGC = s_wrapperCount;
    s_collectedWrapperCount = 0;
}
| 755 | |
// Static member definitions for Heap.

// Worklists used during marking and weak/ephemeron processing.
CallbackStack* Heap::s_markingStack;
CallbackStack* Heap::s_postMarkingCallbackStack;
CallbackStack* Heap::s_globalWeakCallbackStack;
CallbackStack* Heap::s_ephemeronStack;
// Negative cache of addresses known not to be in the heap.
HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
bool Heap::s_shutdownCalled = false;
FreePagePool* Heap::s_freePagePool;
OrphanedPagePool* Heap::s_orphanedPagePool;
// Search tree over all page memory regions, keyed by base address.
Heap::RegionTree* Heap::s_regionTree = nullptr;
// Running heap statistics; snapshotted/reset in resetHeapCounters().
size_t Heap::s_allocatedSpace = 0;
size_t Heap::s_allocatedObjectSize = 0;
size_t Heap::s_objectSizeAtLastGC = 0;
size_t Heap::s_markedObjectSize = 0;
size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0;
size_t Heap::s_wrapperCount = 0;
size_t Heap::s_wrapperCountAtLastGC = 0;
size_t Heap::s_collectedWrapperCount = 0;
size_t Heap::s_partitionAllocSizeAtLastGC = 0;
double Heap::s_estimatedMarkingTimePerByte = 0.0;
#if ENABLE(ASSERT)
// Debug-only GC generation counter.
uint16_t Heap::s_gcGeneration = 0;
#endif
| 778 | |
| 779 } // namespace blink | 616 } // namespace blink |
| OLD | NEW |