Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 84 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 95 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 95 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
| 96 SafePointBarrier* ThreadState::s_safePointBarrier = 0; | 96 SafePointBarrier* ThreadState::s_safePointBarrier = 0; |
| 97 bool ThreadState::s_inGC = false; | 97 bool ThreadState::s_inGC = false; |
| 98 | 98 |
| 99 static Mutex& threadAttachMutex() | 99 static Mutex& threadAttachMutex() |
| 100 { | 100 { |
| 101 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | 101 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); |
| 102 return mutex; | 102 return mutex; |
| 103 } | 103 } |
| 104 | 104 |
| 105 // The threadShutdownMutex is used to synchronize thread shutdown | |
| 106 // since the thread local GC, as of now, cannot run in parallel | |
| 107 // with other thread local GCs since it is using the global marking | |
| 108 // stack. It can also not run in parallel with a global GC, but | |
| 109 // that is honored by not entering a safepoint while doing the | |
| 110 // thread local GC, meaning a request for a global GC would time | |
| 111 // out. | |
|
haraken
2014/07/07 16:13:47
I'm not sure if this assumption is correct. Heap::
wibling-chromium
2014/07/08 13:39:47
It should not be the case that we enter a safepoin
| |
| 112 static Mutex& threadShutdownMutex() | |
| 113 { | |
| 114 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | |
| 115 return mutex; | |
| 116 } | |
| 117 | |
| 105 static double lockingTimeout() | 118 static double lockingTimeout() |
| 106 { | 119 { |
| 107 // Wait time for parking all threads is at most 100 MS. | 120 // Wait time for parking all threads is at most 100 MS. |
| 108 return 0.100; | 121 return 0.100; |
| 109 } | 122 } |
| 110 | 123 |
| 111 | 124 |
| 112 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*); | 125 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*); |
| 113 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback); | 126 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegistersCallback); |
| 114 | 127 |
| (...skipping 138 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 253 barrier->doEnterSafePoint(state, stackEnd); | 266 barrier->doEnterSafePoint(state, stackEnd); |
| 254 } | 267 } |
| 255 | 268 |
| 256 volatile int m_canResume; | 269 volatile int m_canResume; |
| 257 volatile int m_unparkedThreadCount; | 270 volatile int m_unparkedThreadCount; |
| 258 Mutex m_mutex; | 271 Mutex m_mutex; |
| 259 ThreadCondition m_parked; | 272 ThreadCondition m_parked; |
| 260 ThreadCondition m_resume; | 273 ThreadCondition m_resume; |
| 261 }; | 274 }; |
| 262 | 275 |
| 276 | |
| 277 BaseHeapPage::BaseHeapPage(PageMemory* storage, const GCInfo* gcInfo, ThreadState* state) | |
| 278 : m_storage(storage) | |
| 279 , m_gcInfo(gcInfo) | |
| 280 , m_threadState(state) | |
| 281 , m_padding(0) | |
| 282 { | |
| 283 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | |
| 284 } | |
| 285 | |
| 263 ThreadState::ThreadState() | 286 ThreadState::ThreadState() |
| 264 : m_thread(currentThread()) | 287 : m_thread(currentThread()) |
| 265 , m_persistents(adoptPtr(new PersistentAnchor())) | 288 , m_persistents(adoptPtr(new PersistentAnchor())) |
| 266 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 289 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 267 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 290 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 268 , m_safePointScopeMarker(0) | 291 , m_safePointScopeMarker(0) |
| 269 , m_atSafePoint(false) | 292 , m_atSafePoint(false) |
| 270 , m_interruptors() | 293 , m_interruptors() |
| 271 , m_gcRequested(false) | 294 , m_gcRequested(false) |
| 272 , m_forcePreciseGCForTesting(false) | 295 , m_forcePreciseGCForTesting(false) |
| 273 , m_sweepRequested(0) | 296 , m_sweepRequested(0) |
| 274 , m_sweepInProgress(false) | 297 , m_sweepInProgress(false) |
| 275 , m_noAllocationCount(0) | 298 , m_noAllocationCount(0) |
| 276 , m_inGC(false) | 299 , m_inGC(false) |
| 277 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) | 300 , m_heapContainsCache(adoptPtr(new HeapContainsCache())) |
| 278 , m_isCleaningUp(false) | 301 , m_isCleaningUp(false) |
| 279 #if defined(ADDRESS_SANITIZER) | 302 #if defined(ADDRESS_SANITIZER) |
| 280 , m_asanFakeStack(__asan_get_current_fake_stack()) | 303 , m_asanFakeStack(__asan_get_current_fake_stack()) |
| 281 #endif | 304 #endif |
| 282 { | 305 { |
| 283 ASSERT(!**s_threadSpecific); | 306 ASSERT(!**s_threadSpecific); |
| 284 **s_threadSpecific = this; | 307 **s_threadSpecific = this; |
| 285 | 308 |
| 286 m_stats.clear(); | 309 m_stats.clear(); |
| 287 m_statsAfterLastGC.clear(); | 310 m_statsAfterLastGC.clear(); |
| 288 // First allocate the general heap, second iterate through to | 311 // First allocate the general heap, second iterate through to |
| 289 // allocate the type specific heaps | 312 // allocate the type specific heaps |
| 290 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this); | 313 m_heaps[GeneralHeap] = new ThreadHeap<FinalizedHeapObjectHeader>(this, GeneralHeap); |
| 291 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++) | 314 for (int i = GeneralHeap + 1; i < NumberOfHeaps; i++) |
| 292 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this); | 315 m_heaps[i] = new ThreadHeap<HeapObjectHeader>(this, i); |
| 293 | 316 |
| 294 CallbackStack::init(&m_weakCallbackStack); | 317 CallbackStack::init(&m_weakCallbackStack); |
| 295 } | 318 } |
| 296 | 319 |
| 297 ThreadState::~ThreadState() | 320 ThreadState::~ThreadState() |
| 298 { | 321 { |
| 299 checkThread(); | 322 checkThread(); |
| 300 CallbackStack::shutdown(&m_weakCallbackStack); | 323 CallbackStack::shutdown(&m_weakCallbackStack); |
| 301 for (int i = GeneralHeap; i < NumberOfHeaps; i++) | 324 for (int i = GeneralHeap; i < NumberOfHeaps; i++) |
| 302 delete m_heaps[i]; | 325 delete m_heaps[i]; |
| (...skipping 65 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 368 | 391 |
| 369 void ThreadState::cleanup() | 392 void ThreadState::cleanup() |
| 370 { | 393 { |
| 371 // From here on ignore all conservatively discovered | 394 // From here on ignore all conservatively discovered |
| 372 // pointers into the heap owned by this thread. | 395 // pointers into the heap owned by this thread. |
| 373 m_isCleaningUp = true; | 396 m_isCleaningUp = true; |
| 374 | 397 |
| 375 for (size_t i = 0; i < m_cleanupTasks.size(); i++) | 398 for (size_t i = 0; i < m_cleanupTasks.size(); i++) |
| 376 m_cleanupTasks[i]->preCleanup(); | 399 m_cleanupTasks[i]->preCleanup(); |
| 377 | 400 |
| 378 // After this GC we expect heap to be empty because | 401 { |
| 379 // preCleanup tasks should have cleared all persistent | 402 // We enter a safepoint while waiting for the thread shutdown mutex. |
| 380 // handles that were externally owned. | 403 SafePointAwareMutexLocker locker(threadShutdownMutex()); |
| 381 Heap::collectAllGarbage(); | |
| 382 | 404 |
| 383 // Verify that all heaps are empty now. | 405 // Set a flag on all this thread's heap pages to ensure we don't trace |
|
haraken
2014/07/07 16:13:47
Set flags on all heap pages of this thread to ensu
wibling-chromium
2014/07/08 13:39:47
Done.
| |
| 384 for (int i = 0; i < NumberOfHeaps; i++) | 406 // outside this thread's heap pages. |
| 385 m_heaps[i]->assertEmpty(); | 407 setupHeapsForShutdown(); |
| 408 | |
| 409 // Do thread local GC's as long as the count of thread local Persistents | |
| 410 // changes and is above zero. | |
| 411 PersistentAnchor* anchor = static_cast<PersistentAnchor*>(m_persistents.get()); | |
| 412 int oldCount = 0; | |
| 413 int curCount = anchor->numberOfPersistents(); | |
|
haraken
2014/07/07 16:13:46
curCount => currentCount
wibling-chromium
2014/07/08 13:39:47
Done.
| |
| 414 while (curCount > 0 && curCount != oldCount) { | |
|
haraken
2014/07/07 16:13:47
I'm not sure if 'curCount == oldCount' is enough t
wibling-chromium
2014/07/08 13:39:48
I have added an ASSERT to PersistentBase to check
| |
| 415 Heap::collectGarbageForThread(this, false); | |
| 416 oldCount = curCount; | |
| 417 curCount = anchor->numberOfPersistents(); | |
| 418 } | |
| 419 | |
| 420 // Do a final sweep to finalize any objects pointed to by persistents | |
| 421 // collected in the last round of GC above. | |
| 422 Heap::collectGarbageForThread(this, true); | |
|
Mads Ager (chromium)
2014/07/08 08:24:56
Add enum instead of bool?
wibling-chromium
2014/07/08 13:39:47
Done.
| |
| 423 } | |
| 386 | 424 |
| 387 for (size_t i = 0; i < m_cleanupTasks.size(); i++) | 425 for (size_t i = 0; i < m_cleanupTasks.size(); i++) |
| 388 m_cleanupTasks[i]->postCleanup(); | 426 m_cleanupTasks[i]->postCleanup(); |
| 389 | 427 |
| 390 m_cleanupTasks.clear(); | 428 m_cleanupTasks.clear(); |
| 391 } | 429 } |
| 392 | 430 |
| 393 void ThreadState::detach() | 431 void ThreadState::detach() |
| 394 { | 432 { |
| 395 ThreadState* state = current(); | 433 ThreadState* state = current(); |
| 396 state->cleanup(); | 434 state->cleanup(); |
| 397 | 435 |
| 398 // Enter a safe point before trying to acquire threadAttachMutex | 436 // Enter a safe point before trying to acquire threadAttachMutex |
| 399 // to avoid dead lock if another thread is preparing for GC, has acquired | 437 // to avoid dead lock if another thread is preparing for GC, has acquired |
| 400 // threadAttachMutex and waiting for other threads to pause or reach a | 438 // threadAttachMutex and waiting for other threads to pause or reach a |
| 401 // safepoint. | 439 // safepoint. |
| 402 if (!state->isAtSafePoint()) | 440 if (!state->isAtSafePoint()) |
| 403 state->enterSafePointWithoutPointers(); | 441 state->enterSafePointWithoutPointers(); |
| 404 | 442 |
| 405 { | 443 { |
| 406 MutexLocker locker(threadAttachMutex()); | 444 MutexLocker locker(threadAttachMutex()); |
|
haraken
2014/07/07 16:13:46
This should be SafePointAwareMutexLocker. Then we
wibling-chromium
2014/07/08 13:39:47
Done.
| |
| 407 state->leaveSafePoint(); | 445 state->leaveSafePoint(); |
| 408 ASSERT(attachedThreads().contains(state)); | 446 ASSERT(attachedThreads().contains(state)); |
| 409 attachedThreads().remove(state); | 447 attachedThreads().remove(state); |
| 448 // Deleting the thread state also destroys the thread's heaps at which | |
| 449 // point all the thread's pages are added to the orphanedPagePool. | |
| 450 // Subsequently they are moved to the memoryPool once they are no | |
| 451 // longer traced by a global GC. | |
| 410 delete state; | 452 delete state; |
| 411 } | 453 } |
| 412 shutdownHeapIfNecessary(); | 454 shutdownHeapIfNecessary(); |
| 413 } | 455 } |
| 414 | 456 |
| 415 void ThreadState::visitRoots(Visitor* visitor) | 457 void ThreadState::visitRoots(Visitor* visitor) |
| 416 { | 458 { |
| 417 { | 459 { |
| 418 // All threads are at safepoints so this is not strictly necessary. | 460 // All threads are at safepoints so this is not strictly necessary. |
| 419 // However we acquire the mutex to make mutation and traversal of this | 461 // However we acquire the mutex to make mutation and traversal of this |
| 420 // list symmetrical. | 462 // list symmetrical. |
| 421 MutexLocker locker(globalRootsMutex()); | 463 MutexLocker locker(globalRootsMutex()); |
| 422 globalRoots()->trace(visitor); | 464 globalRoots()->trace(visitor); |
| 423 } | 465 } |
| 424 | 466 |
| 425 AttachedThreadStateSet& threads = attachedThreads(); | 467 AttachedThreadStateSet& threads = attachedThreads(); |
| 426 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 468 for (AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
| 427 (*it)->trace(visitor); | 469 (*it)->trace(visitor); |
| 428 } | 470 } |
| 429 | 471 |
| 472 void ThreadState::visitLocalRoots(Visitor* visitor) | |
| 473 { | |
| 474 // We assume there are no CrossThreadPersistents to this threads heap. If | |
| 475 // there is, it will be handled in the same way as any other cross-thread | |
| 476 // pointer pointing to an orphaned page. | |
| 477 m_persistents->trace(visitor); | |
| 478 } | |
| 479 | |
| 430 NO_SANITIZE_ADDRESS | 480 NO_SANITIZE_ADDRESS |
| 431 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) | 481 void ThreadState::visitAsanFakeStackForPointer(Visitor* visitor, Address ptr) |
| 432 { | 482 { |
| 433 #if defined(ADDRESS_SANITIZER) | 483 #if defined(ADDRESS_SANITIZER) |
| 434 Address* start = reinterpret_cast<Address*>(m_startOfStack); | 484 Address* start = reinterpret_cast<Address*>(m_startOfStack); |
| 435 Address* end = reinterpret_cast<Address*>(m_endOfStack); | 485 Address* end = reinterpret_cast<Address*>(m_endOfStack); |
| 436 Address* fakeFrameStart = 0; | 486 Address* fakeFrameStart = 0; |
| 437 Address* fakeFrameEnd = 0; | 487 Address* fakeFrameEnd = 0; |
| 438 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr); | 488 Address* maybeFakeFrame = reinterpret_cast<Address*>(ptr); |
| 439 Address* realFrameForFakeFrame = | 489 Address* realFrameForFakeFrame = |
| (...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 542 #endif | 592 #endif |
| 543 | 593 |
| 544 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) | 594 void ThreadState::pushWeakObjectPointerCallback(void* object, WeakPointerCallback callback) |
| 545 { | 595 { |
| 546 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); | 596 CallbackStack::Item* slot = m_weakCallbackStack->allocateEntry(&m_weakCallbackStack); |
| 547 *slot = CallbackStack::Item(object, callback); | 597 *slot = CallbackStack::Item(object, callback); |
| 548 } | 598 } |
| 549 | 599 |
| 550 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) | 600 bool ThreadState::popAndInvokeWeakPointerCallback(Visitor* visitor) |
| 551 { | 601 { |
| 552 return m_weakCallbackStack->popAndInvokeCallback(&m_weakCallbackStack, visitor); | 602 return m_weakCallbackStack->popAndInvokeCallback<false>(&m_weakCallbackStack, visitor); |
| 553 } | 603 } |
| 554 | 604 |
| 555 PersistentNode* ThreadState::globalRoots() | 605 PersistentNode* ThreadState::globalRoots() |
| 556 { | 606 { |
| 557 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); | 607 AtomicallyInitializedStatic(PersistentNode*, anchor = new PersistentAnchor); |
| 558 return anchor; | 608 return anchor; |
| 559 } | 609 } |
| 560 | 610 |
| 561 Mutex& ThreadState::globalRootsMutex() | 611 Mutex& ThreadState::globalRootsMutex() |
| 562 { | 612 { |
| (...skipping 117 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 680 { | 730 { |
| 681 for (int i = 0; i < NumberOfHeaps; i++) | 731 for (int i = 0; i < NumberOfHeaps; i++) |
| 682 m_heaps[i]->makeConsistentForGC(); | 732 m_heaps[i]->makeConsistentForGC(); |
| 683 } | 733 } |
| 684 | 734 |
| 685 void ThreadState::prepareForGC() | 735 void ThreadState::prepareForGC() |
| 686 { | 736 { |
| 687 for (int i = 0; i < NumberOfHeaps; i++) { | 737 for (int i = 0; i < NumberOfHeaps; i++) { |
| 688 BaseHeap* heap = m_heaps[i]; | 738 BaseHeap* heap = m_heaps[i]; |
| 689 heap->makeConsistentForGC(); | 739 heap->makeConsistentForGC(); |
| 690 // If there are parked threads with outstanding sweep requests, clear their mark bits. | 740 // If there are parked threads with outstanding sweep requests, clear their mark bits, |
|
haraken
2014/07/07 16:13:47
If a next GC is requested before processing a swee
wibling-chromium
2014/07/08 13:39:47
Rephrased it a bit.
| |
| 691 // This happens if a thread did not have time to wake up and sweep, | 741 // and mark any of their dead objects as dead. The latter is used to ensure the next |
| 692 // before the next GC arrived. | 742 // GC marking does not revive already dead objects. If we revived a dead object we |
|
Mads Ager (chromium)
2014/07/08 08:24:56
revive -> trace
revived -> traced
wibling-chromium
2014/07/08 13:39:48
Done.
| |
| 743 // could end up tracing into garbage or the middle of another object via the revived | |
| 744 // object. | |
|
haraken
2014/07/07 16:13:46
Just help me understand: Is this "revive" problem
wibling-chromium
2014/07/08 13:39:47
This is an issue we discovered while I was doing t
| |
| 745 // The case of a thread missing a sweep happens if it did not have time to wake up | |
| 746 // and sweep, before the next GC arrived. | |
| 693 if (sweepRequested()) | 747 if (sweepRequested()) |
| 694 heap->clearMarks(); | 748 heap->clearLiveAndMarkDead(); |
| 695 } | 749 } |
| 696 setSweepRequested(); | 750 setSweepRequested(); |
| 697 } | 751 } |
| 698 | 752 |
| 753 void ThreadState::setupHeapsForShutdown() | |
| 754 { | |
| 755 for (int i = 0; i < NumberOfHeaps; i++) { | |
| 756 BaseHeap* heap = m_heaps[i]; | |
| 757 heap->setShutdown(); | |
|
haraken
2014/07/07 16:13:46
setShutdown => setHeapForShutdown
wibling-chromium
2014/07/08 13:39:47
Changed it to prepareHeapForShutdown.
| |
| 758 } | |
| 759 } | |
| 760 | |
| 699 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) | 761 BaseHeapPage* ThreadState::heapPageFromAddress(Address address) |
| 700 { | 762 { |
| 701 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address); | 763 BaseHeapPage* cachedPage = heapContainsCache()->lookup(address); |
| 702 #ifdef NDEBUG | 764 #ifdef NDEBUG |
| 703 if (cachedPage) | 765 if (cachedPage) |
| 704 return cachedPage; | 766 return cachedPage; |
| 705 #endif | 767 #endif |
| 706 | 768 |
| 707 for (int i = 0; i < NumberOfHeaps; i++) { | 769 for (int i = 0; i < NumberOfHeaps; i++) { |
| 708 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address); | 770 BaseHeapPage* page = m_heaps[i]->heapPageFromAddress(address); |
| (...skipping 206 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 915 threadAttachMutex().unlock(); | 977 threadAttachMutex().unlock(); |
| 916 return gcInfo; | 978 return gcInfo; |
| 917 } | 979 } |
| 918 } | 980 } |
| 919 if (needLockForIteration) | 981 if (needLockForIteration) |
| 920 threadAttachMutex().unlock(); | 982 threadAttachMutex().unlock(); |
| 921 return 0; | 983 return 0; |
| 922 } | 984 } |
| 923 #endif | 985 #endif |
| 924 } | 986 } |
| OLD | NEW |