Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 302 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 313 NoHeapPointersOnStack, | 313 NoHeapPointersOnStack, |
| 314 HeapPointersOnStack | 314 HeapPointersOnStack |
| 315 }; | 315 }; |
| 316 | 316 |
| 317 // When profiling we would like to identify forced GC requests. | 317 // When profiling we would like to identify forced GC requests. |
| 318 enum CauseOfGC { | 318 enum CauseOfGC { |
| 319 NormalGC, | 319 NormalGC, |
| 320 ForcedGC | 320 ForcedGC |
| 321 }; | 321 }; |
| 322 | 322 |
| 323 // See setGCState() for possible state transitions. | |
| 324 enum GCState { | |
| 325 NoGCScheduled, | |
| 326 GCScheduled, | |
| 327 StoppingOtherThreads, | |
| 328 GCRunning, | |
|
tkent
2014/11/26 02:27:07
What's 'GC' here? Does this contains both of marki
| |
| 329 SweepScheduled, | |
| 330 Sweeping, | |
| 331 }; | |
| 332 | |
| 323 class NoSweepScope { | 333 class NoSweepScope { |
| 324 public: | 334 public: |
| 325 explicit NoSweepScope(ThreadState* state) : m_state(state) | 335 explicit NoSweepScope(ThreadState* state) : m_state(state) |
| 326 { | 336 { |
| 327 ASSERT(!m_state->m_sweepInProgress); | 337 ASSERT(!m_state->m_sweepInProgress); |
| 328 m_state->m_sweepInProgress = true; | 338 m_state->m_sweepInProgress = true; |
| 329 } | 339 } |
| 330 ~NoSweepScope() | 340 ~NoSweepScope() |
| 331 { | 341 { |
| 332 ASSERT(m_state->m_sweepInProgress); | 342 ASSERT(m_state->m_sweepInProgress); |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 392 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); | 402 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); |
| 393 } | 403 } |
| 394 | 404 |
| 395 bool isMainThread() const { return this == mainThreadState(); } | 405 bool isMainThread() const { return this == mainThreadState(); } |
| 396 inline bool checkThread() const | 406 inline bool checkThread() const |
| 397 { | 407 { |
| 398 ASSERT(m_thread == currentThread()); | 408 ASSERT(m_thread == currentThread()); |
| 399 return true; | 409 return true; |
| 400 } | 410 } |
| 401 | 411 |
| 402 // If gcRequested returns true when a thread returns to its event | |
| 403 // loop the thread will initiate a garbage collection. | |
| 404 bool gcRequested(); | |
| 405 void setGCRequested(); | |
|
tkent
2014/11/26 02:27:07
nit: I'd like to keep setGCRequested() as an alias
haraken
2014/11/26 05:27:01
Done.
| |
| 406 void clearGCRequested(); | |
| 407 void didV8GC(); | 412 void didV8GC(); |
| 408 | 413 |
| 409 // shouldGC and shouldForceConservativeGC implement the heuristics | 414 // shouldGC and shouldForceConservativeGC implement the heuristics |
| 410 // that are used to determine when to collect garbage. If | 415 // that are used to determine when to collect garbage. If |
| 411 // shouldForceConservativeGC returns true, we force the garbage | 416 // shouldForceConservativeGC returns true, we force the garbage |
| 412 // collection immediately. Otherwise, if shouldGC returns true, we | 417 // collection immediately. Otherwise, if shouldGC returns true, we |
| 413 // record that we should garbage collect the next time we return | 418 // record that we should garbage collect the next time we return |
| 414 // to the event loop. If both return false, we don't need to | 419 // to the event loop. If both return false, we don't need to |
| 415 // collect garbage at this point. | 420 // collect garbage at this point. |
| 416 bool shouldGC(); | 421 bool shouldGC(); |
| 417 bool shouldForceConservativeGC(); | 422 bool shouldForceConservativeGC(); |
| 418 | 423 |
| 424 void setGCState(GCState); | |
| 425 GCState gcState() const; | |
| 426 | |
| 419 // Was the last GC forced for testing? This is set when garbage collection | 427 // Was the last GC forced for testing? This is set when garbage collection |
| 420 // is forced for testing and there are pointers on the stack. It remains | 428 // is forced for testing and there are pointers on the stack. It remains |
| 421 // set until a garbage collection is triggered with no pointers on the stack. | 429 // set until a garbage collection is triggered with no pointers on the stack. |
| 422 // This is used for layout tests that trigger GCs and check if objects are | 430 // This is used for layout tests that trigger GCs and check if objects are |
| 423 // dead at a given point in time. That only reliably works when we get | 431 // dead at a given point in time. That only reliably works when we get |
| 424 // precise GCs with no conservative stack scanning. | 432 // precise GCs with no conservative stack scanning. |
| 425 void setForcePreciseGCForTesting(bool); | 433 void setForcePreciseGCForTesting(bool); |
| 426 bool forcePreciseGCForTesting(); | 434 bool forcePreciseGCForTesting(); |
| 427 | 435 |
| 428 bool sweepRequested(); | |
| 429 void setSweepRequested(); | |
| 430 void clearSweepRequested(); | |
| 431 void performPendingSweep(); | 436 void performPendingSweep(); |
| 432 | 437 |
| 433 // Support for disallowing allocation. Mainly used for sanity | 438 // Support for disallowing allocation. Mainly used for sanity |
| 434 // checks asserts. | 439 // checks asserts. |
| 435 bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocatio nCount; } | 440 bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocatio nCount; } |
| 436 void enterNoAllocationScope() { m_noAllocationCount++; } | 441 void enterNoAllocationScope() { m_noAllocationCount++; } |
| 437 void leaveNoAllocationScope() { m_noAllocationCount--; } | 442 void leaveNoAllocationScope() { m_noAllocationCount--; } |
| 438 | 443 |
| 439 // Before performing GC the thread-specific heap state should be | 444 // Before performing GC the thread-specific heap state should be |
| 440 // made consistent for sweeping. | 445 // made consistent for sweeping. |
| 441 void makeConsistentForSweeping(); | 446 void makeConsistentForSweeping(); |
| 442 #if ENABLE(ASSERT) | 447 #if ENABLE(ASSERT) |
| 443 bool isConsistentForSweeping(); | 448 bool isConsistentForSweeping(); |
| 444 #endif | 449 #endif |
| 445 | 450 |
| 446 // Is the thread corresponding to this thread state currently | 451 // Is this thread currently sweeping? |
| 447 // performing GC? | |
| 448 bool isInGC() const { return m_inGC; } | |
| 449 | |
| 450 // Is any of the threads registered with the blink garbage collection | |
| 451 // infrastructure currently performing GC? | |
| 452 static bool isAnyThreadInGC() { return s_inGC; } | |
| 453 | |
| 454 void enterGC() | |
| 455 { | |
| 456 ASSERT(!m_inGC); | |
| 457 ASSERT(!s_inGC); | |
| 458 m_inGC = true; | |
| 459 s_inGC = true; | |
| 460 } | |
| 461 | |
| 462 void leaveGC() | |
| 463 { | |
| 464 m_inGC = false; | |
| 465 s_inGC = false; | |
| 466 } | |
| 467 | |
| 468 // Is the thread corresponding to this thread state currently | |
| 469 // sweeping? | |
| 470 bool isSweepInProgress() const { return m_sweepInProgress; } | 452 bool isSweepInProgress() const { return m_sweepInProgress; } |
| 471 | 453 |
| 472 void prepareRegionTree(); | 454 void prepareRegionTree(); |
| 473 void flushHeapDoesNotContainCacheIfNeeded(); | 455 void flushHeapDoesNotContainCacheIfNeeded(); |
| 474 void prepareForGC(); | 456 void preGC(); |
| 457 void postGC(); | |
| 475 | 458 |
| 476 // Safepoint related functionality. | 459 // Safepoint related functionality. |
| 477 // | 460 // |
| 478 // When a thread attempts to perform GC it needs to stop all other threads | 461 // When a thread attempts to perform GC it needs to stop all other threads |
| 479 // that use the heap or at least guarantee that they will not touch any | 462 // that use the heap or at least guarantee that they will not touch any |
| 480 // heap allocated object until GC is complete. | 463 // heap allocated object until GC is complete. |
| 481 // | 464 // |
| 482 // We say that a thread is at a safepoint if this thread is guaranteed to | 465 // We say that a thread is at a safepoint if this thread is guaranteed to |
| 483 // not touch any heap allocated object or any heap related functionality until | 466 // not touch any heap allocated object or any heap related functionality until |
| 484 // it leaves the safepoint. | 467 // it leaves the safepoint. |
| (...skipping 249 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 734 void cleanupPages(); | 717 void cleanupPages(); |
| 735 | 718 |
| 736 void unregisterPreFinalizerInternal(void*); | 719 void unregisterPreFinalizerInternal(void*); |
| 737 void invokePreFinalizers(Visitor&); | 720 void invokePreFinalizers(Visitor&); |
| 738 | 721 |
| 739 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; | 722 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; |
| 740 static uintptr_t s_mainThreadStackStart; | 723 static uintptr_t s_mainThreadStackStart; |
| 741 static uintptr_t s_mainThreadUnderestimatedStackSize; | 724 static uintptr_t s_mainThreadUnderestimatedStackSize; |
| 742 static SafePointBarrier* s_safePointBarrier; | 725 static SafePointBarrier* s_safePointBarrier; |
| 743 | 726 |
| 744 // This variable is flipped to true after all threads are stopped | |
| 745 // and outermost GC has started. | |
| 746 static bool s_inGC; | |
| 747 | |
| 748 // We can't create a static member of type ThreadState here | 727 // We can't create a static member of type ThreadState here |
| 749 // because it will introduce global constructor and destructor. | 728 // because it will introduce global constructor and destructor. |
| 750 // We would like to manage lifetime of the ThreadState attached | 729 // We would like to manage lifetime of the ThreadState attached |
| 751 // to the main thread explicitly instead and still use normal | 730 // to the main thread explicitly instead and still use normal |
| 752 // constructor and destructor for the ThreadState class. | 731 // constructor and destructor for the ThreadState class. |
| 753 // For this we reserve static storage for the main ThreadState | 732 // For this we reserve static storage for the main ThreadState |
| 754 // and lazily construct ThreadState in it using placement new. | 733 // and lazily construct ThreadState in it using placement new. |
| 755 static uint8_t s_mainThreadStateStorage[]; | 734 static uint8_t s_mainThreadStateStorage[]; |
| 756 | 735 |
| 757 ThreadIdentifier m_thread; | 736 ThreadIdentifier m_thread; |
| 758 OwnPtr<PersistentNode> m_persistents; | 737 OwnPtr<PersistentNode> m_persistents; |
| 759 StackState m_stackState; | 738 StackState m_stackState; |
| 760 intptr_t* m_startOfStack; | 739 intptr_t* m_startOfStack; |
| 761 intptr_t* m_endOfStack; | 740 intptr_t* m_endOfStack; |
| 762 void* m_safePointScopeMarker; | 741 void* m_safePointScopeMarker; |
| 763 Vector<Address> m_safePointStackCopy; | 742 Vector<Address> m_safePointStackCopy; |
| 764 bool m_atSafePoint; | 743 bool m_atSafePoint; |
| 765 Vector<Interruptor*> m_interruptors; | 744 Vector<Interruptor*> m_interruptors; |
| 766 bool m_gcRequested; | |
| 767 bool m_didV8GCAfterLastGC; | 745 bool m_didV8GCAfterLastGC; |
| 768 bool m_forcePreciseGCForTesting; | 746 bool m_forcePreciseGCForTesting; |
| 769 volatile int m_sweepRequested; | |
| 770 bool m_sweepInProgress; | 747 bool m_sweepInProgress; |
| 771 size_t m_noAllocationCount; | 748 size_t m_noAllocationCount; |
| 772 bool m_inGC; | |
| 773 BaseHeap* m_heaps[NumberOfHeaps]; | 749 BaseHeap* m_heaps[NumberOfHeaps]; |
| 774 | 750 |
| 775 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; | 751 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; |
| 776 bool m_isTerminating; | 752 bool m_isTerminating; |
| 777 | 753 |
| 778 bool m_shouldFlushHeapDoesNotContainCache; | 754 bool m_shouldFlushHeapDoesNotContainCache; |
| 779 bool m_lowCollectionRate; | 755 bool m_lowCollectionRate; |
| 756 GCState m_gcState; | |
| 780 | 757 |
| 781 CallbackStack* m_weakCallbackStack; | 758 CallbackStack* m_weakCallbackStack; |
| 782 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers; | 759 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers; |
| 783 | 760 |
| 784 v8::Isolate* m_isolate; | 761 v8::Isolate* m_isolate; |
| 785 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); | 762 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); |
| 786 | 763 |
| 787 #if defined(ADDRESS_SANITIZER) | 764 #if defined(ADDRESS_SANITIZER) |
| 788 void* m_asanFakeStack; | 765 void* m_asanFakeStack; |
| 789 #endif | 766 #endif |
| (...skipping 115 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 905 // HeapPage header. We use some of the bits to determine | 882 // HeapPage header. We use some of the bits to determine |
| 906 // whether the page is part of a terminating thread or | 883 // whether the page is part of a terminating thread or |
| 907 // if the page is traced after being terminated (orphaned). | 884 // if the page is traced after being terminated (orphaned). |
| 908 uintptr_t m_terminating : 1; | 885 uintptr_t m_terminating : 1; |
| 909 uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2 | 886 uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2 |
| 910 }; | 887 }; |
| 911 | 888 |
| 912 } // namespace blink | 889 } // namespace blink |
| 913 | 890 |
| 914 #endif // ThreadState_h | 891 #endif // ThreadState_h |
| OLD | NEW |