| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 302 matching lines...) |
| 313 NoHeapPointersOnStack, | 313 NoHeapPointersOnStack, |
| 314 HeapPointersOnStack | 314 HeapPointersOnStack |
| 315 }; | 315 }; |
| 316 | 316 |
| 317 // When profiling we would like to identify forced GC requests. | 317 // When profiling we would like to identify forced GC requests. |
| 318 enum CauseOfGC { | 318 enum CauseOfGC { |
| 319 NormalGC, | 319 NormalGC, |
| 320 ForcedGC | 320 ForcedGC |
| 321 }; | 321 }; |
| 322 | 322 |
| 323 // See setGCState() for possible state transitions. |
| 324 enum GCState { |
| 325 NoGCScheduled, |
| 326 GCScheduled, |
| 327 StoppingOtherThreads, |
| 328 GCRunning, |
| 329 SweepScheduled, |
| 330 Sweeping, |
| 331 }; |
| 332 |
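Note: the new GCState enum replaces the separate gcRequested/sweepRequested flags with a single per-thread state machine. A minimal sketch of the transition checking setGCState() could enforce; the exact transition set below is an assumption for illustration, not necessarily what this patch implements:

    void ThreadState::setGCState(GCState gcState)
    {
        switch (gcState) {
        case GCScheduled:
            ASSERT(m_gcState == NoGCScheduled); // only an idle thread schedules a GC
            break;
        case StoppingOtherThreads:
            ASSERT(m_gcState == NoGCScheduled || m_gcState == GCScheduled);
            break;
        case GCRunning:
            ASSERT(m_gcState == StoppingOtherThreads);
            break;
        case SweepScheduled:
            ASSERT(m_gcState == GCRunning);
            break;
        case Sweeping:
            ASSERT(m_gcState == SweepScheduled);
            break;
        case NoGCScheduled:
            ASSERT(m_gcState == Sweeping); // back to idle once sweeping completes
            break;
        }
        m_gcState = gcState;
    }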
| 323 class NoSweepScope { | 333 class NoSweepScope { |
| 324 public: | 334 public: |
| 325 explicit NoSweepScope(ThreadState* state) : m_state(state) | 335 explicit NoSweepScope(ThreadState* state) : m_state(state) |
| 326 { | 336 { |
| 327 ASSERT(!m_state->m_sweepInProgress); | 337 ASSERT(!m_state->m_sweepInProgress); |
| 328 m_state->m_sweepInProgress = true; | 338 m_state->m_sweepInProgress = true; |
| 329 } | 339 } |
| 330 ~NoSweepScope() | 340 ~NoSweepScope() |
| 331 { | 341 { |
| 332 ASSERT(m_state->m_sweepInProgress); | 342 ASSERT(m_state->m_sweepInProgress); |
| (...skipping 59 matching lines...) |
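Note: NoSweepScope is a RAII guard around m_sweepInProgress; the constructor asserts no sweep is running and sets the flag, and the (partially elided) destructor clears it. A hypothetical call site:

    {
        ThreadState::NoSweepScope scope(state); // marks sweep in progress
        // ... run finalizers / sweep heaps; isSweepInProgress() is now true ...
    } // destructor clears m_sweepInProgress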
| 392 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); | 402 return reinterpret_cast<ThreadState*>(s_mainThreadStateStorage); |
| 393 } | 403 } |
| 394 | 404 |
| 395 bool isMainThread() const { return this == mainThreadState(); } | 405 bool isMainThread() const { return this == mainThreadState(); } |
| 396 inline bool checkThread() const | 406 inline bool checkThread() const |
| 397 { | 407 { |
| 398 ASSERT(m_thread == currentThread()); | 408 ASSERT(m_thread == currentThread()); |
| 399 return true; | 409 return true; |
| 400 } | 410 } |
| 401 | 411 |
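Note: checkThread() returns true unconditionally so the whole call can live inside an ASSERT and compile away in release builds. A typical (illustrative) call site:

    ASSERT(state->checkThread()); // debug-only thread-affinity check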
| 402 // If gcRequested returns true when a thread returns to its event | |
| 403 // loop the thread will initiate a garbage collection. | |
| 404 bool gcRequested(); | |
| 405 void setGCRequested(); | |
| 406 void clearGCRequested(); | |
| 407 void didV8GC(); | 412 void didV8GC(); |
| 408 | 413 |
| 409 // shouldGC and shouldForceConservativeGC implement the heuristics | 414 // shouldGC and shouldForceConservativeGC implement the heuristics |
| 410 // that are used to determine when to collect garbage. If | 415 // that are used to determine when to collect garbage. If |
| 411 // shouldForceConservativeGC returns true, we force the garbage | 416 // shouldForceConservativeGC returns true, we force the garbage |
| 412 // collection immediately. Otherwise, if shouldGC returns true, we | 417 // collection immediately. Otherwise, if shouldGC returns true, we |
| 413 // record that we should garbage collect the next time we return | 418 // record that we should garbage collect the next time we return |
| 414 // to the event loop. If both return false, we don't need to | 419 // to the event loop. If both return false, we don't need to |
| 415 // collect garbage at this point. | 420 // collect garbage at this point. |
| 416 bool shouldGC(); | 421 bool shouldGC(); |
| 417 bool shouldForceConservativeGC(); | 422 bool shouldForceConservativeGC(); |
| 418 | 423 |
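Note: a sketch of how a caller might act on the two heuristics described above; the call site and the collectGarbage() invocation are assumptions for illustration:

    if (state->shouldForceConservativeGC()) {
        // Collect right away; the stack may still hold heap pointers.
        Heap::collectGarbage(ThreadState::HeapPointersOnStack);
    } else if (state->shouldGC()) {
        // Defer: record the request and collect precisely at the
        // next return to the event loop.
        state->requestGC();
    }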
| 424 void requestGC() { setGCState(GCScheduled); } |
| 425 void setGCState(GCState); |
| 426 GCState gcState() const; |
| 427 |
| 428 void preGC(); |
| 429 void postGC(); |
| 430 |
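Note: preGC() and postGC() bracket the marking phase. A plausible ordering of a full cycle under the new state machine (illustrative; not necessarily this patch's exact driver code):

    state->setGCState(ThreadState::StoppingOtherThreads);
    // ... park all other threads at safepoints ...
    state->preGC();               // enters GCRunning; prepares heaps for marking
    // ... mark live objects ...
    state->postGC();              // enters SweepScheduled; sweeping is deferred
    state->performPendingSweep(); // Sweeping, then back to NoGCScheduled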
| 419 // Was the last GC forced for testing? This is set when garbage collection | 431 // Was the last GC forced for testing? This is set when garbage collection |
| 420 // is forced for testing and there are pointers on the stack. It remains | 432 // is forced for testing and there are pointers on the stack. It remains |
| 421 // set until a garbage collection is triggered with no pointers on the stack. | 433 // set until a garbage collection is triggered with no pointers on the stack. |
| 422 // This is used for layout tests that trigger GCs and check if objects are | 434 // This is used for layout tests that trigger GCs and check if objects are |
| 423 // dead at a given point in time. That only reliably works when we get | 435 // dead at a given point in time. That only reliably works when we get |
| 424 // precise GCs with no conservative stack scanning. | 436 // precise GCs with no conservative stack scanning. |
| 425 void setForcePreciseGCForTesting(bool); | 437 void setForcePreciseGCForTesting(bool); |
| 426 bool forcePreciseGCForTesting(); | 438 bool forcePreciseGCForTesting(); |
| 427 | 439 |
| 428 bool sweepRequested(); | |
| 429 void setSweepRequested(); | |
| 430 void clearSweepRequested(); | |
| 431 void performPendingSweep(); | 440 void performPendingSweep(); |
| 432 | 441 |
| 433 // Support for disallowing allocation. Mainly used for sanity | 442 // Support for disallowing allocation. Mainly used for sanity |
| 434 // check asserts. | 443 // check asserts. |
| 435 bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; } | 444 bool isAllocationAllowed() const { return !isAtSafePoint() && !m_noAllocationCount; } |
| 436 void enterNoAllocationScope() { m_noAllocationCount++; } | 445 void enterNoAllocationScope() { m_noAllocationCount++; } |
| 437 void leaveNoAllocationScope() { m_noAllocationCount--; } | 446 void leaveNoAllocationScope() { m_noAllocationCount--; } |
| 438 | 447 |
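Note: enterNoAllocationScope()/leaveNoAllocationScope() form a counter-based protocol that is easy to misuse if a leave call is missed; a small RAII wrapper in the style Blink uses elsewhere (this particular class is a sketch, not part of the patch):

    class NoAllocationScope {
    public:
        explicit NoAllocationScope(ThreadState* state) : m_state(state)
        {
            m_state->enterNoAllocationScope();
        }
        ~NoAllocationScope() { m_state->leaveNoAllocationScope(); }

    private:
        ThreadState* m_state;
    };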
| 439 // Before performing GC the thread-specific heap state should be | 448 // Before performing GC the thread-specific heap state should be |
| 440 // made consistent for sweeping. | 449 // made consistent for sweeping. |
| 441 void makeConsistentForSweeping(); | 450 void makeConsistentForSweeping(); |
| 442 | 451 |
| 443 // Is the thread corresponding to this thread state currently | 452 // Is this thread currently sweeping? |
| 444 // performing GC? | |
| 445 bool isInGC() const { return m_inGC; } | |
| 446 | |
| 447 // FIXME: This will be removed soon. | |
| 448 void enterGC() | |
| 449 { | |
| 450 ASSERT(!m_inGC); | |
| 451 m_inGC = true; | |
| 452 } | |
| 453 | |
| 454 // FIXME: This will be removed soon. | |
| 455 void leaveGC() | |
| 456 { | |
| 457 ASSERT(m_inGC); | |
| 458 m_inGC = false; | |
| 459 } | |
| 460 | |
| 461 // Is the thread corresponding to this thread state currently | |
| 462 // sweeping? | |
| 463 bool isSweepInProgress() const { return m_sweepInProgress; } | 453 bool isSweepInProgress() const { return m_sweepInProgress; } |
| 464 | 454 |
| 465 void prepareRegionTree(); | 455 void prepareRegionTree(); |
| 466 void flushHeapDoesNotContainCacheIfNeeded(); | 456 void flushHeapDoesNotContainCacheIfNeeded(); |
| 467 void prepareForGC(); | |
| 468 | 457 |
| 469 // Safepoint related functionality. | 458 // Safepoint related functionality. |
| 470 // | 459 // |
| 471 // When a thread attempts to perform GC it needs to stop all other threads | 460 // When a thread attempts to perform GC it needs to stop all other threads |
| 472 // that use the heap or at least guarantee that they will not touch any | 461 // that use the heap or at least guarantee that they will not touch any |
| 473 // heap allocated object until GC is complete. | 462 // heap allocated object until GC is complete. |
| 474 // | 463 // |
| 475 // We say that a thread is at a safepoint if this thread is guaranteed to | 464 // We say that a thread is at a safepoint if this thread is guaranteed to |
| 476 // not touch any heap allocated object or any heap related functionality until | 465 // not touch any heap allocated object or any heap related functionality until |
| 477 // it leaves the safepoint. | 466 // it leaves the safepoint. |
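Note: a hypothetical call site showing the safepoint contract described above; a thread entering a blocking operation first parks itself at a safepoint so a GCing thread need not interrupt it:

    {
        SafePointScope scope(ThreadState::NoHeapPointersOnStack);
        // Block on a mutex/condition variable; a GC on another thread
        // may run to completion while we wait here.
    } // leaving the scope resumes normal heap access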
| (...skipping 265 matching lines...) |
| 743 | 732 |
| 744 ThreadIdentifier m_thread; | 733 ThreadIdentifier m_thread; |
| 745 OwnPtr<PersistentNode> m_persistents; | 734 OwnPtr<PersistentNode> m_persistents; |
| 746 StackState m_stackState; | 735 StackState m_stackState; |
| 747 intptr_t* m_startOfStack; | 736 intptr_t* m_startOfStack; |
| 748 intptr_t* m_endOfStack; | 737 intptr_t* m_endOfStack; |
| 749 void* m_safePointScopeMarker; | 738 void* m_safePointScopeMarker; |
| 750 Vector<Address> m_safePointStackCopy; | 739 Vector<Address> m_safePointStackCopy; |
| 751 bool m_atSafePoint; | 740 bool m_atSafePoint; |
| 752 Vector<Interruptor*> m_interruptors; | 741 Vector<Interruptor*> m_interruptors; |
| 753 bool m_gcRequested; | |
| 754 bool m_didV8GCAfterLastGC; | 742 bool m_didV8GCAfterLastGC; |
| 755 bool m_forcePreciseGCForTesting; | 743 bool m_forcePreciseGCForTesting; |
| 756 volatile int m_sweepRequested; | |
| 757 bool m_sweepInProgress; | 744 bool m_sweepInProgress; |
| 758 size_t m_noAllocationCount; | 745 size_t m_noAllocationCount; |
| 759 bool m_inGC; | |
| 760 BaseHeap* m_heaps[NumberOfHeaps]; | 746 BaseHeap* m_heaps[NumberOfHeaps]; |
| 761 | 747 |
| 762 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; | 748 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; |
| 763 bool m_isTerminating; | 749 bool m_isTerminating; |
| 764 | 750 |
| 765 bool m_shouldFlushHeapDoesNotContainCache; | 751 bool m_shouldFlushHeapDoesNotContainCache; |
| 766 double m_collectionRate; | 752 double m_collectionRate; |
| 753 GCState m_gcState; |
| 767 | 754 |
| 768 CallbackStack* m_weakCallbackStack; | 755 CallbackStack* m_weakCallbackStack; |
| 769 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers; | 756 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers; |
| 770 | 757 |
| 771 v8::Isolate* m_isolate; | 758 v8::Isolate* m_isolate; |
| 772 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); | 759 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); |
| 773 | 760 |
| 774 #if defined(ADDRESS_SANITIZER) | 761 #if defined(ADDRESS_SANITIZER) |
| 775 void* m_asanFakeStack; | 762 void* m_asanFakeStack; |
| 776 #endif | 763 #endif |
| (...skipping 115 matching lines...) |
| 892 // HeapPage header. We use some of the bits to determine | 879 // HeapPage header. We use some of the bits to determine |
| 893 // whether the page is part of a terminating thread or | 880 // whether the page is part of a terminating thread or |
| 894 // if the page is traced after being terminated (orphaned). | 881 // if the page is traced after being terminated (orphaned). |
| 895 uintptr_t m_terminating : 1; | 882 uintptr_t m_terminating : 1; |
| 896 uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2 | 883 uintptr_t m_promptlyFreedSize : 17; // == blinkPageSizeLog2 |
| 897 }; | 884 }; |
| 898 | 885 |
| 899 } // namespace blink | 886 } // namespace blink |
| 900 | 887 |
| 901 #endif // ThreadState_h | 888 #endif // ThreadState_h |