| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 31 matching lines...) |
| 42 #include "wtf/Vector.h" | 42 #include "wtf/Vector.h" |
| 43 | 43 |
| 44 namespace WebCore { | 44 namespace WebCore { |
| 45 | 45 |
| 46 class BaseHeap; | 46 class BaseHeap; |
| 47 class BaseHeapPage; | 47 class BaseHeapPage; |
| 48 class FinalizedHeapObjectHeader; | 48 class FinalizedHeapObjectHeader; |
| 49 struct GCInfo; | 49 struct GCInfo; |
| 50 class HeapContainsCache; | 50 class HeapContainsCache; |
| 51 class HeapObjectHeader; | 51 class HeapObjectHeader; |
| 52 class PageMemory; |
| 52 class PersistentNode; | 53 class PersistentNode; |
| 53 class Visitor; | 54 class Visitor; |
| 54 class SafePointBarrier; | 55 class SafePointBarrier; |
| 55 class SafePointAwareMutexLocker; | 56 class SafePointAwareMutexLocker; |
| 56 template<typename Header> class ThreadHeap; | 57 template<typename Header> class ThreadHeap; |
| 57 class CallbackStack; | 58 class CallbackStack; |
| 58 | 59 |
| 59 typedef uint8_t* Address; | 60 typedef uint8_t* Address; |
| 60 | 61 |
| 61 typedef void (*FinalizationCallback)(void*); | 62 typedef void (*FinalizationCallback)(void*); |
| (...skipping 170 matching lines...) |
| 232 // The set of ThreadStates for all threads attached to the Blink | 233 // The set of ThreadStates for all threads attached to the Blink |
| 233 // garbage collector. | 234 // garbage collector. |
| 234 typedef HashSet<ThreadState*> AttachedThreadStateSet; | 235 typedef HashSet<ThreadState*> AttachedThreadStateSet; |
| 235 static AttachedThreadStateSet& attachedThreads(); | 236 static AttachedThreadStateSet& attachedThreads(); |
| 236 | 237 |
| 237 // Initialize threading infrastructure. Should be called from the main | 238 // Initialize threading infrastructure. Should be called from the main |
| 238 // thread. | 239 // thread. |
| 239 static void init(); | 240 static void init(); |
| 240 static void shutdown(); | 241 static void shutdown(); |
| 241 static void shutdownHeapIfNecessary(); | 242 static void shutdownHeapIfNecessary(); |
| 243 bool isTerminating() { return m_isTerminating; } |
| 242 | 244 |
| 243 static void attachMainThread(); | 245 static void attachMainThread(); |
| 244 static void detachMainThread(); | 246 static void detachMainThread(); |
| 245 | 247 |
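
For reference while reviewing, the static entry points above imply a paired setup/teardown on the main thread, roughly as sketched below. Whether init() already attaches the main thread internally or the caller does it explicitly is not visible in this diff; the sketch assumes explicit calls, and the include path is likewise assumed.

    // Sketch only: intended pairing of the static lifecycle entry points.
    #include "heap/ThreadState.h"  // assumed include path

    void mainThreadGcLifecycleSketch()
    {
        WebCore::ThreadState::init();             // set up GC threading infrastructure
        WebCore::ThreadState::attachMainThread(); // assumption: explicit attach by the caller

        // ... application work; other threads attach/detach as needed ...

        WebCore::ThreadState::detachMainThread();
        WebCore::ThreadState::shutdown();
    }
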
| 246 // Trace all GC roots, called when marking the managed heap objects. | 248 // Trace all GC roots, called when marking the managed heap objects. |
| 247 static void visitRoots(Visitor*); | 249 static void visitRoots(Visitor*); |
| 248 | 250 |
| 249 // Associate ThreadState object with the current thread. After this | 251 // Associate ThreadState object with the current thread. After this |
| 250 // call the thread can start using the garbage collected heap infrastructure. | 252 // call the thread can start using the garbage collected heap infrastructure. |
| 251 // It also has to periodically check for safepoints. | 253 // It also has to periodically check for safepoints. |
| (...skipping 248 matching lines...) |
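
The comment above (associate a ThreadState with the current thread, then periodically check for safepoints) is the contract for non-main threads. A hedged sketch follows; attach(), detach(), safePoint() and NoHeapPointersOnStack live in the elided portion of this header, so those names are assumptions here rather than quotes from the diff.

    // Sketch: a worker thread participating in the Blink GC.
    void workerThreadMainSketch()
    {
        WebCore::ThreadState::attach();  // assumed per-thread entry point
        WebCore::ThreadState* state = WebCore::ThreadState::current();

        for (int i = 0; i < 1000; ++i) {
            // ... one unit of real work goes here ...

            // Reach a safepoint periodically so a GC requested by another
            // thread can park this one instead of timing out.
            state->safePoint(WebCore::ThreadState::NoHeapPointersOnStack); // assumed API
        }

        WebCore::ThreadState::detach();  // assumed; must run before thread exit
    }
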
| 500 static const GCInfo* findGCInfoFromAllThreads(Address); | 502 static const GCInfo* findGCInfoFromAllThreads(Address); |
| 501 #endif | 503 #endif |
| 502 | 504 |
| 503 void pushWeakObjectPointerCallback(void*, WeakPointerCallback); | 505 void pushWeakObjectPointerCallback(void*, WeakPointerCallback); |
| 504 bool popAndInvokeWeakPointerCallback(Visitor*); | 506 bool popAndInvokeWeakPointerCallback(Visitor*); |
| 505 | 507 |
| 506 void getStats(HeapStats&); | 508 void getStats(HeapStats&); |
| 507 HeapStats& stats() { return m_stats; } | 509 HeapStats& stats() { return m_stats; } |
| 508 HeapStats& statsAfterLastGC() { return m_statsAfterLastGC; } | 510 HeapStats& statsAfterLastGC() { return m_statsAfterLastGC; } |
| 509 | 511 |
| 512 void setupHeapsForTermination(); |
| 513 void visitLocalRoots(Visitor*); |
| 514 |
| 510 private: | 515 private: |
| 511 explicit ThreadState(); | 516 explicit ThreadState(); |
| 512 ~ThreadState(); | 517 ~ThreadState(); |
| 513 | 518 |
| 514 friend class SafePointBarrier; | 519 friend class SafePointBarrier; |
| 515 friend class SafePointAwareMutexLocker; | 520 friend class SafePointAwareMutexLocker; |
| 516 | 521 |
| 517 void enterSafePoint(StackState, void*); | 522 void enterSafePoint(StackState, void*); |
| 518 NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope(); | 523 NO_SANITIZE_ADDRESS void copyStackUntilSafePointScope(); |
| 519 void clearSafePointScopeMarker() | 524 void clearSafePointScopeMarker() |
| (...skipping 11 matching lines...) |
| 531 BaseHeapPage* heapPageFromAddress(Address); | 536 BaseHeapPage* heapPageFromAddress(Address); |
| 532 | 537 |
| 533 // When ThreadState is detaching from a non-main thread its | 538 // When ThreadState is detaching from a non-main thread its |
| 534 // heap is expected to be empty (because it is going away). | 539 // heap is expected to be empty (because it is going away). |
| 535 // Perform registered cleanup tasks and garbage collection | 540 // Perform registered cleanup tasks and garbage collection |
| 536 // to sweep away any objects that are left on this heap. | 541 // to sweep away any objects that are left on this heap. |
| 537 // We assert that nothing must remain after this cleanup. | 542 // We assert that nothing must remain after this cleanup. |
| 538 // If the assertion does not hold we crash as we are potentially | 543 // If the assertion does not hold we crash as we are potentially |
| 539 // in a dangling pointer situation. | 544 // in a dangling pointer situation. |
| 540 void cleanup(); | 545 void cleanup(); |
| 541 void preCleanup(); | 546 void cleanupPages(); |
| 542 void postCleanup(); | |
| 543 | 547 |
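
The comment block above carries the key termination invariant: after cleanup() a detaching thread's heap must be empty, or we risk dangling pointers. A minimal sketch of that check, using the public stats accessors declared earlier; HeapStats::totalObjectSpace() and RELEASE_ASSERT are assumptions about the surrounding code, not introduced by this change.

    // Sketch only: the "nothing must remain" contract after cleanup().
    void assertDetachingHeapIsEmptySketch(WebCore::ThreadState* state)
    {
        WebCore::HeapStats stats;
        state->getStats(stats);
        // Anything still allocated here could be reachable from another
        // thread and would dangle once this thread's pages are torn down.
        RELEASE_ASSERT(!stats.totalObjectSpace()); // accessor name assumed
    }
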
| 544 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; | 548 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; |
| 545 static SafePointBarrier* s_safePointBarrier; | 549 static SafePointBarrier* s_safePointBarrier; |
| 546 | 550 |
| 547 // This variable is flipped to true after all threads are stopped | 551 // This variable is flipped to true after all threads are stopped |
| 548 // and outermost GC has started. | 552 // and outermost GC has started. |
| 549 static bool s_inGC; | 553 static bool s_inGC; |
| 550 | 554 |
| 551 // We can't create a static member of type ThreadState here | 555 // We can't create a static member of type ThreadState here |
| 552 // because it will introduce global constructor and destructor. | 556 // because it will introduce global constructor and destructor. |
| (...skipping 20 matching lines...) |
| 573 volatile int m_sweepRequested; | 577 volatile int m_sweepRequested; |
| 574 bool m_sweepInProgress; | 578 bool m_sweepInProgress; |
| 575 size_t m_noAllocationCount; | 579 size_t m_noAllocationCount; |
| 576 bool m_inGC; | 580 bool m_inGC; |
| 577 BaseHeap* m_heaps[NumberOfHeaps]; | 581 BaseHeap* m_heaps[NumberOfHeaps]; |
| 578 OwnPtr<HeapContainsCache> m_heapContainsCache; | 582 OwnPtr<HeapContainsCache> m_heapContainsCache; |
| 579 HeapStats m_stats; | 583 HeapStats m_stats; |
| 580 HeapStats m_statsAfterLastGC; | 584 HeapStats m_statsAfterLastGC; |
| 581 | 585 |
| 582 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; | 586 Vector<OwnPtr<CleanupTask> > m_cleanupTasks; |
| 583 bool m_isCleaningUp; | 587 bool m_isTerminating; |
| 584 | 588 |
| 585 CallbackStack* m_weakCallbackStack; | 589 CallbackStack* m_weakCallbackStack; |
| 586 | 590 |
| 587 #if defined(ADDRESS_SANITIZER) | 591 #if defined(ADDRESS_SANITIZER) |
| 588 void* m_asanFakeStack; | 592 void* m_asanFakeStack; |
| 589 #endif | 593 #endif |
| 590 }; | 594 }; |
| 591 | 595 |
| 592 template<ThreadAffinity affinity> class ThreadStateFor; | 596 template<ThreadAffinity affinity> class ThreadStateFor; |
| 593 | 597 |
| (...skipping 13 matching lines...) |
| 607 }; | 611 }; |
| 608 | 612 |
| 609 // The SafePointAwareMutexLocker is used to enter a safepoint while waiting for | 613 // The SafePointAwareMutexLocker is used to enter a safepoint while waiting for |
| 610 // a mutex lock. It also ensures that the lock is not held while waiting for a GC | 614 // a mutex lock. It also ensures that the lock is not held while waiting for a GC |
| 611 // to complete in the leaveSafePoint method, by releasing the lock if the | 615 // to complete in the leaveSafePoint method, by releasing the lock if the |
| 612 // leaveSafePoint method cannot complete without blocking, see | 616 // leaveSafePoint method cannot complete without blocking, see |
| 613 // SafePointBarrier::checkAndPark. | 617 // SafePointBarrier::checkAndPark. |
| 614 class SafePointAwareMutexLocker { | 618 class SafePointAwareMutexLocker { |
| 615 WTF_MAKE_NONCOPYABLE(SafePointAwareMutexLocker); | 619 WTF_MAKE_NONCOPYABLE(SafePointAwareMutexLocker); |
| 616 public: | 620 public: |
| 617 explicit SafePointAwareMutexLocker(Mutex& mutex) : m_mutex(mutex), m_locked(false) | 621 explicit SafePointAwareMutexLocker(Mutex& mutex, ThreadState::StackState stackState = ThreadState::HeapPointersOnStack) |
| 622 : m_mutex(mutex) |
| 623 , m_locked(false) |
| 618 { | 624 { |
| 619 ThreadState* state = ThreadState::current(); | 625 ThreadState* state = ThreadState::current(); |
| 620 do { | 626 do { |
| 621 bool leaveSafePoint = false; | 627 bool leaveSafePoint = false; |
| 622 // We cannot enter a safepoint if we are currently sweeping. In that | 628 // We cannot enter a safepoint if we are currently sweeping. In that |
| 623 // case we just try to acquire the lock without being at a safepoint. | 629 // case we just try to acquire the lock without being at a safepoint. |
| 624 // If another thread tries to do a GC at that time it might time out | 630 // If another thread tries to do a GC at that time it might time out |
| 625 // due to this thread not being at a safepoint and waiting on the lock. | 631 // due to this thread not being at a safepoint and waiting on the lock. |
| 626 if (!state->isSweepInProgress() && !state->isAtSafePoint()) { | 632 if (!state->isSweepInProgress() && !state->isAtSafePoint()) { |
| 627 state->enterSafePoint(ThreadState::HeapPointersOnStack, this); | 633 state->enterSafePoint(stackState, this); |
| 628 leaveSafePoint = true; | 634 leaveSafePoint = true; |
| 629 } | 635 } |
| 630 m_mutex.lock(); | 636 m_mutex.lock(); |
| 631 m_locked = true; | 637 m_locked = true; |
| 632 if (leaveSafePoint) { | 638 if (leaveSafePoint) { |
| 633 // When leaving the safepoint we might end up releasing the mutex | 639 // When leaving the safepoint we might end up releasing the mutex |
| 634 // if another thread is requesting a GC, see | 640 // if another thread is requesting a GC, see |
| 635 // SafePointBarrier::checkAndPark. This is the case where we | 641 // SafePointBarrier::checkAndPark. This is the case where we |
| 636 // loop around to reacquire the lock. | 642 // loop around to reacquire the lock. |
| 637 state->leaveSafePoint(this); | 643 state->leaveSafePoint(this); |
| (...skipping 14 matching lines...) |
| 652 { | 658 { |
| 653 ASSERT(m_locked); | 659 ASSERT(m_locked); |
| 654 m_mutex.unlock(); | 660 m_mutex.unlock(); |
| 655 m_locked = false; | 661 m_locked = false; |
| 656 } | 662 } |
| 657 | 663 |
| 658 Mutex& m_mutex; | 664 Mutex& m_mutex; |
| 659 bool m_locked; | 665 bool m_locked; |
| 660 }; | 666 }; |
| 661 | 667 |
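
With the constructor now taking an optional StackState (defaulting to HeapPointersOnStack), usage stays a one-liner; the mutex and the calling function below are hypothetical.

    // Sketch: acquiring a cross-thread mutex without stalling a concurrent GC.
    void touchSharedStateSketch(WTF::Mutex& sharedMutex)
    {
        // Enters a safepoint while waiting for the lock, so a GC requested on
        // another thread is not blocked by this acquisition; the lock may be
        // dropped and reacquired once if a GC runs (see checkAndPark).
        WebCore::SafePointAwareMutexLocker locker(sharedMutex);
        // ... mutate the mutex-protected shared state here ...
    }

A caller that can guarantee its stack holds no heap pointers can pass the other StackState value as the second constructor argument.
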
| 668 // Common header for heap pages. Needs to be defined before class Visitor. |
| 669 class BaseHeapPage { |
| 670 public: |
| 671 BaseHeapPage(PageMemory*, const GCInfo*, ThreadState*); |
| 672 virtual ~BaseHeapPage() { } |
| 673 |
| 674 // Check if the given address points to an object in this |
| 675 // heap page. If so, find the start of that object and mark it |
| 676 // using the given Visitor. Otherwise do nothing. The pointer must |
| 677 // be within the same aligned blinkPageSize as the this-pointer. |
| 678 // |
| 679 // This is used during conservative stack scanning to |
| 680 // conservatively mark all objects that could be referenced from |
| 681 // the stack. |
| 682 virtual void checkAndMarkPointer(Visitor*, Address) = 0; |
| 683 virtual bool contains(Address) = 0; |
| 684 |
| 685 #if ENABLE(GC_TRACING) |
| 686 virtual const GCInfo* findGCInfo(Address) = 0; |
| 687 #endif |
| 688 |
| 689 Address address() { return reinterpret_cast<Address>(this); } |
| 690 PageMemory* storage() const { return m_storage; } |
| 691 ThreadState* threadState() const { return m_threadState; } |
| 692 const GCInfo* gcInfo() { return m_gcInfo; } |
| 693 virtual bool isLargeObject() { return false; } |
| 694 virtual void markOrphaned() |
| 695 { |
| 696 m_threadState = 0; |
| 697 m_gcInfo = 0; |
| 698 m_terminating = false; |
| 699 m_tracedAfterOrphaned = false; |
| 700 } |
| 701 bool orphaned() { return !m_threadState; } |
| 702 bool terminating() { return m_terminating; } |
| 703 void setTerminating() { m_terminating = true; } |
| 704 bool tracedAfterOrphaned() { return m_tracedAfterOrphaned; } |
| 705 void setTracedAfterOrphaned() { m_tracedAfterOrphaned = true; } |
| 706 |
| 707 private: |
| 708 PageMemory* m_storage; |
| 709 const GCInfo* m_gcInfo; |
| 710 ThreadState* m_threadState; |
| 711 // Pointer sized integer to ensure proper alignment of the |
| 712 // HeapPage header. We use some of the bits to determine |
| 713 // whether the page is part of a terminating thread or |
| 714 // if the page is traced after being terminated (orphaned). |
| 715 uintptr_t m_terminating : 1; |
| 716 uintptr_t m_tracedAfterOrphaned : 1; |
| 717 }; |
| 718 |
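
To make the new flags easier to review, here is a sketch of how this interface reads from the marking side. Only the BaseHeapPage calls come from the class above; the page-lookup helper and the surrounding policy are hypothetical.

    // Sketch: conservatively handling an address found during stack scanning.
    WebCore::BaseHeapPage* lookupPageForAddressSketch(WebCore::Address); // hypothetical helper

    void checkAddressConservativelySketch(WebCore::Visitor* visitor, WebCore::Address address)
    {
        WebCore::BaseHeapPage* page = lookupPageForAddressSketch(address);
        if (!page)
            return;

        if (page->orphaned()) {
            // Pages left behind by a terminated thread are not traced into;
            // record that they were reached so they are kept until swept.
            page->setTracedAfterOrphaned();
            return;
        }

        // For live pages, let the page find the object start (if any) and mark it.
        page->checkAndMarkPointer(visitor, address);
    }
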
| 662 } | 719 } |
| 663 | 720 |
| 664 #endif // ThreadState_h | 721 #endif // ThreadState_h |