| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 458 matching lines...) |
| 469 } | 469 } |
| 470 | 470 |
| 471 // Should only be called under protection of threadAttachMutex(). | 471 // Should only be called under protection of threadAttachMutex(). |
| 472 const Vector<Interruptor*>& interruptors() const { return m_interruptors; } | 472 const Vector<Interruptor*>& interruptors() const { return m_interruptors; } |
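A minimal sketch of the locking discipline this comment calls for; MutexLocker is WTF's scoped lock, and state, threadAttachMutex(), and the loop body are assumptions for illustration:

    // Hold the attach mutex for the whole traversal; otherwise threads attaching
    // or detaching concurrently may mutate the vector underneath us.
    MutexLocker locker(threadAttachMutex());
    for (Interruptor* interruptor : state->interruptors()) {
        // ... inspect or signal each registered interruptor ...
    }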
| 473 | 473 |
| 474 void recordStackEnd(intptr_t* endOfStack) | 474 void recordStackEnd(intptr_t* endOfStack) |
| 475 { | 475 { |
| 476 m_endOfStack = endOfStack; | 476 m_endOfStack = endOfStack; |
| 477 } | 477 } |
| 478 | 478 |
| 479 // MarkingTask functions are called before and after marking live objects. | |
| 480 // They might be called on threads other than the thread associated with this |
| 481 // ThreadState. | |
| 482 class MarkingTask { | |
| 483 public: | |
| 484 virtual ~MarkingTask() { } | |
| 485 virtual void willStartMarking(ThreadState&) { } | |
| 486 virtual void didFinishMarking(ThreadState&) { } | |
| 487 }; | |
| 488 // A caller is responsible for calling removeMarkingTask before deleting the |
| 489 // specified task. | |
| 490 void addMarkingTask(MarkingTask*); | |
| 491 void removeMarkingTask(MarkingTask*); | |
| 492 | |
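For reference, a minimal sketch of how the MarkingTask hooks removed here were meant to be driven; the subclass name and the call sites are hypothetical:

    class AuxiliaryMarkingTask final : public ThreadState::MarkingTask {
    public:
        void willStartMarking(ThreadState&) override { /* set up auxiliary marking state */ }
        void didFinishMarking(ThreadState&) override { /* tear it down after marking */ }
    };

    // The registering code owns the task and must remove it before deleting it.
    AuxiliaryMarkingTask* task = new AuxiliaryMarkingTask;
    ThreadState::current()->addMarkingTask(task);
    // ... GCs may now invoke the hooks, possibly from other threads ...
    ThreadState::current()->removeMarkingTask(task);
    delete task;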
| 493 // Get one of the heap structures for this thread. | 479 // Get one of the heap structures for this thread. |
| 494 // | 480 // |
| 495 // The heap is split into multiple heap parts based on object | 481 // The heap is split into multiple heap parts based on object |
| 496 // types. To get the index for a given type, use | 482 // types. To get the index for a given type, use |
| 497 // HeapIndexTrait<Type>::index. | 483 // HeapIndexTrait<Type>::index. |
| 498 BaseHeap* heap(int heapIndex) const { return m_heaps[heapIndex]; } | 484 BaseHeap* heap(int heapIndex) const { return m_heaps[heapIndex]; } |
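As a concrete example of the lookup described above; threadState is a hypothetical ThreadState* and Node is just a stand-in for some heap-allocated type:

    // HeapIndexTrait<Type>::index maps the type to its heap partition.
    BaseHeap* nodeHeap = threadState->heap(HeapIndexTrait<Node>::index);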
| 499 | 485 |
| 500 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) | 486 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
| 501 // Infrastructure to determine if an address is within one of the | 487 // Infrastructure to determine if an address is within one of the |
| 502 // address ranges for the Blink heap. If the address is in the Blink | 488 // address ranges for the Blink heap. If the address is in the Blink |
| 503 // heap the containing heap page is returned. | 489 // heap the containing heap page is returned. |
| 504 BasePage* findPageFromAddress(Address); | 490 BasePage* findPageFromAddress(Address); |
| 505 BasePage* findPageFromAddress(const void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(const_cast<void*>(pointer))); } | 491 BasePage* findPageFromAddress(void* pointer) { return findPageFromAddress(reinterpret_cast<Address>(pointer)); } |
| 506 #endif | 492 #endif |
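A sketch of typical use under the same conditional compilation; threadState and object are hypothetical:

    #if ENABLE(ASSERT)
    // Verify that |object| really points into a page of this thread's Blink heap.
    ASSERT(threadState->findPageFromAddress(object));
    #endif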
| 507 | 493 |
| 508 // List of persistent roots allocated on the given thread. | 494 // List of persistent roots allocated on the given thread. |
| 509 PersistentNode* roots() const { return m_persistents.get(); } | 495 PersistentNode* roots() const { return m_persistents.get(); } |
| 510 | 496 |
| 511 // List of global persistent roots not owned by any particular thread. | 497 // List of global persistent roots not owned by any particular thread. |
| 512 // globalRootsMutex must be acquired before any modifications. | 498 // globalRootsMutex must be acquired before any modifications. |
| 513 static PersistentNode& globalRoots(); | 499 static PersistentNode& globalRoots(); |
| 514 static Mutex& globalRootsMutex(); | 500 static Mutex& globalRootsMutex(); |
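A minimal sketch of the stated contract, assuming WTF's MutexLocker:

    // Any modification of the global persistent list must hold the mutex.
    MutexLocker locker(ThreadState::globalRootsMutex());
    PersistentNode& globalList = ThreadState::globalRoots();
    // ... link or unlink a cross-thread persistent node from |globalList| ...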
| 515 | 501 |
| (...skipping 73 matching lines...) |
| 589 // Cancel above requests. The argument should be |*this|. This function is | 575 // Cancel above requests. The argument should be |*this|. This function is |
| 590 // ignored if it is called in pre-finalizer functions. | 576 // ignored if it is called in pre-finalizer functions. |
| 591 template<typename T> | 577 template<typename T> |
| 592 void unregisterPreFinalizer(T& target) | 578 void unregisterPreFinalizer(T& target) |
| 593 { | 579 { |
| 594 checkThread(); | 580 checkThread(); |
| 595 ASSERT(&T::invokePreFinalizer); | 581 ASSERT(&T::invokePreFinalizer); |
| 596 unregisterPreFinalizerInternal(&target); | 582 unregisterPreFinalizerInternal(&target); |
| 597 } | 583 } |
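A hedged sketch of the intended pairing; the class, its methods, and registerPreFinalizer() (declared in the skipped lines above) are assumptions based on the surrounding comments:

    class ResourceHolder final : public GarbageCollectedFinalized<ResourceHolder> {
    public:
        ResourceHolder() { ThreadState::current()->registerPreFinalizer(*this); }

        // Pre-finalizer, run before the object is swept. Something like a
        // USING_PRE_FINALIZER-style macro is assumed to wire up invokePreFinalizer().
        void dispose() { /* release external resources */ }

        // If the resources go away early, the request can be cancelled; note that
        // the call is ignored when made from inside a pre-finalizer.
        void didReleaseResources() { ThreadState::current()->unregisterPreFinalizer(*this); }
    };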
| 598 | 584 |
| 599 // Mark an on-heap object as a zombie. The object won't be swept until | |
| 600 // purifyZombies(). It's ok to call markAsZombie() during weak processing. | |
| 601 // The specified object must not have references to objects owned by other | |
| 602 // threads. | |
| 603 // Do not use this function. This feature is a temporary workaround for |
| 604 // WebAudio, and will be removed soon. | |
| 605 void markAsZombie(void*); | |
| 606 // Purify all zombie objects marked before calling purifyZombies(). |
| 607 void purifyZombies(); | |
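For context only (the comment above says not to use this), a hypothetical sketch of the WebAudio-style workaround; m_handler is a stand-in for the object being kept alive:

    // During weak processing: keep |m_handler| from being swept until purifyZombies().
    ThreadState::current()->markAsZombie(m_handler);
    // ... later, once the zombie objects are safe to reclaim ...
    ThreadState::current()->purifyZombies();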
| 608 | |
| 609 Vector<PageMemoryRegion*>& allocatedRegionsSinceLastGC() { return m_allocatedRegionsSinceLastGC; } | 585 Vector<PageMemoryRegion*>& allocatedRegionsSinceLastGC() { return m_allocatedRegionsSinceLastGC; } |
| 610 | 586 |
| 611 void shouldFlushHeapDoesNotContainCache() { m_shouldFlushHeapDoesNotContainCache = true; } | 587 void shouldFlushHeapDoesNotContainCache() { m_shouldFlushHeapDoesNotContainCache = true; } |
| 612 | 588 |
| 613 void registerTraceDOMWrappers(v8::Isolate* isolate, void (*traceDOMWrappers)(v8::Isolate*, Visitor*)) | 589 void registerTraceDOMWrappers(v8::Isolate* isolate, void (*traceDOMWrappers)(v8::Isolate*, Visitor*)) |
| 614 { | 590 { |
| 615 m_isolate = isolate; | 591 m_isolate = isolate; |
| 616 m_traceDOMWrappers = traceDOMWrappers; | 592 m_traceDOMWrappers = traceDOMWrappers; |
| 617 } | 593 } |
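A sketch of how the bindings layer might install its tracing hook; the callback body and the isolate variable are hypothetical:

    static void traceDOMWrappers(v8::Isolate*, Visitor* visitor)
    {
        // Trace DOM wrapper objects reachable from V8 so they survive Oilpan GC.
    }

    ThreadState::current()->registerTraceDOMWrappers(isolate, traceDOMWrappers);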
| 618 | 594 |
| (...skipping 31 matching lines...) |
| 650 // Perform registered cleanup tasks and garbage collection | 626 // Perform registered cleanup tasks and garbage collection |
| 651 // to sweep away any objects that are left on this heap. | 627 // to sweep away any objects that are left on this heap. |
| 652 // We assert that nothing remains after this cleanup. | 628 // We assert that nothing remains after this cleanup. |
| 653 // If the assertion does not hold, we crash, as we are potentially | 629 // If the assertion does not hold, we crash, as we are potentially |
| 654 // in a dangling pointer situation. | 630 // in a dangling pointer situation. |
| 655 void cleanup(); | 631 void cleanup(); |
| 656 void cleanupPages(); | 632 void cleanupPages(); |
| 657 | 633 |
| 658 void unregisterPreFinalizerInternal(void*); | 634 void unregisterPreFinalizerInternal(void*); |
| 659 void invokePreFinalizers(Visitor&); | 635 void invokePreFinalizers(Visitor&); |
| 660 void invokePreMarkingTasks(); | |
| 661 void invokePostMarkingTasks(); | |
| 662 | 636 |
| 663 #if ENABLE(GC_PROFILING) | 637 #if ENABLE(GC_PROFILING) |
| 664 void snapshotFreeList(); | 638 void snapshotFreeList(); |
| 665 #endif | 639 #endif |
| 666 | 640 |
| 667 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; | 641 static WTF::ThreadSpecific<ThreadState*>* s_threadSpecific; |
| 668 static uintptr_t s_mainThreadStackStart; | 642 static uintptr_t s_mainThreadStackStart; |
| 669 static uintptr_t s_mainThreadUnderestimatedStackSize; | 643 static uintptr_t s_mainThreadUnderestimatedStackSize; |
| 670 static SafePointBarrier* s_safePointBarrier; | 644 static SafePointBarrier* s_safePointBarrier; |
| 671 | 645 |
| (...skipping 17 matching lines...) |
| 689 Vector<Interruptor*> m_interruptors; | 663 Vector<Interruptor*> m_interruptors; |
| 690 bool m_hasPendingIdleTask; | 664 bool m_hasPendingIdleTask; |
| 691 bool m_didV8GCAfterLastGC; | 665 bool m_didV8GCAfterLastGC; |
| 692 bool m_sweepForbidden; | 666 bool m_sweepForbidden; |
| 693 size_t m_noAllocationCount; | 667 size_t m_noAllocationCount; |
| 694 size_t m_allocatedObjectSizeBeforeGC; | 668 size_t m_allocatedObjectSizeBeforeGC; |
| 695 BaseHeap* m_heaps[NumberOfHeaps]; | 669 BaseHeap* m_heaps[NumberOfHeaps]; |
| 696 | 670 |
| 697 Vector<OwnPtr<CleanupTask>> m_cleanupTasks; | 671 Vector<OwnPtr<CleanupTask>> m_cleanupTasks; |
| 698 bool m_isTerminating; | 672 bool m_isTerminating; |
| 699 Vector<MarkingTask*> m_markingTasks; | |
| 700 | 673 |
| 701 bool m_shouldFlushHeapDoesNotContainCache; | 674 bool m_shouldFlushHeapDoesNotContainCache; |
| 702 double m_collectionRate; | 675 double m_collectionRate; |
| 703 GCState m_gcState; | 676 GCState m_gcState; |
| 704 | 677 |
| 705 CallbackStack* m_weakCallbackStack; | 678 CallbackStack* m_weakCallbackStack; |
| 706 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers; | 679 HashMap<void*, bool (*)(void*, Visitor&)> m_preFinalizers; |
| 707 HashSet<void*> m_zombies; | 680 |
| 708 v8::Isolate* m_isolate; | 681 v8::Isolate* m_isolate; |
| 709 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); | 682 void (*m_traceDOMWrappers)(v8::Isolate*, Visitor*); |
| 710 | 683 |
| 711 #if defined(ADDRESS_SANITIZER) | 684 #if defined(ADDRESS_SANITIZER) |
| 712 void* m_asanFakeStack; | 685 void* m_asanFakeStack; |
| 713 #endif | 686 #endif |
| 714 | 687 |
| 715 Vector<PageMemoryRegion*> m_allocatedRegionsSinceLastGC; | 688 Vector<PageMemoryRegion*> m_allocatedRegionsSinceLastGC; |
| 716 | 689 |
| 717 #if ENABLE(GC_PROFILING) | 690 #if ENABLE(GC_PROFILING) |
| (...skipping 69 matching lines...) |
| 787 m_locked = false; | 760 m_locked = false; |
| 788 } | 761 } |
| 789 | 762 |
| 790 MutexBase& m_mutex; | 763 MutexBase& m_mutex; |
| 791 bool m_locked; | 764 bool m_locked; |
| 792 }; | 765 }; |
| 793 | 766 |
| 794 } // namespace blink | 767 } // namespace blink |
| 795 | 768 |
| 796 #endif // ThreadState_h | 769 #endif // ThreadState_h |