OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 405 matching lines...)
416 } | 416 } |
417 | 417 |
418 NO_SANITIZE_ADDRESS | 418 NO_SANITIZE_ADDRESS |
419 void HeapObjectHeader::unmark() | 419 void HeapObjectHeader::unmark() |
420 { | 420 { |
421 checkHeader(); | 421 checkHeader(); |
422 m_size &= ~markBitMask; | 422 m_size &= ~markBitMask; |
423 } | 423 } |
424 | 424 |
425 NO_SANITIZE_ADDRESS | 425 NO_SANITIZE_ADDRESS |
426 bool HeapObjectHeader::hasDebugMark() const | 426 bool HeapObjectHeader::hasDeadMark() const |
427 { | 427 { |
428 checkHeader(); | 428 checkHeader(); |
429 return m_size & debugBitMask; | 429 return m_size & deadBitMask; |
430 } | 430 } |
431 | 431 |
432 NO_SANITIZE_ADDRESS | 432 NO_SANITIZE_ADDRESS |
433 void HeapObjectHeader::clearDebugMark() | 433 void HeapObjectHeader::clearDeadMark() |
434 { | 434 { |
435 checkHeader(); | 435 checkHeader(); |
436 m_size &= ~debugBitMask; | 436 m_size &= ~deadBitMask; |
437 } | 437 } |
438 | 438 |
439 NO_SANITIZE_ADDRESS | 439 NO_SANITIZE_ADDRESS |
440 void HeapObjectHeader::setDebugMark() | 440 void HeapObjectHeader::setDeadMark() |
441 { | 441 { |
| 442 ASSERT(!isMarked()); |
442 checkHeader(); | 443 checkHeader(); |
443 m_size |= debugBitMask; | 444 m_size |= deadBitMask; |
444 } | 445 } |
445 | 446 |
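The unmark/hasDeadMark/clearDeadMark/setDeadMark accessors above all operate on bit masks stored in the header's size word. A minimal, self-contained sketch of that idea follows; the mask values and the 8-byte alignment assumption are illustrative, not the actual Blink header layout.

#include <cassert>
#include <cstddef>

// Sketch of a heap object header that packs GC flags into the low bits of the
// size word. This works because allocation sizes are assumed to be 8-byte
// aligned, leaving the low three bits unused by the size itself.
class SketchObjectHeader {
public:
    static const size_t freeListMask = 1; // entry sits on a free list
    static const size_t markBitMask = 2;  // reached during the current marking
    static const size_t deadBitMask = 4;  // known dead; must never be traced

    explicit SketchObjectHeader(size_t size) : m_size(size) { assert(!(size & 7)); }

    size_t size() const { return m_size & ~static_cast<size_t>(7); }

    bool isMarked() const { return m_size & markBitMask; }
    void mark() { m_size |= markBitMask; }
    void unmark() { m_size &= ~markBitMask; }

    bool hasDeadMark() const { return m_size & deadBitMask; }
    void setDeadMark() { assert(!isMarked()); m_size |= deadBitMask; }
    void clearDeadMark() { m_size &= ~deadBitMask; }

private:
    size_t m_size; // object size in the high bits, flag bits in the low bits
};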
446 #ifndef NDEBUG | 447 #ifndef NDEBUG |
447 NO_SANITIZE_ADDRESS | 448 NO_SANITIZE_ADDRESS |
448 void HeapObjectHeader::zapMagic() | 449 void HeapObjectHeader::zapMagic() |
449 { | 450 { |
450 m_magic = zappedMagic; | 451 m_magic = zappedMagic; |
451 } | 452 } |
452 #endif | 453 #endif |
453 | 454 |
(...skipping 39 matching lines...)
493 return heapObjectHeader()->unmark(); | 494 return heapObjectHeader()->unmark(); |
494 } | 495 } |
495 | 496 |
496 template<typename Header> | 497 template<typename Header> |
497 bool LargeHeapObject<Header>::isMarked() | 498 bool LargeHeapObject<Header>::isMarked() |
498 { | 499 { |
499 return heapObjectHeader()->isMarked(); | 500 return heapObjectHeader()->isMarked(); |
500 } | 501 } |
501 | 502 |
502 template<typename Header> | 503 template<typename Header> |
| 504 void LargeHeapObject<Header>::setDeadMark() |
| 505 { |
| 506 heapObjectHeader()->setDeadMark(); |
| 507 } |
| 508 |
| 509 template<typename Header> |
503 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 510 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
504 { | 511 { |
505 ASSERT(contains(address)); | 512 ASSERT(contains(address)); |
506 if (!objectContains(address)) | 513 if (!objectContains(address) || heapObjectHeader()->hasDeadMark()) |
507 return; | 514 return; |
508 #if ENABLE(GC_TRACING) | 515 #if ENABLE(GC_TRACING) |
509 visitor->setHostInfo(&address, "stack"); | 516 visitor->setHostInfo(&address, "stack"); |
510 #endif | 517 #endif |
511 mark(visitor); | 518 mark(visitor); |
512 } | 519 } |
513 | 520 |
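checkAndMarkPointer is handed raw addresses found by conservative stack scanning, so before the dead-mark check above can run, an address has to be mapped back to the page that contains it. A sketch of that mapping is below, assuming a power-of-two page size with the page header at the masked base address; the real Blink layout also reserves an OS guard page, so the constant and struct are illustrative only.

#include <cstdint>

const uintptr_t kSketchPageSize = uintptr_t(1) << 17; // assumed 128KB blink page
const uintptr_t kSketchPageBaseMask = ~(kSketchPageSize - 1);

struct SketchPageHeader { bool orphaned; bool terminating; };

// Masks an interior pointer down to the page boundary where the header lives.
inline SketchPageHeader* sketchPageHeaderFromAddress(void* object)
{
    uintptr_t address = reinterpret_cast<uintptr_t>(object);
    return reinterpret_cast<SketchPageHeader*>(address & kSketchPageBaseMask);
}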
514 template<> | 521 template<> |
515 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) | 522 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) |
516 { | 523 { |
(...skipping 28 matching lines...)
545 | 552 |
546 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload) | 553 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload) |
547 { | 554 { |
548 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); | 555 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); |
549 FinalizedHeapObjectHeader* header = | 556 FinalizedHeapObjectHeader* header = |
550 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); | 557 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); |
551 return header; | 558 return header; |
552 } | 559 } |
553 | 560 |
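fromPayload relies on the header being laid out directly in front of the object payload, so recovering the header is a constant-offset subtraction. A simplified sketch (the struct below stands in for FinalizedHeapObjectHeader):

#include <cstddef>
#include <cstdint>

struct SketchHeader { size_t size; };

// The header immediately precedes the payload, so the two convert back and
// forth with a fixed offset of sizeof(SketchHeader).
inline SketchHeader* sketchHeaderFromPayload(void* payload)
{
    uint8_t* address = reinterpret_cast<uint8_t*>(payload);
    return reinterpret_cast<SketchHeader*>(address - sizeof(SketchHeader));
}

inline void* sketchPayloadFromHeader(SketchHeader* header)
{
    return reinterpret_cast<uint8_t*>(header) + sizeof(SketchHeader);
}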
554 template<typename Header> | 561 template<typename Header> |
555 ThreadHeap<Header>::ThreadHeap(ThreadState* state) | 562 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) |
556 : m_currentAllocationPoint(0) | 563 : m_currentAllocationPoint(0) |
557 , m_remainingAllocationSize(0) | 564 , m_remainingAllocationSize(0) |
558 , m_firstPage(0) | 565 , m_firstPage(0) |
559 , m_firstLargeHeapObject(0) | 566 , m_firstLargeHeapObject(0) |
560 , m_biggestFreeListIndex(0) | 567 , m_biggestFreeListIndex(0) |
561 , m_threadState(state) | 568 , m_threadState(state) |
562 , m_pagePool(0) | 569 , m_index(index) |
563 { | 570 { |
564 clearFreeLists(); | 571 clearFreeLists(); |
565 } | 572 } |
566 | 573 |
567 template<typename Header> | 574 template<typename Header> |
568 ThreadHeap<Header>::~ThreadHeap() | 575 ThreadHeap<Header>::~ThreadHeap() |
569 { | 576 { |
570 clearFreeLists(); | 577 ASSERT(!m_firstPage); |
571 if (!ThreadState::current()->isMainThread()) | 578 ASSERT(!m_firstLargeHeapObject); |
572 assertEmpty(); | |
573 deletePages(); | |
574 } | 579 } |
575 | 580 |
576 template<typename Header> | 581 template<typename Header> |
| 582 void ThreadHeap<Header>::cleanupPages() |
| 583 { |
| 584 clearFreeLists(); |
| 585 flushHeapContainsCache(); |
| 586 |
| 587 // Add the ThreadHeap's pages to the orphanedPagePool. |
| 588 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) |
| 589 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 590 m_firstPage = 0; |
| 591 |
| 592 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) |
| 593 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
| 594 m_firstLargeHeapObject = 0; |
| 595 } |
| 596 |
| 597 template<typename Header> |
577 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) | 598 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) |
578 { | 599 { |
579 size_t allocationSize = allocationSizeFromSize(size); | 600 size_t allocationSize = allocationSizeFromSize(size); |
580 if (threadState()->shouldGC()) { | 601 if (threadState()->shouldGC()) { |
581 if (threadState()->shouldForceConservativeGC()) | 602 if (threadState()->shouldForceConservativeGC()) |
582 Heap::collectGarbage(ThreadState::HeapPointersOnStack); | 603 Heap::collectGarbage(ThreadState::HeapPointersOnStack); |
583 else | 604 else |
584 threadState()->setGCRequested(); | 605 threadState()->setGCRequested(); |
585 } | 606 } |
586 ensureCurrentAllocation(allocationSize, gcInfo); | 607 ensureCurrentAllocation(allocationSize, gcInfo); |
(...skipping 143 matching lines...)
730 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 751 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
731 { | 752 { |
732 flushHeapContainsCache(); | 753 flushHeapContainsCache(); |
733 object->unlink(previousNext); | 754 object->unlink(previousNext); |
734 object->finalize(); | 755 object->finalize(); |
735 | 756 |
736 // Unpoison the object header and allocationGranularity bytes after the | 757 // Unpoison the object header and allocationGranularity bytes after the |
737 // object before freeing. | 758 // object before freeing. |
738 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 759 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
739 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 760 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
740 delete object->storage(); | 761 |
741 } | 762 if (object->terminating()) { |
| 763 ASSERT(ThreadState::current()->isTerminating()); |
| 764 // The thread is shutting down so this object is being removed as part |
| 765 // of a thread local GC. In that case the object could be traced in the |
| 766 // next global GC either due to a dead object being traced via a |
| 767 // conservative pointer or due to a programming error where an object |
| 768 // in another thread heap keeps a dangling pointer to this object. |
| 769 // To guard against this we put the large object memory in the |
| 770 // orphanedPagePool to ensure it is still reachable. After the next global |
| 771 // GC it can be released assuming no rogue/dangling pointers refer to |
| 772 // it. |
| 773 // NOTE: large objects are not moved to the free page pool as it is |
| 774 // unlikely they can be reused due to their individual sizes. |
| 775 Heap::orphanedPagePool()->addOrphanedPage(m_index, object); |
| 776 } else { |
| 777 ASSERT(!ThreadState::current()->isTerminating()); |
| 778 PageMemory* memory = object->storage(); |
| 779 object->~LargeHeapObject<Header>(); |
| 780 delete memory; |
| 781 } |
| 782 } |
| 783 |
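freeLargeObject is called while sweeping the singly linked large object list, and the previousNext argument is a pointer to the slot that currently refers to the object, so unlinking is a single store with no special case for the list head. A stand-alone sketch of that pattern, with illustrative types:

struct SketchLargeObject {
    SketchLargeObject* next;
    bool marked;
};

// Sweeps the list: marked nodes are unmarked and kept, unmarked nodes are
// unlinked through previousNext and freed.
void sketchSweepLargeObjects(SketchLargeObject** head)
{
    SketchLargeObject** previousNext = head;
    for (SketchLargeObject* current = *head; current;) {
        if (current->marked) {
            current->marked = false;
            previousNext = &current->next;
            current = current->next;
        } else {
            SketchLargeObject* dead = current;
            current = current->next;
            *previousNext = current; // unlink before the node is destroyed
            delete dead;
        }
    }
}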
| 784 template<typename DataType> |
| 785 PagePool<DataType>::PagePool() |
| 786 { |
| 787 for (int i = 0; i < NumberOfHeaps; ++i) { |
| 788 m_pool[i] = 0; |
| 789 } |
| 790 } |
| 791 |
| 792 FreePagePool::~FreePagePool() |
| 793 { |
| 794 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 795 while (PoolEntry* entry = m_pool[index]) { |
| 796 m_pool[index] = entry->next; |
| 797 PageMemory* memory = entry->data; |
| 798 ASSERT(memory); |
| 799 delete memory; |
| 800 delete entry; |
| 801 } |
| 802 } |
| 803 } |
| 804 |
| 805 void FreePagePool::addFreePage(int index, PageMemory* memory) |
| 806 { |
| 807 // When adding a page to the pool we decommit it to ensure it is unused |
| 808 // while in the pool. This also allows the physical memory, backing the |
| 809 // page, to be given back to the OS. |
| 810 memory->decommit(); |
| 811 MutexLocker locker(m_mutex[index]); |
| 812 PoolEntry* entry = new PoolEntry(memory, m_pool[index]); |
| 813 m_pool[index] = entry; |
| 814 } |
| 815 |
| 816 PageMemory* FreePagePool::takeFreePage(int index) |
| 817 { |
| 818 MutexLocker locker(m_mutex[index]); |
| 819 while (PoolEntry* entry = m_pool[index]) { |
| 820 m_pool[index] = entry->next; |
| 821 PageMemory* memory = entry->data; |
| 822 ASSERT(memory); |
| 823 delete entry; |
| 824 if (memory->commit()) |
| 825 return memory; |
| 826 |
| 827 // We got some memory, but failed to commit it, try again. |
| 828 delete memory; |
| 829 } |
| 830 return 0; |
| 831 } |
| 832 |
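The FreePagePool added in this patch is essentially one mutex-protected LIFO of decommitted page memory per heap index, shared between threads. The sketch below captures that shape with simplified types; the NumberOfHeaps value and the commit/decommit behaviour are assumptions for illustration (a real commit can fail, which is why takeFreePage loops).

#include <mutex>
#include <vector>

const int kSketchNumberOfHeaps = 8; // assumed; the real constant is defined elsewhere

struct SketchPageMemory {
    bool committed;
    SketchPageMemory() : committed(true) { }
    void decommit() { committed = false; }
    bool commit() { committed = true; return true; } // a real commit may fail
};

class SketchFreePagePool {
public:
    void addFreePage(int index, SketchPageMemory* memory)
    {
        memory->decommit(); // unused while pooled, so the backing memory can go back to the OS
        std::lock_guard<std::mutex> locker(m_mutex[index]);
        m_pool[index].push_back(memory);
    }

    SketchPageMemory* takeFreePage(int index)
    {
        std::lock_guard<std::mutex> locker(m_mutex[index]);
        while (!m_pool[index].empty()) {
            SketchPageMemory* memory = m_pool[index].back();
            m_pool[index].pop_back();
            if (memory->commit())
                return memory;
            delete memory; // commit failed; discard this entry and try the next
        }
        return 0;
    }

private:
    std::mutex m_mutex[kSketchNumberOfHeaps];
    std::vector<SketchPageMemory*> m_pool[kSketchNumberOfHeaps];
};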
| 833 OrphanedPagePool::~OrphanedPagePool() |
| 834 { |
| 835 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 836 while (PoolEntry* entry = m_pool[index]) { |
| 837 m_pool[index] = entry->next; |
| 838 BaseHeapPage* page = entry->data; |
| 839 delete entry; |
| 840 PageMemory* memory = page->storage(); |
| 841 ASSERT(memory); |
| 842 page->~BaseHeapPage(); |
| 843 delete memory; |
| 844 } |
| 845 } |
| 846 } |
| 847 |
| 848 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page) |
| 849 { |
| 850 page->markOrphaned(); |
| 851 PoolEntry* entry = new PoolEntry(page, m_pool[index]); |
| 852 m_pool[index] = entry; |
| 853 } |
| 854 |
| 855 void OrphanedPagePool::decommitOrphanedPages() |
| 856 { |
| 857 #ifndef NDEBUG |
| 858 // No locking needed as all threads are at safepoints at this point in time. |
| 859 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 860 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
| 861 ASSERT((*it)->isAtSafePoint()); |
| 862 #endif |
| 863 |
| 864 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 865 PoolEntry* entry = m_pool[index]; |
| 866 PoolEntry** prevNext = &m_pool[index]; |
| 867 while (entry) { |
| 868 BaseHeapPage* page = entry->data; |
| 869 if (page->tracedAfterOrphaned()) { |
| 870 // If the orphaned page was traced in the last GC it is not |
| 871 // decommitted. We only decommit a page, ie. put it in the |
| 872 // memory pool, when the page has no objects pointing to it. |
| 873 // We remark the page as orphaned to clear the tracedAfterOrphaned |
| 874 // flag and any object trace bits that were set during tracing. |
| 875 page->markOrphaned(); |
| 876 prevNext = &entry->next; |
| 877 entry = entry->next; |
| 878 continue; |
| 879 } |
| 880 |
| 881 // Page was not traced. Check if we should reuse the memory or just |
| 882 // free it. Large object memory is not reused, but freed, normal |
| 883 // blink heap pages are reused. |
| 884 // NOTE: We call the destructor before freeing or adding to the |
| 885 // free page pool. |
| 886 PageMemory* memory = page->storage(); |
| 887 if (page->isLargeObject()) { |
| 888 page->~BaseHeapPage(); |
| 889 delete memory; |
| 890 } else { |
| 891 page->~BaseHeapPage(); |
| 892 // Clear out the page before adding it to the free page pool to |
| 893 // ensure it is zero filled when being reused. |
| 894 asanMemset(memory->writableStart(), 0, blinkPagePayloadSize()); |
| 895 Heap::freePagePool()->addFreePage(index, memory); |
| 896 } |
| 897 |
| 898 PoolEntry* deadEntry = entry; |
| 899 entry = entry->next; |
| 900 *prevNext = entry; |
| 901 delete deadEntry; |
| 902 } |
| 903 } |
| 904 } |
| 905 |
| 906 #ifndef NDEBUG |
| 907 bool OrphanedPagePool::contains(void* object) |
| 908 { |
| 909 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 910 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { |
| 911 BaseHeapPage* page = entry->data; |
| 912 if (page->contains(reinterpret_cast<Address>(object))) |
| 913 return true; |
| 914 } |
| 915 } |
| 916 return false; |
| 917 } |
| 918 #endif |
742 | 919 |
743 template<> | 920 template<> |
744 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) | 921 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) |
745 { | 922 { |
746 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on | 923 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on |
747 // the heap should be unused (ie. 0). | 924 // the heap should be unused (ie. 0). |
748 allocatePage(0); | 925 allocatePage(0); |
749 } | 926 } |
750 | 927 |
751 template<> | 928 template<> |
752 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) | 929 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) |
753 { | 930 { |
754 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap | 931 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap |
755 // since it is the same for all objects | 932 // since it is the same for all objects |
756 ASSERT(gcInfo); | 933 ASSERT(gcInfo); |
757 allocatePage(gcInfo); | 934 allocatePage(gcInfo); |
758 } | 935 } |
759 | 936 |
760 template<typename Header> | 937 template <typename Header> |
761 void ThreadHeap<Header>::clearPagePool() | 938 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) |
762 { | |
763 while (takePageFromPool()) { } | |
764 } | |
765 | |
766 template<typename Header> | |
767 PageMemory* ThreadHeap<Header>::takePageFromPool() | |
768 { | |
769 Heap::flushHeapDoesNotContainCache(); | |
770 while (PagePoolEntry* entry = m_pagePool) { | |
771 m_pagePool = entry->next(); | |
772 PageMemory* storage = entry->storage(); | |
773 delete entry; | |
774 | |
775 if (storage->commit()) | |
776 return storage; | |
777 | |
778 // Failed to commit pooled storage. Release it. | |
779 delete storage; | |
780 } | |
781 | |
782 return 0; | |
783 } | |
784 | |
785 template<typename Header> | |
786 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage) | |
787 { | 939 { |
788 flushHeapContainsCache(); | 940 flushHeapContainsCache(); |
789 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 941 if (page->terminating()) { |
790 m_pagePool = entry; | 942 ASSERT(ThreadState::current()->isTerminating()); |
791 } | 943 // The thread is shutting down so this page is being removed as part |
792 | 944 // of a thread local GC. In that case the page could be accessed in the |
793 template <typename Header> | 945 // next global GC either due to a dead object being traced via a |
794 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) | 946 // conservative pointer or due to a programming error where an object |
795 { | 947 // in another thread heap keeps a dangling pointer to this object. |
796 PageMemory* storage = page->storage(); | 948 // To guard against this we put the page in the orphanedPagePool to |
797 storage->decommit(); | 949 // ensure it is still reachable. After the next global GC it can be |
798 addPageMemoryToPool(storage); | 950 // decommitted and moved to the page pool assuming no rogue/dangling |
| 951 // pointers refer to it. |
| 952 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 953 } else { |
| 954 ASSERT(!ThreadState::current()->isTerminating()); |
| 955 PageMemory* memory = page->storage(); |
| 956 page->~HeapPage<Header>(); |
| 957 Heap::freePagePool()->addFreePage(m_index, memory); |
| 958 } |
799 } | 959 } |
800 | 960 |
801 template<typename Header> | 961 template<typename Header> |
802 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 962 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
803 { | 963 { |
804 Heap::flushHeapDoesNotContainCache(); | 964 Heap::flushHeapDoesNotContainCache(); |
805 PageMemory* pageMemory = takePageFromPool(); | 965 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
806 if (!pageMemory) { | 966 // We continue allocating page memory until we succeed in getting one. |
| 967 // Since the FreePagePool is global other threads could use all the |
| 968 // newly allocated page memory before this thread calls takeFreePage. |
| 969 while (!pageMemory) { |
807 // Allocate a memory region for blinkPagesPerRegion pages that | 970 // Allocate a memory region for blinkPagesPerRegion pages that |
808 // will each have the following layout. | 971 // will each have the following layout. |
809 // | 972 // |
810 // [ guard os page | ... payload ... | guard os page ] | 973 // [ guard os page | ... payload ... | guard os page ] |
811 // ^---{ aligned to blink page size } | 974 // ^---{ aligned to blink page size } |
812 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); | 975 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
813 // Setup the PageMemory object for each of the pages in the | 976 // Setup the PageMemory object for each of the pages in the |
814 // region. | 977 // region. |
815 size_t offset = 0; | 978 size_t offset = 0; |
816 for (size_t i = 0; i < blinkPagesPerRegion; i++) { | 979 for (size_t i = 0; i < blinkPagesPerRegion; i++) { |
817 addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); | 980 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); |
818 offset += blinkPageSize; | 981 offset += blinkPageSize; |
819 } | 982 } |
820 pageMemory = takePageFromPool(); | 983 pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
821 RELEASE_ASSERT(pageMemory); | |
822 } | 984 } |
823 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 985 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
824 // FIXME: Oilpan: Linking new pages into the front of the list is | 986 // FIXME: Oilpan: Linking new pages into the front of the list is |
825 // crucial when performing allocations during finalization because | 987 // crucial when performing allocations during finalization because |
826 // it ensures that those pages are not swept in the current GC | 988 // it ensures that those pages are not swept in the current GC |
827 // round. We should create a separate page list for that to | 989 // round. We should create a separate page list for that to |
828 // separate out the pages allocated during finalization clearly | 990 // separate out the pages allocated during finalization clearly |
829 // from the pages currently being swept. | 991 // from the pages currently being swept. |
830 page->link(&m_firstPage); | 992 page->link(&m_firstPage); |
831 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); | 993 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); |
(...skipping 23 matching lines...)
855 ASSERT(isConsistentForGC()); | 1017 ASSERT(isConsistentForGC()); |
856 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING | 1018 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING |
857 // When using ASan do a pre-sweep where all unmarked objects are poisoned before | 1019 // When using ASan do a pre-sweep where all unmarked objects are poisoned before |
858 // calling their finalizer methods. This can catch the cases where one object's | 1020 // calling their finalizer methods. This can catch the cases where one object's |
859 // finalizer tries to modify another object as part of finalization. | 1021 // finalizer tries to modify another object as part of finalization. |
860 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1022 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
861 page->poisonUnmarkedObjects(); | 1023 page->poisonUnmarkedObjects(); |
862 #endif | 1024 #endif |
863 HeapPage<Header>* page = m_firstPage; | 1025 HeapPage<Header>* page = m_firstPage; |
864 HeapPage<Header>** previous = &m_firstPage; | 1026 HeapPage<Header>** previous = &m_firstPage; |
865 bool pagesRemoved = false; | |
866 while (page) { | 1027 while (page) { |
867 if (page->isEmpty()) { | 1028 if (page->isEmpty()) { |
868 flushHeapContainsCache(); | |
869 HeapPage<Header>* unused = page; | 1029 HeapPage<Header>* unused = page; |
870 page = page->next(); | 1030 page = page->next(); |
871 HeapPage<Header>::unlink(unused, previous); | 1031 HeapPage<Header>::unlink(unused, previous); |
872 pagesRemoved = true; | |
873 } else { | 1032 } else { |
874 page->sweep(); | 1033 page->sweep(); |
875 previous = &page->m_next; | 1034 previous = &page->m_next; |
876 page = page->next(); | 1035 page = page->next(); |
877 } | 1036 } |
878 } | 1037 } |
879 if (pagesRemoved) | |
880 flushHeapContainsCache(); | |
881 | 1038 |
882 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 1039 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
883 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 1040 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
884 if (current->isMarked()) { | 1041 if (current->isMarked()) { |
885 stats().increaseAllocatedSpace(current->size()); | 1042 stats().increaseAllocatedSpace(current->size()); |
886 stats().increaseObjectSpace(current->payloadSize()); | 1043 stats().increaseObjectSpace(current->payloadSize()); |
887 current->unmark(); | 1044 current->unmark(); |
888 previousNext = ¤t->m_next; | 1045 previousNext = ¤t->m_next; |
889 current = current->next(); | 1046 current = current->next(); |
890 } else { | 1047 } else { |
891 LargeHeapObject<Header>* next = current->next(); | 1048 LargeHeapObject<Header>* next = current->next(); |
892 freeLargeObject(current, previousNext); | 1049 freeLargeObject(current, previousNext); |
893 current = next; | 1050 current = next; |
894 } | 1051 } |
895 } | 1052 } |
896 } | 1053 } |
897 | 1054 |
898 template<typename Header> | 1055 template<typename Header> |
899 void ThreadHeap<Header>::assertEmpty() | |
900 { | |
901 // No allocations are permitted. The thread is exiting. | |
902 NoAllocationScope<AnyThread> noAllocation; | |
903 makeConsistentForGC(); | |
904 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | |
905 Address end = page->end(); | |
906 Address headerAddress; | |
907 for (headerAddress = page->payload(); headerAddress < end; ) { | |
908 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); | |
909 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | |
910 // A live object is potentially a dangling pointer from | |
911 // some root. Treat that as a bug. Unfortunately, it is | |
912 // hard to reliably check in the presence of conservative | |
913 // stack scanning. Something could be conservatively kept | |
914 // alive because a non-pointer on another thread's stack | |
915 // is treated as a pointer into the heap. | |
916 // | |
917 // FIXME: This assert can currently trigger in cases where | |
918 // worker shutdown does not get enough precise GCs to get | |
919 // all objects removed from the worker heap. There are two | |
920 // issues: 1) conservative GCs keeping objects alive, and | |
921 // 2) long chains of RefPtrs/Persistents that require more | |
922 // GCs to get everything cleaned up. Maybe we can keep | |
923 // threads alive until their heaps become empty instead of | |
924 // forcing the threads to die immediately? | |
925 ASSERT(Heap::lastGCWasConservative() || basicHeader->isFree()); | |
926 if (basicHeader->isFree()) | |
927 addToFreeList(headerAddress, basicHeader->size()); | |
928 headerAddress += basicHeader->size(); | |
929 } | |
930 ASSERT(headerAddress == end); | |
931 } | |
932 | |
933 ASSERT(Heap::lastGCWasConservative() || !m_firstLargeHeapObject); | |
934 } | |
935 | |
936 template<typename Header> | |
937 bool ThreadHeap<Header>::isConsistentForGC() | 1056 bool ThreadHeap<Header>::isConsistentForGC() |
938 { | 1057 { |
939 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | 1058 for (size_t i = 0; i < blinkPageSizeLog2; i++) { |
940 if (m_freeLists[i]) | 1059 if (m_freeLists[i]) |
941 return false; | 1060 return false; |
942 } | 1061 } |
943 return !ownsNonEmptyAllocationArea(); | 1062 return !ownsNonEmptyAllocationArea(); |
944 } | 1063 } |
945 | 1064 |
946 template<typename Header> | 1065 template<typename Header> |
947 void ThreadHeap<Header>::makeConsistentForGC() | 1066 void ThreadHeap<Header>::makeConsistentForGC() |
948 { | 1067 { |
949 if (ownsNonEmptyAllocationArea()) | 1068 if (ownsNonEmptyAllocationArea()) |
950 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 1069 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
951 setAllocationPoint(0, 0); | 1070 setAllocationPoint(0, 0); |
952 clearFreeLists(); | 1071 clearFreeLists(); |
953 } | 1072 } |
954 | 1073 |
955 template<typename Header> | 1074 template<typename Header> |
956 void ThreadHeap<Header>::clearMarks() | 1075 void ThreadHeap<Header>::clearLiveAndMarkDead() |
957 { | 1076 { |
958 ASSERT(isConsistentForGC()); | 1077 ASSERT(isConsistentForGC()); |
959 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1078 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
960 page->clearMarks(); | 1079 page->clearLiveAndMarkDead(); |
961 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) | 1080 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
962 current->unmark(); | 1081 if (current->isMarked()) |
| 1082 current->unmark(); |
| 1083 else |
| 1084 current->setDeadMark(); |
| 1085 } |
963 } | 1086 } |
964 | 1087 |
965 template<typename Header> | 1088 template<typename Header> |
966 void ThreadHeap<Header>::deletePages() | |
967 { | |
968 flushHeapContainsCache(); | |
969 // Add all pages in the pool to the heap's list of pages before deleting | |
970 clearPagePool(); | |
971 | |
972 for (HeapPage<Header>* page = m_firstPage; page; ) { | |
973 HeapPage<Header>* dead = page; | |
974 page = page->next(); | |
975 PageMemory* storage = dead->storage(); | |
976 dead->~HeapPage(); | |
977 delete storage; | |
978 } | |
979 m_firstPage = 0; | |
980 | |
981 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | |
982 LargeHeapObject<Header>* dead = current; | |
983 current = current->next(); | |
984 PageMemory* storage = dead->storage(); | |
985 dead->~LargeHeapObject(); | |
986 delete storage; | |
987 } | |
988 m_firstLargeHeapObject = 0; | |
989 } | |
990 | |
991 template<typename Header> | |
992 void ThreadHeap<Header>::clearFreeLists() | 1089 void ThreadHeap<Header>::clearFreeLists() |
993 { | 1090 { |
994 for (size_t i = 0; i < blinkPageSizeLog2; i++) | 1091 for (size_t i = 0; i < blinkPageSizeLog2; i++) |
995 m_freeLists[i] = 0; | 1092 m_freeLists[i] = 0; |
996 } | 1093 } |
997 | 1094 |
998 int BaseHeap::bucketIndexForSize(size_t size) | 1095 int BaseHeap::bucketIndexForSize(size_t size) |
999 { | 1096 { |
1000 ASSERT(size > 0); | 1097 ASSERT(size > 0); |
1001 int index = -1; | 1098 int index = -1; |
(...skipping 20 matching lines...)
1022 void HeapPage<Header>::link(HeapPage** prevNext) | 1119 void HeapPage<Header>::link(HeapPage** prevNext) |
1023 { | 1120 { |
1024 m_next = *prevNext; | 1121 m_next = *prevNext; |
1025 *prevNext = this; | 1122 *prevNext = this; |
1026 } | 1123 } |
1027 | 1124 |
1028 template<typename Header> | 1125 template<typename Header> |
1029 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) | 1126 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) |
1030 { | 1127 { |
1031 *prevNext = unused->m_next; | 1128 *prevNext = unused->m_next; |
1032 unused->heap()->addPageToPool(unused); | 1129 unused->heap()->removePageFromHeap(unused); |
1033 } | 1130 } |
1034 | 1131 |
1035 template<typename Header> | 1132 template<typename Header> |
1036 void HeapPage<Header>::getStats(HeapStats& stats) | 1133 void HeapPage<Header>::getStats(HeapStats& stats) |
1037 { | 1134 { |
1038 stats.increaseAllocatedSpace(blinkPageSize); | 1135 stats.increaseAllocatedSpace(blinkPageSize); |
1039 Address headerAddress = payload(); | 1136 Address headerAddress = payload(); |
1040 ASSERT(headerAddress != end()); | 1137 ASSERT(headerAddress != end()); |
1041 do { | 1138 do { |
1042 Header* header = reinterpret_cast<Header*>(headerAddress); | 1139 Header* header = reinterpret_cast<Header*>(headerAddress); |
(...skipping 62 matching lines...)
1105 header->unmark(); | 1202 header->unmark(); |
1106 headerAddress += header->size(); | 1203 headerAddress += header->size(); |
1107 heap()->stats().increaseObjectSpace(header->payloadSize()); | 1204 heap()->stats().increaseObjectSpace(header->payloadSize()); |
1108 startOfGap = headerAddress; | 1205 startOfGap = headerAddress; |
1109 } | 1206 } |
1110 if (startOfGap != end()) | 1207 if (startOfGap != end()) |
1111 heap()->addToFreeList(startOfGap, end() - startOfGap); | 1208 heap()->addToFreeList(startOfGap, end() - startOfGap); |
1112 } | 1209 } |
1113 | 1210 |
1114 template<typename Header> | 1211 template<typename Header> |
1115 void HeapPage<Header>::clearMarks() | 1212 void HeapPage<Header>::clearLiveAndMarkDead() |
1116 { | 1213 { |
1117 for (Address headerAddress = payload(); headerAddress < end();) { | 1214 for (Address headerAddress = payload(); headerAddress < end();) { |
1118 Header* header = reinterpret_cast<Header*>(headerAddress); | 1215 Header* header = reinterpret_cast<Header*>(headerAddress); |
1119 ASSERT(header->size() < blinkPagePayloadSize()); | 1216 ASSERT(header->size() < blinkPagePayloadSize()); |
1120 if (!header->isFree()) | 1217 // Check if this is a free list entry first since we cannot call |
| 1218 // isMarked on a free list entry. |
| 1219 if (header->isFree()) { |
| 1220 headerAddress += header->size(); |
| 1221 continue; |
| 1222 } |
| 1223 if (header->isMarked()) |
1121 header->unmark(); | 1224 header->unmark(); |
| 1225 else |
| 1226 header->setDeadMark(); |
1122 headerAddress += header->size(); | 1227 headerAddress += header->size(); |
1123 } | 1228 } |
1124 } | 1229 } |
1125 | 1230 |
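The page walk in clearLiveAndMarkDead advances from header to header by object size, skips free list entries (whose mark bit is meaningless), clears the mark bit on survivors, and dead-marks everything else so a later conservative pointer cannot resurrect it. A compact sketch of the same walk over a simplified, assumed header layout:

#include <cstdint>

struct SketchWalkHeader {
    uint32_t size; // total size including the header itself
    bool isFree;
    bool marked;
    bool dead;
};

void sketchClearLiveAndMarkDead(uint8_t* payload, uint8_t* end)
{
    for (uint8_t* headerAddress = payload; headerAddress < end;) {
        SketchWalkHeader* header = reinterpret_cast<SketchWalkHeader*>(headerAddress);
        if (!header->isFree) {
            if (header->marked)
                header->marked = false; // survived the last GC; reset for the next cycle
            else
                header->dead = true;    // unswept garbage; must never be traced again
        }
        headerAddress += header->size;  // jump to the next object header
    }
}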
1126 template<typename Header> | 1231 template<typename Header> |
1127 void HeapPage<Header>::populateObjectStartBitMap() | 1232 void HeapPage<Header>::populateObjectStartBitMap() |
1128 { | 1233 { |
1129 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); | 1234 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); |
1130 Address start = payload(); | 1235 Address start = payload(); |
1131 for (Address headerAddress = start; headerAddress < end();) { | 1236 for (Address headerAddress = start; headerAddress < end();) { |
(...skipping 59 matching lines...)
1191 if (header->isFree()) | 1296 if (header->isFree()) |
1192 return 0; | 1297 return 0; |
1193 return header; | 1298 return header; |
1194 } | 1299 } |
1195 | 1300 |
1196 template<typename Header> | 1301 template<typename Header> |
1197 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 1302 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
1198 { | 1303 { |
1199 ASSERT(contains(address)); | 1304 ASSERT(contains(address)); |
1200 Header* header = findHeaderFromAddress(address); | 1305 Header* header = findHeaderFromAddress(address); |
1201 if (!header) | 1306 if (!header || header->hasDeadMark()) |
1202 return; | 1307 return; |
1203 | 1308 |
1204 #if ENABLE(GC_TRACING) | 1309 #if ENABLE(GC_TRACING) |
1205 visitor->setHostInfo(&address, "stack"); | 1310 visitor->setHostInfo(&address, "stack"); |
1206 #endif | 1311 #endif |
1207 if (hasVTable(header) && !vTableInitialized(header->payload())) | 1312 if (hasVTable(header) && !vTableInitialized(header->payload())) |
1208 visitor->markConservatively(header); | 1313 visitor->markConservatively(header); |
1209 else | 1314 else |
1210 visitor->mark(header, traceCallback(header)); | 1315 visitor->mark(header, traceCallback(header)); |
1211 } | 1316 } |
(...skipping 167 matching lines...)
1379 { | 1484 { |
1380 for (size_t i = 0; i < bufferSize; i++) | 1485 for (size_t i = 0; i < bufferSize; i++) |
1381 m_buffer[i] = Item(0, 0); | 1486 m_buffer[i] = Item(0, 0); |
1382 } | 1487 } |
1383 | 1488 |
1384 bool CallbackStack::isEmpty() | 1489 bool CallbackStack::isEmpty() |
1385 { | 1490 { |
1386 return m_current == &(m_buffer[0]) && !m_next; | 1491 return m_current == &(m_buffer[0]) && !m_next; |
1387 } | 1492 } |
1388 | 1493 |
| 1494 template<CallbackInvocationMode Mode> |
1389 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) | 1495 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) |
1390 { | 1496 { |
1391 if (m_current == &(m_buffer[0])) { | 1497 if (m_current == &(m_buffer[0])) { |
1392 if (!m_next) { | 1498 if (!m_next) { |
1393 #ifndef NDEBUG | 1499 #ifndef NDEBUG |
1394 clearUnused(); | 1500 clearUnused(); |
1395 #endif | 1501 #endif |
1396 return false; | 1502 return false; |
1397 } | 1503 } |
1398 CallbackStack* nextStack = m_next; | 1504 CallbackStack* nextStack = m_next; |
1399 *first = nextStack; | 1505 *first = nextStack; |
1400 delete this; | 1506 delete this; |
1401 return nextStack->popAndInvokeCallback(first, visitor); | 1507 return nextStack->popAndInvokeCallback<Mode>(first, visitor); |
1402 } | 1508 } |
1403 Item* item = --m_current; | 1509 Item* item = --m_current; |
1404 | 1510 |
| 1511 // If the object being traced is located on a page which is dead don't |
| 1512 // trace it. This can happen when a conservative GC kept a dead object |
| 1513 // alive which pointed to a (now gone) object on the cleaned up page. |
| 1514 // Also if doing a thread local GC don't trace objects that are located |
| 1515 // on other thread's heaps, ie. pages where the terminating flag is not |
| 1516 // set. |
| 1517 BaseHeapPage* heapPage = pageHeaderFromObject(item->object()); |
| 1518 if (Mode == GlobalMarking && heapPage->orphaned()) { |
| 1519 // When doing a global GC we should only get a trace callback to an orphaned |
| 1520 // page if the GC is conservative. If it is not conservative there is |
| 1521 // a bug in the code where we have a dangling pointer to a page |
| 1522 // on the dead thread. |
| 1523 RELEASE_ASSERT(Heap::lastGCWasConservative()); |
| 1524 heapPage->setTracedAfterOrphaned(); |
| 1525 return true; |
| 1526 } |
| 1527 if (Mode == ThreadLocalMarking && (heapPage->orphaned() || !heapPage->terminating())) |
| 1528 return true; |
| 1529 // For WeaknessProcessing we should never reach orphaned pages since |
| 1530 // they should never be registered, as objects on orphaned pages are not |
| 1531 // traced. We cannot assert this here since we might have an off-heap |
| 1532 // collection. However we assert it in Heap::pushWeakObjectPointerCallback. |
| 1533 |
1405 VisitorCallback callback = item->callback(); | 1534 VisitorCallback callback = item->callback(); |
1406 #if ENABLE(GC_TRACING) | 1535 #if ENABLE(GC_TRACING) |
1407 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback | 1536 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback |
1408 visitor->setHostInfo(item->object(), classOf(item->object())); | 1537 visitor->setHostInfo(item->object(), classOf(item->object())); |
1409 #endif | 1538 #endif |
1410 callback(visitor, item->object()); | 1539 callback(visitor, item->object()); |
1411 | 1540 |
1412 return true; | 1541 return true; |
1413 } | 1542 } |
1414 | 1543 |
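The new CallbackInvocationMode template parameter lets the same pop-and-invoke code filter items differently per GC kind without a runtime flag: global marking records (but skips) orphaned pages, thread local marking skips everything outside the terminating thread's heap, and weakness processing expects never to see orphaned pages at all. A minimal sketch of that compile-time dispatch with stand-in types:

enum SketchInvocationMode { SketchGlobalMarking, SketchThreadLocalMarking, SketchWeaknessProcessing };

struct SketchPage { bool orphaned; bool terminating; };
struct SketchItem { SketchPage* page; void (*callback)(void*); void* object; };

// Returns true when the callback was actually invoked.
template<SketchInvocationMode Mode>
bool sketchInvokeUnlessFiltered(const SketchItem& item)
{
    if (Mode == SketchGlobalMarking && item.page->orphaned)
        return false; // page belongs to a dead thread; do not trace into it
    if (Mode == SketchThreadLocalMarking && (item.page->orphaned || !item.page->terminating))
        return false; // a thread local GC never traces other threads' pages
    item.callback(item.object);
    return true;
}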
(...skipping 17 matching lines...)
1432 { | 1561 { |
1433 // Recurse first (bufferSize at a time) so we get to the newly added entries | 1562 // Recurse first (bufferSize at a time) so we get to the newly added entries |
1434 // last. | 1563 // last. |
1435 if (m_next) | 1564 if (m_next) |
1436 m_next->invokeOldestCallbacks(visitor); | 1565 m_next->invokeOldestCallbacks(visitor); |
1437 | 1566 |
1438 // This loop can tolerate entries being added by the callbacks after | 1567 // This loop can tolerate entries being added by the callbacks after |
1439 // iteration starts. | 1568 // iteration starts. |
1440 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1569 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
1441 Item& item = m_buffer[i]; | 1570 Item& item = m_buffer[i]; |
| 1571 |
| 1572 // We don't need to check for orphaned pages when popping an ephemeron |
| 1573 // callback since the callback is only pushed after the object containing |
| 1574 // it has been traced. There are basically three cases to consider: |
| 1575 // 1. Member<EphemeronCollection> |
| 1576 // 2. EphemeronCollection is part of a containing object |
| 1577 // 3. EphemeronCollection is a value object in a collection |
| 1578 // |
| 1579 // Ad. 1. In this case we push the start of the ephemeron on the |
| 1580 // marking stack and do the orphaned page check when popping it off |
| 1581 // the marking stack. |
| 1582 // Ad. 2. The containing object cannot be on an orphaned page since |
| 1583 // in that case we wouldn't have traced its parts. This also means |
| 1584 // the ephemeron collection is not on the orphaned page. |
| 1585 // Ad. 3. Is the same as 2. The collection containing the ephemeron |
| 1586 // collection as a value object cannot be on an orphaned page since |
| 1587 // it would not have traced its values in that case. |
1442 item.callback()(visitor, item.object()); | 1588 item.callback()(visitor, item.object()); |
1443 } | 1589 } |
1444 } | 1590 } |
1445 | 1591 |
1446 #ifndef NDEBUG | 1592 #ifndef NDEBUG |
1447 bool CallbackStack::hasCallbackForObject(const void* object) | 1593 bool CallbackStack::hasCallbackForObject(const void* object) |
1448 { | 1594 { |
1449 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1595 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
1450 Item* item = &m_buffer[i]; | 1596 Item* item = &m_buffer[i]; |
1451 if (item->object() == object) { | 1597 if (item->object() == object) { |
(...skipping 11 matching lines...) Expand all Loading... |
1463 public: | 1609 public: |
1464 #if ENABLE(GC_TRACING) | 1610 #if ENABLE(GC_TRACING) |
1465 typedef HashSet<uintptr_t> LiveObjectSet; | 1611 typedef HashSet<uintptr_t> LiveObjectSet; |
1466 typedef HashMap<String, LiveObjectSet> LiveObjectMap; | 1612 typedef HashMap<String, LiveObjectSet> LiveObjectMap; |
1467 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph; | 1613 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph; |
1468 #endif | 1614 #endif |
1469 | 1615 |
1470 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) | 1616 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) |
1471 { | 1617 { |
1472 ASSERT(header); | 1618 ASSERT(header); |
| 1619 // Check that we are not marking objects that are outside the heap by calling Heap::contains. |
| 1620 // However we cannot call Heap::contains when outside a GC and we call mark when doing weakness |
| 1621 // for ephemerons. Hence we only check when called within. |
| 1622 ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header)); |
1473 ASSERT(objectPointer); | 1623 ASSERT(objectPointer); |
1474 if (header->isMarked()) | 1624 if (header->isMarked()) |
1475 return; | 1625 return; |
1476 header->mark(); | 1626 header->mark(); |
1477 #if ENABLE(GC_TRACING) | 1627 #if ENABLE(GC_TRACING) |
1478 MutexLocker locker(objectGraphMutex()); | 1628 MutexLocker locker(objectGraphMutex()); |
1479 String className(classOf(objectPointer)); | 1629 String className(classOf(objectPointer)); |
1480 { | 1630 { |
1481 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet()); | 1631 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet()); |
1482 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); | 1632 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); |
(...skipping 201 matching lines...)
1684 }; | 1834 }; |
1685 | 1835 |
1686 void Heap::init() | 1836 void Heap::init() |
1687 { | 1837 { |
1688 ThreadState::init(); | 1838 ThreadState::init(); |
1689 CallbackStack::init(&s_markingStack); | 1839 CallbackStack::init(&s_markingStack); |
1690 CallbackStack::init(&s_weakCallbackStack); | 1840 CallbackStack::init(&s_weakCallbackStack); |
1691 CallbackStack::init(&s_ephemeronStack); | 1841 CallbackStack::init(&s_ephemeronStack); |
1692 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | 1842 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
1693 s_markingVisitor = new MarkingVisitor(); | 1843 s_markingVisitor = new MarkingVisitor(); |
| 1844 s_freePagePool = new FreePagePool(); |
| 1845 s_orphanedPagePool = new OrphanedPagePool(); |
1694 } | 1846 } |
1695 | 1847 |
1696 void Heap::shutdown() | 1848 void Heap::shutdown() |
1697 { | 1849 { |
1698 s_shutdownCalled = true; | 1850 s_shutdownCalled = true; |
1699 ThreadState::shutdownHeapIfNecessary(); | 1851 ThreadState::shutdownHeapIfNecessary(); |
1700 } | 1852 } |
1701 | 1853 |
1702 void Heap::doShutdown() | 1854 void Heap::doShutdown() |
1703 { | 1855 { |
1704 // We don't want to call doShutdown() twice. | 1856 // We don't want to call doShutdown() twice. |
1705 if (!s_markingVisitor) | 1857 if (!s_markingVisitor) |
1706 return; | 1858 return; |
1707 | 1859 |
1708 ASSERT(!ThreadState::isAnyThreadInGC()); | 1860 ASSERT(!ThreadState::isAnyThreadInGC()); |
1709 ASSERT(!ThreadState::attachedThreads().size()); | 1861 ASSERT(!ThreadState::attachedThreads().size()); |
1710 delete s_markingVisitor; | 1862 delete s_markingVisitor; |
1711 s_markingVisitor = 0; | 1863 s_markingVisitor = 0; |
1712 delete s_heapDoesNotContainCache; | 1864 delete s_heapDoesNotContainCache; |
1713 s_heapDoesNotContainCache = 0; | 1865 s_heapDoesNotContainCache = 0; |
| 1866 delete s_freePagePool; |
| 1867 s_freePagePool = 0; |
| 1868 delete s_orphanedPagePool; |
| 1869 s_orphanedPagePool = 0; |
1714 CallbackStack::shutdown(&s_weakCallbackStack); | 1870 CallbackStack::shutdown(&s_weakCallbackStack); |
1715 CallbackStack::shutdown(&s_markingStack); | 1871 CallbackStack::shutdown(&s_markingStack); |
1716 CallbackStack::shutdown(&s_ephemeronStack); | 1872 CallbackStack::shutdown(&s_ephemeronStack); |
1717 ThreadState::shutdown(); | 1873 ThreadState::shutdown(); |
1718 } | 1874 } |
1719 | 1875 |
1720 BaseHeapPage* Heap::contains(Address address) | 1876 BaseHeapPage* Heap::contains(Address address) |
1721 { | 1877 { |
1722 ASSERT(ThreadState::isAnyThreadInGC()); | 1878 ASSERT(ThreadState::isAnyThreadInGC()); |
1723 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1879 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1724 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1880 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1725 BaseHeapPage* page = (*it)->contains(address); | 1881 BaseHeapPage* page = (*it)->contains(address); |
1726 if (page) | 1882 if (page) |
1727 return page; | 1883 return page; |
1728 } | 1884 } |
1729 return 0; | 1885 return 0; |
1730 } | 1886 } |
1731 | 1887 |
| 1888 #ifndef NDEBUG |
| 1889 bool Heap::containedInHeapOrOrphanedPage(void* object) |
| 1890 { |
| 1891 return contains(object) || orphanedPagePool()->contains(object); |
| 1892 } |
| 1893 #endif |
| 1894 |
1732 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1895 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
1733 { | 1896 { |
1734 ASSERT(ThreadState::isAnyThreadInGC()); | 1897 ASSERT(ThreadState::isAnyThreadInGC()); |
1735 | 1898 |
1736 #ifdef NDEBUG | 1899 #ifdef NDEBUG |
1737 if (s_heapDoesNotContainCache->lookup(address)) | 1900 if (s_heapDoesNotContainCache->lookup(address)) |
1738 return 0; | 1901 return 0; |
1739 #endif | 1902 #endif |
1740 | 1903 |
1741 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1904 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
(...skipping 58 matching lines...)
1800 builder.append("\n\t"); | 1963 builder.append("\n\t"); |
1801 builder.append(frameToName.nullableName()); | 1964 builder.append(frameToName.nullableName()); |
1802 --framesToShow; | 1965 --framesToShow; |
1803 } | 1966 } |
1804 return builder.toString().replace("WebCore::", ""); | 1967 return builder.toString().replace("WebCore::", ""); |
1805 } | 1968 } |
1806 #endif | 1969 #endif |
1807 | 1970 |
1808 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 1971 void Heap::pushTraceCallback(void* object, TraceCallback callback) |
1809 { | 1972 { |
1810 ASSERT(Heap::contains(object)); | 1973 ASSERT(Heap::containedInHeapOrOrphanedPage(object)); |
1811 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); | 1974 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); |
1812 *slot = CallbackStack::Item(object, callback); | 1975 *slot = CallbackStack::Item(object, callback); |
1813 } | 1976 } |
1814 | 1977 |
| 1978 template<CallbackInvocationMode Mode> |
1815 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 1979 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
1816 { | 1980 { |
1817 return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor); | 1981 return s_markingStack->popAndInvokeCallback<Mode>(&s_markingStack, visitor); |
1818 } | 1982 } |
1819 | 1983 |
1820 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback) | 1984 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback) |
1821 { | 1985 { |
1822 ASSERT(Heap::contains(cell)); | 1986 ASSERT(!Heap::orphanedPagePool()->contains(cell)); |
1823 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack); | 1987 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack); |
1824 *slot = CallbackStack::Item(cell, callback); | 1988 *slot = CallbackStack::Item(cell, callback); |
1825 } | 1989 } |
1826 | 1990 |
1827 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback) | 1991 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback) |
1828 { | 1992 { |
1829 ASSERT(Heap::contains(object)); | 1993 ASSERT(Heap::contains(object)); |
1830 BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object))); | 1994 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object); |
| 1995 ASSERT(!heapPageForObject->orphaned()); |
1831 ASSERT(Heap::contains(object) == heapPageForObject); | 1996 ASSERT(Heap::contains(object) == heapPageForObject); |
1832 ThreadState* state = heapPageForObject->threadState(); | 1997 ThreadState* state = heapPageForObject->threadState(); |
1833 state->pushWeakObjectPointerCallback(closure, callback); | 1998 state->pushWeakObjectPointerCallback(closure, callback); |
1834 } | 1999 } |
1835 | 2000 |
1836 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) | 2001 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) |
1837 { | 2002 { |
1838 return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visitor); | 2003 return s_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&s_weakCallbackStack, visitor); |
1839 } | 2004 } |
1840 | 2005 |
1841 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 2006 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
1842 { | 2007 { |
| 2008 // Check that the ephemeron table being pushed onto the stack is not on an |
| 2009 // orphaned page. |
| 2010 ASSERT(!Heap::orphanedPagePool()->contains(table)); |
| 2011 |
1843 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); | 2012 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); |
1844 *slot = CallbackStack::Item(table, iterationCallback); | 2013 *slot = CallbackStack::Item(table, iterationCallback); |
1845 | 2014 |
1846 // We use the callback stack of weak cell pointers for the ephemeronIterationDone callbacks. | 2015 // We use the callback stack of weak cell pointers for the ephemeronIterationDone callbacks. |
1847 // These callbacks are called right after marking and before any thread commences execution | 2016 // These callbacks are called right after marking and before any thread commences execution |
1848 // so it suits our needs for telling the ephemerons that the iteration is done. | 2017 // so it suits our needs for telling the ephemerons that the iteration is done. |
1849 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallback); | 2018 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallback); |
1850 } | 2019 } |
1851 | 2020 |
1852 #ifndef NDEBUG | 2021 #ifndef NDEBUG |
(...skipping 34 matching lines...)
1887 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); | 2056 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); |
1888 #endif | 2057 #endif |
1889 | 2058 |
1890 // Disallow allocation during garbage collection (but not | 2059 // Disallow allocation during garbage collection (but not |
1891 // during the finalization that happens when the gcScope is | 2060 // during the finalization that happens when the gcScope is |
1892 // torn down). | 2061 // torn down). |
1893 NoAllocationScope<AnyThread> noAllocationScope; | 2062 NoAllocationScope<AnyThread> noAllocationScope; |
1894 | 2063 |
1895 prepareForGC(); | 2064 prepareForGC(); |
1896 | 2065 |
1897 ThreadState::visitRoots(s_markingVisitor); | 2066 traceRootsAndPerformGlobalWeakProcessing<GlobalMarking>(); |
| 2067 |
| 2068 // After a global marking we know that any orphaned page that was not reached |
| 2069 // cannot be reached in a subsequent GC. This is due to a thread either having |
| 2070 // swept its heap or having done a "poor man's sweep" in prepareForGC which marks |
| 2071 // objects that are dead, but not swept in the previous GC as dead. In this GC's |
| 2072 // marking we check that any object marked as dead is not traced. E.g. via a |
| 2073 // conservatively found pointer or a programming error with an object containing |
| 2074 // a dangling pointer. |
| 2075 orphanedPagePool()->decommitOrphanedPages(); |
| 2076 |
| 2077 #if ENABLE(GC_TRACING) |
| 2078 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); |
| 2079 #endif |
| 2080 |
| 2081 if (blink::Platform::current()) { |
| 2082 uint64_t objectSpaceSize; |
| 2083 uint64_t allocatedSpaceSize; |
| 2084 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); |
| 2085 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
| 2086 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); |
| 2087 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); |
| 2088 } |
| 2089 } |
| 2090 |
| 2091 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 2092 { |
| 2093 // We explicitly do not enter a safepoint while doing thread specific |
| 2094 // garbage collection since we don't want to allow a global GC at the |
| 2095 // same time as a thread local GC. |
| 2096 |
| 2097 { |
| 2098 NoAllocationScope<AnyThread> noAllocationScope; |
| 2099 |
| 2100 state->enterGC(); |
| 2101 state->prepareForGC(); |
| 2102 |
| 2103 traceRootsAndPerformGlobalWeakProcessing<ThreadLocalMarking>(); |
| 2104 |
| 2105 state->leaveGC(); |
| 2106 } |
| 2107 state->performPendingSweep(); |
| 2108 } |
| 2109 |
| 2110 template<CallbackInvocationMode Mode> |
| 2111 void Heap::traceRootsAndPerformGlobalWeakProcessing() |
| 2112 { |
| 2113 if (Mode == ThreadLocalMarking) |
| 2114 ThreadState::current()->visitLocalRoots(s_markingVisitor); |
| 2115 else |
| 2116 ThreadState::visitRoots(s_markingVisitor); |
1898 | 2117 |
1899 // Ephemeron fixed point loop. | 2118 // Ephemeron fixed point loop. |
1900 do { | 2119 do { |
1901 // Recursively mark all objects that are reachable from the roots. | 2120 // Recursively mark all objects that are reachable from the roots for |
1902 while (popAndInvokeTraceCallback(s_markingVisitor)) { } | 2121 // this thread. If Mode is ThreadLocalMarking don't continue tracing if |
| 2122 // the trace hits an object on another thread's heap. |
| 2123 while (popAndInvokeTraceCallback<Mode>(s_markingVisitor)) { } |
1903 | 2124 |
1904 // Mark any strong pointers that have now become reachable in ephemeron | 2125 // Mark any strong pointers that have now become reachable in ephemeron |
1905 // maps. | 2126 // maps. |
1906 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); | 2127 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); |
1907 | 2128 |
1908 // Rerun loop if ephemeron processing queued more objects for tracing. | 2129 // Rerun loop if ephemeron processing queued more objects for tracing. |
1909 } while (!s_markingStack->isEmpty()); | 2130 } while (!s_markingStack->isEmpty()); |
1910 | 2131 |
1911 // Call weak callbacks on objects that may now be pointing to dead | 2132 // Call weak callbacks on objects that may now be pointing to dead |
1912 // objects and call ephemeronIterationDone callbacks on weak tables | 2133 // objects and call ephemeronIterationDone callbacks on weak tables |
1913 // to do cleanup (specifically clear the queued bits for weak hash | 2134 // to do cleanup (specifically clear the queued bits for weak hash |
1914 // tables). | 2135 // tables). |
1915 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } | 2136 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } |
1916 | 2137 |
1917 CallbackStack::clear(&s_ephemeronStack); | 2138 CallbackStack::clear(&s_ephemeronStack); |
1918 | 2139 |
1919 // It is not permitted to trace pointers of live objects in the weak | 2140 // It is not permitted to trace pointers of live objects in the weak |
1920 // callback phase, so the marking stack should still be empty here. | 2141 // callback phase, so the marking stack should still be empty here. |
1921 ASSERT(s_markingStack->isEmpty()); | 2142 ASSERT(s_markingStack->isEmpty()); |
1922 | |
1923 #if ENABLE(GC_TRACING) | |
1924 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); | |
1925 #endif | |
1926 | |
1927 if (blink::Platform::current()) { | |
1928 uint64_t objectSpaceSize; | |
1929 uint64_t allocatedSpaceSize; | |
1930 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); | |
1931 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | |
1932 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
1933 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
1934 } | |
1935 } | 2143 } |
1936 | 2144 |
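traceRootsAndPerformGlobalWeakProcessing drives marking to a fixed point: draining the marking stack can queue ephemeron callbacks, and running those callbacks can push new objects back onto the marking stack, so both steps repeat until the stack stays empty. The sketch below shows that control flow with plain work queues standing in for Blink's CallbackStack.

#include <vector>

struct SketchWorkItem;
typedef void (*SketchTraceCallback)(void* object, std::vector<SketchWorkItem>& markingStack);

struct SketchWorkItem { SketchTraceCallback callback; void* object; };

void sketchMarkToFixedPoint(std::vector<SketchWorkItem>& markingStack,
                            const std::vector<SketchWorkItem>& ephemeronStack)
{
    do {
        // Trace everything currently reachable; a callback may push newly
        // discovered objects back onto the marking stack.
        while (!markingStack.empty()) {
            SketchWorkItem item = markingStack.back();
            markingStack.pop_back();
            item.callback(item.object, markingStack);
        }
        // Re-run ephemeron callbacks; newly marked keys can make more values
        // strong, which queues more marking work.
        for (size_t i = 0; i < ephemeronStack.size(); ++i)
            ephemeronStack[i].callback(ephemeronStack[i].object, markingStack);
    } while (!markingStack.empty());
}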
1937 void Heap::collectAllGarbage() | 2145 void Heap::collectAllGarbage() |
1938 { | 2146 { |
1939 // FIXME: oilpan: we should perform a single GC and everything | 2147 // FIXME: oilpan: we should perform a single GC and everything |
1940 // should die. Unfortunately it is not the case for all objects | 2148 // should die. Unfortunately it is not the case for all objects |
1941 // because the hierarchy was not completely moved to the heap and | 2149 // because the hierarchy was not completely moved to the heap and |
1942 // some heap allocated objects own objects that contain persistents | 2150 // some heap allocated objects own objects that contain persistents |
1943 // pointing to other heap allocated objects. | 2151 // pointing to other heap allocated objects. |
1944 for (int i = 0; i < 5; i++) | 2152 for (int i = 0; i < 5; i++) |
1945 collectGarbage(ThreadState::NoHeapPointersOnStack); | 2153 collectGarbage(ThreadState::NoHeapPointersOnStack); |
1946 } | 2154 } |
1947 | 2155 |
1948 void Heap::setForcePreciseGCForTesting() | 2156 void Heap::setForcePreciseGCForTesting() |
1949 { | 2157 { |
1950 ThreadState::current()->setForcePreciseGCForTesting(true); | 2158 ThreadState::current()->setForcePreciseGCForTesting(true); |
1951 } | 2159 } |
1952 | 2160 |
| 2161 template<typename Header> |
| 2162 void ThreadHeap<Header>::prepareHeapForTermination() |
| 2163 { |
| 2164 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { |
| 2165 page->setTerminating(); |
| 2166 } |
| 2167 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
| 2168 current->setTerminating(); |
| 2169 } |
| 2170 } |
| 2171 |
1953 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize) | 2172 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize) |
1954 { | 2173 { |
1955 *objectSpaceSize = 0; | 2174 *objectSpaceSize = 0; |
1956 *allocatedSpaceSize = 0; | 2175 *allocatedSpaceSize = 0; |
1957 ASSERT(ThreadState::isAnyThreadInGC()); | 2176 ASSERT(ThreadState::isAnyThreadInGC()); |
1958 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2177 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1959 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; | 2178 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; |
1960 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2179 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1961 *objectSpaceSize += (*it)->stats().totalObjectSpace(); | 2180 *objectSpaceSize += (*it)->stats().totalObjectSpace(); |
1962 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); | 2181 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); |
(...skipping 30 matching lines...)
1993 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2212 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1994 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 2213 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
1995 (*it)->makeConsistentForGC(); | 2214 (*it)->makeConsistentForGC(); |
1996 } | 2215 } |
1997 | 2216 |
1998 // Force template instantiations for the types that we need. | 2217 // Force template instantiations for the types that we need. |
1999 template class HeapPage<FinalizedHeapObjectHeader>; | 2218 template class HeapPage<FinalizedHeapObjectHeader>; |
2000 template class HeapPage<HeapObjectHeader>; | 2219 template class HeapPage<HeapObjectHeader>; |
2001 template class ThreadHeap<FinalizedHeapObjectHeader>; | 2220 template class ThreadHeap<FinalizedHeapObjectHeader>; |
2002 template class ThreadHeap<HeapObjectHeader>; | 2221 template class ThreadHeap<HeapObjectHeader>; |
| 2222 template bool CallbackStack::popAndInvokeCallback<GlobalMarking>(CallbackStack**, Visitor*); |
| 2223 template bool CallbackStack::popAndInvokeCallback<ThreadLocalMarking>(CallbackStack**, Visitor*); |
| 2224 template bool CallbackStack::popAndInvokeCallback<WeaknessProcessing>(CallbackStack**, Visitor*); |
2003 | 2225 |
2004 Visitor* Heap::s_markingVisitor; | 2226 Visitor* Heap::s_markingVisitor; |
2005 CallbackStack* Heap::s_markingStack; | 2227 CallbackStack* Heap::s_markingStack; |
2006 CallbackStack* Heap::s_weakCallbackStack; | 2228 CallbackStack* Heap::s_weakCallbackStack; |
2007 CallbackStack* Heap::s_ephemeronStack; | 2229 CallbackStack* Heap::s_ephemeronStack; |
2008 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2230 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
2009 bool Heap::s_shutdownCalled = false; | 2231 bool Heap::s_shutdownCalled = false; |
2010 bool Heap::s_lastGCWasConservative = false; | 2232 bool Heap::s_lastGCWasConservative = false; |
| 2233 FreePagePool* Heap::s_freePagePool; |
| 2234 OrphanedPagePool* Heap::s_orphanedPagePool; |
2011 } | 2235 } |