OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 405 matching lines...)
416 } | 416 } |
417 | 417 |
418 NO_SANITIZE_ADDRESS | 418 NO_SANITIZE_ADDRESS |
419 void HeapObjectHeader::unmark() | 419 void HeapObjectHeader::unmark() |
420 { | 420 { |
421 checkHeader(); | 421 checkHeader(); |
422 m_size &= ~markBitMask; | 422 m_size &= ~markBitMask; |
423 } | 423 } |
424 | 424 |
425 NO_SANITIZE_ADDRESS | 425 NO_SANITIZE_ADDRESS |
426 bool HeapObjectHeader::hasDebugMark() const | 426 bool HeapObjectHeader::hasDeadMark() const |
427 { | 427 { |
428 checkHeader(); | 428 checkHeader(); |
429 return m_size & debugBitMask; | 429 return m_size & deadBitMask; |
430 } | 430 } |
431 | 431 |
432 NO_SANITIZE_ADDRESS | 432 NO_SANITIZE_ADDRESS |
433 void HeapObjectHeader::clearDebugMark() | 433 void HeapObjectHeader::clearDeadMark() |
434 { | 434 { |
435 checkHeader(); | 435 checkHeader(); |
436 m_size &= ~debugBitMask; | 436 m_size &= ~deadBitMask; |
437 } | 437 } |
438 | 438 |
439 NO_SANITIZE_ADDRESS | 439 NO_SANITIZE_ADDRESS |
440 void HeapObjectHeader::setDebugMark() | 440 void HeapObjectHeader::setDeadMark() |
441 { | 441 { |
| 442 ASSERT(!isMarked()); |
442 checkHeader(); | 443 checkHeader(); |
443 m_size |= debugBitMask; | 444 m_size |= deadBitMask; |
444 } | 445 } |
445 | 446 |
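The setDeadMark/hasDeadMark/clearDeadMark accessors above assume deadBitMask is one more flag bit packed into HeapObjectHeader::m_size next to the existing mark bit. A minimal sketch of the assumed bit layout (the real constants live in Heap.h and their exact values may differ):

    // Assumed flag bits ORed into HeapObjectHeader::m_size; the object size
    // occupies the remaining bits, so size() masks the flags back out.
    const size_t freeListMask = 1; // header describes a free-list entry
    const size_t markBitMask  = 2; // object was reached during marking
    const size_t deadBitMask  = 4; // object found dead in a previous GC but not yet swept; must not be traced
    // size_t HeapObjectHeader::size() const { return m_size & ~(freeListMask | markBitMask | deadBitMask); }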
446 #ifndef NDEBUG | 447 #ifndef NDEBUG |
447 NO_SANITIZE_ADDRESS | 448 NO_SANITIZE_ADDRESS |
448 void HeapObjectHeader::zapMagic() | 449 void HeapObjectHeader::zapMagic() |
449 { | 450 { |
450 m_magic = zappedMagic; | 451 m_magic = zappedMagic; |
451 } | 452 } |
452 #endif | 453 #endif |
453 | 454 |
(...skipping 39 matching lines...)
493 return heapObjectHeader()->unmark(); | 494 return heapObjectHeader()->unmark(); |
494 } | 495 } |
495 | 496 |
496 template<typename Header> | 497 template<typename Header> |
497 bool LargeHeapObject<Header>::isMarked() | 498 bool LargeHeapObject<Header>::isMarked() |
498 { | 499 { |
499 return heapObjectHeader()->isMarked(); | 500 return heapObjectHeader()->isMarked(); |
500 } | 501 } |
501 | 502 |
502 template<typename Header> | 503 template<typename Header> |
| 504 void LargeHeapObject<Header>::setDeadMark() |
| 505 { |
| 506 heapObjectHeader()->setDeadMark(); |
| 507 } |
| 508 |
| 509 template<typename Header> |
503 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 510 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
504 { | 511 { |
505 ASSERT(contains(address)); | 512 ASSERT(contains(address)); |
506 if (!objectContains(address)) | 513 if (!objectContains(address) || heapObjectHeader()->hasDeadMark()) |
507 return; | 514 return; |
508 #if ENABLE(GC_TRACING) | 515 #if ENABLE(GC_TRACING) |
509 visitor->setHostInfo(&address, "stack"); | 516 visitor->setHostInfo(&address, "stack"); |
510 #endif | 517 #endif |
511 mark(visitor); | 518 mark(visitor); |
512 } | 519 } |
513 | 520 |
514 template<> | 521 template<> |
515 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) | 522 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) |
516 { | 523 { |
(...skipping 28 matching lines...)
545 | 552 |
546 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload) | 553 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload) |
547 { | 554 { |
548 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); | 555 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); |
549 FinalizedHeapObjectHeader* header = | 556 FinalizedHeapObjectHeader* header = |
550 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); | 557 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); |
551 return header; | 558 return header; |
552 } | 559 } |
553 | 560 |
554 template<typename Header> | 561 template<typename Header> |
555 ThreadHeap<Header>::ThreadHeap(ThreadState* state) | 562 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) |
556 : m_currentAllocationPoint(0) | 563 : m_currentAllocationPoint(0) |
557 , m_remainingAllocationSize(0) | 564 , m_remainingAllocationSize(0) |
558 , m_firstPage(0) | 565 , m_firstPage(0) |
559 , m_firstLargeHeapObject(0) | 566 , m_firstLargeHeapObject(0) |
560 , m_biggestFreeListIndex(0) | 567 , m_biggestFreeListIndex(0) |
561 , m_threadState(state) | 568 , m_threadState(state) |
562 , m_pagePool(0) | 569 , m_index(index) |
563 { | 570 { |
564 clearFreeLists(); | 571 clearFreeLists(); |
565 } | 572 } |
566 | 573 |
567 template<typename Header> | 574 template<typename Header> |
568 ThreadHeap<Header>::~ThreadHeap() | 575 ThreadHeap<Header>::~ThreadHeap() |
569 { | 576 { |
570 clearFreeLists(); | 577 ASSERT(!m_firstPage); |
571 if (!ThreadState::current()->isMainThread()) | 578 ASSERT(!m_firstLargeHeapObject); |
572 assertEmpty(); | |
573 deletePages(); | |
574 } | 579 } |
575 | 580 |
576 template<typename Header> | 581 template<typename Header> |
| 582 void ThreadHeap<Header>::cleanupPages() |
| 583 { |
| 584 clearFreeLists(); |
| 585 flushHeapContainsCache(); |
| 586 |
| 587 // Add the ThreadHeap's pages to the orphanedPagePool. |
| 588 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) |
| 589 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 590 m_firstPage = 0; |
| 591 |
| 592 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next) |
| 593 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
| 594 m_firstLargeHeapObject = 0; |
| 595 } |
| 596 |
| 597 template<typename Header> |
577 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) | 598 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) |
578 { | 599 { |
579 size_t allocationSize = allocationSizeFromSize(size); | 600 size_t allocationSize = allocationSizeFromSize(size); |
580 if (threadState()->shouldGC()) { | 601 if (threadState()->shouldGC()) { |
581 if (threadState()->shouldForceConservativeGC()) | 602 if (threadState()->shouldForceConservativeGC()) |
582 Heap::collectGarbage(ThreadState::HeapPointersOnStack); | 603 Heap::collectGarbage(ThreadState::HeapPointersOnStack); |
583 else | 604 else |
584 threadState()->setGCRequested(); | 605 threadState()->setGCRequested(); |
585 } | 606 } |
586 ensureCurrentAllocation(allocationSize, gcInfo); | 607 ensureCurrentAllocation(allocationSize, gcInfo); |
(...skipping 143 matching lines...)
730 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) | 751 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) |
731 { | 752 { |
732 flushHeapContainsCache(); | 753 flushHeapContainsCache(); |
733 object->unlink(previousNext); | 754 object->unlink(previousNext); |
734 object->finalize(); | 755 object->finalize(); |
735 | 756 |
736 // Unpoison the object header and allocationGranularity bytes after the | 757 // Unpoison the object header and allocationGranularity bytes after the |
737 // object before freeing. | 758 // object before freeing. |
738 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 759 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
739 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 760 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
740 delete object->storage(); | 761 |
741 } | 762 if (object->terminating()) { |
| 763 ASSERT(ThreadState::current()->isTerminating()); |
| 764 // The thread is shutting down so this object is being removed as part |
| 765 // of a thread local GC. In that case the object could be traced in the |
| 766 // next global GC either due to a dead object being traced via a |
| 767 // conservative pointer or due to a programming error where an object |
| 768 // in another thread heap keeps a dangling pointer to this object. |
| 769 // To guard against this we put the large object memory in the |
| 770 // orphanedPagePool to ensure it is still reachable. After the next global |
| 771 // GC it can be released assuming no rogue/dangling pointers refer to |
| 772 // it. |
| 773 // NOTE: large objects are not moved to the free page pool as it is |
| 774 // unlikely they can be reused due to their individual sizes. |
| 775 Heap::orphanedPagePool()->addOrphanedPage(m_index, object); |
| 776 } else { |
| 777 ASSERT(!ThreadState::current()->isTerminating()); |
| 778 PageMemory* memory = object->storage(); |
| 779 object->~LargeHeapObject<Header>(); |
| 780 delete memory; |
| 781 } |
| 782 } |
| 783 |
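The terminating/orphaned handling in freeLargeObject above, and in removePageFromHeap and the page pools below, relies on a few flags on BaseHeapPage. A sketch of the assumed interface, reconstructed from the call sites in this patch (the real declaration lives in Heap.h and may differ in detail):

    // Assumed BaseHeapPage flag interface used by the orphaned-page protocol.
    class BaseHeapPage {
    public:
        // Set while the owning thread shuts down; such pages are handed to the
        // OrphanedPagePool instead of being freed or returned to the free pool.
        void setTerminating() { m_terminating = true; }
        bool terminating() const { return m_terminating; }

        // An orphaned page no longer belongs to any ThreadState; markOrphaned()
        // also resets the traced flag so the next global GC starts clean.
        void markOrphaned() { m_threadState = 0; m_terminating = false; m_tracedAfterOrphaned = false; }
        bool orphaned() const { return !m_threadState; }

        // Records that a trace reached the page after it was orphaned, which
        // blocks decommitting it in OrphanedPagePool::decommitOrphanedPages().
        void setTracedAfterOrphaned() { m_tracedAfterOrphaned = true; }
        bool tracedAfterOrphaned() const { return m_tracedAfterOrphaned; }

    private:
        ThreadState* m_threadState;
        bool m_terminating;
        bool m_tracedAfterOrphaned;
    };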
| 784 template<typename DataType> |
| 785 PagePool<DataType>::PagePool() |
| 786 { |
| 787 for (int i = 0; i < NumberOfHeaps; ++i) { |
| 788 m_pool[i] = 0; |
| 789 } |
| 790 } |
| 791 |
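FreePagePool and OrphanedPagePool below both specialize this PagePool template. A minimal sketch of the layout the constructor above and the pool methods below assume, i.e. one singly linked list of PoolEntry nodes per heap index (the actual declaration is in Heap.h):

    template<typename DataType>
    class PagePool {
    protected:
        PagePool();

        // Intrusive list node; entries are pushed and popped at the head.
        class PoolEntry {
        public:
            PoolEntry(DataType* data, PoolEntry* next) : data(data), next(next) { }
            DataType* data;
            PoolEntry* next;
        };

        // One bucket per ThreadHeap index so pages keep their heap affinity.
        PoolEntry* m_pool[NumberOfHeaps];
    };

Under this layout FreePagePool would be a PagePool<PageMemory> that adds a per-index Mutex (m_mutex), and OrphanedPagePool a plain PagePool<BaseHeapPage>.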
| 792 FreePagePool::~FreePagePool() |
| 793 { |
| 794 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 795 while (PoolEntry* entry = m_pool[index]) { |
| 796 m_pool[index] = entry->next; |
| 797 PageMemory* memory = entry->data; |
| 798 ASSERT(memory); |
| 799 delete memory; |
| 800 delete entry; |
| 801 } |
| 802 } |
| 803 } |
| 804 |
| 805 void FreePagePool::addFreePage(int index, PageMemory* memory) |
| 806 { |
| 807 // When adding a page to the pool we decommit it to ensure it is unused |
| 808 // while in the pool. This also allows the physical memory, backing the |
| 809 // page, to be given back to the OS. |
| 810 memory->decommit(); |
| 811 MutexLocker locker(m_mutex[index]); |
| 812 PoolEntry* entry = new PoolEntry(memory, m_pool[index]); |
| 813 m_pool[index] = entry; |
| 814 } |
| 815 |
| 816 PageMemory* FreePagePool::takeFreePage(int index) |
| 817 { |
| 818 MutexLocker locker(m_mutex[index]); |
| 819 while (PoolEntry* entry = m_pool[index]) { |
| 820 m_pool[index] = entry->next; |
| 821 PageMemory* memory = entry->data; |
| 822 ASSERT(memory); |
| 823 delete entry; |
| 824 if (memory->commit()) |
| 825 return memory; |
| 826 |
| 827 // We got some memory, but failed to commit it, try again. |
| 828 delete memory; |
| 829 } |
| 830 return 0; |
| 831 } |
| 832 |
| 833 OrphanedPagePool::~OrphanedPagePool() |
| 834 { |
| 835 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 836 while (PoolEntry* entry = m_pool[index]) { |
| 837 m_pool[index] = entry->next; |
| 838 BaseHeapPage* page = entry->data; |
| 839 delete entry; |
| 840 PageMemory* memory = page->storage(); |
| 841 ASSERT(memory); |
| 842 page->~BaseHeapPage(); |
| 843 delete memory; |
| 844 } |
| 845 } |
| 846 } |
| 847 |
| 848 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page) |
| 849 { |
| 850 page->markOrphaned(); |
| 851 PoolEntry* entry = new PoolEntry(page, m_pool[index]); |
| 852 m_pool[index] = entry; |
| 853 } |
| 854 |
| 855 NO_SANITIZE_ADDRESS |
| 856 void OrphanedPagePool::decommitOrphanedPages() |
| 857 { |
| 858 #ifndef NDEBUG |
| 859 // No locking needed as all threads are at safepoints at this point in time. |
| 860 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
| 861 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
| 862 ASSERT((*it)->isAtSafePoint()); |
| 863 #endif |
| 864 |
| 865 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 866 PoolEntry* entry = m_pool[index]; |
| 867 PoolEntry** prevNext = &m_pool[index]; |
| 868 while (entry) { |
| 869 BaseHeapPage* page = entry->data; |
| 870 if (page->tracedAfterOrphaned()) { |
| 871 // If the orphaned page was traced in the last GC it is not |
| 872 // decommited. We only decommit a page, ie. put it in the |
| 873 // memory pool, when the page has no objects pointing to it. |
| 874 // We remark the page as orphaned to clear the tracedAfterOrphaned |
| 875 // flag and any object trace bits that were set during tracing. |
| 876 page->markOrphaned(); |
| 877 prevNext = &entry->next; |
| 878 entry = entry->next; |
| 879 continue; |
| 880 } |
| 881 |
| 882 // Page was not traced. Check if we should reuse the memory or just |
| 883 // free it. Large object memory is not reused, but freed, normal |
| 884 // blink heap pages are reused. |
| 885 // NOTE: We call the destructor before freeing or adding to the |
| 886 // free page pool. |
| 887 PageMemory* memory = page->storage(); |
| 888 if (page->isLargeObject()) { |
| 889 page->~BaseHeapPage(); |
| 890 delete memory; |
| 891 } else { |
| 892 page->~BaseHeapPage(); |
| 893 // Clear out the page's memory before adding it to the free page |
| 894 // pool to ensure it is zero filled when being reused. |
| 895 clearMemory(memory); |
| 896 Heap::freePagePool()->addFreePage(index, memory); |
| 897 } |
| 898 |
| 899 PoolEntry* deadEntry = entry; |
| 900 entry = entry->next; |
| 901 *prevNext = entry; |
| 902 delete deadEntry; |
| 903 } |
| 904 } |
| 905 } |
| 906 |
| 907 NO_SANITIZE_ADDRESS |
| 908 void OrphanedPagePool::clearMemory(PageMemory* memory) |
| 909 { |
| 910 #if defined(ADDRESS_SANITIZER) |
| 911 // Don't use memset when running with ASan since this needs to zap |
| 912 // poisoned memory as well and the NO_SANITIZE_ADDRESS annotation |
| 913 // only works for code in this method and not for calls to memset. |
| 914 Address base = memory->writableStart(); |
| 915 for (Address current = base; current < base + blinkPagePayloadSize(); ++current) |
| 916 *current = 0; |
| 917 #else |
| 918 memset(memory->writableStart(), 0, blinkPagePayloadSize()); |
| 919 #endif |
| 920 } |
| 921 |
| 922 #ifndef NDEBUG |
| 923 bool OrphanedPagePool::contains(void* object) |
| 924 { |
| 925 for (int index = 0; index < NumberOfHeaps; ++index) { |
| 926 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { |
| 927 BaseHeapPage* page = entry->data; |
| 928 if (page->contains(reinterpret_cast<Address>(object))) |
| 929 return true; |
| 930 } |
| 931 } |
| 932 return false; |
| 933 } |
| 934 #endif |
742 | 935 |
743 template<> | 936 template<> |
744 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) | 937 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) |
745 { | 938 { |
746 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on | 939 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on |
747 // the heap should be unused (ie. 0). | 940 // the heap should be unused (ie. 0). |
748 allocatePage(0); | 941 allocatePage(0); |
749 } | 942 } |
750 | 943 |
751 template<> | 944 template<> |
752 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) | 945 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) |
753 { | 946 { |
754 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap | 947 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap |
755 // since it is the same for all objects | 948 // since it is the same for all objects |
756 ASSERT(gcInfo); | 949 ASSERT(gcInfo); |
757 allocatePage(gcInfo); | 950 allocatePage(gcInfo); |
758 } | 951 } |
759 | 952 |
760 template<typename Header> | 953 template <typename Header> |
761 void ThreadHeap<Header>::clearPagePool() | 954 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) |
762 { | |
763 while (takePageFromPool()) { } | |
764 } | |
765 | |
766 template<typename Header> | |
767 PageMemory* ThreadHeap<Header>::takePageFromPool() | |
768 { | |
769 Heap::flushHeapDoesNotContainCache(); | |
770 while (PagePoolEntry* entry = m_pagePool) { | |
771 m_pagePool = entry->next(); | |
772 PageMemory* storage = entry->storage(); | |
773 delete entry; | |
774 | |
775 if (storage->commit()) | |
776 return storage; | |
777 | |
778 // Failed to commit pooled storage. Release it. | |
779 delete storage; | |
780 } | |
781 | |
782 return 0; | |
783 } | |
784 | |
785 template<typename Header> | |
786 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage) | |
787 { | 955 { |
788 flushHeapContainsCache(); | 956 flushHeapContainsCache(); |
789 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 957 if (page->terminating()) { |
790 m_pagePool = entry; | 958 ASSERT(ThreadState::current()->isTerminating()); |
791 } | 959 // The thread is shutting down so this page is being removed as part |
792 | 960 // of a thread local GC. In that case the page could be accessed in the |
793 template <typename Header> | 961 // next global GC either due to a dead object being traced via a |
794 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) | 962 // conservative pointer or due to a programming error where an object |
795 { | 963 // in another thread heap keeps a dangling pointer to this object. |
796 PageMemory* storage = page->storage(); | 964 // To guard against this we put the page in the orphanedPagePool to |
797 storage->decommit(); | 965 // ensure it is still reachable. After the next global GC it can be |
798 addPageMemoryToPool(storage); | 966 // decommitted and moved to the page pool assuming no rogue/dangling |
| 967 // pointers refer to it. |
| 968 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
| 969 } else { |
| 970 ASSERT(!ThreadState::current()->isTerminating()); |
| 971 PageMemory* memory = page->storage(); |
| 972 page->~HeapPage<Header>(); |
| 973 Heap::freePagePool()->addFreePage(m_index, memory); |
| 974 } |
799 } | 975 } |
800 | 976 |
801 template<typename Header> | 977 template<typename Header> |
802 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 978 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
803 { | 979 { |
804 Heap::flushHeapDoesNotContainCache(); | 980 Heap::flushHeapDoesNotContainCache(); |
805 PageMemory* pageMemory = takePageFromPool(); | 981 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
806 if (!pageMemory) { | 982 // We continue allocating page memory until we succeed in getting one. |
| 983 // Since the FreePagePool is global other threads could use all the |
| 984 // newly allocated page memory before this thread calls takeFreePage. |
| 985 while (!pageMemory) { |
807 // Allocate a memory region for blinkPagesPerRegion pages that | 986 // Allocate a memory region for blinkPagesPerRegion pages that |
808 // will each have the following layout. | 987 // will each have the following layout. |
809 // | 988 // |
810 // [ guard os page | ... payload ... | guard os page ] | 989 // [ guard os page | ... payload ... | guard os page ] |
811 // ^---{ aligned to blink page size } | 990 // ^---{ aligned to blink page size } |
812 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); | 991 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); |
813 // Setup the PageMemory object for each of the pages in the | 992 // Setup the PageMemory object for each of the pages in the |
814 // region. | 993 // region. |
815 size_t offset = 0; | 994 size_t offset = 0; |
816 for (size_t i = 0; i < blinkPagesPerRegion; i++) { | 995 for (size_t i = 0; i < blinkPagesPerRegion; i++) { |
817 addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); | 996 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); |
818 offset += blinkPageSize; | 997 offset += blinkPageSize; |
819 } | 998 } |
820 pageMemory = takePageFromPool(); | 999 pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
821 RELEASE_ASSERT(pageMemory); | |
822 } | 1000 } |
823 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); | 1001 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); |
824 // FIXME: Oilpan: Linking new pages into the front of the list is | 1002 // FIXME: Oilpan: Linking new pages into the front of the list is |
825 // crucial when performing allocations during finalization because | 1003 // crucial when performing allocations during finalization because |
826 // it ensures that those pages are not swept in the current GC | 1004 // it ensures that those pages are not swept in the current GC |
827 // round. We should create a separate page list for that to | 1005 // round. We should create a separate page list for that to |
828 // separate out the pages allocated during finalization clearly | 1006 // separate out the pages allocated during finalization clearly |
829 // from the pages currently being swept. | 1007 // from the pages currently being swept. |
830 page->link(&m_firstPage); | 1008 page->link(&m_firstPage); |
831 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); | 1009 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); |
(...skipping 23 matching lines...)
855 ASSERT(isConsistentForGC()); | 1033 ASSERT(isConsistentForGC()); |
856 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING | 1034 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING |
857 // When using ASan do a pre-sweep where all unmarked objects are poisoned before | 1035 // When using ASan do a pre-sweep where all unmarked objects are poisoned before |
858 // calling their finalizer methods. This can catch the cases where one objects | 1036 // calling their finalizer methods. This can catch the cases where one objects |
859 // finalizer tries to modify another object as part of finalization. | 1037 // finalizer tries to modify another object as part of finalization. |
860 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1038 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
861 page->poisonUnmarkedObjects(); | 1039 page->poisonUnmarkedObjects(); |
862 #endif | 1040 #endif |
863 HeapPage<Header>* page = m_firstPage; | 1041 HeapPage<Header>* page = m_firstPage; |
864 HeapPage<Header>** previous = &m_firstPage; | 1042 HeapPage<Header>** previous = &m_firstPage; |
865 bool pagesRemoved = false; | |
866 while (page) { | 1043 while (page) { |
867 if (page->isEmpty()) { | 1044 if (page->isEmpty()) { |
868 flushHeapContainsCache(); | |
869 HeapPage<Header>* unused = page; | 1045 HeapPage<Header>* unused = page; |
870 page = page->next(); | 1046 page = page->next(); |
871 HeapPage<Header>::unlink(unused, previous); | 1047 HeapPage<Header>::unlink(unused, previous); |
872 pagesRemoved = true; | |
873 } else { | 1048 } else { |
874 page->sweep(); | 1049 page->sweep(); |
875 previous = &page->m_next; | 1050 previous = &page->m_next; |
876 page = page->next(); | 1051 page = page->next(); |
877 } | 1052 } |
878 } | 1053 } |
879 if (pagesRemoved) | |
880 flushHeapContainsCache(); | |
881 | 1054 |
882 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 1055 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
883 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 1056 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
884 if (current->isMarked()) { | 1057 if (current->isMarked()) { |
885 stats().increaseAllocatedSpace(current->size()); | 1058 stats().increaseAllocatedSpace(current->size()); |
886 stats().increaseObjectSpace(current->payloadSize()); | 1059 stats().increaseObjectSpace(current->payloadSize()); |
887 current->unmark(); | 1060 current->unmark(); |
888 previousNext = ¤t->m_next; | 1061 previousNext = ¤t->m_next; |
889 current = current->next(); | 1062 current = current->next(); |
890 } else { | 1063 } else { |
891 LargeHeapObject<Header>* next = current->next(); | 1064 LargeHeapObject<Header>* next = current->next(); |
892 freeLargeObject(current, previousNext); | 1065 freeLargeObject(current, previousNext); |
893 current = next; | 1066 current = next; |
894 } | 1067 } |
895 } | 1068 } |
896 } | 1069 } |
897 | 1070 |
898 template<typename Header> | 1071 template<typename Header> |
899 void ThreadHeap<Header>::assertEmpty() | |
900 { | |
901 // No allocations are permitted. The thread is exiting. | |
902 NoAllocationScope<AnyThread> noAllocation; | |
903 makeConsistentForGC(); | |
904 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | |
905 Address end = page->end(); | |
906 Address headerAddress; | |
907 for (headerAddress = page->payload(); headerAddress < end; ) { | |
908 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress); | |
909 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | |
910 // A live object is potentially a dangling pointer from | |
911 // some root. Treat that as a bug. Unfortunately, it is | |
912 // hard to reliably check in the presence of conservative | |
913 // stack scanning. Something could be conservatively kept | |
914 // alive because a non-pointer on another thread's stack | |
915 // is treated as a pointer into the heap. | |
916 // | |
917 // FIXME: This assert can currently trigger in cases where | |
918 // worker shutdown does not get enough precise GCs to get | |
919 // all objects removed from the worker heap. There are two | |
920 // issues: 1) conservative GCs keeping objects alive, and | |
921 // 2) long chains of RefPtrs/Persistents that require more | |
922 // GCs to get everything cleaned up. Maybe we can keep | |
923 // threads alive until their heaps become empty instead of | |
924 // forcing the threads to die immediately? | |
925 ASSERT(Heap::lastGCWasConservative() || basicHeader->isFree()); | |
926 if (basicHeader->isFree()) | |
927 addToFreeList(headerAddress, basicHeader->size()); | |
928 headerAddress += basicHeader->size(); | |
929 } | |
930 ASSERT(headerAddress == end); | |
931 } | |
932 | |
933 ASSERT(Heap::lastGCWasConservative() || !m_firstLargeHeapObject); | |
934 } | |
935 | |
936 template<typename Header> | |
937 bool ThreadHeap<Header>::isConsistentForGC() | 1072 bool ThreadHeap<Header>::isConsistentForGC() |
938 { | 1073 { |
939 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | 1074 for (size_t i = 0; i < blinkPageSizeLog2; i++) { |
940 if (m_freeLists[i]) | 1075 if (m_freeLists[i]) |
941 return false; | 1076 return false; |
942 } | 1077 } |
943 return !ownsNonEmptyAllocationArea(); | 1078 return !ownsNonEmptyAllocationArea(); |
944 } | 1079 } |
945 | 1080 |
946 template<typename Header> | 1081 template<typename Header> |
947 void ThreadHeap<Header>::makeConsistentForGC() | 1082 void ThreadHeap<Header>::makeConsistentForGC() |
948 { | 1083 { |
949 if (ownsNonEmptyAllocationArea()) | 1084 if (ownsNonEmptyAllocationArea()) |
950 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 1085 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
951 setAllocationPoint(0, 0); | 1086 setAllocationPoint(0, 0); |
952 clearFreeLists(); | 1087 clearFreeLists(); |
953 } | 1088 } |
954 | 1089 |
955 template<typename Header> | 1090 template<typename Header> |
956 void ThreadHeap<Header>::clearMarks() | 1091 void ThreadHeap<Header>::clearLiveAndMarkDead() |
957 { | 1092 { |
958 ASSERT(isConsistentForGC()); | 1093 ASSERT(isConsistentForGC()); |
959 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1094 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
960 page->clearMarks(); | 1095 page->clearLiveAndMarkDead(); |
961 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) | 1096 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
962 current->unmark(); | 1097 if (current->isMarked()) |
| 1098 current->unmark(); |
| 1099 else |
| 1100 current->setDeadMark(); |
| 1101 } |
963 } | 1102 } |
964 | 1103 |
965 template<typename Header> | 1104 template<typename Header> |
966 void ThreadHeap<Header>::deletePages() | |
967 { | |
968 flushHeapContainsCache(); | |
969 // Add all pages in the pool to the heap's list of pages before deleting | |
970 clearPagePool(); | |
971 | |
972 for (HeapPage<Header>* page = m_firstPage; page; ) { | |
973 HeapPage<Header>* dead = page; | |
974 page = page->next(); | |
975 PageMemory* storage = dead->storage(); | |
976 dead->~HeapPage(); | |
977 delete storage; | |
978 } | |
979 m_firstPage = 0; | |
980 | |
981 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | |
982 LargeHeapObject<Header>* dead = current; | |
983 current = current->next(); | |
984 PageMemory* storage = dead->storage(); | |
985 dead->~LargeHeapObject(); | |
986 delete storage; | |
987 } | |
988 m_firstLargeHeapObject = 0; | |
989 } | |
990 | |
991 template<typename Header> | |
992 void ThreadHeap<Header>::clearFreeLists() | 1105 void ThreadHeap<Header>::clearFreeLists() |
993 { | 1106 { |
994 for (size_t i = 0; i < blinkPageSizeLog2; i++) | 1107 for (size_t i = 0; i < blinkPageSizeLog2; i++) |
995 m_freeLists[i] = 0; | 1108 m_freeLists[i] = 0; |
996 } | 1109 } |
997 | 1110 |
998 int BaseHeap::bucketIndexForSize(size_t size) | 1111 int BaseHeap::bucketIndexForSize(size_t size) |
999 { | 1112 { |
1000 ASSERT(size > 0); | 1113 ASSERT(size > 0); |
1001 int index = -1; | 1114 int index = -1; |
(...skipping 20 matching lines...)
1022 void HeapPage<Header>::link(HeapPage** prevNext) | 1135 void HeapPage<Header>::link(HeapPage** prevNext) |
1023 { | 1136 { |
1024 m_next = *prevNext; | 1137 m_next = *prevNext; |
1025 *prevNext = this; | 1138 *prevNext = this; |
1026 } | 1139 } |
1027 | 1140 |
1028 template<typename Header> | 1141 template<typename Header> |
1029 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) | 1142 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) |
1030 { | 1143 { |
1031 *prevNext = unused->m_next; | 1144 *prevNext = unused->m_next; |
1032 unused->heap()->addPageToPool(unused); | 1145 unused->heap()->removePageFromHeap(unused); |
1033 } | 1146 } |
1034 | 1147 |
1035 template<typename Header> | 1148 template<typename Header> |
1036 void HeapPage<Header>::getStats(HeapStats& stats) | 1149 void HeapPage<Header>::getStats(HeapStats& stats) |
1037 { | 1150 { |
1038 stats.increaseAllocatedSpace(blinkPageSize); | 1151 stats.increaseAllocatedSpace(blinkPageSize); |
1039 Address headerAddress = payload(); | 1152 Address headerAddress = payload(); |
1040 ASSERT(headerAddress != end()); | 1153 ASSERT(headerAddress != end()); |
1041 do { | 1154 do { |
1042 Header* header = reinterpret_cast<Header*>(headerAddress); | 1155 Header* header = reinterpret_cast<Header*>(headerAddress); |
(...skipping 62 matching lines...)
1105 header->unmark(); | 1218 header->unmark(); |
1106 headerAddress += header->size(); | 1219 headerAddress += header->size(); |
1107 heap()->stats().increaseObjectSpace(header->payloadSize()); | 1220 heap()->stats().increaseObjectSpace(header->payloadSize()); |
1108 startOfGap = headerAddress; | 1221 startOfGap = headerAddress; |
1109 } | 1222 } |
1110 if (startOfGap != end()) | 1223 if (startOfGap != end()) |
1111 heap()->addToFreeList(startOfGap, end() - startOfGap); | 1224 heap()->addToFreeList(startOfGap, end() - startOfGap); |
1112 } | 1225 } |
1113 | 1226 |
1114 template<typename Header> | 1227 template<typename Header> |
1115 void HeapPage<Header>::clearMarks() | 1228 void HeapPage<Header>::clearLiveAndMarkDead() |
1116 { | 1229 { |
1117 for (Address headerAddress = payload(); headerAddress < end();) { | 1230 for (Address headerAddress = payload(); headerAddress < end();) { |
1118 Header* header = reinterpret_cast<Header*>(headerAddress); | 1231 Header* header = reinterpret_cast<Header*>(headerAddress); |
1119 ASSERT(header->size() < blinkPagePayloadSize()); | 1232 ASSERT(header->size() < blinkPagePayloadSize()); |
1120 if (!header->isFree()) | 1233 // Check if a free list entry first since we cannot call |
| 1234 // isMarked on a free list entry. |
| 1235 if (header->isFree()) { |
| 1236 headerAddress += header->size(); |
| 1237 continue; |
| 1238 } |
| 1239 if (header->isMarked()) |
1121 header->unmark(); | 1240 header->unmark(); |
| 1241 else |
| 1242 header->setDeadMark(); |
1122 headerAddress += header->size(); | 1243 headerAddress += header->size(); |
1123 } | 1244 } |
1124 } | 1245 } |
1125 | 1246 |
1126 template<typename Header> | 1247 template<typename Header> |
1127 void HeapPage<Header>::populateObjectStartBitMap() | 1248 void HeapPage<Header>::populateObjectStartBitMap() |
1128 { | 1249 { |
1129 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); | 1250 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); |
1130 Address start = payload(); | 1251 Address start = payload(); |
1131 for (Address headerAddress = start; headerAddress < end();) { | 1252 for (Address headerAddress = start; headerAddress < end();) { |
(...skipping 59 matching lines...)
1191 if (header->isFree()) | 1312 if (header->isFree()) |
1192 return 0; | 1313 return 0; |
1193 return header; | 1314 return header; |
1194 } | 1315 } |
1195 | 1316 |
1196 template<typename Header> | 1317 template<typename Header> |
1197 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 1318 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
1198 { | 1319 { |
1199 ASSERT(contains(address)); | 1320 ASSERT(contains(address)); |
1200 Header* header = findHeaderFromAddress(address); | 1321 Header* header = findHeaderFromAddress(address); |
1201 if (!header) | 1322 if (!header || header->hasDeadMark()) |
1202 return; | 1323 return; |
1203 | 1324 |
1204 #if ENABLE(GC_TRACING) | 1325 #if ENABLE(GC_TRACING) |
1205 visitor->setHostInfo(&address, "stack"); | 1326 visitor->setHostInfo(&address, "stack"); |
1206 #endif | 1327 #endif |
1207 if (hasVTable(header) && !vTableInitialized(header->payload())) | 1328 if (hasVTable(header) && !vTableInitialized(header->payload())) |
1208 visitor->markConservatively(header); | 1329 visitor->markConservatively(header); |
1209 else | 1330 else |
1210 visitor->mark(header, traceCallback(header)); | 1331 visitor->mark(header, traceCallback(header)); |
1211 } | 1332 } |
(...skipping 167 matching lines...)
1379 { | 1500 { |
1380 for (size_t i = 0; i < bufferSize; i++) | 1501 for (size_t i = 0; i < bufferSize; i++) |
1381 m_buffer[i] = Item(0, 0); | 1502 m_buffer[i] = Item(0, 0); |
1382 } | 1503 } |
1383 | 1504 |
1384 bool CallbackStack::isEmpty() | 1505 bool CallbackStack::isEmpty() |
1385 { | 1506 { |
1386 return m_current == &(m_buffer[0]) && !m_next; | 1507 return m_current == &(m_buffer[0]) && !m_next; |
1387 } | 1508 } |
1388 | 1509 |
| 1510 template<CallbackInvocationMode Mode> |
1389 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) | 1511 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor) |
1390 { | 1512 { |
1391 if (m_current == &(m_buffer[0])) { | 1513 if (m_current == &(m_buffer[0])) { |
1392 if (!m_next) { | 1514 if (!m_next) { |
1393 #ifndef NDEBUG | 1515 #ifndef NDEBUG |
1394 clearUnused(); | 1516 clearUnused(); |
1395 #endif | 1517 #endif |
1396 return false; | 1518 return false; |
1397 } | 1519 } |
1398 CallbackStack* nextStack = m_next; | 1520 CallbackStack* nextStack = m_next; |
1399 *first = nextStack; | 1521 *first = nextStack; |
1400 delete this; | 1522 delete this; |
1401 return nextStack->popAndInvokeCallback(first, visitor); | 1523 return nextStack->popAndInvokeCallback<Mode>(first, visitor); |
1402 } | 1524 } |
1403 Item* item = --m_current; | 1525 Item* item = --m_current; |
1404 | 1526 |
| 1527 // If the object being traced is located on a page which is dead don't |
| 1528 // trace it. This can happen when a conservative GC kept a dead object |
| 1529 // alive which pointed to a (now gone) object on the cleaned up page. |
| 1530 // Also if doing a thread local GC don't trace objects that are located |
| 1531 // on other thread's heaps, ie. pages where the terminating flag is not |
| 1532 // set. |
| 1533 BaseHeapPage* heapPage = pageHeaderFromObject(item->object()); |
| 1534 if (Mode == GlobalMarking && heapPage->orphaned()) { |
| 1535 // When doing a global GC we should only get a trace callback to an orphaned |
| 1536 // page if the GC is conservative. If it is not conservative there is |
| 1537 // a bug in the code where we have a dangling pointer to a page |
| 1538 // on the dead thread. |
| 1539 RELEASE_ASSERT(Heap::lastGCWasConservative()); |
| 1540 heapPage->setTracedAfterOrphaned(); |
| 1541 return true; |
| 1542 } |
| 1543 if (Mode == ThreadLocalMarking && (heapPage->orphaned() || !heapPage->terminating())) |
| 1544 return true; |
| 1545 // For WeaknessProcessing we should never reach orphaned pages since |
| 1546 // they should never be registered as objects on orphaned pages are not |
| 1547 // traced. We cannot assert this here since we might have an off-heap |
| 1548 // collection. However we assert it in Heap::pushWeakObjectPointerCallback. |
| 1549 |
1405 VisitorCallback callback = item->callback(); | 1550 VisitorCallback callback = item->callback(); |
1406 #if ENABLE(GC_TRACING) | 1551 #if ENABLE(GC_TRACING) |
1407 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback | 1552 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback |
1408 visitor->setHostInfo(item->object(), classOf(item->object())); | 1553 visitor->setHostInfo(item->object(), classOf(item->object())); |
1409 #endif | 1554 #endif |
1410 callback(visitor, item->object()); | 1555 callback(visitor, item->object()); |
1411 | 1556 |
1412 return true; | 1557 return true; |
1413 } | 1558 } |
1414 | 1559 |
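The Mode parameter threaded through popAndInvokeCallback is presumably a plain enum declared alongside CallbackStack in Heap.h; only its three values are visible in this patch. A sketch of the assumed declaration:

    // Assumed declaration; names taken from the call sites in this file.
    enum CallbackInvocationMode {
        GlobalMarking,      // full GC: an orphaned page may only be hit after a conservative GC
        ThreadLocalMarking, // thread-shutdown GC: skip objects on orphaned or non-terminating pages
        WeaknessProcessing  // weak-callback phase: orphaned pages must never be reached here
    };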
(...skipping 17 matching lines...)
1432 { | 1577 { |
1433 // Recurse first (bufferSize at a time) so we get to the newly added entries | 1578 // Recurse first (bufferSize at a time) so we get to the newly added entries |
1434 // last. | 1579 // last. |
1435 if (m_next) | 1580 if (m_next) |
1436 m_next->invokeOldestCallbacks(visitor); | 1581 m_next->invokeOldestCallbacks(visitor); |
1437 | 1582 |
1438 // This loop can tolerate entries being added by the callbacks after | 1583 // This loop can tolerate entries being added by the callbacks after |
1439 // iteration starts. | 1584 // iteration starts. |
1440 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1585 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
1441 Item& item = m_buffer[i]; | 1586 Item& item = m_buffer[i]; |
| 1587 |
| 1588 // We don't need to check for orphaned pages when popping an ephemeron |
| 1589 // callback since the callback is only pushed after the object containing |
| 1590 // it has been traced. There are basically three cases to consider: |
| 1591 // 1. Member<EphemeronCollection> |
| 1592 // 2. EphemeronCollection is part of a containing object |
| 1593 // 3. EphemeronCollection is a value object in a collection |
| 1594 // |
| 1595 // Ad. 1. In this case we push the start of the ephemeron on the |
| 1596 // marking stack and do the orphaned page check when popping it off |
| 1597 // the marking stack. |
| 1598 // Ad. 2. The containing object cannot be on an orphaned page since |
| 1599 // in that case we wouldn't have traced its parts. This also means |
| 1600 // the ephemeron collection is not on the orphaned page. |
| 1601 // Ad. 3. Is the same as 2. The collection containing the ephemeron |
| 1602 // collection as a value object cannot be on an orphaned page since |
| 1603 // it would not have traced its values in that case. |
1442 item.callback()(visitor, item.object()); | 1604 item.callback()(visitor, item.object()); |
1443 } | 1605 } |
1444 } | 1606 } |
1445 | 1607 |
1446 #ifndef NDEBUG | 1608 #ifndef NDEBUG |
1447 bool CallbackStack::hasCallbackForObject(const void* object) | 1609 bool CallbackStack::hasCallbackForObject(const void* object) |
1448 { | 1610 { |
1449 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1611 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
1450 Item* item = &m_buffer[i]; | 1612 Item* item = &m_buffer[i]; |
1451 if (item->object() == object) { | 1613 if (item->object() == object) { |
(...skipping 11 matching lines...) Expand all Loading... |
1463 public: | 1625 public: |
1464 #if ENABLE(GC_TRACING) | 1626 #if ENABLE(GC_TRACING) |
1465 typedef HashSet<uintptr_t> LiveObjectSet; | 1627 typedef HashSet<uintptr_t> LiveObjectSet; |
1466 typedef HashMap<String, LiveObjectSet> LiveObjectMap; | 1628 typedef HashMap<String, LiveObjectSet> LiveObjectMap; |
1467 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph; | 1629 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph; |
1468 #endif | 1630 #endif |
1469 | 1631 |
1470 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) | 1632 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) |
1471 { | 1633 { |
1472 ASSERT(header); | 1634 ASSERT(header); |
| 1635 // Check that we are not marking objects that are outside the heap by calling Heap::contains. |
| 1636 // However we cannot call Heap::contains when outside a GC and we call mark when doing weakness |
| 1637 // for ephemerons. Hence we only check when called within. |
| 1638 ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphanedPage(header)); |
1473 ASSERT(objectPointer); | 1639 ASSERT(objectPointer); |
1474 if (header->isMarked()) | 1640 if (header->isMarked()) |
1475 return; | 1641 return; |
1476 header->mark(); | 1642 header->mark(); |
1477 #if ENABLE(GC_TRACING) | 1643 #if ENABLE(GC_TRACING) |
1478 MutexLocker locker(objectGraphMutex()); | 1644 MutexLocker locker(objectGraphMutex()); |
1479 String className(classOf(objectPointer)); | 1645 String className(classOf(objectPointer)); |
1480 { | 1646 { |
1481 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet()); | 1647 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet()); |
1482 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); | 1648 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); |
(...skipping 201 matching lines...)
1684 }; | 1850 }; |
1685 | 1851 |
1686 void Heap::init() | 1852 void Heap::init() |
1687 { | 1853 { |
1688 ThreadState::init(); | 1854 ThreadState::init(); |
1689 CallbackStack::init(&s_markingStack); | 1855 CallbackStack::init(&s_markingStack); |
1690 CallbackStack::init(&s_weakCallbackStack); | 1856 CallbackStack::init(&s_weakCallbackStack); |
1691 CallbackStack::init(&s_ephemeronStack); | 1857 CallbackStack::init(&s_ephemeronStack); |
1692 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | 1858 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
1693 s_markingVisitor = new MarkingVisitor(); | 1859 s_markingVisitor = new MarkingVisitor(); |
| 1860 s_freePagePool = new FreePagePool(); |
| 1861 s_orphanedPagePool = new OrphanedPagePool(); |
1694 } | 1862 } |
1695 | 1863 |
1696 void Heap::shutdown() | 1864 void Heap::shutdown() |
1697 { | 1865 { |
1698 s_shutdownCalled = true; | 1866 s_shutdownCalled = true; |
1699 ThreadState::shutdownHeapIfNecessary(); | 1867 ThreadState::shutdownHeapIfNecessary(); |
1700 } | 1868 } |
1701 | 1869 |
1702 void Heap::doShutdown() | 1870 void Heap::doShutdown() |
1703 { | 1871 { |
1704 // We don't want to call doShutdown() twice. | 1872 // We don't want to call doShutdown() twice. |
1705 if (!s_markingVisitor) | 1873 if (!s_markingVisitor) |
1706 return; | 1874 return; |
1707 | 1875 |
1708 ASSERT(!ThreadState::isAnyThreadInGC()); | 1876 ASSERT(!ThreadState::isAnyThreadInGC()); |
1709 ASSERT(!ThreadState::attachedThreads().size()); | 1877 ASSERT(!ThreadState::attachedThreads().size()); |
1710 delete s_markingVisitor; | 1878 delete s_markingVisitor; |
1711 s_markingVisitor = 0; | 1879 s_markingVisitor = 0; |
1712 delete s_heapDoesNotContainCache; | 1880 delete s_heapDoesNotContainCache; |
1713 s_heapDoesNotContainCache = 0; | 1881 s_heapDoesNotContainCache = 0; |
| 1882 delete s_freePagePool; |
| 1883 s_freePagePool = 0; |
| 1884 delete s_orphanedPagePool; |
| 1885 s_orphanedPagePool = 0; |
1714 CallbackStack::shutdown(&s_weakCallbackStack); | 1886 CallbackStack::shutdown(&s_weakCallbackStack); |
1715 CallbackStack::shutdown(&s_markingStack); | 1887 CallbackStack::shutdown(&s_markingStack); |
1716 CallbackStack::shutdown(&s_ephemeronStack); | 1888 CallbackStack::shutdown(&s_ephemeronStack); |
1717 ThreadState::shutdown(); | 1889 ThreadState::shutdown(); |
1718 } | 1890 } |
1719 | 1891 |
1720 BaseHeapPage* Heap::contains(Address address) | 1892 BaseHeapPage* Heap::contains(Address address) |
1721 { | 1893 { |
1722 ASSERT(ThreadState::isAnyThreadInGC()); | 1894 ASSERT(ThreadState::isAnyThreadInGC()); |
1723 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1895 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1724 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1896 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1725 BaseHeapPage* page = (*it)->contains(address); | 1897 BaseHeapPage* page = (*it)->contains(address); |
1726 if (page) | 1898 if (page) |
1727 return page; | 1899 return page; |
1728 } | 1900 } |
1729 return 0; | 1901 return 0; |
1730 } | 1902 } |
1731 | 1903 |
| 1904 #ifndef NDEBUG |
| 1905 bool Heap::containedInHeapOrOrphanedPage(void* object) |
| 1906 { |
| 1907 return contains(object) || orphanedPagePool()->contains(object); |
| 1908 } |
| 1909 #endif |
| 1910 |
1732 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1911 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
1733 { | 1912 { |
1734 ASSERT(ThreadState::isAnyThreadInGC()); | 1913 ASSERT(ThreadState::isAnyThreadInGC()); |
1735 | 1914 |
1736 #ifdef NDEBUG | 1915 #ifdef NDEBUG |
1737 if (s_heapDoesNotContainCache->lookup(address)) | 1916 if (s_heapDoesNotContainCache->lookup(address)) |
1738 return 0; | 1917 return 0; |
1739 #endif | 1918 #endif |
1740 | 1919 |
1741 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 1920 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
(...skipping 58 matching lines...)
1800 builder.append("\n\t"); | 1979 builder.append("\n\t"); |
1801 builder.append(frameToName.nullableName()); | 1980 builder.append(frameToName.nullableName()); |
1802 --framesToShow; | 1981 --framesToShow; |
1803 } | 1982 } |
1804 return builder.toString().replace("WebCore::", ""); | 1983 return builder.toString().replace("WebCore::", ""); |
1805 } | 1984 } |
1806 #endif | 1985 #endif |
1807 | 1986 |
1808 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 1987 void Heap::pushTraceCallback(void* object, TraceCallback callback) |
1809 { | 1988 { |
1810 ASSERT(Heap::contains(object)); | 1989 ASSERT(Heap::containedInHeapOrOrphanedPage(object)); |
1811 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); | 1990 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); |
1812 *slot = CallbackStack::Item(object, callback); | 1991 *slot = CallbackStack::Item(object, callback); |
1813 } | 1992 } |
1814 | 1993 |
| 1994 template<CallbackInvocationMode Mode> |
1815 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 1995 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
1816 { | 1996 { |
1817 return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor); | 1997 return s_markingStack->popAndInvokeCallback<Mode>(&s_markingStack, visitor); |
1818 } | 1998 } |
1819 | 1999 |
1820 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback) | 2000 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback) |
1821 { | 2001 { |
1822 ASSERT(Heap::contains(cell)); | 2002 ASSERT(!Heap::orphanedPagePool()->contains(cell)); |
1823 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack); | 2003 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallbackStack); |
1824 *slot = CallbackStack::Item(cell, callback); | 2004 *slot = CallbackStack::Item(cell, callback); |
1825 } | 2005 } |
1826 | 2006 |
1827 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback) | 2007 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointerCallback callback) |
1828 { | 2008 { |
1829 ASSERT(Heap::contains(object)); | 2009 ASSERT(Heap::contains(object)); |
1830 BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeaderAddress(reinterpret_cast<Address>(object))); | 2010 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object); |
| 2011 ASSERT(!heapPageForObject->orphaned()); |
1831 ASSERT(Heap::contains(object) == heapPageForObject); | 2012 ASSERT(Heap::contains(object) == heapPageForObject); |
1832 ThreadState* state = heapPageForObject->threadState(); | 2013 ThreadState* state = heapPageForObject->threadState(); |
1833 state->pushWeakObjectPointerCallback(closure, callback); | 2014 state->pushWeakObjectPointerCallback(closure, callback); |
1834 } | 2015 } |
1835 | 2016 |
1836 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) | 2017 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) |
1837 { | 2018 { |
1838 return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visit
or); | 2019 return s_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&s_weak
CallbackStack, visitor); |
1839 } | 2020 } |
1840 | 2021 |
1841 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | 2022 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) |
1842 { | 2023 { |
| 2024 // Check that the ephemeron table being pushed onto the stack is not on an |
| 2025 // orphaned page. |
| 2026 ASSERT(!Heap::orphanedPagePool()->contains(table)); |
| 2027 |
1843 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); | 2028 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStack); |
1844 *slot = CallbackStack::Item(table, iterationCallback); | 2029 *slot = CallbackStack::Item(table, iterationCallback); |
1845 | 2030 |
1846 // We use the callback stack of weak cell pointers for the ephemeronIteratio
nDone callbacks. | 2031 // We use the callback stack of weak cell pointers for the ephemeronIteratio
nDone callbacks. |
1847 // These callbacks are called right after marking and before any thread comm
ences execution | 2032 // These callbacks are called right after marking and before any thread comm
ences execution |
1848 // so it suits our needs for telling the ephemerons that the iteration is do
ne. | 2033 // so it suits our needs for telling the ephemerons that the iteration is do
ne. |
1849 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallbac
k); | 2034 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallbac
k); |
1850 } | 2035 } |
1851 | 2036 |
1852 #ifndef NDEBUG | 2037 #ifndef NDEBUG |
(...skipping 34 matching lines...)
1887 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); | 2072 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); |
1888 #endif | 2073 #endif |
1889 | 2074 |
1890 // Disallow allocation during garbage collection (but not | 2075 // Disallow allocation during garbage collection (but not |
1891 // during the finalization that happens when the gcScope is | 2076 // during the finalization that happens when the gcScope is |
1892 // torn down). | 2077 // torn down). |
1893 NoAllocationScope<AnyThread> noAllocationScope; | 2078 NoAllocationScope<AnyThread> noAllocationScope; |
1894 | 2079 |
1895 prepareForGC(); | 2080 prepareForGC(); |
1896 | 2081 |
1897 ThreadState::visitRoots(s_markingVisitor); | 2082 traceRootsAndPerformGlobalWeakProcessing<GlobalMarking>(); |
| 2083 |
| 2084 // After a global marking we know that any orphaned page that was not reached |
| 2085 // cannot be reached in a subsequent GC. This is due to a thread either having |
| 2086 // swept its heap or having done a "poor mans sweep" in prepareForGC which marks |
| 2087 // objects that are dead, but not swept in the previous GC as dead. In this GC's |
| 2088 // marking we check that any object marked as dead is not traced. E.g. via a |
| 2089 // conservatively found pointer or a programming error with an object containing |
| 2090 // a dangling pointer. |
| 2091 orphanedPagePool()->decommitOrphanedPages(); |
| 2092 |
| 2093 #if ENABLE(GC_TRACING) |
| 2094 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); |
| 2095 #endif |
| 2096 |
| 2097 if (blink::Platform::current()) { |
| 2098 uint64_t objectSpaceSize; |
| 2099 uint64_t allocatedSpaceSize; |
| 2100 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); |
| 2101 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
| 2102 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); |
| 2103 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); |
| 2104 } |
| 2105 } |
| 2106 |
| 2107 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 2108 { |
| 2109 // We explicitly do not enter a safepoint while doing thread specific |
| 2110 // garbage collection since we don't want to allow a global GC at the |
| 2111 // same time as a thread local GC. |
| 2112 |
| 2113 { |
| 2114 NoAllocationScope<AnyThread> noAllocationScope; |
| 2115 |
| 2116 state->enterGC(); |
| 2117 state->prepareForGC(); |
| 2118 |
| 2119 traceRootsAndPerformGlobalWeakProcessing<ThreadLocalMarking>(); |
| 2120 |
| 2121 state->leaveGC(); |
| 2122 } |
| 2123 state->performPendingSweep(); |
| 2124 } |
| 2125 |
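collectGarbageForTerminatingThread is the entry point for the thread-local GC; its caller is not part of this file. A hypothetical sketch of how the ThreadState shutdown path might drive it, assuming a cleanup routine on ThreadState (the caller's name and the setTerminating call are assumptions, not confirmed by this patch):

    // Hypothetical caller, for illustration only.
    void ThreadState::cleanup()
    {
        setTerminating();                                // assumed: flags this thread's pages as terminating
        Heap::collectGarbageForTerminatingThread(this);  // precise, thread-local GC (ThreadLocalMarking)
        for (int i = 0; i < NumberOfHeaps; ++i)
            m_heaps[i]->cleanupPages();                  // whatever survives goes to the orphaned page pool
    }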
| 2126 template<CallbackInvocationMode Mode> |
| 2127 void Heap::traceRootsAndPerformGlobalWeakProcessing() |
| 2128 { |
| 2129 if (Mode == ThreadLocalMarking) |
| 2130 ThreadState::current()->visitLocalRoots(s_markingVisitor); |
| 2131 else |
| 2132 ThreadState::visitRoots(s_markingVisitor); |
1898 | 2133 |
1899 // Ephemeron fixed point loop. | 2134 // Ephemeron fixed point loop. |
1900 do { | 2135 do { |
1901 // Recursively mark all objects that are reachable from the roots. | 2136 // Recursively mark all objects that are reachable from the roots for |
1902 while (popAndInvokeTraceCallback(s_markingVisitor)) { } | 2137 // this thread. If Mode is ThreadLocalMarking don't continue tracing if |
| 2138 // the trace hits an object on another thread's heap. |
| 2139 while (popAndInvokeTraceCallback<Mode>(s_markingVisitor)) { } |
1903 | 2140 |
1904 // Mark any strong pointers that have now become reachable in ephemeron | 2141 // Mark any strong pointers that have now become reachable in ephemeron |
1905 // maps. | 2142 // maps. |
1906 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); | 2143 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); |
1907 | 2144 |
1908 // Rerun loop if ephemeron processing queued more objects for tracing. | 2145 // Rerun loop if ephemeron processing queued more objects for tracing. |
1909 } while (!s_markingStack->isEmpty()); | 2146 } while (!s_markingStack->isEmpty()); |
1910 | 2147 |
1911 // Call weak callbacks on objects that may now be pointing to dead | 2148 // Call weak callbacks on objects that may now be pointing to dead |
1912 // objects and call ephemeronIterationDone callbacks on weak tables | 2149 // objects and call ephemeronIterationDone callbacks on weak tables |
1913 // to do cleanup (specifically clear the queued bits for weak hash | 2150 // to do cleanup (specifically clear the queued bits for weak hash |
1914 // tables). | 2151 // tables). |
1915 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } | 2152 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } |
1916 | 2153 |
1917 CallbackStack::clear(&s_ephemeronStack); | 2154 CallbackStack::clear(&s_ephemeronStack); |
1918 | 2155 |
1919 // It is not permitted to trace pointers of live objects in the weak | 2156 // It is not permitted to trace pointers of live objects in the weak |
1920 // callback phase, so the marking stack should still be empty here. | 2157 // callback phase, so the marking stack should still be empty here. |
1921 ASSERT(s_markingStack->isEmpty()); | 2158 ASSERT(s_markingStack->isEmpty()); |
1922 | |
1923 #if ENABLE(GC_TRACING) | |
1924 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); | |
1925 #endif | |
1926 | |
1927 if (blink::Platform::current()) { | |
1928 uint64_t objectSpaceSize; | |
1929 uint64_t allocatedSpaceSize; | |
1930 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); | |
1931 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | |
1932 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
1933 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
1934 } | |
1935 } | 2159 } |
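The do/while loop above is a standard ephemeron fixed-point iteration: draining the marking stack can mark new ephemeron keys, and processing the ephemerons can in turn push new objects onto the marking stack, so the two phases alternate until neither produces more work. A self-contained sketch of the same idea, with hypothetical types in place of Blink's visitor and callback-stack machinery:

#include <vector>

struct Obj {
    bool marked = false;
    std::vector<Obj*> strongRefs; // ordinary traced pointers
};

struct Ephemeron { Obj* key; Obj* value; };

static void pushIfUnmarked(std::vector<Obj*>& worklist, Obj* obj)
{
    if (obj && !obj->marked) {
        obj->marked = true;
        worklist.push_back(obj);
    }
}

void markWithEphemerons(const std::vector<Obj*>& roots, const std::vector<Ephemeron>& ephemerons)
{
    std::vector<Obj*> worklist;
    for (Obj* root : roots)
        pushIfUnmarked(worklist, root);

    do {
        // Drain the worklist: trace strong references transitively.
        while (!worklist.empty()) {
            Obj* obj = worklist.back();
            worklist.pop_back();
            for (Obj* ref : obj->strongRefs)
                pushIfUnmarked(worklist, ref);
        }
        // Ephemeron pass: a value becomes strongly reachable only once its key
        // is marked. This can add new work, so rerun the outer loop if it does.
        for (const Ephemeron& e : ephemerons) {
            if (e.key && e.key->marked)
                pushIfUnmarked(worklist, e.value);
        }
    } while (!worklist.empty());
}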
1936 | 2160 |
1937 void Heap::collectAllGarbage() | 2161 void Heap::collectAllGarbage() |
1938 { | 2162 { |
1939 // FIXME: oilpan: we should perform a single GC and everything | 2163 // FIXME: oilpan: we should perform a single GC and everything |
1940 // should die. Unfortunately that is not the case for all objects | 2164 // should die. Unfortunately that is not the case for all objects |
1941 // because the hierarchy has not been completely moved to the heap and | 2165 // because the hierarchy has not been completely moved to the heap and |
1942 // some heap-allocated objects own objects that contain persistents | 2166 // some heap-allocated objects own objects that contain persistents |
1943 // pointing to other heap-allocated objects. | 2167 // pointing to other heap-allocated objects. |
1944 for (int i = 0; i < 5; i++) | 2168 for (int i = 0; i < 5; i++) |
1945 collectGarbage(ThreadState::NoHeapPointersOnStack); | 2169 collectGarbage(ThreadState::NoHeapPointersOnStack); |
1946 } | 2170 } |
1947 | 2171 |
1948 void Heap::setForcePreciseGCForTesting() | 2172 void Heap::setForcePreciseGCForTesting() |
1949 { | 2173 { |
1950 ThreadState::current()->setForcePreciseGCForTesting(true); | 2174 ThreadState::current()->setForcePreciseGCForTesting(true); |
1951 } | 2175 } |
1952 | 2176 |
| 2177 template<typename Header> |
| 2178 void ThreadHeap<Header>::prepareHeapForTermination() |
| 2179 { |
| 2180 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { |
| 2181 page->setTerminating(); |
| 2182 } |
| 2183 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) { |
| 2184 current->setTerminating(); |
| 2185 } |
| 2186 } |
| 2187 |
1953 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize) | 2188 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceSize) |
1954 { | 2189 { |
1955 *objectSpaceSize = 0; | 2190 *objectSpaceSize = 0; |
1956 *allocatedSpaceSize = 0; | 2191 *allocatedSpaceSize = 0; |
1957 ASSERT(ThreadState::isAnyThreadInGC()); | 2192 ASSERT(ThreadState::isAnyThreadInGC()); |
1958 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2193 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1959 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; | 2194 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; |
1960 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 2195 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
1961 *objectSpaceSize += (*it)->stats().totalObjectSpace(); | 2196 *objectSpaceSize += (*it)->stats().totalObjectSpace(); |
1962 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); | 2197 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); |
(...skipping 30 matching lines...) |
1993 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); | 2228 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); |
1994 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 2229 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
1995 (*it)->makeConsistentForGC(); | 2230 (*it)->makeConsistentForGC(); |
1996 } | 2231 } |
1997 | 2232 |
1998 // Force template instantiations for the types that we need. | 2233 // Force template instantiations for the types that we need. |
1999 template class HeapPage<FinalizedHeapObjectHeader>; | 2234 template class HeapPage<FinalizedHeapObjectHeader>; |
2000 template class HeapPage<HeapObjectHeader>; | 2235 template class HeapPage<HeapObjectHeader>; |
2001 template class ThreadHeap<FinalizedHeapObjectHeader>; | 2236 template class ThreadHeap<FinalizedHeapObjectHeader>; |
2002 template class ThreadHeap<HeapObjectHeader>; | 2237 template class ThreadHeap<HeapObjectHeader>; |
| 2238 template bool CallbackStack::popAndInvokeCallback<GlobalMarking>(CallbackStack**, Visitor*); |
| 2239 template bool CallbackStack::popAndInvokeCallback<ThreadLocalMarking>(CallbackStack**, Visitor*); |
| 2240 template bool CallbackStack::popAndInvokeCallback<WeaknessProcessing>(CallbackStack**, Visitor*); |
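The three new "template bool CallbackStack::popAndInvokeCallback<...>" lines are explicit instantiation definitions: since the member template's body presumably lives in this .cpp file rather than in the header, each CallbackInvocationMode used from other translation units has to be instantiated here or those references would fail to link. A small standalone sketch of the pattern, with hypothetical names:

// stack.h (sketch): the member template is only declared here; its body lives
// in the .cpp, so every Mode used by other translation units must be
// explicitly instantiated there.
enum Mode { Global, ThreadLocal, Weakness };

class Stack {
public:
    template<Mode M>
    bool popAndInvoke(int* out); // defined in stack.cpp
};

// stack.cpp (sketch)
template<Mode M>
bool Stack::popAndInvoke(int* out)
{
    *out = static_cast<int>(M); // stand-in for "pop an entry and run it"
    return false;               // stand-in for "the stack is now empty"
}

// Without these lines, callers in other .cpp files would get undefined-symbol
// link errors for each Mode they use.
template bool Stack::popAndInvoke<Global>(int*);
template bool Stack::popAndInvoke<ThreadLocal>(int*);
template bool Stack::popAndInvoke<Weakness>(int*);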
2003 | 2241 |
2004 Visitor* Heap::s_markingVisitor; | 2242 Visitor* Heap::s_markingVisitor; |
2005 CallbackStack* Heap::s_markingStack; | 2243 CallbackStack* Heap::s_markingStack; |
2006 CallbackStack* Heap::s_weakCallbackStack; | 2244 CallbackStack* Heap::s_weakCallbackStack; |
2007 CallbackStack* Heap::s_ephemeronStack; | 2245 CallbackStack* Heap::s_ephemeronStack; |
2008 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2246 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
2009 bool Heap::s_shutdownCalled = false; | 2247 bool Heap::s_shutdownCalled = false; |
2010 bool Heap::s_lastGCWasConservative = false; | 2248 bool Heap::s_lastGCWasConservative = false; |
| 2249 FreePagePool* Heap::s_freePagePool; |
| 2250 OrphanedPagePool* Heap::s_orphanedPagePool; |
2011 } | 2251 } |