Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 405 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 416 } | 416 } |
| 417 | 417 |
| 418 NO_SANITIZE_ADDRESS | 418 NO_SANITIZE_ADDRESS |
| 419 void HeapObjectHeader::unmark() | 419 void HeapObjectHeader::unmark() |
| 420 { | 420 { |
| 421 checkHeader(); | 421 checkHeader(); |
| 422 m_size &= ~markBitMask; | 422 m_size &= ~markBitMask; |
| 423 } | 423 } |
| 424 | 424 |
| 425 NO_SANITIZE_ADDRESS | 425 NO_SANITIZE_ADDRESS |
| 426 bool HeapObjectHeader::hasDebugMark() const | 426 bool HeapObjectHeader::hasDeadMark() const |
| 427 { | 427 { |
| 428 checkHeader(); | 428 checkHeader(); |
| 429 return m_size & debugBitMask; | 429 return m_size & deadBitMask; |
| 430 } | 430 } |
| 431 | 431 |
| 432 NO_SANITIZE_ADDRESS | 432 NO_SANITIZE_ADDRESS |
| 433 void HeapObjectHeader::clearDebugMark() | 433 void HeapObjectHeader::clearDeadMark() |
| 434 { | 434 { |
| 435 checkHeader(); | 435 checkHeader(); |
| 436 m_size &= ~debugBitMask; | 436 m_size &= ~deadBitMask; |
| 437 } | 437 } |
| 438 | 438 |
| 439 NO_SANITIZE_ADDRESS | 439 NO_SANITIZE_ADDRESS |
| 440 void HeapObjectHeader::setDebugMark() | 440 void HeapObjectHeader::setDeadMark() |
| 441 { | 441 { |
| 442 ASSERT(!isMarked()); | |
| 442 checkHeader(); | 443 checkHeader(); |
| 443 m_size |= debugBitMask; | 444 m_size |= deadBitMask; |
| 444 } | 445 } |
| 445 | 446 |
| 446 #ifndef NDEBUG | 447 #ifndef NDEBUG |
| 447 NO_SANITIZE_ADDRESS | 448 NO_SANITIZE_ADDRESS |
| 448 void HeapObjectHeader::zapMagic() | 449 void HeapObjectHeader::zapMagic() |
| 449 { | 450 { |
| 450 m_magic = zappedMagic; | 451 m_magic = zappedMagic; |
| 451 } | 452 } |
| 452 #endif | 453 #endif |
| 453 | 454 |
| (...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 493 return heapObjectHeader()->unmark(); | 494 return heapObjectHeader()->unmark(); |
| 494 } | 495 } |
| 495 | 496 |
| 496 template<typename Header> | 497 template<typename Header> |
| 497 bool LargeHeapObject<Header>::isMarked() | 498 bool LargeHeapObject<Header>::isMarked() |
| 498 { | 499 { |
| 499 return heapObjectHeader()->isMarked(); | 500 return heapObjectHeader()->isMarked(); |
| 500 } | 501 } |
| 501 | 502 |
| 502 template<typename Header> | 503 template<typename Header> |
| 504 void LargeHeapObject<Header>::setDeadMark() | |
| 505 { | |
| 506 heapObjectHeader()->setDeadMark(); | |
| 507 } | |
| 508 | |
| 509 template<typename Header> | |
| 503 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address addr ess) | 510 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address addr ess) |
| 504 { | 511 { |
| 505 ASSERT(contains(address)); | 512 ASSERT(contains(address)); |
| 506 if (!objectContains(address)) | 513 if (!objectContains(address) || heapObjectHeader()->hasDeadMark()) |
| 507 return; | 514 return; |
| 508 #if ENABLE(GC_TRACING) | 515 #if ENABLE(GC_TRACING) |
| 509 visitor->setHostInfo(&address, "stack"); | 516 visitor->setHostInfo(&address, "stack"); |
| 510 #endif | 517 #endif |
| 511 mark(visitor); | 518 mark(visitor); |
| 512 } | 519 } |
| 513 | 520 |
| 514 template<> | 521 template<> |
| 515 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) | 522 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) |
| 516 { | 523 { |
| (...skipping 28 matching lines...) Expand all Loading... | |
| 545 | 552 |
| 546 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* pa yload) | 553 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* pa yload) |
| 547 { | 554 { |
| 548 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); | 555 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); |
| 549 FinalizedHeapObjectHeader* header = | 556 FinalizedHeapObjectHeader* header = |
| 550 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize) ; | 557 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize) ; |
| 551 return header; | 558 return header; |
| 552 } | 559 } |
| 553 | 560 |
| 554 template<typename Header> | 561 template<typename Header> |
| 555 ThreadHeap<Header>::ThreadHeap(ThreadState* state) | 562 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) |
| 556 : m_currentAllocationPoint(0) | 563 : m_currentAllocationPoint(0) |
| 557 , m_remainingAllocationSize(0) | 564 , m_remainingAllocationSize(0) |
| 558 , m_firstPage(0) | 565 , m_firstPage(0) |
| 559 , m_firstLargeHeapObject(0) | 566 , m_firstLargeHeapObject(0) |
| 560 , m_biggestFreeListIndex(0) | 567 , m_biggestFreeListIndex(0) |
| 561 , m_threadState(state) | 568 , m_threadState(state) |
| 562 , m_pagePool(0) | 569 , m_index(index) |
| 563 { | 570 { |
| 564 clearFreeLists(); | 571 clearFreeLists(); |
| 565 } | 572 } |
| 566 | 573 |
| 567 template<typename Header> | 574 template<typename Header> |
| 568 ThreadHeap<Header>::~ThreadHeap() | 575 ThreadHeap<Header>::~ThreadHeap() |
| 569 { | 576 { |
| 570 clearFreeLists(); | 577 ASSERT(!m_firstPage); |
| 571 if (!ThreadState::current()->isMainThread()) | 578 ASSERT(!m_firstLargeHeapObject); |
| 572 assertEmpty(); | |
| 573 deletePages(); | |
| 574 } | 579 } |
| 575 | 580 |
| 576 template<typename Header> | 581 template<typename Header> |
| 582 void ThreadHeap<Header>::cleanupPages() | |
| 583 { | |
| 584 clearFreeLists(); | |
| 585 flushHeapContainsCache(); | |
| 586 | |
| 587 // Add the ThreadHeap's pages to the orphanedPagePool. | |
| 588 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next) | |
| 589 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); | |
| 590 m_firstPage = 0; | |
| 591 | |
| 592 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObj ect; largeObject = largeObject->m_next) | |
| 593 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); | |
| 594 m_firstLargeHeapObject = 0; | |
| 595 } | |
| 596 | |
| 597 template<typename Header> | |
| 577 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) | 598 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) |
| 578 { | 599 { |
| 579 size_t allocationSize = allocationSizeFromSize(size); | 600 size_t allocationSize = allocationSizeFromSize(size); |
| 580 if (threadState()->shouldGC()) { | 601 if (threadState()->shouldGC()) { |
| 581 if (threadState()->shouldForceConservativeGC()) | 602 if (threadState()->shouldForceConservativeGC()) |
| 582 Heap::collectGarbage(ThreadState::HeapPointersOnStack); | 603 Heap::collectGarbage(ThreadState::HeapPointersOnStack); |
| 583 else | 604 else |
| 584 threadState()->setGCRequested(); | 605 threadState()->setGCRequested(); |
| 585 } | 606 } |
| 586 ensureCurrentAllocation(allocationSize, gcInfo); | 607 ensureCurrentAllocation(allocationSize, gcInfo); |
| (...skipping 143 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 730 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext) | 751 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext) |
| 731 { | 752 { |
| 732 flushHeapContainsCache(); | 753 flushHeapContainsCache(); |
| 733 object->unlink(previousNext); | 754 object->unlink(previousNext); |
| 734 object->finalize(); | 755 object->finalize(); |
| 735 | 756 |
| 736 // Unpoison the object header and allocationGranularity bytes after the | 757 // Unpoison the object header and allocationGranularity bytes after the |
| 737 // object before freeing. | 758 // object before freeing. |
| 738 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); | 759 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); |
| 739 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity); | 760 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity); |
| 740 delete object->storage(); | 761 |
| 741 } | 762 if (object->terminating()) { |
| 763 ASSERT(ThreadState::current()->isTerminating()); | |
| 764 // The thread is shutting down so this object is being removed as part | |
| 765 // of a thread local GC. In that case the object could be traced in the | |
| 766 // next global GC either due to a dead object being traced via a | |
| 767 // conservative pointer or due to a programming error where an object | |
| 768 // in another thread heap keeps a dangling pointer to this object. | |
| 769 // To guard against this we put the large object memory in the | |
| 770 // orphanedPagePool to ensure it is still reachable. After the next glob al | |
| 771 // GC it can be released assuming no rogue/dangling pointers refer to | |
| 772 // it. | |
| 773 // NOTE: large objects are not moved to the free page pool as it is | |
| 774 // unlikely they can be reused due to their individual sizes. | |
| 775 Heap::orphanedPagePool()->addOrphanedPage(m_index, object); | |
| 776 } else { | |
| 777 PageMemory* memory = object->storage(); | |
| 778 object->~LargeHeapObject<Header>(); | |
| 779 delete memory; | |
| 780 } | |
| 781 } | |
| 782 | |
| 783 template<typename DataType> | |
| 784 PagePool<DataType>::PagePool() | |
| 785 { | |
| 786 for (int i = 0; i < NumberOfHeaps; ++i) { | |
| 787 m_pool[i] = 0; | |
| 788 } | |
| 789 } | |
| 790 | |
| 791 FreePagePool::~FreePagePool() | |
| 792 { | |
| 793 for (int index = 0; index < NumberOfHeaps; ++index) { | |
| 794 while (PoolEntry* entry = m_pool[index]) { | |
| 795 m_pool[index] = entry->next; | |
| 796 PageMemory* memory = entry->data; | |
| 797 ASSERT(memory); | |
| 798 delete memory; | |
| 799 delete entry; | |
| 800 } | |
| 801 } | |
| 802 } | |
| 803 | |
| 804 void FreePagePool::addFreePage(int index, PageMemory* memory) | |
| 805 { | |
| 806 // When adding a page to the pool we decommit it to ensure it is unused | |
| 807 // while in the pool. This also allows the physical memory, backing the | |
| 808 // page, to be given back to the OS. | |
| 809 memory->decommit(); | |
| 810 MutexLocker locker(m_mutex[index]); | |
| 811 PoolEntry* entry = new PoolEntry(memory, m_pool[index]); | |
| 812 m_pool[index] = entry; | |
| 813 } | |
| 814 | |
| 815 PageMemory* FreePagePool::takeFreePage(int index) | |
| 816 { | |
| 817 MutexLocker locker(m_mutex[index]); | |
| 818 while (PoolEntry* entry = m_pool[index]) { | |
| 819 m_pool[index] = entry->next; | |
| 820 PageMemory* memory = entry->data; | |
| 821 ASSERT(memory); | |
| 822 delete entry; | |
| 823 if (memory->commit()) | |
| 824 return memory; | |
| 825 | |
| 826 // We got some memory, but failed to commit it, try again. | |
| 827 delete memory; | |
| 828 } | |
| 829 return 0; | |
| 830 } | |
| 831 | |
| 832 OrphanedPagePool::~OrphanedPagePool() | |
| 833 { | |
| 834 for (int index = 0; index < NumberOfHeaps; ++index) { | |
| 835 while (PoolEntry* entry = m_pool[index]) { | |
| 836 m_pool[index] = entry->next; | |
| 837 BaseHeapPage* page = entry->data; | |
| 838 delete entry; | |
| 839 PageMemory* memory = page->storage(); | |
| 840 ASSERT(memory); | |
| 841 page->~BaseHeapPage(); | |
| 842 delete memory; | |
| 843 } | |
| 844 } | |
| 845 } | |
| 846 | |
| 847 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page) | |
| 848 { | |
| 849 page->markOrphaned(); | |
| 850 PoolEntry* entry = new PoolEntry(page, m_pool[index]); | |
| 851 m_pool[index] = entry; | |
| 852 } | |
| 853 | |
| 854 void OrphanedPagePool::decommitOrphanedPages() | |
| 855 { | |
| 856 #ifndef NDEBUG | |
| 857 // No locking needed as all threads are at safepoints at this point in time. | |
| 858 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); | |
| 859 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | |
| 860 ASSERT((*it)->isAtSafePoint()); | |
| 861 #endif | |
| 862 | |
| 863 for (int index = 0; index < NumberOfHeaps; ++index) { | |
| 864 PoolEntry* entry = m_pool[index]; | |
| 865 PoolEntry** prevNext = &m_pool[index]; | |
| 866 while (entry) { | |
| 867 BaseHeapPage* page = entry->data; | |
| 868 if (page->tracedAfterOrphaned()) { | |
| 869 // If the orphaned page was traced in the last GC it is not | |
| 870 // decommited. We only decommit a page, ie. put it in the | |
| 871 // memory pool, when the page has no objects pointing to it. | |
| 872 // We remark the page as orphaned to clear the tracedAfterOrphan ed | |
| 873 // flag and any object trace bits that were set during tracing. | |
| 874 page->markOrphaned(); | |
| 875 prevNext = &entry->next; | |
| 876 entry = entry->next; | |
| 877 continue; | |
| 878 } | |
| 879 | |
| 880 // Page was not traced. Check if we should reuse the memory or just | |
| 881 // free it. Large object memory is not reused, but freed, normal | |
| 882 // blink heap pages are reused. | |
| 883 // NOTE: We call the destructor before freeing or adding to the | |
| 884 // free page pool. | |
| 885 PageMemory* memory = page->storage(); | |
| 886 if (page->isLargeObject()) { | |
| 887 page->~BaseHeapPage(); | |
| 888 delete memory; | |
| 889 } else { | |
| 890 page->~BaseHeapPage(); | |
| 891 Heap::freePagePool()->addFreePage(index, memory); | |
| 892 } | |
| 893 | |
| 894 PoolEntry* deadEntry = entry; | |
| 895 entry = entry->next; | |
| 896 *prevNext = entry; | |
| 897 delete deadEntry; | |
| 898 } | |
| 899 } | |
| 900 } | |
| 901 | |
| 902 #ifndef NDEBUG | |
| 903 bool OrphanedPagePool::contains(void* object) | |
| 904 { | |
| 905 for (int index = 0; index < NumberOfHeaps; ++index) { | |
| 906 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { | |
| 907 BaseHeapPage* page = entry->data; | |
| 908 if (page->contains(reinterpret_cast<Address>(object))) | |
| 909 return true; | |
| 910 } | |
| 911 } | |
| 912 return false; | |
| 913 } | |
| 914 #endif | |
| 742 | 915 |
| 743 template<> | 916 template<> |
| 744 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) | 917 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) |
| 745 { | 918 { |
| 746 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on | 919 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on |
| 747 // the heap should be unused (ie. 0). | 920 // the heap should be unused (ie. 0). |
| 748 allocatePage(0); | 921 allocatePage(0); |
| 749 } | 922 } |
| 750 | 923 |
| 751 template<> | 924 template<> |
| 752 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) | 925 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) |
| 753 { | 926 { |
| 754 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap | 927 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap |
| 755 // since it is the same for all objects | 928 // since it is the same for all objects |
| 756 ASSERT(gcInfo); | 929 ASSERT(gcInfo); |
| 757 allocatePage(gcInfo); | 930 allocatePage(gcInfo); |
| 758 } | 931 } |
| 759 | 932 |
| 760 template<typename Header> | 933 template <typename Header> |
| 761 void ThreadHeap<Header>::clearPagePool() | 934 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page) |
| 762 { | |
| 763 while (takePageFromPool()) { } | |
| 764 } | |
| 765 | |
| 766 template<typename Header> | |
| 767 PageMemory* ThreadHeap<Header>::takePageFromPool() | |
| 768 { | |
| 769 Heap::flushHeapDoesNotContainCache(); | |
| 770 while (PagePoolEntry* entry = m_pagePool) { | |
| 771 m_pagePool = entry->next(); | |
| 772 PageMemory* storage = entry->storage(); | |
| 773 delete entry; | |
| 774 | |
| 775 if (storage->commit()) | |
| 776 return storage; | |
| 777 | |
| 778 // Failed to commit pooled storage. Release it. | |
| 779 delete storage; | |
| 780 } | |
| 781 | |
| 782 return 0; | |
| 783 } | |
| 784 | |
| 785 template<typename Header> | |
| 786 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage) | |
| 787 { | 935 { |
| 788 flushHeapContainsCache(); | 936 flushHeapContainsCache(); |
| 789 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); | 937 if (page->terminating()) { |
| 790 m_pagePool = entry; | 938 ASSERT(ThreadState::current()->isTerminating()); |
| 791 } | 939 // The thread is shutting down so this page is being removed as part |
| 792 | 940 // of a thread local GC. In that case the page could be accessed in the |
| 793 template <typename Header> | 941 // next global GC either due to a dead object being traced via a |
| 794 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) | 942 // conservative pointer or due to a programming error where an object |
| 795 { | 943 // in another thread heap keeps a dangling pointer to this object. |
| 796 PageMemory* storage = page->storage(); | 944 // To guard against this we put the page in the orphanedPagePool to |
| 797 storage->decommit(); | 945 // ensure it is still reachable. After the next global GC it can be |
| 798 addPageMemoryToPool(storage); | 946 // decommitted and moved to the page pool assuming no rogue/dangling |
| 947 // pointers refer to it. | |
| 948 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); | |
| 949 } else { | |
| 950 PageMemory* memory = page->storage(); | |
| 951 page->~HeapPage<Header>(); | |
| 952 Heap::freePagePool()->addFreePage(m_index, memory); | |
| 953 } | |
| 799 } | 954 } |
| 800 | 955 |
| 801 template<typename Header> | 956 template<typename Header> |
| 802 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) | 957 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) |
| 803 { | 958 { |
| 804 Heap::flushHeapDoesNotContainCache(); | 959 Heap::flushHeapDoesNotContainCache(); |
| 805 PageMemory* pageMemory = takePageFromPool(); | 960 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
| 806 if (!pageMemory) { | 961 // We continue allocating page memory until we succeed in getting one. |
| 962 // Since the FreePagePool is global other threads could use all the | |
| 963 // newly allocated page memory before this thread call takeFreePage. | |
|
zerny-chromium
2014/07/11 11:24:42
s/call/calls
wibling-chromium
2014/07/11 13:06:54
Done.
| |
| 964 while (!pageMemory) { | |
| 807 // Allocate a memory region for blinkPagesPerRegion pages that | 965 // Allocate a memory region for blinkPagesPerRegion pages that |
| 808 // will each have the following layout. | 966 // will each have the following layout. |
| 809 // | 967 // |
| 810 // [ guard os page | ... payload ... | guard os page ] | 968 // [ guard os page | ... payload ... | guard os page ] |
| 811 // ^---{ aligned to blink page size } | 969 // ^---{ aligned to blink page size } |
| 812 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * bl inkPagesPerRegion, blinkPagesPerRegion); | 970 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * bl inkPagesPerRegion, blinkPagesPerRegion); |
| 813 // Setup the PageMemory object for each of the pages in the | 971 // Setup the PageMemory object for each of the pages in the |
| 814 // region. | 972 // region. |
| 815 size_t offset = 0; | 973 size_t offset = 0; |
| 816 for (size_t i = 0; i < blinkPagesPerRegion; i++) { | 974 for (size_t i = 0; i < blinkPagesPerRegion; i++) { |
| 817 addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offs et, blinkPagePayloadSize())); | 975 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemo ryInRegion(region, offset, blinkPagePayloadSize())); |
| 818 offset += blinkPageSize; | 976 offset += blinkPageSize; |
| 819 } | 977 } |
| 820 pageMemory = takePageFromPool(); | 978 pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
| 821 RELEASE_ASSERT(pageMemory); | |
| 822 } | 979 } |
| 823 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>( pageMemory, this, gcInfo); | 980 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>( pageMemory, this, gcInfo); |
| 824 // FIXME: Oilpan: Linking new pages into the front of the list is | 981 // FIXME: Oilpan: Linking new pages into the front of the list is |
| 825 // crucial when performing allocations during finalization because | 982 // crucial when performing allocations during finalization because |
| 826 // it ensures that those pages are not swept in the current GC | 983 // it ensures that those pages are not swept in the current GC |
| 827 // round. We should create a separate page list for that to | 984 // round. We should create a separate page list for that to |
| 828 // separate out the pages allocated during finalization clearly | 985 // separate out the pages allocated during finalization clearly |
| 829 // from the pages currently being swept. | 986 // from the pages currently being swept. |
| 830 page->link(&m_firstPage); | 987 page->link(&m_firstPage); |
| 831 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); | 988 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); |
| (...skipping 23 matching lines...) Expand all Loading... | |
| 855 ASSERT(isConsistentForGC()); | 1012 ASSERT(isConsistentForGC()); |
| 856 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING | 1013 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING |
| 857 // When using ASan do a pre-sweep where all unmarked objects are poisoned be fore | 1014 // When using ASan do a pre-sweep where all unmarked objects are poisoned be fore |
| 858 // calling their finalizer methods. This can catch the cases where one objec ts | 1015 // calling their finalizer methods. This can catch the cases where one objec ts |
| 859 // finalizer tries to modify another object as part of finalization. | 1016 // finalizer tries to modify another object as part of finalization. |
| 860 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1017 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
| 861 page->poisonUnmarkedObjects(); | 1018 page->poisonUnmarkedObjects(); |
| 862 #endif | 1019 #endif |
| 863 HeapPage<Header>* page = m_firstPage; | 1020 HeapPage<Header>* page = m_firstPage; |
| 864 HeapPage<Header>** previous = &m_firstPage; | 1021 HeapPage<Header>** previous = &m_firstPage; |
| 865 bool pagesRemoved = false; | |
| 866 while (page) { | 1022 while (page) { |
| 867 if (page->isEmpty()) { | 1023 if (page->isEmpty()) { |
| 868 flushHeapContainsCache(); | |
| 869 HeapPage<Header>* unused = page; | 1024 HeapPage<Header>* unused = page; |
| 870 page = page->next(); | 1025 page = page->next(); |
| 871 HeapPage<Header>::unlink(unused, previous); | 1026 HeapPage<Header>::unlink(unused, previous); |
| 872 pagesRemoved = true; | |
| 873 } else { | 1027 } else { |
| 874 page->sweep(); | 1028 page->sweep(); |
| 875 previous = &page->m_next; | 1029 previous = &page->m_next; |
| 876 page = page->next(); | 1030 page = page->next(); |
| 877 } | 1031 } |
| 878 } | 1032 } |
| 879 if (pagesRemoved) | |
| 880 flushHeapContainsCache(); | |
| 881 | 1033 |
| 882 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; | 1034 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; |
| 883 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | 1035 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { |
| 884 if (current->isMarked()) { | 1036 if (current->isMarked()) { |
| 885 stats().increaseAllocatedSpace(current->size()); | 1037 stats().increaseAllocatedSpace(current->size()); |
| 886 stats().increaseObjectSpace(current->payloadSize()); | 1038 stats().increaseObjectSpace(current->payloadSize()); |
| 887 current->unmark(); | 1039 current->unmark(); |
| 888 previousNext = ¤t->m_next; | 1040 previousNext = ¤t->m_next; |
| 889 current = current->next(); | 1041 current = current->next(); |
| 890 } else { | 1042 } else { |
| 891 LargeHeapObject<Header>* next = current->next(); | 1043 LargeHeapObject<Header>* next = current->next(); |
| 892 freeLargeObject(current, previousNext); | 1044 freeLargeObject(current, previousNext); |
| 893 current = next; | 1045 current = next; |
| 894 } | 1046 } |
| 895 } | 1047 } |
| 896 } | 1048 } |
| 897 | 1049 |
| 898 template<typename Header> | 1050 template<typename Header> |
| 899 void ThreadHeap<Header>::assertEmpty() | |
| 900 { | |
| 901 // No allocations are permitted. The thread is exiting. | |
| 902 NoAllocationScope<AnyThread> noAllocation; | |
| 903 makeConsistentForGC(); | |
| 904 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | |
| 905 Address end = page->end(); | |
| 906 Address headerAddress; | |
| 907 for (headerAddress = page->payload(); headerAddress < end; ) { | |
| 908 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader* >(headerAddress); | |
| 909 ASSERT(basicHeader->size() < blinkPagePayloadSize()); | |
| 910 // A live object is potentially a dangling pointer from | |
| 911 // some root. Treat that as a bug. Unfortunately, it is | |
| 912 // hard to reliably check in the presence of conservative | |
| 913 // stack scanning. Something could be conservatively kept | |
| 914 // alive because a non-pointer on another thread's stack | |
| 915 // is treated as a pointer into the heap. | |
| 916 // | |
| 917 // FIXME: This assert can currently trigger in cases where | |
| 918 // worker shutdown does not get enough precise GCs to get | |
| 919 // all objects removed from the worker heap. There are two | |
| 920 // issues: 1) conservative GCs keeping objects alive, and | |
| 921 // 2) long chains of RefPtrs/Persistents that require more | |
| 922 // GCs to get everything cleaned up. Maybe we can keep | |
| 923 // threads alive until their heaps become empty instead of | |
| 924 // forcing the threads to die immediately? | |
| 925 ASSERT(Heap::lastGCWasConservative() || basicHeader->isFree()); | |
| 926 if (basicHeader->isFree()) | |
| 927 addToFreeList(headerAddress, basicHeader->size()); | |
| 928 headerAddress += basicHeader->size(); | |
| 929 } | |
| 930 ASSERT(headerAddress == end); | |
| 931 } | |
| 932 | |
| 933 ASSERT(Heap::lastGCWasConservative() || !m_firstLargeHeapObject); | |
| 934 } | |
| 935 | |
| 936 template<typename Header> | |
| 937 bool ThreadHeap<Header>::isConsistentForGC() | 1051 bool ThreadHeap<Header>::isConsistentForGC() |
| 938 { | 1052 { |
| 939 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | 1053 for (size_t i = 0; i < blinkPageSizeLog2; i++) { |
| 940 if (m_freeLists[i]) | 1054 if (m_freeLists[i]) |
| 941 return false; | 1055 return false; |
| 942 } | 1056 } |
| 943 return !ownsNonEmptyAllocationArea(); | 1057 return !ownsNonEmptyAllocationArea(); |
| 944 } | 1058 } |
| 945 | 1059 |
| 946 template<typename Header> | 1060 template<typename Header> |
| 947 void ThreadHeap<Header>::makeConsistentForGC() | 1061 void ThreadHeap<Header>::makeConsistentForGC() |
| 948 { | 1062 { |
| 949 if (ownsNonEmptyAllocationArea()) | 1063 if (ownsNonEmptyAllocationArea()) |
| 950 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 1064 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
| 951 setAllocationPoint(0, 0); | 1065 setAllocationPoint(0, 0); |
| 952 clearFreeLists(); | 1066 clearFreeLists(); |
| 953 } | 1067 } |
| 954 | 1068 |
| 955 template<typename Header> | 1069 template<typename Header> |
| 956 void ThreadHeap<Header>::clearMarks() | 1070 void ThreadHeap<Header>::clearLiveAndMarkDead() |
| 957 { | 1071 { |
| 958 ASSERT(isConsistentForGC()); | 1072 ASSERT(isConsistentForGC()); |
| 959 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1073 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
| 960 page->clearMarks(); | 1074 page->clearLiveAndMarkDead(); |
| 961 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) | 1075 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) { |
| 962 current->unmark(); | 1076 if (current->isMarked()) |
| 1077 current->unmark(); | |
| 1078 else | |
| 1079 current->setDeadMark(); | |
| 1080 } | |
| 963 } | 1081 } |
| 964 | 1082 |
| 965 template<typename Header> | 1083 template<typename Header> |
| 966 void ThreadHeap<Header>::deletePages() | |
| 967 { | |
| 968 flushHeapContainsCache(); | |
| 969 // Add all pages in the pool to the heap's list of pages before deleting | |
| 970 clearPagePool(); | |
| 971 | |
| 972 for (HeapPage<Header>* page = m_firstPage; page; ) { | |
| 973 HeapPage<Header>* dead = page; | |
| 974 page = page->next(); | |
| 975 PageMemory* storage = dead->storage(); | |
| 976 dead->~HeapPage(); | |
| 977 delete storage; | |
| 978 } | |
| 979 m_firstPage = 0; | |
| 980 | |
| 981 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { | |
| 982 LargeHeapObject<Header>* dead = current; | |
| 983 current = current->next(); | |
| 984 PageMemory* storage = dead->storage(); | |
| 985 dead->~LargeHeapObject(); | |
| 986 delete storage; | |
| 987 } | |
| 988 m_firstLargeHeapObject = 0; | |
| 989 } | |
| 990 | |
| 991 template<typename Header> | |
| 992 void ThreadHeap<Header>::clearFreeLists() | 1084 void ThreadHeap<Header>::clearFreeLists() |
| 993 { | 1085 { |
| 994 for (size_t i = 0; i < blinkPageSizeLog2; i++) | 1086 for (size_t i = 0; i < blinkPageSizeLog2; i++) |
| 995 m_freeLists[i] = 0; | 1087 m_freeLists[i] = 0; |
| 996 } | 1088 } |
| 997 | 1089 |
| 998 int BaseHeap::bucketIndexForSize(size_t size) | 1090 int BaseHeap::bucketIndexForSize(size_t size) |
| 999 { | 1091 { |
| 1000 ASSERT(size > 0); | 1092 ASSERT(size > 0); |
| 1001 int index = -1; | 1093 int index = -1; |
| (...skipping 20 matching lines...) Expand all Loading... | |
| 1022 void HeapPage<Header>::link(HeapPage** prevNext) | 1114 void HeapPage<Header>::link(HeapPage** prevNext) |
| 1023 { | 1115 { |
| 1024 m_next = *prevNext; | 1116 m_next = *prevNext; |
| 1025 *prevNext = this; | 1117 *prevNext = this; |
| 1026 } | 1118 } |
| 1027 | 1119 |
| 1028 template<typename Header> | 1120 template<typename Header> |
| 1029 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) | 1121 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) |
| 1030 { | 1122 { |
| 1031 *prevNext = unused->m_next; | 1123 *prevNext = unused->m_next; |
| 1032 unused->heap()->addPageToPool(unused); | 1124 unused->heap()->removePageFromHeap(unused); |
| 1033 } | 1125 } |
| 1034 | 1126 |
| 1035 template<typename Header> | 1127 template<typename Header> |
| 1036 void HeapPage<Header>::getStats(HeapStats& stats) | 1128 void HeapPage<Header>::getStats(HeapStats& stats) |
| 1037 { | 1129 { |
| 1038 stats.increaseAllocatedSpace(blinkPageSize); | 1130 stats.increaseAllocatedSpace(blinkPageSize); |
| 1039 Address headerAddress = payload(); | 1131 Address headerAddress = payload(); |
| 1040 ASSERT(headerAddress != end()); | 1132 ASSERT(headerAddress != end()); |
| 1041 do { | 1133 do { |
| 1042 Header* header = reinterpret_cast<Header*>(headerAddress); | 1134 Header* header = reinterpret_cast<Header*>(headerAddress); |
| (...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1105 header->unmark(); | 1197 header->unmark(); |
| 1106 headerAddress += header->size(); | 1198 headerAddress += header->size(); |
| 1107 heap()->stats().increaseObjectSpace(header->payloadSize()); | 1199 heap()->stats().increaseObjectSpace(header->payloadSize()); |
| 1108 startOfGap = headerAddress; | 1200 startOfGap = headerAddress; |
| 1109 } | 1201 } |
| 1110 if (startOfGap != end()) | 1202 if (startOfGap != end()) |
| 1111 heap()->addToFreeList(startOfGap, end() - startOfGap); | 1203 heap()->addToFreeList(startOfGap, end() - startOfGap); |
| 1112 } | 1204 } |
| 1113 | 1205 |
| 1114 template<typename Header> | 1206 template<typename Header> |
| 1115 void HeapPage<Header>::clearMarks() | 1207 void HeapPage<Header>::clearLiveAndMarkDead() |
| 1116 { | 1208 { |
| 1117 for (Address headerAddress = payload(); headerAddress < end();) { | 1209 for (Address headerAddress = payload(); headerAddress < end();) { |
| 1118 Header* header = reinterpret_cast<Header*>(headerAddress); | 1210 Header* header = reinterpret_cast<Header*>(headerAddress); |
| 1119 ASSERT(header->size() < blinkPagePayloadSize()); | 1211 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1120 if (!header->isFree()) | 1212 // Check if a free list entry first since we cannot call |
| 1213 // isMarked on a free list entry. | |
| 1214 if (header->isFree()) { | |
| 1215 headerAddress += header->size(); | |
| 1216 continue; | |
| 1217 } | |
| 1218 if (header->isMarked()) | |
| 1121 header->unmark(); | 1219 header->unmark(); |
| 1220 else | |
| 1221 header->setDeadMark(); | |
| 1122 headerAddress += header->size(); | 1222 headerAddress += header->size(); |
| 1123 } | 1223 } |
| 1124 } | 1224 } |
| 1125 | 1225 |
| 1126 template<typename Header> | 1226 template<typename Header> |
| 1127 void HeapPage<Header>::populateObjectStartBitMap() | 1227 void HeapPage<Header>::populateObjectStartBitMap() |
| 1128 { | 1228 { |
| 1129 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); | 1229 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); |
| 1130 Address start = payload(); | 1230 Address start = payload(); |
| 1131 for (Address headerAddress = start; headerAddress < end();) { | 1231 for (Address headerAddress = start; headerAddress < end();) { |
| (...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1191 if (header->isFree()) | 1291 if (header->isFree()) |
| 1192 return 0; | 1292 return 0; |
| 1193 return header; | 1293 return header; |
| 1194 } | 1294 } |
| 1195 | 1295 |
| 1196 template<typename Header> | 1296 template<typename Header> |
| 1197 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) | 1297 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1198 { | 1298 { |
| 1199 ASSERT(contains(address)); | 1299 ASSERT(contains(address)); |
| 1200 Header* header = findHeaderFromAddress(address); | 1300 Header* header = findHeaderFromAddress(address); |
| 1201 if (!header) | 1301 if (!header || header->hasDeadMark()) |
| 1202 return; | 1302 return; |
| 1203 | 1303 |
| 1204 #if ENABLE(GC_TRACING) | 1304 #if ENABLE(GC_TRACING) |
| 1205 visitor->setHostInfo(&address, "stack"); | 1305 visitor->setHostInfo(&address, "stack"); |
| 1206 #endif | 1306 #endif |
| 1207 if (hasVTable(header) && !vTableInitialized(header->payload())) | 1307 if (hasVTable(header) && !vTableInitialized(header->payload())) |
| 1208 visitor->markConservatively(header); | 1308 visitor->markConservatively(header); |
| 1209 else | 1309 else |
| 1210 visitor->mark(header, traceCallback(header)); | 1310 visitor->mark(header, traceCallback(header)); |
| 1211 } | 1311 } |
| (...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1379 { | 1479 { |
| 1380 for (size_t i = 0; i < bufferSize; i++) | 1480 for (size_t i = 0; i < bufferSize; i++) |
| 1381 m_buffer[i] = Item(0, 0); | 1481 m_buffer[i] = Item(0, 0); |
| 1382 } | 1482 } |
| 1383 | 1483 |
| 1384 bool CallbackStack::isEmpty() | 1484 bool CallbackStack::isEmpty() |
| 1385 { | 1485 { |
| 1386 return m_current == &(m_buffer[0]) && !m_next; | 1486 return m_current == &(m_buffer[0]) && !m_next; |
| 1387 } | 1487 } |
| 1388 | 1488 |
| 1489 template<CallbackInvocationMode Mode> | |
| 1389 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor ) | 1490 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor ) |
| 1390 { | 1491 { |
| 1391 if (m_current == &(m_buffer[0])) { | 1492 if (m_current == &(m_buffer[0])) { |
| 1392 if (!m_next) { | 1493 if (!m_next) { |
| 1393 #ifndef NDEBUG | 1494 #ifndef NDEBUG |
| 1394 clearUnused(); | 1495 clearUnused(); |
| 1395 #endif | 1496 #endif |
| 1396 return false; | 1497 return false; |
| 1397 } | 1498 } |
| 1398 CallbackStack* nextStack = m_next; | 1499 CallbackStack* nextStack = m_next; |
| 1399 *first = nextStack; | 1500 *first = nextStack; |
| 1400 delete this; | 1501 delete this; |
| 1401 return nextStack->popAndInvokeCallback(first, visitor); | 1502 return nextStack->popAndInvokeCallback<Mode>(first, visitor); |
| 1402 } | 1503 } |
| 1403 Item* item = --m_current; | 1504 Item* item = --m_current; |
| 1404 | 1505 |
| 1506 // If the object being traced is located on a page which is dead don't | |
| 1507 // trace it. This can happen when a conservative GC kept a dead object | |
| 1508 // alive which pointed to a (now gone) object on the cleaned up page. | |
| 1509 // Also if doing a thread local GC don't trace objects that are located | |
| 1510 // on other thread's heaps, ie. pages where the shuttingDown flag is not | |
| 1511 // set. | |
| 1512 BaseHeapPage* heapPage = pageHeaderFromObject(item->object()); | |
| 1513 if (Mode == GlobalMarking && heapPage->orphaned()) { | |
| 1514 // When doing a GC we should only get a trace callback to an orphaned | |
| 1515 // page if the GC is conservative. If it is not conservative there is | |
| 1516 // a bug in the code where we have a dangling pointer to a page | |
| 1517 // on the dead thread. | |
| 1518 RELEASE_ASSERT(Heap::lastGCWasConservative()); | |
| 1519 heapPage->setTracedAfterOrphaned(); | |
| 1520 return true; | |
| 1521 } | |
| 1522 if (Mode == ThreadLocalMarking && (heapPage->orphaned() || !heapPage->termin ating())) | |
| 1523 return true; | |
| 1524 // For WeaknessProcessing we should never reach an orphaned pages since | |
| 1525 // they should never be registered as objects on orphaned pages are not | |
| 1526 // traced. We cannot assert this here since we might have an off-heap | |
| 1527 // collection. However we assert it in Heap::pushWeakObjectPointerCallback. | |
| 1528 | |
| 1405 VisitorCallback callback = item->callback(); | 1529 VisitorCallback callback = item->callback(); |
| 1406 #if ENABLE(GC_TRACING) | 1530 #if ENABLE(GC_TRACING) |
| 1407 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback | 1531 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback |
| 1408 visitor->setHostInfo(item->object(), classOf(item->object())); | 1532 visitor->setHostInfo(item->object(), classOf(item->object())); |
| 1409 #endif | 1533 #endif |
| 1410 callback(visitor, item->object()); | 1534 callback(visitor, item->object()); |
| 1411 | 1535 |
| 1412 return true; | 1536 return true; |
| 1413 } | 1537 } |
| 1414 | 1538 |
| (...skipping 17 matching lines...) Expand all Loading... | |
| 1432 { | 1556 { |
| 1433 // Recurse first (bufferSize at a time) so we get to the newly added entries | 1557 // Recurse first (bufferSize at a time) so we get to the newly added entries |
| 1434 // last. | 1558 // last. |
| 1435 if (m_next) | 1559 if (m_next) |
| 1436 m_next->invokeOldestCallbacks(visitor); | 1560 m_next->invokeOldestCallbacks(visitor); |
| 1437 | 1561 |
| 1438 // This loop can tolerate entries being added by the callbacks after | 1562 // This loop can tolerate entries being added by the callbacks after |
| 1439 // iteration starts. | 1563 // iteration starts. |
| 1440 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1564 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 1441 Item& item = m_buffer[i]; | 1565 Item& item = m_buffer[i]; |
| 1566 | |
| 1567 // We don't need to check for orphaned pages when popping an ephemeron | |
| 1568 // callback since the callback is only pushed after the object containing | |
| 1569 // it has been traced. There are basically three cases to consider: | |
| 1570 // 1. Member<EphemeronCollection> | |
| 1571 // 2. EphemeronCollection is part of a containing object | |
| 1572 // 3. EphemeronCollection is a value object in a collection | |
| 1573 // | |
| 1574 // Ad. 1. In this case we push the start of the ephemeron on the | |
| 1575 // marking stack and do the orphaned page check when popping it off | |
| 1576 // the marking stack. | |
| 1577 // Ad. 2. The containing object cannot be on an orphaned page since | |
| 1578 // in that case we wouldn't have traced its parts. This also means | |
| 1579 // the ephemeron collection is not on the orphaned page. | |
| 1580 // Ad. 3. Is the same as 2. The collection containing the ephemeron | |
| 1581 // collection as a value object cannot be on an orphaned page since | |
| 1582 // it would not have traced its values in that case. | |
| 1442 item.callback()(visitor, item.object()); | 1583 item.callback()(visitor, item.object()); |
| 1443 } | 1584 } |
| 1444 } | 1585 } |
| 1445 | 1586 |
| 1446 #ifndef NDEBUG | 1587 #ifndef NDEBUG |
| 1447 bool CallbackStack::hasCallbackForObject(const void* object) | 1588 bool CallbackStack::hasCallbackForObject(const void* object) |
| 1448 { | 1589 { |
| 1449 for (unsigned i = 0; m_buffer + i < m_current; i++) { | 1590 for (unsigned i = 0; m_buffer + i < m_current; i++) { |
| 1450 Item* item = &m_buffer[i]; | 1591 Item* item = &m_buffer[i]; |
| 1451 if (item->object() == object) { | 1592 if (item->object() == object) { |
| (...skipping 11 matching lines...) Expand all Loading... | |
| 1463 public: | 1604 public: |
| 1464 #if ENABLE(GC_TRACING) | 1605 #if ENABLE(GC_TRACING) |
| 1465 typedef HashSet<uintptr_t> LiveObjectSet; | 1606 typedef HashSet<uintptr_t> LiveObjectSet; |
| 1466 typedef HashMap<String, LiveObjectSet> LiveObjectMap; | 1607 typedef HashMap<String, LiveObjectSet> LiveObjectMap; |
| 1467 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph; | 1608 typedef HashMap<uintptr_t, std::pair<uintptr_t, String> > ObjectGraph; |
| 1468 #endif | 1609 #endif |
| 1469 | 1610 |
| 1470 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) | 1611 inline void visitHeader(HeapObjectHeader* header, const void* objectPointer, TraceCallback callback) |
| 1471 { | 1612 { |
| 1472 ASSERT(header); | 1613 ASSERT(header); |
| 1614 // Check that we are not marking objects that are outside the heap by calling Heap::contains. | |
| 1615 // However we cannot call Heap::contains when outside a GC and we call mark when doing weakness | |
| 1616 // for ephemerons. Hence we only check when called within. | |
| 1617 ASSERT(!ThreadState::isAnyThreadInGC() || Heap::containedInHeapOrOrphane dPage(header)); | |
| 1473 ASSERT(objectPointer); | 1618 ASSERT(objectPointer); |
| 1474 if (header->isMarked()) | 1619 if (header->isMarked()) |
| 1475 return; | 1620 return; |
| 1476 header->mark(); | 1621 header->mark(); |
| 1477 #if ENABLE(GC_TRACING) | 1622 #if ENABLE(GC_TRACING) |
| 1478 MutexLocker locker(objectGraphMutex()); | 1623 MutexLocker locker(objectGraphMutex()); |
| 1479 String className(classOf(objectPointer)); | 1624 String className(classOf(objectPointer)); |
| 1480 { | 1625 { |
| 1481 LiveObjectMap::AddResult result = currentlyLive().add(className, Liv eObjectSet()); | 1626 LiveObjectMap::AddResult result = currentlyLive().add(className, Liv eObjectSet()); |
| 1482 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPoin ter)); | 1627 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPoin ter)); |
| (...skipping 201 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1684 }; | 1829 }; |
| 1685 | 1830 |
| 1686 void Heap::init() | 1831 void Heap::init() |
| 1687 { | 1832 { |
| 1688 ThreadState::init(); | 1833 ThreadState::init(); |
| 1689 CallbackStack::init(&s_markingStack); | 1834 CallbackStack::init(&s_markingStack); |
| 1690 CallbackStack::init(&s_weakCallbackStack); | 1835 CallbackStack::init(&s_weakCallbackStack); |
| 1691 CallbackStack::init(&s_ephemeronStack); | 1836 CallbackStack::init(&s_ephemeronStack); |
| 1692 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | 1837 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); |
| 1693 s_markingVisitor = new MarkingVisitor(); | 1838 s_markingVisitor = new MarkingVisitor(); |
| 1839 s_freePagePool = new FreePagePool(); | |
| 1840 s_orphanedPagePool = new OrphanedPagePool(); | |
| 1694 } | 1841 } |
| 1695 | 1842 |
| 1696 void Heap::shutdown() | 1843 void Heap::shutdown() |
| 1697 { | 1844 { |
| 1698 s_shutdownCalled = true; | 1845 s_shutdownCalled = true; |
| 1699 ThreadState::shutdownHeapIfNecessary(); | 1846 ThreadState::shutdownHeapIfNecessary(); |
| 1700 } | 1847 } |
| 1701 | 1848 |
| 1702 void Heap::doShutdown() | 1849 void Heap::doShutdown() |
| 1703 { | 1850 { |
| 1704 // We don't want to call doShutdown() twice. | 1851 // We don't want to call doShutdown() twice. |
| 1705 if (!s_markingVisitor) | 1852 if (!s_markingVisitor) |
| 1706 return; | 1853 return; |
| 1707 | 1854 |
| 1708 ASSERT(!ThreadState::isAnyThreadInGC()); | 1855 ASSERT(!ThreadState::isAnyThreadInGC()); |
| 1709 ASSERT(!ThreadState::attachedThreads().size()); | 1856 ASSERT(!ThreadState::attachedThreads().size()); |
| 1710 delete s_markingVisitor; | 1857 delete s_markingVisitor; |
| 1711 s_markingVisitor = 0; | 1858 s_markingVisitor = 0; |
| 1712 delete s_heapDoesNotContainCache; | 1859 delete s_heapDoesNotContainCache; |
| 1713 s_heapDoesNotContainCache = 0; | 1860 s_heapDoesNotContainCache = 0; |
| 1861 delete s_freePagePool; | |
| 1862 s_freePagePool = 0; | |
| 1863 delete s_orphanedPagePool; | |
| 1864 s_orphanedPagePool = 0; | |
| 1714 CallbackStack::shutdown(&s_weakCallbackStack); | 1865 CallbackStack::shutdown(&s_weakCallbackStack); |
| 1715 CallbackStack::shutdown(&s_markingStack); | 1866 CallbackStack::shutdown(&s_markingStack); |
| 1716 CallbackStack::shutdown(&s_ephemeronStack); | 1867 CallbackStack::shutdown(&s_ephemeronStack); |
| 1717 ThreadState::shutdown(); | 1868 ThreadState::shutdown(); |
| 1718 } | 1869 } |
| 1719 | 1870 |
| 1720 BaseHeapPage* Heap::contains(Address address) | 1871 BaseHeapPage* Heap::contains(Address address) |
| 1721 { | 1872 { |
| 1722 ASSERT(ThreadState::isAnyThreadInGC()); | 1873 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1723 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); | 1874 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); |
| 1724 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1875 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1725 BaseHeapPage* page = (*it)->contains(address); | 1876 BaseHeapPage* page = (*it)->contains(address); |
| 1726 if (page) | 1877 if (page) |
| 1727 return page; | 1878 return page; |
| 1728 } | 1879 } |
| 1729 return 0; | 1880 return 0; |
| 1730 } | 1881 } |
| 1731 | 1882 |
| 1883 #ifndef NDEBUG | |
| 1884 bool Heap::containedInHeapOrOrphanedPage(void* object) | |
| 1885 { | |
| 1886 return contains(object) || orphanedPagePool()->contains(object); | |
| 1887 } | |
| 1888 #endif | |
| 1889 | |
| 1732 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | 1890 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) |
| 1733 { | 1891 { |
| 1734 ASSERT(ThreadState::isAnyThreadInGC()); | 1892 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1735 | 1893 |
| 1736 #ifdef NDEBUG | 1894 #ifdef NDEBUG |
| 1737 if (s_heapDoesNotContainCache->lookup(address)) | 1895 if (s_heapDoesNotContainCache->lookup(address)) |
| 1738 return 0; | 1896 return 0; |
| 1739 #endif | 1897 #endif |
| 1740 | 1898 |
| 1741 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); | 1899 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); |
| (...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1800 builder.append("\n\t"); | 1958 builder.append("\n\t"); |
| 1801 builder.append(frameToName.nullableName()); | 1959 builder.append(frameToName.nullableName()); |
| 1802 --framesToShow; | 1960 --framesToShow; |
| 1803 } | 1961 } |
| 1804 return builder.toString().replace("WebCore::", ""); | 1962 return builder.toString().replace("WebCore::", ""); |
| 1805 } | 1963 } |
| 1806 #endif | 1964 #endif |
| 1807 | 1965 |
| 1808 void Heap::pushTraceCallback(void* object, TraceCallback callback) | 1966 void Heap::pushTraceCallback(void* object, TraceCallback callback) |
| 1809 { | 1967 { |
| 1810 ASSERT(Heap::contains(object)); | 1968 ASSERT(Heap::containedInHeapOrOrphanedPage(object)); |
| 1811 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); | 1969 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); |
| 1812 *slot = CallbackStack::Item(object, callback); | 1970 *slot = CallbackStack::Item(object, callback); |
| 1813 } | 1971 } |
| 1814 | 1972 |
| 1973 template<CallbackInvocationMode Mode> | |
| 1815 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | 1974 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) |
| 1816 { | 1975 { |
| 1817 return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor); | 1976 return s_markingStack->popAndInvokeCallback<Mode>(&s_markingStack, visitor); |
| 1818 } | 1977 } |
| 1819 | 1978 |
| 1820 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback ) | 1979 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback ) |
| 1821 { | 1980 { |
| 1822 ASSERT(Heap::contains(cell)); | 1981 ASSERT(!Heap::orphanedPagePool()->contains(cell)); |
| 1823 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack); | 1982 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack); |
| 1824 *slot = CallbackStack::Item(cell, callback); | 1983 *slot = CallbackStack::Item(cell, callback); |
| 1825 } | 1984 } |
| 1826 | 1985 |
| 1827 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback) | 1986 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback) |
| 1828 { | 1987 { |
| 1829 ASSERT(Heap::contains(object)); | 1988 ASSERT(Heap::contains(object)); |
| 1830 BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeader Address(reinterpret_cast<Address>(object))); | 1989 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object); |
| 1990 ASSERT(!heapPageForObject->orphaned()); | |
| 1831 ASSERT(Heap::contains(object) == heapPageForObject); | 1991 ASSERT(Heap::contains(object) == heapPageForObject); |
| 1832 ThreadState* state = heapPageForObject->threadState(); | 1992 ThreadState* state = heapPageForObject->threadState(); |
| 1833 state->pushWeakObjectPointerCallback(closure, callback); | 1993 state->pushWeakObjectPointerCallback(closure, callback); |
| 1834 } | 1994 } |
| 1835 | 1995 |
| 1836 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) | 1996 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) |
| 1837 { | 1997 { |
| 1838 return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visit or); | 1998 return s_weakCallbackStack->popAndInvokeCallback<WeaknessProcessing>(&s_weak CallbackStack, visitor); |
| 1839 } | 1999 } |
| 1840 | 2000 |
| 2001 | |
| 1841 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) | 2002 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) |
| 1842 { | 2003 { |
| 2004 // Check that the ephemeron table being pushed onto the stack is not on an | |
| 2005 // orphaned page. | |
| 2006 ASSERT(!Heap::orphanedPagePool()->contains(table)); | |
| 2007 | |
| 2008 // XXX: Add slow case check for not pushing an orphaned page | |
|
Mads Ager (chromium)
2014/07/11 10:59:34
Remove comment?
zerny-chromium
2014/07/11 11:24:42
s/XXX/FIXME
wibling-chromium
2014/07/11 13:06:54
Done.
wibling-chromium
2014/07/11 13:06:54
It was already done as part of the above assert.
| |
| 1843 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k); | 2009 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k); |
| 1844 *slot = CallbackStack::Item(table, iterationCallback); | 2010 *slot = CallbackStack::Item(table, iterationCallback); |
| 1845 | 2011 |
| 1846 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks. | 2012 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks. |
| 1847 // These callbacks are called right after marking and before any thread comm ences execution | 2013 // These callbacks are called right after marking and before any thread comm ences execution |
| 1848 // so it suits our needs for telling the ephemerons that the iteration is do ne. | 2014 // so it suits our needs for telling the ephemerons that the iteration is do ne. |
| 1849 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallbac k); | 2015 pushWeakCellPointerCallback(static_cast<void**>(table), iterationDoneCallbac k); |
| 1850 } | 2016 } |
| 1851 | 2017 |
| 1852 #ifndef NDEBUG | 2018 #ifndef NDEBUG |
| (...skipping 34 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1887 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); | 2053 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); |
| 1888 #endif | 2054 #endif |
| 1889 | 2055 |
| 1890 // Disallow allocation during garbage collection (but not | 2056 // Disallow allocation during garbage collection (but not |
| 1891 // during the finalization that happens when the gcScope is | 2057 // during the finalization that happens when the gcScope is |
| 1892 // torn down). | 2058 // torn down). |
| 1893 NoAllocationScope<AnyThread> noAllocationScope; | 2059 NoAllocationScope<AnyThread> noAllocationScope; |
| 1894 | 2060 |
| 1895 prepareForGC(); | 2061 prepareForGC(); |
| 1896 | 2062 |
| 1897 ThreadState::visitRoots(s_markingVisitor); | 2063 traceRootsAndPerformGlobalWeakProcessing<GlobalMarking>(); |
| 2064 | |
| 2065 // After a global marking we know that any orphaned page that was not reached | |
| 2066 // cannot be reached in a subsequent GC. This is due to a thread either having | |
| 2067 // swept its heap or having done a "poor mans sweep" in prepareForGC which marks | |
| 2068 // objects that are dead, but not swept in the previous GC as dead. In this GC's | |
| 2069 // marking we check that any object marked as dead is not traced. E.g. via a | |
| 2070 // conservatively found pointer or a programming error with an object containing | |
| 2071 // a dangling pointer. | |
| 2072 orphanedPagePool()->decommitOrphanedPages(); | |
| 2073 | |
| 2074 #if ENABLE(GC_TRACING) | |
| 2075 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); | |
| 2076 #endif | |
| 2077 | |
| 2078 if (blink::Platform::current()) { | |
| 2079 uint64_t objectSpaceSize; | |
| 2080 uint64_t allocatedSpaceSize; | |
| 2081 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); | |
| 2082 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | |
| 2083 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
| 2084 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
| 2085 } | |
| 2086 } | |
| 2087 | |
| 2088 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | |
| 2089 { | |
| 2090 // We explicitly do not enter a safepoint while doing thread specific | |
| 2091 // garbage collection since we don't want to allow a global GC at the | |
| 2092 // same time as a thread local GC. | |
| 2093 | |
| 2094 { | |
| 2095 NoAllocationScope<AnyThread> noAllocationScope; | |
| 2096 | |
| 2097 state->enterGC(); | |
| 2098 state->prepareForGC(); | |
| 2099 | |
| 2100 traceRootsAndPerformGlobalWeakProcessing<ThreadLocalMarking>(); | |
| 2101 | |
| 2102 state->leaveGC(); | |
| 2103 } | |
| 2104 state->performPendingSweep(); | |
| 2105 } | |
| 2106 | |
| 2107 template<CallbackInvocationMode Mode> | |
| 2108 void Heap::traceRootsAndPerformGlobalWeakProcessing() | |
| 2109 { | |
| 2110 if (Mode == ThreadLocalMarking) | |
| 2111 ThreadState::current()->visitLocalRoots(s_markingVisitor); | |
| 2112 else | |
| 2113 ThreadState::visitRoots(s_markingVisitor); | |
| 1898 | 2114 |
| 1899 // Ephemeron fixed point loop. | 2115 // Ephemeron fixed point loop. |
| 1900 do { | 2116 do { |
| 1901 // Recursively mark all objects that are reachable from the roots. | 2117 // Recursively mark all objects that are reachable from the roots for |
| 1902 while (popAndInvokeTraceCallback(s_markingVisitor)) { } | 2118 // this thread. Also don't continue tracing if the trace hits an object |
| 2119 // on another thread's heap. | |
|
zerny-chromium
2014/07/11 11:24:42
The addition is dependent on Mode, so maybe add: I
wibling-chromium
2014/07/11 13:06:54
Done.
| |
| 2120 while (popAndInvokeTraceCallback<Mode>(s_markingVisitor)) { } | |
| 1903 | 2121 |
| 1904 // Mark any strong pointers that have now become reachable in ephemeron | 2122 // Mark any strong pointers that have now become reachable in ephemeron |
| 1905 // maps. | 2123 // maps. |
| 1906 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); | 2124 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); |
| 1907 | 2125 |
| 1908 // Rerun loop if ephemeron processing queued more objects for tracing. | 2126 // Rerun loop if ephemeron processing queued more objects for tracing. |
| 1909 } while (!s_markingStack->isEmpty()); | 2127 } while (!s_markingStack->isEmpty()); |
| 1910 | 2128 |
| 1911 // Call weak callbacks on objects that may now be pointing to dead | 2129 // Call weak callbacks on objects that may now be pointing to dead |
| 1912 // objects and call ephemeronIterationDone callbacks on weak tables | 2130 // objects and call ephemeronIterationDone callbacks on weak tables |
| 1913 // to do cleanup (specifically clear the queued bits for weak hash | 2131 // to do cleanup (specifically clear the queued bits for weak hash |
| 1914 // tables). | 2132 // tables). |
| 1915 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } | 2133 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } |
| 1916 | 2134 |
| 1917 CallbackStack::clear(&s_ephemeronStack); | 2135 CallbackStack::clear(&s_ephemeronStack); |
| 1918 | 2136 |
| 1919 // It is not permitted to trace pointers of live objects in the weak | 2137 // It is not permitted to trace pointers of live objects in the weak |
| 1920 // callback phase, so the marking stack should still be empty here. | 2138 // callback phase, so the marking stack should still be empty here. |
| 1921 ASSERT(s_markingStack->isEmpty()); | 2139 ASSERT(s_markingStack->isEmpty()); |
| 1922 | |
| 1923 #if ENABLE(GC_TRACING) | |
| 1924 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats(); | |
| 1925 #endif | |
| 1926 | |
| 1927 if (blink::Platform::current()) { | |
| 1928 uint64_t objectSpaceSize; | |
| 1929 uint64_t allocatedSpaceSize; | |
| 1930 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize); | |
| 1931 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | |
| 1932 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
| 1933 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50); | |
| 1934 } | |
| 1935 } | 2140 } |
| 1936 | 2141 |
| 1937 void Heap::collectAllGarbage() | 2142 void Heap::collectAllGarbage() |
| 1938 { | 2143 { |
| 1939 // FIXME: oilpan: we should perform a single GC and everything | 2144 // FIXME: oilpan: we should perform a single GC and everything |
| 1940 // should die. Unfortunately it is not the case for all objects | 2145 // should die. Unfortunately it is not the case for all objects |
| 1941 // because the hierarchy was not completely moved to the heap and | 2146 // because the hierarchy was not completely moved to the heap and |
| 1942 // some heap allocated objects own objects that contain persistents | 2147 // some heap allocated objects own objects that contain persistents |
| 1943 // pointing to other heap allocated objects. | 2148 // pointing to other heap allocated objects. |
| 1944 for (int i = 0; i < 5; i++) | 2149 for (int i = 0; i < 5; i++) |
| 1945 collectGarbage(ThreadState::NoHeapPointersOnStack); | 2150 collectGarbage(ThreadState::NoHeapPointersOnStack); |
| 1946 } | 2151 } |
| 1947 | 2152 |
| 1948 void Heap::setForcePreciseGCForTesting() | 2153 void Heap::setForcePreciseGCForTesting() |
| 1949 { | 2154 { |
| 1950 ThreadState::current()->setForcePreciseGCForTesting(true); | 2155 ThreadState::current()->setForcePreciseGCForTesting(true); |
| 1951 } | 2156 } |
| 1952 | 2157 |
| 2158 template<typename Header> | |
| 2159 void ThreadHeap<Header>::prepareHeapForTermination() | |
| 2160 { | |
| 2161 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | |
| 2162 page->setTerminating(); | |
| 2163 } | |
| 2164 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) { | |
| 2165 current->setTerminating(); | |
| 2166 } | |
| 2167 } | |
| 2168 | |
| 1953 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize) | 2169 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize) |
| 1954 { | 2170 { |
| 1955 *objectSpaceSize = 0; | 2171 *objectSpaceSize = 0; |
| 1956 *allocatedSpaceSize = 0; | 2172 *allocatedSpaceSize = 0; |
| 1957 ASSERT(ThreadState::isAnyThreadInGC()); | 2173 ASSERT(ThreadState::isAnyThreadInGC()); |
| 1958 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); | 2174 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); |
| 1959 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; | 2175 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; |
| 1960 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) { | 2176 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) { |
| 1961 *objectSpaceSize += (*it)->stats().totalObjectSpace(); | 2177 *objectSpaceSize += (*it)->stats().totalObjectSpace(); |
| 1962 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); | 2178 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); |
| (...skipping 30 matching lines...) Expand all Loading... | |
| 1993 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); | 2209 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); |
| 1994 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) | 2210 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) |
| 1995 (*it)->makeConsistentForGC(); | 2211 (*it)->makeConsistentForGC(); |
| 1996 } | 2212 } |
| 1997 | 2213 |
| 1998 // Force template instantiations for the types that we need. | 2214 // Force template instantiations for the types that we need. |
| 1999 template class HeapPage<FinalizedHeapObjectHeader>; | 2215 template class HeapPage<FinalizedHeapObjectHeader>; |
| 2000 template class HeapPage<HeapObjectHeader>; | 2216 template class HeapPage<HeapObjectHeader>; |
| 2001 template class ThreadHeap<FinalizedHeapObjectHeader>; | 2217 template class ThreadHeap<FinalizedHeapObjectHeader>; |
| 2002 template class ThreadHeap<HeapObjectHeader>; | 2218 template class ThreadHeap<HeapObjectHeader>; |
| 2219 template bool CallbackStack::popAndInvokeCallback<GlobalMarking>(CallbackStack** , Visitor*); | |
| 2220 template bool CallbackStack::popAndInvokeCallback<ThreadLocalMarking>(CallbackSt ack**, Visitor*); | |
| 2221 template bool CallbackStack::popAndInvokeCallback<WeaknessProcessing>(CallbackSt ack**, Visitor*); | |
| 2003 | 2222 |
| 2004 Visitor* Heap::s_markingVisitor; | 2223 Visitor* Heap::s_markingVisitor; |
| 2005 CallbackStack* Heap::s_markingStack; | 2224 CallbackStack* Heap::s_markingStack; |
| 2006 CallbackStack* Heap::s_weakCallbackStack; | 2225 CallbackStack* Heap::s_weakCallbackStack; |
| 2007 CallbackStack* Heap::s_ephemeronStack; | 2226 CallbackStack* Heap::s_ephemeronStack; |
| 2008 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | 2227 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; |
| 2009 bool Heap::s_shutdownCalled = false; | 2228 bool Heap::s_shutdownCalled = false; |
| 2010 bool Heap::s_lastGCWasConservative = false; | 2229 bool Heap::s_lastGCWasConservative = false; |
| 2230 FreePagePool* Heap::s_freePagePool; | |
| 2231 OrphanedPagePool* Heap::s_orphanedPagePool; | |
| 2011 } | 2232 } |
| OLD | NEW |