Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(229)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 371623002: [oilpan]: Make thread shutdown more robust. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: review feedback Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 405 matching lines...) Expand 10 before | Expand all | Expand 10 after
416 } 416 }
417 417
418 NO_SANITIZE_ADDRESS 418 NO_SANITIZE_ADDRESS
419 void HeapObjectHeader::unmark() 419 void HeapObjectHeader::unmark()
420 { 420 {
421 checkHeader(); 421 checkHeader();
422 m_size &= ~markBitMask; 422 m_size &= ~markBitMask;
423 } 423 }
424 424
425 NO_SANITIZE_ADDRESS 425 NO_SANITIZE_ADDRESS
426 bool HeapObjectHeader::hasDebugMark() const 426 bool HeapObjectHeader::hasDeadMark() const
427 { 427 {
428 checkHeader(); 428 checkHeader();
429 return m_size & debugBitMask; 429 return m_size & deadBitMask;
430 } 430 }
431 431
432 NO_SANITIZE_ADDRESS 432 NO_SANITIZE_ADDRESS
433 void HeapObjectHeader::clearDebugMark() 433 void HeapObjectHeader::clearDeadMark()
434 { 434 {
435 checkHeader(); 435 checkHeader();
436 m_size &= ~debugBitMask; 436 m_size &= ~deadBitMask;
437 } 437 }
438 438
439 NO_SANITIZE_ADDRESS 439 NO_SANITIZE_ADDRESS
440 void HeapObjectHeader::setDebugMark() 440 void HeapObjectHeader::setDeadMark()
441 { 441 {
442 ASSERT(!isMarked());
442 checkHeader(); 443 checkHeader();
443 m_size |= debugBitMask; 444 m_size |= deadBitMask;
444 } 445 }
445 446
446 #ifndef NDEBUG 447 #ifndef NDEBUG
447 NO_SANITIZE_ADDRESS 448 NO_SANITIZE_ADDRESS
448 void HeapObjectHeader::zapMagic() 449 void HeapObjectHeader::zapMagic()
449 { 450 {
450 m_magic = zappedMagic; 451 m_magic = zappedMagic;
451 } 452 }
452 #endif 453 #endif
453 454
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
493 return heapObjectHeader()->unmark(); 494 return heapObjectHeader()->unmark();
494 } 495 }
495 496
496 template<typename Header> 497 template<typename Header>
497 bool LargeHeapObject<Header>::isMarked() 498 bool LargeHeapObject<Header>::isMarked()
498 { 499 {
499 return heapObjectHeader()->isMarked(); 500 return heapObjectHeader()->isMarked();
500 } 501 }
501 502
502 template<typename Header> 503 template<typename Header>
504 void LargeHeapObject<Header>::setDeadMark()
505 {
506 heapObjectHeader()->setDeadMark();
507 }
508
509 template<typename Header>
503 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) 510 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
504 { 511 {
505 ASSERT(contains(address)); 512 ASSERT(contains(address));
506 if (!objectContains(address)) 513 if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
507 return; 514 return;
508 #if ENABLE(GC_TRACING) 515 #if ENABLE(GC_TRACING)
509 visitor->setHostInfo(&address, "stack"); 516 visitor->setHostInfo(&address, "stack");
510 #endif 517 #endif
511 mark(visitor); 518 mark(visitor);
512 } 519 }
513 520
514 template<> 521 template<>
515 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) 522 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
516 { 523 {
(...skipping 28 matching lines...) Expand all
545 552
546 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload) 553 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
547 { 554 {
548 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); 555 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
549 FinalizedHeapObjectHeader* header = 556 FinalizedHeapObjectHeader* header =
550 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); 557 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
551 return header; 558 return header;
552 } 559 }
553 560
554 template<typename Header> 561 template<typename Header>
555 ThreadHeap<Header>::ThreadHeap(ThreadState* state) 562 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
556 : m_currentAllocationPoint(0) 563 : m_currentAllocationPoint(0)
557 , m_remainingAllocationSize(0) 564 , m_remainingAllocationSize(0)
558 , m_firstPage(0) 565 , m_firstPage(0)
559 , m_firstLargeHeapObject(0) 566 , m_firstLargeHeapObject(0)
560 , m_biggestFreeListIndex(0) 567 , m_biggestFreeListIndex(0)
561 , m_threadState(state) 568 , m_threadState(state)
562 , m_pagePool(0) 569 , m_index(index)
563 { 570 {
564 clearFreeLists(); 571 clearFreeLists();
565 } 572 }
566 573
567 template<typename Header> 574 template<typename Header>
568 ThreadHeap<Header>::~ThreadHeap() 575 ThreadHeap<Header>::~ThreadHeap()
569 { 576 {
570 clearFreeLists(); 577 clearFreeLists();
571 if (!ThreadState::current()->isMainThread()) 578 flushHeapContainsCache();
572 assertEmpty(); 579
573 deletePages(); 580 // Add the ThreadHeap's pages to the orphanedPagePool.
581 Vector<BaseHeapPage*> pages;
582 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
583 pages.append(page);
Mads Ager (chromium) 2014/07/09 09:31:40 I think we might as well just add the pages direct
wibling-chromium 2014/07/09 10:32:30 Done. I previously had a lock on the orphanedPageP
584 m_firstPage = 0;
585
586 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
587 pages.append(largeObject);
588 m_firstLargeHeapObject = 0;
589 Heap::orphanedPagePool()->addOrphanedPages(m_index, pages);
574 } 590 }
575 591
576 template<typename Header> 592 template<typename Header>
577 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) 593 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
578 { 594 {
579 size_t allocationSize = allocationSizeFromSize(size); 595 size_t allocationSize = allocationSizeFromSize(size);
580 if (threadState()->shouldGC()) { 596 if (threadState()->shouldGC()) {
581 if (threadState()->shouldForceConservativeGC()) 597 if (threadState()->shouldForceConservativeGC())
582 Heap::collectGarbage(ThreadState::HeapPointersOnStack); 598 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
583 else 599 else
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after
733 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) 749 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
734 { 750 {
735 flushHeapContainsCache(); 751 flushHeapContainsCache();
736 object->unlink(previousNext); 752 object->unlink(previousNext);
737 object->finalize(); 753 object->finalize();
738 754
739 // Unpoison the object header and allocationGranularity bytes after the 755 // Unpoison the object header and allocationGranularity bytes after the
740 // object before freeing. 756 // object before freeing.
741 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); 757 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
742 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); 758 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
743 delete object->storage(); 759
760 if (object->shuttingDown()) {
761 ASSERT(ThreadState::current()->isCleaningUp());
762 // The thread is shutting down so this object is being removed as part
763 // of a thread local GC. In that case the object could be traced in the
764 // next global GC either due to a dead object being traced via a
765 // conservative pointer or due to a programming error where an object
766 // in another thread heap keeps a dangling pointer to this object.
767 // To guard against this we put the large object memory in the
768 // orphanedPagePool to ensure it is still reachable. After the next global
769 // GC it can be released assuming no rogue/dangling pointers refer to
770 // it.
771 // NOTE: large objects are not moved to the memory pool as it is unlikely
Mads Ager (chromium) 2014/07/09 09:31:40 memory pool -> free page pool.
wibling-chromium 2014/07/09 10:32:30 Done.
772 // they can be reused due to their individual sizes.
773 Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
774 } else {
775 PageMemory* memory = object->storage();
776 object->~LargeHeapObject<Header>();
777 delete memory;
778 }
779 }
780
781 template<typename DataType>
782 PagePool<DataType>::PagePool()
783 {
784 for (int i = 0; i < NumberOfHeaps; ++i) {
785 m_pool[i] = 0;
786 }
787 }
788
789 FreePagePool::~FreePagePool()
790 {
791 for (int index = 0; index < NumberOfHeaps; ++index) {
792 while (PoolEntry* entry = m_pool[index]) {
793 m_pool[index] = entry->next;
794 PageMemory* memory = entry->data;
795 ASSERT(memory);
796 delete memory;
797 delete entry;
798 }
799 }
800 }
801
802 void FreePagePool::addFreePage(int index, PageMemory* memory)
803 {
804 // When adding a page to the pool we decommit it to ensure it is unused
805 // while in the pool. This also allows the physical memory, backing the
806 // page, to be given back to the OS.
807 memory->decommit();
808 MutexLocker locker(m_mutex[index]);
haraken 2014/07/09 08:01:59 Just to confirm: Doesn't this need to be SafePoint
wibling-chromium 2014/07/09 10:32:30 No, we are not at a safepoint here since this can
809 PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
810 m_pool[index] = entry;
811 }
812
813 PageMemory* FreePagePool::takeFreePage(int index)
814 {
815 MutexLocker locker(m_mutex[index]);
haraken 2014/07/09 08:01:59 Ditto.
816 while (PoolEntry* entry = m_pool[index]) {
817 m_pool[index] = entry->next;
818 PageMemory* memory = entry->data;
819 ASSERT(memory);
820 delete entry;
821 if (memory->commit())
822 return memory;
823
824 // We got some memory, but failed to commit it, try again.
825 delete memory;
826 }
827 return 0;
828 }
829
830 OrphanedPagePool::~OrphanedPagePool()
831 {
832 for (int index = 0; index < NumberOfHeaps; ++index) {
833 while (PoolEntry* entry = m_pool[index]) {
834 m_pool[index] = entry->next;
835 BaseHeapPage* page = entry->data;
836 delete entry;
837 PageMemory* memory = page->storage();
838 ASSERT(memory);
839 page->~BaseHeapPage();
840 delete memory;
841 }
842 }
843 }
844
845 void OrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
846 {
847 page->markOrphaned();
848 PoolEntry* entry = new PoolEntry(page, m_pool[index]);
849 m_pool[index] = entry;
850 }
851
852 void OrphanedPagePool::addOrphanedPages(int index, Vector<BaseHeapPage*>& pages)
853 {
854 for (Vector<BaseHeapPage*>::const_iterator it = pages.begin(); it != pages.end(); ++it) {
855 addOrphanedPage(index, *it);
856 }
857 }
858
859 void OrphanedPagePool::decommitOrphanedPages()
860 {
861 #ifndef NDEBUG
862 // No locking needed as all threads are at safepoints at this point in time.
863 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
864 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
865 ASSERT((*it)->isAtSafePoint());
866 #endif
867
868 for (int index = 0; index < NumberOfHeaps; ++index) {
869 PoolEntry* entry = m_pool[index];
870 PoolEntry** prevNext = &m_pool[index];
871 while (entry) {
872 BaseHeapPage* page = entry->data;
873 if (page->traced()) {
874 // If the page was traced in the last GC it is not decommitted.
875 // We only decommit a page, ie. put it in the memory pool,
876 // when the page has no objects pointing to it.
877 // We mark the page as orphaned. This clears the traced flag
878 // and any object trace bits that were set during tracing.
879 page->markOrphaned();
haraken 2014/07/09 08:01:59 Do we need to call markOrphaned()? I guess the pag
wibling-chromium 2014/07/09 10:32:31 Yes, we need to call it to clear the trace bits, b
880 prevNext = &entry->next;
881 entry = entry->next;
882 continue;
883 }
884
885 // Page was not traced. Check if we should reuse the memory or just
886 // free it. Large object memory is not reused, but freed, normal
887 // blink heap pages are reused.
888 // NOTE: We call the destructor before freeing or adding to the
889 // free page pool.
890 PageMemory* memory = page->storage();
891 if (page->isLargeObject()) {
892 page->~BaseHeapPage();
893 delete memory;
894 } else {
895 page->~BaseHeapPage();
896 Heap::freePagePool()->addFreePage(index, memory);
897 }
898
899 PoolEntry* deadEntry = entry;
900 entry = entry->next;
901 *prevNext = entry;
902 delete deadEntry;
903 }
904 }
905 }
906
907 bool OrphanedPagePool::contains(void* object)
haraken 2014/07/09 08:01:59 Shall we add #ifndef NDEBUG ?
wibling-chromium 2014/07/09 10:32:30 Done.
908 {
909 for (int index = 0; index < NumberOfHeaps; ++index) {
910 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
911 BaseHeapPage* page = entry->data;
912 if (page->contains(reinterpret_cast<Address>(object)))
913 return true;
914 }
915 }
916 return false;
744 } 917 }
745 918
746 template<> 919 template<>
747 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) 920 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
748 { 921 {
749 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on 922 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
750 // the heap should be unused (ie. 0). 923 // the heap should be unused (ie. 0).
751 allocatePage(0); 924 allocatePage(0);
752 } 925 }
753 926
754 template<> 927 template<>
755 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) 928 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
756 { 929 {
757 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap 930 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
758 // since it is the same for all objects 931 // since it is the same for all objects
759 ASSERT(gcInfo); 932 ASSERT(gcInfo);
760 allocatePage(gcInfo); 933 allocatePage(gcInfo);
761 } 934 }
762 935
763 template<typename Header> 936 template <typename Header>
764 void ThreadHeap<Header>::clearPagePool() 937 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
765 {
766 while (takePageFromPool()) { }
767 }
768
769 template<typename Header>
770 PageMemory* ThreadHeap<Header>::takePageFromPool()
771 {
772 Heap::flushHeapDoesNotContainCache();
773 while (PagePoolEntry* entry = m_pagePool) {
774 m_pagePool = entry->next();
775 PageMemory* storage = entry->storage();
776 delete entry;
777
778 if (storage->commit())
779 return storage;
780
781 // Failed to commit pooled storage. Release it.
782 delete storage;
783 }
784
785 return 0;
786 }
787
788 template<typename Header>
789 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage)
790 { 938 {
791 flushHeapContainsCache(); 939 flushHeapContainsCache();
792 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); 940 if (page->shuttingDown()) {
793 m_pagePool = entry; 941 ASSERT(ThreadState::current()->isCleaningUp());
794 } 942 // The thread is shutting down so this page is being removed as part
795 943 // of a thread local GC. In that case the page could be accessed in the
796 template <typename Header> 944 // next global GC either due to a dead object being traced via a
797 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) 945 // conservative pointer or due to a programming error where an object
798 { 946 // in another thread heap keeps a dangling pointer to this object.
799 PageMemory* storage = page->storage(); 947 // To guard against this we put the page in the orphanedPagePool to
800 storage->decommit(); 948 // ensure it is still reachable. After the next global GC it can be
801 addPageMemoryToPool(storage); 949 // decommitted and moved to the page pool assuming no rogue/dangling
950 // pointers refer to it.
951 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
952 } else {
953 PageMemory* memory = page->storage();
954 page->~HeapPage<Header>();
955 Heap::freePagePool()->addFreePage(m_index, memory);
956 }
802 } 957 }
803 958
804 template<typename Header> 959 template<typename Header>
805 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) 960 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
806 { 961 {
807 Heap::flushHeapDoesNotContainCache(); 962 Heap::flushHeapDoesNotContainCache();
808 PageMemory* pageMemory = takePageFromPool(); 963 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
809 if (!pageMemory) { 964 while (!pageMemory) {
haraken 2014/07/09 08:01:59 Let's add a comment why this needs to be 'while'.
wibling-chromium 2014/07/09 10:32:31 Done.
810 // Allocate a memory region for blinkPagesPerRegion pages that 965 // Allocate a memory region for blinkPagesPerRegion pages that
811 // will each have the following layout. 966 // will each have the following layout.
812 // 967 //
813 // [ guard os page | ... payload ... | guard os page ] 968 // [ guard os page | ... payload ... | guard os page ]
814 // ^---{ aligned to blink page size } 969 // ^---{ aligned to blink page size }
815 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); 970 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
816 // Setup the PageMemory object for each of the pages in the 971 // Setup the PageMemory object for each of the pages in the
817 // region. 972 // region.
818 size_t offset = 0; 973 size_t offset = 0;
819 for (size_t i = 0; i < blinkPagesPerRegion; i++) { 974 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
820 addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); 975 Heap::freePagePool()->addFreePage(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()));
821 offset += blinkPageSize; 976 offset += blinkPageSize;
822 } 977 }
823 pageMemory = takePageFromPool(); 978 pageMemory = Heap::freePagePool()->takeFreePage(m_index);
824 RELEASE_ASSERT(pageMemory);
825 } 979 }
826 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); 980 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
827 // FIXME: Oilpan: Linking new pages into the front of the list is 981 // FIXME: Oilpan: Linking new pages into the front of the list is
828 // crucial when performing allocations during finalization because 982 // crucial when performing allocations during finalization because
829 // it ensures that those pages are not swept in the current GC 983 // it ensures that those pages are not swept in the current GC
830 // round. We should create a separate page list for that to 984 // round. We should create a separate page list for that to
831 // separate out the pages allocated during finalization clearly 985 // separate out the pages allocated during finalization clearly
832 // from the pages currently being swept. 986 // from the pages currently being swept.
833 page->link(&m_firstPage); 987 page->link(&m_firstPage);
834 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); 988 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
(...skipping 23 matching lines...) Expand all
858 ASSERT(isConsistentForGC()); 1012 ASSERT(isConsistentForGC());
859 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING 1013 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
860 // When using ASan do a pre-sweep where all unmarked objects are poisoned before 1014 // When using ASan do a pre-sweep where all unmarked objects are poisoned before
861 // calling their finalizer methods. This can catch the cases where one object's 1015 // calling their finalizer methods. This can catch the cases where one object's
862 // finalizer tries to modify another object as part of finalization. 1016 // finalizer tries to modify another object as part of finalization.
863 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1017 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
864 page->poisonUnmarkedObjects(); 1018 page->poisonUnmarkedObjects();
865 #endif 1019 #endif
866 HeapPage<Header>* page = m_firstPage; 1020 HeapPage<Header>* page = m_firstPage;
867 HeapPage<Header>** previous = &m_firstPage; 1021 HeapPage<Header>** previous = &m_firstPage;
868 bool pagesRemoved = false;
869 while (page) { 1022 while (page) {
870 if (page->isEmpty()) { 1023 if (page->isEmpty()) {
871 flushHeapContainsCache();
872 HeapPage<Header>* unused = page; 1024 HeapPage<Header>* unused = page;
873 page = page->next(); 1025 page = page->next();
874 HeapPage<Header>::unlink(unused, previous); 1026 HeapPage<Header>::unlink(unused, previous);
875 pagesRemoved = true;
876 } else { 1027 } else {
877 page->sweep(); 1028 page->sweep();
878 previous = &page->m_next; 1029 previous = &page->m_next;
879 page = page->next(); 1030 page = page->next();
880 } 1031 }
881 } 1032 }
882 if (pagesRemoved)
883 flushHeapContainsCache();
884 1033
885 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; 1034 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
886 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { 1035 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
887 if (current->isMarked()) { 1036 if (current->isMarked()) {
888 stats().increaseAllocatedSpace(current->size()); 1037 stats().increaseAllocatedSpace(current->size());
889 stats().increaseObjectSpace(current->payloadSize()); 1038 stats().increaseObjectSpace(current->payloadSize());
890 current->unmark(); 1039 current->unmark();
891 previousNext = &current->m_next; 1040 previousNext = &current->m_next;
892 current = current->next(); 1041 current = current->next();
893 } else { 1042 } else {
894 LargeHeapObject<Header>* next = current->next(); 1043 LargeHeapObject<Header>* next = current->next();
895 freeLargeObject(current, previousNext); 1044 freeLargeObject(current, previousNext);
896 current = next; 1045 current = next;
897 } 1046 }
898 } 1047 }
899 } 1048 }
900 1049
901 template<typename Header> 1050 template<typename Header>
902 void ThreadHeap<Header>::assertEmpty()
903 {
904 // No allocations are permitted. The thread is exiting.
905 NoAllocationScope<AnyThread> noAllocation;
906 makeConsistentForGC();
907 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
908 Address end = page->end();
909 Address headerAddress;
910 for (headerAddress = page->payload(); headerAddress < end; ) {
911 BasicObjectHeader* basicHeader = reinterpret_cast<BasicObjectHeader*>(headerAddress);
912 ASSERT(basicHeader->size() < blinkPagePayloadSize());
913 // A live object is potentially a dangling pointer from
914 // some root. Treat that as a bug. Unfortunately, it is
915 // hard to reliably check in the presence of conservative
916 // stack scanning. Something could be conservatively kept
917 // alive because a non-pointer on another thread's stack
918 // is treated as a pointer into the heap.
919 //
920 // FIXME: This assert can currently trigger in cases where
921 // worker shutdown does not get enough precise GCs to get
922 // all objects removed from the worker heap. There are two
923 // issues: 1) conservative GCs keeping objects alive, and
924 // 2) long chains of RefPtrs/Persistents that require more
925 // GCs to get everything cleaned up. Maybe we can keep
926 // threads alive until their heaps become empty instead of
927 // forcing the threads to die immediately?
928 ASSERT(Heap::lastGCWasConservative() || basicHeader->isFree());
929 headerAddress += basicHeader->size();
930 }
931 ASSERT(headerAddress == end);
932 addToFreeList(page->payload(), end - page->payload());
933 }
934
935 ASSERT(Heap::lastGCWasConservative() || !m_firstLargeHeapObject);
936 }
937
938 template<typename Header>
939 bool ThreadHeap<Header>::isConsistentForGC() 1051 bool ThreadHeap<Header>::isConsistentForGC()
940 { 1052 {
941 for (size_t i = 0; i < blinkPageSizeLog2; i++) { 1053 for (size_t i = 0; i < blinkPageSizeLog2; i++) {
942 if (m_freeLists[i]) 1054 if (m_freeLists[i])
943 return false; 1055 return false;
944 } 1056 }
945 return !ownsNonEmptyAllocationArea(); 1057 return !ownsNonEmptyAllocationArea();
946 } 1058 }
947 1059
948 template<typename Header> 1060 template<typename Header>
949 void ThreadHeap<Header>::makeConsistentForGC() 1061 void ThreadHeap<Header>::makeConsistentForGC()
950 { 1062 {
951 if (ownsNonEmptyAllocationArea()) 1063 if (ownsNonEmptyAllocationArea())
952 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); 1064 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
953 setAllocationPoint(0, 0); 1065 setAllocationPoint(0, 0);
954 clearFreeLists(); 1066 clearFreeLists();
955 } 1067 }
956 1068
957 template<typename Header> 1069 template<typename Header>
958 void ThreadHeap<Header>::clearMarks() 1070 void ThreadHeap<Header>::clearLiveAndMarkDead()
959 { 1071 {
960 ASSERT(isConsistentForGC()); 1072 ASSERT(isConsistentForGC());
961 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1073 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
962 page->clearMarks(); 1074 page->clearLiveAndMarkDead();
963 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) 1075 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
964 current->unmark(); 1076 if (current->isMarked())
1077 current->unmark();
1078 else
1079 current->setDeadMark();
1080 }
965 } 1081 }
966 1082
967 template<typename Header> 1083 template<typename Header>
968 void ThreadHeap<Header>::deletePages()
969 {
970 flushHeapContainsCache();
971 // Add all pages in the pool to the heap's list of pages before deleting
972 clearPagePool();
973
974 for (HeapPage<Header>* page = m_firstPage; page; ) {
975 HeapPage<Header>* dead = page;
976 page = page->next();
977 PageMemory* storage = dead->storage();
978 dead->~HeapPage();
979 delete storage;
980 }
981 m_firstPage = 0;
982
983 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
984 LargeHeapObject<Header>* dead = current;
985 current = current->next();
986 PageMemory* storage = dead->storage();
987 dead->~LargeHeapObject();
988 delete storage;
989 }
990 m_firstLargeHeapObject = 0;
991 }
992
993 template<typename Header>
994 void ThreadHeap<Header>::clearFreeLists() 1084 void ThreadHeap<Header>::clearFreeLists()
995 { 1085 {
996 for (size_t i = 0; i < blinkPageSizeLog2; i++) 1086 for (size_t i = 0; i < blinkPageSizeLog2; i++)
997 m_freeLists[i] = 0; 1087 m_freeLists[i] = 0;
998 } 1088 }
999 1089
1000 int BaseHeap::bucketIndexForSize(size_t size) 1090 int BaseHeap::bucketIndexForSize(size_t size)
1001 { 1091 {
1002 ASSERT(size > 0); 1092 ASSERT(size > 0);
1003 int index = -1; 1093 int index = -1;
(...skipping 20 matching lines...) Expand all
1024 void HeapPage<Header>::link(HeapPage** prevNext) 1114 void HeapPage<Header>::link(HeapPage** prevNext)
1025 { 1115 {
1026 m_next = *prevNext; 1116 m_next = *prevNext;
1027 *prevNext = this; 1117 *prevNext = this;
1028 } 1118 }
1029 1119
1030 template<typename Header> 1120 template<typename Header>
1031 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) 1121 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
1032 { 1122 {
1033 *prevNext = unused->m_next; 1123 *prevNext = unused->m_next;
1034 unused->heap()->addPageToPool(unused); 1124 unused->heap()->removePageFromHeap(unused);
1035 } 1125 }
1036 1126
1037 template<typename Header> 1127 template<typename Header>
1038 void HeapPage<Header>::getStats(HeapStats& stats) 1128 void HeapPage<Header>::getStats(HeapStats& stats)
1039 { 1129 {
1040 stats.increaseAllocatedSpace(blinkPageSize); 1130 stats.increaseAllocatedSpace(blinkPageSize);
1041 Address headerAddress = payload(); 1131 Address headerAddress = payload();
1042 ASSERT(headerAddress != end()); 1132 ASSERT(headerAddress != end());
1043 do { 1133 do {
1044 Header* header = reinterpret_cast<Header*>(headerAddress); 1134 Header* header = reinterpret_cast<Header*>(headerAddress);
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
1090 header->unmark(); 1180 header->unmark();
1091 headerAddress += header->size(); 1181 headerAddress += header->size();
1092 heap()->stats().increaseObjectSpace(header->payloadSize()); 1182 heap()->stats().increaseObjectSpace(header->payloadSize());
1093 startOfGap = headerAddress; 1183 startOfGap = headerAddress;
1094 } 1184 }
1095 if (startOfGap != end()) 1185 if (startOfGap != end())
1096 heap()->addToFreeList(startOfGap, end() - startOfGap); 1186 heap()->addToFreeList(startOfGap, end() - startOfGap);
1097 } 1187 }
1098 1188
1099 template<typename Header> 1189 template<typename Header>
1100 void HeapPage<Header>::clearMarks() 1190 void HeapPage<Header>::clearLiveAndMarkDead()
1101 { 1191 {
1102 for (Address headerAddress = payload(); headerAddress < end();) { 1192 for (Address headerAddress = payload(); headerAddress < end();) {
1103 Header* header = reinterpret_cast<Header*>(headerAddress); 1193 Header* header = reinterpret_cast<Header*>(headerAddress);
1104 ASSERT(header->size() < blinkPagePayloadSize()); 1194 ASSERT(header->size() < blinkPagePayloadSize());
1105 if (!header->isFree()) 1195 // Check if a free list entry first since we cannot call
1196 // isMarked on a free list entry.
1197 if (header->isFree()) {
1198 headerAddress += header->size();
1199 continue;
1200 }
1201 if (header->isMarked())
1106 header->unmark(); 1202 header->unmark();
1203 else
1204 header->setDeadMark();
1107 headerAddress += header->size(); 1205 headerAddress += header->size();
1108 } 1206 }
1109 } 1207 }
1110 1208
1111 template<typename Header> 1209 template<typename Header>
1112 void HeapPage<Header>::populateObjectStartBitMap() 1210 void HeapPage<Header>::populateObjectStartBitMap()
1113 { 1211 {
1114 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); 1212 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1115 Address start = payload(); 1213 Address start = payload();
1116 for (Address headerAddress = start; headerAddress < end();) { 1214 for (Address headerAddress = start; headerAddress < end();) {
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
1176 if (header->isFree()) 1274 if (header->isFree())
1177 return 0; 1275 return 0;
1178 return header; 1276 return header;
1179 } 1277 }
1180 1278
1181 template<typename Header> 1279 template<typename Header>
1182 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) 1280 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1183 { 1281 {
1184 ASSERT(contains(address)); 1282 ASSERT(contains(address));
1185 Header* header = findHeaderFromAddress(address); 1283 Header* header = findHeaderFromAddress(address);
1186 if (!header) 1284 if (!header || header->hasDeadMark())
1187 return; 1285 return;
1188 1286
1189 #if ENABLE(GC_TRACING) 1287 #if ENABLE(GC_TRACING)
1190 visitor->setHostInfo(&address, "stack"); 1288 visitor->setHostInfo(&address, "stack");
1191 #endif 1289 #endif
1192 if (hasVTable(header) && !vTableInitialized(header->payload())) 1290 if (hasVTable(header) && !vTableInitialized(header->payload()))
1193 visitor->markConservatively(header); 1291 visitor->markConservatively(header);
1194 else 1292 else
1195 visitor->mark(header, traceCallback(header)); 1293 visitor->mark(header, traceCallback(header));
1196 } 1294 }
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
1364 { 1462 {
1365 for (size_t i = 0; i < bufferSize; i++) 1463 for (size_t i = 0; i < bufferSize; i++)
1366 m_buffer[i] = Item(0, 0); 1464 m_buffer[i] = Item(0, 0);
1367 } 1465 }
1368 1466
1369 bool CallbackStack::isEmpty() 1467 bool CallbackStack::isEmpty()
1370 { 1468 {
1371 return m_current == &(m_buffer[0]) && !m_next; 1469 return m_current == &(m_buffer[0]) && !m_next;
1372 } 1470 }
1373 1471
1472 template<GCMode Mode>
1374 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor ) 1473 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor )
1375 { 1474 {
1376 if (m_current == &(m_buffer[0])) { 1475 if (m_current == &(m_buffer[0])) {
1377 if (!m_next) { 1476 if (!m_next) {
1378 #ifndef NDEBUG 1477 #ifndef NDEBUG
1379 clearUnused(); 1478 clearUnused();
1380 #endif 1479 #endif
1381 return false; 1480 return false;
1382 } 1481 }
1383 CallbackStack* nextStack = m_next; 1482 CallbackStack* nextStack = m_next;
1384 *first = nextStack; 1483 *first = nextStack;
1385 delete this; 1484 delete this;
1386 return nextStack->popAndInvokeCallback(first, visitor); 1485 return nextStack->popAndInvokeCallback<Mode>(first, visitor);
1387 } 1486 }
1388 Item* item = --m_current; 1487 Item* item = --m_current;
1389 1488
1489 // If the object being traced is located on a page which is dead don't
1490 // trace it. This can happen when a conservative GC kept a dead object
1491 // alive which pointed to a (now gone) object on the cleaned up page.
1492 // Also if doing a thread local GC don't trace objects that are located
1493 // on other thread's heaps, ie. pages where the shuttingDown flag is not
1494 // set.
1495 BaseHeapPage* heapPage = pageHeaderFromObject(item->object());
1496 if (heapPage->orphaned() || (Mode == ThreadLocalGC && !heapPage->shuttingDow n())) {
1497 // When doing a GC we should only get a trace callback to an orphaned
1498 // page if the GC is conservative. If it is not conservative there is
1499 // a bug in the code where we have a dangling pointer to a page
1500 // on the dead thread.
1501 RELEASE_ASSERT(!heapPage->orphaned() || Heap::lastGCWasConservative());
1502
1503 if (Mode == GlobalGC) {
1504 // If tracing this from a global GC set the traced bit.
1505 heapPage->setTraced();
haraken 2014/07/09 08:01:59 We'll need the tracing flag only on orphaned pages
wibling-chromium 2014/07/09 10:32:31 We also need to ignore the traceCallback when doin
1506 }
1507 return true;
1508 }
1509
1390 VisitorCallback callback = item->callback(); 1510 VisitorCallback callback = item->callback();
1391 #if ENABLE(GC_TRACING) 1511 #if ENABLE(GC_TRACING)
1392 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndI nvokeCallback 1512 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndI nvokeCallback
1393 visitor->setHostInfo(item->object(), classOf(item->object())); 1513 visitor->setHostInfo(item->object(), classOf(item->object()));
1394 #endif 1514 #endif
1395 callback(visitor, item->object()); 1515 callback(visitor, item->object());
1396 1516
1397 return true; 1517 return true;
1398 } 1518 }
1399 1519
1520 template<GCMode Mode>
1400 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor) 1521 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor)
1401 { 1522 {
1402 CallbackStack* stack = 0; 1523 CallbackStack* stack = 0;
1403 // The first block is the only one where new ephemerons are added, so we 1524 // The first block is the only one where new ephemerons are added, so we
1404 // call the callbacks on that last, to catch any new ephemerons discovered 1525 // call the callbacks on that last, to catch any new ephemerons discovered
1405 // in the callbacks. 1526 // in the callbacks.
1406 // However, if enough ephemerons were added, we may have a new block that 1527 // However, if enough ephemerons were added, we may have a new block that
1407 // has been prepended to the chain. This will be very rare, but we can 1528 // has been prepended to the chain. This will be very rare, but we can
1408 // handle the situation by starting again and calling all the callbacks 1529 // handle the situation by starting again and calling all the callbacks
1409 // a second time. 1530 // a second time.
1410 while (stack != *first) { 1531 while (stack != *first) {
1411 stack = *first; 1532 stack = *first;
1412 stack->invokeOldestCallbacks(visitor); 1533 stack->invokeOldestCallbacks<Mode>(visitor);
1413 } 1534 }
1414 } 1535 }
1415 1536
1537 template<GCMode Mode>
1416 void CallbackStack::invokeOldestCallbacks(Visitor* visitor) 1538 void CallbackStack::invokeOldestCallbacks(Visitor* visitor)
1417 { 1539 {
1418 // Recurse first (bufferSize at a time) so we get to the newly added entries 1540 // Recurse first (bufferSize at a time) so we get to the newly added entries
1419 // last. 1541 // last.
1420 if (m_next) 1542 if (m_next)
1421 m_next->invokeOldestCallbacks(visitor); 1543 m_next->invokeOldestCallbacks<Mode>(visitor);
1422 1544
1423 // This loop can tolerate entries being added by the callbacks after 1545 // This loop can tolerate entries being added by the callbacks after
1424 // iteration starts. 1546 // iteration starts.
1425 for (unsigned i = 0; m_buffer + i < m_current; i++) { 1547 for (unsigned i = 0; m_buffer + i < m_current; i++) {
1426 Item& item = m_buffer[i]; 1548 Item& item = m_buffer[i];
1549
1550 BaseHeapPage* heapPage = pageHeaderFromObject(item.object());
1551 if (heapPage->orphaned() || (Mode == ThreadLocalGC && !heapPage->shuttin gDown())) {
1552 // We should only get a trace callback to an orphaned page if doing
1553 // a conservative GC. If not conservative there is a bug in the code
1554 // where we have a dangling pointer to a page on the dead thread.
1555 RELEASE_ASSERT(Heap::lastGCWasConservative());
1556
1557 // If tracing this from a global GC set the traced bit.
1558 if (Mode == GlobalGC)
1559 heapPage->setTraced();
haraken 2014/07/09 08:01:59 Ditto. We might want to restructure the branches a
1560 continue;
1561 }
1427 item.callback()(visitor, item.object()); 1562 item.callback()(visitor, item.object());
1428 } 1563 }
1429 } 1564 }
1430 1565
1431 #ifndef NDEBUG 1566 #ifndef NDEBUG
1432 bool CallbackStack::hasCallbackForObject(const void* object) 1567 bool CallbackStack::hasCallbackForObject(const void* object)
1433 { 1568 {
1434 for (unsigned i = 0; m_buffer + i < m_current; i++) { 1569 for (unsigned i = 0; m_buffer + i < m_current; i++) {
1435 Item* item = &m_buffer[i]; 1570 Item* item = &m_buffer[i];
1436 if (item->object() == object) { 1571 if (item->object() == object) {
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after
1669 }; 1804 };
1670 1805
1671 void Heap::init() 1806 void Heap::init()
1672 { 1807 {
1673 ThreadState::init(); 1808 ThreadState::init();
1674 CallbackStack::init(&s_markingStack); 1809 CallbackStack::init(&s_markingStack);
1675 CallbackStack::init(&s_weakCallbackStack); 1810 CallbackStack::init(&s_weakCallbackStack);
1676 CallbackStack::init(&s_ephemeronStack); 1811 CallbackStack::init(&s_ephemeronStack);
1677 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); 1812 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
1678 s_markingVisitor = new MarkingVisitor(); 1813 s_markingVisitor = new MarkingVisitor();
1814 s_freePagePool = new FreePagePool();
1815 s_orphanedPagePool = new OrphanedPagePool();
1679 } 1816 }
1680 1817
1681 void Heap::shutdown() 1818 void Heap::shutdown()
1682 { 1819 {
1683 s_shutdownCalled = true; 1820 s_shutdownCalled = true;
1684 ThreadState::shutdownHeapIfNecessary(); 1821 ThreadState::shutdownHeapIfNecessary();
1685 } 1822 }
1686 1823
1687 void Heap::doShutdown() 1824 void Heap::doShutdown()
1688 { 1825 {
1689 // We don't want to call doShutdown() twice. 1826 // We don't want to call doShutdown() twice.
1690 if (!s_markingVisitor) 1827 if (!s_markingVisitor)
1691 return; 1828 return;
1692 1829
1693 ASSERT(!ThreadState::isAnyThreadInGC()); 1830 ASSERT(!ThreadState::isAnyThreadInGC());
1694 ASSERT(!ThreadState::attachedThreads().size()); 1831 ASSERT(!ThreadState::attachedThreads().size());
1695 delete s_markingVisitor; 1832 delete s_markingVisitor;
1696 s_markingVisitor = 0; 1833 s_markingVisitor = 0;
1697 delete s_heapDoesNotContainCache; 1834 delete s_heapDoesNotContainCache;
1698 s_heapDoesNotContainCache = 0; 1835 s_heapDoesNotContainCache = 0;
1836 delete s_freePagePool;
1837 s_freePagePool = 0;
1838 delete s_orphanedPagePool;
1839 s_orphanedPagePool = 0;
1699 CallbackStack::shutdown(&s_weakCallbackStack); 1840 CallbackStack::shutdown(&s_weakCallbackStack);
1700 CallbackStack::shutdown(&s_markingStack); 1841 CallbackStack::shutdown(&s_markingStack);
1701 CallbackStack::shutdown(&s_ephemeronStack); 1842 CallbackStack::shutdown(&s_ephemeronStack);
1702 ThreadState::shutdown(); 1843 ThreadState::shutdown();
1703 } 1844 }
1704 1845
1705 BaseHeapPage* Heap::contains(Address address) 1846 BaseHeapPage* Heap::contains(Address address)
1706 { 1847 {
1707 ASSERT(ThreadState::isAnyThreadInGC()); 1848 ASSERT(ThreadState::isAnyThreadInGC());
1708 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 1849 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
1709 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 1850 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1710 BaseHeapPage* page = (*it)->contains(address); 1851 BaseHeapPage* page = (*it)->contains(address);
1711 if (page) 1852 if (page)
1712 return page; 1853 return page;
1713 } 1854 }
1714 return 0; 1855 return 0;
1715 } 1856 }
1716 1857
1858 #ifndef NDEBUG
1859 bool Heap::containedInHeapOrOrphanedPage(void* object)
1860 {
1861 return contains(object) || orphanedPagePool()->contains(object);
1862 }
1863 #endif
1864
1717 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) 1865 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
1718 { 1866 {
1719 ASSERT(ThreadState::isAnyThreadInGC()); 1867 ASSERT(ThreadState::isAnyThreadInGC());
1720 1868
1721 #ifdef NDEBUG 1869 #ifdef NDEBUG
1722 if (s_heapDoesNotContainCache->lookup(address)) 1870 if (s_heapDoesNotContainCache->lookup(address))
1723 return 0; 1871 return 0;
1724 #endif 1872 #endif
1725 1873
1726 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 1874 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
1785 builder.append("\n\t"); 1933 builder.append("\n\t");
1786 builder.append(frameToName.nullableName()); 1934 builder.append(frameToName.nullableName());
1787 --framesToShow; 1935 --framesToShow;
1788 } 1936 }
1789 return builder.toString().replace("WebCore::", ""); 1937 return builder.toString().replace("WebCore::", "");
1790 } 1938 }
1791 #endif 1939 #endif
1792 1940
1793 void Heap::pushTraceCallback(void* object, TraceCallback callback) 1941 void Heap::pushTraceCallback(void* object, TraceCallback callback)
1794 { 1942 {
1795 ASSERT(Heap::contains(object)); 1943 ASSERT(Heap::containedInHeapOrOrphanedPage(object));
1796 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); 1944 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
1797 *slot = CallbackStack::Item(object, callback); 1945 *slot = CallbackStack::Item(object, callback);
1798 } 1946 }
1799 1947
1948 template<GCMode Mode>
1800 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) 1949 bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
1801 { 1950 {
1802 return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor); 1951 return s_markingStack->popAndInvokeCallback<Mode>(&s_markingStack, visitor);
1803 } 1952 }
1804 1953
1805 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback ) 1954 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback )
1806 { 1955 {
1807 ASSERT(Heap::contains(cell)); 1956 ASSERT(Heap::contains(cell));
1808 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack); 1957 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack);
1809 *slot = CallbackStack::Item(cell, callback); 1958 *slot = CallbackStack::Item(cell, callback);
1810 } 1959 }
1811 1960
1812 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback) 1961 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback)
1813 { 1962 {
1814 ASSERT(Heap::contains(object)); 1963 ASSERT(Heap::contains(object));
1815 BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeader Address(reinterpret_cast<Address>(object))); 1964 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
1816 ASSERT(Heap::contains(object) == heapPageForObject); 1965 ASSERT(Heap::contains(object) == heapPageForObject);
1817 ThreadState* state = heapPageForObject->threadState(); 1966 ThreadState* state = heapPageForObject->threadState();
1818 state->pushWeakObjectPointerCallback(closure, callback); 1967 state->pushWeakObjectPointerCallback(closure, callback);
1819 } 1968 }
1820 1969
1821 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) 1970 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
1822 { 1971 {
1823 return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visit or); 1972 return s_weakCallbackStack->popAndInvokeCallback<GlobalGC>(&s_weakCallbackSt ack, visitor);
1824 } 1973 }
1825 1974
1826 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) 1975 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback)
1827 { 1976 {
1828 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k); 1977 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k);
1829 *slot = CallbackStack::Item(table, iterationCallback); 1978 *slot = CallbackStack::Item(table, iterationCallback);
1830 1979
1831 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks. 1980 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks.
1832 // These callbacks are called right after marking and before any thread comm ences execution 1981 // These callbacks are called right after marking and before any thread comm ences execution
1833 // so it suits our needs for telling the ephemerons that the iteration is do ne. 1982 // so it suits our needs for telling the ephemerons that the iteration is do ne.
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
1872 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); 2021 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
1873 #endif 2022 #endif
1874 2023
1875 // Disallow allocation during garbage collection (but not 2024 // Disallow allocation during garbage collection (but not
1876 // during the finalization that happens when the gcScope is 2025 // during the finalization that happens when the gcScope is
1877 // torn down). 2026 // torn down).
1878 NoAllocationScope<AnyThread> noAllocationScope; 2027 NoAllocationScope<AnyThread> noAllocationScope;
1879 2028
1880 prepareForGC(); 2029 prepareForGC();
1881 2030
1882 ThreadState::visitRoots(s_markingVisitor); 2031 traceRootsAndPerformGlobalWeakProcessing<GlobalGC>();
2032
2033 // After a global marking we know that any orphaned page that was not reache d
2034 // cannot be reached in a subsequent GC. This is due to a thread either havi ng
2035 // swept its heap or having done a "poor mans sweep" in prepareForGC which m arks
2036 // objects that are dead, but not swept in the previous GC as dead. In this GC's
2037 // marking we check that any object marked as dead is not traced. E.g. via a
2038 // conservatively found pointer or a programming error with an object contai ning
2039 // a dangling pointer.
2040 orphanedPagePool()->decommitOrphanedPages();
2041
2042 #if ENABLE(GC_TRACING)
2043 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2044 #endif
2045
2046 if (blink::Platform::current()) {
2047 uint64_t objectSpaceSize;
2048 uint64_t allocatedSpaceSize;
2049 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2050 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2051 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2052 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2053 }
2054 }
2055
2056 void Heap::collectGarbageForTerminatingThread(ThreadState* state)
2057 {
2058 // We explicitly do not enter a safepoint while doing thread specific
2059 // garbage collection since we don't want to allow a global GC at the
2060 // same time as a thread local GC.
2061
2062 {
2063 NoAllocationScope<AnyThread> noAllocationScope;
2064
2065 state->enterGC();
2066 state->prepareForGC();
2067
2068 traceRootsAndPerformGlobalWeakProcessing<ThreadLocalGC>();
haraken 2014/07/09 08:01:59 traceRootsAndPerform"Global"WeakProcessing sounds
wibling-chromium 2014/07/09 10:32:31 I agree GlobalWeakProcessing is a bit confusing, b
2069
2070 state->leaveGC();
2071 }
2072 state->performPendingSweep();
2073 }
2074
2075 template<GCMode Mode>
2076 void Heap::traceRootsAndPerformGlobalWeakProcessing()
2077 {
2078 if (Mode == ThreadLocalGC)
2079 ThreadState::current()->visitLocalRoots(s_markingVisitor);
2080 else
2081 ThreadState::visitRoots(s_markingVisitor);
1883 2082
1884 // Ephemeron fixed point loop. 2083 // Ephemeron fixed point loop.
1885 do { 2084 do {
1886 // Recursively mark all objects that are reachable from the roots. 2085 // Recursively mark all objects that are reachable from the roots for th is thread.
1887 while (popAndInvokeTraceCallback(s_markingVisitor)) { } 2086 // Also don't continue tracing if the trace hits an object on another th read's heap.
2087 while (popAndInvokeTraceCallback<Mode>(s_markingVisitor)) { }
1888 2088
1889 // Mark any strong pointers that have now become reachable in ephemeron 2089 // Mark any strong pointers that have now become reachable in ephemeron
1890 // maps. 2090 // maps.
1891 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); 2091 CallbackStack::invokeCallbacks<Mode>(&s_ephemeronStack, s_markingVisitor );
1892 2092
1893 // Rerun loop if ephemeron processing queued more objects for tracing. 2093 // Rerun loop if ephemeron processing queued more objects for tracing.
1894 } while (!s_markingStack->isEmpty()); 2094 } while (!s_markingStack->isEmpty());
1895 2095
1896 // Call weak callbacks on objects that may now be pointing to dead 2096 // Call weak callbacks on objects that may now be pointing to dead
1897 // objects and call ephemeronIterationDone callbacks on weak tables 2097 // objects and call ephemeronIterationDone callbacks on weak tables
1898 // to do cleanup (specifically clear the queued bits for weak hash 2098 // to do cleanup (specifically clear the queued bits for weak hash
1899 // tables). 2099 // tables).
1900 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } 2100 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
1901 2101
1902 CallbackStack::clear(&s_ephemeronStack); 2102 CallbackStack::clear(&s_ephemeronStack);
1903 2103
1904 // It is not permitted to trace pointers of live objects in the weak 2104 // It is not permitted to trace pointers of live objects in the weak
1905 // callback phase, so the marking stack should still be empty here. 2105 // callback phase, so the marking stack should still be empty here.
1906 ASSERT(s_markingStack->isEmpty()); 2106 ASSERT(s_markingStack->isEmpty());
1907
1908 #if ENABLE(GC_TRACING)
1909 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
1910 #endif
1911
1912 if (blink::Platform::current()) {
1913 uint64_t objectSpaceSize;
1914 uint64_t allocatedSpaceSize;
1915 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
1916 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
1917 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
1918 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
1919 }
1920 } 2107 }
1921 2108
1922 void Heap::collectAllGarbage() 2109 void Heap::collectAllGarbage()
1923 { 2110 {
1924 // FIXME: oilpan: we should perform a single GC and everything 2111 // FIXME: oilpan: we should perform a single GC and everything
1925 // should die. Unfortunately it is not the case for all objects 2112 // should die. Unfortunately it is not the case for all objects
1926 // because the hierarchy was not completely moved to the heap and 2113 // because the hierarchy was not completely moved to the heap and
1927 // some heap allocated objects own objects that contain persistents 2114 // some heap allocated objects own objects that contain persistents
1928 // pointing to other heap allocated objects. 2115 // pointing to other heap allocated objects.
1929 for (int i = 0; i < 5; i++) 2116 for (int i = 0; i < 5; i++)
1930 collectGarbage(ThreadState::NoHeapPointersOnStack); 2117 collectGarbage(ThreadState::NoHeapPointersOnStack);
1931 } 2118 }
1932 2119
1933 void Heap::setForcePreciseGCForTesting() 2120 void Heap::setForcePreciseGCForTesting()
1934 { 2121 {
1935 ThreadState::current()->setForcePreciseGCForTesting(true); 2122 ThreadState::current()->setForcePreciseGCForTesting(true);
1936 } 2123 }
1937 2124
2125 template<typename Header>
2126 void ThreadHeap<Header>::prepareHeapForShutdown()
2127 {
2128 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
2129 page->setShutdown();
2130 }
2131 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) {
2132 current->setShutdown();
2133 }
2134 }
2135
1938 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize) 2136 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize)
1939 { 2137 {
1940 *objectSpaceSize = 0; 2138 *objectSpaceSize = 0;
1941 *allocatedSpaceSize = 0; 2139 *allocatedSpaceSize = 0;
1942 ASSERT(ThreadState::isAnyThreadInGC()); 2140 ASSERT(ThreadState::isAnyThreadInGC());
1943 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 2141 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
1944 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; 2142 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
1945 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) { 2143 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) {
1946 *objectSpaceSize += (*it)->stats().totalObjectSpace(); 2144 *objectSpaceSize += (*it)->stats().totalObjectSpace();
1947 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); 2145 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
(...skipping 30 matching lines...) Expand all
1978 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 2176 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
1979 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) 2177 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it)
1980 (*it)->makeConsistentForGC(); 2178 (*it)->makeConsistentForGC();
1981 } 2179 }
1982 2180
1983 // Force template instantiations for the types that we need. 2181 // Force template instantiations for the types that we need.
1984 template class HeapPage<FinalizedHeapObjectHeader>; 2182 template class HeapPage<FinalizedHeapObjectHeader>;
1985 template class HeapPage<HeapObjectHeader>; 2183 template class HeapPage<HeapObjectHeader>;
1986 template class ThreadHeap<FinalizedHeapObjectHeader>; 2184 template class ThreadHeap<FinalizedHeapObjectHeader>;
1987 template class ThreadHeap<HeapObjectHeader>; 2185 template class ThreadHeap<HeapObjectHeader>;
2186 template bool CallbackStack::popAndInvokeCallback<GlobalGC>(CallbackStack**, Vis itor*);
2187 template bool CallbackStack::popAndInvokeCallback<ThreadLocalGC>(CallbackStack** , Visitor*);
1988 2188
1989 Visitor* Heap::s_markingVisitor; 2189 Visitor* Heap::s_markingVisitor;
1990 CallbackStack* Heap::s_markingStack; 2190 CallbackStack* Heap::s_markingStack;
1991 CallbackStack* Heap::s_weakCallbackStack; 2191 CallbackStack* Heap::s_weakCallbackStack;
1992 CallbackStack* Heap::s_ephemeronStack; 2192 CallbackStack* Heap::s_ephemeronStack;
1993 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2193 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
1994 bool Heap::s_shutdownCalled = false; 2194 bool Heap::s_shutdownCalled = false;
1995 bool Heap::s_lastGCWasConservative = false; 2195 bool Heap::s_lastGCWasConservative = false;
2196 FreePagePool* Heap::s_freePagePool;
2197 OrphanedPagePool* Heap::s_orphanedPagePool;
1996 } 2198 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698