Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(2008)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 371623002: [oilpan]: Make thread shutdown more robust. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: review feedback Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 404 matching lines...) Expand 10 before | Expand all | Expand 10 after
415 } 415 }
416 416
417 NO_SANITIZE_ADDRESS 417 NO_SANITIZE_ADDRESS
418 void HeapObjectHeader::unmark() 418 void HeapObjectHeader::unmark()
419 { 419 {
420 checkHeader(); 420 checkHeader();
421 m_size &= ~markBitMask; 421 m_size &= ~markBitMask;
422 } 422 }
423 423
424 NO_SANITIZE_ADDRESS 424 NO_SANITIZE_ADDRESS
425 bool HeapObjectHeader::hasDebugMark() const 425 bool HeapObjectHeader::hasDeadMark() const
426 { 426 {
427 checkHeader(); 427 checkHeader();
428 return m_size & debugBitMask; 428 return m_size & deadBitMask;
429 } 429 }
430 430
431 NO_SANITIZE_ADDRESS 431 NO_SANITIZE_ADDRESS
432 void HeapObjectHeader::clearDebugMark() 432 void HeapObjectHeader::clearDeadMark()
433 { 433 {
434 checkHeader(); 434 checkHeader();
435 m_size &= ~debugBitMask; 435 m_size &= ~deadBitMask;
436 } 436 }
437 437
438 NO_SANITIZE_ADDRESS 438 NO_SANITIZE_ADDRESS
439 void HeapObjectHeader::setDebugMark() 439 void HeapObjectHeader::setDeadMark()
440 { 440 {
441 ASSERT(!isMarked());
441 checkHeader(); 442 checkHeader();
442 m_size |= debugBitMask; 443 m_size |= deadBitMask;
443 } 444 }
444 445
445 #ifndef NDEBUG 446 #ifndef NDEBUG
446 NO_SANITIZE_ADDRESS 447 NO_SANITIZE_ADDRESS
447 void HeapObjectHeader::zapMagic() 448 void HeapObjectHeader::zapMagic()
448 { 449 {
449 m_magic = zappedMagic; 450 m_magic = zappedMagic;
450 } 451 }
451 #endif 452 #endif
452 453
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
492 return heapObjectHeader()->unmark(); 493 return heapObjectHeader()->unmark();
493 } 494 }
494 495
495 template<typename Header> 496 template<typename Header>
496 bool LargeHeapObject<Header>::isMarked() 497 bool LargeHeapObject<Header>::isMarked()
497 { 498 {
498 return heapObjectHeader()->isMarked(); 499 return heapObjectHeader()->isMarked();
499 } 500 }
500 501
501 template<typename Header> 502 template<typename Header>
503 void LargeHeapObject<Header>::setDeadMark()
504 {
505 heapObjectHeader()->setDeadMark();
506 }
507
508 template<typename Header>
502 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address addr ess) 509 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address addr ess)
503 { 510 {
504 ASSERT(contains(address)); 511 ASSERT(contains(address));
505 if (!objectContains(address)) 512 if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
506 return; 513 return;
507 #if ENABLE(GC_TRACING) 514 #if ENABLE(GC_TRACING)
508 visitor->setHostInfo(&address, "stack"); 515 visitor->setHostInfo(&address, "stack");
509 #endif 516 #endif
510 mark(visitor); 517 mark(visitor);
511 } 518 }
512 519
513 template<> 520 template<>
514 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) 521 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
515 { 522 {
(...skipping 28 matching lines...) Expand all
544 551
545 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* pa yload) 552 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* pa yload)
546 { 553 {
547 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); 554 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
548 FinalizedHeapObjectHeader* header = 555 FinalizedHeapObjectHeader* header =
549 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize) ; 556 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize) ;
550 return header; 557 return header;
551 } 558 }
552 559
553 template<typename Header> 560 template<typename Header>
554 ThreadHeap<Header>::ThreadHeap(ThreadState* state) 561 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
555 : m_currentAllocationPoint(0) 562 : m_currentAllocationPoint(0)
556 , m_remainingAllocationSize(0) 563 , m_remainingAllocationSize(0)
557 , m_firstPage(0) 564 , m_firstPage(0)
558 , m_firstLargeHeapObject(0) 565 , m_firstLargeHeapObject(0)
559 , m_biggestFreeListIndex(0) 566 , m_biggestFreeListIndex(0)
560 , m_threadState(state) 567 , m_threadState(state)
561 , m_pagePool(0) 568 , m_index(index)
562 { 569 {
563 clearFreeLists(); 570 clearFreeLists();
564 } 571 }
565 572
566 template<typename Header> 573 template<typename Header>
567 ThreadHeap<Header>::~ThreadHeap() 574 ThreadHeap<Header>::~ThreadHeap()
568 { 575 {
569 clearFreeLists(); 576 clearFreeLists();
570 if (!ThreadState::current()->isMainThread()) 577 flushHeapContainsCache();
haraken 2014/07/08 05:44:51 Don't you need to flush HeapDoesNotContainCache as
wibling-chromium 2014/07/08 13:39:45 No, we only need to flush the HeapDoesNotContainCa
571 assertEmpty(); 578
572 deletePages(); 579 // Add the ThreadHeap's pages to the orphanedPagePool.
580 Vector<BaseHeapPage*> pages;
581 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
582 pages.append(page);
583 m_firstPage = 0;
584
585 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObj ect; largeObject = largeObject->m_next)
586 pages.append(largeObject);
587 m_firstLargeHeapObject = 0;
588 Heap::orphanedPagePool()->addOrphanedPages(m_index, pages);
573 } 589 }
574 590
575 template<typename Header> 591 template<typename Header>
576 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) 592 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
577 { 593 {
578 size_t allocationSize = allocationSizeFromSize(size); 594 size_t allocationSize = allocationSizeFromSize(size);
579 if (threadState()->shouldGC()) { 595 if (threadState()->shouldGC()) {
580 if (threadState()->shouldForceConservativeGC()) 596 if (threadState()->shouldForceConservativeGC())
581 Heap::collectGarbage(ThreadState::HeapPointersOnStack); 597 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
582 else 598 else
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after
732 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext) 748 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeH eapObject<Header>** previousNext)
733 { 749 {
734 flushHeapContainsCache(); 750 flushHeapContainsCache();
735 object->unlink(previousNext); 751 object->unlink(previousNext);
736 object->finalize(); 752 object->finalize();
737 753
738 // Unpoison the object header and allocationGranularity bytes after the 754 // Unpoison the object header and allocationGranularity bytes after the
739 // object before freeing. 755 // object before freeing.
740 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); 756 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
741 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity); 757 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGr anularity);
742 delete object->storage(); 758
759 if (object->shuttingDown()) {
760 // The thread is shutting down so this object is being removed as part
761 // of a thread local GC. In that case the object could be revived in the
Mads Ager (chromium) 2014/07/08 08:24:56 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
762 // next global GC either due to a dead object being revived via a
Mads Ager (chromium) 2014/07/08 08:24:55 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
763 // conservative pointer or due to a programming error where an object
 764     // in another thread heap keeps a dangling pointer to this object.
haraken 2014/07/08 05:44:50 the this object => this object
wibling-chromium 2014/07/08 13:39:45 Done.
 765     // To guard against this we put the large object memory in the
haraken 2014/07/08 05:44:50 against
wibling-chromium 2014/07/08 13:39:46 Done.
766 // orphanedPagePool to ensure it is still reachable. After the next full
767 // GC it can be released assuming no rogue/dangling pointers refer to
haraken 2014/07/08 05:44:51 full GC => global GC
wibling-chromium 2014/07/08 13:39:46 Done.
768 // it.
 769     // NOTE: large objects are not moved to the memory pool as it is unlikely
770 // they can be reused due to their individual sizes.
771 Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
772 } else {
773 PageMemory* memory = object->storage();
774 object->~LargeHeapObject<Header>();
775 delete memory;
776 }
777 }
778
779 template<typename DataType>
780 HeapPool<DataType>::HeapPool()
781 {
782 for (int i = 0; i < NumberOfHeaps; ++i) {
783 m_pool[i] = 0;
784 }
785 }
786
787 HeapMemoryPool::~HeapMemoryPool()
788 {
789 for (int index = 0; index < NumberOfHeaps; ++index) {
790 while (PoolEntry* entry = m_pool[index]) {
791 m_pool[index] = entry->next;
792 PageMemory* memory = entry->data;
793 ASSERT(memory);
794 delete memory;
795 delete entry;
796 }
797 }
798 }
799
800 void HeapMemoryPool::addMemory(int index, PageMemory* memory)
801 {
802 // When adding memory to the pool we decommit it to ensure it is unused
803 // while in the pool. This also allows the physical memory backing the
804 // page to be given back to the OS.
805 memory->decommit();
806 MutexLocker locker(m_mutex[index]);
haraken 2014/07/08 05:44:51 Just help me understand: Why do we need a mutex he
wibling-chromium 2014/07/08 13:39:46 I have changed the page pool to be global. The rea
807 PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
808 m_pool[index] = entry;
809 }
810
811 PageMemory* HeapMemoryPool::takeMemory(int index)
812 {
813 MutexLocker locker(m_mutex[index]);
814 while (PoolEntry* entry = m_pool[index]) {
815 m_pool[index] = entry->next;
816 PageMemory* memory = entry->data;
817 ASSERT(memory);
818 delete entry;
819 if (memory->commit())
820 return memory;
821
822 // We got some memory, but failed to commit it, try again.
823 delete memory;
824 }
825 return 0;
826 }
827
828 HeapOrphanedPagePool::~HeapOrphanedPagePool()
829 {
830 for (int index = 0; index < NumberOfHeaps; ++index) {
831 while (PoolEntry* entry = m_pool[index]) {
832 m_pool[index] = entry->next;
833 BaseHeapPage* page = entry->data;
834 delete entry;
835 PageMemory* memory = page->storage();
836 ASSERT(memory);
837 page->~BaseHeapPage();
838 delete memory;
839 }
840 }
841 }
842
843 void HeapOrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
Mads Ager (chromium) 2014/07/08 08:24:56 For these methods we know that some mutex is alrea
wibling-chromium 2014/07/08 13:39:46 Yes, basically we only add pages to the orphaned p
844 {
845 page->markOrphaned();
846 PoolEntry* entry = new PoolEntry(page, m_pool[index]);
847 m_pool[index] = entry;
848 }
849
850 void HeapOrphanedPagePool::addOrphanedPages(int index, Vector<BaseHeapPage*>& pa ges)
851 {
852 for (Vector<BaseHeapPage*>::const_iterator it = pages.begin(); it != pages.e nd(); ++it) {
853 addOrphanedPage(index, *it);
854 }
855 }
856
857 void HeapOrphanedPagePool::decommitOrphanedPages()
858 {
859 // No locking needed as all threads are at safepoints at this point in time.
haraken 2014/07/08 05:44:51 Can we add an ASSERT about this?
wibling-chromium 2014/07/08 13:39:46 Done.
860 for (int index = 0; index < NumberOfHeaps; ++index) {
861 PoolEntry* entry = m_pool[index];
862 PoolEntry** prevNext = &m_pool[index];
863 while (entry) {
864 BaseHeapPage* page = entry->data;
865 if (page->traced()) {
 866             // If the page was traced in the last GC it is not decommitted.
867 // We only decommit a page, ie. put it in the memory pool,
868 // when the page has no objects pointing to it.
869 // We mark the page as orphaned. This clears the traced flag
870 // and any object trace bits that were set during tracing.
871 page->markOrphaned();
872 prevNext = &entry->next;
873 entry = entry->next;
874 continue;
875 }
876
877 // Page was not traced. Check if we should reuse the memory or just
878 // free it. Large object memory is not reused, but freed, normal
879 // blink heap pages are reused.
880 PageMemory* memory = page->storage();
881
882 // Call the destructor before freeing or adding to the memory pool.
haraken 2014/07/08 05:44:50 Just help me understand: Why does this order matte
wibling-chromium 2014/07/08 13:39:46 We cannot call the destructor after adding the mem
883 if (page->reuseMemory()) {
haraken 2014/07/08 05:44:50 reuseMemory => shouldReuseMemory ?
Mads Ager (chromium) 2014/07/08 08:24:56 !page->isLargeObject()
wibling-chromium 2014/07/08 13:39:45 Done.
884 page->~BaseHeapPage();
885 Heap::memoryPool()->addMemory(index, memory);
Mads Ager (chromium) 2014/07/08 08:24:55 So this is where we use that the page pool is now
wibling-chromium 2014/07/08 13:39:45 Yes, I would like to keep it global for now and in
886 } else {
887 page->~BaseHeapPage();
888 delete memory;
889 }
890
891 PoolEntry* deadEntry = entry;
892 entry = entry->next;
893 *prevNext = entry;
894 delete deadEntry;
895 }
896 }
897 }
898
899 bool HeapOrphanedPagePool::contains(void* object)
haraken 2014/07/08 05:44:50 Just to confirm: HeapOrphanedPagePool::contains()
wibling-chromium 2014/07/08 13:39:46 No, it is only called inside an ASSERT. I will wra
900 {
901 for (int index = 0; index < NumberOfHeaps; ++index) {
902 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
903 BaseHeapPage* page = entry->data;
904 if (page->contains(reinterpret_cast<Address>(object)))
905 return true;
906 }
907 }
908 return false;
743 } 909 }
744 910
745 template<> 911 template<>
746 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) 912 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
747 { 913 {
748 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on 914 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
749 // the heap should be unused (ie. 0). 915 // the heap should be unused (ie. 0).
750 allocatePage(0); 916 allocatePage(0);
751 } 917 }
752 918
753 template<> 919 template<>
754 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) 920 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
755 { 921 {
756 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap 922 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GC Info on the heap
757 // since it is the same for all objects 923 // since it is the same for all objects
758 ASSERT(gcInfo); 924 ASSERT(gcInfo);
759 allocatePage(gcInfo); 925 allocatePage(gcInfo);
760 } 926 }
761 927
762 template<typename Header> 928 template <typename Header>
763 void ThreadHeap<Header>::clearPagePool() 929 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
764 {
765 while (takePageFromPool()) { }
766 }
767
768 template<typename Header>
769 PageMemory* ThreadHeap<Header>::takePageFromPool()
770 {
771 Heap::flushHeapDoesNotContainCache();
772 while (PagePoolEntry* entry = m_pagePool) {
773 m_pagePool = entry->next();
774 PageMemory* storage = entry->storage();
775 delete entry;
776
777 if (storage->commit())
778 return storage;
779
780 // Failed to commit pooled storage. Release it.
781 delete storage;
782 }
783
784 return 0;
785 }
786
787 template<typename Header>
788 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage)
789 { 930 {
790 flushHeapContainsCache(); 931 flushHeapContainsCache();
791 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); 932 if (page->shuttingDown()) {
792 m_pagePool = entry; 933 // The thread is shutting down so this page is being removed as part
793 } 934 // of a thread local GC. In that case the page could be revived in the
Mads Ager (chromium) 2014/07/08 08:24:55 revived -> accessed
wibling-chromium 2014/07/08 13:39:45 Done.
794 935 // next global GC either due to a dead object being revived via a
Mads Ager (chromium) 2014/07/08 08:24:56 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
795 template <typename Header> 936 // conservative pointer or due to a programming error where an object
 796 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) 937     // in another thread heap keeps a dangling pointer to this object.
haraken 2014/07/08 05:44:51 the this object => this object
wibling-chromium 2014/07/08 13:39:46 Done.
 797 { 938     // To guard against this we put the page in the orphanedPagePool to
haraken 2014/07/08 05:44:50 against
wibling-chromium 2014/07/08 13:39:45 Done.
798 PageMemory* storage = page->storage(); 939 // ensure it is still reachable. After the next full GC it can be
haraken 2014/07/08 05:44:51 full GC => global GC
wibling-chromium 2014/07/08 13:39:45 Done.
799 storage->decommit(); 940 // decommitted and moved to the memory pool assuming no rogue/dangling
800 addPageMemoryToPool(storage); 941 // pointers refer to it.
942 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
943 } else {
944 PageMemory* memory = page->storage();
945 page->~HeapPage<Header>();
946 Heap::memoryPool()->addMemory(m_index, memory);
947 }
801 } 948 }
802 949
803 template<typename Header> 950 template<typename Header>
804 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) 951 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
805 { 952 {
806 Heap::flushHeapDoesNotContainCache(); 953 Heap::flushHeapDoesNotContainCache();
807 PageMemory* pageMemory = takePageFromPool(); 954 PageMemory* pageMemory = Heap::memoryPool()->takeMemory(m_index);
808 if (!pageMemory) { 955 while (!pageMemory) {
haraken 2014/07/08 05:44:50 I'm curious why we need change 'if' to 'while'?
Mads Ager (chromium) 2014/07/08 08:24:55 Because the page pool is not global and other thre
809 // Allocate a memory region for blinkPagesPerRegion pages that 956 // Allocate a memory region for blinkPagesPerRegion pages that
810 // will each have the following layout. 957 // will each have the following layout.
811 // 958 //
812 // [ guard os page | ... payload ... | guard os page ] 959 // [ guard os page | ... payload ... | guard os page ]
813 // ^---{ aligned to blink page size } 960 // ^---{ aligned to blink page size }
814 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * bl inkPagesPerRegion, blinkPagesPerRegion); 961 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * bl inkPagesPerRegion, blinkPagesPerRegion);
815 // Setup the PageMemory object for each of the pages in the 962 // Setup the PageMemory object for each of the pages in the
816 // region. 963 // region.
817 size_t offset = 0; 964 size_t offset = 0;
818 for (size_t i = 0; i < blinkPagesPerRegion; i++) { 965 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
819 addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offs et, blinkPagePayloadSize())); 966 Heap::memoryPool()->addMemory(m_index, PageMemory::setupPageMemoryIn Region(region, offset, blinkPagePayloadSize()));
820 offset += blinkPageSize; 967 offset += blinkPageSize;
821 } 968 }
822 pageMemory = takePageFromPool(); 969 pageMemory = Heap::memoryPool()->takeMemory(m_index);
823 RELEASE_ASSERT(pageMemory);
824 } 970 }
825 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>( pageMemory, this, gcInfo); 971 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>( pageMemory, this, gcInfo);
826 // FIXME: Oilpan: Linking new pages into the front of the list is 972 // FIXME: Oilpan: Linking new pages into the front of the list is
827 // crucial when performing allocations during finalization because 973 // crucial when performing allocations during finalization because
828 // it ensures that those pages are not swept in the current GC 974 // it ensures that those pages are not swept in the current GC
829 // round. We should create a separate page list for that to 975 // round. We should create a separate page list for that to
830 // separate out the pages allocated during finalization clearly 976 // separate out the pages allocated during finalization clearly
831 // from the pages currently being swept. 977 // from the pages currently being swept.
832 page->link(&m_firstPage); 978 page->link(&m_firstPage);
833 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); 979 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
(...skipping 23 matching lines...) Expand all
857 ASSERT(isConsistentForGC()); 1003 ASSERT(isConsistentForGC());
858 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING 1004 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
859 // When using ASan do a pre-sweep where all unmarked objects are poisoned be fore 1005 // When using ASan do a pre-sweep where all unmarked objects are poisoned be fore
 860 // calling their finalizer methods. This can catch the cases where one object's 1006 // calling their finalizer methods. This can catch the cases where one object's
861 // finalizer tries to modify another object as part of finalization. 1007 // finalizer tries to modify another object as part of finalization.
862 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1008 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
863 page->poisonUnmarkedObjects(); 1009 page->poisonUnmarkedObjects();
864 #endif 1010 #endif
865 HeapPage<Header>* page = m_firstPage; 1011 HeapPage<Header>* page = m_firstPage;
866 HeapPage<Header>** previous = &m_firstPage; 1012 HeapPage<Header>** previous = &m_firstPage;
867 bool pagesRemoved = false;
868 while (page) { 1013 while (page) {
869 if (page->isEmpty()) { 1014 if (page->isEmpty()) {
870 flushHeapContainsCache();
haraken 2014/07/08 05:44:50 Just help me understand: Why can we drop flushHeap
Mads Ager (chromium) 2014/07/08 08:24:56 Because unlink now calls removePageFromHeap which
wibling-chromium 2014/07/08 13:39:45 Yes, it seemed like we did this a bit too many tim
871 HeapPage<Header>* unused = page; 1015 HeapPage<Header>* unused = page;
872 page = page->next(); 1016 page = page->next();
873 HeapPage<Header>::unlink(unused, previous); 1017 HeapPage<Header>::unlink(unused, previous);
874 pagesRemoved = true;
875 } else { 1018 } else {
876 page->sweep(); 1019 page->sweep();
877 previous = &page->m_next; 1020 previous = &page->m_next;
878 page = page->next(); 1021 page = page->next();
879 } 1022 }
880 } 1023 }
881 if (pagesRemoved)
882 flushHeapContainsCache();
883 1024
884 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; 1025 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
885 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { 1026 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
886 if (current->isMarked()) { 1027 if (current->isMarked()) {
887 stats().increaseAllocatedSpace(current->size()); 1028 stats().increaseAllocatedSpace(current->size());
888 stats().increaseObjectSpace(current->payloadSize()); 1029 stats().increaseObjectSpace(current->payloadSize());
889 current->unmark(); 1030 current->unmark();
890 previousNext = &current->m_next; 1031 previousNext = &current->m_next;
891 current = current->next(); 1032 current = current->next();
892 } else { 1033 } else {
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
947 template<typename Header> 1088 template<typename Header>
948 void ThreadHeap<Header>::makeConsistentForGC() 1089 void ThreadHeap<Header>::makeConsistentForGC()
949 { 1090 {
950 if (ownsNonEmptyAllocationArea()) 1091 if (ownsNonEmptyAllocationArea())
951 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); 1092 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
952 setAllocationPoint(0, 0); 1093 setAllocationPoint(0, 0);
953 clearFreeLists(); 1094 clearFreeLists();
954 } 1095 }
955 1096
956 template<typename Header> 1097 template<typename Header>
957 void ThreadHeap<Header>::clearMarks() 1098 void ThreadHeap<Header>::clearLiveAndMarkDead()
958 { 1099 {
959 ASSERT(isConsistentForGC()); 1100 ASSERT(isConsistentForGC());
960 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1101 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
961 page->clearMarks(); 1102 page->clearLiveAndMarkDead();
962 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) 1103 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) {
963 current->unmark(); 1104 if (current->isMarked())
1105 current->unmark();
1106 else
1107 current->setDeadMark();
1108 }
964 } 1109 }
965 1110
966 template<typename Header> 1111 template<typename Header>
967 void ThreadHeap<Header>::deletePages()
968 {
969 flushHeapContainsCache();
970 // Add all pages in the pool to the heap's list of pages before deleting
971 clearPagePool();
972
973 for (HeapPage<Header>* page = m_firstPage; page; ) {
974 HeapPage<Header>* dead = page;
975 page = page->next();
976 PageMemory* storage = dead->storage();
977 dead->~HeapPage();
978 delete storage;
979 }
980 m_firstPage = 0;
981
982 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
983 LargeHeapObject<Header>* dead = current;
984 current = current->next();
985 PageMemory* storage = dead->storage();
986 dead->~LargeHeapObject();
987 delete storage;
988 }
989 m_firstLargeHeapObject = 0;
990 }
991
992 template<typename Header>
993 void ThreadHeap<Header>::clearFreeLists() 1112 void ThreadHeap<Header>::clearFreeLists()
994 { 1113 {
995 for (size_t i = 0; i < blinkPageSizeLog2; i++) 1114 for (size_t i = 0; i < blinkPageSizeLog2; i++)
996 m_freeLists[i] = 0; 1115 m_freeLists[i] = 0;
997 } 1116 }
998 1117
999 int BaseHeap::bucketIndexForSize(size_t size) 1118 int BaseHeap::bucketIndexForSize(size_t size)
1000 { 1119 {
1001 ASSERT(size > 0); 1120 ASSERT(size > 0);
1002 int index = -1; 1121 int index = -1;
(...skipping 20 matching lines...) Expand all
1023 void HeapPage<Header>::link(HeapPage** prevNext) 1142 void HeapPage<Header>::link(HeapPage** prevNext)
1024 { 1143 {
1025 m_next = *prevNext; 1144 m_next = *prevNext;
1026 *prevNext = this; 1145 *prevNext = this;
1027 } 1146 }
1028 1147
1029 template<typename Header> 1148 template<typename Header>
1030 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) 1149 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
1031 { 1150 {
1032 *prevNext = unused->m_next; 1151 *prevNext = unused->m_next;
1033 unused->heap()->addPageToPool(unused); 1152 unused->heap()->removePageFromHeap(unused);
1034 } 1153 }
1035 1154
1036 template<typename Header> 1155 template<typename Header>
1037 void HeapPage<Header>::getStats(HeapStats& stats) 1156 void HeapPage<Header>::getStats(HeapStats& stats)
1038 { 1157 {
1039 stats.increaseAllocatedSpace(blinkPageSize); 1158 stats.increaseAllocatedSpace(blinkPageSize);
1040 Address headerAddress = payload(); 1159 Address headerAddress = payload();
1041 ASSERT(headerAddress != end()); 1160 ASSERT(headerAddress != end());
1042 do { 1161 do {
1043 Header* header = reinterpret_cast<Header*>(headerAddress); 1162 Header* header = reinterpret_cast<Header*>(headerAddress);
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
1089 header->unmark(); 1208 header->unmark();
1090 headerAddress += header->size(); 1209 headerAddress += header->size();
1091 heap()->stats().increaseObjectSpace(header->payloadSize()); 1210 heap()->stats().increaseObjectSpace(header->payloadSize());
1092 startOfGap = headerAddress; 1211 startOfGap = headerAddress;
1093 } 1212 }
1094 if (startOfGap != end()) 1213 if (startOfGap != end())
1095 heap()->addToFreeList(startOfGap, end() - startOfGap); 1214 heap()->addToFreeList(startOfGap, end() - startOfGap);
1096 } 1215 }
1097 1216
1098 template<typename Header> 1217 template<typename Header>
1099 void HeapPage<Header>::clearMarks() 1218 void HeapPage<Header>::clearLiveAndMarkDead()
1100 { 1219 {
1101 for (Address headerAddress = payload(); headerAddress < end();) { 1220 for (Address headerAddress = payload(); headerAddress < end();) {
1102 Header* header = reinterpret_cast<Header*>(headerAddress); 1221 Header* header = reinterpret_cast<Header*>(headerAddress);
1103 ASSERT(header->size() < blinkPagePayloadSize()); 1222 ASSERT(header->size() < blinkPagePayloadSize());
1104 if (!header->isFree()) 1223 // Skip freelist entries.
1224 if (header->isFree()) {
1225 headerAddress += header->size();
1226 continue;
1227 }
1228 if (header->isMarked())
1105 header->unmark(); 1229 header->unmark();
1230 else
1231 header->setDeadMark();
haraken 2014/07/08 05:44:50 Slightly better: if (header->isMarked()) header
wibling-chromium 2014/07/08 13:39:45 Done.
1106 headerAddress += header->size(); 1232 headerAddress += header->size();
1107 } 1233 }
1108 } 1234 }
1109 1235
1110 template<typename Header> 1236 template<typename Header>
1111 void HeapPage<Header>::populateObjectStartBitMap() 1237 void HeapPage<Header>::populateObjectStartBitMap()
1112 { 1238 {
1113 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); 1239 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1114 Address start = payload(); 1240 Address start = payload();
1115 for (Address headerAddress = start; headerAddress < end();) { 1241 for (Address headerAddress = start; headerAddress < end();) {
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
1175 if (header->isFree()) 1301 if (header->isFree())
1176 return 0; 1302 return 0;
1177 return header; 1303 return header;
1178 } 1304 }
1179 1305
1180 template<typename Header> 1306 template<typename Header>
1181 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) 1307 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1182 { 1308 {
1183 ASSERT(contains(address)); 1309 ASSERT(contains(address));
1184 Header* header = findHeaderFromAddress(address); 1310 Header* header = findHeaderFromAddress(address);
1185 if (!header) 1311 if (!header || header->hasDeadMark())
1186 return; 1312 return;
1187 1313
1188 #if ENABLE(GC_TRACING) 1314 #if ENABLE(GC_TRACING)
1189 visitor->setHostInfo(&address, "stack"); 1315 visitor->setHostInfo(&address, "stack");
1190 #endif 1316 #endif
1191 if (hasVTable(header) && !vTableInitialized(header->payload())) 1317 if (hasVTable(header) && !vTableInitialized(header->payload()))
1192 visitor->markConservatively(header); 1318 visitor->markConservatively(header);
1193 else 1319 else
1194 visitor->mark(header, traceCallback(header)); 1320 visitor->mark(header, traceCallback(header));
1195 } 1321 }
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
1363 { 1489 {
1364 for (size_t i = 0; i < bufferSize; i++) 1490 for (size_t i = 0; i < bufferSize; i++)
1365 m_buffer[i] = Item(0, 0); 1491 m_buffer[i] = Item(0, 0);
1366 } 1492 }
1367 1493
1368 bool CallbackStack::isEmpty() 1494 bool CallbackStack::isEmpty()
1369 { 1495 {
1370 return m_current == &(m_buffer[0]) && !m_next; 1496 return m_current == &(m_buffer[0]) && !m_next;
1371 } 1497 }
1372 1498
1373 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor ) 1499 template<bool ThreadLocal>
1374 {
1375 if (m_current == &(m_buffer[0])) {
1376 if (!m_next) {
1377 #ifndef NDEBUG
1378 clearUnused();
1379 #endif
1380 return false;
1381 }
1382 CallbackStack* nextStack = m_next;
1383 *first = nextStack;
1384 delete this;
1385 return nextStack->popAndInvokeCallback(first, visitor);
1386 }
1387 Item* item = --m_current;
1388
1389 VisitorCallback callback = item->callback();
1390 #if ENABLE(GC_TRACING)
1391 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndI nvokeCallback
1392 visitor->setHostInfo(item->object(), classOf(item->object()));
1393 #endif
1394 callback(visitor, item->object());
1395
1396 return true;
1397 }
1398
1399 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor) 1500 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor)
1400 { 1501 {
1401 CallbackStack* stack = 0; 1502 CallbackStack* stack = 0;
1402 // The first block is the only one where new ephemerons are added, so we 1503 // The first block is the only one where new ephemerons are added, so we
1403 // call the callbacks on that last, to catch any new ephemerons discovered 1504 // call the callbacks on that last, to catch any new ephemerons discovered
1404 // in the callbacks. 1505 // in the callbacks.
1405 // However, if enough ephemerons were added, we may have a new block that 1506 // However, if enough ephemerons were added, we may have a new block that
1406 // has been prepended to the chain. This will be very rare, but we can 1507 // has been prepended to the chain. This will be very rare, but we can
1407 // handle the situation by starting again and calling all the callbacks 1508 // handle the situation by starting again and calling all the callbacks
1408 // a second time. 1509 // a second time.
1409 while (stack != *first) { 1510 while (stack != *first) {
1410 stack = *first; 1511 stack = *first;
1411 stack->invokeOldestCallbacks(visitor); 1512 stack->invokeOldestCallbacks<ThreadLocal>(visitor);
1412 } 1513 }
1413 } 1514 }
1414 1515
1516 template<bool ThreadLocal>
1415 void CallbackStack::invokeOldestCallbacks(Visitor* visitor) 1517 void CallbackStack::invokeOldestCallbacks(Visitor* visitor)
1416 { 1518 {
1417 // Recurse first (bufferSize at a time) so we get to the newly added entries 1519 // Recurse first (bufferSize at a time) so we get to the newly added entries
1418 // last. 1520 // last.
1419 if (m_next) 1521 if (m_next)
1420 m_next->invokeOldestCallbacks(visitor); 1522 m_next->invokeOldestCallbacks<ThreadLocal>(visitor);
1421 1523
1422 // This loop can tolerate entries being added by the callbacks after 1524 // This loop can tolerate entries being added by the callbacks after
1423 // iteration starts. 1525 // iteration starts.
1424 for (unsigned i = 0; m_buffer + i < m_current; i++) { 1526 for (unsigned i = 0; m_buffer + i < m_current; i++) {
1425 Item& item = m_buffer[i]; 1527 Item& item = m_buffer[i];
1528
1529 BaseHeapPage* heapPage = pageHeaderFromObject(item.object());
1530 if (ThreadLocal ? (heapPage->orphaned() || !heapPage->shuttingDown()) : heapPage->orphaned()) {
1531 // If tracing this from a global GC set the traced bit.
1532 if (!ThreadLocal)
1533 heapPage->setTraced();
1534 continue;
1535 }
1426 item.callback()(visitor, item.object()); 1536 item.callback()(visitor, item.object());
1427 } 1537 }
1428 } 1538 }
1429 1539
1430 #ifndef NDEBUG 1540 #ifndef NDEBUG
1431 bool CallbackStack::hasCallbackForObject(const void* object) 1541 bool CallbackStack::hasCallbackForObject(const void* object)
1432 { 1542 {
1433 for (unsigned i = 0; m_buffer + i < m_current; i++) { 1543 for (unsigned i = 0; m_buffer + i < m_current; i++) {
1434 Item* item = &m_buffer[i]; 1544 Item* item = &m_buffer[i];
1435 if (item->object() == object) { 1545 if (item->object() == object) {
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after
1668 }; 1778 };
1669 1779
1670 void Heap::init() 1780 void Heap::init()
1671 { 1781 {
1672 ThreadState::init(); 1782 ThreadState::init();
1673 CallbackStack::init(&s_markingStack); 1783 CallbackStack::init(&s_markingStack);
1674 CallbackStack::init(&s_weakCallbackStack); 1784 CallbackStack::init(&s_weakCallbackStack);
1675 CallbackStack::init(&s_ephemeronStack); 1785 CallbackStack::init(&s_ephemeronStack);
1676 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); 1786 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
1677 s_markingVisitor = new MarkingVisitor(); 1787 s_markingVisitor = new MarkingVisitor();
1788 s_memoryPool = new HeapMemoryPool();
1789 s_orphanedPagePool = new HeapOrphanedPagePool();
1678 } 1790 }
1679 1791
1680 void Heap::shutdown() 1792 void Heap::shutdown()
1681 { 1793 {
1682 s_shutdownCalled = true; 1794 s_shutdownCalled = true;
1683 ThreadState::shutdownHeapIfNecessary(); 1795 ThreadState::shutdownHeapIfNecessary();
1684 } 1796 }
1685 1797
1686 void Heap::doShutdown() 1798 void Heap::doShutdown()
1687 { 1799 {
1688 // We don't want to call doShutdown() twice. 1800 // We don't want to call doShutdown() twice.
1689 if (!s_markingVisitor) 1801 if (!s_markingVisitor)
1690 return; 1802 return;
1691 1803
1692 ASSERT(!ThreadState::isAnyThreadInGC()); 1804 ASSERT(!ThreadState::isAnyThreadInGC());
1693 ASSERT(!ThreadState::attachedThreads().size()); 1805 ASSERT(!ThreadState::attachedThreads().size());
1694 delete s_markingVisitor; 1806 delete s_markingVisitor;
1695 s_markingVisitor = 0; 1807 s_markingVisitor = 0;
1696 delete s_heapDoesNotContainCache; 1808 delete s_heapDoesNotContainCache;
1697 s_heapDoesNotContainCache = 0; 1809 s_heapDoesNotContainCache = 0;
1810 delete s_memoryPool;
1811 s_memoryPool = 0;
1812 delete s_orphanedPagePool;
1813 s_orphanedPagePool = 0;
1698 CallbackStack::shutdown(&s_weakCallbackStack); 1814 CallbackStack::shutdown(&s_weakCallbackStack);
1699 CallbackStack::shutdown(&s_markingStack); 1815 CallbackStack::shutdown(&s_markingStack);
1700 CallbackStack::shutdown(&s_ephemeronStack); 1816 CallbackStack::shutdown(&s_ephemeronStack);
1701 ThreadState::shutdown(); 1817 ThreadState::shutdown();
1702 } 1818 }
1703 1819
1704 BaseHeapPage* Heap::contains(Address address) 1820 BaseHeapPage* Heap::contains(Address address)
1705 { 1821 {
1706 ASSERT(ThreadState::isAnyThreadInGC()); 1822 ASSERT(ThreadState::isAnyThreadInGC());
1707 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 1823 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
1708 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 1824 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1709 BaseHeapPage* page = (*it)->contains(address); 1825 BaseHeapPage* page = (*it)->contains(address);
1710 if (page) 1826 if (page)
1711 return page; 1827 return page;
1712 } 1828 }
1713 return 0; 1829 return 0;
1714 } 1830 }
1715 1831
1832 #ifndef NDEBUG
1833 bool Heap::containedInHeapOrOrphanedPage(void* object)
1834 {
1835 return contains(object) || orphanedPagePool()->contains(object);
1836 }
1837 #endif
1838
1716 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) 1839 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
1717 { 1840 {
1718 ASSERT(ThreadState::isAnyThreadInGC()); 1841 ASSERT(ThreadState::isAnyThreadInGC());
1719 1842
1720 #ifdef NDEBUG 1843 #ifdef NDEBUG
1721 if (s_heapDoesNotContainCache->lookup(address)) 1844 if (s_heapDoesNotContainCache->lookup(address))
1722 return 0; 1845 return 0;
1723 #endif 1846 #endif
1724 1847
1725 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 1848 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
1784 builder.append("\n\t"); 1907 builder.append("\n\t");
1785 builder.append(frameToName.nullableName()); 1908 builder.append(frameToName.nullableName());
1786 --framesToShow; 1909 --framesToShow;
1787 } 1910 }
1788 return builder.toString().replace("WebCore::", ""); 1911 return builder.toString().replace("WebCore::", "");
1789 } 1912 }
1790 #endif 1913 #endif
1791 1914
1792 void Heap::pushTraceCallback(void* object, TraceCallback callback) 1915 void Heap::pushTraceCallback(void* object, TraceCallback callback)
1793 { 1916 {
1794 ASSERT(Heap::contains(object)); 1917 ASSERT(Heap::containedInHeapOrOrphanedPage(object));
1795 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); 1918 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
1796 *slot = CallbackStack::Item(object, callback); 1919 *slot = CallbackStack::Item(object, callback);
1797 } 1920 }
1798 1921
1922 template<bool ThreadLocal>
1799 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) 1923 bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
1800 { 1924 {
1801 return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor); 1925 return s_markingStack->popAndInvokeCallback<ThreadLocal>(&s_markingStack, vi sitor);
1802 } 1926 }
1803 1927
1804 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback ) 1928 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback )
1805 { 1929 {
1806 ASSERT(Heap::contains(cell)); 1930 ASSERT(Heap::contains(cell));
1807 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack); 1931 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack);
1808 *slot = CallbackStack::Item(cell, callback); 1932 *slot = CallbackStack::Item(cell, callback);
1809 } 1933 }
1810 1934
1811 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback) 1935 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback)
1812 { 1936 {
1813 ASSERT(Heap::contains(object)); 1937 ASSERT(Heap::contains(object));
1814 BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeader Address(reinterpret_cast<Address>(object))); 1938 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
1815 ASSERT(Heap::contains(object) == heapPageForObject); 1939 ASSERT(Heap::contains(object) == heapPageForObject);
1816 ThreadState* state = heapPageForObject->threadState(); 1940 ThreadState* state = heapPageForObject->threadState();
1817 state->pushWeakObjectPointerCallback(closure, callback); 1941 state->pushWeakObjectPointerCallback(closure, callback);
1818 } 1942 }
1819 1943
1820 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) 1944 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
1821 { 1945 {
1822 return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visit or); 1946 return s_weakCallbackStack->popAndInvokeCallback<false>(&s_weakCallbackStack , visitor);
haraken 2014/07/08 05:44:51 We prefer enum than true/false.
wibling-chromium 2014/07/08 13:39:45 Done.
1823 } 1947 }
1824 1948
1825 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) 1949 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback)
1826 { 1950 {
1827 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k); 1951 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k);
1828 *slot = CallbackStack::Item(table, iterationCallback); 1952 *slot = CallbackStack::Item(table, iterationCallback);
1829 1953
1830 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks. 1954 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks.
1831 // These callbacks are called right after marking and before any thread comm ences execution 1955 // These callbacks are called right after marking and before any thread comm ences execution
1832 // so it suits our needs for telling the ephemerons that the iteration is do ne. 1956 // so it suits our needs for telling the ephemerons that the iteration is do ne.
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
1870 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); 1994 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
1871 #endif 1995 #endif
1872 1996
1873 // Disallow allocation during garbage collection (but not 1997 // Disallow allocation during garbage collection (but not
1874 // during the finalization that happens when the gcScope is 1998 // during the finalization that happens when the gcScope is
1875 // torn down). 1999 // torn down).
1876 NoAllocationScope<AnyThread> noAllocationScope; 2000 NoAllocationScope<AnyThread> noAllocationScope;
1877 2001
1878 prepareForGC(); 2002 prepareForGC();
1879 2003
1880 ThreadState::visitRoots(s_markingVisitor); 2004 tracingAndGlobalWeakProcessing<false>();
2005
2006 // After a global marking we know that any orphaned page that was not reache d
2007 // cannot be revived in a subsequent GC. This is due to a thread either havi ng
Mads Ager (chromium) 2014/07/08 08:24:56 revived -> reached
wibling-chromium 2014/07/08 13:39:46 Done.
2008 // swept its heap or having done a "poor mans sweep" in prepareForGC which m arks
2009 // objects that are dead, but not swept in the previous GC as dead. In this GC's
2010 // marking we check that any object marked as dead is not revived. E.g. via a
Mads Ager (chromium) 2014/07/08 08:24:55 revived -> traced
wibling-chromium 2014/07/08 13:39:46 Done.
2011 // conservatively found pointer or a programming error with an object contai ning
2012 // a dangling pointer.
haraken 2014/07/08 05:44:50 In my understanding, if we're performing a precise
wibling-chromium 2014/07/08 13:39:45 That is correct. I will try to add a RELEASE_ASSER
2013 orphanedPagePool()->decommitOrphanedPages();
2014
2015 #if ENABLE(GC_TRACING)
2016 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2017 #endif
2018
2019 if (blink::Platform::current()) {
2020 uint64_t objectSpaceSize;
2021 uint64_t allocatedSpaceSize;
2022 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2023 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2024 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2025 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2026 }
2027 }
2028
2029 void Heap::collectGarbageForThread(ThreadState* state, bool sweepOnly)
2030 {
2031 // We explicitly do not enter a safepoint while doing thread specific
2032 // garbage collection since we don't want to allow a global GC at the
2033 // same time as a thread local GC.
2034
2035 NoAllocationScope<AnyThread> noAllocationScope;
Mads Ager (chromium) 2014/07/08 08:24:55 This no allocation scope covers the sweep as well.
wibling-chromium 2014/07/08 13:39:45 Good point. Fixed.
2036
2037 state->enterGC();
2038 state->prepareForGC();
2039
2040 if (!sweepOnly)
2041 tracingAndGlobalWeakProcessing<true>();
2042
2043 state->leaveGC();
2044 state->performPendingSweep();
2045 }
2046
2047 template<bool ThreadLocal>
2048 void Heap::tracingAndGlobalWeakProcessing()
2049 {
2050 if (ThreadLocal)
2051 ThreadState::current()->visitLocalRoots(s_markingVisitor);
2052 else
2053 ThreadState::visitRoots(s_markingVisitor);
1881 2054
1882 // Ephemeron fixed point loop. 2055 // Ephemeron fixed point loop.
1883 do { 2056 do {
1884 // Recursively mark all objects that are reachable from the roots. 2057 // Recursively mark all objects that are reachable from the roots for th is thread.
1885 while (popAndInvokeTraceCallback(s_markingVisitor)) { } 2058 // Also don't continue tracing if the trace hits an object on another th read's heap.
2059 while (popAndInvokeTraceCallback<ThreadLocal>(s_markingVisitor)) { }
1886 2060
1887 // Mark any strong pointers that have now become reachable in ephemeron 2061 // Mark any strong pointers that have now become reachable in ephemeron
1888 // maps. 2062 // maps.
1889 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); 2063 CallbackStack::invokeCallbacks<ThreadLocal>(&s_ephemeronStack, s_marking Visitor);
1890 2064
1891 // Rerun loop if ephemeron processing queued more objects for tracing. 2065 // Rerun loop if ephemeron processing queued more objects for tracing.
1892 } while (!s_markingStack->isEmpty()); 2066 } while (!s_markingStack->isEmpty());
1893 2067
1894 // Call weak callbacks on objects that may now be pointing to dead 2068 // Call weak callbacks on objects that may now be pointing to dead
1895 // objects and call ephemeronIterationDone callbacks on weak tables 2069 // objects and call ephemeronIterationDone callbacks on weak tables
1896 // to do cleanup (specifically clear the queued bits for weak hash 2070 // to do cleanup (specifically clear the queued bits for weak hash
1897 // tables). 2071 // tables).
1898 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } 2072 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
1899 2073
1900 CallbackStack::clear(&s_ephemeronStack); 2074 CallbackStack::clear(&s_ephemeronStack);
1901 2075
1902 // It is not permitted to trace pointers of live objects in the weak 2076 // It is not permitted to trace pointers of live objects in the weak
1903 // callback phase, so the marking stack should still be empty here. 2077 // callback phase, so the marking stack should still be empty here.
1904 ASSERT(s_markingStack->isEmpty()); 2078 ASSERT(s_markingStack->isEmpty());
1905
1906 #if ENABLE(GC_TRACING)
1907 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
1908 #endif
1909
1910 if (blink::Platform::current()) {
1911 uint64_t objectSpaceSize;
1912 uint64_t allocatedSpaceSize;
1913 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
1914 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
1915 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
1916 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
1917 }
1918 } 2079 }
1919 2080
1920 void Heap::collectAllGarbage() 2081 void Heap::collectAllGarbage()
1921 { 2082 {
1922 // FIXME: oilpan: we should perform a single GC and everything 2083 // FIXME: oilpan: we should perform a single GC and everything
1923 // should die. Unfortunately it is not the case for all objects 2084 // should die. Unfortunately it is not the case for all objects
1924 // because the hierarchy was not completely moved to the heap and 2085 // because the hierarchy was not completely moved to the heap and
1925 // some heap allocated objects own objects that contain persistents 2086 // some heap allocated objects own objects that contain persistents
1926 // pointing to other heap allocated objects. 2087 // pointing to other heap allocated objects.
1927 for (int i = 0; i < 5; i++) 2088 for (int i = 0; i < 5; i++)
1928 collectGarbage(ThreadState::NoHeapPointersOnStack); 2089 collectGarbage(ThreadState::NoHeapPointersOnStack);
1929 } 2090 }
1930 2091
1931 void Heap::setForcePreciseGCForTesting() 2092 void Heap::setForcePreciseGCForTesting()
1932 { 2093 {
1933 ThreadState::current()->setForcePreciseGCForTesting(true); 2094 ThreadState::current()->setForcePreciseGCForTesting(true);
1934 } 2095 }
1935 2096
2097 template<typename Header>
2098 void ThreadHeap<Header>::setShutdown()
2099 {
2100 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
2101 page->setShutdown();
2102 }
2103 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) {
2104 current->setShutdown();
2105 }
2106 }
2107
1936 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize) 2108 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize)
1937 { 2109 {
1938 *objectSpaceSize = 0; 2110 *objectSpaceSize = 0;
1939 *allocatedSpaceSize = 0; 2111 *allocatedSpaceSize = 0;
1940 ASSERT(ThreadState::isAnyThreadInGC()); 2112 ASSERT(ThreadState::isAnyThreadInGC());
1941 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 2113 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
1942 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; 2114 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
1943 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) { 2115 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) {
1944 *objectSpaceSize += (*it)->stats().totalObjectSpace(); 2116 *objectSpaceSize += (*it)->stats().totalObjectSpace();
1945 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); 2117 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
1984 template class ThreadHeap<FinalizedHeapObjectHeader>; 2156 template class ThreadHeap<FinalizedHeapObjectHeader>;
1985 template class ThreadHeap<HeapObjectHeader>; 2157 template class ThreadHeap<HeapObjectHeader>;
1986 2158
1987 Visitor* Heap::s_markingVisitor; 2159 Visitor* Heap::s_markingVisitor;
1988 CallbackStack* Heap::s_markingStack; 2160 CallbackStack* Heap::s_markingStack;
1989 CallbackStack* Heap::s_weakCallbackStack; 2161 CallbackStack* Heap::s_weakCallbackStack;
1990 CallbackStack* Heap::s_ephemeronStack; 2162 CallbackStack* Heap::s_ephemeronStack;
1991 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2163 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
1992 bool Heap::s_shutdownCalled = false; 2164 bool Heap::s_shutdownCalled = false;
1993 bool Heap::s_lastGCWasConservative = false; 2165 bool Heap::s_lastGCWasConservative = false;
2166 HeapMemoryPool* Heap::s_memoryPool;
2167 HeapOrphanedPagePool* Heap::s_orphanedPagePool;
1994 } 2168 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698