Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(43)

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 371623002: [oilpan]: Make thread shutdown more robust. (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 404 matching lines...) Expand 10 before | Expand all | Expand 10 after
415 } 415 }
416 416
417 NO_SANITIZE_ADDRESS 417 NO_SANITIZE_ADDRESS
418 void HeapObjectHeader::unmark() 418 void HeapObjectHeader::unmark()
419 { 419 {
420 checkHeader(); 420 checkHeader();
421 m_size &= ~markBitMask; 421 m_size &= ~markBitMask;
422 } 422 }
423 423
424 NO_SANITIZE_ADDRESS 424 NO_SANITIZE_ADDRESS
425 bool HeapObjectHeader::hasDebugMark() const 425 bool HeapObjectHeader::hasDeadMark() const
426 { 426 {
427 checkHeader(); 427 checkHeader();
428 return m_size & debugBitMask; 428 return m_size & deadBitMask;
429 } 429 }
430 430
431 NO_SANITIZE_ADDRESS 431 NO_SANITIZE_ADDRESS
432 void HeapObjectHeader::clearDebugMark() 432 void HeapObjectHeader::clearDeadMark()
433 { 433 {
434 checkHeader(); 434 checkHeader();
435 m_size &= ~debugBitMask; 435 m_size &= ~deadBitMask;
436 } 436 }
437 437
438 NO_SANITIZE_ADDRESS 438 NO_SANITIZE_ADDRESS
439 void HeapObjectHeader::setDebugMark() 439 void HeapObjectHeader::setDeadMark()
440 { 440 {
441 ASSERT(!isMarked());
441 checkHeader(); 442 checkHeader();
442 m_size |= debugBitMask; 443 m_size |= deadBitMask;
443 } 444 }
444 445
445 #ifndef NDEBUG 446 #ifndef NDEBUG
446 NO_SANITIZE_ADDRESS 447 NO_SANITIZE_ADDRESS
447 void HeapObjectHeader::zapMagic() 448 void HeapObjectHeader::zapMagic()
448 { 449 {
449 m_magic = zappedMagic; 450 m_magic = zappedMagic;
450 } 451 }
451 #endif 452 #endif
452 453
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after
492 return heapObjectHeader()->unmark(); 493 return heapObjectHeader()->unmark();
493 } 494 }
494 495
495 template<typename Header> 496 template<typename Header>
496 bool LargeHeapObject<Header>::isMarked() 497 bool LargeHeapObject<Header>::isMarked()
497 { 498 {
498 return heapObjectHeader()->isMarked(); 499 return heapObjectHeader()->isMarked();
499 } 500 }
500 501
501 template<typename Header> 502 template<typename Header>
503 void LargeHeapObject<Header>::setDeadMark()
504 {
505 heapObjectHeader()->setDeadMark();
506 }
507
508 template<typename Header>
502 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address) 509 void LargeHeapObject<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
503 { 510 {
504 ASSERT(contains(address)); 511 ASSERT(contains(address));
505 if (!objectContains(address)) 512 if (!objectContains(address) || heapObjectHeader()->hasDeadMark())
506 return; 513 return;
507 #if ENABLE(GC_TRACING) 514 #if ENABLE(GC_TRACING)
508 visitor->setHostInfo(&address, "stack"); 515 visitor->setHostInfo(&address, "stack");
509 #endif 516 #endif
510 mark(visitor); 517 mark(visitor);
511 } 518 }
512 519
513 template<> 520 template<>
514 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor) 521 void LargeHeapObject<FinalizedHeapObjectHeader>::mark(Visitor* visitor)
515 { 522 {
(...skipping 28 matching lines...) Expand all
544 551
545 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload) 552 FinalizedHeapObjectHeader* FinalizedHeapObjectHeader::fromPayload(const void* payload)
546 { 553 {
547 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload)); 554 Address addr = reinterpret_cast<Address>(const_cast<void*>(payload));
548 FinalizedHeapObjectHeader* header = 555 FinalizedHeapObjectHeader* header =
549 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize); 556 reinterpret_cast<FinalizedHeapObjectHeader*>(addr - finalizedHeaderSize);
550 return header; 557 return header;
551 } 558 }
552 559
553 template<typename Header> 560 template<typename Header>
554 ThreadHeap<Header>::ThreadHeap(ThreadState* state) 561 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index)
555 : m_currentAllocationPoint(0) 562 : m_currentAllocationPoint(0)
556 , m_remainingAllocationSize(0) 563 , m_remainingAllocationSize(0)
557 , m_firstPage(0) 564 , m_firstPage(0)
558 , m_firstLargeHeapObject(0) 565 , m_firstLargeHeapObject(0)
559 , m_biggestFreeListIndex(0) 566 , m_biggestFreeListIndex(0)
560 , m_threadState(state) 567 , m_threadState(state)
561 , m_pagePool(0) 568 , m_index(index)
562 { 569 {
563 clearFreeLists(); 570 clearFreeLists();
564 } 571 }
565 572
566 template<typename Header> 573 template<typename Header>
567 ThreadHeap<Header>::~ThreadHeap() 574 ThreadHeap<Header>::~ThreadHeap()
568 { 575 {
569 clearFreeLists(); 576 clearFreeLists();
570 if (!ThreadState::current()->isMainThread()) 577 flushHeapContainsCache();
571 assertEmpty(); 578
572 deletePages(); 579 // Add the ThreadHeap's pages to the orphanedPagePool.
580 Vector<BaseHeapPage*> pages;
581 for (HeapPage<Header>* page = m_firstPage; page; page = page->m_next)
582 pages.append(page);
583 m_firstPage = 0;
584
585 for (LargeHeapObject<Header>* largeObject = m_firstLargeHeapObject; largeObject; largeObject = largeObject->m_next)
586 pages.append(largeObject);
587 m_firstLargeHeapObject = 0;
588 Heap::orphanedPagePool()->addOrphanedPages(m_index, pages);
573 } 589 }
574 590
575 template<typename Header> 591 template<typename Header>
576 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo) 592 Address ThreadHeap<Header>::outOfLineAllocate(size_t size, const GCInfo* gcInfo)
577 { 593 {
578 size_t allocationSize = allocationSizeFromSize(size); 594 size_t allocationSize = allocationSizeFromSize(size);
579 if (threadState()->shouldGC()) { 595 if (threadState()->shouldGC()) {
580 if (threadState()->shouldForceConservativeGC()) 596 if (threadState()->shouldForceConservativeGC())
581 Heap::collectGarbage(ThreadState::HeapPointersOnStack); 597 Heap::collectGarbage(ThreadState::HeapPointersOnStack);
582 else 598 else
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after
732 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext) 748 void ThreadHeap<Header>::freeLargeObject(LargeHeapObject<Header>* object, LargeHeapObject<Header>** previousNext)
733 { 749 {
734 flushHeapContainsCache(); 750 flushHeapContainsCache();
735 object->unlink(previousNext); 751 object->unlink(previousNext);
736 object->finalize(); 752 object->finalize();
737 753
738 // Unpoison the object header and allocationGranularity bytes after the 754 // Unpoison the object header and allocationGranularity bytes after the
739 // object before freeing. 755 // object before freeing.
740 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header)); 756 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(Header));
741 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); 757 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
742 delete object->storage(); 758
759 if (object->shuttingDown()) {
760 // The thread is shutting down so this object is being removed as part
761 // of a thread local GC. In that case the object could be revived via
762 // a conservatively found dead object on another heap in a subsequent
763 // global GC and we need to put it in the orphanedPagePool to ensure
764 // it is still reachable. After the next full GC it can be released.
zerny-chromium 2014/07/07 12:11:56 Update the reason to explain that it could also be
wibling-chromium 2014/07/07 13:50:07 Done.
765 // NOTE: large objects are not moved to the memory pool as it is unlikely
766 // they can be reused due to their individual sizes.
767 Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
768 } else {
769 PageMemory* memory = object->storage();
770 object->~LargeHeapObject<Header>();
771 delete memory;
772 }
773 }
774
775 template<typename DataType>
776 HeapPool<DataType>::HeapPool()
777 {
778 for (int i = 0; i < NumberOfHeaps; ++i) {
779 m_pool[i] = 0;
780 }
781 }
782
783 HeapMemoryPool::~HeapMemoryPool()
784 {
785 for (int index = 0; index < NumberOfHeaps; ++index) {
786 while (PoolEntry* entry = m_pool[index]) {
787 m_pool[index] = entry->next;
788 PageMemory* memory = entry->data;
789 ASSERT(memory);
790 delete memory;
791 delete entry;
792 }
793 }
794 }
795
796 void HeapMemoryPool::addMemory(int index, PageMemory* memory)
797 {
798 // When adding memory to the pool we decommit it to ensure it is unused
799 // while in the pool. This also allows the physical memory backing the
800 // page to be given back to the OS.
801 memory->decommit();
802 MutexLocker locker(m_mutex[index]);
803 PoolEntry* entry = new PoolEntry(memory, m_pool[index]);
804 m_pool[index] = entry;
805 }
806
807 PageMemory* HeapMemoryPool::takeMemory(int index)
808 {
809 MutexLocker locker(m_mutex[index]);
810 while (PoolEntry* entry = m_pool[index]) {
811 m_pool[index] = entry->next;
812 PageMemory* memory = entry->data;
813 ASSERT(memory);
814 delete entry;
815 if (memory->commit())
816 return memory;
817
818 // We got some memory, but failed to commit it, try again.
819 delete memory;
820 }
821 return 0;
822 }
823
824 HeapOrphanedPagePool::~HeapOrphanedPagePool()
825 {
826 for (int index = 0; index < NumberOfHeaps; ++index) {
827 while (PoolEntry* entry = m_pool[index]) {
828 m_pool[index] = entry->next;
829 BaseHeapPage* page = entry->data;
830 delete entry;
831 PageMemory* memory = page->storage();
832 ASSERT(memory);
833 page->~BaseHeapPage();
834 delete memory;
835 }
836 }
837 }
838
839 void HeapOrphanedPagePool::addOrphanedPage(int index, BaseHeapPage* page)
840 {
841 page->markOrphaned();
842 PoolEntry* entry = new PoolEntry(page, m_pool[index]);
843 m_pool[index] = entry;
844 }
845
846 void HeapOrphanedPagePool::addOrphanedPages(int index, Vector<BaseHeapPage*>& pages)
847 {
848 for (Vector<BaseHeapPage*>::const_iterator it = pages.begin(); it != pages.end(); ++it) {
849 BaseHeapPage* page = *it;
850 page->markOrphaned();
851 PoolEntry* entry = new PoolEntry(page, m_pool[index]);
852 m_pool[index] = entry;
zerny-chromium 2014/07/07 12:11:56 Nit: call addOrphanedPage(index, page) instead
wibling-chromium 2014/07/07 13:50:07 Done.
853 }
854 }
855
856 void HeapOrphanedPagePool::decommitOrphanedPages()
857 {
858 // No locking needed as all threads are at safepoints at this point in time.
859 for (int index = 0; index < NumberOfHeaps; ++index) {
860 PoolEntry* entry = m_pool[index];
861 PoolEntry** prevNext = &m_pool[index];
862 while (entry) {
863 BaseHeapPage* page = entry->data;
864 if (page->traced()) {
865 // If the page was traced in the last GC it is not decommitted.
866 // We only decommit a page, ie. put it in the memory pool,
867 // when the page has no objects pointing to it.
868 // We mark the page as dead. This clears the traced flag
zerny-chromium 2014/07/07 12:11:56 => We mark the page as orphaned.
wibling-chromium 2014/07/07 13:50:07 Done.
869 // and any object trace bits that were set during tracing.
870 page->markOrphaned();
871 prevNext = &entry->next;
872 entry = entry->next;
873 continue;
874 }
875
876 // Page was not traced. Check if we should reuse the memory or just
877 // free it. Large object memory is not reused, but freed, normal
878 // blink heap pages are reused.
879 PageMemory* memory = page->storage();
880
881 // Call the destructor before freeing or adding to the memory pool.
882 if (page->reuseMemory()) {
883 page->~BaseHeapPage();
884 Heap::memoryPool()->addMemory(index, memory);
885 } else {
886 page->~BaseHeapPage();
887 delete memory;
888 }
889
890 PoolEntry* deadEntry = entry;
891 entry = entry->next;
892 *prevNext = entry;
893 delete deadEntry;
894 }
895 }
896 }
897
898 bool HeapOrphanedPagePool::contains(void* object)
899 {
900 for (int index = 0; index < NumberOfHeaps; ++index) {
901 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
902 BaseHeapPage* page = entry->data;
903 if (page->contains(reinterpret_cast<Address>(object)))
904 return true;
905 }
906 }
907 return false;
743 } 908 }
744 909
745 template<> 910 template<>
746 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) 911 void ThreadHeap<FinalizedHeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
747 { 912 {
748 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on 913 // When adding a page to the ThreadHeap using FinalizedHeapObjectHeaders the GCInfo on
749 // the heap should be unused (ie. 0). 914 // the heap should be unused (ie. 0).
750 allocatePage(0); 915 allocatePage(0);
751 } 916 }
752 917
753 template<> 918 template<>
754 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo) 919 void ThreadHeap<HeapObjectHeader>::addPageToHeap(const GCInfo* gcInfo)
755 { 920 {
756 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap 921 // When adding a page to the ThreadHeap using HeapObjectHeaders store the GCInfo on the heap
757 // since it is the same for all objects 922 // since it is the same for all objects
758 ASSERT(gcInfo); 923 ASSERT(gcInfo);
759 allocatePage(gcInfo); 924 allocatePage(gcInfo);
760 } 925 }
761 926
762 template<typename Header> 927 template <typename Header>
763 void ThreadHeap<Header>::clearPagePool() 928 void ThreadHeap<Header>::removePageFromHeap(HeapPage<Header>* page)
764 {
765 while (takePageFromPool()) { }
766 }
767
768 template<typename Header>
769 PageMemory* ThreadHeap<Header>::takePageFromPool()
770 {
771 Heap::flushHeapDoesNotContainCache();
772 while (PagePoolEntry* entry = m_pagePool) {
773 m_pagePool = entry->next();
774 PageMemory* storage = entry->storage();
775 delete entry;
776
777 if (storage->commit())
778 return storage;
779
780 // Failed to commit pooled storage. Release it.
781 delete storage;
782 }
783
784 return 0;
785 }
786
787 template<typename Header>
788 void ThreadHeap<Header>::addPageMemoryToPool(PageMemory* storage)
789 { 929 {
790 flushHeapContainsCache(); 930 flushHeapContainsCache();
791 PagePoolEntry* entry = new PagePoolEntry(storage, m_pagePool); 931 if (page->shuttingDown()) {
792 m_pagePool = entry; 932 // The thread is shutting down so this page is being removed as part of
zerny-chromium 2014/07/07 12:11:56 Nit: as before, it is just as much guarding agains
wibling-chromium 2014/07/07 13:50:07 Done.
793 } 933 // a thread local GC. In that case the page could be revived via a
794 934 // conservatively found dead object on another heap in a subsequent
795 template <typename Header> 935 // global GC and we need to put it in the orphanedPagePool to ensure
796 void ThreadHeap<Header>::addPageToPool(HeapPage<Header>* page) 936 // it is still reachable. After the next full GC we can decommit it
797 { 937 // and move it to the free memory pool.
798 PageMemory* storage = page->storage(); 938 Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
799 storage->decommit(); 939 } else {
800 addPageMemoryToPool(storage); 940 PageMemory* memory = page->storage();
941 page->~HeapPage<Header>();
942 Heap::memoryPool()->addMemory(m_index, memory);
943 }
801 } 944 }
802 945
803 template<typename Header> 946 template<typename Header>
804 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo) 947 void ThreadHeap<Header>::allocatePage(const GCInfo* gcInfo)
805 { 948 {
806 Heap::flushHeapDoesNotContainCache(); 949 Heap::flushHeapDoesNotContainCache();
807 PageMemory* pageMemory = takePageFromPool(); 950 PageMemory* pageMemory = Heap::memoryPool()->takeMemory(m_index);
808 if (!pageMemory) { 951 while (!pageMemory) {
809 // Allocate a memory region for blinkPagesPerRegion pages that 952 // Allocate a memory region for blinkPagesPerRegion pages that
810 // will each have the following layout. 953 // will each have the following layout.
811 // 954 //
812 // [ guard os page | ... payload ... | guard os page ] 955 // [ guard os page | ... payload ... | guard os page ]
813 // ^---{ aligned to blink page size } 956 // ^---{ aligned to blink page size }
814 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion); 957 PageMemoryRegion* region = PageMemoryRegion::allocate(blinkPageSize * blinkPagesPerRegion, blinkPagesPerRegion);
815 // Setup the PageMemory object for each of the pages in the 958 // Setup the PageMemory object for each of the pages in the
816 // region. 959 // region.
817 size_t offset = 0; 960 size_t offset = 0;
818 for (size_t i = 0; i < blinkPagesPerRegion; i++) { 961 for (size_t i = 0; i < blinkPagesPerRegion; i++) {
819 addPageMemoryToPool(PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize())); 962 Heap::memoryPool()->addMemory(m_index, PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()));
820 offset += blinkPageSize; 963 offset += blinkPageSize;
821 } 964 }
822 pageMemory = takePageFromPool(); 965 pageMemory = Heap::memoryPool()->takeMemory(m_index);
823 RELEASE_ASSERT(pageMemory);
824 } 966 }
825 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo); 967 HeapPage<Header>* page = new (pageMemory->writableStart()) HeapPage<Header>(pageMemory, this, gcInfo);
826 // FIXME: Oilpan: Linking new pages into the front of the list is 968 // FIXME: Oilpan: Linking new pages into the front of the list is
827 // crucial when performing allocations during finalization because 969 // crucial when performing allocations during finalization because
828 // it ensures that those pages are not swept in the current GC 970 // it ensures that those pages are not swept in the current GC
829 // round. We should create a separate page list for that to 971 // round. We should create a separate page list for that to
830 // separate out the pages allocated during finalization clearly 972 // separate out the pages allocated during finalization clearly
831 // from the pages currently being swept. 973 // from the pages currently being swept.
832 page->link(&m_firstPage); 974 page->link(&m_firstPage);
833 addToFreeList(page->payload(), HeapPage<Header>::payloadSize()); 975 addToFreeList(page->payload(), HeapPage<Header>::payloadSize());
(...skipping 23 matching lines...) Expand all
857 ASSERT(isConsistentForGC()); 999 ASSERT(isConsistentForGC());
858 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING 1000 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING
859 // When using ASan do a pre-sweep where all unmarked objects are poisoned before 1001 // When using ASan do a pre-sweep where all unmarked objects are poisoned before
860 // calling their finalizer methods. This can catch the cases where one objects 1002 // calling their finalizer methods. This can catch the cases where one objects
861 // finalizer tries to modify another object as part of finalization. 1003 // finalizer tries to modify another object as part of finalization.
862 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1004 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
863 page->poisonUnmarkedObjects(); 1005 page->poisonUnmarkedObjects();
864 #endif 1006 #endif
865 HeapPage<Header>* page = m_firstPage; 1007 HeapPage<Header>* page = m_firstPage;
866 HeapPage<Header>** previous = &m_firstPage; 1008 HeapPage<Header>** previous = &m_firstPage;
867 bool pagesRemoved = false;
868 while (page) { 1009 while (page) {
869 if (page->isEmpty()) { 1010 if (page->isEmpty()) {
870 flushHeapContainsCache();
871 HeapPage<Header>* unused = page; 1011 HeapPage<Header>* unused = page;
872 page = page->next(); 1012 page = page->next();
873 HeapPage<Header>::unlink(unused, previous); 1013 HeapPage<Header>::unlink(unused, previous);
874 pagesRemoved = true;
875 } else { 1014 } else {
876 page->sweep(); 1015 page->sweep();
877 previous = &page->m_next; 1016 previous = &page->m_next;
878 page = page->next(); 1017 page = page->next();
879 } 1018 }
880 } 1019 }
881 if (pagesRemoved)
882 flushHeapContainsCache();
883 1020
884 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject; 1021 LargeHeapObject<Header>** previousNext = &m_firstLargeHeapObject;
885 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) { 1022 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
886 if (current->isMarked()) { 1023 if (current->isMarked()) {
887 stats().increaseAllocatedSpace(current->size()); 1024 stats().increaseAllocatedSpace(current->size());
888 stats().increaseObjectSpace(current->payloadSize()); 1025 stats().increaseObjectSpace(current->payloadSize());
889 current->unmark(); 1026 current->unmark();
890 previousNext = &current->m_next; 1027 previousNext = &current->m_next;
891 current = current->next(); 1028 current = current->next();
892 } else { 1029 } else {
(...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after
947 template<typename Header> 1084 template<typename Header>
948 void ThreadHeap<Header>::makeConsistentForGC() 1085 void ThreadHeap<Header>::makeConsistentForGC()
949 { 1086 {
950 if (ownsNonEmptyAllocationArea()) 1087 if (ownsNonEmptyAllocationArea())
951 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); 1088 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
952 setAllocationPoint(0, 0); 1089 setAllocationPoint(0, 0);
953 clearFreeLists(); 1090 clearFreeLists();
954 } 1091 }
955 1092
956 template<typename Header> 1093 template<typename Header>
957 void ThreadHeap<Header>::clearMarks() 1094 void ThreadHeap<Header>::clearLiveAndMarkDead()
958 { 1095 {
959 ASSERT(isConsistentForGC()); 1096 ASSERT(isConsistentForGC());
960 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) 1097 for (HeapPage<Header>* page = m_firstPage; page; page = page->next())
961 page->clearMarks(); 1098 page->clearLiveAndMarkDead();
962 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) 1099 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; current = current->next()) {
963 current->unmark(); 1100 if (current->isMarked())
1101 current->unmark();
1102 else
1103 current->setDeadMark();
1104 }
964 } 1105 }
965 1106
966 template<typename Header> 1107 template<typename Header>
967 void ThreadHeap<Header>::deletePages()
968 {
969 flushHeapContainsCache();
970 // Add all pages in the pool to the heap's list of pages before deleting
971 clearPagePool();
972
973 for (HeapPage<Header>* page = m_firstPage; page; ) {
974 HeapPage<Header>* dead = page;
975 page = page->next();
976 PageMemory* storage = dead->storage();
977 dead->~HeapPage();
978 delete storage;
979 }
980 m_firstPage = 0;
981
982 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current;) {
983 LargeHeapObject<Header>* dead = current;
984 current = current->next();
985 PageMemory* storage = dead->storage();
986 dead->~LargeHeapObject();
987 delete storage;
988 }
989 m_firstLargeHeapObject = 0;
990 }
991
992 template<typename Header>
993 void ThreadHeap<Header>::clearFreeLists() 1108 void ThreadHeap<Header>::clearFreeLists()
994 { 1109 {
995 for (size_t i = 0; i < blinkPageSizeLog2; i++) 1110 for (size_t i = 0; i < blinkPageSizeLog2; i++)
996 m_freeLists[i] = 0; 1111 m_freeLists[i] = 0;
997 } 1112 }
998 1113
999 int BaseHeap::bucketIndexForSize(size_t size) 1114 int BaseHeap::bucketIndexForSize(size_t size)
1000 { 1115 {
1001 ASSERT(size > 0); 1116 ASSERT(size > 0);
1002 int index = -1; 1117 int index = -1;
(...skipping 20 matching lines...) Expand all
1023 void HeapPage<Header>::link(HeapPage** prevNext) 1138 void HeapPage<Header>::link(HeapPage** prevNext)
1024 { 1139 {
1025 m_next = *prevNext; 1140 m_next = *prevNext;
1026 *prevNext = this; 1141 *prevNext = this;
1027 } 1142 }
1028 1143
1029 template<typename Header> 1144 template<typename Header>
1030 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext) 1145 void HeapPage<Header>::unlink(HeapPage* unused, HeapPage** prevNext)
1031 { 1146 {
1032 *prevNext = unused->m_next; 1147 *prevNext = unused->m_next;
1033 unused->heap()->addPageToPool(unused); 1148 unused->heap()->removePageFromHeap(unused);
1034 } 1149 }
1035 1150
1036 template<typename Header> 1151 template<typename Header>
1037 void HeapPage<Header>::getStats(HeapStats& stats) 1152 void HeapPage<Header>::getStats(HeapStats& stats)
1038 { 1153 {
1039 stats.increaseAllocatedSpace(blinkPageSize); 1154 stats.increaseAllocatedSpace(blinkPageSize);
1040 Address headerAddress = payload(); 1155 Address headerAddress = payload();
1041 ASSERT(headerAddress != end()); 1156 ASSERT(headerAddress != end());
1042 do { 1157 do {
1043 Header* header = reinterpret_cast<Header*>(headerAddress); 1158 Header* header = reinterpret_cast<Header*>(headerAddress);
(...skipping 45 matching lines...) Expand 10 before | Expand all | Expand 10 after
1089 header->unmark(); 1204 header->unmark();
1090 headerAddress += header->size(); 1205 headerAddress += header->size();
1091 heap()->stats().increaseObjectSpace(header->payloadSize()); 1206 heap()->stats().increaseObjectSpace(header->payloadSize());
1092 startOfGap = headerAddress; 1207 startOfGap = headerAddress;
1093 } 1208 }
1094 if (startOfGap != end()) 1209 if (startOfGap != end())
1095 heap()->addToFreeList(startOfGap, end() - startOfGap); 1210 heap()->addToFreeList(startOfGap, end() - startOfGap);
1096 } 1211 }
1097 1212
1098 template<typename Header> 1213 template<typename Header>
1099 void HeapPage<Header>::clearMarks() 1214 void HeapPage<Header>::clearLiveAndMarkDead()
1100 { 1215 {
1101 for (Address headerAddress = payload(); headerAddress < end();) { 1216 for (Address headerAddress = payload(); headerAddress < end();) {
1102 Header* header = reinterpret_cast<Header*>(headerAddress); 1217 Header* header = reinterpret_cast<Header*>(headerAddress);
1103 ASSERT(header->size() < blinkPagePayloadSize()); 1218 ASSERT(header->size() < blinkPagePayloadSize());
1104 if (!header->isFree()) 1219 // Skip freelist entries.
1220 if (header->isFree()) {
1221 headerAddress += header->size();
1222 continue;
1223 }
1224 if (header->isMarked())
1105 header->unmark(); 1225 header->unmark();
1226 else
1227 header->setDeadMark();
1106 headerAddress += header->size(); 1228 headerAddress += header->size();
1107 } 1229 }
1108 } 1230 }
1109 1231
1110 template<typename Header> 1232 template<typename Header>
1111 void HeapPage<Header>::populateObjectStartBitMap() 1233 void HeapPage<Header>::populateObjectStartBitMap()
1112 { 1234 {
1113 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); 1235 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1114 Address start = payload(); 1236 Address start = payload();
1115 for (Address headerAddress = start; headerAddress < end();) { 1237 for (Address headerAddress = start; headerAddress < end();) {
(...skipping 59 matching lines...) Expand 10 before | Expand all | Expand 10 after
1175 if (header->isFree()) 1297 if (header->isFree())
1176 return 0; 1298 return 0;
1177 return header; 1299 return header;
1178 } 1300 }
1179 1301
1180 template<typename Header> 1302 template<typename Header>
1181 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address) 1303 void HeapPage<Header>::checkAndMarkPointer(Visitor* visitor, Address address)
1182 { 1304 {
1183 ASSERT(contains(address)); 1305 ASSERT(contains(address));
1184 Header* header = findHeaderFromAddress(address); 1306 Header* header = findHeaderFromAddress(address);
1185 if (!header) 1307 if (!header || header->hasDeadMark())
1186 return; 1308 return;
1187 1309
1188 #if ENABLE(GC_TRACING) 1310 #if ENABLE(GC_TRACING)
1189 visitor->setHostInfo(&address, "stack"); 1311 visitor->setHostInfo(&address, "stack");
1190 #endif 1312 #endif
1191 if (hasVTable(header) && !vTableInitialized(header->payload())) 1313 if (hasVTable(header) && !vTableInitialized(header->payload()))
1192 visitor->markConservatively(header); 1314 visitor->markConservatively(header);
1193 else 1315 else
1194 visitor->mark(header, traceCallback(header)); 1316 visitor->mark(header, traceCallback(header));
1195 } 1317 }
(...skipping 167 matching lines...) Expand 10 before | Expand all | Expand 10 after
1363 { 1485 {
1364 for (size_t i = 0; i < bufferSize; i++) 1486 for (size_t i = 0; i < bufferSize; i++)
1365 m_buffer[i] = Item(0, 0); 1487 m_buffer[i] = Item(0, 0);
1366 } 1488 }
1367 1489
1368 bool CallbackStack::isEmpty() 1490 bool CallbackStack::isEmpty()
1369 { 1491 {
1370 return m_current == &(m_buffer[0]) && !m_next; 1492 return m_current == &(m_buffer[0]) && !m_next;
1371 } 1493 }
1372 1494
1373 bool CallbackStack::popAndInvokeCallback(CallbackStack** first, Visitor* visitor ) 1495 template<bool ThreadLocal>
1374 {
1375 if (m_current == &(m_buffer[0])) {
1376 if (!m_next) {
1377 #ifndef NDEBUG
1378 clearUnused();
1379 #endif
1380 return false;
1381 }
1382 CallbackStack* nextStack = m_next;
1383 *first = nextStack;
1384 delete this;
1385 return nextStack->popAndInvokeCallback(first, visitor);
1386 }
1387 Item* item = --m_current;
1388
1389 VisitorCallback callback = item->callback();
1390 #if ENABLE(GC_TRACING)
1391 if (ThreadState::isAnyThreadInGC()) // weak-processing will also use popAndInvokeCallback
1392 visitor->setHostInfo(item->object(), classOf(item->object()));
1393 #endif
1394 callback(visitor, item->object());
1395
1396 return true;
1397 }
1398
1399 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor) 1496 void CallbackStack::invokeCallbacks(CallbackStack** first, Visitor* visitor)
1400 { 1497 {
1401 CallbackStack* stack = 0; 1498 CallbackStack* stack = 0;
1402 // The first block is the only one where new ephemerons are added, so we 1499 // The first block is the only one where new ephemerons are added, so we
1403 // call the callbacks on that last, to catch any new ephemerons discovered 1500 // call the callbacks on that last, to catch any new ephemerons discovered
1404 // in the callbacks. 1501 // in the callbacks.
1405 // However, if enough ephemerons were added, we may have a new block that 1502 // However, if enough ephemerons were added, we may have a new block that
1406 // has been prepended to the chain. This will be very rare, but we can 1503 // has been prepended to the chain. This will be very rare, but we can
1407 // handle the situation by starting again and calling all the callbacks 1504 // handle the situation by starting again and calling all the callbacks
1408 // a second time. 1505 // a second time.
1409 while (stack != *first) { 1506 while (stack != *first) {
1410 stack = *first; 1507 stack = *first;
1411 stack->invokeOldestCallbacks(visitor); 1508 stack->invokeOldestCallbacks<ThreadLocal>(visitor);
1412 } 1509 }
1413 } 1510 }
1414 1511
1512 template<bool ThreadLocal>
1415 void CallbackStack::invokeOldestCallbacks(Visitor* visitor) 1513 void CallbackStack::invokeOldestCallbacks(Visitor* visitor)
1416 { 1514 {
1417 // Recurse first (bufferSize at a time) so we get to the newly added entries 1515 // Recurse first (bufferSize at a time) so we get to the newly added entries
1418 // last. 1516 // last.
1419 if (m_next) 1517 if (m_next)
1420 m_next->invokeOldestCallbacks(visitor); 1518 m_next->invokeOldestCallbacks<ThreadLocal>(visitor);
1421 1519
1422 // This loop can tolerate entries being added by the callbacks after 1520 // This loop can tolerate entries being added by the callbacks after
1423 // iteration starts. 1521 // iteration starts.
1424 for (unsigned i = 0; m_buffer + i < m_current; i++) { 1522 for (unsigned i = 0; m_buffer + i < m_current; i++) {
1425 Item& item = m_buffer[i]; 1523 Item& item = m_buffer[i];
1524
1525 BaseHeapPage* heapPage = pageHeaderFromObject(item.object());
1526 if (ThreadLocal ? (heapPage->orphaned() || !heapPage->shuttingDown()) : heapPage->orphaned()) {
zerny-chromium 2014/07/07 12:11:56 Nit: if (heapPage->orphaned() || !(ThreadLocal &&
wibling-chromium 2014/07/07 13:50:07 I don't think that would work. In the case where T
zerny-chromium 2014/07/08 08:11:03 Sorry, I messed up the negation. It should be:
1527 // If tracing this from a global GC set the traced bit.
1528 if (!ThreadLocal)
1529 heapPage->setTraced();
1530 continue;
1531 }
1426 item.callback()(visitor, item.object()); 1532 item.callback()(visitor, item.object());
1427 } 1533 }
1428 } 1534 }
1429 1535
1430 #ifndef NDEBUG 1536 #ifndef NDEBUG
1431 bool CallbackStack::hasCallbackForObject(const void* object) 1537 bool CallbackStack::hasCallbackForObject(const void* object)
1432 { 1538 {
1433 for (unsigned i = 0; m_buffer + i < m_current; i++) { 1539 for (unsigned i = 0; m_buffer + i < m_current; i++) {
1434 Item* item = &m_buffer[i]; 1540 Item* item = &m_buffer[i];
1435 if (item->object() == object) { 1541 if (item->object() == object) {
(...skipping 232 matching lines...) Expand 10 before | Expand all | Expand 10 after
1668 }; 1774 };
1669 1775
1670 void Heap::init() 1776 void Heap::init()
1671 { 1777 {
1672 ThreadState::init(); 1778 ThreadState::init();
1673 CallbackStack::init(&s_markingStack); 1779 CallbackStack::init(&s_markingStack);
1674 CallbackStack::init(&s_weakCallbackStack); 1780 CallbackStack::init(&s_weakCallbackStack);
1675 CallbackStack::init(&s_ephemeronStack); 1781 CallbackStack::init(&s_ephemeronStack);
1676 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); 1782 s_heapDoesNotContainCache = new HeapDoesNotContainCache();
1677 s_markingVisitor = new MarkingVisitor(); 1783 s_markingVisitor = new MarkingVisitor();
1784 s_memoryPool = new HeapMemoryPool();
1785 s_orphanedPagePool = new HeapOrphanedPagePool();
1678 } 1786 }
1679 1787
1680 void Heap::shutdown() 1788 void Heap::shutdown()
1681 { 1789 {
1682 s_shutdownCalled = true; 1790 s_shutdownCalled = true;
1683 ThreadState::shutdownHeapIfNecessary(); 1791 ThreadState::shutdownHeapIfNecessary();
1684 } 1792 }
1685 1793
1686 void Heap::doShutdown() 1794 void Heap::doShutdown()
1687 { 1795 {
1688 // We don't want to call doShutdown() twice. 1796 // We don't want to call doShutdown() twice.
1689 if (!s_markingVisitor) 1797 if (!s_markingVisitor)
1690 return; 1798 return;
1691 1799
1692 ASSERT(!ThreadState::isAnyThreadInGC()); 1800 ASSERT(!ThreadState::isAnyThreadInGC());
1693 ASSERT(!ThreadState::attachedThreads().size()); 1801 ASSERT(!ThreadState::attachedThreads().size());
1694 delete s_markingVisitor; 1802 delete s_markingVisitor;
1695 s_markingVisitor = 0; 1803 s_markingVisitor = 0;
1696 delete s_heapDoesNotContainCache; 1804 delete s_heapDoesNotContainCache;
1697 s_heapDoesNotContainCache = 0; 1805 s_heapDoesNotContainCache = 0;
1806 delete s_memoryPool;
1807 s_memoryPool = 0;
1808 delete s_orphanedPagePool;
1809 s_orphanedPagePool = 0;
1698 CallbackStack::shutdown(&s_weakCallbackStack); 1810 CallbackStack::shutdown(&s_weakCallbackStack);
1699 CallbackStack::shutdown(&s_markingStack); 1811 CallbackStack::shutdown(&s_markingStack);
1700 CallbackStack::shutdown(&s_ephemeronStack); 1812 CallbackStack::shutdown(&s_ephemeronStack);
1701 ThreadState::shutdown(); 1813 ThreadState::shutdown();
1702 } 1814 }
1703 1815
1704 BaseHeapPage* Heap::contains(Address address) 1816 BaseHeapPage* Heap::contains(Address address)
1705 { 1817 {
1706 ASSERT(ThreadState::isAnyThreadInGC()); 1818 ASSERT(ThreadState::isAnyThreadInGC());
1707 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 1819 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
1708 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 1820 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
1709 BaseHeapPage* page = (*it)->contains(address); 1821 BaseHeapPage* page = (*it)->contains(address);
1710 if (page) 1822 if (page)
1711 return page; 1823 return page;
1712 } 1824 }
1713 return 0; 1825 return 0;
1714 } 1826 }
1715 1827
1828 #ifndef NDEBUG
1829 bool Heap::containedInHeapOrOrphanedPage(void* object)
1830 {
1831 return contains(object) || orphanedPagePool()->contains(object);
1832 }
1833 #endif
1834
1716 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) 1835 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address)
1717 { 1836 {
1718 ASSERT(ThreadState::isAnyThreadInGC()); 1837 ASSERT(ThreadState::isAnyThreadInGC());
1719 1838
1720 #ifdef NDEBUG 1839 #ifdef NDEBUG
1721 if (s_heapDoesNotContainCache->lookup(address)) 1840 if (s_heapDoesNotContainCache->lookup(address))
1722 return 0; 1841 return 0;
1723 #endif 1842 #endif
1724 1843
1725 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 1844 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
(...skipping 58 matching lines...) Expand 10 before | Expand all | Expand 10 after
1784 builder.append("\n\t"); 1903 builder.append("\n\t");
1785 builder.append(frameToName.nullableName()); 1904 builder.append(frameToName.nullableName());
1786 --framesToShow; 1905 --framesToShow;
1787 } 1906 }
1788 return builder.toString().replace("WebCore::", ""); 1907 return builder.toString().replace("WebCore::", "");
1789 } 1908 }
1790 #endif 1909 #endif
1791 1910
1792 void Heap::pushTraceCallback(void* object, TraceCallback callback) 1911 void Heap::pushTraceCallback(void* object, TraceCallback callback)
1793 { 1912 {
1794 ASSERT(Heap::contains(object)); 1913 ASSERT(Heap::containedInHeapOrOrphanedPage(object));
1795 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack); 1914 CallbackStack::Item* slot = s_markingStack->allocateEntry(&s_markingStack);
1796 *slot = CallbackStack::Item(object, callback); 1915 *slot = CallbackStack::Item(object, callback);
1797 } 1916 }
1798 1917
1918 template<bool ThreadLocal>
1799 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) 1919 bool Heap::popAndInvokeTraceCallback(Visitor* visitor)
1800 { 1920 {
1801 return s_markingStack->popAndInvokeCallback(&s_markingStack, visitor); 1921 return s_markingStack->popAndInvokeCallback<ThreadLocal>(&s_markingStack, vi sitor);
1802 } 1922 }
1803 1923
1804 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback ) 1924 void Heap::pushWeakCellPointerCallback(void** cell, WeakPointerCallback callback )
1805 { 1925 {
1806 ASSERT(Heap::contains(cell)); 1926 ASSERT(Heap::contains(cell));
1807 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack); 1927 CallbackStack::Item* slot = s_weakCallbackStack->allocateEntry(&s_weakCallba ckStack);
1808 *slot = CallbackStack::Item(cell, callback); 1928 *slot = CallbackStack::Item(cell, callback);
1809 } 1929 }
1810 1930
1811 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback) 1931 void Heap::pushWeakObjectPointerCallback(void* closure, void* object, WeakPointe rCallback callback)
1812 { 1932 {
1813 ASSERT(Heap::contains(object)); 1933 ASSERT(Heap::contains(object));
1814 BaseHeapPage* heapPageForObject = reinterpret_cast<BaseHeapPage*>(pageHeader Address(reinterpret_cast<Address>(object))); 1934 BaseHeapPage* heapPageForObject = pageHeaderFromObject(object);
1815 ASSERT(Heap::contains(object) == heapPageForObject); 1935 ASSERT(Heap::contains(object) == heapPageForObject);
1816 ThreadState* state = heapPageForObject->threadState(); 1936 ThreadState* state = heapPageForObject->threadState();
1817 state->pushWeakObjectPointerCallback(closure, callback); 1937 state->pushWeakObjectPointerCallback(closure, callback);
1818 } 1938 }
1819 1939
1820 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor) 1940 bool Heap::popAndInvokeWeakPointerCallback(Visitor* visitor)
1821 { 1941 {
1822 return s_weakCallbackStack->popAndInvokeCallback(&s_weakCallbackStack, visit or); 1942 return s_weakCallbackStack->popAndInvokeCallback<false>(&s_weakCallbackStack , visitor);
1823 } 1943 }
1824 1944
1825 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback) 1945 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, E phemeronCallback iterationDoneCallback)
1826 { 1946 {
1827 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k); 1947 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(&s_ephemeronStac k);
1828 *slot = CallbackStack::Item(table, iterationCallback); 1948 *slot = CallbackStack::Item(table, iterationCallback);
1829 1949
1830 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks. 1950 // We use the callback stack of weak cell pointers for the ephemeronIteratio nDone callbacks.
1831 // These callbacks are called right after marking and before any thread comm ences execution 1951 // These callbacks are called right after marking and before any thread comm ences execution
1832 // so it suits our needs for telling the ephemerons that the iteration is do ne. 1952 // so it suits our needs for telling the ephemerons that the iteration is do ne.
(...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after
1870 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear(); 1990 static_cast<MarkingVisitor*>(s_markingVisitor)->objectGraph().clear();
1871 #endif 1991 #endif
1872 1992
1873 // Disallow allocation during garbage collection (but not 1993 // Disallow allocation during garbage collection (but not
1874 // during the finalization that happens when the gcScope is 1994 // during the finalization that happens when the gcScope is
1875 // torn down). 1995 // torn down).
1876 NoAllocationScope<AnyThread> noAllocationScope; 1996 NoAllocationScope<AnyThread> noAllocationScope;
1877 1997
1878 prepareForGC(); 1998 prepareForGC();
1879 1999
1880 ThreadState::visitRoots(s_markingVisitor); 2000 tracingAndGlobalWeakProcessing<false>();
2001
2002 // After a global marking we know that any orphaned page that was not reache d
2003 // cannot be revived in a subsequent GC. This is due to a thread either havi ng
2004 // swept its heap or having done a "poor mans sweep" in prepareForGC which m arks
2005 // objects that were not traced in the previous GC as dead. In this GC's mar king
zerny-chromium 2014/07/07 12:11:56 => that are dead but not swept.
wibling-chromium 2014/07/07 13:50:07 Done.
2006 // we check that any object marked as dead is not revived. The only case
2007 // where we need to check for this is during conservative scanning.
zerny-chromium 2014/07/07 12:11:56 Nit: again, a programming error could also result
wibling-chromium 2014/07/07 13:50:07 Done.
2008 orphanedPagePool()->decommitOrphanedPages();
2009
2010 #if ENABLE(GC_TRACING)
2011 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
2012 #endif
2013
2014 if (blink::Platform::current()) {
2015 uint64_t objectSpaceSize;
2016 uint64_t allocatedSpaceSize;
2017 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
2018 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
2019 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2020 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
2021 }
2022 }
2023
2024 void Heap::collectGarbageForThread(ThreadState* state, bool sweepOnly)
2025 {
2026 // We explicitly do not enter a safepoint while doing thread specific
2027 // garbage collection since we don't want to allow a global GC at the
2028 // same time as a thread local GC.
2029
2030 NoAllocationScope<AnyThread> noAllocationScope;
2031
2032 state->enterGC();
2033 state->prepareForGC();
2034
2035 if (!sweepOnly)
2036 tracingAndGlobalWeakProcessing<true>();
2037
2038 state->leaveGC();
2039 state->performPendingSweep();
2040 }
2041
2042 template<bool ThreadLocal>
2043 void Heap::tracingAndGlobalWeakProcessing()
2044 {
2045 if (ThreadLocal)
2046 ThreadState::current()->visitLocalRoots(s_markingVisitor);
2047 else
2048 ThreadState::visitRoots(s_markingVisitor);
1881 2049
1882 // Ephemeron fixed point loop. 2050 // Ephemeron fixed point loop.
1883 do { 2051 do {
1884 // Recursively mark all objects that are reachable from the roots. 2052 // Recursively mark all objects that are reachable from the roots for th is thread.
1885 while (popAndInvokeTraceCallback(s_markingVisitor)) { } 2053 // Also don't continue tracing if the trace hits an object on another th read's heap.
2054 while (popAndInvokeTraceCallback<ThreadLocal>(s_markingVisitor)) { }
1886 2055
1887 // Mark any strong pointers that have now become reachable in ephemeron 2056 // Mark any strong pointers that have now become reachable in ephemeron
1888 // maps. 2057 // maps.
1889 CallbackStack::invokeCallbacks(&s_ephemeronStack, s_markingVisitor); 2058 CallbackStack::invokeCallbacks<ThreadLocal>(&s_ephemeronStack, s_marking Visitor);
1890 2059
1891 // Rerun loop if ephemeron processing queued more objects for tracing. 2060 // Rerun loop if ephemeron processing queued more objects for tracing.
1892 } while (!s_markingStack->isEmpty()); 2061 } while (!s_markingStack->isEmpty());
1893 2062
1894 // Call weak callbacks on objects that may now be pointing to dead 2063 // Call weak callbacks on objects that may now be pointing to dead
1895 // objects and call ephemeronIterationDone callbacks on weak tables 2064 // objects and call ephemeronIterationDone callbacks on weak tables
1896 // to do cleanup (specifically clear the queued bits for weak hash 2065 // to do cleanup (specifically clear the queued bits for weak hash
1897 // tables). 2066 // tables).
1898 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { } 2067 while (popAndInvokeWeakPointerCallback(s_markingVisitor)) { }
1899 2068
1900 CallbackStack::clear(&s_ephemeronStack); 2069 CallbackStack::clear(&s_ephemeronStack);
1901 2070
1902 // It is not permitted to trace pointers of live objects in the weak 2071 // It is not permitted to trace pointers of live objects in the weak
1903 // callback phase, so the marking stack should still be empty here. 2072 // callback phase, so the marking stack should still be empty here.
1904 ASSERT(s_markingStack->isEmpty()); 2073 ASSERT(s_markingStack->isEmpty());
1905
1906 #if ENABLE(GC_TRACING)
1907 static_cast<MarkingVisitor*>(s_markingVisitor)->reportStats();
1908 #endif
1909
1910 if (blink::Platform::current()) {
1911 uint64_t objectSpaceSize;
1912 uint64_t allocatedSpaceSize;
1913 getHeapSpaceSize(&objectSpaceSize, &allocatedSpaceSize);
1914 blink::Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbag e", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50);
1915 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSp ace", objectSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
1916 blink::Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocate dSpace", allocatedSpaceSize / 1024, 0, 4 * 1024 * 1024, 50);
1917 }
1918 } 2074 }
1919 2075
1920 void Heap::collectAllGarbage() 2076 void Heap::collectAllGarbage()
1921 { 2077 {
1922 // FIXME: oilpan: we should perform a single GC and everything 2078 // FIXME: oilpan: we should perform a single GC and everything
1923 // should die. Unfortunately it is not the case for all objects 2079 // should die. Unfortunately it is not the case for all objects
1924 // because the hierarchy was not completely moved to the heap and 2080 // because the hierarchy was not completely moved to the heap and
1925 // some heap allocated objects own objects that contain persistents 2081 // some heap allocated objects own objects that contain persistents
1926 // pointing to other heap allocated objects. 2082 // pointing to other heap allocated objects.
1927 for (int i = 0; i < 5; i++) 2083 for (int i = 0; i < 5; i++)
1928 collectGarbage(ThreadState::NoHeapPointersOnStack); 2084 collectGarbage(ThreadState::NoHeapPointersOnStack);
1929 } 2085 }
1930 2086
1931 void Heap::setForcePreciseGCForTesting() 2087 void Heap::setForcePreciseGCForTesting()
1932 { 2088 {
1933 ThreadState::current()->setForcePreciseGCForTesting(true); 2089 ThreadState::current()->setForcePreciseGCForTesting(true);
1934 } 2090 }
1935 2091
2092 template<typename Header>
2093 void ThreadHeap<Header>::setShutdown()
2094 {
2095 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) {
2096 page->setShutdown();
2097 }
2098 for (LargeHeapObject<Header>* current = m_firstLargeHeapObject; current; cur rent = current->next()) {
2099 current->setShutdown();
2100 }
2101 }
2102
1936 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize) 2103 void Heap::getHeapSpaceSize(uint64_t* objectSpaceSize, uint64_t* allocatedSpaceS ize)
1937 { 2104 {
1938 *objectSpaceSize = 0; 2105 *objectSpaceSize = 0;
1939 *allocatedSpaceSize = 0; 2106 *allocatedSpaceSize = 0;
1940 ASSERT(ThreadState::isAnyThreadInGC()); 2107 ASSERT(ThreadState::isAnyThreadInGC());
1941 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( ); 2108 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads( );
1942 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator; 2109 typedef ThreadState::AttachedThreadStateSet::iterator ThreadStateIterator;
1943 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) { 2110 for (ThreadStateIterator it = threads.begin(), end = threads.end(); it != en d; ++it) {
1944 *objectSpaceSize += (*it)->stats().totalObjectSpace(); 2111 *objectSpaceSize += (*it)->stats().totalObjectSpace();
1945 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace(); 2112 *allocatedSpaceSize += (*it)->stats().totalAllocatedSpace();
(...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after
1984 template class ThreadHeap<FinalizedHeapObjectHeader>; 2151 template class ThreadHeap<FinalizedHeapObjectHeader>;
1985 template class ThreadHeap<HeapObjectHeader>; 2152 template class ThreadHeap<HeapObjectHeader>;
1986 2153
1987 Visitor* Heap::s_markingVisitor; 2154 Visitor* Heap::s_markingVisitor;
1988 CallbackStack* Heap::s_markingStack; 2155 CallbackStack* Heap::s_markingStack;
1989 CallbackStack* Heap::s_weakCallbackStack; 2156 CallbackStack* Heap::s_weakCallbackStack;
1990 CallbackStack* Heap::s_ephemeronStack; 2157 CallbackStack* Heap::s_ephemeronStack;
1991 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; 2158 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache;
1992 bool Heap::s_shutdownCalled = false; 2159 bool Heap::s_shutdownCalled = false;
1993 bool Heap::s_lastGCWasConservative = false; 2160 bool Heap::s_lastGCWasConservative = false;
2161 HeapMemoryPool* Heap::s_memoryPool;
2162 HeapOrphanedPagePool* Heap::s_orphanedPagePool;
1994 } 2163 }
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698