Chromium Code Reviews

Side by Side Diff: Source/platform/heap/Heap.cpp

Issue 840223002: Oilpan: Remove duplicated code between HeapPage and LargeObject (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 10 months ago
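
For orientation before the diff: this CL splits the old all-in-one ThreadHeap (which kept parallel page and large-object lists and duplicated the sweeping loops for each) into a common ThreadHeap base that owns the m_firstPage / m_firstUnsweptPage lists, plus two subclasses, ThreadHeapForHeapPage and ThreadHeapForLargeObject, that override only the page-type-specific lazySweepPages() step. The sketch below is a minimal, self-contained illustration of that shape and is not the actual Blink code; the Address alias, the stub bodies, and the printf tracing are stand-ins added for the example.

// Minimal sketch of the class split introduced by this CL (simplified
// stand-ins, not the real Blink classes from Source/platform/heap/).
#include <cstddef>
#include <cstdio>

using Address = char*;   // Stand-in; Blink's Address is also a char*.
class BaseHeapPage;      // Common base of HeapPage and LargeObject in the CL.

class ThreadHeap {
public:
    virtual ~ThreadHeap() { }

    // Shared driver: both heap kinds walk the same unswept-page list and
    // defer only "sweep until this allocation can be satisfied". The real
    // driver also checks sweepForbidden() and enters ScriptForbiddenScope.
    Address lazySweep(size_t allocationSize, size_t gcInfoIndex)
    {
        return lazySweepPages(allocationSize, gcInfoIndex);
    }

protected:
    virtual Address lazySweepPages(size_t allocationSize, size_t gcInfoIndex) = 0;

    BaseHeapPage* m_firstPage = nullptr;
    BaseHeapPage* m_firstUnsweptPage = nullptr;
};

class ThreadHeapForHeapPage : public ThreadHeap {
protected:
    Address lazySweepPages(size_t allocationSize, size_t gcInfoIndex) override
    {
        // Real code: sweep normal pages and stop as soon as a free-list
        // allocation of allocationSize succeeds.
        std::printf("sweep normal pages for %zu bytes (gcInfoIndex %zu)\n",
                    allocationSize, gcInfoIndex);
        return nullptr;
    }
};

class ThreadHeapForLargeObject : public ThreadHeap {
protected:
    Address lazySweepPages(size_t allocationSize, size_t gcInfoIndex) override
    {
        // Real code: sweep large objects and stop once at least
        // allocationSize bytes have been reclaimed.
        std::printf("sweep large objects for %zu bytes (gcInfoIndex %zu)\n",
                    allocationSize, gcInfoIndex);
        return nullptr;
    }
};

int main()
{
    ThreadHeapForHeapPage normalHeap;
    ThreadHeapForLargeObject largeObjectHeap;
    normalHeap.lazySweep(64, 1);
    largeObjectHeap.lazySweep(1 << 17, 1);
    return 0;
}
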
OLD | NEW
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 514 matching lines...)
525 525
526 void LargeObject::markUnmarkedObjectsDead() 526 void LargeObject::markUnmarkedObjectsDead()
527 { 527 {
528 HeapObjectHeader* header = heapObjectHeader(); 528 HeapObjectHeader* header = heapObjectHeader();
529 if (header->isMarked()) 529 if (header->isMarked())
530 header->unmark(); 530 header->unmark();
531 else 531 else
532 header->markDead(); 532 header->markDead();
533 } 533 }
534 534
535 void LargeObject::removeFromHeap(ThreadHeap* heap) 535 void LargeObject::removeFromHeap()
536 { 536 {
537 heap->freeLargeObject(this); 537 static_cast<ThreadHeapForLargeObject*>(heap())->freeLargeObject(this);
538 }
539
540 ThreadHeap::ThreadHeap(ThreadState* state, int index)
541 : m_currentAllocationPoint(nullptr)
542 , m_remainingAllocationSize(0)
543 , m_lastRemainingAllocationSize(0)
544 , m_firstPage(nullptr)
545 , m_firstLargeObject(nullptr)
546 , m_firstUnsweptPage(nullptr)
547 , m_firstUnsweptLargeObject(nullptr)
548 , m_threadState(state)
549 , m_index(index)
550 , m_promptlyFreedSize(0)
551 #if ENABLE(GC_PROFILING)
552 , m_cumulativeAllocationSize(0)
553 , m_allocationCount(0)
554 , m_inlineAllocationCount(0)
555 #endif
556 {
557 clearFreeLists();
558 } 538 }
559 539
560 FreeList::FreeList() 540 FreeList::FreeList()
561 : m_biggestFreeListIndex(0) 541 : m_biggestFreeListIndex(0)
562 { 542 {
563 } 543 }
564 544
545 ThreadHeap::ThreadHeap(ThreadState* state, int index)
546 : m_firstPage(nullptr)
547 , m_firstUnsweptPage(nullptr)
548 , m_threadState(state)
549 , m_index(index)
550 #if ENABLE(GC_PROFILING)
551 , m_cumulativeAllocationSize(0)
552 , m_allocationCount(0)
553 , m_inlineAllocationCount(0)
554 #endif
555 {
556 }
557
558 ThreadHeapForHeapPage::ThreadHeapForHeapPage(ThreadState* state, int index)
559 : ThreadHeap(state, index)
560 , m_currentAllocationPoint(nullptr)
561 , m_remainingAllocationSize(0)
562 , m_lastRemainingAllocationSize(0)
563 , m_promptlyFreedSize(0)
564 {
565 clearFreeLists();
566 }
567
568 ThreadHeapForLargeObject::ThreadHeapForLargeObject(ThreadState* state, int index)
569 : ThreadHeap(state, index)
570 {
571 }
572
565 ThreadHeap::~ThreadHeap() 573 ThreadHeap::~ThreadHeap()
566 { 574 {
567 ASSERT(!m_firstPage); 575 ASSERT(!m_firstPage);
568 ASSERT(!m_firstLargeObject);
569 ASSERT(!m_firstUnsweptPage); 576 ASSERT(!m_firstUnsweptPage);
570 ASSERT(!m_firstUnsweptLargeObject);
571 } 577 }
572 578
573 void ThreadHeap::cleanupPages() 579 void ThreadHeap::cleanupPages()
574 { 580 {
575 clearFreeLists(); 581 clearFreeLists();
576 582
577 ASSERT(!m_firstUnsweptPage); 583 ASSERT(!m_firstUnsweptPage);
578 ASSERT(!m_firstUnsweptLargeObject);
579 // Add the ThreadHeap's pages to the orphanedPagePool. 584 // Add the ThreadHeap's pages to the orphanedPagePool.
580 for (HeapPage* page = m_firstPage; page; page = page->m_next) { 585 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
581 Heap::decreaseAllocatedSpace(blinkPageSize); 586 Heap::decreaseAllocatedSpace(page->size());
582 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); 587 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
583 } 588 }
584 m_firstPage = nullptr; 589 m_firstPage = nullptr;
585
586 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->m_next) {
587 Heap::decreaseAllocatedSpace(largeObject->size());
588 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
589 }
590 m_firstLargeObject = nullptr;
591 } 590 }
592 591
593 void ThreadHeap::updateRemainingAllocationSize() 592 void ThreadHeapForHeapPage::updateRemainingAllocationSize()
594 { 593 {
595 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { 594 if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
596 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); 595 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
597 m_lastRemainingAllocationSize = remainingAllocationSize(); 596 m_lastRemainingAllocationSize = remainingAllocationSize();
598 } 597 }
599 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); 598 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
600 } 599 }
601 600
602 void ThreadHeap::setAllocationPoint(Address point, size_t size) 601 void ThreadHeapForHeapPage::setAllocationPoint(Address point, size_t size)
603 { 602 {
604 #if ENABLE(ASSERT) 603 #if ENABLE(ASSERT)
605 if (point) { 604 if (point) {
606 ASSERT(size); 605 ASSERT(size);
607 BaseHeapPage* page = pageFromObject(point); 606 BaseHeapPage* page = pageFromObject(point);
608 ASSERT(!page->isLargeObject()); 607 ASSERT(!page->isLargeObject());
609 ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize()); 608 ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize());
610 } 609 }
611 #endif 610 #endif
612 if (hasCurrentAllocationArea()) 611 if (hasCurrentAllocationArea()) {
613 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); 612 addToFreeList(currentAllocationPoint(), remainingAllocationSize());
613 }
614 updateRemainingAllocationSize(); 614 updateRemainingAllocationSize();
615 m_currentAllocationPoint = point; 615 m_currentAllocationPoint = point;
616 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; 616 m_lastRemainingAllocationSize = m_remainingAllocationSize = size;
617 } 617 }
618 618
619 Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) 619 Address ThreadHeapForHeapPage::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
620 { 620 {
621 ASSERT(allocationSize > remainingAllocationSize()); 621 ASSERT(allocationSize > remainingAllocationSize());
622 ASSERT(allocationSize >= allocationGranularity); 622 ASSERT(allocationSize >= allocationGranularity);
623 623
624 #if ENABLE(GC_PROFILING) 624 #if ENABLE(GC_PROFILING)
625 m_threadState->snapshotFreeListIfNecessary(); 625 m_threadState->snapshotFreeListIfNecessary();
626 #endif 626 #endif
627 627
628 // 1. If this allocation is big enough, allocate a large object. 628 // 1. If this allocation is big enough, allocate a large object.
629 if (allocationSize >= largeObjectSizeThreshold) 629 if (allocationSize >= largeObjectSizeThreshold)
630 return allocateLargeObject(allocationSize, gcInfoIndex); 630 return static_cast<ThreadHeapForLargeObject*>(threadState()->heap(LargeObjectHeap))->allocateLargeObject(allocationSize, gcInfoIndex);
631 631
632 // 2. Check if we should trigger a GC. 632 // 2. Check if we should trigger a GC.
633 updateRemainingAllocationSize(); 633 updateRemainingAllocationSize();
634 threadState()->scheduleGCIfNeeded(); 634 threadState()->scheduleGCIfNeeded();
635 635
636 // 3. Try to allocate from a free list. 636 // 3. Try to allocate from a free list.
637 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); 637 Address result = allocateFromFreeList(allocationSize, gcInfoIndex);
638 if (result) 638 if (result)
639 return result; 639 return result;
640 640
641 // 4. Reset the allocation point. 641 // 4. Reset the allocation point.
642 setAllocationPoint(nullptr, 0); 642 setAllocationPoint(nullptr, 0);
643 643
644 // 5. Lazily sweep pages of this heap until we find a freed area for 644 // 5. Lazily sweep pages of this heap until we find a freed area for
645 // this allocation or we finish sweeping all pages of this heap. 645 // this allocation or we finish sweeping all pages of this heap.
646 result = lazySweepPages(allocationSize, gcInfoIndex); 646 result = lazySweep(allocationSize, gcInfoIndex);
647 if (result) 647 if (result)
648 return result; 648 return result;
649 649
650 // 6. Coalesce promptly freed areas and then try to allocate from a free 650 // 6. Coalesce promptly freed areas and then try to allocate from a free
651 // list. 651 // list.
652 if (coalesce()) { 652 if (coalesce()) {
653 result = allocateFromFreeList(allocationSize, gcInfoIndex); 653 result = allocateFromFreeList(allocationSize, gcInfoIndex);
654 if (result) 654 if (result)
655 return result; 655 return result;
656 } 656 }
657 657
658 // 7. Complete sweeping. 658 // 7. Complete sweeping.
659 threadState()->completeSweep(); 659 threadState()->completeSweep();
660 660
661 // 8. Add a new page to this heap. 661 // 8. Add a new page to this heap.
662 allocatePage(); 662 allocatePage();
663 663
664 // 9. Try to allocate from a free list. This allocation must succeed. 664 // 9. Try to allocate from a free list. This allocation must succeed.
665 result = allocateFromFreeList(allocationSize, gcInfoIndex); 665 result = allocateFromFreeList(allocationSize, gcInfoIndex);
666 RELEASE_ASSERT(result); 666 RELEASE_ASSERT(result);
667 return result; 667 return result;
668 } 668 }
669 669
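
The nine numbered comments in ThreadHeapForHeapPage::outOfLineAllocate above spell out the slow-path allocation order for normal pages. As a hedged summary, the compilable sketch below mirrors only that ordering; every helper is a do-nothing stub standing in for the real Blink function of the same name, and the threshold constant is a placeholder, not the real largeObjectSizeThreshold value.

// Sketch of the slow-path ordering only; helper bodies are fake stubs.
#include <cstddef>

using Address = char*;  // Stand-in for Blink's Address.

class SlowPathSketch {
public:
    Address outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
    {
        // 1. Big enough? Route to the large-object heap.
        if (allocationSize >= kLargeObjectSizeThreshold)
            return allocateLargeObject(allocationSize, gcInfoIndex);

        // 2. Possibly schedule a GC before doing any real work.
        scheduleGCIfNeeded();

        // 3. Try the free list.
        if (Address result = allocateFromFreeList(allocationSize, gcInfoIndex))
            return result;

        // 4. Give up the current bump-allocation area.
        setAllocationPoint(nullptr, 0);

        // 5. Lazily sweep until a fitting slot shows up.
        if (Address result = lazySweep(allocationSize, gcInfoIndex))
            return result;

        // 6. Coalesce promptly freed blocks, then retry the free list.
        if (coalesce()) {
            if (Address result = allocateFromFreeList(allocationSize, gcInfoIndex))
                return result;
        }

        // 7.-9. Finish sweeping, grow the heap by one page, and retry;
        // in the real code this final attempt must succeed (RELEASE_ASSERT).
        completeSweep();
        allocatePage();
        return allocateFromFreeList(allocationSize, gcInfoIndex);
    }

private:
    // Placeholder threshold (assumption), not the real largeObjectSizeThreshold.
    static constexpr size_t kLargeObjectSizeThreshold = 64 * 1024;

    // Stubs standing in for the real Blink helpers.
    Address allocateLargeObject(size_t, size_t) { return nullptr; }
    void scheduleGCIfNeeded() { }
    Address allocateFromFreeList(size_t, size_t) { return nullptr; }
    void setAllocationPoint(Address, size_t) { }
    Address lazySweep(size_t, size_t) { return nullptr; }
    bool coalesce() { return false; }
    void completeSweep() { }
    void allocatePage() { }
};

int main()
{
    SlowPathSketch sketch;
    sketch.outOfLineAllocate(128, 1);
    return 0;
}
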
670 Address ThreadHeap::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) 670 Address ThreadHeapForHeapPage::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex)
671 { 671 {
672 // Try reusing a block from the largest bin. The underlying reasoning 672 // Try reusing a block from the largest bin. The underlying reasoning
673 // being that we want to amortize this slow allocation call by carving 673 // being that we want to amortize this slow allocation call by carving
674 // off as large a free block as possible in one go; a block that will 674 // off as large a free block as possible in one go; a block that will
675 // service this block and let following allocations be serviced quickly 675 // service this block and let following allocations be serviced quickly
676 // by bump allocation. 676 // by bump allocation.
677 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; 677 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex;
678 int index = m_freeList.m_biggestFreeListIndex; 678 int index = m_freeList.m_biggestFreeListIndex;
679 for (; index > 0; --index, bucketSize >>= 1) { 679 for (; index > 0; --index, bucketSize >>= 1) {
680 FreeListEntry* entry = m_freeList.m_freeLists[index]; 680 FreeListEntry* entry = m_freeList.m_freeLists[index];
(...skipping 14 matching lines...)
695 } 695 }
696 } 696 }
697 m_freeList.m_biggestFreeListIndex = index; 697 m_freeList.m_biggestFreeListIndex = index;
698 return nullptr; 698 return nullptr;
699 } 699 }
700 700
701 void ThreadHeap::prepareForSweep() 701 void ThreadHeap::prepareForSweep()
702 { 702 {
703 ASSERT(!threadState()->isInGC()); 703 ASSERT(!threadState()->isInGC());
704 ASSERT(!m_firstUnsweptPage); 704 ASSERT(!m_firstUnsweptPage);
705 ASSERT(!m_firstUnsweptLargeObject);
706 705
707 // Move all pages to a list of unswept pages. 706 // Move all pages to a list of unswept pages.
708 m_firstUnsweptPage = m_firstPage; 707 m_firstUnsweptPage = m_firstPage;
709 m_firstUnsweptLargeObject = m_firstLargeObject;
710 m_firstPage = nullptr; 708 m_firstPage = nullptr;
711 m_firstLargeObject = nullptr;
712 } 709 }
713 710
714 Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) 711 Address ThreadHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex)
715 { 712 {
716 ASSERT(!hasCurrentAllocationArea());
717 ASSERT(allocationSize < largeObjectSizeThreshold);
718
719 // If there are no pages to be swept, return immediately. 713 // If there are no pages to be swept, return immediately.
720 if (!m_firstUnsweptPage) 714 if (!m_firstUnsweptPage)
721 return nullptr; 715 return nullptr;
722 716
723 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 717 RELEASE_ASSERT(threadState()->isSweepingInProgress());
724 718
725 // lazySweepPages() can be called recursively if finalizers invoked in 719 // lazySweepPages() can be called recursively if finalizers invoked in
726 // page->sweep() allocate memory and the allocation triggers 720 // page->sweep() allocate memory and the allocation triggers
727 // lazySweepPages(). This check prevents the sweeping from being executed 721 // lazySweepPages(). This check prevents the sweeping from being executed
728 // recursively. 722 // recursively.
729 if (threadState()->sweepForbidden()) 723 if (threadState()->sweepForbidden())
730 return nullptr; 724 return nullptr;
731 725
732 TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages"); 726 TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages");
733 ThreadState::SweepForbiddenScope scope(m_threadState); 727 ThreadState::SweepForbiddenScope scope(threadState());
734 728
735 if (threadState()->isMainThread()) 729 if (threadState()->isMainThread())
736 ScriptForbiddenScope::enter(); 730 ScriptForbiddenScope::enter();
737 731
732 Address result = lazySweepPages(allocationSize, gcInfoIndex);
733
734 if (threadState()->isMainThread())
735 ScriptForbiddenScope::exit();
736 return result;
737 }
738
739 Address ThreadHeapForHeapPage::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
740 {
741 ASSERT(!hasCurrentAllocationArea());
738 Address result = nullptr; 742 Address result = nullptr;
739 while (m_firstUnsweptPage) { 743 while (m_firstUnsweptPage) {
740 HeapPage* page = m_firstUnsweptPage; 744 BaseHeapPage* page = m_firstUnsweptPage;
741 if (page->isEmpty()) { 745 if (page->isEmpty()) {
742 page->unlink(&m_firstUnsweptPage); 746 page->unlink(&m_firstUnsweptPage);
743 page->removeFromHeap(this); 747 page->removeFromHeap();
744 } else { 748 } else {
745 // Sweep a page and move the page from m_firstUnsweptPages to 749 // Sweep a page and move the page from m_firstUnsweptPages to
746 // m_firstPages. 750 // m_firstPages.
747 page->sweep(); 751 page->sweep();
748 page->unlink(&m_firstUnsweptPage); 752 page->unlink(&m_firstUnsweptPage);
749 page->link(&m_firstPage); 753 page->link(&m_firstPage);
750 page->markAsSwept(); 754 page->markAsSwept();
751 755
756 // For HeapPage, stop lazy sweeping once we find a slot to
757 // allocate a new object.
752 result = allocateFromFreeList(allocationSize, gcInfoIndex); 758 result = allocateFromFreeList(allocationSize, gcInfoIndex);
753 if (result) 759 if (result)
754 break; 760 break;
755 } 761 }
756 } 762 }
757
758 if (threadState()->isMainThread())
759 ScriptForbiddenScope::exit();
760 return result; 763 return result;
761 } 764 }
762 765
763 bool ThreadHeap::lazySweepLargeObjects(size_t allocationSize) 766 Address ThreadHeapForLargeObject::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
764 { 767 {
765 ASSERT(allocationSize >= largeObjectSizeThreshold); 768 Address result = nullptr;
766
767 // If there are no large objects to be swept, return immediately.
768 if (!m_firstUnsweptLargeObject)
769 return false;
770
771 RELEASE_ASSERT(threadState()->isSweepingInProgress());
772
773 // lazySweepLargeObjects() can be called recursively if finalizers invoked
774 // in page->sweep() allocate memory and the allocation triggers
775 // lazySweepLargeObjects(). This check prevents the sweeping from being
776 // executed recursively.
777 if (threadState()->sweepForbidden())
778 return false;
779
780 TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepLargeObjects");
781 ThreadState::SweepForbiddenScope scope(m_threadState);
782
783 if (threadState()->isMainThread())
784 ScriptForbiddenScope::enter();
785
786 bool result = false;
787 size_t sweptSize = 0; 769 size_t sweptSize = 0;
788 while (m_firstUnsweptLargeObject) { 770 while (m_firstUnsweptPage) {
789 LargeObject* largeObject = m_firstUnsweptLargeObject; 771 BaseHeapPage* page = m_firstUnsweptPage;
790 if (largeObject->isEmpty()) { 772 if (page->isEmpty()) {
791 sweptSize += largeObject->size(); 773 sweptSize += static_cast<LargeObject*>(page)->payloadSize() + sizeof(HeapObjectHeader);
792 largeObject->unlink(&m_firstUnsweptLargeObject); 774 page->unlink(&m_firstUnsweptPage);
793 largeObject->removeFromHeap(this); 775 page->removeFromHeap();
794 776 // For LargeObject, stop lazy sweeping once we have swept
795 // If we have swept large objects more than allocationSize, 777 // more than allocationSize bytes.
796 // we stop the lazy sweeping.
797 if (sweptSize >= allocationSize) { 778 if (sweptSize >= allocationSize) {
798 result = true; 779 result = doAllocateLargeObject(allocationSize, gcInfoIndex);
780 ASSERT(result);
799 break; 781 break;
800 } 782 }
801 } else { 783 } else {
802 // Sweep a large object and move the large object from 784 // Sweep a page and move the page from m_firstUnsweptPages to
803 // m_firstUnsweptLargeObjects to m_firstLargeObjects. 785 // m_firstPages.
804 largeObject->sweep(); 786 page->sweep();
805 largeObject->unlink(&m_firstUnsweptLargeObject); 787 page->unlink(&m_firstUnsweptPage);
806 largeObject->link(&m_firstLargeObject); 788 page->link(&m_firstPage);
807 largeObject->markAsSwept(); 789 page->markAsSwept();
808 } 790 }
809 } 791 }
810
811 if (threadState()->isMainThread())
812 ScriptForbiddenScope::exit();
813 return result; 792 return result;
814 } 793 }
815 794
816 void ThreadHeap::completeSweep() 795 void ThreadHeap::completeSweep()
817 { 796 {
818 RELEASE_ASSERT(threadState()->isSweepingInProgress()); 797 RELEASE_ASSERT(threadState()->isSweepingInProgress());
819 ASSERT(threadState()->sweepForbidden()); 798 ASSERT(threadState()->sweepForbidden());
820 799
821 if (threadState()->isMainThread()) 800 if (threadState()->isMainThread())
822 ScriptForbiddenScope::enter(); 801 ScriptForbiddenScope::enter();
823 802
824 // Sweep normal pages.
825 while (m_firstUnsweptPage) { 803 while (m_firstUnsweptPage) {
826 HeapPage* page = m_firstUnsweptPage; 804 BaseHeapPage* page = m_firstUnsweptPage;
827 if (page->isEmpty()) { 805 if (page->isEmpty()) {
828 page->unlink(&m_firstUnsweptPage); 806 page->unlink(&m_firstUnsweptPage);
829 page->removeFromHeap(this); 807 page->removeFromHeap();
830 } else { 808 } else {
831 // Sweep a page and move the page from m_firstUnsweptPages to 809 // Sweep a page and move the page from m_firstUnsweptPages to
832 // m_firstPages. 810 // m_firstPages.
833 page->sweep(); 811 page->sweep();
834 page->unlink(&m_firstUnsweptPage); 812 page->unlink(&m_firstUnsweptPage);
835 page->link(&m_firstPage); 813 page->link(&m_firstPage);
836 page->markAsSwept(); 814 page->markAsSwept();
837 } 815 }
838 } 816 }
839 817
840 // Sweep large objects.
841 while (m_firstUnsweptLargeObject) {
842 LargeObject* largeObject = m_firstUnsweptLargeObject;
843 if (largeObject->isEmpty()) {
844 largeObject->unlink(&m_firstUnsweptLargeObject);
845 largeObject->removeFromHeap(this);
846 } else {
847 // Sweep a large object and move the large object from
848 // m_firstUnsweptLargeObjects to m_firstLargeObjects.
849 largeObject->sweep();
850 largeObject->unlink(&m_firstUnsweptLargeObject);
851 largeObject->link(&m_firstLargeObject);
852 largeObject->markAsSwept();
853 }
854 }
855
856 if (threadState()->isMainThread()) 818 if (threadState()->isMainThread())
857 ScriptForbiddenScope::exit(); 819 ScriptForbiddenScope::exit();
858 } 820 }
859 821
860 #if ENABLE(ASSERT) 822 #if ENABLE(ASSERT)
861 static bool isLargeObjectAligned(LargeObject* largeObject, Address address)
862 {
863 // Check that a large object is blinkPageSize aligned (modulo the osPageSize
864 // for the guard page).
865 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject));
866 }
867 #endif
868
869 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING)
870 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address) 823 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address)
871 { 824 {
872 for (HeapPage* page = m_firstPage; page; page = page->next()) { 825 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
873 if (page->contains(address)) 826 if (page->contains(address))
874 return page; 827 return page;
875 } 828 }
876 for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { 829 for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
877 if (page->contains(address)) 830 if (page->contains(address))
878 return page; 831 return page;
879 } 832 }
880 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
881 ASSERT(isLargeObjectAligned(largeObject, address));
882 if (largeObject->contains(address))
883 return largeObject;
884 }
885 for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; largeObject = largeObject->next()) {
886 ASSERT(isLargeObjectAligned(largeObject, address));
887 if (largeObject->contains(address))
888 return largeObject;
889 }
890 return nullptr; 833 return nullptr;
891 } 834 }
892 #endif 835 #endif
893 836
894 #if ENABLE(GC_PROFILING) 837 #if ENABLE(GC_PROFILING)
895 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 838 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0
896 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) 839 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
897 { 840 {
898 ASSERT(isConsistentForSweeping()); 841 ASSERT(isConsistentForSweeping());
899 size_t previousPageCount = info->pageCount; 842 size_t previousPageCount = info->pageCount;
900 843
901 json->beginArray("pages"); 844 json->beginArray("pages");
902 for (HeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { 845 for (BaseHeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
903 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. 846 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
904 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { 847 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) {
905 json->beginArray(); 848 json->beginArray();
906 json->pushInteger(reinterpret_cast<intptr_t>(page)); 849 json->pushInteger(reinterpret_cast<intptr_t>(page));
907 page->snapshot(json, info); 850 page->snapshot(json, info);
908 json->endArray(); 851 json->endArray();
909 } else { 852 } else {
910 page->snapshot(0, info); 853 page->snapshot(0, info);
911 } 854 }
912 } 855 }
913 json->endArray(); 856 json->endArray();
914 857
915 json->beginArray("largeObjects");
916 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
917 json->beginDictionary();
918 largeObject->snapshot(json, info);
919 json->endDictionary();
920 }
921 json->endArray();
922
923 json->setInteger("pageCount", info->pageCount - previousPageCount); 858 json->setInteger("pageCount", info->pageCount - previousPageCount);
924 } 859 }
925 860
926 void ThreadHeap::incrementMarkedObjectsAge() 861 void ThreadHeap::incrementMarkedObjectsAge()
927 { 862 {
928 for (HeapPage* page = m_firstPage; page; page = page->next()) 863 for (HeapPage* page = m_firstPage; page; page = page->next())
929 page->incrementMarkedObjectsAge(); 864 page->incrementMarkedObjectsAge();
930 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) 865 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next())
931 largeObject->incrementMarkedObjectsAge(); 866 largeObject->incrementMarkedObjectsAge();
932 } 867 }
(...skipping 26 matching lines...)
959 // space. 894 // space.
960 if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList()) 895 if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList())
961 return; 896 return;
962 #endif 897 #endif
963 int index = bucketIndexForSize(size); 898 int index = bucketIndexForSize(size);
964 entry->link(&m_freeLists[index]); 899 entry->link(&m_freeLists[index]);
965 if (index > m_biggestFreeListIndex) 900 if (index > m_biggestFreeListIndex)
966 m_biggestFreeListIndex = index; 901 m_biggestFreeListIndex = index;
967 } 902 }
968 903
969 bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize) 904 bool ThreadHeapForHeapPage::expandObject(HeapObjectHeader* header, size_t newSize)
970 { 905 {
971 // It's possible that Vector requests a smaller expanded size because 906 // It's possible that Vector requests a smaller expanded size because
972 // Vector::shrinkCapacity can set a capacity smaller than the actual payload 907 // Vector::shrinkCapacity can set a capacity smaller than the actual payload
973 // size. 908 // size.
974 if (header->payloadSize() >= newSize) 909 if (header->payloadSize() >= newSize)
975 return true; 910 return true;
976 size_t allocationSize = allocationSizeFromSize(newSize); 911 size_t allocationSize = allocationSizeFromSize(newSize);
977 ASSERT(allocationSize > header->size()); 912 ASSERT(allocationSize > header->size());
978 size_t expandSize = allocationSize - header->size(); 913 size_t expandSize = allocationSize - header->size();
979 if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) { 914 if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) {
980 m_currentAllocationPoint += expandSize; 915 m_currentAllocationPoint += expandSize;
981 m_remainingAllocationSize -= expandSize; 916 m_remainingAllocationSize -= expandSize;
982 917
983 // Unpoison the memory used for the object (payload). 918 // Unpoison the memory used for the object (payload).
984 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize); 919 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize);
985 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize); 920 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize);
986 header->setSize(allocationSize); 921 header->setSize(allocationSize);
987 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); 922 ASSERT(findPageFromAddress(header->payloadEnd() - 1));
988 return true; 923 return true;
989 } 924 }
990 return false; 925 return false;
991 } 926 }
992 927
993 void ThreadHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) 928 void ThreadHeapForHeapPage::shrinkObject(HeapObjectHeader* header, size_t newSize)
994 { 929 {
995 ASSERT(header->payloadSize() > newSize); 930 ASSERT(header->payloadSize() > newSize);
996 size_t allocationSize = allocationSizeFromSize(newSize); 931 size_t allocationSize = allocationSizeFromSize(newSize);
997 ASSERT(header->size() > allocationSize); 932 ASSERT(header->size() > allocationSize);
998 size_t shrinkSize = header->size() - allocationSize; 933 size_t shrinkSize = header->size() - allocationSize;
999 if (header->payloadEnd() == m_currentAllocationPoint) { 934 if (header->payloadEnd() == m_currentAllocationPoint) {
1000 m_currentAllocationPoint -= shrinkSize; 935 m_currentAllocationPoint -= shrinkSize;
1001 m_remainingAllocationSize += shrinkSize; 936 m_remainingAllocationSize += shrinkSize;
1002 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize); 937 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize);
1003 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize); 938 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize);
1004 header->setSize(allocationSize); 939 header->setSize(allocationSize);
1005 } else { 940 } else {
1006 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); 941 ASSERT(shrinkSize >= sizeof(HeapObjectHeader));
1007 ASSERT(header->gcInfoIndex() > 0); 942 ASSERT(header->gcInfoIndex() > 0);
1008 HeapObjectHeader* freedHeader = new (NotNull, header->payloadEnd() - shrinkSize) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); 943 HeapObjectHeader* freedHeader = new (NotNull, header->payloadEnd() - shrinkSize) HeapObjectHeader(shrinkSize, header->gcInfoIndex());
1009 freedHeader->markPromptlyFreed(); 944 freedHeader->markPromptlyFreed();
1010 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); 945 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header)));
1011 m_promptlyFreedSize += shrinkSize; 946 m_promptlyFreedSize += shrinkSize;
1012 header->setSize(allocationSize); 947 header->setSize(allocationSize);
1013 } 948 }
1014 } 949 }
1015 950
1016 void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header) 951 void ThreadHeapForHeapPage::promptlyFreeObject(HeapObjectHeader* header)
1017 { 952 {
1018 ASSERT(!m_threadState->sweepForbidden()); 953 ASSERT(!threadState()->sweepForbidden());
1019 header->checkHeader(); 954 header->checkHeader();
1020 Address address = reinterpret_cast<Address>(header); 955 Address address = reinterpret_cast<Address>(header);
1021 Address payload = header->payload(); 956 Address payload = header->payload();
1022 size_t size = header->size(); 957 size_t size = header->size();
1023 size_t payloadSize = header->payloadSize(); 958 size_t payloadSize = header->payloadSize();
1024 ASSERT(size > 0); 959 ASSERT(size > 0);
1025 ASSERT(pageFromObject(address) == findPageFromAddress(address)); 960 ASSERT(pageFromObject(address) == findPageFromAddress(address));
1026 961
1027 { 962 {
1028 ThreadState::SweepForbiddenScope forbiddenScope(m_threadState); 963 ThreadState::SweepForbiddenScope forbiddenScope(threadState());
1029 header->finalize(payload, payloadSize); 964 header->finalize(payload, payloadSize);
1030 if (address + size == m_currentAllocationPoint) { 965 if (address + size == m_currentAllocationPoint) {
1031 m_currentAllocationPoint = address; 966 m_currentAllocationPoint = address;
1032 if (m_lastRemainingAllocationSize == m_remainingAllocationSize) { 967 if (m_lastRemainingAllocationSize == m_remainingAllocationSize) {
1033 Heap::decreaseAllocatedObjectSize(size); 968 Heap::decreaseAllocatedObjectSize(size);
1034 m_lastRemainingAllocationSize += size; 969 m_lastRemainingAllocationSize += size;
1035 } 970 }
1036 m_remainingAllocationSize += size; 971 m_remainingAllocationSize += size;
1037 FILL_ZERO_IF_PRODUCTION(address, size); 972 FILL_ZERO_IF_PRODUCTION(address, size);
1038 ASAN_POISON_MEMORY_REGION(address, size); 973 ASAN_POISON_MEMORY_REGION(address, size);
1039 return; 974 return;
1040 } 975 }
1041 FILL_ZERO_IF_PRODUCTION(payload, payloadSize); 976 FILL_ZERO_IF_PRODUCTION(payload, payloadSize);
1042 header->markPromptlyFreed(); 977 header->markPromptlyFreed();
1043 } 978 }
1044 979
1045 m_promptlyFreedSize += size; 980 m_promptlyFreedSize += size;
1046 } 981 }
1047 982
1048 bool ThreadHeap::coalesce() 983 bool ThreadHeapForHeapPage::coalesce()
1049 { 984 {
1050 // Don't coalesce heaps if there are not enough promptly freed entries 985 // Don't coalesce heaps if there are not enough promptly freed entries
1051 // to be coalesced. 986 // to be coalesced.
1052 // 987 //
1053 // FIXME: This threshold is determined just to optimize blink_perf 988 // FIXME: This threshold is determined just to optimize blink_perf
1054 // benchmarks. Coalescing is very sensitive to the threshold and 989 // benchmarks. Coalescing is very sensitive to the threshold and
1055 // we need further investigations on the coalescing scheme. 990 // we need further investigations on the coalescing scheme.
1056 if (m_promptlyFreedSize < 1024 * 1024) 991 if (m_promptlyFreedSize < 1024 * 1024)
1057 return false; 992 return false;
1058 993
1059 if (m_threadState->sweepForbidden()) 994 if (threadState()->sweepForbidden())
1060 return false; 995 return false;
1061 996
1062 ASSERT(!hasCurrentAllocationArea()); 997 ASSERT(!hasCurrentAllocationArea());
1063 TRACE_EVENT0("blink_gc", "ThreadHeap::coalesce"); 998 TRACE_EVENT0("blink_gc", "ThreadHeap::coalesce");
1064 999
1065 // Rebuild free lists. 1000 // Rebuild free lists.
1066 m_freeList.clear(); 1001 m_freeList.clear();
1067 size_t freedSize = 0; 1002 size_t freedSize = 0;
1068 for (HeapPage* page = m_firstPage; page; page = page->next()) { 1003 for (HeapPage* page = static_cast<HeapPage*>(m_firstPage); page; page = static_cast<HeapPage*>(page->next())) {
1069 page->clearObjectStartBitMap(); 1004 page->clearObjectStartBitMap();
1070 Address startOfGap = page->payload(); 1005 Address startOfGap = page->payload();
1071 for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) { 1006 for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) {
1072 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1007 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1073 size_t size = header->size(); 1008 size_t size = header->size();
1074 ASSERT(size > 0); 1009 ASSERT(size > 0);
1075 ASSERT(size < blinkPagePayloadSize()); 1010 ASSERT(size < blinkPagePayloadSize());
1076 1011
1077 if (header->isPromptlyFreed()) { 1012 if (header->isPromptlyFreed()) {
1078 ASSERT(size >= sizeof(HeapObjectHeader)); 1013 ASSERT(size >= sizeof(HeapObjectHeader));
(...skipping 20 matching lines...)
1099 1034
1100 if (startOfGap != page->payloadEnd()) 1035 if (startOfGap != page->payloadEnd())
1101 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); 1036 addToFreeList(startOfGap, page->payloadEnd() - startOfGap);
1102 } 1037 }
1103 Heap::decreaseAllocatedObjectSize(freedSize); 1038 Heap::decreaseAllocatedObjectSize(freedSize);
1104 ASSERT(m_promptlyFreedSize == freedSize); 1039 ASSERT(m_promptlyFreedSize == freedSize);
1105 m_promptlyFreedSize = 0; 1040 m_promptlyFreedSize = 0;
1106 return true; 1041 return true;
1107 } 1042 }
1108 1043
1109 Address ThreadHeap::allocateLargeObject(size_t size, size_t gcInfoIndex) 1044 Address ThreadHeapForLargeObject::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex)
1110 { 1045 {
1111 // Caller already added space for object header and rounded up to allocation 1046 // Caller already added space for object header and rounded up to allocation
1112 // alignment 1047 // alignment
1113 ASSERT(!(size & allocationMask)); 1048 ASSERT(!(allocationSize & allocationMask));
1114 1049
1115 size_t allocationSize = sizeof(LargeObject) + size; 1050 // 1. Check if we should trigger a GC.
1051 threadState()->scheduleGCIfNeeded();
1116 1052
1117 // Ensure that there is enough space for alignment. If the header 1053 // 2. Try to sweep large objects more than allocationSize bytes
1118 // is not a multiple of 8 bytes we will allocate an extra 1054 // before allocating a new large object.
1119 // headerPadding bytes to ensure it 8 byte aligned. 1055 Address result = lazySweep(allocationSize, gcInfoIndex);
1120 allocationSize += headerPadding(); 1056 if (result)
1057 return result;
1121 1058
1059 // 3. If we have failed in sweeping allocationSize bytes,
1060 // we complete sweeping before allocating this large object.
1061 threadState()->completeSweep();
1062 return doAllocateLargeObject(allocationSize, gcInfoIndex);
1063 }
1064
1065 Address ThreadHeapForLargeObject::doAllocateLargeObject(size_t allocationSize, size_t gcInfoIndex)
1066 {
1067 size_t largeObjectSize = sizeof(LargeObject) + LargeObject::headerPadding() + allocationSize;
1122 // If ASan is supported we add allocationGranularity bytes to the allocated 1068 // If ASan is supported we add allocationGranularity bytes to the allocated
1123 // space and poison that to detect overflows 1069 // space and poison that to detect overflows
1124 #if defined(ADDRESS_SANITIZER) 1070 #if defined(ADDRESS_SANITIZER)
1125 allocationSize += allocationGranularity; 1071 largeObjectSize += allocationGranularity;
1126 #endif 1072 #endif
1127 1073
1128 // 1. Check if we should trigger a GC. 1074 threadState()->shouldFlushHeapDoesNotContainCache();
1129 updateRemainingAllocationSize(); 1075 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize);
1130 m_threadState->scheduleGCIfNeeded(); 1076 threadState()->allocatedRegionsSinceLastGC().append(pageMemory->region());
1131
1132 // 2. Try to sweep large objects more than allocationSize bytes
1133 // before allocating a new large object.
1134 if (!lazySweepLargeObjects(allocationSize)) {
1135 // 3. If we have failed in sweeping allocationSize bytes,
1136 // we complete sweeping before allocating this large object.
1137 m_threadState->completeSweep();
1138 }
1139
1140 m_threadState->shouldFlushHeapDoesNotContainCache();
1141 PageMemory* pageMemory = PageMemory::allocate(allocationSize);
1142 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
1143 Address largeObjectAddress = pageMemory->writableStart(); 1077 Address largeObjectAddress = pageMemory->writableStart();
1144 Address headerAddress = largeObjectAddress + sizeof(LargeObject) + headerPadding(); 1078 Address headerAddress = largeObjectAddress + sizeof(LargeObject) + LargeObject::headerPadding();
1145 #if ENABLE(ASSERT) 1079 #if ENABLE(ASSERT)
1146 // Verify that the allocated PageMemory is expectedly zeroed. 1080 // Verify that the allocated PageMemory is expectedly zeroed.
1147 for (size_t i = 0; i < size; ++i) 1081 for (size_t i = 0; i < largeObjectSize; ++i)
1148 ASSERT(!headerAddress[i]); 1082 ASSERT(!headerAddress[i]);
1149 #endif 1083 #endif
1150 ASSERT(gcInfoIndex > 0); 1084 ASSERT(gcInfoIndex > 0);
1151 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); 1085 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex);
1152 Address result = headerAddress + sizeof(*header); 1086 Address result = headerAddress + sizeof(*header);
1153 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); 1087 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
1154 LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, size); 1088 LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, allocationSize);
1155 header->checkHeader(); 1089 header->checkHeader();
1156 1090
1157 // Poison the object header and allocationGranularity bytes after the object 1091 // Poison the object header and allocationGranularity bytes after the object
1158 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); 1092 ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
1159 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); 1093 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
1160 1094
1161 largeObject->link(&m_firstLargeObject); 1095 largeObject->link(&m_firstPage);
1162 1096
1163 Heap::increaseAllocatedSpace(largeObject->size()); 1097 Heap::increaseAllocatedSpace(largeObject->size());
1164 Heap::increaseAllocatedObjectSize(largeObject->size()); 1098 Heap::increaseAllocatedObjectSize(largeObject->size());
1165 return result; 1099 return result;
1166 } 1100 }
1167 1101
1168 void ThreadHeap::freeLargeObject(LargeObject* object) 1102 void ThreadHeapForLargeObject::freeLargeObject(LargeObject* object)
1169 { 1103 {
1170 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); 1104 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize());
1171 Heap::decreaseAllocatedSpace(object->size()); 1105 Heap::decreaseAllocatedSpace(object->size());
1172 1106
1173 // Unpoison the object header and allocationGranularity bytes after the 1107 // Unpoison the object header and allocationGranularity bytes after the
1174 // object before freeing. 1108 // object before freeing.
1175 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); 1109 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader));
1176 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); 1110 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
1177 1111
1178 if (object->terminating()) { 1112 if (object->terminating()) {
1179 ASSERT(ThreadState::current()->isTerminating()); 1113 ASSERT(ThreadState::current()->isTerminating());
1180 // The thread is shutting down and this page is being removed as a part 1114 // The thread is shutting down and this page is being removed as a part
1181 // of the thread local GC. In that case the object could be traced in 1115 // of the thread local GC. In that case the object could be traced in
1182 // the next global GC if there is a dangling pointer from a live thread 1116 // the next global GC if there is a dangling pointer from a live thread
1183 // heap to this dead thread heap. To guard against this, we put the 1117 // heap to this dead thread heap. To guard against this, we put the
1184 // page into the orphaned page pool and zap the page memory. This 1118 // page into the orphaned page pool and zap the page memory. This
1185 // ensures that tracing the dangling pointer in the next global GC just 1119 // ensures that tracing the dangling pointer in the next global GC just
1186 // crashes instead of causing use-after-frees. After the next global 1120 // crashes instead of causing use-after-frees. After the next global
1187 // GC, the orphaned pages are removed. 1121 // GC, the orphaned pages are removed.
1188 Heap::orphanedPagePool()->addOrphanedPage(m_index, object); 1122 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object);
1189 } else { 1123 } else {
1190 ASSERT(!ThreadState::current()->isTerminating()); 1124 ASSERT(!ThreadState::current()->isTerminating());
1191 PageMemory* memory = object->storage(); 1125 PageMemory* memory = object->storage();
1192 object->~LargeObject(); 1126 object->~LargeObject();
1193 delete memory; 1127 delete memory;
1194 } 1128 }
1195 } 1129 }
1196 1130
1197 template<typename DataType> 1131 template<typename DataType>
1198 PagePool<DataType>::PagePool() 1132 PagePool<DataType>::PagePool()
(...skipping 40 matching lines...)
1239 1173
1240 // We got some memory, but failed to commit it, try again. 1174 // We got some memory, but failed to commit it, try again.
1241 delete memory; 1175 delete memory;
1242 } 1176 }
1243 return nullptr; 1177 return nullptr;
1244 } 1178 }
1245 1179
1246 BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap) 1180 BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap)
1247 : m_storage(storage) 1181 : m_storage(storage)
1248 , m_heap(heap) 1182 , m_heap(heap)
1183 , m_next(nullptr)
1249 , m_terminating(false) 1184 , m_terminating(false)
1250 , m_swept(true) 1185 , m_swept(true)
1251 { 1186 {
1252 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); 1187 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1253 } 1188 }
1254 1189
1255 void BaseHeapPage::markOrphaned() 1190 void BaseHeapPage::markOrphaned()
1256 { 1191 {
1257 m_heap = nullptr; 1192 m_heap = nullptr;
1258 m_terminating = false; 1193 m_terminating = false;
(...skipping 85 matching lines...)
1344 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { 1279 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
1345 BaseHeapPage* page = entry->data; 1280 BaseHeapPage* page = entry->data;
1346 if (page->contains(reinterpret_cast<Address>(object))) 1281 if (page->contains(reinterpret_cast<Address>(object)))
1347 return true; 1282 return true;
1348 } 1283 }
1349 } 1284 }
1350 return false; 1285 return false;
1351 } 1286 }
1352 #endif 1287 #endif
1353 1288
1354 void ThreadHeap::freePage(HeapPage* page) 1289 void ThreadHeapForHeapPage::freePage(HeapPage* page)
1355 { 1290 {
1356 Heap::decreaseAllocatedSpace(blinkPageSize); 1291 Heap::decreaseAllocatedSpace(page->size());
1357 1292
1358 if (page->terminating()) { 1293 if (page->terminating()) {
1359 // The thread is shutting down and this page is being removed as a part 1294 // The thread is shutting down and this page is being removed as a part
1360 // of the thread local GC. In that case the object could be traced in 1295 // of the thread local GC. In that case the object could be traced in
1361 // the next global GC if there is a dangling pointer from a live thread 1296 // the next global GC if there is a dangling pointer from a live thread
1362 // heap to this dead thread heap. To guard against this, we put the 1297 // heap to this dead thread heap. To guard against this, we put the
1363 // page into the orphaned page pool and zap the page memory. This 1298 // page into the orphaned page pool and zap the page memory. This
1364 // ensures that tracing the dangling pointer in the next global GC just 1299 // ensures that tracing the dangling pointer in the next global GC just
1365 // crashes instead of causing use-after-frees. After the next global 1300 // crashes instead of causing use-after-frees. After the next global
1366 // GC, the orphaned pages are removed. 1301 // GC, the orphaned pages are removed.
1367 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); 1302 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
1368 } else { 1303 } else {
1369 PageMemory* memory = page->storage(); 1304 PageMemory* memory = page->storage();
1370 page->~HeapPage(); 1305 page->~HeapPage();
1371 Heap::freePagePool()->addFreePage(m_index, memory); 1306 Heap::freePagePool()->addFreePage(heapIndex(), memory);
1372 } 1307 }
1373 } 1308 }
1374 1309
1375 void ThreadHeap::allocatePage() 1310 void ThreadHeapForHeapPage::allocatePage()
1376 { 1311 {
1377 m_threadState->shouldFlushHeapDoesNotContainCache(); 1312 threadState()->shouldFlushHeapDoesNotContainCache();
1378 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); 1313 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex());
1379 // We continue allocating page memory until we succeed in committing one. 1314 // We continue allocating page memory until we succeed in committing one.
1380 while (!pageMemory) { 1315 while (!pageMemory) {
1381 // Allocate a memory region for blinkPagesPerRegion pages that 1316 // Allocate a memory region for blinkPagesPerRegion pages that
1382 // will each have the following layout. 1317 // will each have the following layout.
1383 // 1318 //
1384 // [ guard os page | ... payload ... | guard os page ] 1319 // [ guard os page | ... payload ... | guard os page ]
1385 // ^---{ aligned to blink page size } 1320 // ^---{ aligned to blink page size }
1386 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); 1321 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages();
1387 m_threadState->allocatedRegionsSinceLastGC().append(region); 1322 threadState()->allocatedRegionsSinceLastGC().append(region);
1388 1323
1389 // Setup the PageMemory object for each of the pages in the region. 1324 // Setup the PageMemory object for each of the pages in the region.
1390 size_t offset = 0; 1325 size_t offset = 0;
1391 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { 1326 for (size_t i = 0; i < blinkPagesPerRegion; ++i) {
1392 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()); 1327 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize());
1393 // Take the first possible page ensuring that this thread actually 1328 // Take the first possible page ensuring that this thread actually
1394 // gets a page and add the rest to the page pool. 1329 // gets a page and add the rest to the page pool.
1395 if (!pageMemory) { 1330 if (!pageMemory) {
1396 if (memory->commit()) 1331 if (memory->commit())
1397 pageMemory = memory; 1332 pageMemory = memory;
1398 else 1333 else
1399 delete memory; 1334 delete memory;
1400 } else { 1335 } else {
1401 Heap::freePagePool()->addFreePage(m_index, memory); 1336 Heap::freePagePool()->addFreePage(heapIndex(), memory);
1402 } 1337 }
1403 offset += blinkPageSize; 1338 offset += blinkPageSize;
1404 } 1339 }
1405 } 1340 }
1406 HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this); 1341 HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this);
1407
1408 page->link(&m_firstPage); 1342 page->link(&m_firstPage);
1409 1343
1410 Heap::increaseAllocatedSpace(blinkPageSize); 1344 Heap::increaseAllocatedSpace(page->size());
1411 addToFreeList(page->payload(), page->payloadSize()); 1345 addToFreeList(page->payload(), page->payloadSize());
1412 } 1346 }
1413 1347
1414 #if ENABLE(ASSERT) 1348 #if ENABLE(ASSERT)
1415 bool ThreadHeap::pagesToBeSweptContains(Address address) 1349 bool ThreadHeapForHeapPage::pagesToBeSweptContains(Address address)
1416 { 1350 {
1417 for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { 1351 for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
1418 if (page->contains(address)) 1352 if (page->contains(address))
1419 return true; 1353 return true;
1420 } 1354 }
1421 return false; 1355 return false;
1422 } 1356 }
1423 #endif 1357 #endif
1424 1358
1425 size_t ThreadHeap::objectPayloadSizeForTesting() 1359 size_t ThreadHeap::objectPayloadSizeForTesting()
1426 { 1360 {
1427 ASSERT(isConsistentForSweeping()); 1361 ASSERT(isConsistentForSweeping());
1428 ASSERT(!m_firstUnsweptPage); 1362 ASSERT(!m_firstUnsweptPage);
1429 ASSERT(!m_firstUnsweptLargeObject);
1430 1363
1431 size_t objectPayloadSize = 0; 1364 size_t objectPayloadSize = 0;
1432 for (HeapPage* page = m_firstPage; page; page = page->next()) 1365 for (BaseHeapPage* page = m_firstPage; page; page = page->next())
1433 objectPayloadSize += page->objectPayloadSizeForTesting(); 1366 objectPayloadSize += page->objectPayloadSizeForTesting();
1434 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next())
1435 objectPayloadSize += largeObject->objectPayloadSizeForTesting();
1436 return objectPayloadSize; 1367 return objectPayloadSize;
1437 } 1368 }
1438 1369
1439 #if ENABLE(ASSERT) 1370 #if ENABLE(ASSERT)
1440 bool ThreadHeap::isConsistentForSweeping() 1371 bool ThreadHeapForHeapPage::isConsistentForSweeping()
1441 { 1372 {
1442 // A thread heap is consistent for sweeping if none of the pages to be swept 1373 // A thread heap is consistent for sweeping if none of the pages to be swept
1443 // contain a freelist block or the current allocation point. 1374 // contain a freelist block or the current allocation point.
1444 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { 1375 for (size_t i = 0; i < blinkPageSizeLog2; ++i) {
1445 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { 1376 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) {
1446 if (pagesToBeSweptContains(freeListEntry->address())) 1377 if (pagesToBeSweptContains(freeListEntry->address()))
1447 return false; 1378 return false;
1448 } 1379 }
1449 } 1380 }
1450 if (hasCurrentAllocationArea()) { 1381 if (hasCurrentAllocationArea()) {
1451 if (pagesToBeSweptContains(currentAllocationPoint())) 1382 if (pagesToBeSweptContains(currentAllocationPoint()))
1452 return false; 1383 return false;
1453 } 1384 }
1454 return true; 1385 return true;
1455 } 1386 }
1456 #endif 1387 #endif
1457 1388
1458 void ThreadHeap::makeConsistentForSweeping() 1389 void ThreadHeap::makeConsistentForSweeping()
1459 { 1390 {
1460 preparePagesForSweeping();
1461 setAllocationPoint(nullptr, 0);
1462 clearFreeLists(); 1391 clearFreeLists();
1463 }
1464
1465 void ThreadHeap::preparePagesForSweeping()
1466 {
1467 ASSERT(isConsistentForSweeping()); 1392 ASSERT(isConsistentForSweeping());
1468 for (HeapPage* page = m_firstPage; page; page = page->next()) 1393 for (BaseHeapPage* page = m_firstPage; page; page = page->next())
1469 page->markAsUnswept(); 1394 page->markAsUnswept();
1470 1395
1471 // If a new GC is requested before this thread got around to sweep, 1396 // If a new GC is requested before this thread got around to sweep,
1472 // ie. due to the thread doing a long running operation, we clear 1397 // ie. due to the thread doing a long running operation, we clear
1473 // the mark bits and mark any of the dead objects as dead. The latter 1398 // the mark bits and mark any of the dead objects as dead. The latter
1474 // is used to ensure the next GC marking does not trace already dead 1399 // is used to ensure the next GC marking does not trace already dead
1475 // objects. If we trace a dead object we could end up tracing into 1400 // objects. If we trace a dead object we could end up tracing into
1476 // garbage or the middle of another object via the newly conservatively 1401 // garbage or the middle of another object via the newly conservatively
1477 // found object. 1402 // found object.
1478 HeapPage* previousPage = nullptr; 1403 BaseHeapPage* previousPage = nullptr;
1479 for (HeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { 1404 for (BaseHeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
1480 page->markUnmarkedObjectsDead(); 1405 page->markUnmarkedObjectsDead();
1481 ASSERT(!page->hasBeenSwept()); 1406 ASSERT(!page->hasBeenSwept());
1482 } 1407 }
1483 if (previousPage) { 1408 if (previousPage) {
1484 ASSERT(m_firstUnsweptPage); 1409 ASSERT(m_firstUnsweptPage);
1485 previousPage->m_next = m_firstPage; 1410 previousPage->m_next = m_firstPage;
1486 m_firstPage = m_firstUnsweptPage; 1411 m_firstPage = m_firstUnsweptPage;
1487 m_firstUnsweptPage = nullptr; 1412 m_firstUnsweptPage = nullptr;
1488 } 1413 }
1489 ASSERT(!m_firstUnsweptPage); 1414 ASSERT(!m_firstUnsweptPage);
1490
1491 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next())
1492 largeObject->markAsUnswept();
1493
1494 LargeObject* previousLargeObject = nullptr;
1495 for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; previousLargeObject = largeObject, largeObject = largeObject->next()) {
1496 largeObject->markUnmarkedObjectsDead();
1497 ASSERT(!largeObject->hasBeenSwept());
1498 }
1499 if (previousLargeObject) {
1500 ASSERT(m_firstUnsweptLargeObject);
1501 previousLargeObject->m_next = m_firstLargeObject;
1502 m_firstLargeObject = m_firstUnsweptLargeObject;
1503 m_firstUnsweptLargeObject = nullptr;
1504 }
1505 ASSERT(!m_firstUnsweptLargeObject);
1506 } 1415 }
1507 1416
1508 void ThreadHeap::clearFreeLists() 1417 void ThreadHeapForHeapPage::clearFreeLists()
1509 { 1418 {
1419 setAllocationPoint(nullptr, 0);
1510 m_freeList.clear(); 1420 m_freeList.clear();
1511 } 1421 }
1512 1422
1513 #if ENABLE(GC_PROFILING) 1423 #if ENABLE(GC_PROFILING)
1514 void ThreadHeap::snapshotFreeList(TracedValue& json) 1424 void ThreadHeap::snapshotFreeList(TracedValue& json)
1515 { 1425 {
1516 json.setInteger("cumulativeAllocationSize", m_cumulativeAllocationSize); 1426 json.setInteger("cumulativeAllocationSize", m_cumulativeAllocationSize);
1517 json.setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount); 1427 json.setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount);
1518 json.setInteger("inlineAllocationCount", m_inlineAllocationCount); 1428 json.setInteger("inlineAllocationCount", m_inlineAllocationCount);
1519 json.setInteger("allocationCount", m_allocationCount); 1429 json.setInteger("allocationCount", m_allocationCount);
(...skipping 68 matching lines...)
1588 ++entryCount; 1498 ++entryCount;
1589 freeSize += entry->size(); 1499 freeSize += entry->size();
1590 } 1500 }
1591 totalFreeSize += freeSize; 1501 totalFreeSize += freeSize;
1592 } 1502 }
1593 } 1503 }
1594 #endif 1504 #endif
1595 1505
1596 HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap) 1506 HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap)
1597 : BaseHeapPage(storage, heap) 1507 : BaseHeapPage(storage, heap)
1598 , m_next(nullptr)
1599 { 1508 {
1600 m_objectStartBitMapComputed = false; 1509 m_objectStartBitMapComputed = false;
1601 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); 1510 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
1602 } 1511 }
1603 1512
1604 size_t HeapPage::objectPayloadSizeForTesting() 1513 size_t HeapPage::objectPayloadSizeForTesting()
1605 { 1514 {
1606 size_t objectPayloadSize = 0; 1515 size_t objectPayloadSize = 0;
1607 Address headerAddress = payload(); 1516 Address headerAddress = payload();
1608 markAsSwept(); 1517 markAsSwept();
(...skipping 22 matching lines...)
1631 clearObjectStartBitMap(); 1540 clearObjectStartBitMap();
1632 1541
1633 size_t markedObjectSize = 0; 1542 size_t markedObjectSize = 0;
1634 Address startOfGap = payload(); 1543 Address startOfGap = payload();
1635 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { 1544 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) {
1636 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1545 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1637 ASSERT(header->size() > 0); 1546 ASSERT(header->size() > 0);
1638 ASSERT(header->size() < blinkPagePayloadSize()); 1547 ASSERT(header->size() < blinkPagePayloadSize());
1639 1548
1640 if (header->isPromptlyFreed()) 1549 if (header->isPromptlyFreed())
1641 heap()->decreasePromptlyFreedSize(header->size()); 1550 heapForHeapPage()->decreasePromptlyFreedSize(header->size());
1642 if (header->isFree()) { 1551 if (header->isFree()) {
1643 size_t size = header->size(); 1552 size_t size = header->size();
1644 // Zero the memory in the free list header to maintain the 1553 // Zero the memory in the free list header to maintain the
1645 // invariant that memory on the free list is zero filled. 1554 // invariant that memory on the free list is zero filled.
1646 // The rest of the memory is already on the free list and is 1555 // The rest of the memory is already on the free list and is
1647 // therefore already zero filled. 1556 // therefore already zero filled.
1648 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); 1557 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry));
1649 headerAddress += size; 1558 headerAddress += size;
1650 continue; 1559 continue;
1651 } 1560 }
(...skipping 12 matching lines...)
1664 header->finalize(payload, payloadSize); 1573 header->finalize(payload, payloadSize);
1665 // This memory will be added to the freelist. Maintain the invariant 1574 // This memory will be added to the freelist. Maintain the invariant
1666 // that memory on the freelist is zero filled. 1575 // that memory on the freelist is zero filled.
1667 FILL_ZERO_IF_PRODUCTION(headerAddress, size); 1576 FILL_ZERO_IF_PRODUCTION(headerAddress, size);
1668 ASAN_POISON_MEMORY_REGION(payload, payloadSize); 1577 ASAN_POISON_MEMORY_REGION(payload, payloadSize);
1669 headerAddress += size; 1578 headerAddress += size;
1670 continue; 1579 continue;
1671 } 1580 }
1672 1581
1673 if (startOfGap != headerAddress) 1582 if (startOfGap != headerAddress)
1674 heap()->addToFreeList(startOfGap, headerAddress - startOfGap); 1583 heapForHeapPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
1675 header->unmark(); 1584 header->unmark();
1676 headerAddress += header->size(); 1585 headerAddress += header->size();
1677 markedObjectSize += header->size(); 1586 markedObjectSize += header->size();
1678 startOfGap = headerAddress; 1587 startOfGap = headerAddress;
1679 } 1588 }
1680 if (startOfGap != payloadEnd()) 1589 if (startOfGap != payloadEnd())
1681 heap()->addToFreeList(startOfGap, payloadEnd() - startOfGap); 1590 heapForHeapPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
1682 1591
1683 if (markedObjectSize) 1592 if (markedObjectSize)
1684 Heap::increaseMarkedObjectSize(markedObjectSize); 1593 Heap::increaseMarkedObjectSize(markedObjectSize);
1685 } 1594 }
1686 1595
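Note: the sweep above is easiest to read as "walk headers, keep live objects, fold every dead run into one free-list entry bounded by startOfGap". A simplified, self-contained sketch of that coalescing pattern follows; SimpleHeader, addGapToFreeList and sweepPayload are illustrative stand-ins, and the sketch deliberately leaves out finalization, ASan poisoning and the promptly-freed accounting.

    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Illustrative header layout; the real HeapObjectHeader packs this
    // information into bit fields.
    struct SimpleHeader {
        size_t size;   // total size of the object, header included
        bool marked;   // set by the marking phase if the object is live
    };

    // Stand-in for adding a gap to a free list; here we just restore the
    // zero-fill invariant for the reclaimed range.
    static void addGapToFreeList(uint8_t* gapStart, uint8_t* gapEnd)
    {
        std::memset(gapStart, 0, static_cast<size_t>(gapEnd - gapStart));
    }

    // Walk densely packed headers in [begin, end), reclaim unmarked
    // objects, and coalesce each run of reclaimed memory into one gap.
    static void sweepPayload(uint8_t* begin, uint8_t* end)
    {
        uint8_t* startOfGap = begin;
        for (uint8_t* cursor = begin; cursor < end; ) {
            SimpleHeader* header = reinterpret_cast<SimpleHeader*>(cursor);
            if (!header->marked) {          // dead: extend the current gap
                cursor += header->size;
                continue;
            }
            if (startOfGap != cursor)       // live: close the preceding gap
                addGapToFreeList(startOfGap, cursor);
            header->marked = false;         // reset for the next GC cycle
            cursor += header->size;
            startOfGap = cursor;
        }
        if (startOfGap != end)
            addGapToFreeList(startOfGap, end);
    }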
1687 void HeapPage::markUnmarkedObjectsDead() 1596 void HeapPage::markUnmarkedObjectsDead()
1688 { 1597 {
1689 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1598 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1690 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1599 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1691 ASSERT(header->size() < blinkPagePayloadSize()); 1600 ASSERT(header->size() < blinkPagePayloadSize());
1692 // Check if a free list entry first since we cannot call 1601 // Check if a free list entry first since we cannot call
1693 // isMarked on a free list entry. 1602 // isMarked on a free list entry.
1694 if (header->isFree()) { 1603 if (header->isFree()) {
1695 headerAddress += header->size(); 1604 headerAddress += header->size();
1696 continue; 1605 continue;
1697 } 1606 }
1698 header->checkHeader(); 1607 header->checkHeader();
1699 if (header->isMarked()) 1608 if (header->isMarked())
1700 header->unmark(); 1609 header->unmark();
1701 else 1610 else
1702 header->markDead(); 1611 header->markDead();
1703 headerAddress += header->size(); 1612 headerAddress += header->size();
1704 } 1613 }
1705 } 1614 }
1706 1615
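Note: markUnmarkedObjectsDead has the same per-object effect on heap pages and large objects, which is what lets callers treat both page kinds uniformly. A hypothetical state-transition sketch of that effect (ObjectState is an illustrative enum, not an Oilpan type):

    // Free-list entries are skipped entirely; for real objects the
    // consistency pass either clears the mark bit or tombstones the
    // object so a later conservative scan cannot trace into it.
    enum class ObjectState { FreeListEntry, Unmarked, Marked, Dead };

    ObjectState afterMakeConsistentForSweeping(ObjectState state)
    {
        switch (state) {
        case ObjectState::Marked:
            return ObjectState::Unmarked;   // survived; mark bit cleared
        case ObjectState::Unmarked:
            return ObjectState::Dead;       // unreachable; never trace it again
        default:
            return state;                   // free-list entries and dead objects are untouched
        }
    }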
1707 void HeapPage::removeFromHeap(ThreadHeap* heap) 1616 void HeapPage::removeFromHeap()
1708 { 1617 {
1709 heap->freePage(this); 1618 heapForHeapPage()->freePage(this);
1619 }
1620
1621 ThreadHeapForHeapPage* HeapPage::heapForHeapPage()
1622 {
1623 return static_cast<ThreadHeapForHeapPage*>(heap());
1710 } 1624 }
1711 1625
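Note: the heapForHeapPage() helper above encapsulates the one downcast that the page-kind split needs. A hypothetical miniature of the pattern (the Mini* names are illustrative, not the real classes), showing why the virtual removeFromHeap() no longer needs a heap argument:

    class MiniHeap { };
    class MiniHeapForPages : public MiniHeap {
    public:
        void freePage(class MiniPage*) { /* return the page's memory */ }
    };

    class MiniBasePage {
    public:
        explicit MiniBasePage(MiniHeap* heap) : m_heap(heap) { }
        virtual ~MiniBasePage() = default;
        virtual void removeFromHeap() = 0;
    protected:
        MiniHeap* heap() const { return m_heap; }
    private:
        MiniHeap* m_heap;
    };

    class MiniPage final : public MiniBasePage {
    public:
        using MiniBasePage::MiniBasePage;
        void removeFromHeap() override
        {
            // The owning heap's concrete type is known by construction,
            // so the downcast is safe.
            static_cast<MiniHeapForPages*>(heap())->freePage(this);
        }
    };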
1712 void HeapPage::populateObjectStartBitMap() 1626 void HeapPage::populateObjectStartBitMap()
1713 { 1627 {
1714 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); 1628 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1715 Address start = payload(); 1629 Address start = payload();
1716 for (Address headerAddress = start; headerAddress < payloadEnd();) { 1630 for (Address headerAddress = start; headerAddress < payloadEnd();) {
1717 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1631 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1718 size_t objectOffset = headerAddress - start; 1632 size_t objectOffset = headerAddress - start;
1719 ASSERT(!(objectOffset & allocationMask)); 1633 ASSERT(!(objectOffset & allocationMask));
(...skipping 898 matching lines...)
2618 2532
2619 double Heap::estimatedMarkingTime() 2533 double Heap::estimatedMarkingTime()
2620 { 2534 {
2621 // FIXME: Implement heuristics 2535 // FIXME: Implement heuristics
2622 return 0.0; 2536 return 0.0;
2623 } 2537 }
2624 2538
2625 void ThreadHeap::prepareHeapForTermination() 2539 void ThreadHeap::prepareHeapForTermination()
2626 { 2540 {
2627 ASSERT(!m_firstUnsweptPage); 2541 ASSERT(!m_firstUnsweptPage);
2628 ASSERT(!m_firstUnsweptLargeObject); 2542 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
2629 for (HeapPage* page = m_firstPage; page; page = page->next()) {
2630 page->setTerminating(); 2543 page->setTerminating();
2631 } 2544 }
2632 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
2633 largeObject->setTerminating();
2634 }
2635 } 2545 }
2636 2546
2637 size_t Heap::objectPayloadSizeForTesting() 2547 size_t Heap::objectPayloadSizeForTesting()
2638 { 2548 {
2639 size_t objectPayloadSize = 0; 2549 size_t objectPayloadSize = 0;
2640 for (ThreadState* state : ThreadState::attachedThreads()) { 2550 for (ThreadState* state : ThreadState::attachedThreads()) {
2641 state->setGCState(ThreadState::GCRunning); 2551 state->setGCState(ThreadState::GCRunning);
2642 state->makeConsistentForSweeping(); 2552 state->makeConsistentForSweeping();
2643 objectPayloadSize += state->objectPayloadSizeForTesting(); 2553 objectPayloadSize += state->objectPayloadSizeForTesting();
2644 state->setGCState(ThreadState::EagerSweepScheduled); 2554 state->setGCState(ThreadState::EagerSweepScheduled);
(...skipping 14 matching lines...)
2659 ASSERT(!state->isInGC()); 2569 ASSERT(!state->isInGC());
2660 2570
2661 // Don't promptly free large objects because their page is never reused. 2571 // Don't promptly free large objects because their page is never reused.
2662 // Don't free backings allocated on other threads. 2572 // Don't free backings allocated on other threads.
2663 BaseHeapPage* page = pageFromObject(address); 2573 BaseHeapPage* page = pageFromObject(address);
2664 if (page->isLargeObject() || page->heap()->threadState() != state) 2574 if (page->isLargeObject() || page->heap()->threadState() != state)
2665 return; 2575 return;
2666 2576
2667 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); 2577 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
2668 header->checkHeader(); 2578 header->checkHeader();
2669 static_cast<HeapPage*>(page)->heap()->promptlyFreeObject(header); 2579 static_cast<HeapPage*>(page)->heapForHeapPage()->promptlyFreeObject(header);
2670 } 2580 }
2671 2581
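Note: backingFree only takes the fast path when prompt freeing can actually help. An illustrative distillation of those checks (mayPromptlyFree and its parameters are hypothetical names, not the Oilpan API):

    bool mayPromptlyFree(bool pageIsLargeObject, bool pageOwnedByCurrentThread)
    {
        if (pageIsLargeObject)
            return false;   // a large object's page is never reused, so nothing is gained
        if (!pageOwnedByCurrentThread)
            return false;   // never mutate free lists that belong to another thread
        return true;
    }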
2672 void HeapAllocator::freeVectorBacking(void* address) 2582 void HeapAllocator::freeVectorBacking(void* address)
2673 { 2583 {
2674 backingFree(address); 2584 backingFree(address);
2675 } 2585 }
2676 2586
2677 void HeapAllocator::freeInlineVectorBacking(void* address) 2587 void HeapAllocator::freeInlineVectorBacking(void* address)
2678 { 2588 {
2679 backingFree(address); 2589 backingFree(address);
(...skipping 16 matching lines...)
2696 ASSERT(state->isAllocationAllowed()); 2606 ASSERT(state->isAllocationAllowed());
2697 2607
2698 // FIXME: Support expand for large objects. 2608 // FIXME: Support expand for large objects.
2699 // Don't expand backings allocated on other threads. 2609 // Don't expand backings allocated on other threads.
2700 BaseHeapPage* page = pageFromObject(address); 2610 BaseHeapPage* page = pageFromObject(address);
2701 if (page->isLargeObject() || page->heap()->threadState() != state) 2611 if (page->isLargeObject() || page->heap()->threadState() != state)
2702 return false; 2612 return false;
2703 2613
2704 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); 2614 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
2705 header->checkHeader(); 2615 header->checkHeader();
2706 return static_cast<HeapPage*>(page)->heap()->expandObject(header, newSize); 2616 return static_cast<HeapPage*>(page)->heapForHeapPage()->expandObject(header, newSize);
2707 } 2617 }
2708 2618
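Note: purely as an illustration of what an in-place backing expand amounts to, not the actual expandObject implementation: an object can grow without moving only if the bytes directly behind it are available, otherwise the caller falls back to allocate-copy-free. All names below are hypothetical.

    #include <cstddef>

    struct Region {
        size_t objectSize;     // current size of the backing object
        size_t slackBehind;    // free bytes immediately following it
    };

    bool tryExpandInPlace(Region& region, size_t newSize)
    {
        if (newSize <= region.objectSize)
            return true;                      // nothing to grow
        size_t needed = newSize - region.objectSize;
        if (needed > region.slackBehind)
            return false;                     // caller reallocates instead
        region.slackBehind -= needed;
        region.objectSize = newSize;          // a real header update would go here
        return true;
    }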
2709 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) 2619 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize)
2710 { 2620 {
2711 return backingExpand(address, newSize); 2621 return backingExpand(address, newSize);
2712 } 2622 }
2713 2623
2714 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize) 2624 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize)
2715 { 2625 {
2716 return backingExpand(address, newSize); 2626 return backingExpand(address, newSize);
(...skipping 22 matching lines...)
2739 ASSERT(state->isAllocationAllowed()); 2649 ASSERT(state->isAllocationAllowed());
2740 2650
2741 // FIXME: Support shrink for large objects. 2651 // FIXME: Support shrink for large objects.
2742 // Don't shrink backings allocated on other threads. 2652 // Don't shrink backings allocated on other threads.
2743 BaseHeapPage* page = pageFromObject(address); 2653 BaseHeapPage* page = pageFromObject(address);
2744 if (page->isLargeObject() || page->heap()->threadState() != state) 2654 if (page->isLargeObject() || page->heap()->threadState() != state)
2745 return; 2655 return;
2746 2656
2747 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); 2657 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
2748 header->checkHeader(); 2658 header->checkHeader();
2749 static_cast<HeapPage*>(page)->heap()->shrinkObject(header, quantizedShrunkSize); 2659 static_cast<HeapPage*>(page)->heapForHeapPage()->shrinkObject(header, quantizedShrunkSize);
2750 } 2660 }
2751 2661
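Note: again purely illustrative, not the real shrinkObject: shrinking splits the backing object and hands the tail back to the owning page's free list, unless the tail is too small to hold a free-list entry, in which case it is kept as slack. The names and the threshold below are hypothetical.

    #include <cstddef>

    constexpr size_t kMinFreeEntrySize = 2 * sizeof(void*); // hypothetical threshold

    struct ShrinkResult {
        size_t keptSize;        // size the object ends up with
        size_t returnedToFree;  // bytes handed back to the free list
    };

    ShrinkResult shrinkInPlace(size_t currentSize, size_t shrunkSize)
    {
        size_t tail = currentSize - shrunkSize;
        if (tail < kMinFreeEntrySize)
            return { currentSize, 0 };   // tail too small; keep it as slack
        return { shrunkSize, tail };     // tail becomes a new free-list entry
    }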
2752 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) 2662 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
2753 { 2663 {
2754 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); 2664 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
2755 } 2665 }
2756 2666
2757 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) 2667 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
2758 { 2668 {
2759 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); 2669 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
(...skipping 99 matching lines...)
2859 bool Heap::s_shutdownCalled = false; 2769 bool Heap::s_shutdownCalled = false;
2860 bool Heap::s_lastGCWasConservative = false; 2770 bool Heap::s_lastGCWasConservative = false;
2861 FreePagePool* Heap::s_freePagePool; 2771 FreePagePool* Heap::s_freePagePool;
2862 OrphanedPagePool* Heap::s_orphanedPagePool; 2772 OrphanedPagePool* Heap::s_orphanedPagePool;
2863 Heap::RegionTree* Heap::s_regionTree = nullptr; 2773 Heap::RegionTree* Heap::s_regionTree = nullptr;
2864 size_t Heap::s_allocatedObjectSize = 0; 2774 size_t Heap::s_allocatedObjectSize = 0;
2865 size_t Heap::s_allocatedSpace = 0; 2775 size_t Heap::s_allocatedSpace = 0;
2866 size_t Heap::s_markedObjectSize = 0; 2776 size_t Heap::s_markedObjectSize = 0;
2867 2777
2868 } // namespace blink 2778 } // namespace blink