Chromium Code Reviews

Unified Diff: Source/platform/heap/Heap.cpp

Issue 840223002: Oilpan: Remove duplicated code between HeapPage and LargeObject (Closed)
Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 5 years, 10 months ago
 /*
  * Copyright (C) 2013 Google Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
  * met:
  *
  *     * Redistributions of source code must retain the above copyright
  *       notice, this list of conditions and the following disclaimer.
  *     * Redistributions in binary form must reproduce the above
(...skipping 516 matching lines...)
 
 void LargeObject::markUnmarkedObjectsDead()
 {
     HeapObjectHeader* header = heapObjectHeader();
     if (header->isMarked())
         header->unmark();
     else
         header->markDead();
 }
 
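Editor's note: markUnmarkedObjectsDead() enforces the invariant relied on later by makeConsistentForSweeping(): survivors of the last marking get their mark bit cleared for the next cycle, and everything unreached is stamped dead so a future GC never traces it. A standalone toy model of the state change (ToyHeader is a made-up stand-in for HeapObjectHeader's bits):

#include <cassert>

// Made-up stand-in for HeapObjectHeader's mark/dead bits.
struct ToyHeader {
    bool marked = false;
    bool dead = false;
};

// Mirrors the if/else in LargeObject::markUnmarkedObjectsDead() above.
void markUnmarkedObjectsDead(ToyHeader& header)
{
    if (header.marked)
        header.marked = false;  // survivor: reset the bit for the next GC
    else
        header.dead = true;     // unreached: must never be traced again
}

int main()
{
    ToyHeader survivor;
    survivor.marked = true;
    ToyHeader garbage;
    markUnmarkedObjectsDead(survivor);
    markUnmarkedObjectsDead(garbage);
    assert(!survivor.marked && !survivor.dead);
    assert(garbage.dead && !garbage.marked);
    return 0;
}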
-void LargeObject::removeFromHeap(ThreadHeap* heap)
+void LargeObject::removeFromHeap()
 {
-    heap->freeLargeObject(this);
+    static_cast<ThreadHeapForLargeObject*>(heap())->freeLargeObject(this);
 }
-
-ThreadHeap::ThreadHeap(ThreadState* state, int index)
-    : m_currentAllocationPoint(nullptr)
-    , m_remainingAllocationSize(0)
-    , m_lastRemainingAllocationSize(0)
-    , m_firstPage(nullptr)
-    , m_firstLargeObject(nullptr)
-    , m_firstUnsweptPage(nullptr)
-    , m_firstUnsweptLargeObject(nullptr)
-    , m_threadState(state)
-    , m_index(index)
-    , m_promptlyFreedSize(0)
-{
-    clearFreeLists();
-}
 
 FreeList::FreeList()
     : m_biggestFreeListIndex(0)
 {
 }
 
+ThreadHeap::ThreadHeap(ThreadState* state, int index)
+    : m_firstPage(nullptr)
+    , m_firstUnsweptPage(nullptr)
+    , m_threadState(state)
+    , m_index(index)
+{
+    clearFreeLists();
+}
+
+ThreadHeapForHeapPage::ThreadHeapForHeapPage(ThreadState* state, int index)
+    : ThreadHeap(state, index)
+    , m_currentAllocationPoint(nullptr)
+    , m_remainingAllocationSize(0)
+    , m_lastRemainingAllocationSize(0)
+    , m_promptlyFreedSize(0)
+{
+}
+
+ThreadHeapForLargeObject::ThreadHeapForLargeObject(ThreadState* state, int index)
+    : ThreadHeap(state, index)
+{
+}
+
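Editor's note: for orientation, the constructor split above implies roughly this class shape. This is a sketch reconstructed from the initializer lists, not the real declarations (those live in Heap.h), and the member types are assumptions:

#include <cstddef>

typedef char* Address;
class ThreadState;
class BaseHeapPage;

// Base class: page lists and heap identity shared by both page kinds.
class ThreadHeap {
protected:
    BaseHeapPage* m_firstPage;
    BaseHeapPage* m_firstUnsweptPage;
    ThreadState* m_threadState;
    int m_index;
};

// Normal pages add bump-allocation and prompt-free bookkeeping.
class ThreadHeapForHeapPage : public ThreadHeap {
protected:
    Address m_currentAllocationPoint;
    size_t m_remainingAllocationSize;
    size_t m_lastRemainingAllocationSize;
    size_t m_promptlyFreedSize;
};

// Large objects need no extra per-heap state.
class ThreadHeapForLargeObject : public ThreadHeap { };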
 ThreadHeap::~ThreadHeap()
 {
     ASSERT(!m_firstPage);
-    ASSERT(!m_firstLargeObject);
     ASSERT(!m_firstUnsweptPage);
-    ASSERT(!m_firstUnsweptLargeObject);
 }
 
 void ThreadHeap::cleanupPages()
 {
     clearFreeLists();
 
     ASSERT(!m_firstUnsweptPage);
-    ASSERT(!m_firstUnsweptLargeObject);
     // Add the ThreadHeap's pages to the orphanedPagePool.
-    for (HeapPage* page = m_firstPage; page; page = page->m_next) {
+    for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
         Heap::decreaseAllocatedSpace(blinkPageSize);
-        Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
+        Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
     }
     m_firstPage = nullptr;
-
-    for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->m_next) {
-        Heap::decreaseAllocatedSpace(largeObject->size());
-        Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject);
-    }
-    m_firstLargeObject = nullptr;
 }
 
-void ThreadHeap::updateRemainingAllocationSize()
+void ThreadHeapForHeapPage::updateRemainingAllocationSize()
 {
     if (m_lastRemainingAllocationSize > remainingAllocationSize()) {
         Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize());
         m_lastRemainingAllocationSize = remainingAllocationSize();
     }
     ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize());
 }
 
-void ThreadHeap::setAllocationPoint(Address point, size_t size)
+void ThreadHeapForHeapPage::setAllocationPoint(Address point, size_t size)
 {
 #if ENABLE(ASSERT)
     if (point) {
         ASSERT(size);
         BaseHeapPage* page = pageFromObject(point);
         ASSERT(!page->isLargeObject());
         ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize());
     }
 #endif
-    if (hasCurrentAllocationArea())
+    if (hasCurrentAllocationArea()) {
         addToFreeList(currentAllocationPoint(), remainingAllocationSize());
+    }
     updateRemainingAllocationSize();
     m_currentAllocationPoint = point;
     m_lastRemainingAllocationSize = m_remainingAllocationSize = size;
 }
 
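Editor's note: setAllocationPoint() retires the current bump-allocation area before installing a new one; whatever is left of the old area goes back on the free list. A minimal standalone sketch of that hand-off (ToyFreeList is a hypothetical stand-in for the real free list):

#include <cstddef>
#include <utility>
#include <vector>

typedef char* Address;

// Toy stand-in for the page free list: records (address, size) spans.
struct ToyFreeList {
    std::vector<std::pair<Address, size_t>> spans;
    void add(Address a, size_t s) { spans.push_back({a, s}); }
};

struct AllocationArea {
    Address point = nullptr;
    size_t remaining = 0;
    ToyFreeList freeList;

    // Mirror of setAllocationPoint(): return the unused tail of the old
    // area to the free list, then adopt the new area.
    void set(Address newPoint, size_t newSize)
    {
        if (point && remaining)              // hasCurrentAllocationArea()
            freeList.add(point, remaining);
        point = newPoint;
        remaining = newSize;
    }
};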
-Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex)
 {
     ASSERT(allocationSize > remainingAllocationSize());
     ASSERT(allocationSize >= allocationGranularity);
 
     // 1. If this allocation is big enough, allocate a large object.
     if (allocationSize >= largeObjectSizeThreshold)
-        return allocateLargeObject(allocationSize, gcInfoIndex);
+        return static_cast<ThreadHeapForLargeObject*>(threadState()->heap(LargeObjectHeap))->allocateLargeObject(allocationSize, gcInfoIndex);
 
     // 2. Check if we should trigger a GC.
     updateRemainingAllocationSize();
     threadState()->scheduleGCOrForceConservativeGCIfNeeded();
 
     // 3. Try to allocate from a free list.
     Address result = allocateFromFreeList(allocationSize, gcInfoIndex);
     if (result)
         return result;
 
     // 4. Reset the allocation point.
     setAllocationPoint(nullptr, 0);
 
     // 5. Lazily sweep pages of this heap until we find a freed area for
     // this allocation or we finish sweeping all pages of this heap.
-    result = lazySweepPages(allocationSize, gcInfoIndex);
+    result = lazySweep(allocationSize, gcInfoIndex);
     if (result)
         return result;
 
     // 6. Coalesce promptly freed areas and then try to allocate from a free
     // list.
     if (coalesce()) {
         result = allocateFromFreeList(allocationSize, gcInfoIndex);
         if (result)
             return result;
     }
 
     // 7. Complete sweeping.
     threadState()->completeSweep();
 
     // 8. Add a new page to this heap.
     allocatePage();
 
     // 9. Try to allocate from a free list. This allocation must succeed.
     result = allocateFromFreeList(allocationSize, gcInfoIndex);
     RELEASE_ASSERT(result);
     return result;
 }
 
-Address ThreadHeap::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeapForHeapPage::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex)
 {
     // Try reusing a block from the largest bin. The underlying reasoning
     // being that we want to amortize this slow allocation call by carving
     // off as a large a free block as possible in one go; a block that will
     // service this block and let following allocations be serviced quickly
     // by bump allocation.
     size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex;
     int index = m_freeList.m_biggestFreeListIndex;
     for (; index > 0; --index, bucketSize >>= 1) {
         FreeListEntry* entry = m_freeList.m_freeLists[index];
(...skipping 14 matching lines...)
         }
     }
     m_freeList.m_biggestFreeListIndex = index;
     return nullptr;
 }
 
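Editor's note: the loop above deliberately starts at the biggest bucket: carving one big block sets up a long run of cheap bump allocations. A toy model of the downward search (hypothetical container types; the 14 elided lines also inspect each entry's exact size and install the carved block as the new allocation area):

#include <cstddef>
#include <vector>

// Bucket i holds free blocks whose size is at least 1 << i.
struct ToyFreeList {
    static const int kBucketCount = 17;         // roughly blinkPageSizeLog2
    std::vector<size_t> buckets[kBucketCount];  // block sizes only
    int biggestFreeListIndex = 0;

    // Search from the biggest bucket downward, updating
    // biggestFreeListIndex as empty buckets are passed over.
    bool take(size_t allocationSize, size_t* blockSize)
    {
        int index = biggestFreeListIndex;
        for (; index > 0; --index) {
            std::vector<size_t>& bucket = buckets[index];
            if (!bucket.empty() && bucket.back() >= allocationSize) {
                *blockSize = bucket.back();
                bucket.pop_back();
                biggestFreeListIndex = index;
                return true;
            }
        }
        biggestFreeListIndex = index;
        return false;
    }
};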
 void ThreadHeap::prepareForSweep()
 {
     ASSERT(!threadState()->isInGC());
     ASSERT(!m_firstUnsweptPage);
-    ASSERT(!m_firstUnsweptLargeObject);
 
     // Move all pages to a list of unswept pages.
     m_firstUnsweptPage = m_firstPage;
-    m_firstUnsweptLargeObject = m_firstLargeObject;
     m_firstPage = nullptr;
-    m_firstLargeObject = nullptr;
 }
 
-Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
+Address ThreadHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex)
 {
-    ASSERT(!hasCurrentAllocationArea());
-    ASSERT(allocationSize < largeObjectSizeThreshold);
-
     // If there are no pages to be swept, return immediately.
     if (!m_firstUnsweptPage)
         return nullptr;
 
     RELEASE_ASSERT(threadState()->isSweepingInProgress());
 
     // lazySweepPages() can be called recursively if finalizers invoked in
     // page->sweep() allocate memory and the allocation triggers
     // lazySweepPages(). This check prevents the sweeping from being executed
     // recursively.
     if (threadState()->sweepForbidden())
         return nullptr;
 
     TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages");
-    ThreadState::SweepForbiddenScope scope(m_threadState);
+    ThreadState::SweepForbiddenScope scope(threadState());
 
     if (threadState()->isMainThread())
         ScriptForbiddenScope::enter();
 
+    Address result = lazySweepPages(allocationSize, gcInfoIndex);
+
+    if (threadState()->isMainThread())
+        ScriptForbiddenScope::exit();
+    return result;
+}
+
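Editor's note: the sweepForbidden check above is a re-entrancy guard. Finalizers run by page->sweep() may allocate, and that allocation can call back into lazySweep(); the flag turns the nested call into a no-op. A compilable standalone model of just the guard (toy types, sweeping elided):

struct ToyThreadState {
    bool sweepForbidden = false;
};

// RAII flag, mirroring ThreadState::SweepForbiddenScope.
struct SweepForbiddenScope {
    explicit SweepForbiddenScope(ToyThreadState* state) : m_state(state)
    {
        m_state->sweepForbidden = true;
    }
    ~SweepForbiddenScope() { m_state->sweepForbidden = false; }
    ToyThreadState* m_state;
};

void* lazySweep(ToyThreadState* state)
{
    if (state->sweepForbidden)
        return nullptr;                // nested call from a finalizer: no-op
    SweepForbiddenScope scope(state);
    // ... sweep pages; finalizers here may re-enter lazySweep() safely ...
    return nullptr;
}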
+Address ThreadHeapForHeapPage::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
+{
+    ASSERT(!hasCurrentAllocationArea());
     Address result = nullptr;
     while (m_firstUnsweptPage) {
-        HeapPage* page = m_firstUnsweptPage;
+        BaseHeapPage* page = m_firstUnsweptPage;
         if (page->isEmpty()) {
             page->unlink(&m_firstUnsweptPage);
-            page->removeFromHeap(this);
+            page->removeFromHeap();
         } else {
             // Sweep a page and move the page from m_firstUnsweptPages to
             // m_firstPages.
             page->sweep();
             page->unlink(&m_firstUnsweptPage);
             page->link(&m_firstPage);
             page->markAsSwept();
 
+            // For HeapPage, stop lazy sweeping once we find a slot to
+            // allocate a new object.
             result = allocateFromFreeList(allocationSize, gcInfoIndex);
-            if (result)
+            if (result) {
                 break;
+            }
         }
     }
-
-    if (threadState()->isMainThread())
-        ScriptForbiddenScope::exit();
     return result;
 }
 
-bool ThreadHeap::lazySweepLargeObjects(size_t allocationSize)
+Address ThreadHeapForLargeObject::lazySweepPages(size_t allocationSize, size_t gcInfoIndex)
 {
-    ASSERT(allocationSize >= largeObjectSizeThreshold);
-
-    // If there are no large objects to be swept, return immediately.
-    if (!m_firstUnsweptLargeObject)
-        return false;
-
-    RELEASE_ASSERT(threadState()->isSweepingInProgress());
-
-    // lazySweepLargeObjects() can be called recursively if finalizers invoked
-    // in page->sweep() allocate memory and the allocation triggers
-    // lazySweepLargeObjects(). This check prevents the sweeping from being
-    // executed recursively.
-    if (threadState()->sweepForbidden())
-        return false;
-
-    TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepLargeObjects");
-    ThreadState::SweepForbiddenScope scope(m_threadState);
-
-    if (threadState()->isMainThread())
-        ScriptForbiddenScope::enter();
-
-    bool result = false;
+    Address result = nullptr;
     size_t sweptSize = 0;
-    while (m_firstUnsweptLargeObject) {
-        LargeObject* largeObject = m_firstUnsweptLargeObject;
-        if (largeObject->isEmpty()) {
-            sweptSize += largeObject->size();
-            largeObject->unlink(&m_firstUnsweptLargeObject);
-            largeObject->removeFromHeap(this);
-
-            // If we have swept large objects more than allocationSize,
-            // we stop the lazy sweeping.
+    while (m_firstUnsweptPage) {
+        BaseHeapPage* page = m_firstUnsweptPage;
+        if (page->isEmpty()) {
+            sweptSize += static_cast<LargeObject*>(page)->payloadSize() + sizeof(HeapObjectHeader);
+            page->unlink(&m_firstUnsweptPage);
+            page->removeFromHeap();
+            // For LargeObject, stop lazy sweeping once we have swept
+            // more than allocationSize bytes.
             if (sweptSize >= allocationSize) {
-                result = true;
+                result = doAllocateLargeObject(allocationSize, gcInfoIndex);
+                ASSERT(result);
                 break;
             }
         } else {
-            // Sweep a large object and move the large object from
-            // m_firstUnsweptLargeObjects to m_firstLargeObjects.
-            largeObject->sweep();
-            largeObject->unlink(&m_firstUnsweptLargeObject);
-            largeObject->link(&m_firstLargeObject);
-            largeObject->markAsSwept();
+            // Sweep a page and move the page from m_firstUnsweptPages to
+            // m_firstPages.
+            page->sweep();
+            page->unlink(&m_firstUnsweptPage);
+            page->link(&m_firstPage);
+            page->markAsSwept();
         }
     }
-
-    if (threadState()->isMainThread())
-        ScriptForbiddenScope::exit();
     return result;
 }
 
 void ThreadHeap::completeSweep()
 {
     RELEASE_ASSERT(threadState()->isSweepingInProgress());
     ASSERT(threadState()->sweepForbidden());
 
     if (threadState()->isMainThread())
         ScriptForbiddenScope::enter();
 
-    // Sweep normal pages.
     while (m_firstUnsweptPage) {
-        HeapPage* page = m_firstUnsweptPage;
+        BaseHeapPage* page = m_firstUnsweptPage;
         if (page->isEmpty()) {
             page->unlink(&m_firstUnsweptPage);
-            page->removeFromHeap(this);
+            page->removeFromHeap();
         } else {
             // Sweep a page and move the page from m_firstUnsweptPages to
             // m_firstPages.
             page->sweep();
             page->unlink(&m_firstUnsweptPage);
             page->link(&m_firstPage);
             page->markAsSwept();
         }
     }
 
-    // Sweep large objects.
-    while (m_firstUnsweptLargeObject) {
-        LargeObject* largeObject = m_firstUnsweptLargeObject;
-        if (largeObject->isEmpty()) {
-            largeObject->unlink(&m_firstUnsweptLargeObject);
-            largeObject->removeFromHeap(this);
-        } else {
-            // Sweep a large object and move the large object from
-            // m_firstUnsweptLargeObjects to m_firstLargeObjects.
-            largeObject->sweep();
-            largeObject->unlink(&m_firstUnsweptLargeObject);
-            largeObject->link(&m_firstLargeObject);
-            largeObject->markAsSwept();
-        }
-    }
-
     if (threadState()->isMainThread())
         ScriptForbiddenScope::exit();
 }
 
 #if ENABLE(ASSERT)
-static bool isLargeObjectAligned(LargeObject* largeObject, Address address)
-{
-    // Check that a large object is blinkPageSize aligned (modulo the osPageSize
-    // for the guard page).
-    return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject));
-}
-#endif
-
-#if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING)
 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address)
 {
-    for (HeapPage* page = m_firstPage; page; page = page->next()) {
+    for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
         if (page->contains(address))
             return page;
     }
-    for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
+    for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
         if (page->contains(address))
             return page;
     }
-    for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
-        ASSERT(isLargeObjectAligned(largeObject, address));
-        if (largeObject->contains(address))
-            return largeObject;
-    }
-    for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; largeObject = largeObject->next()) {
-        ASSERT(isLargeObjectAligned(largeObject, address));
-        if (largeObject->contains(address))
-            return largeObject;
-    }
     return nullptr;
 }
 #endif
 
 #if ENABLE(GC_PROFILE_HEAP)
 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0
 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info)
 {
     ASSERT(isConsistentForSweeping());
     size_t previousPageCount = info->pageCount;
 
     json->beginArray("pages");
-    for (HeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
+    for (BaseHeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) {
         // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots.
         if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) {
             json->beginArray();
             json->pushInteger(reinterpret_cast<intptr_t>(page));
             page->snapshot(json, info);
             json->endArray();
         } else {
             page->snapshot(0, info);
         }
     }
     json->endArray();
 
-    json->beginArray("largeObjects");
-    for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
-        json->beginDictionary();
-        largeObject->snapshot(json, info);
-        json->endDictionary();
-    }
-    json->endArray();
-
     json->setInteger("pageCount", info->pageCount - previousPageCount);
 }
 #endif
 
 void FreeList::addToFreeList(Address address, size_t size)
 {
     ASSERT(size < blinkPagePayloadSize());
     // The free list entries are only pointer aligned (but when we allocate
     // from them we are 8 byte aligned due to the header size).
     ASSERT(!((reinterpret_cast<uintptr_t>(address) + sizeof(HeapObjectHeader)) & allocationMask));
(...skipping 18 matching lines...)
     // space.
     if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList())
         return;
 #endif
     int index = bucketIndexForSize(size);
     entry->link(&m_freeLists[index]);
     if (index > m_biggestFreeListIndex)
         m_biggestFreeListIndex = index;
 }
 
-bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize)
+bool ThreadHeapForHeapPage::expandObject(HeapObjectHeader* header, size_t newSize)
 {
     // It's possible that Vector requests a smaller expanded size because
     // Vector::shrinkCapacity can set a capacity smaller than the actual payload
     // size.
     if (header->payloadSize() >= newSize)
         return true;
     size_t allocationSize = allocationSizeFromSize(newSize);
     ASSERT(allocationSize > header->size());
     size_t expandSize = allocationSize - header->size();
     if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) {
         m_currentAllocationPoint += expandSize;
         m_remainingAllocationSize -= expandSize;
 
         // Unpoison the memory used for the object (payload).
         ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize);
         FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize);
         header->setSize(allocationSize);
         ASSERT(findPageFromAddress(header->payloadEnd() - 1));
         return true;
     }
     return false;
 }
 
-void ThreadHeap::shrinkObject(HeapObjectHeader* header, size_t newSize)
+void ThreadHeapForHeapPage::shrinkObject(HeapObjectHeader* header, size_t newSize)
 {
     ASSERT(header->payloadSize() > newSize);
     size_t allocationSize = allocationSizeFromSize(newSize);
     ASSERT(header->size() > allocationSize);
     size_t shrinkSize = header->size() - allocationSize;
     if (header->payloadEnd() == m_currentAllocationPoint) {
         m_currentAllocationPoint -= shrinkSize;
         m_remainingAllocationSize += shrinkSize;
         FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize);
         ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize);
         header->setSize(allocationSize);
     } else {
         ASSERT(shrinkSize >= sizeof(HeapObjectHeader));
         ASSERT(header->gcInfoIndex() > 0);
         HeapObjectHeader* freedHeader = new (NotNull, header->payloadEnd() - shrinkSize) HeapObjectHeader(shrinkSize, header->gcInfoIndex());
         freedHeader->markPromptlyFreed();
         ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header)));
         m_promptlyFreedSize += shrinkSize;
         header->setSize(allocationSize);
     }
 }
 
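Editor's note: expandObject() and shrinkObject() exploit the same invariant: if the object's payload ends exactly at m_currentAllocationPoint, resizing is just moving the bump pointer. A standalone sketch of that check (hypothetical types; ASan poisoning and header bookkeeping omitted):

#include <cstddef>

typedef char* Address;

struct ResizeModel {
    Address currentAllocationPoint;
    size_t remainingAllocationSize;

    // Grow in place only when the object is the most recent allocation.
    bool expand(Address payloadEnd, size_t expandSize)
    {
        if (payloadEnd != currentAllocationPoint || expandSize > remainingAllocationSize)
            return false;                 // not adjacent: caller must reallocate
        currentAllocationPoint += expandSize;
        remainingAllocationSize -= expandSize;
        return true;
    }

    // Shrinking an adjacent object gives the bytes straight back; a
    // non-adjacent shrink instead plants a promptly-freed header at the tail.
    void shrinkAdjacent(size_t shrinkSize)
    {
        currentAllocationPoint -= shrinkSize;
        remainingAllocationSize += shrinkSize;
    }
};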
-void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header)
+void ThreadHeapForHeapPage::promptlyFreeObject(HeapObjectHeader* header)
 {
-    ASSERT(!m_threadState->sweepForbidden());
+    ASSERT(!threadState()->sweepForbidden());
     header->checkHeader();
     Address address = reinterpret_cast<Address>(header);
     Address payload = header->payload();
     size_t size = header->size();
     size_t payloadSize = header->payloadSize();
     ASSERT(size > 0);
     ASSERT(pageFromObject(address) == findPageFromAddress(address));
 
     {
-        ThreadState::SweepForbiddenScope forbiddenScope(m_threadState);
+        ThreadState::SweepForbiddenScope forbiddenScope(threadState());
         header->finalize(payload, payloadSize);
         if (address + size == m_currentAllocationPoint) {
             m_currentAllocationPoint = address;
             if (m_lastRemainingAllocationSize == m_remainingAllocationSize) {
                 Heap::decreaseAllocatedObjectSize(size);
                 m_lastRemainingAllocationSize += size;
             }
             m_remainingAllocationSize += size;
             FILL_ZERO_IF_PRODUCTION(address, size);
             ASAN_POISON_MEMORY_REGION(address, size);
             return;
         }
         FILL_ZERO_IF_PRODUCTION(payload, payloadSize);
         header->markPromptlyFreed();
     }
 
     m_promptlyFreedSize += size;
 }
 
-bool ThreadHeap::coalesce()
+bool ThreadHeapForHeapPage::coalesce()
 {
     // Don't coalesce heaps if there are not enough promptly freed entries
     // to be coalesced.
     //
     // FIXME: This threshold is determined just to optimize blink_perf
     // benchmarks. Coalescing is very sensitive to the threashold and
     // we need further investigations on the coalescing scheme.
     if (m_promptlyFreedSize < 1024 * 1024)
         return false;
 
-    if (m_threadState->sweepForbidden())
+    if (threadState()->sweepForbidden())
         return false;
 
     ASSERT(!hasCurrentAllocationArea());
     TRACE_EVENT0("blink_gc", "ThreadHeap::coalesce");
 
     // Rebuild free lists.
     m_freeList.clear();
     size_t freedSize = 0;
-    for (HeapPage* page = m_firstPage; page; page = page->next()) {
+    for (HeapPage* page = static_cast<HeapPage*>(m_firstPage); page; page = static_cast<HeapPage*>(page->next())) {
         page->clearObjectStartBitMap();
         Address startOfGap = page->payload();
         for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) {
             HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
             size_t size = header->size();
             ASSERT(size > 0);
             ASSERT(size < blinkPagePayloadSize());
 
             if (header->isPromptlyFreed()) {
                 ASSERT(size >= sizeof(HeapObjectHeader));
(...skipping 20 matching lines...)
 
         if (startOfGap != page->payloadEnd())
             addToFreeList(startOfGap, page->payloadEnd() - startOfGap);
     }
     Heap::decreaseAllocatedObjectSize(freedSize);
     ASSERT(m_promptlyFreedSize == freedSize);
     m_promptlyFreedSize = 0;
     return true;
 }
 
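Editor's note: the 20 elided lines merge runs of promptly-freed (and already-free) blocks into single gaps that are handed back to addToFreeList(). A toy model of that merging walk, under assumed types (the real code also rebuilds FreeListEntry links in place and zaps the freed memory):

#include <cstddef>
#include <vector>

// Made-up flat view of a page: each record is one header-prefixed block
// in address order.
struct ToyBlock {
    size_t size;
    bool promptlyFreed;
};

// Returns the coalesced gap sizes that would be re-added to the free list.
std::vector<size_t> coalescePage(const std::vector<ToyBlock>& page)
{
    std::vector<size_t> gaps;
    size_t gap = 0;
    for (const ToyBlock& block : page) {
        if (block.promptlyFreed) {
            gap += block.size;            // extend the current gap
        } else {
            if (gap)
                gaps.push_back(gap);      // a live object ends the gap
            gap = 0;
        }
    }
    if (gap)
        gaps.push_back(gap);              // gap reaching payloadEnd()
    return gaps;
}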
-Address ThreadHeap::allocateLargeObject(size_t size, size_t gcInfoIndex)
+Address ThreadHeapForLargeObject::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex)
 {
     // Caller already added space for object header and rounded up to allocation
     // alignment
-    ASSERT(!(size & allocationMask));
+    ASSERT(!(allocationSize & allocationMask));
 
-    size_t allocationSize = sizeof(LargeObject) + size;
-
-    // Ensure that there is enough space for alignment. If the header
-    // is not a multiple of 8 bytes we will allocate an extra
-    // headerPadding bytes to ensure it 8 byte aligned.
-    allocationSize += headerPadding();
+    // 1. Check if we should trigger a GC.
+    threadState()->scheduleGCOrForceConservativeGCIfNeeded();
+
+    // 2. Try to sweep large objects more than allocationSize bytes
+    // before allocating a new large object.
+    Address result = lazySweep(allocationSize, gcInfoIndex);
+    if (result)
+        return result;
 
+    // 3. If we have failed in sweeping allocationSize bytes,
+    // we complete sweeping before allocating this large object.
+    threadState()->completeSweep();
+    return doAllocateLargeObject(allocationSize, gcInfoIndex);
+}
+
+Address ThreadHeapForLargeObject::doAllocateLargeObject(size_t allocationSize, size_t gcInfoIndex)
+{
+    size_t largeObjectSize = sizeof(LargeObject) + headerPadding() + allocationSize;
     // If ASan is supported we add allocationGranularity bytes to the allocated
     // space and poison that to detect overflows
 #if defined(ADDRESS_SANITIZER)
-    allocationSize += allocationGranularity;
+    largeObjectSize += allocationGranularity;
 #endif
 
-    // 1. Check if we should trigger a GC.
-    updateRemainingAllocationSize();
-    m_threadState->scheduleGCOrForceConservativeGCIfNeeded();
-
-    // 2. Try to sweep large objects more than allocationSize bytes
-    // before allocating a new large object.
-    if (!lazySweepLargeObjects(allocationSize)) {
-        // 3. If we have failed in sweeping allocationSize bytes,
-        // we complete sweeping before allocating this large object.
-        m_threadState->completeSweep();
-    }
-
-    m_threadState->shouldFlushHeapDoesNotContainCache();
-    PageMemory* pageMemory = PageMemory::allocate(allocationSize);
-    m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region());
+    threadState()->shouldFlushHeapDoesNotContainCache();
+    PageMemory* pageMemory = PageMemory::allocate(largeObjectSize);
+    threadState()->allocatedRegionsSinceLastGC().append(pageMemory->region());
     Address largeObjectAddress = pageMemory->writableStart();
     Address headerAddress = largeObjectAddress + sizeof(LargeObject) + headerPadding();
 #if ENABLE(ASSERT)
     // Verify that the allocated PageMemory is expectedly zeroed.
-    for (size_t i = 0; i < size; ++i)
+    for (size_t i = 0; i < largeObjectSize; ++i)
         ASSERT(!headerAddress[i]);
 #endif
     ASSERT(gcInfoIndex > 0);
     HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex);
     Address result = headerAddress + sizeof(*header);
     ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask));
-    LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, size);
+    LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, allocationSize);
     header->checkHeader();
 
     // Poison the object header and allocationGranularity bytes after the object
     ASAN_POISON_MEMORY_REGION(header, sizeof(*header));
     ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity);
 
-    largeObject->link(&m_firstLargeObject);
+    largeObject->link(&m_firstPage);
 
     Heap::increaseAllocatedSpace(largeObject->size());
     Heap::increaseAllocatedObjectSize(largeObject->size());
     return result;
 }
 
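Editor's note: doAllocateLargeObject() lays a large allocation out as [ LargeObject page header | headerPadding() | HeapObjectHeader | payload ]. The arithmetic below restates that layout standalone; all sizes are illustrative assumptions (sizeof(LargeObject) and headerPadding() are platform dependent):

#include <cassert>
#include <cstddef>

int main()
{
    const size_t sizeofLargeObject = 32;      // assumed, not Blink's real value
    const size_t headerPadding = 0;           // whatever keeps the header aligned
    const size_t sizeofHeapObjectHeader = 8;
    const size_t allocationSize = 1 << 17;    // caller-rounded object size

    // largeObjectSize as computed at the top of doAllocateLargeObject().
    size_t largeObjectSize = sizeofLargeObject + headerPadding + allocationSize;

    // headerAddress and the returned payload, as offsets from writableStart().
    size_t headerOffset = sizeofLargeObject + headerPadding;
    size_t payloadOffset = headerOffset + sizeofHeapObjectHeader;

    assert(payloadOffset % 8 == 0);           // result must be allocation aligned
    assert(largeObjectSize >= payloadOffset + allocationSize - sizeofHeapObjectHeader);
    return 0;
}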
-void ThreadHeap::freeLargeObject(LargeObject* object)
+void ThreadHeapForLargeObject::freeLargeObject(LargeObject* object)
 {
     object->heapObjectHeader()->finalize(object->payload(), object->payloadSize());
     Heap::decreaseAllocatedSpace(object->size());
 
     // Unpoison the object header and allocationGranularity bytes after the
     // object before freeing.
     ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader));
     ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity);
 
     if (object->terminating()) {
         ASSERT(ThreadState::current()->isTerminating());
         // The thread is shutting down and this page is being removed as a part
         // of the thread local GC. In that case the object could be traced in
         // the next global GC if there is a dangling pointer from a live thread
         // heap to this dead thread heap. To guard against this, we put the
         // page into the orphaned page pool and zap the page memory. This
         // ensures that tracing the dangling pointer in the next global GC just
         // crashes instead of causing use-after-frees. After the next global
         // GC, the orphaned pages are removed.
-        Heap::orphanedPagePool()->addOrphanedPage(m_index, object);
+        Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object);
     } else {
         ASSERT(!ThreadState::current()->isTerminating());
         PageMemory* memory = object->storage();
         object->~LargeObject();
         delete memory;
     }
 }
 
 template<typename DataType>
 PagePool<DataType>::PagePool()
(...skipping 40 matching lines...)
 
         // We got some memory, but failed to commit it, try again.
         delete memory;
     }
     return nullptr;
 }
 
 BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap)
     : m_storage(storage)
     , m_heap(heap)
+    , m_next(nullptr)
     , m_terminating(false)
     , m_swept(true)
 {
     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
 }
 
 void BaseHeapPage::markOrphaned()
 {
     m_heap = nullptr;
     m_terminating = false;
(...skipping 85 matching lines...)
         for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) {
             BaseHeapPage* page = entry->data;
             if (page->contains(reinterpret_cast<Address>(object)))
                 return true;
         }
     }
     return false;
 }
 #endif
 
-void ThreadHeap::freePage(HeapPage* page)
+void ThreadHeapForHeapPage::freePage(HeapPage* page)
 {
     Heap::decreaseAllocatedSpace(blinkPageSize);
 
     if (page->terminating()) {
         // The thread is shutting down and this page is being removed as a part
         // of the thread local GC. In that case the object could be traced in
         // the next global GC if there is a dangling pointer from a live thread
         // heap to this dead thread heap. To guard against this, we put the
         // page into the orphaned page pool and zap the page memory. This
         // ensures that tracing the dangling pointer in the next global GC just
         // crashes instead of causing use-after-frees. After the next global
         // GC, the orphaned pages are removed.
-        Heap::orphanedPagePool()->addOrphanedPage(m_index, page);
+        Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page);
     } else {
         PageMemory* memory = page->storage();
         page->~HeapPage();
-        Heap::freePagePool()->addFreePage(m_index, memory);
+        Heap::freePagePool()->addFreePage(heapIndex(), memory);
     }
 }
 
-void ThreadHeap::allocatePage()
+void ThreadHeapForHeapPage::allocatePage()
 {
-    m_threadState->shouldFlushHeapDoesNotContainCache();
-    PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index);
+    threadState()->shouldFlushHeapDoesNotContainCache();
+    PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex());
     // We continue allocating page memory until we succeed in committing one.
     while (!pageMemory) {
         // Allocate a memory region for blinkPagesPerRegion pages that
         // will each have the following layout.
         //
         // [ guard os page | ... payload ... | guard os page ]
         //     ^---{ aligned to blink page size }
         PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages();
-        m_threadState->allocatedRegionsSinceLastGC().append(region);
+        threadState()->allocatedRegionsSinceLastGC().append(region);
 
         // Setup the PageMemory object for each of the pages in the region.
         size_t offset = 0;
         for (size_t i = 0; i < blinkPagesPerRegion; ++i) {
             PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize());
             // Take the first possible page ensuring that this thread actually
             // gets a page and add the rest to the page pool.
             if (!pageMemory) {
                 if (memory->commit())
                     pageMemory = memory;
                 else
                     delete memory;
             } else {
-                Heap::freePagePool()->addFreePage(m_index, memory);
+                Heap::freePagePool()->addFreePage(heapIndex(), memory);
             }
             offset += blinkPageSize;
         }
     }
     HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this);
-
     page->link(&m_firstPage);
 
     Heap::increaseAllocatedSpace(blinkPageSize);
     addToFreeList(page->payload(), page->payloadSize());
 }
 
 #if ENABLE(ASSERT)
-bool ThreadHeap::pagesToBeSweptContains(Address address)
+bool ThreadHeapForHeapPage::pagesToBeSweptContains(Address address)
 {
-    for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
+    for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) {
         if (page->contains(address))
             return true;
     }
     return false;
 }
 #endif
 
 size_t ThreadHeap::objectPayloadSizeForTesting()
 {
     ASSERT(isConsistentForSweeping());
     ASSERT(!m_firstUnsweptPage);
-    ASSERT(!m_firstUnsweptLargeObject);
 
     size_t objectPayloadSize = 0;
-    for (HeapPage* page = m_firstPage; page; page = page->next())
+    for (BaseHeapPage* page = m_firstPage; page; page = page->next())
         objectPayloadSize += page->objectPayloadSizeForTesting();
-    for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next())
-        objectPayloadSize += largeObject->objectPayloadSizeForTesting();
     return objectPayloadSize;
 }
 
 #if ENABLE(ASSERT)
-bool ThreadHeap::isConsistentForSweeping()
+bool ThreadHeapForHeapPage::isConsistentForSweeping()
 {
     // A thread heap is consistent for sweeping if none of the pages to be swept
     // contain a freelist block or the current allocation point.
     for (size_t i = 0; i < blinkPageSizeLog2; ++i) {
         for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) {
             if (pagesToBeSweptContains(freeListEntry->address()))
                 return false;
         }
     }
     if (hasCurrentAllocationArea()) {
         if (pagesToBeSweptContains(currentAllocationPoint()))
             return false;
     }
     return true;
 }
 #endif
 
 void ThreadHeap::makeConsistentForSweeping()
 {
-    preparePagesForSweeping();
-    setAllocationPoint(nullptr, 0);
     clearFreeLists();
-}
-
-void ThreadHeap::preparePagesForSweeping()
-{
     ASSERT(isConsistentForSweeping());
-    for (HeapPage* page = m_firstPage; page; page = page->next())
+    for (BaseHeapPage* page = m_firstPage; page; page = page->next())
         page->markAsUnswept();
 
     // If a new GC is requested before this thread got around to sweep,
     // ie. due to the thread doing a long running operation, we clear
     // the mark bits and mark any of the dead objects as dead. The latter
     // is used to ensure the next GC marking does not trace already dead
     // objects. If we trace a dead object we could end up tracing into
     // garbage or the middle of another object via the newly conservatively
     // found object.
-    HeapPage* previousPage = nullptr;
-    for (HeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
+    BaseHeapPage* previousPage = nullptr;
+    for (BaseHeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
         page->markUnmarkedObjectsDead();
         ASSERT(!page->hasBeenSwept());
     }
     if (previousPage) {
         ASSERT(m_firstUnsweptPage);
         previousPage->m_next = m_firstPage;
         m_firstPage = m_firstUnsweptPage;
         m_firstUnsweptPage = nullptr;
     }
     ASSERT(!m_firstUnsweptPage);
-
-    for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next())
-        largeObject->markAsUnswept();
-
-    LargeObject* previousLargeObject = nullptr;
-    for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; previousLargeObject = largeObject, largeObject = largeObject->next()) {
-        largeObject->markUnmarkedObjectsDead();
-        ASSERT(!largeObject->hasBeenSwept());
-    }
-    if (previousLargeObject) {
-        ASSERT(m_firstUnsweptLargeObject);
-        previousLargeObject->m_next = m_firstLargeObject;
-        m_firstLargeObject = m_firstUnsweptLargeObject;
-        m_firstUnsweptLargeObject = nullptr;
-    }
-    ASSERT(!m_firstUnsweptLargeObject);
 }
 
-void ThreadHeap::clearFreeLists()
+void ThreadHeapForHeapPage::clearFreeLists()
 {
+    setAllocationPoint(nullptr, 0);

        sof 2015/02/06 09:46:52: Why is this here?
        haraken 2015/02/06 09:59:28: Since setAllocationPoint is a method of ThreadHeap

     m_freeList.clear();
 }
 
 void FreeList::clear()
 {
     m_biggestFreeListIndex = 0;
     for (size_t i = 0; i < blinkPageSizeLog2; ++i)
         m_freeLists[i] = nullptr;
 }
 
 int FreeList::bucketIndexForSize(size_t size)
 {
     ASSERT(size > 0);
     int index = -1;
     while (size) {
         size >>= 1;
         index++;
     }
     return index;
 }
 
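Editor's note: bucketIndexForSize() computes floor(log2(size)) by shift-counting, so a block of size s lands in bucket i with 2^i <= s < 2^(i+1). A standalone spot check of a few values:

#include <cassert>
#include <cstddef>

// Copy of the shift-counting loop above, lifted out of FreeList.
static int bucketIndexForSize(size_t size)
{
    assert(size > 0);
    int index = -1;
    while (size) {
        size >>= 1;
        index++;
    }
    return index;
}

int main()
{
    assert(bucketIndexForSize(1) == 0);
    assert(bucketIndexForSize(96) == 6);     // 64 <= 96 < 128
    assert(bucketIndexForSize(4096) == 12);  // exactly 2^12
    return 0;
}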
 HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap)
     : BaseHeapPage(storage, heap)
-    , m_next(nullptr)
 {
     m_objectStartBitMapComputed = false;
     ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this)));
 }
 
1524 size_t HeapPage::objectPayloadSizeForTesting() 1434 size_t HeapPage::objectPayloadSizeForTesting()
1525 { 1435 {
1526 size_t objectPayloadSize = 0; 1436 size_t objectPayloadSize = 0;
1527 Address headerAddress = payload(); 1437 Address headerAddress = payload();
1528 markAsSwept(); 1438 markAsSwept();
(...skipping 22 matching lines...)
1551 clearObjectStartBitMap(); 1461 clearObjectStartBitMap();
1552 1462
1553 size_t markedObjectSize = 0; 1463 size_t markedObjectSize = 0;
1554 Address startOfGap = payload(); 1464 Address startOfGap = payload();
1555 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { 1465 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) {
1556 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1466 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1557 ASSERT(header->size() > 0); 1467 ASSERT(header->size() > 0);
1558 ASSERT(header->size() < blinkPagePayloadSize()); 1468 ASSERT(header->size() < blinkPagePayloadSize());
1559 1469
1560 if (header->isPromptlyFreed()) 1470 if (header->isPromptlyFreed())
1561 heap()->decreasePromptlyFreedSize(header->size()); 1471 heapForHeapPage()->decreasePromptlyFreedSize(header->size());
1562 if (header->isFree()) { 1472 if (header->isFree()) {
1563 size_t size = header->size(); 1473 size_t size = header->size();
1564 // Zero the memory in the free list header to maintain the 1474 // Zero the memory in the free list header to maintain the
1565 // invariant that memory on the free list is zero filled. 1475 // invariant that memory on the free list is zero filled.
1566 // The rest of the memory is already on the free list and is 1476 // The rest of the memory is already on the free list and is
1567 // therefore already zero filled. 1477 // therefore already zero filled.
1568 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); 1478 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry));
1569 headerAddress += size; 1479 headerAddress += size;
1570 continue; 1480 continue;
1571 } 1481 }
(...skipping 12 matching lines...)
1584 header->finalize(payload, payloadSize); 1494 header->finalize(payload, payloadSize);
1585 // This memory will be added to the freelist. Maintain the invariant 1495 // This memory will be added to the freelist. Maintain the invariant
1586 // that memory on the freelist is zero filled. 1496 // that memory on the freelist is zero filled.
1587 FILL_ZERO_IF_PRODUCTION(headerAddress, size); 1497 FILL_ZERO_IF_PRODUCTION(headerAddress, size);
1588 ASAN_POISON_MEMORY_REGION(payload, payloadSize); 1498 ASAN_POISON_MEMORY_REGION(payload, payloadSize);
1589 headerAddress += size; 1499 headerAddress += size;
1590 continue; 1500 continue;
1591 } 1501 }
1592 1502
1593 if (startOfGap != headerAddress) 1503 if (startOfGap != headerAddress)
1594 heap()->addToFreeList(startOfGap, headerAddress - startOfGap); 1504 heapForHeapPage()->addToFreeList(startOfGap, headerAddress - startOfGap);
1595 header->unmark(); 1505 header->unmark();
1596 headerAddress += header->size(); 1506 headerAddress += header->size();
1597 markedObjectSize += header->size(); 1507 markedObjectSize += header->size();
1598 startOfGap = headerAddress; 1508 startOfGap = headerAddress;
1599 } 1509 }
1600 if (startOfGap != payloadEnd()) 1510 if (startOfGap != payloadEnd())
1601 heap()->addToFreeList(startOfGap, payloadEnd() - startOfGap); 1511 heapForHeapPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap);
1602 1512
1603 if (markedObjectSize) 1513 if (markedObjectSize)
1604 Heap::increaseMarkedObjectSize(markedObjectSize); 1514 Heap::increaseMarkedObjectSize(markedObjectSize);
1605 } 1515 }
1606 1516
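The sweep above makes one linear pass over the page: runs of free-list entries and finalized (unmarked) objects are coalesced into a single gap, and the gap is handed to addToFreeList only when a marked object, or the end of the payload, terminates it. A toy version of just the gap-coalescing logic, over hypothetical block records rather than real heap memory:

#include <cstddef>
#include <vector>

// Hypothetical block record standing in for a HeapObjectHeader.
struct Block {
    size_t size;
    bool live; // corresponds to header->isMarked()
};

struct Gap {
    size_t offset;
    size_t size;
};

// One linear pass: dead blocks merge into the current gap; a live
// block closes the gap (if any) and survives the sweep.
std::vector<Gap> sweepBlocks(const std::vector<Block>& blocks)
{
    std::vector<Gap> freeList;
    size_t offset = 0;
    size_t startOfGap = 0;
    for (const Block& block : blocks) {
        if (block.live) {
            if (startOfGap != offset)
                freeList.push_back({startOfGap, offset - startOfGap});
            startOfGap = offset + block.size;
        }
        offset += block.size;
    }
    if (startOfGap != offset)
        freeList.push_back({startOfGap, offset - startOfGap});
    return freeList;
}

Coalescing adjacent dead objects into one entry keeps the free list short and produces the largest possible reusable chunks.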
1607 void HeapPage::markUnmarkedObjectsDead() 1517 void HeapPage::markUnmarkedObjectsDead()
1608 { 1518 {
1609 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { 1519 for (Address headerAddress = payload(); headerAddress < payloadEnd();) {
1610 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1520 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1611 ASSERT(header->size() < blinkPagePayloadSize()); 1521 ASSERT(header->size() < blinkPagePayloadSize());
1612 // Check for a free list entry first since we cannot call 1522 // Check for a free list entry first since we cannot call
1613 // isMarked on a free list entry. 1523 // isMarked on a free list entry.
1614 if (header->isFree()) { 1524 if (header->isFree()) {
1615 headerAddress += header->size(); 1525 headerAddress += header->size();
1616 continue; 1526 continue;
1617 } 1527 }
1618 header->checkHeader(); 1528 header->checkHeader();
1619 if (header->isMarked()) 1529 if (header->isMarked())
1620 header->unmark(); 1530 header->unmark();
1621 else 1531 else
1622 header->markDead(); 1532 header->markDead();
1623 headerAddress += header->size(); 1533 headerAddress += header->size();
1624 } 1534 }
1625 } 1535 }
1626 1536
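Both HeapPage::markUnmarkedObjectsDead and the LargeObject version apply the same end-of-cycle transition to each header: marked objects are unmarked so the next GC can re-mark them, and unmarked objects are stamped dead so a later conservative scan will not trace through stale memory. A compact sketch of that transition, with a hypothetical MarkState enum standing in for the header bits:

// Hypothetical mark-state flags on an object header.
enum class MarkState { Unmarked, Marked, Dead };

// End-of-GC transition used when sweeping is deferred:
// survivors are reset for the next cycle, garbage is pinned as dead.
inline MarkState markUnmarkedDead(MarkState state)
{
    switch (state) {
    case MarkState::Marked:
        return MarkState::Unmarked; // survivor, ready for the next GC
    case MarkState::Unmarked:
        return MarkState::Dead;     // unreachable; must never be traced
    case MarkState::Dead:
        return MarkState::Dead;     // stays dead
    }
    return state;
}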
1627 void HeapPage::removeFromHeap(ThreadHeap* heap) 1537 void HeapPage::removeFromHeap()
1628 { 1538 {
1629 heap->freePage(this); 1539 heapForHeapPage()->freePage(this);
1540 }
1541
1542 ThreadHeapForHeapPage* HeapPage::heapForHeapPage()
1543 {
1544 return static_cast<ThreadHeapForHeapPage*>(heap());
1630 } 1545 }
1631 1546
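This pair of methods is the heart of the de-duplication in this patch: removeFromHeap() no longer takes the owning heap as a parameter; each page kind overrides the virtual and downcasts its stored heap pointer to the matching ThreadHeap subclass, which is safe because a page is only ever owned by a heap of its own kind. A minimal sketch of the pattern, with hypothetical names mirroring (not reproducing) the real classes:

// Hypothetical mirror of the BaseHeapPage / ThreadHeap split.
class BaseHeap {
public:
    virtual ~BaseHeap() = default;
};

class BasePage {
public:
    explicit BasePage(BaseHeap* heap) : m_heap(heap) { }
    virtual ~BasePage() = default;
    // Was removeFromHeap(ThreadHeap*); now each page kind knows which
    // concrete heap owns it.
    virtual void removeFromHeap() = 0;
protected:
    BaseHeap* m_heap;
};

class PageHeap : public BaseHeap {
public:
    void freePage(BasePage*) { /* return the page to the free page pool */ }
};

class LargeObjectHeap : public BaseHeap {
public:
    void freeLargeObject(BasePage*) { /* unmap the large allocation */ }
};

class NormalPage : public BasePage {
public:
    using BasePage::BasePage;
    void removeFromHeap() override
    {
        // Safe downcast: a NormalPage is only ever owned by a PageHeap.
        static_cast<PageHeap*>(m_heap)->freePage(this);
    }
};

class LargePage : public BasePage {
public:
    using BasePage::BasePage;
    void removeFromHeap() override
    {
        static_cast<LargeObjectHeap*>(m_heap)->freeLargeObject(this);
    }
};

Callers that hold only a BasePage* can now free it uniformly via page->removeFromHeap(), which is what lets the caller-side duplication disappear.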
1632 void HeapPage::populateObjectStartBitMap() 1547 void HeapPage::populateObjectStartBitMap()
1633 { 1548 {
1634 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); 1549 memset(&m_objectStartBitMap, 0, objectStartBitMapSize);
1635 Address start = payload(); 1550 Address start = payload();
1636 for (Address headerAddress = start; headerAddress < payloadEnd();) { 1551 for (Address headerAddress = start; headerAddress < payloadEnd();) {
1637 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); 1552 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress);
1638 size_t objectOffset = headerAddress - start; 1553 size_t objectOffset = headerAddress - start;
1639 ASSERT(!(objectOffset & allocationMask)); 1554 ASSERT(!(objectOffset & allocationMask));
(...skipping 837 matching lines...)
2477 // because the hierarchy was not completely moved to the heap and 2392 // because the hierarchy was not completely moved to the heap and
2478 // some heap allocated objects own objects that contain persistents 2393 // some heap allocated objects own objects that contain persistents
2479 // pointing to other heap allocated objects. 2394 // pointing to other heap allocated objects.
2480 for (int i = 0; i < 5; ++i) 2395 for (int i = 0; i < 5; ++i)
2481 collectGarbage(ThreadState::NoHeapPointersOnStack); 2396 collectGarbage(ThreadState::NoHeapPointersOnStack);
2482 } 2397 }
2483 2398
2484 void ThreadHeap::prepareHeapForTermination() 2399 void ThreadHeap::prepareHeapForTermination()
2485 { 2400 {
2486 ASSERT(!m_firstUnsweptPage); 2401 ASSERT(!m_firstUnsweptPage);
2487 ASSERT(!m_firstUnsweptLargeObject); 2402 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) {
2488 for (HeapPage* page = m_firstPage; page; page = page->next()) {
2489 page->setTerminating(); 2403 page->setTerminating();
2490 } 2404 }
2491 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) {
2492 largeObject->setTerminating();
2493 }
2494 } 2405 }
2495 2406
2496 size_t Heap::objectPayloadSizeForTesting() 2407 size_t Heap::objectPayloadSizeForTesting()
2497 { 2408 {
2498 size_t objectPayloadSize = 0; 2409 size_t objectPayloadSize = 0;
2499 for (ThreadState* state : ThreadState::attachedThreads()) { 2410 for (ThreadState* state : ThreadState::attachedThreads()) {
2500 state->setGCState(ThreadState::GCRunning); 2411 state->setGCState(ThreadState::GCRunning);
2501 state->makeConsistentForSweeping(); 2412 state->makeConsistentForSweeping();
2502 objectPayloadSize += state->objectPayloadSizeForTesting(); 2413 objectPayloadSize += state->objectPayloadSizeForTesting();
2503 state->setGCState(ThreadState::EagerSweepScheduled); 2414 state->setGCState(ThreadState::EagerSweepScheduled);
(...skipping 13 matching lines...)
2517 return; 2428 return;
2518 2429
2519 // Don't promptly free large objects because their page is never reused 2430 // Don't promptly free large objects because their page is never reused
2520 // and don't free backings allocated on other threads. 2431 // and don't free backings allocated on other threads.
2521 BaseHeapPage* page = pageFromObject(address); 2432 BaseHeapPage* page = pageFromObject(address);
2522 if (page->isLargeObject() || page->heap()->threadState() != state) 2433 if (page->isLargeObject() || page->heap()->threadState() != state)
2523 return; 2434 return;
2524 2435
2525 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); 2436 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
2526 header->checkHeader(); 2437 header->checkHeader();
2527 static_cast<HeapPage*>(page)->heap()->promptlyFreeObject(header); 2438 static_cast<HeapPage*>(page)->heapForHeapPage()->promptlyFreeObject(header);
2528 } 2439 }
2529 2440
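The guards in backingFree find the page metadata by rounding the object address down to its page boundary; the prompt free is only legal for a normal (non-large) page owned by the calling thread. A sketch of that masking lookup under an assumed power-of-two page size (the constants and PageHeader layout are hypothetical, not Blink's actual blinkPageSize arithmetic):

#include <cstdint>

// Assumed power-of-two page size for the sketch.
constexpr uintptr_t kPageSize = 1 << 17; // 128 KB, hypothetical
constexpr uintptr_t kPageBaseMask = ~(kPageSize - 1);

// Hypothetical per-page metadata located at the page base.
struct PageHeader {
    bool isLargeObject;
    void* owningThread;
};

// Round an interior object pointer down to the start of its page,
// where the page header lives.
inline PageHeader* pageFromObject(const void* address)
{
    uintptr_t base = reinterpret_cast<uintptr_t>(address) & kPageBaseMask;
    return reinterpret_cast<PageHeader*>(base);
}

// Mirror of the guards above: only promptly free when the page is a
// normal page and belongs to the current thread.
bool canPromptlyFree(const void* address, void* currentThread)
{
    PageHeader* page = pageFromObject(address);
    return !page->isLargeObject && page->owningThread == currentThread;
}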
2530 void HeapAllocator::freeVectorBacking(void* address) 2441 void HeapAllocator::freeVectorBacking(void* address)
2531 { 2442 {
2532 backingFree(address); 2443 backingFree(address);
2533 } 2444 }
2534 2445
2535 void HeapAllocator::freeInlineVectorBacking(void* address) 2446 void HeapAllocator::freeInlineVectorBacking(void* address)
2536 { 2447 {
2537 backingFree(address); 2448 backingFree(address);
(...skipping 13 matching lines...)
2551 if (state->sweepForbidden()) 2462 if (state->sweepForbidden())
2552 return false; 2463 return false;
2553 ASSERT(state->isAllocationAllowed()); 2464 ASSERT(state->isAllocationAllowed());
2554 2465
2555 BaseHeapPage* page = pageFromObject(address); 2466 BaseHeapPage* page = pageFromObject(address);
2556 if (page->isLargeObject() || page->heap()->threadState() != state) 2467 if (page->isLargeObject() || page->heap()->threadState() != state)
2557 return false; 2468 return false;
2558 2469
2559 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); 2470 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
2560 header->checkHeader(); 2471 header->checkHeader();
2561 return static_cast<HeapPage*>(page)->heap()->expandObject(header, newSize); 2472 return static_cast<HeapPage*>(page)->heapForHeapPage()->expandObject(header, newSize);
2562 } 2473 }
2563 2474
2564 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) 2475 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize)
2565 { 2476 {
2566 return backingExpand(address, newSize); 2477 return backingExpand(address, newSize);
2567 } 2478 }
2568 2479
2569 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize) 2480 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize)
2570 { 2481 {
2571 return backingExpand(address, newSize); 2482 return backingExpand(address, newSize);
(...skipping 25 matching lines...)
2597 // FIXME: This wastes unused memory. If this increases memory 2508 // FIXME: This wastes unused memory. If this increases memory
2598 // consumption, we should reallocate a new large object and shrink the 2509 // consumption, we should reallocate a new large object and shrink the
2599 // memory usage. 2510 // memory usage.
2600 return; 2511 return;
2601 } 2512 }
2602 if (page->heap()->threadState() != state) 2513 if (page->heap()->threadState() != state)
2603 return; 2514 return;
2604 2515
2605 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); 2516 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address);
2606 header->checkHeader(); 2517 header->checkHeader();
2607 static_cast<HeapPage*>(page)->heap()->shrinkObject(header, quantizedShrunkSize); 2518 static_cast<HeapPage*>(page)->heapForHeapPage()->shrinkObject(header, quantizedShrunkSize);
2608 } 2519 }
2609 2520
2610 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) 2521 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
2611 { 2522 {
2612 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); 2523 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
2613 } 2524 }
2614 2525
2615 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) 2526 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize)
2616 { 2527 {
2617 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); 2528 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize);
(...skipping 99 matching lines...)
2717 bool Heap::s_shutdownCalled = false; 2628 bool Heap::s_shutdownCalled = false;
2718 bool Heap::s_lastGCWasConservative = false; 2629 bool Heap::s_lastGCWasConservative = false;
2719 FreePagePool* Heap::s_freePagePool; 2630 FreePagePool* Heap::s_freePagePool;
2720 OrphanedPagePool* Heap::s_orphanedPagePool; 2631 OrphanedPagePool* Heap::s_orphanedPagePool;
2721 Heap::RegionTree* Heap::s_regionTree = nullptr; 2632 Heap::RegionTree* Heap::s_regionTree = nullptr;
2722 size_t Heap::s_allocatedObjectSize = 0; 2633 size_t Heap::s_allocatedObjectSize = 0;
2723 size_t Heap::s_allocatedSpace = 0; 2634 size_t Heap::s_allocatedSpace = 0;
2724 size_t Heap::s_markedObjectSize = 0; 2635 size_t Heap::s_markedObjectSize = 0;
2725 2636
2726 } // namespace blink 2637 } // namespace blink