OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 514 matching lines...) |
525 | 525 |
526 void LargeObject::markUnmarkedObjectsDead() | 526 void LargeObject::markUnmarkedObjectsDead() |
527 { | 527 { |
528 HeapObjectHeader* header = heapObjectHeader(); | 528 HeapObjectHeader* header = heapObjectHeader(); |
529 if (header->isMarked()) | 529 if (header->isMarked()) |
530 header->unmark(); | 530 header->unmark(); |
531 else | 531 else |
532 header->markDead(); | 532 header->markDead(); |
533 } | 533 } |
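
For context on the marking bits used by LargeObject::markUnmarkedObjectsDead() above, here is a minimal standalone sketch (illustrative names, not Blink's HeapObjectHeader layout): survivors of the last marking phase get their mark bit cleared for the next cycle, and anything unmarked is flagged dead so a later conservative trace never follows it.

    // Sketch only: a header word with a "marked" and a "dead" bit, and the
    // transition applied to every object when a heap is prepared for sweeping.
    #include <cstdint>

    struct SketchHeader {
        static const uint32_t kMarked = 1u << 0;
        static const uint32_t kDead   = 1u << 1;
        uint32_t bits = 0;

        bool isMarked() const { return bits & kMarked; }
        void unmark()         { bits &= ~kMarked; }
        void markDead()       { bits |= kDead; }
    };

    // Mirrors markUnmarkedObjectsDead() above: survivors are reset for the
    // next GC cycle, everything else is flagged dead.
    inline void markUnmarkedObjectDead(SketchHeader& header)
    {
        if (header.isMarked())
            header.unmark();
        else
            header.markDead();
    }
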
534 | 534 |
535 void LargeObject::removeFromHeap() | 535 void LargeObject::removeFromHeap(ThreadHeap* heap) |
536 { | 536 { |
537 static_cast<ThreadHeapForLargeObject*>(heap())->freeLargeObject(this); | 537 heap->freeLargeObject(this); |
| 538 } |
| 539 |
| 540 ThreadHeap::ThreadHeap(ThreadState* state, int index) |
| 541 : m_currentAllocationPoint(nullptr) |
| 542 , m_remainingAllocationSize(0) |
| 543 , m_lastRemainingAllocationSize(0) |
| 544 , m_firstPage(nullptr) |
| 545 , m_firstLargeObject(nullptr) |
| 546 , m_firstUnsweptPage(nullptr) |
| 547 , m_firstUnsweptLargeObject(nullptr) |
| 548 , m_threadState(state) |
| 549 , m_index(index) |
| 550 , m_promptlyFreedSize(0) |
| 551 #if ENABLE(GC_PROFILING) |
| 552 , m_cumulativeAllocationSize(0) |
| 553 , m_allocationCount(0) |
| 554 , m_inlineAllocationCount(0) |
| 555 #endif |
| 556 { |
| 557 clearFreeLists(); |
538 } | 558 } |
539 | 559 |
540 FreeList::FreeList() | 560 FreeList::FreeList() |
541 : m_biggestFreeListIndex(0) | 561 : m_biggestFreeListIndex(0) |
542 { | 562 { |
543 } | 563 } |
544 | 564 |
545 ThreadHeap::ThreadHeap(ThreadState* state, int index) | |
546 : m_firstPage(nullptr) | |
547 , m_firstUnsweptPage(nullptr) | |
548 , m_threadState(state) | |
549 , m_index(index) | |
550 #if ENABLE(GC_PROFILING) | |
551 , m_cumulativeAllocationSize(0) | |
552 , m_allocationCount(0) | |
553 , m_inlineAllocationCount(0) | |
554 #endif | |
555 { | |
556 } | |
557 | |
558 ThreadHeapForHeapPage::ThreadHeapForHeapPage(ThreadState* state, int index) | |
559 : ThreadHeap(state, index) | |
560 , m_currentAllocationPoint(nullptr) | |
561 , m_remainingAllocationSize(0) | |
562 , m_lastRemainingAllocationSize(0) | |
563 , m_promptlyFreedSize(0) | |
564 { | |
565 clearFreeLists(); | |
566 } | |
567 | |
568 ThreadHeapForLargeObject::ThreadHeapForLargeObject(ThreadState* state, int index) | |
569 : ThreadHeap(state, index) | |
570 { | |
571 } | |
572 | |
573 ThreadHeap::~ThreadHeap() | 565 ThreadHeap::~ThreadHeap() |
574 { | 566 { |
575 ASSERT(!m_firstPage); | 567 ASSERT(!m_firstPage); |
| 568 ASSERT(!m_firstLargeObject); |
576 ASSERT(!m_firstUnsweptPage); | 569 ASSERT(!m_firstUnsweptPage); |
| 570 ASSERT(!m_firstUnsweptLargeObject); |
577 } | 571 } |
578 | 572 |
579 void ThreadHeap::cleanupPages() | 573 void ThreadHeap::cleanupPages() |
580 { | 574 { |
581 clearFreeLists(); | 575 clearFreeLists(); |
582 | 576 |
583 ASSERT(!m_firstUnsweptPage); | 577 ASSERT(!m_firstUnsweptPage); |
| 578 ASSERT(!m_firstUnsweptLargeObject); |
584 // Add the ThreadHeap's pages to the orphanedPagePool. | 579 // Add the ThreadHeap's pages to the orphanedPagePool. |
585 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { | 580 for (HeapPage* page = m_firstPage; page; page = page->m_next) { |
586 Heap::decreaseAllocatedSpace(page->size()); | 581 Heap::decreaseAllocatedSpace(blinkPageSize); |
587 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | 582 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
588 } | 583 } |
589 m_firstPage = nullptr; | 584 m_firstPage = nullptr; |
| 585 |
| 586 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->m_next) { |
| 587 Heap::decreaseAllocatedSpace(largeObject->size()); |
| 588 Heap::orphanedPagePool()->addOrphanedPage(m_index, largeObject); |
| 589 } |
| 590 m_firstLargeObject = nullptr; |
590 } | 591 } |
591 | 592 |
592 void ThreadHeapForHeapPage::updateRemainingAllocationSize() | 593 void ThreadHeap::updateRemainingAllocationSize() |
593 { | 594 { |
594 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 595 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
595 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 596 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
596 m_lastRemainingAllocationSize = remainingAllocationSize(); | 597 m_lastRemainingAllocationSize = remainingAllocationSize(); |
597 } | 598 } |
598 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 599 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
599 } | 600 } |
600 | 601 |
601 void ThreadHeapForHeapPage::setAllocationPoint(Address point, size_t size) | 602 void ThreadHeap::setAllocationPoint(Address point, size_t size) |
602 { | 603 { |
603 #if ENABLE(ASSERT) | 604 #if ENABLE(ASSERT) |
604 if (point) { | 605 if (point) { |
605 ASSERT(size); | 606 ASSERT(size); |
606 BaseHeapPage* page = pageFromObject(point); | 607 BaseHeapPage* page = pageFromObject(point); |
607 ASSERT(!page->isLargeObject()); | 608 ASSERT(!page->isLargeObject()); |
608 ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize()); | 609 ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize()); |
609 } | 610 } |
610 #endif | 611 #endif |
611 if (hasCurrentAllocationArea()) { | 612 if (hasCurrentAllocationArea()) |
612 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 613 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
613 } | |
614 updateRemainingAllocationSize(); | 614 updateRemainingAllocationSize(); |
615 m_currentAllocationPoint = point; | 615 m_currentAllocationPoint = point; |
616 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; | 616 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
617 } | 617 } |
618 | 618 |
619 Address ThreadHeapForHeapPage::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) | 619 Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
620 { | 620 { |
621 ASSERT(allocationSize > remainingAllocationSize()); | 621 ASSERT(allocationSize > remainingAllocationSize()); |
622 ASSERT(allocationSize >= allocationGranularity); | 622 ASSERT(allocationSize >= allocationGranularity); |
623 | 623 |
624 #if ENABLE(GC_PROFILING) | 624 #if ENABLE(GC_PROFILING) |
625 m_threadState->snapshotFreeListIfNecessary(); | 625 m_threadState->snapshotFreeListIfNecessary(); |
626 #endif | 626 #endif |
627 | 627 |
628 // 1. If this allocation is big enough, allocate a large object. | 628 // 1. If this allocation is big enough, allocate a large object. |
629 if (allocationSize >= largeObjectSizeThreshold) | 629 if (allocationSize >= largeObjectSizeThreshold) |
630 return static_cast<ThreadHeapForLargeObject*>(threadState()->heap(LargeObjectHeap))->allocateLargeObject(allocationSize, gcInfoIndex); | 630 return allocateLargeObject(allocationSize, gcInfoIndex); |
631 | 631 |
632 // 2. Check if we should trigger a GC. | 632 // 2. Check if we should trigger a GC. |
633 updateRemainingAllocationSize(); | 633 updateRemainingAllocationSize(); |
634 threadState()->scheduleGCIfNeeded(); | 634 threadState()->scheduleGCIfNeeded(); |
635 | 635 |
636 // 3. Try to allocate from a free list. | 636 // 3. Try to allocate from a free list. |
637 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); | 637 Address result = allocateFromFreeList(allocationSize, gcInfoIndex); |
638 if (result) | 638 if (result) |
639 return result; | 639 return result; |
640 | 640 |
641 // 4. Reset the allocation point. | 641 // 4. Reset the allocation point. |
642 setAllocationPoint(nullptr, 0); | 642 setAllocationPoint(nullptr, 0); |
643 | 643 |
644 // 5. Lazily sweep pages of this heap until we find a freed area for | 644 // 5. Lazily sweep pages of this heap until we find a freed area for |
645 // this allocation or we finish sweeping all pages of this heap. | 645 // this allocation or we finish sweeping all pages of this heap. |
646 result = lazySweep(allocationSize, gcInfoIndex); | 646 result = lazySweepPages(allocationSize, gcInfoIndex); |
647 if (result) | 647 if (result) |
648 return result; | 648 return result; |
649 | 649 |
650 // 6. Coalesce promptly freed areas and then try to allocate from a free | 650 // 6. Coalesce promptly freed areas and then try to allocate from a free |
651 // list. | 651 // list. |
652 if (coalesce()) { | 652 if (coalesce()) { |
653 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 653 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
654 if (result) | 654 if (result) |
655 return result; | 655 return result; |
656 } | 656 } |
657 | 657 |
658 // 7. Complete sweeping. | 658 // 7. Complete sweeping. |
659 threadState()->completeSweep(); | 659 threadState()->completeSweep(); |
660 | 660 |
661 // 8. Add a new page to this heap. | 661 // 8. Add a new page to this heap. |
662 allocatePage(); | 662 allocatePage(); |
663 | 663 |
664 // 9. Try to allocate from a free list. This allocation must succeed. | 664 // 9. Try to allocate from a free list. This allocation must succeed. |
665 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 665 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
666 RELEASE_ASSERT(result); | 666 RELEASE_ASSERT(result); |
667 return result; | 667 return result; |
668 } | 668 } |
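
The numbered steps above are the allocation slow path that backs the inline bump pointer. A self-contained sketch of that fast-path/slow-path split, with hypothetical names and constants; the real slow path also consults free lists, lazy sweeping and coalescing before adding a page.

    #include <cassert>
    #include <cstddef>
    #include <memory>
    #include <vector>

    // Sketch only: bump allocation from a current area, with an out-of-line
    // fallback that grabs a fresh page and retries.
    class BumpAllocatorSketch {
    public:
        void* allocate(size_t size) {
            assert(size <= kPageSize);
            if (size <= m_remaining) {        // fast path: bump the pointer
                void* result = m_point;
                m_point += size;
                m_remaining -= size;
                return result;
            }
            return outOfLineAllocate(size);   // slow path
        }

    private:
        static constexpr size_t kPageSize = 1 << 17; // hypothetical page size

        void* outOfLineAllocate(size_t size) {
            m_pages.push_back(std::unique_ptr<char[]>(new char[kPageSize]));
            m_point = m_pages.back().get();
            m_remaining = kPageSize;
            return allocate(size);            // now succeeds on the fast path
        }

        char* m_point = nullptr;
        size_t m_remaining = 0;
        std::vector<std::unique_ptr<char[]>> m_pages;
    };
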
669 | 669 |
670 Address ThreadHeapForHeapPage::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) | 670 Address ThreadHeap::allocateFromFreeList(size_t allocationSize, size_t gcInfoIndex) |
671 { | 671 { |
672 // Try reusing a block from the largest bin. The underlying reasoning | 672 // Try reusing a block from the largest bin. The underlying reasoning |
673 // being that we want to amortize this slow allocation call by carving | 673 // being that we want to amortize this slow allocation call by carving |
674 // off as a large a free block as possible in one go; a block that will | 674 // off as a large a free block as possible in one go; a block that will |
675 // service this block and let following allocations be serviced quickly | 675 // service this block and let following allocations be serviced quickly |
676 // by bump allocation. | 676 // by bump allocation. |
677 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; | 677 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; |
678 int index = m_freeList.m_biggestFreeListIndex; | 678 int index = m_freeList.m_biggestFreeListIndex; |
679 for (; index > 0; --index, bucketSize >>= 1) { | 679 for (; index > 0; --index, bucketSize >>= 1) { |
680 FreeListEntry* entry = m_freeList.m_freeLists[index]; | 680 FreeListEntry* entry = m_freeList.m_freeLists[index]; |
(...skipping 14 matching lines...) |
695 } | 695 } |
696 } | 696 } |
697 m_freeList.m_biggestFreeListIndex = index; | 697 m_freeList.m_biggestFreeListIndex = index; |
698 return nullptr; | 698 return nullptr; |
699 } | 699 } |
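
A sketch of the power-of-two bucketing this scan assumes: bucket i holds blocks of at least 2^i bytes, and the search starts at the biggest non-empty bucket so one slow call carves off a block large enough to serve many later bump allocations. Names are illustrative, not Blink's FreeList API.

    #include <cassert>
    #include <cstddef>

    // Sketch only: bucket index is floor(log2(size)).
    inline int bucketIndexForSizeSketch(size_t size)
    {
        assert(size > 0);
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }

    struct FreeEntrySketch {
        FreeEntrySketch* next;
        size_t size;
    };

    // Walk down from the biggest non-empty bucket; a hit there amortizes the
    // slow call across many subsequent bump allocations.
    inline FreeEntrySketch* takeFromBiggestBucket(FreeEntrySketch* buckets[], int biggestIndex, size_t needed)
    {
        for (int index = biggestIndex; index > 0; --index) {
            for (FreeEntrySketch* entry = buckets[index]; entry; entry = entry->next) {
                if (entry->size >= needed)
                    return entry; // caller unlinks it and sets the allocation point
            }
        }
        return nullptr;
    }
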
700 | 700 |
701 void ThreadHeap::prepareForSweep() | 701 void ThreadHeap::prepareForSweep() |
702 { | 702 { |
703 ASSERT(!threadState()->isInGC()); | 703 ASSERT(!threadState()->isInGC()); |
704 ASSERT(!m_firstUnsweptPage); | 704 ASSERT(!m_firstUnsweptPage); |
| 705 ASSERT(!m_firstUnsweptLargeObject); |
705 | 706 |
706 // Move all pages to a list of unswept pages. | 707 // Move all pages to a list of unswept pages. |
707 m_firstUnsweptPage = m_firstPage; | 708 m_firstUnsweptPage = m_firstPage; |
| 709 m_firstUnsweptLargeObject = m_firstLargeObject; |
708 m_firstPage = nullptr; | 710 m_firstPage = nullptr; |
| 711 m_firstLargeObject = nullptr; |
709 } | 712 } |
710 | 713 |
711 Address ThreadHeap::lazySweep(size_t allocationSize, size_t gcInfoIndex) | 714 Address ThreadHeap::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) |
712 { | 715 { |
| 716 ASSERT(!hasCurrentAllocationArea()); |
| 717 ASSERT(allocationSize < largeObjectSizeThreshold); |
| 718 |
713 // If there are no pages to be swept, return immediately. | 719 // If there are no pages to be swept, return immediately. |
714 if (!m_firstUnsweptPage) | 720 if (!m_firstUnsweptPage) |
715 return nullptr; | 721 return nullptr; |
716 | 722 |
717 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | 723 RELEASE_ASSERT(threadState()->isSweepingInProgress()); |
718 | 724 |
719 // lazySweepPages() can be called recursively if finalizers invoked in | 725 // lazySweepPages() can be called recursively if finalizers invoked in |
720 // page->sweep() allocate memory and the allocation triggers | 726 // page->sweep() allocate memory and the allocation triggers |
721 // lazySweepPages(). This check prevents the sweeping from being executed | 727 // lazySweepPages(). This check prevents the sweeping from being executed |
722 // recursively. | 728 // recursively. |
723 if (threadState()->sweepForbidden()) | 729 if (threadState()->sweepForbidden()) |
724 return nullptr; | 730 return nullptr; |
725 | 731 |
726 TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages"); | 732 TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepPages"); |
727 ThreadState::SweepForbiddenScope scope(threadState()); | 733 ThreadState::SweepForbiddenScope scope(m_threadState); |
728 | 734 |
729 if (threadState()->isMainThread()) | 735 if (threadState()->isMainThread()) |
730 ScriptForbiddenScope::enter(); | 736 ScriptForbiddenScope::enter(); |
731 | 737 |
732 Address result = lazySweepPages(allocationSize, gcInfoIndex); | |
733 | |
734 if (threadState()->isMainThread()) | |
735 ScriptForbiddenScope::exit(); | |
736 return result; | |
737 } | |
738 | |
739 Address ThreadHeapForHeapPage::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | |
740 { | |
741 ASSERT(!hasCurrentAllocationArea()); | |
742 Address result = nullptr; | 738 Address result = nullptr; |
743 while (m_firstUnsweptPage) { | 739 while (m_firstUnsweptPage) { |
744 BaseHeapPage* page = m_firstUnsweptPage; | 740 HeapPage* page = m_firstUnsweptPage; |
745 if (page->isEmpty()) { | 741 if (page->isEmpty()) { |
746 page->unlink(&m_firstUnsweptPage); | 742 page->unlink(&m_firstUnsweptPage); |
747 page->removeFromHeap(); | 743 page->removeFromHeap(this); |
748 } else { | 744 } else { |
749 // Sweep a page and move the page from m_firstUnsweptPages to | 745 // Sweep a page and move the page from m_firstUnsweptPages to |
750 // m_firstPages. | 746 // m_firstPages. |
751 page->sweep(); | 747 page->sweep(); |
752 page->unlink(&m_firstUnsweptPage); | 748 page->unlink(&m_firstUnsweptPage); |
753 page->link(&m_firstPage); | 749 page->link(&m_firstPage); |
754 page->markAsSwept(); | 750 page->markAsSwept(); |
755 | 751 |
756 // For HeapPage, stop lazy sweeping once we find a slot to | |
757 // allocate a new object. | |
758 result = allocateFromFreeList(allocationSize, gcInfoIndex); | 752 result = allocateFromFreeList(allocationSize, gcInfoIndex); |
759 if (result) | 753 if (result) |
760 break; | 754 break; |
761 } | 755 } |
762 } | 756 } |
| 757 |
| 758 if (threadState()->isMainThread()) |
| 759 ScriptForbiddenScope::exit(); |
763 return result; | 760 return result; |
764 } | 761 } |
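
A compact sketch of the lazy-sweeping loop above (illustrative types only): empty pages are unlinked and released, other pages are swept and relinked, and the loop stops as soon as a swept page can satisfy the pending allocation; if nothing fits, the caller falls back to completeSweep().

    #include <cstddef>
    #include <list>

    // Sketch only.
    struct PageSketch {
        bool empty = false;           // no live (marked) objects left
        size_t largestFreeBlock = 0;  // free space produced by sweeping
        void sweep() {}               // finalize dead objects, rebuild free lists
    };

    inline PageSketch* lazySweepUntilFit(std::list<PageSketch>& unswept,
                                         std::list<PageSketch>& swept,
                                         size_t allocationSize)
    {
        while (!unswept.empty()) {
            if (unswept.front().empty) {
                unswept.pop_front();  // the whole page can be released
                continue;
            }
            unswept.front().sweep();
            swept.splice(swept.begin(), unswept, unswept.begin());
            if (swept.front().largestFreeBlock >= allocationSize)
                return &swept.front(); // stop sweeping; allocate from this page
        }
        return nullptr;                // caller falls back to completeSweep()
    }
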
765 | 762 |
766 Address ThreadHeapForLargeObject::lazySweepPages(size_t allocationSize, size_t gcInfoIndex) | 763 bool ThreadHeap::lazySweepLargeObjects(size_t allocationSize) |
767 { | 764 { |
768 Address result = nullptr; | 765 ASSERT(allocationSize >= largeObjectSizeThreshold); |
| 766 |
| 767 // If there are no large objects to be swept, return immediately. |
| 768 if (!m_firstUnsweptLargeObject) |
| 769 return false; |
| 770 |
| 771 RELEASE_ASSERT(threadState()->isSweepingInProgress()); |
| 772 |
| 773 // lazySweepLargeObjects() can be called recursively if finalizers invoked |
| 774 // in page->sweep() allocate memory and the allocation triggers |
| 775 // lazySweepLargeObjects(). This check prevents the sweeping from being |
| 776 // executed recursively. |
| 777 if (threadState()->sweepForbidden()) |
| 778 return false; |
| 779 |
| 780 TRACE_EVENT0("blink_gc", "ThreadHeap::lazySweepLargeObjects"); |
| 781 ThreadState::SweepForbiddenScope scope(m_threadState); |
| 782 |
| 783 if (threadState()->isMainThread()) |
| 784 ScriptForbiddenScope::enter(); |
| 785 |
| 786 bool result = false; |
769 size_t sweptSize = 0; | 787 size_t sweptSize = 0; |
770 while (m_firstUnsweptPage) { | 788 while (m_firstUnsweptLargeObject) { |
771 BaseHeapPage* page = m_firstUnsweptPage; | 789 LargeObject* largeObject = m_firstUnsweptLargeObject; |
772 if (page->isEmpty()) { | 790 if (largeObject->isEmpty()) { |
773 sweptSize += static_cast<LargeObject*>(page)->payloadSize() + sizeof(HeapObjectHeader); | 791 sweptSize += largeObject->size(); |
774 page->unlink(&m_firstUnsweptPage); | 792 largeObject->unlink(&m_firstUnsweptLargeObject); |
775 page->removeFromHeap(); | 793 largeObject->removeFromHeap(this); |
776 // For LargeObject, stop lazy sweeping once we have swept | 794 |
777 // more than allocationSize bytes. | 795 // If we have swept large objects more than allocationSize, |
| 796 // we stop the lazy sweeping. |
778 if (sweptSize >= allocationSize) { | 797 if (sweptSize >= allocationSize) { |
779 result = doAllocateLargeObject(allocationSize, gcInfoIndex); | 798 result = true; |
780 ASSERT(result); | |
781 break; | 799 break; |
782 } | 800 } |
783 } else { | 801 } else { |
| 802 // Sweep a large object and move the large object from |
| 803 // m_firstUnsweptLargeObjects to m_firstLargeObjects. |
| 804 largeObject->sweep(); |
| 805 largeObject->unlink(&m_firstUnsweptLargeObject); |
| 806 largeObject->link(&m_firstLargeObject); |
| 807 largeObject->markAsSwept(); |
| 808 } |
| 809 } |
| 810 |
| 811 if (threadState()->isMainThread()) |
| 812 ScriptForbiddenScope::exit(); |
| 813 return result; |
| 814 } |
| 815 |
| 816 void ThreadHeap::completeSweep() |
| 817 { |
| 818 RELEASE_ASSERT(threadState()->isSweepingInProgress()); |
| 819 ASSERT(threadState()->sweepForbidden()); |
| 820 |
| 821 if (threadState()->isMainThread()) |
| 822 ScriptForbiddenScope::enter(); |
| 823 |
| 824 // Sweep normal pages. |
| 825 while (m_firstUnsweptPage) { |
| 826 HeapPage* page = m_firstUnsweptPage; |
| 827 if (page->isEmpty()) { |
| 828 page->unlink(&m_firstUnsweptPage); |
| 829 page->removeFromHeap(this); |
| 830 } else { |
784 // Sweep a page and move the page from m_firstUnsweptPages to | 831 // Sweep a page and move the page from m_firstUnsweptPages to |
785 // m_firstPages. | 832 // m_firstPages. |
786 page->sweep(); | 833 page->sweep(); |
787 page->unlink(&m_firstUnsweptPage); | 834 page->unlink(&m_firstUnsweptPage); |
788 page->link(&m_firstPage); | 835 page->link(&m_firstPage); |
789 page->markAsSwept(); | 836 page->markAsSwept(); |
790 } | 837 } |
791 } | 838 } |
792 return result; | |
793 } | |
794 | |
795 void ThreadHeap::completeSweep() | |
796 { | |
797 RELEASE_ASSERT(threadState()->isSweepingInProgress()); | |
798 ASSERT(threadState()->sweepForbidden()); | |
799 | |
800 if (threadState()->isMainThread()) | |
801 ScriptForbiddenScope::enter(); | |
802 | |
803 while (m_firstUnsweptPage) { | |
804 BaseHeapPage* page = m_firstUnsweptPage; | |
805 if (page->isEmpty()) { | |
806 page->unlink(&m_firstUnsweptPage); | |
807 page->removeFromHeap(); | |
808 } else { | |
809 // Sweep a page and move the page from m_firstUnsweptPages to | |
810 // m_firstPages. | |
811 page->sweep(); | |
812 page->unlink(&m_firstUnsweptPage); | |
813 page->link(&m_firstPage); | |
814 page->markAsSwept(); | |
815 } | |
816 } | |
817 | 839 |
| 840 // Sweep large objects. |
| 841 while (m_firstUnsweptLargeObject) { |
| 842 LargeObject* largeObject = m_firstUnsweptLargeObject; |
| 843 if (largeObject->isEmpty()) { |
| 844 largeObject->unlink(&m_firstUnsweptLargeObject); |
| 845 largeObject->removeFromHeap(this); |
| 846 } else { |
| 847 // Sweep a large object and move the large object from |
| 848 // m_firstUnsweptLargeObjects to m_firstLargeObjects. |
| 849 largeObject->sweep(); |
| 850 largeObject->unlink(&m_firstUnsweptLargeObject); |
| 851 largeObject->link(&m_firstLargeObject); |
| 852 largeObject->markAsSwept(); |
| 853 } |
| 854 } |
| 855 |
818 if (threadState()->isMainThread()) | 856 if (threadState()->isMainThread()) |
819 ScriptForbiddenScope::exit(); | 857 ScriptForbiddenScope::exit(); |
820 } | 858 } |
821 | 859 |
822 #if ENABLE(ASSERT) | 860 #if ENABLE(ASSERT) |
| 861 static bool isLargeObjectAligned(LargeObject* largeObject, Address address) |
| 862 { |
| 863 // Check that a large object is blinkPageSize aligned (modulo the osPageSize |
| 864 // for the guard page). |
| 865 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); |
| 866 } |
| 867 #endif |
| 868 |
| 869 #if ENABLE(ASSERT) || ENABLE(GC_PROFILING) |
823 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address) | 870 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address) |
824 { | 871 { |
825 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { | 872 for (HeapPage* page = m_firstPage; page; page = page->next()) { |
826 if (page->contains(address)) | 873 if (page->contains(address)) |
827 return page; | 874 return page; |
828 } | 875 } |
829 for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) { | 876 for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
830 if (page->contains(address)) | 877 if (page->contains(address)) |
831 return page; | 878 return page; |
832 } | 879 } |
| 880 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
| 881 ASSERT(isLargeObjectAligned(largeObject, address)); |
| 882 if (largeObject->contains(address)) |
| 883 return largeObject; |
| 884 } |
| 885 for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; largeObject = largeObject->next()) { |
| 886 ASSERT(isLargeObjectAligned(largeObject, address)); |
| 887 if (largeObject->contains(address)) |
| 888 return largeObject; |
| 889 } |
833 return nullptr; | 890 return nullptr; |
834 } | 891 } |
835 #endif | 892 #endif |
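
The isLargeObjectAligned() assertion used in findPageFromAddress() boils down to power-of-two address arithmetic: a large object starts one OS guard page past a blink-page boundary. A sketch with stand-in constants (not Blink's real sizes):

    #include <cstdint>

    // Sketch only: with a power-of-two page size, the page start is the address
    // with the low log2(pageSize) bits cleared; the large object header sits one
    // OS guard page past that boundary.
    const uintptr_t kSketchBlinkPageSize = 1 << 17; // stand-in constant
    const uintptr_t kSketchOsPageSize = 4096;       // stand-in guard page size

    inline uintptr_t roundToPageStartSketch(uintptr_t address)
    {
        return address & ~(kSketchBlinkPageSize - 1);
    }

    inline bool isLargeObjectAlignedSketch(uintptr_t largeObjectAddress)
    {
        return largeObjectAddress - kSketchOsPageSize == roundToPageStartSketch(largeObjectAddress);
    }
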
836 | 893 |
837 #if ENABLE(GC_PROFILING) | 894 #if ENABLE(GC_PROFILING) |
838 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 | 895 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 |
839 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 896 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
840 { | 897 { |
841 ASSERT(isConsistentForSweeping()); | 898 ASSERT(isConsistentForSweeping()); |
842 size_t previousPageCount = info->pageCount; | 899 size_t previousPageCount = info->pageCount; |
843 | 900 |
844 json->beginArray("pages"); | 901 json->beginArray("pages"); |
845 for (BaseHeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { | 902 for (HeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { |
846 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. | 903 // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. |
847 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { | 904 if (info->pageCount < GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD) { |
848 json->beginArray(); | 905 json->beginArray(); |
849 json->pushInteger(reinterpret_cast<intptr_t>(page)); | 906 json->pushInteger(reinterpret_cast<intptr_t>(page)); |
850 page->snapshot(json, info); | 907 page->snapshot(json, info); |
851 json->endArray(); | 908 json->endArray(); |
852 } else { | 909 } else { |
853 page->snapshot(0, info); | 910 page->snapshot(0, info); |
854 } | 911 } |
855 } | 912 } |
856 json->endArray(); | 913 json->endArray(); |
857 | 914 |
| 915 json->beginArray("largeObjects"); |
| 916 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
| 917 json->beginDictionary(); |
| 918 largeObject->snapshot(json, info); |
| 919 json->endDictionary(); |
| 920 } |
| 921 json->endArray(); |
| 922 |
858 json->setInteger("pageCount", info->pageCount - previousPageCount); | 923 json->setInteger("pageCount", info->pageCount - previousPageCount); |
859 } | 924 } |
860 | 925 |
861 void ThreadHeap::incrementMarkedObjectsAge() | 926 void ThreadHeap::incrementMarkedObjectsAge() |
862 { | 927 { |
863 for (HeapPage* page = m_firstPage; page; page = page->next()) | 928 for (HeapPage* page = m_firstPage; page; page = page->next()) |
864 page->incrementMarkedObjectsAge(); | 929 page->incrementMarkedObjectsAge(); |
865 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) | 930 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
866 largeObject->incrementMarkedObjectsAge(); | 931 largeObject->incrementMarkedObjectsAge(); |
867 } | 932 } |
(...skipping 26 matching lines...) |
894 // space. | 959 // space. |
895 if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList()) | 960 if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList()) |
896 return; | 961 return; |
897 #endif | 962 #endif |
898 int index = bucketIndexForSize(size); | 963 int index = bucketIndexForSize(size); |
899 entry->link(&m_freeLists[index]); | 964 entry->link(&m_freeLists[index]); |
900 if (index > m_biggestFreeListIndex) | 965 if (index > m_biggestFreeListIndex) |
901 m_biggestFreeListIndex = index; | 966 m_biggestFreeListIndex = index; |
902 } | 967 } |
903 | 968 |
904 bool ThreadHeapForHeapPage::expandObject(HeapObjectHeader* header, size_t newSize) | 969 bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize) |
905 { | 970 { |
906 // It's possible that Vector requests a smaller expanded size because | 971 // It's possible that Vector requests a smaller expanded size because |
907 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 972 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
908 // size. | 973 // size. |
909 if (header->payloadSize() >= newSize) | 974 if (header->payloadSize() >= newSize) |
910 return true; | 975 return true; |
911 size_t allocationSize = allocationSizeFromSize(newSize); | 976 size_t allocationSize = allocationSizeFromSize(newSize); |
912 ASSERT(allocationSize > header->size()); | 977 ASSERT(allocationSize > header->size()); |
913 size_t expandSize = allocationSize - header->size(); | 978 size_t expandSize = allocationSize - header->size(); |
914 if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) { | 979 if (header->payloadEnd() == m_currentAllocationPoint && expandSize <= m_remainingAllocationSize) { |
915 m_currentAllocationPoint += expandSize; | 980 m_currentAllocationPoint += expandSize; |
916 m_remainingAllocationSize -= expandSize; | 981 m_remainingAllocationSize -= expandSize; |
917 | 982 |
918 // Unpoison the memory used for the object (payload). | 983 // Unpoison the memory used for the object (payload). |
919 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize); | 984 ASAN_UNPOISON_MEMORY_REGION(header->payloadEnd(), expandSize); |
920 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize); | 985 FILL_ZERO_IF_NOT_PRODUCTION(header->payloadEnd(), expandSize); |
921 header->setSize(allocationSize); | 986 header->setSize(allocationSize); |
922 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); | 987 ASSERT(findPageFromAddress(header->payloadEnd() - 1)); |
923 return true; | 988 return true; |
924 } | 989 } |
925 return false; | 990 return false; |
926 } | 991 } |
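
expandObject() above can grow an object in place only when its payload ends exactly at the current allocation point and the bump area still has enough room; otherwise the caller must reallocate. The same check in isolation, with illustrative fields:

    #include <cstddef>

    // Sketch only: in-place growth by moving the bump pointer forward.
    struct BumpAreaSketch {
        char* point = nullptr;   // current allocation point
        size_t remaining = 0;    // bytes left in the area
    };

    inline bool tryExpandInPlace(BumpAreaSketch& area, char* payloadEnd, size_t extraBytes)
    {
        if (payloadEnd == area.point && extraBytes <= area.remaining) {
            area.point += extraBytes;      // the object now owns these bytes
            area.remaining -= extraBytes;
            return true;                   // caller updates the object header size
        }
        return false;                      // not adjacent: caller must reallocate
    }
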
927 | 992 |
928 void ThreadHeapForHeapPage::shrinkObject(HeapObjectHeader* header, size_t newSize) | 993 void ThreadHeap::shrinkObject(HeapObjectHeader* header, size_t newSize) |
929 { | 994 { |
930 ASSERT(header->payloadSize() > newSize); | 995 ASSERT(header->payloadSize() > newSize); |
931 size_t allocationSize = allocationSizeFromSize(newSize); | 996 size_t allocationSize = allocationSizeFromSize(newSize); |
932 ASSERT(header->size() > allocationSize); | 997 ASSERT(header->size() > allocationSize); |
933 size_t shrinkSize = header->size() - allocationSize; | 998 size_t shrinkSize = header->size() - allocationSize; |
934 if (header->payloadEnd() == m_currentAllocationPoint) { | 999 if (header->payloadEnd() == m_currentAllocationPoint) { |
935 m_currentAllocationPoint -= shrinkSize; | 1000 m_currentAllocationPoint -= shrinkSize; |
936 m_remainingAllocationSize += shrinkSize; | 1001 m_remainingAllocationSize += shrinkSize; |
937 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize); | 1002 FILL_ZERO_IF_PRODUCTION(m_currentAllocationPoint, shrinkSize); |
938 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize); | 1003 ASAN_POISON_MEMORY_REGION(m_currentAllocationPoint, shrinkSize); |
939 header->setSize(allocationSize); | 1004 header->setSize(allocationSize); |
940 } else { | 1005 } else { |
941 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); | 1006 ASSERT(shrinkSize >= sizeof(HeapObjectHeader)); |
942 ASSERT(header->gcInfoIndex() > 0); | 1007 ASSERT(header->gcInfoIndex() > 0); |
943 HeapObjectHeader* freedHeader = new (NotNull, header->payloadEnd() - shrinkSize) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); | 1008 HeapObjectHeader* freedHeader = new (NotNull, header->payloadEnd() - shrinkSize) HeapObjectHeader(shrinkSize, header->gcInfoIndex()); |
944 freedHeader->markPromptlyFreed(); | 1009 freedHeader->markPromptlyFreed(); |
945 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); | 1010 ASSERT(pageFromObject(reinterpret_cast<Address>(header)) == findPageFromAddress(reinterpret_cast<Address>(header))); |
946 m_promptlyFreedSize += shrinkSize; | 1011 m_promptlyFreedSize += shrinkSize; |
947 header->setSize(allocationSize); | 1012 header->setSize(allocationSize); |
948 } | 1013 } |
949 } | 1014 } |
950 | 1015 |
951 void ThreadHeapForHeapPage::promptlyFreeObject(HeapObjectHeader* header) | 1016 void ThreadHeap::promptlyFreeObject(HeapObjectHeader* header) |
952 { | 1017 { |
953 ASSERT(!threadState()->sweepForbidden()); | 1018 ASSERT(!m_threadState->sweepForbidden()); |
954 header->checkHeader(); | 1019 header->checkHeader(); |
955 Address address = reinterpret_cast<Address>(header); | 1020 Address address = reinterpret_cast<Address>(header); |
956 Address payload = header->payload(); | 1021 Address payload = header->payload(); |
957 size_t size = header->size(); | 1022 size_t size = header->size(); |
958 size_t payloadSize = header->payloadSize(); | 1023 size_t payloadSize = header->payloadSize(); |
959 ASSERT(size > 0); | 1024 ASSERT(size > 0); |
960 ASSERT(pageFromObject(address) == findPageFromAddress(address)); | 1025 ASSERT(pageFromObject(address) == findPageFromAddress(address)); |
961 | 1026 |
962 { | 1027 { |
963 ThreadState::SweepForbiddenScope forbiddenScope(threadState()); | 1028 ThreadState::SweepForbiddenScope forbiddenScope(m_threadState); |
964 header->finalize(payload, payloadSize); | 1029 header->finalize(payload, payloadSize); |
965 if (address + size == m_currentAllocationPoint) { | 1030 if (address + size == m_currentAllocationPoint) { |
966 m_currentAllocationPoint = address; | 1031 m_currentAllocationPoint = address; |
967 if (m_lastRemainingAllocationSize == m_remainingAllocationSize) { | 1032 if (m_lastRemainingAllocationSize == m_remainingAllocationSize) { |
968 Heap::decreaseAllocatedObjectSize(size); | 1033 Heap::decreaseAllocatedObjectSize(size); |
969 m_lastRemainingAllocationSize += size; | 1034 m_lastRemainingAllocationSize += size; |
970 } | 1035 } |
971 m_remainingAllocationSize += size; | 1036 m_remainingAllocationSize += size; |
972 FILL_ZERO_IF_PRODUCTION(address, size); | 1037 FILL_ZERO_IF_PRODUCTION(address, size); |
973 ASAN_POISON_MEMORY_REGION(address, size); | 1038 ASAN_POISON_MEMORY_REGION(address, size); |
974 return; | 1039 return; |
975 } | 1040 } |
976 FILL_ZERO_IF_PRODUCTION(payload, payloadSize); | 1041 FILL_ZERO_IF_PRODUCTION(payload, payloadSize); |
977 header->markPromptlyFreed(); | 1042 header->markPromptlyFreed(); |
978 } | 1043 } |
979 | 1044 |
980 m_promptlyFreedSize += size; | 1045 m_promptlyFreedSize += size; |
981 } | 1046 } |
982 | 1047 |
983 bool ThreadHeapForHeapPage::coalesce() | 1048 bool ThreadHeap::coalesce() |
984 { | 1049 { |
985 // Don't coalesce heaps if there are not enough promptly freed entries | 1050 // Don't coalesce heaps if there are not enough promptly freed entries |
986 // to be coalesced. | 1051 // to be coalesced. |
987 // | 1052 // |
988 // FIXME: This threshold is determined just to optimize blink_perf | 1053 // FIXME: This threshold is determined just to optimize blink_perf |
989 // benchmarks. Coalescing is very sensitive to the threashold and | 1054 // benchmarks. Coalescing is very sensitive to the threashold and |
990 // we need further investigations on the coalescing scheme. | 1055 // we need further investigations on the coalescing scheme. |
991 if (m_promptlyFreedSize < 1024 * 1024) | 1056 if (m_promptlyFreedSize < 1024 * 1024) |
992 return false; | 1057 return false; |
993 | 1058 |
994 if (threadState()->sweepForbidden()) | 1059 if (m_threadState->sweepForbidden()) |
995 return false; | 1060 return false; |
996 | 1061 |
997 ASSERT(!hasCurrentAllocationArea()); | 1062 ASSERT(!hasCurrentAllocationArea()); |
998 TRACE_EVENT0("blink_gc", "ThreadHeap::coalesce"); | 1063 TRACE_EVENT0("blink_gc", "ThreadHeap::coalesce"); |
999 | 1064 |
1000 // Rebuild free lists. | 1065 // Rebuild free lists. |
1001 m_freeList.clear(); | 1066 m_freeList.clear(); |
1002 size_t freedSize = 0; | 1067 size_t freedSize = 0; |
1003 for (HeapPage* page = static_cast<HeapPage*>(m_firstPage); page; page = static_cast<HeapPage*>(page->next())) { | 1068 for (HeapPage* page = m_firstPage; page; page = page->next()) { |
1004 page->clearObjectStartBitMap(); | 1069 page->clearObjectStartBitMap(); |
1005 Address startOfGap = page->payload(); | 1070 Address startOfGap = page->payload(); |
1006 for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) { | 1071 for (Address headerAddress = startOfGap; headerAddress < page->payloadEnd(); ) { |
1007 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1072 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1008 size_t size = header->size(); | 1073 size_t size = header->size(); |
1009 ASSERT(size > 0); | 1074 ASSERT(size > 0); |
1010 ASSERT(size < blinkPagePayloadSize()); | 1075 ASSERT(size < blinkPagePayloadSize()); |
1011 | 1076 |
1012 if (header->isPromptlyFreed()) { | 1077 if (header->isPromptlyFreed()) { |
1013 ASSERT(size >= sizeof(HeapObjectHeader)); | 1078 ASSERT(size >= sizeof(HeapObjectHeader)); |
(...skipping 20 matching lines...) |
1034 | 1099 |
1035 if (startOfGap != page->payloadEnd()) | 1100 if (startOfGap != page->payloadEnd()) |
1036 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); | 1101 addToFreeList(startOfGap, page->payloadEnd() - startOfGap); |
1037 } | 1102 } |
1038 Heap::decreaseAllocatedObjectSize(freedSize); | 1103 Heap::decreaseAllocatedObjectSize(freedSize); |
1039 ASSERT(m_promptlyFreedSize == freedSize); | 1104 ASSERT(m_promptlyFreedSize == freedSize); |
1040 m_promptlyFreedSize = 0; | 1105 m_promptlyFreedSize = 0; |
1041 return true; | 1106 return true; |
1042 } | 1107 } |
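
coalesce() walks every object header in a page and merges runs of promptly-freed and already-free blocks into single free-list entries. A sketch of that merging walk over a flat sequence of (size, freed) headers, with invented types; the real code calls addToFreeList() for each coalesced gap rather than collecting spans.

    #include <cstddef>
    #include <utility>
    #include <vector>

    // Sketch only.
    struct HeaderSketch {
        size_t size;
        bool freed; // promptly freed or already a free-list entry
    };

    // Returns the merged free spans as (offset, length) pairs.
    inline std::vector<std::pair<size_t, size_t>> coalesceSketch(const std::vector<HeaderSketch>& headers)
    {
        std::vector<std::pair<size_t, size_t>> freeSpans;
        size_t offset = 0;
        size_t gapStart = 0;
        bool inGap = false;
        for (const HeaderSketch& header : headers) {
            if (header.freed) {
                if (!inGap) {
                    gapStart = offset;   // start a new coalesced span
                    inGap = true;
                }
            } else if (inGap) {
                freeSpans.push_back({gapStart, offset - gapStart});
                inGap = false;
            }
            offset += header.size;
        }
        if (inGap)
            freeSpans.push_back({gapStart, offset - gapStart});
        return freeSpans;
    }
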
1043 | 1108 |
1044 Address ThreadHeapForLargeObject::allocateLargeObject(size_t allocationSize, size_t gcInfoIndex) | 1109 Address ThreadHeap::allocateLargeObject(size_t size, size_t gcInfoIndex) |
1045 { | 1110 { |
1046 // Caller already added space for object header and rounded up to allocation | 1111 // Caller already added space for object header and rounded up to allocation |
1047 // alignment | 1112 // alignment |
1048 ASSERT(!(allocationSize & allocationMask)); | 1113 ASSERT(!(size & allocationMask)); |
| 1114 |
| 1115 size_t allocationSize = sizeof(LargeObject) + size; |
| 1116 |
| 1117 // Ensure that there is enough space for alignment. If the header |
| 1118 // is not a multiple of 8 bytes we will allocate an extra |
| 1119 // headerPadding bytes to ensure it 8 byte aligned. |
| 1120 allocationSize += headerPadding(); |
| 1121 |
| 1122 // If ASan is supported we add allocationGranularity bytes to the allocated |
| 1123 // space and poison that to detect overflows |
| 1124 #if defined(ADDRESS_SANITIZER) |
| 1125 allocationSize += allocationGranularity; |
| 1126 #endif |
1049 | 1127 |
1050 // 1. Check if we should trigger a GC. | 1128 // 1. Check if we should trigger a GC. |
1051 threadState()->scheduleGCIfNeeded(); | 1129 updateRemainingAllocationSize(); |
| 1130 m_threadState->scheduleGCIfNeeded(); |
1052 | 1131 |
1053 // 2. Try to sweep large objects more than allocationSize bytes | 1132 // 2. Try to sweep large objects more than allocationSize bytes |
1054 // before allocating a new large object. | 1133 // before allocating a new large object. |
1055 Address result = lazySweep(allocationSize, gcInfoIndex); | 1134 if (!lazySweepLargeObjects(allocationSize)) { |
1056 if (result) | 1135 // 3. If we have failed in sweeping allocationSize bytes, |
1057 return result; | 1136 // we complete sweeping before allocating this large object. |
| 1137 m_threadState->completeSweep(); |
| 1138 } |
1058 | 1139 |
1059 // 3. If we have failed in sweeping allocationSize bytes, | 1140 m_threadState->shouldFlushHeapDoesNotContainCache(); |
1060 // we complete sweeping before allocating this large object. | 1141 PageMemory* pageMemory = PageMemory::allocate(allocationSize); |
1061 threadState()->completeSweep(); | 1142 m_threadState->allocatedRegionsSinceLastGC().append(pageMemory->region()); |
1062 return doAllocateLargeObject(allocationSize, gcInfoIndex); | |
1063 } | |
1064 | |
1065 Address ThreadHeapForLargeObject::doAllocateLargeObject(size_t allocationSize, s
ize_t gcInfoIndex) | |
1066 { | |
1067 size_t largeObjectSize = sizeof(LargeObject) + LargeObject::headerPadding() + allocationSize; | |
1068 // If ASan is supported we add allocationGranularity bytes to the allocated | |
1069 // space and poison that to detect overflows | |
1070 #if defined(ADDRESS_SANITIZER) | |
1071 largeObjectSize += allocationGranularity; | |
1072 #endif | |
1073 | |
1074 threadState()->shouldFlushHeapDoesNotContainCache(); | |
1075 PageMemory* pageMemory = PageMemory::allocate(largeObjectSize); | |
1076 threadState()->allocatedRegionsSinceLastGC().append(pageMemory->region()); | |
1077 Address largeObjectAddress = pageMemory->writableStart(); | 1143 Address largeObjectAddress = pageMemory->writableStart(); |
1078 Address headerAddress = largeObjectAddress + sizeof(LargeObject) + LargeObject::headerPadding(); | 1144 Address headerAddress = largeObjectAddress + sizeof(LargeObject) + headerPadding(); |
1079 #if ENABLE(ASSERT) | 1145 #if ENABLE(ASSERT) |
1080 // Verify that the allocated PageMemory is expectedly zeroed. | 1146 // Verify that the allocated PageMemory is expectedly zeroed. |
1081 for (size_t i = 0; i < largeObjectSize; ++i) | 1147 for (size_t i = 0; i < size; ++i) |
1082 ASSERT(!headerAddress[i]); | 1148 ASSERT(!headerAddress[i]); |
1083 #endif | 1149 #endif |
1084 ASSERT(gcInfoIndex > 0); | 1150 ASSERT(gcInfoIndex > 0); |
1085 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); | 1151 HeapObjectHeader* header = new (NotNull, headerAddress) HeapObjectHeader(largeObjectSizeInHeader, gcInfoIndex); |
1086 Address result = headerAddress + sizeof(*header); | 1152 Address result = headerAddress + sizeof(*header); |
1087 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | 1153 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); |
1088 LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, allocationSize); | 1154 LargeObject* largeObject = new (largeObjectAddress) LargeObject(pageMemory, this, size); |
1089 header->checkHeader(); | 1155 header->checkHeader(); |
1090 | 1156 |
1091 // Poison the object header and allocationGranularity bytes after the object | 1157 // Poison the object header and allocationGranularity bytes after the object |
1092 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); | 1158 ASAN_POISON_MEMORY_REGION(header, sizeof(*header)); |
1093 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); | 1159 ASAN_POISON_MEMORY_REGION(largeObject->address() + largeObject->size(), allocationGranularity); |
1094 | 1160 |
1095 largeObject->link(&m_firstPage); | 1161 largeObject->link(&m_firstLargeObject); |
1096 | 1162 |
1097 Heap::increaseAllocatedSpace(largeObject->size()); | 1163 Heap::increaseAllocatedSpace(largeObject->size()); |
1098 Heap::increaseAllocatedObjectSize(largeObject->size()); | 1164 Heap::increaseAllocatedObjectSize(largeObject->size()); |
1099 return result; | 1165 return result; |
1100 } | 1166 } |
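
The large-object mapping is sized as the LargeObject page header, padding to keep the following object header 8-byte aligned, the payload, and (under ASan) one extra allocation granule used as a poisoned redzone. A sketch of that arithmetic with stand-in constants; the padding rule here is an assumption for illustration, not Blink's exact headerPadding().

    #include <cstddef>

    // Sketch only: compute the total mapping size for one large object.
    const size_t kSketchAllocationGranularity = 8;
    const size_t kSketchPageHeaderSize = 64;     // stand-in for sizeof(LargeObject)

    inline size_t headerPaddingSketch()
    {
        // Pad so the object header that follows the page header is 8-byte aligned.
        return (kSketchAllocationGranularity - kSketchPageHeaderSize % kSketchAllocationGranularity)
            % kSketchAllocationGranularity;
    }

    inline size_t largeObjectMappingSizeSketch(size_t payloadAllocationSize, bool asanEnabled)
    {
        size_t total = kSketchPageHeaderSize + headerPaddingSketch() + payloadAllocationSize;
        if (asanEnabled)
            total += kSketchAllocationGranularity; // poisoned redzone after the object
        return total;
    }
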
1101 | 1167 |
1102 void ThreadHeapForLargeObject::freeLargeObject(LargeObject* object) | 1168 void ThreadHeap::freeLargeObject(LargeObject* object) |
1103 { | 1169 { |
1104 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); | 1170 object->heapObjectHeader()->finalize(object->payload(), object->payloadSize()); |
1105 Heap::decreaseAllocatedSpace(object->size()); | 1171 Heap::decreaseAllocatedSpace(object->size()); |
1106 | 1172 |
1107 // Unpoison the object header and allocationGranularity bytes after the | 1173 // Unpoison the object header and allocationGranularity bytes after the |
1108 // object before freeing. | 1174 // object before freeing. |
1109 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); | 1175 ASAN_UNPOISON_MEMORY_REGION(object->heapObjectHeader(), sizeof(HeapObjectHeader)); |
1110 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); | 1176 ASAN_UNPOISON_MEMORY_REGION(object->address() + object->size(), allocationGranularity); |
1111 | 1177 |
1112 if (object->terminating()) { | 1178 if (object->terminating()) { |
1113 ASSERT(ThreadState::current()->isTerminating()); | 1179 ASSERT(ThreadState::current()->isTerminating()); |
1114 // The thread is shutting down and this page is being removed as a part | 1180 // The thread is shutting down and this page is being removed as a part |
1115 // of the thread local GC. In that case the object could be traced in | 1181 // of the thread local GC. In that case the object could be traced in |
1116 // the next global GC if there is a dangling pointer from a live thread | 1182 // the next global GC if there is a dangling pointer from a live thread |
1117 // heap to this dead thread heap. To guard against this, we put the | 1183 // heap to this dead thread heap. To guard against this, we put the |
1118 // page into the orphaned page pool and zap the page memory. This | 1184 // page into the orphaned page pool and zap the page memory. This |
1119 // ensures that tracing the dangling pointer in the next global GC just | 1185 // ensures that tracing the dangling pointer in the next global GC just |
1120 // crashes instead of causing use-after-frees. After the next global | 1186 // crashes instead of causing use-after-frees. After the next global |
1121 // GC, the orphaned pages are removed. | 1187 // GC, the orphaned pages are removed. |
1122 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), object); | 1188 Heap::orphanedPagePool()->addOrphanedPage(m_index, object); |
1123 } else { | 1189 } else { |
1124 ASSERT(!ThreadState::current()->isTerminating()); | 1190 ASSERT(!ThreadState::current()->isTerminating()); |
1125 PageMemory* memory = object->storage(); | 1191 PageMemory* memory = object->storage(); |
1126 object->~LargeObject(); | 1192 object->~LargeObject(); |
1127 delete memory; | 1193 delete memory; |
1128 } | 1194 } |
1129 } | 1195 } |
1130 | 1196 |
1131 template<typename DataType> | 1197 template<typename DataType> |
1132 PagePool<DataType>::PagePool() | 1198 PagePool<DataType>::PagePool() |
(...skipping 40 matching lines...) |
1173 | 1239 |
1174 // We got some memory, but failed to commit it, try again. | 1240 // We got some memory, but failed to commit it, try again. |
1175 delete memory; | 1241 delete memory; |
1176 } | 1242 } |
1177 return nullptr; | 1243 return nullptr; |
1178 } | 1244 } |
1179 | 1245 |
1180 BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap) | 1246 BaseHeapPage::BaseHeapPage(PageMemory* storage, ThreadHeap* heap) |
1181 : m_storage(storage) | 1247 : m_storage(storage) |
1182 , m_heap(heap) | 1248 , m_heap(heap) |
1183 , m_next(nullptr) | |
1184 , m_terminating(false) | 1249 , m_terminating(false) |
1185 , m_swept(true) | 1250 , m_swept(true) |
1186 { | 1251 { |
1187 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 1252 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
1188 } | 1253 } |
1189 | 1254 |
1190 void BaseHeapPage::markOrphaned() | 1255 void BaseHeapPage::markOrphaned() |
1191 { | 1256 { |
1192 m_heap = nullptr; | 1257 m_heap = nullptr; |
1193 m_terminating = false; | 1258 m_terminating = false; |
(...skipping 85 matching lines...) |
1279 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { | 1344 for (PoolEntry* entry = m_pool[index]; entry; entry = entry->next) { |
1280 BaseHeapPage* page = entry->data; | 1345 BaseHeapPage* page = entry->data; |
1281 if (page->contains(reinterpret_cast<Address>(object))) | 1346 if (page->contains(reinterpret_cast<Address>(object))) |
1282 return true; | 1347 return true; |
1283 } | 1348 } |
1284 } | 1349 } |
1285 return false; | 1350 return false; |
1286 } | 1351 } |
1287 #endif | 1352 #endif |
1288 | 1353 |
1289 void ThreadHeapForHeapPage::freePage(HeapPage* page) | 1354 void ThreadHeap::freePage(HeapPage* page) |
1290 { | 1355 { |
1291 Heap::decreaseAllocatedSpace(page->size()); | 1356 Heap::decreaseAllocatedSpace(blinkPageSize); |
1292 | 1357 |
1293 if (page->terminating()) { | 1358 if (page->terminating()) { |
1294 // The thread is shutting down and this page is being removed as a part | 1359 // The thread is shutting down and this page is being removed as a part |
1295 // of the thread local GC. In that case the object could be traced in | 1360 // of the thread local GC. In that case the object could be traced in |
1296 // the next global GC if there is a dangling pointer from a live thread | 1361 // the next global GC if there is a dangling pointer from a live thread |
1297 // heap to this dead thread heap. To guard against this, we put the | 1362 // heap to this dead thread heap. To guard against this, we put the |
1298 // page into the orphaned page pool and zap the page memory. This | 1363 // page into the orphaned page pool and zap the page memory. This |
1299 // ensures that tracing the dangling pointer in the next global GC just | 1364 // ensures that tracing the dangling pointer in the next global GC just |
1300 // crashes instead of causing use-after-frees. After the next global | 1365 // crashes instead of causing use-after-frees. After the next global |
1301 // GC, the orphaned pages are removed. | 1366 // GC, the orphaned pages are removed. |
1302 Heap::orphanedPagePool()->addOrphanedPage(heapIndex(), page); | 1367 Heap::orphanedPagePool()->addOrphanedPage(m_index, page); |
1303 } else { | 1368 } else { |
1304 PageMemory* memory = page->storage(); | 1369 PageMemory* memory = page->storage(); |
1305 page->~HeapPage(); | 1370 page->~HeapPage(); |
1306 Heap::freePagePool()->addFreePage(heapIndex(), memory); | 1371 Heap::freePagePool()->addFreePage(m_index, memory); |
1307 } | 1372 } |
1308 } | 1373 } |
1309 | 1374 |
1310 void ThreadHeapForHeapPage::allocatePage() | 1375 void ThreadHeap::allocatePage() |
1311 { | 1376 { |
1312 threadState()->shouldFlushHeapDoesNotContainCache(); | 1377 m_threadState->shouldFlushHeapDoesNotContainCache(); |
1313 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(heapIndex()); | 1378 PageMemory* pageMemory = Heap::freePagePool()->takeFreePage(m_index); |
1314 // We continue allocating page memory until we succeed in committing one. | 1379 // We continue allocating page memory until we succeed in committing one. |
1315 while (!pageMemory) { | 1380 while (!pageMemory) { |
1316 // Allocate a memory region for blinkPagesPerRegion pages that | 1381 // Allocate a memory region for blinkPagesPerRegion pages that |
1317 // will each have the following layout. | 1382 // will each have the following layout. |
1318 // | 1383 // |
1319 // [ guard os page | ... payload ... | guard os page ] | 1384 // [ guard os page | ... payload ... | guard os page ] |
1320 // ^---{ aligned to blink page size } | 1385 // ^---{ aligned to blink page size } |
1321 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); | 1386 PageMemoryRegion* region = PageMemoryRegion::allocateNormalPages(); |
1322 threadState()->allocatedRegionsSinceLastGC().append(region); | 1387 m_threadState->allocatedRegionsSinceLastGC().append(region); |
1323 | 1388 |
1324 // Setup the PageMemory object for each of the pages in the region. | 1389 // Setup the PageMemory object for each of the pages in the region. |
1325 size_t offset = 0; | 1390 size_t offset = 0; |
1326 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { | 1391 for (size_t i = 0; i < blinkPagesPerRegion; ++i) { |
1327 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()); | 1392 PageMemory* memory = PageMemory::setupPageMemoryInRegion(region, offset, blinkPagePayloadSize()); |
1328 // Take the first possible page ensuring that this thread actually | 1393 // Take the first possible page ensuring that this thread actually |
1329 // gets a page and add the rest to the page pool. | 1394 // gets a page and add the rest to the page pool. |
1330 if (!pageMemory) { | 1395 if (!pageMemory) { |
1331 if (memory->commit()) | 1396 if (memory->commit()) |
1332 pageMemory = memory; | 1397 pageMemory = memory; |
1333 else | 1398 else |
1334 delete memory; | 1399 delete memory; |
1335 } else { | 1400 } else { |
1336 Heap::freePagePool()->addFreePage(heapIndex(), memory); | 1401 Heap::freePagePool()->addFreePage(m_index, memory); |
1337 } | 1402 } |
1338 offset += blinkPageSize; | 1403 offset += blinkPageSize; |
1339 } | 1404 } |
1340 } | 1405 } |
1341 HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this); | 1406 HeapPage* page = new (pageMemory->writableStart()) HeapPage(pageMemory, this); |
| 1407 |
1342 page->link(&m_firstPage); | 1408 page->link(&m_firstPage); |
1343 | 1409 |
1344 Heap::increaseAllocatedSpace(page->size()); | 1410 Heap::increaseAllocatedSpace(blinkPageSize); |
1345 addToFreeList(page->payload(), page->payloadSize()); | 1411 addToFreeList(page->payload(), page->payloadSize()); |
1346 } | 1412 } |
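
allocatePage() reserves a region, carves it into blinkPagesPerRegion pages laid out as [guard os page | payload | guard os page], commits the first usable page for this thread and parks the rest in the free page pool. A sketch of the offset arithmetic with stand-in constants:

    #include <cstddef>
    #include <vector>

    // Sketch only: each page in a reserved region starts at a multiple of the
    // page size; the usable payload skips a leading guard page and stops short
    // of the trailing one.
    const size_t kSketchPageSize = 1 << 17;        // stand-in blink page size
    const size_t kSketchGuardSize = 4096;          // stand-in OS guard page size
    const size_t kSketchPagesPerRegion = 10;       // stand-in for blinkPagesPerRegion

    struct PageSpanSketch {
        size_t payloadOffset;
        size_t payloadSize;
    };

    inline std::vector<PageSpanSketch> carveRegionSketch(size_t regionStart)
    {
        std::vector<PageSpanSketch> pages;
        size_t offset = 0;
        for (size_t i = 0; i < kSketchPagesPerRegion; ++i) {
            pages.push_back({regionStart + offset + kSketchGuardSize,
                             kSketchPageSize - 2 * kSketchGuardSize});
            offset += kSketchPageSize;
        }
        return pages;
    }
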
1347 | 1413 |
1348 #if ENABLE(ASSERT) | 1414 #if ENABLE(ASSERT) |
1349 bool ThreadHeapForHeapPage::pagesToBeSweptContains(Address address) | 1415 bool ThreadHeap::pagesToBeSweptContains(Address address) |
1350 { | 1416 { |
1351 for (BaseHeapPage* page = m_firstUnsweptPage; page; page = page->next()) { | 1417 for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
1352 if (page->contains(address)) | 1418 if (page->contains(address)) |
1353 return true; | 1419 return true; |
1354 } | 1420 } |
1355 return false; | 1421 return false; |
1356 } | 1422 } |
1357 #endif | 1423 #endif |
1358 | 1424 |
1359 size_t ThreadHeap::objectPayloadSizeForTesting() | 1425 size_t ThreadHeap::objectPayloadSizeForTesting() |
1360 { | 1426 { |
1361 ASSERT(isConsistentForSweeping()); | 1427 ASSERT(isConsistentForSweeping()); |
1362 ASSERT(!m_firstUnsweptPage); | 1428 ASSERT(!m_firstUnsweptPage); |
| 1429 ASSERT(!m_firstUnsweptLargeObject); |
1363 | 1430 |
1364 size_t objectPayloadSize = 0; | 1431 size_t objectPayloadSize = 0; |
1365 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) | 1432 for (HeapPage* page = m_firstPage; page; page = page->next()) |
1366 objectPayloadSize += page->objectPayloadSizeForTesting(); | 1433 objectPayloadSize += page->objectPayloadSizeForTesting(); |
| 1434 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject
= largeObject->next()) |
| 1435 objectPayloadSize += largeObject->objectPayloadSizeForTesting(); |
1367 return objectPayloadSize; | 1436 return objectPayloadSize; |
1368 } | 1437 } |
1369 | 1438 |
1370 #if ENABLE(ASSERT) | 1439 #if ENABLE(ASSERT) |
1371 bool ThreadHeapForHeapPage::isConsistentForSweeping() | 1440 bool ThreadHeap::isConsistentForSweeping() |
1372 { | 1441 { |
1373 // A thread heap is consistent for sweeping if none of the pages to be swept | 1442 // A thread heap is consistent for sweeping if none of the pages to be swept |
1374 // contain a freelist block or the current allocation point. | 1443 // contain a freelist block or the current allocation point. |
1375 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | 1444 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
1376 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { | 1445 for (FreeListEntry* freeListEntry = m_freeList.m_freeLists[i]; freeListEntry; freeListEntry = freeListEntry->next()) { |
1377 if (pagesToBeSweptContains(freeListEntry->address())) | 1446 if (pagesToBeSweptContains(freeListEntry->address())) |
1378 return false; | 1447 return false; |
1379 } | 1448 } |
1380 } | 1449 } |
1381 if (hasCurrentAllocationArea()) { | 1450 if (hasCurrentAllocationArea()) { |
1382 if (pagesToBeSweptContains(currentAllocationPoint())) | 1451 if (pagesToBeSweptContains(currentAllocationPoint())) |
1383 return false; | 1452 return false; |
1384 } | 1453 } |
1385 return true; | 1454 return true; |
1386 } | 1455 } |
1387 #endif | 1456 #endif |
1388 | 1457 |
1389 void ThreadHeap::makeConsistentForSweeping() | 1458 void ThreadHeap::makeConsistentForSweeping() |
1390 { | 1459 { |
| 1460 preparePagesForSweeping(); |
| 1461 setAllocationPoint(nullptr, 0); |
1391 clearFreeLists(); | 1462 clearFreeLists(); |
| 1463 } |
| 1464 |
| 1465 void ThreadHeap::preparePagesForSweeping() |
| 1466 { |
1392 ASSERT(isConsistentForSweeping()); | 1467 ASSERT(isConsistentForSweeping()); |
1393 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) | 1468 for (HeapPage* page = m_firstPage; page; page = page->next()) |
1394 page->markAsUnswept(); | 1469 page->markAsUnswept(); |
1395 | 1470 |
1396 // If a new GC is requested before this thread got around to sweep, | 1471 // If a new GC is requested before this thread got around to sweep, |
1397 // ie. due to the thread doing a long running operation, we clear | 1472 // ie. due to the thread doing a long running operation, we clear |
1398 // the mark bits and mark any of the dead objects as dead. The latter | 1473 // the mark bits and mark any of the dead objects as dead. The latter |
1399 // is used to ensure the next GC marking does not trace already dead | 1474 // is used to ensure the next GC marking does not trace already dead |
1400 // objects. If we trace a dead object we could end up tracing into | 1475 // objects. If we trace a dead object we could end up tracing into |
1401 // garbage or the middle of another object via the newly conservatively | 1476 // garbage or the middle of another object via the newly conservatively |
1402 // found object. | 1477 // found object. |
1403 BaseHeapPage* previousPage = nullptr; | 1478 HeapPage* previousPage = nullptr; |
1404 for (BaseHeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { | 1479 for (HeapPage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { |
1405 page->markUnmarkedObjectsDead(); | 1480 page->markUnmarkedObjectsDead(); |
1406 ASSERT(!page->hasBeenSwept()); | 1481 ASSERT(!page->hasBeenSwept()); |
1407 } | 1482 } |
1408 if (previousPage) { | 1483 if (previousPage) { |
1409 ASSERT(m_firstUnsweptPage); | 1484 ASSERT(m_firstUnsweptPage); |
1410 previousPage->m_next = m_firstPage; | 1485 previousPage->m_next = m_firstPage; |
1411 m_firstPage = m_firstUnsweptPage; | 1486 m_firstPage = m_firstUnsweptPage; |
1412 m_firstUnsweptPage = nullptr; | 1487 m_firstUnsweptPage = nullptr; |
1413 } | 1488 } |
1414 ASSERT(!m_firstUnsweptPage); | 1489 ASSERT(!m_firstUnsweptPage); |
| 1490 |
| 1491 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) |
| 1492 largeObject->markAsUnswept(); |
| 1493 |
| 1494 LargeObject* previousLargeObject = nullptr; |
| 1495 for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; previousLargeObject = largeObject, largeObject = largeObject->next()) { |
| 1496 largeObject->markUnmarkedObjectsDead(); |
| 1497 ASSERT(!largeObject->hasBeenSwept()); |
| 1498 } |
| 1499 if (previousLargeObject) { |
| 1500 ASSERT(m_firstUnsweptLargeObject); |
| 1501 previousLargeObject->m_next = m_firstLargeObject; |
| 1502 m_firstLargeObject = m_firstUnsweptLargeObject; |
| 1503 m_firstUnsweptLargeObject = nullptr; |
| 1504 } |
| 1505 ASSERT(!m_firstUnsweptLargeObject); |
1415 } | 1506 } |
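
[Editorial note] The page bookkeeping above follows one pattern, applied first to pages and then to large objects: walk the unswept list once (marking unmarked objects dead as it goes), then splice the whole unswept list onto the front of the swept list. A minimal sketch of that splice, with a hypothetical singly linked Node type standing in for HeapPage/LargeObject:

    struct Node {
        Node* next = nullptr;
    };

    // Prepend the whole "unswept" list onto the "swept" list and empty it,
    // visiting every unswept node once on the way.
    void spliceUnsweptOntoFirst(Node*& first, Node*& firstUnswept)
    {
        Node* previous = nullptr;
        for (Node* node = firstUnswept; node; previous = node, node = node->next) {
            // Per-node work would go here, e.g. marking unmarked objects dead.
        }
        if (previous) {
            previous->next = first;
            first = firstUnswept;
            firstUnswept = nullptr;
        }
    }
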
1416 | 1507 |
1417 void ThreadHeapForHeapPage::clearFreeLists() | 1508 void ThreadHeap::clearFreeLists() |
1418 { | 1509 { |
1419 setAllocationPoint(nullptr, 0); | |
1420 m_freeList.clear(); | 1510 m_freeList.clear(); |
1421 } | 1511 } |
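
[Editorial note] For context on what clearFreeLists() resets: the free list scanned in isConsistentForSweeping() is segregated into buckets indexed roughly by the log2 of the entry size, which is why that function loops over blinkPageSizeLog2 buckets. A small illustrative sketch of that bucketing; the bucket count and types here are assumptions, not Blink's definitions:

    #include <array>
    #include <cstddef>

    constexpr size_t kBucketCount = 17; // stand-in for a page-size log2

    struct Entry {
        size_t size;
        Entry* next;
    };

    inline size_t bucketIndexForSize(size_t size)
    {
        size_t index = 0;
        while ((size >> (index + 1)) && index + 1 < kBucketCount)
            ++index;
        return index; // floor(log2(size)), clamped to the last bucket
    }

    struct SegregatedFreeList {
        std::array<Entry*, kBucketCount> buckets { };

        void add(Entry* entry)
        {
            size_t index = bucketIndexForSize(entry->size);
            entry->next = buckets[index];
            buckets[index] = entry;
        }
        void clear() { buckets.fill(nullptr); }
    };
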
1422 | 1512 |
1423 #if ENABLE(GC_PROFILING) | 1513 #if ENABLE(GC_PROFILING) |
1424 void ThreadHeap::snapshotFreeList(TracedValue& json) | 1514 void ThreadHeap::snapshotFreeList(TracedValue& json) |
1425 { | 1515 { |
1426 json.setInteger("cumulativeAllocationSize", m_cumulativeAllocationSize); | 1516 json.setInteger("cumulativeAllocationSize", m_cumulativeAllocationSize); |
1427 json.setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount); | 1517 json.setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount); |
1428 json.setInteger("inlineAllocationCount", m_inlineAllocationCount); | 1518 json.setInteger("inlineAllocationCount", m_inlineAllocationCount); |
1429 json.setInteger("allocationCount", m_allocationCount); | 1519 json.setInteger("allocationCount", m_allocationCount); |
(...skipping 68 matching lines...)
1498 ++entryCount; | 1588 ++entryCount; |
1499 freeSize += entry->size(); | 1589 freeSize += entry->size(); |
1500 } | 1590 } |
1501 totalFreeSize += freeSize; | 1591 totalFreeSize += freeSize; |
1502 } | 1592 } |
1503 } | 1593 } |
1504 #endif | 1594 #endif |
1505 | 1595 |
1506 HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap) | 1596 HeapPage::HeapPage(PageMemory* storage, ThreadHeap* heap) |
1507 : BaseHeapPage(storage, heap) | 1597 : BaseHeapPage(storage, heap) |
| 1598 , m_next(nullptr) |
1508 { | 1599 { |
1509 m_objectStartBitMapComputed = false; | 1600 m_objectStartBitMapComputed = false; |
1510 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); | 1601 ASSERT(isPageHeaderAddress(reinterpret_cast<Address>(this))); |
1511 } | 1602 } |
1512 | 1603 |
1513 size_t HeapPage::objectPayloadSizeForTesting() | 1604 size_t HeapPage::objectPayloadSizeForTesting() |
1514 { | 1605 { |
1515 size_t objectPayloadSize = 0; | 1606 size_t objectPayloadSize = 0; |
1516 Address headerAddress = payload(); | 1607 Address headerAddress = payload(); |
1517 markAsSwept(); | 1608 markAsSwept(); |
(...skipping 22 matching lines...)
1540 clearObjectStartBitMap(); | 1631 clearObjectStartBitMap(); |
1541 | 1632 |
1542 size_t markedObjectSize = 0; | 1633 size_t markedObjectSize = 0; |
1543 Address startOfGap = payload(); | 1634 Address startOfGap = payload(); |
1544 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { | 1635 for (Address headerAddress = startOfGap; headerAddress < payloadEnd(); ) { |
1545 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1636 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1546 ASSERT(header->size() > 0); | 1637 ASSERT(header->size() > 0); |
1547 ASSERT(header->size() < blinkPagePayloadSize()); | 1638 ASSERT(header->size() < blinkPagePayloadSize()); |
1548 | 1639 |
1549 if (header->isPromptlyFreed()) | 1640 if (header->isPromptlyFreed()) |
1550 heapForHeapPage()->decreasePromptlyFreedSize(header->size()); | 1641 heap()->decreasePromptlyFreedSize(header->size()); |
1551 if (header->isFree()) { | 1642 if (header->isFree()) { |
1552 size_t size = header->size(); | 1643 size_t size = header->size(); |
1553 // Zero the memory in the free list header to maintain the | 1644 // Zero the memory in the free list header to maintain the |
1554 // invariant that memory on the free list is zero filled. | 1645 // invariant that memory on the free list is zero filled. |
1555 // The rest of the memory is already on the free list and is | 1646 // The rest of the memory is already on the free list and is |
1556 // therefore already zero filled. | 1647 // therefore already zero filled. |
1557 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); | 1648 FILL_ZERO_IF_PRODUCTION(headerAddress, size < sizeof(FreeListEntry) ? size : sizeof(FreeListEntry)); |
1558 headerAddress += size; | 1649 headerAddress += size; |
1559 continue; | 1650 continue; |
1560 } | 1651 } |
(...skipping 12 matching lines...)
1573 header->finalize(payload, payloadSize); | 1664 header->finalize(payload, payloadSize); |
1574 // This memory will be added to the freelist. Maintain the invariant | 1665 // This memory will be added to the freelist. Maintain the invariant |
1575 // that memory on the freelist is zero filled. | 1666 // that memory on the freelist is zero filled. |
1576 FILL_ZERO_IF_PRODUCTION(headerAddress, size); | 1667 FILL_ZERO_IF_PRODUCTION(headerAddress, size); |
1577 ASAN_POISON_MEMORY_REGION(payload, payloadSize); | 1668 ASAN_POISON_MEMORY_REGION(payload, payloadSize); |
1578 headerAddress += size; | 1669 headerAddress += size; |
1579 continue; | 1670 continue; |
1580 } | 1671 } |
1581 | 1672 |
1582 if (startOfGap != headerAddress) | 1673 if (startOfGap != headerAddress) |
1583 heapForHeapPage()->addToFreeList(startOfGap, headerAddress - startOfGap); | 1674 heap()->addToFreeList(startOfGap, headerAddress - startOfGap); |
1584 header->unmark(); | 1675 header->unmark(); |
1585 headerAddress += header->size(); | 1676 headerAddress += header->size(); |
1586 markedObjectSize += header->size(); | 1677 markedObjectSize += header->size(); |
1587 startOfGap = headerAddress; | 1678 startOfGap = headerAddress; |
1588 } | 1679 } |
1589 if (startOfGap != payloadEnd()) | 1680 if (startOfGap != payloadEnd()) |
1590 heapForHeapPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); | 1681 heap()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
1591 | 1682 |
1592 if (markedObjectSize) | 1683 if (markedObjectSize) |
1593 Heap::increaseMarkedObjectSize(markedObjectSize); | 1684 Heap::increaseMarkedObjectSize(markedObjectSize); |
1594 } | 1685 } |
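
[Editorial note] The sweep above coalesces runs of dead and already-free memory into single free-list blocks, keeping only marked objects. A simplified model of that control flow, with objects abstracted to (size, live) pairs; everything here is hypothetical and mirrors only the loop structure, not Blink's memory layout:

    #include <cstddef>
    #include <vector>

    struct ObjectInfo {
        size_t size;
        bool live; // corresponds to header->isMarked()
    };

    struct FreeBlock {
        size_t offset;
        size_t size;
    };

    // Walk the payload once; dead objects extend the current gap, live
    // objects close it and push the accumulated gap onto the free list.
    std::vector<FreeBlock> sweep(const std::vector<ObjectInfo>& objects, size_t payloadSize)
    {
        std::vector<FreeBlock> freeList;
        size_t offset = 0;
        size_t startOfGap = 0;
        for (const ObjectInfo& object : objects) {
            if (!object.live) {
                offset += object.size; // the gap keeps growing
                continue;
            }
            if (startOfGap != offset)
                freeList.push_back({startOfGap, offset - startOfGap});
            offset += object.size;
            startOfGap = offset;
        }
        if (startOfGap != payloadSize)
            freeList.push_back({startOfGap, payloadSize - startOfGap});
        return freeList;
    }
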
1595 | 1686 |
1596 void HeapPage::markUnmarkedObjectsDead() | 1687 void HeapPage::markUnmarkedObjectsDead() |
1597 { | 1688 { |
1598 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1689 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
1599 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1690 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1600 ASSERT(header->size() < blinkPagePayloadSize()); | 1691 ASSERT(header->size() < blinkPagePayloadSize()); |
1601 // Check for a free list entry first, since we cannot call | 1692 // Check for a free list entry first, since we cannot call |
1602 // isMarked on a free list entry. | 1693 // isMarked on a free list entry. |
1603 if (header->isFree()) { | 1694 if (header->isFree()) { |
1604 headerAddress += header->size(); | 1695 headerAddress += header->size(); |
1605 continue; | 1696 continue; |
1606 } | 1697 } |
1607 header->checkHeader(); | 1698 header->checkHeader(); |
1608 if (header->isMarked()) | 1699 if (header->isMarked()) |
1609 header->unmark(); | 1700 header->unmark(); |
1610 else | 1701 else |
1611 header->markDead(); | 1702 header->markDead(); |
1612 headerAddress += header->size(); | 1703 headerAddress += header->size(); |
1613 } | 1704 } |
1614 } | 1705 } |
1615 | 1706 |
1616 void HeapPage::removeFromHeap() | 1707 void HeapPage::removeFromHeap(ThreadHeap* heap) |
1617 { | 1708 { |
1618 heapForHeapPage()->freePage(this); | 1709 heap->freePage(this); |
1619 } | |
1620 | |
1621 ThreadHeapForHeapPage* HeapPage::heapForHeapPage() | |
1622 { | |
1623 return static_cast<ThreadHeapForHeapPage*>(heap()); | |
1624 } | 1710 } |
1625 | 1711 |
1626 void HeapPage::populateObjectStartBitMap() | 1712 void HeapPage::populateObjectStartBitMap() |
1627 { | 1713 { |
1628 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); | 1714 memset(&m_objectStartBitMap, 0, objectStartBitMapSize); |
1629 Address start = payload(); | 1715 Address start = payload(); |
1630 for (Address headerAddress = start; headerAddress < payloadEnd();) { | 1716 for (Address headerAddress = start; headerAddress < payloadEnd();) { |
1631 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1717 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
1632 size_t objectOffset = headerAddress - start; | 1718 size_t objectOffset = headerAddress - start; |
1633 ASSERT(!(objectOffset & allocationMask)); | 1719 ASSERT(!(objectOffset & allocationMask)); |
(...skipping 898 matching lines...)
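
[Editorial note] For readers unfamiliar with the object-start bitmap being populated just above the elided lines: each allocation-granule-aligned offset in the payload maps to one bit, which is set when an object header starts there. A back-of-the-envelope sketch of that mapping, assuming an 8-byte granularity; the constant and helper names are illustrative, not Blink's:

    #include <cstddef>
    #include <cstdint>

    constexpr size_t kAllocationGranularity = 8;
    constexpr size_t kAllocationMask = kAllocationGranularity - 1;

    // Map a payload-relative object offset to a (byte, bit) position.
    inline void objectStartBit(size_t objectOffset, size_t& byteIndex, uint8_t& bitMask)
    {
        // Offsets are always granule-aligned, so one bit per granule suffices.
        size_t granuleIndex = (objectOffset & ~kAllocationMask) / kAllocationGranularity;
        byteIndex = granuleIndex / 8;
        bitMask = static_cast<uint8_t>(1u << (granuleIndex & 7));
    }

    // Record that an object header starts at objectOffset.
    inline void setObjectStart(uint8_t* bitmap, size_t objectOffset)
    {
        size_t byteIndex;
        uint8_t bitMask;
        objectStartBit(objectOffset, byteIndex, bitMask);
        bitmap[byteIndex] |= bitMask;
    }
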
2532 | 2618 |
2533 double Heap::estimatedMarkingTime() | 2619 double Heap::estimatedMarkingTime() |
2534 { | 2620 { |
2535 // FIXME: Implement heuristics | 2621 // FIXME: Implement heuristics |
2536 return 0.0; | 2622 return 0.0; |
2537 } | 2623 } |
2538 | 2624 |
2539 void ThreadHeap::prepareHeapForTermination() | 2625 void ThreadHeap::prepareHeapForTermination() |
2540 { | 2626 { |
2541 ASSERT(!m_firstUnsweptPage); | 2627 ASSERT(!m_firstUnsweptPage); |
2542 for (BaseHeapPage* page = m_firstPage; page; page = page->next()) { | 2628 ASSERT(!m_firstUnsweptLargeObject); |
| 2629 for (HeapPage* page = m_firstPage; page; page = page->next()) { |
2543 page->setTerminating(); | 2630 page->setTerminating(); |
2544 } | 2631 } |
| 2632 for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
| 2633 largeObject->setTerminating(); |
| 2634 } |
2545 } | 2635 } |
2546 | 2636 |
2547 size_t Heap::objectPayloadSizeForTesting() | 2637 size_t Heap::objectPayloadSizeForTesting() |
2548 { | 2638 { |
2549 size_t objectPayloadSize = 0; | 2639 size_t objectPayloadSize = 0; |
2550 for (ThreadState* state : ThreadState::attachedThreads()) { | 2640 for (ThreadState* state : ThreadState::attachedThreads()) { |
2551 state->setGCState(ThreadState::GCRunning); | 2641 state->setGCState(ThreadState::GCRunning); |
2552 state->makeConsistentForSweeping(); | 2642 state->makeConsistentForSweeping(); |
2553 objectPayloadSize += state->objectPayloadSizeForTesting(); | 2643 objectPayloadSize += state->objectPayloadSizeForTesting(); |
2554 state->setGCState(ThreadState::EagerSweepScheduled); | 2644 state->setGCState(ThreadState::EagerSweepScheduled); |
(...skipping 14 matching lines...)
2569 ASSERT(!state->isInGC()); | 2659 ASSERT(!state->isInGC()); |
2570 | 2660 |
2571 // Don't promptly free large objects because their page is never reused. | 2661 // Don't promptly free large objects because their page is never reused. |
2572 // Don't free backings allocated on other threads. | 2662 // Don't free backings allocated on other threads. |
2573 BaseHeapPage* page = pageFromObject(address); | 2663 BaseHeapPage* page = pageFromObject(address); |
2574 if (page->isLargeObject() || page->heap()->threadState() != state) | 2664 if (page->isLargeObject() || page->heap()->threadState() != state) |
2575 return; | 2665 return; |
2576 | 2666 |
2577 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 2667 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
2578 header->checkHeader(); | 2668 header->checkHeader(); |
2579 static_cast<HeapPage*>(page)->heapForHeapPage()->promptlyFreeObject(header); | 2669 static_cast<HeapPage*>(page)->heap()->promptlyFreeObject(header); |
2580 } | 2670 } |
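
[Editorial note] backingFree() above only dispatches; the interesting bookkeeping is the promptly-freed size that HeapPage::sweep() later debits via decreasePromptlyFreedSize(). A toy sketch of that accounting follows, under the assumption (not shown in this excerpt) that the heap uses the running total to decide when eager coalescing becomes worthwhile; the class is a stand-in, not the real ThreadHeap:

    #include <cstddef>

    class PromptlyFreedAccounting {
    public:
        // Called when an object is promptly freed by the allocator.
        void onPromptlyFreed(size_t objectSize) { m_promptlyFreedSize += objectSize; }
        // Called when the sweep encounters a promptly-freed header.
        void onSweptPromptlyFreedHeader(size_t objectSize) { m_promptlyFreedSize -= objectSize; }
        // Once enough bytes have been promptly freed, eagerly coalescing the
        // page may pay off instead of waiting for the next GC.
        bool worthCoalescing(size_t threshold) const { return m_promptlyFreedSize >= threshold; }

    private:
        size_t m_promptlyFreedSize = 0;
    };
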
2581 | 2671 |
2582 void HeapAllocator::freeVectorBacking(void* address) | 2672 void HeapAllocator::freeVectorBacking(void* address) |
2583 { | 2673 { |
2584 backingFree(address); | 2674 backingFree(address); |
2585 } | 2675 } |
2586 | 2676 |
2587 void HeapAllocator::freeInlineVectorBacking(void* address) | 2677 void HeapAllocator::freeInlineVectorBacking(void* address) |
2588 { | 2678 { |
2589 backingFree(address); | 2679 backingFree(address); |
(...skipping 16 matching lines...)
2606 ASSERT(state->isAllocationAllowed()); | 2696 ASSERT(state->isAllocationAllowed()); |
2607 | 2697 |
2608 // FIXME: Support expand for large objects. | 2698 // FIXME: Support expand for large objects. |
2609 // Don't expand backings allocated on other threads. | 2699 // Don't expand backings allocated on other threads. |
2610 BaseHeapPage* page = pageFromObject(address); | 2700 BaseHeapPage* page = pageFromObject(address); |
2611 if (page->isLargeObject() || page->heap()->threadState() != state) | 2701 if (page->isLargeObject() || page->heap()->threadState() != state) |
2612 return false; | 2702 return false; |
2613 | 2703 |
2614 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 2704 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
2615 header->checkHeader(); | 2705 header->checkHeader(); |
2616 return static_cast<HeapPage*>(page)->heapForHeapPage()->expandObject(header, newSize); | 2706 return static_cast<HeapPage*>(page)->heap()->expandObject(header, newSize); |
2617 } | 2707 } |
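
[Editorial note] As a rough illustration of how an in-place expand like expandObject() can succeed without copying (a simplified, assumption-based model, not the code under review): if the object ends exactly at the bump-allocation cursor and the remaining bump space covers the growth, the cursor can simply be advanced.

    #include <cstddef>
    #include <cstdint>

    struct BumpArea {
        uintptr_t allocationPoint; // start of the unused bump space
        size_t remainingSize;      // bytes left in the bump space
    };

    bool tryExpandInPlace(BumpArea& area, uintptr_t objectEnd, size_t extraBytes)
    {
        if (objectEnd != area.allocationPoint)
            return false; // something was allocated after the object; must reallocate
        if (extraBytes > area.remainingSize)
            return false; // not enough room left on this page
        area.allocationPoint += extraBytes;
        area.remainingSize -= extraBytes;
        return true;
    }
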
2618 | 2708 |
2619 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) | 2709 bool HeapAllocator::expandVectorBacking(void* address, size_t newSize) |
2620 { | 2710 { |
2621 return backingExpand(address, newSize); | 2711 return backingExpand(address, newSize); |
2622 } | 2712 } |
2623 | 2713 |
2624 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize) | 2714 bool HeapAllocator::expandInlineVectorBacking(void* address, size_t newSize) |
2625 { | 2715 { |
2626 return backingExpand(address, newSize); | 2716 return backingExpand(address, newSize); |
(...skipping 22 matching lines...)
2649 ASSERT(state->isAllocationAllowed()); | 2739 ASSERT(state->isAllocationAllowed()); |
2650 | 2740 |
2651 // FIXME: Support shrink for large objects. | 2741 // FIXME: Support shrink for large objects. |
2652 // Don't shrink backings allocated on other threads. | 2742 // Don't shrink backings allocated on other threads. |
2653 BaseHeapPage* page = pageFromObject(address); | 2743 BaseHeapPage* page = pageFromObject(address); |
2654 if (page->isLargeObject() || page->heap()->threadState() != state) | 2744 if (page->isLargeObject() || page->heap()->threadState() != state) |
2655 return; | 2745 return; |
2656 | 2746 |
2657 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); | 2747 HeapObjectHeader* header = HeapObjectHeader::fromPayload(address); |
2658 header->checkHeader(); | 2748 header->checkHeader(); |
2659 static_cast<HeapPage*>(page)->heapForHeapPage()->shrinkObject(header, quantizedShrunkSize); | 2749 static_cast<HeapPage*>(page)->heap()->shrinkObject(header, quantizedShrunkSize); |
2660 } | 2750 } |
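
[Editorial note] Similarly, a shrink can be served in place by giving the unused tail back to the allocator. The sketch below is illustrative only: the branch structure (return the tail to the bump area when the object abuts the allocation point, otherwise publish it as a free block) is an assumption about shrinkObject(), whose body is not part of this excerpt, and the types and addToFreeList callback are placeholders.

    #include <cstddef>
    #include <cstdint>
    #include <functional>

    struct ShrinkContext {
        uintptr_t allocationPoint;
        size_t remainingSize;
        std::function<void(uintptr_t, size_t)> addToFreeList;
    };

    void shrinkInPlace(ShrinkContext& context, uintptr_t objectStart, size_t oldSize, size_t newSize)
    {
        size_t shrinkSize = oldSize - newSize;
        uintptr_t newEnd = objectStart + newSize;
        if (objectStart + oldSize == context.allocationPoint) {
            // Give the tail back to the bump allocator.
            context.allocationPoint = newEnd;
            context.remainingSize += shrinkSize;
        } else {
            // Otherwise the tail becomes an ordinary free-list block.
            context.addToFreeList(newEnd, shrinkSize);
        }
    }
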
2661 | 2751 |
2662 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 2752 void HeapAllocator::shrinkVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |
2663 { | 2753 { |
2664 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 2754 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
2665 } | 2755 } |
2666 | 2756 |
2667 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) | 2757 void HeapAllocator::shrinkInlineVectorBackingInternal(void* address, size_t quantizedCurrentSize, size_t quantizedShrunkSize) |
2668 { | 2758 { |
2669 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); | 2759 backingShrink(address, quantizedCurrentSize, quantizedShrunkSize); |
(...skipping 99 matching lines...)
2769 bool Heap::s_shutdownCalled = false; | 2859 bool Heap::s_shutdownCalled = false; |
2770 bool Heap::s_lastGCWasConservative = false; | 2860 bool Heap::s_lastGCWasConservative = false; |
2771 FreePagePool* Heap::s_freePagePool; | 2861 FreePagePool* Heap::s_freePagePool; |
2772 OrphanedPagePool* Heap::s_orphanedPagePool; | 2862 OrphanedPagePool* Heap::s_orphanedPagePool; |
2773 Heap::RegionTree* Heap::s_regionTree = nullptr; | 2863 Heap::RegionTree* Heap::s_regionTree = nullptr; |
2774 size_t Heap::s_allocatedObjectSize = 0; | 2864 size_t Heap::s_allocatedObjectSize = 0; |
2775 size_t Heap::s_allocatedSpace = 0; | 2865 size_t Heap::s_allocatedSpace = 0; |
2776 size_t Heap::s_markedObjectSize = 0; | 2866 size_t Heap::s_markedObjectSize = 0; |
2777 | 2867 |
2778 } // namespace blink | 2868 } // namespace blink |