OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 63 matching lines...)
74 // The dead bit is used for objects that have gone through a GC marking, but did | 74 // The dead bit is used for objects that have gone through a GC marking, but did |
75 // not get swept before a new GC started. In that case we set the dead bit on | 75 // not get swept before a new GC started. In that case we set the dead bit on |
76 // objects that were not marked in the previous GC to ensure we are not tracing | 76 // objects that were not marked in the previous GC to ensure we are not tracing |
77 // them via a conservatively found pointer. Tracing dead objects could lead to | 77 // them via a conservatively found pointer. Tracing dead objects could lead to |
78 // tracing of already finalized objects in another thread's heap, which is a | 78 // tracing of already finalized objects in another thread's heap, which is a |
79 // use-after-free situation. | 79 // use-after-free situation. |
80 const size_t deadBitMask = 4; | 80 const size_t deadBitMask = 4; |
81 // On free-list entries we reuse the dead bit to distinguish a normal free-list | 81 // On free-list entries we reuse the dead bit to distinguish a normal free-list |
82 // entry from one that has been promptly freed. | 82 // entry from one that has been promptly freed. |
83 const size_t promptlyFreedMask = freeListMask | deadBitMask; | 83 const size_t promptlyFreedMask = freeListMask | deadBitMask; |
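Taken with the flag masks defined in the elided lines above (freeListMask and the mark bit occupy the lowest header bits in this layout), the state tests reduce to plain bitwise checks. A minimal sketch, where encodedSize stands in for HeapObjectHeader's m_size word:

    inline bool isPromptlyFreed(size_t encodedSize)
    {
        // A promptly freed entry carries both the free-list bit and the
        // reused dead bit, so compare against the combined mask.
        return (encodedSize & promptlyFreedMask) == promptlyFreedMask;
    }

    inline bool isDeadObject(size_t encodedSize)
    {
        // On a non-free-list object the dead bit alone flags an object that
        // survived unmarked into a new GC cycle and must not be traced.
        return encodedSize & deadBitMask;
    }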
84 #if ENABLE(GC_PROFILE_HEAP) | 84 //#if ENABLE(GC_PROFILE_HEAP) |
85 const size_t heapObjectGenerations = 8; | 85 const size_t heapObjectGenerations = 8; |
86 const size_t maxHeapObjectAge = heapObjectGenerations - 1; | 86 const size_t maxHeapObjectAge = heapObjectGenerations - 1; |
87 const size_t heapObjectAgeMask = ~(maxHeapObjectSize - 1); | 87 const size_t heapObjectAgeMask = ~(maxHeapObjectSize - 1); |
88 const size_t sizeMask = ~heapObjectAgeMask & ~static_cast<size_t>(7); | 88 const size_t sizeMask = ~heapObjectAgeMask & ~static_cast<size_t>(7); |
89 #else | 89 //#else |
90 const size_t sizeMask = ~static_cast<size_t>(7); | 90 //const size_t sizeMask = ~static_cast<size_t>(7); |
91 #endif | 91 //#endif |
92 const uint8_t freelistZapValue = 42; | 92 const uint8_t freelistZapValue = 42; |
93 const uint8_t finalizedZapValue = 24; | 93 const uint8_t finalizedZapValue = 24; |
94 // The orphaned zap value must be zero in the lowest bits to allow for using | 94 // The orphaned zap value must be zero in the lowest bits to allow for using |
95 // the mark bit when tracing. | 95 // the mark bit when tracing. |
96 const uint8_t orphanedZapValue = 240; | 96 const uint8_t orphanedZapValue = 240; |
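Since 240 is 0xF0, the four lowest bits of orphanedZapValue are clear. A compile-time check of that property could read (a sketch, not code from this patch):

    // The mark bit lives in the low header bits, so memory zapped with
    // orphanedZapValue can never read as 'marked' during tracing.
    static_assert((orphanedZapValue & 0xF) == 0,
        "orphaned zap value must leave the low flag bits clear");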
97 const int numberOfPagesToConsiderForCoalescing = 100; | 97 const int numberOfPagesToConsiderForCoalescing = 100; |
98 | 98 |
99 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) | 99 #if ENABLE(ASSERT) || defined(LEAK_SANITIZER) || defined(ADDRESS_SANITIZER) |
100 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) | 100 #define FILL_ZERO_IF_PRODUCTION(address, size) do { } while (false) |
101 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) | 101 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) memset((address), 0, (size)) |
102 #else | 102 #else |
103 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) | 103 #define FILL_ZERO_IF_PRODUCTION(address, size) memset((address), 0, (size)) |
104 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) | 104 #define FILL_ZERO_IF_NOT_PRODUCTION(address, size) do { } while (false) |
105 #endif | 105 #endif |
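The two macros are mirror images, so exactly one expands to a real memset in any build: ASSERT and sanitizer builds zero payloads at allocation time (FILL_ZERO_IF_NOT_PRODUCTION, as the allocate() fast path further down does), while production builds are assumed to zero on the freeing side instead. A usage sketch with hypothetical names:

    void illustrateFillPolicy(Address payload, size_t payloadSize)
    {
        // Allocation path: zero only in ASSERT/sanitizer builds, where
        // stale bytes would otherwise hide bugs from the tooling.
        FILL_ZERO_IF_NOT_PRODUCTION(payload, payloadSize);

        // Free path: zero only in production builds; debug builds rely on
        // zapping and ASAN poisoning instead.
        FILL_ZERO_IF_PRODUCTION(payload, payloadSize);
    }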
106 | 106 |
107 class CallbackStack; | 107 class CallbackStack; |
108 class PageMemory; | 108 class PageMemory; |
109 template<ThreadAffinity affinity> class ThreadLocalPersistents; | 109 template<ThreadAffinity affinity> class ThreadLocalPersistents; |
110 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; | 110 template<typename T, typename RootsAccessor = ThreadLocalPersistents<ThreadingTrait<T>::Affinity>> class Persistent; |
111 | 111 |
112 #if ENABLE(GC_PROFILE_HEAP) | 112 #if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) |
113 class TracedValue; | 113 class TracedValue; |
114 #endif | 114 #endif |
115 | 115 |
116 // Blink heap pages are set up with a guard page before and after the payload. | 116 // Blink heap pages are set up with a guard page before and after the payload. |
117 inline size_t blinkPagePayloadSize() | 117 inline size_t blinkPagePayloadSize() |
118 { | 118 { |
119 return blinkPageSize - 2 * WTF::kSystemPageSize; | 119 return blinkPageSize - 2 * WTF::kSystemPageSize; |
120 } | 120 } |
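A worked example, assuming a 128 KiB Blink page (blinkPageSize == 1 << 17) and 4 KiB system pages; the actual constants live in the elided lines:

    // blinkPagePayloadSize() = 131072 - 2 * 4096 = 122880 bytes:
    // each 128 KiB page gives up 8 KiB to its two guard pages, one before
    // and one after the payload, so stray writes past either end fault.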
121 | 121 |
122 // Blink heap pages are aligned to the Blink heap page size. | 122 // Blink heap pages are aligned to the Blink heap page size. |
(...skipping 215 matching lines...)
338 | 338 |
339 static void finalize(const GCInfo*, Address, size_t); | 339 static void finalize(const GCInfo*, Address, size_t); |
340 static HeapObjectHeader* fromPayload(const void*); | 340 static HeapObjectHeader* fromPayload(const void*); |
341 | 341 |
342 static const uint32_t magic = 0xc0de247; | 342 static const uint32_t magic = 0xc0de247; |
343 static const uint32_t zappedMagic = 0xC0DEdead; | 343 static const uint32_t zappedMagic = 0xC0DEdead; |
344 // The zap value for vtables should be < 4K to ensure it cannot be | 344 // The zap value for vtables should be < 4K to ensure it cannot be |
345 // used for dispatch. | 345 // used for dispatch. |
346 static const intptr_t zappedVTable = 0xd0d; | 346 static const intptr_t zappedVTable = 0xd0d; |
347 | 347 |
348 #if ENABLE(GC_PROFILE_HEAP) | 348 //#if ENABLE(GC_PROFILE_HEAP) |
349 NO_SANITIZE_ADDRESS | 349 NO_SANITIZE_ADDRESS |
350 size_t encodedSize() const { return m_size; } | 350 size_t encodedSize() const { return m_size; } |
351 | 351 |
352 NO_SANITIZE_ADDRESS | 352 NO_SANITIZE_ADDRESS |
353 size_t age() const { return m_size >> maxHeapObjectSizeLog2; } | 353 size_t age() const { return m_size >> maxHeapObjectSizeLog2; } |
354 | 354 |
355 NO_SANITIZE_ADDRESS | 355 NO_SANITIZE_ADDRESS |
356 void incAge() | 356 void incAge() |
357 { | 357 { |
358 size_t current = age(); | 358 size_t current = age(); |
359 if (current < maxHeapObjectAge) | 359 if (current < maxHeapObjectAge) |
360 m_size = ((current + 1) << maxHeapObjectSizeLog2) | (m_size & ~heapObjectAgeMask); | 360 m_size = ((current + 1) << maxHeapObjectSizeLog2) | (m_size & ~heapObjectAgeMask); |
361 } | 361 } |
362 #endif | 362 //#endif |
363 | 363 |
364 private: | 364 private: |
365 volatile uint32_t m_size; | 365 volatile uint32_t m_size; |
366 #if ENABLE(ASSERT) | 366 #if ENABLE(ASSERT) |
367 uint32_t m_magic; | 367 uint32_t m_magic; |
368 #endif | 368 #endif |
369 }; | 369 }; |
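With heap profiling enabled, m_size does double duty: the bits below maxHeapObjectSizeLog2 hold the encoded size and flag bits, and the bits covered by heapObjectAgeMask hold the generation count. A round-trip sketch, assuming maxHeapObjectSizeLog2 == 27 (so object sizes stay below maxHeapObjectSize == 1 << 27):

    // Pack an age into the high bits without disturbing size/flag bits.
    uint32_t withAge(uint32_t encodedSize, uint32_t age)
    {
        return (encodedSize & ~heapObjectAgeMask) | (age << maxHeapObjectSizeLog2);
    }
    // age() recovers it as m_size >> maxHeapObjectSizeLog2, and incAge()
    // saturates at maxHeapObjectAge (7) rather than wrapping.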
370 | 370 |
371 inline HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) | 371 inline HeapObjectHeader* HeapObjectHeader::fromPayload(const void* payload) |
372 { | 372 { |
(...skipping 168 matching lines...)
541 void clearObjectStartBitMap(); | 541 void clearObjectStartBitMap(); |
542 void finalize(Header*); | 542 void finalize(Header*); |
543 virtual void checkAndMarkPointer(Visitor*, Address) override; | 543 virtual void checkAndMarkPointer(Visitor*, Address) override; |
544 #if ENABLE(GC_PROFILE_MARKING) | 544 #if ENABLE(GC_PROFILE_MARKING) |
545 const GCInfo* findGCInfo(Address) override; | 545 const GCInfo* findGCInfo(Address) override; |
546 #endif | 546 #endif |
547 #if ENABLE(GC_PROFILE_HEAP) | 547 #if ENABLE(GC_PROFILE_HEAP) |
548 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); | 548 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*); |
549 #endif | 549 #endif |
550 | 550 |
551 void countUnmarkedObjects(); | |
552 | |
551 #if defined(ADDRESS_SANITIZER) | 553 #if defined(ADDRESS_SANITIZER) |
552 void poisonUnmarkedObjects(); | 554 void poisonUnmarkedObjects(); |
553 #endif | 555 #endif |
554 | 556 |
555 virtual void markOrphaned() override | 557 virtual void markOrphaned() override |
556 { | 558 { |
557 // Zap the payload with a recognizable value to detect any incorrect | 559 // Zap the payload with a recognizable value to detect any incorrect |
558 // cross thread pointer usage. | 560 // cross thread pointer usage. |
559 #if defined(ADDRESS_SANITIZER) | 561 #if defined(ADDRESS_SANITIZER) |
560 // This needs to zap poisoned memory as well. | 562 // This needs to zap poisoned memory as well. |
(...skipping 127 matching lines...)
688 | 690 |
689 // Find the page in this thread heap containing the given | 691 // Find the page in this thread heap containing the given |
690 // address. Returns 0 if the address is not contained in any | 692 // address. Returns 0 if the address is not contained in any |
691 // page in this thread heap. | 693 // page in this thread heap. |
692 virtual BaseHeapPage* pageFromAddress(Address) = 0; | 694 virtual BaseHeapPage* pageFromAddress(Address) = 0; |
693 | 695 |
694 #if ENABLE(GC_PROFILE_MARKING) | 696 #if ENABLE(GC_PROFILE_MARKING) |
695 virtual const GCInfo* findGCInfoOfLargeObject(Address) = 0; | 697 virtual const GCInfo* findGCInfoOfLargeObject(Address) = 0; |
696 #endif | 698 #endif |
697 | 699 |
700 #if ENABLE(GC_PROFILE_FREE_LIST) | |
701 virtual void snapshotFreeList(TracedValue*) = 0; | |
702 #endif | |
703 | |
698 #if ENABLE(GC_PROFILE_HEAP) | 704 #if ENABLE(GC_PROFILE_HEAP) |
699 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; | 705 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) = 0; |
700 #endif | 706 #endif |
701 | 707 |
702 // Sweep this part of the Blink heap. This finalizes dead objects | 708 // Sweep this part of the Blink heap. This finalizes dead objects |
703 // and builds freelists for all the unused memory. | 709 // and builds freelists for all the unused memory. |
704 virtual void sweep() = 0; | 710 virtual void sweep() = 0; |
705 virtual void postSweepProcessing() = 0; | 711 virtual void postSweepProcessing() = 0; |
706 | 712 |
707 virtual void clearFreeLists() = 0; | 713 virtual void clearFreeLists() = 0; |
708 virtual void markUnmarkedObjectsDead() = 0; | 714 virtual void markUnmarkedObjectsDead() = 0; |
709 | 715 |
710 virtual void makeConsistentForSweeping() = 0; | 716 virtual void makeConsistentForSweeping() = 0; |
711 #if ENABLE(ASSERT) | 717 #if ENABLE(ASSERT) |
712 virtual bool isConsistentForSweeping() = 0; | 718 virtual bool isConsistentForSweeping() = 0; |
713 #endif | 719 #endif |
714 virtual size_t objectPayloadSizeForTesting() = 0; | 720 virtual size_t objectPayloadSizeForTesting() = 0; |
715 | 721 |
716 virtual void prepareHeapForTermination() = 0; | 722 virtual void prepareHeapForTermination() = 0; |
717 }; | 723 }; |
718 | 724 |
719 template<typename Header> | 725 template<typename Header> |
720 class FreeList { | 726 class FreeList { |
721 public: | 727 public: |
722 FreeList(); | 728 FreeList(); |
723 | 729 |
724 void addToFreeList(Address, size_t); | 730 void addToFreeList(Address, size_t); |
725 void clear(); | 731 void clear(); |
732 FreeListEntry* takeEntry(size_t allocationSize); | |
733 | |
734 #if ENABLE(GC_PROFILE_FREE_LIST) | |
735 void countBucketSizes(size_t[], size_t[], size_t* freeSize) const; | |
736 #endif | |
726 | 737 |
727 // Returns a bucket number for inserting a FreeListEntry of a given size. | 738 // Returns a bucket number for inserting a FreeListEntry of a given size. |
728 // All FreeListEntries in the given bucket, n, have size >= 2^n. | 739 // All FreeListEntries in the given bucket, n, have size >= 2^n. |
729 static int bucketIndexForSize(size_t); | 740 static int bucketIndexForSize(size_t); |
730 | 741 |
731 private: | 742 private: |
732 int m_biggestFreeListIndex; | 743 int m_biggestFreeListIndex; |
733 | 744 |
734 // All FreeListEntries in the nth list have size >= 2^n. | 745 // All FreeListEntries in the nth list have size >= 2^n. |
735 FreeListEntry* m_freeLists[blinkPageSizeLog2]; | 746 FreeListEntry* m_freeLists[blinkPageSizeLog2]; |
736 | 747 |
748 #if ENABLE(ASSERT) | |
737 friend class ThreadHeap<Header>; | 749 friend class ThreadHeap<Header>; |
750 #endif | |
738 }; | 751 }; |
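bucketIndexForSize() itself is outside this hunk; an implementation consistent with the invariant stated above (every entry in bucket n has size >= 2^n) is a floor(log2(size)) computation, sketched here:

    template<typename Header>
    int FreeList<Header>::bucketIndexForSize(size_t size)
    {
        ASSERT(size > 0);
        // Index of the highest set bit: an entry of size s lands in
        // bucket n with 2^n <= s < 2^(n+1), which keeps the invariant.
        int index = -1;
        while (size) {
            size >>= 1;
            ++index;
        }
        return index;
    }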
739 | 752 |
740 // Thread heaps represent a part of the per-thread Blink heap. | 753 // Thread heaps represent a part of the per-thread Blink heap. |
741 // | 754 // |
742 // Each Blink thread has a number of thread heaps: one general heap | 755 // Each Blink thread has a number of thread heaps: one general heap |
743 // that contains any type of object and a number of heaps specialized | 756 // that contains any type of object and a number of heaps specialized |
744 // for specific object types (such as Node). | 757 // for specific object types (such as Node). |
745 // | 758 // |
746 // Each thread heap contains the functionality to allocate new objects | 759 // Each thread heap contains the functionality to allocate new objects |
747 // (potentially adding new pages to the heap), to find and mark | 760 // (potentially adding new pages to the heap), to find and mark |
748 // objects during conservative stack scanning and to sweep the set of | 761 // objects during conservative stack scanning and to sweep the set of |
749 // pages after a GC. | 762 // pages after a GC. |
750 template<typename Header> | 763 template<typename Header> |
751 class ThreadHeap : public BaseHeap { | 764 class ThreadHeap : public BaseHeap { |
752 public: | 765 public: |
753 ThreadHeap(ThreadState*, int); | 766 ThreadHeap(ThreadState*, int); |
754 virtual ~ThreadHeap(); | 767 virtual ~ThreadHeap(); |
755 virtual void cleanupPages() override; | 768 virtual void cleanupPages() override; |
756 | 769 |
757 virtual BaseHeapPage* pageFromAddress(Address) override; | 770 virtual BaseHeapPage* pageFromAddress(Address) override; |
758 #if ENABLE(GC_PROFILE_MARKING) | 771 #if ENABLE(GC_PROFILE_MARKING) |
759 virtual const GCInfo* findGCInfoOfLargeObject(Address) override; | 772 virtual const GCInfo* findGCInfoOfLargeObject(Address) override; |
760 #endif | 773 #endif |
774 #if ENABLE(GC_PROFILE_FREE_LIST) | |
775 virtual void snapshotFreeList(TracedValue*) override; | |
776 #endif | |
761 #if ENABLE(GC_PROFILE_HEAP) | 777 #if ENABLE(GC_PROFILE_HEAP) |
762 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; | 778 virtual void snapshot(TracedValue*, ThreadState::SnapshotInfo*) override; |
763 #endif | 779 #endif |
764 | 780 |
765 virtual void sweep() override; | 781 virtual void sweep() override; |
766 virtual void postSweepProcessing() override; | 782 virtual void postSweepProcessing() override; |
767 | 783 |
768 virtual void clearFreeLists() override; | 784 virtual void clearFreeLists() override; |
769 virtual void markUnmarkedObjectsDead() override; | 785 virtual void markUnmarkedObjectsDead() override; |
770 | 786 |
771 virtual void makeConsistentForSweeping() override; | 787 virtual void makeConsistentForSweeping() override; |
772 #if ENABLE(ASSERT) | 788 #if ENABLE(ASSERT) |
773 virtual bool isConsistentForSweeping() override; | 789 virtual bool isConsistentForSweeping() override; |
774 #endif | 790 #endif |
775 virtual size_t objectPayloadSizeForTesting() override; | 791 virtual size_t objectPayloadSizeForTesting() override; |
776 | 792 |
777 ThreadState* threadState() { return m_threadState; } | 793 ThreadState* threadState() { return m_threadState; } |
778 | 794 |
779 void addToFreeList(Address address, size_t size) | 795 void addToFreeList(Address address, size_t size) |
780 { | 796 { |
781 ASSERT(pageFromAddress(address)); | 797 ASSERT(pageFromAddress(address)); |
782 ASSERT(pageFromAddress(address + size - 1)); | 798 ASSERT(pageFromAddress(address + size - 1)); |
783 m_freeList.addToFreeList(address, size); | 799 m_freeList.addToFreeList(address, size); |
784 } | 800 } |
785 | 801 |
786 inline Address allocate(size_t payloadSize, const GCInfo*); | 802 inline Address allocate(size_t, const GCInfo*); |
787 inline static size_t roundedAllocationSize(size_t size) | 803 inline static size_t roundedAllocationSize(size_t size) |
788 { | 804 { |
789 return allocationSizeFromSize(size) - sizeof(Header); | 805 return allocationSizeFromSize(size) - sizeof(Header); |
790 } | 806 } |
791 | 807 |
792 virtual void prepareHeapForTermination() override; | 808 virtual void prepareHeapForTermination() override; |
793 | 809 |
794 void removePageFromHeap(HeapPage<Header>*); | 810 void removePageFromHeap(HeapPage<Header>*); |
795 | 811 |
796 PLATFORM_EXPORT void promptlyFreeObject(Header*); | 812 PLATFORM_EXPORT void promptlyFreeObject(Header*); |
797 PLATFORM_EXPORT bool expandObject(Header*, size_t); | 813 PLATFORM_EXPORT bool expandObject(Header*, size_t); |
798 void shrinkObject(Header*, size_t); | 814 void shrinkObject(Header*, size_t); |
799 | 815 |
800 private: | 816 private: |
801 void addPageToHeap(const GCInfo*); | 817 void addPageToHeap(const GCInfo*); |
802 PLATFORM_EXPORT Address outOfLineAllocate(size_t allocationSize, const GCInfo*); | 818 PLATFORM_EXPORT Address outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo*); |
803 static size_t allocationSizeFromSize(size_t); | 819 static size_t allocationSizeFromSize(size_t); |
804 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); | 820 PLATFORM_EXPORT Address allocateLargeObject(size_t, const GCInfo*); |
805 Address currentAllocationPoint() const { return m_currentAllocationPoint; } | 821 Address currentAllocationPoint() const { return m_currentAllocationPoint; } |
806 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } | 822 size_t remainingAllocationSize() const { return m_remainingAllocationSize; } |
807 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } | 823 bool hasCurrentAllocationArea() const { return currentAllocationPoint() && remainingAllocationSize(); } |
808 void setAllocationPoint(Address point, size_t size) | 824 void setAllocationPoint(Address point, size_t size) |
809 { | 825 { |
826 #if ENABLE(GC_PROFILE_FREE_LIST) | |
827 m_allocationPointSizeSum += size; | |
828 ++m_setAllocationPointCount; | |
829 #endif | |
810 ASSERT(!point || pageFromAddress(point)); | 830 ASSERT(!point || pageFromAddress(point)); |
811 ASSERT(size <= HeapPage<Header>::payloadSize()); | 831 ASSERT(size <= HeapPage<Header>::payloadSize()); |
812 if (hasCurrentAllocationArea()) | 832 if (hasCurrentAllocationArea()) |
813 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 833 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
814 updateRemainingAllocationSize(); | 834 updateRemainingAllocationSize(); |
815 m_currentAllocationPoint = point; | 835 m_currentAllocationPoint = point; |
816 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; | 836 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
817 } | 837 } |
818 void updateRemainingAllocationSize(); | 838 void updateRemainingAllocationSize(); |
819 Address allocateFromFreeList(size_t, const GCInfo*); | 839 bool allocateFromFreeList(size_t); |
820 | 840 |
821 void freeLargeObject(LargeObject<Header>*, LargeObject<Header>**); | 841 void freeLargeObject(LargeObject<Header>*, LargeObject<Header>**); |
822 void allocatePage(const GCInfo*); | 842 void allocatePage(const GCInfo*); |
823 | 843 |
824 inline Address allocateSize(size_t allocationSize, const GCInfo*); | |
825 inline Address allocateAtAddress(Address, size_t allocationSize, const GCInfo*); | |
826 | |
827 #if ENABLE(ASSERT) | 844 #if ENABLE(ASSERT) |
828 bool pagesToBeSweptContains(Address); | 845 bool pagesToBeSweptContains(Address); |
829 bool pagesAllocatedDuringSweepingContains(Address); | 846 bool pagesAllocatedDuringSweepingContains(Address); |
830 #endif | 847 #endif |
831 | 848 |
832 void sweepNormalPages(); | 849 void sweepNormalPages(); |
833 void sweepLargePages(); | 850 void sweepLargePages(); |
834 bool coalesce(size_t); | 851 bool coalesce(size_t); |
835 | 852 |
836 Address m_currentAllocationPoint; | 853 Address m_currentAllocationPoint; |
837 size_t m_remainingAllocationSize; | 854 size_t m_remainingAllocationSize; |
838 size_t m_lastRemainingAllocationSize; | 855 size_t m_lastRemainingAllocationSize; |
839 | 856 |
857 double m_totalAllocationSize; | |
858 size_t m_allocationCount; | |
859 size_t m_inlineAllocationCount; | |
860 | |
840 HeapPage<Header>* m_firstPage; | 861 HeapPage<Header>* m_firstPage; |
841 LargeObject<Header>* m_firstLargeObject; | 862 LargeObject<Header>* m_firstLargeObject; |
842 | 863 |
843 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; | 864 HeapPage<Header>* m_firstPageAllocatedDuringSweeping; |
844 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; | 865 HeapPage<Header>* m_lastPageAllocatedDuringSweeping; |
845 | 866 |
846 LargeObject<Header>* m_firstLargeObjectAllocatedDuringSweeping; | 867 LargeObject<Header>* m_firstLargeObjectAllocatedDuringSweeping; |
847 LargeObject<Header>* m_lastLargeObjectAllocatedDuringSweeping; | 868 LargeObject<Header>* m_lastLargeObjectAllocatedDuringSweeping; |
848 | 869 |
849 ThreadState* m_threadState; | 870 ThreadState* m_threadState; |
850 | 871 |
851 FreeList<Header> m_freeList; | 872 FreeList<Header> m_freeList; |
852 | 873 |
853 // Index into the page pools. This is used to ensure that the pages of the | 874 // Index into the page pools. This is used to ensure that the pages of the |
854 // same type go into the correct page pool and thus avoid type confusion. | 875 // same type go into the correct page pool and thus avoid type confusion. |
855 int m_index; | 876 int m_index; |
856 | 877 |
857 // The promptly freed count contains the number of promptly freed objects | 878 // The promptly freed count contains the number of promptly freed objects |
858 // since the last sweep or since it was manually reset to delay coalescing. | 879 // since the last sweep or since it was manually reset to delay coalescing. |
859 size_t m_promptlyFreedCount; | 880 size_t m_promptlyFreedCount; |
881 | |
882 #if ENABLE(GC_PROFILE_FREE_LIST) | |
883 size_t m_allocationPointSizeSum = 0; | |
keishi 2015/01/27 08:59:01
Used to report average allocation point size betwe
884 size_t m_setAllocationPointCount = 0; | |
885 #endif | |
860 }; | 886 }; |
861 | 887 |
862 class PLATFORM_EXPORT Heap { | 888 class PLATFORM_EXPORT Heap { |
863 public: | 889 public: |
864 static void init(); | 890 static void init(); |
865 static void shutdown(); | 891 static void shutdown(); |
866 static void doShutdown(); | 892 static void doShutdown(); |
867 | 893 |
868 static BaseHeapPage* contains(Address); | 894 static BaseHeapPage* contains(Address); |
869 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } | 895 static BaseHeapPage* contains(void* pointer) { return contains(reinterpret_cast<Address>(pointer)); } |
(...skipping 56 matching lines...)
926 static void collectAllGarbage(); | 952 static void collectAllGarbage(); |
927 | 953 |
928 static void processMarkingStack(Visitor*); | 954 static void processMarkingStack(Visitor*); |
929 static void postMarkingProcessing(Visitor*); | 955 static void postMarkingProcessing(Visitor*); |
930 static void globalWeakProcessing(Visitor*); | 956 static void globalWeakProcessing(Visitor*); |
931 static void setForcePreciseGCForTesting(); | 957 static void setForcePreciseGCForTesting(); |
932 | 958 |
933 static void preGC(); | 959 static void preGC(); |
934 static void postGC(); | 960 static void postGC(); |
935 | 961 |
962 static void reportSweepingStats(); | |
963 | |
936 // Conservatively checks whether an address is a pointer in any of the | 964 // Conservatively checks whether an address is a pointer in any of the |
937 // thread heaps. If so, marks the object pointed to as live. | 965 // thread heaps. If so, marks the object pointed to as live. |
938 static Address checkAndMarkPointer(Visitor*, Address); | 966 static Address checkAndMarkPointer(Visitor*, Address); |
939 | 967 |
940 #if ENABLE(GC_PROFILE_MARKING) | 968 #if ENABLE(GC_PROFILE_MARKING) |
941 // Dump the path to the specified object on the next GC. This method is to be | 969 // Dump the path to the specified object on the next GC. This method is to be |
942 // invoked from GDB. | 970 // invoked from GDB. |
943 static void dumpPathToObjectOnNextGC(void* p); | 971 static void dumpPathToObjectOnNextGC(void* p); |
944 | 972 |
945 // Forcibly find GCInfo of the object at Address. This is slow and should | 973 // Forcibly find GCInfo of the object at Address. This is slow and should |
(...skipping 329 matching lines...)
1275 // therefore has to happen before any calculation on the size. | 1303 // therefore has to happen before any calculation on the size. |
1276 RELEASE_ASSERT(size < maxHeapObjectSize); | 1304 RELEASE_ASSERT(size < maxHeapObjectSize); |
1277 | 1305 |
1278 // Add space for header. | 1306 // Add space for header. |
1279 size_t allocationSize = size + sizeof(Header); | 1307 size_t allocationSize = size + sizeof(Header); |
1280 // Align size with allocation granularity. | 1308 // Align size with allocation granularity. |
1281 allocationSize = (allocationSize + allocationMask) & ~allocationMask; | 1309 allocationSize = (allocationSize + allocationMask) & ~allocationMask; |
1282 return allocationSize; | 1310 return allocationSize; |
1283 } | 1311 } |
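A quick worked example of the rounding, assuming an 8-byte Header and allocationMask == 7 (8-byte allocation granularity; the real values are in the elided lines):

    // size = 20:
    //   20 + sizeof(Header)  = 28
    //   (28 + 7) & ~7        = 32   -> allocationSizeFromSize(20) == 32
    // roundedAllocationSize(20) then reports 32 - 8 = 24 usable bytes.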
1284 | 1312 |
1285 | |
1286 template<typename Header> | |
1287 inline Address ThreadHeap<Header>::allocateAtAddress(Address headerAddress, size_t allocationSize, const GCInfo* gcInfo) | |
1288 { | |
1289 new (NotNull, headerAddress) Header(allocationSize, gcInfo); | |
1290 Address result = headerAddress + sizeof(Header); | |
1291 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
1292 | |
1293 // Unpoison the memory used for the object (payload). | |
1294 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); | |
1295 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(Header)); | |
1296 ASSERT(pageFromAddress(headerAddress + allocationSize - 1)); | |
1297 return result; | |
1298 } | |
1299 | |
1300 template<typename Header> | |
1301 Address ThreadHeap<Header>::allocateSize(size_t allocationSize, const GCInfo* gcInfo) | |
1302 { | |
1303 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | |
1304 Address headerAddress = m_currentAllocationPoint; | |
1305 m_currentAllocationPoint += allocationSize; | |
1306 m_remainingAllocationSize -= allocationSize; | |
1307 return allocateAtAddress(headerAddress, allocationSize, gcInfo); | |
1308 } | |
1309 return outOfLineAllocate(allocationSize, gcInfo); | |
1310 } | |
1311 | |
1312 template<typename Header> | 1313 template<typename Header> |
1313 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) | 1314 Address ThreadHeap<Header>::allocate(size_t size, const GCInfo* gcInfo) |
1314 { | 1315 { |
1315 return allocateSize(allocationSizeFromSize(size), gcInfo); | 1316 size_t allocationSize = allocationSizeFromSize(size); |
1317 m_totalAllocationSize += allocationSize; | |
1318 m_allocationCount++; | |
1319 if (LIKELY(allocationSize <= m_remainingAllocationSize)) { | |
1320 m_inlineAllocationCount++; | |
1321 Address headerAddress = m_currentAllocationPoint; | |
1322 m_currentAllocationPoint += allocationSize; | |
1323 m_remainingAllocationSize -= allocationSize; | |
1324 new (NotNull, headerAddress) Header(allocationSize, gcInfo); | |
1325 Address result = headerAddress + sizeof(Header); | |
1326 ASSERT(!(reinterpret_cast<uintptr_t>(result) & allocationMask)); | |
1327 | |
1328 // Unpoison the memory used for the object (payload). | |
1329 ASAN_UNPOISON_MEMORY_REGION(result, allocationSize - sizeof(Header)); | |
1330 FILL_ZERO_IF_NOT_PRODUCTION(result, allocationSize - sizeof(Header)); | |
1331 ASSERT(pageFromAddress(headerAddress + allocationSize - 1)); | |
1332 return result; | |
1333 } | |
1334 return outOfLineAllocate(size, allocationSize, gcInfo); | |
1316 } | 1335 } |
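The counters added here make the bump-pointer hit rate observable: every call increments m_allocationCount, while only allocations satisfied from the current allocation area increment m_inlineAllocationCount. A hypothetical consumer of those counters (the actual output of reportSweepingStats() is not part of this hunk):

    // Fraction of allocations served inline, i.e. without falling back to
    // outOfLineAllocate() and the free list. Sketch only.
    static double inlineAllocationRatio(size_t inlineCount, size_t totalCount)
    {
        return totalCount ? static_cast<double>(inlineCount) / totalCount : 0;
    }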
1317 | 1336 |
1318 template<typename T, typename HeapTraits> | 1337 template<typename T, typename HeapTraits> |
1319 Address Heap::allocate(size_t size) | 1338 Address Heap::allocate(size_t size) |
1320 { | 1339 { |
1321 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); | 1340 ThreadState* state = ThreadStateFor<ThreadingTrait<T>::Affinity>::state(); |
1322 ASSERT(state->isAllocationAllowed()); | 1341 ASSERT(state->isAllocationAllowed()); |
1323 const GCInfo* gcInfo = GCInfoTrait<T>::get(); | 1342 const GCInfo* gcInfo = GCInfoTrait<T>::get(); |
1324 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), size); | 1343 int heapIndex = HeapTraits::index(gcInfo->hasFinalizer(), size); |
1325 BaseHeap* heap = state->heap(heapIndex); | 1344 BaseHeap* heap = state->heap(heapIndex); |
(...skipping 1022 matching lines...)
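From a caller's perspective the sub-heap choice is automatic: GCInfoTrait<T> supplies the type's GC metadata, HeapTraits::index() maps it (finalizer or not, plus size) to one of the thread's heaps, and the ThreadHeap fast path above does the rest. A hedged usage sketch; DummyNode and its traits argument are hypothetical stand-ins, and real call sites normally reach this through the managed type's operator new rather than directly:

    // Allocate storage for one garbage-collected DummyNode on the heap
    // selected by its traits; returns the payload address.
    Address memory = Heap::allocate<DummyNode, DummyNodeHeapTraits>(sizeof(DummyNode));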
2348 template<typename T, size_t inlineCapacity> | 2367 template<typename T, size_t inlineCapacity> |
2349 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; | 2368 struct GCInfoTrait<HeapVector<T, inlineCapacity>> : public GCInfoTrait<Vector<T, inlineCapacity, HeapAllocator>> { }; |
2350 template<typename T, size_t inlineCapacity> | 2369 template<typename T, size_t inlineCapacity> |
2351 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; | 2370 struct GCInfoTrait<HeapDeque<T, inlineCapacity>> : public GCInfoTrait<Deque<T, inlineCapacity, HeapAllocator>> { }; |
2352 template<typename T, typename U, typename V> | 2371 template<typename T, typename U, typename V> |
2353 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; | 2372 struct GCInfoTrait<HeapHashCountedSet<T, U, V>> : public GCInfoTrait<HashCountedSet<T, U, V, HeapAllocator>> { }; |
2354 | 2373 |
2355 } // namespace blink | 2374 } // namespace blink |
2356 | 2375 |
2357 #endif // Heap_h | 2376 #endif // Heap_h |