Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 31 matching lines...) | |
| 42 #include "wtf/LeakAnnotations.h" | 42 #include "wtf/LeakAnnotations.h" |
| 43 #include "wtf/PassOwnPtr.h" | 43 #include "wtf/PassOwnPtr.h" |
| 44 #if ENABLE(GC_PROFILE_MARKING) | 44 #if ENABLE(GC_PROFILE_MARKING) |
| 45 #include "wtf/HashMap.h" | 45 #include "wtf/HashMap.h" |
| 46 #include "wtf/HashSet.h" | 46 #include "wtf/HashSet.h" |
| 47 #include "wtf/text/StringBuilder.h" | 47 #include "wtf/text/StringBuilder.h" |
| 48 #include "wtf/text/StringHash.h" | 48 #include "wtf/text/StringHash.h" |
| 49 #include <stdio.h> | 49 #include <stdio.h> |
| 50 #include <utility> | 50 #include <utility> |
| 51 #endif | 51 #endif |
| 52 #if ENABLE(GC_PROFILE_HEAP) | 52 #if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) || ENABLE(GC_PROFILE_MARKING) |
| 53 #include "platform/TracedValue.h" | 53 #include "platform/TracedValue.h" |
| 54 #endif | 54 #endif |
| 55 | 55 |
| 56 #if OS(POSIX) | 56 #if OS(POSIX) |
| 57 #include <sys/mman.h> | 57 #include <sys/mman.h> |
| 58 #include <unistd.h> | 58 #include <unistd.h> |
| 59 #elif OS(WIN) | 59 #elif OS(WIN) |
| 60 #include <windows.h> | 60 #include <windows.h> |
| 61 #endif | 61 #endif |
| 62 | 62 |
| 63 namespace blink { | 63 namespace blink { |
| 64 | 64 |
| 65 struct AgeHistogram { | |
| 66 int data[8]; | |
| 67 }; | |
| 68 | |
| 69 typedef HashMap<String, AgeHistogram> ObjectAgeMap; | |
| 70 | |
| 71 static ObjectAgeMap& uom() | |
> keishi 2015/01/27 08:59:01: Unmarked object map.
| 72 { | |
| 73 static ObjectAgeMap uomap; | |
| 74 return uomap; | |
| 75 } | |
| 76 | |
| 77 static Mutex& uomMutex() | |
| 78 { | |
| 79 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | |
| 80 return mutex; | |
| 81 } | |
| 82 | |
| 83 static ObjectAgeMap& mom() | |
> keishi 2015/01/27 08:59:00: Marked object map.
| 84 { | |
| 85 static ObjectAgeMap momap; | |
| 86 return momap; | |
| 87 } | |
| 88 | |
| 89 static Mutex& momMutex() | |
| 90 { | |
| 91 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); | |
| 92 return mutex; | |
| 93 } | |
| 94 | |
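
For readers unfamiliar with the pattern above: the four helpers lazily construct two global class-name-to-age-histogram maps (`uom()` for unmarked objects seen at sweep time, `mom()` for marked objects seen at mark time) and guard each with its own mutex, since marking and sweeping can observe objects while other threads run. A minimal stand-alone sketch of the same pattern using standard C++ in place of the WTF types; all names here are illustrative, not Blink code:

```cpp
#include <map>
#include <mutex>
#include <string>

struct Histogram {
    int data[8] = {}; // one counter per object age, matching AgeHistogram
};

static std::map<std::string, Histogram>& ageMap()
{
    static std::map<std::string, Histogram> map; // constructed on first use
    return map;
}

static std::mutex& ageMapMutex()
{
    static std::mutex mutex; // C++11 guarantees thread-safe initialization
    return mutex;
}

void recordObject(const std::string& className, int age)
{
    std::lock_guard<std::mutex> locker(ageMapMutex());
    ageMap()[className].data[age & 7]++; // clamp age into the 8 buckets
}
```

| OLD | NEW |
|---|---|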
| 65 #if ENABLE(GC_PROFILE_MARKING) | 95 #if ENABLE(GC_PROFILE_MARKING) |
| 66 static String classOf(const void* object) | 96 static String classOf(const void* object) |
| 67 { | 97 { |
| 68 if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) | 98 if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) |
| 69 return gcInfo->m_className; | 99 return gcInfo->m_className; |
| 70 return "unknown"; | 100 return "unknown"; |
| 71 } | 101 } |
| 72 #endif | 102 #endif |
| 73 | 103 |
| 74 static bool vTableInitialized(void* objectPointer) | 104 static bool vTableInitialized(void* objectPointer) |
| (...skipping 540 matching lines...) | |
| 615 { | 645 { |
| 616 ASSERT(gcInfo()); | 646 ASSERT(gcInfo()); |
| 617 HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize()); | 647 HeapObjectHeader::finalize(gcInfo(), payload(), payloadSize()); |
| 618 } | 648 } |
| 619 | 649 |
| 620 template<typename Header> | 650 template<typename Header> |
| 621 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) | 651 ThreadHeap<Header>::ThreadHeap(ThreadState* state, int index) |
| 622 : m_currentAllocationPoint(nullptr) | 652 : m_currentAllocationPoint(nullptr) |
| 623 , m_remainingAllocationSize(0) | 653 , m_remainingAllocationSize(0) |
| 624 , m_lastRemainingAllocationSize(0) | 654 , m_lastRemainingAllocationSize(0) |
| 655 #if ENABLE(GC_PROFILE_FREE_LIST) | |
| 656 , m_totalAllocationSize(0.0) | |
| 657 , m_allocationCount(0) | |
| 658 , m_inlineAllocationCount(0) | |
| 659 #endif | |
| 625 , m_firstPage(nullptr) | 660 , m_firstPage(nullptr) |
| 626 , m_firstLargeObject(nullptr) | 661 , m_firstLargeObject(nullptr) |
| 627 , m_firstPageAllocatedDuringSweeping(nullptr) | 662 , m_firstPageAllocatedDuringSweeping(nullptr) |
| 628 , m_lastPageAllocatedDuringSweeping(nullptr) | 663 , m_lastPageAllocatedDuringSweeping(nullptr) |
| 629 , m_firstLargeObjectAllocatedDuringSweeping(nullptr) | 664 , m_firstLargeObjectAllocatedDuringSweeping(nullptr) |
| 630 , m_lastLargeObjectAllocatedDuringSweeping(nullptr) | 665 , m_lastLargeObjectAllocatedDuringSweeping(nullptr) |
| 631 , m_threadState(state) | 666 , m_threadState(state) |
| 632 , m_index(index) | 667 , m_index(index) |
| 633 , m_promptlyFreedCount(0) | 668 , m_promptlyFreedCount(0) |
| 634 { | 669 { |
| (...skipping 36 matching lines...) | |
| 671 void ThreadHeap<Header>::updateRemainingAllocationSize() | 706 void ThreadHeap<Header>::updateRemainingAllocationSize() |
| 672 { | 707 { |
| 673 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { | 708 if (m_lastRemainingAllocationSize > remainingAllocationSize()) { |
| 674 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); | 709 Heap::increaseAllocatedObjectSize(m_lastRemainingAllocationSize - remainingAllocationSize()); |
| 675 m_lastRemainingAllocationSize = remainingAllocationSize(); | 710 m_lastRemainingAllocationSize = remainingAllocationSize(); |
| 676 } | 711 } |
| 677 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); | 712 ASSERT(m_lastRemainingAllocationSize == remainingAllocationSize()); |
| 678 } | 713 } |
| 679 | 714 |
| 680 template<typename Header> | 715 template<typename Header> |
| 681 Address ThreadHeap<Header>::outOfLineAllocate(size_t allocationSize, const GCInfo* gcInfo) | 716 Address ThreadHeap<Header>::outOfLineAllocate(size_t payloadSize, size_t allocationSize, const GCInfo* gcInfo) |
| 682 { | 717 { |
| 718 #if ENABLE(GC_PROFILE_FREE_LIST) | |
| 719 m_threadState->snapshotFreeListIfNecessary(); | |
| 720 #endif | |
| 683 ASSERT(allocationSize > remainingAllocationSize()); | 721 ASSERT(allocationSize > remainingAllocationSize()); |
| 684 if (allocationSize > blinkPageSize / 2) | 722 if (allocationSize > blinkPageSize / 2) |
| 685 return allocateLargeObject(allocationSize, gcInfo); | 723 return allocateLargeObject(allocationSize, gcInfo); |
| 686 | 724 |
| 687 updateRemainingAllocationSize(); | 725 updateRemainingAllocationSize(); |
| 688 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); | 726 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); |
| 689 | 727 |
| 728 setAllocationPoint(nullptr, 0); | |
> keishi 2015/01/27 08:59:00: The changes below to outOfLineAllocate and allocat
| 690 ASSERT(allocationSize >= allocationGranularity); | 729 ASSERT(allocationSize >= allocationGranularity); |
| 691 Address result = allocateFromFreeList(allocationSize, gcInfo); | 730 if (allocateFromFreeList(allocationSize)) |
| 692 if (result) | 731 return allocate(payloadSize, gcInfo); |
| 693 return result; | 732 if (coalesce(allocationSize) && allocateFromFreeList(allocationSize)) |
| 694 setAllocationPoint(nullptr, 0); | 733 return allocate(payloadSize, gcInfo); |
| 695 if (coalesce(allocationSize)) { | |
| 696 result = allocateFromFreeList(allocationSize, gcInfo); | |
| 697 if (result) | |
| 698 return result; | |
| 699 } | |
| 700 | 734 |
| 701 addPageToHeap(gcInfo); | 735 addPageToHeap(gcInfo); |
| 702 result = allocateFromFreeList(allocationSize, gcInfo); | 736 bool success = allocateFromFreeList(allocationSize); |
| 703 RELEASE_ASSERT(result); | 737 RELEASE_ASSERT(success); |
| 704 return result; | 738 return allocate(payloadSize, gcInfo); |
| 705 } | |
| 706 | |
| 707 static bool shouldUseFirstFitForHeap(int heapIndex) | |
| 708 { | |
| 709 // For an allocation of size N, should a heap perform a first-fit | |
| 710 // matching within the sized bin that N belongs to? | |
| 711 // | |
| 712 // Theory: quickly reusing a previously freed backing store block stands | |
| 713 // a chance of maintaining cached presence of that block (maintains | |
| 714 // "locality".) This is preferable to starting to bump allocate from a | |
| 715 // new and bigger block, which is what allocateFromFreeList() does by | |
| 716 // default. Hence, the backing store heaps are considered for binned | |
| 717 // first-fit matching. | |
| 718 // | |
| 719 // This appears to hold true through performance experimentation; at | |
| 720 // least no significant performance regressions have been observed. | |
| 721 // | |
| 722 // This theory of improved performance does not hold true for other | |
| 723 // heap types. We are currently seeking an understanding of why; | |
| 724 // larger amounts of small block fragmentation might be one reason | |
| 725 // for it. TBC. | |
| 726 // | |
| 727 switch (heapIndex) { | |
| 728 case VectorBackingHeap: | |
| 729 case InlineVectorBackingHeap: | |
| 730 case HashTableBackingHeap: | |
| 731 case VectorBackingHeapNonFinalized: | |
| 732 case InlineVectorBackingHeapNonFinalized: | |
| 733 case HashTableBackingHeapNonFinalized: | |
| 734 return true; | |
| 735 default: | |
| 736 return false; | |
| 737 } | |
| 738 } | 739 } |
| 739 | 740 |
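
The deleted shouldUseFirstFitForHeap() and the comments around it described two freelist strategies that this CL collapses into the single takeEntry() path below. A rough sketch of that distinction, with illustrative stand-in types (not the real Blink structures):

```cpp
#include <cstddef>

struct FreeBlock { FreeBlock* next; size_t size; };

// Strategy 1 (removed): first-fit within the size class of the request,
// reusing a recently freed block of about the right size for cache locality.
FreeBlock* firstFit(FreeBlock* buckets[], int requestIndex)
{
    return buckets[requestIndex + 1]; // may be null; caller falls back
}

// Strategy 2 (kept; see takeEntry() below): take the largest block
// available and bump-allocate out of it, amortizing the slow path over
// many subsequent fast-path allocations.
FreeBlock* carveLargest(FreeBlock* buckets[], int biggestIndex)
{
    for (int i = biggestIndex; i > 0; --i) {
        if (buckets[i])
            return buckets[i];
    }
    return nullptr;
}
```

| OLD | NEW |
|---|---|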
| 740 template<typename Header> | 741 template<typename Header> |
| 741 Address ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize, const GCInfo* gcInfo) | 742 FreeListEntry* FreeList<Header>::takeEntry(size_t allocationSize) |
| 742 { | 743 { |
| 743 // The freelist allocation scheme is currently as follows: | 744 size_t bucketSize = 1 << m_biggestFreeListIndex; |
| 744 // | 745 int i = m_biggestFreeListIndex; |
| 745 // - If the heap is of an appropriate type, try to pick the first | 746 for (; i > 0; i--, bucketSize >>= 1) { |
| 746 // entry from the sized bin corresponding to |allocationSize|. | 747 if (bucketSize < allocationSize) { |
| 747 // [See shouldUseFirstFitForHeap() comment for motivation on why.] | 748 // A FreeListEntry for bucketSize might be larger than allocationSiz e. |
| 748 // | 749 // FIXME: We check only the first FreeListEntry because searching |
| 749 // - If that didn't satisfy the allocation, try reusing a block | 750 // the entire list is costly. |
| 750 // from the largest bin. The underlying reasoning being that | 751 if (!m_freeLists[i] || m_freeLists[i]->size() < allocationSize) |
| 751 // we want to amortize this slow allocation call by carving | |
| 752 // off as large a free block as possible in one go; a block | |
| 753 // that will service this block and let following allocations | |
| 754 // be serviced quickly by bump allocation. | |
| 755 // | |
| 756 // - Fail; allocation cannot be serviced by the freelist. | |
| 757 // The allocator will handle that failure by requesting more | |
| 758 // heap pages from the OS and re-initiate the allocation request. | |
| 759 // | |
| 760 int index = FreeList<Header>::bucketIndexForSize(allocationSize) + 1; | |
| 761 if (index <= m_freeList.m_biggestFreeListIndex && shouldUseFirstFitForHeap(m_index)) { | |
| 762 if (FreeListEntry* entry = m_freeList.m_freeLists[index]) { | |
| 763 entry->unlink(&m_freeList.m_freeLists[index]); | |
| 764 if (!m_freeList.m_freeLists[index] && index == m_freeList.m_biggestFreeListIndex) { | |
| 765 // Biggest bucket drained, adjust biggest index downwards. | |
| 766 int maxIndex = m_freeList.m_biggestFreeListIndex - 1; | |
| 767 for (; maxIndex >= 0 && !m_freeList.m_freeLists[maxIndex]; --maxIndex) { } | |
| 768 m_freeList.m_biggestFreeListIndex = maxIndex < 0 ? 0 : maxIndex; | |
| 769 } | |
| 770 // Allocate into the freelist block without disturbing the current allocation area. | |
| 771 ASSERT(entry->size() >= allocationSize); | |
| 772 if (entry->size() > allocationSize) | |
| 773 addToFreeList(entry->address() + allocationSize, entry->size() - allocationSize); | |
| 774 Heap::increaseAllocatedObjectSize(allocationSize); | |
| 775 return allocateAtAddress(entry->address(), allocationSize, gcInfo); | |
| 776 } | |
| 777 // Failed to find a first-fit freelist entry; fall into the standard case of | |
| 778 // chopping off the largest free block and bump allocate from it. | |
| 779 } | |
| 780 size_t bucketSize = 1 << m_freeList.m_biggestFreeListIndex; | |
| 781 index = m_freeList.m_biggestFreeListIndex; | |
| 782 for (; index > 0; --index, bucketSize >>= 1) { | |
| 783 FreeListEntry* entry = m_freeList.m_freeLists[index]; | |
| 784 if (allocationSize > bucketSize) { | |
| 785 // Final bucket candidate; check initial entry if it is able | |
| 786 // to service this allocation. Do not perform a linear scan, | |
| 787 // as it is considered too costly. | |
| 788 if (!entry || entry->size() < allocationSize) | |
| 789 break; | 752 break; |
| 790 } | 753 } |
| 791 if (entry) { | 754 if (FreeListEntry* entry = m_freeLists[i]) { |
| 792 entry->unlink(&m_freeList.m_freeLists[index]); | 755 m_biggestFreeListIndex = i; |
| 793 setAllocationPoint(entry->address(), entry->size()); | 756 entry->unlink(&m_freeLists[i]); |
| 794 ASSERT(hasCurrentAllocationArea()); | 757 return entry; |
| 795 ASSERT(remainingAllocationSize() >= allocationSize); | |
| 796 m_freeList.m_biggestFreeListIndex = index; | |
| 797 return allocateSize(allocationSize, gcInfo); | |
| 798 } | 758 } |
| 799 } | 759 } |
| 800 m_freeList.m_biggestFreeListIndex = index; | 760 m_biggestFreeListIndex = i; |
| 801 return nullptr; | 761 return nullptr; |
| 802 } | 762 } |
| 803 | 763 |
| 764 template<typename Header> | |
| 765 bool ThreadHeap<Header>::allocateFromFreeList(size_t allocationSize) | |
| 766 { | |
| 767 ASSERT(!hasCurrentAllocationArea()); | |
| 768 if (FreeListEntry* entry = m_freeList.takeEntry(allocationSize)) { | |
| 769 setAllocationPoint(entry->address(), entry->size()); | |
| 770 ASSERT(hasCurrentAllocationArea()); | |
| 771 ASSERT(remainingAllocationSize() >= allocationSize); | |
| 772 return true; | |
| 773 } | |
| 774 return false; | |
| 775 } | |
| 776 | |
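
takeEntry() relies on bucketIndexForSize(), which is not shown in this CL; from the `bucketSize = 1 << m_biggestFreeListIndex` arithmetic above, bucket i plausibly holds entries with size in [2^i, 2^(i+1)). A small self-contained sketch of that assumed mapping (a guess, not the real implementation):

```cpp
#include <cassert>
#include <cstddef>

// Assumed convention: bucketIndexForSize(n) == floor(log2(n)).
int bucketIndexForSizeSketch(size_t size)
{
    int index = -1;
    while (size) { // position of the highest set bit
        size >>= 1;
        ++index;
    }
    return index;
}

int main()
{
    assert(bucketIndexForSizeSketch(96) == 6);   // 64 <= 96 < 128
    assert(bucketIndexForSizeSketch(128) == 7);
    assert(bucketIndexForSizeSketch(4096) == 12);
    return 0;
}
```

| OLD | NEW |
|---|---|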
| 804 #if ENABLE(ASSERT) | 777 #if ENABLE(ASSERT) |
| 805 template<typename Header> | 778 template<typename Header> |
| 806 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) | 779 static bool isLargeObjectAligned(LargeObject<Header>* largeObject, Address address) |
| 807 { | 780 { |
| 808 // Check that a large object is blinkPageSize aligned (modulo the osPageSize | 781 // Check that a large object is blinkPageSize aligned (modulo the osPageSize |
| 809 // for the guard page). | 782 // for the guard page). |
| 810 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); | 783 return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); |
| 811 } | 784 } |
| 812 #endif | 785 #endif |
| 813 | 786 |
| (...skipping 26 matching lines...) | |
| 840 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeObject(Address address) | 813 const GCInfo* ThreadHeap<Header>::findGCInfoOfLargeObject(Address address) |
| 841 { | 814 { |
| 842 for (LargeObject<Header>* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { | 815 for (LargeObject<Header>* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
| 843 if (largeObject->contains(address)) | 816 if (largeObject->contains(address)) |
| 844 return largeObject->gcInfo(); | 817 return largeObject->gcInfo(); |
| 845 } | 818 } |
| 846 return nullptr; | 819 return nullptr; |
| 847 } | 820 } |
| 848 #endif | 821 #endif |
| 849 | 822 |
| 823 #if ENABLE(GC_PROFILE_FREE_LIST) | |
| 824 template<typename Header> | |
| 825 void ThreadHeap<Header>::snapshotFreeList(TracedValue* json) | |
| 826 { | |
| 827 json->setDouble("totalAllocationSize", m_totalAllocationSize); | |
| 828 json->setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocati onCount) / m_allocationCount); | |
| 829 json->setInteger("inlineAllocationCount", m_inlineAllocationCount); | |
| 830 json->setInteger("allocationCount", m_allocationCount); | |
| 831 if (m_setAllocationPointCount > 0) { | |
| 832 json->setDouble("averageAllocationPointSize", static_cast<double>(m_alloca tionPointSizeSum) / m_setAllocationPointCount); | |
| 833 } | |
| 834 m_allocationPointSizeSum = 0; | |
| 835 m_setAllocationPointCount = 0; | |
| 836 size_t pageCount = 0; | |
| 837 size_t totalPageSize = 0; | |
| 838 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | |
| 839 ++pageCount; | |
| 840 totalPageSize += page->payloadSize(); | |
| 841 } | |
| 842 json->setInteger("pageCount", pageCount); | |
| 843 json->setInteger("totalPageSize", totalPageSize); | |
| 844 size_t bucketSizes[blinkPageSizeLog2]; | |
| 845 size_t bucketTotalSizes[blinkPageSizeLog2]; | |
| 846 size_t freeSize = 0; | |
| 847 m_freeList.countBucketSizes(bucketSizes, bucketTotalSizes, &freeSize); | |
| 848 json->setInteger("freeSize", freeSize); | |
| 849 json->beginArray("bucketSizes"); | |
| 850 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | |
| 851 json->pushInteger(bucketSizes[i]); | |
| 852 } | |
| 853 json->endArray(); | |
| 854 json->beginArray("bucketTotalSizes"); | |
| 855 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { | |
| 856 json->pushInteger(bucketTotalSizes[i]); | |
| 857 } | |
| 858 json->endArray(); | |
| 859 } | |
| 860 #endif | |
| 861 | |
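
Among other things, snapshotFreeList() reports what fraction of allocations were served by the inline bump pointer. A tiny sketch of how those counters relate; the field names mirror the CL's members, but the increment points are assumptions (the real increments live in code not shown in this hunk):

```cpp
#include <cstddef>

struct AllocationStats {
    double totalAllocationSize = 0.0;
    int allocationCount = 0;
    int inlineAllocationCount = 0;

    // Assumed instrumentation: every allocation bumps allocationCount,
    // and fast-path (bump-pointer) hits also bump inlineAllocationCount.
    void recordAllocation(size_t size, bool servedInline)
    {
        totalAllocationSize += static_cast<double>(size);
        ++allocationCount;
        if (servedInline)
            ++inlineAllocationCount;
    }

    // This is the "inlineAllocationRate" value the snapshot emits.
    double inlineAllocationRate() const
    {
        return allocationCount
            ? static_cast<double>(inlineAllocationCount) / allocationCount
            : 0.0;
    }
};
```

| OLD | NEW |
|---|---|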
| 850 #if ENABLE(GC_PROFILE_HEAP) | 862 #if ENABLE(GC_PROFILE_HEAP) |
| 851 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 | 863 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 |
| 852 template<typename Header> | 864 template<typename Header> |
| 853 void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 865 void ThreadHeap<Header>::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
| 854 { | 866 { |
| 855 ASSERT(isConsistentForSweeping()); | 867 ASSERT(isConsistentForSweeping()); |
| 856 size_t previousPageCount = info->pageCount; | 868 size_t previousPageCount = info->pageCount; |
| 857 | 869 |
| 858 json->beginArray("pages"); | 870 json->beginArray("pages"); |
| 858 for (HeapPage<Header>* page = m_firstPage; page; page = page->next(), ++info->pageCount) { | 870 for (HeapPage<Header>* page = m_firstPage; page; page = page->next(), ++info->pageCount) { |
| (...skipping 47 matching lines...) | |
| 907 // space. | 919 // space. |
| 908 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList()) | 920 if (HeapPage<Header>::payloadSize() != size && !entry->shouldAddToFreeList()) |
| 909 return; | 921 return; |
| 910 #endif | 922 #endif |
| 911 int index = bucketIndexForSize(size); | 923 int index = bucketIndexForSize(size); |
| 912 entry->link(&m_freeLists[index]); | 924 entry->link(&m_freeLists[index]); |
| 913 if (index > m_biggestFreeListIndex) | 925 if (index > m_biggestFreeListIndex) |
| 914 m_biggestFreeListIndex = index; | 926 m_biggestFreeListIndex = index; |
| 915 } | 927 } |
| 916 | 928 |
| 929 #if ENABLE(GC_PROFILE_FREE_LIST) | |
| 930 template<typename Header> | |
| 931 void FreeList<Header>::countBucketSizes(size_t sizes[], size_t totalSizes[], size_t* freeSize) const | |
| 932 { | |
| 933 *freeSize = 0; | |
| 934 for (size_t i = 0; i < blinkPageSizeLog2; i++) { | |
| 935 sizes[i] = 0; | |
| 936 totalSizes[i] = 0; | |
| 937 FreeListEntry* entry = m_freeLists[i]; | |
| 938 while (entry) { | |
| 939 ++sizes[i]; | |
| 940 *freeSize += entry->size(); | |
| 941 totalSizes[i] += entry->size(); | |
| 942 entry = entry->next(); | |
| 943 } | |
| 944 } | |
| 945 } | |
| 946 #endif | |
| 947 | |
| 917 template<typename Header> | 948 template<typename Header> |
| 918 bool ThreadHeap<Header>::expandObject(Header* header, size_t newSize) | 949 bool ThreadHeap<Header>::expandObject(Header* header, size_t newSize) |
| 919 { | 950 { |
| 920 // It's possible that Vector requests a smaller expanded size because | 951 // It's possible that Vector requests a smaller expanded size because |
| 921 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 952 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
| 922 // size. | 953 // size. |
| 923 if (header->payloadSize() >= newSize) | 954 if (header->payloadSize() >= newSize) |
| 924 return true; | 955 return true; |
| 925 size_t allocationSize = allocationSizeFromSize(newSize); | 956 size_t allocationSize = allocationSizeFromSize(newSize); |
| 926 ASSERT(allocationSize > header->size()); | 957 ASSERT(allocationSize > header->size()); |
| (...skipping 583 matching lines...) | |
| 1510 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during | 1541 // STRICT_ASAN_FINALIZATION_CHECKING turns on poisoning of all objects during |
| 1511 // sweeping to catch cases where dead objects touch each other. This is not | 1542 // sweeping to catch cases where dead objects touch each other. This is not |
| 1512 // turned on by default because it also triggers for cases that are safe. | 1543 // turned on by default because it also triggers for cases that are safe. |
| 1513 // Examples of such safe cases are context life cycle observers and timers | 1544 // Examples of such safe cases are context life cycle observers and timers |
| 1514 // embedded in garbage collected objects. | 1545 // embedded in garbage collected objects. |
| 1515 #define STRICT_ASAN_FINALIZATION_CHECKING 0 | 1546 #define STRICT_ASAN_FINALIZATION_CHECKING 0 |
| 1516 | 1547 |
| 1517 template<typename Header> | 1548 template<typename Header> |
| 1518 void ThreadHeap<Header>::sweep() | 1549 void ThreadHeap<Header>::sweep() |
| 1519 { | 1550 { |
| 1551 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) { | |
| 1552 page->countUnmarkedObjects(); | |
| 1553 } | |
| 1520 ASSERT(isConsistentForSweeping()); | 1554 ASSERT(isConsistentForSweeping()); |
| 1521 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING | 1555 #if defined(ADDRESS_SANITIZER) && STRICT_ASAN_FINALIZATION_CHECKING |
| 1522 // When using ASan do a pre-sweep where all unmarked objects are | 1556 // When using ASan do a pre-sweep where all unmarked objects are |
| 1523 // poisoned before calling their finalizer methods. This can catch | 1557 // poisoned before calling their finalizer methods. This can catch |
| 1524 // the case where the finalizer of an object tries to modify | 1558 // the case where the finalizer of an object tries to modify |
| 1525 // another object as part of finalization. | 1559 // another object as part of finalization. |
| 1526 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) | 1560 for (HeapPage<Header>* page = m_firstPage; page; page = page->next()) |
| 1527 page->poisonUnmarkedObjects(); | 1561 page->poisonUnmarkedObjects(); |
| 1528 #endif | 1562 #endif |
| 1529 sweepNormalPages(); | 1563 sweepNormalPages(); |
| (...skipping 349 matching lines...) | |
| 1879 Header* header = reinterpret_cast<Header*>(headerAddress); | 1913 Header* header = reinterpret_cast<Header*>(headerAddress); |
| 1880 ASSERT(header->size() < blinkPagePayloadSize()); | 1914 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1881 | 1915 |
| 1882 if (!header->isFree() && !header->isMarked()) | 1916 if (!header->isFree() && !header->isMarked()) |
| 1883 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1917 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
| 1884 headerAddress += header->size(); | 1918 headerAddress += header->size(); |
| 1885 } | 1919 } |
| 1886 } | 1920 } |
| 1887 #endif | 1921 #endif |
| 1888 | 1922 |
| 1923 template<typename Header> | |
| 1924 void HeapPage<Header>::countUnmarkedObjects() | |
> keishi 2015/01/27 08:59:00: Scans heap for objects that haven't been marked, a
| 1925 { | |
| 1926 MutexLocker locker(uomMutex()); | |
| 1927 for (Address headerAddress = payload(); headerAddress < end(); ) { | |
| 1928 Header* header = reinterpret_cast<Header*>(headerAddress); | |
| 1929 ASSERT(header->size() < blinkPagePayloadSize()); | |
| 1930 | |
| 1931 if (!header->isFree() && !header->isMarked()) { | |
| 1932 String className(classOf(header->payload())); | |
| 1933 ObjectAgeMap::AddResult result = uom().add(className, AgeHistogram()); | |
| 1934 result.storedValue->value.data[header->age()]++; | |
| 1935 } | |
| 1936 headerAddress += header->size(); | |
| 1937 } | |
| 1938 } | |
| 1939 | |
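
countUnmarkedObjects() above and the mark-time incAge() call further down both assume each object header carries a small age counter; with an 8-slot AgeHistogram the age must fit in three bits. A sketch of one plausible encoding; the actual header layout is not part of this hunk, so this is purely illustrative:

```cpp
// Plausible header age encoding: three bits, saturating at 7 so every
// object maps into one of AgeHistogram's 8 slots.
struct HeaderAgeSketch {
    unsigned age : 3; // GC cycles survived, 0..7

    void incAge()
    {
        if (age < 7)
            ++age; // saturate; wrapping would corrupt the histogram
    }
};
```

| OLD | NEW |
|---|---|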
| 1889 template<> | 1940 template<> |
| 1890 inline void HeapPage<GeneralHeapObjectHeader>::finalize(GeneralHeapObjectHeader* header) | 1941 inline void HeapPage<GeneralHeapObjectHeader>::finalize(GeneralHeapObjectHeader* header) |
| 1891 { | 1942 { |
| 1892 header->finalize(); | 1943 header->finalize(); |
| 1893 } | 1944 } |
| 1894 | 1945 |
| 1895 template<> | 1946 template<> |
| 1896 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header) | 1947 inline void HeapPage<HeapObjectHeader>::finalize(HeapObjectHeader* header) |
| 1897 { | 1948 { |
| 1898 ASSERT(gcInfo()); | 1949 ASSERT(gcInfo()); |
| (...skipping 155 matching lines...) | |
| 2054 // Release builds don't have the ASSERT, but it is OK because | 2105 // Release builds don't have the ASSERT, but it is OK because |
| 2055 // release builds will crash in the following header->isMarked() | 2106 // release builds will crash in the following header->isMarked() |
| 2056 // because all the entries of the orphaned heaps are zapped. | 2107 // because all the entries of the orphaned heaps are zapped. |
| 2057 ASSERT(!pageFromObject(objectPointer)->orphaned()); | 2108 ASSERT(!pageFromObject(objectPointer)->orphaned()); |
| 2058 | 2109 |
| 2059 if (header->isMarked()) | 2110 if (header->isMarked()) |
| 2060 return; | 2111 return; |
| 2061 header->mark(); | 2112 header->mark(); |
| 2062 | 2113 |
| 2063 #if ENABLE(GC_PROFILE_MARKING) | 2114 #if ENABLE(GC_PROFILE_MARKING) |
| 2115 header->incAge(); | |
| 2116 | |
| 2064 MutexLocker locker(objectGraphMutex()); | 2117 MutexLocker locker(objectGraphMutex()); |
| 2065 String className(classOf(objectPointer)); | 2118 String className(classOf(objectPointer)); |
| 2066 { | 2119 { |
| 2067 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet()); | 2120 LiveObjectMap::AddResult result = currentlyLive().add(className, LiveObjectSet()); |
| 2068 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); | 2121 result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); |
| 2069 } | 2122 } |
| 2123 { | |
| 2124 MutexLocker locker(momMutex()); | |
| 2125 ObjectAgeMap::AddResult result = mom().add(className, AgeHistogram()); | |
> keishi 2015/01/27 08:59:01: Record that the object has been marked to mom()
| 2126 result.storedValue->value.data[header->age()]++; | |
| 2127 } | |
| 2070 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName)); | 2128 ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName)); |
| 2071 ASSERT(result.isNewEntry); | 2129 ASSERT(result.isNewEntry); |
| 2072 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer); | 2130 // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer); |
| 2073 #endif | 2131 #endif |
| 2074 if (callback) | 2132 if (callback) |
| 2075 Heap::pushTraceCallback(m_markingStack, const_cast<void*>(objectPointer), callback); | 2133 Heap::pushTraceCallback(m_markingStack, const_cast<void*>(objectPointer), callback); |
| 2076 } | 2134 } |
| 2077 | 2135 |
| 2078 // We need both HeapObjectHeader and GeneralHeapObjectHeader versions to | 2136 // We need both HeapObjectHeader and GeneralHeapObjectHeader versions to |
| 2079 // correctly find the payload. | 2137 // correctly find the payload. |
| (...skipping 129 matching lines...) | |
| 2209 } | 2267 } |
| 2210 | 2268 |
| 2211 previouslyLive().swap(currentlyLive()); | 2269 previouslyLive().swap(currentlyLive()); |
| 2212 currentlyLive().clear(); | 2270 currentlyLive().clear(); |
| 2213 | 2271 |
| 2214 for (uintptr_t object : objectsToFindPath()) { | 2272 for (uintptr_t object : objectsToFindPath()) { |
| 2215 dumpPathToObjectFromObjectGraph(objectGraph(), object); | 2273 dumpPathToObjectFromObjectGraph(objectGraph(), object); |
| 2216 } | 2274 } |
| 2217 } | 2275 } |
| 2218 | 2276 |
| 2277 void reportMarkingStats() | |
| 2278 { | |
| 2279 MutexLocker locker(momMutex()); | |
| 2280 RefPtr<TracedValue> json = TracedValue::create(); | |
| 2281 for (ObjectAgeMap::iterator it = mom().begin(), end = mom().end(); it != end; ++it) { | |
| 2282 json->beginArray(it->key.ascii().data()); | |
| 2283 for (size_t i = 0; i < 8; ++i) { | |
| 2284 json->pushInteger(it->value.data[i]); | |
| 2285 } | |
| 2286 json->endArray(); | |
| 2287 } | |
| 2288 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "MarkingStats", (unsigned long long)0, json.release()); | |
| 2289 mom().clear(); | |
| 2290 } | |
| 2291 | |
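
reportMarkingStats() serializes mom() into one trace array of eight age-bucket counts per class name. As a rough illustration of the emitted shape, here is an equivalent dump using plain stdio and standard containers in place of TracedValue and trace events; illustrative only:

```cpp
#include <cstdio>
#include <map>
#include <string>

struct Histogram { int data[8] = {}; };

// Prints e.g.  "blink::Element": [12, 5, 3, 0, 0, 0, 0, 1]
void dumpStats(const std::map<std::string, Histogram>& ageMap)
{
    for (const auto& entry : ageMap) {
        std::printf("\"%s\": [", entry.first.c_str());
        for (int i = 0; i < 8; ++i)
            std::printf("%d%s", entry.second.data[i], i < 7 ? ", " : "");
        std::printf("]\n");
    }
}
```

| OLD | NEW |
|---|---|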
| 2219 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous) | 2292 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous) |
| 2220 { | 2293 { |
| 2221 int count = 0; | 2294 int count = 0; |
| 2222 | 2295 |
| 2223 fprintf(stderr, " [previously %u]", previous.size()); | 2296 fprintf(stderr, " [previously %u]", previous.size()); |
| 2224 for (uintptr_t object : current) { | 2297 for (uintptr_t object : current) { |
| 2225 if (previous.find(object) == previous.end()) | 2298 if (previous.find(object) == previous.end()) |
| 2226 continue; | 2299 continue; |
| 2227 count++; | 2300 count++; |
| 2228 } | 2301 } |
| (...skipping 318 matching lines...) | |
| 2547 } | 2620 } |
| 2548 #endif | 2621 #endif |
| 2549 | 2622 |
| 2550 void Heap::preGC() | 2623 void Heap::preGC() |
| 2551 { | 2624 { |
| 2552 ASSERT(!ThreadState::current()->isInGC()); | 2625 ASSERT(!ThreadState::current()->isInGC()); |
| 2553 for (ThreadState* state : ThreadState::attachedThreads()) | 2626 for (ThreadState* state : ThreadState::attachedThreads()) |
| 2554 state->preGC(); | 2627 state->preGC(); |
| 2555 } | 2628 } |
| 2556 | 2629 |
| 2630 void Heap::reportSweepingStats() | |
| 2631 { | |
| 2632 MutexLocker locker(uomMutex()); | |
| 2633 RefPtr<TracedValue> json = TracedValue::create(); | |
| 2634 for (ObjectAgeMap::iterator it = uom().begin(), end = uom().end(); it != end; ++it) { | |
| 2635 json->beginArray(it->key.ascii().data()); | |
| 2636 for (size_t i = 0; i < 8; ++i) { | |
| 2637 json->pushInteger(it->value.data[i]); | |
| 2638 } | |
| 2639 json->endArray(); | |
| 2640 } | |
| 2641 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "SweepingStats", (unsigned long long)0, json.release()); | |
| 2642 uom().clear(); | |
| 2643 } | |
| 2644 | |
| 2557 void Heap::postGC() | 2645 void Heap::postGC() |
| 2558 { | 2646 { |
| 2559 ASSERT(ThreadState::current()->isInGC()); | 2647 ASSERT(ThreadState::current()->isInGC()); |
| 2560 for (ThreadState* state : ThreadState::attachedThreads()) | 2648 for (ThreadState* state : ThreadState::attachedThreads()) |
| 2561 state->postGC(); | 2649 state->postGC(); |
| 2562 } | 2650 } |
| 2563 | 2651 |
| 2564 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCType gcType) | 2652 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCType gcType) |
| 2565 { | 2653 { |
| 2566 ThreadState* state = ThreadState::current(); | 2654 ThreadState* state = ThreadState::current(); |
| 2567 state->setGCState(ThreadState::StoppingOtherThreads); | 2655 state->setGCState(ThreadState::StoppingOtherThreads); |
| 2568 | 2656 |
| 2657 #if ENABLE(GC_PROFILE_FREE_LIST) | |
| 2658 state->snapshotFreeListIfNecessary(); | |
| 2659 #endif | |
| 2660 | |
| 2569 GCScope gcScope(stackState); | 2661 GCScope gcScope(stackState); |
| 2570 // Check if we successfully parked the other threads. If not we bail out of | 2662 // Check if we successfully parked the other threads. If not we bail out of |
| 2571 // the GC. | 2663 // the GC. |
| 2572 if (!gcScope.allThreadsParked()) { | 2664 if (!gcScope.allThreadsParked()) { |
| 2573 state->scheduleGC(); | 2665 state->scheduleGC(); |
| 2574 return; | 2666 return; |
| 2575 } | 2667 } |
| 2576 | 2668 |
| 2577 if (state->isMainThread()) | 2669 if (state->isMainThread()) |
| 2578 ScriptForbiddenScope::enter(); | 2670 ScriptForbiddenScope::enter(); |
| (...skipping 39 matching lines...) | |
| 2618 globalWeakProcessing(s_markingVisitor); | 2710 globalWeakProcessing(s_markingVisitor); |
| 2619 | 2711 |
| 2620 // Now we can delete all orphaned pages because there are no dangling | 2712 // Now we can delete all orphaned pages because there are no dangling |
| 2621 // pointers to the orphaned pages. (If we have such dangling pointers, | 2713 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 2622 // we should have crashed during marking before getting here.) | 2714 // we should have crashed during marking before getting here.) |
| 2623 orphanedPagePool()->decommitOrphanedPages(); | 2715 orphanedPagePool()->decommitOrphanedPages(); |
| 2624 | 2716 |
| 2625 postGC(); | 2717 postGC(); |
| 2626 | 2718 |
| 2627 #if ENABLE(GC_PROFILE_MARKING) | 2719 #if ENABLE(GC_PROFILE_MARKING) |
| 2628 static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportStats(); | 2720 //static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportStats(); |
| 2721 static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportMarkingStats(); | |
| 2629 #endif | 2722 #endif |
| 2630 | 2723 |
| 2631 if (Platform::current()) { | 2724 if (Platform::current()) { |
| 2632 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 2725 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
| 2633 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | 2726 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
| 2634 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | 2727 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
| 2635 } | 2728 } |
| 2636 | 2729 |
| 2637 if (state->isMainThread()) | 2730 if (state->isMainThread()) |
| 2638 ScriptForbiddenScope::exit(); | 2731 ScriptForbiddenScope::exit(); |
| (...skipping 351 matching lines...) | |
| 2990 bool Heap::s_shutdownCalled = false; | 3083 bool Heap::s_shutdownCalled = false; |
| 2991 bool Heap::s_lastGCWasConservative = false; | 3084 bool Heap::s_lastGCWasConservative = false; |
| 2992 FreePagePool* Heap::s_freePagePool; | 3085 FreePagePool* Heap::s_freePagePool; |
| 2993 OrphanedPagePool* Heap::s_orphanedPagePool; | 3086 OrphanedPagePool* Heap::s_orphanedPagePool; |
| 2994 Heap::RegionTree* Heap::s_regionTree = nullptr; | 3087 Heap::RegionTree* Heap::s_regionTree = nullptr; |
| 2995 size_t Heap::s_allocatedObjectSize = 0; | 3088 size_t Heap::s_allocatedObjectSize = 0; |
| 2996 size_t Heap::s_allocatedSpace = 0; | 3089 size_t Heap::s_allocatedSpace = 0; |
| 2997 size_t Heap::s_markedObjectSize = 0; | 3090 size_t Heap::s_markedObjectSize = 0; |
| 2998 | 3091 |
| 2999 } // namespace blink | 3092 } // namespace blink |