OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 32 matching lines...)
43 #include "wtf/LeakAnnotations.h" | 43 #include "wtf/LeakAnnotations.h" |
44 #include "wtf/PassOwnPtr.h" | 44 #include "wtf/PassOwnPtr.h" |
45 #if ENABLE(GC_PROFILE_MARKING) | 45 #if ENABLE(GC_PROFILE_MARKING) |
46 #include "wtf/HashMap.h" | 46 #include "wtf/HashMap.h" |
47 #include "wtf/HashSet.h" | 47 #include "wtf/HashSet.h" |
48 #include "wtf/text/StringBuilder.h" | 48 #include "wtf/text/StringBuilder.h" |
49 #include "wtf/text/StringHash.h" | 49 #include "wtf/text/StringHash.h" |
50 #include <stdio.h> | 50 #include <stdio.h> |
51 #include <utility> | 51 #include <utility> |
52 #endif | 52 #endif |
53 #if ENABLE(GC_PROFILE_HEAP) | 53 #if ENABLE(GC_PROFILE_HEAP) || ENABLE(GC_PROFILE_FREE_LIST) || ENABLE(GC_PROFILE_MARKING) |
54 #include "platform/TracedValue.h" | 54 #include "platform/TracedValue.h" |
55 #endif | 55 #endif |
56 | 56 |
57 #if OS(POSIX) | 57 #if OS(POSIX) |
58 #include <sys/mman.h> | 58 #include <sys/mman.h> |
59 #include <unistd.h> | 59 #include <unistd.h> |
60 #elif OS(WIN) | 60 #elif OS(WIN) |
61 #include <windows.h> | 61 #include <windows.h> |
62 #endif | 62 #endif |
63 | 63 |
64 namespace blink { | 64 namespace blink { |
65 | 65 |
| 66 struct AgeHistogram { |
| 67 int data[8]; |
| 68 }; |
| 69 |
| 70 typedef HashMap<String, AgeHistogram> ObjectAgeMap; |
| 71 |
| 72 static ObjectAgeMap& uom() |
| 73 { |
| 74 static ObjectAgeMap uomap; |
| 75 return uomap; |
| 76 } |
| 77 |
| 78 static Mutex& uomMutex() |
| 79 { |
| 80 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); |
| 81 return mutex; |
| 82 } |
| 83 |
| 84 static ObjectAgeMap& mom() |
| 85 { |
| 86 static ObjectAgeMap momap; |
| 87 return momap; |
| 88 } |
| 89 |
| 90 static Mutex& momMutex() |
| 91 { |
| 92 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); |
| 93 return mutex; |
| 94 } |
| 95 |
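
The two maps added above accumulate one eight-bucket age histogram per class name, each behind its own mutex: uom() is filled by HeapPage::countUnmarkedObjects() and read by Heap::reportSweepingStats() further down, while mom() is read by reportMarkingStats() (its fill site is outside this hunk). A minimal standalone sketch of the same accumulation pattern, using std::map, std::array, and std::mutex in place of WTF::HashMap and Blink's Mutex (all names below are illustrative only, not from this patch):

    #include <array>
    #include <map>
    #include <mutex>
    #include <string>

    // One eight-bucket histogram per class name, mirroring AgeHistogram::data[8].
    using AgeHistogramSketch = std::array<int, 8>;
    using ObjectAgeMapSketch = std::map<std::string, AgeHistogramSketch>;

    static ObjectAgeMapSketch ageMap;     // stands in for uom()/mom()
    static std::mutex ageMapMutex;        // stands in for uomMutex()/momMutex()

    // Record that one object of class `className` was observed at `age` (0..7).
    void recordObjectAge(const std::string& className, int age)
    {
        std::lock_guard<std::mutex> locker(ageMapMutex);
        ageMap[className][age]++;         // operator[] zero-initializes a new histogram
    }
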
66 #if ENABLE(GC_PROFILE_MARKING) | 96 #if ENABLE(GC_PROFILE_MARKING) |
67 static String classOf(const void* object) | 97 static String classOf(const void* object) |
68 { | 98 { |
69    if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) | 99    if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) |
70 return gcInfo->m_className; | 100 return gcInfo->m_className; |
71 return "unknown"; | 101 return "unknown"; |
72 } | 102 } |
73 #endif | 103 #endif |
74 | 104 |
75 static bool vTableInitialized(void* objectPointer) | 105 static bool vTableInitialized(void* objectPointer) |
(...skipping 460 matching lines...)
536 | 566 |
537 void LargeObject::removeFromHeap(ThreadHeap* heap) | 567 void LargeObject::removeFromHeap(ThreadHeap* heap) |
538 { | 568 { |
539 heap->freeLargeObject(this); | 569 heap->freeLargeObject(this); |
540 } | 570 } |
541 | 571 |
542 ThreadHeap::ThreadHeap(ThreadState* state, int index) | 572 ThreadHeap::ThreadHeap(ThreadState* state, int index) |
543 : m_currentAllocationPoint(nullptr) | 573 : m_currentAllocationPoint(nullptr) |
544 , m_remainingAllocationSize(0) | 574 , m_remainingAllocationSize(0) |
545 , m_lastRemainingAllocationSize(0) | 575 , m_lastRemainingAllocationSize(0) |
| 576 #if ENABLE(GC_PROFILE_FREE_LIST) |
| 577 , m_totalAllocationSize(0.0) |
| 578 , m_allocationCount(0) |
| 579 , m_inlineAllocationCount(0) |
| 580 #endif |
546 , m_firstPage(nullptr) | 581 , m_firstPage(nullptr) |
547 , m_firstLargeObject(nullptr) | 582 , m_firstLargeObject(nullptr) |
548 , m_firstUnsweptPage(nullptr) | 583 , m_firstUnsweptPage(nullptr) |
549 , m_firstUnsweptLargeObject(nullptr) | 584 , m_firstUnsweptLargeObject(nullptr) |
550 , m_threadState(state) | 585 , m_threadState(state) |
551 , m_index(index) | 586 , m_index(index) |
552 , m_promptlyFreedSize(0) | 587 , m_promptlyFreedSize(0) |
553 { | 588 { |
554 clearFreeLists(); | 589 clearFreeLists(); |
555 } | 590 } |
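
The three GC_PROFILE_FREE_LIST counters initialized in the constructor above are presumably bumped on the allocation paths in Heap.h, which this hunk does not show; snapshotFreeList() below reports their ratio as inlineAllocationRate. A hedged sketch of that bookkeeping, with a hypothetical countAllocation() helper standing in for the real call sites:

    #include <cstddef>

    // Hypothetical sketch only; the actual increment sites live in ThreadHeap's
    // inline allocation path and are not part of this diff.
    struct AllocationCounters {
        double totalAllocationSize = 0.0;
        size_t allocationCount = 0;
        size_t inlineAllocationCount = 0;

        void countAllocation(size_t size, bool servedFromBumpPointer)
        {
            totalAllocationSize += size;
            ++allocationCount;
            if (servedFromBumpPointer)
                ++inlineAllocationCount;
        }

        double inlineAllocationRate() const
        {
            return allocationCount ? static_cast<double>(inlineAllocationCount) / allocationCount : 0.0;
        }
    };
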
(...skipping 43 matching lines...)
599 void ThreadHeap::setAllocationPoint(Address point, size_t size) | 634 void ThreadHeap::setAllocationPoint(Address point, size_t size) |
600 { | 635 { |
601 #if ENABLE(ASSERT) | 636 #if ENABLE(ASSERT) |
602 if (point) { | 637 if (point) { |
603 ASSERT(size); | 638 ASSERT(size); |
604 BaseHeapPage* page = pageFromObject(point); | 639 BaseHeapPage* page = pageFromObject(point); |
605 ASSERT(!page->isLargeObject()); | 640 ASSERT(!page->isLargeObject()); |
606 ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize()); | 641 ASSERT(size <= static_cast<HeapPage*>(page)->payloadSize()); |
607 } | 642 } |
608 #endif | 643 #endif |
| 644 #if ENABLE(GC_PROFILE_FREE_LIST) |
| 645 m_allocationPointSizeSum += size; |
| 646 ++m_setAllocationPointCount; |
| 647 #endif |
609 if (hasCurrentAllocationArea()) | 648 if (hasCurrentAllocationArea()) |
610 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); | 649 addToFreeList(currentAllocationPoint(), remainingAllocationSize()); |
611 updateRemainingAllocationSize(); | 650 updateRemainingAllocationSize(); |
612 m_currentAllocationPoint = point; | 651 m_currentAllocationPoint = point; |
613 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; | 652 m_lastRemainingAllocationSize = m_remainingAllocationSize = size; |
614 } | 653 } |
615 | 654 |
616 Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) | 655 Address ThreadHeap::outOfLineAllocate(size_t allocationSize, size_t gcInfoIndex) |
617 { | 656 { |
| 657 #if ENABLE(GC_PROFILE_FREE_LIST) |
| 658 m_threadState->snapshotFreeListIfNecessary(); |
| 659 #endif |
618 ASSERT(allocationSize > remainingAllocationSize()); | 660 ASSERT(allocationSize > remainingAllocationSize()); |
619 ASSERT(allocationSize >= allocationGranularity); | 661 ASSERT(allocationSize >= allocationGranularity); |
620 | 662 |
621 // 1. If this allocation is big enough, allocate a large object. | 663 // 1. If this allocation is big enough, allocate a large object. |
622 if (allocationSize >= largeObjectSizeThreshold) | 664 if (allocationSize >= largeObjectSizeThreshold) |
623 return allocateLargeObject(allocationSize, gcInfoIndex); | 665 return allocateLargeObject(allocationSize, gcInfoIndex); |
624 | 666 |
625 // 2. Check if we should trigger a GC. | 667 // 2. Check if we should trigger a GC. |
626 updateRemainingAllocationSize(); | 668 updateRemainingAllocationSize(); |
627 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); | 669 threadState()->scheduleGCOrForceConservativeGCIfNeeded(); |
(...skipping 222 matching lines...)
850 ScriptForbiddenScope::exit(); | 892 ScriptForbiddenScope::exit(); |
851 } | 893 } |
852 | 894 |
853 #if ENABLE(ASSERT) | 895 #if ENABLE(ASSERT) |
854 static bool isLargeObjectAligned(LargeObject* largeObject, Address address) | 896 static bool isLargeObjectAligned(LargeObject* largeObject, Address address) |
855 { | 897 { |
856 // Check that a large object is blinkPageSize aligned (modulo the osPageSize | 898 // Check that a large object is blinkPageSize aligned (modulo the osPageSize |
857 // for the guard page). | 899 // for the guard page). |
858    return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); | 900    return reinterpret_cast<Address>(largeObject) - WTF::kSystemPageSize == roundToBlinkPageStart(reinterpret_cast<Address>(largeObject)); |
859 } | 901 } |
| 902 #endif |
860 | 903 |
| 904 #if ENABLE(ASSERT) || ENABLE(GC_PROFILE_MARKING) |
861 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address) | 905 BaseHeapPage* ThreadHeap::findPageFromAddress(Address address) |
862 { | 906 { |
863 for (HeapPage* page = m_firstPage; page; page = page->next()) { | 907 for (HeapPage* page = m_firstPage; page; page = page->next()) { |
864 if (page->contains(address)) | 908 if (page->contains(address)) |
865 return page; | 909 return page; |
866 } | 910 } |
867 for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { | 911 for (HeapPage* page = m_firstUnsweptPage; page; page = page->next()) { |
868 if (page->contains(address)) | 912 if (page->contains(address)) |
869 return page; | 913 return page; |
870 } | 914 } |
871    for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { | 915    for (LargeObject* largeObject = m_firstLargeObject; largeObject; largeObject = largeObject->next()) { |
872 ASSERT(isLargeObjectAligned(largeObject, address)); | 916 ASSERT(isLargeObjectAligned(largeObject, address)); |
873 if (largeObject->contains(address)) | 917 if (largeObject->contains(address)) |
874 return largeObject; | 918 return largeObject; |
875 } | 919 } |
876    for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; largeObject = largeObject->next()) { | 920    for (LargeObject* largeObject = m_firstUnsweptLargeObject; largeObject; largeObject = largeObject->next()) { |
877 ASSERT(isLargeObjectAligned(largeObject, address)); | 921 ASSERT(isLargeObjectAligned(largeObject, address)); |
878 if (largeObject->contains(address)) | 922 if (largeObject->contains(address)) |
879 return largeObject; | 923 return largeObject; |
880 } | 924 } |
881 return nullptr; | 925 return nullptr; |
882 } | 926 } |
883 #endif | 927 #endif |
884 | 928 |
| 929 #if ENABLE(GC_PROFILE_FREE_LIST) |
| 930 void ThreadHeap::snapshotFreeList(TracedValue* json) |
| 931 { |
| 932 json->setDouble("totalAllocationSize", m_totalAllocationSize); |
 | 933    json->setDouble("inlineAllocationRate", static_cast<double>(m_inlineAllocationCount) / m_allocationCount); |
| 934 json->setInteger("inlineAllocationCount", m_inlineAllocationCount); |
| 935 json->setInteger("allocationCount", m_allocationCount); |
| 936 if (m_setAllocationPointCount > 0) { |
 | 937        json->setDouble("averageAllocationPointSize", static_cast<double>(m_allocationPointSizeSum) / m_setAllocationPointCount); |
| 938 } |
| 939 m_allocationPointSizeSum = 0; |
| 940 m_setAllocationPointCount = 0; |
| 941 size_t pageCount = 0; |
| 942 size_t totalPageSize = 0; |
| 943 for (HeapPage* page = m_firstPage; page; page = page->next()) { |
| 944 ++pageCount; |
| 945 totalPageSize += page->payloadSize(); |
| 946 } |
| 947 json->setInteger("pageCount", pageCount); |
| 948 json->setInteger("totalPageSize", totalPageSize); |
| 949 size_t bucketSizes[blinkPageSizeLog2]; |
| 950 size_t bucketTotalSizes[blinkPageSizeLog2]; |
| 951 size_t freeSize = 0; |
| 952 m_freeList.countBucketSizes(bucketSizes, bucketTotalSizes, &freeSize); |
| 953 json->setInteger("freeSize", freeSize); |
| 954 json->beginArray("bucketSizes"); |
| 955 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
| 956 json->pushInteger(bucketSizes[i]); |
| 957 } |
| 958 json->endArray(); |
| 959 json->beginArray("bucketTotalSizes"); |
| 960 for (size_t i = 0; i < blinkPageSizeLog2; ++i) { |
| 961 json->pushInteger(bucketTotalSizes[i]); |
| 962 } |
| 963 json->endArray(); |
| 964 } |
| 965 #endif |
| 966 |
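
snapshotFreeList() above emits, per heap, the allocation counters plus two parallel per-bucket arrays (entry counts and their combined byte size) and an overall freeSize. A hypothetical consumer of that data, assuming bucket i holds free entries of roughly 2^i bytes as bucketIndexForSize() suggests, could summarize fragmentation like this (not part of the patch):

    #include <cstddef>
    #include <cstdio>

    // Hypothetical post-processing of the values snapshotFreeList() reports:
    // bucketSizes[i] = number of free entries in bucket i,
    // bucketTotalSizes[i] = their combined size in bytes.
    void printFreeListSummary(const size_t bucketSizes[], const size_t bucketTotalSizes[],
                              size_t bucketCount, size_t totalPageSize)
    {
        size_t freeSize = 0;
        for (size_t i = 0; i < bucketCount; ++i) {
            freeSize += bucketTotalSizes[i];
            if (bucketSizes[i])
                std::printf("bucket %zu: %zu entries, avg %zu bytes\n",
                            i, bucketSizes[i], bucketTotalSizes[i] / bucketSizes[i]);
        }
        if (totalPageSize)
            std::printf("free %zu of %zu payload bytes (%.1f%%)\n",
                        freeSize, totalPageSize, 100.0 * freeSize / totalPageSize);
    }
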
885 #if ENABLE(GC_PROFILE_HEAP) | 967 #if ENABLE(GC_PROFILE_HEAP) |
886 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 | 968 #define GC_PROFILE_HEAP_PAGE_SNAPSHOT_THRESHOLD 0 |
887 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 969 void ThreadHeap::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
888 { | 970 { |
889 ASSERT(isConsistentForSweeping()); | 971 ASSERT(isConsistentForSweeping()); |
890 size_t previousPageCount = info->pageCount; | 972 size_t previousPageCount = info->pageCount; |
891 | 973 |
892 json->beginArray("pages"); | 974 json->beginArray("pages"); |
893    for (HeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { | 975    for (HeapPage* page = m_firstPage; page; page = page->next(), ++info->pageCount) { |
894        // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. | 976        // FIXME: To limit the size of the snapshot we only output "threshold" many page snapshots. |
(...skipping 47 matching lines...)
942 // space. | 1024 // space. |
943    if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList()) | 1025    if (static_cast<HeapPage*>(page)->payloadSize() != size && !entry->shouldAddToFreeList()) |
944 return; | 1026 return; |
945 #endif | 1027 #endif |
946 int index = bucketIndexForSize(size); | 1028 int index = bucketIndexForSize(size); |
947 entry->link(&m_freeLists[index]); | 1029 entry->link(&m_freeLists[index]); |
948 if (index > m_biggestFreeListIndex) | 1030 if (index > m_biggestFreeListIndex) |
949 m_biggestFreeListIndex = index; | 1031 m_biggestFreeListIndex = index; |
950 } | 1032 } |
951 | 1033 |
| 1034 #if ENABLE(GC_PROFILE_FREE_LIST) |
 | 1035 void FreeList::countBucketSizes(size_t sizes[], size_t totalSizes[], size_t* freeSize) const |
| 1036 { |
| 1037 *freeSize = 0; |
| 1038 for (size_t i = 0; i < blinkPageSizeLog2; i++) { |
| 1039 sizes[i] = 0; |
| 1040 totalSizes[i] = 0; |
| 1041 FreeListEntry* entry = m_freeLists[i]; |
| 1042 while (entry) { |
| 1043 ++sizes[i]; |
| 1044 *freeSize += entry->size(); |
| 1045 totalSizes[i] += entry->size(); |
| 1046 entry = entry->next(); |
| 1047 } |
| 1048 } |
| 1049 } |
| 1050 #endif |
| 1051 |
952 bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize) | 1052 bool ThreadHeap::expandObject(HeapObjectHeader* header, size_t newSize) |
953 { | 1053 { |
954 // It's possible that Vector requests a smaller expanded size because | 1054 // It's possible that Vector requests a smaller expanded size because |
955 // Vector::shrinkCapacity can set a capacity smaller than the actual payload | 1055 // Vector::shrinkCapacity can set a capacity smaller than the actual payload |
956 // size. | 1056 // size. |
957 if (header->payloadSize() >= newSize) | 1057 if (header->payloadSize() >= newSize) |
958 return true; | 1058 return true; |
959 size_t allocationSize = allocationSizeFromSize(newSize); | 1059 size_t allocationSize = allocationSizeFromSize(newSize); |
960 ASSERT(allocationSize > header->size()); | 1060 ASSERT(allocationSize > header->size()); |
961 size_t expandSize = allocationSize - header->size(); | 1061 size_t expandSize = allocationSize - header->size(); |
(...skipping 793 matching lines...)
1755 } | 1855 } |
1756 } | 1856 } |
1757 #endif | 1857 #endif |
1758 | 1858 |
1759 size_t LargeObject::objectPayloadSizeForTesting() | 1859 size_t LargeObject::objectPayloadSizeForTesting() |
1760 { | 1860 { |
1761 markAsSwept(); | 1861 markAsSwept(); |
1762 return payloadSize(); | 1862 return payloadSize(); |
1763 } | 1863 } |
1764 | 1864 |
| 1865 void HeapPage::countUnmarkedObjects() |
| 1866 { |
| 1867 MutexLocker locker(uomMutex()); |
| 1868 for (Address headerAddress = payload(); headerAddress < payloadEnd(); ) { |
 | 1869        HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1870 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1871 |
| 1872 if (!header->isFree() && !header->isMarked()) { |
| 1873 String className(classOf(header->payload())); |
 | 1874            ObjectAgeMap::AddResult result = uom().add(className, AgeHistogram()); |
| 1875 result.storedValue->value.data[header->age()]++; |
| 1876 } |
| 1877 headerAddress += header->size(); |
| 1878 } |
| 1879 } |
| 1880 |
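
countUnmarkedObjects() above walks the page payload header by header, advancing the cursor by each object's size and bucketing unmarked, non-free objects by age. A simplified, self-contained illustration of that traversal pattern (toy header layout, not Blink's real HeapObjectHeader):

    #include <cstdint>
    #include <vector>

    // Toy header: each object starts with its total size (header included) and flags.
    struct ToyHeader {
        uint32_t size;     // bytes occupied by this object, header included
        bool isFree;
        bool isMarked;
        uint8_t age;       // 0..7, matching the eight AgeHistogram buckets
    };

    // Same shape as the loop in countUnmarkedObjects(): read the header at the
    // cursor, act on it, then jump forward by header->size.
    void collectUnmarked(uint8_t* payload, uint8_t* payloadEnd, std::vector<const ToyHeader*>& out)
    {
        for (uint8_t* cursor = payload; cursor < payloadEnd; ) {
            const ToyHeader* header = reinterpret_cast<const ToyHeader*>(cursor);
            if (!header->isFree && !header->isMarked)
                out.push_back(header);
            cursor += header->size;
        }
    }
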
| 1881 #if ENABLE(GC_PROFILE_MARKING) |
| 1882 const GCInfo* LargeObject::findGCInfo(Address address) |
| 1883 { |
| 1884 if (!containedInObjectPayload(address)) |
| 1885 return nullptr; |
| 1886 HeapObjectHeader* header = heapObjectHeader(); |
| 1887 return Heap::gcInfo(header->gcInfoIndex()); |
| 1888 } |
| 1889 #endif |
| 1890 |
1765 #if ENABLE(GC_PROFILE_HEAP) | 1891 #if ENABLE(GC_PROFILE_HEAP) |
1766 void LargeObject::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) | 1892 void LargeObject::snapshot(TracedValue* json, ThreadState::SnapshotInfo* info) |
1767 { | 1893 { |
1768 HeapObjectHeader* header = heapObjectHeader(); | 1894 HeapObjectHeader* header = heapObjectHeader(); |
1769 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex())); | 1895 size_t tag = info->getClassTag(Heap::gcInfo(header->gcInfoIndex())); |
1770 size_t age = header->age(); | 1896 size_t age = header->age(); |
1771 if (header->isMarked()) { | 1897 if (header->isMarked()) { |
1772 info->liveCount[tag] += 1; | 1898 info->liveCount[tag] += 1; |
1773 info->liveSize[tag] += header->size(); | 1899 info->liveSize[tag] += header->size(); |
1774 // Count objects that are live when promoted to the final generation. | 1900 // Count objects that are live when promoted to the final generation. |
(...skipping 139 matching lines...)
1914            result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); | 2040            result.storedValue->value.add(reinterpret_cast<uintptr_t>(objectPointer)); |
1915 } | 2041 } |
1916        ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName)); | 2042        ObjectGraph::AddResult result = objectGraph().add(reinterpret_cast<uintptr_t>(objectPointer), std::make_pair(reinterpret_cast<uintptr_t>(m_hostObject), m_hostName)); |
1917 ASSERT(result.isNewEntry); | 2043 ASSERT(result.isNewEntry); |
1918        // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer); | 2044        // fprintf(stderr, "%s[%p] -> %s[%p]\n", m_hostName.ascii().data(), m_hostObject, className.ascii().data(), objectPointer); |
1919 } | 2045 } |
1920 | 2046 |
1921 void reportStats() | 2047 void reportStats() |
1922 { | 2048 { |
1923 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n"); | 2049 fprintf(stderr, "\n---------- AFTER MARKING -------------------\n"); |
1924        for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().payloadEnd(); it != end; ++it) { | 2050        for (LiveObjectMap::iterator it = currentlyLive().begin(), end = currentlyLive().end(); it != end; ++it) { |
1925 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size()); | 2051 fprintf(stderr, "%s %u", it->key.ascii().data(), it->value.size()); |
1926 | 2052 |
1927 if (it->key == "blink::Document") | 2053 if (it->key == "blink::Document") |
1928 reportStillAlive(it->value, previouslyLive().get(it->key)); | 2054 reportStillAlive(it->value, previouslyLive().get(it->key)); |
1929 | 2055 |
1930 fprintf(stderr, "\n"); | 2056 fprintf(stderr, "\n"); |
1931 } | 2057 } |
1932 | 2058 |
1933 previouslyLive().swap(currentlyLive()); | 2059 previouslyLive().swap(currentlyLive()); |
1934 currentlyLive().clear(); | 2060 currentlyLive().clear(); |
1935 | 2061 |
1936 for (uintptr_t object : objectsToFindPath()) { | 2062 for (uintptr_t object : objectsToFindPath()) { |
1937 dumpPathToObjectFromObjectGraph(objectGraph(), object); | 2063 dumpPathToObjectFromObjectGraph(objectGraph(), object); |
1938 } | 2064 } |
1939 } | 2065 } |
1940 | 2066 |
| 2067 void reportMarkingStats() |
| 2068 { |
| 2069 MutexLocker locker(momMutex()); |
| 2070 RefPtr<TracedValue> json = TracedValue::create(); |
 | 2071        for (ObjectAgeMap::iterator it = mom().begin(), end = mom().end(); it != end; ++it) { |
| 2072 json->beginArray(it->key.ascii().data()); |
| 2073 for (size_t i = 0; i < 8; ++i) { |
| 2074 json->pushInteger(it->value.data[i]); |
| 2075 } |
| 2076 json->endArray(); |
| 2077 } |
 | 2078        TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "MarkingStats", (unsigned long long)0, json.release()); |
| 2079 mom().clear(); |
| 2080 } |
| 2081 |
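
reportMarkingStats() above (and the analogous Heap::reportSweepingStats() near the end of this file) serialize each per-class age histogram as a JSON array keyed by class name and attach it to a trace snapshot. Assuming TracedValue produces the obvious structure, the payload is equivalent to what this small standalone sketch prints (values made up):

    #include <array>
    #include <cstdio>
    #include <map>
    #include <string>

    // Prints roughly what the "MarkingStats"/"SweepingStats" snapshots contain:
    // {"blink::Document": [12, 3, 1, 0, 0, 0, 0, 2], ...}
    void dumpAgeStatsAsJson(const std::map<std::string, std::array<int, 8>>& stats)
    {
        std::printf("{");
        const char* outerSep = "";
        for (const auto& entry : stats) {
            std::printf("%s\"%s\": [", outerSep, entry.first.c_str());
            const char* innerSep = "";
            for (int count : entry.second) {
                std::printf("%s%d", innerSep, count);
                innerSep = ", ";
            }
            std::printf("]");
            outerSep = ", ";
        }
        std::printf("}\n");
    }
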
1941 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous) | 2082 static void reportStillAlive(LiveObjectSet current, LiveObjectSet previous) |
1942 { | 2083 { |
1943 int count = 0; | 2084 int count = 0; |
1944 | 2085 |
1945 fprintf(stderr, " [previously %u]", previous.size()); | 2086 fprintf(stderr, " [previously %u]", previous.size()); |
1946 for (uintptr_t object : current) { | 2087 for (uintptr_t object : current) { |
1947 if (previous.find(object) == previous.payloadEnd()) | 2088 if (previous.find(object) == previous.end()) |
1948 continue; | 2089 continue; |
1949 count++; | 2090 count++; |
1950 } | 2091 } |
1951 | 2092 |
1952 if (!count) | 2093 if (!count) |
1953 return; | 2094 return; |
1954 | 2095 |
1955 fprintf(stderr, " {survived 2GCs %d: ", count); | 2096 fprintf(stderr, " {survived 2GCs %d: ", count); |
1956 for (uintptr_t object : current) { | 2097 for (uintptr_t object : current) { |
1957 if (previous.find(object) == previous.payloadEnd()) | 2098 if (previous.find(object) == previous.end()) |
1958 continue; | 2099 continue; |
1959 fprintf(stderr, "%ld", object); | 2100 fprintf(stderr, "%ld", object); |
1960 if (--count) | 2101 if (--count) |
1961 fprintf(stderr, ", "); | 2102 fprintf(stderr, ", "); |
1962 } | 2103 } |
1963 ASSERT(!count); | 2104 ASSERT(!count); |
1964 fprintf(stderr, "}"); | 2105 fprintf(stderr, "}"); |
1965 } | 2106 } |
1966 | 2107 |
1967    static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target) | 2108    static void dumpPathToObjectFromObjectGraph(const ObjectGraph& graph, uintptr_t target) |
1968 { | 2109 { |
1969 ObjectGraph::const_iterator it = graph.find(target); | 2110 ObjectGraph::const_iterator it = graph.find(target); |
1970 if (it == graph.payloadEnd()) | 2111 if (it == graph.end()) |
1971 return; | 2112 return; |
1972        fprintf(stderr, "Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data()); | 2113        fprintf(stderr, "Path to %lx of %s\n", target, classOf(reinterpret_cast<const void*>(target)).ascii().data()); |
1973 while (it != graph.payloadEnd()) { | 2114 while (it != graph.end()) { |
1974            fprintf(stderr, "<- %lx of %s\n", it->value.first, it->value.second.utf8().data()); | 2115            fprintf(stderr, "<- %lx of %s\n", it->value.first, it->value.second.utf8().data()); |
1975 it = graph.find(it->value.first); | 2116 it = graph.find(it->value.first); |
1976 } | 2117 } |
1977 fprintf(stderr, "\n"); | 2118 fprintf(stderr, "\n"); |
1978 } | 2119 } |
1979 | 2120 |
1980 static void dumpPathToObjectOnNextGC(void* p) | 2121 static void dumpPathToObjectOnNextGC(void* p) |
1981 { | 2122 { |
1982 objectsToFindPath().add(reinterpret_cast<uintptr_t>(p)); | 2123 objectsToFindPath().add(reinterpret_cast<uintptr_t>(p)); |
1983 } | 2124 } |
(...skipping 293 matching lines...)
2277 } | 2418 } |
2278 #endif | 2419 #endif |
2279 | 2420 |
2280 void Heap::preGC() | 2421 void Heap::preGC() |
2281 { | 2422 { |
2282 ASSERT(!ThreadState::current()->isInGC()); | 2423 ASSERT(!ThreadState::current()->isInGC()); |
2283 for (ThreadState* state : ThreadState::attachedThreads()) | 2424 for (ThreadState* state : ThreadState::attachedThreads()) |
2284 state->preGC(); | 2425 state->preGC(); |
2285 } | 2426 } |
2286 | 2427 |
| 2428 void Heap::reportSweepingStats() |
| 2429 { |
| 2430 MutexLocker locker(uomMutex()); |
| 2431 RefPtr<TracedValue> json = TracedValue::create(); |
 | 2432    for (ObjectAgeMap::iterator it = uom().begin(), end = uom().end(); it != end; ++it) { |
| 2433 json->beginArray(it->key.ascii().data()); |
| 2434 for (size_t i = 0; i < 8; ++i) { |
| 2435 json->pushInteger(it->value.data[i]); |
| 2436 } |
| 2437 json->endArray(); |
| 2438 } |
 | 2439    TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID("blink_gc", "SweepingStats", (unsigned long long)0, json.release()); |
| 2440 uom().clear(); |
| 2441 } |
| 2442 |
2287 void Heap::postGC(ThreadState::GCType gcType) | 2443 void Heap::postGC(ThreadState::GCType gcType) |
2288 { | 2444 { |
2289 ASSERT(ThreadState::current()->isInGC()); | 2445 ASSERT(ThreadState::current()->isInGC()); |
2290 for (ThreadState* state : ThreadState::attachedThreads()) | 2446 for (ThreadState* state : ThreadState::attachedThreads()) |
2291 state->postGC(gcType); | 2447 state->postGC(gcType); |
2292 } | 2448 } |
2293 | 2449 |
2294 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCType gcType) | 2450 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCType gcType) |
2295 { | 2451 { |
2296 ThreadState* state = ThreadState::current(); | 2452 ThreadState* state = ThreadState::current(); |
2297 state->setGCState(ThreadState::StoppingOtherThreads); | 2453 state->setGCState(ThreadState::StoppingOtherThreads); |
2298 | 2454 |
| 2455 #if ENABLE(GC_PROFILE_FREE_LIST) |
| 2456 state->snapshotFreeListIfNecessary(); |
| 2457 #endif |
| 2458 |
2299 GCScope gcScope(stackState); | 2459 GCScope gcScope(stackState); |
2300 // Check if we successfully parked the other threads. If not we bail out of | 2460 // Check if we successfully parked the other threads. If not we bail out of |
2301 // the GC. | 2461 // the GC. |
2302 if (!gcScope.allThreadsParked()) { | 2462 if (!gcScope.allThreadsParked()) { |
2303 state->scheduleGC(); | 2463 state->scheduleGC(); |
2304 return; | 2464 return; |
2305 } | 2465 } |
2306 | 2466 |
2307 if (state->isMainThread()) | 2467 if (state->isMainThread()) |
2308 ScriptForbiddenScope::enter(); | 2468 ScriptForbiddenScope::enter(); |
(...skipping 41 matching lines...)
2350 globalWeakProcessing(s_markingVisitor); | 2510 globalWeakProcessing(s_markingVisitor); |
2351 | 2511 |
2352 // Now we can delete all orphaned pages because there are no dangling | 2512 // Now we can delete all orphaned pages because there are no dangling |
2353 // pointers to the orphaned pages. (If we have such dangling pointers, | 2513 // pointers to the orphaned pages. (If we have such dangling pointers, |
2354 // we should have crashed during marking before getting here.) | 2514 // we should have crashed during marking before getting here.) |
2355 orphanedPagePool()->decommitOrphanedPages(); | 2515 orphanedPagePool()->decommitOrphanedPages(); |
2356 | 2516 |
2357 postGC(gcType); | 2517 postGC(gcType); |
2358 | 2518 |
2359 #if ENABLE(GC_PROFILE_MARKING) | 2519 #if ENABLE(GC_PROFILE_MARKING) |
2360    static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportStats(); | 2520    // static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportStats(); |
 | 2521    static_cast<MarkingVisitor<GlobalMarking>*>(s_markingVisitor)->reportMarkingStats(); |
2361 #endif | 2522 #endif |
2362 | 2523 |
2363 if (Platform::current()) { | 2524 if (Platform::current()) { |
2364        Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); | 2525        Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", WTF::currentTimeMS() - timeStamp, 0, 10 * 1000, 50); |
2365        Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | 2526        Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
2366        Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | 2527        Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
2367 } | 2528 } |
2368 | 2529 |
2369 if (state->isMainThread()) | 2530 if (state->isMainThread()) |
2370 ScriptForbiddenScope::exit(); | 2531 ScriptForbiddenScope::exit(); |
(...skipping 334 matching lines...)
2705 bool Heap::s_shutdownCalled = false; | 2866 bool Heap::s_shutdownCalled = false; |
2706 bool Heap::s_lastGCWasConservative = false; | 2867 bool Heap::s_lastGCWasConservative = false; |
2707 FreePagePool* Heap::s_freePagePool; | 2868 FreePagePool* Heap::s_freePagePool; |
2708 OrphanedPagePool* Heap::s_orphanedPagePool; | 2869 OrphanedPagePool* Heap::s_orphanedPagePool; |
2709 Heap::RegionTree* Heap::s_regionTree = nullptr; | 2870 Heap::RegionTree* Heap::s_regionTree = nullptr; |
2710 size_t Heap::s_allocatedObjectSize = 0; | 2871 size_t Heap::s_allocatedObjectSize = 0; |
2711 size_t Heap::s_allocatedSpace = 0; | 2872 size_t Heap::s_allocatedSpace = 0; |
2712 size_t Heap::s_markedObjectSize = 0; | 2873 size_t Heap::s_markedObjectSize = 0; |
2713 | 2874 |
2714 } // namespace blink | 2875 } // namespace blink |