OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 11 matching lines...) |
22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 22 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 23 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 24 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 25 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 26 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 27 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 28 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
29 */ | 29 */ |
30 | 30 |
31 #include "config.h" | 31 #include "config.h" |
32 #include "platform/heap/Heap.h" | 32 #include "platform/heap/HeapPage.h" |
33 | 33 |
34 #include "platform/ScriptForbiddenScope.h" | 34 #include "platform/ScriptForbiddenScope.h" |
35 #include "platform/Task.h" | 35 #include "platform/Task.h" |
36 #include "platform/TraceEvent.h" | 36 #include "platform/TraceEvent.h" |
37 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 37 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
38 #include "platform/heap/CallbackStack.h" | 38 #include "platform/heap/CallbackStack.h" |
| 39 #include "platform/heap/Heap.h" |
39 #include "platform/heap/MarkingVisitor.h" | 40 #include "platform/heap/MarkingVisitor.h" |
40 #include "platform/heap/PageMemory.h" | 41 #include "platform/heap/PageMemory.h" |
41 #include "platform/heap/PagePool.h" | 42 #include "platform/heap/PagePool.h" |
42 #include "platform/heap/SafePoint.h" | 43 #include "platform/heap/SafePoint.h" |
43 #include "platform/heap/ThreadState.h" | 44 #include "platform/heap/ThreadState.h" |
44 #include "public/platform/Platform.h" | 45 #include "public/platform/Platform.h" |
45 #include "public/platform/WebMemoryAllocatorDump.h" | 46 #include "public/platform/WebMemoryAllocatorDump.h" |
46 #include "public/platform/WebProcessMemoryDump.h" | 47 #include "public/platform/WebProcessMemoryDump.h" |
47 #include "wtf/Assertions.h" | 48 #include "wtf/Assertions.h" |
48 #include "wtf/ContainerAnnotations.h" | 49 #include "wtf/ContainerAnnotations.h" |
49 #include "wtf/LeakAnnotations.h" | 50 #include "wtf/LeakAnnotations.h" |
50 #include "wtf/MainThread.h" | 51 #include "wtf/MainThread.h" |
51 #include "wtf/PageAllocator.h" | 52 #include "wtf/PageAllocator.h" |
52 #include "wtf/Partitions.h" | 53 #include "wtf/Partitions.h" |
53 #include "wtf/PassOwnPtr.h" | 54 #include "wtf/PassOwnPtr.h" |
54 #if ENABLE(GC_PROFILING) | 55 #if ENABLE(GC_PROFILING) |
55 #include "platform/TracedValue.h" | 56 #include "platform/TracedValue.h" |
56 #include "wtf/HashMap.h" | 57 #include "wtf/HashMap.h" |
57 #include "wtf/HashSet.h" | 58 #include "wtf/HashSet.h" |
58 #include "wtf/text/StringBuilder.h" | 59 #include "wtf/text/StringBuilder.h" |
59 #include "wtf/text/StringHash.h" | 60 #include "wtf/text/StringHash.h" |
60 #include <stdio.h> | 61 #include <stdio.h> |
61 #include <utility> | 62 #include <utility> |
62 #endif | 63 #endif |
63 | 64 |
64 #if OS(POSIX) | |
65 #include <sys/mman.h> | |
66 #include <unistd.h> | |
67 #elif OS(WIN) | |
68 #include <windows.h> | |
69 #endif | |
70 | |
71 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER | 65 #ifdef ANNOTATE_CONTIGUOUS_CONTAINER |
72 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. | 66 // FIXME: have ContainerAnnotations.h define an ENABLE_-style name instead. |
73 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 | 67 #define ENABLE_ASAN_CONTAINER_ANNOTATIONS 1 |
74 | 68 |
75 // When finalizing a non-inlined vector backing store/container, remove | 69 // When finalizing a non-inlined vector backing store/container, remove |
76 // its contiguous container annotation. Required as it will not be destructed | 70 // its contiguous container annotation. Required as it will not be destructed |
77 // from its Vector. | 71 // from its Vector. |
78 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ | 72 #define ASAN_RETIRE_CONTAINER_ANNOTATION(object, objectSize) \ |
79 do { \ | 73 do { \ |
80 BasePage* page = pageFromObject(object); \ | 74 BasePage* page = pageFromObject(object); \ |
(...skipping 24 matching lines...) |
105 | 99 |
106 #if ENABLE(GC_PROFILING) | 100 #if ENABLE(GC_PROFILING) |
107 static String classOf(const void* object) | 101 static String classOf(const void* object) |
108 { | 102 { |
109 if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) | 103 if (const GCInfo* gcInfo = Heap::findGCInfo(reinterpret_cast<Address>(const_cast<void*>(object)))) |
110 return gcInfo->m_className; | 104 return gcInfo->m_className; |
111 return "unknown"; | 105 return "unknown"; |
112 } | 106 } |
113 #endif | 107 #endif |
114 | 108 |
115 class GCForbiddenScope final { | |
116 public: | |
117 explicit GCForbiddenScope(ThreadState* state) | |
118 : m_state(state) | |
119 { | |
120 // Prevent nested collectGarbage() invocations. | |
121 m_state->enterGCForbiddenScope(); | |
122 } | |
123 | |
124 ~GCForbiddenScope() | |
125 { | |
126 m_state->leaveGCForbiddenScope(); | |
127 } | |
128 | |
129 private: | |
130 ThreadState* m_state; | |
131 }; | |
132 | |
133 class GCScope final { | |
134 public: | |
135 GCScope(ThreadState* state, ThreadState::StackState stackState, ThreadState::GCType gcType) | |
136 : m_state(state) | |
137 , m_gcForbiddenScope(state) | |
138 // See collectGarbageForTerminatingThread() comment on why a | |
139 // safepoint scope isn't entered for its GCScope. | |
140 , m_safePointScope(stackState, gcType != ThreadState::ThreadTerminationGC ? state : nullptr) | |
141 , m_gcType(gcType) | |
142 , m_parkedAllThreads(false) | |
143 { | |
144 TRACE_EVENT0("blink_gc", "Heap::GCScope"); | |
145 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | |
146 if (m_state->isMainThread()) | |
147 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | |
148 | |
149 ASSERT(m_state->checkThread()); | |
150 | |
151 // TODO(haraken): In the unlikely event that two threads decide | |
152 // to collect garbage at the same time, avoid doing two GCs in | |
153 // a row. | |
154 if (LIKELY(gcType != ThreadState::ThreadTerminationGC && ThreadState::stopThreads())) | |
155 m_parkedAllThreads = true; | |
156 | |
157 switch (gcType) { | |
158 case ThreadState::GCWithSweep: | |
159 case ThreadState::GCWithoutSweep: | |
160 m_visitor = adoptPtr(new MarkingVisitor<Visitor::GlobalMarking>()); | |
161 break; | |
162 case ThreadState::TakeSnapshot: | |
163 m_visitor = adoptPtr(new MarkingVisitor<Visitor::SnapshotMarking>()); | |
164 break; | |
165 case ThreadState::ThreadTerminationGC: | |
166 m_visitor = adoptPtr(new MarkingVisitor<Visitor::ThreadLocalMarking>()); | |
167 break; | |
168 default: | |
169 ASSERT_NOT_REACHED(); | |
170 } | |
171 | |
172 if (m_state->isMainThread()) | |
173 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | |
174 } | |
175 | |
176 bool allThreadsParked() const { return m_parkedAllThreads; } | |
177 Visitor* visitor() const { return m_visitor.get(); } | |
178 | |
179 ~GCScope() | |
180 { | |
181 // Only clean up if we parked all threads, in which case the GC happened | |
182 // and we need to resume the other threads. | |
183 if (LIKELY(m_gcType != ThreadState::ThreadTerminationGC && m_parkedAllThreads)) | |
184 ThreadState::resumeThreads(); | |
185 } | |
186 | |
187 private: | |
188 ThreadState* m_state; | |
189 // The ordering of the two scope objects matters: GCs must first be forbidden | |
190 // before entering the safe point scope. Prior to reaching the safe point, | |
191 // ThreadState::runScheduledGC() is called. See its comment for why we need | |
192 // to be in a GC forbidden scope when doing so. | |
193 GCForbiddenScope m_gcForbiddenScope; | |
194 SafePointScope m_safePointScope; | |
195 ThreadState::GCType m_gcType; | |
196 OwnPtr<Visitor> m_visitor; | |
197 bool m_parkedAllThreads; // False if we fail to park all threads | |
198 }; | |
199 | |
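The comment on the scope members above leans on a core C++ guarantee: non-static data members are constructed in declaration order (and destroyed in reverse order), independent of the member-initializer list. A minimal standalone illustration of why declaring the GC-forbidden scope before the safe point scope is sufficient (toy names, not Blink's classes):

    #include <cstdio>

    struct ForbidScope { ForbidScope() { std::puts("forbid GC"); } ~ForbidScope() { std::puts("allow GC"); } };
    struct SafePoint   { SafePoint()   { std::puts("enter safe point"); } ~SafePoint() { std::puts("leave safe point"); } };

    struct Scope {
        // Members are constructed top-to-bottom and destroyed bottom-to-top,
        // so GC is forbidden before the safe point is entered, and the safe
        // point is left before GC is allowed again.
        ForbidScope m_forbid;
        SafePoint m_safePoint;
    };

    int main()
    {
        Scope scope; // prints: forbid GC, enter safe point, leave safe point, allow GC
    }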
200 #if ENABLE(ASSERT) | 109 #if ENABLE(ASSERT) |
201 NO_SANITIZE_ADDRESS | 110 NO_SANITIZE_ADDRESS |
202 void HeapObjectHeader::zapMagic() | 111 void HeapObjectHeader::zapMagic() |
203 { | 112 { |
204 ASSERT(checkHeader()); | 113 ASSERT(checkHeader()); |
205 m_magic = zappedMagic; | 114 m_magic = zappedMagic; |
206 } | 115 } |
207 #endif | 116 #endif |
208 | 117 |
209 void HeapObjectHeader::finalize(Address object, size_t objectSize) | 118 void HeapObjectHeader::finalize(Address object, size_t objectSize) |
(...skipping 1698 matching lines...) |
1908 ASSERT(ThreadState::current()->isInGC()); | 1817 ASSERT(ThreadState::current()->isInGC()); |
1909 | 1818 |
1910 m_hasEntries = true; | 1819 m_hasEntries = true; |
1911 size_t index = hash(address); | 1820 size_t index = hash(address); |
1912 ASSERT(!(index & 1)); | 1821 ASSERT(!(index & 1)); |
1913 Address cachePage = roundToBlinkPageStart(address); | 1822 Address cachePage = roundToBlinkPageStart(address); |
1914 m_entries[index + 1] = m_entries[index]; | 1823 m_entries[index + 1] = m_entries[index]; |
1915 m_entries[index] = cachePage; | 1824 m_entries[index] = cachePage; |
1916 } | 1825 } |
1917 | 1826 |
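The fragment above ends HeapDoesNotContainCache::addEntry(): hash() yields an even index, so each cache set is a pair of slots, and the new page start is installed in the first slot while the previous occupant is demoted to the second, evicting the older of the two. A self-contained sketch of that two-way, set-associative scheme (the names, set count, and the 128 KB page size are illustrative assumptions):

    #include <cstddef>
    #include <cstdint>

    class TwoWayNegativeCache {
    public:
        bool lookup(uintptr_t address) const
        {
            size_t index = setIndexFor(address);
            uintptr_t page = pageStart(address);
            return m_entries[index] == page || m_entries[index + 1] == page;
        }

        void addEntry(uintptr_t address)
        {
            size_t index = setIndexFor(address);
            m_entries[index + 1] = m_entries[index]; // demote the previous entry
            m_entries[index] = pageStart(address);   // newest entry goes first
        }

    private:
        static const size_t pageSizeLog2 = 17; // assumed 128 KB heap pages
        static const size_t numberOfSets = 128;

        // An even index selects a set; index and index + 1 are its two ways.
        static size_t setIndexFor(uintptr_t address)
        {
            return ((address >> pageSizeLog2) % numberOfSets) * 2;
        }

        static uintptr_t pageStart(uintptr_t address)
        {
            return address & ~((uintptr_t(1) << pageSizeLog2) - 1);
        }

        uintptr_t m_entries[numberOfSets * 2] = {};
    };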
1918 void Heap::flushHeapDoesNotContainCache() | |
1919 { | |
1920 s_heapDoesNotContainCache->flush(); | |
1921 } | |
1922 | |
1923 void Heap::init() | |
1924 { | |
1925 ThreadState::init(); | |
1926 s_markingStack = new CallbackStack(); | |
1927 s_postMarkingCallbackStack = new CallbackStack(); | |
1928 s_globalWeakCallbackStack = new CallbackStack(); | |
1929 s_ephemeronStack = new CallbackStack(); | |
1930 s_heapDoesNotContainCache = new HeapDoesNotContainCache(); | |
1931 s_freePagePool = new FreePagePool(); | |
1932 s_orphanedPagePool = new OrphanedPagePool(); | |
1933 s_allocatedSpace = 0; | |
1934 s_allocatedObjectSize = 0; | |
1935 s_objectSizeAtLastGC = 0; | |
1936 s_markedObjectSize = 0; | |
1937 s_markedObjectSizeAtLastCompleteSweep = 0; | |
1938 s_persistentCount = 0; | |
1939 s_persistentCountAtLastGC = 0; | |
1940 s_collectedPersistentCount = 0; | |
1941 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
1942 s_estimatedMarkingTimePerByte = 0.0; | |
1943 | |
1944 GCInfoTable::init(); | |
1945 | |
1946 if (Platform::current() && Platform::current()->currentThread()) | |
1947 Platform::current()->registerMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | |
1948 } | |
1949 | |
1950 void Heap::shutdown() | |
1951 { | |
1952 if (Platform::current() && Platform::current()->currentThread()) | |
1953 Platform::current()->unregisterMemoryDumpProvider(BlinkGCMemoryDumpProvider::instance()); | |
1954 s_shutdownCalled = true; | |
1955 ThreadState::shutdownHeapIfNecessary(); | |
1956 } | |
1957 | |
1958 void Heap::doShutdown() | |
1959 { | |
1960 // We don't want to call doShutdown() twice. | |
1961 if (!s_markingStack) | |
1962 return; | |
1963 | |
1964 ASSERT(!ThreadState::attachedThreads().size()); | |
1965 delete s_heapDoesNotContainCache; | |
1966 s_heapDoesNotContainCache = nullptr; | |
1967 delete s_freePagePool; | |
1968 s_freePagePool = nullptr; | |
1969 delete s_orphanedPagePool; | |
1970 s_orphanedPagePool = nullptr; | |
1971 delete s_globalWeakCallbackStack; | |
1972 s_globalWeakCallbackStack = nullptr; | |
1973 delete s_postMarkingCallbackStack; | |
1974 s_postMarkingCallbackStack = nullptr; | |
1975 delete s_markingStack; | |
1976 s_markingStack = nullptr; | |
1977 delete s_ephemeronStack; | |
1978 s_ephemeronStack = nullptr; | |
1979 delete s_regionTree; | |
1980 s_regionTree = nullptr; | |
1981 GCInfoTable::shutdown(); | |
1982 ThreadState::shutdown(); | |
1983 ASSERT(Heap::allocatedSpace() == 0); | |
1984 } | |
1985 | |
1986 #if ENABLE(ASSERT) | |
1987 BasePage* Heap::findPageFromAddress(Address address) | |
1988 { | |
1989 MutexLocker lock(ThreadState::threadAttachMutex()); | |
1990 for (ThreadState* state : ThreadState::attachedThreads()) { | |
1991 if (BasePage* page = state->findPageFromAddress(address)) | |
1992 return page; | |
1993 } | |
1994 return nullptr; | |
1995 } | |
1996 #endif | |
1997 | |
1998 Address Heap::checkAndMarkPointer(Visitor* visitor, Address address) | |
1999 { | |
2000 ASSERT(ThreadState::current()->isInGC()); | |
2001 | |
2002 #if !ENABLE(ASSERT) | |
2003 if (s_heapDoesNotContainCache->lookup(address)) | |
2004 return nullptr; | |
2005 #endif | |
2006 | |
2007 if (BasePage* page = lookup(address)) { | |
2008 ASSERT(page->contains(address)); | |
2009 ASSERT(!page->orphaned()); | |
2010 ASSERT(!s_heapDoesNotContainCache->lookup(address)); | |
2011 page->checkAndMarkPointer(visitor, address); | |
2012 return address; | |
2013 } | |
2014 | |
2015 #if !ENABLE(ASSERT) | |
2016 s_heapDoesNotContainCache->addEntry(address); | |
2017 #else | |
2018 if (!s_heapDoesNotContainCache->lookup(address)) | |
2019 s_heapDoesNotContainCache->addEntry(address); | |
2020 #endif | |
2021 return nullptr; | |
2022 } | |
2023 | |
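checkAndMarkPointer() is the leaf of conservative scanning: any machine word might be a pointer, so it is vetted first against the negative cache, then against the page lookup, and is marked only if it really lands in a live heap page. A hedged sketch of the calling pattern, assuming Blink's Visitor, Address, and Heap declarations are in scope (the loop is illustrative; the actual stack scanning lives elsewhere in the thread state machinery):

    // Illustrative only: treat every word-aligned slot in [start, end) as a
    // candidate pointer into the garbage-collected heap.
    static void scanRangeConservatively(Visitor* visitor, Address* start, Address* end)
    {
        for (Address* slot = start; slot < end; ++slot) {
            // Returns the address if it points into a live heap page (marking
            // the containing object as a side effect), or nullptr otherwise.
            Heap::checkAndMarkPointer(visitor, *slot);
        }
    }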
2024 #if ENABLE(GC_PROFILING) | |
2025 const GCInfo* Heap::findGCInfo(Address address) | |
2026 { | |
2027 return ThreadState::findGCInfoFromAllThreads(address); | |
2028 } | |
2029 #endif | |
2030 | |
2031 #if ENABLE(GC_PROFILING) | |
2032 String Heap::createBacktraceString() | |
2033 { | |
2034 int framesToShow = 3; | |
2035 int stackFrameSize = 16; | |
2036 ASSERT(stackFrameSize >= framesToShow); | |
2037 using FramePointer = void*; | |
2038 FramePointer* stackFrame = static_cast<FramePointer*>(alloca(sizeof(FramePointer) * stackFrameSize)); | |
2039 WTFGetBacktrace(stackFrame, &stackFrameSize); | |
2040 | |
2041 StringBuilder builder; | |
2042 builder.append("Persistent"); | |
2043 bool didAppendFirstName = false; | |
2044 // Skip frames before/including "blink::Persistent". | |
2045 bool didSeePersistent = false; | |
2046 for (int i = 0; i < stackFrameSize && framesToShow > 0; ++i) { | |
2047 FrameToNameScope frameToName(stackFrame[i]); | |
2048 if (!frameToName.nullableName()) | |
2049 continue; | |
2050 if (strstr(frameToName.nullableName(), "blink::Persistent")) { | |
2051 didSeePersistent = true; | |
2052 continue; | |
2053 } | |
2054 if (!didSeePersistent) | |
2055 continue; | |
2056 if (!didAppendFirstName) { | |
2057 didAppendFirstName = true; | |
2058 builder.append(" ... Backtrace:"); | |
2059 } | |
2060 builder.append("\n\t"); | |
2061 builder.append(frameToName.nullableName()); | |
2062 --framesToShow; | |
2063 } | |
2064 return builder.toString().replace("blink::", ""); | |
2065 } | |
2066 #endif | |
2067 | |
2068 void Heap::pushTraceCallback(void* object, TraceCallback callback) | |
2069 { | |
2070 ASSERT(ThreadState::current()->isInGC()); | |
2071 | |
2072 // Trace should never reach an orphaned page. | |
2073 ASSERT(!Heap::orphanedPagePool()->contains(object)); | |
2074 CallbackStack::Item* slot = s_markingStack->allocateEntry(); | |
2075 *slot = CallbackStack::Item(object, callback); | |
2076 } | |
2077 | |
2078 bool Heap::popAndInvokeTraceCallback(Visitor* visitor) | |
2079 { | |
2080 CallbackStack::Item* item = s_markingStack->pop(); | |
2081 if (!item) | |
2082 return false; | |
2083 item->call(visitor); | |
2084 return true; | |
2085 } | |
2086 | |
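pushTraceCallback() and popAndInvokeTraceCallback() together form the marking worklist: marking pushes (object, callback) pairs, and the marking loop pops and invokes entries until the stack drains, with each invocation free to push more work. The same shape in a self-contained sketch (a toy stand-in, not Blink's CallbackStack):

    #include <vector>

    class Visitor;
    typedef void (*TraceCallback)(Visitor*, void*);

    struct WorkItem { void* object; TraceCallback callback; };

    class MarkingWorklist {
    public:
        void push(void* object, TraceCallback callback) { m_items.push_back({object, callback}); }

        // Returns false once the worklist is drained.
        bool popAndInvoke(Visitor* visitor)
        {
            if (m_items.empty())
                return false;
            WorkItem item = m_items.back();
            m_items.pop_back();
            item.callback(visitor, item.object); // may push more work
            return true;
        }

    private:
        std::vector<WorkItem> m_items;
    };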
2087 void Heap::pushPostMarkingCallback(void* object, TraceCallback callback) | |
2088 { | |
2089 ASSERT(ThreadState::current()->isInGC()); | |
2090 | |
2091 // Trace should never reach an orphaned page. | |
2092 ASSERT(!Heap::orphanedPagePool()->contains(object)); | |
2093 CallbackStack::Item* slot = s_postMarkingCallbackStack->allocateEntry(); | |
2094 *slot = CallbackStack::Item(object, callback); | |
2095 } | |
2096 | |
2097 bool Heap::popAndInvokePostMarkingCallback(Visitor* visitor) | |
2098 { | |
2099 if (CallbackStack::Item* item = s_postMarkingCallbackStack->pop()) { | |
2100 item->call(visitor); | |
2101 return true; | |
2102 } | |
2103 return false; | |
2104 } | |
2105 | |
2106 void Heap::pushGlobalWeakCallback(void** cell, WeakCallback callback) | |
2107 { | |
2108 ASSERT(ThreadState::current()->isInGC()); | |
2109 | |
2110 // Trace should never reach an orphaned page. | |
2111 ASSERT(!Heap::orphanedPagePool()->contains(cell)); | |
2112 CallbackStack::Item* slot = s_globalWeakCallbackStack->allocateEntry(); | |
2113 *slot = CallbackStack::Item(cell, callback); | |
2114 } | |
2115 | |
2116 void Heap::pushThreadLocalWeakCallback(void* closure, void* object, WeakCallback callback) | |
2117 { | |
2118 ASSERT(ThreadState::current()->isInGC()); | |
2119 | |
2120 // Trace should never reach an orphaned page. | |
2121 ASSERT(!Heap::orphanedPagePool()->contains(object)); | |
2122 ThreadState* state = pageFromObject(object)->heap()->threadState(); | |
2123 state->pushThreadLocalWeakCallback(closure, callback); | |
2124 } | |
2125 | |
2126 bool Heap::popAndInvokeGlobalWeakCallback(Visitor* visitor) | |
2127 { | |
2128 if (CallbackStack::Item* item = s_globalWeakCallbackStack->pop()) { | |
2129 item->call(visitor); | |
2130 return true; | |
2131 } | |
2132 return false; | |
2133 } | |
2134 | |
2135 void Heap::registerWeakTable(void* table, EphemeronCallback iterationCallback, EphemeronCallback iterationDoneCallback) | |
2136 { | |
2137 ASSERT(ThreadState::current()->isInGC()); | |
2138 | |
2139 // Trace should never reach an orphaned page. | |
2140 ASSERT(!Heap::orphanedPagePool()->contains(table)); | |
2141 CallbackStack::Item* slot = s_ephemeronStack->allocateEntry(); | |
2142 *slot = CallbackStack::Item(table, iterationCallback); | |
2143 | |
2144 // Register a post-marking callback to tell the tables that | |
2145 // ephemeron iteration is complete. | |
2146 pushPostMarkingCallback(table, iterationDoneCallback); | |
2147 } | |
2148 | |
2149 #if ENABLE(ASSERT) | |
2150 bool Heap::weakTableRegistered(const void* table) | |
2151 { | |
2152 ASSERT(s_ephemeronStack); | |
2153 return s_ephemeronStack->hasCallbackForObject(table); | |
2154 } | |
2155 #endif | |
2156 | |
2157 void Heap::preGC() | |
2158 { | |
2159 ASSERT(!ThreadState::current()->isInGC()); | |
2160 for (ThreadState* state : ThreadState::attachedThreads()) | |
2161 state->preGC(); | |
2162 } | |
2163 | |
2164 void Heap::postGC(ThreadState::GCType gcType) | |
2165 { | |
2166 ASSERT(ThreadState::current()->isInGC()); | |
2167 for (ThreadState* state : ThreadState::attachedThreads()) | |
2168 state->postGC(gcType); | |
2169 } | |
2170 | |
2171 const char* Heap::gcReasonString(GCReason reason) | |
2172 { | |
2173 switch (reason) { | |
2174 #define STRINGIFY_REASON(reason) case reason: return #reason; | |
2175 STRINGIFY_REASON(IdleGC); | |
2176 STRINGIFY_REASON(PreciseGC); | |
2177 STRINGIFY_REASON(ConservativeGC); | |
2178 STRINGIFY_REASON(ForcedGC); | |
2179 #undef STRINGIFY_REASON | |
2180 case NumberOfGCReason: ASSERT_NOT_REACHED(); | |
2181 } | |
2182 return "<Unknown>"; | |
2183 } | |
2184 | |
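gcReasonString() relies on the preprocessor's stringizing operator: inside STRINGIFY_REASON, #reason turns the enumerator token into a string literal, so each case both matches the value and returns its own name. The same pattern on a toy enum:

    #include <cstdio>

    enum Color { Red, Green };

    const char* colorString(Color color)
    {
        switch (color) {
    #define STRINGIFY_COLOR(name) case name: return #name;
        STRINGIFY_COLOR(Red)   // expands to: case Red: return "Red";
        STRINGIFY_COLOR(Green) // expands to: case Green: return "Green";
    #undef STRINGIFY_COLOR
        }
        return "<Unknown>";
    }

    int main() { std::puts(colorString(Green)); } // prints "Green"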
2185 void Heap::collectGarbage(ThreadState::StackState stackState, ThreadState::GCType gcType, GCReason reason) | |
2186 { | |
2187 ThreadState* state = ThreadState::current(); | |
2188 // Nested collectGarbage() invocations aren't supported. | |
2189 RELEASE_ASSERT(!state->isGCForbidden()); | |
2190 state->completeSweep(); | |
2191 | |
2192 GCScope gcScope(state, stackState, gcType); | |
2193 // Check if we successfully parked the other threads. If not we bail out of | |
2194 // the GC. | |
2195 if (!gcScope.allThreadsParked()) | |
2196 return; | |
2197 | |
2198 if (state->isMainThread()) | |
2199 ScriptForbiddenScope::enter(); | |
2200 | |
2201 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", | |
2202 "lazySweeping", gcType == ThreadState::GCWithoutSweep, | |
2203 "gcReason", gcReasonString(reason)); | |
2204 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | |
2205 double timeStamp = WTF::currentTimeMS(); | |
2206 | |
2207 if (gcType == ThreadState::TakeSnapshot) | |
2208 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | |
2209 | |
2210 // Disallow allocation during garbage collection (but not during the | |
2211 // finalization that happens when the gcScope is torn down). | |
2212 ThreadState::NoAllocationScope noAllocationScope(state); | |
2213 | |
2214 preGC(); | |
2215 | |
2216 StackFrameDepthScope stackDepthScope; | |
2217 | |
2218 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); | |
2219 if (gcType != ThreadState::TakeSnapshot) | |
2220 Heap::resetHeapCounters(); | |
2221 | |
2222 // 1. Trace persistent roots. | |
2223 ThreadState::visitPersistentRoots(gcScope.visitor()); | |
2224 | |
2225 // 2. Trace objects reachable from the stack. We do this independently of | |
2226 // the given stackState since other threads might have a different stack state. | |
2227 ThreadState::visitStackRoots(gcScope.visitor()); | |
2228 | |
2229 // 3. Transitive closure to trace objects including ephemerons. | |
2230 processMarkingStack(gcScope.visitor()); | |
2231 | |
2232 postMarkingProcessing(gcScope.visitor()); | |
2233 globalWeakProcessing(gcScope.visitor()); | |
2234 | |
2235 // Now we can delete all orphaned pages because there are no dangling | |
2236 // pointers to the orphaned pages. (If we have such dangling pointers, | |
2237 // we should have crashed during marking before getting here.) | |
2238 orphanedPagePool()->decommitOrphanedPages(); | |
2239 | |
2240 double markingTimeInMilliseconds = WTF::currentTimeMS() - timeStamp; | |
2241 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; | |
2242 | |
2243 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", marking
TimeInMilliseconds, 0, 10 * 1000, 50); | |
2244 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap:
:allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | |
2245 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", He
ap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | |
2246 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, Number
OfGCReason); | |
2247 Heap::reportMemoryUsageHistogram(); | |
2248 WTF::Partitions::reportMemoryUsageHistogram(); | |
2249 | |
2250 postGC(gcType); | |
2251 | |
2252 if (state->isMainThread()) | |
2253 ScriptForbiddenScope::exit(); | |
2254 } | |
2255 | |
2256 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | |
2257 { | |
2258 { | |
2259 // A thread-specific termination GC must not allow other global GCs to go | |
2260 // ahead while it is running, hence the termination GC does not enter a | |
2261 // safepoint. Accordingly, GCScope does not enter a safepoint scope for | |
2262 // ThreadTerminationGC. | |
2263 GCScope gcScope(state, ThreadState::NoHeapPointersOnStack, ThreadState::ThreadTerminationGC); | |
2264 | |
2265 ThreadState::NoAllocationScope noAllocationScope(state); | |
2266 | |
2267 state->preGC(); | |
2268 StackFrameDepthScope stackDepthScope; | |
2269 | |
2270 // 1. Trace the thread local persistent roots. For thread local GCs we | |
2271 // don't trace the stack (i.e. no conservative scanning) since this is | |
2272 // only called during thread shutdown where there should be no objects | |
2273 // on the stack. | |
2274 // We also assume that orphaned pages have no objects reachable from | |
2275 // persistent handles on other threads or CrossThreadPersistents. The | |
2276 // only cases where this could happen are a subsequent conservative | |
2277 // global GC finding a "pointer" on the stack, or a programming | |
2278 // error where an object has a dangling cross-thread pointer to an | |
2279 // object on this heap. | |
2280 state->visitPersistents(gcScope.visitor()); | |
2281 | |
2282 // 2. Trace objects reachable from the thread's persistent roots | |
2283 // including ephemerons. | |
2284 processMarkingStack(gcScope.visitor()); | |
2285 | |
2286 postMarkingProcessing(gcScope.visitor()); | |
2287 globalWeakProcessing(gcScope.visitor()); | |
2288 | |
2289 state->postGC(ThreadState::GCWithSweep); | |
2290 } | |
2291 state->preSweep(); | |
2292 } | |
2293 | |
2294 void Heap::processMarkingStack(Visitor* visitor) | |
2295 { | |
2296 // Ephemeron fixed point loop. | |
2297 do { | |
2298 { | |
2299 // Iteratively mark all objects that are reachable from the objects | |
2300 // currently pushed onto the marking stack. | |
2301 TRACE_EVENT0("blink_gc", "Heap::processMarkingStackSingleThreaded"); | |
2302 while (popAndInvokeTraceCallback(visitor)) { } | |
2303 } | |
2304 | |
2305 { | |
2306 // Mark any strong pointers that have now become reachable in | |
2307 // ephemeron maps. | |
2308 TRACE_EVENT0("blink_gc", "Heap::processEphemeronStack"); | |
2309 s_ephemeronStack->invokeEphemeronCallbacks(visitor); | |
2310 } | |
2311 | |
2312 // Rerun loop if ephemeron processing queued more objects for tracing. | |
2313 } while (!s_markingStack->isEmpty()); | |
2314 } | |
2315 | |
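processMarkingStack() is a textbook fixed-point iteration: draining the marking stack can make new ephemeron keys reachable, and running the ephemeron callbacks can push fresh marking work, so the two phases alternate until a whole pass produces nothing. A self-contained toy model of that loop (integers as objects, a multimap as the ephemeron table; purely illustrative):

    #include <map>
    #include <set>
    #include <vector>

    struct ToyMarker {
        std::set<int> marked;
        std::vector<int> worklist;          // newly marked, not yet processed
        std::multimap<int, int> ephemerons; // key => value: value lives iff key lives

        void mark(int object)
        {
            if (marked.insert(object).second)
                worklist.push_back(object);
        }

        void processUntilFixedPoint()
        {
            do {
                // Phase 1: drain the marking worklist (real code would trace
                // each object's outgoing references here).
                worklist.clear();

                // Phase 2: an ephemeron whose key is now marked revives its
                // value, which may queue fresh work for the next iteration.
                for (const auto& entry : ephemerons) {
                    if (marked.count(entry.first))
                        mark(entry.second);
                }
            } while (!worklist.empty());
        }
    };

With ephemerons {1 => 2, 2 => 3} and mark(1) as the root, the loop runs three times: the first pass revives 2, the second revives 3, and the third finds nothing new and terminates.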
2316 void Heap::postMarkingProcessing(Visitor* visitor) | |
2317 { | |
2318 TRACE_EVENT0("blink_gc", "Heap::postMarkingProcessing"); | |
2319 // Call post-marking callbacks including: | |
2320 // 1. the ephemeronIterationDone callbacks on weak tables to do cleanup | |
2321 // (specifically to clear the queued bits for weak hash tables), and | |
2322 // 2. the markNoTracing callbacks on collection backings to mark them | |
2323 // if they are only reachable from their front objects. | |
2324 while (popAndInvokePostMarkingCallback(visitor)) { } | |
2325 | |
2326 s_ephemeronStack->clear(); | |
2327 | |
2328 // Post-marking callbacks should not trace any objects and | |
2329 // therefore the marking stack should be empty after the | |
2330 // post-marking callbacks. | |
2331 ASSERT(s_markingStack->isEmpty()); | |
2332 } | |
2333 | |
2334 void Heap::globalWeakProcessing(Visitor* visitor) | |
2335 { | |
2336 TRACE_EVENT0("blink_gc", "Heap::globalWeakProcessing"); | |
2337 // Call weak callbacks on objects that may now be pointing to dead objects. | |
2338 while (popAndInvokeGlobalWeakCallback(visitor)) { } | |
2339 | |
2340 // It is not permitted to trace pointers of live objects in the weak | |
2341 // callback phase, so the marking stack should still be empty here. | |
2342 ASSERT(s_markingStack->isEmpty()); | |
2343 } | |
2344 | |
2345 void Heap::collectAllGarbage() | |
2346 { | |
2347 // FIXME: Oilpan: we should perform a single GC and everything | |
2348 // should die. Unfortunately that is not the case for all objects | |
2349 // because the hierarchy was not completely moved to the heap and | |
2350 // some heap allocated objects own objects that contain persistents | |
2351 // pointing to other heap allocated objects. | |
2352 size_t previousLiveObjects = 0; | |
2353 for (int i = 0; i < 5; ++i) { | |
2354 collectGarbage(ThreadState::NoHeapPointersOnStack, ThreadState::GCWithSweep, ForcedGC); | |
2355 size_t liveObjects = Heap::markedObjectSize(); | |
2356 if (liveObjects == previousLiveObjects) | |
2357 break; | |
2358 previousLiveObjects = liveObjects; | |
2359 } | |
2360 } | |
2361 | |
2362 double Heap::estimatedMarkingTime() | |
2363 { | |
2364 ASSERT(ThreadState::current()->isMainThread()); | |
2365 | |
2366 // Use 8 ms as initial estimated marking time. | |
2367 // 8 ms is long enough for low-end mobile devices to mark common | |
2368 // real-world object graphs. | |
2369 if (s_estimatedMarkingTimePerByte == 0) | |
2370 return 0.008; | |
2371 | |
2372 // Assuming that the collection rate of this GC will be roughly equal to | |
2373 // the collection rate of the last GC, estimate the marking time of this GC. | |
2374 return s_estimatedMarkingTimePerByte * (Heap::allocatedObjectSize() + Heap::markedObjectSize()); | |
2375 } | |
2376 | |
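A worked example with illustrative numbers: if the previous GC marked a 50 MB object graph in 5 ms, collectGarbage() records s_estimatedMarkingTimePerByte = 0.005 s / 50,000,000 B = 1e-10 seconds per byte. If 80 MB of objects are now allocated or marked, estimatedMarkingTime() returns 1e-10 * 80,000,000 = 0.008 s, which happens to match the 8 ms initial default used before any GC has run.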
2377 void Heap::reportMemoryUsageHistogram() | |
2378 { | |
2379 static size_t supportedMaxSizeInMB = 4 * 1024; | |
2380 static size_t observedMaxSizeInMB = 0; | |
2381 | |
2382 // We only report memory usage for the main thread. | |
2383 if (!isMainThread()) | |
2384 return; | |
2385 // +1 is for rounding up the sizeInMB. | |
2386 size_t sizeInMB = Heap::allocatedSpace() / 1024 / 1024 + 1; | |
2387 if (sizeInMB >= supportedMaxSizeInMB) | |
2388 sizeInMB = supportedMaxSizeInMB - 1; | |
2389 if (sizeInMB > observedMaxSizeInMB) { | |
2390 // Send a UseCounter only when we see the highest memory usage | |
2391 // we've ever seen. | |
2392 Platform::current()->histogramEnumeration("BlinkGC.CommittedSize", sizeI
nMB, supportedMaxSizeInMB); | |
2393 observedMaxSizeInMB = sizeInMB; | |
2394 } | |
2395 } | |
2396 | |
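For example, 1.5 GB of allocated space gives sizeInMB = 1536 + 1 = 1537, which is below the 4096 MB cap and is reported as-is; a subsequent reading of 1537 MB or less emits nothing, so the histogram effectively records only successive high-water marks of committed heap within a session.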
2397 void Heap::reportMemoryUsageForTracing() | |
2398 { | |
2399 bool gcTracingEnabled; | |
2400 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | |
2401 if (!gcTracingEnabled) | |
2402 return; | |
2403 | |
2404 // These values are divided by 1024 to avoid overflow in practical cases (TRACE_COUNTER values are 32-bit ints). | |
2405 // They are capped to INT_MAX just in case. | |
2406 TRACE_COUNTER1("blink_gc", "Heap::allocatedObjectSizeKB", std::min(Heap::allocatedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | |
2407 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeKB", std::min(Heap::markedObjectSize() / 1024, static_cast<size_t>(INT_MAX))); | |
2408 TRACE_COUNTER1("blink_gc", "Heap::markedObjectSizeAtLastCompleteSweepKB", std::min(Heap::markedObjectSizeAtLastCompleteSweep() / 1024, static_cast<size_t>(INT_MAX))); | |
2409 TRACE_COUNTER1("blink_gc", "Heap::allocatedSpaceKB", std::min(Heap::allocatedSpace() / 1024, static_cast<size_t>(INT_MAX))); | |
2410 TRACE_COUNTER1("blink_gc", "Heap::objectSizeAtLastGCKB", std::min(Heap::objectSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | |
2411 TRACE_COUNTER1("blink_gc", "Heap::persistentCount", std::min(Heap::persistentCount(), static_cast<size_t>(INT_MAX))); | |
2412 TRACE_COUNTER1("blink_gc", "Heap::persistentCountAtLastGC", std::min(Heap::persistentCountAtLastGC(), static_cast<size_t>(INT_MAX))); | |
2413 TRACE_COUNTER1("blink_gc", "Heap::collectedPersistentCount", std::min(Heap::collectedPersistentCount(), static_cast<size_t>(INT_MAX))); | |
2414 TRACE_COUNTER1("blink_gc", "Heap::partitionAllocSizeAtLastGCKB", std::min(Heap::partitionAllocSizeAtLastGC() / 1024, static_cast<size_t>(INT_MAX))); | |
2415 TRACE_COUNTER1("blink_gc", "Partitions::totalSizeOfCommittedPagesKB", std::min(WTF::Partitions::totalSizeOfCommittedPages() / 1024, static_cast<size_t>(INT_MAX))); | |
2416 } | |
2417 | |
2418 size_t Heap::objectPayloadSizeForTesting() | |
2419 { | |
2420 size_t objectPayloadSize = 0; | |
2421 for (ThreadState* state : ThreadState::attachedThreads()) { | |
2422 state->setGCState(ThreadState::GCRunning); | |
2423 state->makeConsistentForGC(); | |
2424 objectPayloadSize += state->objectPayloadSizeForTesting(); | |
2425 state->setGCState(ThreadState::EagerSweepScheduled); | |
2426 state->setGCState(ThreadState::Sweeping); | |
2427 state->setGCState(ThreadState::NoGCScheduled); | |
2428 } | |
2429 return objectPayloadSize; | |
2430 } | |
2431 | |
2432 BasePage* Heap::lookup(Address address) | |
2433 { | |
2434 ASSERT(ThreadState::current()->isInGC()); | |
2435 if (!s_regionTree) | |
2436 return nullptr; | |
2437 if (PageMemoryRegion* region = s_regionTree->lookup(address)) { | |
2438 BasePage* page = region->pageFromAddress(address); | |
2439 return page && !page->orphaned() ? page : nullptr; | |
2440 } | |
2441 return nullptr; | |
2442 } | |
2443 | |
2444 static Mutex& regionTreeMutex() | |
2445 { | |
2446 AtomicallyInitializedStaticReference(Mutex, mutex, new Mutex); | |
2447 return mutex; | |
2448 } | |
2449 | |
2450 void Heap::removePageMemoryRegion(PageMemoryRegion* region) | |
2451 { | |
2452 // Deletion of large objects (and thus their regions) can happen | |
2453 // concurrently on sweeper threads. Removal can also happen during thread | |
2454 // shutdown, but that case is safe. Regardless, we make all removals | |
2455 // mutually exclusive. | |
2456 MutexLocker locker(regionTreeMutex()); | |
2457 RegionTree::remove(region, &s_regionTree); | |
2458 } | |
2459 | |
2460 void Heap::addPageMemoryRegion(PageMemoryRegion* region) | |
2461 { | |
2462 MutexLocker locker(regionTreeMutex()); | |
2463 RegionTree::add(new RegionTree(region), &s_regionTree); | |
2464 } | |
2465 | |
2466 PageMemoryRegion* Heap::RegionTree::lookup(Address address) | |
2467 { | |
2468 RegionTree* current = s_regionTree; | |
2469 while (current) { | |
2470 Address base = current->m_region->base(); | |
2471 if (address < base) { | |
2472 current = current->m_left; | |
2473 continue; | |
2474 } | |
2475 if (address >= base + current->m_region->size()) { | |
2476 current = current->m_right; | |
2477 continue; | |
2478 } | |
2479 ASSERT(current->m_region->contains(address)); | |
2480 return current->m_region; | |
2481 } | |
2482 return nullptr; | |
2483 } | |
2484 | |
2485 void Heap::RegionTree::add(RegionTree* newTree, RegionTree** context) | |
2486 { | |
2487 ASSERT(newTree); | |
2488 Address base = newTree->m_region->base(); | |
2489 for (RegionTree* current = *context; current; current = *context) { | |
2490 ASSERT(!current->m_region->contains(base)); | |
2491 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
2492 } | |
2493 *context = newTree; | |
2494 } | |
2495 | |
2496 void Heap::RegionTree::remove(PageMemoryRegion* region, RegionTree** context) | |
2497 { | |
2498 ASSERT(region); | |
2499 ASSERT(context); | |
2500 Address base = region->base(); | |
2501 RegionTree* current = *context; | |
2502 for (; current; current = *context) { | |
2503 if (region == current->m_region) | |
2504 break; | |
2505 context = (base < current->m_region->base()) ? &current->m_left : &current->m_right; | |
2506 } | |
2507 | |
2508 // Shutdown via detachMainThread might not have populated the region tree. | |
2509 if (!current) | |
2510 return; | |
2511 | |
2512 *context = nullptr; | |
2513 if (current->m_left) { | |
2514 add(current->m_left, context); | |
2515 current->m_left = nullptr; | |
2516 } | |
2517 if (current->m_right) { | |
2518 add(current->m_right, context); | |
2519 current->m_right = nullptr; | |
2520 } | |
2521 delete current; | |
2522 } | |
2523 | |
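RegionTree is an unbalanced binary search tree keyed by region base address; remove() splices a node out not with the classic successor swap but by re-inserting its two subtrees at the vacated link. Because every key in the right subtree is larger than every key in the left, the left subtree takes the vacated slot and the right subtree lands on its rightmost spine, preserving the search invariant. A self-contained sketch of that removal strategy with a toy integer-keyed node (not Blink's types):

    struct Node {
        int key;
        Node* left = nullptr;
        Node* right = nullptr;
    };

    static void add(Node* node, Node** context)
    {
        for (Node* current = *context; current; current = *context)
            context = (node->key < current->key) ? &current->left : &current->right;
        *context = node;
    }

    static void remove(int key, Node** context)
    {
        Node* current = *context;
        for (; current; current = *context) {
            if (key == current->key)
                break;
            context = (key < current->key) ? &current->left : &current->right;
        }
        if (!current)
            return;

        *context = nullptr;
        if (current->left) {
            add(current->left, context);  // left subtree fills the vacated link
            current->left = nullptr;
        }
        if (current->right) {
            add(current->right, context); // descends the left subtree's right spine
            current->right = nullptr;
        }
        delete current;
    }

The trade-off: removal costs a descent proportional to the height of the left subtree's right spine instead of the usual successor-splicing pointer surgery, but the code stays short and needs no parent pointers.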
2524 void Heap::resetHeapCounters() | |
2525 { | |
2526 ASSERT(ThreadState::current()->isInGC()); | |
2527 | |
2528 Heap::reportMemoryUsageForTracing(); | |
2529 | |
2530 s_objectSizeAtLastGC = s_allocatedObjectSize + s_markedObjectSize; | |
2531 s_partitionAllocSizeAtLastGC = WTF::Partitions::totalSizeOfCommittedPages(); | |
2532 s_allocatedObjectSize = 0; | |
2533 s_markedObjectSize = 0; | |
2534 s_persistentCountAtLastGC = s_persistentCount; | |
2535 s_collectedPersistentCount = 0; | |
2536 } | |
2537 | |
2538 CallbackStack* Heap::s_markingStack; | |
2539 CallbackStack* Heap::s_postMarkingCallbackStack; | |
2540 CallbackStack* Heap::s_globalWeakCallbackStack; | |
2541 CallbackStack* Heap::s_ephemeronStack; | |
2542 HeapDoesNotContainCache* Heap::s_heapDoesNotContainCache; | |
2543 bool Heap::s_shutdownCalled = false; | |
2544 FreePagePool* Heap::s_freePagePool; | |
2545 OrphanedPagePool* Heap::s_orphanedPagePool; | |
2546 Heap::RegionTree* Heap::s_regionTree = nullptr; | |
2547 size_t Heap::s_allocatedSpace = 0; | |
2548 size_t Heap::s_allocatedObjectSize = 0; | |
2549 size_t Heap::s_objectSizeAtLastGC = 0; | |
2550 size_t Heap::s_markedObjectSize = 0; | |
2551 size_t Heap::s_markedObjectSizeAtLastCompleteSweep = 0; | |
2552 size_t Heap::s_persistentCount = 0; | |
2553 size_t Heap::s_persistentCountAtLastGC = 0; | |
2554 size_t Heap::s_collectedPersistentCount = 0; | |
2555 size_t Heap::s_partitionAllocSizeAtLastGC = 0; | |
2556 double Heap::s_estimatedMarkingTimePerByte = 0.0; | |
2557 | |
2558 } // namespace blink | 1827 } // namespace blink |