| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 133 matching lines...) |
| 144 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | 144 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); |
| 145 | 145 |
| 146 m_state->checkThread(); | 146 m_state->checkThread(); |
| 147 | 147 |
| 148 // TODO(haraken): In the unlikely event that two threads decide | 148 // TODO(haraken): In the unlikely event that two threads decide |
| 149 // to collect garbage at the same time, avoid doing two GCs in | 149 // to collect garbage at the same time, avoid doing two GCs in |
| 150 // a row. | 150 // a row. |
| 151 if (LIKELY(gcType != ThreadState::ThreadTerminationGC && ThreadState::stopThreads())) | 151 if (LIKELY(gcType != ThreadState::ThreadTerminationGC && ThreadState::stopThreads())) |
| 152 m_parkedAllThreads = true; | 152 m_parkedAllThreads = true; |
| 153 | 153 |
| 154 switch (gcType) { |
| 155 case ThreadState::GCWithSweep: |
| 156 case ThreadState::GCWithoutSweep: |
| 157 m_visitor = adoptPtr(new MarkingVisitor<Visitor::GlobalMarking>()); |
| 158 break; |
| 159 case ThreadState::TakeSnapshot: |
| 160 m_visitor = adoptPtr(new MarkingVisitor<Visitor::SnapshotMarking>()); |
| 161 break; |
| 162 case ThreadState::ThreadTerminationGC: |
| 163 m_visitor = adoptPtr(new MarkingVisitor<Visitor::ThreadLocalMarking>()); |
| 164 break; |
| 165 default: |
| 166 ASSERT_NOT_REACHED(); |
| 167 } |
| 168 |
| 154 if (m_state->isMainThread()) | 169 if (m_state->isMainThread()) |
| 155 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | 170 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
| 156 } | 171 } |
| 157 | 172 |
| 158 bool allThreadsParked() { return m_parkedAllThreads; } | 173 bool allThreadsParked() const { return m_parkedAllThreads; } |
| 174 Visitor* visitor() const { return m_visitor.get(); } |
| 159 | 175 |
| 160 ~GCScope() | 176 ~GCScope() |
| 161 { | 177 { |
| 162 // Only clean up if we parked all threads, in which case the GC happened | 178 // Only clean up if we parked all threads, in which case the GC happened |
| 163 // and we need to resume the other threads. | 179 // and we need to resume the other threads. |
| 164 if (LIKELY(m_gcType != ThreadState::ThreadTerminationGC && m_parkedAllThreads)) | 180 if (LIKELY(m_gcType != ThreadState::ThreadTerminationGC && m_parkedAllThreads)) |
| 165 ThreadState::resumeThreads(); | 181 ThreadState::resumeThreads(); |
| 166 } | 182 } |
| 167 | 183 |
| 168 private: | 184 private: |
| 169 ThreadState* m_state; | 185 ThreadState* m_state; |
| 170 // The ordering of the two scope objects matters: GCs must first be forbidden | 186 // The ordering of the two scope objects matters: GCs must first be forbidden |
| 171 // before entering the safe point scope. Prior to reaching the safe point, | 187 // before entering the safe point scope. Prior to reaching the safe point, |
| 172 // ThreadState::runScheduledGC() is called. See its comment for why we need | 188 // ThreadState::runScheduledGC() is called. See its comment for why we need |
| 173 // to be in a GC forbidden scope when doing so. | 189 // to be in a GC forbidden scope when doing so. |
| 174 GCForbiddenScope m_gcForbiddenScope; | 190 GCForbiddenScope m_gcForbiddenScope; |
| 175 SafePointScope m_safePointScope; | 191 SafePointScope m_safePointScope; |
| 176 ThreadState::GCType m_gcType; | 192 ThreadState::GCType m_gcType; |
| 193 OwnPtr<Visitor> m_visitor; |
| 177 bool m_parkedAllThreads; // False if we fail to park all threads | 194 bool m_parkedAllThreads; // False if we fail to park all threads |
| 178 }; | 195 }; |
| 179 | 196 |
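The hunk above is the core of this change: GCScope now selects the marking visitor from the GC type in its constructor and owns it for the whole collection, so callers obtain it through visitor() instead of stack-allocating a MarkingVisitor themselves. The following standalone sketch illustrates that ownership pattern with stand-in types (GCType, VisitorSketch, GCScopeSketch are illustrative names, not Blink's API) and std::unique_ptr in place of OwnPtr/adoptPtr:

```cpp
// Minimal standalone sketch of the GCScope pattern above: an RAII scope that
// selects a visitor implementation from the GC type and owns it for the
// duration of the collection. All names here are illustrative stand-ins.
#include <cassert>
#include <memory>

enum class GCType { WithSweep, WithoutSweep, TakeSnapshot, ThreadTermination };

struct VisitorSketch {
    virtual ~VisitorSketch() = default;
    virtual void mark(void* object) = 0;
};

struct GlobalMarkingVisitor : VisitorSketch {
    void mark(void*) override { /* push onto the global marking stack */ }
};
struct SnapshotMarkingVisitor : VisitorSketch {
    void mark(void*) override { /* record the object for the heap snapshot */ }
};
struct ThreadLocalMarkingVisitor : VisitorSketch {
    void mark(void*) override { /* mark only objects on the dying thread's heap */ }
};

class GCScopeSketch {
public:
    explicit GCScopeSketch(GCType gcType)
    {
        switch (gcType) {
        case GCType::WithSweep:
        case GCType::WithoutSweep:
            m_visitor = std::make_unique<GlobalMarkingVisitor>();
            break;
        case GCType::TakeSnapshot:
            m_visitor = std::make_unique<SnapshotMarkingVisitor>();
            break;
        case GCType::ThreadTermination:
            m_visitor = std::make_unique<ThreadLocalMarkingVisitor>();
            break;
        }
    }
    VisitorSketch* visitor() const { return m_visitor.get(); }

private:
    std::unique_ptr<VisitorSketch> m_visitor;
};

int main()
{
    GCScopeSketch scope(GCType::WithSweep);
    assert(scope.visitor());
    scope.visitor()->mark(nullptr); // callers use scope.visitor() instead of a local visitor
}
```

Tying the visitor's lifetime to the scope also keeps it alive through all the marking steps that Heap::collectGarbage drives via gcScope.visitor() further down in this diff.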
| 180 #if ENABLE(ASSERT) | 197 #if ENABLE(ASSERT) |
| 181 NO_SANITIZE_ADDRESS | 198 NO_SANITIZE_ADDRESS |
| 182 void HeapObjectHeader::zapMagic() | 199 void HeapObjectHeader::zapMagic() |
| 183 { | 200 { |
| 184 checkHeader(); | 201 checkHeader(); |
| 185 m_magic = zappedMagic; | 202 m_magic = zappedMagic; |
| 186 } | 203 } |
| (...skipping 132 matching lines...) |
| 319 } | 336 } |
| 320 if (previousPage) { | 337 if (previousPage) { |
| 321 ASSERT(m_firstUnsweptPage); | 338 ASSERT(m_firstUnsweptPage); |
| 322 previousPage->m_next = m_firstPage; | 339 previousPage->m_next = m_firstPage; |
| 323 m_firstPage = m_firstUnsweptPage; | 340 m_firstPage = m_firstUnsweptPage; |
| 324 m_firstUnsweptPage = nullptr; | 341 m_firstUnsweptPage = nullptr; |
| 325 } | 342 } |
| 326 ASSERT(!m_firstUnsweptPage); | 343 ASSERT(!m_firstUnsweptPage); |
| 327 } | 344 } |
| 328 | 345 |
| 346 void BaseHeap::makeConsistentForMutator() |
| 347 { |
| 348 clearFreeLists(); |
| 349 ASSERT(isConsistentForGC()); |
| 350 ASSERT(!m_firstPage); |
| 351 |
| 352 // Drop marks from marked objects and rebuild free lists in preparation for |
| 353 // resuming the execution of mutators. |
| 354 BasePage* previousPage = nullptr; |
| 355 for (BasePage* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) { |
| 356 page->makeConsistentForMutator(); |
| 357 page->markAsSwept(); |
| 358 } |
| 359 if (previousPage) { |
| 360 ASSERT(m_firstUnsweptPage); |
| 361 previousPage->m_next = m_firstPage; |
| 362 m_firstPage = m_firstUnsweptPage; |
| 363 m_firstUnsweptPage = nullptr; |
| 364 } |
| 365 ASSERT(!m_firstUnsweptPage); |
| 366 } |
| 367 |
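Both BaseHeap::makeConsistentForGC (the tail of which is visible above) and the new BaseHeap::makeConsistentForMutator end with the same list manipulation: walk the unswept pages, remember the last one visited, then splice the whole unswept chain onto the front of the swept page list. A minimal standalone sketch of that splice, with PageSketch and HeapSketch as illustrative stand-ins for Blink's BasePage and BaseHeap:

```cpp
// Illustrative sketch (not Blink code) of the splice both makeConsistentFor*
// loops end with: every page on m_firstUnsweptPage is visited, the last node
// is remembered in previousPage, and the whole unswept chain then becomes the
// new head of m_firstPage.
#include <cassert>

struct PageSketch {
    PageSketch* m_next = nullptr;
    PageSketch* next() const { return m_next; }
};

struct HeapSketch {
    PageSketch* m_firstPage = nullptr;        // already-swept pages
    PageSketch* m_firstUnsweptPage = nullptr; // pages still carrying GC marks

    void spliceUnsweptPages()
    {
        PageSketch* previousPage = nullptr;
        for (PageSketch* page = m_firstUnsweptPage; page; previousPage = page, page = page->next()) {
            // Per-page work (makeConsistentForGC / makeConsistentForMutator) goes here.
        }
        if (previousPage) {
            // previousPage is the tail of the unswept chain; hook it up to the
            // previously swept pages and make the unswept chain the new head.
            previousPage->m_next = m_firstPage;
            m_firstPage = m_firstUnsweptPage;
            m_firstUnsweptPage = nullptr;
        }
    }
};

int main()
{
    PageSketch a, b, c;
    HeapSketch heap;
    heap.m_firstPage = &c;        // one already-swept page
    a.m_next = &b;
    heap.m_firstUnsweptPage = &a; // unswept chain a -> b
    heap.spliceUnsweptPages();
    assert(heap.m_firstPage == &a && b.next() == &c && !heap.m_firstUnsweptPage);
}
```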
| 329 size_t BaseHeap::objectPayloadSizeForTesting() | 368 size_t BaseHeap::objectPayloadSizeForTesting() |
| 330 { | 369 { |
| 331 ASSERT(isConsistentForGC()); | 370 ASSERT(isConsistentForGC()); |
| 332 ASSERT(!m_firstUnsweptPage); | 371 ASSERT(!m_firstUnsweptPage); |
| 333 | 372 |
| 334 size_t objectPayloadSize = 0; | 373 size_t objectPayloadSize = 0; |
| 335 for (BasePage* page = m_firstPage; page; page = page->next()) | 374 for (BasePage* page = m_firstPage; page; page = page->next()) |
| 336 objectPayloadSize += page->objectPayloadSizeForTesting(); | 375 objectPayloadSize += page->objectPayloadSizeForTesting(); |
| 337 return objectPayloadSize; | 376 return objectPayloadSize; |
| 338 } | 377 } |
| 339 | 378 |
| 340 void BaseHeap::prepareHeapForTermination() | 379 void BaseHeap::prepareHeapForTermination() |
| 341 { | 380 { |
| 342 ASSERT(!m_firstUnsweptPage); | 381 ASSERT(!m_firstUnsweptPage); |
| 343 for (BasePage* page = m_firstPage; page; page = page->next()) { | 382 for (BasePage* page = m_firstPage; page; page = page->next()) { |
| 344 page->setTerminating(); | 383 page->setTerminating(); |
| 345 } | 384 } |
| 346 } | 385 } |
| 347 | 386 |
| 348 void BaseHeap::prepareForSweep() | 387 void BaseHeap::prepareForSweep() |
| 349 { | 388 { |
| 350 ASSERT(!threadState()->isInGC()); | 389 ASSERT(threadState()->isInGC()); |
| 351 ASSERT(!m_firstUnsweptPage); | 390 ASSERT(!m_firstUnsweptPage); |
| 352 | 391 |
| 353 // Move all pages to a list of unswept pages. | 392 // Move all pages to a list of unswept pages. |
| 354 m_firstUnsweptPage = m_firstPage; | 393 m_firstUnsweptPage = m_firstPage; |
| 355 m_firstPage = nullptr; | 394 m_firstPage = nullptr; |
| 356 } | 395 } |
| 357 | 396 |
| 358 #if defined(ADDRESS_SANITIZER) | 397 #if defined(ADDRESS_SANITIZER) |
| 359 void BaseHeap::poisonHeap(ObjectsToPoison objectsToPoison, Poisoning poisoning) | 398 void BaseHeap::poisonHeap(ObjectsToPoison objectsToPoison, Poisoning poisoning) |
| 360 { | 399 { |
| (...skipping 855 matching lines...) |
| 1216 markedObjectSize += header->size(); | 1255 markedObjectSize += header->size(); |
| 1217 } else { | 1256 } else { |
| 1218 header->markDead(); | 1257 header->markDead(); |
| 1219 } | 1258 } |
| 1220 headerAddress += header->size(); | 1259 headerAddress += header->size(); |
| 1221 } | 1260 } |
| 1222 if (markedObjectSize) | 1261 if (markedObjectSize) |
| 1223 Heap::increaseMarkedObjectSize(markedObjectSize); | 1262 Heap::increaseMarkedObjectSize(markedObjectSize); |
| 1224 } | 1263 } |
| 1225 | 1264 |
| 1265 void NormalPage::makeConsistentForMutator() |
| 1266 { |
| 1267 size_t markedObjectSize = 0; |
| 1268 Address startOfGap = payload(); |
| 1269 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1270 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1271 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1272 // Check if this is a free list entry first, since we cannot call |
| 1273 // isMarked on a free list entry. |
| 1274 if (header->isFree()) { |
| 1275 headerAddress += header->size(); |
| 1276 continue; |
| 1277 } |
| 1278 header->checkHeader(); |
| 1279 |
| 1280 if (startOfGap != headerAddress) |
| 1281 heapForNormalPage()->addToFreeList(startOfGap, headerAddress - startOfGap); |
| 1282 if (header->isMarked()) { |
| 1283 header->unmark(); |
| 1284 markedObjectSize += header->size(); |
| 1285 } |
| 1286 headerAddress += header->size(); |
| 1287 startOfGap = headerAddress; |
| 1288 } |
| 1289 if (startOfGap != payloadEnd()) |
| 1290 heapForNormalPage()->addToFreeList(startOfGap, payloadEnd() - startOfGap); |
| 1291 |
| 1292 if (markedObjectSize) |
| 1293 Heap::increaseMarkedObjectSize(markedObjectSize); |
| 1294 } |
| 1295 |
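NormalPage::makeConsistentForMutator walks the page header by header: free-list entries extend the current gap, each non-free object flushes the accumulated gap into the free list and clears its mark if set, and the trailing gap up to payloadEnd() is flushed at the end. A simplified standalone sketch of that gap-coalescing walk over a flat list of cells (CellSketch and rebuildFreeList are illustrative names, and the flat vector is an assumed stand-in for the page's header sequence):

```cpp
// Simplified sketch (assumed stand-in, not Blink's page layout) of the
// gap-coalescing walk in NormalPage::makeConsistentForMutator: consecutive
// free entries merge into one free-list range, every non-free object flushes
// the gap accumulated before it, and the trailing gap is flushed at the end.
#include <cstddef>
#include <cstdio>
#include <vector>

struct CellSketch { std::size_t size; bool isFree; bool isMarked; };

void rebuildFreeList(std::vector<CellSketch>& cells)
{
    std::size_t payloadEnd = 0;
    for (const CellSketch& cell : cells)
        payloadEnd += cell.size;

    std::size_t address = 0;    // offset of the current header within the payload
    std::size_t startOfGap = 0; // offset where the current gap begins
    for (CellSketch& cell : cells) {
        if (cell.isFree) {
            address += cell.size; // a free entry just extends the current gap
            continue;
        }
        if (startOfGap != address)
            std::printf("addToFreeList(offset=%zu, size=%zu)\n", startOfGap, address - startOfGap);
        if (cell.isMarked)
            cell.isMarked = false; // drop the mark so the mutator sees a clean object
        address += cell.size;
        startOfGap = address;      // the next gap can only start after this object
    }
    if (startOfGap != payloadEnd)
        std::printf("addToFreeList(offset=%zu, size=%zu)\n", startOfGap, payloadEnd - startOfGap);
}

int main()
{
    // free(32), free(16), marked(64), free(32) -> one 48-byte gap, then a trailing 32-byte gap.
    std::vector<CellSketch> cells = { {32, true, false}, {16, true, false}, {64, false, true}, {32, true, false} };
    rebuildFreeList(cells);
    return 0;
}
```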
| 1226 #if defined(ADDRESS_SANITIZER) | 1296 #if defined(ADDRESS_SANITIZER) |
| 1227 void NormalPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning) | 1297 void NormalPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning) |
| 1228 { | 1298 { |
| 1229 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { | 1299 for (Address headerAddress = payload(); headerAddress < payloadEnd();) { |
| 1230 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); | 1300 HeapObjectHeader* header = reinterpret_cast<HeapObjectHeader*>(headerAddress); |
| 1231 ASSERT(header->size() < blinkPagePayloadSize()); | 1301 ASSERT(header->size() < blinkPagePayloadSize()); |
| 1232 // Check if this is a free list entry first, since we cannot call | 1302 // Check if this is a free list entry first, since we cannot call |
| 1233 // isMarked on a free list entry. | 1303 // isMarked on a free list entry. |
| 1234 if (header->isFree()) { | 1304 if (header->isFree()) { |
| 1235 headerAddress += header->size(); | 1305 headerAddress += header->size(); |
| (...skipping 272 matching lines...) |
| 1508 { | 1578 { |
| 1509 HeapObjectHeader* header = heapObjectHeader(); | 1579 HeapObjectHeader* header = heapObjectHeader(); |
| 1510 if (header->isMarked()) { | 1580 if (header->isMarked()) { |
| 1511 header->unmark(); | 1581 header->unmark(); |
| 1512 Heap::increaseMarkedObjectSize(size()); | 1582 Heap::increaseMarkedObjectSize(size()); |
| 1513 } else { | 1583 } else { |
| 1514 header->markDead(); | 1584 header->markDead(); |
| 1515 } | 1585 } |
| 1516 } | 1586 } |
| 1517 | 1587 |
| 1588 void LargeObjectPage::makeConsistentForMutator() |
| 1589 { |
| 1590 HeapObjectHeader* header = heapObjectHeader(); |
| 1591 if (header->isMarked()) { |
| 1592 header->unmark(); |
| 1593 Heap::increaseMarkedObjectSize(size()); |
| 1594 } |
| 1595 } |
| 1596 |
| 1518 #if defined(ADDRESS_SANITIZER) | 1597 #if defined(ADDRESS_SANITIZER) |
| 1519 void LargeObjectPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning) | 1598 void LargeObjectPage::poisonObjects(ObjectsToPoison objectsToPoison, Poisoning poisoning) |
| 1520 { | 1599 { |
| 1521 HeapObjectHeader* header = heapObjectHeader(); | 1600 HeapObjectHeader* header = heapObjectHeader(); |
| 1522 if (objectsToPoison == MarkedAndUnmarked || !header->isMarked()) { | 1601 if (objectsToPoison == MarkedAndUnmarked || !header->isMarked()) { |
| 1523 if (poisoning == SetPoison) | 1602 if (poisoning == SetPoison) |
| 1524 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1603 ASAN_POISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
| 1525 else | 1604 else |
| 1526 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); | 1605 ASAN_UNPOISON_MEMORY_REGION(header->payload(), header->payloadSize()); |
| 1527 } | 1606 } |
| (...skipping 405 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1933 ThreadState* state = ThreadState::current(); | 2012 ThreadState* state = ThreadState::current(); |
| 1934 // Nested collectGarbage() invocations aren't supported. | 2013 // Nested collectGarbage() invocations aren't supported. |
| 1935 RELEASE_ASSERT(!state->isGCForbidden()); | 2014 RELEASE_ASSERT(!state->isGCForbidden()); |
| 1936 state->completeSweep(); | 2015 state->completeSweep(); |
| 1937 | 2016 |
| 1938 GCScope gcScope(state, stackState, gcType); | 2017 GCScope gcScope(state, stackState, gcType); |
| 1939 // Check if we successfully parked the other threads. If not, we bail out of | 2018 // Check if we successfully parked the other threads. If not, we bail out of |
| 1940 // the GC. | 2019 // the GC. |
| 1941 if (!gcScope.allThreadsParked()) | 2020 if (!gcScope.allThreadsParked()) |
| 1942 return; | 2021 return; |
| 1943 MarkingVisitor<Visitor::GlobalMarking> visitor; | |
| 1944 | 2022 |
| 1945 if (state->isMainThread()) | 2023 if (state->isMainThread()) |
| 1946 ScriptForbiddenScope::enter(); | 2024 ScriptForbiddenScope::enter(); |
| 1947 | 2025 |
| 1948 s_lastGCWasConservative = false; | 2026 s_lastGCWasConservative = false; |
| 1949 | 2027 |
| 1950 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", | 2028 TRACE_EVENT2("blink_gc", "Heap::collectGarbage", |
| 1951 "lazySweeping", gcType == ThreadState::GCWithoutSweep, | 2029 "lazySweeping", gcType == ThreadState::GCWithoutSweep, |
| 1952 "gcReason", gcReasonString(reason)); | 2030 "gcReason", gcReasonString(reason)); |
| 1953 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | 2031 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
| 1954 double timeStamp = WTF::currentTimeMS(); | 2032 double timeStamp = WTF::currentTimeMS(); |
| 1955 #if ENABLE(GC_PROFILING) | 2033 #if ENABLE(GC_PROFILING) |
| 1956 visitor.objectGraph().clear(); | 2034 gcScope.visitor()->objectGraph().clear(); |
| 1957 #endif | 2035 #endif |
| 1958 | 2036 |
| 1959 // Disallow allocation during garbage collection (but not during the | 2037 // Disallow allocation during garbage collection (but not during the |
| 1960 // finalization that happens when the gcScope is torn down). | 2038 // finalization that happens when the gcScope is torn down). |
| 1961 ThreadState::NoAllocationScope noAllocationScope(state); | 2039 ThreadState::NoAllocationScope noAllocationScope(state); |
| 1962 | 2040 |
| 1963 preGC(); | 2041 preGC(); |
| 1964 | 2042 |
| 1965 StackFrameDepthScope stackDepthScope; | 2043 StackFrameDepthScope stackDepthScope; |
| 1966 | 2044 |
| 1967 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); | 2045 size_t totalObjectSize = Heap::allocatedObjectSize() + Heap::markedObjectSize(); |
| 1968 Heap::resetHeapCounters(); | 2046 Heap::resetHeapCounters(); |
| 1969 | 2047 |
| 1970 // 1. Trace persistent roots. | 2048 // 1. Trace persistent roots. |
| 1971 ThreadState::visitPersistentRoots(&visitor); | 2049 ThreadState::visitPersistentRoots(gcScope.visitor()); |
| 1972 | 2050 |
| 1973 // 2. Trace objects reachable from the persistent roots including | 2051 // 2. Trace objects reachable from the persistent roots including |
| 1974 // ephemerons. | 2052 // ephemerons. |
| 1975 processMarkingStack(&visitor); | 2053 processMarkingStack(gcScope.visitor()); |
| 1976 | 2054 |
| 1977 // 3. Trace objects reachable from the stack. We do this independent of the | 2055 // 3. Trace objects reachable from the stack. We do this independent of the |
| 1978 // given stackState since other threads might have a different stack state. | 2056 // given stackState since other threads might have a different stack state. |
| 1979 ThreadState::visitStackRoots(&visitor); | 2057 ThreadState::visitStackRoots(gcScope.visitor()); |
| 1980 | 2058 |
| 1981 // 4. Trace objects reachable from the stack "roots" including ephemerons. | 2059 // 4. Trace objects reachable from the stack "roots" including ephemerons. |
| 1982 // Only do the processing if we found a pointer to an object on one of the | 2060 // Only do the processing if we found a pointer to an object on one of the |
| 1983 // thread stacks. | 2061 // thread stacks. |
| 1984 if (lastGCWasConservative()) | 2062 if (lastGCWasConservative()) |
| 1985 processMarkingStack(&visitor); | 2063 processMarkingStack(gcScope.visitor()); |
| 1986 | 2064 |
| 1987 postMarkingProcessing(&visitor); | 2065 postMarkingProcessing(gcScope.visitor()); |
| 1988 globalWeakProcessing(&visitor); | 2066 globalWeakProcessing(gcScope.visitor()); |
| 1989 | 2067 |
| 1990 // Now we can delete all orphaned pages because there are no dangling | 2068 // Now we can delete all orphaned pages because there are no dangling |
| 1991 // pointers to the orphaned pages. (If we have such dangling pointers, | 2069 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 1992 // we should have crashed during marking before getting here.) | 2070 // we should have crashed during marking before getting here.) |
| 1993 orphanedPagePool()->decommitOrphanedPages(); | 2071 orphanedPagePool()->decommitOrphanedPages(); |
| 1994 | 2072 |
| 1995 postGC(gcType); | 2073 postGC(gcType); |
| 1996 | 2074 |
| 1997 #if ENABLE(GC_PROFILING) | 2075 #if ENABLE(GC_PROFILING) |
| 1998 visitor.reportStats(); | 2076 gcScope.visitor()->reportStats(); |
| 1999 #endif | 2077 #endif |
| 2000 | 2078 |
| 2001 double markingTimeInMilliseconds = WTF::currentTimeMS() - timeStamp; | 2079 double markingTimeInMilliseconds = WTF::currentTimeMS() - timeStamp; |
| 2002 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; | 2080 s_estimatedMarkingTimePerByte = totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0; |
| 2003 | 2081 |
| 2004 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", markingTimeInMilliseconds, 0, 10 * 1000, 50); | 2082 Platform::current()->histogramCustomCounts("BlinkGC.CollectGarbage", markingTimeInMilliseconds, 0, 10 * 1000, 50); |
| 2005 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); | 2083 Platform::current()->histogramCustomCounts("BlinkGC.TotalObjectSpace", Heap::allocatedObjectSize() / 1024, 0, 4 * 1024 * 1024, 50); |
| 2006 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); | 2084 Platform::current()->histogramCustomCounts("BlinkGC.TotalAllocatedSpace", Heap::allocatedSpace() / 1024, 0, 4 * 1024 * 1024, 50); |
| 2007 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, NumberOfGCReason); | 2085 Platform::current()->histogramEnumeration("BlinkGC.GCReason", reason, NumberOfGCReason); |
| 2008 Heap::reportMemoryUsageHistogram(); | 2086 Heap::reportMemoryUsageHistogram(); |
| 2009 WTF::Partitions::reportMemoryUsageHistogram(); | 2087 WTF::Partitions::reportMemoryUsageHistogram(); |
| 2010 | 2088 |
| 2011 if (state->isMainThread()) | 2089 if (state->isMainThread()) |
| 2012 ScriptForbiddenScope::exit(); | 2090 ScriptForbiddenScope::exit(); |
| 2013 } | 2091 } |
| 2014 | 2092 |
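Heap::collectGarbage above structures marking as a worklist algorithm: the visitor pushes roots onto a marking stack, processMarkingStack() drains it to a fixed point, and it is drained once more if conservative stack scanning turned up extra roots. A standalone sketch of that push-then-drain shape, using illustrative stand-in types (ObjectSketch, MarkingSketch) rather than Blink's Visitor API:

```cpp
// Standalone sketch (illustrative only) of the marking structure driven by
// Heap::collectGarbage: push roots, drain the marking worklist to a fixed
// point, and drain again if conservative stack scanning added more work.
#include <vector>

struct ObjectSketch {
    std::vector<ObjectSketch*> references;
    bool marked = false;
};

struct MarkingSketch {
    std::vector<ObjectSketch*> markingStack;

    void mark(ObjectSketch* object)
    {
        if (!object || object->marked)
            return;
        object->marked = true;
        markingStack.push_back(object); // trace the object's fields later
    }

    // Counterpart of processMarkingStack(): pop and trace until empty.
    void processMarkingStack()
    {
        while (!markingStack.empty()) {
            ObjectSketch* object = markingStack.back();
            markingStack.pop_back();
            for (ObjectSketch* child : object->references)
                mark(child);
        }
    }
};

int main()
{
    ObjectSketch root, child;
    root.references.push_back(&child);

    MarkingSketch marking;
    // 1./2. Trace persistent roots, then drain the stack including ephemerons.
    marking.mark(&root);
    marking.processMarkingStack();
    // 3./4. A conservative stack scan may add more roots; drain again if it did.
    bool foundPointerOnStack = false; // stands in for lastGCWasConservative()
    if (foundPointerOnStack)
        marking.processMarkingStack();
    return root.marked && child.marked ? 0 : 1;
}
```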
| 2015 void Heap::collectGarbageForTerminatingThread(ThreadState* state) | 2093 void Heap::collectGarbageForTerminatingThread(ThreadState* state) |
| 2016 { | 2094 { |
| 2017 { | 2095 { |
| 2018 // A thread-specific termination GC must not allow other global GCs to go | 2096 // A thread-specific termination GC must not allow other global GCs to go |
| 2019 // ahead while it is running, hence the termination GC does not enter a | 2097 // ahead while it is running, hence the termination GC does not enter a |
| 2020 // safepoint. GCScope will also not enter a safepoint scope for | 2098 // safepoint. GCScope will also not enter a safepoint scope for |
| 2021 // ThreadTerminationGC. | 2099 // ThreadTerminationGC. |
| 2022 GCScope gcScope(state, ThreadState::NoHeapPointersOnStack, ThreadState::ThreadTerminationGC); | 2100 GCScope gcScope(state, ThreadState::NoHeapPointersOnStack, ThreadState::ThreadTerminationGC); |
| 2023 | 2101 |
| 2024 MarkingVisitor<Visitor::ThreadLocalMarking> visitor; | |
| 2025 ThreadState::NoAllocationScope noAllocationScope(state); | 2102 ThreadState::NoAllocationScope noAllocationScope(state); |
| 2026 | 2103 |
| 2027 state->preGC(); | 2104 state->preGC(); |
| 2028 StackFrameDepthScope stackDepthScope; | 2105 StackFrameDepthScope stackDepthScope; |
| 2029 | 2106 |
| 2030 // 1. Trace the thread local persistent roots. For thread local GCs we | 2107 // 1. Trace the thread local persistent roots. For thread local GCs we |
| 2031 // don't trace the stack (ie. no conservative scanning) since this is | 2108 // don't trace the stack (ie. no conservative scanning) since this is |
| 2032 // only called during thread shutdown where there should be no objects | 2109 // only called during thread shutdown where there should be no objects |
| 2033 // on the stack. | 2110 // on the stack. |
| 2034 // We also assume that orphaned pages have no objects reachable from | 2111 // We also assume that orphaned pages have no objects reachable from |
| 2035 // persistent handles on other threads or CrossThreadPersistents. The | 2112 // persistent handles on other threads or CrossThreadPersistents. The |
| 2036 // only cases where this could happen is if a subsequent conservative | 2113 // only cases where this could happen is if a subsequent conservative |
| 2037 // global GC finds a "pointer" on the stack or due to a programming | 2114 // global GC finds a "pointer" on the stack or due to a programming |
| 2038 // error where an object has a dangling cross-thread pointer to an | 2115 // error where an object has a dangling cross-thread pointer to an |
| 2039 // object on this heap. | 2116 // object on this heap. |
| 2040 state->visitPersistents(&visitor); | 2117 state->visitPersistents(gcScope.visitor()); |
| 2041 | 2118 |
| 2042 // 2. Trace objects reachable from the thread's persistent roots | 2119 // 2. Trace objects reachable from the thread's persistent roots |
| 2043 // including ephemerons. | 2120 // including ephemerons. |
| 2044 processMarkingStack(&visitor); | 2121 processMarkingStack(gcScope.visitor()); |
| 2045 | 2122 |
| 2046 postMarkingProcessing(&visitor); | 2123 postMarkingProcessing(gcScope.visitor()); |
| 2047 globalWeakProcessing(&visitor); | 2124 globalWeakProcessing(gcScope.visitor()); |
| 2048 | 2125 |
| 2049 state->postGC(ThreadState::GCWithSweep); | 2126 state->postGC(ThreadState::GCWithSweep); |
| 2050 } | 2127 } |
| 2051 state->preSweep(); | 2128 state->preSweep(); |
| 2052 } | 2129 } |
| 2053 | 2130 |
| 2054 void Heap::processMarkingStack(Visitor* visitor) | 2131 void Heap::processMarkingStack(Visitor* visitor) |
| 2055 { | 2132 { |
| 2056 // Ephemeron fixed point loop. | 2133 // Ephemeron fixed point loop. |
| 2057 do { | 2134 do { |
| (...skipping 223 matching lines...) |
| 2281 size_t Heap::s_allocatedObjectSize = 0; | 2358 size_t Heap::s_allocatedObjectSize = 0; |
| 2282 size_t Heap::s_allocatedSpace = 0; | 2359 size_t Heap::s_allocatedSpace = 0; |
| 2283 size_t Heap::s_markedObjectSize = 0; | 2360 size_t Heap::s_markedObjectSize = 0; |
| 2284 // We don't want to use 0 KB for the initial value because it may end up | 2361 // We don't want to use 0 KB for the initial value because it may end up |
| 2285 // triggering the first GC of some thread too prematurely. | 2362 // triggering the first GC of some thread too prematurely. |
| 2286 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; | 2363 size_t Heap::s_estimatedLiveObjectSize = 512 * 1024; |
| 2287 size_t Heap::s_externalObjectSizeAtLastGC = 0; | 2364 size_t Heap::s_externalObjectSizeAtLastGC = 0; |
| 2288 double Heap::s_estimatedMarkingTimePerByte = 0.0; | 2365 double Heap::s_estimatedMarkingTimePerByte = 0.0; |
| 2289 | 2366 |
| 2290 } // namespace blink | 2367 } // namespace blink |