| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 20 matching lines...) Expand all Loading... |
| 31 #include "platform/heap/ThreadState.h" | 31 #include "platform/heap/ThreadState.h" |
| 32 | 32 |
| 33 #include "base/trace_event/process_memory_dump.h" | 33 #include "base/trace_event/process_memory_dump.h" |
| 34 #include "platform/Histogram.h" | 34 #include "platform/Histogram.h" |
| 35 #include "platform/RuntimeEnabledFeatures.h" | 35 #include "platform/RuntimeEnabledFeatures.h" |
| 36 #include "platform/ScriptForbiddenScope.h" | 36 #include "platform/ScriptForbiddenScope.h" |
| 37 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 37 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| 38 #include "platform/heap/CallbackStack.h" | 38 #include "platform/heap/CallbackStack.h" |
| 39 #include "platform/heap/Handle.h" | 39 #include "platform/heap/Handle.h" |
| 40 #include "platform/heap/Heap.h" | 40 #include "platform/heap/Heap.h" |
| 41 #include "platform/heap/HeapCompact.h" | |
| 42 #include "platform/heap/PagePool.h" | 41 #include "platform/heap/PagePool.h" |
| 43 #include "platform/heap/SafePoint.h" | 42 #include "platform/heap/SafePoint.h" |
| 44 #include "platform/heap/Visitor.h" | 43 #include "platform/heap/Visitor.h" |
| 45 #include "platform/tracing/TraceEvent.h" | 44 #include "platform/tracing/TraceEvent.h" |
| 46 #include "platform/tracing/web_memory_allocator_dump.h" | 45 #include "platform/tracing/web_memory_allocator_dump.h" |
| 47 #include "platform/tracing/web_process_memory_dump.h" | 46 #include "platform/tracing/web_process_memory_dump.h" |
| 48 #include "public/platform/Platform.h" | 47 #include "public/platform/Platform.h" |
| 49 #include "public/platform/WebScheduler.h" | 48 #include "public/platform/WebScheduler.h" |
| 50 #include "public/platform/WebThread.h" | 49 #include "public/platform/WebThread.h" |
| 51 #include "public/platform/WebTraceLocation.h" | 50 #include "public/platform/WebTraceLocation.h" |
| (...skipping 21 matching lines...) Expand all Loading... |
| 73 | 72 |
| 74 namespace blink { | 73 namespace blink { |
| 75 | 74 |
| 76 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; | 75 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; |
| 77 uintptr_t ThreadState::s_mainThreadStackStart = 0; | 76 uintptr_t ThreadState::s_mainThreadStackStart = 0; |
| 78 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; | 77 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; |
| 79 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 78 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
| 80 | 79 |
| 81 const size_t defaultAllocatedObjectSizeThreshold = 100 * 1024; | 80 const size_t defaultAllocatedObjectSizeThreshold = 100 * 1024; |
| 82 | 81 |
| 83 const char* ThreadState::gcReasonString(BlinkGC::GCReason reason) { | 82 const char* gcReasonString(BlinkGC::GCReason reason) { |
| 84 switch (reason) { | 83 switch (reason) { |
| 85 case BlinkGC::IdleGC: | 84 case BlinkGC::IdleGC: |
| 86 return "IdleGC"; | 85 return "IdleGC"; |
| 87 case BlinkGC::PreciseGC: | 86 case BlinkGC::PreciseGC: |
| 88 return "PreciseGC"; | 87 return "PreciseGC"; |
| 89 case BlinkGC::ConservativeGC: | 88 case BlinkGC::ConservativeGC: |
| 90 return "ConservativeGC"; | 89 return "ConservativeGC"; |
| 91 case BlinkGC::ForcedGC: | 90 case BlinkGC::ForcedGC: |
| 92 return "ForcedGC"; | 91 return "ForcedGC"; |
| 93 case BlinkGC::MemoryPressureGC: | 92 case BlinkGC::MemoryPressureGC: |
| (...skipping 404 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 498 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; | 497 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; |
| 499 | 498 |
| 500 // Disallow allocation during weak processing. | 499 // Disallow allocation during weak processing. |
| 501 // It would be technically safe to allow allocations, but it is unsafe | 500 // It would be technically safe to allow allocations, but it is unsafe |
| 502 // to mutate an object graph in a way in which a dead object gets | 501 // to mutate an object graph in a way in which a dead object gets |
| 503 // resurrected or mutate a HashTable (because HashTable's weak processing | 502 // resurrected or mutate a HashTable (because HashTable's weak processing |
| 504 // assumes that the HashTable hasn't been mutated since the latest marking). | 503 // assumes that the HashTable hasn't been mutated since the latest marking). |
| 505 // Due to the complexity, we just forbid allocations. | 504 // Due to the complexity, we just forbid allocations. |
| 506 NoAllocationScope noAllocationScope(this); | 505 NoAllocationScope noAllocationScope(this); |
| 507 | 506 |
| 508 GCForbiddenScope gcForbiddenScope(this); | |
| 509 std::unique_ptr<Visitor> visitor = | 507 std::unique_ptr<Visitor> visitor = |
| 510 Visitor::create(this, BlinkGC::ThreadLocalWeakProcessing); | 508 Visitor::create(this, BlinkGC::ThreadLocalWeakProcessing); |
| 511 | 509 |
| 512 // Perform thread-specific weak processing. | 510 // Perform thread-specific weak processing. |
| 513 while (popAndInvokeThreadLocalWeakCallback(visitor.get())) { | 511 while (popAndInvokeThreadLocalWeakCallback(visitor.get())) { |
| 514 } | 512 } |
| 515 | 513 |
| 516 m_threadLocalWeakCallbackStack->decommit(); | 514 m_threadLocalWeakCallbackStack->decommit(); |
| 517 | 515 |
| 518 if (isMainThread()) { | 516 if (isMainThread()) { |
| (...skipping 475 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 994 } | 992 } |
| 995 | 993 |
| 996 #undef VERIFY_STATE_TRANSITION | 994 #undef VERIFY_STATE_TRANSITION |
| 997 | 995 |
| 998 void ThreadState::runScheduledGC(BlinkGC::StackState stackState) { | 996 void ThreadState::runScheduledGC(BlinkGC::StackState stackState) { |
| 999 ASSERT(checkThread()); | 997 ASSERT(checkThread()); |
| 1000 if (stackState != BlinkGC::NoHeapPointersOnStack) | 998 if (stackState != BlinkGC::NoHeapPointersOnStack) |
| 1001 return; | 999 return; |
| 1002 | 1000 |
| 1003 // If a safe point is entered while initiating a GC, we clearly do | 1001 // If a safe point is entered while initiating a GC, we clearly do |
| 1004 // not want to do another as part of that -- the safe point is only | 1002 // not want to do another as part of that -- the safe point is only |
| 1005 // entered after checking if a scheduled GC ought to run first. | 1003 // entered after checking if a scheduled GC ought to run first. |
| 1006 // Prevent that from happening by marking GCs as forbidden while | 1004 // Prevent that from happening by marking GCs as forbidden while |
| 1007 // one is initiated and later running. | 1005 // one is initiated and later running. |
| 1008 if (isGCForbidden()) | 1006 if (isGCForbidden()) |
| 1009 return; | 1007 return; |
| 1010 | 1008 |
| 1011 switch (gcState()) { | 1009 switch (gcState()) { |
| 1012 case FullGCScheduled: | 1010 case FullGCScheduled: |
| 1013 collectAllGarbage(); | 1011 collectAllGarbage(); |
| 1014 break; | 1012 break; |
| (...skipping 20 matching lines...) Expand all Loading... |
| 1035 } | 1033 } |
| 1036 } | 1034 } |
| 1037 | 1035 |
| 1038 void ThreadState::makeConsistentForGC() { | 1036 void ThreadState::makeConsistentForGC() { |
| 1039 ASSERT(isInGC()); | 1037 ASSERT(isInGC()); |
| 1040 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); | 1038 TRACE_EVENT0("blink_gc", "ThreadState::makeConsistentForGC"); |
| 1041 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 1039 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
| 1042 m_arenas[i]->makeConsistentForGC(); | 1040 m_arenas[i]->makeConsistentForGC(); |
| 1043 } | 1041 } |
| 1044 | 1042 |
| 1045 void ThreadState::compact() { | |
| 1046 if (!heap().compaction()->isCompacting()) | |
| 1047 return; | |
| 1048 | |
| 1049 SweepForbiddenScope scope(this); | |
| 1050 ScriptForbiddenIfMainThreadScope scriptForbiddenScope; | |
| 1051 NoAllocationScope noAllocationScope(this); | |
| 1052 | |
| 1053 // Compaction is done eagerly and before the mutator threads get | |
| 1054 // to run again. Doing it lazily is problematic, as the mutator's | |
| 1055 // references to live objects could suddenly be invalidated by | |
| 1056 // compaction of a page/heap. We do know all the references to | |
| 1057 // the relocating objects just after marking, but won't later. | |
| 1058 // (e.g., stack references could have been created, new objects | |
| 1059 // created which refer to old collection objects, and so on.) | |
| 1060 | |
| 1061 // Compact the hash table backing store arena first, it usually has | |
| 1062 // higher fragmentation and is larger. | |
| 1063 // | |
| 1064 // TODO: implement bail out wrt any overall deadline, not compacting | |
| 1065 // the remaining arenas if the time budget has been exceeded. | |
| 1066 heap().compaction()->startThreadCompaction(); | |
| 1067 for (int i = BlinkGC::HashTableArenaIndex; i >= BlinkGC::Vector1ArenaIndex; | |
| 1068 --i) | |
| 1069 static_cast<NormalPageArena*>(m_arenas[i])->sweepAndCompact(); | |
| 1070 heap().compaction()->finishThreadCompaction(); | |
| 1071 } | |
| 1072 | |
| 1073 void ThreadState::makeConsistentForMutator() { | 1043 void ThreadState::makeConsistentForMutator() { |
| 1074 ASSERT(isInGC()); | 1044 ASSERT(isInGC()); |
| 1075 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) | 1045 for (int i = 0; i < BlinkGC::NumberOfArenas; ++i) |
| 1076 m_arenas[i]->makeConsistentForMutator(); | 1046 m_arenas[i]->makeConsistentForMutator(); |
| 1077 } | 1047 } |
| 1078 | 1048 |
| 1079 void ThreadState::preGC() { | 1049 void ThreadState::preGC() { |
| 1080 if (RuntimeEnabledFeatures::traceWrappablesEnabled() && m_isolate && | 1050 if (RuntimeEnabledFeatures::traceWrappablesEnabled() && m_isolate && |
| 1081 m_performCleanup) | 1051 m_performCleanup) |
| 1082 m_performCleanup(m_isolate); | 1052 m_performCleanup(m_isolate); |
| (...skipping 63 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1146 | 1116 |
| 1147 // Allocation is allowed during the pre-finalizers and destructors. | 1117 // Allocation is allowed during the pre-finalizers and destructors. |
| 1148 // However, they must not mutate an object graph in a way in which | 1118 // However, they must not mutate an object graph in a way in which |
| 1149 // a dead object gets resurrected. | 1119 // a dead object gets resurrected. |
| 1150 invokePreFinalizers(); | 1120 invokePreFinalizers(); |
| 1151 | 1121 |
| 1152 m_accumulatedSweepingTime = 0; | 1122 m_accumulatedSweepingTime = 0; |
| 1153 | 1123 |
| 1154 eagerSweep(); | 1124 eagerSweep(); |
| 1155 | 1125 |
| 1156 // Any sweep compaction must happen after pre-finalizers and eager | |
| 1157 // sweeping, as it will finalize dead objects in compactable arenas | |
| 1158 // (e.g., backing stores for container objects.) | |
| 1159 // | |
| 1160 // As per-contract for prefinalizers, those finalizable objects must | |
| 1161 // still be accessible when the prefinalizer runs, hence we cannot | |
| 1162 // schedule compaction until those have run. Similarly for eager sweeping. | |
| 1163 compact(); | |
| 1164 | |
| 1165 #if defined(ADDRESS_SANITIZER) | 1126 #if defined(ADDRESS_SANITIZER) |
| 1166 poisonAllHeaps(); | 1127 poisonAllHeaps(); |
| 1167 #endif | 1128 #endif |
| 1168 | |
| 1169 if (previousGCState == EagerSweepScheduled) { | 1129 if (previousGCState == EagerSweepScheduled) { |
| 1170 // Eager sweeping should happen only in testing. | 1130 // Eager sweeping should happen only in testing. |
| 1171 completeSweep(); | 1131 completeSweep(); |
| 1172 } else { | 1132 } else { |
| 1173 // The default behavior is lazy sweeping. | 1133 // The default behavior is lazy sweeping. |
| 1174 scheduleIdleLazySweep(); | 1134 scheduleIdleLazySweep(); |
| 1175 } | 1135 } |
| 1176 } | 1136 } |
| 1177 | 1137 |
| 1178 #if defined(ADDRESS_SANITIZER) | 1138 #if defined(ADDRESS_SANITIZER) |
| (...skipping 528 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1707 | 1667 |
| 1708 void ThreadState::collectGarbage(BlinkGC::StackState stackState, | 1668 void ThreadState::collectGarbage(BlinkGC::StackState stackState, |
| 1709 BlinkGC::GCType gcType, | 1669 BlinkGC::GCType gcType, |
| 1710 BlinkGC::GCReason reason) { | 1670 BlinkGC::GCReason reason) { |
| 1711 DCHECK_NE(gcType, BlinkGC::ThreadTerminationGC); | 1671 DCHECK_NE(gcType, BlinkGC::ThreadTerminationGC); |
| 1712 | 1672 |
| 1713 // Nested collectGarbage() invocations aren't supported. | 1673 // Nested collectGarbage() invocations aren't supported. |
| 1714 RELEASE_ASSERT(!isGCForbidden()); | 1674 RELEASE_ASSERT(!isGCForbidden()); |
| 1715 completeSweep(); | 1675 completeSweep(); |
| 1716 | 1676 |
| 1717 GCForbiddenScope gcForbiddenScope(this); | 1677 std::unique_ptr<Visitor> visitor = Visitor::create(this, gcType); |
| 1718 | 1678 |
| 1719 SafePointScope safePointScope(stackState, this); | 1679 SafePointScope safePointScope(stackState, this); |
| 1720 | 1680 |
| 1721 // Resume all parked threads upon leaving this scope. | 1681 // Resume all parked threads upon leaving this scope. |
| 1722 ParkThreadsScope parkThreadsScope(this); | 1682 ParkThreadsScope parkThreadsScope(this); |
| 1723 | 1683 |
| 1724 // Try to park the other threads. If we're unable to, bail out of the GC. | 1684 // Try to park the other threads. If we're unable to, bail out of the GC. |
| 1725 if (!parkThreadsScope.parkThreads()) | 1685 if (!parkThreadsScope.parkThreads()) |
| 1726 return; | 1686 return; |
| 1727 | 1687 |
| 1728 BlinkGC::GCType visitorType = gcType; | |
| 1729 if (heap().compaction()->shouldCompact(this, gcType, reason)) | |
| 1730 visitorType = heap().compaction()->initialize(this); | |
| 1731 | |
| 1732 std::unique_ptr<Visitor> visitor = Visitor::create(this, visitorType); | |
| 1733 | |
| 1734 ScriptForbiddenIfMainThreadScope scriptForbidden; | 1688 ScriptForbiddenIfMainThreadScope scriptForbidden; |
| 1735 | 1689 |
| 1736 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", "lazySweeping", | 1690 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", "lazySweeping", |
| 1737 gcType == BlinkGC::GCWithoutSweep, "gcReason", | 1691 gcType == BlinkGC::GCWithoutSweep, "gcReason", |
| 1738 gcReasonString(reason)); | 1692 gcReasonString(reason)); |
| 1739 double startTime = WTF::currentTimeMS(); | 1693 double startTime = WTF::currentTimeMS(); |
| 1740 | 1694 |
| 1741 if (gcType == BlinkGC::TakeSnapshot) | 1695 if (gcType == BlinkGC::TakeSnapshot) |
| 1742 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | 1696 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 1743 | 1697 |
| 1744 // Disallow allocation during garbage collection (but not during the | 1698 // Disallow allocation during garbage collection (but not during the |
| 1745 // finalization that happens when the visitorScope is torn down). | 1699 // finalization that happens when the visitorScope is torn down). |
| 1746 NoAllocationScope noAllocationScope(this); | 1700 ThreadState::NoAllocationScope noAllocationScope(this); |
| 1747 | 1701 |
| 1748 heap().commitCallbackStacks(); | 1702 heap().commitCallbackStacks(); |
| 1749 heap().preGC(); | 1703 heap().preGC(); |
| 1750 | 1704 |
| 1751 StackFrameDepthScope stackDepthScope(&heap().stackFrameDepth()); | 1705 StackFrameDepthScope stackDepthScope(&heap().stackFrameDepth()); |
| 1752 | 1706 |
| 1753 size_t totalObjectSize = heap().heapStats().allocatedObjectSize() + | 1707 size_t totalObjectSize = heap().heapStats().allocatedObjectSize() + |
| 1754 heap().heapStats().markedObjectSize(); | 1708 heap().heapStats().markedObjectSize(); |
| 1755 if (gcType != BlinkGC::TakeSnapshot) | 1709 if (gcType != BlinkGC::TakeSnapshot) |
| 1756 heap().resetHeapCounters(); | 1710 heap().resetHeapCounters(); |
| (...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1824 heap().postGC(gcType); | 1778 heap().postGC(gcType); |
| 1825 heap().decommitCallbackStacks(); | 1779 heap().decommitCallbackStacks(); |
| 1826 } | 1780 } |
| 1827 | 1781 |
| 1828 void ThreadState::collectGarbageForTerminatingThread() { | 1782 void ThreadState::collectGarbageForTerminatingThread() { |
| 1829 { | 1783 { |
| 1830 // A thread-specific termination GC must not allow other global GCs to go | 1784 // A thread-specific termination GC must not allow other global GCs to go |
| 1831 // ahead while it is running, hence the termination GC does not enter a | 1785 // ahead while it is running, hence the termination GC does not enter a |
| 1832 // safepoint. VisitorScope will not enter also a safepoint scope for | 1786 // safepoint. VisitorScope will not enter also a safepoint scope for |
| 1833 // ThreadTerminationGC. | 1787 // ThreadTerminationGC. |
| 1834 GCForbiddenScope gcForbiddenScope(this); | |
| 1835 std::unique_ptr<Visitor> visitor = | 1788 std::unique_ptr<Visitor> visitor = |
| 1836 Visitor::create(this, BlinkGC::ThreadTerminationGC); | 1789 Visitor::create(this, BlinkGC::ThreadTerminationGC); |
| 1837 | 1790 |
| 1838 NoAllocationScope noAllocationScope(this); | 1791 ThreadState::NoAllocationScope noAllocationScope(this); |
| 1839 | 1792 |
| 1840 heap().commitCallbackStacks(); | 1793 heap().commitCallbackStacks(); |
| 1841 preGC(); | 1794 preGC(); |
| 1842 | 1795 |
| 1843 // 1. Trace the thread local persistent roots. For thread local GCs we | 1796 // 1. Trace the thread local persistent roots. For thread local GCs we |
| 1844 // don't trace the stack (ie. no conservative scanning) since this is | 1797 // don't trace the stack (ie. no conservative scanning) since this is |
| 1845 // only called during thread shutdown where there should be no objects | 1798 // only called during thread shutdown where there should be no objects |
| 1846 // on the stack. | 1799 // on the stack. |
| 1847 // We also assume that orphaned pages have no objects reachable from | 1800 // We also assume that orphaned pages have no objects reachable from |
| 1848 // persistent handles on other threads or CrossThreadPersistents. The | 1801 // persistent handles on other threads or CrossThreadPersistents. The |
| (...skipping 23 matching lines...) Expand all Loading... |
| 1872 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, | 1825 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, |
| 1873 BlinkGC::ForcedGC); | 1826 BlinkGC::ForcedGC); |
| 1874 size_t liveObjects = heap().heapStats().markedObjectSize(); | 1827 size_t liveObjects = heap().heapStats().markedObjectSize(); |
| 1875 if (liveObjects == previousLiveObjects) | 1828 if (liveObjects == previousLiveObjects) |
| 1876 break; | 1829 break; |
| 1877 previousLiveObjects = liveObjects; | 1830 previousLiveObjects = liveObjects; |
| 1878 } | 1831 } |
| 1879 } | 1832 } |
| 1880 | 1833 |
| 1881 } // namespace blink | 1834 } // namespace blink |
| OLD | NEW |