OLD | NEW |
1 /* | 1 /* |
2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
3 * | 3 * |
4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
6 * met: | 6 * met: |
7 * | 7 * |
8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
(...skipping 21 matching lines...) |
32 | 32 |
33 #include "base/trace_event/process_memory_dump.h" | 33 #include "base/trace_event/process_memory_dump.h" |
34 #include "platform/Histogram.h" | 34 #include "platform/Histogram.h" |
35 #include "platform/RuntimeEnabledFeatures.h" | 35 #include "platform/RuntimeEnabledFeatures.h" |
36 #include "platform/ScriptForbiddenScope.h" | 36 #include "platform/ScriptForbiddenScope.h" |
37 #include "platform/TraceEvent.h" | 37 #include "platform/TraceEvent.h" |
38 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 38 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
39 #include "platform/heap/CallbackStack.h" | 39 #include "platform/heap/CallbackStack.h" |
40 #include "platform/heap/Handle.h" | 40 #include "platform/heap/Handle.h" |
41 #include "platform/heap/Heap.h" | 41 #include "platform/heap/Heap.h" |
| 42 #include "platform/heap/PagePool.h" |
42 #include "platform/heap/SafePoint.h" | 43 #include "platform/heap/SafePoint.h" |
43 #include "platform/heap/Visitor.h" | 44 #include "platform/heap/Visitor.h" |
44 #include "platform/web_memory_allocator_dump.h" | 45 #include "platform/web_memory_allocator_dump.h" |
45 #include "platform/web_process_memory_dump.h" | 46 #include "platform/web_process_memory_dump.h" |
46 #include "public/platform/Platform.h" | 47 #include "public/platform/Platform.h" |
47 #include "public/platform/WebScheduler.h" | 48 #include "public/platform/WebScheduler.h" |
48 #include "public/platform/WebThread.h" | 49 #include "public/platform/WebThread.h" |
49 #include "public/platform/WebTraceLocation.h" | 50 #include "public/platform/WebTraceLocation.h" |
50 #include "wtf/CurrentTime.h" | 51 #include "wtf/CurrentTime.h" |
51 #include "wtf/DataLog.h" | 52 #include "wtf/DataLog.h" |
(...skipping 19 matching lines...) |
71 | 72 |
72 namespace blink { | 73 namespace blink { |
73 | 74 |
74 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; | 75 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; |
75 uintptr_t ThreadState::s_mainThreadStackStart = 0; | 76 uintptr_t ThreadState::s_mainThreadStackStart = 0; |
76 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; | 77 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; |
77 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 78 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
78 | 79 |
79 const size_t defaultAllocatedObjectSizeThreshold = 100 * 1024; | 80 const size_t defaultAllocatedObjectSizeThreshold = 100 * 1024; |
80 | 81 |
| 82 const char* gcReasonString(BlinkGC::GCReason reason) |
| 83 { |
| 84 switch (reason) { |
| 85 case BlinkGC::IdleGC: |
| 86 return "IdleGC"; |
| 87 case BlinkGC::PreciseGC: |
| 88 return "PreciseGC"; |
| 89 case BlinkGC::ConservativeGC: |
| 90 return "ConservativeGC"; |
| 91 case BlinkGC::ForcedGC: |
| 92 return "ForcedGC"; |
| 93 case BlinkGC::MemoryPressureGC: |
| 94 return "MemoryPressureGC"; |
| 95 case BlinkGC::PageNavigationGC: |
| 96 return "PageNavigationGC"; |
| 97 default: |
| 98 NOTREACHED(); |
| 99 } |
| 100 return "<Unknown>"; |
| 101 } |
| 102 |
| 103 class ParkThreadsScope final { |
| 104 STACK_ALLOCATED(); |
| 105 public: |
| 106 explicit ParkThreadsScope(ThreadState* state) |
| 107 : m_state(state) |
| 108 , m_shouldResumeThreads(false) |
| 109 { |
| 110 } |
| 111 |
| 112 bool parkThreads() |
| 113 { |
| 114 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope"); |
| 115 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); |
| 116 if (m_state->isMainThread()) |
| 117 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); |
| 118 |
| 119 // TODO(haraken): In an unlikely coincidence that two threads decide |
| 120 // to collect garbage at the same time, avoid doing two GCs in |
| 121 // a row and return false. |
| 122 double startTime = WTF::currentTimeMS(); |
| 123 |
| 124 m_shouldResumeThreads = m_state->heap().park(); |
| 125 |
| 126 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; |
| 127 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsHistogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50)); |
| 128 timeToStopThreadsHistogram.count(timeForStoppingThreads); |
| 129 |
| 130 if (m_state->isMainThread()) |
| 131 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); |
| 132 return m_shouldResumeThreads; |
| 133 } |
| 134 |
| 135 ~ParkThreadsScope() |
| 136 { |
| 137 // Only cleanup if we parked all threads in which case the GC happened |
| 138 // and we need to resume the other threads. |
| 139 if (m_shouldResumeThreads) |
| 140 m_state->heap().resume(); |
| 141 } |
| 142 |
| 143 private: |
| 144 ThreadState* m_state; |
| 145 bool m_shouldResumeThreads; |
| 146 }; |
| 147 |
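The new ParkThreadsScope ties thread parking and resuming to scope lifetime: parkThreads() tries to stop the other attached threads, and the destructor resumes them only if parking succeeded. A minimal sketch of the intended usage, mirroring collectGarbage() further down in this diff (runGCBody() is a hypothetical stand-in for the real marking work, not a function from this CL):

    // Sketch only, under the assumptions above; the real body is collectGarbage().
    void gcEntryPoint(ThreadState* state)
    {
        SafePointScope safePointScope(BlinkGC::NoHeapPointersOnStack, state);
        ParkThreadsScope parkThreadsScope(state);
        if (!parkThreadsScope.parkThreads())
            return; // Other threads could not be parked; skip this GC.
        runGCBody(state); // hypothetical: marking, weak processing, etc.
        // ~ParkThreadsScope() resumes the parked threads when the scope exits.
    }
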
81 ThreadState::ThreadState(bool perThreadHeapEnabled) | 148 ThreadState::ThreadState(bool perThreadHeapEnabled) |
82 : m_thread(currentThread()) | 149 : m_thread(currentThread()) |
83 , m_persistentRegion(wrapUnique(new PersistentRegion())) | 150 , m_persistentRegion(wrapUnique(new PersistentRegion())) |
84 #if OS(WIN) && COMPILER(MSVC) | 151 #if OS(WIN) && COMPILER(MSVC) |
85 , m_threadStackSize(0) | 152 , m_threadStackSize(0) |
86 #endif | 153 #endif |
87 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 154 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
88 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 155 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
89 , m_safePointScopeMarker(nullptr) | 156 , m_safePointScopeMarker(nullptr) |
90 , m_atSafePoint(false) | 157 , m_atSafePoint(false) |
(...skipping 140 matching lines...) |
231 prepareForThreadStateTermination(); | 298 prepareForThreadStateTermination(); |
232 | 299 |
233 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); | 300 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); |
234 | 301 |
235 // Do thread local GC's as long as the count of thread local Persistents | 302 // Do thread local GC's as long as the count of thread local Persistents |
236 // changes and is above zero. | 303 // changes and is above zero. |
237 int oldCount = -1; | 304 int oldCount = -1; |
238 int currentCount = getPersistentRegion()->numberOfPersistents(); | 305 int currentCount = getPersistentRegion()->numberOfPersistents(); |
239 ASSERT(currentCount >= 0); | 306 ASSERT(currentCount >= 0); |
240 while (currentCount != oldCount) { | 307 while (currentCount != oldCount) { |
241 ThreadHeap::collectGarbageForTerminatingThread(this); | 308 collectGarbageForTerminatingThread(); |
242 // Release the thread-local static persistents that were | 309 // Release the thread-local static persistents that were |
243 // instantiated while running the termination GC. | 310 // instantiated while running the termination GC. |
244 releaseStaticPersistentNodes(); | 311 releaseStaticPersistentNodes(); |
245 oldCount = currentCount; | 312 oldCount = currentCount; |
246 currentCount = getPersistentRegion()->numberOfPersistents(); | 313 currentCount = getPersistentRegion()->numberOfPersistents(); |
247 } | 314 } |
248 // We should not have any persistents left when getting to this point, | 315 // We should not have any persistents left when getting to this point, |
249 // if we have it is probably a bug so adding a debug ASSERT to catch this. | 316 // if we have it is probably a bug so adding a debug ASSERT to catch this. |
250 ASSERT(!currentCount); | 317 ASSERT(!currentCount); |
251 // All of pre-finalizers should be consumed. | 318 // All of pre-finalizers should be consumed. |
252 ASSERT(m_orderedPreFinalizers.isEmpty()); | 319 ASSERT(m_orderedPreFinalizers.isEmpty()); |
253 RELEASE_ASSERT(gcState() == NoGCScheduled); | 320 RELEASE_ASSERT(gcState() == NoGCScheduled); |
254 | 321 |
255 // Add pages to the orphaned page pool to ensure any global GCs from this point | 322 // Add pages to the orphaned page pool to ensure any global GCs from this point |
256 // on will not trace objects on this thread's arenas. | 323 // on will not trace objects on this thread's arenas. |
257 cleanupPages(); | 324 cleanupPages(); |
258 } | 325 } |
259 | 326 |
260 void ThreadState::cleanupMainThread() | 327 void ThreadState::cleanupMainThread() |
261 { | 328 { |
262 ASSERT(isMainThread()); | 329 ASSERT(isMainThread()); |
263 | 330 |
264 #if defined(LEAK_SANITIZER) | 331 #if defined(LEAK_SANITIZER) |
265 // See comment below, clear out most garbage before releasing static | 332 // See comment below, clear out most garbage before releasing static |
266 // persistents should some of the finalizers depend on touching | 333 // persistents should some of the finalizers depend on touching |
267 // these persistents. | 334 // these persistents. |
268 ThreadHeap::collectAllGarbage(); | 335 collectAllGarbage(); |
269 #endif | 336 #endif |
270 | 337 |
271 releaseStaticPersistentNodes(); | 338 releaseStaticPersistentNodes(); |
272 | 339 |
273 #if defined(LEAK_SANITIZER) | 340 #if defined(LEAK_SANITIZER) |
274 // If LSan is about to perform leak detection, after having released all | 341 // If LSan is about to perform leak detection, after having released all |
275 // the registered static Persistent<> root references to global caches | 342 // the registered static Persistent<> root references to global caches |
276 // that Blink keeps, follow up with a round of GCs to clear out all | 343 // that Blink keeps, follow up with a round of GCs to clear out all |
277 // what they referred to. | 344 // what they referred to. |
278 // | 345 // |
279 // This is not needed for caches over non-Oilpan objects, as they're | 346 // This is not needed for caches over non-Oilpan objects, as they're |
280 // not scanned by LSan due to being held in non-global storage | 347 // not scanned by LSan due to being held in non-global storage |
281 // ("static" references inside functions/methods.) | 348 // ("static" references inside functions/methods.) |
282 ThreadHeap::collectAllGarbage(); | 349 collectAllGarbage(); |
283 #endif | 350 #endif |
284 | 351 |
285 // Finish sweeping before shutting down V8. Otherwise, some destructor | 352 // Finish sweeping before shutting down V8. Otherwise, some destructor |
286 // may access V8 and cause crashes. | 353 // may access V8 and cause crashes. |
287 completeSweep(); | 354 completeSweep(); |
288 | 355 |
289 // It is unsafe to trigger GCs after this point because some | 356 // It is unsafe to trigger GCs after this point because some |
290 // destructor may access already-detached V8 and cause crashes. | 357 // destructor may access already-detached V8 and cause crashes. |
291 // Also it is useless. So we forbid GCs. | 358 // Also it is useless. So we forbid GCs. |
292 enterGCForbiddenScope(); | 359 enterGCForbiddenScope(); |
(...skipping 322 matching lines...) |
615 // TODO(haraken): It might not make sense to force completeSweep() for all | 682 // TODO(haraken): It might not make sense to force completeSweep() for all |
616 // page navigations. | 683 // page navigations. |
617 completeSweep(); | 684 completeSweep(); |
618 ASSERT(!isSweepingInProgress()); | 685 ASSERT(!isSweepingInProgress()); |
619 ASSERT(!sweepForbidden()); | 686 ASSERT(!sweepForbidden()); |
620 | 687 |
621 if (shouldForceMemoryPressureGC()) { | 688 if (shouldForceMemoryPressureGC()) { |
622 #if PRINT_HEAP_STATS | 689 #if PRINT_HEAP_STATS |
623 dataLogF("Scheduled MemoryPressureGC\n"); | 690 dataLogF("Scheduled MemoryPressureGC\n"); |
624 #endif | 691 #endif |
625 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); | 692 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); |
626 return; | 693 return; |
627 } | 694 } |
628 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { | 695 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { |
629 #if PRINT_HEAP_STATS | 696 #if PRINT_HEAP_STATS |
630 dataLogF("Scheduled PageNavigationGC\n"); | 697 dataLogF("Scheduled PageNavigationGC\n"); |
631 #endif | 698 #endif |
632 schedulePageNavigationGC(); | 699 schedulePageNavigationGC(); |
633 } | 700 } |
634 } | 701 } |
635 | 702 |
(...skipping 23 matching lines...) |
659 ASSERT(!sweepForbidden()); | 726 ASSERT(!sweepForbidden()); |
660 | 727 |
661 reportMemoryToV8(); | 728 reportMemoryToV8(); |
662 | 729 |
663 if (shouldForceMemoryPressureGC()) { | 730 if (shouldForceMemoryPressureGC()) { |
664 completeSweep(); | 731 completeSweep(); |
665 if (shouldForceMemoryPressureGC()) { | 732 if (shouldForceMemoryPressureGC()) { |
666 #if PRINT_HEAP_STATS | 733 #if PRINT_HEAP_STATS |
667 dataLogF("Scheduled MemoryPressureGC\n"); | 734 dataLogF("Scheduled MemoryPressureGC\n"); |
668 #endif | 735 #endif |
669 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); | 736 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); |
670 return; | 737 return; |
671 } | 738 } |
672 } | 739 } |
673 | 740 |
674 if (shouldForceConservativeGC()) { | 741 if (shouldForceConservativeGC()) { |
675 completeSweep(); | 742 completeSweep(); |
676 if (shouldForceConservativeGC()) { | 743 if (shouldForceConservativeGC()) { |
677 #if PRINT_HEAP_STATS | 744 #if PRINT_HEAP_STATS |
678 dataLogF("Scheduled ConservativeGC\n"); | 745 dataLogF("Scheduled ConservativeGC\n"); |
679 #endif | 746 #endif |
680 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); | 747 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); |
681 return; | 748 return; |
682 } | 749 } |
683 } | 750 } |
684 if (shouldScheduleIdleGC()) { | 751 if (shouldScheduleIdleGC()) { |
685 #if PRINT_HEAP_STATS | 752 #if PRINT_HEAP_STATS |
686 dataLogF("Scheduled IdleGC\n"); | 753 dataLogF("Scheduled IdleGC\n"); |
687 #endif | 754 #endif |
688 scheduleIdleGC(); | 755 scheduleIdleGC(); |
689 return; | 756 return; |
690 } | 757 } |
(...skipping 25 matching lines...) |
716 | 783 |
717 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime(); | 784 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime(); |
718 if (idleDeltaInSeconds <= m_heap->heapStats().estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { | 785 if (idleDeltaInSeconds <= m_heap->heapStats().estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { |
719 // If marking is estimated to take longer than the deadline and we can't | 786 // If marking is estimated to take longer than the deadline and we can't |
720 // exceed the deadline, then reschedule for the next idle period. | 787 // exceed the deadline, then reschedule for the next idle period. |
721 scheduleIdleGC(); | 788 scheduleIdleGC(); |
722 return; | 789 return; |
723 } | 790 } |
724 | 791 |
725 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", m_heap->heapStats().estimatedMarkingTime()); | 792 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", m_heap->heapStats().estimatedMarkingTime()); |
726 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); | 793 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); |
727 } | 794 } |
728 | 795 |
729 void ThreadState::performIdleLazySweep(double deadlineSeconds) | 796 void ThreadState::performIdleLazySweep(double deadlineSeconds) |
730 { | 797 { |
731 ASSERT(checkThread()); | 798 ASSERT(checkThread()); |
732 ASSERT(isMainThread()); | 799 ASSERT(isMainThread()); |
733 | 800 |
734 // If we are not in a sweeping phase, there is nothing to do here. | 801 // If we are not in a sweeping phase, there is nothing to do here. |
735 if (!isSweepingInProgress()) | 802 if (!isSweepingInProgress()) |
736 return; | 803 return; |
(...skipping 155 matching lines...) |
892 // If a safe point is entered while initiating a GC, we clearly do | 959 // If a safe point is entered while initiating a GC, we clearly do |
893 // not want to do another as part that -- the safe point is only | 960 // not want to do another as part that -- the safe point is only |
894 // entered after checking if a scheduled GC ought to run first. | 961 // entered after checking if a scheduled GC ought to run first. |
895 // Prevent that from happening by marking GCs as forbidden while | 962 // Prevent that from happening by marking GCs as forbidden while |
896 // one is initiated and later running. | 963 // one is initiated and later running. |
897 if (isGCForbidden()) | 964 if (isGCForbidden()) |
898 return; | 965 return; |
899 | 966 |
900 switch (gcState()) { | 967 switch (gcState()) { |
901 case FullGCScheduled: | 968 case FullGCScheduled: |
902 ThreadHeap::collectAllGarbage(); | 969 collectAllGarbage(); |
903 break; | 970 break; |
904 case PreciseGCScheduled: | 971 case PreciseGCScheduled: |
905 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); | 972 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); |
906 break; | 973 break; |
907 case PageNavigationGCScheduled: | 974 case PageNavigationGCScheduled: |
908 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); | 975 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); |
909 break; | 976 break; |
910 case IdleGCScheduled: | 977 case IdleGCScheduled: |
911 // Idle time GC will be scheduled by Blink Scheduler. | 978 // Idle time GC will be scheduled by Blink Scheduler. |
912 break; | 979 break; |
913 default: | 980 default: |
914 break; | 981 break; |
915 } | 982 } |
916 } | 983 } |
917 | 984 |
918 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() | 985 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() |
(...skipping 613 matching lines...) |
1532 threadDump->AddScalar("live_count", "objects", totalLiveCount); | 1599 threadDump->AddScalar("live_count", "objects", totalLiveCount); |
1533 threadDump->AddScalar("dead_count", "objects", totalDeadCount); | 1600 threadDump->AddScalar("dead_count", "objects", totalDeadCount); |
1534 threadDump->AddScalar("live_size", "bytes", totalLiveSize); | 1601 threadDump->AddScalar("live_size", "bytes", totalLiveSize); |
1535 threadDump->AddScalar("dead_size", "bytes", totalDeadSize); | 1602 threadDump->AddScalar("dead_size", "bytes", totalDeadSize); |
1536 | 1603 |
1537 base::trace_event::MemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); | 1604 base::trace_event::MemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); |
1538 base::trace_event::MemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); | 1605 base::trace_event::MemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); |
1539 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); | 1606 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); |
1540 } | 1607 } |
1541 | 1608 |
| 1609 void ThreadState::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType, BlinkGC::GCReason reason) |
| 1610 { |
| 1611 DCHECK_NE(gcType, BlinkGC::ThreadTerminationGC); |
| 1612 |
| 1613 // Nested collectGarbage() invocations aren't supported. |
| 1614 RELEASE_ASSERT(!isGCForbidden()); |
| 1615 completeSweep(); |
| 1616 |
| 1617 std::unique_ptr<Visitor> visitor = Visitor::create(this, gcType); |
| 1618 |
| 1619 SafePointScope safePointScope(stackState, this); |
| 1620 |
| 1621 // Resume all parked threads upon leaving this scope. |
| 1622 ParkThreadsScope parkThreadsScope(this); |
| 1623 |
| 1624 // Try to park the other threads. If we're unable to, bail out of the GC. |
| 1625 if (!parkThreadsScope.parkThreads()) |
| 1626 return; |
| 1627 |
| 1628 ScriptForbiddenIfMainThreadScope scriptForbidden; |
| 1629 |
| 1630 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", |
| 1631 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, |
| 1632 "gcReason", gcReasonString(reason)); |
| 1633 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); |
| 1634 double startTime = WTF::currentTimeMS(); |
| 1635 |
| 1636 if (gcType == BlinkGC::TakeSnapshot) |
| 1637 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); |
| 1638 |
| 1639 // Disallow allocation during garbage collection (but not during the |
| 1640 // finalization that happens when the visitorScope is torn down). |
| 1641 ThreadState::NoAllocationScope noAllocationScope(this); |
| 1642 |
| 1643 heap().commitCallbackStacks(); |
| 1644 heap().preGC(); |
| 1645 |
| 1646 StackFrameDepthScope stackDepthScope(&heap().stackFrameDepth()); |
| 1647 |
| 1648 size_t totalObjectSize = heap().heapStats().allocatedObjectSize() + heap().heapStats().markedObjectSize(); |
| 1649 if (gcType != BlinkGC::TakeSnapshot) |
| 1650 heap().resetHeapCounters(); |
| 1651 |
| 1652 { |
| 1653 // Access to the CrossThreadPersistentRegion has to be prevented while |
| 1654 // marking and global weak processing is in progress. If not, threads |
| 1655 // not attached to Oilpan and participating in this GC are able |
| 1656 // to allocate & free PersistentNodes, something the marking phase isn't |
| 1657 // capable of handling. |
| 1658 CrossThreadPersistentRegion::LockScope persistentLock(ProcessHeap::crossThreadPersistentRegion()); |
| 1659 |
| 1660 // 1. Trace persistent roots. |
| 1661 heap().visitPersistentRoots(visitor.get()); |
| 1662 |
| 1663 // 2. Trace objects reachable from the stack. We do this independent of the |
| 1664 // given stackState since other threads might have a different stack state. |
| 1665 heap().visitStackRoots(visitor.get()); |
| 1666 |
| 1667 // 3. Transitive closure to trace objects including ephemerons. |
| 1668 heap().processMarkingStack(visitor.get()); |
| 1669 |
| 1670 heap().postMarkingProcessing(visitor.get()); |
| 1671 heap().globalWeakProcessing(visitor.get()); |
| 1672 } |
| 1673 |
| 1674 // Now we can delete all orphaned pages because there are no dangling |
| 1675 // pointers to the orphaned pages. (If we have such dangling pointers, |
| 1676 // we should have crashed during marking before getting here.) |
| 1677 heap().getOrphanedPagePool()->decommitOrphanedPages(); |
| 1678 |
| 1679 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; |
| 1680 heap().heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); |
| 1681 |
| 1682 #if PRINT_HEAP_STATS |
| 1683 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); |
| 1684 #endif |
| 1685 |
| 1686 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); |
| 1687 markingTimeHistogram.count(markingTimeInMilliseconds); |
| 1688 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); |
| 1689 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024); |
| 1690 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); |
| 1691 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024); |
| 1692 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); |
| 1693 gcReasonHistogram.count(reason); |
| 1694 |
| 1695 heap().m_lastGCReason = reason; |
| 1696 |
| 1697 ThreadHeap::reportMemoryUsageHistogram(); |
| 1698 WTF::Partitions::reportMemoryUsageHistogram(); |
| 1699 |
| 1700 heap().postGC(gcType); |
| 1701 heap().decommitCallbackStacks(); |
| 1702 } |
| 1703 |
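With collectGarbage() now an instance method on ThreadState rather than a static on ThreadHeap, call sites outside this file presumably change along the following lines (a sketch of the implied migration, not a call site taken from this CL; ThreadState::current() is the existing accessor for the calling thread's state):

    // Before this CL (static entry point):
    //   ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC);
    // After this CL (per-thread entry point):
    ThreadState::current()->collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC);

The call sites inside this file simply drop the ThreadHeap:: qualifier, as the diff above shows.
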
| 1704 void ThreadState::collectGarbageForTerminatingThread() |
| 1705 { |
| 1706 { |
| 1707 // A thread-specific termination GC must not allow other global GCs to go |
| 1708 // ahead while it is running, hence the termination GC does not enter a |
| 1709 // safepoint. VisitorScope will also not enter a safepoint scope for |
| 1710 // ThreadTerminationGC. |
| 1711 std::unique_ptr<Visitor> visitor = Visitor::create(this, BlinkGC::ThreadTerminationGC); |
| 1712 |
| 1713 ThreadState::NoAllocationScope noAllocationScope(this); |
| 1714 |
| 1715 heap().commitCallbackStacks(); |
| 1716 preGC(); |
| 1717 |
| 1718 // 1. Trace the thread local persistent roots. For thread local GCs we |
| 1719 // don't trace the stack (ie. no conservative scanning) since this is |
| 1720 // only called during thread shutdown where there should be no objects |
| 1721 // on the stack. |
| 1722 // We also assume that orphaned pages have no objects reachable from |
| 1723 // persistent handles on other threads or CrossThreadPersistents. The |
| 1724 // only cases where this could happen is if a subsequent conservative |
| 1725 // global GC finds a "pointer" on the stack or due to a programming |
| 1726 // error where an object has a dangling cross-thread pointer to an |
| 1727 // object on this heap. |
| 1728 visitPersistents(visitor.get()); |
| 1729 |
| 1730 // 2. Trace objects reachable from the thread's persistent roots |
| 1731 // including ephemerons. |
| 1732 heap().processMarkingStack(visitor.get()); |
| 1733 |
| 1734 heap().postMarkingProcessing(visitor.get()); |
| 1735 heap().globalWeakProcessing(visitor.get()); |
| 1736 |
| 1737 postGC(BlinkGC::GCWithSweep); |
| 1738 heap().decommitCallbackStacks(); |
| 1739 } |
| 1740 preSweep(); |
| 1741 } |
| 1742 |
| 1743 void ThreadState::collectAllGarbage() |
| 1744 { |
| 1745 // We need to run multiple GCs to collect a chain of persistent handles. |
| 1746 size_t previousLiveObjects = 0; |
| 1747 for (int i = 0; i < 5; ++i) { |
| 1748 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); |
| 1749 size_t liveObjects = heap().heapStats().markedObjectSize(); |
| 1750 if (liveObjects == previousLiveObjects) |
| 1751 break; |
| 1752 previousLiveObjects = liveObjects; |
| 1753 } |
| 1754 } |
| 1755 |
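The fixed-point loop in collectAllGarbage() exists because a GC can run finalizers that release Persistent<> handles which were keeping further objects alive, so the live set only converges after several passes. A hypothetical illustration (class and member names are illustrative, not from this CL):

    // Each Link keeps the next one alive through a Persistent<> that is only
    // released when this Link is finalized, so one GC frees at most one link.
    class Link : public GarbageCollectedFinalized<Link> {
    public:
        DEFINE_INLINE_TRACE() { }
        Persistent<Link> m_next; // released in ~Link(), freeing the next link
    };
    // collectAllGarbage() therefore repeats the forced GC until
    // markedObjectSize() stops shrinking, capped at five iterations.
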
1542 } // namespace blink | 1756 } // namespace blink |