| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 21 matching lines...) |
| 32 | 32 |
| 33 #include "base/trace_event/process_memory_dump.h" | 33 #include "base/trace_event/process_memory_dump.h" |
| 34 #include "platform/Histogram.h" | 34 #include "platform/Histogram.h" |
| 35 #include "platform/RuntimeEnabledFeatures.h" | 35 #include "platform/RuntimeEnabledFeatures.h" |
| 36 #include "platform/ScriptForbiddenScope.h" | 36 #include "platform/ScriptForbiddenScope.h" |
| 37 #include "platform/TraceEvent.h" | 37 #include "platform/TraceEvent.h" |
| 38 #include "platform/heap/BlinkGCMemoryDumpProvider.h" | 38 #include "platform/heap/BlinkGCMemoryDumpProvider.h" |
| 39 #include "platform/heap/CallbackStack.h" | 39 #include "platform/heap/CallbackStack.h" |
| 40 #include "platform/heap/Handle.h" | 40 #include "platform/heap/Handle.h" |
| 41 #include "platform/heap/Heap.h" | 41 #include "platform/heap/Heap.h" |
| 42 #include "platform/heap/PagePool.h" | |
| 43 #include "platform/heap/SafePoint.h" | 42 #include "platform/heap/SafePoint.h" |
| 44 #include "platform/heap/Visitor.h" | 43 #include "platform/heap/Visitor.h" |
| 45 #include "platform/web_memory_allocator_dump.h" | 44 #include "platform/web_memory_allocator_dump.h" |
| 46 #include "platform/web_process_memory_dump.h" | 45 #include "platform/web_process_memory_dump.h" |
| 47 #include "public/platform/Platform.h" | 46 #include "public/platform/Platform.h" |
| 48 #include "public/platform/WebScheduler.h" | 47 #include "public/platform/WebScheduler.h" |
| 49 #include "public/platform/WebThread.h" | 48 #include "public/platform/WebThread.h" |
| 50 #include "public/platform/WebTraceLocation.h" | 49 #include "public/platform/WebTraceLocation.h" |
| 51 #include "wtf/CurrentTime.h" | 50 #include "wtf/CurrentTime.h" |
| 52 #include "wtf/DataLog.h" | 51 #include "wtf/DataLog.h" |
| (...skipping 19 matching lines...) |
| 72 | 71 |
| 73 namespace blink { | 72 namespace blink { |
| 74 | 73 |
| 75 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; | 74 WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = nullptr; |
| 76 uintptr_t ThreadState::s_mainThreadStackStart = 0; | 75 uintptr_t ThreadState::s_mainThreadStackStart = 0; |
| 77 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; | 76 uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0; |
| 78 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; | 77 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; |
| 79 | 78 |
| 80 const size_t defaultAllocatedObjectSizeThreshold = 100 * 1024; | 79 const size_t defaultAllocatedObjectSizeThreshold = 100 * 1024; |
| 81 | 80 |
| 82 const char* gcReasonString(BlinkGC::GCReason reason) | |
| 83 { | |
| 84 switch (reason) { | |
| 85 case BlinkGC::IdleGC: | |
| 86 return "IdleGC"; | |
| 87 case BlinkGC::PreciseGC: | |
| 88 return "PreciseGC"; | |
| 89 case BlinkGC::ConservativeGC: | |
| 90 return "ConservativeGC"; | |
| 91 case BlinkGC::ForcedGC: | |
| 92 return "ForcedGC"; | |
| 93 case BlinkGC::MemoryPressureGC: | |
| 94 return "MemoryPressureGC"; | |
| 95 case BlinkGC::PageNavigationGC: | |
| 96 return "PageNavigationGC"; | |
| 97 default: | |
| 98 NOTREACHED(); | |
| 99 } | |
| 100 return "<Unknown>"; | |
| 101 } | |
| 102 | |
| 103 class ParkThreadsScope final { | |
| 104 STACK_ALLOCATED(); | |
| 105 public: | |
| 106 explicit ParkThreadsScope(ThreadState* state) | |
| 107 : m_state(state) | |
| 108 , m_shouldResumeThreads(false) | |
| 109 { | |
| 110 } | |
| 111 | |
| 112 bool parkThreads() | |
| 113 { | |
| 114 TRACE_EVENT0("blink_gc", "ThreadHeap::ParkThreadsScope"); | |
| 115 const char* samplingState = TRACE_EVENT_GET_SAMPLING_STATE(); | |
| 116 if (m_state->isMainThread()) | |
| 117 TRACE_EVENT_SET_SAMPLING_STATE("blink_gc", "BlinkGCWaiting"); | |
| 118 | |
| 119 // TODO(haraken): In an unlikely coincidence that two threads decide | |
| 120 // to collect garbage at the same time, avoid doing two GCs in | |
| 121 // a row and return false. | |
| 122 double startTime = WTF::currentTimeMS(); | |
| 123 | |
| 124 m_shouldResumeThreads = m_state->heap().park(); | |
| 125 | |
| 126 double timeForStoppingThreads = WTF::currentTimeMS() - startTime; | |
| 127 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, timeToStopThreadsHistogram, new CustomCountHistogram("BlinkGC.TimeForStoppingThreads", 1, 1000, 50)); | |
| 128 timeToStopThreadsHistogram.count(timeForStoppingThreads); | |
| 129 | |
| 130 if (m_state->isMainThread()) | |
| 131 TRACE_EVENT_SET_NONCONST_SAMPLING_STATE(samplingState); | |
| 132 return m_shouldResumeThreads; | |
| 133 } | |
| 134 | |
| 135 ~ParkThreadsScope() | |
| 136 { | |
| 137 // Only cleanup if we parked all threads in which case the GC happened | |
| 138 // and we need to resume the other threads. | |
| 139 if (m_shouldResumeThreads) | |
| 140 m_state->heap().resume(); | |
| 141 } | |
| 142 | |
| 143 private: | |
| 144 ThreadState* m_state; | |
| 145 bool m_shouldResumeThreads; | |
| 146 }; | |
| 147 | |
| 148 ThreadState::ThreadState(bool perThreadHeapEnabled) | 81 ThreadState::ThreadState(bool perThreadHeapEnabled) |
| 149 : m_thread(currentThread()) | 82 : m_thread(currentThread()) |
| 150 , m_persistentRegion(wrapUnique(new PersistentRegion())) | 83 , m_persistentRegion(wrapUnique(new PersistentRegion())) |
| 151 #if OS(WIN) && COMPILER(MSVC) | 84 #if OS(WIN) && COMPILER(MSVC) |
| 152 , m_threadStackSize(0) | 85 , m_threadStackSize(0) |
| 153 #endif | 86 #endif |
| 154 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 87 , m_startOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 155 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) | 88 , m_endOfStack(reinterpret_cast<intptr_t*>(StackFrameDepth::getStackStart())) |
| 156 , m_safePointScopeMarker(nullptr) | 89 , m_safePointScopeMarker(nullptr) |
| 157 , m_atSafePoint(false) | 90 , m_atSafePoint(false) |
| (...skipping 140 matching lines...) |
| 298 prepareForThreadStateTermination(); | 231 prepareForThreadStateTermination(); |
| 299 | 232 |
| 300 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); | 233 ProcessHeap::crossThreadPersistentRegion().prepareForThreadStateTermination(this); |
| 301 | 234 |
| 302 // Do thread local GC's as long as the count of thread local Persistents | 235 // Do thread local GC's as long as the count of thread local Persistents |
| 303 // changes and is above zero. | 236 // changes and is above zero. |
| 304 int oldCount = -1; | 237 int oldCount = -1; |
| 305 int currentCount = getPersistentRegion()->numberOfPersistents(); | 238 int currentCount = getPersistentRegion()->numberOfPersistents(); |
| 306 ASSERT(currentCount >= 0); | 239 ASSERT(currentCount >= 0); |
| 307 while (currentCount != oldCount) { | 240 while (currentCount != oldCount) { |
| 308 collectGarbageForTerminatingThread(); | 241 ThreadHeap::collectGarbageForTerminatingThread(this); |
| 309 // Release the thread-local static persistents that were | 242 // Release the thread-local static persistents that were |
| 310 // instantiated while running the termination GC. | 243 // instantiated while running the termination GC. |
| 311 releaseStaticPersistentNodes(); | 244 releaseStaticPersistentNodes(); |
| 312 oldCount = currentCount; | 245 oldCount = currentCount; |
| 313 currentCount = getPersistentRegion()->numberOfPersistents(); | 246 currentCount = getPersistentRegion()->numberOfPersistents(); |
| 314 } | 247 } |
| 315 // We should not have any persistents left when getting to this point, | 248 // We should not have any persistents left when getting to this point, |
| 316 // if we have it is probably a bug so adding a debug ASSERT to catch this. | 249 // if we have it is probably a bug so adding a debug ASSERT to catch this. |
| 317 ASSERT(!currentCount); | 250 ASSERT(!currentCount); |
| 318 // All of pre-finalizers should be consumed. | 251 // All of pre-finalizers should be consumed. |
| 319 ASSERT(m_orderedPreFinalizers.isEmpty()); | 252 ASSERT(m_orderedPreFinalizers.isEmpty()); |
| 320 RELEASE_ASSERT(gcState() == NoGCScheduled); | 253 RELEASE_ASSERT(gcState() == NoGCScheduled); |
| 321 | 254 |
| 322 // Add pages to the orphaned page pool to ensure any global GCs from this point | 255 // Add pages to the orphaned page pool to ensure any global GCs from this point |
| 323 // on will not trace objects on this thread's arenas. | 256 // on will not trace objects on this thread's arenas. |
| 324 cleanupPages(); | 257 cleanupPages(); |
| 325 } | 258 } |
| 326 | 259 |
| 327 void ThreadState::cleanupMainThread() | 260 void ThreadState::cleanupMainThread() |
| 328 { | 261 { |
| 329 ASSERT(isMainThread()); | 262 ASSERT(isMainThread()); |
| 330 | 263 |
| 331 #if defined(LEAK_SANITIZER) | 264 #if defined(LEAK_SANITIZER) |
| 332 // See comment below, clear out most garbage before releasing static | 265 // See comment below, clear out most garbage before releasing static |
| 333 // persistents should some of the finalizers depend on touching | 266 // persistents should some of the finalizers depend on touching |
| 334 // these persistents. | 267 // these persistents. |
| 335 collectAllGarbage(); | 268 ThreadHeap::collectAllGarbage(); |
| 336 #endif | 269 #endif |
| 337 | 270 |
| 338 releaseStaticPersistentNodes(); | 271 releaseStaticPersistentNodes(); |
| 339 | 272 |
| 340 #if defined(LEAK_SANITIZER) | 273 #if defined(LEAK_SANITIZER) |
| 341 // If LSan is about to perform leak detection, after having released all | 274 // If LSan is about to perform leak detection, after having released all |
| 342 // the registered static Persistent<> root references to global caches | 275 // the registered static Persistent<> root references to global caches |
| 343 // that Blink keeps, follow up with a round of GCs to clear out all | 276 // that Blink keeps, follow up with a round of GCs to clear out all |
| 344 // what they referred to. | 277 // what they referred to. |
| 345 // | 278 // |
| 346 // This is not needed for caches over non-Oilpan objects, as they're | 279 // This is not needed for caches over non-Oilpan objects, as they're |
| 347 // not scanned by LSan due to being held in non-global storage | 280 // not scanned by LSan due to being held in non-global storage |
| 348 // ("static" references inside functions/methods.) | 281 // ("static" references inside functions/methods.) |
| 349 collectAllGarbage(); | 282 ThreadHeap::collectAllGarbage(); |
| 350 #endif | 283 #endif |
| 351 | 284 |
| 352 // Finish sweeping before shutting down V8. Otherwise, some destructor | 285 // Finish sweeping before shutting down V8. Otherwise, some destructor |
| 353 // may access V8 and cause crashes. | 286 // may access V8 and cause crashes. |
| 354 completeSweep(); | 287 completeSweep(); |
| 355 | 288 |
| 356 // It is unsafe to trigger GCs after this point because some | 289 // It is unsafe to trigger GCs after this point because some |
| 357 // destructor may access already-detached V8 and cause crashes. | 290 // destructor may access already-detached V8 and cause crashes. |
| 358 // Also it is useless. So we forbid GCs. | 291 // Also it is useless. So we forbid GCs. |
| 359 enterGCForbiddenScope(); | 292 enterGCForbiddenScope(); |
| (...skipping 322 matching lines...) |
| 682 // TODO(haraken): It might not make sense to force completeSweep() for all | 615 // TODO(haraken): It might not make sense to force completeSweep() for all |
| 683 // page navigations. | 616 // page navigations. |
| 684 completeSweep(); | 617 completeSweep(); |
| 685 ASSERT(!isSweepingInProgress()); | 618 ASSERT(!isSweepingInProgress()); |
| 686 ASSERT(!sweepForbidden()); | 619 ASSERT(!sweepForbidden()); |
| 687 | 620 |
| 688 if (shouldForceMemoryPressureGC()) { | 621 if (shouldForceMemoryPressureGC()) { |
| 689 #if PRINT_HEAP_STATS | 622 #if PRINT_HEAP_STATS |
| 690 dataLogF("Scheduled MemoryPressureGC\n"); | 623 dataLogF("Scheduled MemoryPressureGC\n"); |
| 691 #endif | 624 #endif |
| 692 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); | 625 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); |
| 693 return; | 626 return; |
| 694 } | 627 } |
| 695 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { | 628 if (shouldSchedulePageNavigationGC(estimatedRemovalRatio)) { |
| 696 #if PRINT_HEAP_STATS | 629 #if PRINT_HEAP_STATS |
| 697 dataLogF("Scheduled PageNavigationGC\n"); | 630 dataLogF("Scheduled PageNavigationGC\n"); |
| 698 #endif | 631 #endif |
| 699 schedulePageNavigationGC(); | 632 schedulePageNavigationGC(); |
| 700 } | 633 } |
| 701 } | 634 } |
| 702 | 635 |
| (...skipping 23 matching lines...) |
| 726 ASSERT(!sweepForbidden()); | 659 ASSERT(!sweepForbidden()); |
| 727 | 660 |
| 728 reportMemoryToV8(); | 661 reportMemoryToV8(); |
| 729 | 662 |
| 730 if (shouldForceMemoryPressureGC()) { | 663 if (shouldForceMemoryPressureGC()) { |
| 731 completeSweep(); | 664 completeSweep(); |
| 732 if (shouldForceMemoryPressureGC()) { | 665 if (shouldForceMemoryPressureGC()) { |
| 733 #if PRINT_HEAP_STATS | 666 #if PRINT_HEAP_STATS |
| 734 dataLogF("Scheduled MemoryPressureGC\n"); | 667 dataLogF("Scheduled MemoryPressureGC\n"); |
| 735 #endif | 668 #endif |
| 736 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); | 669 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::MemoryPressureGC); |
| 737 return; | 670 return; |
| 738 } | 671 } |
| 739 } | 672 } |
| 740 | 673 |
| 741 if (shouldForceConservativeGC()) { | 674 if (shouldForceConservativeGC()) { |
| 742 completeSweep(); | 675 completeSweep(); |
| 743 if (shouldForceConservativeGC()) { | 676 if (shouldForceConservativeGC()) { |
| 744 #if PRINT_HEAP_STATS | 677 #if PRINT_HEAP_STATS |
| 745 dataLogF("Scheduled ConservativeGC\n"); | 678 dataLogF("Scheduled ConservativeGC\n"); |
| 746 #endif | 679 #endif |
| 747 collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); | 680 ThreadHeap::collectGarbage(BlinkGC::HeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::ConservativeGC); |
| 748 return; | 681 return; |
| 749 } | 682 } |
| 750 } | 683 } |
| 751 if (shouldScheduleIdleGC()) { | 684 if (shouldScheduleIdleGC()) { |
| 752 #if PRINT_HEAP_STATS | 685 #if PRINT_HEAP_STATS |
| 753 dataLogF("Scheduled IdleGC\n"); | 686 dataLogF("Scheduled IdleGC\n"); |
| 754 #endif | 687 #endif |
| 755 scheduleIdleGC(); | 688 scheduleIdleGC(); |
| 756 return; | 689 return; |
| 757 } | 690 } |
| (...skipping 25 matching lines...) |
| 783 | 716 |
| 784 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime(); | 717 double idleDeltaInSeconds = deadlineSeconds - monotonicallyIncreasingTime(); |
| 785 if (idleDeltaInSeconds <= m_heap->heapStats().estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { | 718 if (idleDeltaInSeconds <= m_heap->heapStats().estimatedMarkingTime() && !Platform::current()->currentThread()->scheduler()->canExceedIdleDeadlineIfRequired()) { |
| 786 // If marking is estimated to take longer than the deadline and we can't | 719 // If marking is estimated to take longer than the deadline and we can't |
| 787 // exceed the deadline, then reschedule for the next idle period. | 720 // exceed the deadline, then reschedule for the next idle period. |
| 788 scheduleIdleGC(); | 721 scheduleIdleGC(); |
| 789 return; | 722 return; |
| 790 } | 723 } |
| 791 | 724 |
| 792 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", m_heap->heapStats().estimatedMarkingTime()); | 725 TRACE_EVENT2("blink_gc", "ThreadState::performIdleGC", "idleDeltaInSeconds", idleDeltaInSeconds, "estimatedMarkingTime", m_heap->heapStats().estimatedMarkingTime()); |
| 793 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); | 726 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC); |
| 794 } | 727 } |
| 795 | 728 |
| 796 void ThreadState::performIdleLazySweep(double deadlineSeconds) | 729 void ThreadState::performIdleLazySweep(double deadlineSeconds) |
| 797 { | 730 { |
| 798 ASSERT(checkThread()); | 731 ASSERT(checkThread()); |
| 799 ASSERT(isMainThread()); | 732 ASSERT(isMainThread()); |
| 800 | 733 |
| 801 // If we are not in a sweeping phase, there is nothing to do here. | 734 // If we are not in a sweeping phase, there is nothing to do here. |
| 802 if (!isSweepingInProgress()) | 735 if (!isSweepingInProgress()) |
| 803 return; | 736 return; |
| (...skipping 155 matching lines...) |
| 959 // If a safe point is entered while initiating a GC, we clearly do | 892 // If a safe point is entered while initiating a GC, we clearly do |
| 960 // not want to do another as part that -- the safe point is only | 893 // not want to do another as part that -- the safe point is only |
| 961 // entered after checking if a scheduled GC ought to run first. | 894 // entered after checking if a scheduled GC ought to run first. |
| 962 // Prevent that from happening by marking GCs as forbidden while | 895 // Prevent that from happening by marking GCs as forbidden while |
| 963 // one is initiated and later running. | 896 // one is initiated and later running. |
| 964 if (isGCForbidden()) | 897 if (isGCForbidden()) |
| 965 return; | 898 return; |
| 966 | 899 |
| 967 switch (gcState()) { | 900 switch (gcState()) { |
| 968 case FullGCScheduled: | 901 case FullGCScheduled: |
| 969 collectAllGarbage(); | 902 ThreadHeap::collectAllGarbage(); |
| 970 break; | 903 break; |
| 971 case PreciseGCScheduled: | 904 case PreciseGCScheduled: |
| 972 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); | 905 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::PreciseGC); |
| 973 break; | 906 break; |
| 974 case PageNavigationGCScheduled: | 907 case PageNavigationGCScheduled: |
| 975 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); | 908 ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::PageNavigationGC); |
| 976 break; | 909 break; |
| 977 case IdleGCScheduled: | 910 case IdleGCScheduled: |
| 978 // Idle time GC will be scheduled by Blink Scheduler. | 911 // Idle time GC will be scheduled by Blink Scheduler. |
| 979 break; | 912 break; |
| 980 default: | 913 default: |
| 981 break; | 914 break; |
| 982 } | 915 } |
| 983 } | 916 } |
| 984 | 917 |
| 985 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() | 918 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() |
| (...skipping 613 matching lines...) |
| 1599 threadDump->AddScalar("live_count", "objects", totalLiveCount); | 1532 threadDump->AddScalar("live_count", "objects", totalLiveCount); |
| 1600 threadDump->AddScalar("dead_count", "objects", totalDeadCount); | 1533 threadDump->AddScalar("dead_count", "objects", totalDeadCount); |
| 1601 threadDump->AddScalar("live_size", "bytes", totalLiveSize); | 1534 threadDump->AddScalar("live_size", "bytes", totalLiveSize); |
| 1602 threadDump->AddScalar("dead_size", "bytes", totalDeadSize); | 1535 threadDump->AddScalar("dead_size", "bytes", totalDeadSize); |
| 1603 | 1536 |
| 1604 base::trace_event::MemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); | 1537 base::trace_event::MemoryAllocatorDump* heapsDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(heapsDumpName); |
| 1605 base::trace_event::MemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); | 1538 base::trace_event::MemoryAllocatorDump* classesDump = BlinkGCMemoryDumpProvider::instance()->createMemoryAllocatorDumpForCurrentGC(classesDumpName); |
| 1606 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); | 1539 BlinkGCMemoryDumpProvider::instance()->currentProcessMemoryDump()->AddOwnershipEdge(classesDump->guid(), heapsDump->guid()); |
| 1607 } | 1540 } |
| 1608 | 1541 |
| 1609 void ThreadState::collectGarbage(BlinkGC::StackState stackState, BlinkGC::GCType gcType, BlinkGC::GCReason reason) | |
| 1610 { | |
| 1611 DCHECK_NE(gcType, BlinkGC::ThreadTerminationGC); | |
| 1612 | |
| 1613 // Nested collectGarbage() invocations aren't supported. | |
| 1614 RELEASE_ASSERT(!isGCForbidden()); | |
| 1615 completeSweep(); | |
| 1616 | |
| 1617 std::unique_ptr<Visitor> visitor = Visitor::create(this, gcType); | |
| 1618 | |
| 1619 SafePointScope safePointScope(stackState, this); | |
| 1620 | |
| 1621 // Resume all parked threads upon leaving this scope. | |
| 1622 ParkThreadsScope parkThreadsScope(this); | |
| 1623 | |
| 1624 // Try to park the other threads. If we're unable to, bail out of the GC. | |
| 1625 if (!parkThreadsScope.parkThreads()) | |
| 1626 return; | |
| 1627 | |
| 1628 ScriptForbiddenIfMainThreadScope scriptForbidden; | |
| 1629 | |
| 1630 TRACE_EVENT2("blink_gc,devtools.timeline", "BlinkGCMarking", | |
| 1631 "lazySweeping", gcType == BlinkGC::GCWithoutSweep, | |
| 1632 "gcReason", gcReasonString(reason)); | |
| 1633 TRACE_EVENT_SCOPED_SAMPLING_STATE("blink_gc", "BlinkGC"); | |
| 1634 double startTime = WTF::currentTimeMS(); | |
| 1635 | |
| 1636 if (gcType == BlinkGC::TakeSnapshot) | |
| 1637 BlinkGCMemoryDumpProvider::instance()->clearProcessDumpForCurrentGC(); | |
| 1638 | |
| 1639 // Disallow allocation during garbage collection (but not during the | |
| 1640 // finalization that happens when the visitorScope is torn down). | |
| 1641 ThreadState::NoAllocationScope noAllocationScope(this); | |
| 1642 | |
| 1643 heap().commitCallbackStacks(); | |
| 1644 heap().preGC(); | |
| 1645 | |
| 1646 StackFrameDepthScope stackDepthScope(&heap().stackFrameDepth()); | |
| 1647 | |
| 1648 size_t totalObjectSize = heap().heapStats().allocatedObjectSize() + heap().heapStats().markedObjectSize(); | |
| 1649 if (gcType != BlinkGC::TakeSnapshot) | |
| 1650 heap().resetHeapCounters(); | |
| 1651 | |
| 1652 { | |
| 1653 // Access to the CrossThreadPersistentRegion has to be prevented while | |
| 1654 // marking and global weak processing is in progress. If not, threads | |
| 1655 // not attached to Oilpan and participating in this GC are able | |
| 1656 // to allocate & free PersistentNodes, something the marking phase isn't | |
| 1657 // capable of handling. | |
| 1658 CrossThreadPersistentRegion::LockScope persistentLock(ProcessHeap::crossThreadPersistentRegion()); | |
| 1659 | |
| 1660 // 1. Trace persistent roots. | |
| 1661 heap().visitPersistentRoots(visitor.get()); | |
| 1662 | |
| 1663 // 2. Trace objects reachable from the stack. We do this independent of the | |
| 1664 // given stackState since other threads might have a different stack state. | |
| 1665 heap().visitStackRoots(visitor.get()); | |
| 1666 | |
| 1667 // 3. Transitive closure to trace objects including ephemerons. | |
| 1668 heap().processMarkingStack(visitor.get()); | |
| 1669 | |
| 1670 heap().postMarkingProcessing(visitor.get()); | |
| 1671 heap().globalWeakProcessing(visitor.get()); | |
| 1672 } | |
| 1673 | |
| 1674 // Now we can delete all orphaned pages because there are no dangling | |
| 1675 // pointers to the orphaned pages. (If we have such dangling pointers, | |
| 1676 // we should have crashed during marking before getting here.) | |
| 1677 heap().getOrphanedPagePool()->decommitOrphanedPages(); | |
| 1678 | |
| 1679 double markingTimeInMilliseconds = WTF::currentTimeMS() - startTime; | |
| 1680 heap().heapStats().setEstimatedMarkingTimePerByte(totalObjectSize ? (markingTimeInMilliseconds / 1000 / totalObjectSize) : 0); | |
| 1681 | |
| 1682 #if PRINT_HEAP_STATS | |
| 1683 dataLogF("ThreadHeap::collectGarbage (gcReason=%s, lazySweeping=%d, time=%.1lfms)\n", gcReasonString(reason), gcType == BlinkGC::GCWithoutSweep, markingTimeInMilliseconds); | |
| 1684 #endif | |
| 1685 | |
| 1686 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, markingTimeHistogram, new CustomCountHistogram("BlinkGC.CollectGarbage", 0, 10 * 1000, 50)); | |
| 1687 markingTimeHistogram.count(markingTimeInMilliseconds); | |
| 1688 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalObjectSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalObjectSpace", 0, 4 * 1024 * 1024, 50)); | |
| 1689 totalObjectSpaceHistogram.count(ProcessHeap::totalAllocatedObjectSize() / 1024); | |
| 1690 DEFINE_THREAD_SAFE_STATIC_LOCAL(CustomCountHistogram, totalAllocatedSpaceHistogram, new CustomCountHistogram("BlinkGC.TotalAllocatedSpace", 0, 4 * 1024 * 1024, 50)); | |
| 1691 totalAllocatedSpaceHistogram.count(ProcessHeap::totalAllocatedSpace() / 1024); | |
| 1692 DEFINE_THREAD_SAFE_STATIC_LOCAL(EnumerationHistogram, gcReasonHistogram, new EnumerationHistogram("BlinkGC.GCReason", BlinkGC::NumberOfGCReason)); | |
| 1693 gcReasonHistogram.count(reason); | |
| 1694 | |
| 1695 heap().m_lastGCReason = reason; | |
| 1696 | |
| 1697 ThreadHeap::reportMemoryUsageHistogram(); | |
| 1698 WTF::Partitions::reportMemoryUsageHistogram(); | |
| 1699 | |
| 1700 heap().postGC(gcType); | |
| 1701 heap().decommitCallbackStacks(); | |
| 1702 } | |
| 1703 | |
| 1704 void ThreadState::collectGarbageForTerminatingThread() | |
| 1705 { | |
| 1706 { | |
| 1707 // A thread-specific termination GC must not allow other global GCs to go | |
| 1708 // ahead while it is running, hence the termination GC does not enter a | |
| 1709 // safepoint. VisitorScope will not enter also a safepoint scope for | |
| 1710 // ThreadTerminationGC. | |
| 1711 std::unique_ptr<Visitor> visitor = Visitor::create(this, BlinkGC::ThreadTerminationGC); | |
| 1712 | |
| 1713 ThreadState::NoAllocationScope noAllocationScope(this); | |
| 1714 | |
| 1715 heap().commitCallbackStacks(); | |
| 1716 preGC(); | |
| 1717 | |
| 1718 // 1. Trace the thread local persistent roots. For thread local GCs we | |
| 1719 // don't trace the stack (ie. no conservative scanning) since this is | |
| 1720 // only called during thread shutdown where there should be no objects | |
| 1721 // on the stack. | |
| 1722 // We also assume that orphaned pages have no objects reachable from | |
| 1723 // persistent handles on other threads or CrossThreadPersistents. The | |
| 1724 // only cases where this could happen is if a subsequent conservative | |
| 1725 // global GC finds a "pointer" on the stack or due to a programming | |
| 1726 // error where an object has a dangling cross-thread pointer to an | |
| 1727 // object on this heap. | |
| 1728 visitPersistents(visitor.get()); | |
| 1729 | |
| 1730 // 2. Trace objects reachable from the thread's persistent roots | |
| 1731 // including ephemerons. | |
| 1732 heap().processMarkingStack(visitor.get()); | |
| 1733 | |
| 1734 heap().postMarkingProcessing(visitor.get()); | |
| 1735 heap().globalWeakProcessing(visitor.get()); | |
| 1736 | |
| 1737 postGC(BlinkGC::GCWithSweep); | |
| 1738 heap().decommitCallbackStacks(); | |
| 1739 } | |
| 1740 preSweep(); | |
| 1741 } | |
| 1742 | |
| 1743 void ThreadState::collectAllGarbage() | |
| 1744 { | |
| 1745 // We need to run multiple GCs to collect a chain of persistent handles. | |
| 1746 size_t previousLiveObjects = 0; | |
| 1747 for (int i = 0; i < 5; ++i) { | |
| 1748 collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithSweep, BlinkGC::ForcedGC); | |
| 1749 size_t liveObjects = heap().heapStats().markedObjectSize(); | |
| 1750 if (liveObjects == previousLiveObjects) | |
| 1751 break; | |
| 1752 previousLiveObjects = liveObjects; | |
| 1753 } | |
| 1754 } | |
| 1755 | |
| 1756 } // namespace blink | 1542 } // namespace blink |
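
Taken together, this patch removes the GC driver logic from ThreadState.cpp — `gcReasonString()`, `ParkThreadsScope`, `collectGarbage()`, `collectGarbageForTerminatingThread()`, and `collectAllGarbage()` — and redirects every call site in this file to static entry points on `ThreadHeap` (note the dropped `platform/heap/PagePool.h` include and the new `ThreadHeap::collectGarbageForTerminatingThread(this)` call). The sketch below shows the interface shape implied by the new call sites; it is inferred from this diff rather than copied from the real headers, so the exact declarations and parameter names are assumptions.

```cpp
// Sketch only: ThreadHeap GC entry points as implied by the new call sites in
// ThreadState.cpp. Assumes the BlinkGC::StackState/GCType/GCReason enums come
// from "platform/heap/BlinkGC.h"; this is not the actual Heap.h declaration.
#include "platform/heap/BlinkGC.h"

namespace blink {

class ThreadState;

class ThreadHeap {
public:
    // Whole-heap GC: parks the other threads, marks from persistent and
    // (optionally) stack roots, then sweeps eagerly or lazily per gcType.
    static void collectGarbage(BlinkGC::StackState, BlinkGC::GCType, BlinkGC::GCReason);

    // Repeats a ForcedGC (the removed ThreadState version capped this at five
    // rounds) until the marked object size stops shrinking, so that chains of
    // persistent handles are fully collected.
    static void collectAllGarbage();

    // Termination GC for a detaching thread: traces only that thread's
    // persistents, with no conservative stack scan.
    static void collectGarbageForTerminatingThread(ThreadState*);
};

} // namespace blink
```

Call sites in this file change mechanically, e.g. `collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC)` becomes `ThreadHeap::collectGarbage(BlinkGC::NoHeapPointersOnStack, BlinkGC::GCWithoutSweep, BlinkGC::IdleGC)`, moving the GC machinery onto the heap rather than the per-thread state.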