| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2013 Google Inc. All rights reserved. | 2 * Copyright (C) 2013 Google Inc. All rights reserved. |
| 3 * | 3 * |
| 4 * Redistribution and use in source and binary forms, with or without | 4 * Redistribution and use in source and binary forms, with or without |
| 5 * modification, are permitted provided that the following conditions are | 5 * modification, are permitted provided that the following conditions are |
| 6 * met: | 6 * met: |
| 7 * | 7 * |
| 8 * * Redistributions of source code must retain the above copyright | 8 * * Redistributions of source code must retain the above copyright |
| 9 * notice, this list of conditions and the following disclaimer. | 9 * notice, this list of conditions and the following disclaimer. |
| 10 * * Redistributions in binary form must reproduce the above | 10 * * Redistributions in binary form must reproduce the above |
| (...skipping 308 matching lines...) |
| 319 }; | 319 }; |
| 320 | 320 |
| 321 ThreadState::ThreadState() | 321 ThreadState::ThreadState() |
| 322 : m_thread(currentThread()) | 322 : m_thread(currentThread()) |
| 323 , m_persistents(adoptPtr(new PersistentAnchor())) | 323 , m_persistents(adoptPtr(new PersistentAnchor())) |
| 324 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 324 , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 325 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) | 325 , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart())) |
| 326 , m_safePointScopeMarker(0) | 326 , m_safePointScopeMarker(0) |
| 327 , m_atSafePoint(false) | 327 , m_atSafePoint(false) |
| 328 , m_interruptors() | 328 , m_interruptors() |
| 329 , m_gcRequested(false) | |
| 330 , m_didV8GCAfterLastGC(false) | 329 , m_didV8GCAfterLastGC(false) |
| 331 , m_forcePreciseGCForTesting(false) | 330 , m_forcePreciseGCForTesting(false) |
| 332 , m_sweepRequested(0) | |
| 333 , m_sweepInProgress(false) | 331 , m_sweepInProgress(false) |
| 334 , m_noAllocationCount(0) | 332 , m_noAllocationCount(0) |
| 335 , m_inGC(false) | |
| 336 , m_isTerminating(false) | 333 , m_isTerminating(false) |
| 337 , m_shouldFlushHeapDoesNotContainCache(false) | 334 , m_shouldFlushHeapDoesNotContainCache(false) |
| 338 , m_collectionRate(0) | 335 , m_collectionRate(0) |
| 336 , m_gcState(NoGCScheduled) |
| 339 , m_traceDOMWrappers(0) | 337 , m_traceDOMWrappers(0) |
| 340 #if defined(ADDRESS_SANITIZER) | 338 #if defined(ADDRESS_SANITIZER) |
| 341 , m_asanFakeStack(__asan_get_current_fake_stack()) | 339 , m_asanFakeStack(__asan_get_current_fake_stack()) |
| 342 #endif | 340 #endif |
| 343 { | 341 { |
| 344 checkThread(); | 342 checkThread(); |
| 345 ASSERT(!**s_threadSpecific); | 343 ASSERT(!**s_threadSpecific); |
| 346 **s_threadSpecific = this; | 344 **s_threadSpecific = this; |
| 347 | 345 |
| 348 if (isMainThread()) { | 346 if (isMainThread()) { |
| 349 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); | 347 s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*); |
| 350 s_mainThreadUnderestimatedStackSize = getUnderestimatedStackSize() - sizeof(void*); | 348 s_mainThreadUnderestimatedStackSize = getUnderestimatedStackSize() - sizeof(void*); |
| 351 } | 349 } |
| 352 | 350 |
| 353 InitializeHeaps<NumberOfHeaps>::init(m_heaps, this); | 351 InitializeHeaps<NumberOfHeaps>::init(m_heaps, this); |
| 354 | 352 |
| 355 m_weakCallbackStack = new CallbackStack(); | 353 m_weakCallbackStack = new CallbackStack(); |
| 356 } | 354 } |
| 357 | 355 |
| 358 ThreadState::~ThreadState() | 356 ThreadState::~ThreadState() |
| 359 { | 357 { |
| 360 checkThread(); | 358 checkThread(); |
| 359 ASSERT(gcState() == NoGCScheduled); |
| 361 delete m_weakCallbackStack; | 360 delete m_weakCallbackStack; |
| 362 m_weakCallbackStack = 0; | 361 m_weakCallbackStack = 0; |
| 363 for (int i = 0; i < NumberOfHeaps; i++) | 362 for (int i = 0; i < NumberOfHeaps; i++) |
| 364 delete m_heaps[i]; | 363 delete m_heaps[i]; |
| 365 deleteAllValues(m_interruptors); | 364 deleteAllValues(m_interruptors); |
| 366 **s_threadSpecific = 0; | 365 **s_threadSpecific = 0; |
| 367 if (isMainThread()) { | 366 if (isMainThread()) { |
| 368 s_mainThreadStackStart = 0; | 367 s_mainThreadStackStart = 0; |
| 369 s_mainThreadUnderestimatedStackSize = 0; | 368 s_mainThreadUnderestimatedStackSize = 0; |
| 370 } | 369 } |
| (...skipping 386 matching lines...) |
| 757 // increase in size, but not for less than 4MB. | 756 // increase in size, but not for less than 4MB. |
| 758 return newSize >= 4 * 1024 * 1024 && newSize > 2 * Heap::markedObjectSize(); | 757 return newSize >= 4 * 1024 * 1024 && newSize > 2 * Heap::markedObjectSize(); |
| 759 } | 758 } |
| 760 // Otherwise, trigger a conservative GC on a 300% increase in size, but not | 759 // Otherwise, trigger a conservative GC on a 300% increase in size, but not |
| 761 // for less than 32MB. We set the higher limit in this case because Oilpan | 760 // for less than 32MB. We set the higher limit in this case because Oilpan |
| 762 // GC is unlikely to collect a lot of objects without having a V8 GC. | 761 // GC is unlikely to collect a lot of objects without having a V8 GC. |
| 763 // FIXME: Is 32MB reasonable? | 762 // FIXME: Is 32MB reasonable? |
| 764 return newSize >= 32 * 1024 * 1024 && newSize > 4 * Heap::markedObjectSize(); | 763 return newSize >= 32 * 1024 * 1024 && newSize > 4 * Heap::markedObjectSize(); |
| 765 } | 764 } |
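Note on the thresholds above: the first return fires once the heap has doubled relative to the marked size and is at least 4MB, the second once it has quadrupled and is at least 32MB. A small standalone sketch of that predicate with illustrative numbers (the guard that selects between the two branches sits above this hunk and is assumed here to be m_didV8GCAfterLastGC; nothing below is code from the patch):

    #include <cstddef>
    #include <cstdio>

    // Mirrors the two return statements in the hunk above; markedSize stands in
    // for Heap::markedObjectSize() at the time of the check.
    static bool shouldForceConservativeGC(bool didV8GCAfterLastGC, size_t newSize, size_t markedSize)
    {
        if (didV8GCAfterLastGC)
            return newSize >= 4 * 1024 * 1024 && newSize > 2 * markedSize;  // 100% growth, >= 4MB
        return newSize >= 32 * 1024 * 1024 && newSize > 4 * markedSize;     // 300% growth, >= 32MB
    }

    int main()
    {
        const size_t MB = 1024 * 1024;
        size_t marked = 10 * MB; // 10MB of objects survived the last Oilpan GC
        printf("%d\n", shouldForceConservativeGC(true, 21 * MB, marked));  // 1: 21MB > 2 * 10MB
        printf("%d\n", shouldForceConservativeGC(false, 39 * MB, marked)); // 0: 39MB is not > 4 * 10MB
        printf("%d\n", shouldForceConservativeGC(false, 41 * MB, marked)); // 1: 41MB clears both limits
    }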
| 766 | 765 |
| 767 bool ThreadState::sweepRequested() | 766 void ThreadState::setGCState(GCState gcState) |
| 768 { | 767 { |
| 769 ASSERT(Heap::isInGC() || checkThread()); | 768 switch (gcState) { |
| 770 return m_sweepRequested; | 769 case NoGCScheduled: |
| 770 checkThread(); |
| 771 RELEASE_ASSERT(m_gcState == Sweeping); |
| 772 break; |
| 773 case GCScheduled: |
| 774 checkThread(); |
| 775 RELEASE_ASSERT(m_gcState == NoGCScheduled || m_gcState == GCScheduled || m_gcState == StoppingOtherThreads); |
| 776 break; |
| 777 case StoppingOtherThreads: |
| 778 checkThread(); |
| 779 break; |
| 780 case GCRunning: |
| 781 break; |
| 782 case SweepScheduled: |
| 783 RELEASE_ASSERT(m_gcState == GCRunning); |
| 784 break; |
| 785 case Sweeping: |
| 786 checkThread(); |
| 787 RELEASE_ASSERT(m_gcState == SweepScheduled); |
| 788 break; |
| 789 default: |
| 790 ASSERT_NOT_REACHED(); |
| 791 } |
| 792 m_gcState = gcState; |
| 771 } | 793 } |
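The switch above is the heart of this change: the old m_gcRequested / m_sweepRequested / m_inGC booleans collapse into a single per-thread state machine, and every setter call release-asserts that the transition is legal. A standalone sketch of the transitions those asserts allow, with the happy path of one collection cycle spelled out (the enumerator names mirror the CL, but the helper and the one-line state descriptions are my reading of the names, not code from the patch):

    #include <cassert>

    enum GCState {
        NoGCScheduled,          // idle; ~ThreadState() asserts this is the state at destruction
        GCScheduled,            // a GC has been requested for this thread
        StoppingOtherThreads,   // bringing the other threads to safe points
        GCRunning,              // marking in progress
        SweepScheduled,         // marking finished, sweep not yet started
        Sweeping,               // pending sweep in progress
    };

    // Encodes the RELEASE_ASSERTs in ThreadState::setGCState().
    static bool transitionAllowed(GCState from, GCState to)
    {
        switch (to) {
        case NoGCScheduled:        return from == Sweeping;
        case GCScheduled:          return from == NoGCScheduled || from == GCScheduled || from == StoppingOtherThreads;
        case StoppingOtherThreads: return true; // only checkThread() is enforced
        case GCRunning:            return true; // no precondition; may be set for another thread
        case SweepScheduled:       return from == GCRunning;
        case Sweeping:             return from == SweepScheduled;
        }
        return false;
    }

    int main()
    {
        // One full collection cycle as the rest of this file drives it.
        assert(transitionAllowed(NoGCScheduled, GCScheduled));
        assert(transitionAllowed(GCScheduled, StoppingOtherThreads));
        assert(transitionAllowed(StoppingOtherThreads, GCRunning));  // preGC()
        assert(transitionAllowed(GCRunning, SweepScheduled));        // postGC()
        assert(transitionAllowed(SweepScheduled, Sweeping));         // performPendingSweep() entry
        assert(transitionAllowed(Sweeping, NoGCScheduled));          // performPendingSweep() exit
    }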
| 772 | 794 |
| 773 void ThreadState::setSweepRequested() | 795 ThreadState::GCState ThreadState::gcState() const |
| 774 { | 796 { |
| 775 // Sweep requested is set from the thread that initiates garbage | 797 return m_gcState; |
| 776 // collection which could be different from the thread for this | |
| 777 // thread state. Therefore the setting of m_sweepRequested needs a | |
| 778 // barrier. | |
| 779 atomicTestAndSetToOne(&m_sweepRequested); | |
| 780 } | |
| 781 | |
| 782 void ThreadState::clearSweepRequested() | |
| 783 { | |
| 784 checkThread(); | |
| 785 m_sweepRequested = 0; | |
| 786 } | |
| 787 | |
| 788 bool ThreadState::gcRequested() | |
| 789 { | |
| 790 checkThread(); | |
| 791 return m_gcRequested; | |
| 792 } | |
| 793 | |
| 794 void ThreadState::setGCRequested() | |
| 795 { | |
| 796 checkThread(); | |
| 797 m_gcRequested = true; | |
| 798 } | |
| 799 | |
| 800 void ThreadState::clearGCRequested() | |
| 801 { | |
| 802 checkThread(); | |
| 803 m_gcRequested = false; | |
| 804 } | 798 } |
| 805 | 799 |
| 806 void ThreadState::didV8GC() | 800 void ThreadState::didV8GC() |
| 807 { | 801 { |
| 808 checkThread(); | 802 checkThread(); |
| 809 m_didV8GCAfterLastGC = true; | 803 m_didV8GCAfterLastGC = true; |
| 810 } | 804 } |
| 811 | 805 |
| 812 void ThreadState::performPendingGC(StackState stackState) | 806 void ThreadState::performPendingGC(StackState stackState) |
| 813 { | 807 { |
| 814 checkThread(); | 808 checkThread(); |
| 815 if (stackState == NoHeapPointersOnStack) { | 809 if (stackState == NoHeapPointersOnStack) { |
| 816 if (forcePreciseGCForTesting()) { | 810 if (forcePreciseGCForTesting()) { |
| 817 setForcePreciseGCForTesting(false); | 811 setForcePreciseGCForTesting(false); |
| 818 Heap::collectAllGarbage(); | 812 Heap::collectAllGarbage(); |
| 819 } else if (gcRequested()) { | 813 } else if (gcState() == GCScheduled) { |
| 820 Heap::collectGarbage(NoHeapPointersOnStack); | 814 Heap::collectGarbage(NoHeapPointersOnStack); |
| 821 } | 815 } |
| 822 } | 816 } |
| 823 } | 817 } |
| 824 | 818 |
| 825 void ThreadState::setForcePreciseGCForTesting(bool value) | 819 void ThreadState::setForcePreciseGCForTesting(bool value) |
| 826 { | 820 { |
| 827 checkThread(); | 821 checkThread(); |
| 828 m_forcePreciseGCForTesting = value; | 822 m_forcePreciseGCForTesting = value; |
| 829 } | 823 } |
| (...skipping 19 matching lines...) |
| 849 } | 843 } |
| 850 | 844 |
| 851 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() | 845 void ThreadState::flushHeapDoesNotContainCacheIfNeeded() |
| 852 { | 846 { |
| 853 if (m_shouldFlushHeapDoesNotContainCache) { | 847 if (m_shouldFlushHeapDoesNotContainCache) { |
| 854 Heap::flushHeapDoesNotContainCache(); | 848 Heap::flushHeapDoesNotContainCache(); |
| 855 m_shouldFlushHeapDoesNotContainCache = false; | 849 m_shouldFlushHeapDoesNotContainCache = false; |
| 856 } | 850 } |
| 857 } | 851 } |
| 858 | 852 |
| 859 void ThreadState::prepareForGC() | 853 void ThreadState::preGC() |
| 860 { | 854 { |
| 861 for (int i = 0; i < NumberOfHeaps; i++) { | 855 for (int i = 0; i < NumberOfHeaps; i++) { |
| 862 BaseHeap* heap = m_heaps[i]; | 856 BaseHeap* heap = m_heaps[i]; |
| 863 heap->makeConsistentForSweeping(); | 857 heap->makeConsistentForSweeping(); |
| 864 // If a new GC is requested before this thread got around to sweep, ie. due to the | 858 // If a new GC is requested before this thread got around to sweep, ie. due to the |
| 865 // thread doing a long running operation, we clear the mark bits and mark any of | 859 // thread doing a long running operation, we clear the mark bits and mark any of |
| 866 // the dead objects as dead. The latter is used to ensure the next GC marking does | 860 // the dead objects as dead. The latter is used to ensure the next GC marking does |
| 867 // not trace already dead objects. If we trace a dead object we could end up tracing | 861 // not trace already dead objects. If we trace a dead object we could end up tracing |
| 868 // into garbage or the middle of another object via the newly conservatively found | 862 // into garbage or the middle of another object via the newly conservatively found |
| 869 // object. | 863 // object. |
| 870 if (sweepRequested()) | 864 if (gcState() == ThreadState::SweepScheduled) |
| 871 heap->markUnmarkedObjectsDead(); | 865 heap->markUnmarkedObjectsDead(); |
| 872 } | 866 } |
| 873 prepareRegionTree(); | 867 prepareRegionTree(); |
| 874 setSweepRequested(); | 868 setGCState(ThreadState::GCRunning); |
| 875 flushHeapDoesNotContainCacheIfNeeded(); | 869 flushHeapDoesNotContainCacheIfNeeded(); |
| 876 } | 870 } |
| 877 | 871 |
| 872 void ThreadState::postGC() |
| 873 { |
| 874 setGCState(ThreadState::SweepScheduled); |
| 875 } |
| 876 |
| 878 void ThreadState::setupHeapsForTermination() | 877 void ThreadState::setupHeapsForTermination() |
| 879 { | 878 { |
| 880 checkThread(); | 879 checkThread(); |
| 881 for (int i = 0; i < NumberOfHeaps; i++) | 880 for (int i = 0; i < NumberOfHeaps; i++) |
| 882 m_heaps[i]->prepareHeapForTermination(); | 881 m_heaps[i]->prepareHeapForTermination(); |
| 883 } | 882 } |
| 884 | 883 |
| 885 BaseHeapPage* ThreadState::pageFromAddress(Address address) | 884 BaseHeapPage* ThreadState::pageFromAddress(Address address) |
| 886 { | 885 { |
| 887 for (int i = 0; i < NumberOfHeaps; i++) { | 886 for (int i = 0; i < NumberOfHeaps; i++) { |
| (...skipping 108 matching lines...) |
| 996 ASSERT(!m_safePointStackCopy.size()); | 995 ASSERT(!m_safePointStackCopy.size()); |
| 997 m_safePointStackCopy.resize(slotCount); | 996 m_safePointStackCopy.resize(slotCount); |
| 998 for (size_t i = 0; i < slotCount; ++i) { | 997 for (size_t i = 0; i < slotCount; ++i) { |
| 999 m_safePointStackCopy[i] = from[i]; | 998 m_safePointStackCopy[i] = from[i]; |
| 1000 } | 999 } |
| 1001 } | 1000 } |
| 1002 | 1001 |
| 1003 void ThreadState::performPendingSweep() | 1002 void ThreadState::performPendingSweep() |
| 1004 { | 1003 { |
| 1005 checkThread(); | 1004 checkThread(); |
| 1006 if (!sweepRequested()) | 1005 if (gcState() != SweepScheduled) |
| 1007 return; | 1006 return; |
| 1007 setGCState(Sweeping); |
| 1008 | 1008 |
| 1009 #if ENABLE(GC_PROFILE_HEAP) | 1009 #if ENABLE(GC_PROFILE_HEAP) |
| 1010 // We snapshot the heap prior to sweeping to get numbers for both resources | 1010 // We snapshot the heap prior to sweeping to get numbers for both resources |
| 1011 // that have been allocated since the last GC and for resources that are | 1011 // that have been allocated since the last GC and for resources that are |
| 1012 // going to be freed. | 1012 // going to be freed. |
| 1013 bool gcTracingEnabled; | 1013 bool gcTracingEnabled; |
| 1014 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); | 1014 TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled); |
| 1015 if (gcTracingEnabled) | 1015 if (gcTracingEnabled) |
| 1016 snapshot(); | 1016 snapshot(); |
| 1017 #endif | 1017 #endif |
| (...skipping 35 matching lines...) |
| 1053 TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps"); | 1053 TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps"); |
| 1054 for (int i = 0; i < NumberOfFinalizedHeaps; i++) { | 1054 for (int i = 0; i < NumberOfFinalizedHeaps; i++) { |
| 1055 m_heaps[FirstFinalizedHeap + i]->sweep(); | 1055 m_heaps[FirstFinalizedHeap + i]->sweep(); |
| 1056 } | 1056 } |
| 1057 } | 1057 } |
| 1058 | 1058 |
| 1059 for (int i = 0; i < NumberOfHeaps; i++) | 1059 for (int i = 0; i < NumberOfHeaps; i++) |
| 1060 m_heaps[i]->postSweepProcessing(); | 1060 m_heaps[i]->postSweepProcessing(); |
| 1061 } | 1061 } |
| 1062 | 1062 |
| 1063 clearGCRequested(); | |
| 1064 m_didV8GCAfterLastGC = false; | 1063 m_didV8GCAfterLastGC = false; |
| 1065 clearSweepRequested(); | 1064 setGCState(ThreadState::NoGCScheduled); |
| 1066 | 1065 |
| 1067 // If we collected less than 50% of objects, record that the collection rate | 1066 // If we collected less than 50% of objects, record that the collection rate |
| 1068 // is low which we use to determine when to perform the next GC. | 1067 // is low which we use to determine when to perform the next GC. |
| 1069 // FIXME: We should make m_collectionRate available in non-main threads. | 1068 // FIXME: We should make m_collectionRate available in non-main threads. |
| 1070 // FIXME: Heap::markedObjectSize() may not be accurate because other threads | 1069 // FIXME: Heap::markedObjectSize() may not be accurate because other threads |
| 1071 // may not have finished sweeping. | 1070 // may not have finished sweeping. |
| 1072 if (isMainThread()) { | 1071 if (isMainThread()) { |
| 1073 m_collectionRate = 1.0 * Heap::markedObjectSize() / allocatedObjectSizeBeforeSweeping; | 1072 m_collectionRate = 1.0 * Heap::markedObjectSize() / allocatedObjectSizeBeforeSweeping; |
| 1074 } | 1073 } |
| 1075 | 1074 |
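To put numbers on the collection-rate bookkeeping above (illustrative values, not from the CL): if 8MB of objects had been allocated before sweeping and 6MB of them are still marked after the GC, m_collectionRate becomes 6 / 8 = 0.75, meaning only 25% of the heap was actually collected; that is the "less than 50% of objects" case the comment describes, and it feeds into deciding when to perform the next GC.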
| (...skipping 61 matching lines...) |
| 1137 if (entry.value(entry.key, visitor)) | 1136 if (entry.value(entry.key, visitor)) |
| 1138 deadObjects.append(entry.key); | 1137 deadObjects.append(entry.key); |
| 1139 } | 1138 } |
| 1140 // FIXME: removeAll is inefficient. It can shrink repeatedly. | 1139 // FIXME: removeAll is inefficient. It can shrink repeatedly. |
| 1141 m_preFinalizers.removeAll(deadObjects); | 1140 m_preFinalizers.removeAll(deadObjects); |
| 1142 } | 1141 } |
| 1143 | 1142 |
| 1144 #if ENABLE(GC_PROFILE_MARKING) | 1143 #if ENABLE(GC_PROFILE_MARKING) |
| 1145 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) | 1144 const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address) |
| 1146 { | 1145 { |
| 1147 bool needLockForIteration = !isInGC(); | 1146 bool needLockForIteration = !Heap::isInGC(); |
| 1148 if (needLockForIteration) | 1147 if (needLockForIteration) |
| 1149 threadAttachMutex().lock(); | 1148 threadAttachMutex().lock(); |
| 1150 | 1149 |
| 1151 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); | 1150 ThreadState::AttachedThreadStateSet& threads = attachedThreads(); |
| 1152 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { | 1151 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { |
| 1153 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { | 1152 if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) { |
| 1154 if (needLockForIteration) | 1153 if (needLockForIteration) |
| 1155 threadAttachMutex().unlock(); | 1154 threadAttachMutex().unlock(); |
| 1156 return gcInfo; | 1155 return gcInfo; |
| 1157 } | 1156 } |
| 1158 } | 1157 } |
| 1159 if (needLockForIteration) | 1158 if (needLockForIteration) |
| 1160 threadAttachMutex().unlock(); | 1159 threadAttachMutex().unlock(); |
| 1161 return 0; | 1160 return 0; |
| 1162 } | 1161 } |
| 1163 #endif | 1162 #endif |
| 1164 | 1163 |
| 1165 } // namespace blink | 1164 } // namespace blink |