Chromium Code Reviews

Diff: Source/platform/heap/ThreadState.cpp

Issue 747363005: Oilpan: Introduce a state transition model for Oilpan GC states (Closed)
Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Created 6 years ago
/*
 * Copyright (C) 2013 Google Inc. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 *     * Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above
(...skipping 121 matching lines...)
#else
    return 0;
#endif
}

WTF::ThreadSpecific<ThreadState*>* ThreadState::s_threadSpecific = 0;
uintptr_t ThreadState::s_mainThreadStackStart = 0;
uintptr_t ThreadState::s_mainThreadUnderestimatedStackSize = 0;
uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
SafePointBarrier* ThreadState::s_safePointBarrier = 0;
-bool ThreadState::s_inGC = false;

static Mutex& threadAttachMutex()
{
    AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
    return mutex;
}

static double lockingTimeout()
{
    // Wait time for parking all threads is at most 100 ms.
(...skipping 167 matching lines...)
};

ThreadState::ThreadState()
    : m_thread(currentThread())
    , m_persistents(adoptPtr(new PersistentAnchor()))
    , m_startOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    , m_endOfStack(reinterpret_cast<intptr_t*>(getStackStart()))
    , m_safePointScopeMarker(0)
    , m_atSafePoint(false)
    , m_interruptors()
-    , m_gcRequested(false)
    , m_didV8GCAfterLastGC(false)
    , m_forcePreciseGCForTesting(false)
-    , m_sweepRequested(0)
    , m_sweepInProgress(false)
    , m_noAllocationCount(0)
-    , m_inGC(false)
    , m_isTerminating(false)
    , m_shouldFlushHeapDoesNotContainCache(false)
    , m_lowCollectionRate(false)
+    , m_gcState(NoGCScheduled)
    , m_traceDOMWrappers(0)
#if defined(ADDRESS_SANITIZER)
    , m_asanFakeStack(__asan_get_current_fake_stack())
#endif
{
    ASSERT(!**s_threadSpecific);
    **s_threadSpecific = this;

    if (isMainThread()) {
        s_mainThreadStackStart = reinterpret_cast<uintptr_t>(m_startOfStack) - sizeof(void*);
        s_mainThreadUnderestimatedStackSize = getUnderestimatedStackSize() - sizeof(void*);
    }

    InitializeHeaps<NumberOfHeaps>::init(m_heaps, this);

    m_weakCallbackStack = new CallbackStack();
}

ThreadState::~ThreadState()
{
    checkThread();
+    ASSERT(gcState() == NoGCScheduled);
    delete m_weakCallbackStack;
    m_weakCallbackStack = 0;
    for (int i = 0; i < NumberOfHeaps; i++)
        delete m_heaps[i];
    deleteAllValues(m_interruptors);
    **s_threadSpecific = 0;
    if (isMainThread()) {
        s_mainThreadStackStart = 0;
        s_mainThreadUnderestimatedStackSize = 0;
    }
(...skipping 387 matching lines...)
        return newSize >= 4 * 1024 * 1024 && newSize > 2 * Heap::markedObjectSize();
    }
    // Otherwise, trigger a conservative GC on a 300% increase in size, but not
    // for less than 32MB. We set the higher limits in this case because Oilpan
    // GC might waste time tracing a lot of unused DOM wrappers or finding a
    // small amount of garbage.
    // FIXME: Is 32MB reasonable?
    return newSize >= 32 * 1024 * 1024 && newSize > 4 * Heap::markedObjectSize();
}
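As a worked illustration of the second trigger — a hypothetical standalone restatement, not code from this patch; markedSize stands in for Heap::markedObjectSize():

    // Hypothetical restatement of the conservative-GC trigger above.
    static bool shouldTriggerConservativeGC(size_t newSize, size_t markedSize)
    {
        // A 300% increase means the heap has grown past 4x the surviving size.
        return newSize >= 32 * 1024 * 1024 && newSize > 4 * markedSize;
    }
    // e.g. newSize = 40MB with markedSize = 8MB fires (40MB >= 32MB, and
    // 40MB > 4 * 8MB = 32MB); with markedSize = 12MB the 4x bound is 48MB
    // and the GC is not triggered.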
-bool ThreadState::sweepRequested()
-{
-    ASSERT(isAnyThreadInGC() || checkThread());
-    return m_sweepRequested;
-}
-
-void ThreadState::setSweepRequested()
-{
-    // Sweep requested is set from the thread that initiates garbage
-    // collection which could be different from the thread for this
-    // thread state. Therefore the setting of m_sweepRequested needs a
-    // barrier.
-    atomicTestAndSetToOne(&m_sweepRequested);
-}
-
-void ThreadState::clearSweepRequested()
-{
-    checkThread();
-    m_sweepRequested = 0;
-}
-
-bool ThreadState::gcRequested()
-{
-    checkThread();
-    return m_gcRequested;
-}
-
-void ThreadState::setGCRequested()
-{
-    checkThread();
-    m_gcRequested = true;
-}
-
-void ThreadState::clearGCRequested()
-{
-    checkThread();
-    m_gcRequested = false;
-}
+void ThreadState::setGCState(GCState gcState)
+{
+    switch (gcState) {
+    case NoGCScheduled:
+        checkThread();
+        RELEASE_ASSERT(m_gcState == Sweeping);
+        break;
+    case GCScheduled:
+        checkThread();
+        RELEASE_ASSERT(m_gcState == NoGCScheduled || m_gcState == GCScheduled || m_gcState == StoppingOtherThreads);
+        break;
+    case StoppingOtherThreads:
+        checkThread();
+        break;
+    case GCRunning:
+        break;
+    case SweepScheduled:
+        RELEASE_ASSERT(m_gcState == GCRunning);
+        break;
+    case Sweeping:
+        checkThread();
+        RELEASE_ASSERT(m_gcState == SweepScheduled);
+        break;
+    default:
+        ASSERT_NOT_REACHED();
+    }
+    m_gcState = gcState;
+}
+
+ThreadState::GCState ThreadState::gcState() const
+{
+    return m_gcState;
+}
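The switch above is the whole transition model: each case's RELEASE_ASSERT names the states a transition may legally come from (GCScheduled may also be re-entered from itself or from StoppingOtherThreads; GCRunning and StoppingOtherThreads accept entry from any state, since no assert constrains them). A sketch of the happy path a full GC cycle walks, illustrative only and not code from this patch:

    // Illustrative only: the transition chain a complete GC cycle is expected
    // to walk, as constrained by the RELEASE_ASSERTs in setGCState().
    ThreadState* state = ThreadState::current();
    state->setGCState(ThreadState::GCScheduled);          // from NoGCScheduled
    state->setGCState(ThreadState::StoppingOtherThreads); // park other threads
    state->setGCState(ThreadState::GCRunning);            // marking phase
    state->setGCState(ThreadState::SweepScheduled);       // only legal from GCRunning
    state->setGCState(ThreadState::Sweeping);             // only legal from SweepScheduled
    state->setGCState(ThreadState::NoGCScheduled);        // only legal from Sweeping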
void ThreadState::didV8GC()
{
    checkThread();
    m_didV8GCAfterLastGC = true;
}

void ThreadState::performPendingGC(StackState stackState)
{
    if (stackState == NoHeapPointersOnStack) {
        if (forcePreciseGCForTesting()) {
            setForcePreciseGCForTesting(false);
            Heap::collectAllGarbage();
-        } else if (gcRequested()) {
+        } else if (gcState() == GCScheduled) {
            Heap::collectGarbage(NoHeapPointersOnStack);
        }
    }
}
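performPendingGC() only acts when the caller can guarantee there are no heap pointers on the stack; a GC in the GCScheduled state is then run precisely. A hypothetical call-site sketch (the hook name here is illustrative, not from this patch):

    // Hypothetical caller: a thread reaching a point with no Oilpan pointers
    // live on its stack gives a scheduled GC a chance to run precisely.
    void someIdleHook()
    {
        ThreadState::current()->performPendingGC(ThreadState::NoHeapPointersOnStack);
    }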
void ThreadState::setForcePreciseGCForTesting(bool value)
{
    checkThread();
    m_forcePreciseGCForTesting = value;
}
(...skipping 30 matching lines...)
}

void ThreadState::flushHeapDoesNotContainCacheIfNeeded()
{
    if (m_shouldFlushHeapDoesNotContainCache) {
        Heap::flushHeapDoesNotContainCache();
        m_shouldFlushHeapDoesNotContainCache = false;
    }
}

-void ThreadState::prepareForGC()
+void ThreadState::preGC()
{
    for (int i = 0; i < NumberOfHeaps; i++) {
        BaseHeap* heap = m_heaps[i];
        heap->makeConsistentForSweeping();
        // If a new GC is requested before this thread got around to sweeping,
        // e.g. due to the thread doing a long-running operation, we clear the
        // mark bits and mark any of the dead objects as dead. The latter is
        // used to ensure the next GC marking does not trace already dead
        // objects. If we trace a dead object we could end up tracing into
        // garbage or the middle of another object via the newly conservatively
        // found object.
-        if (sweepRequested())
+        if (gcState() == ThreadState::SweepScheduled)
            heap->markUnmarkedObjectsDead();
    }
    prepareRegionTree();
-    setSweepRequested();
+    setGCState(ThreadState::GCRunning);
    flushHeapDoesNotContainCacheIfNeeded();
}

+void ThreadState::postGC()
+{
+    setGCState(ThreadState::SweepScheduled);
+}
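preGC() and postGC() bracket the marking phase: preGC() makes the heaps consistent and moves the thread to GCRunning, postGC() moves it to SweepScheduled, and the sweep itself runs later in performPendingSweep(). A simplified, hypothetical driver showing that ordering (the real sequencing lives in Heap::collectGarbage()):

    // Simplified sketch of the phase ordering; not the actual Heap code.
    void collectGarbageSketch(ThreadState* state)
    {
        state->preGC();   // make heaps consistent for sweeping, enter GCRunning
        // ... mark all reachable objects ...
        state->postGC();  // enter SweepScheduled
        // Lazily, on the owning thread: performPendingSweep() moves the state
        // through Sweeping and back to NoGCScheduled.
    }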
void ThreadState::setupHeapsForTermination()
{
    for (int i = 0; i < NumberOfHeaps; i++)
        m_heaps[i]->prepareHeapForTermination();
}

BaseHeapPage* ThreadState::pageFromAddress(Address address)
{
    for (int i = 0; i < NumberOfHeaps; i++) {
        if (BaseHeapPage* page = m_heaps[i]->pageFromAddress(address))
(...skipping 107 matching lines...)
    ASSERT(!m_safePointStackCopy.size());
    m_safePointStackCopy.resize(slotCount);
    for (size_t i = 0; i < slotCount; ++i) {
        m_safePointStackCopy[i] = from[i];
    }
}

void ThreadState::performPendingSweep()
{
-    if (!sweepRequested())
+    if (gcState() != SweepScheduled)
        return;
+    setGCState(Sweeping);

#if ENABLE(GC_PROFILE_HEAP)
    // We snapshot the heap prior to sweeping to get numbers for both resources
    // that have been allocated since the last GC and for resources that are
    // going to be freed.
    bool gcTracingEnabled;
    TRACE_EVENT_CATEGORY_GROUP_ENABLED("blink_gc", &gcTracingEnabled);
    if (gcTracingEnabled)
        snapshot();
#endif
(...skipping 35 matching lines...)
            TRACE_EVENT0("blink_gc", "ThreadState::sweepFinalizedHeaps");
            for (int i = 0; i < NumberOfFinalizedHeaps; i++) {
                m_heaps[FirstFinalizedHeap + i]->sweep();
            }
        }

        for (int i = 0; i < NumberOfHeaps; i++)
            m_heaps[i]->postSweepProcessing();
    }

-    clearGCRequested();
    m_didV8GCAfterLastGC = false;
-    clearSweepRequested();
+    setGCState(ThreadState::NoGCScheduled);

    // If we collected less than 50% of objects, record that the collection rate
    // is low, which we use to determine when to perform the next GC.
    // FIXME: We should make m_lowCollectionRate available in non-main threads.
    // FIXME: Heap::markedObjectSize() may not be accurate because other threads
    // may not have finished sweeping.
    if (isMainThread())
        m_lowCollectionRate = Heap::markedObjectSize() > (allocatedObjectSizeBeforeSweeping / 2);

    if (Platform::current()) {
(...skipping 58 matching lines...)
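To make the 50% heuristic just before the skipped region concrete — a hedged restatement with example numbers (allocatedObjectSizeBeforeSweeping is presumably captured earlier in performPendingSweep(), in an elided portion):

    // Hypothetical restatement of the low-collection-rate check above.
    static bool isLowCollectionRate(size_t markedSize, size_t sizeBeforeSweeping)
    {
        // More than half of the objects survived, i.e. less than 50% of the
        // heap was collected, so back off before scheduling the next GC.
        return markedSize > sizeBeforeSweeping / 2;
    }
    // e.g. 60MB marked out of 100MB allocated: 60MB > 50MB, the rate is low;
    // 40MB marked out of 100MB: the majority was collected, the rate is normal.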
        if (entry.value(entry.key, visitor))
            deadObjects.append(entry.key);
    }
    // FIXME: removeAll is inefficient. It can shrink repeatedly.
    m_preFinalizers.removeAll(deadObjects);
}

#if ENABLE(GC_PROFILE_MARKING)
const GCInfo* ThreadState::findGCInfoFromAllThreads(Address address)
{
-    bool needLockForIteration = !isAnyThreadInGC();
+    bool needLockForIteration = !Heap::isInGC();
    if (needLockForIteration)
        threadAttachMutex().lock();

    ThreadState::AttachedThreadStateSet& threads = attachedThreads();
    for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
        if (const GCInfo* gcInfo = (*it)->findGCInfo(address)) {
            if (needLockForIteration)
                threadAttachMutex().unlock();
            return gcInfo;
        }
    }
    if (needLockForIteration)
        threadAttachMutex().unlock();
    return 0;
}
#endif

} // namespace blink
