Chromium Code Reviews

Side by Side Diff: Source/platform/heap/ThreadState.cpp

Issue 260723003: [oilpan]: Make parking threads for GC time out if parking exceeds 100 ms (Closed) Base URL: svn://svn.chromium.org/blink/trunk
Patch Set: Add safepoint scopes around yield Created 6 years, 7 months ago
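
In brief: this patch bounds how long the GC thread will wait for the other attached threads to reach their safepoints. parkOthers() now waits on the "parked" condition with a 100 ms timeout and, if the timeout expires, resumes the threads that did park and reports failure so the collection can be abandoned. The sketch below is a minimal, self-contained illustration of that timed-park pattern using standard C++ primitives; it is not Blink code. The real implementation uses WTF's Mutex, ThreadCondition::timedWait and explicit atomics, and every name in the sketch is illustrative.

#include <chrono>
#include <condition_variable>
#include <mutex>

class ParkBarrier {
public:
    // GC side: ask the other threads to park and wait for them. Returns false
    // if they did not all park within the 100 ms budget; in that case the
    // threads that did park have already been resumed and the GC should be
    // abandoned.
    bool parkOthers(int others)
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        m_unparkedThreadCount += others;
        m_canResume = false;
        // (The real code requests an interrupt on every other thread here,
        // which is what eventually makes each of them call park().)
        while (m_unparkedThreadCount > 0) {
            // As in the patch, the 100 ms budget restarts whenever a thread
            // parks and wakes us up.
            auto deadline = std::chrono::steady_clock::now()
                            + std::chrono::milliseconds(100);
            if (m_parked.wait_until(lock, deadline) == std::cv_status::timeout) {
                resumeOthersLocked(others); // Mirrors resumeOthers(true).
                return false;
            }
        }
        return true;
    }

    // GC side: release the parked threads after a successful collection.
    void resumeOthers(int others)
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        resumeOthersLocked(others);
    }

    // Mutator side: called when a thread reaches a safepoint while a park
    // request is pending.
    void park()
    {
        std::unique_lock<std::mutex> lock(m_mutex);
        if (--m_unparkedThreadCount == 0)
            m_parked.notify_one(); // Last one in; the GC thread may proceed.
        while (!m_canResume)
            m_resume.wait(lock);
        ++m_unparkedThreadCount;
    }

private:
    void resumeOthersLocked(int others)
    {
        m_unparkedThreadCount -= others;
        m_canResume = true;
        m_resume.notify_all();
    }

    std::mutex m_mutex;
    std::condition_variable m_parked;
    std::condition_variable m_resume;
    int m_unparkedThreadCount = 0;
    bool m_canResume = true;
};

In the patch itself the timeout value comes from lockingTimeout() (0.100 seconds), and the failure path calls resumeOthers(true) because parkOthers() already holds m_mutex at that point.
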
1 /* 1 /*
2 * Copyright (C) 2013 Google Inc. All rights reserved. 2 * Copyright (C) 2013 Google Inc. All rights reserved.
3 * 3 *
4 * Redistribution and use in source and binary forms, with or without 4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are 5 * modification, are permitted provided that the following conditions are
6 * met: 6 * met:
7 * 7 *
8 * * Redistributions of source code must retain the above copyright 8 * * Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer. 9 * notice, this list of conditions and the following disclaimer.
10 * * Redistributions in binary form must reproduce the above 10 * * Redistributions in binary form must reproduce the above
(...skipping 79 matching lines...)
90 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)]; 90 uint8_t ThreadState::s_mainThreadStateStorage[sizeof(ThreadState)];
91 SafePointBarrier* ThreadState::s_safePointBarrier = 0; 91 SafePointBarrier* ThreadState::s_safePointBarrier = 0;
92 bool ThreadState::s_inGC = false; 92 bool ThreadState::s_inGC = false;
93 93
94 static Mutex& threadAttachMutex() 94 static Mutex& threadAttachMutex()
95 { 95 {
96 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex); 96 AtomicallyInitializedStatic(Mutex&, mutex = *new Mutex);
97 return mutex; 97 return mutex;
98 } 98 }
99 99
100 static double lockingTimeout()
101 {
102 // Wait time for parking all threads is at most 100 ms.
103 return 0.100;
104 }
105
106
100 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*); 107 typedef void (*PushAllRegistersCallback)(SafePointBarrier*, ThreadState*, intptr_t*);
101 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegiste rsCallback); 108 extern "C" void pushAllRegisters(SafePointBarrier*, ThreadState*, PushAllRegiste rsCallback);
102 109
103 class SafePointBarrier { 110 class SafePointBarrier {
104 public: 111 public:
105 SafePointBarrier() : m_canResume(1), m_unparkedThreadCount(0) { } 112 SafePointBarrier() : m_canResume(1), m_unparkedThreadCount(0) { }
106 ~SafePointBarrier() { } 113 ~SafePointBarrier() { }
107 114
108 // Request other attached threads that are not at safe points to park themselves on safepoints. 115 // Request other attached threads that are not at safe points to park themselves on safepoints.
109 void parkOthers() 116 bool parkOthers()
110 { 117 {
111 ASSERT(ThreadState::current()->isAtSafePoint()); 118 ASSERT(ThreadState::current()->isAtSafePoint());
112 119
113 // Lock threadAttachMutex() to prevent threads from attaching. 120 // Lock threadAttachMutex() to prevent threads from attaching.
114 threadAttachMutex().lock(); 121 threadAttachMutex().lock();
115 122
116 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 123 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
117 124
118 MutexLocker locker(m_mutex); 125 MutexLocker locker(m_mutex);
119 atomicAdd(&m_unparkedThreadCount, threads.size()); 126 atomicAdd(&m_unparkedThreadCount, threads.size());
120 releaseStore(&m_canResume, 0); 127 releaseStore(&m_canResume, 0);
121 128
122 ThreadState* current = ThreadState::current(); 129 ThreadState* current = ThreadState::current();
123 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 130 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
124 if (*it == current) 131 if (*it == current)
125 continue; 132 continue;
126 133
127 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors(); 134 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
128 for (size_t i = 0; i < interruptors.size(); i++) 135 for (size_t i = 0; i < interruptors.size(); i++)
129 interruptors[i]->requestInterrupt(); 136 interruptors[i]->requestInterrupt();
130 } 137 }
131 138
132 while (acquireLoad(&m_unparkedThreadCount) > 0) 139 while (acquireLoad(&m_unparkedThreadCount) > 0) {
133 m_parked.wait(m_mutex); 140 double expirationTime = currentTime() + lockingTimeout();
141 if (!m_parked.timedWait(m_mutex, expirationTime)) {
142 // One of the other threads did not return to a safepoint within the maximum
143 // time we allow for threads to be parked. Abandon the GC and resume the
144 // currently parked threads.
145 resumeOthers(true);
146 return false;
147 }
148 }
149 return true;
134 } 150 }
135 151
136 void resumeOthers() 152 void resumeOthers(bool barrierLocked = false)
137 { 153 {
138 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads(); 154 ThreadState::AttachedThreadStateSet& threads = ThreadState::attachedThreads();
139 atomicSubtract(&m_unparkedThreadCount, threads.size()); 155 atomicSubtract(&m_unparkedThreadCount, threads.size());
140 releaseStore(&m_canResume, 1); 156 releaseStore(&m_canResume, 1);
141 { 157
158 // FIXME: Resumed threads will all contend for m_mutex just to unlock it
159 // later which is a waste of resources.
160 if (UNLIKELY(barrierLocked)) {
161 m_resume.broadcast();
162 } else {
142 // FIXME: Resumed threads will all contend for 163 // FIXME: Resumed threads will all contend for
143 // m_mutex just to unlock it later which is a waste of 164 // m_mutex just to unlock it later which is a waste of
144 // resources. 165 // resources.
145 MutexLocker locker(m_mutex); 166 MutexLocker locker(m_mutex);
146 m_resume.broadcast(); 167 m_resume.broadcast();
147 } 168 }
148 169
149 ThreadState* current = ThreadState::current(); 170 ThreadState* current = ThreadState::current();
150 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) { 171 for (ThreadState::AttachedThreadStateSet::iterator it = threads.begin(), end = threads.end(); it != end; ++it) {
151 if (*it == current) 172 if (*it == current)
152 continue; 173 continue;
153 174
154 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors(); 175 const Vector<ThreadState::Interruptor*>& interruptors = (*it)->interruptors();
155 for (size_t i = 0; i < interruptors.size(); i++) 176 for (size_t i = 0; i < interruptors.size(); i++)
156 interruptors[i]->clearInterrupt(); 177 interruptors[i]->clearInterrupt();
157 } 178 }
158 179
159 threadAttachMutex().unlock(); 180 threadAttachMutex().unlock();
160 ASSERT(ThreadState::current()->isAtSafePoint()); 181 ASSERT(ThreadState::current()->isAtSafePoint());
161 } 182 }
162 183
184 void checkAndPark(ThreadState* state)
185 {
186 ASSERT(!state->isSweepInProgress());
187 if (!acquireLoad(&m_canResume)) {
188 pushAllRegisters(this, state, parkAfterPushRegisters);
189 state->performPendingSweep();
190 }
191 }
192
193 void enterSafePoint(ThreadState* state)
194 {
195 ASSERT(!state->isSweepInProgress());
196 pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
197 }
198
199 void leaveSafePoint(ThreadState* state)
200 {
201 if (atomicIncrement(&m_unparkedThreadCount) > 0)
202 checkAndPark(state);
203 }
204
205 private:
163 void doPark(ThreadState* state, intptr_t* stackEnd) 206 void doPark(ThreadState* state, intptr_t* stackEnd)
164 { 207 {
165 state->recordStackEnd(stackEnd); 208 state->recordStackEnd(stackEnd);
166 MutexLocker locker(m_mutex); 209 MutexLocker locker(m_mutex);
167 if (!atomicDecrement(&m_unparkedThreadCount)) 210 if (!atomicDecrement(&m_unparkedThreadCount))
168 m_parked.signal(); 211 m_parked.signal();
169 while (!acquireLoad(&m_canResume)) 212 while (!acquireLoad(&m_canResume))
170 m_resume.wait(m_mutex); 213 m_resume.wait(m_mutex);
171 atomicIncrement(&m_unparkedThreadCount); 214 atomicIncrement(&m_unparkedThreadCount);
172 } 215 }
173 216
174 void checkAndPark(ThreadState* state) 217 static void parkAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
175 { 218 {
176 ASSERT(!state->isSweepInProgress()); 219 barrier->doPark(state, stackEnd);
177 if (!acquireLoad(&m_canResume)) {
178 pushAllRegisters(this, state, parkAfterPushRegisters);
179 state->performPendingSweep();
180 }
181 } 220 }
182 221
183 void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd) 222 void doEnterSafePoint(ThreadState* state, intptr_t* stackEnd)
184 { 223 {
185 state->recordStackEnd(stackEnd); 224 state->recordStackEnd(stackEnd);
186 state->copyStackUntilSafePointScope(); 225 state->copyStackUntilSafePointScope();
187 // m_unparkedThreadCount tracks amount of unparked threads. It is 226 // m_unparkedThreadCount tracks amount of unparked threads. It is
188 // positive if and only if we have requested other threads to park 227 // positive if and only if we have requested other threads to park
189 // at safe-points in preparation for GC. The last thread to park 228 // at safe-points in preparation for GC. The last thread to park
190 // itself will make the counter hit zero and should notify GC thread 229 // itself will make the counter hit zero and should notify GC thread
191 // that it is safe to proceed. 230 // that it is safe to proceed.
192 // If no other thread is waiting for other threads to park then 231 // If no other thread is waiting for other threads to park then
193 // this counter can be negative: if N threads are at safe-points 232 // this counter can be negative: if N threads are at safe-points
194 // the counter will be -N. 233 // the counter will be -N.
195 if (!atomicDecrement(&m_unparkedThreadCount)) { 234 if (!atomicDecrement(&m_unparkedThreadCount)) {
196 MutexLocker locker(m_mutex); 235 MutexLocker locker(m_mutex);
197 m_parked.signal(); // Safe point reached. 236 m_parked.signal(); // Safe point reached.
198 } 237 }
199 } 238 }
200 239
201 void enterSafePoint(ThreadState* state)
202 {
203 ASSERT(!state->isSweepInProgress());
204 pushAllRegisters(this, state, enterSafePointAfterPushRegisters);
205 }
206
207 void leaveSafePoint(ThreadState* state)
208 {
209 if (atomicIncrement(&m_unparkedThreadCount) > 0)
210 checkAndPark(state);
211 }
212
213 private:
214 static void parkAfterPushRegisters(SafePointBarrier* barrier, ThreadState* s tate, intptr_t* stackEnd)
215 {
216 barrier->doPark(state, stackEnd);
217 }
218
219 static void enterSafePointAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd) 240 static void enterSafePointAfterPushRegisters(SafePointBarrier* barrier, ThreadState* state, intptr_t* stackEnd)
220 { 241 {
221 barrier->doEnterSafePoint(state, stackEnd); 242 barrier->doEnterSafePoint(state, stackEnd);
222 } 243 }
223 244
224 volatile int m_canResume; 245 volatile int m_canResume;
225 volatile int m_unparkedThreadCount; 246 volatile int m_unparkedThreadCount;
226 Mutex m_mutex; 247 Mutex m_mutex;
227 ThreadCondition m_parked; 248 ThreadCondition m_parked;
228 ThreadCondition m_resume; 249 ThreadCondition m_resume;
(...skipping 466 matching lines...)
695 if (isConsistentForGC()) { 716 if (isConsistentForGC()) {
696 HeapStats scannedStats; 717 HeapStats scannedStats;
697 scannedStats.clear(); 718 scannedStats.clear();
698 for (int i = 0; i < NumberOfHeaps; i++) 719 for (int i = 0; i < NumberOfHeaps; i++)
699 m_heaps[i]->getScannedStats(scannedStats); 720 m_heaps[i]->getScannedStats(scannedStats);
700 ASSERT(scannedStats == stats); 721 ASSERT(scannedStats == stats);
701 } 722 }
702 #endif 723 #endif
703 } 724 }
704 725
705 void ThreadState::stopThreads() 726 bool ThreadState::stopThreads()
706 { 727 {
707 s_safePointBarrier->parkOthers(); 728 return s_safePointBarrier->parkOthers();
708 } 729 }
709 730
710 void ThreadState::resumeThreads() 731 void ThreadState::resumeThreads()
711 { 732 {
712 s_safePointBarrier->resumeOthers(); 733 s_safePointBarrier->resumeOthers();
713 } 734 }
714 735
715 void ThreadState::safePoint(StackState stackState) 736 void ThreadState::safePoint(StackState stackState)
716 { 737 {
717 checkThread(); 738 checkThread();
(...skipping 132 matching lines...)
850 state->safePoint(HeapPointersOnStack); 871 state->safePoint(HeapPointersOnStack);
851 } 872 }
852 873
853 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads() 874 ThreadState::AttachedThreadStateSet& ThreadState::attachedThreads()
854 { 875 {
855 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ()); 876 DEFINE_STATIC_LOCAL(AttachedThreadStateSet, threads, ());
856 return threads; 877 return threads;
857 } 878 }
858 879
859 } 880 }
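
Because parkOthers() can now fail, ThreadState::stopThreads() (changed near the bottom of this diff) returns bool as well. The snippet below is a hypothetical, simplified caller showing how that contract might be honored; only stopThreads(), resumeThreads() and their return behavior come from this file, and the surrounding function is illustrative (the real call site lives in Heap.cpp and is not part of this change).

// Hypothetical caller sketch, not part of this change. Only stopThreads()
// and resumeThreads() are real ThreadState APIs; the rest is illustrative.
static void collectGarbageExample()
{
    // The calling thread must itself be at a safepoint; parkOthers()
    // asserts this before asking the other threads to park.
    if (!ThreadState::stopThreads()) {
        // Not every thread parked within the 100 ms budget. parkOthers()
        // has already resumed the threads that did park, so skip this
        // collection and let a later GC retry.
        return;
    }

    // ... mark live objects and sweep the heaps here ...

    ThreadState::resumeThreads();
}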