OLD | NEW |
| (Empty) |
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ | |
2 /* This Source Code Form is subject to the terms of the Mozilla Public | |
3 * License, v. 2.0. If a copy of the MPL was not distributed with this | |
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ | |
5 | |
6 #include "primpl.h" | |
7 | |
8 #if defined(WIN95) | |
9 /* | |
10 ** Some local variables report warnings on Win95 because the code paths | |
11 ** using them are conditioned on HAVE_CUSTOM_USER_THREADS. | |
12 ** The pragma suppresses the warning. | |
13 ** | |
14 */ | |
15 #pragma warning(disable : 4101) | |
16 #endif | |
17 | |
18 | |
/*
** One-time initialization of NSPR's lock support.  Simply delegates to
** the machine-dependent (MD) layer; all per-platform setup lives behind
** the _PR_MD_INIT_LOCKS() macro.
*/
void _PR_InitLocks(void)
{
    _PR_MD_INIT_LOCKS();
}
23 | |
24 /* | |
25 ** Deal with delayed interrupts/requested reschedule during interrupt | |
26 ** re-enables. | |
27 */ | |
28 void _PR_IntsOn(_PRCPU *cpu) | |
29 { | |
30 PRUintn missed, pri, i; | |
31 _PRInterruptTable *it; | |
32 PRThread *me; | |
33 | |
34 PR_ASSERT(cpu); /* Global threads don't have CPUs */ | |
35 PR_ASSERT(_PR_MD_GET_INTSOFF() > 0); | |
36 me = _PR_MD_CURRENT_THREAD(); | |
37 PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); | |
38 | |
39 /* | |
40 ** Process delayed interrupts. This logic is kinda scary because we | |
41 ** need to avoid losing an interrupt (it's ok to delay an interrupt | |
42 ** until later). | |
43 ** | |
44 ** There are two missed state words. _pr_ints.where indicates to the | |
45 ** interrupt handler which state word is currently safe for | |
46 ** modification. | |
47 ** | |
48 ** This code scans both interrupt state words, using the where flag | |
49 ** to indicate to the interrupt which state word is safe for writing. | |
50 ** If an interrupt comes in during a scan the other word will be | |
51 ** modified. This modification will be noticed during the next | |
52 ** iteration of the loop or during the next call to this routine. | |
53 */ | |
54 for (i = 0; i < 2; i++) { | |
55 cpu->where = (1 - i); | |
56 missed = cpu->u.missed[i]; | |
57 if (missed != 0) { | |
58 cpu->u.missed[i] = 0; | |
59 for (it = _pr_interruptTable; it->name; it++) { | |
60 if (missed & it->missed_bit) { | |
61 PR_LOG(_pr_sched_lm, PR_LOG_MIN, | |
62 ("IntsOn[0]: %s intr", it->name)); | |
63 (*it->handler)(); | |
64 } | |
65 } | |
66 } | |
67 } | |
68 | |
69 if (cpu->u.missed[3] != 0) { | |
70 _PRCPU *cpu; | |
71 | |
72 _PR_THREAD_LOCK(me); | |
73 me->state = _PR_RUNNABLE; | |
74 pri = me->priority; | |
75 | |
76 cpu = me->cpu; | |
77 _PR_RUNQ_LOCK(cpu); | |
78 _PR_ADD_RUNQ(me, cpu, pri); | |
79 _PR_RUNQ_UNLOCK(cpu); | |
80 _PR_THREAD_UNLOCK(me); | |
81 _PR_MD_SWITCH_CONTEXT(me); | |
82 } | |
83 } | |
84 | |
/*
** Unblock the first runnable waiting thread on |lock|.  Skip over
** threads that are trying to be suspended (_PR_SUSPENDING).
** At most one waiter is woken per call; the loop exists only to skip
** suspending threads.
** Note: Caller must hold _PR_LOCK_LOCK().
*/
void _PR_UnblockLockWaiter(PRLock *lock)
{
    PRThread *t = NULL;
    PRThread *me;
    PRCList *q;

    q = lock->waitQ.next;
    /* Callers only invoke us when the waitQ is known to be non-empty. */
    PR_ASSERT(q != &lock->waitQ);
    while (q != &lock->waitQ) {
        /* Unblock first waiter */
        t = _PR_THREAD_CONDQ_PTR(q);

        /*
        ** We are about to change the thread's state to runnable and for
        ** local threads, we are going to assign a cpu to it.  So, protect
        ** the thread's data structure.
        */
        _PR_THREAD_LOCK(t);

        if (t->flags & _PR_SUSPENDING) {
            /* Thread is being suspended -- leave it queued, try the next. */
            q = q->next;
            _PR_THREAD_UNLOCK(t);
            continue;
        }

        /* Found a runnable thread */
        PR_ASSERT(t->state == _PR_LOCK_WAIT);
        PR_ASSERT(t->wait.lock == lock);
        t->wait.lock = 0;
        PR_REMOVE_LINK(&t->waitQLinks);  /* take it off lock's waitQ */

        /*
        ** If this is a native thread, nothing else to do except to wake
        ** it up by calling the machine dependent wakeup routine.
        **
        ** If this is a local thread, we need to assign it a cpu and
        ** put the thread on that cpu's run queue.  There are two cases
        ** to take care of.  If the currently running thread is also a
        ** local thread, we just assign our own cpu to that thread and
        ** put it on the cpu's run queue.  If the currently running
        ** thread is a native thread, we assign the primordial cpu to it
        ** (on NT, MD_WAKEUP handles the cpu assignment).
        */

        if ( !_PR_IS_NATIVE_THREAD(t) ) {

            t->state = _PR_RUNNABLE;

            me = _PR_MD_CURRENT_THREAD();

            _PR_AddThreadToRunQ(me, t);
            _PR_THREAD_UNLOCK(t);
        } else {
            /* Native waiter: MD wakeup below does the rest. */
            t->state = _PR_RUNNING;
            _PR_THREAD_UNLOCK(t);
        }
        _PR_MD_WAKEUP_WAITER(t);
        break;
    }
    return;
}
151 | |
152 /************************************************************************/ | |
153 | |
154 | |
155 PR_IMPLEMENT(PRLock*) PR_NewLock(void) | |
156 { | |
157 PRLock *lock; | |
158 | |
159 if (!_pr_initialized) _PR_ImplicitInitialization(); | |
160 | |
161 lock = PR_NEWZAP(PRLock); | |
162 if (lock) { | |
163 if (_PR_MD_NEW_LOCK(&lock->ilock) == PR_FAILURE) { | |
164 PR_DELETE(lock); | |
165 return(NULL); | |
166 } | |
167 PR_INIT_CLIST(&lock->links); | |
168 PR_INIT_CLIST(&lock->waitQ); | |
169 } | |
170 return lock; | |
171 } | |
172 | |
/*
** Destroy the given lock "lock".  There is no point in making this race
** free because if some other thread has the pointer to this lock all
** bets are off.
**
** The lock must be unheld when destroyed (owner == 0); this is checked
** by assertion in debug builds only.
*/
PR_IMPLEMENT(void) PR_DestroyLock(PRLock *lock)
{
    PR_ASSERT(lock->owner == 0);
    _PR_MD_FREE_LOCK(&lock->ilock);  /* release the machine-dependent lock */
    PR_DELETE(lock);                 /* then free the PRLock itself */
}
184 | |
185 extern PRThread *suspendAllThread; | |
186 /* | |
187 ** Lock the lock. | |
188 */ | |
189 PR_IMPLEMENT(void) PR_Lock(PRLock *lock) | |
190 { | |
191 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
192 PRIntn is; | |
193 PRThread *t; | |
194 PRCList *q; | |
195 | |
196 PR_ASSERT(me != suspendAllThread); | |
197 PR_ASSERT(!(me->flags & _PR_IDLE_THREAD)); | |
198 PR_ASSERT(lock != NULL); | |
199 #ifdef _PR_GLOBAL_THREADS_ONLY | |
200 PR_ASSERT(lock->owner != me); | |
201 _PR_MD_LOCK(&lock->ilock); | |
202 lock->owner = me; | |
203 return; | |
204 #else /* _PR_GLOBAL_THREADS_ONLY */ | |
205 | |
206 if (_native_threads_only) { | |
207 PR_ASSERT(lock->owner != me); | |
208 _PR_MD_LOCK(&lock->ilock); | |
209 lock->owner = me; | |
210 return; | |
211 } | |
212 | |
213 if (!_PR_IS_NATIVE_THREAD(me)) | |
214 _PR_INTSOFF(is); | |
215 | |
216 PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); | |
217 | |
218 retry: | |
219 _PR_LOCK_LOCK(lock); | |
220 if (lock->owner == 0) { | |
221 /* Just got the lock */ | |
222 lock->owner = me; | |
223 lock->priority = me->priority; | |
224 /* Add the granted lock to this owning thread's lock list */ | |
225 PR_APPEND_LINK(&lock->links, &me->lockList); | |
226 _PR_LOCK_UNLOCK(lock); | |
227 if (!_PR_IS_NATIVE_THREAD(me)) | |
228 _PR_FAST_INTSON(is); | |
229 return; | |
230 } | |
231 | |
232 /* If this thread already owns this lock, then it is a deadlock */ | |
233 PR_ASSERT(lock->owner != me); | |
234 | |
235 PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0); | |
236 | |
237 #if 0 | |
238 if (me->priority > lock->owner->priority) { | |
239 /* | |
240 ** Give the lock owner a priority boost until we get the | |
241 ** lock. Record the priority we boosted it to. | |
242 */ | |
243 lock->boostPriority = me->priority; | |
244 _PR_SetThreadPriority(lock->owner, me->priority); | |
245 } | |
246 #endif | |
247 | |
248 /* | |
249 Add this thread to the asked for lock's list of waiting threads. We | |
250 add this thread thread in the right priority order so when the unlock | |
251 occurs, the thread with the higher priority will get the lock. | |
252 */ | |
253 q = lock->waitQ.next; | |
254 if (q == &lock->waitQ || _PR_THREAD_CONDQ_PTR(q)->priority == | |
255 _PR_THREAD_CONDQ_PTR(lock->waitQ.prev)->priority) { | |
256 /* | |
257 * If all the threads in the lock waitQ have the same priority, | |
258 * then avoid scanning the list: insert the element at the end. | |
259 */ | |
260 q = &lock->waitQ; | |
261 } else { | |
262 /* Sort thread into lock's waitQ at appropriate point */ | |
263 /* Now scan the list for where to insert this entry */ | |
264 while (q != &lock->waitQ) { | |
265 t = _PR_THREAD_CONDQ_PTR(lock->waitQ.next); | |
266 if (me->priority > t->priority) { | |
267 /* Found a lower priority thread to insert in fr
ont of */ | |
268 break; | |
269 } | |
270 q = q->next; | |
271 } | |
272 } | |
273 PR_INSERT_BEFORE(&me->waitQLinks, q); | |
274 | |
275 /* | |
276 Now grab the threadLock since we are about to change the state. We have | |
277 to do this since a PR_Suspend or PR_SetThreadPriority type call that tak
es | |
278 a PRThread* as an argument could be changing the state of this thread fr
om | |
279 a thread running on a different cpu. | |
280 */ | |
281 | |
282 _PR_THREAD_LOCK(me); | |
283 me->state = _PR_LOCK_WAIT; | |
284 me->wait.lock = lock; | |
285 _PR_THREAD_UNLOCK(me); | |
286 | |
287 _PR_LOCK_UNLOCK(lock); | |
288 | |
289 _PR_MD_WAIT(me, PR_INTERVAL_NO_TIMEOUT); | |
290 goto retry; | |
291 | |
292 #endif /* _PR_GLOBAL_THREADS_ONLY */ | |
293 } | |
294 | |
/*
** Unlock the lock.
**
** Returns PR_FAILURE (without unlocking) if the caller does not own the
** lock; otherwise restores any priority boost received while holding
** the lock, wakes the first eligible waiter, clears ownership, and
** returns PR_SUCCESS.
*/
PR_IMPLEMENT(PRStatus) PR_Unlock(PRLock *lock)
{
    PRCList *q;
    PRThreadPriority pri, boost;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(lock != NULL);
    PR_ASSERT(lock->owner == me);
    PR_ASSERT(me != suspendAllThread);
    PR_ASSERT(!(me->flags & _PR_IDLE_THREAD));
    /* In release builds a non-owner gets a failure return, not a crash. */
    if (lock->owner != me) {
        return PR_FAILURE;
    }

#ifdef _PR_GLOBAL_THREADS_ONLY
    /* All threads are native: delegate directly to the MD unlock. */
    lock->owner = 0;
    _PR_MD_UNLOCK(&lock->ilock);
    return PR_SUCCESS;
#else  /* _PR_GLOBAL_THREADS_ONLY */

    if (_native_threads_only) {
        lock->owner = 0;
        _PR_MD_UNLOCK(&lock->ilock);
        return PR_SUCCESS;
    }

    /* Local threads run the queueing protocol with interrupts off. */
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_LOCK_LOCK(lock);

    /* Remove the lock from the owning thread's lock list */
    PR_REMOVE_LINK(&lock->links);
    pri = lock->priority;
    boost = lock->boostPriority;
    if (boost > pri) {
        /*
        ** We received a priority boost during the time we held the lock.
        ** We need to figure out what priority to move to by scanning
        ** down our list of locks that we are still holding and using
        ** the highest boosted priority found.
        */
        q = me->lockList.next;
        while (q != &me->lockList) {
            PRLock *ll = _PR_LOCK_PTR(q);
            if (ll->boostPriority > pri) {
                pri = ll->boostPriority;
            }
            q = q->next;
        }
        if (pri != me->priority) {
            _PR_SetThreadPriority(me, pri);
        }
    }

    /* Unblock the first waiting thread */
    q = lock->waitQ.next;
    if (q != &lock->waitQ)
        _PR_UnblockLockWaiter(lock);
    lock->boostPriority = PR_PRIORITY_LOW;
    lock->owner = 0;
    _PR_LOCK_UNLOCK(lock);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
    return PR_SUCCESS;
#endif /* _PR_GLOBAL_THREADS_ONLY */
}
365 | |
366 /* | |
367 ** If the current thread owns |lock|, this assertion is guaranteed to | |
368 ** succeed. Otherwise, the behavior of this function is undefined. | |
369 */ | |
370 PR_IMPLEMENT(void) PR_AssertCurrentThreadOwnsLock(PRLock *lock) | |
371 { | |
372 PRThread *me = _PR_MD_CURRENT_THREAD(); | |
373 PR_ASSERT(lock->owner == me); | |
374 } | |
375 | |
/*
** Test and then lock the lock if it's not already locked by some other
** thread. Return PR_FALSE if some other thread owned the lock at the
** time of the call.
*/
PR_IMPLEMENT(PRBool) PR_TestAndLock(PRLock *lock)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRBool rv = PR_FALSE;
    PRIntn is;

#ifdef _PR_GLOBAL_THREADS_ONLY
    /* All threads are native: the MD test-and-lock does the whole job
    ** (0 return means the lock was acquired). */
    is = _PR_MD_TEST_AND_LOCK(&lock->ilock);
    if (is == 0) {
        lock->owner = me;
        return PR_TRUE;
    }
    return PR_FALSE;
#else /* _PR_GLOBAL_THREADS_ONLY */

#ifndef _PR_LOCAL_THREADS_ONLY
    if (_native_threads_only) {
        is = _PR_MD_TEST_AND_LOCK(&lock->ilock);
        if (is == 0) {
            lock->owner = me;
            return PR_TRUE;
        }
        return PR_FALSE;
    }
#endif

    /* Local threads: examine the lock word with interrupts off. */
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);

    _PR_LOCK_LOCK(lock);
    if (lock->owner == 0) {
        /* Just got the lock */
        lock->owner = me;
        lock->priority = me->priority;
        /* Add the granted lock to this owning thread's lock list */
        PR_APPEND_LINK(&lock->links, &me->lockList);
        rv = PR_TRUE;
    }
    _PR_LOCK_UNLOCK(lock);

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
    return rv;
#endif /* _PR_GLOBAL_THREADS_ONLY */
}
426 | |
427 /************************************************************************/ | |
428 /************************************************************************/ | |
429 /***********************ROUTINES FOR DCE EMULATION***********************/ | |
430 /************************************************************************/ | |
431 /************************************************************************/ | |
432 PR_IMPLEMENT(PRStatus) PRP_TryLock(PRLock *lock) | |
433 { return (PR_TestAndLock(lock)) ? PR_SUCCESS : PR_FAILURE; } | |
OLD | NEW |