/* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
/* This Source Code Form is subject to the terms of the Mozilla Public
 * License, v. 2.0. If a copy of the MPL was not distributed with this
 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */

#include "primpl.h"
#include <signal.h>
#include <string.h>

#if defined(WIN95)
/*
** Some local variables report warnings on Win95 because the code paths
** using them are conditioned on HAVE_CUSTOM_USER_THREADS.
** The pragma suppresses the warning.
*/
#pragma warning(disable : 4101)
#endif

/* _pr_activeLock protects the following global variables */
PRLock *_pr_activeLock;
PRInt32 _pr_primordialExitCount;   /* In PR_Cleanup(), the primordial thread
                                    * waits until all other user (non-system)
                                    * threads have terminated before it exits.
                                    * So whenever we decrement _pr_userActive,
                                    * it is compared with
                                    * _pr_primordialExitCount.
                                    * If the primordial thread is a system
                                    * thread, then _pr_primordialExitCount
                                    * is 0. If the primordial thread is
                                    * itself a user thread, then
                                    * _pr_primordialExitCount is 1.
                                    */
PRCondVar *_pr_primordialExitCVar; /* When _pr_userActive is decremented to
                                    * _pr_primordialExitCount, this condition
                                    * variable is notified.
                                    */

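/*
** Dead-thread recycling state, all protected by _pr_deadQLock: the two
** queues below hold exited native and user threads that _PR_RecycleThread()
** parked for reuse by _PR_CreateThread(), and the counters track the queue
** lengths against the _pr_recycleThreads limit.
*/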
PRLock *_pr_deadQLock;
PRUint32 _pr_numNativeDead;
PRUint32 _pr_numUserDead;
PRCList _pr_deadNativeQ;
PRCList _pr_deadUserQ;

PRUint32 _pr_join_counter;

PRUint32 _pr_local_threads;
PRUint32 _pr_global_threads;

PRBool suspendAllOn = PR_FALSE;
PRThread *suspendAllThread = NULL;

extern PRCList _pr_active_global_threadQ;
extern PRCList _pr_active_local_threadQ;

static void _PR_DecrActiveThreadCount(PRThread *thread);
static PRThread *_PR_AttachThread(PRThreadType, PRThreadPriority, PRThreadStack *);
static void _PR_InitializeNativeStack(PRThreadStack *ts);
static void _PR_InitializeRecycledThread(PRThread *thread);
static void _PR_UserRunThread(void);

void _PR_InitThreads(PRThreadType type, PRThreadPriority priority,
                     PRUintn maxPTDs)
{
    PRThread *thread;
    PRThreadStack *stack;

    _pr_terminationCVLock = PR_NewLock();
    _pr_activeLock = PR_NewLock();

#ifndef HAVE_CUSTOM_USER_THREADS
    stack = PR_NEWZAP(PRThreadStack);
#ifdef HAVE_STACK_GROWING_UP
    stack->stackTop = (char*) ((((long)&type) >> _pr_pageShift)
                               << _pr_pageShift);
#else
#if defined(SOLARIS) || defined (UNIXWARE) && defined (USR_SVR4_THREADS)
    stack->stackTop = (char*) &thread;
#else
    stack->stackTop = (char*) ((((long)&type + _pr_pageSize - 1)
                                >> _pr_pageShift) << _pr_pageShift);
#endif
#endif
#else
    /* If stack is NULL, we're using custom user threads like NT fibers. */
    stack = PR_NEWZAP(PRThreadStack);
    if (stack) {
        stack->stackSize = 0;
        _PR_InitializeNativeStack(stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    thread = _PR_AttachThread(type, priority, stack);
    if (thread) {
        _PR_MD_SET_CURRENT_THREAD(thread);

        if (type == PR_SYSTEM_THREAD) {
            thread->flags = _PR_SYSTEM;
            _pr_systemActive++;
            _pr_primordialExitCount = 0;
        } else {
            _pr_userActive++;
            _pr_primordialExitCount = 1;
        }
        thread->no_sched = 1;
        _pr_primordialExitCVar = PR_NewCondVar(_pr_activeLock);
    }

    if (!thread) PR_Abort();
#ifdef _PR_LOCAL_THREADS_ONLY
    thread->flags |= _PR_PRIMORDIAL;
#else
    thread->flags |= _PR_PRIMORDIAL | _PR_GLOBAL_SCOPE;
#endif

    /*
     * Needs _PR_PRIMORDIAL flag set before calling
     * _PR_MD_INIT_THREAD()
     */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * XXX do what?
         */
    }

    if (_PR_IS_NATIVE_THREAD(thread)) {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
    } else {
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
        _pr_local_threads++;
    }

    _pr_recycleThreads = 0;
    _pr_deadQLock = PR_NewLock();
    _pr_numNativeDead = 0;
    _pr_numUserDead = 0;
    PR_INIT_CLIST(&_pr_deadNativeQ);
    PR_INIT_CLIST(&_pr_deadUserQ);
}

void _PR_CleanupThreads(void)
{
    if (_pr_terminationCVLock) {
        PR_DestroyLock(_pr_terminationCVLock);
        _pr_terminationCVLock = NULL;
    }
    if (_pr_activeLock) {
        PR_DestroyLock(_pr_activeLock);
        _pr_activeLock = NULL;
    }
    if (_pr_primordialExitCVar) {
        PR_DestroyCondVar(_pr_primordialExitCVar);
        _pr_primordialExitCVar = NULL;
    }
    /* TODO _pr_dead{Native,User}Q need to be deleted */
    if (_pr_deadQLock) {
        PR_DestroyLock(_pr_deadQLock);
        _pr_deadQLock = NULL;
    }
}

/*
** Initialize a stack for a native thread
*/
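/*
** Illustrative arithmetic (assuming a 4 KB page, so _pr_pageShift == 12):
** rounding a stack address down with
** (addr >> _pr_pageShift) << _pr_pageShift maps, e.g., 0x7fffd234 to
** 0x7fffd000, the base of the page the stack currently occupies.
*/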
static void _PR_InitializeNativeStack(PRThreadStack *ts)
{
    if( ts && (ts->stackTop == 0) ) {
        ts->allocSize = ts->stackSize;

        /*
        ** Setup stackTop and stackBottom values.
        */
#ifdef HAVE_STACK_GROWING_UP
        ts->allocBase = (char*) ((((long)&ts) >> _pr_pageShift)
                                 << _pr_pageShift);
        ts->stackBottom = ts->allocBase + ts->stackSize;
        ts->stackTop = ts->allocBase;
#else
        ts->allocBase = (char*) ((((long)&ts + _pr_pageSize - 1)
                                  >> _pr_pageShift) << _pr_pageShift);
        ts->stackTop = ts->allocBase;
        ts->stackBottom = ts->allocBase - ts->stackSize;
#endif
    }
}

void _PR_NotifyJoinWaiters(PRThread *thread)
{
    /*
    ** Handle joinable threads. Change the state to waiting for join.
    ** Remove from our run Q and put it on global waiting to join Q.
    ** Notify on our "termination" condition variable so that joining
    ** thread will know about our termination. Switch our context and
    ** come back later on to continue the cleanup.
    */
    PR_ASSERT(thread == _PR_MD_CURRENT_THREAD());
    if (thread->term != NULL) {
        PR_Lock(_pr_terminationCVLock);
        _PR_THREAD_LOCK(thread);
        thread->state = _PR_JOIN_WAIT;
        if ( !_PR_IS_NATIVE_THREAD(thread) ) {
            _PR_MISCQ_LOCK(thread->cpu);
            _PR_ADD_JOINQ(thread, thread->cpu);
            _PR_MISCQ_UNLOCK(thread->cpu);
        }
        _PR_THREAD_UNLOCK(thread);
        PR_NotifyCondVar(thread->term);
        PR_Unlock(_pr_terminationCVLock);
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
        PR_ASSERT(thread->state != _PR_JOIN_WAIT);
    }
}
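
/*
** The join handshake, as sketched from this file: the dying thread runs
** _PR_NotifyJoinWaiters() above, parks itself in _PR_JOIN_WAIT on the join
** queue, and blocks in _PR_MD_WAIT(); PR_JoinThread() later observes
** _PR_JOIN_WAIT under _pr_terminationCVLock, moves the thread back to a
** run queue, and wakes it so it can finish destroying itself.
*/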

/*
 * Zero some of the data members of a recycled thread.
 *
 * Note that we can do this either when a dead thread is added to
 * the dead thread queue or when it is reused. Here, we are doing
 * this lazily, when the thread is reused in _PR_CreateThread().
 */
static void _PR_InitializeRecycledThread(PRThread *thread)
{
    /*
     * Assert that the following data members are already zeroed
     * by _PR_CleanupThread().
     */
#ifdef DEBUG
    if (thread->privateData) {
        unsigned int i;
        for (i = 0; i < thread->tpdLength; i++) {
            PR_ASSERT(thread->privateData[i] == NULL);
        }
    }
#endif
    PR_ASSERT(thread->dumpArg == 0 && thread->dump == 0);
    PR_ASSERT(thread->errorString == 0 && thread->errorStringSize == 0);
    PR_ASSERT(thread->errorStringLength == 0);
    PR_ASSERT(thread->name == 0);

    /* Reset data members in thread structure */
    thread->errorCode = thread->osErrorCode = 0;
    thread->io_pending = thread->io_suspended = PR_FALSE;
    thread->environment = 0;
    PR_INIT_CLIST(&thread->lockList);
}

PRStatus _PR_RecycleThread(PRThread *thread)
{
    if ( _PR_IS_NATIVE_THREAD(thread) &&
         _PR_NUM_DEADNATIVE < _pr_recycleThreads) {
        _PR_DEADQ_LOCK;
        PR_APPEND_LINK(&thread->links, &_PR_DEADNATIVEQ);
        _PR_INC_DEADNATIVE;
        _PR_DEADQ_UNLOCK;
        return (PR_SUCCESS);
    } else if ( !_PR_IS_NATIVE_THREAD(thread) &&
                _PR_NUM_DEADUSER < _pr_recycleThreads) {
        _PR_DEADQ_LOCK;
        PR_APPEND_LINK(&thread->links, &_PR_DEADUSERQ);
        _PR_INC_DEADUSER;
        _PR_DEADQ_UNLOCK;
        return (PR_SUCCESS);
    }
    return (PR_FAILURE);
}

/*
 * Decrement the active thread count, either _pr_systemActive or
 * _pr_userActive, depending on whether the thread is a system thread
 * or a user thread. If all the user threads, except possibly
 * the primordial thread, have terminated, we notify the primordial
 * thread of this condition.
 *
 * Since this function locks _pr_activeLock, do not call it while
 * already holding _pr_activeLock, as doing so would deadlock.
 */
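/*
** Illustrative misuse (hypothetical caller): taking PR_Lock(_pr_activeLock)
** and then calling _PR_DecrActiveThreadCount(thread) would self-deadlock,
** because the function immediately tries to acquire _pr_activeLock again
** and PRLocks are not re-entrant.
*/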

static void
_PR_DecrActiveThreadCount(PRThread *thread)
{
    PR_Lock(_pr_activeLock);
    if (thread->flags & _PR_SYSTEM) {
        _pr_systemActive--;
    } else {
        _pr_userActive--;
        if (_pr_userActive == _pr_primordialExitCount) {
            PR_NotifyCondVar(_pr_primordialExitCVar);
        }
    }
    PR_Unlock(_pr_activeLock);
}

/*
** Free the thread structure
*/
static void
_PR_DestroyThread(PRThread *thread)
{
    _PR_MD_FREE_LOCK(&thread->threadLock);
    PR_DELETE(thread);
}

void
_PR_NativeDestroyThread(PRThread *thread)
{
    if(thread->term) {
        PR_DestroyCondVar(thread->term);
        thread->term = 0;
    }
    if (NULL != thread->privateData) {
        PR_ASSERT(0 != thread->tpdLength);
        PR_DELETE(thread->privateData);
        thread->tpdLength = 0;
    }
    PR_DELETE(thread->stack);
    _PR_DestroyThread(thread);
}

void
_PR_UserDestroyThread(PRThread *thread)
{
    if(thread->term) {
        PR_DestroyCondVar(thread->term);
        thread->term = 0;
    }
    if (NULL != thread->privateData) {
        PR_ASSERT(0 != thread->tpdLength);
        PR_DELETE(thread->privateData);
        thread->tpdLength = 0;
    }
    _PR_MD_FREE_LOCK(&thread->threadLock);
    if (thread->threadAllocatedOnStack == 1) {
        _PR_MD_CLEAN_THREAD(thread);
        /*
         * Because the no_sched field is set, this thread/stack will
         * not be re-used until the flag is cleared by the thread
         * we will context switch to.
         */
        _PR_FreeStack(thread->stack);
    } else {
#ifdef WINNT
        _PR_MD_CLEAN_THREAD(thread);
#else
        /*
         * This assertion does not apply to NT. On NT, every fiber
         * has its threadAllocatedOnStack equal to 0. Elsewhere,
         * only the primordial thread has its threadAllocatedOnStack
         * equal to 0.
         */
        PR_ASSERT(thread->flags & _PR_PRIMORDIAL);
#endif
    }
}


/*
** Run a thread's start function. When the start function returns the
** thread is done executing and no longer needs the CPU. If there are no
** more user threads running then we can exit the program.
*/
void _PR_NativeRunThread(void *arg)
{
    PRThread *thread = (PRThread *)arg;

    _PR_MD_SET_CURRENT_THREAD(thread);

    _PR_MD_SET_CURRENT_CPU(NULL);

    /* Set up the thread stack information */
    _PR_InitializeNativeStack(thread->stack);

    /* Set up the thread md information */
    if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
        /*
         * thread failed to initialize itself, possibly due to
         * failure to allocate per-thread resources
         */
        return;
    }

    while(1) {
        thread->state = _PR_RUNNING;

        /*
         * Add to list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_GLOBAL_THREADQ());
        _pr_global_threads++;
        PR_Unlock(_pr_activeLock);

        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        /*
         * remove thread from list of active threads
         */
        PR_Lock(_pr_activeLock);
        PR_REMOVE_LINK(&thread->active);
        _pr_global_threads--;
        PR_Unlock(_pr_activeLock);

        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
             * thread not recycled
             * platform-specific thread exit processing
             * - for stuff like releasing native-thread resources, etc.
             */
            _PR_MD_EXIT_THREAD(thread);
            /*
             * Free memory allocated for the thread
             */
            _PR_NativeDestroyThread(thread);
            /*
             * thread gone, cannot de-reference thread now
             */
            return;
        }

        /* Now wait for someone to activate us again... */
        _PR_MD_WAIT(thread, PR_INTERVAL_NO_TIMEOUT);
    }
}

static void _PR_UserRunThread(void)
{
    PRThread *thread = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    if (_MD_LAST_THREAD())
        _MD_LAST_THREAD()->no_sched = 0;

#ifdef HAVE_CUSTOM_USER_THREADS
    if (thread->stack == NULL) {
        thread->stack = PR_NEWZAP(PRThreadStack);
        _PR_InitializeNativeStack(thread->stack);
    }
#endif /* HAVE_CUSTOM_USER_THREADS */

    while(1) {
        /* Run thread main */
        if ( !_PR_IS_NATIVE_THREAD(thread)) _PR_MD_SET_INTSOFF(0);

        /*
         * Add to list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_Lock(_pr_activeLock);
            PR_APPEND_LINK(&thread->active, &_PR_ACTIVE_LOCAL_THREADQ());
            _pr_local_threads++;
            PR_Unlock(_pr_activeLock);
        }

        (*thread->startFunc)(thread->arg);

        /*
         * The following two assertions are meant for NT asynch io.
         *
         * The thread should have no asynch io in progress when it
         * exits, otherwise the overlapped buffer, which is part of
         * the thread structure, would become invalid.
         */
        PR_ASSERT(thread->io_pending == PR_FALSE);
        /*
         * This assertion enforces the programming guideline that
         * if an io function times out or is interrupted, the thread
         * should close the fd to force the asynch io to abort
         * before it exits. Right now, closing the fd is the only
         * way to clear the io_suspended flag.
         */
        PR_ASSERT(thread->io_suspended == PR_FALSE);

        PR_Lock(_pr_activeLock);
        /*
         * remove thread from list of active threads
         */
        if (!(thread->flags & _PR_IDLE_THREAD)) {
            PR_REMOVE_LINK(&thread->active);
            _pr_local_threads--;
        }
        PR_Unlock(_pr_activeLock);
        PR_LOG(_pr_thread_lm, PR_LOG_MIN, ("thread exiting"));

        /* All done, time to go away */
        _PR_CleanupThread(thread);

        _PR_INTSOFF(is);

        _PR_NotifyJoinWaiters(thread);

        _PR_DecrActiveThreadCount(thread);

        thread->state = _PR_DEAD_STATE;

        if (!_pr_recycleThreads || (_PR_RecycleThread(thread) ==
                                    PR_FAILURE)) {
            /*
            ** Destroy the thread resources
            */
            _PR_UserDestroyThread(thread);
        }

        /*
        ** Find another user thread to run. This cpu has finished the
        ** previous thread's main and is now ready to run another thread.
        */
        {
            PRInt32 is;
            _PR_INTSOFF(is);
            _PR_MD_SWITCH_CONTEXT(thread);
        }

        /* Will land here when we get scheduled again if we are recycling... */
    }
}

void _PR_SetThreadPriority(PRThread *thread, PRThreadPriority newPri)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRIntn is;

    if ( _PR_IS_NATIVE_THREAD(thread) ) {
        _PR_MD_SET_PRIORITY(&(thread->md), newPri);
        return;
    }

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    if (newPri != thread->priority) {
        _PRCPU *cpu = thread->cpu;

        switch (thread->state) {
            case _PR_RUNNING:
                /* Change my priority */

                _PR_RUNQ_LOCK(cpu);
                thread->priority = newPri;
                if (_PR_RUNQREADYMASK(cpu) >> (newPri + 1)) {
                    if (!_PR_IS_NATIVE_THREAD(me))
                        _PR_SET_RESCHED_FLAG();
                }
                _PR_RUNQ_UNLOCK(cpu);
                break;

            case _PR_RUNNABLE:

                _PR_RUNQ_LOCK(cpu);
                /* Move to different runQ */
                _PR_DEL_RUNQ(thread);
                thread->priority = newPri;
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                _PR_ADD_RUNQ(thread, cpu, newPri);
                _PR_RUNQ_UNLOCK(cpu);

                if (newPri > me->priority) {
                    if (!_PR_IS_NATIVE_THREAD(me))
                        _PR_SET_RESCHED_FLAG();
                }

                break;

            case _PR_LOCK_WAIT:
            case _PR_COND_WAIT:
            case _PR_IO_WAIT:
            case _PR_SUSPENDED:

                thread->priority = newPri;
                break;
        }
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

/*
** Suspend the named thread
*/
static void _PR_Suspend(PRThread *thread)
{
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    PR_ASSERT(thread != me);
    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread) || (!thread->cpu));

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
        case _PR_RUNNABLE:
            if (!_PR_IS_NATIVE_THREAD(thread)) {
                _PR_RUNQ_LOCK(thread->cpu);
                _PR_DEL_RUNQ(thread);
                _PR_RUNQ_UNLOCK(thread->cpu);

                _PR_MISCQ_LOCK(thread->cpu);
                _PR_ADD_SUSPENDQ(thread, thread->cpu);
                _PR_MISCQ_UNLOCK(thread->cpu);
            } else {
                /*
                 * Only LOCAL threads are suspended by _PR_Suspend
                 */
                PR_ASSERT(0);
            }
            thread->state = _PR_SUSPENDED;
            break;

        case _PR_RUNNING:
            /*
             * The thread being suspended should be a LOCAL thread with
             * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
             */
            PR_ASSERT(0);
            break;

        case _PR_LOCK_WAIT:
        case _PR_IO_WAIT:
        case _PR_COND_WAIT:
            if (_PR_IS_NATIVE_THREAD(thread)) {
                _PR_MD_SUSPEND_THREAD(thread);
            }
            thread->flags |= _PR_SUSPENDING;
            break;

        default:
            PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

static void _PR_Resume(PRThread *thread)
{
    PRThreadPriority pri;
    PRIntn is;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    _PR_THREAD_LOCK(thread);
    switch (thread->state) {
        case _PR_SUSPENDED:
            thread->state = _PR_RUNNABLE;
            thread->flags &= ~_PR_SUSPENDING;
            if (!_PR_IS_NATIVE_THREAD(thread)) {
                _PR_MISCQ_LOCK(thread->cpu);
                _PR_DEL_SUSPENDQ(thread);
                _PR_MISCQ_UNLOCK(thread->cpu);

                pri = thread->priority;

                _PR_RUNQ_LOCK(thread->cpu);
                _PR_ADD_RUNQ(thread, thread->cpu, pri);
                _PR_RUNQ_UNLOCK(thread->cpu);

                if (pri > _PR_MD_CURRENT_THREAD()->priority) {
                    if (!_PR_IS_NATIVE_THREAD(me))
                        _PR_SET_RESCHED_FLAG();
                }
            } else {
                PR_ASSERT(0);
            }
            break;

        case _PR_IO_WAIT:
        case _PR_COND_WAIT:
            thread->flags &= ~_PR_SUSPENDING;
            /* PR_ASSERT(thread->wait.monitor->stickyCount == 0); */
            break;

        case _PR_LOCK_WAIT:
        {
            PRLock *wLock = thread->wait.lock;

            thread->flags &= ~_PR_SUSPENDING;

            _PR_LOCK_LOCK(wLock);
            if (thread->wait.lock->owner == 0) {
                _PR_UnblockLockWaiter(thread->wait.lock);
            }
            _PR_LOCK_UNLOCK(wLock);
            break;
        }
        case _PR_RUNNABLE:
            break;
        case _PR_RUNNING:
            /*
             * The thread being resumed should be a LOCAL thread with
             * _pr_numCPUs == 1. Hence, the thread cannot be in RUNNING state
             */
            PR_ASSERT(0);
            break;

        default:
            /*
             * thread should not be in any other state, such as
             * _PR_JOIN_WAIT, _PR_UNBORN, or _PR_DEAD_STATE
             */
            PR_Abort();
    }
    _PR_THREAD_UNLOCK(thread);
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);
}

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
static PRThread *get_thread(_PRCPU *cpu, PRBool *wakeup_cpus)
{
    PRThread *thread;
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;

    _PR_RUNQ_LOCK(cpu);
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if (thread->no_sched) {
                    thread = NULL;
                    /*
                     * Need to wake up the cpus to avoid missing a
                     * runnable thread.
                     * Waking up all CPUs needs to happen only once.
                     */
                    *wakeup_cpus = PR_TRUE;
                    continue;
                } else if (thread->flags & _PR_BOUND_THREAD) {
                    /*
                     * Thread bound to cpu 0
                     */
                    thread = NULL;
#ifdef IRIX
                    _PR_MD_WAKEUP_PRIMORDIAL_CPU();
#endif
                    continue;
                } else if (thread->io_pending == PR_TRUE) {
                    /*
                     * A thread that is blocked for I/O needs to run
                     * on the same cpu on which it was blocked. This is
                     * because the cpu's ioq is accessed without lock
                     * protection and scheduling the thread on a different
                     * cpu would preclude this optimization.
                     */
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    return(thread);
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);
    return(thread);
}
#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

/*
** Schedule this native thread by finding the highest priority nspr
** thread that is ready to run.
**
** Note: everyone really needs to call _PR_MD_SWITCH_CONTEXT (which
** calls _PR_Schedule()) rather than calling _PR_Schedule() directly.
** Otherwise, any initialization required when switching via
** SWITCH_CONTEXT will not get done!
*/
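/*
** How the ready mask is read below (a worked example): bit i of
** _PR_RUNQREADYMASK(cpu) is set when run queue i is non-empty, so
** r == 0 means nothing is ready, r == (1 << PR_PRIORITY_NORMAL) means
** only normal-priority threads are ready, and any other value forces a
** scan from PR_PRIORITY_LAST down to PR_PRIORITY_FIRST.
*/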
void _PR_Schedule(void)
{
    PRThread *thread, *me = _PR_MD_CURRENT_THREAD();
    _PRCPU *cpu = _PR_MD_CURRENT_CPU();
    PRIntn pri;
    PRUint32 r;
    PRCList *qp;
    PRIntn priMin, priMax;
#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)
    PRBool wakeup_cpus;
#endif

    /* Interrupts must be disabled */
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me) || _PR_MD_GET_INTSOFF() != 0);

    /* Since we are rescheduling, the resched flag is no longer needed */
    _PR_CLEAR_RESCHED_FLAG();

    /*
    ** Find highest priority thread to run. Bigger priority numbers are
    ** higher priority threads
    */
    _PR_RUNQ_LOCK(cpu);
    /*
     * if we are in SuspendAll mode, can schedule only the thread
     * that called PR_SuspendAll
     *
     * The thread may be ready to run now, after completing an I/O
     * operation, for example
     */
    if ((thread = suspendAllThread) != 0) {
        if ((!(thread->no_sched)) && (thread->state == _PR_RUNNABLE)) {
            /* Pull thread off of its run queue */
            _PR_DEL_RUNQ(thread);
            _PR_RUNQ_UNLOCK(cpu);
            goto found_thread;
        } else {
            thread = NULL;
            _PR_RUNQ_UNLOCK(cpu);
            goto idle_thread;
        }
    }
    r = _PR_RUNQREADYMASK(cpu);
    if (r==0) {
        priMin = priMax = PR_PRIORITY_FIRST;
    } else if (r == (1<<PR_PRIORITY_NORMAL) ) {
        priMin = priMax = PR_PRIORITY_NORMAL;
    } else {
        priMin = PR_PRIORITY_FIRST;
        priMax = PR_PRIORITY_LAST;
    }
    thread = NULL;
    for (pri = priMax; pri >= priMin ; pri-- ) {
        if (r & (1 << pri)) {
            for (qp = _PR_RUNQ(cpu)[pri].next;
                 qp != &_PR_RUNQ(cpu)[pri];
                 qp = qp->next) {
                thread = _PR_THREAD_PTR(qp);
                /*
                 * skip non-schedulable threads
                 */
                PR_ASSERT(!(thread->flags & _PR_IDLE_THREAD));
                if ((thread->no_sched) && (me != thread)){
                    thread = NULL;
                    continue;
                } else {
                    /* Pull thread off of its run queue */
                    _PR_DEL_RUNQ(thread);
                    _PR_RUNQ_UNLOCK(cpu);
                    goto found_thread;
                }
            }
        }
        thread = NULL;
    }
    _PR_RUNQ_UNLOCK(cpu);

#if !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX)

    wakeup_cpus = PR_FALSE;
    _PR_CPU_LIST_LOCK();
    for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
        if (cpu != _PR_CPU_PTR(qp)) {
            if ((thread = get_thread(_PR_CPU_PTR(qp), &wakeup_cpus))
                != NULL) {
                thread->cpu = cpu;
                _PR_CPU_LIST_UNLOCK();
                if (wakeup_cpus == PR_TRUE)
                    _PR_MD_WAKEUP_CPUS();
                goto found_thread;
            }
        }
    }
    _PR_CPU_LIST_UNLOCK();
    if (wakeup_cpus == PR_TRUE)
        _PR_MD_WAKEUP_CPUS();

#endif /* !defined(_PR_LOCAL_THREADS_ONLY) && defined(XP_UNIX) */

idle_thread:
    /*
    ** There are no threads to run. Switch to the idle thread
    */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX, ("pausing"));
    thread = _PR_MD_CURRENT_CPU()->idle_thread;

found_thread:
    PR_ASSERT((me == thread) || ((thread->state == _PR_RUNNABLE) &&
                                 (!(thread->no_sched))));

    /* Resume the thread */
    PR_LOG(_pr_sched_lm, PR_LOG_MAX,
           ("switching to %d[%p]", thread->id, thread));
    PR_ASSERT(thread->state != _PR_RUNNING);
    thread->state = _PR_RUNNING;

    /* If we are on the runq, it just means that we went to sleep on some
     * resource, and by the time we got here another real native thread had
     * already given us the resource and put us back on the runqueue
     */
    PR_ASSERT(thread->cpu == _PR_MD_CURRENT_CPU());
    if (thread != me)
        _PR_MD_RESTORE_CONTEXT(thread);
#if 0
    /* XXXMB: with setjmp/longjmp it is impossible to land here, but
     * it is not with fibers... Is this a bad thing? I believe it is
     * still safe.
     */
    PR_NOT_REACHED("impossible return from schedule");
#endif
}

/*
** Attaches a thread.
** Does not set the _PR_MD_CURRENT_THREAD.
** Does not specify the scope of the thread.
*/
static PRThread *
_PR_AttachThread(PRThreadType type, PRThreadPriority priority,
                 PRThreadStack *stack)
{
    PRThread *thread;
    char *mem;

    if (priority > PR_PRIORITY_LAST) {
        priority = PR_PRIORITY_LAST;
    } else if (priority < PR_PRIORITY_FIRST) {
        priority = PR_PRIORITY_FIRST;
    }

    mem = (char*) PR_CALLOC(sizeof(PRThread));
    if (mem) {
        thread = (PRThread*) mem;
        thread->priority = priority;
        thread->stack = stack;
        thread->state = _PR_RUNNING;
        PR_INIT_CLIST(&thread->lockList);
        if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
            PR_DELETE(thread);
            return 0;
        }

        return thread;
    }
    return 0;
}


PR_IMPLEMENT(PRThread*)
_PR_NativeCreateThread(PRThreadType type,
                       void (*start)(void *arg),
                       void *arg,
                       PRThreadPriority priority,
                       PRThreadScope scope,
                       PRThreadState state,
                       PRUint32 stackSize,
                       PRUint32 flags)
{
    PRThread *thread;

    thread = _PR_AttachThread(type, priority, NULL);

    if (thread) {
        PR_Lock(_pr_activeLock);
        thread->flags = (flags | _PR_GLOBAL_SCOPE);
        thread->id = ++_pr_utid;
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }
        PR_Unlock(_pr_activeLock);

        thread->stack = PR_NEWZAP(PRThreadStack);
        if (!thread->stack) {
            PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
            goto done;
        }
        thread->stack->stackSize = stackSize?stackSize:_MD_DEFAULT_STACK_SIZE;
        thread->stack->thr = thread;
        thread->startFunc = start;
        thread->arg = arg;

        /*
        Set thread flags related to scope and joinable state. If joinable
        thread, allocate a "termination" condition variable.
        */
        if (state == PR_JOINABLE_THREAD) {
            thread->term = PR_NewCondVar(_pr_terminationCVLock);
            if (thread->term == NULL) {
                PR_DELETE(thread->stack);
                goto done;
            }
        }

        thread->state = _PR_RUNNING;
        if (_PR_MD_CREATE_THREAD(thread, _PR_NativeRunThread, priority,
                                 scope,state,stackSize) == PR_SUCCESS) {
            return thread;
        }
        if (thread->term) {
            PR_DestroyCondVar(thread->term);
            thread->term = NULL;
        }
        PR_DELETE(thread->stack);
    }

done:
    if (thread) {
        _PR_DecrActiveThreadCount(thread);
        _PR_DestroyThread(thread);
    }
    return NULL;
}

/************************************************************************/

PR_IMPLEMENT(PRThread*) _PR_CreateThread(PRThreadType type,
                                         void (*start)(void *arg),
                                         void *arg,
                                         PRThreadPriority priority,
                                         PRThreadScope scope,
                                         PRThreadState state,
                                         PRUint32 stackSize,
                                         PRUint32 flags)
{
    PRThread *me;
    PRThread *thread = NULL;
    PRThreadStack *stack;
    char *top;
    PRIntn is;
    PRIntn native = 0;
    PRIntn useRecycled = 0;
    PRBool status;

    /*
    First, pin down the priority. Not all compilers catch passing out of
    range enum here. If we let bad values through, priority queues won't work.
    */
    if (priority > PR_PRIORITY_LAST) {
        priority = PR_PRIORITY_LAST;
    } else if (priority < PR_PRIORITY_FIRST) {
        priority = PR_PRIORITY_FIRST;
    }

    if (!_pr_initialized) _PR_ImplicitInitialization();

    if (! (flags & _PR_IDLE_THREAD))
        me = _PR_MD_CURRENT_THREAD();

#if defined(_PR_GLOBAL_THREADS_ONLY)
    /*
     * can create global threads only
     */
    if (scope == PR_LOCAL_THREAD)
        scope = PR_GLOBAL_THREAD;
#endif

    if (_native_threads_only)
        scope = PR_GLOBAL_THREAD;

    native = (((scope == PR_GLOBAL_THREAD)|| (scope == PR_GLOBAL_BOUND_THREAD))
              && _PR_IS_NATIVE_THREAD_SUPPORTED());

    _PR_ADJUST_STACKSIZE(stackSize);

    if (native) {
        /*
         * clear the IDLE_THREAD flag which applies to LOCAL
         * threads only
         */
        flags &= ~_PR_IDLE_THREAD;
        flags |= _PR_GLOBAL_SCOPE;
        if (_PR_NUM_DEADNATIVE > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADNATIVE == 0) { /* Thread safe check */
                _PR_DEADQ_UNLOCK;
            } else {
                thread = _PR_THREAD_PTR(_PR_DEADNATIVEQ.next);
                PR_REMOVE_LINK(&thread->links);
                _PR_DEC_DEADNATIVE;
                _PR_DEADQ_UNLOCK;

                _PR_InitializeRecycledThread(thread);
                thread->startFunc = start;
                thread->arg = arg;
                thread->flags = (flags | _PR_GLOBAL_SCOPE);
                if (type == PR_SYSTEM_THREAD)
                {
                    thread->flags |= _PR_SYSTEM;
                    PR_ATOMIC_INCREMENT(&_pr_systemActive);
                }
                else PR_ATOMIC_INCREMENT(&_pr_userActive);

                if (state == PR_JOINABLE_THREAD) {
                    if (!thread->term)
                        thread->term = PR_NewCondVar(_pr_terminationCVLock);
                }
                else {
                    if(thread->term) {
                        PR_DestroyCondVar(thread->term);
                        thread->term = 0;
                    }
                }

                thread->priority = priority;
                _PR_MD_SET_PRIORITY(&(thread->md), priority);
                /* XXX what about stackSize? */
                thread->state = _PR_RUNNING;
                _PR_MD_WAKEUP_WAITER(thread);
                return thread;
            }
        }
        thread = _PR_NativeCreateThread(type, start, arg, priority,
                                        scope, state, stackSize, flags);
    } else {
        if (_PR_NUM_DEADUSER > 0) {
            _PR_DEADQ_LOCK;

            if (_PR_NUM_DEADUSER == 0) { /* thread safe check */
                _PR_DEADQ_UNLOCK;
            } else {
                PRCList *ptr;

                /* Go down list checking for a recycled thread with a
                 * large enough stack. XXXMB - this has a bad degenerate case.
                 */
                ptr = _PR_DEADUSERQ.next;
                while( ptr != &_PR_DEADUSERQ ) {
                    thread = _PR_THREAD_PTR(ptr);
                    if ((thread->stack->stackSize >= stackSize) &&
                        (!thread->no_sched)) {
                        PR_REMOVE_LINK(&thread->links);
                        _PR_DEC_DEADUSER;
                        break;
                    } else {
                        ptr = ptr->next;
                        thread = NULL;
                    }
                }

                _PR_DEADQ_UNLOCK;

                if (thread) {
                    _PR_InitializeRecycledThread(thread);
                    thread->startFunc = start;
                    thread->arg = arg;
                    thread->priority = priority;
                    if (state == PR_JOINABLE_THREAD) {
                        if (!thread->term)
                            thread->term = PR_NewCondVar(_pr_terminationCVLock);
                    } else {
                        if(thread->term) {
                            PR_DestroyCondVar(thread->term);
                            thread->term = 0;
                        }
                    }
                    useRecycled++;
                }
            }
        }
        if (thread == NULL) {
#ifndef HAVE_CUSTOM_USER_THREADS
            stack = _PR_NewStack(stackSize);
            if (!stack) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }

            /* Allocate thread object and per-thread data off the top of the stack */
            top = stack->stackTop;
#ifdef HAVE_STACK_GROWING_UP
            thread = (PRThread*) top;
            top = top + sizeof(PRThread);
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)(((PRUptrdiff)top + 0x40) & ~0x3f);
            }
#else
            top = top - sizeof(PRThread);
            thread = (PRThread*) top;
            /*
             * Make stack 64-byte aligned
             */
            if ((PRUptrdiff)top & 0x3f) {
                top = (char*)((PRUptrdiff)top & ~0x3f);
            }
#endif
            stack->thr = thread;
            memset(thread, 0, sizeof(PRThread));
            thread->threadAllocatedOnStack = 1;
#else
            thread = _PR_MD_CREATE_USER_THREAD(stackSize, start, arg);
            if (!thread) {
                PR_SetError(PR_OUT_OF_MEMORY_ERROR, 0);
                return NULL;
            }
            thread->threadAllocatedOnStack = 0;
            stack = NULL;
            top = NULL;
#endif

            /* Initialize thread */
            thread->tpdLength = 0;
            thread->privateData = NULL;
            thread->stack = stack;
            thread->priority = priority;
            thread->startFunc = start;
            thread->arg = arg;
            PR_INIT_CLIST(&thread->lockList);

            if (_PR_MD_INIT_THREAD(thread) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            if (_PR_MD_NEW_LOCK(&thread->threadLock) == PR_FAILURE) {
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                PR_SetError(PR_INSUFFICIENT_RESOURCES_ERROR, 0);
                return NULL;
            }

            _PR_MD_INIT_CONTEXT(thread, top, _PR_UserRunThread, &status);

            if (status == PR_FALSE) {
                _PR_MD_FREE_LOCK(&thread->threadLock);
                if (thread->threadAllocatedOnStack == 1)
                    _PR_FreeStack(thread->stack);
                else {
                    PR_DELETE(thread->privateData);
                    PR_DELETE(thread);
                }
                return NULL;
            }

            /*
            Set thread flags related to scope and joinable state. If joinable
            thread, allocate a "termination" condition variable.
            */
            if (state == PR_JOINABLE_THREAD) {
                thread->term = PR_NewCondVar(_pr_terminationCVLock);
                if (thread->term == NULL) {
                    _PR_MD_FREE_LOCK(&thread->threadLock);
                    if (thread->threadAllocatedOnStack == 1)
                        _PR_FreeStack(thread->stack);
                    else {
                        PR_DELETE(thread->privateData);
                        PR_DELETE(thread);
                    }
                    return NULL;
                }
            }

        }

        /* Update thread type counter */
        PR_Lock(_pr_activeLock);
        thread->flags = flags;
        thread->id = ++_pr_utid;
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }

        /* Make thread runnable */
        thread->state = _PR_RUNNABLE;
        /*
         * Add to list of active threads
         */
        PR_Unlock(_pr_activeLock);

        if ((! (thread->flags & _PR_IDLE_THREAD)) && _PR_IS_NATIVE_THREAD(me) )
            thread->cpu = _PR_GetPrimordialCPU();
        else
            thread->cpu = _PR_MD_CURRENT_CPU();

        PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

        if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me)) {
            _PR_INTSOFF(is);
            _PR_RUNQ_LOCK(thread->cpu);
            _PR_ADD_RUNQ(thread, thread->cpu, priority);
            _PR_RUNQ_UNLOCK(thread->cpu);
        }

        if (thread->flags & _PR_IDLE_THREAD) {
            /*
            ** If the creating thread is a kernel thread, we need to
            ** awaken the user thread idle thread somehow; potentially
            ** it could be sleeping in its idle loop, and we need to poke
            ** it. To do so, wake the idle thread...
            */
            _PR_MD_WAKEUP_WAITER(NULL);
        } else if (_PR_IS_NATIVE_THREAD(me)) {
            _PR_MD_WAKEUP_WAITER(thread);
        }
        if ((! (thread->flags & _PR_IDLE_THREAD)) && !_PR_IS_NATIVE_THREAD(me) )
            _PR_INTSON(is);
    }

    return thread;
}

PR_IMPLEMENT(PRThread*) PR_CreateThread(PRThreadType type,
                                        void (*start)(void *arg),
                                        void *arg,
                                        PRThreadPriority priority,
                                        PRThreadScope scope,
                                        PRThreadState state,
                                        PRUint32 stackSize)
{
    return _PR_CreateThread(type, start, arg, priority, scope, state,
                            stackSize, 0);
}
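
/*
** A minimal usage sketch (illustrative only; MyWorker and workerArg are
** hypothetical): create a joinable user thread with the default stack
** size (stackSize of 0) and wait for it to finish.
**
**     static void MyWorker(void *arg) { ... }
**
**     PRThread *t = PR_CreateThread(PR_USER_THREAD, MyWorker, workerArg,
**                                   PR_PRIORITY_NORMAL, PR_GLOBAL_THREAD,
**                                   PR_JOINABLE_THREAD, 0);
**     if (t != NULL)
**         (void) PR_JoinThread(t);
*/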

/*
** Associate a thread object with an existing native thread.
** "type" is the type of thread object to attach
** "priority" is the priority to assign to the thread
** "stack" defines the shape of the thread's stack
**
** This can return NULL if some kind of error occurs, or if memory is
** tight.
**
** This call is not normally needed unless you create your own native
** thread. PR_Init does this automatically for the primordial thread.
*/
PRThread* _PRI_AttachThread(PRThreadType type,
                            PRThreadPriority priority, PRThreadStack *stack,
                            PRUint32 flags)
{
    PRThread *thread;

    if ((thread = _PR_MD_GET_ATTACHED_THREAD()) != NULL) {
        return thread;
    }
    _PR_MD_SET_CURRENT_THREAD(NULL);

    /* Clear out any state if this thread was attached before */
    _PR_MD_SET_CURRENT_CPU(NULL);

    thread = _PR_AttachThread(type, priority, stack);
    if (thread) {
        PRIntn is;

        _PR_MD_SET_CURRENT_THREAD(thread);

        thread->flags = flags | _PR_GLOBAL_SCOPE | _PR_ATTACHED;

        if (!stack) {
            thread->stack = PR_NEWZAP(PRThreadStack);
            if (!thread->stack) {
                _PR_DestroyThread(thread);
                return NULL;
            }
            thread->stack->stackSize = _MD_DEFAULT_STACK_SIZE;
        }
        PR_INIT_CLIST(&thread->links);

        if (_PR_MD_INIT_ATTACHED_THREAD(thread) == PR_FAILURE) {
            PR_DELETE(thread->stack);
            _PR_DestroyThread(thread);
            return NULL;
        }

        _PR_MD_SET_CURRENT_CPU(NULL);

        if (_PR_MD_CURRENT_CPU()) {
            _PR_INTSOFF(is);
            PR_Lock(_pr_activeLock);
        }
        if (type == PR_SYSTEM_THREAD) {
            thread->flags |= _PR_SYSTEM;
            _pr_systemActive++;
        } else {
            _pr_userActive++;
        }
        if (_PR_MD_CURRENT_CPU()) {
            PR_Unlock(_pr_activeLock);
            _PR_INTSON(is);
        }
    }
    return thread;
}

PR_IMPLEMENT(PRThread*) PR_AttachThread(PRThreadType type,
                                        PRThreadPriority priority,
                                        PRThreadStack *stack)
{
    return PR_GetCurrentThread();
}

PR_IMPLEMENT(void) PR_DetachThread(void)
{
    /*
     * On IRIX, Solaris, and Windows, foreign threads are detached when
     * they terminate.
     */
#if !defined(IRIX) && !defined(WIN32) \
    && !(defined(SOLARIS) && defined(_PR_GLOBAL_THREADS_ONLY))
    PRThread *me;
    if (_pr_initialized) {
        me = _PR_MD_GET_ATTACHED_THREAD();
        if ((me != NULL) && (me->flags & _PR_ATTACHED))
            _PRI_DetachThread();
    }
#endif
}

void _PRI_DetachThread(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (me->flags & _PR_PRIMORDIAL) {
        /*
         * ignore, if primordial thread
         */
        return;
    }
    PR_ASSERT(me->flags & _PR_ATTACHED);
    PR_ASSERT(_PR_IS_NATIVE_THREAD(me));
    _PR_CleanupThread(me);
    PR_DELETE(me->privateData);

    _PR_DecrActiveThreadCount(me);

    _PR_MD_CLEAN_THREAD(me);
    _PR_MD_SET_CURRENT_THREAD(NULL);
    if (!me->threadAllocatedOnStack)
        PR_DELETE(me->stack);
    _PR_MD_FREE_LOCK(&me->threadLock);
    PR_DELETE(me);
}

/*
** Wait for thread termination:
** "thread" is the target thread
**
** This can return PR_FAILURE if no joinable thread could be found
** corresponding to the specified target thread.
**
** The calling thread is suspended until the target thread completes.
** Several threads cannot wait for the same thread to complete; one thread
** will complete successfully and the others will terminate with an error
** of PR_FAILURE. The calling thread will not be blocked if the target
** thread has already terminated.
*/
PR_IMPLEMENT(PRStatus) PR_JoinThread(PRThread *thread)
{
    PRIntn is;
    PRCondVar *term;
    PRThread *me = _PR_MD_CURRENT_THREAD();

    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    term = thread->term;
    /* can't join a non-joinable thread */
    if (term == NULL) {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        goto ErrorExit;
    }

    /* multiple threads can't wait on the same joinable thread */
    if (term->condQ.next != &term->condQ) {
        goto ErrorExit;
    }
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);

    /* wait for the target thread's termination cv invariant */
    PR_Lock (_pr_terminationCVLock);
    while (thread->state != _PR_JOIN_WAIT) {
        (void) PR_WaitCondVar(term, PR_INTERVAL_NO_TIMEOUT);
    }
    (void) PR_Unlock (_pr_terminationCVLock);

    /*
    Remove target thread from global waiting to join Q; make it runnable
    again and put it back on its run Q. When it gets scheduled later in
    _PR_RunThread code, it will clean up its stack.
    */
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSOFF(is);
    thread->state = _PR_RUNNABLE;
    if ( !_PR_IS_NATIVE_THREAD(thread) ) {
        _PR_THREAD_LOCK(thread);

        _PR_MISCQ_LOCK(thread->cpu);
        _PR_DEL_JOINQ(thread);
        _PR_MISCQ_UNLOCK(thread->cpu);

        _PR_AddThreadToRunQ(me, thread);
        _PR_THREAD_UNLOCK(thread);
    }
    if (!_PR_IS_NATIVE_THREAD(me))
        _PR_INTSON(is);

    _PR_MD_WAKEUP_WAITER(thread);

    return PR_SUCCESS;

ErrorExit:
    if ( !_PR_IS_NATIVE_THREAD(me)) _PR_INTSON(is);
    return PR_FAILURE;
}

PR_IMPLEMENT(void) PR_SetThreadPriority(PRThread *thread,
                                        PRThreadPriority newPri)
{

    /*
    First, pin down the priority. Not all compilers catch passing out of
    range enum here. If we let bad values through, priority queues won't work.
    */
    if ((PRIntn)newPri > (PRIntn)PR_PRIORITY_LAST) {
        newPri = PR_PRIORITY_LAST;
    } else if ((PRIntn)newPri < (PRIntn)PR_PRIORITY_FIRST) {
        newPri = PR_PRIORITY_FIRST;
    }

    if ( _PR_IS_NATIVE_THREAD(thread) ) {
        thread->priority = newPri;
        _PR_MD_SET_PRIORITY(&(thread->md), newPri);
    } else _PR_SetThreadPriority(thread, newPri);
}

PR_IMPLEMENT(PRStatus) PR_SetCurrentThreadName(const char *name)
{
    PRThread *thread;
    size_t nameLen;

    if (!name) {
        PR_SetError(PR_INVALID_ARGUMENT_ERROR, 0);
        return PR_FAILURE;
    }

    thread = PR_GetCurrentThread();
    if (!thread)
        return PR_FAILURE;

    PR_Free(thread->name);
    nameLen = strlen(name);
    thread->name = (char *)PR_Malloc(nameLen + 1);
    if (!thread->name)
        return PR_FAILURE;
    memcpy(thread->name, name, nameLen + 1);
    _PR_MD_SET_CURRENT_THREAD_NAME(thread->name);
    return PR_SUCCESS;
}

PR_IMPLEMENT(const char *) PR_GetThreadName(const PRThread *thread)
{
    if (!thread)
        return NULL;
    return thread->name;
}


/*
** This routine prevents all other threads from running. This call is needed by
** the garbage collector.
*/
PR_IMPLEMENT(void) PR_SuspendAll(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRCList *qp;

    /*
     * Stop all user and native threads which are marked GC able.
     */
    PR_Lock(_pr_activeLock);
    suspendAllOn = PR_TRUE;
    suspendAllThread = _PR_MD_CURRENT_THREAD();
    _PR_MD_BEGIN_SUSPEND_ALL();
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp))) {
            _PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp));
            PR_ASSERT((_PR_ACTIVE_THREAD_PTR(qp))->state != _PR_RUNNING);
        }
    }
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
            /* PR_Suspend(_PR_ACTIVE_THREAD_PTR(qp)); */
            _PR_MD_SUSPEND_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
    }
    _PR_MD_END_SUSPEND_ALL();
}

/*
** This routine unblocks all other threads that were suspended from running by
** PR_SuspendAll(). This call is needed by the garbage collector.
*/
PR_IMPLEMENT(void) PR_ResumeAll(void)
{
    PRThread *me = _PR_MD_CURRENT_THREAD();
    PRCList *qp;

    /*
     * Resume all user and native threads which are marked GC able.
     */
    _PR_MD_BEGIN_RESUME_ALL();
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
            _PR_Resume(_PR_ACTIVE_THREAD_PTR(qp));
    }
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp->next) {
        if ((me != _PR_ACTIVE_THREAD_PTR(qp)) &&
            _PR_IS_GCABLE_THREAD(_PR_ACTIVE_THREAD_PTR(qp)))
            _PR_MD_RESUME_THREAD(_PR_ACTIVE_THREAD_PTR(qp));
    }
    _PR_MD_END_RESUME_ALL();
    suspendAllThread = NULL;
    suspendAllOn = PR_FALSE;
    PR_Unlock(_pr_activeLock);
}

PR_IMPLEMENT(PRStatus) PR_EnumerateThreads(PREnumerator func, void *arg)
{
    PRCList *qp, *qp_next;
    PRIntn i = 0;
    PRStatus rv = PR_SUCCESS;
    PRThread* t;

    /*
    ** Currently, enumerating threads happens only with the other threads
    ** suspended and _pr_activeLock held
    */
    PR_ASSERT(suspendAllOn);

    /* Steve Morse, 4-23-97: Note that we can't walk a queue by taking
     * qp->next after applying the function "func". In particular, "func"
     * might remove the thread from the queue and put it into another one in
     * which case qp->next no longer points to the next entry in the original
     * queue.
     *
     * To get around this problem, we save qp->next in qp_next before applying
     * "func" and use that saved value as the next value after applying "func".
     */

    /*
     * Traverse the list of local and global threads
     */
    for (qp = _PR_ACTIVE_LOCAL_THREADQ().next;
         qp != &_PR_ACTIVE_LOCAL_THREADQ(); qp = qp_next)
    {
        qp_next = qp->next;
        t = _PR_ACTIVE_THREAD_PTR(qp);
        if (_PR_IS_GCABLE_THREAD(t))
        {
            rv = (*func)(t, i, arg);
            if (rv != PR_SUCCESS)
                return rv;
            i++;
        }
    }
    for (qp = _PR_ACTIVE_GLOBAL_THREADQ().next;
         qp != &_PR_ACTIVE_GLOBAL_THREADQ(); qp = qp_next)
    {
        qp_next = qp->next;
        t = _PR_ACTIVE_THREAD_PTR(qp);
        if (_PR_IS_GCABLE_THREAD(t))
        {
            rv = (*func)(t, i, arg);
            if (rv != PR_SUCCESS)
                return rv;
            i++;
        }
    }
    return rv;
}

/* FUNCTION: _PR_AddSleepQ
** DESCRIPTION:
**    Adds a thread to the sleep/pauseQ.
** RESTRICTIONS:
**    Caller must have the RUNQ lock.
**    Caller must be a user level thread
*/
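/*
** The sleepQ stores relative (delta) timeouts: each entry's "sleep" field
** is the wait remaining after its predecessor fires, and _PR_SLEEPQMAX is
** the absolute timeout of the last entry. A worked example: inserting
** threads with timeouts of 5, 12, and 20 ticks yields stored deltas of
** 5, 7, and 8, with _PR_SLEEPQMAX(cpu) == 20.
*/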
PR_IMPLEMENT(void)
_PR_AddSleepQ(PRThread *thread, PRIntervalTime timeout)
{
    _PRCPU *cpu = thread->cpu;

    if (timeout == PR_INTERVAL_NO_TIMEOUT) {
        /* append the thread to the global pause Q */
        PR_APPEND_LINK(&thread->links, &_PR_PAUSEQ(thread->cpu));
        thread->flags |= _PR_ON_PAUSEQ;
    } else {
        PRIntervalTime sleep;
        PRCList *q;
        PRThread *t;

        /* sort onto global sleepQ */
        sleep = timeout;

        /* Check if we are longest timeout */
        if (timeout >= _PR_SLEEPQMAX(cpu)) {
            PR_INSERT_BEFORE(&thread->links, &_PR_SLEEPQ(cpu));
            thread->sleep = timeout - _PR_SLEEPQMAX(cpu);
            _PR_SLEEPQMAX(cpu) = timeout;
        } else {
            /* Sort thread into global sleepQ at appropriate point */
            q = _PR_SLEEPQ(cpu).next;

            /* Now scan the list for where to insert this entry */
            while (q != &_PR_SLEEPQ(cpu)) {
                t = _PR_THREAD_PTR(q);
                if (sleep < t->sleep) {
                    /* Found sleeper to insert in front of */
                    break;
                }
                sleep -= t->sleep;
                q = q->next;
            }
            thread->sleep = sleep;
            PR_INSERT_BEFORE(&thread->links, q);

            /*
            ** Subtract our sleep time from the sleeper that follows us (there
            ** must be one) so that they remain relative to us.
            */
            PR_ASSERT (thread->links.next != &_PR_SLEEPQ(cpu));

            t = _PR_THREAD_PTR(thread->links.next);
            PR_ASSERT(_PR_THREAD_PTR(t->links.prev) == thread);
            t->sleep -= sleep;
        }

        thread->flags |= _PR_ON_SLEEPQ;
    }
}

/* FUNCTION: _PR_DelSleepQ
** DESCRIPTION:
**    Removes a thread from the sleep/pauseQ.
** INPUTS:
**    If propogate_time is true, then the thread following the deleted
**    thread will get the time from the deleted thread. This is used
**    when deleting a sleeper that has not timed out.
** RESTRICTIONS:
**    Caller must have the RUNQ lock.
**    Caller must be a user level thread
*/
PR_IMPLEMENT(void)
_PR_DelSleepQ(PRThread *thread, PRBool propogate_time)
{
    _PRCPU *cpu = thread->cpu;

    /* Remove from pauseQ/sleepQ */
    if (thread->flags & (_PR_ON_PAUSEQ|_PR_ON_SLEEPQ)) {
        if (thread->flags & _PR_ON_SLEEPQ) {
            PRCList *q = thread->links.next;
            if (q != &_PR_SLEEPQ(cpu)) {
                if (propogate_time == PR_TRUE) {
                    PRThread *after = _PR_THREAD_PTR(q);
                    after->sleep += thread->sleep;
                } else
                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
            } else {
                /* Check if prev is the beginning of the list; if so,
                 * we are the only element on the list.
                 */
                if (thread->links.prev != &_PR_SLEEPQ(cpu))
                    _PR_SLEEPQMAX(cpu) -= thread->sleep;
                else
                    _PR_SLEEPQMAX(cpu) = 0;
            }
            thread->flags &= ~_PR_ON_SLEEPQ;
        } else {
            thread->flags &= ~_PR_ON_PAUSEQ;
        }
        PR_REMOVE_LINK(&thread->links);
    } else
        PR_ASSERT(0);
}

void
_PR_AddThreadToRunQ(
    PRThread *me,     /* the current thread */
    PRThread *thread) /* the local thread to be added to a run queue */
{
    PRThreadPriority pri = thread->priority;
    _PRCPU *cpu = thread->cpu;

    PR_ASSERT(!_PR_IS_NATIVE_THREAD(thread));

#if defined(WINNT)
    /*
     * On NT, we can only reliably know that the current CPU
     * is not idle. We add the awakened thread to the run
     * queue of its CPU if its CPU is the current CPU.
     * For any other CPU, we don't really know whether it
     * is busy or idle. So in all other cases, we just
     * "post" the awakened thread to the IO completion port
     * for the next idle CPU to execute (this is done in
     * _PR_MD_WAKEUP_WAITER).
     * Threads with a suspended I/O operation remain bound to
     * the same cpu until I/O is cancelled
     *
     * NOTE: the boolean expression below must be the exact
     * opposite of the corresponding boolean expression in
     * _PR_MD_WAKEUP_WAITER.
     */
    if ((!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) ||
        (thread->md.thr_bound_cpu)) {
        PR_ASSERT(!thread->md.thr_bound_cpu ||
                  (thread->md.thr_bound_cpu == cpu));
        _PR_RUNQ_LOCK(cpu);
        _PR_ADD_RUNQ(thread, cpu, pri);
        _PR_RUNQ_UNLOCK(cpu);
    }
#else
    _PR_RUNQ_LOCK(cpu);
    _PR_ADD_RUNQ(thread, cpu, pri);
    _PR_RUNQ_UNLOCK(cpu);
    if (!_PR_IS_NATIVE_THREAD(me) && (cpu == me->cpu)) {
        if (pri > me->priority) {
            _PR_SET_RESCHED_FLAG();
        }
    }
#endif
}