Chromium Code Reviews

Side by Side Diff: nspr/pr/src/threads/combined/prucpu.c

Issue 2078763002: Delete bundled copy of NSS and replace with README. (Closed) Base URL: https://chromium.googlesource.com/chromium/deps/nss@master
Patch Set: Delete bundled copy of NSS and replace with README. Created 4 years, 6 months ago
1 /* -*- Mode: C++; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
2 /* This Source Code Form is subject to the terms of the Mozilla Public
3 * License, v. 2.0. If a copy of the MPL was not distributed with this
4 * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
5
6 #include "primpl.h"
7
8 _PRCPU *_pr_primordialCPU = NULL;
9
10 PRInt32 _pr_md_idle_cpus; /* number of idle cpus */
11 /*
12 * The idle threads in MxN models increment/decrement _pr_md_idle_cpus.
13 * If _PR_HAVE_ATOMIC_OPS is not defined, they can't use the atomic
14 * increment/decrement routines (which are based on PR_Lock/PR_Unlock),
15 * because PR_Lock asserts that the calling thread is not an idle thread.
16 * So we use a _MDLock to protect _pr_md_idle_cpus.
17 */
18 #if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
19 #ifndef _PR_HAVE_ATOMIC_OPS
20 static _MDLock _pr_md_idle_cpus_lock;
21 #endif
22 #endif
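/*
 * A condensed sketch of the pattern the comment above describes, assuming
 * the combined MxN configuration. The real increment/decrement sites are
 * inline in _PR_CPU_Idle() further down in this file:
 *
 *     #ifdef _PR_HAVE_ATOMIC_OPS
 *         _PR_MD_ATOMIC_INCREMENT(&_pr_md_idle_cpus);   // no PR_Lock involved
 *     #else
 *         _PR_MD_LOCK(&_pr_md_idle_cpus_lock);          // _MDLock, not PR_Lock
 *         _pr_md_idle_cpus++;
 *         _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock);
 *     #endif
 */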
23 PRUintn _pr_numCPU;
24 PRInt32 _pr_cpus_exit;
25 PRUint32 _pr_cpu_affinity_mask = 0;
26
27 #if !defined (_PR_GLOBAL_THREADS_ONLY)
28
29 static PRUintn _pr_cpuID;
30
31 static void PR_CALLBACK _PR_CPU_Idle(void *);
32
33 static _PRCPU *_PR_CreateCPU(void);
34 static PRStatus _PR_StartCPU(_PRCPU *cpu, PRThread *thread);
35
36 #if !defined(_PR_LOCAL_THREADS_ONLY)
37 static void _PR_RunCPU(void *arg);
38 #endif
39
40 void _PR_InitCPUs()
41 {
42 PRThread *me = _PR_MD_CURRENT_THREAD();
43
44 if (_native_threads_only)
45 return;
46
47 _pr_cpuID = 0;
48 _MD_NEW_LOCK( &_pr_cpuLock);
49 #if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
50 #ifndef _PR_HAVE_ATOMIC_OPS
51 _MD_NEW_LOCK(&_pr_md_idle_cpus_lock);
52 #endif
53 #endif
54
55 #ifdef _PR_LOCAL_THREADS_ONLY
56
57 #ifdef HAVE_CUSTOM_USER_THREADS
58 _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me);
59 #endif
60
61 /* Now start the first CPU. */
62 _pr_primordialCPU = _PR_CreateCPU();
63 _pr_numCPU = 1;
64 _PR_StartCPU(_pr_primordialCPU, me);
65
66 _PR_MD_SET_CURRENT_CPU(_pr_primordialCPU);
67
68 /* Initialize cpu for current thread (could be different from me) */
69 _PR_MD_CURRENT_THREAD()->cpu = _pr_primordialCPU;
70
71 _PR_MD_SET_LAST_THREAD(me);
72
73 #else /* Combined MxN model */
74
75 _pr_primordialCPU = _PR_CreateCPU();
76 _pr_numCPU = 1;
77 _PR_CreateThread(PR_SYSTEM_THREAD,
78 _PR_RunCPU,
79 _pr_primordialCPU,
80 PR_PRIORITY_NORMAL,
81 PR_GLOBAL_THREAD,
82 PR_UNJOINABLE_THREAD,
83 0,
84 _PR_IDLE_THREAD);
85
86 #endif /* _PR_LOCAL_THREADS_ONLY */
87
88 _PR_MD_INIT_CPUS();
89 }
90
91 #ifdef WINNT
92 /*
93 * Right now this function merely stops the CPUs and does
94 * not do any other cleanup.
95 *
96 * It is only implemented for WINNT because bug 161998 only
97 * affects the WINNT version of NSPR, but it would be nice
98 * to implement this function for other platforms too.
99 */
100 void _PR_CleanupCPUs(void)
101 {
102 PRUintn i;
103 PRCList *qp;
104 _PRCPU *cpu;
105
106 _pr_cpus_exit = 1;
107 for (i = 0; i < _pr_numCPU; i++) {
108 _PR_MD_WAKEUP_WAITER(NULL);
109 }
110 for (qp = _PR_CPUQ().next; qp != &_PR_CPUQ(); qp = qp->next) {
111 cpu = _PR_CPU_PTR(qp);
112 _PR_MD_JOIN_THREAD(&cpu->thread->md);
113 }
114 }
115 #endif
116
117 static _PRCPUQueue *_PR_CreateCPUQueue(void)
118 {
119 PRInt32 index;
120 _PRCPUQueue *cpuQueue;
121 cpuQueue = PR_NEWZAP(_PRCPUQueue);
122
123 _MD_NEW_LOCK( &cpuQueue->runQLock );
124 _MD_NEW_LOCK( &cpuQueue->sleepQLock );
125 _MD_NEW_LOCK( &cpuQueue->miscQLock );
126
127 for (index = 0; index < PR_ARRAY_SIZE(cpuQueue->runQ); index++)
128 PR_INIT_CLIST( &(cpuQueue->runQ[index]) );
129 PR_INIT_CLIST( &(cpuQueue->sleepQ) );
130 PR_INIT_CLIST( &(cpuQueue->pauseQ) );
131 PR_INIT_CLIST( &(cpuQueue->suspendQ) );
132 PR_INIT_CLIST( &(cpuQueue->waitingToJoinQ) );
133
134 cpuQueue->numCPUs = 1;
135
136 return cpuQueue;
137 }
138
139 /*
140 * Create a new CPU.
141 *
142 * This function initializes enough of the _PRCPU structure so
143 * that it can be accessed safely by a global thread or another
144 * CPU. This function does not create the native thread that
145 * will run the CPU nor does it initialize the parts of _PRCPU
146 * that must be initialized by that native thread.
147 *
148 * The reason we cannot simply have the native thread create
149 * and fully initialize a new CPU is that we need to be able to
150 * create a usable _pr_primordialCPU in _PR_InitCPUs without
151 * assuming that the primordial CPU thread we created can run
152 * during NSPR initialization. For example, on Windows while
153 * new threads can be created by DllMain, they won't be able
154 * to run during DLL initialization. If NSPR is initialized
155 * by DllMain, the primordial CPU thread won't run until DLL
156 * initialization is finished.
157 */
158 static _PRCPU *_PR_CreateCPU(void)
159 {
160 _PRCPU *cpu;
161
162 cpu = PR_NEWZAP(_PRCPU);
163 if (cpu) {
164 cpu->queue = _PR_CreateCPUQueue();
165 if (!cpu->queue) {
166 PR_DELETE(cpu);
167 return NULL;
168 }
169 }
170 return cpu;
171 }
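/*
 * The two-phase pattern the comment above describes, condensed from its
 * actual uses in this file (_PR_InitCPUs() earlier, _PR_RunCPU() below):
 *
 *     cpu = _PR_CreateCPU();     // may run during NSPR initialization
 *     ...                        // later, on the CPU's own native thread:
 *     _PR_StartCPU(cpu, me);     // finish the parts only that thread can do
 */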
172
173 /*
174 * Start a new CPU.
175 *
176 * 'cpu' is a _PRCPU structure created by _PR_CreateCPU().
177 * 'thread' is the native thread that will run the CPU.
178 *
179 * If this function fails, 'cpu' is destroyed.
180 */
181 static PRStatus _PR_StartCPU(_PRCPU *cpu, PRThread *thread)
182 {
183 /*
184 ** Start a new cpu. The assumption this code makes is that the
185 ** underlying operating system creates a stack to go with the new
186 ** native thread. That stack will be used by the cpu when pausing.
187 */
188
189 PR_ASSERT(!_native_threads_only);
190
191 cpu->last_clock = PR_IntervalNow();
192
193 /* Before we create any threads on this CPU we have to
194 * set the current CPU
195 */
196 _PR_MD_SET_CURRENT_CPU(cpu);
197 _PR_MD_INIT_RUNNING_CPU(cpu);
198 thread->cpu = cpu;
199
200 cpu->idle_thread = _PR_CreateThread(PR_SYSTEM_THREAD,
201 _PR_CPU_Idle,
202 (void *)cpu,
203 PR_PRIORITY_NORMAL,
204 PR_LOCAL_THREAD,
205 PR_UNJOINABLE_THREAD,
206 0,
207 _PR_IDLE_THREAD);
208
209 if (!cpu->idle_thread) {
210 /* didn't clean up CPU queue XXXMB */
211 PR_DELETE(cpu);
212 return PR_FAILURE;
213 }
214 PR_ASSERT(cpu->idle_thread->cpu == cpu);
215
216 cpu->idle_thread->no_sched = 0;
217
218 cpu->thread = thread;
219
220 if (_pr_cpu_affinity_mask)
221 PR_SetThreadAffinityMask(thread, _pr_cpu_affinity_mask);
222
223 /* Created and started a new CPU */
224 _PR_CPU_LIST_LOCK();
225 cpu->id = _pr_cpuID++;
226 PR_APPEND_LINK(&cpu->links, &_PR_CPUQ());
227 _PR_CPU_LIST_UNLOCK();
228
229 return PR_SUCCESS;
230 }
231
232 #if !defined(_PR_GLOBAL_THREADS_ONLY) && !defined(_PR_LOCAL_THREADS_ONLY)
233 /*
234 ** This code is used during a cpu's initial creation.
235 */
236 static void _PR_RunCPU(void *arg)
237 {
238 _PRCPU *cpu = (_PRCPU *)arg;
239 PRThread *me = _PR_MD_CURRENT_THREAD();
240
241 PR_ASSERT(NULL != me);
242
243 /*
244 * _PR_StartCPU calls _PR_CreateThread to create the
245 * idle thread. Because _PR_CreateThread calls PR_Lock,
246 * the current thread has to remain a global thread
247 * during the _PR_StartCPU call so that it can wait for
248 * the lock if the lock is held by another thread. If
249 * we clear the _PR_GLOBAL_SCOPE flag in
250 * _PR_MD_CREATE_PRIMORDIAL_THREAD, the current thread
251 * will be treated as a local thread and have trouble
252 * waiting for the lock because the CPU is not fully
253 * constructed yet.
254 *
255 * After the CPU is started, it is safe to mark the
256 * current thread as a local thread.
257 */
258
259 #ifdef HAVE_CUSTOM_USER_THREADS
260 _PR_MD_CREATE_PRIMORDIAL_USER_THREAD(me);
261 #endif
262
263 me->no_sched = 1;
264 _PR_StartCPU(cpu, me);
265
266 #ifdef HAVE_CUSTOM_USER_THREADS
267 me->flags &= (~_PR_GLOBAL_SCOPE);
268 #endif
269
270 _PR_MD_SET_CURRENT_CPU(cpu);
271 _PR_MD_SET_CURRENT_THREAD(cpu->thread);
272 me->cpu = cpu;
273
274 while(1) {
275 PRInt32 is;
276 if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is);
277 _PR_MD_START_INTERRUPTS();
278 _PR_MD_SWITCH_CONTEXT(me);
279 }
280 }
281 #endif
282
283 static void PR_CALLBACK _PR_CPU_Idle(void *_cpu)
284 {
285 _PRCPU *cpu = (_PRCPU *)_cpu;
286 PRThread *me = _PR_MD_CURRENT_THREAD();
287
288 PR_ASSERT(NULL != me);
289
290 me->cpu = cpu;
291 cpu->idle_thread = me;
292 if (_MD_LAST_THREAD())
293 _MD_LAST_THREAD()->no_sched = 0;
294 if (!_PR_IS_NATIVE_THREAD(me)) _PR_MD_SET_INTSOFF(0);
295 while(1) {
296 PRInt32 is;
297 PRIntervalTime timeout;
298 if (!_PR_IS_NATIVE_THREAD(me)) _PR_INTSOFF(is);
299
300 _PR_RUNQ_LOCK(cpu);
301 #if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
302 #ifdef _PR_HAVE_ATOMIC_OPS
303 _PR_MD_ATOMIC_INCREMENT(&_pr_md_idle_cpus);
304 #else
305 _PR_MD_LOCK(&_pr_md_idle_cpus_lock);
306 _pr_md_idle_cpus++;
307 _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock);
308 #endif /* _PR_HAVE_ATOMIC_OPS */
309 #endif
310 /* If someone on runq; do a nonblocking PAUSECPU */
311 if (_PR_RUNQREADYMASK(me->cpu) != 0) {
312 _PR_RUNQ_UNLOCK(cpu);
313 timeout = PR_INTERVAL_NO_WAIT;
314 } else {
315 _PR_RUNQ_UNLOCK(cpu);
316
317 _PR_SLEEPQ_LOCK(cpu);
318 if (PR_CLIST_IS_EMPTY(&_PR_SLEEPQ(me->cpu))) {
319 timeout = PR_INTERVAL_NO_TIMEOUT;
320 } else {
321 PRThread *wakeThread;
322 wakeThread = _PR_THREAD_PTR(_PR_SLEEPQ(me->cpu).next);
323 timeout = wakeThread->sleep;
324 }
325 _PR_SLEEPQ_UNLOCK(cpu);
326 }
327
328 /* Wait for an IO to complete */
329 (void)_PR_MD_PAUSE_CPU(timeout);
330
331 #ifdef WINNT
332 if (_pr_cpus_exit) {
333 /* _PR_CleanupCPUs tells us to exit */
334 _PR_MD_END_THREAD();
335 }
336 #endif
337
338 #if !defined(_PR_LOCAL_THREADS_ONLY) && !defined(_PR_GLOBAL_THREADS_ONLY)
339 #ifdef _PR_HAVE_ATOMIC_OPS
340 _PR_MD_ATOMIC_DECREMENT(&_pr_md_idle_cpus);
341 #else
342 _PR_MD_LOCK(&_pr_md_idle_cpus_lock);
343 _pr_md_idle_cpus--;
344 _PR_MD_UNLOCK(&_pr_md_idle_cpus_lock);
345 #endif /* _PR_HAVE_ATOMIC_OPS */
346 #endif
347
348 _PR_ClockInterrupt();
349
350 /* Now schedule any thread that is on the runq
351 * INTS must be OFF when calling PR_Schedule()
352 */
353 me->state = _PR_RUNNABLE;
354 _PR_MD_SWITCH_CONTEXT(me);
355 if (!_PR_IS_NATIVE_THREAD(me)) _PR_FAST_INTSON(is);
356 }
357 }
358 #endif /* _PR_GLOBAL_THREADS_ONLY */
359
360 PR_IMPLEMENT(void) PR_SetConcurrency(PRUintn numCPUs)
361 {
362 #if defined(_PR_GLOBAL_THREADS_ONLY) || defined(_PR_LOCAL_THREADS_ONLY)
363
364 /* do nothing */
365
366 #else /* combined, MxN thread model */
367
368 PRUintn newCPU;
369 _PRCPU *cpu;
370 PRThread *thr;
371
372
373 if (!_pr_initialized) _PR_ImplicitInitialization();
374
375 if (_native_threads_only)
376 return;
377
378 _PR_CPU_LIST_LOCK();
379 if (_pr_numCPU < numCPUs) {
380 newCPU = numCPUs - _pr_numCPU;
381 _pr_numCPU = numCPUs;
382 } else newCPU = 0;
383 _PR_CPU_LIST_UNLOCK();
384
385 for (; newCPU; newCPU--) {
386 cpu = _PR_CreateCPU();
387 thr = _PR_CreateThread(PR_SYSTEM_THREAD,
388 _PR_RunCPU,
389 cpu,
390 PR_PRIORITY_NORMAL,
391 PR_GLOBAL_THREAD,
392 PR_UNJOINABLE_THREAD,
393 0,
394 _PR_IDLE_THREAD);
395 }
396 #endif
397 }
398
399 PR_IMPLEMENT(_PRCPU *) _PR_GetPrimordialCPU(void)
400 {
401 if (_pr_primordialCPU)
402 return _pr_primordialCPU;
403 else
404 return _PR_MD_CURRENT_CPU();
405 }
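
PR_SetConcurrency() above is the public entry point for this machinery. As a usage note, the sketch below shows how an application built against a combined (MxN) NSPR might request extra virtual CPUs before spawning local threads; the worker body and the count of 4 are illustrative assumptions, not taken from this change.

    #include <prinit.h>
    #include <prthread.h>

    static void PR_CALLBACK worker(void *arg)
    {
        /* work that will be scheduled onto one of the virtual CPUs */
    }

    int main(void)
    {
        PR_Init(PR_USER_THREAD, PR_PRIORITY_NORMAL, 0);
        PR_SetConcurrency(4);               /* request up to 4 virtual CPUs */

        PRThread *t = PR_CreateThread(PR_USER_THREAD, worker, NULL,
                                      PR_PRIORITY_NORMAL, PR_LOCAL_THREAD,
                                      PR_JOINABLE_THREAD, 0);
        if (t)
            PR_JoinThread(t);
        return PR_Cleanup() == PR_SUCCESS ? 0 : 1;
    }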