Chromium Code Reviews

Side by Side Diff: fusl/src/thread/pthread_barrier_wait.c

Issue 1714623002: [fusl] clang-format fusl (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: "headers too" | Created 4 years, 10 months ago
New (clang-formatted) file contents:
#include "pthread_impl.h"

static int pshared_barrier_wait(pthread_barrier_t* b) {
  int limit = (b->_b_limit & INT_MAX) + 1;
  int ret = 0;
  int v, w;

  if (limit == 1)
    return PTHREAD_BARRIER_SERIAL_THREAD;

  while ((v = a_cas(&b->_b_lock, 0, limit)))
    __wait(&b->_b_lock, &b->_b_waiters, v, 0);

  /* Wait for <limit> threads to get to the barrier */
  if (++b->_b_count == limit) {
    a_store(&b->_b_count, 0);
    ret = PTHREAD_BARRIER_SERIAL_THREAD;
    if (b->_b_waiters2)
      __wake(&b->_b_count, -1, 0);
  } else {
    a_store(&b->_b_lock, 0);
    if (b->_b_waiters)
      __wake(&b->_b_lock, 1, 0);
    while ((v = b->_b_count) > 0)
      __wait(&b->_b_count, &b->_b_waiters2, v, 0);
  }

  __vm_lock();

  /* Ensure all threads have a vm lock before proceeding */
  if (a_fetch_add(&b->_b_count, -1) == 1 - limit) {
    a_store(&b->_b_count, 0);
    if (b->_b_waiters2)
      __wake(&b->_b_count, -1, 0);
  } else {
    while ((v = b->_b_count))
      __wait(&b->_b_count, &b->_b_waiters2, v, 0);
  }

  /* Perform a recursive unlock suitable for self-sync'd destruction */
  do {
    v = b->_b_lock;
    w = b->_b_waiters;
  } while (a_cas(&b->_b_lock, v, v == INT_MIN + 1 ? 0 : v - 1) != v);

  /* Wake a thread waiting to reuse or destroy the barrier */
  if (v == INT_MIN + 1 || (v == 1 && w))
    __wake(&b->_b_lock, 1, 0);

  __vm_unlock();

  return ret;
}

struct instance {
  volatile int count;
  volatile int last;
  volatile int waiters;
  volatile int finished;
};

int pthread_barrier_wait(pthread_barrier_t* b) {
  int limit = b->_b_limit;
  struct instance* inst;

  /* Trivial case: count was set at 1 */
  if (!limit)
    return PTHREAD_BARRIER_SERIAL_THREAD;

  /* Process-shared barriers require a separate, inefficient wait */
  if (limit < 0)
    return pshared_barrier_wait(b);

  /* Otherwise we need a lock on the barrier object */
  while (a_swap(&b->_b_lock, 1))
    __wait(&b->_b_lock, &b->_b_waiters, 1, 1);
  inst = b->_b_inst;

  /* First thread to enter the barrier becomes the "instance owner" */
  if (!inst) {
    struct instance new_inst = {0};
    int spins = 200;
    b->_b_inst = inst = &new_inst;
    a_store(&b->_b_lock, 0);
    if (b->_b_waiters)
      __wake(&b->_b_lock, 1, 1);
    while (spins-- && !inst->finished)
      a_spin();
    a_inc(&inst->finished);
    while (inst->finished == 1)
      __syscall(SYS_futex, &inst->finished, FUTEX_WAIT | 128, 1, 0) !=
              -ENOSYS ||
          __syscall(SYS_futex, &inst->finished, FUTEX_WAIT, 1, 0);
    return PTHREAD_BARRIER_SERIAL_THREAD;
  }

  /* Last thread to enter the barrier wakes all non-instance-owners */
  if (++inst->count == limit) {
    b->_b_inst = 0;
    a_store(&b->_b_lock, 0);
    if (b->_b_waiters)
      __wake(&b->_b_lock, 1, 1);
    a_store(&inst->last, 1);
    if (inst->waiters)
      __wake(&inst->last, -1, 1);
  } else {
    a_store(&b->_b_lock, 0);
    if (b->_b_waiters)
      __wake(&b->_b_lock, 1, 1);
    __wait(&inst->last, &inst->waiters, 0, 1);
  }

  /* Last thread to exit the barrier wakes the instance owner */
  if (a_fetch_add(&inst->count, -1) == 1 && a_fetch_add(&inst->finished, 1))
    __wake(&inst->finished, 1, 1);

  return 0;
}
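For context, a minimal usage sketch of the POSIX API this file implements (the thread count and worker body are illustrative, not taken from fusl): pthread_barrier_init sets how many threads must call pthread_barrier_wait before any of them is released, and exactly one released caller gets PTHREAD_BARRIER_SERIAL_THREAD while the others get 0. Barriers created with a process-shared attribute are the ones routed through pshared_barrier_wait (the limit < 0 branch above).

#include <pthread.h>
#include <stdio.h>

#define NTHREADS 4 /* illustrative count, not from the change above */

static pthread_barrier_t barrier;

static void* worker(void* arg) {
  /* Blocks until NTHREADS threads have reached the barrier. */
  int r = pthread_barrier_wait(&barrier);
  /* Exactly one waiter is designated the "serial" thread. */
  if (r == PTHREAD_BARRIER_SERIAL_THREAD)
    printf("thread %ld was the serial thread\n", (long)arg);
  return 0;
}

int main(void) {
  pthread_t t[NTHREADS];
  pthread_barrier_init(&barrier, 0, NTHREADS);
  for (long i = 0; i < NTHREADS; i++)
    pthread_create(&t[i], 0, worker, (void*)i);
  for (int i = 0; i < NTHREADS; i++)
    pthread_join(t[i], 0);
  pthread_barrier_destroy(&barrier);
  return 0;
}

Build with -pthread. The serial-thread return value is what lets one thread run a post-barrier step (for example, reusing or destroying the barrier) without any extra synchronization.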
