
fusl/src/thread/pthread_cond_timedwait.c (clang-formatted result)

Issue 1714623002: [fusl] clang-format fusl (Closed) Base URL: git@github.com:domokit/mojo.git@master
Patch Set: "headers too" | Created 4 years, 10 months ago
#include "pthread_impl.h"

void __pthread_testcancel(void);
int __pthread_mutex_lock(pthread_mutex_t*);
int __pthread_mutex_unlock(pthread_mutex_t*);
int __pthread_setcancelstate(int, int*);

/*
 * struct waiter
 *
 * Waiter objects have automatic storage on the waiting thread, and
 * are used in building a linked list representing waiters currently
 * waiting on the condition variable or a group of waiters woken
 * together by a broadcast or signal; in the case of signal, this is a
 * degenerate list of one member.
 *
 * Waiter lists attached to the condition variable itself are
 * protected by the lock on the cv. Detached waiter lists are never
 * modified again, but can only be traversed in reverse order, and are
 * protected by the "barrier" locks in each node, which are unlocked
 * in turn to control wake order.
 *
 * Since process-shared cond var semantics do not necessarily allow
 * one thread to see another's automatic storage (they may be in
 * different processes), the waiter list is not used for the
 * process-shared case, but the structure is still used to store data
 * needed by the cancellation cleanup handler.
 */

struct waiter {
  struct waiter *prev, *next;
  volatile int state, barrier;
  volatile int* notify;
};

/* Self-synchronized-destruction-safe lock functions */

static inline void lock(volatile int* l) {
  if (a_cas(l, 0, 1)) {
    a_cas(l, 1, 2);
    do
      __wait(l, 0, 2, 1);
    while (a_cas(l, 0, 2));
  }
}

static inline void unlock(volatile int* l) {
  if (a_swap(l, 0) == 2)
    __wake(l, 1, 1);
}

static inline void unlock_requeue(volatile int* l, volatile int* r, int w) {
  a_store(l, 0);
  if (w)
    __wake(l, 1, 1);
  else
    __syscall(SYS_futex, l, FUTEX_REQUEUE | 128, 0, 1, r) != -ENOSYS ||
        __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
}

enum {
  WAITING,
  SIGNALED,
  LEAVING,
};

int __pthread_cond_timedwait(pthread_cond_t* restrict c,
                             pthread_mutex_t* restrict m,
                             const struct timespec* restrict ts) {
  struct waiter node = {0};
  int e, seq, clock = c->_c_clock, cs, shared = 0, oldstate, tmp;
  volatile int* fut;

  if ((m->_m_type & 15) && (m->_m_lock & INT_MAX) != __pthread_self()->tid)
    return EPERM;

  if (ts && ts->tv_nsec >= 1000000000UL)
    return EINVAL;

  __pthread_testcancel();

  if (c->_c_shared) {
    shared = 1;
    fut = &c->_c_seq;
    seq = c->_c_seq;
    a_inc(&c->_c_waiters);
  } else {
    lock(&c->_c_lock);

    seq = node.barrier = 2;
    fut = &node.barrier;
    node.state = WAITING;
    node.next = c->_c_head;
    c->_c_head = &node;
    if (!c->_c_tail)
      c->_c_tail = &node;
    else
      node.next->prev = &node;

    unlock(&c->_c_lock);
  }

  __pthread_mutex_unlock(m);

  __pthread_setcancelstate(PTHREAD_CANCEL_MASKED, &cs);
  if (cs == PTHREAD_CANCEL_DISABLE)
    __pthread_setcancelstate(cs, 0);

  do
    e = __timedwait_cp(fut, seq, clock, ts, !shared);
  while (*fut == seq && (!e || e == EINTR));
  if (e == EINTR)
    e = 0;

  if (shared) {
    /* Suppress cancellation if a signal was potentially
     * consumed; this is a legitimate form of spurious
     * wake even if not. */
    if (e == ECANCELED && c->_c_seq != seq)
      e = 0;
    if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff)
      __wake(&c->_c_waiters, 1, 0);
    oldstate = WAITING;
    goto relock;
  }

  oldstate = a_cas(&node.state, WAITING, LEAVING);

  if (oldstate == WAITING) {
    /* Access to cv object is valid because this waiter was not
     * yet signaled and a new signal/broadcast cannot return
     * after seeing a LEAVING waiter without getting notified
     * via the futex notify below. */

    lock(&c->_c_lock);

    if (c->_c_head == &node)
      c->_c_head = node.next;
    else if (node.prev)
      node.prev->next = node.next;
    if (c->_c_tail == &node)
      c->_c_tail = node.prev;
    else if (node.next)
      node.next->prev = node.prev;

    unlock(&c->_c_lock);

    if (node.notify) {
      if (a_fetch_add(node.notify, -1) == 1)
        __wake(node.notify, 1, 1);
    }
  } else {
    /* Lock barrier first to control wake order. */
    lock(&node.barrier);
  }

relock:
  /* Errors locking the mutex override any existing error or
   * cancellation, since the caller must see them to know the
   * state of the mutex. */
  if ((tmp = pthread_mutex_lock(m)))
    e = tmp;

  if (oldstate == WAITING)
    goto done;

  if (!node.next)
    a_inc(&m->_m_waiters);

  /* Unlock the barrier that's holding back the next waiter, and
   * either wake it or requeue it to the mutex. */
  if (node.prev)
    unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & 128);
  else
    a_dec(&m->_m_waiters);

  /* Since a signal was consumed, cancellation is not permitted. */
  if (e == ECANCELED)
    e = 0;

done:
  __pthread_setcancelstate(cs, 0);

  if (e == ECANCELED) {
    __pthread_testcancel();
    __pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);
  }

  return e;
}

int __private_cond_signal(pthread_cond_t* c, int n) {
  struct waiter *p, *first = 0;
  volatile int ref = 0;
  int cur;

  lock(&c->_c_lock);
  for (p = c->_c_tail; n && p; p = p->prev) {
    if (a_cas(&p->state, WAITING, SIGNALED) != WAITING) {
      ref++;
      p->notify = &ref;
    } else {
      n--;
      if (!first)
        first = p;
    }
  }
  /* Split the list, leaving any remainder on the cv. */
  if (p) {
    if (p->next)
      p->next->prev = 0;
    p->next = 0;
  } else {
    c->_c_head = 0;
  }
  c->_c_tail = p;
  unlock(&c->_c_lock);

  /* Wait for any waiters in the LEAVING state to remove
   * themselves from the list before returning or allowing
   * signaled threads to proceed. */
  while ((cur = ref))
    __wait(&ref, 0, cur, 1);

  /* Allow first signaled waiter, if any, to proceed. */
  if (first)
    unlock(&first->barrier);

  return 0;
}

weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);
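
Reviewer note: for context, this is the public API the file implements. A minimal caller-side sketch, assuming only standard POSIX interfaces (wait_ready, mu, cv, and ready are hypothetical names for illustration, not part of this file): the deadline is absolute on the cond var's clock (CLOCK_REALTIME unless changed via pthread_condattr_setclock), and the predicate must be rechecked in a loop because wakeups may be spurious.

#include <errno.h>
#include <pthread.h>
#include <time.h>

static pthread_mutex_t mu = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cv = PTHREAD_COND_INITIALIZER;
static int ready; /* predicate, protected by mu */

/* Wait up to five seconds for 'ready' to become nonzero.
 * Returns 0 on success or ETIMEDOUT. */
static int wait_ready(void) {
  struct timespec deadline;
  int err = 0;
  clock_gettime(CLOCK_REALTIME, &deadline); /* default cond clock */
  deadline.tv_sec += 5;
  pthread_mutex_lock(&mu);
  while (!ready && err != ETIMEDOUT)
    err = pthread_cond_timedwait(&cv, &mu, &deadline);
  pthread_mutex_unlock(&mu);
  return ready ? 0 : err;
}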
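
Another note: lock() and unlock() above implement the classic three-state futex lock (0 = unlocked, 1 = locked, 2 = locked with possible waiters) on top of musl's a_cas/a_swap/__wait/__wake primitives. Below is a rough standalone rendering of the same protocol using C11 atomics and the raw Linux futex syscall; demo_lock, demo_unlock, futex_wait, and futex_wake are hypothetical names for illustration, not part of this file.

#include <linux/futex.h>
#include <stdatomic.h>
#include <sys/syscall.h>
#include <unistd.h>

static void futex_wait(atomic_int* a, int val) {
  /* Sleeps only while *a still equals val; the kernel rechecks the word
   * atomically, so a wake between our check and the sleep cannot be lost. */
  syscall(SYS_futex, a, FUTEX_WAIT, val, 0, 0, 0);
}

static void futex_wake(atomic_int* a, int n) {
  syscall(SYS_futex, a, FUTEX_WAKE, n, 0, 0, 0);
}

static void demo_lock(atomic_int* l) {
  int expect = 0;
  if (atomic_compare_exchange_strong(l, &expect, 1))
    return; /* fast path: 0 -> 1 with no syscall */
  for (;;) {
    if (expect == 1)
      atomic_compare_exchange_strong(l, &expect, 2); /* advertise a waiter */
    futex_wait(l, 2); /* sleep while the word reads "contended" */
    expect = 0;
    if (atomic_compare_exchange_strong(l, &expect, 2))
      return; /* acquired, preserving the contended marking */
  }
}

static void demo_unlock(atomic_int* l) {
  if (atomic_exchange(l, 0) == 2)
    futex_wake(l, 1); /* pay for a syscall only if someone may be asleep */
}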
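
Finally, unlock_requeue() is why a broadcast does not stampede: instead of waking a waiter that would immediately block on the mutex, the kernel can move it from one futex wait queue to another. A hedged sketch of the syscall's shape (requeue_one is a hypothetical name; FUTEX_PRIVATE_FLAG is the literal 128 in the code above, and the second call mirrors the file's fallback for kernels without private futexes):

#include <errno.h>
#include <linux/futex.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Wake zero threads, and move at most one thread sleeping on 'from'
 * so that it now sleeps on 'to' (here: from a waiter's barrier word
 * onto the mutex word). */
static void requeue_one(int* from, int* to) {
  if (syscall(SYS_futex, from, FUTEX_REQUEUE | FUTEX_PRIVATE_FLAG,
              0, 1, to) == -1 &&
      errno == ENOSYS)
    syscall(SYS_futex, from, FUTEX_REQUEUE, 0, 1, to);
}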