#include "pthread_impl.h"

void __pthread_testcancel(void);
int __pthread_mutex_lock(pthread_mutex_t *);
int __pthread_mutex_unlock(pthread_mutex_t *);
int __pthread_setcancelstate(int, int *);

/*
 * struct waiter
 *
 * Waiter objects have automatic storage on the waiting thread, and
 * are used in building a linked list representing waiters currently
 * waiting on the condition variable or a group of waiters woken
 * together by a broadcast or signal; in the case of signal, this is a
 * degenerate list of one member.
 *
 * Waiter lists attached to the condition variable itself are
 * protected by the lock on the cv. Detached waiter lists are never
 * modified again, but can only be traversed in reverse order, and are
 * protected by the "barrier" locks in each node, which are unlocked
 * in turn to control wake order.
 *
 * Since process-shared cond var semantics do not necessarily allow
 * one thread to see another's automatic storage (they may be in
 * different processes), the waiter list is not used for the
 * process-shared case, but the structure is still used to store data
 * needed by the cancellation cleanup handler.
 */

struct waiter {
	struct waiter *prev, *next;
	volatile int state, barrier;
	volatile int *notify;
};

/* Self-synchronized-destruction-safe lock functions */
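/* The lock word takes one of three values, the convention used for
 * futex-based locks throughout this code: 0 = unlocked, 1 = locked
 * with no waiters, 2 = locked with possible waiters. unlock() issues
 * a futex wake only when the old value was 2, so fully uncontended
 * lock/unlock cycles make no syscalls. */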
static inline void lock(volatile int *l)
{
	if (a_cas(l, 0, 1)) {
		a_cas(l, 1, 2);
		do __wait(l, 0, 2, 1);
		while (a_cas(l, 0, 2));
	}
}

static inline void unlock(volatile int *l)
{
	if (a_swap(l, 0)==2)
		__wake(l, 1, 1);
}
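/* unlock_requeue either wakes the next waiter outright (w nonzero,
 * used when the mutex is process-shared, since a wait on the private
 * barrier futex cannot be requeued onto a shared futex) or moves it
 * from the barrier futex onto the mutex futex so that it wakes only
 * once the mutex is available. 128 is the value of FUTEX_PRIVATE;
 * the second syscall is the fallback for old kernels that lack
 * private-futex support. */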
static inline void unlock_requeue(volatile int *l, volatile int *r, int w)
{
	a_store(l, 0);
	if (w) __wake(l, 1, 1);
	else __syscall(SYS_futex, l, FUTEX_REQUEUE|128, 0, 1, r) != -ENOSYS
		|| __syscall(SYS_futex, l, FUTEX_REQUEUE, 0, 1, r);
}
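/* A waiter starts out WAITING and moves to exactly one of SIGNALED
 * (claimed by a signal or broadcast) or LEAVING (timed out or
 * cancelled); the a_cas on node.state arbitrates the race. */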
enum {
	WAITING,
	SIGNALED,
	LEAVING,
};

int __pthread_cond_timedwait(pthread_cond_t *restrict c, pthread_mutex_t *restrict m, const struct timespec *restrict ts)
{
	struct waiter node = { 0 };
	int e, seq, clock = c->_c_clock, cs, shared=0, oldstate, tmp;
	volatile int *fut;
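	/* For error-checking and recursive mutex types, the calling
	 * thread must actually hold the mutex. */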
	if ((m->_m_type&15) && (m->_m_lock&INT_MAX) != __pthread_self()->tid)
		return EPERM;

	if (ts && ts->tv_nsec >= 1000000000UL)
		return EINVAL;

	__pthread_testcancel();
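	/* Process-shared waiters cannot see one another's stacks, so
	 * they wait directly on the cv's sequence number. Private
	 * waiters enqueue an automatic-storage node and wait on its
	 * barrier, which starts out locked-with-waiters (2). */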
	if (c->_c_shared) {
		shared = 1;
		fut = &c->_c_seq;
		seq = c->_c_seq;
		a_inc(&c->_c_waiters);
	} else {
		lock(&c->_c_lock);

		seq = node.barrier = 2;
		fut = &node.barrier;
		node.state = WAITING;
		node.next = c->_c_head;
		c->_c_head = &node;
		if (!c->_c_tail) c->_c_tail = &node;
		else node.next->prev = &node;

		unlock(&c->_c_lock);
	}

	__pthread_mutex_unlock(m);
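	/* Mask cancellation so that a pending request surfaces as an
	 * ECANCELED return from __timedwait_cp rather than being acted
	 * on asynchronously; if the caller had cancellation disabled,
	 * restore that state right away. */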
	__pthread_setcancelstate(PTHREAD_CANCEL_MASKED, &cs);
	if (cs == PTHREAD_CANCEL_DISABLE) __pthread_setcancelstate(cs, 0);
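	/* Wait until the futex word changes from the value observed at
	 * enqueue time, treating EINTR as a spurious wake. */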
	do e = __timedwait_cp(fut, seq, clock, ts, !shared);
	while (*fut==seq && (!e || e==EINTR));
	if (e == EINTR) e = 0;

	if (shared) {
		/* Suppress cancellation if a signal was potentially
		 * consumed; this is a legitimate form of spurious
		 * wake even if not. */
		if (e == ECANCELED && c->_c_seq != seq) e = 0;
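		/* If pthread_cond_destroy is in progress, it has set the
		 * sign bit on the waiter count and is blocked until the
		 * count drains; wake it if this was the last waiter. */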
		if (a_fetch_add(&c->_c_waiters, -1) == -0x7fffffff)
			__wake(&c->_c_waiters, 1, 0);
		oldstate = WAITING;
		goto relock;
	}
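	/* Race with any signaler: either this waiter was already
	 * SIGNALED, or it becomes LEAVING so that no signal can be
	 * consumed on its behalf from here on. */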
	oldstate = a_cas(&node.state, WAITING, LEAVING);

	if (oldstate == WAITING) {
		/* Access to cv object is valid because this waiter was not
		 * yet signaled and a new signal/broadcast cannot return
		 * after seeing a LEAVING waiter without getting notified
		 * via the futex notify below. */

		lock(&c->_c_lock);

		if (c->_c_head == &node) c->_c_head = node.next;
		else if (node.prev) node.prev->next = node.next;
		if (c->_c_tail == &node) c->_c_tail = node.prev;
		else if (node.next) node.next->prev = node.prev;

		unlock(&c->_c_lock);

		if (node.notify) {
			if (a_fetch_add(node.notify, -1)==1)
				__wake(node.notify, 1, 1);
		}
	} else {
		/* Lock barrier first to control wake order. */
		lock(&node.barrier);
	}

relock:
	/* Errors locking the mutex override any existing error or
	 * cancellation, since the caller must see them to know the
	 * state of the mutex. */
	if ((tmp = pthread_mutex_lock(m))) e = tmp;

	if (oldstate == WAITING) goto done;
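	/* This waiter was signaled as part of a woken group. The
	 * oldest member (node.next is null) bumps the mutex waiter
	 * count on behalf of the whole group, so that unlockers issue
	 * futex wakes while requeued waiters remain; the newest member
	 * (node.prev is null, below) drops it again. */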
	if (!node.next) a_inc(&m->_m_waiters);

	/* Unlock the barrier that's holding back the next waiter, and
	 * either wake it or requeue it to the mutex. */
	if (node.prev)
		unlock_requeue(&node.prev->barrier, &m->_m_lock, m->_m_type & 128);
	else
		a_dec(&m->_m_waiters);

	/* Since a signal was consumed, cancellation is not permitted. */
	if (e == ECANCELED) e = 0;

done:
	__pthread_setcancelstate(cs, 0);

	if (e == ECANCELED) {
		__pthread_testcancel();
		__pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, 0);
	}

	return e;
}
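/* Used by pthread_cond_signal (n==1) and pthread_cond_broadcast
 * (n==-1, which never reaches zero and so claims every waiter) for
 * non-process-shared cvs: claim up to n waiters starting from the
 * tail of the list and detach them from the cv as a group. */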
int __private_cond_signal(pthread_cond_t *c, int n)
{
	struct waiter *p, *first=0;
	volatile int ref = 0;
	int cur;

	lock(&c->_c_lock);
	for (p=c->_c_tail; n && p; p=p->prev) {
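		/* A failed cas means the waiter is LEAVING; have it
		 * notify us (via ref) once it has unlinked itself. */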
		if (a_cas(&p->state, WAITING, SIGNALED) != WAITING) {
			ref++;
			p->notify = &ref;
		} else {
			n--;
			if (!first) first=p;
		}
	}
	/* Split the list, leaving any remainder on the cv. */
	if (p) {
		if (p->next) p->next->prev = 0;
		p->next = 0;
	} else {
		c->_c_head = 0;
	}
	c->_c_tail = p;
	unlock(&c->_c_lock);

	/* Wait for any waiters in the LEAVING state to remove
	 * themselves from the list before returning or allowing
	 * signaled threads to proceed. */
	while ((cur = ref)) __wait(&ref, 0, cur, 1);

	/* Allow first signaled waiter, if any, to proceed. */
	if (first) unlock(&first->barrier);

	return 0;
}

weak_alias(__pthread_cond_timedwait, pthread_cond_timedwait);
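
/* Illustrative caller pattern, not part of this file (the names m,
 * c, ts, and ready are placeholders): POSIX requires waiting in a
 * predicate loop, since both spurious wakes and wakes whose
 * predicate was already consumed by another thread are permitted:
 *
 *	pthread_mutex_lock(&m);
 *	while (!ready) {
 *		int r = pthread_cond_timedwait(&c, &m, &ts);
 *		if (r == ETIMEDOUT) break;
 *	}
 *	pthread_mutex_unlock(&m);
 */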