OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright (c) 2014 The Native Client Authors. All rights reserved. |
| 3 * Use of this source code is governed by a BSD-style license that can be |
| 4 * found in the LICENSE file. |
| 5 */ |
| 6 |
| 7 /* |
| 8 * Native Client mutex implementation |
| 9 */ |
| 10 |
| 11 #include <errno.h> |
| 12 #include <limits.h> |
| 13 |
| 14 #include "native_client/src/untrusted/nacl/nacl_irt.h" |
| 15 #include "native_client/src/untrusted/pthread/pthread.h" |
| 16 #include "native_client/src/untrusted/pthread/pthread_internal.h" |
| 17 #include "native_client/src/untrusted/pthread/pthread_types.h" |
| 18 |
| 19 /* |
| 20 * This implementation is based on the one found in the bionic C library |
| 21 * which is part of the Android Open Source Project |
| 22 * (See: libc/bionic/pthread_rwlock.cpp) |
| 23 * The following steps were used to port it to NaCl: |
| 24 * - convert from C++ to C (mostly replace 'bool') |
| 25 * - convert futex calls to __libnacl_irt_futex |
| 26 * - remove unsupported features such as PTHREAD_PROCESS_PRIVATE/SHARED. |
| 27 */ |
| 28 |
| 29 /* |
| 30 * |
| 31 * Technical note: |
| 32 * |
| 33 * Possible states of a read/write lock: |
| 34 * |
| 35 * - no readers and no writer (unlocked) |
| 36 * - one or more readers sharing the lock at the same time (read-locked) |
| 37 * - one writer holding the lock (write-locked) |
| 38 * |
| 39 * Additionally: |
| 40 * - trying to acquire the write-lock blocks while any readers hold the lock |
| 41 * - trying to acquire the read-lock blocks while a writer holds the lock |
| 42 * - a single thread can acquire the lock multiple times in read mode |
| 43 * |
| 44 * - POSIX states that behavior is undefined (may deadlock) if a thread tries |
| 45 * to acquire the lock |
| 46 * - in write mode while already holding the lock (whether in read or write |
| 47 * mode) |
| 48 * - in read mode while already holding the lock in write mode. |
| 49 * - This implementation returns EDEADLK in the "write after write" and "read |
| 50 * after write" cases and deadlocks in the "write after read" case. |
| 51 */ |
| 52 |
| 53 int pthread_rwlockattr_init(pthread_rwlockattr_t *attr) { |
| 54 *attr = PTHREAD_PROCESS_PRIVATE; |
| 55 return 0; |
| 56 } |
| 57 |
| 58 int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr) { |
| 59 *attr = -1; |
| 60 return 0; |
| 61 } |
| 62 |
| 63 int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared) { |
| 64 switch (pshared) { |
| 65 case PTHREAD_PROCESS_PRIVATE: |
| 66 case PTHREAD_PROCESS_SHARED: |
| 67 *attr = pshared; |
| 68 return 0; |
| 69 default: |
| 70 return EINVAL; |
| 71 } |
| 72 } |
| 73 |
| 74 int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr, |
| 75 int *pshared) { |
| 76 *pshared = *attr; |
| 77 return 0; |
| 78 } |
| 79 |
| 80 int pthread_rwlock_init(pthread_rwlock_t *rwlock, |
| 81 const pthread_rwlockattr_t *attr) { |
| 82 if (attr != NULL) { |
| 83 switch (*attr) { |
| 84 case PTHREAD_PROCESS_SHARED: |
| 85 case PTHREAD_PROCESS_PRIVATE: |
| 86 rwlock->attr = *attr; |
| 87 break; |
| 88 default: |
| 89 return EINVAL; |
| 90 } |
| 91 } |
| 92 |
| 93 rwlock->state = 0; |
| 94 rwlock->pending_readers = 0; |
| 95 rwlock->pending_writers = 0; |
| 96 rwlock->writer_thread_id = 0; |
| 97 |
| 98 return 0; |
| 99 } |
| 100 |
| 101 int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) { |
| 102 if (rwlock->state != 0) { |
| 103 return EBUSY; |
| 104 } |
| 105 return 0; |
| 106 } |
| 107 |
| 108 static int __pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, |
| 109 const struct timespec *abs_timeout) { |
| 110 if (__predict_false(pthread_self() == rwlock->writer_thread_id)) { |
| 111 return EDEADLK; |
| 112 } |
| 113 |
| 114 int done = 0; |
| 115 do { |
| 116 /* |
| 117 * This is actually a racy read, as nothing guarantees the atomicity of |
| 118 * the integer reads / writes. However, in practice a torn read "never" |
| 119 * happens, so until we switch to C++11 atomics this should work fine. |
| 120 * The same applies in the other places this idiom is used. |
| 121 */ |
| 122 int32_t cur_state = rwlock->state; /* C++11 relaxed atomic read */ |
| 123 if (__predict_true(cur_state >= 0)) { |
| 124 /* Add as an extra reader. */ |
| 125 done = __sync_bool_compare_and_swap(&rwlock->state, |
| 126 cur_state, |
| 127 cur_state + 1); |
| 128 } else { |
| 129 /* |
| 130 * Owner holds it in write mode, hang up. |
| 131 * To avoid losing wakeups, the pending_readers update and the state read |
| 132 * must be sequentially consistent (currently enforced by |
| 133 * __sync_fetch_and_add, which creates a full barrier). |
| 134 */ |
| 135 __sync_fetch_and_add(&rwlock->pending_readers, 1); |
| 136 int ret = __libnacl_irt_futex.futex_wait_abs(&rwlock->state, |
| 137 cur_state, |
| 138 abs_timeout); |
| 139 __sync_fetch_and_sub(&rwlock->pending_readers, 1); |
| 140 if (ret == ETIMEDOUT) { |
| 141 return ETIMEDOUT; |
| 142 } |
| 143 } |
| 144 } while (!done); |
| 145 |
| 146 return 0; |
| 147 } |
| 148 |
| 149 static int __pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, |
| 150 const struct timespec *abs_timeout) { |
| 151 pthread_t self = pthread_self(); |
| 152 if (__predict_false(self == rwlock->writer_thread_id)) { |
| 153 return EDEADLK; |
| 154 } |
| 155 |
| 156 int done = 0; |
| 157 do { |
| 158 int32_t cur_state = rwlock->state; |
| 159 if (__predict_true(cur_state == 0)) { |
| 160 /* Change state from 0 to -1. */ |
| 161 done = __sync_bool_compare_and_swap(&rwlock->state, |
| 162 0 /* cur state */, |
| 163 -1 /* new state */); |
| 164 } else { |
| 165 /* |
| 166 * Failed to acquire, hang up. |
| 167 * To avoid losing wakeups, the pending_writers update and the state read |
| 168 * must be sequentially consistent (currently enforced by |
| 169 * __sync_fetch_and_add, which creates a full barrier). |
| 170 */ |
| 171 __sync_fetch_and_add(&rwlock->pending_writers, 1); |
| 172 int ret = __libnacl_irt_futex.futex_wait_abs(&rwlock->state, |
| 173 cur_state, |
| 174 abs_timeout); |
| 175 __sync_fetch_and_sub(&rwlock->pending_writers, 1); |
| 176 if (ret == ETIMEDOUT) { |
| 177 return ETIMEDOUT; |
| 178 } |
| 179 } |
| 180 } while (!done); |
| 181 |
| 182 rwlock->writer_thread_id = self; |
| 183 return 0; |
| 184 } |
| 185 |
| 186 int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) { |
| 187 return __pthread_rwlock_timedrdlock(rwlock, NULL); |
| 188 } |
| 189 |
| 190 int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock, |
| 191 const struct timespec *abs_timeout) { |
| 192 return __pthread_rwlock_timedrdlock(rwlock, abs_timeout); |
| 193 } |
| 194 |
| 195 int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock) { |
| 196 int32_t cur_state = rwlock->state; |
| 197 if ((cur_state >= 0) && |
| 198 __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1)) { |
| 199 return 0; |
| 200 } |
| 201 return EBUSY; |
| 202 } |
| 203 |
| 204 int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) { |
| 205 return __pthread_rwlock_timedwrlock(rwlock, NULL); |
| 206 } |
| 207 |
| 208 int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock, |
| 209 const struct timespec *abs_timeout) { |
| 210 return __pthread_rwlock_timedwrlock(rwlock, abs_timeout); |
| 211 } |
| 212 |
| 213 int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock) { |
| 214 int32_t cur_state = rwlock->state; |
| 215 if ((cur_state == 0) && |
| 216 __sync_bool_compare_and_swap(&rwlock->state, |
| 217 0 /* cur state */, |
| 218 -1 /* new state */)) { |
| 219 rwlock->writer_thread_id = pthread_self(); |
| 220 return 0; |
| 221 } |
| 222 return EBUSY; |
| 223 } |
| 224 |
| 225 int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) { |
| 226 pthread_t self = pthread_self(); |
| 227 int done = 0; |
| 228 do { |
| 229 int32_t cur_state = rwlock->state; |
| 230 if (cur_state == 0) { |
| 231 return EPERM; |
| 232 } |
| 233 if (cur_state == -1) { |
| 234 if (rwlock->writer_thread_id != self) { |
| 235 return EPERM; |
| 236 } |
| 237 /* We're no longer the owner. */ |
| 238 rwlock->writer_thread_id = 0; |
| 239 /* |
| 240 * Change state from -1 to 0. |
| 241 * We use __sync_bool_compare_and_swap to achieve sequential consistency |
| 242 * of the state store and the following pendingX loads. A simple store |
| 243 * with memory_order_release semantics is not enough to guarantee that the |
| 244 * pendingX loads are not reordered before the store (which may lead to a |
| 245 * lost wakeup). |
| 246 */ |
| 247 __sync_bool_compare_and_swap(&rwlock->state, |
| 248 -1 /* cur state */, |
| 249 0 /* new state */); |
| 250 |
| 251 /* Wake any waiters. */ |
| 252 if (__predict_false(rwlock->pending_readers > 0 |
| 253 || rwlock->pending_writers > 0)) { |
| 254 __libnacl_irt_futex.futex_wake(&rwlock->state, INT_MAX, NULL); |
| 255 } |
| 256 done = 1; |
| 257 } else { /* cur_state > 0 */ |
| 258 /* |
| 259 * Reduce state by 1. |
| 260 * See the comment above on why we need __sync_bool_compare_and_swap. |
| 261 */ |
| 262 done = __sync_bool_compare_and_swap(&rwlock->state, |
| 263 cur_state, |
| 264 cur_state - 1); |
| 265 if (done && (cur_state - 1) == 0) { |
| 266 /* There are no more readers, wake any waiters. */ |
| 267 if (__predict_false(rwlock->pending_readers > 0 |
| 268 || rwlock->pending_writers > 0)) { |
| 269 __libnacl_irt_futex.futex_wake(&rwlock->state, INT_MAX, NULL); |
| 270 } |
| 271 } |
| 272 } |
| 273 } while (!done); |
| 274 |
| 275 return 0; |
| 276 } |
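
A few hedged examples may help connect the change above to the public API. First, a minimal usage sketch of the read/write lock from the caller's side; it exercises the state machine described in the technical note (shared readers, exclusive writer) using only standard pthread calls. The thread functions and the g_counter variable are made up for illustration.

#include <pthread.h>
#include <stdio.h>

/* Shared data guarded by the read/write lock. */
static pthread_rwlock_t g_lock;
static int g_counter;

/* Readers share the lock: both reader threads may hold it at once. */
static void *reader(void *arg) {
  (void) arg;
  pthread_rwlock_rdlock(&g_lock);
  printf("reader sees counter = %d\n", g_counter);
  pthread_rwlock_unlock(&g_lock);
  return NULL;
}

/* The writer holds the lock exclusively while it updates the counter. */
static void *writer(void *arg) {
  (void) arg;
  pthread_rwlock_wrlock(&g_lock);
  g_counter++;
  pthread_rwlock_unlock(&g_lock);
  return NULL;
}

int main(void) {
  pthread_t threads[3];
  int i;

  pthread_rwlock_init(&g_lock, NULL);   /* attr == NULL: default attributes */

  pthread_create(&threads[0], NULL, reader, NULL);
  pthread_create(&threads[1], NULL, reader, NULL);
  pthread_create(&threads[2], NULL, writer, NULL);

  for (i = 0; i < 3; i++)
    pthread_join(threads[i], NULL);

  pthread_rwlock_destroy(&g_lock);      /* would return EBUSY if still held */
  return 0;
}

On a typical toolchain this builds with something like cc example.c -lpthread; error checking of the pthread calls is omitted to keep the sketch short.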
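
Second, the error conventions spelled out in the technical note can be checked directly. The sketch below documents this implementation rather than portable guarantees; in particular, POSIX leaves the "write after write" case undefined, so the EDEADLK assertion only holds for rwlocks that, like this one, detect the self-deadlock.

#include <assert.h>
#include <errno.h>
#include <pthread.h>

int main(void) {
  pthread_rwlock_t lock;
  pthread_rwlock_init(&lock, NULL);

  /* "Write after write": this implementation reports the self-deadlock. */
  assert(pthread_rwlock_wrlock(&lock) == 0);
  assert(pthread_rwlock_wrlock(&lock) == EDEADLK);

  /* A writer is active, so a try-read fails instead of blocking. */
  assert(pthread_rwlock_tryrdlock(&lock) == EBUSY);

  /* Destroying a locked rwlock is refused. */
  assert(pthread_rwlock_destroy(&lock) == EBUSY);

  assert(pthread_rwlock_unlock(&lock) == 0);

  /* Unlocking again fails: the lock is no longer held. */
  assert(pthread_rwlock_unlock(&lock) == EPERM);

  assert(pthread_rwlock_destroy(&lock) == 0);
  return 0;
}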
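
Third, the timed entry points take an absolute deadline, matching the futex_wait_abs primitive they are built on; POSIX specifies that the deadline is measured against CLOCK_REALTIME. Below is a sketch of a writer that gives up after roughly 100 ms while the main thread holds the lock in read mode; the thread and variable names are invented for the example.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

static pthread_rwlock_t g_lock;

static void *impatient_writer(void *arg) {
  struct timespec deadline;
  (void) arg;

  /* Absolute deadline: now + 100 ms on CLOCK_REALTIME. */
  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_nsec += 100 * 1000 * 1000;
  if (deadline.tv_nsec >= 1000000000L) {
    deadline.tv_sec += 1;
    deadline.tv_nsec -= 1000000000L;
  }

  /* A reader (main) holds the lock, so this waits until the deadline. */
  if (pthread_rwlock_timedwrlock(&g_lock, &deadline) == ETIMEDOUT)
    printf("write lock timed out, as expected\n");
  return NULL;
}

int main(void) {
  pthread_t tid;

  pthread_rwlock_init(&g_lock, NULL);
  pthread_rwlock_rdlock(&g_lock);        /* keep the lock read-held */

  pthread_create(&tid, NULL, impatient_writer, NULL);
  pthread_join(tid, NULL);               /* the writer gives up after ~100 ms */

  pthread_rwlock_unlock(&g_lock);
  pthread_rwlock_destroy(&g_lock);
  return 0;
}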
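
Finally, the acquire/release pattern used throughout this file (a compare-and-swap loop on a state word, a futex wait when the lock is contended, and a pending-waiter count updated through a full barrier so wakeups are not lost) can be shown in isolation. The sketch below is a simplified binary lock, not the rwlock above, and it substitutes the raw Linux futex syscall for __libnacl_irt_futex; all demo_* names are hypothetical.

#define _GNU_SOURCE
#include <limits.h>
#include <linux/futex.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>

/* 0 == unlocked, -1 == locked; mirrors the rwlock's use of 'state'. */
static int32_t demo_state;
/* Threads blocked in the futex wait, like pending_readers / pending_writers. */
static int32_t demo_waiters;

static void demo_lock(void) {
  int done = 0;
  do {
    int32_t cur = demo_state;               /* racy read, as in the file */
    if (cur == 0) {
      /* Unlocked: try to take it by changing the state from 0 to -1. */
      done = __sync_bool_compare_and_swap(&demo_state, 0, -1);
    } else {
      /*
       * Locked by someone else. The full barrier of the add makes the
       * waiter count visible before the unlocker re-checks it, otherwise
       * the wakeup below could be lost.
       */
      __sync_fetch_and_add(&demo_waiters, 1);
      /* Sleep only if the state still holds the value we observed. */
      syscall(SYS_futex, &demo_state, FUTEX_WAIT, cur, NULL, NULL, 0);
      __sync_fetch_and_sub(&demo_waiters, 1);
    }
  } while (!done);
}

static void demo_unlock(void) {
  /* The CAS is a full barrier, ordering the release before the check below. */
  __sync_bool_compare_and_swap(&demo_state, -1, 0);
  if (demo_waiters > 0) {
    syscall(SYS_futex, &demo_state, FUTEX_WAKE, INT_MAX, NULL, NULL, 0);
  }
}

static int protected_value;

static void *worker(void *arg) {
  int i;
  (void) arg;
  for (i = 0; i < 100000; i++) {
    demo_lock();
    protected_value++;
    demo_unlock();
  }
  return NULL;
}

int main(void) {
  pthread_t a, b;
  pthread_create(&a, NULL, worker, NULL);
  pthread_create(&b, NULL, worker, NULL);
  pthread_join(a, NULL);
  pthread_join(b, NULL);
  printf("protected_value = %d\n", protected_value);  /* expect 200000 */
  return 0;
}

Unlike futex_wait_abs above, plain FUTEX_WAIT interprets its timeout as a relative interval; the sketch sidesteps that by passing NULL and blocking indefinitely, and it does no ownership checking, whereas the rwlock above returns EPERM for a mismatched unlock.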