Chromium Code Reviews

Unified Diff: src/untrusted/pthread/nc_rwlock.c

Issue 609363003: Add implementation of pthread_rwlock based on NaCl futexes (Closed) Base URL: svn://svn.chromium.org/native_client/trunk/src/native_client
Patch Set: Created 6 years, 2 months ago
Index: src/untrusted/pthread/nc_rwlock.c
diff --git a/src/untrusted/pthread/nc_rwlock.c b/src/untrusted/pthread/nc_rwlock.c
new file mode 100644
index 0000000000000000000000000000000000000000..94a9efd425238987a39977c4e3a3548acc787adb
--- /dev/null
+++ b/src/untrusted/pthread/nc_rwlock.c
@@ -0,0 +1,276 @@
+/*
+ * Copyright (c) 2014 The Native Client Authors. All rights reserved.
+ * Use of this source code is governed by a BSD-style license that can be
+ * found in the LICENSE file.
+ */
+
+/*
+ * Native Client read-write lock (pthread_rwlock) implementation
+ */
+
+#include <errno.h>
+#include <limits.h>
+
+#include "native_client/src/untrusted/nacl/nacl_irt.h"
+#include "native_client/src/untrusted/pthread/pthread.h"
+#include "native_client/src/untrusted/pthread/pthread_internal.h"
+#include "native_client/src/untrusted/pthread/pthread_types.h"
+
+/*
+ * This implementation is based on the one found in the bionic C library,
+ * which is part of the Android Open Source Project
+ * (see libc/bionic/pthread_rwlock.cpp).
+ * The following steps were used to port it to NaCl:
+ * - convert from C++ to C (mostly replacing 'bool')
+ * - convert futex calls to __libnacl_irt_futex
+ * - remove unsupported features such as PTHREAD_PROCESS_PRIVATE/SHARED
+ */
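For reference, the futex operations used throughout this file go through the __libnacl_irt_futex interface. A rough sketch of the interface shape, inferred only from the calls made below (the real definition lives in nacl_irt.h and may differ in detail), looks like this:

#include <time.h>  /* struct timespec */

/* Hypothetical sketch of the IRT futex interface, inferred from its use in
 * this file; the struct name and exact prototypes are illustrative. */
struct nacl_irt_futex_sketch {
  /* Sleep until *addr != value, a wake-up arrives, or the absolute deadline
   * abstime passes; returns 0 or an errno value such as ETIMEDOUT. */
  int (*futex_wait_abs)(volatile int *addr, int value,
                        const struct timespec *abstime);
  /* Wake up to nwake threads waiting on addr; count may receive the number
   * of threads actually woken (passed as NULL in this file). */
  int (*futex_wake)(volatile int *addr, int nwake, int *count);
};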
+
+/*
+ *
+ * Technical note:
+ *
+ * Possible states of a read/write lock:
+ *
+ * - no readers and no writer (unlocked)
+ * - one or more readers sharing the lock at the same time (read-locked)
+ * - one writer holding the lock (write-locked)
+ *
+ * Additionally:
+ * - trying to get the write-lock while there are any readers blocks
+ * - trying to get the read-lock while there is a writer blocks
+ * - a single thread can acquire the lock multiple times in read mode
+ *
+ * - POSIX states that behavior is undefined (may deadlock) if a thread tries
+ *   to acquire the lock
+ *   - in write mode while already holding the lock (whether in read or write
+ *     mode)
+ *   - in read mode while already holding the lock in write mode.
+ * - This implementation returns EDEADLK in the "write after write" and "read
+ *   after write" cases and will deadlock in the "write after read" case.
+ */
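To make the rules above concrete, here is a minimal, hypothetical usage sketch against the standard pthread API, matching the behavior described in this comment and implemented below:

#include <assert.h>
#include <errno.h>
#include <pthread.h>

static void rwlock_states_example(void) {
  pthread_rwlock_t rw;
  pthread_rwlock_init(&rw, NULL);

  /* A single thread may take the read lock multiple times. */
  assert(pthread_rwlock_rdlock(&rw) == 0);   /* state: 1 reader */
  assert(pthread_rwlock_rdlock(&rw) == 0);   /* state: 2 readers */
  assert(pthread_rwlock_unlock(&rw) == 0);
  assert(pthread_rwlock_unlock(&rw) == 0);   /* back to unlocked */

  /* "Write after write" and "read after write" are detected and rejected. */
  assert(pthread_rwlock_wrlock(&rw) == 0);   /* we are now the writer */
  assert(pthread_rwlock_wrlock(&rw) == EDEADLK);
  assert(pthread_rwlock_rdlock(&rw) == EDEADLK);
  assert(pthread_rwlock_unlock(&rw) == 0);

  pthread_rwlock_destroy(&rw);
}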
+
+int pthread_rwlockattr_init(pthread_rwlockattr_t *attr) {
+  *attr = PTHREAD_PROCESS_PRIVATE;
+  return 0;
+}
+
+int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr) {
+  *attr = -1;
+  return 0;
+}
+
+int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared) {
+  switch (pshared) {
+    case PTHREAD_PROCESS_PRIVATE:
+    case PTHREAD_PROCESS_SHARED:
+      *attr = pshared;
+      return 0;
+    default:
+      return EINVAL;
+  }
+}
+
+int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr,
+                                  int *pshared) {
+  *pshared = *attr;
+  return 0;
+}
+
+int pthread_rwlock_init(pthread_rwlock_t *rwlock,
+                        const pthread_rwlockattr_t *attr) {
+  if (attr != NULL) {
+    switch (*attr) {
+      case PTHREAD_PROCESS_SHARED:
+      case PTHREAD_PROCESS_PRIVATE:
+        rwlock->attr = *attr;
+        break;
+      default:
+        return EINVAL;
+    }
+  }
+
+  rwlock->state = 0;
+  rwlock->pending_readers = 0;
+  rwlock->pending_writers = 0;
+  rwlock->writer_thread_id = 0;
+
+  return 0;
+}
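A short sketch of the attribute plumbing above (hypothetical caller code; PTHREAD_PROCESS_PRIVATE is the default set by pthread_rwlockattr_init):

#include <pthread.h>

static void rwlock_init_example(void) {
  pthread_rwlockattr_t attr;
  pthread_rwlock_t rw;

  pthread_rwlockattr_init(&attr);           /* defaults to PTHREAD_PROCESS_PRIVATE */
  pthread_rwlockattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
  pthread_rwlock_init(&rw, &attr);          /* state and pending counters start at 0 */
  pthread_rwlockattr_destroy(&attr);

  /* ... rdlock/wrlock/unlock ... */

  pthread_rwlock_destroy(&rw);              /* returns EBUSY if still locked */
}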
+
+int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) {
+  if (rwlock->state != 0) {
+    return EBUSY;
+  }
+  return 0;
+}
+
+static int __pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
+                                        const struct timespec *abs_timeout) {
+  if (__predict_false(pthread_self() == rwlock->writer_thread_id)) {
+    return EDEADLK;
+  }
+
+  int done = 0;
+  do {
+    /*
+     * This is technically a racy read, as nothing guarantees the atomicity
+     * of plain integer reads/writes. However, in practice this "never"
+     * happens, so until we switch to C++11 atomics this should work fine.
+     * The same applies in the other places this idiom is used.
+     */
+    int32_t cur_state = rwlock->state; /* C++11 relaxed atomic read */
+    if (__predict_true(cur_state >= 0)) {
+      /* Add as an extra reader. */
+      done = __sync_bool_compare_and_swap(&rwlock->state,
+                                          cur_state,
+                                          cur_state + 1);
+    } else {
+      /*
+       * A writer holds the lock, so wait.
+       * To avoid losing wake-ups, the pending_readers update and the state
+       * read must be sequentially consistent (currently enforced by
+       * __sync_fetch_and_add, which creates a full barrier).
+       */
+      __sync_fetch_and_add(&rwlock->pending_readers, 1);
+      int ret = __libnacl_irt_futex.futex_wait_abs(&rwlock->state,
+                                                   cur_state,
+                                                   abs_timeout);
+      __sync_fetch_and_sub(&rwlock->pending_readers, 1);
+      if (ret == ETIMEDOUT) {
+        return ETIMEDOUT;
+      }
+    }
+  } while (!done);
+
+  return 0;
+}
+
+static int __pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
+                                        const struct timespec *abs_timeout) {
+  pthread_t self = pthread_self();
+  if (__predict_false(self == rwlock->writer_thread_id)) {
+    return EDEADLK;
+  }
+
+  int done = 0;
+  do {
+    int32_t cur_state = rwlock->state;
+    if (__predict_true(cur_state == 0)) {
+      /* Change state from 0 to -1. */
+      done = __sync_bool_compare_and_swap(&rwlock->state,
+                                          0 /* cur state */,
+                                          -1 /* new state */);
+    } else {
+      /*
+       * Failed to acquire the lock, so wait.
+       * To avoid losing wake-ups, the pending_writers update and the state
+       * read must be sequentially consistent (currently enforced by
+       * __sync_fetch_and_add, which creates a full barrier).
+       */
+      __sync_fetch_and_add(&rwlock->pending_writers, 1);
+      int ret = __libnacl_irt_futex.futex_wait_abs(&rwlock->state,
+                                                   cur_state,
+                                                   abs_timeout);
+      __sync_fetch_and_sub(&rwlock->pending_writers, 1);
+      if (ret == ETIMEDOUT) {
+        return ETIMEDOUT;
+      }
+    }
+  } while (!done);
+
+  rwlock->writer_thread_id = self;
+  return 0;
+}
+
+int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) {
+  return __pthread_rwlock_timedrdlock(rwlock, NULL);
+}
+
+int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
+                               const struct timespec *abs_timeout) {
+  return __pthread_rwlock_timedrdlock(rwlock, abs_timeout);
+}
+
+int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock) {
+  int32_t cur_state = rwlock->state;
+  if ((cur_state >= 0) &&
+      __sync_bool_compare_and_swap(&rwlock->state, cur_state, cur_state + 1)) {
+    return 0;
+  }
+  return EBUSY;
+}
+
+int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) {
+  return __pthread_rwlock_timedwrlock(rwlock, NULL);
+}
+
+int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
+                               const struct timespec *abs_timeout) {
+  return __pthread_rwlock_timedwrlock(rwlock, abs_timeout);
+}
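Both timed variants take an absolute deadline, which is handed straight to futex_wait_abs. A hedged sketch of building such a deadline (assuming CLOCK_REALTIME is the reference clock, as is usual for pthread_rwlock_timedwrlock but not spelled out in this patch):

#include <errno.h>
#include <pthread.h>
#include <time.h>

static int wrlock_with_timeout(pthread_rwlock_t *rw, time_t timeout_sec) {
  struct timespec deadline;
  clock_gettime(CLOCK_REALTIME, &deadline);
  deadline.tv_sec += timeout_sec;           /* absolute, not relative */

  int rc = pthread_rwlock_timedwrlock(rw, &deadline);
  /* rc is 0 on success, ETIMEDOUT if the deadline passed while waiting,
   * or EDEADLK if this thread already holds the write lock. */
  return rc;
}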
+
+int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock) {
+  int32_t cur_state = rwlock->state;
+  if ((cur_state == 0) &&
+      __sync_bool_compare_and_swap(&rwlock->state,
+                                   0 /* cur state */,
+                                   -1 /* new state */)) {
+    rwlock->writer_thread_id = pthread_self();
+    return 0;
+  }
+  return EBUSY;
+}
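The try variants never block; they return EBUSY when the lock cannot be taken immediately. A small hypothetical example:

#include <errno.h>
#include <pthread.h>

/* Returns 1 with the write lock held, 0 with the read lock held, or -1 if
 * neither could be acquired without blocking (illustrative policy only). */
static int try_write_then_read(pthread_rwlock_t *rw) {
  if (pthread_rwlock_trywrlock(rw) == 0)
    return 1;                 /* state went 0 -> -1, we are the writer */
  if (pthread_rwlock_tryrdlock(rw) == 0)
    return 0;                 /* joined the readers (state incremented) */
  return -1;                  /* EBUSY: a writer holds the lock (or we raced) */
}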
+
+int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) {
+  pthread_t self = pthread_self();
+  int done = 0;
+  do {
+    int32_t cur_state = rwlock->state;
+    if (cur_state == 0) {
+      return EPERM;
+    }
+    if (cur_state == -1) {
+      if (rwlock->writer_thread_id != self) {
+        return EPERM;
+      }
+      /* We're no longer the owner. */
+      rwlock->writer_thread_id = 0;
+      /*
+       * Change state from -1 to 0.
+       * We use __sync_bool_compare_and_swap to achieve sequential consistency
+       * of the state store and the following pendingX loads. A simple store
+       * with memory_order_release semantics is not enough to guarantee that
+       * the pendingX loads are not reordered before the store (which may
+       * lead to a lost wakeup).
+       */
+      __sync_bool_compare_and_swap(&rwlock->state,
+                                   -1 /* cur state */,
+                                   0 /* new state */);
+
+      /* Wake any waiters. */
+      if (__predict_false(rwlock->pending_readers > 0 ||
+                          rwlock->pending_writers > 0)) {
+        __libnacl_irt_futex.futex_wake(&rwlock->state, INT_MAX, NULL);
+      }
+      done = 1;
+    } else { /* cur_state > 0 */
+      /*
+       * Reduce state by 1.
+       * See the comment above on why we need __sync_bool_compare_and_swap.
+       */
+      done = __sync_bool_compare_and_swap(&rwlock->state,
+                                          cur_state,
+                                          cur_state - 1);
+      if (done && (cur_state - 1) == 0) {
+        /* There are no more readers, wake any waiters. */
+        if (__predict_false(rwlock->pending_readers > 0 ||
+                            rwlock->pending_writers > 0)) {
+          __libnacl_irt_futex.futex_wake(&rwlock->state, INT_MAX, NULL);
+        }
+      }
+    }
+  } while (!done);
+
+  return 0;
+}
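Putting it together, a minimal end-to-end sketch of the reader/writer protocol these functions implement (hypothetical threads and shared data, error handling omitted):

#include <pthread.h>

static pthread_rwlock_t g_rwlock;
static int g_value;

static void *reader(void *unused) {
  pthread_rwlock_rdlock(&g_rwlock);    /* shared with other readers */
  int copy = g_value;                  /* no writer can be inside here */
  pthread_rwlock_unlock(&g_rwlock);    /* last reader out wakes pending writers */
  (void) copy;
  return NULL;
}

static void *writer(void *unused) {
  pthread_rwlock_wrlock(&g_rwlock);    /* blocks until state drops to 0 */
  g_value++;                           /* exclusive access */
  pthread_rwlock_unlock(&g_rwlock);    /* wakes any pending readers/writers */
  return NULL;
}

static void run_example(void) {
  pthread_t r, w;
  pthread_rwlock_init(&g_rwlock, NULL);
  pthread_create(&r, NULL, reader, NULL);
  pthread_create(&w, NULL, writer, NULL);
  pthread_join(r, NULL);
  pthread_join(w, NULL);
  pthread_rwlock_destroy(&g_rwlock);
}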