/*
 * Copyright 2015 The Native Client Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/*
 * Native Client rwlock implementation
 *
 * This implementation is a 'write-preferring' reader-writer lock which
 * avoids writer starvation by preventing readers from acquiring the lock
 * while there are waiting writers (with an exception to prevent deadlocks
 * in the case of a recursive read lock (see read_lock_available)). See:
 * http://en.wikipedia.org/wiki/Readers%E2%80%93writer_lock
 *
 * The thundering herd problem is avoided by only waking a single
 * waiter (either a single writer or a single reader) when the
 * lock is released.
 */
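
/*
 * Typical usage (illustrative sketch only, not part of this file; error
 * checking omitted):
 *
 *   pthread_rwlock_t lock;
 *   pthread_rwlock_init(&lock, NULL);
 *
 *   pthread_rwlock_rdlock(&lock);    (any number of concurrent readers)
 *   ...read shared data...
 *   pthread_rwlock_unlock(&lock);
 *
 *   pthread_rwlock_wrlock(&lock);    (exactly one writer, no readers)
 *   ...modify shared data...
 *   pthread_rwlock_unlock(&lock);
 *
 *   pthread_rwlock_destroy(&lock);
 */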

#include <errno.h>

#include "native_client/src/untrusted/pthread/pthread.h"
#include "native_client/src/untrusted/pthread/pthread_internal.h"
#include "native_client/src/untrusted/pthread/pthread_types.h"

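/*
 * Read-write lock attributes. Only the process-shared attribute is
 * stored; pthread_rwlock_init() accepts PTHREAD_PROCESS_PRIVATE only.
 */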
int pthread_rwlockattr_init(pthread_rwlockattr_t *attr) {
  attr->type = PTHREAD_PROCESS_PRIVATE;
  return 0;
}

int pthread_rwlockattr_destroy(pthread_rwlockattr_t *attr) {
  return 0;
}

int pthread_rwlockattr_getpshared(const pthread_rwlockattr_t *attr,
                                  int *pshared) {
  *pshared = attr->type;
  return 0;
}

int pthread_rwlockattr_setpshared(pthread_rwlockattr_t *attr, int pshared) {
  if (pshared != PTHREAD_PROCESS_PRIVATE && pshared != PTHREAD_PROCESS_SHARED)
    return EINVAL;
  attr->type = pshared;
  return 0;
}

int pthread_rwlock_init(pthread_rwlock_t *rwlock,
                        const pthread_rwlockattr_t *attr) {
  if (attr != NULL && attr->type != PTHREAD_PROCESS_PRIVATE)
    return EINVAL;
  rwlock->writer_thread_id = NACL_PTHREAD_ILLEGAL_THREAD_ID;
  rwlock->writers_waiting = 0;
  rwlock->reader_count = 0;
  int rc = pthread_mutex_init(&rwlock->mutex, NULL);
  if (rc != 0)
    return rc;
  rc = pthread_cond_init(&rwlock->write_possible, NULL);
  if (rc != 0)
    return rc;
  return pthread_cond_init(&rwlock->read_possible, NULL);
}

/*
 * Helper function used by waiting writers to determine if they can take
 * the lock. The rwlock->mutex must be held when calling this function.
 * Returns 1 if the write lock can be taken, 0 if it can't.
 */
static inline int write_lock_available(pthread_rwlock_t *rwlock) {
  /*
   * Write lock is available if there is no current writer and no current
   * readers.
   */
  if (rwlock->writer_thread_id != NACL_PTHREAD_ILLEGAL_THREAD_ID)
    return 0;
  if (rwlock->reader_count > 0)
    return 0;
  return 1;
}

/*
 * Helper function used by waiting readers to determine if they can take
 * the lock. The rwlock->mutex must be held when calling this function.
 * Returns 1 if the read lock can be taken, 0 if it can't.
 */
static inline int read_lock_available(pthread_rwlock_t *rwlock) {
  /*
   * Read lock is unavailable if there is a current writer.
   */
  if (rwlock->writer_thread_id != NACL_PTHREAD_ILLEGAL_THREAD_ID)
    return 0;

  /*
   * Attempt to reduce writer starvation by blocking readers when there
   * is a waiting writer. However, don't do this if the current thread
   * already holds one or more rdlocks in order to allow for recursive
   * rdlocks. See: http://stackoverflow.com/questions/2190090/
   * how-to-prevent-writer-starvation-in-a-read-write-lock-in-pthreads
   */
  if (rwlock->writers_waiting > 0 && __nc_get_tdb()->rdlock_count == 0)
    return 0;
  return 1;
}

/*
 * Internal function used to acquire the read lock.
 * This operates in three different ways in order to implement the three public
 * functions:
 *   pthread_rwlock_rdlock
 *   pthread_rwlock_tryrdlock
 *   pthread_rwlock_timedrdlock
 */
static int rdlock_internal(pthread_rwlock_t *rwlock,
                           const struct timespec *abs_timeout,
                           int try_only) {
  int rc2;
  int rc = pthread_mutex_lock(&rwlock->mutex);
  if (rc != 0)
    return rc;

  /*
   * Wait repeatedly until the read preconditions are met.
   * In theory this loop should only execute once because the preconditions
   * should always be true when the condition is signaled.
   */
  while (!read_lock_available(rwlock)) {
    if (try_only) {
      rc = EBUSY;
    } else if (abs_timeout != NULL) {
      rc = pthread_cond_timedwait(&rwlock->read_possible,
                                  &rwlock->mutex,
                                  abs_timeout);
    } else {
      rc = pthread_cond_wait(&rwlock->read_possible, &rwlock->mutex);
    }
    if (rc != 0)
      goto done;
  }

  /* Acquire the read lock. */
  rwlock->reader_count++;
  __nc_get_tdb()->rdlock_count++;
done:
  rc2 = pthread_mutex_unlock(&rwlock->mutex);
  return rc == 0 ? rc2 : rc;
}

/*
 * Internal function used to acquire the write lock.
 * This operates in three different ways in order to implement the three public
 * functions:
 *   pthread_rwlock_wrlock
 *   pthread_rwlock_trywrlock
 *   pthread_rwlock_timedwrlock
 */
static int rwlock_internal(pthread_rwlock_t *rwlock,
                           const struct timespec *abs_timeout,
                           int try_only) {
  int rc2;
  int rc = pthread_mutex_lock(&rwlock->mutex);
  if (rc != 0)
    return rc;

  /* Wait repeatedly until the write preconditions are met. */
  while (!write_lock_available(rwlock)) {
    if (try_only) {
      rc = EBUSY;
    } else {
      /*
       * Before waiting (and releasing the lock) we increment the
       * writers_waiting count so the unlocking code knows to wake
       * a writer first (before any waiting readers).
       */
      rwlock->writers_waiting++;
      if (abs_timeout != NULL) {
        rc = pthread_cond_timedwait(&rwlock->write_possible,
                                    &rwlock->mutex,
                                    abs_timeout);
      } else {
        rc = pthread_cond_wait(&rwlock->write_possible,
                               &rwlock->mutex);
      }
      rwlock->writers_waiting--;
    }
    if (rc != 0)
      goto done;
  }

  /* Acquire the write lock. */
  rwlock->writer_thread_id = pthread_self();
done:
  rc2 = pthread_mutex_unlock(&rwlock->mutex);
  return rc == 0 ? rc2 : rc;
}

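/*
 * Public entry points. Each one simply forwards to rdlock_internal or
 * rwlock_internal with the appropriate abs_timeout and try_only arguments.
 */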
int pthread_rwlock_timedrdlock(pthread_rwlock_t *rwlock,
                               const struct timespec *abs_timeout) {
  return rdlock_internal(rwlock, abs_timeout, 0);
}

int pthread_rwlock_rdlock(pthread_rwlock_t *rwlock) {
  return rdlock_internal(rwlock, NULL, 0);
}

int pthread_rwlock_tryrdlock(pthread_rwlock_t *rwlock) {
  return rdlock_internal(rwlock, NULL, 1);
}

int pthread_rwlock_wrlock(pthread_rwlock_t *rwlock) {
  return rwlock_internal(rwlock, NULL, 0);
}

int pthread_rwlock_timedwrlock(pthread_rwlock_t *rwlock,
                               const struct timespec *abs_timeout) {
  return rwlock_internal(rwlock, abs_timeout, 0);
}

int pthread_rwlock_trywrlock(pthread_rwlock_t *rwlock) {
  return rwlock_internal(rwlock, NULL, 1);
}

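/*
 * Releases the lock held by the calling thread: the write lock if this
 * thread is the recorded writer, otherwise a single read lock.
 */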
int pthread_rwlock_unlock(pthread_rwlock_t *rwlock) {
  int rc2;
  int rc = pthread_mutex_lock(&rwlock->mutex);
  if (rc != 0)
    return rc;

  if (rwlock->writer_thread_id != NACL_PTHREAD_ILLEGAL_THREAD_ID) {
    /* The write lock is held. Ensure it's the current thread that holds it. */
    if (rwlock->writer_thread_id != pthread_self()) {
      rc = EPERM;
      goto done;
    }

    /* Release write lock. */
    rwlock->writer_thread_id = NACL_PTHREAD_ILLEGAL_THREAD_ID;
    if (rwlock->writers_waiting > 0) {
      /* Wake a waiting writer if there is one. */
      rc = pthread_cond_signal(&rwlock->write_possible);
    } else {
      /* Otherwise wake a waiting reader. */
      rc = pthread_cond_signal(&rwlock->read_possible);
    }
  } else {
    if (rwlock->reader_count == 0) {
      rc = EPERM;
      goto done;
    }

    /* Release read lock. */
    rwlock->reader_count--;
    __nc_get_tdb()->rdlock_count--;
    if (rwlock->reader_count == 0 && rwlock->writers_waiting > 0) {
      /* Wake a waiting writer. */
      rc = pthread_cond_signal(&rwlock->write_possible);
    }
  }

done:
  rc2 = pthread_mutex_unlock(&rwlock->mutex);
  return rc == 0 ? rc2 : rc;
}

int pthread_rwlock_destroy(pthread_rwlock_t *rwlock) {
  /* Return EBUSY if another thread holds the mutex. */
  int rc = pthread_mutex_trylock(&rwlock->mutex);
  if (rc != 0) {
    return rc;
  }

  /* Return EBUSY if there are active readers or an active writer. */
  if (rwlock->reader_count != 0) {
    pthread_mutex_unlock(&rwlock->mutex);
    return EBUSY;
  }
  if (rwlock->writer_thread_id != NACL_PTHREAD_ILLEGAL_THREAD_ID) {
    pthread_mutex_unlock(&rwlock->mutex);
    return EBUSY;
  }

  int rc1 = pthread_cond_destroy(&rwlock->write_possible);
  int rc2 = pthread_cond_destroy(&rwlock->read_possible);

  /* Finally unlock the mutex and destroy it. */
  pthread_mutex_unlock(&rwlock->mutex);
  int rc3 = pthread_mutex_destroy(&rwlock->mutex);
  if (rc1 != 0)
    return rc1;
  if (rc2 != 0)
    return rc2;
  return rc3;
}