| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkSharedMutex.h" | 8 #include "SkSharedMutex.h" |
| 9 | 9 |
| 10 #include "SkAtomics.h" | 10 #include "SkAtomics.h" |
| 11 #include "SkSemaphore.h" | 11 #include "SkSemaphore.h" |
| 12 #include "SkTypes.h" | 12 #include "SkTypes.h" |
| 13 | 13 |
| 14 | |
| 15 #if defined(THREAD_SANITIZER) | 14 #if defined(THREAD_SANITIZER) |
| 16 | 15 |
| 17 /* Report that a lock has been created at address "lock". */ | 16 /* Report that a lock has been created at address "lock". */ |
| 18 #define ANNOTATE_RWLOCK_CREATE(lock) \ | 17 #define ANNOTATE_RWLOCK_CREATE(lock) \ |
| 19 AnnotateRWLockCreate(__FILE__, __LINE__, lock) | 18 AnnotateRWLockCreate(__FILE__, __LINE__, lock) |
| 20 | 19 |
| 21 /* Report that the lock at address "lock" is about to be destroyed. */ | 20 /* Report that the lock at address "lock" is about to be destroyed. */ |
| 22 #define ANNOTATE_RWLOCK_DESTROY(lock) \ | 21 #define ANNOTATE_RWLOCK_DESTROY(lock) \ |
| 23 AnnotateRWLockDestroy(__FILE__, __LINE__, lock) | 22 AnnotateRWLockDestroy(__FILE__, __LINE__, lock) |
| 24 | 23 |
| (...skipping 136 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 161 fSharedQueue.wait(); | 160 fSharedQueue.wait(); |
| 162 } | 161 } |
| 163 ANNOTATE_RWLOCK_ACQUIRED(this, 0); | 162 ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
| 164 | 163 |
| 165 } | 164 } |
| 166 | 165 |
| 167 void SkSharedMutex::releaseShared() { | 166 void SkSharedMutex::releaseShared() { |
| 168 ANNOTATE_RWLOCK_RELEASED(this, 0); | 167 ANNOTATE_RWLOCK_RELEASED(this, 0); |
| 169 | 168 |
| 170 // Decrement the shared count. | 169 // Decrement the shared count. |
| 171 int32_t oldQueueCounts = fQueueCounts.fetch_add(-1 << kSharedOffset, | 170 int32_t oldQueueCounts = fQueueCounts.fetch_add(~0U << kSharedOffset, |
| 172 sk_memory_order_release); | 171 sk_memory_order_release); |
| 173 | 172 |
| 174 // If shared count is going to zero (because the old count == 1) and there are exclusive | 173 // If shared count is going to zero (because the old count == 1) and there are exclusive |
| 175 // waiters, then run a single exclusive waiter. | 174 // waiters, then run a single exclusive waiter. |
| 176 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | 175 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
| 177 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | 176 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
| 178 fExclusiveQueue.signal(); | 177 fExclusiveQueue.signal(); |
| 179 } | 178 } |
| 180 } | 179 } |
| OLD | NEW |