| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #include "SkSharedMutex.h" | 8 #include "SkSharedMutex.h" |
| 9 | 9 |
| 10 #include "SkAtomics.h" | 10 #include "SkAtomics.h" |
| (...skipping 166 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 177 fSharedQueue.wait(); | 177 fSharedQueue.wait(); |
| 178 } | 178 } |
| 179 ANNOTATE_RWLOCK_ACQUIRED(this, 0); | 179 ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
| 180 | 180 |
| 181 } | 181 } |
| 182 | 182 |
| 183 void SkSharedMutex::releaseShared() { | 183 void SkSharedMutex::releaseShared() { |
| 184 ANNOTATE_RWLOCK_RELEASED(this, 0); | 184 ANNOTATE_RWLOCK_RELEASED(this, 0); |
| 185 | 185 |
| 186 // Decrement the shared count. | 186 // Decrement the shared count. |
| 187 int32_t oldQueueCounts = fQueueCounts.fetch_add(~0U << kSharedOffset, | 187 int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset, |
| 188 sk_memory_order_release); | 188 sk_memory_order_release); |
| 189 | 189 |
| 190     // If shared count is going to zero (because the old count == 1) and there are exclusive | 190     // If shared count is going to zero (because the old count == 1) and there are exclusive |
| 191 // waiters, then run a single exclusive waiter. | 191 // waiters, then run a single exclusive waiter. |
| 192 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | 192 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
| 193 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | 193 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
| 194 fExclusiveQueue.signal(); | 194 fExclusiveQueue.signal(); |
| 195 } | 195 } |
| 196 } | 196 } |
| 197 | 197 |
| 198 #ifdef SK_DEBUG | 198 #ifdef SK_DEBUG |
| 199 void SkSharedMutex::assertHeldShared() const { | 199 void SkSharedMutex::assertHeldShared() const { |
| 200 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 200 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
| 201 // A very loose assert about the mutex being shared. | 201 // A very loose assert about the mutex being shared. |
| 202 SkASSERTF((queueCounts & kSharedMask) > 0, | 202 SkASSERTF((queueCounts & kSharedMask) > 0, |
| 203 "running shared: %d, exclusive: %d, waiting shared: %d", | 203 "running shared: %d, exclusive: %d, waiting shared: %d", |
| 204 (queueCounts & kSharedMask) >> kSharedOffset, | 204 (queueCounts & kSharedMask) >> kSharedOffset, |
| 205 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | 205 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, |
| 206 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | 206 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); |
| 207 } | 207 } |
| 208 | 208 |
| 209 #endif | 209 #endif |
| OLD | NEW |