OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkSharedMutex.h" | 8 #include "SkSharedMutex.h" |
9 | 9 |
10 #include "SkAtomics.h" | 10 #include "SkAtomics.h" |
(...skipping 123 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
134 | 134 |
135 if (waitingShared > 0) { | 135 if (waitingShared > 0) { |
136 // Run all the shared. | 136 // Run all the shared. |
137 fSharedQueue.signal(waitingShared); | 137 fSharedQueue.signal(waitingShared); |
138 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | 138 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
139 // Run a single exclusive waiter. | 139 // Run a single exclusive waiter. |
140 fExclusiveQueue.signal(); | 140 fExclusiveQueue.signal(); |
141 } | 141 } |
142 } | 142 } |
143 | 143 |
#ifdef SK_DEBUG
void SkSharedMutex::assertHeld() const {
    int32_t counts = fQueueCounts.load(sk_memory_order_relaxed);
    // Decode each bit-field of the packed counter once so the asserts below
    // read cleanly. These are deliberately loose sanity checks that the mutex
    // is currently held exclusively (a relaxed load cannot prove ownership).
    int32_t sharedRunning    = (counts & kSharedMask) >> kSharedOffset;
    int32_t exclusiveWaiting = (counts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset;
    int32_t sharedWaiting    = (counts & kWaitingSharedMask) >> kWaitingSharedOffset;
    // Exclusive ownership implies no readers are running...
    SkASSERTF(0 == sharedRunning,
              "running shared: %d, exclusive: %d, waiting shared: %d",
              sharedRunning, exclusiveWaiting, sharedWaiting);
    // ...and the holder itself is counted among the exclusive waiters.
    SkASSERTF(exclusiveWaiting > 0,
              "running shared: %d, exclusive: %d, waiting shared: %d",
              sharedRunning, exclusiveWaiting, sharedWaiting);
}
#endif
| 160 |
144 void SkSharedMutex::acquireShared() { | 161 void SkSharedMutex::acquireShared() { |
145 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 162 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
146 int32_t newQueueCounts; | 163 int32_t newQueueCounts; |
147 do { | 164 do { |
148 newQueueCounts = oldQueueCounts; | 165 newQueueCounts = oldQueueCounts; |
149 // If there are waiting exclusives then this shared lock waits else it r
uns. | 166 // If there are waiting exclusives then this shared lock waits else it r
uns. |
150 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | 167 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
151 newQueueCounts += 1 << kWaitingSharedOffset; | 168 newQueueCounts += 1 << kWaitingSharedOffset; |
152 } else { | 169 } else { |
153 newQueueCounts += 1 << kSharedOffset; | 170 newQueueCounts += 1 << kSharedOffset; |
(...skipping 16 matching lines...) Expand all Loading... |
170 int32_t oldQueueCounts = fQueueCounts.fetch_add(~0U << kSharedOffset, | 187 int32_t oldQueueCounts = fQueueCounts.fetch_add(~0U << kSharedOffset, |
171 sk_memory_order_release); | 188 sk_memory_order_release); |
172 | 189 |
173 // If shared count is going to zero (because the old count == 1) and there a
re exclusive | 190 // If shared count is going to zero (because the old count == 1) and there a
re exclusive |
174 // waiters, then run a single exclusive waiter. | 191 // waiters, then run a single exclusive waiter. |
175 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | 192 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
176 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | 193 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
177 fExclusiveQueue.signal(); | 194 fExclusiveQueue.signal(); |
178 } | 195 } |
179 } | 196 } |
| 197 |
#ifdef SK_DEBUG
void SkSharedMutex::assertHeldShared() const {
    int32_t counts = fQueueCounts.load(sk_memory_order_relaxed);
    // Decode each bit-field of the packed counter once for readability.
    int32_t sharedRunning    = (counts & kSharedMask) >> kSharedOffset;
    int32_t exclusiveWaiting = (counts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset;
    int32_t sharedWaiting    = (counts & kWaitingSharedMask) >> kWaitingSharedOffset;
    // A deliberately loose sanity check: at least one reader currently holds
    // the mutex (a relaxed load cannot prove this thread is among them).
    SkASSERTF(sharedRunning > 0,
              "running shared: %d, exclusive: %d, waiting shared: %d",
              sharedRunning, exclusiveWaiting, sharedWaiting);
}

#endif
OLD | NEW |