OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkSharedMutex.h" | 8 #include "SkSharedMutex.h" |
9 | 9 |
10 #include "SkAtomics.h" | 10 #include "SkAtomics.h" |
(...skipping 48 matching lines...)
59 | 59 |
60 #else | 60 #else |
61 | 61 |
62 #define ANNOTATE_RWLOCK_CREATE(lock) | 62 #define ANNOTATE_RWLOCK_CREATE(lock) |
63 #define ANNOTATE_RWLOCK_DESTROY(lock) | 63 #define ANNOTATE_RWLOCK_DESTROY(lock) |
64 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) | 64 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) |
65 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) | 65 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) |
66 | 66 |
67 #endif | 67 #endif |
68 | 68 |
| 69 #ifdef SK_DEBUG |
| 70 |
| 71 SkSharedMutex::SkSharedMutex() { ANNOTATE_RWLOCK_CREATE(this); } |
| 72 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } |
| 73 void SkSharedMutex::acquire() { |
| 74 int currentSharedCount; |
| 75 int waitingExclusiveCount; |
| 76 { |
| 77 SkAutoMutexAcquire l(&fMu); |
| 78 SkThreadID threadID; |
| 79 if (!fWaitingExclusive.TryAdd(threadID)) { |
| 80 SkDebugf("Thread %lx already has an exclusive lock\n", threadID.toInt()); |
| 81 SkASSERT(false); |
| 82 } |
| 83 |
| 84 currentSharedCount = fCurrentShared.Count(); |
| 85 waitingExclusiveCount = fWaitingExclusive.Count(); |
| 86 } |
| 87 |
| 88 if (currentSharedCount > 0 || waitingExclusiveCount > 1) { |
| 89 fExclusiveQueue.wait(); |
| 90 } |
| 91 |
| 92 ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
| 93 } |
| 94 |
| 95 // Implementation Detail: |
| 96 // The shared threads need two separate queues to keep the threads that were added after the |
| 97 // exclusive lock separate from the threads added before. |
| 98 void SkSharedMutex::release() { |
| 99 ANNOTATE_RWLOCK_RELEASED(this, 1); |
| 100 int sharedWaitingCount; |
| 101 int exclusiveWaitingCount; |
| 102 int sharedQueueSelect; |
| 103 { |
| 104 SkAutoMutexAcquire l(&fMu); |
| 105 SkASSERT(0 == fCurrentShared.Count()); |
| 106 SkThreadID threadID; |
| 107 if (!fWaitingExclusive.TryRemove(threadID)) { |
| 108 SkDebugf("Thread %lx did not have the lock held.\n", threadID.toInt()); |
| 109 SkASSERT(false); |
| 110 } |
| 111 exclusiveWaitingCount = fWaitingExclusive.Count(); |
| 112 sharedWaitingCount = fWaitingShared.Count(); |
| 113 fWaitingShared.Swap(fCurrentShared); |
| 114 sharedQueueSelect = fSharedQueueSelect; |
| 115 if (sharedWaitingCount > 0) { |
| 116 fSharedQueueSelect = 1 - fSharedQueueSelect; |
| 117 } |
| 118 } |
| 119 |
| 120 if (sharedWaitingCount > 0) { |
| 121 fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount); |
| 122 } else if (exclusiveWaitingCount > 0) { |
| 123 fExclusiveQueue.signal(); |
| 124 } |
| 125 } |
| 126 |
| 127 void SkSharedMutex::assertHeld() const { |
| 128 SkThreadID threadID; |
| 129 SkAutoMutexAcquire l(&fMu); |
| 130 SkASSERT(0 == fCurrentShared.Count()); |
| 131 SkASSERT(fWaitingExclusive.Find(threadID)); |
| 132 } |
| 133 |
| 134 void SkSharedMutex::acquireShared() { |
| 135 int exclusiveWaitingCount; |
| 136 SkThreadID threadID; |
| 137 int sharedQueueSelect; |
| 138 { |
| 139 SkAutoMutexAcquire l(&fMu); |
| 140 exclusiveWaitingCount = fWaitingExclusive.Count(); |
| 141 if (exclusiveWaitingCount > 0) { |
| 142 if (!fWaitingShared.TryAdd(threadID)) { |
| 143 SkDebugf("Thread %lx was already waiting!\n", threadID.toInt()); |
| 144 SkASSERT(false); |
| 145 } |
| 146 } else { |
| 147 if (!fCurrentShared.TryAdd(threadID)) { |
| 148 SkDebugf("Thread %lx already holds a shared lock!\n", threadID.toInt()); |
| 149 SkASSERT(false); |
| 150 } |
| 151 } |
| 152 sharedQueueSelect = fSharedQueueSelect; |
| 153 } |
| 154 |
| 155 if (exclusiveWaitingCount > 0) { |
| 156 fSharedQueue[sharedQueueSelect].wait(); |
| 157 } |
| 158 |
| 159 ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
| 160 } |
| 161 |
| 162 void SkSharedMutex::releaseShared() { |
| 163 ANNOTATE_RWLOCK_RELEASED(this, 0); |
| 164 |
| 165 int currentSharedCount; |
| 166 int waitingExclusiveCount; |
| 167 SkThreadID threadID; |
| 168 { |
| 169 SkAutoMutexAcquire l(&fMu); |
| 170 if (!fCurrentShared.TryRemove(threadID)) { |
| 171 SkDebugf("Thread %lx does not hold a shared lock.\n", threadID.toInt()); |
| 172 SkASSERT(false); |
| 173 } |
| 174 currentSharedCount = fCurrentShared.Count(); |
| 175 waitingExclusiveCount = fWaitingExclusive.Count(); |
| 176 } |
| 177 |
| 178 if (0 == currentSharedCount && waitingExclusiveCount > 0) { |
| 179 fExclusiveQueue.signal(); |
| 180 } |
| 181 } |
| 182 |
| 183 void SkSharedMutex::assertHeldShared() const { |
| 184 SkThreadID threadID; |
| 185 SkAutoMutexAcquire l(&fMu); |
| 186 SkASSERT(fCurrentShared.Find(threadID)); |
| 187 } |
| 188 |
| 189 #else |
| 190 |
69 // The fQueueCounts field holds many counts in an int32_t in order to make managing them atomic. | 191 // The fQueueCounts field holds many counts in an int32_t in order to make managing them atomic. |
70 // These three counts must be the same size, so each gets 10 bits; 10 bits | 192 // These three counts must be the same size, so each gets 10 bits; 10 bits |
71 // cap each count at 1024 (2^10) threads. | 193 // cap each count at 1024 (2^10) threads. |
72 // | 194 // |
73 // The three counts held in fQueueCounts are: | 195 // The three counts held in fQueueCounts are: |
74 // * Shared - the number of shared lock holders currently running. | 196 // * Shared - the number of shared lock holders currently running. |
75 // * WaitingExclusive - the number of threads waiting for an exclusive lock. | 197 // * WaitingExclusive - the number of threads waiting for an exclusive lock. |
76 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread | 198 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread |
77 // to finish. | 199 // to finish. |
78 static const int kLogThreadCount = 10; | 200 static const int kLogThreadCount = 10; |
(...skipping 55 matching lines...)
134 | 256 |
135 if (waitingShared > 0) { | 257 if (waitingShared > 0) { |
136 // Run all the shared. | 258 // Run all the shared. |
137 fSharedQueue.signal(waitingShared); | 259 fSharedQueue.signal(waitingShared); |
138 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | 260 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
139 // Run a single exclusive waiter. | 261 // Run a single exclusive waiter. |
140 fExclusiveQueue.signal(); | 262 fExclusiveQueue.signal(); |
141 } | 263 } |
142 } | 264 } |
143 | 265 |
144 #ifdef SK_DEBUG | |
145 void SkSharedMutex::assertHeld() const { | |
146 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); | |
147 // These are very loose asserts about the mutex being held exclusively. | |
148 SkASSERTF(0 == (queueCounts & kSharedMask), | |
149 "running shared: %d, exclusive: %d, waiting shared: %d", | |
150 (queueCounts & kSharedMask) >> kSharedOffset, | |
151 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | |
152 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | |
153 SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0, | |
154 "running shared: %d, exclusive: %d, waiting shared: %d", | |
155 (queueCounts & kSharedMask) >> kSharedOffset, | |
156 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | |
157 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | |
158 } | |
159 #endif | |
160 | |
161 void SkSharedMutex::acquireShared() { | 266 void SkSharedMutex::acquireShared() { |
162 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 267 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
163 int32_t newQueueCounts; | 268 int32_t newQueueCounts; |
164 do { | 269 do { |
165 newQueueCounts = oldQueueCounts; | 270 newQueueCounts = oldQueueCounts; |
166 // If there are waiting exclusives, this shared lock waits; otherwise it runs. | 271 // If there are waiting exclusives, this shared lock waits; otherwise it runs. |
167 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | 272 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
168 newQueueCounts += 1 << kWaitingSharedOffset; | 273 newQueueCounts += 1 << kWaitingSharedOffset; |
169 } else { | 274 } else { |
170 newQueueCounts += 1 << kSharedOffset; | 275 newQueueCounts += 1 << kSharedOffset; |
(...skipping 17 matching lines...)
188 sk_memory_order_release); | 293 sk_memory_order_release); |
189 | 294 |
190 // If shared count is going to zero (because the old count == 1) and there are exclusive | 295 // If shared count is going to zero (because the old count == 1) and there are exclusive |
191 // waiters, then run a single exclusive waiter. | 296 // waiters, then run a single exclusive waiter. |
192 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | 297 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
193 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | 298 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
194 fExclusiveQueue.signal(); | 299 fExclusiveQueue.signal(); |
195 } | 300 } |
196 } | 301 } |
197 | 302 |
198 #ifdef SK_DEBUG | |
199 void SkSharedMutex::assertHeldShared() const { | |
200 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); | |
201 // A very loose assert about the mutex being shared. | |
202 SkASSERTF((queueCounts & kSharedMask) > 0, | |
203 "running shared: %d, exclusive: %d, waiting shared: %d", | |
204 (queueCounts & kSharedMask) >> kSharedOffset, | |
205 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | |
206 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | |
207 } | |
208 | |
209 #endif | 303 #endif |
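
The release-mode path that survives this change keeps all three counts packed in fQueueCounts and updates them with a compare-and-swap loop, so the uncontended shared path never takes a mutex. The following standalone sketch restates that scheme in portable C++; the offset and mask constants are reconstructed from the 10-bits-per-count comment (the real definitions sit in the elided hunk), std::atomic stands in for SkAtomics, and the memory orderings mirror the ones visible in the diff, so treat the details as assumptions rather than the patch itself.

    #include <atomic>
    #include <cstdint>

    // Assumed layout: three 10-bit counts packed into one int32_t, per the
    // kLogThreadCount comment above. These constants are reconstructed, not
    // copied from the elided hunk.
    static const int     kLogThreadCount         = 10;
    static const int     kSharedOffset           = 0 * kLogThreadCount;
    static const int     kWaitingExclusiveOffset = 1 * kLogThreadCount;
    static const int     kWaitingSharedOffset    = 2 * kLogThreadCount;
    static const int32_t kSharedMask           = ((1 << kLogThreadCount) - 1) << kSharedOffset;
    static const int32_t kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset;

    // acquireShared-style CAS loop: take a shared slot if no writer is queued,
    // otherwise register as a waiting reader. Returns true if the caller may
    // run immediately, false if it must block on the shared-queue semaphore.
    bool tryAcquireShared(std::atomic<int32_t>& queueCounts) {
        int32_t oldCounts = queueCounts.load(std::memory_order_relaxed);
        int32_t newCounts;
        do {
            newCounts = oldCounts;
            if ((newCounts & kWaitingExclusiveMask) > 0) {
                newCounts += 1 << kWaitingSharedOffset;  // writer queued: wait
            } else {
                newCounts += 1 << kSharedOffset;         // no writers: run now
            }
            // On failure compare_exchange_weak reloads oldCounts and we retry.
        } while (!queueCounts.compare_exchange_weak(oldCounts, newCounts,
                                                    std::memory_order_acquire,
                                                    std::memory_order_relaxed));
        return (newCounts & kWaitingExclusiveMask) == 0;
    }

    // releaseShared-style exit: drop one shared count with release ordering.
    // Returns true when the caller was the last reader (old shared count == 1)
    // and a writer is queued, i.e. when fExclusiveQueue should be signalled.
    bool releaseSharedWakesWriter(std::atomic<int32_t>& queueCounts) {
        int32_t oldCounts = queueCounts.fetch_sub(1 << kSharedOffset,
                                                  std::memory_order_release);
        return ((oldCounts & kSharedMask) >> kSharedOffset) == 1
            && (oldCounts & kWaitingExclusiveMask) > 0;
    }

A caller that gets false from tryAcquireShared would block on the shared-queue semaphore, much as acquireShared() does after its CAS loop.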
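The subtle piece of the new debug implementation is the comment at new lines 96-97: readers that arrive while an exclusive waiter is queued must be kept apart from the reader generation released before it. Below is a hypothetical, self-contained C++20 illustration of that two-queue handoff; fMu, fSharedQueue, and fSharedQueueSelect echo the names in the diff, std::counting_semaphore stands in for the patch's semaphore type, and the fWaitingShared/fCurrentShared bookkeeping is omitted, so this is a sketch of the idea, not the CL's code.

    #include <mutex>
    #include <semaphore>

    // Illustration only. Readers that must wait park on the currently selected
    // semaphore; the releasing writer flips the selector under fMu *before*
    // signalling, so a reader arriving after the flip parks on the other queue
    // and cannot join the generation being woken.
    class TwoGenerationQueues {
    public:
        // Called by a reader that found a writer waiting (cf. acquireShared()).
        // In the patch the reader also registers in fWaitingShared inside this
        // same critical section, which guarantees the writer's wake count
        // includes it even if it parks a little late.
        void parkReader() {
            int select;
            {
                std::lock_guard<std::mutex> l(fMu);
                select = fSharedQueueSelect;  // snapshot under the lock
            }
            fSharedQueue[select].acquire();   // block outside the lock
        }

        // Called by the releasing writer (cf. release()) to wake every reader
        // that queued up while it held the lock.
        void wakeReaderGeneration(int sharedWaitingCount) {
            if (sharedWaitingCount <= 0) {
                return;  // the patch only flips the selector when readers wait
            }
            int select;
            {
                std::lock_guard<std::mutex> l(fMu);
                select = fSharedQueueSelect;
                fSharedQueueSelect = 1 - fSharedQueueSelect;  // next generation
            }
            fSharedQueue[select].release(sharedWaitingCount);
        }

    private:
        std::mutex fMu;
        std::counting_semaphore<> fSharedQueue[2]{std::counting_semaphore<>{0},
                                                  std::counting_semaphore<>{0}};
        int fSharedQueueSelect = 0;
    };

With a single queue, a reader that arrived after release() had already counted the waiters could steal one of the signalled permits, running in the waking generation while a reader from that generation stayed blocked; flipping fSharedQueueSelect before signalling closes that window.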