OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkSharedMutex.h" | 8 #include "SkSharedMutex.h" |
9 | 9 |
10 #include "SkAtomics.h" | 10 #include "SkAtomics.h" |
(...skipping 48 matching lines...)
59 | 59 |
60 #else | 60 #else |
61 | 61 |
62 #define ANNOTATE_RWLOCK_CREATE(lock) | 62 #define ANNOTATE_RWLOCK_CREATE(lock) |
63 #define ANNOTATE_RWLOCK_DESTROY(lock) | 63 #define ANNOTATE_RWLOCK_DESTROY(lock) |
64 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) | 64 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) |
65 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) | 65 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) |
66 | 66 |
67 #endif | 67 #endif |
68 | 68 |
69 // The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic. | 69 #ifdef SK_DEBUG |
70 // These three counts must be the same size, so each gets 10 bits. The 10 bits represent | 70 |
71 // the log of the count which is 1024. | 71 SkSharedMutex::SkSharedMutex() { ANNOTATE_RWLOCK_CREATE(this); } |
72 // | 72 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } |
73 // The three counts held in fQueueCounts are: | 73 void SkSharedMutex::acquire() { |
74 // * Shared - the number of shared lock holders currently running. | 74 ThreadID threadID; |
75 // * WaitingExclusive - the number of threads waiting for an exclusive lock. | 75 int currentShardCount; |
mtklein_C 2015/09/17 19:51:42: shard -> shared?
herb_g 2015/09/17 21:39:41: Done.
76 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread | 76 int waitingExclusiveCount; |
77 // to finish. | 77 { |
78 static const int kLogThreadCount = 10; | 78 SkAutoMutexAcquire l(&fMu); |
79 | 79 |
80 enum { | 80 if (!fWaitingExclusive.tryAdd(threadID)) { |
81 kSharedOffset = (0 * kLogThreadCount), | 81 SkDebugf("Thread %lx already has an exclusive lock\n", threadID.toInt()); |
82 kWaitingExlusiveOffset = (1 * kLogThreadCount), | 82 SkASSERT(false); |
mtklein_C 2015/09/17 19:51:42: SkFAIL or SkDEBUGFAIL
herb_g 2015/09/17 21:39:41: Done.
83 kWaitingSharedOffset = (2 * kLogThreadCount), | 83 } |
84 kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, | 84 |
85 kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset, | 85 currentShardCount = fCurrentShared.count(); |
86 kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, | 86 waitingExclusiveCount = fWaitingExclusive.count(); |
87 }; | 87 } |
88 | 88 |
89 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); } | 89 if (currentShardCount > 0 || waitingExclusiveCount > 1) { |
90 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } | 90 fExclusiveQueue.wait(); |
91 void SkSharedMutex::acquire() { | 91 } |
92 // Increment the count of exclusive queue waiters. | 92 |
93 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, | 93 ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
94 sk_memory_order_acquire); | 94 } |
95 | 95 |
96 // If there are no other exclusive waiters and no shared threads are running then run | 96 // Implementation Detail: |
97 // else wait. | 97 // The shared threads need two separate queues to keep the threads that were added after the |
98 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { | 98 // exclusive lock separate from the threads added before. |
99 fExclusiveQueue.wait(); | 99 void SkSharedMutex::release() { |
100 } | 100 ANNOTATE_RWLOCK_RELEASED(this, 1); |
101 ANNOTATE_RWLOCK_ACQUIRED(this, 1); | 101 ThreadID threadID; |
102 } | 102 int sharedWaitingCount; |
103 | 103 int exclusiveWaitingCount; |
104 void SkSharedMutex::release() { | 104 int sharedQueueSelect; |
105 ANNOTATE_RWLOCK_RELEASED(this, 1); | 105 { |
106 | 106 SkAutoMutexAcquire l(&fMu); |
107 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 107 SkASSERT(0 == fCurrentShared.count()); |
108 int32_t waitingShared; | 108 if (!fWaitingExclusive.tryRemove(threadID)) { |
109 int32_t newQueueCounts; | 109 SkDebugf("Thread %lx did not have the lock held.\n", threadID.toInt()); |
mtklein_C 2015/09/17 19:51:42: ditto, etc.
herb_g 2015/09/17 21:39:41: Done.
110 do { | 110 SkASSERT(false); |
111 newQueueCounts = oldQueueCounts; | 111 } |
112 | 112 exclusiveWaitingCount = fWaitingExclusive.count(); |
113 // Decrement exclusive waiters. | 113 sharedWaitingCount = fWaitingShared.count(); |
114 newQueueCounts -= 1 << kWaitingExlusiveOffset; | 114 fWaitingShared.swap(fCurrentShared); |
115 | 115 sharedQueueSelect = fSharedQueueSelect; |
116 // The number of threads waiting to acquire a shared lock. | 116 if (sharedWaitingCount > 0) { |
117 waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedO ffset; | 117 fSharedQueueSelect = 1 - fSharedQueueSelect; |
118 | 118 } |
119 // If there are any move the counts of all the shared waiters to actual shared. They are | 119 } |
120 // going to run next. | 120 |
121 if (sharedWaitingCount > 0) { | |
122 fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount); | |
123 } else if (exclusiveWaitingCount > 0) { | |
124 fExclusiveQueue.signal(); | |
125 } | |
126 } | |
127 | |
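Note on the double-buffered shared queue used by the SK_DEBUG release() above: below is a minimal standalone sketch of the same idea, assuming C++20's std::counting_semaphore in place of SkSemaphore; the class and member names are hypothetical and this is not part of the CL. Readers that have to park wait on the currently selected queue; the releasing writer wakes exactly the readers that parked before the release and then flips the selector, so readers arriving afterwards park on the other queue and cannot consume those wakeups.

```cpp
// Hypothetical sketch (not Skia code) of the two-queue scheme: readers park on
// fReaderQueue[fQueueSelect]; the releasing writer signals that queue and flips
// the selector so later readers use the other queue.
#include <mutex>
#include <semaphore>

class TwoQueueSketch {
public:
    // Called by a reader that must wait because a writer is queued.
    void parkReader() {
        int select;
        {
            std::lock_guard<std::mutex> l(fMu);
            ++fParkedReaders;
            select = fQueueSelect;
        }
        fReaderQueue[select].acquire();  // park on the currently selected queue
    }

    // Called by the writer when it releases the exclusive lock.
    void releaseWriter() {
        int parked;
        int select;
        {
            std::lock_guard<std::mutex> l(fMu);
            parked = fParkedReaders;
            fParkedReaders = 0;
            select = fQueueSelect;
            if (parked > 0) {
                fQueueSelect = 1 - fQueueSelect;  // later readers park on the other queue
            }
        }
        if (parked > 0) {
            fReaderQueue[select].release(parked);  // wake only the readers already parked
        }
    }

private:
    std::mutex fMu;
    int fParkedReaders = 0;
    int fQueueSelect = 0;
    std::counting_semaphore<1024> fReaderQueue[2]{std::counting_semaphore<1024>(0),
                                                  std::counting_semaphore<1024>(0)};
};
```

This is also why fSharedQueueSelect in the code above only flips when sharedWaitingCount is greater than zero: if nobody parked, there is nothing a late arrival could steal.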
128 void SkSharedMutex::assertHeld() const { | |
129 ThreadID threadID; | |
130 SkAutoMutexAcquire l(&fMu); | |
131 SkASSERT(0 == fCurrentShared.count()); | |
132 SkASSERT(fWaitingExclusive.find(threadID)); | |
133 } | |
134 | |
135 void SkSharedMutex::acquireShared() { | |
136 ThreadID threadID; | |
137 int exclusiveWaitingCount; | |
138 int sharedQueueSelect; | |
139 { | |
140 SkAutoMutexAcquire l(&fMu); | |
141 exclusiveWaitingCount = fWaitingExclusive.count(); | |
142 if (exclusiveWaitingCount > 0) { | |
143 if (!fWaitingShared.tryAdd(threadID)) { | |
144 SkDebugf("Thread %lx was already waiting!\n", threadID.toInt()); | |
145 SkASSERT(false); | |
146 } | |
147 } else { | |
148 if (!fCurrentShared.tryAdd(threadID)) { | |
149 SkDebugf("Thread %lx already holds a shared lock!\n", threadID.toInt()); | |
150 SkASSERT(false); | |
151 } | |
152 } | |
153 sharedQueueSelect = fSharedQueueSelect; | |
154 } | |
155 | |
156 if (exclusiveWaitingCount > 0) { | |
157 fSharedQueue[sharedQueueSelect].wait(); | |
158 } | |
159 | |
160 ANNOTATE_RWLOCK_ACQUIRED(this, 0); | |
161 } | |
162 | |
163 void SkSharedMutex::releaseShared() { | |
164 ANNOTATE_RWLOCK_RELEASED(this, 0); | |
165 ThreadID threadID; | |
166 | |
167 int currentSharedCount; | |
168 int waitingExclusiveCount; | |
169 { | |
170 SkAutoMutexAcquire l(&fMu); | |
171 if (!fCurrentShared.tryRemove(threadID)) { | |
172 SkDebugf("Thread %lx does not hold a shared lock.\n", threadID.toInt()); | |
173 SkASSERT(false); | |
174 } | |
175 currentSharedCount = fCurrentShared.count(); | |
176 waitingExclusiveCount = fWaitingExclusive.count(); | |
177 } | |
178 | |
179 if (0 == currentSharedCount && waitingExclusiveCount > 0) { | |
180 fExclusiveQueue.signal(); | |
181 } | |
182 } | |
183 | |
184 void SkSharedMutex::assertHeldShared() const { | |
185 ThreadID threadID; | |
186 SkAutoMutexAcquire l(&fMu); | |
187 SkASSERT(fCurrentShared.find(threadID)); | |
188 } | |
189 | |
190 #else | |
191 | |
192 // The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic. | |
193 // These three counts must be the same size, so each gets 10 bits. The 10 bits represent | |
194 // the log of the count which is 1024. | |
195 // | |
196 // The three counts held in fQueueCounts are: | |
197 // * Shared - the number of shared lock holders currently running. | |
198 // * WaitingExclusive - the number of threads waiting for an exclusive lock. | |
199 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread | |
200 // to finish. | |
201 static const int kLogThreadCount = 10; | |
202 | |
203 enum { | |
204 kSharedOffset = (0 * kLogThreadCount), | |
205 kWaitingExlusiveOffset = (1 * kLogThreadCount), | |
206 kWaitingSharedOffset = (2 * kLogThreadCount), | |
207 kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, | |
208 kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset, | |
209 kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, | |
210 }; | |
211 | |
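For reference, here is a standalone sketch of how these offsets and masks pack three 10-bit counts into one int32_t; the packCounts helper and the main() are hypothetical and only illustrate the layout that the enum above defines.

```cpp
// Standalone illustration (hypothetical helpers, not part of SkSharedMutex) of
// three 10-bit counts sharing a single int32_t.
#include <cassert>
#include <cstdint>
#include <cstdio>

static const int kLogThreadCount = 10;

enum {
    kSharedOffset          = (0 * kLogThreadCount),
    kWaitingExlusiveOffset = (1 * kLogThreadCount),
    kWaitingSharedOffset   = (2 * kLogThreadCount),
    kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
    kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
    kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
};

static int32_t packCounts(int32_t shared, int32_t waitingExclusive, int32_t waitingShared) {
    return (shared           << kSharedOffset)
         | (waitingExclusive << kWaitingExlusiveOffset)
         | (waitingShared    << kWaitingSharedOffset);
}

int main() {
    // 3 running shared holders, 1 waiting exclusive, 2 waiting shared.
    int32_t counts = packCounts(3, 1, 2);
    assert(((counts & kSharedMask)           >> kSharedOffset)          == 3);
    assert(((counts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset) == 1);
    assert(((counts & kWaitingSharedMask)    >> kWaitingSharedOffset)   == 2);

    // Adjusting one field is a single add or subtract of (1 << offset), which is
    // what lets the real code use fetch_add / fetch_sub on the packed word.
    counts += 1 << kWaitingExlusiveOffset;
    std::printf("waiting exclusive: %d\n",
                (counts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset);
    return 0;
}
```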
212 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); } | |
213 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } | |
214 void SkSharedMutex::acquire() { | |
215 // Increment the count of exclusive queue waiters. | |
216 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, | |
217 sk_memory_order_acquire); | |
218 | |
219 // If there are no other exclusive waiters and no shared threads are running then run | |
220 // else wait. | |
221 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { | |
222 fExclusiveQueue.wait(); | |
223 } | |
224 ANNOTATE_RWLOCK_ACQUIRED(this, 1); | |
225 } | |
226 | |
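The load-bearing detail in acquire() is that fetch_add returns the previous value of the packed word, so a single atomic operation both registers this thread as an exclusive waiter and tells it whether anyone was already running or queued. A minimal sketch of just that decision, assuming std::atomic in place of SkAtomic and a hypothetical helper name:

```cpp
// Hypothetical sketch of the fetch_add-then-decide step in acquire().
#include <atomic>
#include <cstdint>

static const int     kLogThreadCount        = 10;
static const int32_t kSharedMask            = (1 << kLogThreadCount) - 1;
static const int32_t kWaitingExlusiveOffset = kLogThreadCount;
static const int32_t kWaitingExclusiveMask  = kSharedMask << kWaitingExlusiveOffset;

// Returns true if the caller may take the exclusive lock immediately; false means
// it must wait to be signaled by the previous owner.
static bool registerExclusiveWaiter(std::atomic<int32_t>& queueCounts) {
    int32_t old = queueCounts.fetch_add(1 << kWaitingExlusiveOffset,
                                        std::memory_order_acquire);
    bool othersWaiting  = (old & kWaitingExclusiveMask) > 0;
    bool readersRunning = (old & kSharedMask) > 0;
    return !othersWaiting && !readersRunning;
}

int main() {
    std::atomic<int32_t> queueCounts{0};
    bool first  = registerExclusiveWaiter(queueCounts);  // true: lock was uncontended
    bool second = registerExclusiveWaiter(queueCounts);  // false: a waiter is already queued
    return (first && !second) ? 0 : 1;
}
```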
227 void SkSharedMutex::release() { | |
228 ANNOTATE_RWLOCK_RELEASED(this, 1); | |
229 | |
230 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | |
231 int32_t waitingShared; | |
232 int32_t newQueueCounts; | |
233 do { | |
234 newQueueCounts = oldQueueCounts; | |
235 | |
236 // Decrement exclusive waiters. | |
237 newQueueCounts -= 1 << kWaitingExlusiveOffset; | |
238 | |
239 // The number of threads waiting to acquire a shared lock. | |
240 waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset; | |
241 | |
242 // If there are any move the counts of all the shared waiters to actual shared. They are | |
243 // going to run next. | |
244 if (waitingShared > 0) { | |
245 | |
246 // Set waiting shared to zero. | |
247 newQueueCounts &= ~kWaitingSharedMask; | |
248 | |
249 // Because this is the exclusive release, then there are zero readers. So, the bits | |
250 // for shared locks should be zero. Since those bits are zero, we can just |= in the | |
251 // waitingShared count instead of clearing with an &= and then |= the count. | |
252 newQueueCounts |= waitingShared << kSharedOffset; | |
253 } | |
254 | |
255 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, | |
256 sk_memory_order_release, sk_memory_order_relaxed)); | |
257 | |
121 if (waitingShared > 0) { | 258 if (waitingShared > 0) { |
122 | 259 // Run all the shared. |
123 // Set waiting shared to zero. | 260 fSharedQueue.signal(waitingShared); |
124 newQueueCounts &= ~kWaitingSharedMask; | 261 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
125 | 262 // Run a single exclusive waiter. |
126 // Because this is the exclusive release, then there are zero reader s. So, the bits | 263 fExclusiveQueue.signal(); |
127 // for shared locks should be zero. Since those bits are zero, we ca n just |= in the | 264 } |
128 // waitingShared count instead of clearing with an &= and then |= th e count. | 265 } |
129 newQueueCounts |= waitingShared << kSharedOffset; | 266 |
130 } | 267 void SkSharedMutex::acquireShared() { |
131 | 268 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
132 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, | 269 int32_t newQueueCounts; |
133 sk_memory_order_release, sk_memory_order_relaxed)); | 270 do { |
134 | 271 newQueueCounts = oldQueueCounts; |
135 if (waitingShared > 0) { | 272 // If there are waiting exclusives then this shared lock waits else it runs. |
136 // Run all the shared. | 273 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
137 fSharedQueue.signal(waitingShared); | 274 newQueueCounts += 1 << kWaitingSharedOffset; |
138 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | 275 } else { |
139 // Run a single exclusive waiter. | 276 newQueueCounts += 1 << kSharedOffset; |
140 fExclusiveQueue.signal(); | 277 } |
141 } | 278 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, |
142 } | 279 sk_memory_order_acquire, sk_memory_order_relaxed)); |
143 | 280 |
144 #ifdef SK_DEBUG | 281 // If there are waiting exclusives, then this shared waits until after it runs. |
145 void SkSharedMutex::assertHeld() const { | 282 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
146 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 283 fSharedQueue.wait(); |
147 // These are very loose asserts about the mutex being held exclusively. | 284 } |
148 SkASSERTF(0 == (queueCounts & kSharedMask), | 285 ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
149 "running shared: %d, exclusive: %d, waiting shared: %d", | 286 |
150 (queueCounts & kSharedMask) >> kSharedOffset, | 287 } |
151 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | 288 |
152 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | 289 void SkSharedMutex::releaseShared() { |
153 SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0, | 290 ANNOTATE_RWLOCK_RELEASED(this, 0); |
154 "running shared: %d, exclusive: %d, waiting shared: %d", | 291 |
155 (queueCounts & kSharedMask) >> kSharedOffset, | 292 // Decrement the shared count. |
156 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | 293 int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset, |
157 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | 294 sk_memory_order_release); |
158 } | 295 |
296 // If shared count is going to zero (because the old count == 1) and there are exclusive | |
297 // waiters, then run a single exclusive waiter. | |
298 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | |
299 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | |
300 fExclusiveQueue.signal(); | |
301 } | |
302 } | |
303 | |
159 #endif | 304 #endif |
160 | |
161 void SkSharedMutex::acquireShared() { | |
162 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | |
163 int32_t newQueueCounts; | |
164 do { | |
165 newQueueCounts = oldQueueCounts; | |
166 // If there are waiting exclusives then this shared lock waits else it runs. | |
167 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | |
168 newQueueCounts += 1 << kWaitingSharedOffset; | |
169 } else { | |
170 newQueueCounts += 1 << kSharedOffset; | |
171 } | |
172 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, | |
173 sk_memory_order_acquire, sk_memory_order_relaxed)); | |
174 | |
175 // If there are waiting exclusives, then this shared waits until after it runs. | |
176 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | |
177 fSharedQueue.wait(); | |
178 } | |
179 ANNOTATE_RWLOCK_ACQUIRED(this, 0); | |
180 | |
181 } | |
182 | |
183 void SkSharedMutex::releaseShared() { | |
184 ANNOTATE_RWLOCK_RELEASED(this, 0); | |
185 | |
186 // Decrement the shared count. | |
187 int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset, | |
188 sk_memory_order_release); | |
189 | |
190 // If shared count is going to zero (because the old count == 1) and there are exclusive | |
191 // waiters, then run a single exclusive waiter. | |
192 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | |
193 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | |
194 fExclusiveQueue.signal(); | |
195 } | |
196 } | |
197 | |
198 #ifdef SK_DEBUG | |
199 void SkSharedMutex::assertHeldShared() const { | |
200 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); | |
201 // A very loose assert about the mutex being shared. | |
202 SkASSERTF((queueCounts & kSharedMask) > 0, | |
203 "running shared: %d, exclusive: %d, waiting shared: %d", | |
204 (queueCounts & kSharedMask) >> kSharedOffset, | |
205 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | |
206 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | |
207 } | |
208 | |
209 #endif | |
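As a usage analogy only (not Skia code): both branches above implement the same reader/writer contract that C++17's std::shared_mutex exposes, one writer at a time via acquire()/release() and any number of concurrent readers via acquireShared()/releaseShared(). A short sketch of that contract with the standard type:

```cpp
// Analogy: std::shared_mutex's lock/unlock correspond to acquire()/release(),
// and lock_shared/unlock_shared to acquireShared()/releaseShared().
#include <cstdio>
#include <mutex>
#include <shared_mutex>
#include <thread>
#include <vector>

static std::shared_mutex gMutex;
static int gValue = 0;

static void reader() {
    std::shared_lock<std::shared_mutex> lock(gMutex);  // shared: readers may overlap
    std::printf("read %d\n", gValue);
}

static void writer() {
    std::unique_lock<std::shared_mutex> lock(gMutex);  // exclusive: writer runs alone
    ++gValue;
}

int main() {
    std::vector<std::thread> threads;
    threads.emplace_back(writer);
    for (int i = 0; i < 4; ++i) {
        threads.emplace_back(reader);
    }
    for (auto& t : threads) {
        t.join();
    }
    return 0;
}
```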