OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkSharedMutex.h" | 8 #include "SkSharedMutex.h" |
9 | 9 |
10 #include "SkAtomics.h" | 10 #include "SkAtomics.h" |
(...skipping 48 matching lines...)
59 | 59 |
60 #else | 60 #else |
61 | 61 |
62 #define ANNOTATE_RWLOCK_CREATE(lock) | 62 #define ANNOTATE_RWLOCK_CREATE(lock) |
63 #define ANNOTATE_RWLOCK_DESTROY(lock) | 63 #define ANNOTATE_RWLOCK_DESTROY(lock) |
64 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) | 64 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) |
65 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) | 65 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) |
66 | 66 |
67 #endif | 67 #endif |
68 | 68 |
69 // The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic. | 69 #ifdef SK_DEBUG |
70 // These three counts must be the same size, so each gets 10 bits. The 10 bits represent | 70 |
71 // the log of the count which is 1024. | 71 #include "SkTDArray.h" |
72 // | 72 #ifdef SK_BUILD_FOR_WIN |
73 // The three counts held in fQueueCounts are: | 73 #include <windows.h> |
74 // * Shared - the number of shared lock holders currently running. | 74 static int64_t get_thread_id() { return GetCurrentThreadId(); } |
75 // * WaitingExclusive - the number of threads waiting for an exclusive lock. | 75 #else |
76 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread | 76 #include <pthread.h> |
77 // to finish. | 77 static int64_t get_thread_id() { return (int64_t)pthread_self(); } |
78 static const int kLogThreadCount = 10; | 78 #endif |
79 | 79 |
80 enum { | 80 typedef int64_t ThreadID; |
81 kSharedOffset = (0 * kLogThreadCount), | 81 |
82 kWaitingExlusiveOffset = (1 * kLogThreadCount), | 82 class SkSharedMutex::ThreadIDSet { |
83 kWaitingSharedOffset = (2 * kLogThreadCount), | 83 public: |
84 kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, | 84 // Returns true if threadID is in the set. |
85 kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset, | 85 bool find(ThreadID threadID) const { |
86 kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, | 86 for (auto& t : fThreadIDs) { |
87 }; | 87 if (t == threadID) return true; |
88 | 88 } |
89 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); } | 89 return false; |
90 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } | 90 } |
91 void SkSharedMutex::acquire() { | 91 |
92 // Increment the count of exclusive queue waiters. | 92 // Returns true if did not already exist. |
93 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, | 93 bool tryAdd(ThreadID threadID) { |
94 sk_memory_order_acquire); | 94 for (auto& t : fThreadIDs) { |
95 | 95 if (t == threadID) return false; |
96 // If there are no other exclusive waiters and no shared threads are running then run | 96 } |
97 // else wait. | 97 fThreadIDs.append(1, &threadID); |
98 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { | 98 return true; |
99 fExclusiveQueue.wait(); | 99 } |
100 } | 100 // Returns true if already exists in Set. |
101 ANNOTATE_RWLOCK_ACQUIRED(this, 1); | 101 bool tryRemove(ThreadID threadID) { |
102 } | 102 for (int i = 0; i < fThreadIDs.count(); ++i) { |
103 | 103 if (fThreadIDs[i] == threadID) { |
104 void SkSharedMutex::release() { | 104 fThreadIDs.remove(i); |
105 ANNOTATE_RWLOCK_RELEASED(this, 1); | 105 return true; |
106 | 106 } |
107 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 107 } |
108 int32_t waitingShared; | 108 return false; |
109 int32_t newQueueCounts; | 109 } |
110 do { | 110 |
111 newQueueCounts = oldQueueCounts; | 111 void swap(ThreadIDSet& other) { |
112 | 112 fThreadIDs.swap(other.fThreadIDs); |
113 // Decrement exclusive waiters. | 113 } |
114 newQueueCounts -= 1 << kWaitingExlusiveOffset; | 114 |
115 | 115 int count() const { |
116 // The number of threads waiting to acquire a shared lock. | 116 return fThreadIDs.count(); |
117 waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset; | 117 } |
118 | 118 |
119 // If there are any move the counts of all the shared waiters to actual shared. They are | 119 private: |
120 // going to run next. | 120 SkTDArray<ThreadID> fThreadIDs; |
| 121 }; |
| 122 |
| 123 SkSharedMutex::SkSharedMutex() |
| 124 : fCurrentShared(new ThreadIDSet) |
| 125 , fWaitingExclusive(new ThreadIDSet) |
| 126 , fWaitingShared(new ThreadIDSet){ |
| 127 ANNOTATE_RWLOCK_CREATE(this); |
| 128 } |
| 129 |
| 130 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } |
| 131 |
| 132 void SkSharedMutex::acquire() { |
| 133 ThreadID threadID(get_thread_id()); |
| 134 int currentSharedCount; |
| 135 int waitingExclusiveCount; |
| 136 { |
| 137 SkAutoMutexAcquire l(&fMu); |
| 138 |
| 139 if (!fWaitingExclusive->tryAdd(threadID)) { |
| 140 SkDEBUGFAILF("Thread %lx already has an exclusive lock\n", threadID); |
| 141 } |
| 142 |
| 143 currentSharedCount = fCurrentShared->count(); |
| 144 waitingExclusiveCount = fWaitingExclusive->count(); |
| 145 } |
| 146 |
| 147 if (currentSharedCount > 0 || waitingExclusiveCount > 1) { |
| 148 fExclusiveQueue.wait(); |
| 149 } |
| 150 |
| 151 ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
| 152 } |
| 153 |
| 154 // Implementation Detail: |
| 155 // The shared threads need two seperate queues to keep the threads that were added after the |
| 156 // exclusive lock separate from the threads added before. |
| 157 void SkSharedMutex::release() { |
| 158 ANNOTATE_RWLOCK_RELEASED(this, 1); |
| 159 ThreadID threadID(get_thread_id()); |
| 160 int sharedWaitingCount; |
| 161 int exclusiveWaitingCount; |
| 162 int sharedQueueSelect; |
| 163 { |
| 164 SkAutoMutexAcquire l(&fMu); |
| 165 SkASSERT(0 == fCurrentShared->count()); |
| 166 if (!fWaitingExclusive->tryRemove(threadID)) { |
| 167 SkDEBUGFAILF("Thread %lx did not have the lock held.\n", threadID); |
| 168 } |
| 169 exclusiveWaitingCount = fWaitingExclusive->count(); |
| 170 sharedWaitingCount = fWaitingShared->count(); |
| 171 fWaitingShared.swap(fCurrentShared); |
| 172 sharedQueueSelect = fSharedQueueSelect; |
| 173 if (sharedWaitingCount > 0) { |
| 174 fSharedQueueSelect = 1 - fSharedQueueSelect; |
| 175 } |
| 176 } |
| 177 |
| 178 if (sharedWaitingCount > 0) { |
| 179 fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount); |
| 180 } else if (exclusiveWaitingCount > 0) { |
| 181 fExclusiveQueue.signal(); |
| 182 } |
| 183 } |
| 184 |
| 185 void SkSharedMutex::assertHeld() const { |
| 186 ThreadID threadID(get_thread_id()); |
| 187 SkAutoMutexAcquire l(&fMu); |
| 188 SkASSERT(0 == fCurrentShared->count()); |
| 189 SkASSERT(fWaitingExclusive->find(threadID)); |
| 190 } |
| 191 |
| 192 void SkSharedMutex::acquireShared() { |
| 193 ThreadID threadID(get_thread_id()); |
| 194 int exclusiveWaitingCount; |
| 195 int sharedQueueSelect; |
| 196 { |
| 197 SkAutoMutexAcquire l(&fMu); |
| 198 exclusiveWaitingCount = fWaitingExclusive->count(); |
| 199 if (exclusiveWaitingCount > 0) { |
| 200 if (!fWaitingShared->tryAdd(threadID)) { |
| 201 SkDEBUGFAILF("Thread %lx was already waiting!\n", threadID); |
| 202 } |
| 203 } else { |
| 204 if (!fCurrentShared->tryAdd(threadID)) { |
| 205 SkDEBUGFAILF("Thread %lx already holds a shared lock!\n", threadID); |
| 206 } |
| 207 } |
| 208 sharedQueueSelect = fSharedQueueSelect; |
| 209 } |
| 210 |
| 211 if (exclusiveWaitingCount > 0) { |
| 212 fSharedQueue[sharedQueueSelect].wait(); |
| 213 } |
| 214 |
| 215 ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
| 216 } |
| 217 |
| 218 void SkSharedMutex::releaseShared() { |
| 219 ANNOTATE_RWLOCK_RELEASED(this, 0); |
| 220 ThreadID threadID(get_thread_id()); |
| 221 |
| 222 int currentSharedCount; |
| 223 int waitingExclusiveCount; |
| 224 { |
| 225 SkAutoMutexAcquire l(&fMu); |
| 226 if (!fCurrentShared->tryRemove(threadID)) { |
| 227 SkDEBUGFAILF("Thread %lx does not hold a shared lock.\n", threadID); |
| 228 } |
| 229 currentSharedCount = fCurrentShared->count(); |
| 230 waitingExclusiveCount = fWaitingExclusive->count(); |
| 231 } |
| 232 |
| 233 if (0 == currentSharedCount && waitingExclusiveCount > 0) { |
| 234 fExclusiveQueue.signal(); |
| 235 } |
| 236 } |
| 237 |
| 238 void SkSharedMutex::assertHeldShared() const { |
| 239 ThreadID threadID(get_thread_id()); |
| 240 SkAutoMutexAcquire l(&fMu); |
| 241 SkASSERT(fCurrentShared->find(threadID)); |
| 242 } |
| 243 |
| 244 #else |
| 245 |
| 246 // The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic. |
| 247 // These three counts must be the same size, so each gets 10 bits. The 10 bits represent |
| 248 // the log of the count which is 1024. |
| 249 // |
| 250 // The three counts held in fQueueCounts are: |
| 251 // * Shared - the number of shared lock holders currently running. |
| 252 // * WaitingExclusive - the number of threads waiting for an exclusive lock. |
| 253 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread |
| 254 // to finish. |
| 255 static const int kLogThreadCount = 10; |
| 256 |
| 257 enum { |
| 258 kSharedOffset = (0 * kLogThreadCount), |
| 259 kWaitingExlusiveOffset = (1 * kLogThreadCount), |
| 260 kWaitingSharedOffset = (2 * kLogThreadCount), |
| 261 kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, |
| 262 kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset, |
| 263 kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, |
| 264 }; |
| 265 |
| 266 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); } |
| 267 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } |
| 268 void SkSharedMutex::acquire() { |
| 269 // Increment the count of exclusive queue waiters. |
| 270 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, |
| 271 sk_memory_order_acquire); |
| 272 |
| 273 // If there are no other exclusive waiters and no shared threads are running then run |
| 274 // else wait. |
| 275 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { |
| 276 fExclusiveQueue.wait(); |
| 277 } |
| 278 ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
| 279 } |
| 280 |
| 281 void SkSharedMutex::release() { |
| 282 ANNOTATE_RWLOCK_RELEASED(this, 1); |
| 283 |
| 284 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
| 285 int32_t waitingShared; |
| 286 int32_t newQueueCounts; |
| 287 do { |
| 288 newQueueCounts = oldQueueCounts; |
| 289 |
| 290 // Decrement exclusive waiters. |
| 291 newQueueCounts -= 1 << kWaitingExlusiveOffset; |
| 292 |
| 293 // The number of threads waiting to acquire a shared lock. |
| 294 waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset; |
| 295 |
| 296 // If there are any move the counts of all the shared waiters to actual shared. They are |
| 297 // going to run next. |
| 298 if (waitingShared > 0) { |
| 299 |
| 300 // Set waiting shared to zero. |
| 301 newQueueCounts &= ~kWaitingSharedMask; |
| 302 |
| 303 // Because this is the exclusive release, then there are zero readers. So, the bits |
| 304 // for shared locks should be zero. Since those bits are zero, we can just |= in the |
| 305 // waitingShared count instead of clearing with an &= and then |= the count. |
| 306 newQueueCounts |= waitingShared << kSharedOffset; |
| 307 } |
| 308 |
| 309 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, |
| 310 sk_memory_order_release, sk_memory_order_relaxed)); |
| 311 |
121 if (waitingShared > 0) { | 312 if (waitingShared > 0) { |
122 | 313 // Run all the shared. |
123 // Set waiting shared to zero. | 314 fSharedQueue.signal(waitingShared); |
124 newQueueCounts &= ~kWaitingSharedMask; | 315 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
125 | 316 // Run a single exclusive waiter. |
126 // Because this is the exclusive release, then there are zero readers. So, the bits | 317 fExclusiveQueue.signal(); |
127 // for shared locks should be zero. Since those bits are zero, we can just |= in the | 318 } |
128 // waitingShared count instead of clearing with an &= and then |= the count. | 319 } |
129 newQueueCounts |= waitingShared << kSharedOffset; | 320 |
130 } | 321 void SkSharedMutex::acquireShared() { |
131 | 322 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
132 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, | 323 int32_t newQueueCounts; |
133 sk_memory_order_release, sk_memory_order_relaxed)); | 324 do { |
134 | 325 newQueueCounts = oldQueueCounts; |
135 if (waitingShared > 0) { | 326 // If there are waiting exclusives then this shared lock waits else it runs. |
136 // Run all the shared. | 327 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
137 fSharedQueue.signal(waitingShared); | 328 newQueueCounts += 1 << kWaitingSharedOffset; |
138 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | 329 } else { |
139 // Run a single exclusive waiter. | 330 newQueueCounts += 1 << kSharedOffset; |
140 fExclusiveQueue.signal(); | 331 } |
141 } | 332 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, |
142 } | 333 sk_memory_order_acquire, sk_memory_order_relaxed)); |
143 | 334 |
144 #ifdef SK_DEBUG | 335 // If there are waiting exclusives, then this shared waits until after it runs. |
145 void SkSharedMutex::assertHeld() const { | 336 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
146 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 337 fSharedQueue.wait(); |
147 // These are very loose asserts about the mutex being held exclusively. | 338 } |
148 SkASSERTF(0 == (queueCounts & kSharedMask), | 339 ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
149 "running shared: %d, exclusive: %d, waiting shared: %d", | 340 |
150 (queueCounts & kSharedMask) >> kSharedOffset, | 341 } |
151 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | 342 |
152 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | 343 void SkSharedMutex::releaseShared() { |
153 SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0, | 344 ANNOTATE_RWLOCK_RELEASED(this, 0); |
154 "running shared: %d, exclusive: %d, waiting shared: %d", | 345 |
155 (queueCounts & kSharedMask) >> kSharedOffset, | 346 // Decrement the shared count. |
156 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | 347 int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset, |
157 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | 348 sk_memory_order_release); |
158 } | 349 |
| 350 // If shared count is going to zero (because the old count == 1) and there are exclusive |
| 351 // waiters, then run a single exclusive waiter. |
| 352 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
| 353 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
| 354 fExclusiveQueue.signal(); |
| 355 } |
| 356 } |
| 357 |
159 #endif | 358 #endif |
160 | |
161 void SkSharedMutex::acquireShared() { | |
162 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | |
163 int32_t newQueueCounts; | |
164 do { | |
165 newQueueCounts = oldQueueCounts; | |
166 // If there are waiting exclusives then this shared lock waits else it runs. | |
167 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | |
168 newQueueCounts += 1 << kWaitingSharedOffset; | |
169 } else { | |
170 newQueueCounts += 1 << kSharedOffset; | |
171 } | |
172 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, | |
173 sk_memory_order_acquire, sk_memory_order_relaxed)); | |
174 | |
175 // If there are waiting exclusives, then this shared waits until after it runs. | |
176 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | |
177 fSharedQueue.wait(); | |
178 } | |
179 ANNOTATE_RWLOCK_ACQUIRED(this, 0); | |
180 | |
181 } | |
182 | |
183 void SkSharedMutex::releaseShared() { | |
184 ANNOTATE_RWLOCK_RELEASED(this, 0); | |
185 | |
186 // Decrement the shared count. | |
187 int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset, | |
188 sk_memory_order_release); | |
189 | |
190 // If shared count is going to zero (because the old count == 1) and there are exclusive | |
191 // waiters, then run a single exclusive waiter. | |
192 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | |
193 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | |
194 fExclusiveQueue.signal(); | |
195 } | |
196 } | |
197 | |
198 #ifdef SK_DEBUG | |
199 void SkSharedMutex::assertHeldShared() const { | |
200 int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed); | |
201 // A very loose assert about the mutex being shared. | |
202 SkASSERTF((queueCounts & kSharedMask) > 0, | |
203 "running shared: %d, exclusive: %d, waiting shared: %d", | |
204 (queueCounts & kSharedMask) >> kSharedOffset, | |
205 (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset, | |
206 (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset); | |
207 } | |
208 | |
209 #endif | |
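
Editor's note: the comments in the non-debug (#else) path describe packing three 10-bit counts into one int32_t. A minimal standalone sketch of that layout follows; the constant names are paraphrased (the patch itself spells the offset kWaitingExlusiveOffset) and the example values are illustrative, not Skia source.

// Illustration only: three 10-bit counts sharing one int32_t, as described above.
#include <cassert>
#include <cstdint>

static const int kLogThreadCount = 10;  // 10 bits per field

enum {
    kSharedOffset           = 0 * kLogThreadCount,
    kWaitingExclusiveOffset = 1 * kLogThreadCount,
    kWaitingSharedOffset    = 2 * kLogThreadCount,
    kSharedMask           = ((1 << kLogThreadCount) - 1) << kSharedOffset,
    kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset,
    kWaitingSharedMask    = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
};

int main() {
    // Example state at an exclusive release: no shared holders running,
    // one exclusive waiter (the releasing thread), two shared waiters queued.
    int32_t counts = (0 << kSharedOffset)
                   | (1 << kWaitingExclusiveOffset)
                   | (2 << kWaitingSharedOffset);

    // Extracting a field: mask, then shift down.
    int32_t waitingShared = (counts & kWaitingSharedMask) >> kWaitingSharedOffset;
    assert(waitingShared == 2);

    // The move release() performs: the waiting-shared count becomes the running-shared
    // count. The shared field is zero at exclusive release, so |= is sufficient.
    counts &= ~kWaitingSharedMask;
    counts |= waitingShared << kSharedOffset;
    assert(((counts & kSharedMask) >> kSharedOffset) == 2);
    return 0;
}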
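For context, a brief usage sketch of the API this file implements: acquire()/release() take the lock exclusively, acquireShared()/releaseShared() take it shared, and the debug build also provides assertHeld()/assertHeldShared(). The surrounding reader/writer code is hypothetical, not from the patch.

#include "SkSharedMutex.h"

static SkSharedMutex gSharedMutex;  // hypothetical lock protecting gValue
static int gValue = 0;

static int read_value() {
    gSharedMutex.acquireShared();    // any number of readers may run concurrently
#ifdef SK_DEBUG
    gSharedMutex.assertHeldShared(); // debug-only sanity check
#endif
    int v = gValue;
    gSharedMutex.releaseShared();
    return v;
}

static void write_value(int v) {
    gSharedMutex.acquire();          // exclusive: waits for running readers and queued writers
#ifdef SK_DEBUG
    gSharedMutex.assertHeld();
#endif
    gValue = v;
    gSharedMutex.release();
}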