Chromium Code Reviews

Unified Diff: src/core/SkSharedMutex.cpp

Issue 1307863009: Add debug mode to shared mutex. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: fixed up Created 5 years, 3 months ago
Index: src/core/SkSharedMutex.cpp
diff --git a/src/core/SkSharedMutex.cpp b/src/core/SkSharedMutex.cpp
index 05e434c35b8f259b7e645e5b4119ec2d2b8d1f58..15b2d1939cfea491a21a1766ff84412e2aae80f3 100644
--- a/src/core/SkSharedMutex.cpp
+++ b/src/core/SkSharedMutex.cpp
@@ -66,144 +66,239 @@ void AnnotateRWLockReleased(
#endif
-// The fQueueCounts field holds several counts in a single int32_t so that they can be
-// managed atomically. The three counts must be the same size, so each gets 10 bits,
-// allowing counts of up to 1024 (2^10) threads.
-//
-// The three counts held in fQueueCounts are:
-// * Shared - the number of shared lock holders currently running.
-// * WaitingExclusive - the number of threads waiting for an exclusive lock.
-// * WaitingShared - the number of threads that want a shared lock but must wait for an
-//   exclusive thread to finish.
-static const int kLogThreadCount = 10;
-
-enum {
- kSharedOffset = (0 * kLogThreadCount),
- kWaitingExlusiveOffset = (1 * kLogThreadCount),
- kWaitingSharedOffset = (2 * kLogThreadCount),
- kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset,
- kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
- kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
-};
-
-SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
-SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
-void SkSharedMutex::acquire() {
- // Increment the count of exclusive queue waiters.
- int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
- sk_memory_order_acquire);
-
- // If there are no other exclusive waiters and no shared threads are running, then run;
- // otherwise wait.
- if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
- fExclusiveQueue.wait();
+#ifdef SK_DEBUG
+
+ SkSharedMutex::SkSharedMutex() { ANNOTATE_RWLOCK_CREATE(this); }
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+ void SkSharedMutex::acquire() {
+ ThreadID threadID;
+ int currentShardCount;
mtklein_C 2015/09/17 19:51:42 shard -> shared?
herb_g 2015/09/17 21:39:41 Done.
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexAcquire l(&fMu);
+
+ if (!fWaitingExclusive.tryAdd(threadID)) {
+ SkDebugf("Thread %lx already has an exclusive lock\n", threadID.toInt());
+ SkASSERT(false);
mtklein_C 2015/09/17 19:51:42 SkFAIL or SkDEBUGFAIL
herb_g 2015/09/17 21:39:41 Done.
+ }
+
+ currentShardCount = fCurrentShared.count();
+ waitingExclusiveCount = fWaitingExclusive.count();
+ }
+
+ if (currentShardCount > 0 || waitingExclusiveCount > 1) {
+ fExclusiveQueue.wait();
+ }
+
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
}
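
The fCurrentShared, fWaitingExclusive, and fWaitingShared members used above are per-lock sets of thread ids declared in SkSharedMutex.h (changed in this same patch, not shown in this file's diff). As a reading aid, here is a minimal sketch of the interface the debug code relies on; the class name and the vector-backed implementation are invented for illustration:

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    using ThreadID = int64_t;  // stand-in; the real type wraps the OS thread id

    // Illustrative only -- not the patch's actual container.
    class ThreadIDSetSketch {
    public:
        // Adds id; returns false if it was already present.
        bool tryAdd(ThreadID id) {
            if (this->find(id)) { return false; }
            fIDs.push_back(id);
            return true;
        }
        // Removes id; returns false if it was not present.
        bool tryRemove(ThreadID id) {
            auto it = std::find(fIDs.begin(), fIDs.end(), id);
            if (it == fIDs.end()) { return false; }
            fIDs.erase(it);
            return true;
        }
        bool find(ThreadID id) const {
            return std::find(fIDs.begin(), fIDs.end(), id) != fIDs.end();
        }
        int count() const { return (int)fIDs.size(); }
        void swap(ThreadIDSetSketch& other) { fIDs.swap(other.fIDs); }

    private:
        std::vector<ThreadID> fIDs;
    };
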
- ANNOTATE_RWLOCK_ACQUIRED(this, 1);
-}
-void SkSharedMutex::release() {
- ANNOTATE_RWLOCK_RELEASED(this, 1);
+ // Implementation Detail:
+ // The shared threads need two separate queues to keep the threads that were added after the
+ // exclusive lock separate from the threads added before.
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+ ThreadID threadID;
+ int sharedWaitingCount;
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexAcquire l(&fMu);
+ SkASSERT(0 == fCurrentShared.count());
+ if (!fWaitingExclusive.tryRemove(threadID)) {
+ SkDebugf("Thread %lx did not have the lock held.\n", threadID.toInt());
mtklein_C 2015/09/17 19:51:42 ditto, etc.
herb_g 2015/09/17 21:39:41 Done.
+ SkASSERT(false);
+ }
+ exclusiveWaitingCount = fWaitingExclusive.count();
+ sharedWaitingCount = fWaitingShared.count();
+ fWaitingShared.swap(fCurrentShared);
+ sharedQueueSelect = fSharedQueueSelect;
+ if (sharedWaitingCount > 0) {
+ fSharedQueueSelect = 1 - fSharedQueueSelect;
+ }
+ }
- int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
- int32_t waitingShared;
- int32_t newQueueCounts;
- do {
- newQueueCounts = oldQueueCounts;
+ if (sharedWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
+ } else if (exclusiveWaitingCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
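
To make the two-queue scheme above concrete: readers that arrive while an exclusive thread is waiting park on fSharedQueue[fSharedQueueSelect]; the exclusive release wakes exactly that generation and flips the selector so later readers park on the other queue. A standalone sketch of the idea, using C++20 std::counting_semaphore rather than Skia's SkSemaphore (names invented):

    #include <semaphore>

    struct TwoQueueSketch {
        std::counting_semaphore<1024> fQueue0{0}, fQueue1{0};
        std::counting_semaphore<1024>* fQueue[2] = {&fQueue0, &fQueue1};
        int fSelect = 0;  // guarded by an external mutex, as fMu guards fSharedQueueSelect

        // Called by a reader that must wait behind an exclusive thread; the
        // reader samples the selector under the mutex, then blocks outside it.
        void parkReader(int select) { fQueue[select]->acquire(); }

        // Called by the exclusive release: wake this generation of readers
        // and flip the selector so the next generation uses the other queue.
        void wakeReaders(int count) {
            int old = fSelect;
            fSelect = 1 - fSelect;
            fQueue[old]->release(count);
        }
    };
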
- // Decrement exclusive waiters.
- newQueueCounts -= 1 << kWaitingExlusiveOffset;
+ void SkSharedMutex::assertHeld() const {
+ ThreadID threadID;
+ SkAutoMutexAcquire l(&fMu);
+ SkASSERT(0 == fCurrentShared.count());
+ SkASSERT(fWaitingExclusive.find(threadID));
+ }
- // The number of threads waiting to acquire a shared lock.
- waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
+ void SkSharedMutex::acquireShared() {
+ ThreadID threadID;
+ int exclusiveWaitingCount;
+ int sharedQueueSelect;
+ {
+ SkAutoMutexAcquire l(&fMu);
+ exclusiveWaitingCount = fWaitingExclusive.count();
+ if (exclusiveWaitingCount > 0) {
+ if (!fWaitingShared.tryAdd(threadID)) {
+ SkDebugf("Thread %lx was already waiting!\n", threadID.toInt());
+ SkASSERT(false);
+ }
+ } else {
+ if (!fCurrentShared.tryAdd(threadID)) {
+ SkDebugf("Thread %lx already holds a shared lock!\n", threadID.toInt());
+ SkASSERT(false);
+ }
+ }
+ sharedQueueSelect = fSharedQueueSelect;
+ }
- // If there are any, move the count of shared waiters over to the actual shared
- // count; they are going to run next.
- if (waitingShared > 0) {
+ if (exclusiveWaitingCount > 0) {
+ fSharedQueue[sharedQueueSelect].wait();
+ }
- // Set waiting shared to zero.
- newQueueCounts &= ~kWaitingSharedMask;
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+ }
- // Because this is the exclusive release, there are zero readers, so the bits
- // for shared locks are already zero. That lets us just |= in the waitingShared
- // count instead of clearing with an &= and then |='ing the count in.
- newQueueCounts |= waitingShared << kSharedOffset;
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
+ ThreadID threadID;
+
+ int currentSharedCount;
+ int waitingExclusiveCount;
+ {
+ SkAutoMutexAcquire l(&fMu);
+ if (!fCurrentShared.tryRemove(threadID)) {
+ SkDebugf("Thread %lx does not hold a shared lock.\n", threadID.toInt());
+ SkASSERT(false);
+ }
+ currentSharedCount = fCurrentShared.count();
+ waitingExclusiveCount = fWaitingExclusive.count();
}
- } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
- sk_memory_order_release, sk_memory_order_relaxed));
+ if (0 == currentSharedCount && waitingExclusiveCount > 0) {
+ fExclusiveQueue.signal();
+ }
+ }
- if (waitingShared > 0) {
- // Run all the shared.
- fSharedQueue.signal(waitingShared);
- } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
- // Run a single exclusive waiter.
- fExclusiveQueue.signal();
+ void SkSharedMutex::assertHeldShared() const {
+ ThreadID threadID;
+ SkAutoMutexAcquire l(&fMu);
+ SkASSERT(fCurrentShared.find(threadID));
}
-}
-#ifdef SK_DEBUG
-void SkSharedMutex::assertHeld() const {
- int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
- // These are very loose asserts about the mutex being held exclusively.
- SkASSERTF(0 == (queueCounts & kSharedMask),
- "running shared: %d, exclusive: %d, waiting shared: %d",
- (queueCounts & kSharedMask) >> kSharedOffset,
- (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
- (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
- SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0,
- "running shared: %d, exclusive: %d, waiting shared: %d",
- (queueCounts & kSharedMask) >> kSharedOffset,
- (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
- (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
-}
-#endif
+#else
-void SkSharedMutex::acquireShared() {
- int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
- int32_t newQueueCounts;
- do {
- newQueueCounts = oldQueueCounts;
- // If there are waiting exclusives then this shared lock waits else it runs.
+ // The fQueueCounts field holds several counts in a single int32_t so that they can be
+ // managed atomically. The three counts must be the same size, so each gets 10 bits,
+ // allowing counts of up to 1024 (2^10) threads.
+ //
+ // The three counts held in fQueueCounts are:
+ // * Shared - the number of shared lock holders currently running.
+ // * WaitingExclusive - the number of threads waiting for an exclusive lock.
+ // * WaitingShared - the number of threads that want a shared lock but must wait for an
+ //   exclusive thread to finish.
+ static const int kLogThreadCount = 10;
+
+ enum {
+ kSharedOffset = (0 * kLogThreadCount),
+ kWaitingExlusiveOffset = (1 * kLogThreadCount),
+ kWaitingSharedOffset = (2 * kLogThreadCount),
+ kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset,
+ kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
+ kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
+ };
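
As a quick, self-contained illustration of this packing (the constants mirror the enum above; the helper names and main() are invented):

    #include <cstdint>
    #include <cstdio>

    static const int kLog = 10;                    // kLogThreadCount
    static const int32_t kMask = (1 << kLog) - 1;  // the low 10 bits

    static int sharedCount(int32_t c)      { return (c >> (0 * kLog)) & kMask; }
    static int waitingExclusive(int32_t c) { return (c >> (1 * kLog)) & kMask; }
    static int waitingShared(int32_t c)    { return (c >> (2 * kLog)) & kMask; }

    int main() {
        int32_t counts = 0;
        counts += 3 << (0 * kLog);  // three shared holders running
        counts += 1 << (1 * kLog);  // one thread waiting for exclusive
        std::printf("shared=%d exclusive=%d waitingShared=%d\n",
                    sharedCount(counts), waitingExclusive(counts), waitingShared(counts));
        // prints: shared=3 exclusive=1 waitingShared=0
        return 0;
    }
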
+
+ SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); }
+ SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+ void SkSharedMutex::acquire() {
+ // Increment the count of exclusive queue waiters.
+ int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset,
+ sk_memory_order_acquire);
+
+ // If there are no other exclusive waiters and no shared threads are running, then run;
+ // otherwise wait.
+ if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
+ fExclusiveQueue.wait();
+ }
+ ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+ }
+
+ void SkSharedMutex::release() {
+ ANNOTATE_RWLOCK_RELEASED(this, 1);
+
+ int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+ int32_t waitingShared;
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+
+ // Decrement exclusive waiters.
+ newQueueCounts -= 1 << kWaitingExlusiveOffset;
+
+ // The number of threads waiting to acquire a shared lock.
+ waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
+
+ // If there are any, move the count of shared waiters over to the actual shared
+ // count; they are going to run next.
+ if (waitingShared > 0) {
+
+ // Set waiting shared to zero.
+ newQueueCounts &= ~kWaitingSharedMask;
+
+ // Because this is the exclusive release, there are zero readers, so the bits
+ // for shared locks are already zero. That lets us just |= in the waitingShared
+ // count instead of clearing with an &= and then |='ing the count in.
+ newQueueCounts |= waitingShared << kSharedOffset;
+ }
+
+ } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
+ sk_memory_order_release, sk_memory_order_relaxed));
+
+ if (waitingShared > 0) {
+ // Run all the shared.
+ fSharedQueue.signal(waitingShared);
+ } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ // Run a single exclusive waiter.
+ fExclusiveQueue.signal();
+ }
+ }
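
The loop above is the standard compare-and-swap retry shape: recompute the new value from the freshly observed old value on every iteration until the exchange succeeds. For reference, the same shape rendered with std::atomic instead of Skia's sk_atomic wrappers (the function name is invented):

    #include <atomic>
    #include <cstdint>

    static const int kLogThreadCount = 10;

    static void decrementWaitingExclusive(std::atomic<int32_t>& queueCounts) {
        int32_t oldCounts = queueCounts.load(std::memory_order_relaxed);
        int32_t newCounts;
        do {
            // Recompute the update from the freshly observed value each try;
            // compare_exchange_weak reloads oldCounts on failure.
            newCounts = oldCounts - (1 << (1 * kLogThreadCount));
        } while (!queueCounts.compare_exchange_weak(oldCounts, newCounts,
                                                    std::memory_order_release,
                                                    std::memory_order_relaxed));
    }
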
+
+ void SkSharedMutex::acquireShared() {
+ int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+ int32_t newQueueCounts;
+ do {
+ newQueueCounts = oldQueueCounts;
+ // If there are waiting exclusives then this shared lock waits else it runs.
+ if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
+ newQueueCounts += 1 << kWaitingSharedOffset;
+ } else {
+ newQueueCounts += 1 << kSharedOffset;
+ }
+ } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
+ sk_memory_order_acquire, sk_memory_order_relaxed));
+
+ // If there are waiting exclusives, this shared lock waits here until it is
+ // signaled by an exclusive release.
if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
- newQueueCounts += 1 << kWaitingSharedOffset;
- } else {
- newQueueCounts += 1 << kSharedOffset;
+ fSharedQueue.wait();
}
- } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
- sk_memory_order_acquire, sk_memory_order_relaxed));
+ ANNOTATE_RWLOCK_ACQUIRED(this, 0);
- // If there are waiting exclusives, this shared lock waits here until it is
- // signaled by an exclusive release.
- if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
- fSharedQueue.wait();
}
- ANNOTATE_RWLOCK_ACQUIRED(this, 0);
-
-}
-void SkSharedMutex::releaseShared() {
- ANNOTATE_RWLOCK_RELEASED(this, 0);
+ void SkSharedMutex::releaseShared() {
+ ANNOTATE_RWLOCK_RELEASED(this, 0);
- // Decrement the shared count.
- int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
- sk_memory_order_release);
+ // Decrement the shared count.
+ int32_t oldQueueCounts = fQueueCounts.fetch_sub(1 << kSharedOffset,
+ sk_memory_order_release);
- // If shared count is going to zero (because the old count == 1) and there are exclusive
- // waiters, then run a single exclusive waiter.
- if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
- && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
- fExclusiveQueue.signal();
+ // If shared count is going to zero (because the old count == 1) and there are exclusive
+ // waiters, then run a single exclusive waiter.
+ if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
+ && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
+ fExclusiveQueue.signal();
+ }
}
-}
-
-#ifdef SK_DEBUG
-void SkSharedMutex::assertHeldShared() const {
- int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
- // A very loose assert about the mutex being shared.
- SkASSERTF((queueCounts & kSharedMask) > 0,
- "running shared: %d, exclusive: %d, waiting shared: %d",
- (queueCounts & kSharedMask) >> kSharedOffset,
- (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
- (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
-}
#endif
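
For context, a typical use of the class looks like this; the acquire/release method names come from the patch, while the include path and the surrounding code are invented for the example:

    #include "src/core/SkSharedMutex.h"

    SkSharedMutex gMutex;
    int gValue = 0;

    // Reader: many may run concurrently under the shared lock.
    int readValue() {
        gMutex.acquireShared();
        int v = gValue;
        gMutex.releaseShared();
        return v;
    }

    // Writer: runs exclusively. In SK_DEBUG builds the new code also
    // verifies that this thread does not already hold the lock.
    void writeValue(int v) {
        gMutex.acquire();
        gValue = v;
        gMutex.release();
    }
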
