Index: src/core/SkSharedMutex.cpp
diff --git a/src/core/SkSharedMutex.cpp b/src/core/SkSharedMutex.cpp
index 4cf63120676e658f43b1b354c22d27b08eb7d549..82dca8b334775c58ac06c4a72eee76bc8382dd53 100644
--- a/src/core/SkSharedMutex.cpp
+++ b/src/core/SkSharedMutex.cpp
@@ -66,6 +66,128 @@ void AnnotateRWLockReleased(
 #endif
+#ifdef SK_DEBUG
+
+SkSharedMutex::SkSharedMutex() { ANNOTATE_RWLOCK_CREATE(this); }
+SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); }
+void SkSharedMutex::acquire() {
+    int currentSharedCount;
+    int waitingExclusiveCount;
+    {
+        SkAutoMutexAcquire l(&fMu);
+        SkThreadID threadID;
+        if (!fWaitingExclusive.TryAdd(threadID)) {
+            SkDebugf("Thread %lx already has an exclusive lock\n", threadID.toInt());
+            SkASSERT(false);
+        }
+
+        currentSharedCount = fCurrentShared.Count();
+        waitingExclusiveCount = fWaitingExclusive.Count();
+    }
+
+    // This thread is already in fWaitingExclusive, so a count of 1 means no other writer
+    // holds or wants the lock; block only if readers hold it or another writer is ahead.
+    if (currentSharedCount > 0 || waitingExclusiveCount > 1) {
+        fExclusiveQueue.wait();
+    }
+
+    ANNOTATE_RWLOCK_ACQUIRED(this, 1);
+}
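Side note: `fCurrentShared`, `fWaitingExclusive`, and `fWaitingShared` are some set-of-thread-IDs helper declared in the header, which is not part of this patch. A minimal sketch of the interface the debug code relies on, assuming SkThreadID is equality-comparable and default-constructs to the calling thread's ID (both inferred from usage here, not confirmed by this hunk):

```cpp
#include <algorithm>
#include <vector>

// Hypothetical stand-in for the thread-ID set used by the debug code above;
// the real type lives in the (unshown) header.
class ThreadIDSet {
public:
    // Adds threadID and returns true, or returns false if it was already present.
    bool TryAdd(SkThreadID threadID) {
        if (this->Find(threadID)) {
            return false;
        }
        fThreadIDs.push_back(threadID);
        return true;
    }
    // Removes threadID and returns true, or returns false if it was not present.
    bool TryRemove(SkThreadID threadID) {
        auto it = std::find(fThreadIDs.begin(), fThreadIDs.end(), threadID);
        if (it == fThreadIDs.end()) {
            return false;
        }
        fThreadIDs.erase(it);
        return true;
    }
    bool Find(SkThreadID threadID) const {
        return std::find(fThreadIDs.begin(), fThreadIDs.end(), threadID)
                != fThreadIDs.end();
    }
    int Count() const { return (int)fThreadIDs.size(); }
    void Swap(ThreadIDSet& other) { fThreadIDs.swap(other.fThreadIDs); }

private:
    std::vector<SkThreadID> fThreadIDs;
};
```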
+
+// Implementation Detail:
+// The shared threads need two separate queues to keep the threads that arrive after the
+// exclusive lock request separate from the threads that arrived before it.
+void SkSharedMutex::release() {
+    ANNOTATE_RWLOCK_RELEASED(this, 1);
+    int sharedWaitingCount;
+    int exclusiveWaitingCount;
+    int sharedQueueSelect;
+    {
+        SkAutoMutexAcquire l(&fMu);
+        SkASSERT(0 == fCurrentShared.Count());
+        SkThreadID threadID;
+        if (!fWaitingExclusive.TryRemove(threadID)) {
+            SkDebugf("Thread %lx did not have the lock held.\n", threadID.toInt());
+            SkASSERT(false);
+        }
+        exclusiveWaitingCount = fWaitingExclusive.Count();
+        sharedWaitingCount = fWaitingShared.Count();
+        // Promote the readers that queued up behind this writer; readers arriving
+        // from now on park on the other shared queue.
+        fWaitingShared.Swap(fCurrentShared);
+        sharedQueueSelect = fSharedQueueSelect;
+        if (sharedWaitingCount > 0) {
+            fSharedQueueSelect = 1 - fSharedQueueSelect;
+        }
+    }
+
+    // Wake the promoted readers if there are any; otherwise hand off to the next writer.
+    if (sharedWaitingCount > 0) {
+        fSharedQueue[sharedQueueSelect].signal(sharedWaitingCount);
+    } else if (exclusiveWaitingCount > 0) {
+        fExclusiveQueue.signal();
+    }
+}
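The selector flip is the subtle part of the two-queue scheme described above: it guarantees that a reader arriving after release() has chosen which threads to wake cannot steal one of those wake-ups. A self-contained sketch of just that hand-off, with C++20 std::counting_semaphore standing in for SkSemaphore (the names and structure here are illustrative, not Skia's):

```cpp
#include <mutex>
#include <semaphore>

class TwoQueueGate {
public:
    // A shared waiter parks on whichever queue is currently selected.
    void wait() {
        int select;
        {
            std::lock_guard<std::mutex> l(fMu);
            select = fSelect;
            fWaiting[select] += 1;
        }
        fQueue[select].acquire();
    }

    // The releasing writer wakes everyone parked on the selected queue, then
    // flips the selector so waiters arriving afterwards park on the other
    // queue and cannot consume these wake-ups.
    void releaseAll() {
        int select;
        int waiting;
        {
            std::lock_guard<std::mutex> l(fMu);
            select = fSelect;
            waiting = fWaiting[select];
            fWaiting[select] = 0;
            if (waiting > 0) {
                fSelect = 1 - fSelect;
            }
        }
        if (waiting > 0) {
            fQueue[select].release(waiting);
        }
    }

private:
    std::mutex fMu;
    int fSelect = 0;
    int fWaiting[2] = {0, 0};
    std::counting_semaphore<> fQueue[2]{std::counting_semaphore<>(0),
                                        std::counting_semaphore<>(0)};
};
```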
+
+void SkSharedMutex::assertHeld() const {
+    SkThreadID threadID;
+    SkAutoMutexAcquire l(&fMu);
+    SkASSERT(0 == fCurrentShared.Count());
+    // The exclusive holder stays in fWaitingExclusive until release(), so Find() locates it.
+    SkASSERT(fWaitingExclusive.Find(threadID));
+}
+
+void SkSharedMutex::acquireShared() {
+    int exclusiveWaitingCount;
+    SkThreadID threadID;
+    int sharedQueueSelect;
+    {
+        SkAutoMutexAcquire l(&fMu);
+        exclusiveWaitingCount = fWaitingExclusive.Count();
+        if (exclusiveWaitingCount > 0) {
+            // A writer holds the lock or is queued; line up behind it.
+            if (!fWaitingShared.TryAdd(threadID)) {
+                SkDebugf("Thread %lx was already waiting!\n", threadID.toInt());
+                SkASSERT(false);
+            }
+        } else {
+            if (!fCurrentShared.TryAdd(threadID)) {
+                SkDebugf("Thread %lx already holds a shared lock!\n", threadID.toInt());
+                SkASSERT(false);
+            }
+        }
+        sharedQueueSelect = fSharedQueueSelect;
+    }
+
+    if (exclusiveWaitingCount > 0) {
+        fSharedQueue[sharedQueueSelect].wait();
+    }
+
+    ANNOTATE_RWLOCK_ACQUIRED(this, 0);
+}
+
+void SkSharedMutex::releaseShared() {
+    ANNOTATE_RWLOCK_RELEASED(this, 0);
+
+    int currentSharedCount;
+    int waitingExclusiveCount;
+    SkThreadID threadID;
+    {
+        SkAutoMutexAcquire l(&fMu);
+        if (!fCurrentShared.TryRemove(threadID)) {
+            SkDebugf("Thread %lx does not hold a shared lock.\n", threadID.toInt());
+            SkASSERT(false);
+        }
+        currentSharedCount = fCurrentShared.Count();
+        waitingExclusiveCount = fWaitingExclusive.Count();
+    }
+
+    // The last reader out hands the lock to a waiting writer, if any.
+    if (0 == currentSharedCount && waitingExclusiveCount > 0) {
+        fExclusiveQueue.signal();
+    }
+}
+
+void SkSharedMutex::assertHeldShared() const {
+    SkThreadID threadID;
+    SkAutoMutexAcquire l(&fMu);
+    SkASSERT(fCurrentShared.Find(threadID));
+}
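Taken together, acquire()/release() and acquireShared()/releaseShared() give the usual writer/reader pairing, with the assert methods available for callers to sanity-check lock state in debug builds. A hypothetical call site; the variable and function names are invented for illustration, and the include path is assumed:

```cpp
#include "SkSharedMutex.h"  // assumed include path

SkSharedMutex gMutex;
int gValue = 0;

// Any number of threads may read concurrently.
int readValue() {
    gMutex.acquireShared();
    gMutex.assertHeldShared();  // debug-only bookkeeping check from this patch
    int v = gValue;
    gMutex.releaseShared();
    return v;
}

// A writer excludes readers and other writers.
void writeValue(int v) {
    gMutex.acquire();
    gMutex.assertHeld();        // debug-only bookkeeping check from this patch
    gValue = v;
    gMutex.release();
}
```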
+
+#else
+
 // The fQueueCounts fields holds many counts in an int32_t in order to make managing them atomic.
 // These three counts must be the same size, so each gets 10 bits. The 10 bits represent
 // the log of the count which is 1024.
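The comment above is terse: each of the three counts gets 10 bits, so each can run as high as 1024 (2^10). A sketch of the packing that the removed asserts below index into; the constant names mirror identifiers visible in those asserts (including the "Exlusive" spelling), but the definitions live in an unshown part of the file, so the values here are assumptions:

```cpp
#include <cstdint>

// Assumed layout: bits [0,10) running shared, [10,20) waiting exclusive,
// [20,30) waiting shared, all packed into one atomic int32_t.
constexpr int kLogThreadCount = 10;

constexpr int32_t kSharedOffset          = 0 * kLogThreadCount;
constexpr int32_t kWaitingExlusiveOffset = 1 * kLogThreadCount;
constexpr int32_t kWaitingSharedOffset   = 2 * kLogThreadCount;

constexpr int32_t kSharedMask           = ((1 << kLogThreadCount) - 1) << kSharedOffset;
constexpr int32_t kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset;
constexpr int32_t kWaitingSharedMask    = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset;

// Unpacking one field, the same way the asserts below do:
constexpr int sharedCount(int32_t queueCounts) {
    return (queueCounts & kSharedMask) >> kSharedOffset;
}
```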
@@ -141,23 +263,6 @@ void SkSharedMutex::release() {
     }
 }

-#ifdef SK_DEBUG
-void SkSharedMutex::assertHeld() const {
-    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
-    // These are very loose asserts about the mutex being held exclusively.
-    SkASSERTF(0 == (queueCounts & kSharedMask),
-              "running shared: %d, exclusive: %d, waiting shared: %d",
-              (queueCounts & kSharedMask) >> kSharedOffset,
-              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
-              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
-    SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0,
-              "running shared: %d, exclusive: %d, waiting shared: %d",
-              (queueCounts & kSharedMask) >> kSharedOffset,
-              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
-              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
-}
-#endif
-
 void SkSharedMutex::acquireShared() {
     int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
     int32_t newQueueCounts;
@@ -195,15 +300,4 @@ void SkSharedMutex::releaseShared() {
     }
 }

-#ifdef SK_DEBUG
-void SkSharedMutex::assertHeldShared() const {
-    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
-    // A very loose assert about the mutex being shared.
-    SkASSERTF((queueCounts & kSharedMask) > 0,
-              "running shared: %d, exclusive: %d, waiting shared: %d",
-              (queueCounts & kSharedMask) >> kSharedOffset,
-              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
-              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
-}
-
 #endif