Index: src/core/SkSharedMutex.cpp
diff --git a/src/core/SkSharedMutex.cpp b/src/core/SkSharedMutex.cpp
index b9af10a2beb368e9564159bc2dfe1a1b4f4d163e..78472934fc23d19f10f94c4bbae74143be8ab663 100644
--- a/src/core/SkSharedMutex.cpp
+++ b/src/core/SkSharedMutex.cpp
@@ -141,6 +141,22 @@ void SkSharedMutex::release() {
     }
 }
 
+void SkSharedMutex::assertHeld() const {
+    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+    // These are very loose asserts about the mutex being held exclusively.
+    SkASSERTF(0 == (queueCounts & kSharedMask),
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+    SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0,
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}
+
 void SkSharedMutex::acquireShared() {
     int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
     int32_t newQueueCounts;
     do {
@@ -177,3 +193,13 @@ void SkSharedMutex::releaseShared() {
         fExclusiveQueue.signal();
     }
 }
+
+void SkSharedMutex::assertHeldShared() const {
+    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+    // A very loose assert about the mutex being shared.
+    SkASSERTF((queueCounts & kSharedMask) > 0,
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}