Index: src/core/SkSharedMutex.cpp
diff --git a/src/core/SkSharedMutex.cpp b/src/core/SkSharedMutex.cpp
index b9af10a2beb368e9564159bc2dfe1a1b4f4d163e..4cf63120676e658f43b1b354c22d27b08eb7d549 100644
--- a/src/core/SkSharedMutex.cpp
+++ b/src/core/SkSharedMutex.cpp
@@ -141,6 +141,23 @@ void SkSharedMutex::release() {
     }
 }
 
+#ifdef SK_DEBUG
+void SkSharedMutex::assertHeld() const {
+    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+    // These are very loose asserts about the mutex being held exclusively.
+    SkASSERTF(0 == (queueCounts & kSharedMask),
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+    SkASSERTF((queueCounts & kWaitingExclusiveMask) > 0,
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}
+#endif
+
 void SkSharedMutex::acquireShared() {
     int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
     int32_t newQueueCounts;
@@ -177,3 +194,16 @@ void SkSharedMutex::releaseShared() {
         fExclusiveQueue.signal();
     }
 }
+
+#ifdef SK_DEBUG
+void SkSharedMutex::assertHeldShared() const {
+    int32_t queueCounts = fQueueCounts.load(sk_memory_order_relaxed);
+    // A very loose assert about the mutex being shared.
+    SkASSERTF((queueCounts & kSharedMask) > 0,
+              "running shared: %d, exclusive: %d, waiting shared: %d",
+              (queueCounts & kSharedMask) >> kSharedOffset,
+              (queueCounts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset,
+              (queueCounts & kWaitingSharedMask) >> kWaitingSharedOffset);
+}
+
+#endif
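
For orientation, a hedged sketch of how these new debug assertions might be used by client code. From the masks and offsets visible in the diff, fQueueCounts evidently packs three counters (running shared holders, exclusive holders/waiters, waiting shared), so the asserts are deliberately loose, as the comments say: assertHeld() checks that no shared holders are running and that the exclusive slot is occupied, and assertHeldShared() checks that at least one shared holder exists; neither can verify that the calling thread is the holder. The GuardedValue class and all of its members below are hypothetical, invented for illustration; only SkSharedMutex and its acquire/release/assert methods come from Skia.

#include "SkSharedMutex.h"

// GuardedValue is a hypothetical type used only to illustrate the new
// assertions; it is not part of this CL or of Skia.
class GuardedValue {
public:
    void setValue(int v) {
        fMu.acquire();             // writers take the lock exclusively
        this->setValueLocked(v);
        fMu.release();
    }

    int value() const {
        fMu.acquireShared();       // readers take the lock in shared mode
        int v = this->valueLocked();
        fMu.releaseShared();
        return v;
    }

private:
    void setValueLocked(int v) {
        fMu.assertHeld();          // debug check: lock must be held exclusively
        fValue = v;
    }

    int valueLocked() const {
        fMu.assertHeldShared();    // debug check: at least one shared holder must exist
        return fValue;
    }

    mutable SkSharedMutex fMu;
    int fValue = 0;
};

Since fQueueCounts is read with sk_memory_order_relaxed and the definitions are guarded by SK_DEBUG, these checks are advisory debug-build diagnostics rather than synchronization points.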