Chromium Code Reviews
| Index: src/core/SkSharedLock.h |
| diff --git a/src/core/SkSharedLock.h b/src/core/SkSharedLock.h |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..b20bd072962beb9c0fddc87bd172519af6698742 |
| --- /dev/null |
| +++ b/src/core/SkSharedLock.h |
| @@ -0,0 +1,131 @@ |
| +/* |
| + * Copyright 2015 Google Inc. |
| + * |
| + * Use of this source code is governed by a BSD-style license that can be |
| + * found in the LICENSE file. |
| + */ |
| + |
| +#ifndef SkSharedLock_DEFINED |
| +#define SkSharedLock_DEFINED |
| + |
| +#include "SkAtomics.h" |
| +#include "SkSemaphore.h" |
| +#include "SkTypes.h" |
| + |
| +// This is a shared lock implementation similar to pthreads rwlocks. This implementation is |
| +// cribbed from Preshing's article: |
| +// http://preshing.com/20150316/semaphores-are-surprisingly-versatile/ |
| +// |
| +// This lock does not obey strict queue ordering. It will always alternate between readers and |
| +// a single writer. |
| +class SkSharedLock { |
mtklein
2015/06/29 16:29:22
Let's implement this class in SkSharedLock.cpp and
herb_g
2015/06/29 19:26:31
Done.
|
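To make the suggestion concrete, here is a rough sketch of the header/implementation split being asked for (the SkSharedLock.cpp below is illustrative only, not part of this patchset):

    // SkSharedLock.h: declaration only.
    class SkSharedLock {
    public:
        SkSharedLock();
        void acquire();         // exclusive use
        void release();
        void acquireShared();
        void releaseShared();
    private:
        SkAtomic<int32_t> fQueueCounts;
        SkSemaphore       fSharedQueue;
        SkSemaphore       fExclusiveQueue;
    };

    // SkSharedLock.cpp: the method bodies shown in this diff move here.
    #include "SkSharedLock.h"

    SkSharedLock::SkSharedLock() : fQueueCounts(0) { }

    void SkSharedLock::acquire() { /* body as in the diff below */ }
    // ... and likewise for release(), acquireShared(), and releaseShared().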
| +public: |
| + SkSharedLock() |
| + : fQueueCounts(0) { } |
| + |
| + // Acquire lock for exclusive use. |
| + void acquire() { |
| + // Increment the count of exclusive queue waiters. |
| + int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExclusiveOffset, |
| + sk_memory_order_release); |
mtklein
2015/06/29 16:29:22
preshing sez: sk_memory_order_acquire
herb_g
2015/06/29 19:26:31
Right. release makes no sense because the code is
|
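For reference, the change agreed on in this exchange would look roughly like this (a sketch of the follow-up, not the code in this patchset):

    // Acquire ordering: the critical section that follows may not be reordered above this add.
    int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExclusiveOffset,
                                                    sk_memory_order_acquire);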
| + |
| + // If there are no other exclusive waiters and no shared threads are running, then run; |
| + // otherwise wait. |
| + if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { |
| + fExclusiveQueue.wait(); |
| + } |
| + } |
| + |
| + // Release lock for exclusive use. |
| + void release() { |
| + int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
| + int32_t waitingShared; |
| + int32_t newQueueCounts; |
| + do { |
| + newQueueCounts = oldQueueCounts; |
| + |
| + // Decrement exclusive waiters. |
| + newQueueCounts -= 1 << kWaitingExclusiveOffset; |
| + |
| + // The number of shared lock holders waiting. |
mtklein
2015/06/29 16:29:22
Number of threads waiting to hold a shared lock?
herb_g
2015/06/29 19:26:31
Done.
|
| + waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset; |
| + |
| + // If there are any, move the count of waiting shared over to the shared count. They are |
| + // going to run next. |
| + if (waitingShared > 0) { |
| + |
| + // Set waiting shared to zero. |
| + newQueueCounts &= ~kWaitingSharedMask; |
| + |
| + // Because the exclusive lock is held here, the shared-count bits are zero, so an OR writes |
mtklein
2015/06/29 16:29:22
Help me through this? Is this what you're saying?
herb_g
2015/06/29 19:26:31
Done.
|
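A small worked example of the bit manipulation being discussed, using the field layout from this patch (the concrete counts are chosen purely for illustration): because the releasing thread holds the exclusive lock, the shared-count field is currently zero, so OR-ing the waiting-shared count into that field simply stores it.

    // One writer holds the lock (counted in the waiting-exclusive field); three readers
    // queued up behind it while it ran.
    int32_t counts = (3 << kWaitingSharedOffset) | (1 << kWaitingExclusiveOffset);

    counts -= 1 << kWaitingExclusiveOffset;                          // the writer is finished
    int32_t waitingShared =
            (counts & kWaitingSharedMask) >> kWaitingSharedOffset;   // == 3
    counts &= ~kWaitingSharedMask;                                   // waiting-shared field -> 0
    counts |= waitingShared << kSharedOffset;                        // shared field was 0, so OR stores 3
    // counts == 3: the three readers are now counted as running shared holders.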
| + // the waiting-shared count directly into the shared field. |
| + newQueueCounts |= waitingShared << kSharedOffset; |
| + } |
| + |
| + } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, |
| + sk_memory_order_release, sk_memory_order_relaxed)); |
| + |
| + |
| + if (waitingShared > 0) { |
| + // Run all the shared. |
| + fSharedQueue.signal(waitingShared); |
| + } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
| + // Run a single exclusive waiter. |
| + fExclusiveQueue.signal(); |
| + } |
| + } |
| + |
| + // Acquire lock for shared use. |
| + void acquireShared() { |
| + int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
| + int32_t newQueueCounts; |
| + do { |
| + newQueueCounts = oldQueueCounts; |
| + // If there are waiting exclusives, this shared lock must wait in line; otherwise it runs. |
| + if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
| + newQueueCounts += 1 << kWaitingSharedOffset; |
| + } else { |
| + newQueueCounts += 1 << kSharedOffset; |
| + } |
| + } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, |
| + sk_memory_order_release, sk_memory_order_relaxed)); |
mtklein
2015/06/29 16:29:22
preshing sez: _acquire, _relaxed
herb_g
2015/06/29 19:26:31
Done.
|
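Likewise, the orderings suggested here would change only the tail of the loop above (a sketch of the follow-up, not this patchset):

    } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                            sk_memory_order_acquire, sk_memory_order_relaxed));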
| + |
| + // If there are waiting exclusives, this shared waiter queued up above, so wait to be signaled. |
| + if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
| + fSharedQueue.wait(); |
| + } |
| + } |
| + |
| + // Release lock for shared use. |
| + void releaseShared() { |
| + // Decrement the shared count. |
| + int32_t oldQueueCounts = fQueueCounts.fetch_add(-1 << kSharedOffset, |
| + sk_memory_order_release); |
| + |
| + // If shared count is going to zero (because the old count == 1) and there are exclusive |
| + // waiters, then run a single exclusive waiter. |
| + if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
| + && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
| + fExclusiveQueue.signal(); |
| + } |
| + } |
| + |
| +private: |
| + static const int kLogThreadCount = 10; |
mtklein
2015/06/29 16:29:22
Let's explain this? Maybe
// We assume <1024 threads
herb_g
2015/06/29 19:26:31
Done.
|
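Spelled out, the explanation being asked for might read something like the following (the wording is a guess at the intent, not the later patchset):

    // We assume fewer than 2^10 = 1024 threads ever hold or wait on this lock at once, so
    // each of the three counts below fits in its own 10 bits of the 32-bit fQueueCounts.
    static const int kLogThreadCount = 10;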
| + // current shared count: bits 0 - 9 |
| + // waiting exclusive count: bits 10 - 19 |
| + // waiting shared count: bits 20 - 29 |
| + enum { |
| + kSharedOffset = (0 * kLogThreadCount), |
mtklein
2015/06/29 16:29:22
I personally find it a bit more readable to line up these.
herb_g
2015/06/29 19:26:31
Done.
|
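Lined up as suggested, the block below would read roughly:

    enum {
        kSharedOffset           = (0 * kLogThreadCount),
        kWaitingExclusiveOffset = (1 * kLogThreadCount),
        kWaitingSharedOffset    = (2 * kLogThreadCount),
        kSharedMask             = ((1 << kLogThreadCount) - 1) << kSharedOffset,
        kWaitingExclusiveMask   = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset,
        kWaitingSharedMask      = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
    };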
| + kWaitingExclusiveOffset = (1 * kLogThreadCount), |
| + kWaitingSharedOffset = (2 * kLogThreadCount), |
| + kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, |
| + kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset, |
| + kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, |
| + }; |
| + SkAtomic<int32_t> fQueueCounts; |
mtklein
2015/06/29 16:29:22
// Using an int32_t makes it easier to call .fetch_add().
herb_g
2015/06/29 19:26:31
Acknowledged.
|
| + SkSemaphore fSharedQueue; |
| + SkSemaphore fExclusiveQueue; |
| +}; |
| + |
| +#endif // SkSharedLock_DEFINED |
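For context, a minimal usage sketch of the class under review; the table, the two functions, and any threading setup are invented for illustration and are not part of the patch:

    #include "SkSharedLock.h"

    static SkSharedLock gTableLock;   // hypothetical lock guarding gTable
    static int          gTable[64];

    // Many readers may hold the lock concurrently.
    int readEntry(int i) {
        gTableLock.acquireShared();
        int value = gTable[i];
        gTableLock.releaseShared();
        return value;
    }

    // A writer holds the lock exclusively, excluding readers and other writers.
    void writeEntry(int i, int value) {
        gTableLock.acquire();
        gTable[i] = value;
        gTableLock.release();
    }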