OLD | NEW |
(Empty) | |
| 1 /* |
| 2 * Copyright 2015 Google Inc. |
| 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. |
| 6 */ |
| 7 |
| 8 #include "SkSharedMutex.h" |
| 9 |
| 10 #include "SkAtomics.h" |
| 11 #include "SkSemaphore.h" |
| 12 #include "SkTypes.h" |
| 13 |
// The fQueueCounts field holds three counts packed into a single int32_t so that
// all of them can be read and updated in one atomic operation.
// The three counts must be the same size; each gets 10 bits, allowing each count
// to range from 0 to 1023 (i.e. kLogThreadCount = 10 = log2 of the 1024 limit).
//
// The three counts held in fQueueCounts are:
// * Shared - the number of shared lock holders currently running.
// * WaitingExclusive - the number of threads waiting for an exclusive lock.
// * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread
//   to finish.
static const int kLogThreadCount = 10;

// Bit offsets and masks for the three packed counts.
// NOTE(review): "Exlusive" in kWaitingExlusiveOffset is a typo (missing 'c');
// it is kept as-is because every use site in this file spells it the same way.
enum {
    kSharedOffset          = (0 * kLogThreadCount),
    kWaitingExlusiveOffset = (1 * kLogThreadCount),
    kWaitingSharedOffset   = (2 * kLogThreadCount),
    kSharedMask            = ((1 << kLogThreadCount) - 1) << kSharedOffset,
    kWaitingExclusiveMask  = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset,
    kWaitingSharedMask     = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
};
| 33 |
| 34 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { } |
| 35 |
| 36 void SkSharedMutex::acquire() { |
| 37 // Increment the count of exclusive queue waiters. |
| 38 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, |
| 39 sk_memory_order_acquire); |
| 40 |
| 41 // If there are no other exclusive waiters and no shared threads are running
then run |
| 42 // else wait. |
| 43 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kShare
dMask) > 0) { |
| 44 fExclusiveQueue.wait(); |
| 45 } |
| 46 } |
| 47 |
// Release the exclusive (writer) lock. Hands the mutex to the whole group of
// waiting shared threads if there are any; otherwise wakes one exclusive waiter.
// The handoff decision and the count update are committed together in a single
// CAS so the packed counts are never observed in an inconsistent state.
void SkSharedMutex::release() {
    int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
    int32_t waitingShared;
    int32_t newQueueCounts;
    do {
        newQueueCounts = oldQueueCounts;

        // Decrement exclusive waiters; this thread counted itself as one in acquire().
        newQueueCounts -= 1 << kWaitingExlusiveOffset;

        // The number of threads waiting to acquire a shared lock.
        waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;

        // If there are any, move the counts of all the shared waiters to actual
        // shared. They are going to run next.
        if (waitingShared > 0) {

            // Set waiting shared to zero.
            newQueueCounts &= ~kWaitingSharedMask;

            // Because this is the exclusive release, there are zero readers. So, the bits
            // for shared locks should be zero. Since those bits are zero, we can just |= in the
            // waitingShared count instead of clearing with an &= and then |= the count.
            newQueueCounts |= waitingShared << kSharedOffset;
        }

        // Retry with the freshly observed counts if another thread raced us.
    } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                            sk_memory_order_release, sk_memory_order_relaxed));

    if (waitingShared > 0) {
        // Run all the shared.
        fSharedQueue.signal(waitingShared);
    } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
        // Run a single exclusive waiter.
        fExclusiveQueue.signal();
    }
}
| 85 |
// Acquire a shared (reader) lock. If any exclusive thread is waiting, this
// reader queues behind it (preventing writer starvation); otherwise it starts
// running immediately. The decision and the count update are committed in a
// single CAS on the packed counts.
void SkSharedMutex::acquireShared() {
    int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
    int32_t newQueueCounts;
    do {
        newQueueCounts = oldQueueCounts;
        // If there are waiting exclusives then this shared lock waits else it runs.
        if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
            newQueueCounts += 1 << kWaitingSharedOffset;
        } else {
            newQueueCounts += 1 << kSharedOffset;
        }
    } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                            sk_memory_order_acquire, sk_memory_order_relaxed));

    // If there are waiting exclusives, then this shared waits until after it runs.
    if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
        fSharedQueue.wait();
    }
}
| 105 |
| 106 void SkSharedMutex::releaseShared() { |
| 107 // Decrement the shared count. |
| 108 int32_t oldQueueCounts = fQueueCounts.fetch_add(-1 << kSharedOffset, |
| 109 sk_memory_order_release); |
| 110 |
| 111 // If shared count is going to zero (because the old count == 1) and there a
re exclusive |
| 112 // waiters, then run a single exclusive waiter. |
| 113 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
| 114 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
| 115 fExclusiveQueue.signal(); |
| 116 } |
| 117 } |
OLD | NEW |