OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #include "SkSharedMutex.h" | 8 #include "SkSharedMutex.h" |
9 | 9 |
10 #include "SkAtomics.h" | 10 #include "SkAtomics.h" |
11 #include "SkSemaphore.h" | 11 #include "SkSemaphore.h" |
12 #include "SkTypes.h" | 12 #include "SkTypes.h" |
13 | 13 |
| 14 |
| 15 #if defined(THREAD_SANITIZER) |
| 16 |
| 17 /* Report that a lock has been created at address "lock". */ |
| 18 #define ANNOTATE_RWLOCK_CREATE(lock) \ |
| 19 AnnotateRWLockCreate(__FILE__, __LINE__, lock) |
| 20 |
| 21 /* Report that the lock at address "lock" is about to be destroyed. */ |
| 22 #define ANNOTATE_RWLOCK_DESTROY(lock) \ |
| 23 AnnotateRWLockDestroy(__FILE__, __LINE__, lock) |
| 24 |
| 25 /* Report that the lock at address "lock" has been acquired. |
| 26 is_w=1 for writer lock, is_w=0 for reader lock. */ |
| 27 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) \ |
| 28 AnnotateRWLockAcquired(__FILE__, __LINE__, lock, is_w) |
| 29 |
| 30 /* Report that the lock at address "lock" is about to be released. */ |
| 31 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) \ |
| 32 AnnotateRWLockReleased(__FILE__, __LINE__, lock, is_w) |
| 33 |
| 34 #ifdef DYNAMIC_ANNOTATIONS_WANT_ATTRIBUTE_WEAK |
| 35 # ifdef __GNUC__ |
| 36 # define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK __attribute__((weak)) |
| 37 # else |
| 38 /* TODO(glider): for Windows support we may want to change this macro in order |
| 39 to prepend __declspec(selectany) to the annotations' declarations. */ |
| 40 # error weak annotations are not supported for your compiler |
| 41 # endif |
| 42 #else |
| 43 # define DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK |
| 44 #endif |
| 45 |
| 46 extern "C" { |
| 47 void AnnotateRWLockCreate( |
| 48 const char *file, int line, |
| 49 const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
| 50 void AnnotateRWLockDestroy( |
| 51 const char *file, int line, |
| 52 const volatile void *lock) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
| 53 void AnnotateRWLockAcquired( |
| 54 const char *file, int line, |
| 55 const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
| 56 void AnnotateRWLockReleased( |
| 57 const char *file, int line, |
| 58 const volatile void *lock, long is_w) DYNAMIC_ANNOTATIONS_ATTRIBUTE_WEAK; |
| 59 } |
| 60 |
| 61 #else |
| 62 |
| 63 #define ANNOTATE_RWLOCK_CREATE(lock) |
| 64 #define ANNOTATE_RWLOCK_DESTROY(lock) |
| 65 #define ANNOTATE_RWLOCK_ACQUIRED(lock, is_w) |
| 66 #define ANNOTATE_RWLOCK_RELEASED(lock, is_w) |
| 67 |
| 68 #endif |
| 69 |
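For reference, the THREAD_SANITIZER guard above is expected to come from the build. A minimal sketch of how a build might define it under Clang, using the compiler's __has_feature probe (an assumption made for illustration; Skia's build files set this flag themselves):

    #if defined(__has_feature)
    #  if __has_feature(thread_sanitizer)   // true when compiled with -fsanitize=thread
    #    define THREAD_SANITIZER
    #  endif
    #endif

With the flag unset, the four ANNOTATE_RWLOCK_* macros expand to nothing, so the annotations cost nothing in normal builds.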
14 // The fQueueCounts field holds three counts in an int32_t in order to make managing them atomic. | 70 // The fQueueCounts field holds three counts in an int32_t in order to make managing them atomic. |
15 // These three counts must be the same size, so each gets 10 bits. Ten bits is | 71 // These three counts must be the same size, so each gets 10 bits. Ten bits is |
16 // the log2 of the maximum count, which is 1024. | 72 // the log2 of the maximum count, which is 1024. |
17 // | 73 // |
18 // The three counts held in fQueueCounts are: | 74 // The three counts held in fQueueCounts are: |
19 // * Shared - the number of shared lock holders currently running. | 75 // * Shared - the number of shared lock holders currently running. |
20 // * WaitingExclusive - the number of threads waiting for an exclusive lock. | 76 // * WaitingExclusive - the number of threads waiting for an exclusive lock. |
21 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread | 77 // * WaitingShared - the number of threads waiting to run while waiting for an exclusive thread |
22 // to finish. | 78 // to finish. |
23 static const int kLogThreadCount = 10; | 79 static const int kLogThreadCount = 10; |
24 | 80 |
25 enum { | 81 enum { |
26 kSharedOffset = (0 * kLogThreadCount), | 82 kSharedOffset = (0 * kLogThreadCount), |
27 kWaitingExlusiveOffset = (1 * kLogThreadCount), | 83 kWaitingExlusiveOffset = (1 * kLogThreadCount), |
28 kWaitingSharedOffset = (2 * kLogThreadCount), | 84 kWaitingSharedOffset = (2 * kLogThreadCount), |
29 kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, | 85 kSharedMask = ((1 << kLogThreadCount) - 1) << kSharedOffset, |
30 kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset, | 86 kWaitingExclusiveMask = ((1 << kLogThreadCount) - 1) << kWaitingExlusiveOffset, |
31 kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, | 87 kWaitingSharedMask = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset, |
32 }; | 88 }; |
33 | 89 |
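To make the packing concrete, here is a minimal sketch of pulling the three counts back out of a snapshot of fQueueCounts, reusing the enum constants above (the helper names are illustrative, not part of the source):

    static int sharedCount(int32_t counts) {
        return (counts & kSharedMask) >> kSharedOffset;                      // running shared holders
    }
    static int waitingExclusiveCount(int32_t counts) {
        return (counts & kWaitingExclusiveMask) >> kWaitingExlusiveOffset;   // queued writers
    }
    static int waitingSharedCount(int32_t counts) {
        return (counts & kWaitingSharedMask) >> kWaitingSharedOffset;        // queued readers
    }

Because all three fields live in one int32_t, a single fetch_add or compare_exchange can adjust several of them at once.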
34 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { } | 90 SkSharedMutex::SkSharedMutex() : fQueueCounts(0) { ANNOTATE_RWLOCK_CREATE(this); } |
35 | 91 SkSharedMutex::~SkSharedMutex() { ANNOTATE_RWLOCK_DESTROY(this); } |
36 void SkSharedMutex::acquire() { | 92 void SkSharedMutex::acquire() { |
37 // Increment the count of exclusive queue waiters. | 93 // Increment the count of exclusive queue waiters. |
38 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, | 94 int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExlusiveOffset, |
39 sk_memory_order_acquire); | 95 sk_memory_order_acquire); |
40 | 96 |
41 // If there are no other exclusive waiters and no shared threads running, run now; | 97 // If there are no other exclusive waiters and no shared threads running, run now; |
42 // otherwise wait. | 98 // otherwise wait. |
43 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { | 99 if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) { |
44 fExclusiveQueue.wait(); | 100 fExclusiveQueue.wait(); |
45 } | 101 } |
| 102 ANNOTATE_RWLOCK_ACQUIRED(this, 1); |
46 } | 103 } |
47 | 104 |
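A standalone sketch of the fast-path test above, using std::atomic in place of SkAtomic (an assumption made only so the snippet compiles on its own; the enum constants are as defined earlier in this file):

    #include <atomic>
    #include <cassert>

    std::atomic<int32_t> queueCounts{0};

    void acquireFastPathDemo() {
        // Atomically announce this writer in the WaitingExclusive field.
        int32_t old = queueCounts.fetch_add(1 << kWaitingExlusiveOffset,
                                            std::memory_order_acquire);
        // Uncontended case: no prior writers waiting and no readers running,
        // so both masked fields are zero and the writer proceeds immediately.
        assert((old & kWaitingExclusiveMask) == 0 && (old & kSharedMask) == 0);
    }

If instead a reader already held the lock, old would carry a nonzero Shared field, and the real acquire() blocks on fExclusiveQueue until the last reader signals it.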
48 void SkSharedMutex::release() { | 105 void SkSharedMutex::release() { |
| 106 ANNOTATE_RWLOCK_RELEASED(this, 1); |
| 107 |
49 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); | 108 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed); |
50 int32_t waitingShared; | 109 int32_t waitingShared; |
51 int32_t newQueueCounts; | 110 int32_t newQueueCounts; |
52 do { | 111 do { |
53 newQueueCounts = oldQueueCounts; | 112 newQueueCounts = oldQueueCounts; |
54 | 113 |
55 // Decrement exclusive waiters. | 114 // Decrement exclusive waiters. |
56 newQueueCounts -= 1 << kWaitingExlusiveOffset; | 115 newQueueCounts -= 1 << kWaitingExlusiveOffset; |
57 | 116 |
58 // The number of threads waiting to acquire a shared lock. | 117 // The number of threads waiting to acquire a shared lock. |
(...skipping 35 matching lines...)
94 } else { | 153 } else { |
95 newQueueCounts += 1 << kSharedOffset; | 154 newQueueCounts += 1 << kSharedOffset; |
96 } | 155 } |
97 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, | 156 } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts, |
98 sk_memory_order_acquire, sk_memory_order_relaxed)); | 157 sk_memory_order_acquire, sk_memory_order_relaxed)); |
99 | 158 |
100 // If there are waiting exclusives, then this shared acquirer waits until after they run. | 159 // If there are waiting exclusives, then this shared acquirer waits until after they run. |
101 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { | 160 if ((newQueueCounts & kWaitingExclusiveMask) > 0) { |
102 fSharedQueue.wait(); | 161 fSharedQueue.wait(); |
103 } | 162 } |
| 163 ANNOTATE_RWLOCK_ACQUIRED(this, 0); |
| 164 |
104 } | 165 } |
105 | 166 |
106 void SkSharedMutex::releaseShared() { | 167 void SkSharedMutex::releaseShared() { |
| 168 ANNOTATE_RWLOCK_RELEASED(this, 0); |
| 169 |
107 // Decrement the shared count. | 170 // Decrement the shared count. |
108 int32_t oldQueueCounts = fQueueCounts.fetch_add(-1 << kSharedOffset, | 171 int32_t oldQueueCounts = fQueueCounts.fetch_add(-1 << kSharedOffset, |
109 sk_memory_order_release); | 172 sk_memory_order_release); |
110 | 173 |
111 // If shared count is going to zero (because the old count == 1) and there are exclusive | 174 // If shared count is going to zero (because the old count == 1) and there are exclusive |
112 // waiters, then run a single exclusive waiter. | 175 // waiters, then run a single exclusive waiter. |
113 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 | 176 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1 |
114 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { | 177 && (oldQueueCounts & kWaitingExclusiveMask) > 0) { |
115 fExclusiveQueue.signal(); | 178 fExclusiveQueue.signal(); |
116 } | 179 } |
117 } | 180 } |
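Taken together, a minimal usage sketch of the class this file implements (the reader/writer functions are illustrative; the method declarations come from SkSharedMutex.h):

    #include "SkSharedMutex.h"

    SkSharedMutex gMutex;
    int gValue = 0;

    void writer() {
        gMutex.acquire();        // exclusive: excludes readers and other writers
        gValue += 1;
        gMutex.release();
    }

    int reader() {
        gMutex.acquireShared();  // shared: may run concurrently with other readers
        int v = gValue;
        gMutex.releaseShared();
        return v;
    }

Note the writer-priority design visible in this file: once a writer is waiting, newly arriving readers queue on fSharedQueue rather than overtaking it.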