Chromium Code Reviews

Side by Side Diff: src/core/SkSharedLock.h

Issue 1215503005: Implement shared locks in the style of pthread's rwlock. (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 5 years, 5 months ago
1 /*
2 * Copyright 2015 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #ifndef SkSharedLock_DEFINED
9 #define SkSharedLock_DEFINED
10
11 #include "SkAtomics.h"
12 #include "SkSemaphore.h"
13 #include "SkTypes.h"
14
15 // This is a shared lock implementation similar to pthreads rwlocks. This implementation is
16 // cribbed from Preshing's article:
17 // http://preshing.com/20150316/semaphores-are-surprisingly-versatile/
18 //
19 // This lock does not obey strict queue ordering. It will always alternate between readers and
20 // a single writer.
21 class SkSharedLock {
mtklein 2015/06/29 16:29:22 Let's implement this class in SkSharedLock.cpp and
herb_g 2015/06/29 19:26:31 Done.
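(A sketch of the split being suggested: declarations stay in this header and the bodies move to SkSharedLock.cpp. The shape below is illustrative, not the actual follow-up patch.)

    // SkSharedLock.h: declarations only.
    class SkSharedLock {
    public:
        SkSharedLock();
        void acquire();         // exclusive use
        void release();
        void acquireShared();
        void releaseShared();
    private:
        // counts and semaphores as declared below
    };

    // SkSharedLock.cpp: definitions.
    SkSharedLock::SkSharedLock() : fQueueCounts(0) { }
    void SkSharedLock::acquire() { /* body as below */ }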
22 public:
23 SkSharedLock()
24 : fQueueCounts(0) { }
25
26 // Acquire lock for exclusive use.
27 void acquire() {
28 // Increment the count of exclusive queue waiters.
29         int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExclusiveOffset,
30                                                         sk_memory_order_release);
mtklein 2015/06/29 16:29:22 preshing sez: sk_memory_order_acquire
herb_g 2015/06/29 19:26:31 Right. release makes no sense because the code is
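(For reference, the fix the thread converges on only swaps the ordering constraint; the rest of the statement is unchanged.)

    // Increment the count of exclusive queue waiters, with acquire semantics
    // per Preshing's article.
    int32_t oldQueueCounts = fQueueCounts.fetch_add(1 << kWaitingExclusiveOffset,
                                                    sk_memory_order_acquire);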
31
32         // If there are no other exclusive waiters and no shared threads are running, then run;
33         // else wait.
34         if ((oldQueueCounts & kWaitingExclusiveMask) > 0 || (oldQueueCounts & kSharedMask) > 0) {
35 fExclusiveQueue.wait();
36 }
37 }
38
39 // Release lock for exclusive use.
40 void release() {
41 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
42 int32_t waitingShared;
43 int32_t newQueueCounts;
44 do {
45 newQueueCounts = oldQueueCounts;
46
47 // Decrement exclusive waiters.
48             newQueueCounts -= 1 << kWaitingExclusiveOffset;
49
50 // The number of shared lock holders waiting.
mtklein 2015/06/29 16:29:22 Number of threads waiting to hold a shared lock?
herb_g 2015/06/29 19:26:31 Done.
51             waitingShared = (oldQueueCounts & kWaitingSharedMask) >> kWaitingSharedOffset;
52
53             // If there are any, move the counts of all the shared waiters to actual shared. They are
54             // going to run next.
55 if (waitingShared > 0) {
56
57 // Set waiting shared to zero.
58 newQueueCounts &= ~kWaitingSharedMask;
59
60                 // Because we are waiting for writers, the readers bits are zero. An or will slam
mtklein 2015/06/29 16:29:22 Help me through this? Is this what you're saying?
herb_g 2015/06/29 19:26:31 Done.
61 // the count into the field.
62 newQueueCounts |= waitingShared << kSharedOffset;
63 }
64
65         } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
66                                                 sk_memory_order_release, sk_memory_order_relaxed));
67
68
69 if (waitingShared > 0) {
70 // Run all the shared.
71 fSharedQueue.signal(waitingShared);
72 } else if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
73 // Run a single exclusive waiter.
74 fExclusiveQueue.signal();
75 }
76 }
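(A worked example of the or-into-zero-bits trick discussed above, using the bit layout defined at the bottom of the class: shared in bits 0-9, waiting exclusive in bits 10-19, waiting shared in bits 20-29. The concrete counts are made up for illustration.)

    // This thread holds the lock exclusively; 3 readers are queued behind it:
    //   oldQueueCounts = (3 << kWaitingSharedOffset) | (1 << kWaitingExclusiveOffset)
    //                  = 0x00300400
    // release() then:
    //   subtracts 1 << kWaitingExclusiveOffset   -> 0x00300000
    //   clears kWaitingSharedMask                -> 0x00000000
    //   ors in waitingShared << kSharedOffset    -> 0x00000003
    // The or is safe precisely because the shared bits stayed zero while the
    // writer ran.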
77
78 // Acquire lock for shared use.
79 void acquireShared() {
80 int32_t oldQueueCounts = fQueueCounts.load(sk_memory_order_relaxed);
81 int32_t newQueueCounts;
82 do {
83 newQueueCounts = oldQueueCounts;
84             // If there are waiting exclusives, then this shared lock waits; else it runs.
85 if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
86 newQueueCounts += 1 << kWaitingSharedOffset;
87 } else {
88 newQueueCounts += 1 << kSharedOffset;
89 }
90         } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
91                                                 sk_memory_order_release, sk_memory_order_relaxed));
mtklein 2015/06/29 16:29:22 preshing sez: _acquire, _relaxed
herb_g 2015/06/29 19:26:31 Done.
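(For reference, the ordering pair agreed on above.)

    } while (!fQueueCounts.compare_exchange(&oldQueueCounts, newQueueCounts,
                                            sk_memory_order_acquire, sk_memory_order_relaxed));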
92
93         // If there are waiting exclusives, then this shared waits until after the exclusive runs.
94 if ((newQueueCounts & kWaitingExclusiveMask) > 0) {
95 fSharedQueue.wait();
96 }
97 }
98
99 // Release lock for shared use.
100 void releaseShared() {
101 // Decrement the shared count.
102         int32_t oldQueueCounts = fQueueCounts.fetch_add(-1 << kSharedOffset,
103                                                         sk_memory_order_release);
104
105         // If shared count is going to zero (because the old count == 1) and there are exclusive
106 // waiters, then run a single exclusive waiter.
107 if (((oldQueueCounts & kSharedMask) >> kSharedOffset) == 1
108 && (oldQueueCounts & kWaitingExclusiveMask) > 0) {
109 fExclusiveQueue.signal();
110 }
111 }
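(A quick trace of the handoff condition, with illustrative counts.)

    // Two readers hold the lock and one writer is queued:
    //   before:            fQueueCounts = (1 << kWaitingExclusiveOffset) | 2 = 0x00000402
    //   reader A releases: old shared count was 2, no signal                -> 0x00000401
    //   reader B releases: old shared count was 1, signal fExclusiveQueue   -> 0x00000400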
112
113 private:
114 static const int kLogThreadCount = 10;
mtklein 2015/06/29 16:29:22 Let's explain this? Maybe // We assume <1024 threads.
herb_g 2015/06/29 19:26:31 Done.
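(One way to write the comment being asked for; the static_assert is an illustrative addition, not part of the patch.)

    // We assume <1024 threads, so each of the three counts packed into
    // fQueueCounts gets kLogThreadCount = 10 bits.
    static const int kLogThreadCount = 10;
    static_assert(3 * kLogThreadCount <= 30, "three counts must fit in an int32_t");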
115 // current shared: 0 - 9;
116 // waiting exclusive: 10 - 19;
117 // waiting shared: 20 - 29;
118 enum {
119         kSharedOffset           = (0 * kLogThreadCount),
mtklein 2015/06/29 16:29:22 I personally find it a bit more readable to line up the =s.
herb_g 2015/06/29 19:26:31 Done.
120         kWaitingExclusiveOffset = (1 * kLogThreadCount),
121         kWaitingSharedOffset    = (2 * kLogThreadCount),
122         kSharedMask             = ((1 << kLogThreadCount) - 1) << kSharedOffset,
123         kWaitingExclusiveMask   = ((1 << kLogThreadCount) - 1) << kWaitingExclusiveOffset,
124         kWaitingSharedMask      = ((1 << kLogThreadCount) - 1) << kWaitingSharedOffset,
125 };
126 SkAtomic<int32_t> fQueueCounts;
mtklein 2015/06/29 16:29:22 // Using an int32_t makes it easier to call .fetch_add
herb_g 2015/06/29 19:26:31 Acknowledged.
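(My reading of the note above: a signed 32-bit counter lets releaseShared() pass a plain negative delta, as it does earlier in this file. An unsigned counter would force the same decrement to be spelled as a wrapping subtraction.)

    fQueueCounts.fetch_add(-1 << kSharedOffset, sk_memory_order_release);  // signed: reads naturally
    // unsigned alternative: fetch_add(~uint32_t(1 << kSharedOffset) + 1, ...)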
127 SkSemaphore fSharedQueue;
128 SkSemaphore fExclusiveQueue;
129 };
130
131 #endif // SkSharedLock_DEFINED
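(A hedged usage sketch of the four entry points; gSharedData and the two functions are illustrative, not from the patch. The real coverage lives in tests/SkSharedLockTest.cpp.)

    SkSharedLock gLock;
    int gSharedData = 0;

    void reader() {
        gLock.acquireShared();      // many readers may hold the lock at once
        int copy = gSharedData;
        gLock.releaseShared();
        (void)copy;
    }

    void writer() {
        gLock.acquire();            // exclusive: alternates with batches of readers
        gSharedData += 1;
        gLock.release();
    }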