OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2013 Google Inc. | 2 * Copyright 2013 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkOnce_DEFINED | 8 #ifndef SkOnce_DEFINED |
9 #define SkOnce_DEFINED | 9 #define SkOnce_DEFINED |
10 | 10 |
(...skipping 36 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
47 // ---------------------- Implementation details below here. -----------------
------------ | 47 // ---------------------- Implementation details below here. -----------------
------------ |
48 | 48 |
// A minimal test-and-set spinlock built on sk_atomic_cas.
// This is POD and must be zero-initialized.
struct SkSpinlock {
    // Busy-waits until the lock is free, then takes it by CAS-ing
    // thisIsPrivate from 0 (unlocked) to 1 (held).
    void acquire() {
        SkASSERT(shouldBeZero == 0);  // Debug check that the lock was zero-initialized.
        // No memory barrier needed, but sk_atomic_cas gives us at least release anyway.
        while (!sk_atomic_cas(&thisIsPrivate, 0, 1)) {
            // spin
        }
    }

    // Releases the lock by CAS-ing thisIsPrivate from 1 back to 0.
    // SkAssertResult checks (in debug builds) that the CAS succeeded,
    // i.e. that the lock was actually held.
    void release() {
        SkASSERT(shouldBeZero == 0);  // Debug check that the lock was zero-initialized.
        // This requires a release memory barrier before storing, which sk_atomic_cas guarantees.
        SkAssertResult(sk_atomic_cas(&thisIsPrivate, 1, 0));
    }

    int32_t thisIsPrivate;              // 0 == unlocked, 1 == held.  Only touched via sk_atomic_cas.
    SkDEBUGCODE(int32_t shouldBeZero;)  // Debug-only canary; stays 0 iff the struct was zero-initialized.
};
70 | 68 |
71 struct SkOnceFlag { | 69 struct SkOnceFlag { |
72 bool done; | 70 bool done; |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
140 // done before here---in particular, those done by calling f(arg)---are
observable | 138 // done before here---in particular, those done by calling f(arg)---are
observable |
141 // before the writes after the line, *done = true. | 139 // before the writes after the line, *done = true. |
142 // | 140 // |
143 // In version control terms this is like saying, "check in the work up | 141 // In version control terms this is like saying, "check in the work up |
144 // to and including f(arg), then check in *done=true as a subsequent cha
nge". | 142 // to and including f(arg), then check in *done=true as a subsequent cha
nge". |
145 // | 143 // |
146 // We'll use this in the fast path to make sure f(arg)'s effects are | 144 // We'll use this in the fast path to make sure f(arg)'s effects are |
147 // observable whenever we observe *done == true. | 145 // observable whenever we observe *done == true. |
148 release_barrier(); | 146 release_barrier(); |
149 *done = true; | 147 *done = true; |
150 SK_ANNOTATE_HAPPENS_BEFORE(done); | |
151 } | 148 } |
152 } | 149 } |
153 | 150 |
// This is our fast path, called all the time. We do really want it to be inlined.
// Fast path of the once-only runner: reads *done without the lock and only
// falls into sk_once_slow (which takes lock and may call f(arg), registering
// atExit if provided) when *done is not yet true.
template <typename Lock, typename Func, typename Arg>
inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) {
    if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) {
        sk_once_slow(done, lock, f, arg, atExit);
    }
    // Also known as a load-load/load-store barrier, this acquire barrier makes
    // sure that anything we read from memory---in particular, memory written by
    // calling f(arg)---is at least as current as the value we read from once->done.
    //
    // In version control terms, this is a lot like saying "sync up to the
    // commit where we wrote once->done = true".
    //
    // The release barrier in sk_once_slow guaranteed that once->done = true
    // happens after f(arg), so by syncing to once->done = true here we're
    // forcing ourselves to also wait until the effects of f(arg) are readable.
    acquire_barrier();
}
173 | 169 |
174 template <typename Func, typename Arg> | 170 template <typename Func, typename Arg> |
175 inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) { | 171 inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) { |
176 return SkOnce(&once->done, &once->lock, f, arg, atExit); | 172 return SkOnce(&once->done, &once->lock, f, arg, atExit); |
177 } | 173 } |
178 | 174 |
179 #undef SK_ANNOTATE_BENIGN_RACE | 175 #undef SK_ANNOTATE_BENIGN_RACE |
180 | 176 |
181 #endif // SkOnce_DEFINED | 177 #endif // SkOnce_DEFINED |
OLD | NEW |