OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2013 Google Inc. | 2 * Copyright 2013 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkOnce_DEFINED | 8 #ifndef SkOnce_DEFINED |
9 #define SkOnce_DEFINED | 9 #define SkOnce_DEFINED |
10 | 10 |
(...skipping 53 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
64 | 64 |
65 int32_t thisIsPrivate; | 65 int32_t thisIsPrivate; |
66 SkDEBUGCODE(int32_t shouldBeZero;) | 66 SkDEBUGCODE(int32_t shouldBeZero;) |
67 }; | 67 }; |
68 | 68 |
69 struct SkOnceFlag { | 69 struct SkOnceFlag { |
70 bool done; | 70 bool done; |
71 SkSpinlock lock; | 71 SkSpinlock lock; |
72 }; | 72 }; |
73 | 73 |
74 // TODO(bungeman, mtklein): move all these *barrier* functions to SkThread when refactoring lands. | |
bungeman-skia
2014/05/29 18:02:52
yay!!
| |
75 | |
76 #ifdef SK_BUILD_FOR_WIN | |
77 # include <intrin.h> | |
78 inline static void compiler_barrier() { | |
79 _ReadWriteBarrier(); | |
80 } | |
81 #else | |
82 inline static void compiler_barrier() { | |
83 asm volatile("" : : : "memory"); | |
84 } | |
85 #endif | |
86 | |
87 inline static void full_barrier_on_arm() { | |
88 #if (defined(SK_CPU_ARM) && SK_ARM_ARCH >= 7) || defined(SK_CPU_ARM64) | |
89 asm volatile("dmb ish" : : : "memory"); | |
90 #elif defined(SK_CPU_ARM) | |
91 asm volatile("mcr p15, 0, %0, c7, c10, 5" : : "r" (0) : "memory"); | |
92 #endif | |
93 } | |
94 | |
95 // On every platform, we issue a compiler barrier to prevent it from reordering | |
96 // code. That's enough for platforms like x86 where release and acquire | |
97 // barriers are no-ops. On other platforms we may need to be more careful; | |
98 // ARM, in particular, needs real code for both acquire and release. We use a | |
 99 // full barrier, which acts as both, because that's the finest precision ARM | |
100 // provides. | |
101 | |
102 inline static void release_barrier() { | |
103 compiler_barrier(); | |
104 full_barrier_on_arm(); | |
105 } | |
106 | |
107 inline static void acquire_barrier() { | |
108 compiler_barrier(); | |
109 full_barrier_on_arm(); | |
110 } | |
111 | |
112 // Works with SkSpinlock or SkMutex. | 74 // Works with SkSpinlock or SkMutex. |
113 template <typename Lock> | 75 template <typename Lock> |
114 class SkAutoLockAcquire { | 76 class SkAutoLockAcquire { |
115 public: | 77 public: |
116 explicit SkAutoLockAcquire(Lock* lock) : fLock(lock) { fLock->acquire(); } | 78 explicit SkAutoLockAcquire(Lock* lock) : fLock(lock) { fLock->acquire(); } |
117 ~SkAutoLockAcquire() { fLock->release(); } | 79 ~SkAutoLockAcquire() { fLock->release(); } |
118 private: | 80 private: |
119 Lock* fLock; | 81 Lock* fLock; |
120 }; | 82 }; |
121 | 83 |
(...skipping 14 matching lines...) Expand all Loading... | |
136 } | 98 } |
137 // Also known as a store-store/load-store barrier, this makes sure that the writes | 99 // Also known as a store-store/load-store barrier, this makes sure that the writes |
138 // done before here---in particular, those done by calling f(arg)---are observable | 100 // done before here---in particular, those done by calling f(arg)---are observable |
139 // before the writes after the line, *done = true. | 101 // before the writes after the line, *done = true. |
140 // | 102 // |
141 // In version control terms this is like saying, "check in the work up | 103 // In version control terms this is like saying, "check in the work up |
 142 // to and including f(arg), then check in *done=true as a subsequent change". | 104 // to and including f(arg), then check in *done=true as a subsequent change". |
143 // | 105 // |
144 // We'll use this in the fast path to make sure f(arg)'s effects are | 106 // We'll use this in the fast path to make sure f(arg)'s effects are |
145 // observable whenever we observe *done == true. | 107 // observable whenever we observe *done == true. |
146 release_barrier(); | 108 sk_release_store(done, true); |
147 *done = true; | |
148 } | 109 } |
149 } | 110 } |
150 | 111 |
 151 // This is our fast path, called all the time. We do really want it to be inlined. | 112 // This is our fast path, called all the time. We do really want it to be inlined. |
152 template <typename Lock, typename Func, typename Arg> | 113 template <typename Lock, typename Func, typename Arg> |
153 inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) { | 114 inline void SkOnce(bool* done, Lock* lock, Func f, Arg arg, void(*atExit)()) { |
154 if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) { | 115 if (!SK_ANNOTATE_UNPROTECTED_READ(*done)) { |
155 sk_once_slow(done, lock, f, arg, atExit); | 116 sk_once_slow(done, lock, f, arg, atExit); |
156 } | 117 } |
157 // Also known as a load-load/load-store barrier, this acquire barrier makes | 118 // Also known as a load-load/load-store barrier, this acquire barrier makes |
158 // sure that anything we read from memory---in particular, memory written by | 119 // sure that anything we read from memory---in particular, memory written by |
 159 // calling f(arg)---is at least as current as the value we read from once->done. | 120 // calling f(arg)---is at least as current as the value we read from done. |
160 // | 121 // |
161 // In version control terms, this is a lot like saying "sync up to the | 122 // In version control terms, this is a lot like saying "sync up to the |
162 // commit where we wrote once->done = true". | 123 // commit where we wrote done = true". |
163 // | 124 // |
164 // The release barrier in sk_once_slow guaranteed that once->done = true | 125 // The release barrier in sk_once_slow guaranteed that done = true |
165 // happens after f(arg), so by syncing to once->done = true here we're | 126 // happens after f(arg), so by syncing to done = true here we're |
 166 // forcing ourselves to also wait until the effects of f(arg) are readable. | 127 // forcing ourselves to also wait until the effects of f(arg) are readable. |
167 acquire_barrier(); | 128 SkAssertResult(sk_acquire_load(done)); |
168 } | 129 } |
169 | 130 |
170 template <typename Func, typename Arg> | 131 template <typename Func, typename Arg> |
171 inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) { | 132 inline void SkOnce(SkOnceFlag* once, Func f, Arg arg, void(*atExit)()) { |
172 return SkOnce(&once->done, &once->lock, f, arg, atExit); | 133 return SkOnce(&once->done, &once->lock, f, arg, atExit); |
173 } | 134 } |
174 | 135 |
175 #undef SK_ANNOTATE_BENIGN_RACE | 136 #undef SK_ANNOTATE_BENIGN_RACE |
176 | 137 |
177 #endif // SkOnce_DEFINED | 138 #endif // SkOnce_DEFINED |
OLD | NEW |