OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkAtomics_DEFINED | 8 #ifndef SkAtomics_DEFINED |
9 #define SkAtomics_DEFINED | 9 #define SkAtomics_DEFINED |
10 | 10 |
(...skipping 141 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
152 // From here down we have shims for our old atomics API, to be weaned off of. | 152 // From here down we have shims for our old atomics API, to be weaned off of. |
153 // We use the default sequentially-consistent memory order to make things simple | 153 // We use the default sequentially-consistent memory order to make things simple |
154 // and to match the practical reality of our old _sync and _win implementations. | 154 // and to match the practical reality of our old _sync and _win implementations. |
155 | 155 |
156 inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_a
dd(ptr, +1); } | 156 inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_a
dd(ptr, +1); } |
157 inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_a
dd(ptr, -1); } | 157 inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_a
dd(ptr, -1); } |
158 inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_a
dd(ptr, v); } | 158 inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_a
dd(ptr, v); } |
159 | 159 |
160 inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>
(ptr, +1); } | 160 inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>
(ptr, +1); } |
161 | 161 |
162 inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) { | |
163 return sk_atomic_compare_exchange(ptr, &expected, desired); | |
164 } | |
165 | |
166 inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) { | |
167 (void)sk_atomic_compare_exchange(ptr, &expected, desired); | |
168 return expected; | |
169 } | |
170 | |
171 inline int32_t sk_atomic_conditional_inc(int32_t* ptr) { | |
172 int32_t prev = sk_atomic_load(ptr); | |
173 do { | |
174 if (0 == prev) { | |
175 break; | |
176 } | |
177 } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1)); | |
178 return prev; | |
179 } | |
180 | |
181 template <typename T> | |
182 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire);
} | |
183 | |
184 template <typename T> | |
185 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order
_release); } | |
186 | |
// Intentionally empty legacy barrier hook: the sequentially-consistent
// defaults used by these shims make a separate fence unnecessary.
inline void sk_membar_acquire__after_atomic_dec() {}
// Intentionally empty legacy barrier hook: the sequentially-consistent
// defaults used by these shims make a separate fence unnecessary.
inline void sk_membar_acquire__after_atomic_conditional_inc() {}
189 | |
190 #endif//SkAtomics_DEFINED | 162 #endif//SkAtomics_DEFINED |
OLD | NEW |