| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef SkAtomics_DEFINED | 8 #ifndef SkAtomics_DEFINED |
| 9 #define SkAtomics_DEFINED | 9 #define SkAtomics_DEFINED |
| 10 | 10 |
| (...skipping 12 matching lines...) |
| 23 template <typename T> | 23 template <typename T> |
| 24 T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst); | 24 T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst); |
| 25 | 25 |
| 26 template <typename T> | 26 template <typename T> |
| 27 void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst); | 27 void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 28 | 28 |
| 29 template <typename T> | 29 template <typename T> |
| 30 T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst); | 30 T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 31 | 31 |
| 32 template <typename T> | 32 template <typename T> |
| 33 T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 34 |
| 35 template <typename T> |
| 33 bool sk_atomic_compare_exchange(T*, T* expected, T desired, | 36 bool sk_atomic_compare_exchange(T*, T* expected, T desired, |
| 34 sk_memory_order success = sk_memory_order_seq_cst, | 37 sk_memory_order success = sk_memory_order_seq_cst, |
| 35 sk_memory_order failure = sk_memory_order_seq_cst); | 38 sk_memory_order failure = sk_memory_order_seq_cst); |
| 36 | 39 |
| 37 template <typename T> | 40 template <typename T> |
| 38 T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst); | 41 T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 39 | 42 |
| 40 // A little wrapper class for small T (think, builtins: int, float, void*) to | 43 // A little wrapper class for small T (think, builtins: int, float, void*) to |
| 41 // ensure they're always used atomically. This is our stand-in for std::atomic<T>. | 44 // ensure they're always used atomically. This is our stand-in for std::atomic<T>. |
| 42 template <typename T> | 45 template <typename T> |
| 43 class SkAtomic : SkNoncopyable { | 46 class SkAtomic : SkNoncopyable { |
| 44 public: | 47 public: |
| 45 SkAtomic() {} | 48 SkAtomic() {} |
| 46 explicit SkAtomic(const T& val) : fVal(val) {} | 49 explicit SkAtomic(const T& val) : fVal(val) {} |
| 47 | 50 |
| 48 // It is essential we return by value rather than by const&. fVal may change at any time. | 51 // It is essential we return by value rather than by const&. fVal may change at any time. |
| 49 T load(sk_memory_order mo = sk_memory_order_seq_cst) const { | 52 T load(sk_memory_order mo = sk_memory_order_seq_cst) const { |
| 50 return sk_atomic_load(&fVal, mo); | 53 return sk_atomic_load(&fVal, mo); |
| 51 } | 54 } |
| 52 | 55 |
| 53 void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 56 void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
| 54 sk_atomic_store(&fVal, val, mo); | 57 sk_atomic_store(&fVal, val, mo); |
| 55 } | 58 } |
| 56 | 59 |
| 57 T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 60 T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
| 58 return sk_atomic_fetch_add(&fVal, val, mo); | 61 return sk_atomic_fetch_add(&fVal, val, mo); |
| 59 } | 62 } |
| 60 | 63 |
| 64 T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
| 65 return sk_atomic_fetch_sub(&fVal, val, mo); |
| 66 } |
| 67 |
| 61 bool compare_exchange(T* expected, const T& desired, | 68 bool compare_exchange(T* expected, const T& desired, |
| 62 sk_memory_order success = sk_memory_order_seq_cst, | 69 sk_memory_order success = sk_memory_order_seq_cst, |
| 63 sk_memory_order failure = sk_memory_order_seq_cst) { | 70 sk_memory_order failure = sk_memory_order_seq_cst) { |
| 64 return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); | 71 return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); |
| 65 } | 72 } |
| 66 private: | 73 private: |
| 67 T fVal; | 74 T fVal; |
| 68 }; | 75 }; |
| 69 | 76 |
| 70 // IWYU pragma: begin_exports | 77 // IWYU pragma: begin_exports |
| (...skipping 38 matching lines...) |
| 109 template <typename T> | 116 template <typename T> |
| 110 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } | 117 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } |
| 111 | 118 |
| 112 template <typename T> | 119 template <typename T> |
| 113 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } | 120 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } |
| 114 | 121 |
| 115 inline void sk_membar_acquire__after_atomic_dec() {} | 122 inline void sk_membar_acquire__after_atomic_dec() {} |
| 116 inline void sk_membar_acquire__after_atomic_conditional_inc() {} | 123 inline void sk_membar_acquire__after_atomic_conditional_inc() {} |
| 117 | 124 |
| 118 #endif//SkAtomics_DEFINED | 125 #endif//SkAtomics_DEFINED |
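
The main addition in this CL is sk_atomic_fetch_sub and the matching SkAtomic::fetch_sub wrapper. A typical use is a reference-count decrement, where fetch_sub's return value (the count *before* the subtraction) tells the caller whether it just dropped the last reference. Below is a minimal, self-contained sketch of that pattern written against std::atomic, which the header's own comment names as the model for SkAtomic<T>; the RefCounted class and its names are hypothetical illustration, not part of this CL.

    // Sketch only: std::atomic standing in for SkAtomic<int32_t>.
    #include <atomic>
    #include <cstdio>

    class RefCounted {
    public:
        RefCounted() : fRefCnt(1) {}

        void ref() {
            // Plain increment; atomicity is enough, no ordering constraint
            // (sk_memory_order_relaxed in the header's terms).
            fRefCnt.fetch_add(1, std::memory_order_relaxed);
        }

        void unref() {
            // fetch_sub returns the value before the subtraction, so a
            // return of 1 means this call dropped the count to zero. The
            // acquire half of acq_rel is what the header's
            // sk_membar_acquire__after_atomic_dec() hints at: it makes all
            // prior writes to the object visible before deletion.
            if (fRefCnt.fetch_sub(1, std::memory_order_acq_rel) == 1) {
                delete this;
            }
        }

    private:
        ~RefCounted() { std::printf("destroyed\n"); }
        std::atomic<int32_t> fRefCnt;
    };

    int main() {
        RefCounted* obj = new RefCounted();  // count == 1
        obj->ref();                          // count == 2
        obj->unref();                        // count == 1
        obj->unref();                        // count == 0, deletes obj
        return 0;
    }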
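sk_atomic_compare_exchange follows the std::atomic protocol: on failure it rewrites *expected with the value actually found, so a retry loop needs no separate reload before the next attempt. Here is a sketch of such a loop, again using std::atomic as the stand-in; the atomic_max helper is invented for illustration and is not part of the header.

    #include <atomic>
    #include <cassert>

    // Atomically raise *val to at least `limit`; returns the value observed
    // before this call took effect.
    int atomic_max(std::atomic<int>* val, int limit) {
        int expected = val->load(std::memory_order_relaxed);
        while (expected < limit &&
               !val->compare_exchange_weak(expected, limit,
                                           std::memory_order_acq_rel,    // success
                                           std::memory_order_relaxed)) { // failure
            // On failure compare_exchange_weak updated `expected` to the
            // current value; loop around and decide whether a store is
            // still needed.
        }
        return expected;
    }

    int main() {
        std::atomic<int> v(10);
        atomic_max(&v, 42);
        assert(v.load() == 42);
        atomic_max(&v, 7);   // 42 >= 7, so no change
        assert(v.load() == 42);
        return 0;
    }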