OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkAtomics_DEFINED | 8 #ifndef SkAtomics_DEFINED |
9 #define SkAtomics_DEFINED | 9 #define SkAtomics_DEFINED |
10 | 10 |
11 // This file is not part of the public Skia API. | 11 // This file is not part of the public Skia API. |
12 #include "SkTypes.h" | 12 #include "SkTypes.h" |
| 13 #include <atomic> |
| 14 |
| 15 // ~~~~~~~~ APIs ~~~~~~~~~ |
13 | 16 |
14 enum sk_memory_order { | 17 enum sk_memory_order { |
15 sk_memory_order_relaxed, | 18 sk_memory_order_relaxed, |
16 sk_memory_order_consume, | 19 sk_memory_order_consume, |
17 sk_memory_order_acquire, | 20 sk_memory_order_acquire, |
18 sk_memory_order_release, | 21 sk_memory_order_release, |
19 sk_memory_order_acq_rel, | 22 sk_memory_order_acq_rel, |
20 sk_memory_order_seq_cst, | 23 sk_memory_order_seq_cst, |
21 }; | 24 }; |
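Note for reviewers: sk_memory_order is meant to mirror std::memory_order value for value, which is what makes the bare (std::memory_order)mo casts in the implementations below sound. A minimal compile-time check of that assumption might look like this (hypothetical, not part of this CL):

    static_assert((int)sk_memory_order_relaxed == (int)std::memory_order_relaxed, "");
    static_assert((int)sk_memory_order_consume == (int)std::memory_order_consume, "");
    static_assert((int)sk_memory_order_acquire == (int)std::memory_order_acquire, "");
    static_assert((int)sk_memory_order_release == (int)std::memory_order_release, "");
    static_assert((int)sk_memory_order_acq_rel == (int)std::memory_order_acq_rel, "");
    static_assert((int)sk_memory_order_seq_cst == (int)std::memory_order_seq_cst, "");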
22 | 25 |
(...skipping 56 matching lines...)
79 | 82 |
80 bool compare_exchange(T* expected, const T& desired, | 83 bool compare_exchange(T* expected, const T& desired, |
81 sk_memory_order success = default_memory_order, | 84 sk_memory_order success = default_memory_order, |
82 sk_memory_order failure = default_memory_order) { | 85 sk_memory_order failure = default_memory_order) { |
83 return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); | 86 return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); |
84 } | 87 } |
85 private: | 88 private: |
86 T fVal; | 89 T fVal; |
87 }; | 90 }; |
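A hedged usage sketch of the wrapper's compare_exchange (hypothetical caller; the value constructor is assumed from the elided portion of the class above):

    // Claim a one-shot flag exactly once across threads.
    SkAtomic<int32_t> gClaimed(0);

    bool try_claim() {
        int32_t expected = 0;
        // True for exactly one caller; on failure, expected is refreshed
        // with the value actually stored (here, 1).
        return gClaimed.compare_exchange(&expected, 1,
                                         sk_memory_order_acq_rel,
                                         sk_memory_order_relaxed);
    }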
88 | 91 |
89 // IWYU pragma: begin_exports | 92 // ~~~~~~~~ Implementations ~~~~~~~~~ |
90 #if defined(_MSC_VER) | 93 |
91 #include "../ports/SkAtomics_std.h" | 94 template <typename T> |
92 #elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED) | 95 T sk_atomic_load(const T* ptr, sk_memory_order mo) { |
93 #include "../ports/SkAtomics_atomic.h" | 96 SkASSERT(mo == sk_memory_order_relaxed || |
94 #else | 97 mo == sk_memory_order_seq_cst || |
95 #include "../ports/SkAtomics_sync.h" | 98 mo == sk_memory_order_acquire || |
96 #endif | 99 mo == sk_memory_order_consume); |
97 // IWYU pragma: end_exports | 100 const std::atomic<T>* ap = reinterpret_cast<const std::atomic<T>*>(ptr); |
| 101 return std::atomic_load_explicit(ap, (std::memory_order)mo); |
| 102 } |
| 103 |
| 104 template <typename T> |
| 105 void sk_atomic_store(T* ptr, T val, sk_memory_order mo) { |
| 106 SkASSERT(mo == sk_memory_order_relaxed || |
| 107 mo == sk_memory_order_seq_cst || |
| 108 mo == sk_memory_order_release); |
| 109 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 110 return std::atomic_store_explicit(ap, val, (std::memory_order)mo); |
| 111 } |
| 112 |
| 113 template <typename T> |
| 114 T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order mo) { |
| 115 // All values of mo are valid. |
| 116 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 117 return std::atomic_fetch_add_explicit(ap, val, (std::memory_order)mo); |
| 118 } |
| 119 |
| 120 template <typename T> |
| 121 T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order mo) { |
| 122 // All values of mo are valid. |
| 123 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 124 return std::atomic_fetch_sub_explicit(ap, val, (std::memory_order)mo); |
| 125 } |
| 126 |
| 127 template <typename T> |
| 128 bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, |
| 129 sk_memory_order success, |
| 130 sk_memory_order failure) { |
| 131 // All values of success are valid. |
| 132 SkASSERT(failure == sk_memory_order_relaxed || |
| 133 failure == sk_memory_order_seq_cst || |
| 134 failure == sk_memory_order_acquire || |
| 135 failure == sk_memory_order_consume); |
| 136 SkASSERT(failure <= success); |
| 137 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 138 return std::atomic_compare_exchange_strong_explicit(ap, expected, desired, |
| 139 (std::memory_order)success, |
| 140 (std::memory_order)failure); |
| 141 } |
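Typical call pattern, since on failure *expected is rewritten with the value actually observed: drive compare_exchange in a loop. A hypothetical sketch:

    // Atomically raise *dst to at least val.
    void sk_atomic_set_max(int32_t* dst, int32_t val) {
        int32_t prev = sk_atomic_load(dst, sk_memory_order_relaxed);
        while (prev < val &&
               !sk_atomic_compare_exchange(dst, &prev, val,
                                           sk_memory_order_acq_rel,
                                           sk_memory_order_relaxed)) {
            // Failed: prev now holds the current value; retest and retry.
        }
    }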
| 142 |
| 143 template <typename T> |
| 144 T sk_atomic_exchange(T* ptr, T val, sk_memory_order mo) { |
| 145 // All values of mo are valid. |
| 146 std::atomic<T>* ap = reinterpret_cast<std::atomic<T>*>(ptr); |
| 147 return std::atomic_exchange_explicit(ap, val, (std::memory_order)mo); |
| 148 } |
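All of these shims reinterpret a plain T* as std::atomic<T>*, which is only sound if std::atomic<T> has the same size and layout as T; that holds in practice for the integral types used here. A guard one could add to document the assumption (hypothetical):

    template <typename T>
    void sk_atomic_layout_check() {
        static_assert(sizeof(std::atomic<T>) == sizeof(T),
                      "the reinterpret_cast in these shims requires identical layout");
    }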
| 149 |
| 150 // ~~~~~~~~ Legacy APIs ~~~~~~~~~ |
98 | 151 |
99 // From here down we have shims for our old atomics API, to be weaned off of. | 152 // From here down we have shims for our old atomics API, to be weaned off of. |
100 // We use the default sequentially-consistent memory order to make things simple | 153 // We use the default sequentially-consistent memory order to make things simple |
101 // and to match the practical reality of our old _sync and _win implementations. | 154 // and to match the practical reality of our old _sync and _win implementations. |
102 | 155 |
103 inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); } | 156 inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); } |
104 inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); } | 157 inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); } |
105 inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr, v); } | 158 inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr, v); } |
106 | 159 |
107 inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); } | 160 inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); } |
(...skipping 20 matching lines...)
128 template <typename T> | 181 template <typename T> |
129 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } | 182 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } |
130 | 183 |
131 template <typename T> | 184 template <typename T> |
132 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } | 185 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } |
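These two pair up for the classic publication pattern: a release store of a flag makes earlier plain writes visible to whoever acquire-loads the flag. A hypothetical sketch:

    int32_t gPayload = 0;
    int32_t gReady   = 0;

    void producer() {
        gPayload = 42;                          // plain write...
        sk_release_store(&gReady, int32_t(1));  // ...published by the release store.
    }

    void consumer() {
        if (sk_acquire_load(&gReady)) {   // acquire pairs with the release above,
            SkASSERT(gPayload == 42);     // so the payload write is visible here.
        }
    }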
133 | 186 |
134 inline void sk_membar_acquire__after_atomic_dec() {} | 187 inline void sk_membar_acquire__after_atomic_dec() {} |
135 inline void sk_membar_acquire__after_atomic_conditional_inc() {} | 188 inline void sk_membar_acquire__after_atomic_conditional_inc() {} |
136 | 189 |
137 #endif//SkAtomics_DEFINED | 190 #endif//SkAtomics_DEFINED |