OLD | NEW |
1 #ifndef SkAtomics_DEFINED | 1 #ifndef SkAtomics_DEFINED |
2 #define SkAtomics_DEFINED | 2 #define SkAtomics_DEFINED |
3 | 3 |
4 // This file is not part of the public Skia API. | 4 // This file is not part of the public Skia API. |
5 #include "SkTypes.h" | 5 #include "SkTypes.h" |
6 | 6 |
| 7 enum sk_memory_order { |
| 8 sk_memory_order_relaxed, |
| 9 sk_memory_order_consume, |
| 10 sk_memory_order_acquire, |
| 11 sk_memory_order_release, |
| 12 sk_memory_order_acq_rel, |
| 13 sk_memory_order_seq_cst, |
| 14 }; |
| 15 |
| 16 template <typename T> |
| 17 T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst); |
| 18 |
| 19 template <typename T> |
| 20 void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 21 |
| 22 template <typename T> |
| 23 T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 24 |
| 25 template <typename T> |
| 26 bool sk_atomic_compare_exchange(T*, T* expected, T desired, |
                            |  27                                 sk_memory_order success = sk_memory_order_seq_cst, |
                            |  28                                 sk_memory_order failure = sk_memory_order_seq_cst); |
7 #if defined(_MSC_VER) | 29 #if defined(_MSC_VER) |
8 #include "../ports/SkAtomics_win.h" | 30 #include "../ports/SkAtomics_std.h" |
| 31 #elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED) |
| 32 #include "../ports/SkAtomics_atomic.h" |
9 #else | 33 #else |
10 #include "../ports/SkAtomics_sync.h" | 34 #include "../ports/SkAtomics_sync.h" |
11 #endif | 35 #endif |
12 | 36 |
| 37 // From here down we have shims for our old atomics API, to be weaned off of. |
| 38 // We use the default sequentially-consistent memory order to make things simple |
| 39 // and to match the practical reality of our old _sync and _win implementations. |
| 40 |
                            |  41 inline int32_t sk_atomic_inc(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, +1); } |
                            |  42 inline int32_t sk_atomic_dec(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, -1); } |
                            |  43 inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr,  v); } |
| 44 |
                            |  45 inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); } |
| 46 |
| 47 inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) { |
| 48 return sk_atomic_compare_exchange(ptr, &expected, desired); |
| 49 } |
| 50 |
| 51 inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) { |
| 52 (void)sk_atomic_compare_exchange(ptr, &expected, desired); |
| 53 return expected; |
| 54 } |
| 55 |
| 56 inline int32_t sk_atomic_conditional_inc(int32_t* ptr) { |
| 57 int32_t prev = sk_atomic_load(ptr); |
| 58 do { |
| 59 if (0 == prev) { |
| 60 break; |
| 61 } |
| 62 } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1)); |
| 63 return prev; |
| 64 } |
| 65 |
| 66 template <typename T> |
                            |  67 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } |
| 68 |
| 69 template <typename T> |
| 70 T sk_consume_load(T* ptr) { |
| 71 // On every platform we care about, consume is the same as relaxed. |
                            |  72 // If we pass consume here, some compilers turn that into acquire, which is overkill. |
| 73 return sk_atomic_load(ptr, sk_memory_order_relaxed); |
| 74 } |
| 75 |
| 76 template <typename T> |
                            |  77 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } |
| 78 |
| 79 inline void sk_membar_acquire__after_atomic_dec() {} |
| 80 inline void sk_membar_acquire__after_atomic_conditional_inc() {} |
| 81 |
13 #endif//SkAtomics_DEFINED | 82 #endif//SkAtomics_DEFINED |
OLD | NEW |