OLD | NEW |
1 #ifndef SkAtomics_DEFINED | 1 #ifndef SkAtomics_DEFINED |
2 #define SkAtomics_DEFINED | 2 #define SkAtomics_DEFINED |
3 | 3 |
4 // This file is not part of the public Skia API. | 4 // This file is not part of the public Skia API. |
5 #include "SkTypes.h" | 5 #include "SkTypes.h" |
6 | 6 |
7 enum sk_memory_order { | 7 enum sk_memory_order { |
8 sk_memory_order_relaxed, | 8 sk_memory_order_relaxed, |
9 sk_memory_order_consume, | 9 sk_memory_order_consume, |
10 sk_memory_order_acquire, | 10 sk_memory_order_acquire, |
(...skipping 16 matching lines...) Expand all Loading... |
27 sk_memory_order success = sk_memory_order_seq_cs
t, | 27 sk_memory_order success = sk_memory_order_seq_cs
t, |
28 sk_memory_order failure = sk_memory_order_seq_cs
t); | 28 sk_memory_order failure = sk_memory_order_seq_cs
t); |
// Pick the platform backend that implements the sk_atomic_* primitives
// declared above.
#if defined(_MSC_VER)
    // MSVC: use the <atomic>-based port.
    #include "../ports/SkAtomics_std.h"
#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
    // GCC/Clang with the __atomic builtins available (excluded on iOS).
    #include "../ports/SkAtomics_atomic.h"
#else
    // Fallback: the older __sync builtins.
    #include "../ports/SkAtomics_sync.h"
#endif
36 | 36 |
| 37 inline void sk_memory_barrier(sk_memory_order mo) { |
| 38 int junk; |
| 39 (void)sk_atomic_fetch_add(&junk, 42, mo); |
| 40 } |
| 41 |
37 // From here down we have shims for our old atomics API, to be weaned off of. | 42 // From here down we have shims for our old atomics API, to be weaned off of. |
38 // We use the default sequentially-consistent memory order to make things simple | 43 // We use the default sequentially-consistent memory order to make things simple |
39 // and to match the practical reality of our old _sync and _win implementations. | 44 // and to match the practical reality of our old _sync and _win implementations. |
40 | 45 |
// Legacy 32-bit shims over the new atomics API; each forwards to
// sk_atomic_fetch_add with the default (sequentially-consistent) memory
// order and returns the value *ptr held BEFORE the operation.
inline int32_t sk_atomic_inc(int32_t* ptr) { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr) { return sk_atomic_fetch_add(ptr, -1); }
inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr, v); }
44 | 49 |
// 64-bit overload of the legacy increment shim; returns the prior value.
// The explicit <int64_t> template argument keeps the int literal +1 from
// steering template deduction to the wrong type.
inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }
46 | 51 |
(...skipping 26 matching lines...) Expand all Loading... |
73 return sk_atomic_load(ptr, sk_memory_order_relaxed); | 78 return sk_atomic_load(ptr, sk_memory_order_relaxed); |
74 } | 79 } |
75 | 80 |
76 template <typename T> | 81 template <typename T> |
77 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order
_release); } | 82 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order
_release); } |
78 | 83 |
// Legacy no-op barriers: the shims above already use sequentially-consistent
// ordering, so no additional fencing is required. Kept so old callers link.
inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}
81 | 86 |
82 #endif//SkAtomics_DEFINED | 87 #endif//SkAtomics_DEFINED |
OLD | NEW |