OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkAtomics_DEFINED | 8 #ifndef SkAtomics_DEFINED |
9 #define SkAtomics_DEFINED | 9 #define SkAtomics_DEFINED |
10 | 10 |
(...skipping 24 matching lines...)
35 template <typename T> | 35 template <typename T> |
36 bool sk_atomic_compare_exchange(T*, T* expected, T desired, | 36 bool sk_atomic_compare_exchange(T*, T* expected, T desired, |
37 sk_memory_order success = sk_memory_order_seq_cst, | 37 sk_memory_order success = sk_memory_order_seq_cst, |
38 sk_memory_order failure = sk_memory_order_seq_cst); | 38 sk_memory_order failure = sk_memory_order_seq_cst); |
39 | 39 |
40 template <typename T> | 40 template <typename T> |
41 T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst); | 41 T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
42 | 42 |
43 // A little wrapper class for small T (think, builtins: int, float, void*) to | 43 // A little wrapper class for small T (think, builtins: int, float, void*) to |
44 // ensure they're always used atomically. This is our stand-in for std::atomic<T>. | 44 // ensure they're always used atomically. This is our stand-in for std::atomic<T>. |
45 template <typename T> | 45 // !!! Please _really_ know what you're doing if you change default_memory_order. !!! |
| 46 template <typename T, sk_memory_order default_memory_order = sk_memory_order_seq_cst> |
46 class SkAtomic : SkNoncopyable { | 47 class SkAtomic : SkNoncopyable { |
47 public: | 48 public: |
48 SkAtomic() {} | 49 SkAtomic() {} |
49 explicit SkAtomic(const T& val) : fVal(val) {} | 50 explicit SkAtomic(const T& val) : fVal(val) {} |
50 | 51 |
51 // It is essential we return by value rather than by const&. fVal may change at any time. | 52 // It is essential we return by value rather than by const&. fVal may change at any time. |
52 T load(sk_memory_order mo = sk_memory_order_seq_cst) const { | 53 T load(sk_memory_order mo = default_memory_order) const { |
53 return sk_atomic_load(&fVal, mo); | 54 return sk_atomic_load(&fVal, mo); |
54 } | 55 } |
55 | 56 |
56 void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 57 void store(const T& val, sk_memory_order mo = default_memory_order) { |
57 sk_atomic_store(&fVal, val, mo); | 58 sk_atomic_store(&fVal, val, mo); |
58 } | 59 } |
59 | 60 |
60 // Alias for .load(sk_memory_order_seq_cst). | 61 // Alias for .load(default_memory_order). |
61 operator T() const { | 62 operator T() const { |
62 return this->load(); | 63 return this->load(); |
63 } | 64 } |
64 | 65 |
65 // Alias for .store(v, sk_memory_order_seq_cst). | 66 // Alias for .store(v, default_memory_order). |
66 T operator=(const T& v) { | 67 T operator=(const T& v) { |
67 this->store(v); | 68 this->store(v); |
68 return v; | 69 return v; |
69 } | 70 } |
70 | 71 |
71 T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 72 T fetch_add(const T& val, sk_memory_order mo = default_memory_order) { |
72 return sk_atomic_fetch_add(&fVal, val, mo); | 73 return sk_atomic_fetch_add(&fVal, val, mo); |
73 } | 74 } |
74 | 75 |
75 T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 76 T fetch_sub(const T& val, sk_memory_order mo = default_memory_order) { |
76 return sk_atomic_fetch_sub(&fVal, val, mo); | 77 return sk_atomic_fetch_sub(&fVal, val, mo); |
77 } | 78 } |
78 | 79 |
79 bool compare_exchange(T* expected, const T& desired, | 80 bool compare_exchange(T* expected, const T& desired, |
80 sk_memory_order success = sk_memory_order_seq_cst, | 81 sk_memory_order success = default_memory_order, |
81 sk_memory_order failure = sk_memory_order_seq_cst) { | 82 sk_memory_order failure = default_memory_order) { |
82 return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); | 83 return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure); |
83 } | 84 } |
84 private: | 85 private: |
85 T fVal; | 86 T fVal; |
86 }; | 87 }; |
87 | 88 |
88 // IWYU pragma: begin_exports | 89 // IWYU pragma: begin_exports |
89 #if defined(_MSC_VER) | 90 #if defined(_MSC_VER) |
90 #include "../ports/SkAtomics_std.h" | 91 #include "../ports/SkAtomics_std.h" |
91 #elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED) | 92 #elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED) |
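
Reviewer note: for context, a minimal sketch of what the new default_memory_order template parameter enables. This is hypothetical caller code, not part of this CL; it assumes sk_memory_order_relaxed is among the sk_memory_order enumerators, that "SkAtomics.h" is the right include path, and that compare_exchange() follows the std::atomic contract of refreshing *expected on failure:

    #include "SkAtomics.h"

    // Hypothetical counter whose operations default to relaxed ordering, so
    // the memory order is not repeated at every call site. (The default
    // constructor leaves fVal uninitialized, so initialize explicitly.)
    SkAtomic<int32_t, sk_memory_order_relaxed> gHits(0);

    void record_hit() {
        gHits.fetch_add(1);                          // uses the relaxed default
    }

    int32_t snapshot_hits() {
        return gHits.load(sk_memory_order_seq_cst);  // per-call override still works
    }

    // Classic compare-exchange retry loop: atomically double the counter.
    void double_hits() {
        int32_t expected = gHits.load();
        while (!gHits.compare_exchange(&expected, expected * 2)) {
            // Assuming the std::atomic-style contract, a failed exchange has
            // refreshed 'expected' with the currently observed value; retry.
        }
    }
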
(...skipping 35 matching lines...)
127 template <typename T> | 128 template <typename T> |
128 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } | 129 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } |
129 | 130 |
130 template <typename T> | 131 template <typename T> |
131 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } | 132 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } |
132 | 133 |
133 inline void sk_membar_acquire__after_atomic_dec() {} | 134 inline void sk_membar_acquire__after_atomic_dec() {} |
134 inline void sk_membar_acquire__after_atomic_conditional_inc() {} | 135 inline void sk_membar_acquire__after_atomic_conditional_inc() {} |
135 | 136 |
136 #endif//SkAtomics_DEFINED | 137 #endif//SkAtomics_DEFINED |
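
Reviewer note: a minimal sketch of the release/acquire pairing that sk_release_store() and sk_acquire_load() encode, using a hypothetical producer/consumer (not part of this CL; the include path is assumed):

    #include "SkAtomics.h"

    static int  gPayload = 0;
    static bool gReady   = false;

    void producer() {
        gPayload = 42;                    // plain write, ordered before the store below
        sk_release_store(&gReady, true);  // atomic store with release semantics
    }

    void consumer() {
        if (sk_acquire_load(&gReady)) {   // atomic load with acquire semantics
            // The acquire load pairs with the release store, so the producer's
            // write is guaranteed visible here: gPayload == 42.
        }
    }
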