OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkAtomics_DEFINED | 8 #ifndef SkAtomics_DEFINED |
9 #define SkAtomics_DEFINED | 9 #define SkAtomics_DEFINED |
10 | 10 |
(...skipping 39 matching lines...) |
50 | 50 |
51 // It is essential we return by value rather than by const&. fVal may change at any time. | 51 // It is essential we return by value rather than by const&. fVal may change at any time. |
52 T load(sk_memory_order mo = sk_memory_order_seq_cst) const { | 52 T load(sk_memory_order mo = sk_memory_order_seq_cst) const { |
53 return sk_atomic_load(&fVal, mo); | 53 return sk_atomic_load(&fVal, mo); |
54 } | 54 } |
55 | 55 |
56 void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 56 void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
57 sk_atomic_store(&fVal, val, mo); | 57 sk_atomic_store(&fVal, val, mo); |
58 } | 58 } |
59 | 59 |
| 60 // Alias for .load(sk_memory_order_seq_cst). |
| 61 operator T() const { |
| 62 return load(); |
mtklein 2015/09/28 15:10:18: this->
| 63 } |
| 64 |
| 65 // Alias for .store(v, sk_memory_order_seq_cst). |
| 66 void operator=(const T& v) { |
| 67 store(v); |
mtklein 2015/09/28 15:10:18: this->
| 68 } |
| 69 |
60 T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 70 T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
61 return sk_atomic_fetch_add(&fVal, val, mo); | 71 return sk_atomic_fetch_add(&fVal, val, mo); |
62 } | 72 } |
63 | 73 |
64 T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 74 T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
65 return sk_atomic_fetch_sub(&fVal, val, mo); | 75 return sk_atomic_fetch_sub(&fVal, val, mo); |
66 } | 76 } |
67 | 77 |
68 bool compare_exchange(T* expected, const T& desired, | 78 bool compare_exchange(T* expected, const T& desired, |
69 sk_memory_order success = sk_memory_order_seq_cst, | 79 sk_memory_order success = sk_memory_order_seq_cst, |
(...skipping 46 matching lines...) |
116 template <typename T> | 126 template <typename T> |
117 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } | 127 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } |
118 | 128 |
119 template <typename T> | 129 template <typename T> |
120 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } | 130 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); } |
121 | 131 |
122 inline void sk_membar_acquire__after_atomic_dec() {} | 132 inline void sk_membar_acquire__after_atomic_dec() {} |
123 inline void sk_membar_acquire__after_atomic_conditional_inc() {} | 133 inline void sk_membar_acquire__after_atomic_conditional_inc() {} |
124 | 134 |
125 #endif//SkAtomics_DEFINED | 135 #endif//SkAtomics_DEFINED |
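
A minimal usage sketch (not part of this patch) of the operators the change adds to SkAtomic<T>; the counter name and include path are assumed for illustration:

    #include "SkAtomics.h"

    static SkAtomic<int32_t> gCount;   // hypothetical counter, for illustration only

    void example() {
        gCount = 5;                    // alias for gCount.store(5), seq_cst ordering
        int32_t snapshot = gCount;     // alias for gCount.load(), seq_cst ordering
        gCount.fetch_add(1);           // explicit memory orders remain available
        (void)snapshot;                // silence unused-variable warnings
    }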