Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright 2015 Google Inc. | 2 * Copyright 2015 Google Inc. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
| 5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
| 6 */ | 6 */ |
| 7 | 7 |
| 8 #ifndef SkAtomics_DEFINED | 8 #ifndef SkAtomics_DEFINED |
| 9 #define SkAtomics_DEFINED | 9 #define SkAtomics_DEFINED |
| 10 | 10 |
| (...skipping 10 matching lines...) Expand all Loading... | |
| 21 }; | 21 }; |
| 22 | 22 |
| 23 template <typename T> | 23 template <typename T> |
| 24 T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst); | 24 T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst); |
| 25 | 25 |
| 26 template <typename T> | 26 template <typename T> |
| 27 void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst); | 27 void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 28 | 28 |
| 29 template <typename T> | 29 template <typename T> |
| 30 T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst); | 30 T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 31 | 31 |
|
mtklein_C
2015/09/03 21:16:08
Can you declare sk_atomic_fetch_sub() here for documentation purposes?
herb_g
2015/09/04 17:26:12
Done.
| |
| 32 template <typename T> | 32 template <typename T> |
| 33 bool sk_atomic_compare_exchange(T*, T* expected, T desired, | 33 bool sk_atomic_compare_exchange(T*, T* expected, T desired, |
| 34 sk_memory_order success = sk_memory_order_seq_cs t, | 34 sk_memory_order success = sk_memory_order_seq_cs t, |
| 35 sk_memory_order failure = sk_memory_order_seq_cs t); | 35 sk_memory_order failure = sk_memory_order_seq_cs t); |
| 36 | 36 |
| 37 template <typename T> | 37 template <typename T> |
| 38 T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst); | 38 T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst); |
| 39 | 39 |
| 40 // A little wrapper class for small T (think, builtins: int, float, void*) to | 40 // A little wrapper class for small T (think, builtins: int, float, void*) to |
| 41 // ensure they're always used atomically. This is our stand-in for std::atomic<T>. | 41 // ensure they're always used atomically. This is our stand-in for std::atomic<T>. |
| 42 template <typename T> | 42 template <typename T> |
| 43 class SkAtomic : SkNoncopyable { | 43 class SkAtomic : SkNoncopyable { |
| 44 public: | 44 public: |
| 45 SkAtomic() {} | 45 SkAtomic() {} |
| 46 explicit SkAtomic(const T& val) : fVal(val) {} | 46 explicit SkAtomic(const T& val) : fVal(val) {} |
| 47 | 47 |
| 48 // It is essential we return by value rather than by const&. fVal may chang e at any time. | 48 // It is essential we return by value rather than by const&. fVal may chang e at any time. |
| 49 T load(sk_memory_order mo = sk_memory_order_seq_cst) const { | 49 T load(sk_memory_order mo = sk_memory_order_seq_cst) const { |
| 50 return sk_atomic_load(&fVal, mo); | 50 return sk_atomic_load(&fVal, mo); |
| 51 } | 51 } |
| 52 | 52 |
| 53 void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 53 void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
| 54 sk_atomic_store(&fVal, val, mo); | 54 sk_atomic_store(&fVal, val, mo); |
| 55 } | 55 } |
| 56 | 56 |
| 57 T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | 57 T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { |
| 58 return sk_atomic_fetch_add(&fVal, val, mo); | 58 return sk_atomic_fetch_add(&fVal, val, mo); |
| 59 } | 59 } |
| 60 | 60 |
| 61 T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) { | |
| 62 return sk_atomic_fetch_sub(&fVal, val, mo); | |
| 63 } | |
| 64 | |
| 61 bool compare_exchange(T* expected, const T& desired, | 65 bool compare_exchange(T* expected, const T& desired, |
| 62 sk_memory_order success = sk_memory_order_seq_cst, | 66 sk_memory_order success = sk_memory_order_seq_cst, |
| 63 sk_memory_order failure = sk_memory_order_seq_cst) { | 67 sk_memory_order failure = sk_memory_order_seq_cst) { |
| 64 return sk_atomic_compare_exchange(&fVal, expected, desired, success, fai lure); | 68 return sk_atomic_compare_exchange(&fVal, expected, desired, success, fai lure); |
| 65 } | 69 } |
| 66 private: | 70 private: |
| 67 T fVal; | 71 T fVal; |
| 68 }; | 72 }; |
| 69 | 73 |
| 70 // IWYU pragma: begin_exports | 74 // IWYU pragma: begin_exports |
| (...skipping 38 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 109 template <typename T> | 113 template <typename T> |
| 110 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } | 114 T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); } |
| 111 | 115 |
| 112 template <typename T> | 116 template <typename T> |
| 113 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order _release); } | 117 void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order _release); } |
| 114 | 118 |
// Deliberate no-ops — presumably retained so legacy call sites still compile;
// the atomics above carry their own ordering.  Confirm before removing.
inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}
| 117 | 121 |
| 118 #endif//SkAtomics_DEFINED | 122 #endif//SkAtomics_DEFINED |
| OLD | NEW |