/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

// This file is not part of the public Skia API.
#include "SkTypes.h"

enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_consume,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_acq_rel,
    sk_memory_order_seq_cst,
};

template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);
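
// Usage sketch (illustrative only; example_atomic_max is hypothetical, not part of
// this header): the canonical compare-exchange retry loop. On failure,
// sk_atomic_compare_exchange writes the value it observed back into *expected,
// so the loop simply retries against fresh data.
//
//   static inline int32_t example_atomic_max(int32_t* ptr, int32_t limit) {
//       int32_t prev = sk_atomic_load(ptr, sk_memory_order_relaxed);
//       while (prev < limit && !sk_atomic_compare_exchange(ptr, &prev, limit)) {
//           // prev now holds the value someone else just wrote; try again.
//       }
//       return prev;
//   }
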
// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically. This is our stand-in for std::atomic<T>.
template <typename T>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}
    explicit SkAtomic(const T& val) : fVal(val) {}

    // It is essential we return by value rather than by const&. fVal may change at any time.
    T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        sk_atomic_store(&fVal, val, mo);
    }

    // Alias for .load(sk_memory_order_seq_cst).
    operator T() const {
        return this->load();
    }

    // Alias for .store(v, sk_memory_order_seq_cst).
    T operator=(const T& v) {
        this->store(v);
        return v;
    }

    T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        return sk_atomic_fetch_add(&fVal, val, mo);
    }

    T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        return sk_atomic_fetch_sub(&fVal, val, mo);
    }

    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = sk_memory_order_seq_cst,
                          sk_memory_order failure = sk_memory_order_seq_cst) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }
private:
    T fVal;
};
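
// Usage sketch (illustrative only; gExampleHits and the example_* helpers are
// hypothetical): an SkAtomic<T> reads like a plain T, but every access goes
// through the atomic free functions above.
//
//   SkAtomic<int32_t> gExampleHits(0);
//
//   void example_record_hit() { gExampleHits.fetch_add(1, sk_memory_order_relaxed); }
//   int32_t example_hits()    { return gExampleHits.load(); }   // seq_cst by default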

// IWYU pragma: begin_exports
#if defined(_MSC_VER)
    #include "../ports/SkAtomics_std.h"
#elif !defined(SK_BUILD_FOR_IOS) && defined(__ATOMIC_RELAXED)
    #include "../ports/SkAtomics_atomic.h"
#else
    #include "../ports/SkAtomics_sync.h"
#endif
// IWYU pragma: end_exports

// From here down we have shims for our old atomics API, to be weaned off of.
// We use the default sequentially-consistent memory order to make things simple
// and to match the practical reality of our old _sync and _win implementations.

inline int32_t sk_atomic_inc(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, +1); }
inline int32_t sk_atomic_dec(int32_t* ptr)            { return sk_atomic_fetch_add(ptr, -1); }
inline int32_t sk_atomic_add(int32_t* ptr, int32_t v) { return sk_atomic_fetch_add(ptr,  v); }

inline int64_t sk_atomic_inc(int64_t* ptr) { return sk_atomic_fetch_add<int64_t>(ptr, +1); }

inline bool sk_atomic_cas(int32_t* ptr, int32_t expected, int32_t desired) {
    return sk_atomic_compare_exchange(ptr, &expected, desired);
}

inline void* sk_atomic_cas(void** ptr, void* expected, void* desired) {
    (void)sk_atomic_compare_exchange(ptr, &expected, desired);
    return expected;
}
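
// Note that the void* overload above reports its result in the value-returning
// CAS style of the old shims: it returns the value observed in *ptr, which
// equals `expected` exactly when the swap succeeded.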

inline int32_t sk_atomic_conditional_inc(int32_t* ptr) {
    int32_t prev = sk_atomic_load(ptr);
    do {
        if (0 == prev) {
            break;
        }
    } while(!sk_atomic_compare_exchange(ptr, &prev, prev+1));
    return prev;
}
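
// Usage sketch (illustrative only; example_try_ref is hypothetical): conditional
// increment only bumps a count that is still nonzero, which is the building block
// for promoting a weak reference. Callers test the returned previous value.
//
//   static inline bool example_try_ref(int32_t* refcnt) {
//       return 0 != sk_atomic_conditional_inc(refcnt);
//   }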

template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }

template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }
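
// Usage sketch (illustrative only; the example_* names are hypothetical): a writer
// finishes building a value and publishes a pointer with sk_release_store; any
// reader that observes that pointer via sk_acquire_load also observes the
// completed value.
//
//   static int  gExampleValue;
//   static int* gExamplePtr = nullptr;
//
//   static inline void example_publish() {
//       gExampleValue = 42;                            // written before the pointer
//       sk_release_store(&gExamplePtr, &gExampleValue);
//   }
//   static inline int* example_try_consume() {
//       return sk_acquire_load(&gExamplePtr);          // non-null => 42 is visible
//   }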

inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}

#endif//SkAtomics_DEFINED