1 // Copyright (c) 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 // This file is an internal atomic implementation; use atomicops.h instead.
6 //
7 // This implementation uses the C11-style atomic free functions from C++11's
8 // <atomic> header instead of the C++11 atomic types, because the code base is
9 // currently written assuming atomicity revolves around accesses rather than
10 // C++11's memory locations. The burden is on the programmer to ensure that all
11 // memory locations accessed atomically are never accessed non-atomically (tsan
12 // should help with this).
13 //
14 // Of note in this implementation:
15 // * All NoBarrier variants are implemented as relaxed.
16 // * All Barrier variants are implemented as sequentially-consistent.
17 // * Compare exchange's failure ordering is always the same as the success one
18 //   (except for release, which fails as acquire): using a weaker ordering is
19 //   only valid under certain uses of compare exchange.
Dmitry Vyukov (2014/10/07 08:07:04): This is not correct. failure order acquire is ille…
hboehm (2014/10/07 18:57:02): Dmitry raises an interesting question, which shoul…
20 // * Acquire store doesn't exist in the C11 memory model; it is instead
21 //   implemented as an acquire exchange.
Dmitry Vyukov (2014/10/07 08:07:04): This is not correct. When it comes to these weird…
hboehm (2014/10/07 18:57:02): I've encountered similar issues in Android. As fa…
22 // * Release load doesn't exist in the C11 memory model; it is instead
Dmitry Vyukov (2014/10/07 08:07:04): This is not correct. see comment for acquire store…
23 //   implemented as a release increment of zero.
24 // * Atomic increment is expected to return the post-incremented value, whereas
25 //   C11 fetch add returns the previous value. The implementation therefore
26 //   needs to increment twice (which the compiler should be able to detect and
Dmitry Vyukov (2014/10/07 08:07:04): how should a compiler optimize it? what code shoul…
morisset (2014/10/07 18:27:44): On ARM/Power, it is possible to only do the additi…
27 // optimize). | |
28
29 #ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
30 #define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
31
32 #include <atomic>
33
34 namespace base {
35 namespace subtle {
36
37 // This implementation is transitional and maintains the original API for
38 // atomicops.h. This requires casting memory locations to the atomic types, and
39 // assumes that the API and the C++11 implementation are layout-compatible,
40 // which isn't true for all implementations or hardware platforms. The static
41 // assertion should detect this issue; if it fires, this header shouldn't be
42 // used.
43 //
44 // TODO(jfb) If this header manages to stay committed then the API should be
45 // modified, and all call sites updated.
46 typedef volatile std::atomic<Atomic32>* AtomicLocation32;
47 static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
48     "incompatible 32-bit atomic layout");
49
50 inline void MemoryBarrier() {
51   std::atomic_thread_fence(std::memory_order_seq_cst);
52 }
53
54 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
55     Atomic32 old_value,
56     Atomic32 new_value) {
57   auto expected = old_value;
58   std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
59       &expected,
60       new_value,
61       std::memory_order_relaxed,
62       std::memory_order_relaxed);
63   return expected;
64 }
65
66 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
67     Atomic32 new_value) {
68   return std::atomic_exchange_explicit(
69       (AtomicLocation32)ptr, new_value, std::memory_order_relaxed);
70 }
71
72 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
73     Atomic32 increment) {
74   return increment + std::atomic_fetch_add_explicit((AtomicLocation32)ptr,
75       increment,
76       std::memory_order_relaxed);
77 }
78
79 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
80     Atomic32 increment) {
81   return increment + std::atomic_fetch_add((AtomicLocation32)ptr, increment);
82 }
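A small aside on the Barrier variant above (not part of the CL): the non-_explicit std::atomic_fetch_add free function is specified to behave as if std::memory_order_seq_cst were passed, which is what makes Barrier_AtomicIncrement sequentially consistent, as stated in the file comment. A sketch:

    #include <atomic>

    void SeqCstAdd(std::atomic<int>* location, int increment) {
      std::atomic_fetch_add(location, increment);
      // Equivalently:
      //   std::atomic_fetch_add_explicit(location, increment,
      //                                  std::memory_order_seq_cst);
    }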
83
84 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
85     Atomic32 old_value,
86     Atomic32 new_value) {
87   auto expected = old_value;
88   std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
89       &expected,
90       new_value,
91       std::memory_order_acquire,
92       std::memory_order_acquire);
93   return expected;
94 }
95
96 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
97     Atomic32 old_value,
98     Atomic32 new_value) {
99   auto expected = old_value;
100   std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
101       &expected,
102       new_value,
103       std::memory_order_release,
104       std::memory_order_acquire);
105   return expected;
106 }
107
108 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
109   std::atomic_store_explicit(
110       (AtomicLocation32)ptr, value, std::memory_order_relaxed);
111 }
112
113 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
114   (void)std::atomic_exchange_explicit(
115       (AtomicLocation32)ptr, value, std::memory_order_acquire);
116 }
117
118 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
119   std::atomic_store_explicit(
120       (AtomicLocation32)ptr, value, std::memory_order_release);
121 }
122
123 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
124   return std::atomic_load_explicit((AtomicLocation32)ptr,
125       std::memory_order_relaxed);
126 }
127
128 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
129   return std::atomic_load_explicit((AtomicLocation32)ptr,
130       std::memory_order_acquire);
131 }
132
133 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
134   return std::atomic_fetch_add_explicit(
135       (AtomicLocation32)ptr, (Atomic32)0, std::memory_order_release);
136 }
137
138 #ifdef ARCH_CPU_64_BITS
139 // 64-bit versions of the operations.
140 // See the 32-bit versions for comments.
141
142 typedef volatile std::atomic<Atomic64>* AtomicLocation64;
143 static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
144     "incompatible 64-bit atomic layout");
145
146 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
147     Atomic64 old_value,
148     Atomic64 new_value) {
149   auto expected = old_value;
150   std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr,
151       &expected,
152       new_value,
153       std::memory_order_relaxed,
154       std::memory_order_relaxed);
155   return expected;
156 }
157
158 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
159     Atomic64 new_value) {
160   return std::atomic_exchange_explicit(
161       (AtomicLocation64)ptr, new_value, std::memory_order_relaxed);
162 }
163
164 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
165     Atomic64 increment) {
166   return increment + std::atomic_fetch_add_explicit((AtomicLocation64)ptr,
167       increment,
168       std::memory_order_relaxed);
169 }
170
171 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
172     Atomic64 increment) {
173   return increment + std::atomic_fetch_add((AtomicLocation64)ptr, increment);
174 }
175
176 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
177     Atomic64 old_value,
178     Atomic64 new_value) {
179   auto expected = old_value;
180   std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr,
181       &expected,
182       new_value,
183       std::memory_order_acquire,
184       std::memory_order_acquire);
185   return expected;
186 }
187
188 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
189     Atomic64 old_value,
190     Atomic64 new_value) {
191   auto expected = old_value;
192   std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr,
193       &expected,
194       new_value,
195       std::memory_order_release,
196       std::memory_order_acquire);
197   return expected;
198 }
199
200 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
201   std::atomic_store_explicit(
202       (AtomicLocation64)ptr, value, std::memory_order_relaxed);
203 }
204
205 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
206   (void)std::atomic_exchange_explicit(
207       (AtomicLocation64)ptr, value, std::memory_order_acquire);
208 }
209
210 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
211   std::atomic_store_explicit(
212       (AtomicLocation64)ptr, value, std::memory_order_release);
213 }
214
215 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
216   return std::atomic_load_explicit((AtomicLocation64)ptr,
217       std::memory_order_relaxed);
218 }
219
220 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
221   return std::atomic_load_explicit((AtomicLocation64)ptr,
222       std::memory_order_acquire);
223 }
224
225 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
226   return std::atomic_fetch_add_explicit(
227       (AtomicLocation64)ptr, (Atomic64)0, std::memory_order_release);
228 }
229
230 #endif  // ARCH_CPU_64_BITS
231 }  // namespace subtle
232 }  // namespace base
233
234 #endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
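For context on how the acquire/release pairs defined in this header are meant to be used, here is a hypothetical caller (illustrative only, not part of this CL; the g_flag/g_data globals and function names are made up):

    #include "base/atomicops.h"

    base::subtle::Atomic32 g_flag = 0;
    int g_data = 0;

    void Producer() {
      g_data = 42;                              // Plain write...
      base::subtle::Release_Store(&g_flag, 1);  // ...published by a release store.
    }

    void Consumer() {
      if (base::subtle::Acquire_Load(&g_flag) == 1) {
        // The acquire load pairs with the release store, so a consumer that
        // observes the flag also observes the preceding write: g_data is 42 here.
        int observed = g_data;
        (void)observed;
      }
    }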