// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (TSan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
// locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
// * All NoBarrier variants are implemented as relaxed.
// * All Barrier variants are implemented as sequentially-consistent.
// * Compare exchange's failure ordering is always the same as the success one
//   (except for release, which fails as relaxed): using a weaker ordering is
//   only valid under certain uses of compare exchange.
// * Acquire store doesn't exist in the C11 memory model; it is instead
//   implemented as a relaxed store followed by a sequentially consistent
//   fence.
// * Release load doesn't exist in the C11 memory model; it is instead
//   implemented as a sequentially consistent fence followed by a relaxed load.
// * Atomic increment is expected to return the post-incremented value, whereas
//   C11 fetch add returns the previous value. The implementation therefore
//   needs to increment twice (which the compiler should be able to detect and
//   optimize). See the sketch below.
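
// A usage sketch of the increment contract (the variable names here are
// hypothetical, not part of the API):
//
//   Atomic32 count = 0;
//   Atomic32 now = NoBarrier_AtomicIncrement(&count, 1);
//   // now == 1: fetch_add returns the previous value (0), and the
//   // implementation adds |increment| back to produce the post-incremented
//   // value.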

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <stdint.h>

#include <atomic>

typedef int32_t Atomic32;
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}
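
// Note: compare_exchange_strong leaves |old_value| untouched on success and
// overwrites it with the observed value on failure, so returning |old_value|
// always yields the previous contents of |ptr|, matching the CompareAndSwap
// contract. A hypothetical sketch:
//
//   Atomic32 v = 5;
//   Atomic32 prev = NoBarrier_CompareAndSwap(&v, 5, 7);  // prev == 5, v == 7
//   prev = NoBarrier_CompareAndSwap(&v, 5, 9);           // prev == 7, v == 7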

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}
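
// A typical publish/consume pairing built from these primitives (hypothetical
// sketch; |data| is an ordinary non-atomic variable and |flag| an Atomic32):
//
//   // Producer:               // Consumer:
//   data = 42;                 if (Acquire_Load(&flag) == 1)
//   Release_Store(&flag, 1);     assert(data == 42);  // guaranteed visible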

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#if defined(BASE_HAS_ATOMIC64)
typedef int64_t Atomic64;

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}
#endif  // defined(BASE_HAS_ATOMIC64)

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_