// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
// locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one, except for release, which fails as relaxed: a failed compare
//    exchange performs only a load, and C++11 forbids a release failure
//    ordering. Using a weaker failure ordering is only valid under certain
//    uses of compare exchange.
//  * Acquire store doesn't exist in the C++11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C++11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C++11 fetch_add returns the previous value. The implementation
//    therefore needs to add the increment back in (which the compiler should
//    be able to detect and optimize).

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types,
// and assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

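// Full barrier: no memory operation may be reordered across this fence in
// either direction.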
inline void MemoryBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

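// On failure, compare_exchange_strong writes the value it observed at |ptr|
// back into |old_value|, so returning |old_value| always yields the previous
// contents of |ptr|, which is what the atomicops.h CAS contract requires.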
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,

Alexander Potapenko
2014/10/08 17:39:36
That's a strange way to wrap "->". Did clang-format do this?

                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

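// fetch_add returns the pre-increment value, so the increment is added back
// to yield the post-increment value this API promises: with *ptr == 5 and
// increment == 2, fetch_add returns 5 and the function returns 7.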
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

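// C++11 forbids a release failure ordering (a failed compare exchange
// performs only a load), so the failure ordering here is relaxed.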
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

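// C++11 has no acquire store; it is approximated below as a relaxed store
// followed by a sequentially consistent fence.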
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

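// C++11 has no release load; it is approximated below as a sequentially
// consistent fence followed by a relaxed load.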
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#ifdef ARCH_CPU_64_BITS

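// 64-bit variants, mirroring the 32-bit implementations above; they are only
// provided when the target CPU is 64-bit.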
typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment +
         ((AtomicLocation64)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value,
                                new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

#endif  // ARCH_CPU_64_BITS

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
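
A minimal usage sketch (not part of the change above): a release-store /
acquire-load handoff through this API. The names g_payload, g_ready, Publish,
and Consume are hypothetical, and Atomic32 is assumed to come from
atomicops.h, which includes this header.

// Shared state. g_ready acts as the publication flag for g_payload.
base::subtle::Atomic32 g_payload = 0;
base::subtle::Atomic32 g_ready = 0;

// Writer: store the payload, then set the flag with release semantics so
// the payload write becomes visible no later than the flag write.
void Publish() {
  base::subtle::NoBarrier_Store(&g_payload, 42);
  base::subtle::Release_Store(&g_ready, 1);
}

// Reader: once the flag is observed as 1 with acquire semantics, a relaxed
// read of the payload is guaranteed to see 42.
base::subtle::Atomic32 Consume() {
  while (base::subtle::Acquire_Load(&g_ready) == 0) {
  }
  return base::subtle::NoBarrier_Load(&g_payload);
}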