// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C++11 atomics' member functions. The code base is
// currently written assuming atomicity revolves around accesses instead of
// C++11's memory locations. The burden is on the programmer to ensure that all
// memory locations accessed atomically are never accessed non-atomically (tsan
// should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
// locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success one
//    (except for release, which fails as relaxed): using a weaker ordering is
//    only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch_add returns the previous value (see the example
//    below). The implementation therefore needs to increment twice (which
//    the compiler should be able to detect and optimize).
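//
// For illustration only, a minimal sketch of the increment contract from a
// hypothetical caller's point of view (the names below are not part of this
// header):
//
//   Atomic32 counter = 0;
//   Atomic32 now = NoBarrier_AtomicIncrement(&counter, 1);
//   // now == 1, the post-incremented value: the underlying fetch_add
//   // returned the previous value, 0, and the wrapper added the increment.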

#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <stdint.h>
#include <atomic>

typedef int32_t Atomic32;
#define BASE_HAS_ATOMIC64 1  // Use only in tests and base/atomic*

#include "build/build_config.h"

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

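// Note that compare_exchange_strong() writes the value actually observed in
// memory back into old_value when the exchange fails, so returning old_value
// always yields the previous contents of *ptr, as CompareAndSwap requires.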
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value, new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)
                         ->fetch_add(increment, std::memory_order_relaxed);
}

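// fetch_add() without an explicit ordering argument defaults to
// std::memory_order_seq_cst, which provides the Barrier semantics.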
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + ((AtomicLocation32)ptr)->fetch_add(increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value, new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  ((AtomicLocation32)ptr)
      ->compare_exchange_strong(old_value, new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
}

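// Acquire store has no direct C11 equivalent (see the header comment): a
// relaxed store followed by a sequentially consistent fence stands in for it.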
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ((AtomicLocation32)ptr)->store(value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return ((AtomicLocation32)ptr)->load(std::memory_order_acquire);
}

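// Release load has no direct C11 equivalent (see the header comment): a
// sequentially consistent fence followed by a relaxed load stands in for it.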
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed);
}

#if defined(BASE_HAS_ATOMIC64)
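// The 64-bit operations below mirror their 32-bit counterparts above; the
// same ordering and return-value notes apply.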
typedef int64_t Atomic64;

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value, new_value,
                                std::memory_order_relaxed,
                                std::memory_order_relaxed);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return ((AtomicLocation64)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)
                         ->fetch_add(increment, std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + ((AtomicLocation64)ptr)->fetch_add(increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value, new_value,
                                std::memory_order_acquire,
                                std::memory_order_acquire);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  ((AtomicLocation64)ptr)
      ->compare_exchange_strong(old_value, new_value,
                                std::memory_order_release,
                                std::memory_order_relaxed);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_relaxed);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ((AtomicLocation64)ptr)->store(value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return ((AtomicLocation64)ptr)->load(std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed);
}
#endif  // defined(BASE_HAS_ATOMIC64)
}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_