// Copyright 2016 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses the compiler's __atomic builtins, which follow the
// C++11 memory model. The code base is currently written assuming atomicity
// revolves around accesses instead of C++11's memory locations. The burden is
// on the programmer to ensure that all memory locations accessed atomically
// are never accessed non-atomically (tsan should help with this).
//
// TODO(jfb) Modify the atomicops.h API and user code to declare atomic
// locations as truly atomic. See the static_assert below.
//
// Of note in this implementation:
//  * All NoBarrier variants are implemented as relaxed.
//  * All Barrier variants are implemented as sequentially-consistent.
//  * Compare exchange's failure ordering is always the same as the success
//    one (except for release, which fails as relaxed): using a weaker
//    ordering is only valid under certain uses of compare exchange.
//  * Acquire store doesn't exist in the C11 memory model; it is instead
//    implemented as a relaxed store followed by a sequentially consistent
//    fence.
//  * Release load doesn't exist in the C11 memory model; it is instead
//    implemented as a sequentially consistent fence followed by a relaxed
//    load.
Jarin
2016/10/20 11:03:39
We do not use acquire-store and release-load. How
Hannes Payer (out of office)
2016/10/20 17:48:01
Done.
//  * Atomic increment is expected to return the post-incremented value,
//    whereas C11 fetch add returns the previous value. The implementation
//    therefore needs to increment twice (which the compiler should be able to
//    detect and optimize); see the usage sketch below.

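A minimal usage sketch of the post-increment semantics, assuming the public "src/base/atomicops.h" header exposes these functions in v8::base; the function name and the values are hypothetical:

#include "src/base/atomicops.h"

void IncrementSketch() {
  v8::base::Atomic32 counter = 5;
  // A bare fetch-add would return the previous value (5) while storing 8;
  // NoBarrier_AtomicIncrement adds the increment back and so returns the
  // post-incremented value.
  v8::base::Atomic32 result = v8::base::NoBarrier_AtomicIncrement(&counter, 3);
  // Here result == 8 and counter == 8.
  static_cast<void>(result);
}
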
#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

#include "src/base/build_config.h"

namespace v8 {
namespace base {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types,
// and assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header
Michael Lippautz
2016/10/20 10:28:24
Last sentences of this paragraph do not apply for
Michael Lippautz
2016/10/20 12:30:15
+1 on removing the weirdos.
Hannes Payer (out of office)
2016/10/20 17:48:01
Done.
// shouldn't be used; a hypothetical form of that assertion is sketched below.
//

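The static_assert referred to above is not present in this patch; a minimal sketch of what such an assertion could look like, assuming the Atomic32 typedef from atomicops.h is visible and that size plus alignment is an acceptable proxy for layout compatibility, is:

static_assert(sizeof(Atomic32) == sizeof(std::atomic<Atomic32>) &&
                  alignof(Atomic32) == alignof(std::atomic<Atomic32>),
              "Atomic32 must be layout-compatible with std::atomic<Atomic32>");
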
inline void MemoryBarrier() {
#if defined(__GLIBCXX__)
  // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but
  // not defined, leading to the linker complaining about undefined references.
  __atomic_thread_fence(std::memory_order_seq_cst);
#else
  std::atomic_thread_fence(std::memory_order_seq_cst);
#endif
}

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value, Atomic32 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

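A minimal lock sketch, separate from the header itself, showing how the acquire/release compare-and-swap pair above might be used; the function names and the 0/1 lock protocol are hypothetical, and it assumes the public "src/base/atomicops.h" header:

#include "src/base/atomicops.h"

// 0 == unlocked, 1 == locked (hypothetical protocol).
void SpinLock(volatile v8::base::Atomic32* lock) {
  // Acquire_CompareAndSwap returns the value observed before the swap;
  // observing 0 means the lock was free and is now owned by this thread.
  while (v8::base::Acquire_CompareAndSwap(lock, 0, 1) != 0) {
  }
}

void SpinUnlock(volatile v8::base::Atomic32* lock) {
  // The release ordering publishes all writes made while holding the lock.
  // (Release_Store, declared further down, would work equally well.)
  v8::base::Release_CompareAndSwap(lock, 1, 0);
}
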
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
Jarin
2016/10/20 11:03:39
This actually makes me a bit worried. Are we sure
Hannes Payer (out of office)
2016/10/20 17:48:01
As discussed offline, we will look at performance
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

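A minimal publish/consume sketch, separate from the header itself, showing the intended release/acquire pairing of the store and load variants above; the globals, function names, and values are hypothetical, and it assumes the public "src/base/atomicops.h" header:

#include "src/base/atomicops.h"

namespace {
v8::base::Atomic32 g_payload = 0;  // hypothetical data slot
v8::base::Atomic32 g_ready = 0;    // hypothetical flag
}  // namespace

void Publish() {
  v8::base::NoBarrier_Store(&g_payload, 42);
  // Release_Store keeps the payload write from being reordered past the flag.
  v8::base::Release_Store(&g_ready, 1);
}

v8::base::Atomic32 Consume() {
  // Acquire_Load pairs with the Release_Store above.
  while (v8::base::Acquire_Load(&g_ready) == 0) {
  }
  // The release/acquire pairing guarantees the payload write is visible here.
  return v8::base::NoBarrier_Load(&g_payload);
}
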
#if defined(V8_HOST_ARCH_64_BIT)

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return old_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return old_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value, Atomic64 new_value) {
  // The failure ordering must be relaxed here: __ATOMIC_RELEASE is not a valid
  // failure ordering for __atomic_compare_exchange_n, and this matches both
  // the header comment above and the 32-bit variant.
  __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                              __ATOMIC_RELEASE, __ATOMIC_RELAXED);
  return old_value;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return __atomic_load_n(ptr, __ATOMIC_RELAXED);
}

#endif  // defined(V8_HOST_ARCH_64_BIT)
}  // namespace base
}  // namespace v8

#endif  // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_