// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.
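//
// Illustrative usage sketch (hypothetical client code; the Atomic32 type and
// these operations are normally reached through atomicops.h):
//
//   v8::internal::Atomic32 counter = 0;
//   v8::internal::Barrier_AtomicIncrement(&counter, 1);
//   v8::internal::Atomic32 value = v8::internal::Acquire_Load(&counter);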

#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_

namespace v8 {
namespace internal {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of the x86 CPU this code runs on. Values may not be correct
// before main() is run, but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

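// Prototypes for the out-of-line atomic operations supplied by the
// ThreadSanitizer runtime; the wrappers further down forward to these.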
extern "C" {
typedef char __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;  // NOLINT

#if defined(__SIZEOF_INT128__) \
    || (__clang_major__ * 100 + __clang_minor__ >= 302)
typedef __int128 __tsan_atomic128;
#define __TSAN_HAS_INT128 1
#else
typedef char __tsan_atomic128;
#define __TSAN_HAS_INT128 0
#endif

typedef enum {
  __tsan_memory_order_relaxed,
  __tsan_memory_order_consume,
  __tsan_memory_order_acquire,
  __tsan_memory_order_release,
  __tsan_memory_order_acq_rel,
  __tsan_memory_order_seq_cst,
} __tsan_memory_order;

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
    __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_load(const volatile __tsan_atomic128* a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
    __tsan_memory_order mo);
void __tsan_atomic128_store(volatile __tsan_atomic128* a, __tsan_atomic128 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_exchange(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_add(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_and(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_or(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_xor(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_nand(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_nand(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_nand(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_nand(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);
__tsan_atomic128 __tsan_atomic128_fetch_nand(volatile __tsan_atomic128* a,
    __tsan_atomic128 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_weak(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);
int __tsan_atomic128_compare_exchange_strong(volatile __tsan_atomic128* a,
    __tsan_atomic128* c, __tsan_atomic128 v, __tsan_memory_order mo,
    __tsan_memory_order fail_mo);

__tsan_atomic8 __tsan_atomic8_compare_exchange_val(
    volatile __tsan_atomic8* a, __tsan_atomic8 c, __tsan_atomic8 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic16 __tsan_atomic16_compare_exchange_val(
    volatile __tsan_atomic16* a, __tsan_atomic16 c, __tsan_atomic16 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic32 __tsan_atomic32_compare_exchange_val(
    volatile __tsan_atomic32* a, __tsan_atomic32 c, __tsan_atomic32 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic64 __tsan_atomic64_compare_exchange_val(
    volatile __tsan_atomic64* a, __tsan_atomic64 c, __tsan_atomic64 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);
__tsan_atomic128 __tsan_atomic128_compare_exchange_val(
    volatile __tsan_atomic128* a, __tsan_atomic128 c, __tsan_atomic128 v,
    __tsan_memory_order mo, __tsan_memory_order fail_mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);
void __tsan_atomic_signal_fence(__tsan_memory_order mo);
}  // extern "C"

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

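// The remainder of this file implements V8's atomicops primitives on top of
// the __tsan_* entry points declared above, so that ThreadSanitizer observes
// every atomic operation performed through this backend.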
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

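// Note: Acquire_Store is written as a relaxed store followed by a full
// (sequentially consistent) fence, mirroring the fence-based pattern used by
// the other atomicops backends.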
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

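// Note: Release_Load issues a full fence before a relaxed load, the mirror
// image of the Acquire_Store pattern above.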
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

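// 64-bit variants of the operations above.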
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed, __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire, __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release, __tsan_memory_order_relaxed);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_TSAN_H_