Chromium Code Reviews

Side by Side Diff: base/atomicops_internals_tsan.h

Issue 10948035: Atomics implementation for compiler-based ThreadSanitizer (http://dev.chromium.org/developers/testi… (Closed) Base URL: svn://svn.chromium.org/chrome/trunk/src/
Patch Set: Created 8 years, 3 months ago
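For context, base/atomicops.h dispatches to this header in ThreadSanitizer builds. A rough sketch of the include selection (the exact guard macros here are assumptions based on Chromium's THREAD_SANITIZER define, not the verbatim dispatch):

// In base/atomicops.h (sketch, assuming Chromium's THREAD_SANITIZER define):
#if defined(THREAD_SANITIZER)
#include "base/atomicops_internals_tsan.h"
#elif defined(COMPILER_GCC) && defined(ARCH_CPU_X86_FAMILY)
#include "base/atomicops_internals_x86_gcc.h"
#endif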
Property Changes:
Added: svn:eol-style
+ LF
// Copyright (c) 2011 The Chromium Authors. All rights reserved.
Dmitry Vyukov 2012/09/19 17:04:29 2012?
Alexander Potapenko 2012/09/27 11:25:26 Done.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_TSAN_H_
#define BASE_ATOMICOPS_INTERNALS_TSAN_H_

#include "base/base_export.h"

// This struct is not part of the public API of this module; clients may not
// use it. (However, it's exported via BASE_EXPORT because clients implicitly
// do use it at link time by inlining these functions.)
// Features of the x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
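// Note: this declaration mirrors base/atomicops_internals_x86_gcc.h; the
// global itself is presumably still defined by the x86 backend's .cc file,
// which continues to be compiled in TSan builds.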

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace base {
namespace subtle {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

typedef char __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;  // NOLINT
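// Note: 'long' is 64 bits only on LP64 platforms; this assumes the x86-64
// Linux targets that compiler-based TSan supports.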

typedef enum {
  __tsan_memory_order_relaxed = 1 << 0,
  __tsan_memory_order_consume = 1 << 1,
  __tsan_memory_order_acquire = 1 << 2,
  __tsan_memory_order_release = 1 << 3,
  __tsan_memory_order_acq_rel = 1 << 4,
  __tsan_memory_order_seq_cst = 1 << 5,
} __tsan_memory_order;
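// These constants correspond one-to-one to the C++11 std::memory_order
// values; the distinct-bit encoding is specific to the TSan runtime.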

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8 *a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16 *a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32 *a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64 *a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8 *a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16 *a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32 *a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64 *a, __tsan_atomic64 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8 *a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16 *a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32 *a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64 *a,
    __tsan_atomic64 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8 *a,
    __tsan_atomic8 *c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16 *a,
    __tsan_atomic16 *c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32 *a,
    __tsan_atomic32 *c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64 *a,
    __tsan_atomic64 *c, __tsan_atomic64 v, __tsan_memory_order mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

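// The functions below map Chromium's legacy atomicops API onto the __tsan_*
// primitives declared above, picking the memory order that matches each
// operation's name.

// CompareAndSwap is built on compare_exchange_strong: on failure the current
// value of *ptr is written back into |cmp|, and on success |cmp| still holds
// old_value, which equals the previous value, so returning |cmp| yields the
// previous value of *ptr either way.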
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

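// fetch_add returns the value before the addition, so the incremented value
// is reconstructed by adding |increment| to the returned value.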
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

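// Acquire_Store and Release_Load have no direct C++11 equivalent (a store
// cannot be acquire, and a load cannot be release), so they are emulated
// with a relaxed access combined with a full seq_cst fence.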
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_TSAN_H_
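
For context, a minimal sketch of how client code exercises this API through base/atomicops.h; under a TSan build these calls resolve to the __tsan_* wrappers above. The SpinLock class is hypothetical and for illustration only:

#include "base/atomicops.h"

// Hypothetical toy spin lock built on the atomicops API, for illustration.
class SpinLock {
 public:
  SpinLock() : state_(0) {}
  void Lock() {
    // Acquire semantics: reads and writes in the critical section cannot be
    // reordered above a successful acquisition.
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the previous value is observed as 0 (unlocked).
    }
  }
  void Unlock() {
    // Release semantics: writes in the critical section become visible
    // before the lock is seen as free.
    base::subtle::Release_Store(&state_, 0);
  }
 private:
  base::subtle::Atomic32 state_;
};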