// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


// This file is an internal atomic implementation for compiler-based
// ThreadSanitizer. Use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_TSAN_H_
#define V8_ATOMICOPS_INTERNALS_TSAN_H_

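// Note: clients should include atomicops.h, which selects one of the
// atomicops_internals_*.h backends. Under ThreadSanitizer the dispatch in
// atomicops.h presumably reduces to a sketch like this (illustrative, not
// code from this file):
//
//   #if defined(THREAD_SANITIZER)
//   #include "atomicops_internals_tsan.h"
//   #endif
//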
// This struct is not part of the public API of this module; clients may not
// use it. (However, it is effectively part of the ABI, because clients
// implicitly use it at link time by inlining these functions.)
// Features of this x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct
    AtomicOps_Internalx86CPUFeatures;
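// For reference, the x86 backend consults these flags roughly as follows
// (a sketch mirroring atomicops_internals_x86_gcc.h, not code in this
// file):
//
//   // After an acquire compare-and-swap on affected AMD parts:
//   if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
//     __asm__ __volatile__("lfence" : : : "memory");
//   }
//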

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

namespace v8 {
namespace internal {

#ifndef TSAN_INTERFACE_ATOMIC_H
#define TSAN_INTERFACE_ATOMIC_H

#ifdef __cplusplus
extern "C" {
#endif

typedef char __tsan_atomic8;
typedef short __tsan_atomic16;  // NOLINT
typedef int __tsan_atomic32;
typedef long __tsan_atomic64;  // NOLINT

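// The magic "(1 << n) + 100500" values below appear chosen so that no
// valid __tsan_memory_order collides with a small plain integer (or a
// std::memory_order constant) passed by mistake; the TSAN interface does
// not document this, so treat it as an inference.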
typedef enum {
  __tsan_memory_order_relaxed = (1 << 0) + 100500,
  __tsan_memory_order_consume = (1 << 1) + 100500,
  __tsan_memory_order_acquire = (1 << 2) + 100500,
  __tsan_memory_order_release = (1 << 3) + 100500,
  __tsan_memory_order_acq_rel = (1 << 4) + 100500,
  __tsan_memory_order_seq_cst = (1 << 5) + 100500,
} __tsan_memory_order;

__tsan_atomic8 __tsan_atomic8_load(const volatile __tsan_atomic8* a,
    __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_load(const volatile __tsan_atomic16* a,
    __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_load(const volatile __tsan_atomic32* a,
    __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_load(const volatile __tsan_atomic64* a,
    __tsan_memory_order mo);

void __tsan_atomic8_store(volatile __tsan_atomic8* a, __tsan_atomic8 v,
    __tsan_memory_order mo);
void __tsan_atomic16_store(volatile __tsan_atomic16* a, __tsan_atomic16 v,
    __tsan_memory_order mo);
void __tsan_atomic32_store(volatile __tsan_atomic32* a, __tsan_atomic32 v,
    __tsan_memory_order mo);
void __tsan_atomic64_store(volatile __tsan_atomic64* a, __tsan_atomic64 v,
    __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_exchange(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_exchange(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_exchange(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_exchange(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_add(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_add(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_add(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_add(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_and(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_and(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_and(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_and(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_or(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_or(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_or(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_or(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);

__tsan_atomic8 __tsan_atomic8_fetch_xor(volatile __tsan_atomic8* a,
    __tsan_atomic8 v, __tsan_memory_order mo);
__tsan_atomic16 __tsan_atomic16_fetch_xor(volatile __tsan_atomic16* a,
    __tsan_atomic16 v, __tsan_memory_order mo);
__tsan_atomic32 __tsan_atomic32_fetch_xor(volatile __tsan_atomic32* a,
    __tsan_atomic32 v, __tsan_memory_order mo);
__tsan_atomic64 __tsan_atomic64_fetch_xor(volatile __tsan_atomic64* a,
    __tsan_atomic64 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_weak(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_weak(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_weak(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_weak(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);

int __tsan_atomic8_compare_exchange_strong(volatile __tsan_atomic8* a,
    __tsan_atomic8* c, __tsan_atomic8 v, __tsan_memory_order mo);
int __tsan_atomic16_compare_exchange_strong(volatile __tsan_atomic16* a,
    __tsan_atomic16* c, __tsan_atomic16 v, __tsan_memory_order mo);
int __tsan_atomic32_compare_exchange_strong(volatile __tsan_atomic32* a,
    __tsan_atomic32* c, __tsan_atomic32 v, __tsan_memory_order mo);
int __tsan_atomic64_compare_exchange_strong(volatile __tsan_atomic64* a,
    __tsan_atomic64* c, __tsan_atomic64 v, __tsan_memory_order mo);

void __tsan_atomic_thread_fence(__tsan_memory_order mo);

#ifdef __cplusplus
}  // extern "C"
#endif

#endif  // #ifndef TSAN_INTERFACE_ATOMIC_H

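// The wrappers below implement the atomicops API on top of the TSAN
// runtime. Note that the compare-and-swap wrappers return the value
// observed in memory: following the C++11 compare_exchange convention,
// the __tsan_* calls write the observed value back through |cmp| on
// failure and leave it equal to old_value on success, so returning |cmp|
// gives the caller the previous contents either way.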
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
                                       Atomic32 new_value) {
  return __tsan_atomic32_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

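// The AtomicIncrement operations return the new (incremented) value,
// while the TSAN primitive is a fetch-and-add that returns the old value;
// hence the "increment +" in the bodies below.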
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + __tsan_atomic32_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 cmp = old_value;
  __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release);
  return cmp;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
}

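// Acquire_Store and Release_Load implement the historical atomicops
// semantics: a store followed by a full memory barrier, and a full memory
// barrier followed by a load, respectively. That is why they are written
// as a relaxed access plus a seq_cst fence rather than as a single
// acquire or release operation.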
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed);
}

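// 64-bit versions of the operations above.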
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_relaxed);
  return cmp;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_acquire);
}

inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
                                       Atomic64 new_value) {
  return __tsan_atomic64_exchange(ptr, new_value,
      __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + __tsan_atomic64_fetch_add(ptr, increment,
      __tsan_memory_order_acq_rel);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_relaxed);
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __tsan_atomic64_store(ptr, value, __tsan_memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return __tsan_atomic64_load(ptr, __tsan_memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
  return __tsan_atomic64_load(ptr, __tsan_memory_order_relaxed);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_acquire);
  return cmp;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 cmp = old_value;
  __tsan_atomic64_compare_exchange_strong(ptr, &cmp, new_value,
      __tsan_memory_order_release);
  return cmp;
}

inline void MemoryBarrier() {
  __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst);
}

}  // namespace internal
}  // namespace v8

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_TSAN_H_