| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 258 matching lines...) |
| 269 | 269 |
| 270 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 270 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 271 Atomic32 old_value, | 271 Atomic32 old_value, |
| 272 Atomic32 new_value) { | 272 Atomic32 new_value) { |
| 273 Atomic32 cmp = old_value; | 273 Atomic32 cmp = old_value; |
| 274 __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, | 274 __tsan_atomic32_compare_exchange_strong(ptr, &cmp, new_value, |
| 275 __tsan_memory_order_release, __tsan_memory_order_relaxed); | 275 __tsan_memory_order_release, __tsan_memory_order_relaxed); |
| 276 return cmp; | 276 return cmp; |
| 277 } | 277 } |
| 278 | 278 |
| 279 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { |
| 280 __tsan_atomic8_store(ptr, value, __tsan_memory_order_relaxed); |
| 281 } |
| 282 |
| 279 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 283 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 280 __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); | 284 __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); |
| 281 } | 285 } |
| 282 | 286 |
| 283 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 287 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 284 __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); | 288 __tsan_atomic32_store(ptr, value, __tsan_memory_order_relaxed); |
| 285 __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); | 289 __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); |
| 286 } | 290 } |
| 287 | 291 |
| 288 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 292 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 289 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); | 293 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); |
| 290 } | 294 } |
| 291 | 295 |
| 296 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { |
| 297 return __tsan_atomic8_load(ptr, __tsan_memory_order_relaxed); |
| 298 } |
| 299 |
| 292 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 300 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
| 293 return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); | 301 return __tsan_atomic32_load(ptr, __tsan_memory_order_relaxed); |
| 294 } | 302 } |
| 295 | 303 |
| 296 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 304 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| 297 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); | 305 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); |
| 298 } | 306 } |
| 299 | 307 |
| 300 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 308 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| 301 __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); | 309 __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); |
| (...skipping 83 matching lines...) |
| 385 inline void MemoryBarrier() { | 393 inline void MemoryBarrier() { |
| 386 __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); | 394 __tsan_atomic_thread_fence(__tsan_memory_order_seq_cst); |
| 387 } | 395 } |
| 388 | 396 |
| 389 } // namespace internal | 397 } // namespace internal |
| 390 } // namespace v8 | 398 } // namespace v8 |
| 391 | 399 |
| 392 #undef ATOMICOPS_COMPILER_BARRIER | 400 #undef ATOMICOPS_COMPILER_BARRIER |
| 393 | 401 |
| 394 #endif // V8_ATOMICOPS_INTERNALS_TSAN_H_ | 402 #endif // V8_ATOMICOPS_INTERNALS_TSAN_H_ |
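For reviewers' context, not part of the CL: a minimal sketch of how the wrappers shown above might be used, assuming the v8::internal atomicops API and an "atomicops.h" include path; the flag and payload names below are illustrative only.

// Sketch only (not part of this header): using the Atomic8 relaxed
// operations added in this CL together with the existing Atomic32
// acquire/release pair. Include path and the g_trace_enabled /
// g_ready / g_payload names are illustrative.
#include "atomicops.h"

namespace {

v8::internal::Atomic8 g_trace_enabled = 0;
v8::internal::Atomic32 g_ready = 0;
int g_payload = 0;

// Relaxed (NoBarrier) byte flag: atomicity and a TSAN annotation,
// but no ordering guarantees.
void EnableTracing() {
  v8::internal::NoBarrier_Store(&g_trace_enabled,
                                static_cast<v8::internal::Atomic8>(1));
}

bool TracingEnabled() {
  return v8::internal::NoBarrier_Load(&g_trace_enabled) != 0;
}

// Publish/consume pairing: Release_Store orders the plain write to
// g_payload before the flag store; Acquire_Load on the flag makes
// that write visible to the consumer.
void Publish(int value) {
  g_payload = value;
  v8::internal::Release_Store(&g_ready, 1);
}

bool TryConsume(int* out) {
  if (v8::internal::Acquire_Load(&g_ready) == 0) return false;
  *out = g_payload;
  return true;
}

}  // namespace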