| OLD | NEW |
| 1 // Copyright 2016 the V8 project authors. All rights reserved. | 1 // Copyright 2016 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file is an internal atomic implementation, use atomicops.h instead. | 5 // This file is an internal atomic implementation, use atomicops.h instead. |
| 6 // | 6 // |
| 7 // This implementation uses C++11 atomics' member functions. The code base is | 7 // This implementation uses C++11 atomics' member functions. The code base is |
| 8 // currently written assuming atomicity revolves around accesses instead of | 8 // currently written assuming atomicity revolves around accesses instead of |
| 9 // C++11's memory locations. The burden is on the programmer to ensure that all | 9 // C++11's memory locations. The burden is on the programmer to ensure that all |
| 10 // memory locations accessed atomically are never accessed non-atomically (tsan | 10 // memory locations accessed atomically are never accessed non-atomically (tsan |
| (...skipping 31 matching lines...) | |
| 42 inline void MemoryBarrier() { | 42 inline void MemoryBarrier() { |
| 43 #if defined(__GLIBCXX__) | 43 #if defined(__GLIBCXX__) |
| 44 // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but | 44 // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but |
| 45 // not defined, leading to the linker complaining about undefined references. | 45 // not defined, leading to the linker complaining about undefined references. |
| 46 __atomic_thread_fence(std::memory_order_seq_cst); | 46 __atomic_thread_fence(std::memory_order_seq_cst); |
| 47 #else | 47 #else |
| 48 std::atomic_thread_fence(std::memory_order_seq_cst); | 48 std::atomic_thread_fence(std::memory_order_seq_cst); |
| 49 #endif | 49 #endif |
| 50 } | 50 } |
| 51 | 51 |
| 52 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 52 inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr, |
| 53 Atomic32 old_value, | 53 Atomic32 old_value, Atomic32 new_value) { |
| 54 Atomic32 new_value) { | |
| 55 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, | 54 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 56 __ATOMIC_RELAXED, __ATOMIC_RELAXED); | 55 __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
| 57 return old_value; | 56 return old_value; |
| 58 } | 57 } |
| 59 | 58 |
| 60 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 59 inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr, |
| 61 Atomic32 new_value) { | 60 Atomic32 new_value) { |
| 62 return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); | 61 return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); |
| 63 } | 62 } |
| 64 | 63 |
| 65 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 64 inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr, |
| 66 Atomic32 increment) { | 65 Atomic32 increment) { |
| 67 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); | 66 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); |
| 68 } | 67 } |
| 69 | 68 |
| 70 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 69 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| 71 Atomic32 increment) { | 70 Atomic32 increment) { |
| 72 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST); | 71 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST); |
| 73 } | 72 } |
| 74 | 73 |
| 75 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 74 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 76 Atomic32 old_value, Atomic32 new_value) { | 75 Atomic32 old_value, Atomic32 new_value) { |
| 77 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, | 76 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 78 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); | 77 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
| 79 return old_value; | 78 return old_value; |
| 80 } | 79 } |
| 81 | 80 |
| 82 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 81 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 83 Atomic32 old_value, Atomic32 new_value) { | 82 Atomic32 old_value, Atomic32 new_value) { |
| 84 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, | 83 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 85 __ATOMIC_RELEASE, __ATOMIC_RELAXED); | 84 __ATOMIC_RELEASE, __ATOMIC_RELAXED); |
| 86 return old_value; | 85 return old_value; |
| 87 } | 86 } |
| 88 | 87 |
| 89 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 88 inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) { |
| 90 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); | 89 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| 91 } | 90 } |
| 92 | 91 |
| 93 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 92 inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 94 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); | 93 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| 95 } | 94 } |
| 96 | 95 |
| 97 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 96 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 98 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); | 97 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
| 99 } | 98 } |
| 100 | 99 |
| 101 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 100 inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { |
| 102 return __atomic_load_n(ptr, __ATOMIC_RELAXED); | 101 return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| 103 } | 102 } |
| 104 | 103 |
| 105 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 104 inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { |
| 106 return __atomic_load_n(ptr, __ATOMIC_RELAXED); | 105 return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| 107 } | 106 } |
| 108 | 107 |
| 109 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 108 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| 110 return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); | 109 return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); |
| 111 } | 110 } |
| 112 | 111 |
| 113 #if defined(V8_HOST_ARCH_64_BIT) | 112 #if defined(V8_HOST_ARCH_64_BIT) |
| 114 | 113 |
| 115 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 114 inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr, |
| 116 Atomic64 old_value, | 115 Atomic64 old_value, Atomic64 new_value) { |
| 117 Atomic64 new_value) { | |
| 118 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, | 116 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 119 __ATOMIC_RELAXED, __ATOMIC_RELAXED); | 117 __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
| 120 return old_value; | 118 return old_value; |
| 121 } | 119 } |
| 122 | 120 |
| 123 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 121 inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr, |
| 124 Atomic64 new_value) { | 122 Atomic64 new_value) { |
| 125 return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); | 123 return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); |
| 126 } | 124 } |
| 127 | 125 |
| 128 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 126 inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr, |
| 129 Atomic64 increment) { | 127 Atomic64 increment) { |
| 130 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); | 128 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); |
| 131 } | 129 } |
| 132 | 130 |
| 133 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 131 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
| 134 Atomic64 increment) { | 132 Atomic64 increment) { |
| 135 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST); | 133 return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST); |
| 136 } | 134 } |
| 137 | 135 |
| 138 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 136 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| 139 Atomic64 old_value, Atomic64 new_value) { | 137 Atomic64 old_value, Atomic64 new_value) { |
| 140 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, | 138 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 141 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); | 139 __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
| 142 return old_value; | 140 return old_value; |
| 143 } | 141 } |
| 144 | 142 |
| 145 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 143 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| 146 Atomic64 old_value, Atomic64 new_value) { | 144 Atomic64 old_value, Atomic64 new_value) { |
| 147 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, | 145 __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| 148 __ATOMIC_RELEASE, __ATOMIC_RELAXED); | 146 __ATOMIC_RELEASE, __ATOMIC_RELAXED); |
| 149 return old_value; | 147 return old_value; |
| 150 } | 148 } |
| 151 | 149 |
| 152 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 150 inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 153 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); | 151 __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| 154 } | 152 } |
| 155 | 153 |
| 156 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | 154 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 157 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); | 155 __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
| 158 } | 156 } |
| 159 | 157 |
| 160 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | 158 inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { |
| 161 return __atomic_load_n(ptr, __ATOMIC_RELAXED); | 159 return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| 162 } | 160 } |
| 163 | 161 |
| 164 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 162 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| 165 return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); | 163 return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); |
| 166 } | 164 } |
| 167 | 165 |
| 168 #endif // defined(V8_HOST_ARCH_64_BIT) | 166 #endif // defined(V8_HOST_ARCH_64_BIT) |
| 169 } // namespace base | 167 } // namespace base |
| 170 } // namespace v8 | 168 } // namespace v8 |
| 171 | 169 |
| 172 #endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ | 170 #endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
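
For context on how the renamed functions are meant to be used, here is a minimal, hypothetical usage sketch of the classic publication pattern built on the Release_Store/Acquire_Load pair, with Relaxed_ accesses for the payload. It assumes the public wrapper header (atomicops.h, mentioned in the file comment; the include path and the threading setup are illustrative, not part of this change):

```cpp
// Hypothetical usage sketch; not part of the reviewed change.
#include <cassert>
#include <thread>

#include "src/base/atomicops.h"  // assumed include path for atomicops.h

namespace {

v8::base::Atomic32 payload = 0;    // data written before publication
v8::base::Atomic32 published = 0;  // flag that publishes `payload`

void Producer() {
  // Relaxed store: no ordering needed for the payload write by itself.
  v8::base::Relaxed_Store(&payload, 42);
  // Release store: all writes above become visible to a thread that
  // observes `published == 1` via an acquire load.
  v8::base::Release_Store(&published, 1);
}

void Consumer() {
  // Acquire load pairs with the release store in Producer().
  while (v8::base::Acquire_Load(&published) == 0) {
    // spin until published
  }
  // Safe: the acquire/release pair orders the payload write before this read.
  assert(v8::base::Relaxed_Load(&payload) == 42);
}

}  // namespace

int main() {
  std::thread consumer(Consumer);
  std::thread producer(Producer);
  producer.join();
  consumer.join();
  return 0;
}
```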
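The compare-and-swap helpers keep their old contract under the new names: they return the value actually found in memory, so a caller detects success by comparing that return value against the expected old value. A hedged sketch of a CAS retry loop on top of Relaxed_CompareAndSwap (the helper name is illustrative, only the atomicops calls come from the header above):

```cpp
#include "src/base/atomicops.h"  // assumed include path

// Illustrative helper (not part of this change): atomically raise *ptr to at
// least `candidate`, using the relaxed CAS from the portable backend.
inline void RelaxedUpdateMax(volatile v8::base::Atomic32* ptr,
                             v8::base::Atomic32 candidate) {
  v8::base::Atomic32 observed = v8::base::Relaxed_Load(ptr);
  while (observed < candidate) {
    // Relaxed_CompareAndSwap returns the value found in memory; if it differs
    // from `observed`, another thread won the race and we retry with it.
    v8::base::Atomic32 found =
        v8::base::Relaxed_CompareAndSwap(ptr, observed, candidate);
    if (found == observed) return;  // swap succeeded
    observed = found;               // swap failed; re-check the new value
  }
}
```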