Chromium Code Reviews
base/atomicops_internals_portable.h (new file)
// Copyright (c) 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use atomicops.h instead.
//
// This implementation uses C11 atomics' free functions through C++11's <atomic>
// header instead of C++11 atomics' types because the code base is currently
// written assuming atomicity revolves around accesses instead of C++11's memory
Dmitry Vyukov, 2014/10/08 08:05:44:
I don't understand the "because" part. You still
// locations. The burden is on the programmer to ensure that all memory
// locations accessed atomically are never accessed non-atomically (tsan should
// help with this).
//
// Of note in this implementation:
// * All NoBarrier variants are implemented as relaxed.
// * All Barrier variants are implemented as sequentially-consistent.
// * Compare exchange's failure ordering is always the same as the success one
//   (except for release, which fails as relaxed): using a weaker ordering is
//   only valid under certain uses of compare exchange.
// * Acquire store doesn't exist in the C11 memory model; it is instead
//   implemented as a relaxed store followed by a sequentially consistent
//   fence.
// * Release load doesn't exist in the C11 memory model; it is instead
//   implemented as a sequentially consistent fence followed by a relaxed load.
// * Atomic increment is expected to return the post-incremented value, whereas
//   C11 fetch add returns the previous value. The implementation therefore
//   needs to increment twice (which the compiler should be able to detect and
//   optimize).

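As an aside on the access rule above, a minimal usage sketch (not part of the reviewed file; g_ready, MarkReady and IsReady are illustrative names) of what "never accessed non-atomically" means, using the wrappers defined further down in this header:

Atomic32 g_ready = 0;  // Only ever touched through the atomicops wrappers.

void MarkReady() {
  Release_Store(&g_ready, 1);  // Atomic access: fine.
}

bool IsReady() {
  return Acquire_Load(&g_ready) != 0;  // Atomic access: fine.
}

// A plain read or write of g_ready elsewhere, e.g. `if (g_ready)`, would mix
// atomic and non-atomic accesses to the same memory location, which the rule
// above forbids; tsan is expected to flag such mixed accesses.
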
#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_

#include <atomic>

namespace base {
namespace subtle {

// This implementation is transitional and maintains the original API for
// atomicops.h. This requires casting memory locations to the atomic types, and
// assumes that the API and the C++11 implementation are layout-compatible,
// which isn't true for all implementations or hardware platforms. The static
// assertion should detect this issue; were it to fire, this header shouldn't
// be used.
//
// TODO(jfb) If this header manages to stay committed then the API should be
// modified, and all call sites updated.
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");

inline void MemoryBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  auto expected = old_value;
  std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
                                               &expected,
Dmitry Vyukov, 2014/10/08 08:05:44:
remove expected, just do &old_value
                                               new_value,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed);
  return expected;
}
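A sketch of the simplification Dmitry is asking for here (and in the other compare-and-swap wrappers below): since old_value is received by value, it can serve as the expected slot directly, and a failed exchange writes the observed value of *ptr into it.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  // On failure the strong CAS stores the current value of *ptr into old_value;
  // on success old_value already equals the previous value, so either way the
  // function returns the value that was stored at ptr before the call.
  std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
                                               &old_value,
                                               new_value,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed);
  return old_value;
}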

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  return std::atomic_exchange_explicit(
      (AtomicLocation32)ptr, new_value, std::memory_order_relaxed);
Dmitry Vyukov, 2014/10/08 08:05:44:
I would use member functions, because they are shorter.
}
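For illustration, the member-function spelling Dmitry is referring to; a sketch only, not part of the CL, relying on std::atomic's volatile-qualified exchange overload:

  // Inside NoBarrier_AtomicExchange, the free-function call above could be:
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);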

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return increment + std::atomic_fetch_add_explicit((AtomicLocation32)ptr,
                                                    increment,
                                                    std::memory_order_relaxed);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return increment + std::atomic_fetch_add((AtomicLocation32)ptr, increment);
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  auto expected = old_value;
  std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
                                               &expected,
Dmitry Vyukov, 2014/10/08 08:05:44:
remove expected, use &old_value
                                               new_value,
                                               std::memory_order_acquire,
                                               std::memory_order_acquire);
  return expected;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  auto expected = old_value;
  std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
                                               &expected,
Dmitry Vyukov, 2014/10/08 08:05:44:
remove expected, use &old_value
here and below
                                               new_value,
                                               std::memory_order_release,
                                               std::memory_order_relaxed);
  return expected;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(
      (AtomicLocation32)ptr, value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(
      (AtomicLocation32)ptr, value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  std::atomic_store_explicit(
      (AtomicLocation32)ptr, value, std::memory_order_release);
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit((AtomicLocation32)ptr,
                                   std::memory_order_relaxed);
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return std::atomic_load_explicit((AtomicLocation32)ptr,
                                   std::memory_order_acquire);
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return std::atomic_load_explicit((AtomicLocation32)ptr,
                                   std::memory_order_relaxed);
}

#ifdef ARCH_CPU_64_BITS
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
Dmitry Vyukov, 2014/10/08 08:05:44:
there are no comments on 32-bit versions

typedef volatile std::atomic<Atomic64>* AtomicLocation64;
static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64),
              "incompatible 64-bit atomic layout");

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  auto expected = old_value;
  std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr,
                                               &expected,
                                               new_value,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed);
  return expected;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  return std::atomic_exchange_explicit(
      (AtomicLocation64)ptr, new_value, std::memory_order_relaxed);
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return increment + std::atomic_fetch_add_explicit((AtomicLocation64)ptr,
                                                    increment,
                                                    std::memory_order_relaxed);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return increment + std::atomic_fetch_add((AtomicLocation64)ptr, increment);
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  auto expected = old_value;
  std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr,
                                               &expected,
                                               new_value,
                                               std::memory_order_acquire,
                                               std::memory_order_acquire);
  return expected;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  auto expected = old_value;
  std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr,
                                               &expected,
                                               new_value,
                                               std::memory_order_release,
                                               std::memory_order_relaxed);
  return expected;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(
      (AtomicLocation64)ptr, value, std::memory_order_relaxed);
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(
      (AtomicLocation64)ptr, value, std::memory_order_relaxed);
  std::atomic_thread_fence(std::memory_order_seq_cst);
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  std::atomic_store_explicit(
      (AtomicLocation64)ptr, value, std::memory_order_release);
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit((AtomicLocation64)ptr,
                                   std::memory_order_relaxed);
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  return std::atomic_load_explicit((AtomicLocation64)ptr,
                                   std::memory_order_acquire);
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);
  return std::atomic_load_explicit((AtomicLocation64)ptr,
                                   std::memory_order_relaxed);
}

#endif  // ARCH_CPU_64_BITS

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_
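Finally, a hedged usage sketch of the API this header backs, going through the public base/atomicops.h entry point; SpinTryLock, SpinUnlock and g_lock are illustrative names, not part of this change:

#include "base/atomicops.h"

namespace {

base::subtle::Atomic32 g_lock = 0;  // 0 = unlocked, 1 = locked.

// Tries to take the lock. Acquire ordering on a successful CAS makes the
// caller's subsequent reads happen-after the previous holder's writes.
bool SpinTryLock() {
  return base::subtle::Acquire_CompareAndSwap(&g_lock, 0, 1) == 0;
}

// Releases the lock. Release ordering publishes the critical section's writes
// to the next thread that acquires the lock.
void SpinUnlock() {
  base::subtle::Release_Store(&g_lock, 0);
}

}  // namespace

Acquire on the successful compare-and-swap and release on the unlocking store give the usual lock/unlock ordering without requiring sequentially consistent operations.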