Chromium Code Reviews

| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2014 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2014 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file is an internal atomic implementation, use atomicops.h instead. | 5 // This file is an internal atomic implementation, use atomicops.h instead. |
| 6 // | 6 // |
| 7 // This implementation uses C++11 atomics' member functions. The code base is | 7 // This implementation uses C++11 atomics' member functions. The code base is |
| 8 // currently written assuming atomicity revolves around accesses instead of | 8 // currently written assuming atomicity revolves around accesses instead of |
| 9 // C++11's memory locations. The burden is on the programmer to ensure that all | 9 // C++11's memory locations. The burden is on the programmer to ensure that all |
| 10 // memory locations accessed atomically are never accessed non-atomically (tsan | 10 // memory locations accessed atomically are never accessed non-atomically (tsan |
| (...skipping 15 matching lines...) | |
| 26 // implemented as sequentially consistent fence followed by a relaxed load. | 26 // implemented as sequentially consistent fence followed by a relaxed load. |
| 27 // * Atomic increment is expected to return the post-incremented value, whereas | 27 // * Atomic increment is expected to return the post-incremented value, whereas |
| 28 // C11 fetch add returns the previous value. The implementation therefore | 28 // C11 fetch add returns the previous value. The implementation therefore |
| 29 // needs to increment twice (which the compiler should be able to detect and | 29 // needs to increment twice (which the compiler should be able to detect and |
| 30 // optimize). | 30 // optimize). |
| 31 | 31 |
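
To make the "increment twice" note above concrete, here is a minimal sketch (not taken from this patch; the typedefs mirror the ones visible elsewhere in the diff, and the actual implementation in the collapsed region may differ in detail):

```cpp
// Sketch only: a post-increment-returning wrapper over std::atomic::fetch_add,
// which itself returns the value *before* the addition.
#include <atomic>
#include <cstdint>

typedef int32_t Atomic32;
typedef volatile std::atomic<Atomic32>* AtomicLocation32;

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  // fetch_add yields the old value; adding `increment` once more produces the
  // post-incremented value the legacy atomicops API promises. The compiler can
  // usually fold this into a single addition on the RMW result.
  return increment +
         ((AtomicLocation32)ptr)
             ->fetch_add(increment, std::memory_order_relaxed);
}
```
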
| 32 #ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ | 32 #ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
| 33 #define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ | 33 #define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
| 34 | 34 |
| 35 #include <atomic> | 35 #include <atomic> |
| | 36 #include <stdint.h> |
| | 37 |
| | 38 typedef int32_t Atomic32; |
| | 39 #define BASE_HAS_ATOMIC64 1 // Use only in tests and base/atomic* |

JF
2015/12/26 00:27:24
I'm not sure I understand the comment: isn't this …

Nico
2015/12/26 04:13:03
It's like this in the x86 version this patch delet…

JF
2015/12/26 15:56:15
Is it required though? If so, could we only suppor…

Nico
2015/12/26 16:35:05
In practice, there are three non-test uses of atom…

JF
2015/12/26 16:58:37
OK thanks for looking into it!
Yes, I think 2 can …

| 36 | 40 |
| 37 namespace base { | 41 namespace base { |
| 38 namespace subtle { | 42 namespace subtle { |
| 39 | 43 |
| 40 // This implementation is transitional and maintains the original API for | 44 // This implementation is transitional and maintains the original API for |
| 41 // atomicops.h. This requires casting memory locations to the atomic types, and | 45 // atomicops.h. This requires casting memory locations to the atomic types, and |
| 42 // assumes that the API and the C++11 implementation are layout-compatible, | 46 // assumes that the API and the C++11 implementation are layout-compatible, |
| 43 // which isn't true for all implementations or hardware platforms. The static | 47 // which isn't true for all implementations or hardware platforms. The static |
| 44 // assertion should detect this issue, were it to fire then this header | 48 // assertion should detect this issue, were it to fire then this header |
| 45 // shouldn't be used. | 49 // shouldn't be used. |
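
The 32-bit typedef and layout check this comment refers to sit in the collapsed region below; a sketch of what such an assertion looks like, mirroring the 64-bit declarations visible further down (names assumed, details may differ from the actual skipped lines):

```cpp
// Sketch: the cast-based scheme is only sound if std::atomic<Atomic32> has the
// same size as the plain Atomic32 it aliases; assert that at compile time.
#include <atomic>
#include <cstdint>

typedef int32_t Atomic32;
typedef volatile std::atomic<Atomic32>* AtomicLocation32;
static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32),
              "incompatible 32-bit atomic layout");
```
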
| (...skipping 84 matching lines...) | |
| 130 | 134 |
| 131 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 135 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| 132 return ((AtomicLocation32)ptr)->load(std::memory_order_acquire); | 136 return ((AtomicLocation32)ptr)->load(std::memory_order_acquire); |
| 133 } | 137 } |
| 134 | 138 |
| 135 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 139 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| 136 MemoryBarrier(); | 140 MemoryBarrier(); |
| 137 return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed); | 141 return ((AtomicLocation32)ptr)->load(std::memory_order_relaxed); |
| 138 } | 142 } |
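
As a side note on how the acquire load above is meant to pair with a release-side store, here is an equivalent-in-spirit sketch using plain std::atomic (not this header's API; just an illustration of the release/acquire publication pattern):

```cpp
#include <atomic>
#include <cstdint>

int32_t payload = 0;
std::atomic<int32_t> ready{0};

void Publisher() {
  payload = 42;                               // plain write to the data
  ready.store(1, std::memory_order_release);  // publish: orders the write above
}

int32_t Consumer() {
  if (ready.load(std::memory_order_acquire))  // acquire pairs with the release
    return payload;                           // guaranteed to observe 42
  return -1;                                  // flag not published yet
}
```
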
| 139 | 143 |
| 140 #if defined(ARCH_CPU_64_BITS) | 144 #if defined(BASE_HAS_ATOMIC64) |
| | 145 typedef int64_t Atomic64; |
| 141 | 146 |
| 142 typedef volatile std::atomic<Atomic64>* AtomicLocation64; | 147 typedef volatile std::atomic<Atomic64>* AtomicLocation64; |
| 143 static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64), | 148 static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64), |
| 144 "incompatible 64-bit atomic layout"); | 149 "incompatible 64-bit atomic layout"); |
| 145 | 150 |
| 146 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 151 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 147 Atomic64 old_value, | 152 Atomic64 old_value, |
| 148 Atomic64 new_value) { | 153 Atomic64 new_value) { |
| 149 ((AtomicLocation64)ptr) | 154 ((AtomicLocation64)ptr) |
| 150 ->compare_exchange_strong(old_value, | 155 ->compare_exchange_strong(old_value, |
| (...skipping 61 matching lines...) | |
| 212 } | 217 } |
| 213 | 218 |
| 214 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 219 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| 215 return ((AtomicLocation64)ptr)->load(std::memory_order_acquire); | 220 return ((AtomicLocation64)ptr)->load(std::memory_order_acquire); |
| 216 } | 221 } |
| 217 | 222 |
| 218 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 223 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| 219 MemoryBarrier(); | 224 MemoryBarrier(); |
| 220 return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed); | 225 return ((AtomicLocation64)ptr)->load(std::memory_order_relaxed); |
| 221 } | 226 } |
| 222 | 227 #endif // defined(BASE_HAS_ATOMIC64) |
| 223 #endif // defined(ARCH_CPU_64_BITS) | |
| 224 } // namespace subtle | 228 } // namespace subtle |
| 225 } // namespace base | 229 } // namespace base |
| 226 | 230 |
| 227 #endif // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ | 231 #endif // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |