| Index: base/atomicops_internals_portable.h |
| diff --git a/base/atomicops_internals_portable.h b/base/atomicops_internals_portable.h |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..993b92f9e91f06c1eb58acaa79a612c05912701a |
| --- /dev/null |
| +++ b/base/atomicops_internals_portable.h |
| @@ -0,0 +1,239 @@ |
| +// Copyright (c) 2014 The Chromium Authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +// This file is an internal atomic implementation, use atomicops.h instead. |
| +// |
| +// This implementation uses C11 atomics' free functions through C++11's <atomic> |
| +// header instead of C++11 atomics' types because the code base is currently |
| +// written assuming atomicity revolves around accesses instead of C++11's memory |
Dmitry Vyukov
2014/10/08 08:05:44
I don't understand the "because" part.
You still
| +// locations. The burden is on the programmer to ensure that all memory |
| +// locations accessed atomically are never accessed non-atomically (tsan should |
| +// help with this). |
| +// |
| +// Of note in this implementation: |
| +// * All NoBarrier variants are implemented as relaxed. |
| +// * All Barrier variants are implemented as sequentially-consistent. |
| +// * Compare exchange's failure ordering is always the same as the success one |
| +// (except for release, which fails as relaxed): using a weaker ordering is |
| +// only valid under certain uses of compare exchange. |
| +// * Acquire store doesn't exist in the C11 memory model; it is instead |
| +// implemented as a relaxed store followed by a sequentially consistent |
| +// fence. |
| +// * Release load doesn't exist in the C11 memory model; it is instead |
| +// implemented as a sequentially consistent fence followed by a relaxed load. |
| +// * Atomic increment is expected to return the post-incremented value, whereas |
| +// C11 fetch add returns the previous value. The implementation therefore |
| +// adds the increment to the value that fetch add returned (which the |
| +// compiler should be able to detect and optimize). |
| + |
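To make the increment bullet concrete, here is a minimal stand-alone sketch (illustrative only, not part of this patch; the names and values are invented) contrasting C11-style fetch_add, which returns the value held before the addition, with the post-incremented value the operations in this header return:

#include <atomic>
#include <cassert>
#include <cstdint>

// Illustrative demo only: fetch_add yields the pre-increment value, so the
// increment has to be added back to obtain the post-incremented result.
int main() {
  std::atomic<int32_t> counter{41};

  int32_t previous = counter.fetch_add(1, std::memory_order_relaxed);
  assert(previous == 41);        // fetch_add returns the old value.
  assert(counter.load() == 42);

  counter.store(41);
  int32_t post = 1 + counter.fetch_add(1, std::memory_order_relaxed);
  assert(post == 42);            // matches the post-incremented contract.
  return 0;
}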
| +#ifndef BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
| +#define BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
| + |
| +#include <atomic> |
| + |
| +namespace base { |
| +namespace subtle { |
| + |
| +// This implementation is transitional and maintains the original API for |
| +// atomicops.h. This requires casting memory locations to the atomic types, and |
| +// assumes that the API and the C++11 implementation are layout-compatible, |
| +// which isn't true for all implementations or hardware platforms. The static |
| +// assertion should detect this issue; were it to fire, this header |
| +// shouldn't be used. |
| +// |
| +// TODO(jfb) If this header manages to stay committed then the API should be |
| +// modified, and all call sites updated. |
| +typedef volatile std::atomic<Atomic32>* AtomicLocation32; |
| +static_assert(sizeof(*(AtomicLocation32) nullptr) == sizeof(Atomic32), |
| + "incompatible 32-bit atomic layout"); |
| + |
| +inline void MemoryBarrier() { |
| + std::atomic_thread_fence(std::memory_order_seq_cst); |
| +} |
| + |
| +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
| + Atomic32 old_value, |
| + Atomic32 new_value) { |
| + auto expected = old_value; |
| + std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr, |
| +                                               &expected, |
Dmitry Vyukov
2014/10/08 08:05:44
remove expected, just do &old_value
| + new_value, |
| + std::memory_order_relaxed, |
| + std::memory_order_relaxed); |
| + return expected; |
| +} |
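A sketch of what the reviewer's suggestion could look like (an assumption about the intended change, not the committed code): old_value is already a by-value copy, so it can serve directly as the expected/observed slot and be returned afterwards.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr,
                                               &old_value,
                                               new_value,
                                               std::memory_order_relaxed,
                                               std::memory_order_relaxed);
  // On failure the observed value is written into old_value; on success
  // old_value already equals what was observed, so either way it is the
  // correct return value.
  return old_value;
}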
| + |
| +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
| + Atomic32 new_value) { |
| + return std::atomic_exchange_explicit( |
| +      (AtomicLocation32)ptr, new_value, std::memory_order_relaxed); |
Dmitry Vyukov
2014/10/08 08:05:44
I would use member functions, because they are shorter.
| +} |
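A sketch of the member-function spelling the reviewer prefers (again an assumption, not the committed code); the volatile-qualified members of std::atomic accept the same explicit memory order as the free functions:

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  // Same operation, shorter spelling via the std::atomic member function.
  return ((AtomicLocation32)ptr)
      ->exchange(new_value, std::memory_order_relaxed);
}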
| + |
| +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
| + Atomic32 increment) { |
| + return increment + std::atomic_fetch_add_explicit((AtomicLocation32)ptr, |
| + increment, |
| + std::memory_order_relaxed); |
| +} |
| + |
| +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| + Atomic32 increment) { |
| + return increment + std::atomic_fetch_add((AtomicLocation32)ptr, increment); |
| +} |
| + |
| +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| + Atomic32 old_value, |
| + Atomic32 new_value) { |
| + auto expected = old_value; |
| + std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr, |
| +                                               &expected, |
Dmitry Vyukov
2014/10/08 08:05:44
remove expected, use &old_value
| + new_value, |
| + std::memory_order_acquire, |
| + std::memory_order_acquire); |
| + return expected; |
| +} |
| + |
| +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| + Atomic32 old_value, |
| + Atomic32 new_value) { |
| + auto expected = old_value; |
| + std::atomic_compare_exchange_strong_explicit((AtomicLocation32)ptr, |
| +                                               &expected, |
Dmitry Vyukov
2014/10/08 08:05:44
remove expected, use &old_value
here and below
| + new_value, |
| + std::memory_order_release, |
| + std::memory_order_relaxed); |
| + return expected; |
| +} |
| + |
| +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| + std::atomic_store_explicit( |
| + (AtomicLocation32)ptr, value, std::memory_order_relaxed); |
| +} |
| + |
| +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| + std::atomic_store_explicit( |
| + (AtomicLocation32)ptr, value, std::memory_order_relaxed); |
| + std::atomic_thread_fence(std::memory_order_seq_cst); |
| +} |
| + |
| +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| + std::atomic_store_explicit( |
| + (AtomicLocation32)ptr, value, std::memory_order_release); |
| +} |
| + |
| +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
| + return std::atomic_load_explicit((AtomicLocation32)ptr, |
| + std::memory_order_relaxed); |
| +} |
| + |
| +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| + return std::atomic_load_explicit((AtomicLocation32)ptr, |
| + std::memory_order_acquire); |
| +} |
| + |
| +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| + std::atomic_thread_fence(std::memory_order_seq_cst); |
| + return std::atomic_load_explicit((AtomicLocation32)ptr, |
| + std::memory_order_relaxed); |
| +} |
| + |
| +#ifdef ARCH_CPU_64_BITS |
| +// 64-bit versions of the operations. |
| +// See the 32-bit versions for comments. |
Dmitry Vyukov
2014/10/08 08:05:44
there are no comments on 32-bit versions
| + |
| +typedef volatile std::atomic<Atomic64>* AtomicLocation64; |
| +static_assert(sizeof(*(AtomicLocation64) nullptr) == sizeof(Atomic64), |
| + "incompatible 64-bit atomic layout"); |
| + |
| +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| + Atomic64 old_value, |
| + Atomic64 new_value) { |
| + auto expected = old_value; |
| + std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr, |
| + &expected, |
| + new_value, |
| + std::memory_order_relaxed, |
| + std::memory_order_relaxed); |
| + return expected; |
| +} |
| + |
| +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
| + Atomic64 new_value) { |
| + return std::atomic_exchange_explicit( |
| + (AtomicLocation64)ptr, new_value, std::memory_order_relaxed); |
| +} |
| + |
| +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
| + Atomic64 increment) { |
| + return increment + std::atomic_fetch_add_explicit((AtomicLocation64)ptr, |
| + increment, |
| + std::memory_order_relaxed); |
| +} |
| + |
| +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
| + Atomic64 increment) { |
| + return increment + std::atomic_fetch_add((AtomicLocation64)ptr, increment); |
| +} |
| + |
| +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| + Atomic64 old_value, |
| + Atomic64 new_value) { |
| + auto expected = old_value; |
| + std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr, |
| + &expected, |
| + new_value, |
| + std::memory_order_acquire, |
| + std::memory_order_acquire); |
| + return expected; |
| +} |
| + |
| +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| + Atomic64 old_value, |
| + Atomic64 new_value) { |
| + auto expected = old_value; |
| + std::atomic_compare_exchange_strong_explicit((AtomicLocation64)ptr, |
| + &expected, |
| + new_value, |
| + std::memory_order_release, |
| + std::memory_order_relaxed); |
| + return expected; |
| +} |
| + |
| +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| + std::atomic_store_explicit( |
| + (AtomicLocation64)ptr, value, std::memory_order_relaxed); |
| +} |
| + |
| +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
| + std::atomic_store_explicit( |
| + (AtomicLocation64)ptr, value, std::memory_order_relaxed); |
| + std::atomic_thread_fence(std::memory_order_seq_cst); |
| +} |
| + |
| +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
| + std::atomic_store_explicit( |
| + (AtomicLocation64)ptr, value, std::memory_order_release); |
| +} |
| + |
| +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
| + return std::atomic_load_explicit((AtomicLocation64)ptr, |
| + std::memory_order_relaxed); |
| +} |
| + |
| +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| + return std::atomic_load_explicit((AtomicLocation64)ptr, |
| + std::memory_order_acquire); |
| +} |
| + |
| +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| + std::atomic_thread_fence(std::memory_order_seq_cst); |
| + return std::atomic_load_explicit((AtomicLocation64)ptr, |
| + std::memory_order_relaxed); |
| +} |
| + |
| +#endif // ARCH_CPU_64_BITS |
| +}  // namespace subtle |
| +}  // namespace base |
| + |
| +#endif // BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |