Chromium Code Reviews
| Index: src/base/atomicops_internals_portable.h |
| diff --git a/src/base/atomicops_internals_portable.h b/src/base/atomicops_internals_portable.h |
| new file mode 100644 |
| index 0000000000000000000000000000000000000000..d335d24262d571d6959f212418b70dc1dde4e1aa |
| --- /dev/null |
| +++ b/src/base/atomicops_internals_portable.h |
| @@ -0,0 +1,200 @@ |
| +// Copyright 2016 the V8 project authors. All rights reserved. |
| +// Use of this source code is governed by a BSD-style license that can be |
| +// found in the LICENSE file. |
| + |
| +// This file is an internal atomic implementation; use atomicops.h instead. |
| +// |
| +// This implementation uses the C++11-style __atomic builtins. The code base is |
| +// currently written assuming atomicity revolves around accesses instead of |
| +// C++11's memory locations. The burden is on the programmer to ensure that all |
| +// memory locations accessed atomically are never accessed non-atomically (tsan |
| +// should help with this). |
| +// |
| +// TODO(jfb) Modify the atomicops.h API and user code to declare atomic |
| +// locations as truly atomic. See the static_assert below. |
| +// |
| +// Of note in this implementation: |
| +// * All NoBarrier variants are implemented as relaxed. |
| +// * All Barrier variants are implemented as sequentially-consistent. |
| +// * Compare exchange's failure ordering is always the same as the success one |
| +// (except for release, which fails as relaxed): using a weaker ordering is |
| +// only valid under certain uses of compare exchange. |
| +// * Acquire store doesn't exist in the C11 memory model; it is instead |
| +// implemented as a relaxed store followed by a sequentially consistent |
| +// fence. |
| +// * Release load doesn't exist in the C11 memory model; it is instead |
| +// implemented as a sequentially consistent fence followed by a relaxed load. |
|
Jarin
2016/10/20 11:03:39
We do not use acquire-store and release-load. How...
Hannes Payer (out of office)
2016/10/20 17:48:01
Done.
|
| +// * Atomic increment is expected to return the post-incremented value, whereas |
| +// C11 fetch add returns the previous value. The implementation therefore |
| +// needs to increment twice (which the compiler should be able to detect and |
| +// optimize). |
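| +// For example (illustrative): if *ptr holds 5 and increment is 2, |
| +// __atomic_fetch_add stores 7 and returns 5, so the function returns |
| +// 2 + 5 == 7, the post-incremented value. |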
| + |
| +#ifndef V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
| +#define V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
| + |
| +#include <atomic> |
| + |
| +#include "src/base/build_config.h" |
| + |
| +namespace v8 { |
| +namespace base { |
| + |
| +// This implementation is transitional and maintains the original API for |
| +// atomicops.h. This requires casting memory locations to the atomic types, and |
| +// assumes that the API and the C++11 implementation are layout-compatible, |
| +// which isn't true for all implementations or hardware platforms. The static |
| +// assertion should detect this issue; were it to fire, this header |
|
Michael Lippautz
2016/10/20 10:28:24
Last sentences of this paragraph do not apply for...
Michael Lippautz
2016/10/20 12:30:15
+1 on removing the weirdos.
Hannes Payer (out of office)
2016/10/20 17:48:01
Done.
|
| +// shouldn't be used. |
| +// |
| + |
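| +// A minimal sketch of the static assertion referenced above (illustrative |
| +// only; the __atomic builtins used below operate on the plain integer types, |
| +// so std::atomic layout-compatibility is not strictly required for them): |
| +static_assert(sizeof(Atomic32) == sizeof(std::atomic<Atomic32>), |
| +              "Atomic32 must be layout-compatible with std::atomic<Atomic32>"); |
| + |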
| +inline void MemoryBarrier() { |
| +#if defined(__GLIBCXX__) |
| + // Work around libstdc++ bug 51038 where atomic_thread_fence was declared but |
| + // not defined, leading to the linker complaining about undefined references. |
| + __atomic_thread_fence(std::memory_order_seq_cst); |
| +#else |
| + std::atomic_thread_fence(std::memory_order_seq_cst); |
| +#endif |
| +} |
| + |
| +inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
| + Atomic32 old_value, |
| + Atomic32 new_value) { |
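| +  // On failure, __atomic_compare_exchange_n writes the current value of *ptr |
| +  // back into old_value, so returning old_value below yields the previous |
| +  // contents of *ptr whether or not the swap happened. |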
| + __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| + __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
| + return old_value; |
| +} |
| + |
| +inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
| + Atomic32 new_value) { |
| + return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
| + Atomic32 increment) { |
| + return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| + Atomic32 increment) { |
| + return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST); |
| +} |
| + |
| +inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| + Atomic32 old_value, Atomic32 new_value) { |
| + __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
| + return old_value; |
| +} |
| + |
| +inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| + Atomic32 old_value, Atomic32 new_value) { |
| + __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| + __ATOMIC_RELEASE, __ATOMIC_RELAXED); |
| + return old_value; |
| +} |
| + |
| +inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { |
| + __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| + __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| + __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| + MemoryBarrier(); |
| +} |
| + |
| +inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| + __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
|
Jarin
2016/10/20 11:03:39
This actually makes me a bit worried. Are we sure...
Hannes Payer (out of office)
2016/10/20 17:48:01
As discussed offline, we will look at performance
|
| +} |
| + |
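A minimal usage sketch of the intended Release_Store/Acquire_Load pairing
(hypothetical Producer/Consumer functions, not part of this patch): the release
store publishes every write that precedes it, and an acquire load that observes
the flag also observes the published payload.

  #include "src/base/atomicops.h"

  static v8::base::Atomic32 payload = 0;
  static v8::base::Atomic32 ready = 0;

  void Producer() {
    v8::base::NoBarrier_Store(&payload, 42);  // Write the data first.
    v8::base::Release_Store(&ready, 1);       // Then publish it via the flag.
  }

  v8::base::Atomic32 Consumer() {
    if (v8::base::Acquire_Load(&ready) == 1) {
      // The acquire load pairs with the release store above, so the payload
      // write is guaranteed to be visible here.
      return v8::base::NoBarrier_Load(&payload);  // Reads 42.
    }
    return 0;  // Not yet published.
  }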
| +inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { |
| + return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
| + return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| + return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); |
| +} |
| + |
| +inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| + MemoryBarrier(); |
| + return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| +} |
| + |
| +#if defined(V8_HOST_ARCH_64_BIT) |
| + |
| +inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| + Atomic64 old_value, |
| + Atomic64 new_value) { |
| + __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| + __ATOMIC_RELAXED, __ATOMIC_RELAXED); |
| + return old_value; |
| +} |
| + |
| +inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
| + Atomic64 new_value) { |
| + return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
| + Atomic64 increment) { |
| + return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
| + Atomic64 increment) { |
| + return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_SEQ_CST); |
| +} |
| + |
| +inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| + Atomic64 old_value, Atomic64 new_value) { |
| + __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| + __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE); |
| + return old_value; |
| +} |
| + |
| +inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| + Atomic64 old_value, Atomic64 new_value) { |
| + __atomic_compare_exchange_n(ptr, &old_value, new_value, false, |
| +                              __ATOMIC_RELEASE, __ATOMIC_RELAXED); |
| + return old_value; |
| +} |
| + |
| +inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| + __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
| + __atomic_store_n(ptr, value, __ATOMIC_RELAXED); |
| + MemoryBarrier(); |
| +} |
| + |
| +inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
| + __atomic_store_n(ptr, value, __ATOMIC_RELEASE); |
| +} |
| + |
| +inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
| + return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| +} |
| + |
| +inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| + return __atomic_load_n(ptr, __ATOMIC_ACQUIRE); |
| +} |
| + |
| +inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| + MemoryBarrier(); |
| + return __atomic_load_n(ptr, __ATOMIC_RELAXED); |
| +} |
| + |
| +#endif // defined(V8_HOST_ARCH_64_BIT) |
| +} // namespace base |
| +} // namespace v8 |
| + |
| +#endif // V8_BASE_ATOMICOPS_INTERNALS_PORTABLE_H_ |
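A minimal sketch of how the compare-and-swap and store variants compose
(hypothetical SpinLock class, not part of this patch): the acquire CAS keeps the
critical section from being reordered before lock acquisition, and the release
store makes the critical section's writes visible before the lock appears free.

  #include "src/base/atomicops.h"

  class SpinLock {
   public:
    void Lock() {
      // Spin until the previous value was 0, i.e. this thread flipped 0 -> 1.
      while (v8::base::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      }
    }
    void Unlock() {
      // Publish all writes from the critical section, then free the lock.
      v8::base::Release_Store(&state_, 0);
    }

   private:
    v8::base::Atomic32 state_ = 0;
  };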