Index: src/base/atomicops_internals_portable.h
diff --git a/src/base/atomicops_internals_portable.h b/src/base/atomicops_internals_portable.h
index 72c1d9a328c8605c815e7dda48529dac655d40f2..8f86785a68cc14347dfdf4f59a06bf83a476c464 100644
--- a/src/base/atomicops_internals_portable.h
+++ b/src/base/atomicops_internals_portable.h
@@ -49,21 +49,20 @@ inline void MemoryBarrier() {
 #endif
 }
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
+inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
   return old_value;
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
+inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
+                                       Atomic32 new_value) {
   return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
+inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
   return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
 }
@@ -86,11 +85,11 @@ inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
   return old_value;
 }
 
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
 }
 
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
 }
@@ -98,11 +97,11 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
 }
 
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
+inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) {
   return __atomic_load_n(ptr, __ATOMIC_RELAXED);
 }
 
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
+inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) {
   return __atomic_load_n(ptr, __ATOMIC_RELAXED);
 }
@@ -112,21 +111,20 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 
 #if defined(V8_HOST_ARCH_64_BIT)
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
+inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
   __atomic_compare_exchange_n(ptr, &old_value, new_value, false,
                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
   return old_value;
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
+inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
+                                       Atomic64 new_value) {
   return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
+inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
   return increment + __atomic_fetch_add(ptr, increment, __ATOMIC_RELAXED);
 }
@@ -149,7 +147,7 @@ inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
   return old_value;
 }
 
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELAXED);
 }
@@ -157,7 +155,7 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
 }
 
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) {
   return __atomic_load_n(ptr, __ATOMIC_RELAXED);
 }