Index: src/base/atomicops_internals_x86_msvc.h
diff --git a/src/base/atomicops_internals_x86_msvc.h b/src/base/atomicops_internals_x86_msvc.h
index 0d2068e9f0aa11deb6affc34528bc631c8c19a61..cf3a3c0ee1323fc60f98d17622c7228a6dc167fe 100644
--- a/src/base/atomicops_internals_x86_msvc.h
+++ b/src/base/atomicops_internals_x86_msvc.h
@@ -22,17 +22,16 @@
 namespace v8 {
 namespace base {
 
-inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
-                                         Atomic32 old_value,
-                                         Atomic32 new_value) {
+inline Atomic32 Relaxed_CompareAndSwap(volatile Atomic32* ptr,
+                                       Atomic32 old_value, Atomic32 new_value) {
   LONG result = InterlockedCompareExchange(
       reinterpret_cast<volatile LONG*>(ptr), static_cast<LONG>(new_value),
       static_cast<LONG>(old_value));
   return static_cast<Atomic32>(result);
 }
 
-inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
-                                         Atomic32 new_value) {
+inline Atomic32 Relaxed_AtomicExchange(volatile Atomic32* ptr,
+                                       Atomic32 new_value) {
   LONG result = InterlockedExchange(reinterpret_cast<volatile LONG*>(ptr),
                                     static_cast<LONG>(new_value));
   return static_cast<Atomic32>(result);
@@ -45,8 +44,8 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
          increment;
 }
 
-inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
-                                          Atomic32 increment) {
+inline Atomic32 Relaxed_AtomicIncrement(volatile Atomic32* ptr,
+                                        Atomic32 increment) {
   return Barrier_AtomicIncrement(ptr, increment);
 }
 
@@ -63,20 +62,20 @@ inline void MemoryBarrier() {
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  return Relaxed_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  return Relaxed_CompareAndSwap(ptr, old_value, new_value);
 }
 
-inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
+inline void Relaxed_Store(volatile Atomic8* ptr, Atomic8 value) {
   *ptr = value;
 }
 
-inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
+inline void Relaxed_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
@@ -85,13 +84,9 @@ inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   // See comments in Atomic64 version of Release_Store() below.
 }
 
-inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
-  return *ptr;
-}
+inline Atomic8 Relaxed_Load(volatile const Atomic8* ptr) { return *ptr; }
 
-inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
-  return *ptr;
-}
+inline Atomic32 Relaxed_Load(volatile const Atomic32* ptr) { return *ptr; }
 
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
@@ -104,17 +99,16 @@ inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
 
 static_assert(sizeof(Atomic64) == sizeof(PVOID), "atomic word is atomic");
 
-inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
-                                         Atomic64 old_value,
-                                         Atomic64 new_value) {
+inline Atomic64 Relaxed_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value, Atomic64 new_value) {
   PVOID result = InterlockedCompareExchangePointer(
       reinterpret_cast<volatile PVOID*>(ptr),
       reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
   return reinterpret_cast<Atomic64>(result);
 }
 
-inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
-                                         Atomic64 new_value) {
+inline Atomic64 Relaxed_AtomicExchange(volatile Atomic64* ptr,
+                                       Atomic64 new_value) {
   PVOID result = InterlockedExchangePointer(
       reinterpret_cast<volatile PVOID*>(ptr),
       reinterpret_cast<PVOID>(new_value));
@@ -128,12 +122,12 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                   static_cast<LONGLONG>(increment)) + increment;
 }
 
-inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
-                                          Atomic64 increment) {
+inline Atomic64 Relaxed_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
   return Barrier_AtomicIncrement(ptr, increment);
 }
 
-inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+inline void Relaxed_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
 }
 
@@ -148,9 +142,7 @@ inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
   // http://developer.intel.com/design/pentium4/manuals/index_new.htm
 }
 
-inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
-  return *ptr;
-}
+inline Atomic64 Relaxed_Load(volatile const Atomic64* ptr) { return *ptr; }
 
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
@@ -160,13 +152,13 @@ inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  return Relaxed_CompareAndSwap(ptr, old_value, new_value);
 }
 
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  return Relaxed_CompareAndSwap(ptr, old_value, new_value);
 }
 
 