Index: src/base/atomicops_internals_mac.h
diff --git a/src/base/atomicops_internals_mac.h b/src/base/atomicops_internals_mac.h
index a046872e4d0fbff487fbf89770b4fe92d2d2d031..7ce71669c95b7830cd59a5c6eda158c458f48133 100644
--- a/src/base/atomicops_internals_mac.h
+++ b/src/base/atomicops_internals_mac.h
@@ -12,6 +12,10 @@
 namespace v8 {
 namespace base {

+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
+
+inline void MemoryBarrier() { OSMemoryBarrier(); }
+
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
@@ -46,10 +50,6 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
   return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
 }

-inline void MemoryBarrier() {
-  OSMemoryBarrier();
-}
-
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
@@ -80,11 +80,11 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();

Jarin
2014/11/05 11:54:53
I think it is better not to touch anything else than …

Hannes Payer (out of office)
2014/11/05 12:16:23
Done. However, I would also change the release store …

 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();
   *ptr = value;
 }
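
The asymmetry discussed in the thread above follows from x86's memory model:
an ordinary store already has release semantics (x86 does not reorder
store-store or load-store), so Release_Store only needs to stop the compiler
from sinking earlier accesses below the store. Acquire_Store, however, needs
store-load ordering, which x86 does not give without a real fence. A minimal
sketch, assuming the Atomic32 typedef from v8's atomicops.h; the *_Sketch
names are mine, not the patch's:

    #include <stdint.h>
    #include <libkern/OSAtomic.h>

    typedef int32_t Atomic32;
    #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

    // Release: the plain x86 store is itself the release; only compiler
    // reordering must be suppressed, so no fence instruction is emitted.
    inline void Release_Store_Sketch(volatile Atomic32* ptr, Atomic32 value) {
      ATOMICOPS_COMPILER_BARRIER();
      *ptr = value;
    }

    // Acquire-store: the store must become visible before any later load,
    // a store-load ordering that x86 only guarantees with a real fence.
    inline void Acquire_Store_Sketch(volatile Atomic32* ptr, Atomic32 value) {
      *ptr = value;
      OSMemoryBarrier();
    }
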
@@ -98,12 +98,12 @@ inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();

Jarin
2014/11/05 11:54:53
How about just saying here
// On x86 processors, …

Hannes Payer (out of office)
2014/11/05 12:16:23
Done.

   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();
   return *ptr;
 }
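
A plausible rendering of the comment Jarin suggests (the exact wording is cut
off in the review; this assumes the standard x86 rationale that a plain load
already acts as an acquire barrier, and reuses the definitions from the sketch
above):

    inline Atomic32 Acquire_Load_Sketch(volatile const Atomic32* ptr) {
      // On x86 processors, a load already has acquire semantics; only the
      // compiler must be kept from hoisting later accesses above it.
      Atomic32 value = *ptr;
      ATOMICOPS_COMPILER_BARRIER();
      return value;
    }
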
@@ -174,11 +174,11 @@ inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();
   *ptr = value;
 }
@@ -188,17 +188,18 @@ inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();

Jarin
2014/11/05 11:54:53
As above, maybe we want
// On x86 processors, loads …

Hannes Payer (out of office)
2014/11/05 12:16:23
Done.

   return value;
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  MemoryBarrier();
+  ATOMICOPS_COMPILER_BARRIER();
   return *ptr;
 }

 #endif  // defined(__LP64__)

+#undef ATOMICOPS_COMPILER_BARRIER

 } }  // namespace v8::base

 #endif  // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_
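
For readers new to these primitives, a short usage sketch of the
release/acquire pairing this header implements; the Producer/Consumer
functions and the flag/payload variables are illustrative, not part of v8:

    Atomic32 flag = 0;
    int payload = 0;

    void Producer() {
      payload = 42;             // ordinary write
      Release_Store(&flag, 1);  // payload is visible before flag becomes 1
    }

    void Consumer() {
      if (Acquire_Load(&flag) == 1) {
        int v = payload;  // guaranteed to observe payload == 42
        (void)v;
      }
    }
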