Index: src/atomicops_internals_mips_gcc.h
diff --git a/src/atomicops_internals_mips_gcc.h b/src/atomicops_internals_mips_gcc.h
index 5113de2891088796aeb4ed77ea938e3e7137231c..9498fd76e1478c83ebfa77f1f390983e72fa41e5 100644
--- a/src/atomicops_internals_mips_gcc.h
+++ b/src/atomicops_internals_mips_gcc.h
@@ -30,7 +30,7 @@
 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_
 
-#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("sync" : : : "memory")
+#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
 namespace v8 {
 namespace internal {
@@ -48,16 +48,19 @@ namespace internal {
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
-  Atomic32 prev;
-  __asm__ __volatile__("1:\n"
-                       "ll %0, %1\n"  // prev = *ptr
+  Atomic32 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "ll %0, %5\n"  // prev = *ptr
                        "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
-                       "nop\n"  // delay slot nop
-                       "sc %2, %1\n"  // *ptr = new_value (with atomic check)
+                       "move %2, %4\n"  // tmp = new_value
+                       "sc %2, %1\n"  // *ptr = tmp (with atomic check)
                        "beqz %2, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        "2:\n"
-                       : "=&r" (prev), "=m" (*ptr), "+&r" (new_value)
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                        : "Ir" (old_value), "r" (new_value), "m" (*ptr)
                        : "memory");
   return prev;
@@ -68,12 +71,15 @@ inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 temp, old;
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %1, %2\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                        : "r" (new_value), "m" (*ptr)
                        : "memory");
@@ -87,13 +93,15 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   Atomic32 temp, temp2;
 
-  __asm__ __volatile__("1:\n"
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
                        "ll %0, %2\n"  // temp = *ptr
-                       "addu %0, %3\n"  // temp = temp + increment
-                       "move %1, %0\n"  // temp2 = temp
-                       "sc %0, %2\n"  // *ptr = temp (with atomic check)
-                       "beqz %0, 1b\n"  // start again on atomic error
-                       "nop\n"  // delay slot nop
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "addu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
                        : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                        : "Ir" (increment), "m" (*ptr)
                        : "memory");
@@ -103,6 +111,7 @@ inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
 
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
+  ATOMICOPS_COMPILER_BARRIER();
   Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
   ATOMICOPS_COMPILER_BARRIER();
   return res;
@@ -117,16 +126,19 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
-  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
   ATOMICOPS_COMPILER_BARRIER();
-  return x;
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   ATOMICOPS_COMPILER_BARRIER();
-  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  ATOMICOPS_COMPILER_BARRIER();
+  return res;
 }
 
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
@@ -134,7 +146,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
 }
 
 inline void MemoryBarrier() {
-  ATOMICOPS_COMPILER_BARRIER();
+  __asm__ __volatile__("sync" : : : "memory");
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {