Index: src/include/linux/mips/atomic_ops.h |
diff --git a/src/include/linux/mips/atomic_ops.h b/src/include/linux/mips/atomic_ops.h |
new file mode 100644 |
index 0000000000000000000000000000000000000000..afb1b3ce332eb9d8fdaa2338f1fe60a2fa86ad96 |
--- /dev/null |
+++ b/src/include/linux/mips/atomic_ops.h |
@@ -0,0 +1,74 @@ |
+/* |
+ * Copyright (c) 2012 The Native Client Authors. All rights reserved. |
+ * Use of this source code is governed by a BSD-style license that can be |
+ * found in the LICENSE file. |
+ */ |
+ |
+#ifndef NATIVE_CLIENT_SRC_INCLUDE_LINUX_MIPS_ATOMIC_OPS_LINUX_MIPS_H_ |
+#define NATIVE_CLIENT_SRC_INCLUDE_LINUX_MIPS_ATOMIC_OPS_LINUX_MIPS_H_ 1 |
+ |
+/* Used only by trusted code.  Untrusted code uses gcc intrinsics. */
+ |
+#include "native_client/src/include/portability.h" |
+#include <stdint.h> |
+ |
+typedef int32_t Atomic32; |
+ |
+static INLINE Atomic32 CompareAndSwap(volatile Atomic32* ptr, |
+ Atomic32 old_value, |
+ Atomic32 new_value) { |
+ Atomic32 ret; |
+ |
+ __asm__ __volatile__("1:\n" |
Mark Seaborn
2012/09/08 02:43:14
You should really just use the GCC intrinsics for
petarj
2012/09/11 16:58:13
I suggest that we do a separate cleanup change for
Mark Seaborn
2012/09/13 19:44:36
Would you mind doing the separate cleanup change a
petarj
2012/09/14 23:14:48
Is this one what you had in mind:
http://coderevi
|
+ "ll %0, %1\n" // ret = *ptr |
+ "bne %0, %3, 2f\n" // if (ret != old_value) goto 2 |
+ "nop\n" // delay slot nop |
+ "sc %2, %1\n" // *ptr = new_value (with atomic check) |
+ "beqz %2, 1b\n" // start again on atomic error |
+ "nop\n" // delay slot nop |
+ "2:\n" |
+ : "=&r" (ret), "=m" (*ptr), "+&r" (new_value) |
+ : "Ir" (old_value), "r" (new_value), "m" (*ptr) |
+ : "memory"); |
+ |
+ |
+ return ret; |
+} |
+ |
/*
 * Atomically store new_value into *ptr and return the previous value.
 *
 * NOTE(review): no "sync" barrier is issued; only the compiler "memory"
 * clobber is present -- confirm callers do not depend on hardware ordering
 * beyond the ll/sc pair.
 */
static INLINE Atomic32 AtomicExchange(volatile Atomic32* ptr,
                                      Atomic32 new_value) {
  Atomic32 tmp, old;

  __asm__ __volatile__("1:\n"
                       "ll %1, %2\n"   /* old = *ptr (linked load) */
                       "move %0, %3\n" /* tmp = new_value; refreshed each
                                          iteration because "sc" overwrites
                                          tmp with the success flag */
                       "sc %0, %2\n"   /* *ptr = tmp; tmp = 1 on success,
                                          0 on failure */
                       "beqz %0, 1b\n" /* retry if the store failed */
                       "nop\n"         /* branch delay slot */
                       : "=&r" (tmp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");

  return old;
}
+ |
/*
 * Atomically add increment to *ptr and return the new (post-add) value.
 *
 * NOTE(review): no "sync" barrier is issued; only the compiler "memory"
 * clobber is present -- confirm callers do not depend on hardware ordering
 * beyond the ll/sc pair.
 */
static INLINE Atomic32 AtomicIncrement(volatile Atomic32* ptr,
                                       Atomic32 increment) {
  Atomic32 tmp, res;

  __asm__ __volatile__("1:\n"
                       "ll %0, %2\n"   /* tmp = *ptr (linked load) */
                       "addu %0, %3\n" /* tmp += increment */
                       "move %1, %0\n" /* res = tmp, saved before "sc"
                                          overwrites tmp with the success
                                          flag */
                       "sc %0, %2\n"   /* *ptr = tmp; tmp = 1 on success,
                                          0 on failure */
                       "beqz %0, 1b\n" /* retry if the store failed */
                       "nop\n"         /* branch delay slot */
                       : "=&r" (tmp), "=&r" (res), "=m" (*ptr)
                       : "Ir" (increment), "m" (*ptr)
                       : "memory");
  /* res holds the updated value of *ptr. */

  return res;
}
+ |
+#endif /* NATIVE_CLIENT_SRC_INCLUDE_LINUX_MIPS_ATOMIC_OPS_LINUX_MIPS_H_ */ |