Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 /* | |
| 2 * Copyright (c) 2012 The Native Client Authors. All rights reserved. | |
| 3 * Use of this source code is governed by a BSD-style license that can be | |
| 4 * found in the LICENSE file. | |
| 5 */ | |
| 6 | |
| 7 #ifndef NATIVE_CLIENT_SRC_INCLUDE_LINUX_MIPS_ATOMIC_OPS_LINUX_MIPS_H_ | |
| 8 #define NATIVE_CLIENT_SRC_INCLUDE_LINUX_MIPS_ATOMIC_OPS_LINUX_MIPS_H_ 1 | |
| 9 | |
| 10 // Used only by trusted code. Untrusted code uses gcc intrinsics. | |
|
Mark Seaborn
2012/09/08 02:43:14
Please use C /* ... */ comments in C (even if copied from code that uses // comments).
petarj
2012/09/11 16:58:13
Done.
| |
| 11 | |
| 12 #include "native_client/src/include/portability.h" | |
| 13 #include <stdint.h> | |
| 14 | |
| 15 typedef int32_t Atomic32; | |
| 16 | |
| 17 static INLINE Atomic32 CompareAndSwap(volatile Atomic32* ptr, | |
| 18 Atomic32 old_value, | |
| 19 Atomic32 new_value) { | |
| 20 Atomic32 ret; | |
| 21 | |
| 22 __asm__ __volatile__("1:\n" | |
|
Mark Seaborn
2012/09/08 02:43:14
You should really just use the GCC intrinsics for
petarj
2012/09/11 16:58:13
I suggest that we do a separate cleanup change for
Mark Seaborn
2012/09/13 19:44:36
Would you mind doing the separate cleanup change a
petarj
2012/09/14 23:14:48
Is this one what you had in mind:
http://coderevi
| |
| 23 "ll %0, %1\n" // ret = *ptr | |
| 24 "bne %0, %3, 2f\n" // if (ret != old_value) goto 2 | |
| 25 "nop\n" // delay slot nop | |
| 26 "sc %2, %1\n" // *ptr = new_value (with atomic check) | |
| 27 "beqz %2, 1b\n" // start again on atomic error | |
| 28 "nop\n" // delay slot nop | |
| 29 "2:\n" | |
| 30 : "=&r" (ret), "=m" (*ptr), "+&r" (new_value) | |
| 31 : "Ir" (old_value), "r" (new_value), "m" (*ptr) | |
| 32 : "memory"); | |
| 33 | |
| 34 | |
| 35 return ret; | |
| 36 } | |
| 37 | |
| 38 static INLINE Atomic32 AtomicExchange(volatile Atomic32* ptr, | |
| 39 Atomic32 new_value) { | |
| 40 Atomic32 tmp, old; | |
| 41 | |
| 42 __asm__ __volatile__("1:\n" | |
| 43 "ll %1, %2\n" // old = *ptr | |
| 44 "move %0, %3\n" // tmp = new_value | |
| 45 "sc %0, %2\n" // *ptr = tmp (with atomic check) | |
| 46 "beqz %0, 1b\n" // start again on atomic error | |
| 47 "nop\n" // delay slot nop | |
| 48 : "=&r" (tmp), "=&r" (old), "=m" (*ptr) | |
| 49 : "r" (new_value), "m" (*ptr) | |
| 50 : "memory"); | |
| 51 | |
| 52 return old; | |
| 53 } | |
| 54 | |
| 55 static INLINE Atomic32 AtomicIncrement(volatile Atomic32* ptr, | |
| 56 Atomic32 increment) { | |
| 57 Atomic32 tmp, res; | |
| 58 | |
| 59 __asm__ __volatile__("1:\n" | |
| 60 "ll %0, %2\n" // tmp = *ptr | |
| 61 "addu %0, %3\n" // tmp = tmp + increment | |
| 62 "move %1, %0\n" // res = tmp | |
| 63 "sc %0, %2\n" // *ptr = tmp (with atomic check) | |
| 64 "beqz %0, 1b\n" // start again on atomic error | |
| 65 "nop\n" // delay slot nop | |
| 66 : "=&r" (tmp), "=&r" (res), "=m" (*ptr) | |
| 67 : "Ir" (increment), "m" (*ptr) | |
| 68 : "memory"); | |
| 69 // res now holds the final value. | |
| 70 | |
| 71 return res; | |
| 72 } | |
| 73 | |
| 74 #endif /* NATIVE_CLIENT_SRC_INCLUDE_LINUX_MIPS_ATOMIC_OPS_LINUX_MIPS_H_ */ | |
| OLD | NEW |