#ifndef __SH4A__
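/* Run-time selected atomic operations for SuperH models that may lack
 * the SH4A ll/sc instructions. __sh_atomic_model picks one of three
 * strategies: SH_A_LLSC defers to the ll/sc-based *_llsc helpers,
 * SH_A_IMASK masks interrupts around a plain read-modify-write (only
 * possible where the process runs privileged, e.g. on nommu targets),
 * and the default uses the kernel's gusa restartable sequences
 * described below. */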

#include "sh_atomic.h"
#include "atomic.h"
#include "libc.h"

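/* Block interrupts by setting the four IMASK bits in SR to 0xf and
 * return the previous SR value for unmask(). stc/ldc on SR are
 * privileged instructions, so this is only usable under the
 * SH_A_IMASK model. */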
static inline unsigned mask()
{
	unsigned sr;
	__asm__ __volatile__ ( "\n"
	"	stc sr,r0 \n"
	"	mov r0,%0 \n"
	"	or #0xf0,r0 \n"
	"	ldc r0,sr \n"
	: "=&r"(sr) : : "memory", "r0" );
	return sr;
}

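/* Restore the SR value saved by mask(), re-enabling interrupts. */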
static inline void unmask(unsigned sr)
{
	__asm__ __volatile__ ( "ldc %0,sr" : : "r"(sr) : "memory" );
}

/* gusa is a hack in the kernel which lets you create a sequence of
 * instructions that will be restarted from the beginning if the process
 * is preempted in the middle of the sequence. It suffices for
 * implementing atomics on non-SMP systems. The ABI is:
 * r0 = address of the first instruction after the atomic sequence
 * r1 = original stack pointer
 * r15 = -1 * length of the atomic sequence in bytes
 * A worked expansion of the macros appears below. */
#define GUSA_CLOBBERS	"r0", "r1", "memory"
#define GUSA_START(mem,old,nop) \
"	.align 2\n" \
"	mova 1f, r0\n" \
nop \
"	mov r15, r1\n" \
"	mov #(0f-1f), r15\n" \
"0:	mov.l @" mem ", " old "\n"
/* The target of mova must be 4-byte aligned, so we may need a nop.
 * ODD/EVEN refers to the number of instructions between the load at 0:
 * and the store in GUSA_END: an odd count already leaves label 1
 * aligned, an even count needs the nop. */
#define GUSA_START_ODD(mem,old)  GUSA_START(mem,old,"")
#define GUSA_START_EVEN(mem,old) GUSA_START(mem,old,"\tnop\n")
#define GUSA_END(mem,new) \
"	mov.l " new ", @" mem "\n" \
"1:	mov r1, r15\n"

/* Compare-and-swap: if *p holds t, replace it with s; return the old
 * value either way. In the gusa variant, bf branches past the store to
 * label 1 when the comparison fails, ending the sequence without a
 * write. */
int __sh_cas(volatile int *p, int t, int s)
{
	if (__sh_atomic_model == SH_A_LLSC) return __sh_cas_llsc(p, t, s);

	if (__sh_atomic_model == SH_A_IMASK) {
		unsigned sr = mask();
		int old = *p;
		if (old==t) *p = s;
		unmask(sr);
		return old;
	}

	int old;
	__asm__ __volatile__(
		GUSA_START_EVEN("%1", "%0")
		"	cmp/eq %0, %2\n"
		"	bf 1f\n"
		GUSA_END("%1", "%3")
		: "=&r"(old) : "r"(p), "r"(t), "r"(s) : GUSA_CLOBBERS, "t");
	return old;
}

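/* Atomically exchange *x with v, returning the previous value. */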
int __sh_swap(volatile int *x, int v)
{
	if (__sh_atomic_model == SH_A_LLSC) return __sh_swap_llsc(x, v);

	if (__sh_atomic_model == SH_A_IMASK) {
		unsigned sr = mask();
		int old = *x;
		*x = v;
		unmask(sr);
		return old;
	}

	int old;
	__asm__ __volatile__(
		GUSA_START_EVEN("%1", "%0")
		GUSA_END("%1", "%2")
		: "=&r"(old) : "r"(x), "r"(v) : GUSA_CLOBBERS);
	return old;
}

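/* Atomically add v to *x, returning the prior value. The gusa variant
 * computes the sum in a scratch register (dummy) so the returned old
 * value is not clobbered by the addition. */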
int __sh_fetch_add(volatile int *x, int v)
{
	if (__sh_atomic_model == SH_A_LLSC) return __sh_fetch_add_llsc(x, v);

	if (__sh_atomic_model == SH_A_IMASK) {
		unsigned sr = mask();
		int old = *x;
		*x = old + v;
		unmask(sr);
		return old;
	}

	int old, dummy;
	__asm__ __volatile__(
		GUSA_START_EVEN("%2", "%0")
		"	mov %0, %1\n"
		"	add %3, %1\n"
		GUSA_END("%2", "%1")
		: "=&r"(old), "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
	return old;
}

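/* Atomic store. A single aligned word store is atomic by itself; the
 * inline asm with a "memory" clobber also keeps the compiler from
 * reordering accesses around it. */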
void __sh_store(volatile int *p, int x)
{
	if (__sh_atomic_model == SH_A_LLSC) return __sh_store_llsc(p, x);
	__asm__ __volatile__(
		"	mov.l %1, @%0\n"
		: : "r"(p), "r"(x) : "memory");
}

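/* Atomic *x &= v. No old value is returned, so the gusa body is a
 * single instruction, hence GUSA_START_ODD. */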
void __sh_and(volatile int *x, int v)
{
	if (__sh_atomic_model == SH_A_LLSC) return __sh_and_llsc(x, v);

	if (__sh_atomic_model == SH_A_IMASK) {
		unsigned sr = mask();
		int old = *x;
		*x = old & v;
		unmask(sr);
		return;
	}

	int dummy;
	__asm__ __volatile__(
		GUSA_START_ODD("%1", "%0")
		"	and %2, %0\n"
		GUSA_END("%1", "%0")
		: "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
}

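/* Atomic *x |= v; same single-instruction shape as __sh_and. */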
void __sh_or(volatile int *x, int v)
{
	if (__sh_atomic_model == SH_A_LLSC) return __sh_or_llsc(x, v);

	if (__sh_atomic_model == SH_A_IMASK) {
		unsigned sr = mask();
		int old = *x;
		*x = old | v;
		unmask(sr);
		return;
	}

	int dummy;
	__asm__ __volatile__(
		GUSA_START_ODD("%1", "%0")
		"	or %2, %0\n"
		GUSA_END("%1", "%0")
		: "=&r"(dummy) : "r"(x), "r"(v) : GUSA_CLOBBERS);
}

#endif
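/* Usage sketch (hypothetical caller, not part of this file): callers
 * can build higher-level atomics from __sh_cas's "return the old
 * value" contract, e.g. an increment-unless-zero:
 *
 *	static int inc_unless_zero(volatile int *p)
 *	{
 *		int old;
 *		do old = *p;
 *		while (old && __sh_cas(p, old, old+1) != old);
 *		return old;
 *	}
 */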