#ifndef _INTERNAL_ATOMIC_H
#define _INTERNAL_ATOMIC_H

#include <stdint.h>

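/* Count trailing zeros of a nonzero 32-bit value via a de Bruijn sequence lookup. */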
static inline int a_ctz_l(unsigned long x)
{
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	return debruijn32[(x&-x)*0x076be629 >> 27];
}

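/* Count trailing zeros of a nonzero 64-bit value, one 32-bit half at a time. */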
static inline int a_ctz_64(uint64_t x)
{
	uint32_t y = x;
	if (!y) {
		y = x>>32;
		return 32 + a_ctz_l(y);
	}
	return a_ctz_l(y);
}

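/* Atomic compare-and-swap: if *p equals t, store s into *p; returns the value
 * observed in *p. Implemented as an ll/sc retry loop bracketed by sync. */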
static inline int a_cas(volatile int *p, int t, int s)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		"1: ll %0, %2\n"
		" bne %0, %3, 1f\n"
		" addu %1, %4, $0\n"
		" sc %1, %2\n"
		" beq %1, $0, 1b\n"
		" nop\n"
		" sync\n"
		"1: \n"
		".set pop\n"
		: "=&r"(t), "=&r"(dummy), "+m"(*p) : "r"(t), "r"(s) : "memory" );
	return t;
}

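/* Compare-and-swap on a pointer; pointers fit in an int on this 32-bit target. */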
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	return (void *)a_cas(p, (int)t, (int)s);
}

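/* Atomically replace *x with v and return the previous value. */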
static inline int a_swap(volatile int *x, int v)
{
	int old, dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		"1: ll %0, %2\n"
		" addu %1, %3, $0\n"
		" sc %1, %2\n"
		" beq %1, $0, 1b\n"
		" nop\n"
		" sync\n"
		".set pop\n"
		: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
	return old;
}

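/* Atomically add v to *x and return the previous value. */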
static inline int a_fetch_add(volatile int *x, int v)
{
	int old, dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		"1: ll %0, %2\n"
		" addu %1, %0, %3\n"
		" sc %1, %2\n"
		" beq %1, $0, 1b\n"
		" nop\n"
		" sync\n"
		".set pop\n"
		: "=&r"(old), "=&r"(dummy), "+m"(*x) : "r"(v) : "memory" );
	return old;
}

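/* Atomically increment *x. */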
static inline void a_inc(volatile int *x)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		"1: ll %0, %1\n"
		" addu %0, %0, 1\n"
		" sc %0, %1\n"
		" beq %0, $0, 1b\n"
		" nop\n"
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*x) : : "memory" );
}

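/* Atomically decrement *x. */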
static inline void a_dec(volatile int *x)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		"1: ll %0, %1\n"
		" subu %0, %0, 1\n"
		" sc %0, %1\n"
		" beq %0, $0, 1b\n"
		" nop\n"
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*x) : : "memory" );
}

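/* Store x to *p with a full barrier before and after the write. */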
static inline void a_store(volatile int *p, int x)
{
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		" sw %1, %0\n"
		" sync\n"
		".set pop\n"
		: "+m"(*p) : "r"(x) : "memory" );
}

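/* a_spin and a_barrier both issue a full memory barrier; a dummy
 * compare-and-swap on a stack location provides the ordering. */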
#define a_spin a_barrier

static inline void a_barrier()
{
	a_cas(&(int){0}, 0, 0);
}

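/* Force an immediate crash by writing through a null pointer. */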
static inline void a_crash()
{
	*(volatile char *)0=0;
}

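/* Atomically AND v into *p. */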
static inline void a_and(volatile int *p, int v)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		"1: ll %0, %1\n"
		" and %0, %0, %2\n"
		" sc %0, %1\n"
		" beq %0, $0, 1b\n"
		" nop\n"
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}

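/* Atomically OR v into *p. */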
static inline void a_or(volatile int *p, int v)
{
	int dummy;
	__asm__ __volatile__(
		".set push\n"
		".set mips2\n"
		".set noreorder\n"
		" sync\n"
		"1: ll %0, %1\n"
		" or %0, %0, %2\n"
		" sc %0, %1\n"
		" beq %0, $0, 1b\n"
		" nop\n"
		" sync\n"
		".set pop\n"
		: "=&r"(dummy), "+m"(*p) : "r"(v) : "memory" );
}

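/* OR on a long-typed value; long is the same width as int on this target. */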
static inline void a_or_l(volatile void *p, long v)
{
	a_or(p, v);
}

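/* 64-bit AND, performed as two independent 32-bit atomic operations on the
 * halves of the object; the 64-bit update as a whole is not atomic. */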
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t v; uint32_t r[2]; } u = { v };
	a_and((int *)p, u.r[0]);
	a_and((int *)p+1, u.r[1]);
}

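/* 64-bit OR, likewise performed as two independent 32-bit atomic operations. */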
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t v; uint32_t r[2]; } u = { v };
	a_or((int *)p, u.r[0]);
	a_or((int *)p+1, u.r[1]);
}

#endif