| OLD | NEW |
| 1 #define a_cas a_cas | 1 #define a_cas a_cas |
| 2 static inline int a_cas(volatile int *p, int t, int s) | 2 static inline int a_cas(volatile int* p, int t, int s) { |
| 3 { | 3 __asm__ __volatile__("lock ; cmpxchg %3, %1" |
| 4 » __asm__ __volatile__ ( | 4 : "=a"(t), "=m"(*p) |
| 5 » » "lock ; cmpxchg %3, %1" | 5 : "a"(t), "r"(s) |
| 6 » » : "=a"(t), "=m"(*p) : "a"(t), "r"(s) : "memory" ); | 6 : "memory"); |
| 7 » return t; | 7 return t; |
| 8 } | 8 } |
| 9 | 9 |
| 10 #define a_swap a_swap | 10 #define a_swap a_swap |
| 11 static inline int a_swap(volatile int *p, int v) | 11 static inline int a_swap(volatile int* p, int v) { |
| 12 { | 12 __asm__ __volatile__("xchg %0, %1" : "=r"(v), "=m"(*p) : "0"(v) : "memory"); |
| 13 » __asm__ __volatile__( | 13 return v; |
| 14 » » "xchg %0, %1" | |
| 15 » » : "=r"(v), "=m"(*p) : "0"(v) : "memory" ); | |
| 16 » return v; | |
| 17 } | 14 } |
| 18 | 15 |
| 19 #define a_fetch_add a_fetch_add | 16 #define a_fetch_add a_fetch_add |
| 20 static inline int a_fetch_add(volatile int *p, int v) | 17 static inline int a_fetch_add(volatile int* p, int v) { |
| 21 { | 18 __asm__ __volatile__("lock ; xadd %0, %1" |
| 22 » __asm__ __volatile__( | 19 : "=r"(v), "=m"(*p) |
| 23 » » "lock ; xadd %0, %1" | 20 : "0"(v) |
| 24 » » : "=r"(v), "=m"(*p) : "0"(v) : "memory" ); | 21 : "memory"); |
| 25 » return v; | 22 return v; |
| 26 } | 23 } |
| 27 | 24 |
| 28 #define a_and a_and | 25 #define a_and a_and |
| 29 static inline void a_and(volatile int *p, int v) | 26 static inline void a_and(volatile int* p, int v) { |
| 30 { | 27 __asm__ __volatile__("lock ; and %1, %0" : "=m"(*p) : "r"(v) : "memory"); |
| 31 » __asm__ __volatile__( | |
| 32 » » "lock ; and %1, %0" | |
| 33 » » : "=m"(*p) : "r"(v) : "memory" ); | |
| 34 } | 28 } |
| 35 | 29 |
| 36 #define a_or a_or | 30 #define a_or a_or |
| 37 static inline void a_or(volatile int *p, int v) | 31 static inline void a_or(volatile int* p, int v) { |
| 38 { | 32 __asm__ __volatile__("lock ; or %1, %0" : "=m"(*p) : "r"(v) : "memory"); |
| 39 » __asm__ __volatile__( | |
| 40 » » "lock ; or %1, %0" | |
| 41 » » : "=m"(*p) : "r"(v) : "memory" ); | |
| 42 } | 33 } |
| 43 | 34 |
| 44 #define a_inc a_inc | 35 #define a_inc a_inc |
| 45 static inline void a_inc(volatile int *p) | 36 static inline void a_inc(volatile int* p) { |
| 46 { | 37 __asm__ __volatile__("lock ; incl %0" : "=m"(*p) : "m"(*p) : "memory"); |
| 47 » __asm__ __volatile__( | |
| 48 » » "lock ; incl %0" | |
| 49 » » : "=m"(*p) : "m"(*p) : "memory" ); | |
| 50 } | 38 } |
| 51 | 39 |
| 52 #define a_dec a_dec | 40 #define a_dec a_dec |
| 53 static inline void a_dec(volatile int *p) | 41 static inline void a_dec(volatile int* p) { |
| 54 { | 42 __asm__ __volatile__("lock ; decl %0" : "=m"(*p) : "m"(*p) : "memory"); |
| 55 » __asm__ __volatile__( | |
| 56 » » "lock ; decl %0" | |
| 57 » » : "=m"(*p) : "m"(*p) : "memory" ); | |
| 58 } | 43 } |
| 59 | 44 |
| 60 #define a_store a_store | 45 #define a_store a_store |
| 61 static inline void a_store(volatile int *p, int x) | 46 static inline void a_store(volatile int* p, int x) { |
| 62 { | 47 __asm__ __volatile__("mov %1, %0 ; lock ; orl $0,(%%esp)" |
| 63 » __asm__ __volatile__( | 48 : "=m"(*p) |
| 64 » » "mov %1, %0 ; lock ; orl $0,(%%esp)" | 49 : "r"(x) |
| 65 » » : "=m"(*p) : "r"(x) : "memory" ); | 50 : "memory"); |
| 66 } | 51 } |
| 67 | 52 |
| 68 #define a_barrier a_barrier | 53 #define a_barrier a_barrier |
| 69 static inline void a_barrier() | 54 static inline void a_barrier() { |
| 70 { | 55 __asm__ __volatile__("" : : : "memory"); |
| 71 » __asm__ __volatile__( "" : : : "memory" ); | |
| 72 } | 56 } |
| 73 | 57 |
| 74 #define a_pause a_pause | 58 #define a_spin a_spin |
| 75 static inline void a_spin() | 59 static inline void a_spin() { |
| 76 { | 60 __asm__ __volatile__("pause" : : : "memory"); |
| 77 » __asm__ __volatile__( "pause" : : : "memory" ); | |
| 78 } | 61 } |
| 79 | 62 |
| 80 #define a_crash a_crash | 63 #define a_crash a_crash |
| 81 static inline void a_crash() | 64 static inline void a_crash() { |
| 82 { | 65 __asm__ __volatile__("hlt" : : : "memory"); |
| 83 » __asm__ __volatile__( "hlt" : : : "memory" ); | |
| 84 } | 66 } |
| 85 | 67 |
| 86 #define a_ctz_64 a_ctz_64 | 68 #define a_ctz_64 a_ctz_64 |
| 87 static inline int a_ctz_64(uint64_t x) | 69 static inline int a_ctz_64(uint64_t x) { |
| 88 { | 70 int r; |
| 89 » int r; | 71 __asm__("bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; add $32,%0\n1:" |
| 90 » __asm__( "bsf %1,%0 ; jnz 1f ; bsf %2,%0 ; add $32,%0\n1:" | 72 : "=&r"(r) |
| 91 » » : "=&r"(r) : "r"((unsigned)x), "r"((unsigned)(x>>32)) ); | 73 : "r"((unsigned)x), "r"((unsigned)(x >> 32))); |
| 92 » return r; | 74 return r; |
| 93 } | 75 } |
| 94 | 76 |
| 95 #define a_ctz_l a_ctz_l | 77 #define a_ctz_l a_ctz_l |
| 96 static inline int a_ctz_l(unsigned long x) | 78 static inline int a_ctz_l(unsigned long x) { |
| 97 { | 79 long r; |
| 98 » long r; | 80 __asm__("bsf %1,%0" : "=r"(r) : "r"(x)); |
| 99 » __asm__( "bsf %1,%0" : "=r"(r) : "r"(x) ); | 81 return r; |
| 100 » return r; | |
| 101 } | 82 } |
| OLD | NEW |