OLD | NEW |
| (Empty) |
1 #ifndef _INTERNAL_ATOMIC_H | |
2 #define _INTERNAL_ATOMIC_H | |
3 | |
4 #include <stdint.h> | |
5 | |
/* Count trailing zeros of a nonzero word using a De Bruijn
 * multiply-and-lookup.  x must be nonzero; the multiply relies on
 * 32-bit unsigned long wraparound semantics (as on this target). */
static inline int a_ctz_l(unsigned long x)
{
	static const char tab[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	unsigned long lowbit = x & -x;	/* isolate the lowest set bit */
	return tab[lowbit*0x076be629 >> 27];
}
14 | |
/* Count trailing zeros of a nonzero 64-bit value, one 32-bit half
 * at a time via a_ctz_l. */
static inline int a_ctz_64(uint64_t x)
{
	uint32_t lo = x;
	if (lo) return a_ctz_l(lo);
	return 32 + a_ctz_l((uint32_t)(x>>32));
}
24 | |
/* Atomic compare-and-swap: if *p == t, atomically store s into *p.
 * Returns the value read from *p (equal to t iff the swap happened).
 * Uses the MicroBlaze lwx/swx exclusive load/store pair: swx sets
 * carry when the reservation was lost, in which case we retry. */
static inline int a_cas(volatile int *p, int t, int s)
{
	register int old, tmp;
	__asm__ __volatile__ (
		"	addi %0, r0, 0\n"	/* old = 0 (r0 is the hard-wired zero register) */
		"1:	lwx %0, %2, r0\n"	/* old = *p, establishing a reservation */
		"	rsubk %1, %0, %3\n"	/* tmp = t - old */
		"	bnei %1, 1f\n"		/* *p != t: done, skip the store */
		"	swx %4, %2, r0\n"	/* conditional store of s; carry=1 on failure */
		"	addic %1, r0, 0\n"	/* tmp = carry flag */
		"	bnei %1, 1b\n"		/* reservation lost: reload and retry */
		"1:	"
		: "=&r"(old), "=&r"(tmp)
		: "r"(p), "r"(t), "r"(s)
		: "cc", "memory" );
	return old;
}
42 | |
/* Pointer-width CAS, expressed in terms of the 32-bit a_cas
 * (pointers and int have the same width on this target). */
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	return (void *)a_cas((volatile int *)p, (int)t, (int)s);
}
47 | |
/* Atomically replace *x with v; returns the previous value of *x. */
static inline int a_swap(volatile int *x, int v)
{
	register int old, tmp;
	__asm__ __volatile__ (
		"	addi %0, r0, 0\n"	/* old = 0 (r0 is the hard-wired zero register) */
		"1:	lwx %0, %2, r0\n"	/* old = *x, establishing a reservation */
		"	swx %3, %2, r0\n"	/* conditional store of v; carry=1 on failure */
		"	addic %1, r0, 0\n"	/* tmp = carry flag */
		"	bnei %1, 1b\n"		/* reservation lost: retry */
		"1:	"
		: "=&r"(old), "=&r"(tmp)
		: "r"(x), "r"(v)
		: "cc", "memory" );
	return old;
}
63 | |
/* Atomically add v to *x; returns the value *x held before the add.
 * The asm yields the post-add value, so the old value is recovered
 * as new-v. */
static inline int a_fetch_add(volatile int *x, int v)
{
	register int new, tmp;
	__asm__ __volatile__ (
		"	addi %0, r0, 0\n"	/* new = 0 (r0 is the hard-wired zero register) */
		"1:	lwx %0, %2, r0\n"	/* new = *x, establishing a reservation */
		"	addk %0, %0, %3\n"	/* new += v */
		"	swx %0, %2, r0\n"	/* conditional store; carry=1 on failure */
		"	addic %1, r0, 0\n"	/* tmp = carry flag */
		"	bnei %1, 1b\n"		/* reservation lost: retry */
		"1:	"
		: "=&r"(new), "=&r"(tmp)
		: "r"(x), "r"(v)
		: "cc", "memory" );
	return new-v;
}
80 | |
/* Atomically increment *x by one. */
static inline void a_inc(volatile int *x)
{
	a_fetch_add(x, 1);
}
85 | |
/* Atomically decrement *x by one. */
static inline void a_dec(volatile int *x)
{
	a_fetch_add(x, -1);
}
90 | |
/* Store x into *p with a single swi (store word immediate).
 * NOTE(review): there is no explicit barrier before or after the
 * store here — confirm callers do not rely on a_store providing
 * full ordering on this target. */
static inline void a_store(volatile int *p, int x)
{
	__asm__ __volatile__ (
		"swi %1, %0"	/* *p = x */
		: "=m"(*p) : "r"(x) : "memory" );
}
97 | |
/* Spin-wait hint is just a full barrier on this target. */
#define a_spin a_barrier

/* Full memory barrier: a dummy CAS on a stack temporary (the asm in
 * a_cas carries a "memory" clobber, forcing the compiler to treat it
 * as a synchronization point). */
static inline void a_barrier()
{
	a_cas(&(int){0}, 0, 0);
}
104 | |
/* Deliberately fault the process with a store through a null
 * pointer (intentional trap, not a bug). */
static inline void a_crash()
{
	*(volatile char *)0=0;
}
109 | |
/* Atomically apply *p &= v using a CAS retry loop. */
static inline void a_and(volatile int *p, int v)
{
	for (;;) {
		int cur = *p;
		if (a_cas(p, cur, cur & v) == cur) break;
	}
}
116 | |
/* Atomically apply *p |= v using a CAS retry loop. */
static inline void a_or(volatile int *p, int v)
{
	for (;;) {
		int cur = *p;
		if (a_cas(p, cur, cur | v) == cur) break;
	}
}
123 | |
/* long/void* flavor of a_or (long and int share a width on this
 * target, so the narrowing conversion is value-preserving). */
static inline void a_or_l(volatile void *p, long v)
{
	a_or((volatile int *)p, (int)v);
}
128 | |
/* Atomic 64-bit AND, done as two independent 32-bit ANDs.  The
 * union maps v onto its two words in native memory order, so each
 * half-word gets the matching mask regardless of endianness.  The
 * two halves are NOT updated atomically as a pair. */
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t full; uint32_t half[2]; } bits = { v };
	volatile int *w = (volatile int *)p;
	a_and(w, bits.half[0]);
	a_and(w+1, bits.half[1]);
}
135 | |
/* Atomic 64-bit OR, done as two independent 32-bit ORs.  Same
 * word-mapping and non-pairwise-atomicity caveats as a_and_64. */
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t full; uint32_t half[2]; } bits = { v };
	volatile int *w = (volatile int *)p;
	a_or(w, bits.half[0]);
	a_or(w+1, bits.half[1]);
}
142 | |
143 #endif | |
OLD | NEW |