#ifndef _INTERNAL_ATOMIC_H
#define _INTERNAL_ATOMIC_H

#include <stdint.h>

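/* Count trailing zeros of a nonzero 32-bit value via a De Bruijn
 * sequence: x&-x isolates the lowest set bit, and multiplying by the
 * De Bruijn constant 0x076be629 maps each possible bit position to a
 * unique 5-bit index into the lookup table. */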
static inline int a_ctz_l(unsigned long x)
{
	static const char debruijn32[32] = {
		0, 1, 23, 2, 29, 24, 19, 3, 30, 27, 25, 11, 20, 8, 4, 13,
		31, 22, 28, 18, 26, 10, 7, 12, 21, 17, 9, 6, 16, 5, 15, 14
	};
	return debruijn32[(x&-x)*0x076be629 >> 27];
}

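/* 64-bit count of trailing zeros, built from the 32-bit version:
 * scan the low word first, falling back to the high word (plus 32)
 * only when the low word is all zero. */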
static inline int a_ctz_64(uint64_t x)
{
	uint32_t y = x;
	if (!y) {
		y = x>>32;
		return 32 + a_ctz_l(y);
	}
	return a_ctz_l(y);
}

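/* Load-locked/store-conditional loop skeleton for SH-4A: movli.l
 * loads *mem into r0 with a reservation, movco.l conditionally stores
 * r0 back and sets the T flag on success, and "bf 0b" retries the
 * whole sequence if the reservation was lost. The synco barriers
 * order the atomic against surrounding memory accesses. */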
#define LLSC_CLOBBERS "r0", "t", "memory"
#define LLSC_START(mem) "synco\n" \
	"0:	movli.l @" mem ", r0\n"
#define LLSC_END(mem) \
	"1:	movco.l r0, @" mem "\n" \
	"	bf 0b\n" \
	"	synco\n"

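/* Atomic compare-and-swap: load the current value; if it equals t,
 * store s, otherwise branch straight to the store-conditional and
 * write the old value back unchanged, completing the LL/SC pair
 * without modifying memory. Returns the value observed. */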
static inline int __sh_cas_llsc(volatile int *p, int t, int s)
{
	int old;
	__asm__ __volatile__(
		LLSC_START("%1")
		"	mov r0, %0\n"
		"	cmp/eq %0, %2\n"
		"	bf 1f\n"
		"	mov %3, r0\n"
		LLSC_END("%1")
		: "=&r"(old) : "r"(p), "r"(t), "r"(s) : LLSC_CLOBBERS);
	return old;
}

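/* Atomic exchange: unconditionally replace *x with v and return the
 * previous value. */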
static inline int __sh_swap_llsc(volatile int *x, int v)
{
	int old;
	__asm__ __volatile__(
		LLSC_START("%1")
		"	mov r0, %0\n"
		"	mov %2, r0\n"
		LLSC_END("%1")
		: "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
	return old;
}

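/* Atomic fetch-and-add: add v to *x and return the value *x held
 * before the addition. */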
static inline int __sh_fetch_add_llsc(volatile int *x, int v)
{
	int old;
	__asm__ __volatile__(
		LLSC_START("%1")
		"	mov r0, %0\n"
		"	add %2, r0\n"
		LLSC_END("%1")
		: "=&r"(old) : "r"(x), "r"(v) : LLSC_CLOBBERS);
	return old;
}

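/* Atomic store: a plain mov.l bracketed by synco barriers; no LL/SC
 * loop is needed since the store is unconditional. */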
static inline void __sh_store_llsc(volatile int *p, int x)
{
	__asm__ __volatile__(
		"	synco\n"
		"	mov.l %1, @%0\n"
		"	synco\n"
		: : "r"(p), "r"(x) : "memory");
}

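/* Atomic bitwise AND of v into *x; the old value is not returned. */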
static inline void __sh_and_llsc(volatile int *x, int v)
{
	__asm__ __volatile__(
		LLSC_START("%0")
		"	and %1, r0\n"
		LLSC_END("%0")
		: : "r"(x), "r"(v) : LLSC_CLOBBERS);
}

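/* Atomic bitwise OR of v into *x; same pattern as the AND above. */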
static inline void __sh_or_llsc(volatile int *x, int v)
{
	__asm__ __volatile__(
		LLSC_START("%0")
		"	or %1, r0\n"
		LLSC_END("%0")
		: : "r"(x), "r"(v) : LLSC_CLOBBERS);
}

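/* SH-4A guarantees the movli.l/movco.l instructions, so the inline
 * LL/SC versions above can be bound directly. For baseline SH builds,
 * the operations are instead declared as out-of-line functions,
 * defined elsewhere in the library, which can select an atomic
 * sequence appropriate for the actual CPU at runtime. */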
#ifdef __SH4A__
#define a_cas(p,t,s) __sh_cas_llsc(p,t,s)
#define a_swap(x,v) __sh_swap_llsc(x,v)
#define a_fetch_add(x,v) __sh_fetch_add_llsc(x, v)
#define a_store(x,v) __sh_store_llsc(x, v)
#define a_and(x,v) __sh_and_llsc(x, v)
#define a_or(x,v) __sh_or_llsc(x, v)
#else

int __sh_cas(volatile int *, int, int);
int __sh_swap(volatile int *, int);
int __sh_fetch_add(volatile int *, int);
void __sh_store(volatile int *, int);
void __sh_and(volatile int *, int);
void __sh_or(volatile int *, int);

#define a_cas(p,t,s) __sh_cas(p,t,s)
#define a_swap(x,v) __sh_swap(x,v)
#define a_fetch_add(x,v) __sh_fetch_add(x, v)
#define a_store(x,v) __sh_store(x, v)
#define a_and(x,v) __sh_and(x, v)
#define a_or(x,v) __sh_or(x, v)
#endif

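/* Pointer-sized CAS, implemented on top of the 32-bit a_cas; this
 * relies on pointers and int both being 32 bits wide on SH. */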
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
	return (void *)a_cas(p, (int)t, (int)s);
}

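/* Atomic increment and decrement in terms of a_fetch_add; the old
 * value is discarded. */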
static inline void a_inc(volatile int *x)
{
	a_fetch_add(x, 1);
}

static inline void a_dec(volatile int *x)
{
	a_fetch_add(x, -1);
}

#define a_spin a_barrier

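/* Full memory barrier: a dummy compare-and-swap on a stack slot
 * exercises the barriers in the CAS implementation. a_spin, used in
 * spin-wait loops, is defined as the same operation. */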
static inline void a_barrier()
{
	a_cas(&(int){0}, 0, 0);
}

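/* Force a fatal trap by writing through a null pointer; used when an
 * unrecoverable internal inconsistency is detected. */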
static inline void a_crash()
{
	*(volatile char *)0=0;
}

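/* long-typed OR wrapper; on SH, long and int are both 32-bit, so this
 * can forward directly to a_or. */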
static inline void a_or_l(volatile void *p, long v)
{
	a_or(p, v);
}

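/* 64-bit AND/OR, performed as two independent 32-bit operations on
 * the low and high words. Each word is updated atomically on its own,
 * but the pair is not a single atomic 64-bit update, so callers must
 * not depend on both halves changing together. */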
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t v; uint32_t r[2]; } u = { v };
	a_and((int *)p, u.r[0]);
	a_and((int *)p+1, u.r[1]);
}

static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
	union { uint64_t v; uint32_t r[2]; } u = { v };
	a_or((int *)p, u.r[0]);
	a_or((int *)p+1, u.r[1]);
}

#endif