Chromium Code Reviews

Side by Side Diff: fusl/arch/aarch64/atomic.h

Issue 1573973002: Add a "fork" of musl as //fusl. (Closed) Base URL: https://github.com/domokit/mojo.git@master
Patch Set: Created 4 years, 11 months ago
#ifndef _INTERNAL_ATOMIC_H
#define _INTERNAL_ATOMIC_H

#include <stdint.h>

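/* Count trailing zeros of a 64-bit value: rbit reverses the bit order,
 * then clz counts the leading zeros of the reversed value. */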
static inline int a_ctz_64(uint64_t x)
{
    __asm__(
        " rbit %0, %1\n"
        " clz %0, %0\n"
        : "=r"(x) : "r"(x));
    return x;
}

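/* unsigned long is 64-bit on aarch64, so reuse the 64-bit version. */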
static inline int a_ctz_l(unsigned long x)
{
    return a_ctz_64(x);
}

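/* Full memory barrier (inner shareable domain); the "memory" clobber
 * also keeps the compiler from reordering accesses across it. */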
static inline void a_barrier()
{
    __asm__ __volatile__("dmb ish" : : : "memory");
}

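/* Pointer-sized compare-and-swap: if *p equals t, store s into *p.
 * Returns the value *p held before the operation. Retries the
 * ldxr/stxr pair until the exclusive store succeeds. */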
static inline void *a_cas_p(volatile void *p, void *t, void *s)
{
    void *old;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %0,%3\n"
        " cmp %0,%1\n"
        " b.ne 1f\n"
        " stxr %w0,%2,%3\n"
        " cbnz %w0,1b\n"
        " mov %0,%1\n"
        "1: dmb ish\n"
        : "=&r"(old)
        : "r"(t), "r"(s), "Q"(*(long*)p)
        : "memory", "cc");
    return old;
}

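/* 32-bit compare-and-swap: same ldxr/stxr retry loop as a_cas_p. */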
static inline int a_cas(volatile int *p, int t, int s)
{
    int old;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %w0,%3\n"
        " cmp %w0,%w1\n"
        " b.ne 1f\n"
        " stxr %w0,%w2,%3\n"
        " cbnz %w0,1b\n"
        " mov %w0,%w1\n"
        "1: dmb ish\n"
        : "=&r"(old)
        : "r"(t), "r"(s), "Q"(*p)
        : "memory", "cc");
    return old;
}

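/* Atomically store v into *x and return the previous value. */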
static inline int a_swap(volatile int *x, int v)
{
    int old, tmp;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %w0,%3\n"
        " stxr %w1,%w2,%3\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(old), "=&r"(tmp)
        : "r"(v), "Q"(*x)
        : "memory", "cc" );
    return old;
}

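/* Atomically add v to *x; the loop leaves the updated value in old,
 * so old-v is the value *x held before the addition. */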
static inline int a_fetch_add(volatile int *x, int v)
{
    int old, tmp;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %w0,%3\n"
        " add %w0,%w0,%w2\n"
        " stxr %w1,%w0,%3\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(old), "=&r"(tmp)
        : "r"(v), "Q"(*x)
        : "memory", "cc" );
    return old-v;
}

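/* Atomic increment of *x. */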
static inline void a_inc(volatile int *x)
{
    int tmp, tmp2;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %w0,%2\n"
        " add %w0,%w0,#1\n"
        " stxr %w1,%w0,%2\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(tmp), "=&r"(tmp2)
        : "Q"(*x)
        : "memory", "cc" );
}

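/* Atomic decrement of *x. */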
static inline void a_dec(volatile int *x)
{
    int tmp, tmp2;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %w0,%2\n"
        " sub %w0,%w0,#1\n"
        " stxr %w1,%w0,%2\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(tmp), "=&r"(tmp2)
        : "Q"(*x)
        : "memory", "cc" );
}

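/* Atomic 64-bit AND of v into *p. */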
static inline void a_and_64(volatile uint64_t *p, uint64_t v)
{
    int tmp, tmp2;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %0,%3\n"
        " and %0,%0,%2\n"
        " stxr %w1,%0,%3\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(v), "Q"(*p)
        : "memory", "cc" );
}

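/* Atomic 32-bit AND of v into *p. */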
static inline void a_and(volatile int *p, int v)
{
    int tmp, tmp2;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %w0,%3\n"
        " and %w0,%w0,%w2\n"
        " stxr %w1,%w0,%3\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(v), "Q"(*p)
        : "memory", "cc" );
}

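/* Atomic 64-bit OR of v into *p. */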
static inline void a_or_64(volatile uint64_t *p, uint64_t v)
{
    int tmp, tmp2;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %0,%3\n"
        " orr %0,%0,%2\n"
        " stxr %w1,%0,%3\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(v), "Q"(*p)
        : "memory", "cc" );
}

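/* long is 64-bit on aarch64, so delegate to the 64-bit OR. */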
static inline void a_or_l(volatile void *p, long v)
{
    a_or_64(p, v);
}

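/* Atomic 32-bit OR of v into *p. */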
static inline void a_or(volatile int *p, int v)
{
    int tmp, tmp2;
    __asm__ __volatile__(
        " dmb ish\n"
        "1: ldxr %w0,%3\n"
        " orr %w0,%w0,%w2\n"
        " stxr %w1,%w0,%3\n"
        " cbnz %w1,1b\n"
        " dmb ish\n"
        : "=&r"(tmp), "=&r"(tmp2)
        : "r"(v), "Q"(*p)
        : "memory", "cc" );
}

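/* Plain 32-bit store bracketed by full barriers on both sides. */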
static inline void a_store(volatile int *p, int x)
{
    __asm__ __volatile__(
        " dmb ish\n"
        " str %w1,%0\n"
        " dmb ish\n"
        : "=m"(*p)
        : "r"(x)
        : "memory", "cc" );
}

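/* Spin-wait hint: this port simply issues a full barrier. */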
#define a_spin a_barrier

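/* Crash deliberately via a write through a null pointer. */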
static inline void a_crash()
{
    *(volatile char *)0=0;
}

#endif