// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_

#if defined(OS_QNX)
#include <sys/cpuinline.h>
#endif

namespace base {
namespace subtle {

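// Full hardware memory barrier. "dmb ish" orders all memory accesses that
// appear before it against all that appear after it, across the inner
// shareable domain (i.e. all cores that can share this memory).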
inline void MemoryBarrier() {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
    ::: "memory"
  );  // NOLINT
}

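// Compare-and-swap with no ordering guarantees, built from an ARMv8
// load-exclusive/store-exclusive (ldxr/stxr) loop: the stxr succeeds only if
// no other agent has written the location since the ldxr, writing 0 to its
// status register on success and non-zero on failure.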
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    "clrex                                 \n\t"  // In case we didn't swap.
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      // -- Inline review thread on the "r" constraint for ptr --
      // JF (2014/03/20 21:55:50): Shouldn't ptr always be "m"?
      // rmcilroy (2014/03/21 15:27:20): I'm not entirely sure on this (I'm not very knowle…
      // JF (2014/03/21 17:57:38): I looked into this and checked with Roland McGrath…
      // rmcilroy (2014/03/24 15:19:25): Done.
      // (A sketch of the suggested memory-operand variant follows this
      // function.)
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}
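
// A sketch of the memory-operand variant suggested in the review thread
// above. This is an assumption about the direction of the fix, not the patch
// itself, and the _Q name is hypothetical. GCC's AArch64 "Q" constraint
// describes a memory operand addressed by a single base register with no
// offset, which is exactly the addressing mode ldxr/stxr accept, and
// "+Q" (*ptr) also tells the compiler the pointed-to location is both read
// and written.
inline Atomic32 NoBarrier_CompareAndSwap_Q(volatile Atomic32* ptr,
                                           Atomic32 old_value,
                                           Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Exclusive-load *ptr.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Exclusive-store new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if the store failed.
    "1:                                    \n\t"
    "clrex                                 \n\t"  // In case we didn't swap.
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}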

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], [%[ptr]]             \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

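// Returns the new value of *ptr (with the increment already applied), not
// the previous one.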
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                       \n\t"
    "ldxr %w[result], [%[ptr]]                \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment]\n\t"
    "stxr %w[temp], %w[result], [%[ptr]]      \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

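// The same increment, bracketed by full barriers so the read-modify-write is
// ordered against both earlier and later memory accesses.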
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

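// CAS with acquire semantics: the 'dmb' after a successful exchange keeps
// later memory accesses from being reordered before it; a failed compare
// branches past the barrier straight to the 'clrex'.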
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "dmb ish                               \n\t"  // Data memory barrier.
    "1:                                    \n\t"
    // If the compare failed the 'dmb' is unnecessary, but we still need a
    // 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

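// CAS with release semantics: the barrier before the exclusive loop keeps
// earlier memory accesses from being reordered after the exchange.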
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  MemoryBarrier();

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    // If the compare failed we still need a 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

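// Plain loads and stores, with the barrier placed explicitly: a barrier
// after the access (Acquire_*) keeps later accesses from moving before it;
// a barrier before the access (Release_*) keeps earlier accesses from moving
// after it.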
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit versions of the operations.
// See the 32-bit versions for comments.
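// The main difference: value operands use the full 64-bit X registers (no %w
// modifier), while the stxr status register stays 32-bit.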

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[prev], [%[ptr]]                 \n\t"
    "cmp %[prev], %[old_value]              \n\t"
    "bne 1f                                 \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    "1:                                     \n\t"
    "clrex                                  \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], [%[ptr]]               \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], [%[ptr]]               \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  MemoryBarrier();
  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[prev], [%[ptr]]                 \n\t"
    "cmp %[prev], %[old_value]              \n\t"
    "bne 1f                                 \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    "dmb ish                                \n\t"
    "1:                                     \n\t"
    "clrex                                  \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  MemoryBarrier();

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[prev], [%[ptr]]                 \n\t"
    "cmp %[prev], %[old_value]              \n\t"
    "bne 1f                                 \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    "1:                                     \n\t"
    "clrex                                  \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}
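
// Illustrative usage, not part of the original header: a minimal
// test-and-set spinlock built on the primitives above. The SpinLockSketch
// name is hypothetical.
class SpinLockSketch {
 public:
  SpinLockSketch() : state_(0) {}

  void Lock() {
    // Acquire semantics: the CAS returns the previous value, so 0 means we
    // took the lock; anything else means another thread holds it and we spin.
    while (Acquire_CompareAndSwap(&state_, 0, 1) != 0) {}
  }

  void Unlock() {
    // Release semantics: writes inside the critical section cannot be
    // reordered after this store.
    Release_Store(&state_, 0);
  }

 private:
  volatile Atomic32 state_;
};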

}  // namespace base::subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_