// Copyright 2013 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_

#if defined(OS_QNX)
#include <sys/cpuinline.h>
#endif

namespace base {
namespace subtle {

inline void MemoryBarrier() {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
  [Review comment] Nico, 2014/03/20 17:35:01:
    Rodolph: After looking through docs a bit, it soun…
    ::: "memory"
  );  // NOLINT
}
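
For orientation: "dmb ish" is a full data memory barrier over the inner shareable domain, and it is what a sequentially consistent fence lowers to on ARM64. A minimal portable sketch of the same effect (illustrative only; this header predates any use of <atomic> and does not include it):

#include <atomic>

// Illustrative equivalent: on ARM64, a seq_cst fence compiles to "dmb ish",
// matching the hand-written MemoryBarrier() above.
inline void PortableMemoryBarrier() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}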


inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    "clrex                                 \n\t"  // In case we didn't swap.
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}
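
The ldxr/stxr pair is a load-linked/store-conditional loop: stxr writes 0 to the status register on success and 1 on failure, and cbnz retries until the exclusive store wins. Callers can build other read-modify-write operations on top of this primitive; a hedged sketch follows (AtomicMax is a hypothetical helper, not part of this header):

// Hypothetical helper: atomically raise *ptr to at least |candidate| by
// retrying the CAS until the value we observed is still the current one.
inline Atomic32 AtomicMax(volatile Atomic32* ptr, Atomic32 candidate) {
  Atomic32 observed = *ptr;  // Plain read; the CAS below validates it.
  while (observed < candidate) {
    Atomic32 prev = NoBarrier_CompareAndSwap(ptr, observed, candidate);
    if (prev == observed)
      break;           // The swap landed.
    observed = prev;   // Lost a race; retry against the fresh value.
  }
  return observed;
}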

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], [%[ptr]]             \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                       \n\t"
    "ldxr %w[result], [%[ptr]]                \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment]\n\t"
    "stxr %w[temp], %w[result], [%[ptr]]      \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
    "0:                                       \n\t"
    "ldxr %w[result], [%[ptr]]                \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment]\n\t"
    "stxr %w[temp], %w[result], [%[ptr]]      \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
    "dmb ish                                  \n\t"  // Data memory barrier.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  [Review comment] Nico, 2014/03/20 17:35:01:
    This looks identical to the MemoryBarrier();
  [Review comment] rmcilroy, 2014/03/20 18:38:43:
    Good point - done.
  );  // NOLINT

  return result;
}
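
Per the review exchange above ("Good point - done"), the landed version presumably stops duplicating the "dmb ish" pair and composes the existing helpers instead. A sketch of that shape, as an assumption about the final patch rather than a quote from it:

// Sketch of the suggested refactor: bracket the no-barrier increment with
// MemoryBarrier() instead of repeating the inline-asm barriers.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  MemoryBarrier();
  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();
  return result;
}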

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "dmb ish                               \n\t"  // Data memory barrier.
    "1:                                    \n\t"
    // If the compare failed the 'dmb' is unnecessary, but we still need a
    // 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                               \n\t"  // Data memory barrier.
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    // If the compare failed we still need a 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
    ::: "memory"  // Prevent gcc from reordering before the store above.
  );  // NOLINT
  [Review comment] Nico, 2014/03/20 17:35:01:
    Also here and in the functions below: the arm32 ve…
  [Review comment] rmcilroy, 2014/03/20 18:38:43:
    Done.
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
    ::: "memory"  // Prevent gcc from reordering after the store below.
  );  // NOLINT
  *ptr = value;
}
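
Nico's (truncated) comment above points at the arm32 version, which expresses these fenced stores through MemoryBarrier() rather than repeating the asm; rmcilroy's "Done." suggests this file followed suit. A sketch of that form, as an assumption based on the arm32 implementation rather than the landed arm64 code:

// Sketch matching the arm32 convention: reuse MemoryBarrier() around the
// plain store instead of open-coding "dmb ish" in each function.
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();  // Order the store before everything that follows.
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();  // Order everything before the store below.
  *ptr = value;
}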

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
    ::: "memory"  // Prevent gcc from reordering before the load above.
  );  // NOLINT
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"  // Data memory barrier.
    ::: "memory"  // Prevent gcc from reordering after the load below.
  );  // NOLINT
  return *ptr;
}
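
To see why the barrier placement matters, here is a hedged usage sketch (g_ready, g_payload, Producer, and TryConsume are invented for illustration): Release_Store fences before the flag write and Acquire_Load fences after the flag read, so a consumer that observes the flag also observes the payload.

// Hypothetical message-passing example built on the 32-bit operations above.
Atomic32 g_ready = 0;
int g_payload = 0;

void Producer() {
  g_payload = 42;              // Ordinary store to the payload.
  Release_Store(&g_ready, 1);  // Barrier, then the flag store.
}

bool TryConsume(int* out) {
  if (Acquire_Load(&g_ready) == 0)  // Flag load, then the barrier.
    return false;
  *out = g_payload;  // Safe: the payload write is already visible.
  return true;
}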

// 64-bit versions of the operations.
// See the 32-bit versions for comments.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], [%[ptr]]                \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], [%[ptr]]              \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], [%[ptr]]               \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                \n\t"
    "0:                                     \n\t"
    "ldxr %[result], [%[ptr]]               \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    "dmb ish                                \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], [%[ptr]]                \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "dmb ish                               \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                               \n\t"
    "0:                                    \n\t"
    "ldxr %[prev], [%[ptr]]                \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"
    ::: "memory"
  );  // NOLINT
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"
    ::: "memory"
  );  // NOLINT
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"
    ::: "memory"
  );  // NOLINT
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                  \n\t"
    ::: "memory"
  );  // NOLINT
  return *ptr;
}

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_ARM64_GCC_H_
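
As a closing illustration of how the acquire/release pairs compose, here is a hedged sketch of a minimal spinlock (SpinLock is a hypothetical class invented for this example; neither this header nor base/atomicops.h provides it):

// Hypothetical spinlock: Acquire_CompareAndSwap keeps the critical section
// from floating above Lock(); Release_Store publishes it before the lock is
// seen as free again.
class SpinLock {
 public:
  SpinLock() : state_(0) {}

  void Lock() {
    while (base::subtle::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {
      // Spin until the lock is observed free and the swap succeeds.
    }
  }

  void Unlock() {
    base::subtle::Release_Store(&state_, 0);
  }

 private:
  volatile base::subtle::Atomic32 state_;
};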