// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_

namespace v8 {
namespace internal {

inline void MemoryBarrier() {
  __asm__ __volatile__ ("dmb ish" ::: "memory");  // NOLINT
}

// NoBarrier versions of the operations include "memory" in the clobber list.
// This is not required for direct usage of the NoBarrier versions of the
// operations. However, it is required for correctness when they are used as
// part of the Acquire or Release versions, to ensure that nothing from outside
// the call is reordered between the operation and the memory barrier. This
// does not change the code generated, so it has little or no impact on the
// NoBarrier operations.
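//
// For example, Acquire_CompareAndSwap() below is NoBarrier_CompareAndSwap()
// followed by MemoryBarrier(); the "memory" clobber on the compare-and-swap
// asm keeps the compiler from moving an unrelated memory access in between
// the operation and the barrier, where the barrier could no longer order it.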

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                        \n\t"
    "ldxr %w[result], %[ptr]                   \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment] \n\t"
    "stxr %w[temp], %w[result], %[ptr]         \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                         \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

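// Increment with a full memory barrier on each side of the atomic operation.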
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 result;

  MemoryBarrier();
  result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

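// Acquire semantics: the compare-and-swap is followed by a full barrier, so
// no later memory access can be reordered before the operation.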
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;

  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}

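// Release semantics: a full barrier precedes the compare-and-swap, so no
// earlier memory access can be reordered after the operation.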
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;

  MemoryBarrier();
  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}

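// NoBarrier stores are plain stores; they impose no ordering on surrounding
// memory accesses.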
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

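// Implemented as a plain store followed by a full barrier.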
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

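// Uses the ARMv8 store-release instruction (stlr), which prevents earlier
// memory accesses from being reordered past the store.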
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %w[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

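// Uses the ARMv8 load-acquire instruction (ldar), which prevents later
// memory accesses from being reordered before the load.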
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %w[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}

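// Implemented as a full barrier followed by a plain load.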
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

// 64-bit versions of the operations.
// See the 32-bit versions for comments.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], %[ptr]                  \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [old_value]"IJr" (old_value),
      [new_value]"r" (new_value)
    : "cc", "memory"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], %[ptr]                \n\t"
    "stxr %w[temp], %[new_value], %[ptr]   \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], %[ptr]                 \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], %[ptr]       \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp),
      [ptr]"+Q" (*ptr)
    : [increment]"IJr" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 result;

  MemoryBarrier();
  result = NoBarrier_AtomicIncrement(ptr, increment);
  MemoryBarrier();

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;

  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  MemoryBarrier();

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;

  MemoryBarrier();
  prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__ (  // NOLINT
    "stlr %x[value], %[ptr]  \n\t"
    : [ptr]"=Q" (*ptr)
    : [value]"r" (value)
    : "memory"
  );  // NOLINT
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value;

  __asm__ __volatile__ (  // NOLINT
    "ldar %x[value], %[ptr]  \n\t"
    : [value]"=r" (value)
    : [ptr]"Q" (*ptr)
    : "memory"
  );  // NOLINT

  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_