Chromium Code Reviews

Side by Side Diff: src/atomicops_internals_arm64_gcc.h

Issue 220793002: ARM64: Fix and improve atomic operations. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 8 months ago
OLD | NEW
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 16 matching lines...)
27 27
28 // This file is an internal atomic implementation, use atomicops.h instead. 28 // This file is an internal atomic implementation, use atomicops.h instead.
29 29
30 #ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ 30 #ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
31 #define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ 31 #define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
32 32
33 namespace v8 { 33 namespace v8 {
34 namespace internal { 34 namespace internal {
35 35
36 inline void MemoryBarrier() { 36 inline void MemoryBarrier() {
37 __asm__ __volatile__ ( // NOLINT 37 __asm__ __volatile__ ("dmb ish" ::: "memory"); // NOLINT
38 "dmb ish \n\t" // Data memory barrier.
39 ::: "memory"
40 ); // NOLINT
41 } 38 }
42 39
43
44 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, 40 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
45 Atomic32 old_value, 41 Atomic32 old_value,
46 Atomic32 new_value) { 42 Atomic32 new_value) {
47 Atomic32 prev; 43 Atomic32 prev;
48 int32_t temp; 44 int32_t temp;
49 45
50 __asm__ __volatile__ ( // NOLINT 46 __asm__ __volatile__ ( // NOLINT
51 "0: \n\t" 47 "0: \n\t"
52 "ldxr %w[prev], %[ptr] \n\t" // Load the previous value. 48 "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
53 "cmp %w[prev], %w[old_value] \n\t" 49 "cmp %w[prev], %w[old_value] \n\t"
54 "bne 1f \n\t" 50 "bne 1f \n\t"
55 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. 51 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
56 "cbnz %w[temp], 0b \n\t" // Retry if it did not work. 52 "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
57 "1: \n\t" 53 "1: \n\t"
58 "clrex \n\t" // In case we didn't swap.
59 : [prev]"=&r" (prev), 54 : [prev]"=&r" (prev),
60 [temp]"=&r" (temp), 55 [temp]"=&r" (temp),
61 [ptr]"+Q" (*ptr) 56 [ptr]"+Q" (*ptr)
62 : [old_value]"r" (old_value), 57 : [old_value]"IJr" (old_value),
rmcilroy 2014/04/01 10:41:19 Do the I and J constraints require that the value fits in an immediate?
Alexandre Rames 2014/04/01 10:50:55 It is just a hint. If the value does not fit, it will fall back to the 'r' (register) alternative.
63 [new_value]"r" (new_value) 58 [new_value]"r" (new_value)
64 : "memory", "cc" 59 : "cc"
65 ); // NOLINT 60 ); // NOLINT
66 61
67 return prev; 62 return prev;
68 } 63 }
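
A minimal sketch of the "IJr" constraint discussed in the review thread above (illustrative only; the helper name is invented and nothing here is part of the patch): in GCC's AArch64 backend, "I" matches constants that are valid ADD immediates, "J" matches constants whose negation is (i.e. valid SUB immediates), and the trailing "r" is the register fallback used whenever the value fits neither, which is why the constraint is only a hint.

#include <stdint.h>

// Illustrative helper, not from the CL. With "IJr", GCC picks the
// immediate form of 'add' when 'n' is a suitable compile-time constant
// (for the "J" case the assembler rewrites 'add ..., #-imm' into
// 'sub ..., #imm') and falls back to a scratch register otherwise.
static inline int32_t add_with_constraint_hint(int32_t x, int32_t n) {
  int32_t result;
  __asm__("add %w[result], %w[x], %w[n]"
          : [result] "=r"(result)
          : [x] "r"(x),
            [n] "IJr"(n));
  return result;
}

This mirrors the [old_value]"IJr" (old_value) and [increment]"IJr" (increment) operands in the new code.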
69 64
70 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, 65 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
71 Atomic32 new_value) { 66 Atomic32 new_value) {
72 Atomic32 result; 67 Atomic32 result;
73 int32_t temp; 68 int32_t temp;
74 69
75 __asm__ __volatile__ ( // NOLINT 70 __asm__ __volatile__ ( // NOLINT
76 "0: \n\t" 71 "0: \n\t"
77 "ldxr %w[result], %[ptr] \n\t" // Load the previous value. 72 "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
78 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value. 73 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
79 "cbnz %w[temp], 0b \n\t" // Retry if it did not work. 74 "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
80 : [result]"=&r" (result), 75 : [result]"=&r" (result),
81 [temp]"=&r" (temp), 76 [temp]"=&r" (temp),
82 [ptr]"+Q" (*ptr) 77 [ptr]"+Q" (*ptr)
83 : [new_value]"r" (new_value) 78 : [new_value]"r" (new_value)
84 : "memory" 79 :
85 ); // NOLINT 80 ); // NOLINT
86 81
87 return result; 82 return result;
88 } 83 }
89 84
90 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, 85 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
91 Atomic32 increment) { 86 Atomic32 increment) {
92 Atomic32 result; 87 Atomic32 result;
93 int32_t temp; 88 int32_t temp;
94 89
95 __asm__ __volatile__ ( // NOLINT 90 __asm__ __volatile__ ( // NOLINT
96 "0: \n\t" 91 "0: \n\t"
97 "ldxr %w[result], %[ptr] \n\t" // Load the previous value. 92 "ldxr %w[result], %[ptr] \n\t" // Load the previous value.
98 "add %w[result], %w[result], %w[increment]\n\t" 93 "add %w[result], %w[result], %w[increment]\n\t"
99 "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result. 94 "stxr %w[temp], %w[result], %[ptr] \n\t" // Try to store the result.
100 "cbnz %w[temp], 0b \n\t" // Retry on failure. 95 "cbnz %w[temp], 0b \n\t" // Retry on failure.
101 : [result]"=&r" (result), 96 : [result]"=&r" (result),
102 [temp]"=&r" (temp), 97 [temp]"=&r" (temp),
103 [ptr]"+Q" (*ptr) 98 [ptr]"+Q" (*ptr)
104 : [increment]"r" (increment) 99 : [increment]"IJr" (increment)
105 : "memory" 100 :
106 ); // NOLINT 101 ); // NOLINT
107 102
108 return result; 103 return result;
109 } 104 }
110 105
111 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, 106 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
112 Atomic32 increment) { 107 Atomic32 increment) {
108 Atomic32 result;
109
113 MemoryBarrier(); 110 MemoryBarrier();
114 Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment); 111 result = NoBarrier_AtomicIncrement(ptr, increment);
115 MemoryBarrier(); 112 MemoryBarrier();
116 113
117 return result; 114 return result;
118 } 115 }
119 116
120 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, 117 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
121 Atomic32 old_value, 118 Atomic32 old_value,
122 Atomic32 new_value) { 119 Atomic32 new_value) {
123 Atomic32 prev; 120 Atomic32 prev;
124 int32_t temp;
125 121
126 __asm__ __volatile__ ( // NOLINT 122 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
127 "0: \n\t" 123 MemoryBarrier();
128 "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
129 "cmp %w[prev], %w[old_value] \n\t"
130 "bne 1f \n\t"
131 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
132 "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
133 "dmb ish \n\t" // Data memory barrier.
134 "1: \n\t"
135 // If the compare failed the 'dmb' is unnecessary, but we still need a
136 // 'clrex'.
137 "clrex \n\t"
138 : [prev]"=&r" (prev),
139 [temp]"=&r" (temp),
140 [ptr]"+Q" (*ptr)
141 : [old_value]"r" (old_value),
142 [new_value]"r" (new_value)
143 : "memory", "cc"
144 ); // NOLINT
145 124
146 return prev; 125 return prev;
147 } 126 }
148 127
149 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, 128 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
150 Atomic32 old_value, 129 Atomic32 old_value,
151 Atomic32 new_value) { 130 Atomic32 new_value) {
152 Atomic32 prev; 131 Atomic32 prev;
153 int32_t temp;
154 132
155 MemoryBarrier(); 133 MemoryBarrier();
156 134 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
157 __asm__ __volatile__ ( // NOLINT
158 "0: \n\t"
159 "ldxr %w[prev], %[ptr] \n\t" // Load the previous value.
160 "cmp %w[prev], %w[old_value] \n\t"
161 "bne 1f \n\t"
162 "stxr %w[temp], %w[new_value], %[ptr] \n\t" // Try to store the new value.
163 "cbnz %w[temp], 0b \n\t" // Retry if it did not work.
164 "1: \n\t"
165 // If the compare failed we still need a 'clrex'.
166 "clrex \n\t"
167 : [prev]"=&r" (prev),
168 [temp]"=&r" (temp),
169 [ptr]"+Q" (*ptr)
170 : [old_value]"r" (old_value),
171 [new_value]"r" (new_value)
172 : "memory", "cc"
173 ); // NOLINT
174 135
175 return prev; 136 return prev;
176 } 137 }
177 138
178 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { 139 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
179 *ptr = value; 140 *ptr = value;
180 } 141 }
181 142
182 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { 143 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
183 *ptr = value; 144 *ptr = value;
184 MemoryBarrier(); 145 MemoryBarrier();
185 } 146 }
186 147
187 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { 148 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
188 MemoryBarrier(); 149 __asm__ __volatile__ ( // NOLINT
189 *ptr = value; 150 "stlr %w[value], %[ptr] \n\t"
151 : [ptr]"=Q" (*ptr)
152 : [value]"r" (value)
153 : "memory"
154 ); // NOLINT
190 } 155 }
191 156
192 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { 157 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
193 return *ptr; 158 return *ptr;
194 } 159 }
195 160
196 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { 161 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
197 Atomic32 value = *ptr; 162 Atomic32 value;
198 MemoryBarrier(); 163
164 __asm__ __volatile__ ( // NOLINT
165 "ldar %w[value], %[ptr] \n\t"
166 : [value]"=r" (value)
167 : [ptr]"Q" (*ptr)
168 : "memory"
169 ); // NOLINT
170
199 return value; 171 return value;
200 } 172 }
201 173
202 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { 174 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
203 MemoryBarrier(); 175 MemoryBarrier();
204 return *ptr; 176 return *ptr;
205 } 177 }
206 178
207 // 64-bit versions of the operations. 179 // 64-bit versions of the operations.
208 // See the 32-bit versions for comments. 180 // See the 32-bit versions for comments.
209 181
210 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, 182 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
211 Atomic64 old_value, 183 Atomic64 old_value,
212 Atomic64 new_value) { 184 Atomic64 new_value) {
213 Atomic64 prev; 185 Atomic64 prev;
214 int32_t temp; 186 int32_t temp;
215 187
216 __asm__ __volatile__ ( // NOLINT 188 __asm__ __volatile__ ( // NOLINT
217 "0: \n\t" 189 "0: \n\t"
218 "ldxr %[prev], %[ptr] \n\t" 190 "ldxr %[prev], %[ptr] \n\t"
219 "cmp %[prev], %[old_value] \n\t" 191 "cmp %[prev], %[old_value] \n\t"
220 "bne 1f \n\t" 192 "bne 1f \n\t"
221 "stxr %w[temp], %[new_value], %[ptr] \n\t" 193 "stxr %w[temp], %[new_value], %[ptr] \n\t"
222 "cbnz %w[temp], 0b \n\t" 194 "cbnz %w[temp], 0b \n\t"
223 "1: \n\t" 195 "1: \n\t"
224 "clrex \n\t"
225 : [prev]"=&r" (prev), 196 : [prev]"=&r" (prev),
226 [temp]"=&r" (temp), 197 [temp]"=&r" (temp),
227 [ptr]"+Q" (*ptr) 198 [ptr]"+Q" (*ptr)
228 : [old_value]"r" (old_value), 199 : [old_value]"IJr" (old_value),
229 [new_value]"r" (new_value) 200 [new_value]"r" (new_value)
230 : "memory", "cc" 201 : "cc"
231 ); // NOLINT 202 ); // NOLINT
232 203
233 return prev; 204 return prev;
234 } 205 }
235 206
236 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, 207 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
237 Atomic64 new_value) { 208 Atomic64 new_value) {
238 Atomic64 result; 209 Atomic64 result;
239 int32_t temp; 210 int32_t temp;
240 211
241 __asm__ __volatile__ ( // NOLINT 212 __asm__ __volatile__ ( // NOLINT
242 "0: \n\t" 213 "0: \n\t"
243 "ldxr %[result], %[ptr] \n\t" 214 "ldxr %[result], %[ptr] \n\t"
244 "stxr %w[temp], %[new_value], %[ptr] \n\t" 215 "stxr %w[temp], %[new_value], %[ptr] \n\t"
245 "cbnz %w[temp], 0b \n\t" 216 "cbnz %w[temp], 0b \n\t"
246 : [result]"=&r" (result), 217 : [result]"=&r" (result),
247 [temp]"=&r" (temp), 218 [temp]"=&r" (temp),
248 [ptr]"+Q" (*ptr) 219 [ptr]"+Q" (*ptr)
249 : [new_value]"r" (new_value) 220 : [new_value]"r" (new_value)
250 : "memory" 221 :
251 ); // NOLINT 222 ); // NOLINT
252 223
253 return result; 224 return result;
254 } 225 }
255 226
256 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, 227 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
257 Atomic64 increment) { 228 Atomic64 increment) {
258 Atomic64 result; 229 Atomic64 result;
259 int32_t temp; 230 int32_t temp;
260 231
261 __asm__ __volatile__ ( // NOLINT 232 __asm__ __volatile__ ( // NOLINT
262 "0: \n\t" 233 "0: \n\t"
263 "ldxr %[result], %[ptr] \n\t" 234 "ldxr %[result], %[ptr] \n\t"
264 "add %[result], %[result], %[increment] \n\t" 235 "add %[result], %[result], %[increment] \n\t"
265 "stxr %w[temp], %[result], %[ptr] \n\t" 236 "stxr %w[temp], %[result], %[ptr] \n\t"
266 "cbnz %w[temp], 0b \n\t" 237 "cbnz %w[temp], 0b \n\t"
267 : [result]"=&r" (result), 238 : [result]"=&r" (result),
268 [temp]"=&r" (temp), 239 [temp]"=&r" (temp),
269 [ptr]"+Q" (*ptr) 240 [ptr]"+Q" (*ptr)
270 : [increment]"r" (increment) 241 : [increment]"IJr" (increment)
271 : "memory" 242 :
272 ); // NOLINT 243 ); // NOLINT
273 244
274 return result; 245 return result;
275 } 246 }
276 247
277 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, 248 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
278 Atomic64 increment) { 249 Atomic64 increment) {
250 Atomic64 result;
251
279 MemoryBarrier(); 252 MemoryBarrier();
280 Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment); 253 result = NoBarrier_AtomicIncrement(ptr, increment);
281 MemoryBarrier(); 254 MemoryBarrier();
282 255
283 return result; 256 return result;
284 } 257 }
285 258
286 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, 259 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
287 Atomic64 old_value, 260 Atomic64 old_value,
288 Atomic64 new_value) { 261 Atomic64 new_value) {
289 Atomic64 prev; 262 Atomic64 prev;
290 int32_t temp;
291 263
292 __asm__ __volatile__ ( // NOLINT 264 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
293 "0: \n\t" 265 MemoryBarrier();
294 "ldxr %[prev], %[ptr] \n\t"
295 "cmp %[prev], %[old_value] \n\t"
296 "bne 1f \n\t"
297 "stxr %w[temp], %[new_value], %[ptr] \n\t"
298 "cbnz %w[temp], 0b \n\t"
299 "dmb ish \n\t"
300 "1: \n\t"
301 "clrex \n\t"
302 : [prev]"=&r" (prev),
303 [temp]"=&r" (temp),
304 [ptr]"+Q" (*ptr)
305 : [old_value]"r" (old_value),
306 [new_value]"r" (new_value)
307 : "memory", "cc"
308 ); // NOLINT
309 266
310 return prev; 267 return prev;
311 } 268 }
312 269
313 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, 270 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
314 Atomic64 old_value, 271 Atomic64 old_value,
315 Atomic64 new_value) { 272 Atomic64 new_value) {
316 Atomic64 prev; 273 Atomic64 prev;
317 int32_t temp;
318 274
319 MemoryBarrier(); 275 MemoryBarrier();
320 276 prev = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
321 __asm__ __volatile__ ( // NOLINT
322 "0: \n\t"
323 "ldxr %[prev], %[ptr] \n\t"
324 "cmp %[prev], %[old_value] \n\t"
325 "bne 1f \n\t"
326 "stxr %w[temp], %[new_value], %[ptr] \n\t"
327 "cbnz %w[temp], 0b \n\t"
328 "1: \n\t"
329 "clrex \n\t"
330 : [prev]"=&r" (prev),
331 [temp]"=&r" (temp),
332 [ptr]"+Q" (*ptr)
333 : [old_value]"r" (old_value),
334 [new_value]"r" (new_value)
335 : "memory", "cc"
336 ); // NOLINT
337 277
338 return prev; 278 return prev;
339 } 279 }
340 280
341 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { 281 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
342 *ptr = value; 282 *ptr = value;
343 } 283 }
344 284
345 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { 285 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
346 *ptr = value; 286 *ptr = value;
347 MemoryBarrier(); 287 MemoryBarrier();
348 } 288 }
349 289
350 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { 290 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
351 MemoryBarrier(); 291 __asm__ __volatile__ ( // NOLINT
352 *ptr = value; 292 "stlr %x[value], %[ptr] \n\t"
293 : [ptr]"=Q" (*ptr)
294 : [value]"r" (value)
295 : "memory"
296 ); // NOLINT
353 } 297 }
354 298
355 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { 299 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
356 return *ptr; 300 return *ptr;
357 } 301 }
358 302
359 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { 303 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
360 Atomic64 value = *ptr; 304 Atomic64 value;
361 MemoryBarrier(); 305
306 __asm__ __volatile__ ( // NOLINT
307 "ldar %x[value], %[ptr] \n\t"
308 : [value]"=r" (value)
309 : [ptr]"Q" (*ptr)
310 : "memory"
311 ); // NOLINT
312
362 return value; 313 return value;
363 } 314 }
364 315
365 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { 316 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
366 MemoryBarrier(); 317 MemoryBarrier();
367 return *ptr; 318 return *ptr;
368 } 319 }
369 320
370 } } // namespace v8::internal 321 } } // namespace v8::internal
371 322
372 #endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_ 323 #endif // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
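
The headline change in this patch is that Release_Store and Acquire_Load now map to the ARMv8 stlr and ldar instructions instead of a dmb barrier around a plain access. Below is a usage sketch of the flag/payload hand-off these two primitives exist for (illustrative only; it assumes the public atomicops.h entry point that this internal header tells callers to use, plus POSIX threads):

#include <pthread.h>
#include <stdio.h>
#include "atomicops.h"  // public entry point; this file is internal

static v8::internal::Atomic32 payload = 0;
static v8::internal::Atomic32 ready = 0;

static void* producer(void*) {
  v8::internal::NoBarrier_Store(&payload, 42);  // plain data write
  v8::internal::Release_Store(&ready, 1);       // stlr: publishes payload
  return NULL;
}

static void* consumer(void*) {
  while (v8::internal::Acquire_Load(&ready) != 1) {}  // ldar: pairs with stlr
  // The release/acquire pairing guarantees the payload write is visible here.
  printf("%d\n", (int)v8::internal::NoBarrier_Load(&payload));  // prints 42
  return NULL;
}

int main() {
  pthread_t p, c;
  pthread_create(&p, NULL, producer, NULL);
  pthread_create(&c, NULL, consumer, NULL);
  pthread_join(p, NULL);
  pthread_join(c, NULL);
  return 0;
}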