Chromium Code Reviews

Unified Diff: src/atomicops_internals_arm64_gcc.h

Issue 212673006: Update atomicops_internals_arm64_gcc with changes made in chromium base/ (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 9 months ago
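In summary, the patch ports three changes from the Chromium base/ copy of this header: MemoryBarrier() becomes a real 'dmb ish' barrier instead of an unused stub; every ldxr/stxr block passes its memory operand directly through a "+Q" constraint instead of dereferencing a pointer held in a register; and the functions that previously duplicated whole barrier-plus-loop assembly blocks (Barrier_AtomicIncrement, Release_CompareAndSwap, and the Acquire_/Release_ loads and stores) are rewritten as compositions of MemoryBarrier() with the NoBarrier_ primitives.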
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 15 matching lines...)
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_ARM_GCC_H_
#define V8_ATOMICOPS_INTERNALS_ARM_GCC_H_

namespace v8 {
namespace internal {
@@ MemoryBarrier() @@
-inline void MemoryBarrier() { /* Not used. */ }
+inline void MemoryBarrier() {
+  __asm__ __volatile__ (  // NOLINT
+    "dmb ish                                  \n\t"  // Data memory barrier.
+    ::: "memory"
+  );  // NOLINT
+}
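The new barrier executes 'dmb ish', a full data memory barrier over the inner-shareable domain (all cores that may share this memory), while the "memory" clobber keeps GCC from reordering or caching memory accesses across the asm statement. As a point of reference, a C++11 sequentially consistent fence typically lowers to exactly this instruction on ARM64; a minimal sketch, not part of the patch:

    #include <atomic>

    inline void MemoryBarrierPortable() {
      // Typically compiles to "dmb ish" on ARM64.
      std::atomic_thread_fence(std::memory_order_seq_cst);
    }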
@@ NoBarrier_CompareAndSwap(Atomic32) @@
 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {
   Atomic32 prev;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                    \n\t"
-    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
     "cmp %w[prev], %w[old_value]           \n\t"
     "bne 1f                                \n\t"
-    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
     "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
     "1:                                    \n\t"
     "clrex                                 \n\t"  // In case we didn't swap.
     : [prev]"=&r" (prev),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [old_value]"r" (old_value),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
       [new_value]"r" (new_value)
     : "memory", "cc"
   );  // NOLINT

   return prev;
 }
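The operand rewrite in this hunk recurs through the rest of the patch and is its real substance. The old code passed the pointer in a general register ([ptr]"r" (ptr)) and built the address by hand with [%[ptr]]; the compiler only knew that some memory was touched via the "memory" clobber. The new code hands *ptr to the asm as a genuine memory operand: "Q" is the AArch64 constraint for a memory reference addressed by a single base register with no offset, which is the only addressing mode ldxr/stxr accept, and "+" marks the operand as both read and written, so GCC prints %[ptr] as [xN] itself and knows precisely which object the asm modifies. A minimal sketch of the idiom in isolation (exclusive_increment is a hypothetical name, not part of the file):

    #include <stdint.h>

    inline int32_t exclusive_increment(volatile int32_t* ptr) {
      int32_t result;
      int32_t status;
      __asm__ __volatile__ (
        "0:                                  \n\t"
        "ldxr %w[result], %[mem]             \n\t"  // %[mem] prints as [xN].
        "add %w[result], %w[result], #1      \n\t"
        "stxr %w[status], %w[result], %[mem] \n\t"
        "cbnz %w[status], 0b                 \n\t"
        : [result]"=&r" (result),
          [status]"=&r" (status),
          [mem]"+Q" (*ptr)
        :
        : "memory");
      return result;
    }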
@@ NoBarrier_AtomicExchange(Atomic32) @@
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 result;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                    \n\t"
-    "ldxr %w[result], [%[ptr]]             \n\t"  // Load the previous value.
-    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "ldxr %w[result], %[ptr]               \n\t"  // Load the previous value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
     "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
     : [result]"=&r" (result),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [new_value]"r" (new_value)
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
     : "memory"
   );  // NOLINT

   return result;
 }
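As a usage illustration (not part of the patch), an atomic exchange is the classic building block of a test-and-set spinlock. A sketch assuming 0 means unlocked, with an explicit barrier standing in for acquire semantics:

    // Sketch only: production code would use acquire/release primitives
    // rather than a full barrier, and should yield while spinning.
    inline void SpinLock(volatile Atomic32* lock) {
      while (NoBarrier_AtomicExchange(lock, 1) != 0) {
        // Busy-wait until the holder stores 0.
      }
      MemoryBarrier();  // Keep the critical section after the acquisition.
    }

    inline void SpinUnlock(volatile Atomic32* lock) {
      Release_Store(lock, 0);  // Barrier, then store (defined below).
    }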
@@ NoBarrier_AtomicIncrement(Atomic32) @@
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   Atomic32 result;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                       \n\t"
-    "ldxr %w[result], [%[ptr]]                \n\t"  // Load the previous value.
+    "ldxr %w[result], %[ptr]                  \n\t"  // Load the previous value.
     "add %w[result], %w[result], %w[increment]\n\t"
-    "stxr %w[temp], %w[result], [%[ptr]]      \n\t"  // Try to store the result.
+    "stxr %w[temp], %w[result], %[ptr]        \n\t"  // Try to store the result.
     "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
     : [result]"=&r" (result),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [increment]"r" (increment)
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"r" (increment)
     : "memory"
   );  // NOLINT

   return result;
 }
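The ldxr/add/stxr sequence is a load-linked/store-conditional fetch-and-add: stxr writes a nonzero status register if another core claimed the exclusive monitor between the load and the store, in which case the loop retries. Functionally it matches this portable CAS loop built from the primitives in this file (sketch only; the direct form avoids the extra reload and compare):

    inline Atomic32 IncrementViaCas(volatile Atomic32* ptr, Atomic32 inc) {
      Atomic32 old;
      do {
        old = NoBarrier_Load(ptr);  // Defined later in this file.
      } while (NoBarrier_CompareAndSwap(ptr, old, old + inc) != old);
      return old + inc;  // Same contract: returns the incremented value.
    }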
@@ Barrier_AtomicIncrement(Atomic32) @@
 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                         Atomic32 increment) {
-  Atomic32 result;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                  \n\t"  // Data memory barrier.
-    "0:                                       \n\t"
-    "ldxr %w[result], [%[ptr]]                \n\t"  // Load the previous value.
-    "add %w[result], %w[result], %w[increment]\n\t"
-    "stxr %w[temp], %w[result], [%[ptr]]      \n\t"  // Try to store the result.
-    "cbnz %w[temp], 0b                        \n\t"  // Retry on failure.
-    "dmb ish                                  \n\t"  // Data memory barrier.
-    : [result]"=&r" (result),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [increment]"r" (increment)
-    : "memory"
-  );  // NOLINT
+  MemoryBarrier();
+  Atomic32 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();

   return result;
 }
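Instead of repeating the whole loop with leading and trailing 'dmb ish' instructions, the new version composes the existing pieces; the emitted barriers are the same, but the assembly now lives in one place. A rough C++11 analogue of the barrier/op/barrier shape (a sketch, not the actual implementation):

    #include <atomic>

    inline int32_t BarrierIncrementPortable(std::atomic<int32_t>* p,
                                            int32_t inc) {
      std::atomic_thread_fence(std::memory_order_seq_cst);
      int32_t result = p->fetch_add(inc, std::memory_order_relaxed) + inc;
      std::atomic_thread_fence(std::memory_order_seq_cst);
      return result;
    }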
@@ Acquire_CompareAndSwap(Atomic32) @@
 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 prev;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                    \n\t"
-    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
     "cmp %w[prev], %w[old_value]           \n\t"
     "bne 1f                                \n\t"
-    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
     "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
     "dmb ish                               \n\t"  // Data memory barrier.
     "1:                                    \n\t"
     // If the compare failed the 'dmb' is unnecessary, but we still need a
     // 'clrex'.
     "clrex                                 \n\t"
     : [prev]"=&r" (prev),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [old_value]"r" (old_value),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
       [new_value]"r" (new_value)
     : "memory", "cc"
   );  // NOLINT

   return prev;
 }
@@ Release_CompareAndSwap(Atomic32) @@
 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 prev;
   int32_t temp;

+  MemoryBarrier();
+
   __asm__ __volatile__ (  // NOLINT
-    "dmb ish                               \n\t"  // Data memory barrier.
     "0:                                    \n\t"
-    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
+    "ldxr %w[prev], %[ptr]                 \n\t"  // Load the previous value.
     "cmp %w[prev], %w[old_value]           \n\t"
     "bne 1f                                \n\t"
-    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
+    "stxr %w[temp], %w[new_value], %[ptr]  \n\t"  // Try to store the new value.
     "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
     "1:                                    \n\t"
     // If the compare failed we still need a 'clrex'.
     "clrex                                 \n\t"
     : [prev]"=&r" (prev),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [old_value]"r" (old_value),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
       [new_value]"r" (new_value)
     : "memory", "cc"
   );  // NOLINT

   return prev;
 }
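The two flavours place their barrier on opposite sides of the loop: Acquire_CompareAndSwap issues 'dmb' after a successful store, so later accesses cannot move above the swap, while Release_CompareAndSwap now calls MemoryBarrier() first, so earlier writes are visible before the swap can succeed. A typical use of the release flavour is one-shot publication; a sketch where 0 is an assumed "empty" sentinel:

    inline Atomic32 PublishOnce(volatile Atomic32* slot, Atomic32 value) {
      // All writes made before this call are visible to whoever reads
      // `value` out of the slot with acquire semantics.
      Atomic32 prev = Release_CompareAndSwap(slot, 0, value);
      return prev == 0 ? value : prev;  // A racing thread may have won.
    }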
@@ NoBarrier_Store / Acquire_Store / Release_Store (Atomic32) @@
 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                  \n\t"  // Data memory barrier.
-    ::: "memory"  // Prevent gcc from reordering before the store above.
-  );  // NOLINT
+  MemoryBarrier();
 }

 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                  \n\t"  // Data memory barrier.
-    ::: "memory"  // Prevent gcc from reordering after the store below.
-  );  // NOLINT
+  MemoryBarrier();
   *ptr = value;
 }

@@ NoBarrier_Load / Acquire_Load / Release_Load (Atomic32) @@
 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
   return *ptr;
 }

 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
   Atomic32 value = *ptr;
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                  \n\t"  // Data memory barrier.
-    ::: "memory"  // Prevent gcc from reordering before the load above.
-  );  // NOLINT
+  MemoryBarrier();
   return value;
 }

 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                  \n\t"  // Data memory barrier.
-    ::: "memory"  // Prevent gcc from reordering after the load below.
-  );  // NOLINT
+  MemoryBarrier();
   return *ptr;
 }
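These helpers build acquire loads and release stores from plain accesses plus a full barrier, which is conservative: ARMv8 also provides dedicated ldar/stlr instructions with exactly these semantics. Together they implement the standard publication pattern; in C++11 terms (a sketch assuming the names below):

    #include <atomic>

    std::atomic<int32_t> flag(0);
    int32_t payload = 0;

    void Producer() {
      payload = 42;                              // Plain store...
      flag.store(1, std::memory_order_release);  // ...published by release.
    }

    int32_t Consumer() {
      while (flag.load(std::memory_order_acquire) != 1) {
        // Spin until the producer publishes.
      }
      return payload;  // Guaranteed to observe 42.
    }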
// 64-bit versions of the operations.
// See the 32-bit versions for comments.
@@ NoBarrier_CompareAndSwap(Atomic64) @@
 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                          Atomic64 old_value,
                                          Atomic64 new_value) {
   Atomic64 prev;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                     \n\t"
-    "ldxr %[prev], [%[ptr]]                 \n\t"
+    "ldxr %[prev], %[ptr]                   \n\t"
     "cmp %[prev], %[old_value]              \n\t"
     "bne 1f                                 \n\t"
-    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]    \n\t"
     "cbnz %w[temp], 0b                      \n\t"
     "1:                                     \n\t"
     "clrex                                  \n\t"
     : [prev]"=&r" (prev),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [old_value]"r" (old_value),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
       [new_value]"r" (new_value)
     : "memory", "cc"
   );  // NOLINT

   return prev;
 }
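The 64-bit variants differ from the 32-bit ones only in operand width: the data operands drop the %w modifier and use full 64-bit x registers, while [temp] keeps %w and stays int32_t, because the status register written by stxr is architecturally 32-bit. Schematically:

    ldxr %[prev], %[ptr]                   // x register: 64-bit data.
    stxr %w[temp], %[new_value], %[ptr]    // w register: 32-bit status.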
@@ NoBarrier_AtomicExchange(Atomic64) @@
 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                          Atomic64 new_value) {
   Atomic64 result;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                     \n\t"
-    "ldxr %[result], [%[ptr]]               \n\t"
-    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
+    "ldxr %[result], %[ptr]                 \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]    \n\t"
     "cbnz %w[temp], 0b                      \n\t"
     : [result]"=&r" (result),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [new_value]"r" (new_value)
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [new_value]"r" (new_value)
     : "memory"
   );  // NOLINT

   return result;
 }
@@ NoBarrier_AtomicIncrement(Atomic64) @@
 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                           Atomic64 increment) {
   Atomic64 result;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                     \n\t"
-    "ldxr %[result], [%[ptr]]               \n\t"
+    "ldxr %[result], %[ptr]                 \n\t"
     "add %[result], %[result], %[increment] \n\t"
-    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
+    "stxr %w[temp], %[result], %[ptr]       \n\t"
     "cbnz %w[temp], 0b                      \n\t"
     : [result]"=&r" (result),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [increment]"r" (increment)
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [increment]"r" (increment)
     : "memory"
   );  // NOLINT

   return result;
 }
@@ Barrier_AtomicIncrement(Atomic64) @@
 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                         Atomic64 increment) {
-  Atomic64 result;
-  int32_t temp;
-
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                \n\t"
-    "0:                                     \n\t"
-    "ldxr %[result], [%[ptr]]               \n\t"
-    "add %[result], %[result], %[increment] \n\t"
-    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
-    "cbnz %w[temp], 0b                      \n\t"
-    "dmb ish                                \n\t"
-    : [result]"=&r" (result),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [increment]"r" (increment)
-    : "memory"
-  );  // NOLINT
+  MemoryBarrier();
+  Atomic64 result = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();

   return result;
 }
@@ Acquire_CompareAndSwap(Atomic64) @@
 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 prev;
   int32_t temp;

   __asm__ __volatile__ (  // NOLINT
     "0:                                     \n\t"
-    "ldxr %[prev], [%[ptr]]                 \n\t"
+    "ldxr %[prev], %[ptr]                   \n\t"
     "cmp %[prev], %[old_value]              \n\t"
     "bne 1f                                 \n\t"
-    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]    \n\t"
     "cbnz %w[temp], 0b                      \n\t"
     "dmb ish                                \n\t"
     "1:                                     \n\t"
     "clrex                                  \n\t"
     : [prev]"=&r" (prev),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [old_value]"r" (old_value),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
       [new_value]"r" (new_value)
     : "memory", "cc"
   );  // NOLINT

   return prev;
 }
@@ Release_CompareAndSwap(Atomic64) @@
 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 prev;
   int32_t temp;

+  MemoryBarrier();
+
   __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                \n\t"
     "0:                                     \n\t"
-    "ldxr %[prev], [%[ptr]]                 \n\t"
+    "ldxr %[prev], %[ptr]                   \n\t"
     "cmp %[prev], %[old_value]              \n\t"
     "bne 1f                                 \n\t"
-    "stxr %w[temp], %[new_value], [%[ptr]]  \n\t"
+    "stxr %w[temp], %[new_value], %[ptr]    \n\t"
     "cbnz %w[temp], 0b                      \n\t"
     "1:                                     \n\t"
     "clrex                                  \n\t"
     : [prev]"=&r" (prev),
-      [temp]"=&r" (temp)
-    : [ptr]"r" (ptr),
-      [old_value]"r" (old_value),
+      [temp]"=&r" (temp),
+      [ptr]"+Q" (*ptr)
+    : [old_value]"r" (old_value),
       [new_value]"r" (new_value)
     : "memory", "cc"
   );  // NOLINT

   return prev;
 }
@@ NoBarrier_Store / Acquire_Store / Release_Store (Atomic64) @@
 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
 }

 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
   *ptr = value;
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                \n\t"
-    ::: "memory"
-  );  // NOLINT
+  MemoryBarrier();
 }

 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                \n\t"
-    ::: "memory"
-  );  // NOLINT
+  MemoryBarrier();
   *ptr = value;
 }

@@ NoBarrier_Load / Acquire_Load / Release_Load (Atomic64) @@
 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
   return *ptr;
 }

 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
   Atomic64 value = *ptr;
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                \n\t"
-    ::: "memory"
-  );  // NOLINT
+  MemoryBarrier();
   return value;
 }

 inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
-  __asm__ __volatile__ (  // NOLINT
-    "dmb ish                                \n\t"
-    ::: "memory"
-  );  // NOLINT
+  MemoryBarrier();
   return *ptr;
 }
} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_ARM_GCC_H_