Chromium Code Reviews
Side by Side Diff: src/atomicops_internals_a64_gcc.h

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS Created 6 years, 10 months ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// This file is an internal atomic implementation, use atomicops.h instead.

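// Illustrative usage only (assuming the usual atomicops.h entry points, which
// this header implements for A64; the snippet is not part of this header's
// interface). Client code includes atomicops.h and calls through it, e.g. a
// spinlock-style handoff:
//
//   #include "atomicops.h"
//
//   v8::internal::Atomic32 lock_word = 0;
//   // Acquire_CompareAndSwap returns the previous value, so 0 means the
//   // lock was taken by this thread.
//   if (v8::internal::Acquire_CompareAndSwap(&lock_word, 0, 1) == 0) {
//     // ... critical section ...
//     v8::internal::Release_Store(&lock_word, 0);  // Unlock.
//   }
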
#ifndef V8_ATOMICOPS_INTERNALS_A64_GCC_H_
#define V8_ATOMICOPS_INTERNALS_A64_GCC_H_

namespace v8 {
namespace internal {

inline void MemoryBarrier() {
  __asm__ __volatile__ ("dmb ish" ::: "memory");  // Data memory barrier.
}

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    "clrex                                 \n\t"  // In case we didn't swap.
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

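// For comparison (illustrative only, assuming a toolchain that provides
// GCC's __atomic builtins): the LDXR/STXR loop above is a hand-rolled
// relaxed compare-exchange, roughly equivalent to:
//
//   Atomic32 expected = old_value;
//   __atomic_compare_exchange_n(ptr, &expected, new_value, false,
//                               __ATOMIC_RELAXED, __ATOMIC_RELAXED);
//   return expected;  // old_value on success, the current value on failure.
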
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[result], [%[ptr]]             \n\t"  // Load the previous value.
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

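// The single-call builtin form, for comparison (illustrative only, assuming
// GCC's __atomic builtins):
//
//   return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
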
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                        \n\t"
    "ldxr %w[result], [%[ptr]]                 \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment] \n\t"
    "stxr %w[temp], %w[result], [%[ptr]]       \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                         \n\t"  // Retry on failure.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                   \n\t"  // Data memory barrier.
    "0:                                        \n\t"
    "ldxr %w[result], [%[ptr]]                 \n\t"  // Load the previous value.
    "add %w[result], %w[result], %w[increment] \n\t"
    "stxr %w[temp], %w[result], [%[ptr]]       \n\t"  // Try to store the result.
    "cbnz %w[temp], 0b                         \n\t"  // Retry on failure.
    "dmb ish                                   \n\t"  // Data memory barrier.
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

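// The dmb/ldxr/add/stxr/dmb sequence above brackets the read-modify-write
// with full barriers. A rough builtin equivalent (illustrative only; the
// barrier placement is not identical to sequential consistency) is:
//
//   return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
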
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "dmb ish                               \n\t"  // Data memory barrier.
    "1:                                    \n\t"
    // If the compare failed the 'dmb' is unnecessary, but we still need a
    // 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                               \n\t"  // Data memory barrier.
    "0:                                    \n\t"
    "ldxr %w[prev], [%[ptr]]               \n\t"  // Load the previous value.
    "cmp %w[prev], %w[old_value]           \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %w[new_value], [%[ptr]]\n\t"  // Try to store the new value.
    "cbnz %w[temp], 0b                     \n\t"  // Retry if it did not work.
    "1:                                    \n\t"
    // If the compare failed we still need a 'clrex'.
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"  // Data memory barrier.
    ::: "memory"     // Prevent gcc from reordering before the store above.
  );  // NOLINT
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"  // Data memory barrier.
    ::: "memory"     // Prevent gcc from reordering after the store below.
  );  // NOLINT
  *ptr = value;
}

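// The barrier-using stores above emulate release ordering with a full
// 'dmb ish' around a plain store. A64 also has a dedicated store-release
// instruction, so a rough builtin equivalent of Release_Store (illustrative
// only, assuming GCC's __atomic builtins) is:
//
//   __atomic_store_n(ptr, value, __ATOMIC_RELEASE);  // Typically 'stlr'.
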
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"  // Data memory barrier.
    ::: "memory"     // Prevent gcc from reordering before the load above.
  );  // NOLINT
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"  // Data memory barrier.
    ::: "memory"     // Prevent gcc from reordering after the load below.
  );  // NOLINT
  return *ptr;
}

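// Likewise, Acquire_Load's load-then-'dmb ish' pattern is a conservative
// form of a load-acquire; a rough builtin equivalent (illustrative only,
// assuming GCC's __atomic builtins) is:
//
//   return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);  // Typically 'ldar'.
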
// 64-bit versions of the operations.
// See the 32-bit versions for comments. The only difference is that the data
// operands use the full 64-bit X registers (no '%w' width modifier); the
// STXR status result is still a 32-bit value.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], [%[ptr]]                \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[result], [%[ptr]]              \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [new_value]"r" (new_value)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                     \n\t"
    "ldxr %[result], [%[ptr]]               \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 result;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                                \n\t"
    "0:                                     \n\t"
    "ldxr %[result], [%[ptr]]               \n\t"
    "add %[result], %[result], %[increment] \n\t"
    "stxr %w[temp], %[result], [%[ptr]]     \n\t"
    "cbnz %w[temp], 0b                      \n\t"
    "dmb ish                                \n\t"
    : [result]"=&r" (result),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [increment]"r" (increment)
    : "memory"
  );  // NOLINT

  return result;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "0:                                    \n\t"
    "ldxr %[prev], [%[ptr]]                \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "dmb ish                               \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev;
  int32_t temp;

  __asm__ __volatile__ (  // NOLINT
    "dmb ish                               \n\t"
    "0:                                    \n\t"
    "ldxr %[prev], [%[ptr]]                \n\t"
    "cmp %[prev], %[old_value]             \n\t"
    "bne 1f                                \n\t"
    "stxr %w[temp], %[new_value], [%[ptr]] \n\t"
    "cbnz %w[temp], 0b                     \n\t"
    "1:                                    \n\t"
    "clrex                                 \n\t"
    : [prev]"=&r" (prev),
      [temp]"=&r" (temp)
    : [ptr]"r" (ptr),
      [old_value]"r" (old_value),
      [new_value]"r" (new_value)
    : "memory", "cc"
  );  // NOLINT

  return prev;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"
    ::: "memory"
  );  // NOLINT
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"
    ::: "memory"
  );  // NOLINT
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"
    ::: "memory"
  );  // NOLINT
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  __asm__ __volatile__ (  // NOLINT
    "dmb ish  \n\t"
    ::: "memory"
  );  // NOLINT
  return *ptr;
}

} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_A64_GCC_H_