OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use atomicops.h instead. | 5 // This file is an internal atomic implementation, use atomicops.h instead. |
6 | 6 |
7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ | 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ |
8 #define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ | 8 #define V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ |
9 | 9 |
10 #include <libkern/OSAtomic.h> | 10 #include <libkern/OSAtomic.h> |
11 | 11 |
12 namespace v8 { | 12 namespace v8 { |
13 namespace base { | 13 namespace base { |
14 | 14 |
| 15 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") |
| 16 |
| 17 inline void MemoryBarrier() { OSMemoryBarrier(); } |
| 18 |
| 19 inline void AcquireMemoryBarrier() { |
| 20 // On x86 processors, loads already have acquire semantics, so |
| 21 // there is no need to put a full barrier here. |
| 22 #if V8_HOST_ARCH_IA32 || V8_HOST_ARCH_X64 |
| 23 ATOMICOPS_COMPILER_BARRIER(); |
| 24 #else |
| 25 MemoryBarrier(); |
| 26 #endif |
| 27 } |
| 28 |
15 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 29 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
16 Atomic32 old_value, | 30 Atomic32 old_value, |
17 Atomic32 new_value) { | 31 Atomic32 new_value) { |
18 Atomic32 prev_value; | 32 Atomic32 prev_value; |
19 do { | 33 do { |
20 if (OSAtomicCompareAndSwap32(old_value, new_value, | 34 if (OSAtomicCompareAndSwap32(old_value, new_value, |
21 const_cast<Atomic32*>(ptr))) { | 35 const_cast<Atomic32*>(ptr))) { |
22 return old_value; | 36 return old_value; |
23 } | 37 } |
24 prev_value = *ptr; | 38 prev_value = *ptr; |
(...skipping 14 matching lines...) |
39 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 53 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
40 Atomic32 increment) { | 54 Atomic32 increment) { |
41 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); | 55 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); |
42 } | 56 } |
43 | 57 |
44 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 58 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
45 Atomic32 increment) { | 59 Atomic32 increment) { |
46 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); | 60 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); |
47 } | 61 } |
48 | 62 |
49 inline void MemoryBarrier() { | |
50 OSMemoryBarrier(); | |
51 } | |
52 | |
53 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 63 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
54 Atomic32 old_value, | 64 Atomic32 old_value, |
55 Atomic32 new_value) { | 65 Atomic32 new_value) { |
56 Atomic32 prev_value; | 66 Atomic32 prev_value; |
57 do { | 67 do { |
58 if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, | 68 if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, |
59 const_cast<Atomic32*>(ptr))) { | 69 const_cast<Atomic32*>(ptr))) { |
60 return old_value; | 70 return old_value; |
61 } | 71 } |
62 prev_value = *ptr; | 72 prev_value = *ptr; |
(...skipping 28 matching lines...) |
91 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 101 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { |
92 return *ptr; | 102 return *ptr; |
93 } | 103 } |
94 | 104 |
95 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 105 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
96 return *ptr; | 106 return *ptr; |
97 } | 107 } |
98 | 108 |
99 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 109 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
100 Atomic32 value = *ptr; | 110 Atomic32 value = *ptr; |
101 MemoryBarrier(); | 111 AcquireMemoryBarrier(); |
102 return value; | 112 return value; |
103 } | 113 } |
104 | 114 |
105 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 115 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
106 MemoryBarrier(); | 116 MemoryBarrier(); |
107 return *ptr; | 117 return *ptr; |
108 } | 118 } |
109 | 119 |
110 #ifdef __LP64__ | 120 #ifdef __LP64__ |
111 | 121 |
(...skipping 69 matching lines...) |
181 MemoryBarrier(); | 191 MemoryBarrier(); |
182 *ptr = value; | 192 *ptr = value; |
183 } | 193 } |
184 | 194 |
185 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | 195 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
186 return *ptr; | 196 return *ptr; |
187 } | 197 } |
188 | 198 |
189 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 199 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
190 Atomic64 value = *ptr; | 200 Atomic64 value = *ptr; |
191 MemoryBarrier(); | 201 AcquireMemoryBarrier(); |
192 return value; | 202 return value; |
193 } | 203 } |
194 | 204 |
195 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 205 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
196 MemoryBarrier(); | 206 MemoryBarrier(); |
197 return *ptr; | 207 return *ptr; |
198 } | 208 } |
199 | 209 |
200 #endif // defined(__LP64__) | 210 #endif // defined(__LP64__) |
201 | 211 |
| 212 #undef ATOMICOPS_COMPILER_BARRIER |
202 } } // namespace v8::base | 213 } } // namespace v8::base |
203 | 214 |
204 #endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ | 215 #endif // V8_BASE_ATOMICOPS_INTERNALS_MAC_H_ |
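
The core of this patch is the new AcquireMemoryBarrier(): as the added comment notes, on IA-32 and x64 every ordinary load already has acquire semantics at the hardware level, so Acquire_Load() only has to stop the compiler from reordering, not the CPU, and the full OSMemoryBarrier() it previously issued was wasted work on those architectures. A minimal sketch of the same idea with modern <atomic> (illustrative only, not part of the patch): on x86/x64 the acquire load and release store below compile to plain MOV instructions plus compiler barriers, with no fence instruction emitted.

#include <atomic>
#include <cassert>
#include <thread>

// Message-passing pattern that acquire/release operations make safe.
std::atomic<int> payload{0};
std::atomic<bool> ready{false};

void Producer() {
  payload.store(42, std::memory_order_relaxed);
  ready.store(true, std::memory_order_release);  // like Release_Store()
}

void Consumer() {
  // Like Acquire_Load(): on x86/x64 this is a plain load plus a
  // compiler barrier, which is exactly what the patch exploits.
  while (!ready.load(std::memory_order_acquire)) {
  }
  assert(payload.load(std::memory_order_relaxed) == 42);
}

int main() {
  std::thread t1(Producer), t2(Consumer);
  t1.join();
  t2.join();
  return 0;
}

Note also the #undef ATOMICOPS_COMPILER_BARRIER at the bottom of the new version, which keeps the helper macro from leaking into every file that includes atomicops.h.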
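
For context on the CAS helpers left unchanged by the diff: NoBarrier_CompareAndSwap() returns old_value when the swap succeeds, and otherwise re-reads the location into prev_value; the elided lines presumably retry while that re-read value still equals old_value and return prev_value once it differs. A rough <atomic> equivalent of that contract (a sketch based only on what is visible in this diff, not V8's API):

#include <atomic>
#include <cstdint>

using Atomic32 = int32_t;

// Sketch of the return convention: old_value on success, otherwise
// the value observed at the time of the failed attempt.
inline Atomic32 CompareAndSwapSketch(std::atomic<Atomic32>* ptr,
                                     Atomic32 old_value,
                                     Atomic32 new_value) {
  Atomic32 expected = old_value;
  // On failure, compare_exchange_strong writes the observed value into
  // 'expected', which plays the role of prev_value in the V8 helper.
  ptr->compare_exchange_strong(expected, new_value,
                               std::memory_order_relaxed);  // "NoBarrier"
  return expected;
}

Acquire_CompareAndSwap() follows the same loop but calls the OSAtomicCompareAndSwap32Barrier variant, trading the relaxed ordering for a full barrier on the successful swap.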