// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_MAC_H_
#define V8_ATOMICOPS_INTERNALS_MAC_H_

#include <libkern/OSAtomic.h>

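// The Atomic8/Atomic32/Atomic64 typedefs used below are defined in
// atomicops.h, which includes this header; the operations here map them onto
// the libkern OSAtomic primitives.
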
namespace v8 {
namespace internal {

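// OSAtomicCompareAndSwap32 reports only success or failure, so on failure the
// current value is re-read to satisfy the CAS contract of returning the value
// observed at |ptr|. If the re-read still sees |old_value|, another thread
// must have raced the failed swap, so the loop retries rather than report a
// spurious failure.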
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32(old_value, new_value,
                                 const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

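// The exchange is emulated with a CAS retry loop, since the libkern interface
// provides no direct atomic-exchange primitive: keep re-reading and swapping
// until the value that was read is the one actually swapped out.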
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap32(old_value, new_value,
                                     const_cast<Atomic32*>(ptr)));
  return old_value;
}

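// OSAtomicAdd32 (and its Barrier variant) return the value after the
// addition, which matches the AtomicIncrement contract of returning the
// incremented result rather than the prior value.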
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr));
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr));
}

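// OSMemoryBarrier issues a full memory barrier: neither loads nor stores may
// be reordered across it.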
inline void MemoryBarrier() {
  OSMemoryBarrier();
}

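// Same CAS loop as NoBarrier_CompareAndSwap, but built on
// OSAtomicCompareAndSwap32Barrier, whose full barrier is strong enough for
// the acquire semantics promised here.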
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 prev_value;
  do {
    if (OSAtomicCompareAndSwap32Barrier(old_value, new_value,
                                        const_cast<Atomic32*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

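// The lib kern interface does not distinguish between Acquire and Release
// memory barriers; they are equivalent, so the Release variant simply reuses
// the Acquire implementation.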
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

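// Acquire_Store performs the store and then a full barrier; Release_Store
// issues the barrier first, so that all earlier writes are visible to other
// threads before the store itself becomes visible.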
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

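// The load variants mirror the stores: Acquire_Load reads first and then
// fences, so no later access can be reordered before the load; Release_Load
// fences before reading.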
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#ifdef __LP64__

// 64-bit implementation on 64-bit platforms.

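// The Atomic64 operations below mirror the 32-bit versions above, using the
// 64-bit libkern primitives (OSAtomicCompareAndSwap64, OSAtomicAdd64, and
// their Barrier variants).
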
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64(old_value, new_value,
                                 reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 old_value;
  do {
    old_value = *ptr;
  } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                     reinterpret_cast<volatile int64_t*>(ptr)));
  return old_value;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return OSAtomicAdd64Barrier(increment,
                              reinterpret_cast<volatile int64_t*>(ptr));
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 prev_value;
  do {
    if (OSAtomicCompareAndSwap64Barrier(
            old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) {
      return old_value;
    }
    prev_value = *ptr;
  } while (prev_value == old_value);
  return prev_value;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  // The lib kern interface does not distinguish between
  // Acquire and Release memory barriers; they are equivalent.
  return Acquire_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  MemoryBarrier();
  *ptr = value;
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  MemoryBarrier();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

#endif  // defined(__LP64__)

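// Illustrative usage sketch (not part of the original interface): a simple
// thread-safe reference count built from the primitives above; |ref_count| is
// a hypothetical name.
//
//   Atomic32 ref_count = 1;
//   Barrier_AtomicIncrement(&ref_count, 1);    // acquire a reference
//   if (Barrier_AtomicIncrement(&ref_count, -1) == 0) {
//     // Last reference dropped; safe to destroy the object, since the
//     // barrier orders all prior accesses before the decrement.
//   }
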
} }  // namespace v8::internal

#endif  // V8_ATOMICOPS_INTERNALS_MAC_H_