OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
15 // | 15 // |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 // This file is an internal atomic implementation, use atomicops.h instead. | 28 // This file is an internal atomic implementation, use atomicops.h instead. |
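For context, callers are expected to include atomicops.h rather than this header. A minimal usage sketch, assuming the public header exposes these functions under v8::internal as the definitions below suggest:

  #include "atomicops.h"  // public entry point; do not include this file directly

  v8::internal::Atomic32 counter = 0;
  // Atomically add 1; OSAtomicAdd32 underneath returns the post-increment value.
  v8::internal::Atomic32 now =
      v8::internal::NoBarrier_AtomicIncrement(&counter, 1);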
29 | 29 |
30 #ifndef V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | 30 #ifndef V8_ATOMICOPS_INTERNALS_MAC_H_ |
31 #define V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | 31 #define V8_ATOMICOPS_INTERNALS_MAC_H_ |
32 | 32 |
33 #include <libkern/OSAtomic.h> | 33 #include <libkern/OSAtomic.h> |
34 | 34 |
35 namespace v8 { | 35 namespace v8 { |
36 namespace internal { | 36 namespace internal { |
37 | 37 |
38 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 38 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
39 Atomic32 old_value, | 39 Atomic32 old_value, |
40 Atomic32 new_value) { | 40 Atomic32 new_value) { |
41 Atomic32 prev_value; | 41 Atomic32 prev_value; |
(...skipping 16 matching lines...) |
58 const_cast<Atomic32*>(ptr))); | 58 const_cast<Atomic32*>(ptr))); |
59 return old_value; | 59 return old_value; |
60 } | 60 } |
61 | 61 |
62 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 62 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
63 Atomic32 increment) { | 63 Atomic32 increment) { |
64 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); | 64 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); |
65 } | 65 } |
66 | 66 |
67 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 67 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
68 Atomic32 increment) { | 68 Atomic32 increment) { |
69 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); | 69 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); |
70 } | 70 } |
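A note on the increment semantics above: OSAtomicAdd32 and OSAtomicAdd32Barrier return the value after the addition, so both functions yield the post-increment value. A small usage sketch:

  volatile Atomic32 refs = 0;
  Atomic32 after = Barrier_AtomicIncrement(&refs, 1);
  // after == 1: the post-increment value, unlike fetch-and-add
  // primitives that return the prior value.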
71 | 71 |
72 inline void MemoryBarrier() { | 72 inline void MemoryBarrier() { |
73 OSMemoryBarrier(); | 73 OSMemoryBarrier(); |
74 } | 74 } |
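OSMemoryBarrier() is a full barrier, and the acquire/release loads and stores elided from this diff are conventionally built from a plain access plus that barrier (the tail of Acquire_Load and all of Release_Load are still visible near old lines 205-212). A sketch of that pattern, under that assumption and with _Sketch names to mark it as illustrative rather than the exact elided code:

  inline void Release_Store_Sketch(volatile Atomic32* ptr, Atomic32 value) {
    MemoryBarrier();  // prior writes may not move past the store
    *ptr = value;
  }

  inline Atomic32 Acquire_Load_Sketch(volatile const Atomic32* ptr) {
    Atomic32 value = *ptr;
    MemoryBarrier();  // later reads may not move before the load
    return value;
  }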
75 | 75 |
76 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 76 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
77 Atomic32 old_value, | 77 Atomic32 old_value, |
78 Atomic32 new_value) { | 78 Atomic32 new_value) { |
(...skipping 46 matching lines...) |
125 #ifdef __LP64__ | 125 #ifdef __LP64__ |
126 | 126 |
127 // 64-bit implementation on 64-bit platform | 127 // 64-bit implementation on 64-bit platform |
128 | 128 |
129 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 129 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
130 Atomic64 old_value, | 130 Atomic64 old_value, |
131 Atomic64 new_value) { | 131 Atomic64 new_value) { |
132 Atomic64 prev_value; | 132 Atomic64 prev_value; |
133 do { | 133 do { |
134 if (OSAtomicCompareAndSwap64(old_value, new_value, | 134 if (OSAtomicCompareAndSwap64(old_value, new_value, |
135 const_cast<Atomic64*>(ptr))) { | 135 reinterpret_cast<volatile int64_t*>(ptr))) { |
136 return old_value; | 136 return old_value; |
137 } | 137 } |
138 prev_value = *ptr; | 138 prev_value = *ptr; |
139 } while (prev_value == old_value); | 139 } while (prev_value == old_value); |
140 return prev_value; | 140 return prev_value; |
141 } | 141 } |
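The loop above compensates for OSAtomicCompareAndSwap64 reporting only success or failure, while this interface must return the value observed at ptr. On failure the code re-reads *ptr; if that fresh read still equals old_value, the failed swap raced with a writer that has since restored old_value, so it retries instead of misreporting a mismatch. The skeleton of the technique, with bool_cas as an illustrative stand-in for the OS primitive:

  Atomic64 ValueReturningCas(volatile Atomic64* ptr,
                             Atomic64 old_value, Atomic64 new_value) {
    for (;;) {
      if (bool_cas(old_value, new_value, ptr)) return old_value;  // swapped
      Atomic64 seen = *ptr;
      if (seen != old_value) return seen;  // genuine mismatch: report what we saw
      // seen == old_value again: transient race; try the swap once more.
    }
  }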
142 | 142 |
143 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 143 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
144 Atomic64 new_value) { | 144 Atomic64 new_value) { |
145 Atomic64 old_value; | 145 Atomic64 old_value; |
146 do { | 146 do { |
147 old_value = *ptr; | 147 old_value = *ptr; |
148 } while (!OSAtomicCompareAndSwap64(old_value, new_value, | 148 } while (!OSAtomicCompareAndSwap64(old_value, new_value, |
149 const_cast<Atomic64*>(ptr))); | 149 reinterpret_cast<volatile int64_t*>(ptr))); |
150 return old_value; | 150 return old_value; |
151 } | 151 } |
152 | 152 |
153 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 153 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
154 Atomic64 increment) { | 154 Atomic64 increment) { |
155 return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr)); | 155 return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr)); |
156 } | 156 } |
157 | 157 |
158 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 158 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
159 Atomic64 increment) { | 159 Atomic64 increment) { |
160 return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr)); | 160 return OSAtomicAdd64Barrier(increment, |
| 161 reinterpret_cast<volatile int64_t*>(ptr)); |
161 } | 162 } |
162 | 163 |
163 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 164 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
164 Atomic64 old_value, | 165 Atomic64 old_value, |
165 Atomic64 new_value) { | 166 Atomic64 new_value) { |
166 Atomic64 prev_value; | 167 Atomic64 prev_value; |
167 do { | 168 do { |
168 if (OSAtomicCompareAndSwap64Barrier(old_value, new_value, | 169 if (OSAtomicCompareAndSwap64Barrier( |
169 const_cast<Atomic64*>(ptr))) { | 170 old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) { |
170 return old_value; | 171 return old_value; |
171 } | 172 } |
172 prev_value = *ptr; | 173 prev_value = *ptr; |
173 } while (prev_value == old_value); | 174 } while (prev_value == old_value); |
174 return prev_value; | 175 return prev_value; |
175 } | 176 } |
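The recurring change in this block, const_cast<Atomic64*> becoming reinterpret_cast<volatile int64_t*>, is a type fix rather than a style change. The OSAtomic*64 primitives take volatile int64_t*, and if Atomic64 is a typedef of intptr_t (as in V8's atomicops.h), that is long on LP64 Darwin while int64_t is long long: two distinct 64-bit types. const_cast can only adjust cv-qualifiers, so the old expression does not convert when the types differ; reinterpret_cast bridges the same-width pointer types. A sketch of the distinction, with the typedef stated as an assumption:

  typedef intptr_t Atomic64;  // long on LP64 Darwin (assumed, per atomicops.h)
  volatile Atomic64 v = 0;
  volatile Atomic64* p = &v;
  // const_cast<Atomic64*>(p) yields long*, which does not convert to the
  // volatile int64_t* (i.e. long long*) that OSAtomicAdd64 expects:
  //   OSAtomicAdd64(1, const_cast<Atomic64*>(p));             // ill-formed
  OSAtomicAdd64(1, reinterpret_cast<volatile int64_t*>(p));    // OK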
176 | 177 |
177 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 178 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
178 Atomic64 old_value, | 179 Atomic64 old_value, |
179 Atomic64 new_value) { | 180 Atomic64 new_value) { |
(...skipping 26 matching lines...) |
206 return value; | 207 return value; |
207 } | 208 } |
208 | 209 |
209 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 210 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
210 MemoryBarrier(); | 211 MemoryBarrier(); |
211 return *ptr; | 212 return *ptr; |
212 } | 213 } |
213 | 214 |
214 #endif // defined(__LP64__) | 215 #endif // defined(__LP64__) |
215 | 216 |
216 // MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always |
217 // distinct types on the Mac, even when they are the same size. We need to |
218 // cast explicitly from AtomicWord to Atomic32/64 to implement the AtomicWord interface. |
219 #ifdef __LP64__ | |
220 #define AtomicWordCastType Atomic64 | |
221 #else | |
222 #define AtomicWordCastType Atomic32 | |
223 #endif | |
224 | |
225 inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, | |
226 AtomicWord old_value, | |
227 AtomicWord new_value) { | |
228 return NoBarrier_CompareAndSwap( | |
229 reinterpret_cast<volatile AtomicWordCastType*>(ptr), | |
230 old_value, new_value); | |
231 } | |
232 | |
233 inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, | |
234 AtomicWord new_value) { | |
235 return NoBarrier_AtomicExchange( | |
236 reinterpret_cast<volatile AtomicWordCastType*>(ptr), new_value); | |
237 } | |
238 | |
239 inline AtomicWord NoBarrier_AtomicIncrement(volatile AtomicWord* ptr, | |
240 AtomicWord increment) { | |
241 return NoBarrier_AtomicIncrement( | |
242 reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment); | |
243 } | |
244 | |
245 inline AtomicWord Barrier_AtomicIncrement(volatile AtomicWord* ptr, | |
246 AtomicWord increment) { | |
247 return Barrier_AtomicIncrement( | |
248 reinterpret_cast<volatile AtomicWordCastType*>(ptr), increment); | |
249 } | |
250 | |
251 inline AtomicWord Acquire_CompareAndSwap(volatile AtomicWord* ptr, | |
252 AtomicWord old_value, | |
253 AtomicWord new_value) { | |
254 return v8::internal::Acquire_CompareAndSwap( | |
255 reinterpret_cast<volatile AtomicWordCastType*>(ptr), | |
256 old_value, new_value); | |
257 } | |
258 | |
259 inline AtomicWord Release_CompareAndSwap(volatile AtomicWord* ptr, | |
260 AtomicWord old_value, | |
261 AtomicWord new_value) { | |
262 return v8::internal::Release_CompareAndSwap( | |
263 reinterpret_cast<volatile AtomicWordCastType*>(ptr), | |
264 old_value, new_value); | |
265 } | |
266 | |
267 inline void NoBarrier_Store(volatile AtomicWord* ptr, AtomicWord value) { | |
268 NoBarrier_Store( | |
269 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); | |
270 } | |
271 | |
272 inline void Acquire_Store(volatile AtomicWord* ptr, AtomicWord value) { | |
273 return v8::internal::Acquire_Store( | |
274 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); | |
275 } | |
276 | |
277 inline void Release_Store(volatile AtomicWord* ptr, AtomicWord value) { | |
278 return v8::internal::Release_Store( | |
279 reinterpret_cast<volatile AtomicWordCastType*>(ptr), value); | |
280 } | |
281 | |
282 inline AtomicWord NoBarrier_Load(volatile const AtomicWord* ptr) { | |
283 return NoBarrier_Load( | |
284 reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | |
285 } | |
286 | |
287 inline AtomicWord Acquire_Load(volatile const AtomicWord* ptr) { | |
288 return v8::internal::Acquire_Load( | |
289 reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | |
290 } | |
291 | |
292 inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { | |
293 return v8::internal::Release_Load( | |
294 reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | |
295 } | |
296 | |
297 #undef AtomicWordCastType | |
298 | |
299 } } // namespace v8::internal | 217 } } // namespace v8::internal |
300 | 218 |
301 #endif // V8_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | 219 #endif // V8_ATOMICOPS_INTERNALS_MAC_H_ |
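To close, a hedged usage example of the acquire/release pairs this header implements: a toy test-and-set lock. SpinLock is illustrative, not part of this file; Acquire_CompareAndSwap is defined above, and Release_Store is among the operations elided from this diff (the removed AtomicWord wrappers forwarded to it).

  class SpinLock {
   public:
    void Lock() {
      // Swap 0 -> 1 with acquire semantics; spin until we win the swap.
      while (v8::internal::Acquire_CompareAndSwap(&state_, 0, 1) != 0) {}
    }
    void Unlock() {
      // Release semantics publish the critical section's writes first.
      v8::internal::Release_Store(&state_, 0);
    }
   private:
    volatile v8::internal::Atomic32 state_ = 0;
  };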