OLD | NEW |
1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2006-2008 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
6 | 6 |
7 #ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | 7 #ifndef BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ |
8 #define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | 8 #define BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ |
9 #pragma once | 9 #pragma once |
10 | 10 |
(...skipping 92 matching lines...)
103 #ifdef __LP64__ | 103 #ifdef __LP64__ |
104 | 104 |
105 // 64-bit implementation on 64-bit platform | 105 // 64-bit implementation on 64-bit platform |
106 | 106 |
107 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, | 107 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, |
108 Atomic64 old_value, | 108 Atomic64 old_value, |
109 Atomic64 new_value) { | 109 Atomic64 new_value) { |
110 Atomic64 prev_value; | 110 Atomic64 prev_value; |
111 do { | 111 do { |
112 if (OSAtomicCompareAndSwap64(old_value, new_value, | 112 if (OSAtomicCompareAndSwap64(old_value, new_value, |
113 const_cast<Atomic64*>(ptr))) { | 113 reinterpret_cast<volatile int64_t*>(ptr))) { |
114 return old_value; | 114 return old_value; |
115 } | 115 } |
116 prev_value = *ptr; | 116 prev_value = *ptr; |
117 } while (prev_value == old_value); | 117 } while (prev_value == old_value); |
118 return prev_value; | 118 return prev_value; |
119 } | 119 } |
120 | 120 |
121 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, | 121 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, |
122 Atomic64 new_value) { | 122 Atomic64 new_value) { |
123 Atomic64 old_value; | 123 Atomic64 old_value; |
124 do { | 124 do { |
125 old_value = *ptr; | 125 old_value = *ptr; |
126 } while (!OSAtomicCompareAndSwap64(old_value, new_value, | 126 } while (!OSAtomicCompareAndSwap64(old_value, new_value, |
127 const_cast<Atomic64*>(ptr))); | 127 reinterpret_cast<volatile int64_t*>(ptr))); |
128 return old_value; | 128 return old_value; |
129 } | 129 } |
130 | 130 |
131 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, | 131 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, |
132 Atomic64 increment) { | 132 Atomic64 increment) { |
133 return OSAtomicAdd64(increment, const_cast<Atomic64*>(ptr)); | 133 return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr)); |
134 } | 134 } |
135 | 135 |
136 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr, | 136 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr, |
137 Atomic64 increment) { | 137 Atomic64 increment) { |
138 return OSAtomicAdd64Barrier(increment, const_cast<Atomic64*>(ptr)); | 138 return OSAtomicAdd64Barrier(increment, |
| 139 reinterpret_cast<volatile int64_t*>(ptr)); |
139 } | 140 } |
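The casts above now match the signatures of the 64-bit OSAtomic primitives in <libkern/OSAtomic.h>, which take a volatile int64_t* (e.g. bool OSAtomicCompareAndSwap64(int64_t oldValue, int64_t newValue, volatile int64_t* theValue)). A const_cast can only add or remove cv-qualifiers, so the old form dropped the volatile qualifier and compiled only while Atomic64 was exactly int64_t; a reinterpret_cast keeps the qualifier and converts the pointee type. A minimal sketch of the call shape, assuming Atomic64 is a 64-bit integer typedef that is not necessarily int64_t itself (the ExchangeSketch name is illustrative, not from the CL):

    #include <libkern/OSAtomic.h>
    #include <stdint.h>

    typedef intptr_t Atomic64;  // assumption: 64-bit on LP64, not the same type as int64_t

    inline Atomic64 ExchangeSketch(volatile Atomic64* ptr, Atomic64 new_value) {
      Atomic64 old_value;
      do {
        old_value = *ptr;
        // The pointee type is converted to int64_t while the volatile
        // qualifier on the pointer is preserved.
      } while (!OSAtomicCompareAndSwap64(old_value, new_value,
                                         reinterpret_cast<volatile int64_t*>(ptr)));
      return old_value;
    }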
140 | 141 |
141 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, | 142 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, |
142 Atomic64 old_value, | 143 Atomic64 old_value, |
143 Atomic64 new_value) { | 144 Atomic64 new_value) { |
144 Atomic64 prev_value; | 145 Atomic64 prev_value; |
145 do { | 146 do { |
146 if (OSAtomicCompareAndSwap64Barrier(old_value, new_value, | 147 if (OSAtomicCompareAndSwap64Barrier(old_value, new_value, |
147 const_cast<Atomic64*>(ptr))) { | 148 reinterpret_cast<volatile int64_t*>(ptr))) { |
148 return old_value; | 149 return old_value; |
149 } | 150 } |
150 prev_value = *ptr; | 151 prev_value = *ptr; |
151 } while (prev_value == old_value); | 152 } while (prev_value == old_value); |
152 return prev_value; | 153 return prev_value; |
153 } | 154 } |
154 | 155 |
155 inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, | 156 inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, |
156 Atomic64 old_value, | 157 Atomic64 old_value, |
157 Atomic64 new_value) { | 158 Atomic64 new_value) { |
(...skipping 28 matching lines...)
186 | 187 |
187 inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { | 188 inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { |
188 MemoryBarrier(); | 189 MemoryBarrier(); |
189 return *ptr; | 190 return *ptr; |
190 } | 191 } |
191 | 192 |
192 #endif // defined(__LP64__) | 193 #endif // defined(__LP64__) |
193 | 194 |
194 // MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always different | 195 // MacOS uses long for intptr_t, so AtomicWord and Atomic32 are always different |
195 // on the Mac, even when they are the same size. We need to explicitly cast | 196 // on the Mac, even when they are the same size. We need to explicitly cast |
196 // from AtomicWord to Atomic32/64 to implement the AtomicWord interface. | 197 // from AtomicWord to Atomic32 to implement the AtomicWord interface. |
197 #ifdef __LP64__ | 198 // When in 64-bit mode, AtomicWord is the same as Atomic64, so we need not |
198 #define AtomicWordCastType Atomic64 | 199 // add duplicate definitions. |
199 #else | 200 #ifndef __LP64__ |
200 #define AtomicWordCastType Atomic32 | 201 #define AtomicWordCastType Atomic32 |
201 #endif | |
202 | 202 |
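The comment block above turns on type identity rather than width, and its effect depends on typedefs that live in base/atomicops.h rather than in this header. A short sketch of those assumed typedefs (not taken from this CL) that makes the two cases concrete:

    #include <stdint.h>

    // Assumed definitions, mirroring base/atomicops.h:
    typedef int32_t  Atomic32;    // int on the Mac
    #ifdef __LP64__
    typedef intptr_t Atomic64;    // long, i.e. the same type as AtomicWord
    #endif
    typedef intptr_t AtomicWord;  // long on the Mac in 32- and 64-bit builds alike

    // In a 32-bit build, long and int are both 32 bits wide yet remain distinct
    // types, so AtomicWord* never converts implicitly to Atomic32*; the wrappers
    // below bridge them through AtomicWordCastType with reinterpret_cast. In an
    // LP64 build, AtomicWord is already Atomic64, so the #ifndef compiles the
    // wrappers out instead of redefining the 64-bit functions.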
203 inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, | 203 inline AtomicWord NoBarrier_CompareAndSwap(volatile AtomicWord* ptr, |
204 AtomicWord old_value, | 204 AtomicWord old_value, |
205 AtomicWord new_value) { | 205 AtomicWord new_value) { |
206 return NoBarrier_CompareAndSwap( | 206 return NoBarrier_CompareAndSwap( |
207 reinterpret_cast<volatile AtomicWordCastType*>(ptr), | 207 reinterpret_cast<volatile AtomicWordCastType*>(ptr), |
208 old_value, new_value); | 208 old_value, new_value); |
209 } | 209 } |
210 | 210 |
211 inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, | 211 inline AtomicWord NoBarrier_AtomicExchange(volatile AtomicWord* ptr, |
(...skipping 54 matching lines...)
266 return base::subtle::Acquire_Load( | 266 return base::subtle::Acquire_Load( |
267 reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | 267 reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); |
268 } | 268 } |
269 | 269 |
270 inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { | 270 inline AtomicWord Release_Load(volatile const AtomicWord* ptr) { |
271 return base::subtle::Release_Load( | 271 return base::subtle::Release_Load( |
272 reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); | 272 reinterpret_cast<volatile const AtomicWordCastType*>(ptr)); |
273 } | 273 } |
274 | 274 |
275 #undef AtomicWordCastType | 275 #undef AtomicWordCastType |
| 276 #endif |
276 | 277 |
277 } // namespace base::subtle | 278 } // namespace base::subtle |
278 } // namespace base | 279 } // namespace base |
279 | 280 |
280 #endif // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ | 281 #endif // BASE_ATOMICOPS_INTERNALS_X86_MACOSX_H_ |