| OLD | NEW |
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file is an internal atomic implementation; use base/atomicops.h instead. | 5 // This file is an internal atomic implementation; use base/atomicops.h instead. |
| 6 | 6 |
| 7 #ifndef BASE_ATOMICOPS_INTERNALS_MAC_H_ | 7 #ifndef BASE_ATOMICOPS_INTERNALS_MAC_H_ |
| 8 #define BASE_ATOMICOPS_INTERNALS_MAC_H_ | 8 #define BASE_ATOMICOPS_INTERNALS_MAC_H_ |
| 9 | 9 |
| 10 #include <libkern/OSAtomic.h> | 10 #include <libkern/OSAtomic.h> |
| 11 | 11 |
| 12 namespace base { | 12 namespace base { |
| 13 namespace subtle { | 13 namespace subtle { |
| 14 | 14 |
| 15 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32 *ptr, | 15 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
| 16 Atomic32 old_value, | 16 Atomic32 old_value, |
| 17 Atomic32 new_value) { | 17 Atomic32 new_value) { |
| 18 Atomic32 prev_value; | 18 Atomic32 prev_value; |
| 19 do { | 19 do { |
| 20 if (OSAtomicCompareAndSwap32(old_value, new_value, | 20 if (OSAtomicCompareAndSwap32(old_value, new_value, |
| 21 const_cast<Atomic32*>(ptr))) { | 21 const_cast<Atomic32*>(ptr))) { |
| 22 return old_value; | 22 return old_value; |
| 23 } | 23 } |
| 24 prev_value = *ptr; | 24 prev_value = *ptr; |
| 25 } while (prev_value == old_value); | 25 } while (prev_value == old_value); |
| 26 return prev_value; | 26 return prev_value; |
| 27 } | 27 } |
| 28 | 28 |
| 29 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32 *ptr, | 29 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
| 30 Atomic32 new_value) { | 30 Atomic32 new_value) { |
| 31 Atomic32 old_value; | 31 Atomic32 old_value; |
| 32 do { | 32 do { |
| 33 old_value = *ptr; | 33 old_value = *ptr; |
| 34 } while (!OSAtomicCompareAndSwap32(old_value, new_value, | 34 } while (!OSAtomicCompareAndSwap32(old_value, new_value, |
| 35 const_cast<Atomic32*>(ptr))); | 35 const_cast<Atomic32*>(ptr))); |
| 36 return old_value; | 36 return old_value; |
| 37 } | 37 } |
| 38 | 38 |
| 39 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32 *ptr, | 39 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
| 40 Atomic32 increment) { | 40 Atomic32 increment) { |
| 41 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); | 41 return OSAtomicAdd32(increment, const_cast<Atomic32*>(ptr)); |
| 42 } | 42 } |
| 43 | 43 |
| 44 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32 *ptr, | 44 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| 45 Atomic32 increment) { | 45 Atomic32 increment) { |
| 46 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); | 46 return OSAtomicAdd32Barrier(increment, const_cast<Atomic32*>(ptr)); |
| 47 } | 47 } |
| 48 | 48 |
| 49 inline void MemoryBarrier() { | 49 inline void MemoryBarrier() { |
| 50 OSMemoryBarrier(); | 50 OSMemoryBarrier(); |
| 51 } | 51 } |
| 52 | 52 |
| 53 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32 *ptr, | 53 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 54 Atomic32 old_value, | 54 Atomic32 old_value, |
| 55 Atomic32 new_value) { | 55 Atomic32 new_value) { |
| 56 Atomic32 prev_value; | 56 Atomic32 prev_value; |
| 57 do { | 57 do { |
| 58 if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, | 58 if (OSAtomicCompareAndSwap32Barrier(old_value, new_value, |
| 59 const_cast<Atomic32*>(ptr))) { | 59 const_cast<Atomic32*>(ptr))) { |
| 60 return old_value; | 60 return old_value; |
| 61 } | 61 } |
| 62 prev_value = *ptr; | 62 prev_value = *ptr; |
| 63 } while (prev_value == old_value); | 63 } while (prev_value == old_value); |
| 64 return prev_value; | 64 return prev_value; |
| 65 } | 65 } |
| 66 | 66 |
| 67 inline Atomic32 Release_CompareAndSwap(volatile Atomic32 *ptr, | 67 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 68 Atomic32 old_value, | 68 Atomic32 old_value, |
| 69 Atomic32 new_value) { | 69 Atomic32 new_value) { |
| 70 return Acquire_CompareAndSwap(ptr, old_value, new_value); | 70 return Acquire_CompareAndSwap(ptr, old_value, new_value); |
| 71 } | 71 } |
| 72 | 72 |
| 73 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 73 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 74 *ptr = value; | 74 *ptr = value; |
| 75 } | 75 } |
| 76 | 76 |
| 77 inline void Acquire_Store(volatile Atomic32 *ptr, Atomic32 value) { | 77 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 78 *ptr = value; | 78 *ptr = value; |
| 79 MemoryBarrier(); | 79 MemoryBarrier(); |
| 80 } | 80 } |
| 81 | 81 |
| 82 inline void Release_Store(volatile Atomic32 *ptr, Atomic32 value) { | 82 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 83 MemoryBarrier(); | 83 MemoryBarrier(); |
| 84 *ptr = value; | 84 *ptr = value; |
| 85 } | 85 } |
| 86 | 86 |
| 87 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 87 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
| 88 return *ptr; | 88 return *ptr; |
| 89 } | 89 } |
| 90 | 90 |
| 91 inline Atomic32 Acquire_Load(volatile const Atomic32 *ptr) { | 91 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| 92 Atomic32 value = *ptr; | 92 Atomic32 value = *ptr; |
| 93 MemoryBarrier(); | 93 MemoryBarrier(); |
| 94 return value; | 94 return value; |
| 95 } | 95 } |
| 96 | 96 |
| 97 inline Atomic32 Release_Load(volatile const Atomic32 *ptr) { | 97 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| 98 MemoryBarrier(); | 98 MemoryBarrier(); |
| 99 return *ptr; | 99 return *ptr; |
| 100 } | 100 } |
| 101 | 101 |
| 102 #ifdef __LP64__ | 102 #ifdef __LP64__ |
| 103 | 103 |
| 104 // 64-bit implementation on 64-bit platform | 104 // 64-bit implementation on 64-bit platform |
| 105 | 105 |
| 106 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64 *ptr, | 106 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 107 Atomic64 old_value, | 107 Atomic64 old_value, |
| 108 Atomic64 new_value) { | 108 Atomic64 new_value) { |
| 109 Atomic64 prev_value; | 109 Atomic64 prev_value; |
| 110 do { | 110 do { |
| 111 if (OSAtomicCompareAndSwap64(old_value, new_value, | 111 if (OSAtomicCompareAndSwap64(old_value, new_value, |
| 112 reinterpret_cast<volatile int64_t*>(ptr))) { | 112 reinterpret_cast<volatile int64_t*>(ptr))) { |
| 113 return old_value; | 113 return old_value; |
| 114 } | 114 } |
| 115 prev_value = *ptr; | 115 prev_value = *ptr; |
| 116 } while (prev_value == old_value); | 116 } while (prev_value == old_value); |
| 117 return prev_value; | 117 return prev_value; |
| 118 } | 118 } |
| 119 | 119 |
| 120 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64 *ptr, | 120 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
| 121 Atomic64 new_value) { | 121 Atomic64 new_value) { |
| 122 Atomic64 old_value; | 122 Atomic64 old_value; |
| 123 do { | 123 do { |
| 124 old_value = *ptr; | 124 old_value = *ptr; |
| 125 } while (!OSAtomicCompareAndSwap64(old_value, new_value, | 125 } while (!OSAtomicCompareAndSwap64(old_value, new_value, |
| 126 reinterpret_cast<volatile int64_t*>(ptr))); | 126 reinterpret_cast<volatile int64_t*>(ptr))); |
| 127 return old_value; | 127 return old_value; |
| 128 } | 128 } |
| 129 | 129 |
| 130 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64 *ptr, | 130 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
| 131 Atomic64 increment) { | 131 Atomic64 increment) { |
| 132 return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr)); | 132 return OSAtomicAdd64(increment, reinterpret_cast<volatile int64_t*>(ptr)); |
| 133 } | 133 } |
| 134 | 134 |
| 135 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64 *ptr, | 135 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
| 136 Atomic64 increment) { | 136 Atomic64 increment) { |
| 137 return OSAtomicAdd64Barrier(increment, | 137 return OSAtomicAdd64Barrier(increment, |
| 138 reinterpret_cast<volatile int64_t*>(ptr)); | 138 reinterpret_cast<volatile int64_t*>(ptr)); |
| 139 } | 139 } |
| 140 | 140 |
| 141 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64 *ptr, | 141 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| 142 Atomic64 old_value, | 142 Atomic64 old_value, |
| 143 Atomic64 new_value) { | 143 Atomic64 new_value) { |
| 144 Atomic64 prev_value; | 144 Atomic64 prev_value; |
| 145 do { | 145 do { |
| 146 if (OSAtomicCompareAndSwap64Barrier( | 146 if (OSAtomicCompareAndSwap64Barrier( |
| 147 old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) { | 147 old_value, new_value, reinterpret_cast<volatile int64_t*>(ptr))) { |
| 148 return old_value; | 148 return old_value; |
| 149 } | 149 } |
| 150 prev_value = *ptr; | 150 prev_value = *ptr; |
| 151 } while (prev_value == old_value); | 151 } while (prev_value == old_value); |
| 152 return prev_value; | 152 return prev_value; |
| 153 } | 153 } |
| 154 | 154 |
| 155 inline Atomic64 Release_CompareAndSwap(volatile Atomic64 *ptr, | 155 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| 156 Atomic64 old_value, | 156 Atomic64 old_value, |
| 157 Atomic64 new_value) { | 157 Atomic64 new_value) { |
| 158 // The libkern interface does not distinguish between | 158 // The libkern interface does not distinguish between |
| 159 // Acquire and Release memory barriers; they are equivalent. | 159 // Acquire and Release memory barriers; they are equivalent. |
| 160 return Acquire_CompareAndSwap(ptr, old_value, new_value); | 160 return Acquire_CompareAndSwap(ptr, old_value, new_value); |
| 161 } | 161 } |
| 162 | 162 |
| 163 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 163 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 164 *ptr = value; | 164 *ptr = value; |
| 165 } | 165 } |
| 166 | 166 |
| 167 inline void Acquire_Store(volatile Atomic64 *ptr, Atomic64 value) { | 167 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 168 *ptr = value; | 168 *ptr = value; |
| 169 MemoryBarrier(); | 169 MemoryBarrier(); |
| 170 } | 170 } |
| 171 | 171 |
| 172 inline void Release_Store(volatile Atomic64 *ptr, Atomic64 value) { | 172 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 173 MemoryBarrier(); | 173 MemoryBarrier(); |
| 174 *ptr = value; | 174 *ptr = value; |
| 175 } | 175 } |
| 176 | 176 |
| 177 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | 177 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
| 178 return *ptr; | 178 return *ptr; |
| 179 } | 179 } |
| 180 | 180 |
| 181 inline Atomic64 Acquire_Load(volatile const Atomic64 *ptr) { | 181 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| 182 Atomic64 value = *ptr; | 182 Atomic64 value = *ptr; |
| 183 MemoryBarrier(); | 183 MemoryBarrier(); |
| 184 return value; | 184 return value; |
| 185 } | 185 } |
| 186 | 186 |
| 187 inline Atomic64 Release_Load(volatile const Atomic64 *ptr) { | 187 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| 188 MemoryBarrier(); | 188 MemoryBarrier(); |
| 189 return *ptr; | 189 return *ptr; |
| 190 } | 190 } |
| 191 | 191 |
| 192 #endif // defined(__LP64__) | 192 #endif // defined(__LP64__) |
| 193 | 193 |
| 194 } // namespace subtle | 194 } // namespace subtle |
| 195 } // namespace base | 195 } // namespace base |
| 196 | 196 |
| 197 #endif // BASE_ATOMICOPS_INTERNALS_MAC_H_ | 197 #endif // BASE_ATOMICOPS_INTERNALS_MAC_H_ |
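
A minimal usage sketch, not part of the change above: as the header's own comment says, callers are meant to reach these primitives through base/atomicops.h rather than including this file directly. A common pattern is a release-store/acquire-load pair publishing a payload from one thread to another. The function and type names below are the ones defined in this file; the include path assumes a Chromium-style checkout, and Publish/Consume are illustrative names only.

    #include "base/atomicops.h"

    namespace {

    base::subtle::Atomic32 g_ready = 0;
    int g_payload = 0;

    // Writer thread: fill in the payload, then raise the flag.
    void Publish() {
      g_payload = 42;
      // Release_Store issues the barrier before the store (see above), so the
      // payload write cannot be reordered past the flag write.
      base::subtle::Release_Store(&g_ready, 1);
    }

    // Reader thread: wait for the flag, then read the payload.
    int Consume() {
      while (base::subtle::Acquire_Load(&g_ready) == 0) {
        // Spin. Acquire_Load issues the barrier after the load, so once the
        // flag is observed, the payload read below cannot be hoisted above it.
      }
      return g_payload;
    }

    }  // namespace

On this platform both directions reduce to OSMemoryBarrier(), a full barrier, which is why Release_CompareAndSwap in the diff simply forwards to Acquire_CompareAndSwap.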