OLD | NEW |
(Empty) | |
| 1 // Copyright (c) 2011, Google Inc. |
| 2 // All rights reserved. |
| 3 // |
| 4 // Redistribution and use in source and binary forms, with or without |
| 5 // modification, are permitted provided that the following conditions are |
| 6 // met: |
| 7 // |
| 8 // * Redistributions of source code must retain the above copyright |
| 9 // notice, this list of conditions and the following disclaimer. |
| 10 // * Redistributions in binary form must reproduce the above |
| 11 // copyright notice, this list of conditions and the following disclaimer |
| 12 // in the documentation and/or other materials provided with the |
| 13 // distribution. |
| 14 // * Neither the name of Google Inc. nor the names of its |
| 15 // contributors may be used to endorse or promote products derived from |
| 16 // this software without specific prior written permission. |
| 17 // |
| 18 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 19 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 20 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 21 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 22 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 23 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 24 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 25 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 26 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 27 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 28 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 29 // --- |
| 30 // |
| 31 // Author: Sasha Levitskiy |
| 32 // based on atomicops-internals by Sanjay Ghemawat |
| 33 // |
| 34 // This file is an internal atomic implementation, use base/atomicops.h instead. |
| 35 // |
| 36 // This code implements ARM atomics for architectures V6 and newer. |
| 37 |
| 38 #ifndef BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ |
| 39 #define BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ |
| 40 |
| 41 #include <stdio.h> |
| 42 #include <stdlib.h> |
| 43 #include "base/basictypes.h" // For COMPILE_ASSERT |
| 44 |
| 45 typedef int32_t Atomic32; |
| 46 |
| 47 namespace base { |
| 48 namespace subtle { |
| 49 |
| 50 typedef int64_t Atomic64; |
| 51 |
| 52 // 32-bit low-level ops |
| 53 |
// Atomically: if (*ptr == old_value) *ptr = new_value.  Returns the value
// of *ptr observed by the exclusive load (== old_value iff the swap took
// place).  Provides no memory-ordering guarantees.
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 oldval, res;
  do {
    __asm__ __volatile__(
    "ldrex %1, [%3]\n"      // exclusive-load *ptr into oldval
    "mov %0, #0\n"          // res = 0: default "done" when no store happens
    "teq %1, %4\n"          // does oldval match old_value?
    "strexeq %0, %5, [%3]\n"  // if equal, exclusive-store new_value;
                              // strex sets res = 1 when the reservation
                              // was lost and the store did not happen
    : "=&r" (res), "=&r" (oldval), "+Qo" (*ptr)
    : "r" (ptr), "Ir" (old_value), "r" (new_value)
    : "cc");
  } while (res);  // retry only on a failed exclusive store
  return oldval;
}
| 70 |
// Atomically stores new_value into *ptr and returns the previous value.
// No memory-ordering guarantees.
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  Atomic32 tmp, old;
  __asm__ __volatile__(
      "1:\n"
      "ldrex %1, [%2]\n"    // exclusive-load previous value into old
      "strex %0, %3, [%2]\n"  // try to exclusive-store new_value; tmp = status
      "teq %0, #0\n"        // tmp == 0 means the store succeeded
      "bne 1b"              // otherwise retry the load/store pair
      : "=&r" (tmp), "=&r" (old)
      : "r" (ptr), "r" (new_value)
      : "cc", "memory");
  return old;
}
| 85 |
// Atomically adds increment to *ptr and returns the new (incremented)
// value.  No memory-ordering guarantees.
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 tmp, res;
  __asm__ __volatile__(
      "1:\n"
      "ldrex %1, [%2]\n"    // exclusive-load current value into res
      "add %1, %1, %3\n"    // res += increment
      "strex %0, %1, [%2]\n"  // try to exclusive-store res; tmp = status
      "teq %0, #0\n"        // tmp == 0 means the store succeeded
      "bne 1b"              // otherwise retry
      : "=&r" (tmp), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}
| 101 |
// Full hardware memory barrier (data memory barrier instruction) plus a
// compiler barrier via the "memory" clobber.
// NOTE(review): "dmb" is an ARMv7+ instruction, but the file header says
// this code targets V6 and newer — on plain ARMv6 the equivalent is a
// CP15 operation (mcr p15, 0, rX, c7, c10, 5).  Verify the minimum
// architecture actually built for.
inline void MemoryBarrier() {
  __asm__ __volatile__("dmb" : : : "memory");
}
| 105 |
// Atomically adds increment to *ptr and returns the new value, with a
// dmb inside the ldrex/strex retry loop so the update is ordered with
// respect to surrounding memory accesses.
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 tmp, res;
  __asm__ __volatile__(
      "1:\n"
      "ldrex %1, [%2]\n"    // exclusive-load current value into res
      "add %1, %1, %3\n"    // res += increment
      "dmb\n"               // barrier before publishing the new value
      "strex %0, %1, [%2]\n"  // try to exclusive-store res; tmp = status
      "teq %0, #0\n"        // tmp == 0 means the store succeeded
      "bne 1b"              // otherwise retry
      : "=&r" (tmp), "=&r"(res)
      : "r" (ptr), "r"(increment)
      : "cc", "memory");
  return res;
}
| 122 |
| 123 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 124 Atomic32 old_value, |
| 125 Atomic32 new_value) { |
| 126 Atomic32 value = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 127 MemoryBarrier(); |
| 128 return value; |
| 129 } |
| 130 |
| 131 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 132 Atomic32 old_value, |
| 133 Atomic32 new_value) { |
| 134 MemoryBarrier(); |
| 135 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 136 } |
| 137 |
// Plain volatile store; no ordering guarantees beyond the compiler not
// eliding or reordering the volatile access itself.
inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}
| 141 |
// Store followed by a full barrier: the store is visible before any
// later memory accesses (this library's "acquire store" convention).
inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}
| 146 |
// Barrier followed by a store: all earlier memory accesses complete
// before the new value is published.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  MemoryBarrier();
  *ptr = value;
}
| 151 |
// Plain volatile load; no ordering guarantees.
inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}
| 155 |
// Load followed by a full barrier: later accesses cannot be reordered
// above the load (acquire semantics).
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  MemoryBarrier();
  return value;
}
| 161 |
// Barrier followed by a load: earlier accesses complete before the load
// (this library's "release load" convention).
inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
| 166 |
| 167 // 64-bit versions are not implemented yet. |
| 168 |
// Prints "64-bit <function_name>() not implemented on this platform" to
// stderr and aborts.  Shared failure path for all Atomic64 stubs below.
inline void NotImplementedFatalError(const char *function_name) {
  fprintf(stderr, "64-bit %s() not implemented on this platform\n",
          function_name);
  abort();
}
| 174 |
// 64-bit CAS is not implemented for this architecture; aborts at runtime.
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_CompareAndSwap");
  return 0;  // unreachable; silences missing-return warnings
}
| 181 |
// 64-bit exchange is not implemented for this architecture; aborts.
inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  NotImplementedFatalError("NoBarrier_AtomicExchange");
  return 0;  // unreachable
}
| 187 |
// 64-bit increment is not implemented for this architecture; aborts.
inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  NotImplementedFatalError("NoBarrier_AtomicIncrement");
  return 0;  // unreachable
}
| 193 |
// 64-bit barrier increment is not implemented for this architecture; aborts.
inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  NotImplementedFatalError("Barrier_AtomicIncrement");
  return 0;  // unreachable
}
| 199 |
// 64-bit store is not implemented for this architecture; aborts.
inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  NotImplementedFatalError("NoBarrier_Store");
}
| 203 |
| 204 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 205 NotImplementedFatalError("Acquire_Store64"); |
| 206 } |
| 207 |
// 64-bit release store is not implemented for this architecture; aborts.
inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  NotImplementedFatalError("Release_Store");
}
| 211 |
// 64-bit load is not implemented for this architecture; aborts.
inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  NotImplementedFatalError("NoBarrier_Load");
  return 0;  // unreachable
}
| 216 |
// 64-bit acquire load is not implemented for this architecture; aborts.
inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  NotImplementedFatalError("Atomic64 Acquire_Load");
  return 0;  // unreachable
}
| 221 |
// 64-bit release load is not implemented for this architecture; aborts.
inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  NotImplementedFatalError("Atomic64 Release_Load");
  return 0;  // unreachable
}
| 226 |
// 64-bit acquire CAS is not implemented for this architecture; aborts.
inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  NotImplementedFatalError("Atomic64 Acquire_CompareAndSwap");
  return 0;  // unreachable
}
| 233 |
// 64-bit release CAS is not implemented for this architecture; aborts.
inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  NotImplementedFatalError("Atomic64 Release_CompareAndSwap");
  return 0;  // unreachable
}
| 240 |
| 241 } // namespace subtle ends |
| 242 } // namespace base ends |
| 243 |
| 244 #endif // BASE_ATOMICOPS_INTERNALS_ARM_V6PLUS_H_ |
OLD | NEW |