OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
6 // | 6 // |
7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
8 | 8 |
9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
(...skipping 33 matching lines...)
44 } | 44 } |
45 | 45 |
46 // Atomically store new_value into *ptr, returning the previous value held in | 46 // Atomically store new_value into *ptr, returning the previous value held in |
47 // *ptr. This routine implies no memory barriers. | 47 // *ptr. This routine implies no memory barriers. |
48 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 48 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
49 Atomic32 new_value) { | 49 Atomic32 new_value) { |
50 Atomic32 temp, old; | 50 Atomic32 temp, old; |
51 __asm__ __volatile__(".set push\n" | 51 __asm__ __volatile__(".set push\n" |
52 ".set noreorder\n" | 52 ".set noreorder\n" |
53 "1:\n" | 53 "1:\n" |
54 "ll %1, %2\n" // old = *ptr | 54 "ll %1, %4\n" // old = *ptr |
55 "move %0, %3\n" // temp = new_value | 55 "move %0, %3\n" // temp = new_value |
56 "sc %0, %2\n" // *ptr = temp (with atomic check) | 56 "sc %0, %2\n" // *ptr = temp (with atomic check) |
57 "beqz %0, 1b\n" // start again on atomic error | 57 "beqz %0, 1b\n" // start again on atomic error |
58 "nop\n" // delay slot nop | 58 "nop\n" // delay slot nop |
59 ".set pop\n" | 59 ".set pop\n" |
60 : "=&r" (temp), "=&r" (old), "=m" (*ptr) | 60 : "=&r" (temp), "=&r" (old), "=m" (*ptr) |
61 : "r" (new_value), "m" (*ptr) | 61 : "r" (new_value), "m" (*ptr) |
62 : "memory"); | 62 : "memory"); |
63 | 63 |
64 return old; | 64 return old; |
65 } | 65 } |
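
Note on the ll operand change above (%2 to %4, and likewise in NoBarrier_AtomicIncrement below): GCC extended asm numbers operands outputs-first, so %0..%2 are the outputs (temp, old, "=m"(*ptr)) and %3/%4 are the inputs ("r"(new_value), "m"(*ptr)). The ll is a read, so it should name the read-only input memory operand %4 rather than the write-only "=m" output %2. A minimal standalone sketch of the numbering rule; add_sketch is a hypothetical helper used only for illustration and is not part of the patch:

    int add_sketch(int a, int b) {
      int out;
      // Operands are numbered outputs-first: the output "out" is %0,
      // then the inputs "a" and "b" become %1 and %2.
      __asm__("addu %0, %1, %2"
              : "=r"(out)         // %0: output
              : "r"(a), "r"(b));  // %1, %2: inputs
      return out;
    }
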
66 | 66 |
67 // Atomically increment *ptr by "increment". Returns the new value of | 67 // Atomically increment *ptr by "increment". Returns the new value of |
68 // *ptr with the increment applied. This routine implies no memory barriers. | 68 // *ptr with the increment applied. This routine implies no memory barriers. |
69 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 69 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
70 Atomic32 increment) { | 70 Atomic32 increment) { |
71 Atomic32 temp, temp2; | 71 Atomic32 temp, temp2; |
72 | 72 |
73 __asm__ __volatile__(".set push\n" | 73 __asm__ __volatile__(".set push\n" |
74 ".set noreorder\n" | 74 ".set noreorder\n" |
75 "1:\n" | 75 "1:\n" |
76 "ll %0, %2\n" // temp = *ptr | 76 "ll %0, %4\n" // temp = *ptr |
77 "addu %1, %0, %3\n" // temp2 = temp + increment | 77 "addu %1, %0, %3\n" // temp2 = temp + increment |
78 "sc %1, %2\n" // *ptr = temp2 (with atomic check) | 78 "sc %1, %2\n" // *ptr = temp2 (with atomic check) |
79 "beqz %1, 1b\n" // start again on atomic error | 79 "beqz %1, 1b\n" // start again on atomic error |
80 "addu %1, %0, %3\n" // temp2 = temp + increment | 80 "addu %1, %0, %3\n" // temp2 = temp + increment |
81 ".set pop\n" | 81 ".set pop\n" |
82 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 82 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) |
83 : "Ir" (increment), "m" (*ptr) | 83 : "Ir" (increment), "m" (*ptr) |
84 : "memory"); | 84 : "memory"); |
85 // temp2 now holds the final value. | 85 // temp2 now holds the final value. |
86 return temp2; | 86 return temp2; |
(...skipping 54 matching lines...)
141 Atomic32 value = *ptr; | 141 Atomic32 value = *ptr; |
142 MemoryBarrier(); | 142 MemoryBarrier(); |
143 return value; | 143 return value; |
144 } | 144 } |
145 | 145 |
146 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 146 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
147 MemoryBarrier(); | 147 MemoryBarrier(); |
148 return *ptr; | 148 return *ptr; |
149 } | 149 } |
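
As a usage sketch of the acquire/release pairs in this header (Acquire_Load is shown above; the matching 32-bit Release_Store sits in the elided span), a producer can publish data with Release_Store and a consumer can observe it with Acquire_Load. The g_ready and g_data names are illustrative only, not part of the header:

    Atomic32 g_ready = 0;  // hypothetical flag
    int g_data = 0;        // hypothetical payload

    void Producer() {
      g_data = 42;                               // ordinary write
      base::subtle::Release_Store(&g_ready, 1);  // earlier accesses cannot sink below this store
    }

    void Consumer() {
      if (base::subtle::Acquire_Load(&g_ready) == 1) {
        // Later accesses cannot hoist above the load, so the write to
        // g_data made before Release_Store is visible here.
        int value = g_data;
        (void)value;
      }
    }
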
150 | 150 |
| 151 #if defined(__LP64__) |
| 152 // 64-bit versions of the atomic ops. |
| 153 |
| 154 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 155 Atomic64 old_value, |
| 156 Atomic64 new_value) { |
| 157 Atomic64 prev, tmp; |
| 158 __asm__ __volatile__(".set push\n" |
| 159 ".set noreorder\n" |
| 160 "1:\n" |
| 161 "lld %0, %5\n" // prev = *ptr |
| 162 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 |
| 163 "move %2, %4\n" // tmp = new_value |
| 164 "scd %2, %1\n" // *ptr = tmp (with atomic check) |
| 165 "beqz %2, 1b\n" // start again on atomic error |
| 166 "nop\n" // delay slot nop |
| 167 "2:\n" |
| 168 ".set pop\n" |
| 169 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) |
| 170 : "Ir" (old_value), "r" (new_value), "m" (*ptr) |
| 171 : "memory"); |
| 172 return prev; |
| 173 } |
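
The previous value returned by the routine above is typically consumed in a retry loop. A hypothetical UpdateMax helper (not part of this header) sketches the pattern, raising *ptr to at least |candidate|:

    inline void UpdateMax(volatile Atomic64* ptr, Atomic64 candidate) {
      Atomic64 observed = *ptr;  // initial snapshot; staleness is handled by the loop
      while (observed < candidate) {
        Atomic64 prev =
            base::subtle::NoBarrier_CompareAndSwap(ptr, observed, candidate);
        if (prev == observed)
          break;          // the swap took effect; *ptr now holds candidate
        observed = prev;  // lost a race; retry against the value just seen
      }
    }
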
| 174 |
| 175 // Atomically store new_value into *ptr, returning the previous value held in |
| 176 // *ptr. This routine implies no memory barriers. |
| 177 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
| 178 Atomic64 new_value) { |
| 179 Atomic64 temp, old; |
| 180 __asm__ __volatile__(".set push\n" |
| 181 ".set noreorder\n" |
| 182 "1:\n" |
| 183 "lld %1, %4\n" // old = *ptr |
| 184 "move %0, %3\n" // temp = new_value |
| 185 "scd %0, %2\n" // *ptr = temp (with atomic check) |
| 186 "beqz %0, 1b\n" // start again on atomic error |
| 187 "nop\n" // delay slot nop |
| 188 ".set pop\n" |
| 189 : "=&r" (temp), "=&r" (old), "=m" (*ptr) |
| 190 : "r" (new_value), "m" (*ptr) |
| 191 : "memory"); |
| 192 |
| 193 return old; |
| 194 } |
| 195 |
| 196 // Atomically increment *ptr by "increment". Returns the new value of |
| 197 // *ptr with the increment applied. This routine implies no memory barriers. |
| 198 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
| 199 Atomic64 increment) { |
| 200 Atomic64 temp, temp2; |
| 201 |
| 202 __asm__ __volatile__(".set push\n" |
| 203 ".set noreorder\n" |
| 204 "1:\n" |
| 205 "lld %0, %4\n" // temp = *ptr |
| 206 "daddu %1, %0, %3\n" // temp2 = temp + increment |
| 207 "scd %1, %2\n" // *ptr = temp2 (with atomic check) |
| 208 "beqz %1, 1b\n" // start again on atomic error |
| 209 "daddu %1, %0, %3\n" // temp2 = temp + increment |
| 210 ".set pop\n" |
| 211 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) |
| 212 : "Ir" (increment), "m" (*ptr) |
| 213 : "memory"); |
| 214 // temp2 now holds the final value. |
| 215 return temp2; |
| 216 } |
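
A minimal usage sketch for the increment above; the g_event_count name is illustrative only:

    Atomic64 g_event_count = 0;  // hypothetical counter

    Atomic64 RecordEvent() {
      // Returns the count after this increment; imposes no ordering
      // on surrounding memory accesses.
      return base::subtle::NoBarrier_AtomicIncrement(&g_event_count, 1);
    }
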
| 217 |
| 218 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
| 219 Atomic64 increment) { |
| 220 MemoryBarrier(); |
| 221 Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); |
| 222 MemoryBarrier(); |
| 223 return res; |
| 224 } |
| 225 |
| 226 // "Acquire" operations |
| 227 // ensure that no later memory access can be reordered ahead of the operation. |
| 228 // "Release" operations ensure that no previous memory access can be reordered |
| 229 // after the operation. "Barrier" operations have both "Acquire" and "Release" |
| 230 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory |
| 231 // access. |
| 232 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| 233 Atomic64 old_value, |
| 234 Atomic64 new_value) { |
| 235 Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 236 MemoryBarrier(); |
| 237 return res; |
| 238 } |
| 239 |
| 240 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| 241 Atomic64 old_value, |
| 242 Atomic64 new_value) { |
| 243 MemoryBarrier(); |
| 244 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 245 } |
| 246 |
| 247 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 248 *ptr = value; |
| 249 } |
| 250 |
| 251 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 252 *ptr = value; |
| 253 MemoryBarrier(); |
| 254 } |
| 255 |
| 256 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 257 MemoryBarrier(); |
| 258 *ptr = value; |
| 259 } |
| 260 |
| 261 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
| 262 return *ptr; |
| 263 } |
| 264 |
| 265 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| 266 Atomic64 value = *ptr; |
| 267 MemoryBarrier(); |
| 268 return value; |
| 269 } |
| 270 |
| 271 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| 272 MemoryBarrier(); |
| 273 return *ptr; |
| 274 } |
| 275 #endif |
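
Since the Atomic64 routines above are compiled only under __LP64__, 32-bit MIPS builds get no 64-bit ops from this header. A caller-side sketch of the same guard; ResetCounter is a hypothetical example, not part of the patch:

    #if defined(__LP64__)
    inline Atomic64 ResetCounter(volatile Atomic64* counter) {
      // Atomically read the old value and store zero.
      return base::subtle::NoBarrier_AtomicExchange(counter, 0);
    }
    #endif  // defined(__LP64__)
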
| 276 |
151 } // namespace base::subtle | 277 } // namespace base::subtle |
152 } // namespace base | 278 } // namespace base |
153 | 279 |
154 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 280 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |