OLD | NEW |
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation; use base/atomicops.h instead. | 5 // This file is an internal atomic implementation; use base/atomicops.h instead. |
6 // | 6 // |
7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
8 | 8 |
9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
(...skipping 20 matching lines...) |
31 "1:\n" | 31 "1:\n" |
32 "ll %0, %5\n" // prev = *ptr | 32 "ll %0, %5\n" // prev = *ptr |
33 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 | 33 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 |
34 "move %2, %4\n" // tmp = new_value | 34 "move %2, %4\n" // tmp = new_value |
35 "sc %2, %1\n" // *ptr = tmp (with atomic check) | 35 "sc %2, %1\n" // *ptr = tmp (with atomic check) |
36 "beqz %2, 1b\n" // start again on atomic error | 36 "beqz %2, 1b\n" // start again on atomic error |
37 "nop\n" // delay slot nop | 37 "nop\n" // delay slot nop |
38 "2:\n" | 38 "2:\n" |
39 ".set pop\n" | 39 ".set pop\n" |
40 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) | 40 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) |
41 : "Ir" (old_value), "r" (new_value), "m" (*ptr) | 41 : "r" (old_value), "r" (new_value), "m" (*ptr) |
42 : "memory"); | 42 : "memory"); |
43 return prev; | 43 return prev; |
44 } | 44 } |
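
Note on the change itself: the only functional edit in this hunk is the input constraint on old_value, "Ir" -> "r". On MIPS, GCC's "I" constraint permits a signed 16-bit immediate, but old_value feeds the bne %0, %3, 2f above, and the bare bne instruction only encodes two register operands; with a constant old_value the compiler could pick the immediate form and leave the assembler to macro-expand (or reject) the branch. "r" always materializes old_value in a register. For the semantics, a minimal C++ sketch using GCC's __sync builtin (illustration only; __sync_val_compare_and_swap is a full barrier, so it is strictly stronger than the NoBarrier_ contract):

#include <stdint.h>

typedef int32_t Atomic32;  // stand-in for base::subtle::Atomic32

// Sketch only: returns the value *ptr held before the attempt, storing
// new_value iff that value equaled old_value -- the same contract as the
// ll/sc loop above, but with full-barrier semantics.
inline Atomic32 CompareAndSwap_Sketch(volatile Atomic32* ptr,
                                      Atomic32 old_value,
                                      Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}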
45 | 45 |
46 // Atomically store new_value into *ptr, returning the previous value held in | 46 // Atomically store new_value into *ptr, returning the previous value held in |
47 // *ptr. This routine implies no memory barriers. | 47 // *ptr. This routine implies no memory barriers. |
48 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 48 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
49 Atomic32 new_value) { | 49 Atomic32 new_value) { |
50 Atomic32 temp, old; | 50 Atomic32 temp, old; |
51 __asm__ __volatile__(".set push\n" | 51 __asm__ __volatile__(".set push\n" |
(...skipping 108 matching lines...) |
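
The elided NoBarrier_AtomicExchange body is the usual ll/sc retry loop. A hedged C++ sketch of what it computes, assuming that standard pattern (read the old value, attempt the store, retry until the sc succeeds):

#include <stdint.h>

typedef int32_t Atomic32;  // stand-in for base::subtle::Atomic32

// Assumption: the skipped asm follows the standard ll/sc exchange shape.
// __sync_bool_compare_and_swap plays the role of the sc that can fail and
// force a retry; the loop exits holding the previous value.
inline Atomic32 AtomicExchange_Sketch(volatile Atomic32* ptr,
                                      Atomic32 new_value) {
  Atomic32 old;
  do {
    old = *ptr;  // corresponds to ll: observe the current value
  } while (!__sync_bool_compare_and_swap(ptr, old, new_value));
  return old;  // previous value, per the comment above
}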
160 "1:\n" | 160 "1:\n" |
161 "lld %0, %5\n" // prev = *ptr | 161 "lld %0, %5\n" // prev = *ptr |
162 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 | 162 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 |
163 "move %2, %4\n" // tmp = new_value | 163 "move %2, %4\n" // tmp = new_value |
164 "scd %2, %1\n" // *ptr = tmp (with atomic check) | 164 "scd %2, %1\n" // *ptr = tmp (with atomic check) |
165 "beqz %2, 1b\n" // start again on atomic error | 165 "beqz %2, 1b\n" // start again on atomic error |
166 "nop\n" // delay slot nop | 166 "nop\n" // delay slot nop |
167 "2:\n" | 167 "2:\n" |
168 ".set pop\n" | 168 ".set pop\n" |
169 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) | 169 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) |
170 : "Ir" (old_value), "r" (new_value), "m" (*ptr) | 170 : "r" (old_value), "r" (new_value), "m" (*ptr) |
171 : "memory"); | 171 : "memory"); |
172 return prev; | 172 return prev; |
173 } | 173 } |
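
The Atomic64 variant is the same loop built on lld/scd, the 64-bit LL/SC pair (MIPS64 only), and it receives the identical "Ir" -> "r" fix for the bne operand. A hypothetical usage sketch (TryClaim and owner_id are illustrative names, not from this CL; assumes base/atomicops.h is included):

#include "base/atomicops.h"

// Claim a 64-bit slot only if it is still zero. The returned previous value
// tells the caller whether its swap is the one that landed.
bool TryClaim(volatile base::subtle::Atomic64* slot,
              base::subtle::Atomic64 owner_id) {
  base::subtle::Atomic64 prev =
      base::subtle::NoBarrier_CompareAndSwap(slot, 0, owner_id);
  return prev == 0;  // zero was observed, so our store stuck
}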
174 | 174 |
175 // Atomically store new_value into *ptr, returning the previous value held in | 175 // Atomically store new_value into *ptr, returning the previous value held in |
176 // *ptr. This routine implies no memory barriers. | 176 // *ptr. This routine implies no memory barriers. |
177 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 177 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
178 Atomic64 new_value) { | 178 Atomic64 new_value) { |
179 Atomic64 temp, old; | 179 Atomic64 temp, old; |
180 __asm__ __volatile__(".set push\n" | 180 __asm__ __volatile__(".set push\n" |
(...skipping 90 matching lines...) |
271 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 271 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
272 MemoryBarrier(); | 272 MemoryBarrier(); |
273 return *ptr; | 273 return *ptr; |
274 } | 274 } |
275 #endif | 275 #endif |
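
Release_Load here is a full MemoryBarrier() followed by a plain load. A sketch of the same shape in C++11 primitives (an assumption purely for illustration; this file predates <atomic>):

#include <atomic>
#include <stdint.h>

// Full fence, then an ordinary load -- mirroring MemoryBarrier(); return *ptr.
inline int64_t ReleaseLoad_Sketch(const volatile int64_t* ptr) {
  std::atomic_thread_fence(std::memory_order_seq_cst);  // MemoryBarrier()
  return *ptr;  // plain load ordered after the fence
}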
276 | 276 |
277 } // namespace base::subtle | 277 } // namespace base::subtle |
278 } // namespace base | 278 } // namespace base |
279 | 279 |
280 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 280 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |