Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
| 6 // | 6 // |
| 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
| 8 | 8 |
| 9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
| 10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
| 11 | 11 |
| 12 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") | 12 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") |
|
Mark Mentovai
2013/06/06 21:50:19
Please remove this macro now, along with the #undef ATOMICOPS_COMPILER_BARRIER at the end of the file.
paul.l...
2013/06/06 21:57:51
Done.
| |
| 13 | 13 |
| 14 namespace base { | 14 namespace base { |
| 15 namespace subtle { | 15 namespace subtle { |
| 16 | 16 |
| 17 // Atomically execute: | 17 // Atomically execute: |
| 18 // result = *ptr; | 18 // result = *ptr; |
| 19 // if (*ptr == old_value) | 19 // if (*ptr == old_value) |
| 20 // *ptr = new_value; | 20 // *ptr = new_value; |
| 21 // return result; | 21 // return result; |
| 22 // | 22 // |
| (...skipping 60 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 83 ".set pop\n" | 83 ".set pop\n" |
| 84 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 84 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) |
| 85 : "Ir" (increment), "m" (*ptr) | 85 : "Ir" (increment), "m" (*ptr) |
| 86 : "memory"); | 86 : "memory"); |
| 87 // temp2 now holds the final value. | 87 // temp2 now holds the final value. |
| 88 return temp2; | 88 return temp2; |
| 89 } | 89 } |
| 90 | 90 |
| 91 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 91 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| 92 Atomic32 increment) { | 92 Atomic32 increment) { |
| 93 ATOMICOPS_COMPILER_BARRIER(); | 93 MemoryBarrier(); |
| 94 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); | 94 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); |
| 95 ATOMICOPS_COMPILER_BARRIER(); | 95 MemoryBarrier(); |
| 96 return res; | 96 return res; |
| 97 } | 97 } |
| 98 | 98 |
| 99 // "Acquire" operations | 99 // "Acquire" operations |
| 100 // ensure that no later memory access can be reordered ahead of the operation. | 100 // ensure that no later memory access can be reordered ahead of the operation. |
| 101 // "Release" operations ensure that no previous memory access can be reordered | 101 // "Release" operations ensure that no previous memory access can be reordered |
| 102 // after the operation. "Barrier" operations have both "Acquire" and "Release" | 102 // after the operation. "Barrier" operations have both "Acquire" and "Release" |
| 103 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory | 103 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory |
| 104 // access. | 104 // access. |
| 105 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 105 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 106 Atomic32 old_value, | 106 Atomic32 old_value, |
| 107 Atomic32 new_value) { | 107 Atomic32 new_value) { |
| 108 ATOMICOPS_COMPILER_BARRIER(); | |
| 109 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 108 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 110 ATOMICOPS_COMPILER_BARRIER(); | 109 MemoryBarrier(); |
| 111 return res; | 110 return res; |
| 112 } | 111 } |
| 113 | 112 |
| 114 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 113 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 115 Atomic32 old_value, | 114 Atomic32 old_value, |
| 116 Atomic32 new_value) { | 115 Atomic32 new_value) { |
| 117 ATOMICOPS_COMPILER_BARRIER(); | 116 MemoryBarrier(); |
| 118 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 117 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 119 ATOMICOPS_COMPILER_BARRIER(); | |
| 120 return res; | |
| 121 } | 118 } |
| 122 | 119 |
| 123 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 120 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 124 *ptr = value; | 121 *ptr = value; |
| 125 } | 122 } |
| 126 | 123 |
| 127 inline void MemoryBarrier() { | 124 inline void MemoryBarrier() { |
| 128 __asm__ __volatile__("sync" : : : "memory"); | 125 __asm__ __volatile__("sync" : : : "memory"); |
| 129 } | 126 } |
| 130 | 127 |
| (...skipping 21 matching lines...) Expand all Loading... | |
| 152 MemoryBarrier(); | 149 MemoryBarrier(); |
| 153 return *ptr; | 150 return *ptr; |
| 154 } | 151 } |
| 155 | 152 |
| 156 } // namespace base::subtle | 153 } // namespace base::subtle |
| 157 } // namespace base | 154 } // namespace base |
| 158 | 155 |
| 159 #undef ATOMICOPS_COMPILER_BARRIER | 156 #undef ATOMICOPS_COMPILER_BARRIER |
| 160 | 157 |
| 161 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 158 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
| OLD | NEW |