OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
6 // | 6 // |
7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
8 | 8 |
9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
(...skipping 72 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
83 ".set pop\n" | 83 ".set pop\n" |
84 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 84 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) |
85 : "Ir" (increment), "m" (*ptr) | 85 : "Ir" (increment), "m" (*ptr) |
86 : "memory"); | 86 : "memory"); |
87 // temp2 now holds the final value. | 87 // temp2 now holds the final value. |
88 return temp2; | 88 return temp2; |
89 } | 89 } |
90 | 90 |
91 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 91 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
92 Atomic32 increment) { | 92 Atomic32 increment) { |
93 ATOMICOPS_COMPILER_BARRIER(); | 93 MemoryBarrier(); |
94 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); | 94 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); |
95 ATOMICOPS_COMPILER_BARRIER(); | 95 MemoryBarrier(); |
96 return res; | 96 return res; |
97 } | 97 } |
98 | 98 |
99 // "Acquire" operations | 99 // "Acquire" operations |
100 // ensure that no later memory access can be reordered ahead of the operation. | 100 // ensure that no later memory access can be reordered ahead of the operation. |
101 // "Release" operations ensure that no previous memory access can be reordered | 101 // "Release" operations ensure that no previous memory access can be reordered |
102 // after the operation. "Barrier" operations have both "Acquire" and "Release" | 102 // after the operation. "Barrier" operations have both "Acquire" and "Release" |
103 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory | 103 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory |
104 // access. | 104 // access. |
105 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 105 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
106 Atomic32 old_value, | 106 Atomic32 old_value, |
107 Atomic32 new_value) { | 107 Atomic32 new_value) { |
108 ATOMICOPS_COMPILER_BARRIER(); | 108 MemoryBarrier(); |
Dmitry Vyukov
2013/06/06 08:14:44
It must be the other way around:
For acquire: memory barrier after the operation; for release: memory barrier before the operation.
paul.l...
2013/06/06 17:07:03
Done. Thanks for the review!
| |
109 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 109 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
110 ATOMICOPS_COMPILER_BARRIER(); | |
111 return res; | 110 return res; |
112 } | 111 } |
113 | 112 |
114 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 113 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
115 Atomic32 old_value, | 114 Atomic32 old_value, |
116 Atomic32 new_value) { | 115 Atomic32 new_value) { |
117 ATOMICOPS_COMPILER_BARRIER(); | |
118 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 116 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
119 ATOMICOPS_COMPILER_BARRIER(); | 117 MemoryBarrier(); |
120 return res; | 118 return res; |
121 } | 119 } |
122 | 120 |
123 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 121 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
124 *ptr = value; | 122 *ptr = value; |
125 } | 123 } |
126 | 124 |
127 inline void MemoryBarrier() { | 125 inline void MemoryBarrier() { |
128 __asm__ __volatile__("sync" : : : "memory"); | 126 __asm__ __volatile__("sync" : : : "memory"); |
129 } | 127 } |
(...skipping 22 matching lines...) Expand all Loading... | |
152 MemoryBarrier(); | 150 MemoryBarrier(); |
153 return *ptr; | 151 return *ptr; |
154 } | 152 } |
155 | 153 |
156 } // namespace base::subtle | 154 } // namespace base::subtle |
157 } // namespace base | 155 } // namespace base |
158 | 156 |
159 #undef ATOMICOPS_COMPILER_BARRIER | 157 #undef ATOMICOPS_COMPILER_BARRIER |
160 | 158 |
161 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 159 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
OLD | NEW |