OLD | NEW |
---|---|
1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2012 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
6 // | 6 // |
7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. | 7 // LinuxKernelCmpxchg and Barrier_AtomicIncrement are from Google Gears. |
8 | 8 |
9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 9 #ifndef BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 10 #define BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
(...skipping 130 matching lines...) | |
141 Atomic32 value = *ptr; | 141 Atomic32 value = *ptr; |
142 MemoryBarrier(); | 142 MemoryBarrier(); |
143 return value; | 143 return value; |
144 } | 144 } |
145 | 145 |
146 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 146 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
147 MemoryBarrier(); | 147 MemoryBarrier(); |
148 return *ptr; | 148 return *ptr; |
149 } | 149 } |
150 | 150 |
151 #if defined(__LP64__) | |
152 // 64-bit versions of the atomic ops. | |
153 | |
154 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | |
155 Atomic64 old_value, | |
156 Atomic64 new_value) { | |
157 Atomic64 prev, tmp; | |
158 __asm__ __volatile__(".set push\n" | |
159 ".set noreorder\n" | |
160 "1:\n" | |
161 "lld %0, %5\n" // prev = *ptr | |
agl 2014/08/12 18:14:22: Can you post a disassembly of this function? (objdump ...)
gordanac 2014/08/12 20:30:22: Here it is: 0000000000000000 <NoBarrier_CompareAndSwap> ...
agl 2014/08/12 21:05:49: That seems sane (although not optimised). But don't ...
petarj 2014/08/12 23:40:40: This function gets inlined, so the code is more optimised ...
agl 2014/08/13 17:52:37: Fair enough. glibc doing it should mean that any c...
162 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 | |
163 "move %2, %4\n" // tmp = new_value | |
164 "scd %2, %1\n" // *ptr = tmp (with atomic check) | |
165 "beqz %2, 1b\n" // start again on atomic error | |
166 "nop\n" // delay slot nop | |
167 "2:\n" | |
168 ".set pop\n" | |
169 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) | |
170 : "Ir" (old_value), "r" (new_value), "m" (*ptr) | |
171 : "memory"); | |
172 return prev; | |
173 } | |
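Note (not part of the CL): the lld/scd loop above implements a relaxed compare-and-swap that returns the value previously held in *ptr; a disassembly like the one agl asked for can be produced by compiling a small caller and running objdump -d on the object file. Purely as an illustration of that contract (the function name below is hypothetical), the same behaviour can be written with the GCC/Clang __atomic builtins:

// Sketch only: same semantics as the asm version above, assuming Atomic64 is
// the 64-bit type defined by base/atomicops.h.
inline Atomic64 RelaxedCompareAndSwap64(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
  Atomic64 expected = old_value;
  // On failure the builtin writes the value actually observed into
  // |expected|, so returning it matches the "return prev" contract.
  __atomic_compare_exchange_n(ptr, &expected, new_value,
                              false /* strong */,
                              __ATOMIC_RELAXED, __ATOMIC_RELAXED);
  return expected;
}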
174 | |
175 // Atomically store new_value into *ptr, returning the previous value held in | |
176 // *ptr. This routine implies no memory barriers. | |
177 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | |
178 Atomic64 new_value) { | |
179 Atomic64 temp, old; | |
180 __asm__ __volatile__(".set push\n" | |
181 ".set noreorder\n" | |
182 "1:\n" | |
183 "lld %1, %2\n" // old = *ptr | |
agl 2014/08/13 17:52:37: The %2 here is an output argument being used as an input.
gordanac 2014/08/14 12:09:33: Done.
184 "move %0, %3\n" // temp = new_value | |
185 "scd %0, %2\n" // *ptr = temp (with atomic check) | |
186 "beqz %0, 1b\n" // start again on atomic error | |
187 "nop\n" // delay slot nop | |
188 ".set pop\n" | |
189 : "=&r" (temp), "=&r" (old), "=m" (*ptr) | |
190 : "r" (new_value), "m" (*ptr) | |
191 : "memory"); | |
192 | |
193 return old; | |
194 } | |
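Following up on agl's comment above: "lld %1, %2" reads through operand %2, which is declared only as an output ("=m"). One plausible shape of the fix (an assumption about the follow-up patch, not the committed code) is to read through the input memory operand %4 instead, mirroring what NoBarrier_CompareAndSwap already does with its %5 input:

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  Atomic64 temp, old;
  __asm__ __volatile__(".set push\n"
                       ".set noreorder\n"
                       "1:\n"
                       "lld %1, %4\n"   // old = *ptr, read via the "m" input operand
                       "move %0, %3\n"  // temp = new_value
                       "scd %0, %2\n"   // *ptr = temp, written via the "=m" output operand
                       "beqz %0, 1b\n"  // retry if the store-conditional failed
                       "nop\n"          // branch delay slot
                       ".set pop\n"
                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                       : "r" (new_value), "m" (*ptr)
                       : "memory");
  return old;
}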
195 | |
196 // Atomically increment *ptr by "increment". Returns the new value of | |
197 // *ptr with the increment applied. This routine implies no memory barriers. | |
198 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | |
199 Atomic64 increment) { | |
200 Atomic64 temp, temp2; | |
201 | |
202 __asm__ __volatile__(".set push\n" | |
203 ".set noreorder\n" | |
204 "1:\n" | |
205 "lld %0, %2\n" // temp = *ptr | |
agl 2014/08/13 17:52:37: ditto.
gordanac 2014/08/14 12:09:33: Done.
206 "daddu %1, %0, %3\n" // temp2 = temp + increment | |
207 "scd %1, %2\n" // *ptr = temp2 (with atomic check) | |
208 "beqz %1, 1b\n" // start again on atomic error | |
209 "daddu %1, %0, %3\n" // temp2 = temp + increment | |
210 ".set pop\n" | |
211 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | |
212 : "Ir" (increment), "m" (*ptr) | |
213 : "memory"); | |
214 // temp2 now holds the final value. | |
215 return temp2; | |
216 } | |
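A note on the loop above: scd overwrites %1 with its success flag, so the daddu in the branch delay slot recomputes temp + increment; on success that recomputed value is what gets returned. Contract-wise this is a relaxed fetch-and-add that returns the new value, i.e. (sketch only, hypothetical name) what the GCC builtin already provides:

inline Atomic64 RelaxedIncrement64(volatile Atomic64* ptr, Atomic64 increment) {
  // Atomically adds |increment| and returns the resulting value, with no
  // ordering guarantees -- matching NoBarrier_AtomicIncrement above.
  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
}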
217 | |
218 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | |
219 Atomic64 increment) { | |
220 MemoryBarrier(); | |
221 Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); | |
222 MemoryBarrier(); | |
223 return res; | |
224 } | |
225 | |
226 // "Acquire" operations | |
227 // ensure that no later memory access can be reordered ahead of the operation. | |
228 // "Release" operations ensure that no previous memory access can be reordered | |
229 // after the operation. "Barrier" operations have both "Acquire" and "Release" | |
230 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory | |
231 // access. | |
232 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | |
233 Atomic64 old_value, | |
234 Atomic64 new_value) { | |
235 Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
236 MemoryBarrier(); | |
237 return res; | |
238 } | |
239 | |
240 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | |
241 Atomic64 old_value, | |
242 Atomic64 new_value) { | |
243 MemoryBarrier(); | |
244 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
245 } | |
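Usage sketch (not part of the CL; the names below are hypothetical): the Acquire/Release split described in the comment above maps naturally onto lock acquisition and release. Taking a lock needs Acquire semantics so later accesses cannot float above it; dropping it needs Release semantics so earlier accesses cannot sink below it:

inline void SpinLock(volatile Atomic64* lock_word) {
  // Loop until the previous value was 0, i.e. until we installed the 1.
  while (Acquire_CompareAndSwap(lock_word, 0, 1) != 0) {
  }
}

inline void SpinUnlock(volatile Atomic64* lock_word) {
  // We hold the lock, so the word is 1; a release store would also do.
  Release_CompareAndSwap(lock_word, 1, 0);
}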
246 | |
247 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | |
248 *ptr = value; | |
249 } | |
250 | |
251 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { | |
252 *ptr = value; | |
253 MemoryBarrier(); | |
254 } | |
255 | |
256 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | |
257 MemoryBarrier(); | |
258 *ptr = value; | |
259 } | |
260 | |
261 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | |
262 return *ptr; | |
263 } | |
264 | |
265 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | |
266 Atomic64 value = *ptr; | |
267 MemoryBarrier(); | |
268 return value; | |
269 } | |
270 | |
271 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | |
272 MemoryBarrier(); | |
273 return *ptr; | |
274 } | |
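Another usage sketch (hypothetical types and functions, not part of the CL): Release_Store publishes data written before it, and Acquire_Load guarantees that reads issued after it see that data, which is the producer/consumer pattern the Acquire/Release comment earlier in the 64-bit block describes:

struct SharedState {
  Atomic64 payload;
  Atomic64 ready;
};

inline void Publish(SharedState* s, Atomic64 value) {
  NoBarrier_Store(&s->payload, value);
  Release_Store(&s->ready, 1);  // barrier first, so |payload| is visible before |ready|
}

inline bool TryConsume(SharedState* s, Atomic64* out) {
  if (Acquire_Load(&s->ready) == 0)
    return false;                      // not published yet
  *out = NoBarrier_Load(&s->payload);  // ordered after the load + barrier above
  return true;
}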
275 #endif | |
276 | |
151 } // namespace base::subtle | 277 } // namespace base::subtle |
152 } // namespace base | 278 } // namespace base |
153 | 279 |
154 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 280 #endif // BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |