Chromium Code Reviews

Side by Side Diff: third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_mips_gcc.h

Issue 517273003: Cherry pick r573 from upstream protobuf (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 6 years, 3 months ago
 // Protocol Buffers - Google's data interchange format
 // Copyright 2012 Google Inc. All rights reserved.
 // http://code.google.com/p/protobuf/
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
(...skipping 60 matching lines...)
 }
 
 // Atomically store new_value into *ptr, returning the previous value held in
 // *ptr. This routine implies no memory barriers.
 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                          Atomic32 new_value) {
   Atomic32 temp, old;
   __asm__ __volatile__(".set push\n"
                        ".set noreorder\n"
                        "1:\n"
-                       "ll %1, %2\n"  // old = *ptr
+                       "ll %1, %4\n"  // old = *ptr
                        "move %0, %3\n"  // temp = new_value
                        "sc %0, %2\n"  // *ptr = temp (with atomic check)
                        "beqz %0, 1b\n"  // start again on atomic error
                        "nop\n"  // delay slot nop
                        ".set pop\n"
                        : "=&r" (temp), "=&r" (old), "=m" (*ptr)
                        : "r" (new_value), "m" (*ptr)
                        : "memory");
 
   return old;
 }
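
In the exchange above the asm operands are numbered in order of declaration: %0 is temp, %1 is old, %2 is the "=m" output for *ptr, %3 is new_value, and %4 is the "m" input for *ptr. The cherry-picked fix makes the ll (load-linked) read through the input operand %4 instead of the write-only output %2; both name the same memory, but loading through an output-only operand misstates the dependency to the compiler. For comparison, a minimal sketch of the same relaxed exchange using the GCC/Clang __atomic builtins, which lower to an equivalent ll/sc retry loop on MIPS (the helper name is illustrative, not part of this header):

    // Sketch only: relaxed exchange via compiler builtins rather than
    // hand-written ll/sc. Atomic32 is the header's 32-bit integer type.
    static inline Atomic32 ExchangeRelaxedSketch(volatile Atomic32* ptr,
                                                 Atomic32 new_value) {
      // Stores new_value into *ptr and returns the prior value, with no
      // ordering guarantees under __ATOMIC_RELAXED.
      return __atomic_exchange_n(ptr, new_value, __ATOMIC_RELAXED);
    }
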
 
 // Atomically increment *ptr by "increment". Returns the new value of
 // *ptr with the increment applied. This routine implies no memory barriers.
 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                           Atomic32 increment) {
   Atomic32 temp, temp2;
 
   __asm__ __volatile__(".set push\n"
                        ".set noreorder\n"
                        "1:\n"
-                       "ll %0, %2\n"  // temp = *ptr
+                       "ll %0, %4\n"  // temp = *ptr
                        "addu %1, %0, %3\n"  // temp2 = temp + increment
                        "sc %1, %2\n"  // *ptr = temp2 (with atomic check)
                        "beqz %1, 1b\n"  // start again on atomic error
                        "addu %1, %0, %3\n"  // temp2 = temp + increment
                        ".set pop\n"
                        : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
                        : "Ir" (increment), "m" (*ptr)
                        : "memory");
   // temp2 now holds the final value.
   return temp2;
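
Note the second addu in the branch delay slot: sc overwrites %1 with its success flag, so temp2 must be recomputed after a successful store before it can be returned. With builtins, the same relaxed fetch-and-add is a single call (a sketch; the helper name is illustrative):

    // Sketch only: returns the updated value, matching the contract of
    // NoBarrier_AtomicIncrement.
    static inline Atomic32 IncrementRelaxedSketch(volatile Atomic32* ptr,
                                                  Atomic32 increment) {
      return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
    }
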
(...skipping 57 matching lines...)
   Atomic32 value = *ptr;
   MemoryBarrier();
   return value;
 }
 
 inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
   MemoryBarrier();
   return *ptr;
 }
 
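The tail of Acquire_Load above performs the plain load first and the barrier after, so later accesses cannot be hoisted above it, while Release_Load issues the barrier before loading. A sketch of the acquire form with the same builtins (helper name illustrative):

    // Sketch only: acquire load via builtin; the ordering constraint
    // replaces the separate MemoryBarrier() call.
    static inline Atomic32 AcquireLoadSketch(volatile const Atomic32* ptr) {
      return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
    }
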
+#if defined(__LP64__)
+// 64-bit versions of the atomic ops.
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+                                         Atomic64 old_value,
+                                         Atomic64 new_value) {
+  Atomic64 prev, tmp;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %5\n"  // prev = *ptr
+                       "bne %0, %3, 2f\n"  // if (prev != old_value) goto 2
+                       "move %2, %4\n"  // tmp = new_value
+                       "scd %2, %1\n"  // *ptr = tmp (with atomic check)
+                       "beqz %2, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       "2:\n"
+                       ".set pop\n"
+                       : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
+                       : "Ir" (old_value), "r" (new_value), "m" (*ptr)
+                       : "memory");
+  return prev;
+}
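
The 64-bit compare-and-swap mirrors the 32-bit version, with lld/scd, the doubleword load-linked/store-conditional pair, in place of ll/sc. A builtin-based sketch that returns the previous value the same way (helper name illustrative):

    // Sketch only: CAS that returns the previous value of *ptr.
    static inline Atomic64 CompareAndSwapRelaxedSketch(volatile Atomic64* ptr,
                                                       Atomic64 old_value,
                                                       Atomic64 new_value) {
      Atomic64 expected = old_value;
      // On failure the builtin writes the observed value back into
      // expected, so expected always ends up holding the previous *ptr.
      __atomic_compare_exchange_n(ptr, &expected, new_value,
                                  /*weak=*/false,
                                  __ATOMIC_RELAXED, __ATOMIC_RELAXED);
      return expected;
    }
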
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+                                         Atomic64 new_value) {
+  Atomic64 temp, old;
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %1, %4\n"  // old = *ptr
+                       "move %0, %3\n"  // temp = new_value
+                       "scd %0, %2\n"  // *ptr = temp (with atomic check)
+                       "beqz %0, 1b\n"  // start again on atomic error
+                       "nop\n"  // delay slot nop
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (old), "=m" (*ptr)
+                       : "r" (new_value), "m" (*ptr)
+                       : "memory");
+
+  return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+                                          Atomic64 increment) {
+  Atomic64 temp, temp2;
+
+  __asm__ __volatile__(".set push\n"
+                       ".set noreorder\n"
+                       "1:\n"
+                       "lld %0, %4\n"  // temp = *ptr
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       "scd %1, %2\n"  // *ptr = temp2 (with atomic check)
+                       "beqz %1, 1b\n"  // start again on atomic error
+                       "daddu %1, %0, %3\n"  // temp2 = temp + increment
+                       ".set pop\n"
+                       : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
+                       : "Ir" (increment), "m" (*ptr)
+                       : "memory");
+  // temp2 now holds the final value.
+  return temp2;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+                                        Atomic64 increment) {
+  MemoryBarrier();
+  Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+  MemoryBarrier();
+  return res;
+}
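
Barrier_AtomicIncrement brackets the relaxed read-modify-write with full barriers on both sides. A sequentially consistent fetch-add is roughly the builtin equivalent (a sketch, assuming the same Atomic64 type; helper name illustrative):

    // Sketch only: __ATOMIC_SEQ_CST stands in for the explicit
    // MemoryBarrier() calls around the relaxed increment.
    static inline Atomic64 BarrierIncrementSketch(volatile Atomic64* ptr,
                                                  Atomic64 increment) {
      return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
    }
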
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+  MemoryBarrier();
+  return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+                                       Atomic64 old_value,
+                                       Atomic64 new_value) {
+  MemoryBarrier();
+  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
+  *ptr = value;
+  MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
+  MemoryBarrier();
+  *ptr = value;
+}
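
A quick usage sketch of how these acquire/release wrappers are meant to pair up, a toy spinlock built from Acquire_CompareAndSwap and Release_Store (hypothetical helpers, illustration only): the acquire on entry keeps the critical section from being reordered above lock acquisition, and the release on exit keeps it from sinking below the unlock.

    // Sketch only: lock word is 0 when free, 1 when held. Real locks
    // need backoff and fairness; this shows just the ordering pattern.
    static inline void SpinLockSketch(volatile Atomic64* lock) {
      while (Acquire_CompareAndSwap(lock, 0, 1) != 0) {
        // Spin until the holder publishes 0 with release semantics.
      }
    }
    static inline void SpinUnlockSketch(volatile Atomic64* lock) {
      Release_Store(lock, 0);
    }
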
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
+  return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
+  Atomic64 value = *ptr;
+  MemoryBarrier();
+  return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
+  MemoryBarrier();
+  return *ptr;
+}
+#endif
+
 }  // namespace internal
 }  // namespace protobuf
 }  // namespace google
 
 #undef ATOMICOPS_COMPILER_BARRIER
 
 #endif  // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
