OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 73 matching lines...)
84 | 84 |
85 return old; | 85 return old; |
86 } | 86 } |
87 | 87 |
88 // Atomically increment *ptr by "increment". Returns the new value of | 88 // Atomically increment *ptr by "increment". Returns the new value of |
89 // *ptr with the increment applied. This routine implies no memory barriers. | 89 // *ptr with the increment applied. This routine implies no memory barriers. |
90 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 90 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
91 Atomic32 increment) { | 91 Atomic32 increment) { |
92 Atomic32 temp, temp2; | 92 Atomic32 temp, temp2; |
93 | 93 |
94 __asm__ __volatile__(".set push\n" | 94 __asm__ __volatile__( |
95 ".set noreorder\n" | 95 ".set push\n" |
96 "1:\n" | 96 ".set noreorder\n" |
97 "ll %0, %2\n" // temp = *ptr | 97 "1:\n" |
98 "addu %1, %0, %3\n" // temp2 = temp + increment | 98 "ll %0, %2\n" // temp = *ptr |
99 "sc %1, %2\n" // *ptr = temp2 (with atomic check) | 99 "addu %1, %0, %3\n" // temp2 = temp + increment |
100 "beqz %1, 1b\n" // start again on atomic error | 100 "sc %1, %2\n" // *ptr = temp2 (with atomic check) |
101 "addu %1, %0, %3\n" // temp2 = temp + increment | 101 "beqz %1, 1b\n" // start again on atomic error |
102 ".set pop\n" | 102 "addu %1, %0, %3\n" // temp2 = temp + increment |
103 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 103 ".set pop\n" |
104 : "Ir" (increment), "m" (*ptr) | 104 : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr) |
105 : "memory"); | 105 : "Ir"(increment), "m"(*ptr) |
| 106 : "memory"); |
106 // temp2 now holds the final value. | 107 // temp2 now holds the final value. |
107 return temp2; | 108 return temp2; |
108 } | 109 } |
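Note (not part of the patch): the loop above is the usual MIPS LL/SC retry pattern. ll loads the old value, sc attempts the store and leaves 0 in %1 if another CPU intervened, beqz retries in that case, and the delay-slot addu recomputes the result so %1 holds the final value after a successful sc. The constraint change from "=m" to "=ZC" tells GCC the operand must be addressable the way ll/sc address memory; presumably this matters on ISA revisions where ll/sc accept a smaller offset than ordinary loads (an assumption about the motivation, not stated in the diff). For reference, a minimal sketch of the same contract expressed with GCC's __atomic builtins (illustration only; the header deliberately keeps hand-written assembly):

// Illustration only, assuming a GCC/Clang toolchain with __atomic builtins.
// Same contract as NoBarrier_AtomicIncrement above: returns the new value
// and implies no memory barriers.
inline Atomic32 NoBarrier_AtomicIncrement_sketch(volatile Atomic32* ptr,
                                                 Atomic32 increment) {
  return __atomic_add_fetch(ptr, increment, __ATOMIC_RELAXED);
}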
109 | 110 |
110 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 111 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
111 Atomic32 increment) { | 112 Atomic32 increment) { |
112 MemoryBarrier(); | 113 MemoryBarrier(); |
113 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); | 114 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); |
114 MemoryBarrier(); | 115 MemoryBarrier(); |
115 return res; | 116 return res; |
(...skipping 105 matching lines...)
221 | 222 |
222 return old; | 223 return old; |
223 } | 224 } |
224 | 225 |
225 // Atomically increment *ptr by "increment". Returns the new value of | 226 // Atomically increment *ptr by "increment". Returns the new value of |
226 // *ptr with the increment applied. This routine implies no memory barriers. | 227 // *ptr with the increment applied. This routine implies no memory barriers. |
227 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 228 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
228 Atomic64 increment) { | 229 Atomic64 increment) { |
229 Atomic64 temp, temp2; | 230 Atomic64 temp, temp2; |
230 | 231 |
231 __asm__ __volatile__(".set push\n" | 232 __asm__ __volatile__( |
232 ".set noreorder\n" | 233 ".set push\n" |
233 "1:\n" | 234 ".set noreorder\n" |
234 "lld %0, %2\n" // temp = *ptr | 235 "1:\n" |
235 "daddu %1, %0, %3\n" // temp2 = temp + increment | 236 "lld %0, %2\n" // temp = *ptr |
236 "scd %1, %2\n" // *ptr = temp2 (with atomic check) | 237 "daddu %1, %0, %3\n" // temp2 = temp + increment |
237 "beqz %1, 1b\n" // start again on atomic error | 238 "scd %1, %2\n" // *ptr = temp2 (with atomic check) |
238 "daddu %1, %0, %3\n" // temp2 = temp + increment | 239 "beqz %1, 1b\n" // start again on atomic error |
239 ".set pop\n" | 240 "daddu %1, %0, %3\n" // temp2 = temp + increment |
240 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 241 ".set pop\n" |
241 : "Ir" (increment), "m" (*ptr) | 242 : "=&r"(temp), "=&r"(temp2), "=ZC"(*ptr) |
242 : "memory"); | 243 : "Ir"(increment), "m"(*ptr) |
| 244 : "memory"); |
243 // temp2 now holds the final value. | 245 // temp2 now holds the final value. |
244 return temp2; | 246 return temp2; |
245 } | 247 } |
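Note (not part of the patch): the 64-bit variant mirrors the 32-bit loop with lld/scd/daddu and receives the same "=m" to "=ZC" constraint change. Barrier_AtomicIncrement below brackets this relaxed loop with MemoryBarrier() on both sides; as a rough sketch of its strength (an assumption for illustration, using GCC builtins rather than the header's own primitives), the combined effect is at least as strong as a sequentially consistent add-and-fetch:

// Illustration only: approximate strength of Barrier_AtomicIncrement, which
// is MemoryBarrier(); relaxed ll/sc increment; MemoryBarrier().
inline Atomic64 Barrier_AtomicIncrement_sketch(volatile Atomic64* ptr,
                                               Atomic64 increment) {
  return __atomic_add_fetch(ptr, increment, __ATOMIC_SEQ_CST);
}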
246 | 248 |
247 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 249 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
248 Atomic64 increment) { | 250 Atomic64 increment) { |
249 MemoryBarrier(); | 251 MemoryBarrier(); |
250 Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); | 252 Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); |
251 MemoryBarrier(); | 253 MemoryBarrier(); |
252 return res; | 254 return res; |
(...skipping 46 matching lines...)
299 | 301 |
300 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 302 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
301 MemoryBarrier(); | 303 MemoryBarrier(); |
302 return *ptr; | 304 return *ptr; |
303 } | 305 } |
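Hypothetical usage sketch (not from this patch) of when the barrier and no-barrier increment variants defined in this header would each be chosen, using a simple reference count; the type and function names below are invented for illustration:

struct RefCounted {
  volatile Atomic32 ref_count;  // starts at 1 for the creating owner
};

inline void Ref(RefCounted* obj) {
  // Taking an extra reference needs no ordering of its own.
  NoBarrier_AtomicIncrement(&obj->ref_count, 1);
}

inline bool Unref(RefCounted* obj) {
  // The final release must be ordered against earlier writes to the object,
  // so the barrier variant is used; returns true when the last reference
  // was dropped.
  return Barrier_AtomicIncrement(&obj->ref_count, -1) == 0;
}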
304 | 306 |
305 } // namespace base | 307 } // namespace base |
306 } // namespace v8 | 308 } // namespace v8 |
307 | 309 |
308 #endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 310 #endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |