| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 41 matching lines...) |
| 52 "1:\n" | 52 "1:\n" |
| 53 "ll %0, %5\n" // prev = *ptr | 53 "ll %0, %5\n" // prev = *ptr |
| 54 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 | 54 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 |
| 55 "move %2, %4\n" // tmp = new_value | 55 "move %2, %4\n" // tmp = new_value |
| 56 "sc %2, %1\n" // *ptr = tmp (with atomic check) | 56 "sc %2, %1\n" // *ptr = tmp (with atomic check) |
| 57 "beqz %2, 1b\n" // start again on atomic error | 57 "beqz %2, 1b\n" // start again on atomic error |
| 58 "nop\n" // delay slot nop | 58 "nop\n" // delay slot nop |
| 59 "2:\n" | 59 "2:\n" |
| 60 ".set pop\n" | 60 ".set pop\n" |
| 61 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) | 61 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) |
| 62 : "Ir" (old_value), "r" (new_value), "m" (*ptr) | 62 : "r" (old_value), "r" (new_value), "m" (*ptr) |
| 63 : "memory"); | 63 : "memory"); |
| 64 return prev; | 64 return prev; |
| 65 } | 65 } |
| 66 | 66 |
| 67 // Atomically store new_value into *ptr, returning the previous value held in | 67 // Atomically store new_value into *ptr, returning the previous value held in |
| 68 // *ptr. This routine implies no memory barriers. | 68 // *ptr. This routine implies no memory barriers. |
| 69 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 69 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
| 70 Atomic32 new_value) { | 70 Atomic32 new_value) { |
| 71 Atomic32 temp, old; | 71 Atomic32 temp, old; |
| 72 __asm__ __volatile__(".set push\n" | 72 __asm__ __volatile__(".set push\n" |
| (...skipping 116 matching lines...) |
| 189 "1:\n" | 189 "1:\n" |
| 190 "lld %0, %5\n" // prev = *ptr | 190 "lld %0, %5\n" // prev = *ptr |
| 191 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 | 191 "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 |
| 192 "move %2, %4\n" // tmp = new_value | 192 "move %2, %4\n" // tmp = new_value |
| 193 "scd %2, %1\n" // *ptr = tmp (with atomic check) | 193 "scd %2, %1\n" // *ptr = tmp (with atomic check) |
| 194 "beqz %2, 1b\n" // start again on atomic error | 194 "beqz %2, 1b\n" // start again on atomic error |
| 195 "nop\n" // delay slot nop | 195 "nop\n" // delay slot nop |
| 196 "2:\n" | 196 "2:\n" |
| 197 ".set pop\n" | 197 ".set pop\n" |
| 198 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) | 198 : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) |
| 199 : "Ir" (old_value), "r" (new_value), "m" (*ptr) | 199 : "r" (old_value), "r" (new_value), "m" (*ptr) |
| 200 : "memory"); | 200 : "memory"); |
| 201 return prev; | 201 return prev; |
| 202 } | 202 } |
| 203 | 203 |
| 204 // Atomically store new_value into *ptr, returning the previous value held in | 204 // Atomically store new_value into *ptr, returning the previous value held in |
| 205 // *ptr. This routine implies no memory barriers. | 205 // *ptr. This routine implies no memory barriers. |
| 206 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 206 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
| 207 Atomic64 new_value) { | 207 Atomic64 new_value) { |
| 208 Atomic64 temp, old; | 208 Atomic64 temp, old; |
| 209 __asm__ __volatile__(".set push\n" | 209 __asm__ __volatile__(".set push\n" |
| (...skipping 88 matching lines...) |
| 298 } | 298 } |
| 299 | 299 |
| 300 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 300 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| 301 MemoryBarrier(); | 301 MemoryBarrier(); |
| 302 return *ptr; | 302 return *ptr; |
| 303 } | 303 } |
| 304 | 304 |
| 305 } } // namespace v8::base | 305 } } // namespace v8::base |
| 306 | 306 |
| 307 #endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 307 #endif // V8_BASE_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
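
The only substantive change in this patch is the constraint string for old_value: "Ir" becomes "r" in both the 32-bit (line 62) and 64-bit (line 199) NoBarrier_CompareAndSwap hunks. A plausible reading, not stated in the diff itself: "I" lets GCC substitute a signed 16-bit immediate when old_value is a compile-time constant, but the MIPS bne instruction only takes two register operands, so a literal operand would force the assembler to macro-expand the branch (through the $at register) inside the .set push/pop region; pinning the operand with "r" keeps the ll/sc retry loop assembling exactly as written. Below is a minimal, self-contained sketch of the corrected 32-bit routine, written so it compiles standalone with a MIPS-targeting GCC; the ".set noreorder" directive and the Atomic32 typedef are assumptions standing in for the elided lines, and cas_sketch is a hypothetical name, not the V8 function.

    typedef int Atomic32;  // assumption: stands in for V8's own typedef

    // Sketch of the compare-and-swap loop with the corrected "r" constraint.
    static inline Atomic32 cas_sketch(volatile Atomic32* ptr,
                                      Atomic32 old_value,
                                      Atomic32 new_value) {
      Atomic32 prev, tmp;
      __asm__ __volatile__(".set push\n"
                           ".set noreorder\n"      // assumed from the elided lines
                           "1:\n"
                           "ll %0, %5\n"           // prev = *ptr (load-linked)
                           "bne %0, %3, 2f\n"      // old_value must be a register here
                           "move %2, %4\n"         // delay slot: tmp = new_value
                           "sc %2, %1\n"           // store-conditional tmp into *ptr
                           "beqz %2, 1b\n"         // sc failed: retry the loop
                           "nop\n"                 // delay slot nop
                           "2:\n"
                           ".set pop\n"
                           : "=&r" (prev), "=m" (*ptr), "=&r" (tmp)
                           : "r" (old_value), "r" (new_value), "m" (*ptr)
                           : "memory");
      return prev;
    }

With the old "Ir" constraint, a call such as cas_sketch(p, 42, 1) could emit "bne $2, 42, 2f", which the assembler only accepts as a multi-instruction macro; with "r", the compiler materializes the constant in a register first and every instruction in the sequence assembles one-for-one. The same reasoning carries over to the 64-bit hunk, which differs only in using lld/scd.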