| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 12 matching lines...) |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 // This file is an internal atomic implementation, use atomicops.h instead. | 28 // This file is an internal atomic implementation, use atomicops.h instead. |
| 29 | 29 |
| 30 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 30 #ifndef V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
| 31 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 31 #define V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
| 32 | 32 |
| 33 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") | |
| 34 | |
| 35 namespace v8 { | 33 namespace v8 { |
| 36 namespace internal { | 34 namespace internal { |
| 37 | 35 |
| 38 // Atomically execute: | 36 // Atomically execute: |
| 39 // result = *ptr; | 37 // result = *ptr; |
| 40 // if (*ptr == old_value) | 38 // if (*ptr == old_value) |
| 41 // *ptr = new_value; | 39 // *ptr = new_value; |
| 42 // return result; | 40 // return result; |
| 43 // | 41 // |
| 44 // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". | 42 // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". |
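Reviewer note: the contract spelled out in the comment above is the classic compare-and-swap. For reference, the same semantics can be sketched with GCC's __sync builtin (shown only for comparison; the builtin also implies a full hardware barrier, which the NoBarrier_ variant in this file deliberately avoids):

```c++
#include <stdint.h>

typedef int32_t Atomic32;

// Sketch only, not part of this CL: returns the value *ptr held before the
// operation; the store to *ptr happens only if that value equaled old_value.
inline Atomic32 Sketch_CompareAndSwap(volatile Atomic32* ptr,
                                      Atomic32 old_value,
                                      Atomic32 new_value) {
  return __sync_val_compare_and_swap(ptr, old_value, new_value);
}
```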
| (...skipping 59 matching lines...) |
| 104 ".set pop\n" | 102 ".set pop\n" |
| 105 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 103 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) |
| 106 : "Ir" (increment), "m" (*ptr) | 104 : "Ir" (increment), "m" (*ptr) |
| 107 : "memory"); | 105 : "memory"); |
| 108 // temp2 now holds the final value. | 106 // temp2 now holds the final value. |
| 109 return temp2; | 107 return temp2; |
| 110 } | 108 } |
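Reviewer note: the constraints visible in this hunk belong to a load-linked/store-conditional retry loop. A self-contained sketch of that loop, reconstructed around the fragment shown above (the elided body in the actual file may differ in detail):

```c++
// Hypothetical reconstruction, assuming a MIPS32 target. "ll" performs a
// linked load; "sc" stores conditionally, writing 1 to its register on
// success and 0 if another CPU touched the location in between.
inline Atomic32 Sketch_AtomicIncrement(volatile Atomic32* ptr,
                                       Atomic32 increment) {
  Atomic32 temp, temp2;
  __asm__ __volatile__(
      ".set push\n"
      ".set noreorder\n"
      "1:\n"
      "ll %0, %4\n"        // temp = *ptr (linked load).
      "addu %1, %0, %3\n"  // temp2 = temp + increment.
      "sc %1, %2\n"        // Try *ptr = temp2; %1 becomes the success flag.
      "beqz %1, 1b\n"      // Retry if the store-conditional failed.
      "addu %1, %0, %3\n"  // Branch delay slot: recompute temp2.
      ".set pop\n"
      : "=&r" (temp), "=&r" (temp2), "=m" (*ptr)
      : "Ir" (increment), "m" (*ptr)
      : "memory");
  return temp2;  // temp2 now holds the final value.
}
```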
| 111 | 109 |
| 112 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | 110 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
| 113 Atomic32 increment) { | 111 Atomic32 increment) { |
| 114 ATOMICOPS_COMPILER_BARRIER(); | 112 MemoryBarrier(); |
| 115 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); | 113 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); |
| 116 ATOMICOPS_COMPILER_BARRIER(); | 114 MemoryBarrier(); |
| 117 return res; | 115 return res; |
| 118 } | 116 } |
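Reviewer note: this hunk is the substance of the change. ATOMICOPS_COMPILER_BARRIER only constrained the compiler, while MemoryBarrier() (defined further down in this file) emits a real MIPS sync instruction. Restating the two definitions from this file side by side:

```c++
// Compiler-only barrier: the empty asm with a "memory" clobber stops the
// compiler from reordering memory accesses across it, but emits no
// instruction, so another CPU may still observe the accesses out of order.
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

// Hardware barrier: "sync" orders the accesses for other CPUs as well.
inline void MemoryBarrier() {
  __asm__ __volatile__("sync" : : : "memory");
}
```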
| 119 | 117 |
| 120 // "Acquire" operations | 118 // "Acquire" operations |
| 121 // ensure that no later memory access can be reordered ahead of the operation. | 119 // ensure that no later memory access can be reordered ahead of the operation. |
| 122 // "Release" operations ensure that no previous memory access can be reordered | 120 // "Release" operations ensure that no previous memory access can be reordered |
| 123 // after the operation. "Barrier" operations have both "Acquire" and "Release" | 121 // after the operation. "Barrier" operations have both "Acquire" and "Release" |
| 124 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory | 122 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory |
| 125 // access. | 123 // access. |
| 126 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 124 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 127 Atomic32 old_value, | 125 Atomic32 old_value, |
| 128 Atomic32 new_value) { | 126 Atomic32 new_value) { |
| 129 ATOMICOPS_COMPILER_BARRIER(); | |
| 130 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 127 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 131 ATOMICOPS_COMPILER_BARRIER(); | 128 MemoryBarrier(); |
| 132 return res; | 129 return res; |
| 133 } | 130 } |
| 134 | 131 |
| 135 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 132 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 136 Atomic32 old_value, | 133 Atomic32 old_value, |
| 137 Atomic32 new_value) { | 134 Atomic32 new_value) { |
| 138 ATOMICOPS_COMPILER_BARRIER(); | 135 MemoryBarrier(); |
| 139 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 136 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 140 ATOMICOPS_COMPILER_BARRIER(); | |
| 141 return res; | |
| 142 } | 137 } |
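Reviewer note: the acquire and release variants above differ only in which side of the CAS the barrier lands on; acquire fences after the operation, release fences before it. The same pairing expressed with the later __atomic builtins, purely for comparison (these builtins postdate this code and are not used by it):

```c++
// Sketch only. On failure the builtin loads the current value back into
// "expected", so returning it matches the "return the previous value"
// contract above. A release variant would pass __ATOMIC_RELEASE for the
// success order (and __ATOMIC_RELAXED on failure, since a failed CAS
// performs no store to release).
inline Atomic32 Sketch_Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                              Atomic32 old_value,
                                              Atomic32 new_value) {
  Atomic32 expected = old_value;
  __atomic_compare_exchange_n(ptr, &expected, new_value,
                              false /* strong */,
                              __ATOMIC_ACQUIRE, __ATOMIC_ACQUIRE);
  return expected;
}
```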
| 143 | 138 |
| 144 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 139 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 145 *ptr = value; | 140 *ptr = value; |
| 146 } | 141 } |
| 147 | 142 |
| 148 inline void MemoryBarrier() { | 143 inline void MemoryBarrier() { |
| 149 __asm__ __volatile__("sync" : : : "memory"); | 144 __asm__ __volatile__("sync" : : : "memory"); |
| 150 } | 145 } |
| 151 | 146 |
| (...skipping 17 matching lines...) |
| 169 return value; | 164 return value; |
| 170 } | 165 } |
| 171 | 166 |
| 172 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 167 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| 173 MemoryBarrier(); | 168 MemoryBarrier(); |
| 174 return *ptr; | 169 return *ptr; |
| 175 } | 170 } |
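Reviewer note: as a usage sketch of why these load variants exist, Acquire_Load makes the message-passing idiom safe when paired with Release_Store on the writer side. Release_Store and NoBarrier_Load are defined in the elided portion of this header; the globals below are invented for illustration:

```c++
Atomic32 g_payload = 0;  // Hypothetical globals, not part of this CL.
Atomic32 g_ready = 0;

void Writer() {
  NoBarrier_Store(&g_payload, 42);
  // No access above may be reordered after this store.
  Release_Store(&g_ready, 1);
}

void Reader() {
  // No access below may be reordered before this load.
  if (Acquire_Load(&g_ready) == 1) {
    Atomic32 v = NoBarrier_Load(&g_payload);  // Guaranteed to see 42.
    (void)v;
  }
}
```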
| 176 | 171 |
| 177 } } // namespace v8::internal | 172 } } // namespace v8::internal |
| 178 | 173 |
| 179 #undef ATOMICOPS_COMPILER_BARRIER | |
| 180 | |
| 181 #endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 174 #endif // V8_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |