| OLD | NEW |
| 1 // Protocol Buffers - Google's data interchange format | 1 // Protocol Buffers - Google's data interchange format |
| 2 // Copyright 2012 Google Inc. All rights reserved. | 2 // Copyright 2012 Google Inc. All rights reserved. |
| 3 // https://developers.google.com/protocol-buffers/ | 3 // https://developers.google.com/protocol-buffers/ |
| 4 // | 4 // |
| 5 // Redistribution and use in source and binary forms, with or without | 5 // Redistribution and use in source and binary forms, with or without |
| 6 // modification, are permitted provided that the following conditions are | 6 // modification, are permitted provided that the following conditions are |
| 7 // met: | 7 // met: |
| 8 // | 8 // |
| 9 // * Redistributions of source code must retain the above copyright | 9 // * Redistributions of source code must retain the above copyright |
| 10 // notice, this list of conditions and the following disclaimer. | 10 // notice, this list of conditions and the following disclaimer. |
| (...skipping 107 matching lines...) |
| 118 ATOMICOPS_COMPILER_BARRIER(); | 118 ATOMICOPS_COMPILER_BARRIER(); |
| 119 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); | 119 Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); |
| 120 ATOMICOPS_COMPILER_BARRIER(); | 120 ATOMICOPS_COMPILER_BARRIER(); |
| 121 return res; | 121 return res; |
| 122 } | 122 } |
| 123 | 123 |
| 124 // "Acquire" operations | 124 // "Acquire" operations |
| 125 // ensure that no later memory access can be reordered ahead of the operation. | 125 // ensure that no later memory access can be reordered ahead of the operation. |
| 126 // "Release" operations ensure that no previous memory access can be reordered | 126 // "Release" operations ensure that no previous memory access can be reordered |
| 127 // after the operation. "Barrier" operations have both "Acquire" and "Release" | 127 // after the operation. "Barrier" operations have both "Acquire" and "Release" |
| 128 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory | 128 // semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no |
| 129 // access. | 129 // memory access. |
| 130 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 130 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
| 131 Atomic32 old_value, | 131 Atomic32 old_value, |
| 132 Atomic32 new_value) { | 132 Atomic32 new_value) { |
| 133 ATOMICOPS_COMPILER_BARRIER(); | 133 ATOMICOPS_COMPILER_BARRIER(); |
| 134 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 134 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 135 ATOMICOPS_COMPILER_BARRIER(); | 135 ATOMICOPS_COMPILER_BARRIER(); |
| 136 return res; | 136 return res; |
| 137 } | 137 } |
| 138 | 138 |
| 139 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 139 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
| 140 Atomic32 old_value, | 140 Atomic32 old_value, |
| 141 Atomic32 new_value) { | 141 Atomic32 new_value) { |
| 142 ATOMICOPS_COMPILER_BARRIER(); | 142 ATOMICOPS_COMPILER_BARRIER(); |
| 143 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 143 Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 144 ATOMICOPS_COMPILER_BARRIER(); | 144 ATOMICOPS_COMPILER_BARRIER(); |
| 145 return res; | 145 return res; |
| 146 } | 146 } |
| 147 | 147 |
| 148 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 148 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 149 *ptr = value; | 149 *ptr = value; |
| 150 } | 150 } |
| 151 | 151 |
| 152 inline void MemoryBarrier() { | 152 inline void MemoryBarrierInternal() { |
| 153 __asm__ __volatile__("sync" : : : "memory"); | 153 __asm__ __volatile__("sync" : : : "memory"); |
| 154 } | 154 } |
| 155 | 155 |
| 156 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 156 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 157 *ptr = value; | 157 *ptr = value; |
| 158 MemoryBarrier(); | 158 MemoryBarrierInternal(); |
| 159 } | 159 } |
| 160 | 160 |
| 161 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 161 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 162 MemoryBarrier(); | 162 MemoryBarrierInternal(); |
| 163 *ptr = value; | 163 *ptr = value; |
| 164 } | 164 } |
| 165 | 165 |
| 166 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 166 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
| 167 return *ptr; | 167 return *ptr; |
| 168 } | 168 } |
| 169 | 169 |
| 170 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 170 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| 171 Atomic32 value = *ptr; | 171 Atomic32 value = *ptr; |
| 172 MemoryBarrier(); | 172 MemoryBarrierInternal(); |
| 173 return value; | 173 return value; |
| 174 } | 174 } |
| 175 | 175 |
| 176 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 176 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| 177 MemoryBarrier(); | 177 MemoryBarrierInternal(); |
| 178 return *ptr; | 178 return *ptr; |
| 179 } | 179 } |
| 180 | 180 |
| 181 #if defined(__LP64__) | 181 #if defined(__LP64__) |
| 182 // 64-bit versions of the atomic ops. | 182 // 64-bit versions of the atomic ops. |
| 183 | 183 |
| 184 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 184 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 185 Atomic64 old_value, | 185 Atomic64 old_value, |
| 186 Atomic64 new_value) { | 186 Atomic64 new_value) { |
| 187 Atomic64 prev, tmp; | 187 Atomic64 prev, tmp; |
| (...skipping 52 matching lines...) |
| 240 ".set pop\n" | 240 ".set pop\n" |
| 241 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) | 241 : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) |
| 242 : "Ir" (increment), "m" (*ptr) | 242 : "Ir" (increment), "m" (*ptr) |
| 243 : "memory"); | 243 : "memory"); |
| 244 // temp2 now holds the final value. | 244 // temp2 now holds the final value. |
| 245 return temp2; | 245 return temp2; |
| 246 } | 246 } |
| 247 | 247 |
| 248 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | 248 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, |
| 249 Atomic64 increment) { | 249 Atomic64 increment) { |
| 250 MemoryBarrier(); | 250 MemoryBarrierInternal(); |
| 251 Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); | 251 Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); |
| 252 MemoryBarrier(); | 252 MemoryBarrierInternal(); |
| 253 return res; | 253 return res; |
| 254 } | 254 } |
| 255 | 255 |
| 256 // "Acquire" operations | 256 // "Acquire" operations |
| 257 // ensure that no later memory access can be reordered ahead of the operation. | 257 // ensure that no later memory access can be reordered ahead of the operation. |
| 258 // "Release" operations ensure that no previous memory access can be reordered | 258 // "Release" operations ensure that no previous memory access can be reordered |
| 259 // after the operation. "Barrier" operations have both "Acquire" and "Release" | 259 // after the operation. "Barrier" operations have both "Acquire" and "Release" |
| 260 // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory | 260 // semantics. A MemoryBarrierInternal() has "Barrier" semantics, but does no |
| 261 // access. | 261 // memory access. |
| 262 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 262 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| 263 Atomic64 old_value, | 263 Atomic64 old_value, |
| 264 Atomic64 new_value) { | 264 Atomic64 new_value) { |
| 265 Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 265 Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 266 MemoryBarrier(); | 266 MemoryBarrierInternal(); |
| 267 return res; | 267 return res; |
| 268 } | 268 } |
| 269 | 269 |
| 270 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 270 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| 271 Atomic64 old_value, | 271 Atomic64 old_value, |
| 272 Atomic64 new_value) { | 272 Atomic64 new_value) { |
| 273 MemoryBarrier(); | 273 MemoryBarrierInternal(); |
| 274 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 274 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 275 } | 275 } |
| 276 | 276 |
| 277 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 277 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 278 *ptr = value; | 278 *ptr = value; |
| 279 } | 279 } |
| 280 | 280 |
| 281 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { | 281 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 282 *ptr = value; | 282 *ptr = value; |
| 283 MemoryBarrier(); | 283 MemoryBarrierInternal(); |
| 284 } | 284 } |
| 285 | 285 |
| 286 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | 286 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 287 MemoryBarrier(); | 287 MemoryBarrierInternal(); |
| 288 *ptr = value; | 288 *ptr = value; |
| 289 } | 289 } |
| 290 | 290 |
| 291 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | 291 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
| 292 return *ptr; | 292 return *ptr; |
| 293 } | 293 } |
| 294 | 294 |
| 295 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 295 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| 296 Atomic64 value = *ptr; | 296 Atomic64 value = *ptr; |
| 297 MemoryBarrier(); | 297 MemoryBarrierInternal(); |
| 298 return value; | 298 return value; |
| 299 } | 299 } |
| 300 | 300 |
| 301 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 301 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| 302 MemoryBarrier(); | 302 MemoryBarrierInternal(); |
| 303 return *ptr; | 303 return *ptr; |
| 304 } | 304 } |
| 305 #endif  // defined(__LP64__) | 305 #endif  // defined(__LP64__) |
| 306 | 306 |
| 307 } // namespace internal | 307 } // namespace internal |
| 308 } // namespace protobuf | 308 } // namespace protobuf |
| 309 } // namespace google | 309 } // namespace google |
| 310 | 310 |
| 311 #undef ATOMICOPS_COMPILER_BARRIER | 311 #undef ATOMICOPS_COMPILER_BARRIER |
| 312 | 312 |
| 313 #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ | 313 #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ |
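For context on how the acquire/release primitives in this header are meant to be paired, below is a minimal, self-contained sketch of the message-passing pattern they support. It is an illustration only: the Release_Store/Acquire_Load bodies use GCC's __atomic builtins purely so the snippet compiles standalone, standing in for the MIPS "sync"-based implementations under review, and the payload/ready names are made up for the example.

#include <cstdint>
#include <cstdio>
#include <thread>

typedef std::int32_t Atomic32;  // stand-in for the header's Atomic32

// Stand-ins matching the signatures in atomicops_internals_mips_gcc.h;
// the real versions place MemoryBarrierInternal() before the plain store
// and after the plain load, respectively.
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  __atomic_store_n(ptr, value, __ATOMIC_RELEASE);
}
inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
}

std::int32_t payload = 0;     // plain data, published via the flag
volatile Atomic32 ready = 0;  // release/acquire synchronization flag

int main() {
  std::thread producer([] {
    payload = 42;              // 1. write the data
    Release_Store(&ready, 1);  // 2. publish: earlier accesses cannot be
                               //    reordered after this release store
  });
  std::thread consumer([] {
    while (Acquire_Load(&ready) == 0) {
      // 3. spin until the flag is observed; acquire semantics mean no
      //    later access can be reordered before this load.
    }
    std::printf("%d\n", payload);  // guaranteed to print 42
  });
  producer.join();
  consumer.join();
  return 0;
}

This pairing is why Release_Store puts the barrier before the store while Acquire_Load puts it after the load: in each case the barrier sits between the flag operation and the data accesses it orders.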