| OLD | NEW |
| 1 // Protocol Buffers - Google's data interchange format | 1 // Protocol Buffers - Google's data interchange format |
| 2 // Copyright 2012 Google Inc. All rights reserved. | 2 // Copyright 2012 Google Inc. All rights reserved. |
| 3 // https://developers.google.com/protocol-buffers/ | 3 // https://developers.google.com/protocol-buffers/ |
| 4 // | 4 // |
| 5 // Redistribution and use in source and binary forms, with or without | 5 // Redistribution and use in source and binary forms, with or without |
| 6 // modification, are permitted provided that the following conditions are | 6 // modification, are permitted provided that the following conditions are |
| 7 // met: | 7 // met: |
| 8 // | 8 // |
| 9 // * Redistributions of source code must retain the above copyright | 9 // * Redistributions of source code must retain the above copyright |
| 10 // notice, this list of conditions and the following disclaimer. | 10 // notice, this list of conditions and the following disclaimer. |
| (...skipping 101 matching lines...) |
| 112 } | 112 } |
| 113 | 113 |
| 114 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 114 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 115 *ptr = value; | 115 *ptr = value; |
| 116 } | 116 } |
| 117 | 117 |
| 118 #if defined(__x86_64__) | 118 #if defined(__x86_64__) |
| 119 | 119 |
| 120 // 64-bit implementations of memory barrier can be simpler, because | 120 // 64-bit implementations of memory barrier can be simpler, because |
| 121 // "mfence" is guaranteed to exist. | 121 // "mfence" is guaranteed to exist. |
| 122 inline void MemoryBarrier() { | 122 inline void MemoryBarrierInternal() { |
| 123 __asm__ __volatile__("mfence" : : : "memory"); | 123 __asm__ __volatile__("mfence" : : : "memory"); |
| 124 } | 124 } |
| 125 | 125 |
| 126 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 126 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 127 *ptr = value; | 127 *ptr = value; |
| 128 MemoryBarrier(); | 128 MemoryBarrierInternal(); |
| 129 } | 129 } |
| 130 | 130 |
| 131 #else | 131 #else |
| 132 | 132 |
| 133 inline void MemoryBarrier() { | 133 inline void MemoryBarrierInternal() { |
| 134 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | 134 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { |
| 135 __asm__ __volatile__("mfence" : : : "memory"); | 135 __asm__ __volatile__("mfence" : : : "memory"); |
| 136 } else { // mfence is faster but not present on PIII | 136 } else { // mfence is faster but not present on PIII |
| 137 Atomic32 x = 0; | 137 Atomic32 x = 0; |
| 138 NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII | 138 NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII |
| 139 } | 139 } |
| 140 } | 140 } |
| 141 | 141 |
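The rename from MemoryBarrier() to MemoryBarrierInternal() presumably sidesteps a name collision with the MemoryBarrier that some platform headers (e.g. MinGW/Windows) define; the semantics are unchanged. The PIII fallback works because any locked read-modify-write (xchg is implicitly locked on x86) acts as a full barrier. For reference, a minimal sketch of the same full fence in portable C++11, which needs no SSE2 probing; the function name is illustrative, not part of the patch:

```cpp
#include <atomic>

// Hypothetical equivalent, for illustration only: on x86/x86-64 this
// typically emits "mfence" (or a locked instruction), the same full
// fence that the patched MemoryBarrierInternal() provides.
inline void FullFenceSketch() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
```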
| 142 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 142 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 143 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | 143 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { |
| (...skipping 17 matching lines...) |
| 161 } | 161 } |
| 162 | 162 |
| 163 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 163 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
| 164 Atomic32 value = *ptr; // An x86 load acts as an acquire barrier. | 164 Atomic32 value = *ptr; // An x86 load acts as an acquire barrier. |
| 165 // See comments in Atomic64 version of Release_Store(), below. | 165 // See comments in Atomic64 version of Release_Store(), below. |
| 166 ATOMICOPS_COMPILER_BARRIER(); | 166 ATOMICOPS_COMPILER_BARRIER(); |
| 167 return value; | 167 return value; |
| 168 } | 168 } |
| 169 | 169 |
| 170 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 170 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
| 171 MemoryBarrier(); | 171 MemoryBarrierInternal(); |
| 172 return *ptr; | 172 return *ptr; |
| 173 } | 173 } |
| 174 | 174 |
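These helpers rely on x86's strong memory model: ordinary loads already have acquire semantics and ordinary stores release semantics, so Acquire_Load() only needs a compiler barrier to stop the compiler itself from reordering, while Release_Load() pays for a full fence before the load. A minimal sketch of the same contract in portable C++11 (names are illustrative, not from this file):

```cpp
#include <atomic>
#include <cstdint>

// On x86 both of these compile to a plain MOV plus a compiler-level
// barrier, matching the hand-rolled acquire/release operations above.
inline int32_t AcquireLoadSketch(const std::atomic<int32_t>& v) {
  return v.load(std::memory_order_acquire);
}

inline void ReleaseStoreSketch(std::atomic<int32_t>& v, int32_t x) {
  v.store(x, std::memory_order_release);
}
```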
| 175 #if defined(__x86_64__) | 175 #if defined(__x86_64__) |
| 176 | 176 |
| 177 // 64-bit low-level operations on 64-bit platform. | 177 // 64-bit low-level operations on 64-bit platform. |
| 178 | 178 |
| 179 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 179 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
| 180 Atomic64 old_value, | 180 Atomic64 old_value, |
| 181 Atomic64 new_value) { | 181 Atomic64 new_value) { |
| (...skipping 36 matching lines...) |
| 218 } | 218 } |
| 219 return temp + increment; | 219 return temp + increment; |
| 220 } | 220 } |
| 221 | 221 |
| 222 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 222 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 223 *ptr = value; | 223 *ptr = value; |
| 224 } | 224 } |
| 225 | 225 |
| 226 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { | 226 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 227 *ptr = value; | 227 *ptr = value; |
| 228 MemoryBarrier(); | 228 MemoryBarrierInternal(); |
| 229 } | 229 } |
| 230 | 230 |
| 231 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | 231 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
| 232 ATOMICOPS_COMPILER_BARRIER(); | 232 ATOMICOPS_COMPILER_BARRIER(); |
| 233 | 233 |
| 234 *ptr = value; // An x86 store acts as a release barrier | 234 *ptr = value; // An x86 store acts as a release barrier |
| 235 // for current AMD/Intel chips as of Jan 2008. | 235 // for current AMD/Intel chips as of Jan 2008. |
| 236 // See also Acquire_Load(), below. | 236 // See also Acquire_Load(), below. |
| 237 | 237 |
| 238 // When new chips come out, check: | 238 // When new chips come out, check: |
| (...skipping 16 matching lines...) |
| 255 | 255 |
| 256 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 256 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
| 257 Atomic64 value = *ptr; // An x86 load acts as an acquire barrier, | 257 Atomic64 value = *ptr; // An x86 load acts as an acquire barrier, |
| 258 // for current AMD/Intel chips as of Jan 2008. | 258 // for current AMD/Intel chips as of Jan 2008. |
| 259 // See also Release_Store(), above. | 259 // See also Release_Store(), above. |
| 260 ATOMICOPS_COMPILER_BARRIER(); | 260 ATOMICOPS_COMPILER_BARRIER(); |
| 261 return value; | 261 return value; |
| 262 } | 262 } |
| 263 | 263 |
| 264 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 264 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
| 265 MemoryBarrier(); | 265 MemoryBarrierInternal(); |
| 266 return *ptr; | 266 return *ptr; |
| 267 } | 267 } |
| 268 | 268 |
| 269 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 269 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, |
| 270 Atomic64 old_value, | 270 Atomic64 old_value, |
| 271 Atomic64 new_value) { | 271 Atomic64 new_value) { |
| 272 Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 272 Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 273 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { | 273 if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) { |
| 274 __asm__ __volatile__("lfence" : : : "memory"); | 274 __asm__ __volatile__("lfence" : : : "memory"); |
| 275 } | 275 } |
| 276 return x; | 276 return x; |
| 277 } | 277 } |
| 278 | 278 |
| 279 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | 279 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, |
| 280 Atomic64 old_value, | 280 Atomic64 old_value, |
| 281 Atomic64 new_value) { | 281 Atomic64 new_value) { |
| 282 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 282 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 283 } | 283 } |
| 284 | 284 |
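The lfence after the locked cmpxchg is a workaround gated on has_amd_lock_mb_bug, which appears to refer to an early-Opteron erratum where locked instructions did not reliably act as a memory barrier; on unaffected CPUs the locked cmpxchg alone already supplies the acquire ordering. A sketch of the same acquire CAS with std::atomic (illustrative names, not from the patch):

```cpp
#include <atomic>
#include <cstdint>

// Returns the value previously stored in *p, CAS-style. With
// std::memory_order_acquire the compiler and CPU supply the required
// ordering; no explicit lfence is needed on conforming hardware.
inline int64_t AcquireCompareAndSwapSketch(std::atomic<int64_t>* p,
                                           int64_t old_value,
                                           int64_t new_value) {
  p->compare_exchange_strong(old_value, new_value,
                             std::memory_order_acquire,
                             std::memory_order_acquire);
  return old_value;  // on failure, updated to the value actually observed
}
```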
| 285 #endif // defined(__x86_64__) | 285 #endif // defined(__x86_64__) |
| 286 | 286 |
| 287 } // namespace internal | 287 } // namespace internal |
| 288 } // namespace protobuf | 288 } // namespace protobuf |
| 289 } // namespace google | 289 } // namespace google |
| 290 | 290 |
| 291 #undef ATOMICOPS_COMPILER_BARRIER | 291 #undef ATOMICOPS_COMPILER_BARRIER |
| 292 | 292 |
| 293 #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_ | 293 #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_X86_GCC_H_ |