Index: third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
diff --git a/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h b/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
index 5324dfbcb7b45711f82274bedc87abd32f07342b..afcb3f8e51efc833cf4abc70fbbb79ce1c0a52a1 100644
--- a/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
+++ b/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
@@ -37,17 +37,6 @@ namespace google {
 namespace protobuf {
 namespace internal {
 
-// This struct is not part of the public API of this module; clients may not
-// use it.
-// Features of this x86.  Values may not be correct before main() is run,
-// but are set conservatively.
-struct AtomicOps_x86CPUFeatureStruct {
-  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
-                             // after acquire compare-and-swap.
-  bool has_sse2;             // Processor has SSE2.
-};
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
-
 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
 // 32-bit low-level operations on any platform.
@@ -89,9 +78,6 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now holds the old value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
   return temp + increment;
 }
 
@@ -99,9 +85,6 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
   return x;
 }
 
@@ -131,22 +114,12 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
 #else
 
 inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {  // mfence is faster but not present on PIII
-    Atomic32 x = 0;
-    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
-  }
+  __asm__ __volatile__("mfence" : : : "memory");
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    NoBarrier_AtomicExchange(ptr, value);
-    // acts as a barrier on PIII
-  }
+  *ptr = value;
+  __asm__ __volatile__("mfence" : : : "memory");
 }
 #endif
 
@@ -213,9 +186,6 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now contains the previous value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
   return temp + increment;
 }
 
@@ -270,9 +240,6 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
-    __asm__ __volatile__("lfence" : : : "memory");
-  }
   return x;
 }
 
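For context, here is a minimal standalone sketch of the simplified 32-bit
barriers after this patch, assuming a GCC-compatible compiler targeting x86
with SSE2 (the premise of the change: mfence is always available, so the
runtime has_sse2 and has_amd_lock_mb_bug checks can be dropped). The bodies of
MemoryBarrier and Acquire_Store come from the patch's added lines; the Atomic32
typedef and the main() driver are illustrative stand-ins, not part of the real
header.

    #include <cstdio>

    typedef int Atomic32;  // stand-in; the real header derives this from int32

    // Post-patch MemoryBarrier: unconditional mfence, no runtime SSE2 probe.
    inline void MemoryBarrier() {
      __asm__ __volatile__("mfence" : : : "memory");
    }

    // Post-patch Acquire_Store: plain store followed by a full fence,
    // replacing the NoBarrier_AtomicExchange fallback used on pre-SSE2 parts.
    inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
      *ptr = value;
      __asm__ __volatile__("mfence" : : : "memory");
    }

    int main() {
      volatile Atomic32 flag = 0;
      Acquire_Store(&flag, 1);  // store, then fence: write ordered before later ops
      MemoryBarrier();          // full fence, no branch on detected CPU features
      std::printf("flag = %d\n", static_cast<int>(flag));
      return 0;
    }

Besides removing the branch, the patch drops the global
AtomicOps_Internalx86CPUFeatures symbol, so the barriers no longer depend on
feature-detection state that, per the deleted comment, may not be correct
before main() runs.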