Index: third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
diff --git a/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h b/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
index edccc59dee615465b2f8982df61771b22db5aae0..516f2b0640cb9d449c990697cbc84ab8e1d18ec1 100644
--- a/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
+++ b/third_party/protobuf/src/google/protobuf/stubs/atomicops_internals_x86_gcc.h
@@ -46,7 +46,9 @@ struct AtomicOps_x86CPUFeatureStruct {
                             // after acquire compare-and-swap.
   bool has_sse2;            // Processor has SSE2.
 };
-extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
+extern struct AtomicOps_x86CPUFeatureStruct cr_AtomicOps_Internalx86CPUFeatures;
+
+void AtomicOps_Internalx86CPUFeaturesInit();
 
 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
 
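The cr_ prefix presumably namespaces the global so that protobuf's copy of this atomicops code cannot collide with another copy of the same symbols linked into the same binary, and the new free function makes feature detection an explicit call rather than an implicit one. A minimal sketch of what the matching .cc definitions might look like, assuming a cpuid-based probe via GCC's <cpuid.h>; everything here beyond the two names declared above is hypothetical:

// Hypothetical sketch of the .cc side of this change; not part of this diff.
#include <cpuid.h>

struct AtomicOps_x86CPUFeatureStruct {  // mirrors the header's declaration
  bool has_amd_lock_mb_bug;
  bool has_sse2;
};

struct AtomicOps_x86CPUFeatureStruct cr_AtomicOps_Internalx86CPUFeatures = {
  false,  // has_amd_lock_mb_bug: conservative default
  false,  // has_sse2: conservative default
};

void AtomicOps_Internalx86CPUFeaturesInit() {
  unsigned int eax, ebx, ecx, edx;
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
    return;  // cpuid unavailable; keep the conservative defaults
  cr_AtomicOps_Internalx86CPUFeatures.has_sse2 = (edx & bit_SSE2) != 0;
  // A real probe would also inspect vendor/family/model to decide
  // has_amd_lock_mb_bug; that check is elided in this sketch.
}
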
@@ -89,7 +91,7 @@ inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now holds the old value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return temp + increment;
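
For orientation, the hunk above sits inside a fetch-and-add built on lock xadd. A standalone sketch of that technique under illustrative names, with plain int standing in for Atomic32:

// Sketch of the lock xadd pattern; illustrative, not the file's exact code.
inline int BarrierIncrementSketch(volatile int* ptr, int increment) {
  int temp = increment;
  // lock xadd atomically does { old = *ptr; *ptr += temp; temp = old; }.
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  return temp + increment;  // the new value of *ptr
}
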
@@ -99,7 +101,7 @@ inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                        Atomic32 old_value,
                                        Atomic32 new_value) {
   Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return x;
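
All four lfence hunks in this file adjust the same workaround: per the struct's own comment, the affected AMD processors need an lfence after an acquire compare-and-swap to restore the ordering the caller expects from a locked instruction. A standalone sketch of the pattern, again with illustrative names:

// Sketch of an acquire CAS with the AMD lock/mb workaround; not the file's
// exact code. The flag stands in for the feature-struct field.
static bool has_amd_lock_mb_bug_sketch = false;

inline int AcquireCasSketch(volatile int* ptr, int old_value, int new_value) {
  int prev = old_value;  // cmpxchg compares %eax ("a") with *ptr
  __asm__ __volatile__("lock; cmpxchgl %2,%1"
                       : "+a" (prev), "+m" (*ptr)
                       : "q" (new_value)
                       : "memory");
  // On the buggy parts the locked op alone is not enough, so fence here.
  if (has_amd_lock_mb_bug_sketch) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return prev;  // previous *ptr; equals old_value iff the swap happened
}
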
@@ -131,7 +133,7 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
 #else
 
 inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+  if (cr_AtomicOps_Internalx86CPUFeatures.has_sse2) {
     __asm__ __volatile__("mfence" : : : "memory");
   } else { // mfence is faster but not present on PIII
     Atomic32 x = 0;
@@ -140,7 +142,7 @@ inline void MemoryBarrier() {
 }
 
 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+  if (cr_AtomicOps_Internalx86CPUFeatures.has_sse2) {
     *ptr = value;
     __asm__ __volatile__("mfence" : : : "memory");
   } else {
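
Both hunks above cut off at the start of their non-SSE2 branch; the fallback technique on pre-SSE2 (PIII-class) CPUs, where mfence does not exist, is a locked exchange, since xchg with a memory operand is implicitly lock-prefixed and locked instructions act as full barriers on x86. A standalone sketch of that fallback:

// Sketch of a full barrier without mfence; illustrative name.
inline void XchgBarrierSketch() {
  int slot = 0;
  int value = 0;
  // xchg against memory asserts the bus lock implicitly, ordering all
  // earlier loads and stores against all later ones.
  __asm__ __volatile__("xchgl %0,%1"
                       : "+r" (value), "+m" (slot)
                       : : "memory");
}
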
@@ -213,7 +215,7 @@ inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                        : "+r" (temp), "+m" (*ptr)
                        : : "memory");
   // temp now contains the previous value of *ptr
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return temp + increment;
@@ -270,7 +272,7 @@ inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                        Atomic64 old_value,
                                        Atomic64 new_value) {
   Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
-  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
+  if (cr_AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
     __asm__ __volatile__("lfence" : : : "memory");
   }
   return x;
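
A note on sequencing: by declaring an explicit init hook, the diff plausibly replaces a static initializer for the feature struct (Chromium disallows static initializers), leaving conservative defaults in place until the hook runs. A hypothetical call site, not part of this diff:

// Hypothetical call site; nothing below is part of this diff.
int main() {
  AtomicOps_Internalx86CPUFeaturesInit();  // fill cr_AtomicOps_Internalx86CPUFeatures
  // Until this runs, feature-gated paths see conservative defaults
  // (e.g. no SSE2), so it should be called as early as possible.
  return 0;
}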