Index: src/base/atomicops_internals_x86_gcc.h
diff --git a/src/base/atomicops_internals_x86_gcc.h b/src/base/atomicops_internals_x86_gcc.h
index ec87c42121246f7b75d2cd3939ba2f28195925ad..90e1acf1eeae803828b11a2024b10acb78e52117 100644
--- a/src/base/atomicops_internals_x86_gcc.h
+++ b/src/base/atomicops_internals_x86_gcc.h
@@ -17,9 +17,6 @@ namespace base {
 struct AtomicOps_x86CPUFeatureStruct {
   bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                              // after acquire compare-and-swap.
-#if !defined(__SSE2__)
-  bool has_sse2;             // Processor has SSE2.
-#endif
 };
 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
 
@@ -94,10 +91,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
-#if defined(__x86_64__) || defined(__SSE2__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
+// We require SSE2, so mfence is guaranteed to exist.
 inline void MemoryBarrier() {
   __asm__ __volatile__("mfence" : : : "memory");
 }
@@ -107,28 +101,6 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
 }
 
-#else
-
-inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {  // mfence is faster but not present on PIII
-    Atomic32 x = 0;
-    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
-  }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    NoBarrier_AtomicExchange(ptr, value);
-    // acts as a barrier on PIII
-  }
-}
-#endif
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   ATOMICOPS_COMPILER_BARRIER();
   *ptr = value;  // An x86 store acts as a release barrier.
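
Note: for reference, a minimal standalone sketch of the pattern this change leaves in place: an unconditional mfence-based full barrier paired with a plain store for the acquire-store path, with no runtime has_sse2 check or lock-xchg fallback. This is not part of the patch; the names my_memory_barrier and my_acquire_store are illustrative only, and it assumes an x86/x86-64 target with SSE2 and a GCC-compatible compiler.

// Standalone sketch only; assumes x86/x86-64 with SSE2 and GCC-style inline asm.
#include <stdint.h>

typedef int32_t Atomic32;

// Full hardware barrier: once SSE2 is required, mfence is always available,
// so no runtime CPU-feature check is needed.
inline void my_memory_barrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

// Plain store followed by a full barrier, mirroring the branch-free
// Acquire_Store path kept by this patch.
inline void my_acquire_store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  my_memory_barrier();
}

int main() {
  volatile Atomic32 flag = 0;
  my_acquire_store(&flag, 1);  // store, then fence
  return flag - 1;             // returns 0
}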