Index: src/base/atomicops_internals_x86_gcc.h
diff --git a/src/base/atomicops_internals_x86_gcc.h b/src/base/atomicops_internals_x86_gcc.h
index 00b64484683fd5ca6e674d8e20417823cacf2c31..90e1acf1eeae803828b11a2024b10acb78e52117 100644
--- a/src/base/atomicops_internals_x86_gcc.h
+++ b/src/base/atomicops_internals_x86_gcc.h
@@ -17,7 +17,6 @@ namespace base {
 struct AtomicOps_x86CPUFeatureStruct {
   bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                              // after acquire compare-and-swap.
-  bool has_sse2;             // Processor has SSE2.
 };
 
 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;
@@ -92,10 +91,7 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }
 
-#if defined(__x86_64__)
-
-// 64-bit implementations of memory barrier can be simpler, because it
-// "mfence" is guaranteed to exist.
+// We require SSE2, so mfence is guaranteed to exist.
 inline void MemoryBarrier() {
   __asm__ __volatile__("mfence" : : : "memory");
 }
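With the pre-SSE2 fallback gone, MemoryBarrier() is unconditionally a single mfence. For reference, here is a minimal sketch of the same full barrier in portable C++11 terms (illustration only, not part of the patch; MemoryBarrierSketch is a hypothetical name). On x86, GCC and Clang lower this to an mfence instruction:

#include <atomic>

inline void MemoryBarrierSketch() {
  // Sequentially consistent fence; compiles to mfence on x86.
  std::atomic_thread_fence(std::memory_order_seq_cst);
  // The GCC builtin __sync_synchronize() emits the same barrier.
}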
@@ -105,28 +101,6 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   MemoryBarrier();
 }
 
-#else
-
-inline void MemoryBarrier() {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {  // mfence is faster but not present on PIII
-    Atomic32 x = 0;
-    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
-  }
-}
-
-inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
-  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
-    *ptr = value;
-    __asm__ __volatile__("mfence" : : : "memory");
-  } else {
-    NoBarrier_AtomicExchange(ptr, value);
-    // acts as a barrier on PIII
-  }
-}
-#endif
-
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   ATOMICOPS_COMPILER_BARRIER();
   *ptr = value;  // An x86 store acts as a release barrier.
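Release_Store is untouched because it never depended on SSE2: under x86's store ordering, a plain store already acts as a release, so only the compiler barrier is needed. A rough C++11 analogue, assuming <atomic> is available (ReleaseStoreSketch is a hypothetical name), compiles to an ordinary mov on x86:

#include <atomic>

inline void ReleaseStoreSketch(std::atomic<int>* ptr, int value) {
  // memory_order_release stops the compiler from moving earlier writes
  // past this store; x86 hardware needs no extra fence for release.
  ptr->store(value, std::memory_order_release);
}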