Chromium Code Reviews
| Index: base/atomicops_internals_x86_gcc.h |
| diff --git a/base/atomicops_internals_x86_gcc.h b/base/atomicops_internals_x86_gcc.h |
| index ac02b17f5db0f672596a88d2ce8d93d21d907efe..36cf19892b950d22a04e0a4b395dd8ddf938c7fd 100644 |
| --- a/base/atomicops_internals_x86_gcc.h |
| +++ b/base/atomicops_internals_x86_gcc.h |
| @@ -17,7 +17,6 @@ |
| struct AtomicOps_x86CPUFeatureStruct { |
| bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence |
| // after acquire compare-and-swap. |
| - bool has_sse2; // Processor has SSE2. |
| }; |
| BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct |
| AtomicOps_Internalx86CPUFeatures; |
| @@ -92,8 +91,6 @@ inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| *ptr = value; |
| } |
| -#if defined(__x86_64__) |
| - |
| // 64-bit implementations of memory barrier can be simpler, because it |
| // "mfence" is guaranteed to exist. |

[Review comment thread on the two lines above]
Mark Mentovai — 2014/05/19 22:24:35:
This comment should go away.
Nico — 2014/05/19 22:29:11:
Done.
| inline void MemoryBarrier() { |
| @@ -105,28 +102,6 @@ inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| MemoryBarrier(); |
| } |
| -#else |
| - |
| -inline void MemoryBarrier() { |
| - if (AtomicOps_Internalx86CPUFeatures.has_sse2) { |
| - __asm__ __volatile__("mfence" : : : "memory"); |
| - } else { // mfence is faster but not present on PIII |
| - Atomic32 x = 0; |
| - NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII |
| - } |
| -} |
| - |
| -inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| - if (AtomicOps_Internalx86CPUFeatures.has_sse2) { |
| - *ptr = value; |
| - __asm__ __volatile__("mfence" : : : "memory"); |
| - } else { |
| - NoBarrier_AtomicExchange(ptr, value); |
| - // acts as a barrier on PIII |
| - } |
| -} |
| -#endif |
| - |
| inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| ATOMICOPS_COMPILER_BARRIER(); |
| *ptr = value; // An x86 store acts as a release barrier. |