| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 // This file is an internal atomic implementation, use atomicops.h instead. | 5 // This file is an internal atomic implementation, use atomicops.h instead. |
| 6 | 6 |
| 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |
| 8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |
| 9 | 9 |
| 10 namespace v8 { | 10 namespace v8 { |
| 11 namespace base { | 11 namespace base { |
| 12 | 12 |
| 13 // This struct is not part of the public API of this module; clients may not | 13 // This struct is not part of the public API of this module; clients may not |
| 14 // use it. | 14 // use it. |
| 15 // Features of this x86. Values may not be correct before main() is run, | 15 // Features of this x86. Values may not be correct before main() is run, |
| 16 // but are set conservatively. | 16 // but are set conservatively. |
| 17 struct AtomicOps_x86CPUFeatureStruct { | 17 struct AtomicOps_x86CPUFeatureStruct { |
| 18 bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence | 18 bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence |
| 19 // after acquire compare-and-swap. | 19 // after acquire compare-and-swap. |
| 20 bool has_sse2; // Processor has SSE2. | |
| 21 }; | 20 }; |
| 22 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; | 21 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; |
| 23 | 22 |
| 24 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") | 23 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") |
| 25 | 24 |
| 26 // 32-bit low-level operations on any platform. | 25 // 32-bit low-level operations on any platform. |
| 27 | 26 |
| 28 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 27 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
| 29 Atomic32 old_value, | 28 Atomic32 old_value, |
| 30 Atomic32 new_value) { | 29 Atomic32 new_value) { |
| (...skipping 54 matching lines...) | (...skipping 54 matching lines...) |
| 85 } | 84 } |
| 86 | 85 |
| 87 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 86 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { |
| 88 *ptr = value; | 87 *ptr = value; |
| 89 } | 88 } |
| 90 | 89 |
| 91 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 90 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 92 *ptr = value; | 91 *ptr = value; |
| 93 } | 92 } |
| 94 | 93 |
| 95 #if defined(__x86_64__) | 94 // We require SSE2, so mfence is guaranteed to exist. |
| 96 | |
| 97 // 64-bit implementations of memory barrier can be simpler, because it | |
| 98 // "mfence" is guaranteed to exist. | |
| 99 inline void MemoryBarrier() { | 95 inline void MemoryBarrier() { |
| 100 __asm__ __volatile__("mfence" : : : "memory"); | 96 __asm__ __volatile__("mfence" : : : "memory"); |
| 101 } | 97 } |
| 102 | 98 |
| 103 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 99 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 104 *ptr = value; | 100 *ptr = value; |
| 105 MemoryBarrier(); | 101 MemoryBarrier(); |
| 106 } | 102 } |
| 107 | 103 |
| 108 #else | |
| 109 | |
| 110 inline void MemoryBarrier() { | |
| 111 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | |
| 112 __asm__ __volatile__("mfence" : : : "memory"); | |
| 113 } else { // mfence is faster but not present on PIII | |
| 114 Atomic32 x = 0; | |
| 115 NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII | |
| 116 } | |
| 117 } | |
| 118 | |
| 119 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | |
| 120 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | |
| 121 *ptr = value; | |
| 122 __asm__ __volatile__("mfence" : : : "memory"); | |
| 123 } else { | |
| 124 NoBarrier_AtomicExchange(ptr, value); | |
| 125 // acts as a barrier on PIII | |
| 126 } | |
| 127 } | |
| 128 #endif | |
| 129 | |
| 130 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 104 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
| 131 ATOMICOPS_COMPILER_BARRIER(); | 105 ATOMICOPS_COMPILER_BARRIER(); |
| 132 *ptr = value; // An x86 store acts as a release barrier. | 106 *ptr = value; // An x86 store acts as a release barrier. |
| 133 // See comments in Atomic64 version of Release_Store(), below. | 107 // See comments in Atomic64 version of Release_Store(), below. |
| 134 } | 108 } |
| 135 | 109 |
| 136 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 110 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { |
| 137 return *ptr; | 111 return *ptr; |
| 138 } | 112 } |
| 139 | 113 |
| (...skipping 123 matching lines...) | (...skipping 123 matching lines...) |
| 263 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 237 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
| 264 } | 238 } |
| 265 | 239 |
| 266 #endif // defined(__x86_64__) | 240 #endif // defined(__x86_64__) |
| 267 | 241 |
| 268 } } // namespace v8::base | 242 } } // namespace v8::base |
| 269 | 243 |
| 270 #undef ATOMICOPS_COMPILER_BARRIER | 244 #undef ATOMICOPS_COMPILER_BARRIER |
| 271 | 245 |
| 272 #endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 246 #endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |
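
The primitives in this header lean on x86's strong (TSO) ordering: Release_Store is just a compiler barrier followed by a plain store, and an ordinary load followed by a compiler barrier gives acquire semantics. Below is a minimal standalone sketch of the publish/consume pattern these operations support. It assumes x86/x86-64 with GCC or Clang; the lowercase helpers (release_store, acquire_load) and the payload/ready variables are local to the sketch, not part of atomicops.h.

```cpp
// Hedged sketch, not V8 code: shows why "an x86 store acts as a release
// barrier" is enough for a publish/consume handoff. Assumes x86/x86-64 and
// GCC/Clang; compile with: g++ -O2 -pthread sketch.cc
#include <pthread.h>
#include <stdio.h>

typedef int Atomic32;

// Same trick as ATOMICOPS_COMPILER_BARRIER() in the header: stop the
// compiler from moving memory accesses across this point.
#define COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

static Atomic32 payload = 0;         // plain data being published
static volatile Atomic32 ready = 0;  // publication flag

// Release store: x86 never reorders a store with earlier loads or stores,
// so a compiler barrier plus a plain store suffices (mirrors Release_Store).
static void release_store(volatile Atomic32* ptr, Atomic32 value) {
  COMPILER_BARRIER();
  *ptr = value;
}

// Acquire load: x86 never reorders a load with later loads or stores from
// the same thread, so a plain load plus a compiler barrier suffices.
static Atomic32 acquire_load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;
  COMPILER_BARRIER();
  return value;
}

static void* producer(void*) {
  payload = 42;              // ordinary store, ordered before the flag
  release_store(&ready, 1);  // publish
  return NULL;
}

static void* consumer(void*) {
  while (acquire_load(&ready) == 0) {
    // spin until the producer publishes
  }
  printf("payload = %d\n", payload);  // sees 42 on x86
  return NULL;
}

int main() {
  pthread_t p, c;
  pthread_create(&p, NULL, producer, NULL);
  pthread_create(&c, NULL, consumer, NULL);
  pthread_join(p, NULL);
  pthread_join(c, NULL);
  return 0;
}
```

Like the header itself, this relies on x86 ordering plus a compiler barrier rather than the C++11 memory model; on a weakly ordered architecture the same pattern would need real fence or acquire/release instructions.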
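For background on the has_sse2 flag this change deletes: before SSE2 became a baseline requirement, the 32-bit path had to probe for it at runtime (the feature struct is presumably filled in from CPUID in the accompanying .cc file) and fall back to a locked exchange on Pentium III. A hypothetical sketch of such a probe is below; DetectSSE2 is an illustrative name, not the V8 function, and it assumes GCC/Clang's <cpuid.h>.

```cpp
// Hedged sketch, not the actual V8 detection code: SSE2 support is reported
// by CPUID leaf 1, EDX bit 26.
#include <cpuid.h>
#include <stdio.h>

static bool DetectSSE2() {
  unsigned int eax, ebx, ecx, edx;
  // __get_cpuid returns 0 if leaf 1 is not supported by the processor.
  if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx)) return false;
  return (edx & (1u << 26)) != 0;  // CPUID.01H:EDX.SSE2[bit 26]
}

int main() {
  printf("sse2: %s\n", DetectSSE2() ? "yes" : "no");
  return 0;
}
```

With SSE2 guaranteed, the runtime branch disappears and MemoryBarrier() can issue mfence unconditionally, which is exactly what the new code above does.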