OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use atomicops.h instead. | 5 // This file is an internal atomic implementation, use atomicops.h instead. |
6 | 6 |
7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |
8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |
9 | 9 |
10 namespace v8 { | 10 namespace v8 { |
11 namespace base { | 11 namespace base { |
12 | 12 |
13 // This struct is not part of the public API of this module; clients may not | 13 // This struct is not part of the public API of this module; clients may not |
14 // use it. | 14 // use it. |
15 // Features of this x86. Values may not be correct before main() is run, | 15 // Features of this x86. Values may not be correct before main() is run, |
16 // but are set conservatively. | 16 // but are set conservatively. |
17 struct AtomicOps_x86CPUFeatureStruct { | 17 struct AtomicOps_x86CPUFeatureStruct { |
18 bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence | 18 bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence |
19 // after acquire compare-and-swap. | 19 // after acquire compare-and-swap. |
20 #if !defined(__SSE2__) | |
21 bool has_sse2; // Processor has SSE2. | |
22 #endif | |
23 }; | 20 }; |
24 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; | 21 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures; |
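Note: has_amd_lock_mb_bug is consumed by the acquire compare-and-swap path, which falls inside the region the diff skips below. A sketch of that use, based on the same-era Chromium/V8 atomicops source (this CL does not touch it):

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    // Affected AMD parts need an lfence after the locked op to
    // restore acquire semantics.
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}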
25 | 22 |
26 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") | 23 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") |
27 | 24 |
28 // 32-bit low-level operations on any platform. | 25 // 32-bit low-level operations on any platform. |
29 | 26 |
30 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 27 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
31 Atomic32 old_value, | 28 Atomic32 old_value, |
32 Atomic32 new_value) { | 29 Atomic32 new_value) { |
(...skipping 54 matching lines...) |
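For context, the first function elided here is the lock-prefixed cmpxchg that the rest of the file builds on; a representative sketch of its body, matching the same-era source (not modified by this CL):

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)  // cmpxchg leaves the old value in eax.
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;  // Equals old_value iff the swap happened.
}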
87 } | 84 } |
88 | 85 |
89 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 86 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { |
90 *ptr = value; | 87 *ptr = value; |
91 } | 88 } |
92 | 89 |
93 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 90 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
94 *ptr = value; | 91 *ptr = value; |
95 } | 92 } |
96 | 93 |
97 #if defined(__x86_64__) || defined(__SSE2__) | 94 // We require SSE2, so mfence is guaranteed to exist. |
98 | |
99 // 64-bit implementations of memory barrier can be simpler, because it | |
100 // "mfence" is guaranteed to exist. | |
101 inline void MemoryBarrier() { | 95 inline void MemoryBarrier() { |
102 __asm__ __volatile__("mfence" : : : "memory"); | 96 __asm__ __volatile__("mfence" : : : "memory"); |
103 } | 97 } |
104 | 98 |
105 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 99 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
106 *ptr = value; | 100 *ptr = value; |
107 MemoryBarrier(); | 101 MemoryBarrier(); |
108 } | 102 } |
109 | 103 |
110 #else | |
111 | |
112 inline void MemoryBarrier() { | |
113 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | |
114 __asm__ __volatile__("mfence" : : : "memory"); | |
115 } else { // mfence is faster but not present on PIII | |
116 Atomic32 x = 0; | |
117 NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII | |
118 } | |
119 } | |
120 | |
121 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | |
122 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | |
123 *ptr = value; | |
124 __asm__ __volatile__("mfence" : : : "memory"); | |
125 } else { | |
126 NoBarrier_AtomicExchange(ptr, value); | |
127 // acts as a barrier on PIII | |
128 } | |
129 } | |
130 #endif | |
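The deleted PIII fallback relies on the fact that a locked read-modify-write is a full memory barrier on x86, and xchg carries an implicit lock prefix. For reference, a sketch of the NoBarrier_AtomicExchange it calls, as it appears elsewhere in this file (unchanged by this CL):

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now holds the previous value of *ptr.
}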
131 | |
132 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 104 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
133 ATOMICOPS_COMPILER_BARRIER(); | 105 ATOMICOPS_COMPILER_BARRIER(); |
134 *ptr = value; // An x86 store acts as a release barrier. | 106 *ptr = value; // An x86 store acts as a release barrier. |
135 // See comments in Atomic64 version of Release_Store(), below. | 107 // See comments in Atomic64 version of Release_Store(), below. |
136 } | 108 } |
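A hypothetical producer/consumer sketch of how Release_Store pairs with Acquire_Load (declared in the skipped region below); the names g_data and g_ready are illustrative only:

Atomic32 g_data = 0;
Atomic32 g_ready = 0;

void Producer() {
  NoBarrier_Store(&g_data, 42);  // Plain store of the payload.
  Release_Store(&g_ready, 1);    // Publish: payload visible before the flag.
}

void Consumer() {
  if (Acquire_Load(&g_ready)) {            // Pairs with the release store.
    Atomic32 v = NoBarrier_Load(&g_data);  // Guaranteed to observe 42.
    static_cast<void>(v);
  }
}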
137 | 109 |
138 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 110 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { |
139 return *ptr; | 111 return *ptr; |
140 } | 112 } |
141 | 113 |
(...skipping 123 matching lines...) |
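Among the functions elided here is Acquire_Load, the load-side counterpart of Release_Store above; a sketch matching the same-era source (unchanged by this CL):

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}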
265 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 237 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
266 } | 238 } |
267 | 239 |
268 #endif // defined(__x86_64__) | 240 #endif // defined(__x86_64__) |
269 | 241 |
270 } } // namespace v8::base | 242 } } // namespace v8::base |
271 | 243 |
272 #undef ATOMICOPS_COMPILER_BARRIER | 244 #undef ATOMICOPS_COMPILER_BARRIER |
273 | 245 |
274 #endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 246 #endif // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |