// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use atomicops.h instead.

#ifndef V8_ATOMICOPS_INTERNALS_X86_GCC_H_
#define V8_ATOMICOPS_INTERNALS_X86_GCC_H_

namespace v8 {
namespace internal {

// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86 CPU. Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;

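// ATOMICOPS_COMPILER_BARRIER() is a compiler-only barrier: the empty asm
// statement with a "memory" clobber keeps the compiler from reordering or
// caching memory accesses across it, but emits no fence instruction.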
#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

// 32-bit low-level operations on any platform.

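// Atomically compares *ptr with old_value and, if they match, stores
// new_value; returns the value *ptr held before the operation. cmpxchg
// implicitly uses EAX as the comparand, which is why the "=a" output and
// the matching "0" input tie |prev| and |old_value| to that register.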
inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

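// "lock; xadd" atomically adds |temp| to *ptr and writes the previous value
// of *ptr back into |temp|, so returning temp + increment yields the new
// (incremented) value.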
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}

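// Same as NoBarrier_AtomicIncrement, but on processors that have the AMD
// lock/memory-barrier erratum (see has_amd_lock_mb_bug above) an lfence is
// issued afterwards to restore the expected barrier semantics.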
inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

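// On x86 a locked cmpxchg already acts as a full memory barrier, so the
// acquire variant only needs the extra lfence on processors with the AMD
// erratum noted above.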
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

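// No extra fence is needed for release semantics: the locked cmpxchg already
// keeps earlier memory operations from being reordered past it.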
inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

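// Plain volatile stores suffice here: naturally aligned 8-bit and 32-bit
// stores are already atomic on x86, and the NoBarrier variants promise no
// ordering beyond that.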
inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
  *ptr = value;
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

#if defined(__x86_64__)

// 64-bit implementations of memory barrier can be simpler, because
// "mfence" is guaranteed to exist.
inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

#else

inline void MemoryBarrier() {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    __asm__ __volatile__("mfence" : : : "memory");
  } else {  // mfence is faster but not present on PIII
    Atomic32 x = 0;
    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
  }
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
    *ptr = value;
    __asm__ __volatile__("mfence" : : : "memory");
  } else {
    NoBarrier_AtomicExchange(ptr, value);
                          // acts as a barrier on PIII
  }
}
#endif

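// x86 stores are not reordered with earlier stores or loads, so a plain store
// already has release semantics in hardware; only the compiler needs to be
// kept from moving earlier accesses past it, hence the compiler-only barrier.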
inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
  return *ptr;
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}
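
// Illustrative usage (not part of this header): a producer/consumer handoff
// built on these primitives, with hypothetical caller-owned variables |data|
// and |ready|, might look like:
//
//   int data = 0;
//   Atomic32 ready = 0;
//
//   // Producer:
//   data = 42;                 // ordinary store
//   Release_Store(&ready, 1);  // publish |data|
//
//   // Consumer:
//   while (Acquire_Load(&ready) == 0) {}  // spin until published
//   // |data| now reads 42.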

#if defined(__x86_64__) && defined(V8_HOST_ARCH_64_BIT)

// 64-bit low-level operations on 64-bit platform.

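// These mirror the 32-bit operations above, using the q-suffixed forms of the
// same instructions (cmpxchgq, xchgq, xaddq) on 64-bit operands.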
inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //  IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //  System Programming Guide, Chapter 7: Multiple-processor management,
  //  Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare.  Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__)

} }  // namespace v8::internal

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // V8_ATOMICOPS_INTERNALS_X86_GCC_H_