// Copyright (c) 2011 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation, use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

#define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")
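
// This struct is not part of the public API of this module; clients may not
// use it.
// Features of this x86.  Values may not be correct before main() is run,
// but are set conservatively.
struct AtomicOps_x86CPUFeatureStruct {
  bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                             // after acquire compare-and-swap.
  bool has_sse2;             // Processor has SSE2.
};
extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;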

namespace base {
namespace subtle {

// 32-bit low-level operations on any platform.

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  Atomic32 prev;
  __asm__ __volatile__("lock; cmpxchgl %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}
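
// Example (hypothetical caller): CAS is typically retried in a loop until it
// succeeds, e.g. to atomically record a running maximum:
//   Atomic32 old = NoBarrier_Load(ptr);
//   while (old < candidate) {
//     Atomic32 prev = NoBarrier_CompareAndSwap(ptr, old, candidate);
//     if (prev == old) break;  // CAS succeeded.
//     old = prev;              // Lost a race; retry with the fresh value.
//   }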

inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  __asm__ __volatile__("xchgl %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}
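
// Illustrative sketch only: exchange suffices for a test-and-set spinlock,
// assuming a hypothetical lock_word initialized to 0 (unlocked).  On x86 the
// implicitly locked xchg already acts as a full barrier:
//   while (NoBarrier_AtomicExchange(&lock_word, 1) != 0) { /* spin */ }
//   ... critical section ...
//   Release_Store(&lock_word, 0);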

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  return temp + increment;
}
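
// Illustrative use (hypothetical refcount): the increment functions return
// the *new* value, so a thread releasing the last reference sees zero.  Most
// decrements should use Barrier_AtomicIncrement (below) so that the thread
// that sees zero also observes all writes made before other threads released
// their references:
//   if (Barrier_AtomicIncrement(&refcount, -1) == 0) { /* last reference */ }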

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  Atomic32 temp = increment;
  __asm__ __volatile__("lock; xaddl %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now holds the old value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void MemoryBarrier() {
  __asm__ __volatile__("mfence" : : : "memory");
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  ATOMICOPS_COMPILER_BARRIER();
  *ptr = value;  // An x86 store acts as a release barrier.
  // See comments in Atomic64 version of Release_Store(), below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  Atomic32 value = *ptr;  // An x86 load acts as an acquire barrier.
  // See comments in Atomic64 version of Release_Store(), below.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}
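
// Together, Release_Store() and Acquire_Load() implement the classic
// publication pattern.  Illustrative sketch (g_payload and g_ready are
// hypothetical globals, both initially 0):
//   Producer:  g_payload = 42;
//              Release_Store(&g_ready, 1);
//   Consumer:  while (Acquire_Load(&g_ready) == 0) { /* spin */ }
//              assert(g_payload == 42);  // Guaranteed to see the payload.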

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

#if defined(__x86_64__)

// 64-bit low-level operations on 64-bit platform.

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  Atomic64 prev;
  __asm__ __volatile__("lock; cmpxchgq %1,%2"
                       : "=a" (prev)
                       : "q" (new_value), "m" (*ptr), "0" (old_value)
                       : "memory");
  return prev;
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  __asm__ __volatile__("xchgq %1,%0"  // The lock prefix is implicit for xchg.
                       : "=r" (new_value)
                       : "m" (*ptr), "0" (new_value)
                       : "memory");
  return new_value;  // Now it's the previous value.
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  return temp + increment;
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  Atomic64 temp = increment;
  __asm__ __volatile__("lock; xaddq %0,%1"
                       : "+r" (temp), "+m" (*ptr)
                       : : "memory");
  // temp now contains the previous value of *ptr
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return temp + increment;
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
  MemoryBarrier();
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  ATOMICOPS_COMPILER_BARRIER();

  *ptr = value;  // An x86 store acts as a release barrier
                 // for current AMD/Intel chips as of Jan 2008.
                 // See also Acquire_Load(), below.

  // When new chips come out, check:
  //   IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //   System Programming Guide, Chapter 7: Multiple-processor management,
  //   Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
  //
  // x86 stores/loads fail to act as barriers for a few instructions (clflush
  // maskmovdqu maskmovq movntdq movnti movntpd movntps movntq) but these are
  // not generated by the compiler, and are rare.  Users of these instructions
  // need to know about cache behaviour in any case since all of these involve
  // either flushing cache lines or non-temporal cache hints.
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;  // An x86 load acts as an acquire barrier,
                          // for current AMD/Intel chips as of Jan 2008.
                          // See also Release_Store(), above.
  ATOMICOPS_COMPILER_BARRIER();
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  Atomic64 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
  if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
    __asm__ __volatile__("lfence" : : : "memory");
  }
  return x;
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(__x86_64__)

}  // namespace subtle
}  // namespace base

#undef ATOMICOPS_COMPILER_BARRIER

#endif  // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_