OLD | NEW |
---|---|
1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. | 1 // Copyright (c) 2011 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 // This file is an internal atomic implementation, use base/atomicops.h instead. | 5 // This file is an internal atomic implementation, use base/atomicops.h instead. |
6 | 6 |
7 #ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 7 #ifndef BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |
8 #define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 8 #define BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |
9 | 9 |
10 #include "base/base_export.h" | 10 #include "base/base_export.h" |
11 | 11 |
12 // This struct is not part of the public API of this module; clients may not | 12 // This struct is not part of the public API of this module; clients may not |
13 // use it. (However, it's exported via BASE_EXPORT because clients implicitly | 13 // use it. (However, it's exported via BASE_EXPORT because clients implicitly |
14 // do use it at link time by inlining these functions.) | 14 // do use it at link time by inlining these functions.) |
15 // Features of this x86. Values may not be correct before main() is run, | 15 // Features of this x86. Values may not be correct before main() is run, |
16 // but are set conservatively. | 16 // but are set conservatively. |
17 struct AtomicOps_x86CPUFeatureStruct { | 17 struct AtomicOps_x86CPUFeatureStruct { |
18 bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence | 18 bool has_amd_lock_mb_bug; // Processor has AMD memory-barrier bug; do lfence |
19 // after acquire compare-and-swap. | 19 // after acquire compare-and-swap. |
20 bool has_sse2; // Processor has SSE2. | |
21 }; | 20 }; |
22 BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct | 21 BASE_EXPORT extern struct AtomicOps_x86CPUFeatureStruct |
23 AtomicOps_Internalx86CPUFeatures; | 22 AtomicOps_Internalx86CPUFeatures; |
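The barrier-flavored compare-and-swap operations (in the skipped region below) consult has_amd_lock_mb_bug exactly as the comment above describes. A minimal sketch of that pattern, assuming the NoBarrier_CompareAndSwap primitive this header defines later; the function shape is ours, not quoted from the file:

    inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                           Atomic32 old_value,
                                           Atomic32 new_value) {
      Atomic32 x = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
      // Documented workaround: on affected AMD parts the lock prefix does
      // not give the expected acquire ordering, so issue an lfence after it.
      if (AtomicOps_Internalx86CPUFeatures.has_amd_lock_mb_bug) {
        __asm__ __volatile__("lfence" : : : "memory");
      }
      return x;
    }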
24 | 23 |
25 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") | 24 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") |
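The macro expands to an empty asm statement with a "memory" clobber: it emits no machine instructions, but stops GCC from reordering or caching memory accesses across it. A small illustration (the function and its parameters are hypothetical, not part of the header):

    // The compiler may not move the *data load above the barrier, though the
    // CPU itself could still reorder. On x86 that is often enough, since the
    // hardware does not reorder loads with older loads.
    inline int LoadDataAfterFlag(volatile const int* flag,
                                 volatile const int* data) {
      int f = *flag;
      ATOMICOPS_COMPILER_BARRIER();  // compiler-only fence, zero instructions
      return f ? *data : 0;
    }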
26 | 25 |
27 namespace base { | 26 namespace base { |
28 namespace subtle { | 27 namespace subtle { |
29 | 28 |
30 // 32-bit low-level operations on any platform. | 29 // 32-bit low-level operations on any platform. |
(...skipping 54 matching lines...) | |
85 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 84 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
86 Atomic32 old_value, | 85 Atomic32 old_value, |
87 Atomic32 new_value) { | 86 Atomic32 new_value) { |
88 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 87 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
89 } | 88 } |
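Release_CompareAndSwap can forward to the no-barrier version because on x86 a lock-prefixed cmpxchg is already a full memory barrier. The primitive itself sits in the skipped lines above; presumably it is the usual lock; cmpxchgl sequence, roughly:

    // Sketch only; the real definition is in the elided lines above.
    inline Atomic32 NoBarrier_CompareAndSwap_Sketch(volatile Atomic32* ptr,
                                                    Atomic32 old_value,
                                                    Atomic32 new_value) {
      Atomic32 prev;
      __asm__ __volatile__("lock; cmpxchgl %1,%2"
                           : "=a" (prev)  // eax holds the previous value
                           : "q" (new_value), "m" (*ptr), "0" (old_value)
                           : "memory");
      return prev;
    }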
90 | 89 |
91 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 90 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
92 *ptr = value; | 91 *ptr = value; |
93 } | 92 } |
94 | 93 |
95 #if defined(__x86_64__) | |
96 | |
97 // 64-bit implementations of memory barrier can be simpler, because | 94 // 64-bit implementations of memory barrier can be simpler, because |
Mark Mentovai 2014/05/19 22:24:35: This comment should go away.
Nico 2014/05/19 22:29:11: Done.
98 // "mfence" is guaranteed to exist. | 95 // "mfence" is guaranteed to exist. |
99 inline void MemoryBarrier() { | 96 inline void MemoryBarrier() { |
100 __asm__ __volatile__("mfence" : : : "memory"); | 97 __asm__ __volatile__("mfence" : : : "memory"); |
101 } | 98 } |
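In present-day C++ terms, MemoryBarrier() corresponds to a sequentially consistent fence, which GCC also lowers to mfence on x86-64; this header simply predates Chromium's use of <atomic>. A hypothetical portable equivalent:

    #include <atomic>

    // Portable rendering of MemoryBarrier() above.
    inline void MemoryBarrierPortable() {
      std::atomic_thread_fence(std::memory_order_seq_cst);  // mfence on x86-64
    }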
102 | 99 |
103 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 100 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
104 *ptr = value; | 101 *ptr = value; |
105 MemoryBarrier(); | 102 MemoryBarrier(); |
106 } | 103 } |
107 | 104 |
108 #else | |
109 | |
110 inline void MemoryBarrier() { | |
111 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | |
112 __asm__ __volatile__("mfence" : : : "memory"); | |
113 } else { // mfence is faster but not present on PIII | |
114 Atomic32 x = 0; | |
115 NoBarrier_AtomicExchange(&x, 0); // acts as a barrier on PIII | |
116 } | |
117 } | |
118 | |
119 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | |
120 if (AtomicOps_Internalx86CPUFeatures.has_sse2) { | |
121 *ptr = value; | |
122 __asm__ __volatile__("mfence" : : : "memory"); | |
123 } else { | |
124 NoBarrier_AtomicExchange(ptr, value); | |
125 // acts as a barrier on PIII | |
126 } | |
127 } | |
128 #endif | |
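The branch deleted here existed because mfence is an SSE2 instruction and is absent on the Pentium III; the fallback works because xchg with a memory operand carries an implicit lock prefix, and lock-prefixed instructions are full barriers on x86. That is why NoBarrier_AtomicExchange "acts as a barrier on PIII". The exchange primitive lives in the skipped lines above; a sketch of the usual encoding:

    // Sketch only; the real NoBarrier_AtomicExchange is in the elided lines.
    inline Atomic32 NoBarrier_AtomicExchange_Sketch(volatile Atomic32* ptr,
                                                    Atomic32 new_value) {
      __asm__ __volatile__("xchgl %1,%0"  // implicit lock prefix: full barrier
                           : "=r" (new_value)
                           : "m" (*ptr), "0" (new_value)
                           : "memory");
      return new_value;  // now holds the previous value of *ptr
    }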
129 | |
130 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 105 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
131 ATOMICOPS_COMPILER_BARRIER(); | 106 ATOMICOPS_COMPILER_BARRIER(); |
132 *ptr = value; // An x86 store acts as a release barrier. | 107 *ptr = value; // An x86 store acts as a release barrier. |
133 // See comments in Atomic64 version of Release_Store(), below. | 108 // See comments in Atomic64 version of Release_Store(), below. |
134 } | 109 } |
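No CPU fence is needed here: x86's memory model (TSO) never reorders a store with earlier loads or stores, so a compiler barrier plus a plain mov is a complete release-store. A hypothetical C++11 rendering of the same operation, for comparison:

    #include <atomic>

    // On x86, GCC compiles this to a plain mov, just like the code above.
    inline void ReleaseStoreCxx11(std::atomic<Atomic32>* ptr, Atomic32 value) {
      ptr->store(value, std::memory_order_release);
    }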
135 | 110 |
136 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 111 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
137 return *ptr; | 112 return *ptr; |
138 } | 113 } |
139 | 114 |
(...skipping 120 matching lines...) | |
260 } | 235 } |
261 | 236 |
262 #endif // defined(__x86_64__) | 237 #endif // defined(__x86_64__) |
263 | 238 |
264 } // namespace base::subtle | 239 } // namespace base::subtle |
265 } // namespace base | 240 } // namespace base |
266 | 241 |
267 #undef ATOMICOPS_COMPILER_BARRIER | 242 #undef ATOMICOPS_COMPILER_BARRIER |
268 | 243 |
269 #endif // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ | 244 #endif // BASE_ATOMICOPS_INTERNALS_X86_GCC_H_ |