Chromium Code Reviews

Diff: src/base/atomicops_internals_x86_gcc.h

Issue 350693005: Partial revert of r21901 (Closed)
Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 5 months ago
 // Copyright 2010 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 // This file is an internal atomic implementation, use atomicops.h instead.

 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
 #define V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_

 namespace v8 {
 namespace base {

 // This struct is not part of the public API of this module; clients may not
 // use it.
 // Features of this x86. Values may not be correct before main() is run,
 // but are set conservatively.
 struct AtomicOps_x86CPUFeatureStruct {
   bool has_amd_lock_mb_bug;  // Processor has AMD memory-barrier bug; do lfence
                              // after acquire compare-and-swap.
+#if !defined(__SSE2__)
+  bool has_sse2;             // Processor has SSE2.
+#endif
 };
 extern struct AtomicOps_x86CPUFeatureStruct AtomicOps_Internalx86CPUFeatures;

 #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory")

 // 32-bit low-level operations on any platform.

 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                          Atomic32 old_value,
                                          Atomic32 new_value) {

(...skipping 54 matching lines...)

 }

 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) {
   *ptr = value;
 }

 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
 }

-// We require SSE2, so mfence is guaranteed to exist.
+#if defined(__x86_64__) || defined(__SSE2__)
+
+// 64-bit (and SSE2-enabled 32-bit) implementations of the memory barrier can
+// be simpler, because "mfence" is guaranteed to exist.
 inline void MemoryBarrier() {
   __asm__ __volatile__("mfence" : : : "memory");
 }

 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
   *ptr = value;
   MemoryBarrier();
 }

+#else
+
+inline void MemoryBarrier() {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {  // mfence is faster but not present on PIII
+    Atomic32 x = 0;
+    NoBarrier_AtomicExchange(&x, 0);  // acts as a barrier on PIII
+  }
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
+  if (AtomicOps_Internalx86CPUFeatures.has_sse2) {
+    *ptr = value;
+    __asm__ __volatile__("mfence" : : : "memory");
+  } else {
+    NoBarrier_AtomicExchange(ptr, value);
+    // acts as a barrier on PIII
+  }
+}
+#endif
+
 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
   ATOMICOPS_COMPILER_BARRIER();
   *ptr = value;  // An x86 store acts as a release barrier.
   // See comments in Atomic64 version of Release_Store(), below.
 }

 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) {
   return *ptr;
 }

(...skipping 123 matching lines...)

   return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
 }

 #endif  // defined(__x86_64__)

 } }  // namespace v8::base

 #undef ATOMICOPS_COMPILER_BARRIER

 #endif  // V8_BASE_ATOMICOPS_INTERNALS_X86_GCC_H_
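Note on the re-added has_sse2 flag: it must be populated at startup (the initialization is expected to live in src/base/atomicops_internals_x86_gcc.cc, the next file in this change, not in this header). As a rough sketch of the kind of runtime check involved, assuming GCC/Clang's <cpuid.h> intrinsics, SSE2 support is reported in bit 26 of EDX for CPUID leaf 1; the helper name below is hypothetical:

  #include <cpuid.h>  // GCC/Clang CPUID intrinsics

  // Hypothetical sketch: detect SSE2 at startup so the non-SSE2 MemoryBarrier()
  // path above can fall back to a locked exchange instead of "mfence".
  static bool DetectSSE2() {
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
    if (__get_cpuid(1, &eax, &ebx, &ecx, &edx) == 0) return false;  // leaf 1 unavailable
    return (edx & (1u << 26)) != 0;  // EDX bit 26 of leaf 1 indicates SSE2
  }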
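The pre-SSE2 fallback paths above work because a locked read-modify-write is a full memory barrier on x86: NoBarrier_AtomicExchange (defined in the elided portion of this header) boils down to an xchgl, and xchg with a memory operand is implicitly locked. A minimal standalone illustration of that idea, not the code under review, might look like:

  // Sketch only: exchange a dummy variable; the implicit lock on xchg makes
  // this a full barrier, which is what the PIII path relies on instead of
  // "mfence".
  static inline void ExchangeBasedBarrier() {
    int dummy = 0;
    int value = 0;
    __asm__ __volatile__("xchgl %1, %0"
                         : "+r"(value), "+m"(dummy)
                         :
                         : "memory");
  }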
