// Copyright (c) 2006-2008 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

// This file is an internal atomic implementation; use base/atomicops.h instead.

#ifndef BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_
#define BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_

#include <windows.h>

#include <intrin.h>

#include "base/macros.h"

#if defined(ARCH_CPU_64_BITS)
// windows.h #defines this (only on x64). This causes problems because the
// public API also uses MemoryBarrier as the public name for this fence. So, on
// x64, undef it and call its documented
// (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx)
// implementation directly.
#undef MemoryBarrier
#endif

namespace base {
namespace subtle {

inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
                                         Atomic32 old_value,
                                         Atomic32 new_value) {
  LONG result = _InterlockedCompareExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value),
      static_cast<LONG>(old_value));
  return static_cast<Atomic32>(result);
}

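// A minimal usage sketch: the classic compare-and-swap retry loop. The
// helper NoBarrier_AtomicOr below is hypothetical and not part of this API;
// it only shows how callers typically drive NoBarrier_CompareAndSwap, which
// returns the value observed at *ptr (equal to old_value iff the swap
// happened).
inline Atomic32 NoBarrier_AtomicOr(volatile Atomic32* ptr, Atomic32 bits) {
  Atomic32 old_value;
  do {
    old_value = *ptr;  // Snapshot; another thread may race with us.
  } while (NoBarrier_CompareAndSwap(ptr, old_value, old_value | bits) !=
           old_value);
  return old_value | bits;  // The value we installed.
}
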
inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
                                         Atomic32 new_value) {
  LONG result = _InterlockedExchange(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(new_value));
  return static_cast<Atomic32>(result);
}

inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
                                        Atomic32 increment) {
  return _InterlockedExchangeAdd(
      reinterpret_cast<volatile LONG*>(ptr),
      static_cast<LONG>(increment)) + increment;
}

inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
                                          Atomic32 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

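// Usage sketch: a reference count built on Barrier_AtomicIncrement. The
// Ref/Deref helpers are hypothetical; they illustrate that the increment
// returns the *new* value, which is exactly what Deref needs to tell
// whether it released the last reference.
inline void Ref(volatile Atomic32* refcount) {
  Barrier_AtomicIncrement(refcount, 1);
}
inline bool Deref(volatile Atomic32* refcount) {
  return Barrier_AtomicIncrement(refcount, -1) == 0;  // true: count hit zero.
}
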
inline void MemoryBarrier() {
#if defined(ARCH_CPU_64_BITS)
  // See #undef and note at the top of this file.
  __faststorefence();
#else
  // We use MemoryBarrier from WinNT.h.
  ::MemoryBarrier();
#endif
}

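// A minimal sketch of when the full fence matters (Dekker-style flags; the
// g_flag variables and TryEnter helpers are hypothetical). On x86 a store
// may be reordered after a later load, so without the MemoryBarrier() each
// thread could miss the other's flag and both could "enter".
extern volatile Atomic32 g_flag1;  // hypothetical, initially 0
extern volatile Atomic32 g_flag2;  // hypothetical, initially 0

inline bool Thread1TryEnter() {
  g_flag1 = 1;          // Announce intent.
  MemoryBarrier();      // Order the store before the load below.
  return g_flag2 == 0;  // Enter only if thread 2 is not also trying.
}
inline bool Thread2TryEnter() {
  g_flag2 = 1;
  MemoryBarrier();
  return g_flag1 == 0;
}
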
// On x86 the LOCKed _Interlocked* operations already act as full memory
// barriers, so the acquire and release variants can reuse the plain
// compare-and-swap unchanged.
inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
                                       Atomic32 old_value,
                                       Atomic32 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005
  // See comments in Atomic64 version of Release_Store() below.
}

inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) {
  return *ptr;
}

inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) {
  // On x86 an ordinary load already has acquire semantics at the hardware
  // level, and the volatile qualifier keeps the compiler from reordering it.
  Atomic32 value = *ptr;
  return value;
}

inline Atomic32 Release_Load(volatile const Atomic32* ptr) {
  MemoryBarrier();
  return *ptr;
}

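// Usage sketch: the publish/consume pattern these store/load variants exist
// for. The names g_data, g_ready, Publish and TryConsume are hypothetical.
// Release_Store() keeps the payload write ordered before the flag write;
// Acquire_Load() keeps the flag read ordered before the payload read.
extern int g_data;                 // hypothetical payload
extern volatile Atomic32 g_ready;  // hypothetical flag, initially 0

inline void Publish(int value) {
  g_data = value;              // 1. Write the payload...
  Release_Store(&g_ready, 1);  // 2. ...then publish it.
}
inline bool TryConsume(int* out) {
  if (Acquire_Load(&g_ready) == 0)
    return false;  // Not published yet.
  *out = g_data;   // Safe: ordered after the acquire load of the flag.
  return true;
}
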
#if defined(_WIN64)

// 64-bit low-level operations on 64-bit platforms.

COMPILE_ASSERT(sizeof(Atomic64) == sizeof(PVOID), atomic_word_is_atomic);

inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
                                         Atomic64 old_value,
                                         Atomic64 new_value) {
  PVOID result = InterlockedCompareExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
                                         Atomic64 new_value) {
  PVOID result = InterlockedExchangePointer(
      reinterpret_cast<volatile PVOID*>(ptr),
      reinterpret_cast<PVOID>(new_value));
  return reinterpret_cast<Atomic64>(result);
}

inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
                                        Atomic64 increment) {
  return InterlockedExchangeAdd64(
      reinterpret_cast<volatile LONGLONG*>(ptr),
      static_cast<LONGLONG>(increment)) + increment;
}

inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
                                          Atomic64 increment) {
  return Barrier_AtomicIncrement(ptr, increment);
}

inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;
}

inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) {
  NoBarrier_AtomicExchange(ptr, value);
  // acts as a barrier in this implementation
}

inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) {
  *ptr = value;  // works w/o barrier for current Intel chips as of June 2005

  // When new chips come out, check:
  //   IA-32 Intel Architecture Software Developer's Manual, Volume 3:
  //   System Programming Guide, Chapter 7: Multiple-Processor Management,
  //   Section 7.2, Memory Ordering.
  // Last seen at:
  //   http://developer.intel.com/design/pentium4/manuals/index_new.htm
}

inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) {
  return *ptr;
}

inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) {
  Atomic64 value = *ptr;
  return value;
}

inline Atomic64 Release_Load(volatile const Atomic64* ptr) {
  MemoryBarrier();
  return *ptr;
}

inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
                                       Atomic64 old_value,
                                       Atomic64 new_value) {
  return NoBarrier_CompareAndSwap(ptr, old_value, new_value);
}

#endif  // defined(_WIN64)

}  // namespace subtle
}  // namespace base

#endif  // BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_