OLD | NEW |
---|---|
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
danno 2014/07/29 13:24:07: Please use the new compact header.
andrew_low 2014/07/30 13:27:04: Acknowledged.
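For reference, the "compact header" requested here is presumably the three-line license banner already visible in the OLD column, rather than the full BSD license text reintroduced in the NEW column:

```cpp
// Copyright 2010 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
```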
| |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Redistribution and use in source and binary forms, with or without |
3 // found in the LICENSE file. | 3 // modification, are permitted provided that the following conditions are |
4 // met: | |
5 // | |
6 // * Redistributions of source code must retain the above copyright | |
7 // notice, this list of conditions and the following disclaimer. | |
8 // * Redistributions in binary form must reproduce the above | |
9 // copyright notice, this list of conditions and the following | |
10 // disclaimer in the documentation and/or other materials provided | |
11 // with the distribution. | |
12 // * Neither the name of Google Inc. nor the names of its | |
13 // contributors may be used to endorse or promote products derived | |
14 // from this software without specific prior written permission. | |
15 // | |
16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | |
17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | |
18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | |
19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | |
20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | |
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | |
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
4 | 27 |
5 // This file is an internal atomic implementation, use atomicops.h instead. | 28 // This file is an internal atomic implementation, use atomicops.h instead. |
29 // | |
6 | 30 |
7 #ifndef V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 31 #ifndef V8_BASE_ATOMICOPS_INTERNALS_PPC_H_ |
8 #define V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 32 #define V8_BASE_ATOMICOPS_INTERNALS_PPC_H_ |
9 | |
10 #include "src/base/macros.h" | |
11 #include "src/base/win32-headers.h" | |
12 | |
13 #if defined(V8_HOST_ARCH_64_BIT) | |
14 // windows.h #defines this (only on x64). This causes problems because the | |
15 // public API also uses MemoryBarrier at the public name for this fence. So, on | |
16 // X64, undef it, and call its documented | |
17 // (http://msdn.microsoft.com/en-us/library/windows/desktop/ms684208.aspx) | |
18 // implementation directly. | |
19 #undef MemoryBarrier | |
20 #endif | |
21 | 33 |
22 namespace v8 { | 34 namespace v8 { |
23 namespace base { | 35 namespace base { |
24 | 36 |
25 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, | 37 inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, |
26 Atomic32 old_value, | 38 Atomic32 old_value, |
27 Atomic32 new_value) { | 39 Atomic32 new_value) { |
28 LONG result = InterlockedCompareExchange( | 40 return __sync_val_compare_and_swap(ptr, old_value, new_value); |
29 reinterpret_cast<volatile LONG*>(ptr), | |
30 static_cast<LONG>(new_value), | |
31 static_cast<LONG>(old_value)); | |
32 return static_cast<Atomic32>(result); | |
33 } | 41 } |
34 | 42 |
35 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, | 43 inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, |
36 Atomic32 new_value) { | 44 Atomic32 new_value) { |
37 LONG result = InterlockedExchange( | 45 Atomic32 old_value; |
38 reinterpret_cast<volatile LONG*>(ptr), | 46 do { |
39 static_cast<LONG>(new_value)); | 47 old_value = *ptr; |
40 return static_cast<Atomic32>(result); | 48 } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false); |
41 } | 49 return old_value; |
42 | |
43 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, | |
44 Atomic32 increment) { | |
45 return InterlockedExchangeAdd( | |
46 reinterpret_cast<volatile LONG*>(ptr), | |
47 static_cast<LONG>(increment)) + increment; | |
48 } | 50 } |
49 | 51 |
50 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, | 52 inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, |
51 Atomic32 increment) { | 53 Atomic32 increment) { |
52 return Barrier_AtomicIncrement(ptr, increment); | 54 return Barrier_AtomicIncrement(ptr, increment); |
53 } | 55 } |
54 | 56 |
55 #if !(defined(_MSC_VER) && _MSC_VER >= 1400) | 57 inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, |
56 #error "We require at least vs2005 for MemoryBarrier" | 58 Atomic32 increment) { |
57 #endif | 59 for (;;) { |
58 inline void MemoryBarrier() { | 60 Atomic32 old_value = *ptr; |
59 #if defined(V8_HOST_ARCH_64_BIT) | 61 Atomic32 new_value = old_value + increment; |
60 // See #undef and note at the top of this file. | 62 if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) { |
61 __faststorefence(); | 63 return new_value; |
62 #else | 64 // The exchange took place as expected. |
63 // We use MemoryBarrier from WinNT.h | 65 } |
64 ::MemoryBarrier(); | 66 // Otherwise, *ptr changed mid-loop and we need to retry. |
65 #endif | 67 } |
66 } | 68 } |
67 | 69 |
68 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, | 70 inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, |
69 Atomic32 old_value, | 71 Atomic32 old_value, |
70 Atomic32 new_value) { | 72 Atomic32 new_value) { |
71 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 73 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
72 } | 74 } |
73 | 75 |
74 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, | 76 inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, |
75 Atomic32 old_value, | 77 Atomic32 old_value, |
76 Atomic32 new_value) { | 78 Atomic32 new_value) { |
77 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | 79 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); |
78 } | 80 } |
79 | 81 |
80 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { | 82 inline void NoBarrier_Store(volatile Atomic8* ptr, Atomic8 value) { |
81 *ptr = value; | 83 *ptr = value; |
82 } | 84 } |
83 | 85 |
84 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { | 86 inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { |
85 *ptr = value; | 87 *ptr = value; |
86 } | 88 } |
87 | 89 |
90 inline void MemoryBarrier() { | |
91 __asm__ __volatile__("sync" : : : "memory"); | |
92 } | |
93 | |
88 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { | 94 inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { |
89 NoBarrier_AtomicExchange(ptr, value); | 95 *ptr = value; |
90 // acts as a barrier in this implementation | 96 MemoryBarrier(); |
91 } | 97 } |
92 | 98 |
93 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { | 99 inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { |
94 *ptr = value; // works w/o barrier for current Intel chips as of June 2005 | 100 MemoryBarrier(); |
95 // See comments in Atomic64 version of Release_Store() below. | 101 *ptr = value; |
96 } | 102 } |
97 | 103 |
98 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { | 104 inline Atomic8 NoBarrier_Load(volatile const Atomic8* ptr) { |
99 return *ptr; | 105 return *ptr; |
100 } | 106 } |
101 | 107 |
102 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { | 108 inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { |
103 return *ptr; | 109 return *ptr; |
104 } | 110 } |
105 | 111 |
106 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { | 112 inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { |
107 Atomic32 value = *ptr; | 113 Atomic32 value = *ptr; |
114 MemoryBarrier(); | |
108 return value; | 115 return value; |
109 } | 116 } |
110 | 117 |
111 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { | 118 inline Atomic32 Release_Load(volatile const Atomic32* ptr) { |
112 MemoryBarrier(); | 119 MemoryBarrier(); |
113 return *ptr; | 120 return *ptr; |
114 } | 121 } |
115 | 122 |
116 #if defined(_WIN64) | 123 #ifdef V8_TARGET_ARCH_PPC64 |
117 | |
118 // 64-bit low-level operations on 64-bit platform. | |
119 | |
120 STATIC_ASSERT(sizeof(Atomic64) == sizeof(PVOID)); | |
121 | |
122 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, | 124 inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, |
123 Atomic64 old_value, | 125 Atomic64 old_value, |
124 Atomic64 new_value) { | 126 Atomic64 new_value) { |
125 PVOID result = InterlockedCompareExchangePointer( | 127 return __sync_val_compare_and_swap(ptr, old_value, new_value); |
126 reinterpret_cast<volatile PVOID*>(ptr), | |
127 reinterpret_cast<PVOID>(new_value), reinterpret_cast<PVOID>(old_value)); | |
128 return reinterpret_cast<Atomic64>(result); | |
129 } | 128 } |
130 | 129 |
131 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, | 130 inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, |
132 Atomic64 new_value) { | 131 Atomic64 new_value) { |
133 PVOID result = InterlockedExchangePointer( | 132 Atomic64 old_value; |
134 reinterpret_cast<volatile PVOID*>(ptr), | 133 do { |
135 reinterpret_cast<PVOID>(new_value)); | 134 old_value = *ptr; |
136 return reinterpret_cast<Atomic64>(result); | 135 } while (__sync_bool_compare_and_swap(ptr, old_value, new_value) == false); |
137 } | 136 return old_value; |
138 | |
139 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | |
140 Atomic64 increment) { | |
141 return InterlockedExchangeAdd64( | |
142 reinterpret_cast<volatile LONGLONG*>(ptr), | |
143 static_cast<LONGLONG>(increment)) + increment; | |
144 } | 137 } |
145 | 138 |
146 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, | 139 inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, |
147 Atomic64 increment) { | 140 Atomic64 increment) { |
148 return Barrier_AtomicIncrement(ptr, increment); | 141 return Barrier_AtomicIncrement(ptr, increment); |
149 } | 142 } |
150 | 143 |
144 inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, | |
145 Atomic64 increment) { | |
146 for (;;) { | |
147 Atomic64 old_value = *ptr; | |
148 Atomic64 new_value = old_value + increment; | |
149 if (__sync_bool_compare_and_swap(ptr, old_value, new_value)) { | |
150 return new_value; | |
151 // The exchange took place as expected. | |
152 } | |
153 // Otherwise, *ptr changed mid-loop and we need to retry. | |
154 } | |
155 } | |
156 | |
157 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | |
158 Atomic64 old_value, | |
159 Atomic64 new_value) { | |
160 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
161 } | |
162 | |
163 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | |
164 Atomic64 old_value, | |
165 Atomic64 new_value) { | |
166 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
167 } | |
168 | |
151 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { | 169 inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { |
152 *ptr = value; | 170 *ptr = value; |
153 } | 171 } |
154 | 172 |
155 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { | 173 inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { |
156 NoBarrier_AtomicExchange(ptr, value); | 174 *ptr = value; |
157 // acts as a barrier in this implementation | 175 MemoryBarrier(); |
158 } | 176 } |
159 | 177 |
160 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { | 178 inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { |
161 *ptr = value; // works w/o barrier for current Intel chips as of June 2005 | 179 MemoryBarrier(); |
162 | 180 *ptr = value; |
163 // When new chips come out, check: | |
164 // IA-32 Intel Architecture Software Developer's Manual, Volume 3: | |
165 // System Programming Guide, Chatper 7: Multiple-processor management, | |
166 // Section 7.2, Memory Ordering. | |
167 // Last seen at: | |
168 // http://developer.intel.com/design/pentium4/manuals/index_new.htm | |
169 } | 181 } |
170 | 182 |
171 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { | 183 inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { |
172 return *ptr; | 184 return *ptr; |
173 } | 185 } |
174 | 186 |
175 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { | 187 inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { |
176 Atomic64 value = *ptr; | 188 Atomic64 value = *ptr; |
189 MemoryBarrier(); | |
177 return value; | 190 return value; |
178 } | 191 } |
179 | 192 |
180 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { | 193 inline Atomic64 Release_Load(volatile const Atomic64* ptr) { |
181 MemoryBarrier(); | 194 MemoryBarrier(); |
182 return *ptr; | 195 return *ptr; |
183 } | 196 } |
184 | 197 |
185 inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, | 198 #endif |
186 Atomic64 old_value, | |
187 Atomic64 new_value) { | |
188 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
189 } | |
190 | |
191 inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, | |
192 Atomic64 old_value, | |
193 Atomic64 new_value) { | |
194 return NoBarrier_CompareAndSwap(ptr, old_value, new_value); | |
195 } | |
196 | |
197 | |
198 #endif // defined(_WIN64) | |
199 | 199 |
200 } } // namespace v8::base | 200 } } // namespace v8::base |
201 | 201 |
202 #endif // V8_BASE_ATOMICOPS_INTERNALS_X86_MSVC_H_ | 202 #endif // V8_BASE_ATOMICOPS_INTERNALS_PPC_H_ |
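For context, a minimal sketch of how callers would exercise the primitives this patch implements, going through the public atomicops.h interface rather than this internals header (the include path and the flag/payload names are illustrative assumptions, not part of the patch):

```cpp
#include "src/base/atomicops.h"  // public API; the internals header above is
                                 // not meant to be included directly

namespace {

// Hypothetical flag and payload used only for this illustration.
v8::base::Atomic32 g_ready = 0;
v8::base::Atomic32 g_payload = 0;

void Producer() {
  v8::base::NoBarrier_Store(&g_payload, 42);
  // On PPC, Release_Store issues a "sync" before the store, so the payload
  // write above becomes visible before the flag does.
  v8::base::Release_Store(&g_ready, 1);
}

void Consumer() {
  // Acquire_Load issues a "sync" after the load, so once the flag reads as 1
  // the payload load below observes the producer's write.
  while (v8::base::Acquire_Load(&g_ready) == 0) {
    // Spin; a real caller would yield or wait instead of busy-looping.
  }
  v8::base::Atomic32 value = v8::base::NoBarrier_Load(&g_payload);
  static_cast<void>(value);
}

}  // namespace
```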