OLD | NEW |
| (Empty) |
1 /* | |
2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. | |
3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) | |
4 * | |
5 * Redistribution and use in source and binary forms, with or without | |
6 * modification, are permitted provided that the following conditions | |
7 * are met: | |
8 * | |
9 * 1. Redistributions of source code must retain the above copyright | |
10 * notice, this list of conditions and the following disclaimer. | |
11 * 2. Redistributions in binary form must reproduce the above copyright | |
12 * notice, this list of conditions and the following disclaimer in the | |
13 * documentation and/or other materials provided with the distribution. | |
14 * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of | |
15 * its contributors may be used to endorse or promote products derived | |
16 * from this software without specific prior written permission. | |
17 * | |
18 * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY | |
19 * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED | |
20 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE | |
21 * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY | |
22 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES | |
23 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; | |
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | |
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | |
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | |
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | |
28 * | |
29 * | |
30 * Note: The implementations of InterlockedIncrement and InterlockedDecrement ar
e based | |
31 * on atomic_increment and atomic_exchange_and_add from the Boost C++ Library. T
he license | |
32 * is virtually identical to the Apple license above but is included here for co
mpleteness. | |
33 * | |
34 * Boost Software License - Version 1.0 - August 17th, 2003 | |
35 * | |
36 * Permission is hereby granted, free of charge, to any person or organization | |
37 * obtaining a copy of the software and accompanying documentation covered by | |
38 * this license (the "Software") to use, reproduce, display, distribute, | |
39 * execute, and transmit the Software, and to prepare derivative works of the | |
40 * Software, and to permit third-parties to whom the Software is furnished to | |
41 * do so, all subject to the following: | |
42 * | |
43 * The copyright notices in the Software and this entire statement, including | |
44 * the above license grant, this restriction and the following disclaimer, | |
45 * must be included in all copies of the Software, in whole or in part, and | |
46 * all derivative works of the Software, unless such copies or derivative | |
47 * works are solely in the form of machine-executable object code generated by | |
48 * a source language processor. | |
49 * | |
50 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR | |
51 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, | |
52 * FITNESS FOR A PARTICULAR PURPOSE, TITLE AND NON-INFRINGEMENT. IN NO EVENT | |
53 * SHALL THE COPYRIGHT HOLDERS OR ANYONE DISTRIBUTING THE SOFTWARE BE LIABLE | |
54 * FOR ANY DAMAGES OR OTHER LIABILITY, WHETHER IN CONTRACT, TORT OR OTHERWISE, | |
55 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER | |
56 * DEALINGS IN THE SOFTWARE. | |
57 */ | |
58 | |
59 #ifndef Atomics_h | |
60 #define Atomics_h | |
61 | |
62 #include <wtf/Platform.h> | |
63 #include <wtf/StdLibExtras.h> | |
64 #include <wtf/UnusedParam.h> | |
65 | |
66 #if OS(WINDOWS) | |
67 #include <windows.h> | |
68 #elif OS(QNX) | |
69 #include <atomic.h> | |
70 #elif OS(ANDROID) | |
71 #include <sys/atomics.h> | |
72 #endif | |
73 | |
74 namespace WTF { | |
75 | |
#if OS(WINDOWS)
#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1

// atomicIncrement()/atomicDecrement() atomically add/subtract 1 to *addend and
// return the resulting (post-operation) value, whatever the underlying
// platform primitive returns.
#if OS(WINCE)
// WinCE's Interlocked intrinsics take a plain (non-volatile) long*.
inline int atomicIncrement(int* addend) { return InterlockedIncrement(reinterpret_cast<long*>(addend)); }
inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); }
#elif COMPILER(MINGW) || COMPILER(MSVC7_OR_LOWER)
// These toolchains also declare the Interlocked intrinsics with non-volatile
// pointer parameters.
inline int atomicIncrement(int* addend) { return InterlockedIncrement(reinterpret_cast<long*>(addend)); }
inline int atomicDecrement(int* addend) { return InterlockedDecrement(reinterpret_cast<long*>(addend)); }

inline int64_t atomicIncrement(int64_t* addend) { return InterlockedIncrement64(reinterpret_cast<long long*>(addend)); }
inline int64_t atomicDecrement(int64_t* addend) { return InterlockedDecrement64(reinterpret_cast<long long*>(addend)); }
#else
// Modern MSVC declares the Interlocked intrinsics with volatile pointers.
inline int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); }
inline int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); }

inline int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); }
inline int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); }
#endif

#elif OS(QNX)
#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1

// Note, atomic_{add, sub}_value() return the previous value of addend's content,
// so we add/subtract 1 to convert to the post-operation value.
inline int atomicIncrement(int volatile* addend) { return static_cast<int>(atomic_add_value(reinterpret_cast<unsigned volatile*>(addend), 1)) + 1; }
inline int atomicDecrement(int volatile* addend) { return static_cast<int>(atomic_sub_value(reinterpret_cast<unsigned volatile*>(addend), 1)) - 1; }

#elif OS(ANDROID)
#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1

// Note, __atomic_{inc, dec}() return the previous value of addend's content,
// so we add/subtract 1 to convert to the post-operation value.
inline int atomicIncrement(int volatile* addend) { return __atomic_inc(addend) + 1; }
inline int atomicDecrement(int volatile* addend) { return __atomic_dec(addend) - 1; }

#elif COMPILER(GCC) && !CPU(SPARC64) // sizeof(_Atomic_word) != sizeof(int) on sparc64 gcc
#define WTF_USE_LOCKFREE_THREADSAFEREFCOUNTED 1

// GCC's __sync_{add,sub}_and_fetch() already return the new value and act as
// full memory barriers.
inline int atomicIncrement(int volatile* addend) { return __sync_add_and_fetch(addend, 1); }
inline int atomicDecrement(int volatile* addend) { return __sync_sub_and_fetch(addend, 1); }

inline int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); }
inline int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); }

#endif
120 | |
#if OS(WINDOWS)
// Atomically replace *location with newValue iff it currently equals expected.
// Returns true if the swap happened. Implemented with the Win32 interlocked
// compare-exchange intrinsics.
inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, unsigned newValue)
{
#if OS(WINCE)
    // WinCE's InterlockedCompareExchange takes a non-volatile LONG*, hence the
    // extra const_cast-style volatility stripping before the reinterpret_cast.
    return InterlockedCompareExchange(reinterpret_cast<LONG*>(const_cast<unsigned*>(location)), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
#else
    return InterlockedCompareExchange(reinterpret_cast<LONG volatile*>(location), static_cast<LONG>(newValue), static_cast<LONG>(expected)) == static_cast<LONG>(expected);
#endif
}

// Pointer-sized overload of the above.
inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* newValue)
{
    // InterlockedCompareExchangePointer returns the prior value of *location;
    // the swap succeeded iff that prior value was the expected one.
    return InterlockedCompareExchangePointer(location, newValue, expected) == expected;
}
135 #else // OS(WINDOWS) --> not windows | |
136 #if COMPILER(GCC) && !COMPILER(CLANG) // Work around a gcc bug | |
137 inline bool weakCompareAndSwap(volatile unsigned* location, unsigned expected, u
nsigned newValue) | |
138 #else | |
139 inline bool weakCompareAndSwap(unsigned* location, unsigned expected, unsigned n
ewValue) | |
140 #endif | |
141 { | |
142 #if ENABLE(COMPARE_AND_SWAP) | |
143 #if CPU(X86) || CPU(X86_64) | |
144 unsigned char result; | |
145 asm volatile( | |
146 "lock; cmpxchgl %3, %2\n\t" | |
147 "sete %1" | |
148 : "+a"(expected), "=q"(result), "+m"(*location) | |
149 : "r"(newValue) | |
150 : "memory" | |
151 ); | |
152 #elif CPU(ARM_THUMB2) | |
153 unsigned tmp; | |
154 unsigned result; | |
155 asm volatile( | |
156 "movw %1, #1\n\t" | |
157 "ldrex %2, %0\n\t" | |
158 "cmp %3, %2\n\t" | |
159 "bne.n 0f\n\t" | |
160 "strex %1, %4, %0\n\t" | |
161 "0:" | |
162 : "+Q"(*location), "=&r"(result), "=&r"(tmp) | |
163 : "r"(expected), "r"(newValue) | |
164 : "memory"); | |
165 result = !result; | |
166 #else | |
167 #error "Bad architecture for compare and swap." | |
168 #endif | |
169 return result; | |
170 #else | |
171 UNUSED_PARAM(location); | |
172 UNUSED_PARAM(expected); | |
173 UNUSED_PARAM(newValue); | |
174 CRASH(); | |
175 return false; | |
176 #endif | |
177 } | |
178 | |
179 inline bool weakCompareAndSwap(void*volatile* location, void* expected, void* ne
wValue) | |
180 { | |
181 #if ENABLE(COMPARE_AND_SWAP) | |
182 #if CPU(X86_64) | |
183 bool result; | |
184 asm volatile( | |
185 "lock; cmpxchgq %3, %2\n\t" | |
186 "sete %1" | |
187 : "+a"(expected), "=q"(result), "+m"(*location) | |
188 : "r"(newValue) | |
189 : "memory" | |
190 ); | |
191 return result; | |
192 #else | |
193 return weakCompareAndSwap(bitwise_cast<unsigned*>(location), bitwise_cast<un
signed>(expected), bitwise_cast<unsigned>(newValue)); | |
194 #endif | |
195 #else // ENABLE(COMPARE_AND_SWAP) | |
196 UNUSED_PARAM(location); | |
197 UNUSED_PARAM(expected); | |
198 UNUSED_PARAM(newValue); | |
199 CRASH(); | |
200 return 0; | |
201 #endif // ENABLE(COMPARE_AND_SWAP) | |
202 } | |
203 #endif // OS(WINDOWS) (end of the not-windows case) | |
204 | |
205 inline bool weakCompareAndSwapUIntPtr(volatile uintptr_t* location, uintptr_t ex
pected, uintptr_t newValue) | |
206 { | |
207 return weakCompareAndSwap(reinterpret_cast<void*volatile*>(location), reinte
rpret_cast<void*>(expected), reinterpret_cast<void*>(newValue)); | |
208 } | |
209 | |
#if CPU(ARM_THUMB2)

// Full data memory barrier (ARM "dmb") after acquiring a lock, so reads and
// writes inside the critical section cannot be reordered before the acquire.
inline void memoryBarrierAfterLock()
{
    asm volatile("dmb" ::: "memory");
}

// Full data memory barrier before releasing a lock, so writes inside the
// critical section are visible before the release.
inline void memoryBarrierBeforeUnlock()
{
    asm volatile("dmb" ::: "memory");
}

#else

// No-ops elsewhere -- presumably the other supported CPUs' lock primitives
// already order memory sufficiently (e.g. x86's strong ordering). NOTE(review):
// rationale inferred, not stated in this file; confirm before relying on it.
inline void memoryBarrierAfterLock() { }
inline void memoryBarrierBeforeUnlock() { }

#endif
228 | |
229 } // namespace WTF | |
230 | |
231 #if USE(LOCKFREE_THREADSAFEREFCOUNTED) | |
232 using WTF::atomicDecrement; | |
233 using WTF::atomicIncrement; | |
234 #endif | |
235 | |
236 #endif // Atomics_h | |
OLD | NEW |