OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. | 2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. |
3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) | 3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) |
4 * | 4 * |
5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
7 * are met: | 7 * are met: |
8 * | 8 * |
9 * 1. Redistributions of source code must retain the above copyright | 9 * 1. Redistributions of source code must retain the above copyright |
10 * notice, this list of conditions and the following disclaimer. | 10 * notice, this list of conditions and the following disclaimer. |
(...skipping 13 matching lines...) | |
24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND | 24 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND |
25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 25 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF | 26 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF |
27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 27 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
28 */ | 28 */ |
29 | 29 |
30 #ifndef Atomics_h | 30 #ifndef Atomics_h |
31 #define Atomics_h | 31 #define Atomics_h |
32 | 32 |
33 #include "wtf/Assertions.h" | 33 #include "wtf/Assertions.h" |
34 #include "wtf/CPU.h" | |
34 | 35 |
35 #include <stdint.h> | 36 #include <stdint.h> |
36 | 37 |
37 #if COMPILER(MSVC) | 38 #if COMPILER(MSVC) |
38 #include <windows.h> | 39 #include <windows.h> |
39 #endif | 40 #endif |
40 | 41 |
42 #if defined(THREAD_SANITIZER) | |
43 #include <sanitizer/tsan_interface_atomic.h> | |
44 #endif | |
45 | |
41 namespace WTF { | 46 namespace WTF { |
42 | 47 |
43 #if COMPILER(MSVC) | 48 #if COMPILER(MSVC) |
44 | 49 |
45 // atomicAdd returns the result of the addition. | 50 // atomicAdd returns the result of the addition. |
46 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) | 51 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) |
47 { | 52 { |
48 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; | 53 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; |
49 } | 54 } |
50 | 55 |
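InterlockedExchangeAdd returns the value the addend held before the addition, so the trailing + increment converts it into the post-add result that atomicAdd promises. A minimal usage sketch (the refCount counter and the one-time-init scenario are illustrative, not from this patch):

    // Hypothetical caller: bump a shared counter and observe the new value.
    static int volatile refCount = 0;

    void addRef()
    {
        if (atomicAdd(&refCount, 1) == 1) {
            // atomicAdd returned the post-increment value, so a result of 1
            // means this call performed the 0 -> 1 transition.
        }
    }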
(...skipping 40 matching lines...) |
91 int ret = __sync_lock_test_and_set(ptr, 1); | 96 int ret = __sync_lock_test_and_set(ptr, 1); |
92 ASSERT(!ret || ret == 1); | 97 ASSERT(!ret || ret == 1); |
93 return ret; | 98 return ret; |
94 } | 99 } |
95 | 100 |
96 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) | 101 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) |
97 { | 102 { |
98 ASSERT(*ptr == 1); | 103 ASSERT(*ptr == 1); |
99 __sync_lock_release(ptr); | 104 __sync_lock_release(ptr); |
100 } | 105 } |
106 #endif | |
107 | |
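__sync_lock_test_and_set is an acquire barrier and __sync_lock_release a release barrier, so atomicTestAndSetToOne and atomicSetOneToZero bracket a critical section correctly. A minimal spinlock sketch built on them (the SpinLock class is illustrative, not part of this patch):

    // Busy-wait lock: the flag is 0 when free and 1 when held.
    class SpinLock {
    public:
        SpinLock() : m_flag(0) { }

        void lock()
        {
            // Spin until this thread performs the 0 -> 1 transition.
            while (atomicTestAndSetToOne(&m_flag)) { }
        }

        void unlock()
        {
            atomicSetOneToZero(&m_flag);
        }

    private:
        int volatile m_flag;
    };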
108 #if defined(THREAD_SANITIZER) | |
109 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) | |
110 { | |
111 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); | |
112 } | |
113 | |
114 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) | |
115 { | |
116 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); | |
117 } | |
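For reference, the __tsan_atomic32_* calls carry the same ordering guarantees as C++11 atomics; in std::atomic terms the pair above is equivalent to the sketch below (a comparison point only, not code from this patch):

    #include <atomic>

    // Same semantics expressed with C++11 atomics.
    void releaseStoreStd(std::atomic<int>* ptr, int value)
    {
        ptr->store(value, std::memory_order_release);
    }

    int acquireLoadStd(const std::atomic<int>* ptr)
    {
        return ptr->load(std::memory_order_acquire);
    }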
118 #else | |
119 | |
120 #if CPU(X86) || CPU(X86_64) | |
121 // Only a compiler barrier is needed. |
122 #if COMPILER(MSVC) | |
123 // Starting with Visual Studio 2005, the compiler guarantees acquire and |
124 // release semantics for operations on volatile variables. See the MSDN |
125 // entry for the MemoryBarrier macro. |
126 #define MEMORY_BARRIER() | |
127 #else | |
128 #define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory") | |
129 #endif | |
130 #elif CPU(ARM) && (OS(LINUX) || OS(ANDROID)) | |
131 // On ARM, __sync_synchronize generates a dmb, which is very expensive on |
132 // single-core devices that don't actually need it. Avoid the cost by |
133 // calling into the kuser_memory_barrier helper. |
134 inline void memoryBarrier() | |
135 { | |
136 // Note: This is a function call, which is also an implicit compiler barrier. |
137 typedef void (*KernelMemoryBarrierFunc)(); | |
138 ((KernelMemoryBarrierFunc)0xffff0fa0)(); | |
139 } | |
140 #define MEMORY_BARRIER() memoryBarrier() | |
141 #else | |
142 // Fall back to the compiler intrinsic on all other platforms. |
143 #define MEMORY_BARRIER() __sync_synchronize() | |
144 #endif | |
145 | |
146 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) | |
147 { | |
148 MEMORY_BARRIER(); | |
wibling-chromium 2014/03/20 08:05:31:
I don't quite understand this. Wouldn't you normal
Dmitry Vyukov 2014/03/20 09:27:49:
No
149 *ptr = value; | |
150 } | |
151 | |
152 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) | |
153 { | |
154 int value = *ptr; | |
155 MEMORY_BARRIER(); | |
156 return value; | |
157 } | |
158 | |
159 #undef MEMORY_BARRIER | |
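The barrier placement, before the store in releaseStore and after the load in acquireLoad, is what the inline comment above asks about: earlier writes may not sink below the release store, and later reads may not hoist above the acquire load. A minimal publish/consume sketch (g_data and g_ready are illustrative names):

    int g_data = 0;
    int volatile g_ready = 0;

    void producer()
    {
        g_data = 42;               // Ordinary write, ordered by the barrier...
        releaseStore(&g_ready, 1); // ...that precedes the store to the flag.
    }

    void consumer()
    {
        if (acquireLoad(&g_ready)) {
            // The barrier after the load keeps this read from being
            // reordered before the flag check, so g_data is 42 here.
            ASSERT(g_data == 42);
        }
    }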
101 | 160 |
102 #endif | 161 #endif |
103 | 162 |
104 } // namespace WTF | 163 } // namespace WTF |
105 | 164 |
106 using WTF::atomicAdd; | 165 using WTF::atomicAdd; |
107 using WTF::atomicSubtract; | 166 using WTF::atomicSubtract; |
108 using WTF::atomicDecrement; | 167 using WTF::atomicDecrement; |
109 using WTF::atomicIncrement; | 168 using WTF::atomicIncrement; |
110 using WTF::atomicTestAndSetToOne; | 169 using WTF::atomicTestAndSetToOne; |
111 using WTF::atomicSetOneToZero; | 170 using WTF::atomicSetOneToZero; |
171 using WTF::acquireLoad; | |
172 using WTF::releaseStore; | |
112 | 173 |
113 #endif // Atomics_h | 174 #endif // Atomics_h |