| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. | 2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. |
| 3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) | 3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) |
| 4 * | 4 * |
| 5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
| 6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
| 7 * are met: | 7 * are met: |
| 8 * | 8 * |
| 9 * 1. Redistributions of source code must retain the above copyright | 9 * 1. Redistributions of source code must retain the above copyright |
| 10 * notice, this list of conditions and the following disclaimer. | 10 * notice, this list of conditions and the following disclaimer. |
| (...skipping 35 matching lines...) |
| 46 | 46 |
| 47 #if defined(ADDRESS_SANITIZER) | 47 #if defined(ADDRESS_SANITIZER) |
| 48 #include <sanitizer/asan_interface.h> | 48 #include <sanitizer/asan_interface.h> |
| 49 #endif | 49 #endif |
| 50 | 50 |
| 51 namespace WTF { | 51 namespace WTF { |
| 52 | 52 |
| 53 #if COMPILER(MSVC) | 53 #if COMPILER(MSVC) |
| 54 | 54 |
| 55 // atomicAdd returns the result of the addition. | 55 // atomicAdd returns the result of the addition. |
| 56 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) | 56 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { |
| 57 { | 57 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), |
| 58 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; | 58 static_cast<long>(increment)) + |
| 59 increment; |
| 59 } | 60 } |
| 60 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) | 61 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, |
| 61 { | 62 unsigned increment) { |
| 62 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; | 63 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), |
| 64 static_cast<long>(increment)) + |
| 65 increment; |
| 63 } | 66 } |
| 64 #if defined(_WIN64) | 67 #if defined(_WIN64) |
| 65 ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend, unsigned long long increment) | 68 ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend, |
| 66 { | 69 unsigned long long increment) { |
| 67 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), static_cast<long long>(increment)) + increment; | 70 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), |
| 71 static_cast<long long>(increment)) + |
| 72 increment; |
| 68 } | 73 } |
| 69 #endif | 74 #endif |
| 70 | 75 |
| 71 // atomicSubtract returns the result of the subtraction. | 76 // atomicSubtract returns the result of the subtraction. |
| 72 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) | 77 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { |
| 73 { | 78 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), |
| 74 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(-decrement)) - decrement; | 79 static_cast<long>(-decrement)) - |
| 80 decrement; |
| 75 } | 81 } |
| 76 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) | 82 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, |
| 77 { | 83 unsigned decrement) { |
| 78 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), -static_cast<long>(decrement)) - decrement; | 84 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), |
| 85 -static_cast<long>(decrement)) - |
| 86 decrement; |
| 79 } | 87 } |
| 80 #if defined(_WIN64) | 88 #if defined(_WIN64) |
| 81 ALWAYS_INLINE unsigned long long atomicSubtract(unsigned long long volatile* addend, unsigned long long decrement) | 89 ALWAYS_INLINE unsigned long long atomicSubtract( |
| 82 { | 90 unsigned long long volatile* addend, |
| 83 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), -static_cast<long long>(decrement)) - decrement; | 92 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), |
| 92 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), |
| 93 -static_cast<long long>(decrement)) - |
| 94 decrement; |
| 84 } | 95 } |
| 85 #endif | 96 #endif |
| 86 | 97 |
| 87 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); } | 98 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { |
| 88 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); } | 99 return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); |
| 89 | 100 } |
| 90 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); } | 101 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { |
| 91 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); } | 102 return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); |
| 92 | |
| 93 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) | |
| 94 { | |
| 95 int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1); | |
| 96 ASSERT(!ret || ret == 1); | |
| 97 return ret; | |
| 98 } | 103 } |
| 99 | 104 |
| 100 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) | 105 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { |
| 101 { | 106 return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); |
| 102 ASSERT(*ptr == 1); | 107 } |
| 103 InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0); | 108 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { |
| 109 return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); |
| 110 } |
| 111 |
| 112 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { |
| 113 int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1); |
| 114 ASSERT(!ret || ret == 1); |
| 115 return ret; |
| 116 } |
| 117 |
| 118 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { |
| 119 ASSERT(*ptr == 1); |
| 120 InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0); |
| 104 } | 121 } |
| 105 | 122 |
| 106 #else | 123 #else |
| 107 | 124 |
| 108 // atomicAdd returns the result of the addition. | 125 // atomicAdd returns the result of the addition. |
| 109 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { return __sync_add_and_fetch(addend, increment); } | 126 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { |
| 110 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) { return __sync_add_and_fetch(addend, increment); } | 127 return __sync_add_and_fetch(addend, increment); |
| 111 ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend, unsigned long increment) { return __sync_add_and_fetch(addend, increment); } | 128 } |
| 129 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, |
| 130 unsigned increment) { |
| 131 return __sync_add_and_fetch(addend, increment); |
| 132 } |
| 133 ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend, |
| 134 unsigned long increment) { |
| 135 return __sync_add_and_fetch(addend, increment); |
| 136 } |
| 112 // atomicSubtract returns the result of the subtraction. | 137 // atomicSubtract returns the result of the subtraction. |
| 113 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { return __sync_sub_and_fetch(addend, decrement); } | 138 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { |
| 114 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) { return __sync_sub_and_fetch(addend, decrement); } | 139 return __sync_sub_and_fetch(addend, decrement); |
| 115 ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend, unsigned long decrement) { return __sync_sub_and_fetch(addend, decrement); } | 140 } |
| 116 | 141 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, |
| 117 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return atomicAdd(addend, 1); } | 142 unsigned decrement) { |
| 118 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return atomicSubtract(addend, 1); } | 143 return __sync_sub_and_fetch(addend, decrement); |
| 119 | 144 } |
| 120 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); } | 145 ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend, |
| 121 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); } | 146 unsigned long decrement) { |
| 122 | 147 return __sync_sub_and_fetch(addend, decrement); |
| 123 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) | |
| 124 { | |
| 125 int ret = __sync_lock_test_and_set(ptr, 1); | |
| 126 ASSERT(!ret || ret == 1); | |
| 127 return ret; | |
| 128 } | 148 } |
| 129 | 149 |
| 130 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) | 150 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { |
| 131 { | 151 return atomicAdd(addend, 1); |
| 132 ASSERT(*ptr == 1); | 152 } |
| 133 __sync_lock_release(ptr); | 153 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { |
| 154 return atomicSubtract(addend, 1); |
| 155 } |
| 156 |
| 157 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { |
| 158 return __sync_add_and_fetch(addend, 1); |
| 159 } |
| 160 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { |
| 161 return __sync_sub_and_fetch(addend, 1); |
| 162 } |
| 163 |
| 164 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { |
| 165 int ret = __sync_lock_test_and_set(ptr, 1); |
| 166 ASSERT(!ret || ret == 1); |
| 167 return ret; |
| 168 } |
| 169 |
| 170 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { |
| 171 ASSERT(*ptr == 1); |
| 172 __sync_lock_release(ptr); |
| 134 } | 173 } |
| 135 #endif | 174 #endif |
| 136 | 175 |
| 137 #if defined(THREAD_SANITIZER) | 176 #if defined(THREAD_SANITIZER) |
| 138 // The definitions below assume an LP64 data model. This is fine because | 177 // The definitions below assume an LP64 data model. This is fine because |
| 139 // TSan is only supported on x86_64 Linux. | 178 // TSan is only supported on x86_64 Linux. |
| 140 #if CPU(64BIT) && OS(LINUX) | 179 #if CPU(64BIT) && OS(LINUX) |
| 141 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) | 180 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) { |
| 142 { | 181 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); |
| 143 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); | |
| 144 } | 182 } |
| 145 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) | 183 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) { |
| 146 { | 184 __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr), |
| 147 __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr), static_cast<int>(value), __tsan_memory_order_release); | 185 static_cast<int>(value), __tsan_memory_order_release); |
| 148 } | 186 } |
| 149 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) | 187 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) { |
| 150 { | 188 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), |
| 151 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | 189 static_cast<__tsan_atomic64>(value), |
| 190 __tsan_memory_order_release); |
| 152 } | 191 } |
| 153 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) | 192 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, |
| 154 { | 193 unsigned long value) { |
| 155 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | 194 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), |
| 195 static_cast<__tsan_atomic64>(value), |
| 196 __tsan_memory_order_release); |
| 156 } | 197 } |
| 157 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) | 198 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, |
| 158 { | 199 unsigned long long value) { |
| 159 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | 200 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), |
| 201 static_cast<__tsan_atomic64>(value), |
| 202 __tsan_memory_order_release); |
| 160 } | 203 } |
| 161 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) | 204 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) { |
| 162 { | 205 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), |
| 163 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), reinterpret_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | 206 reinterpret_cast<__tsan_atomic64>(value), |
| 207 __tsan_memory_order_release); |
| 164 } | 208 } |
| 165 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) | 209 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) { |
| 166 { | 210 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); |
| 167 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); | |
| 168 } | 211 } |
| 169 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) | 212 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) { |
| 170 { | 213 return static_cast<unsigned>(__tsan_atomic32_load( |
| 171 return static_cast<unsigned>(__tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire)); | 214 reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire)); |
| 172 } | 215 } |
| 173 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) | 216 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) { |
| 174 { | 217 return static_cast<long>(__tsan_atomic64_load( |
| 175 return static_cast<long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); | 218 reinterpret_cast<volatile const __tsan_atomic64*>(ptr), |
| 219 __tsan_memory_order_acquire)); |
| 176 } | 220 } |
| 177 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) | 221 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) { |
| 178 { | 222 return static_cast<unsigned long>(__tsan_atomic64_load( |
| 179 return static_cast<unsigned long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); | 223 reinterpret_cast<volatile const __tsan_atomic64*>(ptr), |
| 224 __tsan_memory_order_acquire)); |
| 180 } | 225 } |
| 181 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) | 226 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) { |
| 182 { | 227 return reinterpret_cast<void*>(__tsan_atomic64_load( |
| 183 return reinterpret_cast<void*>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); | 228 reinterpret_cast<volatile const __tsan_atomic64*>(ptr), |
| 229 __tsan_memory_order_acquire)); |
| 184 } | 230 } |
| 185 | 231 |
| 186 // Do not use noBarrierStore/noBarrierLoad for synchronization. | 232 // Do not use noBarrierStore/noBarrierLoad for synchronization. |
| 187 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) | 233 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) { |
| 188 { | 234 static_assert(sizeof(int) == sizeof(float), |
| 189 static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); | 235 "int and float are different sizes"); |
| 190 union { | 236 union { |
| 191 int ivalue; | 237 int ivalue; |
| 192 float fvalue; | 238 float fvalue; |
| 193 } u; | 239 } u; |
| 194 u.fvalue = value; | 240 u.fvalue = value; |
| 195 __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr), u.ivalue, __tsan_memory_order_relaxed); | 241 __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr), |
| 242 u.ivalue, __tsan_memory_order_relaxed); |
| 196 } | 243 } |
| 197 | 244 |
| 198 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) | 245 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) { |
| 199 { | 246 static_assert(sizeof(int) == sizeof(float), |
| 200 static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); | 247 "int and float are different sizes"); |
| 201 union { | 248 union { |
| 202 int ivalue; | 249 int ivalue; |
| 203 float fvalue; | 250 float fvalue; |
| 204 } u; | 251 } u; |
| 205 u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_relaxed); | 252 u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), |
| 206 return u.fvalue; | 253 __tsan_memory_order_relaxed); |
| 254 return u.fvalue; |
| 207 } | 255 } |
| 208 #endif | 256 #endif |
| 209 | 257 |
| 210 #else // defined(THREAD_SANITIZER) | 258 #else // defined(THREAD_SANITIZER) |
| 211 | 259 |
| 212 #if CPU(X86) || CPU(X86_64) | 260 #if CPU(X86) || CPU(X86_64) |
| 213 // Only compiler barrier is needed. | 261 // Only compiler barrier is needed. |
| 214 #if COMPILER(MSVC) | 262 #if COMPILER(MSVC) |
| 215 // Starting from Visual Studio 2005 compiler guarantees acquire and release | 263 // Starting from Visual Studio 2005 compiler guarantees acquire and release |
| 216 // semantics for operations on volatile variables. See MSDN entry for | 264 // semantics for operations on volatile variables. See MSDN entry for |
| 217 // MemoryBarrier macro. | 265 // MemoryBarrier macro. |
| 218 #define MEMORY_BARRIER() | 266 #define MEMORY_BARRIER() |
| 219 #else | 267 #else |
| 220 #define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory") | 268 #define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory") |
| 221 #endif | 269 #endif |
| 222 #elif CPU(ARM) && (OS(LINUX) || OS(ANDROID)) | 270 #elif CPU(ARM) && (OS(LINUX) || OS(ANDROID)) |
| 223 // On ARM __sync_synchronize generates dmb which is very expensive on single | 271 // On ARM __sync_synchronize generates dmb which is very expensive on single |
| 224 // core devices which don't actually need it. Avoid the cost by calling into | 272 // core devices which don't actually need it. Avoid the cost by calling into |
| 225 // kuser_memory_barrier helper. | 273 // kuser_memory_barrier helper. |
| 226 inline void memoryBarrier() | 274 inline void memoryBarrier() { |
| 227 { | 275 // Note: This is a function call, which is also an implicit compiler barrier. |
| 228 // Note: This is a function call, which is also an implicit compiler barrier. | 276 typedef void (*KernelMemoryBarrierFunc)(); |
| 229 typedef void (*KernelMemoryBarrierFunc)(); | 277 ((KernelMemoryBarrierFunc)0xffff0fa0)(); |
| 230 ((KernelMemoryBarrierFunc)0xffff0fa0)(); | |
| 231 } | 278 } |
| 232 #define MEMORY_BARRIER() memoryBarrier() | 279 #define MEMORY_BARRIER() memoryBarrier() |
| 233 #else | 280 #else |
| 234 // Fallback to the compiler intrinsic on all other platforms. | 281 // Fallback to the compiler intrinsic on all other platforms. |
| 235 #define MEMORY_BARRIER() __sync_synchronize() | 282 #define MEMORY_BARRIER() __sync_synchronize() |
| 236 #endif | 283 #endif |
| 237 | 284 |
| 238 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) | 285 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) { |
| 239 { | 286 MEMORY_BARRIER(); |
| 240 MEMORY_BARRIER(); | 287 *ptr = value; |
| 241 *ptr = value; | |
| 242 } | 288 } |
| 243 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) | 289 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) { |
| 244 { | 290 MEMORY_BARRIER(); |
| 245 MEMORY_BARRIER(); | 291 *ptr = value; |
| 246 *ptr = value; | |
| 247 } | 292 } |
| 248 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) | 293 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) { |
| 249 { | 294 MEMORY_BARRIER(); |
| 250 MEMORY_BARRIER(); | 295 *ptr = value; |
| 251 *ptr = value; | |
| 252 } | 296 } |
| 253 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) | 297 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, |
| 254 { | 298 unsigned long value) { |
| 255 MEMORY_BARRIER(); | 299 MEMORY_BARRIER(); |
| 256 *ptr = value; | 300 *ptr = value; |
| 257 } | 301 } |
| 258 #if CPU(64BIT) | 302 #if CPU(64BIT) |
| 259 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) | 303 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, |
| 260 { | 304 unsigned long long value) { |
| 261 MEMORY_BARRIER(); | 305 MEMORY_BARRIER(); |
| 262 *ptr = value; | 306 *ptr = value; |
| 263 } | 307 } |
| 264 #endif | 308 #endif |
| 265 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) | 309 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) { |
| 266 { | 310 MEMORY_BARRIER(); |
| 267 MEMORY_BARRIER(); | 311 *ptr = value; |
| 268 *ptr = value; | |
| 269 } | 312 } |
| 270 | 313 |
| 271 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) | 314 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) { |
| 272 { | 315 int value = *ptr; |
| 273 int value = *ptr; | 316 MEMORY_BARRIER(); |
| 274 MEMORY_BARRIER(); | 317 return value; |
| 275 return value; | |
| 276 } | 318 } |
| 277 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) | 319 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) { |
| 278 { | 320 unsigned value = *ptr; |
| 279 unsigned value = *ptr; | 321 MEMORY_BARRIER(); |
| 280 MEMORY_BARRIER(); | 322 return value; |
| 281 return value; | |
| 282 } | 323 } |
| 283 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) | 324 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) { |
| 284 { | 325 long value = *ptr; |
| 285 long value = *ptr; | 326 MEMORY_BARRIER(); |
| 286 MEMORY_BARRIER(); | 327 return value; |
| 287 return value; | |
| 288 } | 328 } |
| 289 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) | 329 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) { |
| 290 { | 330 unsigned long value = *ptr; |
| 291 unsigned long value = *ptr; | 331 MEMORY_BARRIER(); |
| 292 MEMORY_BARRIER(); | 332 return value; |
| 293 return value; | |
| 294 } | 333 } |
| 295 #if CPU(64BIT) | 334 #if CPU(64BIT) |
| 296 ALWAYS_INLINE unsigned long long acquireLoad(volatile const unsigned long long* ptr) | 335 ALWAYS_INLINE unsigned long long acquireLoad( |
| 297 { | 336 volatile const unsigned long long* ptr) { |
| 298 unsigned long long value = *ptr; | 337 unsigned long long value = *ptr; |
| 299 MEMORY_BARRIER(); | 338 MEMORY_BARRIER(); |
| 300 return value; | 339 return value; |
| 301 } | 340 } |
| 302 #endif | 341 #endif |
| 303 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) | 342 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) { |
| 304 { | 343 void* value = *ptr; |
| 305 void* value = *ptr; | 344 MEMORY_BARRIER(); |
| 306 MEMORY_BARRIER(); | 345 return value; |
| 307 return value; | |
| 308 } | 346 } |
| 309 | 347 |
| 310 // Do not use noBarrierStore/noBarrierLoad for synchronization. | 348 // Do not use noBarrierStore/noBarrierLoad for synchronization. |
| 311 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) | 349 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) { |
| 312 { | 350 *ptr = value; |
| 313 *ptr = value; | |
| 314 } | 351 } |
| 315 | 352 |
| 316 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) | 353 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) { |
| 317 { | 354 float value = *ptr; |
| 318 float value = *ptr; | 355 return value; |
| 319 return value; | |
| 320 } | 356 } |
| 321 | 357 |
| 322 #if defined(ADDRESS_SANITIZER) | 358 #if defined(ADDRESS_SANITIZER) |
| 323 | 359 |
| 324 NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) | 360 NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore( |
| 325 { | 361 volatile unsigned* ptr, |
| 326 MEMORY_BARRIER(); | 362 unsigned value) { |
| 327 *ptr = value; | 363 MEMORY_BARRIER(); |
| 364 *ptr = value; |
| 328 } | 365 } |
| 329 | 366 |
| 330 NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) | 367 NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad( |
| 331 { | 368 volatile const unsigned* ptr) { |
| 332 unsigned value = *ptr; | 369 unsigned value = *ptr; |
| 333 MEMORY_BARRIER(); | 370 MEMORY_BARRIER(); |
| 334 return value; | 371 return value; |
| 335 } | 372 } |
| 336 | 373 |
| 337 #endif // defined(ADDRESS_SANITIZER) | 374 #endif // defined(ADDRESS_SANITIZER) |
| 338 | 375 |
| 339 #undef MEMORY_BARRIER | 376 #undef MEMORY_BARRIER |
| 340 | 377 |
| 341 #endif | 378 #endif |
| 342 | 379 |
| 343 #if !defined(ADDRESS_SANITIZER) | 380 #if !defined(ADDRESS_SANITIZER) |
| 344 | 381 |
| 345 ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) | 382 ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, |
| 346 { | 383 unsigned value) { |
| 347 releaseStore(ptr, value); | 384 releaseStore(ptr, value); |
| 348 } | 385 } |
| 349 | 386 |
| 350 ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) | 387 ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) { |
| 351 { | 388 return acquireLoad(ptr); |
| 352 return acquireLoad(ptr); | |
| 353 } | 389 } |
| 354 | 390 |
| 355 #endif | 391 #endif |
| 356 | 392 |
| 357 } // namespace WTF | 393 } // namespace WTF |
| 358 | 394 |
| 359 using WTF::atomicAdd; | 395 using WTF::atomicAdd; |
| 360 using WTF::atomicSubtract; | 396 using WTF::atomicSubtract; |
| 361 using WTF::atomicDecrement; | 397 using WTF::atomicDecrement; |
| 362 using WTF::atomicIncrement; | 398 using WTF::atomicIncrement; |
| 363 using WTF::atomicTestAndSetToOne; | 399 using WTF::atomicTestAndSetToOne; |
| 364 using WTF::atomicSetOneToZero; | 400 using WTF::atomicSetOneToZero; |
| 365 using WTF::acquireLoad; | 401 using WTF::acquireLoad; |
| 366 using WTF::releaseStore; | 402 using WTF::releaseStore; |
| 367 using WTF::noBarrierLoad; | 403 using WTF::noBarrierLoad; |
| 368 using WTF::noBarrierStore; | 404 using WTF::noBarrierStore; |
| 369 | 405 |
| 370 // These methods allow loading from and storing to poisoned memory. Only | 406 // These methods allow loading from and storing to poisoned memory. Only |
| 371 // use these methods if you know what you are doing since they will | 407 // use these methods if you know what you are doing since they will |
| 372 // silence use-after-poison errors from ASan. | 408 // silence use-after-poison errors from ASan. |
| 373 using WTF::asanUnsafeAcquireLoad; | 409 using WTF::asanUnsafeAcquireLoad; |
| 374 using WTF::asanUnsafeReleaseStore; | 410 using WTF::asanUnsafeReleaseStore; |
| 375 | 411 |
| 376 #endif // Atomics_h | 412 #endif // Atomics_h |
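For context when reading the diff above, here is a minimal usage sketch of the acquire/release and test-and-set helpers this header exports. It is not part of this CL; the "wtf/Atomics.h" include path and the spin-wait are illustrative assumptions only.

#include "wtf/Atomics.h"  // assumed include path, not verified against this CL

// One-shot publication: the producer writes the payload and then release-stores
// the flag; the consumer acquire-loads the flag before reading the payload.
static int payload = 0;
static volatile int ready = 0;  // 0 = not yet published, 1 = published

void producerThread() {
  payload = 42;                  // plain store, ordered before the flag store below
  WTF::releaseStore(&ready, 1);  // barrier, then store: publishes payload
}

int consumerThread() {
  while (!WTF::acquireLoad(&ready)) {  // load, then barrier (spin shown only for illustration)
  }
  return payload;  // observes 42 once ready == 1 has been seen
}

// atomicTestAndSetToOne/atomicSetOneToZero as a tiny try-lock: the returned old
// value is 0 when this caller took the lock and 1 when it was already held.
static volatile int lockWord = 0;

bool tryLock() { return !WTF::atomicTestAndSetToOne(&lockWord); }
void unlock() { WTF::atomicSetOneToZero(&lockWord); }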
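A second sketch, also illustrative only and using a hypothetical RefCountedThing type, shows why it matters that atomicAdd/atomicIncrement return the new value rather than the old one:

#include "wtf/Atomics.h"  // assumed include path, not verified against this CL

struct RefCountedThing {
  volatile int refCount;  // starts at 1 for the creating owner
};

void ref(RefCountedThing* thing) {
  WTF::atomicIncrement(&thing->refCount);
}

void deref(RefCountedThing* thing) {
  // atomicDecrement returns the decremented (new) value, so 0 means this
  // caller just released the last reference.
  if (!WTF::atomicDecrement(&thing->refCount))
    delete thing;
}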