| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. | 2 * Copyright (C) 2007, 2008, 2010, 2012 Apple Inc. All rights reserved. |
| 3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) | 3 * Copyright (C) 2007 Justin Haygood (jhaygood@reaktix.com) |
| 4 * | 4 * |
| 5 * Redistribution and use in source and binary forms, with or without | 5 * Redistribution and use in source and binary forms, with or without |
| 6 * modification, are permitted provided that the following conditions | 6 * modification, are permitted provided that the following conditions |
| 7 * are met: | 7 * are met: |
| 8 * | 8 * |
| 9 * 1. Redistributions of source code must retain the above copyright | 9 * 1. Redistributions of source code must retain the above copyright |
| 10 * notice, this list of conditions and the following disclaimer. | 10 * notice, this list of conditions and the following disclaimer. |
| (...skipping 35 matching lines...) |
| 46 | 46 |
| 47 #if defined(ADDRESS_SANITIZER) | 47 #if defined(ADDRESS_SANITIZER) |
| 48 #include <sanitizer/asan_interface.h> | 48 #include <sanitizer/asan_interface.h> |
| 49 #endif | 49 #endif |
| 50 | 50 |
| 51 namespace WTF { | 51 namespace WTF { |
| 52 | 52 |
| 53 #if COMPILER(MSVC) | 53 #if COMPILER(MSVC) |
| 54 | 54 |
| 55 // atomicAdd returns the result of the addition. | 55 // atomicAdd returns the result of the addition. |
| 56 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) | 56 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { |
| 57 { | 57 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; |
| 58 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; | |
| 59 } | 58 } |
| 60 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) | 59 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) { |
| 61 { | 60 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; |
| 62 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment; | |
| 63 } | 61 } |
| 64 #if defined(_WIN64) | 62 #if defined(_WIN64) |
| 65 ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend, unsigned long long increment) | 63 ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend, unsigned long long increment) { |
| 66 { | 64 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), static_cast<long long>(increment)) + increment; |
| 67 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), static_cast<long long>(increment)) + increment; | |
| 68 } | 65 } |
| 69 #endif | 66 #endif |
| 70 | 67 |
| 71 // atomicSubtract returns the result of the subtraction. | 68 // atomicSubtract returns the result of the subtraction. |
| 72 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) | 69 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { |
| 73 { | 70 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(-decrement)) - decrement; |
| 74 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(-decrement)) - decrement; | |
| 75 } | 71 } |
| 76 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) | 72 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) { |
| 77 { | 73 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), -static_cast<long>(decrement)) - decrement; |
| 78 return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), -static_cast<long>(decrement)) - decrement; | |
| 79 } | 74 } |
| 80 #if defined(_WIN64) | 75 #if defined(_WIN64) |
| 81 ALWAYS_INLINE unsigned long long atomicSubtract(unsigned long long volatile* addend, unsigned long long decrement) | 76 ALWAYS_INLINE unsigned long long atomicSubtract(unsigned long long volatile* addend, unsigned long long decrement) { |
| 82 { | 77 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), -static_cast<long long>(decrement)) - decrement; |
| 83 return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), -static_cast<long long>(decrement)) - decrement; | |
| 84 } | 78 } |
| 85 #endif | 79 #endif |
| 86 | 80 |
| 87 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); } | 81 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { |
| 88 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); } | 82 return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); |
| 89 | 83 } |
| 90 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); } | 84 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { |
| 91 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); } | 85 return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); |
| 92 | |
| 93 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) | |
| 94 { | |
| 95 int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1); | |
| 96 ASSERT(!ret || ret == 1); | |
| 97 return ret; | |
| 98 } | 86 } |
| 99 | 87 |
| 100 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) | 88 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { |
| 101 { | 89 return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); |
| 102 ASSERT(*ptr == 1); | 90 } |
| 103 InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0); | 91 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { |
| 92 return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); |
| 93 } |
| 94 |
| 95 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { |
| 96 int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1); |
| 97 ASSERT(!ret || ret == 1); |
| 98 return ret; |
| 99 } |
| 100 |
| 101 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { |
| 102 ASSERT(*ptr == 1); |
| 103 InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0); |
| 104 } | 104 } |
| 105 | 105 |
| 106 #else | 106 #else |
| 107 | 107 |
| 108 // atomicAdd returns the result of the addition. | 108 // atomicAdd returns the result of the addition. |
| 109 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { return __sync_add_and_fetch(addend, increment); } | 109 ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { |
| 110 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) { return __sync_add_and_fetch(addend, increment); } | 110 return __sync_add_and_fetch(addend, increment); |
| 111 ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend, unsigned long increment) { return __sync_add_and_fetch(addend, increment); } | 111 } |
| 112 ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) { |
| 113 return __sync_add_and_fetch(addend, increment); |
| 114 } |
| 115 ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend, unsigned long increment) { |
| 116 return __sync_add_and_fetch(addend, increment); |
| 117 } |
| 112 // atomicSubtract returns the result of the subtraction. | 118 // atomicSubtract returns the result of the subtraction. |
| 113 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { return __sync_sub_and_fetch(addend, decrement); } | 119 ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { |
| 114 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) { return __sync_sub_and_fetch(addend, decrement); } | 120 return __sync_sub_and_fetch(addend, decrement); |
| 115 ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend, unsigned long decrement) { return __sync_sub_and_fetch(addend, decrement); } | 121 } |
| 116 | 122 ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) { |
| 117 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return atomicAdd(addend, 1); } | 123 return __sync_sub_and_fetch(addend, decrement); |
| 118 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return atomicSubtract(addend, 1); } | 124 } |
| 119 | 125 ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend, unsigned long decrement) { |
| 120 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); } | 126 return __sync_sub_and_fetch(addend, decrement); |
| 121 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); } | |
| 122 | |
| 123 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) | |
| 124 { | |
| 125 int ret = __sync_lock_test_and_set(ptr, 1); | |
| 126 ASSERT(!ret || ret == 1); | |
| 127 return ret; | |
| 128 } | 127 } |
| 129 | 128 |
| 130 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) | 129 ALWAYS_INLINE int atomicIncrement(int volatile* addend) { |
| 131 { | 130 return atomicAdd(addend, 1); |
| 132 ASSERT(*ptr == 1); | 131 } |
| 133 __sync_lock_release(ptr); | 132 ALWAYS_INLINE int atomicDecrement(int volatile* addend) { |
| 133 return atomicSubtract(addend, 1); |
| 134 } |
| 135 |
| 136 ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { |
| 137 return __sync_add_and_fetch(addend, 1); |
| 138 } |
| 139 ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { |
| 140 return __sync_sub_and_fetch(addend, 1); |
| 141 } |
| 142 |
| 143 ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) { |
| 144 int ret = __sync_lock_test_and_set(ptr, 1); |
| 145 ASSERT(!ret || ret == 1); |
| 146 return ret; |
| 147 } |
| 148 |
| 149 ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) { |
| 150 ASSERT(*ptr == 1); |
| 151 __sync_lock_release(ptr); |
| 134 } | 152 } |
| 135 #endif | 153 #endif |
| 136 | 154 |
| 137 #if defined(THREAD_SANITIZER) | 155 #if defined(THREAD_SANITIZER) |
| 138 // The definitions below assume an LP64 data model. This is fine because | 156 // The definitions below assume an LP64 data model. This is fine because |
| 139 // TSan is only supported on x86_64 Linux. | 157 // TSan is only supported on x86_64 Linux. |
| 140 #if CPU(64BIT) && OS(LINUX) | 158 #if CPU(64BIT) && OS(LINUX) |
| 141 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) | 159 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) { |
| 142 { | 160 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); |
| 143 __tsan_atomic32_store(ptr, value, __tsan_memory_order_release); | |
| 144 } | 161 } |
| 145 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) | 162 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) { |
| 146 { | 163 __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr), static_cast<int>(value), __tsan_memory_order_release); |
| 147 __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr), static_cast<int>(value), __tsan_memory_order_release); | |
| 148 } | 164 } |
| 149 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) | 165 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) { |
| 150 { | 166 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); |
| 151 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | |
| 152 } | 167 } |
| 153 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) | 168 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) { |
| 154 { | 169 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); |
| 155 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | |
| 156 } | 170 } |
| 157 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) | 171 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) { |
| 158 { | 172 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); |
| 159 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | |
| 160 } | 173 } |
| 161 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) | 174 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) { |
| 162 { | 175 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), reinterpret_cast<__tsan_atomic64>(value), __tsan_memory_order_release); |
| 163 __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), reinterpret_cast<__tsan_atomic64>(value), __tsan_memory_order_release); | |
| 164 } | 176 } |
| 165 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) | 177 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) { |
| 166 { | 178 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); |
| 167 return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire); | |
| 168 } | 179 } |
| 169 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) | 180 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) { |
| 170 { | 181 return static_cast<unsigned>(__tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire)); |
| 171 return static_cast<unsigned>(__tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire)); | |
| 172 } | 182 } |
| 173 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) | 183 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) { |
| 174 { | 184 return static_cast<long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); |
| 175 return static_cast<long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); | |
| 176 } | 185 } |
| 177 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) | 186 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) { |
| 178 { | 187 return static_cast<unsigned long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); |
| 179 return static_cast<unsigned long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); | |
| 180 } | 188 } |
| 181 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) | 189 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) { |
| 182 { | 190 return reinterpret_cast<void*>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); |
| 183 return reinterpret_cast<void*>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); | |
| 184 } | 191 } |
| 185 | 192 |
| 186 // Do not use noBarrierStore/noBarrierLoad for synchronization. | 193 // Do not use noBarrierStore/noBarrierLoad for synchronization. |
| 187 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) | 194 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) { |
| 188 { | 195 static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); |
| 189 static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); | 196 union { |
| 190 union { | 197 int ivalue; |
| 191 int ivalue; | 198 float fvalue; |
| 192 float fvalue; | 199 } u; |
| 193 } u; | 200 u.fvalue = value; |
| 194 u.fvalue = value; | 201 __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr), u.ivalue, __tsan_memory_order_relaxed); |
| 195 __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr), u.ivalue, __tsan_memory_order_relaxed); | |
| 196 } | 202 } |
| 197 | 203 |
| 198 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) | 204 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) { |
| 199 { | 205 static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); |
| 200 static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); | 206 union { |
| 201 union { | 207 int ivalue; |
| 202 int ivalue; | 208 float fvalue; |
| 203 float fvalue; | 209 } u; |
| 204 } u; | 210 u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_relaxed); |
| 205 u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_relaxed); | 211 return u.fvalue; |
| 206 return u.fvalue; | |
| 207 } | 212 } |
| 208 #endif | 213 #endif |
| 209 | 214 |
| 210 #else // defined(THREAD_SANITIZER) | 215 #else // defined(THREAD_SANITIZER) |
| 211 | 216 |
| 212 #if CPU(X86) || CPU(X86_64) | 217 #if CPU(X86) || CPU(X86_64) |
| 213 // Only compiler barrier is needed. | 218 // Only compiler barrier is needed. |
| 214 #if COMPILER(MSVC) | 219 #if COMPILER(MSVC) |
| 215 // Starting from Visual Studio 2005 compiler guarantees acquire and release | 220 // Starting from Visual Studio 2005 compiler guarantees acquire and release |
| 216 // semantics for operations on volatile variables. See MSDN entry for | 221 // semantics for operations on volatile variables. See MSDN entry for |
| 217 // MemoryBarrier macro. | 222 // MemoryBarrier macro. |
| 218 #define MEMORY_BARRIER() | 223 #define MEMORY_BARRIER() |
| 219 #else | 224 #else |
| 220 #define MEMORY_BARRIER() __asm__ __volatile__("" : : : "memory") | 225 #define MEMORY_BARRIER() __asm__ __volatile__("" \ |
| 226 : \ |
| 227 : \ |
| 228 : "memory") |
| 221 #endif | 229 #endif |
| 222 #elif CPU(ARM) && (OS(LINUX) || OS(ANDROID)) | 230 #elif CPU(ARM) && (OS(LINUX) || OS(ANDROID)) |
| 223 // On ARM __sync_synchronize generates dmb which is very expensive on single | 231 // On ARM __sync_synchronize generates dmb which is very expensive on single |
| 224 // core devices which don't actually need it. Avoid the cost by calling into | 232 // core devices which don't actually need it. Avoid the cost by calling into |
| 225 // kuser_memory_barrier helper. | 233 // kuser_memory_barrier helper. |
| 226 inline void memoryBarrier() | 234 inline void memoryBarrier() { |
| 227 { | 235 // Note: This is a function call, which is also an implicit compiler barrier. |
| 228 // Note: This is a function call, which is also an implicit compiler barrier. | 236 typedef void (*KernelMemoryBarrierFunc)(); |
| 229 typedef void (*KernelMemoryBarrierFunc)(); | 237 ((KernelMemoryBarrierFunc)0xffff0fa0)(); |
| 230 ((KernelMemoryBarrierFunc)0xffff0fa0)(); | |
| 231 } | 238 } |
| 232 #define MEMORY_BARRIER() memoryBarrier() | 239 #define MEMORY_BARRIER() memoryBarrier() |
| 233 #else | 240 #else |
| 234 // Fallback to the compiler intrinsic on all other platforms. | 241 // Fallback to the compiler intrinsic on all other platforms. |
| 235 #define MEMORY_BARRIER() __sync_synchronize() | 242 #define MEMORY_BARRIER() __sync_synchronize() |
| 236 #endif | 243 #endif |
| 237 | 244 |
| 238 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) | 245 ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) { |
| 239 { | 246 MEMORY_BARRIER(); |
| 240 MEMORY_BARRIER(); | 247 *ptr = value; |
| 241 *ptr = value; | |
| 242 } | 248 } |
| 243 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) | 249 ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) { |
| 244 { | 250 MEMORY_BARRIER(); |
| 245 MEMORY_BARRIER(); | 251 *ptr = value; |
| 246 *ptr = value; | |
| 247 } | 252 } |
| 248 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) | 253 ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) { |
| 249 { | 254 MEMORY_BARRIER(); |
| 250 MEMORY_BARRIER(); | 255 *ptr = value; |
| 251 *ptr = value; | |
| 252 } | 256 } |
| 253 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) | 257 ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value) { |
| 254 { | 258 MEMORY_BARRIER(); |
| 255 MEMORY_BARRIER(); | 259 *ptr = value; |
| 256 *ptr = value; | |
| 257 } | 260 } |
| 258 #if CPU(64BIT) | 261 #if CPU(64BIT) |
| 259 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) | 262 ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value) { |
| 260 { | 263 MEMORY_BARRIER(); |
| 261 MEMORY_BARRIER(); | 264 *ptr = value; |
| 262 *ptr = value; | |
| 263 } | 265 } |
| 264 #endif | 266 #endif |
| 265 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) | 267 ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) { |
| 266 { | 268 MEMORY_BARRIER(); |
| 267 MEMORY_BARRIER(); | 269 *ptr = value; |
| 268 *ptr = value; | |
| 269 } | 270 } |
| 270 | 271 |
| 271 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) | 272 ALWAYS_INLINE int acquireLoad(volatile const int* ptr) { |
| 272 { | 273 int value = *ptr; |
| 273 int value = *ptr; | 274 MEMORY_BARRIER(); |
| 274 MEMORY_BARRIER(); | 275 return value; |
| 275 return value; | |
| 276 } | 276 } |
| 277 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) | 277 ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) { |
| 278 { | 278 unsigned value = *ptr; |
| 279 unsigned value = *ptr; | 279 MEMORY_BARRIER(); |
| 280 MEMORY_BARRIER(); | 280 return value; |
| 281 return value; | |
| 282 } | 281 } |
| 283 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) | 282 ALWAYS_INLINE long acquireLoad(volatile const long* ptr) { |
| 284 { | 283 long value = *ptr; |
| 285 long value = *ptr; | 284 MEMORY_BARRIER(); |
| 286 MEMORY_BARRIER(); | 285 return value; |
| 287 return value; | |
| 288 } | 286 } |
| 289 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) | 287 ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) { |
| 290 { | 288 unsigned long value = *ptr; |
| 291 unsigned long value = *ptr; | 289 MEMORY_BARRIER(); |
| 292 MEMORY_BARRIER(); | 290 return value; |
| 293 return value; | |
| 294 } | 291 } |
| 295 #if CPU(64BIT) | 292 #if CPU(64BIT) |
| 296 ALWAYS_INLINE unsigned long long acquireLoad(volatile const unsigned long long* ptr) | 293 ALWAYS_INLINE unsigned long long acquireLoad(volatile const unsigned long long* ptr) { |
| 297 { | 294 unsigned long long value = *ptr; |
| 298 unsigned long long value = *ptr; | 295 MEMORY_BARRIER(); |
| 299 MEMORY_BARRIER(); | 296 return value; |
| 300 return value; | |
| 301 } | 297 } |
| 302 #endif | 298 #endif |
| 303 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) | 299 ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) { |
| 304 { | 300 void* value = *ptr; |
| 305 void* value = *ptr; | 301 MEMORY_BARRIER(); |
| 306 MEMORY_BARRIER(); | 302 return value; |
| 307 return value; | |
| 308 } | 303 } |
| 309 | 304 |
| 310 // Do not use noBarrierStore/noBarrierLoad for synchronization. | 305 // Do not use noBarrierStore/noBarrierLoad for synchronization. |
| 311 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) | 306 ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) { |
| 312 { | 307 *ptr = value; |
| 313 *ptr = value; | |
| 314 } | 308 } |
| 315 | 309 |
| 316 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) | 310 ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) { |
| 317 { | 311 float value = *ptr; |
| 318 float value = *ptr; | 312 return value; |
| 319 return value; | |
| 320 } | 313 } |
| 321 | 314 |
| 322 #if defined(ADDRESS_SANITIZER) | 315 #if defined(ADDRESS_SANITIZER) |
| 323 | 316 |
| 324 NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) | 317 NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) { |
| 325 { | 318 MEMORY_BARRIER(); |
| 326 MEMORY_BARRIER(); | 319 *ptr = value; |
| 327 *ptr = value; | |
| 328 } | 320 } |
| 329 | 321 |
| 330 NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) | 322 NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) { |
| 331 { | 323 unsigned value = *ptr; |
| 332 unsigned value = *ptr; | 324 MEMORY_BARRIER(); |
| 333 MEMORY_BARRIER(); | 325 return value; |
| 334 return value; | |
| 335 } | 326 } |
| 336 | 327 |
| 337 #endif // defined(ADDRESS_SANITIZER) | 328 #endif // defined(ADDRESS_SANITIZER) |
| 338 | 329 |
| 339 #undef MEMORY_BARRIER | 330 #undef MEMORY_BARRIER |
| 340 | 331 |
| 341 #endif | 332 #endif |
| 342 | 333 |
| 343 #if !defined(ADDRESS_SANITIZER) | 334 #if !defined(ADDRESS_SANITIZER) |
| 344 | 335 |
| 345 ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) | 336 ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value) { |
| 346 { | 337 releaseStore(ptr, value); |
| 347 releaseStore(ptr, value); | |
| 348 } | 338 } |
| 349 | 339 |
| 350 ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) | 340 ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) { |
| 351 { | 341 return acquireLoad(ptr); |
| 352 return acquireLoad(ptr); | |
| 353 } | 342 } |
| 354 | 343 |
| 355 #endif | 344 #endif |
| 356 | 345 |
| 357 } // namespace WTF | 346 } // namespace WTF |
| 358 | 347 |
| 359 using WTF::atomicAdd; | 348 using WTF::atomicAdd; |
| 360 using WTF::atomicSubtract; | 349 using WTF::atomicSubtract; |
| 361 using WTF::atomicDecrement; | 350 using WTF::atomicDecrement; |
| 362 using WTF::atomicIncrement; | 351 using WTF::atomicIncrement; |
| 363 using WTF::atomicTestAndSetToOne; | 352 using WTF::atomicTestAndSetToOne; |
| 364 using WTF::atomicSetOneToZero; | 353 using WTF::atomicSetOneToZero; |
| 365 using WTF::acquireLoad; | 354 using WTF::acquireLoad; |
| 366 using WTF::releaseStore; | 355 using WTF::releaseStore; |
| 367 using WTF::noBarrierLoad; | 356 using WTF::noBarrierLoad; |
| 368 using WTF::noBarrierStore; | 357 using WTF::noBarrierStore; |
| 369 | 358 |
| 370 // These methods allow loading from and storing to poisoned memory. Only | 359 // These methods allow loading from and storing to poisoned memory. Only |
| 371 // use these methods if you know what you are doing since they will | 360 // use these methods if you know what you are doing since they will |
| 372 // silence use-after-poison errors from ASan. | 361 // silence use-after-poison errors from ASan. |
| 373 using WTF::asanUnsafeAcquireLoad; | 362 using WTF::asanUnsafeAcquireLoad; |
| 374 using WTF::asanUnsafeReleaseStore; | 363 using WTF::asanUnsafeReleaseStore; |
| 375 | 364 |
| 376 #endif // Atomics_h | 365 #endif // Atomics_h |
| OLD | NEW |
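
Note: the diff above only reformats wtf/Atomics.h from the old WebKit brace style to Chromium style; the primitives themselves are unchanged. As a quick orientation for how they are typically used, below is a minimal sketch of the release/acquire publication pattern and a try-lock flag built on the functions shown in the NEW column. It is illustrative only, not part of this CL; it assumes a Blink checkout where the "wtf/Atomics.h" include path resolves, and the names g_data, g_initialized, g_flag, publish, tryConsume, tryEnter and leave are hypothetical.

// Illustrative sketch only (not part of this CL). Assumes a Blink checkout
// where "wtf/Atomics.h" is on the include path; all names below are made up.
#include "wtf/Atomics.h"

namespace example {

// Publication: the writer fills g_data, then sets g_initialized with a
// release store; a reader that observes 1 via an acquire load is guaranteed
// to also observe the preceding write to g_data.
int g_data = 0;
volatile int g_initialized = 0;

void publish() {
  g_data = 42;
  WTF::releaseStore(&g_initialized, 1);
}

bool tryConsume(int* out) {
  if (!WTF::acquireLoad(&g_initialized))
    return false;
  *out = g_data;
  return true;
}

// Try-lock flag: atomicTestAndSetToOne() returns the previous value, so a
// return of 0 means this thread took the flag; atomicSetOneToZero() releases
// it (via __sync_lock_release or InterlockedExchange, per platform).
volatile int g_flag = 0;

bool tryEnter() { return !WTF::atomicTestAndSetToOne(&g_flag); }
void leave() { WTF::atomicSetOneToZero(&g_flag); }

}  // namespace example

As the header's own comments state, noBarrierStore/noBarrierLoad and the asanUnsafe* variants deliberately provide weaker guarantees and must not be used for synchronization like the pattern above.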