| Index: third_party/WebKit/Source/wtf/Atomics.h
|
| diff --git a/third_party/WebKit/Source/wtf/Atomics.h b/third_party/WebKit/Source/wtf/Atomics.h
|
| index cdd631efe8aea48f021681350150f48f18274f7e..fcb08983b04b7354afbb1670823bd5c3f0a71e64 100644
|
| --- a/third_party/WebKit/Source/wtf/Atomics.h
|
| +++ b/third_party/WebKit/Source/wtf/Atomics.h
|
| @@ -53,84 +53,123 @@ namespace WTF {
|
| #if COMPILER(MSVC)
|
|
|
| // atomicAdd returns the result of the addition.
|
| -ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment)
|
| -{
|
| - return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment;
|
| +ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) {
|
| + return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
|
| + static_cast<long>(increment)) +
|
| + increment;
|
| }
|
| -ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment)
|
| -{
|
| - return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(increment)) + increment;
|
| +ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend,
|
| + unsigned increment) {
|
| + return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
|
| + static_cast<long>(increment)) +
|
| + increment;
|
| }
|
| #if defined(_WIN64)
|
| -ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend, unsigned long long increment)
|
| -{
|
| - return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), static_cast<long long>(increment)) + increment;
|
| +ALWAYS_INLINE unsigned long long atomicAdd(unsigned long long volatile* addend,
|
| + unsigned long long increment) {
|
| + return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend),
|
| + static_cast<long long>(increment)) +
|
| + increment;
|
| }
|
| #endif
|
|
|
| // atomicSubtract returns the result of the subtraction.
|
| -ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement)
|
| -{
|
| - return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), static_cast<long>(-decrement)) - decrement;
|
| +ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) {
|
| + return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
|
| + static_cast<long>(-decrement)) -
|
| + decrement;
|
| }
|
| -ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement)
|
| -{
|
| - return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend), -static_cast<long>(decrement)) - decrement;
|
| +ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend,
|
| + unsigned decrement) {
|
| + return InterlockedExchangeAdd(reinterpret_cast<long volatile*>(addend),
|
| + -static_cast<long>(decrement)) -
|
| + decrement;
|
| }
|
| #if defined(_WIN64)
|
| -ALWAYS_INLINE unsigned long long atomicSubtract(unsigned long long volatile* addend, unsigned long long decrement)
|
| -{
|
| - return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend), -static_cast<long long>(decrement)) - decrement;
|
| +ALWAYS_INLINE unsigned long long atomicSubtract(
|
| + unsigned long long volatile* addend,
|
| + unsigned long long decrement) {
|
| + return InterlockedExchangeAdd64(reinterpret_cast<long long volatile*>(addend),
|
| + -static_cast<long long>(decrement)) -
|
| + decrement;
|
| }
|
| #endif
|
|
|
| -ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return InterlockedIncrement(reinterpret_cast<long volatile*>(addend)); }
|
| -ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return InterlockedDecrement(reinterpret_cast<long volatile*>(addend)); }
|
| +ALWAYS_INLINE int atomicIncrement(int volatile* addend) {
|
| + return InterlockedIncrement(reinterpret_cast<long volatile*>(addend));
|
| +}
|
| +ALWAYS_INLINE int atomicDecrement(int volatile* addend) {
|
| + return InterlockedDecrement(reinterpret_cast<long volatile*>(addend));
|
| +}
|
|
|
| -ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend)); }
|
| -ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend)); }
|
| +ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) {
|
| + return InterlockedIncrement64(reinterpret_cast<long long volatile*>(addend));
|
| +}
|
| +ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) {
|
| + return InterlockedDecrement64(reinterpret_cast<long long volatile*>(addend));
|
| +}
|
|
|
| -ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr)
|
| -{
|
| - int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1);
|
| - ASSERT(!ret || ret == 1);
|
| - return ret;
|
| +ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) {
|
| + int ret = InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 1);
|
| + ASSERT(!ret || ret == 1);
|
| + return ret;
|
| }
|
|
|
| -ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr)
|
| -{
|
| - ASSERT(*ptr == 1);
|
| - InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0);
|
| +ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) {
|
| + ASSERT(*ptr == 1);
|
| + InterlockedExchange(reinterpret_cast<long volatile*>(ptr), 0);
|
| }
|
|
|
| #else
|
|
|
| // atomicAdd returns the result of the addition.
|
| -ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) { return __sync_add_and_fetch(addend, increment); }
|
| -ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend, unsigned increment) { return __sync_add_and_fetch(addend, increment); }
|
| -ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend, unsigned long increment) { return __sync_add_and_fetch(addend, increment); }
|
| +ALWAYS_INLINE int atomicAdd(int volatile* addend, int increment) {
|
| + return __sync_add_and_fetch(addend, increment);
|
| +}
|
| +ALWAYS_INLINE unsigned atomicAdd(unsigned volatile* addend,
|
| + unsigned increment) {
|
| + return __sync_add_and_fetch(addend, increment);
|
| +}
|
| +ALWAYS_INLINE unsigned long atomicAdd(unsigned long volatile* addend,
|
| + unsigned long increment) {
|
| + return __sync_add_and_fetch(addend, increment);
|
| +}
|
| // atomicSubtract returns the result of the subtraction.
|
| -ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) { return __sync_sub_and_fetch(addend, decrement); }
|
| -ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend, unsigned decrement) { return __sync_sub_and_fetch(addend, decrement); }
|
| -ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend, unsigned long decrement) { return __sync_sub_and_fetch(addend, decrement); }
|
| +ALWAYS_INLINE int atomicSubtract(int volatile* addend, int decrement) {
|
| + return __sync_sub_and_fetch(addend, decrement);
|
| +}
|
| +ALWAYS_INLINE unsigned atomicSubtract(unsigned volatile* addend,
|
| + unsigned decrement) {
|
| + return __sync_sub_and_fetch(addend, decrement);
|
| +}
|
| +ALWAYS_INLINE unsigned long atomicSubtract(unsigned long volatile* addend,
|
| + unsigned long decrement) {
|
| + return __sync_sub_and_fetch(addend, decrement);
|
| +}
|
|
|
| -ALWAYS_INLINE int atomicIncrement(int volatile* addend) { return atomicAdd(addend, 1); }
|
| -ALWAYS_INLINE int atomicDecrement(int volatile* addend) { return atomicSubtract(addend, 1); }
|
| +ALWAYS_INLINE int atomicIncrement(int volatile* addend) {
|
| + return atomicAdd(addend, 1);
|
| +}
|
| +ALWAYS_INLINE int atomicDecrement(int volatile* addend) {
|
| + return atomicSubtract(addend, 1);
|
| +}
|
|
|
| -ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) { return __sync_add_and_fetch(addend, 1); }
|
| -ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) { return __sync_sub_and_fetch(addend, 1); }
|
| +ALWAYS_INLINE int64_t atomicIncrement(int64_t volatile* addend) {
|
| + return __sync_add_and_fetch(addend, 1);
|
| +}
|
| +ALWAYS_INLINE int64_t atomicDecrement(int64_t volatile* addend) {
|
| + return __sync_sub_and_fetch(addend, 1);
|
| +}
|
|
|
| -ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr)
|
| -{
|
| - int ret = __sync_lock_test_and_set(ptr, 1);
|
| - ASSERT(!ret || ret == 1);
|
| - return ret;
|
| +ALWAYS_INLINE int atomicTestAndSetToOne(int volatile* ptr) {
|
| + int ret = __sync_lock_test_and_set(ptr, 1);
|
| + ASSERT(!ret || ret == 1);
|
| + return ret;
|
| }
|
|
|
| -ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr)
|
| -{
|
| - ASSERT(*ptr == 1);
|
| - __sync_lock_release(ptr);
|
| +ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr) {
|
| + ASSERT(*ptr == 1);
|
| + __sync_lock_release(ptr);
|
| }
|
| #endif
|
|
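| A minimal usage sketch for the helpers above (illustrative only, not part of this
| change; doOneTimeSetup and the counter names are made up, and the include path is
| assumed). Both the MSVC and the __sync branches give the same semantics:
| atomicAdd/atomicSubtract and atomicIncrement/atomicDecrement return the new value,
| and atomicTestAndSetToOne returns the previous value (0 or 1), so it works as a
| one-shot flag.
|
|   #include "wtf/Atomics.h"  // include path assumed
|
|   static int volatile s_liveObjectCount = 0;
|   static int volatile s_initialized = 0;
|
|   static void doOneTimeSetup() {}  // hypothetical one-time work
|
|   void noteObjectCreated() { WTF::atomicIncrement(&s_liveObjectCount); }
|
|   bool noteObjectDestroyed() {
|     // atomicDecrement returns the new value; 0 means this was the last object.
|     return !WTF::atomicDecrement(&s_liveObjectCount);
|   }
|
|   void ensureInitialized() {
|     // Only the first caller sees the previous value 0 and runs the setup.
|     if (!WTF::atomicTestAndSetToOne(&s_initialized))
|       doOneTimeSetup();
|   }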
|
| @@ -138,76 +177,85 @@ ALWAYS_INLINE void atomicSetOneToZero(int volatile* ptr)
|
| // The definitions below assume an LP64 data model. This is fine because
|
| // TSan is only supported on x86_64 Linux.
|
| #if CPU(64BIT) && OS(LINUX)
|
| -ALWAYS_INLINE void releaseStore(volatile int* ptr, int value)
|
| -{
|
| - __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
|
| -}
|
| -ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value)
|
| -{
|
| - __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr), static_cast<int>(value), __tsan_memory_order_release);
|
| -}
|
| -ALWAYS_INLINE void releaseStore(volatile long* ptr, long value)
|
| -{
|
| - __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
|
| -}
|
| -ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value)
|
| -{
|
| - __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
|
| -}
|
| -ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value)
|
| -{
|
| - __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), static_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
|
| -}
|
| -ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value)
|
| -{
|
| - __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), reinterpret_cast<__tsan_atomic64>(value), __tsan_memory_order_release);
|
| -}
|
| -ALWAYS_INLINE int acquireLoad(volatile const int* ptr)
|
| -{
|
| - return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
|
| -}
|
| -ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr)
|
| -{
|
| - return static_cast<unsigned>(__tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire));
|
| -}
|
| -ALWAYS_INLINE long acquireLoad(volatile const long* ptr)
|
| -{
|
| - return static_cast<long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire));
|
| -}
|
| -ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr)
|
| -{
|
| - return static_cast<unsigned long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire));
|
| -}
|
| -ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr)
|
| -{
|
| - return reinterpret_cast<void*>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire));
|
| +ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) {
|
| + __tsan_atomic32_store(ptr, value, __tsan_memory_order_release);
|
| +}
|
| +ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) {
|
| + __tsan_atomic32_store(reinterpret_cast<volatile int*>(ptr),
|
| + static_cast<int>(value), __tsan_memory_order_release);
|
| +}
|
| +ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) {
|
| + __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
|
| + static_cast<__tsan_atomic64>(value),
|
| + __tsan_memory_order_release);
|
| +}
|
| +ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr,
|
| + unsigned long value) {
|
| + __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
|
| + static_cast<__tsan_atomic64>(value),
|
| + __tsan_memory_order_release);
|
| +}
|
| +ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr,
|
| + unsigned long long value) {
|
| + __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
|
| + static_cast<__tsan_atomic64>(value),
|
| + __tsan_memory_order_release);
|
| +}
|
| +ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) {
|
| + __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr),
|
| + reinterpret_cast<__tsan_atomic64>(value),
|
| + __tsan_memory_order_release);
|
| +}
|
| +ALWAYS_INLINE int acquireLoad(volatile const int* ptr) {
|
| + return __tsan_atomic32_load(ptr, __tsan_memory_order_acquire);
|
| +}
|
| +ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) {
|
| + return static_cast<unsigned>(__tsan_atomic32_load(
|
| + reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire));
|
| +}
|
| +ALWAYS_INLINE long acquireLoad(volatile const long* ptr) {
|
| + return static_cast<long>(__tsan_atomic64_load(
|
| + reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
|
| + __tsan_memory_order_acquire));
|
| +}
|
| +ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) {
|
| + return static_cast<unsigned long>(__tsan_atomic64_load(
|
| + reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
|
| + __tsan_memory_order_acquire));
|
| +}
|
| +ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) {
|
| + return reinterpret_cast<void*>(__tsan_atomic64_load(
|
| + reinterpret_cast<volatile const __tsan_atomic64*>(ptr),
|
| + __tsan_memory_order_acquire));
|
| }
|
|
|
| // Do not use noBarrierStore/noBarrierLoad for synchronization.
|
| -ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value)
|
| -{
|
| - static_assert(sizeof(int) == sizeof(float), "int and float are different sizes");
|
| - union {
|
| - int ivalue;
|
| - float fvalue;
|
| - } u;
|
| - u.fvalue = value;
|
| - __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr), u.ivalue, __tsan_memory_order_relaxed);
|
| -}
|
| -
|
| -ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr)
|
| -{
|
| - static_assert(sizeof(int) == sizeof(float), "int and float are different sizes");
|
| - union {
|
| - int ivalue;
|
| - float fvalue;
|
| - } u;
|
| - u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_relaxed);
|
| - return u.fvalue;
|
| +ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) {
|
| + static_assert(sizeof(int) == sizeof(float),
|
| + "int and float are different sizes");
|
| + union {
|
| + int ivalue;
|
| + float fvalue;
|
| + } u;
|
| + u.fvalue = value;
|
| + __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr),
|
| + u.ivalue, __tsan_memory_order_relaxed);
|
| +}
|
| +
|
| +ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) {
|
| + static_assert(sizeof(int) == sizeof(float),
|
| + "int and float are different sizes");
|
| + union {
|
| + int ivalue;
|
| + float fvalue;
|
| + } u;
|
| + u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr),
|
| + __tsan_memory_order_relaxed);
|
| + return u.fvalue;
|
| }
|
| #endif
|
|
|
| -#else // defined(THREAD_SANITIZER)
|
| +#else // defined(THREAD_SANITIZER)
|
|
|
| #if CPU(X86) || CPU(X86_64)
|
| // Only compiler barrier is needed.
|
| @@ -223,11 +271,10 @@ ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr)
|
| // On ARM __sync_synchronize generates dmb which is very expensive on single
|
| // core devices which don't actually need it. Avoid the cost by calling into
|
| // kuser_memory_barrier helper.
|
| -inline void memoryBarrier()
|
| -{
|
| - // Note: This is a function call, which is also an implicit compiler barrier.
|
| - typedef void (*KernelMemoryBarrierFunc)();
|
| - ((KernelMemoryBarrierFunc)0xffff0fa0)();
|
| +inline void memoryBarrier() {
|
| + // Note: This is a function call, which is also an implicit compiler barrier.
|
| + typedef void (*KernelMemoryBarrierFunc)();
|
| + ((KernelMemoryBarrierFunc)0xffff0fa0)();
|
| }
|
| #define MEMORY_BARRIER() memoryBarrier()
|
| #else
|
| @@ -235,106 +282,96 @@ inline void memoryBarrier()
|
| #define MEMORY_BARRIER() __sync_synchronize()
|
| #endif
|
|
|
| -ALWAYS_INLINE void releaseStore(volatile int* ptr, int value)
|
| -{
|
| - MEMORY_BARRIER();
|
| - *ptr = value;
|
| +ALWAYS_INLINE void releaseStore(volatile int* ptr, int value) {
|
| + MEMORY_BARRIER();
|
| + *ptr = value;
|
| }
|
| -ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value)
|
| -{
|
| - MEMORY_BARRIER();
|
| - *ptr = value;
|
| +ALWAYS_INLINE void releaseStore(volatile unsigned* ptr, unsigned value) {
|
| + MEMORY_BARRIER();
|
| + *ptr = value;
|
| }
|
| -ALWAYS_INLINE void releaseStore(volatile long* ptr, long value)
|
| -{
|
| - MEMORY_BARRIER();
|
| - *ptr = value;
|
| +ALWAYS_INLINE void releaseStore(volatile long* ptr, long value) {
|
| + MEMORY_BARRIER();
|
| + *ptr = value;
|
| }
|
| -ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr, unsigned long value)
|
| -{
|
| - MEMORY_BARRIER();
|
| - *ptr = value;
|
| +ALWAYS_INLINE void releaseStore(volatile unsigned long* ptr,
|
| + unsigned long value) {
|
| + MEMORY_BARRIER();
|
| + *ptr = value;
|
| }
|
| #if CPU(64BIT)
|
| -ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr, unsigned long long value)
|
| -{
|
| - MEMORY_BARRIER();
|
| - *ptr = value;
|
| +ALWAYS_INLINE void releaseStore(volatile unsigned long long* ptr,
|
| + unsigned long long value) {
|
| + MEMORY_BARRIER();
|
| + *ptr = value;
|
| }
|
| #endif
|
| -ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value)
|
| -{
|
| - MEMORY_BARRIER();
|
| - *ptr = value;
|
| -}
|
| -
|
| -ALWAYS_INLINE int acquireLoad(volatile const int* ptr)
|
| -{
|
| - int value = *ptr;
|
| - MEMORY_BARRIER();
|
| - return value;
|
| -}
|
| -ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr)
|
| -{
|
| - unsigned value = *ptr;
|
| - MEMORY_BARRIER();
|
| - return value;
|
| -}
|
| -ALWAYS_INLINE long acquireLoad(volatile const long* ptr)
|
| -{
|
| - long value = *ptr;
|
| - MEMORY_BARRIER();
|
| - return value;
|
| -}
|
| -ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr)
|
| -{
|
| - unsigned long value = *ptr;
|
| - MEMORY_BARRIER();
|
| - return value;
|
| +ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) {
|
| + MEMORY_BARRIER();
|
| + *ptr = value;
|
| +}
|
| +
|
| +ALWAYS_INLINE int acquireLoad(volatile const int* ptr) {
|
| + int value = *ptr;
|
| + MEMORY_BARRIER();
|
| + return value;
|
| +}
|
| +ALWAYS_INLINE unsigned acquireLoad(volatile const unsigned* ptr) {
|
| + unsigned value = *ptr;
|
| + MEMORY_BARRIER();
|
| + return value;
|
| +}
|
| +ALWAYS_INLINE long acquireLoad(volatile const long* ptr) {
|
| + long value = *ptr;
|
| + MEMORY_BARRIER();
|
| + return value;
|
| +}
|
| +ALWAYS_INLINE unsigned long acquireLoad(volatile const unsigned long* ptr) {
|
| + unsigned long value = *ptr;
|
| + MEMORY_BARRIER();
|
| + return value;
|
| }
|
| #if CPU(64BIT)
|
| -ALWAYS_INLINE unsigned long long acquireLoad(volatile const unsigned long long* ptr)
|
| -{
|
| - unsigned long long value = *ptr;
|
| - MEMORY_BARRIER();
|
| - return value;
|
| +ALWAYS_INLINE unsigned long long acquireLoad(
|
| + volatile const unsigned long long* ptr) {
|
| + unsigned long long value = *ptr;
|
| + MEMORY_BARRIER();
|
| + return value;
|
| }
|
| #endif
|
| -ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr)
|
| -{
|
| - void* value = *ptr;
|
| - MEMORY_BARRIER();
|
| - return value;
|
| +ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) {
|
| + void* value = *ptr;
|
| + MEMORY_BARRIER();
|
| + return value;
|
| }
|
|
|
| // Do not use noBarrierStore/noBarrierLoad for synchronization.
|
| -ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value)
|
| -{
|
| - *ptr = value;
|
| +ALWAYS_INLINE void noBarrierStore(volatile float* ptr, float value) {
|
| + *ptr = value;
|
| }
|
|
|
| -ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr)
|
| -{
|
| - float value = *ptr;
|
| - return value;
|
| +ALWAYS_INLINE float noBarrierLoad(volatile const float* ptr) {
|
| + float value = *ptr;
|
| + return value;
|
| }
|
|
|
| #if defined(ADDRESS_SANITIZER)
|
|
|
| -NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value)
|
| -{
|
| - MEMORY_BARRIER();
|
| - *ptr = value;
|
| +NO_SANITIZE_ADDRESS ALWAYS_INLINE void asanUnsafeReleaseStore(
|
| + volatile unsigned* ptr,
|
| + unsigned value) {
|
| + MEMORY_BARRIER();
|
| + *ptr = value;
|
| }
|
|
|
| -NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr)
|
| -{
|
| - unsigned value = *ptr;
|
| - MEMORY_BARRIER();
|
| - return value;
|
| +NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(
|
| + volatile const unsigned* ptr) {
|
| + unsigned value = *ptr;
|
| + MEMORY_BARRIER();
|
| + return value;
|
| }
|
|
|
| -#endif // defined(ADDRESS_SANITIZER)
|
| +#endif // defined(ADDRESS_SANITIZER)
|
|
|
| #undef MEMORY_BARRIER
|
|
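| A sketch of where the asanUnsafe variants fit (illustrative; the flag and function
| names are made up, and "memory that ASan may have poisoned" is an assumed
| motivation suggested by the NO_SANITIZE_ADDRESS attribute): they behave like
| releaseStore/acquireLoad, but under ASan the access itself is not instrumented.
|
|   static unsigned volatile s_flag = 0;  // imagine this word can be ASan-poisoned
|
|   void setFlag() { WTF::asanUnsafeReleaseStore(&s_flag, 1u); }
|   unsigned flagValue() { return WTF::asanUnsafeAcquireLoad(&s_flag); }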
|
| @@ -342,19 +379,18 @@ NO_SANITIZE_ADDRESS ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const
|
|
|
| #if !defined(ADDRESS_SANITIZER)
|
|
|
| -ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr, unsigned value)
|
| -{
|
| - releaseStore(ptr, value);
|
| +ALWAYS_INLINE void asanUnsafeReleaseStore(volatile unsigned* ptr,
|
| + unsigned value) {
|
| + releaseStore(ptr, value);
|
| }
|
|
|
| -ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr)
|
| -{
|
| - return acquireLoad(ptr);
|
| +ALWAYS_INLINE unsigned asanUnsafeAcquireLoad(volatile const unsigned* ptr) {
|
| + return acquireLoad(ptr);
|
| }
|
|
|
| #endif
|
|
|
| -} // namespace WTF
|
| +}  // namespace WTF
|
|
|
| using WTF::atomicAdd;
|
| using WTF::atomicSubtract;
|
| @@ -373,4 +409,4 @@ using WTF::noBarrierStore;
|
| using WTF::asanUnsafeAcquireLoad;
|
| using WTF::asanUnsafeReleaseStore;
|
|
|
| -#endif // Atomics_h
|
| +#endif  // Atomics_h
|
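| For reference, a minimal sketch of the publish/consume pattern releaseStore and
| acquireLoad are built for (illustrative only; Payload, publish and tryConsume are
| made-up names, and the include path is assumed):
|
|   #include "wtf/Atomics.h"  // include path assumed
|
|   struct Payload {
|     int a;
|     int b;
|   };
|
|   static Payload* volatile s_payload = 0;
|
|   void publish() {
|     Payload* p = new Payload;
|     p->a = 1;
|     p->b = 2;
|     // The barrier in releaseStore orders the writes to *p before the pointer
|     // store, so no thread can observe a non-null s_payload with stale fields.
|     WTF::releaseStore(reinterpret_cast<void* volatile*>(&s_payload), p);
|   }
|
|   Payload* tryConsume() {
|     // Pairs with the releaseStore above: a thread that loads a non-null pointer
|     // also sees the fields written before publication. Returns 0 until then.
|     return static_cast<Payload*>(
|         WTF::acquireLoad(reinterpret_cast<void* volatile const*>(&s_payload)));
|   }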
|
|
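| And for the float helpers, which the header explicitly says not to use for
| synchronization: they only perform the raw store/load (annotated as relaxed
| atomics under TSan), with no ordering guarantees, so they suit a "latest sample"
| that readers may observe stale. The names below are made up.
|
|   static float volatile s_lastObservedScale = 1.0f;
|
|   void recordScale(float scale) {
|     WTF::noBarrierStore(&s_lastObservedScale, scale);  // no ordering implied
|   }
|   float lastObservedScale() {
|     return WTF::noBarrierLoad(&s_lastObservedScale);  // may be stale
|   }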