Index: Source/wtf/Atomics.h |
diff --git a/Source/wtf/Atomics.h b/Source/wtf/Atomics.h |
index 6013a8321849c34627727b7387080855f064a685..23969d070048148c172106b12cb3327cbd299346 100644 |
--- a/Source/wtf/Atomics.h |
+++ b/Source/wtf/Atomics.h |
@@ -162,6 +162,26 @@ ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) |
{ |
__tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), reinterpret_cast<__tsan_atomic64>(value), __tsan_memory_order_release); |
} |
+ALWAYS_INLINE void releaseStore(volatile float* ptr, float value) |
+{ |
+ static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); |
+ union { |
+ int ivalue; |
+ float fvalue; |
+ } u; |
+ u.fvalue = value; |
+ __tsan_atomic32_store(reinterpret_cast<volatile __tsan_atomic32*>(ptr), u.ivalue, __tsan_memory_order_release); |
+} |
+ALWAYS_INLINE void releaseStore(volatile double* ptr, double value) |
+{ |
+ static_assert(sizeof(long) == sizeof(double), "long and double are different sizes"); |
+ union { |
+ long ivalue; |
+ double dvalue; |
+ } u; |
+ u.dvalue = value; |
+ __tsan_atomic64_store(reinterpret_cast<volatile __tsan_atomic64*>(ptr), u.ivalue, __tsan_memory_order_release); |
+} |
ALWAYS_INLINE int acquireLoad(volatile const int* ptr) |
{ |
@@ -183,6 +203,26 @@ ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) |
{ |
return reinterpret_cast<void*>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); |
} |
+ALWAYS_INLINE float acquireLoad(volatile const float* ptr) |
+{ |
+ static_assert(sizeof(int) == sizeof(float), "int and float are different sizes"); |
+ union { |
+ int ivalue; |
+ float fvalue; |
+ } u; |
+ u.ivalue = __tsan_atomic32_load(reinterpret_cast<volatile const int*>(ptr), __tsan_memory_order_acquire); |
+ return u.fvalue; |
+} |
+ALWAYS_INLINE double acquireLoad(volatile const double* ptr) |
+{ |
+ static_assert(sizeof(long) == sizeof(double), "long and double are different sizes"); |
+ union { |
+ long ivalue; |
+ double dvalue; |
+ } u; |
+ u.ivalue = static_cast<long>(__tsan_atomic64_load(reinterpret_cast<volatile const __tsan_atomic64*>(ptr), __tsan_memory_order_acquire)); |
+ return u.dvalue; |
+} |
#endif |
#else // defined(THREAD_SANITIZER) |
@@ -243,6 +283,17 @@ ALWAYS_INLINE void releaseStore(void* volatile* ptr, void* value) |
MEMORY_BARRIER(); |
*ptr = value; |
} |
+ALWAYS_INLINE void releaseStore(volatile float* ptr, float value) |
+{ |
+ MEMORY_BARRIER(); |
+ *ptr = value; |
+} |
+ALWAYS_INLINE void releaseStore(volatile double* ptr, double value) |
+{ |
+ MEMORY_BARRIER(); |
+ *ptr = value; |
+} |
+ |
ALWAYS_INLINE int acquireLoad(volatile const int* ptr) |
{ |
@@ -280,6 +331,18 @@ ALWAYS_INLINE void* acquireLoad(void* volatile const* ptr) |
MEMORY_BARRIER(); |
return value; |
} |
+ALWAYS_INLINE float acquireLoad(volatile const float* ptr) |
+{ |
+ float value = *ptr; |
+ MEMORY_BARRIER(); |
+ return value; |
+} |
+ALWAYS_INLINE double acquireLoad(volatile const double* ptr) |
+{ |
+    double value = *ptr; |
+    MEMORY_BARRIER(); |
+    return value; |
+} |
[Review comment — Alexander Potapenko, 2015/08/10 14:01:30, on the `acquireLoad(volatile const double*)` overload above: "I'm no expert in ARM CPUs, do you have an idea whe…" — comment text truncated in the extracted review view; it had been spilled into the middle of the function in the extraction and is repositioned here after the hunk.]
#if defined(ADDRESS_SANITIZER) |