Index: src/ports/SkBarriers_tsan.h
diff --git a/src/ports/SkBarriers_tsan.h b/src/ports/SkBarriers_tsan.h
index ae68b3daf9d1865ca1414c17a0a4c2bc9c2d5727..6f273907abae1857699b0ef2e4bbe4edbff9c512 100644
--- a/src/ports/SkBarriers_tsan.h
+++ b/src/ports/SkBarriers_tsan.h
@@ -8,41 +8,18 @@
 #ifndef SkBarriers_tsan_DEFINED
 #define SkBarriers_tsan_DEFINED
 
-#include <sanitizer/tsan_interface_atomic.h>
-
 static inline void sk_compiler_barrier() { asm volatile("" : : : "memory"); }
 
-// We'd do this as separate functions, but you can't partially specialize functions...
-template <typename T, size_t bits>
-struct SkBarriers {
-    static T AcquireLoad(T*);
-    static void ReleaseStore(T*, T);
-};
-
-#define SK_BARRIERS(BITS) \
-    template <typename T> \
-    struct SkBarriers<T, BITS> { \
-        static T AcquireLoad(T* ptr) { \
-            return (T)__tsan_atomic ## BITS ## _load((__tsan_atomic ## BITS*)ptr, \
-                                                     __tsan_memory_order_acquire); \
-        } \
-        static void ReleaseStore(T* ptr, T val) { \
-            __tsan_atomic ## BITS ## _store((__tsan_atomic ## BITS*)ptr, \
-                                            val, \
-                                            __tsan_memory_order_release); \
-        } \
-    }
-SK_BARRIERS(8);
-SK_BARRIERS(16);
-SK_BARRIERS(32);
-SK_BARRIERS(64);
-#undef SK_BARRIERS
-
 template <typename T>
-T sk_acquire_load(T* ptr) { return SkBarriers<T, 8*sizeof(T)>::AcquireLoad(ptr); }
+T sk_acquire_load(T* ptr) {
+    SkASSERT(__atomic_always_lock_free(sizeof(T), ptr));
+    return __atomic_load_n(ptr, __ATOMIC_ACQUIRE);
+}
 
 template <typename T>
-void sk_release_store(T* ptr, T val) { SkBarriers<T, 8*sizeof(T)>::ReleaseStore(ptr, val); }
-
+void sk_release_store(T* ptr, T val) {
+    SkASSERT(__atomic_always_lock_free(sizeof(T), ptr));
+    return __atomic_store_n(ptr, val, __ATOMIC_RELEASE);
+}
 
 #endif//SkBarriers_tsan_DEFINED
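
Note on the change: the per-bit-width SkBarriers specializations existed only to route loads and stores through the <sanitizer/tsan_interface_atomic.h> shims. The GCC/Clang builtins __atomic_load_n and __atomic_store_n are instrumented by ThreadSanitizer directly, so the shims (and the partial-specialization machinery) can be dropped; __atomic_always_lock_free guards against T being a type the compiler cannot handle with a single lock-free instruction. A minimal usage sketch of the acquire/release pairing these helpers provide follows; the producer/consumer functions, the globals, and the include path are hypothetical, for illustration only, and are not part of this patch.

    // Hypothetical sketch: one thread publishes data, another consumes it,
    // ordered by the release/acquire pair from SkBarriers_tsan.h.
    #include "SkBarriers_tsan.h"  // assumed include path for this sketch
    #include <stdint.h>

    static int32_t gPayload;  // plain data, ordered by the barrier pair
    static int32_t gReady;    // flag: 0 = not ready, 1 = ready

    void producer() {
        gPayload = 42;                 // plain store...
        sk_release_store(&gReady, 1);  // ...published by the release store
    }

    void consumer() {
        if (sk_acquire_load(&gReady) == 1) {
            // The acquire load pairs with the release store above,
            // so gPayload is guaranteed to read 42 here.
        }
    }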