Index: include/ports/SkAtomics_sync.h
diff --git a/include/ports/SkAtomics_sync.h b/include/ports/SkAtomics_sync.h
index 9389c00103a0e513b3471fed5952f4d3f0693f1c..66da4d35eeb3c47524e4aeb9832aa757ee4cf8fe 100644
--- a/include/ports/SkAtomics_sync.h
+++ b/include/ports/SkAtomics_sync.h
@@ -1,55 +1,51 @@
-/*
- * Copyright 2013 Google Inc.
- *
- * Use of this source code is governed by a BSD-style license that can be
- * found in the LICENSE file.
- */
-
 #ifndef SkAtomics_sync_DEFINED
 #define SkAtomics_sync_DEFINED

-/** GCC/Clang __sync based atomics. */
-
-#include <stdint.h>
-
-static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t* addr) {
-    return __sync_fetch_and_add(addr, 1);
+// This file is mostly a shim. We'd like to delete it. Please don't put much
+// effort into maintaining it, and if you find bugs in it, the right fix is to
+// delete this file and upgrade your compiler to something that supports
+// __atomic builtins or std::atomic.
+
+static inline void barrier(sk_memory_order mo) {
+    asm volatile("" : : : "memory");  // Prevents the compiler from reordering code.
+    #if SK_CPU_X86
+        // On x86, we generally don't need an extra memory barrier for loads or stores.
+        if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); }
+    #else
+        // On other platforms (e.g. ARM) we do unless the memory order is relaxed.
+        if (sk_memory_order_relaxed != mo) { __sync_synchronize(); }
+    #endif
 }

-static inline __attribute__((always_inline)) int64_t sk_atomic_inc(int64_t* addr) {
-#if defined(__mips__) && !defined(__LP64__) && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8)
-    /** Some versions of the GCC 32-bit MIPS toolchains (e.g. 4.8) for android are missing
-     * support for the __sync* functions that operate on 64-bit values. The workaround
-     * is to use __atomic* functions until we can move everything to <stdatomic.h>.
-     */
-    return __atomic_fetch_add(addr, 1, __ATOMIC_SEQ_CST);
-#else
-    return __sync_fetch_and_add(addr, 1);
-#endif
-}
+// These barriers only support our majority use cases: acquire and relaxed loads, release stores.
+// For anything more complicated, please consider deleting this file and upgrading your compiler.

-static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t* addr, int32_t inc) {
-    return __sync_fetch_and_add(addr, inc);
+template <typename T>
+T sk_atomic_load(const T* ptr, sk_memory_order mo) {
+    T val = *ptr;
+    barrier(mo);
+    return val;
 }

-static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t* addr) {
-    return __sync_fetch_and_add(addr, -1);
+template <typename T>
+void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
+    barrier(mo);
+    *ptr = val;
 }

-static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_dec() { }
-
-static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr,
-                                                                int32_t before,
-                                                                int32_t after) {
-    return __sync_bool_compare_and_swap(addr, before, after);
+template <typename T>
+T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
+    return __sync_fetch_and_add(ptr, val);
 }

-static inline __attribute__((always_inline)) void* sk_atomic_cas(void** addr,
-                                                                 void* before,
-                                                                 void* after) {
-    return __sync_val_compare_and_swap(addr, before, after);
+template <typename T>
+bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) {
+    T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
+    if (prev == *expected) {
+        return true;
+    }
+    *expected = prev;
+    return false;
 }

-static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_conditional_inc() { }
-
-#endif
+#endif//SkAtomics_sync_DEFINED
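
For readers new to the API this shim backs, here is a minimal usage sketch, not part of the patch: publishing a value with a release store and reading it with an acquire load, the pattern the new barrier() is built to support. It assumes the sk_memory_order enum declared in SkAtomics.h (not shown in this patch) also has acquire and release values mirroring std::memory_order; the gPayload, gReady, publish, and try_consume names are invented for illustration.

    // Hypothetical caller, not part of the patch.  Assumes SkAtomics.h defines
    // sk_memory_order with acquire/release values mirroring std::memory_order.
    #include "SkAtomics.h"

    static int32_t gPayload;    // Data being published.
    static int32_t gReady = 0;  // Set to 1 once gPayload is safe to read.

    void publish(int32_t v) {
        gPayload = v;                                          // Plain store of the payload...
        sk_atomic_store(&gReady, 1, sk_memory_order_release);  // ...then release-store the flag.
    }

    bool try_consume(int32_t* out) {
        // Acquire load: if the flag reads 1, the store to gPayload is visible too.
        if (sk_atomic_load(&gReady, sk_memory_order_acquire)) {
            *out = gPayload;
            return true;
        }
        return false;
    }

This lines up with how the shim is written: sk_atomic_store() issues barrier() before the store and sk_atomic_load() issues it after the load, which is what gives the release/acquire pairing above.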
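
The new sk_atomic_compare_exchange() follows the same convention as std::atomic's compare_exchange_strong(): when the CAS fails it writes the value it actually found back into *expected, so a retry loop never needs a separate reload. A sketch of that idiom, again hypothetical and not part of the patch; the sk_note_max name is invented:

    // Hypothetical helper, not part of the patch: keep the largest value ever
    // seen in *slot, using a CAS retry loop.  Note that this __sync-based shim
    // ignores the memory-order arguments; every CAS is a full barrier.
    #include "SkAtomics.h"

    void sk_note_max(int32_t* slot, int32_t candidate) {
        int32_t prev = sk_atomic_load(slot, sk_memory_order_relaxed);
        while (candidate > prev) {
            if (sk_atomic_compare_exchange(slot, &prev, candidate,
                                           sk_memory_order_relaxed,
                                           sk_memory_order_relaxed)) {
                return;  // We installed candidate as the new maximum.
            }
            // CAS failed: prev now holds the value another thread wrote;
            // loop back and retry only if candidate still beats it.
        }
    }

sk_atomic_fetch_add() takes the same shortcut as the CAS shim: it ignores the requested order and leans on __sync_fetch_and_add(), which the __sync builtins already treat as a full barrier.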