| OLD | NEW |
| --- | --- |
| 1 /* | |
| 2  * Copyright 2013 Google Inc. | |
| 3  * | |
| 4  * Use of this source code is governed by a BSD-style license that can be | |
| 5  * found in the LICENSE file. | |
| 6  */ | |
| 7 | |
| 8 #ifndef SkAtomics_sync_DEFINED | 1 #ifndef SkAtomics_sync_DEFINED |
| 9 #define SkAtomics_sync_DEFINED | 2 #define SkAtomics_sync_DEFINED |
| 10 | 3 |
| 11 /** GCC/Clang __sync based atomics. */ | 4 // This file is mostly a shim. We'd like to delete it. Please don't put much |
| | 5 // effort into maintaining it, and if you find bugs in it, the right fix is to |
| | 6 // delete this file and upgrade your compiler to something that supports |
| | 7 // __atomic builtins or std::atomic. |
| 12 | 8 |
| 13 #include <stdint.h> | 9 static inline void barrier(sk_memory_order mo) { |
| 14 | 10     asm volatile("" : : : "memory");  // Prevents the compiler from reordering code. |
| 15 static inline __attribute__((always_inline)) int32_t sk_atomic_inc(int32_t* addr) { | 11 #if SK_CPU_X86 |
| 16     return __sync_fetch_and_add(addr, 1); | 12     // On x86, we generally don't need an extra memory barrier for loads or stores. |
| | 13     if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); } |
| | 14 #else |
| | 15     // On other platforms (e.g. ARM) we do unless the memory order is relaxed. |
| | 16     if (sk_memory_order_relaxed != mo) { __sync_synchronize(); } |
| | 17 #endif |
| 17 } | 18 } |
| 18 | 19 |
| 19 static inline __attribute__((always_inline)) int64_t sk_atomic_inc(int64_t* addr) { | 20 // These barriers only support our majority use cases: acquire and relaxed loads, release stores. |
| 20 #if defined(__mips__) && !defined(__LP64__) && !defined(__GCC_HAVE_SYNC_COMPARE_AND_SWAP_8) | 21 // For anything more complicated, please consider deleting this file and upgrading your compiler. |
| 21     /** Some versions of the GCC 32-bit MIPS toolchains (e.g. 4.8) for android are missing | 22 |
| 22       * support for the __sync* functions that operate on 64-bit values. The workaround | 23 template <typename T> |
| 23       * is to use __atomic* functions until we can move everything to <stdatomic.h>. | 24 T sk_atomic_load(const T* ptr, sk_memory_order mo) { |
| 24       */ | 25     T val = *ptr; |
| 25     return __atomic_fetch_add(addr, 1, __ATOMIC_SEQ_CST); | 26     barrier(mo); |
| 26 #else | 27     return val; |
| 27     return __sync_fetch_and_add(addr, 1); | |
| 28 #endif | |
| 29 } | 28 } |
| 30 | 29 |
| 31 static inline __attribute__((always_inline)) int32_t sk_atomic_add(int32_t* addr, int32_t inc) { | 30 template <typename T> |
| 32     return __sync_fetch_and_add(addr, inc); | 31 void sk_atomic_store(T* ptr, T val, sk_memory_order mo) { |
| | 32     barrier(mo); |
| | 33     *ptr = val; |
| 33 } | 34 } |
| 34 | 35 |
| 35 static inline __attribute__((always_inline)) int32_t sk_atomic_dec(int32_t* addr) { | 36 template <typename T> |
| 36     return __sync_fetch_and_add(addr, -1); | 37 T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) { |
| | 38     return __sync_fetch_and_add(ptr, val); |
| 37 } | 39 } |
| 38 | 40 |
| 39 static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_dec() { } | 41 template <typename T> |
| 40 | 42 bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired, sk_memory_order, sk_memory_order) { |
| 41 static inline __attribute__((always_inline)) bool sk_atomic_cas(int32_t* addr, | 43     T prev = __sync_val_compare_and_swap(ptr, *expected, desired); |
| 42                                                                 int32_t before, | 44     if (prev == *expected) { |
| 43                                                                 int32_t after) { | 45         return true; |
| 44     return __sync_bool_compare_and_swap(addr, before, after); | 46     } |
| | 47     *expected = prev; |
| | 48     return false; |
| 45 } | 49 } |
| 46 | 50 |
| 47 static inline __attribute__((always_inline)) void* sk_atomic_cas(void** addr, | 51 #endif//SkAtomics_sync_DEFINED |
| 48                                                                  void* before, | |
| 49                                                                  void* after) { | |
| 50     return __sync_val_compare_and_swap(addr, before, after); | |
| 51 } | |
| 52 | |
| 53 static inline __attribute__((always_inline)) void sk_membar_acquire__after_atomic_conditional_inc() { } | |
| 54 | |
| 55 #endif | |
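For readers skimming the NEW column: the shim replaces the old type-specific `__sync` wrappers with four templated operations. Below is a minimal usage sketch; it is not part of the CL, and it assumes the enclosing SkAtomics header defines the `sk_memory_order` enum (only `sk_memory_order_relaxed` and `sk_memory_order_seq_cst` appear in the diff; the acquire/release values are implied by the "acquire and relaxed loads, release stores" comment). The include path and function names are illustrative only.

```cpp
// A minimal sketch, assuming sk_memory_order is defined before the shim is
// included. Only _relaxed and _seq_cst appear in the diff above; _acquire and
// _release are implied by its comments.
enum sk_memory_order {
    sk_memory_order_relaxed,
    sk_memory_order_acquire,
    sk_memory_order_release,
    sk_memory_order_seq_cst,
};

#include "SkAtomics_sync.h"   // illustrative include path for the NEW file
#include <stdint.h>

static int32_t gReady = 0;
static int32_t gValue = 0;

// Publish gValue with a release store; consume it with an acquire load.
void producer() {
    gValue = 42;                                                    // plain store
    sk_atomic_store(&gReady, (int32_t)1, sk_memory_order_release);  // release store
}

int32_t consumer() {
    while (0 == sk_atomic_load(&gReady, sk_memory_order_acquire)) { /*spin*/ }
    return gValue;   // the acquire/release pair makes 42 visible here
}

// A CAS loop on top of sk_atomic_compare_exchange. On failure the shim writes
// the observed value back into *expected, so no extra reload is needed.
bool ref_if_not_zero(int32_t* refcnt) {
    int32_t expected = sk_atomic_load(refcnt, sk_memory_order_relaxed);
    while (expected != 0) {
        int32_t desired = expected + 1;
        if (sk_atomic_compare_exchange(refcnt, &expected, desired,
                                       sk_memory_order_relaxed,
                                       sk_memory_order_relaxed)) {
            return true;   // took a reference
        }
        // expected now holds the value we raced against; loop and retry.
    }
    return false;          // count already hit zero
}
```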
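The new header's comment recommends moving to `__atomic` builtins or `std::atomic` outright. As a rough, non-authoritative sketch of what that migration looks like (none of these names are from the CL), the four shim operations map onto `std::atomic` as follows; the memory orders shown match the "acquire and relaxed loads, release stores" cases the comment calls out.

```cpp
#include <atomic>
#include <cstdint>

// Illustrative std::atomic counterparts of the shim's operations.
std::atomic<int32_t> v{0};

// ~ sk_atomic_load(&x, sk_memory_order_acquire)
int32_t load_acquire() { return v.load(std::memory_order_acquire); }

// ~ sk_atomic_store(&x, val, sk_memory_order_release)
void store_release(int32_t val) { v.store(val, std::memory_order_release); }

// ~ sk_atomic_fetch_add(&x, inc, sk_memory_order_relaxed)
int32_t fetch_add_relaxed(int32_t inc) { return v.fetch_add(inc, std::memory_order_relaxed); }

// ~ sk_atomic_compare_exchange(&x, &expected, desired, ...): on failure both
// write the observed value back into *expected.
bool cas(int32_t* expected, int32_t desired) {
    return v.compare_exchange_strong(*expected, desired);
}
```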