/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_sync_DEFINED
#define SkAtomics_sync_DEFINED

// This file is mostly a shim. We'd like to delete it. Please don't put much
// effort into maintaining it, and if you find bugs in it, the right fix is to
// delete this file and upgrade your compiler to something that supports
// __atomic builtins or std::atomic.
//
// sk_memory_order and the SK_CPU_X86 macro are not defined here; they are
// expected to come from the header that includes this shim.

static inline void barrier(sk_memory_order mo) {
    asm volatile("" : : : "memory");  // Prevents the compiler from reordering code.
#if SK_CPU_X86
    // On x86, we generally don't need an extra memory barrier for loads or stores.
    if (sk_memory_order_seq_cst == mo) { __sync_synchronize(); }
#else
    // On other platforms (e.g. ARM) we do, unless the memory order is relaxed.
    if (sk_memory_order_relaxed != mo) { __sync_synchronize(); }
#endif
}

// These barriers only support our majority use cases: acquire and relaxed
// loads, release stores. For anything more complicated, please consider
// deleting this file and upgrading your compiler.

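// A minimal sketch of that majority use case (hypothetical names, not part
// of this file): a writer publishes data with a release store, and a reader
// observes it with an acquire load.
//
//     static int gPayload;
//     static int gReady;
//
//     void publish() {
//         gPayload = 42;                                         // plain store
//         sk_atomic_store(&gReady, 1, sk_memory_order_release);  // then publish
//     }
//
//     bool consume(int* out) {
//         if (sk_atomic_load(&gReady, sk_memory_order_acquire)) {
//             *out = gPayload;  // Safe: the acquire load pairs with the release store.
//             return true;
//         }
//         return false;
//     }
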
template <typename T>
T sk_atomic_load(const T* ptr, sk_memory_order mo) {
    T val = *ptr;
    barrier(mo);  // For an acquire load, the barrier comes after the load.
    return val;
}

template <typename T>
void sk_atomic_store(T* ptr, T val, sk_memory_order mo) {
    barrier(mo);  // For a release store, the barrier comes before the store.
    *ptr = val;
}

template <typename T>
T sk_atomic_fetch_add(T* ptr, T val, sk_memory_order) {
    return __sync_fetch_and_add(ptr, val);  // A full barrier, whatever order was requested.
}

template <typename T>
T sk_atomic_fetch_sub(T* ptr, T val, sk_memory_order) {
    return __sync_fetch_and_sub(ptr, val);  // A full barrier, whatever order was requested.
}

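// For example, a minimal reference-counting sketch (hypothetical names, not
// part of this file), which is the typical use of these two. The requested
// orders are ignored above anyway, since the __sync builtins are full barriers.
//
//     #include <stdint.h>
//
//     struct Counted { int32_t fRefCnt; };  // starts at 1
//
//     void ref(Counted* c) {
//         (void)sk_atomic_fetch_add(&c->fRefCnt, 1, sk_memory_order_relaxed);
//     }
//
//     void unref(Counted* c) {
//         // fetch_sub returns the pre-decrement count, so 1 means this call
//         // released the last reference.
//         if (1 == sk_atomic_fetch_sub(&c->fRefCnt, 1, sk_memory_order_seq_cst)) {
//             delete c;
//         }
//     }
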
template <typename T>
bool sk_atomic_compare_exchange(T* ptr, T* expected, T desired,
                                sk_memory_order, sk_memory_order) {
    T prev = __sync_val_compare_and_swap(ptr, *expected, desired);
    if (prev == *expected) {
        return true;   // The swap happened.
    }
    *expected = prev;  // The swap failed; report the value we actually saw.
    return false;
}

template <typename T>
T sk_atomic_exchange(T* ptr, T val, sk_memory_order) {
    // There is no __sync exchange. Emulate it with a CAS loop.
    // (The orders passed below don't much matter: the __sync CAS is a full
    // barrier regardless.)
    T prev;
    do {
        prev = sk_atomic_load(ptr, sk_memory_order_seq_cst);
    } while (!sk_atomic_compare_exchange(ptr, &prev, val,
                                         sk_memory_order_seq_cst,
                                         sk_memory_order_seq_cst));
    return prev;
}

#endif  // SkAtomics_sync_DEFINED