Chromium Code Reviews

Side by Side Diff: include/core/SkAtomics.h

Issue 1327703003: Revert of Parallel cache - preliminary (Closed) Base URL: https://skia.googlesource.com/skia.git@master
Patch Set: Created 5 years, 3 months ago
/*
 * Copyright 2015 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef SkAtomics_DEFINED
#define SkAtomics_DEFINED

(...skipping 12 matching lines...)
template <typename T>
T sk_atomic_load(const T*, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
void sk_atomic_store(T*, T, sk_memory_order = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_fetch_add(T*, T, sk_memory_order = sk_memory_order_seq_cst);

- template <typename T>
- T sk_atomic_fetch_sub(T*, T, sk_memory_order = sk_memory_order_seq_cst);
-
template <typename T>
bool sk_atomic_compare_exchange(T*, T* expected, T desired,
                                sk_memory_order success = sk_memory_order_seq_cst,
                                sk_memory_order failure = sk_memory_order_seq_cst);

template <typename T>
T sk_atomic_exchange(T*, T, sk_memory_order = sk_memory_order_seq_cst);

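The free functions above mirror the C++11 atomic primitives with sequentially consistent defaults; the lines marked with "-" (sk_atomic_fetch_sub) are removed by this revert along with the rest of the preliminary parallel-cache change. As a rough usage sketch only: the clampedIncrement helper below is invented for illustration, and it assumes the usual compare-and-swap convention that *expected is refreshed with the current value when the exchange fails.

#include "SkAtomics.h"  // include path as used inside Skia; adjust for your build

// Illustrative only: bump *counter by one but never past cap, built from the
// primitives declared above.
static int clampedIncrement(int* counter, int cap) {
    int prev = sk_atomic_load(counter);
    while (prev < cap) {
        // On failure, prev is refreshed with the current value and we retry.
        if (sk_atomic_compare_exchange(counter, &prev, prev + 1)) {
            return prev + 1;
        }
    }
    return prev;  // already at or above cap
}
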
// A little wrapper class for small T (think, builtins: int, float, void*) to
// ensure they're always used atomically.  This is our stand-in for std::atomic<T>.
template <typename T>
class SkAtomic : SkNoncopyable {
public:
    SkAtomic() {}
    explicit SkAtomic(const T& val) : fVal(val) {}

    // It is essential we return by value rather than by const&.  fVal may change at any time.
    T load(sk_memory_order mo = sk_memory_order_seq_cst) const {
        return sk_atomic_load(&fVal, mo);
    }

    void store(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        sk_atomic_store(&fVal, val, mo);
    }

    T fetch_add(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
        return sk_atomic_fetch_add(&fVal, val, mo);
    }

-   T fetch_sub(const T& val, sk_memory_order mo = sk_memory_order_seq_cst) {
-       return sk_atomic_fetch_sub(&fVal, val, mo);
-   }
-
    bool compare_exchange(T* expected, const T& desired,
                          sk_memory_order success = sk_memory_order_seq_cst,
                          sk_memory_order failure = sk_memory_order_seq_cst) {
        return sk_atomic_compare_exchange(&fVal, expected, desired, success, failure);
    }
private:
    T fVal;
};

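SkAtomic exists so that every read and write of the wrapped value goes through one of the atomic operations, and load() returns by value because a reference into fVal could be invalidated by a concurrent writer. A minimal sketch of the intended usage pattern, where TicketDispenser and its members are invented names rather than part of this file:

// Illustrative only: hands out unique, increasing ticket numbers across threads.
class TicketDispenser {
public:
    TicketDispenser() : fNext(0) {}

    // fetch_add returns the previous value, so concurrent callers never collide.
    int next() { return fNext.fetch_add(1); }

    // Returning by value is what makes this safe to call while next() runs.
    int issuedSoFar() const { return fNext.load(); }

private:
    SkAtomic<int> fNext;
};
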
// IWYU pragma: begin_exports

(...skipping 38 matching lines...)
template <typename T>
T sk_acquire_load(T* ptr) { return sk_atomic_load(ptr, sk_memory_order_acquire); }

template <typename T>
void sk_release_store(T* ptr, T val) { sk_atomic_store(ptr, val, sk_memory_order_release); }

inline void sk_membar_acquire__after_atomic_dec() {}
inline void sk_membar_acquire__after_atomic_conditional_inc() {}

#endif//SkAtomics_DEFINED
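
sk_acquire_load and sk_release_store are the acquire/release pair most callers want for publishing a pointer: the release store makes all earlier writes visible to any thread that subsequently acquire-loads the same location. A sketch of that pattern under assumed names (Config and gConfig are invented for illustration):

// Illustrative only: build an object on one thread, publish it, and let
// readers observe either nullptr or a fully constructed Config.
struct Config { int verbosity; bool useGPU; };

static Config* gConfig = nullptr;

void publishConfig(Config* cfg) {
    // Release: writes to *cfg made before this call are visible to acquire-loaders.
    sk_release_store(&gConfig, cfg);
}

const Config* currentConfig() {
    // Acquire: a non-null result implies the fields written before publication are visible.
    return sk_acquire_load(&gConfig);
}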
