OLD | NEW |
1 /* | 1 /* |
2 * Copyright 2012 Google Inc. | 2 * Copyright 2012 Google Inc. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license that can be | 4 * Use of this source code is governed by a BSD-style license that can be |
5 * found in the LICENSE file. | 5 * found in the LICENSE file. |
6 */ | 6 */ |
7 | 7 |
8 #ifndef SkWeakRefCnt_DEFINED | 8 #ifndef SkWeakRefCnt_DEFINED |
9 #define SkWeakRefCnt_DEFINED | 9 #define SkWeakRefCnt_DEFINED |
10 | 10 |
11 #include "SkRefCnt.h" | 11 #include "SkRefCnt.h" |
12 #include "../private/SkAtomics.h" | 12 #include <atomic> |
13 | 13 |
14 /** \class SkWeakRefCnt | 14 /** \class SkWeakRefCnt |
15 | 15 |
16 SkWeakRefCnt is the base class for objects that may be shared by multiple | 16 SkWeakRefCnt is the base class for objects that may be shared by multiple |
17 objects. When an existing strong owner wants to share a reference, it calls | 17 objects. When an existing strong owner wants to share a reference, it calls |
18 ref(). When a strong owner wants to release its reference, it calls | 18 ref(). When a strong owner wants to release its reference, it calls |
19 unref(). When the shared object's strong reference count goes to zero as | 19 unref(). When the shared object's strong reference count goes to zero as |
20 the result of an unref() call, its (virtual) weak_dispose method is called. | 20 the result of an unref() call, its (virtual) weak_dispose method is called. |
21 It is an error for the destructor to be called explicitly (or via the | 21 It is an error for the destructor to be called explicitly (or via the |
22 object going out of scope on the stack or calling delete) if | 22 object going out of scope on the stack or calling delete) if |
(...skipping 32 matching lines...) |
55 The strong references collectively hold one weak reference. When the | 55 The strong references collectively hold one weak reference. When the |
56 strong reference count goes to zero, the collectively held weak | 56 strong reference count goes to zero, the collectively held weak |
57 reference is released. | 57 reference is released. |
58 */ | 58 */ |
59 SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {} | 59 SkWeakRefCnt() : SkRefCnt(), fWeakCnt(1) {} |
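
A minimal lifecycle sketch of the scheme described above (MyData is a hypothetical subclass used only for illustration, not part of this header):

    class MyData : public SkWeakRefCnt {};

    MyData* obj = new MyData;   // fRefCnt == 1, fWeakCnt == 1 (the collective weak ref)
    obj->weak_ref();            // fWeakCnt == 2: one weak owner plus the collective ref
    obj->unref();               // last strong ref gone: weak_dispose() runs and the
                                // collective weak ref is released, so fWeakCnt == 1
    obj->weak_unref();          // fWeakCnt == 0: the allocation is deleted
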
60 | 60 |
61 /** Destruct, asserting that the weak reference count is 1. | 61 /** Destruct, asserting that the weak reference count is 1. |
62 */ | 62 */ |
63 virtual ~SkWeakRefCnt() { | 63 virtual ~SkWeakRefCnt() { |
64 #ifdef SK_DEBUG | 64 #ifdef SK_DEBUG |
65 SkASSERT(fWeakCnt == 1); | 65 SkASSERT(getWeakCnt() == 1); |
66 fWeakCnt = 0; | 66 std::atomic_store_explicit(&fWeakCnt, 0, std::memory_order_relaxed); |
67 #endif | 67 #endif |
68 } | 68 } |
69 | 69 |
70 /** Return the weak reference count. | 70 #ifdef SK_DEBUG |
71 */ | 71 /** Return the weak reference count. */ |
72 int32_t getWeakCnt() const { return fWeakCnt; } | 72 int32_t getWeakCnt() const { |
| 73 return std::atomic_load_explicit(&fWeakCnt, std::memory_order_relaxed); |
| 74 } |
73 | 75 |
74 #ifdef SK_DEBUG | |
75 void validate() const { | 76 void validate() const { |
76 this->INHERITED::validate(); | 77 this->INHERITED::validate(); |
77 SkASSERT(fWeakCnt > 0); | 78 SkASSERT(getWeakCnt() > 0); |
78 } | 79 } |
79 #endif | 80 #endif |
80 | 81 |
| 82 private: |
| 83 /** If fRefCnt is 0, returns 0. |
| 84 * Otherwise increments fRefCnt, acquires, and returns the old value. |
| 85 */ |
| 86 int32_t atomic_conditional_acquire_strong_ref() const { |
| 87 int32_t prev = std::atomic_load_explicit(&fRefCnt, std::memory_order_relaxed); |
| 88 do { |
| 89 if (0 == prev) { |
| 90 break; |
| 91 } |
| 92 } while(!std::atomic_compare_exchange_weak_explicit(&fRefCnt, &prev, prev+1, |
| 93 std::memory_order_acquire, |
| 94 std::memory_order_relaxed)); |
| 95 return prev; |
| 96 } |
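
The helper above is the standard conditional-increment compare-exchange loop. The same pattern on a free-standing counter, as an illustrative sketch (not part of this change):

    #include <atomic>
    #include <cstdint>

    // Increment `counter` only if it is nonzero; return the previous value.
    // compare_exchange_weak reloads `prev` on failure, so each retry uses the
    // freshly observed value. Acquire ordering is needed only on success, to
    // pair with the release performed by the final decrement elsewhere; on
    // failure (counter already 0) relaxed suffices, since no shared state
    // will be read.
    int32_t conditional_increment(std::atomic<int32_t>& counter) {
        int32_t prev = counter.load(std::memory_order_relaxed);
        do {
            if (prev == 0) {
                break;  // already zero: do not resurrect
            }
        } while (!counter.compare_exchange_weak(prev, prev + 1,
                                                std::memory_order_acquire,
                                                std::memory_order_relaxed));
        return prev;
    }
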
| 97 |
| 98 public: |
81 /** Creates a strong reference from a weak reference, if possible. The | 99 /** Creates a strong reference from a weak reference, if possible. The |
82 caller must already be an owner. If try_ref() returns true the owner | 100 caller must already be an owner. If try_ref() returns true the owner |
83 is in possession of an additional strong reference. Both the original | 101 is in possession of an additional strong reference. Both the original |
84 reference and new reference must be properly unreferenced. If try_ref() | 102 reference and new reference must be properly unreferenced. If try_ref() |
85 returns false, no strong reference could be created and the owner's | 103 returns false, no strong reference could be created and the owner's |
86 reference is in the same state as before the call. | 104 reference is in the same state as before the call. |
87 */ | 105 */ |
88 bool SK_WARN_UNUSED_RESULT try_ref() const { | 106 bool SK_WARN_UNUSED_RESULT try_ref() const { |
89 if (sk_atomic_conditional_inc(&fRefCnt) != 0) { | 107 if (atomic_conditional_acquire_strong_ref() != 0) { |
90 // Acquire barrier (L/SL), if not provided above. | 108 // Acquire barrier (L/SL), if not provided above. |
91 // Prevents subsequent code from happening before the increment. | 109 // Prevents subsequent code from happening before the increment. |
92 sk_membar_acquire__after_atomic_conditional_inc(); | |
93 return true; | 110 return true; |
94 } | 111 } |
95 return false; | 112 return false; |
96 } | 113 } |
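
How a caller typically uses try_ref(): a weak-pointer-style holder that upgrades to a strong reference on demand. WeakHolder is hypothetical, sketched only to show the contract:

    class WeakHolder {
    public:
        explicit WeakHolder(SkWeakRefCnt* obj) : fObj(obj) { fObj->weak_ref(); }
        ~WeakHolder() { fObj->weak_unref(); }

        // Returns a strong reference the caller must unref(), or nullptr if
        // the object has already lost its last strong reference.
        SkWeakRefCnt* tryGet() const {
            return fObj->try_ref() ? fObj : nullptr;
        }

    private:
        SkWeakRefCnt* fObj;
    };
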
97 | 114 |
98 /** Increment the weak reference count. Must be balanced by a call to | 115 /** Increment the weak reference count. Must be balanced by a call to |
99 weak_unref(). | 116 weak_unref(). |
100 */ | 117 */ |
101 void weak_ref() const { | 118 void weak_ref() const { |
102 SkASSERT(fRefCnt > 0); | 119 SkASSERT(getRefCnt() > 0); |
103 SkASSERT(fWeakCnt > 0); | 120 SkASSERT(getWeakCnt() > 0); |
104 sk_atomic_inc(&fWeakCnt); // No barrier required. | 121 // No barrier required. |
| 122 (void)std::atomic_fetch_add_explicit(&fWeakCnt, +1, std::memory_order_relaxed); |
105 } | 123 } |
106 | 124 |
107 /** Decrement the weak reference count. If the weak reference count is 1 | 125 /** Decrement the weak reference count. If the weak reference count is 1 |
108 before the decrement, then call delete on the object. Note that if this | 126 before the decrement, then call delete on the object. Note that if this |
109 is the case, then the object needs to have been allocated via new, and | 127 is the case, then the object needs to have been allocated via new, and |
110 not on the stack. | 128 not on the stack. |
111 */ | 129 */ |
112 void weak_unref() const { | 130 void weak_unref() const { |
113 SkASSERT(fWeakCnt > 0); | 131 SkASSERT(getWeakCnt() > 0); |
114 // Release barrier (SL/S), if not provided below. | 132 // A release here acts in place of all releases we "should" have been doing in ref(). |
115 if (sk_atomic_dec(&fWeakCnt) == 1) { | 133 if (1 == std::atomic_fetch_add_explicit(&fWeakCnt, -1, std::memory_order_acq_rel)) { |
116 // Acquire barrier (L/SL), if not provided above. | 134 // Like try_ref(), the acquire is only needed on success, to make sure |
117 // Prevents code in destructor from happening before the decrement. | 135 // code in internal_dispose() doesn't happen before the decrement. |
118 sk_membar_acquire__after_atomic_dec(); | |
119 #ifdef SK_DEBUG | 136 #ifdef SK_DEBUG |
120 // so our destructor won't complain | 137 // so our destructor won't complain |
121 fWeakCnt = 1; | 138 std::atomic_store_explicit(&fWeakCnt, 1, std::memory_order_relaxed); |
122 #endif | 139 #endif |
123 this->INHERITED::internal_dispose(); | 140 this->INHERITED::internal_dispose(); |
124 } | 141 } |
125 } | 142 } |
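
The single acq_rel fetch_add above folds the old explicit release/acquire barrier pair into one operation. The general shape of that handshake, sketched on a free-standing counter for illustration:

    #include <atomic>
    #include <cstdint>

    // Every owner's decrement releases its prior writes; the owner that
    // observes the count going 1 -> 0 acquires all of them before disposing.
    bool decrement_and_maybe_dispose(std::atomic<int32_t>& counter) {
        if (counter.fetch_add(-1, std::memory_order_acq_rel) == 1) {
            // All other owners' writes are visible here; safe to delete.
            return true;
        }
        return false;
    }
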
126 | 143 |
127 /** Returns true if there are no strong references to the object. When this | 144 /** Returns true if there are no strong references to the object. When this |
128 is the case all future calls to try_ref() will return false. | 145 is the case all future calls to try_ref() will return false. |
129 */ | 146 */ |
130 bool weak_expired() const { | 147 bool weak_expired() const { |
131 return fRefCnt == 0; | 148 return fRefCnt == 0; |
(...skipping 12 matching lines...) |
144 /** Called when the strong reference count goes to zero. Calls weak_dispose | 161 /** Called when the strong reference count goes to zero. Calls weak_dispose |
145 on the object and releases the implicit weak reference held | 162 on the object and releases the implicit weak reference held |
146 collectively by the strong references. | 163 collectively by the strong references. |
147 */ | 164 */ |
148 void internal_dispose() const override { | 165 void internal_dispose() const override { |
149 weak_dispose(); | 166 weak_dispose(); |
150 weak_unref(); | 167 weak_unref(); |
151 } | 168 } |
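
internal_dispose() splits teardown in two: weak_dispose() runs when the strong count hits zero, while the allocation itself survives until the last weak_unref(). A subclass hook sketch, assuming the base declares weak_dispose() as a const virtual with an empty default (per the class comment; its declaration falls in the lines skipped above), with MyCachedThing purely hypothetical:

    class MyCachedThing : public SkWeakRefCnt {
    protected:
        // Runs when the last strong reference goes away, while weak owners
        // may still hold the allocation: drop expensive state here, keeping
        // only what weak holders are allowed to see.
        void weak_dispose() const override {
            // e.g. release caches or GPU resources owned by this object
            this->INHERITED::weak_dispose();
        }

    private:
        typedef SkWeakRefCnt INHERITED;
    };
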
152 | 169 |
153 /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */ | 170 /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */ |
154 mutable int32_t fWeakCnt; | 171 mutable std::atomic<int32_t> fWeakCnt; |
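
The invariant traced through a concrete sequence (debug-only, since getWeakCnt() is compiled under SK_DEBUG; obj is assumed freshly constructed with one strong reference):

    #ifdef SK_DEBUG
    void invariant_demo(SkWeakRefCnt* obj) {   // freshly created: 1 strong, 0 weak
        SkASSERT(obj->getWeakCnt() == 1);      // 0 weak + 1 collective
        obj->ref();                            // 2 strong refs...
        SkASSERT(obj->getWeakCnt() == 1);      // ...still share one collective ref
        obj->weak_ref();
        SkASSERT(obj->getWeakCnt() == 2);      // 1 weak + 1 collective
        obj->unref();
        obj->unref();                          // strong count 0: collective ref dropped
        SkASSERT(obj->getWeakCnt() == 1);      // our weak ref keeps the allocation
        obj->weak_unref();                     // 0: object deleted
    }
    #endif
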
155 | 172 |
156 typedef SkRefCnt INHERITED; | 173 typedef SkRefCnt INHERITED; |
157 }; | 174 }; |
158 | 175 |
159 #endif | 176 #endif |