Chromium Code Reviews

Unified Diff: include/core/SkWeakRefCnt.h

Issue 19808007: Split atomic and mutex implementations and make inlinable. (Closed) Base URL: http://skia.googlecode.com/svn/trunk/
Patch Set: Include all the files. Created 7 years, 5 months ago
 /*
  * Copyright 2012 Google Inc.
  *
  * Use of this source code is governed by a BSD-style license that can be
  * found in the LICENSE file.
  */

 #ifndef SkWeakRefCnt_DEFINED
 #define SkWeakRefCnt_DEFINED

(... 69 unchanged lines skipped ...)

     /** Creates a strong reference from a weak reference, if possible. The
         caller must already be an owner. If try_ref() returns true the owner
         is in possession of an additional strong reference. Both the original
         reference and new reference must be properly unreferenced. If try_ref()
         returns false, no strong reference could be created and the owner's
         reference is in the same state as before the call.
     */
     bool SK_WARN_UNUSED_RESULT try_ref() const {
         if (sk_atomic_conditional_inc(&fRefCnt) != 0) {
-            // Aquire barrier (L/SL), if not provided above.
+            // Acquire barrier (L/SL), if not provided above.
             // Prevents subsequent code from happening before the increment.
-            sk_membar_aquire__after_atomic_conditional_inc();
+            sk_membar_acquire__after_atomic_conditional_inc();
             return true;
         }
         return false;
     }

     /** Increment the weak reference count. Must be balanced by a call to
         weak_unref().
     */
     void weak_ref() const {
         SkASSERT(fRefCnt > 0);
         SkASSERT(fWeakCnt > 0);
         sk_atomic_inc(&fWeakCnt); // No barrier required.
     }

     /** Decrement the weak reference count. If the weak reference count is 1
         before the decrement, then call delete on the object. Note that if this
         is the case, then the object needs to have been allocated via new, and
         not on the stack.
     */
     void weak_unref() const {
         SkASSERT(fWeakCnt > 0);
         // Release barrier (SL/S), if not provided below.
         if (sk_atomic_dec(&fWeakCnt) == 1) {
-            // Aquire barrier (L/SL), if not provided above.
+            // Acquire barrier (L/SL), if not provided above.
             // Prevents code in destructor from happening before the decrement.
-            sk_membar_aquire__after_atomic_dec();
+            sk_membar_acquire__after_atomic_dec();
 #ifdef SK_DEBUG
             // so our destructor won't complain
             fWeakCnt = 1;
 #endif
             SkRefCnt::internal_dispose();
         }
     }

     /** Returns true if there are no strong references to the object. When this
         is the case all future calls to try_ref() will return false.
(... 21 unchanged lines skipped ...)
         weak_unref();
     }

     /* Invariant: fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) */
     mutable int32_t fWeakCnt;

     typedef SkRefCnt INHERITED;
 };

 #endif
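
For orientation, here is a minimal usage sketch of the weak-reference API under review. MyResource and useIfStillAlive are hypothetical names invented for this example; only the SkWeakRefCnt methods (try_ref(), unref(), weak_ref(), weak_unref()) come from the file itself.

    #include "SkWeakRefCnt.h"

    // Hypothetical subclass, for illustration only.
    class MyResource : public SkWeakRefCnt {
    public:
        void use() { /* ... */ }
    };

    // Per the try_ref() contract above, the caller must already own a weak
    // reference to `res` (taken earlier with res->weak_ref()).
    static void useIfStillAlive(MyResource* res) {
        if (res->try_ref()) {
            // Success: we now also hold a strong reference, so the object
            // stays fully alive until the matching unref().
            res->use();
            res->unref();
        }
        // On failure no strong reference was created; our weak reference is
        // untouched and must still be released later with res->weak_unref().
    }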
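The "Acquire barrier (L/SL)" and "Release barrier (SL/S)" comments describe ordering that any reference-count implementation needs, independent of Skia's sk_atomic_*/sk_membar_* primitives. As a sketch of the same semantics in portable C++11 std::atomic terms (an illustration, not the code in this patch):

    #include <atomic>
    #include <cstdint>

    // Increment-if-not-zero, the shape try_ref() relies on: a CAS loop that
    // refuses to resurrect a count that has already reached zero.
    static bool conditional_inc(std::atomic<int32_t>& cnt) {
        int32_t prev = cnt.load(std::memory_order_relaxed);
        do {
            if (prev == 0) {
                return false;  // last strong reference is already gone
            }
            // memory_order_acquire on success plays the role of
            // sk_membar_acquire__after_atomic_conditional_inc().
        } while (!cnt.compare_exchange_weak(prev, prev + 1,
                                            std::memory_order_acquire,
                                            std::memory_order_relaxed));
        return true;
    }

    // Decrement, the shape weak_unref() relies on: release on the decrement,
    // then an acquire fence before destruction so destructor code cannot be
    // reordered ahead of the final decrement.
    static bool dec_and_should_dispose(std::atomic<int32_t>& cnt) {
        if (cnt.fetch_sub(1, std::memory_order_release) == 1) {
            std::atomic_thread_fence(std::memory_order_acquire);
            return true;  // caller runs the dispose/destructor path
        }
        return false;
    }

This is the same acquire/release pairing common shared_ptr implementations use; the relaxed failure order in the CAS is safe because a failed try_ref() publishes nothing.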

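The invariant fWeakCnt = #weak + (fRefCnt > 0 ? 1 : 0) can be sanity-checked with a short trace. This assumes, as the weak_unref() call visible just before the invariant comment suggests, that dropping the last strong reference also releases the strong side's implicit weak reference (MyResource is the hypothetical class from the first sketch):

    // fWeakCnt == (#weak references) + (fRefCnt > 0 ? 1 : 0)
    //
    // obj = new MyResource  -> fRefCnt == 1, fWeakCnt == 1   (0 weak + 1)
    // obj->weak_ref()       -> fRefCnt == 1, fWeakCnt == 2   (1 weak + 1)
    // obj->unref()          -> fRefCnt == 0, and the strong side's implicit
    //                          weak reference is dropped:    fWeakCnt == 1
    // obj->weak_unref()     -> fWeakCnt == 0: only now is delete invoked,
    //                          as the weak_unref() documentation requires.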