OLD | NEW |
---|---|
1 | 1 |
2 /* | 2 /* |
3 * Copyright 2006 The Android Open Source Project | 3 * Copyright 2006 The Android Open Source Project |
4 * | 4 * |
5 * Use of this source code is governed by a BSD-style license that can be | 5 * Use of this source code is governed by a BSD-style license that can be |
6 * found in the LICENSE file. | 6 * found in the LICENSE file. |
7 */ | 7 */ |
8 | 8 |
9 | 9 |
10 #ifndef SkRefCnt_DEFINED | 10 #ifndef SkRefCnt_DEFINED |
(...skipping 23 matching lines...) | |
34 | 34 |
35 /** Destruct, asserting that the reference count is 1. | 35 /** Destruct, asserting that the reference count is 1. |
36 */ | 36 */ |
37 virtual ~SkRefCnt() { | 37 virtual ~SkRefCnt() { |
38 #ifdef SK_DEBUG | 38 #ifdef SK_DEBUG |
39 SkASSERT(fRefCnt == 1); | 39 SkASSERT(fRefCnt == 1); |
40 fRefCnt = 0; // illegal value, to catch us if we reuse after delete | 40 fRefCnt = 0; // illegal value, to catch us if we reuse after delete |
41 #endif | 41 #endif |
42 } | 42 } |
43 | 43 |
44 /** Return the reference count. | 44 /** Return the reference count. Use only for debugging. */ |
bsalomon 2013/07/15 13:04:08: It seems a little weird that these obviously call…
bungeman-skia 2013/07/15 16:02:41: I believe that with this change this is only used…
| |
45 */ | 45 int32_t getRefCnt() const { return sk_atomic_unprotected_read(fRefCnt); } |
46 int32_t getRefCnt() const { return fRefCnt; } | 46 |
47 /** Returns true if the caller is the only owner. | |
48 * | |
49 * This provides no memory barriers. | |
50 * | |
51 * Using the object without additional memory barriers when this returns | |
52 * true is safe so long as the object is only modified when this returns | |
53 * true and the caller is still the only owner. This is true in the usual | |
54 * case of optimizing copy on write semantics. | |
55 * | |
56 * If other threads may have modified the object (even within a mutex), | |
57 * then an acquire barrier (L/SL) is required before using the object | |
58 * (may already be provided by the load in unique() on some platforms). | |
59 * Without this barrier, modifications to the object made by the other | |
60 * thread may not yet be visible or complete on the current thread. | |
61 */ | |
62 bool unique() const { | |
Alexander Potapenko 2013/07/15 15:05:34: I suggest to add an sk_atomic_acquire_load() funct…
bungeman-skia 2013/07/15 16:02:41: Yes, this is addressed in the comment above. Addin…
Alexander Potapenko 2013/07/16 14:03:30: Yes, I was talking about false positives from TSan…
bungeman-skia 2013/07/16 15:50:57: All writes here are atomic. The entire point of th…
| |
63 // sk_atomic_unprotected_read forces an atomic read of fRefCnt and | |
64 // marks the read as a benign race with ref() and unref(). | |
65 return (1 == sk_atomic_unprotected_read(fRefCnt)); | |
Dmitry Vyukov 2013/07/16 13:17:51: sk_atomic_unprotected_read() needs to use Acquire_…
bungeman-skia 2013/07/16 15:50:57: I've tried to clarify via email, but nothing inter…
| |
66 } | |
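To make the copy-on-write use case described in the unique() comment concrete, here is a minimal sketch; MyBlob, appendWithCOW, and their members are hypothetical stand-ins for a typical Skia-style shared container, not part of this header:

    #include <vector>
    #include "SkRefCnt.h"

    // Hypothetical ref-counted container with an explicit deep-copy constructor
    // (SkRefCnt itself is noncopyable, so the copy must be spelled out).
    class MyBlob : public SkRefCnt {
    public:
        MyBlob() {}
        MyBlob(const MyBlob& src) : fData(src.fData) {}
        void append(int v) { fData.push_back(v); }
    private:
        std::vector<int> fData;
    };

    // Copy-on-write append: mutate in place only while we are the sole owner.
    static void appendWithCOW(MyBlob*& blob, int v) {
        if (!blob->unique()) {
            MyBlob* writable = new MyBlob(*blob);  // private copy, ref count 1
            blob->unref();                         // drop our ref on the shared copy
            blob = writable;
        }
        blob->append(v);  // sole owner: safe without extra barriers, per the comment above
    }

Note that, per the header comment, this is only safe if the blob is modified exclusively by whichever owner currently sees unique() return true.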
47 | 67 |
48 /** Increment the reference count. Must be balanced by a call to unref(). | 68 /** Increment the reference count. Must be balanced by a call to unref(). |
49 */ | 69 */ |
50 void ref() const { | 70 void ref() const { |
51 SkASSERT(fRefCnt > 0); | 71 SkASSERT(fRefCnt > 0); |
52 sk_atomic_inc(&fRefCnt); // No barrier required. | 72 sk_atomic_inc(&fRefCnt); // No barrier required. |
53 } | 73 } |
54 | 74 |
55 /** Decrement the reference count. If the reference count is 1 before the | 75 /** Decrement the reference count. If the reference count is 1 before the |
56 decrement, then delete the object. Note that if this is the case, then | 76 decrement, then delete the object. Note that if this is the case, then |
57 the object needs to have been allocated via new, and not on the stack. | 77 the object needs to have been allocated via new, and not on the stack. |
58 */ | 78 */ |
59 void unref() const { | 79 void unref() const { |
60 SkASSERT(fRefCnt > 0); | 80 SkASSERT(fRefCnt > 0); |
61 // Release barrier (SL/S), if not provided below. | 81 // Release barrier (SL/S), if not provided below. |
62 if (sk_atomic_dec(&fRefCnt) == 1) { | 82 if (sk_atomic_dec(&fRefCnt) == 1) { |
63 // Acquire barrier (L/SL), if not provided above. | 83 // Acquire barrier (L/SL), if not provided above. |
64 // Prevents code in dispose from happening before the decrement. | 84 // Prevents code in dispose from happening before the decrement. |
65 sk_membar_aquire__after_atomic_dec(); | 85 sk_membar_aquire__after_atomic_dec(); |
66 internal_dispose(); | 86 internal_dispose(); |
67 } | 87 } |
68 } | 88 } |
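For readers new to this class, a minimal sketch of the ref()/unref() balance described above; MyResource is a hypothetical SkRefCnt subclass, and the initial count of 1 belongs to the creator:

    MyResource* res = new MyResource;  // ref count starts at 1 (creator's reference)
    res->ref();                        // a second owner takes a reference -> 2
    res->unref();                      // that owner is done -> back to 1
    res->unref();                      // last reference released: internal_dispose() deletes res

In practice Skia code normally lets a smart-pointer helper (for example SkAutoTUnref) make the balancing unref() call automatically rather than pairing the calls by hand.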
69 | 89 |
70 void validate() const { | 90 void validate() const { |
71 SkASSERT(fRefCnt > 0); | 91 SkASSERT(fRefCnt > 0); |
72 } | 92 } |
73 | 93 |
74 /** | 94 /** |
75 * Alias for ref(), for compatibility with scoped_refptr. | |
76 */ | |
77 void AddRef() { this->ref(); } | |
78 | |
79 /** | |
80 * Alias for unref(), for compatibility with scoped_refptr. | |
81 */ | |
82 void Release() { this->unref(); } | |
83 | |
84 /** | |
85 * Alias for unref(), for compatibility with WTF::RefPtr. | 95 * Alias for unref(), for compatibility with WTF::RefPtr. |
86 */ | 96 */ |
87 void deref() { this->unref(); } | 97 void deref() { this->unref(); } |
88 | 98 |
89 protected: | 99 protected: |
90 /** | 100 /** |
91 * Allow subclasses to call this if they've overridden internal_dispose | 101 * Allow subclasses to call this if they've overridden internal_dispose |
92 * so they can reset fRefCnt before the destructor is called. Should only | 102 * so they can reset fRefCnt before the destructor is called. Should only |
93 * be called right before calling through to inherited internal_dispose() | 103 * be called right before calling through to inherited internal_dispose() |
94 * or before calling the destructor. | 104 * or before calling the destructor. |
95 */ | 105 */ |
96 void internal_dispose_restore_refcnt_to_1() const { | 106 void internal_dispose_restore_refcnt_to_1() const { |
97 #ifdef SK_DEBUG | 107 #ifdef SK_DEBUG |
98 SkASSERT(0 == fRefCnt); | 108 SkASSERT(0 == fRefCnt); |
99 fRefCnt = 1; | 109 fRefCnt = 1; |
100 #endif | 110 #endif |
101 } | 111 } |
102 | 112 |
103 private: | 113 private: |
104 /** | 114 /** |
105 * Called when the ref count goes to 0. | 115 * Called when the ref count goes to 0. |
106 */ | 116 */ |
107 virtual void internal_dispose() const { | 117 virtual void internal_dispose() const { |
108 this->internal_dispose_restore_refcnt_to_1(); | 118 this->internal_dispose_restore_refcnt_to_1(); |
109 SkDELETE(this); | 119 SkDELETE(this); |
110 } | 120 } |
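To show how the hooks above fit together, here is a rough sketch of a subclass overriding internal_dispose() to do extra work when the count reaches zero. SkLoggingRefCnt and flushPendingWrites() are hypothetical, and a real subclass doing this must also be listed as a friend below, since the base internal_dispose() it chains to is private:

    class SkLoggingRefCnt : public SkRefCnt {
    private:
        virtual void internal_dispose() const {
            this->flushPendingWrites();          // hypothetical cleanup at ref count zero
            // Chaining to the base restores the debug ref count to 1 and then
            // calls SkDELETE(this). A subclass that destroyed the object itself
            // would instead call internal_dispose_restore_refcnt_to_1() first so
            // the SkASSERT in ~SkRefCnt passes.
            this->INHERITED::internal_dispose();
        }
        void flushPendingWrites() const;         // hypothetical
        typedef SkRefCnt INHERITED;
    };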
111 | 121 |
122 // The following friends are those which override internal_dispose() | |
123 // and conditionally call SkRefCnt::internal_dispose(). | |
124 friend class GrTexture; | |
112 friend class SkWeakRefCnt; | 125 friend class SkWeakRefCnt; |
113 friend class GrTexture; // to allow GrTexture's internal_dispose to | |
114 // call SkRefCnt's & directly set fRefCnt (to 1) | |
115 | 126 |
116 mutable int32_t fRefCnt; | 127 mutable int32_t fRefCnt; |
117 | 128 |
118 typedef SkNoncopyable INHERITED; | 129 typedef SkNoncopyable INHERITED; |
119 }; | 130 }; |
120 | 131 |
121 /////////////////////////////////////////////////////////////////////////////// | 132 /////////////////////////////////////////////////////////////////////////////// |
122 | 133 |
123 /** Helper macro to safely assign one SkRefCnt[TS]* to another, checking for | 134 /** Helper macro to safely assign one SkRefCnt[TS]* to another, checking for |
124 null on each side of the assignment, and ensuring that ref() is called | 135 null on each side of the assignment, and ensuring that ref() is called |
(...skipping 137 matching lines...) | |
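The SkRefCnt_SafeAssign comment above is cut off by the elided lines; for orientation, the conventional shape of such a null-safe assignment helper is sketched here as a function (an illustration of the described behavior, not the macro's verbatim body):

    template <typename T>
    static inline void SkSafeAssign_sketch(T*& dst, T* src) {
        if (src) {
            src->ref();        // ref the incoming pointer first...
        }
        if (dst) {
            dst->unref();      // ...so self-assignment cannot destroy the object early
        }
        dst = src;
    }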
262 typedef T* SkRefPtr::*unspecified_bool_type; | 273 typedef T* SkRefPtr::*unspecified_bool_type; |
263 operator unspecified_bool_type() const { | 274 operator unspecified_bool_type() const { |
264 return fObj ? &SkRefPtr::fObj : NULL; | 275 return fObj ? &SkRefPtr::fObj : NULL; |
265 } | 276 } |
266 | 277 |
267 private: | 278 private: |
268 T* fObj; | 279 T* fObj; |
269 }; | 280 }; |
270 | 281 |
271 #endif | 282 #endif |