OLD | NEW |
1 /* | 1 /* |
2 ****************************************************************************** | 2 ****************************************************************************** |
3 * Copyright (C) 2014, International Business Machines | 3 * Copyright (C) 2015, International Business Machines |
4 * Corporation and others. All Rights Reserved. | 4 * Corporation and others. All Rights Reserved. |
5 ****************************************************************************** | 5 ****************************************************************************** |
6 * sharedobject.cpp | 6 * sharedobject.cpp |
7 */ | 7 */ |
8 #include "sharedobject.h" | 8 #include "sharedobject.h" |
| 9 #include "uassert.h" |
9 | 10 |
10 U_NAMESPACE_BEGIN | 11 U_NAMESPACE_BEGIN |
| 12 |
11 SharedObject::~SharedObject() {} | 13 SharedObject::~SharedObject() {} |
12 | 14 |
| 15 UnifiedCacheBase::~UnifiedCacheBase() {} |
| 16 |
13 void | 17 void |
14 SharedObject::addRef() const { | 18 SharedObject::addRef(UBool fromWithinCache) const { |
15 umtx_atomic_inc(&totalRefCount); | 19 umtx_atomic_inc(&totalRefCount); |
| 20 |
 | 21     // Although the items-in-use count may not be correct immediately, |
 | 22     // it will be correct eventually. |
| 23 if (umtx_atomic_inc(&hardRefCount) == 1 && cachePtr != NULL) { |
| 24 // If this object is cached, and the hardRefCount goes from 0 to 1, |
| 25 // then the increment must happen from within the cache while the |
 | 26         // cache global mutex is locked. In this way, we can rest assured |
 | 27         // that data races can't happen when the cache performs some task |
 | 28         // while the hardRefCount is zero and the global cache mutex is locked. |
| 29 U_ASSERT(fromWithinCache); |
| 30 cachePtr->incrementItemsInUse(); |
| 31 } |
16 } | 32 } |
17 | 33 |
18 void | 34 void |
19 SharedObject::removeRef() const { | 35 SharedObject::removeRef(UBool fromWithinCache) const { |
20 if(umtx_atomic_dec(&totalRefCount) == 0) { | 36 UBool decrementItemsInUse = (umtx_atomic_dec(&hardRefCount) == 0); |
| 37 UBool allReferencesGone = (umtx_atomic_dec(&totalRefCount) == 0); |
| 38 |
 | 39     // Although the items-in-use count may not be correct immediately, |
 | 40     // it will be correct eventually. |
| 41 if (decrementItemsInUse && cachePtr != NULL) { |
| 42 if (fromWithinCache) { |
| 43 cachePtr->decrementItemsInUse(); |
| 44 } else { |
| 45 cachePtr->decrementItemsInUseWithLockingAndEviction(); |
| 46 } |
| 47 } |
| 48 if (allReferencesGone) { |
21 delete this; | 49 delete this; |
22 } | 50 } |
23 } | 51 } |
24 | 52 |
25 void | 53 void |
26 SharedObject::addSoftRef() const { | 54 SharedObject::addSoftRef() const { |
27 addRef(); | 55 umtx_atomic_inc(&totalRefCount); |
28 umtx_atomic_inc(&softRefCount); | 56 ++softRefCount; |
29 } | 57 } |
30 | 58 |
31 void | 59 void |
32 SharedObject::removeSoftRef() const { | 60 SharedObject::removeSoftRef() const { |
33 umtx_atomic_dec(&softRefCount); | 61 --softRefCount; |
34 removeRef(); | 62 if (umtx_atomic_dec(&totalRefCount) == 0) { |
35 } | 63 delete this; |
36 | 64 } |
37 UBool | |
38 SharedObject::allSoftReferences() const { | |
39 return umtx_loadAcquire(totalRefCount) == umtx_loadAcquire(softRefCount); | |
40 } | 65 } |
41 | 66 |
42 int32_t | 67 int32_t |
43 SharedObject::getRefCount() const { | 68 SharedObject::getRefCount() const { |
44 return umtx_loadAcquire(totalRefCount); | 69 return umtx_loadAcquire(totalRefCount); |
45 } | 70 } |
46 | 71 |
47 int32_t | 72 int32_t |
48 SharedObject::getSoftRefCount() const { | 73 SharedObject::getHardRefCount() const { |
49 return umtx_loadAcquire(softRefCount); | 74 return umtx_loadAcquire(hardRefCount); |
50 } | 75 } |
51 | 76 |
52 void | 77 void |
53 SharedObject::deleteIfZeroRefCount() const { | 78 SharedObject::deleteIfZeroRefCount() const { |
54 if(getRefCount() == 0) { | 79 if(getRefCount() == 0) { |
55 delete this; | 80 delete this; |
56 } | 81 } |
57 } | 82 } |
58 | 83 |
59 U_NAMESPACE_END | 84 U_NAMESPACE_END |
OLD | NEW |