| Index: source/common/sharedobject.cpp
|
| diff --git a/source/common/sharedobject.cpp b/source/common/sharedobject.cpp
|
| index 6affcd09cd5035395b43f22cdb395db5ef845d6c..e5e034bec13b9e2b447b6f30884ff84480fe0da7 100644
|
| --- a/source/common/sharedobject.cpp
|
| +++ b/source/common/sharedobject.cpp
|
| @@ -1,42 +1,67 @@
|
| /*
|
| ******************************************************************************
|
| -* Copyright (C) 2014, International Business Machines
|
| +* Copyright (C) 2015, International Business Machines
|
| * Corporation and others. All Rights Reserved.
|
| ******************************************************************************
|
| * sharedobject.cpp
|
| */
|
| #include "sharedobject.h"
|
| +#include "uassert.h"
|
|
|
| U_NAMESPACE_BEGIN
|
| +
|
| SharedObject::~SharedObject() {}
|
|
|
| +UnifiedCacheBase::~UnifiedCacheBase() {}
|
| +
|
| void
|
| -SharedObject::addRef() const {
|
| +SharedObject::addRef(UBool fromWithinCache) const {
|
| umtx_atomic_inc(&totalRefCount);
|
| +
|
| + // Although the items-in-use count may not be correct immediately,

| + // it will be correct eventually.
|
| + if (umtx_atomic_inc(&hardRefCount) == 1 && cachePtr != NULL) {
|
| + // If this object is cached, and the hardRefCount goes from 0 to 1,
|
| + // then the increment must happen from within the cache while the
|
| + // cache global mutex is locked. In this way, we can rest assured

| + // that data races can't happen when the cache performs some task while

| + // the hardRefCount is zero and the global cache mutex is locked.
|
| + U_ASSERT(fromWithinCache);
|
| + cachePtr->incrementItemsInUse();
|
| + }
|
| }
|
|
|
| void
|
| -SharedObject::removeRef() const {
|
| - if(umtx_atomic_dec(&totalRefCount) == 0) {
|
| +SharedObject::removeRef(UBool fromWithinCache) const {
|
| + UBool decrementItemsInUse = (umtx_atomic_dec(&hardRefCount) == 0);
|
| + UBool allReferencesGone = (umtx_atomic_dec(&totalRefCount) == 0);
|
| +
|
| + // Although the items-in-use count may not be correct immediately,

| + // it will be correct eventually.
|
| + if (decrementItemsInUse && cachePtr != NULL) {
|
| + if (fromWithinCache) {
|
| + cachePtr->decrementItemsInUse();
|
| + } else {
|
| + cachePtr->decrementItemsInUseWithLockingAndEviction();
|
| + }
|
| + }
|
| + if (allReferencesGone) {
|
| delete this;
|
| }
|
| }
|
|
|
| void
|
| SharedObject::addSoftRef() const {
|
| - addRef();
|
| - umtx_atomic_inc(&softRefCount);
|
| + umtx_atomic_inc(&totalRefCount);
|
| + ++softRefCount;
|
| }
|
|
|
| void
|
| SharedObject::removeSoftRef() const {
|
| - umtx_atomic_dec(&softRefCount);
|
| - removeRef();
|
| -}
|
| -
|
| -UBool
|
| -SharedObject::allSoftReferences() const {
|
| - return umtx_loadAcquire(totalRefCount) == umtx_loadAcquire(softRefCount);
|
| + --softRefCount;
|
| + if (umtx_atomic_dec(&totalRefCount) == 0) {
|
| + delete this;
|
| + }
|
| }
|
|
|
| int32_t
|
| @@ -45,8 +70,8 @@ SharedObject::getRefCount() const {
|
| }
|
|
|
| int32_t
|
| -SharedObject::getSoftRefCount() const {
|
| - return umtx_loadAcquire(softRefCount);
|
| +SharedObject::getHardRefCount() const {
|
| + return umtx_loadAcquire(hardRefCount);
|
| }
|
|
|
| void
|
|
|