Chromium Code Reviews| Index: chromecast/base/device_capabilities_impl.cc |
| diff --git a/chromecast/base/device_capabilities_impl.cc b/chromecast/base/device_capabilities_impl.cc |
| index 7361390cfd20c2f031dbd18b60f41f8d79d6465a..971e2734fa6708f25d9a3b459dc923bd7023a720 100644 |
| --- a/chromecast/base/device_capabilities_impl.cc |
| +++ b/chromecast/base/device_capabilities_impl.cc |
| @@ -5,6 +5,8 @@ |
| #include "chromecast/base/device_capabilities_impl.h" |
| #include "base/logging.h" |
| +#include "base/single_thread_task_runner.h" |
| +#include "base/thread_task_runner_handle.h" |
| #include "base/values.h" |
| #include "chromecast/base/serializers.h" |
| @@ -67,108 +69,195 @@ DeviceCapabilities::Validator::Validator(DeviceCapabilities* capabilities) |
| void DeviceCapabilities::Validator::SetValidatedValue( |
| const std::string& path, |
| scoped_ptr<base::Value> new_value) const { |
| - capabilities_->SetValidatedValueInternal(path, new_value.Pass()); |
| + capabilities_->SetValidatedValue(path, new_value.Pass()); |
| +} |
| + |
| +template <class T> |
| +DeviceCapabilitiesImpl::ImmutableRefCounted<T>::ImmutableRefCounted() |
| + : data(new T) {} |
| + |
| +template <class T> |
| +DeviceCapabilitiesImpl::ImmutableRefCounted<T>::ImmutableRefCounted( |
| + scoped_ptr<const T> data_arg) |
| + : data(data_arg.Pass()) { |
| + DCHECK(data.get()); |
| +} |
| + |
| +DeviceCapabilitiesImpl::ValidatorInfo::ValidatorInfo(Validator* validator_arg) |
| + : validator(validator_arg), |
| + task_runner(base::ThreadTaskRunnerHandle::Get()) { |
| + DCHECK(validator); |
| + DCHECK(task_runner.get()); |
| +} |
| + |
| +DeviceCapabilitiesImpl::ValidatorInfo::~ValidatorInfo() { |
| + // Check that ValidatorInfo is being destroyed on the same thread that it was |
| + // constructed on. |
| + DCHECK(task_runner->BelongsToCurrentThread()); |
| } |
| DeviceCapabilitiesImpl::DeviceCapabilitiesImpl() |
| - : capabilities_(new base::DictionaryValue), |
| - capabilities_str_(SerializeToJson(*capabilities_)) { |
| + : capabilities_(new ImmutableRefCountedDictionary), |
| + capabilities_str_( |
| + new ImmutableRefCountedString(SerializeToJson(*capabilities_->data))), |
| + task_runner_for_writes_(base::ThreadTaskRunnerHandle::Get()), |
| + observer_list_(new base::ObserverListThreadSafe<Observer>) { |
| DCHECK(capabilities_str_.get()); |
| + DCHECK(task_runner_for_writes_.get()); |
| } |
| DeviceCapabilitiesImpl::~DeviceCapabilitiesImpl() { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| // Make sure that any registered Validators have unregistered at this point |
| DCHECK(validator_map_.empty()); |
| + // Make sure that all observers have been removed at this point |
| + observer_list_->AssertEmpty(); |
| } |
| void DeviceCapabilitiesImpl::Register(const std::string& key, |
| Validator* validator) { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| DCHECK(IsValidRegisterKey(key)); |
| DCHECK(validator); |
| - bool added = validator_map_.insert(std::make_pair(key, validator)).second; |
| + base::AutoLock auto_lock(validation_lock_); |
| + bool added = |
| + validator_map_.insert(std::make_pair(key, ValidatorInfo(validator))) |
| + .second; |
| // Check that a validator has not already been registered for this key |
| DCHECK(added); |
| } |
| void DeviceCapabilitiesImpl::Unregister(const std::string& key, |
| const Validator* validator) { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| + base::AutoLock auto_lock(validation_lock_); |
| + auto validator_it = validator_map_.find(key); |
| + DCHECK(validator_it != validator_map_.end()); |
| // Check that validator being unregistered matches the original for |key|. |
| // This prevents managers from accidentally unregistering incorrect |
| // validators. |
| - DCHECK_EQ(validator, GetValidator(key)); |
| + DCHECK_EQ(validator, validator_it->second.validator); |
| + // Check that validator is unregistering on same thread that it was |
| + // registered on |
| + DCHECK(validator_it->second.task_runner->BelongsToCurrentThread()); |
| bool erased = validator_map_.erase(key); |
| DCHECK(erased); |
| } |
| DeviceCapabilities::Validator* DeviceCapabilitiesImpl::GetValidator( |
| const std::string& key) const { |
| + base::AutoLock auto_lock(validation_lock_); |
| auto validator_it = validator_map_.find(key); |
| - return validator_it == validator_map_.end() ? nullptr : validator_it->second; |
| + return validator_it == validator_map_.end() ? nullptr |
| + : validator_it->second.validator; |
| } |
| bool DeviceCapabilitiesImpl::BluetoothSupported() const { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| + scoped_refptr<ImmutableRefCountedDictionary> capabilities_ref = |
| + GetCapabilitiesReference(); |
| bool bluetooth_supported = false; |
| - bool found_key = |
| - capabilities_->GetBoolean(kKeyBluetoothSupported, &bluetooth_supported); |
| + bool found_key = capabilities_ref->data->GetBoolean(kKeyBluetoothSupported, |
| + &bluetooth_supported); |
| DCHECK(found_key); |
| return bluetooth_supported; |
| } |
| bool DeviceCapabilitiesImpl::DisplaySupported() const { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| + scoped_refptr<ImmutableRefCountedDictionary> capabilities_ref = |
| + GetCapabilitiesReference(); |
| bool display_supported = false; |
| - bool found_key = |
| - capabilities_->GetBoolean(kKeyDisplaySupported, &display_supported); |
| + bool found_key = capabilities_ref->data->GetBoolean(kKeyDisplaySupported, |
| + &display_supported); |
| DCHECK(found_key); |
| return display_supported; |
| } |
| -bool DeviceCapabilitiesImpl::GetCapability( |
| - const std::string& path, |
| - const base::Value** out_value) const { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| - return capabilities_ && capabilities_->Get(path, out_value); |
| +scoped_ptr<base::Value> DeviceCapabilitiesImpl::GetCapability( |
| + const std::string& path) const { |
| + scoped_refptr<ImmutableRefCountedDictionary> capabilities_ref = |
| + GetCapabilitiesReference(); |
| + const base::Value* value = nullptr; |
| + bool found_path = capabilities_ref->data->Get(path, &value); |
| + return found_path ? value->CreateDeepCopy() : scoped_ptr<base::Value>(); |
| } |
| -const std::string& DeviceCapabilitiesImpl::GetCapabilitiesString() const { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| - return *capabilities_str_; |
| +std::string DeviceCapabilitiesImpl::GetCapabilitiesString() const { |
| + scoped_refptr<ImmutableRefCountedString> capabilities_str_ref = |
| + GetCapabilitiesStrReference(); |
| + return *capabilities_str_ref->data; |
| } |
| -const base::DictionaryValue* DeviceCapabilitiesImpl::GetCapabilities() const { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| - return capabilities_.get(); |
| +scoped_ptr<base::DictionaryValue> DeviceCapabilitiesImpl::GetCapabilities() |
|
byungchul
2015/10/27 20:38:22
wrap line after scoped<>
esum
2015/10/28 00:29:50
Done.
|
| + const { |
| + scoped_refptr<ImmutableRefCountedDictionary> capabilities_ref = |
| + GetCapabilitiesReference(); |
| + return capabilities_ref->data->CreateDeepCopy(); |
| } |
| void DeviceCapabilitiesImpl::SetCapability( |
| const std::string& path, |
| scoped_ptr<base::Value> proposed_value) { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| DCHECK(proposed_value.get()); |
| if (!IsValidPath(path)) { |
| LOG(DFATAL) << "Invalid capability path encountered for SetCapability()"; |
| return; |
| } |
| - // Check for Validator registered under first key per the Register() |
| - // interface. |
| - auto validator_it = validator_map_.find(GetFirstKey(path)); |
| - if (validator_it == validator_map_.end()) { |
| - SetValidatedValueInternal(path, proposed_value.Pass()); |
| - return; |
| + { |
| + base::AutoLock auto_lock(validation_lock_); |
| + // Check for Validator registered under first key per the Register() |
| + // interface. |
| + auto validator_it = validator_map_.find(GetFirstKey(path)); |
| + if (validator_it != validator_map_.end()) { |
| + // We do not want to post a task directly for the Validator's Validate() |
| + // method here because if another thread is in the middle of unregistering |
| + // that Validator, there will be an outstanding call to Validate() that |
| + // occurs after it has unregistered. Posting a task on Validator's |
| + // task_runner that checks if Validator is still registered before calling |
| + // Validate fixes this. |
| + validator_it->second.task_runner->PostTask( |
|
maclellant
2015/10/27 21:00:32
You can move this outside the lock once you have t
esum
2015/10/27 21:14:25
If you cache the ValidatorInfo reference and relea
maclellant
2015/10/27 21:20:07
Well before you were posting unretained pointer an
|
| + FROM_HERE, |
| + base::Bind(&DeviceCapabilitiesImpl::RunValidateOnCurrentThread, |
| + base::Unretained(this), path, |
|
byungchul
2015/10/27 20:38:22
It is not safe to use Unretained() here. This one
|
| + base::Passed(&proposed_value), |
| + validator_it->second.validator)); |
| + return; |
| + } |
| } |
| + // Since we are done checking for a registered Validator at this point, we |
| + // can release the lock. All further member access will be for capabilities. |
| + SetValidatedValue(path, proposed_value.Pass()); |
| +} |
| - validator_it->second->Validate(path, proposed_value.Pass()); |
| +void DeviceCapabilitiesImpl::RunValidateOnCurrentThread( |
| + const std::string& path, |
| + scoped_ptr<base::Value> proposed_value, |
| + Validator* validator) { |
| + { |
| + base::AutoLock auto_lock(validation_lock_); |
| + auto validator_it = validator_map_.find(GetFirstKey(path)); |
| + // Checking that |validator| is still registered because it may have been |
| + // unregistered, or even another Validator got registered in its place from |
| + // the time that the RunValidateOnCurrentThread() task was posted in |
| + // SetCapability(). In either case, we just want to abort call to |
| + // Validate() by returning here. |
| + if (validator_it == validator_map_.end() || |
| + validator_it->second.validator != validator) |
| + return; |
| + |
| + // Check that we are on same thread that Validator registered on before |
| + // calling Validate(). |
| + DCHECK(validator_it->second.task_runner->BelongsToCurrentThread()); |
| + |
| + // We can release the lock here because we know that the Validator has to |
| + // get unregistered on the thread that we're on right now. So there cannot |
| + // concurrently be a call to Unregister() here. |
| + } |
|
byungchul
2015/10/27 20:38:22
Lock twice doesn't look great. What about using a
esum
2015/10/27 20:57:45
Oh wow didn't think of that. I like this a lot mor
maclellant
2015/10/27 21:00:32
Would there be any concerns with accessing the wea
byungchul
2015/10/27 21:07:39
You can get weak_ptr on any thread, but can access
esum
2015/10/27 21:14:25
Yes SetCapability() will run on the capabilities t
maclellant
2015/10/27 21:20:07
+1
Got it, wasn't sure if weak_ptr was accessed o
esum
2015/10/28 00:29:51
Done.
|
| + // No locks should be held at this point in the code to ensure that there |
| + // are no deadlocks if Validate() calls DeviceCapabilities methods. |
| + validator->Validate(path, proposed_value.Pass()); |
| } |
| void DeviceCapabilitiesImpl::MergeDictionary( |
| const base::DictionaryValue& dict_value) { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| for (base::DictionaryValue::Iterator it(dict_value); !it.IsAtEnd(); |
| it.Advance()) { |
| SetCapability(it.key(), it.value().CreateDeepCopy()); |
| @@ -177,42 +266,84 @@ void DeviceCapabilitiesImpl::MergeDictionary( |
| void DeviceCapabilitiesImpl::AddCapabilitiesObserver(Observer* observer) { |
| DCHECK(observer); |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| - observer_list_.AddObserver(observer); |
| + observer_list_->AddObserver(observer); |
| } |
| void DeviceCapabilitiesImpl::RemoveCapabilitiesObserver(Observer* observer) { |
| DCHECK(observer); |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| - observer_list_.RemoveObserver(observer); |
| + observer_list_->RemoveObserver(observer); |
| } |
| -void DeviceCapabilitiesImpl::SetValidatedValueInternal( |
| +void DeviceCapabilitiesImpl::SetValidatedValue( |
| const std::string& path, |
| scoped_ptr<base::Value> new_value) { |
| - DCHECK(thread_checker_.CalledOnValidThread()); |
| + // All internal writes/modifications of capabilities must occur on same |
| + // thread to avoid race conditions. |
| + if (!task_runner_for_writes_->BelongsToCurrentThread()) { |
| + task_runner_for_writes_->PostTask( |
| + FROM_HERE, |
| + base::Bind(&DeviceCapabilitiesImpl::SetValidatedValue, |
| + base::Unretained(this), path, base::Passed(&new_value))); |
| + return; |
| + } |
| + |
| DCHECK(IsValidPath(path)); |
| DCHECK(new_value.get()); |
| + // We don't need to acquire lock here when reading capabilities_ because |
| + // we know that all writes to capabilities_ must occur serially on thread |
| + // that we're on. |
| const base::Value* cur_value = nullptr; |
| - bool capability_unchaged = |
| - GetCapability(path, &cur_value) && cur_value->Equals(new_value.get()); |
| - if (capability_unchaged) { |
| + bool capability_unchanged = capabilities_->data->Get(path, &cur_value) && |
| + cur_value->Equals(new_value.get()); |
| + if (capability_unchanged) { |
| VLOG(1) << "Ignoring unchanged capability: " << path; |
| return; |
| } |
| - capabilities_->Set(path, new_value.Pass()); |
| - UpdateStrAndNotifyChanged(path); |
| + // In this sequence, we create a deep copy, modify the deep copy, and then |
| + // do a pointer swap. We do this to have minimal time spent in the |
| + // capabilities_lock_. If we were to lock and modify the capabilities |
| + // dictionary directly, there may be expensive writes that block other |
| + // threads. |
| + scoped_ptr<base::DictionaryValue> capabilities_deep_copy( |
| + capabilities_->data->CreateDeepCopy()); |
| + capabilities_deep_copy->Set(path, new_value.Pass()); |
| + |
| + scoped_refptr<ImmutableRefCountedString> new_capabilities_str( |
| + new ImmutableRefCountedString(SerializeToJson(*capabilities_deep_copy))); |
| + DCHECK(capabilities_str_->data.get()); |
| + scoped_refptr<ImmutableRefCountedDictionary> new_capabilities( |
| + new ImmutableRefCountedDictionary(capabilities_deep_copy.Pass())); |
| + |
| + { |
| + base::AutoLock auto_lock(capabilities_lock_); |
| + // Using swap instead of assignment operator here because it's a little |
| + // faster. Avoids an extra call to AddRef()/Release(). |
| + capabilities_.swap(new_capabilities); |
|
maclellant
2015/10/27 21:00:32
Since you always update these two fields together,
esum
2015/10/27 21:14:25
Sure, SGTM.
esum
2015/10/28 00:29:50
Done.
|
| + capabilities_str_.swap(new_capabilities_str); |
| + } |
| + |
| + // Even though ObserverListThreadSafe notifications are always asynchronous |
| + // (posts task even if to same thread), no locks should be held at this point |
| + // in the code. This is just to be safe that no deadlocks occur if Observers |
| + // call DeviceCapabilities methods in OnCapabilitiesChanged(). |
| + observer_list_->Notify(FROM_HERE, &Observer::OnCapabilitiesChanged, path); |
| } |
| -void DeviceCapabilitiesImpl::UpdateStrAndNotifyChanged( |
| - const std::string& path) { |
| - // Update capabilities string here since all updates to capabilities must |
| - // ultimately call this method no matter where the update originated from. |
| - capabilities_str_ = SerializeToJson(*capabilities_); |
| - DCHECK(capabilities_str_.get()); |
| - FOR_EACH_OBSERVER(Observer, observer_list_, OnCapabilitiesChanged(path)); |
| +scoped_refptr<DeviceCapabilitiesImpl::ImmutableRefCountedDictionary> |
| +DeviceCapabilitiesImpl::GetCapabilitiesReference() const { |
| + // Need to acquire lock here when copy constructing capabilities_ otherwise |
| + // we could concurrently be writing to the scoped_refptr in |
| + // SetValidatedValue(), which could cause a bad scoped_refptr read. |
| + base::AutoLock auto_lock(capabilities_lock_); |
| + return capabilities_; |
| +} |
| + |
| +scoped_refptr<DeviceCapabilitiesImpl::ImmutableRefCountedString> |
| +DeviceCapabilitiesImpl::GetCapabilitiesStrReference() const { |
| + base::AutoLock auto_lock(capabilities_lock_); |
| + return capabilities_str_; |
| } |
| } // namespace chromecast |