| Index: base/task_scheduler/scheduler_lock_impl.cc
|
| diff --git a/base/task_scheduler/scheduler_lock_impl.cc b/base/task_scheduler/scheduler_lock_impl.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..d61f63ebd69ea3252d894e069d478fa54d92e737
|
| --- /dev/null
|
| +++ b/base/task_scheduler/scheduler_lock_impl.cc
|
| @@ -0,0 +1,138 @@
|
| +// Copyright 2016 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "base/task_scheduler/scheduler_lock_impl.h"
|
| +
|
| +#include <algorithm>
|
| +#include <unordered_map>
|
| +#include <vector>
|
| +
|
| +#include "base/lazy_instance.h"
|
| +#include "base/logging.h"
|
| +#include "base/synchronization/condition_variable.h"
|
| +#include "base/threading/platform_thread.h"
|
| +
|
| +namespace base {
|
| +namespace internal {
|
| +
|
| +namespace {
|
| +
|
| +class SafeAcquisitionTracker {  // Tracks, per thread, which SchedulerLocks are held and DCHECKs predecessor ordering to catch potential deadlocks.
|
| + public:
|
| + SafeAcquisitionTracker() = default;
|
| +
|
| + void RegisterLock(
|
| + const SchedulerLockImpl* const lock,
|
| + const SchedulerLockImpl* const predecessor) {
|
| + // Reentrant locks are unsupported: a lock may not name itself as its own predecessor.
|
| + DCHECK(lock != predecessor);
|
| + AutoLock auto_lock(metadata_lock_);
|
| + allowed_predecessor_map[lock] = predecessor;  // NOTE(review): member name lacks the trailing underscore used by acquired_locks_.
|
| + AssertSafePredecessor(lock);  // Reject predecessor chains that loop back to |lock|.
|
| + }
|
| +
|
| + void UnregisterLock(const SchedulerLockImpl* const lock) {
|
| + AutoLock auto_lock(metadata_lock_);
|
| + allowed_predecessor_map.erase(lock);
|
| + }
|
| +
|
| + void RecordAcquisition(const SchedulerLockImpl* const lock) {
|
| + AutoLock auto_lock(metadata_lock_);
|
| + const PlatformThreadId id = PlatformThread::CurrentId();
|
| + AssertSafeAcquire(id, lock);  // Must precede the push_back below: it inspects the current top of this thread's lock list.
|
| + acquired_locks_[id].push_back(lock);
|
| + }
|
| +
|
| + void RecordRelease(const SchedulerLockImpl* const lock) {
|
| + AutoLock auto_lock(metadata_lock_);
|
| + const PlatformThreadId id = PlatformThread::CurrentId();
|
| + LockVector& thread_locks = acquired_locks_[id];
|
| + const auto iter =
|
| + std::find(thread_locks.begin(), thread_locks.end(), lock);
|
| + DCHECK(iter != thread_locks.end());  // Releasing a lock this thread never recorded is a usage error.
|
| + thread_locks.erase(iter);
|
| + if (thread_locks.empty())
|
| + acquired_locks_.erase(id);  // Drop empty per-thread entries so the map does not grow without bound.
|
| + }
|
| +
|
| + private:
|
| + using LockVector = std::vector<const SchedulerLockImpl*>;
|
| + using PredecessorMap = std::unordered_map<
|
| + const SchedulerLockImpl*, const SchedulerLockImpl*>;
|
| + using AcquisitionMap =
|
| + std::unordered_map<base::PlatformThreadId, LockVector>;
|
| +
|
| + // Asserts that |lock| is safe for thread |id| to acquire. Must be called
|
| + // before the acquisition is recorded in |acquired_locks_|.
|
| + void AssertSafeAcquire(PlatformThreadId id,
|
| + const SchedulerLockImpl* const lock) const {
|
| + metadata_lock_.AssertAcquired();
|
| + const auto& thread_lock_pair = acquired_locks_.find(id);
|
| +
|
| + // If the thread currently holds no locks, this is inherently safe.
|
| + if (thread_lock_pair == acquired_locks_.end())
|
| + return;
|
| +
|
| + // Otherwise, make sure that the previous lock acquired is an allowed
|
| + // predecessor.
|
| + const SchedulerLockImpl* allowed_predecessor =
|
| + allowed_predecessor_map.at(lock);
|
| + DCHECK(thread_lock_pair->second.back() == allowed_predecessor);
|
| + }
|
| +
|
| + void AssertSafePredecessor(const SchedulerLockImpl* lock) const {  // Walks |lock|'s predecessor chain and DCHECKs it never returns to |lock|.
|
| + metadata_lock_.AssertAcquired();
|
| + for (const SchedulerLockImpl* predecessor =
|
| + allowed_predecessor_map.at(lock);
|
| + predecessor != nullptr;
|
| + predecessor = allowed_predecessor_map.at(predecessor)) {
|
| + if (predecessor == lock)
|
| + DCHECK(false);  // Cycle detected. NOTE(review): with DCHECKs compiled out, a cycle would make this loop spin forever — confirm intended build config.
|
| + }
|
| + }
|
| +
|
| + // Synchronizes everything in SafeAcquisitionTracker.
|
| + base::Lock metadata_lock_;
|
| +
|
| + PredecessorMap allowed_predecessor_map;  // Registered lock -> its single allowed predecessor (may be null).
|
| + AcquisitionMap acquired_locks_;  // Locks currently held by each thread, in acquisition order.
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
|
| +};
|
| +
|
| +LazyInstance<SafeAcquisitionTracker> g_safe_acquisition_tracker =  // Process-wide tracker shared by all SchedulerLockImpls.
|
| + LAZY_INSTANCE_INITIALIZER;
|
| +
|
| +} // namespace
|
| +
|
| +SchedulerLockImpl::SchedulerLockImpl() : SchedulerLockImpl(nullptr) {}  // Delegating ctor: registers with a null (no) predecessor.
|
| +
|
| +SchedulerLockImpl::SchedulerLockImpl(const SchedulerLockImpl* predecessor) {
|
| + g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);  // |predecessor| is the one lock allowed to be held when acquiring this one.
|
| +}
|
| +
|
| +SchedulerLockImpl::~SchedulerLockImpl() {
|
| + g_safe_acquisition_tracker.Get().UnregisterLock(this);  // Remove this lock's entry from the tracker's predecessor map.
|
| +}
|
| +
|
| +void SchedulerLockImpl::Acquire() {
|
| + lock_.Acquire();
|
| + g_safe_acquisition_tracker.Get().RecordAcquisition(this);  // DCHECKs predecessor ordering, then records the acquisition for this thread.
|
| +}
|
| +
|
| +void SchedulerLockImpl::Release() {
|
| + lock_.Release();
|
| + g_safe_acquisition_tracker.Get().RecordRelease(this);  // Recorded after the unlock; the tracker serializes on its own metadata_lock_.
|
| +}
|
| +
|
| +void SchedulerLockImpl::AssertAcquired() const {
|
| + lock_.AssertAcquired();  // Forwards directly to the underlying base::Lock.
|
| +}
|
| +
|
| +scoped_ptr<ConditionVariable> SchedulerLockImpl::CreateConditionVariable() {
|
| + return scoped_ptr<ConditionVariable>(new ConditionVariable(&lock_));  // Caller owns it; it waits on this lock's underlying lock_, so it must not outlive this object.
|
| +}
|
| +
|
| +} // namespace internal
|
| +} // namespace base
|
|
|