| Index: base/task_scheduler/scheduler_lock.cc
|
| diff --git a/base/task_scheduler/scheduler_lock.cc b/base/task_scheduler/scheduler_lock.cc
|
| new file mode 100644
|
| index 0000000000000000000000000000000000000000..07e4c1f4c5b6a0e9ae33c38e505d5afdc0c02f26
|
| --- /dev/null
|
| +++ b/base/task_scheduler/scheduler_lock.cc
|
| @@ -0,0 +1,137 @@
|
| +// Copyright 2016 The Chromium Authors. All rights reserved.
|
| +// Use of this source code is governed by a BSD-style license that can be
|
| +// found in the LICENSE file.
|
| +
|
| +#include "base/task_scheduler/scheduler_lock.h"
|
| +
|
| +#include <algorithm>
|
| +#include <unordered_map>
|
| +#include <vector>
|
| +
|
| +#include "base/logging.h"
|
| +#include "base/memory/singleton.h"
|
| +#include "base/threading/platform_thread.h"
|
| +
|
| +#if DCHECK_IS_ON()
|
| +#define DCHECK_ON_ONLY(statement) statement
|
| +#else
|
| +#define DCHECK_ON_ONLY(statement)
|
| +#endif
|
| +
|
| +namespace base {
|
| +namespace internal {
|
| +
|
| +namespace {
|
| +
|
| +class SafeAcquisitionTracker {
|
| + public:
|
| + static SafeAcquisitionTracker* GetInstance() {
|
| + return base::Singleton<SafeAcquisitionTracker>::get();
|
| + }
|
| +
|
| + void RegisterLock(
|
| + const SchedulerLock* const lock,
|
| + const SchedulerLock* const predecessor) {
|
| + // Reentrant locks are unexpected.
|
| + CHECK(lock != predecessor);
|
| + AutoLock auto_lock(metadata_lock_);
|
| + allowed_predecessors_[lock] = predecessor;
|
| + AssertSafePredecessor(lock);
|
| + }
|
| +
|
| + void UnregisterLock(const SchedulerLock* const lock) {
|
| + AutoLock auto_lock(metadata_lock_);
|
| + allowed_predecessors_.erase(lock);
|
| + }
|
| +
|
| + void RecordAcquisition(const SchedulerLock* const lock) {
|
| + AutoLock auto_lock(metadata_lock_);
|
| + const PlatformThreadId id = PlatformThread::CurrentId();
|
| + AssertSafeAcquire(id, lock);
|
| + LockVector& thread_locks = acquired_locks_[id];
|
| + thread_locks.push_back(lock);
|
| + }
|
| +
|
| + void RecordRelease(const SchedulerLock* const lock) {
|
| + AutoLock auto_lock(metadata_lock_);
|
| + const PlatformThreadId id = PlatformThread::CurrentId();
|
| + LockVector& thread_locks = acquired_locks_[id];
|
| + const auto iter =
|
| + std::find(thread_locks.begin(), thread_locks.end(), lock);
|
| + DCHECK(iter != thread_locks.end());
|
| + thread_locks.erase(iter);
|
| + }
|
| +
|
| + private:
|
| + friend struct base::DefaultSingletonTraits<SafeAcquisitionTracker>;
|
| + using LockVector = std::vector<const SchedulerLock*>;
|
| + using PredecessorMap = std::unordered_map<
|
| + const SchedulerLock*, const SchedulerLock*>;
|
| + using AcquisitionMap =
|
| + std::unordered_map<base::PlatformThreadId, LockVector>;
|
| +
|
| + SafeAcquisitionTracker() = default;
|
| +
|
| + void AssertSafeAcquire(PlatformThreadId id, const SchedulerLock* const lock) {
|
| + metadata_lock_.AssertAcquired();
|
| + const LockVector& thread_locks = acquired_locks_[id];
|
| +
|
| + // If the thread hasn't ever acquired a lock, this is inherently safe.
|
| + if (thread_locks.empty())
|
| + return;
|
| +
|
| + // Otherwise, make sure that the previous lock acquired is an allowed
|
| + // predecessor.
|
| + const SchedulerLock* allowed_predecessor = allowed_predecessors_[lock];
|
| + DCHECK(thread_locks.back() == allowed_predecessor);
|
| + }
|
| +
|
| + void AssertSafePredecessor(const SchedulerLock* lock) {
|
| + metadata_lock_.AssertAcquired();
|
| + for (const SchedulerLock* predecessor = allowed_predecessors_[lock];
|
| + predecessor != nullptr;
|
| + predecessor = allowed_predecessors_[predecessor]) {
|
| + if (predecessor == lock)
|
| + NOTREACHED();
|
| + }
|
| + }
|
| +
|
| + // Synchronizes everything in SafeAcquisitionTracker.
|
| + base::Lock metadata_lock_;
|
| +
|
| + PredecessorMap allowed_predecessors_;
|
| + AcquisitionMap acquired_locks_;
|
| +
|
| + DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker);
|
| +};
|
| +
|
| +} // namespace
|
| +
|
// Constructs a lock with no predecessor requirement by delegating to the
// predecessor-taking constructor with nullptr.
SchedulerLock::SchedulerLock() : SchedulerLock(nullptr) {}
|
| +
|
// Constructs a lock whose only allowed predecessor is |predecessor| (may be
// null). Registration with the tracker happens in DCHECK builds only; in
// release builds DCHECK_ON_ONLY compiles the statement away.
SchedulerLock::SchedulerLock(const SchedulerLock* predecessor) {
  DCHECK_ON_ONLY(
      SafeAcquisitionTracker::GetInstance()->RegisterLock(this, predecessor));
}
|
| +
|
// Unregisters this lock from the tracker (DCHECK builds only) so stale
// predecessor metadata isn't kept for a destroyed lock.
SchedulerLock::~SchedulerLock() {
  DCHECK_ON_ONLY(SafeAcquisitionTracker::GetInstance()->UnregisterLock(this));
}
|
| +
|
// Acquires the underlying lock, then (DCHECK builds only) records the
// acquisition so unsafe lock-ordering is flagged by the tracker.
void SchedulerLock::Acquire() {
  lock_.Acquire();
  DCHECK_ON_ONLY(
      SafeAcquisitionTracker::GetInstance()->RecordAcquisition(this));
}
|
| +
|
// Releases the underlying lock, then (DCHECK builds only) records the
// release with the tracker.
void SchedulerLock::Release() {
  lock_.Release();
  DCHECK_ON_ONLY(SafeAcquisitionTracker::GetInstance()->RecordRelease(this));
}
|
| +
|
// Returns a non-owning pointer to the underlying base::Lock — presumably for
// constructing a ConditionVariable on it, as the name suggests (TODO confirm
// against the header). Callers must not delete or re-wrap the pointer.
Lock* SchedulerLock::RawLockForConditionVariable() {
  return &lock_;
}
|
| +
|
| +} // namespace internal
|
| +}  // namespace base
|
|
|