Chromium Code Reviews| OLD | NEW |
|---|---|
| (Empty) | |
| 1 // Copyright 2016 The Chromium Authors. All rights reserved. | |
| 2 // Use of this source code is governed by a BSD-style license that can be | |
| 3 // found in the LICENSE file. | |
| 4 | |
| 5 #include "base/task_scheduler/scheduler_lock_impl.h" | |
| 6 | |
| 7 #include <algorithm> | |
| 8 #include <unordered_map> | |
| 9 #include <vector> | |
| 10 | |
| 11 #include "base/lazy_instance.h" | |
| 12 #include "base/logging.h" | |
| 13 #include "base/synchronization/condition_variable.h" | |
| 14 #include "base/threading/platform_thread.h" | |
| 15 #include "base/threading/thread_local_storage.h" | |
| 16 | |
| 17 namespace base { | |
| 18 namespace internal { | |
| 19 | |
| 20 namespace { | |
| 21 | |
| 22 class SafeAcquisitionTracker { | |
| 23 public: | |
| 24 SafeAcquisitionTracker() : tls_acquired_locks_(&OnTLSDestroy) {} | |
| 25 | |
| 26 void RegisterLock( | |
| 27 const SchedulerLockImpl* const lock, | |
| 28 const SchedulerLockImpl* const predecessor) { | |
| 29 // Reentrant locks are unsupported. | |
| 30 DCHECK(lock != predecessor); | |
| 31 AutoLock auto_lock(allowed_predecessor_map_lock_); | |
| 32 allowed_predecessor_map_[lock] = predecessor; | |
| 33 AssertSafePredecessor(lock); | |
| 34 } | |
| 35 | |
| 36 void UnregisterLock(const SchedulerLockImpl* const lock) { | |
| 37 AutoLock auto_lock(allowed_predecessor_map_lock_); | |
| 38 allowed_predecessor_map_.erase(lock); | |
| 39 } | |
| 40 | |
| 41 void RecordAcquisition(const SchedulerLockImpl* const lock) { | |
| 42 AssertSafeAcquire(lock); | |
| 43 GetAcquiredLocksOnCurrentThread()->push_back(lock); | |
| 44 } | |
| 45 | |
| 46 void RecordRelease(const SchedulerLockImpl* const lock) { | |
| 47 LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread(); | |
| 48 const auto iter_at_lock = | |
| 49 std::find(acquired_locks->begin(), acquired_locks->end(), lock); | |
| 50 DCHECK(iter_at_lock != acquired_locks->end()); | |
| 51 acquired_locks->erase(iter_at_lock); | |
| 52 } | |
| 53 | |
| 54 private: | |
| 55 using LockVector = std::vector<const SchedulerLockImpl*>; | |
| 56 using PredecessorMap = std::unordered_map< | |
| 57 const SchedulerLockImpl*, const SchedulerLockImpl*>; | |
| 58 | |
| 59 // This asserts that the lock is safe to acquire. This means that this should | |
| 60 // be run before actually recording the acquisition. | |
| 61 void AssertSafeAcquire(const SchedulerLockImpl* const lock) { | |
| 62 const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread(); | |
| 63 | |
| 64 // If the thread currently holds no locks, this is inherently safe. | |
| 65 if (acquired_locks->empty()) | |
| 66 return; | |
| 67 | |
| 68 // Otherwise, make sure that the previous lock acquired is an allowed | |
| 69 // predecessor. | |
| 70 AutoLock auto_lock(allowed_predecessor_map_lock_); | |
| 71 const SchedulerLockImpl* allowed_predecessor = | |
| 72 allowed_predecessor_map_.at(lock); | |
| 73 DCHECK(acquired_locks->back() == allowed_predecessor); | |
| 74 } | |
| 75 | |
| 76 void AssertSafePredecessor(const SchedulerLockImpl* lock) const { | |
| 77 allowed_predecessor_map_lock_.AssertAcquired(); | |
| 78 for (const SchedulerLockImpl* predecessor = | |
| 79 allowed_predecessor_map_.at(lock); | |
| 80 predecessor != nullptr; | |
| 81 predecessor = allowed_predecessor_map_.at(predecessor)) { | |
| 82 if (predecessor == lock) | |
|
danakj
2016/03/08 21:22:56
nit: prefer DCHECK(predecessor != lock) rather tha
robliao
2016/03/08 22:14:08
Done with "Scheduler lock predecessor cycle detect
| |
| 83 NOTREACHED(); | |
| 84 } | |
| 85 } | |
| 86 | |
| 87 LockVector* GetAcquiredLocksOnCurrentThread() { | |
| 88 if (!tls_acquired_locks_.Get()) | |
| 89 tls_acquired_locks_.Set(new LockVector); | |
| 90 | |
| 91 return reinterpret_cast<LockVector*>(tls_acquired_locks_.Get()); | |
| 92 } | |
| 93 | |
| 94 static void OnTLSDestroy(void* value) { | |
| 95 delete reinterpret_cast<LockVector*>(value); | |
| 96 } | |
| 97 | |
| 98 // Synchronizes access to |allowed_predecessor_map_|. | |
| 99 mutable Lock allowed_predecessor_map_lock_; | |
|
danakj
2016/03/08 21:22:56
Why mutable?
robliao
2016/03/08 22:14:08
Removed. In an earlier revision AssertSafeAcquire
| |
| 100 | |
| 101 // A map of allowed predecessors. | |
| 102 PredecessorMap allowed_predecessor_map_; | |
| 103 | |
| 104 // A thread-local slot holding a vector of locks currently acquired on the | |
| 105 // current thread. | |
| 106 ThreadLocalStorage::Slot tls_acquired_locks_; | |
| 107 | |
| 108 DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker); | |
| 109 }; | |
| 110 | |
// Process-wide tracker shared by all SchedulerLocks. Leaky: intentionally
// never destroyed.
LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker =
    LAZY_INSTANCE_INITIALIZER;
| 113 | |
| 114 } // namespace | |
| 115 | |
// Registers this lock with a null predecessor (see the delegated
// constructor).
SchedulerLockImpl::SchedulerLockImpl() : SchedulerLockImpl(nullptr) {}
| 117 | |
// Registers this lock with |predecessor| as its allowed predecessor in the
// global acquisition tracker.
SchedulerLockImpl::SchedulerLockImpl(const SchedulerLockImpl* predecessor) {
  g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor);
}
| 121 | |
// Unregisters this lock so the tracker drops its predecessor-map entry.
SchedulerLockImpl::~SchedulerLockImpl() {
  g_safe_acquisition_tracker.Get().UnregisterLock(this);
}
| 125 | |
void SchedulerLockImpl::Acquire() {
  lock_.Acquire();
  // Recorded after the acquire; RecordAcquisition DCHECKs ordering safety.
  g_safe_acquisition_tracker.Get().RecordAcquisition(this);
}
| 130 | |
void SchedulerLockImpl::Release() {
  lock_.Release();
  // Remove this lock from the current thread's acquired-locks record.
  g_safe_acquisition_tracker.Get().RecordRelease(this);
}
| 135 | |
// Forwards to the underlying lock's acquired-state assertion.
void SchedulerLockImpl::AssertAcquired() const {
  lock_.AssertAcquired();
}
| 139 | |
// Returns a new ConditionVariable bound to this lock's underlying |lock_|;
// the caller owns the returned object.
scoped_ptr<ConditionVariable> SchedulerLockImpl::CreateConditionVariable() {
  return scoped_ptr<ConditionVariable>(new ConditionVariable(&lock_));
}
| 143 | |
| 144 } // namespace internal | |
| 145 } // namespace base | |
| OLD | NEW |