// Copyright 2016 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/task_scheduler/scheduler_lock_impl.h"

#include <algorithm>
#include <unordered_map>
#include <vector>

#include "base/lazy_instance.h"
#include "base/logging.h"
#include "base/synchronization/condition_variable.h"
#include "base/threading/platform_thread.h"
#include "base/threading/thread_local_storage.h"
namespace base {
namespace internal {

namespace {

22 class SafeAcquisitionTracker { | |
23 public: | |
24 SafeAcquisitionTracker() : tls_acquired_locks_(&OnTLSDestroy) {} | |
25 | |
26 void RegisterLock( | |
27 const SchedulerLockImpl* const lock, | |
28 const SchedulerLockImpl* const predecessor) { | |
29 DCHECK_NE(lock, predecessor) << "Reentrant locks are unsupported."; | |
30 AutoLock auto_lock(allowed_predecessor_map_lock_); | |
31 allowed_predecessor_map_[lock] = predecessor; | |
32 AssertSafePredecessor(lock); | |
33 } | |
34 | |
35 void UnregisterLock(const SchedulerLockImpl* const lock) { | |
36 AutoLock auto_lock(allowed_predecessor_map_lock_); | |
37 allowed_predecessor_map_.erase(lock); | |
38 } | |
39 | |
40 void RecordAcquisition(const SchedulerLockImpl* const lock) { | |
41 AssertSafeAcquire(lock); | |
42 GetAcquiredLocksOnCurrentThread()->push_back(lock); | |
43 } | |
44 | |
45 void RecordRelease(const SchedulerLockImpl* const lock) { | |
46 LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread(); | |
47 const auto iter_at_lock = | |
48 std::find(acquired_locks->begin(), acquired_locks->end(), lock); | |
49 DCHECK(iter_at_lock != acquired_locks->end()); | |
robliao
2016/03/14 19:09:53
Note: This can't be converted as there's no operat
| |
50 acquired_locks->erase(iter_at_lock); | |
51 } | |
52 | |
53 private: | |
54 using LockVector = std::vector<const SchedulerLockImpl*>; | |
55 using PredecessorMap = std::unordered_map< | |
56 const SchedulerLockImpl*, const SchedulerLockImpl*>; | |
57 | |
58 // This asserts that the lock is safe to acquire. This means that this should | |
59 // be run before actually recording the acquisition. | |
60 void AssertSafeAcquire(const SchedulerLockImpl* const lock) { | |
61 const LockVector* acquired_locks = GetAcquiredLocksOnCurrentThread(); | |
62 | |
63 // If the thread currently holds no locks, this is inherently safe. | |
64 if (acquired_locks->empty()) | |
65 return; | |
66 | |
67 // Otherwise, make sure that the previous lock acquired is an allowed | |
68 // predecessor. | |
69 AutoLock auto_lock(allowed_predecessor_map_lock_); | |
70 const SchedulerLockImpl* allowed_predecessor = | |
71 allowed_predecessor_map_.at(lock); | |
72 DCHECK_EQ(acquired_locks->back(), allowed_predecessor); | |
73 } | |
74 | |
75 void AssertSafePredecessor(const SchedulerLockImpl* lock) const { | |
76 allowed_predecessor_map_lock_.AssertAcquired(); | |
77 for (const SchedulerLockImpl* predecessor = | |
78 allowed_predecessor_map_.at(lock); | |
79 predecessor != nullptr; | |
80 predecessor = allowed_predecessor_map_.at(predecessor)) { | |
81 DCHECK_NE(predecessor, lock) << | |
82 "Scheduler lock predecessor cycle detected."; | |
83 } | |
84 } | |
85 | |
86 LockVector* GetAcquiredLocksOnCurrentThread() { | |
87 if (!tls_acquired_locks_.Get()) | |
88 tls_acquired_locks_.Set(new LockVector); | |
89 | |
90 return reinterpret_cast<LockVector*>(tls_acquired_locks_.Get()); | |
91 } | |
92 | |
93 static void OnTLSDestroy(void* value) { | |
94 delete reinterpret_cast<LockVector*>(value); | |
95 } | |
96 | |
97 // Synchronizes access to |allowed_predecessor_map_|. | |
98 Lock allowed_predecessor_map_lock_; | |
99 | |
100 // A map of allowed predecessors. | |
101 PredecessorMap allowed_predecessor_map_; | |
102 | |
103 // A thread-local slot holding a vector of locks currently acquired on the | |
104 // current thread. | |
105 ThreadLocalStorage::Slot tls_acquired_locks_; | |
106 | |
107 DISALLOW_COPY_AND_ASSIGN(SafeAcquisitionTracker); | |
108 }; | |
109 | |
110 LazyInstance<SafeAcquisitionTracker>::Leaky g_safe_acquisition_tracker = | |
111 LAZY_INSTANCE_INITIALIZER; | |
112 | |
}  // namespace

115 SchedulerLockImpl::SchedulerLockImpl() : SchedulerLockImpl(nullptr) {} | |
116 | |
117 SchedulerLockImpl::SchedulerLockImpl(const SchedulerLockImpl* predecessor) { | |
118 g_safe_acquisition_tracker.Get().RegisterLock(this, predecessor); | |
119 } | |
120 | |
121 SchedulerLockImpl::~SchedulerLockImpl() { | |
122 g_safe_acquisition_tracker.Get().UnregisterLock(this); | |
123 } | |
124 | |
125 void SchedulerLockImpl::Acquire() { | |
126 lock_.Acquire(); | |
127 g_safe_acquisition_tracker.Get().RecordAcquisition(this); | |
128 } | |
129 | |
130 void SchedulerLockImpl::Release() { | |
131 lock_.Release(); | |
132 g_safe_acquisition_tracker.Get().RecordRelease(this); | |
133 } | |
134 | |
135 void SchedulerLockImpl::AssertAcquired() const { | |
136 lock_.AssertAcquired(); | |
137 } | |
138 | |
139 scoped_ptr<ConditionVariable> SchedulerLockImpl::CreateConditionVariable() { | |
140 return scoped_ptr<ConditionVariable>(new ConditionVariable(&lock_)); | |
141 } | |
142 | |
}  // namespace internal
}  // namespace base