OLD | NEW |
1 // Copyright 2014 The Chromium Authors. All rights reserved. | 1 // Copyright 2014 The Chromium Authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include "components/scheduler/base/task_queue_manager.h" | 5 #include "components/scheduler/base/task_queue_manager.h" |
6 | 6 |
7 #include <queue> | 7 #include <queue> |
8 #include <set> | 8 #include <set> |
9 | 9 |
10 #include "base/bind.h" | 10 #include "base/bind.h" |
11 #include "components/scheduler/base/lazy_now.h" | 11 #include "components/scheduler/base/real_time_domain.h" |
12 #include "components/scheduler/base/task_queue_impl.h" | 12 #include "components/scheduler/base/task_queue_impl.h" |
13 #include "components/scheduler/base/task_queue_manager_delegate.h" | 13 #include "components/scheduler/base/task_queue_manager_delegate.h" |
14 #include "components/scheduler/base/task_queue_selector.h" | 14 #include "components/scheduler/base/task_queue_selector.h" |
15 #include "components/scheduler/base/task_queue_sets.h" | 15 #include "components/scheduler/base/task_queue_sets.h" |
16 | 16 |
17 namespace { | |
18 const int64_t kMaxTimeTicks = std::numeric_limits<int64>::max(); | |
19 } | |
20 | |
21 namespace scheduler { | 17 namespace scheduler { |
22 | 18 |
23 TaskQueueManager::TaskQueueManager( | 19 TaskQueueManager::TaskQueueManager( |
24 scoped_refptr<TaskQueueManagerDelegate> delegate, | 20 scoped_refptr<TaskQueueManagerDelegate> delegate, |
25 const char* tracing_category, | 21 const char* tracing_category, |
26 const char* disabled_by_default_tracing_category, | 22 const char* disabled_by_default_tracing_category, |
27 const char* disabled_by_default_verbose_tracing_category) | 23 const char* disabled_by_default_verbose_tracing_category) |
28 : delegate_(delegate), | 24 : delegate_(delegate), |
29 task_was_run_on_quiescence_monitored_queue_(false), | 25 task_was_run_on_quiescence_monitored_queue_(false), |
30 pending_dowork_count_(0), | 26 pending_dowork_count_(0), |
31 work_batch_size_(1), | 27 work_batch_size_(1), |
32 tracing_category_(tracing_category), | 28 tracing_category_(tracing_category), |
33 disabled_by_default_tracing_category_( | 29 disabled_by_default_tracing_category_( |
34 disabled_by_default_tracing_category), | 30 disabled_by_default_tracing_category), |
35 disabled_by_default_verbose_tracing_category_( | 31 disabled_by_default_verbose_tracing_category_( |
36 disabled_by_default_verbose_tracing_category), | 32 disabled_by_default_verbose_tracing_category), |
37 observer_(nullptr), | 33 observer_(nullptr), |
38 deletion_sentinel_(new DeletionSentinel()), | 34 deletion_sentinel_(new DeletionSentinel()), |
39 weak_factory_(this) { | 35 weak_factory_(this) { |
40 DCHECK(delegate->RunsTasksOnCurrentThread()); | 36 DCHECK(delegate->RunsTasksOnCurrentThread()); |
41 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, | 37 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, |
42 "TaskQueueManager", this); | 38 "TaskQueueManager", this); |
43 selector_.SetTaskQueueSelectorObserver(this); | 39 selector_.SetTaskQueueSelectorObserver(this); |
44 | 40 |
45 decrement_pending_and_do_work_closure_ = | 41 decrement_pending_and_do_work_closure_ = |
46 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); | 42 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); |
47 do_work_closure_ = | 43 do_work_closure_ = |
48 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); | 44 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); |
| 45 |
| 46 real_time_domain_ = |
| 47 make_scoped_refptr(new RealTimeDomain(delegate.get(), do_work_closure_)); |
| 48 RegisterTimeDomain(real_time_domain_); |
49 } | 49 } |
50 | 50 |
51 TaskQueueManager::~TaskQueueManager() { | 51 TaskQueueManager::~TaskQueueManager() { |
52 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, | 52 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, |
53 "TaskQueueManager", this); | 53 "TaskQueueManager", this); |
54 | 54 |
55 while (!queues_.empty()) | 55 while (!queues_.empty()) |
56 (*queues_.begin())->UnregisterTaskQueue(); | 56 (*queues_.begin())->UnregisterTaskQueue(); |
57 | 57 |
58 selector_.SetTaskQueueSelectorObserver(nullptr); | 58 selector_.SetTaskQueueSelectorObserver(nullptr); |
59 } | 59 } |
60 | 60 |
| 61 void TaskQueueManager::RegisterTimeDomain( |
| 62 const scoped_refptr<TimeDomain>& time_domain) { |
| 63 time_domains_.insert(time_domain); |
| 64 } |
| 65 |
| 66 void TaskQueueManager::UnregisterTimeDomain( |
| 67 const scoped_refptr<TimeDomain>& time_domain) { |
| 68 time_domains_.erase(time_domain); |
| 69 } |
| 70 |
61 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue( | 71 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue( |
62 const TaskQueue::Spec& spec) { | 72 const TaskQueue::Spec& spec) { |
63 TRACE_EVENT1(tracing_category_, | 73 TRACE_EVENT1(tracing_category_, |
64 "TaskQueueManager::NewTaskQueue", "queue_name", spec.name); | 74 "TaskQueueManager::NewTaskQueue", "queue_name", spec.name); |
65 DCHECK(main_thread_checker_.CalledOnValidThread()); | 75 DCHECK(main_thread_checker_.CalledOnValidThread()); |
| 76 TimeDomain* time_domain = |
| 77 spec.time_domain ? spec.time_domain : real_time_domain_.get(); |
| 78 DCHECK(time_domains_.find(make_scoped_refptr(time_domain)) != |
| 79 time_domains_.end()); |
66 scoped_refptr<internal::TaskQueueImpl> queue( | 80 scoped_refptr<internal::TaskQueueImpl> queue( |
67 make_scoped_refptr(new internal::TaskQueueImpl( | 81 make_scoped_refptr(new internal::TaskQueueImpl( |
68 this, spec, disabled_by_default_tracing_category_, | 82 this, time_domain, spec, disabled_by_default_tracing_category_, |
69 disabled_by_default_verbose_tracing_category_))); | 83 disabled_by_default_verbose_tracing_category_))); |
70 queues_.insert(queue); | 84 queues_.insert(queue); |
71 selector_.AddQueue(queue.get()); | 85 selector_.AddQueue(queue.get()); |
72 return queue; | 86 return queue; |
73 } | 87 } |
74 | 88 |
75 void TaskQueueManager::SetObserver(Observer* observer) { | 89 void TaskQueueManager::SetObserver(Observer* observer) { |
76 DCHECK(main_thread_checker_.CalledOnValidThread()); | 90 DCHECK(main_thread_checker_.CalledOnValidThread()); |
77 observer_ = observer; | 91 observer_ = observer; |
78 } | 92 } |
79 | 93 |
80 void TaskQueueManager::UnregisterTaskQueue( | 94 void TaskQueueManager::UnregisterTaskQueue( |
81 scoped_refptr<internal::TaskQueueImpl> task_queue) { | 95 scoped_refptr<internal::TaskQueueImpl> task_queue) { |
82 TRACE_EVENT1(tracing_category_, | 96 TRACE_EVENT1(tracing_category_, |
83 "TaskQueueManager::UnregisterTaskQueue", "queue_name", | 97 "TaskQueueManager::UnregisterTaskQueue", "queue_name", |
84 task_queue->GetName()); | 98 task_queue->GetName()); |
85 DCHECK(main_thread_checker_.CalledOnValidThread()); | 99 DCHECK(main_thread_checker_.CalledOnValidThread()); |
86 if (observer_) | 100 if (observer_) |
87 observer_->OnUnregisterTaskQueue(task_queue); | 101 observer_->OnUnregisterTaskQueue(task_queue); |
88 | 102 |
89 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being | 103 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being |
90 // freed while any of our structures hold a raw pointer to it. | 104 // freed while any of our structures hold a raw pointer to it. |
91 queues_to_delete_.insert(task_queue); | 105 queues_to_delete_.insert(task_queue); |
92 queues_.erase(task_queue); | 106 queues_.erase(task_queue); |
93 selector_.RemoveQueue(task_queue.get()); | 107 selector_.RemoveQueue(task_queue.get()); |
94 | |
95 // We need to remove |task_queue| from delayed_wakeup_multimap_ which is a | |
96 // little awkward since it's keyed by time. O(n) running time. | |
97 for (DelayedWakeupMultimap::iterator iter = delayed_wakeup_multimap_.begin(); | |
98 iter != delayed_wakeup_multimap_.end();) { | |
99 if (iter->second == task_queue.get()) { | |
100 DelayedWakeupMultimap::iterator temp = iter; | |
101 iter++; | |
102 // O(1) amortized. | |
103 delayed_wakeup_multimap_.erase(temp); | |
104 } else { | |
105 iter++; | |
106 } | |
107 } | |
108 | |
109 // |newly_updatable_| might contain |task_queue|, we use | |
110 // MoveNewlyUpdatableQueuesIntoUpdatableQueueSet to flush it out. | |
111 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet(); | |
112 updatable_queue_set_.erase(task_queue.get()); | |
113 } | |
114 | |
115 base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() { | |
116 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
117 bool found_pending_task = false; | |
118 base::TimeTicks next_pending_delayed_task( | |
119 base::TimeTicks::FromInternalValue(kMaxTimeTicks)); | |
120 for (auto& queue : queues_) { | |
121 base::TimeTicks queues_next_pending_delayed_task; | |
122 if (queue->NextPendingDelayedTaskRunTime( | |
123 &queues_next_pending_delayed_task)) { | |
124 found_pending_task = true; | |
125 next_pending_delayed_task = | |
126 std::min(next_pending_delayed_task, queues_next_pending_delayed_task); | |
127 } | |
128 } | |
129 | |
130 if (!found_pending_task) | |
131 return base::TimeTicks(); | |
132 | |
133 DCHECK_NE(next_pending_delayed_task, | |
134 base::TimeTicks::FromInternalValue(kMaxTimeTicks)); | |
135 return next_pending_delayed_task; | |
136 } | |
137 | |
138 void TaskQueueManager::RegisterAsUpdatableTaskQueue( | |
139 internal::TaskQueueImpl* queue) { | |
140 base::AutoLock lock(newly_updatable_lock_); | |
141 newly_updatable_.push_back(queue); | |
142 } | |
143 | |
144 void TaskQueueManager::UnregisterAsUpdatableTaskQueue( | |
145 internal::TaskQueueImpl* queue) { | |
146 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
147 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet(); | |
148 #ifndef NDEBUG | |
149 { | |
150 base::AutoLock lock(newly_updatable_lock_); | |
151 DCHECK(!(updatable_queue_set_.find(queue) == updatable_queue_set_.end() && | |
152 std::find(newly_updatable_.begin(), newly_updatable_.end(), | |
153 queue) != newly_updatable_.end())); | |
154 } | |
155 #endif | |
156 updatable_queue_set_.erase(queue); | |
157 } | |
158 | |
159 void TaskQueueManager::MoveNewlyUpdatableQueuesIntoUpdatableQueueSet() { | |
160 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
161 base::AutoLock lock(newly_updatable_lock_); | |
162 while (!newly_updatable_.empty()) { | |
163 updatable_queue_set_.insert(newly_updatable_.back()); | |
164 newly_updatable_.pop_back(); | |
165 } | |
166 } | 108 } |
167 | 109 |
168 void TaskQueueManager::UpdateWorkQueues( | 110 void TaskQueueManager::UpdateWorkQueues( |
169 bool should_trigger_wakeup, | 111 bool should_trigger_wakeup, |
170 const internal::TaskQueueImpl::Task* previous_task) { | 112 const internal::TaskQueueImpl::Task* previous_task) { |
171 DCHECK(main_thread_checker_.CalledOnValidThread()); | |
172 TRACE_EVENT0(disabled_by_default_tracing_category_, | 113 TRACE_EVENT0(disabled_by_default_tracing_category_, |
173 "TaskQueueManager::UpdateWorkQueues"); | 114 "TaskQueueManager::UpdateWorkQueues"); |
174 internal::LazyNow lazy_now(delegate().get()); | |
175 | 115 |
176 // Move any ready delayed tasks into the incoming queues. | 116 for (const scoped_refptr<TimeDomain>& time_domain : time_domains_) {
177 WakeupReadyDelayedQueues(&lazy_now); | 117 time_domain->UpdateWorkQueues(should_trigger_wakeup, previous_task); |
178 | |
179 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet(); | |
180 | |
181 auto iter = updatable_queue_set_.begin(); | |
182 while (iter != updatable_queue_set_.end()) { | |
183 internal::TaskQueueImpl* queue = *iter++; | |
184 // NOTE Update work queue may erase itself from |updatable_queue_set_|. | |
185 // This is fine, erasing an element won't invalidate any iterator, as long | |
186 // as the iterator isn't the element being deleted. | |
187 if (queue->work_queue().empty()) | |
188 queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task); | |
189 } | 118 } |
190 } | 119 } |
191 | 120 |
192 void TaskQueueManager::ScheduleDelayedWorkTask( | |
193 scoped_refptr<internal::TaskQueueImpl> queue, | |
194 base::TimeTicks delayed_run_time) { | |
195 internal::LazyNow lazy_now(delegate().get()); | |
196 ScheduleDelayedWork(queue.get(), delayed_run_time, &lazy_now); | |
197 } | |
198 | |
199 void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl* queue, | |
200 base::TimeTicks delayed_run_time, | |
201 internal::LazyNow* lazy_now) { | |
202 if (!delegate_->BelongsToCurrentThread()) { | |
203 // NOTE posting a delayed task from a different thread is not expected to be | |
204 // common. This pathway is less optimal than perhaps it could be because | |
205 // it causes two main thread tasks to be run. Should this assumption prove | |
206 // to be false in future, we may need to revisit this. | |
207 delegate_->PostTask( | |
208 FROM_HERE, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask, | |
209 weak_factory_.GetWeakPtr(), | |
210 scoped_refptr<internal::TaskQueueImpl>(queue), | |
211 delayed_run_time)); | |
212 return; | |
213 } | |
214 | |
215 // Make sure there's one (and only one) task posted to |delegate_| | |
216 // to call |DelayedDoWork| at |delayed_run_time|. | |
217 if (delayed_wakeup_multimap_.find(delayed_run_time) == | |
218 delayed_wakeup_multimap_.end()) { | |
219 base::TimeDelta delay = | |
220 std::max(base::TimeDelta(), delayed_run_time - lazy_now->Now()); | |
221 delegate_->PostDelayedTask(FROM_HERE, do_work_closure_, delay); | |
222 } | |
223 delayed_wakeup_multimap_.insert(std::make_pair(delayed_run_time, queue)); | |
224 } | |
225 | |
226 void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow* lazy_now) { | |
227 // Wake up any queues with pending delayed work. Note std::multimap stores | |
228 // the elements sorted by key, so the begin() iterator points to the earliest | |
229 // queue to wakeup. | |
230 std::set<internal::TaskQueueImpl*> dedup_set; | |
231 while (!delayed_wakeup_multimap_.empty()) { | |
232 DelayedWakeupMultimap::iterator next_wakeup = | |
233 delayed_wakeup_multimap_.begin(); | |
234 if (next_wakeup->first > lazy_now->Now()) | |
235 break; | |
236 // A queue could have any number of delayed tasks pending so it's worthwhile | |
237 // deduping calls to MoveReadyDelayedTasksToIncomingQueue since it takes a | |
238 // lock. NOTE the order in which these are called matters since the order | |
239 // in which EnqueueTaskLocks is called is respected when choosing which | |
240 // queue to execute a task from. | |
241 if (dedup_set.insert(next_wakeup->second).second) | |
242 next_wakeup->second->MoveReadyDelayedTasksToIncomingQueue(lazy_now); | |
243 delayed_wakeup_multimap_.erase(next_wakeup); | |
244 } | |
245 } | |
246 | |
247 void TaskQueueManager::MaybePostDoWorkOnMainRunner() { | 121 void TaskQueueManager::MaybePostDoWorkOnMainRunner() { |
248 bool on_main_thread = delegate_->BelongsToCurrentThread(); | 122 bool on_main_thread = delegate_->BelongsToCurrentThread(); |
249 if (on_main_thread) { | 123 if (on_main_thread) { |
250 // We only want one pending DoWork posted from the main thread, or we risk | 124 // We only want one pending DoWork posted from the main thread, or we risk |
251 // an explosion of pending DoWorks which could starve out everything else. | 125 // an explosion of pending DoWorks which could starve out everything else. |
252 if (pending_dowork_count_ > 0) { | 126 if (pending_dowork_count_ > 0) { |
253 return; | 127 return; |
254 } | 128 } |
255 pending_dowork_count_++; | 129 pending_dowork_count_++; |
256 delegate_->PostTask(FROM_HERE, decrement_pending_and_do_work_closure_); | 130 delegate_->PostTask(FROM_HERE, decrement_pending_and_do_work_closure_); |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
296 | 170 |
297 // Only run a single task per batch in nested run loops so that we can | 171 // Only run a single task per batch in nested run loops so that we can |
298 // properly exit the nested loop when someone calls RunLoop::Quit(). | 172 // properly exit the nested loop when someone calls RunLoop::Quit(). |
299 if (delegate_->IsNested()) | 173 if (delegate_->IsNested()) |
300 break; | 174 break; |
301 } | 175 } |
302 | 176 |
303 // TODO(alexclarke): Consider refactoring the above loop to terminate only | 177 // TODO(alexclarke): Consider refactoring the above loop to terminate only |
304 // when there's no more work left to be done, rather than posting a | 178 // when there's no more work left to be done, rather than posting a |
305 // continuation task. | 179 // continuation task. |
306 if (!selector_.EnabledWorkQueuesEmpty()) { | 180 if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains()) { |
307 MaybePostDoWorkOnMainRunner(); | 181 MaybePostDoWorkOnMainRunner(); |
308 } else { | 182 } else { |
309 // Tell the task runner we have no more work. | 183 // Tell the task runner we have no more work. |
310 delegate_->OnNoMoreImmediateWork(); | 184 delegate_->OnNoMoreImmediateWork(); |
311 } | 185 } |
312 } | 186 } |
313 | 187 |
| 188 bool TaskQueueManager::TryAdvanceTimeDomains() { |
| 189 bool can_advance = false; |
| 190 for (const scoped_refptr<TimeDomain>& time_domain : time_domains_) { |
| 191 can_advance |= time_domain->MaybeAdvanceTime(); |
| 192 } |
| 193 return can_advance; |
| 194 } |
| 195 |
314 bool TaskQueueManager::SelectQueueToService( | 196 bool TaskQueueManager::SelectQueueToService( |
315 internal::TaskQueueImpl** out_queue) { | 197 internal::TaskQueueImpl** out_queue) { |
316 bool should_run = selector_.SelectQueueToService(out_queue); | 198 bool should_run = selector_.SelectQueueToService(out_queue); |
317 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( | 199 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( |
318 disabled_by_default_tracing_category_, "TaskQueueManager", this, | 200 disabled_by_default_tracing_category_, "TaskQueueManager", this, |
319 AsValueWithSelectorResult(should_run, *out_queue)); | 201 AsValueWithSelectorResult(should_run, *out_queue)); |
320 return should_run; | 202 return should_run; |
321 } | 203 } |
322 | 204 |
323 void TaskQueueManager::DidQueueTask( | 205 void TaskQueueManager::DidQueueTask( |
(...skipping 95 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
419 state->BeginArray("queues"); | 301 state->BeginArray("queues"); |
420 for (auto& queue : queues_) | 302 for (auto& queue : queues_) |
421 queue->AsValueInto(state.get()); | 303 queue->AsValueInto(state.get()); |
422 state->EndArray(); | 304 state->EndArray(); |
423 state->BeginDictionary("selector"); | 305 state->BeginDictionary("selector"); |
424 selector_.AsValueInto(state.get()); | 306 selector_.AsValueInto(state.get()); |
425 state->EndDictionary(); | 307 state->EndDictionary(); |
426 if (should_run) | 308 if (should_run) |
427 state->SetString("selected_queue", selected_queue->GetName()); | 309 state->SetString("selected_queue", selected_queue->GetName()); |
428 | 310 |
429 state->BeginArray("updatable_queue_set"); | 311 state->BeginArray("time_domains"); |
430 for (auto& queue : updatable_queue_set_) | 312 for (auto& time_domain : time_domains_) |
431 state->AppendString(queue->GetName()); | 313 time_domain->AsValueInto(state.get()); |
432 state->EndArray(); | 314 state->EndArray(); |
433 return state; | 315 return state; |
434 } | 316 } |
435 | 317 |
436 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { | 318 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { |
437 DCHECK(main_thread_checker_.CalledOnValidThread()); | 319 DCHECK(main_thread_checker_.CalledOnValidThread()); |
438 // Only schedule DoWork if there's something to do. | 320 // Only schedule DoWork if there's something to do. |
439 if (!queue->work_queue().empty()) | 321 if (!queue->work_queue().empty()) |
440 MaybePostDoWorkOnMainRunner(); | 322 MaybePostDoWorkOnMainRunner(); |
441 } | 323 } |
442 | 324 |
443 } // namespace scheduler | 325 } // namespace scheduler |
OLD | NEW |