Chromium Code Reviews

Side by Side Diff: components/scheduler/base/task_queue_manager.cc

Issue 1432263002: (reland) Adds TimeDomains to the TaskQueueManager (Closed)
Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Addressing some more feedback. Created 5 years, 1 month ago
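
Note for readers of this diff: the manager now drives per-domain wake-ups through the TimeDomain calls visible below (UpdateWorkQueues(), MaybeAdvanceTime(), AsValueInto(), plus a RealTimeDomain built from the delegate and the DoWork closure). Those call sites imply roughly the interface sketched here. This is a reconstruction from this file alone, not the contents of time_domain.h; the ref-counting base, pure-virtual methods, and destructor arrangement are assumptions.

// Sketch only: approximate TimeDomain shape implied by task_queue_manager.cc.
// Anything not visible in this diff is an assumption for illustration.
#include "base/memory/ref_counted.h"
#include "components/scheduler/base/task_queue_impl.h"

namespace base {
namespace trace_event {
class TracedValue;
}  // namespace trace_event
}  // namespace base

namespace scheduler {

class TimeDomain : public base::RefCounted<TimeDomain> {
 public:
  // Moves any delayed tasks that have become ready onto the work queues of
  // the task queues registered with this domain; called for every domain from
  // TaskQueueManager::UpdateWorkQueues().
  virtual void UpdateWorkQueues(
      bool should_trigger_wakeup,
      const internal::TaskQueueImpl::Task* previous_task) = 0;

  // Returns true if the domain could advance its notion of time so that more
  // work may now be runnable; ORed across domains by
  // TaskQueueManager::ATimeDomainCanAdvance() when deciding whether to post
  // another DoWork.
  virtual bool MaybeAdvanceTime() = 0;

  // Emits tracing state; called from AsValueWithSelectorResult().
  virtual void AsValueInto(base::trace_event::TracedValue* state) = 0;

 protected:
  friend class base::RefCounted<TimeDomain>;
  virtual ~TimeDomain() {}
};

}  // namespace scheduler

With a shape like this, the wake-up bookkeeping this patch deletes from the manager (delayed_wakeup_multimap_, ScheduleDelayedWork(), WakeupReadyDelayedQueues(), the updatable-queue set) presumably moves behind each domain, which is why those members and methods disappear from this file.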
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "components/scheduler/base/task_queue_manager.h" 5 #include "components/scheduler/base/task_queue_manager.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <set> 8 #include <set>
9 9
10 #include "base/bind.h" 10 #include "base/bind.h"
11 #include "components/scheduler/base/lazy_now.h" 11 #include "components/scheduler/base/real_time_domain.h"
12 #include "components/scheduler/base/task_queue_impl.h" 12 #include "components/scheduler/base/task_queue_impl.h"
13 #include "components/scheduler/base/task_queue_manager_delegate.h" 13 #include "components/scheduler/base/task_queue_manager_delegate.h"
14 #include "components/scheduler/base/task_queue_selector.h" 14 #include "components/scheduler/base/task_queue_selector.h"
15 #include "components/scheduler/base/task_queue_sets.h" 15 #include "components/scheduler/base/task_queue_sets.h"
16 16
17 namespace { 17 namespace {
18 const int64_t kMaxTimeTicks = std::numeric_limits<int64>::max(); 18 const int64_t kMaxTimeTicks = std::numeric_limits<int64>::max();
19 } 19 }
20 20
21 namespace scheduler { 21 namespace scheduler {
(...skipping 17 matching lines...)
39 weak_factory_(this) { 39 weak_factory_(this) {
40 DCHECK(delegate->RunsTasksOnCurrentThread()); 40 DCHECK(delegate->RunsTasksOnCurrentThread());
41 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, 41 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
42 "TaskQueueManager", this); 42 "TaskQueueManager", this);
43 selector_.SetTaskQueueSelectorObserver(this); 43 selector_.SetTaskQueueSelectorObserver(this);
44 44
45 decrement_pending_and_do_work_closure_ = 45 decrement_pending_and_do_work_closure_ =
46 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); 46 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
47 do_work_closure_ = 47 do_work_closure_ =
48 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); 48 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
49
50 default_time_domain_ =
51 make_scoped_refptr(new RealTimeDomain(delegate.get(), do_work_closure_));
52 time_domains_.insert(default_time_domain_);
49 } 53 }
50 54
51 TaskQueueManager::~TaskQueueManager() { 55 TaskQueueManager::~TaskQueueManager() {
52 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, 56 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
53 "TaskQueueManager", this); 57 "TaskQueueManager", this);
54 58
55 while (!queues_.empty()) 59 while (!queues_.empty())
56 (*queues_.begin())->UnregisterTaskQueue(); 60 (*queues_.begin())->UnregisterTaskQueue();
57 61
58 selector_.SetTaskQueueSelectorObserver(nullptr); 62 selector_.SetTaskQueueSelectorObserver(nullptr);
59 } 63 }
60 64
61 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue( 65 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
62 const TaskQueue::Spec& spec) { 66 const TaskQueue::Spec& spec) {
63 TRACE_EVENT1(tracing_category_, 67 TRACE_EVENT1(tracing_category_,
64 "TaskQueueManager::NewTaskQueue", "queue_name", spec.name); 68 "TaskQueueManager::NewTaskQueue", "queue_name", spec.name);
65 DCHECK(main_thread_checker_.CalledOnValidThread()); 69 DCHECK(main_thread_checker_.CalledOnValidThread());
66 scoped_refptr<internal::TaskQueueImpl> queue( 70 scoped_refptr<internal::TaskQueueImpl> queue(
67 make_scoped_refptr(new internal::TaskQueueImpl( 71 make_scoped_refptr(new internal::TaskQueueImpl(
68 this, spec, disabled_by_default_tracing_category_, 72 this,
73 spec.time_domain ? spec.time_domain : default_time_domain_.get(),
74 spec, disabled_by_default_tracing_category_,
69 disabled_by_default_verbose_tracing_category_))); 75 disabled_by_default_verbose_tracing_category_)));
70 queues_.insert(queue); 76 queues_.insert(queue);
71 selector_.AddQueue(queue.get()); 77 selector_.AddQueue(queue.get());
72 return queue; 78 return queue;
73 } 79 }
74 80
75 void TaskQueueManager::SetObserver(Observer* observer) { 81 void TaskQueueManager::SetObserver(Observer* observer) {
76 DCHECK(main_thread_checker_.CalledOnValidThread()); 82 DCHECK(main_thread_checker_.CalledOnValidThread());
77 observer_ = observer; 83 observer_ = observer;
78 } 84 }
79 85
80 void TaskQueueManager::UnregisterTaskQueue( 86 void TaskQueueManager::UnregisterTaskQueue(
81 scoped_refptr<internal::TaskQueueImpl> task_queue) { 87 scoped_refptr<internal::TaskQueueImpl> task_queue) {
82 TRACE_EVENT1(tracing_category_, 88 TRACE_EVENT1(tracing_category_,
83 "TaskQueueManager::UnregisterTaskQueue", "queue_name", 89 "TaskQueueManager::UnregisterTaskQueue", "queue_name",
84 task_queue->GetName()); 90 task_queue->GetName());
85 DCHECK(main_thread_checker_.CalledOnValidThread()); 91 DCHECK(main_thread_checker_.CalledOnValidThread());
86 if (observer_) 92 if (observer_)
87 observer_->OnUnregisterTaskQueue(task_queue); 93 observer_->OnUnregisterTaskQueue(task_queue);
88 94
89 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being 95 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
90 // freed while any of our structures hold a raw pointer to it. 96 // freed while any of our structures hold a raw pointer to it.
91 queues_to_delete_.insert(task_queue); 97 queues_to_delete_.insert(task_queue);
92 queues_.erase(task_queue); 98 queues_.erase(task_queue);
93 selector_.RemoveQueue(task_queue.get()); 99 selector_.RemoveQueue(task_queue.get());
94
95 // We need to remove |task_queue| from delayed_wakeup_multimap_ which is a
96 // little awkward since it's keyed by time. O(n) running time.
97 for (DelayedWakeupMultimap::iterator iter = delayed_wakeup_multimap_.begin();
98 iter != delayed_wakeup_multimap_.end();) {
99 if (iter->second == task_queue.get()) {
100 DelayedWakeupMultimap::iterator temp = iter;
101 iter++;
102 // O(1) amortized.
103 delayed_wakeup_multimap_.erase(temp);
104 } else {
105 iter++;
106 }
107 }
108
109 // |newly_updatable_| might contain |task_queue|, so we use
110 // MoveNewlyUpdatableQueuesIntoUpdatableQueueSet to flush it out.
111 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
112 updatable_queue_set_.erase(task_queue.get());
113 } 100 }
114 101
115 base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() { 102 base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
116 DCHECK(main_thread_checker_.CalledOnValidThread()); 103 DCHECK(main_thread_checker_.CalledOnValidThread());
117 bool found_pending_task = false; 104 bool found_pending_task = false;
118 base::TimeTicks next_pending_delayed_task( 105 base::TimeTicks next_pending_delayed_task(
119 base::TimeTicks::FromInternalValue(kMaxTimeTicks)); 106 base::TimeTicks::FromInternalValue(kMaxTimeTicks));
120 for (auto& queue : queues_) { 107 for (auto& queue : queues_) {
121 base::TimeTicks queues_next_pending_delayed_task; 108 base::TimeTicks queues_next_pending_delayed_task;
122 if (queue->NextPendingDelayedTaskRunTime( 109 if (queue->NextPendingDelayedTaskRunTime(
123 &queues_next_pending_delayed_task)) { 110 &queues_next_pending_delayed_task)) {
124 found_pending_task = true; 111 found_pending_task = true;
125 next_pending_delayed_task = 112 next_pending_delayed_task =
126 std::min(next_pending_delayed_task, queues_next_pending_delayed_task); 113 std::min(next_pending_delayed_task, queues_next_pending_delayed_task);
127 } 114 }
128 } 115 }
129 116
130 if (!found_pending_task) 117 if (!found_pending_task)
131 return base::TimeTicks(); 118 return base::TimeTicks();
132 119
133 DCHECK_NE(next_pending_delayed_task, 120 DCHECK_NE(next_pending_delayed_task,
134 base::TimeTicks::FromInternalValue(kMaxTimeTicks)); 121 base::TimeTicks::FromInternalValue(kMaxTimeTicks));
135 return next_pending_delayed_task; 122 return next_pending_delayed_task;
136 } 123 }
137 124
138 void TaskQueueManager::RegisterAsUpdatableTaskQueue(
139 internal::TaskQueueImpl* queue) {
140 base::AutoLock lock(newly_updatable_lock_);
141 newly_updatable_.push_back(queue);
142 }
143
144 void TaskQueueManager::UnregisterAsUpdatableTaskQueue(
145 internal::TaskQueueImpl* queue) {
146 DCHECK(main_thread_checker_.CalledOnValidThread());
147 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
148 #ifndef NDEBUG
149 {
150 base::AutoLock lock(newly_updatable_lock_);
151 DCHECK(!(updatable_queue_set_.find(queue) == updatable_queue_set_.end() &&
152 std::find(newly_updatable_.begin(), newly_updatable_.end(),
153 queue) != newly_updatable_.end()));
154 }
155 #endif
156 updatable_queue_set_.erase(queue);
157 }
158
159 void TaskQueueManager::MoveNewlyUpdatableQueuesIntoUpdatableQueueSet() {
160 DCHECK(main_thread_checker_.CalledOnValidThread());
161 base::AutoLock lock(newly_updatable_lock_);
162 while (!newly_updatable_.empty()) {
163 updatable_queue_set_.insert(newly_updatable_.back());
164 newly_updatable_.pop_back();
165 }
166 }
167
168 void TaskQueueManager::UpdateWorkQueues( 125 void TaskQueueManager::UpdateWorkQueues(
169 bool should_trigger_wakeup, 126 bool should_trigger_wakeup,
170 const internal::TaskQueueImpl::Task* previous_task) { 127 const internal::TaskQueueImpl::Task* previous_task) {
171 DCHECK(main_thread_checker_.CalledOnValidThread());
172 TRACE_EVENT0(disabled_by_default_tracing_category_, 128 TRACE_EVENT0(disabled_by_default_tracing_category_,
173 "TaskQueueManager::UpdateWorkQueues"); 129 "TaskQueueManager::UpdateWorkQueues");
174 internal::LazyNow lazy_now(delegate().get());
175 130
176 // Move any ready delayed tasks into the incoming queues. 131 for (const scoped_refptr<TimeDomain>& time_domain : time_domains_) {
177 WakeupReadyDelayedQueues(&lazy_now); 132 time_domain->UpdateWorkQueues(should_trigger_wakeup, previous_task);
178
179 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
180
181 auto iter = updatable_queue_set_.begin();
182 while (iter != updatable_queue_set_.end()) {
183 internal::TaskQueueImpl* queue = *iter++;
184 // NOTE Update work queue may erase itself from |updatable_queue_set_|.
185 // This is fine, erasing an element won't invalidate any iterator, as long
186 // as the iterator isn't the element being deleted.
187 if (queue->work_queue().empty())
188 queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task);
189 } 133 }
190 } 134 }
191 135
192 void TaskQueueManager::ScheduleDelayedWorkTask(
193 scoped_refptr<internal::TaskQueueImpl> queue,
194 base::TimeTicks delayed_run_time) {
195 internal::LazyNow lazy_now(delegate().get());
196 ScheduleDelayedWork(queue.get(), delayed_run_time, &lazy_now);
197 }
198
199 void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl* queue,
200 base::TimeTicks delayed_run_time,
201 internal::LazyNow* lazy_now) {
202 if (!delegate_->BelongsToCurrentThread()) {
203 // NOTE posting a delayed task from a different thread is not expected to be
204 // common. This pathway is less optimal than perhaps it could be because
205 // it causes two main thread tasks to be run. Should this assumption prove
206 // to be false in future, we may need to revisit this.
207 delegate_->PostTask(
208 FROM_HERE, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask,
209 weak_factory_.GetWeakPtr(),
210 scoped_refptr<internal::TaskQueueImpl>(queue),
211 delayed_run_time));
212 return;
213 }
214
215 // Make sure there's one (and only one) task posted to |delegate_|
216 // to call |DelayedDoWork| at |delayed_run_time|.
217 if (delayed_wakeup_multimap_.find(delayed_run_time) ==
218 delayed_wakeup_multimap_.end()) {
219 base::TimeDelta delay =
220 std::max(base::TimeDelta(), delayed_run_time - lazy_now->Now());
221 delegate_->PostDelayedTask(FROM_HERE, do_work_closure_, delay);
222 }
223 delayed_wakeup_multimap_.insert(std::make_pair(delayed_run_time, queue));
224 }
225
226 void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow* lazy_now) {
227 // Wake up any queues with pending delayed work. Note std::multimap stores
228 // the elements sorted by key, so the begin() iterator points to the earliest
229 // queue to wake up.
230 std::set<internal::TaskQueueImpl*> dedup_set;
231 while (!delayed_wakeup_multimap_.empty()) {
232 DelayedWakeupMultimap::iterator next_wakeup =
233 delayed_wakeup_multimap_.begin();
234 if (next_wakeup->first > lazy_now->Now())
235 break;
236 // A queue could have any number of delayed tasks pending so it's worthwhile
237 // deduping calls to MoveReadyDelayedTasksToIncomingQueue since it takes a
238 // lock. NOTE the order in which these are called matters since the order
239 // in which EnqueueTaskLocks is called is respected when choosing which
240 // queue to execute a task from.
241 if (dedup_set.insert(next_wakeup->second).second)
242 next_wakeup->second->MoveReadyDelayedTasksToIncomingQueue(lazy_now);
243 delayed_wakeup_multimap_.erase(next_wakeup);
244 }
245 }
246
247 void TaskQueueManager::MaybePostDoWorkOnMainRunner() { 136 void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
248 bool on_main_thread = delegate_->BelongsToCurrentThread(); 137 bool on_main_thread = delegate_->BelongsToCurrentThread();
249 if (on_main_thread) { 138 if (on_main_thread) {
250 // We only want one pending DoWork posted from the main thread, or we risk 139 // We only want one pending DoWork posted from the main thread, or we risk
251 // an explosion of pending DoWorks which could starve out everything else. 140 // an explosion of pending DoWorks which could starve out everything else.
252 if (pending_dowork_count_ > 0) { 141 if (pending_dowork_count_ > 0) {
253 return; 142 return;
254 } 143 }
255 pending_dowork_count_++; 144 pending_dowork_count_++;
256 delegate_->PostTask(FROM_HERE, decrement_pending_and_do_work_closure_); 145 delegate_->PostTask(FROM_HERE, decrement_pending_and_do_work_closure_);
(...skipping 39 matching lines...)
296 185
297 // Only run a single task per batch in nested run loops so that we can 186 // Only run a single task per batch in nested run loops so that we can
298 // properly exit the nested loop when someone calls RunLoop::Quit(). 187 // properly exit the nested loop when someone calls RunLoop::Quit().
299 if (delegate_->IsNested()) 188 if (delegate_->IsNested())
300 break; 189 break;
301 } 190 }
302 191
303 // TODO(alexclarke): Consider refactoring the above loop to terminate only 192 // TODO(alexclarke): Consider refactoring the above loop to terminate only
304 // when there's no more work left to be done, rather than posting a 193 // when there's no more work left to be done, rather than posting a
305 // continuation task. 194 // continuation task.
306 if (!selector_.EnabledWorkQueuesEmpty()) { 195 if (!selector_.EnabledWorkQueuesEmpty() || ATimeDomainCanAdvance()) {
307 MaybePostDoWorkOnMainRunner(); 196 MaybePostDoWorkOnMainRunner();
308 } else { 197 } else {
309 // Tell the task runner we have no more work. 198 // Tell the task runner we have no more work.
310 delegate_->OnNoMoreImmediateWork(); 199 delegate_->OnNoMoreImmediateWork();
311 } 200 }
312 } 201 }
313 202
203 bool TaskQueueManager::ATimeDomainCanAdvance() {
204 bool can_advance = false;
205 for (const scoped_refptr<TimeDomain>& time_domain : time_domains_) {
206 can_advance |= time_domain->MaybeAdvanceTime();
207 }
208 return can_advance;
209 }
210
314 bool TaskQueueManager::SelectQueueToService( 211 bool TaskQueueManager::SelectQueueToService(
315 internal::TaskQueueImpl** out_queue) { 212 internal::TaskQueueImpl** out_queue) {
316 bool should_run = selector_.SelectQueueToService(out_queue); 213 bool should_run = selector_.SelectQueueToService(out_queue);
317 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( 214 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
318 disabled_by_default_tracing_category_, "TaskQueueManager", this, 215 disabled_by_default_tracing_category_, "TaskQueueManager", this,
319 AsValueWithSelectorResult(should_run, *out_queue)); 216 AsValueWithSelectorResult(should_run, *out_queue));
320 return should_run; 217 return should_run;
321 } 218 }
322 219
323 void TaskQueueManager::DidQueueTask( 220 void TaskQueueManager::DidQueueTask(
(...skipping 95 matching lines...)
419 state->BeginArray("queues"); 316 state->BeginArray("queues");
420 for (auto& queue : queues_) 317 for (auto& queue : queues_)
421 queue->AsValueInto(state.get()); 318 queue->AsValueInto(state.get());
422 state->EndArray(); 319 state->EndArray();
423 state->BeginDictionary("selector"); 320 state->BeginDictionary("selector");
424 selector_.AsValueInto(state.get()); 321 selector_.AsValueInto(state.get());
425 state->EndDictionary(); 322 state->EndDictionary();
426 if (should_run) 323 if (should_run)
427 state->SetString("selected_queue", selected_queue->GetName()); 324 state->SetString("selected_queue", selected_queue->GetName());
428 325
429 state->BeginArray("updatable_queue_set"); 326 state->BeginArray("time_domains");
430 for (auto& queue : updatable_queue_set_) 327 for (auto& time_domain : time_domains_)
431 state->AppendString(queue->GetName()); 328 time_domain->AsValueInto(state.get());
432 state->EndArray(); 329 state->EndArray();
433 return state; 330 return state;
434 } 331 }
435 332
436 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { 333 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
437 DCHECK(main_thread_checker_.CalledOnValidThread()); 334 DCHECK(main_thread_checker_.CalledOnValidThread());
438 // Only schedule DoWork if there's something to do. 335 // Only schedule DoWork if there's something to do.
439 if (!queue->work_queue().empty()) 336 if (!queue->work_queue().empty())
440 MaybePostDoWorkOnMainRunner(); 337 MaybePostDoWorkOnMainRunner();
441 } 338 }
442 339
443 } // namespace scheduler 340 } // namespace scheduler
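
Usage note on the NewTaskQueue() hunk above: a queue is bound to spec.time_domain when the spec supplies one, and to the manager's default RealTimeDomain otherwise. The caller-side sketch below is hypothetical; the exact TaskQueue::Spec construction syntax, the directly assignable time_domain member, and the my_virtual_time_domain object are assumptions for illustration, not part of this CL.

// Hypothetical sketch: opting a queue into a non-default time domain.
// Assumes Spec carries a name plus a settable time_domain pointer, as the
// spec.name and spec.time_domain reads in NewTaskQueue() suggest.
TaskQueue::Spec spec("example_queue");
spec.time_domain = my_virtual_time_domain.get();  // Null means the default RealTimeDomain.
scoped_refptr<internal::TaskQueueImpl> queue =
    task_queue_manager->NewTaskQueue(spec);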