Chromium Code Reviews

Side by Side Diff: components/scheduler/base/task_queue_manager.cc

Issue 1461143003: Revert of Adds TimeDomains to the TaskQueueManager (Closed) Base URL: https://chromium.googlesource.com/chromium/src.git@master
Patch Set: Created 5 years, 1 month ago
OLD | NEW
1 // Copyright 2014 The Chromium Authors. All rights reserved. 1 // Copyright 2014 The Chromium Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include "components/scheduler/base/task_queue_manager.h" 5 #include "components/scheduler/base/task_queue_manager.h"
6 6
7 #include <queue> 7 #include <queue>
8 #include <set> 8 #include <set>
9 9
10 #include "base/bind.h" 10 #include "base/bind.h"
11 #include "components/scheduler/base/real_time_domain.h" 11 #include "components/scheduler/base/lazy_now.h"
12 #include "components/scheduler/base/task_queue_impl.h" 12 #include "components/scheduler/base/task_queue_impl.h"
13 #include "components/scheduler/base/task_queue_manager_delegate.h" 13 #include "components/scheduler/base/task_queue_manager_delegate.h"
14 #include "components/scheduler/base/task_queue_selector.h" 14 #include "components/scheduler/base/task_queue_selector.h"
15 #include "components/scheduler/base/task_queue_sets.h" 15 #include "components/scheduler/base/task_queue_sets.h"
16 16
17 namespace {
 18 const int64_t kMaxTimeTicks = std::numeric_limits<int64_t>::max();
19 }
20
17 namespace scheduler { 21 namespace scheduler {
18 22
19 TaskQueueManager::TaskQueueManager( 23 TaskQueueManager::TaskQueueManager(
20 scoped_refptr<TaskQueueManagerDelegate> delegate, 24 scoped_refptr<TaskQueueManagerDelegate> delegate,
21 const char* tracing_category, 25 const char* tracing_category,
22 const char* disabled_by_default_tracing_category, 26 const char* disabled_by_default_tracing_category,
23 const char* disabled_by_default_verbose_tracing_category) 27 const char* disabled_by_default_verbose_tracing_category)
24 : delegate_(delegate), 28 : delegate_(delegate),
25 task_was_run_on_quiescence_monitored_queue_(false), 29 task_was_run_on_quiescence_monitored_queue_(false),
26 pending_dowork_count_(0), 30 pending_dowork_count_(0),
27 work_batch_size_(1), 31 work_batch_size_(1),
28 tracing_category_(tracing_category), 32 tracing_category_(tracing_category),
29 disabled_by_default_tracing_category_( 33 disabled_by_default_tracing_category_(
30 disabled_by_default_tracing_category), 34 disabled_by_default_tracing_category),
31 disabled_by_default_verbose_tracing_category_( 35 disabled_by_default_verbose_tracing_category_(
32 disabled_by_default_verbose_tracing_category), 36 disabled_by_default_verbose_tracing_category),
33 observer_(nullptr), 37 observer_(nullptr),
34 deletion_sentinel_(new DeletionSentinel()), 38 deletion_sentinel_(new DeletionSentinel()),
35 weak_factory_(this) { 39 weak_factory_(this) {
36 DCHECK(delegate->RunsTasksOnCurrentThread()); 40 DCHECK(delegate->RunsTasksOnCurrentThread());
37 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category, 41 TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
38 "TaskQueueManager", this); 42 "TaskQueueManager", this);
39 selector_.SetTaskQueueSelectorObserver(this); 43 selector_.SetTaskQueueSelectorObserver(this);
40 44
41 decrement_pending_and_do_work_closure_ = 45 decrement_pending_and_do_work_closure_ =
42 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true); 46 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
43 do_work_closure_ = 47 do_work_closure_ =
44 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false); 48 base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
45
46 real_time_domain_ =
47 make_scoped_refptr(new RealTimeDomain(delegate.get(), do_work_closure_));
48 RegisterTimeDomain(real_time_domain_);
49 } 49 }
50 50
51 TaskQueueManager::~TaskQueueManager() { 51 TaskQueueManager::~TaskQueueManager() {
52 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_, 52 TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
53 "TaskQueueManager", this); 53 "TaskQueueManager", this);
54 54
55 while (!queues_.empty()) 55 while (!queues_.empty())
56 (*queues_.begin())->UnregisterTaskQueue(); 56 (*queues_.begin())->UnregisterTaskQueue();
57 57
58 selector_.SetTaskQueueSelectorObserver(nullptr); 58 selector_.SetTaskQueueSelectorObserver(nullptr);
59 } 59 }
60 60
61 void TaskQueueManager::RegisterTimeDomain(
62 const scoped_refptr<TimeDomain>& time_domain) {
63 time_domains_.insert(time_domain);
64 }
65
66 void TaskQueueManager::UnregisterTimeDomain(
67 const scoped_refptr<TimeDomain>& time_domain) {
68 time_domains_.erase(time_domain);
69 }
70
71 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue( 61 scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
72 const TaskQueue::Spec& spec) { 62 const TaskQueue::Spec& spec) {
73 TRACE_EVENT1(tracing_category_, 63 TRACE_EVENT1(tracing_category_,
74 "TaskQueueManager::NewTaskQueue", "queue_name", spec.name); 64 "TaskQueueManager::NewTaskQueue", "queue_name", spec.name);
75 DCHECK(main_thread_checker_.CalledOnValidThread()); 65 DCHECK(main_thread_checker_.CalledOnValidThread());
76 TimeDomain* time_domain =
77 spec.time_domain ? spec.time_domain : real_time_domain_.get();
78 DCHECK(time_domains_.find(make_scoped_refptr(time_domain)) !=
79 time_domains_.end());
80 scoped_refptr<internal::TaskQueueImpl> queue( 66 scoped_refptr<internal::TaskQueueImpl> queue(
81 make_scoped_refptr(new internal::TaskQueueImpl( 67 make_scoped_refptr(new internal::TaskQueueImpl(
82 this, time_domain, spec, disabled_by_default_tracing_category_, 68 this, spec, disabled_by_default_tracing_category_,
83 disabled_by_default_verbose_tracing_category_))); 69 disabled_by_default_verbose_tracing_category_)));
84 queues_.insert(queue); 70 queues_.insert(queue);
85 selector_.AddQueue(queue.get()); 71 selector_.AddQueue(queue.get());
86 return queue; 72 return queue;
87 } 73 }
88 74
89 void TaskQueueManager::SetObserver(Observer* observer) { 75 void TaskQueueManager::SetObserver(Observer* observer) {
90 DCHECK(main_thread_checker_.CalledOnValidThread()); 76 DCHECK(main_thread_checker_.CalledOnValidThread());
91 observer_ = observer; 77 observer_ = observer;
92 } 78 }
93 79
94 void TaskQueueManager::UnregisterTaskQueue( 80 void TaskQueueManager::UnregisterTaskQueue(
95 scoped_refptr<internal::TaskQueueImpl> task_queue) { 81 scoped_refptr<internal::TaskQueueImpl> task_queue) {
96 TRACE_EVENT1(tracing_category_, 82 TRACE_EVENT1(tracing_category_,
97 "TaskQueueManager::UnregisterTaskQueue", "queue_name", 83 "TaskQueueManager::UnregisterTaskQueue", "queue_name",
98 task_queue->GetName()); 84 task_queue->GetName());
99 DCHECK(main_thread_checker_.CalledOnValidThread()); 85 DCHECK(main_thread_checker_.CalledOnValidThread());
100 if (observer_) 86 if (observer_)
101 observer_->OnUnregisterTaskQueue(task_queue); 87 observer_->OnUnregisterTaskQueue(task_queue);
102 88
103 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being 89 // Add |task_queue| to |queues_to_delete_| so we can prevent it from being
104 // freed while any of our structures hold a raw pointer to it. 90 // freed while any of our structures hold a raw pointer to it.
105 queues_to_delete_.insert(task_queue); 91 queues_to_delete_.insert(task_queue);
106 queues_.erase(task_queue); 92 queues_.erase(task_queue);
107 selector_.RemoveQueue(task_queue.get()); 93 selector_.RemoveQueue(task_queue.get());
94
 95 // We need to remove |task_queue| from |delayed_wakeup_multimap_|, which is a
96 // little awkward since it's keyed by time. O(n) running time.
97 for (DelayedWakeupMultimap::iterator iter = delayed_wakeup_multimap_.begin();
98 iter != delayed_wakeup_multimap_.end();) {
99 if (iter->second == task_queue.get()) {
100 DelayedWakeupMultimap::iterator temp = iter;
101 iter++;
102 // O(1) amortized.
103 delayed_wakeup_multimap_.erase(temp);
104 } else {
105 iter++;
106 }
107 }
108
109 // |newly_updatable_| might contain |task_queue|; we use
110 // MoveNewlyUpdatableQueuesIntoUpdatableQueueSet to flush it out.
111 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
112 updatable_queue_set_.erase(task_queue.get());
113 }
114
115 base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
116 DCHECK(main_thread_checker_.CalledOnValidThread());
117 bool found_pending_task = false;
118 base::TimeTicks next_pending_delayed_task(
119 base::TimeTicks::FromInternalValue(kMaxTimeTicks));
120 for (auto& queue : queues_) {
121 base::TimeTicks queues_next_pending_delayed_task;
122 if (queue->NextPendingDelayedTaskRunTime(
123 &queues_next_pending_delayed_task)) {
124 found_pending_task = true;
125 next_pending_delayed_task =
126 std::min(next_pending_delayed_task, queues_next_pending_delayed_task);
127 }
128 }
129
130 if (!found_pending_task)
131 return base::TimeTicks();
132
133 DCHECK_NE(next_pending_delayed_task,
134 base::TimeTicks::FromInternalValue(kMaxTimeTicks));
135 return next_pending_delayed_task;
136 }
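
The function above reduces the per-queue wakeup times to a single earliest time, using the largest representable tick value as a sentinel and a default (null) base::TimeTicks to mean "no pending delayed task". Below is a minimal standalone sketch of the same pattern, not part of this CL; the Ticks alias and kNoPendingTask constant are illustrative stand-ins for base::TimeTicks.

#include <algorithm>
#include <cstdint>
#include <limits>
#include <utility>
#include <vector>

using Ticks = int64_t;               // stand-in for base::TimeTicks' internal value
constexpr Ticks kNoPendingTask = 0;  // plays the role of a default TimeTicks()

// Each pair is (has_delayed_task, next_run_time) for one queue.
Ticks EarliestRunTime(const std::vector<std::pair<bool, Ticks>>& queues) {
  bool found_pending_task = false;
  Ticks earliest = std::numeric_limits<Ticks>::max();  // sentinel, like kMaxTimeTicks
  for (const auto& queue : queues) {
    if (!queue.first)
      continue;  // this queue has no delayed work
    found_pending_task = true;
    earliest = std::min(earliest, queue.second);
  }
  return found_pending_task ? earliest : kNoPendingTask;
}
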
137
138 void TaskQueueManager::RegisterAsUpdatableTaskQueue(
139 internal::TaskQueueImpl* queue) {
140 base::AutoLock lock(newly_updatable_lock_);
141 newly_updatable_.push_back(queue);
142 }
143
144 void TaskQueueManager::UnregisterAsUpdatableTaskQueue(
145 internal::TaskQueueImpl* queue) {
146 DCHECK(main_thread_checker_.CalledOnValidThread());
147 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
148 #ifndef NDEBUG
149 {
150 base::AutoLock lock(newly_updatable_lock_);
151 DCHECK(!(updatable_queue_set_.find(queue) == updatable_queue_set_.end() &&
152 std::find(newly_updatable_.begin(), newly_updatable_.end(),
153 queue) != newly_updatable_.end()));
154 }
155 #endif
156 updatable_queue_set_.erase(queue);
157 }
158
159 void TaskQueueManager::MoveNewlyUpdatableQueuesIntoUpdatableQueueSet() {
160 DCHECK(main_thread_checker_.CalledOnValidThread());
161 base::AutoLock lock(newly_updatable_lock_);
162 while (!newly_updatable_.empty()) {
163 updatable_queue_set_.insert(newly_updatable_.back());
164 newly_updatable_.pop_back();
165 }
108 } 166 }
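
RegisterAsUpdatableTaskQueue, UnregisterAsUpdatableTaskQueue and MoveNewlyUpdatableQueuesIntoUpdatableQueueSet together form a hand-off pattern: producers on any thread append to a small lock-protected staging vector, and the owning thread drains it into a set that only it reads. A rough self-contained sketch of that pattern follows; the class and member names are illustrative, and int* stands in for TaskQueueImpl*.

#include <mutex>
#include <set>
#include <vector>

class UpdatableRegistry {
 public:
  // May be called from any thread.
  void Register(int* queue) {
    std::lock_guard<std::mutex> lock(mutex_);
    newly_updatable_.push_back(queue);
  }

  // Must be called on the owning thread; drains the staging vector so the
  // set reflects every registration made so far.
  void Flush() {
    std::lock_guard<std::mutex> lock(mutex_);
    while (!newly_updatable_.empty()) {
      updatable_.insert(newly_updatable_.back());
      newly_updatable_.pop_back();
    }
  }

  // Owning thread only.
  void Unregister(int* queue) {
    Flush();  // |queue| might still be sitting in the staging vector.
    updatable_.erase(queue);
  }

 private:
  std::mutex mutex_;
  std::vector<int*> newly_updatable_;  // guarded by |mutex_|
  std::set<int*> updatable_;           // owning thread only
};

The staging vector keeps the cross-thread critical section short; the set itself never needs the lock because only the owning thread touches it.
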
109 167
110 void TaskQueueManager::UpdateWorkQueues( 168 void TaskQueueManager::UpdateWorkQueues(
111 bool should_trigger_wakeup, 169 bool should_trigger_wakeup,
112 const internal::TaskQueueImpl::Task* previous_task) { 170 const internal::TaskQueueImpl::Task* previous_task) {
171 DCHECK(main_thread_checker_.CalledOnValidThread());
113 TRACE_EVENT0(disabled_by_default_tracing_category_, 172 TRACE_EVENT0(disabled_by_default_tracing_category_,
114 "TaskQueueManager::UpdateWorkQueues"); 173 "TaskQueueManager::UpdateWorkQueues");
174 internal::LazyNow lazy_now(delegate().get());
115 175
116 for (const scoped_refptr<TimeDomain>& time_domain : time_domains_) { 176 // Move any ready delayed tasks into the incoming queues.
117 time_domain->UpdateWorkQueues(should_trigger_wakeup, previous_task); 177 WakeupReadyDelayedQueues(&lazy_now);
178
179 MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
180
181 auto iter = updatable_queue_set_.begin();
182 while (iter != updatable_queue_set_.end()) {
183 internal::TaskQueueImpl* queue = *iter++;
184 // NOTE UpdateWorkQueue() may cause |queue| to erase itself from
185 // |updatable_queue_set_|. This is fine; erasing an element won't invalidate
186 // any other iterator, as long as it doesn't point at the erased element.
187 if (queue->work_queue().empty())
188 queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task);
118 } 189 }
119 } 190 }
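
The loop above depends on a property of node-based containers such as std::set: erasing an element only invalidates iterators to that element, so advancing the iterator before running code that may erase the current entry keeps the traversal valid. A small standalone demonstration of the idiom, with plain ints instead of queue pointers:

#include <iostream>
#include <set>

int main() {
  std::set<int> items = {1, 2, 3, 4, 5};
  auto iter = items.begin();
  while (iter != items.end()) {
    int value = *iter++;     // advance first; |iter| no longer refers to |value|
    if (value % 2 == 0)
      items.erase(value);    // erasing the current element is now safe
    std::cout << value << " ";
  }
  std::cout << "\nremaining: " << items.size() << "\n";  // prints 3
  return 0;
}
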
120 191
192 void TaskQueueManager::ScheduleDelayedWorkTask(
193 scoped_refptr<internal::TaskQueueImpl> queue,
194 base::TimeTicks delayed_run_time) {
195 internal::LazyNow lazy_now(delegate().get());
196 ScheduleDelayedWork(queue.get(), delayed_run_time, &lazy_now);
197 }
198
199 void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl* queue,
200 base::TimeTicks delayed_run_time,
201 internal::LazyNow* lazy_now) {
202 if (!delegate_->BelongsToCurrentThread()) {
203 // NOTE posting a delayed task from a different thread is not expected to be
204 // common. This path is less efficient than it could be because it causes
205 // two main thread tasks to be run. Should this assumption prove false in
206 // the future, we may need to revisit this.
207 delegate_->PostTask(
208 FROM_HERE, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask,
209 weak_factory_.GetWeakPtr(),
210 scoped_refptr<internal::TaskQueueImpl>(queue),
211 delayed_run_time));
212 return;
213 }
214
215 // Make sure there's one (and only one) task posted to |delegate_|
216 // to call |DoWork| at |delayed_run_time|.
217 if (delayed_wakeup_multimap_.find(delayed_run_time) ==
218 delayed_wakeup_multimap_.end()) {
219 base::TimeDelta delay =
220 std::max(base::TimeDelta(), delayed_run_time - lazy_now->Now());
221 delegate_->PostDelayedTask(FROM_HERE, do_work_closure_, delay);
222 }
223 delayed_wakeup_multimap_.insert(std::make_pair(delayed_run_time, queue));
224 }
225
226 void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow* lazy_now) {
227 // Wake up any queues with pending delayed work. Note std::multimap stores
228 // its elements sorted by key, so the begin() iterator points to the earliest
229 // wakeup.
230 std::set<internal::TaskQueueImpl*> dedup_set;
231 while (!delayed_wakeup_multimap_.empty()) {
232 DelayedWakeupMultimap::iterator next_wakeup =
233 delayed_wakeup_multimap_.begin();
234 if (next_wakeup->first > lazy_now->Now())
235 break;
236 // A queue could have any number of delayed tasks pending, so it's worth
237 // deduping calls to MoveReadyDelayedTasksToIncomingQueue since it takes a
238 // lock. NOTE the order in which these are called matters since the order
239 // in which EnqueueTaskLocks is called is respected when choosing which
240 // queue to execute a task from.
241 if (dedup_set.insert(next_wakeup->second).second)
242 next_wakeup->second->MoveReadyDelayedTasksToIncomingQueue(lazy_now);
243 delayed_wakeup_multimap_.erase(next_wakeup);
244 }
245 }
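
ScheduleDelayedWork and WakeupReadyDelayedQueues cooperate through |delayed_wakeup_multimap_|: the multimap keeps wakeups sorted by run time, only the first registration for a given run time posts a delayed DoWork, and draining stops at the first entry that is still in the future. A compact sketch of that bookkeeping, not part of this CL, with a string naming each queue and an int64_t tick count in place of base::TimeTicks:

#include <cstdint>
#include <iostream>
#include <map>
#include <set>
#include <string>

using Ticks = int64_t;

std::multimap<Ticks, std::string> delayed_wakeups;  // sorted by run time

// Record a wakeup; return true if a timer actually needs to be armed
// (i.e. no wakeup was already registered for this run time).
bool ScheduleWakeup(Ticks run_time, const std::string& queue) {
  bool need_timer = delayed_wakeups.find(run_time) == delayed_wakeups.end();
  delayed_wakeups.insert({run_time, queue});
  return need_timer;
}

// Pop every wakeup that is due, waking each queue at most once.
void WakeupReadyQueues(Ticks now) {
  std::set<std::string> already_woken;
  while (!delayed_wakeups.empty()) {
    auto next = delayed_wakeups.begin();  // earliest run time, multimap is sorted
    if (next->first > now)
      break;                              // everything left is still in the future
    if (already_woken.insert(next->second).second)
      std::cout << "waking " << next->second << "\n";
    delayed_wakeups.erase(next);
  }
}

int main() {
  ScheduleWakeup(10, "timer_queue");   // arms a timer
  ScheduleWakeup(10, "idle_queue");    // same run time: no extra timer needed
  ScheduleWakeup(20, "timer_queue");
  WakeupReadyQueues(/*now=*/15);       // wakes timer_queue and idle_queue once each
  return 0;
}
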
246
121 void TaskQueueManager::MaybePostDoWorkOnMainRunner() { 247 void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
122 bool on_main_thread = delegate_->BelongsToCurrentThread(); 248 bool on_main_thread = delegate_->BelongsToCurrentThread();
123 if (on_main_thread) { 249 if (on_main_thread) {
124 // We only want one pending DoWork posted from the main thread, or we risk 250 // We only want one pending DoWork posted from the main thread, or we risk
125 // an explosion of pending DoWorks which could starve out everything else. 251 // an explosion of pending DoWorks which could starve out everything else.
126 if (pending_dowork_count_ > 0) { 252 if (pending_dowork_count_ > 0) {
127 return; 253 return;
128 } 254 }
129 pending_dowork_count_++; 255 pending_dowork_count_++;
130 delegate_->PostTask(FROM_HERE, decrement_pending_and_do_work_closure_); 256 delegate_->PostTask(FROM_HERE, decrement_pending_and_do_work_closure_);
(...skipping 39 matching lines...)
170 296
171 // Only run a single task per batch in nested run loops so that we can 297 // Only run a single task per batch in nested run loops so that we can
172 // properly exit the nested loop when someone calls RunLoop::Quit(). 298 // properly exit the nested loop when someone calls RunLoop::Quit().
173 if (delegate_->IsNested()) 299 if (delegate_->IsNested())
174 break; 300 break;
175 } 301 }
176 302
177 // TODO(alexclarke): Consider refactoring the above loop to terminate only 303 // TODO(alexclarke): Consider refactoring the above loop to terminate only
178 // when there's no more work left to be done, rather than posting a 304 // when there's no more work left to be done, rather than posting a
179 // continuation task. 305 // continuation task.
180 if (!selector_.EnabledWorkQueuesEmpty() || TryAdvanceTimeDomains()) { 306 if (!selector_.EnabledWorkQueuesEmpty()) {
181 MaybePostDoWorkOnMainRunner(); 307 MaybePostDoWorkOnMainRunner();
182 } else { 308 } else {
183 // Tell the task runner we have no more work. 309 // Tell the task runner we have no more work.
184 delegate_->OnNoMoreImmediateWork(); 310 delegate_->OnNoMoreImmediateWork();
185 } 311 }
186 } 312 }
187 313
188 bool TaskQueueManager::TryAdvanceTimeDomains() {
189 bool can_advance = false;
190 for (const scoped_refptr<TimeDomain>& time_domain : time_domains_) {
191 can_advance |= time_domain->MaybeAdvanceTime();
192 }
193 return can_advance;
194 }
195
196 bool TaskQueueManager::SelectQueueToService( 314 bool TaskQueueManager::SelectQueueToService(
197 internal::TaskQueueImpl** out_queue) { 315 internal::TaskQueueImpl** out_queue) {
198 bool should_run = selector_.SelectQueueToService(out_queue); 316 bool should_run = selector_.SelectQueueToService(out_queue);
199 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID( 317 TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
200 disabled_by_default_tracing_category_, "TaskQueueManager", this, 318 disabled_by_default_tracing_category_, "TaskQueueManager", this,
201 AsValueWithSelectorResult(should_run, *out_queue)); 319 AsValueWithSelectorResult(should_run, *out_queue));
202 return should_run; 320 return should_run;
203 } 321 }
204 322
205 void TaskQueueManager::DidQueueTask( 323 void TaskQueueManager::DidQueueTask(
(...skipping 95 matching lines...)
301 state->BeginArray("queues"); 419 state->BeginArray("queues");
302 for (auto& queue : queues_) 420 for (auto& queue : queues_)
303 queue->AsValueInto(state.get()); 421 queue->AsValueInto(state.get());
304 state->EndArray(); 422 state->EndArray();
305 state->BeginDictionary("selector"); 423 state->BeginDictionary("selector");
306 selector_.AsValueInto(state.get()); 424 selector_.AsValueInto(state.get());
307 state->EndDictionary(); 425 state->EndDictionary();
308 if (should_run) 426 if (should_run)
309 state->SetString("selected_queue", selected_queue->GetName()); 427 state->SetString("selected_queue", selected_queue->GetName());
310 428
311 state->BeginArray("time_domains"); 429 state->BeginArray("updatable_queue_set");
312 for (auto& time_domain : time_domains_) 430 for (auto& queue : updatable_queue_set_)
313 time_domain->AsValueInto(state.get()); 431 state->AppendString(queue->GetName());
314 state->EndArray(); 432 state->EndArray();
315 return state; 433 return state;
316 } 434 }
317 435
318 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) { 436 void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
319 DCHECK(main_thread_checker_.CalledOnValidThread()); 437 DCHECK(main_thread_checker_.CalledOnValidThread());
320 // Only schedule DoWork if there's something to do. 438 // Only schedule DoWork if there's something to do.
321 if (!queue->work_queue().empty()) 439 if (!queue->work_queue().empty())
322 MaybePostDoWorkOnMainRunner(); 440 MaybePostDoWorkOnMainRunner();
323 } 441 }
324 442
325 } // namespace scheduler 443 } // namespace scheduler
