// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/scheduler/child/task_queue_manager.h"

#include <stdint.h>

#include <limits>
#include <queue>
#include <set>

#include "base/bind.h"
#include "base/time/default_tick_clock.h"
#include "components/scheduler/child/lazy_now.h"
#include "components/scheduler/child/nestable_single_thread_task_runner.h"
#include "components/scheduler/child/task_queue_impl.h"
#include "components/scheduler/child/task_queue_selector.h"
#include "components/scheduler/child/task_queue_sets.h"

namespace {
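// Sentinel for NextPendingDelayedTaskRunTime(): no real wakeup time compares
// greater than this, so it marks "no pending delayed task found yet".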
const int64_t kMaxTimeTicks = std::numeric_limits<int64_t>::max();
}  // namespace

namespace scheduler {

TaskQueueManager::TaskQueueManager(
    scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner,
    const char* disabled_by_default_tracing_category,
    const char* disabled_by_default_verbose_tracing_category)
    : main_task_runner_(main_task_runner),
      task_was_run_on_quiescence_monitored_queue_(false),
      pending_dowork_count_(0),
      work_batch_size_(1),
      time_source_(new base::DefaultTickClock),
      disabled_by_default_tracing_category_(
          disabled_by_default_tracing_category),
      disabled_by_default_verbose_tracing_category_(
          disabled_by_default_verbose_tracing_category),
      observer_(nullptr),
      deletion_sentinel_(new DeletionSentinel()),
      weak_factory_(this) {
  DCHECK(main_task_runner->RunsTasksOnCurrentThread());
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(disabled_by_default_tracing_category,
                                     "TaskQueueManager", this);
  selector_.SetTaskQueueSelectorObserver(this);

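  // The bool bound into these closures is DoWork()'s
  // |decrement_pending_dowork_count| argument: only DoWorks posted from the
  // main thread are tracked by |pending_dowork_count_|.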
  do_work_from_main_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), true);
  do_work_from_other_thread_closure_ =
      base::Bind(&TaskQueueManager::DoWork, weak_factory_.GetWeakPtr(), false);
  delayed_queue_wakeup_closure_ =
      base::Bind(&TaskQueueManager::DelayedDoWork, weak_factory_.GetWeakPtr());
}

TaskQueueManager::~TaskQueueManager() {
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(disabled_by_default_tracing_category_,
                                     "TaskQueueManager", this);

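  // Each call is expected to remove the queue from |queues_| (via
  // TaskQueueManager::UnregisterTaskQueue), so this loop terminates.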
  while (!queues_.empty())
    (*queues_.begin())->UnregisterTaskQueue();

  selector_.SetTaskQueueSelectorObserver(nullptr);
}

scoped_refptr<internal::TaskQueueImpl> TaskQueueManager::NewTaskQueue(
    const TaskQueue::Spec& spec) {
  TRACE_EVENT1(disabled_by_default_tracing_category_,
               "TaskQueueManager::NewTaskQueue", "queue_name", spec.name);
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<internal::TaskQueueImpl> queue(
      make_scoped_refptr(new internal::TaskQueueImpl(
          this, spec, disabled_by_default_tracing_category_,
          disabled_by_default_verbose_tracing_category_)));
  queues_.insert(queue);
  selector_.AddQueue(queue.get());
  return queue;
}

void TaskQueueManager::SetObserver(Observer* observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  observer_ = observer;
}

void TaskQueueManager::UnregisterTaskQueue(
    scoped_refptr<internal::TaskQueueImpl> task_queue) {
  TRACE_EVENT1(disabled_by_default_tracing_category_,
               "TaskQueueManager::UnregisterTaskQueue",
               "queue_name", task_queue->GetName());
  DCHECK(main_thread_checker_.CalledOnValidThread());
  if (observer_)
    observer_->OnUnregisterTaskQueue(task_queue);

  // Add |task_queue| to |queues_to_delete_| to prevent it from being freed
  // while any of our structures still hold a raw pointer to it.
  queues_to_delete_.insert(task_queue);
  queues_.erase(task_queue);
  selector_.RemoveQueue(task_queue.get());

  // We need to remove |task_queue| from |delayed_wakeup_map_|, which is a
  // little awkward since the map is keyed by time; this scan runs in O(n).
  for (DelayedWakeupMultimap::iterator iter = delayed_wakeup_map_.begin();
       iter != delayed_wakeup_map_.end();) {
    if (iter->second == task_queue.get()) {
      DelayedWakeupMultimap::iterator temp = iter;
      iter++;
      // O(1) amortized.
      delayed_wakeup_map_.erase(temp);
    } else {
      iter++;
    }
  }

  // |newly_updatable_| might contain |task_queue|, so flush it out with
  // MoveNewlyUpdatableQueuesIntoUpdatableQueueSet before erasing.
  MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();
  updatable_queue_set_.erase(task_queue.get());
}

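// Returns a null TimeTicks if no queue has a pending delayed task.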
base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  bool found_pending_task = false;
  base::TimeTicks next_pending_delayed_task(
      base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  for (auto& queue : queues_) {
    base::TimeTicks queues_next_pending_delayed_task;
    if (queue->NextPendingDelayedTaskRunTime(
            &queues_next_pending_delayed_task)) {
      found_pending_task = true;
      next_pending_delayed_task =
          std::min(next_pending_delayed_task, queues_next_pending_delayed_task);
    }
  }

  if (!found_pending_task)
    return base::TimeTicks();

  DCHECK_NE(next_pending_delayed_task,
            base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  return next_pending_delayed_task;
}

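// NOTE this takes a lock and, unlike most methods here, has no thread
// checker, so it can safely be called from any thread.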
void TaskQueueManager::RegisterAsUpdatableTaskQueue(
    internal::TaskQueueImpl* queue) {
  base::AutoLock lock(newly_updatable_lock_);
  newly_updatable_.push_back(queue);
}

void TaskQueueManager::UnregisterAsUpdatableTaskQueue(
    internal::TaskQueueImpl* queue) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  updatable_queue_set_.erase(queue);
}

void TaskQueueManager::MoveNewlyUpdatableQueuesIntoUpdatableQueueSet() {
  base::AutoLock lock(newly_updatable_lock_);
  while (!newly_updatable_.empty()) {
    updatable_queue_set_.insert(newly_updatable_.back());
    newly_updatable_.pop_back();
  }
}

void TaskQueueManager::UpdateWorkQueues(
    bool should_trigger_wakeup,
    const internal::TaskQueueImpl::Task* previous_task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  TRACE_EVENT0(disabled_by_default_tracing_category_,
               "TaskQueueManager::UpdateWorkQueues");
  internal::LazyNow lazy_now(this);

  // Move any ready delayed tasks into the incoming queues.
  WakeupReadyDelayedQueues(&lazy_now);

  // Insert any newly updatable queues into |updatable_queue_set_|.
  MoveNewlyUpdatableQueuesIntoUpdatableQueueSet();

  auto iter = updatable_queue_set_.begin();
  while (iter != updatable_queue_set_.end()) {
    internal::TaskQueueImpl* queue = *iter++;
    // NOTE: UpdateWorkQueue may erase |queue| from |updatable_queue_set_|.
    // This is fine; erasing an element doesn't invalidate any iterators, as
    // long as the iterator doesn't point at the element being erased.
    if (queue->work_queue().empty())
      queue->UpdateWorkQueue(&lazy_now, should_trigger_wakeup, previous_task);
  }
}

void TaskQueueManager::ScheduleDelayedWorkTask(
    scoped_refptr<internal::TaskQueueImpl> queue,
    base::TimeTicks delayed_run_time) {
  internal::LazyNow lazy_now(this);
  ScheduleDelayedWork(queue.get(), delayed_run_time, &lazy_now);
}

void TaskQueueManager::ScheduleDelayedWork(internal::TaskQueueImpl* queue,
                                           base::TimeTicks delayed_run_time,
                                           internal::LazyNow* lazy_now) {
  if (!main_task_runner_->BelongsToCurrentThread()) {
    // NOTE: posting a delayed task from a different thread is not expected to
    // be common. This pathway is less optimal than it could be because it
    // causes two main thread tasks to be run. Should this assumption prove
    // false in the future, we may need to revisit it.
    main_task_runner_->PostTask(
        FROM_HERE, base::Bind(&TaskQueueManager::ScheduleDelayedWorkTask,
                              weak_factory_.GetWeakPtr(),
                              scoped_refptr<internal::TaskQueueImpl>(queue),
                              delayed_run_time));
    return;
  }
  if (delayed_run_time > lazy_now->Now()) {
    // Make sure there's one (and only one) task posted to |main_task_runner_|
    // to call |DelayedDoWork| at |delayed_run_time|.
    if (delayed_wakeup_map_.find(delayed_run_time) ==
        delayed_wakeup_map_.end()) {
      base::TimeDelta delay = delayed_run_time - lazy_now->Now();
      main_task_runner_->PostDelayedTask(FROM_HERE,
                                         delayed_queue_wakeup_closure_, delay);
    }
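    // Record the wakeup even when a wakeup task was already posted for this
    // time, so WakeupReadyDelayedQueues() and UnregisterTaskQueue() know
    // which queues care about it.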
    delayed_wakeup_map_.insert(std::make_pair(delayed_run_time, queue));
  } else {
    WakeupReadyDelayedQueues(lazy_now);
  }
}

void TaskQueueManager::DelayedDoWork() {
  DCHECK(main_thread_checker_.CalledOnValidThread());

  {
    internal::LazyNow lazy_now(this);
    WakeupReadyDelayedQueues(&lazy_now);
  }

  DoWork(false);
}

void TaskQueueManager::WakeupReadyDelayedQueues(internal::LazyNow* lazy_now) {
  // Wake up any queues with pending delayed work. Note that std::multimap
  // stores its elements sorted by key, so the begin() iterator points to the
  // earliest queue to wake up.
  std::set<internal::TaskQueueImpl*> dedup_set;
  while (!delayed_wakeup_map_.empty()) {
    DelayedWakeupMultimap::iterator next_wakeup = delayed_wakeup_map_.begin();
    if (next_wakeup->first > lazy_now->Now())
      break;
    // A queue could have any number of delayed tasks pending, so it's
    // worthwhile deduping calls to MoveReadyDelayedTasksToIncomingQueue since
    // it takes a lock. NOTE the order in which these are called matters,
    // since the order in which EnqueueTaskLocked is called is respected when
    // choosing which queue to execute a task from.
    if (dedup_set.insert(next_wakeup->second).second)
      next_wakeup->second->MoveReadyDelayedTasksToIncomingQueue(lazy_now);
    delayed_wakeup_map_.erase(next_wakeup);
  }
}

void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
  bool on_main_thread = main_task_runner_->BelongsToCurrentThread();
  if (on_main_thread) {
    // We only want one pending DoWork posted from the main thread, or we risk
    // an explosion of pending DoWorks which could starve out everything else.
    if (pending_dowork_count_ > 0) {
      return;
    }
    pending_dowork_count_++;
    main_task_runner_->PostTask(FROM_HERE, do_work_from_main_thread_closure_);
  } else {
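    // DoWorks posted from other threads are not counted, since
    // |pending_dowork_count_| is only accessed on the main thread; a
    // cross-thread post may therefore occasionally produce a redundant
    // DoWork.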
    main_task_runner_->PostTask(FROM_HERE, do_work_from_other_thread_closure_);
  }
}

void TaskQueueManager::DoWork(bool decrement_pending_dowork_count) {
  if (decrement_pending_dowork_count) {
    pending_dowork_count_--;
    DCHECK_GE(pending_dowork_count_, 0);
  }
  DCHECK(main_thread_checker_.CalledOnValidThread());

  queues_to_delete_.clear();

  // Pass false and nullptr to UpdateWorkQueues here to prevent waking up a
  // pump-after-wakeup queue.
  UpdateWorkQueues(false, nullptr);

  internal::TaskQueueImpl::Task previous_task;
  for (int i = 0; i < work_batch_size_; i++) {
    internal::TaskQueueImpl* queue;
    if (!SelectQueueToService(&queue))
      break;

    switch (ProcessTaskFromWorkQueue(queue, &previous_task)) {
      case ProcessTaskResult::DEFERRED:
        // If a task was deferred, try again with another task. Note that this
        // means deferred tasks (i.e. non-nestable tasks) will never trigger
        // queue wake-ups.
        continue;
      case ProcessTaskResult::EXECUTED:
        break;
      case ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED:
        return;  // The TaskQueueManager got deleted, we must bail out.
    }
    bool should_trigger_wakeup = queue->wakeup_policy() ==
                                 TaskQueue::WakeupPolicy::CAN_WAKE_OTHER_QUEUES;
    UpdateWorkQueues(should_trigger_wakeup, &previous_task);

    // Only run a single task per batch in nested run loops so that we can
    // properly exit the nested loop when someone calls RunLoop::Quit().
    if (main_task_runner_->IsNested())
      break;
  }

  // TODO(alexclarke): Consider refactoring the above loop to terminate only
  // when there's no more work left to be done, rather than posting a
  // continuation task.
  if (!selector_.EnabledWorkQueuesEmpty())
    MaybePostDoWorkOnMainRunner();
}

bool TaskQueueManager::SelectQueueToService(
    internal::TaskQueueImpl** out_queue) {
  bool should_run = selector_.SelectQueueToService(out_queue);
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      disabled_by_default_tracing_category_, "TaskQueueManager", this,
      AsValueWithSelectorResult(should_run, *out_queue));
  return should_run;
}

void TaskQueueManager::DidQueueTask(
    const internal::TaskQueueImpl::Task& pending_task) {
  task_annotator_.DidQueueTask("TaskQueueManager::PostTask", pending_task);
}

TaskQueueManager::ProcessTaskResult TaskQueueManager::ProcessTaskFromWorkQueue(
    internal::TaskQueueImpl* queue,
    internal::TaskQueueImpl::Task* out_previous_task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
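  // Take a local ref on the deletion sentinel: if running the task deletes
  // the TaskQueueManager, |protect| will be the only remaining reference
  // (see the HasOneRef() check below).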
  scoped_refptr<DeletionSentinel> protect(deletion_sentinel_);
  // TODO(alexclarke): consider std::move() when allowed.
  internal::TaskQueueImpl::Task pending_task = queue->TakeTaskFromWorkQueue();

  if (queue->GetQuiescenceMonitored())
    task_was_run_on_quiescence_monitored_queue_ = true;

  if (!pending_task.nestable && main_task_runner_->IsNested()) {
    // Defer non-nestable work to the main task runner. NOTE these tasks can
    // be arbitrarily delayed so the additional delay should not be a problem.
    // TODO(skyostil): Figure out a way to not forget which task queue the
    // task is associated with. See http://crbug.com/522843.
    main_task_runner_->PostNonNestableTask(pending_task.posted_from,
                                           pending_task.task);
    return ProcessTaskResult::DEFERRED;
  }

  TRACE_TASK_EXECUTION("TaskQueueManager::ProcessTaskFromWorkQueue",
                       pending_task);
  if (queue->GetShouldNotifyObservers()) {
    FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                      WillProcessTask(pending_task));
    queue->NotifyWillProcessTask(pending_task);
  }
  TRACE_EVENT1(disabled_by_default_tracing_category_,
               "TaskQueueManager::RunTask", "queue", queue->GetName());
  task_annotator_.RunTask("TaskQueueManager::PostTask", pending_task);

  // Detect if the TaskQueueManager just got deleted. If this happens we must
  // not access any member variables after this point.
  if (protect->HasOneRef())
    return ProcessTaskResult::TASK_QUEUE_MANAGER_DELETED;

  if (queue->GetShouldNotifyObservers()) {
    FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                      DidProcessTask(pending_task));
    queue->NotifyDidProcessTask(pending_task);
  }

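  // The closure has already run; release it before copying the task metadata
  // into |out_previous_task|, which is only inspected (e.g. for wakeup policy
  // decisions), never run.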
  pending_task.task.Reset();
  *out_previous_task = pending_task;
  return ProcessTaskResult::EXECUTED;
}

bool TaskQueueManager::RunsTasksOnCurrentThread() const {
  return main_task_runner_->RunsTasksOnCurrentThread();
}

bool TaskQueueManager::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  DCHECK_GE(delay, base::TimeDelta());
  return main_task_runner_->PostDelayedTask(from_here, task, delay);
}

void TaskQueueManager::SetWorkBatchSize(int work_batch_size) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK_GE(work_batch_size, 1);
  work_batch_size_ = work_batch_size;
}

void TaskQueueManager::AddTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  task_observers_.AddObserver(task_observer);
}

void TaskQueueManager::RemoveTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  task_observers_.RemoveObserver(task_observer);
}

void TaskQueueManager::SetTimeSourceForTesting(
    scoped_ptr<base::TickClock> time_source) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  time_source_ = time_source.Pass();
}

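// Returns true if no task has run on a quiescence-monitored queue since the
// last call; also clears the bit.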
bool TaskQueueManager::GetAndClearSystemIsQuiescentBit() {
  bool task_was_run = task_was_run_on_quiescence_monitored_queue_;
  task_was_run_on_quiescence_monitored_queue_ = false;
  return !task_was_run;
}

base::TimeTicks TaskQueueManager::Now() const {
  return time_source_->NowTicks();
}

int TaskQueueManager::GetNextSequenceNumber() {
  return task_sequence_num_.GetNext();
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
TaskQueueManager::AsValueWithSelectorResult(
    bool should_run,
    internal::TaskQueueImpl* selected_queue) const {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();
  state->BeginArray("queues");
  for (auto& queue : queues_)
    queue->AsValueInto(state.get());
  state->EndArray();
  state->BeginDictionary("selector");
  selector_.AsValueInto(state.get());
  state->EndDictionary();
  if (should_run)
    state->SetString("selected_queue", selected_queue->GetName());

  state->BeginArray("updatable_queue_set");
  for (auto& queue : updatable_queue_set_)
    state->AppendString(queue->GetName());
  state->EndArray();
  return state;
}

void TaskQueueManager::OnTaskQueueEnabled(internal::TaskQueueImpl* queue) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  // Only schedule DoWork if there's something to do.
  if (!queue->work_queue().empty())
    MaybePostDoWorkOnMainRunner();
}

}  // namespace scheduler