// Copyright 2014 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "content/renderer/scheduler/task_queue_manager.h"

#include <algorithm>
#include <limits>
#include <queue>
#include <set>

#include "base/bind.h"
#include "base/trace_event/trace_event.h"
#include "base/trace_event/trace_event_argument.h"
#include "cc/test/test_now_source.h"
#include "content/renderer/scheduler/nestable_single_thread_task_runner.h"
#include "content/renderer/scheduler/task_queue_selector.h"

namespace {
const int64_t kMaxTimeTicks = std::numeric_limits<int64_t>::max();
}  // namespace

namespace content {
namespace internal {

// Now() is somewhat expensive so it makes sense not to call Now() unless we
// really need to.
class LazyNow {
 public:
  explicit LazyNow(base::TimeTicks now)
      : task_queue_manager_(nullptr), now_(now) {
    DCHECK(!now.is_null());
  }

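  // This constructor defers the Now() call until the time is first needed.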
  explicit LazyNow(TaskQueueManager* task_queue_manager)
      : task_queue_manager_(task_queue_manager) {}

  base::TimeTicks Now() {
    if (now_.is_null())
      now_ = task_queue_manager_->Now();
    return now_;
  }

 private:
  TaskQueueManager* task_queue_manager_;  // NOT OWNED
  base::TimeTicks now_;
};

class TaskQueue : public base::SingleThreadTaskRunner {
 public:
  explicit TaskQueue(TaskQueueManager* task_queue_manager);

  // base::SingleThreadTaskRunner implementation.
  bool RunsTasksOnCurrentThread() const override;
  bool PostDelayedTask(const tracked_objects::Location& from_here,
                       const base::Closure& task,
                       base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NORMAL);
  }

  bool PostNonNestableDelayedTask(const tracked_objects::Location& from_here,
                                  const base::Closure& task,
                                  base::TimeDelta delay) override {
    return PostDelayedTaskImpl(from_here, task, delay, TaskType::NON_NESTABLE);
  }

  bool IsQueueEmpty() const;

  void SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy);
  void PumpQueue();

  bool NextPendingDelayedTaskRunTime(
      base::TimeTicks* next_pending_delayed_task);

  bool UpdateWorkQueue(LazyNow* lazy_now,
                       const base::PendingTask* previous_task);
  base::PendingTask TakeTaskFromWorkQueue();

  void WillDeleteTaskQueueManager();

  base::TaskQueue& work_queue() { return work_queue_; }

  void set_name(const char* name) { name_ = name; }

  void AsValueInto(base::trace_event::TracedValue* state) const;

 private:
  enum class TaskType {
    NORMAL,
    NON_NESTABLE,
  };

  ~TaskQueue() override;

  bool PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                           const base::Closure& task,
                           base::TimeDelta delay,
                           TaskType task_type);

  // Delayed task posted to the underlying run loop, which locks |lock_| and
  // calls MoveReadyDelayedTasksToIncomingQueueLocked to process delayed tasks
  // that need to be run now.
  void MoveReadyDelayedTasksToIncomingQueue();

  // Enqueues any delayed tasks which should be run now on the incoming_queue_
  // and calls ScheduleDelayedWorkLocked to ensure future tasks are scheduled.
  // Must be called with |lock_| locked.
  void MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now);

  // Posts MoveReadyDelayedTasksToIncomingQueue if there isn't already a task
  // posted on the underlying runloop for the next task's scheduled run time.
  void ScheduleDelayedWorkLocked(LazyNow* lazy_now);

  void PumpQueueLocked();
  bool TaskIsOlderThanQueuedTasks(const base::PendingTask* task);
  bool ShouldAutoPumpQueueLocked(const base::PendingTask* previous_task);
  void EnqueueTaskLocked(const base::PendingTask& pending_task);

  void TraceQueueSize(bool is_locked) const;
  static const char* PumpPolicyToString(
      TaskQueueManager::PumpPolicy pump_policy);
  static void QueueAsValueInto(const base::TaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void QueueAsValueInto(const base::DelayedTaskQueue& queue,
                               base::trace_event::TracedValue* state);
  static void TaskAsValueInto(const base::PendingTask& task,
                              base::trace_event::TracedValue* state);

  // This lock protects all members except the work queue and the
  // main_thread_checker_.
  mutable base::Lock lock_;
  base::PlatformThreadId thread_id_;
  TaskQueueManager* task_queue_manager_;
  base::TaskQueue incoming_queue_;
  TaskQueueManager::PumpPolicy pump_policy_;
  const char* name_;
  base::DelayedTaskQueue delayed_task_queue_;
  std::set<base::TimeTicks> in_flight_kick_delayed_tasks_;

  base::ThreadChecker main_thread_checker_;
  base::TaskQueue work_queue_;

  DISALLOW_COPY_AND_ASSIGN(TaskQueue);
};

TaskQueue::TaskQueue(TaskQueueManager* task_queue_manager)
    : thread_id_(base::PlatformThread::CurrentId()),
      task_queue_manager_(task_queue_manager),
      pump_policy_(TaskQueueManager::PumpPolicy::AUTO),
      name_(nullptr) {
}

TaskQueue::~TaskQueue() {
}

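// Called by ~TaskQueueManager to sever the back-pointer and drop any queued
// tasks, which may themselves hold references that need releasing.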
void TaskQueue::WillDeleteTaskQueueManager() {
  base::AutoLock lock(lock_);
  task_queue_manager_ = nullptr;
  delayed_task_queue_ = base::DelayedTaskQueue();
  incoming_queue_ = base::TaskQueue();
  work_queue_ = base::TaskQueue();
}

bool TaskQueue::RunsTasksOnCurrentThread() const {
  base::AutoLock lock(lock_);
  return base::PlatformThread::CurrentId() == thread_id_;
}

bool TaskQueue::PostDelayedTaskImpl(const tracked_objects::Location& from_here,
                                    const base::Closure& task,
                                    base::TimeDelta delay,
                                    TaskType task_type) {
  base::AutoLock lock(lock_);
  if (!task_queue_manager_)
    return false;

  base::PendingTask pending_task(from_here, task, base::TimeTicks(),
                                 task_type != TaskType::NON_NESTABLE);
  task_queue_manager_->DidQueueTask(&pending_task);

  if (delay > base::TimeDelta()) {
    base::TimeTicks now = task_queue_manager_->Now();
    pending_task.delayed_run_time = now + delay;
    delayed_task_queue_.push(pending_task);
    TraceQueueSize(true);
    // If we changed the topmost task, then it is time to reschedule.
    if (delayed_task_queue_.top().task.Equals(pending_task.task)) {
      LazyNow lazy_now(now);
      ScheduleDelayedWorkLocked(&lazy_now);
    }
    return true;
  }
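  // Immediate (non-delayed) tasks go straight onto the incoming queue.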
  EnqueueTaskLocked(pending_task);
  return true;
}

void TaskQueue::MoveReadyDelayedTasksToIncomingQueue() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::AutoLock lock(lock_);
  if (!task_queue_manager_)
    return;

  LazyNow lazy_now(task_queue_manager_);
  MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
}

void TaskQueue::MoveReadyDelayedTasksToIncomingQueueLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Enqueue all delayed tasks that should be running now.
  while (!delayed_task_queue_.empty() &&
         delayed_task_queue_.top().delayed_run_time <= lazy_now->Now()) {
    in_flight_kick_delayed_tasks_.erase(
        delayed_task_queue_.top().delayed_run_time);
    EnqueueTaskLocked(delayed_task_queue_.top());
    delayed_task_queue_.pop();
  }
  TraceQueueSize(true);
  ScheduleDelayedWorkLocked(lazy_now);
}

void TaskQueue::ScheduleDelayedWorkLocked(LazyNow* lazy_now) {
  lock_.AssertAcquired();
  // Any remaining tasks are in the future, so queue a task to kick them.
  if (!delayed_task_queue_.empty()) {
    base::TimeTicks next_run_time = delayed_task_queue_.top().delayed_run_time;
    DCHECK_GT(next_run_time, lazy_now->Now());
    // Make sure we don't have more than one
    // MoveReadyDelayedTasksToIncomingQueue posted for a particular scheduled
    // run time (note it's fine to have multiple ones in flight for distinct
    // run times).
    if (in_flight_kick_delayed_tasks_.find(next_run_time) ==
        in_flight_kick_delayed_tasks_.end()) {
      in_flight_kick_delayed_tasks_.insert(next_run_time);
      base::TimeDelta delay = next_run_time - lazy_now->Now();
      task_queue_manager_->PostDelayedTask(
          FROM_HERE,
          base::Bind(&TaskQueue::MoveReadyDelayedTasksToIncomingQueue, this),
          delay);
    }
  }
}

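// Note: |work_queue_| is only accessed on the main thread, so it can be
// inspected without holding |lock_|; only |incoming_queue_| needs the lock.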
bool TaskQueue::IsQueueEmpty() const {
  if (!work_queue_.empty())
    return false;

  {
    base::AutoLock lock(lock_);
    return incoming_queue_.empty();
  }
}

bool TaskQueue::TaskIsOlderThanQueuedTasks(const base::PendingTask* task) {
  lock_.AssertAcquired();
  // A null task is passed when UpdateWorkQueue is called before any task is
  // run. In this case we don't want to pump an after_wakeup queue, so return
  // true here.
  if (!task)
    return true;

  // Return false if there are no tasks in the incoming queue.
  if (incoming_queue_.empty())
    return false;

  base::PendingTask oldest_queued_task = incoming_queue_.front();
  DCHECK(oldest_queued_task.delayed_run_time.is_null());
  DCHECK(task->delayed_run_time.is_null());

  // Note: the comparison is correct due to the fact that the PendingTask
  // operator inverts its comparison operation in order to work well in a heap
  // based priority queue.
  return oldest_queued_task < *task;
}

bool TaskQueue::ShouldAutoPumpQueueLocked(
    const base::PendingTask* previous_task) {
  lock_.AssertAcquired();
  if (pump_policy_ == TaskQueueManager::PumpPolicy::MANUAL)
    return false;
  if (pump_policy_ == TaskQueueManager::PumpPolicy::AFTER_WAKEUP &&
      TaskIsOlderThanQueuedTasks(previous_task))
    return false;
  if (incoming_queue_.empty())
    return false;
  return true;
}

bool TaskQueue::NextPendingDelayedTaskRunTime(
    base::TimeTicks* next_pending_delayed_task) {
  base::AutoLock lock(lock_);
  if (delayed_task_queue_.empty())
    return false;
  *next_pending_delayed_task = delayed_task_queue_.top().delayed_run_time;
  return true;
}

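// Refills the work queue from the incoming queue if the pump policy allows
// it. Returns true if the work queue ends up non-empty.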
bool TaskQueue::UpdateWorkQueue(LazyNow* lazy_now,
                                const base::PendingTask* previous_task) {
  if (!work_queue_.empty())
    return true;

  {
    base::AutoLock lock(lock_);
    MoveReadyDelayedTasksToIncomingQueueLocked(lazy_now);
    if (!ShouldAutoPumpQueueLocked(previous_task))
      return false;
    work_queue_.Swap(&incoming_queue_);
    TraceQueueSize(true);
    return true;
  }
}

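// Main thread only. The caller must ensure the work queue is non-empty, e.g.
// by checking the result of UpdateWorkQueue().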
base::PendingTask TaskQueue::TakeTaskFromWorkQueue() {
  base::PendingTask pending_task = work_queue_.front();
  work_queue_.pop();
  TraceQueueSize(false);
  return pending_task;
}

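// |is_locked| says whether the caller already holds |lock_|, which is needed
// to read the sizes of |incoming_queue_| and |delayed_task_queue_|.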
void TaskQueue::TraceQueueSize(bool is_locked) const {
  bool is_tracing;
  TRACE_EVENT_CATEGORY_GROUP_ENABLED(
      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), &is_tracing);
  if (!is_tracing || !name_)
    return;
  if (!is_locked)
    lock_.Acquire();
  else
    lock_.AssertAcquired();
  TRACE_COUNTER1(
      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), name_,
      incoming_queue_.size() + work_queue_.size() + delayed_task_queue_.size());
  if (!is_locked)
    lock_.Release();
}

void TaskQueue::EnqueueTaskLocked(const base::PendingTask& pending_task) {
  lock_.AssertAcquired();
  if (!task_queue_manager_)
    return;
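  // In auto-pump mode, a DoWork only needs to be scheduled when the incoming
  // queue goes from empty to non-empty; later tasks are drained by the DoWork
  // that is already pending.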
  if (pump_policy_ == TaskQueueManager::PumpPolicy::AUTO &&
      incoming_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
  incoming_queue_.push(pending_task);

  if (!pending_task.delayed_run_time.is_null()) {
    // Clear the delayed run time because we've already applied the delay
    // before getting here.
    incoming_queue_.back().delayed_run_time = base::TimeTicks();
  }
  TraceQueueSize(true);
}

void TaskQueue::SetPumpPolicy(TaskQueueManager::PumpPolicy pump_policy) {
  base::AutoLock lock(lock_);
  if (pump_policy == TaskQueueManager::PumpPolicy::AUTO &&
      pump_policy_ != TaskQueueManager::PumpPolicy::AUTO) {
    PumpQueueLocked();
  }
  pump_policy_ = pump_policy;
}

void TaskQueue::PumpQueueLocked() {
  lock_.AssertAcquired();
  if (task_queue_manager_) {
    LazyNow lazy_now(task_queue_manager_);
    MoveReadyDelayedTasksToIncomingQueueLocked(&lazy_now);
  }
  while (!incoming_queue_.empty()) {
    work_queue_.push(incoming_queue_.front());
    incoming_queue_.pop();
  }
  if (!work_queue_.empty())
    task_queue_manager_->MaybePostDoWorkOnMainRunner();
}

void TaskQueue::PumpQueue() {
  base::AutoLock lock(lock_);
  PumpQueueLocked();
}

void TaskQueue::AsValueInto(base::trace_event::TracedValue* state) const {
  base::AutoLock lock(lock_);
  state->BeginDictionary();
  if (name_)
    state->SetString("name", name_);
  state->SetString("pump_policy", PumpPolicyToString(pump_policy_));
  state->BeginArray("incoming_queue");
  QueueAsValueInto(incoming_queue_, state);
  state->EndArray();
  state->BeginArray("work_queue");
  QueueAsValueInto(work_queue_, state);
  state->EndArray();
  state->BeginArray("delayed_task_queue");
  QueueAsValueInto(delayed_task_queue_, state);
  state->EndArray();
  state->EndDictionary();
}

// static
const char* TaskQueue::PumpPolicyToString(
    TaskQueueManager::PumpPolicy pump_policy) {
  switch (pump_policy) {
    case TaskQueueManager::PumpPolicy::AUTO:
      return "auto";
    case TaskQueueManager::PumpPolicy::AFTER_WAKEUP:
      return "after_wakeup";
    case TaskQueueManager::PumpPolicy::MANUAL:
      return "manual";
    default:
      NOTREACHED();
      return nullptr;
  }
}

// static
void TaskQueue::QueueAsValueInto(const base::TaskQueue& queue,
                                 base::trace_event::TracedValue* state) {
  base::TaskQueue queue_copy(queue);
  while (!queue_copy.empty()) {
    TaskAsValueInto(queue_copy.front(), state);
    queue_copy.pop();
  }
}

// static
void TaskQueue::QueueAsValueInto(const base::DelayedTaskQueue& queue,
                                 base::trace_event::TracedValue* state) {
  base::DelayedTaskQueue queue_copy(queue);
  while (!queue_copy.empty()) {
    TaskAsValueInto(queue_copy.top(), state);
    queue_copy.pop();
  }
}

// static
void TaskQueue::TaskAsValueInto(const base::PendingTask& task,
                                base::trace_event::TracedValue* state) {
  state->BeginDictionary();
  state->SetString("posted_from", task.posted_from.ToString());
  state->SetInteger("sequence_num", task.sequence_num);
  state->SetBoolean("nestable", task.nestable);
  state->SetBoolean("is_high_res", task.is_high_res);
  state->SetDouble(
      "delayed_run_time",
      (task.delayed_run_time - base::TimeTicks()).InMicroseconds() / 1000.0L);
  state->EndDictionary();
}

}  // namespace internal

TaskQueueManager::TaskQueueManager(
    size_t task_queue_count,
    scoped_refptr<NestableSingleThreadTaskRunner> main_task_runner,
    TaskQueueSelector* selector)
    : main_task_runner_(main_task_runner),
      selector_(selector),
      pending_dowork_count_(0),
      work_batch_size_(1),
      time_source_(nullptr),
      weak_factory_(this) {
  DCHECK(main_task_runner->RunsTasksOnCurrentThread());
  TRACE_EVENT_OBJECT_CREATED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), "TaskQueueManager",
      this);

  task_queue_manager_weak_ptr_ = weak_factory_.GetWeakPtr();
  for (size_t i = 0; i < task_queue_count; i++) {
    scoped_refptr<internal::TaskQueue> queue(
        make_scoped_refptr(new internal::TaskQueue(this)));
    queues_.push_back(queue);
  }

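  // The selector keeps raw pointers to the work queues; it runs on the main
  // thread and can therefore inspect them without taking any locks.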
  std::vector<const base::TaskQueue*> work_queues;
  for (const auto& queue : queues_)
    work_queues.push_back(&queue->work_queue());
  selector_->RegisterWorkQueues(work_queues);
}

TaskQueueManager::~TaskQueueManager() {
  TRACE_EVENT_OBJECT_DELETED_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), "TaskQueueManager",
      this);
  for (auto& queue : queues_)
    queue->WillDeleteTaskQueueManager();
}

internal::TaskQueue* TaskQueueManager::Queue(size_t queue_index) const {
  DCHECK_LT(queue_index, queues_.size());
  return queues_[queue_index].get();
}

scoped_refptr<base::SingleThreadTaskRunner>
TaskQueueManager::TaskRunnerForQueue(size_t queue_index) const {
  return Queue(queue_index);
}

bool TaskQueueManager::IsQueueEmpty(size_t queue_index) const {
  internal::TaskQueue* queue = Queue(queue_index);
  return queue->IsQueueEmpty();
}

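// Returns a null TimeTicks if no queue has a pending delayed task.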
base::TimeTicks TaskQueueManager::NextPendingDelayedTaskRunTime() {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  bool found_pending_task = false;
  base::TimeTicks next_pending_delayed_task(
      base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  for (auto& queue : queues_) {
    base::TimeTicks queues_next_pending_delayed_task;
    if (queue->NextPendingDelayedTaskRunTime(
            &queues_next_pending_delayed_task)) {
      found_pending_task = true;
      next_pending_delayed_task = std::min(next_pending_delayed_task,
                                           queues_next_pending_delayed_task);
    }
  }

  if (!found_pending_task)
    return base::TimeTicks();

  DCHECK_NE(next_pending_delayed_task,
            base::TimeTicks::FromInternalValue(kMaxTimeTicks));
  return next_pending_delayed_task;
}

void TaskQueueManager::SetPumpPolicy(size_t queue_index,
                                     PumpPolicy pump_policy) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->SetPumpPolicy(pump_policy);
}

void TaskQueueManager::PumpQueue(size_t queue_index) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->PumpQueue();
}

bool TaskQueueManager::UpdateWorkQueues(
    const base::PendingTask* previous_task) {
  // TODO(skyostil): This is not efficient when the number of queues grows very
  // large due to the number of locks taken. Consider optimizing when we get
  // there.
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::LazyNow lazy_now(this);
  bool has_work = false;
  for (auto& queue : queues_) {
    has_work |= queue->UpdateWorkQueue(&lazy_now, previous_task);
    if (!queue->work_queue().empty()) {
      // Currently we should not be getting tasks with delayed run times in any
      // of the work queues.
      DCHECK(queue->work_queue().front().delayed_run_time.is_null());
    }
  }
  return has_work;
}

void TaskQueueManager::MaybePostDoWorkOnMainRunner() {
  bool on_main_thread = main_task_runner_->BelongsToCurrentThread();
  if (on_main_thread) {
    // We only want one pending DoWork posted from the main thread, or we risk
    // an explosion of pending DoWorks which could starve out everything else.
    if (pending_dowork_count_ > 0) {
      return;
    }
    pending_dowork_count_++;
  }

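  // Posts from other threads always go through; |pending_dowork_count_| is
  // only read and written on the main thread.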
  main_task_runner_->PostTask(
      FROM_HERE,
      base::Bind(&TaskQueueManager::DoWork, task_queue_manager_weak_ptr_,
                 on_main_thread));
}

void TaskQueueManager::DoWork(bool posted_from_main_thread) {
  if (posted_from_main_thread) {
    pending_dowork_count_--;
    DCHECK_GE(pending_dowork_count_, 0);
  }
  DCHECK(main_thread_checker_.CalledOnValidThread());

  // Pass nullptr to UpdateWorkQueues here to prevent waking up a
  // pump-after-wakeup queue.
  if (!UpdateWorkQueues(nullptr))
    return;

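  // |previous_task| tracks the last task run in this batch so that queue
  // wake-up decisions and task observer notifications can refer back to it.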
  base::PendingTask previous_task((tracked_objects::Location()),
                                  (base::Closure()));
  for (int i = 0; i < work_batch_size_; i++) {
    size_t queue_index;
    if (!SelectWorkQueueToService(&queue_index))
      return;
    // Note that this function won't post another call to DoWork if one is
    // already pending, so it is safe to call it in a loop.
    MaybePostDoWorkOnMainRunner();
    ProcessTaskFromWorkQueue(queue_index, i > 0, &previous_task);

    if (!UpdateWorkQueues(&previous_task))
      return;
  }
}

bool TaskQueueManager::SelectWorkQueueToService(size_t* out_queue_index) {
  bool should_run = selector_->SelectWorkQueueToService(out_queue_index);
  TRACE_EVENT_OBJECT_SNAPSHOT_WITH_ID(
      TRACE_DISABLED_BY_DEFAULT("renderer.scheduler"), "TaskQueueManager", this,
      AsValueWithSelectorResult(should_run, *out_queue_index));
  return should_run;
}

void TaskQueueManager::DidQueueTask(base::PendingTask* pending_task) {
  pending_task->sequence_num = task_sequence_num_.GetNext();
  task_annotator_.DidQueueTask("TaskQueueManager::PostTask", *pending_task);
}

void TaskQueueManager::ProcessTaskFromWorkQueue(
    size_t queue_index,
    bool has_previous_task,
    base::PendingTask* previous_task) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  base::PendingTask pending_task = queue->TakeTaskFromWorkQueue();
  if (!pending_task.nestable && main_task_runner_->IsNested()) {
    // Defer non-nestable work to the main task runner. NOTE these tasks can be
    // arbitrarily delayed so the additional delay should not be a problem.
    main_task_runner_->PostNonNestableTask(pending_task.posted_from,
                                           pending_task.task);
  } else {
    // Suppress "will" task observer notifications for the first and "did"
    // notifications for the last task in the batch to avoid duplicate
    // notifications.
    if (has_previous_task) {
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        DidProcessTask(*previous_task));
      FOR_EACH_OBSERVER(base::MessageLoop::TaskObserver, task_observers_,
                        WillProcessTask(pending_task));
    }
    task_annotator_.RunTask("TaskQueueManager::PostTask",
                            "TaskQueueManager::RunTask", pending_task);
    pending_task.task.Reset();
    *previous_task = pending_task;
  }
}

bool TaskQueueManager::RunsTasksOnCurrentThread() const {
  return main_task_runner_->RunsTasksOnCurrentThread();
}

bool TaskQueueManager::PostDelayedTask(
    const tracked_objects::Location& from_here,
    const base::Closure& task,
    base::TimeDelta delay) {
  DCHECK_GT(delay, base::TimeDelta());
  return main_task_runner_->PostDelayedTask(from_here, task, delay);
}

void TaskQueueManager::SetQueueName(size_t queue_index, const char* name) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  internal::TaskQueue* queue = Queue(queue_index);
  queue->set_name(name);
}

void TaskQueueManager::SetWorkBatchSize(int work_batch_size) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  DCHECK_GE(work_batch_size, 1);
  work_batch_size_ = work_batch_size;
}

void TaskQueueManager::AddTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::MessageLoop::current()->AddTaskObserver(task_observer);
  task_observers_.AddObserver(task_observer);
}

void TaskQueueManager::RemoveTaskObserver(
    base::MessageLoop::TaskObserver* task_observer) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  base::MessageLoop::current()->RemoveTaskObserver(task_observer);
  task_observers_.RemoveObserver(task_observer);
}

void TaskQueueManager::SetTimeSourceForTesting(
    scoped_refptr<cc::TestNowSource> time_source) {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  time_source_ = time_source;
}

base::TimeTicks TaskQueueManager::Now() const {
  return UNLIKELY(time_source_) ? time_source_->Now() : base::TimeTicks::Now();
}

scoped_refptr<base::trace_event::ConvertableToTraceFormat>
TaskQueueManager::AsValueWithSelectorResult(bool should_run,
                                            size_t selected_queue) const {
  DCHECK(main_thread_checker_.CalledOnValidThread());
  scoped_refptr<base::trace_event::TracedValue> state =
      new base::trace_event::TracedValue();
  state->BeginArray("queues");
  for (auto& queue : queues_)
    queue->AsValueInto(state.get());
  state->EndArray();
  state->BeginDictionary("selector");
  selector_->AsValueInto(state.get());
  state->EndDictionary();
  if (should_run)
    state->SetInteger("selected_queue", selected_queue);
  return state;
}

}  // namespace content